Full Code of WenbinLee/DN4 for AI

master 440857b23a5b cached
81 files
9.8 MB
2.6M tokens
199 symbols
1 requests
Copy disabled (too large) Download .txt
Showing preview only (10,330K chars total). Download the full file to get everything.
Repository: WenbinLee/DN4
Branch: master
Commit: 440857b23a5b
Files: 81
Total size: 9.8 MB

Directory structure:
gitextract_je04xax4/

├── DN4_2019_Version/
│   ├── DN4_Test_5way1shot.py
│   ├── DN4_Test_5way5shot.py
│   ├── DN4_Train_5way1shot.py
│   ├── DN4_Train_5way1shot_DA.py
│   ├── DN4_Train_5way1shot_Resnet.py
│   ├── DN4_Train_5way5shot.py
│   ├── DN4_Train_5way5shot_DA.py
│   ├── DN4_Train_5way5shot_Resnet.py
│   ├── LICENSE
│   ├── README.md
│   ├── dataset/
│   │   ├── CubBird_prepare_csv.py
│   │   ├── CubBirds/
│   │   │   ├── test.csv
│   │   │   ├── train.csv
│   │   │   └── val.csv
│   │   ├── StanforCar_prepare_csv.py
│   │   ├── StanfordCars/
│   │   │   ├── test.csv
│   │   │   ├── train.csv
│   │   │   └── val.csv
│   │   ├── StanfordDog_prepare_csv.py
│   │   ├── StanfordDogs/
│   │   │   ├── test.csv
│   │   │   ├── train.csv
│   │   │   └── val.csv
│   │   ├── datasets_csv.py
│   │   └── miniImageNet/
│   │       ├── test.csv
│   │       ├── train.csv
│   │       └── val.csv
│   ├── models/
│   │   └── network.py
│   └── results/
│       ├── DN4_miniImageNet_Conv64F_5Way_1Shot_K3/
│       │   ├── Test_resutls.txt
│       │   └── opt_resutls.txt
│       ├── DN4_miniImageNet_Conv64F_5Way_5Shot_K3/
│       │   ├── Test_resutls.txt
│       │   └── opt_resutls.txt
│       ├── DN4_miniImageNet_ResNet256F_5Way_1Shot_K3/
│       │   ├── Test_resutls.txt
│       │   └── opt_resutls.txt
│       └── DN4_miniImageNet_ResNet256F_5Way_5Shot_K3/
│           ├── Test_resutls.txt
│           └── opt_resutls.txt
├── LICENSE
├── README.md
├── Test_DN4.py
├── Train_DN4.py
├── dataset/
│   ├── CubBirds/
│   │   ├── test.csv
│   │   ├── train.csv
│   │   └── val.csv
│   ├── Prepare_csv_CubBird.py
│   ├── Prepare_csv_StanfordCar.py
│   ├── Prepare_csv_StanfordDog.py
│   ├── StanfordCars/
│   │   ├── test.csv
│   │   ├── train.csv
│   │   └── val.csv
│   ├── StanfordDogs/
│   │   ├── test.csv
│   │   ├── train.csv
│   │   └── val.csv
│   ├── general_dataloader.py
│   └── miniImageNet/
│       ├── test.csv
│       ├── train.csv
│       └── val.csv
├── models/
│   ├── backbone.py
│   ├── classifier.py
│   └── network.py
├── results/
│   ├── SGD_Cosine_Lr0.01_DN4_ResNet12_Epoch_30_miniImageNet_84_84_5Way_1Shot/
│   │   ├── Test_results.txt
│   │   ├── opt_results.txt
│   │   ├── test_loss.txt
│   │   ├── train_loss.txt
│   │   └── val_loss.txt
│   ├── SGD_Cosine_Lr0.01_DN4_ResNet12_Epoch_30_miniImageNet_84_84_5Way_5Shot/
│   │   ├── Test_results.txt
│   │   ├── opt_results.txt
│   │   ├── test_loss.txt
│   │   ├── train_loss.txt
│   │   └── val_loss.txt
│   ├── SGD_Cosine_Lr0.05_DN4_Conv64F_Local_Epoch_30_miniImageNet_84_84_5Way_1Shot/
│   │   ├── Test_results.txt
│   │   ├── Test_results_New.txt
│   │   ├── opt_results.txt
│   │   ├── test_loss.txt
│   │   ├── train_loss.txt
│   │   └── val_loss.txt
│   ├── SGD_Cosine_Lr0.05_DN4_Conv64F_Local_Epoch_30_miniImageNet_84_84_5Way_5Shot/
│   │   ├── Test_results.txt
│   │   ├── opt_results.txt
│   │   ├── test_loss.txt
│   │   ├── train_loss.txt
│   │   └── val_loss.txt
│   └── test
└── utils.py

================================================
FILE CONTENTS
================================================

================================================
FILE: DN4_2019_Version/DN4_Test_5way1shot.py
================================================
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Author: Wenbin Li (liwenbin.nju@gmail.com)
Date: April 9, 2019
Version: V0

Citation: 
@inproceedings{li2019DN4,
  title={Revisiting Local Descriptor based Image-to-Class Measure for Few-shot Learning},
  author={Li, Wenbin and Wang, Lei and Xu, Jinglin and Huo, Jing and Gao Yang and Luo, Jiebo},
  booktitle={CVPR},
  year={2019}
}
"""


from __future__ import print_function
import argparse
import os
import random
import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import grad
import time
from torch import autograd
from PIL import ImageFile
import scipy as sp
import scipy.stats
import pdb


# ============================ Data & Networks =====================================
from dataset.datasets_csv import Imagefolder_csv
import models.network as DN4Net
# ==================================================================================


# Tolerate truncated/corrupt image files instead of raising while decoding.
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Enumerate CUDA devices in PCI bus order and expose only GPU 0.
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0'


# Load the pre-trained model
# Checkpoint produced by the matching 5-way 1-shot training script.
model_trained = './results/DN4_miniImageNet_Conv64F_5Way_1Shot_K3/model_best.pth.tar'


# Command-line options; the defaults reproduce the paper's miniImageNet
# 5-way 1-shot test configuration.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_dir', default='/Datasets/miniImageNet--ravi', help='/miniImageNet')
parser.add_argument('--data_name', default='miniImageNet', help='miniImageNet|StanfordDog|StanfordCar|CubBird')
parser.add_argument('--mode', default='test', help='train|val|test')
parser.add_argument('--outf', default='./results/DN4')
parser.add_argument('--resume', default=model_trained, type=str, help='path to the lastest checkpoint (default: none)')
parser.add_argument('--basemodel', default='Conv64F', help='Conv64F|ResNet256F')
parser.add_argument('--workers', type=int, default=8)
#  Few-shot parameters  #
parser.add_argument('--imageSize', type=int, default=84)
parser.add_argument('--episodeSize', type=int, default=1, help='the mini-batch size of training')
parser.add_argument('--testepisodeSize', type=int, default=1, help='one episode is taken as a mini-batch')
parser.add_argument('--epochs', type=int, default=30, help='the total number of training epoch')
parser.add_argument('--episode_train_num', type=int, default=10000, help='the total number of training episodes')
parser.add_argument('--episode_val_num', type=int, default=1000, help='the total number of evaluation episodes')
parser.add_argument('--episode_test_num', type=int, default=600, help='the total number of testing episodes')
parser.add_argument('--way_num', type=int, default=5, help='the number of way/class')
parser.add_argument('--shot_num', type=int, default=1, help='the number of shot')
parser.add_argument('--query_num', type=int, default=15, help='the number of queries')
parser.add_argument('--neighbor_k', type=int, default=3, help='the number of k-nearest neighbors')
parser.add_argument('--lr', type=float, default=0.005, help='learning rate, default=0.005')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', default=True, help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='the number of gpus')
parser.add_argument('--nc', type=int, default=3, help='input image channels')
# NOTE(review): clamp_lower/clamp_upper appear unused in this script.
parser.add_argument('--clamp_lower', type=float, default=-0.01)
parser.add_argument('--clamp_upper', type=float, default=0.01)
parser.add_argument('--print_freq', '-p', default=100, type=int, metavar='N', help='print frequency (default: 100)')
opt = parser.parse_args()
opt.cuda = True   # CUDA is forced on regardless of the --cuda flag
cudnn.benchmark = True



# ======================================= Define functions =============================================
def validate(val_loader, model, criterion, epoch_index, F_txt):
	"""Evaluate the model over one pass of episodes from `val_loader`.

	Args:
		val_loader: yields (query_images, query_targets, support_images, support_targets)
			per episode.
		model: DN4 network taking (query batch, list of per-class support batches).
		criterion: classification loss, computed for logging only.
		epoch_index: epoch number of the loaded checkpoint (logging only).
		F_txt: open text file that mirrors the console log.

	Returns:
		(top1.avg, accuracies): average top-1 precision over all episodes, and
		the list of per-episode top-1 precisions (used for confidence intervals).
	"""
	batch_time = AverageMeter()
	losses = AverageMeter()
	top1 = AverageMeter()


	# switch to evaluate mode
	model.eval()
	accuracies = []


	end = time.time()
	# Inference only: disable autograd so no computation graphs are built
	# during evaluation (the original code tracked gradients for no benefit).
	with torch.no_grad():
		for episode_index, (query_images, query_targets, support_images, support_targets) in enumerate(val_loader):

			# Concatenate the per-episode query images into one batch.
			query_images = torch.cat(query_images, 0)
			input_var1 = query_images.cuda()


			# Build one concatenated tensor per support class.
			input_var2 = []
			for i in range(len(support_images)):
				temp_support = support_images[i]
				temp_support = torch.cat(temp_support, 0)
				temp_support = temp_support.cuda()
				input_var2.append(temp_support)


			# Deal with the targets
			target = torch.cat(query_targets, 0)
			target = target.cuda()

			# Calculate the output 
			output = model(input_var1, input_var2)
			loss = criterion(output, target)


			# measure accuracy and record loss
			prec1, _ = accuracy(output, target, topk=(1, 3))
			losses.update(loss.item(), query_images.size(0))
			top1.update(prec1[0], query_images.size(0))
			accuracies.append(prec1)


			# measure elapsed time
			batch_time.update(time.time() - end)
			end = time.time()


			#============== print the intermediate results ==============#
			if episode_index % opt.print_freq == 0 and episode_index != 0:

				print('Test-({0}): [{1}/{2}]\t'
					'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
					'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
					'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
						epoch_index, episode_index, len(val_loader), batch_time=batch_time, loss=losses, top1=top1))

				print('Test-({0}): [{1}/{2}]\t'
					'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
					'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
					'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
						epoch_index, episode_index, len(val_loader), batch_time=batch_time, loss=losses, top1=top1), file=F_txt)

	# `best_prec1` is the module-level global restored from the checkpoint.
	print(' * Prec@1 {top1.avg:.3f} Best_prec1 {best_prec1:.3f}'.format(top1=top1, best_prec1=best_prec1))
	print(' * Prec@1 {top1.avg:.3f} Best_prec1 {best_prec1:.3f}'.format(top1=top1, best_prec1=best_prec1), file=F_txt)

	return top1.avg, accuracies


class AverageMeter(object):
	"""Tracks the most recent value plus a running, count-weighted average."""

	def __init__(self):
		self.reset()

	def reset(self):
		"""Zero out all accumulated statistics."""
		self.val = 0
		self.sum = 0
		self.count = 0
		self.avg = 0

	def update(self, val, n=1):
		"""Record `val` observed `n` times and refresh the running average."""
		self.val = val
		self.count += n
		self.sum = self.sum + val * n
		self.avg = self.sum / self.count



def accuracy(output, target, topk=(1,)):
	"""Compute the precision@k for the specified values of k.

	Args:
		output: (batch, num_classes) score tensor.
		target: (batch,) ground-truth class indices.
		topk: tuple of k values to evaluate.

	Returns:
		List of 1-element tensors, one per k, each holding precision@k in percent.
	"""
	with torch.no_grad():
		maxk = max(topk)
		batch_size = target.size(0)

		# Top-maxk predicted class indices per sample, transposed to (maxk, batch)
		# so that row k-1 holds each sample's k-th guess.
		_, pred = output.topk(maxk, 1, True, True)
		pred = pred.t()
		correct = pred.eq(target.view(1, -1).expand_as(pred))

		res = []
		for k in topk:
			# Use .reshape instead of .view: the sliced tensor is not guaranteed
			# to be contiguous, and .view raises on non-contiguous input.
			correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
			res.append(correct_k.mul_(100.0 / batch_size))
		return res


def mean_confidence_interval(data, confidence=0.95):
	"""Return the mean and half-width of a Student-t confidence interval.

	Args:
		data: list of 1-element accuracy tensors (as produced by accuracy()).
		confidence: two-sided confidence level (default 0.95).

	Returns:
		(m, h): scalar mean and array half-width; the interval is m +/- h.
	"""
	a = [1.0*np.array(data[i].cpu()) for i in range(len(data))]
	n = len(a)
	m, se = np.mean(a), scipy.stats.sem(a)
	# Use the public t.ppf API: the private `_ppf` helper used by the original
	# code was removed from modern SciPy releases.
	h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)
	return m, h



# ======================================== Settings of path ============================================
# saving path
# The output directory name encodes dataset, backbone and episode settings,
# e.g. ./results/DN4_miniImageNet_Conv64F_5Way_1Shot_K3.
opt.outf = opt.outf+'_'+opt.data_name+'_'+str(opt.basemodel)+'_'+str(opt.way_num)+'Way_'+str(opt.shot_num)+'Shot'+'_K'+str(opt.neighbor_k)

if not os.path.exists(opt.outf):
	os.makedirs(opt.outf)

if torch.cuda.is_available() and not opt.cuda:
	print("WARNING: You have a CUDA device, so you should probably run with --cuda")

# save the opt and results to a txt file
# NOTE(review): 'Test_resutls.txt' is misspelled but matches the existing
# result folders in this repo, so it is kept as-is.
txt_save_path = os.path.join(opt.outf, 'Test_resutls.txt')
F_txt = open(txt_save_path, 'a+')
print(opt)
print(opt, file=F_txt)



# ========================================== Model Config ===============================================
ngpu = int(opt.ngpu)
# `global` at module level is a no-op; these names are already module globals
# that validate() reads.
global best_prec1, epoch_index
best_prec1 = 0
epoch_index = 0

# Build the DN4 network (backbone + image-to-class k-NN classification head).
model = DN4Net.define_DN4Net(which_model=opt.basemodel, num_classes=opt.way_num, neighbor_k=opt.neighbor_k, norm='batch', 
	init_type='normal', use_gpu=opt.cuda)

# define loss function (criterion) and optimizer
# The optimizer exists only so the checkpoint's optimizer state can be
# restored below; no training happens in this script.
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(opt.beta1, 0.9))


# optionally resume from a checkpoint
if opt.resume:
	if os.path.isfile(opt.resume):
		print("=> loading checkpoint '{}'".format(opt.resume))
		checkpoint = torch.load(opt.resume)
		epoch_index = checkpoint['epoch_index']
		best_prec1 = checkpoint['best_prec1']
		model.load_state_dict(checkpoint['state_dict'])
		optimizer.load_state_dict(checkpoint['optimizer'])
		print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']))
		print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']), file=F_txt)
	else:
		print("=> no checkpoint found at '{}'".format(opt.resume))
		print("=> no checkpoint found at '{}'".format(opt.resume), file=F_txt)

if opt.ngpu > 1:
	model = nn.DataParallel(model, range(opt.ngpu))

# print the architecture of the network
print(model) 
print(model, file=F_txt) 




# ============================================ Testing phase ========================================
print('\n............Start testing............')
start_time = time.time()
repeat_num = 5       # repeat running the testing code several times


# Accumulators across rounds: running sum of round means, per-round confidence
# half-widths, and the concatenation of all per-episode accuracies.
total_accuracy = 0.0
total_h = np.zeros(repeat_num)
total_accuracy_vector = []
for r in range(repeat_num):
	print('===================================== Round %d =====================================' %r)
	print('===================================== Round %d =====================================' %r, file=F_txt)

	# ======================================= Folder of Datasets =======================================
	
	# image transform & normalization
	ImgTransform = transforms.Compose([
			transforms.Resize((opt.imageSize, opt.imageSize)),
			transforms.ToTensor(),
			transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
			])

	# A fresh episodic test set is sampled for each round.
	testset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode=opt.mode, image_size=opt.imageSize, transform=ImgTransform,
		episode_num=opt.episode_test_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)
	print('Testset: %d-------------%d' %(len(testset), r), file=F_txt)



	# ========================================== Load Datasets =========================================
	test_loader = torch.utils.data.DataLoader(
		testset, batch_size=opt.testepisodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		) 


	# =========================================== Evaluation ==========================================
	prec1, accuracies = validate(test_loader, model, criterion, epoch_index, F_txt)


	# 95% confidence interval over this round's per-episode accuracies.
	test_accuracy, h = mean_confidence_interval(accuracies)
	print("Test accuracy", test_accuracy, "h", h[0])
	print("Test accuracy", test_accuracy, "h", h[0], file=F_txt)
	total_accuracy += test_accuracy
	total_accuracy_vector.extend(accuracies)
	total_h[r] = h


# Final mean accuracy / interval computed over all rounds' episodes combined.
aver_accuracy, _ = mean_confidence_interval(total_accuracy_vector)
print("Aver_accuracy:", aver_accuracy, "Aver_h", total_h.mean())
print("Aver_accuracy:", aver_accuracy, "Aver_h", total_h.mean(), file=F_txt)
F_txt.close()

# ============================================== Testing End ==========================================


================================================
FILE: DN4_2019_Version/DN4_Test_5way5shot.py
================================================
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Author: Wenbin Li (liwenbin.nju@gmail.com)
Date: April 9, 2019
Version: V0

Citation: 
@inproceedings{li2019DN4,
  title={Revisiting Local Descriptor based Image-to-Class Measure for Few-shot Learning},
  author={Li, Wenbin and Wang, Lei and Xu, Jinglin and Huo, Jing and Gao Yang and Luo, Jiebo},
  booktitle={CVPR},
  year={2019}
}
"""


from __future__ import print_function
import argparse
import os
import random
import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import grad
import time
from torch import autograd
from PIL import ImageFile
import scipy as sp
import scipy.stats
import pdb


# ============================ Data & Networks =====================================
from dataset.datasets_csv import Imagefolder_csv
import models.network as DN4Net
# ==================================================================================


# Tolerate truncated/corrupt image files instead of raising while decoding.
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Enumerate CUDA devices in PCI bus order and expose only GPU 0.
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0'


# Load the pre-trained model
# Checkpoint produced by the matching 5-way 5-shot training script.
model_trained = './results/DN4_miniImageNet_Conv64F_5Way_5Shot_K3/model_best.pth.tar'


# Command-line options; the defaults reproduce the paper's miniImageNet
# 5-way 5-shot test configuration.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_dir', default='/Datasets/miniImageNet--ravi', help='/miniImageNet')
parser.add_argument('--data_name', default='miniImageNet', help='miniImageNet|StanfordDog|StanfordCar|CubBird')
parser.add_argument('--mode', default='test', help='train|val|test')
parser.add_argument('--outf', default='./results/DN4')
parser.add_argument('--resume', default=model_trained, type=str, help='path to the lastest checkpoint (default: none)')
parser.add_argument('--basemodel', default='Conv64F', help='Conv64F|ResNet256F')
parser.add_argument('--workers', type=int, default=8)
#  Few-shot parameters  #
parser.add_argument('--imageSize', type=int, default=84)
parser.add_argument('--episodeSize', type=int, default=1, help='the mini-batch size of training')
parser.add_argument('--testepisodeSize', type=int, default=1, help='one episode is taken as a mini-batch')
parser.add_argument('--epochs', type=int, default=30, help='the total number of training epoch')
parser.add_argument('--episode_train_num', type=int, default=10000, help='the total number of training episodes')
parser.add_argument('--episode_val_num', type=int, default=1000, help='the total number of evaluation episodes')
parser.add_argument('--episode_test_num', type=int, default=600, help='the total number of testing episodes')
parser.add_argument('--way_num', type=int, default=5, help='the number of way/class')
parser.add_argument('--shot_num', type=int, default=5, help='the number of shot')
parser.add_argument('--query_num', type=int, default=15, help='the number of queries')
parser.add_argument('--neighbor_k', type=int, default=3, help='the number of k-nearest neighbors')
parser.add_argument('--lr', type=float, default=0.005, help='learning rate, default=0.005')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', default=True, help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='the number of gpus')
parser.add_argument('--nc', type=int, default=3, help='input image channels')
# NOTE(review): clamp_lower/clamp_upper appear unused in this script.
parser.add_argument('--clamp_lower', type=float, default=-0.01)
parser.add_argument('--clamp_upper', type=float, default=0.01)
parser.add_argument('--print_freq', '-p', default=100, type=int, metavar='N', help='print frequency (default: 100)')
opt = parser.parse_args()
opt.cuda = True   # CUDA is forced on regardless of the --cuda flag
cudnn.benchmark = True



# ======================================= Define functions =============================================
def validate(val_loader, model, criterion, epoch_index, F_txt):
	"""Evaluate the model over one pass of episodes from `val_loader`.

	Args:
		val_loader: yields (query_images, query_targets, support_images, support_targets)
			per episode.
		model: DN4 network taking (query batch, list of per-class support batches).
		criterion: classification loss, computed for logging only.
		epoch_index: epoch number of the loaded checkpoint (logging only).
		F_txt: open text file that mirrors the console log.

	Returns:
		(top1.avg, accuracies): average top-1 precision over all episodes, and
		the list of per-episode top-1 precisions (used for confidence intervals).
	"""
	batch_time = AverageMeter()
	losses = AverageMeter()
	top1 = AverageMeter()


	# switch to evaluate mode
	model.eval()
	accuracies = []


	end = time.time()
	# Inference only: disable autograd so no computation graphs are built
	# during evaluation (the original code tracked gradients for no benefit).
	with torch.no_grad():
		for episode_index, (query_images, query_targets, support_images, support_targets) in enumerate(val_loader):

			# Concatenate the per-episode query images into one batch.
			query_images = torch.cat(query_images, 0)
			input_var1 = query_images.cuda()


			# Build one concatenated tensor per support class.
			input_var2 = []
			for i in range(len(support_images)):
				temp_support = support_images[i]
				temp_support = torch.cat(temp_support, 0)
				temp_support = temp_support.cuda()
				input_var2.append(temp_support)


			# Deal with the target
			target = torch.cat(query_targets, 0)
			target = target.cuda()

			# Calculate the output 
			output = model(input_var1, input_var2)
			loss = criterion(output, target)


			# measure accuracy and record loss
			prec1, _ = accuracy(output, target, topk=(1, 3))
			losses.update(loss.item(), query_images.size(0))
			top1.update(prec1[0], query_images.size(0))
			accuracies.append(prec1)


			# measure elapsed time
			batch_time.update(time.time() - end)
			end = time.time()


			#============== print the intermediate results ==============#
			if episode_index % opt.print_freq == 0 and episode_index != 0:

				print('Test-({0}): [{1}/{2}]\t'
					'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
					'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
					'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
						epoch_index, episode_index, len(val_loader), batch_time=batch_time, loss=losses, top1=top1))

				print('Test-({0}): [{1}/{2}]\t'
					'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
					'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
					'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
						epoch_index, episode_index, len(val_loader), batch_time=batch_time, loss=losses, top1=top1), file=F_txt)

	# `best_prec1` is the module-level global restored from the checkpoint.
	print(' * Prec@1 {top1.avg:.3f} Best_prec1 {best_prec1:.3f}'.format(top1=top1, best_prec1=best_prec1))
	print(' * Prec@1 {top1.avg:.3f} Best_prec1 {best_prec1:.3f}'.format(top1=top1, best_prec1=best_prec1), file=F_txt)

	return top1.avg, accuracies


class AverageMeter(object):
	"""Tracks the most recent value plus a running, count-weighted average."""

	def __init__(self):
		self.reset()

	def reset(self):
		"""Zero out all accumulated statistics."""
		self.val = 0
		self.sum = 0
		self.count = 0
		self.avg = 0

	def update(self, val, n=1):
		"""Record `val` observed `n` times and refresh the running average."""
		self.val = val
		self.count += n
		self.sum = self.sum + val * n
		self.avg = self.sum / self.count



def accuracy(output, target, topk=(1,)):
	"""Compute the precision@k for the specified values of k.

	Args:
		output: (batch, num_classes) score tensor.
		target: (batch,) ground-truth class indices.
		topk: tuple of k values to evaluate.

	Returns:
		List of 1-element tensors, one per k, each holding precision@k in percent.
	"""
	with torch.no_grad():
		maxk = max(topk)
		batch_size = target.size(0)

		# Top-maxk predicted class indices per sample, transposed to (maxk, batch)
		# so that row k-1 holds each sample's k-th guess.
		_, pred = output.topk(maxk, 1, True, True)
		pred = pred.t()
		correct = pred.eq(target.view(1, -1).expand_as(pred))

		res = []
		for k in topk:
			# Use .reshape instead of .view: the sliced tensor is not guaranteed
			# to be contiguous, and .view raises on non-contiguous input.
			correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
			res.append(correct_k.mul_(100.0 / batch_size))
		return res


def mean_confidence_interval(data, confidence=0.95):
	"""Return the mean and half-width of a Student-t confidence interval.

	Args:
		data: list of 1-element accuracy tensors (as produced by accuracy()).
		confidence: two-sided confidence level (default 0.95).

	Returns:
		(m, h): scalar mean and array half-width; the interval is m +/- h.
	"""
	a = [1.0*np.array(data[i].cpu()) for i in range(len(data))]
	n = len(a)
	m, se = np.mean(a), scipy.stats.sem(a)
	# Use the public t.ppf API: the private `_ppf` helper used by the original
	# code was removed from modern SciPy releases.
	h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)
	return m, h



# ======================================== Settings of path ============================================
# saving path
# The output directory name encodes dataset, backbone and episode settings,
# e.g. ./results/DN4_miniImageNet_Conv64F_5Way_5Shot_K3.
opt.outf = opt.outf+'_'+opt.data_name+'_'+str(opt.basemodel)+'_'+str(opt.way_num)+'Way_'+str(opt.shot_num)+'Shot'+'_K'+str(opt.neighbor_k)

if not os.path.exists(opt.outf):
	os.makedirs(opt.outf)

if torch.cuda.is_available() and not opt.cuda:
	print("WARNING: You have a CUDA device, so you should probably run with --cuda")

# save the opt and results to a txt file
# NOTE(review): 'Test_resutls.txt' is misspelled but matches the existing
# result folders in this repo, so it is kept as-is.
txt_save_path = os.path.join(opt.outf, 'Test_resutls.txt')
F_txt = open(txt_save_path, 'a+')
print(opt)
print(opt, file=F_txt)



# ========================================== Model Config ===============================================
ngpu = int(opt.ngpu)
# `global` at module level is a no-op; these names are already module globals
# that validate() reads.
global best_prec1, epoch_index
best_prec1 = 0
epoch_index = 0

# Build the DN4 network (backbone + image-to-class k-NN classification head).
model = DN4Net.define_DN4Net(which_model=opt.basemodel, num_classes=opt.way_num, neighbor_k=opt.neighbor_k, norm='batch', 
	init_type='normal', use_gpu=opt.cuda)

# define loss function (criterion) and optimizer
# The optimizer exists only so the checkpoint's optimizer state can be
# restored below; no training happens in this script.
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(opt.beta1, 0.9))


# optionally resume from a checkpoint
if opt.resume:
	if os.path.isfile(opt.resume):
		print("=> loading checkpoint '{}'".format(opt.resume))
		checkpoint = torch.load(opt.resume)
		epoch_index = checkpoint['epoch_index']
		best_prec1 = checkpoint['best_prec1']
		model.load_state_dict(checkpoint['state_dict'])
		optimizer.load_state_dict(checkpoint['optimizer'])
		print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']))
		print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']), file=F_txt)
	else:
		print("=> no checkpoint found at '{}'".format(opt.resume))
		print("=> no checkpoint found at '{}'".format(opt.resume), file=F_txt)

if opt.ngpu > 1:
	model = nn.DataParallel(model, range(opt.ngpu))

# print the architecture of the network
print(model) 
print(model, file=F_txt) 




# ============================================ Testing phase ========================================
print('\n............Start testing............')
start_time = time.time()
repeat_num = 5       # repeat running the testing code several times


# Accumulators across rounds: running sum of round means, per-round confidence
# half-widths, and the concatenation of all per-episode accuracies.
total_accuracy = 0.0
total_h = np.zeros(repeat_num)
total_accuracy_vector = []
for r in range(repeat_num):
	print('===================================== Round %d =====================================' %r)
	print('===================================== Round %d =====================================' %r, file=F_txt)

	# ======================================= Folder of Datasets =======================================
	
	# image transform & normalization
	ImgTransform = transforms.Compose([
			transforms.Resize((opt.imageSize, opt.imageSize)),
			transforms.ToTensor(),
			transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
			])

	# A fresh episodic test set is sampled for each round.
	testset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode=opt.mode, image_size=opt.imageSize, transform=ImgTransform,
		episode_num=opt.episode_test_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)
	print('Testset: %d-------------%d' %(len(testset), r), file=F_txt)



	# ========================================== Load Datasets =========================================
	test_loader = torch.utils.data.DataLoader(
		testset, batch_size=opt.testepisodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		) 


	# =========================================== Evaluation ==========================================
	prec1, accuracies = validate(test_loader, model, criterion, epoch_index, F_txt)


	# 95% confidence interval over this round's per-episode accuracies.
	test_accuracy, h = mean_confidence_interval(accuracies)
	print("Test accuracy", test_accuracy, "h", h[0])
	print("Test accuracy", test_accuracy, "h", h[0], file=F_txt)
	total_accuracy += test_accuracy
	total_accuracy_vector.extend(accuracies)
	total_h[r] = h


# Final mean accuracy / interval computed over all rounds' episodes combined.
aver_accuracy, _ = mean_confidence_interval(total_accuracy_vector)
print("Aver_accuracy:", aver_accuracy, "Aver_h", total_h.mean())
print("Aver_accuracy:", aver_accuracy, "Aver_h", total_h.mean(), file=F_txt)
F_txt.close()

# ============================================== Testing End ==========================================


================================================
FILE: DN4_2019_Version/DN4_Train_5way1shot.py
================================================
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Author: Wenbin Li (liwenbin.nju@gmail.com)
Date: April 9, 2019
Version: V0

Citation: 
@inproceedings{li2019DN4,
  title={Revisiting Local Descriptor based Image-to-Class Measure for Few-shot Learning},
  author={Li, Wenbin and Wang, Lei and Xu, Jinglin and Huo, Jing and Gao Yang and Luo, Jiebo},
  booktitle={CVPR},
  year={2019}
}
"""


from __future__ import print_function
import argparse
import os
import random
import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import grad
import time
from torch import autograd
from PIL import ImageFile
import pdb
import sys
sys.dont_write_bytecode = True


# ============================ Data & Networks =====================================
from dataset.datasets_csv import Imagefolder_csv
import models.network as DN4Net
# ==================================================================================


# Tolerate truncated/corrupt image files instead of raising while decoding.
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Enumerate CUDA devices in PCI bus order and expose only GPU 0.
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0'


# Command-line options; the defaults reproduce the paper's miniImageNet
# 5-way 1-shot training configuration.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_dir', default='/Datasets/miniImageNet--ravi', help='/miniImageNet')
parser.add_argument('--data_name', default='miniImageNet', help='miniImageNet|StanfordDog|StanfordCar|CubBird')
parser.add_argument('--mode', default='train', help='train|val|test')
parser.add_argument('--outf', default='./results/DN4')
parser.add_argument('--resume', default='', type=str, help='path to the lastest checkpoint (default: none)')
parser.add_argument('--basemodel', default='Conv64F', help='Conv64F|ResNet256F')
parser.add_argument('--workers', type=int, default=8)
#  Few-shot parameters  #
parser.add_argument('--imageSize', type=int, default=84)
parser.add_argument('--episodeSize', type=int, default=1, help='the mini-batch size of training')
parser.add_argument('--testepisodeSize', type=int, default=1, help='one episode is taken as a mini-batch')
parser.add_argument('--epochs', type=int, default=30, help='the total number of training epoch')
parser.add_argument('--episode_train_num', type=int, default=10000, help='the total number of training episodes')
parser.add_argument('--episode_val_num', type=int, default=1000, help='the total number of evaluation episodes')
parser.add_argument('--episode_test_num', type=int, default=1000, help='the total number of testing episodes')
parser.add_argument('--way_num', type=int, default=5, help='the number of way/class')
parser.add_argument('--shot_num', type=int, default=1, help='the number of shot')
parser.add_argument('--query_num', type=int, default=15, help='the number of queries')
parser.add_argument('--neighbor_k', type=int, default=3, help='the number of k-nearest neighbors')
parser.add_argument('--lr', type=float, default=0.005, help='learning rate, default=0.005')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', default=True, help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='the number of gpus')
parser.add_argument('--nc', type=int, default=3, help='input image channels')
# NOTE(review): clamp_lower/clamp_upper appear unused in this script.
parser.add_argument('--clamp_lower', type=float, default=-0.01)
parser.add_argument('--clamp_upper', type=float, default=0.01)
parser.add_argument('--print_freq', '-p', default=100, type=int, metavar='N', help='print frequency (default: 100)')
opt = parser.parse_args()
opt.cuda = True   # CUDA is forced on regardless of the --cuda flag
cudnn.benchmark = True



# ======================================= Define functions =============================================

def adjust_learning_rate(optimizer, epoch_num):
	"""Apply the step-decay schedule: the base LR (`opt.lr`) is scaled by a
	factor of 0.05 for every 10 completed epochs."""
	decay_steps = epoch_num // 10
	new_lr = opt.lr * (0.05 ** decay_steps)
	for group in optimizer.param_groups:
		group['lr'] = new_lr


def train(train_loader, model, criterion, optimizer, epoch_index, F_txt):
	"""Train the model for one epoch of episodes drawn from `train_loader`.

	Args:
		train_loader: yields (query_images, query_targets, support_images, support_targets)
			per episode.
		model: DN4 network taking (query batch, list of per-class support batches).
		criterion: classification loss (e.g. CrossEntropyLoss).
		optimizer: optimizer updating the model parameters.
		epoch_index: current epoch number (logging only).
		F_txt: open text file that mirrors the console log.
	"""
	batch_time = AverageMeter()
	data_time = AverageMeter()
	losses = AverageMeter()
	top1 = AverageMeter()

	# Ensure training mode: validate() switches the model to eval mode, which
	# would otherwise leave BatchNorm/Dropout frozen for every epoch after the
	# first validation pass.
	model.train()

	end = time.time()
	for episode_index, (query_images, query_targets, support_images, support_targets) in enumerate(train_loader):

		# Measure data loading time
		data_time.update(time.time() - end)

		# Concatenate the per-episode query images into one batch.
		query_images = torch.cat(query_images, 0)
		input_var1 = query_images.cuda()

		# Build one concatenated tensor per support class.
		input_var2 = []
		for i in range(len(support_images)):
			temp_support = support_images[i]
			temp_support = torch.cat(temp_support, 0)
			temp_support = temp_support.cuda()
			input_var2.append(temp_support)

		# Deal with the targets
		target = torch.cat(query_targets, 0)
		target = target.cuda()

		# Calculate the output
		output = model(input_var1, input_var2)
		loss = criterion(output, target)

		# Compute gradients and do SGD step
		optimizer.zero_grad()
		loss.backward()
		optimizer.step()

	  
		# Measure accuracy and record loss
		prec1, _ = accuracy(output, target, topk=(1,3))
		losses.update(loss.item(), query_images.size(0))
		top1.update(prec1[0], query_images.size(0))


		# Measure elapsed time
		batch_time.update(time.time() - end)
		end = time.time()


		#============== print the intermediate results ==============#
		if episode_index % opt.print_freq == 0 and episode_index != 0:

			print('Eposide-({0}): [{1}/{2}]\t'
				'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
				'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
				'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
				'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
					epoch_index, episode_index, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1))

			print('Eposide-({0}): [{1}/{2}]\t'
				'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
				'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
				'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
				'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
					epoch_index, episode_index, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1), file=F_txt)



def validate(val_loader, model, criterion, epoch_index, best_prec1, F_txt):
	"""Evaluate the model over every episode in val_loader.

	Returns a tuple (average top-1 precision, list of per-episode top-1 tensors).
	Progress is printed to stdout and mirrored into the results file F_txt.
	"""
	batch_time, losses, top1 = AverageMeter(), AverageMeter(), AverageMeter()

	# Freeze BatchNorm/Dropout statistics for evaluation.
	model.eval()
	accuracies = []

	tic = time.time()
	for episode_index, (query_images, query_targets, support_images, support_targets) in enumerate(val_loader):

		# Stack the query tensors into one batch and move it to the GPU.
		query_images = torch.cat(query_images, 0)
		input_var1 = query_images.cuda()

		# One GPU tensor per support class; the model consumes them as a list.
		input_var2 = [torch.cat(per_class, 0).cuda() for per_class in support_images]

		# Flatten the query targets to line up with the stacked query batch.
		target = torch.cat(query_targets, 0)
		target = target.cuda()

		# Forward pass and loss.
		output = model(input_var1, input_var2)
		loss = criterion(output, target)

		# Book-keeping: top-1 precision, running loss, raw per-episode accuracy.
		prec1, _ = accuracy(output, target, topk=(1, 3))
		n_query = query_images.size(0)
		losses.update(loss.item(), n_query)
		top1.update(prec1[0], n_query)
		accuracies.append(prec1)

		# Wall-clock time for this episode.
		batch_time.update(time.time() - tic)
		tic = time.time()

		# Periodic progress report, written to both stdout and the results file.
		if episode_index != 0 and episode_index % opt.print_freq == 0:
			message = ('Test-({0}): [{1}/{2}]\t'
				'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
				'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
				'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
					epoch_index, episode_index, len(val_loader), batch_time=batch_time, loss=losses, top1=top1))
			print(message)
			print(message, file=F_txt)

	summary = ' * Prec@1 {top1.avg:.3f} Best_prec1 {best_prec1:.3f}'.format(top1=top1, best_prec1=best_prec1)
	print(summary)
	print(summary, file=F_txt)

	return top1.avg, accuracies



def save_checkpoint(state, filename='checkpoint.pth.tar'):
	"""Serialize the checkpoint dictionary *state* to *filename* via torch.save."""
	torch.save(obj=state, f=filename)


class AverageMeter(object):
	"""Tracks the most recent value plus a running, sample-weighted average."""

	def __init__(self):
		self.reset()

	def reset(self):
		# Zero out all statistics.
		self.val = self.avg = self.sum = self.count = 0

	def update(self, val, n=1):
		# Fold in a new observation `val` counted with weight `n` samples.
		self.val = val
		self.count = self.count + n
		self.sum = self.sum + val * n
		self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
	"""Computes the precision@k for the specified values of k.

	Args:
		output: (batch, num_classes) tensor of class scores.
		target: (batch,) tensor of ground-truth class indices.
		topk: iterable of k values to evaluate.

	Returns:
		List of 1-element tensors, one per k, holding precision@k in percent.
	"""
	with torch.no_grad():
		maxk = max(topk)
		batch_size = target.size(0)

		# Top-maxk predicted class indices, transposed to shape (maxk, batch).
		_, pred = output.topk(maxk, 1, True, True)
		pred = pred.t()
		correct = pred.eq(target.view(1, -1).expand_as(pred))

		res = []
		for k in topk:
			# Use reshape, not view: the row slice of a transposed tensor is
			# non-contiguous and .view() raises a RuntimeError on such tensors.
			correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
			res.append(correct_k.mul_(100.0 / batch_size))
		return res



# ======================================== Settings of path ============================================
# saving path
# Build a run-specific output folder name, e.g. ./results/DN4_miniImageNet_Conv64F_5Way_1Shot_K3
opt.outf = opt.outf+'_'+opt.data_name+'_'+str(opt.basemodel)+'_'+str(opt.way_num)+'Way_'+str(opt.shot_num)+'Shot'+'_K'+str(opt.neighbor_k)

if not os.path.exists(opt.outf):
	os.makedirs(opt.outf)

if torch.cuda.is_available() and not opt.cuda:
	print("WARNING: You have a CUDA device, so you should probably run with --cuda")

# save the opt and results to a txt file
# (the filename typo 'resutls' is kept as-is: downstream tooling may expect it)
txt_save_path = os.path.join(opt.outf, 'opt_resutls.txt')
F_txt = open(txt_save_path, 'a+')
print(opt)
print(opt, file=F_txt)



# ========================================== Model Config ===============================================
ngpu = int(opt.ngpu)
# NOTE(review): `global` at module level is a no-op; these are module globals regardless.
global best_prec1, epoch_index
best_prec1 = 0
epoch_index = 0

model = DN4Net.define_DN4Net(which_model=opt.basemodel, num_classes=opt.way_num, neighbor_k=opt.neighbor_k, norm='batch', 
	init_type='normal', use_gpu=opt.cuda)

# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(opt.beta1, 0.9))


# optionally resume from a checkpoint
if opt.resume:
	if os.path.isfile(opt.resume):
		print("=> loading checkpoint '{}'".format(opt.resume))
		checkpoint = torch.load(opt.resume)
		# NOTE(review): the restored epoch_index is not used to offset the epoch loop below.
		epoch_index = checkpoint['epoch_index']
		best_prec1 = checkpoint['best_prec1']
		model.load_state_dict(checkpoint['state_dict'])
		optimizer.load_state_dict(checkpoint['optimizer'])
		print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']))
		print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']), file=F_txt)
	else:
		print("=> no checkpoint found at '{}'".format(opt.resume))
		print("=> no checkpoint found at '{}'".format(opt.resume), file=F_txt)

if opt.ngpu > 1:
	model = nn.DataParallel(model, range(opt.ngpu))

# print the architecture of the network
print(model) 
print(model, file=F_txt) 




# ======================================== Training phase ===============================================
print('\n............Start training............\n')
start_time = time.time()


for epoch_item in range(opt.epochs):
	print('===================================== Epoch %d =====================================' %epoch_item)
	print('===================================== Epoch %d =====================================' %epoch_item, file=F_txt)
	adjust_learning_rate(optimizer, epoch_item) 
	

	# ======================================= Folder of Datasets =======================================
	# image transform & normalization
	ImgTransform = transforms.Compose([
			transforms.Resize((opt.imageSize, opt.imageSize)),
			transforms.ToTensor(),
			transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
			])

	# Episodic datasets are rebuilt every epoch so new random episodes are sampled each time.
	trainset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode=opt.mode, image_size=opt.imageSize, transform=ImgTransform,
		episode_num=opt.episode_train_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)
	valset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode='val', image_size=opt.imageSize, transform=ImgTransform,
		episode_num=opt.episode_val_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)
	testset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode='test', image_size=opt.imageSize, transform=ImgTransform,
		episode_num=opt.episode_test_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)

	print('Trainset: %d' %len(trainset))
	print('Valset: %d' %len(valset))
	print('Testset: %d' %len(testset))
	print('Trainset: %d' %len(trainset), file=F_txt)
	print('Valset: %d' %len(valset), file=F_txt)
	print('Testset: %d' %len(testset), file=F_txt)



	# ========================================== Load Datasets =========================================
	train_loader = torch.utils.data.DataLoader(
		trainset, batch_size=opt.episodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		)
	val_loader = torch.utils.data.DataLoader(
		valset, batch_size=opt.testepisodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		) 
	test_loader = torch.utils.data.DataLoader(
		testset, batch_size=opt.testepisodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		) 


	# ============================================ Training ===========================================
	# Fix the parameters of Batch Normalization after 10000 episodes (1 epoch)
	if epoch_item < 1:
		model.train()
	else:
		model.eval()

	# Train for 10000 episodes in each epoch
	train(train_loader, model, criterion, optimizer, epoch_item, F_txt)


	# =========================================== Evaluation ==========================================
	print('============ Validation on the val set ============')
	print('============ validation on the val set ============', file=F_txt)
	prec1, _ = validate(val_loader, model, criterion, epoch_item, best_prec1, F_txt)


	# record the best prec@1 and save checkpoint
	is_best = prec1 > best_prec1
	best_prec1 = max(prec1, best_prec1)

	# save the checkpoint
	if is_best:
		save_checkpoint(
			{
				'epoch_index': epoch_item,
				'arch': opt.basemodel,
				'state_dict': model.state_dict(),
				'best_prec1': best_prec1,
				'optimizer' : optimizer.state_dict(),
			}, os.path.join(opt.outf, 'model_best.pth.tar'))


	# Periodic snapshot every 10 epochs (including epoch 0).
	if epoch_item % 10 == 0:
		filename = os.path.join(opt.outf, 'epoch_%d.pth.tar' %epoch_item)
		save_checkpoint(
		{
			'epoch_index': epoch_item,
			'arch': opt.basemodel,
			'state_dict': model.state_dict(),
			'best_prec1': best_prec1,
			'optimizer' : optimizer.state_dict(),
		}, filename)

	
	# Testing phase
	# NOTE(review): the test-set result is reported only; it does not influence best_prec1.
	print('============ Testing on the test set ============')
	print('============ Testing on the test set ============', file=F_txt)
	prec1, _ = validate(test_loader, model, criterion, epoch_item, best_prec1, F_txt)


F_txt.close()
print('............Training is end............')

# ============================================ Training End ==============================================================

================================================
FILE: DN4_2019_Version/DN4_Train_5way1shot_DA.py
================================================
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Author: Wenbin Li (liwenbin.nju@gmail.com)
Date: April 9, 2019
Version: V0

Citation: 
@inproceedings{li2019DN4,
  title={Revisiting Local Descriptor based Image-to-Class Measure for Few-shot Learning},
  author={Li, Wenbin and Wang, Lei and Xu, Jinglin and Huo, Jing and Gao Yang and Luo, Jiebo},
  booktitle={CVPR},
  year={2019}
}
"""


from __future__ import print_function
import argparse
import os
import random
import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import grad
import time
from torch import autograd
from PIL import ImageFile
import pdb
import sys
sys.dont_write_bytecode = True


# ============================ Data & Networks =====================================
from dataset.datasets_csv import Imagefolder_csv
import models.network as DN4Net
# ==================================================================================


ImageFile.LOAD_TRUNCATED_IMAGES = True
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0'


# Command-line configuration for 5-way 1-shot DN4 training with data augmentation.
parser = argparse.ArgumentParser()
# --- Dataset, model and output locations ---
parser.add_argument('--dataset_dir', default='/Datasets/miniImageNet--ravi', help='/miniImageNet')
parser.add_argument('--data_name', default='miniImageNet', help='miniImageNet|StanfordDog|StanfordCar|CubBird')
parser.add_argument('--mode', default='train', help='train|val|test')
parser.add_argument('--outf', default='./results/DN4_DA')
parser.add_argument('--resume', default='', type=str, help='path to the lastest checkpoint (default: none)')
parser.add_argument('--basemodel', default='Conv64F', help='Conv64F|ResNet256F')
parser.add_argument('--workers', type=int, default=8)
#  Few-shot parameters  #
parser.add_argument('--imageSize', type=int, default=84)
parser.add_argument('--episodeSize', type=int, default=1, help='the mini-batch size of training')
parser.add_argument('--testepisodeSize', type=int, default=1, help='one episode is taken as a mini-batch')
parser.add_argument('--epochs', type=int, default=30, help='the total number of training epoch')
parser.add_argument('--episode_train_num', type=int, default=10000, help='the total number of training episodes')
parser.add_argument('--episode_val_num', type=int, default=1000, help='the total number of evaluation episodes')
parser.add_argument('--episode_test_num', type=int, default=1000, help='the total number of testing episodes')
parser.add_argument('--way_num', type=int, default=5, help='the number of way/class')
parser.add_argument('--shot_num', type=int, default=1, help='the number of shot')
parser.add_argument('--query_num', type=int, default=15, help='the number of queries')
parser.add_argument('--neighbor_k', type=int, default=3, help='the number of k-nearest neighbors')
# --- Optimization & hardware ---
parser.add_argument('--lr', type=float, default=0.005, help='learning rate, default=0.005')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', default=True, help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='the number of gpus')
parser.add_argument('--nc', type=int, default=3, help='input image channels')
# NOTE(review): clamp_lower/clamp_upper are parsed but not referenced in this script.
parser.add_argument('--clamp_lower', type=float, default=-0.01)
parser.add_argument('--clamp_upper', type=float, default=0.01)
parser.add_argument('--print_freq', '-p', default=100, type=int, metavar='N', help='print frequency (default: 100)')
opt = parser.parse_args()
# NOTE(review): CUDA is forced on here regardless of the --cuda flag supplied.
opt.cuda = True
cudnn.benchmark = True



# ======================================= Define functions =============================================

def adjust_learning_rate(optimizer, epoch_num):
	"""Decay the base learning rate by a factor of 0.05 every 10 epochs."""
	decayed_lr = opt.lr * (0.05 ** (epoch_num // 10))
	for group in optimizer.param_groups:
		group['lr'] = decayed_lr


def train(train_loader, model, criterion, optimizer, epoch_index, F_txt):
	"""Run one epoch of episodic training and log progress to stdout and F_txt."""
	batch_time, data_time, losses, top1 = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()

	tic = time.time()
	for episode_index, (query_images, query_targets, support_images, support_targets) in enumerate(train_loader):

		# Time spent waiting on the data loader.
		data_time.update(time.time() - tic)

		# Stack the query tensors into a single batch on the GPU.
		query_images = torch.cat(query_images, 0)
		input_var1 = query_images.cuda()

		# One GPU tensor per support class, handed to the model as a list.
		input_var2 = [torch.cat(per_class, 0).cuda() for per_class in support_images]

		# Flatten the query targets to line up with the stacked batch.
		target = torch.cat(query_targets, 0)
		target = target.cuda()

		# Forward pass and loss.
		output = model(input_var1, input_var2)
		loss = criterion(output, target)

		# Backward pass followed by one optimizer step.
		optimizer.zero_grad()
		loss.backward()
		optimizer.step()

		# Track top-1 precision and running loss for this episode.
		prec1, _ = accuracy(output, target, topk=(1, 3))
		n_query = query_images.size(0)
		losses.update(loss.item(), n_query)
		top1.update(prec1[0], n_query)

		# Full episode wall-clock time (data loading plus compute).
		batch_time.update(time.time() - tic)
		tic = time.time()

		# Periodic progress report to both stdout and the results file.
		if episode_index != 0 and episode_index % opt.print_freq == 0:
			message = ('Eposide-({0}): [{1}/{2}]\t'
				'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
				'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
				'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
				'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
					epoch_index, episode_index, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1))
			print(message)
			print(message, file=F_txt)



def validate(val_loader, model, criterion, epoch_index, best_prec1, F_txt):
	"""Evaluate the model over every episode in val_loader.

	Returns a tuple (average top-1 precision, list of per-episode top-1 tensors).
	Progress is printed to stdout and mirrored into the results file F_txt.
	"""
	batch_time, losses, top1 = AverageMeter(), AverageMeter(), AverageMeter()

	# Freeze BatchNorm/Dropout statistics for evaluation.
	model.eval()
	accuracies = []

	tic = time.time()
	for episode_index, (query_images, query_targets, support_images, support_targets) in enumerate(val_loader):

		# Stack the query tensors into one batch and move it to the GPU.
		query_images = torch.cat(query_images, 0)
		input_var1 = query_images.cuda()

		# One GPU tensor per support class; the model consumes them as a list.
		input_var2 = [torch.cat(per_class, 0).cuda() for per_class in support_images]

		# Flatten the query targets to line up with the stacked query batch.
		target = torch.cat(query_targets, 0)
		target = target.cuda()

		# Forward pass and loss.
		output = model(input_var1, input_var2)
		loss = criterion(output, target)

		# Book-keeping: top-1 precision, running loss, raw per-episode accuracy.
		prec1, _ = accuracy(output, target, topk=(1, 3))
		n_query = query_images.size(0)
		losses.update(loss.item(), n_query)
		top1.update(prec1[0], n_query)
		accuracies.append(prec1)

		# Wall-clock time for this episode.
		batch_time.update(time.time() - tic)
		tic = time.time()

		# Periodic progress report, written to both stdout and the results file.
		if episode_index != 0 and episode_index % opt.print_freq == 0:
			message = ('Test-({0}): [{1}/{2}]\t'
				'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
				'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
				'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
					epoch_index, episode_index, len(val_loader), batch_time=batch_time, loss=losses, top1=top1))
			print(message)
			print(message, file=F_txt)

	summary = ' * Prec@1 {top1.avg:.3f} Best_prec1 {best_prec1:.3f}'.format(top1=top1, best_prec1=best_prec1)
	print(summary)
	print(summary, file=F_txt)

	return top1.avg, accuracies



def save_checkpoint(state, filename='checkpoint.pth.tar'):
	"""Serialize the checkpoint dictionary *state* to *filename* via torch.save."""
	torch.save(obj=state, f=filename)


class AverageMeter(object):
	"""Tracks the most recent value plus a running, sample-weighted average."""

	def __init__(self):
		self.reset()

	def reset(self):
		# Zero out all statistics.
		self.val = self.avg = self.sum = self.count = 0

	def update(self, val, n=1):
		# Fold in a new observation `val` counted with weight `n` samples.
		self.val = val
		self.count = self.count + n
		self.sum = self.sum + val * n
		self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
	"""Computes the precision@k for the specified values of k.

	Args:
		output: (batch, num_classes) tensor of class scores.
		target: (batch,) tensor of ground-truth class indices.
		topk: iterable of k values to evaluate.

	Returns:
		List of 1-element tensors, one per k, holding precision@k in percent.
	"""
	with torch.no_grad():
		maxk = max(topk)
		batch_size = target.size(0)

		# Top-maxk predicted class indices, transposed to shape (maxk, batch).
		_, pred = output.topk(maxk, 1, True, True)
		pred = pred.t()
		correct = pred.eq(target.view(1, -1).expand_as(pred))

		res = []
		for k in topk:
			# Use reshape, not view: the row slice of a transposed tensor is
			# non-contiguous and .view() raises a RuntimeError on such tensors.
			correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
			res.append(correct_k.mul_(100.0 / batch_size))
		return res



# ======================================== Settings of path ============================================
# saving path
# Build a run-specific output folder name, e.g. ./results/DN4_DA_miniImageNet_Conv64F_5Way_1Shot_K3
opt.outf = opt.outf+'_'+opt.data_name+'_'+str(opt.basemodel)+'_'+str(opt.way_num)+'Way_'+str(opt.shot_num)+'Shot'+'_K'+str(opt.neighbor_k)

if not os.path.exists(opt.outf):
	os.makedirs(opt.outf)

if torch.cuda.is_available() and not opt.cuda:
	print("WARNING: You have a CUDA device, so you should probably run with --cuda")

# save the opt and results to a txt file
# (the filename typo 'resutls' is kept as-is: downstream tooling may expect it)
txt_save_path = os.path.join(opt.outf, 'opt_resutls.txt')
F_txt = open(txt_save_path, 'a+')
print(opt)
print(opt, file=F_txt)



# ========================================== Model Config ===============================================
ngpu = int(opt.ngpu)
# NOTE(review): `global` at module level is a no-op; these are module globals regardless.
global best_prec1, epoch_index
best_prec1 = 0
epoch_index = 0

model = DN4Net.define_DN4Net(which_model=opt.basemodel, num_classes=opt.way_num, neighbor_k=opt.neighbor_k, norm='batch', 
	init_type='normal', use_gpu=opt.cuda)

# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(opt.beta1, 0.9))


# optionally resume from a checkpoint
if opt.resume:
	if os.path.isfile(opt.resume):
		print("=> loading checkpoint '{}'".format(opt.resume))
		checkpoint = torch.load(opt.resume)
		# NOTE(review): the restored epoch_index is not used to offset the epoch loop below.
		epoch_index = checkpoint['epoch_index']
		best_prec1 = checkpoint['best_prec1']
		model.load_state_dict(checkpoint['state_dict'])
		optimizer.load_state_dict(checkpoint['optimizer'])
		print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']))
		print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']), file=F_txt)
	else:
		print("=> no checkpoint found at '{}'".format(opt.resume))
		print("=> no checkpoint found at '{}'".format(opt.resume), file=F_txt)

if opt.ngpu > 1:
	model = nn.DataParallel(model, range(opt.ngpu))

# print the architecture of the network
print(model) 
print(model, file=F_txt) 




# ======================================== Training phase ===============================================
print('\n............Start training............\n')
start_time = time.time()


for epoch_item in range(opt.epochs):
	print('===================================== Epoch %d =====================================' %epoch_item)
	print('===================================== Epoch %d =====================================' %epoch_item, file=F_txt)
	adjust_learning_rate(optimizer, epoch_item) 
	

	# ======================================= Folder of Datasets =======================================
	# image transform & normalization
	ImgTransform = transforms.Compose([
			transforms.Resize((opt.imageSize, opt.imageSize)),
			transforms.ToTensor(),
			transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
			])

	# Data-augmentation transform: random crop + color jitter + horizontal flip (training only).
	ImgTransform_DA = transforms.Compose([
		transforms.Resize((100, 100)),
		transforms.RandomCrop(84),
		transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
		transforms.RandomHorizontalFlip(),
		transforms.ToTensor(),
		transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
		])

	# Episodic datasets are rebuilt every epoch; only the train split uses the augmented transform.
	trainset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode=opt.mode, image_size=opt.imageSize, transform=ImgTransform_DA,
		episode_num=opt.episode_train_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)
	valset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode='val', image_size=opt.imageSize, transform=ImgTransform,
		episode_num=opt.episode_val_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)
	testset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode='test', image_size=opt.imageSize, transform=ImgTransform,
		episode_num=opt.episode_test_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)

	print('Trainset: %d' %len(trainset))
	print('Valset: %d' %len(valset))
	print('Testset: %d' %len(testset))
	print('Trainset: %d' %len(trainset), file=F_txt)
	print('Valset: %d' %len(valset), file=F_txt)
	print('Testset: %d' %len(testset), file=F_txt)



	# ========================================== Load Datasets =========================================
	train_loader = torch.utils.data.DataLoader(
		trainset, batch_size=opt.episodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		)
	val_loader = torch.utils.data.DataLoader(
		valset, batch_size=opt.testepisodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		) 
	test_loader = torch.utils.data.DataLoader(
		testset, batch_size=opt.testepisodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		) 


	# ============================================ Training ===========================================
	# Fix the parameters of Batch Normalization after 10000 episodes (1 epoch)
	if epoch_item < 1:
		model.train()
	else:
		model.eval()

	# Train for 10000 episodes in each epoch
	train(train_loader, model, criterion, optimizer, epoch_item, F_txt)


	# =========================================== Evaluation ==========================================
	print('============ Validation on the val set ============')
	print('============ validation on the val set ============', file=F_txt)
	prec1, _ = validate(val_loader, model, criterion, epoch_item, best_prec1, F_txt)


	# record the best prec@1 and save checkpoint
	is_best = prec1 > best_prec1
	best_prec1 = max(prec1, best_prec1)

	# save the checkpoint
	if is_best:
		save_checkpoint(
			{
				'epoch_index': epoch_item,
				'arch': opt.basemodel,
				'state_dict': model.state_dict(),
				'best_prec1': best_prec1,
				'optimizer' : optimizer.state_dict(),
			}, os.path.join(opt.outf, 'model_best.pth.tar'))


	# Periodic snapshot every 10 epochs (including epoch 0).
	if epoch_item % 10 == 0:
		filename = os.path.join(opt.outf, 'epoch_%d.pth.tar' %epoch_item)
		save_checkpoint(
		{
			'epoch_index': epoch_item,
			'arch': opt.basemodel,
			'state_dict': model.state_dict(),
			'best_prec1': best_prec1,
			'optimizer' : optimizer.state_dict(),
		}, filename)

	
	# Testing phase
	# NOTE(review): the test-set result is reported only; it does not influence best_prec1.
	print('============ Testing on the test set ============')
	print('============ Testing on the test set ============', file=F_txt)
	prec1, _ = validate(test_loader, model, criterion, epoch_item, best_prec1, F_txt)


F_txt.close()
print('............Training is end............')

# ============================================ Training End ==============================================================


================================================
FILE: DN4_2019_Version/DN4_Train_5way1shot_Resnet.py
================================================
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Author: Wenbin Li (liwenbin.nju@gmail.com)
Date: April 9, 2019
Version: V0

Citation: 
@inproceedings{li2019DN4,
  title={Revisiting Local Descriptor based Image-to-Class Measure for Few-shot Learning},
  author={Li, Wenbin and Wang, Lei and Xu, Jinglin and Huo, Jing and Gao Yang and Luo, Jiebo},
  booktitle={CVPR},
  year={2019}
}
"""


from __future__ import print_function
import argparse
import os
import random
import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import grad
import time
from torch import autograd
from PIL import ImageFile
import pdb
import sys
sys.dont_write_bytecode = True


# ============================ Data & Networks =====================================
from dataset.datasets_csv import Imagefolder_csv
import models.network as DN4Net
# ==================================================================================


ImageFile.LOAD_TRUNCATED_IMAGES = True
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0'


# Command-line configuration for 5-way 1-shot DN4 training with the ResNet256F backbone.
parser = argparse.ArgumentParser()
# --- Dataset, model and output locations ---
parser.add_argument('--dataset_dir', default='/Datasets/miniImageNet--ravi', help='/miniImageNet')
parser.add_argument('--data_name', default='miniImageNet', help='miniImageNet|StanfordDog|StanfordCar|CubBird')
parser.add_argument('--mode', default='train', help='train|val|test')
parser.add_argument('--outf', default='./results/DN4')
parser.add_argument('--resume', default='', type=str, help='path to the lastest checkpoint (default: none)')
parser.add_argument('--basemodel', default='ResNet256F', help='Conv64F|ResNet256F')
parser.add_argument('--workers', type=int, default=8)
#  Few-shot parameters  #
parser.add_argument('--imageSize', type=int, default=84)
parser.add_argument('--episodeSize', type=int, default=1, help='the mini-batch size of training')
parser.add_argument('--testepisodeSize', type=int, default=1, help='one episode is taken as a mini-batch')
parser.add_argument('--epochs', type=int, default=30, help='the total number of training epoch')
parser.add_argument('--episode_train_num', type=int, default=10000, help='the total number of training episodes')
parser.add_argument('--episode_val_num', type=int, default=1000, help='the total number of evaluation episodes')
parser.add_argument('--episode_test_num', type=int, default=1000, help='the total number of testing episodes')
parser.add_argument('--way_num', type=int, default=5, help='the number of way/class')
parser.add_argument('--shot_num', type=int, default=1, help='the number of shot')
parser.add_argument('--query_num', type=int, default=15, help='the number of queries')
parser.add_argument('--neighbor_k', type=int, default=3, help='the number of k-nearest neighbors')
# --- Optimization & hardware ---
parser.add_argument('--lr', type=float, default=0.001, help='learning rate, default=0.005')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', default=True, help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='the number of gpus')
parser.add_argument('--nc', type=int, default=3, help='input image channels')
# NOTE(review): clamp_lower/clamp_upper are parsed but not referenced in this script.
parser.add_argument('--clamp_lower', type=float, default=-0.01)
parser.add_argument('--clamp_upper', type=float, default=0.01)
parser.add_argument('--print_freq', '-p', default=100, type=int, metavar='N', help='print frequency (default: 100)')
opt = parser.parse_args()
# NOTE(review): CUDA is forced on here regardless of the --cuda flag supplied.
opt.cuda = True
cudnn.benchmark = True



# ======================================= Define functions =============================================

def adjust_learning_rate(optimizer, epoch_num):
	"""Sets the learning rate to the initial LR decayed by 0.05 every 5 epochs"""
	# Faster decay schedule than the Conv64F scripts (which decay every 10 epochs).
	lr = opt.lr * (0.05 ** (epoch_num // 5))
	for param_group in optimizer.param_groups:
		param_group['lr'] = lr


def train(train_loader, model, criterion, optimizer, epoch_index, F_txt):
	"""Run one epoch of episodic training and log progress to stdout and F_txt."""
	batch_time, data_time, losses, top1 = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()

	tic = time.time()
	for episode_index, (query_images, query_targets, support_images, support_targets) in enumerate(train_loader):

		# Time spent waiting on the data loader.
		data_time.update(time.time() - tic)

		# Stack the query tensors into a single batch on the GPU.
		query_images = torch.cat(query_images, 0)
		input_var1 = query_images.cuda()

		# One GPU tensor per support class, handed to the model as a list.
		input_var2 = [torch.cat(per_class, 0).cuda() for per_class in support_images]

		# Flatten the query targets to line up with the stacked batch.
		target = torch.cat(query_targets, 0)
		target = target.cuda()

		# Forward pass and loss.
		output = model(input_var1, input_var2)
		loss = criterion(output, target)

		# Backward pass followed by one optimizer step.
		optimizer.zero_grad()
		loss.backward()
		optimizer.step()

		# Track top-1 precision and running loss for this episode.
		prec1, _ = accuracy(output, target, topk=(1, 3))
		n_query = query_images.size(0)
		losses.update(loss.item(), n_query)
		top1.update(prec1[0], n_query)

		# Full episode wall-clock time (data loading plus compute).
		batch_time.update(time.time() - tic)
		tic = time.time()

		# Periodic progress report to both stdout and the results file.
		if episode_index != 0 and episode_index % opt.print_freq == 0:
			message = ('Eposide-({0}): [{1}/{2}]\t'
				'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
				'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
				'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
				'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
					epoch_index, episode_index, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1))
			print(message)
			print(message, file=F_txt)



def validate(val_loader, model, criterion, epoch_index, best_prec1, F_txt):
	"""Evaluate the model on one pass of few-shot episodes from val_loader.

	Each batch element is one episode: stacked query images/targets plus a
	list of per-class support image tensors. Accumulates loss and top-1
	precision, logging every opt.print_freq episodes to stdout and F_txt.

	Returns:
		(top1.avg, accuracies): mean top-1 precision over all episodes,
		and the per-episode precision list.
	"""
	batch_time = AverageMeter()
	losses = AverageMeter()
	top1 = AverageMeter()

	# switch to evaluate mode
	model.eval()
	accuracies = []

	end = time.time()
	# Fix: evaluation previously ran with autograd enabled; no gradients are
	# needed here, so disabling them saves memory and speeds up the forward
	# pass without changing any computed value.
	with torch.no_grad():
		for episode_index, (query_images, query_targets, support_images, support_targets) in enumerate(val_loader):

			# Stack the per-class query tensors into a single batch
			query_images = torch.cat(query_images, 0)
			input_var1 = query_images.cuda()

			# Each class keeps its own stacked support tensor
			input_var2 = []
			for i in range(len(support_images)):
				temp_support = support_images[i]
				temp_support = torch.cat(temp_support, 0)
				temp_support = temp_support.cuda()
				input_var2.append(temp_support)

			# Deal with the targets
			target = torch.cat(query_targets, 0)
			target = target.cuda()

			# Calculate the output
			output = model(input_var1, input_var2)
			loss = criterion(output, target)

			# measure accuracy and record loss
			prec1, _ = accuracy(output, target, topk=(1, 3))
			losses.update(loss.item(), query_images.size(0))
			top1.update(prec1[0], query_images.size(0))
			accuracies.append(prec1)

			# measure elapsed time
			batch_time.update(time.time() - end)
			end = time.time()

			#============== print the intermediate results ==============#
			if episode_index % opt.print_freq == 0 and episode_index != 0:

				print('Test-({0}): [{1}/{2}]\t'
					'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
					'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
					'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
						epoch_index, episode_index, len(val_loader), batch_time=batch_time, loss=losses, top1=top1))

				print('Test-({0}): [{1}/{2}]\t'
					'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
					'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
					'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
						epoch_index, episode_index, len(val_loader), batch_time=batch_time, loss=losses, top1=top1), file=F_txt)

	print(' * Prec@1 {top1.avg:.3f} Best_prec1 {best_prec1:.3f}'.format(top1=top1, best_prec1=best_prec1))
	print(' * Prec@1 {top1.avg:.3f} Best_prec1 {best_prec1:.3f}'.format(top1=top1, best_prec1=best_prec1), file=F_txt)

	return top1.avg, accuracies



def save_checkpoint(state, filename='checkpoint.pth.tar'):
	"""Serialize a checkpoint dict (epoch, model/optimizer state) to disk."""
	torch.save(state, filename)


class AverageMeter(object):
	"""Tracks the latest value, running sum, count, and mean of a metric."""

	def __init__(self):
		self.reset()

	def reset(self):
		"""Clear all accumulated statistics."""
		self.val = 0
		self.avg = 0
		self.sum = 0
		self.count = 0

	def update(self, val, n=1):
		"""Record a new observation ``val`` representing ``n`` samples."""
		self.val = val
		self.count += n
		self.sum += val * n
		self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
	"""Compute the precision@k for the specified values of k.

	Args:
		output: (batch, num_classes) tensor of class scores.
		target: (batch,) tensor of ground-truth class indices.
		topk:   tuple of k values to report.

	Returns:
		List of 1-element tensors, one per k, each the percentage of samples
		whose true label appears in the top-k predictions.
	"""
	with torch.no_grad():
		maxk = max(topk)
		batch_size = target.size(0)

		# Top-k predicted class indices, transposed to shape (maxk, batch)
		_, pred = output.topk(maxk, 1, True, True)
		pred = pred.t()
		correct = pred.eq(target.view(1, -1).expand_as(pred))

		res = []
		for k in topk:
			# Fix: use reshape() instead of view() — the sliced tensor may be
			# non-contiguous, and view() raises a RuntimeError in that case.
			correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
			res.append(correct_k.mul_(100.0 / batch_size))
		return res



# ======================================== Settings of path ============================================
# saving path
# Build a unique output folder name from the experiment configuration,
# e.g. ./results/DN4_miniImageNet_Conv64F_5Way_1Shot_K3
opt.outf = opt.outf+'_'+opt.data_name+'_'+str(opt.basemodel)+'_'+str(opt.way_num)+'Way_'+str(opt.shot_num)+'Shot'+'_K'+str(opt.neighbor_k)

if not os.path.exists(opt.outf):
	os.makedirs(opt.outf)

if torch.cuda.is_available() and not opt.cuda:
	print("WARNING: You have a CUDA device, so you should probably run with --cuda")

# save the opt and results to a txt file
# NOTE(review): 'opt_resutls.txt' is a typo of 'opt_results.txt'; kept as-is
# because the companion test scripts read the same filename.
txt_save_path = os.path.join(opt.outf, 'opt_resutls.txt')
F_txt = open(txt_save_path, 'a+')  # closed only after the training loop below
print(opt)
print(opt, file=F_txt)



# ========================================== Model Config ===============================================
ngpu = int(opt.ngpu)
# NOTE(review): 'global' at module level is a no-op; kept byte-identical.
global best_prec1, epoch_index
best_prec1 = 0
epoch_index = 0

model = DN4Net.define_DN4Net(which_model=opt.basemodel, num_classes=opt.way_num, neighbor_k=opt.neighbor_k, norm='batch', 
	init_type='normal', use_gpu=opt.cuda)

# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(opt.beta1, 0.9))


# optionally resume from a checkpoint
if opt.resume:
	if os.path.isfile(opt.resume):
		print("=> loading checkpoint '{}'".format(opt.resume))
		checkpoint = torch.load(opt.resume)
		epoch_index = checkpoint['epoch_index']
		best_prec1 = checkpoint['best_prec1']
		model.load_state_dict(checkpoint['state_dict'])
		optimizer.load_state_dict(checkpoint['optimizer'])
		print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']))
		print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']), file=F_txt)
	else:
		print("=> no checkpoint found at '{}'".format(opt.resume))
		print("=> no checkpoint found at '{}'".format(opt.resume), file=F_txt)

# Wrap with DataParallel only when more than one GPU is requested
if opt.ngpu > 1:
	model = nn.DataParallel(model, range(opt.ngpu))

# print the architecture of the network
print(model)
print(model, file=F_txt)




# ======================================== Training phase ===============================================
print('\n............Start training............\n')
start_time = time.time()


for epoch_item in range(opt.epochs):
	print('===================================== Epoch %d =====================================' %epoch_item)
	print('===================================== Epoch %d =====================================' %epoch_item, file=F_txt)
	adjust_learning_rate(optimizer, epoch_item)


	# ======================================= Folder of Datasets =======================================
	# image transform & normalization
	ImgTransform = transforms.Compose([
			transforms.Resize((opt.imageSize, opt.imageSize)),
			transforms.ToTensor(),
			transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
			])

	# Datasets are re-instantiated every epoch so a fresh set of random
	# episodes is sampled each time.
	trainset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode=opt.mode, image_size=opt.imageSize, transform=ImgTransform,
		episode_num=opt.episode_train_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)
	valset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode='val', image_size=opt.imageSize, transform=ImgTransform,
		episode_num=opt.episode_val_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)
	testset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode='test', image_size=opt.imageSize, transform=ImgTransform,
		episode_num=opt.episode_test_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)

	print('Trainset: %d' %len(trainset))
	print('Valset: %d' %len(valset))
	print('Testset: %d' %len(testset))
	print('Trainset: %d' %len(trainset), file=F_txt)
	print('Valset: %d' %len(valset), file=F_txt)
	print('Testset: %d' %len(testset), file=F_txt)



	# ========================================== Load Datasets =========================================
	train_loader = torch.utils.data.DataLoader(
		trainset, batch_size=opt.episodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		)
	val_loader = torch.utils.data.DataLoader(
		valset, batch_size=opt.testepisodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		)
	test_loader = torch.utils.data.DataLoader(
		testset, batch_size=opt.testepisodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		)


	# ============================================ Training ===========================================
	# Fix the parameters of Batch Normalization after 10000 episodes (1 epoch)
	# (weights are still updated by the optimizer inside train(); eval() only
	# freezes the BN running statistics from the second epoch onward)
	if epoch_item < 1:
		model.train()
	else:
		model.eval()

	# Train for 10000 episodes in each epoch
	train(train_loader, model, criterion, optimizer, epoch_item, F_txt)


	# =========================================== Evaluation ==========================================
	print('============ Validation on the val set ============')
	print('============ validation on the val set ============', file=F_txt)
	prec1, _ = validate(val_loader, model, criterion, epoch_item, best_prec1, F_txt)


	# record the best prec@1 and save checkpoint
	is_best = prec1 > best_prec1
	best_prec1 = max(prec1, best_prec1)

	# save the checkpoint
	if is_best:
		save_checkpoint(
			{
				'epoch_index': epoch_item,
				'arch': opt.basemodel,
				'state_dict': model.state_dict(),
				'best_prec1': best_prec1,
				'optimizer' : optimizer.state_dict(),
			}, os.path.join(opt.outf, 'model_best.pth.tar'))


	# Also snapshot every 10th epoch regardless of validation accuracy
	if epoch_item % 10 == 0:
		filename = os.path.join(opt.outf, 'epoch_%d.pth.tar' %epoch_item)
		save_checkpoint(
		{
			'epoch_index': epoch_item,
			'arch': opt.basemodel,
			'state_dict': model.state_dict(),
			'best_prec1': best_prec1,
			'optimizer' : optimizer.state_dict(),
		}, filename)


	# Testing Phase
	# NOTE(review): the test set is evaluated every epoch for monitoring only;
	# model selection (is_best) is driven purely by the validation set above.
	print('============ Testing on the test set ============')
	print('============ Testing on the test set ============', file=F_txt)
	prec1, _ = validate(test_loader, model, criterion, epoch_item, best_prec1, F_txt)


F_txt.close()
print('............Training is end............')

# ============================================ Training End ==============================================================

================================================
FILE: DN4_2019_Version/DN4_Train_5way5shot.py
================================================
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Author: Wenbin Li (liwenbin.nju@gmail.com)
Date: April 9, 2019
Version: V0

Citation: 
@inproceedings{li2019DN4,
  title={Revisiting Local Descriptor based Image-to-Class Measure for Few-shot Learning},
  author={Li, Wenbin and Wang, Lei and Xu, Jinglin and Huo, Jing and Gao Yang and Luo, Jiebo},
  booktitle={CVPR},
  year={2019}
}
"""


from __future__ import print_function
import argparse
import os
import random
import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import grad
import time
from torch import autograd
from PIL import ImageFile
import pdb
import sys
sys.dont_write_bytecode = True


# ============================ Data & Networks =====================================
from dataset.datasets_csv import Imagefolder_csv
import models.network as DN4Net
# ==================================================================================


# Tolerate truncated/corrupt images in the dataset instead of raising
ImageFile.LOAD_TRUNCATED_IMAGES = True
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0'


# Command-line configuration for DN4 5-way 5-shot training
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_dir', default='/Datasets/miniImageNet--ravi', help='/miniImageNet')
parser.add_argument('--data_name', default='miniImageNet', help='miniImageNet|StanfordDog|StanfordCar|CubBird')
parser.add_argument('--mode', default='train', help='train|val|test')
parser.add_argument('--outf', default='./results/DN4')
parser.add_argument('--resume', default='', type=str, help='path to the lastest checkpoint (default: none)')
parser.add_argument('--basemodel', default='Conv64F', help='Conv64F|ResNet256F')
parser.add_argument('--workers', type=int, default=8)
#  Few-shot parameters  #
parser.add_argument('--imageSize', type=int, default=84)
parser.add_argument('--episodeSize', type=int, default=1, help='the mini-batch size of training')
parser.add_argument('--testepisodeSize', type=int, default=1, help='one episode is taken as a mini-batch')
parser.add_argument('--epochs', type=int, default=30, help='the total number of training epoch')
parser.add_argument('--episode_train_num', type=int, default=10000, help='the total number of training episodes')
parser.add_argument('--episode_val_num', type=int, default=1000, help='the total number of evaluation episodes')
parser.add_argument('--episode_test_num', type=int, default=1000, help='the total number of testing episodes')
parser.add_argument('--way_num', type=int, default=5, help='the number of way/class')
parser.add_argument('--shot_num', type=int, default=5, help='the number of shot')
parser.add_argument('--query_num', type=int, default=10, help='the number of queries')
parser.add_argument('--neighbor_k', type=int, default=3, help='the number of k-nearest neighbors')
parser.add_argument('--lr', type=float, default=0.005, help='learning rate, default=0.005')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', default=True, help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='the number of gpus')
parser.add_argument('--nc', type=int, default=3, help='input image channels')
parser.add_argument('--clamp_lower', type=float, default=-0.01)
parser.add_argument('--clamp_upper', type=float, default=0.01)
parser.add_argument('--print_freq', '-p', default=100, type=int, metavar='N', help='print frequency (default: 100)')
opt = parser.parse_args()
# NOTE(review): forcing cuda=True here makes the --cuda flag ineffective.
opt.cuda = True
cudnn.benchmark = True



# ======================================= Define functions =============================================

def adjust_learning_rate(optimizer, epoch_num):
	"""Sets the learning rate to the initial LR decayed by 0.05 every 10 epochs"""
	decay = 0.05 ** (epoch_num // 10)
	new_lr = opt.lr * decay
	for group in optimizer.param_groups:
		group['lr'] = new_lr


def train(train_loader, model, criterion, optimizer, epoch_index, F_txt):
	"""Train the model for one epoch of few-shot episodes.

	Each batch element is one episode: stacked query images/targets plus a
	list of per-class support image tensors. Performs a forward pass, a
	backward pass, and an Adam step per episode, logging timing, loss and
	top-1 precision every opt.print_freq episodes to stdout and F_txt.
	"""
	batch_time = AverageMeter()
	data_time = AverageMeter()
	losses = AverageMeter()
	top1 = AverageMeter()


	end = time.time()
	for episode_index, (query_images, query_targets, support_images, support_targets) in enumerate(train_loader):

		# Measure data loading time
		data_time.update(time.time() - end)

		# Convert query and support images
		# (stack the per-class query tensors into a single batch)
		query_images = torch.cat(query_images, 0)
		input_var1 = query_images.cuda()

		# Each class keeps its own stacked support tensor
		input_var2 = []
		for i in range(len(support_images)):
			temp_support = support_images[i]
			temp_support = torch.cat(temp_support, 0)
			temp_support = temp_support.cuda()
			input_var2.append(temp_support)

		# Deal with the targets
		target = torch.cat(query_targets, 0)
		target = target.cuda()

		# Calculate the output
		output = model(input_var1, input_var2)
		loss = criterion(output, target)

		# Compute gradients and do SGD step
		optimizer.zero_grad()
		loss.backward()
		optimizer.step()


		# Measure accuracy and record loss
		prec1, _ = accuracy(output, target, topk=(1,3))
		losses.update(loss.item(), query_images.size(0))
		top1.update(prec1[0], query_images.size(0))


		# Measure elapsed time (includes data loading + forward/backward)
		batch_time.update(time.time() - end)
		end = time.time()


		#============== print the intermediate results ==============#
		if episode_index % opt.print_freq == 0 and episode_index != 0:

			print('Eposide-({0}): [{1}/{2}]\t'
				'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
				'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
				'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
				'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
					epoch_index, episode_index, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1))

			print('Eposide-({0}): [{1}/{2}]\t'
				'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
				'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
				'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
				'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
					epoch_index, episode_index, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1), file=F_txt)



def validate(val_loader, model, criterion, epoch_index, best_prec1, F_txt):
	"""Evaluate the model on one pass of few-shot episodes from val_loader.

	Each batch element is one episode: stacked query images/targets plus a
	list of per-class support image tensors. Accumulates loss and top-1
	precision, logging every opt.print_freq episodes to stdout and F_txt.

	Returns:
		(top1.avg, accuracies): mean top-1 precision over all episodes,
		and the per-episode precision list.
	"""
	batch_time = AverageMeter()
	losses = AverageMeter()
	top1 = AverageMeter()

	# switch to evaluate mode
	model.eval()
	accuracies = []

	end = time.time()
	# Fix: evaluation previously ran with autograd enabled; no gradients are
	# needed here, so disabling them saves memory and speeds up the forward
	# pass without changing any computed value.
	with torch.no_grad():
		for episode_index, (query_images, query_targets, support_images, support_targets) in enumerate(val_loader):

			# Stack the per-class query tensors into a single batch
			query_images = torch.cat(query_images, 0)
			input_var1 = query_images.cuda()

			# Each class keeps its own stacked support tensor
			input_var2 = []
			for i in range(len(support_images)):
				temp_support = support_images[i]
				temp_support = torch.cat(temp_support, 0)
				temp_support = temp_support.cuda()
				input_var2.append(temp_support)

			# Deal with the target
			target = torch.cat(query_targets, 0)
			target = target.cuda()

			# Calculate the output
			output = model(input_var1, input_var2)
			loss = criterion(output, target)

			# measure accuracy and record loss
			prec1, _ = accuracy(output, target, topk=(1, 3))
			losses.update(loss.item(), query_images.size(0))
			top1.update(prec1[0], query_images.size(0))
			accuracies.append(prec1)

			# measure elapsed time
			batch_time.update(time.time() - end)
			end = time.time()

			#============== print the intermediate results ==============#
			if episode_index % opt.print_freq == 0 and episode_index != 0:

				print('Test-({0}): [{1}/{2}]\t'
					'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
					'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
					'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
						epoch_index, episode_index, len(val_loader), batch_time=batch_time, loss=losses, top1=top1))

				print('Test-({0}): [{1}/{2}]\t'
					'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
					'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
					'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
						epoch_index, episode_index, len(val_loader), batch_time=batch_time, loss=losses, top1=top1), file=F_txt)

	print(' * Prec@1 {top1.avg:.3f} Best_prec1 {best_prec1:.3f}'.format(top1=top1, best_prec1=best_prec1))
	print(' * Prec@1 {top1.avg:.3f} Best_prec1 {best_prec1:.3f}'.format(top1=top1, best_prec1=best_prec1), file=F_txt)

	return top1.avg, accuracies



def save_checkpoint(state, filename='checkpoint.pth.tar'):
	"""Serialize a checkpoint dict (epoch, model/optimizer state) to disk."""
	torch.save(state, filename)


class AverageMeter(object):
	"""Tracks the latest value, running sum, count, and mean of a metric."""

	def __init__(self):
		self.reset()

	def reset(self):
		"""Clear all accumulated statistics."""
		self.val = 0
		self.avg = 0
		self.sum = 0
		self.count = 0

	def update(self, val, n=1):
		"""Record a new observation ``val`` representing ``n`` samples."""
		self.val = val
		self.count += n
		self.sum += val * n
		self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
	"""Compute the precision@k for the specified values of k.

	Args:
		output: (batch, num_classes) tensor of class scores.
		target: (batch,) tensor of ground-truth class indices.
		topk:   tuple of k values to report.

	Returns:
		List of 1-element tensors, one per k, each the percentage of samples
		whose true label appears in the top-k predictions.
	"""
	with torch.no_grad():
		maxk = max(topk)
		batch_size = target.size(0)

		# Top-k predicted class indices, transposed to shape (maxk, batch)
		_, pred = output.topk(maxk, 1, True, True)
		pred = pred.t()
		correct = pred.eq(target.view(1, -1).expand_as(pred))

		res = []
		for k in topk:
			# Fix: use reshape() instead of view() — the sliced tensor may be
			# non-contiguous, and view() raises a RuntimeError in that case.
			correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
			res.append(correct_k.mul_(100.0 / batch_size))
		return res



# ======================================== Settings of path ============================================
# saving path
# Build a unique output folder name from the experiment configuration,
# e.g. ./results/DN4_miniImageNet_Conv64F_5Way_5Shot_K3
opt.outf = opt.outf+'_'+opt.data_name+'_'+str(opt.basemodel)+'_'+str(opt.way_num)+'Way_'+str(opt.shot_num)+'Shot'+'_K'+str(opt.neighbor_k)

if not os.path.exists(opt.outf):
	os.makedirs(opt.outf)

if torch.cuda.is_available() and not opt.cuda:
	print("WARNING: You have a CUDA device, so you should probably run with --cuda")

# save the opt and results to a txt file
# NOTE(review): 'opt_resutls.txt' is a typo of 'opt_results.txt'; kept as-is
# because the companion test scripts read the same filename.
txt_save_path = os.path.join(opt.outf, 'opt_resutls.txt')
F_txt = open(txt_save_path, 'a+')  # closed only after the training loop below
print(opt)
print(opt, file=F_txt)



# ========================================== Model Config ===============================================
ngpu = int(opt.ngpu)
# NOTE(review): 'global' at module level is a no-op; kept byte-identical.
global best_prec1, epoch_index
best_prec1 = 0
epoch_index = 0

model = DN4Net.define_DN4Net(which_model=opt.basemodel, num_classes=opt.way_num, neighbor_k=opt.neighbor_k, norm='batch', 
	init_type='normal', use_gpu=opt.cuda)

# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(opt.beta1, 0.9))


# optionally resume from a checkpoint
if opt.resume:
	if os.path.isfile(opt.resume):
		print("=> loading checkpoint '{}'".format(opt.resume))
		checkpoint = torch.load(opt.resume)
		epoch_index = checkpoint['epoch_index']
		best_prec1 = checkpoint['best_prec1']
		model.load_state_dict(checkpoint['state_dict'])
		optimizer.load_state_dict(checkpoint['optimizer'])
		print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']))
		print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']), file=F_txt)
	else:
		print("=> no checkpoint found at '{}'".format(opt.resume))
		print("=> no checkpoint found at '{}'".format(opt.resume), file=F_txt)

# Wrap with DataParallel only when more than one GPU is requested
if opt.ngpu > 1:
	model = nn.DataParallel(model, range(opt.ngpu))

# print the architecture of the network
print(model)
print(model, file=F_txt)




# ======================================== Training phase ===============================================
print('\n............Start training............\n')
start_time = time.time()


for epoch_item in range(opt.epochs):
	print('===================================== Epoch %d =====================================' %epoch_item)
	print('===================================== Epoch %d =====================================' %epoch_item, file=F_txt)
	adjust_learning_rate(optimizer, epoch_item)


	# ======================================= Folder of Datasets =======================================
	# image transform & normalization
	ImgTransform = transforms.Compose([
			transforms.Resize((opt.imageSize, opt.imageSize)),
			transforms.ToTensor(),
			transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
			])

	# Datasets are re-instantiated every epoch so a fresh set of random
	# episodes is sampled each time.
	trainset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode=opt.mode, image_size=opt.imageSize, transform=ImgTransform,
		episode_num=opt.episode_train_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)
	valset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode='val', image_size=opt.imageSize, transform=ImgTransform,
		episode_num=opt.episode_val_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)
	testset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode='test', image_size=opt.imageSize, transform=ImgTransform,
		episode_num=opt.episode_test_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)

	print('Trainset: %d' %len(trainset))
	print('Valset: %d' %len(valset))
	print('Testset: %d' %len(testset))
	print('Trainset: %d' %len(trainset), file=F_txt)
	print('Valset: %d' %len(valset), file=F_txt)
	print('Testset: %d' %len(testset), file=F_txt)



	# ========================================== Load Datasets =========================================
	train_loader = torch.utils.data.DataLoader(
		trainset, batch_size=opt.episodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		)
	val_loader = torch.utils.data.DataLoader(
		valset, batch_size=opt.testepisodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		)
	test_loader = torch.utils.data.DataLoader(
		testset, batch_size=opt.testepisodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		)


	# ============================================ Training ===========================================
	# Fix the parameters of Batch Normalization after 10000 episodes (1 epoch)
	# (weights are still updated by the optimizer inside train(); eval() only
	# freezes the BN running statistics from the second epoch onward)
	if epoch_item < 1:
		model.train()
	else:
		model.eval()

	# Train for 10000 episodes in each epoch
	train(train_loader, model, criterion, optimizer, epoch_item, F_txt)


	# =========================================== Evaluation ==========================================
	print('============ Validation on the val set ============')
	print('============ validation on the val set ============', file=F_txt)
	prec1, _ = validate(val_loader, model, criterion, epoch_item, best_prec1, F_txt)


	# record the best prec@1 and save checkpoint
	is_best = prec1 > best_prec1
	best_prec1 = max(prec1, best_prec1)

	# save the checkpoint
	if is_best:
		save_checkpoint(
			{
				'epoch_index': epoch_item,
				'arch': opt.basemodel,
				'state_dict': model.state_dict(),
				'best_prec1': best_prec1,
				'optimizer' : optimizer.state_dict(),
			}, os.path.join(opt.outf, 'model_best.pth.tar'))


	# Also snapshot every 10th epoch regardless of validation accuracy
	if epoch_item % 10 == 0:
		filename = os.path.join(opt.outf, 'epoch_%d.pth.tar' %epoch_item)
		save_checkpoint(
		{
			'epoch_index': epoch_item,
			'arch': opt.basemodel,
			'state_dict': model.state_dict(),
			'best_prec1': best_prec1,
			'optimizer' : optimizer.state_dict(),
		}, filename)


	# Testing Phase
	# NOTE(review): the test set is evaluated every epoch for monitoring only;
	# model selection (is_best) is driven purely by the validation set above.
	print('============ Testing on the test set ============')
	print('============ Testing on the test set ============', file=F_txt)
	prec1, _ = validate(test_loader, model, criterion, epoch_item, best_prec1, F_txt)


F_txt.close()
print('............Training is end............')

# ============================================ Training End ==============================================================


================================================
FILE: DN4_2019_Version/DN4_Train_5way5shot_DA.py
================================================
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Author: Wenbin Li (liwenbin.nju@gmail.com)
Date: April 9, 2019
Version: V0

Citation: 
@inproceedings{li2019DN4,
  title={Revisiting Local Descriptor based Image-to-Class Measure for Few-shot Learning},
  author={Li, Wenbin and Wang, Lei and Xu, Jinglin and Huo, Jing and Gao Yang and Luo, Jiebo},
  booktitle={CVPR},
  year={2019}
}
"""


from __future__ import print_function
import argparse
import os
import random
import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import grad
import time
from torch import autograd
from PIL import ImageFile
import pdb
import sys
sys.dont_write_bytecode = True


# ============================ Data & Networks =====================================
from dataset.datasets_csv import Imagefolder_csv
import models.network as DN4Net
# ==================================================================================


# Tolerate truncated/corrupt images in the dataset instead of raising
ImageFile.LOAD_TRUNCATED_IMAGES = True
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0'


# Command-line configuration for DN4 5-way 5-shot training (data-augmented variant)
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_dir', default='/Datasets/miniImageNet--ravi', help='/miniImageNet')
parser.add_argument('--data_name', default='miniImageNet', help='miniImageNet|StanfordDog|StanfordCar|CubBird')
parser.add_argument('--mode', default='train', help='train|val|test')
parser.add_argument('--outf', default='./results/DN4_DA')
parser.add_argument('--resume', default='', type=str, help='path to the lastest checkpoint (default: none)')
parser.add_argument('--basemodel', default='Conv64F', help='Conv64F|ResNet256F')
parser.add_argument('--workers', type=int, default=8)
#  Few-shot parameters  #
parser.add_argument('--imageSize', type=int, default=84)
parser.add_argument('--episodeSize', type=int, default=1, help='the mini-batch size of training')
parser.add_argument('--testepisodeSize', type=int, default=1, help='one episode is taken as a mini-batch')
parser.add_argument('--epochs', type=int, default=30, help='the total number of training epoch')
parser.add_argument('--episode_train_num', type=int, default=10000, help='the total number of training episodes')
parser.add_argument('--episode_val_num', type=int, default=1000, help='the total number of evaluation episodes')
parser.add_argument('--episode_test_num', type=int, default=1000, help='the total number of testing episodes')
parser.add_argument('--way_num', type=int, default=5, help='the number of way/class')
parser.add_argument('--shot_num', type=int, default=5, help='the number of shot')
parser.add_argument('--query_num', type=int, default=10, help='the number of queries')
parser.add_argument('--neighbor_k', type=int, default=3, help='the number of k-nearest neighbors')
parser.add_argument('--lr', type=float, default=0.005, help='learning rate, default=0.005')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', default=True, help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='the number of gpus')
parser.add_argument('--nc', type=int, default=3, help='input image channels')
parser.add_argument('--clamp_lower', type=float, default=-0.01)
parser.add_argument('--clamp_upper', type=float, default=0.01)
parser.add_argument('--print_freq', '-p', default=100, type=int, metavar='N', help='print frequency (default: 100)')
opt = parser.parse_args()
# NOTE(review): forcing cuda=True here makes the --cuda flag ineffective.
opt.cuda = True
cudnn.benchmark = True



# ======================================= Define functions =============================================

def adjust_learning_rate(optimizer, epoch_num):
	"""Sets the learning rate to the initial LR decayed by 0.05 every 10 epochs"""
	decay = 0.05 ** (epoch_num // 10)
	new_lr = opt.lr * decay
	for group in optimizer.param_groups:
		group['lr'] = new_lr


def train(train_loader, model, criterion, optimizer, epoch_index, F_txt):
	"""Train the model for one epoch of few-shot episodes.

	Each batch element is one episode: stacked query images/targets plus a
	list of per-class support image tensors. Performs a forward pass, a
	backward pass, and an Adam step per episode, logging timing, loss and
	top-1 precision every opt.print_freq episodes to stdout and F_txt.
	"""
	batch_time = AverageMeter()
	data_time = AverageMeter()
	losses = AverageMeter()
	top1 = AverageMeter()


	end = time.time()
	for episode_index, (query_images, query_targets, support_images, support_targets) in enumerate(train_loader):

		# Measure data loading time
		data_time.update(time.time() - end)

		# Convert query and support images
		# (stack the per-class query tensors into a single batch)
		query_images = torch.cat(query_images, 0)
		input_var1 = query_images.cuda()

		# Each class keeps its own stacked support tensor
		input_var2 = []
		for i in range(len(support_images)):
			temp_support = support_images[i]
			temp_support = torch.cat(temp_support, 0)
			temp_support = temp_support.cuda()
			input_var2.append(temp_support)

		# Deal with the targets
		target = torch.cat(query_targets, 0)
		target = target.cuda()

		# Calculate the output
		output = model(input_var1, input_var2)
		loss = criterion(output, target)

		# Compute gradients and do SGD step
		optimizer.zero_grad()
		loss.backward()
		optimizer.step()


		# Measure accuracy and record loss
		prec1, _ = accuracy(output, target, topk=(1,3))
		losses.update(loss.item(), query_images.size(0))
		top1.update(prec1[0], query_images.size(0))


		# Measure elapsed time (includes data loading + forward/backward)
		batch_time.update(time.time() - end)
		end = time.time()


		#============== print the intermediate results ==============#
		if episode_index % opt.print_freq == 0 and episode_index != 0:

			print('Eposide-({0}): [{1}/{2}]\t'
				'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
				'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
				'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
				'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
					epoch_index, episode_index, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1))

			print('Eposide-({0}): [{1}/{2}]\t'
				'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
				'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
				'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
				'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
					epoch_index, episode_index, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1), file=F_txt)



def validate(val_loader, model, criterion, epoch_index, best_prec1, F_txt):
	"""Evaluate the model on one pass of few-shot episodes from val_loader.

	Each batch element is one episode: stacked query images/targets plus a
	list of per-class support image tensors. Accumulates loss and top-1
	precision, logging every opt.print_freq episodes to stdout and F_txt.

	Returns:
		(top1.avg, accuracies): mean top-1 precision over all episodes,
		and the per-episode precision list.
	"""
	batch_time = AverageMeter()
	losses = AverageMeter()
	top1 = AverageMeter()

	# switch to evaluate mode
	model.eval()
	accuracies = []

	end = time.time()
	# Fix: evaluation previously ran with autograd enabled; no gradients are
	# needed here, so disabling them saves memory and speeds up the forward
	# pass without changing any computed value.
	with torch.no_grad():
		for episode_index, (query_images, query_targets, support_images, support_targets) in enumerate(val_loader):

			# Stack the per-class query tensors into a single batch
			query_images = torch.cat(query_images, 0)
			input_var1 = query_images.cuda()

			# Each class keeps its own stacked support tensor
			input_var2 = []
			for i in range(len(support_images)):
				temp_support = support_images[i]
				temp_support = torch.cat(temp_support, 0)
				temp_support = temp_support.cuda()
				input_var2.append(temp_support)

			# Deal with the target
			target = torch.cat(query_targets, 0)
			target = target.cuda()

			# Calculate the output
			output = model(input_var1, input_var2)
			loss = criterion(output, target)

			# measure accuracy and record loss
			prec1, _ = accuracy(output, target, topk=(1, 3))
			losses.update(loss.item(), query_images.size(0))
			top1.update(prec1[0], query_images.size(0))
			accuracies.append(prec1)

			# measure elapsed time
			batch_time.update(time.time() - end)
			end = time.time()

			#============== print the intermediate results ==============#
			if episode_index % opt.print_freq == 0 and episode_index != 0:

				print('Test-({0}): [{1}/{2}]\t'
					'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
					'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
					'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
						epoch_index, episode_index, len(val_loader), batch_time=batch_time, loss=losses, top1=top1))

				print('Test-({0}): [{1}/{2}]\t'
					'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
					'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
					'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
						epoch_index, episode_index, len(val_loader), batch_time=batch_time, loss=losses, top1=top1), file=F_txt)

	print(' * Prec@1 {top1.avg:.3f} Best_prec1 {best_prec1:.3f}'.format(top1=top1, best_prec1=best_prec1))
	print(' * Prec@1 {top1.avg:.3f} Best_prec1 {best_prec1:.3f}'.format(top1=top1, best_prec1=best_prec1), file=F_txt)

	return top1.avg, accuracies



def save_checkpoint(state, filename='checkpoint.pth.tar'):
	"""Write the checkpoint dictionary `state` to `filename` via torch.save."""
	torch.save(state, f=filename)


class AverageMeter(object):
	"""Tracks the most recent value plus a running (weighted) average."""

	def __init__(self):
		self.reset()

	def reset(self):
		"""Zero out all tracked statistics."""
		self.val = 0
		self.sum = 0
		self.count = 0
		self.avg = 0

	def update(self, val, n=1):
		"""Record `val` observed `n` times and refresh the running mean."""
		self.val = val
		self.sum = self.sum + val * n
		self.count = self.count + n
		self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
	"""Computes the precision@k for the specified values of k.

	Args:
		output: (batch, num_classes) tensor of class scores.
		target: (batch,) tensor of ground-truth class indices.
		topk: iterable of k values to evaluate.
	Returns:
		List of 1-element tensors, each the top-k accuracy in percent.
	"""
	with torch.no_grad():
		maxk = max(topk)
		batch_size = target.size(0)

		# pred: (maxk, batch) after transpose, so row j holds every
		# sample's (j+1)-th best prediction.
		_, pred = output.topk(maxk, 1, True, True)
		pred = pred.t()
		correct = pred.eq(target.view(1, -1).expand_as(pred))

		res = []
		for k in topk:
			# BUGFIX: `correct` inherits the transposed (non-contiguous)
			# strides, so `.view(-1)` raises a RuntimeError on recent
			# PyTorch versions; `.reshape(-1)` copies when needed.
			correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
			res.append(correct_k.mul_(100.0 / batch_size))
		return res



# ======================================== Settings of path ============================================
# saving path
# Compose a run-specific output folder name, e.g.
# ./results/DN4_miniImageNet_Conv64F_5Way_5Shot_K3
opt.outf = opt.outf+'_'+opt.data_name+'_'+str(opt.basemodel)+'_'+str(opt.way_num)+'Way_'+str(opt.shot_num)+'Shot'+'_K'+str(opt.neighbor_k)

if not os.path.exists(opt.outf):
	os.makedirs(opt.outf)

if torch.cuda.is_available() and not opt.cuda:
	print("WARNING: You have a CUDA device, so you should probably run with --cuda")

# save the opt and results to a txt file
# NOTE: 'opt_resutls.txt' is a typo of "results", but it is a runtime file
# name shared with existing result folders, so it is kept unchanged.
txt_save_path = os.path.join(opt.outf, 'opt_resutls.txt')
F_txt = open(txt_save_path, 'a+')  # append mode: repeated runs accumulate logs
print(opt)
print(opt, file=F_txt)



# ========================================== Model Config ===============================================
ngpu = int(opt.ngpu)
# NOTE(review): `global` at module (top) level is a no-op; kept as-is.
global best_prec1, epoch_index
best_prec1 = 0
epoch_index = 0

# Build the DN4 network (embedding backbone + image-to-class k-NN measure).
model = DN4Net.define_DN4Net(which_model=opt.basemodel, num_classes=opt.way_num, neighbor_k=opt.neighbor_k, norm='batch', 
	init_type='normal', use_gpu=opt.cuda)

# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(opt.beta1, 0.9))


# optionally resume from a checkpoint
if opt.resume:
	if os.path.isfile(opt.resume):
		print("=> loading checkpoint '{}'".format(opt.resume))
		checkpoint = torch.load(opt.resume)
		epoch_index = checkpoint['epoch_index']
		best_prec1 = checkpoint['best_prec1']
		model.load_state_dict(checkpoint['state_dict'])
		optimizer.load_state_dict(checkpoint['optimizer'])
		print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']))
		print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']), file=F_txt)
	else:
		print("=> no checkpoint found at '{}'".format(opt.resume))
		print("=> no checkpoint found at '{}'".format(opt.resume), file=F_txt)

# Wrap in DataParallel only when multiple GPUs are requested.
if opt.ngpu > 1:
	model = nn.DataParallel(model, range(opt.ngpu))

# print the architecture of the network
print(model) 
print(model, file=F_txt) 




# ======================================== Training phase ===============================================
print('\n............Start training............\n')
start_time = time.time()


# Each epoch re-instantiates the episodic datasets so a fresh random set of
# few-shot episodes is sampled every epoch.
for epoch_item in range(opt.epochs):
	print('===================================== Epoch %d =====================================' %epoch_item)
	print('===================================== Epoch %d =====================================' %epoch_item, file=F_txt)
	adjust_learning_rate(optimizer, epoch_item)


	# ======================================= Folder of Datasets =======================================
	# image transform & normalization
	ImgTransform = transforms.Compose([
			transforms.Resize((opt.imageSize, opt.imageSize)),
			transforms.ToTensor(),
			transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
			])

	# Training-only augmentation: resize larger, random-crop to 84, jitter
	# colors and random horizontal flip; val/test use plain ImgTransform.
	ImgTransform_DA = transforms.Compose([
		transforms.Resize((100, 100)),
		transforms.RandomCrop(84),
		transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
		transforms.RandomHorizontalFlip(),
		transforms.ToTensor(),
		transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
		])

	trainset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode=opt.mode, image_size=opt.imageSize, transform=ImgTransform_DA,
		episode_num=opt.episode_train_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)
	valset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode='val', image_size=opt.imageSize, transform=ImgTransform,
		episode_num=opt.episode_val_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)
	testset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode='test', image_size=opt.imageSize, transform=ImgTransform,
		episode_num=opt.episode_test_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)

	print('Trainset: %d' %len(trainset))
	print('Valset: %d' %len(valset))
	print('Testset: %d' %len(testset))
	print('Trainset: %d' %len(trainset), file=F_txt)
	print('Valset: %d' %len(valset), file=F_txt)
	print('Testset: %d' %len(testset), file=F_txt)



	# ========================================== Load Datasets =========================================
	train_loader = torch.utils.data.DataLoader(
		trainset, batch_size=opt.episodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		)
	val_loader = torch.utils.data.DataLoader(
		valset, batch_size=opt.testepisodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		)
	test_loader = torch.utils.data.DataLoader(
		testset, batch_size=opt.testepisodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		)


	# ============================================ Training ===========================================
	# Fix the parameters of Batch Normalization after 10000 episodes (1 epoch)
	# (model.eval() freezes BatchNorm's running statistics; gradients still flow)
	if epoch_item < 1:
		model.train()
	else:
		model.eval()

	# Train for 10000 episodes in each epoch
	train(train_loader, model, criterion, optimizer, epoch_item, F_txt)


	# =========================================== Evaluation ==========================================
	print('============ Validation on the val set ============')
	print('============ validation on the val set ============', file=F_txt)
	prec1, _ = validate(val_loader, model, criterion, epoch_item, best_prec1, F_txt)


	# record the best prec@1 and save checkpoint
	is_best = prec1 > best_prec1
	best_prec1 = max(prec1, best_prec1)

	# save the checkpoint
	if is_best:
		save_checkpoint(
			{
				'epoch_index': epoch_item,
				'arch': opt.basemodel,
				'state_dict': model.state_dict(),
				'best_prec1': best_prec1,
				'optimizer' : optimizer.state_dict(),
			}, os.path.join(opt.outf, 'model_best.pth.tar'))


	# Periodic snapshot every 10 epochs, regardless of validation accuracy.
	if epoch_item % 10 == 0:
		filename = os.path.join(opt.outf, 'epoch_%d.pth.tar' %epoch_item)
		save_checkpoint(
		{
			'epoch_index': epoch_item,
			'arch': opt.basemodel,
			'state_dict': model.state_dict(),
			'best_prec1': best_prec1,
			'optimizer' : optimizer.state_dict(),
		}, filename)


	# Testing Phase
	print('============ Testing on the test set ============')
	print('============ Testing on the test set ============', file=F_txt)
	prec1, _ = validate(test_loader, model, criterion, epoch_item, best_prec1, F_txt)


F_txt.close()
print('............Training is end............')

# ============================================ Training End ==============================================================


================================================
FILE: DN4_2019_Version/DN4_Train_5way5shot_Resnet.py
================================================
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Author: Wenbin Li (liwenbin.nju@gmail.com)
Date: April 9, 2019
Version: V0

Citation: 
@inproceedings{li2019DN4,
  title={Revisiting Local Descriptor based Image-to-Class Measure for Few-shot Learning},
  author={Li, Wenbin and Wang, Lei and Xu, Jinglin and Huo, Jing and Gao Yang and Luo, Jiebo},
  booktitle={CVPR},
  year={2019}
}
"""


from __future__ import print_function
import argparse
import os
import random
import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import grad
import time
from torch import autograd
from PIL import ImageFile
import pdb
import sys
sys.dont_write_bytecode = True


# ============================ Data & Networks =====================================
from dataset.datasets_csv import Imagefolder_csv
import models.network as DN4Net
# ==================================================================================


# Let PIL load partially-truncated images instead of raising an IOError.
ImageFile.LOAD_TRUNCATED_IMAGES = True
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0'


parser = argparse.ArgumentParser()
parser.add_argument('--dataset_dir', default='/Datasets/miniImageNet--ravi', help='/miniImageNet')
parser.add_argument('--data_name', default='miniImageNet', help='miniImageNet|StanfordDog|StanfordCar|CubBird')
parser.add_argument('--mode', default='train', help='train|val|test')
parser.add_argument('--outf', default='./results/DN4')
parser.add_argument('--resume', default='', type=str, help='path to the lastest checkpoint (default: none)')
parser.add_argument('--basemodel', default='ResNet256F', help='Conv64F|ResNet256F')
parser.add_argument('--workers', type=int, default=8)
#  Few-shot parameters  #
parser.add_argument('--imageSize', type=int, default=84)
parser.add_argument('--episodeSize', type=int, default=1, help='the mini-batch size of training')
parser.add_argument('--testepisodeSize', type=int, default=1, help='one episode is taken as a mini-batch')
parser.add_argument('--epochs', type=int, default=30, help='the total number of training epoch')
parser.add_argument('--episode_train_num', type=int, default=10000, help='the total number of training episodes')
parser.add_argument('--episode_val_num', type=int, default=1000, help='the total number of evaluation episodes')
parser.add_argument('--episode_test_num', type=int, default=1000, help='the total number of testing episodes')
parser.add_argument('--way_num', type=int, default=5, help='the number of way/class')
parser.add_argument('--shot_num', type=int, default=5, help='the number of shot')
parser.add_argument('--query_num', type=int, default=10, help='the number of queries')
parser.add_argument('--neighbor_k', type=int, default=3, help='the number of k-nearest neighbors')
# NOTE(review): the help text says "default=0.005" but the actual default is 0.001.
parser.add_argument('--lr', type=float, default=0.001, help='learning rate, default=0.005')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', default=True, help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='the number of gpus')
parser.add_argument('--nc', type=int, default=3, help='input image channels')
parser.add_argument('--clamp_lower', type=float, default=-0.01)
parser.add_argument('--clamp_upper', type=float, default=0.01)
parser.add_argument('--print_freq', '-p', default=100, type=int, metavar='N', help='print frequency (default: 100)')
opt = parser.parse_args()
opt.cuda = True  # forced on: the --cuda flag is effectively always true
cudnn.benchmark = True



# ======================================= Define functions =============================================

def adjust_learning_rate(optimizer, epoch_num):
	"""Sets the learning rate to the initial LR decayed by 0.05 every 5 epochs.

	NOTE: the old docstring claimed a decay every 10 epochs, but the code
	uses `epoch_num // 5`, i.e. the LR is multiplied by another factor of
	0.05 at epochs 5, 10, 15, ... The docstring now matches the code.

	Args:
		optimizer: optimizer whose `param_groups` LRs are overwritten.
		epoch_num: current (0-based) epoch number.
	"""
	lr = opt.lr * (0.05 ** (epoch_num // 5))
	for param_group in optimizer.param_groups:
		param_group['lr'] = lr


def train(train_loader, model, criterion, optimizer, epoch_index, F_txt):
	"""Train the model for one epoch of randomly sampled few-shot episodes.

	Args:
		train_loader: yields (query_images, query_targets, support_images,
			support_targets) for each episode.
		model: DN4 network called as model(query_batch, list_of_support_batches).
		criterion: classification loss (CrossEntropyLoss over way_num classes).
		optimizer: stepped once per episode.
		epoch_index: current epoch number (used only in log messages).
		F_txt: open text file that mirrors the console log.
	"""
	batch_time = AverageMeter()
	data_time = AverageMeter()
	losses = AverageMeter()
	top1 = AverageMeter()


	end = time.time()
	for episode_index, (query_images, query_targets, support_images, support_targets) in enumerate(train_loader):

		# Measure data loading time
		data_time.update(time.time() - end)

		# Convert query and support images
		query_images = torch.cat(query_images, 0)
		input_var1 = query_images.cuda()

		# support_images[i] appears to hold the shot images of class i;
		# each class is concatenated into one tensor — TODO confirm against
		# Imagefolder_csv's collate format.
		input_var2 = []
		for i in range(len(support_images)):
			temp_support = support_images[i]
			temp_support = torch.cat(temp_support, 0)
			temp_support = temp_support.cuda()
			input_var2.append(temp_support)

		# Deal with the target
		target = torch.cat(query_targets, 0)
		target = target.cuda()

		# Calculate the output
		output = model(input_var1, input_var2)
		loss = criterion(output, target)

		# Compute gradient and do SGD step
		optimizer.zero_grad()
		loss.backward()
		optimizer.step()


		# Measure accuracy and record loss
		prec1, _ = accuracy(output, target, topk=(1,3))
		losses.update(loss.item(), query_images.size(0))
		top1.update(prec1[0], query_images.size(0))


		# Measure elapsed time
		batch_time.update(time.time() - end)
		end = time.time()


		#============== print the intermediate results ==============#
		# ('Eposide' typo is kept: it is a runtime log string.)
		if episode_index % opt.print_freq == 0 and episode_index != 0:

			print('Eposide-({0}): [{1}/{2}]\t'
				'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
				'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
				'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
				'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
					epoch_index, episode_index, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1))

			print('Eposide-({0}): [{1}/{2}]\t'
				'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
				'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
				'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
				'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
					epoch_index, episode_index, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1), file=F_txt)



def validate(val_loader, model, criterion, epoch_index, best_prec1, F_txt):
	"""Evaluate the model on every episode produced by `val_loader`.

	Args:
		val_loader: yields (query_images, query_targets, support_images,
			support_targets) per episode.
		model: DN4 network; put into eval mode here.
		criterion: loss used only for logging.
		epoch_index: current epoch number (log messages only).
		best_prec1: best accuracy so far, printed for reference.
		F_txt: open text file that mirrors the console log.
	Returns:
		(top1.avg, accuracies): mean top-1 accuracy across episodes and the
		list of per-episode top-1 accuracies.
	"""
	batch_time = AverageMeter()
	losses = AverageMeter()
	top1 = AverageMeter()


	# switch to evaluate mode
	model.eval()
	accuracies = []


	end = time.time()
	for episode_index, (query_images, query_targets, support_images, support_targets) in enumerate(val_loader):

		# Convert query and support images
		query_images = torch.cat(query_images, 0)
		input_var1 = query_images.cuda()


		# One concatenated tensor per support class — presumably matching
		# the train() layout; verify against Imagefolder_csv.
		input_var2 = []
		for i in range(len(support_images)):
			temp_support = support_images[i]
			temp_support = torch.cat(temp_support, 0)
			temp_support = temp_support.cuda()
			input_var2.append(temp_support)


		# Deal with the target
		target = torch.cat(query_targets, 0)
		target = target.cuda()

		# Calculate the output
		output = model(input_var1, input_var2)
		loss = criterion(output, target)


		# measure accuracy and record loss
		prec1, _ = accuracy(output, target, topk=(1, 3))
		losses.update(loss.item(), query_images.size(0))
		top1.update(prec1[0], query_images.size(0))
		accuracies.append(prec1)


		# measure elapsed time
		batch_time.update(time.time() - end)
		end = time.time()


		#============== print the intermediate results ==============#
		if episode_index % opt.print_freq == 0 and episode_index != 0:

			print('Test-({0}): [{1}/{2}]\t'
				'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
				'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
				'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
					epoch_index, episode_index, len(val_loader), batch_time=batch_time, loss=losses, top1=top1))

			print('Test-({0}): [{1}/{2}]\t'
				'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
				'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
				'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
					epoch_index, episode_index, len(val_loader), batch_time=batch_time, loss=losses, top1=top1), file=F_txt)


	print(' * Prec@1 {top1.avg:.3f} Best_prec1 {best_prec1:.3f}'.format(top1=top1, best_prec1=best_prec1))
	print(' * Prec@1 {top1.avg:.3f} Best_prec1 {best_prec1:.3f}'.format(top1=top1, best_prec1=best_prec1), file=F_txt)

	return top1.avg, accuracies



def save_checkpoint(state, filename='checkpoint.pth.tar'):
	"""Serialize the training-state dict to disk so it can be restored later."""
	torch.save(obj=state, f=filename)


class AverageMeter(object):
	"""Running-average tracker: latest value, weighted sum, count and mean."""

	def __init__(self):
		self.reset()

	def reset(self):
		# Start every statistic from a clean slate.
		self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

	def update(self, val, n=1):
		# `n` is the weight (e.g. batch size) of this observation.
		self.val = val
		self.sum += val * n
		self.count += n
		self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
	"""Computes the precision@k for the specified values of k.

	Args:
		output: (batch, num_classes) tensor of class scores.
		target: (batch,) tensor of ground-truth class indices.
		topk: iterable of k values to evaluate.
	Returns:
		List of 1-element tensors, each the top-k accuracy in percent.
	"""
	with torch.no_grad():
		maxk = max(topk)
		batch_size = target.size(0)

		# pred: (maxk, batch) after transpose, so row j holds every
		# sample's (j+1)-th best prediction.
		_, pred = output.topk(maxk, 1, True, True)
		pred = pred.t()
		correct = pred.eq(target.view(1, -1).expand_as(pred))

		res = []
		for k in topk:
			# BUGFIX: `correct` inherits the transposed (non-contiguous)
			# strides, so `.view(-1)` raises a RuntimeError on recent
			# PyTorch versions; `.reshape(-1)` copies when needed.
			correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
			res.append(correct_k.mul_(100.0 / batch_size))
		return res



# ======================================== Settings of path ============================================
# saving path
# Compose a run-specific output folder name, e.g.
# ./results/DN4_miniImageNet_ResNet256F_5Way_5Shot_K3
opt.outf = opt.outf+'_'+opt.data_name+'_'+str(opt.basemodel)+'_'+str(opt.way_num)+'Way_'+str(opt.shot_num)+'Shot'+'_K'+str(opt.neighbor_k)

if not os.path.exists(opt.outf):
	os.makedirs(opt.outf)

if torch.cuda.is_available() and not opt.cuda:
	print("WARNING: You have a CUDA device, so you should probably run with --cuda")

# save the opt and results to a txt file
# NOTE: 'opt_resutls.txt' is a typo of "results", but it is a runtime file
# name shared with existing result folders, so it is kept unchanged.
txt_save_path = os.path.join(opt.outf, 'opt_resutls.txt')
F_txt = open(txt_save_path, 'a+')  # append mode: repeated runs accumulate logs
print(opt)
print(opt, file=F_txt)



# ========================================== Model Config ===============================================
ngpu = int(opt.ngpu)
# NOTE(review): `global` at module (top) level is a no-op; kept as-is.
global best_prec1, epoch_index
best_prec1 = 0
epoch_index = 0

# Build the DN4 network (embedding backbone + image-to-class k-NN measure).
model = DN4Net.define_DN4Net(which_model=opt.basemodel, num_classes=opt.way_num, neighbor_k=opt.neighbor_k, norm='batch', 
	init_type='normal', use_gpu=opt.cuda)

# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(opt.beta1, 0.9))


# optionally resume from a checkpoint
if opt.resume:
	if os.path.isfile(opt.resume):
		print("=> loading checkpoint '{}'".format(opt.resume))
		checkpoint = torch.load(opt.resume)
		epoch_index = checkpoint['epoch_index']
		best_prec1 = checkpoint['best_prec1']
		model.load_state_dict(checkpoint['state_dict'])
		optimizer.load_state_dict(checkpoint['optimizer'])
		print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']))
		print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']), file=F_txt)
	else:
		print("=> no checkpoint found at '{}'".format(opt.resume))
		print("=> no checkpoint found at '{}'".format(opt.resume), file=F_txt)

# Wrap in DataParallel only when multiple GPUs are requested.
if opt.ngpu > 1:
	model = nn.DataParallel(model, range(opt.ngpu))

# print the architecture of the network
print(model) 
print(model, file=F_txt) 




# ======================================== Training phase ===============================================
print('\n............Start training............\n')
start_time = time.time()


# Each epoch re-instantiates the episodic datasets so a fresh random set of
# few-shot episodes is sampled every epoch.
for epoch_item in range(opt.epochs):
	print('===================================== Epoch %d =====================================' %epoch_item)
	print('===================================== Epoch %d =====================================' %epoch_item, file=F_txt)
	adjust_learning_rate(optimizer, epoch_item)


	# ======================================= Folder of Datasets =======================================
	# image transform & normalization (same pipeline for train/val/test here;
	# this script variant uses no data augmentation)
	ImgTransform = transforms.Compose([
			transforms.Resize((opt.imageSize, opt.imageSize)),
			transforms.ToTensor(),
			transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
			])

	trainset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode=opt.mode, image_size=opt.imageSize, transform=ImgTransform,
		episode_num=opt.episode_train_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)
	valset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode='val', image_size=opt.imageSize, transform=ImgTransform,
		episode_num=opt.episode_val_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)
	testset = Imagefolder_csv(
		data_dir=opt.dataset_dir, mode='test', image_size=opt.imageSize, transform=ImgTransform,
		episode_num=opt.episode_test_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
	)

	print('Trainset: %d' %len(trainset))
	print('Valset: %d' %len(valset))
	print('Testset: %d' %len(testset))
	print('Trainset: %d' %len(trainset), file=F_txt)
	print('Valset: %d' %len(valset), file=F_txt)
	print('Testset: %d' %len(testset), file=F_txt)



	# ========================================== Load Datasets =========================================
	train_loader = torch.utils.data.DataLoader(
		trainset, batch_size=opt.episodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		)
	val_loader = torch.utils.data.DataLoader(
		valset, batch_size=opt.testepisodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		)
	test_loader = torch.utils.data.DataLoader(
		testset, batch_size=opt.testepisodeSize, shuffle=True, 
		num_workers=int(opt.workers), drop_last=True, pin_memory=True
		)


	# ============================================ Training ===========================================
	# Fix the parameters of Batch Normalization after 10000 episodes (1 epoch)
	# (model.eval() freezes BatchNorm's running statistics; gradients still flow)
	if epoch_item < 1:
		model.train()
	else:
		model.eval()

	# Train for 10000 episodes in each epoch
	train(train_loader, model, criterion, optimizer, epoch_item, F_txt)


	# =========================================== Evaluation ==========================================
	print('============ Validation on the val set ============')
	print('============ validation on the val set ============', file=F_txt)
	prec1, _ = validate(val_loader, model, criterion, epoch_item, best_prec1, F_txt)


	# record the best prec@1 and save checkpoint
	is_best = prec1 > best_prec1
	best_prec1 = max(prec1, best_prec1)

	# save the checkpoint
	if is_best:
		save_checkpoint(
			{
				'epoch_index': epoch_item,
				'arch': opt.basemodel,
				'state_dict': model.state_dict(),
				'best_prec1': best_prec1,
				'optimizer' : optimizer.state_dict(),
			}, os.path.join(opt.outf, 'model_best.pth.tar'))


	# Periodic snapshot every 10 epochs, regardless of validation accuracy.
	if epoch_item % 10 == 0:
		filename = os.path.join(opt.outf, 'epoch_%d.pth.tar' %epoch_item)
		save_checkpoint(
		{
			'epoch_index': epoch_item,
			'arch': opt.basemodel,
			'state_dict': model.state_dict(),
			'best_prec1': best_prec1,
			'optimizer' : optimizer.state_dict(),
		}, filename)


	# Testing Phase
	print('============ Testing on the test set ============')
	print('============ Testing on the test set ============', file=F_txt)
	prec1, _ = validate(test_loader, model, criterion, epoch_item, best_prec1, F_txt)


F_txt.close()
print('............Training is end............')

# ============================================ Training End ==============================================================


================================================
FILE: DN4_2019_Version/LICENSE
================================================
Copyright (c) 2019, Wenbin Li 
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


--------------------------- LICENSE FOR CovaMNet --------------------------------
BSD License

For CovaMNet software
Copyright (c) 2019, Wenbin Li
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.


THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

================================================
FILE: DN4_2019_Version/README.md
================================================
# DN4 in PyTorch

We provide a PyTorch implementation of DN4 for few-shot learning. If you use this code for your research, please cite: 

[Revisiting Local Descriptor based Image-to-Class Measure for Few-shot Learning](http://cs.nju.edu.cn/rl/people/liwb/CVPR19.pdf).<br> 
[Wenbin Li](https://cs.nju.edu.cn/liwenbin/), Lei Wang, Jinglin Xu, Jing Huo, Yang Gao and Jiebo Luo. In CVPR 2019.<br> 
<img src='imgs/Flowchart.bmp' width=600/>


## Prerequisites
- Linux
- Python 3
- Pytorch 0.4 or 1.0
- GPU + CUDA CuDNN
- pillow, torchvision, scipy, numpy

## Getting Started
### Installation

- Clone this repo:
```bash
git clone https://github.com/WenbinLee/DN4.git
cd DN4
```

- Install [PyTorch](http://pytorch.org) 1.0 and other dependencies.

### Datasets
- [miniImageNet](https://drive.google.com/file/d/1fUBrpv8iutYwdL4xE1rX_R9ef6tyncX9/view). 
- [StanfordDog](http://vision.stanford.edu/aditya86/ImageNetDogs/).
- [StanfordCar](https://ai.stanford.edu/~jkrause/cars/car_dataset.html).
- [CUB-200](http://www.vision.caltech.edu/visipedia/CUB-200.html). <br>
Thanks [Victor Garcia](https://github.com/vgsatorras/few-shot-gnn) for providing the miniImageNet dataset. In our paper, we just used the CUB-200 dataset. In fact, there is a newer revision of this dataset with more images, see [Caltech-UCSD Birds-200-2011](http://www.vision.caltech.edu/visipedia/CUB-200-2011.html). Note, if you use these datasets, please cite the corresponding papers. 


###  miniImageNet Few-shot Classification
- Train a 5-way 1-shot model based on Conv64F or ResNet256F:
```bash
python DN4_Train_5way1shot.py --dataset_dir ./datasets/miniImageNet --data_name miniImageNet
or
python DN4_Train_5way1shot_Resnet.py --dataset_dir ./datasets/miniImageNet --data_name miniImageNet
```
- Test the model (specify the dataset_dir, basemodel, and data_name first):
```bash
python DN4_Test_5way1shot.py --resume ./results/DN4_miniImageNet_Conv64F_5Way_1Shot_K3/model_best.pth.tar --basemodel Conv64F
or
python DN4_Test_5way1shot.py --resume ./results/DN4_miniImageNet_ResNet256F_5Way_1Shot_K3/model_best.pth.tar --basemodel ResNet256F
```

- The results on the miniImageNet dataset (If you set neighbor_k as 1, you may get better results in some cases): 
<img src='imgs/Results_miniImageNet2.bmp' align="center" width=900>


###  Fine-grained Few-shot Classification
- Data preprocessing (e.g., StanfordDog)
 - Specify the path of the dataset and the saving path.
 - Run the preprocessing script.
```bash
#!./dataset/StanfordDog_prepare_csv.py
python ./dataset/StanfordDog_prepare_csv.py
```
- Train a 5-way 1-shot model:
```bash
python DN4_Train_5way1shot.py --dataset_dir ./datasets/StanfordDog --data_name StanfordDog
```
- Test the model (specify the dataset_dir and data_name first):
```bash
python DN4_Test_5way1shot.py --resume ./results/DN4_StanfordDog_Conv64F_5_Way_1_Shot/model_best.pth.tar --basemodel Conv64F
```
- The results on the fine-grained datasets: 
<img src='imgs/Results_finegrained.bmp' align="center" width=900>



## Citation
If you use this code for your research, please cite our paper.
```
@inproceedings{li2019DN4,
  title={Revisiting Local Descriptor based Image-to-Class Measure for Few-shot Learning},
  author={Li, Wenbin and Wang, Lei and Xu, Jinglin and Huo, Jing and Gao, Yang and Luo, Jiebo},
  booktitle={CVPR},
  year={2019}
}
```


================================================
FILE: DN4_2019_Version/dataset/CubBird_prepare_csv.py
================================================
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Wenbin Li
## Date: Dec. 16 2018
##
## Divide data into train/val/test in a csv version
## Output: train.csv, val.csv, test.csv 
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

import os
import csv
import numpy as np
import random
from PIL import Image
import pdb


data_dir = '/FewShot/Datasets/CUB_birds'              # the path of the download dataset
save_dir = '/FewShot/Datasets/CUB_birds/For_FewShot'  # the saving path of the divided dataset


if not os.path.exists(os.path.join(save_dir, 'images')):
	os.makedirs(os.path.join(save_dir, 'images'))

images_dir = os.path.join(data_dir, 'images')
train_class_num = 130
val_class_num =  20
test_class_num = 50



# get all the bird classes (one sub-directory per class)
classes_list = [class_name for class_name in os.listdir(images_dir) if os.path.isdir(os.path.join(images_dir, class_name))]


# divide the train/val/test set (seed is fixed so the split is reproducible)
random.seed(200)
train_list = random.sample(classes_list, train_class_num)
remain_list = [rem for rem in classes_list if rem not in train_list]
val_list = random.sample(remain_list, val_class_num)
test_list = [rem for rem in remain_list if rem not in val_list]


def export_split(split_name, class_list):
	"""Copy all images of the classes in ``class_list`` into ``save_dir/images``
	and write ``<split_name>.csv`` (columns: filename, label) into ``save_dir``.

	Args:
		split_name: 'train', 'val' or 'test' — used for the csv file name
			and for the progress message.
		class_list: list of class (sub-directory) names belonging to this split.
	"""
	rows = []
	for class_name in class_list:
		class_dir = os.path.join(images_dir, class_name)
		# List the directory exactly once so the csv row and the saved file
		# always refer to the same image. (The original code listed the
		# directory twice and paired the two listings by index, which relies
		# on os.listdir returning the same order both times.)
		file_names = os.listdir(class_dir)
		rows.extend([name, class_name] for name in file_names)
		print('%s----%s' % (split_name.capitalize(), class_name))

		# re-save every image as RGB jpeg-quality-100 under the flat images dir
		for name in file_names:
			img = Image.open(os.path.join(class_dir, name))
			img = img.convert('RGB')
			img.save(os.path.join(save_dir, 'images', name), quality=100)

	# newline='' is the documented way to open a file for csv.writer
	# (prevents blank rows on Windows; no effect on POSIX output)
	with open(os.path.join(save_dir, split_name + '.csv'), 'w', newline='') as csvfile:
		writer = csv.writer(csvfile)
		writer.writerow(['filename', 'label'])
		writer.writerows(rows)


export_split('train', train_list)
export_split('val', val_list)
export_split('test', test_list)

================================================
FILE: DN4_2019_Version/dataset/CubBirds/test.csv
================================================
filename,label
Bobolink_0023_2486988341.jpg,013.Bobolink
Bobolink_0006_323626208.jpg,013.Bobolink
Bobolink_0021_2563472456.jpg,013.Bobolink
Bobolink_0005_496052159.jpg,013.Bobolink
Bobolink_0007_2617557626.jpg,013.Bobolink
Bobolink_0018_2616868506.jpg,013.Bobolink
Bobolink_0002_525430387.jpg,013.Bobolink
Bobolink_0009_2497220773.jpg,013.Bobolink
Bobolink_0020_2665731300.jpg,013.Bobolink
Bobolink_0003_433654869.jpg,013.Bobolink
Bobolink_0016_2497219569.jpg,013.Bobolink
Bobolink_0017_2254397578.jpg,013.Bobolink
Bobolink_0012_2577892497.jpg,013.Bobolink
Bobolink_0022_2576769628.jpg,013.Bobolink
Bobolink_0015_2690957827.jpg,013.Bobolink
Bobolink_0019_2509438067.jpg,013.Bobolink
Bobolink_0008_2578264625.jpg,013.Bobolink
Bobolink_0025_2502658999.jpg,013.Bobolink
Bobolink_0004_2563658093.jpg,013.Bobolink
Bobolink_0024_2526087426.jpg,013.Bobolink
Bobolink_0001_145827142.jpg,013.Bobolink
Bobolink_0014_2545596115.jpg,013.Bobolink
Bobolink_0011_2578725502.jpg,013.Bobolink
Bobolink_0010_510123359.jpg,013.Bobolink
Bobolink_0013_2665369567.jpg,013.Bobolink
Spotted_Catbird_0009_426800838.jpg,018.Spotted_Catbird
Spotted_Catbird_0007_2035474443.jpg,018.Spotted_Catbird
Spotted_Catbird_0001_2609850087.jpg,018.Spotted_Catbird
Spotted_Catbird_0003_1429851736.jpg,018.Spotted_Catbird
Spotted_Catbird_0004_2928420799.jpg,018.Spotted_Catbird
Spotted_Catbird_0017_2438854023.jpg,018.Spotted_Catbird
Spotted_Catbird_0006_2132520881.jpg,018.Spotted_Catbird
Spotted_Catbird_0013_2493043199.jpg,018.Spotted_Catbird
Spotted_Catbird_0002_2609850097.jpg,018.Spotted_Catbird
Spotted_Catbird_0010_1429851916.jpg,018.Spotted_Catbird
Spotted_Catbird_0014_3025400207.jpg,018.Spotted_Catbird
Spotted_Catbird_0011_1429854950.jpg,018.Spotted_Catbird
Spotted_Catbird_0005_1996697500.jpg,018.Spotted_Catbird
Spotted_Catbird_0016_2174639099.jpg,018.Spotted_Catbird
Spotted_Catbird_0018_1268458495.jpg,018.Spotted_Catbird
Spotted_Catbird_0008_2036273422.jpg,018.Spotted_Catbird
Spotted_Catbird_0012_2132521001.jpg,018.Spotted_Catbird
Spotted_Catbird_0015_3026208002.jpg,018.Spotted_Catbird
Spotted_Catbird_0020_530075891.jpg,018.Spotted_Catbird
Spotted_Catbird_0019_2983503896.jpg,018.Spotted_Catbird
Artic_Tern_0010_1129265728.jpg,141.Artic_Tern
Artic_Tern_0022_906393403.jpg,141.Artic_Tern
Artic_Tern_0006_235286408.jpg,141.Artic_Tern
Artic_Tern_0030_2441979722.jpg,141.Artic_Tern
Artic_Tern_0005_2573396817.jpg,141.Artic_Tern
Artic_Tern_0011_349370649.jpg,141.Artic_Tern
Artic_Tern_0025_2695301965.jpg,141.Artic_Tern
Artic_Tern_0016_2674073789.jpg,141.Artic_Tern
Artic_Tern_0034_2584280442.jpg,141.Artic_Tern
Artic_Tern_0018_2128035103.jpg,141.Artic_Tern
Artic_Tern_0026_2583453781.jpg,141.Artic_Tern
Artic_Tern_0007_2947910147.jpg,141.Artic_Tern
Artic_Tern_0008_749633251.jpg,141.Artic_Tern
Artic_Tern_0027_2583452833.jpg,141.Artic_Tern
Artic_Tern_0002_228965064.jpg,141.Artic_Tern
Artic_Tern_0012_1081311027.jpg,141.Artic_Tern
Artic_Tern_0017_2781091081.jpg,141.Artic_Tern
Artic_Tern_0031_2583452319.jpg,141.Artic_Tern
Artic_Tern_0003_2572547634.jpg,141.Artic_Tern
Artic_Tern_0015_2584281156.jpg,141.Artic_Tern
Artic_Tern_0020_68981377.jpg,141.Artic_Tern
Artic_Tern_0013_2647278391.jpg,141.Artic_Tern
Artic_Tern_0032_368286416.jpg,141.Artic_Tern
Artic_Tern_0029_2583486621.jpg,141.Artic_Tern
Artic_Tern_0001_938838592.jpg,141.Artic_Tern
Artic_Tern_0014_2674895310.jpg,141.Artic_Tern
Artic_Tern_0033_2584281960.jpg,141.Artic_Tern
Artic_Tern_0021_2584282160.jpg,141.Artic_Tern
Artic_Tern_0023_2695301955.jpg,141.Artic_Tern
Artic_Tern_0004_2574219906.jpg,141.Artic_Tern
Artic_Tern_0019_2674892838.jpg,141.Artic_Tern
Artic_Tern_0024_2990784343.jpg,141.Artic_Tern
Artic_Tern_0028_879755980.jpg,141.Artic_Tern
Artic_Tern_0009_1572647757.jpg,141.Artic_Tern
Sayornis_0018_2089809922.jpg,103.Sayornis
Sayornis_0016_3004439198.jpg,103.Sayornis
Sayornis_0020_2521020630.jpg,103.Sayornis
Sayornis_0013_2150080862.jpg,103.Sayornis
Sayornis_0011_2461063448.jpg,103.Sayornis
Sayornis_0008_2912512475.jpg,103.Sayornis
Sayornis_0007_2938190089.jpg,103.Sayornis
Sayornis_0015_2149286741.jpg,103.Sayornis
Sayornis_0021_259404000.jpg,103.Sayornis
Sayornis_0010_93473442.jpg,103.Sayornis
Sayornis_0024_1511572508.jpg,103.Sayornis
Sayornis_0019_2267395901.jpg,103.Sayornis
Sayornis_0026_2360195314.jpg,103.Sayornis
Sayornis_0014_2924296106.jpg,103.Sayornis
Sayornis_0022_384793827.jpg,103.Sayornis
Sayornis_0023_28903210.jpg,103.Sayornis
Sayornis_0002_2221076030.jpg,103.Sayornis
Sayornis_0004_16712667.jpg,103.Sayornis
Sayornis_0005_2881422647.jpg,103.Sayornis
Sayornis_0017_3084067249.jpg,103.Sayornis
Sayornis_0009_328692366.jpg,103.Sayornis
Sayornis_0003_2220282009.jpg,103.Sayornis
Sayornis_0012_2398863391.jpg,103.Sayornis
Sayornis_0006_2404787776.jpg,103.Sayornis
Sayornis_0001_xxx.jpg,103.Sayornis
Sayornis_0025_2133958078.jpg,103.Sayornis
Gray_crowned_Rosy_Finch_0018_2768398174.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0019_2191676472.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0013_101394999.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0020_2849331961.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0002_2721878277.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0025_2765944653.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0005_277542337.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0004_2721877877.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0011_59209412.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0021_2599409960.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0003_3030304639.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0015_2191676470.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0001_2173103806.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0017_2191676474.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0016_920030103.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0009_2763899575.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0007_2196020398.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0026_284114554.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0010_59209402.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0022_3085613547.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0008_2728735820.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0024_2768397870.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0006_157699890.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0012_1987115597.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0014_3001168789.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0023_2675071816.jpg,034.Gray_crowned_Rosy_Finch
Gray_crowned_Rosy_Finch_0027_1987924860.jpg,034.Gray_crowned_Rosy_Finch
Bewick_Wren_0009_2322086277.jpg,193.Bewick_Wren
Bewick_Wren_0014_169590206.jpg,193.Bewick_Wren
Bewick_Wren_0017_97056680.jpg,193.Bewick_Wren
Bewick_Wren_0027_3082786470.jpg,193.Bewick_Wren
Bewick_Wren_0022_3048441336.jpg,193.Bewick_Wren
Bewick_Wren_0011_1893765926.jpg,193.Bewick_Wren
Bewick_Wren_0013_266652832.jpg,193.Bewick_Wren
Bewick_Wren_0016_179398409.jpg,193.Bewick_Wren
Bewick_Wren_0010_2511477448.jpg,193.Bewick_Wren
Bewick_Wren_0002_340170022.jpg,193.Bewick_Wren
Bewick_Wren_0032_417053491.jpg,193.Bewick_Wren
Bewick_Wren_0004_2131598404.jpg,193.Bewick_Wren
Bewick_Wren_0012_2509766093.jpg,193.Bewick_Wren
Bewick_Wren_0001_415725072.jpg,193.Bewick_Wren
Bewick_Wren_0034_167744800.jpg,193.Bewick_Wren
Bewick_Wren_0023_2377362291.jpg,193.Bewick_Wren
Bewick_Wren_0020_2218723807.jpg,193.Bewick_Wren
Bewick_Wren_0033_2188826934.jpg,193.Bewick_Wren
Bewick_Wren_0024_2322927710.jpg,193.Bewick_Wren
Bewick_Wren_0021_1231817259.jpg,193.Bewick_Wren
Bewick_Wren_0008_305351821.jpg,193.Bewick_Wren
Bewick_Wren_0031_419873749.jpg,193.Bewick_Wren
Bewick_Wren_0025_95058213.jpg,193.Bewick_Wren
Bewick_Wren_0003_690355658.jpg,193.Bewick_Wren
Bewick_Wren_0030_2298302149.jpg,193.Bewick_Wren
Bewick_Wren_0007_2520613236.jpg,193.Bewick_Wren
Bewick_Wren_0028_105122745.jpg,193.Bewick_Wren
Bewick_Wren_0026_403648456.jpg,193.Bewick_Wren
Bewick_Wren_0005_169590202.jpg,193.Bewick_Wren
Bewick_Wren_0006_169590205.jpg,193.Bewick_Wren
Bewick_Wren_0018_305351820.jpg,193.Bewick_Wren
Bewick_Wren_0029_150571661.jpg,193.Bewick_Wren
Bewick_Wren_0015_2298823505.jpg,193.Bewick_Wren
Bewick_Wren_0019_305351818.jpg,193.Bewick_Wren
Baird_Sparrow_0002_2537220789.jpg,113.Baird_Sparrow
Baird_Sparrow_0026_652286801.jpg,113.Baird_Sparrow
Baird_Sparrow_0003_2540301276.jpg,113.Baird_Sparrow
Baird_Sparrow_0025_2779560955.jpg,113.Baird_Sparrow
Baird_Sparrow_0021_xxx.jpg,113.Baird_Sparrow
Baird_Sparrow_0009_388635228.jpg,113.Baird_Sparrow
Baird_Sparrow_0013_2102094672.jpg,113.Baird_Sparrow
Baird_Sparrow_0024_2607945365.jpg,113.Baird_Sparrow
Baird_Sparrow_0018_2102096030.jpg,113.Baird_Sparrow
Baird_Sparrow_0034_3047467612.jpg,113.Baird_Sparrow
Baird_Sparrow_0027_xxx.jpg,113.Baird_Sparrow
Baird_Sparrow_0033_3047466700.jpg,113.Baird_Sparrow
Baird_Sparrow_0006_2531887055.jpg,113.Baird_Sparrow
Baird_Sparrow_0019_2102093870.jpg,113.Baird_Sparrow
Baird_Sparrow_0017_2101316863.jpg,113.Baird_Sparrow
Baird_Sparrow_0020_2101315275.jpg,113.Baird_Sparrow
Baird_Sparrow_0016_2102098568.jpg,113.Baird_Sparrow
Baird_Sparrow_0011_2807105197.jpg,113.Baird_Sparrow
Baird_Sparrow_0031_3046632043.jpg,113.Baird_Sparrow
Baird_Sparrow_0005_2531886903.jpg,113.Baird_Sparrow
Baird_Sparrow_0007_672058133.jpg,113.Baird_Sparrow
Baird_Sparrow_0028_2964874171.jpg,113.Baird_Sparrow
Baird_Sparrow_0014_2102099406.jpg,113.Baird_Sparrow
Baird_Sparrow_0032_xxx.jpg,113.Baird_Sparrow
Baird_Sparrow_0029_xxx.jpg,113.Baird_Sparrow
Baird_Sparrow_0008_672058123.jpg,113.Baird_Sparrow
Baird_Sparrow_0004_2539483091.jpg,113.Baird_Sparrow
Baird_Sparrow_0015_2101317721.jpg,113.Baird_Sparrow
Baird_Sparrow_0010_388641170.jpg,113.Baird_Sparrow
Baird_Sparrow_0022_3047464770.jpg,113.Baird_Sparrow
Baird_Sparrow_0012_2924586071.jpg,113.Baird_Sparrow
Baird_Sparrow_0023_3046631067.jpg,113.Baird_Sparrow
Baird_Sparrow_0001_2538811870.jpg,113.Baird_Sparrow
Baird_Sparrow_0030_696169188.jpg,113.Baird_Sparrow
Harris_Sparrow_0008_2329719684.jpg,122.Harris_Sparrow
Harris_Sparrow_0002_1955680325.jpg,122.Harris_Sparrow
Harris_Sparrow_0007_331956418.jpg,122.Harris_Sparrow
Harris_Sparrow_0009_2942211538.jpg,122.Harris_Sparrow
Harris_Sparrow_0006_2152344261.jpg,122.Harris_Sparrow
Harris_Sparrow_0014_366071626.jpg,122.Harris_Sparrow
Harris_Sparrow_0001_1955680577.jpg,122.Harris_Sparrow
Harris_Sparrow_0019_2941352965.jpg,122.Harris_Sparrow
Harris_Sparrow_0025_306935439.jpg,122.Harris_Sparrow
Harris_Sparrow_0024_361838471.jpg,122.Harris_Sparrow
Harris_Sparrow_0023_3018131728.jpg,122.Harris_Sparrow
Harris_Sparrow_0022_361838480.jpg,122.Harris_Sparrow
Harris_Sparrow_0012_399463859.jpg,122.Harris_Sparrow
Harris_Sparrow_0005_314579198.jpg,122.Harris_Sparrow
Harris_Sparrow_0017_2514656459.jpg,122.Harris_Sparrow
Harris_Sparrow_0010_410159897.jpg,122.Harris_Sparrow
Harris_Sparrow_0003_xxx.jpg,122.Harris_Sparrow
Harris_Sparrow_0011_3083221775.jpg,122.Harris_Sparrow
Harris_Sparrow_0013_366069489.jpg,122.Harris_Sparrow
Harris_Sparrow_0016_366065189.jpg,122.Harris_Sparrow
Harris_Sparrow_0021_387448796.jpg,122.Harris_Sparrow
Harris_Sparrow_0004_1956509014.jpg,122.Harris_Sparrow
Harris_Sparrow_0020_2942208666.jpg,122.Harris_Sparrow
Harris_Sparrow_0015_2515443666.jpg,122.Harris_Sparrow
Harris_Sparrow_0018_2941353289.jpg,122.Harris_Sparrow
Red_eyed_Vireo_0003_59210336.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0008_2693140149.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0031_2740243174.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0016_2917350638.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0026_2502710393.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0021_2693953672.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0022_2693134681.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0030_305729945.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0012_531934627.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0010_2224604942.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0007_2545288204.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0019_504726670.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0025_2502711037.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0028_2502711347.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0020_2693135269.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0017_2693952976.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0015_2916506313.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0024_2879181819.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0018_525553976.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0005_2850972890.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0033_2495509969.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0009_2748298992.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0002_2697832838.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0014_18090319.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0029_59210443.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0032_2851633441.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0023_2880018380.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0004_2805679283.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0006_2928288319.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0001_2879184061.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0027_2503540454.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0034_500254638.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0011_2224577638.jpg,154.Red_eyed_Vireo
Red_eyed_Vireo_0013_2880017344.jpg,154.Red_eyed_Vireo
Vermilion_Flycatcher_0006_2415600056.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0005_2066887156.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0020_471703845.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0007_2213007308.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0013_118680869.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0003_415149961.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0022_99087550.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0019_2848875652.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0014_2055483802.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0016_2357262483.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0004_9399119.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0011_3079161466.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0010_2159616740.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0001_2899309839.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0018_2766796044.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0002_2801919792.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0017_2900886674.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0015_2391761821.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0024_2040965016.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0023_2160150136.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0012_2762935633.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0021_2352580055.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0009_2301571107.jpg,042.Vermilion_Flycatcher
Vermilion_Flycatcher_0008_180072755.jpg,042.Vermilion_Flycatcher
Mourning_Warbler_0023_343485567.jpg,170.Mourning_Warbler
Mourning_Warbler_0034_2812894924.jpg,170.Mourning_Warbler
Mourning_Warbler_0002_2526436960.jpg,170.Mourning_Warbler
Mourning_Warbler_0003_156826597.jpg,170.Mourning_Warbler
Mourning_Warbler_0029_2523031180.jpg,170.Mourning_Warbler
Mourning_Warbler_0017_2497552546.jpg,170.Mourning_Warbler
Mourning_Warbler_0021_2496732667.jpg,170.Mourning_Warbler
Mourning_Warbler_0007_2501122824.jpg,170.Mourning_Warbler
Mourning_Warbler_0014_2929519747.jpg,170.Mourning_Warbler
Mourning_Warbler_0020_2522208525.jpg,170.Mourning_Warbler
Mourning_Warbler_0010_881092871.jpg,170.Mourning_Warbler
Mourning_Warbler_0009_2500291477.jpg,170.Mourning_Warbler
Mourning_Warbler_0005_2518946213.jpg,170.Mourning_Warbler
Mourning_Warbler_0008_2501121196.jpg,170.Mourning_Warbler
Mourning_Warbler_0033_1460142538.jpg,170.Mourning_Warbler
Mourning_Warbler_0030_1470136937.jpg,170.Mourning_Warbler
Mourning_Warbler_0022_2522207631.jpg,170.Mourning_Warbler
Mourning_Warbler_0027_2496724815.jpg,170.Mourning_Warbler
Mourning_Warbler_0011_610835987.jpg,170.Mourning_Warbler
Mourning_Warbler_0016_2522208443.jpg,170.Mourning_Warbler
Mourning_Warbler_0026_2963301112.jpg,170.Mourning_Warbler
Mourning_Warbler_0015_2518496557.jpg,170.Mourning_Warbler
Mourning_Warbler_0001_2099292330.jpg,170.Mourning_Warbler
Mourning_Warbler_0013_392495257.jpg,170.Mourning_Warbler
Mourning_Warbler_0004_2274530798.jpg,170.Mourning_Warbler
Mourning_Warbler_0024_xxx.jpg,170.Mourning_Warbler
Mourning_Warbler_0018_392495285.jpg,170.Mourning_Warbler
Mourning_Warbler_0012_2494379595.jpg,170.Mourning_Warbler
Mourning_Warbler_0032_xxx.jpg,170.Mourning_Warbler
Mourning_Warbler_0019_2523030398.jpg,170.Mourning_Warbler
Mourning_Warbler_0031_219327644.jpg,170.Mourning_Warbler
Mourning_Warbler_0028_2522208569.jpg,170.Mourning_Warbler
Mourning_Warbler_0006_2500291273.jpg,170.Mourning_Warbler
Mourning_Warbler_0025_2963301056.jpg,170.Mourning_Warbler
Northern_Fulmar_0018_2707368242.jpg,045.Northern_Fulmar
Northern_Fulmar_0007_2517464695.jpg,045.Northern_Fulmar
Northern_Fulmar_0010_2236891087.jpg,045.Northern_Fulmar
Northern_Fulmar_0008_2220889524.jpg,045.Northern_Fulmar
Northern_Fulmar_0023_538672323.jpg,045.Northern_Fulmar
Northern_Fulmar_0017_163433610.jpg,045.Northern_Fulmar
Northern_Fulmar_0020_409897200.jpg,045.Northern_Fulmar
Northern_Fulmar_0001_2650683069.jpg,045.Northern_Fulmar
Northern_Fulmar_0028_409849707.jpg,045.Northern_Fulmar
Northern_Fulmar_0019_2537234580.jpg,045.Northern_Fulmar
Northern_Fulmar_0012_2531554895.jpg,045.Northern_Fulmar
Northern_Fulmar_0004_1393774680.jpg,045.Northern_Fulmar
Northern_Fulmar_0026_413770047.jpg,045.Northern_Fulmar
Northern_Fulmar_0025_2841209829.jpg,045.Northern_Fulmar
Northern_Fulmar_0013_533814410.jpg,045.Northern_Fulmar
Northern_Fulmar_0011_2557437857.jpg,045.Northern_Fulmar
Northern_Fulmar_0022_2596698954.jpg,045.Northern_Fulmar
Northern_Fulmar_0002_2758082365.jpg,045.Northern_Fulmar
Northern_Fulmar_0016_2787660415.jpg,045.Northern_Fulmar
Northern_Fulmar_0021_436718602.jpg,045.Northern_Fulmar
Northern_Fulmar_0014_2657494321.jpg,045.Northern_Fulmar
Northern_Fulmar_0005_2517464477.jpg,045.Northern_Fulmar
Northern_Fulmar_0024_242103861.jpg,045.Northern_Fulmar
Northern_Fulmar_0003_2521606263.jpg,045.Northern_Fulmar
Northern_Fulmar_0015_415814058.jpg,045.Northern_Fulmar
Northern_Fulmar_0006_1392811437.jpg,045.Northern_Fulmar
Northern_Fulmar_0027_429087843.jpg,045.Northern_Fulmar
Northern_Fulmar_0009_2220079159.jpg,045.Northern_Fulmar
Golden_winged_Warbler_0032_520358253.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0010_2316599673.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0022_2507667229.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0013_392418672.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0012_2316598975.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0017_2895643009.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0016_2895643579.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0030_520345206.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0029_495530911.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0009_2591413864.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0026_1348631442.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0007_2468962218.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0020_2508494492.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0024_2501123056.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0021_2507666467.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0015_2895642459.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0002_2471636465.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0034_1347737145.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0019_2654634804.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0003_2540117270.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0005_498814033.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0011_2317407918.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0023_2489764877.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0018_2507666617.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0006_2519317376.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0008_2464457843.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0028_1236870078.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0001_495149583.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0031_2554720574.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0027_2494039215.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0025_2501121376.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0014_2896481930.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0033_2494038833.jpg,166.Golden_winged_Warbler
Golden_winged_Warbler_0004_2301685839.jpg,166.Golden_winged_Warbler
Swainson_Warbler_0004_xxx.jpg,178.Swainson_Warbler
Swainson_Warbler_0022_2501065142.jpg,178.Swainson_Warbler
Swainson_Warbler_0018_437031544.jpg,178.Swainson_Warbler
Swainson_Warbler_0021_2517976546.jpg,178.Swainson_Warbler
Swainson_Warbler_0020_2432620628.jpg,178.Swainson_Warbler
Swainson_Warbler_0014_2517642117.jpg,178.Swainson_Warbler
Swainson_Warbler_0024_1208631223.jpg,178.Swainson_Warbler
Swainson_Warbler_0009_61645810.jpg,178.Swainson_Warbler
Swainson_Warbler_0023_1016597937.jpg,178.Swainson_Warbler
Swainson_Warbler_0005_2658928294.jpg,178.Swainson_Warbler
Swainson_Warbler_0010_2516731661.jpg,178.Swainson_Warbler
Swainson_Warbler_0025_2378128429.jpg,178.Swainson_Warbler
Swainson_Warbler_0012_2517157423.jpg,178.Swainson_Warbler
Swainson_Warbler_0008_2658928324.jpg,178.Swainson_Warbler
Swainson_Warbler_0011_520143156.jpg,178.Swainson_Warbler
Swainson_Warbler_0007_2658101657.jpg,178.Swainson_Warbler
Swainson_Warbler_0016_2479691118.jpg,178.Swainson_Warbler
Swainson_Warbler_0015_2431819471.jpg,178.Swainson_Warbler
Swainson_Warbler_0002_2701350411.jpg,178.Swainson_Warbler
Swainson_Warbler_0001_2564043863.jpg,178.Swainson_Warbler
Swainson_Warbler_0003_2629627673.jpg,178.Swainson_Warbler
Swainson_Warbler_0019_2311786618.jpg,178.Swainson_Warbler
Swainson_Warbler_0013_2122022264.jpg,178.Swainson_Warbler
Swainson_Warbler_0017_2378127803.jpg,178.Swainson_Warbler
Swainson_Warbler_0006_2658101611.jpg,178.Swainson_Warbler
Brandt_Cormorant_0020_475428382.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0012_xxx.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0025_459901258.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0016_2621591971.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0021_2783544051.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0008_426003441.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0005_2619840999.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0002_426004050.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0013_1368929336.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0011_2205747286.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0004_376009772.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0019_424721750.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0023_2610416910.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0010_173310459.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0026_474479554.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0007_426001648.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0009_426236586.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0018_439839683.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0001_426006321.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0024_2613464506.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0022_143260613.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0015_444861364.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0006_426002886.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0017_493153789.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0014_2506711291.jpg,023.Brandt_Cormorant
Brandt_Cormorant_0003_426005033.jpg,023.Brandt_Cormorant
Red_headed_Woodpecker_0015_2580881179.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0009_2195922004.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0012_2578571305.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0014_2506501387.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0025_192363961.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0026_562180201.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0029_631526205.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0003_3000043444.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0024_316230604.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0010_2422275300.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0021_566993404.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0004_2451274729.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0011_309708127.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0017_2917925915.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0002_2611269700.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0019_454761427.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0008_2640495822.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0018_2579398144.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0001_561876514.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0028_192379758.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0016_410463447.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0007_2653260478.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0023_2714536014.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0006_1444142273.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0022_2720313574.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0013_503456158.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0005_2195928350.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0020_2599542610.jpg,191.Red_headed_Woodpecker
Red_headed_Woodpecker_0027_545676369.jpg,191.Red_headed_Woodpecker
Blue_headed_Vireo_0003_2403810957.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0022_1445270876.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0006_2901927112.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0020_2517847398.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0002_xxx.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0034_2691472948.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0023_2913359984.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0011_2691052618.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0019_2490695980.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0028_2691214888.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0004_261315463.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0017_2846234840.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0031_2690660569.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0033_3040371709.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0021_277549532.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0027_2942936544.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0030_477725632.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0014_451681288.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0015_451681300.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0001_3057218592.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0007_59208729.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0010_85053176.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0008_2372359048.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0013_59208715.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0018_2524368586.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0012_492700644.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0024_2912245805.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0029_1445270560.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0016_2846234634.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0025_1557914378.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0026_507580600.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0005_2371579165.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0009_2484454863.jpg,152.Blue_headed_Vireo
Blue_headed_Vireo_0032_2691052162.jpg,152.Blue_headed_Vireo
Rufous_Hummingbird_0008_255045232.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0002_528597122.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0015_2731191704.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0024_1878503844.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0028_2432568233.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0032_2546738115.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0025_990418457.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0026_2433383376.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0022_2432568529.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0013_2823135849.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0017_2298565516.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0007_2444717300.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0011_15332219.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0005_256030789.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0023_2535857725.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0031_2432568137.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0033_2523552268.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0009_140559926.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0010_2762659158.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0029_2432568433.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0014_159648485.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0019_317146792.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0027_169289992.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0001_1751968367.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0003_256026924.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0018_224890838.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0012_2069558331.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0021_2332117598.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0004_2472798444.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0030_1794383666.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0034_2687512649.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0016_1794383614.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0020_266087696.jpg,069.Rufous_Hummingbird
Rufous_Hummingbird_0006_256026923.jpg,069.Rufous_Hummingbird
Rhinoceros_Auklet_0013_2916749261.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0014_2718152489.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0028_1109884434.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0006_2555181966.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0001_2673620250.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0008_804374231.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0016_646789676.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0005_2558782887.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0019_1232573433.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0018_459515769.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0017_765922003.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0004_2978994669.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0015_2718971766.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0011_458012208.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0009_739605990.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0023_3073852636.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0007_2928370434.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0020_172412571.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0024_2718152733.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0030_2744259331.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0026_2211152978.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0010_739605948.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0025_1133842516.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0003_804374223.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0021_2568978736.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0002_2444500018.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0032_2895204221.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0031_7658455.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0027_2671199517.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0022_2421989415.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0012_2281384724.jpg,008.Rhinoceros_Auklet
Rhinoceros_Auklet_0029_2664097401.jpg,008.Rhinoceros_Auklet
Brown_Creeper_0016_2272314019.jpg,028.Brown_Creeper
Brown_Creeper_0021_105122744.jpg,028.Brown_Creeper
Brown_Creeper_0027_2504631596.jpg,028.Brown_Creeper
Brown_Creeper_0032_2410476265.jpg,028.Brown_Creeper
Brown_Creeper_0007_2403872606.jpg,028.Brown_Creeper
Brown_Creeper_0024_2344375414.jpg,028.Brown_Creeper
Brown_Creeper_0001_2345844695.jpg,028.Brown_Creeper
Brown_Creeper_0018_2970163676.jpg,028.Brown_Creeper
Brown_Creeper_0010_38011241.jpg,028.Brown_Creeper
Brown_Creeper_0003_2121238751.jpg,028.Brown_Creeper
Brown_Creeper_0031_2411305524.jpg,028.Brown_Creeper
Brown_Creeper_0029_2202868648.jpg,028.Brown_Creeper
Brown_Creeper_0020_1423170614.jpg,028.Brown_Creeper
Brown_Creeper_0022_2178549648.jpg,028.Brown_Creeper
Brown_Creeper_0034_1917752363.jpg,028.Brown_Creeper
Brown_Creeper_0005_2944141013.jpg,028.Brown_Creeper
Brown_Creeper_0011_114863052.jpg,028.Brown_Creeper
Brown_Creeper_0023_86232851.jpg,028.Brown_Creeper
Brown_Creeper_0006_2410485405.jpg,028.Brown_Creeper
Brown_Creeper_0019_2410481067.jpg,028.Brown_Creeper
Brown_Creeper_0025_845328381.jpg,028.Brown_Creeper
Brown_Creeper_0026_2411300046.jpg,028.Brown_Creeper
Brown_Creeper_0002_2410487281.jpg,028.Brown_Creeper
Brown_Creeper_0013_78728773.jpg,028.Brown_Creeper
Brown_Creeper_0035_581000120.jpg,028.Brown_Creeper
Brown_Creeper_0009_2125321569.jpg,028.Brown_Creeper
Brown_Creeper_0033_1322780630.jpg,028.Brown_Creeper
Brown_Creeper_0028_2411313096.jpg,028.Brown_Creeper
Brown_Creeper_0014_2411309892.jpg,028.Brown_Creeper
Brown_Creeper_0004_2116151621.jpg,028.Brown_Creeper
Brown_Creeper_0015_54657146.jpg,028.Brown_Creeper
Brown_Creeper_0008_3059117386.jpg,028.Brown_Creeper
Brown_Creeper_0030_2396737027.jpg,028.Brown_Creeper
Brown_Creeper_0012_2292466686.jpg,028.Brown_Creeper
Brown_Creeper_0017_452765456.jpg,028.Brown_Creeper
California_Gull_0013_1245399138.jpg,059.California_Gull
California_Gull_0017_772815179.jpg,059.California_Gull
California_Gull_0012_357599904.jpg,059.California_Gull
California_Gull_0004_535816916.jpg,059.California_Gull
California_Gull_0005_2255891892.jpg,059.California_Gull
California_Gull_0020_2401678158.jpg,059.California_Gull
California_Gull_0018_75521211.jpg,059.California_Gull
California_Gull_0002_2627117525.jpg,059.California_Gull
California_Gull_0007_2629417225.jpg,059.California_Gull
California_Gull_0019_345031999.jpg,059.California_Gull
California_Gull_0014_2255892736.jpg,059.California_Gull
California_Gull_0016_2492872310.jpg,059.California_Gull
California_Gull_0003_3033320042.jpg,059.California_Gull
California_Gull_0001_1507593116.jpg,059.California_Gull
California_Gull_0024_2414206799.jpg,059.California_Gull
California_Gull_0009_3000843538.jpg,059.California_Gull
California_Gull_0015_2255892302.jpg,059.California_Gull
California_Gull_0010_74338535.jpg,059.California_Gull
California_Gull_0023_1189959482.jpg,059.California_Gull
California_Gull_0008_490813465.jpg,059.California_Gull
California_Gull_0021_107103581.jpg,059.California_Gull
California_Gull_0011_535816920.jpg,059.California_Gull
California_Gull_0006_2245452576.jpg,059.California_Gull
California_Gull_0022_2165283064.jpg,059.California_Gull
Red_faced_Cormorant_0002_56199272.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0022_2570526392.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0001_2221540205.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0010_2673081452.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0017_2939331170.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0005_392294332.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0003_212248530.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0021_992493481.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0016_2570526530.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0012_497178769.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0015_858186405.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0014_846089391.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0004_965934424.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0020_290250302.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0007_290186291.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0019_935092571.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0008_707313905.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0011_402353446.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0009_2782350115.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0006_2414670336.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0013_2288210035.jpg,024.Red_faced_Cormorant
Red_faced_Cormorant_0018_941582069.jpg,024.Red_faced_Cormorant
Pine_Grosbeak_0007_197369577.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0018_394731872.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0022_2103897103.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0001_2141544739.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0019_2375156172.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0011_2162441116.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0015_2312043326.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0008_2207763864.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0021_2605826060.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0004_2218734106.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0012_311599485.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0003_2169853752.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0010_2237778716.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0014_2047883813.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0016_2153116032.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0005_2064080685.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0020_2169830366.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0002_3048354923.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0006_2036997303.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0013_197369575.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0009_2123512072.jpg,056.Pine_Grosbeak
Pine_Grosbeak_0017_394731873.jpg,056.Pine_Grosbeak
American_Crow_0025_359551851.jpg,029.American_Crow
American_Crow_0002_165448164.jpg,029.American_Crow
American_Crow_0020_2934449411.jpg,029.American_Crow
American_Crow_0004_2989461804.jpg,029.American_Crow
American_Crow_0009_2365084910.jpg,029.American_Crow
American_Crow_0015_2365091282.jpg,029.American_Crow
American_Crow_0012_1003355.jpg,029.American_Crow
American_Crow_0027_2324865969.jpg,029.American_Crow
American_Crow_0013_3051549869.jpg,029.American_Crow
American_Crow_0010_2498554080.jpg,029.American_Crow
American_Crow_0024_2618947526.jpg,029.American_Crow
American_Crow_0018_1877680209.jpg,029.American_Crow
American_Crow_0023_xxx.jpg,029.American_Crow
American_Crow_0026_2566376318.jpg,029.American_Crow
American_Crow_0001_4183257.jpg,029.American_Crow
American_Crow_0003_1507991326.jpg,029.American_Crow
American_Crow_0021_2626777569.jpg,029.American_Crow
American_Crow_0008_2150236048.jpg,029.American_Crow
American_Crow_0017_2359869348.jpg,029.American_Crow
American_Crow_0014_2769157461.jpg,029.American_Crow
American_Crow_0006_2179902676.jpg,029.American_Crow
American_Crow_0019_2863695170.jpg,029.American_Crow
American_Crow_0007_2358962421.jpg,029.American_Crow
American_Crow_0005_1095409.jpg,029.American_Crow
American_Crow_0022_2975254436.jpg,029.American_Crow
American_Crow_0016_2753381796.jpg,029.American_Crow
American_Crow_0011_2470003522.jpg,029.American_Crow
Long_tailed_Jaeger_0006_189454806.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0028_2814633142.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0014_2666246530.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0007_3023820382.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0003_2181661942.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0016_2203352544.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0015_2657605150.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0010_190372779.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0012_1236521310.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0020_195521844.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0024_1289048247.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0025_81892960.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0013_1235660701.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0011_1236522668.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0017_2709008441.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0002_2948173936.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0005_175114787.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0009_2621285831.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0004_776135429.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0018_2709006867.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0008_3160677.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0023_2202557923.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0022_1297030820.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0019_2398164933.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0027_422167455.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0021_1289048239.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0001_2931557732.jpg,071.Long_tailed_Jaeger
Long_tailed_Jaeger_0026_2130462760.jpg,071.Long_tailed_Jaeger
Pied_billed_Grebe_0012_2388057959.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0022_397190380.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0030_2503376738.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0009_353287151.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0024_389578482.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0001_2530908405.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0013_2127255009.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0016_269660963.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0015_2217349790.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0020_397191373.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0002_153897739.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0023_674336278.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0026_2159703847.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0027_2531330942.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0025_673188429.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0004_2530908223.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0014_2646368350.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0021_397190735.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0011_2970735357.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0017_253054913.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0005_2530908141.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0029_300538392.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0003_324178211.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0006_2346956928.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0018_673457429.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0019_397191117.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0010_265699237.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0028_674298766.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0008_397191752.jpg,052.Pied_billed_Grebe
Pied_billed_Grebe_0007_2359398581.jpg,052.Pied_billed_Grebe
Great_Crested_Flycatcher_0026_447799897.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0014_2621176627.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0030_493167351.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0019_997998827.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0024_542841461.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0020_2542490778.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0008_2448132402.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0017_1573235193.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0033_452562985.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0018_2910456471.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0021_2674902879.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0006_165034762.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0010_2611856475.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0016_2561412389.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0005_171739872.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0001_139721918.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0027_2538117932.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0032_1989175128.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0015_122493513.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0023_2545251715.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0031_2666865049.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0004_171950526.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0012_2542508476.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0022_2508502346.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0029_566075203.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0003_130771123.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0025_2710954794.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0028_2621750343.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0007_2623102305.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0002_113948343.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0009_2541273287.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0013_2327056469.jpg,038.Great_Crested_Flycatcher
Great_Crested_Flycatcher_0011_1244722528.jpg,038.Great_Crested_Flycatcher
Purple_Finch_0018_2461574339.jpg,035.Purple_Finch
Purple_Finch_0011_2421716976.jpg,035.Purple_Finch
Purple_Finch_0006_2329434675.jpg,035.Purple_Finch
Purple_Finch_0013_2466061630.jpg,035.Purple_Finch
Purple_Finch_0023_2345285196.jpg,035.Purple_Finch
Purple_Finch_0010_2445872065.jpg,035.Purple_Finch
Purple_Finch_0024_15786109.jpg,035.Purple_Finch
Purple_Finch_0014_2436764723.jpg,035.Purple_Finch
Purple_Finch_0002_482189394.jpg,035.Purple_Finch
Purple_Finch_0004_2356254630.jpg,035.Purple_Finch
Purple_Finch_0008_2400934286.jpg,035.Purple_Finch
Purple_Finch_0025_500207789.jpg,035.Purple_Finch
Purple_Finch_0001_2237042103.jpg,035.Purple_Finch
Purple_Finch_0022_2359851765.jpg,035.Purple_Finch
Purple_Finch_0007_146872871.jpg,035.Purple_Finch
Purple_Finch_0031_2414086362.jpg,035.Purple_Finch
Purple_Finch_0032_2085022322.jpg,035.Purple_Finch
Purple_Finch_0009_2275673758.jpg,035.Purple_Finch
Purple_Finch_0020_2401252794.jpg,035.Purple_Finch
Purple_Finch_0028_228676926.jpg,035.Purple_Finch
Purple_Finch_0029_59203398.jpg,035.Purple_Finch
Purple_Finch_0030_2981259863.jpg,035.Purple_Finch
Purple_Finch_0005_2263693341.jpg,035.Purple_Finch
Purple_Finch_0026_437081065.jpg,035.Purple_Finch
Purple_Finch_0016_2265549038.jpg,035.Purple_Finch
Purple_Finch_0015_2367643402.jpg,035.Purple_Finch
Purple_Finch_0027_2240951910.jpg,035.Purple_Finch
Purple_Finch_0012_2960556842.jpg,035.Purple_Finch
Purple_Finch_0003_2374852324.jpg,035.Purple_Finch
Purple_Finch_0017_2254265069.jpg,035.Purple_Finch
Purple_Finch_0021_2265118384.jpg,035.Purple_Finch
Purple_Finch_0019_2390716461.jpg,035.Purple_Finch
Pied_Kingfisher_0024_2887253470.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0010_440571393.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0019_2109660449.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0018_2909316808.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0029_2744388155.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0022_257153161.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0023_2050240281.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0028_2320712147.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0015_2990735907.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0014_2237320120.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0011_1336416582.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0027_2744390557.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0032_2050240459.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0026_2431818658.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0006_2967227448.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0001_2100377731.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0037_853160105.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0008_99323233.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0017_440571447.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0036_2731379925.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0021_3080449204.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0009_407710776.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0012_2346771419.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0004_411032570.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0013_462076142.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0016_548520466.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0003_1332201388.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0033_2486197230.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0002_1020026028.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0005_359879785.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0025_2923147843.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0034_2431818662.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0020_2178162730.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0031_2485378591.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0035_913016413.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0007_2496956771.jpg,081.Pied_Kingfisher
Pied_Kingfisher_0030_2746860827.jpg,081.Pied_Kingfisher
Le_Conte_Sparrow_0003_xxx.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0013_2124444640.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0006_2906223538.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0009_450136733.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0034_3048669507.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0011_1344523108.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0021_3047471462.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0033_3046633299.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0016_2715451691.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0007_59204851.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0029_2459533575.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0004_2463152544.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0032_348145672.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0005_2463153164.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0015_2123672857.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0018_59204846.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0012_2213868006.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0017_105408822.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0014_2928308298.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0002_2626936840.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0020_3047468050.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0010_1343624657.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0025_3047468964.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0028_428244372.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0019_2121758267.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0001_254627831.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0022_3047469388.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0031_3046634605.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0027_2963509623.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0026_3046635781.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0023_3047470704.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0024_3047471796.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0008_450136735.jpg,124.Le_Conte_Sparrow
Le_Conte_Sparrow_0030_2121761826.jpg,124.Le_Conte_Sparrow
Western_Meadowlark_0001_2400317972.jpg,088.Western_Meadowlark
Western_Meadowlark_0005_267876275.jpg,088.Western_Meadowlark
Western_Meadowlark_0015_2520738486.jpg,088.Western_Meadowlark
Western_Meadowlark_0026_2211027176.jpg,088.Western_Meadowlark
Western_Meadowlark_0008_2616073401.jpg,088.Western_Meadowlark
Western_Meadowlark_0016_2940699448.jpg,088.Western_Meadowlark
Western_Meadowlark_0029_3091515936.jpg,088.Western_Meadowlark
Western_Meadowlark_0028_2980525131.jpg,088.Western_Meadowlark
Western_Meadowlark_0024_2131506951.jpg,088.Western_Meadowlark
Western_Meadowlark_0019_3001893284.jpg,088.Western_Meadowlark
Western_Meadowlark_0025_2150260191.jpg,088.Western_Meadowlark
Western_Meadowlark_0030_85052447.jpg,088.Western_Meadowlark
Western_Meadowlark_0017_2222824015.jpg,088.Western_Meadowlark
Western_Meadowlark_0011_2067707524.jpg,088.Western_Meadowlark
Western_Meadowlark_0009_2327954530.jpg,088.Western_Meadowlark
Western_Meadowlark_0014_1628406989.jpg,088.Western_Meadowlark
Western_Meadowlark_0004_2334307819.jpg,088.Western_Meadowlark
Western_Meadowlark_0003_491561208.jpg,088.Western_Meadowlark
Western_Meadowlark_0002_336786293.jpg,088.Western_Meadowlark
Western_Meadowlark_0006_498117247.jpg,088.Western_Meadowlark
Western_Meadowlark_0013_2458712111.jpg,088.Western_Meadowlark
Western_Meadowlark_0007_2536966613.jpg,088.Western_Meadowlark
Western_Meadowlark_0032_371587162.jpg,088.Western_Meadowlark
Western_Meadowlark_0031_126227376.jpg,088.Western_Meadowlark
Western_Meadowlark_0012_783456360.jpg,088.Western_Meadowlark
Western_Meadowlark_0022_3001025837.jpg,088.Western_Meadowlark
Western_Meadowlark_0018_2960868238.jpg,088.Western_Meadowlark
Western_Meadowlark_0020_2541828180.jpg,088.Western_Meadowlark
Western_Meadowlark_0021_2541002813.jpg,088.Western_Meadowlark
Western_Meadowlark_0010_2360582478.jpg,088.Western_Meadowlark
Western_Meadowlark_0023_3001862164.jpg,088.Western_Meadowlark
Western_Meadowlark_0027_3054428882.jpg,088.Western_Meadowlark
Lazuli_Bunting_0011_506149906.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0017_495520503.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0024_2703815407.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0001_483696767.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0012_xxx.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0030_20258145.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0032_2542221058.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0021_2541398591.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0005_2491054962.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0029_2500422243.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0020_2582678163.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0006_483195184.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0019_2785250577.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0008_2453066940.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0014_685415537.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0018_772550497.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0027_2576093998.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0015_750064649.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0022_20258144.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0016_750064695.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0026_2582013592.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0023_174692012.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0013_2535893440.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0009_2516620927.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0010_522399154.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0002_2587159269.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0025_513649458.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0007_483196851.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0031_177482118.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0003_2498669472.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0004_488982485.jpg,015.Lazuli_Bunting
Lazuli_Bunting_0028_485950498.jpg,015.Lazuli_Bunting
Worm_eating_Warbler_0007_2817738577.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0006_59214859.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0016_2513080684.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0009_2661303277.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0001_2830243011.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0013_162829277.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0018_59214856.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0003_189744419.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0023_2658928642.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0002_2457261355.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0027_163219727.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0005_414613721.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0026_487487557.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0024_2701320621.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0029_2740900214.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0032_2628467792.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0017_3028697970.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0020_2837683220.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0031_2469186267.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0021_2302580267.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0025_2464667213.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0022_392418617.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0012_2837738946.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0030_521987572.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0010_1989202690.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0004_414613727.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0015_2226339905.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0019_2837637702.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0028_163219780.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0008_2211014016.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0014_1344001975.jpg,181.Worm_eating_Warbler
Worm_eating_Warbler_0011_2867847385.jpg,181.Worm_eating_Warbler
Bronzed_Cowbird_0016_815138575.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0013_2417085161.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0009_815138425.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0010_816113556.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0007_424570095.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0002_2660673463.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0015_59207957.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0008_815138925.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0017_2417906344.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0020_3092637605.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0022_3055262329.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0014_2256262769.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0021_3056098406.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0024_446633377.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0023_2740056869.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0019_3092637207.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0011_815138541.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0006_483532429.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0012_815138567.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0004_2407136000.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0005_816113586.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0025_446633937.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0018_59205913.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0003_446682575.jpg,026.Bronzed_Cowbird
Bronzed_Cowbird_0001_2660673621.jpg,026.Bronzed_Cowbird
Chipping_Sparrow_0015_444314033.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0012_2462909394.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0024_2060864636.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0019_3000044372.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0005_448526175.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0027_2270306345.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0025_2407558763.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0026_59210006.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0004_2408400216.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0011_452423696.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0022_2430106510.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0021_2408396988.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0010_2635717038.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0014_537911279.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0002_493493236.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0008_168939557.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0009_2687725170.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0020_2474004537.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0023_2408394396.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0016_2370612889.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0018_475061091.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0013_2457750345.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0001_226687764.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0017_2747497035.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0028_2620275641.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0003_2537948838.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0007_2407562927.jpg,116.Chipping_Sparrow
Chipping_Sparrow_0006_3075226789.jpg,116.Chipping_Sparrow
House_Wren_0012_716991968.jpg,196.House_Wren
House_Wren_0016_172312721.jpg,196.House_Wren
House_Wren_0003_2319039001.jpg,196.House_Wren
House_Wren_0025_2529066063.jpg,196.House_Wren
House_Wren_0023_792592159.jpg,196.House_Wren
House_Wren_0002_1008816651.jpg,196.House_Wren
House_Wren_0018_12472312.jpg,196.House_Wren
House_Wren_0007_2595202370.jpg,196.House_Wren
House_Wren_0001_200969708.jpg,196.House_Wren
House_Wren_0013_2835764926.jpg,196.House_Wren
House_Wren_0015_142884140.jpg,196.House_Wren
House_Wren_0019_566565393.jpg,196.House_Wren
House_Wren_0014_2605427536.jpg,196.House_Wren
House_Wren_0011_2896312595.jpg,196.House_Wren
House_Wren_0004_2513233267.jpg,196.House_Wren
House_Wren_0017_166159476.jpg,196.House_Wren
House_Wren_0008_2593085732.jpg,196.House_Wren
House_Wren_0024_2476065510.jpg,196.House_Wren
House_Wren_0022_2982711158.jpg,196.House_Wren
House_Wren_0006_885134160.jpg,196.House_Wren
House_Wren_0020_2754558390.jpg,196.House_Wren
House_Wren_0009_2632288767.jpg,196.House_Wren
House_Wren_0005_2896309429.jpg,196.House_Wren
House_Wren_0021_210294704.jpg,196.House_Wren
House_Wren_0010_2886596722.jpg,196.House_Wren
Cliff_Swallow_0006_2028340958.jpg,137.Cliff_Swallow
Cliff_Swallow_0011_2446385997.jpg,137.Cliff_Swallow
Cliff_Swallow_0004_2516799789.jpg,137.Cliff_Swallow
Cliff_Swallow_0016_2616098515.jpg,137.Cliff_Swallow
Cliff_Swallow_0020_2439269023.jpg,137.Cliff_Swallow
Cliff_Swallow_0025_1288207122.jpg,137.Cliff_Swallow
Cliff_Swallow_0008_2475295022.jpg,137.Cliff_Swallow
Cliff_Swallow_0028_131572746.jpg,137.Cliff_Swallow
Cliff_Swallow_0014_2517620738.jpg,137.Cliff_Swallow
Cliff_Swallow_0002_179398411.jpg,137.Cliff_Swallow
Cliff_Swallow_0023_1195006259.jpg,137.Cliff_Swallow
Cliff_Swallow_0013_2578580392.jpg,137.Cliff_Swallow
Cliff_Swallow_0030_505132356.jpg,137.Cliff_Swallow
Cliff_Swallow_0031_2601703522.jpg,137.Cliff_Swallow
Cliff_Swallow_0022_2044912569.jpg,137.Cliff_Swallow
Cliff_Swallow_0027_924529134.jpg,137.Cliff_Swallow
Cliff_Swallow_0019_2448145154.jpg,137.Cliff_Swallow
Cliff_Swallow_0015_2616937730.jpg,137.Cliff_Swallow
Cliff_Swallow_0009_535289093.jpg,137.Cliff_Swallow
Cliff_Swallow_0029_2514295561.jpg,137.Cliff_Swallow
Cliff_Swallow_0003_2606019691.jpg,137.Cliff_Swallow
Cliff_Swallow_0010_2439269027.jpg,137.Cliff_Swallow
Cliff_Swallow_0001_1199523234.jpg,137.Cliff_Swallow
Cliff_Swallow_0021_2439269019.jpg,137.Cliff_Swallow
Cliff_Swallow_0012_2577747185.jpg,137.Cliff_Swallow
Cliff_Swallow_0018_2890238370.jpg,137.Cliff_Swallow
Cliff_Swallow_0017_2548235315.jpg,137.Cliff_Swallow
Cliff_Swallow_0024_300459970.jpg,137.Cliff_Swallow
Cliff_Swallow_0005_493091581.jpg,137.Cliff_Swallow
Cliff_Swallow_0007_131572778.jpg,137.Cliff_Swallow
Cliff_Swallow_0026_131574715.jpg,137.Cliff_Swallow
Green_tailed_Towhee_0010_136678398.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0014_184430670.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0027_2586589128.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0025_2663079834.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0024_2660589322.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0008_550024168.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0002_3047580332.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0017_22766565.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0033_2645209352.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0022_468326839.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0034_2056851125.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0009_2540015850.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0015_136678400.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0005_2423344222.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0026_3079106432.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0001_2201100778.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0021_149577903.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0003_2950881652.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0011_117938965.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0016_16400796.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0020_22766551.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0030_2585752576.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0029_2585746290.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0012_117938968.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0006_1022019525.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0013_184430669.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0004_2948557942.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0018_567449096.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0031_155934246.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0028_2585753661.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0019_404162240.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0023_121586071.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0032_2602420030.jpg,148.Green_tailed_Towhee
Green_tailed_Towhee_0007_200535228.jpg,148.Green_tailed_Towhee
Florida_Jay_0016_2333884847.jpg,074.Florida_Jay
Florida_Jay_0006_2847477101.jpg,074.Florida_Jay
Florida_Jay_0015_2334715568.jpg,074.Florida_Jay
Florida_Jay_0010_2334709876.jpg,074.Florida_Jay
Florida_Jay_0011_2443833371.jpg,074.Florida_Jay
Florida_Jay_0027_2327395056.jpg,074.Florida_Jay
Florida_Jay_0007_2321631355.jpg,074.Florida_Jay
Florida_Jay_0018_2334720072.jpg,074.Florida_Jay
Florida_Jay_0033_1233902148.jpg,074.Florida_Jay
Florida_Jay_0032_2565468217.jpg,074.Florida_Jay
Florida_Jay_0029_2721947456.jpg,074.Florida_Jay
Florida_Jay_0012_2333891141.jpg,074.Florida_Jay
Florida_Jay_0003_409254398.jpg,074.Florida_Jay
Florida_Jay_0028_2485111000.jpg,074.Florida_Jay
Florida_Jay_0005_2334711470.jpg,074.Florida_Jay
Florida_Jay_0008_21506745.jpg,074.Florida_Jay
Florida_Jay_0020_2333888857.jpg,074.Florida_Jay
Florida_Jay_0022_429285351.jpg,074.Florida_Jay
Florida_Jay_0026_2267626839.jpg,074.Florida_Jay
Florida_Jay_0019_2334718020.jpg,074.Florida_Jay
Florida_Jay_0014_2333886235.jpg,074.Florida_Jay
Florida_Jay_0001_2599154742.jpg,074.Florida_Jay
Florida_Jay_0009_1519486691.jpg,074.Florida_Jay
Florida_Jay_0023_739264851.jpg,074.Florida_Jay
Florida_Jay_0021_1806277743.jpg,074.Florida_Jay
Florida_Jay_0035_171539494.jpg,074.Florida_Jay
Florida_Jay_0030_458297926.jpg,074.Florida_Jay
Florida_Jay_0024_2300511239.jpg,074.Florida_Jay
Florida_Jay_0013_2333893193.jpg,074.Florida_Jay
Florida_Jay_0025_2896321030.jpg,074.Florida_Jay
Florida_Jay_0034_2565468527.jpg,074.Florida_Jay
Florida_Jay_0002_2279244393.jpg,074.Florida_Jay
Florida_Jay_0004_2562114580.jpg,074.Florida_Jay
Florida_Jay_0031_266193041.jpg,074.Florida_Jay
Florida_Jay_0017_2333894291.jpg,074.Florida_Jay
Whip_poor_Will_0008_483309652.jpg,105.Whip_poor_Will
Whip_poor_Will_0004_3083435467.jpg,105.Whip_poor_Will
Whip_poor_Will_0002_2642965700.jpg,105.Whip_poor_Will
Whip_poor_Will_0019_3041308459.jpg,105.Whip_poor_Will
Whip_poor_Will_0006_141932898.jpg,105.Whip_poor_Will
Whip_poor_Will_0013_2495815770.jpg,105.Whip_poor_Will
Whip_poor_Will_0020_3041309277.jpg,105.Whip_poor_Will
Whip_poor_Will_0016_2474327445.jpg,105.Whip_poor_Will
Whip_poor_Will_0003_487306119.jpg,105.Whip_poor_Will
Whip_poor_Will_0017_497914254.jpg,105.Whip_poor_Will
Whip_poor_Will_0018_3042152616.jpg,105.Whip_poor_Will
Whip_poor_Will_0010_571659404.jpg,105.Whip_poor_Will
Whip_poor_Will_0007_288062508.jpg,105.Whip_poor_Will
Whip_poor_Will_0012_2665496210.jpg,105.Whip_poor_Will
Whip_poor_Will_0011_485797159.jpg,105.Whip_poor_Will
Whip_poor_Will_0009_1250554231.jpg,105.Whip_poor_Will
Whip_poor_Will_0001_2497980902.jpg,105.Whip_poor_Will
Whip_poor_Will_0015_2842866197.jpg,105.Whip_poor_Will
Whip_poor_Will_0014_2509410479.jpg,105.Whip_poor_Will
Whip_poor_Will_0005_2535488085.jpg,105.Whip_poor_Will
White_crowned_Sparrow_0024_521882124.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0021_xxx.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0006_2671941553.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0004_2232004154.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0013_795582815.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0009_444466313.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0001_2474971113.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0020_2327623753.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0019_xxx.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0010_268039933.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0011_349646923.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0015_2473980671.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0016_2501553875.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0014_2298723869.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0002_121247040.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0023_2225913204.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0018_2348507049.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0022_2472817511.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0012_xxx.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0003_115744779.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0017_3040625222.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0007_2938820313.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0025_2413956917.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0005_352279625.jpg,132.White_crowned_Sparrow
White_crowned_Sparrow_0008_2229271003.jpg,132.White_crowned_Sparrow
Hooded_Warbler_0022_1344892174.jpg,167.Hooded_Warbler
Hooded_Warbler_0015_2490084541.jpg,167.Hooded_Warbler
Hooded_Warbler_0006_467605144.jpg,167.Hooded_Warbler
Hooded_Warbler_0009_67483709.jpg,167.Hooded_Warbler
Hooded_Warbler_0013_2490084287.jpg,167.Hooded_Warbler
Hooded_Warbler_0021_1467417562.jpg,167.Hooded_Warbler
Hooded_Warbler_0008_2068462669.jpg,167.Hooded_Warbler
Hooded_Warbler_0004_2546770991.jpg,167.Hooded_Warbler
Hooded_Warbler_0002_2499838711.jpg,167.Hooded_Warbler
Hooded_Warbler_0007_2549113707.jpg,167.Hooded_Warbler
Hooded_Warbler_0014_59215782.jpg,167.Hooded_Warbler
Hooded_Warbler_0012_456597862.jpg,167.Hooded_Warbler
Hooded_Warbler_0011_1472759322.jpg,167.Hooded_Warbler
Hooded_Warbler_0026_2817738193.jpg,167.Hooded_Warbler
Hooded_Warbler_0020_2495513053.jpg,167.Hooded_Warbler
Hooded_Warbler_0010_53159577.jpg,167.Hooded_Warbler
Hooded_Warbler_0017_184474490.jpg,167.Hooded_Warbler
Hooded_Warbler_0001_469065349.jpg,167.Hooded_Warbler
Hooded_Warbler_0024_2654633686.jpg,167.Hooded_Warbler
Hooded_Warbler_0016_209549219.jpg,167.Hooded_Warbler
Hooded_Warbler_0018_162158972.jpg,167.Hooded_Warbler
Hooded_Warbler_0023_138405654.jpg,167.Hooded_Warbler
Hooded_Warbler_0019_2102837510.jpg,167.Hooded_Warbler
Hooded_Warbler_0003_2572258906.jpg,167.Hooded_Warbler
Hooded_Warbler_0025_2690306769.jpg,167.Hooded_Warbler
Hooded_Warbler_0005_1510195343.jpg,167.Hooded_Warbler
American_Redstart_0014_515236812.jpg,109.American_Redstart
American_Redstart_0007_491148400.jpg,109.American_Redstart
American_Redstart_0026_541621959.jpg,109.American_Redstart
American_Redstart_0006_749700619.jpg,109.American_Redstart
American_Redstart_0001_3077357840.jpg,109.American_Redstart
American_Redstart_0018_2502869259.jpg,109.American_Redstart
American_Redstart_0009_2502869853.jpg,109.American_Redstart
American_Redstart_0027_2449398232.jpg,109.American_Redstart
American_Redstart_0020_2503700478.jpg,109.American_Redstart
American_Redstart_0019_2983863945.jpg,109.American_Redstart
American_Redstart_0002_2507161450.jpg,109.American_Redstart
American_Redstart_0003_2551180132.jpg,109.American_Redstart
American_Redstart_0010_2227581326.jpg,109.American_Redstart
American_Redstart_0013_2541350319.jpg,109.American_Redstart
American_Redstart_0012_29497273.jpg,109.American_Redstart
American_Redstart_0005_515944470.jpg,109.American_Redstart
American_Redstart_0021_510357025.jpg,109.American_Redstart
American_Redstart_0022_168809889.jpg,109.American_Redstart
American_Redstart_0011_515214271.jpg,109.American_Redstart
American_Redstart_0004_534938556.jpg,109.American_Redstart
American_Redstart_0023_2520401491.jpg,109.American_Redstart
American_Redstart_0024_496023728.jpg,109.American_Redstart
American_Redstart_0029_2532979219.jpg,109.American_Redstart
American_Redstart_0017_823881881.jpg,109.American_Redstart
American_Redstart_0025_2553644192.jpg,109.American_Redstart
American_Redstart_0016_2951245949.jpg,109.American_Redstart
American_Redstart_0008_2500979908.jpg,109.American_Redstart
American_Redstart_0015_2500963505.jpg,109.American_Redstart
American_Redstart_0028_2799100119.jpg,109.American_Redstart
Cactus_Wren_0016_2487431.jpg,194.Cactus_Wren
Cactus_Wren_0003_384944513.jpg,194.Cactus_Wren
Cactus_Wren_0018_1653250191.jpg,194.Cactus_Wren
Cactus_Wren_0012_323264245.jpg,194.Cactus_Wren
Cactus_Wren_0014_2964486988.jpg,194.Cactus_Wren
Cactus_Wren_0010_2247090733.jpg,194.Cactus_Wren
Cactus_Wren_0017_2893333343.jpg,194.Cactus_Wren
Cactus_Wren_0013_1172327294.jpg,194.Cactus_Wren
Cactus_Wren_0009_2056959965.jpg,194.Cactus_Wren
Cactus_Wren_0015_2393905297.jpg,194.Cactus_Wren
Cactus_Wren_0007_30646482.jpg,194.Cactus_Wren
Cactus_
Download .txt
gitextract_je04xax4/

├── DN4_2019_Version/
│   ├── DN4_Test_5way1shot.py
│   ├── DN4_Test_5way5shot.py
│   ├── DN4_Train_5way1shot.py
│   ├── DN4_Train_5way1shot_DA.py
│   ├── DN4_Train_5way1shot_Resnet.py
│   ├── DN4_Train_5way5shot.py
│   ├── DN4_Train_5way5shot_DA.py
│   ├── DN4_Train_5way5shot_Resnet.py
│   ├── LICENSE
│   ├── README.md
│   ├── dataset/
│   │   ├── CubBird_prepare_csv.py
│   │   ├── CubBirds/
│   │   │   ├── test.csv
│   │   │   ├── train.csv
│   │   │   └── val.csv
│   │   ├── StanforCar_prepare_csv.py
│   │   ├── StanfordCars/
│   │   │   ├── test.csv
│   │   │   ├── train.csv
│   │   │   └── val.csv
│   │   ├── StanfordDog_prepare_csv.py
│   │   ├── StanfordDogs/
│   │   │   ├── test.csv
│   │   │   ├── train.csv
│   │   │   └── val.csv
│   │   ├── datasets_csv.py
│   │   └── miniImageNet/
│   │       ├── test.csv
│   │       ├── train.csv
│   │       └── val.csv
│   ├── models/
│   │   └── network.py
│   └── results/
│       ├── DN4_miniImageNet_Conv64F_5Way_1Shot_K3/
│       │   ├── Test_resutls.txt
│       │   └── opt_resutls.txt
│       ├── DN4_miniImageNet_Conv64F_5Way_5Shot_K3/
│       │   ├── Test_resutls.txt
│       │   └── opt_resutls.txt
│       ├── DN4_miniImageNet_ResNet256F_5Way_1Shot_K3/
│       │   ├── Test_resutls.txt
│       │   └── opt_resutls.txt
│       └── DN4_miniImageNet_ResNet256F_5Way_5Shot_K3/
│           ├── Test_resutls.txt
│           └── opt_resutls.txt
├── LICENSE
├── README.md
├── Test_DN4.py
├── Train_DN4.py
├── dataset/
│   ├── CubBirds/
│   │   ├── test.csv
│   │   ├── train.csv
│   │   └── val.csv
│   ├── Prepare_csv_CubBird.py
│   ├── Prepare_csv_StanfordCar.py
│   ├── Prepare_csv_StanfordDog.py
│   ├── StanfordCars/
│   │   ├── test.csv
│   │   ├── train.csv
│   │   └── val.csv
│   ├── StanfordDogs/
│   │   ├── test.csv
│   │   ├── train.csv
│   │   └── val.csv
│   ├── general_dataloader.py
│   └── miniImageNet/
│       ├── test.csv
│       ├── train.csv
│       └── val.csv
├── models/
│   ├── backbone.py
│   ├── classifier.py
│   └── network.py
├── results/
│   ├── SGD_Cosine_Lr0.01_DN4_ResNet12_Epoch_30_miniImageNet_84_84_5Way_1Shot/
│   │   ├── Test_results.txt
│   │   ├── opt_results.txt
│   │   ├── test_loss.txt
│   │   ├── train_loss.txt
│   │   └── val_loss.txt
│   ├── SGD_Cosine_Lr0.01_DN4_ResNet12_Epoch_30_miniImageNet_84_84_5Way_5Shot/
│   │   ├── Test_results.txt
│   │   ├── opt_results.txt
│   │   ├── test_loss.txt
│   │   ├── train_loss.txt
│   │   └── val_loss.txt
│   ├── SGD_Cosine_Lr0.05_DN4_Conv64F_Local_Epoch_30_miniImageNet_84_84_5Way_1Shot/
│   │   ├── Test_results.txt
│   │   ├── Test_results_New.txt
│   │   ├── opt_results.txt
│   │   ├── test_loss.txt
│   │   ├── train_loss.txt
│   │   └── val_loss.txt
│   ├── SGD_Cosine_Lr0.05_DN4_Conv64F_Local_Epoch_30_miniImageNet_84_84_5Way_5Shot/
│   │   ├── Test_results.txt
│   │   ├── opt_results.txt
│   │   ├── test_loss.txt
│   │   ├── train_loss.txt
│   │   └── val_loss.txt
│   └── test
└── utils.py
Download .txt
SYMBOL INDEX (199 symbols across 17 files)

FILE: DN4_2019_Version/DN4_Test_5way1shot.py
  function validate (line 93) | def validate(val_loader, model, criterion, epoch_index, F_txt):
  class AverageMeter (line 163) | class AverageMeter(object):
    method __init__ (line 165) | def __init__(self):
    method reset (line 168) | def reset(self):
    method update (line 174) | def update(self, val, n=1):
  function accuracy (line 182) | def accuracy(output, target, topk=(1,)):
  function mean_confidence_interval (line 199) | def mean_confidence_interval(data, confidence=0.95):

FILE: DN4_2019_Version/DN4_Test_5way5shot.py
  function validate (line 93) | def validate(val_loader, model, criterion, epoch_index, F_txt):
  class AverageMeter (line 163) | class AverageMeter(object):
    method __init__ (line 165) | def __init__(self):
    method reset (line 168) | def reset(self):
    method update (line 174) | def update(self, val, n=1):
  function accuracy (line 182) | def accuracy(output, target, topk=(1,)):
  function mean_confidence_interval (line 199) | def mean_confidence_interval(data, confidence=0.95):

FILE: DN4_2019_Version/DN4_Train_5way1shot.py
  function adjust_learning_rate (line 90) | def adjust_learning_rate(optimizer, epoch_num):
  function train (line 97) | def train(train_loader, model, criterion, optimizer, epoch_index, F_txt):
  function validate (line 165) | def validate(val_loader, model, criterion, epoch_index, best_prec1, F_txt):
  function save_checkpoint (line 236) | def save_checkpoint(state, filename='checkpoint.pth.tar'):
  class AverageMeter (line 240) | class AverageMeter(object):
    method __init__ (line 242) | def __init__(self):
    method reset (line 245) | def reset(self):
    method update (line 251) | def update(self, val, n=1):
  function accuracy (line 258) | def accuracy(output, target, topk=(1,)):

FILE: DN4_2019_Version/DN4_Train_5way1shot_DA.py
  function adjust_learning_rate (line 90) | def adjust_learning_rate(optimizer, epoch_num):
  function train (line 97) | def train(train_loader, model, criterion, optimizer, epoch_index, F_txt):
  function validate (line 165) | def validate(val_loader, model, criterion, epoch_index, best_prec1, F_txt):
  function save_checkpoint (line 236) | def save_checkpoint(state, filename='checkpoint.pth.tar'):
  class AverageMeter (line 240) | class AverageMeter(object):
    method __init__ (line 242) | def __init__(self):
    method reset (line 245) | def reset(self):
    method update (line 251) | def update(self, val, n=1):
  function accuracy (line 258) | def accuracy(output, target, topk=(1,)):

FILE: DN4_2019_Version/DN4_Train_5way1shot_Resnet.py
  function adjust_learning_rate (line 90) | def adjust_learning_rate(optimizer, epoch_num):
  function train (line 97) | def train(train_loader, model, criterion, optimizer, epoch_index, F_txt):
  function validate (line 165) | def validate(val_loader, model, criterion, epoch_index, best_prec1, F_txt):
  function save_checkpoint (line 236) | def save_checkpoint(state, filename='checkpoint.pth.tar'):
  class AverageMeter (line 240) | class AverageMeter(object):
    method __init__ (line 242) | def __init__(self):
    method reset (line 245) | def reset(self):
    method update (line 251) | def update(self, val, n=1):
  function accuracy (line 258) | def accuracy(output, target, topk=(1,)):

FILE: DN4_2019_Version/DN4_Train_5way5shot.py
  function adjust_learning_rate (line 90) | def adjust_learning_rate(optimizer, epoch_num):
  function train (line 97) | def train(train_loader, model, criterion, optimizer, epoch_index, F_txt):
  function validate (line 165) | def validate(val_loader, model, criterion, epoch_index, best_prec1, F_txt):
  function save_checkpoint (line 236) | def save_checkpoint(state, filename='checkpoint.pth.tar'):
  class AverageMeter (line 240) | class AverageMeter(object):
    method __init__ (line 242) | def __init__(self):
    method reset (line 245) | def reset(self):
    method update (line 251) | def update(self, val, n=1):
  function accuracy (line 258) | def accuracy(output, target, topk=(1,)):

FILE: DN4_2019_Version/DN4_Train_5way5shot_DA.py
  function adjust_learning_rate (line 90) | def adjust_learning_rate(optimizer, epoch_num):
  function train (line 97) | def train(train_loader, model, criterion, optimizer, epoch_index, F_txt):
  function validate (line 165) | def validate(val_loader, model, criterion, epoch_index, best_prec1, F_txt):
  function save_checkpoint (line 236) | def save_checkpoint(state, filename='checkpoint.pth.tar'):
  class AverageMeter (line 240) | class AverageMeter(object):
    method __init__ (line 242) | def __init__(self):
    method reset (line 245) | def reset(self):
    method update (line 251) | def update(self, val, n=1):
  function accuracy (line 258) | def accuracy(output, target, topk=(1,)):

FILE: DN4_2019_Version/DN4_Train_5way5shot_Resnet.py
  function adjust_learning_rate (line 90) | def adjust_learning_rate(optimizer, epoch_num):
  function train (line 97) | def train(train_loader, model, criterion, optimizer, epoch_index, F_txt):
  function validate (line 165) | def validate(val_loader, model, criterion, epoch_index, best_prec1, F_txt):
  function save_checkpoint (line 236) | def save_checkpoint(state, filename='checkpoint.pth.tar'):
  class AverageMeter (line 240) | class AverageMeter(object):
    method __init__ (line 242) | def __init__(self):
    method reset (line 245) | def reset(self):
    method update (line 251) | def update(self, val, n=1):
  function accuracy (line 258) | def accuracy(output, target, topk=(1,)):

FILE: DN4_2019_Version/dataset/datasets_csv.py
  function pil_loader (line 16) | def pil_loader(path):
  function accimage_loader (line 23) | def accimage_loader(path):
  function gray_loader (line 32) | def gray_loader(path):
  function default_loader (line 38) | def default_loader(path):
  function find_classes (line 46) | def find_classes(dir):
  class Imagefolder_csv (line 54) | class Imagefolder_csv(object):
    method __init__ (line 61) | def __init__(self, data_dir="", mode="train", image_size=84, data_name...
    method __len__ (line 238) | def __len__(self):
    method __getitem__ (line 242) | def __getitem__(self, index):

FILE: DN4_2019_Version/models/network.py
  function weights_init_normal (line 22) | def weights_init_normal(m):
  function weights_init_xavier (line 34) | def weights_init_xavier(m):
  function weights_init_kaiming (line 46) | def weights_init_kaiming(m):
  function weights_init_orthogonal (line 58) | def weights_init_orthogonal(m):
  function init_weights (line 70) | def init_weights(net, init_type='normal'):
  function get_norm_layer (line 84) | def get_norm_layer(norm_type='instance'):
  function define_DN4Net (line 97) | def define_DN4Net(pretrained=False, model_root=None, which_model='Conv64...
  function print_network (line 122) | def print_network(net):
  class FourLayer_64F (line 143) | class FourLayer_64F(nn.Module):
    method __init__ (line 144) | def __init__(self, norm_layer=nn.BatchNorm2d, num_classes=5, neighbor_...
    method forward (line 176) | def forward(self, input1, input2):
  class ImgtoClass_Metric (line 199) | class ImgtoClass_Metric(nn.Module):
    method __init__ (line 200) | def __init__(self, neighbor_k=3):
    method cal_cosinesimilarity (line 206) | def cal_cosinesimilarity(self, input1, input2):
    method forward (line 239) | def forward(self, x1, x2):
  class ResBlock (line 261) | class ResBlock(nn.Module):
    method __init__ (line 262) | def __init__(self, nFin, nFout):
    method forward (line 279) | def forward(self, x):
  class ResNetLike (line 284) | class ResNetLike(nn.Module):
    method __init__ (line 285) | def __init__(self, opt, neighbor_k=3):
    method forward (line 330) | def forward(self, input1, input2):

FILE: Test_DN4.py
  function test (line 111) | def test(test_loader, model, criterion, epoch_index, best_prec1, F_txt):

FILE: Train_DN4.py
  function train (line 108) | def train(train_loader, model, criterion, optimizer, epoch_index, F_txt):
  function validate (line 175) | def validate(val_loader, model, criterion, epoch_index, best_prec1, F_txt):

FILE: dataset/general_dataloader.py
  function pil_loader (line 18) | def pil_loader(path):
  function RGB_loader (line 24) | def RGB_loader(path):
  function accimage_loader (line 28) | def accimage_loader(path):
  function gray_loader (line 37) | def gray_loader(path):
  function default_loader (line 43) | def default_loader(path):
  function find_classes (line 51) | def find_classes(dir):
  function load_csv2dict (line 59) | def load_csv2dict(csv_path):
  function data_split (line 82) | def data_split(data_dir, class_img_dict, class_list, class_to_idx, mode):
  function read_dataset (line 149) | def read_dataset(data_dir):
  function episode_sampling (line 158) | def episode_sampling(data_dir, class_list, class_img_dict, episode_num, ...
  class GeneralDataSet (line 197) | class GeneralDataSet(object):
    method __init__ (line 201) | def __init__(self, opt, transform=None, mode='train', loader=RGB_loader):
    method __len__ (line 241) | def __len__(self):
    method __getitem__ (line 245) | def __getitem__(self, index):
  class FewShotDataSet (line 263) | class FewShotDataSet(object):
    method __init__ (line 267) | def __init__(self, opt, transform=None, support_transform=None, mode='...
    method __len__ (line 309) | def __len__(self):
    method __getitem__ (line 313) | def __getitem__(self, index):
  function get_dataloader (line 415) | def get_dataloader(opt, modes):
  function get_Fewshot_dataloader (line 481) | def get_Fewshot_dataloader(opt, modes):

FILE: models/backbone.py
  class Conv64F (line 17) | class Conv64F(nn.Module):
    method __init__ (line 26) | def __init__(self):
    method forward (line 65) | def forward(self, x):
  class Conv64F_Local (line 73) | class Conv64F_Local(nn.Module):
    method __init__ (line 81) | def __init__(self):
    method forward (line 118) | def forward(self, x):
  function init_layer (line 135) | def init_layer(L):
  class Flatten (line 145) | class Flatten(nn.Module):
    method __init__ (line 146) | def __init__(self):
    method forward (line 149) | def forward(self, x):
  class SimpleBlock (line 154) | class SimpleBlock(nn.Module):
    method __init__ (line 156) | def __init__(self, indim, outdim, half_res):
    method forward (line 195) | def forward(self, x):
  class BottleneckBlock (line 209) | class BottleneckBlock(nn.Module):
    method __init__ (line 211) | def __init__(self, indim, outdim, half_res):
    method forward (line 252) | def forward(self, x):
  class ResNet_224 (line 270) | class ResNet_224(nn.Module):
    method __init__ (line 272) | def __init__(self,block,list_of_num_layers, list_of_out_dims, No_pool=...
    method forward (line 317) | def forward(self,x):
  function conv3x3 (line 331) | def conv3x3(in_planes, out_planes, stride=1):
  class SELayer (line 337) | class SELayer(nn.Module):
    method __init__ (line 338) | def __init__(self, channel, reduction=16):
    method forward (line 348) | def forward(self, x):
  class DropBlock (line 355) | class DropBlock(nn.Module):
    method __init__ (line 356) | def __init__(self, block_size):
    method forward (line 363) | def forward(self, x, gamma):
    method _compute_block_mask (line 379) | def _compute_block_mask(self, mask):
  class BasicBlock (line 412) | class BasicBlock(nn.Module):
    method __init__ (line 415) | def __init__(self, inplanes, planes, stride=1, downsample=None, drop_r...
    method forward (line 438) | def forward(self, x):
  class ResNet_84 (line 475) | class ResNet_84(nn.Module):
    method __init__ (line 477) | def __init__(self, block, n_blocks, keep_prob=1.0, avg_pool=False, fla...
    method _make_layer (line 512) | def _make_layer(self, block, n_block, planes, stride=1, drop_rate=0.0,...
    method forward (line 539) | def forward(self, x, is_feat=False, rot=False):
  function ResNet12 (line 568) | def ResNet12(keep_prob=1.0, avg_pool=False, flatten=False, **kwargs):
  function SeResNet12 (line 573) | def SeResNet12(keep_prob=1.0, avg_pool=False, flatten=False, **kwargs):
  function ResNet10 (line 578) | def ResNet10(flatten = False):
  function ResNet18 (line 581) | def ResNet18(flatten = False):
  function ResNet34 (line 584) | def ResNet34(flatten = False):
  function ResNet50 (line 587) | def ResNet50(flatten = False):
  function ResNet101 (line 590) | def ResNet101(flatten = False):

FILE: models/classifier.py
  class Prototype_Metric (line 14) | class Prototype_Metric(nn.Module):
    method __init__ (line 20) | def __init__(self, way_num=5, shot_num=5, neighbor_k=3):
    method cal_EuclideanDis (line 27) | def cal_EuclideanDis(self, input1, input2):
    method forward (line 53) | def forward(self, x1, x2):
  class ImgtoClass_Metric (line 62) | class ImgtoClass_Metric(nn.Module):
    method __init__ (line 67) | def __init__(self, way_num=5, shot_num=5, neighbor_k=3):
    method cal_cosinesimilarity (line 74) | def cal_cosinesimilarity(self, input1, input2):
    method forward (line 115) | def forward(self, x1, x2):

FILE: models/network.py
  function weights_init_normal (line 47) | def weights_init_normal(m):
  function weights_init_xavier (line 60) | def weights_init_xavier(m):
  function weights_init_kaiming (line 72) | def weights_init_kaiming(m):
  function weights_init_orthogonal (line 84) | def weights_init_orthogonal(m):
  function init_weights (line 96) | def init_weights(net, init_type='normal'):
  function get_norm_layer (line 110) | def get_norm_layer(norm_type='instance'):
  function print_network (line 122) | def print_network(net):
  function define_model (line 131) | def define_model(pretrained=False, model_root=None, encoder_model='Conv6...
  class Fewshot_model (line 158) | class Fewshot_model(nn.Module):
    method __init__ (line 162) | def __init__(self, encoder_model='Conv64F', classifier_model='DN4', cl...
    method forward (line 200) | def forward(self, input1, input2, is_feature=False):
  class Model_with_reused_Encoder (line 214) | class Model_with_reused_Encoder(nn.Module):
    method __init__ (line 218) | def __init__(self, pre_trained_model, new_classifier='DN4', way_num=5,...
    method forward (line 234) | def forward(self, input1, input2):

FILE: utils.py
  function adjust_learning_rate (line 12) | def adjust_learning_rate(opt, optimizer, epoch, F_txt):
  function adjust_learning_rate2 (line 24) | def adjust_learning_rate2(opt, optimizer, epoch, F_txt):
  function count_parameters (line 35) | def count_parameters(model):
  function save_checkpoint (line 40) | def save_checkpoint(state, filename='checkpoint.pth.tar'):
  class AverageMeter (line 44) | class AverageMeter(object):
    method __init__ (line 46) | def __init__(self):
    method reset (line 49) | def reset(self):
    method update (line 55) | def update(self, val, n=1):
  function accuracy (line 62) | def accuracy(output, target, topk=(1,)):
  function mean_confidence_interval (line 79) | def mean_confidence_interval(data, confidence=0.95):
  function set_save_path (line 88) | def set_save_path(opt):
  function set_save_test_path (line 110) | def set_save_test_path(opt, finetune=False):
  function set_save_test_path2 (line 132) | def set_save_test_path2(opt, finetune=False):
  function get_resume_file (line 159) | def get_resume_file(checkpoint_dir, F_txt):
  function plot_loss_curve (line 176) | def plot_loss_curve(opt, train_loss, val_loss, test_loss=None):
Copy disabled (too large) Download .json
Condensed preview — 81 files, each showing path, character count, and a content snippet. Download the .json file for the full structured content (10,779K chars).
[
  {
    "path": "DN4_2019_Version/DN4_Test_5way1shot.py",
    "chars": 11333,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Wenbin Li (liwenbin.nju@gmail.com)\nDate: April 9, 2019\nVersi"
  },
  {
    "path": "DN4_2019_Version/DN4_Test_5way5shot.py",
    "chars": 11332,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Wenbin Li (liwenbin.nju@gmail.com)\nDate: April 9, 2019\nVersi"
  },
  {
    "path": "DN4_2019_Version/DN4_Train_5way1shot.py",
    "chars": 15405,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Wenbin Li (liwenbin.nju@gmail.com)\nDate: April 9, 2019\nVersi"
  },
  {
    "path": "DN4_2019_Version/DN4_Train_5way1shot_DA.py",
    "chars": 15711,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Wenbin Li (liwenbin.nju@gmail.com)\nDate: April 9, 2019\nVersi"
  },
  {
    "path": "DN4_2019_Version/DN4_Train_5way1shot_Resnet.py",
    "chars": 15408,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Wenbin Li (liwenbin.nju@gmail.com)\nDate: April 9, 2019\nVersi"
  },
  {
    "path": "DN4_2019_Version/DN4_Train_5way5shot.py",
    "chars": 15404,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Wenbin Li (liwenbin.nju@gmail.com)\nDate: April 9, 2019\nVersi"
  },
  {
    "path": "DN4_2019_Version/DN4_Train_5way5shot_DA.py",
    "chars": 15710,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Wenbin Li (liwenbin.nju@gmail.com)\nDate: April 9, 2019\nVersi"
  },
  {
    "path": "DN4_2019_Version/DN4_Train_5way5shot_Resnet.py",
    "chars": 15404,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Wenbin Li (liwenbin.nju@gmail.com)\nDate: April 9, 2019\nVersi"
  },
  {
    "path": "DN4_2019_Version/LICENSE",
    "chars": 2704,
    "preview": "Copyright (c) 2019, Wenbin Li \nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\n"
  },
  {
    "path": "DN4_2019_Version/README.md",
    "chars": 3342,
    "preview": "# DN4 in PyTorch\n\nWe provide a PyTorch implementation of DN4 for few-shot learning. If you use this code for your resear"
  },
  {
    "path": "DN4_2019_Version/dataset/CubBird_prepare_csv.py",
    "chars": 3417,
    "preview": "##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n## Created by: Wenbin Li\n## Date: De"
  },
  {
    "path": "DN4_2019_Version/dataset/CubBirds/test.csv",
    "chars": 84321,
    "preview": "filename,label\r\nBobolink_0023_2486988341.jpg,013.Bobolink\r\nBobolink_0006_323626208.jpg,013.Bobolink\r\nBobolink_0021_25634"
  },
  {
    "path": "DN4_2019_Version/dataset/CubBirds/train.csv",
    "chars": 229956,
    "preview": "filename,label\r\nRed_winged_Blackbird_0021_2529270051.jpg,010.Red_winged_Blackbird\r\nRed_winged_Blackbird_0022_2179939448."
  },
  {
    "path": "DN4_2019_Version/dataset/CubBirds/val.csv",
    "chars": 35350,
    "preview": "filename,label\r\nIndigo_Bunting_0018_12981558.jpg,014.Indigo_Bunting\r\nIndigo_Bunting_0033_2494583025.jpg,014.Indigo_Bunti"
  },
  {
    "path": "DN4_2019_Version/dataset/StanforCar_prepare_csv.py",
    "chars": 3429,
    "preview": "##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n## Created by: Wenbin Li\n## Date: De"
  },
  {
    "path": "DN4_2019_Version/dataset/StanfordCars/test.csv",
    "chars": 62158,
    "preview": "filename,label\r\n007313.jpg,90\r\n007333.jpg,90\r\n007329.jpg,90\r\n007364.jpg,90\r\n007310.jpg,90\r\n007374.jpg,90\r\n007345.jpg,90\r"
  },
  {
    "path": "DN4_2019_Version/dataset/StanfordCars/train.csv",
    "chars": 166036,
    "preview": "filename,label\r\n010677.jpg,130\r\n010690.jpg,130\r\n010708.jpg,130\r\n010694.jpg,130\r\n010688.jpg,130\r\n010746.jpg,130\r\n010685.j"
  },
  {
    "path": "DN4_2019_Version/dataset/StanfordCars/val.csv",
    "chars": 21962,
    "preview": "filename,label\r\n012146.jpg,148\r\n012194.jpg,148\r\n012189.jpg,148\r\n012172.jpg,148\r\n012123.jpg,148\r\n012160.jpg,148\r\n012193.j"
  },
  {
    "path": "DN4_2019_Version/dataset/StanfordDog_prepare_csv.py",
    "chars": 3428,
    "preview": "##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n## Created by: Wenbin Li\n## Date: De"
  },
  {
    "path": "DN4_2019_Version/dataset/StanfordDogs/test.csv",
    "chars": 222060,
    "preview": "filename,label\r\nn02096585_2828.jpg,n02096585-Boston_bull\r\nn02096585_1668.jpg,n02096585-Boston_bull\r\nn02096585_2177.jpg,n"
  },
  {
    "path": "DN4_2019_Version/dataset/StanfordDogs/train.csv",
    "chars": 531264,
    "preview": "filename,label\r\nn02089973_1458.jpg,n02089973-English_foxhound\r\nn02089973_556.jpg,n02089973-English_foxhound\r\nn02089973_1"
  },
  {
    "path": "DN4_2019_Version/dataset/StanfordDogs/val.csv",
    "chars": 154564,
    "preview": "filename,label\r\nn02087394_5552.jpg,n02087394-Rhodesian_ridgeback\r\nn02087394_7467.jpg,n02087394-Rhodesian_ridgeback\r\nn020"
  },
  {
    "path": "DN4_2019_Version/dataset/datasets_csv.py",
    "chars": 7438,
    "preview": "import os\nimport os.path as path\nimport json\nimport torch\nimport torch.utils.data as data\nimport numpy as np\nimport rand"
  },
  {
    "path": "DN4_2019_Version/dataset/miniImageNet/test.csv",
    "chars": 384015,
    "preview": "filename,label\nn0193011200000001.jpg,n01930112\nn0193011200000004.jpg,n01930112\nn0193011200000005.jpg,n01930112\nn01930112"
  },
  {
    "path": "DN4_2019_Version/dataset/miniImageNet/train.csv",
    "chars": 1228815,
    "preview": "filename,label\nn0153282900000005.jpg,n01532829\nn0153282900000006.jpg,n01532829\nn0153282900000007.jpg,n01532829\nn01532829"
  },
  {
    "path": "DN4_2019_Version/dataset/miniImageNet/val.csv",
    "chars": 307215,
    "preview": "filename,label\nn0185567200000003.jpg,n01855672\nn0185567200000004.jpg,n01855672\nn0185567200000010.jpg,n01855672\nn01855672"
  },
  {
    "path": "DN4_2019_Version/models/network.py",
    "chars": 10605,
    "preview": "import torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport functools\nimport pdb\nimport math\nimport sys\nsys.dont"
  },
  {
    "path": "DN4_2019_Version/results/DN4_miniImageNet_Conv64F_5Way_1Shot_K3/Test_resutls.txt",
    "chars": 4856,
    "preview": "Namespace(basemodel='Conv64', beta1=0.5, clamp_lower=-0.01, clamp_upper=0.01, cuda=True, data_name='miniImageNet', datas"
  },
  {
    "path": "DN4_2019_Version/results/DN4_miniImageNet_Conv64F_5Way_1Shot_K3/opt_resutls.txt",
    "chars": 371598,
    "preview": "Namespace(basemodel='Conv64', beta1=0.5, clamp_lower=-0.01, clamp_upper=0.01, cuda=True, data_name='miniImageNet', datas"
  },
  {
    "path": "DN4_2019_Version/results/DN4_miniImageNet_Conv64F_5Way_5Shot_K3/Test_resutls.txt",
    "chars": 4854,
    "preview": "Namespace(basemodel='Conv64', beta1=0.5, clamp_lower=-0.01, clamp_upper=0.01, cuda=True, data_name='miniImageNet', datas"
  },
  {
    "path": "DN4_2019_Version/results/DN4_miniImageNet_Conv64F_5Way_5Shot_K3/opt_resutls.txt",
    "chars": 371598,
    "preview": "Namespace(basemodel='Conv64', beta1=0.5, clamp_lower=-0.01, clamp_upper=0.01, cuda=True, data_name='miniImageNet', datas"
  },
  {
    "path": "DN4_2019_Version/results/DN4_miniImageNet_ResNet256F_5Way_1Shot_K3/Test_resutls.txt",
    "chars": 7614,
    "preview": "Namespace(basemodel='ResNet256F', beta1=0.5, clamp_lower=-0.01, clamp_upper=0.01, cuda=True, data_name='miniImageNet', d"
  },
  {
    "path": "DN4_2019_Version/results/DN4_miniImageNet_ResNet256F_5Way_1Shot_K3/opt_resutls.txt",
    "chars": 374349,
    "preview": "Namespace(basemodel='ResNet256F', beta1=0.5, clamp_lower=-0.01, clamp_upper=0.01, cuda=True, data_name='miniImageNet', d"
  },
  {
    "path": "DN4_2019_Version/results/DN4_miniImageNet_ResNet256F_5Way_5Shot_K3/Test_resutls.txt",
    "chars": 7598,
    "preview": "Namespace(basemodel='ResNet256F', beta1=0.5, clamp_lower=-0.01, clamp_upper=0.01, cuda=True, data_name='miniImageNet', d"
  },
  {
    "path": "DN4_2019_Version/results/DN4_miniImageNet_ResNet256F_5Way_5Shot_K3/opt_resutls.txt",
    "chars": 374494,
    "preview": "Namespace(basemodel='ResNet256F', beta1=0.5, clamp_lower=-0.01, clamp_upper=0.01, cuda=True, data_name='miniImageNet', d"
  },
  {
    "path": "LICENSE",
    "chars": 2695,
    "preview": "Copyright (c) 2019, Wenbin Li \nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\n"
  },
  {
    "path": "README.md",
    "chars": 3414,
    "preview": "# DN4 in PyTorch (2023 Version)\n\nWe provide a PyTorch implementation of DN4 for few-shot learning.\nIf you use this code,"
  },
  {
    "path": "Test_DN4.py",
    "chars": 9882,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: Wenbin Li (liwenbin@nju.edu.cn)\nDate: June 18, 2023\nVersion:"
  },
  {
    "path": "Train_DN4.py",
    "chars": 16578,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nAuthor: Wenbin Li (liwenbin@nju.edu.cn)\nDate: June 18, 2023\nVersion: "
  },
  {
    "path": "dataset/CubBirds/test.csv",
    "chars": 158026,
    "preview": "filename,label\r\nEared_Grebe_0056_34098.jpg,050.Eared_Grebe\r\nEared_Grebe_0082_34227.jpg,050.Eared_Grebe\r\nEared_Grebe_0035"
  },
  {
    "path": "dataset/CubBirds/train.csv",
    "chars": 411935,
    "preview": "filename,label\r\nWestern_Grebe_0080_36310.jpg,053.Western_Grebe\r\nWestern_Grebe_0077_36355.jpg,053.Western_Grebe\r\nWestern_"
  },
  {
    "path": "dataset/CubBirds/val.csv",
    "chars": 64540,
    "preview": "filename,label\r\nPainted_Bunting_0028_15205.jpg,016.Painted_Bunting\r\nPainted_Bunting_0060_15224.jpg,016.Painted_Bunting\r\n"
  },
  {
    "path": "dataset/Prepare_csv_CubBird.py",
    "chars": 3417,
    "preview": "##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n## Created by: Wenbin Li\n## Date: De"
  },
  {
    "path": "dataset/Prepare_csv_StanfordCar.py",
    "chars": 3429,
    "preview": "##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n## Created by: Wenbin Li\n## Date: De"
  },
  {
    "path": "dataset/Prepare_csv_StanfordDog.py",
    "chars": 3428,
    "preview": "##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n## Created by: Wenbin Li\n## Date: De"
  },
  {
    "path": "dataset/StanfordCars/test.csv",
    "chars": 62158,
    "preview": "filename,label\r\n007313.jpg,90\r\n007333.jpg,90\r\n007329.jpg,90\r\n007364.jpg,90\r\n007310.jpg,90\r\n007374.jpg,90\r\n007345.jpg,90\r"
  },
  {
    "path": "dataset/StanfordCars/train.csv",
    "chars": 166036,
    "preview": "filename,label\r\n010677.jpg,130\r\n010690.jpg,130\r\n010708.jpg,130\r\n010694.jpg,130\r\n010688.jpg,130\r\n010746.jpg,130\r\n010685.j"
  },
  {
    "path": "dataset/StanfordCars/val.csv",
    "chars": 21962,
    "preview": "filename,label\r\n012146.jpg,148\r\n012194.jpg,148\r\n012189.jpg,148\r\n012172.jpg,148\r\n012123.jpg,148\r\n012160.jpg,148\r\n012193.j"
  },
  {
    "path": "dataset/StanfordDogs/test.csv",
    "chars": 222060,
    "preview": "filename,label\r\nn02096585_2828.jpg,n02096585-Boston_bull\r\nn02096585_1668.jpg,n02096585-Boston_bull\r\nn02096585_2177.jpg,n"
  },
  {
    "path": "dataset/StanfordDogs/train.csv",
    "chars": 531264,
    "preview": "filename,label\r\nn02089973_1458.jpg,n02089973-English_foxhound\r\nn02089973_556.jpg,n02089973-English_foxhound\r\nn02089973_1"
  },
  {
    "path": "dataset/StanfordDogs/val.csv",
    "chars": 154564,
    "preview": "filename,label\r\nn02087394_5552.jpg,n02087394-Rhodesian_ridgeback\r\nn02087394_7467.jpg,n02087394-Rhodesian_ridgeback\r\nn020"
  },
  {
    "path": "dataset/general_dataloader.py",
    "chars": 15476,
    "preview": "import os\nimport os.path as path\nimport json\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms "
  },
  {
    "path": "dataset/miniImageNet/test.csv",
    "chars": 384015,
    "preview": "filename,label\nn0193011200000001.jpg,n01930112\nn0193011200000004.jpg,n01930112\nn0193011200000005.jpg,n01930112\nn01930112"
  },
  {
    "path": "dataset/miniImageNet/train.csv",
    "chars": 1228815,
    "preview": "filename,label\nn0153282900000005.jpg,n01532829\nn0153282900000006.jpg,n01532829\nn0153282900000007.jpg,n01532829\nn01532829"
  },
  {
    "path": "dataset/miniImageNet/val.csv",
    "chars": 307215,
    "preview": "filename,label\nn0185567200000003.jpg,n01855672\nn0185567200000004.jpg,n01855672\nn0185567200000010.jpg,n01855672\nn01855672"
  },
  {
    "path": "models/backbone.py",
    "chars": 18282,
    "preview": "import torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nfrom torch.nn import init\nimport math\nimport pdb\n"
  },
  {
    "path": "models/classifier.py",
    "chars": 4419,
    "preview": "import torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport numpy as np\nimport torch.nn.functional as F\nimport u"
  },
  {
    "path": "models/network.py",
    "chars": 7060,
    "preview": "import torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport numpy as np\nimport functools\nimport random\nimport pd"
  },
  {
    "path": "results/SGD_Cosine_Lr0.01_DN4_ResNet12_Epoch_30_miniImageNet_84_84_5Way_1Shot/Test_results.txt",
    "chars": 9481,
    "preview": "========================================== Start Test ==========================================\n\n=> loading checkpoint "
  },
  {
    "path": "results/SGD_Cosine_Lr0.01_DN4_ResNet12_Epoch_30_miniImageNet_84_84_5Way_1Shot/opt_results.txt",
    "chars": 342459,
    "preview": "Namespace(adam=False, aug_shot_num=20, beta1=0.5, clamp_lower=-0.01, clamp_upper=0.01, classifier_model='DN4', cosine=Tr"
  },
  {
    "path": "results/SGD_Cosine_Lr0.01_DN4_ResNet12_Epoch_30_miniImageNet_84_84_5Way_1Shot/test_loss.txt",
    "chars": 750,
    "preview": "1.253673746883869233e+00\n1.175363154888153128e+00\n1.126533054888248397e+00\n1.116016005635261488e+00\n1.086189859867096041"
  },
  {
    "path": "results/SGD_Cosine_Lr0.01_DN4_ResNet12_Epoch_30_miniImageNet_84_84_5Way_1Shot/train_loss.txt",
    "chars": 750,
    "preview": "1.395911455857753669e+00\n1.235713104736805068e+00\n1.144744067624211326e+00\n1.076060488390922609e+00\n1.011772379270196032"
  },
  {
    "path": "results/SGD_Cosine_Lr0.01_DN4_ResNet12_Epoch_30_miniImageNet_84_84_5Way_1Shot/val_loss.txt",
    "chars": 750,
    "preview": "1.302756758093833955e+00\n1.206469823241233863e+00\n1.167996160447597465e+00\n1.110277335435152057e+00\n1.078763202548026934"
  },
  {
    "path": "results/SGD_Cosine_Lr0.01_DN4_ResNet12_Epoch_30_miniImageNet_84_84_5Way_5Shot/Test_results.txt",
    "chars": 9481,
    "preview": "========================================== Start Test ==========================================\n\n=> loading checkpoint "
  },
  {
    "path": "results/SGD_Cosine_Lr0.01_DN4_ResNet12_Epoch_30_miniImageNet_84_84_5Way_5Shot/opt_results.txt",
    "chars": 342528,
    "preview": "Namespace(adam=False, aug_shot_num=20, beta1=0.5, clamp_lower=-0.01, clamp_upper=0.01, classifier_model='DN4', cosine=Tr"
  },
  {
    "path": "results/SGD_Cosine_Lr0.01_DN4_ResNet12_Epoch_30_miniImageNet_84_84_5Way_5Shot/test_loss.txt",
    "chars": 750,
    "preview": "8.425347571074962572e-01\n7.676017894446849343e-01\n7.155224827527999487e-01\n6.975680709481238972e-01\n6.747762677669525422"
  },
  {
    "path": "results/SGD_Cosine_Lr0.01_DN4_ResNet12_Epoch_30_miniImageNet_84_84_5Way_5Shot/train_loss.txt",
    "chars": 750,
    "preview": "1.069172682148218145e+00\n8.350652296170592015e-01\n7.287498992070555737e-01\n6.581147375971079239e-01\n6.009942553445696278"
  },
  {
    "path": "results/SGD_Cosine_Lr0.01_DN4_ResNet12_Epoch_30_miniImageNet_84_84_5Way_5Shot/val_loss.txt",
    "chars": 750,
    "preview": "8.759202211499214075e-01\n8.044318869411944961e-01\n7.252530652880668782e-01\n6.950626986473799063e-01\n6.620363071709871594"
  },
  {
    "path": "results/SGD_Cosine_Lr0.05_DN4_Conv64F_Local_Epoch_30_miniImageNet_84_84_5Way_1Shot/Test_results.txt",
    "chars": 6486,
    "preview": "========================================== Start Test ==========================================\n\n=> loading checkpoint "
  },
  {
    "path": "results/SGD_Cosine_Lr0.05_DN4_Conv64F_Local_Epoch_30_miniImageNet_84_84_5Way_1Shot/Test_results_New.txt",
    "chars": 6923,
    "preview": "========================================== Start Test ==========================================\n\n=> loading checkpoint "
  },
  {
    "path": "results/SGD_Cosine_Lr0.05_DN4_Conv64F_Local_Epoch_30_miniImageNet_84_84_5Way_1Shot/opt_results.txt",
    "chars": 339454,
    "preview": "Namespace(adam=False, aug_shot_num=20, beta1=0.5, clamp_lower=-0.01, clamp_upper=0.01, classifier_model='DN4', cosine=Tr"
  },
  {
    "path": "results/SGD_Cosine_Lr0.05_DN4_Conv64F_Local_Epoch_30_miniImageNet_84_84_5Way_1Shot/test_loss.txt",
    "chars": 750,
    "preview": "1.588965280413627612e+00\n1.580805958509445164e+00\n1.553094785094261132e+00\n1.513077440857887312e+00\n1.484222897410392816"
  },
  {
    "path": "results/SGD_Cosine_Lr0.05_DN4_Conv64F_Local_Epoch_30_miniImageNet_84_84_5Way_1Shot/train_loss.txt",
    "chars": 750,
    "preview": "1.761601435124874104e+00\n1.562190455317497229e+00\n1.549602791261672996e+00\n1.530674070775508877e+00\n1.498226171290874431"
  },
  {
    "path": "results/SGD_Cosine_Lr0.05_DN4_Conv64F_Local_Epoch_30_miniImageNet_84_84_5Way_1Shot/val_loss.txt",
    "chars": 750,
    "preview": "1.575199795722961316e+00\n1.569603443980216895e+00\n1.561417399406433093e+00\n1.534682530045509363e+00\n1.505603410720825153"
  },
  {
    "path": "results/SGD_Cosine_Lr0.05_DN4_Conv64F_Local_Epoch_30_miniImageNet_84_84_5Way_5Shot/Test_results.txt",
    "chars": 6486,
    "preview": "========================================== Start Test ==========================================\n\n=> loading checkpoint "
  },
  {
    "path": "results/SGD_Cosine_Lr0.05_DN4_Conv64F_Local_Epoch_30_miniImageNet_84_84_5Way_5Shot/opt_results.txt",
    "chars": 339452,
    "preview": "Namespace(adam=False, aug_shot_num=20, beta1=0.5, clamp_lower=-0.01, clamp_upper=0.01, classifier_model='DN4', cosine=Tr"
  },
  {
    "path": "results/SGD_Cosine_Lr0.05_DN4_Conv64F_Local_Epoch_30_miniImageNet_84_84_5Way_5Shot/test_loss.txt",
    "chars": 750,
    "preview": "1.345906615495681802e+00\n1.103285467445850321e+00\n1.060474659025669064e+00\n9.981950015425682388e-01\n9.811021184921264737"
  },
  {
    "path": "results/SGD_Cosine_Lr0.05_DN4_Conv64F_Local_Epoch_30_miniImageNet_84_84_5Way_5Shot/train_loss.txt",
    "chars": 750,
    "preview": "1.496538908123969946e+00\n1.257998045879602511e+00\n1.146439006930589644e+00\n1.095848687407374289e+00\n1.059380434823036232"
  },
  {
    "path": "results/SGD_Cosine_Lr0.05_DN4_Conv64F_Local_Epoch_30_miniImageNet_84_84_5Way_5Shot/val_loss.txt",
    "chars": 750,
    "preview": "1.386973905324935918e+00\n1.162551319837570096e+00\n1.106109781026840100e+00\n1.060794358670711590e+00\n1.056823077559471136"
  },
  {
    "path": "results/test",
    "chars": 1,
    "preview": "\n"
  },
  {
    "path": "utils.py",
    "chars": 6080,
    "preview": "import torch\nimport os\nimport pdb\nimport scipy as sp\nimport scipy.stats\nimport numpy as np\nimport matplotlib\nimport matp"
  }
]

About this extraction

This page contains the full source code of the WenbinLee/DN4 GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 81 files (9.8 MB), approximately 2.6M tokens, and a symbol index with 199 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub repo-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!