[
  {
    "path": ".gitignore",
    "content": "/data/\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2020 Fan Jingbo\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# pytorch_deephash\n\n## Introduction\n\nThis is the Pytorch implementation of [Deep Learning of Binary Hash Codes for Fast Image Retrieval](https://github.com/kevinlin311tw/caffe-cvprw15), and can achieve more than 93% mAP in CIFAR10 dataset.\n\n## Environment\n\n> Pytorch 1.4.0\n>\n> torchvision 0.5.0\n>\n> tqdm\n>\n> numpy\n\n\n## Training\n\n```bash\npython train.py\n```\n\nYou will get trained models in model folder by default, and models' names are their test accuracy.\n\n## Evaluation\n\n```bash\npython evaluate.py --pretrained {your saved model name in model folder by default}\n```\n\n## Tips\n\n1. If using Windows, keep num_workers zero\n\n2. There are some other args, which you can see by adding '-h' or reading the code.\n"
  },
  {
    "path": "evaluate.py",
    "content": "import argparse\nimport os\nfrom timeit import time\n\nimport numpy as np\nimport torch\nimport torch.optim.lr_scheduler\nfrom torchvision import datasets, transforms\nfrom tqdm import tqdm\n\nfrom net import AlexNetPlusLatent\n\nparser = argparse.ArgumentParser(description='Deep Hashing evaluate mAP')\nparser.add_argument('--pretrained', type=float, default=0, metavar='pretrained_model',\n                    help='loading pretrained model(default = None)')\nparser.add_argument('--bits', type=int, default=48, metavar='bts',\n                    help='binary bits')\nargs = parser.parse_args()\n\n\ndef load_data():\n    transform_train = transforms.Compose(\n        [transforms.Resize(227),\n         transforms.ToTensor(),\n         transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])\n    transform_test = transforms.Compose(\n        [transforms.Resize(227),\n         transforms.ToTensor(),\n         transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])\n    trainset = datasets.CIFAR10(root='./data', train=True, download=True,\n                                transform=transform_train)\n    trainloader = torch.utils.data.DataLoader(trainset, batch_size=100,\n                                              shuffle=False, num_workers=0)\n\n    testset = datasets.CIFAR10(root='./data', train=False, download=True,\n                               transform=transform_test)\n    testloader = torch.utils.data.DataLoader(testset, batch_size=100,\n                                             shuffle=False, num_workers=0)\n    return trainloader, testloader\n\n\ndef binary_output(dataloader):\n    net = AlexNetPlusLatent(args.bits)\n    net.load_state_dict(torch.load('./model/{}'.format(args.pretrained)))\n    device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n    print(\"Use device: \" + str(device))\n    net.to(device)\n    full_batch_output = torch.cuda.FloatTensor()\n    full_batch_label = torch.cuda.LongTensor()\n    with torch.no_grad():\n        for batch_idx, (inputs, targets) in enumerate(dataloader):\n            inputs, targets = inputs.to(device), targets.to(device)\n            outputs, _ = net(inputs)\n            full_batch_output = torch.cat((full_batch_output, outputs.data), 0)\n            full_batch_label = torch.cat((full_batch_label, targets.data), 0)\n        return torch.round(full_batch_output), full_batch_label\n\n\ndef evaluate(trn_binary, trn_label, tst_binary, tst_label):\n    classes = np.max(tst_label) + 1\n    for i in range(classes):\n        if i == 0:\n            tst_sample_binary = tst_binary[np.random.RandomState(seed=i).permutation(np.where(tst_label == i)[0])[:100]]\n            tst_sample_label = np.array([i]).repeat(100)\n            continue\n        else:\n            tst_sample_binary = np.concatenate([tst_sample_binary, tst_binary[np.random.RandomState(seed=i).permutation(np.where(tst_label==i)[0])[:100]]])\n            tst_sample_label = np.concatenate([tst_sample_label, np.array([i]).repeat(100)])\n    query_times = tst_sample_binary.shape[0]\n    trainset_len = trn_binary.shape[0]\n    AP = np.zeros(query_times)\n    precision_radius = np.zeros(query_times)\n    Ns = np.arange(1, trainset_len + 1)\n    sum_tp = np.zeros(trainset_len)\n    total_time_start = time.time()\n    with tqdm(total=query_times, desc=\"Query\") as pbar:\n        for i in range(query_times):\n            query_label = tst_sample_label[i]\n            query_binary = tst_sample_binary[i, :]\n            query_result = np.count_nonzero(query_binary != trn_binary, axis=1)    # don't need to divide binary length\n            sort_indices = np.argsort(query_result)\n            buffer_yes = np.equal(query_label, trn_label[sort_indices]).astype(int)\n            P = np.cumsum(buffer_yes) / Ns\n            precision_radius[i] = P[np.where(np.sort(query_result) > 2)[0][0]-1]\n            AP[i] = np.sum(P * buffer_yes) / sum(buffer_yes)\n            sum_tp = sum_tp + np.cumsum(buffer_yes)\n            pbar.set_postfix({'Average Precision': '{0:1.5f}'.format(AP[i])})\n            pbar.update(1)\n    pbar.close()\n    mAP = np.mean(AP)\n    precision_at_k = sum_tp / Ns / query_times\n    index = [100, 200, 400, 600, 800, 1000]\n    index = [i - 1 for i in index]\n    print('precision at k:', precision_at_k[index])\n    print('precision within Hamming radius 2:', np.mean(precision_radius))\n    map = np.mean(AP)\n    print('mAP:', map)\n    print('Total query time:', time.time() - total_time_start)\n\n\nif __name__ == \"__main__\":\n    if os.path.exists('./result/train_binary') and os.path.exists('./result/train_label') and \\\n       os.path.exists('./result/test_binary') and os.path.exists('./result/test_label') and args.pretrained == 0:\n        train_binary = torch.load('./result/train_binary')\n        train_label = torch.load('./result/train_label')\n        test_binary = torch.load('./result/test_binary')\n        test_label = torch.load('./result/test_label')\n\n    else:\n        trainloader, testloader = load_data()\n        train_binary, train_label = binary_output(trainloader)\n        test_binary, test_label = binary_output(testloader)\n        if not os.path.isdir('result'):\n            os.mkdir('result')\n        torch.save(train_binary, './result/train_binary')\n        torch.save(train_label, './result/train_label')\n        torch.save(test_binary, './result/test_binary')\n        torch.save(test_label, './result/test_label')\n\n    train_binary = train_binary.cpu().numpy()\n    train_binary = np.asarray(train_binary, np.int32)\n    train_label = train_label.cpu().numpy()\n    test_binary = test_binary.cpu().numpy()\n    test_binary = np.asarray(test_binary, np.int32)\n    test_label = test_label.cpu().numpy()\n\n    evaluate(train_binary, train_label, test_binary, test_label)\n\n\n"
  },
  {
    "path": "net.py",
    "content": "import os\nimport torch.nn as nn\nfrom torchvision import models\n\nos.environ['TORCH_HOME'] = 'models'\nalexnet_model = models.alexnet(pretrained=True)\n\n\nclass AlexNetPlusLatent(nn.Module):\n    def __init__(self, bits):\n        super(AlexNetPlusLatent, self).__init__()\n        self.bits = bits\n        self.features = nn.Sequential(*list(alexnet_model.features.children()))\n        self.remain = nn.Sequential(*list(alexnet_model.classifier.children())[:-1])\n        self.Linear1 = nn.Linear(4096, self.bits)\n        self.sigmoid = nn.Sigmoid()\n        self.Linear2 = nn.Linear(self.bits, 10)\n\n    def forward(self, x):\n        x = self.features(x)\n        x = x.view(x.size(0), 256 * 6 * 6)\n        x = self.remain(x)\n        x = self.Linear1(x)\n        features = self.sigmoid(x)\n        result = self.Linear2(features)\n        return features, result\n"
  },
  {
    "path": "train.py",
    "content": "import argparse\nimport math\nimport os\nimport shutil\n\nimport torch\nimport torch.nn as nn\nimport torch.optim.lr_scheduler\nfrom torchvision import datasets, transforms\nfrom tqdm import tqdm\n\nfrom net import AlexNetPlusLatent\n\nparser = argparse.ArgumentParser(description='Deep Hashing')\nparser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n                    help='learning rate (default: 0.01)')\nparser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n                    help='SGD momentum (default: 0.9)')\nparser.add_argument('--epoch', type=int, default=128, metavar='epoch',\n                    help='epoch')\nparser.add_argument('--pretrained', type=str, default=0, metavar='pretrained_model',\n                    help='loading pretrained model(default = None)')\nparser.add_argument('--bits', type=int, default=48, metavar='bts',\n                    help='binary bits')\nparser.add_argument('--path', type=str, default='model', metavar='P',\n                    help='path directory')\nargs = parser.parse_args()\n\n\ndef init_dataset():\n    transform_train = transforms.Compose(\n        [transforms.Resize(256),\n         transforms.RandomCrop(227),\n         transforms.RandomHorizontalFlip(),\n         transforms.ToTensor(),\n         transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])\n    transform_test = transforms.Compose(\n        [transforms.Resize(227),\n         transforms.ToTensor(),\n         transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])\n    trainset = datasets.CIFAR10(root='./data', train=True, download=True,\n                                transform=transform_train)\n    trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,\n                                              shuffle=True, num_workers=0)\n\n    testset = datasets.CIFAR10(root='./data', train=False, download=True,\n                               transform=transform_test)\n    testloader = torch.utils.data.DataLoader(testset, batch_size=100,\n                                             shuffle=True, num_workers=0)\n    return trainloader, testloader\n\n\ndef train(epoch_num):\n    print('\\nEpoch: %d' % epoch_num)\n    net.train()\n    train_loss = 0\n    correct = 0\n    total = 0\n    with tqdm(total=math.ceil(len(trainloader)), desc=\"Training\") as pbar:\n        for batch_idx, (inputs, targets) in enumerate(trainloader):\n            inputs, targets = inputs.to(device), targets.to(device)\n            _, outputs = net(inputs)\n            loss = softmaxloss(outputs, targets)\n            optimizer4nn.zero_grad()\n            loss.backward()\n            optimizer4nn.step()\n            train_loss += softmaxloss(outputs, targets).item()\n            _, predicted = torch.max(outputs.data, 1)\n            total += targets.size(0)\n            correct += predicted.eq(targets.data).sum()\n            pbar.set_postfix({'loss': '{0:1.5f}'.format(loss), 'accurate': '{:.2%}'.format(correct.item() / total)})\n            pbar.update(1)\n    pbar.close()\n    return train_loss / (batch_idx + 1)\n\n\ndef test():\n    net.eval()\n    with torch.no_grad():\n        test_loss = 0\n        correct = 0\n        total = 0\n        with tqdm(total=math.ceil(len(testloader)), desc=\"Testing\") as pbar:\n            for batch_idx, (inputs, targets) in enumerate(testloader):\n                inputs, targets = inputs.to(device), targets.to(device)\n                _, outputs = net(inputs)\n                loss = softmaxloss(outputs, targets)\n                test_loss += loss.item()\n                _, predicted = torch.max(outputs.data, 1)\n                total += targets.size(0)\n                correct += predicted.eq(targets.data).sum()\n                pbar.set_postfix({'loss': '{0:1.5f}'.format(loss), 'accurate': '{:.2%}'.format(correct.item() / total)})\n                pbar.update(1)\n        pbar.close()\n        acc = 100 * int(correct) / int(total)\n        if epoch == args.epoch:\n            print('Saving')\n            if not os.path.isdir('{}'.format(args.path)):\n                os.mkdir('{}'.format(args.path))\n            torch.save(net.state_dict(), './{}/{}'.format(args.path, acc))\n\n\nif __name__ == '__main__':\n    torch.cuda.empty_cache()  # When using windows, this line is needed\n    trainloader, testloader = init_dataset()\n    net = AlexNetPlusLatent(args.bits)\n    device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n    print(\"Use device: \" + str(device))\n    net.to(device)\n    softmaxloss = nn.CrossEntropyLoss().cuda()\n    optimizer4nn = torch.optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=0.0005)\n    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer4nn, milestones=[args.epoch], gamma=0.1)\n    best_acc = 0\n    start_epoch = 1\n    if args.pretrained:\n        net.load_state_dict(torch.load('./{}/{}'.format(args.path, args.pretrained)))\n        test()\n    else:\n        if os.path.isdir('{}'.format(args.path)):\n            shutil.rmtree('{}'.format(args.path))\n        for epoch in range(start_epoch, start_epoch + args.epoch):\n            train(epoch)\n            test()\n            scheduler.step(epoch)\n\n"
  }
]