[
  {
    "path": "README.md",
    "content": "# Deep Pyramid Convolutional Neural Networks for Text Categorization\n\n> This is a simple version of the paper *Deep Pyramid Convolutional Neural Networks for Text Categorization*.\n\n\n!['model'](./pictures/figure1.png)\n\n\nYou should rewrite the Dataset class in the data/dataset.py  \nand put your data in '/data/train' or any other directory.\n\nrun by\n\n```\npython main.py --lr=0.001 --epoch=20 --batch_size=64 --gpu=0 --seed=0 --label_num=2\t\t\t\n```\n\n## Evaluation \n> \tI run the model in a dataset about AD identify.  \n\tAnd make a comparition between the TextCNN, LSTM and our DPCNN. \n\t \nLoss of **TextCNN** and **LSTM**.  \n<img src=\"./pictures/textcnn.png\" width=\"350\" height=\"250\">  <img src=\"./pictures/lstm.png\" width=\"350\" height=\"250\"> \n\n\nLoss of **DPCNN**.  \n<img src=\"./pictures/dpcnn.png\" width=\"350\" height=\"250\">\n\n"
  },
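  {
    "path": "data/dataset_example.py",
    "content": "# -*- coding: utf-8 -*-\n\"\"\"A minimal, illustrative rewrite of TextDataset (see README).\n\nThis is only a sketch, not part of the original project: it assumes that\nevery file under data/train is a UTF-8 text file named like\n'<label>_<anything>.txt' (e.g. '1_0003.txt'), where the leading integer is\nthe class label, and that words are separated by whitespace. Adapt the\nloading and tokenization to your own data format.\n\"\"\"\nimport os\n\nimport torch\nfrom torch.utils import data\n\n\nclass ExampleTextDataset(data.Dataset):\n\n    def __init__(self, path, sentence_max_size=50, dict_size=50000):\n        self.sentence_max_size = sentence_max_size\n        self.dict_size = dict_size\n        self.samples = []    # list of word-index tensors, one per document\n        self.labels = []     # list of int class labels\n        for file_name in sorted(os.listdir(path)):\n            # Hypothetical naming convention: '<label>_*.txt'.\n            label = int(file_name.split('_')[0])\n            with open(os.path.join(path, file_name), encoding='utf-8') as f:\n                words = f.read().split()\n            # Toy word->index mapping: hash words into the dictionary range.\n            # Replace this with a real vocabulary lookup in practice.\n            indices = [hash(w) % self.dict_size for w in words]\n            # Pad or truncate to a fixed length so samples can be batched.\n            indices = indices[:self.sentence_max_size]\n            indices += [0] * (self.sentence_max_size - len(indices))\n            self.samples.append(torch.LongTensor(indices))\n            self.labels.append(label)\n\n    def __getitem__(self, index):\n        return self.samples[index], self.labels[index]\n\n    def __len__(self):\n        return len(self.samples)\n"
  },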
  {
    "path": "checkpoint/.placeholder",
    "content": ""
  },
  {
    "path": "config.py",
    "content": "# —*- coding: utf-8 -*-\n\n\nclass Config(object):\n    def __init__(self, word_embedding_dimension=100, word_num=20000,\n                 epoch=2000, sentence_max_size=40,\n                 learning_rate=0.01, batch_size=1,\n                 drop_out=0.5,\n                 dict_size=50000,\n                 bidirectional=False,\n                 doc_len=40):\n        self.word_embedding_dimension = word_embedding_dimension\n        self.word_num = word_num\n        self.epoch = epoch\n        self.sentence_max_size = sentence_max_size                   # 句子长度\n        self.lr = learning_rate\n        self.batch_size = batch_size\n        self.dict_size = dict_size\n        self.drop_out = drop_out\n        self.bidirectional = bidirectional\n        self.doc_len = doc_len\n\n"
  },
  {
    "path": "data/__init__.py",
    "content": "from .dataset import *"
  },
  {
    "path": "data/dataset.py",
    "content": "from torch.utils import data\nimport os\n\n\nclass TextDataset(data.Dataset):\n\n    def __init__(self, path):\n        self.file_name = os.listdir(path)\n\n    def __getitem__(self, index):\n        return self.train_set[index], self.labels[index]\n\n    def __len__(self):\n        return len(self.train_set)\n\n\n"
  },
  {
    "path": "main.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data as data\nfrom config import Config\nfrom model import DPCNN\nfrom data import TextDataset\nimport argparse\n\ntorch.manual_seed(1)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--lr', type=float, default=0.1)\nparser.add_argument('--batch_size', type=int, default=16)\nparser.add_argument('--epoch', type=int, default=20)\nparser.add_argument('--gpu', type=int, default=0)\nparser.add_argument('--out_channel', type=int, default=2)\nparser.add_argument('--label_num', type=int, default=2)\nparser.add_argument('--seed', type=int, default=1)\nargs = parser.parse_args()\n\n\ntorch.manual_seed(args.seed)\n\nif torch.cuda.is_available():\n    torch.cuda.set_device(args.gpu)\n\n# Create the configuration\nconfig = Config(sentence_max_size=50,\n                batch_size=args.batch_size,\n                word_num=11000,\n                label_num=args.label_num,\n                learning_rate=args.lr,\n                cuda=args.gpu,\n                epoch=args.epoch,\n                out_channel=args.out_channel)\n\ntraining_set = TextDataset(path='data/train')\n\ntraining_iter = data.DataLoader(dataset=training_set,\n                                batch_size=config.batch_size,\n                                num_workers=2)\n\n\nmodel = DPCNN(config)\nembeds = nn.Embedding(config.word_num, config.word_embedding_dimension)\n\nif torch.cuda.is_available():\n    model.cuda()\n    embeds = embeds.cuda()\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(model.parameters(), lr=config.lr)\n\ncount = 0\nloss_sum = 0\n# Train the model\nfor epoch in range(config.epoch):\n    for data, label in training_iter:\n        if config.cuda and torch.cuda.is_available():\n            data = data.cuda()\n            labels = label.cuda()\n\n        input_data = embeds(autograd.Variable(data))\n        out = model(input_data)\n        loss = criterion(out, autograd.Variable(label.float()))\n\n        loss_sum += loss.data[0]\n        count += 1\n\n        if count % 100 == 0:\n            print(\"epoch\", epoch, end='  ')\n            print(\"The loss is: %.5f\" % (loss_sum/100))\n\n            loss_sum = 0\n            count = 0\n\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n    # save the model in every epoch\n    model.save('checkpoints/epoch{}.ckpt'.format(epoch))\n\n"
  },
  {
    "path": "model/BasicModule.py",
    "content": "# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn as nn\n\n\nclass BasicModule(nn.Module):\n    def __init__(self):\n        super(BasicModule, self).__init__()\n        self.model_name = str(type(self))\n\n    def load(self, path):\n        self.load_state_dict(torch.load(path))\n\n    def save(self, path):\n        torch.save(self.state_dict(), path)"
  },
  {
    "path": "model/DPCNN.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .BasicModule import BasicModule\n\n\nclass DPCNN(BasicModule):\n    \"\"\"\n    DPCNN for sentences classification.\n    \"\"\"\n    def __init__(self, config):\n        super(DPCNN, self).__init__()\n        self.config = config\n        self.channel_size = 250\n        self.conv_region_embedding = nn.Conv2d(1, self.channel_size, (3, self.config.word_embedding_dimension), stride=1)\n        self.conv3 = nn.Conv2d(self.channel_size, self.channel_size, (3, 1), stride=1)\n        self.pooling = nn.MaxPool2d(kernel_size=(3, 1), stride=2)\n        self.padding_conv = nn.ZeroPad2d((0, 0, 1, 1))\n        self.padding_pool = nn.ZeroPad2d((0, 0, 0, 1))\n        self.act_fun = nn.ReLU()\n        self.linear_out = nn.Linear(2*self.channel_size, 2)\n\n    def forward(self, x):\n        batch = x.shape[0]\n\n        # Region embedding\n        x = self.conv_region_embedding(x)        # [batch_size, channel_size, length, 1]\n\n        x = self.padding_conv(x)                      # pad保证等长卷积，先通过激活函数再卷积\n        x = self.act_fun(x)\n        x = self.conv3(x)\n        x = self.padding_conv(x)\n        x = self.act_fun(x)\n        x = self.conv3(x)\n\n        while x.size()[-2] > 2:\n            x = self._block(x)\n\n        x = x.view(batch, 2*self.channel_size)\n        x = self.linear_out(x)\n\n        return x\n\n    def _block(self, x):\n        # Pooling\n        x = self.padding_pool(x)\n        px = self.pooling(x)\n\n        # Convolution\n        x = self.padding_conv(px)\n        x = F.relu(x)\n        x = self.conv3(x)\n\n        x = self.padding_conv(x)\n        x = F.relu(x)\n        x = self.conv3(x)\n\n        # Short Cut\n        x = x + px\n\n        return x\n\n    def predict(self, x):\n        self.eval()\n        out = self.forward(x)\n        predict_labels = torch.max(out, 1)[1]\n        self.train(mode=True)\n        return predict_labels\n\n"
  },
  {
    "path": "model/__init__.py",
    "content": ""
  }
]