[
  {
    "path": ".gitignore",
    "content": ".idea\n*.pyc\n*~\n*.swp"
  },
  {
    "path": "classifier/LogisticRegression.py",
    "content": "# -*- coding: utf-8 -*-\n# @Date    : 2016/10/16\n# @Author  : hrwhisper\nimport random\nimport sklearn\nimport numpy as np\nfrom sklearn.utils.extmath import safe_sparse_dot\n\n\nclass LogisticRegression(sklearn.base.BaseEstimator):\n    def __init__(self, alpha=0.1, max_iter=100):  # 0.02 200 0.9927425\n        self.alpha = alpha\n        self.max_iter = max_iter\n        self.weights = None\n\n    def _sigmoid(self, x):\n        return .5 * (1 + np.tanh(.5 * x))  # 1.0 / (1 + np.exp(-x))\n\n    def fit(self, X, y):\n        \"\"\"\n        :param X: sparse matrix(n_samples,n_features) the training feature vector\n        :param y:  array-like(n_samples,) Target vector relative to X.\n        :return: self\n        \"\"\"\n        m, n = X.shape\n        target = np.array(y).reshape((m, 1))\n        self.weights = np.ones((n, 1))\n        for _ in range(self.max_iter):\n            h = self._sigmoid(safe_sparse_dot(X, self.weights))  # X * weights\n            error = target - h\n            self.weights += self.alpha * safe_sparse_dot(X.T, error)  # alpha * X.T * error\n        return self\n\n    def predict(self, X):\n        return (self._sigmoid(safe_sparse_dot(X, self.weights)) > 0.5).ravel().astype('int')\n\n\nif __name__ == '__main__':\n    a = [1, 2, 3]\n    b = [4, 5, 6]\n    a = np.array(a)\n    b = np.array(b)\n    print(a, b)\n    a = a.reshape((-1, 1))\n    print(a)\n    print(a - b)\n"
  },
  {
    "path": "classifier/NaiveBayesian.py",
    "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time    : 2016/11/29 22:04\n# @Author  : wqs\n# @File    : NaiveBayesian.py\n\nimport sklearn\nimport numpy as np\nfrom scipy.sparse import csr_matrix\nfrom sklearn.utils.extmath import safe_sparse_dot\nfrom sklearn.preprocessing import LabelBinarizer\n\n\nclass NaiveBayesian(sklearn.base.BaseEstimator):\n    def __init__(self, alpha=1.0):\n        self.alpha = alpha\n        self.classes = None\n        self.class_log_prior = None\n        self.feature_log_prob = None\n\n    def fit(self, X, y):\n        _, n = X.shape\n        labelbin = LabelBinarizer()\n        Y = labelbin.fit_transform(y)\n        self.classes = labelbin.classes_\n        Y = np.concatenate((1 - Y, Y), axis=1).astype(np.float64)\n\n        class_count = np.zeros(2, dtype=np.float64)\n        feature_count = np.zeros((2, n), dtype=np.float64)\n\n        feature_count += safe_sparse_dot(Y.T, X)  # count frequency by y.T * X\n        class_count += Y.sum(axis=0)\n\n        smoothed_fc = feature_count + self.alpha\n        smoothed_cc = smoothed_fc.sum(axis=1)\n        self.feature_log_prob = (np.log(smoothed_fc) - np.log(smoothed_cc.reshape(-1, 1)))\n        # self.class_log_prior = np.zeros(2) - np.log(2)\n        self.class_log_prior = np.log(class_count / sum(class_count))\n        return self\n\n    def predict(self, X):\n        jll = safe_sparse_dot(X, self.feature_log_prob.T) + self.class_log_prior\n        return self.classes[np.argmax(jll, axis=1)]\n"
  },
  {
    "path": "classifier/Perceptron.py",
    "content": "# -*- coding: utf-8 -*-\n# @Date    : 2016/11/1\n# @Author  : hrwhisper\n\nimport numpy as np\nimport sklearn\nfrom sklearn.utils.extmath import safe_sparse_dot\n\n\nclass Perceptron(sklearn.base.BaseEstimator):\n    def __init__(self, alpha=0.1, max_iter=100):\n        self.threshold = 0.5\n        self.alpha = alpha\n        self.max_iter = max_iter\n        self.weights = None\n\n    def fit(self, X, y):\n        \"\"\"\n            :param X: sparse matrix(n_samples,n_features) the training feature vector\n            :param y:  array-like(n_samples,) Target vector relative to X.\n            :return: self\n        \"\"\"\n        m, n = X.shape\n        target = np.array(y).reshape((m, 1))\n        self.weights = np.ones((n, 1))\n        for _ in range(self.max_iter):\n            h = safe_sparse_dot(X, self.weights) > self.threshold\n            error = target - h\n            self.weights += self.alpha * safe_sparse_dot(X.T, error)\n        return self\n\n    def predict(self, X):\n        return (safe_sparse_dot(X, self.weights) > self.threshold).ravel().astype('int')\n"
  },
  {
    "path": "classifier/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n# @Date    : 2016/10/16\n# @Author  : hrwhisper"
  },
  {
    "path": "judgeSpamMessage.py",
    "content": "# -*- coding: utf-8 -*-\n# @Date    : 2016/12/3\n# @Author  : hrwhisper\nimport codecs\nimport sys\nfrom collections import Counter\nfrom optparse import OptionParser\nfrom multiprocessing import Pool\nimport jieba\nfrom sklearn.externals import joblib\nfrom model_manage import BowTransform\nimport time\n\n\ndef token(x):\n    return Counter(jieba.lcut(x))\n\n\nif __name__ == \"__main__\":\n    parser = OptionParser()\n    parser.add_option('-c', '--classifier', dest=\"cls_name\", type='string', default='p',\n                      help=\"define the classifier you want to use:  \\t\\t\\n\"\n                           \"p  => Perceptron,\\t\\t\\t\\t\\t\\n\"\n                           \"lr => LogisticRegression,\\t\\t\\t\\t\\t\\n\"\n                           \"nb => NaiveBayesian,\\t\\t\\t\\t\\t\\n\"\n                           \"svm => SVM(sklearn),\\t\\t\\t\\t\\t\\n\"\n                           \"lrs => LogisticRegression(sklearn),\\t\\t\\t\\t\\t\\n\"\n                           \"nbs => NaiveBayesian(sklearn),\")\n\n    parser.add_option('-i', '--input', dest=\"input_filename\", type='string', default='./data/不带标签短信.txt',\n                      help=\"input file name\")\n\n    parser.add_option('-o', '--output', dest=\"output_filename\", type='string', default='./data/result.txt',\n                      help=\"output file name\")\n\n    options, args = parser.parse_args()\n    #\n    classifiers = {\n        'p': './model/Perceptron.pkl',  # 0.1 2000\n        'lr': './model/LogisticRegression.pkl',  # 0.2 2000\n        'nb': './model/NaiveBayesian.pkl',  # 0.00241\n        'svm': './model/SVM_sklearn.pkl',\n        'lrs': './model/Logistic_sklearn.pkl',\n        'nbs': './model/Bayes_sklearn.pkl'\n    }\n    #\n    cls_name = options.cls_name\n    file_path = options.input_filename\n    out_path = options.output_filename\n\n    if cls_name not in classifiers.keys():\n        print('check your classifiers name, you can use -h for help')\n        sys.exit()\n\n    start = time.time()\n    jieba.initialize()\n    try:\n        with codecs.open(file_path, 'r', 'utf-8') as f:\n            data = [line.strip() for line in f.read().split('\\n')]\n            if data[-1] == '':\n                data.pop()\n    except FileNotFoundError as e:\n        print('Please check your input filename')\n        sys.exit()\n\n    # data = [Counter(d) for d in map(jieba.cut, data)]\n    data = Pool().map(token, data)\n    print('end token in {}\\n'.format(time.time() - start))\n    cv = BowTransform.load_vsm()\n    data = cv.transform(data)\n    print('end bow in {}\\n'.format(time.time() - start))\n    cls = joblib.load(classifiers[cls_name])\n    predicted = cls.predict(data)\n\n    # print(predicted)\n    with open(out_path, 'w+') as f:\n        for x in predicted:\n            f.write(str(x) + '\\n')\n    print('task complete. total time: {}\\n using {}'.format(time.time() - start, cls))\n"
  },
  {
    "path": "model/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n# @Date    : 2016/11/30\n# @Author  : hrwhisper"
  },
  {
    "path": "model_manage.py",
    "content": "# -*- coding: utf-8 -*-\n# @Date    : 2016/12/1\n# @Author  : hrwhisper\nfrom sklearn.externals import joblib\n\n\nclass BowTransform(object):\n    default_path = './model/vsm.pkl'\n\n    @staticmethod\n    def save_vsm(model, filename=None):\n        joblib.dump(model, filename if filename else BowTransform.default_path)\n\n    @staticmethod\n    def load_vsm(filename=None):\n        return joblib.load(filename if filename else BowTransform.default_path)\n\n\nclass TrainData(object):\n    default_path = './model/train_data.pkl'\n\n    @staticmethod\n    def save(model, filename=None):\n        joblib.dump(model, filename if filename else TrainData.default_path)\n\n    @staticmethod\n    def load(filename=None):\n        with open('./data/tags_token_results' + '_tag') as f:\n            return joblib.load(filename if filename else TrainData.default_path), list(map(int, f.read().split('\\n')[:-1]))\n"
  },
  {
    "path": "readme.md",
    "content": "\n## 环境 ##\n\n- jieba\n  - pip install jieba \n- sklearn\n  - pip install -U scikit-learn\n- python3.5\n\n\n\n## 运行方法\n- -c classfile_name 命令用来指定要用的分类器的名字：\n\n| **名字**  | **对应的分类器**                  |\n| ------- | --------------------------- |\n| **p**   | Perceptron                  |\n| **lr**  | LogisticRegression          |\n| **nb**  | NaiveBayesian               |\n| **svm** | SVM(sklearn)                |\n| **lrs** | LogisticRegression(sklearn) |\n| **nbs** | NaiveBayesian(sklearn)      |\n\n- -i filename 为指定输入的短信文件名（该文件一行为一条短信）\n\n\n- -o filename 为指定输出的结果文件（结果用0和1表示，1为垃圾短信，每一行对应输入文件的短信结果）\n\n例如：\n\n```\npython judgeSpamMessage.py -c svm -i ./data/不带标签短信.txt -o ./data/result.txt\n```\n\n上述的命令指定了使用svm分类器，判断./data/不带标签短信.txt中的短信是否为垃圾短信，并将结果输出到./data/result.txt中。\n\n\n## 训练啥的\n- 首先运行token_and_save_to_file.py，分词保存结果\n- test.py 中有交叉验证等方法\n\n\n\n## 文件说明\n\n文件夹解释如下：\n\n| **文件夹名**       | **作用**      |\n| -------------- | ----------- |\n| **classifier** | 分类器代码存放的文件夹 |\n| **data**       | 数据文件        |\n| **model**      | 保存的模型       |\n\n文件的解释如下：\n\n| **文件夹名**                         | **作用**                                   |\n| -------------------------------- | ---------------------------------------- |\n| classifier/LogisticRegression.py | 本组实现的逻辑回归分类器源代码                          |\n| classifier/NaiveBayesian.py      | 本组实现的朴素贝叶斯分类器源代码                         |\n| classifier/Perceptron.py         | 本组实现的感知器分类器源代码                           |\n| data/tags_token_results          | 带标签短信分词保存结果,token_and_save_to_file.py的生成的 |\n| data/ tags_token_results_tag     | 带标签短信的类别                                 |\n| data/不带标签短信.txt                  | 不带标签短信数据集                                |\n| data/带标签短信.txt                   | 带标签短信数据集                                 |\n| model/ Bayes_sklearn.pkl         | sklearn的贝叶斯分类器训练结果保存                     |\n| model/ Logistic_sklearn.pkl      | sklearn的逻辑回归分类器训练结果保存                    |\n| model/ LogisticRegression.pkl    | 本组实现的逻辑回归分类器训练结果保存                       |\n| model/ NaiveBayesian.pkl         | 本组实现的贝叶斯分类器训练结果保存                        |\n| model/ Perceptron.pkl            | 本组实现的感知器训练结果保存                           |\n| model/ SVM_sklearn.pkl           | sklearn的SVM分类器结果保存                       |\n| model/ train_data.pkl            | 带标签的短信的BOW表示结果                           |\n| model/ vsm.pkl                   | 用于将新文档表示为BOW的训练完的类保存                     |\n| judgeSpamMessage.py              | 用于判断输入的短信是否是垃圾短信                         |\n| model_manage.py                  | 用于读入保存模型                                 |\n| readme.md                        | 说明文件                                     |\n| test.py                          | 测试文件                                     |\n| token_and_save_to_file.py        | 分词并保存带标签的短信的结果，方便训练                      |"
  },
  {
    "path": "test.py",
    "content": "# -*- coding: utf-8 -*-\n# @Date    : 2016/10/16\n# @Author  : hrwhisper\nimport codecs\nfrom collections import Counter\nimport datetime\n\nfrom sklearn.externals import joblib\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.feature_extraction import DictVectorizer\nimport numpy as np\nfrom sklearn import metrics, naive_bayes, svm, linear_model\nfrom classifier.LogisticRegression import LogisticRegression\nfrom classifier.NaiveBayesian import NaiveBayesian\nfrom classifier.Perceptron import Perceptron\nfrom model_manage import BowTransform, TrainData\n\n\ndef read_train_data():\n    file_path = './data/tags_token_results'\n    with codecs.open(file_path, 'r', 'utf-8') as f:\n        data = [line.strip().split() for line in f.read().split('\\n')]\n\n    with open(file_path + '_tag') as f:\n        return data[:-1], list(map(int, f.read().split('\\n')[:-1]))\n\n\ndef _test(classifier, test_data, test_target):\n    predicted = classifier.predict(test_data)\n    print(predicted.shape)  # 160 1\n\n    # print(sum(predicted == test_target), len(test_target), np.mean(predicted == test_target))\n    print(\"Classification report for classifier %s:\\n%s\\n\" % (\n        classifier, metrics.classification_report(test_target, predicted, digits=4)))\n    print(\"Confusion matrix:\\n%s\" % metrics.confusion_matrix(test_target, predicted))\n    print(precision_recall_fscore_support(test_target, predicted))\n\n\ndef test_one(cls, use_save_data=True, train_cls=False, save_cls_path=None):\n    if use_save_data:\n        data, target = TrainData.load()\n    else:\n        data, target = read_train_data()\n        # data_len = int(len(data) * 0.001)\n        # data, target = data[:data_len], target[:data_len]\n\n        data = [Counter(d) for d in data]  # 每一行为一个短信， 值就是TF\n        # print(data[0])\n        v = DictVectorizer()\n        print('fit transform')\n        data = v.fit_transform(data)  # 稀疏矩阵表示sparse matrix,词编好号\n        TrainData.save(data)\n\n    # print(data[0])\n    data_len = data.shape[0]\n    print('data', data.shape[1])\n    end = int(0.8 * data_len)\n    train_data, train_target = data[:end], target[:end]\n    test_data, test_target = data[end:], target[end:]\n\n    if train_cls:\n        print('train classifier....')\n        cls = cls.fit(train_data, train_target)\n        print('train classifier complete')\n\n    _test(cls, test_data, test_target)\n\n    if save_cls_path:\n        joblib.dump(cls, save_cls_path)\n\n\ndef cross_validation():\n    data, target = TrainData.load()\n\n    classifiers = {\n        'Logistic by yhr': LogisticRegression(alpha=0.2, max_iter=2000),\n        'Perceptron by yhr': Perceptron(alpha=0.1, max_iter=2000),\n        'Bayesian by wqs': NaiveBayesian(),\n        'Bernoulli Bayes from sklearn': naive_bayes.BernoulliNB(),\n        'svm from sklearn': svm.LinearSVC(),\n        'Logistic from sklearn': linear_model.LogisticRegression(),\n        # 'decision tree':tree.DecisionTreeClassifier(),\n    }\n\n    for name, classifier in classifiers.items():\n        this_scores = cross_val_score(classifier, data, target, cv=5, scoring='accuracy')\n        print(name)\n        print(this_scores)\n        print(np.mean(this_scores))\n        print(' ------------------------  \\n\\n')\n\n\ndef test_parameter():\n    data, target = TrainData.load()\n    max_score = 0\n    max_alpha = max_iter = 0\n    print('Perceptron')\n    start = datetime.datetime.now()\n    for alpha in [0.01, 0.1, 0.2]:  # 0.3 100 0.99133\n        for iter in [100, 2000]:\n            cls = Perceptron(alpha=alpha, max_iter=iter)\n            this_scores = cross_val_score(cls, data, target, cv=5, scoring='accuracy')\n            print(this_scores)\n            cur = np.mean(this_scores)\n            print(alpha, iter, cur)\n            print(' ------------------------  \\n\\n')\n            if cur > max_score:\n                max_score = cur\n                max_alpha, max_iter = alpha, iter\n            print('current_max: ', max_score, max_alpha, max_iter)\n    print((datetime.datetime.now() - start))\n\n\nif __name__ == '__main__':\n    # start = datetime.datetime.now()\n    # test_one(LogisticRegression(alpha=0.1, max_iter=2000),\n    #          train_cls=True, save_cls_path='./model/LogisticRegression.pkl')\n    # print((datetime.datetime.now() - start))\n    #\n    # classifiers = {\n    #     # 'Logistic by yhr': LogisticRegression(alpha=0.01, max_iter=200),\n    #     # 'Perceptron by yhr': Perceptron(),\n    #     # 'Bayesian by wqs': NaiveBayesian(),\n    #     # 'Bayes_sklearn': naive_bayes.BernoulliNB(),\n    #     # 'SVM_sklearn': svm.LinearSVC(),\n    #     # 'Logistic_sklearn': linear_model.LogisticRegression(),\n    #     # 'decision tree':tree.DecisionTreeClassifier(),\n    # }\n    # for name, cls in classifiers.items():\n    #     test_one(cls, train_cls=True, save_cls_path='./model/' + name + '.pkl')\n\n\n    #\n    # test_one(LogisticRegression(alpha=0.2, max_iter=2000), train_cls=True,\n    #          save_cls_path='./model/LogisticRegression.pkl')\n    # test_one(Perceptron(alpha=0.1, max_iter=2000), train_cls=True, save_cls_path='./model/Perceptron.pkl')\n    #\n    classifiers = {\n        'p': './model/Perceptron.pkl',  # 0.1 2000\n        'lr': './model/LogisticRegression.pkl',  # 0.2 2000\n        'nb': './model/NaiveBayesian.pkl',  # 0.00241\n        'svm': './model/SVM_sklearn.pkl',\n        'lrs': './model/Logistic_sklearn.pkl',\n        'nbs': './model/Bayes_sklearn.pkl'\n    }\n\n    for _path in classifiers.values():\n        cls = joblib.load(_path)\n        test_one(cls)\n\n        # cross_validation()\n        # test_one(LogisticRegression(max_iter=100), train_cls=True)\n        # test_parameter()\n"
  },
  {
    "path": "test_jieba.py",
    "content": "# -*- coding: utf-8 -*-\n# @Date    : 2016/12/5\n# @Author  : hrwhisper\nimport codecs\nimport datetime\nfrom collections import Counter\nimport jieba\nfrom multiprocessing import Pool\n\n\ndef test_not_tag_data():\n    with codecs.open('./data/不带标签短信.txt', 'r', 'utf-8') as f:\n        data = [line.strip() for line in f.read().split('\\n')]\n        if data[-1] == '':\n            data.pop()\n\n    return data\n\n\ndef f(x):\n    return Counter(jieba.cut(x))\n\n\nif __name__ == \"__main__\":\n    # jieba.enable_parallel(2)\n    start = datetime.datetime.now()\n    data = test_not_tag_data()\n    print('read data', datetime.datetime.now() - start)\n    # data = [Counter(d) for d in map(jieba.cut, data)]\n    res = Pool(4).map(f, data)\n    # print(res)\n    print('jieba ', datetime.datetime.now() - start)\n\n    # cv = BowTransform.load_vsm()\n    # data = cv.transform(data)\n    # print('transform', datetime.datetime.now() - start)\n    # cls = joblib.load(classifiers[cls_name])\n    # print('load', datetime.datetime.now() - start)\n    # predicted = cls.predict(data)\n    #\n    # print(predicted)\n    # with open('./data/result.txt', 'w+') as f:\n    #     for x in predicted:\n    #         f.write(str(x) + '\\n')\n\n    # print(datetime.datetime.now() - start, ' %s' % cls)\n"
  },
  {
    "path": "test_judge.py",
    "content": "# -*- coding: utf-8 -*-\n# @Date    : 2016/12/3\n# @Author  : hrwhisper\nimport codecs\nimport sys\nfrom collections import Counter\nfrom multiprocessing import Pool\nfrom optparse import OptionParser\nimport jieba\nfrom sklearn import metrics\nfrom sklearn.externals import joblib\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom model_manage import BowTransform\nfrom test_jieba import test_not_tag_data\n\n\ndef test_data():\n    def read_train_data():\n        file_path = './data/带标签短信.txt'\n        target = []\n        data = []\n        with codecs.open(file_path, 'r', 'utf-8') as f:\n            for line in f.read().split('\\n')[:-1]:\n                line = line.strip()\n                target.append(line[0])\n                data.append(line[1:].lstrip())\n        return data, target\n\n    data, target = read_train_data()\n    n = len(data) - int(0.8 * len(data))\n    return data[-n:], list(map(int, target[-n:]))\n\n\ndef f(x):\n    return Counter(jieba.cut(x))\n\n\nif __name__ == \"__main__\":\n    classifiers = {\n        'p': './model/Perceptron.pkl',  # 0.1 2000\n        'lr': './model/LogisticRegression.pkl',  # 0.2 2000\n        'nb': './model/NaiveBayesian.pkl',  # 0.00241\n        'svm': './model/SVM_sklearn.pkl',\n        'lrs': './model/Logistic_sklearn.pkl',\n        'nbs': './model/Bayes_sklearn.pkl'\n    }\n    import time\n\n    # for cls_name in classifiers.keys():\n    #     jieba.initialize()\n    #\n    #     start = time.time()\n    #     data = test_not_tag_data()\n    #     cls = joblib.load(classifiers[cls_name])\n    #\n    #     data = Pool().map(f, data)\n    #     # data = [Counter(d) for d in map(jieba.cut, data)]\n    #     print('end jieba', time.time() - start)\n    #     cv = BowTransform.load_vsm()\n    #     data = cv.transform(data)\n    #     predicted = cls.predict(data)\n    #     with open('./data/result.txt', 'w+') as f:\n    #         for x in predicted:\n    #             f.write(str(x) + '\\n')\n    #     print('end %s  with time:' % cls, time.time() - start)\n\n    start = time.time()\n    data, target = test_data()\n    # data = [Counter(d) for d in map(jieba.cut, data)]\n    data = Pool(4).map(f, data)\n    cv = BowTransform.load_vsm()\n    data = cv.transform(data)\n\n    for cls_name in classifiers.keys():\n        cls = joblib.load(classifiers[cls_name])\n        predicted = cls.predict(data)\n\n        print(predicted.shape)  # 160 1\n\n        # print(sum(predicted == test_target), len(test_target), np.mean(predicted == test_target))\n        print(\"Classification report for classifier %s:\\n%s\\n\" % (\n            cls, metrics.classification_report(target, predicted, digits=4)))\n        print(\"Confusion matrix:\\n%s\" % metrics.confusion_matrix(target, predicted))\n        print(precision_recall_fscore_support(target, predicted))\n        print('end', time.time() - start)\n"
  },
  {
    "path": "test_judge2.py",
    "content": "# -*- coding: utf-8 -*-\n# @Date    : 2016/12/3\n# @Author  : hrwhisper\nimport codecs\nimport os\nimport sys\nfrom collections import Counter\nfrom multiprocessing import Pool\nfrom optparse import OptionParser\nimport jieba\nfrom sklearn import metrics\nfrom sklearn.externals import joblib\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom model_manage import BowTransform\n\n\ndef test_not_tag_data():\n    with codecs.open('./data/不带标签短信.txt', 'r', 'utf-8') as f:\n        data = f.read()\n        return data\n        #     if data[-1] == '':\n        #         data.pop()\n        # return data\n\n\ndef f(x):\n    return Counter(jieba.cut(x))\n\n\nif __name__ == \"__main__\":\n    #\n    classifiers = {\n        'p': './model/Perceptron.pkl',  # 0.1 2000\n        'lr': './model/LogisticRegression.pkl',  # 0.2 2000\n        'nb': './model/NaiveBayesian.pkl',  # 0.00241\n        'svm': './model/SVM_sklearn.pkl',\n        'lrs': './model/Logistic_sklearn.pkl',\n        'nbs': './model/Bayes_sklearn.pkl'\n    }\n    import time\n\n    if os.name != 'nt':\n        print('on linux enable parallel tantalization')\n        jieba.enable_parallel(4)\n\n    for cls_name in classifiers.keys():\n        start = time.time()\n        data = test_not_tag_data()\n        cls = joblib.load(classifiers[cls_name])\n        # data = Pool(4).map(f, data)\n\n        data = [Counter(x) for x in ' '.join(jieba.cut(data)).split('\\n')]\n        # with codecs.open('./data/result.txt', 'w+', 'utf-8')  as f:\n        #     f.write()\n\n        print('end jieba', time.time() - start)\n        cv = BowTransform.load_vsm()\n        data = cv.transform(data)\n        predicted = cls.predict(data)\n        with open('./data/result.txt', 'w+') as f:\n            for x in predicted:\n                f.write(str(x) + '\\n')\n        print('end %s  with time:' % cls, time.time() - start)\n\n\n        # data, target = test_data()\n        # # data = [Counter(d) for d in map(jieba.cut, data)]\n        # data = Pool(4).map(f, data)\n        # cv = BowTransform.load_vsm()\n        # data = cv.transform(data)\n\n\n        # for cls_name in classifiers.keys():\n        #     cls = joblib.load(classifiers[cls_name])\n        #     predicted = cls.predict(data)\n        #\n        #     print(predicted.shape)  # 160 1\n        #\n        #     # print(sum(predicted == test_target), len(test_target), np.mean(predicted == test_target))\n        #     print(\"Classification report for classifier %s:\\n%s\\n\" % (\n        #         cls, metrics.classification_report(target, predicted, digits=4)))\n        #     print(\"Confusion matrix:\\n%s\" % metrics.confusion_matrix(target, predicted))\n        #     print(precision_recall_fscore_support(target, predicted))\n        #     print('end', time.time() - start)\n"
  },
  {
    "path": "token_and_save_to_file.py",
    "content": "# -*- coding: utf-8 -*-\n# @Date    : 2016/10/7\n# @Author  : hrwhisper\nfrom collections import Counter\nfrom multiprocessing.pool import Pool\nimport jieba\nimport codecs\n\nfrom sklearn.feature_extraction import DictVectorizer\n\nfrom model_manage import TrainData\n\n\ndef read_train_data():\n    file_path = './data/带标签短信.txt'\n    target = []\n    data = []\n    with codecs.open(file_path, 'r', 'utf-8') as f:\n        for line in f.read().split('\\n')[:-1]:\n            line = line.strip()\n            target.append(line[0])\n            data.append(line[1:].lstrip())\n    return data, target\n\n\ndef save_tokenlization_result(data, target, file_path='./data/tags_token_results'):\n    with codecs.open(file_path, 'w', 'utf-8') as f:\n        for x in data:\n            f.write(' '.join(x) + '\\n')\n\n    with open(file_path + '_tag', 'w') as f:\n        for x in target:\n            f.write(x + '\\n')\n\n\nif __name__ == '__main__':\n    # seg_list = jieba.cut(\"我来到北京清华大学\", cut_all=False)\n    # print(\"Default Mode: \" + \"/ \".join(seg_list))  # 精确模式\n\n    data, target = read_train_data()\n    data = Pool().map(jieba.lcut, data)\n    # data = jieba.lcut(data)\n    save_tokenlization_result(data, target)\n\n    with codecs.open('./data/tags_token_results', 'r', 'utf-8') as f:\n        data = [line.strip().split() for line in f.read().split('\\n')]\n        if not data[-1]: data.pop()\n        t = [Counter(d) for d in data]  # 每一行为一个短信， 值就是TF\n        v = DictVectorizer()\n        t = v.fit_transform(t)  # 稀疏矩阵表示sparse matrix,词编好号\n        TrainData.save(t)\n"
  }
]