[
  {
    "path": "README.md",
    "content": "# yiguan_sex_age_predict_1st_solution \n易观性别年龄预测第一名解决方案\n\n##### [比赛链接](https://www.tinymind.cn/competitions/43)\n--------\n\n团队是分别个人做然后再合并，所以团队中特征文件有所交叉，主要用到的方案是stacking不同模型，因为数据产出的维度较高，通过不同模型stacking可以达到不会损失过量信息下达到降维的目的。\n\n以下是运行代码的顺序：\n\n* 1.产出特征文件 \n\n> 按照nb_cz_lwl_wcm文件夹运行说明分别运行 nb_cz_lwl_wcm文件夹下的所有文件产出特征文件 feature_one.csv\n> 按照thluo 文件夹下运行说明分别运行 thluo 文件夹下的代码生成 thluo_train_best_feat.csv\n\n* 2.模型加权\n注：模型所得到的结果在linwangli文件夹下\n\n> 运行完thluo文件夹下面的所有代码会生成thluo_prob\n> 用linwangli/code文件夹下面的模型以及上面所求得的特征文件可跑出对应概率文件，相关概率文件加权方案看 linwangli文件夹下面的融合思路ppt\n\n<br>\n<br>\n\nCONTRIBUTORS:[THLUO](https://github.com/THLUO)   [WangliLin](https://github.com/WangliLin)   [Puck Wang](https://github.com/PuckWong)   [chizhu](https://github.com/chizhu) [NURBS](https://github.com/suncostanx)\n\n\n\n\n\n\n"
  },
  {
    "path": "THLUO/1.w2c_model_start.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nimport gc\nfrom gensim.test.utils import common_texts, get_tmpfile\nfrom gensim.models import Word2Vec\n\n\n# In[2]:\n\n\npath='input/'\ndata=pd.DataFrame()\nprint ('1.w2c_model_start.py')\n\n# In[3]:\n\n\ndeviceid_packages=pd.read_csv(path+'deviceid_packages.tsv',sep='\\t',names=['device_id','apps'])\ndeviceid_test=pd.read_csv(path+'deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv(path+'deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndeviceid_brand = pd.read_csv(path+'deviceid_brand.tsv',sep='\\t', names=['device_id','device_brand', 'device_type'])\ndeviceid_package_start_close = pd.read_csv(path+'deviceid_package_start_close.tsv',sep='\\t', names=['device_id','app_id','start_time','close_time'])\npackage_label = pd.read_csv(path+'package_label.tsv',sep='\\t',names=['app_id','app_parent_type', 'app_child_type'])\n\n\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : str(x).split(' ')[0])\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 1].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 2].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_2' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 3].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_3' if x in one_time_brand else x)\n\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_brand.values))\ndeviceid_brand['device_brand'] = lbl.transform(list(deviceid_brand.device_brand.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_type.values))\ndeviceid_brand['device_type'] = lbl.transform(list(deviceid_brand.device_type.values))\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_parent_type.values))\npackage_label['app_parent_type'] = lbl.transform(list(package_label.app_parent_type.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_child_type.values))\npackage_label['app_child_type'] = lbl.transform(list(package_label.app_child_type.values))\n\n\n# In[4]:\n\n\ndf_sorted = deviceid_package_start_close.sort_values(by='start_time')\n\n\n# In[20]:\n\n\ndf_results = df_sorted.groupby('device_id')['app_id'].apply(lambda x:' '.join(x)).reset_index().rename(columns = {'app_id' : 'app_list'})\ndf_results.to_csv('01.device_click_app_sorted_by_start.csv', index=None)\ndel df_results\n\n\n# 
In[5]:\n\n\ndf_device_start_app_list = df_sorted.groupby('device_id').apply(lambda x : list(x.app_id)).reset_index().rename(columns = {0 : 'app_list'})\n\n\n# In[7]:\n\n\napp_list = list(df_device_start_app_list.app_list.values)\n\n\n# In[9]:\n\n\nmodel = Word2Vec(app_list, size=10, window=10, min_count=2, workers=4)\nmodel.save(\"word2vec.model\")\n\n\n# In[10]:\n\n\nvocab = list(model.wv.vocab.keys())\n\nw2c_arr = []\n\nfor v in vocab :\n    w2c_arr.append(list(model.wv[v]))\n\n\n# In[11]:\n\n\ndf_w2c_start = pd.DataFrame()\ndf_w2c_start['app_id'] = vocab\ndf_w2c_start = pd.concat([df_w2c_start, pd.DataFrame(w2c_arr)], axis=1)\ndf_w2c_start.columns = ['app_id'] + ['w2c_start_app_' + str(i) for i in range(10)]\n\n\n# In[13]:\n\n\nw2c_nums = 10\nagg = {}\nfor l in ['w2c_start_app_' + str(i) for i in range(w2c_nums)] :\n    agg[l] = ['mean', 'std', 'max', 'min']\n\n\n# In[14]:\n\n\ndeviceid_package_start_close = deviceid_package_start_close.merge(df_w2c_start, on='app_id', how='left')\n\n\n# In[15]:\n\n\ndf_agg = deviceid_package_start_close.groupby('device_id').agg(agg)\ndf_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\ndf_agg = df_agg.reset_index()\ndf_agg.to_csv('device_start_app_w2c.csv', index=None)\n\n\n# In[16]:\n\n\ndf_results = deviceid_package_start_close.groupby(['device_id', 'app_id'])['start_time'].mean().reset_index()\ndf_results = df_results.merge(df_w2c_start, on='app_id', how='left')\n\n\n# In[18]:\n\n\ndf_agg = df_results.groupby('device_id').agg(agg)\ndf_agg.columns = pd.Index(['device_app_unique_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\ndf_agg = df_agg.reset_index()\n\n\n# In[24]:\n\n\ndf_agg.to_csv('device_app_unique_start_app_w2c.csv', index=None)\nprint ('success.....')\n"
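\n\n# In[25]:\n\n\n# Optional sanity check (a sketch, not part of the original pipeline):\n# reload the saved embedding and print the apps whose usage sequences are\n# closest to an arbitrary app id from the vocabulary (gensim 3.x API).\ncheck_model = Word2Vec.load(\"word2vec.model\")\nsome_app = list(check_model.wv.vocab.keys())[0]\nprint (check_model.wv.most_similar(some_app, topn=5))\n"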
  },
  {
    "path": "THLUO/10.age_bin_prob_oof.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nimport gc\n\n\n\n\n# In[2]:\n\nprint ('10.age_bin_prob_oof.py')\npath='input/'\ndata=pd.DataFrame()\n#sex_age=pd.read_excel('./data/性别年龄对照表.xlsx')\n\n\n# In[3]:\n\n\ndeviceid_packages=pd.read_csv(path+'deviceid_packages.tsv',sep='\\t',names=['device_id','apps'])\ndeviceid_test=pd.read_csv(path+'deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv(path+'deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndeviceid_brand = pd.read_csv(path+'deviceid_brand.tsv',sep='\\t', names=['device_id','device_brand', 'device_type'])\ndeviceid_package_start_close = pd.read_csv(path+'deviceid_package_start_close.tsv',sep='\\t', names=['device_id','app_id','start_time','close_time'])\npackage_label = pd.read_csv(path+'package_label.tsv',sep='\\t',names=['app_id','app_parent_type', 'app_child_type'])\n\n\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : str(x).split(' ')[0])\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 1].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 2].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_2' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 3].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_3' if x in one_time_brand else x)\n\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_brand.values))\ndeviceid_brand['device_brand'] = lbl.transform(list(deviceid_brand.device_brand.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_type.values))\ndeviceid_brand['device_type'] = lbl.transform(list(deviceid_brand.device_type.values))\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_parent_type.values))\npackage_label['app_parent_type'] = lbl.transform(list(package_label.app_parent_type.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_child_type.values))\npackage_label['app_child_type'] = lbl.transform(list(package_label.app_child_type.values))\n\n\n# In[4]:\n\n\nimport time\n\n# 输入毫秒级的时间，转出正常格式的时间\ndef timeStamp(timeNum):\n    timeStamp = float(timeNum/1000)\n    timeArray = time.localtime(timeStamp)\n    otherStyleTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n    return otherStyleTime\n\n#解析出具体的时间\ndeviceid_package_start_close['start_date'] = 
pd.to_datetime(deviceid_package_start_close.start_time.apply(timeStamp))\ndeviceid_package_start_close['end_date'] = pd.to_datetime(deviceid_package_start_close.close_time.apply(timeStamp))\ndeviceid_package_start_close['start_hour'] = deviceid_package_start_close.start_date.dt.hour\ndeviceid_package_start_close['end_hour'] = deviceid_package_start_close.end_date.dt.hour\ndeviceid_package_start_close['time_gap'] = (deviceid_package_start_close['end_date'] - deviceid_package_start_close['start_date']).astype('timedelta64[s]')\n\ndeviceid_package_start_close = deviceid_package_start_close.merge(package_label, on='app_id', how='left')\ndeviceid_package_start_close.app_parent_type.fillna(-1, inplace=True)\ndeviceid_package_start_close.app_child_type.fillna(-1, inplace=True)\ndeviceid_package_start_close['start_year'] = deviceid_package_start_close.start_date.dt.year\ndeviceid_package_start_close['end_year'] = deviceid_package_start_close.end_date.dt.year\ndeviceid_package_start_close['year_gap'] = deviceid_package_start_close['end_year'] - deviceid_package_start_close['start_year']\n\n\n# In[5]:\n\n\ndeviceid_train=pd.concat([deviceid_train,deviceid_test])\n\n\n# In[6]:\n\n\ndeviceid_packages['apps']=deviceid_packages['apps'].apply(lambda x:x.split(','))\ndeviceid_packages['app_lenghth']=deviceid_packages['apps'].apply(lambda x:len(x))\n\n\n\n\napps=deviceid_packages['apps'].apply(lambda x:' '.join(x)).tolist()\nvectorizer=CountVectorizer()\ntransformer=TfidfTransformer()\ncntTf = vectorizer.fit_transform(apps)\ntfidf=transformer.fit_transform(cntTf)\nword=vectorizer.get_feature_names()\nweight=tfidf.toarray()\ndf_weight=pd.DataFrame(weight)\nfeature=df_weight.columns\ndf_weight['sum']=0\nfor f in tqdm(feature):\n    df_weight['sum']+=df_weight[f]\ndeviceid_packages['tfidf_sum']=df_weight['sum']\n\n\n# In[10]:\n\n\nlda = LatentDirichletAllocation(n_topics=5,\n                                learning_offset=50.,\n                                random_state=666)\ndocres = lda.fit_transform(cntTf)\n\n\n# In[11]:\n\n\ndeviceid_packages = pd.concat([deviceid_packages,pd.DataFrame(docres)],axis=1)\n\n\n# In[12]:\n\n\ntemp=deviceid_packages.drop('apps',axis=1)\ndeviceid_train=pd.merge(deviceid_train,temp,on='device_id',how='left')\n\n\n# In[13]:\n\n\n#解析出所有的device_app_pair\ndevice_id_arr = []\napp_arr = []\ndf_device_app_pair = pd.DataFrame()\nfor row in deviceid_packages.values :\n    device_id = row[0]\n    app_list = row[1]\n    for app in app_list :\n        device_id_arr.append(device_id)\n        app_arr.append(app)\n#生成pair        \ndf_device_app_pair['device_id'] = device_id_arr\ndf_device_app_pair['app_id'] = app_arr    \n\ndf_device_app_pair = df_device_app_pair.merge(package_label, how='left', on='app_id')\n\n#特征工程\ndef open_app_timegap_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'start_hour'])['time_gap'].mean().reset_index().rename(columns = {'time_gap': 'mean_time_gap'})\n    df_mean_temp = pd.pivot_table(df_temp, index='device_id', columns='start_hour', values='mean_time_gap').reset_index()\n    df_mean_temp.columns = ['device_id'] + ['open_app_timegap_in_'+str(i) + '_mean_hour' for i in range(0,24)]\n    df_mean_temp.fillna(0, inplace=True)\n\n\n    \n    return df_mean_temp\n\n\n# In[8]:\n\n\ndef device_start_end_app_timegap() :\n    #用户打开，关闭app的时间间隔\n    df_ = deviceid_package_start_close.sort_values(by=['device_id', 'start_date'], ascending=False)\n    df_['prev_start_date'] = df_.groupby('device_id')['start_date'].shift(-1)\n    
df_['start_date_gap'] = (df_['start_date'] - df_['prev_start_date']).astype('timedelta64[s]')\n    agg_dic = {'start_date_gap' : ['min', 'max', 'mean', 'median', 'std']}\n    df_start_gap_agg = df_.groupby('device_id').agg(agg_dic)\n    df_start_gap_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_start_gap_agg.columns.tolist()])\n    df_start_gap_agg = df_start_gap_agg.reset_index()\n    #del df_\n    gc.collect()\n    #关闭时间间隔\n    df_ = deviceid_package_start_close.sort_values(by=['device_id', 'end_date'], ascending=False)\n    df_['prev_end_date'] = df_.groupby('device_id')['end_date'].shift(-1)\n    df_['end_date_gap'] = (df_['end_date'] - df_['prev_end_date']).astype('timedelta64[s]')\n    agg_dic = {'end_date_gap' : ['min', 'max', 'mean', 'median', 'std']}\n    df_end_gap_agg = df_.groupby('device_id').agg(agg_dic)\n    df_end_gap_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_end_gap_agg.columns.tolist()])\n    df_end_gap_agg = df_end_gap_agg.reset_index()\n    #del df_\n    gc.collect()\n\n\n\n    df_agg = df_start_gap_agg.merge(df_end_gap_agg, on='device_id', how='left')\n    #df_agg = df_agg.merge(df_app_start_gap_agg, on='device_id', how='left')\n    #df_agg = df_agg.merge(df_app_end_gap_agg, on='device_id', how='left')\n    return df_agg\n\ndef open_app_counts_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'start_hour'])['app_id'].count().reset_index().rename(columns = {'app_id': 'app_counts'})\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='start_hour', values='app_counts').reset_index()\n    df_temp.columns = ['device_id'] + ['open_app_counts_in'+str(i) + '_hour' for i in range(0,24)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\ndef close_app_counts_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'end_hour'])['app_id'].count().reset_index().rename(columns = {'app_id': 'app_counts'})\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='end_hour', values='app_counts').reset_index()\n    df_temp.columns = ['device_id'] + ['close_app_counts_in'+str(i) + '_hour' for i in range(0,24)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\ndef app_type_mean_time_gap_one_hot () :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'app_parent_type'])['time_gap'].mean().reset_index()\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='app_parent_type', values='time_gap').reset_index()\n    df_temp.columns = ['device_id'] + ['app_parent_type_mean_time_gap'+str(i) for i in range(-1,45)]\n    df_temp.fillna(-1, inplace=True)\n    return df_temp  \n\ndef device_active_hour() :\n    aggregations = {\n        'start_hour' : ['std','mean','max','min'],\n        'end_hour' : ['std','mean','max','min']\n    }\n    df_agg = deviceid_package_start_close.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()   \n    \n    return df_agg\n\n\ndef device_brand_encoding() :\n    df_temp = deviceid_brand.merge(deviceid_train[['device_id', 'age', 'sex']], on='device_id', how='left')\n\n    aggregations = {\n        'age' : ['std','mean'],\n        'sex' : ['mean'],\n    }\n\n    df_device_brand = df_temp.groupby('device_brand').agg(aggregations)\n    df_device_brand.columns = pd.Index(['device_brand_' + e[0] + \"_\" + e[1].upper() for e in df_device_brand.columns.tolist()])\n 
   df_device_brand = df_device_brand.reset_index()\n\n    df_device_type = df_temp.groupby('device_type').agg(aggregations)\n    df_device_type.columns = pd.Index(['device_type_' + e[0] + \"_\" + e[1].upper() for e in df_device_type.columns.tolist()])\n    df_device_type = df_device_type.reset_index()\n\n    df_temp = df_temp.merge(df_device_brand, on='device_brand', how='left')\n    df_temp = df_temp.merge(df_device_type, on='device_type', how='left')\n\n    aggregations = {\n        'device_brand_age_STD' : ['mean'],\n        'device_brand_age_MEAN' : ['mean'],\n        'device_brand_sex_MEAN' : ['mean'],\n        #'device_type_age_STD' : ['mean'],\n        #'device_type_age_MEAN' : ['mean'],\n        #'device_type_sex_MEAN' : ['mean']\n    }\n\n    df_agg = df_temp.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n    return df_agg\n\n\n#统计device运行app的情况\ndef device_active_time_time_stat() :\n    #device开启app的时间统计信息\n    deviceid_package_start_close['active_time'] = deviceid_package_start_close['close_time'] - deviceid_package_start_close['start_time']\n\n    #device开启了多少次app\n    #device开启了多少个app\n    aggregations = {\n        'app_id' : ['count', 'nunique'],\n        'active_time' : ['mean', 'std', 'max', 'min'],\n    }\n    df_agg = deviceid_package_start_close.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n\n    aggregations = {\n        'active_time' : ['mean', 'std', 'max', 'min', 'count'],\n    }\n    df_da_agg = deviceid_package_start_close.groupby(['device_id', 'app_id']).agg(aggregations)\n    df_da_agg.columns = pd.Index(['device_app_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_da_agg.columns.tolist()])\n    df_da_agg = df_da_agg.reset_index()\n\n    #device开启app的平均时间\n    aggregations = {\n        'device_app_grouped_active_time_MEAN' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_STD' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_MAX' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_MIN' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_COUNT' : ['mean', 'std', 'max', 'min'],\n    }\n    df_temp = df_da_agg.groupby(['device_id']).agg(aggregations)\n    df_temp.columns = pd.Index([e[0] + \"_\" + e[1].upper() for e in df_temp.columns.tolist()])\n    df_temp = df_temp.reset_index()\n\n    df_agg = df_agg.merge(df_temp, on='device_id', how='left')\n    return df_agg\n\n\ndef app_type_encoding() :\n    df_temp = df_device_app_pair.merge(deviceid_train[['device_id', 'age', 'sex']], on='device_id', how='left')\n\n    aggregations = {\n        'age' : ['std','mean'],\n        'sex' : ['mean'],\n    }\n\n    df_agg_app_parent_type = df_temp.groupby('app_parent_type').agg(aggregations)\n    df_agg_app_parent_type.columns = pd.Index(['app_parent_type_' + e[0] + \"_\" + e[1].upper() for e in df_agg_app_parent_type.columns.tolist()])\n    df_agg_app_parent_type = df_agg_app_parent_type.reset_index()\n\n    df_agg_app_child_type = df_temp.groupby('app_child_type').agg(aggregations)\n    df_agg_app_child_type.columns = pd.Index(['app_child_type_' + e[0] + \"_\" + e[1].upper() for e in df_agg_app_child_type.columns.tolist()])\n    df_agg_app_child_type = df_agg_app_child_type.reset_index()\n\n    
df_temp = df_temp.merge(df_agg_app_parent_type, on='app_parent_type', how='left')\n    df_temp = df_temp.merge(df_agg_app_child_type, on='app_child_type', how='left')\n\n    aggregations = {\n        'app_parent_type_age_STD' : ['mean'],\n        'app_parent_type_age_MEAN' : ['mean'],\n        'app_parent_type_sex_MEAN' : ['mean'],\n        'app_child_type_age_STD' : ['mean'],\n        'app_child_type_age_MEAN' : ['mean'],\n        'app_child_type_sex_MEAN' : ['mean']\n    }\n    \n    df_agg = df_temp.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n    return df_agg\n\n#每个device对应的app_parent_type计数\ndef app_type_onehot_in_device(df) :\n    df_copy = df.fillna(-1)\n    df_temp = df_copy.groupby(['device_id', 'app_parent_type'])['app_id'].size().reset_index()\n    df_temp.rename(columns = {'app_id' : 'app_parent_type_counts'}, inplace=True)\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='app_parent_type', values='app_parent_type_counts').reset_index()\n    df_temp.columns = ['device_id'] + ['app_parent_type'+str(i) for i in range(-1,45)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\n\n\n# In[15]:\n\n\n#提取特征\ndf_train = deviceid_train.merge(device_active_time_time_stat(), on='device_id', how='left')\ndf_train = df_train.merge(deviceid_brand, on='device_id', how='left')\ndf_train = df_train.merge(app_type_onehot_in_device(df_device_app_pair), on='device_id', how='left')\ndf_train = df_train.merge(app_type_encoding(), on='device_id', how='left')\ndf_train = df_train.merge(device_active_hour(), on='device_id', how='left')\ndf_train = df_train.merge(app_type_mean_time_gap_one_hot(), on='device_id', how='left')\ndf_train = df_train.merge(open_app_counts_in_hour(), on='device_id', how='left')\ndf_train = df_train.merge(close_app_counts_in_hour(), on='device_id', how='left')\ndf_train = df_train.merge(device_brand_encoding(), on='device_id', how='left')\ndf_train = df_train.merge(device_start_end_app_timegap(), on='device_id', how='left')\ndf_train = df_train.merge(open_app_timegap_in_hour(), on='device_id', how='left')\n\n\n# In[16]:\n\n\ndf_w2c_start = pd.read_csv('device_start_app_w2c.csv')\ndf_w2c_close = pd.read_csv('device_close_app_w2c.csv')\ndf_w2c_all = pd.read_csv('device_all_app_w2c.csv')\ndf_device_quchong_start_app_w2c = pd.read_csv('device_quchong_start_app_w2c.csv')\ndf_device_app_unique_start_app_w2c = pd.read_csv('device_app_unique_start_app_w2c.csv')\ndf_device_app_unique_close_app_w2c = pd.read_csv('device_app_unique_close_app_w2c.csv')\ndf_device_app_unique_all_app_w2c = pd.read_csv('device_app_unique_all_app_w2c.csv')\n\n\n# In[17]:\n\n\ndf_train_w2v = df_train.merge(df_w2c_start, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_w2c_close, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_w2c_all, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_quchong_start_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_start_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_close_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_all_app_w2c, on='device_id', how='left')\n\n\n# In[19]:\n\n\ndf_train_w2v['sex'] = df_train_w2v['sex'].apply(lambda x:str(x))\ndf_train_w2v['age'] = df_train_w2v['age'].apply(lambda x:str(x))\ndef tool(x):\n   
 if x=='nan':\n        return x\n    else:\n        return str(int(float(x)))\ndf_train_w2v['sex']=df_train_w2v['sex'].apply(tool)\ndf_train_w2v['age']=df_train_w2v['age'].apply(tool)\ndf_train_w2v['sex_age']=df_train_w2v['sex']+'-'+df_train_w2v['age']\n\ndf_train_w2v = df_train_w2v.replace({'nan':np.NaN,'nan-nan':np.NaN})\n\n\n# In[31]:\n\n\ntrain = df_train_w2v[df_train_w2v['sex_age'].notnull()]\ntest = df_train_w2v[df_train_w2v['sex_age'].isnull()]\ntrain = train.reset_index(drop=True)\ntest = test.reset_index(drop=True)\n\n\n# In[32]:\n\n\nY = train['age']\ntrain['label'] = Y\n\n\n# In[35]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\nlabel_set = train.label.unique()\nlgb_round = {'3': 363,\n             '5': 273,\n             '4': 328,\n             '7': 228,\n             '6': 361,\n             '9': 181,\n             '10': 338,\n             '2': 312,\n             '8': 234,\n             '1': 220,\n             '0': 200}\nfor sex_age in label_set :\n    print (sex_age)\n    X = train.drop(['sex', 'age', 'sex_age', 'label', 'device_id'],axis=1)\n    Y = train.label.apply(lambda x : 1 if x == sex_age else 0)\n    print (Y.value_counts())\n    seed = 2018\n    num_folds = 5\n    folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed)\n\n    sub_list = []\n\n    oof_preds = np.zeros(train.shape[0])\n    sub_preds = np.zeros(test.shape[0])\n\n    params = {\n        'boosting_type': 'gbdt',\n        'learning_rate' : 0.02,\n        #'max_depth':5,\n        'num_leaves' : 2 ** 5,\n        'metric': {'binary_logloss'},\n        #'num_class' : 22,\n        'objective' : 'binary',\n        'random_state' : 6666,\n        'bagging_freq' : 5,\n        'feature_fraction' : 0.7,\n        'bagging_fraction' : 0.7,\n        'min_split_gain' : 0.0970905919552776,\n        'min_child_weight' : 9.42012323936088,  \n    }\n\n    for n_fold, (train_idx, valid_idx) in enumerate(folds.split(X, Y)):\n        train_x, train_y = X.iloc[train_idx], Y.iloc[train_idx]\n        valid_x, valid_y = X.iloc[valid_idx], Y.iloc[valid_idx] \n\n        lgb_train=lgb.Dataset(train_x,label=train_y)\n        lgb_eval = lgb.Dataset(valid_x, valid_y, reference=lgb_train)\n\n        gbm = lgb.train(params, lgb_train, num_boost_round=lgb_round[sex_age], valid_sets=[lgb_train, lgb_eval], verbose_eval=50)  \n\n        oof_preds[valid_idx] = gbm.predict(valid_x[X.columns.values])\n\n\n    train['age_bin_prob_oof_' + str(sex_age)] = oof_preds      \n    \n    \n    #用全部的train来预测test\n    lgb_train = lgb.Dataset(X,label=Y)\n\n    gbm = lgb.train(params, lgb_train, num_boost_round=lgb_round[sex_age], valid_sets=lgb_train, verbose_eval=50)  \n\n    test['age_bin_prob_oof_' + str(sex_age)] = gbm.predict(test[X.columns.values])\n\n\n# In[36]:\n\n\ncolumns = ['device_id'] + ['age_bin_prob_oof_' + str(i) for i in range(11)]\n\n\n# In[38]:\n\n\npd.concat([train[columns], test[columns]]).to_csv('age_bin_prob_oof.csv', index=None)\n\n"
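\n\n# In[39]:\n\n\n# Note on the scheme above (descriptive only, no new computation): one binary\n# LightGBM model is fit per age class; train rows receive out-of-fold\n# predictions (each row is scored by a model that never saw it), while test\n# rows are scored by a model refit on the full training set. The resulting\n# 'age_bin_prob_oof_*' columns can therefore be merged onto any feature table\n# by device_id as leak-free stacking features, e.g.:\n# feats = feats.merge(pd.read_csv('age_bin_prob_oof.csv'), on='device_id', how='left')\n"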
  },
  {
    "path": "THLUO/11.hcc_device_brand_age_sex.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nimport gc\nfrom sklearn.model_selection import StratifiedKFold\n\n\n\n# In[2]:\n\nprint ('11.hcc_device_brand_age_sex.py')\npath='input/'\ndata=pd.DataFrame()\n#sex_age=pd.read_excel('./data/性别年龄对照表.xlsx')\n\n\n# In[3]:\n\n\ndeviceid_packages=pd.read_csv(path+'deviceid_packages.tsv',sep='\\t',names=['device_id','apps'])\ndeviceid_test=pd.read_csv(path+'deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv(path+'deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndeviceid_brand = pd.read_csv(path+'deviceid_brand.tsv',sep='\\t', names=['device_id','device_brand', 'device_type'])\ndeviceid_package_start_close = pd.read_csv(path+'deviceid_package_start_close.tsv',sep='\\t', names=['device_id','app_id','start_time','close_time'])\npackage_label = pd.read_csv(path+'package_label.tsv',sep='\\t',names=['app_id','app_parent_type', 'app_child_type'])\n\n\n#deviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : str(x).split(' ')[0])\n\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_brand.values))\ndeviceid_brand['device_brand'] = lbl.transform(list(deviceid_brand.device_brand.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_type.values))\ndeviceid_brand['device_type'] = lbl.transform(list(deviceid_brand.device_type.values))\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_parent_type.values))\npackage_label['app_parent_type'] = lbl.transform(list(package_label.app_parent_type.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_child_type.values))\npackage_label['app_child_type'] = lbl.transform(list(package_label.app_child_type.values))\n\n\n# In[4]:\n\n\ndf_train = deviceid_train.merge(deviceid_brand, how='left', on='device_id')\ndf_train.fillna(-1, inplace=True)\ndf_test = deviceid_test.merge(deviceid_brand, how='left', on='device_id')\ndf_test.fillna(-1, inplace=True)\n\n\n# In[5]:\n\n\ndf_train['sex'] = df_train.sex.apply(lambda x : 1 if x == 1 else 0)\ndf_train = df_train.join(pd.get_dummies(df_train[\"age\"], prefix=\"age\").astype(int))\ndf_train['sex_age'] = df_train['sex'].map(str) + '_' + df_train['age'].map(str)\nY = df_train['sex_age']\nY_CAT = pd.Categorical(Y)\ndf_train['sex_age'] = pd.Series(Y_CAT.codes)\ndf_train = df_train.join(pd.get_dummies(df_train[\"sex_age\"], prefix=\"sex_age\").astype(int))\n\n\n# In[6]:\n\n\nsex_age_columns = ['sex_age_' + str(i) for i in range(22)]\nsex_age_prior_set = df_train[sex_age_columns].mean().values\nage_columns = ['age_' + str(i) for i in range(11)]\nage_prior_set = df_train[age_columns].mean().values\nsex_prior_prob= df_train.sex.mean()\nsex_prior_prob\n\n\n# In[7]:\n\n\ndef hcc_encode(train_df, test_df, variable, target, prior_prob, k=5, f=1, g=1, update_df=None):\n    \"\"\"\n    See \"A Preprocessing Scheme for High-Cardinality Categorical Attributes in\n    Classification and Prediction Problems\" by Daniele Micci-Barreca\n    \"\"\"\n    hcc_name = \"_\".join([\"hcc\", 
variable, target])\n\n    grouped = train_df.groupby(variable)[target].agg({\"size\": \"size\", \"mean\": \"mean\"})\n    grouped[\"lambda\"] = 1 / (g + np.exp((k - grouped[\"size\"]) / f))\n    grouped[hcc_name] = grouped[\"lambda\"] * grouped[\"mean\"] + (1 - grouped[\"lambda\"]) * prior_prob\n\n    df = test_df[[variable]].join(grouped, on=variable, how=\"left\")[hcc_name].fillna(prior_prob)\n\n    if update_df is None: update_df = test_df\n    if hcc_name not in update_df.columns: update_df[hcc_name] = np.nan\n    update_df.update(df)\n    return\n\n\n# In[8]:\n\n\n#拟合年龄\n#拟合测试集\n# High-Cardinality Categorical encoding\nskf = StratifiedKFold(5)\nnums = 11\nfor variable in ['device_brand', 'device_type'] : \n    for i in range(nums) :\n        target = age_columns[i]\n        age_prior_prob = age_prior_set[i]\n        print (variable, target, age_prior_prob)\n        hcc_encode(df_train, df_test, variable, target, age_prior_prob, k=5, f=1, g=1, update_df=None)\n        #拟合验证集\n        for train, test in skf.split(np.zeros(len(df_train)), df_train['age']):\n            hcc_encode(df_train.iloc[train], df_train.iloc[test], variable, target, age_prior_prob, k=5, update_df=df_train)        \n\n\n# In[9]:\n\n\n#拟合性别\n#拟合测试集\n# High-Cardinality Categorical encoding\nskf = StratifiedKFold(5)\nfor variable in ['device_brand', 'device_type'] : \n    target = 'sex'\n    print (variable, target, sex_prior_prob)\n    hcc_encode(df_train, df_test, variable, target, sex_prior_prob, k=5, f=1, g=1, update_df=None)\n    #拟合验证集\n    for train, test in skf.split(np.zeros(len(df_train)), df_train['age']):\n        hcc_encode(df_train.iloc[train], df_train.iloc[test], variable, target, sex_prior_prob, k=5, f=1, g=1, update_df=df_train)        \n\n\n# In[10]:\n\n\n#拟合性别年龄\n#拟合测试集\n# High-Cardinality Categorical encoding\nskf = StratifiedKFold(5)\nnums = 22\nfor variable in ['device_brand', 'device_type'] : \n    for i in range(nums) :\n        target = sex_age_columns[i]\n        sex_age_prior_prob = sex_age_prior_set[i]\n        print (variable, target, sex_age_prior_prob)\n        hcc_encode(df_train, df_test, variable, target, sex_age_prior_prob, k=5, f=1, g=1, update_df=None)\n        #拟合验证集\n        for train, test in skf.split(np.zeros(len(df_train)), df_train['sex_age']):\n            hcc_encode(df_train.iloc[train], df_train.iloc[test], variable, target, sex_age_prior_prob, k=5, update_df=df_train)        \n\n\n# In[14]:\n\n\nhcc_columns = ['device_id'] + ['hcc_device_brand_age_' + str(i) for i in range(11)] + ['hcc_device_brand_sex'] + ['hcc_device_type_age_' + str(i) for i in range(11)] + ['hcc_device_type_sex'] + ['hcc_device_type_sex_age_' + str(i) for i in range(22)]  \ndf_total = pd.concat([df_train[hcc_columns], df_test[hcc_columns]])\n\n\n# In[15]:\n\n\ndf_total.to_csv('hcc_device_brand_age_sex.csv', index=None)\n\n"
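\n\n# In[16]:\n\n\n# Worked example of the smoothing weight in hcc_encode (a sketch): with\n# lambda = 1 / (g + exp((k - size) / f)) and k=5, f=1, g=1, a category seen\n# once keeps only ~2% of its own mean (98% prior), one seen 5 times is\n# blended 50/50, and one seen 20 times keeps ~100% of its own mean.\nfor size in [1, 5, 20]:\n    lam = 1 / (1 + np.exp((5 - size) / 1))\n    print (size, round(lam, 3))\n"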
  },
  {
    "path": "THLUO/12.device_age_regression_prob_oof.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nimport gc\nfrom feat_util import *\n\n\n\n# In[2]:\n\nprint ('12.device_age_regression_prob_oof.py')\npath='input/'\ndata=pd.DataFrame()\n#sex_age=pd.read_excel('./data/性别年龄对照表.xlsx')\n\n\n# In[3]:\n\n\ndeviceid_packages=pd.read_csv(path+'deviceid_packages.tsv',sep='\\t',names=['device_id','apps'])\ndeviceid_test=pd.read_csv(path+'deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv(path+'deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndeviceid_brand = pd.read_csv(path+'deviceid_brand.tsv',sep='\\t', names=['device_id','device_brand', 'device_type'])\ndeviceid_package_start_close = pd.read_csv(path+'deviceid_package_start_close.tsv',sep='\\t', names=['device_id','app_id','start_time','close_time'])\npackage_label = pd.read_csv(path+'package_label.tsv',sep='\\t',names=['app_id','app_parent_type', 'app_child_type'])\n\n\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : str(x).split(' ')[0])\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 1].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 2].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_2' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 3].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_3' if x in one_time_brand else x)\n\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_brand.values))\ndeviceid_brand['device_brand'] = lbl.transform(list(deviceid_brand.device_brand.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_type.values))\ndeviceid_brand['device_type'] = lbl.transform(list(deviceid_brand.device_type.values))\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_parent_type.values))\npackage_label['app_parent_type'] = lbl.transform(list(package_label.app_parent_type.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_child_type.values))\npackage_label['app_child_type'] = lbl.transform(list(package_label.app_child_type.values))\n\n\n# In[4]:\n\n\nimport time\n\n# 输入毫秒级的时间，转出正常格式的时间\ndef timeStamp(timeNum):\n    timeStamp = float(timeNum/1000)\n    timeArray = time.localtime(timeStamp)\n    otherStyleTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n    return otherStyleTime\n\n#解析出具体的时间\ndeviceid_package_start_close['start_date'] = 
pd.to_datetime(deviceid_package_start_close.start_time.apply(timeStamp))\ndeviceid_package_start_close['end_date'] = pd.to_datetime(deviceid_package_start_close.close_time.apply(timeStamp))\ndeviceid_package_start_close['start_hour'] = deviceid_package_start_close.start_date.dt.hour\ndeviceid_package_start_close['end_hour'] = deviceid_package_start_close.end_date.dt.hour\ndeviceid_package_start_close['time_gap'] = (deviceid_package_start_close['end_date'] - deviceid_package_start_close['start_date']).astype('timedelta64[s]')\n\ndeviceid_package_start_close = deviceid_package_start_close.merge(package_label, on='app_id', how='left')\ndeviceid_package_start_close.app_parent_type.fillna(-1, inplace=True)\ndeviceid_package_start_close.app_child_type.fillna(-1, inplace=True)\ndeviceid_package_start_close['start_year'] = deviceid_package_start_close.start_date.dt.year\ndeviceid_package_start_close['end_year'] = deviceid_package_start_close.end_date.dt.year\ndeviceid_package_start_close['year_gap'] = deviceid_package_start_close['end_year'] - deviceid_package_start_close['start_year']\n\n\n# In[5]:\n\n\ndeviceid_train=pd.concat([deviceid_train,deviceid_test])\n\n\n# In[6]:\n\n\ndeviceid_packages['apps']=deviceid_packages['apps'].apply(lambda x:x.split(','))\ndeviceid_packages['app_lenghth']=deviceid_packages['apps'].apply(lambda x:len(x))\n\n\n\n#特征工程\ndef open_app_timegap_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'start_hour'])['time_gap'].mean().reset_index().rename(columns = {'time_gap': 'mean_time_gap'})\n    df_mean_temp = pd.pivot_table(df_temp, index='device_id', columns='start_hour', values='mean_time_gap').reset_index()\n    df_mean_temp.columns = ['device_id'] + ['open_app_timegap_in_'+str(i) + '_mean_hour' for i in range(0,24)]\n    df_mean_temp.fillna(0, inplace=True)\n\n\n    \n    return df_mean_temp\n\n\n# In[8]:\n\n\ndef device_start_end_app_timegap() :\n    #用户打开，关闭app的时间间隔\n    df_ = deviceid_package_start_close.sort_values(by=['device_id', 'start_date'], ascending=False)\n    df_['prev_start_date'] = df_.groupby('device_id')['start_date'].shift(-1)\n    df_['start_date_gap'] = (df_['start_date'] - df_['prev_start_date']).astype('timedelta64[s]')\n    agg_dic = {'start_date_gap' : ['min', 'max', 'mean', 'median', 'std']}\n    df_start_gap_agg = df_.groupby('device_id').agg(agg_dic)\n    df_start_gap_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_start_gap_agg.columns.tolist()])\n    df_start_gap_agg = df_start_gap_agg.reset_index()\n    #del df_\n    gc.collect()\n    #关闭时间间隔\n    df_ = deviceid_package_start_close.sort_values(by=['device_id', 'end_date'], ascending=False)\n    df_['prev_end_date'] = df_.groupby('device_id')['end_date'].shift(-1)\n    df_['end_date_gap'] = (df_['end_date'] - df_['prev_end_date']).astype('timedelta64[s]')\n    agg_dic = {'end_date_gap' : ['min', 'max', 'mean', 'median', 'std']}\n    df_end_gap_agg = df_.groupby('device_id').agg(agg_dic)\n    df_end_gap_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_end_gap_agg.columns.tolist()])\n    df_end_gap_agg = df_end_gap_agg.reset_index()\n    #del df_\n    gc.collect()\n\n\n\n    df_agg = df_start_gap_agg.merge(df_end_gap_agg, on='device_id', how='left')\n    #df_agg = df_agg.merge(df_app_start_gap_agg, on='device_id', how='left')\n    #df_agg = df_agg.merge(df_app_end_gap_agg, on='device_id', how='left')\n    return df_agg\n\ndef open_app_counts_in_hour() :\n    df_temp = 
deviceid_package_start_close.groupby(['device_id', 'start_hour'])['app_id'].count().reset_index().rename(columns = {'app_id': 'app_counts'})\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='start_hour', values='app_counts').reset_index()\n    df_temp.columns = ['device_id'] + ['open_app_counts_in'+str(i) + '_hour' for i in range(0,24)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\ndef close_app_counts_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'end_hour'])['app_id'].count().reset_index().rename(columns = {'app_id': 'app_counts'})\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='end_hour', values='app_counts').reset_index()\n    df_temp.columns = ['device_id'] + ['close_app_counts_in'+str(i) + '_hour' for i in range(0,24)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\ndef app_type_mean_time_gap_one_hot () :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'app_parent_type'])['time_gap'].mean().reset_index()\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='app_parent_type', values='time_gap').reset_index()\n    df_temp.columns = ['device_id'] + ['app_parent_type_mean_time_gap'+str(i) for i in range(-1,45)]\n    df_temp.fillna(-1, inplace=True)\n    return df_temp  \n\ndef device_active_hour() :\n    aggregations = {\n        'start_hour' : ['std','mean','max','min'],\n        'end_hour' : ['std','mean','max','min']\n    }\n    df_agg = deviceid_package_start_close.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()   \n    \n    return df_agg\n\n\ndef device_brand_encoding() :\n    df_temp = deviceid_brand.merge(deviceid_train[['device_id', 'age', 'sex']], on='device_id', how='left')\n\n    aggregations = {\n        'age' : ['std','mean'],\n        'sex' : ['mean'],\n    }\n\n    df_device_brand = df_temp.groupby('device_brand').agg(aggregations)\n    df_device_brand.columns = pd.Index(['device_brand_' + e[0] + \"_\" + e[1].upper() for e in df_device_brand.columns.tolist()])\n    df_device_brand = df_device_brand.reset_index()\n\n    df_device_type = df_temp.groupby('device_type').agg(aggregations)\n    df_device_type.columns = pd.Index(['device_type_' + e[0] + \"_\" + e[1].upper() for e in df_device_type.columns.tolist()])\n    df_device_type = df_device_type.reset_index()\n\n    df_temp = df_temp.merge(df_device_brand, on='device_brand', how='left')\n    df_temp = df_temp.merge(df_device_type, on='device_type', how='left')\n\n    aggregations = {\n        'device_brand_age_STD' : ['mean'],\n        'device_brand_age_MEAN' : ['mean'],\n        'device_brand_sex_MEAN' : ['mean'],\n        #'device_type_age_STD' : ['mean'],\n        #'device_type_age_MEAN' : ['mean'],\n        #'device_type_sex_MEAN' : ['mean']\n    }\n\n    df_agg = df_temp.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n    return df_agg\n\n\n#统计device运行app的情况\ndef device_active_time_time_stat() :\n    #device开启app的时间统计信息\n    deviceid_package_start_close['active_time'] = deviceid_package_start_close['close_time'] - deviceid_package_start_close['start_time']\n\n    #device开启了多少次app\n    #device开启了多少个app\n    aggregations = {\n        'app_id' : ['count', 'nunique'],\n        'active_time' : ['mean', 'std', 'max', 
'min'],\n    }\n    df_agg = deviceid_package_start_close.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n\n    aggregations = {\n        'active_time' : ['mean', 'std', 'max', 'min', 'count'],\n    }\n    df_da_agg = deviceid_package_start_close.groupby(['device_id', 'app_id']).agg(aggregations)\n    df_da_agg.columns = pd.Index(['device_app_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_da_agg.columns.tolist()])\n    df_da_agg = df_da_agg.reset_index()\n\n    #device开启app的平均时间\n    aggregations = {\n        'device_app_grouped_active_time_MEAN' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_STD' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_MAX' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_MIN' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_COUNT' : ['mean', 'std', 'max', 'min'],\n    }\n    df_temp = df_da_agg.groupby(['device_id']).agg(aggregations)\n    df_temp.columns = pd.Index([e[0] + \"_\" + e[1].upper() for e in df_temp.columns.tolist()])\n    df_temp = df_temp.reset_index()\n\n    df_agg = df_agg.merge(df_temp, on='device_id', how='left')\n    return df_agg\n\n\ndef app_type_encoding() :\n    df_temp = df_device_app_pair.merge(deviceid_train[['device_id', 'age', 'sex']], on='device_id', how='left')\n\n    aggregations = {\n        'age' : ['std','mean'],\n        'sex' : ['mean'],\n    }\n\n    df_agg_app_parent_type = df_temp.groupby('app_parent_type').agg(aggregations)\n    df_agg_app_parent_type.columns = pd.Index(['app_parent_type_' + e[0] + \"_\" + e[1].upper() for e in df_agg_app_parent_type.columns.tolist()])\n    df_agg_app_parent_type = df_agg_app_parent_type.reset_index()\n\n    df_agg_app_child_type = df_temp.groupby('app_child_type').agg(aggregations)\n    df_agg_app_child_type.columns = pd.Index(['app_child_type_' + e[0] + \"_\" + e[1].upper() for e in df_agg_app_child_type.columns.tolist()])\n    df_agg_app_child_type = df_agg_app_child_type.reset_index()\n\n    df_temp = df_temp.merge(df_agg_app_parent_type, on='app_parent_type', how='left')\n    df_temp = df_temp.merge(df_agg_app_child_type, on='app_child_type', how='left')\n\n    aggregations = {\n        'app_parent_type_age_STD' : ['mean'],\n        'app_parent_type_age_MEAN' : ['mean'],\n        'app_parent_type_sex_MEAN' : ['mean'],\n        'app_child_type_age_STD' : ['mean'],\n        'app_child_type_age_MEAN' : ['mean'],\n        'app_child_type_sex_MEAN' : ['mean']\n    }\n    \n    df_agg = df_temp.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n    return df_agg\n\n#每个device对应的app_parent_type计数\ndef app_type_onehot_in_device(df) :\n    df_copy = df.fillna(-1)\n    df_temp = df_copy.groupby(['device_id', 'app_parent_type'])['app_id'].size().reset_index()\n    df_temp.rename(columns = {'app_id' : 'app_parent_type_counts'}, inplace=True)\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='app_parent_type', values='app_parent_type_counts').reset_index()\n    df_temp.columns = ['device_id'] + ['app_parent_type'+str(i) for i in range(-1,45)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\n\n\napps=deviceid_packages['apps'].apply(lambda x:' 
'.join(x)).tolist()\nvectorizer=CountVectorizer()\ntransformer=TfidfTransformer()\ncntTf = vectorizer.fit_transform(apps)\ntfidf=transformer.fit_transform(cntTf)\nword=vectorizer.get_feature_names()\nweight=tfidf.toarray()\ndf_weight=pd.DataFrame(weight)\nfeature=df_weight.columns\ndf_weight['sum']=0\nfor f in tqdm(feature):\n    df_weight['sum']+=df_weight[f]\ndeviceid_packages['tfidf_sum']=df_weight['sum']\n\n\n# In[10]:\n\n\nlda = LatentDirichletAllocation(n_topics=5,\n                                learning_offset=50.,\n                                random_state=666)\ndocres = lda.fit_transform(cntTf)\n\n\n# In[11]:\n\n\ndeviceid_packages = pd.concat([deviceid_packages,pd.DataFrame(docres)],axis=1)\n\n\n# In[12]:\n\n\ntemp=deviceid_packages.drop('apps',axis=1)\ndeviceid_train=pd.merge(deviceid_train,temp,on='device_id',how='left')\n\n\n# In[13]:\n\n\n#解析出所有的device_app_pair\ndevice_id_arr = []\napp_arr = []\ndf_device_app_pair = pd.DataFrame()\nfor row in deviceid_packages.values :\n    device_id = row[0]\n    app_list = row[1]\n    for app in app_list :\n        device_id_arr.append(device_id)\n        app_arr.append(app)\n#生成pair        \ndf_device_app_pair['device_id'] = device_id_arr\ndf_device_app_pair['app_id'] = app_arr    \n\ndf_device_app_pair = df_device_app_pair.merge(package_label, how='left', on='app_id')\n\n\n# In[15]:\n\n\n#提取特征\ndf_train = deviceid_train.merge(device_active_time_time_stat(), on='device_id', how='left')\ndf_train = df_train.merge(deviceid_brand, on='device_id', how='left')\ndf_train = df_train.merge(app_type_onehot_in_device(df_device_app_pair), on='device_id', how='left')\ndf_train = df_train.merge(app_type_encoding(), on='device_id', how='left')\ndf_train = df_train.merge(device_active_hour(), on='device_id', how='left')\ndf_train = df_train.merge(app_type_mean_time_gap_one_hot(), on='device_id', how='left')\ndf_train = df_train.merge(open_app_counts_in_hour(), on='device_id', how='left')\ndf_train = df_train.merge(close_app_counts_in_hour(), on='device_id', how='left')\ndf_train = df_train.merge(device_brand_encoding(), on='device_id', how='left')\ndf_train = df_train.merge(device_start_end_app_timegap(), on='device_id', how='left')\ndf_train = df_train.merge(open_app_timegap_in_hour(), on='device_id', how='left')\n\n\n# In[16]:\n\n\ndf_w2c_start = pd.read_csv('device_start_app_w2c.csv')\ndf_w2c_close = pd.read_csv('device_close_app_w2c.csv')\ndf_w2c_all = pd.read_csv('device_all_app_w2c.csv')\ndf_device_quchong_start_app_w2c = pd.read_csv('device_quchong_start_app_w2c.csv')\ndf_device_app_unique_start_app_w2c = pd.read_csv('device_app_unique_start_app_w2c.csv')\ndf_device_app_unique_close_app_w2c = pd.read_csv('device_app_unique_close_app_w2c.csv')\ndf_device_app_unique_all_app_w2c = pd.read_csv('device_app_unique_all_app_w2c.csv')\ndf_hcc_device_brand_age_sex = pd.read_csv('hcc_device_brand_age_sex.csv')\n\ndf_train_w2v = df_train.merge(df_w2c_start, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_w2c_close, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_w2c_all, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_quchong_start_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_start_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_close_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_all_app_w2c, on='device_id', how='left')\ndf_train_w2v = 
df_train_w2v.merge(df_hcc_device_brand_age_sex, on='device_id', how='left')\n\n\n# In[22]:\n\n\ntrain = df_train_w2v[df_train_w2v['age'].notnull()]\ntest = df_train_w2v[df_train_w2v['age'].isnull()]\n\n\n# In[23]:\n\n\nX = train.drop(['sex', 'age', 'device_id'],axis=1)\nY = train['age']\n\n\n# In[24]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\n\nseed = 2018\nnum_folds = 5\nfolds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed)\n\nsub_list = []\n\noof_preds = np.zeros(train.shape[0])\n\ncate_feat = ['device_type','device_brand']\n\nparams = {\n    'boosting_type': 'gbdt',\n    'learning_rate' : 0.02,\n    'num_leaves' : 2 ** 5,\n    'objective' : 'regression',\n    'metric' : 'rmse',\n    'random_state' : 6666,\n    'bagging_freq' : 5,\n    'feature_fraction' : 0.7,\n    'bagging_fraction' : 0.7,\n    'min_split_gain' : 0.0970905919552776,\n    'min_child_weight' : 9.42012323936088,  \n}\n\nfor n_fold, (train_idx, valid_idx) in enumerate(folds.split(X, Y)):\n    train_x, train_y = X.iloc[train_idx], Y.iloc[train_idx]\n    valid_x, valid_y = X.iloc[valid_idx], Y.iloc[valid_idx] \n    \n    lgb_train=lgb.Dataset(train_x,label=train_y)\n    lgb_eval = lgb.Dataset(valid_x, valid_y, reference=lgb_train)\n    \n    gbm = lgb.train(params, lgb_train, num_boost_round=800, valid_sets=[lgb_train, lgb_eval], verbose_eval=50)  \n    \n    oof_preds[valid_idx] = gbm.predict(valid_x[X.columns.values])\n\n    \ntrain['age_regression_prob_oof'] = oof_preds\n\n\n# In[26]:\n\n\n#用全部的train来预测test\nlgb_train = lgb.Dataset(X,label=Y)\n\ngbm = lgb.train(params, lgb_train, num_boost_round=800, valid_sets=lgb_train, verbose_eval=50)  \n\ntest = test.reset_index(drop=True)\ntest_preds = gbm.predict(test[X.columns.values])\n\n\n# In[27]:\n\n\ntest['age_regression_prob_oof'] = test_preds\n\n\n# In[30]:\n\n\ndf_age_prob_oof = pd.concat([train[['device_id', 'age_regression_prob_oof']], \n                             test[['device_id', 'age_regression_prob_oof']]])\ndf_age_prob_oof.to_csv('device_age_regression_prob_oof.csv', index=None)\n\n"
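\n\n# In[31]:\n\n\n# Unlike 10.age_bin_prob_oof.py (one binary model per age class), this script\n# treats age as a single continuous target, so it contributes one stacked\n# feature instead of eleven. Downstream it is merged like any other feature\n# file (a sketch):\n# feats = feats.merge(pd.read_csv('device_age_regression_prob_oof.csv'), on='device_id', how='left')\n"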
  },
  {
    "path": "THLUO/13.device_start_GRU_pred.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\n# coding: utf-8\nimport feather\nimport os\nimport re\nimport sys  \nimport gc\nimport random\nimport pandas as pd\nimport numpy as np\nimport gensim\nfrom gensim.models import Word2Vec\nfrom gensim.models.word2vec import LineSentence\nfrom scipy import stats\nimport tensorflow as tf\nimport keras\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.optimizers import *\nfrom keras.callbacks import *\nfrom keras.preprocessing import text, sequence\nfrom keras.utils import to_categorical\nfrom keras.engine.topology import Layer\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.utils import np_utils\nfrom keras.utils.training_utils import multi_gpu_model\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import  accuracy_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import f1_score\nimport warnings\nfrom TextModel import *\nwarnings.filterwarnings('ignore')\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = tf.Session(config=config)\n\n\n# In[2]:\nprint ('13.device_start_GRU_pred.py')\n\ndf_doc = pd.read_csv('01.device_click_app_sorted_by_start.csv')\ndeviceid_test=pd.read_csv('input/deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv('input/deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndf_total = pd.concat([deviceid_train, deviceid_test])\ndf_doc = df_doc.merge(df_total, on='device_id', how='left')\n\n\ndf_wv2_all = pd.read_csv('w2c_all_emb.csv')\n\ndic_w2c_all = {}\nfor row in df_wv2_all.values :\n    app_id = row[0]\n    vector = row[1:]\n    dic_w2c_all[app_id] = vector\n\n\n# In[3]:\n\n\ndf_doc['sex'] = df_doc['sex'].apply(lambda x:str(x))\ndf_doc['age'] = df_doc['age'].apply(lambda x:str(x))\ndef tool(x):\n    if x=='nan':\n        return x\n    else:\n        return str(int(float(x)))\ndf_doc['sex']=df_doc['sex'].apply(tool)\ndf_doc['age']=df_doc['age'].apply(tool)\ndf_doc['sex_age']=df_doc['sex']+'-'+df_doc['age']\ndf_doc = df_doc.replace({'nan':np.NaN,'nan-nan':np.NaN})\ntrain = df_doc[df_doc['sex_age'].notnull()]\ntest = df_doc[df_doc['sex_age'].isnull()]\ntrain.reset_index(drop=True, inplace=True)\ntest.reset_index(drop=True, inplace=True)\n\nlb = LabelEncoder()\ntrain_label = lb.fit_transform(train['sex_age'].values)\ntrain['class'] = train_label\n\n\n# In[5]:\n\n\ncolumn_name=\"app_list\"\nword_seq_len = 900\nvictor_size = 200\nnum_words = 35000\nbatch_size = 64\nclassification = 22\nkfold=10\n\n\n# In[6]:\n\n\nfrom sklearn.metrics import log_loss\n\ndef get_mut_label(y_label) :\n    results = []\n    for ele in y_label :\n        results.append(ele.argmax())\n    return  results  \n\nclass RocAucEvaluation(Callback):\n    def __init__(self, validation_data=(), interval=1):\n        super(Callback, self).__init__()\n\n        self.interval = interval\n        self.X_val, self.y_val = validation_data\n\n    def on_epoch_end(self, epoch, logs={}):\n        if epoch % self.interval == 0:\n            y_pred = self.model.predict(self.X_val, verbose=0)\n            val_y = get_mut_label(self.y_val)\n            score = log_loss(val_y, y_pred)\n            print(\"\\n mlogloss - epoch: %d - score: %.6f \\n\" % (epoch+1, score))\n\n\n# In[7]:\n\n\n#词向量\ndef w2v_pad(df_train,df_test,col, maxlen_,victor_size, num_words):\n\n    tokenizer = text.Tokenizer(num_words=num_words, lower=False,filters=\"\")\n    
tokenizer.fit_on_texts(list(df_train[col].values)+list(df_test[col].values))\n\n    train_ = sequence.pad_sequences(tokenizer.texts_to_sequences(df_train[col].values), maxlen=maxlen_)\n    test_ = sequence.pad_sequences(tokenizer.texts_to_sequences(df_test[col].values), maxlen=maxlen_)\n    \n    word_index = tokenizer.word_index\n    \n    count = 0\n    nb_words = len(word_index)\n    print(nb_words)\n    all_data=pd.concat([df_train[col],df_test[col]])\n    file_name = 'embedding/' + 'Word2Vec_start_' + col  +\"_\"+ str(victor_size) + '.model'\n    if not os.path.exists(file_name):\n        model = Word2Vec([[word for word in document.split(' ')] for document in all_data.values],\n                         size=victor_size, window=5, iter=10, workers=11, seed=2018, min_count=2)\n        model.save(file_name)\n    else:\n        model = Word2Vec.load(file_name)\n    print(\"add word2vec finished....\")    \n\n\n                 \n    embedding_word2vec_matrix = np.zeros((nb_words + 1, victor_size))\n    for word, i in word_index.items():\n        embedding_vector = model[word] if word in model else None\n        if embedding_vector is not None:\n            count += 1\n            embedding_word2vec_matrix[i] = embedding_vector\n        else:\n            unk_vec = np.random.random(victor_size) * 0.5\n            unk_vec = unk_vec - unk_vec.mean()\n            embedding_word2vec_matrix[i] = unk_vec\n\n    embedding_w2c_all = np.zeros((nb_words + 1, victor_size))  \n    for word, i in word_index.items():\n        embedding_vector = dic_w2c_all[word] \n        embedding_w2c_all[i] = embedding_vector\n                    \n\n    #embedding_matrix = np.concatenate((embedding_word2vec_matrix,embedding_w2c_all),axis=1)\n    embedding_matrix = embedding_word2vec_matrix\n    \n    return train_, test_, word_index, embedding_matrix\n\n\n# In[8]:\n\n\ntrain_, test_,word2idx, word_embedding = w2v_pad(train,test,column_name, word_seq_len,victor_size, num_words)\n\n\n# In[11]:\n\n\nmy_opt=\"bi_gru_model\"\n#参数\nY = train['class'].values\n\nif not os.path.exists(\"cache/\"+my_opt):\n    os.mkdir(\"cache/\"+my_opt)\n\n\n# In[12]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\ngc.collect()\nseed = 2006\nnum_folds = 10\nkf = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed).split(train_, Y)\n\n\n# In[13]:\n\n\nepochs = 4\nmy_opt=eval(my_opt)\ntrain_model_pred = np.zeros((train_.shape[0], classification))\ntest_model_pred = np.zeros((test_.shape[0], classification))\nfor i, (train_fold, val_fold) in enumerate(kf):\n    X_train, X_valid, = train_[train_fold, :], train_[val_fold, :]\n    y_train, y_valid = Y[train_fold], Y[val_fold]\n\n    y_tra = to_categorical(y_train)\n    y_val = to_categorical(y_valid)\n    \n    #模型\n    name = str(my_opt.__name__)    \n\n    model = my_opt(word_seq_len, word_embedding, classification)    \n    \n    \n    RocAuc = RocAucEvaluation(validation_data=(X_valid, y_val), interval=1)\n\n    hist = model.fit(X_train, y_tra, batch_size=batch_size, epochs=epochs, validation_data=(X_valid, y_val),\n                     callbacks=[RocAuc])   \n    \n    \n    train_model_pred[val_fold, :] =  model.predict(X_valid)\n\n\n# In[26]:\n\n\n#模型\n#用全部的数据预测\ntrain_label = to_categorical(Y)\nname = str(my_opt.__name__)    \n\nmodel = my_opt(word_seq_len, word_embedding, classification)    \n\n\nRocAuc = RocAucEvaluation(validation_data=(train_, train_label), interval=1)\n\nhist = model.fit(train_, train_label, batch_size=batch_size, epochs=epochs, 
validation_data=(train_, train_label),\n                 callbacks=[RocAuc])\n\n\ntest_model_pred = model.predict(test_)\n\n\n# In[27]:\n\n\ndf_train_pred = pd.DataFrame(train_model_pred)\ndf_test_pred = pd.DataFrame(test_model_pred)\ndf_train_pred.columns = ['device_start_GRU_pred_' + str(i) for i in range(22)]\ndf_test_pred.columns = ['device_start_GRU_pred_' + str(i) for i in range(22)]\n\n\n# In[35]:\n\n\ndf_train_pred = pd.concat([train[['device_id']], df_train_pred], axis=1)\ndf_test_pred = pd.concat([test[['device_id']], df_test_pred], axis=1)\n\n\n# In[37]:\n\n\ndf_results = pd.concat([df_train_pred, df_test_pred])\ndf_results.to_csv('device_start_GRU_pred.csv', index=None)\n\n"
  },
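  {
    "path": "THLUO/notes/sketch_oof_stacking.py",
    "content": "# Sketch only: the out-of-fold scheme shared by scripts 13-19, with\n# LogisticRegression standing in for the Keras models from TextModel.py.\n# Per-fold validation predictions fill the train-side probability features,\n# and a refit on all data gives the test side. Names here are illustrative.\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import StratifiedKFold\n\ndef oof_proba(X_train, y, X_test, n_classes, n_folds=10, seed=2006):\n    train_pred = np.zeros((X_train.shape[0], n_classes))\n    kf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=seed)\n    for trn_idx, val_idx in kf.split(X_train, y):\n        clf = LogisticRegression(max_iter=1000)\n        clf.fit(X_train[trn_idx], y[trn_idx])\n        # out-of-fold probabilities become stacking features for the train set\n        train_pred[val_idx] = clf.predict_proba(X_train[val_idx])\n    clf = LogisticRegression(max_iter=1000).fit(X_train, y)\n    return train_pred, clf.predict_proba(X_test)\n"
  },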
  {
    "path": "THLUO/14.device_start_GRU_pred_age.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\n# coding: utf-8\nimport feather\nimport os\nimport re\nimport sys  \nimport gc\nimport random\nimport pandas as pd\nimport numpy as np\nimport gensim\nfrom gensim.models import Word2Vec\nfrom gensim.models.word2vec import LineSentence\nfrom scipy import stats\nimport tensorflow as tf\nimport keras\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.optimizers import *\nfrom keras.callbacks import *\nfrom keras.preprocessing import text, sequence\nfrom keras.utils import to_categorical\nfrom keras.engine.topology import Layer\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.utils import np_utils\nfrom keras.utils.training_utils import multi_gpu_model\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import  accuracy_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import f1_score\nfrom TextModel import *\nimport warnings\nwarnings.filterwarnings('ignore')\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = tf.Session(config=config)\n\n\n# In[2]:\nprint('14.device_start_GRU_pred_age.py')\n\ndf_doc = pd.read_csv('01.device_click_app_sorted_by_start.csv')\ndeviceid_test=pd.read_csv('input/deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv('input/deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndf_total = pd.concat([deviceid_train, deviceid_test])\ndf_doc = df_doc.merge(df_total, on='device_id', how='left')\n\n\ndf_wv2_all = pd.read_csv('w2c_all_emb.csv')\n\ndic_w2c_all = {}\nfor row in df_wv2_all.values :\n    app_id = row[0]\n    vector = row[1:]\n    dic_w2c_all[app_id] = vector\n\n\n# In[3]:\n\n\ntrain = df_doc[df_doc['age'].notnull()]\ntest = df_doc[df_doc['age'].isnull()]\ntrain.reset_index(drop=True, inplace=True)\ntest.reset_index(drop=True, inplace=True)\n\nlb = LabelEncoder()\ntrain_label = lb.fit_transform(train['age'].values)\ntrain['class'] = train_label\n\n\n# In[5]:\n\n\ncolumn_name=\"app_list\"\nword_seq_len = 900\nvictor_size = 200\nnum_words = 35000\nbatch_size = 64\nclassification = 11\nkfold=10\n\n\n# In[6]:\n\n\nfrom sklearn.metrics import log_loss\n\ndef get_mut_label(y_label) :\n    results = []\n    for ele in y_label :\n        results.append(ele.argmax())\n    return  results  \n\nclass RocAucEvaluation(Callback):\n    def __init__(self, validation_data=(), interval=1):\n        super(Callback, self).__init__()\n\n        self.interval = interval\n        self.X_val, self.y_val = validation_data\n\n    def on_epoch_end(self, epoch, logs={}):\n        if epoch % self.interval == 0:\n            y_pred = self.model.predict(self.X_val, verbose=0)\n            val_y = get_mut_label(self.y_val)\n            score = log_loss(val_y, y_pred)\n            print(\"\\n mlogloss - epoch: %d - score: %.6f \\n\" % (epoch+1, score))\n\n\n# In[7]:\n\n\n#词向量\ndef w2v_pad(df_train,df_test,col, maxlen_,victor_size, num_words):\n\n    tokenizer = text.Tokenizer(num_words=num_words, lower=False,filters=\"\")\n    tokenizer.fit_on_texts(list(df_train[col].values)+list(df_test[col].values))\n\n    train_ = sequence.pad_sequences(tokenizer.texts_to_sequences(df_train[col].values), maxlen=maxlen_)\n    test_ = sequence.pad_sequences(tokenizer.texts_to_sequences(df_test[col].values), maxlen=maxlen_)\n    \n    word_index = tokenizer.word_index\n    \n    count = 0\n    nb_words = len(word_index)\n    print(nb_words)\n   
 all_data=pd.concat([df_train[col],df_test[col]])\n    file_name = 'embedding/' + 'Word2Vec_start_' + col  +\"_\"+ str(victor_size) + '.model'\n    if not os.path.exists(file_name):\n        model = Word2Vec([[word for word in document.split(' ')] for document in all_data.values],\n                         size=victor_size, window=5, iter=10, workers=11, seed=2018, min_count=2)\n        model.save(file_name)\n    else:\n        model = Word2Vec.load(file_name)\n    print(\"add word2vec finished....\")    \n\n\n                 \n    embedding_word2vec_matrix = np.zeros((nb_words + 1, victor_size))\n    for word, i in word_index.items():\n        embedding_vector = model[word] if word in model else None\n        if embedding_vector is not None:\n            count += 1\n            embedding_word2vec_matrix[i] = embedding_vector\n        else:\n            unk_vec = np.random.random(victor_size) * 0.5\n            unk_vec = unk_vec - unk_vec.mean()\n            embedding_word2vec_matrix[i] = unk_vec\n\n    embedding_w2c_all = np.zeros((nb_words + 1, victor_size))  \n    for word, i in word_index.items():\n        embedding_vector = dic_w2c_all[word] \n        embedding_w2c_all[i] = embedding_vector\n                    \n\n    #embedding_matrix = np.concatenate((embedding_word2vec_matrix,embedding_w2c_all),axis=1)\n    embedding_matrix = embedding_word2vec_matrix\n    \n    return train_, test_, word_index, embedding_matrix\n\n\n# In[8]:\n\n\ntrain_, test_,word2idx, word_embedding = w2v_pad(train,test,column_name, word_seq_len,victor_size, num_words)\n\n\n# In[11]:\n\n\nmy_opt=\"bi_gru_model\"\n#参数\nY = train['class'].values\n\nif not os.path.exists(\"cache/\"+my_opt):\n    os.mkdir(\"cache/\"+my_opt)\n\n\n# In[17]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\ngc.collect()\nseed = 2006\nnum_folds = 10\nkf = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed).split(train_, Y)\n\nepochs = 4\nmy_opt=eval(my_opt)\ntrain_model_pred = np.zeros((train_.shape[0], classification))\ntest_model_pred = np.zeros((test_.shape[0], classification))\nfor i, (train_fold, val_fold) in enumerate(kf):\n    X_train, X_valid, = train_[train_fold, :], train_[val_fold, :]\n    y_train, y_valid = Y[train_fold], Y[val_fold]\n\n    y_tra = to_categorical(y_train)\n    y_val = to_categorical(y_valid)\n    \n    #模型\n    name = str(my_opt.__name__)    \n\n    model = my_opt(word_seq_len, word_embedding, classification)    \n    \n    \n    RocAuc = RocAucEvaluation(validation_data=(X_valid, y_val), interval=1)\n\n    hist = model.fit(X_train, y_tra, batch_size=batch_size, epochs=epochs, validation_data=(X_valid, y_val),\n                     callbacks=[RocAuc])   \n    \n    \n    train_model_pred[val_fold, :] =  model.predict(X_valid)\n\n\n# In[21]:\n\n\n#模型\n#用全部的数据预测\ntrain_label = to_categorical(Y)\nname = str(my_opt.__name__)    \n\nmodel = my_opt(word_seq_len, word_embedding, classification)    \n\n\nRocAuc = RocAucEvaluation(validation_data=(train_, train_label), interval=1)\n\nhist = model.fit(train_, train_label, batch_size=batch_size, epochs=epochs, validation_data=(train_, train_label),\n                 callbacks=[RocAuc])   \n\n\ntest_model_pred =  model.predict(test_)\n\n\n# In[22]:\n\n\ndf_train_pred = pd.DataFrame(train_model_pred)\ndf_test_pred = pd.DataFrame(test_model_pred)\ndf_train_pred.columns = ['device_start_GRU_pred_age_' + str(i) for i in range(11)]\ndf_test_pred.columns = ['device_start_GRU_pred_age_' + str(i) for i in range(11)]\n\n\n# 
In[23]:\n\n\ndf_train_pred = pd.concat([train[['device_id']], df_train_pred], axis=1)\ndf_test_pred = pd.concat([test[['device_id']], df_test_pred], axis=1)\n\n\n# In[24]:\n\n\ndf_results = pd.concat([df_train_pred, df_test_pred])\ndf_results.to_csv('device_start_GRU_pred_age.csv', index=None)\n\n"
  },
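  {
    "path": "THLUO/notes/sketch_joint_sex_age.py",
    "content": "# Sketch only: script 13 models the 22-way joint sex_age target while this\n# script models the 11-way age target. Under an independence assumption the\n# joint probabilities can be composed from separate sex and age models. Sex\n# codes 1/2 are an assumption here; note that the scripts' LabelEncoder sorts\n# the joint string labels lexicographically ('1-10' before '1-2'), which\n# lb.classes_ recovers.\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\n\nsexes = ['1', '2']\nages = [str(i) for i in range(11)]\nlb = LabelEncoder().fit([s + '-' + a for s in sexes for a in ages])\n\ndef joint_proba(p_sex, p_age):\n    # p_sex: (n, 2) ordered like sexes; p_age: (n, 11) ordered like ages\n    out = np.zeros((p_sex.shape[0], len(lb.classes_)))\n    for j, label in enumerate(lb.classes_):\n        s, a = label.split('-')\n        out[:, j] = p_sex[:, sexes.index(s)] * p_age[:, ages.index(a)]\n    return out\n"
  },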
  {
    "path": "THLUO/15.device_all_GRU_pred.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\n# coding: utf-8\nimport feather\nimport os\nimport re\nimport sys  \nimport gc\nimport random\nimport pandas as pd\nimport numpy as np\nimport gensim\nfrom gensim.models import Word2Vec\nfrom gensim.models.word2vec import LineSentence\nfrom scipy import stats\nimport tensorflow as tf\nimport keras\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.optimizers import *\nfrom keras.callbacks import *\nfrom keras.preprocessing import text, sequence\nfrom keras.utils import to_categorical\nfrom keras.engine.topology import Layer\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.utils import np_utils\nfrom keras.utils.training_utils import multi_gpu_model\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import  accuracy_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom TextModel import *\nfrom sklearn.metrics import f1_score\nimport warnings\nwarnings.filterwarnings('ignore')\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = tf.Session(config=config)\n\n\n# In[2]:\nprint('15.device_all_GRU_pred.py')\n\ndf_doc = pd.read_csv('03.device_click_app_sorted_by_all.csv')\ndeviceid_test=pd.read_csv('input/deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv('input/deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndf_total = pd.concat([deviceid_train, deviceid_test])\ndf_doc = df_doc.merge(df_total, on='device_id', how='left')\n\n\ndf_wv2_all = pd.read_csv('w2c_all_emb.csv')\n\ndic_w2c_all = {}\nfor row in df_wv2_all.values :\n    app_id = row[0]\n    vector = row[1:]\n    dic_w2c_all[app_id] = vector\n\n# In[3]:\n\n\ndf_doc['sex'] = df_doc['sex'].apply(lambda x:str(x))\ndf_doc['age'] = df_doc['age'].apply(lambda x:str(x))\ndef tool(x):\n    if x=='nan':\n        return x\n    else:\n        return str(int(float(x)))\ndf_doc['sex']=df_doc['sex'].apply(tool)\ndf_doc['age']=df_doc['age'].apply(tool)\ndf_doc['sex_age']=df_doc['sex']+'-'+df_doc['age']\ndf_doc = df_doc.replace({'nan':np.NaN,'nan-nan':np.NaN})\ntrain = df_doc[df_doc['sex_age'].notnull()]\ntest = df_doc[df_doc['sex_age'].isnull()]\ntrain.reset_index(drop=True, inplace=True)\ntest.reset_index(drop=True, inplace=True)\n\nlb = LabelEncoder()\ntrain_label = lb.fit_transform(train['sex_age'].values)\ntrain['class'] = train_label\n\n\n# In[6]:\n\n\ncolumn_name=\"app_list\"\nword_seq_len = 1800\nvictor_size = 200\nnum_words = 35000\nbatch_size = 64\nclassification = 22\nkfold=10\n\n\n# In[7]:\n\n\nfrom sklearn.metrics import log_loss\n\ndef get_mut_label(y_label) :\n    results = []\n    for ele in y_label :\n        results.append(ele.argmax())\n    return  results  \n\nclass RocAucEvaluation(Callback):\n    def __init__(self, validation_data=(), interval=1):\n        super(Callback, self).__init__()\n\n        self.interval = interval\n        self.X_val, self.y_val = validation_data\n\n    def on_epoch_end(self, epoch, logs={}):\n        if epoch % self.interval == 0:\n            y_pred = self.model.predict(self.X_val, verbose=0)\n            val_y = get_mut_label(self.y_val)\n            score = log_loss(val_y, y_pred)\n            print(\"\\n mlogloss - epoch: %d - score: %.6f \\n\" % (epoch+1, score))\n\n\n# In[14]:\n\n\n#词向量\ndef w2v_pad(df_train,df_test,col, maxlen_,victor_size, num_words):\n\n    tokenizer = text.Tokenizer(num_words=num_words, lower=False,filters=\"\")\n    
tokenizer.fit_on_texts(list(df_train[col].values)+list(df_test[col].values))\n\n    train_ = sequence.pad_sequences(tokenizer.texts_to_sequences(df_train[col].values), maxlen=maxlen_)\n    test_ = sequence.pad_sequences(tokenizer.texts_to_sequences(df_test[col].values), maxlen=maxlen_)\n    \n    word_index = tokenizer.word_index\n    \n    count = 0\n    nb_words = len(word_index)\n    print(nb_words)\n    all_data=pd.concat([df_train[col],df_test[col]])\n    file_name = 'embedding/' + 'Word2Vec_all' + col  +\"_\"+ str(victor_size) + '.model'\n    if not os.path.exists(file_name):\n        model = Word2Vec([[word for word in document.split(' ')] for document in all_data.values],\n                         size=victor_size, window=30, iter=10, workers=11, seed=2018, min_count=2)\n        model.save(file_name)\n    else:\n        model = Word2Vec.load(file_name)\n    print(\"add word2vec finished....\")    \n\n\n                 \n    embedding_word2vec_matrix = np.zeros((nb_words + 1, victor_size))\n    for word, i in word_index.items():\n        embedding_vector = model[word] if word in model else None\n        if embedding_vector is not None:\n            count += 1\n            embedding_word2vec_matrix[i] = embedding_vector\n        else:\n            unk_vec = np.random.random(victor_size) * 0.5\n            unk_vec = unk_vec - unk_vec.mean()\n            embedding_word2vec_matrix[i] = unk_vec\n\n    embedding_w2c_all = np.zeros((nb_words + 1, victor_size))  \n    for word, i in word_index.items():\n        embedding_vector = dic_w2c_all[word] \n        embedding_w2c_all[i] = embedding_vector\n                    \n\n    #embedding_matrix = np.concatenate((embedding_word2vec_matrix,embedding_w2c_all),axis=1)\n    embedding_matrix = embedding_word2vec_matrix\n    \n    return train_, test_, word_index, embedding_matrix\n\n\n# In[15]:\n\n\ntrain_, test_,word2idx, word_embedding = w2v_pad(train,test,column_name, word_seq_len,victor_size, num_words)\n\n\n# In[21]:\n\n\nmy_opt=\"bi_gru_model\"\n#参数\nY = train['class'].values\n\nif not os.path.exists(\"cache/\"+my_opt):\n    os.mkdir(\"cache/\"+my_opt)\n\n\n# In[22]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\ngc.collect()\nseed = 2006\nnum_folds = 10\nkf = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed).split(train_, Y)\n\n\n# In[23]:\n\n\nepochs = 4\nmy_opt=eval(my_opt)\ntrain_model_pred = np.zeros((train_.shape[0], classification))\ntest_model_pred = np.zeros((test_.shape[0], classification))\nfor i, (train_fold, val_fold) in enumerate(kf):\n    X_train, X_valid, = train_[train_fold, :], train_[val_fold, :]\n    y_train, y_valid = Y[train_fold], Y[val_fold]\n\n    y_tra = to_categorical(y_train)\n    y_val = to_categorical(y_valid)\n    \n    #模型\n    name = str(my_opt.__name__)    \n\n    model = my_opt(word_seq_len, word_embedding, classification)    \n    \n    \n    RocAuc = RocAucEvaluation(validation_data=(X_valid, y_val), interval=1)\n\n    hist = model.fit(X_train, y_tra, batch_size=batch_size, epochs=epochs, validation_data=(X_valid, y_val),\n                     callbacks=[RocAuc])   \n    \n    \n    train_model_pred[val_fold, :] =  model.predict(X_valid)\n\n    del model\n    del hist\n    gc.collect()\n\n\n# In[27]:\n\n\n#模型\n#用全部的数据预测\ntrain_label = to_categorical(Y)\nname = str(my_opt.__name__)    \n\nmodel = my_opt(word_seq_len, word_embedding, classification)    \n\n\nRocAuc = RocAucEvaluation(validation_data=(train_, train_label), interval=1)\n\nhist = model.fit(train_, 
train_label, batch_size=batch_size, epochs=epochs, validation_data=(train_, train_label),\n                 callbacks=[RocAuc])   \n\n\ntest_model_pred =  model.predict(test_)\n\n\n# In[28]:\n\n\ndf_train_pred = pd.DataFrame(train_model_pred)\ndf_test_pred = pd.DataFrame(test_model_pred)\ndf_train_pred.columns = ['device_all_GRU_pred_' + str(i) for i in range(22)]\ndf_test_pred.columns = ['device_all_GRU_pred_' + str(i) for i in range(22)]\n\n\n# In[29]:\n\n\ndf_train_pred = pd.concat([train[['device_id']], df_train_pred], axis=1)\ndf_test_pred = pd.concat([test[['device_id']], df_test_pred], axis=1)\n\n\n# In[30]:\n\n\ndf_results = pd.concat([df_train_pred, df_test_pred])\ndf_results.to_csv('device_all_GRU_pred.csv', index=None)\n\n"
  },
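  {
    "path": "THLUO/notes/sketch_bi_gru_model.py",
    "content": "# Sketch only: TextModel.py (which defines bi_gru_model, get_text_capsule,\n# get_text_cnn2, get_text_dpcnn and get_text_lstm1) is not included in this\n# dump. A minimal bi-GRU with the same factory signature under the old Keras\n# API these scripts import; layer sizes and dropout rates are guesses.\nfrom keras.layers import (Input, Embedding, SpatialDropout1D, Bidirectional,\n                          GRU, GlobalMaxPooling1D, Dense)\nfrom keras.models import Model\n\ndef bi_gru_model_sketch(seq_len, embedding_matrix, n_classes):\n    inp = Input(shape=(seq_len,))\n    # frozen embedding initialised from the pre-trained word2vec matrix\n    x = Embedding(embedding_matrix.shape[0], embedding_matrix.shape[1],\n                  weights=[embedding_matrix], trainable=False)(inp)\n    x = SpatialDropout1D(0.2)(x)\n    x = Bidirectional(GRU(128, return_sequences=True))(x)\n    x = GlobalMaxPooling1D()(x)\n    out = Dense(n_classes, activation='softmax')(x)\n    model = Model(inputs=inp, outputs=out)\n    model.compile(loss='categorical_crossentropy', optimizer='adam')\n    return model\n"
  },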
  {
    "path": "THLUO/16.device_start_capsule_pred.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\n# coding: utf-8\nimport feather\nimport os\nimport re\nimport sys  \nimport gc\nimport random\nimport pandas as pd\nimport numpy as np\nimport gensim\nfrom gensim.models import Word2Vec\nfrom gensim.models.word2vec import LineSentence\nfrom scipy import stats\nimport tensorflow as tf\nimport keras\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.optimizers import *\nfrom keras.callbacks import *\nfrom keras.preprocessing import text, sequence\nfrom keras.utils import to_categorical\nfrom keras.engine.topology import Layer\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.utils import np_utils\nfrom keras.utils.training_utils import multi_gpu_model\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import  accuracy_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import f1_score\nimport warnings\nwarnings.filterwarnings('ignore')\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = tf.Session(config=config)\n\n\n# In[2]:\nprint ('16.device_start_capsule_pred.py')\n\ndf_doc = pd.read_csv('01.device_click_app_sorted_by_start.csv')\ndeviceid_test=pd.read_csv('input/deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv('input/deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndf_total = pd.concat([deviceid_train, deviceid_test])\ndf_doc = df_doc.merge(df_total, on='device_id', how='left')\n\n\ndf_wv2_all = pd.read_csv('w2c_all_emb.csv')\n\ndic_w2c_all = {}\nfor row in df_wv2_all.values :\n    app_id = row[0]\n    vector = row[1:]\n    dic_w2c_all[app_id] = vector\n\n\n# In[3]:\n\n\ndf_doc['sex'] = df_doc['sex'].apply(lambda x:str(x))\ndf_doc['age'] = df_doc['age'].apply(lambda x:str(x))\ndef tool(x):\n    if x=='nan':\n        return x\n    else:\n        return str(int(float(x)))\ndf_doc['sex']=df_doc['sex'].apply(tool)\ndf_doc['age']=df_doc['age'].apply(tool)\ndf_doc['sex_age']=df_doc['sex']+'-'+df_doc['age']\ndf_doc = df_doc.replace({'nan':np.NaN,'nan-nan':np.NaN})\ntrain = df_doc[df_doc['sex_age'].notnull()]\ntest = df_doc[df_doc['sex_age'].isnull()]\ntrain.reset_index(drop=True, inplace=True)\ntest.reset_index(drop=True, inplace=True)\n\nlb = LabelEncoder()\ntrain_label = lb.fit_transform(train['sex_age'].values)\ntrain['class'] = train_label\n\n\n# In[5]:\n\n\ncolumn_name=\"app_list\"\nword_seq_len = 900\nvictor_size = 200\nnum_words = 35000\nbatch_size = 64\nclassification = 22\nkfold=10\n\n\n# In[6]:\n\n\nfrom sklearn.metrics import log_loss\n\ndef get_mut_label(y_label) :\n    results = []\n    for ele in y_label :\n        results.append(ele.argmax())\n    return  results  \n\nclass RocAucEvaluation(Callback):\n    def __init__(self, validation_data=(), interval=1):\n        super(Callback, self).__init__()\n\n        self.interval = interval\n        self.X_val, self.y_val = validation_data\n\n    def on_epoch_end(self, epoch, logs={}):\n        if epoch % self.interval == 0:\n            y_pred = self.model.predict(self.X_val, verbose=0)\n            val_y = get_mut_label(self.y_val)\n            score = log_loss(val_y, y_pred)\n            print(\"\\n mlogloss - epoch: %d - score: %.6f \\n\" % (epoch+1, score))\n\n\n# In[7]:\n\n\n#词向量\ndef w2v_pad(df_train,df_test,col, maxlen_,victor_size, num_words):\n\n    tokenizer = text.Tokenizer(num_words=num_words, lower=False,filters=\"\")\n    
tokenizer.fit_on_texts(list(df_train[col].values)+list(df_test[col].values))\n\n    train_ = sequence.pad_sequences(tokenizer.texts_to_sequences(df_train[col].values), maxlen=maxlen_)\n    test_ = sequence.pad_sequences(tokenizer.texts_to_sequences(df_test[col].values), maxlen=maxlen_)\n    \n    word_index = tokenizer.word_index\n    \n    count = 0\n    nb_words = len(word_index)\n    print(nb_words)\n    all_data=pd.concat([df_train[col],df_test[col]])\n    file_name = 'embedding/' + 'Word2Vec_start_' + col  +\"_\"+ str(victor_size) + '.model'\n    if not os.path.exists(file_name):\n        model = Word2Vec([[word for word in document.split(' ')] for document in all_data.values],\n                         size=victor_size, window=5, iter=10, workers=11, seed=2018, min_count=2)\n        model.save(file_name)\n    else:\n        model = Word2Vec.load(file_name)\n    print(\"add word2vec finished....\")    \n\n\n                 \n    embedding_word2vec_matrix = np.zeros((nb_words + 1, victor_size))\n    for word, i in word_index.items():\n        embedding_vector = model[word] if word in model else None\n        if embedding_vector is not None:\n            count += 1\n            embedding_word2vec_matrix[i] = embedding_vector\n        else:\n            unk_vec = np.random.random(victor_size) * 0.5\n            unk_vec = unk_vec - unk_vec.mean()\n            embedding_word2vec_matrix[i] = unk_vec\n\n    embedding_w2c_all = np.zeros((nb_words + 1, victor_size))  \n    for word, i in word_index.items():\n        embedding_vector = dic_w2c_all[word] \n        embedding_w2c_all[i] = embedding_vector\n                    \n\n    #embedding_matrix = np.concatenate((embedding_word2vec_matrix,embedding_w2c_all),axis=1)\n    embedding_matrix = embedding_word2vec_matrix\n    \n    return train_, test_, word_index, embedding_matrix\n\n\n# In[8]:\n\n\ntrain_, test_,word2idx, word_embedding = w2v_pad(train,test,column_name, word_seq_len,victor_size, num_words)\n\n\n# In[10]:\n\n\nfrom TextModel import *\n\n\n# In[18]:\n\n\nmy_opt=\"get_text_capsule\"\n#参数\nY = train['class'].values\n\nif not os.path.exists(\"cache/\"+my_opt):\n    os.mkdir(\"cache/\"+my_opt)    \n    \n\n\n# In[19]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\ngc.collect()\nseed = 2006\nnum_folds = 5\nkf = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed).split(train_, Y)\n\n\n# In[20]:\n\n\nepochs = 10\nmy_opt=eval(my_opt)\ntrain_model_pred = np.zeros((train_.shape[0], classification))\ntest_model_pred = np.zeros((test_.shape[0], classification))\nfor i, (train_fold, val_fold) in enumerate(kf):\n    X_train, X_valid, = train_[train_fold, :], train_[val_fold, :]\n    y_train, y_valid = Y[train_fold], Y[val_fold]\n\n    y_tra = to_categorical(y_train)\n    y_val = to_categorical(y_valid)\n    \n    #模型\n    name = str(my_opt.__name__)    \n\n    model = my_opt(word_seq_len, word_embedding, classification)    \n    \n    \n    RocAuc = RocAucEvaluation(validation_data=(X_valid, y_val), interval=1)\n\n    hist = model.fit(X_train, y_tra, batch_size=batch_size, epochs=epochs, validation_data=(X_valid, y_val),\n                     callbacks=[RocAuc])   \n    \n    \n    train_model_pred[val_fold, :] =  model.predict(X_valid)\n\n\n# In[24]:\n\n\n#模型\n#用全部的数据预测\ntrain_label = to_categorical(Y)\nname = str(my_opt.__name__)    \n\nmodel = my_opt(word_seq_len, word_embedding, classification)    \n\n\nRocAuc = RocAucEvaluation(validation_data=(train_, train_label), interval=1)\n\nhist = 
model.fit(train_, train_label, batch_size=batch_size, epochs=epochs, validation_data=(train_, train_label),\n                 callbacks=[RocAuc])   \n\n\ntest_model_pred =  model.predict(test_)\n\n\n# In[25]:\n\n\ndf_train_pred = pd.DataFrame(train_model_pred)\ndf_test_pred = pd.DataFrame(test_model_pred)\ndf_train_pred.columns = ['device_start_capsule_pred_' + str(i) for i in range(22)]\ndf_test_pred.columns = ['device_start_capsule_pred_' + str(i) for i in range(22)]\n\n\n# In[26]:\n\n\ndf_train_pred = pd.concat([train[['device_id']], df_train_pred], axis=1)\ndf_test_pred = pd.concat([test[['device_id']], df_test_pred], axis=1)\n\n\n# In[27]:\n\n\ndf_results = pd.concat([df_train_pred, df_test_pred])\ndf_results.to_csv('device_start_capsule_pred.csv', index=None)\n\n"
  },
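  {
    "path": "THLUO/notes/sketch_gensim4_port.py",
    "content": "# Sketch only: the w2v_pad helpers use gensim<4 keyword names. If the\n# pipeline is rerun under gensim>=4, Word2Vec(size=..., iter=...) became\n# vector_size=/epochs=, and the model[word] / 'word in model' lookups moved\n# to the .wv attribute. Equivalent training call under the newer API:\nfrom gensim.models import Word2Vec\n\ndef train_w2v_gensim4(token_lists, vector_size=200, window=5):\n    # token_lists mirrors [doc.split(' ') for doc in all_data.values]\n    return Word2Vec(token_lists, vector_size=vector_size, window=window,\n                    epochs=10, workers=11, seed=2018, min_count=2)\n\n# lookups: word in model -> word in model.wv; model[word] -> model.wv[word];\n# model.wv.vocab -> model.wv.key_to_index\n"
  },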
  {
    "path": "THLUO/17.device_start_textcnn_pred.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\n# coding: utf-8\nimport feather\nimport os\nimport re\nimport sys  \nimport gc\nimport random\nimport pandas as pd\nimport numpy as np\nimport gensim\nfrom gensim.models import Word2Vec\nfrom gensim.models.word2vec import LineSentence\nfrom scipy import stats\nimport tensorflow as tf\nimport keras\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.optimizers import *\nfrom keras.callbacks import *\nfrom keras.preprocessing import text, sequence\nfrom keras.utils import to_categorical\nfrom keras.engine.topology import Layer\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.utils import np_utils\nfrom keras.utils.training_utils import multi_gpu_model\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import  accuracy_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import f1_score\nimport warnings\nwarnings.filterwarnings('ignore')\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = tf.Session(config=config)\n\n\n# In[2]:\nprint ('17.device_start_textcnn_pred.py')\n\ndf_doc = pd.read_csv('01.device_click_app_sorted_by_start.csv')\ndeviceid_test=pd.read_csv('input/deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv('input/deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndf_total = pd.concat([deviceid_train, deviceid_test])\ndf_doc = df_doc.merge(df_total, on='device_id', how='left')\n\n\ndf_wv2_all = pd.read_csv('w2c_all_emb.csv')\n\ndic_w2c_all = {}\nfor row in df_wv2_all.values :\n    app_id = row[0]\n    vector = row[1:]\n    dic_w2c_all[app_id] = vector\n\n\n# In[3]:\n\n\ndf_doc['sex'] = df_doc['sex'].apply(lambda x:str(x))\ndf_doc['age'] = df_doc['age'].apply(lambda x:str(x))\ndef tool(x):\n    if x=='nan':\n        return x\n    else:\n        return str(int(float(x)))\ndf_doc['sex']=df_doc['sex'].apply(tool)\ndf_doc['age']=df_doc['age'].apply(tool)\ndf_doc['sex_age']=df_doc['sex']+'-'+df_doc['age']\ndf_doc = df_doc.replace({'nan':np.NaN,'nan-nan':np.NaN})\ntrain = df_doc[df_doc['sex_age'].notnull()]\ntest = df_doc[df_doc['sex_age'].isnull()]\ntrain.reset_index(drop=True, inplace=True)\ntest.reset_index(drop=True, inplace=True)\n\nlb = LabelEncoder()\ntrain_label = lb.fit_transform(train['sex_age'].values)\ntrain['class'] = train_label\n\n\n# In[5]:\n\n\ncolumn_name=\"app_list\"\nword_seq_len = 900\nvictor_size = 200\nnum_words = 35000\nbatch_size = 64\nclassification = 22\nkfold=10\n\n\n# In[6]:\n\n\nfrom sklearn.metrics import log_loss\n\ndef get_mut_label(y_label) :\n    results = []\n    for ele in y_label :\n        results.append(ele.argmax())\n    return  results  \n\nclass RocAucEvaluation(Callback):\n    def __init__(self, validation_data=(), interval=1):\n        super(Callback, self).__init__()\n\n        self.interval = interval\n        self.X_val, self.y_val = validation_data\n\n    def on_epoch_end(self, epoch, logs={}):\n        if epoch % self.interval == 0:\n            y_pred = self.model.predict(self.X_val, verbose=0)\n            val_y = get_mut_label(self.y_val)\n            score = log_loss(val_y, y_pred)\n            print(\"\\n mlogloss - epoch: %d - score: %.6f \\n\" % (epoch+1, score))\n\n\n# In[7]:\n\n\n#词向量\ndef w2v_pad(df_train,df_test,col, maxlen_,victor_size, num_words):\n\n    tokenizer = text.Tokenizer(num_words=num_words, lower=False,filters=\"\")\n    
tokenizer.fit_on_texts(list(df_train[col].values)+list(df_test[col].values))\n\n    train_ = sequence.pad_sequences(tokenizer.texts_to_sequences(df_train[col].values), maxlen=maxlen_)\n    test_ = sequence.pad_sequences(tokenizer.texts_to_sequences(df_test[col].values), maxlen=maxlen_)\n    \n    word_index = tokenizer.word_index\n    \n    count = 0\n    nb_words = len(word_index)\n    print(nb_words)\n    all_data=pd.concat([df_train[col],df_test[col]])\n    file_name = 'embedding/' + 'Word2Vec_start_' + col  +\"_\"+ str(victor_size) + '.model'\n    if not os.path.exists(file_name):\n        model = Word2Vec([[word for word in document.split(' ')] for document in all_data.values],\n                         size=victor_size, window=5, iter=10, workers=11, seed=2018, min_count=2)\n        model.save(file_name)\n    else:\n        model = Word2Vec.load(file_name)\n    print(\"add word2vec finished....\")    \n\n\n                 \n    embedding_word2vec_matrix = np.zeros((nb_words + 1, victor_size))\n    for word, i in word_index.items():\n        embedding_vector = model[word] if word in model else None\n        if embedding_vector is not None:\n            count += 1\n            embedding_word2vec_matrix[i] = embedding_vector\n        else:\n            unk_vec = np.random.random(victor_size) * 0.5\n            unk_vec = unk_vec - unk_vec.mean()\n            embedding_word2vec_matrix[i] = unk_vec\n\n    embedding_w2c_all = np.zeros((nb_words + 1, victor_size))  \n    for word, i in word_index.items():\n        embedding_vector = dic_w2c_all[word] \n        embedding_w2c_all[i] = embedding_vector\n                    \n\n    #embedding_matrix = np.concatenate((embedding_word2vec_matrix,embedding_w2c_all),axis=1)\n    embedding_matrix = embedding_word2vec_matrix\n    \n    return train_, test_, word_index, embedding_matrix\n\n\n# In[8]:\n\n\ntrain_, test_,word2idx, word_embedding = w2v_pad(train,test,column_name, word_seq_len,victor_size, num_words)\n\n\n# In[10]:\n\n\nfrom TextModel import *\n\n\n# In[19]:\n\n\nmy_opt=\"get_text_cnn2\"\n#参数\nY = train['class'].values\n\nif not os.path.exists(\"cache/\"+my_opt):\n    os.mkdir(\"cache/\"+my_opt)    \n    \n\n\n# In[20]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\ngc.collect()\nseed = 2006\nnum_folds = 5\nkf = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed).split(train_, Y)\n\n\n# In[21]:\n\n\nepochs = 6\nmy_opt=eval(my_opt)\ntrain_model_pred = np.zeros((train_.shape[0], classification))\ntest_model_pred = np.zeros((test_.shape[0], classification))\nfor i, (train_fold, val_fold) in enumerate(kf):\n    X_train, X_valid, = train_[train_fold, :], train_[val_fold, :]\n    y_train, y_valid = Y[train_fold], Y[val_fold]\n\n    y_tra = to_categorical(y_train)\n    y_val = to_categorical(y_valid)\n    \n    #模型\n    name = str(my_opt.__name__)    \n\n    model = my_opt(word_seq_len, word_embedding, classification)    \n    \n    \n    RocAuc = RocAucEvaluation(validation_data=(X_valid, y_val), interval=1)\n\n    hist = model.fit(X_train, y_tra, batch_size=batch_size, epochs=epochs, validation_data=(X_valid, y_val),\n                     callbacks=[RocAuc])   \n    \n    \n    train_model_pred[val_fold, :] =  model.predict(X_valid)\n\n\n# In[25]:\n\n\n#模型\n#用全部的数据预测\ntrain_label = to_categorical(Y)\nname = str(my_opt.__name__)    \n\nmodel = my_opt(word_seq_len, word_embedding, classification)    \n\n\nRocAuc = RocAucEvaluation(validation_data=(train_, train_label), interval=1)\n\nhist = model.fit(train_, 
train_label, batch_size=batch_size, epochs=epochs, validation_data=(train_, train_label),\n                 callbacks=[RocAuc])   \n\n\ntest_model_pred =  model.predict(test_)\n\n\n# In[26]:\n\n\ndf_train_pred = pd.DataFrame(train_model_pred)\ndf_test_pred = pd.DataFrame(test_model_pred)\ndf_train_pred.columns = ['device_start_textcnn_pred_' + str(i) for i in range(22)]\ndf_test_pred.columns = ['device_start_textcnn_pred_' + str(i) for i in range(22)]\n\n\n# In[27]:\n\n\ndf_train_pred = pd.concat([train[['device_id']], df_train_pred], axis=1)\ndf_test_pred = pd.concat([test[['device_id']], df_test_pred], axis=1)\n\n\n# In[28]:\n\n\ndf_results = pd.concat([df_train_pred, df_test_pred])\ndf_results.to_csv('device_start_textcnn_pred.csv', index=None)\n\n"
  },
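  {
    "path": "THLUO/notes/sketch_text_cnn.py",
    "content": "# Sketch only: a minimal TextCNN along the lines the get_text_cnn2 factory\n# name suggests (the real definition lives in the missing TextModel.py):\n# parallel Conv1D branches over a few kernel sizes, max-pooled and\n# concatenated. Filter counts and kernel sizes are illustrative.\nfrom keras.layers import (Input, Embedding, SpatialDropout1D, Conv1D,\n                          GlobalMaxPooling1D, concatenate, Dense)\nfrom keras.models import Model\n\ndef text_cnn_sketch(seq_len, embedding_matrix, n_classes):\n    inp = Input(shape=(seq_len,))\n    x = Embedding(embedding_matrix.shape[0], embedding_matrix.shape[1],\n                  weights=[embedding_matrix], trainable=False)(inp)\n    x = SpatialDropout1D(0.2)(x)\n    # one max-pooled convolutional branch per kernel size\n    pools = [GlobalMaxPooling1D()(Conv1D(64, k, activation='relu')(x))\n             for k in (2, 3, 4)]\n    out = Dense(n_classes, activation='softmax')(concatenate(pools))\n    model = Model(inputs=inp, outputs=out)\n    model.compile(loss='categorical_crossentropy', optimizer='adam')\n    return model\n"
  },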
  {
    "path": "THLUO/18.device_start_text_dpcnn_pred.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\n# coding: utf-8\nimport feather\nimport os\nimport re\nimport sys  \nimport gc\nimport random\nimport pandas as pd\nimport numpy as np\nimport gensim\nfrom gensim.models import Word2Vec\nfrom gensim.models.word2vec import LineSentence\nfrom scipy import stats\nimport tensorflow as tf\nimport keras\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.optimizers import *\nfrom keras.callbacks import *\nfrom keras.preprocessing import text, sequence\nfrom keras.utils import to_categorical\nfrom keras.engine.topology import Layer\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.utils import np_utils\nfrom keras.utils.training_utils import multi_gpu_model\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import  accuracy_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import f1_score\nimport warnings\nwarnings.filterwarnings('ignore')\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = tf.Session(config=config)\n\n\n# In[2]:\nprint ('18.device_start_text_dpcnn_pred.py')\n\ndf_doc = pd.read_csv('01.device_click_app_sorted_by_start.csv')\ndeviceid_test=pd.read_csv('input/deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv('input/deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndf_total = pd.concat([deviceid_train, deviceid_test])\ndf_doc = df_doc.merge(df_total, on='device_id', how='left')\n\n\ndf_wv2_all = pd.read_csv('w2c_all_emb.csv')\n\ndic_w2c_all = {}\nfor row in df_wv2_all.values :\n    app_id = row[0]\n    vector = row[1:]\n    dic_w2c_all[app_id] = vector\n\n\n# In[3]:\n\n\ndf_doc['sex'] = df_doc['sex'].apply(lambda x:str(x))\ndf_doc['age'] = df_doc['age'].apply(lambda x:str(x))\ndef tool(x):\n    if x=='nan':\n        return x\n    else:\n        return str(int(float(x)))\ndf_doc['sex']=df_doc['sex'].apply(tool)\ndf_doc['age']=df_doc['age'].apply(tool)\ndf_doc['sex_age']=df_doc['sex']+'-'+df_doc['age']\ndf_doc = df_doc.replace({'nan':np.NaN,'nan-nan':np.NaN})\ntrain = df_doc[df_doc['sex_age'].notnull()]\ntest = df_doc[df_doc['sex_age'].isnull()]\ntrain.reset_index(drop=True, inplace=True)\ntest.reset_index(drop=True, inplace=True)\n\nlb = LabelEncoder()\ntrain_label = lb.fit_transform(train['sex_age'].values)\ntrain['class'] = train_label\n\n\n# In[5]:\n\n\ncolumn_name=\"app_list\"\nword_seq_len = 900\nvictor_size = 200\nnum_words = 35000\nbatch_size = 64\nclassification = 22\nkfold=10\n\n\n# In[6]:\n\n\nfrom sklearn.metrics import log_loss\n\ndef get_mut_label(y_label) :\n    results = []\n    for ele in y_label :\n        results.append(ele.argmax())\n    return  results  \n\nclass RocAucEvaluation(Callback):\n    def __init__(self, validation_data=(), interval=1):\n        super(Callback, self).__init__()\n\n        self.interval = interval\n        self.X_val, self.y_val = validation_data\n\n    def on_epoch_end(self, epoch, logs={}):\n        if epoch % self.interval == 0:\n            y_pred = self.model.predict(self.X_val, verbose=0)\n            val_y = get_mut_label(self.y_val)\n            score = log_loss(val_y, y_pred)\n            print(\"\\n mlogloss - epoch: %d - score: %.6f \\n\" % (epoch+1, score))\n\n\n# In[7]:\n\n\n#词向量\ndef w2v_pad(df_train,df_test,col, maxlen_,victor_size, num_words):\n\n    tokenizer = text.Tokenizer(num_words=num_words, lower=False,filters=\"\")\n    
tokenizer.fit_on_texts(list(df_train[col].values)+list(df_test[col].values))\n\n    train_ = sequence.pad_sequences(tokenizer.texts_to_sequences(df_train[col].values), maxlen=maxlen_)\n    test_ = sequence.pad_sequences(tokenizer.texts_to_sequences(df_test[col].values), maxlen=maxlen_)\n    \n    word_index = tokenizer.word_index\n    \n    count = 0\n    nb_words = len(word_index)\n    print(nb_words)\n    all_data=pd.concat([df_train[col],df_test[col]])\n    file_name = 'embedding/' + 'Word2Vec_start_' + col  +\"_\"+ str(victor_size) + '.model'\n    if not os.path.exists(file_name):\n        model = Word2Vec([[word for word in document.split(' ')] for document in all_data.values],\n                         size=victor_size, window=5, iter=10, workers=11, seed=2018, min_count=2)\n        model.save(file_name)\n    else:\n        model = Word2Vec.load(file_name)\n    print(\"add word2vec finished....\")    \n\n\n                 \n    embedding_word2vec_matrix = np.zeros((nb_words + 1, victor_size))\n    for word, i in word_index.items():\n        embedding_vector = model[word] if word in model else None\n        if embedding_vector is not None:\n            count += 1\n            embedding_word2vec_matrix[i] = embedding_vector\n        else:\n            unk_vec = np.random.random(victor_size) * 0.5\n            unk_vec = unk_vec - unk_vec.mean()\n            embedding_word2vec_matrix[i] = unk_vec\n\n    embedding_w2c_all = np.zeros((nb_words + 1, victor_size))  \n    for word, i in word_index.items():\n        embedding_vector = dic_w2c_all[word] \n        embedding_w2c_all[i] = embedding_vector\n                    \n\n    #embedding_matrix = np.concatenate((embedding_word2vec_matrix,embedding_w2c_all),axis=1)\n    embedding_matrix = embedding_word2vec_matrix\n    \n    return train_, test_, word_index, embedding_matrix\n\n\n# In[8]:\n\n\ntrain_, test_,word2idx, word_embedding = w2v_pad(train,test,column_name, word_seq_len,victor_size, num_words)\n\n\n# In[10]:\n\n\nfrom TextModel import *\n\n\n# In[12]:\n\n\nmy_opt=\"get_text_dpcnn\"\n#参数\nY = train['class'].values\n\nif not os.path.exists(\"cache/\"+my_opt):\n    os.mkdir(\"cache/\"+my_opt)    \n    \n\n\n# In[13]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\ngc.collect()\nseed = 2006\nnum_folds = 5\nkf = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed).split(train_, Y)\n\n\n# In[14]:\n\n\nfrom keras import backend as K\n\nepochs = 6\nmy_opt=eval(my_opt)\ntrain_model_pred = np.zeros((train_.shape[0], classification))\ntest_model_pred = np.zeros((test_.shape[0], classification))\nfor i, (train_fold, val_fold) in enumerate(kf):\n    X_train, X_valid, = train_[train_fold, :], train_[val_fold, :]\n    y_train, y_valid = Y[train_fold], Y[val_fold]\n\n    y_tra = to_categorical(y_train)\n    y_val = to_categorical(y_valid)\n    \n    #模型\n    name = str(my_opt.__name__)    \n\n    model = my_opt(word_seq_len, word_embedding, classification)    \n    \n    \n    RocAuc = RocAucEvaluation(validation_data=(X_valid, y_val), interval=1)\n\n    hist = model.fit(X_train, y_tra, batch_size=batch_size, epochs=epochs, validation_data=(X_valid, y_val),\n                     callbacks=[RocAuc])   \n    \n    \n    train_model_pred[val_fold, :] =  model.predict(X_valid)\n\n    \n    del model\n    del hist\n    gc.collect()\n    K.clear_session()\n    tf.reset_default_graph()      \n    \n\n\n# In[15]:\n\n\n#模型\n#用全部的数据预测\ntrain_label = to_categorical(Y)\nname = str(my_opt.__name__)    \n\nmodel = 
my_opt(word_seq_len, word_embedding, classification)    \n\n\nRocAuc = RocAucEvaluation(validation_data=(train_, train_label), interval=1)\n\nhist = model.fit(train_, train_label, batch_size=batch_size, epochs=epochs, validation_data=(train_, train_label),\n                 callbacks=[RocAuc])   \n\n\ntest_model_pred =  model.predict(test_)\n\n\n# In[16]:\n\n\ndf_train_pred = pd.DataFrame(train_model_pred)\ndf_test_pred = pd.DataFrame(test_model_pred)\ndf_train_pred.columns = ['device_start_text_dpcnn_pred_' + str(i) for i in range(22)]\ndf_test_pred.columns = ['device_start_text_dpcnn_pred_' + str(i) for i in range(22)]\n\n\n# In[17]:\n\n\ndf_train_pred = pd.concat([train[['device_id']], df_train_pred], axis=1)\ndf_test_pred = pd.concat([test[['device_id']], df_test_pred], axis=1)\n\n\n# In[18]:\n\n\ndf_results = pd.concat([df_train_pred, df_test_pred])\ndf_results.to_csv('device_start_text_dpcnn_pred.csv', index=None)\n\n"
  },
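  {
    "path": "THLUO/notes/sketch_fold_cleanup.py",
    "content": "# Sketch only: scripts 18 and 19 free GPU memory between folds (del model;\n# gc.collect(); K.clear_session(); tf.reset_default_graph()), while the\n# earlier GRU/CNN scripts leave every fold's graph alive, which can exhaust\n# memory over 10 folds. The same cleanup factored into a helper, assuming\n# the TF1-style session setup used throughout:\nimport gc\nimport tensorflow as tf\nfrom keras import backend as K\n\ndef release_keras_session():\n    gc.collect()\n    K.clear_session()\n    tf.reset_default_graph()\n\n# usage at the end of each fold, after 'del model, hist':\n#     release_keras_session()\n"
  },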
  {
    "path": "THLUO/19.device_start_lstm_pred.py",
    "content": "import feather\nimport os\nimport re\nimport sys  \nimport gc\nimport random\nimport pandas as pd\nimport numpy as np\nimport gensim\nfrom gensim.models import Word2Vec\nfrom gensim.models.word2vec import LineSentence\nfrom scipy import stats\nimport tensorflow as tf\nimport keras\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.optimizers import *\nfrom keras.callbacks import *\nfrom keras.preprocessing import text, sequence\nfrom keras.utils import to_categorical\nfrom keras.engine.topology import Layer\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.utils import np_utils\nfrom keras.utils.training_utils import multi_gpu_model\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import  accuracy_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import f1_score\nimport warnings\nwarnings.filterwarnings('ignore')\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = tf.Session(config=config)\n\nprint ('19.lstm...........py')\n# In[2]:\n\n\ndf_doc = pd.read_csv('01.device_click_app_sorted_by_start.csv')\ndeviceid_test=pd.read_csv('input/deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv('input/deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndf_total = pd.concat([deviceid_train, deviceid_test])\ndf_doc = df_doc.merge(df_total, on='device_id', how='left')\n\ndf_wv2_all = pd.read_csv('w2c_all_emb.csv')\n\ndic_w2c_all = {}\nfor row in df_wv2_all.values :\n    app_id = row[0]\n    vector = row[1:]\n    dic_w2c_all[app_id] = vector\n\n\n# In[3]:\n\n\ndf_doc['sex'] = df_doc['sex'].apply(lambda x:str(x))\ndf_doc['age'] = df_doc['age'].apply(lambda x:str(x))\ndef tool(x):\n    if x=='nan':\n        return x\n    else:\n        return str(int(float(x)))\ndf_doc['sex']=df_doc['sex'].apply(tool)\ndf_doc['age']=df_doc['age'].apply(tool)\ndf_doc['sex_age']=df_doc['sex']+'-'+df_doc['age']\ndf_doc = df_doc.replace({'nan':np.NaN,'nan-nan':np.NaN})\ntrain = df_doc[df_doc['sex_age'].notnull()]\ntest = df_doc[df_doc['sex_age'].isnull()]\ntrain.reset_index(drop=True, inplace=True)\ntest.reset_index(drop=True, inplace=True)\n\nlb = LabelEncoder()\ntrain_label = lb.fit_transform(train['sex_age'].values)\ntrain['class'] = train_label\n\n\n# In[5]:\n\n\ncolumn_name=\"app_list\"\nword_seq_len = 900\nvictor_size = 200\nnum_words = 35000\nbatch_size = 64\nclassification = 22\nkfold=10\n\n\n# In[6]:\n\n\nfrom sklearn.metrics import log_loss\n\ndef get_mut_label(y_label) :\n    results = []\n    for ele in y_label :\n        results.append(ele.argmax())\n    return  results  \n\nclass RocAucEvaluation(Callback):\n    def __init__(self, validation_data=(), interval=1):\n        super(Callback, self).__init__()\n\n        self.interval = interval\n        self.X_val, self.y_val = validation_data\n\n    def on_epoch_end(self, epoch, logs={}):\n        if epoch % self.interval == 0:\n            y_pred = self.model.predict(self.X_val, verbose=0)\n            val_y = get_mut_label(self.y_val)\n            score = log_loss(val_y, y_pred)\n            print(\"\\n mlogloss - epoch: %d - score: %.6f \\n\" % (epoch+1, score))\n\n\n# In[7]:\n\n\n#词向量\ndef w2v_pad(df_train,df_test,col, maxlen_,victor_size, num_words):\n\n    tokenizer = text.Tokenizer(num_words=num_words, lower=False,filters=\"\")\n    tokenizer.fit_on_texts(list(df_train[col].values)+list(df_test[col].values))\n\n    train_ 
= sequence.pad_sequences(tokenizer.texts_to_sequences(df_train[col].values), maxlen=maxlen_)\n    test_ = sequence.pad_sequences(tokenizer.texts_to_sequences(df_test[col].values), maxlen=maxlen_)\n    \n    word_index = tokenizer.word_index\n    \n    count = 0\n    nb_words = len(word_index)\n    print(nb_words)\n    all_data=pd.concat([df_train[col],df_test[col]])\n    file_name = 'embedding/' + 'Word2Vec_start_' + col  +\"_\"+ str(victor_size) + '.model'\n    if not os.path.exists(file_name):\n        model = Word2Vec([[word for word in document.split(' ')] for document in all_data.values],\n                         size=victor_size, window=5, iter=10, workers=11, seed=2018, min_count=2)\n        model.save(file_name)\n    else:\n        model = Word2Vec.load(file_name)\n    print(\"add word2vec finished....\")    \n\n\n                 \n    embedding_word2vec_matrix = np.zeros((nb_words + 1, victor_size))\n    for word, i in word_index.items():\n        embedding_vector = model[word] if word in model else None\n        if embedding_vector is not None:\n            count += 1\n            embedding_word2vec_matrix[i] = embedding_vector\n        else:\n            unk_vec = np.random.random(victor_size) * 0.5\n            unk_vec = unk_vec - unk_vec.mean()\n            embedding_word2vec_matrix[i] = unk_vec\n\n    embedding_w2c_all = np.zeros((nb_words + 1, victor_size))  \n    for word, i in word_index.items():\n        embedding_vector = dic_w2c_all[word] \n        embedding_w2c_all[i] = embedding_vector\n                    \n\n    #embedding_matrix = np.concatenate((embedding_word2vec_matrix,embedding_w2c_all),axis=1)\n    embedding_matrix = embedding_word2vec_matrix\n    \n    return train_, test_, word_index, embedding_matrix\n\n\n# In[8]:\n\n\ntrain_, test_,word2idx, word_embedding = w2v_pad(train,test,column_name, word_seq_len,victor_size, num_words)\n\n\n# In[10]:\n\n\nfrom TextModel import *\n\n\n# In[13]:\n\n\nmy_opt=\"get_text_lstm1\"\n#参数\nY = train['class'].values\n\nif not os.path.exists(\"cache/\"+my_opt):\n    os.mkdir(\"cache/\"+my_opt)    \n    \n\n\n# In[14]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\ngc.collect()\nseed = 2006\nnum_folds = 5\nkf = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed).split(train_, Y)\n\n\n# In[15]:\n\n\nfrom keras import backend as K\n\nepochs = 6\nmy_opt=eval(my_opt)\ntrain_model_pred = np.zeros((train_.shape[0], classification))\ntest_model_pred = np.zeros((test_.shape[0], classification))\nfor i, (train_fold, val_fold) in enumerate(kf):\n    X_train, X_valid, = train_[train_fold, :], train_[val_fold, :]\n    y_train, y_valid = Y[train_fold], Y[val_fold]\n\n    y_tra = to_categorical(y_train)\n    y_val = to_categorical(y_valid)\n    \n    #模型\n    name = str(my_opt.__name__)    \n\n    model = my_opt(word_seq_len, word_embedding, classification)    \n    \n    \n    RocAuc = RocAucEvaluation(validation_data=(X_valid, y_val), interval=1)\n\n    hist = model.fit(X_train, y_tra, batch_size=batch_size, epochs=epochs, validation_data=(X_valid, y_val),\n                     callbacks=[RocAuc])   \n    \n    \n    train_model_pred[val_fold, :] =  model.predict(X_valid)\n\n    \n    del model\n    del hist\n    gc.collect()\n    K.clear_session()\n    tf.reset_default_graph()      \n    \n\n\n# In[19]:\n\n\n#模型\n#用全部的数据预测\ntrain_label = to_categorical(Y)\nname = str(my_opt.__name__)    \n\nmodel = my_opt(word_seq_len, word_embedding, classification)    \n\n\nRocAuc = 
RocAucEvaluation(validation_data=(train_, train_label), interval=1)\n\nhist = model.fit(train_, train_label, batch_size=batch_size, epochs=epochs, validation_data=(train_, train_label),\n                 callbacks=[RocAuc])   \n\n\ntest_model_pred =  model.predict(test_)\n\n\n# In[20]:\n\n\ndf_train_pred = pd.DataFrame(train_model_pred)\ndf_test_pred = pd.DataFrame(test_model_pred)\ndf_train_pred.columns = ['device_start_lstm_pred_' + str(i) for i in range(22)]\ndf_test_pred.columns = ['device_start_lstm_pred_' + str(i) for i in range(22)]\n\n\n# In[21]:\n\n\ndf_train_pred = pd.concat([train[['device_id']], df_train_pred], axis=1)\ndf_test_pred = pd.concat([test[['device_id']], df_test_pred], axis=1)\n\n\n# In[22]:\n\n\ndf_results = pd.concat([df_train_pred, df_test_pred])\ndf_results.to_csv('device_start_lstm_pred.csv', index=None)\n\n"
  },
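  {
    "path": "THLUO/notes/sketch_seq_len_check.py",
    "content": "# Sketch only: the NN scripts pad/truncate app sequences to fixed cutoffs\n# (word_seq_len=900 in this script, 1800 in 15.device_all_GRU_pred.py). A\n# quick sanity check of those cutoffs against the actual length distribution:\nimport numpy as np\nimport pandas as pd\n\ndf_doc = pd.read_csv('01.device_click_app_sorted_by_start.csv')\nlengths = df_doc['app_list'].str.split(' ').str.len()\nprint(lengths.describe())\nprint('95th/99th percentiles:', np.percentile(lengths, [95, 99]))\n"
  },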
  {
    "path": "THLUO/2.w2c_model_close.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nimport gc\n\n\n\n\n# In[2]:\nprint ('2.w2c_model_close.py')\n\npath='input/'\ndata=pd.DataFrame()\n#sex_age=pd.read_excel('./data/性别年龄对照表.xlsx')\n\n\n# In[3]:\n\n\ndeviceid_packages=pd.read_csv(path+'deviceid_packages.tsv',sep='\\t',names=['device_id','apps'])\ndeviceid_test=pd.read_csv(path+'deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv(path+'deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndeviceid_brand = pd.read_csv(path+'deviceid_brand.tsv',sep='\\t', names=['device_id','device_brand', 'device_type'])\ndeviceid_package_start_close = pd.read_csv(path+'deviceid_package_start_close.tsv',sep='\\t', names=['device_id','app_id','start_time','close_time'])\npackage_label = pd.read_csv(path+'package_label.tsv',sep='\\t',names=['app_id','app_parent_type', 'app_child_type'])\n\n\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : str(x).split(' ')[0])\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 1].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 2].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_2' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 3].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_3' if x in one_time_brand else x)\n\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_brand.values))\ndeviceid_brand['device_brand'] = lbl.transform(list(deviceid_brand.device_brand.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_type.values))\ndeviceid_brand['device_type'] = lbl.transform(list(deviceid_brand.device_type.values))\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_parent_type.values))\npackage_label['app_parent_type'] = lbl.transform(list(package_label.app_parent_type.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_child_type.values))\npackage_label['app_child_type'] = lbl.transform(list(package_label.app_child_type.values))\n\n\n# In[4]:\n\n\ndf_sorted = deviceid_package_start_close.sort_values(by='close_time')\n\n\n# In[6]:\n\n\ndf_results = df_sorted.groupby('device_id')['app_id'].apply(lambda x:' '.join(x)).reset_index().rename(columns = {'app_id' : 'app_list'})\n\n\n# In[7]:\n\n\ndf_results.to_csv('02.device_click_app_sorted_by_close.csv', index=None)\n\n\n# In[6]:\n\n\ndf_device_start_app_list = 
df_sorted.groupby('device_id').apply(lambda x : list(x.app_id)).reset_index().rename(columns = {0 : 'app_list'})\n\n\n# In[7]:\n\n\napp_list = list(df_device_start_app_list.app_list.values)\n\n\n# In[8]:\n\n\nfrom gensim.test.utils import common_texts, get_tmpfile\nfrom gensim.models import Word2Vec\n\n\n# In[9]:\n\n\nmodel = Word2Vec(app_list, size=10, window=10, min_count=2, workers=4)\nmodel.save(\"word2vec.model\")\n\n\n# In[11]:\n\n\nvocab = list(model.wv.vocab.keys())\n\nw2c_arr = []\n\nfor v in vocab :\n    w2c_arr.append(list(model.wv[v]))\n\n\n# In[12]:\n\n\ndf_w2c_start = pd.DataFrame()\ndf_w2c_start['app_id'] = vocab\ndf_w2c_start = pd.concat([df_w2c_start, pd.DataFrame(w2c_arr)], axis=1)\ndf_w2c_start.columns = ['app_id'] + ['w2c_close_app_' + str(i) for i in range(10)]\n\n\n# In[ ]:\n\n\nw2c_nums = 10\nagg = {}\nfor l in ['w2c_close_app_' + str(i) for i in range(w2c_nums)] :\n    agg[l] = ['mean', 'std', 'max', 'min']\n\n\n# In[14]:\n\n\ndeviceid_package_start_close = deviceid_package_start_close.merge(df_w2c_start, on='app_id', how='left')\n\n\n# In[ ]:\n\n\ndf_agg = deviceid_package_start_close.groupby('device_id').agg(agg)\ndf_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\ndf_agg = df_agg.reset_index()\ndf_agg.to_csv('device_close_app_w2c.csv', index=None)\n\n\n# In[14]:\n\n\ndf_results = deviceid_package_start_close.groupby(['device_id', 'app_id'])['start_time'].mean().reset_index()\ndf_results = df_results.merge(df_w2c_start, on='app_id', how='left')\n\n\n# In[17]:\n\n\ndf_agg = df_results.groupby('device_id').agg(agg)\ndf_agg.columns = pd.Index(['device_app_unique_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\ndf_agg = df_agg.reset_index()\n\n\n# In[18]:\n\n\ndf_agg.to_csv('device_app_unique_close_app_w2c.csv', index=None)\n\n"
  },
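  {
    "path": "THLUO/notes/sketch_device_mean_embedding.py",
    "content": "# Sketch only: 2.w2c_model_close.py writes mean/std/max/min aggregates of\n# the per-app vectors to device level. A compact alternative, one mean\n# vector per device, built from the word2vec.model file it saves:\nimport numpy as np\nimport pandas as pd\nfrom gensim.models import Word2Vec\n\nmodel = Word2Vec.load('word2vec.model')\nusage = pd.read_csv('input/deviceid_package_start_close.tsv', sep='\t',\n                    names=['device_id', 'app_id', 'start_time', 'close_time'])\n\ndef device_vector(app_ids):\n    # mean of the vectors of apps in the vocabulary; zeros if none are known\n    vecs = [model.wv[a] for a in app_ids if a in model.wv]\n    return np.mean(vecs, axis=0) if vecs else np.zeros(model.wv.vector_size)\n\ndev_emb = usage.groupby('device_id')['app_id'].apply(lambda s: device_vector(s.values))\n"
  },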
  {
    "path": "THLUO/20.lgb_sex_age_prob_oof.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nimport gc\n\n\n\n\n# In[2]:\nprint ('20.lgb_sex_age_prob_oof.py')\n\npath='input/'\ndata=pd.DataFrame()\n#sex_age=pd.read_excel('./data/性别年龄对照表.xlsx')\n\n\n# In[3]:\n\n\ndeviceid_packages=pd.read_csv(path+'deviceid_packages.tsv',sep='\\t',names=['device_id','apps'])\ndeviceid_test=pd.read_csv(path+'deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv(path+'deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndeviceid_brand = pd.read_csv(path+'deviceid_brand.tsv',sep='\\t', names=['device_id','device_brand', 'device_type'])\ndeviceid_package_start_close = pd.read_csv(path+'deviceid_package_start_close.tsv',sep='\\t', names=['device_id','app_id','start_time','close_time'])\npackage_label = pd.read_csv(path+'package_label.tsv',sep='\\t',names=['app_id','app_parent_type', 'app_child_type'])\n\n\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : str(x).split(' ')[0])\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 1].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 2].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_2' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 3].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_3' if x in one_time_brand else x)\n\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_brand.values))\ndeviceid_brand['device_brand'] = lbl.transform(list(deviceid_brand.device_brand.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_type.values))\ndeviceid_brand['device_type'] = lbl.transform(list(deviceid_brand.device_type.values))\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_parent_type.values))\npackage_label['app_parent_type'] = lbl.transform(list(package_label.app_parent_type.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_child_type.values))\npackage_label['app_child_type'] = lbl.transform(list(package_label.app_child_type.values))\n\n\n# In[4]:\n\n\nimport time\n\n# 输入毫秒级的时间，转出正常格式的时间\ndef timeStamp(timeNum):\n    timeStamp = float(timeNum/1000)\n    timeArray = time.localtime(timeStamp)\n    otherStyleTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n    return otherStyleTime\n\n#解析出具体的时间\ndeviceid_package_start_close['start_date'] = 
pd.to_datetime(deviceid_package_start_close.start_time.apply(timeStamp))\ndeviceid_package_start_close['end_date'] = pd.to_datetime(deviceid_package_start_close.close_time.apply(timeStamp))\ndeviceid_package_start_close['start_hour'] = deviceid_package_start_close.start_date.dt.hour\ndeviceid_package_start_close['end_hour'] = deviceid_package_start_close.end_date.dt.hour\ndeviceid_package_start_close['time_gap'] = (deviceid_package_start_close['end_date'] - deviceid_package_start_close['start_date']).astype('timedelta64[s]')\n\ndeviceid_package_start_close = deviceid_package_start_close.merge(package_label, on='app_id', how='left')\ndeviceid_package_start_close.app_parent_type.fillna(-1, inplace=True)\ndeviceid_package_start_close.app_child_type.fillna(-1, inplace=True)\ndeviceid_package_start_close['start_year'] = deviceid_package_start_close.start_date.dt.year\ndeviceid_package_start_close['end_year'] = deviceid_package_start_close.end_date.dt.year\ndeviceid_package_start_close['year_gap'] = deviceid_package_start_close['end_year'] - deviceid_package_start_close['start_year']\n\n\n# In[5]:\n\n\ndeviceid_train=pd.concat([deviceid_train,deviceid_test])\n\n\n# In[6]:\n\n\ndeviceid_packages['apps']=deviceid_packages['apps'].apply(lambda x:x.split(','))\ndeviceid_packages['app_lenghth']=deviceid_packages['apps'].apply(lambda x:len(x))\n\n\n#特征工程\ndef open_app_timegap_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'start_hour'])['time_gap'].mean().reset_index().rename(columns = {'time_gap': 'mean_time_gap'})\n    df_mean_temp = pd.pivot_table(df_temp, index='device_id', columns='start_hour', values='mean_time_gap').reset_index()\n    df_mean_temp.columns = ['device_id'] + ['open_app_timegap_in_'+str(i) + '_mean_hour' for i in range(0,24)]\n    df_mean_temp.fillna(0, inplace=True)\n\n\n    \n    return df_mean_temp\n\n\n# In[8]:\n\n\ndef device_start_end_app_timegap() :\n    #用户打开，关闭app的时间间隔\n    df_ = deviceid_package_start_close.sort_values(by=['device_id', 'start_date'], ascending=False)\n    df_['prev_start_date'] = df_.groupby('device_id')['start_date'].shift(-1)\n    df_['start_date_gap'] = (df_['start_date'] - df_['prev_start_date']).astype('timedelta64[s]')\n    agg_dic = {'start_date_gap' : ['min', 'max', 'mean', 'median', 'std']}\n    df_start_gap_agg = df_.groupby('device_id').agg(agg_dic)\n    df_start_gap_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_start_gap_agg.columns.tolist()])\n    df_start_gap_agg = df_start_gap_agg.reset_index()\n    #del df_\n    gc.collect()\n    #关闭时间间隔\n    df_ = deviceid_package_start_close.sort_values(by=['device_id', 'end_date'], ascending=False)\n    df_['prev_end_date'] = df_.groupby('device_id')['end_date'].shift(-1)\n    df_['end_date_gap'] = (df_['end_date'] - df_['prev_end_date']).astype('timedelta64[s]')\n    agg_dic = {'end_date_gap' : ['min', 'max', 'mean', 'median', 'std']}\n    df_end_gap_agg = df_.groupby('device_id').agg(agg_dic)\n    df_end_gap_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_end_gap_agg.columns.tolist()])\n    df_end_gap_agg = df_end_gap_agg.reset_index()\n    #del df_\n    gc.collect()\n\n\n\n    df_agg = df_start_gap_agg.merge(df_end_gap_agg, on='device_id', how='left')\n    #df_agg = df_agg.merge(df_app_start_gap_agg, on='device_id', how='left')\n    #df_agg = df_agg.merge(df_app_end_gap_agg, on='device_id', how='left')\n    return df_agg\n\ndef open_app_counts_in_hour() :\n    df_temp = 
def device_start_end_app_timegap() :\n    # Gaps between a device's consecutive app open events\n    df_ = deviceid_package_start_close.sort_values(by=['device_id', 'start_date'], ascending=False)\n    df_['prev_start_date'] = df_.groupby('device_id')['start_date'].shift(-1)\n    df_['start_date_gap'] = (df_['start_date'] - df_['prev_start_date']).astype('timedelta64[s]')\n    agg_dic = {'start_date_gap' : ['min', 'max', 'mean', 'median', 'std']}\n    df_start_gap_agg = df_.groupby('device_id').agg(agg_dic)\n    df_start_gap_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_start_gap_agg.columns.tolist()])\n    df_start_gap_agg = df_start_gap_agg.reset_index()\n    #del df_\n    gc.collect()\n    # Gaps between consecutive close events\n    df_ = deviceid_package_start_close.sort_values(by=['device_id', 'end_date'], ascending=False)\n    df_['prev_end_date'] = df_.groupby('device_id')['end_date'].shift(-1)\n    df_['end_date_gap'] = (df_['end_date'] - df_['prev_end_date']).astype('timedelta64[s]')\n    agg_dic = {'end_date_gap' : ['min', 'max', 'mean', 'median', 'std']}\n    df_end_gap_agg = df_.groupby('device_id').agg(agg_dic)\n    df_end_gap_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_end_gap_agg.columns.tolist()])\n    df_end_gap_agg = df_end_gap_agg.reset_index()\n    #del df_\n    gc.collect()\n\n\n\n    df_agg = df_start_gap_agg.merge(df_end_gap_agg, on='device_id', how='left')\n    #df_agg = df_agg.merge(df_app_start_gap_agg, on='device_id', how='left')\n    #df_agg = df_agg.merge(df_app_end_gap_agg, on='device_id', how='left')\n    return df_agg\n\ndef open_app_counts_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'start_hour'])['app_id'].count().reset_index().rename(columns = {'app_id': 'app_counts'})\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='start_hour', values='app_counts').reset_index()\n    df_temp.columns = ['device_id'] + ['open_app_counts_in'+str(i) + '_hour' for i in range(0,24)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\ndef close_app_counts_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'end_hour'])['app_id'].count().reset_index().rename(columns = {'app_id': 'app_counts'})\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='end_hour', values='app_counts').reset_index()\n    df_temp.columns = ['device_id'] + ['close_app_counts_in'+str(i) + '_hour' for i in range(0,24)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\ndef app_type_mean_time_gap_one_hot () :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'app_parent_type'])['time_gap'].mean().reset_index()\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='app_parent_type', values='time_gap').reset_index()\n    df_temp.columns = ['device_id'] + ['app_parent_type_mean_time_gap'+str(i) for i in range(-1,45)]\n    df_temp.fillna(-1, inplace=True)\n    return df_temp  \n\ndef device_active_hour() :\n    aggregations = {\n        'start_hour' : ['std','mean','max','min'],\n        'end_hour' : ['std','mean','max','min']\n    }\n    df_agg = deviceid_package_start_close.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()   \n    \n    return df_agg\n\n\ndef device_brand_encoding() :\n    df_temp = deviceid_brand.merge(deviceid_train[['device_id', 'age', 'sex']], on='device_id', how='left')\n\n    aggregations = {\n        'age' : ['std','mean'],\n        'sex' : ['mean'],\n    }\n\n    df_device_brand = df_temp.groupby('device_brand').agg(aggregations)\n    df_device_brand.columns = pd.Index(['device_brand_' + e[0] + \"_\" + e[1].upper() for e in df_device_brand.columns.tolist()])\n    df_device_brand = df_device_brand.reset_index()\n\n    df_device_type = df_temp.groupby('device_type').agg(aggregations)\n    df_device_type.columns = pd.Index(['device_type_' + e[0] + \"_\" + e[1].upper() for e in df_device_type.columns.tolist()])\n    df_device_type = df_device_type.reset_index()\n\n    df_temp = df_temp.merge(df_device_brand, on='device_brand', how='left')\n    df_temp = df_temp.merge(df_device_type, on='device_type', how='left')\n\n    aggregations = {\n        'device_brand_age_STD' : ['mean'],\n        'device_brand_age_MEAN' : ['mean'],\n        'device_brand_sex_MEAN' : ['mean'],\n        #'device_type_age_STD' : ['mean'],\n        #'device_type_age_MEAN' : ['mean'],\n        #'device_type_sex_MEAN' : ['mean']\n    }\n\n    df_agg = df_temp.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n    return df_agg\n\n\n
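# Caveat for device_brand_encoding above: these are global target statistics (mean/std of age, mean of sex per brand) computed once over all labelled rows rather than out-of-fold, so they can leak label information into the cross-validation further down.\n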
# Statistics of how each device runs apps\ndef device_active_time_time_stat() :\n    # Per-record active time of an app session\n    deviceid_package_start_close['active_time'] = deviceid_package_start_close['close_time'] - deviceid_package_start_close['start_time']\n\n    # How many times the device opened an app, and how many distinct apps\n    aggregations = {\n        'app_id' : ['count', 'nunique'],\n        'active_time' : ['mean', 'std', 'max', 'min'],\n    }\n    df_agg = deviceid_package_start_close.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n\n    aggregations = {\n        'active_time' : ['mean', 'std', 'max', 'min', 'count'],\n    }\n    df_da_agg = deviceid_package_start_close.groupby(['device_id', 'app_id']).agg(aggregations)\n    df_da_agg.columns = pd.Index(['device_app_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_da_agg.columns.tolist()])\n    df_da_agg = df_da_agg.reset_index()\n\n    # Device-level statistics over each app's active-time statistics\n    aggregations = {\n        'device_app_grouped_active_time_MEAN' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_STD' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_MAX' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_MIN' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_COUNT' : ['mean', 'std', 'max', 'min'],\n    }\n    df_temp = df_da_agg.groupby(['device_id']).agg(aggregations)\n    df_temp.columns = pd.Index([e[0] + \"_\" + e[1].upper() for e in df_temp.columns.tolist()])\n    df_temp = df_temp.reset_index()\n\n    df_agg = df_agg.merge(df_temp, on='device_id', how='left')\n    return df_agg\n\n\ndef app_type_encoding() :\n    df_temp = df_device_app_pair.merge(deviceid_train[['device_id', 'age', 'sex']], on='device_id', how='left')\n\n    aggregations = {\n        'age' : ['std','mean'],\n        'sex' : ['mean'],\n    }\n\n    df_agg_app_parent_type = df_temp.groupby('app_parent_type').agg(aggregations)\n    df_agg_app_parent_type.columns = pd.Index(['app_parent_type_' + e[0] + \"_\" + e[1].upper() for e in df_agg_app_parent_type.columns.tolist()])\n    df_agg_app_parent_type = df_agg_app_parent_type.reset_index()\n\n    df_agg_app_child_type = df_temp.groupby('app_child_type').agg(aggregations)\n    df_agg_app_child_type.columns = pd.Index(['app_child_type_' + e[0] + \"_\" + e[1].upper() for e in df_agg_app_child_type.columns.tolist()])\n    df_agg_app_child_type = df_agg_app_child_type.reset_index()\n\n    df_temp = df_temp.merge(df_agg_app_parent_type, on='app_parent_type', how='left')\n    df_temp = df_temp.merge(df_agg_app_child_type, on='app_child_type', how='left')\n\n    aggregations = {\n        'app_parent_type_age_STD' : ['mean'],\n        'app_parent_type_age_MEAN' : ['mean'],\n        'app_parent_type_sex_MEAN' : ['mean'],\n        'app_child_type_age_STD' : ['mean'],\n        'app_child_type_age_MEAN' : ['mean'],\n        'app_child_type_sex_MEAN' : ['mean']\n    }\n    \n    df_agg = df_temp.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n    return df_agg\n\n# Count of each app_parent_type among the apps installed on a device\ndef app_type_onehot_in_device(df) :\n    df_copy = df.fillna(-1)\n    df_temp = df_copy.groupby(['device_id', 'app_parent_type'])['app_id'].size().reset_index()\n    df_temp.rename(columns = {'app_id' : 'app_parent_type_counts'}, inplace=True)\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='app_parent_type', values='app_parent_type_counts').reset_index()\n    df_temp.columns = ['device_id'] + ['app_parent_type'+str(i) for i in range(-1,45)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\n\n\n\n
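# Bag-of-apps text features: each device's installed-app list is treated as one document; TF-IDF weights are summed into tfidf_sum, and a 5-topic LDA over the app counts yields per-device topic proportions appended as dense features.\n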
apps=deviceid_packages['apps'].apply(lambda x:' '.join(x)).tolist()\nvectorizer=CountVectorizer()\ntransformer=TfidfTransformer()\ncntTf = vectorizer.fit_transform(apps)\ntfidf=transformer.fit_transform(cntTf)\nword=vectorizer.get_feature_names()\nweight=tfidf.toarray()\ndf_weight=pd.DataFrame(weight)\nfeature=df_weight.columns\ndf_weight['sum']=0\nfor f in tqdm(feature):\n    df_weight['sum']+=df_weight[f]\ndeviceid_packages['tfidf_sum']=df_weight['sum']\n\n\n# In[10]:\n\n\nlda = LatentDirichletAllocation(n_components=5,\n                                learning_offset=50.,\n                                random_state=666)\ndocres = lda.fit_transform(cntTf)\n\n\n# In[11]:\n\n\ndeviceid_packages = pd.concat([deviceid_packages,pd.DataFrame(docres)],axis=1)\n\n\n# In[12]:\n\n\ntemp=deviceid_packages.drop('apps',axis=1)\ndeviceid_train=pd.merge(deviceid_train,temp,on='device_id',how='left')\n\n\n# In[13]:\n\n\n# Flatten the installed-app lists into (device_id, app_id) pairs\ndevice_id_arr = []\napp_arr = []\ndf_device_app_pair = pd.DataFrame()\nfor row in deviceid_packages.values :\n    device_id = row[0]\n    app_list = row[1]\n    for app in app_list :\n        device_id_arr.append(device_id)\n        app_arr.append(app)\n# Build the pair frame\ndf_device_app_pair['device_id'] = device_id_arr\ndf_device_app_pair['app_id'] = app_arr    \n\ndf_device_app_pair = df_device_app_pair.merge(package_label, how='left', on='app_id')\n\n\n# In[15]:\n\n\n# Assemble the feature table\ndf_train = deviceid_train.merge(device_active_time_time_stat(), on='device_id', how='left')\ndf_train = df_train.merge(deviceid_brand, on='device_id', how='left')\ndf_train = df_train.merge(app_type_onehot_in_device(df_device_app_pair), on='device_id', how='left')\ndf_train = df_train.merge(app_type_encoding(), on='device_id', how='left')\ndf_train = df_train.merge(device_active_hour(), on='device_id', how='left')\ndf_train = df_train.merge(app_type_mean_time_gap_one_hot(), on='device_id', how='left')\ndf_train = df_train.merge(open_app_counts_in_hour(), on='device_id', how='left')\ndf_train = df_train.merge(close_app_counts_in_hour(), on='device_id', how='left')\ndf_train = df_train.merge(device_brand_encoding(), on='device_id', how='left')\ndf_train = df_train.merge(device_start_end_app_timegap(), on='device_id', how='left')\ndf_train = df_train.merge(open_app_timegap_in_hour(), on='device_id', how='left')\n\n\n# In[16]:\n\n\ndf_w2c_start = pd.read_csv('device_start_app_w2c.csv')\ndf_w2c_close = pd.read_csv('device_close_app_w2c.csv')\ndf_w2c_all = pd.read_csv('device_all_app_w2c.csv')\ndf_device_quchong_start_app_w2c = pd.read_csv('device_quchong_start_app_w2c.csv')\ndf_device_app_unique_start_app_w2c = pd.read_csv('device_app_unique_start_app_w2c.csv')\ndf_device_app_unique_close_app_w2c = pd.read_csv('device_app_unique_close_app_w2c.csv')\ndf_device_app_unique_all_app_w2c = pd.read_csv('device_app_unique_all_app_w2c.csv')\n\n\n# In[17]:\n\n\ndf_train_w2v = df_train.merge(df_w2c_start, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_w2c_close, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_w2c_all, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_quchong_start_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_start_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_close_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_all_app_w2c, on='device_id', how='left')\n\n\n# In[19]:\n\n\ndf_train_w2v['sex'] = df_train_w2v['sex'].apply(lambda x:str(x))\ndf_train_w2v['age'] = df_train_w2v['age'].apply(lambda x:str(x))\n# Cast numeric label strings like '1.0' to '1'; leave 'nan' (unlabelled test rows) as-is\ndef tool(x):\n    if x=='nan':\n        return x\n    else:\n        return str(int(float(x)))\ndf_train_w2v['sex']=df_train_w2v['sex'].apply(tool)\ndf_train_w2v['age']=df_train_w2v['age'].apply(tool)\ndf_train_w2v['sex_age']=df_train_w2v['sex']+'-'+df_train_w2v['age']\n\ndf_train_w2v = df_train_w2v.replace({'nan':np.NaN,'nan-nan':np.NaN})\n\n\n# In[33]:\n\n\ntrain = df_train_w2v[df_train_w2v['sex'].notnull()]\ntest = df_train_w2v[df_train_w2v['sex'].isnull()]\ntrain = train.reset_index(drop=True)\ntest = test.reset_index(drop=True)\n\nX = train.drop(['sex','age','sex_age','device_id'],axis=1)\nY = train['sex_age']\nY_CAT = pd.Categorical(Y)\nY = pd.Series(Y_CAT.codes)\n\n\n
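# Out-of-fold stacking: in the 5-fold loop below each training row is predicted only by the model that did not see it, so the 22 class probabilities (the sex-age combinations 1-0..1-10 and 2-0..2-10) can feed downstream models without leaking the row's own label.\n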
# In[36]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\ngc.collect()\nseed = 2018\nnum_folds = 5\nfolds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed)\n\noof_preds = np.zeros([train.shape[0], 22])\n\nsub_list = []\n\ncate_feat = ['device_type','device_brand']\n\nfor n_fold, (train_idx, valid_idx) in enumerate(folds.split(X, Y)):\n    train_x, train_y = X.iloc[train_idx], Y.iloc[train_idx]\n    valid_x, valid_y = X.iloc[valid_idx], Y.iloc[valid_idx] \n    \n    lgb_train=lgb.Dataset(train_x,label=train_y)\n    lgb_eval = lgb.Dataset(valid_x, valid_y, reference=lgb_train)\n    params = {\n        'boosting_type': 'gbdt',\n        'learning_rate' : 0.02,\n        'max_depth':5,\n        'num_leaves' : 2 ** 4,\n        'metric': {'multi_logloss'},\n        'num_class' : 22,\n        'objective' : 'multiclass',\n        'random_state' : 2018,\n        'bagging_freq' : 5,\n        'feature_fraction' : 0.7,\n        'bagging_fraction' : 0.7,\n        'min_split_gain' : 0.0970905919552776,\n        'min_child_weight' : 9.42012323936088,  \n    }  \n    \n    gbm = lgb.train(params,\n                    lgb_train,\n                    num_boost_round=600,\n                    valid_sets=[lgb_train, lgb_eval],\n                    #early_stopping_rounds=200, \n                    verbose_eval=100)  \n    \n    oof_preds[valid_idx] = gbm.predict(valid_x[X.columns.values])\n    \noof_train = pd.DataFrame(oof_preds)\noof_train.columns = ['lgb_sex_age_prob_oof_' + str(i)  for i in range(22)] \ntrain = pd.concat([train, oof_train], axis=1)    \n\n\n# In[37]:\n\n\n# Refit on the full training data to produce the test-set probabilities\nlgb_train = lgb.Dataset(X,label=Y)\n\ngbm = lgb.train(params, lgb_train, num_boost_round=600, valid_sets=lgb_train, verbose_eval=100)  \n\ntest = test.reset_index(drop=True)\ntest_preds = gbm.predict(test[X.columns.values])\n\noof_test = pd.DataFrame(test_preds)\noof_test.columns = ['lgb_sex_age_prob_oof_' + str(i)  for i in range(22)] \ntest = pd.concat([test, oof_test], axis=1)\n\n\n# In[39]:\n\n\ndf_sex_age_prob_oof = pd.concat([train[['device_id'] + ['lgb_sex_age_prob_oof_' + str(i)  for i in range(22)] ], \n                             test[['device_id'] + ['lgb_sex_age_prob_oof_' + str(i)  for i in range(22)] ]])\ndf_sex_age_prob_oof.to_csv('lgb_sex_age_prob_oof.csv', index=None)\n\n"
  },
  {
    "path": "THLUO/21.tfidf_lr_sex_age_prob_oof.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import LabelEncoder\nimport gc\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import log_loss\n\n\n\n# In[2]:\n\nprint('21.tfidf_lr.py')\npath='input/'\ndata=pd.DataFrame()\n#sex_age=pd.read_excel('./data/性别年龄对照表.xlsx')\n\n\n# In[3]:\n\n\ndeviceid_packages=pd.read_csv(path+'deviceid_packages.tsv',sep='\\t',names=['device_id','apps'])\ndeviceid_test=pd.read_csv(path+'deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv(path+'deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndeviceid_brand = pd.read_csv(path+'deviceid_brand.tsv',sep='\\t', names=['device_id','device_brand', 'device_type'])\ndeviceid_package_start_close = pd.read_csv(path+'deviceid_package_start_close.tsv',sep='\\t', names=['device_id','app_id','start_time','close_time'])\npackage_label = pd.read_csv(path+'package_label.tsv',sep='\\t',names=['app_id','app_parent_type', 'app_child_type'])\n\n\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : str(x).split(' ')[0])\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 1].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 2].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_2' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 3].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_3' if x in one_time_brand else x)\n\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_brand.values))\ndeviceid_brand['device_brand'] = lbl.transform(list(deviceid_brand.device_brand.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_type.values))\ndeviceid_brand['device_type'] = lbl.transform(list(deviceid_brand.device_type.values))\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_parent_type.values))\npackage_label['app_parent_type'] = lbl.transform(list(package_label.app_parent_type.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_child_type.values))\npackage_label['app_child_type'] = lbl.transform(list(package_label.app_child_type.values))\n\ndeviceid_train = pd.concat([deviceid_train, deviceid_test])\n\n\n# In[4]:\n\n\ndeviceid_package_start = deviceid_package_start_close[['device_id', 'app_id', 'start_time']]\ndeviceid_package_start.columns = ['device_id', 'app_id', 'all_time']\ndeviceid_package_close = 
deviceid_package_start = deviceid_package_start_close[['device_id', 'app_id', 'start_time']]\ndeviceid_package_start.columns = ['device_id', 'app_id', 'all_time']\ndeviceid_package_close = deviceid_package_start_close[['device_id', 'app_id', 'close_time']]\ndeviceid_package_close.columns = ['device_id', 'app_id', 'all_time']\ndeviceid_package_all = pd.concat([deviceid_package_start, deviceid_package_close])\ndeviceid_package_all = deviceid_package_all.sort_values(by='all_time')\n#deviceid_package_all = deviceid_package_all.merge(deviceid_train, on='device_id', how='left')\n\n\n# In[5]:\n\n\ndf = deviceid_package_all.groupby('device_id').apply(lambda x : list(x.app_id)).reset_index().rename(columns = {0 : 'app_list'})\n\n\n# In[6]:\n\n\ndf_sex_prob_oof = pd.read_csv('device_sex_prob_oof.csv')\ndf_age_prob_oof = pd.read_csv('device_age_prob_oof.csv')\ndf_start_close_sex_prob_oof = pd.read_csv('start_close_sex_prob_oof.csv')\ndf_start_close_age_prob_oof = pd.read_csv('start_close_age_prob_oof.csv')\ndf_start_close_sex_age_prob_oof = pd.read_csv('start_close_sex_age_prob_oof.csv')\n\n\ngc.collect()\ndf = df.merge(df_sex_prob_oof, on='device_id', how='left')\ndf = df.merge(df_age_prob_oof, on='device_id', how='left')\ndf = df.merge(df_start_close_sex_prob_oof, on='device_id', how='left')\ndf = df.merge(df_start_close_age_prob_oof, on='device_id', how='left')\ndf = df.merge(df_start_close_sex_age_prob_oof, on='device_id', how='left')\ndf.fillna(0, inplace=True)\napps = df['app_list'].apply(lambda x:' '.join(x)).tolist()\ndel df['app_list']\n\n\ndf = df.merge(deviceid_train, on='device_id', how='left')\n\n\n# In[8]:\n\n\nvectorizer=CountVectorizer()\ntransformer=TfidfTransformer()\ncntTf = vectorizer.fit_transform(apps)\ntfidf=transformer.fit_transform(cntTf)\nword=vectorizer.get_feature_names()\n# Note: .toarray() densifies the TF-IDF matrix, which is memory-hungry for large vocabularies\nweight=tfidf.toarray()\ndf_weight=pd.DataFrame(weight)\nfeature=df_weight.columns\n\n\n# In[9]:\n\n\n# Append the OOF probability columns (and labels) to the TF-IDF matrix\nfor i in df.columns.values:\n    df_weight[i] = df[i]\n\n\n# In[11]:\n\n\ndf_weight['sex'] = df_weight['sex'].apply(lambda x:str(x))\ndf_weight['age'] = df_weight['age'].apply(lambda x:str(x))\n# Cast numeric label strings like '1.0' to '1'; leave 'nan' (unlabelled test rows) as-is\ndef tool(x):\n    if x == 'nan':\n        return x\n    else:\n        return str(int(float(x)))\ndf_weight['sex'] = df_weight['sex'].apply(tool)\ndf_weight['age'] = df_weight['age'].apply(tool)\ndf_weight['sex_age'] = df_weight['sex']+'-'+df_weight['age']\ndf_weight['sex_age'] = df_weight.sex_age.replace({'nan':np.NaN,'nan-nan':np.NaN})\n\n\n# In[12]:\n\n\ntrain = df_weight[df_weight.sex_age.notnull()]\ntrain.reset_index(drop=True, inplace=True)\ntest = df_weight[df_weight.sex_age.isnull()]\ntest.reset_index(drop=True, inplace=True)\ngc.collect()\n\n\n# In[16]:\n\n\nX = train.drop(['sex','age','sex_age','device_id'],axis=1)\nY = train['sex_age']\nY_CAT = pd.Categorical(Y)\nY = pd.Series(Y_CAT.codes)\n\n\n# In[18]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\ngc.collect()\nseed = 666\nnum_folds = 5\nfolds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed)\n\noof_preds = np.zeros([train.shape[0], 22])\n\nfor n_fold, (train_idx, valid_idx) in enumerate(folds.split(X, Y)):\n    train_x, train_y = X.iloc[train_idx], Y.iloc[train_idx]\n    valid_x, valid_y = X.iloc[valid_idx], Y.iloc[valid_idx] \n    \n\n    clf = LogisticRegression(C=4)\n    clf.fit(train_x, train_y)\n    valid_preds=clf.predict_proba(valid_x)\n    train_preds=clf.predict_proba(train_x)\n    \n    oof_preds[valid_idx] = valid_preds\n    \n    print (log_loss(train_y.values, train_preds), log_loss(valid_y.values, valid_preds))\n    \n    \noof_train = pd.DataFrame(oof_preds)\noof_train.columns = ['tfidf_lr_sex_age_prob_oof_' + str(i)  for i in range(22)] \n
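# train_temp/test_temp pair each device_id with its 22 probability columns; concatenated below they become the stacking feature file tfidf_lr_sex_age_prob_oof.csv.\n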
train_temp = pd.concat([train[['device_id']], oof_train], axis=1)    \n\n\n# In[20]:\n\n\n# Refit on all labelled data and predict the test set\nclf = LogisticRegression(C=4)\nclf.fit(X, Y)\ntrain_preds=clf.predict_proba(X)\ntest_preds=clf.predict_proba(test[X.columns])\nprint (log_loss(Y.values, train_preds))\n\noof_test = pd.DataFrame(test_preds)\noof_test.columns = ['tfidf_lr_sex_age_prob_oof_' + str(i)  for i in range(22)] \n\n\n# In[24]:\n\n\noof_test\n\n\n# In[25]:\n\n\ntest_temp = pd.concat([test[['device_id']], oof_test], axis=1)    \ntest_temp\n\n\n# In[26]:\n\n\nsex_age_oof = pd.concat([train_temp, test_temp])\nsex_age_oof\n\n\n# In[29]:\n\n\nsex_age_oof.to_csv('tfidf_lr_sex_age_prob_oof.csv', index=None)\n\n"
  },
  {
    "path": "THLUO/22.base_feat.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nimport gc\n\n\n\n# In[2]:\n\n\npath='input/'\ndata=pd.DataFrame()\n#sex_age=pd.read_excel('./data/性别年龄对照表.xlsx')\n\n\n# In[3]:\n\n\ndeviceid_packages=pd.read_csv(path+'deviceid_packages.tsv',sep='\\t',names=['device_id','apps'])\ndeviceid_test=pd.read_csv(path+'deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv(path+'deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndeviceid_brand = pd.read_csv(path+'deviceid_brand.tsv',sep='\\t', names=['device_id','device_brand', 'device_type'])\ndeviceid_package_start_close = pd.read_csv(path+'deviceid_package_start_close.tsv',sep='\\t', names=['device_id','app_id','start_time','close_time'])\npackage_label = pd.read_csv(path+'package_label.tsv',sep='\\t',names=['app_id','app_parent_type', 'app_child_type'])\n\n\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : str(x).split(' ')[0])\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 1].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 2].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_2' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 3].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_3' if x in one_time_brand else x)\n\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_brand.values))\ndeviceid_brand['device_brand'] = lbl.transform(list(deviceid_brand.device_brand.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_type.values))\ndeviceid_brand['device_type'] = lbl.transform(list(deviceid_brand.device_type.values))\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_parent_type.values))\npackage_label['app_parent_type'] = lbl.transform(list(package_label.app_parent_type.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_child_type.values))\npackage_label['app_child_type'] = lbl.transform(list(package_label.app_child_type.values))\n\n\n# In[4]:\n\n\nimport time\n\n# 输入毫秒级的时间，转出正常格式的时间\ndef timeStamp(timeNum):\n    timeStamp = float(timeNum/1000)\n    timeArray = time.localtime(timeStamp)\n    otherStyleTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n    return otherStyleTime\n\n#解析出具体的时间\ndeviceid_package_start_close['start_date'] = pd.to_datetime(deviceid_package_start_close.start_time.apply(timeStamp))\ndeviceid_package_start_close['end_date'] = 
pd.to_datetime(deviceid_package_start_close.close_time.apply(timeStamp))\ndeviceid_package_start_close['start_hour'] = deviceid_package_start_close.start_date.dt.hour\ndeviceid_package_start_close['end_hour'] = deviceid_package_start_close.end_date.dt.hour\ndeviceid_package_start_close['time_gap'] = (deviceid_package_start_close['end_date'] - deviceid_package_start_close['start_date']).astype('timedelta64[s]')\n\ndeviceid_package_start_close = deviceid_package_start_close.merge(package_label, on='app_id', how='left')\ndeviceid_package_start_close.app_parent_type.fillna(-1, inplace=True)\ndeviceid_package_start_close.app_child_type.fillna(-1, inplace=True)\ndeviceid_package_start_close['start_year'] = deviceid_package_start_close.start_date.dt.year\ndeviceid_package_start_close['end_year'] = deviceid_package_start_close.end_date.dt.year\ndeviceid_package_start_close['year_gap'] = deviceid_package_start_close['end_year'] - deviceid_package_start_close['start_year']\n\n\n# In[5]:\n\n\ndeviceid_train=pd.concat([deviceid_train,deviceid_test])\n\n\n# In[6]:\n\n\ndeviceid_packages['apps']=deviceid_packages['apps'].apply(lambda x:x.split(','))\ndeviceid_packages['app_lenghth']=deviceid_packages['apps'].apply(lambda x:len(x))\n\n\n\n#特征工程\ndef open_app_timegap_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'start_hour'])['time_gap'].mean().reset_index().rename(columns = {'time_gap': 'mean_time_gap'})\n    df_mean_temp = pd.pivot_table(df_temp, index='device_id', columns='start_hour', values='mean_time_gap').reset_index()\n    df_mean_temp.columns = ['device_id'] + ['open_app_timegap_in_'+str(i) + '_mean_hour' for i in range(0,24)]\n    df_mean_temp.fillna(0, inplace=True)\n\n\n    \n    return df_mean_temp\n\n\n# In[8]:\n\n\ndef device_start_end_app_timegap() :\n    #用户打开，关闭app的时间间隔\n    df_ = deviceid_package_start_close.sort_values(by=['device_id', 'start_date'], ascending=False)\n    df_['prev_start_date'] = df_.groupby('device_id')['start_date'].shift(-1)\n    df_['start_date_gap'] = (df_['start_date'] - df_['prev_start_date']).astype('timedelta64[s]')\n    agg_dic = {'start_date_gap' : ['min', 'max', 'mean', 'median', 'std']}\n    df_start_gap_agg = df_.groupby('device_id').agg(agg_dic)\n    df_start_gap_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_start_gap_agg.columns.tolist()])\n    df_start_gap_agg = df_start_gap_agg.reset_index()\n    #del df_\n    gc.collect()\n    #关闭时间间隔\n    df_ = deviceid_package_start_close.sort_values(by=['device_id', 'end_date'], ascending=False)\n    df_['prev_end_date'] = df_.groupby('device_id')['end_date'].shift(-1)\n    df_['end_date_gap'] = (df_['end_date'] - df_['prev_end_date']).astype('timedelta64[s]')\n    agg_dic = {'end_date_gap' : ['min', 'max', 'mean', 'median', 'std']}\n    df_end_gap_agg = df_.groupby('device_id').agg(agg_dic)\n    df_end_gap_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_end_gap_agg.columns.tolist()])\n    df_end_gap_agg = df_end_gap_agg.reset_index()\n    #del df_\n    gc.collect()\n\n\n\n    df_agg = df_start_gap_agg.merge(df_end_gap_agg, on='device_id', how='left')\n    #df_agg = df_agg.merge(df_app_start_gap_agg, on='device_id', how='left')\n    #df_agg = df_agg.merge(df_app_end_gap_agg, on='device_id', how='left')\n    return df_agg\n\ndef open_app_counts_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'start_hour'])['app_id'].count().reset_index().rename(columns = {'app_id': 'app_counts'})\n    df_temp = 
pd.pivot_table(df_temp, index='device_id', columns='start_hour', values='app_counts').reset_index()\n    df_temp.columns = ['device_id'] + ['open_app_counts_in'+str(i) + '_hour' for i in range(0,24)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\ndef close_app_counts_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'end_hour'])['app_id'].count().reset_index().rename(columns = {'app_id': 'app_counts'})\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='end_hour', values='app_counts').reset_index()\n    df_temp.columns = ['device_id'] + ['close_app_counts_in'+str(i) + '_hour' for i in range(0,24)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\ndef app_type_mean_time_gap_one_hot () :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'app_parent_type'])['time_gap'].mean().reset_index()\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='app_parent_type', values='time_gap').reset_index()\n    df_temp.columns = ['device_id'] + ['app_parent_type_mean_time_gap'+str(i) for i in range(-1,45)]\n    df_temp.fillna(-1, inplace=True)\n    return df_temp  \n\ndef device_active_hour() :\n    aggregations = {\n        'start_hour' : ['std','mean','max','min'],\n        'end_hour' : ['std','mean','max','min']\n    }\n    df_agg = deviceid_package_start_close.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()   \n    \n    return df_agg\n\n\ndef device_brand_encoding() :\n    df_temp = deviceid_brand.merge(deviceid_train[['device_id', 'age', 'sex']], on='device_id', how='left')\n\n    aggregations = {\n        'age' : ['std','mean'],\n        'sex' : ['mean'],\n    }\n\n    df_device_brand = df_temp.groupby('device_brand').agg(aggregations)\n    df_device_brand.columns = pd.Index(['device_brand_' + e[0] + \"_\" + e[1].upper() for e in df_device_brand.columns.tolist()])\n    df_device_brand = df_device_brand.reset_index()\n\n    df_device_type = df_temp.groupby('device_type').agg(aggregations)\n    df_device_type.columns = pd.Index(['device_type_' + e[0] + \"_\" + e[1].upper() for e in df_device_type.columns.tolist()])\n    df_device_type = df_device_type.reset_index()\n\n    df_temp = df_temp.merge(df_device_brand, on='device_brand', how='left')\n    df_temp = df_temp.merge(df_device_type, on='device_type', how='left')\n\n    aggregations = {\n        'device_brand_age_STD' : ['mean'],\n        'device_brand_age_MEAN' : ['mean'],\n        'device_brand_sex_MEAN' : ['mean'],\n        #'device_type_age_STD' : ['mean'],\n        #'device_type_age_MEAN' : ['mean'],\n        #'device_type_sex_MEAN' : ['mean']\n    }\n\n    df_agg = df_temp.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n    return df_agg\n\n\n#统计device运行app的情况\ndef device_active_time_time_stat() :\n    #device开启app的时间统计信息\n    deviceid_package_start_close['active_time'] = deviceid_package_start_close['close_time'] - deviceid_package_start_close['start_time']\n\n    #device开启了多少次app\n    #device开启了多少个app\n    aggregations = {\n        'app_id' : ['count', 'nunique'],\n        'active_time' : ['mean', 'std', 'max', 'min'],\n    }\n    df_agg = deviceid_package_start_close.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" 
+ e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n\n    aggregations = {\n        'active_time' : ['mean', 'std', 'max', 'min', 'count'],\n    }\n    df_da_agg = deviceid_package_start_close.groupby(['device_id', 'app_id']).agg(aggregations)\n    df_da_agg.columns = pd.Index(['device_app_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_da_agg.columns.tolist()])\n    df_da_agg = df_da_agg.reset_index()\n\n    #device开启app的平均时间\n    aggregations = {\n        'device_app_grouped_active_time_MEAN' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_STD' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_MAX' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_MIN' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_COUNT' : ['mean', 'std', 'max', 'min'],\n    }\n    df_temp = df_da_agg.groupby(['device_id']).agg(aggregations)\n    df_temp.columns = pd.Index([e[0] + \"_\" + e[1].upper() for e in df_temp.columns.tolist()])\n    df_temp = df_temp.reset_index()\n\n    df_agg = df_agg.merge(df_temp, on='device_id', how='left')\n    return df_agg\n\n\ndef app_type_encoding() :\n    df_temp = df_device_app_pair.merge(deviceid_train[['device_id', 'age', 'sex']], on='device_id', how='left')\n\n    aggregations = {\n        'age' : ['std','mean'],\n        'sex' : ['mean'],\n    }\n\n    df_agg_app_parent_type = df_temp.groupby('app_parent_type').agg(aggregations)\n    df_agg_app_parent_type.columns = pd.Index(['app_parent_type_' + e[0] + \"_\" + e[1].upper() for e in df_agg_app_parent_type.columns.tolist()])\n    df_agg_app_parent_type = df_agg_app_parent_type.reset_index()\n\n    df_agg_app_child_type = df_temp.groupby('app_child_type').agg(aggregations)\n    df_agg_app_child_type.columns = pd.Index(['app_child_type_' + e[0] + \"_\" + e[1].upper() for e in df_agg_app_child_type.columns.tolist()])\n    df_agg_app_child_type = df_agg_app_child_type.reset_index()\n\n    df_temp = df_temp.merge(df_agg_app_parent_type, on='app_parent_type', how='left')\n    df_temp = df_temp.merge(df_agg_app_child_type, on='app_child_type', how='left')\n\n    aggregations = {\n        'app_parent_type_age_STD' : ['mean'],\n        'app_parent_type_age_MEAN' : ['mean'],\n        'app_parent_type_sex_MEAN' : ['mean'],\n        'app_child_type_age_STD' : ['mean'],\n        'app_child_type_age_MEAN' : ['mean'],\n        'app_child_type_sex_MEAN' : ['mean']\n    }\n    \n    df_agg = df_temp.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n    return df_agg\n\n#每个device对应的app_parent_type计数\ndef app_type_onehot_in_device(df) :\n    df_copy = df.fillna(-1)\n    df_temp = df_copy.groupby(['device_id', 'app_parent_type'])['app_id'].size().reset_index()\n    df_temp.rename(columns = {'app_id' : 'app_parent_type_counts'}, inplace=True)\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='app_parent_type', values='app_parent_type_counts').reset_index()\n    df_temp.columns = ['device_id'] + ['app_parent_type'+str(i) for i in range(-1,45)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\n\n\napps=deviceid_packages['apps'].apply(lambda x:' '.join(x)).tolist()\nvectorizer=CountVectorizer()\ntransformer=TfidfTransformer()\ncntTf = 
vectorizer.fit_transform(apps)\ntfidf=transformer.fit_transform(cntTf)\nword=vectorizer.get_feature_names()\nweight=tfidf.toarray()\ndf_weight=pd.DataFrame(weight)\nfeature=df_weight.columns\ndf_weight['sum']=0\nfor f in tqdm(feature):\n    df_weight['sum']+=df_weight[f]\ndeviceid_packages['tfidf_sum']=df_weight['sum']\n\n\n# In[10]:\n\n\nlda = LatentDirichletAllocation(n_components=5,\n                                learning_offset=50.,\n                                random_state=666)\ndocres = lda.fit_transform(cntTf)\n\n\n# In[11]:\n\n\ndeviceid_packages = pd.concat([deviceid_packages,pd.DataFrame(docres)],axis=1)\n\n\n# In[12]:\n\n\ntemp=deviceid_packages.drop('apps',axis=1)\ndeviceid_train=pd.merge(deviceid_train,temp,on='device_id',how='left')\n\n\n# In[13]:\n\n\n# Flatten the installed-app lists into (device_id, app_id) pairs\ndevice_id_arr = []\napp_arr = []\ndf_device_app_pair = pd.DataFrame()\nfor row in deviceid_packages.values :\n    device_id = row[0]\n    app_list = row[1]\n    for app in app_list :\n        device_id_arr.append(device_id)\n        app_arr.append(app)\n# Build the pair frame\ndf_device_app_pair['device_id'] = device_id_arr\ndf_device_app_pair['app_id'] = app_arr    \n\ndf_device_app_pair = df_device_app_pair.merge(package_label, how='left', on='app_id')\n\n\n# In[15]:\n\n\n# Assemble the base feature table\ndf_train = deviceid_train.merge(device_active_time_time_stat(), on='device_id', how='left')\ndf_train = df_train.merge(deviceid_brand, on='device_id', how='left')\ndf_train = df_train.merge(app_type_onehot_in_device(df_device_app_pair), on='device_id', how='left')\ndf_train = df_train.merge(app_type_encoding(), on='device_id', how='left')\ndf_train = df_train.merge(device_active_hour(), on='device_id', how='left')\ndf_train = df_train.merge(app_type_mean_time_gap_one_hot(), on='device_id', how='left')\ndf_train = df_train.merge(open_app_counts_in_hour(), on='device_id', how='left')\ndf_train = df_train.merge(close_app_counts_in_hour(), on='device_id', how='left')\ndf_train = df_train.merge(device_brand_encoding(), on='device_id', how='left')\ndf_train = df_train.merge(device_start_end_app_timegap(), on='device_id', how='left')\ndf_train = df_train.merge(open_app_timegap_in_hour(), on='device_id', how='left')\n\n\n# In[33]:\n\n\n
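# Second-level stacking input: the files read below hold word2vec app embeddings plus OOF probabilities from the first-level models (LGB, TF-IDF LR, GRU/LSTM/capsule/textcnn/dpcnn), all merged into one feature table per device.\n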
df_w2c_start = pd.read_csv('device_start_app_w2c.csv')\ndf_sex_prob_oof = pd.read_csv('device_sex_prob_oof.csv')\ndf_age_prob_oof = pd.read_csv('device_age_prob_oof.csv')\ndf_w2c_close = pd.read_csv('device_close_app_w2c.csv')\ndf_w2c_all = pd.read_csv('device_all_app_w2c.csv')\ndf_start_close_sex_prob_oof = pd.read_csv('start_close_sex_prob_oof.csv')\n# The next two did not track the leaderboard: offline CV overfit\ndf_start_close_age_prob_oof = pd.read_csv('start_close_age_prob_oof.csv')\ndf_device_quchong_start_app_w2c = pd.read_csv('device_quchong_start_app_w2c.csv')\ndf_tfidf_lr_sex_age_prob_oof = pd.read_csv('tfidf_lr_sex_age_prob_oof.csv')\ndf_device_app_unique_start_app_w2c = pd.read_csv('device_app_unique_start_app_w2c.csv')\ndf_device_app_unique_close_app_w2c = pd.read_csv('device_app_unique_close_app_w2c.csv')\ndf_device_app_unique_all_app_w2c = pd.read_csv('device_app_unique_all_app_w2c.csv')\n# Earlier features that proved useful\ndf_sex_age_bin_prob_oof = pd.read_csv('sex_age_bin_prob_oof.csv')\n\ndf_age_bin_prob_oof = pd.read_csv('age_bin_prob_oof.csv')\ndf_hcc_device_brand_age_sex = pd.read_csv('hcc_device_brand_age_sex.csv')\ndf_device_age_regression_prob_oof = pd.read_csv('device_age_regression_prob_oof.csv')\ndf_device_start_GRU_pred = pd.read_csv('device_start_GRU_pred.csv')\ndf_device_start_GRU_pred_age = pd.read_csv('device_start_GRU_pred_age.csv')\ndf_device_all_GRU_pred = pd.read_csv('device_all_GRU_pred.csv')\ndf_device_start_capsule_pred = pd.read_csv('device_start_capsule_pred.csv')\ndf_lgb_sex_age_prob_oof = pd.read_csv('lgb_sex_age_prob_oof.csv')\ndf_device_start_textcnn_pred = pd.read_csv('device_start_textcnn_pred.csv')\ndf_device_start_text_dpcnn_pred = pd.read_csv('device_start_text_dpcnn_pred.csv')\ndf_device_start_lstm_pred = pd.read_csv('device_start_lstm_pred.csv')\n\n# Drop columns that overfit in local CV\ndel df_start_close_age_prob_oof['device_app_groupedstart_close_age_prob_oof_4_MEAN']\ndel df_start_close_sex_prob_oof['device_app_groupedstart_close_sex_prob_oof_MIN']\ndel df_start_close_sex_prob_oof['device_app_groupedstart_close_sex_prob_oof_MAX']\n\n\n# In[35]:\n\n\ndf_train_w2v = df_train.merge(df_w2c_start, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_sex_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_age_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_w2c_close, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_w2c_all, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_start_close_sex_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_start_close_age_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_quchong_start_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_tfidf_lr_sex_age_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_start_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_close_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_all_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_sex_age_bin_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_age_bin_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_hcc_device_brand_age_sex, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_age_regression_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_start_GRU_pred, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_start_GRU_pred_age, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_all_GRU_pred, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_start_capsule_pred, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_lgb_sex_age_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_start_textcnn_pred, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_start_text_dpcnn_pred, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_start_lstm_pred, on='device_id', how='left')\n\n\n# In[24]:\n\n\ndf_train_w2v['sex'] = df_train_w2v['sex'].apply(lambda x:str(x))\ndf_train_w2v['age'] = df_train_w2v['age'].apply(lambda x:str(x))\n# Cast numeric label strings like '1.0' to '1'; leave 'nan' (unlabelled test rows) as-is\ndef tool(x):\n    if x=='nan':\n        return x\n    else:\n        return str(int(float(x)))\ndf_train_w2v['sex']=df_train_w2v['sex'].apply(tool)\ndf_train_w2v['age']=df_train_w2v['age'].apply(tool)\ndf_train_w2v['sex_age']=df_train_w2v['sex']+'-'+df_train_w2v['age']\n\ndf_train_w2v = df_train_w2v.replace({'nan':np.NaN,'nan-nan':np.NaN})\n\n\n# In[ ]:\n\n\ndf_train_w2v.to_csv('thluo_train_best_feat.csv', index=None)\n\n"
  },
  {
    "path": "THLUO/23.ATT_v6.py",
    "content": "import pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom scipy.sparse import hstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\nfrom gensim.models import FastText, Word2Vec\nimport re\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.preprocessing.text import Tokenizer, text_to_word_sequence\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing import text, sequence\nfrom keras.callbacks import *\nfrom keras.layers.advanced_activations import LeakyReLU, PReLU\nimport keras.backend as K\nfrom keras.optimizers import *\nfrom keras.utils import to_categorical\nfrom keras.utils import multi_gpu_model\n\nimport tensorflow as tf\n#from keras.backend.tensorflow_backend import set_session\n#config = tf.ConfigProto()\n#config.gpu_options.per_process_gpu_memory_fraction = 0.9\n#set_session(tf.Session(config=config))\nprint ('23.ATT_V6.py')\npath=\"input/\"\nnp.random.seed(1337)\n\npackages = pd.read_csv(path+'deviceid_packages.tsv', sep='\\t', names=['device_id', 'apps'])\ntest = pd.read_csv(path+'deviceid_test.tsv', sep='\\t', names=['device_id'])\ntrain = pd.read_csv(path+'deviceid_train.tsv', sep='\\t', names=['device_id', 'sex', 'age'])\n\nbrand = pd.read_table(path+'deviceid_brand.tsv', names=['device_id', 'vendor', 'version'])\n\ndata = pd.read_csv('thluo_train_best_feat.csv')\ndata.head()\n\ntrain = pd.merge(train, data, on='device_id', how='left')\ntest = pd.merge(test, data, on='device_id', how='left')\ntrain.head()\n\nX_h = train.drop(['device_id', 'sex', 'age'], axis=1).values\nX_h_test = test.drop(['device_id'], axis=1).values\n\npackages['app_lenghth'] = packages['apps'].apply(lambda x:x.split(',')).apply(lambda x:len(x))\npackages['app_list'] = packages['apps'].apply(lambda x:x.split(','))\ntrain = pd.merge(train, packages, on='device_id', how='left')\ntest = pd.merge(test, packages, on='device_id', how='left')\n\nembed_size = 128\nfastmodel = FastText(list(packages['app_list']), size=embed_size, window=4, min_count=3, negative=2,\n                 sg=1, sample=0.002, hs=1, workers=4)  \n\nembedding_fast = pd.DataFrame([fastmodel[word] for word in (fastmodel.wv.vocab)])\nembedding_fast['app'] = list(fastmodel.wv.vocab)\nembedding_fast.columns= [\"fdim_%s\" % str(i) for i in range(embed_size)]+[\"app\"]\n\ntokenizer = Tokenizer(lower=False, char_level=False, split=',')\n\ntokenizer.fit_on_texts(list(packages['apps']))\n\nX_seq = tokenizer.texts_to_sequences(train['apps'])\nX_test_seq = tokenizer.texts_to_sequences(test['apps'])\n\nmaxlen = 50\nX = pad_sequences(X_seq, maxlen=maxlen, value=0)\nX_test = pad_sequences(X_test_seq, maxlen=maxlen, value=0)\nY_sex = train['sex']-1\n\n\nmax_feaures=35001\nembedding_matrix = np.zeros((max_feaures, embed_size))\nfor word in tokenizer.word_index:\n    if word not in fastmodel.wv.vocab:\n        continue\n    embedding_matrix[tokenizer.word_index[word]] = fastmodel[word]\n\n\ndef dot_product(x, kernel):\n    \"\"\"\n    Wrapper for dot product operation, in order to be compatible with both\n    
Theano and Tensorflow\n    Args:\n        x (): input\n        kernel (): weights\n    Returns:\n    \"\"\"\n    if K.backend() == 'tensorflow':\n        return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)\n    else:\n        return K.dot(x, kernel)\n    \nclass AttentionWithContext(Layer):\n    \"\"\"\n    Attention operation, with a context/query vector, for temporal data.\n    Supports Masking.\n    Follows the work of Yang et al. [https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf]\n    \"Hierarchical Attention Networks for Document Classification\"\n    by using a context vector to assist the attention\n    # Input shape\n        3D tensor with shape: `(samples, steps, features)`.\n    # Output shape\n        2D tensor with shape: `(samples, features)`.\n    How to use:\n    Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.\n    The dimensions are inferred based on the output shape of the RNN.\n    Note: The layer has been tested with Keras 2.0.6\n    Example:\n        model.add(LSTM(64, return_sequences=True))\n        model.add(AttentionWithContext())\n        # next add a Dense layer (for classification/regression) or whatever...\n    \"\"\"\n\n    def __init__(self,\n                 W_regularizer=None, u_regularizer=None, b_regularizer=None,\n                 W_constraint=None, u_constraint=None, b_constraint=None,\n                 bias=True, **kwargs):\n\n        self.supports_masking = True\n        self.init = initializers.get('glorot_uniform')\n\n        self.W_regularizer = regularizers.get(W_regularizer)\n        self.u_regularizer = regularizers.get(u_regularizer)\n        self.b_regularizer = regularizers.get(b_regularizer)\n        \n        self.W_constraint = constraints.get(W_constraint)\n        self.u_constraint = constraints.get(u_constraint)\n        self.b_constraint = constraints.get(b_constraint)\n\n        self.bias = bias\n        super(AttentionWithContext, self).__init__(**kwargs)\n\n    def build(self, input_shape):\n        assert len(input_shape) == 3\n\n        self.W = self.add_weight((input_shape[-1], input_shape[-1],),\n                                 initializer=self.init,\n                                 name='{}_W'.format(self.name),\n                                 regularizer=self.W_regularizer,\n                                 constraint=self.W_constraint)\n        if self.bias:\n            self.b = self.add_weight((input_shape[-1],),\n                                     initializer='zero',\n                                     name='{}_b'.format(self.name),\n                                     regularizer=self.b_regularizer,\n                                     constraint=self.b_constraint)\n\n        self.u = self.add_weight((input_shape[-1],),\n                                 initializer=self.init,\n                                 name='{}_u'.format(self.name),\n                                 regularizer=self.u_regularizer,\n                                 constraint=self.u_constraint)\n\n        super(AttentionWithContext, self).build(input_shape)\n        \n    def compute_mask(self, input, input_mask=None):\n        # do not pass the mask to the next layers\n        return None\n\n    def call(self, x, mask=None):\n        uit = dot_product(x, self.W)\n\n        if self.bias:\n            uit += self.b\n\n        uit = K.tanh(uit)\n        ait = dot_product(uit, self.u)\n\n        a = K.exp(ait)\n\n        # apply mask after the exp. 
will be re-normalized next\n        if mask is not None:\n            # Cast the mask to floatX to avoid float64 upcasting in theano\n            a *= K.cast(mask, K.floatx())\n\n        # in some cases especially in the early stages of training the sum may be almost zero\n        # and this results in NaN's. A workaround is to add a very small positive number ε to the sum.\n        # a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())\n        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())\n\n        a = K.expand_dims(a)\n        weighted_input = x * a\n        return K.sum(weighted_input, axis=1)\n\n    def compute_output_shape(self, input_shape):\n        return input_shape[0], input_shape[-1]\n\nclass AdamW(Optimizer):\n    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, weight_decay=1e-4,  # decoupled weight decay (1/4)\n                 epsilon=1e-8, decay=0., **kwargs):\n        super(AdamW, self).__init__(**kwargs)\n        with K.name_scope(self.__class__.__name__):\n            self.iterations = K.variable(0, dtype='int64', name='iterations')\n            self.lr = K.variable(lr, name='lr')\n            self.beta_1 = K.variable(beta_1, name='beta_1')\n            self.beta_2 = K.variable(beta_2, name='beta_2')\n            self.decay = K.variable(decay, name='decay')\n            self.wd = K.variable(weight_decay, name='weight_decay') # decoupled weight decay (2/4)\n        self.epsilon = epsilon\n        self.initial_decay = decay\n\n    @interfaces.legacy_get_updates_support\n    def get_updates(self, loss, params):\n        grads = self.get_gradients(loss, params)\n        self.updates = [K.update_add(self.iterations, 1)]\n        wd = self.wd # decoupled weight decay (3/4)\n\n        lr = self.lr\n        if self.initial_decay > 0:\n            lr *= (1. / (1. + self.decay * K.cast(self.iterations,\n                                                  K.dtype(self.decay))))\n\n        t = K.cast(self.iterations, K.floatx()) + 1\n        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /\n                     (1. - K.pow(self.beta_1, t)))\n\n        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n        self.weights = [self.iterations] + ms + vs\n\n        for p, g, m, v in zip(params, grads, ms, vs):\n            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g\n            v_t = (self.beta_2 * v) + (1. 
- self.beta_2) * K.square(g)\n            p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon) - lr * wd * p # decoupled weight decay (4/4)\n\n            self.updates.append(K.update(m, m_t))\n            self.updates.append(K.update(v, v_t))\n            new_p = p_t\n\n            # Apply constraints.\n            if getattr(p, 'constraint', None) is not None:\n                new_p = p.constraint(new_p)\n\n            self.updates.append(K.update(p, new_p))\n        return self.updates\n\n    def get_config(self):\n        config = {'lr': float(K.get_value(self.lr)),\n                  'beta_1': float(K.get_value(self.beta_1)),\n                  'beta_2': float(K.get_value(self.beta_2)),\n                  'decay': float(K.get_value(self.decay)),\n                  'weight_decay': float(K.get_value(self.wd)),\n                  'epsilon': self.epsilon}\n        base_config = super(AdamW, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\ndef model_conv1D(embedding_matrix):\n    K.clear_session()\n    # The embedding layer containing the word vectors\n    emb_layer = Embedding(\n        input_dim=embedding_matrix.shape[0],\n        output_dim=embedding_matrix.shape[1],\n        weights=[embedding_matrix],\n        input_length=maxlen,\n        trainable=False)\n    lstm_layer = Bidirectional(GRU(128, recurrent_dropout=0.15, dropout=0.15, return_sequences=True))\n    att = AttentionWithContext()\n    # Define inputs\n    seq = Input(shape=(maxlen,))\n    # Run inputs through embedding\n    emb = emb_layer(seq)\n    lstm = lstm_layer(emb)\n    # Run through CONV + GAP layers\n    att1 = att(lstm)\n    hin = Input(shape=(X_h.shape[1], ))\n    htime = Dense(64, activation='relu')(hin)\n    merge1 = concatenate([att1, htime])\n    \n    # The MLP that determines the outcome\n    x = Dropout(0.3)(merge1)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    pred = Dense(1, activation='sigmoid')(x)\n\n    model = Model(inputs=[seq, hin], outputs=pred)\n    model.compile(loss='binary_crossentropy', optimizer=AdamW(weight_decay=0.08,))###\n    return model\n\nkfold = StratifiedKFold(n_splits=5, random_state=20, shuffle=True)\nsub1 = np.zeros((X_test.shape[0], ))\noof_pref1 = np.zeros((X.shape[0], 1))\nscore = []\ncount=0\nfor i, (train_index, test_index) in enumerate(kfold.split(X, Y_sex)):\n    print(\"FOLD | \",count+1)\n    filepath=\"sex_weights_best.h5\"\n    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.8, patience=2, min_lr=0.0001, verbose=1)\n    earlystopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=6, verbose=1, mode='auto')\n    callbacks = [checkpoint, reduce_lr, earlystopping]\n    \n    model_sex = model_conv1D(embedding_matrix)\n    X_tr, X_vl, X_tr2, X_vl2, y_tr, y_vl = X[train_index], X[test_index], X_h[train_index], X_h[test_index], Y_sex[train_index], Y_sex[test_index]\n    hist = model_sex.fit([X_tr, X_tr2], y_tr, batch_size=256, epochs=50, validation_data=([X_vl, X_vl2], y_vl),\n                 callbacks=callbacks, verbose=2, shuffle=True)\n    model_sex.load_weights(filepath)\n    sub1 += np.squeeze(model_sex.predict([X_test, X_h_test]))/kfold.n_splits\n    oof_pref1[test_index] = model_sex.predict([X_vl, 
X_vl2])\n    score.append(np.min(hist.history['val_loss']))\n    count+=1\n\nprint('log loss:',np.mean(score))\n\noof_pref1 = pd.DataFrame(oof_pref1, columns=['sex2'])\nsub1 = pd.DataFrame(sub1, columns=['sex2'])\nres1 = pd.concat([oof_pref1, sub1])\nres1['sex1'] = 1-res1['sex2']\nres1.to_csv(\"res1.csv\", index=False)\n\n\ndef model_age_conv(embedding_matrix):\n    \n    # The embedding layer containing the word vectors\n    K.clear_session()\n    emb_layer = Embedding(\n        input_dim=embedding_matrix.shape[0],\n        output_dim=embedding_matrix.shape[1],\n        weights=[embedding_matrix],\n        input_length=maxlen,\n        trainable=False\n    )\n    lstm_layer = Bidirectional(GRU(128, recurrent_dropout=0.15, dropout=0.15, return_sequences=True))\n    att = AttentionWithContext()\n    # Define inputs\n    seq = Input(shape=(maxlen,))\n    # Run inputs through embedding\n    emb = emb_layer(seq)\n    lstm = lstm_layer(emb)\n    # Run through CONV + GAP layers\n    att1 = att(lstm)\n    hin = Input(shape=(X_h.shape[1], ))\n    htime = Dense(64, activation='relu')(hin)\n    merge1 = concatenate([att1, htime])\n    \n    # The MLP that determines the outcome\n    x = Dropout(0.3)(merge1)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    pred = Dense(11, activation='softmax')(x)\n\n    model = Model(inputs=[seq, hin], outputs=pred)\n    model.compile(loss='categorical_crossentropy', optimizer=AdamW(weight_decay=0.08,))\n    return model\n\nY_age = to_categorical(train['age'])\nX_h = np.hstack([X_h, train['sex'].values.reshape((-1, 1))])\nX_h_test1 = np.hstack([X_h_test, np.ones((X_h_test.shape[0], 1))])\nX_h_test2 = np.hstack([X_h_test, np.ones((X_h_test.shape[0], 1))*2])\n\nsub2_1 = np.zeros((X_test.shape[0], 11))\nsub2_2 = np.zeros((X_test.shape[0], 11))\noof_pref2 = np.zeros((X.shape[0], 11))\nscore = []\ncount=0\nfor i, (train_index, test_index) in enumerate(kfold.split(X, train['age'])):\n    print(\"FOLD | \",count+1)\n    \n    filepath2=\"age_weights_best_%d.h5\"%count\n    checkpoint2 = ModelCheckpoint(filepath2, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n    reduce_lr2 = ReduceLROnPlateau(monitor='val_loss', factor=0.8, patience=2, min_lr=0.0001, verbose=1)\n    earlystopping2 = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=8, verbose=1, mode='auto')\n    callbacks2 = [checkpoint2, reduce_lr2, earlystopping2]\n    \n    model_age = model_age_conv(embedding_matrix)\n    X_tr, X_vl, X_tr2, X_vl2, y_tr, y_vl = X[train_index], X[test_index], X_h[train_index], X_h[test_index], Y_age[train_index], Y_age[test_index]\n    hist = model_age.fit([X_tr, X_tr2], y_tr, batch_size=256, epochs=50, validation_data=([X_vl, X_vl2], y_vl),\n                 callbacks=callbacks2, verbose=2, shuffle=True)\n    \n    model_age.load_weights(filepath2)\n    oof_pref2[test_index] = model_age.predict([X_vl, X_vl2])\n    sub2_1 += model_age.predict([X_test, X_h_test1])/kfold.n_splits\n    sub2_2 += model_age.predict([X_test, X_h_test2])/kfold.n_splits\n    score.append(np.min(hist.history['val_loss']))\n    count+=1\nprint('log loss:',np.mean(score))\nres2_1 = np.vstack((oof_pref2, sub2_1))\nres2_1 = pd.DataFrame(res2_1)\nres2_1.to_csv(\"res2_1.csv\",index=False)\n\nres2_2 = np.vstack((oof_pref2, sub2_2))\nres2_2 = 
pd.DataFrame(res2_2)\nres2_2.to_csv(\"res2_2.csv\",index=False)\n\nres1.index=range(len(res1))\nres2_1.index=range(len(res2_1))\nres2_2.index=range(len(res2_2))\nfinal_1 = res2_1\nfinal_2 = res2_2\nfor i in range(11):\n    final_1[i] = res1['sex1']*res2_1[i]\n    final_2[i] = res1['sex2']*res2_2[i]\nid_list = pd.concat([train[['device_id']],test[['device_id']]])\nfinal = id_list\nfinal.index = range(len(final))\nfinal.columns= ['device_id']\nfinal_pred = pd.concat([final_1, final_2], axis=1)\nfinal = pd.concat([final, final_pred], axis=1)\nfinal.columns = ['device_id', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \n         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', \n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\nfinal.to_csv('att_nn_feat_v6.csv', index=False)\nsub = pd.merge(test[['device_id']], final, on='device_id', how='left')\nsub.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \n         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', \n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\nsub.to_csv('Att_v6.csv', index=False)\n\n"
  },
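  {
    "path": "THLUO/notes/sketch_sex_age_factorization.py",
    "content": "\n# coding: utf-8\n\n# Editor's illustrative sketch -- NOT part of the original pipeline.\n# The attention-NN script above factors the 22-way target as\n# P(sex, age) = P(sex) * P(age | sex): a binary sex model plus an\n# 11-class age model scored once per assumed sex. This toy numpy\n# example shows only the combination step; all numbers are random\n# and the file name is hypothetical.\nimport numpy as np\n\nrng = np.random.RandomState(0)\nn = 3                                  # toy number of devices\n\ndef softmax(x):\n    e = np.exp(x - x.max(axis=1, keepdims=True))\n    return e / e.sum(axis=1, keepdims=True)\n\np_sex2 = rng.rand(n)                   # P(sex=2) from the binary model\np_sex1 = 1.0 - p_sex2                  # P(sex=1)\np_age_given_sex1 = softmax(rng.rand(n, 11))   # P(age | sex=1), 11 bins\np_age_given_sex2 = softmax(rng.rand(n, 11))   # P(age | sex=2)\n\n# joint probabilities: columns '1-0'..'1-10' then '2-0'..'2-10'\njoint = np.hstack([p_sex1[:, None] * p_age_given_sex1,\n                   p_sex2[:, None] * p_age_given_sex2])\n\n# every row is still a proper distribution over the 22 sex-age classes\nassert np.allclose(joint.sum(axis=1), 1.0)\n"
  },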
  {
    "path": "THLUO/24.thluo_22_lgb.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nimport gc\n\n\n\n# In[24]:\n\n\ndf_train_w2v = pd.read_csv('thluo_train_best_feat.csv')\ndf_att_nn_feat_v6 = pd.read_csv('att_nn_feat_v6.csv')\ndf_att_nn_feat_v6.columns = ['device_id'] + ['att_nn_feat_' + str(i) for i in range(22)]\ndf_train_w2v = df_train_w2v.merge(df_att_nn_feat_v6, on='device_id', how='left')\n\n\n# In[ ]:\n\n\ndf_train_w2v.to_csv('thluo_train_best_feat.csv', index=None)\n\n\n# In[26]:\n\n\ntrain = df_train_w2v[df_train_w2v['sex'].notnull()]\ntest = df_train_w2v[df_train_w2v['sex'].isnull()]\n\nX = train.drop(['sex','age','sex_age','device_id'],axis=1)\nY = train['sex_age']\nY_CAT = pd.Categorical(Y)\nY = pd.Series(Y_CAT.codes)\n\n\n# In[28]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\ngc.collect()\nseed = 666\nnum_folds = 5\nfolds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed)\n\nsub_list = []\n\ncate_feat = ['device_type','device_brand']\n\nfor n_fold, (train_idx, valid_idx) in enumerate(folds.split(X, Y)):\n    train_x, train_y = X.iloc[train_idx], Y.iloc[train_idx]\n    valid_x, valid_y = X.iloc[valid_idx], Y.iloc[valid_idx] \n    \n    lgb_train=lgb.Dataset(train_x,label=train_y)\n    lgb_eval = lgb.Dataset(valid_x, valid_y, reference=lgb_train)\n    params = {\n        'boosting_type': 'gbdt',\n        #'learning_rate' : 0.02,\n        'learning_rate' : 0.01,\n        'max_depth':5,\n        'num_leaves' : 2 ** 4,\n        'metric': {'multi_logloss'},\n        'num_class' : 22,\n        'objective' : 'multiclass',\n        'random_state' : 2018,\n        'bagging_freq' : 5,\n        'feature_fraction' : 0.7,\n        'bagging_fraction' : 0.7,\n        'min_split_gain' : 0.0970905919552776,\n        'min_child_weight' : 9.42012323936088,  \n    }  \n    \n    gbm = lgb.train(params,\n                    lgb_train,\n                    num_boost_round=1000,\n                    valid_sets=lgb_eval,\n                    early_stopping_rounds=200, verbose_eval=100)  \n    \n    sub = pd.DataFrame(gbm.predict(test[X.columns.values],num_iteration=gbm.best_iteration))\n    sub_list.append(sub)\n\n\n# In[29]:\n\n\nsub = (sub_list[0] + sub_list[1] + sub_list[2] + sub_list[3] + sub_list[4]) / num_folds\n\n\n# In[31]:\n\n\nsub.columns=Y_CAT.categories\nsub['DeviceID']=test['device_id'].values\nsub=sub[['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']]\n\n\n# In[32]:\n\n\nsub.to_csv('th_22_results_lgb.csv',index=False)\n\n"
  },
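  {
    "path": "THLUO/notes/sketch_sex_age_categorical_codes.py",
    "content": "\n# coding: utf-8\n\n# Editor's illustrative sketch -- NOT part of the original pipeline.\n# 24.thluo_22_lgb.py trains on pd.Categorical codes of the 'sex-age'\n# string and restores column names from Y_CAT.categories afterwards.\n# Toy example (file name hypothetical) showing why the submission\n# columns must be reordered explicitly: categories sort\n# lexicographically, so '1-10' comes before '1-2'.\nimport pandas as pd\n\ny = pd.Series(['1-0', '2-10', '1-2', '2-0'])\ncat = pd.Categorical(y)\nprint(list(cat.categories))   # ['1-0', '1-2', '2-0', '2-10']\nprint(list(cat.codes))        # [0, 3, 1, 2] -- integer labels for LightGBM\n\n# mapping a predicted class index back to its sex-age label:\nprint(cat.categories[3])      # '2-10'\n"
  },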
  {
    "path": "THLUO/25.thluo_22_xgb.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nimport xgboost as xgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nimport gc\nfrom feat_util import *\n\n\n# In[2]:\n\nprint ('25.thluo_22_xgb.py')\npath='input/'\ndata=pd.DataFrame()\n#sex_age=pd.read_excel('./data/性别年龄对照表.xlsx')\n\n\n# In[3]:\n\n\ndeviceid_packages=pd.read_csv(path+'deviceid_packages.tsv',sep='\\t',names=['device_id','apps'])\ndeviceid_test=pd.read_csv(path+'deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv(path+'deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\n\n\n# In[4]:\n\n\ndf_train = pd.concat([deviceid_train, deviceid_test])\n\n\n# In[5]:\n\n\ndf_train\n\n\n# In[6]:\n\n\ndf_sex_prob_oof = pd.read_csv('device_sex_prob_oof.csv')\ndf_age_prob_oof = pd.read_csv('device_age_prob_oof.csv')\ndf_start_close_sex_prob_oof = pd.read_csv('start_close_sex_prob_oof.csv')\n#后面两个，线上线下不对应，线下过拟合了\ndf_start_close_age_prob_oof = pd.read_csv('start_close_age_prob_oof.csv')\ndf_tfidf_lr_sex_age_prob_oof = pd.read_csv('tfidf_lr_sex_age_prob_oof.csv')\n#之前的有用的\ndf_sex_age_bin_prob_oof = pd.read_csv('sex_age_bin_prob_oof.csv')\n\ndf_age_bin_prob_oof = pd.read_csv('age_bin_prob_oof.csv')\ndf_hcc_device_brand_age_sex = pd.read_csv('hcc_device_brand_age_sex.csv')\ndf_device_age_regression_prob_oof = pd.read_csv('device_age_regression_prob_oof.csv')\ndf_device_start_GRU_pred = pd.read_csv('device_start_GRU_pred.csv')\ndf_device_start_GRU_pred_age = pd.read_csv('device_start_GRU_pred_age.csv')\ndf_device_all_GRU_pred = pd.read_csv('device_all_GRU_pred.csv')\ndf_lgb_sex_age_prob_oof = pd.read_csv('lgb_sex_age_prob_oof.csv')\ndf_device_start_capsule_pred = pd.read_csv('device_start_capsule_pred.csv')\ndf_device_start_textcnn_pred = pd.read_csv('device_start_textcnn_pred.csv')\ndf_device_start_text_dpcnn_pred = pd.read_csv('device_start_text_dpcnn_pred.csv')\ndf_device_start_lstm_pred = pd.read_csv('device_start_lstm_pred.csv')\ndf_att_nn_feat_v6 = pd.read_csv('att_nn_feat_v6.csv')\ndf_att_nn_feat_v6.columns = ['device_id'] + ['att_nn_feat_' + str(i) for i in range(22)]\n\n#过拟合特征\ndel df_start_close_age_prob_oof['device_app_groupedstart_close_age_prob_oof_4_MEAN']\ndel df_start_close_sex_prob_oof['device_app_groupedstart_close_sex_prob_oof_MIN']\ndel df_start_close_sex_prob_oof['device_app_groupedstart_close_sex_prob_oof_MAX']\n\n\n# In[7]:\n\n\ndf_train_w2v = df_train.merge(df_sex_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_age_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_start_close_sex_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_start_close_age_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_sex_age_bin_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_age_bin_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_hcc_device_brand_age_sex, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_age_regression_prob_oof, on='device_id', how='left')\ndf_train_w2v = 
df_train_w2v.merge(df_device_start_GRU_pred, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_start_GRU_pred_age, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_all_GRU_pred, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_lgb_sex_age_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_start_capsule_pred, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_start_textcnn_pred, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_start_text_dpcnn_pred, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_start_lstm_pred, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_att_nn_feat_v6, on='device_id', how='left')\n\n\n# In[9]:\n\n\ndf_train_w2v['sex'] = df_train_w2v['sex'].apply(lambda x:str(x))\ndf_train_w2v['age'] = df_train_w2v['age'].apply(lambda x:str(x))\ndef tool(x):\n    if x=='nan':\n        return x\n    else:\n        return str(int(float(x)))\ndf_train_w2v['sex']=df_train_w2v['sex'].apply(tool)\ndf_train_w2v['age']=df_train_w2v['age'].apply(tool)\ndf_train_w2v['sex_age']=df_train_w2v['sex']+'-'+df_train_w2v['age']\ndf_train_w2v = df_train_w2v.replace({'nan':np.NaN,'nan-nan':np.NaN})\n\n\n# In[11]:\n\n\ntrain = df_train_w2v[df_train_w2v['sex'].notnull()]\ntest = df_train_w2v[df_train_w2v['sex'].isnull()]\n\nX = train.drop(['sex','age','sex_age','device_id'],axis=1)\nY = train['sex_age']\nY_CAT = pd.Categorical(Y)\nY = pd.Series(Y_CAT.codes)\n\n\n# In[14]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\ngc.collect()\n#seed = 2048\nseed = 666\nnum_folds = 5\nfolds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed)\n\nsub_list = []\n\ncate_feat = ['device_type','device_brand']\n\nfor n_fold, (train_idx, valid_idx) in enumerate(folds.split(X, Y)):\n    train_x, train_y = X.iloc[train_idx], Y.iloc[train_idx]\n    valid_x, valid_y = X.iloc[valid_idx], Y.iloc[valid_idx] \n    \n    xg_train = xgb.DMatrix(train_x, label=train_y)\n    xg_val = xgb.DMatrix(valid_x, label=valid_y)    \n\n    param = {\n        'objective' : 'multi:softprob',\n        'eta' : 0.03,\n        'max_depth' : 3, \n        'num_class' : 22,\n        'eval_metric' : 'mlogloss',\n        'min_child_weight' : 3,\n        'subsample' : 0.7,\n        'colsample_bytree' : 0.7,\n        'seed' : 2006,\n        'nthread' : 5\n    } \n    \n    num_rounds = 1000\n\n    watchlist = [ (xg_train,'train'), (xg_val, 'val') ]\n    model = xgb.train(param, xg_train, num_rounds, watchlist, early_stopping_rounds=100, verbose_eval=50)    \n    \n    test_matrix = xgb.DMatrix(test[X.columns.values])\n    sub = pd.DataFrame(model.predict(test_matrix))\n    sub_list.append(sub)\n\n\n# In[15]:\n\n\nsub = (sub_list[0] + sub_list[1] + sub_list[2] + sub_list[3] + sub_list[4]) / num_folds\nsub\n\n\n# In[16]:\n\n\nsub.columns=Y_CAT.categories\nsub['DeviceID']=test['device_id'].values\nsub=sub[['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']]\nsub.to_csv('th_22_results_xgb.csv',index=False)\n\n"
  },
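  {
    "path": "THLUO/notes/sketch_fold_averaged_xgb.py",
    "content": "\n# coding: utf-8\n\n# Editor's illustrative sketch -- NOT part of the original pipeline.\n# 25.thluo_22_xgb.py never refits on the full data: each fold model\n# predicts the test set and the five probability matrices are averaged.\n# Minimal synthetic reproduction of that scheme (3 classes, 2 folds,\n# random data; every name here is hypothetical).\nimport numpy as np\nimport xgboost as xgb\nfrom sklearn.model_selection import StratifiedKFold\n\nrng = np.random.RandomState(0)\nX = rng.rand(120, 4)\ny = rng.randint(0, 3, 120)\nX_test = rng.rand(10, 4)\n\nparams = {'objective': 'multi:softprob', 'num_class': 3,\n          'eta': 0.3, 'max_depth': 3, 'seed': 0}\n\nfolds = StratifiedKFold(n_splits=2, shuffle=True, random_state=0)\nsub = np.zeros((len(X_test), 3))\nfor tr_idx, va_idx in folds.split(X, y):\n    dtrain = xgb.DMatrix(X[tr_idx], label=y[tr_idx])\n    model = xgb.train(params, dtrain, num_boost_round=20)\n    # accumulate this fold model's test probabilities\n    sub += model.predict(xgb.DMatrix(X_test)) / folds.n_splits\n\nprint(sub.sum(axis=1))   # averaged rows still sum to 1\n"
  },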
  {
    "path": "THLUO/26.thluo_nb_lgb.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\n# coding: utf-8\n\n# In[1]:\n\nfrom sklearn.metrics import log_loss\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n#add\nimport gc\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom scipy.sparse import hstack, vstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\n# from skopt.space import Integer, Categorical, Real, Log10\n# from skopt.utils import use_named_args\n# from skopt import gp_minimize\nfrom gensim.models import Word2Vec, FastText\nimport gensim \nimport re\nimport os\npath=\"./\"\nos.listdir(path)\n\n\n# In[2]:\nprint ('26.thluo_nb_lgb.py')\n\ntrain_id=pd.read_csv(\"input/deviceid_train.tsv\",sep=\"\\t\",names=['device_id','sex','age'])\ntest_id=pd.read_csv(\"input/deviceid_test.tsv\",sep=\"\\t\",names=['device_id'])\nall_id=pd.concat([train_id[['device_id']],test_id[['device_id']]])\n#nurbs=pd.read_csv(\"nurbs_feature_all.csv\")\n#nurbs.columns=[\"nurbs_\"+str(i) for i in nurbs.columns]\nthluo = pd.read_csv(\"thluo_train_best_feat.csv\")\ndel thluo['age']\ndel thluo['sex']\ndel thluo['sex_age']\n\n\n# In[7]:\n\n\nfeat = thluo.copy()\n\n\n# In[8]:\n\n\ntrain=pd.merge(train_id,feat,on=\"device_id\",how=\"left\")\ntest=pd.merge(test_id,feat,on=\"device_id\",how=\"left\")\n\n\n# In[11]:\n\n\nfeatures = [x for x in train.columns if x not in ['device_id', 'sex',\"age\",]]\nY = train['sex'] - 1\n\n\n# In[12]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\ngc.collect()\nseed = 1024\nnum_folds = 5\nfolds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed)\n\n\n# In[13]:\n\n\nparams = {\n    'boosting_type': 'gbdt',\n    'learning_rate' : 0.02,\n    #'max_depth':5,\n    'num_leaves' : 2 ** 5,\n    'metric': {'binary_logloss'},\n    #'num_class' : 22,\n    'objective' : 'binary',\n    'random_state' : 6666,\n    'bagging_freq' : 5,\n    'feature_fraction' : 0.7,\n    'bagging_fraction' : 0.7,\n    'min_split_gain' : 0.0970905919552776,\n    'min_child_weight' : 9.42012323936088,  \n}\n\n\n# In[14]:\n\n\n#预测性别\naus = []\nsub1 = np.zeros((len(test), ))\npred_oob1=np.zeros((len(train),))\nfor i,(train_index,test_index) in enumerate(folds.split(train[features], Y)):\n  \n    tr_x = train[features].reindex(index=train_index, copy=False)\n    tr_y = Y[train_index]\n    te_x = train[features].reindex(index=test_index, copy=False)\n    te_y = Y[test_index]\n\n    lgb_train=lgb.Dataset(tr_x,label=tr_y)\n    lgb_eval = lgb.Dataset(te_x, te_y, reference=lgb_train)\n\n    gbm = lgb.train(params, lgb_train, num_boost_round=300, \n                    valid_sets=[lgb_train, lgb_eval], verbose_eval=100)         \n\n    pred = gbm.predict(te_x[tr_x.columns.values])\n    pred_oob1[test_index] =pred\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    a = log_loss(te_y, pred)\n    \n\n    print (\"idx: \", i) \n    print (\" loss: %.5f\" % a)\n#     print \" gini: %.5f\" % g\n    aus.append(a)\n\nprint (\"mean\")\nprint (\"auc:       %s\" % (sum(aus) / 
5.0))\n\n\n# In[15]:\n\n\n#用全部数据训练一个lgb\n#用全部的train来预测test\nlgb_train = lgb.Dataset(train[features],label=Y)\n\ngbm = lgb.train(params, lgb_train, num_boost_round=300, valid_sets=lgb_train, verbose_eval=100)  \n\nsub1 = gbm.predict(test[features])\n\n\n# In[16]:\n\n\npred_oob1 = pd.DataFrame(pred_oob1, columns=['sex2'])\nsub1 = pd.DataFrame(sub1, columns=['sex2'])\nres1=pd.concat([pred_oob1,sub1])\nres1['sex1'] = 1-res1['sex2']\n\n\n# In[18]:\n\n\n\n# In[50]:\n\n\nfeatures = [x for x in train.columns if x not in ['device_id',\"age\"]]\nY = train['age'] \n\n\n# In[51]:\n\n\nimport lightgbm as lgb\nimport xgboost as xgb\nfrom sklearn.metrics import auc, log_loss, roc_auc_score,f1_score,recall_score,precision_score\n\n\n# In[19]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\ngc.collect()\nseed = 1024\nnum_folds = 5\nfolds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed)\n\n\n# In[20]:\n\n\nparams = {\n    'boosting_type': 'gbdt',\n    'learning_rate' : 0.02,\n    #'max_depth':5,\n    'num_leaves' : 2 ** 5,\n    'metric': {'multi_logloss'},\n    'num_class' : 11,\n    'objective' : 'multiclass',\n    'random_state' : 6666,\n    'bagging_freq' : 5,\n    'feature_fraction' : 0.7,\n    'bagging_fraction' : 0.7,\n    'min_split_gain' : 0.0970905919552776,\n    'min_child_weight' : 9.42012323936088,  \n}\n\n\n# In[22]:\n\n\n#预测性别\naus = []\nsub2 = np.zeros((len(test),11 ))\npred_oob2=np.zeros((len(train),11))\nmodels=[]\niters=[]\nfor i,(train_index,test_index) in enumerate(folds.split(train[features], Y)):\n  \n    tr_x = train[features].reindex(index=train_index, copy=False)\n    tr_y = Y[train_index]\n    te_x = train[features].reindex(index=test_index, copy=False)\n    te_y = Y[test_index]\n\n    lgb_train=lgb.Dataset(tr_x,label=tr_y)\n    lgb_eval = lgb.Dataset(te_x, te_y, reference=lgb_train)\n\n    gbm = lgb.train(params, lgb_train, num_boost_round=430, \n                    valid_sets=[lgb_train, lgb_eval], verbose_eval=100)         \n\n    pred = gbm.predict(te_x[tr_x.columns.values])\n    pred_oob2[test_index] = pred\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    a = log_loss(te_y, pred)\n\n    #sub2 += gbm.predict(test[features], num_iteration=gbm.best_iteration) / 5\n    \n    models.append(gbm)\n    iters.append(gbm.best_iteration)    \n\n    print (\"idx: \", i) \n    print (\" loss: %.5f\" % a)\n#     print \" gini: %.5f\" % g\n    aus.append(a)\n\nprint (\"mean\")\nprint (\"auc:       %s\" % (sum(aus) / 5.0))\n\n\n# In[23]:\n\n\n#预测条件概率\n####sex1\ntest['sex']=1\n#用全部数据训练一个lgb\n#用全部的train来预测test\nlgb_train = lgb.Dataset(train[features],label=Y)\n\ngbm = lgb.train(params, lgb_train, num_boost_round=430, valid_sets=lgb_train, verbose_eval=100)  \nsub2 = gbm.predict(test[features])\n\nres2_1=np.vstack((pred_oob2,sub2))\nres2_1 = pd.DataFrame(res2_1)\n\n\n# In[24]:\n\n\n###sex2\n#预测条件概率\ntest['sex']=2\n\nsub2 = np.zeros((len(test),11))\nsub2 = gbm.predict(test[features], num_iteration = gbm.best_iteration)\nres2_2=np.vstack((pred_oob2,sub2))\nres2_2 = pd.DataFrame(res2_2) \n\n\n# In[27]:\n\n\nres1.index=range(len(res1))\nres2_1.index=range(len(res2_1))\nres2_2.index=range(len(res2_2))\nfinal_1=res2_1.copy()\nfinal_2=res2_2.copy()\n\n\n# In[28]:\n\n\nfor i in range(11):\n    final_1[i]=res1['sex1'] * res2_1[i]\n    final_2[i]=res1['sex2'] * res2_2[i]\nid_list = pd.concat([train[['device_id']],test[['device_id']]])\nfinal = id_list\nfinal.index = range(len(final))\nfinal.columns = ['DeviceID']\nfinal_pred = pd.concat([final_1,final_2], 
1)\nfinal = pd.concat([final,final_pred],1)\nfinal.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \n         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', \n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\n\n# In[30]:\n\n\ntest['DeviceID']=test['device_id']\nsub=pd.merge(test[['DeviceID']],final,on=\"DeviceID\",how=\"left\")\nsub.to_csv(\"th_lgb_nb.csv\",index=False)\n\n"
  },
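  {
    "path": "THLUO/notes/sketch_conditional_age_model.py",
    "content": "\n# coding: utf-8\n\n# Editor's illustrative sketch -- NOT part of the original pipeline.\n# 26.thluo_nb_lgb.py trains the age model with the TRUE sex included\n# as a feature, then scores the test set twice -- once with sex forced\n# to 1, once forced to 2 -- to obtain P(age | sex). Stand-in version\n# with LogisticRegression and random data; all names are hypothetical.\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\n\nrng = np.random.RandomState(0)\ntrain = pd.DataFrame({'f0': rng.rand(200), 'sex': rng.randint(1, 3, 200)})\nage = rng.randint(0, 3, 200)                  # 3 toy age bins\ntest = pd.DataFrame({'f0': rng.rand(5), 'sex': 0})\n\nclf = LogisticRegression(max_iter=1000).fit(train[['f0', 'sex']], age)\n\ntest['sex'] = 1\np_age_given_sex1 = clf.predict_proba(test[['f0', 'sex']])\ntest['sex'] = 2\np_age_given_sex2 = clf.predict_proba(test[['f0', 'sex']])\n# multiplying by P(sex) from the separate binary model gives the joint\nprint(p_age_given_sex1.shape, p_age_given_sex2.shape)\n"
  },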
  {
    "path": "THLUO/27.thluo_nb_xgb.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\n# coding: utf-8\n\n# In[1]:\n\nfrom sklearn.metrics import log_loss\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n#add\nimport gc\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom scipy.sparse import hstack, vstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\n# from skopt.space import Integer, Categorical, Real, Log10\n# from skopt.utils import use_named_args\n# from skopt import gp_minimize\nfrom gensim.models import Word2Vec, FastText\nimport gensim \nimport re\nimport os\nimport xgboost as xgb\npath=\"./\"\nos.listdir(path)\n\n\n# In[2]:\nprint ('27.thluo_nb_xgb.py')\n\ntrain_id=pd.read_csv(\"input/deviceid_train.tsv\",sep=\"\\t\",names=['device_id','sex','age'])\ntest_id=pd.read_csv(\"input/deviceid_test.tsv\",sep=\"\\t\",names=['device_id'])\n\nall_id=pd.concat([train_id[['device_id']],test_id[['device_id']]])\ndf_sex_prob_oof = pd.read_csv('device_sex_prob_oof.csv')\ndf_age_prob_oof = pd.read_csv('device_age_prob_oof.csv')\ndf_start_close_sex_prob_oof = pd.read_csv('start_close_sex_prob_oof.csv')\n#后面两个，线上线下不对应，线下过拟合了\ndf_start_close_age_prob_oof = pd.read_csv('start_close_age_prob_oof.csv')\n#df_start_close_sex_age_prob_oof = pd.read_csv('start_close_sex_age_prob_oof.csv')\ndf_tfidf_lr_sex_age_prob_oof = pd.read_csv('tfidf_lr_sex_age_prob_oof.csv')\n#之前的有用的\ndf_sex_age_bin_prob_oof = pd.read_csv('sex_age_bin_prob_oof.csv')\n\ndf_age_bin_prob_oof = pd.read_csv('age_bin_prob_oof.csv')\ndf_hcc_device_brand_age_sex = pd.read_csv('hcc_device_brand_age_sex.csv')\ndf_device_age_regression_prob_oof = pd.read_csv('device_age_regression_prob_oof.csv')\ndf_device_start_GRU_pred = pd.read_csv('device_start_GRU_pred.csv')\ndf_device_start_GRU_pred_age = pd.read_csv('device_start_GRU_pred_age.csv')\ndf_device_all_GRU_pred = pd.read_csv('device_all_GRU_pred.csv')\n#df_boost_sex_age_prob_oof = pd.read_csv('boost_sex_age_prob_oof.csv')\ndf_lgb_sex_age_prob_oof = pd.read_csv('lgb_sex_age_prob_oof.csv')\ndf_device_start_capsule_pred = pd.read_csv('device_start_capsule_pred.csv')\ndf_device_start_textcnn_pred = pd.read_csv('device_start_textcnn_pred.csv')\ndf_device_start_text_dpcnn_pred = pd.read_csv('device_start_text_dpcnn_pred.csv')\ndf_device_start_lstm_pred = pd.read_csv('device_start_lstm_pred.csv')\ndf_att_nn_feat_v6 = pd.read_csv('att_nn_feat_v6.csv')\ndf_att_nn_feat_v6.columns = ['device_id'] + ['att_nn_feat_' + str(i) for i in range(22)]\n\n#过拟合特征\ndel df_start_close_age_prob_oof['device_app_groupedstart_close_age_prob_oof_4_MEAN']\ndel df_start_close_sex_prob_oof['device_app_groupedstart_close_sex_prob_oof_MIN']\ndel df_start_close_sex_prob_oof['device_app_groupedstart_close_sex_prob_oof_MAX']\n\n\n# In[3]:\n\n\ndf_train_w2v = all_id.merge(df_sex_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_age_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_start_close_sex_prob_oof, on='device_id', how='left')\ndf_train_w2v = 
df_train_w2v.merge(df_start_close_age_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_sex_age_bin_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_age_bin_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_hcc_device_brand_age_sex, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_age_regression_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_start_GRU_pred, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_start_GRU_pred_age, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_all_GRU_pred, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_lgb_sex_age_prob_oof, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_start_capsule_pred, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_start_textcnn_pred, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_start_text_dpcnn_pred, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_start_lstm_pred, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_att_nn_feat_v6, on='device_id', how='left')\n\n\n# In[5]:\n\n\nfeat = df_train_w2v.copy()\n\n\n# In[6]:\n\n\ntrain=pd.merge(train_id,feat,on=\"device_id\",how=\"left\")\ntest=pd.merge(test_id,feat,on=\"device_id\",how=\"left\")\n\n\n# In[8]:\n\n\nfeatures = [x for x in train.columns if x not in ['device_id', 'sex',\"age\",]]\nY = train['sex'] - 1\n\n\n# In[9]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\ngc.collect()\nseed = 1024\nnum_folds = 5\nfolds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed)\n\n\n# In[10]:\n\n\nparams={\n    'booster':'gbtree',\n    'objective': 'binary:logistic',\n#      'is_unbalance':'True',\n# 'scale_pos_weight': 1500.0/13458.0,\n        'eval_metric': \"logloss\",\n    \n    'gamma':0.2,#0.2 is ok\n    'max_depth':6,\n# 'lambda':20,\n    # \"alpha\":5,\n        'subsample':0.7,\n        'colsample_bytree':0.4 ,\n#         'min_child_weight':2.5, \n        'eta': 0.01,\n    # 'learning_rate':0.01,\n    \"silent\":1,\n    'seed':1024,\n'nthread':5,\n   \n    }\n\nnum_round = 3500\nearly_stopping_rounds = 100\n\n\n# In[11]:\n\n\n#预测性别\naus = []\nsub1 = np.zeros((len(test), ))\npred_oob1=np.zeros((len(train),))\nfor i,(train_index,test_index) in enumerate(folds.split(train[features], Y)):\n  \n    tr_x = train[features].reindex(index=train_index, copy=False)\n    tr_y = Y[train_index]\n    te_x = train[features].reindex(index=test_index, copy=False)\n    te_y = Y[test_index]\n\n    d_tr = xgb.DMatrix(tr_x, label=tr_y)\n    d_te = xgb.DMatrix(te_x, label=te_y)\n    watchlist  = [(d_tr,'train'),\n    (d_te,'val')\n             ]\n    model = xgb.train(params, d_tr, num_boost_round=530, \n                      evals=watchlist,verbose_eval=100)\n    pred = model.predict(d_te)\n    pred_oob1[test_index] =pred\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    a = log_loss(te_y, pred)\n\n\n    print (\"idx: \", i) \n    print (\" loss: %.5f\" % a)\n#     print \" gini: %.5f\" % g\n    aus.append(a)\n\nprint (\"mean\")\nprint (\"auc:       %s\" % (sum(aus) / 5.0))\n\n\n# In[12]:\n\n\n#用全部数据训练一个lgb\n#用全部的train来预测test\nxgb_train = xgb.DMatrix(train[features], label=Y)\nwatchlist  = [(xgb_train,'train')]\n\ngbm = xgb.train(params, xgb_train, num_boost_round=530, evals=watchlist, verbose_eval=100)  \n\nsub1 = 
gbm.predict(xgb.DMatrix(test[features]))\n\n\n# In[13]:\n\n\npred_oob1 = pd.DataFrame(pred_oob1, columns=['sex2'])\nsub1 = pd.DataFrame(sub1, columns=['sex2'])\nres1=pd.concat([pred_oob1,sub1])\nres1['sex1'] = 1-res1['sex2']\n\n\n# In[15]:\n\n\n# In[50]:\n\n\nfeatures = [x for x in train.columns if x not in ['device_id',\"age\"]]\nY = train['age'] \n\n\n# In[51]:\n\n\nfrom sklearn.metrics import auc, log_loss, roc_auc_score,f1_score,recall_score,precision_score\n\n\n# In[16]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\ngc.collect()\nseed = 1024\nnum_folds = 5\nfolds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed)\n\n\n# In[17]:\n\n\nparams={\n    'booster':'gbtree',\n    'objective': 'multi:softprob',\n    'eval_metric': \"mlogloss\",\n    'num_class':11,\n    'gamma':0.1,#0.2 is ok\n    'max_depth':5,\n    'subsample':0.7,\n    'colsample_bytree':0.4 ,\n    # 'min_child_weight':2.5, \n    'eta': 0.02,\n    # 'learning_rate':0.01,\n    \"silent\":1,\n    'seed':1024,\n    'nthread':5,\n   \n    }\n\n\n# In[19]:\n\n\n#预测性别\naus = []\nsub2 = np.zeros((len(test),11 ))\npred_oob2=np.zeros((len(train),11))\nmodels=[]\niters=[]\nfor i,(train_index,test_index) in enumerate(folds.split(train[features], Y)):\n  \n    tr_x = train[features].reindex(index=train_index, copy=False)\n    tr_y = Y[train_index]\n    te_x = train[features].reindex(index=test_index, copy=False)\n    te_y = Y[test_index]\n\n    d_tr = xgb.DMatrix(tr_x, label=tr_y)\n    d_te = xgb.DMatrix(te_x, label=te_y)\n    watchlist  = [(d_tr,'train'),\n                  (d_te,'val')]\n    model = xgb.train(params, d_tr, num_boost_round=550, \n                      evals=watchlist,verbose_eval=100)\n\n    pred = model.predict(d_te)\n    pred_oob2[test_index] = pred\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    a = log_loss(te_y, pred)\n\n    #sub2 += gbm.predict(test[features], num_iteration=gbm.best_iteration) / 5\n    \n    print (\"idx: \", i) \n    print (\" loss: %.5f\" % a)\n#     print \" gini: %.5f\" % g\n    aus.append(a)\n\nprint (\"mean\")\nprint (\"auc:       %s\" % (sum(aus) / 5.0))\n\n\n# In[20]:\n\n\n#预测条件概率\n####sex1\ntest['sex']=1\n#用全部数据训练一个lgb\n#用全部的train来预测test\nxgb_train = xgb.DMatrix(train[features], label=Y)\nwatchlist  = [(xgb_train,'train')]\n\ngbm = xgb.train(params, xgb_train, num_boost_round=550, evals=watchlist, verbose_eval=100)   \nsub2 = gbm.predict(xgb.DMatrix(test[features]))\n\nres2_1=np.vstack((pred_oob2,sub2))\nres2_1 = pd.DataFrame(res2_1)\n\n\n# In[21]:\n\n\n###sex2\n#预测条件概率\ntest['sex']=2\n\nsub2 = np.zeros((len(test),11))\nsub2 = gbm.predict(xgb.DMatrix(test[features]))\nres2_2=np.vstack((pred_oob2,sub2))\nres2_2 = pd.DataFrame(res2_2) \n\n\n# In[24]:\n\n\nres1.index=range(len(res1))\nres2_1.index=range(len(res2_1))\nres2_2.index=range(len(res2_2))\nfinal_1=res2_1.copy()\nfinal_2=res2_2.copy()\n\n\n# In[25]:\n\n\nfor i in range(11):\n    final_1[i]=res1['sex1'] * res2_1[i]\n    final_2[i]=res1['sex2'] * res2_2[i]\nid_list = pd.concat([train[['device_id']],test[['device_id']]])\nfinal = id_list\nfinal.index = range(len(final))\nfinal.columns = ['DeviceID']\nfinal_pred = pd.concat([final_1,final_2], 1)\nfinal = pd.concat([final,final_pred],1)\nfinal.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \n         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', \n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\n\n# 
In[27]:\n\n\ntest['DeviceID']=test['device_id']\nsub=pd.merge(test[['DeviceID']],final,on=\"DeviceID\",how=\"left\")\nsub.to_csv(\"th_xgb_nb.csv\",index=False)\n\n"
  },
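  {
    "path": "THLUO/notes/sketch_reindex_vs_iloc.py",
    "content": "\n# coding: utf-8\n\n# Editor's illustrative sketch -- NOT part of the original pipeline.\n# The fold loops above select rows with train[features].reindex(index=...).\n# StratifiedKFold yields POSITIONAL indices, so reindex only works because\n# `train` happens to carry a default RangeIndex; with any other index it\n# silently returns wrong rows or NaN. .iloc is the safe equivalent. Toy\n# demonstration (hypothetical file):\nimport numpy as np\nimport pandas as pd\n\ndf = pd.DataFrame({'x': [10, 20, 30]}, index=[100, 101, 102])\npos = np.array([0, 2])                       # positional indices from a splitter\nprint(df['x'].iloc[pos].tolist())            # [10, 30] -- correct rows\nprint(df['x'].reindex(index=pos).tolist())   # [nan, nan] -- label lookup misses\n"
  },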
  {
    "path": "THLUO/28.final.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\n\n\n# In[2]:\n\n\nth_22_results_lgb = pd.read_csv('th_22_results_lgb.csv')\nth_22_results_xgb = pd.read_csv('th_22_results_xgb.csv')\nth_lgb_nb = pd.read_csv('th_lgb_nb.csv')\nth_xgb_nb = pd.read_csv('th_xgb_nb.csv')\n\n\n# In[5]:\n\n\n#直接22分类 lgb与xgb进行55 45加权融合\nresults_22 = pd.DataFrame(th_22_results_lgb.values[:,1:] * 0.55 + th_22_results_xgb.values[:,1:] * 0.45)\nresults_22.columns = th_22_results_lgb.columns[1:]\nresults_22['DeviceID'] = th_22_results_lgb['DeviceID']\n\n\n# In[6]:\n\n\n#条件概率分类, xgb与lgb进行65 35加权融合\nresults_nb = pd.DataFrame(th_xgb_nb.values[:,1:] * 0.65 + th_lgb_nb.values[:,1:] * 0.35)\nresults_nb.columns = th_xgb_nb.columns[1:]\nresults_nb['DeviceID'] = th_xgb_nb['DeviceID']\n\n\n# In[ ]:\n\n\n#两份结果继续进行加权融合\nresults_final = pd.DataFrame(results_22.values[:,1:] * 0.65 + results_nb.values[:,1:] * 0.35)\nresults_final.columns = results_22.columns[1:]\nresults_final['DeviceID'] = results_22['DeviceID']\n\n\n# In[ ]:\n\n\nresults_final.to_csv('result/thluo_final.csv', index=None)\n\n"
  },
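  {
    "path": "THLUO/notes/sketch_weighted_blend.py",
    "content": "\n# coding: utf-8\n\n# Editor's illustrative sketch -- NOT part of the original pipeline.\n# 28.final.py blends probability matrices with convex weights\n# (0.55/0.45, then 0.65/0.35). If the weights sum to 1 and each input\n# row is a distribution, every blended row stays a distribution, so no\n# renormalisation step is needed. Toy check with random distributions:\nimport numpy as np\n\nrng = np.random.RandomState(0)\na = rng.dirichlet(np.ones(22), size=4)   # toy 22-class probabilities\nb = rng.dirichlet(np.ones(22), size=4)\nc = rng.dirichlet(np.ones(22), size=4)\n\nblend = 0.55 * a + 0.45 * b              # first-stage blend\nfinal = 0.65 * blend + 0.35 * c          # nested second-stage blend\n\nassert np.allclose(blend.sum(axis=1), 1.0)\nassert np.allclose(final.sum(axis=1), 1.0)\n"
  },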
  {
    "path": "THLUO/3.device_quchong_start_app_w2c.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nimport gc\nfrom gensim.test.utils import common_texts, get_tmpfile\nfrom gensim.models import Word2Vec\n\n\n# In[2]:\n\nprint ('8.device_quchong_start_app_w2c.py')\npath='input/'\ndata=pd.DataFrame()\n#sex_age=pd.read_excel('./data/性别年龄对照表.xlsx')\n\n\n# In[3]:\n\n\ndeviceid_packages=pd.read_csv(path+'deviceid_packages.tsv',sep='\\t',names=['device_id','apps'])\ndeviceid_test=pd.read_csv(path+'deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv(path+'deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndeviceid_brand = pd.read_csv(path+'deviceid_brand.tsv',sep='\\t', names=['device_id','device_brand', 'device_type'])\ndeviceid_package_start_close = pd.read_csv(path+'deviceid_package_start_close.tsv',sep='\\t', names=['device_id','app_id','start_time','close_time'])\npackage_label = pd.read_csv(path+'package_label.tsv',sep='\\t',names=['app_id','app_parent_type', 'app_child_type'])\n\n\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : str(x).split(' ')[0])\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 1].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 2].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_2' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 3].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_3' if x in one_time_brand else x)\n\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_brand.values))\ndeviceid_brand['device_brand'] = lbl.transform(list(deviceid_brand.device_brand.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_type.values))\ndeviceid_brand['device_type'] = lbl.transform(list(deviceid_brand.device_type.values))\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_parent_type.values))\npackage_label['app_parent_type'] = lbl.transform(list(package_label.app_parent_type.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_child_type.values))\npackage_label['app_child_type'] = lbl.transform(list(package_label.app_child_type.values))\n\n\n# In[4]:\n\n\nimport time\n\n# 输入毫秒级的时间，转出正常格式的时间\ndef timeStamp(timeNum):\n    timeStamp = float(timeNum/1000)\n    timeArray = time.localtime(timeStamp)\n    otherStyleTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n    return 
otherStyleTime\n\n#解析出具体的时间\ndeviceid_package_start_close['start_date'] = pd.to_datetime(deviceid_package_start_close.start_time.apply(timeStamp))\ndeviceid_package_start_close['end_date'] = pd.to_datetime(deviceid_package_start_close.close_time.apply(timeStamp))\ndeviceid_package_start_close['start_hour'] = deviceid_package_start_close.start_date.dt.hour\ndeviceid_package_start_close['end_hour'] = deviceid_package_start_close.end_date.dt.hour\ndeviceid_package_start_close['time_gap'] = (deviceid_package_start_close['end_date'] - deviceid_package_start_close['start_date']).astype('timedelta64[s]')\n\ndeviceid_package_start_close = deviceid_package_start_close.merge(package_label, on='app_id', how='left')\ndeviceid_package_start_close.app_parent_type.fillna(-1, inplace=True)\ndeviceid_package_start_close.app_child_type.fillna(-1, inplace=True)\ndeviceid_package_start_close['start_year'] = deviceid_package_start_close.start_date.dt.year\ndeviceid_package_start_close['end_year'] = deviceid_package_start_close.end_date.dt.year\ndeviceid_package_start_close['year_gap'] = deviceid_package_start_close['end_year'] - deviceid_package_start_close['start_year']\n\n\n# In[9]:\n\n\ndf_temp = deviceid_package_start_close.groupby(['device_id', 'app_id'])['start_hour'].mean().reset_index()\ndf_temp\n\n\n# In[10]:\n\n\ndf_sorted = df_temp.sort_values(by='start_hour')\n\n\n# In[13]:\n\n\ndf_device_start_app_list = df_sorted.groupby('device_id').apply(lambda x : list(x.app_id)).reset_index().rename(columns = {0 : 'app_list'})\n\n\n# In[17]:\n\n\napp_list = list(df_device_start_app_list.app_list.values)\n\n\n# In[35]:\n\n\nmodel = Word2Vec(app_list, size=10, window=4, min_count=2, workers=4)\nmodel.save(\"word2vec.model\")\n\n\n# In[37]:\n\n\nvocab = list(model.wv.vocab.keys())\n\nw2c_arr = []\n\nfor v in vocab :\n    w2c_arr.append(list(model.wv[v]))\n\n\n# In[38]:\n\n\ndf_w2c_start = pd.DataFrame()\ndf_w2c_start['app_id'] = vocab\ndf_w2c_start = pd.concat([df_w2c_start, pd.DataFrame(w2c_arr)], axis=1)\ndf_w2c_start.columns = ['app_id'] + ['w2c_start_app_' + str(i) for i in range(10)]\n\n\n# In[47]:\n\n\ndf_sorted = df_sorted.merge(df_w2c_start, on='app_id', how='left')\ndf_sorted\n\n\n# In[48]:\n\n\nw2c_nums = 10\nagg = {}\nfor l in ['w2c_start_app_' + str(i) for i in range(w2c_nums)] :\n    agg[l] = ['mean', 'std', 'max', 'min']\n\n\n# In[50]:\n\n\ndf_agg = df_sorted.groupby('device_id').agg(agg)\ndf_agg.columns = pd.Index(['device_quchong' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\ndf_agg = df_agg.reset_index()\n\n\n# In[52]:\n\n\ndf_agg.to_csv('device_quchong_start_app_w2c.csv', index=None)\n\n"
  },
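  {
    "path": "THLUO/notes/sketch_app_word2vec.py",
    "content": "\n# coding: utf-8\n\n# Editor's illustrative sketch -- NOT part of the original pipeline.\n# The w2c scripts treat each device's (time-sorted) app sequence as a\n# sentence, train Word2Vec on those sequences, and aggregate the app\n# vectors per device (the MEAN/STD/MAX/MIN features). Toy version; note\n# the original files use the gensim 3.x API (size=, model.wv.vocab),\n# while this sketch uses the gensim 4.x names (vector_size=, key_to_index).\nimport numpy as np\nfrom gensim.models import Word2Vec\n\nsequences = [['app_a', 'app_b', 'app_c'],\n             ['app_b', 'app_c', 'app_d'],\n             ['app_a', 'app_c', 'app_d']]\n\nmodel = Word2Vec(sequences, vector_size=10, window=4, min_count=1, workers=1)\n\n# mean-pool one device's app vectors, as the *_MEAN aggregation does\ndevice_vec = np.mean([model.wv[a] for a in sequences[0]], axis=0)\nprint(device_vec.shape)   # (10,)\n"
  },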
  {
    "path": "THLUO/3.w2c_all_emb.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nimport gc\n\n\n\n# In[2]:\n\n\npath='input/'\ndata=pd.DataFrame()\n#sex_age=pd.read_excel('./data/性别年龄对照表.xlsx')\n\n\n# In[3]:\n\n\ndeviceid_packages=pd.read_csv(path+'deviceid_packages.tsv',sep='\\t',names=['device_id','apps'])\ndeviceid_test=pd.read_csv(path+'deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv(path+'deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndeviceid_brand = pd.read_csv(path+'deviceid_brand.tsv',sep='\\t', names=['device_id','device_brand', 'device_type'])\ndeviceid_package_start_close = pd.read_csv(path+'deviceid_package_start_close.tsv',sep='\\t', names=['device_id','app_id','start_time','close_time'])\npackage_label = pd.read_csv(path+'package_label.tsv',sep='\\t',names=['app_id','app_parent_type', 'app_child_type'])\n\n\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : str(x).split(' ')[0])\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 1].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 2].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_2' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 3].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_3' if x in one_time_brand else x)\n\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_brand.values))\ndeviceid_brand['device_brand'] = lbl.transform(list(deviceid_brand.device_brand.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_type.values))\ndeviceid_brand['device_type'] = lbl.transform(list(deviceid_brand.device_type.values))\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_parent_type.values))\npackage_label['app_parent_type'] = lbl.transform(list(package_label.app_parent_type.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_child_type.values))\npackage_label['app_child_type'] = lbl.transform(list(package_label.app_child_type.values))\n\n\n# In[4]:\n\n\ndeviceid_package_start = deviceid_package_start_close[['device_id', 'app_id', 'start_time']]\ndeviceid_package_start.columns = ['device_id', 'app_id', 'all_time']\ndeviceid_package_close = deviceid_package_start_close[['device_id', 'app_id', 'close_time']]\ndeviceid_package_close.columns = ['device_id', 'app_id', 'all_time']\ndeviceid_package_all = pd.concat([deviceid_package_start, deviceid_package_close])\n\n\n# 
In[6]:\n\n\ndf_sorted = deviceid_package_all.sort_values(by='all_time')\n\n\n# In[8]:\n\n\ndf_device_start_app_list = df_sorted.groupby('device_id').apply(lambda x : list(x.app_id)).reset_index().rename(columns = {0 : 'app_list'})\ndf_device_start_app_list\n\n\n# In[9]:\n\n\napp_list = list(df_device_start_app_list.app_list.values)\n\n\n# In[10]:\n\n\nfrom gensim.test.utils import common_texts, get_tmpfile\nfrom gensim.models import Word2Vec\n\n\n# In[11]:\n\n\nword_dim = 200\nmodel = Word2Vec(app_list, size=word_dim, window=20, min_count=2, workers=4)\nmodel.save(\"word2vec.model\")\n\n\n# In[13]:\n\n\nvocab = list(model.wv.vocab.keys())\n\nw2c_arr = []\n\nfor v in vocab :\n    w2c_arr.append(list(model.wv[v]))\n\n\n# In[14]:\n\n\ndf_w2c_start = pd.DataFrame()\ndf_w2c_start['app_id'] = vocab\ndf_w2c_start = pd.concat([df_w2c_start, pd.DataFrame(w2c_arr)], axis=1)\ndf_w2c_start.columns = ['app_id'] + ['w2c_all_app_' + str(i) for i in range(word_dim)]\n\n\n# In[16]:\n\n\ndf_w2c_start.to_csv('w2c_all_emb.csv', index=None)\n\n"
  },
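  {
    "path": "THLUO/notes/sketch_embedding_matrix.py",
    "content": "\n# coding: utf-8\n\n# Editor's illustrative sketch -- NOT part of the original pipeline.\n# 3.w2c_all_emb.py stores one 200-d vector per app_id; the attention NN\n# consumes such vectors as a frozen Keras Embedding weight matrix. A\n# sketch of assembling an embedding_matrix from a trained model\n# (gensim 4.x API, toy data, row 0 reserved for padding):\nimport numpy as np\nfrom gensim.models import Word2Vec\n\nseqs = [['a1', 'a2', 'a3'], ['a2', 'a3', 'a4']]\nw2v = Word2Vec(seqs, vector_size=8, min_count=1, workers=1)\n\nword_index = {app: i + 1 for i, app in enumerate(w2v.wv.key_to_index)}  # 0 = pad\nembedding_matrix = np.zeros((len(word_index) + 1, 8))\nfor app, idx in word_index.items():\n    embedding_matrix[idx] = w2v.wv[app]\n\nprint(embedding_matrix.shape)   # (n_apps + 1, 8)\n"
  },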
  {
    "path": "THLUO/3.w2c_model_all.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nfrom gensim.test.utils import common_texts, get_tmpfile\nfrom gensim.models import Word2Vec\nimport gc\n\n\n\n# In[2]:\nprint ('3.w2c_model_all.py')\n\npath='input/'\ndata=pd.DataFrame()\n#sex_age=pd.read_excel('./data/性别年龄对照表.xlsx')\n\n\n# In[3]:\n\n\ndeviceid_packages=pd.read_csv(path+'deviceid_packages.tsv',sep='\\t',names=['device_id','apps'])\ndeviceid_test=pd.read_csv(path+'deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv(path+'deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndeviceid_brand = pd.read_csv(path+'deviceid_brand.tsv',sep='\\t', names=['device_id','device_brand', 'device_type'])\ndeviceid_package_start_close = pd.read_csv(path+'deviceid_package_start_close.tsv',sep='\\t', names=['device_id','app_id','start_time','close_time'])\npackage_label = pd.read_csv(path+'package_label.tsv',sep='\\t',names=['app_id','app_parent_type', 'app_child_type'])\n\n\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : str(x).split(' ')[0])\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 1].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 2].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_2' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 3].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_3' if x in one_time_brand else x)\n\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_brand.values))\ndeviceid_brand['device_brand'] = lbl.transform(list(deviceid_brand.device_brand.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_type.values))\ndeviceid_brand['device_type'] = lbl.transform(list(deviceid_brand.device_type.values))\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_parent_type.values))\npackage_label['app_parent_type'] = lbl.transform(list(package_label.app_parent_type.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_child_type.values))\npackage_label['app_child_type'] = lbl.transform(list(package_label.app_child_type.values))\n\n\n# In[4]:\n\n\ndeviceid_package_start = deviceid_package_start_close[['device_id', 'app_id', 'start_time']]\ndeviceid_package_start.columns = ['device_id', 'app_id', 'all_time']\ndeviceid_package_close = deviceid_package_start_close[['device_id', 'app_id', 'close_time']]\ndeviceid_package_close.columns = 
['device_id', 'app_id', 'all_time']\ndeviceid_package_all = pd.concat([deviceid_package_start, deviceid_package_close])\n\n\n# In[5]:\n\n\ndf_sorted = deviceid_package_all.sort_values(by='all_time')\n\n\n# In[7]:\n\n\ndf_results = df_sorted.groupby('device_id')['app_id'].apply(lambda x:' '.join(x)).reset_index().rename(columns = {'app_id' : 'app_list'})\ndf_results.to_csv('03.device_click_app_sorted_by_all.csv', index=None)\ndel df_results\n\n\n# In[8]:\n\n\ndf_device_start_app_list = df_sorted.groupby('device_id').apply(lambda x : list(x.app_id)).reset_index().rename(columns = {0 : 'app_list'})\n\n\n# In[9]:\n\n\napp_list = list(df_device_start_app_list.app_list.values)\n\n\n# In[11]:\n\n\nmodel = Word2Vec(app_list, size=10, window=50, min_count=2, workers=4)\nmodel.save(\"word2vec.model\")\n\n\n# In[12]:\n\n\nvocab = list(model.wv.vocab.keys())\n\nw2c_arr = []\n\nfor v in vocab :\n    w2c_arr.append(list(model.wv[v]))\n\n\n# In[13]:\n\n\ndf_w2c_start = pd.DataFrame()\ndf_w2c_start['app_id'] = vocab\ndf_w2c_start = pd.concat([df_w2c_start, pd.DataFrame(w2c_arr)], axis=1)\ndf_w2c_start.columns = ['app_id'] + ['w2c_all_app_' + str(i) for i in range(10)]\n\n\n# In[14]:\n\n\nw2c_nums = 10\nagg = {}\nfor l in ['w2c_all_app_' + str(i) for i in range(w2c_nums)] :\n    agg[l] = ['mean', 'std', 'max', 'min']\n\n\n# In[15]:\n\n\ndeviceid_package_start_close = deviceid_package_start_close.merge(df_w2c_start, on='app_id', how='left')\n\n\n# In[16]:\n\n\ndf_agg = deviceid_package_start_close.groupby('device_id').agg(agg)\ndf_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\ndf_agg = df_agg.reset_index()\ndf_agg.to_csv('device_all_app_w2c.csv', index=None)\n\n\n# In[18]:\n\n\ndf_results = deviceid_package_start_close.groupby(['device_id', 'app_id'])['start_time'].mean().reset_index()\ndf_results = df_results.merge(df_w2c_start, on='app_id', how='left')\n\n\n# In[22]:\n\n\ndf_agg = df_results.groupby('device_id').agg(agg)\ndf_agg.columns = pd.Index(['device_app_unique' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\ndf_agg = df_agg.reset_index()\n\n\n# In[20]:\n\n\ndf_agg.to_csv('device_app_unique_all_app_w2c.csv', index=None)\n\n"
  },
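  {
    "path": "THLUO/notes/sketch_flatten_agg_columns.py",
    "content": "\n# coding: utf-8\n\n# Editor's illustrative sketch -- NOT part of the original pipeline.\n# Every feature script flattens the MultiIndex produced by\n# groupby(...).agg(...) with the pd.Index([prefix + e[0] + '_' +\n# e[1].upper() ...]) idiom. Minimal reproduction on toy data:\nimport pandas as pd\n\ndf = pd.DataFrame({'device_id': ['d1', 'd1', 'd2'],\n                   'w2c_all_app_0': [0.1, 0.3, 0.5]})\ndf_agg = df.groupby('device_id').agg({'w2c_all_app_0': ['mean', 'std']})\ndf_agg.columns = pd.Index(['device_' + e[0] + '_' + e[1].upper()\n                           for e in df_agg.columns.tolist()])\nprint(df_agg.reset_index().columns.tolist())\n# ['device_id', 'device_w2c_all_app_0_MEAN', 'device_w2c_all_app_0_STD']\n"
  },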
  {
    "path": "THLUO/4.device_age_prob_oof.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nimport gc\n\n\n\n\n\n# In[2]:\n\nprint ('4.device_age_prob_oof.py')\npath='input/'\ndata=pd.DataFrame()\n#sex_age=pd.read_excel('./data/性别年龄对照表.xlsx')\n\n\n# In[3]:\n\n\ndeviceid_packages=pd.read_csv(path+'deviceid_packages.tsv',sep='\\t',names=['device_id','apps'])\ndeviceid_test=pd.read_csv(path+'deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv(path+'deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndeviceid_brand = pd.read_csv(path+'deviceid_brand.tsv',sep='\\t', names=['device_id','device_brand', 'device_type'])\ndeviceid_package_start_close = pd.read_csv(path+'deviceid_package_start_close.tsv',sep='\\t', names=['device_id','app_id','start_time','close_time'])\npackage_label = pd.read_csv(path+'package_label.tsv',sep='\\t',names=['app_id','app_parent_type', 'app_child_type'])\n\n\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : str(x).split(' ')[0])\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 1].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 2].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_2' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 3].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_3' if x in one_time_brand else x)\n\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_brand.values))\ndeviceid_brand['device_brand'] = lbl.transform(list(deviceid_brand.device_brand.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_type.values))\ndeviceid_brand['device_type'] = lbl.transform(list(deviceid_brand.device_type.values))\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_parent_type.values))\npackage_label['app_parent_type'] = lbl.transform(list(package_label.app_parent_type.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_child_type.values))\npackage_label['app_child_type'] = lbl.transform(list(package_label.app_child_type.values))\n\n\n# In[4]:\n\n\nimport time\n\n# 输入毫秒级的时间，转出正常格式的时间\ndef timeStamp(timeNum):\n    timeStamp = float(timeNum/1000)\n    timeArray = time.localtime(timeStamp)\n    otherStyleTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n    return otherStyleTime\n\n#解析出具体的时间\ndeviceid_package_start_close['start_date'] = 
pd.to_datetime(deviceid_package_start_close.start_time.apply(timeStamp))\ndeviceid_package_start_close['end_date'] = pd.to_datetime(deviceid_package_start_close.close_time.apply(timeStamp))\ndeviceid_package_start_close['start_hour'] = deviceid_package_start_close.start_date.dt.hour\ndeviceid_package_start_close['end_hour'] = deviceid_package_start_close.end_date.dt.hour\ndeviceid_package_start_close['time_gap'] = (deviceid_package_start_close['end_date'] - deviceid_package_start_close['start_date']).astype('timedelta64[s]')\n\ndeviceid_package_start_close = deviceid_package_start_close.merge(package_label, on='app_id', how='left')\ndeviceid_package_start_close.app_parent_type.fillna(-1, inplace=True)\ndeviceid_package_start_close.app_child_type.fillna(-1, inplace=True)\ndeviceid_package_start_close['start_year'] = deviceid_package_start_close.start_date.dt.year\ndeviceid_package_start_close['end_year'] = deviceid_package_start_close.end_date.dt.year\ndeviceid_package_start_close['year_gap'] = deviceid_package_start_close['end_year'] - deviceid_package_start_close['start_year']\n\n\n# In[10]:\n\n\n#这里留一个伏笔，有些日期属于异常,去除掉那些开启app和关闭app不是同年的数据\n#df_temp = deviceid_package_start_close[deviceid_package_start_close.start_year != 2017]\n#df_temp['year_gap'] = df_temp['end_year'] - df_temp['start_year']\n\n\n# In[5]:\n\n\ndeviceid_train=pd.concat([deviceid_train,deviceid_test])\n\n\n# In[6]:\n\n\ndeviceid_packages['apps']=deviceid_packages['apps'].apply(lambda x:x.split(','))\ndeviceid_packages['app_lenghth']=deviceid_packages['apps'].apply(lambda x:len(x))\n\n\n# In[7]:\n#特征工程\ndef open_app_timegap_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'start_hour'])['time_gap'].mean().reset_index().rename(columns = {'time_gap': 'mean_time_gap'})\n    df_mean_temp = pd.pivot_table(df_temp, index='device_id', columns='start_hour', values='mean_time_gap').reset_index()\n    df_mean_temp.columns = ['device_id'] + ['open_app_timegap_in_'+str(i) + '_mean_hour' for i in range(0,24)]\n    df_mean_temp.fillna(0, inplace=True)\n\n\n    \n    return df_mean_temp\n\n\n# In[8]:\n\n\ndef device_start_end_app_timegap() :\n    #用户打开，关闭app的时间间隔\n    df_ = deviceid_package_start_close.sort_values(by=['device_id', 'start_date'], ascending=False)\n    df_['prev_start_date'] = df_.groupby('device_id')['start_date'].shift(-1)\n    df_['start_date_gap'] = (df_['start_date'] - df_['prev_start_date']).astype('timedelta64[s]')\n    agg_dic = {'start_date_gap' : ['min', 'max', 'mean', 'median', 'std']}\n    df_start_gap_agg = df_.groupby('device_id').agg(agg_dic)\n    df_start_gap_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_start_gap_agg.columns.tolist()])\n    df_start_gap_agg = df_start_gap_agg.reset_index()\n    #del df_\n    gc.collect()\n    #关闭时间间隔\n    df_ = deviceid_package_start_close.sort_values(by=['device_id', 'end_date'], ascending=False)\n    df_['prev_end_date'] = df_.groupby('device_id')['end_date'].shift(-1)\n    df_['end_date_gap'] = (df_['end_date'] - df_['prev_end_date']).astype('timedelta64[s]')\n    agg_dic = {'end_date_gap' : ['min', 'max', 'mean', 'median', 'std']}\n    df_end_gap_agg = df_.groupby('device_id').agg(agg_dic)\n    df_end_gap_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_end_gap_agg.columns.tolist()])\n    df_end_gap_agg = df_end_gap_agg.reset_index()\n    #del df_\n    gc.collect()\n\n\n\n    df_agg = df_start_gap_agg.merge(df_end_gap_agg, on='device_id', how='left')\n    #df_agg = 
df_agg.merge(df_app_start_gap_agg, on='device_id', how='left')\n    #df_agg = df_agg.merge(df_app_end_gap_agg, on='device_id', how='left')\n    return df_agg\n\ndef open_app_counts_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'start_hour'])['app_id'].count().reset_index().rename(columns = {'app_id': 'app_counts'})\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='start_hour', values='app_counts').reset_index()\n    # assumes every hour 0-23 occurs at least once in the data\n    df_temp.columns = ['device_id'] + ['open_app_counts_in'+str(i) + '_hour' for i in range(0,24)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\ndef close_app_counts_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'end_hour'])['app_id'].count().reset_index().rename(columns = {'app_id': 'app_counts'})\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='end_hour', values='app_counts').reset_index()\n    df_temp.columns = ['device_id'] + ['close_app_counts_in'+str(i) + '_hour' for i in range(0,24)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\ndef app_type_mean_time_gap_one_hot() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'app_parent_type'])['time_gap'].mean().reset_index()\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='app_parent_type', values='time_gap').reset_index()\n    df_temp.columns = ['device_id'] + ['app_parent_type_mean_time_gap'+str(i) for i in range(-1,45)]\n    df_temp.fillna(-1, inplace=True)\n    return df_temp\n\ndef device_active_hour() :\n    aggregations = {\n        'start_hour' : ['std','mean','max','min'],\n        'end_hour' : ['std','mean','max','min']\n    }\n    df_agg = deviceid_package_start_close.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n\n    return df_agg\n\n\ndef device_brand_encoding() :\n    df_temp = deviceid_brand.merge(deviceid_train[['device_id', 'age', 'sex']], on='device_id', how='left')\n\n    aggregations = {\n        'age' : ['std','mean'],\n        'sex' : ['mean'],\n    }\n\n    df_device_brand = df_temp.groupby('device_brand').agg(aggregations)\n    df_device_brand.columns = pd.Index(['device_brand_' + e[0] + \"_\" + e[1].upper() for e in df_device_brand.columns.tolist()])\n    df_device_brand = df_device_brand.reset_index()\n\n    df_device_type = df_temp.groupby('device_type').agg(aggregations)\n    df_device_type.columns = pd.Index(['device_type_' + e[0] + \"_\" + e[1].upper() for e in df_device_type.columns.tolist()])\n    df_device_type = df_device_type.reset_index()\n\n    df_temp = df_temp.merge(df_device_brand, on='device_brand', how='left')\n    df_temp = df_temp.merge(df_device_type, on='device_type', how='left')\n\n    aggregations = {\n        'device_brand_age_STD' : ['mean'],\n        'device_brand_age_MEAN' : ['mean'],\n        'device_brand_sex_MEAN' : ['mean'],\n        #'device_type_age_STD' : ['mean'],\n        #'device_type_age_MEAN' : ['mean'],\n        #'device_type_sex_MEAN' : ['mean']\n    }\n\n    df_agg = df_temp.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n    return df_agg\n\n\n# per-device statistics of app usage\ndef device_active_time_time_stat() :\n    # timing statistics for the apps a device opens\n    deviceid_package_start_close['active_time'] = deviceid_package_start_close['close_time'] 
- deviceid_package_start_close['start_time']\n\n    # how many times the device opened apps\n    # how many distinct apps it opened\n    aggregations = {\n        'app_id' : ['count', 'nunique'],\n        'active_time' : ['mean', 'std', 'max', 'min'],\n    }\n    df_agg = deviceid_package_start_close.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n\n    aggregations = {\n        'active_time' : ['mean', 'std', 'max', 'min', 'count'],\n    }\n    df_da_agg = deviceid_package_start_close.groupby(['device_id', 'app_id']).agg(aggregations)\n    df_da_agg.columns = pd.Index(['device_app_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_da_agg.columns.tolist()])\n    df_da_agg = df_da_agg.reset_index()\n\n    # device-level statistics over the per-app active-time aggregates\n    aggregations = {\n        'device_app_grouped_active_time_MEAN' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_STD' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_MAX' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_MIN' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_COUNT' : ['mean', 'std', 'max', 'min'],\n    }\n    df_temp = df_da_agg.groupby(['device_id']).agg(aggregations)\n    df_temp.columns = pd.Index([e[0] + \"_\" + e[1].upper() for e in df_temp.columns.tolist()])\n    df_temp = df_temp.reset_index()\n\n    df_agg = df_agg.merge(df_temp, on='device_id', how='left')\n    return df_agg\n\n\ndef app_type_encoding() :\n    df_temp = df_device_app_pair.merge(deviceid_train[['device_id', 'age', 'sex']], on='device_id', how='left')\n\n    aggregations = {\n        'age' : ['std','mean'],\n        'sex' : ['mean'],\n    }\n\n    df_agg_app_parent_type = df_temp.groupby('app_parent_type').agg(aggregations)\n    df_agg_app_parent_type.columns = pd.Index(['app_parent_type_' + e[0] + \"_\" + e[1].upper() for e in df_agg_app_parent_type.columns.tolist()])\n    df_agg_app_parent_type = df_agg_app_parent_type.reset_index()\n\n    df_agg_app_child_type = df_temp.groupby('app_child_type').agg(aggregations)\n    df_agg_app_child_type.columns = pd.Index(['app_child_type_' + e[0] + \"_\" + e[1].upper() for e in df_agg_app_child_type.columns.tolist()])\n    df_agg_app_child_type = df_agg_app_child_type.reset_index()\n\n    df_temp = df_temp.merge(df_agg_app_parent_type, on='app_parent_type', how='left')\n    df_temp = df_temp.merge(df_agg_app_child_type, on='app_child_type', how='left')\n\n    aggregations = {\n        'app_parent_type_age_STD' : ['mean'],\n        'app_parent_type_age_MEAN' : ['mean'],\n        'app_parent_type_sex_MEAN' : ['mean'],\n        'app_child_type_age_STD' : ['mean'],\n        'app_child_type_age_MEAN' : ['mean'],\n        'app_child_type_sex_MEAN' : ['mean']\n    }\n\n    df_agg = df_temp.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n    return df_agg\n\n# per-device counts of each app_parent_type\ndef app_type_onehot_in_device(df) :\n    df_copy = df.fillna(-1)\n    df_temp = df_copy.groupby(['device_id', 'app_parent_type'])['app_id'].size().reset_index()\n    df_temp.rename(columns = {'app_id' : 'app_parent_type_counts'}, inplace=True)\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='app_parent_type', values='app_parent_type_counts').reset_index()\n    df_temp.columns = ['device_id'] + 
['app_parent_type'+str(i) for i in range(-1,45)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\n\n\n# In[9]:\n\n\napps=deviceid_packages['apps'].apply(lambda x:' '.join(x)).tolist()\nvectorizer=CountVectorizer()\ntransformer=TfidfTransformer()\ncntTf = vectorizer.fit_transform(apps)\ntfidf=transformer.fit_transform(cntTf)\nword=vectorizer.get_feature_names()\nweight=tfidf.toarray()\ndf_weight=pd.DataFrame(weight)\nfeature=df_weight.columns\n# total tf-idf mass per device (equivalent to weight.sum(axis=1))\ndf_weight['sum']=0\nfor f in tqdm(feature):\n    df_weight['sum']+=df_weight[f]\ndeviceid_packages['tfidf_sum']=df_weight['sum']\n\n\n# In[10]:\n\n\n# n_components was named n_topics in sklearn < 0.19\nlda = LatentDirichletAllocation(n_components=5,\n                                learning_offset=50.,\n                                random_state=666)\ndocres = lda.fit_transform(cntTf)\n\n\n# In[11]:\n\n\ndeviceid_packages = pd.concat([deviceid_packages,pd.DataFrame(docres)],axis=1)\n\n\n# In[12]:\n\n\ntemp=deviceid_packages.drop('apps',axis=1)\ndeviceid_train=pd.merge(deviceid_train,temp,on='device_id',how='left')\n\n\n# In[13]:\n\n\n# expand the app lists into (device_id, app_id) pairs\ndevice_id_arr = []\napp_arr = []\ndf_device_app_pair = pd.DataFrame()\nfor row in deviceid_packages.values :\n    device_id = row[0]\n    app_list = row[1]\n    for app in app_list :\n        device_id_arr.append(device_id)\n        app_arr.append(app)\n# assemble the pair dataframe\ndf_device_app_pair['device_id'] = device_id_arr\ndf_device_app_pair['app_id'] = app_arr\n\ndf_device_app_pair = df_device_app_pair.merge(package_label, how='left', on='app_id')\n\n\n# In[15]:\n\n\n# feature extraction: merge every device-level feature block\ndf_train = deviceid_train.merge(device_active_time_time_stat(), on='device_id', how='left')\ndf_train = df_train.merge(deviceid_brand, on='device_id', how='left')\ndf_train = df_train.merge(app_type_onehot_in_device(df_device_app_pair), on='device_id', how='left')\ndf_train = df_train.merge(app_type_encoding(), on='device_id', how='left')\ndf_train = df_train.merge(device_active_hour(), on='device_id', how='left')\ndf_train = df_train.merge(app_type_mean_time_gap_one_hot(), on='device_id', how='left')\ndf_train = df_train.merge(open_app_counts_in_hour(), on='device_id', how='left')\ndf_train = df_train.merge(close_app_counts_in_hour(), on='device_id', how='left')\ndf_train = df_train.merge(device_brand_encoding(), on='device_id', how='left')\ndf_train = df_train.merge(device_start_end_app_timegap(), on='device_id', how='left')\ndf_train = df_train.merge(open_app_timegap_in_hour(), on='device_id', how='left')\n\n\n# In[16]:\n\n# word2vec feature files produced by the earlier w2c scripts\ndf_w2c_start = pd.read_csv('device_start_app_w2c.csv')\ndf_w2c_close = pd.read_csv('device_close_app_w2c.csv')\ndf_w2c_all = pd.read_csv('device_all_app_w2c.csv')\ndf_device_quchong_start_app_w2c = pd.read_csv('device_quchong_start_app_w2c.csv')\ndf_device_app_unique_start_app_w2c = pd.read_csv('device_app_unique_start_app_w2c.csv')\ndf_device_app_unique_close_app_w2c = pd.read_csv('device_app_unique_close_app_w2c.csv')\ndf_device_app_unique_all_app_w2c = pd.read_csv('device_app_unique_all_app_w2c.csv')\n\ndf_train_w2v = df_train.merge(df_w2c_start, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_w2c_close, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_w2c_all, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_quchong_start_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_start_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_close_app_w2c, on='device_id', how='left')\ndf_train_w2v = 
df_train_w2v.merge(df_device_app_unique_all_app_w2c, on='device_id', how='left')\n\n\n# In[23]:\n\n\ntrain = df_train_w2v[df_train_w2v['age'].notnull()]\ntest = df_train_w2v[df_train_w2v['age'].isnull()]\n# make the row order explicit so the OOF columns concatenated below align by position\ntrain = train.reset_index(drop=True)\n\n\n# In[24]:\n\n\nX = train.drop(['sex', 'age', 'device_id'],axis=1)\nY = train['age']\nY_CAT = pd.Categorical(Y)\nY = pd.Series(Y_CAT.codes)\n\n\n# In[25]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\n\nseed = 2018\nnum_folds = 5\nfolds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed)\n\nsub_list = []\n\n# out-of-fold probabilities for the 11 age classes\noof_preds = np.zeros([train.shape[0], 11])\n\ncate_feat = ['device_type','device_brand']\n\nparams = {\n    'boosting_type': 'gbdt',\n    'learning_rate' : 0.02,\n    #'max_depth':5,\n    'num_leaves' : 2 ** 5,\n    'metric': {'multi_logloss'},\n    'num_class' : 11,\n    'objective' : 'multiclass',\n    'random_state' : 6666,\n    'bagging_freq' : 5,\n    'feature_fraction' : 0.7,\n    'bagging_fraction' : 0.7,\n    'min_split_gain' : 0.0970905919552776,\n    'min_child_weight' : 9.42012323936088,\n}\n\nfor n_fold, (train_idx, valid_idx) in enumerate(folds.split(X, Y)):\n    train_x, train_y = X.iloc[train_idx], Y.iloc[train_idx]\n    valid_x, valid_y = X.iloc[valid_idx], Y.iloc[valid_idx]\n\n    lgb_train=lgb.Dataset(train_x,label=train_y)\n    lgb_eval = lgb.Dataset(valid_x, valid_y, reference=lgb_train)\n\n    gbm = lgb.train(params, lgb_train, num_boost_round=600, valid_sets=[lgb_train, lgb_eval], verbose_eval=50)\n\n    oof_preds[valid_idx] = gbm.predict(valid_x[X.columns.values])\n\n\noof_train = pd.DataFrame(oof_preds)\noof_train.columns = ['age_prob_oof_' + str(i)  for i in range(11)]\ntrain = pd.concat([train, oof_train], axis=1)\n\n\n# In[27]:\n\n\n# retrain on the full training set to score the test set\nlgb_train = lgb.Dataset(X,label=Y)\n\ngbm = lgb.train(params, lgb_train, num_boost_round=600, valid_sets=lgb_train, verbose_eval=50)\n\ntest = test.reset_index(drop=True)\ntest_preds = gbm.predict(test[X.columns.values])\n\n\n# In[28]:\n\n\noof_test = pd.DataFrame(test_preds)\noof_test.columns = ['age_prob_oof_' + str(i)  for i in range(11)]\ntest = pd.concat([test, oof_test], axis=1)\n\n\n# In[30]:\n\n\ndf_age_prob_oof = pd.concat([train[['device_id'] + ['age_prob_oof_' + str(i)  for i in range(11)] ],\n                             test[['device_id'] + ['age_prob_oof_' + str(i)  for i in range(11)] ]])\ndf_age_prob_oof.to_csv('device_age_prob_oof.csv', index=None)\n\n"
  },
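  {
    "path": "THLUO/notes/oof_stacking_sketch.py",
    "content": "\n# coding: utf-8\n\n# Illustrative sketch added for readers (hypothetical file, not used by the pipeline):\n# the out-of-fold (OOF) stacking pattern that 1.w2c_model_start.py uses to build\n# device_age_prob_oof.csv. Each row's probabilities come from a model that never\n# saw that row, so a downstream stacker can consume them without label leakage.\n# Data below is synthetic; shapes, names and parameters are placeholders.\n\nimport numpy as np\nimport pandas as pd\nimport lightgbm as lgb\nfrom sklearn.model_selection import StratifiedKFold\n\nrng = np.random.RandomState(2018)\nX = pd.DataFrame(rng.randn(500, 10))           # placeholder features\ny = pd.Series(rng.randint(0, 11, size=500))    # 11 age classes, as in the script\n\nparams = {'objective': 'multiclass', 'num_class': 11, 'metric': 'multi_logloss',\n          'learning_rate': 0.1, 'num_leaves': 31, 'verbose': -1}\n\noof = np.zeros((len(X), 11))\nfolds = StratifiedKFold(n_splits=5, shuffle=True, random_state=2018)\nfor trn_idx, val_idx in folds.split(X, y):\n    booster = lgb.train(params, lgb.Dataset(X.iloc[trn_idx], y.iloc[trn_idx]),\n                        num_boost_round=100)\n    # predict only the held-out fold\n    oof[val_idx] = booster.predict(X.iloc[val_idx])\n\noof_df = pd.DataFrame(oof, columns=['age_prob_oof_%d' % i for i in range(11)])\nprint(oof_df.head())\n"
  },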
  {
    "path": "THLUO/5.device_sex_prob_oof.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\n# sklearn.cross_validation was removed; model_selection is the current home of train_test_split\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta\nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nimport gc\n\n\n\n\n# In[2]:\n\nprint ('5.device_sex_prob_oof.py')\npath='input/'\ndata=pd.DataFrame()\n#sex_age=pd.read_excel('./data/性别年龄对照表.xlsx')\n\n\n# In[3]:\n\n\ndeviceid_packages=pd.read_csv(path+'deviceid_packages.tsv',sep='\\t',names=['device_id','apps'])\ndeviceid_test=pd.read_csv(path+'deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv(path+'deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndeviceid_brand = pd.read_csv(path+'deviceid_brand.tsv',sep='\\t', names=['device_id','device_brand', 'device_type'])\ndeviceid_package_start_close = pd.read_csv(path+'deviceid_package_start_close.tsv',sep='\\t', names=['device_id','app_id','start_time','close_time'])\npackage_label = pd.read_csv(path+'package_label.tsv',sep='\\t',names=['app_id','app_parent_type', 'app_child_type'])\n\n\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : str(x).split(' ')[0])\n\n# collapse brands that occur only 1/2/3 times into shared 'other' buckets\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 1].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 2].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_2' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 3].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_3' if x in one_time_brand else x)\n\n\n# encode brand/type categories as integers\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_brand.values))\ndeviceid_brand['device_brand'] = lbl.transform(list(deviceid_brand.device_brand.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_type.values))\ndeviceid_brand['device_type'] = lbl.transform(list(deviceid_brand.device_type.values))\n\n# encode app category labels as integers\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_parent_type.values))\npackage_label['app_parent_type'] = lbl.transform(list(package_label.app_parent_type.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_child_type.values))\npackage_label['app_child_type'] = lbl.transform(list(package_label.app_child_type.values))\n\n\n# In[4]:\n\n\nimport time\n\n# convert a millisecond epoch timestamp into a formatted local-time string\ndef timeStamp(timeNum):\n    timeStamp = float(timeNum/1000)\n    timeArray = time.localtime(timeStamp)\n    otherStyleTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n    return otherStyleTime\n\n# parse concrete datetime fields from the raw epochs\ndeviceid_package_start_close['start_date'] = 
pd.to_datetime(deviceid_package_start_close.start_time.apply(timeStamp))\ndeviceid_package_start_close['end_date'] = pd.to_datetime(deviceid_package_start_close.close_time.apply(timeStamp))\ndeviceid_package_start_close['start_hour'] = deviceid_package_start_close.start_date.dt.hour\ndeviceid_package_start_close['end_hour'] = deviceid_package_start_close.end_date.dt.hour\ndeviceid_package_start_close['time_gap'] = (deviceid_package_start_close['end_date'] - deviceid_package_start_close['start_date']).astype('timedelta64[s]')\n\ndeviceid_package_start_close = deviceid_package_start_close.merge(package_label, on='app_id', how='left')\ndeviceid_package_start_close.app_parent_type.fillna(-1, inplace=True)\ndeviceid_package_start_close.app_child_type.fillna(-1, inplace=True)\ndeviceid_package_start_close['start_year'] = deviceid_package_start_close.start_date.dt.year\ndeviceid_package_start_close['end_year'] = deviceid_package_start_close.end_date.dt.year\ndeviceid_package_start_close['year_gap'] = deviceid_package_start_close['end_year'] - deviceid_package_start_close['start_year']\n\n\n# In[10]:\n\n\n#这里留一个伏笔，有些日期属于异常,去除掉那些开启app和关闭app不是同年的数据\n#df_temp = deviceid_package_start_close[deviceid_package_start_close.start_year != 2017]\n#df_temp['year_gap'] = df_temp['end_year'] - df_temp['start_year']\n\n\n# In[5]:\n\n\ndeviceid_train=pd.concat([deviceid_train,deviceid_test])\n\n\n# In[6]:\n\n\ndeviceid_packages['apps']=deviceid_packages['apps'].apply(lambda x:x.split(','))\ndeviceid_packages['app_lenghth']=deviceid_packages['apps'].apply(lambda x:len(x))\n\n\n#特征工程\ndef open_app_timegap_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'start_hour'])['time_gap'].mean().reset_index().rename(columns = {'time_gap': 'mean_time_gap'})\n    df_mean_temp = pd.pivot_table(df_temp, index='device_id', columns='start_hour', values='mean_time_gap').reset_index()\n    df_mean_temp.columns = ['device_id'] + ['open_app_timegap_in_'+str(i) + '_mean_hour' for i in range(0,24)]\n    df_mean_temp.fillna(0, inplace=True)\n\n\n    \n    return df_mean_temp\n\n\n# In[8]:\n\n\ndef device_start_end_app_timegap() :\n    #用户打开，关闭app的时间间隔\n    df_ = deviceid_package_start_close.sort_values(by=['device_id', 'start_date'], ascending=False)\n    df_['prev_start_date'] = df_.groupby('device_id')['start_date'].shift(-1)\n    df_['start_date_gap'] = (df_['start_date'] - df_['prev_start_date']).astype('timedelta64[s]')\n    agg_dic = {'start_date_gap' : ['min', 'max', 'mean', 'median', 'std']}\n    df_start_gap_agg = df_.groupby('device_id').agg(agg_dic)\n    df_start_gap_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_start_gap_agg.columns.tolist()])\n    df_start_gap_agg = df_start_gap_agg.reset_index()\n    #del df_\n    gc.collect()\n    #关闭时间间隔\n    df_ = deviceid_package_start_close.sort_values(by=['device_id', 'end_date'], ascending=False)\n    df_['prev_end_date'] = df_.groupby('device_id')['end_date'].shift(-1)\n    df_['end_date_gap'] = (df_['end_date'] - df_['prev_end_date']).astype('timedelta64[s]')\n    agg_dic = {'end_date_gap' : ['min', 'max', 'mean', 'median', 'std']}\n    df_end_gap_agg = df_.groupby('device_id').agg(agg_dic)\n    df_end_gap_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_end_gap_agg.columns.tolist()])\n    df_end_gap_agg = df_end_gap_agg.reset_index()\n    #del df_\n    gc.collect()\n\n\n\n    df_agg = df_start_gap_agg.merge(df_end_gap_agg, on='device_id', how='left')\n    #df_agg = 
df_agg.merge(df_app_start_gap_agg, on='device_id', how='left')\n    #df_agg = df_agg.merge(df_app_end_gap_agg, on='device_id', how='left')\n    return df_agg\n\ndef open_app_counts_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'start_hour'])['app_id'].count().reset_index().rename(columns = {'app_id': 'app_counts'})\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='start_hour', values='app_counts').reset_index()\n    df_temp.columns = ['device_id'] + ['open_app_counts_in'+str(i) + '_hour' for i in range(0,24)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\ndef close_app_counts_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'end_hour'])['app_id'].count().reset_index().rename(columns = {'app_id': 'app_counts'})\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='end_hour', values='app_counts').reset_index()\n    df_temp.columns = ['device_id'] + ['close_app_counts_in'+str(i) + '_hour' for i in range(0,24)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\ndef app_type_mean_time_gap_one_hot () :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'app_parent_type'])['time_gap'].mean().reset_index()\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='app_parent_type', values='time_gap').reset_index()\n    df_temp.columns = ['device_id'] + ['app_parent_type_mean_time_gap'+str(i) for i in range(-1,45)]\n    df_temp.fillna(-1, inplace=True)\n    return df_temp  \n\ndef device_active_hour() :\n    aggregations = {\n        'start_hour' : ['std','mean','max','min'],\n        'end_hour' : ['std','mean','max','min']\n    }\n    df_agg = deviceid_package_start_close.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()   \n    \n    return df_agg\n\n\ndef device_brand_encoding() :\n    df_temp = deviceid_brand.merge(deviceid_train[['device_id', 'age', 'sex']], on='device_id', how='left')\n\n    aggregations = {\n        'age' : ['std','mean'],\n        'sex' : ['mean'],\n    }\n\n    df_device_brand = df_temp.groupby('device_brand').agg(aggregations)\n    df_device_brand.columns = pd.Index(['device_brand_' + e[0] + \"_\" + e[1].upper() for e in df_device_brand.columns.tolist()])\n    df_device_brand = df_device_brand.reset_index()\n\n    df_device_type = df_temp.groupby('device_type').agg(aggregations)\n    df_device_type.columns = pd.Index(['device_type_' + e[0] + \"_\" + e[1].upper() for e in df_device_type.columns.tolist()])\n    df_device_type = df_device_type.reset_index()\n\n    df_temp = df_temp.merge(df_device_brand, on='device_brand', how='left')\n    df_temp = df_temp.merge(df_device_type, on='device_type', how='left')\n\n    aggregations = {\n        'device_brand_age_STD' : ['mean'],\n        'device_brand_age_MEAN' : ['mean'],\n        'device_brand_sex_MEAN' : ['mean'],\n        #'device_type_age_STD' : ['mean'],\n        #'device_type_age_MEAN' : ['mean'],\n        #'device_type_sex_MEAN' : ['mean']\n    }\n\n    df_agg = df_temp.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n    return df_agg\n\n\n#统计device运行app的情况\ndef device_active_time_time_stat() :\n    #device开启app的时间统计信息\n    deviceid_package_start_close['active_time'] = deviceid_package_start_close['close_time'] 
- deviceid_package_start_close['start_time']\n\n    #device开启了多少次app\n    #device开启了多少个app\n    aggregations = {\n        'app_id' : ['count', 'nunique'],\n        'active_time' : ['mean', 'std', 'max', 'min'],\n    }\n    df_agg = deviceid_package_start_close.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n\n    aggregations = {\n        'active_time' : ['mean', 'std', 'max', 'min', 'count'],\n    }\n    df_da_agg = deviceid_package_start_close.groupby(['device_id', 'app_id']).agg(aggregations)\n    df_da_agg.columns = pd.Index(['device_app_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_da_agg.columns.tolist()])\n    df_da_agg = df_da_agg.reset_index()\n\n    #device开启app的平均时间\n    aggregations = {\n        'device_app_grouped_active_time_MEAN' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_STD' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_MAX' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_MIN' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_COUNT' : ['mean', 'std', 'max', 'min'],\n    }\n    df_temp = df_da_agg.groupby(['device_id']).agg(aggregations)\n    df_temp.columns = pd.Index([e[0] + \"_\" + e[1].upper() for e in df_temp.columns.tolist()])\n    df_temp = df_temp.reset_index()\n\n    df_agg = df_agg.merge(df_temp, on='device_id', how='left')\n    return df_agg\n\n\ndef app_type_encoding() :\n    df_temp = df_device_app_pair.merge(deviceid_train[['device_id', 'age', 'sex']], on='device_id', how='left')\n\n    aggregations = {\n        'age' : ['std','mean'],\n        'sex' : ['mean'],\n    }\n\n    df_agg_app_parent_type = df_temp.groupby('app_parent_type').agg(aggregations)\n    df_agg_app_parent_type.columns = pd.Index(['app_parent_type_' + e[0] + \"_\" + e[1].upper() for e in df_agg_app_parent_type.columns.tolist()])\n    df_agg_app_parent_type = df_agg_app_parent_type.reset_index()\n\n    df_agg_app_child_type = df_temp.groupby('app_child_type').agg(aggregations)\n    df_agg_app_child_type.columns = pd.Index(['app_child_type_' + e[0] + \"_\" + e[1].upper() for e in df_agg_app_child_type.columns.tolist()])\n    df_agg_app_child_type = df_agg_app_child_type.reset_index()\n\n    df_temp = df_temp.merge(df_agg_app_parent_type, on='app_parent_type', how='left')\n    df_temp = df_temp.merge(df_agg_app_child_type, on='app_child_type', how='left')\n\n    aggregations = {\n        'app_parent_type_age_STD' : ['mean'],\n        'app_parent_type_age_MEAN' : ['mean'],\n        'app_parent_type_sex_MEAN' : ['mean'],\n        'app_child_type_age_STD' : ['mean'],\n        'app_child_type_age_MEAN' : ['mean'],\n        'app_child_type_sex_MEAN' : ['mean']\n    }\n    \n    df_agg = df_temp.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n    return df_agg\n\n#每个device对应的app_parent_type计数\ndef app_type_onehot_in_device(df) :\n    df_copy = df.fillna(-1)\n    df_temp = df_copy.groupby(['device_id', 'app_parent_type'])['app_id'].size().reset_index()\n    df_temp.rename(columns = {'app_id' : 'app_parent_type_counts'}, inplace=True)\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='app_parent_type', values='app_parent_type_counts').reset_index()\n    df_temp.columns = ['device_id'] + 
['app_parent_type'+str(i) for i in range(-1,45)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\n\n\napps=deviceid_packages['apps'].apply(lambda x:' '.join(x)).tolist()\nvectorizer=CountVectorizer()\ntransformer=TfidfTransformer()\ncntTf = vectorizer.fit_transform(apps)\ntfidf=transformer.fit_transform(cntTf)\nword=vectorizer.get_feature_names()\nweight=tfidf.toarray()\ndf_weight=pd.DataFrame(weight)\nfeature=df_weight.columns\ndf_weight['sum']=0\nfor f in tqdm(feature):\n    df_weight['sum']+=df_weight[f]\ndeviceid_packages['tfidf_sum']=df_weight['sum']\n\n\n# In[10]:\n\n\nlda = LatentDirichletAllocation(n_topics=5,\n                                learning_offset=50.,\n                                random_state=666)\ndocres = lda.fit_transform(cntTf)\n\n\n# In[11]:\n\n\ndeviceid_packages = pd.concat([deviceid_packages,pd.DataFrame(docres)],axis=1)\n\n\n# In[12]:\n\n\ntemp=deviceid_packages.drop('apps',axis=1)\ndeviceid_train=pd.merge(deviceid_train,temp,on='device_id',how='left')\n\n\n# In[13]:\n\n\n#解析出所有的device_app_pair\ndevice_id_arr = []\napp_arr = []\ndf_device_app_pair = pd.DataFrame()\nfor row in deviceid_packages.values :\n    device_id = row[0]\n    app_list = row[1]\n    for app in app_list :\n        device_id_arr.append(device_id)\n        app_arr.append(app)\n#生成pair        \ndf_device_app_pair['device_id'] = device_id_arr\ndf_device_app_pair['app_id'] = app_arr    \n\ndf_device_app_pair = df_device_app_pair.merge(package_label, how='left', on='app_id')\n\n\n# In[14]:\n\n\ndf_device_app_pair\n\n\n# In[15]:\n\n\n#提取特征\ndf_train = deviceid_train.merge(device_active_time_time_stat(), on='device_id', how='left')\ndf_train = df_train.merge(deviceid_brand, on='device_id', how='left')\ndf_train = df_train.merge(app_type_onehot_in_device(df_device_app_pair), on='device_id', how='left')\ndf_train = df_train.merge(app_type_encoding(), on='device_id', how='left')\ndf_train = df_train.merge(device_active_hour(), on='device_id', how='left')\ndf_train = df_train.merge(app_type_mean_time_gap_one_hot(), on='device_id', how='left')\ndf_train = df_train.merge(open_app_counts_in_hour(), on='device_id', how='left')\ndf_train = df_train.merge(close_app_counts_in_hour(), on='device_id', how='left')\ndf_train = df_train.merge(device_brand_encoding(), on='device_id', how='left')\ndf_train = df_train.merge(device_start_end_app_timegap(), on='device_id', how='left')\ndf_train = df_train.merge(open_app_timegap_in_hour(), on='device_id', how='left')\n\n\n# In[79]:\n\n\ndf_w2c_start = pd.read_csv('device_start_app_w2c.csv')\ndf_w2c_close = pd.read_csv('device_close_app_w2c.csv')\ndf_w2c_all = pd.read_csv('device_all_app_w2c.csv')\ndf_device_quchong_start_app_w2c = pd.read_csv('device_quchong_start_app_w2c.csv')\ndf_device_app_unique_start_app_w2c = pd.read_csv('device_app_unique_start_app_w2c.csv')\ndf_device_app_unique_close_app_w2c = pd.read_csv('device_app_unique_close_app_w2c.csv')\ndf_device_app_unique_all_app_w2c = pd.read_csv('device_app_unique_all_app_w2c.csv')\n\ndf_train_w2v = df_train.merge(df_w2c_start, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_w2c_close, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_w2c_all, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_quchong_start_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_start_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_close_app_w2c, on='device_id', 
how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_all_app_w2c, on='device_id', how='left')\n\n\n# In[81]:\n\n\ntrain = df_train_w2v[df_train_w2v['sex'].notnull()]\ntest = df_train_w2v[df_train_w2v['sex'].isnull()]\n\n\n# In[82]:\n\n\ntrain['sex'] = train.sex.apply(lambda x : x if x == 1 else 0)\n\n\n# In[83]:\n\n\nX = train.drop(['sex', 'age', 'device_id'],axis=1)\nY = train['sex']\nY_CAT = pd.Categorical(Y)\nY = pd.Series(Y_CAT.codes)\n\n\n# In[84]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\n\nseed = 2018\nnum_folds = 5\nfolds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed)\n\nsub_list = []\n\noof_preds = np.zeros(train.shape[0])\nsub_preds = np.zeros(test.shape[0])\n\ncate_feat = ['device_type','device_brand']\n\nparams = {\n    'boosting_type': 'gbdt',\n    'learning_rate' : 0.02,\n    #'max_depth':5,\n    'num_leaves' : 2 ** 5,\n    'metric': {'binary_logloss'},\n    #'num_class' : 22,\n    'objective' : 'binary',\n    'random_state' : 6666,\n    'bagging_freq' : 5,\n    'feature_fraction' : 0.7,\n    'bagging_fraction' : 0.7,\n    'min_split_gain' : 0.0970905919552776,\n    'min_child_weight' : 9.42012323936088,  \n}\n\nfor n_fold, (train_idx, valid_idx) in enumerate(folds.split(X, Y)):\n    train_x, train_y = X.iloc[train_idx], Y.iloc[train_idx]\n    valid_x, valid_y = X.iloc[valid_idx], Y.iloc[valid_idx] \n    \n    lgb_train=lgb.Dataset(train_x,label=train_y)\n    lgb_eval = lgb.Dataset(valid_x, valid_y, reference=lgb_train)\n    \n    gbm = lgb.train(params, lgb_train, num_boost_round=450, valid_sets=lgb_eval, verbose_eval=50)  \n    \n    oof_preds[valid_idx] = gbm.predict(valid_x[X.columns.values])\n\n    \ntrain['sex_prob_oof'] = oof_preds    \n\n\n# In[85]:\n\n\n#用全部的train来预测test\nlgb_train = lgb.Dataset(X,label=Y)\n\ngbm = lgb.train(params, lgb_train, num_boost_round=450, valid_sets=lgb_train, verbose_eval=50)  \n\ntest['sex_prob_oof'] = gbm.predict(test[X.columns.values])\n\n\n# In[88]:\n\n\ndf_sex_prob_oof = pd.concat([train[['device_id', 'sex_prob_oof']], test[['device_id', 'sex_prob_oof']]])\ndf_sex_prob_oof.to_csv('device_sex_prob_oof.csv', index=None)\n\n"
  },
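  {
    "path": "THLUO/notes/target_encoding_sketch.py",
    "content": "\n# coding: utf-8\n\n# Illustrative sketch added for readers (hypothetical file, not used by the pipeline):\n# the mean target-encoding pattern behind device_brand_encoding() and\n# app_type_encoding(): aggregate label statistics per category, then merge them\n# back onto the rows as features. Data below is synthetic.\n\nimport pandas as pd\n\ndf = pd.DataFrame({\n    'device_id': ['a', 'b', 'c', 'd'],\n    'device_brand': ['x', 'x', 'y', 'y'],\n    'age': [3, 5, 7, None],   # None marks a test device with no label\n    'sex': [1, 0, 1, None],\n})\n\nstats = df.groupby('device_brand').agg({'age': ['mean', 'std'], 'sex': ['mean']})\nstats.columns = ['device_brand_' + c[0] + '_' + c[1].upper() for c in stats.columns]\nencoded = df.merge(stats.reset_index(), on='device_brand', how='left')\nprint(encoded)\n\n# Caveat: encoding a category with statistics of the target can leak labels into\n# the features; the original scripts accept this trade-off, but an out-of-fold\n# encoding would be safer.\n"
  },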
  {
    "path": "THLUO/6.start_close_age_prob_oof.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nimport gc\n\n\n\n# In[2]:\n\nprint ('6.start_close_age_prob_oof.py')\n\npath='input/'\ndata=pd.DataFrame()\n#sex_age=pd.read_excel('./data/性别年龄对照表.xlsx')\n\n\n# In[3]:\n\n\ndeviceid_packages=pd.read_csv(path+'deviceid_packages.tsv',sep='\\t',names=['device_id','apps'])\ndeviceid_test=pd.read_csv(path+'deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv(path+'deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndeviceid_brand = pd.read_csv(path+'deviceid_brand.tsv',sep='\\t', names=['device_id','device_brand', 'device_type'])\ndeviceid_package_start_close = pd.read_csv(path+'deviceid_package_start_close.tsv',sep='\\t', names=['device_id','app_id','start_time','close_time'])\npackage_label = pd.read_csv(path+'package_label.tsv',sep='\\t',names=['app_id','app_parent_type', 'app_child_type'])\n\n\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : str(x).split(' ')[0])\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 1].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 2].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_2' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 3].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_3' if x in one_time_brand else x)\n\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_brand.values))\ndeviceid_brand['device_brand'] = lbl.transform(list(deviceid_brand.device_brand.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_type.values))\ndeviceid_brand['device_type'] = lbl.transform(list(deviceid_brand.device_type.values))\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_parent_type.values))\npackage_label['app_parent_type'] = lbl.transform(list(package_label.app_parent_type.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_child_type.values))\npackage_label['app_child_type'] = lbl.transform(list(package_label.app_child_type.values))\n\ndeviceid_train=pd.concat([deviceid_train,deviceid_test])\n\n\n# In[4]:\n\n\ndeviceid_packages['apps']=deviceid_packages['apps'].apply(lambda x:x.split(','))\ndeviceid_packages['app_lenghth']=deviceid_packages['apps'].apply(lambda x:len(x))\n\n\n# In[6]:\n\n\nimport time\n\n# 输入毫秒级的时间，转出正常格式的时间\ndef timeStamp(timeNum):\n    timeStamp = float(timeNum/1000)\n    timeArray = 
time.localtime(timeStamp)\n    otherStyleTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n    return otherStyleTime\n\n#解析出具体的时间\ndeviceid_package_start_close['start_date'] = pd.to_datetime(deviceid_package_start_close.start_time.apply(timeStamp))\ndeviceid_package_start_close['end_date'] = pd.to_datetime(deviceid_package_start_close.close_time.apply(timeStamp))\ndeviceid_package_start_close['start_hour'] = deviceid_package_start_close.start_date.dt.hour\ndeviceid_package_start_close['end_hour'] = deviceid_package_start_close.end_date.dt.hour\ndeviceid_package_start_close['time_gap'] = (deviceid_package_start_close['end_date'] - deviceid_package_start_close['start_date']).astype('timedelta64[s]')\n\ndeviceid_package_start_close = deviceid_package_start_close.merge(package_label, on='app_id', how='left')\ndeviceid_package_start_close.app_parent_type.fillna(-1, inplace=True)\ndeviceid_package_start_close.app_child_type.fillna(-1, inplace=True)\ndeviceid_package_start_close['start_year'] = deviceid_package_start_close.start_date.dt.year\ndeviceid_package_start_close['end_year'] = deviceid_package_start_close.end_date.dt.year\ndeviceid_package_start_close['year_gap'] = deviceid_package_start_close['end_year'] - deviceid_package_start_close['start_year']\n\n\n# In[8]:\n\n\nagg_func = {\n    'start_hour' : ['min', 'max', 'mean', 'std', 'count'], \n    'end_hour' : ['min', 'max', 'mean', 'std'], \n    'time_gap' : ['min', 'max', 'mean', 'std']\n}\ndf_agg = deviceid_package_start_close.groupby(['device_id', 'app_id']).agg(agg_func)\ndf_agg.columns = pd.Index(['device_app_grouped' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\ndf_agg = df_agg.reset_index()\ndf_agg = df_agg.merge(package_label, on='app_id', how='left')\n\n\n# In[10]:\n\n\n#device在每个时段打开app的次数\ndf_temp = deviceid_package_start_close.groupby(['device_id', 'app_id', 'start_hour'])['start_time'].count().reset_index()\ndf_temp = pd.pivot_table(df_temp, index=['device_id', 'app_id'], columns='start_hour', values='start_time').reset_index()\ndf_temp.columns = ['device_id', 'app_id'] + ['device_app_start_counts'+str(i) + '_hour' for i in range(0,24)]\ndf_temp.fillna(0, inplace=True)\n\n\n# In[11]:\n\n\ndf_agg = df_agg.merge(df_temp, on=['device_id', 'app_id'], how='left')\n\n\n# In[13]:\n\n\napps=deviceid_packages['apps'].apply(lambda x:' '.join(x)).tolist()\nvectorizer=CountVectorizer()\ntransformer=TfidfTransformer()\ncntTf = vectorizer.fit_transform(apps)\ntfidf=transformer.fit_transform(cntTf)\nword=vectorizer.get_feature_names()\nweight=tfidf.toarray()\ndf_weight=pd.DataFrame(weight)\nfeature=df_weight.columns\ndf_weight['sum']=0\nfor f in tqdm(feature):\n    df_weight['sum']+=df_weight[f]\ndeviceid_packages['tfidf_sum']=df_weight['sum']\n\nlda = LatentDirichletAllocation(n_topics=5,\n                                learning_offset=50.,\n                                random_state=666)\ndocres = lda.fit_transform(cntTf)\n\ndeviceid_packages = pd.concat([deviceid_packages,pd.DataFrame(docres)],axis=1)\n\ndel deviceid_packages['apps']\ndeviceid_packages.columns = ['device_id', 'app_lenghth', 'tfidf_sum', 'LDA_0', 'LDA_1', 'LDA_2', 'LDA_3', 'LDA_4']\n\n\n# In[14]:\n\n\ndf_temp = df_agg.merge(deviceid_packages, on='device_id', how='left')\ndf_w2c_start = pd.read_csv('device_start_app_w2c.csv')\ndf_w2c_close = pd.read_csv('device_close_app_w2c.csv')\ndf_w2c_all = pd.read_csv('device_all_app_w2c.csv')\ndf_sex_prob_oof = pd.read_csv('device_sex_prob_oof.csv')\ndf_age_prob_oof = 
pd.read_csv('device_age_prob_oof.csv')\n\n\ndf_temp = df_temp.merge(df_w2c_start, on='device_id', how='left')\ndf_temp = df_temp.merge(df_w2c_close, on='device_id', how='left')\ndf_temp = df_temp.merge(df_w2c_all, on='device_id', how='left')\ndf_temp = df_temp.merge(df_sex_prob_oof, on='device_id', how='left')\ndf_temp = df_temp.merge(df_age_prob_oof, on='device_id', how='left')\n\n\n# In[16]:\n\n\nagg_func = {\n    'device_id' : ['count'], \n    'app_lenghth' : ['min', 'mean', 'std', 'max'], \n    'tfidf_sum' : ['min', 'mean', 'std', 'max'], \n    'LDA_1' : ['min', 'mean', 'std', 'max'], \n    'LDA_2' : ['min', 'mean', 'std', 'max'], \n    'LDA_3' : ['min', 'mean', 'std', 'max'], \n    'LDA_4' : ['min', 'mean', 'std', 'max'], \n}\n\nfor j in [i for i in df_age_prob_oof.columns.values if i != 'device_id'] :\n    agg_func[j] = ['min', 'mean', 'std', 'max']\n\nfor j in [i for i in df_sex_prob_oof.columns.values if i != 'device_id'] :\n    agg_func[j] = ['min', 'mean', 'std', 'max']    \n    \nfor j in [i for i in df_w2c_all.columns.values if i != 'device_id'] :\n    agg_func[j] = ['mean']   \n    \nfor j in [i for i in df_w2c_start.columns.values if i != 'device_id'] :\n    agg_func[j] = ['mean']  \n    \nfor j in [i for i in df_w2c_close.columns.values if i != 'device_id'] :\n    agg_func[j] = ['mean']      \n\n\n# In[18]:\n\n\ndf_app_temp = df_temp.groupby('app_id').agg(agg_func)\ndf_app_temp.columns = pd.Index(['app_grouped' + e[0] + \"_\" + e[1].upper() for e in df_app_temp.columns.tolist()])\ndf_app_temp = df_app_temp.reset_index()\ndf_train = df_agg.merge(df_app_temp, on='app_id', how='left')\ndf_train = df_train.merge(deviceid_train, on='device_id', how='left')\n\n\n# In[26]:\n\n\ntrain = df_train[df_train['age'].notnull()]\ntest = df_train[df_train['age'].isnull()]\n\ntrain = train.reset_index(drop=True)\ntest = test.reset_index(drop=True)\n\nX = train.drop(['sex','age', 'app_id', 'device_id'],axis=1)\nY = train['age']\nY_CAT = pd.Categorical(Y)\nY = pd.Series(Y_CAT.codes)\n\n\n# In[30]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\n\nseed = 2018\nnum_folds = 5\nfolds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed)\n\nsub_list = []\n\n\noof_preds = np.zeros([train.shape[0], 11])\n\ncate_feat = ['device_type','device_brand']\n\nfor n_fold, (train_idx, valid_idx) in enumerate(folds.split(X, Y)):\n    train_x, train_y = X.iloc[train_idx], Y.iloc[train_idx]\n    valid_x, valid_y = X.iloc[valid_idx], Y.iloc[valid_idx] \n    \n    lgb_train=lgb.Dataset(train_x,label=train_y)\n    lgb_eval = lgb.Dataset(valid_x, valid_y, reference=lgb_train)\n    params = {\n        'boosting_type': 'gbdt',\n        'learning_rate' : 0.02,\n        #'max_depth':5,\n        'num_leaves' : 2 ** 5,\n        'metric': {'multi_logloss'},\n        'num_class' : 11,\n        'objective' : 'multiclass',\n        'random_state' : 6666,\n        'bagging_freq' : 5,\n        'feature_fraction' : 0.7,\n        'bagging_fraction' : 0.7,\n        'min_split_gain' : 0.0970905919552776,\n        'min_child_weight' : 9.42012323936088,  \n        'nthread': 5,\n    }  \n    \n    gbm = lgb.train(params,\n                    lgb_train,\n                    num_boost_round=2100,\n                    valid_sets=[lgb_train, lgb_eval],\n                    verbose_eval=100)  \n    \n    oof_preds[valid_idx] = gbm.predict(valid_x[X.columns.values])\n\n\n# In[32]:\n\n\noof_train = pd.DataFrame(oof_preds)\noof_train.columns = ['start_close_age_prob_oof_' + str(i)  for i in range(11)] 
\ntrain = pd.concat([train, oof_train], axis=1)\n\n\n# In[38]:\n\n\n# retrain on all labelled data to score the test set\n# (params persists from the last CV fold above)\nlgb_train = lgb.Dataset(X,label=Y)\n\ngbm = lgb.train(params, lgb_train, num_boost_round=2100, valid_sets=lgb_train, verbose_eval=100)\n\ntest_preds = gbm.predict(test[X.columns.values])\n\noof_test = pd.DataFrame(test_preds)\noof_test.columns = ['start_close_age_prob_oof_' + str(i)  for i in range(11)]\ntest = pd.concat([test, oof_test], axis=1)\n\n\n# In[76]:\n\n\ndf_age_prob_oof = pd.concat([train[['device_id'] + ['start_close_age_prob_oof_' + str(i)  for i in range(11)]], \n                             test[['device_id'] + ['start_close_age_prob_oof_' + str(i)  for i in range(11)]]])\n\n\n# In[72]:\n\n\n# average the app-level probabilities back to device level\nagg_func = {'start_close_age_prob_oof_' + str(i) : ['mean'] for i in range(11)}\n\ndf_age_prob_oof = df_age_prob_oof.groupby('device_id').agg(agg_func)\ndf_age_prob_oof.columns = pd.Index(['device_app_grouped' + e[0] + \"_\" + e[1].upper() for e in df_age_prob_oof.columns.tolist()])\ndf_age_prob_oof = df_age_prob_oof.reset_index()\n\n\n# In[73]:\n\n\ndf_age_prob_oof.to_csv('start_close_age_prob_oof.csv', index=None)\n\n"
  },
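  {
    "path": "THLUO/notes/hourly_pivot_sketch.py",
    "content": "\n# coding: utf-8\n\n# Illustrative sketch added for readers (hypothetical file, not used by the pipeline):\n# how the hourly count features (open_app_counts_in*_hour and the per-app\n# device_app_start_counts*_hour) are built: count events per (key, hour), then\n# pivot the hours into 24 columns. Data below is synthetic. The reindex(range(24))\n# step guards against hours that never occur, which the original column-renaming\n# approach assumes are always present.\n\nimport pandas as pd\n\nevents = pd.DataFrame({\n    'device_id':  ['a', 'a', 'a', 'b'],\n    'app_id':     ['p1', 'p1', 'p2', 'p1'],\n    'start_hour': [9, 9, 22, 14],\n})\n\ncounts = (events.groupby(['device_id', 'start_hour'])['app_id']\n                .count().reset_index(name='app_counts'))\nwide = (pd.pivot_table(counts, index='device_id', columns='start_hour',\n                       values='app_counts')\n          .reindex(columns=range(24)).fillna(0))\nwide.columns = ['open_app_counts_in%d_hour' % h for h in range(24)]\nwide = wide.reset_index()\nprint(wide)\n"
  },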
  {
    "path": "THLUO/7.start_close_sex_prob_oof.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nimport gc\n\n\n\n# In[2]:\n\nprint ('7.start_close_sex_prob_oof.py')\npath='input/'\ndata=pd.DataFrame()\n#sex_age=pd.read_excel('./data/性别年龄对照表.xlsx')\n\n\n# In[3]:\n\n\ndeviceid_packages=pd.read_csv(path+'deviceid_packages.tsv',sep='\\t',names=['device_id','apps'])\ndeviceid_test=pd.read_csv(path+'deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv(path+'deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndeviceid_brand = pd.read_csv(path+'deviceid_brand.tsv',sep='\\t', names=['device_id','device_brand', 'device_type'])\ndeviceid_package_start_close = pd.read_csv(path+'deviceid_package_start_close.tsv',sep='\\t', names=['device_id','app_id','start_time','close_time'])\npackage_label = pd.read_csv(path+'package_label.tsv',sep='\\t',names=['app_id','app_parent_type', 'app_child_type'])\n\n\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : str(x).split(' ')[0])\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 1].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 2].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_2' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 3].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_3' if x in one_time_brand else x)\n\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_brand.values))\ndeviceid_brand['device_brand'] = lbl.transform(list(deviceid_brand.device_brand.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_type.values))\ndeviceid_brand['device_type'] = lbl.transform(list(deviceid_brand.device_type.values))\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_parent_type.values))\npackage_label['app_parent_type'] = lbl.transform(list(package_label.app_parent_type.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_child_type.values))\npackage_label['app_child_type'] = lbl.transform(list(package_label.app_child_type.values))\n\ndeviceid_train=pd.concat([deviceid_train,deviceid_test])\n\n\n# In[6]:\n\n\ndeviceid_packages['apps']=deviceid_packages['apps'].apply(lambda x:x.split(','))\ndeviceid_packages['app_lenghth']=deviceid_packages['apps'].apply(lambda x:len(x))\n\n\n# In[4]:\n\n\nimport time\n\n# 输入毫秒级的时间，转出正常格式的时间\ndef timeStamp(timeNum):\n    timeStamp = float(timeNum/1000)\n    timeArray = 
time.localtime(timeStamp)\n    otherStyleTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n    return otherStyleTime\n\n#解析出具体的时间\ndeviceid_package_start_close['start_date'] = pd.to_datetime(deviceid_package_start_close.start_time.apply(timeStamp))\ndeviceid_package_start_close['end_date'] = pd.to_datetime(deviceid_package_start_close.close_time.apply(timeStamp))\ndeviceid_package_start_close['start_hour'] = deviceid_package_start_close.start_date.dt.hour\ndeviceid_package_start_close['end_hour'] = deviceid_package_start_close.end_date.dt.hour\ndeviceid_package_start_close['time_gap'] = (deviceid_package_start_close['end_date'] - deviceid_package_start_close['start_date']).astype('timedelta64[s]')\n\ndeviceid_package_start_close = deviceid_package_start_close.merge(package_label, on='app_id', how='left')\ndeviceid_package_start_close.app_parent_type.fillna(-1, inplace=True)\ndeviceid_package_start_close.app_child_type.fillna(-1, inplace=True)\ndeviceid_package_start_close['start_year'] = deviceid_package_start_close.start_date.dt.year\ndeviceid_package_start_close['end_year'] = deviceid_package_start_close.end_date.dt.year\ndeviceid_package_start_close['year_gap'] = deviceid_package_start_close['end_year'] - deviceid_package_start_close['start_year']\n\n\n# In[9]:\n\n\nagg_func = {\n    'start_hour' : ['min', 'max', 'mean', 'std', 'count'], \n    'end_hour' : ['min', 'max', 'mean', 'std'], \n    'time_gap' : ['min', 'max', 'mean', 'std']\n}\ndf_agg = deviceid_package_start_close.groupby(['device_id', 'app_id']).agg(agg_func)\ndf_agg.columns = pd.Index(['device_app_grouped' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\ndf_agg = df_agg.reset_index()\ndf_agg = df_agg.merge(package_label, on='app_id', how='left')\n\n\n# In[11]:\n\n\n#device在每个时段打开app的次数\ndf_temp = deviceid_package_start_close.groupby(['device_id', 'app_id', 'start_hour'])['start_time'].count().reset_index()\ndf_temp = pd.pivot_table(df_temp, index=['device_id', 'app_id'], columns='start_hour', values='start_time').reset_index()\ndf_temp.columns = ['device_id', 'app_id'] + ['device_app_start_counts'+str(i) + '_hour' for i in range(0,24)]\ndf_temp.fillna(0, inplace=True)\n\n\n# In[13]:\n\n\ndf_agg = df_agg.merge(df_temp, on=['device_id', 'app_id'], how='left')\n\n\n# In[15]:\n\n\napps=deviceid_packages['apps'].apply(lambda x:' '.join(x)).tolist()\nvectorizer=CountVectorizer()\ntransformer=TfidfTransformer()\ncntTf = vectorizer.fit_transform(apps)\ntfidf=transformer.fit_transform(cntTf)\nword=vectorizer.get_feature_names()\nweight=tfidf.toarray()\ndf_weight=pd.DataFrame(weight)\nfeature=df_weight.columns\ndf_weight['sum']=0\nfor f in tqdm(feature):\n    df_weight['sum']+=df_weight[f]\ndeviceid_packages['tfidf_sum']=df_weight['sum']\n\nlda = LatentDirichletAllocation(n_topics=5,\n                                learning_offset=50.,\n                                random_state=666)\ndocres = lda.fit_transform(cntTf)\n\ndeviceid_packages = pd.concat([deviceid_packages,pd.DataFrame(docres)],axis=1)\n\ndel deviceid_packages['apps']\ndeviceid_packages.columns = ['device_id', 'app_lenghth', 'tfidf_sum', 'LDA_0', 'LDA_1', 'LDA_2', 'LDA_3', 'LDA_4']\n\n\n# In[207]:\n\n\ndf_temp = df_agg.merge(deviceid_packages, on='device_id', how='left')\ndf_w2c_start = pd.read_csv('device_start_app_w2c.csv')\ndf_w2c_close = pd.read_csv('device_close_app_w2c.csv')\ndf_w2c_all = pd.read_csv('device_all_app_w2c.csv')\ndf_sex_prob_oof = pd.read_csv('device_sex_prob_oof.csv')\ndf_age_prob_oof = 
pd.read_csv('device_age_prob_oof.csv')\n\n\ndf_temp = df_temp.merge(df_w2c_start, on='device_id', how='left')\ndf_temp = df_temp.merge(df_w2c_close, on='device_id', how='left')\ndf_temp = df_temp.merge(df_w2c_all, on='device_id', how='left')\ndf_temp = df_temp.merge(df_sex_prob_oof, on='device_id', how='left')\ndf_temp = df_temp.merge(df_age_prob_oof, on='device_id', how='left')\n\n\n# In[224]:\n\n\nagg_func = {\n    'device_id' : ['count'], \n    'app_lenghth' : ['min', 'mean', 'std', 'max'], \n    'tfidf_sum' : ['min', 'mean', 'std', 'max'], \n    'LDA_1' : ['min', 'mean', 'std', 'max'], \n    'LDA_2' : ['min', 'mean', 'std', 'max'], \n    'LDA_3' : ['min', 'mean', 'std', 'max'], \n    'LDA_4' : ['min', 'mean', 'std', 'max'], \n}\n\nfor j in [i for i in df_age_prob_oof.columns.values if i != 'device_id'] :\n    agg_func[j] = ['min', 'mean', 'std', 'max']\n\nfor j in [i for i in df_sex_prob_oof.columns.values if i != 'device_id'] :\n    agg_func[j] = ['min', 'mean', 'std', 'max']    \n    \nfor j in [i for i in df_w2c_all.columns.values if i != 'device_id'] :\n    agg_func[j] = ['mean']   \n    \nfor j in [i for i in df_w2c_start.columns.values if i != 'device_id'] :\n    agg_func[j] = ['mean']  \n    \nfor j in [i for i in df_w2c_close.columns.values if i != 'device_id'] :\n    agg_func[j] = ['mean']      \n\n\n# In[226]:\n\n\ndf_app_temp = df_temp.groupby('app_id').agg(agg_func)\ndf_app_temp.columns = pd.Index(['app_grouped' + e[0] + \"_\" + e[1].upper() for e in df_app_temp.columns.tolist()])\ndf_app_temp = df_app_temp.reset_index()\ndf_train = df_agg.merge(df_app_temp, on='app_id', how='left')\n\n\n# In[228]:\n\n\ndf_train = df_train.merge(deviceid_train, on='device_id', how='left')\n\n\n# In[235]:\n\n\ntrain = df_train[df_train['sex'].notnull()]\ntest = df_train[df_train['sex'].isnull()]\n\ntrain = train.reset_index(drop=True)\ntest = test.reset_index(drop=True)\n\nX = train.drop(['sex','age', 'app_id', 'device_id'],axis=1)\nY = train['sex']\nY_CAT = pd.Categorical(Y)\nY = pd.Series(Y_CAT.codes)\n\n\n# In[237]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\n\nseed = 2018\nnum_folds = 5\nfolds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed)\n\nsub_list = []\n\noof_preds = np.zeros(train.shape[0])\nsub_preds = np.zeros(test.shape[0])\n\ncate_feat = ['device_type','device_brand']\n\nparams = {\n    'boosting_type': 'gbdt',\n    'learning_rate' : 0.02,\n    #'max_depth':5,\n    'num_leaves' : 2 ** 5,\n    'metric': {'binary_logloss'},\n    'objective' : 'binary',\n    'random_state' : 6666,\n    'bagging_freq' : 5,\n    'feature_fraction' : 0.7,\n    'bagging_fraction' : 0.7,\n    'min_split_gain' : 0.0970905919552776,\n    'min_child_weight' : 9.42012323936088,  \n}\n\nfor n_fold, (train_idx, valid_idx) in enumerate(folds.split(X, Y)):\n    train_x, train_y = X.iloc[train_idx], Y.iloc[train_idx]\n    valid_x, valid_y = X.iloc[valid_idx], Y.iloc[valid_idx] \n    \n    lgb_train=lgb.Dataset(train_x,label=train_y)\n    lgb_eval = lgb.Dataset(valid_x, valid_y, reference=lgb_train)\n    \n    gbm = lgb.train(params, lgb_train, num_boost_round=2100, valid_sets=[lgb_train, lgb_eval], \n                    verbose_eval=100)  \n    \n    oof_preds[valid_idx] = gbm.predict(valid_x[X.columns.values])\n\n    \ntrain['sex_prob_oof'] = oof_preds    \n\n\n# In[239]:\n\n\n#用全部的train来预测test\nlgb_train = lgb.Dataset(X,label=Y)\n\ngbm = lgb.train(params, lgb_train, num_boost_round=2100, valid_sets=lgb_train, verbose_eval=100)  \n\ntest['sex_prob_oof'] = 
gbm.predict(test[X.columns.values])\n\n\n# In[240]:\n\n\ndf_sex_prob_oof = pd.concat([train[['device_id', 'sex_prob_oof']], test[['device_id', 'sex_prob_oof']]])\ndf_sex_prob_oof.columns = ['device_id', 'start_close_sex_prob_oof']\n\n\nagg_func = {\n    'start_close_sex_prob_oof' : ['min', 'max', 'mean', 'std']\n}\n\ndf_sex_prob_oof = df_sex_prob_oof.groupby('device_id').agg(agg_func)\ndf_sex_prob_oof.columns = pd.Index(['device_app_grouped' + e[0] + \"_\" + e[1].upper() for e in df_sex_prob_oof.columns.tolist()])\ndf_sex_prob_oof = df_sex_prob_oof.reset_index()\n\n\n# In[242]:\n\n\ndf_sex_prob_oof.to_csv('start_close_sex_prob_oof.csv', index=None)\n\n"
  },
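  {
    "path": "THLUO/notes/vectorized_timestamp_sketch.py",
    "content": "\n# coding: utf-8\n\n# Illustrative sketch added for readers (hypothetical file, not used by the pipeline):\n# the per-row timeStamp()/strftime/to_datetime round-trip in the scripts can be\n# collapsed into one vectorized call, since pd.to_datetime understands millisecond\n# epochs directly. Caveat: unit='ms' yields UTC-naive datetimes, while\n# time.localtime() uses the machine's local zone, so derived hours can differ.\n# Data below is synthetic.\n\nimport pandas as pd\n\nms = pd.Series([1488326400000, 1488330000000, 1488333600000])  # placeholder epochs\nstart_date = pd.to_datetime(ms, unit='ms')\nstart_hour = start_date.dt.hour\n# gap to the next event, in seconds (same idea as start_date_gap in the scripts)\ngap_s = (start_date.shift(-1) - start_date).dt.total_seconds()\nprint(start_hour.tolist())\nprint(gap_s.tolist())\n"
  },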
  {
    "path": "THLUO/9.sex_age_bin_prob_oof.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nimport gc\n\n\n\n\n# In[2]:\n\nprint ('9.sex_age_bin_prob_oof.py')\npath='input/'\ndata=pd.DataFrame()\n#sex_age=pd.read_excel('./data/性别年龄对照表.xlsx')\n\n\n# In[3]:\n\n\ndeviceid_packages=pd.read_csv(path+'deviceid_packages.tsv',sep='\\t',names=['device_id','apps'])\ndeviceid_test=pd.read_csv(path+'deviceid_test.tsv',sep='\\t',names=['device_id'])\ndeviceid_train=pd.read_csv(path+'deviceid_train.tsv',sep='\\t',names=['device_id','sex','age'])\ndeviceid_brand = pd.read_csv(path+'deviceid_brand.tsv',sep='\\t', names=['device_id','device_brand', 'device_type'])\ndeviceid_package_start_close = pd.read_csv(path+'deviceid_package_start_close.tsv',sep='\\t', names=['device_id','app_id','start_time','close_time'])\npackage_label = pd.read_csv(path+'package_label.tsv',sep='\\t',names=['app_id','app_parent_type', 'app_child_type'])\n\n\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : str(x).split(' ')[0])\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 1].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 2].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_2' if x in one_time_brand else x)\n\ndf_temp = deviceid_brand.groupby('device_brand')['device_id'].count().reset_index().rename(columns={'device_id':'brand_counts'})\none_time_brand = df_temp[df_temp.brand_counts == 3].device_brand.values\ndeviceid_brand['device_brand'] = deviceid_brand.device_brand.apply(lambda x : 'other_3' if x in one_time_brand else x)\n\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_brand.values))\ndeviceid_brand['device_brand'] = lbl.transform(list(deviceid_brand.device_brand.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(deviceid_brand.device_type.values))\ndeviceid_brand['device_type'] = lbl.transform(list(deviceid_brand.device_type.values))\n\n#转换成对应的数字\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_parent_type.values))\npackage_label['app_parent_type'] = lbl.transform(list(package_label.app_parent_type.values))\n\nlbl = LabelEncoder()\nlbl.fit(list(package_label.app_child_type.values))\npackage_label['app_child_type'] = lbl.transform(list(package_label.app_child_type.values))\n\n\n# In[4]:\n\n\nimport time\n\n# 输入毫秒级的时间，转出正常格式的时间\ndef timeStamp(timeNum):\n    timeStamp = float(timeNum/1000)\n    timeArray = time.localtime(timeStamp)\n    otherStyleTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n    return otherStyleTime\n\n#解析出具体的时间\ndeviceid_package_start_close['start_date'] = 
pd.to_datetime(deviceid_package_start_close.start_time.apply(timeStamp))\ndeviceid_package_start_close['end_date'] = pd.to_datetime(deviceid_package_start_close.close_time.apply(timeStamp))\ndeviceid_package_start_close['start_hour'] = deviceid_package_start_close.start_date.dt.hour\ndeviceid_package_start_close['end_hour'] = deviceid_package_start_close.end_date.dt.hour\ndeviceid_package_start_close['time_gap'] = (deviceid_package_start_close['end_date'] - deviceid_package_start_close['start_date']).astype('timedelta64[s]')\n\ndeviceid_package_start_close = deviceid_package_start_close.merge(package_label, on='app_id', how='left')\ndeviceid_package_start_close.app_parent_type.fillna(-1, inplace=True)\ndeviceid_package_start_close.app_child_type.fillna(-1, inplace=True)\ndeviceid_package_start_close['start_year'] = deviceid_package_start_close.start_date.dt.year\ndeviceid_package_start_close['end_year'] = deviceid_package_start_close.end_date.dt.year\ndeviceid_package_start_close['year_gap'] = deviceid_package_start_close['end_year'] - deviceid_package_start_close['start_year']\n\n\n# In[5]:\n\n\ndeviceid_train=pd.concat([deviceid_train,deviceid_test])\n\n\n# In[6]:\n\n\ndeviceid_packages['apps']=deviceid_packages['apps'].apply(lambda x:x.split(','))\ndeviceid_packages['app_lenghth']=deviceid_packages['apps'].apply(lambda x:len(x))\n\n\n#特征工程\ndef open_app_timegap_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'start_hour'])['time_gap'].mean().reset_index().rename(columns = {'time_gap': 'mean_time_gap'})\n    df_mean_temp = pd.pivot_table(df_temp, index='device_id', columns='start_hour', values='mean_time_gap').reset_index()\n    df_mean_temp.columns = ['device_id'] + ['open_app_timegap_in_'+str(i) + '_mean_hour' for i in range(0,24)]\n    df_mean_temp.fillna(0, inplace=True)\n\n\n    \n    return df_mean_temp\n\n\n# In[8]:\n\n\ndef device_start_end_app_timegap() :\n    #用户打开，关闭app的时间间隔\n    df_ = deviceid_package_start_close.sort_values(by=['device_id', 'start_date'], ascending=False)\n    df_['prev_start_date'] = df_.groupby('device_id')['start_date'].shift(-1)\n    df_['start_date_gap'] = (df_['start_date'] - df_['prev_start_date']).astype('timedelta64[s]')\n    agg_dic = {'start_date_gap' : ['min', 'max', 'mean', 'median', 'std']}\n    df_start_gap_agg = df_.groupby('device_id').agg(agg_dic)\n    df_start_gap_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_start_gap_agg.columns.tolist()])\n    df_start_gap_agg = df_start_gap_agg.reset_index()\n    #del df_\n    gc.collect()\n    #关闭时间间隔\n    df_ = deviceid_package_start_close.sort_values(by=['device_id', 'end_date'], ascending=False)\n    df_['prev_end_date'] = df_.groupby('device_id')['end_date'].shift(-1)\n    df_['end_date_gap'] = (df_['end_date'] - df_['prev_end_date']).astype('timedelta64[s]')\n    agg_dic = {'end_date_gap' : ['min', 'max', 'mean', 'median', 'std']}\n    df_end_gap_agg = df_.groupby('device_id').agg(agg_dic)\n    df_end_gap_agg.columns = pd.Index(['device_' + e[0] + \"_\" + e[1].upper() for e in df_end_gap_agg.columns.tolist()])\n    df_end_gap_agg = df_end_gap_agg.reset_index()\n    #del df_\n    gc.collect()\n\n\n\n    df_agg = df_start_gap_agg.merge(df_end_gap_agg, on='device_id', how='left')\n    #df_agg = df_agg.merge(df_app_start_gap_agg, on='device_id', how='left')\n    #df_agg = df_agg.merge(df_app_end_gap_agg, on='device_id', how='left')\n    return df_agg\n\ndef open_app_counts_in_hour() :\n    df_temp = 
deviceid_package_start_close.groupby(['device_id', 'start_hour'])['app_id'].count().reset_index().rename(columns = {'app_id': 'app_counts'})\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='start_hour', values='app_counts').reset_index()\n    df_temp.columns = ['device_id'] + ['open_app_counts_in'+str(i) + '_hour' for i in range(0,24)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\ndef close_app_counts_in_hour() :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'end_hour'])['app_id'].count().reset_index().rename(columns = {'app_id': 'app_counts'})\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='end_hour', values='app_counts').reset_index()\n    df_temp.columns = ['device_id'] + ['close_app_counts_in'+str(i) + '_hour' for i in range(0,24)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\ndef app_type_mean_time_gap_one_hot () :\n    df_temp = deviceid_package_start_close.groupby(['device_id', 'app_parent_type'])['time_gap'].mean().reset_index()\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='app_parent_type', values='time_gap').reset_index()\n    df_temp.columns = ['device_id'] + ['app_parent_type_mean_time_gap'+str(i) for i in range(-1,45)]\n    df_temp.fillna(-1, inplace=True)\n    return df_temp\n\ndef device_active_hour() :\n    aggregations = {\n        'start_hour' : ['std','mean','max','min'],\n        'end_hour' : ['std','mean','max','min']\n    }\n    df_agg = deviceid_package_start_close.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n\n    return df_agg\n\n\n# target-encode device brand and device type with age/sex statistics\ndef device_brand_encoding() :\n    df_temp = deviceid_brand.merge(deviceid_train[['device_id', 'age', 'sex']], on='device_id', how='left')\n\n    aggregations = {\n        'age' : ['std','mean'],\n        'sex' : ['mean'],\n    }\n\n    df_device_brand = df_temp.groupby('device_brand').agg(aggregations)\n    df_device_brand.columns = pd.Index(['device_brand_' + e[0] + \"_\" + e[1].upper() for e in df_device_brand.columns.tolist()])\n    df_device_brand = df_device_brand.reset_index()\n\n    df_device_type = df_temp.groupby('device_type').agg(aggregations)\n    df_device_type.columns = pd.Index(['device_type_' + e[0] + \"_\" + e[1].upper() for e in df_device_type.columns.tolist()])\n    df_device_type = df_device_type.reset_index()\n\n    df_temp = df_temp.merge(df_device_brand, on='device_brand', how='left')\n    df_temp = df_temp.merge(df_device_type, on='device_type', how='left')\n\n    aggregations = {\n        'device_brand_age_STD' : ['mean'],\n        'device_brand_age_MEAN' : ['mean'],\n        'device_brand_sex_MEAN' : ['mean'],\n        #'device_type_age_STD' : ['mean'],\n        #'device_type_age_MEAN' : ['mean'],\n        #'device_type_sex_MEAN' : ['mean']\n    }\n\n    df_agg = df_temp.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n    return df_agg\n\n\n# statistics of how each device runs apps\ndef device_active_time_time_stat() :\n    # per-record active time of an app on a device\n    deviceid_package_start_close['active_time'] = deviceid_package_start_close['close_time'] - deviceid_package_start_close['start_time']\n\n    # how many times the device opened an app, and how many distinct apps it opened\n    aggregations = {\n        'app_id' : ['count', 'nunique'],\n        'active_time' : ['mean', 'std', 'max', 
'min'],\n    }\n    df_agg = deviceid_package_start_close.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n\n    aggregations = {\n        'active_time' : ['mean', 'std', 'max', 'min', 'count'],\n    }\n    df_da_agg = deviceid_package_start_close.groupby(['device_id', 'app_id']).agg(aggregations)\n    df_da_agg.columns = pd.Index(['device_app_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_da_agg.columns.tolist()])\n    df_da_agg = df_da_agg.reset_index()\n\n    # per-device aggregates of the per-app active-time statistics\n    aggregations = {\n        'device_app_grouped_active_time_MEAN' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_STD' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_MAX' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_MIN' : ['mean', 'std', 'max', 'min'],\n        'device_app_grouped_active_time_COUNT' : ['mean', 'std', 'max', 'min'],\n    }\n    df_temp = df_da_agg.groupby(['device_id']).agg(aggregations)\n    df_temp.columns = pd.Index([e[0] + \"_\" + e[1].upper() for e in df_temp.columns.tolist()])\n    df_temp = df_temp.reset_index()\n\n    df_agg = df_agg.merge(df_temp, on='device_id', how='left')\n    return df_agg\n\n\n# target-encode app parent/child types with age/sex statistics\ndef app_type_encoding() :\n    df_temp = df_device_app_pair.merge(deviceid_train[['device_id', 'age', 'sex']], on='device_id', how='left')\n\n    aggregations = {\n        'age' : ['std','mean'],\n        'sex' : ['mean'],\n    }\n\n    df_agg_app_parent_type = df_temp.groupby('app_parent_type').agg(aggregations)\n    df_agg_app_parent_type.columns = pd.Index(['app_parent_type_' + e[0] + \"_\" + e[1].upper() for e in df_agg_app_parent_type.columns.tolist()])\n    df_agg_app_parent_type = df_agg_app_parent_type.reset_index()\n\n    df_agg_app_child_type = df_temp.groupby('app_child_type').agg(aggregations)\n    df_agg_app_child_type.columns = pd.Index(['app_child_type_' + e[0] + \"_\" + e[1].upper() for e in df_agg_app_child_type.columns.tolist()])\n    df_agg_app_child_type = df_agg_app_child_type.reset_index()\n\n    df_temp = df_temp.merge(df_agg_app_parent_type, on='app_parent_type', how='left')\n    df_temp = df_temp.merge(df_agg_app_child_type, on='app_child_type', how='left')\n\n    aggregations = {\n        'app_parent_type_age_STD' : ['mean'],\n        'app_parent_type_age_MEAN' : ['mean'],\n        'app_parent_type_sex_MEAN' : ['mean'],\n        'app_child_type_age_STD' : ['mean'],\n        'app_child_type_age_MEAN' : ['mean'],\n        'app_child_type_sex_MEAN' : ['mean']\n    }\n\n    df_agg = df_temp.groupby('device_id').agg(aggregations)\n    df_agg.columns = pd.Index(['device_grouped_' + e[0] + \"_\" + e[1].upper() for e in df_agg.columns.tolist()])\n    df_agg = df_agg.reset_index()\n    return df_agg\n\n# counts of each app_parent_type per device\ndef app_type_onehot_in_device(df) :\n    df_copy = df.fillna(-1)\n    df_temp = df_copy.groupby(['device_id', 'app_parent_type'])['app_id'].size().reset_index()\n    df_temp.rename(columns = {'app_id' : 'app_parent_type_counts'}, inplace=True)\n    df_temp = pd.pivot_table(df_temp, index='device_id', columns='app_parent_type', values='app_parent_type_counts').reset_index()\n    df_temp.columns = ['device_id'] + ['app_parent_type'+str(i) for i in range(-1,45)]\n    df_temp.fillna(0, inplace=True)\n    return df_temp\n\n\napps=deviceid_packages['apps'].apply(lambda x:' 
'.join(x)).tolist()\nvectorizer=CountVectorizer()\ntransformer=TfidfTransformer()\ncntTf = vectorizer.fit_transform(apps)\ntfidf=transformer.fit_transform(cntTf)\nword=vectorizer.get_feature_names()\nweight=tfidf.toarray()\ndf_weight=pd.DataFrame(weight)\nfeature=df_weight.columns\ndf_weight['sum']=0\nfor f in tqdm(feature):\n    df_weight['sum']+=df_weight[f]\ndeviceid_packages['tfidf_sum']=df_weight['sum']\n\n\n# In[10]:\n\n\nlda = LatentDirichletAllocation(n_components=5,\n                                learning_offset=50.,\n                                random_state=666)\ndocres = lda.fit_transform(cntTf)\n\n\n# In[11]:\n\n\ndeviceid_packages = pd.concat([deviceid_packages,pd.DataFrame(docres)],axis=1)\n\n\n# In[12]:\n\n\ntemp=deviceid_packages.drop('apps',axis=1)\ndeviceid_train=pd.merge(deviceid_train,temp,on='device_id',how='left')\n\n\n# In[13]:\n\n\n# expand deviceid_packages into (device_id, app_id) pairs\ndevice_id_arr = []\napp_arr = []\ndf_device_app_pair = pd.DataFrame()\nfor row in deviceid_packages.values :\n    device_id = row[0]\n    app_list = row[1]\n    for app in app_list :\n        device_id_arr.append(device_id)\n        app_arr.append(app)\n# build the pair dataframe\ndf_device_app_pair['device_id'] = device_id_arr\ndf_device_app_pair['app_id'] = app_arr\n\ndf_device_app_pair = df_device_app_pair.merge(package_label, how='left', on='app_id')\n\n\n# In[15]:\n\n\n# assemble the feature table\ndf_train = deviceid_train.merge(device_active_time_time_stat(), on='device_id', how='left')\ndf_train = df_train.merge(deviceid_brand, on='device_id', how='left')\ndf_train = df_train.merge(app_type_onehot_in_device(df_device_app_pair), on='device_id', how='left')\ndf_train = df_train.merge(app_type_encoding(), on='device_id', how='left')\ndf_train = df_train.merge(device_active_hour(), on='device_id', how='left')\ndf_train = df_train.merge(app_type_mean_time_gap_one_hot(), on='device_id', how='left')\ndf_train = df_train.merge(open_app_counts_in_hour(), on='device_id', how='left')\ndf_train = df_train.merge(close_app_counts_in_hour(), on='device_id', how='left')\ndf_train = df_train.merge(device_brand_encoding(), on='device_id', how='left')\ndf_train = df_train.merge(device_start_end_app_timegap(), on='device_id', how='left')\ndf_train = df_train.merge(open_app_timegap_in_hour(), on='device_id', how='left')\n\n\n# In[16]:\n\n\ndf_w2c_start = pd.read_csv('device_start_app_w2c.csv')\ndf_w2c_close = pd.read_csv('device_close_app_w2c.csv')\ndf_w2c_all = pd.read_csv('device_all_app_w2c.csv')\ndf_device_quchong_start_app_w2c = pd.read_csv('device_quchong_start_app_w2c.csv')\ndf_device_app_unique_start_app_w2c = pd.read_csv('device_app_unique_start_app_w2c.csv')\ndf_device_app_unique_close_app_w2c = pd.read_csv('device_app_unique_close_app_w2c.csv')\ndf_device_app_unique_all_app_w2c = pd.read_csv('device_app_unique_all_app_w2c.csv')\n\n\n# In[17]:\n\n\ndf_train_w2v = df_train.merge(df_w2c_start, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_w2c_close, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_w2c_all, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_quchong_start_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_start_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_close_app_w2c, on='device_id', how='left')\ndf_train_w2v = df_train_w2v.merge(df_device_app_unique_all_app_w2c, on='device_id', how='left')\n\n\n# In[19]:\n\n\ndf_train_w2v['sex'] = df_train_w2v['sex'].apply(lambda 
x:str(x))\ndf_train_w2v['age'] = df_train_w2v['age'].apply(lambda x:str(x))\n# restore integer-valued labels while keeping 'nan' markers for the unlabeled test rows\ndef tool(x):\n    if x=='nan':\n        return x\n    else:\n        return str(int(float(x)))\ndf_train_w2v['sex']=df_train_w2v['sex'].apply(tool)\ndf_train_w2v['age']=df_train_w2v['age'].apply(tool)\ndf_train_w2v['sex_age']=df_train_w2v['sex']+'-'+df_train_w2v['age']\n\ndf_train_w2v = df_train_w2v.replace({'nan':np.NaN,'nan-nan':np.NaN})\n\n\n# In[42]:\n\n\ntrain = df_train_w2v[df_train_w2v['sex_age'].notnull()]\ntest = df_train_w2v[df_train_w2v['sex_age'].isnull()]\ntrain = train.reset_index(drop=True)\ntest = test.reset_index(drop=True)\n\n\n# In[43]:\n\n\nY = train['sex_age']\nY_CAT = pd.Categorical(Y)\nY = pd.Series(Y_CAT.codes)\ntrain['label'] = Y\n\n\n# In[45]:\n\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\n# boosting rounds tuned beforehand for each of the 22 sex-age classes\nlgb_round = {4: 267,\n             6: 199,\n             17: 151,\n             5: 166,\n             15: 188,\n             16: 147,\n             8: 195,\n             7: 250,\n             21: 107,\n             2: 254,\n             3: 282,\n             19: 139,\n             9: 169,\n             13: 153,\n             1: 167,\n             18: 178,\n             10: 153,\n             20: 177,\n             14: 208,\n             12: 194,\n             11: 211,\n             0: 132}\nlabel_set = train.label.unique()\nfor sex_age in label_set :\n    print (sex_age)\n    X = train.drop(['sex', 'age', 'sex_age', 'label', 'device_id'],axis=1)\n    Y = train.label.apply(lambda x : 1 if x == sex_age else 0)\n    print (Y.value_counts())\n    seed = 2018\n    num_folds = 5\n    folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed)\n\n    sub_list = []\n\n    oof_preds = np.zeros(train.shape[0])\n    sub_preds = np.zeros(test.shape[0])\n\n    params = {\n        'boosting_type': 'gbdt',\n        'learning_rate' : 0.02,\n        #'max_depth':5,\n        'num_leaves' : 2 ** 5,\n        'metric': {'binary_logloss'},\n        #'num_class' : 22,\n        'objective' : 'binary',\n        'random_state' : 6666,\n        'bagging_freq' : 5,\n        'feature_fraction' : 0.7,\n        'bagging_fraction' : 0.7,\n        'min_split_gain' : 0.0970905919552776,\n        'min_child_weight' : 9.42012323936088,\n    }\n\n    for n_fold, (train_idx, valid_idx) in enumerate(folds.split(X, Y)):\n        train_x, train_y = X.iloc[train_idx], Y.iloc[train_idx]\n        valid_x, valid_y = X.iloc[valid_idx], Y.iloc[valid_idx]\n\n        lgb_train=lgb.Dataset(train_x,label=train_y)\n        lgb_eval = lgb.Dataset(valid_x, valid_y, reference=lgb_train)\n\n        gbm = lgb.train(params, lgb_train, num_boost_round=lgb_round[sex_age], valid_sets=[lgb_train, lgb_eval], verbose_eval=50)\n\n        oof_preds[valid_idx] = gbm.predict(valid_x[X.columns.values])\n\n    train['sex_age_bin_prob_oof_' + str(sex_age)] = oof_preds\n\n    # retrain on the full training set to predict the test set\n    lgb_train = lgb.Dataset(X,label=Y)\n\n    gbm = lgb.train(params, lgb_train, num_boost_round=lgb_round[sex_age], valid_sets=lgb_train, verbose_eval=50)\n\n    test['sex_age_bin_prob_oof_' + str(sex_age)] = gbm.predict(test[X.columns.values])\n\n\n# In[49]:\n\n\ncolumns = ['device_id'] + ['sex_age_bin_prob_oof_' + str(i) for i in range(22)]\ncolumns\n\n\n# In[53]:\n\n\npd.concat([train[columns], test[columns]]).to_csv('sex_age_bin_prob_oof.csv', index=None)\n\n"
  },
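The script above is the clearest instance of the team's OOF stacking trick: for each of the 22 sex-age classes it trains a one-vs-rest binary LightGBM model with 5-fold CV and keeps only the out-of-fold predictions, so the resulting probability columns can feed later models without label leakage. A condensed sketch of that pattern, where `X`, `labels`, `params` and `num_round` stand in for the script's own objects:

```python
import numpy as np
import lightgbm as lgb
from sklearn.model_selection import StratifiedKFold

def binary_oof_feature(X, labels, target_class, params, num_round, seed=2018):
    """Out-of-fold probability that labels == target_class (one-vs-rest)."""
    y = (labels == target_class).astype(int)  # binarize the 22-class label
    oof = np.zeros(len(X))
    folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)
    for trn_idx, val_idx in folds.split(X, y):
        train_set = lgb.Dataset(X.iloc[trn_idx], label=y.iloc[trn_idx])
        gbm = lgb.train(params, train_set, num_boost_round=num_round)
        # each row is predicted only by the model that never saw it
        oof[val_idx] = gbm.predict(X.iloc[val_idx])
    return oof  # leak-free stacking feature, one column per class
```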
  {
    "path": "THLUO/TextModel.py",
    "content": "import os\nimport re\nimport sys\nimport pandas as pd\nimport numpy as np\nimport gensim\nfrom gensim.models import Word2Vec\nfrom gensim.models.word2vec import LineSentence\nimport tensorflow as tf\nimport keras\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.optimizers import *\nfrom keras.callbacks import *\nfrom keras.preprocessing import text, sequence\nfrom keras.utils import to_categorical\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.utils import np_utils\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import  accuracy_score\nfrom sklearn.preprocessing import LabelEncoder\nimport warnings\nwarnings.filterwarnings('ignore')\nimport os\nimport gc\nimport random\nfrom keras.engine.topology import Layer\nfrom util import *\n\ndef capsule_lstm(sent_length, embeddings_weight,class_num):\n    print(\"get_text_capsule\")\n    content = Input(shape=(sent_length,), dtype='int32')\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n    embed = SpatialDropout1D(0.2)(embedding(content))\n\n    x = Bidirectional(CuDNNLSTM(200, return_sequences=True))(embed)\n    capsule = Capsule(num_capsule=Num_capsule, dim_capsule=Dim_capsule, routings=Routings, share_weights=True)(x)\n    capsule = Flatten()(capsule)\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(capsule))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\n\n\n\n\n\ndef get_text_capsule(sent_length, embeddings_weight,class_num):\n    print(\"get_text_capsule\")\n    content = Input(shape=(sent_length,), dtype='int32')\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n    embed = SpatialDropout1D(0.2)(embedding(content))\n\n    x = Bidirectional(CuDNNGRU(200, return_sequences=True))(embed)\n    capsule = Capsule(num_capsule=Num_capsule, dim_capsule=Dim_capsule, routings=Routings, share_weights=True)(x)\n    capsule = Flatten()(capsule)\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(capsule))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\ndef get_text_cnn1(sent_length, embeddings_weight,class_num):\n    print(\"get_text_cnn1\")\n    content = Input(shape=(sent_length,), dtype='int32')\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n    embed = embedding(content)\n\n    embed = SpatialDropout1D(0.2)(embed)\n\n    conv2 = Activation('relu')(BatchNormalization()(Conv1D(128, 2, 
padding='same')(embed)))\n    conv2 = Activation('relu')(BatchNormalization()(Conv1D(64, 2, padding='same')(conv2)))\n    conv2 = MaxPool1D(pool_size=50)(conv2)\n\n    conv3 = Activation('relu')(BatchNormalization()(Conv1D(128, 3, padding='same')(embed)))\n    conv3 = Activation('relu')(BatchNormalization()(Conv1D(64, 3, padding='same')(conv3)))\n    conv3 = MaxPool1D(pool_size=50)(conv3)\n\n    conv4 = Activation('relu')(BatchNormalization()(Conv1D(128, 4, padding='same')(embed)))\n    conv4 = Activation('relu')(BatchNormalization()(Conv1D(64, 4, padding='same')(conv4)))\n    conv4 = MaxPool1D(pool_size=50)(conv4)\n\n    conv5 = Activation('relu')(BatchNormalization()(Conv1D(128, 5, padding='same')(embed)))\n    conv5 = Activation('relu')(BatchNormalization()(Conv1D(64, 5, padding='same')(conv5)))\n    conv5 = MaxPool1D(pool_size=50)(conv5)\n\n    cnn = concatenate([conv2, conv3, conv4, conv5], axis=-1)\n    flat = Flatten()(cnn)\n\n    drop = Dropout(0.2)(flat)\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(drop))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\n\ndef get_text_cnn2(sent_length, embeddings_weight,class_num):\n    print(\"get_text_cnn2\")\n    content = Input(shape=(sent_length,), dtype='int32')\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n    embed = embedding(content)\n    filter_sizes = [2, 3, 4,5]\n    num_filters = 128\n    embed_size = embeddings_weight.shape[1]\n\n    x = SpatialDropout1D(0.2)(embed)\n    x = Reshape((sent_length, embed_size, 1))(x)\n\n    conv_0 = Conv2D(num_filters, kernel_size=(filter_sizes[0], embed_size), kernel_initializer='normal',\n                    activation='relu')(x)\n    conv_1 = Conv2D(num_filters, kernel_size=(filter_sizes[1], embed_size), kernel_initializer='normal',\n                    activation='relu')(x)\n    conv_2 = Conv2D(num_filters, kernel_size=(filter_sizes[2], embed_size), kernel_initializer='normal',\n                    activation='relu')(x)\n    conv_3 = Conv2D(num_filters, kernel_size=(filter_sizes[3], embed_size), kernel_initializer='normal',\n                    activation='relu')(x)\n\n    maxpool_0 = MaxPool2D(pool_size=(sent_length - filter_sizes[0] + 1, 1))(conv_0)\n    maxpool_1 = MaxPool2D(pool_size=(sent_length - filter_sizes[1] + 1, 1))(conv_1)\n    maxpool_2 = MaxPool2D(pool_size=(sent_length - filter_sizes[2] + 1, 1))(conv_2)\n    maxpool_3 = MaxPool2D(pool_size=(sent_length - filter_sizes[3] + 1, 1))(conv_3)\n\n    z = Concatenate(axis=1)([maxpool_0, maxpool_1, maxpool_2, maxpool_3])\n    z = Flatten()(z)\n    z = Dropout(0.1)(z)\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(z))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\n\ndef get_text_cnn3(sent_length, 
embeddings_weight,class_num):\n    print(\"get_text_cnn3\")\n    content = Input(shape=(sent_length,), dtype='int32')\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)(content)\n\n    embedding = SpatialDropout1D(0.2)(embedding)\n\n    cnn1 = Conv1D(128, 2, padding='same', strides=1, activation='relu')(embedding)\n    cnn2 = Conv1D(128, 3, padding='same', strides=1, activation='relu')(embedding)\n    cnn3 = Conv1D(128, 4, padding='same', strides=1, activation='relu')(embedding)\n    cnn4 = Conv1D(128, 5, padding='same', strides=1, activation='relu')(embedding)\n    cnn = concatenate([cnn1, cnn2, cnn3, cnn4], axis=-1)\n\n    cnn1 = Conv1D(64, 2, padding='same', strides=1, activation='relu')(cnn)\n    cnn1 = MaxPooling1D(pool_size=100)(cnn1)\n    cnn2 = Conv1D(64, 3, padding='same', strides=1, activation='relu')(cnn)\n    cnn2 = MaxPooling1D(pool_size=100)(cnn2)\n    cnn3 = Conv1D(64, 4, padding='same', strides=1, activation='relu')(cnn)\n    cnn3 = MaxPooling1D(pool_size=100)(cnn3)\n    cnn4 = Conv1D(64, 5, padding='same', strides=1, activation='relu')(cnn)\n    cnn4 = MaxPooling1D(pool_size=100)(cnn4)\n\n    cnn = concatenate([cnn1, cnn2, cnn3, cnn4], axis=-1)\n\n    flat = Flatten()(cnn)\n    drop = Dropout(0.2)(flat)\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(drop))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\ndef get_text_gru1(sent_length, embeddings_weight,class_num):\n    print(\"get_text_gru1\")\n    content = Input(shape=(sent_length,), dtype='int32')\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n\n    x = SpatialDropout1D(0.2)(embedding(content))\n\n    x = Bidirectional(CuDNNGRU(200, return_sequences=True))(x)\n    x = Bidirectional(CuDNNGRU(200, return_sequences=True))(x)\n\n    avg_pool = GlobalAveragePooling1D()(x)\n    max_pool = GlobalMaxPooling1D()(x)\n    conc = concatenate([avg_pool, max_pool])\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(conc))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\ndef get_text_gru2(sent_length, embeddings_weight,class_num):\n    print(\"get_text_gru2\")\n    content = Input(shape=(sent_length,), dtype='int32')\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n\n    x = SpatialDropout1D(0.2)(embedding(content))\n\n    x = Bidirectional(CuDNNGRU(200, return_sequences=True))(x)\n    x = Bidirectional(CuDNNGRU(200, return_sequences=True))(x)\n\n    x = Conv1D(100, 
kernel_size=3, padding=\"valid\", kernel_initializer=\"glorot_uniform\")(x)\n    avg_pool = GlobalAveragePooling1D()(x)\n    max_pool = GlobalMaxPooling1D()(x)\n    conc = concatenate([avg_pool, max_pool])\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(conc))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\ndef get_text_gru4(sent_length, embeddings_weight,class_num):\n    print(\"get_text_gru4\")\n    content = Input(shape=(sent_length,), dtype='int32')\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n    x = SpatialDropout1D(0.2)(embedding(content))\n\n    x = Bidirectional(CuDNNLSTM(200, return_sequences=True))(x)\n    x = Bidirectional(CuDNNGRU(200, return_sequences=True))(x)\n\n    avg_pool = GlobalAveragePooling1D()(x)\n    max_pool = GlobalMaxPooling1D()(x)\n\n    x = concatenate([avg_pool, max_pool])\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(x))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\ndef get_text_gru5(sent_length, embeddings_weight,class_num):\n    print(\"get_text_gru5\")\n    content = Input(shape=(sent_length,), dtype='int32')\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n\n    embed = SpatialDropout1D(0.2)(embedding(content))\n\n    x = Bidirectional(CuDNNGRU(200, return_sequences=True))(embed)\n    x = Dropout(0.35)(x)\n    x = Bidirectional(CuDNNGRU(200, return_sequences=True))(x)\n\n    last = Lambda(lambda t: t[:, -1])(x)\n    maxpool = GlobalMaxPooling1D()(x)\n    average = GlobalAveragePooling1D()(x)\n    x = concatenate([last, maxpool, average])\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(x))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\ndef get_text_gru6(sent_length, embeddings_weight,class_num):\n    print(\"get_text_gru6\")\n    content = Input(shape=(sent_length,), dtype='int32')\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n\n    embed = SpatialDropout1D(0.2)(embedding(content))\n\n    x = Bidirectional(CuDNNGRU(200, return_sequences=True))(embed)\n    x = Conv1D(60, kernel_size=3, padding='valid', activation='relu', strides=1)(x)\n   
 avg_pool = GlobalAveragePooling1D()(x)\n    max_pool = GlobalMaxPooling1D()(x)\n\n    embed = SpatialDropout1D(0.2)(embedding(content))\n    y = Bidirectional(CuDNNGRU(100, return_sequences=True))(embed)\n    y = Conv1D(40, kernel_size=3, padding='valid', activation='relu', strides=1)(y)\n    avg_pool2 = GlobalAveragePooling1D()(y)\n    max_pool2 = GlobalMaxPooling1D()(y)\n\n    x = concatenate([avg_pool, max_pool, avg_pool2, max_pool2], -1)\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(x))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\ndef get_text_rcnn1(sent_length, embeddings_weight,class_num):\n    print(\"get_text_rcnn1\")\n    document = Input(shape=(None,), dtype=\"int32\")\n\n    embedder = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n\n    doc_embedding = SpatialDropout1D(0.2)(embedder(document))\n    forward = Bidirectional(CuDNNLSTM(200, return_sequences=True))(doc_embedding)\n    together = concatenate([forward, doc_embedding], axis=2)\n\n    semantic = Conv1D(100, 2, padding='same', strides=1, activation='relu')(together)\n    pool_rnn = Lambda(lambda x: K.max(x, axis=1), output_shape=(100,))(semantic)\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(pool_rnn))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=document, outputs=output)\n   # model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\ndef get_text_rcnn2(sent_length, embeddings_weight,class_num):\n    print(\"get_text_rcnn2\")\n    content = Input(shape=(None,), dtype=\"int32\")\n\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n\n    x = SpatialDropout1D(0.2)(embedding(content))\n\n    x = Convolution1D(filters=256, kernel_size=3, padding='same', strides=1, activation=\"relu\")(x)\n    x = MaxPooling1D(pool_size=2)(x)\n\n    x = Dropout(0.2)(CuDNNGRU(units=200, return_sequences=True)(x))\n    x = Dropout(0.2)(CuDNNGRU(units=100)(x))\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(x))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\ndef get_text_rcnn3(sent_length, embeddings_weight,class_num):\n    print(\"get_text_rcnn3\")\n    content = Input(shape=(None,), dtype=\"int32\")\n\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        
trainable=False)\n\n    x = SpatialDropout1D(0.2)(embedding(content))\n\n    cnn = Convolution1D(filters=200, kernel_size=3, padding=\"same\", strides=1, activation=\"relu\")(x)\n    cnn_avg_pool = GlobalAveragePooling1D()(cnn)\n    cnn_max_pool = GlobalMaxPooling1D()(cnn)\n\n    rnn = Dropout(0.2)(CuDNNGRU(200, return_sequences=True)(x))\n    rnn_avg_pool = GlobalAveragePooling1D()(rnn)\n    rnn_max_pool = GlobalMaxPooling1D()(rnn)\n\n    con = concatenate([cnn_avg_pool, cnn_max_pool, rnn_avg_pool, rnn_max_pool], axis=-1)\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(con))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\n\ndef get_text_rcnn4(sent_length, embeddings_weight,class_num):\n    print(\"get_text_rcnn4\")\n    content = Input(shape=(sent_length,), dtype='int32')\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n\n    embed = SpatialDropout1D(0.2)(embedding(content))\n\n    rnn_1 = Bidirectional(CuDNNGRU(128, return_sequences=True))(embed)\n    conv_2 = Conv1D(128, 2, kernel_initializer=\"normal\", padding=\"valid\", activation=\"relu\", strides=1)(rnn_1)\n\n    maxpool = GlobalMaxPooling1D()(conv_2)\n    attn = AttentionWeightedAverage()(conv_2)\n    average = GlobalAveragePooling1D()(conv_2)\n\n    x = concatenate([maxpool, attn, average])\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(x))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\ndef get_text_rcnn5(sent_length, embeddings_weight,class_num):\n    print(\"get_text_rcnn5\")\n    content = Input(shape=(sent_length,), dtype='int32')\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n\n    embed = SpatialDropout1D(0.2)(embedding(content))\n\n    rnn_1 = Bidirectional(CuDNNGRU(200, return_sequences=True))(embed)\n    rnn_2 = Bidirectional(CuDNNGRU(200, return_sequences=True))(rnn_1)\n    x = concatenate([rnn_1, rnn_2], axis=2)\n\n    last = Lambda(lambda t: t[:, -1], name='last')(x)\n    maxpool = GlobalMaxPooling1D()(x)\n    attn = AttentionWeightedAverage()(x)\n    average = GlobalAveragePooling1D()(x)\n\n    x = concatenate([last, maxpool, average, attn])\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(x))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\ndef get_text_lstm1(sent_length, 
embeddings_weight,class_num):\n    print(\"get_text_lstm1\")\n    content = Input(shape=(sent_length,), dtype='int32')\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n\n    embed = SpatialDropout1D(0.2)(embedding(content))\n    x = Dropout(0.2)(Bidirectional(CuDNNLSTM(200, return_sequences=True))(embed))\n    semantic = TimeDistributed(Dense(100, activation=\"tanh\"))(x)\n    pool_rnn = Lambda(lambda x: K.max(x, axis=1), output_shape=(100,))(semantic)\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(pool_rnn))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\ndef get_text_lstm2(sent_length, embeddings_weight,class_num):\n    print(\"get_text_lstm2\")\n    content = Input(shape=(sent_length,), dtype='int32')\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n\n    embed = SpatialDropout1D(0.2)(embedding(content))\n    x = Dropout(0.2)(Bidirectional(CuDNNLSTM(200, return_sequences=True))(embed))\n    x = Dropout(0.2)(Bidirectional(CuDNNLSTM(100, return_sequences=True))(x))\n    semantic = TimeDistributed(Dense(100, activation=\"tanh\"))(x)\n    pool_rnn = Lambda(lambda x: K.max(x, axis=1), output_shape=(100,))(semantic)\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(pool_rnn))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\ndef get_text_lstm3(sent_length, embeddings_weight,class_num):\n    print(\"get_text_lstm3\")\n    content = Input(shape=(sent_length,), dtype='int32')\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n\n    embed = SpatialDropout1D(0.2)(embedding(content))\n    x = Dropout(0.2)(Bidirectional(CuDNNLSTM(200, return_sequences=True))(embed))\n    x = Conv1D(64, kernel_size=3, padding='valid', kernel_initializer='glorot_uniform')(x)\n\n    avg_pool = GlobalAveragePooling1D()(x)\n    max_pool = GlobalMaxPooling1D()(x)\n    x = concatenate([avg_pool, max_pool])\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(x))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\ndef get_text_lstm_attention(sent_length, embeddings_weight,class_num):\n    print(\"get_text_lstm_attention\")\n    
content = Input(shape=(sent_length,), dtype='int32')\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n\n    embedded_sequences = SpatialDropout1D(0.2)(embedding(content))\n    x = Dropout(0.25)(CuDNNLSTM(200, return_sequences=True)(embedded_sequences))\n    merged = Attention(sent_length)(x)\n    merged = Dense(100, activation='relu')(merged)\n    merged = Dropout(0.25)(merged)\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(merged))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\ndef get_text_dpcnn(sent_length, embeddings_weight,class_num):\n    print(\"get_text_dpcnn\")\n    content = Input(shape=(sent_length,), dtype='int32')\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n\n    embed = SpatialDropout1D(0.2)(embedding(content))\n\n    block1 = Conv1D(128, kernel_size=3, padding='same', activation='linear')(embed)\n    block1 = BatchNormalization()(block1)\n    block1 = PReLU()(block1)\n    block1 = Conv1D(128, kernel_size=3, padding='same', activation='linear')(block1)\n    block1 = BatchNormalization()(block1)\n    block1 = PReLU()(block1)\n\n    resize_emb = Conv1D(128, kernel_size=3, padding='same', activation='linear')(embed)\n    resize_emb = PReLU()(resize_emb)\n\n    block1_output = add([block1, resize_emb])\n    block1_output = MaxPooling1D(pool_size=10)(block1_output)\n\n    block2 = Conv1D(128, kernel_size=4, padding='same', activation='linear')(block1_output)\n    block2 = BatchNormalization()(block2)\n    block2 = PReLU()(block2)\n    block2 = Conv1D(128, kernel_size=4, padding='same', activation='linear')(block2)\n    block2 = BatchNormalization()(block2)\n    block2 = PReLU()(block2)\n\n    block2_output = add([block2, block1_output])\n    block2_output = MaxPooling1D(pool_size=10)(block2_output)\n\n    block3 = Conv1D(128, kernel_size=5, padding='same', activation='linear')(block2_output)\n    block3 = BatchNormalization()(block3)\n    block3 = PReLU()(block3)\n    block3 = Conv1D(128, kernel_size=5, padding='same', activation='linear')(block3)\n    block3 = BatchNormalization()(block3)\n    block3 = PReLU()(block3)\n\n    output = add([block3, block2_output])\n    maxpool = GlobalMaxPooling1D()(output)\n    average = GlobalAveragePooling1D()(output)\n\n    x = concatenate([maxpool, average])\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(x))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    #model = multi_gpu_model(model, 2)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\n\n\n\n\n\ndef bi_gru_model(sent_length, embeddings_weight,class_num):\n    print(\"get_text_gru3\")\n    content = Input(shape=(sent_length,), dtype='int32')\n    embedding = 
Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n\n    x = SpatialDropout1D(0.2)(embedding(content))\n\n    x = Bidirectional(CuDNNGRU(200, return_sequences=True))(x)\n    x = Bidirectional(CuDNNGRU(200, return_sequences=True))(x)\n\n    avg_pool = GlobalAveragePooling1D()(x)\n    max_pool = GlobalMaxPooling1D()(x)\n\n    conc = concatenate([avg_pool, max_pool])\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(conc))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\ndef bi_gru_model_binary(sent_length, embeddings_weight,class_num):\n    print(\"bi_gru_model_binary\")\n    content = Input(shape=(sent_length,), dtype='int32')\n    embedding = Embedding(\n        name=\"word_embedding\",\n        input_dim=embeddings_weight.shape[0],\n        weights=[embeddings_weight],\n        output_dim=embeddings_weight.shape[1],\n        trainable=False)\n\n    x = SpatialDropout1D(0.2)(embedding(content))\n\n    x = Bidirectional(CuDNNGRU(200, return_sequences=True))(x)\n    x = Bidirectional(CuDNNGRU(200, return_sequences=True))(x)\n\n    avg_pool = GlobalAveragePooling1D()(x)\n    max_pool = GlobalMaxPooling1D()(x)\n\n    conc = concatenate([avg_pool, max_pool])\n\n    x = Dropout(0.2)(Activation(activation=\"relu\")(BatchNormalization()(Dense(1000)(conc))))\n    x = Activation(activation=\"relu\")(BatchNormalization()(Dense(500)(x)))\n    output = Dense(class_num, activation=\"softmax\")(x)\n\n    model = Model(inputs=content, outputs=output)\n    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n    return model\n\n\n"
  },
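All factories in TextModel.py share the signature `(sent_length, embeddings_weight, class_num)`, where `embeddings_weight` is a frozen word-index-to-vector matrix. A hedged usage sketch, assuming a Keras `Tokenizer` fitted on the app-list strings and an already trained gensim `Word2Vec` model; `app_lists`, `w2v` and `labels_onehot` are stand-ins for the pipeline's own objects, not names from this repo:

```python
import numpy as np
from keras.preprocessing import text, sequence
from TextModel import bi_gru_model

def build_embedding_matrix(tokenizer, w2v):
    # row i holds the word2vec vector of the token with index i; unseen tokens stay zero
    matrix = np.zeros((len(tokenizer.word_index) + 1, w2v.vector_size))
    for word, idx in tokenizer.word_index.items():
        if word in w2v.wv:
            matrix[idx] = w2v.wv[word]
    return matrix

sent_length, class_num = 300, 22
tokenizer = text.Tokenizer()
tokenizer.fit_on_texts(app_lists)  # app_lists: ["app1 app2 ...", ...] per device
seqs = sequence.pad_sequences(tokenizer.texts_to_sequences(app_lists), maxlen=sent_length)

model = bi_gru_model(sent_length, build_embedding_matrix(tokenizer, w2v), class_num)
model.fit(seqs, labels_onehot, batch_size=128, epochs=5)  # labels_onehot: shape (n, 22)
```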
  {
    "path": "THLUO/readme.md",
    "content": "本代码运行在windows10, 48G内存, 1070ti显卡上, 由于运行的py文件比较多, 所以需要比较长的时间才能跑完\r\n\r\n文件夹说明:\r\n> cache文件夹是存放输出模型的文件夹\r\n> embedding是存放w2c词嵌入的文件夹\r\n> input是存放本次比赛数据的文件夹\r\n> result是THLUO选手最终的结果\r\n\r\n下面是每个py文件的功能介绍:\r\n* 1.w2c_model_start.py\t根据device打开app的时间对app进行排序，形成app_list, 将app开作词，device_id看成文档，对app进行embedding\r\n* 2.w2c_model_close.py\t根据device关闭app的时间对app进行排序，形成app_list, 将app开作词，device_id看成文档，对app进行embedding\r\n* 3.w2c_model_all.py\t根据device打开关闭app的时间合在对app进行排序，形成app_list, 将app开作词，device_id看成文档，对app进行embedding\r\n* 4.device_quchong_start_app_w2c.py\t根据device打开app的时间对app进行排序，形成app_list, 对app_list进行去重操作, 将app开作词，device_id看成文档，对app进行embedding\r\n* 5.device_age_prob_oof.py\t单独对用户年龄进行预测\r\n* 6.device_sex_prob_oof.py\t单独对用户性别进行预测\r\n* 7.start_close_age_prob_oof.py\t对app所属的年龄概率进行预测\r\n* 8.start_close_sex_prob_oof.py\t对app所属的性别概率进行预测\r\n* 9.sex_age_bin_prob_oof.py\t用2分类的手法来预测用户属于性别-年龄的概率\r\n* 10.age_bin_prob_oof.py\t用2分类的手法来预测用户属于年龄的概率\r\n* 11.hcc_device_brand_age_sex.py\t 手机品牌和手机类型属于High Cardinality Categorical,  参考论文A Preprocessing Scheme for High-Cardinality Categorical Attributes in Classification and Prediction Problems，对手机品牌和手机类型属于性别年龄的概率进行预测\r\n* 12.device_age_regression_prob_oof.py\t用回归的手法对用户属于年龄的概率进行预测\r\n* 13.device_start_GRU_pred.py\t\t根据device打开app的时间对app进行排序，形成app_list，将app开作词，device_id看成文档，跑了一个GRU文本模型对用户属于性别年龄的概率进行预测\r\n* 14.device_start_GRU_pred_age.py\t\t根据device打开app的时间对app进行排序，形成app_list，将app开作词，device_id看成文档，跑了一个GRU文本模型对用户属于年龄的概率进行预测\r\n* 15.device_all_GRU_pred.py\t根据device打开关闭app的时间合在对app进行排序，形成app_list, 将app开作词，device_id看成文档，跑了一个GRU文本模型对用户属于性别年龄的概率进行预测\r\n* 16.device_start_capsule_pred.py\t\t用capsule模型对用户属于性别年龄的概率进行预测\r\n* 17.device_start_textcnn_pred.py\t\t用textcnn模型对用户属于性别年龄的概率进行预测\r\n* 18.device_start_text_dpcnn_pred.py\t用dpcnn模型对用户属于性别年龄的概率进行预测\r\n* 19.device_start_lstm_pred.py\t用lstm模型对用户属于性别年龄的概率进行预测\r\n* 20.lgb_sex_age_prob_oof.py\t\t一个基础的模型，对用户属于性别年龄的概率进行预测\r\n* 21.tfidf_lr_sex_age_prob_oof.py\t对app进行tf-idf操作，用户LR训练一个模型来预测用户的性别年龄概率\r\n* 22.base_feat.py\t\t生成基础人工特征+上面产出的概率模型特征\r\n* 23.ATT_v6.py\t用attention模型对22.base_feat.py产出的特征进行训练，来计算用户属于性别年龄的概率\r\n* 24.thluo_22_lgb.py\t用lgb训练一个22多分类模型，输出test概率文件\r\n* 25.thluo_22_xgb.py\t用xgb训练一个22多分类模型，输出test概率文件\r\n* 26.thluo_nb_lgb.py\t用lgb训练一个条件分类模型，输出test概率文件，条件概率模型指的是先预测p(sex) 再预测p(age|sex),最终p(sex, age) = p(sex) * p(age|sex)\r\n* 27.thluo_nb_xgb.py\t用xgb训练一个条件分类模型，输出test概率文件，条件概率模型指的是先预测p(sex) 再预测p(age|sex),最终p(sex, age) = p(sex) * p(age|sex)\r\n* 28.final.py\t \t对上面四个模型产出的结果，进行线性加权融合，形成THLUO选手个人的最终结果\r\n* TextModel.py包含本次比赛用到的文本模型\r\n* util.py里面包含一些共用的函数\r\n\r\n\r\n\r\n\r\n> note:因为本次比赛提交代码的时间比较仓促，之前一直都是用notebook来做比赛，所以如有问题，请联系团队\r\n"
  },
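As a worked example of the conditional decomposition the readme describes for 26/27.thluo_nb_*.py: the 22-class joint probability is assembled from a 2-class sex model and two 11-class age models. The array names and the column order (sex 1 ages 0-10, then sex 2 ages 0-10) are assumptions for illustration; the actual scripts build these arrays from their LightGBM/XGBoost outputs:

```python
import numpy as np

def combine_sex_age(p_sex, p_age_given_1, p_age_given_2):
    """p_sex: (n, 2) probabilities for sex=1 and sex=2;
    p_age_given_*: (n, 11) age probabilities conditioned on each sex.
    Returns (n, 22) joint probabilities ordered 1-0..1-10, 2-0..2-10."""
    joint_1 = p_sex[:, [0]] * p_age_given_1  # p(sex=1) * p(age | sex=1)
    joint_2 = p_sex[:, [1]] * p_age_given_2  # p(sex=2) * p(age | sex=2)
    joint = np.hstack([joint_1, joint_2])
    return joint / joint.sum(axis=1, keepdims=True)  # renormalize against rounding drift
```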
  {
    "path": "THLUO/util.py",
    "content": "import os\nimport re\nimport sys\nimport pandas as pd\nimport numpy as np\nimport gensim\nfrom gensim.models import Word2Vec\nfrom gensim.models.word2vec import LineSentence\nimport tensorflow as tf\nimport keras\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.optimizers import *\nfrom keras.callbacks import *\nfrom keras.preprocessing import text, sequence\nfrom keras.utils import to_categorical\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.utils import np_utils\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import  accuracy_score\nfrom sklearn.preprocessing import LabelEncoder\nimport warnings\nwarnings.filterwarnings('ignore')\nimport os\nimport gc\nimport random\nfrom keras.engine.topology import Layer\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.metrics import f1_score\nimport jieba\nimport numpy as np\n\n\n\ngru_len = 128\nRoutings = 5\nNum_capsule = 10\nDim_capsule = 16\ndropout_p = 0.25\nrate_drop_dense = 0.28\n\n\n\nclass Attention(Layer):\n    def __init__(self, step_dim,\n                 W_regularizer=None, b_regularizer=None,\n                 W_constraint=None, b_constraint=None,\n                 bias=True, **kwargs):\n        \"\"\"\n        Keras Layer that implements an Attention mechanism for temporal data.\n        Supports Masking.\n        Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]\n        # Input shape\n            3D tensor with shape: `(samples, steps, features)`.\n        # Output shape\n            2D tensor with shape: `(samples, features)`.\n        :param kwargs:\n        Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.\n        The dimensions are inferred based on the output shape of the RNN.\n        Example:\n            model.add(LSTM(64, return_sequences=True))\n            model.add(Attention())\n        \"\"\"\n        self.supports_masking = True\n        #self.init = initializations.get('glorot_uniform')\n        self.init = initializers.get('glorot_uniform')\n\n        self.W_regularizer = regularizers.get(W_regularizer)\n        self.b_regularizer = regularizers.get(b_regularizer)\n\n        self.W_constraint = constraints.get(W_constraint)\n        self.b_constraint = constraints.get(b_constraint)\n\n        self.bias = bias\n        self.step_dim = step_dim\n        self.features_dim = 0\n        super(Attention, self).__init__(**kwargs)\n\n    def build(self, input_shape):\n        assert len(input_shape) == 3\n\n        self.W = self.add_weight((input_shape[-1],),\n                                 initializer=self.init,\n                                 name='{}_W'.format(self.name),\n                                 regularizer=self.W_regularizer,\n                                 constraint=self.W_constraint)\n        self.features_dim = input_shape[-1]\n\n        if self.bias:\n            self.b = self.add_weight((input_shape[1],),\n                                     initializer='zero',\n                                     name='{}_b'.format(self.name),\n                                     regularizer=self.b_regularizer,\n                                     constraint=self.b_constraint)\n        else:\n            self.b = None\n\n        self.built = True\n\n    def compute_mask(self, input, input_mask=None):\n        # do not pass the mask to the next layers\n        return None\n\n    def call(self, x, mask=None):\n        # eij = K.dot(x, self.W) TF backend doesn't 
support it\n\n        # features_dim = self.W.shape[0]\n        # step_dim = x._keras_shape[1]\n\n        features_dim = self.features_dim\n        step_dim = self.step_dim\n\n        eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)), K.reshape(self.W, (features_dim, 1))), (-1, step_dim))\n\n        if self.bias:\n            eij += self.b\n\n        eij = K.tanh(eij)\n\n        a = K.exp(eij)\n\n        # apply mask after the exp. will be re-normalized next\n        if mask is not None:\n            # Cast the mask to floatX to avoid float64 upcasting in theano\n            a *= K.cast(mask, K.floatx())\n\n        # in some cases especially in the early stages of training the sum may be almost zero\n        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())\n\n        a = K.expand_dims(a)\n        weighted_input = x * a\n    #print weigthted_input.shape\n        return K.sum(weighted_input, axis=1)\n\n    def compute_output_shape(self, input_shape):\n        #return input_shape[0], input_shape[-1]\n        return input_shape[0],  self.features_dim\n\n\ndef squash(x, axis=-1):\n    s_squared_norm = K.sum(K.square(x), axis, keepdims=True)\n    scale = K.sqrt(s_squared_norm + K.epsilon())\n    return x / scale\n\n# A Capsule Implement with Pure Keras\nclass Capsule(Layer):\n    def __init__(self, num_capsule, dim_capsule, routings=3, kernel_size=(9, 1), share_weights=True,\n                 activation='default', **kwargs):\n        super(Capsule, self).__init__(**kwargs)\n        self.num_capsule = num_capsule\n        self.dim_capsule = dim_capsule\n        self.routings = routings\n        self.kernel_size = kernel_size\n        self.share_weights = share_weights\n        if activation == 'default':\n            self.activation = squash\n        else:\n            self.activation = Activation(activation)\n\n    def build(self, input_shape):\n        super(Capsule, self).build(input_shape)\n        input_dim_capsule = input_shape[-1]\n        if self.share_weights:\n            self.W = self.add_weight(name='capsule_kernel',\n                                     shape=(1, input_dim_capsule,\n                                            self.num_capsule * self.dim_capsule),\n                                     # shape=self.kernel_size,\n                                     initializer='glorot_uniform',\n                                     trainable=True)\n        else:\n            input_num_capsule = input_shape[-2]\n            self.W = self.add_weight(name='capsule_kernel',\n                                     shape=(input_num_capsule,\n                                            input_dim_capsule,\n                                            self.num_capsule * self.dim_capsule),\n                                     initializer='glorot_uniform',\n                                     trainable=True)\n\n    def call(self, u_vecs):\n        if self.share_weights:\n            u_hat_vecs = K.conv1d(u_vecs, self.W)\n        else:\n            u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])\n\n        batch_size = K.shape(u_vecs)[0]\n        input_num_capsule = K.shape(u_vecs)[1]\n        u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,\n                                            self.num_capsule, self.dim_capsule))\n        u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))\n        # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]\n\n        b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, 
num_capsule, input_num_capsule]\n        for i in range(self.routings):\n            b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]\n            c = K.softmax(b)\n            c = K.permute_dimensions(c, (0, 2, 1))\n            b = K.permute_dimensions(b, (0, 2, 1))\n            outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))\n            if i < self.routings - 1:\n                b = K.batch_dot(outputs, u_hat_vecs, [2, 3])\n\n        return outputs\n\n    def compute_output_shape(self, input_shape):\n        return (None, self.num_capsule, self.dim_capsule)\n\n\nclass AttentionWeightedAverage(Layer):\n    \"\"\"\n    Computes a weighted average of the different channels across timesteps.\n    Uses 1 parameter pr. channel to compute the attention value for a single timestep.\n    \"\"\"\n\n    def __init__(self, return_attention=False, **kwargs):\n        self.init = initializers.get('uniform')\n        self.supports_masking = True\n        self.return_attention = return_attention\n        super(AttentionWeightedAverage, self).__init__(**kwargs)\n\n    def build(self, input_shape):\n        self.input_spec = [InputSpec(ndim=3)]\n        assert len(input_shape) == 3\n\n        self.W = self.add_weight(shape=(input_shape[2], 1),\n                                 name='{}_W'.format(self.name),\n                                 initializer=self.init)\n        self.trainable_weights = [self.W]\n        super(AttentionWeightedAverage, self).build(input_shape)\n\n    def call(self, x, mask=None):\n        # computes a probability distribution over the timesteps\n        # uses 'max trick' for numerical stability\n        # reshape is done to avoid issue with Tensorflow\n        # and 1-dimensional weights\n        logits = K.dot(x, self.W)\n        x_shape = K.shape(x)\n        logits = K.reshape(logits, (x_shape[0], x_shape[1]))\n        ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))\n\n        # masked timesteps have zero weight\n        if mask is not None:\n            mask = K.cast(mask, K.floatx())\n            ai = ai * mask\n        att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())\n        weighted_input = x * K.expand_dims(att_weights)\n        result = K.sum(weighted_input, axis=1)\n        if self.return_attention:\n            return [result, att_weights]\n        return result\n\n    def get_output_shape_for(self, input_shape):\n        return self.compute_output_shape(input_shape)\n\n    def compute_output_shape(self, input_shape):\n        output_len = input_shape[2]\n        if self.return_attention:\n            return [(input_shape[0], output_len), (input_shape[0], input_shape[1])]\n        return (input_shape[0], output_len)\n\n    def compute_mask(self, input, input_mask=None):\n        if isinstance(input_mask, list):\n            return [None] * len(input_mask)\n        else:\n            return None\n\n\nclass KMaxPooling(Layer):\n    \"\"\"\n    K-max pooling layer that extracts the k-highest activations from a sequence (2nd dimension).\n    TensorFlow backend.\n    \"\"\"\n\n    def __init__(self, k=1, **kwargs):\n        super().__init__(**kwargs)\n        self.input_spec = InputSpec(ndim=3)\n        self.k = k\n\n    def compute_output_shape(self, input_shape):\n        return (input_shape[0], (input_shape[2] * self.k))\n\n    def call(self, inputs):\n        # swap last two dimensions since top_k will be applied along the last dimension\n        shifted_input = tf.transpose(inputs, 
[0, 2, 1])\n\n        # extract top_k, returns two tensors [values, indices]\n        top_k = tf.nn.top_k(shifted_input, k=self.k, sorted=True, name=None)[0]\n\n        # return flattened output\n        return Flatten()(top_k)\n\n\ndef performance(f):  # decorator: wraps the given function and returns the wrapped version\n    def fn(*args, **kw):  # the wrapper around the decorated function\n        t_start = time.time()  # record the start time\n        r = f(*args, **kw)  # call the wrapped function\n        t_end = time.time()  # record the end time\n        print('call %s() in %fs' % (f.__name__, (t_end - t_start)))  # report which function was called and how long it took\n        return r  # return the wrapped function's result\n    return fn\n\nfrom keras import backend as K\n\ndef f1(y_true, y_pred):\n    def recall(y_true, y_pred):\n        \"\"\"Recall metric.\n\n        Only computes a batch-wise average of recall.\n\n        Computes the recall, a metric for multi-label classification of\n        how many relevant items are selected.\n        \"\"\"\n        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n        recall = true_positives / (possible_positives + K.epsilon())\n        return recall\n\n    def precision(y_true, y_pred):\n        \"\"\"Precision metric.\n\n        Only computes a batch-wise average of precision.\n\n        Computes the precision, a metric for multi-label classification of\n        how many selected items are relevant.\n        \"\"\"\n        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n        precision = true_positives / (predicted_positives + K.epsilon())\n        return precision\n    precision = precision(y_true, y_pred)\n    recall = recall(y_true, y_pred)\n    return 2*((precision*recall)/(precision+recall+K.epsilon()))\n\n\ndef evalation_score(y_true, y_pred):\n    # turn the probability matrix into hard one-hot predictions via argmax\n    for row, column in zip(range(y_pred.shape[0]), np.argmax(y_pred, axis=1)):\n        y_pred[row, column] = 1\n    y_pred[y_pred < 1] = 0\n    y_pred = y_pred.astype(\"int\")\n\n    macro = f1_score(y_true, y_pred, average='macro')\n    micro = f1_score(y_true, y_pred, average='micro')\n    print(macro, micro)\n    score = (macro + micro) / 2\n    print(score)\n    return score\n"
  },
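  {
    "path": "_editor_examples/capsule_usage_sketch.py",
    "content": "# Editorial sketch, NOT part of the original solution: a minimal example of how the\n# custom layers defined in the file above (Capsule with dynamic routing, plus the\n# batch-wise f1 metric) could be wired into a small Keras classifier. The module name\n# 'thluo_layers' is a placeholder for wherever Capsule/f1 actually live; vocabulary\n# size, sequence length and class count are made up for the example.\nfrom keras.layers import Input, Embedding, Bidirectional, GRU, Flatten, Dense\nfrom keras.models import Model\nfrom thluo_layers import Capsule, f1  # placeholder import, see note above\n\n\ndef build_example_model(vocab_size=1000, maxlen=50, n_classes=6):\n    seq = Input(shape=(maxlen,))\n    emb = Embedding(vocab_size, 64)(seq)                       # app-id embeddings\n    rnn = Bidirectional(GRU(64, return_sequences=True))(emb)   # sequence encoder\n    caps = Capsule(num_capsule=8, dim_capsule=16, routings=3)(rnn)  # -> (None, 8, 16)\n    out = Dense(n_classes, activation='softmax')(Flatten()(caps))\n    model = Model(inputs=seq, outputs=out)\n    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[f1])\n    return model\n"
  },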
  {
    "path": "THLUO/代码运行.bat",
    "content": "python 1.w2c_model_start.py\t\r\npython 2.w2c_model_close.py\t\r\npython 3.w2c_model_all.py\t\r\npython 3.device_quchong_start_app_w2c.py\r\npython 3.w2c_all_emb.py\r\npython 4.device_age_prob_oof.py\t\r\npython 5.device_sex_prob_oof.py\t\r\npython 6.start_close_age_prob_oof.py\t\r\npython 7.start_close_sex_prob_oof.py\t\r\npython 9.sex_age_bin_prob_oof.py\t\r\npython 10.age_bin_prob_oof.py\t\r\npython 11.hcc_device_brand_age_sex.py\t \r\npython 12.device_age_regression_prob_oof.py\t\r\npython 13.device_start_GRU_pred.py\t\t\r\npython 14.device_start_GRU_pred_age.py\t\t\r\npython 15.device_all_GRU_pred.py\t\r\npython 16.device_start_capsule_pred.py\t\t\r\npython 17.device_start_textcnn_pred.py\t\t\r\npython 18.device_start_text_dpcnn_pred.py\t\r\npython 19.device_start_lstm_pred.py\t\r\npython 20.lgb_sex_age_prob_oof.py\t\t\r\npython 21.tfidf_lr_sex_age_prob_oof.py\t\r\npython 22.base_feat.py\t\t\r\npython 23.ATT_v6.py\t\r\npython 24.thluo_22_lgb.py\t\r\npython 25.thluo_22_xgb.py\t\r\npython 26.thluo_nb_lgb.py\t\r\npython 27.thluo_nb_xgb.py\t\r\npython 28.final.py\t \t"
  },
  {
    "path": "chizhu/readme.txt",
    "content": "|-single_model/\n    |-data/ 处理后的特征和数据存放位置\n    |-model/ 模型文件\n    |-submit 模型概率文件，可用作stacking材料\n    |-config.py 配置原始文件路径\n    |-user_behavior.py 得到user_behavior特征集\n    |-get_nn_feat.py 获得nn 的统计特征输入\n    |-lgb.py\n    |-xgb.py\n    |-xgb_nb.py 条件概率\n    |-cnn.py\n    |-deepnn.py \n    |-yg_best_nn.py \n|-stacking/\n    |-all_feat/ 使用全部概率文件的xgb的条件概率\n    |-nurbs_feat/ 使用rurbs概率文件的xgb的22分类以及条件概率\n        |-xgb_nurbs_nb.py 条件概率\n        |-xgb_22.py 22分类\n|-util/\n    |-bagging.py  加权融合脚本\n    |-get_nn_res.py 获得nn概率文件和可提交的结果\n\n\n使用说明:\nsingle_model:1)先配置config.py 里的文件路径\n             2)运行user_behavior.py \n             3)运行get_nn_feat.py \n             4)然后可以逐个运行nn或者tree模型，得到的概率文件在submit/\n\nstacking：这里直接运行是不行的 因为需要概率文件，大小在2G左右，没有附上，之后可以找我们要\nutil:加权用，这里需要的是stacking/nurbs_feat下的xgb_22.py和_xgbnb.py产生的结果取均值得到一份结果,xgb_22_nb.csv\n\n"
  },
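  {
    "path": "_editor_examples/readme_xgb_22_nb_mean_sketch.py",
    "content": "# Editorial sketch, NOT part of the original repo: readme.txt above says the util step\n# needs xgb_22_nb.csv, the mean of the results produced by stacking/nurbs_feat/xgb_22.py\n# and xgb_nurbs_nb.py. This is a minimal version of that averaging; the input file names\n# and the DeviceID + 22 probability-column layout are assumptions based on the rest of\n# the pipeline.\nimport pandas as pd\n\na = pd.read_csv('xgb_22.csv')          # assumed output of xgb_22.py\nb = pd.read_csv('xgb_nurbs_nb.csv')    # assumed output of xgb_nurbs_nb.py\n# assumes both files share the same DeviceID row order\nprob_cols = [c for c in a.columns if c != 'DeviceID']\nblend = a[['DeviceID']].copy()\nblend[prob_cols] = (a[prob_cols].values + b[prob_cols].values) / 2.0  # simple mean\nblend.to_csv('xgb_22_nb.csv', index=False)\n"
  },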
  {
    "path": "chizhu/single_model/cnn.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom datetime import datetime, timedelta\nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n# get_ipython().run_line_magic('matplotlib', 'inline')\n\n#add\n# from category_encoders import OrdinalEncoder\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom scipy.sparse import hstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\nfrom gensim.models import FastText, Word2Vec\nimport re\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.preprocessing.text import Tokenizer, text_to_word_sequence\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing import text, sequence\nfrom keras.callbacks import *\nfrom keras.layers.advanced_activations import LeakyReLU, PReLU\nimport keras.backend as K\nfrom keras.optimizers import *\nfrom keras.utils import to_categorical\nfrom keras.utils import multi_gpu_model\n\nimport tensorflow as tf\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\nfrom keras.backend.tensorflow_backend import set_session\nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.9\nset_session(tf.Session(config=config))\nfrom config import path\n# path = \"/dev/shm/chizhu_data/data/\"\n\n\n# In[2]:\n\n\npackages = pd.read_csv(path+'deviceid_packages.tsv',\n                       sep='\\t', names=['device_id', 'apps'])\ntest = pd.read_csv(path+'deviceid_test.tsv', sep='\\t', names=['device_id'])\ntrain = pd.read_csv(path+'deviceid_train.tsv', sep='\\t',\n                    names=['device_id', 'sex', 'age'])\n\nbrand = pd.read_table(path+'deviceid_brand.tsv',\n                      names=['device_id', 'vendor', 'version'])\nbehave_train = pd.read_csv('data/train_statistic_feat.csv')\nbehave_test = pd.read_csv('data/test_statistic_feat.csv')\n\n\n# In[3]:\n\n\nbehave_train.drop(['sex', 'age', 'label', 'app'], 1, inplace=True)\nbehave_test.drop(['sex', 'age', 'label', 'app'], 1, inplace=True)\n\n\n# In[4]:\n\n\nbrand['phone_version'] = brand['vendor'] + ' ' + brand['version']\ntrain = pd.merge(brand[['device_id', 'phone_version']],\n                 train, on='device_id', how='right')\ntest = pd.merge(brand[['device_id', 'phone_version']],\n                test, on='device_id', how='right')\n\n\n# In[5]:\n\n\ntrain = pd.merge(train, behave_train, on='device_id', how='left')\ntest = pd.merge(test, behave_test, on='device_id', how='left')\n\n\n# In[6]:\n\n\npackages['app_lenghth'] = packages['apps'].apply(\n    lambda x: x.split(',')).apply(lambda x: len(x))\npackages['app_list'] = packages['apps'].apply(lambda x: x.split(','))\ntrain = pd.merge(train, packages, on='device_id', how='left')\ntest = pd.merge(test, packages, on='device_id', how='left')\n\n\n# In[7]:\n\n\nembed_size = 128\nfastmodel = Word2Vec(list(packages['app_list']), size=embed_size, window=4, min_count=3, negative=2,\n                     sg=1, sample=0.002, hs=1, workers=4)\n\nembedding_fast = pd.DataFrame([fastmodel[word]\n                               for word in (fastmodel.wv.vocab)])\nembedding_fast['app'] = list(fastmodel.wv.vocab)\nembedding_fast.columns = 
[\"fdim_%s\" %\n                          str(i) for i in range(embed_size)]+[\"app\"]\n\n\n# In[8]:\n\n\ntokenizer = Tokenizer(lower=False, char_level=False, split=',')\n\ntokenizer.fit_on_texts(list(packages['apps']))\n\nX_seq = tokenizer.texts_to_sequences(train['apps'])\nX_test_seq = tokenizer.texts_to_sequences(test['apps'])\n\nmaxlen = 50\nX = pad_sequences(X_seq, maxlen=maxlen, value=0)\nX_test = pad_sequences(X_test_seq, maxlen=maxlen, value=0)\nY_sex = train['sex']-1\n\n\n# In[9]:\n\n\nmax_feaures = 35001\nembedding_matrix = np.zeros((max_feaures, embed_size))\nfor word in tokenizer.word_index:\n    if word not in fastmodel.wv.vocab:\n        continue\n    embedding_matrix[tokenizer.word_index[word]] = fastmodel[word]\n\n\n# In[10]:\n\n\n# behave_train=behave_train.loc[:,\"ph_ver_0\":'week_day_6']\n# behave_test=behave_test.loc[:,\"h0\":'week_day_6']\nbehave_train = pd.merge(train[['device_id']],\n                        behave_train, on='device_id', how=\"left\")\nbehave_test = pd.merge(test[['device_id']],\n                       behave_test, on='device_id', how=\"left\")\nX_h = behave_train.iloc[:, 1:].values\nX_h_test = behave_test.iloc[:, 1:].values\n\n\n# In[11]:\n\n\nclass AdamW(Optimizer):\n    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, weight_decay=1e-4,  # decoupled weight decay (1/4)\n                 epsilon=1e-8, decay=0., **kwargs):\n        super(AdamW, self).__init__(**kwargs)\n        with K.name_scope(self.__class__.__name__):\n            self.iterations = K.variable(0, dtype='int64', name='iterations')\n            self.lr = K.variable(lr, name='lr')\n            self.beta_1 = K.variable(beta_1, name='beta_1')\n            self.beta_2 = K.variable(beta_2, name='beta_2')\n            self.decay = K.variable(decay, name='decay')\n            # decoupled weight decay (2/4)\n            self.wd = K.variable(weight_decay, name='weight_decay')\n        self.epsilon = epsilon\n        self.initial_decay = decay\n\n    @interfaces.legacy_get_updates_support\n    def get_updates(self, loss, params):\n        grads = self.get_gradients(loss, params)\n        self.updates = [K.update_add(self.iterations, 1)]\n        wd = self.wd  # decoupled weight decay (3/4)\n\n        lr = self.lr\n        if self.initial_decay > 0:\n            lr *= (1. / (1. + self.decay * K.cast(self.iterations,\n                                                  K.dtype(self.decay))))\n\n        t = K.cast(self.iterations, K.floatx()) + 1\n        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /\n                     (1. - K.pow(self.beta_1, t)))\n\n        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n        self.weights = [self.iterations] + ms + vs\n\n        for p, g, m, v in zip(params, grads, ms, vs):\n            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g\n            v_t = (self.beta_2 * v) + (1. 
- self.beta_2) * K.square(g)\n            # decoupled weight decay (4/4)\n            p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon) - lr * wd * p\n\n            self.updates.append(K.update(m, m_t))\n            self.updates.append(K.update(v, v_t))\n            new_p = p_t\n\n            # Apply constraints.\n            if getattr(p, 'constraint', None) is not None:\n                new_p = p.constraint(new_p)\n\n            self.updates.append(K.update(p, new_p))\n        return self.updates\n\n    def get_config(self):\n        config = {'lr': float(K.get_value(self.lr)),\n                  'beta_1': float(K.get_value(self.beta_1)),\n                  'beta_2': float(K.get_value(self.beta_2)),\n                  'decay': float(K.get_value(self.decay)),\n                  'weight_decay': float(K.get_value(self.wd)),\n                  'epsilon': self.epsilon}\n        base_config = super(AdamW, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\n\n# In[12]:\n\n\ndef model_conv1D(embedding_matrix):\n\n    K.clear_session()\n    # The embedding layer containing the word vectors\n    emb_layer = Embedding(\n        input_dim=embedding_matrix.shape[0],\n        output_dim=embedding_matrix.shape[1],\n        weights=[embedding_matrix],\n        input_length=maxlen,\n        trainable=False\n    )\n    lstm_layer = Bidirectional(\n        GRU(128, recurrent_dropout=0.15, dropout=0.15, return_sequences=True))\n\n    # 1D convolutions that can iterate over the word vectors\n    conv1 = Conv1D(filters=128, kernel_size=1,\n                   padding='same', activation='relu',)\n    conv2 = Conv1D(filters=64, kernel_size=2,\n                   padding='same', activation='relu', )\n    conv3 = Conv1D(filters=64, kernel_size=3,\n                   padding='same', activation='relu',)\n    conv5 = Conv1D(filters=32, kernel_size=5,\n                   padding='same', activation='relu',)\n\n    # Define inputs\n    seq = Input(shape=(maxlen,))\n\n    # Run inputs through embedding\n    emb = emb_layer(seq)\n\n    lstm = lstm_layer(emb)\n    # Run through CONV + GAP layers\n    conv1a = conv1(lstm)\n    gap1a = GlobalAveragePooling1D()(conv1a)\n    gmp1a = GlobalMaxPool1D()(conv1a)\n\n    conv2a = conv2(lstm)\n    gap2a = GlobalAveragePooling1D()(conv2a)\n    gmp2a = GlobalMaxPool1D()(conv2a)\n\n    conv3a = conv3(lstm)\n    gap3a = GlobalAveragePooling1D()(conv3a)\n    gmp3a = GlobalMaxPooling1D()(conv3a)\n\n    conv5a = conv5(lstm)\n    gap5a = GlobalAveragePooling1D()(conv5a)\n    gmp5a = GlobalMaxPooling1D()(conv5a)\n\n    hin = Input(shape=(396, ))\n    htime = Dense(64, activation='relu')(hin)\n    merge1 = concatenate([gap1a, gmp1a, htime])\n\n    # The MLP that determines the outcome\n    x = Dropout(0.3)(merge1)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.25)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.25)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.25)(x)\n    x = BatchNormalization()(x)\n    pred = Dense(1, activation='sigmoid')(x)\n\n    # model = Model(inputs=[seq1, seq2, magic_input, distance_input], outputs=pred)\n    model = Model(inputs=[seq, hin], outputs=pred)\n#     model=multi_gpu_model(model,2)\n    model.compile(loss='binary_crossentropy',\n                  optimizer=AdamW(weight_decay=0.08,))\n#     model.summary()\n    return model\n\n\n# In[ ]:\n\n\nkfold = 
StratifiedKFold(n_splits=5, random_state=20, shuffle=True)\nsub1 = np.zeros((X_test.shape[0], ))\noof_pref1 = np.zeros((X.shape[0], 1))\nscore = []\ncount = 0\nfor i, (train_index, test_index) in enumerate(kfold.split(X, Y_sex)):\n    print(\"FOLD | \", count+1)\n    filepath = \"model/sex_weights_best_%d.h5\" % count\n    checkpoint = ModelCheckpoint(\n        filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n    reduce_lr = ReduceLROnPlateau(\n        monitor='val_loss', factor=0.8, patience=2, min_lr=0.0001, verbose=1)\n    earlystopping = EarlyStopping(\n        monitor='val_loss', min_delta=0.0001, patience=6, verbose=1, mode='auto')\n    callbacks = [checkpoint, reduce_lr, earlystopping]\n\n    model_sex = model_conv1D(embedding_matrix)\n    X_tr, X_vl, X_tr2, X_vl2, y_tr, y_vl = X[train_index], X[test_index], X_h[\n        train_index], X_h[test_index], Y_sex[train_index], Y_sex[test_index]\n    hist = model_sex.fit([X_tr, X_tr2], y_tr, batch_size=128, epochs=50, validation_data=([X_vl, X_vl2], y_vl),\n                         callbacks=callbacks, verbose=1, shuffle=True)\n    model_sex.load_weights(filepath)\n    sub1 += np.squeeze(model_sex.predict([X_test, X_h_test]))/kfold.n_splits\n    oof_pref1[test_index] = model_sex.predict([X_vl, X_vl2])\n    score.append(np.min(hist.history['val_loss']))\n    count += 1\nprint('log loss:', np.mean(score))\n# pd.DataFrame(oof_pref1).to_csv('cnn_oof_sex.csv', index=False)\n\n\n# In[ ]:\n\n\noof_pref1 = pd.DataFrame(oof_pref1, columns=['sex2'])\nsub1 = pd.DataFrame(sub1, columns=['sex2'])\nres1 = pd.concat([oof_pref1, sub1])\nres1['sex1'] = 1-res1['sex2']\nres1.to_csv(\"data/res1.csv\", index=False)\n\n\n# In[ ]:\n\n\ndef model_age_conv(embedding_matrix):\n\n    # The embedding layer containing the word vectors\n    K.clear_session()\n    emb_layer = Embedding(\n        input_dim=embedding_matrix.shape[0],\n        output_dim=embedding_matrix.shape[1],\n        weights=[embedding_matrix],\n        input_length=maxlen,\n        trainable=False\n    )\n    lstm_layer = Bidirectional(\n        GRU(128, recurrent_dropout=0.15, dropout=0.15, return_sequences=True))\n\n    # 1D convolutions that can iterate over the word vectors\n    conv1 = Conv1D(filters=128, kernel_size=1,\n                   padding='same', activation='relu',)\n    conv2 = Conv1D(filters=64, kernel_size=2,\n                   padding='same', activation='relu', )\n    conv3 = Conv1D(filters=64, kernel_size=3,\n                   padding='same', activation='relu',)\n    conv5 = Conv1D(filters=32, kernel_size=5,\n                   padding='same', activation='relu',)\n\n    # Define inputs\n    seq = Input(shape=(maxlen,))\n\n    # Run inputs through embedding\n    emb = emb_layer(seq)\n\n    lstm = lstm_layer(emb)\n    # Run through CONV + GAP layers\n    conv1a = conv1(lstm)\n    gap1a = GlobalAveragePooling1D()(conv1a)\n    gmp1a = GlobalMaxPool1D()(conv1a)\n\n    conv2a = conv2(lstm)\n    gap2a = GlobalAveragePooling1D()(conv2a)\n    gmp2a = GlobalMaxPool1D()(conv2a)\n\n    conv3a = conv3(lstm)\n    gap3a = GlobalAveragePooling1D()(conv3a)\n    gmp3a = GlobalMaxPooling1D()(conv3a)\n\n    conv5a = conv5(lstm)\n    gap5a = GlobalAveragePooling1D()(conv5a)\n    gmp5a = GlobalMaxPooling1D()(conv5a)\n\n    hin = Input(shape=(397, ))\n    htime = Dense(64, activation='relu')(hin)\n    merge1 = concatenate([gap1a, gmp1a, htime])\n\n#     merge1 = concatenate([gap1a, gap2a, gap3a, gap5a])\n\n    # The MLP that determines the outcome\n    x = 
Dropout(0.3)(merge1)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    pred = Dense(11, activation='softmax')(x)\n\n    model = Model(inputs=[seq, hin], outputs=pred)\n    model.compile(loss='categorical_crossentropy',\n                  optimizer=AdamW(weight_decay=0.08,))\n#     model.summary()\n    return model\n\n\n# In[ ]:\n\n\nY_age = to_categorical(train['age'])\n\n\n# #### sex1\n\n# In[ ]:\n\n\nbehave_train['sex'] = train['sex']\nbehave_test['sex'] = 1\nX_h = behave_train.iloc[:, 1:].values\nX_h_test = behave_test.iloc[:, 1:].values\n\n\n# In[ ]:\n\n\nsub2 = np.zeros((X_test.shape[0], 11))\noof_pref2 = np.zeros((X.shape[0], 11))\nscore = []\ncount = 0\nfor i, (train_index, test_index) in enumerate(kfold.split(X, train['age'])):\n\n    print(\"FOLD | \", count+1)\n\n    filepath2 = \"model/age_weights_best_%d.h5\" % count\n    checkpoint2 = ModelCheckpoint(\n        filepath2, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n    reduce_lr2 = ReduceLROnPlateau(\n        monitor='val_loss', factor=0.8, patience=2, min_lr=0.0001, verbose=1)\n    earlystopping2 = EarlyStopping(\n        monitor='val_loss', min_delta=0.0001, patience=8, verbose=1, mode='auto')\n    callbacks2 = [checkpoint2, reduce_lr2, earlystopping2]\n\n    model_age = model_age_conv(embedding_matrix)\n    X_tr, X_vl, X_tr2, X_vl2, y_tr, y_vl = X[train_index], X[test_index], X_h[\n        train_index], X_h[test_index], Y_age[train_index], Y_age[test_index]\n    hist = model_age.fit([X_tr, X_tr2], y_tr, batch_size=128, epochs=50, validation_data=([X_vl, X_vl2], y_vl),\n                         callbacks=callbacks2, verbose=1, shuffle=True)\n\n    model_age.load_weights(filepath2)\n    oof_pref2[test_index] = model_age.predict([X_vl, X_vl2])\n    sub2 += model_age.predict([X_test, X_h_test])/kfold.n_splits\n    score.append(np.min(hist.history['val_loss']))\n    count += 1\nprint('log loss:', np.mean(score))\n# pd.DataFrame(oof_pref2).to_csv('cnn_oof_age.csv', index=False)\n\n\n# In[ ]:\n\n\nres2_1 = np.vstack((oof_pref2, sub2))\nres2_1 = pd.DataFrame(res2_1)\nres2_1.to_csv(\"submit/res2_1.csv\", index=False)\n\n\n# ### sex2\n\n# In[ ]:\n\n\nbehave_train['sex'] = train['sex']\nbehave_test['sex'] = 2\nX_h = behave_train.iloc[:, 1:].values\nX_h_test = behave_test.iloc[:, 1:].values\n\n\n# In[ ]:\n\n\nsub2 = np.zeros((X_test.shape[0], 11))\noof_pref2 = np.zeros((X.shape[0], 11))\nscore = []\n\ncount = 0\nfor i, (train_index, test_index) in enumerate(kfold.split(X, train['age'])):\n\n    print(\"FOLD | \", count+1)\n\n    filepath2 = \"model/age_weights_best_%d.h5\" % count\n    checkpoint2 = ModelCheckpoint(\n        filepath2, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n    reduce_lr2 = ReduceLROnPlateau(\n        monitor='val_loss', factor=0.8, patience=2, min_lr=0.0001, verbose=1)\n    earlystopping2 = EarlyStopping(\n        monitor='val_loss', min_delta=0.0001, patience=8, verbose=1, mode='auto')\n    callbacks2 = [checkpoint2, reduce_lr2, earlystopping2]\n\n    model_age = model_age_conv(embedding_matrix)\n    X_tr, X_vl, X_tr2, X_vl2, y_tr, y_vl = X[train_index], X[test_index], X_h[\n        train_index], X_h[test_index], Y_age[train_index], Y_age[test_index]\n    hist = model_age.fit([X_tr, 
X_tr2], y_tr, batch_size=128, epochs=50, validation_data=([X_vl, X_vl2], y_vl),\n                         callbacks=callbacks2, verbose=1, shuffle=True)\n\n    model_age.load_weights(filepath2)\n    oof_pref2[test_index] = model_age.predict([X_vl, X_vl2])\n    sub2 += model_age.predict([X_test, X_h_test])/kfold.n_splits\n    score.append(np.min(hist.history['val_loss']))\n    count += 1\nprint('log loss:', np.mean(score))\n# pd.DataFrame(oof_pref2).to_csv('cnn_oof_age.csv', index=False)\n\n\n# In[ ]:\n\n\nres2_2 = np.vstack((oof_pref2, sub2))\nres2_2 = pd.DataFrame(res2_2)\n\n\n# In[ ]:\n\n\nres2_2.to_csv(\"submit/res2_2.csv\", index=False)\n\n\n# In[ ]:\n\n\nres1.index = range(len(res1))\nres2_1.index = range(len(res2_1))\nres2_2.index = range(len(res2_2))\nfinal_1 = res2_1\nfinal_2 = res2_2\nfor i in range(11):\n    final_1[i] = res1['sex1']*res2_1[i]\n    final_2[i] = res1['sex2']*res2_2[i]\nid_list = pd.concat([train[['device_id']], test[['device_id']]])\nfinal = id_list\nfinal.index = range(len(final))\nfinal.columns = ['DeviceID']\nfinal_pred = pd.concat([final_1, final_2], 1)\nfinal = pd.concat([final, final_pred], 1)\nfinal.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6',\n                 '1-7', '1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4',\n                 '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\nfinal.to_csv('submit/nn_feat.csv', index=False)\n"
  },
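  {
    "path": "_editor_examples/adamw_update_rule_sketch.py",
    "content": "# Editorial sketch, NOT part of the original repo: the AdamW optimizer defined in\n# cnn.py above applies the weight decay decoupled from the gradient step, i.e.\n#   p_t = p - lr_t * m_t / (sqrt(v_t) + eps) - lr * wd * p\n# This restates one update step in plain NumPy so the '(1/4)..(4/4)' comments in the\n# Keras class are easy to follow; all numbers are toy values.\nimport numpy as np\n\n\ndef adamw_step(p, g, m, v, t, lr=0.001, beta_1=0.9, beta_2=0.999, eps=1e-8, wd=1e-4):\n    m = beta_1 * m + (1 - beta_1) * g             # first-moment estimate\n    v = beta_2 * v + (1 - beta_2) * g ** 2        # second-moment estimate\n    lr_t = lr * np.sqrt(1 - beta_2 ** t) / (1 - beta_1 ** t)  # bias-corrected step size\n    p = p - lr_t * m / (np.sqrt(v) + eps) - lr * wd * p       # Adam step + decoupled decay\n    return p, m, v\n\n\n# one toy step on a 3-parameter vector\np, m, v = np.ones(3), np.zeros(3), np.zeros(3)\np, m, v = adamw_step(p, np.array([0.1, -0.2, 0.3]), m, v, t=1)\nprint(p)\n"
  },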
  {
    "path": "chizhu/single_model/config.py",
    "content": "path = \"/Users/chizhu/data/competition_data/易观/\"\n"
  },
  {
    "path": "chizhu/single_model/deepnn.py",
    "content": "import pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom datetime import datetime, timedelta\nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n# %matplotlib inline\n\n#add\nfrom category_encoders import OrdinalEncoder\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom scipy.sparse import hstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\nfrom gensim.models import FastText, Word2Vec\nimport re\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.preprocessing.text import Tokenizer, text_to_word_sequence\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing import text, sequence\nfrom keras.callbacks import *\nfrom keras.layers.advanced_activations import LeakyReLU, PReLU\nimport keras.backend as K\nfrom keras.optimizers import *\nfrom keras.utils import to_categorical\n\npackages = pd.read_csv(path+'deviceid_packages.tsv',\n                       sep='\\t', names=['device_id', 'apps'])\ntest = pd.read_csv(path+'deviceid_test.tsv',\n                   sep='\\t', names=['device_id'])\ntrain = pd.read_csv(path+'deviceid_train.tsv',\n                    sep='\\t', names=['device_id', 'sex', 'age'])\n\nbrand = pd.read_table(path+'deviceid_brand.tsv',\n                      names=['device_id', 'vendor', 'version'])\nbehave = pd.read_csv('data/user_behavior.csv')\n\npackages['app_lenghth'] = packages['apps'].apply(\n    lambda x: x.split(',')).apply(lambda x: len(x))\npackages['app_list'] = packages['apps'].apply(lambda x: x.split(','))\ntrain = pd.merge(train, packages, on='device_id', how='left')\ntest = pd.merge(test, packages, on='device_id', how='left')\n\nembed_size = 128\nfastmodel = Word2Vec(list(packages['app_list']), size=embed_size, window=4, min_count=3, negative=2,\n                     sg=1, sample=0.002, hs=1, workers=4)\n\nembedding_fast = pd.DataFrame([fastmodel[word]\n                               for word in (fastmodel.wv.vocab)])\nembedding_fast['app'] = list(fastmodel.wv.vocab)\nembedding_fast.columns = [\"fdim_%s\" %\n                          str(i) for i in range(embed_size)]+[\"app\"]\n\n\ntokenizer = Tokenizer(lower=False, char_level=False, split=',')\n\ntokenizer.fit_on_texts(list(packages['apps']))\n\nX_seq = tokenizer.texts_to_sequences(train['apps'])\nX_test_seq = tokenizer.texts_to_sequences(test['apps'])\n\nmaxlen = 50\nX = pad_sequences(X_seq, maxlen=maxlen, value=0)\nX_test = pad_sequences(X_test_seq, maxlen=maxlen, value=0)\nY_sex = train['sex']-1\n\nmax_feaures = 35001\nembedding_matrix = np.zeros((max_feaures, embed_size))\nfor word in tokenizer.word_index:\n    if word not in fastmodel.wv.vocab:\n        continue\n    embedding_matrix[tokenizer.word_index[word]] = fastmodel[word]\n\n\nclass AdamW(Optimizer):\n    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, weight_decay=1e-4,  # decoupled weight decay (1/4)\n                 epsilon=1e-8, decay=0., **kwargs):\n        super(AdamW, self).__init__(**kwargs)\n        with K.name_scope(self.__class__.__name__):\n            self.iterations = K.variable(0, dtype='int64', name='iterations')\n            self.lr = K.variable(lr, name='lr')\n            
self.beta_1 = K.variable(beta_1, name='beta_1')\n            self.beta_2 = K.variable(beta_2, name='beta_2')\n            self.decay = K.variable(decay, name='decay')\n            # decoupled weight decay (2/4)\n            self.wd = K.variable(weight_decay, name='weight_decay')\n        self.epsilon = epsilon\n        self.initial_decay = decay\n\n    @interfaces.legacy_get_updates_support\n    def get_updates(self, loss, params):\n        grads = self.get_gradients(loss, params)\n        self.updates = [K.update_add(self.iterations, 1)]\n        wd = self.wd  # decoupled weight decay (3/4)\n\n        lr = self.lr\n        if self.initial_decay > 0:\n            lr *= (1. / (1. + self.decay * K.cast(self.iterations,\n                                                  K.dtype(self.decay))))\n\n        t = K.cast(self.iterations, K.floatx()) + 1\n        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /\n                     (1. - K.pow(self.beta_1, t)))\n\n        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n        self.weights = [self.iterations] + ms + vs\n\n        for p, g, m, v in zip(params, grads, ms, vs):\n            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g\n            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)\n            # decoupled weight decay (4/4)\n            p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon) - lr * wd * p\n\n            self.updates.append(K.update(m, m_t))\n            self.updates.append(K.update(v, v_t))\n            new_p = p_t\n\n            # Apply constraints.\n            if getattr(p, 'constraint', None) is not None:\n                new_p = p.constraint(new_p)\n\n            self.updates.append(K.update(p, new_p))\n        return self.updates\n\n    def get_config(self):\n        config = {'lr': float(K.get_value(self.lr)),\n                  'beta_1': float(K.get_value(self.beta_1)),\n                  'beta_2': float(K.get_value(self.beta_2)),\n                  'decay': float(K.get_value(self.decay)),\n                  'weight_decay': float(K.get_value(self.wd)),\n                  'epsilon': self.epsilon}\n        base_config = super(AdamW, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\n\ndef model_conv1D_sex(embedding_matrix):\n\n    K.clear_session()\n    # The embedding layer containing the word vectors\n    emb_layer = Embedding(\n        input_dim=embedding_matrix.shape[0],\n        output_dim=embedding_matrix.shape[1],\n        weights=[embedding_matrix],\n        input_length=maxlen,\n        trainable=False)\n\n    # Define inputs\n    seq = Input(shape=(maxlen,))\n\n    # Run inputs through embedding\n    emb = emb_layer(seq)\n\n    lstm_layer = Bidirectional(GRU(128, recurrent_dropout=0.15, dropout=0.15,))\n    lstm = lstm_layer(emb)\n\n    translate = TimeDistributed(Dense(128, activation='relu'))\n    t1 = translate(emb)\n    t1 = TimeDistributed(Dropout(0.15))(t1)\n    sum_op = Lambda(lambda x: K.sum(x, axis=1), output_shape=(128,))\n    t1 = sum_op(t1)\n\n    lstm_layer2 = Bidirectional(\n        GRU(128, recurrent_dropout=0.15, dropout=0.15, return_sequences=True))\n\n    # 1D convolutions that can iterate over the word vectors\n    conv1_2 = Conv1D(filters=128, kernel_size=2,\n                     padding='same', activation='relu',)\n\n    lstm2 = lstm_layer2(emb)\n    # Run through CONV + GAP layers\n    conv1a2 = conv1_2(lstm2)\n    gap1a2 = 
GlobalAveragePooling1D()(conv1a2)\n    gmp1a2 = GlobalMaxPool1D()(conv1a2)\n\n    # 1D convolutions that can iterate over the word vectors\n    conv1 = Conv1D(filters=128, kernel_size=1,\n                   padding='same', activation='relu',)\n    conv2 = Conv1D(filters=64, kernel_size=2,\n                   padding='same', activation='relu', )\n    conv3 = Conv1D(filters=64, kernel_size=3,\n                   padding='same', activation='relu',)\n    conv4 = Conv1D(filters=64, kernel_size=4,\n                   padding='same', activation='relu',)\n    conv5 = Conv1D(filters=32, kernel_size=5,\n                   padding='same', activation='relu',)\n    conv6 = Conv1D(filters=32, kernel_size=6,\n                   padding='same', activation='relu',)\n\n    # Run through CONV + GAP layers\n    conv1a = conv1(emb)\n    gap1a = GlobalAveragePooling1D()(conv1a)\n    gmp1a = GlobalMaxPool1D()(conv1a)\n\n    conv2a = conv2(emb)\n    gap2a = GlobalAveragePooling1D()(conv2a)\n    gmp2a = GlobalMaxPool1D()(conv2a)\n\n    conv3a = conv3(emb)\n    gap3a = GlobalAveragePooling1D()(conv3a)\n    gmp3a = GlobalMaxPooling1D()(conv3a)\n\n    conv4a = conv3(emb)\n    gap4a = GlobalAveragePooling1D()(conv4a)\n    #gmp3a = GlobalMaxPooling1D()(conv3a)\n\n    conv5a = conv5(emb)\n    gap5a = GlobalAveragePooling1D()(conv5a)\n    gmp5a = GlobalMaxPooling1D()(conv5a)\n\n    conv6a = conv6(emb)\n    gap6a = GlobalAveragePooling1D()(conv6a)\n\n    #hin = Input(shape=(X_h.shape[1], ))\n    #htime = Dense(X_h.shape[1]//4, activation='relu')(hin)\n\n    merge1 = concatenate([gap1a2, gmp1a2, lstm, t1])\n\n    # The MLP that determines the outcome\n    x = Dropout(0.38)(merge1)\n    #x = BatchNormalization()(x)\n    #x = Dense(200, activation='relu',)(x)\n    #x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    pred = Dense(1, activation='sigmoid')(x)\n\n    # model = Model(inputs=[seq1, seq2, magic_input, distance_input], outputs=pred)\n    model = Model(inputs=seq, outputs=pred)\n    model.compile(loss='binary_crossentropy',\n                  optimizer=AdamW(weight_decay=0.1,))\n\n    return model\n\n\nkfold = StratifiedKFold(n_splits=5, random_state=20, shuffle=True)\nsub1 = np.zeros((X_test.shape[0], ))\noof_pref1 = np.zeros((X.shape[0], 1))\nscore = []\ncount = 0\nfor i, (train_index, test_index) in enumerate(kfold.split(X, Y_sex)):\n    print(\"FOLD | \", count+1)\n    filepath = \"sex_weights_best_%d.h5\" % count\n    checkpoint = ModelCheckpoint(\n        filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n    reduce_lr = ReduceLROnPlateau(\n        monitor='val_loss', factor=0.8, patience=2, min_lr=0.0001, verbose=1)\n    earlystopping = EarlyStopping(\n        monitor='val_loss', min_delta=0.0001, patience=6, verbose=1, mode='auto')\n    callbacks = [checkpoint, reduce_lr, earlystopping]\n\n    model_sex = model_conv1D_sex(embedding_matrix)\n    X_tr, X_vl, y_tr, y_vl = X[train_index], X[test_index], Y_sex[train_index], Y_sex[test_index]\n    hist = model_sex.fit(X_tr, y_tr, batch_size=512, epochs=50, validation_data=(X_vl, y_vl),\n                         callbacks=callbacks, verbose=2, shuffle=True)\n    model_sex.load_weights(filepath)\n    sub1 += np.squeeze(model_sex.predict(X_test))/kfold.n_splits\n    oof_pref1[test_index] = model_sex.predict(X_vl)\n    
score.append(np.min(hist.history['val_loss']))\n    count += 1\nprint('log loss:', np.mean(score))\n\noof_pref1 = pd.DataFrame(oof_pref1, columns=['sex2'])\nsub1 = pd.DataFrame(sub1, columns=['sex2'])\nres1 = pd.concat([oof_pref1, sub1])\nres1['sex1'] = 1-res1['sex2']\n# res1.to_csv(\"res1.csv\", index=False)\n\n\ndef model_age_conv(embedding_matrix):\n\n    K.clear_session()\n    # The embedding layer containing the word vectors\n    emb_layer = Embedding(\n        input_dim=embedding_matrix.shape[0],\n        output_dim=embedding_matrix.shape[1],\n        weights=[embedding_matrix],\n        input_length=maxlen,\n        trainable=False)\n\n    # Define inputs\n    seq = Input(shape=(maxlen,))\n\n    # Run inputs through embedding\n    emb = emb_layer(seq)\n\n    lstm_layer = Bidirectional(GRU(128, recurrent_dropout=0.15, dropout=0.15,))\n    lstm = lstm_layer(emb)\n\n    translate = TimeDistributed(Dense(128, activation='relu'))\n    t1 = translate(emb)\n    t1 = TimeDistributed(Dropout(0.15))(t1)\n    sum_op = Lambda(lambda x: K.sum(x, axis=1), output_shape=(128,))\n    t1 = sum_op(t1)\n\n    lstm_layer2 = Bidirectional(\n        GRU(128, recurrent_dropout=0.15, dropout=0.15, return_sequences=True))\n\n    # 1D convolutions that can iterate over the word vectors\n    conv1_2 = Conv1D(filters=128, kernel_size=2,\n                     padding='same', activation='relu',)\n\n    lstm2 = lstm_layer2(emb)\n    # Run through CONV + GAP layers\n    conv1a2 = conv1_2(lstm2)\n    gap1a2 = GlobalAveragePooling1D()(conv1a2)\n    gmp1a2 = GlobalMaxPool1D()(conv1a2)\n\n    # 1D convolutions that can iterate over the word vectors\n    conv1 = Conv1D(filters=128, kernel_size=1,\n                   padding='same', activation='relu',)\n    conv2 = Conv1D(filters=64, kernel_size=2,\n                   padding='same', activation='relu', )\n    conv3 = Conv1D(filters=64, kernel_size=3,\n                   padding='same', activation='relu',)\n    conv4 = Conv1D(filters=64, kernel_size=4,\n                   padding='same', activation='relu',)\n    conv5 = Conv1D(filters=32, kernel_size=5,\n                   padding='same', activation='relu',)\n    conv6 = Conv1D(filters=32, kernel_size=6,\n                   padding='same', activation='relu',)\n\n    # Run through CONV + GAP layers\n    conv1a = conv1(emb)\n    gap1a = GlobalAveragePooling1D()(conv1a)\n    gmp1a = GlobalMaxPool1D()(conv1a)\n\n    conv2a = conv2(emb)\n    gap2a = GlobalAveragePooling1D()(conv2a)\n    gmp2a = GlobalMaxPool1D()(conv2a)\n\n    conv3a = conv3(emb)\n    gap3a = GlobalAveragePooling1D()(conv3a)\n    gmp3a = GlobalMaxPooling1D()(conv3a)\n\n    conv4a = conv3(emb)\n    gap4a = GlobalAveragePooling1D()(conv4a)\n    #gmp3a = GlobalMaxPooling1D()(conv3a)\n\n    conv5a = conv5(emb)\n    gap5a = GlobalAveragePooling1D()(conv5a)\n    gmp5a = GlobalMaxPooling1D()(conv5a)\n\n    conv6a = conv6(emb)\n    gap6a = GlobalAveragePooling1D()(conv6a)\n\n    #hin = Input(shape=(X_h.shape[1], ))\n    #htime = Dense(X_h.shape[1]//4, activation='relu')(hin)\n\n    merge1 = concatenate([gap1a2, gmp1a2, lstm, t1])\n\n    # The MLP that determines the outcome\n    x = Dropout(0.38)(merge1)\n    #x = BatchNormalization()(x)\n    #x = Dense(200, activation='relu',)(x)\n    #x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    pred = Dense(11, 
activation='softmax')(x)\n\n    model = Model(inputs=seq, outputs=pred)\n    model.compile(loss='categorical_crossentropy',\n                  optimizer=AdamW(weight_decay=0.1,))\n\n    return model\n\n\nY_age = to_categorical(train['age'])\nsub2 = np.zeros((X_test.shape[0], 11))\noof_pref2 = np.zeros((X.shape[0], 11))\nscore = []\ncount = 0\nfor i, (train_index, test_index) in enumerate(kfold.split(X, train['age'])):\n\n    print(\"FOLD | \", count+1)\n\n    filepath2 = \"age_weights_best_%d.h5\" % count\n    checkpoint2 = ModelCheckpoint(\n        filepath2, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n    reduce_lr2 = ReduceLROnPlateau(\n        monitor='val_loss', factor=0.8, patience=2, min_lr=0.0001, verbose=1)\n    earlystopping2 = EarlyStopping(\n        monitor='val_loss', min_delta=0.0001, patience=8, verbose=1, mode='auto')\n    callbacks2 = [checkpoint2, reduce_lr2, earlystopping2]\n\n    X_tr, X_vl, y_tr, y_vl = X[train_index], X[test_index], Y_age[train_index], Y_age[test_index]\n\n    model_age = model_age_conv(embedding_matrix)\n    hist = model_age.fit(X_tr, y_tr, batch_size=512, epochs=50, validation_data=(X_vl, y_vl),\n                         callbacks=callbacks2, verbose=2, shuffle=True)\n\n    model_age.load_weights(filepath2)\n    oof_pref2[test_index] = model_age.predict(X_vl)\n    sub2 += model_age.predict(X_test)/kfold.n_splits\n    score.append(np.min(hist.history['val_loss']))\n    count += 1\nprint('log loss:', np.mean(score))\n\nres2_1 = np.vstack((oof_pref2, sub2))\nres2_1 = pd.DataFrame(res2_1)\n\n# res2_1.to_csv(\"res2.csv\", index=False)\n\nres1.index = range(len(res1))\nres2_1.index = range(len(res2_1))\nfinal_1 = res2_1.copy()\nfinal_2 = res2_1.copy()\nfor i in range(11):\n    final_1[i] = res1['sex1']*res2_1[i]\n    final_2[i] = res1['sex2']*res2_1[i]\nid_list = pd.concat([train[['device_id']], test[['device_id']]])\nfinal = id_list\nfinal.index = range(len(final))\nfinal.columns = ['DeviceID']\nfinal_pred = pd.concat([final_1, final_2], 1)\nfinal = pd.concat([final, final_pred], 1)\nfinal.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6',\n                 '1-7', '1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4',\n                 '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\nfinal.to_csv('submit/deepnn_fix.csv', index=False)\n"
  },
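  {
    "path": "_editor_examples/sex_age_product_rule_sketch.py",
    "content": "# Editorial sketch, NOT part of the original repo: the tails of cnn.py and deepnn.py\n# above build the 22 submission columns '1-0'..'2-10' by multiplying a binary sex\n# probability with an 11-way age distribution, i.e. the product rule\n#   P(sex=s, age=a) = P(sex=s) * P(age=a | sex=s).\n# Tiny numeric illustration (3 ages instead of 11 for brevity):\nimport numpy as np\n\np_sex = np.array([0.3, 0.7])            # P(sex=1), P(sex=2) for one device\np_age = np.array([[0.5, 0.3, 0.2],      # P(age | sex=1)\n                  [0.2, 0.2, 0.6]])     # P(age | sex=2)\njoint = p_sex[:, None] * p_age          # 2 x 3 joint distribution\nprint(joint)                            # rows concatenated give the '1-*' and '2-*' columns\nprint(joint.sum())                      # 1.0: still a proper distribution\n"
  },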
  {
    "path": "chizhu/single_model/get_nn_feat.py",
    "content": "import pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime, timedelta\nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n%matplotlib inline\n\n#add\nimport gc\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom scipy.sparse import hstack, vstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\n# from skopt.space import Integer, Categorical, Real, Log10\n# from skopt.utils import use_named_args\n# from skopt import gp_minimize\nfrom gensim.models import Word2Vec, FastText\nimport gensim\nimport re\nfrom config import path\n# path = \"/dev/shm/chizhu_data/data/\"\n###这里是原始文件的地址，务必修改这里的路径\n\ntest = pd.read_csv(path+'deviceid_test.tsv', sep='\\t', names=['device_id'])\ntrain = pd.read_csv(path+'deviceid_train.tsv', sep='\\t',\n                    names=['device_id', 'sex', 'age'])\nbrand = pd.read_table(path+'deviceid_brand.tsv',\n                      names=['device_id', 'vendor', 'version'])\npacktime = pd.read_table(path+'deviceid_package_start_close.tsv',\n                         names=['device_id', 'app', 'start', 'close'])\npackages = pd.read_csv(path+'deviceid_packages.tsv',\n                       sep='\\t', names=['device_id', 'apps'])\n\npacktime['period'] = (packtime['close'] - packtime['start'])/1000\npacktime['start'] = pd.to_datetime(packtime['start'], unit='ms')\napp_use_time = packtime.groupby(['app'])['period'].agg('sum').reset_index()\napp_use_top100 = app_use_time.sort_values(\n    by='period', ascending=False)[:100]['app']\ndevice_app_use_time = packtime.groupby(['device_id', 'app'])[\n    'period'].agg('sum').reset_index()\nuse_time_top100_statis = device_app_use_time.set_index(\n    'app').loc[list(app_use_top100)].reset_index()\ntop100_statis = use_time_top100_statis.pivot(\n    index='device_id', columns='app', values='period').reset_index()\n\ntop100_statis = top100_statis.fillna(0)\n\n# 手机品牌预处理\nbrand['vendor'] = brand['vendor'].astype(\n    str).apply(lambda x: x.split(' ')[0].upper())\nbrand['ph_ver'] = brand['vendor'] + '_' + brand['version']\n\nph_ver = brand['ph_ver'].value_counts()\nph_ver_cnt = pd.DataFrame(ph_ver).reset_index()\nph_ver_cnt.columns = ['ph_ver', 'ph_ver_cnt']\n\nbrand = pd.merge(left=brand, right=ph_ver_cnt, on='ph_ver')\n\n# 针对长尾分布做的一点处理\nmask = (brand.ph_ver_cnt < 100)\nbrand.loc[mask, 'ph_ver'] = 'other'\n\ntrain = pd.merge(brand[['device_id', 'ph_ver']],\n                 train, on='device_id', how='right')\ntest = pd.merge(brand[['device_id', 'ph_ver']],\n                test, on='device_id', how='right')\ntrain['ph_ver'] = train['ph_ver'].astype(str)\ntest['ph_ver'] = test['ph_ver'].astype(str)\n\n# 将 ph_ver 进行 label encoder\nph_ver_le = preprocessing.LabelEncoder()\ntrain['ph_ver'] = ph_ver_le.fit_transform(train['ph_ver'])\ntest['ph_ver'] = ph_ver_le.transform(test['ph_ver'])\ntrain['label'] = train['sex'].astype(str) + '-' + train['age'].astype(str)\nlabel_le = preprocessing.LabelEncoder()\ntrain['label'] = label_le.fit_transform(train['label'])\n\ntest['sex'] = -1\ntest['age'] = -1\ntest['label'] = -1\ndata = pd.concat([train, test], ignore_index=True)\n# 
data.shape\n\nph_ver_dummy = pd.get_dummies(data['ph_ver'])\nph_ver_dummy.columns = ['ph_ver_' + str(i)\n                        for i in range(ph_ver_dummy.shape[1])]\n\ndata = pd.concat([data, ph_ver_dummy], axis=1)\n\ndel data['ph_ver']\n\ntrain = data[data.sex != -1]\ntest = data[data.sex == -1]\n# train.shape, test.shape\n\n# total use-count of each app\napp_num = packtime['app'].value_counts().reset_index()\napp_num.columns = ['app', 'app_num']\npacktime = pd.merge(left=packtime, right=app_num, on='app')\n# again, handle the long tail (tried no cutoff and other thresholds; 100 scored best)\npacktime.loc[packtime.app_num < 100, 'app'] = 'other'\n\n# count the apps on each device\ndf_app = packtime[['device_id', 'app']]\napps = df_app.drop_duplicates().groupby(['device_id'])[\n    'app'].apply(' '.join).reset_index()\napps['app_length'] = apps['app'].apply(lambda x: len(x.split(' ')))\n\ntrain = pd.merge(train, apps, on='device_id', how='left')\ntest = pd.merge(test, apps, on='device_id', how='left')\n\n# packtime['period'] = (packtime['close'] - packtime['start'])/1000\n# packtime['start'] = pd.to_datetime(packtime['start'], unit='ms')\npacktime['dayofweek'] = packtime['start'].dt.dayofweek\npacktime['hour'] = packtime['start'].dt.hour\n# packtime = packtime[(packtime['start'] < '2017-03-31 23:59:59') & (packtime['start'] > '2017-03-01 00:00:00')]\n\napp_use_time = packtime.groupby(['device_id', 'dayofweek'])[\n    'period'].agg('sum').reset_index()\nweek_app_use = app_use_time.pivot_table(\n    values='period', columns='dayofweek', index='device_id').reset_index()\nweek_app_use = week_app_use.fillna(0)\nweek_app_use.columns = ['device_id'] + \\\n    ['week_day_' + str(i) for i in range(0, 7)]\n\nweek_app_use['week_max'] = week_app_use.max(axis=1)\nweek_app_use['week_min'] = week_app_use.min(axis=1)\nweek_app_use['week_sum'] = week_app_use.sum(axis=1)\nweek_app_use['week_std'] = week_app_use.std(axis=1)\n\n# '''\n# for i in range(0, 7):\n#     week_app_use['week_day_' + str(i)] = week_app_use['week_day_' + str(i)] / week_app_use['week_sum']\n# '''\n\nuser_behavior = pd.read_csv('data/user_behavior.csv')\nuser_behavior['app_len_max'] = user_behavior['app_len_max'].astype(np.float64)\ndel user_behavior['app']\ntrain = pd.merge(train, user_behavior, on='device_id', how='left')\ntest = pd.merge(test, user_behavior, on='device_id', how='left')\n\ntrain = pd.merge(train, week_app_use, on='device_id', how='left')\ntest = pd.merge(test, week_app_use, on='device_id', how='left')\n\ntop100_statis.columns = ['device_id'] + \\\n    ['top100_statis_' + str(i) for i in range(0, 100)]\ntrain = pd.merge(train, top100_statis, on='device_id', how='left')\ntest = pd.merge(test, top100_statis, on='device_id', how='left')\n\ntrain.to_csv(\"data/train_statistic_feat.csv\", index=False)\ntest.to_csv(\"data/test_statistic_feat.csv\", index=False)\n"
  },
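  {
    "path": "_editor_examples/usage_pivot_sketch.py",
    "content": "# Editorial sketch, NOT part of the original repo: get_nn_feat.py above repeatedly uses\n# the same pattern to turn the (device_id, app, period) usage log into one row per\n# device (top-100 app usage, day-of-week totals): groupby-sum, then pivot, then fillna.\n# The same pattern on toy data:\nimport pandas as pd\n\npacktime = pd.DataFrame({'device_id': ['d1', 'd1', 'd1', 'd2'],\n                         'app': ['a', 'a', 'b', 'a'],\n                         'period': [10.0, 2.0, 5.0, 7.0]})\nper_device_app = packtime.groupby(['device_id', 'app'])['period'].sum().reset_index()\nwide = per_device_app.pivot(index='device_id', columns='app', values='period')\nwide = wide.fillna(0).reset_index()    # one row per device, one usage-seconds column per app\nprint(wide)\n"
  },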
  {
    "path": "chizhu/single_model/lgb.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n#add\nimport gc\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom scipy.sparse import hstack, vstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\n# from skopt.space import Integer, Categorical, Real, Log10\n# from skopt.utils import use_named_args\n# from skopt import gp_minimize\nfrom gensim.models import Word2Vec, FastText\nimport gensim \nimport re\nfrom config import path\n\n# path=\"/Users/chizhu/data/competition_data/易观/\"\n\n\n# In[2]:\n\n\ntest = pd.read_csv(path+'deviceid_test.tsv', sep='\\t', names=['device_id'])\ntrain = pd.read_csv(path+'deviceid_train.tsv', sep='\\t', names=['device_id', 'sex', 'age'])\nbrand = pd.read_table(path+'deviceid_brand.tsv', names=['device_id', 'vendor', 'version'])\npacktime = pd.read_table(path+'deviceid_package_start_close.tsv', \n                         names=['device_id', 'app', 'start', 'close'])\npackages = pd.read_csv(path+'deviceid_packages.tsv', sep='\\t', names=['device_id', 'apps'])\n\n\n# In[3]:\n\n\ndef get_str(df):\n    res=\"\"\n    for i in df.split(\",\"):\n        res+=i+\" \"\n    return res\npackages[\"str_app\"]=packages['apps'].apply(lambda x:get_str(x),1)\n\n\n# In[4]:\n\n\ntfidf = CountVectorizer()\ntrain_str_app=pd.merge(train[['device_id']],packages[[\"device_id\",'str_app']],on=\"device_id\",how=\"left\")\ntest_str_app=pd.merge(test[['device_id']],packages[[\"device_id\",'str_app']],on=\"device_id\",how=\"left\")\npackages['str_app'] = tfidf.fit_transform(packages['str_app'])\ntrain_app = tfidf.transform(list(train_str_app['str_app'])).tocsr()\ntest_app = tfidf.transform(list(test_str_app['str_app'])).tocsr()\n\n\n# In[5]:\n\n\nall_id=pd.concat([train[[\"device_id\"]],test[['device_id']]])\n\n\n# In[6]:\n\n\nall_id.index=range(len(all_id))\n\n\n# In[7]:\n\n\n# encoding:utf-8\nimport sys\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.cross_validation import StratifiedKFold\nimport numpy as np\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.svm import LinearSVC\n\nfrom sklearn.metrics import mean_squared_error\nimport os\nif not os.path.exists(\"data\"):\n    os.mkdir(\"data\")\n\n\n\n############################ 切分数据集 ##########################\nprint('开始进行一些前期处理')\ntrain_feature = train_app\ntest_feature = test_app\n    # 五则交叉验证\nn_folds = 5\nprint('处理完毕')\ndf_stack = pd.DataFrame()\ndf_stack['device_id']=all_id['device_id']\nfor label in [\"sex\"]:\n    score = train[label]-1\n    \n    ########################### lr(LogisticRegression) ################################\n    print('lr stacking')\n    stack_train = np.zeros((len(train), 1))\n    stack_test = 
np.zeros((len(test), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        clf = LogisticRegression(random_state=1017, C=8)\n        clf.fit(train_feature[tr], score[tr])\n        score_va = clf.predict_proba(train_feature[va])[:,1]\n        \n        score_te = clf.predict_proba(test_feature)[:,1]\n        print('score ' + str(mean_squared_error(score[va], clf.predict(train_feature[va]))))\n        stack_train[va,0] = score_va\n        stack_test[:,0]+= score_te\n    stack_test /= n_folds\n    \n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['pack_tfidf_lr_classfiy_{}'.format(label)] = stack[:, 0]\n    \n\n    ########################### SGD (stochastic gradient descent) ################################\n    print('sgd stacking')\n    stack_train = np.zeros((len(train), 1))\n    stack_test = np.zeros((len(test), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        sgd = SGDClassifier(random_state=1017, loss='log')\n        sgd.fit(train_feature[tr], score[tr])\n        score_va = sgd.predict_proba(train_feature[va])[:,1]\n        score_te = sgd.predict_proba(test_feature)[:,1]\n        print('score ' + str(mean_squared_error(score[va], sgd.predict(train_feature[va]))))\n        stack_train[va,0] = score_va\n        stack_test[:,0]+= score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['pack_tfidf_sgd_classfiy_{}'.format(label)] = stack[:, 0]\n\n\n    ########################### pac(PassiveAggressiveClassifier) ################################\n    print('PAC stacking')\n    stack_train = np.zeros((len(train), 1))\n    stack_test = np.zeros((len(test), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        pac = PassiveAggressiveClassifier(random_state=1017)\n        pac.fit(train_feature[tr], score[tr])\n        score_va = pac._predict_proba_lr(train_feature[va])[:,1]\n        score_te = pac._predict_proba_lr(test_feature)[:,1]\n        print(score_va)\n        print('score ' + str(mean_squared_error(score[va], pac.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['pack_tfidf_pac_classfiy_{}'.format(label)] = stack[:, 0]\n    \n\n\n    ########################### ridge(RidgeClassifier) ################################\n    print('RidgeClassifier stacking')\n    stack_train = np.zeros((len(train), 1))\n    stack_test = np.zeros((len(test), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        ridge = RidgeClassifier(random_state=1017)\n        ridge.fit(train_feature[tr], score[tr])\n        score_va = ridge._predict_proba_lr(train_feature[va])[:,1]\n        score_te = ridge._predict_proba_lr(test_feature)[:,1]\n        print(score_va)\n        print('score ' + str(mean_squared_error(score[va], ridge.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    
df_stack['pack_tfidf_ridge_classfiy_{}'.format(label)] = stack[:, 0]\n    \n\n\n    ########################### bnb(BernoulliNB) ################################\n    print('BernoulliNB stacking')\n    stack_train = np.zeros((len(train), 1))\n    stack_test = np.zeros((len(test), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        bnb = BernoulliNB()\n        bnb.fit(train_feature[tr], score[tr])\n        score_va = bnb.predict_proba(train_feature[va])[:,1]\n        score_te = bnb.predict_proba(test_feature)[:,1]\n        print(score_va)\n        print('score ' + str(mean_squared_error(score[va], bnb.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['pack_tfidf_bnb_classfiy_{}'.format(label)] = stack[:, 0]\n    \n    ########################### mnb(MultinomialNB) ################################\n    print('MultinomialNB stacking')\n    stack_train = np.zeros((len(train), 1))\n    stack_test = np.zeros((len(test), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        mnb = MultinomialNB()\n        mnb.fit(train_feature[tr], score[tr])\n        score_va = mnb.predict_proba(train_feature[va])[:,1]\n        score_te = mnb.predict_proba(test_feature)[:,1]\n        print(score_va)\n        print('score ' + str(mean_squared_error(score[va], mnb.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['pack_tfidf_mnb_classfiy_{}'.format(label)] = stack[:, 0]\n    \n\n    ############################ lsvc(LinearSVC) ################################\n    print('LinearSVC stacking')\n    stack_train = np.zeros((len(train), 1))\n    stack_test = np.zeros((len(test), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        lsvc = LinearSVC(random_state=1017)\n        lsvc.fit(train_feature[tr], score[tr])\n        score_va = lsvc._predict_proba_lr(train_feature[va])[:,1]\n        score_te = lsvc._predict_proba_lr(test_feature)[:,1]\n        print(score_va)\n        print('score ' + str(mean_squared_error(score[va], lsvc.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['pack_tfidf_lsvc_classfiy_{}'.format(label)] = stack[:, 0]\n    \ndf_stack.to_csv('data/tfidf_classfiy_package.csv', index=None, encoding='utf8')\nprint('tfidf features saved\\n')\n\n\n# In[8]:\n\n\npacktime['period'] = (packtime['close'] - packtime['start'])/1000\npacktime['start'] = pd.to_datetime(packtime['start'], unit='ms')\napp_use_time = packtime.groupby(['app'])['period'].agg('sum').reset_index()\napp_use_top100 = app_use_time.sort_values(by='period', ascending=False)[:100]['app']\ndevice_app_use_time = packtime.groupby(['device_id', 'app'])['period'].agg('sum').reset_index()\nuse_time_top100_statis = device_app_use_time.set_index('app').loc[list(app_use_top100)].reset_index()\ntop100_statis = 
use_time_top100_statis.pivot(index='device_id', columns='app', values='period').reset_index()\n\n\n# In[9]:\n\n\ntop100_statis = top100_statis.fillna(0)\n\n\n# In[10]:\n\n\n# Phone brand preprocessing\nbrand['vendor'] = brand['vendor'].astype(str).apply(lambda x : x.split(' ')[0].upper())\nbrand['ph_ver'] = brand['vendor'] + '_' + brand['version']\n\nph_ver = brand['ph_ver'].value_counts()\nph_ver_cnt = pd.DataFrame(ph_ver).reset_index()\nph_ver_cnt.columns = ['ph_ver', 'ph_ver_cnt']\n\nbrand = pd.merge(left=brand, right=ph_ver_cnt,on='ph_ver')\n\n\n# In[11]:\n\n\n# A small tweak for the long-tail distribution: rare brand/version pairs become 'other'\nmask = (brand.ph_ver_cnt < 100)\nbrand.loc[mask, 'ph_ver'] = 'other' \n\ntrain_data = pd.merge(brand[['device_id', 'ph_ver']], train, on='device_id', how='right')\ntest_data = pd.merge(brand[['device_id', 'ph_ver']], test, on='device_id', how='right')\ntrain_data['ph_ver'] = train_data['ph_ver'].astype(str)\ntest_data['ph_ver'] = test_data['ph_ver'].astype(str)\n\n# Label-encode ph_ver\nph_ver_le = preprocessing.LabelEncoder()\ntrain_data['ph_ver'] = ph_ver_le.fit_transform(train_data['ph_ver'])\ntest_data['ph_ver'] = ph_ver_le.transform(test_data['ph_ver'])\ntrain_data['label'] = train_data['sex'].astype(str) + '-' + train_data['age'].astype(str)\nlabel_le = preprocessing.LabelEncoder()\ntrain_data['label'] = label_le.fit_transform(train_data['label'])\n\n\n# In[12]:\n\n\ntest_data['sex'] = -1\ntest_data['age'] = -1\ntest_data['label'] = -1\ndata = pd.concat([train_data, test_data], ignore_index=True)\nprint(data.shape)\n\n\n# In[13]:\n\n\ntrain_data = data[data.sex != -1]\ntest_data = data[data.sex == -1]\nprint(train_data.shape, test_data.shape)\n\n\n# In[14]:\n\n\n# Total usage count per app\napp_num = packtime['app'].value_counts().reset_index()\napp_num.columns = ['app', 'app_num']\npacktime = pd.merge(left=packtime, right=app_num, on='app')\n# Likewise, trim the long tail (no trimming and other cut-offs were tried; the threshold of 100 scored best)\npacktime.loc[packtime.app_num < 100, 'app'] = 'other'\n\n\n# In[15]:\n\n\n# Count the apps on each device\ndf_app = packtime[['device_id', 'app']]\napps = df_app.drop_duplicates().groupby(['device_id'])['app'].apply(' '.join).reset_index()\napps['app_length'] = apps['app'].apply(lambda x:len(x.split(' ')))\n\ntrain_data = pd.merge(train_data, apps, on='device_id', how='left')\ntest_data = pd.merge(test_data, apps, on='device_id', how='left')\n\n\n# In[16]:\n\n\n# Bag-of-apps count features per device (a CountVectorizer, despite the tfidf name)\ntfidf = CountVectorizer()\ntfidf.fit(apps['app'])  # fit the vocabulary only; train/test are transformed below (assigning fit_transform back into apps['app'] would overwrite the column with a sparse matrix)\n\nX_tr_app = tfidf.transform(list(train_data['app'])).tocsr()\nX_ts_app = tfidf.transform(list(test_data['app'])).tocsr()\n\n\n# In[17]:\n\n\n# encoding:utf-8\nimport sys\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.cross_validation import StratifiedKFold\nimport numpy as np\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.svm import LinearSVC\n\nfrom sklearn.metrics import mean_squared_error\n\n\n\n\n############################ split the dataset ##########################\nprint('start preprocessing')\ntrain_feature = X_tr_app\ntest_feature = X_ts_app\n# 5-fold cross-validation\nn_folds = 5\nprint('preprocessing done')\ndf_stack = pd.DataFrame()\ndf_stack['device_id']=data['device_id']\nfor label in [\"sex\"]:\n    score = train_data[label]-1\n    \n    ########################### lr(LogisticRegression) ################################\n    print('lr 
stacking')\n    stack_train = np.zeros((len(train_data), 1))\n    stack_test = np.zeros((len(test_data), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        clf = LogisticRegression(random_state=1017, C=8)\n        clf.fit(train_feature[tr], score[tr])\n        score_va = clf.predict_proba(train_feature[va])[:,1]\n        \n        score_te = clf.predict_proba(test_feature)[:,1]\n        print('得分' + str(mean_squared_error(score[va], clf.predict(train_feature[va]))))\n        stack_train[va,0] = score_va\n        stack_test[:,0]+= score_te\n    stack_test /= n_folds\n    \n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['tfidf_lr_classfiy_{}'.format(label)] = stack[:, 0]\n    \n\n    ########################### SGD(随机梯度下降) ################################\n    print('sgd stacking')\n    stack_train = np.zeros((len(train_data), 1))\n    stack_test = np.zeros((len(test_data), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        sgd = SGDClassifier(random_state=1017, loss='log')\n        sgd.fit(train_feature[tr], score[tr])\n        score_va = sgd.predict_proba(train_feature[va])[:,1]\n        score_te = sgd.predict_proba(test_feature)[:,1]\n        print('得分' + str(mean_squared_error(score[va], sgd.predict(train_feature[va]))))\n        stack_train[va,0] = score_va\n        stack_test[:,0]+= score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['tfidf_sgd_classfiy_{}'.format(label)] = stack[:, 0]\n\n\n    ########################### pac(PassiveAggressiveClassifier) ################################\n    print('PAC stacking')\n    stack_train = np.zeros((len(train_data), 1))\n    stack_test = np.zeros((len(test_data), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        pac = PassiveAggressiveClassifier(random_state=1017)\n        pac.fit(train_feature[tr], score[tr])\n        score_va = pac._predict_proba_lr(train_feature[va])[:,1]\n        score_te = pac._predict_proba_lr(test_feature)[:,1]\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], pac.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['tfidf_pac_classfiy_{}'.format(label)] = stack[:, 0]\n    \n\n\n    ########################### ridge(RidgeClassfiy) ################################\n    print('RidgeClassfiy stacking')\n    stack_train = np.zeros((len(train_data), 1))\n    stack_test = np.zeros((len(test_data), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        ridge = RidgeClassifier(random_state=1017)\n        ridge.fit(train_feature[tr], score[tr])\n        score_va = ridge._predict_proba_lr(train_feature[va])[:,1]\n        score_te = ridge._predict_proba_lr(test_feature)[:,1]\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], ridge.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += 
score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['tfidf_ridge_classfiy_{}'.format(label)] = stack[:, 0]\n    \n\n\n    ########################### bnb(BernoulliNB) ################################\n    print('BernoulliNB stacking')\n    stack_train = np.zeros((len(train_data), 1))\n    stack_test = np.zeros((len(test_data), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        bnb = BernoulliNB()\n        bnb.fit(train_feature[tr], score[tr])\n        score_va = bnb.predict_proba(train_feature[va])[:,1]\n        score_te = bnb.predict_proba(test_feature)[:,1]\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], bnb.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['tfidf_bnb_classfiy_{}'.format(label)] = stack[:, 0]\n    \n    ########################### mnb(MultinomialNB) ################################\n    print('MultinomialNB stacking')\n    stack_train = np.zeros((len(train_data), 1))\n    stack_test = np.zeros((len(test_data), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        mnb = MultinomialNB()\n        mnb.fit(train_feature[tr], score[tr])\n        score_va = mnb.predict_proba(train_feature[va])[:,1]\n        score_te = mnb.predict_proba(test_feature)[:,1]\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], mnb.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['tfidf_mnb_classfiy_{}'.format(label)] = stack[:, 0]\n    \n\n    ############################ Linersvc(LinerSVC) ################################\n    print('LinerSVC stacking')\n    stack_train = np.zeros((len(train_data), 1))\n    stack_test = np.zeros((len(test_data), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        lsvc = LinearSVC(random_state=1017)\n        lsvc.fit(train_feature[tr], score[tr])\n        score_va = lsvc._predict_proba_lr(train_feature[va])[:,1]\n        score_te = lsvc._predict_proba_lr(test_feature)[:,1]\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], lsvc.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['tfidf_lsvc_classfiy_{}'.format(label)] = stack[:, 0]\n    \ndf_stack.to_csv('data/tfidf_classfiy.csv', index=None, encoding='utf8')\nprint('tfidf特征已保存\\n')\n\n\n# ### 利用word2vec得到每台设备所安装app的embedding表示\n\n# In[18]:\n\n\npackages['apps'] = packages['apps'].apply(lambda x:x.split(','))\npackages['app_length'] = packages['apps'].apply(lambda x:len(x))\n\n\n# In[19]:\n\n\nembed_size = 128\nfastmodel = Word2Vec(list(packages['apps']), size=embed_size, window=4, min_count=3, negative=2,\n                 sg=1, sample=0.002, hs=1, workers=4)  \n\nembedding_fast = pd.DataFrame([fastmodel[word] for word in 
(fastmodel.wv.vocab)])\nembedding_fast['app'] = list(fastmodel.wv.vocab)\nembedding_fast.columns= [\"fdim_%s\" % str(i) for i in range(embed_size)]+[\"app\"]\nprint(embedding_fast.head())\n\n\n# In[20]:\n\n\n# Repeat each device_id once per installed app (the device_id list is hoisted out of the loop instead of being rebuilt every iteration)\nid_list = []\ndevice_ids = list(packages['device_id'])\nfor i in range(packages.shape[0]):\n    id_list += [device_ids[i]]*packages['app_length'].iloc[i]\n\n\napp_list = [word for item in packages['apps'] for word in item]\n\napp_vect = pd.DataFrame({'device_id':id_list})        \napp_vect['app'] = app_list\n\n\n# In[21]:\n\n\napp_vect = app_vect.merge(embedding_fast, on='app', how='left')\napp_vect = app_vect.drop('app', axis=1)\n\nseqfeature = app_vect.groupby(['device_id']).agg('mean')\nseqfeature.reset_index(inplace=True)\n\n\n# In[22]:\n\n\nprint(seqfeature.head())\n\n\n# ### Time spent on the phone across the seven days of the week\n\n# In[23]:\n\n\n# packtime['period'] = (packtime['close'] - packtime['start'])/1000\n# packtime['start'] = pd.to_datetime(packtime['start'], unit='ms')\npacktime['dayofweek'] = packtime['start'].dt.dayofweek\npacktime['hour'] = packtime['start'].dt.hour\n# packtime = packtime[(packtime['start'] < '2017-03-31 23:59:59') & (packtime['start'] > '2017-03-01 00:00:00')]\n\n\n# In[24]:\n\n\napp_use_time = packtime.groupby(['device_id', 'dayofweek'])['period'].agg('sum').reset_index()\nweek_app_use = app_use_time.pivot_table(values='period', columns='dayofweek', index='device_id').reset_index()\nweek_app_use = week_app_use.fillna(0)\nweek_app_use.columns = ['device_id'] + ['week_day_' + str(i) for i in range(0, 7)]\n\n# Caution: week_sum/week_std are computed after week_max/week_min were appended, so they include those two columns as well; kept as-is to preserve the original features\nweek_app_use['week_max'] = week_app_use.max(axis=1)\nweek_app_use['week_min'] = week_app_use.min(axis=1)\nweek_app_use['week_sum'] = week_app_use.sum(axis=1)\nweek_app_use['week_std'] = week_app_use.std(axis=1)\n\n\n\n# ### Merge all the features together\n\n# In[25]:\n\n\nprint(train_data.columns[4:])\n\n\n# In[26]:\n\n\nuser_behavior = pd.read_csv('data/user_behavior.csv')\nuser_behavior['app_len_max'] = user_behavior['app_len_max'].astype(np.float64)\ndel user_behavior['app']\ntrain_data = pd.merge(train_data, user_behavior, on='device_id', how='left')\ntest_data = pd.merge(test_data, user_behavior, on='device_id', how='left')\n\n\n# In[27]:\n\n\ntrain_data = pd.merge(train_data, seqfeature, on='device_id', how='left')\ntest_data = pd.merge(test_data, seqfeature, on='device_id', how='left')\n\n\n# In[28]:\n\n\ntrain_data = pd.merge(train_data, week_app_use, on='device_id', how='left')\ntest_data = pd.merge(test_data, week_app_use, on='device_id', how='left')\n\n\n# In[29]:\n\n\ntop100_statis.columns = ['device_id'] + ['top100_statis_' + str(i) for i in range(0, 100)]\ntrain_data = pd.merge(train_data, top100_statis, on='device_id', how='left')\ntest_data = pd.merge(test_data, top100_statis, on='device_id', how='left')\n\n\n# In[30]:\n\n\ntrain_data.to_csv(\"./data/train_data.csv\",index=False)\ntest_data.to_csv(\"./data/test_data.csv\",index=False)\n\n\n# In[31]:\n\n\ntfidf_feat=pd.read_csv(\"data/tfidf_classfiy.csv\")\ntf2=pd.read_csv(\"data/tfidf_classfiy_package.csv\")\ntrain_data=pd.read_csv(\"data/train_data.csv\")\ntest_data=pd.read_csv(\"data/test_data.csv\")\n# app_w2v=pd.read_csv(\"./data/w2v_tfidf.csv\")\n\n\n# In[32]:\n\n\ntrain = pd.merge(train_data,tfidf_feat,on=\"device_id\",how=\"left\")\n# train = pd.merge(train_data,tf2,on=\"device_id\",how=\"left\")\n# train = pd.merge(train_data,app_w2v,on=\"device_id\",how=\"left\")\ntest = pd.merge(test_data,tfidf_feat,on=\"device_id\",how=\"left\")\n# test = pd.merge(test_data,tf2,on=\"device_id\",how=\"left\")\n# test = pd.merge(test_data,app_w2v,on=\"device_id\",how=\"left\")\n\n\n# In[85]:\n\n\ntrain_dt = pd.merge(train_data[['device_id','ph_ver']],tfidf_feat,on=\"device_id\",how=\"left\")\ntrain_dt = pd.merge(train_dt,tf2,on=\"device_id\",how=\"left\")\ntest_dt = pd.merge(test_data[['device_id',\"ph_ver\"]],tfidf_feat,on=\"device_id\",how=\"left\")\ntest_dt = pd.merge(test_dt,tf2,on=\"device_id\",how=\"left\")\nfeat=pd.concat([train_dt,test_dt])\nfeat.to_csv(\"data/sex_chizhu_feat.csv\",index=False)\n\n\n# In[33]:\n\n\nfeatures = [x for x in train.columns if x not in ['device_id', 'sex',\"age\",\"label\",\"app\"]]\nY = train['sex'] - 1\n\n\n# ### Start training the model\n\n# In[34]:\n\n\nimport lightgbm as lgb\n# import xgboost as xgb\nfrom sklearn.metrics import auc, log_loss, roc_auc_score,f1_score,recall_score,precision_score\nfrom sklearn.cross_validation import StratifiedKFold\n\nkf = StratifiedKFold(Y, n_folds=5, shuffle=True, random_state=1024)\n\nparams = {\n            'boosting_type': 'gbdt',\n            'metric': {'binary_logloss',}, \n#             'is_unbalance':'True',\n            'learning_rate' : 0.01, \n             'verbose': 0,\n            'num_leaves':32 ,\n            # 'max_depth':8, \n            # 'max_bin':10, \n            # 'lambda_l2': 1, \n            # 'min_child_weight':50,\n            'objective': 'binary', \n            'feature_fraction': 0.4,\n            'bagging_fraction':0.7, # 0.9 was the best so far\n            'bagging_freq':3,  # 3 is the best so far\n#             'min_data': 500,\n            'seed': 1024,\n            'nthread': 8,\n            # 'silent': True,\n}\nnum_round = 3500\nearly_stopping_rounds = 100\n\n\n# In[35]:\n\n\naus = []\nsub1 = np.zeros((len(test), ))\npred_oob1=np.zeros((len(train),))\nfor i,(train_index,test_index) in enumerate(kf):\n  \n    tr_x = train[features].reindex(index=train_index, copy=False)\n    tr_y = Y[train_index]\n    te_x = train[features].reindex(index=test_index, copy=False)\n    te_y = Y[test_index]\n\n    d_tr = lgb.Dataset(tr_x, label=tr_y)\n    d_te = lgb.Dataset(te_x, label=te_y)\n    model = lgb.train(params, d_tr, num_boost_round=num_round, \n                      valid_sets=d_te,verbose_eval=200,\n                              early_stopping_rounds=early_stopping_rounds)\n    pred= model.predict(te_x, num_iteration=model.best_iteration)\n    pred_oob1[test_index] =pred\n    \n    a = log_loss(te_y, pred)\n\n    sub1 += model.predict(test[features], num_iteration=model.best_iteration)/5\n\n    print (\"idx: \", i) \n    print (\" loss: %.5f\" % a)\n\n    print (\"best tree num: \", model.best_iteration)\n    aus.append(a)\n\nprint (\"mean\")\nprint (\"loss:       %s\" % (sum(aus) / 5.0))  # this is the mean log loss across folds, not AUC\n\n\n# In[36]:\n\n\n##### feature importance\n# get_ipython().run_line_magic('matplotlib', 'inline')\n# import matplotlib.pyplot as plt\n# f=dict(zip(list(train[features].keys()),model.feature_importance()))\n# f=sorted(f.items(),key=lambda d:d[1], reverse = True)\n# f=pd.DataFrame(f,columns=['feature','imp'])\n# plt.bar(range(len(f)),f.imp)\n# plt.xticks(range(len(f)),f.feature,rotation=70,fontsize=20)\n# fig = plt.gcf()\n# fig.set_size_inches(50, 20)\n\n\n# In[37]:\n\n\n# f.ix[:450,:]\n\n\n# In[38]:\n\n\n# features=f.ix[:434,\"feature\"].values\n\n\n# In[39]:\n\n\npred_oob1 = pd.DataFrame(pred_oob1, columns=['sex2'])\nsub1 = pd.DataFrame(sub1, columns=['sex2'])\nres1=pd.concat([pred_oob1,sub1])\nres1['sex1'] = 1-res1['sex2']\n\n\n# In[40]:\n\n\nimport gc\ngc.collect()\n\n\n# In[41]:\n\n\ntrain_id = pd.read_csv(path+'deviceid_train.tsv', sep='\\t', names=['device_id', 'sex', 
'age'])\n\n\n# In[42]:\n\n\n# encoding:utf-8\nimport sys\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.cross_validation import StratifiedKFold\nimport numpy as np\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.svm import LinearSVC\n\nfrom sklearn.metrics import mean_squared_error\n\n\n\n\n############################ 切分数据集 ##########################\nprint('开始进行一些前期处理')\ntrain_feature = train_app\ntest_feature = test_app\n    # 五则交叉验证\nn_folds = 5\nprint('处理完毕')\ndf_stack = pd.DataFrame()\ndf_stack['device_id']=all_id['device_id']\nfor label in [\"age\"]:\n    score = train_id[label]\n    \n    ########################### lr(LogisticRegression) ################################\n    print('lr stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        clf = LogisticRegression(random_state=1017, C=8)\n        clf.fit(train_feature[tr], score[tr])\n        score_va = clf.predict_proba(train_feature[va])\n        \n        score_te = clf.predict_proba(test_feature)\n        print('得分' + str(mean_squared_error(score[va], clf.predict(train_feature[va]))))\n        stack_train[va] = score_va\n        stack_test+= score_te\n    stack_test /= n_folds\n    \n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['pack_tfidf_lr_classfiy_{}'.format(i)] = stack[:, i]\n    \n\n    ########################### SGD(随机梯度下降) ################################\n    print('sgd stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        sgd = SGDClassifier(random_state=1017, loss='log')\n        sgd.fit(train_feature[tr], score[tr])\n        score_va = sgd.predict_proba(train_feature[va])\n        score_te = sgd.predict_proba(test_feature)\n        print('得分' + str(mean_squared_error(score[va], sgd.predict(train_feature[va]))))\n        stack_train[va] = score_va\n        stack_test+= score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['pack_tfidf_sgd_classfiy_{}'.format(i)] = stack[:, i]\n\n\n    ########################### pac(PassiveAggressiveClassifier) ################################\n    print('PAC stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        pac = PassiveAggressiveClassifier(random_state=1017)\n        pac.fit(train_feature[tr], score[tr])\n        score_va = pac._predict_proba_lr(train_feature[va])\n        score_te = pac._predict_proba_lr(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], pac.predict(train_feature[va]))))\n        
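# (added note) score_va has shape (len(va), 11) in these multi-class blocks; the validation folds are\n        # disjoint, so each train row is written once and += on the zero-initialised stack_train acts as assignment\n        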
stack_train[va] += score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['pack_tfidf_pac_classfiy_{}'.format(i)] = stack[:, i]\n    \n\n\n    ########################### ridge(RidgeClassfiy) ################################\n    print('RidgeClassfiy stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        ridge = RidgeClassifier(random_state=1017)\n        ridge.fit(train_feature[tr], score[tr])\n        score_va = ridge._predict_proba_lr(train_feature[va])\n        score_te = ridge._predict_proba_lr(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], ridge.predict(train_feature[va]))))\n        stack_train[va] += score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['pack_tfidf_ridge_classfiy_{}'.format(i)] = stack[:, i]\n    \n\n\n    ########################### bnb(BernoulliNB) ################################\n    print('BernoulliNB stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        bnb = BernoulliNB()\n        bnb.fit(train_feature[tr], score[tr])\n        score_va = bnb.predict_proba(train_feature[va])\n        score_te = bnb.predict_proba(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], bnb.predict(train_feature[va]))))\n        stack_train[va] += score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['pack_tfidf_bnb_classfiy_{}'.format(i)] = stack[:, i]\n    \n    ########################### mnb(MultinomialNB) ################################\n    print('MultinomialNB stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        mnb = MultinomialNB()\n        mnb.fit(train_feature[tr], score[tr])\n        score_va = mnb.predict_proba(train_feature[va])\n        score_te = mnb.predict_proba(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], mnb.predict(train_feature[va]))))\n        stack_train[va] += score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['pack_tfidf_mnb_classfiy_{}'.format(i)] = stack[:, i]\n    \n\n    ############################ Linersvc(LinerSVC) ################################\n    print('LinerSVC stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        lsvc = 
LinearSVC(random_state=1017)\n        lsvc.fit(train_feature[tr], score[tr])\n        score_va = lsvc._predict_proba_lr(train_feature[va])\n        score_te = lsvc._predict_proba_lr(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], lsvc.predict(train_feature[va]))))\n        stack_train[va] += score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['pack_tfidf_lsvc_classfiy_{}'.format(i)] = stack[:, i]\n    \ndf_stack.to_csv('data/pack_tfidf_age.csv', index=None, encoding='utf8')\nprint('tfidf特征已保存\\n')\n\n\n# #### tfidf\n\n# In[43]:\n\n\n# encoding:utf-8\nimport sys\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.cross_validation import StratifiedKFold\nimport numpy as np\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.svm import LinearSVC\n\nfrom sklearn.metrics import mean_squared_error\n\n\n\n\n############################ 切分数据集 ##########################\nprint('开始进行一些前期处理')\ntrain_feature = X_tr_app\ntest_feature = X_ts_app\n    # 五则交叉验证\nn_folds = 5\nprint('处理完毕')\ndf_stack = pd.DataFrame()\ndf_stack['device_id']=data['device_id']\nfor label in [\"age\"]:\n    score = train[label]\n    \n    ########################### lr(LogisticRegression) ################################\n    print('lr stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        clf = LogisticRegression(random_state=1017, C=8)\n        clf.fit(train_feature[tr], score[tr])\n        score_va = clf.predict_proba(train_feature[va])\n        \n        score_te = clf.predict_proba(test_feature)\n        print('得分' + str(mean_squared_error(score[va], clf.predict(train_feature[va]))))\n        stack_train[va] = score_va\n        stack_test+= score_te\n    stack_test /= n_folds\n    \n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['tfidf_lr_classfiy_{}'.format(i)] = stack[:, i]\n    \n\n    ########################### SGD(随机梯度下降) ################################\n    print('sgd stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        sgd = SGDClassifier(random_state=1017, loss='log')\n        sgd.fit(train_feature[tr], score[tr])\n        score_va = sgd.predict_proba(train_feature[va])\n        score_te = sgd.predict_proba(test_feature)\n        print('得分' + str(mean_squared_error(score[va], sgd.predict(train_feature[va]))))\n        stack_train[va] = score_va\n        stack_test+= score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['tfidf_sgd_classfiy_{}'.format(i)] = stack[:, i]\n\n\n    ########################### pac(PassiveAggressiveClassifier) 
################################\n    print('PAC stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        pac = PassiveAggressiveClassifier(random_state=1017)\n        pac.fit(train_feature[tr], score[tr])\n        score_va = pac._predict_proba_lr(train_feature[va])\n        score_te = pac._predict_proba_lr(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], pac.predict(train_feature[va]))))\n        stack_train[va] += score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['tfidf_pac_classfiy_{}'.format(i)] = stack[:, i]\n    \n\n\n    ########################### ridge(RidgeClassfiy) ################################\n    print('RidgeClassfiy stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        ridge = RidgeClassifier(random_state=1017)\n        ridge.fit(train_feature[tr], score[tr])\n        score_va = ridge._predict_proba_lr(train_feature[va])\n        score_te = ridge._predict_proba_lr(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], ridge.predict(train_feature[va]))))\n        stack_train[va] += score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['tfidf_ridge_classfiy_{}'.format(i)] = stack[:, i]\n    \n\n\n    ########################### bnb(BernoulliNB) ################################\n    print('BernoulliNB stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        bnb = BernoulliNB()\n        bnb.fit(train_feature[tr], score[tr])\n        score_va = bnb.predict_proba(train_feature[va])\n        score_te = bnb.predict_proba(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], bnb.predict(train_feature[va]))))\n        stack_train[va] += score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['tfidf_bnb_classfiy_{}'.format(i)] = stack[:, i]\n    \n    ########################### mnb(MultinomialNB) ################################\n    print('MultinomialNB stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        mnb = MultinomialNB()\n        mnb.fit(train_feature[tr], score[tr])\n        score_va = mnb.predict_proba(train_feature[va])\n        score_te = mnb.predict_proba(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], mnb.predict(train_feature[va]))))\n        stack_train[va] += 
score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['tfidf_mnb_classfiy_{}'.format(i)] = stack[:, i]\n    \n\n    ############################ lsvc(LinearSVC) ################################\n    print('LinearSVC stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        lsvc = LinearSVC(random_state=1017)\n        lsvc.fit(train_feature[tr], score[tr])\n        score_va = lsvc._predict_proba_lr(train_feature[va])\n        score_te = lsvc._predict_proba_lr(test_feature)\n        print(score_va)\n        print('score ' + str(mean_squared_error(score[va], lsvc.predict(train_feature[va]))))\n        stack_train[va] += score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['tfidf_lsvc_classfiy_{}'.format(i)] = stack[:, i]\n    \ndf_stack.to_csv('data/tfidf_age.csv', index=None, encoding='utf8')\nprint('tfidf features saved\\n')\n\n\n# In[44]:\n\n\ntfidf_feat=pd.read_csv(\"data/tfidf_age.csv\")\ntf2=pd.read_csv(\"data/pack_tfidf_age.csv\")\ntrain_data=pd.read_csv(\"data/train_data.csv\")\ntest_data=pd.read_csv(\"data/test_data.csv\")\n\n\n# In[41]:\n\n\ntrain_dt = pd.merge(train_data[['device_id','ph_ver']],tfidf_feat,on=\"device_id\",how=\"left\")\ntrain_dt = pd.merge(train_dt,tf2,on=\"device_id\",how=\"left\")\ntest_dt = pd.merge(test_data[['device_id',\"ph_ver\"]],tfidf_feat,on=\"device_id\",how=\"left\")\ntest_dt = pd.merge(test_dt,tf2,on=\"device_id\",how=\"left\")\nfeat=pd.concat([train_dt,test_dt])\nfeat.to_csv(\"data/age_chizhu_feat.csv\",index=False)\n\n\n# In[40]:\n\n\n\n\n\n# In[45]:\n\n\ntfidf_feat=pd.read_csv(\"data/tfidf_age.csv\")\ntf2=pd.read_csv(\"data/pack_tfidf_age.csv\")\ntrain_data=pd.read_csv(\"data/train_data.csv\")\ntest_data=pd.read_csv(\"data/test_data.csv\")\ntrain = pd.merge(train_data,tfidf_feat,on=\"device_id\",how=\"left\")\n# train = pd.merge(train_data,tf2,on=\"device_id\",how=\"left\")\n# train = pd.merge(train_data,app_w2v,on=\"device_id\",how=\"left\")\ntest = pd.merge(test_data,tfidf_feat,on=\"device_id\",how=\"left\")\n# test = pd.merge(test_data,tf2,on=\"device_id\",how=\"left\")\n# test = pd.merge(test_data,app_w2v,on=\"device_id\",how=\"left\")\nfeatures = [x for x in train.columns if x not in ['device_id',\"age\",\"sex\",\"label\",\"app\"]]\nY = train['age'] \n\n\n# In[46]:\n\n\nimport lightgbm as lgb\n# import xgboost as xgb\nfrom sklearn.metrics import auc, log_loss, roc_auc_score,f1_score,recall_score,precision_score\nfrom sklearn.cross_validation import StratifiedKFold\n\nkf = StratifiedKFold(Y, n_folds=5, shuffle=True, random_state=1024)\n\nparams = {\n            'boosting_type': 'gbdt',\n            'metric': {'multi_logloss',}, \n#             'is_unbalance':'True',\n            'learning_rate' : 0.01, \n             'verbose': 0,\n            'num_leaves':32 ,\n            # 'max_depth':8, \n            # 'max_bin':10, \n            # 'lambda_l2': 1, \n            # 'min_child_weight':50,\n            \"num_class\":11,\n            'objective': 'multiclass', \n            'feature_fraction': 0.4,\n            'bagging_fraction':0.7, # 0.9 was the best so far\n            'bagging_freq':3,  # 3 is the best so far\n#             'min_data': 500,\n            'seed': 1024,\n            'nthread': 8,\n            # 'silent': True,\n}\nnum_round = 3500\nearly_stopping_rounds = 100\n\n\n# In[47]:\n\n\naus = []\nsub2 = np.zeros((len(test),11 ))\npred_oob2=np.zeros((len(train),11))\nfor i,(train_index,test_index) in enumerate(kf):\n  \n    tr_x = train[features].reindex(index=train_index, copy=False)\n    tr_y = Y[train_index]\n    te_x = train[features].reindex(index=test_index, copy=False)\n    te_y = Y[test_index]\n\n    d_tr = lgb.Dataset(tr_x, label=tr_y)\n    d_te = lgb.Dataset(te_x, label=te_y)\n    model = lgb.train(params, d_tr, num_boost_round=num_round, \n                      valid_sets=d_te,verbose_eval=200,\n                              early_stopping_rounds=early_stopping_rounds)\n    pred= model.predict(te_x, num_iteration=model.best_iteration)\n    pred_oob2[test_index] =pred\n    \n    a = log_loss(te_y, pred)\n\n    sub2 += model.predict(test[features], num_iteration=model.best_iteration)/5\n\n    print (\"idx: \", i) \n    print (\" loss: %.5f\" % a)\n\n    print (\"best tree num: \", model.best_iteration)\n    aus.append(a)\n\nprint (\"mean\")\nprint (\"loss:       %s\" % (sum(aus) / 5.0))\n\n\n# In[55]:\n\n\n##### feature importance\n\n# import matplotlib.pyplot as plt\n# f=dict(zip(list(train[features].keys()),model.feature_importance()))\n# f=sorted(f.items(),key=lambda d:d[1], reverse = True)\n# f=pd.DataFrame(f,columns=['feature','imp'])\n# plt.bar(range(len(f)),f.imp)\n# plt.xticks(range(len(f)),f.feature,rotation=70,fontsize=20)\n# fig = plt.gcf()\n# fig.set_size_inches(50, 20)\n\n\n# In[56]:\n\n\n# f.ix[:650,:]\n\n\n# In[57]:\n\n\n# features=f.ix[:641,\"feature\"].values\n\n\n# In[58]:\n\n\nres2_1=np.vstack((pred_oob2,sub2))\nres2_1 = pd.DataFrame(res2_1)\n\n\n# In[59]:\n\n\nimport os  # needed for os.path/os.mkdir below; not imported earlier in this script\nif not os.path.exists(\"submit\"):\n    os.mkdir(\"submit\")\nres1.index=range(len(res1))\nres2_1.index=range(len(res2_1))\nfinal_1=res2_1.copy()\nfinal_2=res2_1.copy()\nfor i in range(11):\n    final_1[i]=res1['sex1']*res2_1[i]\n    final_2[i]=res1['sex2']*res2_1[i]\nid_list=pd.concat([train[['device_id']],test[['device_id']]])\nfinal=id_list\nfinal.index=range(len(final))\nfinal.columns= ['DeviceID']\nfinal_pred = pd.concat([final_1,final_2],axis=1)\nfinal=pd.concat([final,final_pred],axis=1)\nfinal.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \n         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', \n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\nfinal.to_csv('submit/lgb_feat_chizhu.csv', index=False)\n\n\n# In[60]:\n\n\ntest['DeviceID']=test['device_id']\nsub=pd.merge(test[['DeviceID']],final,on=\"DeviceID\",how=\"left\")\nsub.to_csv(\"submit/lgb_chizhu.csv\",index=False)\n\n\n# In[61]:\n\n\n# sub.sum(1)\n\n"
  },
  {
    "path": "chizhu/single_model/user_behavior.py",
    "content": "\n# coding: utf-8\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime, timedelta\nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n# %matplotlib inline\nfrom config import path\n#add\nimport gc\n\npacktime = pd.read_table(path+'deviceid_package_start_close.tsv',\n                         names=['device_id', 'app', 'start', 'close'], low_memory=True)\n# packtime.head()\npacktime['peroid'] = (packtime['close'] - packtime['start'])/1000\npacktime['start'] = pd.to_datetime(packtime['start'], unit='ms')\n#packtime['closetime'] = pd.to_datetime(packtime['close'], unit='ms')\ndel packtime['close']\ngc.collect()\n\n#packtime['day'] = packtime['start'].dt.day\n#packtime['month'] = packtime['start'].dt.month\npacktime['hour'] = packtime['start'].dt.hour\npacktime['date'] = packtime['start'].dt.date\npacktime['dayofweek'] = packtime['start'].dt.dayofweek\n#packtime['hour'] = pd.cut(packtime['hour'], bins=4).cat.codes\n\n#平均每天使用设备时间\ndtime = packtime.groupby(['device_id', 'date'])['peroid'].agg('sum')\n#不同时间段占比\nqtime = packtime.groupby(['device_id', 'hour'])['peroid'].agg('sum')\nwtime = packtime.groupby(['device_id', 'dayofweek'])['peroid'].agg('sum')\natime = packtime.groupby(['device_id', 'app'])['peroid'].agg('sum')\n\n\ndapp = packtime[['device_id', 'date', 'app']].drop_duplicates().groupby(\n    ['device_id', 'date'])['app'].agg(' '.join)\ndapp = dapp.reset_index()\ndapp['app_len'] = dapp['app'].apply(lambda x: x.split(' ')).apply(len)\ndapp_stat = dapp.groupby('device_id')['app_len'].agg(\n    {'std': 'std', 'mean': 'mean', 'max': 'max'})\ndapp_stat = dapp_stat.reset_index()\ndapp_stat.columns = ['device_id', 'app_len_std', 'app_len_mean', 'app_len_max']\n# dapp_stat.head()\n\ndtime = dtime.reset_index()\ndtime_stat = dtime.groupby(['device_id'])['peroid'].agg(\n    {'sum': 'sum', 'mean': 'mean', 'std': 'std', 'max': 'max'}).reset_index()\ndtime_stat.columns = ['device_id', 'date_sum',\n                      'date_mean', 'date_std', 'date_max']\n# dtime_stat.head()\n\nqtime = qtime.reset_index()\nftime = qtime.pivot(index='device_id', columns='hour',\n                    values='peroid').fillna(0)\nftime.columns = ['h%s' % i for i in range(24)]\nftime.reset_index(inplace=True)\n# ftime.head()\n\nwtime = wtime.reset_index()\nweektime = wtime.pivot(\n    index='device_id', columns='dayofweek', values='peroid').fillna(0)\nweektime.columns = ['w0', 'w1', 'w2', 'w3', 'w4', 'w5', 'w6']\nweektime.reset_index(inplace=True)\n# weektime.head()\n\natime = atime.reset_index()\napp = atime.groupby(['device_id'])['peroid'].idxmax()\n\n#dapp_stat.shape, dtime_stat.shape, ftime.shape, weektime.shape, app.shape\n\nuser = pd.merge(dapp_stat, dtime_stat, on='device_id', how='left')\nuser = pd.merge(user, ftime, on='device_id', how='left')\nuser = pd.merge(user, weektime, on='device_id', how='left')\nuser = pd.merge(user, atime.iloc[app], on='device_id', how='left')\n\napp_cat = pd.read_table(path+'package_label.tsv',\n                        names=['app', 'category', 'app_name'])\n\ncat_enc = pd.DataFrame(app_cat['category'].value_counts())\ncat_enc['idx'] = range(45)\n\napp_cat['cat_enc'] = 
app_cat['category'].map(cat_enc['idx'])\napp_cat.set_index(['app'], inplace=True)\n\natime['app_cat_enc'] = atime['app'].map(app_cat['cat_enc']).fillna(45)\n\ncat_num = atime.groupby(['device_id', 'app_cat_enc'])[\n    'app'].agg('count').reset_index()\ncat_time = atime.groupby(['device_id', 'app_cat_enc'])[\n    'peroid'].agg('sum').reset_index()\n\napp_cat_num = cat_num.pivot(\n    index='device_id', columns='app_cat_enc', values='app').fillna(0)\napp_cat_num.columns = ['cat%s' % i for i in range(46)]\napp_cat_time = cat_time.pivot(\n    index='device_id', columns='app_cat_enc', values='peroid').fillna(0)\napp_cat_time.columns = ['time%s' % i for i in range(46)]\n\nuser = pd.merge(user, app_cat_num, on='device_id', how='left')\nuser = pd.merge(user, app_cat_time, on='device_id', how='left')\nuser.to_csv('data/user_behavior.csv', index=False)\n\n\n"
  },
  {
    "path": "chizhu/single_model/xgb.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n# get_ipython().run_line_magic('matplotlib', 'inline')\n\n#add\nimport gc\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom scipy.sparse import hstack, vstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\n# from skopt.space import Integer, Categorical, Real, Log10\n# from skopt.utils import use_named_args\n# from skopt import gp_minimize\nfrom gensim.models import Word2Vec, FastText\nimport gensim \nimport re\n# path=\"/dev/shm/chizhu_data/data/\"\n\n\n# In[2]:\n\n\ntfidf_feat=pd.read_csv(\"data/tfidf_classfiy.csv\")\ntf2=pd.read_csv(\"data/tfidf_classfiy_package.csv\")\ntrain_data=pd.read_csv(\"data/train_data.csv\")\ntest_data=pd.read_csv(\"data/test_data.csv\")\n\n\n# In[3]:\n\n\ntrain_data = pd.merge(train_data,tfidf_feat,on=\"device_id\",how=\"left\")\ntrain = pd.merge(train_data,tf2,on=\"device_id\",how=\"left\")\ntest_data = pd.merge(test_data,tfidf_feat,on=\"device_id\",how=\"left\")\ntest = pd.merge(test_data,tf2,on=\"device_id\",how=\"left\")\n\n\n# In[4]:\n\n\nfeatures = [x for x in train.columns if x not in ['device_id', 'sex',\"age\",\"label\",\"app\"]]\nY = train['sex'] - 1\n\n\n# In[19]:\n\n\nimport lightgbm as lgb\nimport xgboost as xgb\nfrom sklearn.metrics import auc, log_loss, roc_auc_score,f1_score,recall_score,precision_score\nfrom sklearn.cross_validation import StratifiedKFold\n\nkf = StratifiedKFold(Y, n_folds=5, shuffle=True, random_state=1024)\nparams={\n\t'booster':'gbtree',\n    \n\t'objective': 'binary:logistic',\n#      'is_unbalance':'True',\n# \t'scale_pos_weight': 1500.0/13458.0,\n        'eval_metric': \"logloss\",\n    \n\t'gamma':0.2,#0.2 is ok\n\t'max_depth':6,\n# \t'lambda':20,\n    # \"alpha\":5,\n        'subsample':0.7,\n        'colsample_bytree':0.4 ,\n#         'min_child_weight':2.5, \n        'eta': 0.01,\n    # 'learning_rate':0.01,\n    \"silent\":1,\n\t'seed':1024,\n\t'nthread':12,\n   \n    }\nnum_round = 3500\nearly_stopping_rounds = 100\n\n\n# In[20]:\n\n\naus = []\nsub1 = np.zeros((len(test), ))\npred_oob1=np.zeros((len(train),))\nfor i,(train_index,test_index) in enumerate(kf):\n  \n    tr_x = train[features].reindex(index=train_index, copy=False)\n    tr_y = Y[train_index]\n    te_x = train[features].reindex(index=test_index, copy=False)\n    te_y = Y[test_index]\n\n    # tr_y=tr_y.apply(lambda x:1 if x>0 else 0)\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    d_tr = xgb.DMatrix(tr_x, label=tr_y)\n    d_te = xgb.DMatrix(te_x, label=te_y)\n    watchlist  = [(d_tr,'train'),\n    (d_te,'val')\n             ]\n    model = xgb.train(params, d_tr, num_boost_round=5500, \n                      evals=watchlist,verbose_eval=200,\n                              early_stopping_rounds=100)\n    pred = model.predict(d_te)\n    pred_oob1[test_index] =pred\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    a = log_loss(te_y, pred)\n\n    sub1 += model.predict(xgb.DMatrix(test[features]))/5\n    \n\n    
print (\"idx: \", i) \n    print (\" loss: %.5f\" % a)\n#     print \" gini: %.5f\" % g\n    aus.append(a)\n\nprint (\"mean\")\nprint (\"auc:       %s\" % (sum(aus) / 5.0))\n\n\n# In[21]:\n\n\npred_oob1 = pd.DataFrame(pred_oob1, columns=['sex2'])\nsub1 = pd.DataFrame(sub1, columns=['sex2'])\nres1=pd.concat([pred_oob1,sub1])\nres1['sex1'] = 1-res1['sex2']\n\n\n# In[22]:\n\n\nimport gc\ngc.collect()\n\n\n# In[23]:\n\n\ntfidf_feat=pd.read_csv(\"data/tfidf_age.csv\")\ntf2=pd.read_csv(\"data/pack_tfidf_age.csv\")\ntrain_data = pd.merge(train_data,tfidf_feat,on=\"device_id\",how=\"left\")\ntrain = pd.merge(train_data,tf2,on=\"device_id\",how=\"left\")\ntest_data = pd.merge(test_data,tfidf_feat,on=\"device_id\",how=\"left\")\ntest = pd.merge(test_data,tf2,on=\"device_id\",how=\"left\")\nfeatures = [x for x in train.columns if x not in ['device_id',\"age\",\"sex\",\"label\",\"app\"]]\nY = train['age'] \n\n\n# In[34]:\n\n\nimport lightgbm as lgb\nimport xgboost as xgb\nfrom sklearn.metrics import auc, log_loss, roc_auc_score,f1_score,recall_score,precision_score\nfrom sklearn.cross_validation import StratifiedKFold\n\nkf = StratifiedKFold(Y, n_folds=5, shuffle=True, random_state=1024)\nparams={\n\t'booster':'gbtree',\n\t'objective': 'multi:softprob',\n#      'is_unbalance':'True',\n# \t'scale_pos_weight': 1500.0/13458.0,\n        'eval_metric': \"mlogloss\",\n    'num_class':11,\n\t'gamma':0.1,#0.2 is ok\n\t'max_depth':6,\n# \t'lambda':20,\n    # \"alpha\":5,\n        'subsample':0.7,\n        'colsample_bytree':0.4 ,\n        # 'min_child_weight':2.5, \n        'eta': 0.01,\n    # 'learning_rate':0.01,\n    \"silent\":1,\n\t'seed':1024,\n\t'nthread':12,\n   \n    }\nnum_round = 3500\nearly_stopping_rounds = 100\n\n\n# In[ ]:\n\n\naus = []\nsub2 = np.zeros((len(test),11 ))\npred_oob2=np.zeros((len(train),11))\nfor i,(train_index,test_index) in enumerate(kf):\n  \n    tr_x = train[features].reindex(index=train_index, copy=False)\n    tr_y = Y[train_index]\n    te_x = train[features].reindex(index=test_index, copy=False)\n    te_y = Y[test_index]\n\n    # tr_y=tr_y.apply(lambda x:1 if x>0 else 0)\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    d_tr = xgb.DMatrix(tr_x, label=tr_y)\n    d_te = xgb.DMatrix(te_x, label=te_y)\n    watchlist  = [(d_tr,'train'),\n    (d_te,'val')\n             ]\n    model = xgb.train(params, d_tr, num_boost_round=5500, \n                      evals=watchlist,verbose_eval=200,\n                              early_stopping_rounds=100)\n    pred = model.predict(d_te)\n    pred_oob2[test_index] =pred\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    a = log_loss(te_y, pred)\n\n    sub2 += model.predict(xgb.DMatrix(test[features]))/5\n    \n\n    print (\"idx: \", i) \n    print (\" loss: %.5f\" % a)\n#     print \" gini: %.5f\" % g\n    aus.append(a)\n\nprint (\"mean\")\nprint (\"auc:       %s\" % (sum(aus) / 5.0))\n\n\n# In[ ]:\n\n\nres2_1=np.vstack((pred_oob2,sub2))\nres2_1 = pd.DataFrame(res2_1)\n\n\n# In[ ]:\n\n\nres1.index=range(len(res1))\nres2_1.index=range(len(res2_1))\nfinal_1=res2_1.copy()\nfinal_2=res2_1.copy()\nfor i in range(11):\n    final_1[i]=res1['sex1']*res2_1[i]\n    final_2[i]=res1['sex2']*res2_1[i]\nid_list=pd.concat([train[['device_id']],test[['device_id']]])\nfinal=id_list\nfinal.index=range(len(final))\nfinal.columns= ['DeviceID']\nfinal_pred = pd.concat([final_1,final_2],1)\nfinal=pd.concat([final,final_pred],1)\nfinal.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \n         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', 
'2-2', '2-3', '2-4', \n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\nfinal.to_csv('submit/xgb_feat_chizhu.csv', index=False)\n\n\n# In[ ]:\n\n\ntest['DeviceID']=test['device_id']\nsub=pd.merge(test[['DeviceID']],final,on=\"DeviceID\",how=\"left\")\nsub.to_csv(\"submit/xgb_chizhu.csv\",index=False)\n\n"
  },
  {
    "path": "chizhu/single_model/xgb_nb.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n# get_ipython().run_line_magic('matplotlib', 'inline')\n\n#add\nimport gc\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom scipy.sparse import hstack, vstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\n# from skopt.space import Integer, Categorical, Real, Log10\n# from skopt.utils import use_named_args\n# from skopt import gp_minimize\nfrom gensim.models import Word2Vec, FastText\nimport gensim \nimport re\n# path=\"/dev/shm/chizhu_data/data/\"\n\n\n# In[2]:\n\n\ntfidf_feat=pd.read_csv(\"data/tfidf_classfiy.csv\")\ntf2=pd.read_csv(\"data/tfidf_classfiy_package.csv\")\ntrain_data=pd.read_csv(\"data/train_data.csv\")\ntest_data=pd.read_csv(\"data/test_data.csv\")\n\n\n# In[4]:\n\n\ntrain_data = pd.merge(train_data,tfidf_feat,on=\"device_id\",how=\"left\")\ntrain = pd.merge(train_data,tf2,on=\"device_id\",how=\"left\")\ntest_data = pd.merge(test_data,tfidf_feat,on=\"device_id\",how=\"left\")\ntest = pd.merge(test_data,tf2,on=\"device_id\",how=\"left\")\n\n\n# In[5]:\n\n\nfeatures = [x for x in train.columns if x not in ['device_id', 'sex',\"age\",\"label\",\"app\"]]\nY = train['sex'] - 1\n\n\n# In[5]:\n\n\nimport lightgbm as lgb\nimport xgboost as xgb\nfrom sklearn.metrics import auc, log_loss, roc_auc_score,f1_score,recall_score,precision_score\nfrom sklearn.cross_validation import StratifiedKFold\n\nkf = StratifiedKFold(Y, n_folds=5, shuffle=True, random_state=1024)\nparams={\n\t'booster':'gbtree',\n\t'objective': 'binary:logistic',\n#      'is_unbalance':'True',\n# \t'scale_pos_weight': 1500.0/13458.0,\n        'eval_metric': \"logloss\",\n    \n\t'gamma':0.2,#0.2 is ok\n\t'max_depth':6,\n# \t'lambda':20,\n    # \"alpha\":5,\n        'subsample':0.7,\n        'colsample_bytree':0.4 ,\n#         'min_child_weight':2.5, \n        'eta': 0.01,\n    # 'learning_rate':0.01,\n    \"silent\":1,\n\t'seed':1024,\n\t'nthread':12,\n   \n    }\nnum_round = 3500\nearly_stopping_rounds = 100\n\n\n# In[6]:\n\n\naus = []\nsub1 = np.zeros((len(test), ))\npred_oob1=np.zeros((len(train),))\nfor i,(train_index,test_index) in enumerate(kf):\n  \n    tr_x = train[features].reindex(index=train_index, copy=False)\n    tr_y = Y[train_index]\n    te_x = train[features].reindex(index=test_index, copy=False)\n    te_y = Y[test_index]\n\n    # tr_y=tr_y.apply(lambda x:1 if x>0 else 0)\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    d_tr = xgb.DMatrix(tr_x, label=tr_y)\n    d_te = xgb.DMatrix(te_x, label=te_y)\n    watchlist  = [(d_tr,'train'),\n    (d_te,'val')\n             ]\n    model = xgb.train(params, d_tr, num_boost_round=5500, \n                      evals=watchlist,verbose_eval=200,\n                              early_stopping_rounds=100)\n    pred = model.predict(d_te)\n    pred_oob1[test_index] =pred\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    a = log_loss(te_y, pred)\n\n    sub1 += model.predict(xgb.DMatrix(test[features]))/5\n    \n\n    print 
(\"idx: \", i) \n    print (\" loss: %.5f\" % a)\n#     print \" gini: %.5f\" % g\n    aus.append(a)\n\nprint (\"mean\")\nprint (\"auc:       %s\" % (sum(aus) / 5.0))\n\n\n# In[7]:\n\n\npred_oob1 = pd.DataFrame(pred_oob1, columns=['sex2'])\nsub1 = pd.DataFrame(sub1, columns=['sex2'])\nres1=pd.concat([pred_oob1,sub1])\nres1['sex1'] = 1-res1['sex2']\n\n\n# In[8]:\n\n\nimport gc\ngc.collect()\n\n\n# In[9]:\n\n\ntfidf_feat=pd.read_csv(\"data/tfidf_age.csv\")\ntf2=pd.read_csv(\"data/pack_tfidf_age.csv\")\ntrain_data = pd.merge(train_data,tfidf_feat,on=\"device_id\",how=\"left\")\ntrain = pd.merge(train_data,tf2,on=\"device_id\",how=\"left\")\ntest_data = pd.merge(test_data,tfidf_feat,on=\"device_id\",how=\"left\")\ntest = pd.merge(test_data,tf2,on=\"device_id\",how=\"left\")\n\n\n# In[10]:\n\n\n####sex1\ntest['sex']=1\n\n\n# In[11]:\n\n\nfeatures = [x for x in train.columns if x not in ['device_id',\"age\",\"label\",\"app\"]]\nY = train['age'] \n\n\n# In[12]:\n\n\nimport lightgbm as lgb\nimport xgboost as xgb\nfrom sklearn.metrics import auc, log_loss, roc_auc_score,f1_score,recall_score,precision_score\nfrom sklearn.cross_validation import StratifiedKFold\n\nkf = StratifiedKFold(Y, n_folds=5, shuffle=True, random_state=1024)\nparams={\n\t'booster':'gbtree',\n\t'objective': 'multi:softprob',\n#      'is_unbalance':'True',\n# \t'scale_pos_weight': 1500.0/13458.0,\n        'eval_metric': \"mlogloss\",\n    'num_class':11,\n\t'gamma':0.1,#0.2 is ok\n\t'max_depth':6,\n# \t'lambda':20,\n    # \"alpha\":5,\n        'subsample':0.7,\n        'colsample_bytree':0.4 ,\n        # 'min_child_weight':2.5, \n        'eta': 0.01,\n    # 'learning_rate':0.01,\n    \"silent\":1,\n\t'seed':1024,\n\t'nthread':12,\n   \n    }\nnum_round = 3500\nearly_stopping_rounds = 100\n\n\n# In[13]:\n\n\naus = []\nsub2 = np.zeros((len(test),11 ))\npred_oob2=np.zeros((len(train),11))\nfor i,(train_index,test_index) in enumerate(kf):\n  \n    tr_x = train[features].reindex(index=train_index, copy=False)\n    tr_y = Y[train_index]\n    te_x = train[features].reindex(index=test_index, copy=False)\n    te_y = Y[test_index]\n\n    # tr_y=tr_y.apply(lambda x:1 if x>0 else 0)\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    d_tr = xgb.DMatrix(tr_x, label=tr_y)\n    d_te = xgb.DMatrix(te_x, label=te_y)\n    watchlist  = [(d_tr,'train'),\n    (d_te,'val')\n             ]\n    model = xgb.train(params, d_tr, num_boost_round=5500, \n                      evals=watchlist,verbose_eval=200,\n                              early_stopping_rounds=100)\n    pred = model.predict(d_te)\n    pred_oob2[test_index] =pred\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    a = log_loss(te_y, pred)\n\n    sub2 += model.predict(xgb.DMatrix(test[features]))/5\n    \n\n    print (\"idx: \", i) \n    print (\" loss: %.5f\" % a)\n#     print \" gini: %.5f\" % g\n    aus.append(a)\n\nprint (\"mean\")\nprint (\"auc:       %s\" % (sum(aus) / 5.0))\n\n\n# In[14]:\n\n\nres2_1=np.vstack((pred_oob2,sub2))\nres2_1 = pd.DataFrame(res2_1)\n\n\n# In[ ]:\n\n\n###sex2\ntest['sex']=2\nfeatures = [x for x in train.columns if x not in ['device_id',\"age\",\"label\",\"app\"]]\nY = train['age'] \n\n\n# In[ ]:\n\n\naus = []\nsub2 = np.zeros((len(test),11 ))\npred_oob2=np.zeros((len(train),11))\nfor i,(train_index,test_index) in enumerate(kf):\n  \n    tr_x = train[features].reindex(index=train_index, copy=False)\n    tr_y = Y[train_index]\n    te_x = train[features].reindex(index=test_index, copy=False)\n    te_y = Y[test_index]\n\n    # tr_y=tr_y.apply(lambda x:1 if 
x>0 else 0)\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    d_tr = xgb.DMatrix(tr_x, label=tr_y)\n    d_te = xgb.DMatrix(te_x, label=te_y)\n    watchlist  = [(d_tr,'train'),\n    (d_te,'val')\n             ]\n    model = xgb.train(params, d_tr, num_boost_round=5500, \n                      evals=watchlist,verbose_eval=200,\n                              early_stopping_rounds=100)\n    pred = model.predict(d_te)\n    pred_oob2[test_index] =pred\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    a = log_loss(te_y, pred)\n\n    sub2 += model.predict(xgb.DMatrix(test[features]))/5\n    \n\n    print (\"idx: \", i) \n    print (\" loss: %.5f\" % a)\n#     print \" gini: %.5f\" % g\n    aus.append(a)\n\nprint (\"mean\")\nprint (\"auc:       %s\" % (sum(aus) / 5.0))\n\n\n# In[ ]:\n\n\nres2_2=np.vstack((pred_oob2,sub2))\nres2_2 = pd.DataFrame(res2_2)\n\n\n# In[ ]:\n\n\nres1.index=range(len(res1))\nres2_1.index=range(len(res2_1))\nres2_2.index=range(len(res2_2))\nfinal_1=res2_1.copy()\nfinal_2=res2_2.copy()\nfor i in range(11):\n    final_1[i]=res1['sex1']*res2_1[i]\n    final_2[i]=res1['sex2']*res2_2[i]\nid_list=pd.concat([train[['device_id']],test[['device_id']]])\nfinal=id_list\nfinal.index=range(len(final))\nfinal.columns= ['DeviceID']\nfinal_pred = pd.concat([final_1,final_2],1)\nfinal=pd.concat([final,final_pred],1)\nfinal.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \n         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', \n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\nfinal.to_csv('submit/xgb_feat_chizhu_nb.csv', index=False)\n\n\n# In[ ]:\n\n\ntest['DeviceID']=test['device_id']\nsub=pd.merge(test[['DeviceID']],final,on=\"DeviceID\",how=\"left\")\nsub.to_csv(\"submit/xgb_chizhu_nb.csv\",index=False)\n\n"
  },
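The script above assembles the 22 submission columns by factoring the joint label: a binary model supplies P(sex) and an 11-class model supplies P(age | sex), and the products of the two fill the `1-*` and `2-*` columns. A minimal numpy sketch of that combination step (synthetic probabilities, not competition data):

```python
# Sketch only: random stand-ins for the two models' outputs.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
n = 4                                      # pretend there are 4 devices
p_sex2 = rng.random(n)                     # binary model's P(sex=2)
p_sex1 = 1.0 - p_sex2                      # P(sex=1), as in res1['sex1'] = 1 - res1['sex2']
p_age_s1 = rng.dirichlet(np.ones(11), n)   # stand-in for P(age | sex=1); rows sum to 1
p_age_s2 = rng.dirichlet(np.ones(11), n)   # stand-in for P(age | sex=2)

cols = ['1-%d' % a for a in range(11)] + ['2-%d' % a for a in range(11)]
joint = np.hstack([p_age_s1 * p_sex1[:, None], p_age_s2 * p_sex2[:, None]])
sub = pd.DataFrame(joint, columns=cols)
assert np.allclose(sub.sum(axis=1), 1.0)   # each row is a distribution over the 22 classes
```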
  {
    "path": "chizhu/single_model/yg_best_nn.py",
    "content": "import pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom datetime import datetime, timedelta\nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n# %matplotlib inline\n\n#add\nfrom category_encoders import OrdinalEncoder\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom scipy.sparse import hstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\nfrom gensim.models import FastText, Word2Vec\nimport re\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.preprocessing.text import Tokenizer, text_to_word_sequence\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing import text, sequence\nfrom keras.callbacks import *\nfrom keras.layers.advanced_activations import LeakyReLU, PReLU\nimport keras.backend as K\nfrom keras.optimizers import *\nfrom keras.utils import to_categorical\nfrom config import path\npackages = pd.read_csv(path+'deviceid_packages.tsv',\n                       sep='\\t', names=['device_id', 'apps'])\ntest = pd.read_csv(path+'deviceid_test.tsv',\n                   sep='\\t', names=['device_id'])\ntrain = pd.read_csv(path+'deviceid_train.tsv',\n                    sep='\\t', names=['device_id', 'sex', 'age'])\n\nbrand = pd.read_table(path+'deviceid_brand.tsv',\n                      names=['device_id', 'vendor', 'version'])\nbehave = pd.read_csv('data/user_behavior.csv')\n\nbrand['phone_version'] = brand['vendor'] + ' ' + brand['version']\ntrain = pd.merge(brand[['device_id', 'phone_version']],\n                 train, on='device_id', how='right')\ntest = pd.merge(brand[['device_id', 'phone_version']],\n                test, on='device_id', how='right')\n\ntrain = pd.merge(train, behave, on='device_id', how='left')\ntest = pd.merge(test, behave, on='device_id', how='left')\n\npackages['app_lenghth'] = packages['apps'].apply(\n    lambda x: x.split(',')).apply(lambda x: len(x))\npackages['app_list'] = packages['apps'].apply(lambda x: x.split(','))\ntrain = pd.merge(train, packages, on='device_id', how='left')\ntest = pd.merge(test, packages, on='device_id', how='left')\n\nembed_size = 128\nfastmodel = Word2Vec(list(packages['app_list']), size=embed_size, window=4, min_count=3, negative=2,\n                     sg=1, sample=0.002, hs=1, workers=4)\n\nembedding_fast = pd.DataFrame([fastmodel[word]\n                               for word in (fastmodel.wv.vocab)])\nembedding_fast['app'] = list(fastmodel.wv.vocab)\nembedding_fast.columns = [\"fdim_%s\" %\n                          str(i) for i in range(embed_size)]+[\"app\"]\n\n\ntokenizer = Tokenizer(lower=False, char_level=False, split=',')\n\ntokenizer.fit_on_texts(list(packages['apps']))\n\nX_seq = tokenizer.texts_to_sequences(train['apps'])\nX_test_seq = tokenizer.texts_to_sequences(test['apps'])\n\nmaxlen = 50\nX = pad_sequences(X_seq, maxlen=maxlen, value=0)\nX_test = pad_sequences(X_test_seq, maxlen=maxlen, value=0)\nY_sex = train['sex']-1\n\nmax_feaures = 35001\nembedding_matrix = np.zeros((max_feaures, embed_size))\nfor word in tokenizer.word_index:\n    if word not in fastmodel.wv.vocab:\n        continue\n    embedding_matrix[tokenizer.word_index[word]] = fastmodel[word]\n\n\nX_h = 
train[['h%s' % i for i in range(24)]].values\nX_h_test = test[['h%s' % i for i in range(24)]].values\n\n\nclass AdamW(Optimizer):\n    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, weight_decay=1e-4,  # decoupled weight decay (1/4)\n                 epsilon=1e-8, decay=0., **kwargs):\n        super(AdamW, self).__init__(**kwargs)\n        with K.name_scope(self.__class__.__name__):\n            self.iterations = K.variable(0, dtype='int64', name='iterations')\n            self.lr = K.variable(lr, name='lr')\n            self.beta_1 = K.variable(beta_1, name='beta_1')\n            self.beta_2 = K.variable(beta_2, name='beta_2')\n            self.decay = K.variable(decay, name='decay')\n            # decoupled weight decay (2/4)\n            self.wd = K.variable(weight_decay, name='weight_decay')\n        self.epsilon = epsilon\n        self.initial_decay = decay\n\n    @interfaces.legacy_get_updates_support\n    def get_updates(self, loss, params):\n        grads = self.get_gradients(loss, params)\n        self.updates = [K.update_add(self.iterations, 1)]\n        wd = self.wd  # decoupled weight decay (3/4)\n\n        lr = self.lr\n        if self.initial_decay > 0:\n            lr *= (1. / (1. + self.decay * K.cast(self.iterations,\n                                                  K.dtype(self.decay))))\n\n        t = K.cast(self.iterations, K.floatx()) + 1\n        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /\n                     (1. - K.pow(self.beta_1, t)))\n\n        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n        self.weights = [self.iterations] + ms + vs\n\n        for p, g, m, v in zip(params, grads, ms, vs):\n            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g\n            v_t = (self.beta_2 * v) + (1. 
- self.beta_2) * K.square(g)\n            # decoupled weight decay (4/4)\n            p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon) - lr * wd * p\n\n            self.updates.append(K.update(m, m_t))\n            self.updates.append(K.update(v, v_t))\n            new_p = p_t\n\n            # Apply constraints.\n            if getattr(p, 'constraint', None) is not None:\n                new_p = p.constraint(new_p)\n\n            self.updates.append(K.update(p, new_p))\n        return self.updates\n\n    def get_config(self):\n        config = {'lr': float(K.get_value(self.lr)),\n                  'beta_1': float(K.get_value(self.beta_1)),\n                  'beta_2': float(K.get_value(self.beta_2)),\n                  'decay': float(K.get_value(self.decay)),\n                  'weight_decay': float(K.get_value(self.wd)),\n                  'epsilon': self.epsilon}\n        base_config = super(AdamW, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\n\ndef model_conv1D(embedding_matrix):\n\n    K.clear_session()\n    # The embedding layer containing the word vectors\n    emb_layer = Embedding(\n        input_dim=embedding_matrix.shape[0],\n        output_dim=embedding_matrix.shape[1],\n        weights=[embedding_matrix],\n        input_length=maxlen,\n        trainable=False\n    )\n    lstm_layer = Bidirectional(\n        GRU(128, recurrent_dropout=0.15, dropout=0.15, return_sequences=True))\n\n    # 1D convolutions that can iterate over the word vectors\n    conv1 = Conv1D(filters=128, kernel_size=1,\n                   padding='same', activation='relu',)\n    conv2 = Conv1D(filters=64, kernel_size=2,\n                   padding='same', activation='relu', )\n    conv3 = Conv1D(filters=64, kernel_size=3,\n                   padding='same', activation='relu',)\n    conv5 = Conv1D(filters=32, kernel_size=5,\n                   padding='same', activation='relu',)\n\n    # Define inputs\n    seq = Input(shape=(maxlen,))\n\n    # Run inputs through embedding\n    emb = emb_layer(seq)\n\n    lstm = lstm_layer(emb)\n    # Run through CONV + GAP layers\n    conv1a = conv1(lstm)\n    gap1a = GlobalAveragePooling1D()(conv1a)\n    gmp1a = GlobalMaxPool1D()(conv1a)\n\n    conv2a = conv2(lstm)\n    gap2a = GlobalAveragePooling1D()(conv2a)\n    gmp2a = GlobalMaxPool1D()(conv2a)\n\n    conv3a = conv3(lstm)\n    gap3a = GlobalAveragePooling1D()(conv3a)\n    gmp3a = GlobalMaxPooling1D()(conv3a)\n\n    conv5a = conv5(lstm)\n    gap5a = GlobalAveragePooling1D()(conv5a)\n    gmp5a = GlobalMaxPooling1D()(conv5a)\n\n    hin = Input(shape=(24, ))\n    htime = Dense(6, activation='relu')(hin)\n    merge1 = concatenate([gmp1a, gmp1a, gmp1a, gmp1a, htime])\n\n    # The MLP that determines the outcome\n    x = Dropout(0.3)(merge1)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.25)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.25)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.25)(x)\n    x = BatchNormalization()(x)\n    pred = Dense(1, activation='sigmoid')(x)\n\n    # model = Model(inputs=[seq1, seq2, magic_input, distance_input], outputs=pred)\n    model = Model(inputs=[seq, hin], outputs=pred)\n    model.compile(loss='binary_crossentropy',\n                  optimizer=AdamW(weight_decay=0.08,))\n\n    return model\n\n\nkfold = StratifiedKFold(n_splits=5, random_state=20, shuffle=True)\nsub1 = np.zeros((X_test.shape[0], 
))\noof_pref1 = np.zeros((X.shape[0], 1))\nscore = []\ncount = 0\nfor i, (train_index, test_index) in enumerate(kfold.split(X, Y_sex)):\n    print(\"FOLD | \", count+1)\n    filepath = \"sex_weights_best_%d.h5\" % count\n    checkpoint = ModelCheckpoint(\n        filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n    reduce_lr = ReduceLROnPlateau(\n        monitor='val_loss', factor=0.8, patience=2, min_lr=0.0001, verbose=1)\n    earlystopping = EarlyStopping(\n        monitor='val_loss', min_delta=0.0001, patience=6, verbose=1, mode='auto')\n    callbacks = [checkpoint, reduce_lr, earlystopping]\n\n    model_sex = model_conv1D(embedding_matrix)\n    X_tr, X_vl, X_tr2, X_vl2, y_tr, y_vl = X[train_index], X[test_index], X_h[\n        train_index], X_h[test_index], Y_sex[train_index], Y_sex[test_index]\n    hist = model_sex.fit([X_tr, X_tr2], y_tr, batch_size=256, epochs=50, validation_data=([X_vl, X_vl2], y_vl),\n                         callbacks=callbacks, verbose=1, shuffle=True)\n    model_sex.load_weights(filepath)\n    sub1 += np.squeeze(model_sex.predict([X_test, X_h_test]))/kfold.n_splits\n    oof_pref1[test_index] = model_sex.predict([X_vl, X_vl2])\n    score.append(np.min(hist.history['val_loss']))\n    count += 1\nprint('log loss:', np.mean(score))\n\n\noof_pref1 = pd.DataFrame(oof_pref1, columns=['sex2'])\nsub1 = pd.DataFrame(sub1, columns=['sex2'])\nres1 = pd.concat([oof_pref1, sub1])\nres1['sex1'] = 1-res1['sex2']\n# res1.to_csv(\"res1.csv\", index=False)\n\n\ndef model_age_conv(embedding_matrix):\n\n    # The embedding layer containing the word vectors\n    K.clear_session()\n    emb_layer = Embedding(\n        input_dim=embedding_matrix.shape[0],\n        output_dim=embedding_matrix.shape[1],\n        weights=[embedding_matrix],\n        input_length=maxlen,\n        trainable=False\n    )\n    lstm_layer = Bidirectional(\n        GRU(128, recurrent_dropout=0.15, dropout=0.15, return_sequences=True))\n\n    # 1D convolutions that can iterate over the word vectors\n    conv1 = Conv1D(filters=128, kernel_size=1,\n                   padding='same', activation='relu',)\n    conv2 = Conv1D(filters=64, kernel_size=2,\n                   padding='same', activation='relu', )\n    conv3 = Conv1D(filters=64, kernel_size=3,\n                   padding='same', activation='relu',)\n    conv5 = Conv1D(filters=32, kernel_size=5,\n                   padding='same', activation='relu',)\n\n    # Define inputs\n    seq = Input(shape=(maxlen,))\n\n    # Run inputs through embedding\n    emb = emb_layer(seq)\n\n    lstm = lstm_layer(emb)\n    # Run through CONV + GAP layers\n    conv1a = conv1(lstm)\n    gap1a = GlobalAveragePooling1D()(conv1a)\n    gmp1a = GlobalMaxPool1D()(conv1a)\n\n    conv2a = conv2(lstm)\n    gap2a = GlobalAveragePooling1D()(conv2a)\n    gmp2a = GlobalMaxPool1D()(conv2a)\n\n    conv3a = conv3(lstm)\n    gap3a = GlobalAveragePooling1D()(conv3a)\n    gmp3a = GlobalMaxPooling1D()(conv3a)\n\n    conv5a = conv5(lstm)\n    gap5a = GlobalAveragePooling1D()(conv5a)\n    gmp5a = GlobalMaxPooling1D()(conv5a)\n\n    merge1 = concatenate([gap1a, gap2a, gap3a, gap5a])\n\n    # The MLP that determines the outcome\n    x = Dropout(0.3)(merge1)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = 
BatchNormalization()(x)\n    pred = Dense(11, activation='softmax')(x)\n\n    model = Model(inputs=seq, outputs=pred)\n    model.compile(loss='categorical_crossentropy',\n                  optimizer=AdamW(weight_decay=0.08,))\n\n    return model\n\n\nY_age = to_categorical(train['age'])\n\nsub2 = np.zeros((X_test.shape[0], 11))\noof_pref2 = np.zeros((X.shape[0], 11))\nscore = []\ncount = 0\nfor i, (train_index, test_index) in enumerate(kfold.split(X, train['age'])):\n\n    print(\"FOLD | \", count+1)\n\n    filepath2 = \"age_weights_best_%d.h5\" % count\n    checkpoint2 = ModelCheckpoint(\n        filepath2, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n    reduce_lr2 = ReduceLROnPlateau(\n        monitor='val_loss', factor=0.8, patience=2, min_lr=0.0001, verbose=1)\n    earlystopping2 = EarlyStopping(\n        monitor='val_loss', min_delta=0.0001, patience=8, verbose=1, mode='auto')\n    callbacks2 = [checkpoint2, reduce_lr2, earlystopping2]\n\n    X_tr, X_vl, y_tr, y_vl = X[train_index], X[test_index], Y_age[train_index], Y_age[test_index]\n\n    model_age = model_age_conv(embedding_matrix)\n    hist = model_age.fit(X_tr, y_tr, batch_size=256, epochs=50, validation_data=(X_vl, y_vl),\n                         callbacks=callbacks2, verbose=2, shuffle=True)\n\n    model_age.load_weights(filepath2)\n    oof_pref2[test_index] = model_age.predict(X_vl)\n    sub2 += model_age.predict(X_test)/kfold.n_splits\n    score.append(np.min(hist.history['val_loss']))\n    count += 1\nprint('log loss:', np.mean(score))\n\n\nres2_1 = np.vstack((oof_pref2, sub2))\nres2_1 = pd.DataFrame(res2_1)\n# res2_1.to_csv(\"res2.csv\", index=False)\n\nres1.index = range(len(res1))\nres2_1.index = range(len(res2_1))\nfinal_1 = res2_1.copy()\nfinal_2 = res2_1.copy()\nfor i in range(11):\n    final_1[i] = res1['sex1']*res2_1[i]\n    final_2[i] = res1['sex2']*res2_1[i]\nid_list = pd.concat([train[['device_id']], test[['device_id']]])\nfinal = id_list\nfinal.index = range(len(final))\nfinal.columns = ['DeviceID']\nfinal_pred = pd.concat([final_1, final_2], 1)\nfinal = pd.concat([final, final_pred], 1)\nfinal.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6',\n                 '1-7', '1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4',\n                 '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\nfinal.to_csv('submit/yg_best_nn.csv', index=False)\n"
  },
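`yg_best_nn.py` ships its own `AdamW` because the Keras of that era only provided plain Adam. The symbolic graph built by `get_updates` above should reduce to the per-step arithmetic below; a plain-numpy sketch (illustrative only, no learning-rate schedule), where the final `lr * wd * p` term is the decoupled weight decay applied straight to the weights instead of through the gradient:

```python
import numpy as np

def adamw_step(p, g, m, v, t, lr=0.001, beta1=0.9, beta2=0.999,
               eps=1e-8, wd=1e-4):
    """One AdamW update for parameter p with gradient g at step t (t >= 1)."""
    m = beta1 * m + (1 - beta1) * g                         # first-moment EMA
    v = beta2 * v + (1 - beta2) * g ** 2                    # second-moment EMA
    lr_t = lr * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)  # bias-corrected step size
    p = p - lr_t * m / (np.sqrt(v) + eps) - lr * wd * p     # decoupled decay term
    return p, m, v
```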
  {
    "path": "chizhu/stacking/all_feat/xgb__nurbs_nb.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 1,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": [\n       \"['xgb_final_nb.csv',\\n\",\n       \" 'deviceid_train.tsv',\\n\",\n       \" 'feat.csv.zip',\\n\",\n       \" '.DS_Store',\\n\",\n       \" 'thluo_train_best_feat.csv',\\n\",\n       \" 'feat.csv',\\n\",\n       \" 'xgb_feat_final_nb.csv',\\n\",\n       \" 'xgb_nb.ipynb',\\n\",\n       \" 'nurbs_feature_all.csv',\\n\",\n       \" '.ipynb_checkpoints',\\n\",\n       \" 'deviceid_test.tsv']\"\n      ]\n     },\n     \"execution_count\": 1,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"import pandas as pd\\n\",\n    \"import seaborn as sns\\n\",\n    \"import numpy as np\\n\",\n    \"from tqdm import tqdm\\n\",\n    \"from sklearn.decomposition import LatentDirichletAllocation\\n\",\n    \"from sklearn.model_selection import train_test_split\\n\",\n    \"from sklearn.metrics import accuracy_score\\n\",\n    \"import lightgbm as lgb\\n\",\n    \"from datetime import datetime,timedelta  \\n\",\n    \"import matplotlib.pyplot as plt\\n\",\n    \"import time\\n\",\n    \"from sklearn.feature_extraction.text import TfidfTransformer\\n\",\n    \"from sklearn.feature_extraction.text import CountVectorizer\\n\",\n    \"%matplotlib inline\\n\",\n    \"\\n\",\n    \"#add\\n\",\n    \"import gc\\n\",\n    \"from sklearn import preprocessing\\n\",\n    \"from sklearn.feature_extraction.text import TfidfVectorizer\\n\",\n    \"\\n\",\n    \"from scipy.sparse import hstack, vstack\\n\",\n    \"from sklearn.model_selection import StratifiedKFold\\n\",\n    \"from sklearn.model_selection import cross_val_score\\n\",\n    \"# from skopt.space import Integer, Categorical, Real, Log10\\n\",\n    \"# from skopt.utils import use_named_args\\n\",\n    \"# from skopt import gp_minimize\\n\",\n    \"from gensim.models import Word2Vec, FastText\\n\",\n    \"import gensim \\n\",\n    \"import re\\n\",\n    \"import os\\n\",\n    \"path=\\\"./\\\"\\n\",\n    \"os.listdir(path)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 2,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"train_id=pd.read_csv(\\\"deviceid_train.tsv\\\",sep=\\\"\\\\t\\\",names=['device_id','sex','age'])\\n\",\n    \"test_id=pd.read_csv(\\\"deviceid_test.tsv\\\",sep=\\\"\\\\t\\\",names=['device_id'])\\n\",\n    \"all_id=pd.concat([train_id[['device_id']],test_id[['device_id']]])\\n\",\n    \"nurbs=pd.read_csv(\\\"nurbs_feature_all.csv\\\")\\n\",\n    \"nurbs.columns=[\\\"nurbs_\\\"+str(i) for i in nurbs.columns]\\n\",\n    \"all_id.index=range(len(all_id))\\n\",\n    \"nurbs['device_id']=all_id\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 2,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"th=pd.read_csv(\\\"thluo_train_best_feat.csv\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 4,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"feat=pd.merge(th,nurbs,on=\\\"device_id\\\",how=\\\"left\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 5,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"feat.to_csv(\\\"feat.csv\\\",index=False)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 27,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    
\"train=pd.merge(train_id,feat,on=\\\"device_id\\\",how=\\\"left\\\")\\n\",\n    \"test=pd.merge(test_id,feat,on=\\\"device_id\\\",how=\\\"left\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 29,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"features = [x for x in train.columns if x not in ['device_id', 'sex',\\\"age\\\",]]\\n\",\n    \"Y = train['sex'] - 1\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 32,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stderr\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"/Users/chizhu/anaconda3/lib/python3.6/site-packages/sklearn/cross_validation.py:41: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.\\n\",\n      \"  \\\"This module will be removed in 0.20.\\\", DeprecationWarning)\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"\\n\",\n    \"import xgboost as xgb\\n\",\n    \"from sklearn.metrics import auc, log_loss, roc_auc_score,f1_score,recall_score,precision_score\\n\",\n    \"from sklearn.cross_validation import StratifiedKFold\\n\",\n    \"\\n\",\n    \"kf = StratifiedKFold(Y, n_folds=5, shuffle=True, random_state=1024)\\n\",\n    \"params={\\n\",\n    \"\\t'booster':'gbtree',\\n\",\n    \"\\t'objective': 'binary:logistic',\\n\",\n    \"#      'is_unbalance':'True',\\n\",\n    \"# \\t'scale_pos_weight': 1500.0/13458.0,\\n\",\n    \"        'eval_metric': \\\"logloss\\\",\\n\",\n    \"    \\n\",\n    \"\\t'gamma':0.2,#0.2 is ok\\n\",\n    \"\\t'max_depth':6,\\n\",\n    \"# \\t'lambda':20,\\n\",\n    \"    # \\\"alpha\\\":5,\\n\",\n    \"        'subsample':0.7,\\n\",\n    \"        'colsample_bytree':0.4 ,\\n\",\n    \"#         'min_child_weight':2.5, \\n\",\n    \"        'eta': 0.01,\\n\",\n    \"    # 'learning_rate':0.01,\\n\",\n    \"    \\\"silent\\\":1,\\n\",\n    \"\\t'seed':1024,\\n\",\n    \"\\t'nthread':12,\\n\",\n    \"   \\n\",\n    \"    }\\n\",\n    \"num_round = 3500\\n\",\n    \"early_stopping_rounds = 100\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 33,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"[0]\\ttrain-logloss:0.691359\\tval-logloss:0.691488\\n\",\n      \"Multiple eval metrics have been passed: 'val-logloss' will be used for early stopping.\\n\",\n      \"\\n\",\n      \"Will train until val-logloss hasn't improved in 100 rounds.\\n\",\n      \"[200]\\ttrain-logloss:0.566693\\tval-logloss:0.595722\\n\",\n      \"[400]\\ttrain-logloss:0.53806\\tval-logloss:0.590461\\n\",\n      \"[600]\\ttrain-logloss:0.519054\\tval-logloss:0.590032\\n\",\n      \"Stopping. 
Best iteration:\\n\",\n      \"[529]\\ttrain-logloss:0.525748\\tval-logloss:0.589953\\n\",\n      \"\\n\",\n      \"idx:  0\\n\",\n      \" loss: 0.59015\\n\",\n      \"[0]\\ttrain-logloss:0.691215\\tval-logloss:0.691369\\n\",\n      \"Multiple eval metrics have been passed: 'val-logloss' will be used for early stopping.\\n\",\n      \"\\n\",\n      \"Will train until val-logloss hasn't improved in 100 rounds.\\n\",\n      \"[200]\\ttrain-logloss:0.56648\\tval-logloss:0.596397\\n\",\n      \"[400]\\ttrain-logloss:0.538516\\tval-logloss:0.591125\\n\",\n      \"[600]\\ttrain-logloss:0.51823\\tval-logloss:0.590809\\n\",\n      \"Stopping. Best iteration:\\n\",\n      \"[595]\\ttrain-logloss:0.518718\\tval-logloss:0.590732\\n\",\n      \"\\n\",\n      \"idx:  1\\n\",\n      \" loss: 0.59099\\n\",\n      \"[0]\\ttrain-logloss:0.691228\\tval-logloss:0.69143\\n\",\n      \"Multiple eval metrics have been passed: 'val-logloss' will be used for early stopping.\\n\",\n      \"\\n\",\n      \"Will train until val-logloss hasn't improved in 100 rounds.\\n\",\n      \"[200]\\ttrain-logloss:0.566822\\tval-logloss:0.596484\\n\",\n      \"[400]\\ttrain-logloss:0.538456\\tval-logloss:0.591576\\n\",\n      \"[600]\\ttrain-logloss:0.518551\\tval-logloss:0.590934\\n\",\n      \"Stopping. Best iteration:\\n\",\n      \"[641]\\ttrain-logloss:0.514957\\tval-logloss:0.590818\\n\",\n      \"\\n\",\n      \"idx:  2\\n\",\n      \" loss: 0.59091\\n\",\n      \"[0]\\ttrain-logloss:0.691224\\tval-logloss:0.691404\\n\",\n      \"Multiple eval metrics have been passed: 'val-logloss' will be used for early stopping.\\n\",\n      \"\\n\",\n      \"Will train until val-logloss hasn't improved in 100 rounds.\\n\",\n      \"[200]\\ttrain-logloss:0.565394\\tval-logloss:0.598566\\n\",\n      \"[400]\\ttrain-logloss:0.536792\\tval-logloss:0.594022\\n\",\n      \"Stopping. Best iteration:\\n\",\n      \"[458]\\ttrain-logloss:0.531227\\tval-logloss:0.593837\\n\",\n      \"\\n\",\n      \"idx:  3\\n\",\n      \" loss: 0.59396\\n\",\n      \"[0]\\ttrain-logloss:0.691344\\tval-logloss:0.691511\\n\",\n      \"Multiple eval metrics have been passed: 'val-logloss' will be used for early stopping.\\n\",\n      \"\\n\",\n      \"Will train until val-logloss hasn't improved in 100 rounds.\\n\",\n      \"[200]\\ttrain-logloss:0.566356\\tval-logloss:0.595648\\n\",\n      \"[400]\\ttrain-logloss:0.537421\\tval-logloss:0.591302\\n\",\n      \"[600]\\ttrain-logloss:0.518249\\tval-logloss:0.591042\\n\",\n      \"Stopping. 
Best iteration:\\n\",\n      \"[525]\\ttrain-logloss:0.525041\\tval-logloss:0.590956\\n\",\n      \"\\n\",\n      \"idx:  4\\n\",\n      \" loss: 0.59108\\n\",\n      \"mean\\n\",\n      \"auc:       0.5914183145833928\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"aus = []\\n\",\n    \"sub1 = np.zeros((len(test), ))\\n\",\n    \"pred_oob1=np.zeros((len(train),))\\n\",\n    \"for i,(train_index,test_index) in enumerate(kf):\\n\",\n    \"  \\n\",\n    \"    tr_x = train[features].reindex(index=train_index, copy=False)\\n\",\n    \"    tr_y = Y[train_index]\\n\",\n    \"    te_x = train[features].reindex(index=test_index, copy=False)\\n\",\n    \"    te_y = Y[test_index]\\n\",\n    \"\\n\",\n    \"    # tr_y=tr_y.apply(lambda x:1 if x>0 else 0)\\n\",\n    \"    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\\n\",\n    \"    d_tr = xgb.DMatrix(tr_x, label=tr_y)\\n\",\n    \"    d_te = xgb.DMatrix(te_x, label=te_y)\\n\",\n    \"    watchlist  = [(d_tr,'train'),\\n\",\n    \"    (d_te,'val')\\n\",\n    \"             ]\\n\",\n    \"    model = xgb.train(params, d_tr, num_boost_round=5500, \\n\",\n    \"                      evals=watchlist,verbose_eval=200,\\n\",\n    \"                              early_stopping_rounds=100)\\n\",\n    \"    pred = model.predict(d_te)\\n\",\n    \"    pred_oob1[test_index] =pred\\n\",\n    \"    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\\n\",\n    \"    a = log_loss(te_y, pred)\\n\",\n    \"\\n\",\n    \"    sub1 += model.predict(xgb.DMatrix(test[features]))/5\\n\",\n    \"    \\n\",\n    \"\\n\",\n    \"    print (\\\"idx: \\\", i) \\n\",\n    \"    print (\\\" loss: %.5f\\\" % a)\\n\",\n    \"#     print \\\" gini: %.5f\\\" % g\\n\",\n    \"    aus.append(a)\\n\",\n    \"\\n\",\n    \"print (\\\"mean\\\")\\n\",\n    \"print (\\\"auc:       %s\\\" % (sum(aus) / 5.0))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 41,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"pred_oob1 = pd.DataFrame(pred_oob1, columns=['sex2'])\\n\",\n    \"sub1 = pd.DataFrame(sub1, columns=['sex2'])\\n\",\n    \"res1=pd.concat([pred_oob1,sub1])\\n\",\n    \"res1['sex1'] = 1-res1['sex2']\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 48,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": [\n       \"1012\"\n      ]\n     },\n     \"execution_count\": 48,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"import gc\\n\",\n    \"gc.collect()\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 49,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"####sex1\\n\",\n    \"test['sex']=1\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 50,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"features = [x for x in train.columns if x not in ['device_id',\\\"age\\\"]]\\n\",\n    \"Y = train['age'] \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 51,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import lightgbm as lgb\\n\",\n    \"import xgboost as xgb\\n\",\n    \"from sklearn.metrics import auc, log_loss, roc_auc_score,f1_score,recall_score,precision_score\\n\",\n    \"from sklearn.cross_validation import StratifiedKFold\\n\",\n    \"\\n\",\n    \"kf = StratifiedKFold(Y, n_folds=5, shuffle=True, random_state=1024)\\n\",\n    \"params={\\n\",\n    \"\\t'booster':'gbtree',\\n\",\n    \"\\t'objective': 
'multi:softprob',\\n\",\n    \"#      'is_unbalance':'True',\\n\",\n    \"# \\t'scale_pos_weight': 1500.0/13458.0,\\n\",\n    \"        'eval_metric': \\\"mlogloss\\\",\\n\",\n    \"    'num_class':11,\\n\",\n    \"\\t'gamma':0.1,#0.2 is ok\\n\",\n    \"\\t'max_depth':6,\\n\",\n    \"# \\t'lambda':20,\\n\",\n    \"    # \\\"alpha\\\":5,\\n\",\n    \"        'subsample':0.7,\\n\",\n    \"        'colsample_bytree':0.4 ,\\n\",\n    \"        # 'min_child_weight':2.5, \\n\",\n    \"        'eta': 0.01,\\n\",\n    \"    # 'learning_rate':0.01,\\n\",\n    \"    \\\"silent\\\":1,\\n\",\n    \"\\t'seed':1024,\\n\",\n    \"\\t'nthread':12,\\n\",\n    \"   \\n\",\n    \"    }\\n\",\n    \"num_round = 3500\\n\",\n    \"early_stopping_rounds = 100\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 52,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"[0]\\ttrain-mlogloss:2.39131\\tval-mlogloss:2.39264\\n\",\n      \"Multiple eval metrics have been passed: 'val-mlogloss' will be used for early stopping.\\n\",\n      \"\\n\",\n      \"Will train until val-mlogloss hasn't improved in 100 rounds.\\n\",\n      \"[200]\\ttrain-mlogloss:1.80941\\tval-mlogloss:2.00508\\n\",\n      \"[400]\\ttrain-mlogloss:1.60383\\tval-mlogloss:1.94\\n\",\n      \"[600]\\ttrain-mlogloss:1.472\\tval-mlogloss:1.9241\\n\",\n      \"[800]\\ttrain-mlogloss:1.36689\\tval-mlogloss:1.92024\\n\",\n      \"[1000]\\ttrain-mlogloss:1.273\\tval-mlogloss:1.91999\\n\",\n      \"Stopping. Best iteration:\\n\",\n      \"[918]\\ttrain-mlogloss:1.31045\\tval-mlogloss:1.91983\\n\",\n      \"\\n\",\n      \"idx:  0\\n\",\n      \" loss: 1.91985\\n\",\n      \"[0]\\ttrain-mlogloss:2.39114\\tval-mlogloss:2.39277\\n\",\n      \"Multiple eval metrics have been passed: 'val-mlogloss' will be used for early stopping.\\n\",\n      \"\\n\",\n      \"Will train until val-mlogloss hasn't improved in 100 rounds.\\n\",\n      \"[200]\\ttrain-mlogloss:1.8078\\tval-mlogloss:2.0115\\n\",\n      \"[400]\\ttrain-mlogloss:1.60116\\tval-mlogloss:1.9457\\n\",\n      \"[600]\\ttrain-mlogloss:1.46953\\tval-mlogloss:1.93011\\n\",\n      \"[800]\\ttrain-mlogloss:1.36553\\tval-mlogloss:1.92647\\n\",\n      \"Stopping. Best iteration:\\n\",\n      \"[825]\\ttrain-mlogloss:1.35318\\tval-mlogloss:1.92626\\n\",\n      \"\\n\",\n      \"idx:  1\\n\",\n      \" loss: 1.92627\\n\",\n      \"[0]\\ttrain-mlogloss:2.39122\\tval-mlogloss:2.3928\\n\",\n      \"Multiple eval metrics have been passed: 'val-mlogloss' will be used for early stopping.\\n\",\n      \"\\n\",\n      \"Will train until val-mlogloss hasn't improved in 100 rounds.\\n\",\n      \"[200]\\ttrain-mlogloss:1.8065\\tval-mlogloss:2.01298\\n\",\n      \"[400]\\ttrain-mlogloss:1.60091\\tval-mlogloss:1.94872\\n\",\n      \"[600]\\ttrain-mlogloss:1.4685\\tval-mlogloss:1.93313\\n\",\n      \"[800]\\ttrain-mlogloss:1.36383\\tval-mlogloss:1.92927\\n\",\n      \"Stopping. 
Best iteration:\\n\",\n      \"[899]\\ttrain-mlogloss:1.3168\\tval-mlogloss:1.92877\\n\",\n      \"\\n\",\n      \"idx:  2\\n\",\n      \" loss: 1.92879\\n\",\n      \"[0]\\ttrain-mlogloss:2.39105\\tval-mlogloss:2.39257\\n\",\n      \"Multiple eval metrics have been passed: 'val-mlogloss' will be used for early stopping.\\n\",\n      \"\\n\",\n      \"Will train until val-mlogloss hasn't improved in 100 rounds.\\n\",\n      \"[200]\\ttrain-mlogloss:1.80767\\tval-mlogloss:2.01163\\n\",\n      \"[400]\\ttrain-mlogloss:1.6018\\tval-mlogloss:1.94808\\n\",\n      \"[600]\\ttrain-mlogloss:1.47112\\tval-mlogloss:1.93282\\n\",\n      \"[800]\\ttrain-mlogloss:1.36743\\tval-mlogloss:1.92918\\n\",\n      \"[1000]\\ttrain-mlogloss:1.27495\\tval-mlogloss:1.92918\\n\",\n      \"Stopping. Best iteration:\\n\",\n      \"[953]\\ttrain-mlogloss:1.29641\\tval-mlogloss:1.92904\\n\",\n      \"\\n\",\n      \"idx:  3\\n\",\n      \" loss: 1.92906\\n\",\n      \"[0]\\ttrain-mlogloss:2.39143\\tval-mlogloss:2.39284\\n\",\n      \"Multiple eval metrics have been passed: 'val-mlogloss' will be used for early stopping.\\n\",\n      \"\\n\",\n      \"Will train until val-mlogloss hasn't improved in 100 rounds.\\n\",\n      \"[200]\\ttrain-mlogloss:1.81054\\tval-mlogloss:2.00446\\n\",\n      \"[400]\\ttrain-mlogloss:1.6046\\tval-mlogloss:1.93723\\n\",\n      \"[600]\\ttrain-mlogloss:1.47282\\tval-mlogloss:1.92063\\n\",\n      \"[800]\\ttrain-mlogloss:1.36819\\tval-mlogloss:1.91661\\n\",\n      \"[1000]\\ttrain-mlogloss:1.27547\\tval-mlogloss:1.91579\\n\",\n      \"Stopping. Best iteration:\\n\",\n      \"[1014]\\ttrain-mlogloss:1.26898\\tval-mlogloss:1.91575\\n\",\n      \"\\n\",\n      \"idx:  4\\n\",\n      \" loss: 1.91579\\n\",\n      \"mean\\n\",\n      \"auc:       1.923953299949125\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"aus = []\\n\",\n    \"sub2 = np.zeros((len(test),11 ))\\n\",\n    \"pred_oob2=np.zeros((len(train),11))\\n\",\n    \"models=[]\\n\",\n    \"iters=[]\\n\",\n    \"for i,(train_index,test_index) in enumerate(kf):\\n\",\n    \"  \\n\",\n    \"    tr_x = train[features].reindex(index=train_index, copy=False)\\n\",\n    \"    tr_y = Y[train_index]\\n\",\n    \"    te_x = train[features].reindex(index=test_index, copy=False)\\n\",\n    \"    te_y = Y[test_index]\\n\",\n    \"\\n\",\n    \"    # tr_y=tr_y.apply(lambda x:1 if x>0 else 0)\\n\",\n    \"    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\\n\",\n    \"    d_tr = xgb.DMatrix(tr_x, label=tr_y)\\n\",\n    \"    d_te = xgb.DMatrix(te_x, label=te_y)\\n\",\n    \"    watchlist  = [(d_tr,'train'),\\n\",\n    \"    (d_te,'val')\\n\",\n    \"             ]\\n\",\n    \"    model = xgb.train(params, d_tr, num_boost_round=5500, \\n\",\n    \"                      evals=watchlist,verbose_eval=200,\\n\",\n    \"                              early_stopping_rounds=100)\\n\",\n    \"    models.append(model)\\n\",\n    \"    iters.append(model.best_iteration)\\n\",\n    \"    pred = model.predict(d_te,ntree_limit=model.best_iteration)\\n\",\n    \"    pred_oob2[test_index] =pred\\n\",\n    \"    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\\n\",\n    \"    a = log_loss(te_y, pred)\\n\",\n    \"\\n\",\n    \"    sub2 += model.predict(xgb.DMatrix(test[features]),ntree_limit=model.best_iteration)/5\\n\",\n    \"    \\n\",\n    \"\\n\",\n    \"    print (\\\"idx: \\\", i) \\n\",\n    \"    print (\\\" loss: %.5f\\\" % a)\\n\",\n    \"#     print \\\" gini: %.5f\\\" % g\\n\",\n    \"    aus.append(a)\\n\",\n    \"\\n\",\n    \"print (\\\"mean\\\")\\n\",\n 
   \"print (\\\"auc:       %s\\\" % (sum(aus) / 5.0))\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 53,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res2_1=np.vstack((pred_oob2,sub2))\\n\",\n    \"res2_1 = pd.DataFrame(res2_1)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 54,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"###sex2\\n\",\n    \"test['sex']=2\\n\",\n    \"features = [x for x in train.columns if x not in ['device_id',\\\"age\\\",\\\"label\\\",\\\"app\\\"]]\\n\",\n    \"Y = train['age'] \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 55,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"aus = []\\n\",\n    \"sub2 = np.zeros((len(test),11 ))\\n\",\n    \"for model,it in zip(models,iters):\\n\",\n    \"    sub2 += model.predict(xgb.DMatrix(test[features]),ntree_limit=it)/5\\n\",\n    \"res2_2=np.vstack((pred_oob2,sub2))\\n\",\n    \"res2_2 = pd.DataFrame(res2_2) \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 56,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"res1.index=range(len(res1))\\n\",\n    \"res2_1.index=range(len(res2_1))\\n\",\n    \"res2_2.index=range(len(res2_2))\\n\",\n    \"final_1=res2_1.copy()\\n\",\n    \"final_2=res2_2.copy()\\n\",\n    \"for i in range(11):\\n\",\n    \"    final_1[i]=res1['sex1']*res2_1[i]\\n\",\n    \"    final_2[i]=res1['sex2']*res2_2[i]\\n\",\n    \"id_list=pd.concat([train[['device_id']],test[['device_id']]])\\n\",\n    \"final=id_list\\n\",\n    \"final.index=range(len(final))\\n\",\n    \"final.columns= ['DeviceID']\\n\",\n    \"final_pred = pd.concat([final_1,final_2],1)\\n\",\n    \"final=pd.concat([final,final_pred],1)\\n\",\n    \"final.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \\n\",\n    \"         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', \\n\",\n    \"         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\\n\",\n    \"\\n\",\n    \"final.to_csv('xgb_feat_final_nb.csv', index=False)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 57,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"test['DeviceID']=test['device_id']\\n\",\n    \"sub=pd.merge(test[['DeviceID']],final,on=\\\"DeviceID\\\",how=\\\"left\\\")\\n\",\n    \"sub.to_csv(\\\"xgb_final_nb.csv\\\",index=False)\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.6.4\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n"
  },
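The notebook follows the same out-of-fold recipe as the rest of the repo: each training row is predicted by the one fold model that never saw it, while the test set receives the average over all fold models, so the stacked probabilities stay leak-free. A generic sketch of that loop, with `fit_predict` as a hypothetical stand-in for `xgb.train` plus the two `predict` calls:

```python
import numpy as np
from sklearn.model_selection import StratifiedKFold

def oof_predictions(fit_predict, X, y, X_test, n_classes, n_splits=5):
    # fit_predict(X_tr, y_tr, X_va, X_te) -> (validation probs, test probs)
    oof = np.zeros((len(X), n_classes))        # out-of-fold preds for the train set
    sub = np.zeros((len(X_test), n_classes))   # fold-averaged preds for the test set
    skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=1024)
    for tr_idx, va_idx in skf.split(X, y):
        va_pred, te_pred = fit_predict(X[tr_idx], y[tr_idx], X[va_idx], X_test)
        oof[va_idx] = va_pred                  # each row scored by a model that never saw it
        sub += te_pred / n_splits              # average the fold models on the test set
    return oof, sub
```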
  {
    "path": "chizhu/stacking/nurbs_feat/xgb_22.py",
    "content": "\n# coding: utf-8\n\n# In[2]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n# get_ipython().run_line_magic('matplotlib', 'inline')\n\n#add\nimport gc\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom scipy.sparse import hstack, vstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\n# from skopt.space import Integer, Categorical, Real, Log10\n# from skopt.utils import use_named_args\n# from skopt import gp_minimize\nfrom gensim.models import Word2Vec, FastText\nimport gensim \nimport re\nimport os\npath=\"./feature/\"###nurbs概率文件路径\no_path=\"/dev/shm/chizhu_data/data/\"###原始文件路径\nos.listdir(path)\n\n\n# In[4]:\n\n\n\nall_feat=pd.read_csv(path+\"feature_22_all.csv\")\ntrain_id=pd.read_csv(o_path+\"deviceid_train.tsv\",sep=\"\\t\",names=['device_id','sex','age'])\ntest_id=pd.read_csv(o_path+\"deviceid_test.tsv\",sep=\"\\t\",names=['device_id'])\nall_id=pd.concat([train_id[['device_id']],test_id[['device_id']]])\nall_id.index=range(len(all_id))\nall_feat['device_id']=all_id\n# deepnn_feat=pd.read_csv(path+\"deepnn_fix.csv\")\n# deepnn_feat['device_id']=deepnn_feat['DeviceID']\n# del deepnn_feat['DeviceID']\n\n\n# In[9]:\n\n\ntrain=pd.merge(train_id,all_feat,on=\"device_id\",how=\"left\")\n# train=pd.merge(train,deepnn_feat,on=\"device_id\",how=\"left\")\ntest=pd.merge(test_id,all_feat,on=\"device_id\",how=\"left\")\n# test=pd.merge(test,deepnn_feat,on=\"device_id\",how=\"left\")\n\n\n# In[10]:\n\n\ntrain['sex-age']=train.apply(lambda x:str(x['sex'])+\"-\"+str(x['age']),1)\n\n\n# In[11]:\n\n\nfeatures = [x for x in train.columns if x not in ['device_id',\"sex\",'age','sex-age']]\nlabel=\"sex-age\"\n\n\n# In[12]:\n\n\nY_CAT=pd.Categorical(train[label])\n\n\n# In[13]:\n\n\nimport lightgbm as lgb\nimport xgboost as xgb\nfrom sklearn.metrics import auc, log_loss, roc_auc_score,f1_score,recall_score,precision_score\nfrom sklearn.cross_validation import StratifiedKFold\n\nkf = StratifiedKFold(Y_CAT, n_folds=5, shuffle=True, random_state=1024)\nparams={\n\t'booster':'gbtree',\n     \"tree_method\":\"gpu_hist\",\n    \"gpu_id\":\"1\",\n\t'objective': 'multi:softprob',\n#      'is_unbalance':'True',\n# \t'scale_pos_weight': 1500.0/13458.0,\n        'eval_metric': \"mlogloss\",\n    'num_class':22,\n\t'gamma':0.1,#0.2 is ok\n\t'max_depth':6,\n# \t'lambda':20,\n    # \"alpha\":5,\n        'subsample':0.7,\n        'colsample_bytree':0.4 ,\n        # 'min_child_weight':2.5, \n        'eta': 0.01,\n    # 'learning_rate':0.01,\n    \"silent\":1,\n\t'seed':1024,\n\t'nthread':12,\n   \n    }\nnum_round = 3500\nearly_stopping_rounds = 100\n\n\n# In[14]:\n\n\naus = []\nsub2 = np.zeros((len(test),22 ))\npred_oob2=np.zeros((len(train),22))\nmodels=[]\niters=[]\nfor i,(train_index,test_index) in enumerate(kf):\n  \n    tr_x = train[features].reindex(index=train_index, copy=False)\n    tr_y = Y_CAT.codes[train_index]\n    te_x = train[features].reindex(index=test_index, copy=False)\n    te_y = Y_CAT.codes[test_index]\n\n    # tr_y=tr_y.apply(lambda x:1 if 
x>0 else 0)\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    d_tr = xgb.DMatrix(tr_x, label=tr_y)\n    d_te = xgb.DMatrix(te_x, label=te_y)\n    watchlist  = [(d_tr,'train'),\n    (d_te,'val')\n             ]\n    model = xgb.train(params, d_tr, num_boost_round=5500, \n                      evals=watchlist,verbose_eval=200,\n                              early_stopping_rounds=100)\n    models.append(model)\n    iters.append(model.best_iteration)\n    pred = model.predict(d_te,ntree_limit=model.best_iteration)\n    pred_oob2[test_index] =pred\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    a = log_loss(te_y, pred)\n\n    sub2 += model.predict(xgb.DMatrix(test[features]),ntree_limit=model.best_iteration)/5\n    \n\n    print (\"idx: \", i) \n    print (\" loss: %.5f\" % a)\n#     print \" gini: %.5f\" % g\n    aus.append(a)\n\nprint (\"mean\")\nprint (\"loss:       %s\" % (sum(aus) / 5.0))\n\n\n# In[15]:\n\n\nres=np.vstack((pred_oob2,sub2))\nres = pd.DataFrame(res,columns=Y_CAT.categories)\nres['DeviceID']=all_id\nres=res[['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']]\n\nres.to_csv(\"xgb_nurbs_22_feat.csv\",index=False)\n\n\n# In[16]:\n\n\ntest['DeviceID']=test['device_id']\nsub=pd.merge(test[['DeviceID']],res,on=\"DeviceID\",how=\"left\")\nsub.to_csv(\"xgb_nurbs_22.csv\",index=False)\n\n"
  },
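`xgb_22.py` skips the two-stage factorization and models the combined `sex-age` string directly as a 22-class problem: `pd.Categorical` supplies the integer targets via `.codes`, and `.categories` later names the probability columns. The categories come back lexicographically sorted ('1-10' before '1-2'), which is why the script reorders the submission columns explicitly afterwards. A toy round-trip:

```python
import pandas as pd

labels = pd.Series(['1-0', '2-3', '1-10', '2-0'])
y_cat = pd.Categorical(labels)
print(list(y_cat.codes))       # [0, 3, 1, 2] -- integer targets for xgboost
print(list(y_cat.categories))  # ['1-0', '1-10', '2-0', '2-3'] -- lexicographic order
```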
  {
    "path": "chizhu/stacking/nurbs_feat/xgb__nurbs_nb.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n#add\nimport gc\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom scipy.sparse import hstack, vstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\n# from skopt.space import Integer, Categorical, Real, Log10\n# from skopt.utils import use_named_args\n# from skopt import gp_minimize\nfrom gensim.models import Word2Vec, FastText\nimport gensim \nimport re\nimport os\npath=\"./feature/\"##nurbs概率文件路径\no_path=\"/dev/shm/chizhu_data/data/\"###原始文件路径\nos.listdir(path)\n\n\n# In[2]:\n\n\nsex_feat=pd.read_csv(path+\"feature_sex_all.csv\")\nage_feat=pd.read_csv(path+\"feature_age_all.csv\")\n# all_feat=pd.read_csv(path+\"feature_22_all.csv\")\ntrain_id=pd.read_csv(o_path+\"deviceid_train.tsv\",sep=\"\\t\",names=['device_id','sex','age'])\ntest_id=pd.read_csv(o_path+\"deviceid_test.tsv\",sep=\"\\t\",names=['device_id'])\nall_id=pd.concat([train_id[['device_id']],test_id[['device_id']]])\nall_id.index=range(len(all_id))\nsex_feat['device_id']=all_id\nage_feat['device_id']=all_id\n# deepnn_feat=pd.read_csv(path+\"deepnn_fix.csv\")\n# deepnn_feat['device_id']=deepnn_feat['DeviceID']\n# del deepnn_feat['DeviceID']\n\n\n# In[3]:\n\n\ntrain=pd.merge(train_id,sex_feat,on=\"device_id\",how=\"left\")\n# train=pd.merge(train,deepnn_feat,on=\"device_id\",how=\"left\")\ntest=pd.merge(test_id,sex_feat,on=\"device_id\",how=\"left\")\n# test=pd.merge(test,deepnn_feat,on=\"device_id\",how=\"left\")\n\n\n# In[4]:\n\n\nfeatures = [x for x in train.columns if x not in ['device_id', 'sex',\"age\",]]\nY = train['sex'] - 1\n\n\n# In[5]:\n\n\n\nimport xgboost as xgb\nfrom sklearn.metrics import auc, log_loss, roc_auc_score,f1_score,recall_score,precision_score\nfrom sklearn.cross_validation import StratifiedKFold\n\nkf = StratifiedKFold(Y, n_folds=10, shuffle=True, random_state=1024)\nparams={\n\t'booster':'gbtree',\n    \"tree_method\":\"gpu_hist\",\n    \"gpu_id\":\"2\",\n\t'objective': 'binary:logistic',\n#      'is_unbalance':'True',\n# \t'scale_pos_weight': 1500.0/13458.0,\n        'eval_metric': \"logloss\",\n    \n\t'gamma':0.2,#0.2 is ok\n\t'max_depth':6,\n# \t'lambda':20,\n    # \"alpha\":5,\n        'subsample':0.7,\n        'colsample_bytree':0.4 ,\n#         'min_child_weight':2.5, \n        'eta': 0.01,\n    # 'learning_rate':0.01,\n    \"silent\":1,\n\t'seed':1024,\n\t'nthread':12,\n   \n    }\nnum_round = 3500\nearly_stopping_rounds = 100\n\n\n# In[6]:\n\n\naus = []\nsub1 = np.zeros((len(test), ))\npred_oob1=np.zeros((len(train),))\nfor i,(train_index,test_index) in enumerate(kf):\n  \n    tr_x = train[features].reindex(index=train_index, copy=False)\n    tr_y = Y[train_index]\n    te_x = train[features].reindex(index=test_index, copy=False)\n    te_y = Y[test_index]\n\n    # tr_y=tr_y.apply(lambda x:1 if x>0 else 0)\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    d_tr = xgb.DMatrix(tr_x, label=tr_y)\n    d_te = 
xgb.DMatrix(te_x, label=te_y)\n    watchlist  = [(d_tr,'train'),\n    (d_te,'val')\n             ]\n    model = xgb.train(params, d_tr, num_boost_round=5500, \n                      evals=watchlist,verbose_eval=200,\n                              early_stopping_rounds=100)\n    pred = model.predict(d_te,ntree_limit=model.best_iteration)\n    pred_oob1[test_index] =pred\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    a = log_loss(te_y, pred)\n\n    sub1 += model.predict(xgb.DMatrix(test[features]),ntree_limit=model.best_iteration)/10\n    \n\n    print (\"idx: \", i) \n    print (\" loss: %.5f\" % a)\n#     print \" gini: %.5f\" % g\n    aus.append(a)\n\nprint (\"mean\")\nprint (\"auc:       %s\" % (sum(aus) / 10.0))\n\n\n# In[7]:\n\n\npred_oob1 = pd.DataFrame(pred_oob1, columns=['sex2'])\nsub1 = pd.DataFrame(sub1, columns=['sex2'])\nres1=pd.concat([pred_oob1,sub1])\nres1['sex1'] = 1-res1['sex2']\n\n\n# In[8]:\n\n\nimport gc\ngc.collect()\n\n\n# In[9]:\n\n\ntrain=pd.merge(train_id,age_feat,on=\"device_id\",how=\"left\")\n# train=pd.merge(train,deepnn_feat,on=\"device_id\",how=\"left\")\ntest=pd.merge(test_id,age_feat,on=\"device_id\",how=\"left\")\n# test=pd.merge(test,deepnn_feat,on=\"device_id\",how=\"left\")\n\n\n# In[10]:\n\n\n####sex1\ntest['sex']=1\n\n\n# In[11]:\n\n\nfeatures = [x for x in train.columns if x not in ['device_id',\"age\"]]\nY = train['age'] \n\n\n# In[12]:\n\n\nimport lightgbm as lgb\nimport xgboost as xgb\nfrom sklearn.metrics import auc, log_loss, roc_auc_score,f1_score,recall_score,precision_score\nfrom sklearn.cross_validation import StratifiedKFold\n\nkf = StratifiedKFold(Y, n_folds=10, shuffle=True, random_state=1024)\nparams={\n\t'booster':'gbtree',\n     \"tree_method\":\"gpu_hist\",\n    \"gpu_id\":\"2\",\n\t'objective': 'multi:softprob',\n#      'is_unbalance':'True',\n# \t'scale_pos_weight': 1500.0/13458.0,\n        'eval_metric': \"mlogloss\",\n    'num_class':11,\n\t'gamma':0.1,#0.2 is ok\n\t'max_depth':6,\n# \t'lambda':20,\n    # \"alpha\":5,\n        'subsample':0.7,\n        'colsample_bytree':0.4 ,\n        # 'min_child_weight':2.5, \n        'eta': 0.01,\n    # 'learning_rate':0.01,\n    \"silent\":1,\n\t'seed':1024,\n\t'nthread':12,\n   \n    }\nnum_round = 3500\nearly_stopping_rounds = 100\n\n\n# In[13]:\n\n\naus = []\nsub2 = np.zeros((len(test),11 ))\npred_oob2=np.zeros((len(train),11))\nmodels=[]\niters=[]\nfor i,(train_index,test_index) in enumerate(kf):\n  \n    tr_x = train[features].reindex(index=train_index, copy=False)\n    tr_y = Y[train_index]\n    te_x = train[features].reindex(index=test_index, copy=False)\n    te_y = Y[test_index]\n\n    # tr_y=tr_y.apply(lambda x:1 if x>0 else 0)\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    d_tr = xgb.DMatrix(tr_x, label=tr_y)\n    d_te = xgb.DMatrix(te_x, label=te_y)\n    watchlist  = [(d_tr,'train'),\n    (d_te,'val')\n             ]\n    model = xgb.train(params, d_tr, num_boost_round=5500, \n                      evals=watchlist,verbose_eval=200,\n                              early_stopping_rounds=100)\n    models.append(model)\n    iters.append(model.best_iteration)\n    pred = model.predict(d_te,ntree_limit=model.best_iteration)\n    pred_oob2[test_index] =pred\n    # te_y=te_y.apply(lambda x:1 if x>0 else 0)\n    a = log_loss(te_y, pred)\n\n    sub2 += model.predict(xgb.DMatrix(test[features]),ntree_limit=model.best_iteration)/10\n    \n\n    print (\"idx: \", i) \n    print (\" loss: %.5f\" % a)\n#     print \" gini: %.5f\" % g\n    aus.append(a)\n\nprint (\"mean\")\nprint 
(\"auc:       %s\" % (sum(aus) / 10.0))\n\n\n# In[14]:\n\n\nres2_1=np.vstack((pred_oob2,sub2))\nres2_1 = pd.DataFrame(res2_1)\n\n\n# In[15]:\n\n\n###sex2\ntest['sex']=2\nfeatures = [x for x in train.columns if x not in ['device_id',\"age\"]]\nY = train['age'] \n\n\n# In[16]:\n\n\naus = []\nsub2 = np.zeros((len(test),11 ))\nfor model,it in zip(models,iters):\n    sub2 += model.predict(xgb.DMatrix(test[features]),ntree_limit=it)/10\nres2_2=np.vstack((pred_oob2,sub2))\nres2_2 = pd.DataFrame(res2_2) \n\n\n# In[17]:\n\n\nres1.index=range(len(res1))\nres2_1.index=range(len(res2_1))\nres2_2.index=range(len(res2_2))\nfinal_1=res2_1.copy()\nfinal_2=res2_2.copy()\nfor i in range(11):\n    final_1[i]=res1['sex1']*res2_1[i]\n    final_2[i]=res1['sex2']*res2_2[i]\nid_list=pd.concat([train[['device_id']],test[['device_id']]])\nfinal=id_list\nfinal.index=range(len(final))\nfinal.columns= ['DeviceID']\nfinal_pred = pd.concat([final_1,final_2],1)\nfinal=pd.concat([final,final_pred],1)\nfinal.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \n         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', \n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\nfinal.to_csv('xgb_feat_nurbs_nb_10fold.csv', index=False)\n\n\n# In[18]:\n\n\ntest['DeviceID']=test['device_id']\nsub=pd.merge(test[['DeviceID']],final,on=\"DeviceID\",how=\"left\")\nsub.to_csv(\"xgb_nurbs_nb_10fold.csv\",index=False)\n\n"
  },
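`xgb__nurbs_nb.py` conditions the age model on sex by treating `sex` as an ordinary input column: training rows carry the true label, and the test set is scored twice, once with `test['sex']=1` and once with `test['sex']=2`, reading P(age | sex=1) and P(age | sex=2) off the same fold models. A sketch of that double scoring, with `predict_fn` as a hypothetical stand-in for the trained booster:

```python
import pandas as pd

def conditional_age_probs(predict_fn, test_df, features, sex_value):
    """Score the test frame under an assumed sex; `features` must include 'sex'."""
    scored = test_df.copy()
    scored['sex'] = sex_value             # overwrite the conditioning column
    return predict_fn(scored[features])   # expected shape: (n_test, 11)

# p_age_sex1 = conditional_age_probs(predict_fn, test, features, sex_value=1)
# p_age_sex2 = conditional_age_probs(predict_fn, test, features, sex_value=2)
```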
  {
    "path": "chizhu/util/bagging.py",
    "content": "import os\nimport pandas as pd\npath = \"/Users/chizhu/data/competition_data/易观/\"\nos.listdir(path)\n\ntrain = pd.read_csv(path+\"deviceid_train.tsv\", sep=\"\\t\",\n                    names=[\"id\", \"sex\", \"age\"])\ntest = pd.read_csv(path+\"deviceid_test.tsv\", sep=\"\\t\", names=['DeviceID'])\npred = pd.read_csv(path+\"nn_feat_v6.csv\")\n\nlgb1 = pd.read_csv(path+\"th_results_ems_22_nb_5400.csv\")  # 576\nlgb1 = pd.merge(test, lgb1, on=\"DeviceID\", how=\"left\")\nsubmit = lgb1.copy()\n\nnn1 = pd.read_csv(path+\"xgb_and_nurbs.csv\")  # 573\nnn1 = pd.merge(test, nn1, on=\"DeviceID\", how=\"left\")\n\n# nn2=pd.read_csv(path+\"th_results_ems_2547.csv\")##574\n# nn2=pd.merge(test,nn2,on=\"DeviceID\",how=\"left\")\n\n# lgb2=pd.read_csv(path+\"th_results_ems_2.549.csv\")##570\n# lgb2=pd.merge(test,lgb2,on=\"DeviceID\",how=\"left\")\n\n# lgb3=pd.read_csv(path+\"th_results_ems_2547.csv\")##547\n# lgb3=pd.merge(test,lgb3,on=\"DeviceID\",how=\"left\")\n\n\nfor i in['1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6',\n         '1-7', '1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4',\n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']:\n    #     submit[i]=(lgb1[i]+lgb2[i]+nn1[i]+nn2[i])/4.0\n    submit[i] = 0.75*lgb1[i]+0.25*nn1[i]\n#     submit[i]=0.1*lgb1[i]+0.1*nn1[i]+0.2*nn2[i]+0.2*lgb2[i]+0.4*lgb3[i]\n\nsubmit.to_csv(path+\"th_nurbs_7525.csv\", index=False)\n"
  },
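`bagging.py` blends two finished submissions column by column with weights 0.75/0.25. Because the weights sum to 1 and each input row sums to 1, every blended row remains a valid distribution over the 22 classes. A toy check of that invariant (made-up numbers, abbreviated column set):

```python
import pandas as pd

cols = ['1-0', '2-0']                     # abbreviated stand-in for the 22 columns
a = pd.DataFrame({'1-0': [0.6], '2-0': [0.4]})
b = pd.DataFrame({'1-0': [0.2], '2-0': [0.8]})
blend = a.copy()
for c in cols:
    blend[c] = 0.75 * a[c] + 0.25 * b[c]  # same weights as the script
assert abs(blend[cols].sum(axis=1).iloc[0] - 1.0) < 1e-9
```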
  {
    "path": "chizhu/util/get_nn_res.py",
    "content": "import pandas as pd \npath = \"/Users/chizhu/data/competition_data/易观/\"\nres1 = pd.read_csv(path+\"res1.csv\")\nres2_1 = pd.read_csv(path+\"res2_1.csv\")\nres2_2 = pd.read_csv(path+\"res2_2.csv\")\nres1.index = range(len(res1))\nres2_1.index = range(len(res2_1))\nres2_2.index = range(len(res2_2))\nfinal_1 = res2_1.copy()\nfinal_2 = res2_2.copy()\nfor i in range(11):\n    final_1[str(i)] = res1['sex1']*res2_1[str(i)]\n    final_2[str(i)] = res1['sex2']*res2_2[str(i)]\nid_list = pred['DeviceID']\nfinal = id_list\nfinal.index = range(len(final))\nfinal.columns = ['DeviceID']\nfinal_pred = pd.concat([final_1, final_2], 1)\nfinal = pd.concat([final, final_pred], 1)\nfinal.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6',\n                 '1-7', '1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4',\n                 '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\nfinal.to_csv(path+'nn_feat_v12.csv', index=False)\n\ntrain = pd.read_csv(path+\"deviceid_train.tsv\", sep=\"\\t\",\n                    names=[\"id\", \"sex\", \"age\"])\ntest = pd.read_csv(path+\"deviceid_test.tsv\", sep=\"\\t\", names=['DeviceID'])\n\npred = pd.read_csv(path+\"nn_feat_v6.csv\")\nsub = pd.merge(test, pred, on=\"DeviceID\", how=\"left\")\n\nsub.to_csv(path+\"nn_v6.csv\", index=False)\n\n\n"
  },
  {
    "path": "linwangli/code/lgb_allfeat_22.py",
    "content": "#!/usr/bin/env python\n# coding: utf-8\n\nfrom catboost import Pool, CatBoostClassifier, cv\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport matplotlib.pyplot as plt\nimport time\n\nimport gc\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom scipy.sparse import hstack, vstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\nfrom skopt.space import Integer, Categorical, Real, Log10\nfrom skopt.utils import use_named_args\nfrom skopt import gp_minimize\nimport re\n\n\ntrain = pd.read_csv('../dataset/deviceid_train.tsv', sep='\\t', names=['device_id', 'sex', 'age'])\nall_feat = pd.read_csv('../dataset/all_feat.csv')\n\ntrain['label'] = train['sex'].astype(str) + '-' + train['age'].astype(str)\nlabel_le = preprocessing.LabelEncoder()\ntrain['label'] = label_le.fit_transform(train['label'])\ndata_all = pd.merge(left=all_feat, right=train, on='device_id', how='left')\n\n\ntrain = data_all[:50000]\ntest = data_all[50000:]\ntrain = train.fillna(-1)\ntest = test.fillna(-1)\ndel data_all\ngc.collect()\n\nuse_feats = all_feat.columns[1:]\nuse_feats\n\nX_train = train[use_feats]\nX_test = test[use_feats]\nY = train['label']\nkfold = StratifiedKFold(n_splits=5, random_state=10, shuffle=True)\nsub = np.zeros((X_test.shape[0], 22))\nfor i, (train_index, test_index) in enumerate(kfold.split(X_train, Y)):\n    X_tr, X_vl, y_tr, y_vl = X_train.iloc[train_index], X_train.iloc[test_index],                                 Y.iloc[train_index], Y.iloc[test_index]\n    dtrain = lgb.Dataset(X_tr, label=y_tr, categorical_feature=[-1])\n    dvalid = lgb.Dataset(X_vl, y_vl, reference=dtrain)\n    params = {\n        'boosting_type': 'gbdt',\n        'max_depth':6,\n        'metric': {'multi_logloss'},\n        'num_class':22,\n        'objective':'multiclass',\n        'num_leaves':7,\n        'subsample': 0.9,\n        'colsample_bytree': 0.2,\n        'lambda_l1':0.0001,\n        'lambda_l2':0.00111,\n        'subsample_freq':12,\n        'learning_rate': 0.012,\n        'min_child_weight':12\n\n    }\n\n    model = lgb.train(params,\n                        dtrain,\n                        num_boost_round=6000,\n                        valid_sets=dvalid,\n                        early_stopping_rounds=100,\n                        verbose_eval=100)\n\n\n    sub += model.predict(X_test, num_iteration=model.best_iteration)/kfold.n_splits\n\n\nsub = pd.DataFrame(sub)\ncols = [x for x in range(0, 22)]\ncols = label_le.inverse_transform(cols)\nsub.columns = cols\nsub['DeviceID'] = test['device_id'].values\nsub = sub[['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \n         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', \n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']]\nsub.to_csv('lgb_22.csv', index=False)\n\n\n\n\n\n"
  },
  {
    "path": "linwangli/code/lgb_allfeat_condProb.py",
    "content": "#!/usr/bin/env python\n# coding: utf-8\nfrom catboost import Pool, CatBoostClassifier, cv\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport gc\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom scipy.sparse import hstack, vstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\nfrom skopt.space import Integer, Categorical, Real, Log10\nfrom skopt.utils import use_named_args\nfrom skopt import gp_minimize\nimport re\n\n\n# 读入数据\ntrain = pd.read_csv('../dataset/deviceid_train.tsv', sep='\\t', names=['device_id', 'sex', 'age'])\nall_feat = pd.read_csv('../dataset/all_feat.csv')\n\n\ndata_all = pd.merge(left=all_feat, right=train, on='device_id', how='left')\ntrain = data_all[:50000]\ntest = data_all[50000:]\ntrain = train.fillna(-1)\ntest = test.fillna(-1)\ndel data_all\ngc.collect()\nuse_feats = all_feat.columns[1:]\nuse_feats\n\n\n# P(age)\n\nY = train['sex'] - 1\nX_train = train[use_feats]\nX_test = test[use_feats]\nkfold = StratifiedKFold(n_splits=5, random_state=10, shuffle=True)\noof_preds1 = np.zeros((X_train.shape[0], ))\nsub1 = np.zeros((X_test.shape[0], ))\nfor i, (train_index, test_index) in enumerate(kfold.split(X_train, Y)):\n    X_tr, X_vl, y_tr, y_vl = X_train.iloc[train_index], X_train.iloc[test_index],                                 Y.iloc[train_index], Y.iloc[test_index]\n    dtrain = lgb.Dataset(X_tr, label=y_tr)\n    dvalid = lgb.Dataset(X_vl, y_vl, reference=dtrain)\n    params = {\n        'boosting_type': 'gbdt',\n        'max_depth':6,\n        'objective':'binary',\n        'num_leaves':31,\n        'subsample': 0.85,\n        'colsample_bytree': 0.2,\n        'lambda_l1':0.00007995302080034896,\n        'lambda_l2':0.0003648648811380991,\n        'subsample_freq':12,\n        'learning_rate': 0.012,\n        'min_child_weight':5.5\n    }\n\n    model = lgb.train(params,\n                        dtrain,\n                        num_boost_round=4000,\n                        valid_sets=dvalid,\n                        early_stopping_rounds=100,\n                        verbose_eval=100)\n\n    oof_preds1[test_index] = model.predict(X_vl, num_iteration=model.best_iteration)\n    sub1 += model.predict(X_test, num_iteration=model.best_iteration)/kfold.n_splits\n\n\n# P(age|sex = 2)\n\ntrain['sex_pred'] = train['sex']\ntest['sex_pred'] = 1\n\nuse_feats = list(train.columns[1:-3])\nuse_feats = use_feats + ['sex_pred']\n\nX_train = train[use_feats]\nX_test = test[use_feats]\n\nY = train['age']\nkfold = StratifiedKFold(n_splits=10, random_state=10, shuffle=True)\noof_preds2_1 = np.zeros((X_train.shape[0], 11))\nsub2_1 = np.zeros((X_test.shape[0], 11))\nfor i, (train_index, test_index) in enumerate(kfold.split(X_train, Y)):\n    X_tr, X_vl, y_tr, y_vl = X_train.iloc[train_index], X_train.iloc[test_index],                                 Y.iloc[train_index], Y.iloc[test_index]\n\n    \n    dtrain = lgb.Dataset(X_tr, label=y_tr)\n    dvalid = lgb.Dataset(X_vl, y_vl, reference=dtrain)\n    params = {\n        
'boosting_type': 'gbdt',\n        'max_depth':6,\n        'metric': {'multi_logloss'},\n        'num_class':11,\n        'objective':'multiclass',\n        'num_leaves':31,\n        'subsample': 0.9,\n        'colsample_bytree': 0.2,\n        'lambda_l1':0.0001,\n        'lambda_l2':0.00111,\n        'subsample_freq':10,\n        'learning_rate': 0.012,\n        'min_child_weight':10\n    }\n\n    model = lgb.train(params,\n                        dtrain,\n                        num_boost_round=4000,\n                        valid_sets=dvalid,\n                        early_stopping_rounds=100,\n                        verbose_eval=100)\n\n    oof_preds2_1[test_index] = model.predict(X_vl, num_iteration=model.best_iteration)\n    sub2_1 += model.predict(X_test, num_iteration=model.best_iteration)/kfold.n_splits\n\n\n# P(age|sex = 2)\n\ntrain['sex_pred'] = train['sex']\ntest['sex_pred'] = 2\n\nuse_feats = list(train.columns[1:-3])\nuse_feats = use_feats + ['sex_pred']\n\nX_train = train[use_feats]\nX_test = test[use_feats]\n\n\nY = train['age']\nkfold = StratifiedKFold(n_splits=10, random_state=10, shuffle=True)\noof_preds2_2 = np.zeros((X_train.shape[0], 11))\nsub2_2 = np.zeros((X_test.shape[0], 11))\nfor i, (train_index, test_index) in enumerate(kfold.split(X_train, Y)):\n    X_tr, X_vl, y_tr, y_vl = X_train.iloc[train_index], X_train.iloc[test_index],                                 Y.iloc[train_index], Y.iloc[test_index]\n\n    \n    dtrain = lgb.Dataset(X_tr, label=y_tr)\n    dvalid = lgb.Dataset(X_vl, y_vl, reference=dtrain)\n    params = {\n        'boosting_type': 'gbdt',\n        'max_depth':6,\n        'metric': {'multi_logloss'},\n        'num_class':11,\n        'objective':'multiclass',\n        'num_leaves':31,\n        'subsample': 0.9,\n        'colsample_bytree': 0.2,\n        'lambda_l1':0.0001,\n        'lambda_l2':0.00111,\n        'subsample_freq':10,\n        'learning_rate': 0.012,\n        'min_child_weight':10\n    }\n\n    model = lgb.train(params,\n                        dtrain,\n                        num_boost_round=4000,\n                        valid_sets=dvalid,\n                        early_stopping_rounds=100,\n                        verbose_eval=100)\n\n    oof_preds2_2[test_index] = model.predict(X_vl, num_iteration=model.best_iteration)\n    sub2_2 += model.predict(X_test, num_iteration=model.best_iteration)/kfold.n_splits\n\n\n# 保存测试集的预测结果\nsub1 = pd.DataFrame(sub1, columns=['sex2'])\n\nsub1['sex1'] = 1-sub1['sex2']\nsub2 = pd.DataFrame(sub2_1, columns=['age%s'%i for i in range(11)])\nsub = pd.DataFrame(test['device_id'].values, columns=['DeviceID'])\n\nfor i in ['sex1', 'sex2']:\n    for j in ['age%s'%i for i in range(11)]:\n        sub[i+'_'+j] = sub1[i] * sub2[j]\nsub.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \n         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', \n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\nsub.to_csv('test_pred.csv', index=False)\n\n\n# 保存训练集五折的预测结果\noof_preds1 = pd.DataFrame(oof_preds1, columns=['sex2'])\noof_preds1['sex1'] = 1-oof_preds1['sex2']\n\noof_preds2_1 = pd.DataFrame(oof_preds2_1, columns=['age%s'%i for i in range(11)])\noof_preds2_2 = pd.DataFrame(oof_preds2_2, columns=['age%s'%i for i in range(11)])\n\noof_preds = train[['device_id']]\noof_preds.columns = ['DeviceID']\n\nfor i in ['age%s'%i for i in range(11)]:\n    oof_preds['sex1_'+i] = oof_preds1['sex1'] * oof_preds2_1[i]\nfor i in ['age%s'%i for i in range(11)]:\n    oof_preds['sex2_'+i] = 
oof_preds1['sex2'] * oof_preds2_2[i]   \n\noof_preds.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \n         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', \n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\noof_preds.to_csv('train_pred.csv', index=False)\n\n\n\n\n\n"
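\n# train_pred.csv / test_pred.csv each hold 22 columns of joint probabilities\n# P(sex, age) = P(sex) * P(age | sex) per device, which downstream scripts can\n# stack or weight-blend with the other models' 22-column outputs.\n"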
  },
  {
    "path": "linwangli/code/utils.py",
    "content": "import pandas as pd\r\nimport numpy as np\r\n\r\ndef weights_ensemble(results, weights):\r\n\t'''\r\n\t针对此次比赛的按权重进行模型融合的函数脚本\r\n\tresults: list，存放所有需要融合的结果路径\r\n\tweights: list, 存放各个结果的权重\r\n\treturn: 可以直接to_csv提交的结果\r\n\t'''\r\n    for i in range(len(results)):\r\n        if i == 0:\r\n            sub = pd.read_csv(results[0])\r\n            final_cols = list(sub.columns)\r\n            cols = list(sub.columns)\r\n            cols[1:]  = [col + '_0' for col in cols[1:]]\r\n            sub.columns = cols\r\n        else:\r\n            result = pd.read_csv(results[i])\r\n            cols = list(result.columns)\r\n            cols[1:]  = [col + '_' + str(i) for col in cols[1:]]\r\n            result.columns = cols\r\n            sub = pd.merge(left=sub, right=result, on='DeviceID')\r\n    for i in range(len(weights)):\r\n        for col in final_cols[1:]:\r\n            if col not in sub.columns:\r\n                sub[col] = weights[i] * sub[col + '_' + str(i)]\r\n            else:\r\n                sub[col] = sub[col] +  weights[i] * sub[col + '_' + str(i)]\r\n    sub = sub[final_cols]\r\n    return sub\r\n\r\ndef result_corr(path1, path2):\r\n\t'''\r\n\t根据此次比赛写的评测不同提交结果相关性文件\r\n\tpath1: 结果1的路径\r\n\tpath2: 结果2的路径\r\n\treturn： 返回不同提交结果的相关性\r\n\t'''\r\n\tresult_1 = pd.read_csv(path1)\r\n\tresult_2 = pd.read_csv(path2)\r\n\tresult = pd.merge(left=result_1, right=result_2, on='DeviceID', suffixes=('_x', '_y'))\r\n\tcols = result_1.columns[1:]\r\n\tcol_list = []\r\n\tfor col in cols:\r\n\t    col_pair = [col + '_x', col + '_y']\r\n\t    col_list.append(result[col_pair].corr().loc[col + '_x', col + '_y'])\r\n\r\n\treturn np.mean(col_list)"
  },
  {
    "path": "linwangli/readme.txt",
    "content": "|—— code\r\n    |—— lgb_allfeat_22.py：基于【全部特征】训练得到lgb结果\r\n    |—— lgb_allfeat_condProb.py：基于【全部特征+条件概率】训练得到lgb结果\r\n    |—— utils.py：一些脚本函数，如加权融合/相关性评测等\r\n|—— dataset\r\n    |—— deviceid_train.tsv： 赛方提供的文件\r\n    |—— all_feat.csv: 团队提取的所有特征\r\n|—— result：存放各种提交文件\r\n"
  },
  {
    "path": "linwangli/yg-1st-lgb.py",
    "content": "\n# coding: utf-8\n\n# In[ ]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n# %matplotlib inline\n\n#add\nimport gc\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom scipy.sparse import hstack, vstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\nfrom skopt.space import Integer, Categorical, Real, Log10\nfrom skopt.utils import use_named_args\nfrom skopt import gp_minimize\nfrom gensim.models import Word2Vec, FastText\nimport gensim \nimport re\n\n\n# In[ ]:\n\n\ntest = pd.read_csv('../input/yiguan/demo/Demo/deviceid_test.tsv', sep='\\t', names=['device_id'])\ntrain = pd.read_csv('../input/yiguan/demo/Demo/deviceid_train.tsv', sep='\\t', names=['device_id', 'sex', 'age'])\nbrand = pd.read_table('../input/yiguan/demo/Demo/deviceid_brand.tsv', names=['device_id', 'vendor', 'version'])\npacktime = pd.read_table('../input/yiguan/demo/Demo/deviceid_package_start_close.tsv', \n                         names=['device_id', 'app', 'start', 'close'])\npackages = pd.read_csv('../input/yiguan/demo/Demo/deviceid_packages.tsv', sep='\\t', names=['device_id', 'apps'])\n\n\n# In[ ]:\n\n\npacktime['period'] = (packtime['close'] - packtime['start'])/1000\npacktime['start'] = pd.to_datetime(packtime['start'], unit='ms')\napp_use_time = packtime.groupby(['app'])['period'].agg('sum').reset_index()\n# 试试看200\napp_use_top100 = app_use_time.sort_values(by='period', ascending=False)[:100]['app']\ndevice_app_use_time = packtime.groupby(['device_id', 'app'])['period'].agg('sum').reset_index()\nuse_time_top100_statis = device_app_use_time.set_index('app').loc[list(app_use_top100)].reset_index()\ntop100_statis = use_time_top100_statis.pivot(index='device_id', columns='app', values='period').reset_index()\n\n\n# In[ ]:\n\n\ntop100_statis = top100_statis.fillna(0)\n\n\n# In[ ]:\n\n\n# 手机品牌预处理\nbrand['vendor'] = brand['vendor'].astype(str).apply(lambda x : x.split(' ')[0].upper())\nbrand['ph_ver'] = brand['vendor'] + '_' + brand['version']\n\nph_ver = brand['ph_ver'].value_counts()\nph_ver_cnt = pd.DataFrame(ph_ver).reset_index()\nph_ver_cnt.columns = ['ph_ver', 'ph_ver_cnt']\n\nbrand = pd.merge(left=brand, right=ph_ver_cnt,on='ph_ver')\n\n\n# In[ ]:\n\n\n# 针对长尾分布做的一点处理\nmask = (brand.ph_ver_cnt < 100)\nbrand.loc[mask, 'ph_ver'] = 'other' \n\ntrain = pd.merge(brand[['device_id', 'ph_ver']], train, on='device_id', how='right')\ntest = pd.merge(brand[['device_id', 'ph_ver']], test, on='device_id', how='right')\ntrain['ph_ver'] = train['ph_ver'].astype(str)\ntest['ph_ver'] = test['ph_ver'].astype(str)\n\n# 将 ph_ver 进行 label encoder\nph_ver_le = preprocessing.LabelEncoder()\ntrain['ph_ver'] = ph_ver_le.fit_transform(train['ph_ver'])\ntest['ph_ver'] = ph_ver_le.transform(test['ph_ver'])\ntrain['label'] = train['sex'].astype(str) + '-' + train['age'].astype(str)\nlabel_le = preprocessing.LabelEncoder()\ntrain['label'] = label_le.fit_transform(train['label'])\n\n\n# In[ ]:\n\n\ntest['sex'] = -1\ntest['age'] = -1\ntest['label'] = -1\ndata = pd.concat([train, test], 
ignore_index=True)\ndata.shape\n\n\n# In[ ]:\n\n\nph_ver_dummy = pd.get_dummies(data['ph_ver'])\nph_ver_dummy.columns = ['ph_ver_' + str(i) for i in range(ph_ver_dummy.shape[1])]\n\n\n# In[ ]:\n\n\ndata = pd.concat([data, ph_ver_dummy], axis=1)\n\n\n# In[ ]:\n\n\ndel data['ph_ver']\n\n\n# In[ ]:\n\n\ntrain = data[data.sex != -1]\ntest = data[data.sex == -1]\ntrain.shape, test.shape\n\n\n# In[ ]:\n\n\n# 每个app的总使用次数统计\napp_num = packtime['app'].value_counts().reset_index()\napp_num.columns = ['app', 'app_num']\npacktime = pd.merge(left=packtime, right=app_num, on='app')\n# 同样的，针对长尾分布做些处理（尝试过不做处理，或换其他阈值，这个100的阈值最高）\npacktime.loc[packtime.app_num < 100, 'app'] = 'other'\n\n\n# In[ ]:\n\n\n# 统计每台设备的app数量\ndf_app = packtime[['device_id', 'app']]\napps = df_app.drop_duplicates().groupby(['device_id'])['app'].apply(' '.join).reset_index()\napps['app_length'] = apps['app'].apply(lambda x:len(x.split(' ')))\n\ntrain = pd.merge(train, apps, on='device_id', how='left')\ntest = pd.merge(test, apps, on='device_id', how='left')\n\n\n# In[ ]:\n\n\n# 获取每台设备所安装的apps的tfidf\ntfidf = CountVectorizer(lowercase=False, min_df=3, stop_words=top100_statis.columns.tolist()[1:7])\napps['app'] = tfidf.fit_transform(apps['app'])\n\nX_tr_app = tfidf.transform(list(train['app']))\nX_ts_app = tfidf.transform(list(test['app']))\n\n\n# In[ ]:\n\n\n'''\nsvd = TruncatedSVD(n_components=100, random_state=42)\nX = vstack([X_tr_app, X_ts_app])\nsvd.fit(X)\nX_tr_app = svd.fit_transform(X_tr_app)\nX_ts_app = svd.fit_transform(X_ts_app)\nX_tr_app = pd.DataFrame(X_tr_app)\nX_ts_app = pd.DataFrame(X_ts_app)\nX_tr_app.columns = ['app_' + str(i) for i in range(0, 100)]\nX_ts_app.columns = ['app_' + str(i) for i in range(0, 100)]\n'''\n\n\n# ### 利用word2vec得到每台设备所安装app的embedding表示\n\n# In[ ]:\n\n\npackages['apps'] = packages['apps'].apply(lambda x:x.split(','))\npackages['app_length'] = packages['apps'].apply(lambda x:len(x))\n\n\n# In[ ]:\n\n\nembed_size = 128\nfastmodel = Word2Vec(list(packages['apps']), size=embed_size, window=4, min_count=3, negative=2,\n                 sg=1, sample=0.002, hs=1, workers=4)  \n\nembedding_fast = pd.DataFrame([fastmodel[word] for word in (fastmodel.wv.vocab)])\nembedding_fast['app'] = list(fastmodel.wv.vocab)\nembedding_fast.columns= [\"fdim_%s\" % str(i) for i in range(embed_size)]+[\"app\"]\nembedding_fast.head()\n\n\n# In[ ]:\n\n\nid_list = []\nfor i in range(packages.shape[0]):\n    id_list += [list(packages['device_id'])[i]]*packages['app_length'].iloc[i]\n\n\napp_list = [word for item in packages['apps'] for word in item]\n\napp_vect = pd.DataFrame({'device_id':id_list})        \napp_vect['app'] = app_list\n\n\n# In[ ]:\n\n\napp_vect = app_vect.merge(embedding_fast, on='app', how='left')\napp_vect = app_vect.drop('app', axis=1)\n\nseqfeature = app_vect.groupby(['device_id']).agg('mean')\nseqfeature.reset_index(inplace=True)\n\n\n# In[ ]:\n\n\nseqfeature.head()\n\n\n# ### 用户一周七天玩手机的时长情况\n\n# In[ ]:\n\n\n# packtime['period'] = (packtime['close'] - packtime['start'])/1000\n# packtime['start'] = pd.to_datetime(packtime['start'], unit='ms')\npacktime['dayofweek'] = packtime['start'].dt.dayofweek\npacktime['hour'] = packtime['start'].dt.hour\n# packtime = packtime[(packtime['start'] < '2017-03-31 23:59:59') & (packtime['start'] > '2017-03-01 00:00:00')]\n\n\n# In[ ]:\n\n\napp_use_time = packtime.groupby(['device_id', 'dayofweek'])['period'].agg('sum').reset_index()\nweek_app_use = app_use_time.pivot_table(values='period', columns='dayofweek', index='device_id').reset_index()\nweek_app_use = 
week_app_use.fillna(0)\nweek_app_use.columns = ['device_id'] + ['week_day_' + str(i) for i in range(0, 7)]\n\nweek_app_use['week_max'] = week_app_use.max(axis=1)\nweek_app_use['week_min'] = week_app_use.min(axis=1)\nweek_app_use['week_sum'] = week_app_use.sum(axis=1)\nweek_app_use['week_std'] = week_app_use.std(axis=1)\n\n'''\nfor i in range(0, 7):\n    week_app_use['week_day_' + str(i)] = week_app_use['week_day_' + str(i)] / week_app_use['week_sum']\n'''\n\n\n# In[ ]:\n\n\n'''\napp_use_time = packtime.groupby(['device_id', 'hour'])['period'].agg('sum').reset_index()\nhour_app_use = app_use_time.pivot_table(values='period', columns='hour', index='device_id').reset_index()\nhour_app_use = hour_app_use.fillna(0)\nhour_app_use.columns = ['device_id'] + ['hour_' + str(i) for i in range(0, 24)]\n\n# hour_app_use['hour_max'] = hour_app_use.max(axis=1)\n# hour_app_use['hour_min'] = hour_app_use.min(axis=1)\n# hour_app_use['hour_sum'] = hour_app_use.sum(axis=1)\n# hour_app_use['hour_std'] = hour_app_use.std(axis=1)\n\n# for i in range(0, 24):\n#     hour_app_use['hour_' + str(i)] = hour_app_use['hour_' + str(i)] / hour_app_use['hour_sum']\n'''\n\n\n# ### 将各个特征整合到一块\n\n# In[ ]:\n\n\ntrain.columns[4:]\n\n\n# In[ ]:\n\n\nuser_behavior = pd.read_csv('../input/yg-user-behavior/user_behavior.csv')\nuser_behavior['app_len_max'] = user_behavior['app_len_max'].astype(np.float64)\ndel user_behavior['app']\ntrain = pd.merge(train, user_behavior, on='device_id', how='left')\ntest = pd.merge(test, user_behavior, on='device_id', how='left')\n\n\n# In[ ]:\n\n\ntrain = pd.merge(train, seqfeature, on='device_id', how='left')\ntest = pd.merge(test, seqfeature, on='device_id', how='left')\n\n\n# In[ ]:\n\n\ntrain = pd.merge(train, week_app_use, on='device_id', how='left')\ntest = pd.merge(test, week_app_use, on='device_id', how='left')\n\n\n# In[ ]:\n\n\n'''\napp_top50_list = list(packtime.groupby(by='app')['period'].sum().sort_values(ascending=False)[:50].index)\n\nfor app in app_top50_list:\n    app_cnt = packtime[packtime['app'] == app]\n    start_num_app = app_cnt.groupby(by='device_id')['start'].count().reset_index()\n    start_num_app.columns = ['device_id', 'start_num_app_' + app[0:4]]\n    train = train.merge(start_num_app, on='device_id', how='left')\n    test = test.merge(start_num_app, on='device_id', how='left')\n    print(app + ' done')   \n'''\n\n\n# In[ ]:\n\n\n'''\n# all_top50 : 使用总时长最高的50款app，每个人的使用时间统计\nall_top50 = pd.read_csv('../input/yg-feature/all_top50_statis.csv')\ntrain = pd.merge(train, all_top50, on='device_id', how='left')\ntest = pd.merge(test, all_top50, on='device_id', how='left')\n'''\n\n\n# In[ ]:\n\n\ntop100_statis.columns = ['device_id'] + ['top100_statis_' + str(i) for i in range(0, 100)]\ntrain = pd.merge(train, top100_statis, on='device_id', how='left')\ntest = pd.merge(test, top100_statis, on='device_id', how='left')\n\n\n# In[ ]:\n\n\ntrain.to_csv('train_feature.csv', index=None)\ntest.to_csv('test_feature.csv', index=None)\n\n\n# In[ ]:\n\n\nfeats = train.columns[4:]\nfeats\n\n\n# In[ ]:\n\n\nfeats = feats.delete(153)\nfeats[153]\n\n\n# In[ ]:\n\n\n'''\ntrain = pd.merge(train, hour_app_use, on='device_id', how='left')\ntest = pd.merge(test, hour_app_use, on='device_id', how='left')\n'''\n\n\n# In[ ]:\n\n\nX_train = hstack([X_tr_app, train[feats].astype(float)])\nX_test = hstack([X_ts_app, test[feats].astype(float)])\n\nX_train = X_train.tocsr().astype('float')\nX_test = X_test.tocsr().astype('float')\n\n\n# ### 开始训练模型\n\n# In[ ]:\n\n\nY = train['sex'] - 1\nkfold = 
StratifiedKFold(n_splits=10, random_state=10, shuffle=True)\noof_preds1 = np.zeros((X_train.shape[0], ))\nsub1 = np.zeros((X_test.shape[0], ))\nfor i, (train_index, test_index) in enumerate(kfold.split(X_train, Y)): \n    X_tr, X_vl, y_tr, y_vl = X_train[train_index], X_train[test_index],                                 Y[train_index], Y[test_index]\n    dtrain = lgb.Dataset(X_tr, label=y_tr)\n    dvalid = lgb.Dataset(X_vl, y_vl, reference=dtrain)\n    params = {\n        'boosting_type': 'gbdt',\n        'max_depth':6,\n        'objective':'binary',\n        'num_leaves':31,\n        'subsample': 0.85,\n        'colsample_bytree': 0.2,\n        'lambda_l1':0.00007995302080034896,\n        'lambda_l2':0.0003648648811380991,\n        'subsample_freq':12,\n        'learning_rate': 0.012,\n        'min_child_weight':5.5\n    }\n\n    model = lgb.train(params,\n                        dtrain,\n                        num_boost_round=4000,\n                        valid_sets=dvalid,\n                        early_stopping_rounds=100,\n                        verbose_eval=100)\n\n    oof_preds1[test_index] = model.predict(X_vl, num_iteration=model.best_iteration)\n    sub1 += model.predict(X_test, num_iteration=model.best_iteration)/kfold.n_splits\n\n\n# In[ ]:\n\n\nY = train['age']\nkfold = StratifiedKFold(n_splits=10, random_state=10, shuffle=True)\noof_preds2 = np.zeros((X_train.shape[0], 11))\nsub2 = np.zeros((X_test.shape[0], 11))\nfor i, (train_index, test_index) in enumerate(kfold.split(X_train, Y)):\n    X_tr, X_vl, y_tr, y_vl = X_train[train_index], X_train[test_index],                                 Y[train_index], Y[test_index]\n    dtrain = lgb.Dataset(X_tr, label=y_tr)\n    dvalid = lgb.Dataset(X_vl, y_vl, reference=dtrain)\n    params = {\n        'boosting_type': 'gbdt',\n        'max_depth':6,\n        'metric': {'multi_logloss'},\n        'num_class':11,\n        'objective':'multiclass',\n        'num_leaves':31,\n        'subsample': 0.9,\n        'colsample_bytree': 0.2,\n        'lambda_l1':0.0001,\n        'lambda_l2':0.00111,\n        'subsample_freq':10,\n        'learning_rate': 0.012,\n        'min_child_weight':10\n    }\n\n    model = lgb.train(params,\n                        dtrain,\n                        num_boost_round=4000,\n                        valid_sets=dvalid,\n                        early_stopping_rounds=100,\n                        verbose_eval=100)\n\n    oof_preds2[test_index] = model.predict(X_vl, num_iteration=model.best_iteration)\n    sub2 += model.predict(X_test, num_iteration=model.best_iteration)/kfold.n_splits\n\n\n# In[ ]:\n\n\noof_preds1 = pd.DataFrame(oof_preds1, columns=['sex2'])\n\noof_preds1['sex1'] = 1-oof_preds1['sex2']\noof_preds2 = pd.DataFrame(oof_preds2, columns=['age%s'%i for i in range(11)])\noof_preds = train[['device_id']]\noof_preds.columns = ['DeviceID']\n\nfor i in ['sex1', 'sex2']:\n    for j in ['age%s'%i for i in range(11)]:\n        oof_preds[i+'_'+j] = oof_preds1[i] * oof_preds2[j]\noof_preds.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \n         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', \n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\noof_preds.to_csv('train.csv', index=False)\n\n\n# In[ ]:\n\n\nsub1 = pd.DataFrame(sub1, columns=['sex2'])\n\nsub1['sex1'] = 1-sub1['sex2']\nsub2 = pd.DataFrame(sub2, columns=['age%s'%i for i in range(11)])\nsub = test[['device_id']]\nsub.columns = ['DeviceID']\n\nfor i in ['sex1', 'sex2']:\n    for j in ['age%s'%i for i in 
range(11)]:\n        sub[i+'_'+j] = sub1[i] * sub2[j]\nsub.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \n         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', \n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\nsub.to_csv('lgb_l_v54.csv', index=False)\n\n\n# In[ ]:\n\n\n'''\nY = train['label']\n#best params: [31, 11, 0.015955854914003094, 0.12122664084283229, 0.7645440142264772, 24, 1048, 0.00552258737237652, 0.005810068328090833, 7]\nkfold = StratifiedKFold(n_splits=5, random_state=10, shuffle=True)\nsub = np.zeros((X_test.shape[0], 22))\nfor i, (train_index, test_index) in enumerate(kfold.split(X_train, Y)):\n    X_tr, X_vl, y_tr, y_vl = X_train[train_index], X_train[test_index], Y[train_index], Y[test_index]\n    dtrain = lgb.Dataset(X_tr, label=y_tr)\n    dvalid = lgb.Dataset(X_vl, y_vl, reference=dtrain)\n    params = {\n        'boosting_type': 'gbdt',\n        'max_depth':7,\n        'objective':'multiclass',\n        'metric': {'multi_logloss'},\n        'num_class':22,\n        'num_leaves':20,\n        'subsample': 0.86,\n        'colsample_bytree': 0.8,\n        #'lambda_l1':0.00007995302080034896,\n        'lambda_l2':0.005,\n        'subsample_freq':11,\n        'learning_rate': 0.01,\n        'min_child_weight':5.5,\n\n    }\n    \n    model = lgb.train(params,\n                        dtrain,\n                        num_boost_round=6000,\n                        valid_sets=dvalid,\n                        early_stopping_rounds=20,\n                        verbose_eval=100)\n\n\n    sub += model.predict(X_test, num_iteration=model.best_iteration)/kfold.n_splits\n'''\n\n\n# In[ ]:\n\n\n'''\nsub = pd.DataFrame(sub)\ncols = [x for x in range(0, 22)]\ncols = label_le.inverse_transform(cols)\n\nsub.columns = cols\nsub['DeviceID'] = test['device_id'].values\n\nsub = sub[['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \n         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', \n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']]\n\nsub.to_csv('30.csv', index=False)\n'''\n\n"
  },
  {
    "path": "nb_cz_lwl_wcm/10_lgb.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n#add\nimport gc\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom scipy.sparse import hstack, vstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\n# from skopt.space import Integer, Categorical, Real, Log10\n# from skopt.utils import use_named_args\n# from skopt import gp_minimize\nfrom gensim.models import Word2Vec, FastText\nimport gensim \nimport re\nfrom config import path\n\n# path=\"/Users/chizhu/data/competition_data/易观/\"\n\n\n# In[2]:\n\n\ntest = pd.read_csv(path+'deviceid_test.tsv', sep='\\t', names=['device_id'])\ntrain = pd.read_csv(path+'deviceid_train.tsv', sep='\\t', names=['device_id', 'sex', 'age'])\nbrand = pd.read_table(path+'deviceid_brand.tsv', names=['device_id', 'vendor', 'version'])\npacktime = pd.read_table(path+'deviceid_package_start_close.tsv', \n                         names=['device_id', 'app', 'start', 'close'])\npackages = pd.read_csv(path+'deviceid_packages.tsv', sep='\\t', names=['device_id', 'apps'])\n\n\n# In[3]:\n\n\ndef get_str(df):\n    res=\"\"\n    for i in df.split(\",\"):\n        res+=i+\" \"\n    return res\npackages[\"str_app\"]=packages['apps'].apply(lambda x:get_str(x),1)\n\n\n# In[4]:\n\n\ntfidf = CountVectorizer()\ntrain_str_app=pd.merge(train[['device_id']],packages[[\"device_id\",'str_app']],on=\"device_id\",how=\"left\")\ntest_str_app=pd.merge(test[['device_id']],packages[[\"device_id\",'str_app']],on=\"device_id\",how=\"left\")\npackages['str_app'] = tfidf.fit_transform(packages['str_app'])\ntrain_app = tfidf.transform(list(train_str_app['str_app'])).tocsr()\ntest_app = tfidf.transform(list(test_str_app['str_app'])).tocsr()\n\n\n# In[5]:\n\n\nall_id=pd.concat([train[[\"device_id\"]],test[['device_id']]])\n\n\n# In[6]:\n\n\nall_id.index=range(len(all_id))\n\n\n# In[7]:\n\n\n# encoding:utf-8\nimport sys\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.cross_validation import StratifiedKFold\nimport numpy as np\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.svm import LinearSVC\n\nfrom sklearn.metrics import mean_squared_error\nimport os\nif not os.path.exists(\"data\"):\n    os.mkdir(\"data\")\n\n\n\n############################ 切分数据集 ##########################\nprint('开始进行一些前期处理')\ntrain_feature = train_app\ntest_feature = test_app\n    # 五则交叉验证\nn_folds = 5\nprint('处理完毕')\ndf_stack = pd.DataFrame()\ndf_stack['device_id']=all_id['device_id']\nfor label in [\"sex\"]:\n    score = train[label]-1\n    \n    ########################### lr(LogisticRegression) ################################\n    print('lr stacking')\n    stack_train = np.zeros((len(train), 1))\n    stack_test = 
np.zeros((len(test), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        clf = LogisticRegression(random_state=1017, C=8)\n        clf.fit(train_feature[tr], score[tr])\n        score_va = clf.predict_proba(train_feature[va])[:,1]\n        \n        score_te = clf.predict_proba(test_feature)[:,1]\n        print('得分' + str(mean_squared_error(score[va], clf.predict(train_feature[va]))))\n        stack_train[va,0] = score_va\n        stack_test[:,0]+= score_te\n    stack_test /= n_folds\n    \n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['pack_tfidf_lr_classfiy_{}'.format(label)] = stack[:, 0]\n    \n\n    ########################### SGD(随机梯度下降) ################################\n    print('sgd stacking')\n    stack_train = np.zeros((len(train), 1))\n    stack_test = np.zeros((len(test), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        sgd = SGDClassifier(random_state=1017, loss='log')\n        sgd.fit(train_feature[tr], score[tr])\n        score_va = sgd.predict_proba(train_feature[va])[:,1]\n        score_te = sgd.predict_proba(test_feature)[:,1]\n        print('得分' + str(mean_squared_error(score[va], sgd.predict(train_feature[va]))))\n        stack_train[va,0] = score_va\n        stack_test[:,0]+= score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['pack_tfidf_sgd_classfiy_{}'.format(label)] = stack[:, 0]\n\n\n    ########################### pac(PassiveAggressiveClassifier) ################################\n    print('PAC stacking')\n    stack_train = np.zeros((len(train), 1))\n    stack_test = np.zeros((len(test), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        pac = PassiveAggressiveClassifier(random_state=1017)\n        pac.fit(train_feature[tr], score[tr])\n        score_va = pac._predict_proba_lr(train_feature[va])[:,1]\n        score_te = pac._predict_proba_lr(test_feature)[:,1]\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], pac.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['pack_tfidf_pac_classfiy_{}'.format(label)] = stack[:, 0]\n    \n\n\n    ########################### ridge(RidgeClassfiy) ################################\n    print('RidgeClassfiy stacking')\n    stack_train = np.zeros((len(train), 1))\n    stack_test = np.zeros((len(test), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        ridge = RidgeClassifier(random_state=1017)\n        ridge.fit(train_feature[tr], score[tr])\n        score_va = ridge._predict_proba_lr(train_feature[va])[:,1]\n        score_te = ridge._predict_proba_lr(test_feature)[:,1]\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], ridge.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    
df_stack['pack_tfidf_ridge_classfiy_{}'.format(label)] = stack[:, 0]\n    \n\n\n    ########################### bnb(BernoulliNB) ################################\n    print('BernoulliNB stacking')\n    stack_train = np.zeros((len(train), 1))\n    stack_test = np.zeros((len(test), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        bnb = BernoulliNB()\n        bnb.fit(train_feature[tr], score[tr])\n        score_va = bnb.predict_proba(train_feature[va])[:,1]\n        score_te = bnb.predict_proba(test_feature)[:,1]\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], bnb.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['pack_tfidf_bnb_classfiy_{}'.format(label)] = stack[:, 0]\n    \n    ########################### mnb(MultinomialNB) ################################\n    print('MultinomialNB stacking')\n    stack_train = np.zeros((len(train), 1))\n    stack_test = np.zeros((len(test), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        mnb = MultinomialNB()\n        mnb.fit(train_feature[tr], score[tr])\n        score_va = mnb.predict_proba(train_feature[va])[:,1]\n        score_te = mnb.predict_proba(test_feature)[:,1]\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], mnb.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['pack_tfidf_mnb_classfiy_{}'.format(label)] = stack[:, 0]\n    \n\n    ############################ Linersvc(LinerSVC) ################################\n    print('LinerSVC stacking')\n    stack_train = np.zeros((len(train), 1))\n    stack_test = np.zeros((len(test), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        lsvc = LinearSVC(random_state=1017)\n        lsvc.fit(train_feature[tr], score[tr])\n        score_va = lsvc._predict_proba_lr(train_feature[va])[:,1]\n        score_te = lsvc._predict_proba_lr(test_feature)[:,1]\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], lsvc.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['pack_tfidf_lsvc_classfiy_{}'.format(label)] = stack[:, 0]\n    \ndf_stack.to_csv('data/tfidf_classfiy_package.csv', index=None, encoding='utf8')\nprint('tfidf特征已保存\\n')\n\n\n# In[8]:\n\n\npacktime['period'] = (packtime['close'] - packtime['start'])/1000\npacktime['start'] = pd.to_datetime(packtime['start'], unit='ms')\napp_use_time = packtime.groupby(['app'])['period'].agg('sum').reset_index()\napp_use_top100 = app_use_time.sort_values(by='period', ascending=False)[:100]['app']\ndevice_app_use_time = packtime.groupby(['device_id', 'app'])['period'].agg('sum').reset_index()\nuse_time_top100_statis = device_app_use_time.set_index('app').loc[list(app_use_top100)].reset_index()\ntop100_statis = 
use_time_top100_statis.pivot(index='device_id', columns='app', values='period').reset_index()\n\n\n# In[9]:\n\n\ntop100_statis = top100_statis.fillna(0)\n\n\n# In[10]:\n\n\n# Phone-brand preprocessing\nbrand['vendor'] = brand['vendor'].astype(str).apply(lambda x : x.split(' ')[0].upper())\nbrand['ph_ver'] = brand['vendor'] + '_' + brand['version']\n\nph_ver = brand['ph_ver'].value_counts()\nph_ver_cnt = pd.DataFrame(ph_ver).reset_index()\nph_ver_cnt.columns = ['ph_ver', 'ph_ver_cnt']\n\nbrand = pd.merge(left=brand, right=ph_ver_cnt,on='ph_ver')\n\n\n# In[11]:\n\n\n# A small treatment of the long-tail distribution\nmask = (brand.ph_ver_cnt < 100)\nbrand.loc[mask, 'ph_ver'] = 'other' \n\ntrain_data = pd.merge(brand[['device_id', 'ph_ver']], train, on='device_id', how='right')\ntest_data = pd.merge(brand[['device_id', 'ph_ver']], test, on='device_id', how='right')\ntrain_data['ph_ver'] = train_data['ph_ver'].astype(str)\ntest_data['ph_ver'] = test_data['ph_ver'].astype(str)\n\n# Label-encode ph_ver\nph_ver_le = preprocessing.LabelEncoder()\ntrain_data['ph_ver'] = ph_ver_le.fit_transform(train_data['ph_ver'])\ntest_data['ph_ver'] = ph_ver_le.transform(test_data['ph_ver'])\ntrain_data['label'] = train_data['sex'].astype(str) + '-' + train_data['age'].astype(str)\nlabel_le = preprocessing.LabelEncoder()\ntrain_data['label'] = label_le.fit_transform(train_data['label'])\n\n\n# In[12]:\n\n\ntest_data['sex'] = -1\ntest_data['age'] = -1\ntest_data['label'] = -1\ndata = pd.concat([train_data, test_data], ignore_index=True)\nprint(data.shape)\n\n\n# In[13]:\n\n\ntrain_data = data[data.sex != -1]\ntest_data = data[data.sex == -1]\nprint(train_data.shape, test_data.shape)\n\n\n# In[14]:\n\n\n# Total number of uses of each app\napp_num = packtime['app'].value_counts().reset_index()\napp_num.columns = ['app', 'app_num']\npacktime = pd.merge(left=packtime, right=app_num, on='app')\n# Again, trim the long tail (tried no trimming and other thresholds; 100 scored best)\npacktime.loc[packtime.app_num < 100, 'app'] = 'other'\n\n\n# In[15]:\n\n\n# Count the apps on each device\ndf_app = packtime[['device_id', 'app']]\napps = df_app.drop_duplicates().groupby(['device_id'])['app'].apply(' '.join).reset_index()\napps['app_length'] = apps['app'].apply(lambda x:len(x.split(' ')))\n\ntrain_data = pd.merge(train_data, apps, on='device_id', how='left')\ntest_data = pd.merge(test_data, apps, on='device_id', how='left')\n\n\n# In[16]:\n\n\n# Bag-of-words counts over the apps installed on each device\ntfidf = CountVectorizer()\ntfidf.fit(apps['app'])  # fit only; assigning the sparse matrix back into the DataFrame would fail\n\nX_tr_app = tfidf.transform(list(train_data['app'])).tocsr()\nX_ts_app = tfidf.transform(list(test_data['app'])).tocsr()\n\n\n# In[17]:\n\n\n# encoding:utf-8\nimport sys\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.cross_validation import StratifiedKFold\nimport numpy as np\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.svm import LinearSVC\n\nfrom sklearn.metrics import mean_squared_error\n\n\n\n\n############################ Split the dataset ##########################\nprint('Starting some preprocessing')\ntrain_feature = X_tr_app\ntest_feature = X_ts_app\n    # 5-fold cross-validation\nn_folds = 5\nprint('Preprocessing done')\ndf_stack = pd.DataFrame()\ndf_stack['device_id']=data['device_id']\nfor label in [\"sex\"]:\n    score = train_data[label]-1\n    \n    ########################### lr(LogisticRegression) ################################\n    print('lr 
stacking')\n    stack_train = np.zeros((len(train_data), 1))\n    stack_test = np.zeros((len(test_data), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        clf = LogisticRegression(random_state=1017, C=8)\n        clf.fit(train_feature[tr], score[tr])\n        score_va = clf.predict_proba(train_feature[va])[:,1]\n        \n        score_te = clf.predict_proba(test_feature)[:,1]\n        print('得分' + str(mean_squared_error(score[va], clf.predict(train_feature[va]))))\n        stack_train[va,0] = score_va\n        stack_test[:,0]+= score_te\n    stack_test /= n_folds\n    \n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['tfidf_lr_classfiy_{}'.format(label)] = stack[:, 0]\n    \n\n    ########################### SGD(随机梯度下降) ################################\n    print('sgd stacking')\n    stack_train = np.zeros((len(train_data), 1))\n    stack_test = np.zeros((len(test_data), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        sgd = SGDClassifier(random_state=1017, loss='log')\n        sgd.fit(train_feature[tr], score[tr])\n        score_va = sgd.predict_proba(train_feature[va])[:,1]\n        score_te = sgd.predict_proba(test_feature)[:,1]\n        print('得分' + str(mean_squared_error(score[va], sgd.predict(train_feature[va]))))\n        stack_train[va,0] = score_va\n        stack_test[:,0]+= score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['tfidf_sgd_classfiy_{}'.format(label)] = stack[:, 0]\n\n\n    ########################### pac(PassiveAggressiveClassifier) ################################\n    print('PAC stacking')\n    stack_train = np.zeros((len(train_data), 1))\n    stack_test = np.zeros((len(test_data), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        pac = PassiveAggressiveClassifier(random_state=1017)\n        pac.fit(train_feature[tr], score[tr])\n        score_va = pac._predict_proba_lr(train_feature[va])[:,1]\n        score_te = pac._predict_proba_lr(test_feature)[:,1]\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], pac.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['tfidf_pac_classfiy_{}'.format(label)] = stack[:, 0]\n    \n\n\n    ########################### ridge(RidgeClassfiy) ################################\n    print('RidgeClassfiy stacking')\n    stack_train = np.zeros((len(train_data), 1))\n    stack_test = np.zeros((len(test_data), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        ridge = RidgeClassifier(random_state=1017)\n        ridge.fit(train_feature[tr], score[tr])\n        score_va = ridge._predict_proba_lr(train_feature[va])[:,1]\n        score_te = ridge._predict_proba_lr(test_feature)[:,1]\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], ridge.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += 
score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['tfidf_ridge_classfiy_{}'.format(label)] = stack[:, 0]\n    \n\n\n    ########################### bnb(BernoulliNB) ################################\n    print('BernoulliNB stacking')\n    stack_train = np.zeros((len(train_data), 1))\n    stack_test = np.zeros((len(test_data), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        bnb = BernoulliNB()\n        bnb.fit(train_feature[tr], score[tr])\n        score_va = bnb.predict_proba(train_feature[va])[:,1]\n        score_te = bnb.predict_proba(test_feature)[:,1]\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], bnb.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['tfidf_bnb_classfiy_{}'.format(label)] = stack[:, 0]\n    \n    ########################### mnb(MultinomialNB) ################################\n    print('MultinomialNB stacking')\n    stack_train = np.zeros((len(train_data), 1))\n    stack_test = np.zeros((len(test_data), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        mnb = MultinomialNB()\n        mnb.fit(train_feature[tr], score[tr])\n        score_va = mnb.predict_proba(train_feature[va])[:,1]\n        score_te = mnb.predict_proba(test_feature)[:,1]\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], mnb.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['tfidf_mnb_classfiy_{}'.format(label)] = stack[:, 0]\n    \n\n    ############################ Linersvc(LinerSVC) ################################\n    print('LinerSVC stacking')\n    stack_train = np.zeros((len(train_data), 1))\n    stack_test = np.zeros((len(test_data), 1))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        lsvc = LinearSVC(random_state=1017)\n        lsvc.fit(train_feature[tr], score[tr])\n        score_va = lsvc._predict_proba_lr(train_feature[va])[:,1]\n        score_te = lsvc._predict_proba_lr(test_feature)[:,1]\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], lsvc.predict(train_feature[va]))))\n        stack_train[va,0] += score_va\n        stack_test[:,0] += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    \n    df_stack['tfidf_lsvc_classfiy_{}'.format(label)] = stack[:, 0]\n    \ndf_stack.to_csv('data/tfidf_classfiy.csv', index=None, encoding='utf8')\nprint('tfidf特征已保存\\n')\n\n\n# ### 利用word2vec得到每台设备所安装app的embedding表示\n\n# In[18]:\n\n\npackages['apps'] = packages['apps'].apply(lambda x:x.split(','))\npackages['app_length'] = packages['apps'].apply(lambda x:len(x))\n\n\n# In[19]:\n\n\nembed_size = 128\nfastmodel = Word2Vec(list(packages['apps']), size=embed_size, window=4, min_count=3, negative=2,\n                 sg=1, sample=0.002, hs=1, workers=4)  \n\nembedding_fast = pd.DataFrame([fastmodel[word] for word in 
(fastmodel.wv.vocab)])\nembedding_fast['app'] = list(fastmodel.wv.vocab)\nembedding_fast.columns= [\"fdim_%s\" % str(i) for i in range(embed_size)]+[\"app\"]\nprint(embedding_fast.head())\n\n\n# In[20]:\n\n\nid_list = []\nfor i in range(packages.shape[0]):\n    id_list += [list(packages['device_id'])[i]]*packages['app_length'].iloc[i]\n\n\napp_list = [word for item in packages['apps'] for word in item]\n\napp_vect = pd.DataFrame({'device_id':id_list})        \napp_vect['app'] = app_list\n\n\n# In[21]:\n\n\napp_vect = app_vect.merge(embedding_fast, on='app', how='left')\napp_vect = app_vect.drop('app', axis=1)\n\nseqfeature = app_vect.groupby(['device_id']).agg('mean')\nseqfeature.reset_index(inplace=True)\n\n\n# In[22]:\n\n\nprint(seqfeature.head())\n\n\n# ### 用户一周七天玩手机的时长情况\n\n# In[23]:\n\n\n# packtime['period'] = (packtime['close'] - packtime['start'])/1000\n# packtime['start'] = pd.to_datetime(packtime['start'], unit='ms')\npacktime['dayofweek'] = packtime['start'].dt.dayofweek\npacktime['hour'] = packtime['start'].dt.hour\n# packtime = packtime[(packtime['start'] < '2017-03-31 23:59:59') & (packtime['start'] > '2017-03-01 00:00:00')]\n\n\n# In[24]:\n\n\napp_use_time = packtime.groupby(['device_id', 'dayofweek'])['period'].agg('sum').reset_index()\nweek_app_use = app_use_time.pivot_table(values='period', columns='dayofweek', index='device_id').reset_index()\nweek_app_use = week_app_use.fillna(0)\nweek_app_use.columns = ['device_id'] + ['week_day_' + str(i) for i in range(0, 7)]\n\nweek_app_use['week_max'] = week_app_use.max(axis=1)\nweek_app_use['week_min'] = week_app_use.min(axis=1)\nweek_app_use['week_sum'] = week_app_use.sum(axis=1)\nweek_app_use['week_std'] = week_app_use.std(axis=1)\n\n\n\n# ### 将各个特征整合到一块\n\n# In[25]:\n\n\nprint(train_data.columns[4:])\n\n\n# In[26]:\n\n\nuser_behavior = pd.read_csv('data/user_behavior.csv')\nuser_behavior['app_len_max'] = user_behavior['app_len_max'].astype(np.float64)\ndel user_behavior['app']\ntrain_data = pd.merge(train_data, user_behavior, on='device_id', how='left')\ntest_data = pd.merge(test_data, user_behavior, on='device_id', how='left')\n\n\n# In[27]:\n\n\ntrain_data = pd.merge(train_data, seqfeature, on='device_id', how='left')\ntest_data = pd.merge(test_data, seqfeature, on='device_id', how='left')\n\n\n# In[28]:\n\n\ntrain_data = pd.merge(train_data, week_app_use, on='device_id', how='left')\ntest_data = pd.merge(test_data, week_app_use, on='device_id', how='left')\n\n\n# In[29]:\n\n\ntop100_statis.columns = ['device_id'] + ['top100_statis_' + str(i) for i in range(0, 100)]\ntrain_data = pd.merge(train_data, top100_statis, on='device_id', how='left')\ntest_data = pd.merge(test_data, top100_statis, on='device_id', how='left')\n\n\n# In[30]:\n\n\ntrain_data.to_csv(\"./data/train_data.csv\",index=False)\ntest_data.to_csv(\"./data/test_data.csv\",index=False)\n\n\n# In[31]:\n\n\ntfidf_feat=pd.read_csv(\"data/tfidf_classfiy.csv\")\ntf2=pd.read_csv(\"data/tfidf_classfiy_package.csv\")\ntrain_data=pd.read_csv(\"data/train_data.csv\")\ntest_data=pd.read_csv(\"data/test_data.csv\")\n# app_w2v=pd.read_csv(\"./data/w2v_tfidf.csv\")\n\n\n# In[32]:\n\n\ntrain = pd.merge(train_data,tfidf_feat,on=\"device_id\",how=\"left\")\n# train = pd.merge(train_data,tf2,on=\"device_id\",how=\"left\")\n# train = pd.merge(train_data,app_w2v,on=\"device_id\",how=\"left\")\ntest = pd.merge(test_data,tfidf_feat,on=\"device_id\",how=\"left\")\n# test = pd.merge(test_data,tf2,on=\"device_id\",how=\"left\")\n# test = 
pd.merge(test_data,app_w2v,on=\"device_id\",how=\"left\")\n\n\n# In[85]:\n\n\ntrain_dt = pd.merge(train_data[['device_id','ph_ver']],tfidf_feat,on=\"device_id\",how=\"left\")\ntrain_dt = pd.merge(train_dt,tf2,on=\"device_id\",how=\"left\")\ntest_dt = pd.merge(test_data[['device_id',\"ph_ver\"]],tfidf_feat,on=\"device_id\",how=\"left\")\ntest_dt = pd.merge(test_dt,tf2,on=\"device_id\",how=\"left\")\nfeat=pd.concat([train_dt,test_dt])\nfeat.to_csv(\"data/sex_chizhu_feat.csv\",index=False)\n\n\n# In[33]:\n\n\nfeatures = [x for x in train.columns if x not in ['device_id', 'sex',\"age\",\"label\",\"app\"]]\nY = train['sex'] - 1\n\n\n# ### 开始训练模型\n\n# In[34]:\n\n\nimport lightgbm as lgb\n# import xgboost as xgb\nfrom sklearn.metrics import auc, log_loss, roc_auc_score,f1_score,recall_score,precision_score\nfrom sklearn.cross_validation import StratifiedKFold\n\nkf = StratifiedKFold(Y, n_folds=5, shuffle=True, random_state=1024)\n\nparams = {\n            'boosting_type': 'gbdt',\n            'metric': {'binary_logloss',}, \n#             'is_unbalance':'True',\n            'learning_rate' : 0.01, \n             'verbose': 0,\n            'num_leaves':32 ,\n            # 'max_depth':8, \n            # 'max_bin':10, \n            # 'lambda_l2': 1, \n            # 'min_child_weight':50,\n            'objective': 'binary', \n            'feature_fraction': 0.4,\n            'bagging_fraction':0.7, # 0.9是目前最优的\n            'bagging_freq':3,  # 3是目前最优的\n#             'min_data': 500,\n            'seed': 1024,\n            'nthread': 8,\n            # 'silent': True,\n}\nnum_round = 3500\nearly_stopping_rounds = 100\n\n\n# In[35]:\n\n\naus = []\nsub1 = np.zeros((len(test), ))\npred_oob1=np.zeros((len(train),))\nfor i,(train_index,test_index) in enumerate(kf):\n  \n    tr_x = train[features].reindex(index=train_index, copy=False)\n    tr_y = Y[train_index]\n    te_x = train[features].reindex(index=test_index, copy=False)\n    te_y = Y[test_index]\n\n    d_tr = lgb.Dataset(tr_x, label=tr_y)\n    d_te = lgb.Dataset(te_x, label=te_y)\n    model = lgb.train(params, d_tr, num_boost_round=num_round, \n                      valid_sets=d_te,verbose_eval=200,\n                              early_stopping_rounds=early_stopping_rounds)\n    pred= model.predict(te_x, num_iteration=model.best_iteration)\n    pred_oob1[test_index] =pred\n    \n    a = log_loss(te_y, pred)\n\n    sub1 += model.predict(test[features], num_iteration=model.best_iteration)/5\n\n    print (\"idx: \", i) \n    print (\" loss: %.5f\" % a)\n\n    print (\"best tree num: \", model.best_iteration)\n    aus.append(a)\n\nprint (\"mean\")\nprint (\"auc:       %s\" % (sum(aus) / 5.0))\n\n\n# In[36]:\n\n\n#####特征重要性\n# get_ipython().run_line_magic('matplotlib', 'inline')\n# import matplotlib.pyplot as plt\n# f=dict(zip(list(train[features].keys()),model.feature_importance()))\n# f=sorted(f.items(),key=lambda d:d[1], reverse = True)\n# f=pd.DataFrame(f,columns=['feature','imp'])\n# plt.bar(range(len(f)),f.imp)\n# plt.xticks(range(len(f)),f.feature,rotation=70,fontsize=20)\n# fig = plt.gcf()\n# fig.set_size_inches(50, 20)\n\n\n# In[37]:\n\n\n# f.ix[:450,:]\n\n\n# In[38]:\n\n\n# features=f.ix[:434,\"feature\"].values\n\n\n# In[39]:\n\n\npred_oob1 = pd.DataFrame(pred_oob1, columns=['sex2'])\nsub1 = pd.DataFrame(sub1, columns=['sex2'])\nres1=pd.concat([pred_oob1,sub1])\nres1['sex1'] = 1-res1['sex2']\n\n\n# In[40]:\n\n\nimport gc\ngc.collect()\n\n\n# In[41]:\n\n\ntrain_id = pd.read_csv(path+'deviceid_train.tsv', sep='\\t', names=['device_id', 'sex', 
'age'])\n\n\n# In[42]:\n\n\n# encoding:utf-8\nimport sys\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.cross_validation import StratifiedKFold\nimport numpy as np\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.svm import LinearSVC\n\nfrom sklearn.metrics import mean_squared_error\n\n\n\n\n############################ 切分数据集 ##########################\nprint('开始进行一些前期处理')\ntrain_feature = train_app\ntest_feature = test_app\n    # 五则交叉验证\nn_folds = 5\nprint('处理完毕')\ndf_stack = pd.DataFrame()\ndf_stack['device_id']=all_id['device_id']\nfor label in [\"age\"]:\n    score = train_id[label]\n    \n    ########################### lr(LogisticRegression) ################################\n    print('lr stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        clf = LogisticRegression(random_state=1017, C=8)\n        clf.fit(train_feature[tr], score[tr])\n        score_va = clf.predict_proba(train_feature[va])\n        \n        score_te = clf.predict_proba(test_feature)\n        print('得分' + str(mean_squared_error(score[va], clf.predict(train_feature[va]))))\n        stack_train[va] = score_va\n        stack_test+= score_te\n    stack_test /= n_folds\n    \n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['pack_tfidf_lr_classfiy_{}'.format(i)] = stack[:, i]\n    \n\n    ########################### SGD(随机梯度下降) ################################\n    print('sgd stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        sgd = SGDClassifier(random_state=1017, loss='log')\n        sgd.fit(train_feature[tr], score[tr])\n        score_va = sgd.predict_proba(train_feature[va])\n        score_te = sgd.predict_proba(test_feature)\n        print('得分' + str(mean_squared_error(score[va], sgd.predict(train_feature[va]))))\n        stack_train[va] = score_va\n        stack_test+= score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['pack_tfidf_sgd_classfiy_{}'.format(i)] = stack[:, i]\n\n\n    ########################### pac(PassiveAggressiveClassifier) ################################\n    print('PAC stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        pac = PassiveAggressiveClassifier(random_state=1017)\n        pac.fit(train_feature[tr], score[tr])\n        score_va = pac._predict_proba_lr(train_feature[va])\n        score_te = pac._predict_proba_lr(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], pac.predict(train_feature[va]))))\n        
stack_train[va] += score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['pack_tfidf_pac_classfiy_{}'.format(i)] = stack[:, i]\n    \n\n\n    ########################### ridge(RidgeClassfiy) ################################\n    print('RidgeClassfiy stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        ridge = RidgeClassifier(random_state=1017)\n        ridge.fit(train_feature[tr], score[tr])\n        score_va = ridge._predict_proba_lr(train_feature[va])\n        score_te = ridge._predict_proba_lr(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], ridge.predict(train_feature[va]))))\n        stack_train[va] += score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['pack_tfidf_ridge_classfiy_{}'.format(i)] = stack[:, i]\n    \n\n\n    ########################### bnb(BernoulliNB) ################################\n    print('BernoulliNB stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        bnb = BernoulliNB()\n        bnb.fit(train_feature[tr], score[tr])\n        score_va = bnb.predict_proba(train_feature[va])\n        score_te = bnb.predict_proba(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], bnb.predict(train_feature[va]))))\n        stack_train[va] += score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['pack_tfidf_bnb_classfiy_{}'.format(i)] = stack[:, i]\n    \n    ########################### mnb(MultinomialNB) ################################\n    print('MultinomialNB stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        mnb = MultinomialNB()\n        mnb.fit(train_feature[tr], score[tr])\n        score_va = mnb.predict_proba(train_feature[va])\n        score_te = mnb.predict_proba(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], mnb.predict(train_feature[va]))))\n        stack_train[va] += score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['pack_tfidf_mnb_classfiy_{}'.format(i)] = stack[:, i]\n    \n\n    ############################ Linersvc(LinerSVC) ################################\n    print('LinerSVC stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        lsvc = 
LinearSVC(random_state=1017)\n        lsvc.fit(train_feature[tr], score[tr])\n        score_va = lsvc._predict_proba_lr(train_feature[va])\n        score_te = lsvc._predict_proba_lr(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], lsvc.predict(train_feature[va]))))\n        stack_train[va] += score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['pack_tfidf_lsvc_classfiy_{}'.format(i)] = stack[:, i]\n    \ndf_stack.to_csv('data/pack_tfidf_age.csv', index=None, encoding='utf8')\nprint('tfidf特征已保存\\n')\n\n\n# #### tfidf\n\n# In[43]:\n\n\n# encoding:utf-8\nimport sys\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.cross_validation import StratifiedKFold\nimport numpy as np\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.svm import LinearSVC\n\nfrom sklearn.metrics import mean_squared_error\n\n\n\n\n############################ 切分数据集 ##########################\nprint('开始进行一些前期处理')\ntrain_feature = X_tr_app\ntest_feature = X_ts_app\n    # 五则交叉验证\nn_folds = 5\nprint('处理完毕')\ndf_stack = pd.DataFrame()\ndf_stack['device_id']=data['device_id']\nfor label in [\"age\"]:\n    score = train[label]\n    \n    ########################### lr(LogisticRegression) ################################\n    print('lr stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        clf = LogisticRegression(random_state=1017, C=8)\n        clf.fit(train_feature[tr], score[tr])\n        score_va = clf.predict_proba(train_feature[va])\n        \n        score_te = clf.predict_proba(test_feature)\n        print('得分' + str(mean_squared_error(score[va], clf.predict(train_feature[va]))))\n        stack_train[va] = score_va\n        stack_test+= score_te\n    stack_test /= n_folds\n    \n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['tfidf_lr_classfiy_{}'.format(i)] = stack[:, i]\n    \n\n    ########################### SGD(随机梯度下降) ################################\n    print('sgd stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        sgd = SGDClassifier(random_state=1017, loss='log')\n        sgd.fit(train_feature[tr], score[tr])\n        score_va = sgd.predict_proba(train_feature[va])\n        score_te = sgd.predict_proba(test_feature)\n        print('得分' + str(mean_squared_error(score[va], sgd.predict(train_feature[va]))))\n        stack_train[va] = score_va\n        stack_test+= score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['tfidf_sgd_classfiy_{}'.format(i)] = stack[:, i]\n\n\n    ########################### pac(PassiveAggressiveClassifier) 
################################\n    print('PAC stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        pac = PassiveAggressiveClassifier(random_state=1017)\n        pac.fit(train_feature[tr], score[tr])\n        score_va = pac._predict_proba_lr(train_feature[va])\n        score_te = pac._predict_proba_lr(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], pac.predict(train_feature[va]))))\n        stack_train[va] += score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['tfidf_pac_classfiy_{}'.format(i)] = stack[:, i]\n    \n\n\n    ########################### ridge(RidgeClassfiy) ################################\n    print('RidgeClassfiy stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        ridge = RidgeClassifier(random_state=1017)\n        ridge.fit(train_feature[tr], score[tr])\n        score_va = ridge._predict_proba_lr(train_feature[va])\n        score_te = ridge._predict_proba_lr(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], ridge.predict(train_feature[va]))))\n        stack_train[va] += score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['tfidf_ridge_classfiy_{}'.format(i)] = stack[:, i]\n    \n\n\n    ########################### bnb(BernoulliNB) ################################\n    print('BernoulliNB stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        bnb = BernoulliNB()\n        bnb.fit(train_feature[tr], score[tr])\n        score_va = bnb.predict_proba(train_feature[va])\n        score_te = bnb.predict_proba(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], bnb.predict(train_feature[va]))))\n        stack_train[va] += score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['tfidf_bnb_classfiy_{}'.format(i)] = stack[:, i]\n    \n    ########################### mnb(MultinomialNB) ################################\n    print('MultinomialNB stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        mnb = MultinomialNB()\n        mnb.fit(train_feature[tr], score[tr])\n        score_va = mnb.predict_proba(train_feature[va])\n        score_te = mnb.predict_proba(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], mnb.predict(train_feature[va]))))\n        stack_train[va] += 
score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['tfidf_mnb_classfiy_{}'.format(i)] = stack[:, i]\n    \n\n    ############################ Linersvc(LinerSVC) ################################\n    print('LinerSVC stacking')\n    stack_train = np.zeros((len(train), 11))\n    stack_test = np.zeros((len(test), 11))\n    score_va = 0\n\n    for i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n        print('stack:%d/%d' % ((i + 1), n_folds))\n        lsvc = LinearSVC(random_state=1017)\n        lsvc.fit(train_feature[tr], score[tr])\n        score_va = lsvc._predict_proba_lr(train_feature[va])\n        score_te = lsvc._predict_proba_lr(test_feature)\n        print(score_va)\n        print('得分' + str(mean_squared_error(score[va], lsvc.predict(train_feature[va]))))\n        stack_train[va] += score_va\n        stack_test += score_te\n    stack_test /= n_folds\n    stack = np.vstack([stack_train, stack_test])\n    \n    for i in range(stack.shape[1]):\n        df_stack['data/tfidf_lsvc_classfiy_{}'.format(i)] = stack[:, i]\n    \ndf_stack.to_csv('data/tfidf_age.csv', index=None, encoding='utf8')\nprint('tfidf特征已保存\\n')\n\n\n# In[44]:\n\n\ntfidf_feat=pd.read_csv(\"data/tfidf_age.csv\")\ntf2=pd.read_csv(\"data/pack_tfidf_age.csv\")\ntrain_data=pd.read_csv(\"data/train_data.csv\")\ntest_data=pd.read_csv(\"data/test_data.csv\")\n\n\n# In[41]:\n\n\ntrain_dt = pd.merge(train_data[['device_id','ph_ver']],tfidf_feat,on=\"device_id\",how=\"left\")\ntrain_dt = pd.merge(train_dt,tf2,on=\"device_id\",how=\"left\")\ntest_dt = pd.merge(test_data[['device_id',\"ph_ver\"]],tfidf_feat,on=\"device_id\",how=\"left\")\ntest_dt = pd.merge(test_dt,tf2,on=\"device_id\",how=\"left\")\nfeat=pd.concat([train_dt,test_dt])\nfeat.to_csv(\"data/age_chizhu_feat.csv\",index=False)\n\n\n# In[40]:\n\n\n\n\n\n# In[45]:\n\n\ntfidf_feat=pd.read_csv(\"data/tfidf_age.csv\")\ntf2=pd.read_csv(\"data/pack_tfidf_age.csv\")\ntrain_data=pd.read_csv(\"data/train_data.csv\")\ntest_data=pd.read_csv(\"data/test_data.csv\")\ntrain = pd.merge(train_data,tfidf_feat,on=\"device_id\",how=\"left\")\n# train = pd.merge(train_data,tf2,on=\"device_id\",how=\"left\")\n# train = pd.merge(train_data,app_w2v,on=\"device_id\",how=\"left\")\ntest = pd.merge(test_data,tfidf_feat,on=\"device_id\",how=\"left\")\n# test = pd.merge(test_data,tf2,on=\"device_id\",how=\"left\")\n# test = pd.merge(test_data,app_w2v,on=\"device_id\",how=\"left\")\nfeatures = [x for x in train.columns if x not in ['device_id',\"age\",\"sex\",\"label\",\"app\"]]\nY = train['age'] \n\n\n# In[46]:\n\n\nimport lightgbm as lgb\n# import xgboost as xgb\nfrom sklearn.metrics import auc, log_loss, roc_auc_score,f1_score,recall_score,precision_score\nfrom sklearn.cross_validation import StratifiedKFold\n\nkf = StratifiedKFold(Y, n_folds=5, shuffle=True, random_state=1024)\n\nparams = {\n            'boosting_type': 'gbdt',\n            'metric': {'multi_logloss',}, \n#             'is_unbalance':'True',\n            'learning_rate' : 0.01, \n             'verbose': 0,\n            'num_leaves':32 ,\n            # 'max_depth':8, \n            # 'max_bin':10, \n            # 'lambda_l2': 1, \n            # 'min_child_weight':50,\n            \"num_class\":11,\n            'objective': 'multiclass', \n            'feature_fraction': 0.4,\n            'bagging_fraction':0.7, # 0.9是目前最优的\n            'bagging_freq':3,  # 3是目前最优的\n#    
         'min_data': 500,\n            'seed': 1024,\n            'nthread': 8,\n            # 'silent': True,\n}\nnum_round = 3500\nearly_stopping_rounds = 100\n\n\n# In[47]:\n\n\naus = []\nsub2 = np.zeros((len(test),11 ))\npred_oob2=np.zeros((len(train),11))\nfor i,(train_index,test_index) in enumerate(kf):\n  \n    tr_x = train[features].reindex(index=train_index, copy=False)\n    tr_y = Y[train_index]\n    te_x = train[features].reindex(index=test_index, copy=False)\n    te_y = Y[test_index]\n\n    d_tr = lgb.Dataset(tr_x, label=tr_y)\n    d_te = lgb.Dataset(te_x, label=te_y)\n    model = lgb.train(params, d_tr, num_boost_round=num_round, \n                      valid_sets=d_te,verbose_eval=200,\n                              early_stopping_rounds=early_stopping_rounds)\n    pred= model.predict(te_x, num_iteration=model.best_iteration)\n    pred_oob2[test_index] =pred\n    \n    a = log_loss(te_y, pred)\n\n    sub2 += model.predict(test[features], num_iteration=model.best_iteration)/5\n\n    print (\"idx: \", i) \n    print (\" loss: %.5f\" % a)\n\n    print (\"best tree num: \", model.best_iteration)\n    aus.append(a)\n\nprint (\"mean\")\nprint (\"loss:       %s\" % (sum(aus) / 5.0))\n\n\n# In[55]:\n\n\n##### feature importance\n\n# import matplotlib.pyplot as plt\n# f=dict(zip(list(train[features].keys()),model.feature_importance()))\n# f=sorted(f.items(),key=lambda d:d[1], reverse = True)\n# f=pd.DataFrame(f,columns=['feature','imp'])\n# plt.bar(range(len(f)),f.imp)\n# plt.xticks(range(len(f)),f.feature,rotation=70,fontsize=20)\n# fig = plt.gcf()\n# fig.set_size_inches(50, 20)\n\n\n# In[56]:\n\n\n# f.ix[:650,:]\n\n\n# In[57]:\n\n\n# features=f.ix[:641,\"feature\"].values\n\n\n# In[58]:\n\n\nres2_1=np.vstack((pred_oob2,sub2))\nres2_1 = pd.DataFrame(res2_1)\n\n\n# In[59]:\n\n\nimport os\n\nif not os.path.exists(\"submit\"):\n    os.mkdir(\"submit\")\nres1.index=range(len(res1))\nres2_1.index=range(len(res2_1))\nfinal_1=res2_1.copy()\nfinal_2=res2_1.copy()\nfor i in range(11):\n    final_1[i]=res1['sex1']*res2_1[i]\n    final_2[i]=res1['sex2']*res2_1[i]\nid_list=pd.concat([train[['device_id']],test[['device_id']]])\nfinal=id_list\nfinal.index=range(len(final))\nfinal.columns= ['DeviceID']\nfinal_pred = pd.concat([final_1,final_2],1)\nfinal=pd.concat([final,final_pred],1)\nfinal.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \n         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', \n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\nfinal.to_csv('feature/lgb_feat_chizhu.csv', index=False)\n\n\n# In[60]:\n\n\ntest['DeviceID']=test['device_id']\nsub=pd.merge(test[['DeviceID']],final,on=\"DeviceID\",how=\"left\")\nsub.to_csv(\"submit/lgb_chizhu.csv\",index=False)\n\n\n# In[61]:\n\n\n# sub.sum(1)\n\n"
  },
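The LightGBM script above factorises the 22-way target as P(sex) × P(age): a binary model gives the sex probability, an 11-class model gives the age distribution, and the submission columns `1-0` … `2-10` are their products. A minimal sketch of that combination step (function name and toy inputs are illustrative, not part of the repo):

```python
import numpy as np
import pandas as pd

# Combine a per-device sex probability with an 11-class age distribution
# into the 22 submission columns (p_sex is P(sex == 2)).
def joint_sex_age(p_sex, p_age):
    cols = {'1-%d' % i: (1 - p_sex) * p_age[:, i] for i in range(11)}
    cols.update({'2-%d' % i: p_sex * p_age[:, i] for i in range(11)})
    return pd.DataFrame(cols)

p_sex = np.array([0.3, 0.8])
p_age = np.full((2, 11), 1 / 11)                # uniform toy age distribution
print(joint_sex_age(p_sex, p_age).sum(axis=1))  # each row still sums to 1
```

Because the 22 joint probabilities sum to one whenever the two marginals do, the product form loses nothing as long as sex and age are modelled on the same devices.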
  {
    "path": "nb_cz_lwl_wcm/11_cnn.py",
    "content": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom datetime import datetime, timedelta\nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n# get_ipython().run_line_magic('matplotlib', 'inline')\n\n#add\n# from category_encoders import OrdinalEncoder\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom scipy.sparse import hstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\nfrom gensim.models import FastText, Word2Vec\nimport re\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.preprocessing.text import Tokenizer, text_to_word_sequence\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing import text, sequence\nfrom keras.callbacks import *\nfrom keras.layers.advanced_activations import LeakyReLU, PReLU\nimport keras.backend as K\nfrom keras.optimizers import *\nfrom keras.utils import to_categorical\nfrom keras.utils import multi_gpu_model\n\nimport tensorflow as tf\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\nfrom keras.backend.tensorflow_backend import set_session\nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.9\nset_session(tf.Session(config=config))\nfrom config import path\n# path = \"/dev/shm/chizhu_data/data/\"\n\n\n# In[2]:\n\n\npackages = pd.read_csv(path+'deviceid_packages.tsv',\n                       sep='\\t', names=['device_id', 'apps'])\ntest = pd.read_csv(path+'deviceid_test.tsv', sep='\\t', names=['device_id'])\ntrain = pd.read_csv(path+'deviceid_train.tsv', sep='\\t',\n                    names=['device_id', 'sex', 'age'])\n\nbrand = pd.read_table(path+'deviceid_brand.tsv',\n                      names=['device_id', 'vendor', 'version'])\nbehave_train = pd.read_csv('data/train_statistic_feat.csv')\nbehave_test = pd.read_csv('data/test_statistic_feat.csv')\n\n\n# In[3]:\n\n\nbehave_train.drop(['sex', 'age', 'label', 'app'], 1, inplace=True)\nbehave_test.drop(['sex', 'age', 'label', 'app'], 1, inplace=True)\n\n\n# In[4]:\n\n\nbrand['phone_version'] = brand['vendor'] + ' ' + brand['version']\ntrain = pd.merge(brand[['device_id', 'phone_version']],\n                 train, on='device_id', how='right')\ntest = pd.merge(brand[['device_id', 'phone_version']],\n                test, on='device_id', how='right')\n\n\n# In[5]:\n\n\ntrain = pd.merge(train, behave_train, on='device_id', how='left')\ntest = pd.merge(test, behave_test, on='device_id', how='left')\n\n\n# In[6]:\n\n\npackages['app_lenghth'] = packages['apps'].apply(\n    lambda x: x.split(',')).apply(lambda x: len(x))\npackages['app_list'] = packages['apps'].apply(lambda x: x.split(','))\ntrain = pd.merge(train, packages, on='device_id', how='left')\ntest = pd.merge(test, packages, on='device_id', how='left')\n\n\n# In[7]:\n\n\nembed_size = 128\nfastmodel = Word2Vec(list(packages['app_list']), size=embed_size, window=4, min_count=3, negative=2,\n                     sg=1, sample=0.002, hs=1, workers=4)\n\nembedding_fast = pd.DataFrame([fastmodel[word]\n                               for word in (fastmodel.wv.vocab)])\nembedding_fast['app'] = list(fastmodel.wv.vocab)\nembedding_fast.columns = 
[\"fdim_%s\" %\n                          str(i) for i in range(embed_size)]+[\"app\"]\n\n\n# In[8]:\n\n\ntokenizer = Tokenizer(lower=False, char_level=False, split=',')\n\ntokenizer.fit_on_texts(list(packages['apps']))\n\nX_seq = tokenizer.texts_to_sequences(train['apps'])\nX_test_seq = tokenizer.texts_to_sequences(test['apps'])\n\nmaxlen = 50\nX = pad_sequences(X_seq, maxlen=maxlen, value=0)\nX_test = pad_sequences(X_test_seq, maxlen=maxlen, value=0)\nY_sex = train['sex']-1\n\n\n# In[9]:\n\n\nmax_feaures = 35001\nembedding_matrix = np.zeros((max_feaures, embed_size))\nfor word in tokenizer.word_index:\n    if word not in fastmodel.wv.vocab:\n        continue\n    embedding_matrix[tokenizer.word_index[word]] = fastmodel[word]\n\n\n# In[10]:\n\n\n# behave_train=behave_train.loc[:,\"ph_ver_0\":'week_day_6']\n# behave_test=behave_test.loc[:,\"h0\":'week_day_6']\nbehave_train = pd.merge(train[['device_id']],\n                        behave_train, on='device_id', how=\"left\")\nbehave_test = pd.merge(test[['device_id']],\n                       behave_test, on='device_id', how=\"left\")\nX_h = behave_train.iloc[:, 1:].values\nX_h_test = behave_test.iloc[:, 1:].values\n\n\n# In[11]:\n\n\nclass AdamW(Optimizer):\n    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, weight_decay=1e-4,  # decoupled weight decay (1/4)\n                 epsilon=1e-8, decay=0., **kwargs):\n        super(AdamW, self).__init__(**kwargs)\n        with K.name_scope(self.__class__.__name__):\n            self.iterations = K.variable(0, dtype='int64', name='iterations')\n            self.lr = K.variable(lr, name='lr')\n            self.beta_1 = K.variable(beta_1, name='beta_1')\n            self.beta_2 = K.variable(beta_2, name='beta_2')\n            self.decay = K.variable(decay, name='decay')\n            # decoupled weight decay (2/4)\n            self.wd = K.variable(weight_decay, name='weight_decay')\n        self.epsilon = epsilon\n        self.initial_decay = decay\n\n    @interfaces.legacy_get_updates_support\n    def get_updates(self, loss, params):\n        grads = self.get_gradients(loss, params)\n        self.updates = [K.update_add(self.iterations, 1)]\n        wd = self.wd  # decoupled weight decay (3/4)\n\n        lr = self.lr\n        if self.initial_decay > 0:\n            lr *= (1. / (1. + self.decay * K.cast(self.iterations,\n                                                  K.dtype(self.decay))))\n\n        t = K.cast(self.iterations, K.floatx()) + 1\n        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /\n                     (1. - K.pow(self.beta_1, t)))\n\n        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n        self.weights = [self.iterations] + ms + vs\n\n        for p, g, m, v in zip(params, grads, ms, vs):\n            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g\n            v_t = (self.beta_2 * v) + (1. 
- self.beta_2) * K.square(g)\n            # decoupled weight decay (4/4)\n            p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon) - lr * wd * p\n\n            self.updates.append(K.update(m, m_t))\n            self.updates.append(K.update(v, v_t))\n            new_p = p_t\n\n            # Apply constraints.\n            if getattr(p, 'constraint', None) is not None:\n                new_p = p.constraint(new_p)\n\n            self.updates.append(K.update(p, new_p))\n        return self.updates\n\n    def get_config(self):\n        config = {'lr': float(K.get_value(self.lr)),\n                  'beta_1': float(K.get_value(self.beta_1)),\n                  'beta_2': float(K.get_value(self.beta_2)),\n                  'decay': float(K.get_value(self.decay)),\n                  'weight_decay': float(K.get_value(self.wd)),\n                  'epsilon': self.epsilon}\n        base_config = super(AdamW, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\n\n# In[12]:\n\n\ndef model_conv1D(embedding_matrix):\n\n    K.clear_session()\n    # The embedding layer containing the word vectors\n    emb_layer = Embedding(\n        input_dim=embedding_matrix.shape[0],\n        output_dim=embedding_matrix.shape[1],\n        weights=[embedding_matrix],\n        input_length=maxlen,\n        trainable=False\n    )\n    lstm_layer = Bidirectional(\n        GRU(128, recurrent_dropout=0.15, dropout=0.15, return_sequences=True))\n\n    # 1D convolutions that can iterate over the word vectors\n    conv1 = Conv1D(filters=128, kernel_size=1,\n                   padding='same', activation='relu',)\n    conv2 = Conv1D(filters=64, kernel_size=2,\n                   padding='same', activation='relu', )\n    conv3 = Conv1D(filters=64, kernel_size=3,\n                   padding='same', activation='relu',)\n    conv5 = Conv1D(filters=32, kernel_size=5,\n                   padding='same', activation='relu',)\n\n    # Define inputs\n    seq = Input(shape=(maxlen,))\n\n    # Run inputs through embedding\n    emb = emb_layer(seq)\n\n    lstm = lstm_layer(emb)\n    # Run through CONV + GAP layers\n    conv1a = conv1(lstm)\n    gap1a = GlobalAveragePooling1D()(conv1a)\n    gmp1a = GlobalMaxPool1D()(conv1a)\n\n    conv2a = conv2(lstm)\n    gap2a = GlobalAveragePooling1D()(conv2a)\n    gmp2a = GlobalMaxPool1D()(conv2a)\n\n    conv3a = conv3(lstm)\n    gap3a = GlobalAveragePooling1D()(conv3a)\n    gmp3a = GlobalMaxPooling1D()(conv3a)\n\n    conv5a = conv5(lstm)\n    gap5a = GlobalAveragePooling1D()(conv5a)\n    gmp5a = GlobalMaxPooling1D()(conv5a)\n\n    hin = Input(shape=(396, ))\n    htime = Dense(64, activation='relu')(hin)\n    merge1 = concatenate([gap1a, gmp1a, htime])\n\n    # The MLP that determines the outcome\n    x = Dropout(0.3)(merge1)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.25)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.25)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.25)(x)\n    x = BatchNormalization()(x)\n    pred = Dense(1, activation='sigmoid')(x)\n\n    # model = Model(inputs=[seq1, seq2, magic_input, distance_input], outputs=pred)\n    model = Model(inputs=[seq, hin], outputs=pred)\n#     model=multi_gpu_model(model,2)\n    model.compile(loss='binary_crossentropy',\n                  optimizer=AdamW(weight_decay=0.08,))\n#     model.summary()\n    return model\n\n\n# In[ ]:\n\n\nkfold = 
StratifiedKFold(n_splits=5, random_state=20, shuffle=True)\nsub1 = np.zeros((X_test.shape[0], ))\noof_pref1 = np.zeros((X.shape[0], 1))\nscore = []\ncount = 0\nfor i, (train_index, test_index) in enumerate(kfold.split(X, Y_sex)):\n    print(\"FOLD | \", count+1)\n    filepath = \"model/sex_weights_best_%d.h5\" % count\n    checkpoint = ModelCheckpoint(\n        filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n    reduce_lr = ReduceLROnPlateau(\n        monitor='val_loss', factor=0.8, patience=2, min_lr=0.0001, verbose=1)\n    earlystopping = EarlyStopping(\n        monitor='val_loss', min_delta=0.0001, patience=6, verbose=1, mode='auto')\n    callbacks = [checkpoint, reduce_lr, earlystopping]\n\n    model_sex = model_conv1D(embedding_matrix)\n    X_tr, X_vl, X_tr2, X_vl2, y_tr, y_vl = X[train_index], X[test_index], X_h[\n        train_index], X_h[test_index], Y_sex[train_index], Y_sex[test_index]\n    hist = model_sex.fit([X_tr, X_tr2], y_tr, batch_size=128, epochs=50, validation_data=([X_vl, X_vl2], y_vl),\n                         callbacks=callbacks, verbose=1, shuffle=True)\n    model_sex.load_weights(filepath)\n    sub1 += np.squeeze(model_sex.predict([X_test, X_h_test]))/kfold.n_splits\n    oof_pref1[test_index] = model_sex.predict([X_vl, X_vl2])\n    score.append(np.min(hist.history['val_loss']))\n    count += 1\nprint('log loss:', np.mean(score))\n# pd.DataFrame(oof_pref1).to_csv('cnn_oof_sex.csv', index=False)\n\n\n# In[ ]:\n\n\noof_pref1 = pd.DataFrame(oof_pref1, columns=['sex2'])\nsub1 = pd.DataFrame(sub1, columns=['sex2'])\nres1 = pd.concat([oof_pref1, sub1])\nres1['sex1'] = 1-res1['sex2']\nres1.to_csv(\"data/res1.csv\", index=False)\n\n\n# In[ ]:\n\n\ndef model_age_conv(embedding_matrix):\n\n    # The embedding layer containing the word vectors\n    K.clear_session()\n    emb_layer = Embedding(\n        input_dim=embedding_matrix.shape[0],\n        output_dim=embedding_matrix.shape[1],\n        weights=[embedding_matrix],\n        input_length=maxlen,\n        trainable=False\n    )\n    lstm_layer = Bidirectional(\n        GRU(128, recurrent_dropout=0.15, dropout=0.15, return_sequences=True))\n\n    # 1D convolutions that can iterate over the word vectors\n    conv1 = Conv1D(filters=128, kernel_size=1,\n                   padding='same', activation='relu',)\n    conv2 = Conv1D(filters=64, kernel_size=2,\n                   padding='same', activation='relu', )\n    conv3 = Conv1D(filters=64, kernel_size=3,\n                   padding='same', activation='relu',)\n    conv5 = Conv1D(filters=32, kernel_size=5,\n                   padding='same', activation='relu',)\n\n    # Define inputs\n    seq = Input(shape=(maxlen,))\n\n    # Run inputs through embedding\n    emb = emb_layer(seq)\n\n    lstm = lstm_layer(emb)\n    # Run through CONV + GAP layers\n    conv1a = conv1(lstm)\n    gap1a = GlobalAveragePooling1D()(conv1a)\n    gmp1a = GlobalMaxPool1D()(conv1a)\n\n    conv2a = conv2(lstm)\n    gap2a = GlobalAveragePooling1D()(conv2a)\n    gmp2a = GlobalMaxPool1D()(conv2a)\n\n    conv3a = conv3(lstm)\n    gap3a = GlobalAveragePooling1D()(conv3a)\n    gmp3a = GlobalMaxPooling1D()(conv3a)\n\n    conv5a = conv5(lstm)\n    gap5a = GlobalAveragePooling1D()(conv5a)\n    gmp5a = GlobalMaxPooling1D()(conv5a)\n\n    hin = Input(shape=(397, ))\n    htime = Dense(64, activation='relu')(hin)\n    merge1 = concatenate([gap1a, gmp1a, htime])\n\n#     merge1 = concatenate([gap1a, gap2a, gap3a, gap5a])\n\n    # The MLP that determines the outcome\n    x = 
Dropout(0.3)(merge1)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    pred = Dense(11, activation='softmax')(x)\n\n    model = Model(inputs=[seq, hin], outputs=pred)\n    model.compile(loss='categorical_crossentropy',\n                  optimizer=AdamW(weight_decay=0.08,))\n#     model.summary()\n    return model\n\n\n# In[ ]:\n\n\nY_age = to_categorical(train['age'])\n\n\n# #### sex1\n\n# In[ ]:\n\n\nbehave_train['sex'] = train['sex']\nbehave_test['sex'] = 1\nX_h = behave_train.iloc[:, 1:].values\nX_h_test = behave_test.iloc[:, 1:].values\n\n\n# In[ ]:\n\n\nsub2 = np.zeros((X_test.shape[0], 11))\noof_pref2 = np.zeros((X.shape[0], 11))\nscore = []\ncount = 0\nfor i, (train_index, test_index) in enumerate(kfold.split(X, train['age'])):\n\n    print(\"FOLD | \", count+1)\n\n    filepath2 = \"model/age_weights_best_%d.h5\" % count\n    checkpoint2 = ModelCheckpoint(\n        filepath2, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n    reduce_lr2 = ReduceLROnPlateau(\n        monitor='val_loss', factor=0.8, patience=2, min_lr=0.0001, verbose=1)\n    earlystopping2 = EarlyStopping(\n        monitor='val_loss', min_delta=0.0001, patience=8, verbose=1, mode='auto')\n    callbacks2 = [checkpoint2, reduce_lr2, earlystopping2]\n\n    model_age = model_age_conv(embedding_matrix)\n    X_tr, X_vl, X_tr2, X_vl2, y_tr, y_vl = X[train_index], X[test_index], X_h[\n        train_index], X_h[test_index], Y_age[train_index], Y_age[test_index]\n    hist = model_age.fit([X_tr, X_tr2], y_tr, batch_size=128, epochs=50, validation_data=([X_vl, X_vl2], y_vl),\n                         callbacks=callbacks2, verbose=1, shuffle=True)\n\n    model_age.load_weights(filepath2)\n    oof_pref2[test_index] = model_age.predict([X_vl, X_vl2])\n    sub2 += model_age.predict([X_test, X_h_test])/kfold.n_splits\n    score.append(np.min(hist.history['val_loss']))\n    count += 1\nprint('log loss:', np.mean(score))\n# pd.DataFrame(oof_pref2).to_csv('cnn_oof_age.csv', index=False)\n\n\n# In[ ]:\n\n\nres2_1 = np.vstack((oof_pref2, sub2))\nres2_1 = pd.DataFrame(res2_1)\nres2_1.to_csv(\"submit/res2_1.csv\", index=False)\n\n\n# ### sex2\n\n# In[ ]:\n\n\nbehave_train['sex'] = train['sex']\nbehave_test['sex'] = 2\nX_h = behave_train.iloc[:, 1:].values\nX_h_test = behave_test.iloc[:, 1:].values\n\n\n# In[ ]:\n\n\nsub2 = np.zeros((X_test.shape[0], 11))\noof_pref2 = np.zeros((X.shape[0], 11))\nscore = []\n\ncount = 0\nfor i, (train_index, test_index) in enumerate(kfold.split(X, train['age'])):\n\n    print(\"FOLD | \", count+1)\n\n    filepath2 = \"model/age_weights_best_%d.h5\" % count\n    checkpoint2 = ModelCheckpoint(\n        filepath2, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n    reduce_lr2 = ReduceLROnPlateau(\n        monitor='val_loss', factor=0.8, patience=2, min_lr=0.0001, verbose=1)\n    earlystopping2 = EarlyStopping(\n        monitor='val_loss', min_delta=0.0001, patience=8, verbose=1, mode='auto')\n    callbacks2 = [checkpoint2, reduce_lr2, earlystopping2]\n\n    model_age = model_age_conv(embedding_matrix)\n    X_tr, X_vl, X_tr2, X_vl2, y_tr, y_vl = X[train_index], X[test_index], X_h[\n        train_index], X_h[test_index], Y_age[train_index], Y_age[test_index]\n    hist = model_age.fit([X_tr, 
X_tr2], y_tr, batch_size=128, epochs=50, validation_data=([X_vl, X_vl2], y_vl),\n                         callbacks=callbacks2, verbose=1, shuffle=True)\n\n    model_age.load_weights(filepath2)\n    oof_pref2[test_index] = model_age.predict([X_vl, X_vl2])\n    sub2 += model_age.predict([X_test, X_h_test])/kfold.n_splits\n    score.append(np.min(hist.history['val_loss']))\n    count += 1\nprint('log loss:', np.mean(score))\n# pd.DataFrame(oof_pref2).to_csv('cnn_oof_age.csv', index=False)\n\n\n# In[ ]:\n\n\nres2_2 = np.vstack((oof_pref2, sub2))\nres2_2 = pd.DataFrame(res2_2)\n\n\n# In[ ]:\n\n\nres2_2.to_csv(\"submit/res2_2.csv\", index=False)\n\n\n# In[ ]:\n\n\nres1.index = range(len(res1))\nres2_1.index = range(len(res2_1))\nres2_2.index = range(len(res2_2))\nfinal_1 = res2_1\nfinal_2 = res2_2\nfor i in range(11):\n    final_1[i] = res1['sex1']*res2_1[i]\n    final_2[i] = res1['sex2']*res2_2[i]\nid_list = pd.concat([train[['device_id']], test[['device_id']]])\nfinal = id_list\nfinal.index = range(len(final))\nfinal.columns = ['DeviceID']\nfinal_pred = pd.concat([final_1, final_2], 1)\nfinal = pd.concat([final, final_pred], 1)\nfinal.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6',\n                 '1-7', '1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4',\n                 '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\nfinal.to_csv('feature/nn_feat.csv', index=False)\n"
  },
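`11_cnn.py` ships its own `AdamW` class because the Keras version used here had no built-in decoupled weight decay: the decay term is subtracted from the weights directly rather than folded into the gradient. A numpy sketch of one update step under that rule (toy values; only `weight_decay=0.08` comes from the script):

```python
import numpy as np

# One AdamW update step with decoupled weight decay: the -lr*wd*p term is
# applied to the weights directly instead of being added to the gradient.
def adamw_step(p, g, m, v, t, lr=1e-3, b1=0.9, b2=0.999, wd=0.08, eps=1e-8):
    m = b1 * m + (1 - b1) * g                         # first-moment estimate
    v = b2 * v + (1 - b2) * g ** 2                    # second-moment estimate
    lr_t = lr * np.sqrt(1 - b2 ** t) / (1 - b1 ** t)  # bias-corrected step size
    p = p - lr_t * m / (np.sqrt(v) + eps) - lr * wd * p
    return p, m, v

p, m, v = np.ones(3), np.zeros(3), np.zeros(3)
p, m, v = adamw_step(p, np.array([0.1, -0.2, 0.3]), m, v, t=1)
print(p)
```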
  {
    "path": "nb_cz_lwl_wcm/12_get_feature_lwl.py",
    "content": "#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport numpy as np\r\nfrom sklearn.decomposition import LatentDirichletAllocation\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score\r\nimport lightgbm as lgb\r\nfrom datetime import datetime,timedelta  \r\nimport matplotlib.pyplot as plt\r\nimport time\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nimport gc\r\nfrom sklearn import preprocessing\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom scipy.sparse import hstack, vstack\r\nfrom sklearn.model_selection import StratifiedKFold\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom skopt.space import Integer, Categorical, Real, Log10\r\nfrom skopt.utils import use_named_args\r\nfrom skopt import gp_minimize\r\nfrom gensim.models import Word2Vec, FastText\r\nimport gensim \r\nimport re\r\n\r\n\r\n# 获取app开关表中使用总时间Top100的app使用时长统计特征\r\ndef get_top100_statis_feat(start_close):\r\n\tstart_close['period'] = (start_close['close'] - start_close['start'])/1000\r\n\tstart_close['start'] = pd.to_datetime(start_close['start'], unit='ms')\r\n\tapp_use_time = start_close.groupby(['app'])['period'].agg('sum').reset_index()\r\n\tapp_use_top100 = app_use_time.sort_values(by='period', ascending=False)[:100]['app']\r\n\tdevice_app_use_time = start_close.groupby(['device_id', 'app'])['period'].agg('sum').reset_index()\r\n\tuse_time_top100_statis = device_app_use_time.set_index('app').loc[list(app_use_top100)].reset_index()\r\n\ttop100_statis = use_time_top100_statis.pivot(index='device_id', columns='app', values='period').reset_index()\r\n\ttop100_statis = top100_statis.fillna(0)\r\n\ttop100_statis.columns = ['device_id'] + ['top100_statis_' + str(i) for i in range(0, 100)]\r\n\tprint('top100_statis_feat done')\r\n\treturn top100_statis\r\n\r\n# 获得手机品牌特征\r\ndef get_brand_feat(brand):\r\n\t# 手机品牌预处理\r\n\tbrand['vendor'] = brand['vendor'].astype(str).apply(lambda x : x.split(' ')[0].upper())\r\n\tbrand['ph_ver'] = brand['vendor'] + '_' + brand['version']\r\n\tph_ver = brand['ph_ver'].value_counts()\r\n\tph_ver_cnt = pd.DataFrame(ph_ver).reset_index()\r\n\tph_ver_cnt.columns = ['ph_ver', 'ph_ver_cnt']\r\n\tbrand = pd.merge(left=brand, right=ph_ver_cnt,on='ph_ver')\r\n\t# 针对长尾分布做的一点处理\r\n\tmask = (brand.ph_ver_cnt < 100)\r\n\tbrand.loc[mask, 'ph_ver'] = 'other'\r\n\tph_ver_le = preprocessing.LabelEncoder()\r\n\tbrand['ph_ver'] = ph_ver_le.fit_transform(brand['ph_ver'].astype(str))\r\n\tprint('brand_feat done')\r\n\treturn brand[['device_id', 'ph_ver']]\r\n\r\n# 获取app开关表的tfidf特征，不是df格式\r\ndef get_start_close_tfidf_feat(data_all, start_close):\r\n\t# 每个app的总使用次数统计\r\n\tapp_num = start_close['app'].value_counts().reset_index()\r\n\tapp_num.columns = ['app', 'app_num']\r\n\tstart_close = pd.merge(left=start_close, right=app_num, on='app')\r\n\t# 同样的，针对长尾分布做些处理（尝试过不做处理，或换其他阈值，这个100的阈值最高）\r\n\tstart_close.loc[start_close.app_num < 100, 'app'] = 'other'\r\n\tdf_app = start_close[['device_id', 'app']]\r\n\tapps = df_app.drop_duplicates().groupby(['device_id'])['app'].apply(' '.join).reset_index()\r\n\tapps['app_length'] = apps['app'].apply(lambda x:len(x.split(' ')))\r\n\tdata_all = pd.merge(data_all, apps, on='device_id', how='left')\r\n\t# 获取每台设备所安装的apps的tfidf\r\n\ttfidf = CountVectorizer()\r\n\tapps['app'] = tfidf.fit_transform(apps['app'])\r\n\t# 转换\r\n\tstart_close_tfidf = 
tfidf.transform(list(data_all['app']))\r\n\tprint('start_close_tfidf_feat done')\r\n\treturn start_close_tfidf\r\n\r\n# 利用word2vec得到每台设备所安装app的embedding表示\r\ndef get_packages_w2c_feat(packages):\r\n\tpackages['apps'] = packages['apps'].apply(lambda x:x.split(','))\r\n\tpackages['app_length'] = packages['apps'].apply(lambda x:len(x))\r\n\tembed_size = 128\r\n\tfastmodel = Word2Vec(list(packages['apps']), size=embed_size, window=4, min_count=3, negative=2,\r\n\t                 sg=1, sample=0.002, hs=1, workers=4)  \r\n\r\n\tembedding_fast = pd.DataFrame([fastmodel[word] for word in (fastmodel.wv.vocab)])\r\n\tembedding_fast['app'] = list(fastmodel.wv.vocab)\r\n\tembedding_fast.columns= [\"fdim_%s\" % str(i) for i in range(embed_size)]+[\"app\"]\r\n\r\n\tid_list = []\r\n\tfor i in range(packages.shape[0]):\r\n\t    id_list += [list(packages['device_id'])[i]]*packages['app_length'].iloc[i]\r\n\tapp_list = [word for item in packages['apps'] for word in item]\r\n\tapp_vect = pd.DataFrame({'device_id':id_list})        \r\n\tapp_vect['app'] = app_list\r\n\r\n\tapp_vect = app_vect.merge(embedding_fast, on='app', how='left')\r\n\tapp_vect = app_vect.drop('app', axis=1)\r\n\r\n\tseqfeature = app_vect.groupby(['device_id']).agg('mean')\r\n\tseqfeature.reset_index(inplace=True)\r\n\tprint('packages_w2c_feat done')\r\n\treturn seqfeature\r\n\r\n# 用户一周七天玩手机的时长情况的统计特征\r\ndef get_week_statis_feat(start_close):\r\n\tstart_close['dayofweek'] = start_close['start'].dt.dayofweek\r\n\tstart_close['hour'] = start_close['start'].dt.hour\r\n\tapp_use_time = start_close.groupby(['device_id', 'dayofweek'])['period'].agg('sum').reset_index()\r\n\tweek_app_use = app_use_time.pivot_table(values='period', columns='dayofweek', index='device_id').reset_index()\r\n\tweek_app_use = week_app_use.fillna(0)\r\n\tweek_app_use.columns = ['device_id'] + ['week_day_' + str(i) for i in range(0, 7)]\r\n\r\n\tweek_app_use['week_max'] = week_app_use.max(axis=1)\r\n\tweek_app_use['week_min'] = week_app_use.min(axis=1)\r\n\tweek_app_use['week_sum'] = week_app_use.sum(axis=1)\r\n\tweek_app_use['week_std'] = week_app_use.std(axis=1)\r\n\tprint('week_statis_feat done')\r\n\treturn week_app_use\r\n\r\n\r\ndef get_user_behaviour_feat(start_close):\r\n\t# start_close['peroid'] = (start_close['close'] - start_close['start'])/1000\r\n\t# start_close['start'] = pd.to_datetime(start_close['start'], unit='ms')\r\n\t#start_close['closetime'] = pd.to_datetime(start_close['close'], unit='ms')\r\n\t# del start_close['close']\r\n\t# gc.collect();\r\n\tstart_close['hour'] = start_close['start'].dt.hour\r\n\tstart_close['date'] = start_close['start'].dt.date\r\n\tstart_close['dayofweek'] = start_close['start'].dt.dayofweek\r\n\t#平均每天使用设备时间\r\n\tdtime = start_close.groupby(['device_id', 'date'])['period'].agg('sum')\r\n\t#不同时间段占比\r\n\tqtime = start_close.groupby(['device_id', 'hour'])['period'].agg('sum')\r\n\twtime = start_close.groupby(['device_id', 'dayofweek'])['period'].agg('sum')\r\n\tatime = start_close.groupby(['device_id', 'app'])['period'].agg('sum')\r\n\tdapp = start_close[['device_id', 'date', 'app']].drop_duplicates().groupby(['device_id', 'date'])['app'].agg(' '.join)\r\n\tdapp = dapp.reset_index()\r\n\tdapp['app_len'] = dapp['app'].apply(lambda x:x.split(' ')).apply(len)\r\n\tdapp_stat = dapp.groupby('device_id')['app_len'].agg({'std':'std', 'mean':'mean', 'max':'max'})\r\n\tdapp_stat = dapp_stat.reset_index()\r\n\tdapp_stat.columns = ['device_id', 'app_len_std', 'app_len_mean', 'app_len_max']\r\n\tdtime = 
dtime.reset_index()\r\n\tdtime_stat = dtime.groupby(['device_id'])['period'].agg({'sum':'sum', 'mean':'mean', 'std':'std', 'max':'max'}).reset_index()\r\n\tdtime_stat.columns = ['device_id', 'date_sum', 'date_mean', 'date_std', 'date_max']\r\n\tqtime = qtime.reset_index()\r\n\tftime = qtime.pivot(index='device_id', columns='hour', values='period').fillna(0)\r\n\tftime.columns = ['h%s'%i for i in range(24)]\r\n\tftime.reset_index(inplace=True)\r\n\twtime = wtime.reset_index()\r\n\tweektime = wtime.pivot(index='device_id', columns='dayofweek', values='period').fillna(0)\r\n\tweektime.columns = ['w0', 'w1', 'w2', 'w3', 'w4', 'w5', 'w6']\r\n\tweektime.reset_index(inplace=True)\r\n\tatime = atime.reset_index()\r\n\tapp = atime.groupby(['device_id'])['period'].idxmax()\r\n\tuser = pd.merge(dapp_stat, dtime_stat, on='device_id', how='left')\r\n\tuser = pd.merge(user, ftime, on='device_id', how='left')\r\n\tuser = pd.merge(user, weektime, on='device_id', how='left')\r\n\tuser = pd.merge(user, atime.iloc[app], on='device_id', how='left')\r\n\tapp_cat = pd.read_table('Demo/package_label.tsv', names=['app', 'category', 'app_name'])\r\n\r\n\tcat_enc = pd.DataFrame(app_cat['category'].value_counts())\r\n\tcat_enc['idx'] = range(45)\r\n\r\n\tapp_cat['cat_enc'] = app_cat['category'].map(cat_enc['idx'])\r\n\tapp_cat.set_index(['app'], inplace=True)\r\n\tatime['app_cat_enc'] = atime['app'].map(app_cat['cat_enc']).fillna(45)\r\n\r\n\tcat_num = atime.groupby(['device_id', 'app_cat_enc'])['app'].agg('count').reset_index()\r\n\tcat_time = atime.groupby(['device_id', 'app_cat_enc'])['period'].agg('sum').reset_index()\r\n\tapp_cat_num = cat_num.pivot(index='device_id', columns='app_cat_enc', values='app').fillna(0)\r\n\tapp_cat_num.columns = ['cat%s'%i for i in range(46)]\r\n\tapp_cat_time = cat_time.pivot(index='device_id', columns='app_cat_enc', values='period').fillna(0)\r\n\tapp_cat_time.columns = ['time%s'%i for i in range(46)]\r\n\tuser = pd.merge(user, app_cat_num, on='device_id', how='left')\r\n\tuser = pd.merge(user, app_cat_time, on='device_id', how='left')\r\n\tdel user['app']\r\n\tprint('user_behaviour_feat done')\r\n\treturn user\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\ttest = pd.read_csv('Demo/deviceid_test.tsv', sep='\\t', names=['device_id'])\r\n\ttrain = pd.read_csv('Demo/deviceid_train.tsv', sep='\\t', names=['device_id', 'sex', 'age'])\r\n\tbrand = pd.read_table('Demo/deviceid_brand.tsv', names=['device_id', 'vendor', 'version'])\r\n\tstart_close = pd.read_table('Demo/deviceid_package_start_close.tsv', \r\n\t                         names=['device_id', 'app', 'start', 'close'])\r\n\tpackages = pd.read_csv('Demo/deviceid_packages.tsv', sep='\\t', names=['device_id', 'apps'])\r\n\tdata_all = pd.concat([train, test], axis=0, ignore_index=True)\r\n\tprint('data done')\r\n\r\n\ttop100_statis_feat = get_top100_statis_feat(start_close)\r\n\tbrand_feat = get_brand_feat(brand)\r\n\t# start_close_tfidf_feat = get_start_close_tfidf_feat(data_all, start_close)\r\n\tpackages_w2c_feat = get_packages_w2c_feat(packages)\r\n\tweek_statis_feat = get_week_statis_feat(start_close)\r\n\tuser_behaviour_feat = get_user_behaviour_feat(start_close)\r\n\tprint('feats done')\r\n\r\n\tdata_all = pd.merge(data_all, top100_statis_feat, on='device_id', how='left')\r\n\tdata_all = pd.merge(data_all, brand_feat, on='device_id', how='left')\r\n\tdata_all = pd.merge(data_all, packages_w2c_feat, on='device_id', how='left')\r\n\tdata_all = pd.merge(data_all, week_statis_feat, on='device_id', 
how='left')\r\n\tdata_all = pd.merge(data_all, user_behaviour_feat, on='device_id', how='left')\r\n\tprint('merge done')\r\n\t# 删掉标签\r\n\tdel data_all['age'], data_all['sex']\r\n\tdata_all.to_csv('feature/feat_lwl.csv', index=None)\r\n\r\n\r\n\r\n"
  },
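`get_packages_w2c_feat` in `12_get_feature_lwl.py` reduces each device's installed-app list to one dense row by averaging skip-gram embeddings. A self-contained sketch of the same idea (toy data; the old gensim `size=` / `wv.vocab` API is used to match the repo):

```python
import numpy as np
import pandas as pd
from gensim.models import Word2Vec

# Train skip-gram vectors on per-device app lists, then mean-pool each
# device's app vectors into a single embedding row (gensim < 4 API).
apps = pd.DataFrame({'device_id': ['d1', 'd2'],
                     'apps': [['a', 'b', 'c'], ['b', 'c']]})
w2v = Word2Vec(list(apps['apps']), size=8, window=4, min_count=1, sg=1)

def device_vector(app_list):
    vecs = [w2v.wv[a] for a in app_list if a in w2v.wv.vocab]
    return np.mean(vecs, axis=0) if vecs else np.zeros(8)

seqfeature = pd.DataFrame([device_vector(a) for a in apps['apps']])
seqfeature.insert(0, 'device_id', apps['device_id'])
print(seqfeature.shape)  # (2, 9): device_id plus 8 embedding dims
```

Mean pooling throws away app order but keeps the script independent of how many apps a device has, which is why the same 128-dim block can be merged onto every device.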
  {
    "path": "nb_cz_lwl_wcm/13_last_get_all_feature.py",
    "content": "# -*- coding:utf-8 -*-\n\nimport pandas as pd\n\ndf_brand = pd.read_csv('feature/deviceid_brand_feature.csv')\ndf_lr = pd.read_csv('feature/tfidf_lr_error_single_classfiy.csv')\ndf_pac = pd.read_csv('feature/tfidf_pac_error_single_classfiy.csv')\ndf_sgd = pd.read_csv('feature/tfidf_sgd_error_single_classfiy.csv')\ndf_ridge = pd.read_csv('feature/tfidf_ridge_error_single_classfiy.csv')\ndf_bnb = pd.read_csv('feature/tfidf_bnb_error_single_classfiy.csv')\ndf_mnb = pd.read_csv('feature/tfidf_mnb_error_single_classfiy.csv')\ndf_lsvc = pd.read_csv('feature/tfidf_lsvc_error_single_classfiy.csv')\ndf_lr_2 = pd.read_csv('feature/tfidf_lr_1_3_error_single_classfiy.csv')\ndf_pac_2 = pd.read_csv('feature/tfidf_pac_1_3_error_single_classfiy.csv')\ndf_sgd_2 = pd.read_csv('feature/tfidf_sgd_1_3_error_single_classfiy.csv')\ndf_ridge_2 = pd.read_csv('feature/tfidf_ridge_1_3_error_single_classfiy.csv')\ndf_bnb_2 = pd.read_csv('feature/tfidf_bnb_1_3_error_single_classfiy.csv')\ndf_mnb_2 = pd.read_csv('feature/tfidf_mnb_1_3_error_single_classfiy.csv')\ndf_lsvc_2 = pd.read_csv('feature/tfidf_lsvc_2_error_single_classfiy.csv')\ndf_kmeans_2 = pd.read_csv('feature/cluster_2_tfidf_feature.csv')\ndf_start_close = pd.read_csv('feature/feature_start_close.csv')\ndf_ling_reg = pd.read_csv('feature/tfidf_ling_reg.csv')\ndf_par_reg = pd.read_csv('feature/tfidf_par_reg.csv')\ndf_svr_reg = pd.read_csv('feature/tfidf_svr_reg.csv')\ndf_w2v = pd.read_csv('feature/w2v_avg.csv')\ndel df_w2v['DeviceID']\ndf_best_nn = pd.read_csv('feature/yg_best_nn.csv')\ndel df_best_nn['DeviceID']\ndf_chizhu_lgb = pd.read_csv('feature/lgb_feat_chizhu.csv')\ndel df_chizhu_lgb['DeviceID']\ndf_chizhu_nn = pd.read_csv('feature/nn_feat.csv')\ndel df_chizhu_nn['DeviceID']\ndf_lwl_lgb = pd.read_csv('feature/feat_lwl.csv')\ndel df_lwl_lgb['DeviceID']\ndf_feature = pd.concat([\n                        df_brand,\n                        df_lr, df_pac, df_sgd,\n                        df_ridge, df_bnb, df_mnb, df_lsvc,\n                        df_start_close, df_ling_reg, df_par_reg,df_svr_reg,\n                        df_lr_2, df_pac_2, df_sgd_2, df_ridge_2, df_bnb_2, df_mnb_2,\n                        df_lsvc_2, df_kmeans_2, df_w2v, df_best_nn, df_chizhu_lgb, df_chizhu_nn\n                        df_lwl_lgb\n                        ], axis=1)\n\ndf_feature.to_csv('feature/feature_one.csv', encoding='utf8', index=None)\n\n"
  },
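`13_last_get_all_feature.py` stitches all the stacking outputs together with a column-wise `pd.concat`, which silently assumes every feature file shares one row order (train rows first, then test rows). A defensive variant of that merge (two of the real file names shown; the assertion is an added guard, not in the repo):

```python
import pandas as pd

# Column-wise concat of stacking outputs; every file must have the same
# number of rows in the same train-then-test order.
paths = ['feature/deviceid_brand_feature.csv', 'feature/tfidf_ling_reg.csv']
frames = [pd.read_csv(p) for p in paths]
assert len({len(f) for f in frames}) == 1, 'feature files disagree on row count'
feature_one = pd.concat([f.reset_index(drop=True) for f in frames], axis=1)
print(feature_one.shape)
```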
  {
    "path": "nb_cz_lwl_wcm/1_get_age_reg.py",
    "content": "# -*- coding:utf-8 -*-\n\n\n#######  尝试骚操作，单独针对这个表\nimport pandas as pd\nfrom sklearn.cluster import KMeans\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier, PassiveAggressiveClassifier, RidgeClassifier, Ridge, \\\n    PassiveAggressiveRegressor\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import KFold\nfrom sklearn.naive_bayes import BernoulliNB, MultinomialNB\nfrom sklearn.svm import LinearSVC, LinearSVR\n\ntrain = pd.read_csv('Demo/deviceid_train.tsv', sep='\\t', header=None)\ntest = pd.read_csv('Demo/deviceid_test.tsv', sep='\\t', header=None)\ntest_id = test[0]\ndef get_label(row):\n    return row[2]\ntrain['label'] = train.apply(lambda row:get_label(row), axis=1)\ndata_all = pd.concat([train, test], axis=0)\ndata_all = data_all.rename({0:'id'}, axis=1)\ndel data_all[1],data_all[2]\n\ndeviceid_packages = pd.read_csv('Demo/deviceid_packages.tsv', sep='\\t', header=None)\ndeviceid_packages = deviceid_packages.rename({0: 'id', 1: 'packages_names'}, axis=1)\npackage_label = pd.read_csv('Demo/package_label.tsv', sep='\\t', header=None)\npackage_label = package_label.rename({0:'packages_name', 1:'packages_type'},axis=1)\ndict_label = dict(zip(list(package_label['packages_name']), list(package_label['packages_type'])))\n\ndata_all = pd.merge(data_all, deviceid_packages, on='id', how='left')\n\nfeature = pd.DataFrame()\n\nimport numpy as np\n\n# app个数\n# 毒特征？\n# feature['app_count'] = data_all['packages_names'].apply(lambda row: len(str(row).split(',')))\n\n# 对此数据做countvector,和tfidfvector,并在一起跑几个学习模型\n# 引申出来的count和tfidf，跑基本机器学习分类模型\ndata_all['package_str'] = data_all['packages_names'].apply(lambda row: str(row).replace(',', ' '))\ndef get_more_information(row):\n    result = ' '\n    start = True\n    row_list = row.split(',')\n    for i in row_list:\n        try:\n            if start:\n                result = dict_label[i]\n                start = False\n            else:\n                result = result + ' ' + dict_label[i]\n        except KeyError:\n            pass\n    return result\ndata_all['package_str_more_information'] = data_all['packages_names'].apply(lambda row: get_more_information(str(row)))\n\nprint(data_all)\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport scipy.sparse\n\ncount_vec = CountVectorizer()\ncount_csr_basic = count_vec.fit_transform(data_all['package_str'])\ntfidf_vec = TfidfVectorizer()\ntfidf_vec_basic = tfidf_vec.fit_transform(data_all['package_str'])\n\ncount_vec = CountVectorizer()\ncount_csr_more = count_vec.fit_transform(data_all['package_str_more_information'])\n\ntfidf_vec = TfidfVectorizer()\ntfidf_vec_more = tfidf_vec.fit_transform(data_all['package_str_more_information'])\n\ndata_feature = scipy.sparse.csr_matrix(scipy.sparse.hstack([count_csr_basic, tfidf_vec_basic,\n                     count_csr_more, tfidf_vec_more]))\n\ntrain_feature = data_feature[:len(train)]\nscore = train['label']\ntest_feature = data_feature[len(train):]\nnumber = len(np.unique(score))\n\nX = train_feature\ntest = test_feature\ny = score\n\nn_flods = 5\nkf = KFold(n_splits=n_flods,shuffle=True,random_state=1017)\nkf = kf.split(X)\n\ndef xx_mse_s(y_true,y_pre):\n    y_true = y_true\n    y_pre = pd.DataFrame({'res': list(y_pre)})\n    return mean_squared_error(y_true,y_pre['res'].values)\n\n######################## ridge reg #########################3\ncv_pred = []\nxx_mse = []\nstack = np.zeros((len(y),1))\nstack_te = 
np.zeros((len(test_id),1))\nmodel_1 = Ridge(solver='auto', fit_intercept=True, alpha=0.4, max_iter=250, normalize=False, tol=0.01,random_state=1017)\nfor i ,(train_fold,test_fold) in enumerate(kf):\n    X_train, X_validate, label_train, label_validate = X[train_fold, :], X[test_fold, :], y[train_fold], y[test_fold]\n    model_1.fit(X_train, label_train)\n    val_ = model_1.predict(X=X_validate)\n    stack[test_fold] = np.array(val_).reshape(len(val_),1)\n    print(xx_mse_s(label_validate, val_))\n    cv_pred.append(model_1.predict(test))\n    xx_mse.append(xx_mse_s(label_validate, val_))\nimport numpy as np\nprint('xx_result',np.mean(xx_mse))\ns = 0\nfor i in cv_pred:\n    s = s+i\ns = s/n_flods\nprint(stack)\nprint(s)\ndf_stack1 = pd.DataFrame(stack)\ndf_stack2 = pd.DataFrame(s)\ndf_stack = pd.concat([df_stack1,df_stack2\n                ], axis=0)\ndf_stack.to_csv('feature/tfidf_ling_reg.csv', encoding='utf8', index=None)\n\n######################## par reg #########################\nkf = KFold(n_splits=n_flods,shuffle=True,random_state=1017)\nkf = kf.split(X)\ncv_pred = []\nxx_mse = []\nstack = np.zeros((len(y),1))\nmodel_1 = PassiveAggressiveRegressor(fit_intercept=True, max_iter=280, tol=0.01,random_state=1017)\nfor i ,(train_fold,test_fold) in enumerate(kf):\n    X_train, X_validate, label_train, label_validate = X[train_fold, :], X[test_fold, :], y[train_fold], y[test_fold]\n    model_1.fit(X_train, label_train)\n    val_ = model_1.predict(X=X_validate)\n    stack[test_fold] = np.array(val_).reshape(len(val_),1)\n    print(xx_mse_s(label_validate, val_))\n    cv_pred.append(model_1.predict(test))\n    xx_mse.append(xx_mse_s(label_validate, val_))\nimport numpy as np\nprint('xx_result',np.mean(xx_mse))\ns = 0\nfor i in cv_pred:\n    s = s+i\ns = s/n_flods\nprint(stack)\nprint(s)\ndf_stack1 = pd.DataFrame(stack)\ndf_stack2 = pd.DataFrame(s)\ndf_stack = pd.concat([df_stack1,df_stack2\n                ], axis=0)\ndf_stack.to_csv('feature/tfidf_par_reg.csv', encoding='utf8', index=None)\n\n######################## svr reg #########################\nkf = KFold(n_splits=n_flods,shuffle=True,random_state=1017)\nkf = kf.split(X)\ncv_pred = []\nxx_mse = []\nstack = np.zeros((len(y),1))\nmodel_1 = LinearSVR(random_state=1017)\nfor i ,(train_fold,test_fold) in enumerate(kf):\n    X_train, X_validate, label_train, label_validate = X[train_fold, :], X[test_fold, :], y[train_fold], y[test_fold]\n    model_1.fit(X_train, label_train)\n    val_ = model_1.predict(X=X_validate)\n    stack[test_fold] = np.array(val_).reshape(len(val_),1)\n    print(xx_mse_s(label_validate, val_))\n    cv_pred.append(model_1.predict(test))\n    xx_mse.append(xx_mse_s(label_validate, val_))\nimport numpy as np\nprint('xx_result',np.mean(xx_mse))\ns = 0\nfor i in cv_pred:\n    s = s+i\ns = s/n_flods\nprint(stack)\nprint(s)\ndf_stack1 = pd.DataFrame(stack)\ndf_stack2 = pd.DataFrame(s)\ndf_stack = pd.concat([df_stack1,df_stack2\n                ], axis=0)\ndf_stack.to_csv('feature/tfidf_svr_reg.csv', encoding='utf8', index=None)\n\n"
  },
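`1_get_age_reg.py` repeats one pattern three times (Ridge, PassiveAggressiveRegressor, LinearSVR): 5-fold out-of-fold predictions on train plus fold-averaged predictions on test, stacked vertically so the rows line up with train ids followed by test ids. A generic sketch of that loop (helper name is illustrative; X may be a dense array or a CSR matrix, y a 1-d numpy array):

```python
import numpy as np
import pandas as pd
from sklearn.linear_model import Ridge
from sklearn.model_selection import KFold

# Out-of-fold predictions for train, fold-averaged predictions for test,
# stacked train-then-test as the tfidf_*_reg.csv feature files are.
def oof_regression(model, X, y, X_test, n_folds=5, seed=1017):
    oof = np.zeros(X.shape[0])
    test_pred = np.zeros(X_test.shape[0])
    kf = KFold(n_splits=n_folds, shuffle=True, random_state=seed)
    for tr, va in kf.split(X):
        model.fit(X[tr], y[tr])
        oof[va] = model.predict(X[va])
        test_pred += model.predict(X_test) / n_folds
    return pd.concat([pd.DataFrame(oof), pd.DataFrame(test_pred)], axis=0)

rng = np.random.RandomState(0)
X, y, X_test = rng.rand(20, 3), rng.rand(20), rng.rand(5, 3)
print(oof_regression(Ridge(alpha=0.4), X, y, X_test).shape)  # (25, 1)
```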
  {
    "path": "nb_cz_lwl_wcm/2_get_feature_brand.py",
    "content": "# -*- coding:utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\n\ntrain = pd.read_csv('Demo/deviceid_train.tsv', sep='\\t', header=None)\ntest = pd.read_csv('Demo/deviceid_test.tsv', sep='\\t', header=None)\n\ndata_all = pd.concat([train, test], axis=0)\ndata_all = data_all.rename({0:'id'}, axis=1)\ndel data_all[1],data_all[2]\ndeviced_brand = pd.read_csv('Demo/deviceid_brand.tsv', sep='\\t', header=None)\ndeviced_brand = deviced_brand.rename({0: 'id'}, axis=1)\ndata_all = pd.merge(data_all, deviced_brand, on='id', how='left')\nprint(data_all)\n# 直接做类别编码特征\n\nfeature = pd.DataFrame()\nlabel_encoder = preprocessing.LabelEncoder()\nfeature['phone_type'] = label_encoder.fit_transform(data_all[1])\nfeature['phone_type_detail'] = label_encoder.fit_transform(data_all[2])\nfeature.to_csv('feature/deviceid_brand_feature.csv', index=False)"
  },
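`2_get_feature_brand.py` label-encodes the brand columns after a left merge, so devices missing from `deviceid_brand.tsv` surface as NaN. A tiny illustration of why the `astype(str)` cast matters before `LabelEncoder` (toy brand values):

```python
import numpy as np
import pandas as pd
from sklearn import preprocessing

# NaN from the left merge would make LabelEncoder fail on a mixed
# str/float column; casting to str turns it into one extra 'nan' category.
s = pd.Series(['HUAWEI', np.nan, 'OPPO', 'HUAWEI'])
enc = preprocessing.LabelEncoder()
print(enc.fit_transform(s.astype(str)))  # [0 2 1 0]: classes HUAWEI, OPPO, nan
```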
  {
    "path": "nb_cz_lwl_wcm/3_get_feature_device_package.py",
    "content": "# -*- coding:utf-8 -*-\n\n\n#######  尝试骚操作，单独针对这个表\nimport pandas as pd\nfrom sklearn.cluster import KMeans\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier, PassiveAggressiveClassifier, RidgeClassifier\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.naive_bayes import BernoulliNB, MultinomialNB\nfrom sklearn.svm import LinearSVC\n\ntrain = pd.read_csv('Demo/deviceid_train.tsv', sep='\\t', header=None)\ntest = pd.read_csv('Demo/deviceid_test.tsv', sep='\\t', header=None)\ndef get_label(row):\n    if row[1] == 1:\n        return row[2]\n    else:\n        return row[2] + 11\ntrain['label'] = train.apply(lambda row:get_label(row), axis=1)\ndata_all = pd.concat([train, test], axis=0)\ndata_all = data_all.rename({0:'id'}, axis=1)\ndel data_all[1],data_all[2]\n\ndeviceid_packages = pd.read_csv('Demo/deviceid_packages.tsv', sep='\\t', header=None)\ndeviceid_packages = deviceid_packages.rename({0: 'id', 1: 'packages_names'}, axis=1)\npackage_label = pd.read_csv('Demo/package_label.tsv', sep='\\t', header=None)\npackage_label = package_label.rename({0:'packages_name', 1:'packages_type'},axis=1)\n# package_label['packages_type'] = package_label.apply(lambda row:row['packages_type'] + ' ' + row[2], axis=1)\ndict_label = dict(zip(list(package_label['packages_name']), list(package_label['packages_type'])))\n\ndata_all = pd.merge(data_all, deviceid_packages, on='id', how='left')\n\nfeature = pd.DataFrame()\n\nimport numpy as np\n\n# app个数\n# 毒特征？\n# feature['app_count'] = data_all['packages_names'].apply(lambda row: len(str(row).split(',')))\n\n# 对此数据做countvector,和tfidfvector,并在一起跑几个学习模型\n# 引申出来的count和tfidf，跑基本机器学习分类模型\ndata_all['package_str'] = data_all['packages_names'].apply(lambda row: str(row).replace(',', ' '))\ndef get_more_information(row):\n    result = ' '\n    start = True\n    row_list = row.split(',')\n    for i in row_list:\n        try:\n            if start:\n                result = dict_label[i]\n                start = False\n            else:\n                result = result + ' ' + dict_label[i]\n        except KeyError:\n            pass\n    return result\ndata_all['package_str_more_information'] = data_all['packages_names'].apply(lambda row: get_more_information(str(row)))\n\nprint(data_all)\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport scipy.sparse\nfrom sklearn.cross_validation import StratifiedKFold\n\n\ncount_vec = CountVectorizer()\ncount_csr_basic = count_vec.fit_transform(data_all['package_str'])\ntfidf_vec = TfidfVectorizer()\ntfidf_vec_basic = tfidf_vec.fit_transform(data_all['package_str'])\n\ncount_vec = CountVectorizer()\ncount_csr_more = count_vec.fit_transform(data_all['package_str_more_information'])\n\ntfidf_vec = TfidfVectorizer()\ntfidf_vec_more = tfidf_vec.fit_transform(data_all['package_str_more_information'])\n\ndata_feature = scipy.sparse.csr_matrix(scipy.sparse.hstack([count_csr_basic, tfidf_vec_basic,\n                     count_csr_more, tfidf_vec_more]))\n\ntrain_feature = data_feature[:len(train)]\nscore = train['label']\ntest_feature = data_feature[len(train):]\nnumber = len(np.unique(score))\n\n# 五则交叉验证\nn_folds = 5\nprint('处理完毕')\n\n########################### lr(LogisticRegression) ################################\nprint('lr stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, 
random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    clf = LogisticRegression(random_state=1017, C=8)\n    clf.fit(train_feature[tr], score[tr])\n    score_va = clf.predict_proba(train_feature[va])\n    score_te = clf.predict_proba(test_feature)\n    print('得分' + str(mean_squared_error(score[va], clf.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_lr_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_lr_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('lr特征已保存\\n')\n\n########################### SGD(随机梯度下降) ################################\nprint('sgd stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    sgd = SGDClassifier(random_state=1017, loss='log')\n    sgd.fit(train_feature[tr], score[tr])\n    score_va = sgd.predict_proba(train_feature[va])\n    score_te = sgd.predict_proba(test_feature)\n    print('得分' + str(mean_squared_error(score[va], sgd.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_sgd_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_sgd_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('sgd特征已保存\\n')\n\n########################### pac(PassiveAggressiveClassifier) ################################\nprint('PAC stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    pac = PassiveAggressiveClassifier(random_state=1017)\n    pac.fit(train_feature[tr], score[tr])\n    score_va = pac._predict_proba_lr(train_feature[va])\n    score_te = pac._predict_proba_lr(test_feature)\n    print(score_va)\n    print('得分' + str(mean_squared_error(score[va], pac.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_pac_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_pac_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('pac特征已保存\\n')\n\n\n########################### ridge(RidgeClassfiy) ################################\nprint('RidgeClassfiy stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    ridge = RidgeClassifier(random_state=1017)\n    ridge.fit(train_feature[tr], score[tr])\n    score_va = ridge._predict_proba_lr(train_feature[va])\n    score_te = ridge._predict_proba_lr(test_feature)\n    print(score_va)\n    print('得分' + str(mean_squared_error(score[va], ridge.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te
\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_ridge_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_ridge_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('ridge特征已保存\\n')\n\n\n########################### bnb(BernoulliNB) ################################\nprint('BernoulliNB stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    bnb = BernoulliNB()\n    bnb.fit(train_feature[tr], score[tr])\n    score_va = bnb.predict_proba(train_feature[va])\n    score_te = bnb.predict_proba(test_feature)\n    print(score_va)\n    print('得分' + str(mean_squared_error(score[va], bnb.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_bnb_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_bnb_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('BernoulliNB特征已保存\\n')\n\n########################### mnb(MultinomialNB) ################################\nprint('MultinomialNB stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    mnb = MultinomialNB()\n    mnb.fit(train_feature[tr], score[tr])\n    score_va = mnb.predict_proba(train_feature[va])\n    score_te = mnb.predict_proba(test_feature)\n    print(score_va)\n    print('得分' + str(mean_squared_error(score[va], mnb.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_mnb_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_mnb_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('MultinomialNB特征已保存\\n')\n\n############################ Linersvc(LinerSVC) ################################\nprint('LinerSVC stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    lsvc = 
LinearSVC(random_state=1017)\n    lsvc.fit(train_feature[tr], score[tr])\n    score_va = lsvc._predict_proba_lr(train_feature[va])\n    score_te = lsvc._predict_proba_lr(test_feature)\n    print(score_va)\n    print('得分' + str(mean_squared_error(score[va], lsvc.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_lsvc_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_lsvc_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('LSVC特征已保存\\n')\n\n\nkmeans_result = pd.DataFrame()\n###### kmeans ###\ndef get_cluster(num_clusters):\n    print('开始' + str(num_clusters))\n    name = 'kmean'\n    print(name)\n    model = KMeans(n_clusters=num_clusters, max_iter=300, n_init=1, \\\n                        init='k-means++', n_jobs=10, random_state=1017)\n    result = model.fit_predict(data_feature)\n    kmeans_result[name + 'word_' + str(num_clusters)] = result\n\nget_cluster(5)\nget_cluster(10)\nget_cluster(19)\nget_cluster(30)\nget_cluster(40)\nget_cluster(50)\nget_cluster(60)\nget_cluster(70)\nkmeans_result.to_csv('feature/cluster_tfidf_feature.csv', index=False)\n\n\n\nfeature.to_csv('feature/deviceid_package_feature.csv', index=False)\n"
  },
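  {
    "path": "editor_sketches/oof_stacking_sketch.py",
    "content": "# NOTE: editor's illustrative sketch, NOT part of the original winning solution.\n# It distills the out-of-fold (OOF) stacking pattern that 3_get_feature_device_package.py repeats for each base\n# model: fit on K-1 folds, record the held-out fold's predicted probabilities, and average the K test predictions.\n# The helper name and the modern sklearn.model_selection API are assumptions, not the repo's code.\nimport numpy as np\nfrom sklearn.model_selection import StratifiedKFold\n\n\ndef oof_probabilities(make_clf, X_train, y, X_test, n_classes, n_folds=5, seed=1017):\n    y = np.asarray(y)\n    stack_train = np.zeros((X_train.shape[0], n_classes))  # OOF probabilities for the train rows\n    stack_test = np.zeros((X_test.shape[0], n_classes))    # fold-averaged probabilities for the test rows\n    skf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=seed)\n    for tr, va in skf.split(X_train, y):\n        clf = make_clf()\n        clf.fit(X_train[tr], y[tr])\n        stack_train[va] = clf.predict_proba(X_train[va])   # each train row is predicted exactly once\n        stack_test += clf.predict_proba(X_test) / n_folds  # average over the folds\n    return stack_train, stack_test\n\n\n# Usage sketch: these would become the 22 'tfidf_lr_classfiy_*' stacking columns.\n# from sklearn.linear_model import LogisticRegression\n# lr_train, lr_test = oof_probabilities(lambda: LogisticRegression(C=8), train_feature, score, test_feature, number)\n"
  },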
  {
    "path": "nb_cz_lwl_wcm/4_get_feature_device_start_close_tfidf_1_2.py",
    "content": "# -*- coding:utf-8 -*-\n\nimport pandas as pd\nimport scipy.sparse\nimport numpy as np\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n\ntrain = pd.read_csv('Demo/deviceid_train.tsv', sep='\\t', header=None)\ntest = pd.read_csv('Demo/deviceid_test.tsv', sep='\\t', header=None)\n\ndata_all = pd.concat([train, test], axis=0)\ndata_all = data_all.rename({0:'id'}, axis=1)\ndel data_all[1],data_all[2]\n\nstart_close_time = pd.read_csv('Demo/deviceid_package_start_close.tsv', sep='\\t', header=None)\nstart_close_time = start_close_time.rename({0:'id', 1:'app_name', 2:'start_time', 3:'close_time'}, axis=1)\n\nstart_close_time = start_close_time.sort_values(by='start_time')\n\nstart_close_time['start_time'] = map(int,start_close_time['start_time']/1000)\nstart_close_time['close_time'] = map(int,start_close_time['close_time']/1000)\n\nunique_app_name = np.unique(start_close_time['app_name'])\ndict_label = dict(zip(list(unique_app_name), list(np.arange(0, len(unique_app_name), 1))))\nimport time\nstart_close_time['app_name'] = start_close_time['app_name'].apply(lambda row: str(dict_label[row]))\n\ndel start_close_time['start_time'], start_close_time['close_time']\n\nfrom tqdm import tqdm, tqdm_pandas\ntqdm_pandas(tqdm())\ndef dealed_row(row):\n    app_name_list = list(row['app_name'])\n    return ' '.join(app_name_list)\n\ndata_feature = start_close_time.groupby('id').progress_apply(lambda row:dealed_row(row)).reset_index()\ndata_feature = pd.merge(data_all, data_feature, on='id', how='left')\ndel data_feature['id']\n\ncount_vec = CountVectorizer(ngram_range=(1,3))\ncount_csr_basic = count_vec.fit_transform(data_feature[0])\ntfidf_vec = TfidfVectorizer(ngram_range=(1,3))\ntfidf_vec_basic = tfidf_vec.fit_transform(data_feature[0])\n\ndata_feature = scipy.sparse.csr_matrix(scipy.sparse.hstack([count_csr_basic, tfidf_vec_basic]))\n\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier, PassiveAggressiveClassifier, RidgeClassifier\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.naive_bayes import BernoulliNB, MultinomialNB\nfrom sklearn.svm import LinearSVC\nfrom sklearn.cross_validation import StratifiedKFold\n\ntrain = pd.read_csv('Demo/deviceid_train.tsv', sep='\\t', header=None)\ntest = pd.read_csv('Demo/deviceid_test.tsv', sep='\\t', header=None)\ndef get_label(row):\n    if row[1] == 1:\n        return row[2]\n    else:\n        return row[2] + 11\ntrain['label'] = train.apply(lambda row:get_label(row), axis=1)\ndata_all = pd.concat([train, test], axis=0)\ndata_all = data_all.rename({0:'id'}, axis=1)\ndel data_all[1],data_all[2]\n\ntrain_feature = data_feature[:len(train)]\nscore = train['label']\ntest_feature = data_feature[len(train):]\nnumber = len(np.unique(score))\n\n# 五则交叉验证\nn_folds = 5\nprint('处理完毕')\n\n########################### lr(LogisticRegression) ################################\nprint('lr stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    clf = LogisticRegression(random_state=1017, C=8)\n    clf.fit(train_feature[tr], score[tr])\n    score_va = clf.predict_proba(train_feature[va])\n    score_te = clf.predict_proba(test_feature)\n    print('得分' + str(mean_squared_error(score[va], clf.predict(train_feature[va]))))\n    
stack_train[va] += score_va\n    stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_lr_2_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_lr_1_3_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('lr特征已保存\\n')\n\n########################### SGD(随机梯度下降) ################################\nprint('sgd stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    sgd = SGDClassifier(random_state=1017, loss='log')\n    sgd.fit(train_feature[tr], score[tr])\n    score_va = sgd.predict_proba(train_feature[va])\n    score_te = sgd.predict_proba(test_feature)\n    print('得分' + str(mean_squared_error(score[va], sgd.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_2_sgd_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_sgd_1_3_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('sgd特征已保存\\n')\n\n########################### pac(PassiveAggressiveClassifier) ################################\nprint('PAC stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    pac = PassiveAggressiveClassifier(random_state=1017)\n    pac.fit(train_feature[tr], score[tr])\n    score_va = pac._predict_proba_lr(train_feature[va])\n    score_te = pac._predict_proba_lr(test_feature)\n    print(score_va)\n    print('得分' + str(mean_squared_error(score[va], pac.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_pac_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_pac_1_3_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('pac特征已保存\\n')\n\n\n########################### ridge(RidgeClassfiy) ################################\nprint('RidgeClassfiy stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    ridge = RidgeClassifier(random_state=1017)\n    ridge.fit(train_feature[tr], score[tr])\n    score_va = ridge._predict_proba_lr(train_feature[va])\n    score_te = ridge._predict_proba_lr(test_feature)\n    print(score_va)\n    print('得分' + str(mean_squared_error(score[va], ridge.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_ridge_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_ridge_1_3_error_single_classfiy.csv', index=None, 
encoding='utf8')\nprint('ridge特征已保存\\n')\n\n\n########################### bnb(BernoulliNB) ################################\nprint('BernoulliNB stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    bnb = BernoulliNB()\n    bnb.fit(train_feature[tr], score[tr])\n    score_va = bnb.predict_proba(train_feature[va])\n    score_te = bnb.predict_proba(test_feature)\n    print(score_va)\n    print('得分' + str(mean_squared_error(score[va], bnb.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_bnb_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_bnb_1_3_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('BernoulliNB特征已保存\\n')\n\n########################### mnb(MultinomialNB) ################################\nprint('MultinomialNB stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    mnb = MultinomialNB()\n    mnb.fit(train_feature[tr], score[tr])\n    score_va = mnb.predict_proba(train_feature[va])\n    score_te = mnb.predict_proba(test_feature)\n    print(score_va)\n    print('得分' + str(mean_squared_error(score[va], mnb.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_mnb_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_mnb_1_3_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('MultinomialNB特征已保存\\n')\n"
  },
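  {
    "path": "editor_sketches/launch_sequence_ngrams_sketch.py",
    "content": "# NOTE: editor's illustrative sketch, NOT part of the original winning solution.\n# 4_get_feature_device_start_close_tfidf_1_2.py treats each device's chronologically sorted app launches as one\n# space-separated sentence and vectorizes it with ngram_range=(1,3), so short launch patterns ('app 12 then app 34')\n# become features alongside single apps. The toy app ids below are made up.\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n\nlaunch_docs = [\n    '12 34 12 56',  # device 0: app ids in start_time order\n    '34 12 56 56',  # device 1\n]\ncount_vec = CountVectorizer(ngram_range=(1, 3))\ncounts = count_vec.fit_transform(launch_docs)  # unigram, bigram and trigram counts of app-id tokens\ntfidf_vec = TfidfVectorizer(ngram_range=(1, 3))\ntfidf = tfidf_vec.fit_transform(launch_docs)\nprint(sorted(count_vec.vocabulary_))  # includes '12 34 12', i.e. a 3-step launch pattern\n"
  },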
  {
    "path": "nb_cz_lwl_wcm/5_get_feature_device_start_close_tfidf.py",
    "content": "# -*- coding:utf-8 -*-\n\nimport pandas as pd\nimport scipy.sparse\nimport numpy as np\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n\ntrain = pd.read_csv('Demo/deviceid_train.tsv', sep='\\t', header=None)\ntest = pd.read_csv('Demo/deviceid_test.tsv', sep='\\t', header=None)\n\ndata_all = pd.concat([train, test], axis=0)\ndata_all = data_all.rename({0:'id'}, axis=1)\ndel data_all[1],data_all[2]\n\nstart_close_time = pd.read_csv('Demo/deviceid_package_start_close.tsv', sep='\\t', header=None)\nstart_close_time = start_close_time.rename({0:'id', 1:'app_name', 2:'start_time', 3:'close_time'}, axis=1)\n\nstart_close_time['start_time'] = map(int,start_close_time['start_time']/1000)\nstart_close_time['close_time'] = map(int,start_close_time['close_time']/1000)\n\nunique_app_name = np.unique(start_close_time['app_name'])\ndict_label = dict(zip(list(unique_app_name), list(np.arange(0, len(unique_app_name), 1))))\nimport time\nstart_close_time['app_name'] = start_close_time['app_name'].apply(lambda row: str(dict_label[row]))\n\ndel start_close_time['start_time'], start_close_time['close_time']\n\nfrom tqdm import tqdm, tqdm_pandas\ntqdm_pandas(tqdm())\ndef dealed_row(row):\n    app_name_list = list(row['app_name'])\n    return ' '.join(app_name_list)\n\ndata_feature = start_close_time.groupby('id').progress_apply(lambda row:dealed_row(row)).reset_index()\ndata_feature = pd.merge(data_all, data_feature, on='id', how='left')\ndel data_feature['id']\n\ncount_vec = CountVectorizer()\ncount_csr_basic = count_vec.fit_transform(data_feature[0])\ntfidf_vec = TfidfVectorizer()\ntfidf_vec_basic = tfidf_vec.fit_transform(data_feature[0])\n\ndata_feature = scipy.sparse.csr_matrix(scipy.sparse.hstack([count_csr_basic, tfidf_vec_basic]))\n\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier, PassiveAggressiveClassifier, RidgeClassifier\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.naive_bayes import BernoulliNB, MultinomialNB\nfrom sklearn.svm import LinearSVC\nfrom sklearn.cross_validation import StratifiedKFold\n\ntrain = pd.read_csv('Demo/deviceid_train.tsv', sep='\\t', header=None)\ntest = pd.read_csv('Demo/deviceid_test.tsv', sep='\\t', header=None)\ndef get_label(row):\n    if row[1] == 1:\n        return row[2]\n    else:\n        return row[2] + 11\ntrain['label'] = train.apply(lambda row:get_label(row), axis=1)\ndata_all = pd.concat([train, test], axis=0)\ndata_all = data_all.rename({0:'id'}, axis=1)\ndel data_all[1],data_all[2]\n\ntrain_feature = data_feature[:len(train)]\nscore = train['label']\ntest_feature = data_feature[len(train):]\nnumber = len(np.unique(score))\n\n# 五则交叉验证\nn_folds = 5\nprint('处理完毕')\n\n########################### lr(LogisticRegression) ################################\nprint('lr stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    clf = LogisticRegression(random_state=1017, C=8)\n    clf.fit(train_feature[tr], score[tr])\n    score_va = clf.predict_proba(train_feature[va])\n    score_te = clf.predict_proba(test_feature)\n    print('得分' + str(mean_squared_error(score[va], clf.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, 
stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_lr_2_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_lr_2_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('lr特征已保存\\n')\n\n########################### SGD(随机梯度下降) ################################\nprint('sgd stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    sgd = SGDClassifier(random_state=1017, loss='log')\n    sgd.fit(train_feature[tr], score[tr])\n    score_va = sgd.predict_proba(train_feature[va])\n    score_te = sgd.predict_proba(test_feature)\n    print('得分' + str(mean_squared_error(score[va], sgd.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_2_sgd_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_sgd_2_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('sgd特征已保存\\n')\n\n########################### pac(PassiveAggressiveClassifier) ################################\nprint('PAC stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    pac = PassiveAggressiveClassifier(random_state=1017)\n    pac.fit(train_feature[tr], score[tr])\n    score_va = pac._predict_proba_lr(train_feature[va])\n    score_te = pac._predict_proba_lr(test_feature)\n    print(score_va)\n    print('得分' + str(mean_squared_error(score[va], pac.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_pac_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_pac_2_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('pac特征已保存\\n')\n\n\n########################### ridge(RidgeClassfiy) ################################\nprint('RidgeClassfiy stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    ridge = RidgeClassifier(random_state=1017)\n    ridge.fit(train_feature[tr], score[tr])\n    score_va = ridge._predict_proba_lr(train_feature[va])\n    score_te = ridge._predict_proba_lr(test_feature)\n    print(score_va)\n    print('得分' + str(mean_squared_error(score[va], ridge.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te
\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_ridge_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_ridge_2_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('ridge特征已保存\\n')\n\n\n########################### bnb(BernoulliNB) ################################\nprint('BernoulliNB stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    bnb = BernoulliNB()\n    bnb.fit(train_feature[tr], score[tr])\n    score_va = bnb.predict_proba(train_feature[va])\n    score_te = bnb.predict_proba(test_feature)\n    print(score_va)\n    print('得分' + str(mean_squared_error(score[va], bnb.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_bnb_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_bnb_2_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('BernoulliNB特征已保存\\n')\n\n########################### mnb(MultinomialNB) ################################\nprint('MultinomialNB stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    mnb = MultinomialNB()\n    mnb.fit(train_feature[tr], score[tr])\n    score_va = mnb.predict_proba(train_feature[va])\n    score_te = mnb.predict_proba(test_feature)\n    print(score_va)\n    print('得分' + str(mean_squared_error(score[va], mnb.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n    df_stack['tfidf_mnb_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_mnb_2_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('MultinomialNB特征已保存\\n')\n\n############################ Linersvc(LinerSVC) ################################\nprint('LinerSVC stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n    print('stack:%d/%d' % ((i + 1), n_folds))\n    lsvc = LinearSVC(random_state=1017)\n    lsvc.fit(train_feature[tr], score[tr])\n    score_va = lsvc._predict_proba_lr(train_feature[va])\n    score_te = lsvc._predict_proba_lr(test_feature)\n    print(score_va)\n    print('得分' + str(mean_squared_error(score[va], lsvc.predict(train_feature[va]))))\n    stack_train[va] += score_va\n    stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in 
range(stack.shape[1]):\n    df_stack['tfidf_lsvc_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_lsvc_2_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('LSVC特征已保存\\n')\n\n\nkmeans_result = pd.DataFrame()\n###### kmeans ###\ndef get_cluster(num_clusters):\n    print('开始' + str(num_clusters))\n    name = 'kmean'\n    print(name)\n    model = KMeans(n_clusters=num_clusters, max_iter=300, n_init=1, \\\n                        init='k-means++', n_jobs=10, random_state=1017)\n    result = model.fit_predict(data_feature)\n    kmeans_result[name + 'word_' + str(num_clusters)] = result\n\nget_cluster(5)\nget_cluster(10)\nget_cluster(19)\nget_cluster(30)\nget_cluster(40)\nget_cluster(50)\nget_cluster(60)\nget_cluster(70)\nkmeans_result.to_csv('feature/cluster_2_tfidf_feature.csv', index=False)\n\n\n"
  },
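  {
    "path": "editor_sketches/kmeans_cluster_features_sketch.py",
    "content": "# NOTE: editor's illustrative sketch, NOT part of the original winning solution.\n# Files 3 and 5 append unsupervised features: KMeans is run on the sparse count/TF-IDF matrix for several values\n# of k, and each device's cluster id becomes one categorical column. The random matrix below is a stand-in for\n# the real vectorized app data (an assumption), and n_jobs is omitted for compatibility with modern sklearn.\nimport pandas as pd\nfrom scipy.sparse import random as sparse_random\nfrom sklearn.cluster import KMeans\n\nX = sparse_random(100, 50, density=0.1, format='csr', random_state=1017)  # stand-in for data_feature\nkmeans_result = pd.DataFrame()\nfor k in (5, 10, 19, 30):\n    km = KMeans(n_clusters=k, max_iter=300, n_init=1, init='k-means++', random_state=1017)\n    kmeans_result['kmeanword_' + str(k)] = km.fit_predict(X)  # one cluster-id column per choice of k\nprint(kmeans_result.head())\n"
  },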
  {
    "path": "nb_cz_lwl_wcm/6_get_feature_device_start_close.py",
    "content": "# -*- coding:utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\n\ntrain = pd.read_csv('Demo/deviceid_train.tsv', sep='\\t', header=None)\ntest = pd.read_csv('Demo/deviceid_test.tsv', sep='\\t', header=None)\n\ndata_all = pd.concat([train, test], axis=0)\ndata_all = data_all.rename({0:'id'}, axis=1)\ndel data_all[1],data_all[2]\n\nstart_close_time = pd.read_csv('Demo/deviceid_package_start_close.tsv', sep='\\t', header=None)\nstart_close_time = start_close_time.rename({0:'id', 1:'app_name', 2:'start_time', 3:'close_time'}, axis=1)\n\n\nstart_close_time['diff_time'] = (start_close_time['close_time'] - start_close_time['start_time'])/1000\n\nprint('开始转换时间')\nimport time\nstart_close_time['close_time'] = start_close_time['close_time'].apply(lambda row: int(time.localtime(row/1000).tm_hour))\nstart_close_time['start_time'] = start_close_time['start_time'].apply(lambda row: int(time.localtime(row/1000).tm_hour))\n\n# 一个表里面的总次数\nprint('一个表的总次数')\nfeature = pd.DataFrame()\nfeature['start_close_count'] = pd.merge(data_all, start_close_time.groupby('id').size().reset_index(), on='id', how='left')[0]\n\n# 0 - 5 点的使用次数\ntemp = start_close_time[(start_close_time['close_time'] >=0)&(start_close_time['close_time'] <=5)]\ntemp = temp.groupby('id').size().reset_index()\nfeature['zero_five_count'] = pd.merge(data_all, temp, on='id', how='left').fillna(0)[0]\n\n# 玩的时间最长的app的名字编码\ndef get_max_label(row):\n    row_name = list(row['app_name'])\n    row_diff_time = list(row['diff_time'])\n    return row_name[np.argmax(row_diff_time)]\n\nstart_close_max_name = start_close_time.groupby('id').apply(lambda row:get_max_label(row)).reset_index()\nlabel_encoder = preprocessing.LabelEncoder()\nfeature['start_close_max_name'] = label_encoder.fit_transform(pd.merge(data_all, start_close_max_name, on='id', how='left').fillna(0)[0])\n\nfeature.to_csv('feature/feature_start_close.csv', index=False)"
  },
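  {
    "path": "editor_sketches/hour_of_day_features_sketch.py",
    "content": "# NOTE: editor's illustrative sketch, NOT part of the original winning solution.\n# 6_get_feature_device_start_close.py derives behavioural features from the millisecond timestamps: total\n# session count, sessions closed between 0:00 and 5:00, and the label of the longest-used app. This shows the\n# core conversion; the timestamps are made up and time.localtime is timezone-dependent.\nimport time\nimport pandas as pd\n\nlogs = pd.DataFrame({\n    'id': ['d1', 'd1', 'd2'],\n    'start_time': [1488326400000, 1488348000000, 1488355200000],  # ms epochs (toy values)\n    'close_time': [1488327000000, 1488349800000, 1488355500000],\n})\nlogs['diff_time'] = (logs['close_time'] - logs['start_time']) / 1000  # session length in seconds\nlogs['close_hour'] = logs['close_time'].apply(lambda ms: time.localtime(ms / 1000).tm_hour)\nnight_counts = logs[(logs['close_hour'] >= 0) & (logs['close_hour'] <= 5)].groupby('id').size()\nprint(night_counts)\n"
  },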
  {
    "path": "nb_cz_lwl_wcm/7_get_feature_w2v.py",
    "content": "from gensim.models import Word2Vec\nimport pandas as pd\npath=\"Demo/\"\npackages = pd.read_csv(path+\"deviceid_packages.tsv\",\n                       sep=\"\\t\", names=['id', 'app_list'])\npackages['app_count'] = packages['app_list'].apply(\n    lambda x: len(x.split(\",\")), 1)\ndocuments = packages['app_list'].values.tolist()\ntexts = [[word for word in str(document).split(',')] for document in documents]\n# frequency = defaultdict(int)\n# for text in texts:\n#     for token in text:\n#         frequency[token] += 1\n# texts = [[token for token in text if frequency[token] >= 5] for text in texts]\nw2v = Word2Vec(texts, size=128, window=10, iter=45,\n               workers=12, seed=1017, min_count=5)\nw2v.wv.save_word2vec_format('./w2v_128.txt')\n\nimport gensim\nimport numpy as np\n\n\ndef get_w2v_avg(text, w2v_out_path, word2vec_Path):\n    texts = []\n    w2v_dim = 128\n    data = text\n#     data = pd.read_csv(text_path)\n    data['app_list'] = data['app_list'].apply(\n        lambda x: x.strip().split(\",\"), 1)\n    texts = data['app_list'].values.tolist()\n\n    model = gensim.models.KeyedVectors.load_word2vec_format(\n        word2vec_Path, binary=False)\n    vacab = model.vocab.keys()\n\n    w2v_feature = np.zeros((len(texts), w2v_dim))\n    w2v_feature_avg = np.zeros((len(texts), w2v_dim))\n\n    for i, line in enumerate(texts):\n        num = 0\n        if line == '':\n            w2v_feature_avg[i, :] = np.zeros(w2v_dim)\n        else:\n            for word in line:\n                num += 1\n                vec = model[word] if word in vacab else np.zeros(w2v_dim)\n                w2v_feature[i, :] += vec\n            w2v_feature_avg[i, :] = w2v_feature[i, :] / num\n    w2v_avg = pd.DataFrame(w2v_feature_avg)\n    w2v_avg.columns = ['w2v_avg_' + str(i) for i in w2v_avg.columns]\n    w2v_avg['id'] = data['id']\n    w2v_avg.to_csv(w2v_out_path, encoding='utf-8', index=None)\n    return w2v_avg\n\n\nw2v_feat = get_w2v_avg(packages, \"feature/w2v_avg.csv\", \"w2v_128.txt\")\n\n\n"
  },
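  {
    "path": "editor_sketches/w2v_device_mean_sketch.py",
    "content": "# NOTE: editor's illustrative sketch, NOT part of the original winning solution.\n# 7_get_feature_w2v.py represents each device as the mean of the word2vec vectors of its installed apps. The\n# toy app lists are made up; the `size` keyword follows gensim<4, which the repo uses (gensim>=4 renamed it to\n# vector_size).\nimport numpy as np\nfrom gensim.models import Word2Vec\n\ntexts = [['app_a', 'app_b', 'app_c'], ['app_b', 'app_d'], ['app_a', 'app_d']]\nw2v = Word2Vec(texts, size=8, window=10, min_count=1, seed=1017, workers=1)\n\n\ndef device_vector(apps, model, dim=8):\n    vecs = [model.wv[a] for a in apps if a in model.wv]  # skip apps below min_count / unseen apps\n    return np.mean(vecs, axis=0) if vecs else np.zeros(dim)\n\n\nprint(device_vector(['app_a', 'app_d', 'app_unseen'], w2v))\n"
  },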
  {
    "path": "nb_cz_lwl_wcm/8_get_feature_lwl.py",
    "content": "\n# coding: utf-8\n\n# In[ ]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport lightgbm as lgb\nfrom datetime import datetime,timedelta  \nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n# %matplotlib inline\n\n#add\nimport gc\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom scipy.sparse import hstack, vstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\nfrom skopt.space import Integer, Categorical, Real, Log10\nfrom skopt.utils import use_named_args\nfrom skopt import gp_minimize\nfrom gensim.models import Word2Vec, FastText\nimport gensim \nimport re\n\n\n# In[ ]:\n\n\ntest = pd.read_csv('../input/yiguan/demo/Demo/deviceid_test.tsv', sep='\\t', names=['device_id'])\ntrain = pd.read_csv('../input/yiguan/demo/Demo/deviceid_train.tsv', sep='\\t', names=['device_id', 'sex', 'age'])\nbrand = pd.read_table('../input/yiguan/demo/Demo/deviceid_brand.tsv', names=['device_id', 'vendor', 'version'])\npacktime = pd.read_table('../input/yiguan/demo/Demo/deviceid_package_start_close.tsv', \n                         names=['device_id', 'app', 'start', 'close'])\npackages = pd.read_csv('../input/yiguan/demo/Demo/deviceid_packages.tsv', sep='\\t', names=['device_id', 'apps'])\n\n\n# In[ ]:\n\n\npacktime['period'] = (packtime['close'] - packtime['start'])/1000\npacktime['start'] = pd.to_datetime(packtime['start'], unit='ms')\napp_use_time = packtime.groupby(['app'])['period'].agg('sum').reset_index()\n# 试试看200\napp_use_top100 = app_use_time.sort_values(by='period', ascending=False)[:100]['app']\ndevice_app_use_time = packtime.groupby(['device_id', 'app'])['period'].agg('sum').reset_index()\nuse_time_top100_statis = device_app_use_time.set_index('app').loc[list(app_use_top100)].reset_index()\ntop100_statis = use_time_top100_statis.pivot(index='device_id', columns='app', values='period').reset_index()\n\n\n# In[ ]:\n\n\ntop100_statis = top100_statis.fillna(0)\n\n\n# In[ ]:\n\n\n# 手机品牌预处理\nbrand['vendor'] = brand['vendor'].astype(str).apply(lambda x : x.split(' ')[0].upper())\nbrand['ph_ver'] = brand['vendor'] + '_' + brand['version']\n\nph_ver = brand['ph_ver'].value_counts()\nph_ver_cnt = pd.DataFrame(ph_ver).reset_index()\nph_ver_cnt.columns = ['ph_ver', 'ph_ver_cnt']\n\nbrand = pd.merge(left=brand, right=ph_ver_cnt,on='ph_ver')\n\n\n# In[ ]:\n\n\n# 针对长尾分布做的一点处理\nmask = (brand.ph_ver_cnt < 100)\nbrand.loc[mask, 'ph_ver'] = 'other' \n\ntrain = pd.merge(brand[['device_id', 'ph_ver']], train, on='device_id', how='right')\ntest = pd.merge(brand[['device_id', 'ph_ver']], test, on='device_id', how='right')\ntrain['ph_ver'] = train['ph_ver'].astype(str)\ntest['ph_ver'] = test['ph_ver'].astype(str)\n\n# 将 ph_ver 进行 label encoder\nph_ver_le = preprocessing.LabelEncoder()\ntrain['ph_ver'] = ph_ver_le.fit_transform(train['ph_ver'])\ntest['ph_ver'] = ph_ver_le.transform(test['ph_ver'])\ntrain['label'] = train['sex'].astype(str) + '-' + train['age'].astype(str)\nlabel_le = preprocessing.LabelEncoder()\ntrain['label'] = label_le.fit_transform(train['label'])\n\n\n# In[ ]:\n\n\ntest['sex'] = -1\ntest['age'] = -1\ntest['label'] = -1\ndata = pd.concat([train, test], 
ignore_index=True)\ndata.shape\n\n\n# In[ ]:\n\n\nph_ver_dummy = pd.get_dummies(data['ph_ver'])\nph_ver_dummy.columns = ['ph_ver_' + str(i) for i in range(ph_ver_dummy.shape[1])]\n\n\n# In[ ]:\n\n\ndata = pd.concat([data, ph_ver_dummy], axis=1)\n\n\n# In[ ]:\n\n\ndel data['ph_ver']\n\n\n# In[ ]:\n\n\ntrain = data[data.sex != -1]\ntest = data[data.sex == -1]\ntrain.shape, test.shape\n\n\n# In[ ]:\n\n\n# 每个app的总使用次数统计\napp_num = packtime['app'].value_counts().reset_index()\napp_num.columns = ['app', 'app_num']\npacktime = pd.merge(left=packtime, right=app_num, on='app')\n# 同样的，针对长尾分布做些处理（尝试过不做处理，或换其他阈值，这个100的阈值最高）\npacktime.loc[packtime.app_num < 100, 'app'] = 'other'\n\n\n# In[ ]:\n\n\n# 统计每台设备的app数量\ndf_app = packtime[['device_id', 'app']]\napps = df_app.drop_duplicates().groupby(['device_id'])['app'].apply(' '.join).reset_index()\napps['app_length'] = apps['app'].apply(lambda x:len(x.split(' ')))\n\ntrain = pd.merge(train, apps, on='device_id', how='left')\ntest = pd.merge(test, apps, on='device_id', how='left')\n\n\n# In[ ]:\n\n\n# 获取每台设备所安装的apps的tfidf\ntfidf = CountVectorizer(lowercase=False, min_df=3, stop_words=top100_statis.columns.tolist()[1:7])\ntfidf.fit(apps['app'])  # fit only; assigning the sparse matrix back to apps['app'] would clobber the text column transformed below\n\nX_tr_app = tfidf.transform(list(train['app']))\nX_ts_app = tfidf.transform(list(test['app']))\n\n\n# In[ ]:\n\n\n'''\nsvd = TruncatedSVD(n_components=100, random_state=42)\nX = vstack([X_tr_app, X_ts_app])\nsvd.fit(X)\nX_tr_app = svd.fit_transform(X_tr_app)\nX_ts_app = svd.fit_transform(X_ts_app)\nX_tr_app = pd.DataFrame(X_tr_app)\nX_ts_app = pd.DataFrame(X_ts_app)\nX_tr_app.columns = ['app_' + str(i) for i in range(0, 100)]\nX_ts_app.columns = ['app_' + str(i) for i in range(0, 100)]\n'''\n\n\n# ### 利用word2vec得到每台设备所安装app的embedding表示\n\n# In[ ]:\n\n\npackages['apps'] = packages['apps'].apply(lambda x:x.split(','))\npackages['app_length'] = packages['apps'].apply(lambda x:len(x))\n\n\n# In[ ]:\n\n\nembed_size = 128\nfastmodel = Word2Vec(list(packages['apps']), size=embed_size, window=4, min_count=3, negative=2,\n                 sg=1, sample=0.002, hs=1, workers=4)  \n\nembedding_fast = pd.DataFrame([fastmodel[word] for word in (fastmodel.wv.vocab)])\nembedding_fast['app'] = list(fastmodel.wv.vocab)\nembedding_fast.columns= [\"fdim_%s\" % str(i) for i in range(embed_size)]+[\"app\"]\nembedding_fast.head()\n\n\n# In[ ]:\n\n\nid_list = []\nfor i in range(packages.shape[0]):\n    id_list += [list(packages['device_id'])[i]]*packages['app_length'].iloc[i]\n\n\napp_list = [word for item in packages['apps'] for word in item]\n\napp_vect = pd.DataFrame({'device_id':id_list})        \napp_vect['app'] = app_list\n\n\n# In[ ]:\n\n\napp_vect = app_vect.merge(embedding_fast, on='app', how='left')\napp_vect = app_vect.drop('app', axis=1)\n\nseqfeature = app_vect.groupby(['device_id']).agg('mean')\nseqfeature.reset_index(inplace=True)\n\n\n# In[ ]:\n\n\nseqfeature.head()\n\n\n# ### 用户一周七天玩手机的时长情况\n\n# In[ ]:\n\n\n# packtime['period'] = (packtime['close'] - packtime['start'])/1000\n# packtime['start'] = pd.to_datetime(packtime['start'], unit='ms')\npacktime['dayofweek'] = packtime['start'].dt.dayofweek\npacktime['hour'] = packtime['start'].dt.hour\n# packtime = packtime[(packtime['start'] < '2017-03-31 23:59:59') & (packtime['start'] > '2017-03-01 00:00:00')]\n\n\n# In[ ]:\n\n\napp_use_time = packtime.groupby(['device_id', 'dayofweek'])['period'].agg('sum').reset_index()\nweek_app_use = app_use_time.pivot_table(values='period', columns='dayofweek', index='device_id').reset_index()\nweek_app_use = 
week_app_use.fillna(0)\nweek_app_use.columns = ['device_id'] + ['week_day_' + str(i) for i in range(0, 7)]\n\nweek_app_use['week_max'] = week_app_use.max(axis=1)\nweek_app_use['week_min'] = week_app_use.min(axis=1)\nweek_app_use['week_sum'] = week_app_use.sum(axis=1)\nweek_app_use['week_std'] = week_app_use.std(axis=1)\n\n'''\nfor i in range(0, 7):\n    week_app_use['week_day_' + str(i)] = week_app_use['week_day_' + str(i)] / week_app_use['week_sum']\n'''\n\n\n# In[ ]:\n\n\n'''\napp_use_time = packtime.groupby(['device_id', 'hour'])['period'].agg('sum').reset_index()\nhour_app_use = app_use_time.pivot_table(values='period', columns='hour', index='device_id').reset_index()\nhour_app_use = hour_app_use.fillna(0)\nhour_app_use.columns = ['device_id'] + ['hour_' + str(i) for i in range(0, 24)]\n\n# hour_app_use['hour_max'] = hour_app_use.max(axis=1)\n# hour_app_use['hour_min'] = hour_app_use.min(axis=1)\n# hour_app_use['hour_sum'] = hour_app_use.sum(axis=1)\n# hour_app_use['hour_std'] = hour_app_use.std(axis=1)\n\n# for i in range(0, 24):\n#     hour_app_use['hour_' + str(i)] = hour_app_use['hour_' + str(i)] / hour_app_use['hour_sum']\n'''\n\n\n# ### 将各个特征整合到一块\n\n# In[ ]:\n\n\ntrain.columns[4:]\n\n\n# In[ ]:\n\n\nuser_behavior = pd.read_csv('../input/yg-user-behavior/user_behavior.csv')\nuser_behavior['app_len_max'] = user_behavior['app_len_max'].astype(np.float64)\ndel user_behavior['app']\ntrain = pd.merge(train, user_behavior, on='device_id', how='left')\ntest = pd.merge(test, user_behavior, on='device_id', how='left')\n\n\n# In[ ]:\n\n\ntrain = pd.merge(train, seqfeature, on='device_id', how='left')\ntest = pd.merge(test, seqfeature, on='device_id', how='left')\n\n\n# In[ ]:\n\n\ntrain = pd.merge(train, week_app_use, on='device_id', how='left')\ntest = pd.merge(test, week_app_use, on='device_id', how='left')\n\n\n# In[ ]:\n\n\n'''\napp_top50_list = list(packtime.groupby(by='app')['period'].sum().sort_values(ascending=False)[:50].index)\n\nfor app in app_top50_list:\n    app_cnt = packtime[packtime['app'] == app]\n    start_num_app = app_cnt.groupby(by='device_id')['start'].count().reset_index()\n    start_num_app.columns = ['device_id', 'start_num_app_' + app[0:4]]\n    train = train.merge(start_num_app, on='device_id', how='left')\n    test = test.merge(start_num_app, on='device_id', how='left')\n    print(app + ' done')   \n'''\n\n\n# In[ ]:\n\n\n'''\n# all_top50 : 使用总时长最高的50款app，每个人的使用时间统计\nall_top50 = pd.read_csv('../input/yg-feature/all_top50_statis.csv')\ntrain = pd.merge(train, all_top50, on='device_id', how='left')\ntest = pd.merge(test, all_top50, on='device_id', how='left')\n'''\n\n\n# In[ ]:\n\n\ntop100_statis.columns = ['device_id'] + ['top100_statis_' + str(i) for i in range(0, 100)]\ntrain = pd.merge(train, top100_statis, on='device_id', how='left')\ntest = pd.merge(test, top100_statis, on='device_id', how='left')\n\n\n# In[ ]:\n\n\ntrain.to_csv('train_feature.csv', index=None)\ntest.to_csv('test_feature.csv', index=None)\n\n\n# In[ ]:\n\n\nfeats = train.columns[4:]\nfeats\n\n\n# In[ ]:\n\n\nfeats = feats.delete(153)\nfeats[153]\n\n\n# In[ ]:\n\n\n'''\ntrain = pd.merge(train, hour_app_use, on='device_id', how='left')\ntest = pd.merge(test, hour_app_use, on='device_id', how='left')\n'''\n\n\n# In[ ]:\n\n\nX_train = hstack([X_tr_app, train[feats].astype(float)])\nX_test = hstack([X_ts_app, test[feats].astype(float)])\n\nX_train = X_train.tocsr().astype('float')\nX_test = X_test.tocsr().astype('float')\n\n\n# ### 开始训练模型\n\n# In[ ]:\n\n\nY = train['sex'] - 1\nkfold = 
StratifiedKFold(n_splits=10, random_state=10, shuffle=True)\noof_preds1 = np.zeros((X_train.shape[0], ))\nsub1 = np.zeros((X_test.shape[0], ))\nfor i, (train_index, test_index) in enumerate(kfold.split(X_train, Y)): \n    X_tr, X_vl, y_tr, y_vl = X_train[train_index], X_train[test_index],                                 Y[train_index], Y[test_index]\n    dtrain = lgb.Dataset(X_tr, label=y_tr)\n    dvalid = lgb.Dataset(X_vl, y_vl, reference=dtrain)\n    params = {\n        'boosting_type': 'gbdt',\n        'max_depth':6,\n        'objective':'binary',\n        'num_leaves':31,\n        'subsample': 0.85,\n        'colsample_bytree': 0.2,\n        'lambda_l1':0.00007995302080034896,\n        'lambda_l2':0.0003648648811380991,\n        'subsample_freq':12,\n        'learning_rate': 0.012,\n        'min_child_weight':5.5\n    }\n\n    model = lgb.train(params,\n                        dtrain,\n                        num_boost_round=4000,\n                        valid_sets=dvalid,\n                        early_stopping_rounds=100,\n                        verbose_eval=100)\n\n    oof_preds1[test_index] = model.predict(X_vl, num_iteration=model.best_iteration)\n    sub1 += model.predict(X_test, num_iteration=model.best_iteration)/kfold.n_splits\n\n\n# In[ ]:\n\n\nY = train['age']\nkfold = StratifiedKFold(n_splits=10, random_state=10, shuffle=True)\noof_preds2 = np.zeros((X_train.shape[0], 11))\nsub2 = np.zeros((X_test.shape[0], 11))\nfor i, (train_index, test_index) in enumerate(kfold.split(X_train, Y)):\n    X_tr, X_vl, y_tr, y_vl = X_train[train_index], X_train[test_index],                                 Y[train_index], Y[test_index]\n    dtrain = lgb.Dataset(X_tr, label=y_tr)\n    dvalid = lgb.Dataset(X_vl, y_vl, reference=dtrain)\n    params = {\n        'boosting_type': 'gbdt',\n        'max_depth':6,\n        'metric': {'multi_logloss'},\n        'num_class':11,\n        'objective':'multiclass',\n        'num_leaves':31,\n        'subsample': 0.9,\n        'colsample_bytree': 0.2,\n        'lambda_l1':0.0001,\n        'lambda_l2':0.00111,\n        'subsample_freq':10,\n        'learning_rate': 0.012,\n        'min_child_weight':10\n    }\n\n    model = lgb.train(params,\n                        dtrain,\n                        num_boost_round=4000,\n                        valid_sets=dvalid,\n                        early_stopping_rounds=100,\n                        verbose_eval=100)\n\n    oof_preds2[test_index] = model.predict(X_vl, num_iteration=model.best_iteration)\n    sub2 += model.predict(X_test, num_iteration=model.best_iteration)/kfold.n_splits\n\n\n# In[ ]:\n\n\noof_preds1 = pd.DataFrame(oof_preds1, columns=['sex2'])\n\noof_preds1['sex1'] = 1-oof_preds1['sex2']\noof_preds2 = pd.DataFrame(oof_preds2, columns=['age%s'%i for i in range(11)])\noof_preds = train[['device_id']]\noof_preds.columns = ['DeviceID']\n\nfor i in ['sex1', 'sex2']:\n    for j in ['age%s'%i for i in range(11)]:\n        oof_preds[i+'_'+j] = oof_preds1[i] * oof_preds2[j]\noof_preds.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \n         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', \n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\noof_preds.to_csv('train.csv', index=False)\n\n\n# In[ ]:\n\n\nsub1 = pd.DataFrame(sub1, columns=['sex2'])\n\nsub1['sex1'] = 1-sub1['sex2']\nsub2 = pd.DataFrame(sub2, columns=['age%s'%i for i in range(11)])\nsub = test[['device_id']]\nsub.columns = ['DeviceID']\n\nfor i in ['sex1', 'sex2']:\n    for j in ['age%s'%i for i in 
range(11)]:\n        sub[i+'_'+j] = sub1[i] * sub2[j]\nsub.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \n         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', \n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\nsub.to_csv('lgb_l_v54.csv', index=False)\n\n\n# In[ ]:\n\n\n'''\nY = train['label']\n#best params: [31, 11, 0.015955854914003094, 0.12122664084283229, 0.7645440142264772, 24, 1048, 0.00552258737237652, 0.005810068328090833, 7]\nkfold = StratifiedKFold(n_splits=5, random_state=10, shuffle=True)\nsub = np.zeros((X_test.shape[0], 22))\nfor i, (train_index, test_index) in enumerate(kfold.split(X_train, Y)):\n    X_tr, X_vl, y_tr, y_vl = X_train[train_index], X_train[test_index], Y[train_index], Y[test_index]\n    dtrain = lgb.Dataset(X_tr, label=y_tr)\n    dvalid = lgb.Dataset(X_vl, y_vl, reference=dtrain)\n    params = {\n        'boosting_type': 'gbdt',\n        'max_depth':7,\n        'objective':'multiclass',\n        'metric': {'multi_logloss'},\n        'num_class':22,\n        'num_leaves':20,\n        'subsample': 0.86,\n        'colsample_bytree': 0.8,\n        #'lambda_l1':0.00007995302080034896,\n        'lambda_l2':0.005,\n        'subsample_freq':11,\n        'learning_rate': 0.01,\n        'min_child_weight':5.5,\n\n    }\n    \n    model = lgb.train(params,\n                        dtrain,\n                        num_boost_round=6000,\n                        valid_sets=dvalid,\n                        early_stopping_rounds=20,\n                        verbose_eval=100)\n\n\n    sub += model.predict(X_test, num_iteration=model.best_iteration)/kfold.n_splits\n'''\n\n\n# In[ ]:\n\n\n'''\nsub = pd.DataFrame(sub)\ncols = [x for x in range(0, 22)]\ncols = label_le.inverse_transform(cols)\n\nsub.columns = cols\nsub['DeviceID'] = test['device_id'].values\n\nsub = sub[['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6', \n         '1-7','1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4', \n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']]\n\nsub.to_csv('30.csv', index=False)\n'''\n\n"
  },
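  {
    "path": "editor_sketches/sex_age_joint_probability_sketch.py",
    "content": "# NOTE: editor's illustrative sketch, NOT part of the original winning solution.\n# Files 8 and 9 both train a binary sex model and an 11-class age model separately, then build the 22 submission\n# columns '1-0'..'2-10' as P(sex) * P(age), i.e. the two heads are combined under an independence assumption.\n# The probabilities below are toy values.\nimport numpy as np\n\np_sex = np.array([[0.3, 0.7]])      # P(sex=1), P(sex=2) for one device\np_age = np.full((1, 11), 1.0 / 11)  # P(age=0..10)\njoint = (p_sex[:, :, None] * p_age[:, None, :]).reshape(1, 22)  # column order: 1-0..1-10, 2-0..2-10\nassert np.allclose(joint.sum(axis=1), 1.0)  # still a distribution over the 22 sex-age classes\n"
  },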
  {
    "path": "nb_cz_lwl_wcm/9_yg_best_nn.py",
    "content": "import pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom datetime import datetime, timedelta\nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n# %matplotlib inline\n\n#add\nfrom category_encoders import OrdinalEncoder\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom scipy.sparse import hstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\nfrom gensim.models import FastText, Word2Vec\nimport re\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.preprocessing.text import Tokenizer, text_to_word_sequence\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing import text, sequence\nfrom keras.callbacks import *\nfrom keras.layers.advanced_activations import LeakyReLU, PReLU\nimport keras.backend as K\nfrom keras.optimizers import *\nfrom keras.utils import to_categorical\nfrom config import path\npackages = pd.read_csv(path+'deviceid_packages.tsv',\n                       sep='\\t', names=['device_id', 'apps'])\ntest = pd.read_csv(path+'deviceid_test.tsv',\n                   sep='\\t', names=['device_id'])\ntrain = pd.read_csv(path+'deviceid_train.tsv',\n                    sep='\\t', names=['device_id', 'sex', 'age'])\n\nbrand = pd.read_table(path+'deviceid_brand.tsv',\n                      names=['device_id', 'vendor', 'version'])\nbehave = pd.read_csv('data/user_behavior.csv')\n\nbrand['phone_version'] = brand['vendor'] + ' ' + brand['version']\ntrain = pd.merge(brand[['device_id', 'phone_version']],\n                 train, on='device_id', how='right')\ntest = pd.merge(brand[['device_id', 'phone_version']],\n                test, on='device_id', how='right')\n\ntrain = pd.merge(train, behave, on='device_id', how='left')\ntest = pd.merge(test, behave, on='device_id', how='left')\n\npackages['app_lenghth'] = packages['apps'].apply(\n    lambda x: x.split(',')).apply(lambda x: len(x))\npackages['app_list'] = packages['apps'].apply(lambda x: x.split(','))\ntrain = pd.merge(train, packages, on='device_id', how='left')\ntest = pd.merge(test, packages, on='device_id', how='left')\n\nembed_size = 128\nfastmodel = Word2Vec(list(packages['app_list']), size=embed_size, window=4, min_count=3, negative=2,\n                     sg=1, sample=0.002, hs=1, workers=4)\n\nembedding_fast = pd.DataFrame([fastmodel[word]\n                               for word in (fastmodel.wv.vocab)])\nembedding_fast['app'] = list(fastmodel.wv.vocab)\nembedding_fast.columns = [\"fdim_%s\" %\n                          str(i) for i in range(embed_size)]+[\"app\"]\n\n\ntokenizer = Tokenizer(lower=False, char_level=False, split=',')\n\ntokenizer.fit_on_texts(list(packages['apps']))\n\nX_seq = tokenizer.texts_to_sequences(train['apps'])\nX_test_seq = tokenizer.texts_to_sequences(test['apps'])\n\nmaxlen = 50\nX = pad_sequences(X_seq, maxlen=maxlen, value=0)\nX_test = pad_sequences(X_test_seq, maxlen=maxlen, value=0)\nY_sex = train['sex']-1\n\nmax_feaures = 35001\nembedding_matrix = np.zeros((max_feaures, embed_size))\nfor word in tokenizer.word_index:\n    if word not in fastmodel.wv.vocab:\n        continue\n    embedding_matrix[tokenizer.word_index[word]] = fastmodel[word]\n\n\nX_h = 
train[['h%s' % i for i in range(24)]].values\nX_h_test = test[['h%s' % i for i in range(24)]].values\n\n\nclass AdamW(Optimizer):\n    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, weight_decay=1e-4,  # decoupled weight decay (1/4)\n                 epsilon=1e-8, decay=0., **kwargs):\n        super(AdamW, self).__init__(**kwargs)\n        with K.name_scope(self.__class__.__name__):\n            self.iterations = K.variable(0, dtype='int64', name='iterations')\n            self.lr = K.variable(lr, name='lr')\n            self.beta_1 = K.variable(beta_1, name='beta_1')\n            self.beta_2 = K.variable(beta_2, name='beta_2')\n            self.decay = K.variable(decay, name='decay')\n            # decoupled weight decay (2/4)\n            self.wd = K.variable(weight_decay, name='weight_decay')\n        self.epsilon = epsilon\n        self.initial_decay = decay\n\n    @interfaces.legacy_get_updates_support\n    def get_updates(self, loss, params):\n        grads = self.get_gradients(loss, params)\n        self.updates = [K.update_add(self.iterations, 1)]\n        wd = self.wd  # decoupled weight decay (3/4)\n\n        lr = self.lr\n        if self.initial_decay > 0:\n            lr *= (1. / (1. + self.decay * K.cast(self.iterations,\n                                                  K.dtype(self.decay))))\n\n        t = K.cast(self.iterations, K.floatx()) + 1\n        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /\n                     (1. - K.pow(self.beta_1, t)))\n\n        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n        self.weights = [self.iterations] + ms + vs\n\n        for p, g, m, v in zip(params, grads, ms, vs):\n            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g\n            v_t = (self.beta_2 * v) + (1. 
- self.beta_2) * K.square(g)\n            # decoupled weight decay (4/4)\n            p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon) - lr * wd * p\n\n            self.updates.append(K.update(m, m_t))\n            self.updates.append(K.update(v, v_t))\n            new_p = p_t\n\n            # Apply constraints.\n            if getattr(p, 'constraint', None) is not None:\n                new_p = p.constraint(new_p)\n\n            self.updates.append(K.update(p, new_p))\n        return self.updates\n\n    def get_config(self):\n        config = {'lr': float(K.get_value(self.lr)),\n                  'beta_1': float(K.get_value(self.beta_1)),\n                  'beta_2': float(K.get_value(self.beta_2)),\n                  'decay': float(K.get_value(self.decay)),\n                  'weight_decay': float(K.get_value(self.wd)),\n                  'epsilon': self.epsilon}\n        base_config = super(AdamW, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\n\ndef model_conv1D(embedding_matrix):\n\n    K.clear_session()\n    # The embedding layer containing the word vectors\n    emb_layer = Embedding(\n        input_dim=embedding_matrix.shape[0],\n        output_dim=embedding_matrix.shape[1],\n        weights=[embedding_matrix],\n        input_length=maxlen,\n        trainable=False\n    )\n    lstm_layer = Bidirectional(\n        GRU(128, recurrent_dropout=0.15, dropout=0.15, return_sequences=True))\n\n    # 1D convolutions that can iterate over the word vectors\n    conv1 = Conv1D(filters=128, kernel_size=1,\n                   padding='same', activation='relu',)\n    conv2 = Conv1D(filters=64, kernel_size=2,\n                   padding='same', activation='relu', )\n    conv3 = Conv1D(filters=64, kernel_size=3,\n                   padding='same', activation='relu',)\n    conv5 = Conv1D(filters=32, kernel_size=5,\n                   padding='same', activation='relu',)\n\n    # Define inputs\n    seq = Input(shape=(maxlen,))\n\n    # Run inputs through embedding\n    emb = emb_layer(seq)\n\n    lstm = lstm_layer(emb)\n    # Run through CONV + GAP layers\n    conv1a = conv1(lstm)\n    gap1a = GlobalAveragePooling1D()(conv1a)\n    gmp1a = GlobalMaxPool1D()(conv1a)\n\n    conv2a = conv2(lstm)\n    gap2a = GlobalAveragePooling1D()(conv2a)\n    gmp2a = GlobalMaxPool1D()(conv2a)\n\n    conv3a = conv3(lstm)\n    gap3a = GlobalAveragePooling1D()(conv3a)\n    gmp3a = GlobalMaxPooling1D()(conv3a)\n\n    conv5a = conv5(lstm)\n    gap5a = GlobalAveragePooling1D()(conv5a)\n    gmp5a = GlobalMaxPooling1D()(conv5a)\n\n    hin = Input(shape=(24, ))\n    htime = Dense(6, activation='relu')(hin)\n    # NOTE: as written this merges four copies of the kernel-size-1 max-pool branch (gmp1a) with the hour\n    # features; the gap*/gmp2a/gmp3a/gmp5a branches above are computed but unused here, unlike in model_age_conv below.\n    merge1 = concatenate([gmp1a, gmp1a, gmp1a, gmp1a, htime])\n\n    # The MLP that determines the outcome\n    x = Dropout(0.3)(merge1)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.25)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.25)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.25)(x)\n    x = BatchNormalization()(x)\n    pred = Dense(1, activation='sigmoid')(x)\n\n    # model = Model(inputs=[seq1, seq2, magic_input, distance_input], outputs=pred)\n    model = Model(inputs=[seq, hin], outputs=pred)\n    model.compile(loss='binary_crossentropy',\n                  optimizer=AdamW(weight_decay=0.08,))\n\n    return model\n\n\nkfold = StratifiedKFold(n_splits=5, random_state=20, shuffle=True)\nsub1 = np.zeros((X_test.shape[0], 
))\noof_pref1 = np.zeros((X.shape[0], 1))\nscore = []\ncount = 0\nfor i, (train_index, test_index) in enumerate(kfold.split(X, Y_sex)):\n    print(\"FOLD | \", count+1)\n    filepath = \"sex_weights_best_%d.h5\" % count\n    checkpoint = ModelCheckpoint(\n        filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n    reduce_lr = ReduceLROnPlateau(\n        monitor='val_loss', factor=0.8, patience=2, min_lr=0.0001, verbose=1)\n    earlystopping = EarlyStopping(\n        monitor='val_loss', min_delta=0.0001, patience=6, verbose=1, mode='auto')\n    callbacks = [checkpoint, reduce_lr, earlystopping]\n\n    model_sex = model_conv1D(embedding_matrix)\n    X_tr, X_vl, X_tr2, X_vl2, y_tr, y_vl = X[train_index], X[test_index], X_h[\n        train_index], X_h[test_index], Y_sex[train_index], Y_sex[test_index]\n    hist = model_sex.fit([X_tr, X_tr2], y_tr, batch_size=256, epochs=50, validation_data=([X_vl, X_vl2], y_vl),\n                         callbacks=callbacks, verbose=1, shuffle=True)\n    model_sex.load_weights(filepath)\n    sub1 += np.squeeze(model_sex.predict([X_test, X_h_test]))/kfold.n_splits\n    oof_pref1[test_index] = model_sex.predict([X_vl, X_vl2])\n    score.append(np.min(hist.history['val_loss']))\n    count += 1\nprint('log loss:', np.mean(score))\n\n\noof_pref1 = pd.DataFrame(oof_pref1, columns=['sex2'])\nsub1 = pd.DataFrame(sub1, columns=['sex2'])\nres1 = pd.concat([oof_pref1, sub1])\nres1['sex1'] = 1-res1['sex2']\n# res1.to_csv(\"res1.csv\", index=False)\n\n\ndef model_age_conv(embedding_matrix):\n\n    # The embedding layer containing the word vectors\n    K.clear_session()\n    emb_layer = Embedding(\n        input_dim=embedding_matrix.shape[0],\n        output_dim=embedding_matrix.shape[1],\n        weights=[embedding_matrix],\n        input_length=maxlen,\n        trainable=False\n    )\n    lstm_layer = Bidirectional(\n        GRU(128, recurrent_dropout=0.15, dropout=0.15, return_sequences=True))\n\n    # 1D convolutions that can iterate over the word vectors\n    conv1 = Conv1D(filters=128, kernel_size=1,\n                   padding='same', activation='relu',)\n    conv2 = Conv1D(filters=64, kernel_size=2,\n                   padding='same', activation='relu', )\n    conv3 = Conv1D(filters=64, kernel_size=3,\n                   padding='same', activation='relu',)\n    conv5 = Conv1D(filters=32, kernel_size=5,\n                   padding='same', activation='relu',)\n\n    # Define inputs\n    seq = Input(shape=(maxlen,))\n\n    # Run inputs through embedding\n    emb = emb_layer(seq)\n\n    lstm = lstm_layer(emb)\n    # Run through CONV + GAP layers\n    conv1a = conv1(lstm)\n    gap1a = GlobalAveragePooling1D()(conv1a)\n    gmp1a = GlobalMaxPool1D()(conv1a)\n\n    conv2a = conv2(lstm)\n    gap2a = GlobalAveragePooling1D()(conv2a)\n    gmp2a = GlobalMaxPool1D()(conv2a)\n\n    conv3a = conv3(lstm)\n    gap3a = GlobalAveragePooling1D()(conv3a)\n    gmp3a = GlobalMaxPooling1D()(conv3a)\n\n    conv5a = conv5(lstm)\n    gap5a = GlobalAveragePooling1D()(conv5a)\n    gmp5a = GlobalMaxPooling1D()(conv5a)\n\n    merge1 = concatenate([gap1a, gap2a, gap3a, gap5a])\n\n    # The MLP that determines the outcome\n    x = Dropout(0.3)(merge1)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = 
BatchNormalization()(x)\n    pred = Dense(11, activation='softmax')(x)\n\n    model = Model(inputs=seq, outputs=pred)\n    model.compile(loss='categorical_crossentropy',\n                  optimizer=AdamW(weight_decay=0.08,))\n\n    return model\n\n\nY_age = to_categorical(train['age'])\n\nsub2 = np.zeros((X_test.shape[0], 11))\noof_pref2 = np.zeros((X.shape[0], 11))\nscore = []\ncount = 0\nfor i, (train_index, test_index) in enumerate(kfold.split(X, train['age'])):\n\n    print(\"FOLD | \", count+1)\n\n    filepath2 = \"age_weights_best_%d.h5\" % count\n    checkpoint2 = ModelCheckpoint(\n        filepath2, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\n    reduce_lr2 = ReduceLROnPlateau(\n        monitor='val_loss', factor=0.8, patience=2, min_lr=0.0001, verbose=1)\n    earlystopping2 = EarlyStopping(\n        monitor='val_loss', min_delta=0.0001, patience=8, verbose=1, mode='auto')\n    callbacks2 = [checkpoint2, reduce_lr2, earlystopping2]\n\n    X_tr, X_vl, y_tr, y_vl = X[train_index], X[test_index], Y_age[train_index], Y_age[test_index]\n\n    model_age = model_age_conv(embedding_matrix)\n    hist = model_age.fit(X_tr, y_tr, batch_size=256, epochs=50, validation_data=(X_vl, y_vl),\n                         callbacks=callbacks2, verbose=2, shuffle=True)\n\n    model_age.load_weights(filepath2)\n    oof_pref2[test_index] = model_age.predict(X_vl)\n    sub2 += model_age.predict(X_test)/kfold.n_splits\n    score.append(np.min(hist.history['val_loss']))\n    count += 1\nprint('log loss:', np.mean(score))\n\n\n# train (out-of-fold) rows stacked on top of test rows, matching res1's row order\nres2_1 = np.vstack((oof_pref2, sub2))\nres2_1 = pd.DataFrame(res2_1)\n# res2_1.to_csv(\"res2.csv\", index=False)\n\nres1.index = range(len(res1))\nres2_1.index = range(len(res2_1))\nfinal_1 = res2_1.copy()\nfinal_2 = res2_1.copy()\n# joint probabilities: P(sex=s, age=a) is approximated as P(sex=s) * P(age=a)\nfor i in range(11):\n    final_1[i] = res1['sex1']*res2_1[i]\n    final_2[i] = res1['sex2']*res2_1[i]\nid_list = pd.concat([train[['device_id']], test[['device_id']]])\nfinal = id_list.copy()\nfinal.index = range(len(final))\nfinal.columns = ['DeviceID']\nfinal_pred = pd.concat([final_1, final_2], axis=1)\nfinal = pd.concat([final, final_pred], axis=1)\nfinal.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6',\n                 '1-7', '1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4',\n                 '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\nfinal.to_csv('feature/yg_best_nn.csv', index=False)\n"
  },
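  {
    "path": "examples/joint_probability_sketch.py",
    "content": "\"\"\"Editor's illustrative sketch -- not part of the original solution.\n\nThe script above turns a binary sex probability and an 11-way age\ndistribution into the 22 submission columns '1-0'...'2-10'. Those columns\nare joint probabilities P(sex=s, age=a), approximated as P(sex=s)*P(age=a)\nunder an independence assumption. Function and variable names here are\nhypothetical.\n\"\"\"\nimport numpy as np\n\n\ndef combine_sex_age(p_sex2, p_age):\n    # p_sex2: shape (n,), probability that sex == 2\n    # p_age: shape (n, 11), distribution over the 11 age buckets\n    p_sex1 = 1.0 - p_sex2\n    # row-wise outer product -> (n, 22): first 11 columns sex=1, last 11 sex=2\n    return np.hstack([p_sex1[:, None] * p_age, p_sex2[:, None] * p_age])\n\n\nif __name__ == '__main__':\n    rng = np.random.RandomState(0)\n    p_sex2 = rng.rand(4)\n    p_age = rng.rand(4, 11)\n    p_age /= p_age.sum(axis=1, keepdims=True)\n    joint = combine_sex_age(p_sex2, p_age)\n    # each row of 22 joint probabilities still sums to 1\n    assert np.allclose(joint.sum(axis=1), 1.0)\n"
  },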
  {
    "path": "nb_cz_lwl_wcm/运行说明.txt",
    "content": "Demo文件夹下存放原始数据集\n按照1、2、3... 顺序运行，最后在feature文件夹下面生成feature_nurbs.csv\n"
  },
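  {
    "path": "nb_cz_lwl_wcm/run_all_example.py",
    "content": "# Editor's illustrative sketch -- not part of the original repo. It shows one\n# way to run the numbered scripts in this folder in the order the run\n# instructions describe; the glob pattern and folder layout are assumptions.\nimport glob\nimport re\nimport subprocess\nimport sys\n\n\ndef numeric_prefix(name):\n    # sort '10_x.py' after '2_x.py' by the leading number, not lexically\n    return int(re.match(r'(\\d+)', name).group(1))\n\n\nfor script in sorted(glob.glob('[0-9]*.py'), key=numeric_prefix):\n    print('running', script)\n    subprocess.run([sys.executable, script], check=True)\n# feature/feature_nurbs.csv should now exist\n"
  },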
  {
    "path": "wangcanming/deepnet_v33.py",
    "content": "import pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom datetime import datetime,timedelta  \nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n%matplotlib inline\n\n#add\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom scipy.sparse import hstack\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import cross_val_score\nfrom gensim.models import FastText, Word2Vec\nimport re\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.preprocessing.text import Tokenizer, text_to_word_sequence\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing import text, sequence\nfrom keras.callbacks import *\nfrom keras.layers.advanced_activations import LeakyReLU, PReLU\nimport keras.backend as K\nfrom keras.optimizers import *\nfrom keras.utils import to_categorical\n\npackages = pd.read_csv('../input/yiguan/demo/Demo/deviceid_packages.tsv', sep='\\t', names=['device_id', 'apps'])\ntest = pd.read_csv('../input/yiguan/demo/Demo/deviceid_test.tsv', sep='\\t', names=['device_id'])\ntrain = pd.read_csv('../input/yiguan/demo/Demo/deviceid_train.tsv', sep='\\t', names=['device_id', 'sex', 'age'])\n\nbrand = pd.read_table('../input/yiguan/demo/Demo/deviceid_brand.tsv', names=['device_id', 'vendor', 'version'])\n\n\npackages['app_lenghth'] = packages['apps'].apply(lambda x:x.split(',')).apply(lambda x:len(x))\npackages['app_list'] = packages['apps'].apply(lambda x:x.split(','))\ntrain = pd.merge(train, packages, on='device_id', how='left')\ntest = pd.merge(test, packages, on='device_id', how='left')\n\nembed_size = 128\nfastmodel = Word2Vec(list(packages['app_list']), size=embed_size, window=4, min_count=1, negative=2,\n                 sg=1, sample=0.001, hs=1, workers=4)  \n\nembedding_fast = pd.DataFrame([fastmodel[word] for word in (fastmodel.wv.vocab)])\nembedding_fast['app'] = list(fastmodel.wv.vocab)\nembedding_fast.columns= [\"fdim_%s\" % str(i) for i in range(embed_size)]+[\"app\"]\n\ntokenizer = Tokenizer(lower=False, char_level=False, split=',')\n\ntokenizer.fit_on_texts(list(packages['apps']))\n\nX_seq = tokenizer.texts_to_sequences(train['apps'])\nX_test_seq = tokenizer.texts_to_sequences(test['apps'])\n\nmaxlen = 50\nX = pad_sequences(X_seq, maxlen=maxlen, value=0)\nX_test = pad_sequences(X_test_seq, maxlen=maxlen, value=0)\nY_sex = train['sex']-1\n\nmax_feaures=35001\nembedding_matrix = np.zeros((max_feaures, embed_size))\nfor word in tokenizer.word_index:\n    if word not in fastmodel.wv.vocab:\n        continue\n    embedding_matrix[tokenizer.word_index[word]] = fastmodel[word]\n\nclass AdamW(Optimizer):\n    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, weight_decay=1e-4,  # decoupled weight decay (1/4)\n                 epsilon=1e-8, decay=0., **kwargs):\n        super(AdamW, self).__init__(**kwargs)\n        with K.name_scope(self.__class__.__name__):\n            self.iterations = K.variable(0, dtype='int64', name='iterations')\n            self.lr = K.variable(lr, name='lr')\n            self.beta_1 = K.variable(beta_1, name='beta_1')\n            self.beta_2 = K.variable(beta_2, name='beta_2')\n            self.decay = K.variable(decay, name='decay')\n            
            self.wd = K.variable(weight_decay, name='weight_decay') # decoupled weight decay (2/4)\n        self.epsilon = epsilon\n        self.initial_decay = decay\n\n    @interfaces.legacy_get_updates_support\n    def get_updates(self, loss, params):\n        grads = self.get_gradients(loss, params)\n        self.updates = [K.update_add(self.iterations, 1)]\n        wd = self.wd # decoupled weight decay (3/4)\n\n        lr = self.lr\n        if self.initial_decay > 0:\n            lr *= (1. / (1. + self.decay * K.cast(self.iterations,\n                                                  K.dtype(self.decay))))\n\n        t = K.cast(self.iterations, K.floatx()) + 1\n        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /\n                     (1. - K.pow(self.beta_1, t)))\n\n        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n        self.weights = [self.iterations] + ms + vs\n\n        for p, g, m, v in zip(params, grads, ms, vs):\n            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g\n            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)\n            p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon) - lr * wd * p # decoupled weight decay (4/4)\n\n            self.updates.append(K.update(m, m_t))\n            self.updates.append(K.update(v, v_t))\n            new_p = p_t\n\n            # Apply constraints.\n            if getattr(p, 'constraint', None) is not None:\n                new_p = p.constraint(new_p)\n\n            self.updates.append(K.update(p, new_p))\n        return self.updates\n\n    def get_config(self):\n        config = {'lr': float(K.get_value(self.lr)),\n                  'beta_1': float(K.get_value(self.beta_1)),\n                  'beta_2': float(K.get_value(self.beta_2)),\n                  'decay': float(K.get_value(self.decay)),\n                  'weight_decay': float(K.get_value(self.wd)),\n                  'epsilon': self.epsilon}\n        base_config = super(AdamW, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\ndef model_conv1D_sex(embedding_matrix):\n\n    K.clear_session()\n    # The embedding layer containing the word vectors\n    emb_layer = Embedding(\n        input_dim=embedding_matrix.shape[0],\n        output_dim=embedding_matrix.shape[1],\n        weights=[embedding_matrix],\n        input_length=maxlen,\n        trainable=False)\n\n    # Define inputs\n    seq = Input(shape=(maxlen,))\n\n    # Run inputs through embedding\n    emb = emb_layer(seq)\n\n    lstm_layer = Bidirectional(GRU(128, recurrent_dropout=0.15, dropout=0.15,))\n    lstm = lstm_layer(emb)\n\n    # position-wise Dense over the raw embeddings, summed over time (an order-insensitive \"bag of apps\" branch)\n    translate = TimeDistributed(Dense(128, activation='relu'))\n    t1 = translate(emb)\n    t1 = TimeDistributed(Dropout(0.15))(t1)\n    sum_op = Lambda(lambda x: K.sum(x, axis=1), output_shape=(128,))\n    t1 = sum_op(t1)\n\n    merge1 = concatenate([lstm, t1])\n\n    # The MLP that determines the outcome\n    x = Dropout(0.24)(merge1)\n    #x = BatchNormalization()(x)\n    #x = Dense(200, activation='relu',)(x)\n    #x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    pred = Dense(1, activation='sigmoid')(x)\n\n    model = 
Model(inputs=seq, outputs=pred)\n    model.compile(loss='binary_crossentropy', optimizer=AdamW(weight_decay=0.1,))###\n\n    return model\n\n\nkfold = StratifiedKFold(n_splits=5, random_state=20, shuffle=True)\nsub1 = np.zeros((X_test.shape[0], ))\noof_pref1 = np.zeros((X.shape[0], 1))\nscore = []\nfor i, (train_index, test_index) in enumerate(kfold.split(X, Y_sex)):\n    filepath=\"weights_best.h5\"\n    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=2, save_best_only=True, mode='min')\n    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, min_lr=0.0001, verbose=2)\n    earlystopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=8, verbose=2, mode='auto')\n    callbacks = [checkpoint, reduce_lr, earlystopping]\n    model_sex = model_conv1D_sex(embedding_matrix)\n    X_tr, X_vl, y_tr, y_vl = X[train_index], X[test_index], Y_sex[train_index], Y_sex[test_index]\n    hist = model_sex.fit(X_tr, y_tr, batch_size=512, epochs=50, validation_data=(X_vl, y_vl),\n                 callbacks=callbacks, verbose=2, shuffle=True)\n    model_sex.load_weights(filepath)\n    sub1 += np.squeeze(model_sex.predict(X_test))/kfold.n_splits\n    oof_pref1[test_index] = model_sex.predict(X_vl)\n    score.append(np.min(hist.history['val_loss']))\nprint('log loss:',np.mean(score))\n\ndef model_age_conv(embedding_matrix):\n    \n    K.clear_session()\n    # The embedding layer containing the word vectors\n    emb_layer = Embedding(\n        input_dim=embedding_matrix.shape[0],\n        output_dim=embedding_matrix.shape[1],\n        weights=[embedding_matrix],\n        input_length=maxlen,\n        trainable=False)\n    \n    # Define inputs\n    seq = Input(shape=(maxlen,))\n    \n    # Run inputs through embedding\n    emb = emb_layer(seq)\n    \n    lstm_layer = Bidirectional(GRU(128, recurrent_dropout=0.15, dropout=0.15,))\n    lstm = lstm_layer(emb)\n    \n    translate = TimeDistributed(Dense(128, activation='relu'))\n    t1 = translate(emb)\n    t1 = TimeDistributed(Dropout(0.15))(t1)\n    sum_op = Lambda(lambda x: K.sum(x, axis=1), output_shape=(128,))\n    t1 = sum_op(t1)\n    \n    merge1 = concatenate([lstm, t1])\n    \n    # The MLP that determines the outcome\n    x = Dropout(0.24)(merge1)\n    #x = BatchNormalization()(x)\n    #x = Dense(200, activation='relu',)(x)\n    #x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    x = Dense(200, activation='relu',)(x)\n    x = Dropout(0.22)(x)\n    x = BatchNormalization()(x)\n    pred = Dense(11, activation='softmax')(x)\n\n    model = Model(inputs=seq, outputs=pred)\n    model.compile(loss='categorical_crossentropy', optimizer=AdamW(weight_decay=0.1,))\n\n    return model\n\nY_age = to_categorical(train['age'])\n\nsub2 = np.zeros((X_test.shape[0], 11))\noof_pref2 = np.zeros((X.shape[0], 11))\nscore = []\n\n\nfor i, (train_index, test_index) in enumerate(kfold.split(X, train['age'])):\n    filepath2=\"weights_best2.h5\"\n    checkpoint2 = ModelCheckpoint(filepath2, monitor='val_loss', verbose=2, save_best_only=True, mode='min')\n    reduce_lr2 = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, min_lr=0.0001, verbose=2)\n    earlystopping2 = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=8, verbose=2, mode='auto')\n    callbacks2 = [checkpoint2, reduce_lr2, earlystopping2]\n    model_age = model_age_conv(embedding_matrix)\n    X_tr, X_vl, y_tr, y_vl = X[train_index], 
X[test_index], Y_age[train_index], Y_age[test_index]\n    hist = model_age.fit(X_tr, y_tr, batch_size=512, epochs=50, validation_data=(X_vl, y_vl),\n                 callbacks=callbacks2, verbose=2, shuffle=True)\n\n    model_age.load_weights(filepath2)\n    sub2 += model_age.predict(X_test)/kfold.n_splits\n    oof_pref2[test_index] = model_age.predict(X_vl)\n    score.append(np.min(hist.history['val_loss']))\n\nprint('log loss:', np.mean(score))\n\nsub1 = pd.DataFrame(sub1, columns=['sex2'])\noof_pref1 = pd.DataFrame(oof_pref1, columns=['sex2'])\nsub1['sex1'] = 1-sub1['sex2']\noof_pref1['sex1'] = 1-oof_pref1['sex2']\nsub2 = pd.DataFrame(sub2, columns=['age%s' % i for i in range(11)])\noof_pref2 = pd.DataFrame(oof_pref2, columns=['age%s' % i for i in range(11)])\nsub = test[['device_id']].copy()\nsub.columns = ['DeviceID']\noof = train[['device_id']].copy()\noof.columns = ['DeviceID']\n# joint probabilities: P(sex=s, age=a) is approximated as P(sex=s) * P(age=a)\nfor s in ['sex1', 'sex2']:\n    for a in ['age%s' % k for k in range(11)]:\n        sub[s+'_'+a] = sub1[s]*sub2[a]\n        oof[s+'_'+a] = oof_pref1[s]*oof_pref2[a]\nsub.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6',\n         '1-7', '1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4',\n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\noof.columns = ['DeviceID', '1-0', '1-1', '1-2', '1-3', '1-4', '1-5', '1-6',\n         '1-7', '1-8', '1-9', '1-10', '2-0', '2-1', '2-2', '2-3', '2-4',\n         '2-5', '2-6', '2-7', '2-8', '2-9', '2-10']\n\nsub.to_csv('deepnet_v33.csv', index=False)\noof.to_csv('deepnet_oof_v33.csv', index=False)\n\n# stack train (oof) and test predictions into one feature file for the next-level model\ndf_stack = pd.concat([oof, sub])\ndf_stack.to_csv('feature_wcm.csv')\n\n"
  }
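  ,
  {
    "path": "examples/oof_stacking_sketch.py",
    "content": "\"\"\"Editor's illustrative sketch -- not part of the original solution.\n\nBoth neural-net scripts above follow the same stacking recipe: a 5-fold\nStratifiedKFold in which each fold's held-out predictions fill the\ntrain-side (out-of-fold) feature rows, while test predictions are averaged\nover the folds. This is a minimal, model-agnostic version of that loop;\nthe LogisticRegression stand-in and the function name are hypothetical.\n\"\"\"\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import StratifiedKFold\n\n\ndef oof_stack(X, y, X_test, n_splits=5, seed=20):\n    kfold = StratifiedKFold(n_splits=n_splits, random_state=seed, shuffle=True)\n    oof = np.zeros(len(X))       # out-of-fold predictions for train rows\n    sub = np.zeros(len(X_test))  # fold-averaged predictions for test rows\n    for train_index, test_index in kfold.split(X, y):\n        model = LogisticRegression()\n        model.fit(X[train_index], y[train_index])\n        oof[test_index] = model.predict_proba(X[test_index])[:, 1]\n        sub += model.predict_proba(X_test)[:, 1] / n_splits\n    # oof and sub can then be concatenated into one feature file, as above\n    return oof, sub\n"
  }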
]