[
  {
    "path": ".gitignore",
    "content": ".idea\n__pycache__\nplayground.py\n"
  },
  {
    "path": "line.py",
    "content": "import tensorflow as tf\nimport numpy as np\nimport argparse\nfrom model import LINEModel\nfrom utils import DBLPDataLoader\nimport pickle\nimport time\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--embedding_dim', default=128)\n    parser.add_argument('--batch_size', default=128)\n    parser.add_argument('--K', default=5)\n    parser.add_argument('--proximity', default='second-order', help='first-order or second-order')\n    parser.add_argument('--learning_rate', default=0.025)\n    parser.add_argument('--mode', default='train')\n    parser.add_argument('--num_batches', default=300000)\n    parser.add_argument('--total_graph', default=True)\n    parser.add_argument('--graph_file', default='data/co-authorship_graph.pkl')\n    args = parser.parse_args()\n    if args.mode == 'train':\n        train(args)\n    elif args.mode == 'test':\n        test(args)\n\n\ndef train(args):\n    data_loader = DBLPDataLoader(graph_file=args.graph_file)\n    suffix = args.proximity\n    args.num_of_nodes = data_loader.num_of_nodes\n    model = LINEModel(args)\n    with tf.Session() as sess:\n        print(args)\n        print('batches\\tloss\\tsampling time\\ttraining_time\\tdatetime')\n        tf.global_variables_initializer().run()\n        initial_embedding = sess.run(model.embedding)\n        learning_rate = args.learning_rate\n        sampling_time, training_time = 0, 0\n        for b in range(args.num_batches):\n            t1 = time.time()\n            u_i, u_j, label = data_loader.fetch_batch(batch_size=args.batch_size, K=args.K)\n            feed_dict = {model.u_i: u_i, model.u_j: u_j, model.label: label, model.learning_rate: learning_rate}\n            t2 = time.time()\n            sampling_time += t2 - t1\n            if b % 100 != 0:\n                sess.run(model.train_op, feed_dict=feed_dict)\n                training_time += time.time() - t2\n                if learning_rate > args.learning_rate * 0.0001:\n                    learning_rate = args.learning_rate * (1 - b / args.num_batches)\n                else:\n                    learning_rate = args.learning_rate * 0.0001\n            else:\n                loss = sess.run(model.loss, feed_dict=feed_dict)\n                print('%d\\t%f\\t%0.2f\\t%0.2f\\t%s' % (b, loss, sampling_time, training_time,\n                                                    time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())))\n                sampling_time, training_time = 0, 0\n            if b % 1000 == 0 or b == (args.num_batches - 1):\n                embedding = sess.run(model.embedding)\n                normalized_embedding = embedding / np.linalg.norm(embedding, axis=1, keepdims=True)\n                pickle.dump(data_loader.embedding_mapping(normalized_embedding),\n                            open('data/embedding_%s.pkl' % suffix, 'wb'))\n\n\ndef test(args):\n    pass\n\nif __name__ == '__main__':\n    main()"
  },
  {
    "path": "model.py",
    "content": "import tensorflow as tf\n\n\nclass LINEModel:\n    def __init__(self, args):\n        self.u_i = tf.placeholder(name='u_i', dtype=tf.int32, shape=[args.batch_size * (args.K + 1)])\n        self.u_j = tf.placeholder(name='u_j', dtype=tf.int32, shape=[args.batch_size * (args.K + 1)])\n        self.label = tf.placeholder(name='label', dtype=tf.float32, shape=[args.batch_size * (args.K + 1)])\n        self.embedding = tf.get_variable('target_embedding', [args.num_of_nodes, args.embedding_dim],\n                                         initializer=tf.random_uniform_initializer(minval=-1., maxval=1.))\n        self.u_i_embedding = tf.matmul(tf.one_hot(self.u_i, depth=args.num_of_nodes), self.embedding)\n        if args.proximity == 'first-order':\n            self.u_j_embedding = tf.matmul(tf.one_hot(self.u_j, depth=args.num_of_nodes), self.embedding)\n        elif args.proximity == 'second-order':\n            self.context_embedding = tf.get_variable('context_embedding', [args.num_of_nodes, args.embedding_dim],\n                                                     initializer=tf.random_uniform_initializer(minval=-1., maxval=1.))\n            self.u_j_embedding = tf.matmul(tf.one_hot(self.u_j, depth=args.num_of_nodes), self.context_embedding)\n\n        self.inner_product = tf.reduce_sum(self.u_i_embedding * self.u_j_embedding, axis=1)\n        self.loss = -tf.reduce_mean(tf.log_sigmoid(self.label * self.inner_product))\n        self.learning_rate = tf.placeholder(name='learning_rate', dtype=tf.float32)\n        # self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)\n        self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate)\n        self.train_op = self.optimizer.minimize(self.loss)\n\n\n"
  },
  {
    "path": "readme.md",
    "content": "# LINE in TensorFlow\n\nTensorFlow implementation of paper _LINE: Large-scale Information Network Embedding_ by Jian Tang, et al.\n\nYou can see [my slide](Network_Embedding_with_TensorFlow.pdf) on GDG DevFest 2017 for more detail about LINE and TensorFlow. Notice: code shown in the slide are pseudocode, minibatch and negative sampling are omitted in the slide. \n\n## Prerequisites\n\n* Python 3.6\n* TensorFlow 1.3.0\n* Networkx\n* NumPy\n\n## Setup\n\n* Prepare a network using networkx. Write the graph to a file by [nx.write_gpickle](https://networkx.github.io/documentation/stable/reference/readwrite/generated/networkx.readwrite.gpickle.write_gpickle.html).\n* Put the network file in `data` folder.\n* Run `line.py --graph_file graph.pkl` to start training. `graph.pkl` is the name of your network file.\n* Embedding will be stored in `data/embedding_XXX-order.pkl`. You can load it by `pickle.load()` in python.\n\n## References\n\n- Tang, Jian, et al. \"[Line: Large-scale information network embedding.](https://dl.acm.org/citation.cfm?id=2741093)\" _Proceedings of the 24th International Conference on World Wide Web. International World Wide Web Conferences Steering Committee_, 2015.\n"
  },
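  {
    "path": "example.py",
    "content": "\"\"\"Minimal end-to-end sketch of using this repo.\n\nBuilds a toy weighted graph with NetworkX, saves it in the gpickle format\nthat line.py expects (every edge needs a 'weight' attribute), and shows how\nto read back the embedding file that training writes. The toy graph and the\nfile names below are illustrative only.\n\"\"\"\nimport os\nimport pickle\n\nimport networkx as nx\n\nos.makedirs('data', exist_ok=True)\n\n# 1. Build a small weighted co-authorship-style graph. The data loader in\n#    utils.py reads attr['weight'] on every edge, so weights are mandatory.\ng = nx.Graph()\ng.add_edge('alice', 'bob', weight=3.0)\ng.add_edge('bob', 'carol', weight=1.0)\ng.add_edge('alice', 'carol', weight=2.0)\ng.add_edge('carol', 'dave', weight=1.0)\n\n# 2. Save it where line.py can find it.\nnx.write_gpickle(g, 'data/toy_graph.pkl')\n\n# 3. Train from the command line, e.g.:\n#    python line.py --graph_file data/toy_graph.pkl --num_batches 10000\n#    Training periodically writes data/embedding_second-order.pkl\n#    (or data/embedding_first-order.pkl, depending on --proximity).\n\n# 4. After training, load the result: a dict mapping each original node id\n#    to its L2-normalized embedding vector.\nemb_file = 'data/embedding_second-order.pkl'\nif os.path.exists(emb_file):\n    with open(emb_file, 'rb') as f:\n        embedding = pickle.load(f)\n    print(embedding['alice'].shape)  # (embedding_dim,)\n"
  },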
  {
    "path": "utils.py",
    "content": "import networkx as nx\nimport numpy as np\n\n\nclass DBLPDataLoader:\n    def __init__(self, graph_file):\n        self.g = nx.read_gpickle(graph_file)\n        self.num_of_nodes = self.g.number_of_nodes()\n        self.num_of_edges = self.g.number_of_edges()\n        self.edges_raw = self.g.edges(data=True)\n        self.nodes_raw = self.g.nodes(data=True)\n\n        self.edge_distribution = np.array([attr['weight'] for _, _, attr in self.edges_raw], dtype=np.float32)\n        self.edge_distribution /= np.sum(self.edge_distribution)\n        self.edge_sampling = AliasSampling(prob=self.edge_distribution)\n        self.node_negative_distribution = np.power(\n            np.array([self.g.degree(node, weight='weight') for node, _ in self.nodes_raw], dtype=np.float32), 0.75)\n        self.node_negative_distribution /= np.sum(self.node_negative_distribution)\n        self.node_sampling = AliasSampling(prob=self.node_negative_distribution)\n\n        self.node_index = {}\n        self.node_index_reversed = {}\n        for index, (node, _) in enumerate(self.nodes_raw):\n            self.node_index[node] = index\n            self.node_index_reversed[index] = node\n        self.edges = [(self.node_index[u], self.node_index[v]) for u, v, _ in self.edges_raw]\n\n    def fetch_batch(self, batch_size=16, K=10, edge_sampling='atlas', node_sampling='atlas'):\n        if edge_sampling == 'numpy':\n            edge_batch_index = np.random.choice(self.num_of_edges, size=batch_size, p=self.edge_distribution)\n        elif edge_sampling == 'atlas':\n            edge_batch_index = self.edge_sampling.sampling(batch_size)\n        elif edge_sampling == 'uniform':\n            edge_batch_index = np.random.randint(0, self.num_of_edges, size=batch_size)\n        u_i = []\n        u_j = []\n        label = []\n        for edge_index in edge_batch_index:\n            edge = self.edges[edge_index]\n            if self.g.__class__ == nx.Graph:\n                if np.random.rand() > 0.5:      # important: second-order proximity is for directed edge\n                    edge = (edge[1], edge[0])\n            u_i.append(edge[0])\n            u_j.append(edge[1])\n            label.append(1)\n            for i in range(K):\n                while True:\n                    if node_sampling == 'numpy':\n                        negative_node = np.random.choice(self.num_of_nodes, p=self.node_negative_distribution)\n                    elif node_sampling == 'atlas':\n                        negative_node = self.node_sampling.sampling()\n                    elif node_sampling == 'uniform':\n                        negative_node = np.random.randint(0, self.num_of_nodes)\n                    if not self.g.has_edge(self.node_index_reversed[negative_node], self.node_index_reversed[edge[0]]):\n                        break\n                u_i.append(edge[0])\n                u_j.append(negative_node)\n                label.append(-1)\n        return u_i, u_j, label\n\n    def embedding_mapping(self, embedding):\n        return {node: embedding[self.node_index[node]] for node, _ in self.nodes_raw}\n\n\nclass AliasSampling:\n\n    # Reference: https://en.wikipedia.org/wiki/Alias_method\n\n    def __init__(self, prob):\n        self.n = len(prob)\n        self.U = np.array(prob) * self.n\n        self.K = [i for i in range(len(prob))]\n        overfull, underfull = [], []\n        for i, U_i in enumerate(self.U):\n            if U_i > 1:\n                overfull.append(i)\n            elif U_i < 1:\n                
underfull.append(i)\n        while len(overfull) and len(underfull):\n            i, j = overfull.pop(), underfull.pop()\n            self.K[j] = i\n            self.U[i] = self.U[i] - (1 - self.U[j])\n            if self.U[i] > 1:\n                overfull.append(i)\n            elif self.U[i] < 1:\n                underfull.append(i)\n\n    def sampling(self, n=1):\n        x = np.random.rand(n)\n        i = np.floor(self.n * x)\n        y = self.n * x - i\n        i = i.astype(np.int32)\n        res = [i[k] if y[k] < self.U[i[k]] else self.K[i[k]] for k in range(n)]\n        if n == 1:\n            return res[0]\n        else:\n            return res\n\n"
  }
]