Repository: yueliu1999/DCRN
Branch: main
Commit: 189f6f389a3e
Files: 12
Total size: 36.0 KB
Directory structure:
gitextract_5pir9mvx/
├── DCRN.py
├── LICENSE
├── README.md
├── main.py
├── model_pretrain/
│ ├── acm_pretrain.pkl
│ ├── amap_pretrain.pkl
│ ├── cite_pretrain.pkl
│ └── dblp_pretrain.pkl
├── opt.py
├── requirements.txt
├── train.py
└── utils.py
================================================
FILE CONTENTS
================================================
================================================
FILE: DCRN.py
================================================
import opt
import torch
from torch import nn
from torch.nn import Linear
import torch.nn.functional as F
from torch.nn import Module, Parameter
# AE encoder from DFCN
class AE_encoder(nn.Module):
    def __init__(self, ae_n_enc_1, ae_n_enc_2, ae_n_enc_3, n_input, n_z):
        super(AE_encoder, self).__init__()
        self.enc_1 = Linear(n_input, ae_n_enc_1)
        self.enc_2 = Linear(ae_n_enc_1, ae_n_enc_2)
        self.enc_3 = Linear(ae_n_enc_2, ae_n_enc_3)
        self.z_layer = Linear(ae_n_enc_3, n_z)
        self.act = nn.LeakyReLU(0.2, inplace=True)

    def forward(self, x):
        z = self.act(self.enc_1(x))
        z = self.act(self.enc_2(z))
        z = self.act(self.enc_3(z))
        z_ae = self.z_layer(z)
        return z_ae


# AE decoder from DFCN
class AE_decoder(nn.Module):
    def __init__(self, ae_n_dec_1, ae_n_dec_2, ae_n_dec_3, n_input, n_z):
        super(AE_decoder, self).__init__()
        self.dec_1 = Linear(n_z, ae_n_dec_1)
        self.dec_2 = Linear(ae_n_dec_1, ae_n_dec_2)
        self.dec_3 = Linear(ae_n_dec_2, ae_n_dec_3)
        self.x_bar_layer = Linear(ae_n_dec_3, n_input)
        self.act = nn.LeakyReLU(0.2, inplace=True)

    def forward(self, z_ae):
        z = self.act(self.dec_1(z_ae))
        z = self.act(self.dec_2(z))
        z = self.act(self.dec_3(z))
        x_hat = self.x_bar_layer(z)
        return x_hat


# Auto Encoder from DFCN
class AE(nn.Module):
    def __init__(self, ae_n_enc_1, ae_n_enc_2, ae_n_enc_3, ae_n_dec_1, ae_n_dec_2, ae_n_dec_3, n_input, n_z):
        super(AE, self).__init__()
        self.encoder = AE_encoder(
            ae_n_enc_1=ae_n_enc_1,
            ae_n_enc_2=ae_n_enc_2,
            ae_n_enc_3=ae_n_enc_3,
            n_input=n_input,
            n_z=n_z)

        self.decoder = AE_decoder(
            ae_n_dec_1=ae_n_dec_1,
            ae_n_dec_2=ae_n_dec_2,
            ae_n_dec_3=ae_n_dec_3,
            n_input=n_input,
            n_z=n_z)


# GNNLayer from DFCN
class GNNLayer(Module):
    def __init__(self, in_features, out_features):
        super(GNNLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        if opt.args.name == "dblp":
            self.act = nn.Tanh()
            self.weight = Parameter(torch.FloatTensor(out_features, in_features))
        else:
            self.act = nn.Tanh()
            self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        torch.nn.init.xavier_uniform_(self.weight)

    def forward(self, features, adj, active=False):
        if active:
            if opt.args.name == "dblp":
                support = self.act(F.linear(features, self.weight))
            else:
                support = self.act(torch.mm(features, self.weight))
        else:
            if opt.args.name == "dblp":
                support = F.linear(features, self.weight)
            else:
                support = torch.mm(features, self.weight)
        output = torch.spmm(adj, support)
        az = torch.spmm(adj, output)
        return output, az


# IGAE encoder from DFCN
class IGAE_encoder(nn.Module):
    def __init__(self, gae_n_enc_1, gae_n_enc_2, gae_n_enc_3, n_input):
        super(IGAE_encoder, self).__init__()
        self.gnn_1 = GNNLayer(n_input, gae_n_enc_1)
        self.gnn_2 = GNNLayer(gae_n_enc_1, gae_n_enc_2)
        self.gnn_3 = GNNLayer(gae_n_enc_2, gae_n_enc_3)
        self.s = nn.Sigmoid()

    def forward(self, x, adj):
        z_1, az_1 = self.gnn_1(x, adj, active=True)
        z_2, az_2 = self.gnn_2(z_1, adj, active=True)
        z_igae, az_3 = self.gnn_3(z_2, adj, active=False)
        z_igae_adj = self.s(torch.mm(z_igae, z_igae.t()))
        return z_igae, z_igae_adj, [az_1, az_2, az_3], [z_1, z_2, z_igae]


# IGAE decoder from DFCN
class IGAE_decoder(nn.Module):
    def __init__(self, gae_n_dec_1, gae_n_dec_2, gae_n_dec_3, n_input):
        super(IGAE_decoder, self).__init__()
        self.gnn_4 = GNNLayer(gae_n_dec_1, gae_n_dec_2)
        self.gnn_5 = GNNLayer(gae_n_dec_2, gae_n_dec_3)
        self.gnn_6 = GNNLayer(gae_n_dec_3, n_input)
        self.s = nn.Sigmoid()

    def forward(self, z_igae, adj):
        z_1, az_1 = self.gnn_4(z_igae, adj, active=True)
        z_2, az_2 = self.gnn_5(z_1, adj, active=True)
        z_hat, az_3 = self.gnn_6(z_2, adj, active=True)
        z_hat_adj = self.s(torch.mm(z_hat, z_hat.t()))
        return z_hat, z_hat_adj, [az_1, az_2, az_3], [z_1, z_2, z_hat]


# Improved Graph Auto Encoder from DFCN
class IGAE(nn.Module):
    def __init__(self, gae_n_enc_1, gae_n_enc_2, gae_n_enc_3, gae_n_dec_1, gae_n_dec_2, gae_n_dec_3, n_input):
        super(IGAE, self).__init__()
        # IGAE encoder
        self.encoder = IGAE_encoder(
            gae_n_enc_1=gae_n_enc_1,
            gae_n_enc_2=gae_n_enc_2,
            gae_n_enc_3=gae_n_enc_3,
            n_input=n_input)

        # IGAE decoder
        self.decoder = IGAE_decoder(
            gae_n_dec_1=gae_n_dec_1,
            gae_n_dec_2=gae_n_dec_2,
            gae_n_dec_3=gae_n_dec_3,
            n_input=n_input)


# readout function
class Readout(nn.Module):
    def __init__(self, K):
        super(Readout, self).__init__()
        self.K = K

    def forward(self, Z):
        # calculate cluster-level embedding
        Z_tilde = []

        # step 1: split the nodes into K groups
        # step 2: average the node embeddings in each group
        n_node = Z.shape[0]
        step = n_node // self.K
        for i in range(0, n_node, step):
            if n_node - i < 2 * step:
                Z_tilde.append(torch.mean(Z[i:n_node], dim=0))
                break
            else:
                Z_tilde.append(torch.mean(Z[i:i + step], dim=0))

        # the cluster-level embedding
        Z_tilde = torch.cat(Z_tilde, dim=0)
        return Z_tilde.view(1, -1)


# Dual Correlation Reduction Network
class DCRN(nn.Module):
    def __init__(self, n_node=None):
        super(DCRN, self).__init__()
        # Auto Encoder
        self.ae = AE(
            ae_n_enc_1=opt.args.ae_n_enc_1,
            ae_n_enc_2=opt.args.ae_n_enc_2,
            ae_n_enc_3=opt.args.ae_n_enc_3,
            ae_n_dec_1=opt.args.ae_n_dec_1,
            ae_n_dec_2=opt.args.ae_n_dec_2,
            ae_n_dec_3=opt.args.ae_n_dec_3,
            n_input=opt.args.n_input,
            n_z=opt.args.n_z)

        # Improved Graph Auto Encoder from DFCN
        self.gae = IGAE(
            gae_n_enc_1=opt.args.gae_n_enc_1,
            gae_n_enc_2=opt.args.gae_n_enc_2,
            gae_n_enc_3=opt.args.gae_n_enc_3,
            gae_n_dec_1=opt.args.gae_n_dec_1,
            gae_n_dec_2=opt.args.gae_n_dec_2,
            gae_n_dec_3=opt.args.gae_n_dec_3,
            n_input=opt.args.n_input)

        # fusion parameter from DFCN
        self.a = Parameter(nn.init.constant_(torch.zeros(n_node, opt.args.n_z), 0.5), requires_grad=True)
        self.b = Parameter(nn.init.constant_(torch.zeros(n_node, opt.args.n_z), 0.5), requires_grad=True)
        self.alpha = Parameter(torch.zeros(1))

        # cluster layer (clustering assignment matrix)
        self.cluster_centers = Parameter(torch.Tensor(opt.args.n_clusters, opt.args.n_z), requires_grad=True)

        # readout function
        self.R = Readout(K=opt.args.n_clusters)

    # calculate the soft assignment distribution Q
    def q_distribute(self, Z, Z_ae, Z_igae):
        """
        calculate the soft assignment distribution based on the embedding and the cluster centers
        Args:
            Z: fusion node embedding
            Z_ae: node embedding encoded by AE
            Z_igae: node embedding encoded by IGAE
        Returns:
            the soft assignment distribution Q
        """
        q = 1.0 / (1.0 + torch.sum(torch.pow(Z.unsqueeze(1) - self.cluster_centers, 2), 2))
        q = (q.t() / torch.sum(q, 1)).t()

        q_ae = 1.0 / (1.0 + torch.sum(torch.pow(Z_ae.unsqueeze(1) - self.cluster_centers, 2), 2))
        q_ae = (q_ae.t() / torch.sum(q_ae, 1)).t()

        q_igae = 1.0 / (1.0 + torch.sum(torch.pow(Z_igae.unsqueeze(1) - self.cluster_centers, 2), 2))
        q_igae = (q_igae.t() / torch.sum(q_igae, 1)).t()

        return [q, q_ae, q_igae]

    def forward(self, X_tilde1, Am, X_tilde2, Ad):
        # node embedding encoded by AE
        Z_ae1 = self.ae.encoder(X_tilde1)
        Z_ae2 = self.ae.encoder(X_tilde2)

        # node embedding encoded by IGAE
        Z_igae1, A_igae1, AZ_1, Z_1 = self.gae.encoder(X_tilde1, Am)
        Z_igae2, A_igae2, AZ_2, Z_2 = self.gae.encoder(X_tilde2, Ad)

        # cluster-level embedding calculated by the readout function
        Z_tilde_ae1 = self.R(Z_ae1)
        Z_tilde_ae2 = self.R(Z_ae2)
        Z_tilde_igae1 = self.R(Z_igae1)
        Z_tilde_igae2 = self.R(Z_igae2)

        # linear combination of view 1 and view 2
        Z_ae = (Z_ae1 + Z_ae2) / 2
        Z_igae = (Z_igae1 + Z_igae2) / 2

        # node embedding fusion from DFCN
        Z_i = self.a * Z_ae + self.b * Z_igae
        Z_l = torch.spmm(Am, Z_i)
        S = torch.mm(Z_l, Z_l.t())
        S = F.softmax(S, dim=1)
        Z_g = torch.mm(S, Z_l)
        Z = self.alpha * Z_g + Z_l

        # AE decoding
        X_hat = self.ae.decoder(Z)

        # IGAE decoding
        Z_hat, Z_adj_hat, AZ_de, Z_de = self.gae.decoder(Z, Am)
        sim = (A_igae1 + A_igae2) / 2
        A_hat = sim + Z_adj_hat

        # node embedding and cluster-level embedding
        Z_ae_all = [Z_ae1, Z_ae2, Z_tilde_ae1, Z_tilde_ae2]
        Z_gae_all = [Z_igae1, Z_igae2, Z_tilde_igae1, Z_tilde_igae2]

        # the soft assignment distribution Q
        Q = self.q_distribute(Z, Z_ae, Z_igae)

        # propagated embedding AZ_all and embedding Z_all
        AZ_en = []
        Z_en = []
        for i in range(len(AZ_1)):
            AZ_en.append((AZ_1[i] + AZ_2[i]) / 2)
            Z_en.append((Z_1[i] + Z_2[i]) / 2)
        AZ_all = [AZ_en, AZ_de]
        Z_all = [Z_en, Z_de]

        return X_hat, Z_hat, A_hat, sim, Z_ae_all, Z_gae_all, Q, Z, AZ_all, Z_all
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2021 yueliu1999
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: README.md
================================================
[stars-img]: https://img.shields.io/github/stars/yueliu1999/DCRN?color=yellow
[stars-url]: https://github.com/yueliu1999/DCRN/stargazers
[fork-img]: https://img.shields.io/github/forks/yueliu1999/DCRN?color=lightblue&label=fork
[fork-url]: https://github.com/yueliu1999/DCRN/network/members
[visitors-img]: https://visitor-badge.glitch.me/badge?page_id=yueliu1999.DCRN
[adgc-url]: https://github.com/yueliu1999/DCRN
# Dual Correlation Reduction Network
<p align="center">
<a href="https://pytorch.org/" alt="PyTorch">
<img src="https://img.shields.io/badge/PyTorch-%23EE4C2C.svg?e&logo=PyTorch&logoColor=white" /></a>
<a href="https://aaai.org/Conferences/AAAI-22/" alt="Conference">
<img src="https://img.shields.io/badge/AAAI'22-brightgreen" /></a>
</p>
[![GitHub stars][stars-img]][stars-url]
[![GitHub forks][fork-img]][fork-url]
[![visitors][visitors-img]][adgc-url]
The official source code for the paper [Deep Graph Clustering via Dual Correlation Reduction](https://www.researchgate.net/profile/Yue-Liu-240/publication/357271184_Deep_Graph_Clustering_via_Dual_Correlation_Reduction/links/61c466e68bb20101842f9a92/Deep-Graph-Clustering-via-Dual-Correlation-Reduction.pdf), accepted by AAAI 2022. Questions and issues are welcome; please contact yueliu19990731@163.com. If you find this repository useful for your research or work, we would really appreciate a star. :heart:
-------------
### Overview
<p align = "justify">
<a href="https://github.com/yueliu1999/Awesome-Deep-Graph-Clustering">Deep graph clustering</a>, which aims to reveal the underlying graph structure and divide the nodes into different groups, has attracted intensive attention in recent years. However, we observe that, in the process of node encoding, existing methods suffer from representation collapse, which tends to map all data into the same representation. Consequently, the discriminative capability of node representations is limited, leading to unsatisfactory clustering performance. To address this issue, we propose a novel self-supervised deep graph clustering method termed <b>D</b>ual <b>C</b>orrelation <b>R</b>eduction <b>N</b>etwork (<b>DCRN</b>), which reduces information correlation in a dual manner. Specifically, we first design a siamese network to encode samples. Then, by forcing the cross-view sample correlation matrix and the cross-view feature correlation matrix to approximate two identity matrices, respectively, we reduce the information correlation at a dual level, thus improving the discriminative capability of the resulting features. Moreover, to alleviate the representation collapse caused by over-smoothing in GCNs, we introduce a propagation-regularization term that enables the network to gain long-distance information with a shallow network structure. Extensive experimental results on six benchmark datasets demonstrate the effectiveness of the proposed DCRN against existing state-of-the-art methods.
</p>
<div align="center">
<img src="./assets/overall.png" width=60%/>
</div>
<div align="center">
Illustration of the Dual Correlation Reduction Network (DCRN).
</div>
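
The core of the dual correlation reduction idea fits in a few lines. The sketch below reproduces the `cross_correlation`, `off_diagonal`, and `correlation_reduction_loss` helpers from `utils.py` and applies them to two random stand-in views; the shapes (100 nodes, 20 dimensions) are illustrative only, not the paper's settings:

```
import torch
import torch.nn.functional as F

def cross_correlation(Z_v1, Z_v2):
    # cross-view correlation matrix of L2-normalized embeddings
    return torch.mm(F.normalize(Z_v1, dim=1), F.normalize(Z_v2, dim=1).t())

def off_diagonal(x):
    # all elements of a square matrix except the diagonal
    n, m = x.shape
    assert n == m
    return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()

def correlation_reduction_loss(S):
    # MSE between S and the identity: diagonal entries -> 1, off-diagonal -> 0
    return torch.diagonal(S).add(-1).pow(2).mean() + off_diagonal(S).pow(2).mean()

# two augmented views of the same nodes (random stand-ins here)
Z_v1, Z_v2 = torch.randn(100, 20), torch.randn(100, 20)
# sample-level reduction on the N x N matrix, feature-level on the d x d matrix
loss = correlation_reduction_loss(cross_correlation(Z_v1, Z_v2)) + \
       correlation_reduction_loss(cross_correlation(Z_v1.t(), Z_v2.t()))
```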
### Requirements
The proposed DCRN is implemented in Python 3.8.5 on an NVIDIA 3090 GPU.
Python package versions are pinned in **requirements.txt**:
- torch==1.8.0
- tqdm==4.50.2
- numpy==1.22.0
- munkres==1.1.4
- scikit_learn==1.0.1
### Pre-training
We release the pre-training code.
- Google Drive: [Link](https://drive.google.com/file/d/1XRlu3Ahgwin52jluqFu2aBW6wjCwjY4M/view?usp=sharing)
- Nut store: [Link](https://www.jianguoyun.com/p/DXCOQEYQwdaSChiEjrsEIAA)
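
For reference, the snippet below is a minimal sketch of how the checkpoints under `model_pretrain/` are consumed; it follows the same partial-state-dict pattern as `load_pretrain_parameter` in `utils.py`. Here `model` is assumed to be an already-constructed `DCRN` instance, and the `dblp` filename is just an example:

```
import torch

# load the pretrained parameters for the chosen dataset (here: dblp)
pretrained_dict = torch.load('model_pretrain/dblp_pretrain.pkl', map_location='cpu')
model_dict = model.state_dict()
# keep only the keys that also exist in the current model, then merge
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
```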
### Quick Start
- Step 1: use the **dblp.zip** file or download other datasets from [Awesome Deep Graph Clustering/Benchmark Datasets](https://github.com/yueliu1999/Awesome-Deep-Graph-Clustering#benchmark-datasets)
- Step 2: unzip the dataset into the **./dataset** folder
- Step 3: run
```
python main.py --name dblp --seed 3 --alpha_value 0.2 --lambda_value 10 --gamma_value 1e3 --lr 1e-4
```
Parameter settings (an example run for ACM is shown after this list):
- name: the name of the dataset
- seed: the random seed; results are reported over 10 runs with different random seeds
- alpha_value: the teleport probability in graph diffusion
  - PUBMED: 0.1
  - DBLP, CITE, ACM, AMAP, CORAFULL: 0.2
- lambda_value: the coefficient of the clustering guidance loss
  - all datasets: 10
- gamma_value: the coefficient of propagation regularization
  - all datasets: 1e3
- lr: the learning rate
  - DBLP: 1e-4
  - ACM: 5e-5
  - AMAP: 1e-3
  - CITE, PUBMED, CORAFULL: 1e-5
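
For example, with the settings listed above (these match the per-dataset defaults in `setup()` in `utils.py`), a run on ACM looks like this, assuming the ACM dataset has already been unzipped into **./dataset**:

```
python main.py --name acm --seed 3 --alpha_value 0.2 --lambda_value 10 --gamma_value 1e3 --lr 5e-5
```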
Tips: limited by GPU memory, PUBMED and CORAFULL might run out of memory during training. We therefore adopt batch training on the PUBMED and CORAFULL datasets, with the batch size set to 2000. Please use the batch-training version of DCRN [here](https://drive.google.com/file/d/185GLObsQQL3Y-dQ2aIin5YrXuA-dgpnU/view?usp=sharing).
### Results
<div align="center">
<img src="./assets/result.png" width=100%/>
</div>
<div align="center">
<img src="./assets/t-sne.png" width=100%/>
</div>
### Citation
If you use the code or datasets in this repository for your research, please cite our papers.
```
@inproceedings{DCRN,
  title={Deep Graph Clustering via Dual Correlation Reduction},
  author={Liu, Yue and Tu, Wenxuan and Zhou, Sihang and Liu, Xinwang and Song, Linxuan and Yang, Xihong and Zhu, En},
  booktitle={Proceedings of the AAAI Conference on Artificial Intelligence},
  volume={36},
  number={7},
  pages={7603-7611},
  year={2022}
}

@article{Deep_graph_clustering_survey,
  author={Liu, Yue and Xia, Jun and Zhou, Sihang and Wang, Siwei and Guo, Xifeng and Yang, Xihong and Liang, Ke and Tu, Wenxuan and Li, Stan Z. and Liu, Xinwang},
  journal={arXiv preprint arXiv:2211.12875},
  title={A Survey of Deep Graph Clustering: Taxonomy, Challenge, and Application},
  year={2022}
}
```
================================================
FILE: main.py
================================================
from train import *
from DCRN import DCRN

if __name__ == '__main__':
    # setup
    setup()

    # data pre-processing: X, y, A, A_norm, Ad
    X, y, A = load_graph_data(opt.args.name, show_details=False)
    A_norm = normalize_adj(A, self_loop=True, symmetry=False)
    Ad = diffusion_adj(A, mode="ppr", transport_rate=opt.args.alpha_value)

    # to torch tensor
    X = numpy_to_torch(X).to(opt.args.device)
    A_norm = numpy_to_torch(A_norm, sparse=True).to(opt.args.device)
    Ad = numpy_to_torch(Ad).to(opt.args.device)

    # Dual Correlation Reduction Network
    model = DCRN(n_node=X.shape[0]).to(opt.args.device)

    # deep graph clustering
    acc, nmi, ari, f1 = train(model, X, y, A, A_norm, Ad)
    print("ACC: {:.4f},".format(acc), "NMI: {:.4f},".format(nmi), "ARI: {:.4f},".format(ari), "F1: {:.4f}".format(f1))
================================================
FILE: opt.py
================================================
import argparse
parser = argparse.ArgumentParser(description='DCRN', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# setting
parser.add_argument('--name', type=str, default="dblp")
parser.add_argument('--cuda', type=bool, default=True)
parser.add_argument('--seed', type=int, default=3)
parser.add_argument('--alpha_value', type=float, default=0.2)
parser.add_argument('--lambda_value', type=float, default=10)
parser.add_argument('--gamma_value', type=float, default=1e3)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--n_z', type=int, default=20)
parser.add_argument('--epoch', type=int, default=400)
parser.add_argument('--show_training_details', type=bool, default=False)
# AE structure parameter from DFCN
parser.add_argument('--ae_n_enc_1', type=int, default=128)
parser.add_argument('--ae_n_enc_2', type=int, default=256)
parser.add_argument('--ae_n_enc_3', type=int, default=512)
parser.add_argument('--ae_n_dec_1', type=int, default=512)
parser.add_argument('--ae_n_dec_2', type=int, default=256)
parser.add_argument('--ae_n_dec_3', type=int, default=128)
# IGAE structure parameter from DFCN
parser.add_argument('--gae_n_enc_1', type=int, default=128)
parser.add_argument('--gae_n_enc_2', type=int, default=256)
parser.add_argument('--gae_n_enc_3', type=int, default=20)
parser.add_argument('--gae_n_dec_1', type=int, default=20)
parser.add_argument('--gae_n_dec_2', type=int, default=256)
parser.add_argument('--gae_n_dec_3', type=int, default=128)
# clustering performance: acc, nmi, ari, f1
parser.add_argument('--acc', type=float, default=0)
parser.add_argument('--nmi', type=float, default=0)
parser.add_argument('--ari', type=float, default=0)
parser.add_argument('--f1', type=float, default=0)
args = parser.parse_args()
================================================
FILE: requirements.txt
================================================
torch==1.8.0
tqdm==4.50.2
numpy==1.22.0
munkres==1.1.4
scikit_learn==1.0.1
================================================
FILE: train.py
================================================
import tqdm
from utils import *
from torch.optim import Adam
def train(model, X, y, A, A_norm, Ad):
    """
    train our model
    Args:
        model: Dual Correlation Reduction Network
        X: input feature matrix
        y: input label
        A: input origin adj
        A_norm: normalized adj
        Ad: graph diffusion
    Returns: acc, nmi, ari, f1
    """
    print("Training…")
    # calculate embedding similarity and cluster centers
    sim, centers = model_init(model, X, y, A_norm)

    # initialize cluster centers
    model.cluster_centers.data = torch.tensor(centers).to(opt.args.device)

    # edge-masked adjacency matrix (Am): remove edges based on feature similarity
    Am = remove_edge(A, sim, remove_rate=0.1)

    optimizer = Adam(model.parameters(), lr=opt.args.lr)
    for epoch in tqdm.tqdm(range(opt.args.epoch)):
        # add gaussian noise to X
        X_tilde1, X_tilde2 = gaussian_noised_feature(X)

        # input & output
        X_hat, Z_hat, A_hat, _, Z_ae_all, Z_gae_all, Q, Z, AZ_all, Z_all = model(X_tilde1, Ad, X_tilde2, Am)

        # calculate loss: L_{DICR}, L_{REC} and L_{KL}
        L_DICR = dicr_loss(Z_ae_all, Z_gae_all, AZ_all, Z_all)
        L_REC = reconstruction_loss(X, A_norm, X_hat, Z_hat, A_hat)
        L_KL = distribution_loss(Q, target_distribution(Q[0].data))
        loss = L_DICR + L_REC + opt.args.lambda_value * L_KL

        # optimization
        optimizer.zero_grad()
        loss.backward(retain_graph=True)
        optimizer.step()

        # clustering & evaluation
        acc, nmi, ari, f1, _ = clustering(Z, y)
        if acc > opt.args.acc:
            opt.args.acc = acc
            opt.args.nmi = nmi
            opt.args.ari = ari
            opt.args.f1 = f1
    return opt.args.acc, opt.args.nmi, opt.args.ari, opt.args.f1
================================================
FILE: utils.py
================================================
import opt
import torch
import random
import numpy as np
from sklearn import metrics
from munkres import Munkres
import torch.nn.functional as F
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.metrics import adjusted_rand_score as ari_score
from sklearn.metrics.cluster import normalized_mutual_info_score as nmi_score
def setup():
    """
    setup
    - name: the name of dataset
    - device: CPU / GPU
    - seed: random seed
    - n_clusters: num of clusters
    - n_input: dimension of feature
    - alpha_value: alpha value for graph diffusion
    - lambda_value: lambda value for clustering guidance
    - gamma_value: gamma value for propagation regularization
    - lr: learning rate
    Return: None
    """
    print("setting:")
    setup_seed(opt.args.seed)
    if opt.args.name == 'acm':
        opt.args.n_clusters = 3
        opt.args.n_input = 100
        opt.args.alpha_value = 0.2
        opt.args.lambda_value = 10
        opt.args.gamma_value = 1e3
        opt.args.lr = 5e-5
    elif opt.args.name == 'dblp':
        opt.args.n_clusters = 4
        opt.args.n_input = 50
        opt.args.alpha_value = 0.2
        opt.args.lambda_value = 10
        opt.args.gamma_value = 1e3
        opt.args.lr = 1e-4
    elif opt.args.name == 'cite':
        opt.args.n_clusters = 6
        opt.args.n_input = 100
        opt.args.alpha_value = 0.2
        opt.args.lambda_value = 10
        opt.args.gamma_value = 1e3
        opt.args.lr = 1e-5
    elif opt.args.name == 'amap':
        opt.args.n_clusters = 8
        opt.args.n_input = 100
        opt.args.alpha_value = 0.2
        opt.args.lambda_value = 10
        opt.args.gamma_value = 1e3
        opt.args.lr = 1e-3
    else:
        print("error!")
        print("please add the new dataset's parameters")
        print("------------------------------")
        print("dataset       : ")
        print("device        : ")
        print("random seed   : ")
        print("clusters      : ")
        print("alpha value   : ")
        print("lambda value  : ")
        print("gamma value   : ")
        print("learning rate : ")
        print("------------------------------")
        exit(0)

    opt.args.device = torch.device("cuda" if opt.args.cuda else "cpu")
    print("------------------------------")
    print("dataset       : {}".format(opt.args.name))
    print("device        : {}".format(opt.args.device))
    print("random seed   : {}".format(opt.args.seed))
    print("clusters      : {}".format(opt.args.n_clusters))
    print("alpha value   : {}".format(opt.args.alpha_value))
    print("lambda value  : {}".format(opt.args.lambda_value))
    print("gamma value   : {:.0e}".format(opt.args.gamma_value))
    print("learning rate : {:.0e}".format(opt.args.lr))
    print("------------------------------")
def setup_seed(seed):
    """
    setup random seed to fix the result
    Args:
        seed: random seed
    Returns: None
    """
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def numpy_to_torch(a, sparse=False):
    """
    numpy array to torch tensor
    :param a: the numpy array
    :param sparse: is sparse tensor or not
    :return: torch tensor
    """
    if sparse:
        # build a dense float tensor first, then convert to sparse COO format
        a = torch.FloatTensor(a).to_sparse()
    else:
        a = torch.FloatTensor(a)
    return a
def torch_to_numpy(t):
    """
    torch tensor to numpy array
    :param t: the torch tensor
    :return: numpy array
    """
    return t.numpy()


def load_graph_data(dataset_name, show_details=False):
    """
    load graph data
    :param dataset_name: the name of the dataset
    :param show_details: if show the details of dataset
    - dataset name
    - features' shape
    - labels' shape
    - adj shape
    - edge num
    - category num
    - category distribution
    :return: the features, labels and adj
    """
    load_path = "dataset/" + dataset_name + "/" + dataset_name
    feat = np.load(load_path+"_feat.npy", allow_pickle=True)
    label = np.load(load_path+"_label.npy", allow_pickle=True)
    adj = np.load(load_path+"_adj.npy", allow_pickle=True)

    if show_details:
        print("++++++++++++++++++++++++++++++")
        print("---details of graph dataset---")
        print("++++++++++++++++++++++++++++++")
        print("dataset name:   ", dataset_name)
        print("feature shape:  ", feat.shape)
        print("label shape:    ", label.shape)
        print("adj shape:      ", adj.shape)
        print("undirected edge num:   ", int(np.nonzero(adj)[0].shape[0]/2))
        print("category num:          ", max(label)-min(label)+1)
        print("category distribution: ")
        for i in range(max(label)+1):
            print("label", i, end=":")
            print(len(label[np.where(label == i)]))
        print("++++++++++++++++++++++++++++++")

    # X pre-processing: reduce the feature dimension with PCA
    pca = PCA(n_components=opt.args.n_input)
    feat = pca.fit_transform(feat)
    return feat, label, adj
def normalize_adj(adj, self_loop=True, symmetry=False):
    """
    normalize the adj matrix
    :param adj: input adj matrix
    :param self_loop: if add the self loop or not
    :param symmetry: symmetry normalize or not
    :return: the normalized adj matrix
    """
    # add the self_loop
    if self_loop:
        adj_tmp = adj + np.eye(adj.shape[0])
    else:
        adj_tmp = adj

    # calculate the degree matrix and its inverse
    d = np.diag(adj_tmp.sum(0))
    d_inv = np.linalg.inv(d)

    # symmetry normalize: D^{-0.5} A D^{-0.5}
    if symmetry:
        sqrt_d_inv = np.sqrt(d_inv)
        norm_adj = np.matmul(np.matmul(sqrt_d_inv, adj_tmp), sqrt_d_inv)
    # non-symmetry normalize: D^{-1} A
    else:
        norm_adj = np.matmul(d_inv, adj_tmp)
    return norm_adj
def gaussian_noised_feature(X):
    """
    add gaussian noise to the attribute matrix X
    Args:
        X: the attribute matrix
    Returns: the two noised attribute matrices X_tilde1, X_tilde2
    """
    N_1 = torch.Tensor(np.random.normal(1, 0.1, X.shape)).to(opt.args.device)
    N_2 = torch.Tensor(np.random.normal(1, 0.1, X.shape)).to(opt.args.device)
    X_tilde1 = X * N_1
    X_tilde2 = X * N_2
    return X_tilde1, X_tilde2
def diffusion_adj(adj, mode="ppr", transport_rate=0.2):
    """
    graph diffusion
    :param adj: input adj matrix
    :param mode: the mode of graph diffusion ("ppr": personalized page rank)
    :param transport_rate: the teleport (restart) probability
    :return: the graph diffusion matrix
    """
    # add the self_loop
    adj_tmp = adj + np.eye(adj.shape[0])

    # calculate the degree matrix and its inverse
    d = np.diag(adj_tmp.sum(0))
    d_inv = np.linalg.inv(d)
    sqrt_d_inv = np.sqrt(d_inv)

    # calculate norm adj
    norm_adj = np.matmul(np.matmul(sqrt_d_inv, adj_tmp), sqrt_d_inv)

    # calculate graph diffusion: alpha * (I - (1 - alpha) * A_norm)^{-1}
    if mode == "ppr":
        diff_adj = transport_rate * np.linalg.inv((np.eye(d.shape[0]) - (1 - transport_rate) * norm_adj))
    return diff_adj
def remove_edge(A, similarity, remove_rate=0.1):
    """
    remove edge based on embedding similarity
    Args:
        A: the origin adjacency matrix
        similarity: cosine similarity matrix of embedding
        remove_rate: the rate of removing linkage relation
    Returns:
        Am: edge-masked adjacency matrix
    """
    # remove edges based on cosine similarity of embedding
    n_node = A.shape[0]
    for i in range(n_node):
        A[i, torch.argsort(similarity[i].cpu())[:int(round(remove_rate * n_node))]] = 0

    # normalize adj
    Am = normalize_adj(A, self_loop=True, symmetry=False)
    Am = numpy_to_torch(Am).to(opt.args.device)
    return Am


def load_pretrain_parameter(model):
    """
    load pretrained parameters
    Args:
        model: Dual Correlation Reduction Network
    Returns: model
    """
    pretrained_dict = torch.load('model_pretrain/{}_pretrain.pkl'.format(opt.args.name), map_location='cpu')
    model_dict = model.state_dict()
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    return model
def model_init(model, X, y, A_norm):
    """
    load the pre-trained model and calculate the embedding similarity and cluster centers
    Args:
        model: Dual Correlation Reduction Network
        X: input feature matrix
        y: input label
        A_norm: normalized adj
    Returns: embedding similarity matrix and initial cluster centers
    """
    # load pre-trained model
    model = load_pretrain_parameter(model)

    # calculate embedding similarity
    with torch.no_grad():
        _, _, _, sim, _, _, _, Z, _, _ = model(X, A_norm, X, A_norm)

    # calculate cluster centers
    acc, nmi, ari, f1, centers = clustering(Z, y)
    return sim, centers
# the reconstruction function from DFCN
def reconstruction_loss(X, A_norm, X_hat, Z_hat, A_hat):
    """
    reconstruction loss L_{REC}
    Args:
        X: the origin feature matrix
        A_norm: the normalized adj
        X_hat: the reconstructed X
        Z_hat: the reconstructed Z
        A_hat: the reconstructed A
    Returns: the reconstruction loss
    """
    loss_ae = F.mse_loss(X_hat, X)
    loss_w = F.mse_loss(Z_hat, torch.spmm(A_norm, X))
    loss_a = F.mse_loss(A_hat, A_norm.to_dense())
    loss_igae = loss_w + 0.1 * loss_a
    loss_rec = loss_ae + loss_igae
    return loss_rec
def target_distribution(Q):
    """
    calculate the target distribution P by sharpening the soft assignment Q
    (Q itself is computed with a Student's t-distribution kernel)
    Args:
        Q: the soft assignment distribution
    Returns: target distribution P
    """
    weight = Q ** 2 / Q.sum(0)
    P = (weight.t() / weight.sum(1)).t()
    return P
# clustering guidance from DFCN
def distribution_loss(Q, P):
    """
    calculate the clustering guidance loss L_{KL}
    Args:
        Q: the soft assignment distribution
        P: the target distribution
    Returns: L_{KL}
    """
    loss = F.kl_div((Q[0].log() + Q[1].log() + Q[2].log()) / 3, P, reduction='batchmean')
    return loss
def r_loss(AZ, Z):
    """
    the loss of propagated regularization (L_R)
    Args:
        AZ: the propagated embedding
        Z: embedding
    Returns: L_R
    """
    loss = 0
    for i in range(2):
        for j in range(3):
            p_output = F.softmax(AZ[i][j], dim=1)
            q_output = F.softmax(Z[i][j], dim=1)
            log_mean_output = ((p_output + q_output) / 2).log()
            # Jensen-Shannon divergence between the propagated and plain embeddings
            loss += (F.kl_div(log_mean_output, p_output, reduction='batchmean') +
                     F.kl_div(log_mean_output, q_output, reduction='batchmean')) / 2
    return loss
def off_diagonal(x):
    """
    off-diagonal elements of x
    Args:
        x: the input matrix
    Returns: the off-diagonal elements of x
    """
    n, m = x.shape
    assert n == m
    return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()


def cross_correlation(Z_v1, Z_v2):
    """
    calculate the cross-view correlation matrix S
    Args:
        Z_v1: the first view embedding
        Z_v2: the second view embedding
    Returns: S
    """
    return torch.mm(F.normalize(Z_v1, dim=1), F.normalize(Z_v2, dim=1).t())
def correlation_reduction_loss(S):
    """
    the correlation reduction loss L: MSE between S and the identity matrix I
    Args:
        S: the cross-view correlation matrix S
    Returns: L
    """
    return torch.diagonal(S).add(-1).pow(2).mean() + off_diagonal(S).pow(2).mean()
def dicr_loss(Z_ae, Z_igae, AZ, Z):
    """
    Dual Information Correlation Reduction loss L_{DICR}
    Args:
        Z_ae: AE embedding including two-view node embedding [0, 1] and two-view cluster-level embedding [2, 3]
        Z_igae: IGAE embedding including two-view node embedding [0, 1] and two-view cluster-level embedding [2, 3]
        AZ: the propagated fusion embedding AZ
        Z: the fusion embedding Z
    Returns:
        L_{DICR}
    """
    # Sample-level Correlation Reduction (SCR)
    # cross-view sample correlation matrix
    S_N_ae = cross_correlation(Z_ae[0], Z_ae[1])
    S_N_igae = cross_correlation(Z_igae[0], Z_igae[1])
    # loss of SCR
    L_N_ae = correlation_reduction_loss(S_N_ae)
    L_N_igae = correlation_reduction_loss(S_N_igae)

    # Feature-level Correlation Reduction (FCR)
    # cross-view feature correlation matrix
    S_F_ae = cross_correlation(Z_ae[2].t(), Z_ae[3].t())
    S_F_igae = cross_correlation(Z_igae[2].t(), Z_igae[3].t())
    # loss of FCR
    L_F_ae = correlation_reduction_loss(S_F_ae)
    L_F_igae = correlation_reduction_loss(S_F_igae)

    if opt.args.name == "dblp" or opt.args.name == "acm":
        L_N = 0.01 * L_N_ae + 10 * L_N_igae
        L_F = 0.5 * L_F_ae + 0.5 * L_F_igae
    else:
        L_N = 0.1 * L_N_ae + 5 * L_N_igae
        L_F = L_F_ae + L_F_igae

    # propagated regularization
    L_R = r_loss(AZ, Z)

    # loss of DICR
    loss_dicr = L_N + L_F + opt.args.gamma_value * L_R
    return loss_dicr
def clustering(Z, y):
    """
    clustering based on embedding
    Args:
        Z: the input embedding
        y: the ground truth
    Returns: acc, nmi, ari, f1, clustering centers
    """
    model = KMeans(n_clusters=opt.args.n_clusters, n_init=20)
    cluster_id = model.fit_predict(Z.data.cpu().numpy())
    acc, nmi, ari, f1 = eva(y, cluster_id, show_details=opt.args.show_training_details)
    return acc, nmi, ari, f1, model.cluster_centers_
def cluster_acc(y_true, y_pred):
    """
    calculate clustering acc and f1-score
    Args:
        y_true: the ground truth
        y_pred: the clustering id
    Returns: acc and f1-score
    """
    y_true = y_true - np.min(y_true)
    l1 = list(set(y_true))
    num_class1 = len(l1)
    l2 = list(set(y_pred))
    num_class2 = len(l2)
    ind = 0
    if num_class1 != num_class2:
        # pad y_pred so that every ground-truth class appears at least once
        for i in l1:
            if i in l2:
                pass
            else:
                y_pred[ind] = i
                ind += 1
    l2 = list(set(y_pred))
    num_class2 = len(l2)
    if num_class1 != num_class2:
        print('error')
        return

    # build the assignment cost matrix and solve it with the Hungarian algorithm
    cost = np.zeros((num_class1, num_class2), dtype=int)
    for i, c1 in enumerate(l1):
        mps = [i1 for i1, e1 in enumerate(y_true) if e1 == c1]
        for j, c2 in enumerate(l2):
            mps_d = [i1 for i1 in mps if y_pred[i1] == c2]
            cost[i][j] = len(mps_d)
    m = Munkres()
    cost = cost.__neg__().tolist()
    indexes = m.compute(cost)

    # relabel the predictions with the matched ground-truth labels
    new_predict = np.zeros(len(y_pred))
    for i, c in enumerate(l1):
        c2 = l2[indexes[i][1]]
        ai = [ind for ind, elm in enumerate(y_pred) if elm == c2]
        new_predict[ai] = c
    acc = metrics.accuracy_score(y_true, new_predict)
    f1_macro = metrics.f1_score(y_true, new_predict, average='macro')
    return acc, f1_macro
def eva(y_true, y_pred, show_details=True):
    """
    evaluate the clustering performance
    Args:
        y_true: the ground truth
        y_pred: the predicted label
        show_details: if print the details
    Returns: acc, nmi, ari, f1
    """
    acc, f1 = cluster_acc(y_true, y_pred)
    nmi = nmi_score(y_true, y_pred, average_method='arithmetic')
    ari = ari_score(y_true, y_pred)
    if show_details:
        print(':acc {:.4f}'.format(acc), ', nmi {:.4f}'.format(nmi), ', ari {:.4f}'.format(ari),
              ', f1 {:.4f}'.format(f1))
    return acc, nmi, ari, f1