Showing preview only (1,709K chars total). Download the full file or copy to clipboard to get everything.
Repository: YGZWQZD/LAMDA-SSL
Branch: master
Commit: 6cb8e2250983
Files: 305
Total size: 1.5 MB
Directory structure:
gitextract_shde08cu/
├── Examples/
│ ├── Assemble_BreastCancer.py
│ ├── CIFAR10_imbalance.py
│ ├── CoReg_Boston.py
│ ├── Co_Training_BreastCancer.py
│ ├── Co_Training_Wine.py
│ ├── Constrained_Seed_k_means_Wine.py
│ ├── Constrained_k_means_Wine.py
│ ├── FixMatch_BreastCancer.py
│ ├── FixMatch_CIFAR10.py
│ ├── FixMatch_SST2.py
│ ├── FlexMatch_CIFAR10.py
│ ├── FlexMatch_SST2.py
│ ├── GAT_Cora.py
│ ├── GCN_Cora.py
│ ├── ICTReg_Boston.py
│ ├── ICT_CIFAR10.py
│ ├── ImprovedGAN_MNIST.py
│ ├── LabelPropagation_BreastCancer.py
│ ├── LabelSpreading_BreastCancer.py
│ ├── LadderNetwork_MNIST.py
│ ├── LapSVM_BreastCancer.py
│ ├── MeanTeacherReg_Boston.py
│ ├── MeanTeacher_CIFAR10.py
│ ├── MixMatch_CIFAR10.py
│ ├── ParallelDistributed.py
│ ├── Parameter_Search.py
│ ├── PiModelReg_Boston.py
│ ├── PiModel_CIFAR10.py
│ ├── PseudoLabel_CIFAR10.py
│ ├── ReMixMatch_CIFAR10.py
│ ├── S4L_CIFAR10.py
│ ├── SDNE_Cora.py
│ ├── SSGMM_BreastCancer.py
│ ├── SSVAE_MNIST.py
│ ├── Save_Load_Model.py
│ ├── SemiBoost_BreastCancer.py
│ ├── TSVM_BreastCancer.py
│ ├── TemporalEnsembling_CIFAR10.py
│ ├── Tri_Training_BreastCancer.py
│ ├── UDA_CIFAR10.py
│ └── VAT_CIFAR10.py
├── LAMDA_SSL/
│ ├── Algorithm/
│ │ ├── Classification/
│ │ │ ├── Assemble.py
│ │ │ ├── CAFA.py
│ │ │ ├── Co_Training.py
│ │ │ ├── FixMatch.py
│ │ │ ├── Fix_A_Step.py
│ │ │ ├── FlexMatch.py
│ │ │ ├── FreeMatch.py
│ │ │ ├── GAT.py
│ │ │ ├── GCN.py
│ │ │ ├── ICT.py
│ │ │ ├── ImprovedGAN.py
│ │ │ ├── LabelPropagation.py
│ │ │ ├── LabelSpreading.py
│ │ │ ├── LadderNetwork.py
│ │ │ ├── LapSVM.py
│ │ │ ├── MTCF.py
│ │ │ ├── MeanTeacher.py
│ │ │ ├── MixMatch.py
│ │ │ ├── PiModel.py
│ │ │ ├── PseudoLabel.py
│ │ │ ├── ReMixMatch.py
│ │ │ ├── S4L.py
│ │ │ ├── SDNE.py
│ │ │ ├── SSGMM.py
│ │ │ ├── SSVAE.py
│ │ │ ├── SemiBoost.py
│ │ │ ├── SoftMatch.py
│ │ │ ├── Supervised.py
│ │ │ ├── TSVM.py
│ │ │ ├── TemporalEnsembling.py
│ │ │ ├── Tri_Training.py
│ │ │ ├── UASD.py
│ │ │ ├── UDA.py
│ │ │ ├── VAT.py
│ │ │ └── __init__.py
│ │ ├── Clustering/
│ │ │ ├── Constrained_Seed_k_means.py
│ │ │ ├── Constrained_k_means.py
│ │ │ └── __init__.py
│ │ ├── Regression/
│ │ │ ├── CoReg.py
│ │ │ ├── ICTReg.py
│ │ │ ├── MeanTeacherReg.py
│ │ │ ├── PiModelReg.py
│ │ │ └── __init__.py
│ │ └── __init__.py
│ ├── Augmentation/
│ │ ├── Graph/
│ │ │ ├── DropEdges.py
│ │ │ ├── DropNodes.py
│ │ │ └── __init__.py
│ │ ├── Tabular/
│ │ │ ├── Noise.py
│ │ │ └── __init__.py
│ │ ├── Text/
│ │ │ ├── RandomDeletion.py
│ │ │ ├── RandomSwap.py
│ │ │ ├── TFIDFReplacement.py
│ │ │ └── __init__.py
│ │ ├── Vision/
│ │ │ ├── AutoContrast.py
│ │ │ ├── Brightness.py
│ │ │ ├── CenterCrop.py
│ │ │ ├── Color.py
│ │ │ ├── Contrast.py
│ │ │ ├── Cutout.py
│ │ │ ├── CutoutAbs.py
│ │ │ ├── Equalize.py
│ │ │ ├── Identity.py
│ │ │ ├── Invert.py
│ │ │ ├── Mixup.py
│ │ │ ├── Posterize.py
│ │ │ ├── RandAugment.py
│ │ │ ├── RandomCrop.py
│ │ │ ├── RandomHorizontalFlip.py
│ │ │ ├── Rotate.py
│ │ │ ├── Sharpness.py
│ │ │ ├── ShearX.py
│ │ │ ├── ShearY.py
│ │ │ ├── Solarize.py
│ │ │ ├── TranslateX.py
│ │ │ ├── TranslateY.py
│ │ │ └── __init__.py
│ │ └── __init__.py
│ ├── Base/
│ │ ├── BaseOptimizer.py
│ │ ├── BaseSampler.py
│ │ ├── BaseScheduler.py
│ │ ├── ClassifierEvaluation.py
│ │ ├── ClusterEvaluation.py
│ │ ├── DeepModelMixin.py
│ │ ├── GraphMixin.py
│ │ ├── InductiveEstimator.py
│ │ ├── LambdaLR.py
│ │ ├── RegressorEvaluation.py
│ │ ├── SemiEstimator.py
│ │ ├── TabularMixin.py
│ │ ├── TextMixin.py
│ │ ├── TransductiveEstimator.py
│ │ ├── Transformer.py
│ │ ├── VisionMixin.py
│ │ └── __init__.py
│ ├── Config/
│ │ ├── Assemble.py
│ │ ├── CAFA.py
│ │ ├── CoReg.py
│ │ ├── Co_Training.py
│ │ ├── Constrained_Seed_k_means.py
│ │ ├── Constrained_k_means.py
│ │ ├── FixMatch.py
│ │ ├── FlexMatch.py
│ │ ├── GAT.py
│ │ ├── GCN.py
│ │ ├── ICT.py
│ │ ├── ICTReg.py
│ │ ├── ImprovedGAN.py
│ │ ├── LabelPropagation.py
│ │ ├── LabelSpreading.py
│ │ ├── LadderNetwork.py
│ │ ├── LapSVM.py
│ │ ├── MeanTeacher.py
│ │ ├── MeanTeacherReg.py
│ │ ├── MixMatch.py
│ │ ├── PiModel.py
│ │ ├── PiModelReg.py
│ │ ├── PseudoLabel.py
│ │ ├── ReMixMatch.py
│ │ ├── S4L.py
│ │ ├── SDNE.py
│ │ ├── SSGMM.py
│ │ ├── SSVAE.py
│ │ ├── SemiBoost.py
│ │ ├── TSVM.py
│ │ ├── TemporalEnsembling.py
│ │ ├── Tri_Training.py
│ │ ├── UDA.py
│ │ ├── VAT.py
│ │ └── __init__.py
│ ├── Dataloader/
│ │ ├── LabeledDataloader.py
│ │ ├── TrainDataloader.py
│ │ ├── UnlabeledDataloader.py
│ │ └── __init__.py
│ ├── Dataset/
│ │ ├── Graph/
│ │ │ ├── Cora.py
│ │ │ └── __init__.py
│ │ ├── LabeledDataset.py
│ │ ├── SemiDataset.py
│ │ ├── Tabular/
│ │ │ ├── Boston.py
│ │ │ ├── BreastCancer.py
│ │ │ ├── Wine.py
│ │ │ └── __init__.py
│ │ ├── Text/
│ │ │ ├── IMDB.py
│ │ │ ├── SST2.py
│ │ │ └── __init__.py
│ │ ├── TrainDataset.py
│ │ ├── UnlabeledDataset.py
│ │ ├── Vision/
│ │ │ ├── CIFAR10.py
│ │ │ ├── ImageCLEF.py
│ │ │ ├── Mnist.py
│ │ │ ├── Office31.py
│ │ │ ├── VisDA.py
│ │ │ └── __init__.py
│ │ └── __init__.py
│ ├── Distributed/
│ │ ├── DataParallel.py
│ │ ├── DistributedDataParallel.py
│ │ └── __init__.py
│ ├── Evaluation/
│ │ ├── Classifier/
│ │ │ ├── AUC.py
│ │ │ ├── Accuracy.py
│ │ │ ├── Confusion_Matrix.py
│ │ │ ├── F1.py
│ │ │ ├── Precision.py
│ │ │ ├── Recall.py
│ │ │ ├── Top_k_Accuracy.py
│ │ │ └── __init__.py
│ │ ├── Cluster/
│ │ │ ├── Davies_Bouldin_Score.py
│ │ │ ├── Fowlkes_Mallows_Score.py
│ │ │ ├── Jaccard_Score.py
│ │ │ ├── Rand_Score.py
│ │ │ ├── Silhouette_Score.py
│ │ │ └── __init__.py
│ │ ├── Regressor/
│ │ │ ├── Mean_Absolute_Error.py
│ │ │ ├── Mean_Squared_Error.py
│ │ │ ├── Mean_Squared_Log_Error.py
│ │ │ ├── Median_Absolute_Error.py
│ │ │ └── __init__.py
│ │ └── __init__.py
│ ├── Loss/
│ │ ├── Consistency.py
│ │ ├── Cross_Entropy.py
│ │ ├── EntMin.py
│ │ ├── KL_Divergence.py
│ │ ├── MSE.py
│ │ ├── Semi_Supervised_Loss.py
│ │ └── __init__.py
│ ├── Network/
│ │ ├── AdversarialNet.py
│ │ ├── FT_Transformer.py
│ │ ├── GAT.py
│ │ ├── GCN.py
│ │ ├── ImprovedGAN.py
│ │ ├── LadderNetwork.py
│ │ ├── MLPCLS.py
│ │ ├── MLPReg.py
│ │ ├── ResNet50.py
│ │ ├── ResNet50Fc.py
│ │ ├── SDNE.py
│ │ ├── SSVAE.py
│ │ ├── TextRCNN.py
│ │ ├── WideResNet.py
│ │ └── __init__.py
│ ├── Opitimizer/
│ │ ├── Adam.py
│ │ ├── SGD.py
│ │ └── __init__.py
│ ├── Sampler/
│ │ ├── BatchSampler.py
│ │ ├── DistributedSampler.py
│ │ ├── RandomSampler.py
│ │ ├── SequentialSampler.py
│ │ └── __init__.py
│ ├── Scheduler/
│ │ ├── CosineAnnealingLR.py
│ │ ├── CosineWarmup.py
│ │ ├── InverseDecaySheduler.py
│ │ ├── LinearWarmup.py
│ │ ├── StepLR.py
│ │ └── __init__.py
│ ├── Search/
│ │ ├── BayesSearchCV.py
│ │ ├── EvolutionaryStrategySearchCV.py
│ │ ├── MetaLearnerSearchCV.py
│ │ └── __init__.py
│ ├── Split/
│ │ ├── DataSplit.py
│ │ ├── ViewSplit.py
│ │ └── __init__.py
│ ├── Transform/
│ │ ├── Graph/
│ │ │ ├── GCNNorm.py
│ │ │ ├── GDC.py
│ │ │ ├── NormalizeFeatures.py
│ │ │ ├── SVDFeatureReduction.py
│ │ │ └── __init__.py
│ │ ├── Tabular/
│ │ │ ├── MaxAbsScaler.py
│ │ │ ├── MinMaxScaler.py
│ │ │ ├── StandarScaler.py
│ │ │ └── __init__.py
│ │ ├── Text/
│ │ │ ├── AdjustLength.py
│ │ │ ├── AutoTokenizer.py
│ │ │ ├── CharNGram.py
│ │ │ ├── FastText.py
│ │ │ ├── GloVe.py
│ │ │ ├── Lcut.py
│ │ │ ├── PadSequence.py
│ │ │ ├── Split.py
│ │ │ ├── SynonymsReplacement.py
│ │ │ ├── Tokenizer.py
│ │ │ ├── Truncate.py
│ │ │ ├── Vectors.py
│ │ │ ├── Vocab.py
│ │ │ └── __init__.py
│ │ ├── ToImage.py
│ │ ├── ToNumpy.py
│ │ ├── ToTensor.py
│ │ ├── Vision/
│ │ │ ├── Normalization.py
│ │ │ ├── Resize.py
│ │ │ └── __init__.py
│ │ └── __init__.py
│ ├── __init__.py
│ └── utils.py
├── LICENSE
├── README.md
├── docs/
│ ├── .nojekyll
│ ├── README.md
│ ├── _coverpage.md
│ ├── _navbar.md
│ ├── _sidebar.md
│ ├── index.html
│ └── zh-cn/
│ ├── README.md
│ ├── _coverpage.md
│ └── _sidebar.md
├── environment.yaml
└── setup.py
================================================
FILE CONTENTS
================================================
================================================
FILE: Examples/Assemble_BreastCancer.py
================================================
# Example: Assemble (adaptive semi-supervised boosting ensemble) on the
# BreastCancer tabular dataset, with a probability-calibrated SVM as the
# base learner. Results are appended to a text file.
from LAMDA_SSL.Algorithm.Classification.Assemble import Assemble
from LAMDA_SSL.Dataset.Tabular.BreastCancer import BreastCancer
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from sklearn.svm import SVC
import numpy as np

file = open("../Result/Assemble_BreastCancer.txt", "w")
# Split: 30% test, 10% of the remainder labeled, stratified by class.
dataset=BreastCancer(test_size=0.3,labeled_size=0.1,stratified=True,shuffle=True,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
unlabeled_y=dataset.unlabeled_y
test_X=dataset.test_X
test_y=dataset.test_y
# Pre_transform: fit the scaler on all training features (labeled + unlabeled)
# so labeled, unlabeled and test splits are transformed consistently.
pre_transform=dataset.pre_transform
pre_transform.fit(np.vstack([labeled_X, unlabeled_X]))
labeled_X=pre_transform.transform(labeled_X)
unlabeled_X=pre_transform.transform(unlabeled_X)
test_X=pre_transform.transform(test_X)
evaluation={
    'accuracy':Accuracy(),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
# Base estimator: probability=True is required so Assemble can weight
# pseudo-labels by predicted class probabilities.
SVM=SVC(probability=True)
model=Assemble(T=100,base_estimator=SVM,evaluation=evaluation,verbose=True,file=file)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
file.close()  # flush results to disk; the handle was previously leaked
================================================
FILE: Examples/CIFAR10_imbalance.py
================================================
================================================
FILE: Examples/CoReg_Boston.py
================================================
# Example: CoReg (co-training regression with two kNN regressors) on the
# Boston housing dataset. Results are written to a text file.
from LAMDA_SSL.Algorithm.Regression.CoReg import CoReg
from LAMDA_SSL.Evaluation.Regressor.Mean_Absolute_Error import Mean_Absolute_Error
from LAMDA_SSL.Evaluation.Regressor.Mean_Squared_Error import Mean_Squared_Error
from LAMDA_SSL.Evaluation.Regressor.Mean_Squared_Log_Error import Mean_Squared_Log_Error
from LAMDA_SSL.Dataset.Tabular.Boston import Boston
import numpy as np

file = open("../Result/CoReg_Boston.txt", "w")
# Regression target, so no stratification; 10% test, 30% of the rest labeled.
dataset=Boston(labeled_size=0.3,test_size=0.1,stratified=False,shuffle=True,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
unlabeled_y=dataset.unlabeled_y
test_X=dataset.test_X
test_y=dataset.test_y
# Pre_transform: fit the scaler on all training features (labeled + unlabeled),
# then apply it to every split consistently.
pre_transform=dataset.pre_transform
pre_transform.fit(np.vstack([labeled_X, unlabeled_X]))
labeled_X=pre_transform.transform(labeled_X)
unlabeled_X=pre_transform.transform(unlabeled_X)
test_X=pre_transform.transform(test_X)
evaluation={
    'Mean_Absolute_Error':Mean_Absolute_Error(),
    'Mean_Squared_Error':Mean_Squared_Error(),
    'Mean_Squared_Log_Error':Mean_Squared_Log_Error()
}
model=CoReg(evaluation=evaluation,verbose=True,file=file)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
file.close()  # flush results to disk; the handle was previously leaked
================================================
FILE: Examples/Co_Training_BreastCancer.py
================================================
# Example: Co_Training on the BreastCancer dataset with two feature views.
# NOTE(fix): this script previously imported and used the Wine dataset (a
# copy-paste of Co_Training_Wine.py); it now uses BreastCancer as the file
# name and output path indicate. BreastCancer is binary, so Co_Training's
# default binary mode is correct here (the Wine example passes binary=False).
from LAMDA_SSL.Algorithm.Classification.Co_Training import Co_Training
from LAMDA_SSL.Dataset.Tabular.BreastCancer import BreastCancer
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Split.ViewSplit import ViewSplit
import numpy as np

file = open("../Result/Co_Training_BreastCancer.txt", "w")
dataset=BreastCancer(test_size=0.3,labeled_size=0.1,stratified=True,shuffle=True,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
unlabeled_y=dataset.unlabeled_y
test_X=dataset.test_X
test_y=dataset.test_y
# Fit the scaler on all training features (labeled + unlabeled).
pre_transform=dataset.pre_transform
pre_transform.fit(np.vstack([labeled_X, unlabeled_X]))
labeled_X=pre_transform.transform(labeled_X)
unlabeled_X=pre_transform.transform(unlabeled_X)
test_X=pre_transform.transform(test_X)
# View split: partition the feature columns into two views for co-training.
split_labeled_X=ViewSplit(labeled_X,shuffle=False)
split_unlabeled_X=ViewSplit(unlabeled_X,shuffle=False)
split_test_X=ViewSplit(test_X,shuffle=False)
evaluation={
    'accuracy':Accuracy(),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
model=Co_Training(evaluation=evaluation,verbose=True,file=file)
model.fit(X=split_labeled_X,y=labeled_y,unlabeled_X=split_unlabeled_X)
performance=model.evaluate(X=split_test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
file.close()  # flush results to disk; the handle was previously leaked
================================================
FILE: Examples/Co_Training_Wine.py
================================================
# Example: Co_Training on the Wine dataset with two feature views.
# Wine has three classes, so binary=False is passed to Co_Training.
from LAMDA_SSL.Algorithm.Classification.Co_Training import Co_Training
from LAMDA_SSL.Dataset.Tabular.Wine import Wine
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Split.ViewSplit import ViewSplit
import numpy as np

file = open("../Result/Co_Training_Wine.txt", "w")
dataset=Wine(test_size=0.3,labeled_size=0.1,stratified=True,shuffle=True,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
unlabeled_y=dataset.unlabeled_y
test_X=dataset.test_X
test_y=dataset.test_y
# Fit the scaler on all training features (labeled + unlabeled).
pre_transform=dataset.pre_transform
pre_transform.fit(np.vstack([labeled_X, unlabeled_X]))
labeled_X=pre_transform.transform(labeled_X)
unlabeled_X=pre_transform.transform(unlabeled_X)
test_X=pre_transform.transform(test_X)
# View split: partition the feature columns into two views for co-training.
split_labeled_X=ViewSplit(labeled_X,shuffle=False)
split_unlabeled_X=ViewSplit(unlabeled_X,shuffle=False)
split_test_X=ViewSplit(test_X,shuffle=False)
evaluation={
    'accuracy':Accuracy(),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
model=Co_Training(binary=False,evaluation=evaluation,verbose=True,file=file)
model.fit(X=split_labeled_X,y=labeled_y,unlabeled_X=split_unlabeled_X)
performance=model.evaluate(X=split_test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
file.close()  # flush results to disk; the handle was previously leaked
================================================
FILE: Examples/Constrained_Seed_k_means_Wine.py
================================================
# Example: Constrained_Seed_k_means on Wine — seeds the k-means clusters with
# the labeled samples, then clusters all data transductively (k=3 classes).
from LAMDA_SSL.Algorithm.Clustering.Constrained_Seed_k_means import Constrained_Seed_k_means
from LAMDA_SSL.Evaluation.Cluster.Davies_Bouldin_Score import Davies_Bouldin_Score
from LAMDA_SSL.Evaluation.Cluster.Fowlkes_Mallows_Score import Fowlkes_Mallows_Score
from LAMDA_SSL.Evaluation.Cluster.Jaccard_Score import Jaccard_Score
from LAMDA_SSL.Evaluation.Cluster.Silhouette_Score import Silhouette_Score
from LAMDA_SSL.Evaluation.Cluster.Rand_Score import Rand_Score
from LAMDA_SSL.Dataset.Tabular.Wine import Wine
import numpy as np

file = open("../Result/Constrained_Seed_k_means_Wine.txt", "w")
# No test split: clustering is evaluated transductively on the training data.
dataset = Wine(labeled_size=0.2, stratified=True, shuffle=True,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
unlabeled_y=dataset.unlabeled_y
# Fit the scaler on all training features (labeled + unlabeled).
pre_transform=dataset.pre_transform
pre_transform.fit(np.vstack([labeled_X, unlabeled_X]))
labeled_X=pre_transform.transform(labeled_X)
unlabeled_X=pre_transform.transform(unlabeled_X)
evaluation={
    'Fowlkes_Mallows_Score':Fowlkes_Mallows_Score(),
    'Jaccard_Score':Jaccard_Score(average='macro'),
    'Rand_Score':Rand_Score(),
    'Davies_Bouldin_Score':Davies_Bouldin_Score(),
    'Silhouette_Score':Silhouette_Score()
}
model = Constrained_Seed_k_means(k=3,evaluation=evaluation,verbose=True,file=file)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X)
# Transductive evaluation: ground truth is the concatenation of the labeled
# and unlabeled targets, in the same order the model saw the samples.
performance=model.evaluate(y=np.hstack([labeled_y, unlabeled_y]),Transductive=True)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
file.close()  # flush results to disk; the handle was previously leaked
================================================
FILE: Examples/Constrained_k_means_Wine.py
================================================
# Example: Constrained_k_means on Wine — supervision is expressed as pairwise
# must-link (same class) and cannot-link (different class) constraints built
# from the labeled subset, rather than as seed labels.
from LAMDA_SSL.Algorithm.Clustering.Constrained_k_means import Constrained_k_means
from LAMDA_SSL.Evaluation.Cluster.Davies_Bouldin_Score import Davies_Bouldin_Score
from LAMDA_SSL.Evaluation.Cluster.Fowlkes_Mallows_Score import Fowlkes_Mallows_Score
from LAMDA_SSL.Evaluation.Cluster.Jaccard_Score import Jaccard_Score
from LAMDA_SSL.Evaluation.Cluster.Silhouette_Score import Silhouette_Score
from LAMDA_SSL.Evaluation.Cluster.Rand_Score import Rand_Score
from LAMDA_SSL.Dataset.Tabular.Wine import Wine
import numpy as np

file = open("../Result/Constrained_k_means_Wine.txt", "w")
dataset=Wine(labeled_size=0.2,stratified=True,shuffle=True,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
unlabeled_y=dataset.unlabeled_y
# Fit the scaler on all training features (labeled + unlabeled).
pre_transform=dataset.pre_transform
pre_transform.fit(np.vstack([labeled_X, unlabeled_X]))
labeled_X=pre_transform.transform(labeled_X)
unlabeled_X=pre_transform.transform(unlabeled_X)
evaluation={
    'Fowlkes_Mallows_Score':Fowlkes_Mallows_Score(),
    'Jaccard_Score':Jaccard_Score(average='macro'),
    'Rand_Score':Rand_Score(),
    'Davies_Bouldin_Score':Davies_Bouldin_Score(),
    'Silhouette_Score':Silhouette_Score()
}
# Build all pairwise constraints over the labeled samples: {i, j} goes to the
# must-link list when the labels agree, otherwise to the cannot-link list.
ml=[]
cl=[]
for i in range(labeled_X.shape[0]):
    for j in range(i+1,labeled_X.shape[0]):
        if labeled_y[i]==labeled_y[j]:
            ml.append({i,j})
        else:
            cl.append({i,j})
model=Constrained_k_means(k=3,evaluation=evaluation,verbose=True,file=file)
# Labeled samples come first in X, so the constraint indices line up.
model.fit(X=np.vstack((labeled_X,unlabeled_X)),ml=ml,cl=cl)
performance=model.evaluate(y=np.hstack([labeled_y, unlabeled_y]),Transductive=True)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
file.close()  # flush results to disk; the handle was previously leaked
================================================
FILE: Examples/FixMatch_BreastCancer.py
================================================
# Example: FixMatch adapted to tabular data (BreastCancer) — Gaussian noise of
# different magnitudes plays the role of weak/strong augmentation, and an
# FT-Transformer is the backbone network.
from LAMDA_SSL.Algorithm.Classification.FixMatch import FixMatch
from LAMDA_SSL.Dataset.Tabular.BreastCancer import BreastCancer
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Network.FT_Transformer import FT_Transformer
import numpy as np
from LAMDA_SSL.Opitimizer.Adam import Adam
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
from LAMDA_SSL.Transform.ToTensor import ToTensor
from LAMDA_SSL.Augmentation.Tabular.Noise import Noise

file = open("../Result/FixMatch_BreastCancer.txt", "w")
dataset=BreastCancer(test_size=0.3,labeled_size=0.1,stratified=True,shuffle=True,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
unlabeled_y=dataset.unlabeled_y
test_X=dataset.test_X
test_y=dataset.test_y
# Pre_transform: fit the scaler on all training features (labeled + unlabeled).
pre_transform=dataset.pre_transform
pre_transform.fit(np.vstack([labeled_X, unlabeled_X]))
labeled_X=pre_transform.transform(labeled_X)
unlabeled_X=pre_transform.transform(unlabeled_X)
test_X=pre_transform.transform(test_X)
# Wrap the numpy arrays as torch datasets; ToTensor converts each sample.
labeled_dataset=LabeledDataset(transform=ToTensor())
unlabeled_dataset=UnlabeledDataset(transform=ToTensor())
test_dataset=UnlabeledDataset(transform=ToTensor())
# Sample with replacement so 10000 iterations of batch size 64 are available.
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(10000))
evaluation={
    'accuracy':Accuracy(),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
# Weak/strong augmentation for tabular data = small/large Gaussian noise.
weak_augmentation=Noise(0.1)
strong_augmentation=Noise(0.2)
augmentation={
    'weak_augmentation':weak_augmentation,
    'strong_augmentation':strong_augmentation
}
model=FixMatch(labeled_dataset=labeled_dataset, unlabeled_dataset=unlabeled_dataset,
               test_dataset=test_dataset, device='cuda:0', augmentation=augmentation,
               network=FT_Transformer(dim_in=labeled_X.shape[1], num_classes=2),num_it_epoch=10000,labeled_sampler=labeled_sampler, optimizer=Adam(lr=1e-4),
               scheduler=None, weight_decay=1e-5,verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
file.close()  # flush results to disk; the handle was previously leaked
================================================
FILE: Examples/FixMatch_CIFAR10.py
================================================
# Example: FixMatch on CIFAR-10 with the standard recipe — WideResNet-28-2,
# SGD + cosine warmup over 2**20 iterations, flip/crop as weak augmentation
# and RandAugment + Cutout as strong augmentation.
from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip
from LAMDA_SSL.Augmentation.Vision.RandomCrop import RandomCrop
from LAMDA_SSL.Augmentation.Vision.RandAugment import RandAugment
from LAMDA_SSL.Augmentation.Vision.Cutout import Cutout
from LAMDA_SSL.Dataset.Vision.CIFAR10 import CIFAR10
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineWarmup import CosineWarmup
from LAMDA_SSL.Network.WideResNet import WideResNet
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Algorithm.Classification.FixMatch import FixMatch
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from sklearn.pipeline import Pipeline
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
# dataset
# NOTE: raw string avoids invalid escape sequences (\D, \c) in the Windows path.
dataset=CIFAR10(root=r'..\Download\cifar-10-python',labeled_size=4000,stratified=False,shuffle=True,download=False,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
# Reuse the dataset's default transforms for each split's wrapper dataset.
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
# sampler: random with replacement for training, sequential for eval.
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
#dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# network
network=WideResNet(num_classes=10,depth=28,widen_factor=2,drop_rate=0)
# optimizer
optimizer=SGD(lr=0.03,momentum=0.9,nesterov=True)
# scheduler
scheduler=CosineWarmup(num_cycles=7./16,num_training_steps=2**20)
# augmentation: weak = flip + crop; strong = flip + crop + RandAugment + Cutout.
weak_augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                            ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                            ])
strong_augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                              ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                              ('RandAugment',RandAugment(n=2,m=10,num_bins=10)),
                              ('Cutout',Cutout(v=0.5,fill=(127, 127, 127))),
                              ])
augmentation={
    'weak_augmentation':weak_augmentation,
    'strong_augmentation':strong_augmentation
}
# evalutation
evaluation={
    'accuracy':Accuracy(),
    'top_5_accuracy':Top_k_Accurary(k=5),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
file = open("../Result/FixMatch_CIFAR10.txt", "w")
model=FixMatch(threshold=0.95,lambda_u=1.0,T=1.0,mu=7,weight_decay=5e-4,ema_decay=0.999,
               epoch=1,num_it_epoch=2**20,num_it_total=2**20,eval_it=2000,device='cpu',
               labeled_dataset=labeled_dataset,
               unlabeled_dataset=unlabeled_dataset,
               valid_dataset=valid_dataset,
               test_dataset=test_dataset,
               labeled_sampler=labeled_sampler,
               unlabeled_sampler=unlabeled_sampler,
               valid_sampler=valid_sampler,
               test_sampler=test_sampler,
               labeled_dataloader=labeled_dataloader,
               unlabeled_dataloader=unlabeled_dataloader,
               valid_dataloader=valid_dataloader,
               test_dataloader=test_dataloader,
               augmentation=augmentation,
               network=network,
               optimizer=optimizer,
               scheduler=scheduler,
               evaluation=evaluation,
               file=file,
               verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
file.close()  # flush results to disk; the handle was previously leaked
================================================
FILE: Examples/FixMatch_SST2.py
================================================
# Example: FixMatch on the SST-2 sentiment dataset — TextRCNN over GloVe
# embeddings; random word swap is the weak augmentation and TF-IDF-guided
# word replacement the strong augmentation.
from LAMDA_SSL.Dataset.Text.SST2 import SST2
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Algorithm.Classification.FixMatch import FixMatch
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
from LAMDA_SSL.Augmentation.Text.TFIDFReplacement import TFIDFReplacement
from LAMDA_SSL.Augmentation.Text.RandomSwap import RandomSwap
from LAMDA_SSL.Network.TextRCNN import TextRCNN
from LAMDA_SSL.Transform.Text.GloVe import Glove
# dataset
# NOTE: raw strings avoid invalid escape sequences (\D, \S, \G, \.) in the
# Windows paths; the runtime values are unchanged.
dataset=SST2(root=r'..\Download\SST2',stratified=True,shuffle=True,download=False,vectors=Glove(cache=r'..\Download\Glove\.vector_cache'),length=50,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
# Reuse the dataset's default transforms for each split's wrapper dataset.
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
# sampler: random with replacement for training, sequential for eval.
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
#dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# augmentation: weak = swap one word pair; strong = TF-IDF-based replacement.
weak_augmentation=RandomSwap(n=1)
strong_augmentation=TFIDFReplacement(text=labeled_X,p=0.7)
augmentation={
    'weak_augmentation':weak_augmentation,
    'strong_augmentation':strong_augmentation
}
# optimizer
optimizer=SGD(lr=0.03,momentum=0.9,nesterov=True)
# scheduler
scheduler=CosineAnnealingLR(eta_min=0,T_max=2**20)
# network: vocabulary size and embedding dim come from the loaded GloVe vectors.
network=TextRCNN(n_vocab=dataset.vectors.vec.vectors.shape[0],embedding_dim=dataset.vectors.vec.vectors.shape[1],
                 pretrained_embeddings=dataset.vectors.vec.vectors,len_seq=50,
                 num_classes=2)
# evalutation
evaluation={
    'accuracy':Accuracy(),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
file = open("../Result/FixMatch_SST2.txt", "w")
model=FixMatch(threshold=0.95,lambda_u=1.0,T=0.5,mu=7,ema_decay=0.999,weight_decay=5e-4,
               epoch=1,num_it_epoch=2**20,num_it_total=2**20,eval_it=2000,device='cpu',
               labeled_dataset=labeled_dataset, unlabeled_dataset=unlabeled_dataset,
               valid_dataset=valid_dataset, test_dataset=test_dataset,
               labeled_sampler=labeled_sampler,unlabeled_sampler=unlabeled_sampler,
               valid_sampler=valid_sampler,test_sampler=test_sampler,
               labeled_dataloader=labeled_dataloader, unlabeled_dataloader=unlabeled_dataloader,
               valid_dataloader=valid_dataloader, test_dataloader=test_dataloader,
               augmentation=augmentation,network=network,optimizer=optimizer,scheduler=scheduler,
               evaluation=evaluation,verbose=True,file=file)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
file.close()  # flush results to disk; the handle was previously leaked
================================================
FILE: Examples/FlexMatch_CIFAR10.py
================================================
# Example: FlexMatch on CIFAR-10 with 4000 labeled samples.
from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip
from LAMDA_SSL.Augmentation.Vision.RandomCrop import RandomCrop
from LAMDA_SSL.Augmentation.Vision.RandAugment import RandAugment
from LAMDA_SSL.Augmentation.Vision.Cutout import Cutout
from LAMDA_SSL.Dataset.Vision.CIFAR10 import CIFAR10
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Network.WideResNet import WideResNet
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Algorithm.Classification.FlexMatch import FlexMatch
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from sklearn.pipeline import Pipeline
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
from LAMDA_SSL.Scheduler.CosineWarmup import CosineWarmup
# dataset
# Raw string avoids invalid-escape SyntaxWarnings for the Windows-style path
# (the runtime value is unchanged).
dataset=CIFAR10(root=r'..\Download\cifar-10-python',labeled_size=4000,stratified=True,shuffle=True,download=False,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
# Wrap the raw splits in dataset objects carrying the CIFAR10 default transforms.
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
# sampler
# num_samples = batch_size * num_it_total so the labeled stream covers the full run.
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
#dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# augmentation
# FlexMatch consumes a weak view (flip+crop) and a strong view (adds RandAugment+Cutout).
weak_augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                              ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                              ])
strong_augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                              ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                              ('RandAugment',RandAugment(n=2,m=10,num_bins=10)),
                              ('Cutout',Cutout(v=0.5,fill=(127, 127, 127))),
                              ])
augmentation={
    'weak_augmentation':weak_augmentation,
    'strong_augmentation':strong_augmentation
}
# optimizer
optimizer=SGD(lr=0.03,momentum=0.9,nesterov=True)
# scheduler
scheduler=CosineWarmup(num_cycles=7./16,num_training_steps=2**20)
# network
network=WideResNet(num_classes=10,depth=28,widen_factor=2,drop_rate=0)
# evalutation
evaluation={
    'accuracy':Accuracy(),
    'top_5_accuracy':Top_k_Accurary(k=5),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
file = open("../Result/FlexMatch_CIFAR10.txt", "w")
model=FlexMatch(threshold=0.95,lambda_u=1.0,T=1.0,num_classes=10,
               use_hard_labels=True,threshold_warmup=True,mu=7,weight_decay=5e-4,ema_decay=0.999,
               epoch=1,num_it_epoch=2**20,num_it_total=2**20,eval_it=2000,device='cpu',
               labeled_dataset=labeled_dataset,
               unlabeled_dataset=unlabeled_dataset,
               valid_dataset=valid_dataset,test_dataset=test_dataset,
               labeled_sampler=labeled_sampler,
               unlabeled_sampler=unlabeled_sampler,
               valid_sampler=valid_sampler,test_sampler=test_sampler,
               labeled_dataloader=labeled_dataloader,
               unlabeled_dataloader=unlabeled_dataloader,valid_dataloader=valid_dataloader,test_dataloader=test_dataloader,
               augmentation=augmentation,network=network,optimizer=optimizer,scheduler=scheduler,evaluation=evaluation,
               verbose=True,file=file)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Close the result file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/FlexMatch_SST2.py
================================================
# Example: FlexMatch on the SST-2 sentiment dataset with a TextRCNN backbone.
from LAMDA_SSL.Dataset.Text.SST2 import SST2
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Algorithm.Classification.FlexMatch import FlexMatch
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
from LAMDA_SSL.Augmentation.Text.TFIDFReplacement import TFIDFReplacement
from LAMDA_SSL.Augmentation.Text.RandomSwap import RandomSwap
from LAMDA_SSL.Network.TextRCNN import TextRCNN
from LAMDA_SSL.Transform.Text.GloVe import Glove
# dataset
# Raw strings avoid invalid-escape SyntaxWarnings for the Windows-style paths.
dataset=SST2(root=r'..\Download\SST2',stratified=True,shuffle=True,download=False,vectors=Glove(cache=r'..\Download\Glove\.vector_cache'),length=50,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
# sampler
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
#dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# augmentation
# Weak view: swap one word pair; strong view: TF-IDF-weighted word replacement.
weak_augmentation=RandomSwap(n=1)
strong_augmentation=TFIDFReplacement(text=labeled_X,p=0.7)
augmentation={
    'weak_augmentation':weak_augmentation,
    'strong_augmentation':strong_augmentation
}
# optimizer
optimizer=SGD(lr=0.03,momentum=0.9,nesterov=True)
# scheduler
scheduler=CosineAnnealingLR(eta_min=0,T_max=2**20)
# network
# TextRCNN initialized from the pretrained GloVe embedding matrix.
network=TextRCNN(n_vocab=dataset.vectors.vec.vectors.shape[0],embedding_dim=dataset.vectors.vec.vectors.shape[1],
                 pretrained_embeddings=dataset.vectors.vec.vectors,len_seq=50,
                 num_classes=2)
# evalutation
evaluation={
    'accuracy':Accuracy(),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
# Fixed copy-paste bug: this script previously wrote to FixMatch_SST2.txt,
# overwriting the FixMatch example's results.
file = open("../Result/FlexMatch_SST2.txt", "w")
model=FlexMatch(threshold=0.95,lambda_u=1.0,T=1.0,mu=7,ema_decay=0.999,weight_decay=5e-4,
               epoch=1,num_it_epoch=2**20,num_it_total=2**20,eval_it=2000,device='cpu',
               labeled_dataset=labeled_dataset, unlabeled_dataset=unlabeled_dataset,
               valid_dataset=valid_dataset, test_dataset=test_dataset,
               labeled_sampler=labeled_sampler,unlabeled_sampler=unlabeled_sampler,
               valid_sampler=valid_sampler,test_sampler=test_sampler,
               labeled_dataloader=labeled_dataloader, unlabeled_dataloader=unlabeled_dataloader,
               valid_dataloader=valid_dataloader, test_dataloader=test_dataloader,
               augmentation=augmentation,network=network,optimizer=optimizer,scheduler=scheduler,
               evaluation=evaluation,verbose=True,file=file)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Close the result file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/GAT_Cora.py
================================================
# Example: Graph Attention Network (GAT) for transductive node
# classification on the Cora citation graph.
from LAMDA_SSL.Dataset.Graph.Cora import Cora
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Opitimizer.Adam import Adam
from LAMDA_SSL.Algorithm.Classification.GAT import GAT
file = open("../Result/GAT_Cora.txt", "w")
# Raw string avoids invalid-escape SyntaxWarnings for the Windows-style path.
dataset=Cora(labeled_size=0.2,root=r'..\Download\Cora',random_state=0,default_transforms=True)
data=dataset.data
data=dataset.transform.fit_transform(data)
evaluation={
    'accuracy':Accuracy(),
    'top_5_accuracy':Top_k_Accurary(k=5),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
optimizer=Adam(lr=0.01)
# dim_in=1433 matches the Cora bag-of-words feature dimension.
model=GAT(
    dim_in=1433,
    heads=16,
    dropout=0,
    epoch=500,
    eval_epoch=200,
    weight_decay=5e-4,
    device='cpu',
    optimizer=optimizer,
    evaluation=evaluation,
    file=file,
    verbose=True
)
# Transductive setting: the whole graph is given; masks select the splits.
model.fit(data,valid_X=data.val_mask)
performance=model.evaluate(X=data.test_mask)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Close the result file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/GCN_Cora.py
================================================
# Example: Graph Convolutional Network (GCN) for transductive node
# classification on the Cora citation graph.
from LAMDA_SSL.Dataset.Graph.Cora import Cora
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Opitimizer.Adam import Adam
from LAMDA_SSL.Algorithm.Classification.GCN import GCN
file = open("../Result/GCN_Cora.txt", "w")
# Raw string avoids invalid-escape SyntaxWarnings for the Windows-style path.
dataset=Cora(labeled_size=0.2,root=r'..\Download\Cora',random_state=0,default_transforms=True)
data=dataset.data
data=dataset.transform.fit_transform(data)
evaluation={
    'accuracy':Accuracy(),
    'top_5_accuracy':Top_k_Accurary(k=5),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
optimizer=Adam(lr=0.01)
# dim_in=1433 matches the Cora bag-of-words feature dimension.
model=GCN(
    dim_in=1433,
    normalize=True,
    epoch=500,
    eval_epoch=200,
    weight_decay=5e-4,
    device='cpu',
    optimizer=optimizer,
    evaluation=evaluation,
    file=file,
    verbose=True
)
# Transductive setting: the whole graph is given; masks select the splits.
model.fit(data,valid_X=data.val_mask)
performance=model.evaluate(X=data.test_mask)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Close the result file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/ICTReg_Boston.py
================================================
# Example: ICT for regression (ICTReg) on the Boston housing dataset
# with an MLP regressor and Gaussian-noise augmentation.
from LAMDA_SSL.Augmentation.Tabular.Noise import Noise
import torch.nn as nn
from LAMDA_SSL.Dataset.Tabular.Boston import Boston
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR
from LAMDA_SSL.Network.MLPReg import MLPReg
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Algorithm.Regression.ICTReg import ICTReg
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from LAMDA_SSL.Evaluation.Regressor.Mean_Absolute_Error import Mean_Absolute_Error
from LAMDA_SSL.Evaluation.Regressor.Mean_Squared_Error import Mean_Squared_Error
from LAMDA_SSL.Evaluation.Regressor.Mean_Squared_Log_Error import Mean_Squared_Log_Error
import numpy as np
# dataset
dataset=Boston(test_size=0.3,labeled_size=0.1,stratified=False,shuffle=True,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
# Pre_transform
# Fit the scaler on labeled + unlabeled features, then apply it to every split.
pre_transform=dataset.pre_transform
pre_transform.fit(np.vstack([labeled_X, unlabeled_X]))
labeled_X=pre_transform.transform(labeled_X)
unlabeled_X=pre_transform.transform(unlabeled_X)
test_X=pre_transform.transform(test_X)
# Bug fix: valid_X was previously passed to fit() without the pre_transform,
# so validation ran on unscaled features while train/test were scaled.
valid_X=pre_transform.transform(valid_X)
labeled_dataset=LabeledDataset(transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(transform=dataset.test_transform)
# sampler
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
#dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# augmentation
augmentation=Noise(noise_level=0.01)
# optimizer
optimizer=SGD(lr=0.001,momentum=0.9,nesterov=True)
scheduler=CosineAnnealingLR(eta_min=0,T_max=4000)
# network
network=MLPReg(hidden_dim=[100,50,10],activations=[nn.ReLU(),nn.ReLU(),nn.ReLU()],dim_in=labeled_X.shape[-1])
evaluation={
    'Mean_Absolute_Error':Mean_Absolute_Error(),
    'Mean_Squared_Error':Mean_Squared_Error(),
    'Mean_Squared_Log_Error':Mean_Squared_Log_Error()
}
file = open("../Result/ICTReg_Boston.txt", "w")
model=ICTReg(alpha=0.5,lambda_u=0.001,warmup=1/64,
               mu=1,weight_decay=5e-4,ema_decay=0.999,
               epoch=1,num_it_epoch=4000,
               num_it_total=4000,
               eval_it=200,device='cpu',
               labeled_dataset=labeled_dataset,
               unlabeled_dataset=unlabeled_dataset,
               valid_dataset=valid_dataset,
               test_dataset=test_dataset,
               labeled_sampler=labeled_sampler,
               unlabeled_sampler=unlabeled_sampler,
               valid_sampler=valid_sampler,
               test_sampler=test_sampler,
               labeled_dataloader=labeled_dataloader,
               unlabeled_dataloader=unlabeled_dataloader,
               valid_dataloader=valid_dataloader,
               test_dataloader=test_dataloader,
               augmentation=augmentation,network=network,
               optimizer=optimizer,scheduler=scheduler,
               evaluation=evaluation,file=file,verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Close the result file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/ICT_CIFAR10.py
================================================
# Example: Interpolation Consistency Training (ICT) on CIFAR-10
# with 4000 labeled samples and a WideResNet-28-2 backbone.
from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip
from LAMDA_SSL.Augmentation.Vision.RandomCrop import RandomCrop
from LAMDA_SSL.Dataset.Vision.CIFAR10 import CIFAR10
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR
from LAMDA_SSL.Network.WideResNet import WideResNet
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Algorithm.Classification.ICT import ICT
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from sklearn.pipeline import Pipeline
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
# dataset
# Raw string avoids invalid-escape SyntaxWarnings for the Windows-style path.
dataset=CIFAR10(root=r'..\Download\cifar-10-python',labeled_size=4000,stratified=True,shuffle=True,download=False,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
# sampler
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
#dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# augmentation
# ICT uses a single weak augmentation (flip + crop); mixing happens in the algorithm.
augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                       ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                       ])
# optimizer
optimizer=SGD(lr=0.03,momentum=0.9,nesterov=True)
# scheduler
scheduler=CosineAnnealingLR(eta_min=0,T_max=2**20)
# network
network=WideResNet(num_classes=10,depth=28,widen_factor=2,drop_rate=0)
# evalutation
evaluation={
    'accuracy':Accuracy(),
    'top_5_accuracy':Top_k_Accurary(k=5),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
file = open("../Result/ICT_CIFAR10.txt", "w")
model=ICT(alpha=0.5,lambda_u=100,warmup=1/64,mu=1,weight_decay=5e-4,ema_decay=0.999,
               epoch=1,num_it_epoch=2**20,num_it_total=2**20,eval_it=2000,device='cpu',
               labeled_dataset=labeled_dataset,
               unlabeled_dataset=unlabeled_dataset,
               valid_dataset=valid_dataset,
               test_dataset=test_dataset,
               labeled_sampler=labeled_sampler,
               unlabeled_sampler=unlabeled_sampler,
               valid_sampler=valid_sampler,
               test_sampler=test_sampler,
               labeled_dataloader=labeled_dataloader,
               unlabeled_dataloader=unlabeled_dataloader,
               valid_dataloader=valid_dataloader,
               test_dataloader=test_dataloader,
               augmentation=augmentation,
               network=network,
               optimizer=optimizer,
               scheduler=scheduler,
               evaluation=evaluation,
               file=file,
               verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Close the result file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/ImprovedGAN_MNIST.py
================================================
# Example: ImprovedGAN semi-supervised classification on MNIST,
# plus saving 100 generated sample images at the end.
import os
from LAMDA_SSL.Opitimizer.Adam import Adam
from LAMDA_SSL.Transform.ToImage import ToImage
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
from LAMDA_SSL.Algorithm.Classification.ImprovedGAN import ImprovedGAN
import torch.nn as nn
from LAMDA_SSL.Dataset.Vision.Mnist import Mnist
# Raw string avoids invalid-escape SyntaxWarnings for the Windows-style path.
dataset=Mnist(root=r'..\Download\mnist',labeled_size=6000,shuffle=True,download=False,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
valid_X=dataset.valid_X
valid_y=dataset.valid_y
test_X=dataset.test_X
test_y=dataset.test_y
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
#dataloader
labeled_dataloader=LabeledDataLoader(batch_size=100,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=100,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=100,num_workers=0,drop_last=False)
# sampler
# 540 iterations per epoch at batch size 100 covers the 54000 training images.
labeled_sampler=RandomSampler(replacement=True,num_samples=100*540)
unlabeled_sampler=RandomSampler(replacement=False)
test_sampler=SequentialSampler()
valid_sampler=SequentialSampler()
# optimizer
optimizer=Adam(lr=3e-4)
# evalutation
evaluation={
    'Accuracy':Accuracy(),
    'Top_5_Accuracy':Top_k_Accurary(k=5),
    'Precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_Matrix':Confusion_Matrix(normalize='true')
}
file = open("../Result/ImprovedGAN_MNIST.txt", "w")
model=ImprovedGAN(lambda_u=1,
                  dim_z=100,dim_in=(28,28),hidden_G=[500,500],
                  hidden_D=[1000,500,250,250,250],
                  noise_level=[0.3, 0.5, 0.5, 0.5, 0.5, 0.5],
                  activations_G=[nn.Softplus(), nn.Softplus(), nn.Softplus()],
                  activations_D=[nn.ReLU(), nn.ReLU(), nn.ReLU(), nn.ReLU(), nn.ReLU()],
                  mu=1,epoch=100,num_it_epoch=540,
                  num_it_total=540*100,eval_it=2000,device='cpu',
                  labeled_dataset=labeled_dataset, unlabeled_dataset=unlabeled_dataset, valid_dataset=valid_dataset,
                  test_dataset=test_dataset,
                  labeled_sampler=labeled_sampler, unlabeled_sampler=unlabeled_sampler, valid_sampler=valid_sampler,
                  test_sampler=test_sampler,
                  labeled_dataloader=labeled_dataloader, unlabeled_dataloader=unlabeled_dataloader,
                  valid_dataloader=valid_dataloader, test_dataloader=test_dataloader,
                  optimizer=optimizer,evaluation=evaluation,file=file,verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Close the result file so buffered output is flushed to disk.
file.close()
# Sample 100 images from the trained generator and save them as JPEGs.
# Create the output directory first so the save loop cannot fail on a
# missing path (previously an unconditional crash on a fresh checkout).
fake_X=model.generate(num=100)
os.makedirs('../Result/Imgs/ImprovedGAN', exist_ok=True)
for idx in range(100):
    # Generator output is assumed to be in [0, 1); scale to pixel range — TODO confirm.
    img=ToImage()(fake_X[idx]*256)
    img.convert('RGB').save('../Result/Imgs/ImprovedGAN/' + str(idx) + '.jpg')
================================================
FILE: Examples/LabelPropagation_BreastCancer.py
================================================
# Example: graph-based LabelPropagation on the Breast Cancer tabular dataset.
from LAMDA_SSL.Algorithm.Classification.LabelPropagation import LabelPropagation
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Dataset.Tabular.BreastCancer import BreastCancer
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
import numpy as np
file = open("../Result/LabelPropagation_BreastCancer.txt", "w")
dataset=BreastCancer(test_size=0.3,labeled_size=0.1,stratified=True,shuffle=True,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
unlabeled_y=dataset.unlabeled_y
test_X=dataset.test_X
test_y=dataset.test_y
# Pre_transform
# Fit the scaler on labeled + unlabeled features, then apply it to every split.
pre_transform=dataset.pre_transform
pre_transform.fit(np.vstack([labeled_X, unlabeled_X]))
labeled_X=pre_transform.transform(labeled_X)
unlabeled_X=pre_transform.transform(unlabeled_X)
test_X=pre_transform.transform(test_X)
evaluation={
    'accuracy':Accuracy(),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
model=LabelPropagation(gamma=1,max_iter=10000,evaluation=evaluation,file=file,verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X)
# Transductive=False: predict on held-out test data rather than on the
# unlabeled training pool.
performance=model.evaluate(X=test_X,y=test_y,Transductive=False)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Close the result file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/LabelSpreading_BreastCancer.py
================================================
# Example: graph-based LabelSpreading on the Breast Cancer tabular dataset.
from LAMDA_SSL.Algorithm.Classification.LabelSpreading import LabelSpreading
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Dataset.Tabular.BreastCancer import BreastCancer
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
import numpy as np
file = open("../Result/LabelSpreading_BreastCancer.txt", "w")
dataset=BreastCancer(test_size=0.3,labeled_size=0.1,stratified=True,shuffle=True,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
unlabeled_y=dataset.unlabeled_y
test_X=dataset.test_X
test_y=dataset.test_y
# Pre_transform
# Fit the scaler on labeled + unlabeled features, then apply it to every split.
pre_transform=dataset.pre_transform
pre_transform.fit(np.vstack([labeled_X, unlabeled_X]))
labeled_X=pre_transform.transform(labeled_X)
unlabeled_X=pre_transform.transform(unlabeled_X)
test_X=pre_transform.transform(test_X)
evaluation={
    'accuracy':Accuracy(),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
model=LabelSpreading(gamma=1,max_iter=10000,evaluation=evaluation,file=file,verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X)
# Transductive=False: predict on held-out test data rather than on the
# unlabeled training pool.
performance=model.evaluate(X=test_X,y=test_y,Transductive=False)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Close the result file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/LadderNetwork_MNIST.py
================================================
# Example: Ladder Network semi-supervised classification on MNIST.
from LAMDA_SSL.Algorithm.Classification.LadderNetwork import LadderNetwork
from LAMDA_SSL.Opitimizer.Adam import Adam
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
from LAMDA_SSL.Scheduler.LinearWarmup import LinearWarmup
from LAMDA_SSL.Dataset.Vision.Mnist import Mnist
import torch.nn as nn
# Raw string avoids invalid-escape SyntaxWarnings for the Windows-style path.
dataset=Mnist(root=r'..\Download\mnist',labeled_size=6000,stratified=True,shuffle=True,download=False,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
#dataloader
labeled_dataloader=LabeledDataLoader(batch_size=100,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=100,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=100,num_workers=0,drop_last=False)
# sampler
labeled_sampler=RandomSampler(replacement=True,num_samples=100*540)
unlabeled_sampler=RandomSampler(replacement=False)
test_sampler=SequentialSampler()
valid_sampler=SequentialSampler()
# optimizer
optimizer=Adam(lr=0.02)
# scheduler
# NOTE(review): num_warmup_steps (15) exceeds num_training_steps (10) — the
# schedule never leaves the warmup phase; confirm these values are intended.
scheduler=LinearWarmup(num_warmup_steps=15,num_training_steps=10,verbose=False)
# evalutation
evaluation={
    'accuracy':Accuracy(),
    'top_5_accuracy':Top_k_Accurary(k=5),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
file = open("../Result/LadderNetwork_MNIST.txt", "w")
# Per-layer lambda_u weights the denoising cost of each decoder layer.
model=LadderNetwork(noise_std=0.2,
                    lambda_u=[0.1, 0.1, 0.1, 0.1, 0.1, 10., 1000.],
                    dim_encoder=[1000, 500, 250, 250, 250],
                    encoder_activations=[nn.ReLU(), nn.ReLU(), nn.ReLU(), nn.ReLU(), nn.ReLU()],
                    mu=1,weight_decay=5e-4,
                    epoch=10,num_it_epoch=540,num_it_total=540*10,eval_epoch=1,
                    optimizer=optimizer,scheduler=scheduler,evaluation=evaluation,device='cpu',
                    labeled_dataset=labeled_dataset, unlabeled_dataset=unlabeled_dataset, valid_dataset=valid_dataset,
                    test_dataset=test_dataset,
                    labeled_sampler=labeled_sampler, unlabeled_sampler=unlabeled_sampler, valid_sampler=valid_sampler,
                    test_sampler=test_sampler,
                    labeled_dataloader=labeled_dataloader, unlabeled_dataloader=unlabeled_dataloader,
                    valid_dataloader=valid_dataloader, test_dataloader=test_dataloader,
                    file=file,verbose=True)
# Fixed: the model previously received file=None although the result file was
# opened above and every sibling example passes file=file, so the training log
# never reached the file.
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Close the result file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/LapSVM_BreastCancer.py
================================================
# Example: Laplacian SVM (LapSVM) on the Breast Cancer tabular dataset.
from LAMDA_SSL.Algorithm.Classification.LapSVM import LapSVM
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.Tabular.BreastCancer import BreastCancer
import numpy as np
file = open("../Result/LapSVM_BreastCancer.txt", "w")
dataset=BreastCancer(test_size=0.3,labeled_size=0.1,stratified=True,shuffle=True,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
unlabeled_y=dataset.unlabeled_y
test_X=dataset.test_X
test_y=dataset.test_y
# Pre_transform
# Fit the scaler on labeled + unlabeled features, then apply it to every split.
pre_transform=dataset.pre_transform
pre_transform.fit(np.vstack([labeled_X, unlabeled_X]))
labeled_X=pre_transform.transform(labeled_X)
unlabeled_X=pre_transform.transform(unlabeled_X)
test_X=pre_transform.transform(test_X)
evaluation={
    'accuracy':Accuracy(),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
# gamma_I=0 disables the manifold-regularization term; gamma_A is the
# ambient-norm weight.
model=LapSVM(neighbor_mode='connectivity',
             gamma_d=0.03,
             n_neighbor= 5,
             gamma_k=0.03,
             gamma_A= 0.03,
             gamma_I= 0,
             evaluation=evaluation,file=file,verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Close the result file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/MeanTeacherReg_Boston.py
================================================
# Example: Mean Teacher semi-supervised regression on the Boston housing dataset.
from LAMDA_SSL.Augmentation.Tabular.Noise import Noise
import torch.nn as nn
from LAMDA_SSL.Dataset.Tabular.Boston import Boston
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR
from LAMDA_SSL.Network.MLPReg import MLPReg
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Algorithm.Regression.MeanTeacherReg import MeanTeacherReg
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from LAMDA_SSL.Evaluation.Regressor.Mean_Absolute_Error import Mean_Absolute_Error
from LAMDA_SSL.Evaluation.Regressor.Mean_Squared_Error import Mean_Squared_Error
from LAMDA_SSL.Evaluation.Regressor.Mean_Squared_Log_Error import Mean_Squared_Log_Error
import numpy as np
# dataset: 30% test split, 10% of the remainder labeled; reproducible split.
dataset=Boston(test_size=0.3,labeled_size=0.1,stratified=False,shuffle=True,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
# Pre_transform: fit the preprocessor on labeled+unlabeled features only,
# then apply the same fitted transform to every split fed to the model.
pre_transform=dataset.pre_transform
pre_transform.fit(np.vstack([labeled_X, unlabeled_X]))
labeled_X=pre_transform.transform(labeled_X)
unlabeled_X=pre_transform.transform(unlabeled_X)
test_X=pre_transform.transform(test_X)
# BUG FIX: valid_X was previously passed to fit() un-transformed while the
# labeled/unlabeled/test splits were all scaled — apply the fitted transform.
valid_X=pre_transform.transform(valid_X)
labeled_dataset=LabeledDataset(transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(transform=dataset.test_transform)
# sampler: random with replacement for training streams, sequential for eval.
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
# dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# augmentation: small Gaussian-style noise on tabular features.
augmentation=Noise(noise_level=0.01)
# optimizer / scheduler
optimizer=SGD(lr=0.001,momentum=0.9,nesterov=True)
scheduler=CosineAnnealingLR(eta_min=0,T_max=4000)
# network: 3-hidden-layer MLP regressor sized to the input dimension.
network=MLPReg(hidden_dim=[100,50,10],activations=[nn.ReLU(),nn.ReLU(),nn.ReLU()],dim_in=labeled_X.shape[-1])
# evaluation metrics
evaluation={
'Mean_Absolute_Error':Mean_Absolute_Error(),
'Mean_Squared_Error':Mean_Squared_Error(),
'Mean_Squared_Log_Error':Mean_Squared_Log_Error()
}
# Result file for predictions and metrics.
file = open("../Result/MeanTeacherReg_Boston.txt", "w")
model=MeanTeacherReg(lambda_u=0,warmup=0.4,
                     mu=1,weight_decay=5e-4,ema_decay=0.999,
                     epoch=1,num_it_epoch=4000,
                     num_it_total=4000,
                     eval_it=200,device='cpu',
                     labeled_dataset=labeled_dataset,
                     unlabeled_dataset=unlabeled_dataset,
                     valid_dataset=valid_dataset,
                     test_dataset=test_dataset,
                     labeled_sampler=labeled_sampler,
                     unlabeled_sampler=unlabeled_sampler,
                     valid_sampler=valid_sampler,
                     test_sampler=test_sampler,
                     labeled_dataloader=labeled_dataloader,
                     unlabeled_dataloader=unlabeled_dataloader,
                     valid_dataloader=valid_dataloader,
                     test_dataloader=test_dataloader,
                     augmentation=augmentation,network=network,
                     optimizer=optimizer,scheduler=scheduler,
                     evaluation=evaluation,file=file,verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# FIX: close the result file so buffered output is reliably flushed to disk.
file.close()
================================================
FILE: Examples/MeanTeacher_CIFAR10.py
================================================
# Example: Mean Teacher semi-supervised classification on CIFAR-10.
from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip
from LAMDA_SSL.Augmentation.Vision.RandomCrop import RandomCrop
from LAMDA_SSL.Dataset.Vision.CIFAR10 import CIFAR10
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR
from LAMDA_SSL.Network.WideResNet import WideResNet
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Algorithm.Classification.MeanTeacher import MeanTeacher
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from sklearn.pipeline import Pipeline
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
# dataset: 4000 labeled examples, stratified split.
# FIX: raw string for the Windows-style path — '\D' and '\c' are invalid escape
# sequences (SyntaxWarning on Python 3.12+); the runtime value is unchanged.
dataset=CIFAR10(root=r'..\Download\cifar-10-python',labeled_size=4000,stratified=True,shuffle=True,download=False,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
# sampler: random with replacement for training streams, sequential for eval.
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
# dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# augmentation: standard CIFAR-10 weak augmentation (flip + padded crop).
augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                       ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                       ])
# optimizer / scheduler
optimizer=SGD(lr=0.03,momentum=0.9,nesterov=True)
scheduler=CosineAnnealingLR(eta_min=0,T_max=2**20)
# network: WideResNet-28-2, the standard SSL benchmark backbone.
network=WideResNet(num_classes=10,depth=28,widen_factor=2,drop_rate=0)
# evaluation metrics
evaluation={
'accuracy':Accuracy(),
'top_5_accuracy':Top_k_Accurary(k=5),
'precision':Precision(average='macro'),
'Recall':Recall(average='macro'),
'F1':F1(average='macro'),
'AUC':AUC(multi_class='ovo'),
'Confusion_matrix':Confusion_Matrix(normalize='true')
}
# Result file for predictions and metrics.
file = open("../Result/MeanTeacher_CIFAR10.txt", "w")
model=MeanTeacher(lambda_u=50,warmup=0.4,mu=1,weight_decay=5e-4,ema_decay=0.999,
                  epoch=1, num_it_epoch=2 ** 20, num_it_total=2 ** 20, eval_it=2000, device='cpu',
                  labeled_dataset=labeled_dataset,
                  unlabeled_dataset=unlabeled_dataset,
                  valid_dataset=valid_dataset,
                  test_dataset=test_dataset,
                  labeled_sampler=labeled_sampler,
                  unlabeled_sampler=unlabeled_sampler,
                  valid_sampler=valid_sampler,
                  test_sampler=test_sampler,
                  labeled_dataloader=labeled_dataloader,
                  unlabeled_dataloader=unlabeled_dataloader,
                  valid_dataloader=valid_dataloader,
                  test_dataloader=test_dataloader,
                  augmentation=augmentation,
                  network=network,
                  optimizer=optimizer,
                  scheduler=scheduler,
                  evaluation=evaluation,
                  file=file,
                  verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# FIX: close the result file so buffered output is reliably flushed to disk.
file.close()
================================================
FILE: Examples/MixMatch_CIFAR10.py
================================================
# Example: MixMatch semi-supervised classification on CIFAR-10.
from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip
from LAMDA_SSL.Augmentation.Vision.RandomCrop import RandomCrop
from LAMDA_SSL.Dataset.Vision.CIFAR10 import CIFAR10
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR
from LAMDA_SSL.Network.WideResNet import WideResNet
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Algorithm.Classification.MixMatch import MixMatch
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from sklearn.pipeline import Pipeline
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
# dataset: 4000 labeled examples, stratified split.
# FIX: raw string for the Windows-style path — '\D' and '\c' are invalid escape
# sequences (SyntaxWarning on Python 3.12+); the runtime value is unchanged.
dataset=CIFAR10(root=r'..\Download\cifar-10-python',labeled_size=4000,stratified=True,shuffle=True,download=False,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
# sampler: random with replacement for training streams, sequential for eval.
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
# dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# augmentation: standard CIFAR-10 weak augmentation (flip + padded crop).
augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                       ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                       ])
# optimizer / scheduler
optimizer=SGD(lr=0.03,momentum=0.9,nesterov=True)
scheduler=CosineAnnealingLR(eta_min=0,T_max=2**20)
# network: WideResNet-28-2, the standard SSL benchmark backbone.
network=WideResNet(num_classes=10,depth=28,widen_factor=2,drop_rate=0)
# evaluation metrics
evaluation={
'accuracy':Accuracy(),
'top_5_accuracy':Top_k_Accurary(k=5),
'precision':Precision(average='macro'),
'Recall':Recall(average='macro'),
'F1':F1(average='macro'),
'AUC':AUC(multi_class='ovo'),
'Confusion_matrix':Confusion_Matrix(normalize='true')
}
# Result file for predictions and metrics.
file = open("../Result/MixMatch_CIFAR10.txt", "w")
# MixMatch: MixUp coefficient alpha, sharpening temperature T, unsupervised
# loss weight lambda_u ramped up over the warmup fraction.
model=MixMatch(alpha=0.5,lambda_u=100,T=0.5,warmup=1 / 64,mu=1,
               weight_decay=5e-4,ema_decay=0.999,
               epoch=1,num_it_epoch=2**20,num_it_total=2**20,
               eval_it=2000,device='cpu',
               labeled_dataset=labeled_dataset,
               unlabeled_dataset=unlabeled_dataset,
               valid_dataset=valid_dataset,
               test_dataset=test_dataset,
               labeled_sampler=labeled_sampler,
               unlabeled_sampler=unlabeled_sampler,
               valid_sampler=valid_sampler,
               test_sampler=test_sampler,
               labeled_dataloader=labeled_dataloader,
               unlabeled_dataloader=unlabeled_dataloader,
               valid_dataloader=valid_dataloader,
               test_dataloader=test_dataloader,
               augmentation=augmentation,
               network=network,
               optimizer=optimizer,
               scheduler=scheduler,
               evaluation=evaluation,
               file=file,verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# FIX: close the result file so buffered output is reliably flushed to disk.
file.close()
================================================
FILE: Examples/ParallelDistributed.py
================================================
# Example: FixMatch on CIFAR-10 with multi-GPU DataParallel training.
from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip
from LAMDA_SSL.Augmentation.Vision.RandomCrop import RandomCrop
from LAMDA_SSL.Augmentation.Vision.RandAugment import RandAugment
from LAMDA_SSL.Augmentation.Vision.Cutout import Cutout
from LAMDA_SSL.Dataset.Vision.CIFAR10 import CIFAR10
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR
from LAMDA_SSL.Network.WideResNet import WideResNet
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Algorithm.Classification.FixMatch import FixMatch
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from sklearn.pipeline import Pipeline
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
from LAMDA_SSL.Distributed.DataParallel import DataParallel
# dataset: 4000 labeled examples.
# FIX: raw string for the Windows-style path — '\D' and '\c' are invalid escape
# sequences (SyntaxWarning on Python 3.12+); the runtime value is unchanged.
dataset=CIFAR10(root=r'..\Download\cifar-10-python',labeled_size=4000,stratified=False,shuffle=True,download=False,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
# sampler: random with replacement for training streams, sequential for eval.
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
# dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# network: WideResNet-28-2, the standard SSL benchmark backbone.
network=WideResNet(num_classes=10,depth=28,widen_factor=2,drop_rate=0)
# optimizer / scheduler
optimizer=SGD(lr=0.03,momentum=0.9,nesterov=True)
scheduler=CosineAnnealingLR(eta_min=0,T_max=2**20)
# augmentation: FixMatch's weak (flip+crop) and strong (RandAugment+Cutout) views.
weak_augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                            ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                            ])
strong_augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                              ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                              ('RandAugment',RandAugment(n=2,m=10,num_bins=30)),
                              ('Cutout',Cutout(v=0.5,fill=(127,127,127))),
                              ])
augmentation={
'weak_augmentation':weak_augmentation,
'strong_augmentation':strong_augmentation
}
# parallel: replicate the model over two GPUs, gathering outputs on cuda:0.
parallel=DataParallel(device_ids=['cuda:0','cuda:1'],output_device='cuda:0')
# evaluation metrics
evaluation={
'accuracy':Accuracy(),
'top_5_accuracy':Top_k_Accurary(k=5),
'precision':Precision(average='macro'),
'Recall':Recall(average='macro'),
'F1':F1(average='macro'),
'AUC':AUC(multi_class='ovo'),
'Confusion_matrix':Confusion_Matrix(normalize='true')
}
# NOTE(review): the result file name reuses FixMatch_CIFAR10.txt and would
# overwrite the plain FixMatch example's output; confirm intended.
file = open("../Result/FixMatch_CIFAR10.txt", "w")
# NOTE(review): device='cpu' while DataParallel targets cuda:0/cuda:1 —
# confirm the framework moves the model to the parallel devices.
model=FixMatch(threshold=0.95,lambda_u=1.0,T=0.5,mu=7,weight_decay=5e-4,ema_decay=0.999,
               epoch=1,num_it_epoch=1,num_it_total=1,eval_it=1,eval_epoch=1,device='cpu',
               labeled_dataset=labeled_dataset,
               unlabeled_dataset=unlabeled_dataset,
               valid_dataset=valid_dataset,
               test_dataset=test_dataset,
               labeled_sampler=labeled_sampler,
               unlabeled_sampler=unlabeled_sampler,
               valid_sampler=valid_sampler,
               test_sampler=test_sampler,
               labeled_dataloader=labeled_dataloader,
               unlabeled_dataloader=unlabeled_dataloader,
               valid_dataloader=valid_dataloader,
               test_dataloader=test_dataloader,
               augmentation=augmentation,
               network=network,
               optimizer=optimizer,
               scheduler=scheduler,
               parallel=parallel,
               evaluation=evaluation,
               file=file,
               verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# FIX: close the result file so buffered output is reliably flushed to disk.
file.close()
================================================
FILE: Examples/Parameter_Search.py
================================================
# Example: hyper-parameter search over FixMatch with sklearn's RandomizedSearchCV.
from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip
from LAMDA_SSL.Augmentation.Vision.RandomCrop import RandomCrop
from LAMDA_SSL.Augmentation.Vision.RandAugment import RandAugment
from LAMDA_SSL.Augmentation.Vision.Cutout import Cutout
from LAMDA_SSL.Dataset.Vision.CIFAR10 import CIFAR10
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR
from LAMDA_SSL.Network.WideResNet import WideResNet
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Algorithm.Classification.FixMatch import FixMatch
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
from sklearn.pipeline import Pipeline
# FIX: import from the public API instead of the private module
# sklearn.model_selection._search, which can move between sklearn versions.
from sklearn.model_selection import RandomizedSearchCV
# dataset: 4000 labeled examples.
# FIX: raw string for the Windows-style path — '\D' and '\c' are invalid escape
# sequences (SyntaxWarning on Python 3.12+); the runtime value is unchanged.
dataset=CIFAR10(root=r'..\Download\cifar-10-python',labeled_size=4000,stratified=False,shuffle=True,download=False,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
# sampler: random with replacement for training streams, sequential for eval.
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
# dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# network: WideResNet-28-2, the standard SSL benchmark backbone.
network=WideResNet(num_classes=10,depth=28,widen_factor=2,drop_rate=0)
# optimizer / scheduler
optimizer=SGD(lr=0.03,momentum=0.9,nesterov=True)
scheduler=CosineAnnealingLR(eta_min=0,T_max=2**20)
# augmentation: FixMatch's weak (flip+crop) and strong (RandAugment+Cutout) views.
weak_augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                            ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                            ])
strong_augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                              ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                              ('RandAugment',RandAugment(n=2,m=10,num_bins=30)),
                              ('Cutout',Cutout(v=0.5,fill=(127,127,127))),
                              ])
augmentation={
'weak_augmentation':weak_augmentation,
'strong_augmentation':strong_augmentation
}
# evaluation metrics (defined for completeness; the search scores with 'accuracy')
evaluation={
'accuracy':Accuracy(),
'top_5_accuracy':Top_k_Accurary(k=5),
'precision':Precision(average='macro'),
'Recall':Recall(average='macro'),
'F1':F1(average='macro'),
'AUC':AUC(multi_class='ovo'),
'Confusion_matrix':Confusion_Matrix(normalize='true')
}
# FIX: corrected the misspelled output filename ('Parameter_Searcn.txt').
file = open("../Result/Parameter_Search.txt", "w")
# Base estimator: threshold and lambda_u are intentionally left unset — they
# are the parameters being searched. file=None/verbose=False keeps each
# candidate fit quiet during the search.
model=FixMatch(T=0.5,mu=7,weight_decay=5e-4,ema_decay=0.999,
               epoch=1,num_it_epoch=2**20,num_it_total=2**20,eval_it=2000,device='cpu',
               labeled_dataset=labeled_dataset,
               unlabeled_dataset=unlabeled_dataset,
               valid_dataset=valid_dataset,
               test_dataset=test_dataset,
               labeled_sampler=labeled_sampler,
               unlabeled_sampler=unlabeled_sampler,
               valid_sampler=valid_sampler,
               test_sampler=test_sampler,
               labeled_dataloader=labeled_dataloader,
               unlabeled_dataloader=unlabeled_dataloader,
               valid_dataloader=valid_dataloader,
               test_dataloader=test_dataloader,
               augmentation=augmentation,
               network=network,
               optimizer=optimizer,
               scheduler=scheduler,
               file=None,
               verbose=False)
param_dict = {"threshold": [0.7, 1],
              "lambda_u":[0.8,1]
              }
random_search = RandomizedSearchCV(model, param_distributions=param_dict,
                                   n_iter=1, cv=4,scoring='accuracy')
# unlabeled_X is forwarded to the estimator's fit() as a fit parameter.
random_search.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X)
best_params=random_search.best_params_
print(best_params,file=file)
# FIX: close the result file so buffered output is reliably flushed to disk.
file.close()
================================================
FILE: Examples/PiModelReg_Boston.py
================================================
# Example: Pi-Model semi-supervised regression on the Boston housing dataset.
from LAMDA_SSL.Augmentation.Tabular.Noise import Noise
import torch.nn as nn
from LAMDA_SSL.Dataset.Tabular.Boston import Boston
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR
from LAMDA_SSL.Network.MLPReg import MLPReg
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Algorithm.Regression.PiModelReg import PiModelReg
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from LAMDA_SSL.Evaluation.Regressor.Mean_Absolute_Error import Mean_Absolute_Error
from LAMDA_SSL.Evaluation.Regressor.Mean_Squared_Error import Mean_Squared_Error
from LAMDA_SSL.Evaluation.Regressor.Mean_Squared_Log_Error import Mean_Squared_Log_Error
import numpy as np
# dataset: 30% test split, 10% of the remainder labeled; reproducible split.
dataset=Boston(test_size=0.3,labeled_size=0.1,stratified=False,shuffle=True,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
# Pre_transform: fit the preprocessor on labeled+unlabeled features only,
# then apply the same fitted transform to every split fed to the model.
pre_transform=dataset.pre_transform
pre_transform.fit(np.vstack([labeled_X, unlabeled_X]))
labeled_X=pre_transform.transform(labeled_X)
unlabeled_X=pre_transform.transform(unlabeled_X)
test_X=pre_transform.transform(test_X)
# BUG FIX: valid_X was previously passed to fit() un-transformed while the
# labeled/unlabeled/test splits were all scaled — apply the fitted transform.
valid_X=pre_transform.transform(valid_X)
labeled_dataset=LabeledDataset(transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(transform=dataset.test_transform)
# sampler: random with replacement for training streams, sequential for eval.
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(4000))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
# dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# augmentation: small noise perturbation for the consistency loss views.
augmentation=Noise(noise_level=0.01)
# optimizer / scheduler
optimizer=SGD(lr=0.001,momentum=0.9,nesterov=True)
scheduler=CosineAnnealingLR(eta_min=0,T_max=4000)
# network: 3-hidden-layer MLP regressor sized to the input dimension.
network=MLPReg(hidden_dim=[100,50,10],activations=[nn.ReLU(),nn.ReLU(),nn.ReLU()],dim_in=labeled_X.shape[-1])
# evaluation metrics
evaluation={
'Mean_Absolute_Error':Mean_Absolute_Error(),
'Mean_Squared_Error':Mean_Squared_Error(),
'Mean_Squared_Log_Error':Mean_Squared_Log_Error()
}
# Result file for predictions and metrics.
file = open("../Result/PiModelReg_Boston.txt", "w")
model=PiModelReg(lambda_u=0.001,warmup=0.4,
                 mu=1,weight_decay=5e-4,ema_decay=0.999,
                 epoch=1,num_it_epoch=4000,
                 num_it_total=4000,
                 eval_it=200,device='cpu',
                 labeled_dataset=labeled_dataset,
                 unlabeled_dataset=unlabeled_dataset,
                 valid_dataset=valid_dataset,
                 test_dataset=test_dataset,
                 labeled_sampler=labeled_sampler,
                 unlabeled_sampler=unlabeled_sampler,
                 valid_sampler=valid_sampler,
                 test_sampler=test_sampler,
                 labeled_dataloader=labeled_dataloader,
                 unlabeled_dataloader=unlabeled_dataloader,
                 valid_dataloader=valid_dataloader,
                 test_dataloader=test_dataloader,
                 augmentation=augmentation,network=network,
                 optimizer=optimizer,scheduler=scheduler,
                 evaluation=evaluation,file=file,verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# FIX: close the result file so buffered output is reliably flushed to disk.
file.close()
================================================
FILE: Examples/PiModel_CIFAR10.py
================================================
# Example: Pi-Model semi-supervised classification on CIFAR-10.
from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip
from LAMDA_SSL.Augmentation.Vision.RandomCrop import RandomCrop
from LAMDA_SSL.Dataset.Vision.CIFAR10 import CIFAR10
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR
from LAMDA_SSL.Network.WideResNet import WideResNet
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Algorithm.Classification.PiModel import PiModel
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from sklearn.pipeline import Pipeline
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
# dataset: 4000 labeled examples, stratified split.
# FIX: raw string for the Windows-style path — '\D' and '\c' are invalid escape
# sequences (SyntaxWarning on Python 3.12+); the runtime value is unchanged.
dataset=CIFAR10(root=r'..\Download\cifar-10-python',labeled_size=4000,stratified=True,shuffle=True,download=False,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
# sampler: random with replacement for training streams, sequential for eval.
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
# dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# augmentation: standard CIFAR-10 weak augmentation (flip + padded crop).
augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                       ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                       ])
# optimizer / scheduler
optimizer=SGD(lr=0.03,momentum=0.9,nesterov=True)
scheduler=CosineAnnealingLR(eta_min=0,T_max=2**20)
# network: WideResNet-28-2, the standard SSL benchmark backbone.
network=WideResNet(num_classes=10,depth=28,widen_factor=2,drop_rate=0)
# evaluation metrics
evaluation={
'accuracy':Accuracy(),
'top_5_accuracy':Top_k_Accurary(k=5),
'precision':Precision(average='macro'),
'Recall':Recall(average='macro'),
'F1':F1(average='macro'),
'AUC':AUC(multi_class='ovo'),
'Confusion_matrix':Confusion_Matrix(normalize='true')
}
# Result file for predictions and metrics.
file = open("../Result/PiModel_CIFAR10.txt", "w")
model=PiModel(lambda_u=10,warmup=0.4,mu=1,weight_decay=5e-4,ema_decay=0.999,
              epoch=1,num_it_epoch=2**20,num_it_total=2**20,
              eval_it=2000,device='cpu',
              labeled_dataset=labeled_dataset,
              unlabeled_dataset=unlabeled_dataset,
              valid_dataset=valid_dataset,
              test_dataset=test_dataset,
              labeled_sampler=labeled_sampler,
              unlabeled_sampler=unlabeled_sampler,
              valid_sampler=valid_sampler,
              test_sampler=test_sampler,
              labeled_dataloader=labeled_dataloader,
              unlabeled_dataloader=unlabeled_dataloader,
              valid_dataloader=valid_dataloader,
              test_dataloader=test_dataloader,
              augmentation=augmentation,
              network=network,
              optimizer=optimizer,
              scheduler=scheduler,
              evaluation=evaluation,
              file=file,verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# FIX: close the result file so buffered output is reliably flushed to disk.
file.close()
================================================
FILE: Examples/PseudoLabel_CIFAR10.py
================================================
from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip
from LAMDA_SSL.Augmentation.Vision.RandomCrop import RandomCrop
from LAMDA_SSL.Dataset.Vision.CIFAR10 import CIFAR10
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR
from LAMDA_SSL.Network.WideResNet import WideResNet
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Algorithm.Classification.PseudoLabel import PseudoLabel
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from sklearn.pipeline import Pipeline
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
# dataset
# Forward slashes keep the path valid on every OS and avoid the invalid
# escape sequence '\D' that the old backslash path produced.
dataset=CIFAR10(root='../Download/cifar-10-python',labeled_size=4000,stratified=True,shuffle=True,download=False,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
# sampler
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
# dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# augmentation
augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                       ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                       ])
# optimizer
optimizer=SGD(lr=0.03,momentum=0.9,nesterov=True)
# scheduler
scheduler=CosineAnnealingLR(eta_min=0,T_max=2**20)
# network
network=WideResNet(num_classes=10,depth=28,widen_factor=2,drop_rate=0)
# evaluation
evaluation={
    'accuracy':Accuracy(),
    'top_5_accuracy':Top_k_Accurary(k=5),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
# The log file stays open for the whole run because the model writes
# verbose progress to it during fit(); it is closed explicitly at the end.
file = open("../Result/PseudoLabel_CIFAR10.txt", "w")
model=PseudoLabel(threshold=0.95,lambda_u=1,warmup=0.4,mu=1,weight_decay=5e-4,ema_decay=0.999,
                  epoch=1,num_it_epoch=2**20,num_it_total=2**20,
                  eval_it=2000,device='cpu',
                  labeled_dataset=labeled_dataset,
                  unlabeled_dataset=unlabeled_dataset,
                  valid_dataset=valid_dataset,
                  test_dataset=test_dataset,
                  labeled_sampler=labeled_sampler,
                  unlabeled_sampler=unlabeled_sampler,
                  valid_sampler=valid_sampler,
                  test_sampler=test_sampler,
                  labeled_dataloader=labeled_dataloader,
                  unlabeled_dataloader=unlabeled_dataloader,
                  valid_dataloader=valid_dataloader,
                  test_dataloader=test_dataloader,
                  augmentation=augmentation,
                  network=network,
                  optimizer=optimizer,
                  scheduler=scheduler,
                  evaluation=evaluation,
                  file=file,verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
valid_performance=model.valid_performance
print(valid_performance,file=file)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Fix: close the log file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/ReMixMatch_CIFAR10.py
================================================
from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip
from LAMDA_SSL.Augmentation.Vision.RandomCrop import RandomCrop
from LAMDA_SSL.Augmentation.Vision.RandAugment import RandAugment
from LAMDA_SSL.Augmentation.Vision.Cutout import Cutout
from LAMDA_SSL.Dataset.Vision.CIFAR10 import CIFAR10
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR
from LAMDA_SSL.Network.WideResNet import WideResNet
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Algorithm.Classification.ReMixMatch import ReMixMatch
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from sklearn.pipeline import Pipeline
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
# dataset
# Forward slashes keep the path valid on every OS and avoid the invalid
# escape sequence '\D' that the old backslash path produced.
dataset=CIFAR10(root='../Download/cifar-10-python',labeled_size=4000,stratified=True,shuffle=True,download=False,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
# sampler
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
# dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# augmentation: ReMixMatch uses a weak view and a strong (RandAugment+Cutout) view
weak_augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                            ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                            ])
strong_augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                              ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                              ('RandAugment',RandAugment(n=2,m=10,num_bins=10)),
                              ('Cutout',Cutout(v=0.5,fill=(127, 127, 127))),
                              ])
augmentation={
    'weak_augmentation':weak_augmentation,
    'strong_augmentation':strong_augmentation
}
# optimizer
optimizer=SGD(lr=0.03,momentum=0.9,nesterov=True)
# scheduler
scheduler=CosineAnnealingLR(eta_min=0,T_max=2**20)
# network: two heads — 10 classification classes and 4 rotation classes
network=WideResNet(num_classes=(10,4),depth=28,widen_factor=2,drop_rate=0)
# evaluation
evaluation={
    'accuracy':Accuracy(),
    'top_5_accuracy':Top_k_Accurary(k=5),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
# The log file stays open for the whole run because the model writes
# verbose progress to it during fit(); it is closed explicitly at the end.
file = open("../Result/ReMixMatch_CIFAR10.txt", "w")
model=ReMixMatch(alpha=0.5,lambda_u=1.5,lambda_s=0.5,
                 lambda_rot=0.5,T=0.5,warmup=0.015625,mu=1,
                 weight_decay=5e-4, ema_decay=0.999,
                 epoch=1, num_it_epoch=2 ** 20, num_it_total=2 ** 20,
                 eval_it=2000, device='cpu',
                 labeled_dataset=labeled_dataset,
                 unlabeled_dataset=unlabeled_dataset,
                 valid_dataset=valid_dataset,
                 test_dataset=test_dataset,
                 labeled_sampler=labeled_sampler,
                 unlabeled_sampler=unlabeled_sampler,
                 valid_sampler=valid_sampler,
                 test_sampler=test_sampler,
                 labeled_dataloader=labeled_dataloader,
                 unlabeled_dataloader=unlabeled_dataloader,
                 valid_dataloader=valid_dataloader,
                 test_dataloader=test_dataloader,
                 augmentation=augmentation,
                 network=network,
                 optimizer=optimizer,
                 scheduler=scheduler,
                 evaluation=evaluation,
                 file=file, verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Fix: close the log file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/S4L_CIFAR10.py
================================================
from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip
from LAMDA_SSL.Augmentation.Vision.RandomCrop import RandomCrop
from LAMDA_SSL.Dataset.Vision.CIFAR10 import CIFAR10
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR
from LAMDA_SSL.Network.WideResNet import WideResNet
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from sklearn.pipeline import Pipeline
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
from LAMDA_SSL.Algorithm.Classification.S4L import S4L
# dataset
# Forward slashes keep the path valid on every OS and avoid the invalid
# escape sequence '\D' that the old backslash path produced.
dataset=CIFAR10(root='../Download/cifar-10-python',labeled_size=4000,stratified=True,shuffle=True,download=False,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
# sampler
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
# dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# augmentation
augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                       ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                       ])
# optimizer
optimizer=SGD(lr=0.03,momentum=0.9,nesterov=True)
# scheduler
scheduler=CosineAnnealingLR(eta_min=0,T_max=2**20)
# network: two heads — 10 classification classes and 4 rotation classes
network=WideResNet(num_classes=(10,4),depth=28,widen_factor=2,drop_rate=0)
# evaluation
evaluation={
    'accuracy':Accuracy(),
    'top_5_accuracy':Top_k_Accurary(k=5),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
# The log file stays open for the whole run because the model writes
# verbose progress to it during fit(); it is closed explicitly at the end.
file = open("../Result/S4L_CIFAR10.txt", "w")
model=S4L(lambda_u=1.5,mu=1,weight_decay=5e-4, ema_decay=0.999,
          epoch=1, num_it_epoch=2 ** 20, num_it_total=2 ** 20,
          eval_it=2000, device='cpu',
          labeled_dataset=labeled_dataset,
          unlabeled_dataset=unlabeled_dataset,
          valid_dataset=valid_dataset,
          test_dataset=test_dataset,
          labeled_sampler=labeled_sampler,
          unlabeled_sampler=unlabeled_sampler,
          valid_sampler=valid_sampler,
          test_sampler=test_sampler,
          labeled_dataloader=labeled_dataloader,
          unlabeled_dataloader=unlabeled_dataloader,
          valid_dataloader=valid_dataloader,
          test_dataloader=test_dataloader,
          augmentation=augmentation,
          network=network,
          optimizer=optimizer,
          scheduler=scheduler,
          evaluation=evaluation,
          file=file, verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Fix: close the log file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/SDNE_Cora.py
================================================
from LAMDA_SSL.Dataset.Graph.Cora import Cora
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Scheduler.StepLR import StepLR
from LAMDA_SSL.Opitimizer.Adam import Adam
from LAMDA_SSL.Algorithm.Classification.SDNE import SDNE
# The log file stays open for the whole run because the model writes
# verbose progress to it during fit(); it is closed explicitly at the end.
file = open("../Result/SDNE_Cora.txt", "w")
# dataset
# Forward slashes keep the path valid on every OS and avoid the invalid
# escape sequences '\D' and '\C' that the old backslash path produced.
dataset=Cora(labeled_size=0.2,root='../Download/Cora',random_state=0,default_transforms=True)
data=dataset.data
data=dataset.transform.fit_transform(data)
# optimizer and scheduler
optimizer=Adam(lr=0.001)
scheduler= StepLR(step_size=10, gamma=0.9)
# evaluation
evaluation={
    'accuracy':Accuracy(),
    'top_5_accuracy':Top_k_Accurary(k=5),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
model=SDNE(
    hidden_layers=[1000,1000],
    gamma=1e-5,
    alpha=1e-3,
    beta=10,
    epoch=500,
    eval_epoch=200,
    weight_decay=0,
    device='cpu',
    optimizer=optimizer,
    scheduler=scheduler,
    evaluation=evaluation,
    verbose=True,
    file=file
)
# Graph algorithms take node masks rather than raw feature matrices.
model.fit(data,valid_X=data.val_mask)
performance=model.evaluate(X=data.test_mask)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Fix: close the log file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/SSGMM_BreastCancer.py
================================================
from LAMDA_SSL.Algorithm.Classification.SSGMM import SSGMM
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.Tabular.BreastCancer import BreastCancer
import numpy as np
# The log file stays open for the whole run because the model writes
# verbose progress to it; it is closed explicitly at the end.
file = open("../Result/SSGMM_BreastCancer.txt", "w")
dataset=BreastCancer(test_size=0.3,labeled_size=0.1,stratified=True,shuffle=True,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
unlabeled_y=dataset.unlabeled_y
test_X=dataset.test_X
test_y=dataset.test_y
# Pre_transform: fit the scaler on labeled + unlabeled features, then
# apply the same fitted transform to every split.
pre_transform=dataset.pre_transform
pre_transform.fit(np.vstack([labeled_X, unlabeled_X]))
labeled_X=pre_transform.transform(labeled_X)
unlabeled_X=pre_transform.transform(unlabeled_X)
test_X=pre_transform.transform(test_X)
# evaluation
evaluation={
    'accuracy':Accuracy(),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
model=SSGMM(evaluation=evaluation,file=file,verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Fix: close the log file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/SSVAE_MNIST.py
================================================
from LAMDA_SSL.Algorithm.Classification.SSVAE import SSVAE
from LAMDA_SSL.Opitimizer.Adam import Adam
from LAMDA_SSL.Transform.ToImage import ToImage
import torch.nn as nn
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
from LAMDA_SSL.Dataset.Vision.Mnist import Mnist
# dataset
# Forward slashes keep the path valid on every OS and avoid the invalid
# escape sequence '\D' that the old backslash path produced.
dataset=Mnist(root='../Download/mnist',labeled_size=6000,stratified=True,shuffle=True,download=False,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
# dataloader
labeled_dataloader=LabeledDataLoader(batch_size=100,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=100,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=100,num_workers=0,drop_last=False)
# sampler
labeled_sampler=RandomSampler(replacement=True,num_samples=100*540)
unlabeled_sampler=RandomSampler(replacement=False)
test_sampler=SequentialSampler()
valid_sampler=SequentialSampler()
# optimizer
optimizer=Adam(lr=3e-4)
# evaluation
evaluation={
    'Accuracy':Accuracy(),
    'Top_5_Accuracy':Top_k_Accurary(k=5),
    'Precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_Matrix':Confusion_Matrix(normalize='true')
}
# The log file stays open for the whole run because the model writes
# verbose progress to it during fit(); it is closed explicitly at the end.
file = open("../Result/SSVAE_MNIST.txt", "w")
model=SSVAE(alpha=5,num_labeled=6000,dim_in=(28,28),num_classes=10,
            dim_z=100,
            dim_hidden_de=[500, 500],
            dim_hidden_en_y=[500, 500], dim_hidden_en_z=[500, 500],
            activations_de=[nn.Softplus(), nn.Softplus()],
            activations_en_y=[nn.Softplus(), nn.Softplus()],
            activations_en_z=[nn.Softplus(), nn.Softplus()],
            mu=1,weight_decay=0,epoch=100,num_it_epoch=540,num_it_total=540*100,eval_it=2000,device='cpu',
            labeled_dataset=labeled_dataset,unlabeled_dataset=unlabeled_dataset,valid_dataset=valid_dataset,test_dataset=test_dataset,
            labeled_sampler=labeled_sampler,unlabeled_sampler=unlabeled_sampler,valid_sampler=valid_sampler,test_sampler=test_sampler,
            labeled_dataloader=labeled_dataloader,unlabeled_dataloader=unlabeled_dataloader,valid_dataloader=valid_dataloader,test_dataloader=test_dataloader,
            optimizer=optimizer,
            evaluation=evaluation,
            file=file,verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Sample 100 digits from the trained generative model and save them as
# images; `i` (not `_`) because the index is actually used.
X=model.generate(100)
for i in range(100):
    img=ToImage()(X[i]*256)
    img.convert('RGB').save('../Result/Imgs/SSVAE/' + str(i) + '.jpg')
# Fix: close the log file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/Save_Load_Model.py
================================================
from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip
from LAMDA_SSL.Augmentation.Vision.RandomCrop import RandomCrop
from LAMDA_SSL.Augmentation.Vision.RandAugment import RandAugment
from LAMDA_SSL.Augmentation.Vision.Cutout import Cutout
from LAMDA_SSL.Dataset.Vision.CIFAR10 import CIFAR10
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR
from LAMDA_SSL.Network.WideResNet import WideResNet
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Algorithm.Classification.FixMatch import FixMatch
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from sklearn.pipeline import Pipeline
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
import pickle
# dataset
# Forward slashes keep the path valid on every OS and avoid the invalid
# escape sequence '\D' that the old backslash path produced.
dataset=CIFAR10(root='../Download/cifar-10-python',labeled_size=4000,stratified=False,shuffle=True,download=False,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
# sampler
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
# dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# network
network=WideResNet(num_classes=10,depth=28,widen_factor=2,drop_rate=0)
# optimizer
optimizer=SGD(lr=0.03,momentum=0.9,nesterov=True)
# scheduler
scheduler=CosineAnnealingLR(eta_min=0,T_max=2**20)
# augmentation: FixMatch uses a weak view and a strong (RandAugment+Cutout) view
weak_augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                            ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                            ])
strong_augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                              ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                              ('RandAugment',RandAugment(n=2,m=10,num_bins=30)),
                              ('Cutout',Cutout(v=0.5,fill=(127,127,127))),
                              ])
augmentation={
    'weak_augmentation':weak_augmentation,
    'strong_augmentation':strong_augmentation
}
# evaluation
evaluation={
    'accuracy':Accuracy(),
    'top_5_accuracy':Top_k_Accurary(k=5),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
# This file receives only the final predictions; the model itself logs
# nowhere (file=None, verbose=False). Closed explicitly at the end.
file = open("../Result/Save_Load.txt", "w")
model=FixMatch(threshold=0.95,lambda_u=1.0,T=0.5,mu=7,weight_decay=5e-4,ema_decay=0.999,
               epoch=1,num_it_epoch=2**20,num_it_total=2**20,eval_it=2000,device='cpu',
               labeled_dataset=labeled_dataset,
               unlabeled_dataset=unlabeled_dataset,
               valid_dataset=valid_dataset,
               test_dataset=test_dataset,
               labeled_sampler=labeled_sampler,
               unlabeled_sampler=unlabeled_sampler,
               valid_sampler=valid_sampler,
               test_sampler=test_sampler,
               labeled_dataloader=labeled_dataloader,
               unlabeled_dataloader=unlabeled_dataloader,
               valid_dataloader=valid_dataloader,
               test_dataloader=test_dataloader,
               augmentation=augmentation,
               network=network,
               optimizer=optimizer,
               scheduler=scheduler,
               evaluation=evaluation,
               file=None,
               verbose=False)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
# Round-trip the fitted model through pickle to demonstrate save/load.
path='../Save/Fixmatch.pkl'
with open(path, 'wb') as f:
    pickle.dump(model, f)
with open(path, 'rb') as f:
    model = pickle.load(f)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Fix: close the result file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/SemiBoost_BreastCancer.py
================================================
from LAMDA_SSL.Algorithm.Classification.SemiBoost import SemiBoost
from LAMDA_SSL.Dataset.Tabular.BreastCancer import BreastCancer
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from sklearn.svm import SVC
import numpy as np
# The log file stays open for the whole run because the model writes
# verbose progress to it; it is closed explicitly at the end.
file = open("../Result/SemiBoost_BreastCancer.txt", "w")
dataset=BreastCancer(test_size=0.3,labeled_size=0.1,stratified=True,shuffle=True,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
unlabeled_y=dataset.unlabeled_y
test_X=dataset.test_X
test_y=dataset.test_y
# Pre_transform: fit the scaler on labeled + unlabeled features, then
# apply the same fitted transform to every split.
pre_transform=dataset.pre_transform
pre_transform.fit(np.vstack([labeled_X, unlabeled_X]))
labeled_X=pre_transform.transform(labeled_X)
unlabeled_X=pre_transform.transform(unlabeled_X)
test_X=pre_transform.transform(test_X)
# Base learner must expose predict_proba, hence probability=True.
SVM=SVC(C=1.0,kernel='rbf',probability=True,gamma='auto')
# evaluation
evaluation={
    'accuracy':Accuracy(),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
model=SemiBoost(gamma=10,base_estimator=SVM,evaluation=evaluation,file=file,verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Fix: close the log file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/TSVM_BreastCancer.py
================================================
from LAMDA_SSL.Algorithm.Classification.TSVM import TSVM
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Dataset.Tabular.BreastCancer import BreastCancer
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
import numpy as np
# The result file receives the final predictions and metrics; it is
# closed explicitly at the end.
file = open("../Result/TSVM_BreastCancer.txt", "w")
dataset=BreastCancer(test_size=0.3,labeled_size=0.1,stratified=True,shuffle=True,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
unlabeled_y=dataset.unlabeled_y
test_X=dataset.test_X
test_y=dataset.test_y
# Pre_transform: fit the scaler on labeled + unlabeled features, then
# apply the same fitted transform to every split.
pre_transform=dataset.pre_transform
pre_transform.fit(np.vstack([labeled_X, unlabeled_X]))
labeled_X=pre_transform.transform(labeled_X)
unlabeled_X=pre_transform.transform(unlabeled_X)
test_X=pre_transform.transform(test_X)
# evaluation
evaluation={
    'accuracy':Accuracy(),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
model=TSVM(evaluation=evaluation,file=file)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X)
# Transductive=False: evaluate inductively on the held-out test split.
performance=model.evaluate(X=test_X,y=test_y,Transductive=False)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Fix: close the result file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/TemporalEnsembling_CIFAR10.py
================================================
from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip
from LAMDA_SSL.Augmentation.Vision.RandomCrop import RandomCrop
from LAMDA_SSL.Dataset.Vision.CIFAR10 import CIFAR10
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR
from LAMDA_SSL.Network.WideResNet import WideResNet
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Algorithm.Classification.TemporalEnsembling import TemporalEnsembling
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from sklearn.pipeline import Pipeline
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
# dataset
# Forward slashes keep the path valid on every OS and avoid the invalid
# escape sequence '\D' that the old backslash path produced.
dataset=CIFAR10(root='../Download/cifar-10-python',labeled_size=4000,stratified=True,shuffle=True,download=False,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
# sampler
labeled_sampler=RandomSampler(replacement=True,num_samples=460*100)
unlabeled_sampler=RandomSampler(replacement=False)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
# dataloader
labeled_dataloader=LabeledDataLoader(batch_size=100,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=100,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=100,num_workers=0,drop_last=False)
# augmentation
augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                       ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                       ])
# optimizer
optimizer=SGD(lr=0.1,momentum=0.9,weight_decay=5e-4)
# scheduler
scheduler=CosineAnnealingLR(eta_min=0.0001,T_max=400)
# network
network=WideResNet(num_classes=10,depth=28,widen_factor=2,drop_rate=0)
# evaluation
evaluation={
    'accuracy':Accuracy(),
    'top_5_accuracy':Top_k_Accurary(k=5),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
# The log file stays open for the whole run because the model writes
# verbose progress to it during fit(); it is closed explicitly at the end.
file = open("../Result/TemporalEnsembling_CIFAR10.txt", "w")
model=TemporalEnsembling(lambda_u=30,ema_weight=0.6,warmup=0.4,mu=1,weight_decay=5e-4,epoch=400,num_it_epoch=460,
                         num_it_total=460*400,eval_epoch=10,device='cpu',
                         labeled_dataset=labeled_dataset,
                         unlabeled_dataset=unlabeled_dataset,
                         valid_dataset=valid_dataset,
                         test_dataset=test_dataset,
                         labeled_sampler=labeled_sampler,
                         unlabeled_sampler=unlabeled_sampler,
                         valid_sampler=valid_sampler,
                         test_sampler=test_sampler,
                         labeled_dataloader=labeled_dataloader,
                         unlabeled_dataloader=unlabeled_dataloader,
                         valid_dataloader=valid_dataloader,
                         test_dataloader=test_dataloader,
                         augmentation=augmentation,
                         network=network,
                         optimizer=optimizer,
                         scheduler=scheduler,
                         evaluation=evaluation,
                         file=file, verbose=True
                         )
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Fix: close the log file so buffered output is flushed to disk.
file.close()
================================================
FILE: Examples/Tri_Training_BreastCancer.py
================================================
from LAMDA_SSL.Algorithm.Classification.Tri_Training import Tri_Training
from LAMDA_SSL.Dataset.Tabular.BreastCancer import BreastCancer
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from sklearn.svm import SVC
import numpy as np
# Output file for logged results; closed explicitly at the end of the script.
file = open("../Result/Tri_Training_BreastCancer.txt", "w")
dataset=BreastCancer(test_size=0.3,labeled_size=0.1,stratified=True,shuffle=True,random_state=0,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
unlabeled_y=dataset.unlabeled_y
test_X=dataset.test_X
test_y=dataset.test_y
# Pre_transform: fit the scaler on all available features (labeled + unlabeled),
# then apply it consistently to every split.
pre_transform=dataset.pre_transform
pre_transform.fit(np.vstack([labeled_X, unlabeled_X]))
labeled_X=pre_transform.transform(labeled_X)
unlabeled_X=pre_transform.transform(unlabeled_X)
test_X=pre_transform.transform(test_X)
evaluation={
    'accuracy':Accuracy(),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
SVM=SVC(C=1.0,kernel='linear',probability=True,gamma='auto')
model=Tri_Training(base_estimator=SVM,evaluation=evaluation,file=file,verbose=True)
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Close the log file so buffered output is flushed to disk (was leaked before).
file.close()
================================================
FILE: Examples/UDA_CIFAR10.py
================================================
from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip
from LAMDA_SSL.Augmentation.Vision.RandomCrop import RandomCrop
from LAMDA_SSL.Augmentation.Vision.RandAugment import RandAugment
from LAMDA_SSL.Augmentation.Vision.Cutout import Cutout
from LAMDA_SSL.Dataset.Vision.CIFAR10 import CIFAR10
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineWarmup import CosineWarmup
from LAMDA_SSL.Network.WideResNet import WideResNet
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Algorithm.Classification.UDA import UDA
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from sklearn.pipeline import Pipeline
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
# dataset
# NOTE: use forward slashes — they work on every OS, whereas backslashes in a
# non-raw string ('\D', '\c') are invalid escape sequences (SyntaxWarning) and
# break the path on non-Windows platforms.
dataset=CIFAR10(root='../Download/cifar-10-python',labeled_size=4000,stratified=True,shuffle=True,download=False,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
# sampler
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
# dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# augmentation
weak_augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                            ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                            ])
strong_augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                              ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                              ('RandAugment',RandAugment(n=2,m=10,num_bins=10)),
                              ('Cutout',Cutout(v=0.5,fill=(127, 127, 127))),
                              ])
augmentation={
    'weak_augmentation':weak_augmentation,
    'strong_augmentation':strong_augmentation
}
# optimizer
optimizer=SGD(lr=0.03,momentum=0.9,nesterov=True)
# scheduler
scheduler=CosineWarmup(num_cycles=7./16,num_training_steps=2**20)
# network
network=WideResNet(num_classes=10,depth=28,widen_factor=2,drop_rate=0)
# evaluation
evaluation={
    'accuracy':Accuracy(),
    'top_5_accuracy':Top_k_Accurary(k=5),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
file = open("../Result/UDA_CIFAR10.txt", "w")
model=UDA(threshold=0.8,lambda_u=1.0,T=0.4,mu=7,
          weight_decay=5e-4, ema_decay=0.999,
          epoch=1, num_it_epoch=2 ** 20, num_it_total=2 ** 20,
          eval_it=2000, device='cpu',
          labeled_dataset=labeled_dataset,
          unlabeled_dataset=unlabeled_dataset,
          valid_dataset=valid_dataset,
          test_dataset=test_dataset,
          labeled_sampler=labeled_sampler,
          unlabeled_sampler=unlabeled_sampler,
          valid_sampler=valid_sampler,
          test_sampler=test_sampler,
          labeled_dataloader=labeled_dataloader,
          unlabeled_dataloader=unlabeled_dataloader,
          valid_dataloader=valid_dataloader,
          test_dataloader=test_dataloader,
          augmentation=augmentation,
          network=network,
          optimizer=optimizer,
          scheduler=scheduler,
          evaluation=evaluation,
          file=file, verbose=True
          )
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Close the log file so buffered output is flushed to disk (was leaked before).
file.close()
================================================
FILE: Examples/VAT_CIFAR10.py
================================================
from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip
from LAMDA_SSL.Augmentation.Vision.RandomCrop import RandomCrop
from LAMDA_SSL.Dataset.Vision.CIFAR10 import CIFAR10
from LAMDA_SSL.Opitimizer.SGD import SGD
from LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR
from LAMDA_SSL.Network.WideResNet import WideResNet
from LAMDA_SSL.Dataloader.UnlabeledDataloader import UnlabeledDataLoader
from LAMDA_SSL.Dataloader.LabeledDataloader import LabeledDataLoader
from LAMDA_SSL.Algorithm.Classification.VAT import VAT
from LAMDA_SSL.Sampler.RandomSampler import RandomSampler
from LAMDA_SSL.Sampler.SequentialSampler import SequentialSampler
from sklearn.pipeline import Pipeline
from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy
from LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import Top_k_Accurary
from LAMDA_SSL.Evaluation.Classifier.Precision import Precision
from LAMDA_SSL.Evaluation.Classifier.Recall import Recall
from LAMDA_SSL.Evaluation.Classifier.F1 import F1
from LAMDA_SSL.Evaluation.Classifier.AUC import AUC
from LAMDA_SSL.Evaluation.Classifier.Confusion_Matrix import Confusion_Matrix
from LAMDA_SSL.Dataset.LabeledDataset import LabeledDataset
from LAMDA_SSL.Dataset.UnlabeledDataset import UnlabeledDataset
# dataset
# NOTE: use forward slashes — they work on every OS, whereas backslashes in a
# non-raw string ('\D', '\c') are invalid escape sequences (SyntaxWarning) and
# break the path on non-Windows platforms.
dataset=CIFAR10(root='../Download/cifar-10-python',labeled_size=4000,stratified=True,shuffle=True,download=False,default_transforms=True)
labeled_X=dataset.labeled_X
labeled_y=dataset.labeled_y
unlabeled_X=dataset.unlabeled_X
test_X=dataset.test_X
test_y=dataset.test_y
valid_X=dataset.valid_X
valid_y=dataset.valid_y
labeled_dataset=LabeledDataset(pre_transform=dataset.pre_transform,transforms=dataset.transforms,
                               transform=dataset.transform,target_transform=dataset.target_transform)
unlabeled_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.unlabeled_transform)
valid_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.valid_transform)
test_dataset=UnlabeledDataset(pre_transform=dataset.pre_transform,transform=dataset.test_transform)
# sampler
labeled_sampler=RandomSampler(replacement=True,num_samples=64*(2**20))
unlabeled_sampler=RandomSampler(replacement=True)
valid_sampler=SequentialSampler()
test_sampler=SequentialSampler()
# dataloader
labeled_dataloader=LabeledDataLoader(batch_size=64,num_workers=0,drop_last=True)
unlabeled_dataloader=UnlabeledDataLoader(num_workers=0,drop_last=True)
valid_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
test_dataloader=UnlabeledDataLoader(batch_size=64,num_workers=0,drop_last=False)
# augmentation
augmentation=Pipeline([('RandomHorizontalFlip',RandomHorizontalFlip()),
                       ('RandomCrop',RandomCrop(padding=0.125,padding_mode='reflect')),
                       ])
# optimizer
optimizer=SGD(lr=0.03,momentum=0.9,nesterov=True)
# scheduler
scheduler=CosineAnnealingLR(eta_min=0,T_max=2**20)
# network
network=WideResNet(num_classes=10,depth=28,widen_factor=2,drop_rate=0)
# evaluation
evaluation={
    'accuracy':Accuracy(),
    'top_5_accuracy':Top_k_Accurary(k=5),
    'precision':Precision(average='macro'),
    'Recall':Recall(average='macro'),
    'F1':F1(average='macro'),
    'AUC':AUC(multi_class='ovo'),
    'Confusion_matrix':Confusion_Matrix(normalize='true')
}
file = open("../Result/VAT_CIFAR10.txt", "w")
model=VAT(lambda_u=0.3,lambda_entmin=0.06,eps=6,xi=1e-6,it_vat=1,warmup=0.4,mu=1,
          weight_decay=5e-4, ema_decay=0.999,
          epoch=1, num_it_epoch=2 ** 20, num_it_total=2 ** 20,
          eval_it=2000, device='cpu',
          labeled_dataset=labeled_dataset,
          unlabeled_dataset=unlabeled_dataset,
          valid_dataset=valid_dataset,
          test_dataset=test_dataset,
          labeled_sampler=labeled_sampler,
          unlabeled_sampler=unlabeled_sampler,
          valid_sampler=valid_sampler,
          test_sampler=test_sampler,
          labeled_dataloader=labeled_dataloader,
          unlabeled_dataloader=unlabeled_dataloader,
          valid_dataloader=valid_dataloader,
          test_dataloader=test_dataloader,
          augmentation=augmentation,
          network=network,
          optimizer=optimizer,
          scheduler=scheduler,
          evaluation=evaluation,
          file=file, verbose=True
          )
model.fit(X=labeled_X,y=labeled_y,unlabeled_X=unlabeled_X,valid_X=valid_X,valid_y=valid_y)
performance=model.evaluate(X=test_X,y=test_y)
result=model.y_pred
print(result,file=file)
print(performance,file=file)
# Close the log file so buffered output is flushed to disk (was leaked before).
file.close()
================================================
FILE: LAMDA_SSL/Algorithm/Classification/Assemble.py
================================================
import copy
import numbers
import numpy as np
from LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator
from sklearn.base import ClassifierMixin
from torch.utils.data.dataset import Dataset
from sklearn.neighbors import KNeighborsClassifier
import LAMDA_SSL.Config.Assemble as config
class Assemble(InductiveEstimator, ClassifierMixin):
    """ASSEMBLE: adaptive semi-supervised ensemble via boosting over labeled
    and pseudo-labeled unlabeled data."""

    def __init__(
            self,
            base_estimator=config.base_estimator,
            T=config.T,
            alpha=config.alpha,
            beta=config.beta,
            evaluation=config.evaluation,
            verbose=config.verbose,
            file=config.file):
        # >> Parameter:
        # >> - base_estimator: A base learner for ensemble learning.
        # >> - T: the number of base learners. It is also the number of iterations.
        # >> - alpha: the weight of each sample when the sampling distribution is updated.
        # >> - beta: used to initialize the sampling distribution of labeled data and unlabeled data.
        # >> - evaluation: metric object, or dict/list of metric objects, used by `evaluate`.
        # >> - verbose: whether to print scores to `file` during evaluation.
        # >> - file: output stream for verbose evaluation.
        self.base_estimator = base_estimator
        self.T = T
        self.alpha = alpha
        self.beta = beta
        # 3-NN classifier used only to bootstrap pseudo-labels for the unlabeled data.
        self.KNN = KNeighborsClassifier(n_neighbors=3)
        self.evaluation = evaluation
        self.verbose = verbose
        self.file = file
        self.w = []  # ensemble weights of the fitted base learners
        self.f = []  # fitted base learners
        self.y_pred = None
        self.y_score = None
        self._estimator_type = ClassifierMixin._estimator_type

    def predict_proba(self, X):
        """Return the weighted sum of the base learners' class probabilities."""
        y_proba = 0
        for weight, learner in zip(self.w, self.f):
            y_proba = y_proba + weight * learner.predict_proba(X)
        return y_proba

    def predict(self, X):
        """Predict labels as the argmax of the ensemble probabilities."""
        return np.argmax(self.predict_proba(X), axis=1)

    def fit(self, X, y, unlabeled_X):
        """Fit the ensemble on labeled (X, y) plus pseudo-labeled unlabeled_X."""
        self.w = []
        self.f = []
        l = X.shape[0]
        u = unlabeled_X.shape[0]
        # Initial sampling distribution: beta mass spread uniformly over labeled
        # samples, (1 - beta) over unlabeled samples.
        sample_weight = np.empty(l + u)
        sample_weight[:l] = self.beta / l
        sample_weight[l:] = (1 - self.beta) / u
        # Bootstrap pseudo-labels with 3-NN trained on the labeled data.
        unlabeled_y = self.KNN.fit(X, y).predict(unlabeled_X)
        classifier = copy.deepcopy(self.base_estimator)  # fixed typo: was `classfier`
        X_all = np.concatenate((X, unlabeled_X))
        y_all = np.concatenate((y, unlabeled_y))
        classifier.fit(X_all, y_all, sample_weight=sample_weight * (l + u))
        for i in range(self.T):
            self.f.append(classifier)
            _y_all = classifier.predict(X_all)
            # Weighted training error of the current learner (vectorized).
            epsilon = sample_weight[_y_all != y_all].sum()
            # AdaBoost-style learner weight; 1e-8 guards against division by zero.
            w = np.log((1 - epsilon) / (epsilon + 1e-8)) * 0.5
            self.w.append(w)
            if epsilon > 0.5:
                # Learner is worse than chance on the weighted sample: stop boosting.
                break
            probas = self.predict_proba(X_all)
            logits = np.max(probas, axis=1)
            # Refresh pseudo-labels with the current ensemble.
            unlabeled_y = self.predict(unlabeled_X)
            y_all = np.concatenate((y, unlabeled_y))
            if isinstance(self.alpha, numbers.Number):
                alpha = np.ones(l + u) * self.alpha
            else:
                alpha = self.alpha
            # Low-confidence samples receive larger weight in the next round.
            sample_weight = alpha * np.exp(-logits)
            sample_weight = (sample_weight + 1e-8) / \
                (sample_weight + 1e-8).sum()
            idx_sample = np.random.choice(
                l + u, l, False, p=sample_weight.tolist())
            X_sample = X_all[idx_sample]
            y_sample = y_all[idx_sample]
            sample_weight_sample = sample_weight[idx_sample]
            classifier = copy.deepcopy(self.base_estimator)
            classifier.fit(
                X_sample,
                y_sample,
                sample_weight_sample *
                X_sample.shape[0])
        return self

    def evaluate(self, X, y=None):
        """Score the ensemble on X (or a Dataset carrying its own y) with the
        configured evaluation metric(s); stores y_pred/y_score/performance."""
        if isinstance(X, Dataset) and y is None:
            y = getattr(X, 'y')
        self.y_score = self.predict_proba(X)
        self.y_pred = self.predict(X)
        if self.evaluation is None:
            return None
        elif isinstance(self.evaluation, (list, tuple)):
            performance = []
            for metric in self.evaluation:  # renamed from `eval`: don't shadow the builtin
                score = metric.scoring(y, self.y_pred, self.y_score)
                if self.verbose:
                    print(score, file=self.file)
                performance.append(score)
            self.performance = performance
            return performance
        elif isinstance(self.evaluation, dict):
            performance = {}
            for key, val in self.evaluation.items():
                performance[key] = val.scoring(y, self.y_pred, self.y_score)
                if self.verbose:
                    print(key, ' ', performance[key], file=self.file)
            self.performance = performance
            return performance
        else:
            performance = self.evaluation.scoring(y, self.y_pred, self.y_score)
            if self.verbose:
                print(performance, file=self.file)
            self.performance = performance
            return performance
================================================
FILE: LAMDA_SSL/Algorithm/Classification/CAFA.py
================================================
from LAMDA_SSL.Base.DeepModelMixin import DeepModelMixin
from LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator
from sklearn.base import ClassifierMixin
from LAMDA_SSL.Loss.Cross_Entropy import Cross_Entropy
from LAMDA_SSL.Loss.Consistency import Consistency
from LAMDA_SSL.Loss.Semi_Supervised_Loss import Semi_Supervised_Loss
from LAMDA_SSL.utils import Bn_Controller
from LAMDA_SSL.Network.AdversarialNet import AdversarialNet
import copy
import numpy as np
import LAMDA_SSL.Config.CAFA as config
import torch
import torch.nn.functional as F
import torch.nn as nn
from LAMDA_SSL.Base.BaseOptimizer import BaseOptimizer
from LAMDA_SSL.Base.BaseScheduler import BaseScheduler
from LAMDA_SSL.utils import class_status
import math
def TempScale(p, t):
    """Temperature-scale *p* by dividing it by temperature *t*."""
    scaled = p / t
    return scaled
def inverseDecaySheduler(step, initial_lr, gamma=10, power=0.75, max_iter=1000):
    """Inverse-decay LR schedule: initial_lr * (1 + gamma * p) ** (-power),
    where p = min(1, step / max_iter), so the rate plateaus after max_iter."""
    progress = min(1.0, step / float(max_iter))
    decay = (1 + gamma * progress) ** (- power)
    return initial_lr * decay
def compute_score(inputs, model, eps):
    """Compute the prediction-shift score for each sample in *inputs*.

    The input is perturbed along the negative sign of the gradient of a
    self-entropy-like loss (FGSM-style with magnitude *eps*), and the score is
    the absolute change of the maximum temperature-scaled softmax probability.

    Parameters:
        inputs: batch tensor fed to *model*; `requires_grad` is enabled on it.
        model: module returning a `(features, logits)` pair.
        eps: perturbation magnitude.
    Returns:
        Detached tensor of shape (batch, 1) with per-sample prediction shifts.
    """
    model.eval()
    inputs.requires_grad = True
    _, output = model(inputs)
    softmax_output = output.softmax(1)
    softmax_output = softmax_output / 0.5  # temperature scaling (T = 0.5)
    max_value, max_target = torch.max(softmax_output, dim=1)
    xent = F.cross_entropy(softmax_output, max_target.detach().long())
    d = torch.autograd.grad(xent, inputs)[0]
    d = torch.ge(d, 0)
    d = (d.float() - 0.5) * 2  # map {0,1} -> {-1,+1}: sign of the gradient
    # Normalizing the gradient to the same space of image
    # d[0][0] = (d[0][0]) / (63.0 / 255.0)
    # d[0][1] = (d[0][1]) / (62.1 / 255.0)
    # d[0][2] = (d[0][2]) / (66.7 / 255.0)
    # BUGFIX: the legacy three-positional form torch.add(input, alpha, other)
    # was removed from PyTorch; pass the scale via the `alpha` keyword instead.
    inputs_hat = torch.add(inputs.data, d.detach(), alpha=-eps)
    _, output_hat = model(inputs_hat)
    softmax_output_hat = output_hat.softmax(1)
    softmax_output_hat = softmax_output_hat / 0.5  # temperature scaling (T = 0.5)
    max_value_hat = torch.max(softmax_output_hat, dim=1).values
    pred_shift = torch.abs(max_value - max_value_hat).unsqueeze(1)
    model.train()
    return pred_shift.detach()
def normalize_weight(x):
    """Min-max scale *x* into [0, 1], then divide by its mean (clamped to at
    least 1e-6); the result is detached from the autograd graph."""
    lo, hi = x.min(), x.max()
    scaled = (x - lo) / (hi - lo)
    mean_val = max(torch.mean(scaled), 1e-6)
    return (scaled / mean_val).detach()
def feature_scaling(x):
    """Min-max scale *x* into [0, 1] and detach it from the autograd graph."""
    lo = x.min()
    span = x.max() - lo
    return ((x - lo) / span).detach()
def pseudo_label_calibration(pslab, weight):
    """Calibrate pseudo-labels with per-class weights.

    *weight* has shape (num_classes, 1); it is broadcast across the batch,
    normalized, and multiplied into exp(pslab); rows are renormalized to sum 1.
    """
    per_class = normalize_weight(weight.transpose(1, 0).expand(pslab.shape[0], -1))
    calibrated = torch.exp(pslab) * per_class
    return calibrated / torch.sum(calibrated, 1, keepdim=True)
def reverse_sigmoid(y):
    """Inverse sigmoid (logit) of *y*, with small epsilons for numerical safety."""
    ratio = y / (1.0 - y + 1e-10)
    return torch.log(ratio + 1e-10)
def get_label_share_weight(domain_out, pred_shift, domain_temperature=1.0, class_temperature=1.0):
    """Share weight for labeled samples: temperature-sharpened domain probability
    minus the temperature-sharpened (min-max normalized) prediction shift."""
    def _logit(y):
        # inline inverse-sigmoid with epsilon guards (same math as reverse_sigmoid)
        return torch.log(y / (1.0 - y + 1e-10) + 1e-10)
    lo, hi = pred_shift.min(), pred_shift.max()
    shift = (pred_shift - lo) / (hi - lo)
    shift = torch.sigmoid(_logit(shift) / class_temperature)
    dom = torch.sigmoid(_logit(domain_out) / domain_temperature)
    return (dom - shift).detach()
def get_unlabel_share_weight(domain_out, pred_shift, domain_temperature=1.0, class_temperature=1.0):
    """Share weight for unlabeled samples: the negation of the labeled-sample
    share weight computed by `get_label_share_weight`."""
    shared = get_label_share_weight(domain_out, pred_shift, domain_temperature, class_temperature)
    return shared.neg()
def match_string(stra, strb):
    """Greedily build an index permutation that aligns unlabeled predictions
    with the given labels.

    stra: labels.
    strb: unlabeled data predicts (per-class scores; argmax gives the class).
    Returns a permutation index tensor over the rows of strb.
    """
    pred_cls = torch.argmax(strb, dim=1)
    pred_prob = torch.max(strb, dim=1).values
    perm = torch.tensor(range(len(pred_cls)))
    for i in range(len(pred_cls)):
        if stra[i] == pred_cls[i]:
            continue  # already matched at this position
        candidates = (pred_cls[i:] == stra[i]).float()
        if candidates.sum() > 0:
            # pick the most confident matching prediction at or after i, swap it in
            j = int(i + torch.argmax(pred_prob[i:] * candidates, dim=0))
            perm[i], perm[j] = perm[j].item(), perm[i].item()
    return perm
def compute_class_weight(weight, label, class_weight):
    """Update class_weight in place: entry i becomes the mean of the sample
    weights whose label equals i; the (mutated) tensor is also returned.

    NOTE(review): a class absent from `label` yields the mean of an empty
    selection (NaN) — presumably callers ensure every class is represented.
    """
    for cls in range(len(class_weight)):
        selected = weight[label == cls]
        class_weight[cls] = selected.mean()
    return class_weight
class CAFA(DeepModelMixin,InductiveEstimator,ClassifierMixin):
    def __init__(self,lambda_u=config.lambda_u,
                 warmup=config.warmup,
                 mu=config.mu,
                 threshold=config.threshold,
                 T=config.T,
                 ema_decay=config.ema_decay,
                 adv_warmup=config.adv_warmup,
                 weight_decay=config.weight_decay,
                 eps=config.eps,
                 l_domain_temper=config.l_domain_temper,
                 u_domain_temper=config.u_domain_temper,
                 l_class_temper=config.l_class_temper,
                 u_class_temper=config.u_class_temper,
                 num_classes=config.num_classes,
                 discriminator=config.discriminator,
                 discriminator_separate=config.discriminator_separate,
                 discriminator_optimizer=config.discriminator_optimizer,
                 discriminator_optimizer_separate=config.discriminator_optimizer_separate,
                 discriminator_scheduler=config.discriminator_scheduler,
                 discriminator_scheduler_separate=config.discriminator_scheduler_separate,
                 epoch=config.epoch,
                 num_it_epoch=config.num_it_epoch,
                 num_it_total=config.num_it_total,
                 eval_epoch=config.eval_epoch,
                 eval_it=config.eval_it,
                 device=config.device,
                 train_dataset=config.train_dataset,
                 labeled_dataset=config.labeled_dataset,
                 unlabeled_dataset=config.unlabeled_dataset,
                 valid_dataset=config.valid_dataset,
                 test_dataset=config.test_dataset,
                 train_dataloader=config.train_dataloader,
                 labeled_dataloader=config.labeled_dataloader,
                 unlabeled_dataloader=config.unlabeled_dataloader,
                 valid_dataloader=config.valid_dataloader,
                 test_dataloader=config.test_dataloader,
                 train_sampler=config.train_sampler,
                 train_batch_sampler=config.train_batch_sampler,
                 valid_sampler=config.valid_sampler,
                 valid_batch_sampler=config.valid_batch_sampler,
                 test_sampler=config.test_sampler,
                 test_batch_sampler=config.test_batch_sampler,
                 labeled_sampler=config.labeled_sampler,
                 unlabeled_sampler=config.unlabeled_sampler,
                 labeled_batch_sampler=config.labeled_batch_sampler,
                 unlabeled_batch_sampler=config.unlabeled_batch_sampler,
                 augmentation=config.augmentation,
                 network=config.network,
                 optimizer=config.optimizer,
                 scheduler=config.scheduler,
                 evaluation=config.evaluation,
                 parallel=config.parallel,
                 file=config.file,
                 verbose=config.verbose
                 ):
        # >> Parameter:
        # >> - lambda_u: The weight of unsupervised loss.
        # >> - warmup: The end position of warmup. For example, num_it_total is 100 and warmup is 0.4,
        #             then warmup is performed in the first 40 iterations.
        # >> - threshold: confidence threshold on the calibrated pseudo-labels.
        # >> - T: sharpening temperature applied to the weak-view logits.
        # >> - adv_warmup: iteration count controlling the ramp-up of the adversarial loss coefficient.
        # >> - eps: perturbation magnitude used by `compute_score`.
        # >> - l_domain_temper / u_domain_temper: domain temperatures for the labeled / unlabeled share weights.
        # >> - l_class_temper / u_class_temper: class temperatures for the labeled / unlabeled share weights.
        # >> - discriminator / discriminator_separate: domain discriminators (D and D'), each with
        #      its own optimizer and scheduler template below.
        # All remaining arguments are forwarded unchanged to DeepModelMixin.
        DeepModelMixin.__init__(self,train_dataset=train_dataset,
                                valid_dataset=valid_dataset,
                                test_dataset=test_dataset,
                                train_dataloader=train_dataloader,
                                valid_dataloader=valid_dataloader,
                                test_dataloader=test_dataloader,
                                augmentation=augmentation,
                                network=network,
                                train_sampler=train_sampler,
                                train_batch_sampler=train_batch_sampler,
                                valid_sampler=valid_sampler,
                                valid_batch_sampler=valid_batch_sampler,
                                test_sampler=test_sampler,
                                test_batch_sampler=test_batch_sampler,
                                labeled_dataset=labeled_dataset,
                                unlabeled_dataset=unlabeled_dataset,
                                labeled_dataloader=labeled_dataloader,
                                unlabeled_dataloader=unlabeled_dataloader,
                                labeled_sampler=labeled_sampler,
                                unlabeled_sampler=unlabeled_sampler,
                                labeled_batch_sampler=labeled_batch_sampler,
                                unlabeled_batch_sampler=unlabeled_batch_sampler,
                                epoch=epoch,
                                num_it_epoch=num_it_epoch,
                                num_it_total=num_it_total,
                                eval_epoch=eval_epoch,
                                eval_it=eval_it,
                                mu=mu,
                                weight_decay=weight_decay,
                                ema_decay=ema_decay,
                                optimizer=optimizer,
                                scheduler=scheduler,
                                device=device,
                                evaluation=evaluation,
                                parallel=parallel,
                                file=file,
                                verbose=verbose
                                )
        self.ema_decay=ema_decay
        self.lambda_u=lambda_u
        self.weight_decay=weight_decay
        self.warmup=warmup
        self.adv_warmup=adv_warmup
        self.threshold=threshold
        self.T=T
        self.eps=eps
        self.num_classes=num_classes
        self.discriminator = discriminator
        self.discriminator_separate = discriminator_separate
        self.discriminator_optimizer=discriminator_optimizer
        self.discriminator_optimizer_separate=discriminator_optimizer_separate
        self.discriminator_scheduler=discriminator_scheduler
        self.discriminator_scheduler_separate=discriminator_scheduler_separate
        self.l_domain_temper=l_domain_temper
        self.u_domain_temper=u_domain_temper
        self.l_class_temper=l_class_temper
        self.u_class_temper=u_class_temper
        # Working copies (templates stay untouched so the estimator can be re-fit).
        self._discriminator=copy.deepcopy(discriminator)
        self._discriminator_separate = copy.deepcopy(discriminator_separate)
        self._discriminator_optimizer=copy.deepcopy(discriminator_optimizer)
        self._discriminator_optimizer_separate = copy.deepcopy(discriminator_optimizer_separate)
        self._discriminator_scheduler=copy.deepcopy(discriminator_scheduler)
        self._discriminator_scheduler_separate = copy.deepcopy(discriminator_scheduler_separate)
        self.bn_controller=Bn_Controller()
        self._estimator_type = ClassifierMixin._estimator_type
    def init_transform(self):
        # Build the training-time transforms:
        # duplicate the unlabeled transform so each unlabeled sample produces two views,
        # attach weak augmentation to labeled data and to the first unlabeled view,
        # and strong augmentation to the second unlabeled view.
        self._train_dataset.add_unlabeled_transform(copy.copy(self.train_dataset.unlabeled_transform),dim=0,x=1)
        self._train_dataset.add_transform(self.weak_augmentation,dim=1,x=0,y=0)
        self._train_dataset.add_unlabeled_transform(self.weak_augmentation,dim=1,x=0,y=0)
        self._train_dataset.add_unlabeled_transform(self.strong_augmentation,dim=1,x=1,y=0)
def init_model(self):
self._network = copy.deepcopy(self.network)
self._parallel = copy.deepcopy(self.parallel)
self._discriminator=copy.deepcopy(self.discriminator)
self._discriminator_separate = copy.deepcopy(self.discriminator_separate)
if self.device is None:
self.device='cpu'
if self.device is not 'cpu':
torch.cuda.set_device(self.device)
self._discriminator=self._discriminator.to(self.device)
self._discriminator_separate=self._discriminator_separate.to(self.device)
self._network=self._network.to(self.device)
if self._parallel is not None:
self._network=self._parallel.init_parallel(self._network)
self._discriminator=self._parallel.init_parallel(self._discriminator)
self._discriminator_separate=self._parallel.init_parallel(self._discriminator_separate)
    def start_fit(self, *args, **kwargs):
        # Prepare everything needed before the training loop starts.
        self.init_epoch()
        # Put all three trainable modules into training mode with cleared gradients.
        self._network.zero_grad()
        self._network.train()
        self._discriminator.zero_grad()
        self._discriminator.train()
        self._discriminator_separate.zero_grad()
        self._discriminator_separate.train()
        # Infer the number of classes from the labeled targets when not given.
        self.num_classes = self.num_classes if self.num_classes is not None else \
            class_status(self._train_dataset.labeled_dataset.y).num_classes
        # Per-sample share-weight buffers (updated in-place during `train`).
        self.l_weight = torch.zeros((len(self._train_dataset.labeled_dataset), 1)).to(self.device)
        self.u_weight = torch.zeros((len(self._train_dataset.unlabeled_dataset), 1)).to(self.device)
        # Per-class weights used for pseudo-label calibration.
        self.class_weight = torch.zeros((self.num_classes, 1)).to(self.device)
        self.label_all = torch.zeros(len(self._train_dataset.labeled_dataset)).to(self.device).long()
        # Beta(0.75, 0.75) distribution used to sample the feature-mixing coefficient.
        self.beta_distribution = torch.distributions.beta.Beta(0.75, 0.75)
    def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
        # One training step: returns (cls_loss, ssl_loss, adv_coef, adv_loss, adv_loss_separate)
        # for `get_loss` to combine. Also updates the per-sample share-weight
        # buffers self.l_weight / self.u_weight in place.
        lb_X=lb_X[0] if isinstance(lb_X,(tuple,list)) else lb_X
        lb_y=lb_y[0] if isinstance(lb_y,(tuple,list)) else lb_y
        # Two views of the unlabeled batch: weakly and strongly augmented.
        w_ulb_X,s_ulb_X=ulb_X[0],ulb_X[1]
        # Single forward pass over labeled + both unlabeled views.
        inputs=torch.cat((lb_X, w_ulb_X, s_ulb_X))
        batch_size=lb_X.shape[0]
        features,logits = self._network(inputs)
        lb_logits = logits[:batch_size]
        w_ulb_logits, s_ulb_logits = logits[batch_size:].chunk(2)
        l_feature=features[:batch_size]
        u_feature,_=features[batch_size:].chunk(2)
        # Prediction-shift scores (perturbation sensitivity) for both domains.
        self._network.eval()
        l_pred_shift = compute_score(lb_X.detach(), self._network,self.eps).detach()
        u_pred_shift = compute_score(ulb_X[0].detach(),self._network,self.eps).detach()
        self._network.train()
        # Domain discriminator D on (non-detached) features — adversarial signal.
        l_domain_prob = self._discriminator.forward(l_feature)
        u_domain_prob = self._discriminator.forward(u_feature)
        # Align unlabeled features to labels by predicted class before mixing.
        permidx = match_string(lb_y, w_ulb_logits)
        shuf_u_feature = u_feature[permidx]
        cos_sim = nn.CosineSimilarity(dim=1)(l_feature, shuf_u_feature)
        cos_sim = feature_scaling(cos_sim)
        cos_sim = cos_sim.unsqueeze(1).detach()
        # Mixup in feature space with a Beta-sampled coefficient (lam >= 0.5).
        lam = self.beta_distribution.sample().item()
        lam = max(lam, 1 - lam)
        mix_feature = lam * l_feature + (1 - lam) * shuf_u_feature
        # Separate discriminator D' sees detached features only (not adversarial
        # w.r.t. the backbone); it estimates domain shareability.
        domain_prob_separate_mix = self._discriminator_separate(mix_feature.detach())
        l_domain_prob_separate = self._discriminator_separate.forward(l_feature.detach())
        u_domain_prob_separate = self._discriminator_separate.forward(u_feature.detach())
        label_share_weight = get_label_share_weight(
            l_domain_prob_separate, l_pred_shift, domain_temperature=self.l_domain_temper,
            class_temperature=self.l_class_temper)
        label_share_weight = normalize_weight(label_share_weight)
        unlabel_share_weight = get_unlabel_share_weight(
            u_domain_prob_separate, u_pred_shift, domain_temperature=self.u_domain_temper,
            class_temperature=self.u_class_temper)
        unlabel_share_weight = normalize_weight(unlabel_share_weight)
        # Adversarial loss for D, weighted by the share weights from the PREVIOUS
        # visit of these samples (buffers are refreshed below).
        adv_loss = torch.zeros(1).to(self.device)
        adv_loss_separate = torch.zeros(1).to(self.device)
        tmp = self.l_weight[lb_idx] * nn.BCELoss(reduction="none")(l_domain_prob, torch.zeros_like(l_domain_prob))
        adv_loss += torch.mean(tmp, dim=0)
        tmp = self.u_weight[ulb_idx] * nn.BCELoss(reduction="none")(u_domain_prob, torch.ones_like(u_domain_prob))
        adv_loss += torch.mean(tmp, dim=0)
        self.l_weight[lb_idx] = label_share_weight
        self.u_weight[ulb_idx] = unlabel_share_weight
        # D'
        # tmp = cos_sim * nn.BCELoss(reduction="none")(domain_prob_separate_mix, torch.ones_like(domain_prob_separate_mix)*(1 - lam))
        # Soft-target BCE against the mixing coefficient, scaled by feature similarity.
        tmp = cos_sim * (-1. * (1 - lam) * torch.log(domain_prob_separate_mix) - lam * torch.log(
            1 - domain_prob_separate_mix))
        adv_loss_separate += torch.mean(tmp, dim=0)
        adv_loss_separate += nn.BCELoss()(l_domain_prob_separate, torch.zeros_like(l_domain_prob_separate))
        adv_loss_separate += nn.BCELoss()(u_domain_prob_separate, torch.ones_like(u_domain_prob_separate))
        # After a short burn-in, calibrate pseudo-labels with the class weights.
        if self.it_total > 100:
            w_ulb_logits = pseudo_label_calibration(w_ulb_logits, self.class_weight)
        # ramp up exp(-5(1 - t)^2)
        # coef = 1. * math.exp(-5 * (1 - min(self.it_total / (self.warmup*self.num_it_total), 1)) ** 2)
        # pseudo_label = torch.softmax(u_output.detach() / self.T, dim=-1)
        # max_probs, targets_u = torch.max(pseudo_label, dim=-1)
        # mask = max_probs.ge(self.threshold).float()
        # ssl_loss = (Cross_Entropy(reduction='none')(s_u_output, targets_u) * mask).mean()* coef
        # FixMatch-style consistency: confident weak-view pseudo-labels supervise
        # the strong view.
        pseudo_label = torch.softmax(w_ulb_logits.detach() / self.T, dim=-1)
        max_probs, targets_u = torch.max(pseudo_label, dim=-1)
        mask = max_probs.ge(self.threshold).float()
        ssl_loss = (Cross_Entropy(reduction='none')(s_ulb_logits, targets_u) * mask).mean()*self.lambda_u
        # supervised loss
        cls_loss = Cross_Entropy(reduction='mean')(logits=lb_logits,targets=lb_y)
        # Adversarial coefficient ramp-up: exp(-5 * (1 - t)^2), t = it/adv_warmup.
        adv_coef = 1. * math.exp(-5 * (1 - min(self.it_total / self.adv_warmup, 1)) ** 2)
        return cls_loss , ssl_loss , adv_coef , adv_loss , adv_loss_separate
def init_optimizer(self):
self._optimizer=copy.deepcopy(self.optimizer)
self._discriminator_optimizer = copy.deepcopy(self.discriminator_optimizer)
self._discriminator_optimizer_separate = copy.deepcopy(self.discriminator_optimizer_separate)
if isinstance(self._optimizer,BaseOptimizer):
no_decay = ['bias', 'bn']
grouped_parameters = [
{'params': [p for n, p in self._network.named_parameters() if not any(
nd in n for nd in no_decay)], 'weight_decay': self.weight_decay},
{'params': [p for n, p in self._network.named_parameters() if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
self._optimizer=self._optimizer.init_optimizer(params=grouped_parameters)
if isinstance(self._discriminator_optimizer,BaseOptimizer):
no_decay = ['bias', 'bn']
grouped_parameters = [
{'params': [p for n, p in self._discriminator.named_parameters() if not any(
nd in n for nd in no_decay)], 'weight_decay': self.weight_decay},
{'params': [p for n, p in self._discriminator.named_parameters() if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
self._discriminator_optimizer=self._discriminator_optimizer.init_optimizer(params=grouped_parameters)
if isinstance(self._discriminator_optimizer_separate,BaseOptimizer):
no_decay = ['bias', 'bn']
grouped_parameters = [
{'params': [p for n, p in self._discriminator_separate.named_parameters() if not any(
nd in n for nd in no_decay)], 'weight_decay': self.weight_decay},
{'params': [p for n, p in self._discriminator_separate.named_parameters() if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
self._discriminator_optimizer_separate=self._discriminator_optimizer_separate.init_optimizer(params=grouped_parameters)
def init_scheduler(self):
    """Instantiate the learning-rate schedulers for the backbone optimizer
    and the two discriminator optimizers.

    Each configured scheduler is deep-copied; LAMDA-SSL ``BaseScheduler``
    wrappers are additionally bound to their matching optimizer.
    """
    pairs = (
        ('scheduler', '_optimizer'),
        ('discriminator_scheduler', '_discriminator_optimizer'),
        ('discriminator_scheduler_separate', '_discriminator_optimizer_separate'),
    )
    for sched_name, opt_name in pairs:
        sched = copy.deepcopy(getattr(self, sched_name))
        if isinstance(sched, BaseScheduler):
            sched = sched.init_scheduler(optimizer=getattr(self, opt_name))
        setattr(self, '_' + sched_name, sched)
def optimize(self,loss,*args,**kwargs):
    # One optimization step for all three sub-networks. A single backward
    # pass populates the gradients of the backbone and of both
    # discriminators, since all of them participate in the loss graph.
    self._network.zero_grad()
    self._discriminator.zero_grad()
    self._discriminator_separate.zero_grad()
    loss.backward()
    # Step each optimizer, then its scheduler (when one is configured).
    self._optimizer.step()
    if self._scheduler is not None:
        self._scheduler.step()
    self._discriminator_optimizer.step()
    if self._discriminator_scheduler is not None:
        self._discriminator_scheduler.step()
    self._discriminator_optimizer_separate.step()
    if self._discriminator_scheduler_separate is not None:
        self._discriminator_scheduler_separate.step()
    # Keep the EMA copy of the model in sync after every step.
    if self.ema is not None:
        self.ema.update()
def end_fit_epoch(self, *args, **kwargs):
    # After each epoch, re-estimate the per-class weights from the labels
    # seen so far (compute_class_weight is a module-level helper in this file).
    self.class_weight = compute_class_weight(self.l_weight, self.label_all, self.class_weight)
@torch.no_grad()
def estimate(self, X, idx=None, *args, **kwargs):
    # Inference helper used by predict(): the network returns a 2-tuple and
    # only the second element is used as the prediction scores
    # (presumably (features, logits) — TODO confirm against the network).
    _,outputs = self._network(X)
    return outputs
def get_loss(self,train_result,*args,**kwargs):
    """Combine the partial losses produced by train() into the total loss.

    train_result is the 5-tuple (cls_loss, ssl_loss, adv_coef, adv_loss,
    adv_loss_separate); the adversarial terms are ramped in by adv_coef,
    the warm-up factor computed in train(). Removed the stale block of
    commented-out code left over from another algorithm.
    """
    cls_loss, ssl_loss, adv_coef, adv_loss, adv_loss_separate = train_result
    loss = cls_loss + ssl_loss + adv_coef * (adv_loss + adv_loss_separate)
    return loss
def predict(self,X=None,valid=None):
    # Delegate to the shared deep-model prediction loop in DeepModelMixin.
    return DeepModelMixin.predict(self,X=X,valid=valid)
================================================
FILE: LAMDA_SSL/Algorithm/Classification/Co_Training.py
================================================
from LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator
import numpy as np
from sklearn.base import ClassifierMixin
import random
import copy
from torch.utils.data.dataset import Dataset
import LAMDA_SSL.Config.Co_Training as config
from LAMDA_SSL.Split.ViewSplit import ViewSplit
class Co_Training(InductiveEstimator, ClassifierMixin):
    # Co-training (Blum & Mitchell style): two base learners are trained on
    # two feature views and iteratively teach each other by pseudo-labeling
    # the unlabeled samples they are most confident about.
    def __init__(
            self,
            base_estimator=config.base_estimator,
            base_estimator_2=config.base_estimator_2,
            p=config.p,
            n=config.n,
            k=config.k,
            s=config.s,
            random_state=config.random_state,
            evaluation=config.evaluation,
            verbose=config.verbose,
            binary=config.binary,
            threshold=config.threshold,
            file=config.file):
        # >> Parameter:
        # >> - base_estimator: the first learner for co-training.
        # >> - base_estimator_2: the second learner for co-training.
        # >> - p: In each round, each base learner selects at most p positive samples to assign pseudo-labels.
        # >> - n: In each round, each base learner selects at most n negative samples to assign pseudo-labels.
        # >> - k: iteration rounds.
        # >> - s: the size of the buffer pool in each iteration.
        # >> - random_state: seed passed to the global `random` module.
        # >> - evaluation: metric object, or a list/dict of metric objects, used by evaluate().
        # >> - verbose: whether to print scores during evaluate().
        # >> - binary: if True, use the classic positive/negative selection scheme; otherwise select purely by confidence.
        # >> - threshold: minimum predicted probability for a sample to be pseudo-labeled.
        # >> - file: output stream used for verbose printing.
        self.base_estimator = base_estimator
        self.base_estimator_2 = base_estimator_2
        self.p = p
        self.n = n
        self.k = k
        self.s = s
        self.random_state = random_state
        self.evaluation = evaluation
        self.binary = binary
        self.threshold = threshold
        self.verbose = verbose
        self.file = file
        # Both learners may be supplied as a single pair.
        if isinstance(self.base_estimator, (list, tuple)):
            self.base_estimator, self.base_estimator_2 = self.base_estimator[
                0], self.base_estimator[1]
        # Fall back to an independent copy of the first learner.
        if self.base_estimator_2 is None:
            self.base_estimator_2 = copy.deepcopy(self.base_estimator)
        self.y_pred = None
        self.y_score = None
        # NOTE: seeds the *global* random module, which also affects any
        # other users of that module in the process.
        random.seed(self.random_state)
        self._estimator_type = ClassifierMixin._estimator_type

    def fit(self, X, y, unlabeled_X, X_2=None, unlabeled_X_2=None):
        """Train both view-specific learners by iterative co-training.

        When the second view is not given explicitly it is taken from a
        (list, tuple) first argument or produced by splitting the features
        with ViewSplit. Returns self.
        """
        if X_2 is None:
            if isinstance(X, (list, tuple)):
                X, X_2 = X[0], X[1]
            else:
                X, X_2 = ViewSplit(X, shuffle=False)
        if unlabeled_X_2 is None:
            if isinstance(unlabeled_X, (list, tuple)):
                unlabeled_X, unlabeled_X_2 = unlabeled_X[0], unlabeled_X[1]
            else:
                unlabeled_X, unlabeled_X_2 = ViewSplit(
                    unlabeled_X, shuffle=False)
        X = copy.copy(X)
        X_2 = copy.copy(X_2)
        y = copy.copy(y)
        unlabeled_y = np.ones(len(unlabeled_X)) * -1  # -1 == not yet labeled
        unlabeled_idx = np.arange(len(unlabeled_X))
        random.shuffle(unlabeled_idx)
        # Candidate pool of at most s samples considered in each round.
        selected_unlabeled_idx = unlabeled_idx[-min(
            len(unlabeled_idx), self.s):]
        unlabeled_idx = unlabeled_idx[:-len(selected_unlabeled_idx)]
        it = 0
        while it != self.k and len(unlabeled_idx):
            it += 1
            self.base_estimator.fit(X, y)
            self.base_estimator_2.fit(X_2, y)
            proba_1 = self.base_estimator.predict_proba(
                unlabeled_X[selected_unlabeled_idx])
            proba_2 = self.base_estimator_2.predict_proba(
                unlabeled_X_2[selected_unlabeled_idx])
            if self.binary:
                # positive_samples / negative_samples hold POSITIONS within
                # the candidate pool (row indices of proba_1/proba_2).
                negative_samples, positive_samples = [], []
                for i in (proba_1[:, 0].argsort())[-self.n:]:
                    if proba_1[i, 0] > self.threshold:
                        negative_samples.append(i)
                for i in (proba_1[:, 1].argsort())[-self.p:]:
                    if proba_1[i, 1] > self.threshold:
                        positive_samples.append(i)
                for i in (proba_2[:, 0].argsort())[-self.n:]:
                    if proba_2[i, 0] > self.threshold:
                        negative_samples.append(i)
                for i in (proba_2[:, 1].argsort())[-self.p:]:
                    if proba_2[i, 1] > self.threshold:
                        positive_samples.append(i)
                unlabeled_y[[selected_unlabeled_idx[x]
                             for x in positive_samples]] = 1
                unlabeled_y[[selected_unlabeled_idx[x]
                             for x in negative_samples]] = 0
                for x in positive_samples:
                    X = np.vstack([X, unlabeled_X[selected_unlabeled_idx[x]]])
                    X_2 = np.vstack(
                        [X_2, unlabeled_X_2[selected_unlabeled_idx[x]]])
                    y = np.hstack([y, unlabeled_y[selected_unlabeled_idx[x]]])
                for x in negative_samples:
                    X = np.vstack([X, unlabeled_X[selected_unlabeled_idx[x]]])
                    X_2 = np.vstack(
                        [X_2, unlabeled_X_2[selected_unlabeled_idx[x]]])
                    y = np.hstack([y, unlabeled_y[selected_unlabeled_idx[x]]])
                # BUGFIX: prune the pool by POSITION. The previous code
                # compared pool VALUES (original unlabeled indices) against
                # the position lists, so the wrong entries were removed.
                chosen = set(positive_samples) | set(negative_samples)
                selected_unlabeled_idx = np.array(
                    [elem for pos, elem in enumerate(selected_unlabeled_idx)
                     if pos not in chosen], dtype=int)
                num_selected = len(positive_samples) + len(negative_samples)
            else:
                pred_1 = np.argmax(proba_1, axis=1)
                pred_2 = np.argmax(proba_2, axis=1)
                confidence_1 = np.max(proba_1, axis=1)
                confidence_2 = np.max(proba_2, axis=1)
                selected_1 = confidence_1 > self.threshold
                selected_2 = confidence_2 > self.threshold
                unlabeled_y[selected_unlabeled_idx[selected_1]] = pred_1[selected_1]
                unlabeled_y[selected_unlabeled_idx[selected_2]] = pred_2[selected_2]
                # Positions (within the pool) of the most confident samples
                # for each view.
                selected_samples = []
                for i in (confidence_1.argsort())[-self.n:]:
                    if confidence_1[i] > self.threshold:
                        selected_samples.append(i)
                for i in (confidence_2.argsort())[-self.n:]:
                    if confidence_2[i] > self.threshold:
                        selected_samples.append(i)
                for x in selected_samples:
                    X = np.vstack([X, unlabeled_X[selected_unlabeled_idx[x]]])
                    X_2 = np.vstack(
                        [X_2, unlabeled_X_2[selected_unlabeled_idx[x]]])
                    y = np.hstack([y, unlabeled_y[selected_unlabeled_idx[x]]])
                # BUGFIX: same position-vs-value pruning fix as above.
                chosen = set(selected_samples)
                selected_unlabeled_idx = np.array(
                    [elem for pos, elem in enumerate(selected_unlabeled_idx)
                     if pos not in chosen], dtype=int)
                num_selected = len(selected_samples)
            # Refill the pool from the remaining unlabeled samples.
            num_selected = min(num_selected, len(unlabeled_idx))
            selected_unlabeled_idx = np.concatenate(
                (selected_unlabeled_idx, unlabeled_idx[:num_selected]))
            unlabeled_idx = unlabeled_idx[num_selected:]
        # Final refit on the augmented labeled sets.
        self.base_estimator.fit(X, y)
        self.base_estimator_2.fit(X_2, y)
        return self

    def predict(self, X, X_2=None):
        """Predict hard labels by arg-maxing the averaged view probabilities."""
        y_proba = self.predict_proba(X=X, X_2=X_2)
        y_pred = np.argmax(y_proba, axis=1)
        return y_pred

    def predict_proba(self, X, X_2=None):
        """Average the class-probability estimates of the two base learners."""
        if X_2 is None:
            if isinstance(X, (list, tuple)):
                X, X_2 = X[0], X[1]
            else:
                X, X_2 = ViewSplit(X, shuffle=False)
        y1_proba = self.base_estimator.predict_proba(X)
        y2_proba = self.base_estimator_2.predict_proba(X_2)
        y_proba = (y1_proba + y2_proba) / 2
        return y_proba

    def evaluate(self, X, y=None):
        """Score the fitted model with the configured evaluation metric(s).

        Accepts a Dataset carrying its own `y`. Stores y_pred/y_score and
        the computed performance on the instance and returns the latter.
        """
        if isinstance(X, Dataset) and y is None:
            y = getattr(X, 'y')
        self.y_score = self.predict_proba(X)
        self.y_pred = self.predict(X)
        if self.evaluation is None:
            return None
        elif isinstance(self.evaluation, (list, tuple)):
            performance = []
            for eval in self.evaluation:
                score = eval.scoring(y, self.y_pred, self.y_score)
                if self.verbose:
                    print(score, file=self.file)
                performance.append(score)
            self.performance = performance
            return performance
        elif isinstance(self.evaluation, dict):
            performance = {}
            for key, val in self.evaluation.items():
                performance[key] = val.scoring(y, self.y_pred, self.y_score)
                if self.verbose:
                    print(key, ' ', performance[key], file=self.file)
            self.performance = performance
            return performance
        else:
            performance = self.evaluation.scoring(y, self.y_pred, self.y_score)
            if self.verbose:
                print(performance, file=self.file)
            self.performance = performance
            return performance
================================================
FILE: LAMDA_SSL/Algorithm/Classification/FixMatch.py
================================================
import copy
from LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator
from LAMDA_SSL.Base.DeepModelMixin import DeepModelMixin
from sklearn.base import ClassifierMixin
import LAMDA_SSL.Config.FixMatch as config
from LAMDA_SSL.Loss.Cross_Entropy import Cross_Entropy
from LAMDA_SSL.Loss.Semi_Supervised_Loss import Semi_Supervised_Loss
import torch
class FixMatch(InductiveEstimator,DeepModelMixin,ClassifierMixin):
    # FixMatch: consistency regularization with confidence-thresholded
    # pseudo-labels — predictions on weakly augmented views supervise the
    # predictions on strongly augmented views of the same samples.
    def __init__(self,
                 threshold=config.threshold,
                 lambda_u=config.lambda_u,
                 T=config.T,
                 mu=config.mu,
                 weight_decay=config.weight_decay,
                 ema_decay=config.ema_decay,
                 epoch=config.epoch,
                 num_it_epoch=config.num_it_epoch,
                 num_it_total=config.num_it_total,
                 eval_epoch=config.eval_epoch,
                 eval_it=config.eval_it,
                 optimizer=config.optimizer,
                 scheduler=config.scheduler,
                 device=config.device,
                 train_dataset=config.train_dataset,
                 labeled_dataset=config.labeled_dataset,
                 unlabeled_dataset=config.unlabeled_dataset,
                 valid_dataset=config.valid_dataset,
                 test_dataset=config.test_dataset,
                 train_dataloader=config.train_dataloader,
                 valid_dataloader=config.valid_dataloader,
                 test_dataloader=config.test_dataloader,
                 augmentation=config.augmentation,
                 network=config.network,
                 train_sampler=config.train_sampler,
                 train_batch_sampler=config.train_batch_sampler,
                 valid_sampler=config.valid_sampler,
                 valid_batch_sampler=config.valid_batch_sampler,
                 test_sampler=config.test_sampler,
                 test_batch_sampler=config.test_batch_sampler,
                 labeled_dataloader=config.labeled_dataloader,
                 unlabeled_dataloader=config.unlabeled_dataloader,
                 labeled_sampler=config.labeled_sampler,
                 unlabeled_sampler=config.unlabeled_sampler,
                 labeled_batch_sampler=config.labeled_batch_sampler,
                 unlabeled_batch_sampler=config.unlabeled_batch_sampler,
                 parallel=config.parallel,
                 evaluation=config.evaluation,
                 file=config.file,
                 verbose=config.verbose
                 ):
        # >> Parameter:
        # >> - threshold: The confidence threshold above which a pseudo-label is kept.
        # >> - lambda_u: The weight of the unsupervised loss.
        # >> - T: Sharpening temperature applied to the weak-view logits in get_loss().
        # >> - mu: The ratio of unlabeled to labeled samples in a training batch.
        # NOTE(review): the remaining parameters (datasets, dataloaders,
        # samplers, optimizer, scheduler, device, parallel, evaluation, ...)
        # are forwarded unchanged to DeepModelMixin.__init__. The previous
        # comment here documented parameters of a different algorithm
        # (num_classes, thresh_warmup, use_DA, p_target) that this class
        # does not accept, and was removed.
        DeepModelMixin.__init__(self,train_dataset=train_dataset,
                                valid_dataset=valid_dataset,
                                labeled_dataset=labeled_dataset,
                                unlabeled_dataset=unlabeled_dataset,
                                test_dataset=test_dataset,
                                train_dataloader=train_dataloader,
                                valid_dataloader=valid_dataloader,
                                test_dataloader=test_dataloader,
                                augmentation=augmentation,
                                network=network,
                                train_sampler=train_sampler,
                                train_batch_sampler=train_batch_sampler,
                                valid_sampler=valid_sampler,
                                valid_batch_sampler=valid_batch_sampler,
                                test_sampler=test_sampler,
                                test_batch_sampler=test_batch_sampler,
                                labeled_dataloader=labeled_dataloader,
                                unlabeled_dataloader=unlabeled_dataloader,
                                labeled_sampler=labeled_sampler,
                                unlabeled_sampler=unlabeled_sampler,
                                labeled_batch_sampler=labeled_batch_sampler,
                                unlabeled_batch_sampler=unlabeled_batch_sampler,
                                epoch=epoch,
                                num_it_epoch=num_it_epoch,
                                num_it_total=num_it_total,
                                eval_epoch=eval_epoch,
                                eval_it=eval_it,
                                mu=mu,
                                weight_decay=weight_decay,
                                ema_decay=ema_decay,
                                optimizer=optimizer,
                                scheduler=scheduler,
                                device=device,
                                evaluation=evaluation,
                                parallel=parallel,
                                file=file,
                                verbose=verbose
                                )
        self.lambda_u=lambda_u
        self.threshold=threshold
        self.T=T
        self.weight_decay=weight_decay
        self._estimator_type=ClassifierMixin._estimator_type

    def init_transform(self):
        # Unlabeled x gets two views: weak (x=0) and strong (x=1); labeled x
        # gets the weak augmentation only.
        self._train_dataset.add_unlabeled_transform(copy.copy(self.train_dataset.unlabeled_transform),dim=0,x=1)
        self._train_dataset.add_transform(self.weak_augmentation,dim=1,x=0,y=0)
        self._train_dataset.add_unlabeled_transform(self.weak_augmentation,dim=1,x=0,y=0)
        self._train_dataset.add_unlabeled_transform(self.strong_augmentation,dim=1,x=1,y=0)

    def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
        # Forward labeled, weak-unlabeled and strong-unlabeled samples as a
        # single batch, then split the logits back apart.
        lb_X=lb_X[0] if isinstance(lb_X,(tuple,list)) else lb_X
        lb_y=lb_y[0] if isinstance(lb_y,(tuple,list)) else lb_y
        w_ulb_X,s_ulb_X=ulb_X[0],ulb_X[1]
        batch_size = lb_X.shape[0]
        inputs=torch.cat((lb_X, w_ulb_X, s_ulb_X))
        logits = self._network(inputs)
        lb_logits = logits[:batch_size]
        w_ulb_logits, s_ulb_logits = logits[batch_size:].chunk(2)
        train_result=(lb_logits,lb_y,w_ulb_logits, s_ulb_logits)
        return train_result

    def get_loss(self,train_result,*args,**kwargs):
        # Supervised cross-entropy plus confidence-masked consistency loss.
        lb_logits, lb_y, w_ulb_logits, s_ulb_logits = train_result
        sup_loss=Cross_Entropy(reduction='mean')(logits=lb_logits,targets=lb_y)
        # Pseudo-labels come from the weak view (detached, sharpened by T).
        pseudo_label = torch.softmax(w_ulb_logits.detach() / self.T, dim=-1)
        max_probs, targets_u = torch.max(pseudo_label, dim=-1)
        # Only samples whose confidence reaches the threshold contribute.
        mask = max_probs.ge(self.threshold).float()
        unsup_loss = (Cross_Entropy(reduction='none')(s_ulb_logits, targets_u) * mask).mean()
        loss=Semi_Supervised_Loss(lambda_u =self.lambda_u)(sup_loss,unsup_loss)
        return loss

    def predict(self,X=None,valid=None):
        # Delegate to the shared deep-model prediction loop.
        return DeepModelMixin.predict(self,X=X,valid=valid)
================================================
FILE: LAMDA_SSL/Algorithm/Classification/Fix_A_Step.py
================================================
import copy
from LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator
from LAMDA_SSL.Base.DeepModelMixin import DeepModelMixin
from sklearn.base import ClassifierMixin
import LAMDA_SSL.Config.FixMatch as config
from LAMDA_SSL.Loss.Cross_Entropy import Cross_Entropy
from LAMDA_SSL.Loss.Semi_Supervised_Loss import Semi_Supervised_Loss
from LAMDA_SSL.utils import class_status
import torch
import torch.nn.functional as F
import numpy as np
def interleave(x, size):
    """Reorder the batch dimension so that consecutive groups of *size*
    samples become interleaved (used to mix labeled/unlabeled samples
    before a forward pass). Trailing dimensions are untouched."""
    trailing = list(x.shape[1:])
    grouped = x.reshape([-1, size] + trailing)
    return grouped.transpose(0, 1).reshape([-1] + trailing)
def de_interleave(x, size):
    """Inverse of interleave(): restore the original batch ordering after a
    forward pass through the network. Trailing dimensions are untouched."""
    trailing = list(x.shape[1:])
    grouped = x.reshape([size, -1] + trailing)
    return grouped.transpose(0, 1).reshape([-1] + trailing)
class Fix_A_Step(InductiveEstimator,DeepModelMixin,ClassifierMixin):
def __init__(self,
             num_classes=None,
             warmup=0,
             alpha=0.75,
             threshold=config.threshold,
             lambda_u=config.lambda_u,
             T=config.T,
             mu=config.mu,
             weight_decay=config.weight_decay,
             ema_decay=config.ema_decay,
             epoch=config.epoch,
             num_it_epoch=config.num_it_epoch,
             num_it_total=config.num_it_total,
             eval_epoch=config.eval_epoch,
             eval_it=config.eval_it,
             optimizer=config.optimizer,
             scheduler=config.scheduler,
             device=config.device,
             train_dataset=config.train_dataset,
             labeled_dataset=config.labeled_dataset,
             unlabeled_dataset=config.unlabeled_dataset,
             valid_dataset=config.valid_dataset,
             test_dataset=config.test_dataset,
             train_dataloader=config.train_dataloader,
             valid_dataloader=config.valid_dataloader,
             test_dataloader=config.test_dataloader,
             augmentation=config.augmentation,
             network=config.network,
             train_sampler=config.train_sampler,
             train_batch_sampler=config.train_batch_sampler,
             valid_sampler=config.valid_sampler,
             valid_batch_sampler=config.valid_batch_sampler,
             test_sampler=config.test_sampler,
             test_batch_sampler=config.test_batch_sampler,
             labeled_dataloader=config.labeled_dataloader,
             unlabeled_dataloader=config.unlabeled_dataloader,
             labeled_sampler=config.labeled_sampler,
             unlabeled_sampler=config.unlabeled_sampler,
             labeled_batch_sampler=config.labeled_batch_sampler,
             unlabeled_batch_sampler=config.unlabeled_batch_sampler,
             parallel=config.parallel,
             evaluation=config.evaluation,
             file=config.file,
             verbose=config.verbose
             ):
    # >> Parameter:
    # >> - num_classes: The number of classes; inferred from the labeled training targets when None.
    # >> - warmup: fraction of num_it_total during which the unsupervised loss is always applied (the gradient-alignment check in train() is skipped).
    # >> - alpha: Beta(alpha, alpha) parameter for the mixup interpolation factor.
    # >> - threshold: The confidence threshold above which a pseudo-label is kept.
    # >> - lambda_u: The weight of the unsupervised loss.
    # >> - T: Sharpening temperature for the pseudo-label distribution.
    # >> - mu: The ratio of unlabeled to labeled samples in a training batch.
    # NOTE(review): defaults are reused from the FixMatch config module; all
    # remaining parameters are forwarded unchanged to DeepModelMixin.__init__.
    # The previous comment here documented parameters this class does not
    # accept (thresh_warmup, use_DA, p_target) and was removed.
    DeepModelMixin.__init__(self,train_dataset=train_dataset,
                            valid_dataset=valid_dataset,
                            labeled_dataset=labeled_dataset,
                            unlabeled_dataset=unlabeled_dataset,
                            test_dataset=test_dataset,
                            train_dataloader=train_dataloader,
                            valid_dataloader=valid_dataloader,
                            test_dataloader=test_dataloader,
                            augmentation=augmentation,
                            network=network,
                            train_sampler=train_sampler,
                            train_batch_sampler=train_batch_sampler,
                            valid_sampler=valid_sampler,
                            valid_batch_sampler=valid_batch_sampler,
                            test_sampler=test_sampler,
                            test_batch_sampler=test_batch_sampler,
                            labeled_dataloader=labeled_dataloader,
                            unlabeled_dataloader=unlabeled_dataloader,
                            labeled_sampler=labeled_sampler,
                            unlabeled_sampler=unlabeled_sampler,
                            labeled_batch_sampler=labeled_batch_sampler,
                            unlabeled_batch_sampler=unlabeled_batch_sampler,
                            epoch=epoch,
                            num_it_epoch=num_it_epoch,
                            num_it_total=num_it_total,
                            eval_epoch=eval_epoch,
                            eval_it=eval_it,
                            mu=mu,
                            weight_decay=weight_decay,
                            ema_decay=ema_decay,
                            optimizer=optimizer,
                            scheduler=scheduler,
                            device=device,
                            evaluation=evaluation,
                            parallel=parallel,
                            file=file,
                            verbose=verbose
                            )
    self.lambda_u=lambda_u
    self.alpha=alpha
    self.warmup=warmup
    self.threshold=threshold
    self.num_classes=num_classes
    self.T=T
    self.weight_decay=weight_decay
    self._estimator_type=ClassifierMixin._estimator_type
def init_transform(self):
    # Build the augmentation pipeline: unlabeled x receives three copies —
    # two weakly augmented views (x=0, x=1) and one strongly augmented view
    # (x=2) — while labeled x receives the weak augmentation only.
    self._train_dataset.add_unlabeled_transform(copy.copy(self.train_dataset.unlabeled_transform),dim=0,x=1)
    self._train_dataset.add_unlabeled_transform(copy.copy(self.train_dataset.unlabeled_transform),dim=0,x=2)
    self._train_dataset.add_transform(self.weak_augmentation,dim=1,x=0,y=0)
    self._train_dataset.add_unlabeled_transform(self.weak_augmentation,dim=1,x=0,y=0)
    self._train_dataset.add_unlabeled_transform(self.weak_augmentation,dim=1,x=1,y=0)
    self._train_dataset.add_unlabeled_transform(self.strong_augmentation,dim=1,x=2,y=0)
def start_fit(self):
    """Prepare for training: infer the class count when the user did not
    supply one, clear stale gradients and switch the network to train mode."""
    if self.num_classes is None:
        self.num_classes = class_status(
            self._train_dataset.labeled_dataset.y).num_classes
    self._network.zero_grad()
    self._network.train()
def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
    # One Fix-A-Step training step: a MixMatch-style supervised loss plus a
    # FixMatch-style unsupervised loss; after warm-up the unsupervised term
    # is kept only when its gradient does not conflict with the supervised
    # gradient (negative dot product => dropped).
    lb_X=lb_X[0] if isinstance(lb_X,(tuple,list)) else lb_X
    lb_y=lb_y[0] if isinstance(lb_y,(tuple,list)) else lb_y
    # Two weakly augmented views and one strongly augmented view
    # (set up in init_transform).
    w_ulb_X_1, w_ulb_X_2, s_ulb_X=ulb_X[0],ulb_X[1],ulb_X[2]
    batch_size = lb_X.shape[0]
    with torch.no_grad():
        # Guess the pseudo-label distribution by averaging the predictions
        # on the two weak views, then sharpen with temperature T.
        u_output1 = self._network(w_ulb_X_1)
        u_output2 = self._network(w_ulb_X_2)
        p = (torch.softmax(u_output1, dim=1) + torch.softmax(u_output2, dim=1)) / 2
        pt = p**(1/self.T)
        u_targets = pt/pt.sum(dim=1, keepdim=True)
        u_targets = u_targets.detach()
    # One-hot encode the labeled targets.
    lb_y = torch.zeros(batch_size, self.num_classes).to(self.device).scatter_(1, lb_y.view(-1,1).long(), 1)
    Augment_combined_inputs = torch.cat([lb_X, w_ulb_X_1, w_ulb_X_2], dim=0)
    Augment_combined_labels = torch.cat([lb_y, u_targets, u_targets], dim=0)
    # Mixup: interpolation factor from Beta(alpha, alpha), biased towards
    # the original sample (l >= 0.5).
    l = np.random.beta(self.alpha, self.alpha)
    l = max(l, 1-l)
    idx = torch.randperm(Augment_combined_inputs.size(0))
    input_a, input_b = Augment_combined_inputs, Augment_combined_inputs[idx]
    target_a, target_b = Augment_combined_labels, Augment_combined_labels[idx]
    mixed_input = l * input_a + (1-l) * input_b
    mixed_target = l * target_a + (1-l) * target_b
    # Only the labeled part of the mixed batch feeds the supervised loss.
    mixed_labeled_input = torch.split(mixed_input, batch_size)[0]
    mixed_labeled_target = torch.split(mixed_target, batch_size)[0]
    # Interleave so batch-norm statistics mix labeled/unlabeled samples.
    inputs = interleave(torch.cat((mixed_labeled_input, w_ulb_X_1, s_ulb_X)), 2*self.mu+1).to(self.device)
    logits = self._network(inputs)
    logits = de_interleave(logits, 2*self.mu+1)
    logits_x = logits[:batch_size]
    logits_u_w, logits_u_s = logits[batch_size:].chunk(2)
    del logits
    # Supervised loss: soft cross-entropy against the mixed targets.
    labeledtrain_loss = -torch.mean(torch.sum(F.log_softmax(logits_x, dim=1) * mixed_labeled_target, dim=1))
    # Collect the flattened supervised gradient (graph retained because the
    # unsupervised loss backpropagates through the same forward pass).
    labeledtrain_loss.backward(retain_graph=True)
    labeled_grads = []
    for name, param in self._network.named_parameters():
        try:
            labeled_grads.append(param.grad.view(-1))
        except:
            # Parameters with no gradient (param.grad is None) are skipped.
            continue
    labeled_grads = torch.cat(labeled_grads)
    self._network.zero_grad()
    # FixMatch-style unsupervised loss on confident pseudo-labels from the
    # weak view, applied to the strong view.
    pseudo_label = torch.softmax(logits_u_w.detach()/self.T, dim=-1)
    max_probs, targets_u = torch.max(pseudo_label, dim=-1)
    mask = max_probs.ge(self.threshold).float()
    unlabeledtrain_loss = (F.cross_entropy(logits_u_s, targets_u, reduction='none') * mask).mean()
    unlabeledtrain_loss.backward(retain_graph=True)
    unlabeled_grads = []
    for name, param in self._network.named_parameters():
        try:
            unlabeled_grads.append(param.grad.view(-1))
        except:
            continue
    unlabeled_grads = torch.cat(unlabeled_grads)
    # Gradients were only probes; clear them before optimize() recomputes.
    self._network.zero_grad()
    gradient_dot = torch.dot(labeled_grads, unlabeled_grads)
    current_lambda_u = self.lambda_u
    if self.it_total>= float(self.warmup*self.num_it_total):
        if gradient_dot<0:
            # Conflicting gradients: keep only the supervised loss.
            loss = labeledtrain_loss
        else:
            loss = labeledtrain_loss + current_lambda_u * unlabeledtrain_loss
    else:
        # During warm-up the unsupervised loss is always included.
        loss = labeledtrain_loss + current_lambda_u * unlabeledtrain_loss
    return loss
def get_loss(self,train_result,*args,**kwargs):
    """The total loss is already assembled inside train(); return it as-is."""
    return train_result
def predict(self,X=None,valid=None):
return DeepModelMixin.predict(
gitextract_shde08cu/ ├── Examples/ │ ├── Assemble_BreastCancer.py │ ├── CIFAR10_imbalance.py │ ├── CoReg_Boston.py │ ├── Co_Training_BreastCancer.py │ ├── Co_Training_Wine.py │ ├── Constrained_Seed_k_means_Wine.py │ ├── Constrained_k_means_Wine.py │ ├── FixMatch_BreastCancer.py │ ├── FixMatch_CIFAR10.py │ ├── FixMatch_SST2.py │ ├── FlexMatch_CIFAR10.py │ ├── FlexMatch_SST2.py │ ├── GAT_Cora.py │ ├── GCN_Cora.py │ ├── ICTReg_Boston.py │ ├── ICT_CIFAR10.py │ ├── ImprovedGAN_MNIST.py │ ├── LabelPropagation_BreastCancer.py │ ├── LabelSpreading_BreastCancer.py │ ├── LadderNetwork_MNIST.py │ ├── LapSVM_BreastCancer.py │ ├── MeanTeacherReg_Boston.py │ ├── MeanTeacher_CIFAR10.py │ ├── MixMatch_CIFAR10.py │ ├── ParallelDistributed.py │ ├── Parameter_Search.py │ ├── PiModelReg_Boston.py │ ├── PiModel_CIFAR10.py │ ├── PseudoLabel_CIFAR10.py │ ├── ReMixMatch_CIFAR10.py │ ├── S4L_CIFAR10.py │ ├── SDNE_Cora.py │ ├── SSGMM_BreastCancer.py │ ├── SSVAE_MNIST.py │ ├── Save_Load_Model.py │ ├── SemiBoost_BreastCancer.py │ ├── TSVM_BreastCancer.py │ ├── TemporalEnsembling_CIFAR10.py │ ├── Tri_Training_BreastCancer.py │ ├── UDA_CIFAR10.py │ └── VAT_CIFAR10.py ├── LAMDA_SSL/ │ ├── Algorithm/ │ │ ├── Classification/ │ │ │ ├── Assemble.py │ │ │ ├── CAFA.py │ │ │ ├── Co_Training.py │ │ │ ├── FixMatch.py │ │ │ ├── Fix_A_Step.py │ │ │ ├── FlexMatch.py │ │ │ ├── FreeMatch.py │ │ │ ├── GAT.py │ │ │ ├── GCN.py │ │ │ ├── ICT.py │ │ │ ├── ImprovedGAN.py │ │ │ ├── LabelPropagation.py │ │ │ ├── LabelSpreading.py │ │ │ ├── LadderNetwork.py │ │ │ ├── LapSVM.py │ │ │ ├── MTCF.py │ │ │ ├── MeanTeacher.py │ │ │ ├── MixMatch.py │ │ │ ├── PiModel.py │ │ │ ├── PseudoLabel.py │ │ │ ├── ReMixMatch.py │ │ │ ├── S4L.py │ │ │ ├── SDNE.py │ │ │ ├── SSGMM.py │ │ │ ├── SSVAE.py │ │ │ ├── SemiBoost.py │ │ │ ├── SoftMatch.py │ │ │ ├── Supervised.py │ │ │ ├── TSVM.py │ │ │ ├── TemporalEnsembling.py │ │ │ ├── Tri_Training.py │ │ │ ├── UASD.py │ │ │ ├── UDA.py │ │ │ ├── VAT.py │ │ │ └── __init__.py │ │ ├── Clustering/ │ 
│ │ ├── Constrained_Seed_k_means.py │ │ │ ├── Constrained_k_means.py │ │ │ └── __init__.py │ │ ├── Regression/ │ │ │ ├── CoReg.py │ │ │ ├── ICTReg.py │ │ │ ├── MeanTeacherReg.py │ │ │ ├── PiModelReg.py │ │ │ └── __init__.py │ │ └── __init__.py │ ├── Augmentation/ │ │ ├── Graph/ │ │ │ ├── DropEdges.py │ │ │ ├── DropNodes.py │ │ │ └── __init__.py │ │ ├── Tabular/ │ │ │ ├── Noise.py │ │ │ └── __init__.py │ │ ├── Text/ │ │ │ ├── RandomDeletion.py │ │ │ ├── RandomSwap.py │ │ │ ├── TFIDFReplacement.py │ │ │ └── __init__.py │ │ ├── Vision/ │ │ │ ├── AutoContrast.py │ │ │ ├── Brightness.py │ │ │ ├── CenterCrop.py │ │ │ ├── Color.py │ │ │ ├── Contrast.py │ │ │ ├── Cutout.py │ │ │ ├── CutoutAbs.py │ │ │ ├── Equalize.py │ │ │ ├── Identity.py │ │ │ ├── Invert.py │ │ │ ├── Mixup.py │ │ │ ├── Posterize.py │ │ │ ├── RandAugment.py │ │ │ ├── RandomCrop.py │ │ │ ├── RandomHorizontalFlip.py │ │ │ ├── Rotate.py │ │ │ ├── Sharpness.py │ │ │ ├── ShearX.py │ │ │ ├── ShearY.py │ │ │ ├── Solarize.py │ │ │ ├── TranslateX.py │ │ │ ├── TranslateY.py │ │ │ └── __init__.py │ │ └── __init__.py │ ├── Base/ │ │ ├── BaseOptimizer.py │ │ ├── BaseSampler.py │ │ ├── BaseScheduler.py │ │ ├── ClassifierEvaluation.py │ │ ├── ClusterEvaluation.py │ │ ├── DeepModelMixin.py │ │ ├── GraphMixin.py │ │ ├── InductiveEstimator.py │ │ ├── LambdaLR.py │ │ ├── RegressorEvaluation.py │ │ ├── SemiEstimator.py │ │ ├── TabularMixin.py │ │ ├── TextMixin.py │ │ ├── TransductiveEstimator.py │ │ ├── Transformer.py │ │ ├── VisionMixin.py │ │ └── __init__.py │ ├── Config/ │ │ ├── Assemble.py │ │ ├── CAFA.py │ │ ├── CoReg.py │ │ ├── Co_Training.py │ │ ├── Constrained_Seed_k_means.py │ │ ├── Constrained_k_means.py │ │ ├── FixMatch.py │ │ ├── FlexMatch.py │ │ ├── GAT.py │ │ ├── GCN.py │ │ ├── ICT.py │ │ ├── ICTReg.py │ │ ├── ImprovedGAN.py │ │ ├── LabelPropagation.py │ │ ├── LabelSpreading.py │ │ ├── LadderNetwork.py │ │ ├── LapSVM.py │ │ ├── MeanTeacher.py │ │ ├── MeanTeacherReg.py │ │ ├── MixMatch.py │ │ ├── PiModel.py │ │ 
├── PiModelReg.py │ │ ├── PseudoLabel.py │ │ ├── ReMixMatch.py │ │ ├── S4L.py │ │ ├── SDNE.py │ │ ├── SSGMM.py │ │ ├── SSVAE.py │ │ ├── SemiBoost.py │ │ ├── TSVM.py │ │ ├── TemporalEnsembling.py │ │ ├── Tri_Training.py │ │ ├── UDA.py │ │ ├── VAT.py │ │ └── __init__.py │ ├── Dataloader/ │ │ ├── LabeledDataloader.py │ │ ├── TrainDataloader.py │ │ ├── UnlabeledDataloader.py │ │ └── __init__.py │ ├── Dataset/ │ │ ├── Graph/ │ │ │ ├── Cora.py │ │ │ └── __init__.py │ │ ├── LabeledDataset.py │ │ ├── SemiDataset.py │ │ ├── Tabular/ │ │ │ ├── Boston.py │ │ │ ├── BreastCancer.py │ │ │ ├── Wine.py │ │ │ └── __init__.py │ │ ├── Text/ │ │ │ ├── IMDB.py │ │ │ ├── SST2.py │ │ │ └── __init__.py │ │ ├── TrainDataset.py │ │ ├── UnlabeledDataset.py │ │ ├── Vision/ │ │ │ ├── CIFAR10.py │ │ │ ├── ImageCLEF.py │ │ │ ├── Mnist.py │ │ │ ├── Office31.py │ │ │ ├── VisDA.py │ │ │ └── __init__.py │ │ └── __init__.py │ ├── Distributed/ │ │ ├── DataParallel.py │ │ ├── DistributedDataParallel.py │ │ └── __init__.py │ ├── Evaluation/ │ │ ├── Classifier/ │ │ │ ├── AUC.py │ │ │ ├── Accuracy.py │ │ │ ├── Confusion_Matrix.py │ │ │ ├── F1.py │ │ │ ├── Precision.py │ │ │ ├── Recall.py │ │ │ ├── Top_k_Accuracy.py │ │ │ └── __init__.py │ │ ├── Cluster/ │ │ │ ├── Davies_Bouldin_Score.py │ │ │ ├── Fowlkes_Mallows_Score.py │ │ │ ├── Jaccard_Score.py │ │ │ ├── Rand_Score.py │ │ │ ├── Silhouette_Score.py │ │ │ └── __init__.py │ │ ├── Regressor/ │ │ │ ├── Mean_Absolute_Error.py │ │ │ ├── Mean_Squared_Error.py │ │ │ ├── Mean_Squared_Log_Error.py │ │ │ ├── Median_Absolute_Error.py │ │ │ └── __init__.py │ │ └── __init__.py │ ├── Loss/ │ │ ├── Consistency.py │ │ ├── Cross_Entropy.py │ │ ├── EntMin.py │ │ ├── KL_Divergence.py │ │ ├── MSE.py │ │ ├── Semi_Supervised_Loss.py │ │ └── __init__.py │ ├── Network/ │ │ ├── AdversarialNet.py │ │ ├── FT_Transformer.py │ │ ├── GAT.py │ │ ├── GCN.py │ │ ├── ImprovedGAN.py │ │ ├── LadderNetwork.py │ │ ├── MLPCLS.py │ │ ├── MLPReg.py │ │ ├── ResNet50.py │ │ ├── ResNet50Fc.py │ │ 
├── SDNE.py │ │ ├── SSVAE.py │ │ ├── TextRCNN.py │ │ ├── WideResNet.py │ │ └── __init__.py │ ├── Opitimizer/ │ │ ├── Adam.py │ │ ├── SGD.py │ │ └── __init__.py │ ├── Sampler/ │ │ ├── BatchSampler.py │ │ ├── DistributedSampler.py │ │ ├── RandomSampler.py │ │ ├── SequentialSampler.py │ │ └── __init__.py │ ├── Scheduler/ │ │ ├── CosineAnnealingLR.py │ │ ├── CosineWarmup.py │ │ ├── InverseDecaySheduler.py │ │ ├── LinearWarmup.py │ │ ├── StepLR.py │ │ └── __init__.py │ ├── Search/ │ │ ├── BayesSearchCV.py │ │ ├── EvolutionaryStrategySearchCV.py │ │ ├── MetaLearnerSearchCV.py │ │ └── __init__.py │ ├── Split/ │ │ ├── DataSplit.py │ │ ├── ViewSplit.py │ │ └── __init__.py │ ├── Transform/ │ │ ├── Graph/ │ │ │ ├── GCNNorm.py │ │ │ ├── GDC.py │ │ │ ├── NormalizeFeatures.py │ │ │ ├── SVDFeatureReduction.py │ │ │ └── __init__.py │ │ ├── Tabular/ │ │ │ ├── MaxAbsScaler.py │ │ │ ├── MinMaxScaler.py │ │ │ ├── StandarScaler.py │ │ │ └── __init__.py │ │ ├── Text/ │ │ │ ├── AdjustLength.py │ │ │ ├── AutoTokenizer.py │ │ │ ├── CharNGram.py │ │ │ ├── FastText.py │ │ │ ├── GloVe.py │ │ │ ├── Lcut.py │ │ │ ├── PadSequence.py │ │ │ ├── Split.py │ │ │ ├── SynonymsReplacement.py │ │ │ ├── Tokenizer.py │ │ │ ├── Truncate.py │ │ │ ├── Vectors.py │ │ │ ├── Vocab.py │ │ │ └── __init__.py │ │ ├── ToImage.py │ │ ├── ToNumpy.py │ │ ├── ToTensor.py │ │ ├── Vision/ │ │ │ ├── Normalization.py │ │ │ ├── Resize.py │ │ │ └── __init__.py │ │ └── __init__.py │ ├── __init__.py │ └── utils.py ├── LICENSE ├── README.md ├── docs/ │ ├── .nojekyll │ ├── README.md │ ├── _coverpage.md │ ├── _navbar.md │ ├── _sidebar.md │ ├── index.html │ └── zh-cn/ │ ├── README.md │ ├── _coverpage.md │ └── _sidebar.md ├── environment.yaml └── setup.py
SYMBOL INDEX (1010 symbols across 182 files)
FILE: LAMDA_SSL/Algorithm/Classification/Assemble.py
class Assemble (line 11) | class Assemble(InductiveEstimator, ClassifierMixin):
method __init__ (line 12) | def __init__(
method predict_proba (line 40) | def predict_proba(self, X):
method predict (line 46) | def predict(self, X):
method fit (line 51) | def fit(self, X, y, unlabeled_X):
method evaluate (line 101) | def evaluate(self, X, y=None):
FILE: LAMDA_SSL/Algorithm/Classification/CAFA.py
function TempScale (line 19) | def TempScale(p, t):
function inverseDecaySheduler (line 22) | def inverseDecaySheduler(step, initial_lr, gamma=10, power=0.75, max_ite...
function compute_score (line 25) | def compute_score(inputs, model, eps):
function normalize_weight (line 49) | def normalize_weight(x):
function feature_scaling (line 56) | def feature_scaling(x):
function pseudo_label_calibration (line 62) | def pseudo_label_calibration(pslab, weight):
function reverse_sigmoid (line 70) | def reverse_sigmoid(y):
function get_label_share_weight (line 73) | def get_label_share_weight(domain_out, pred_shift, domain_temperature=1....
function get_unlabel_share_weight (line 90) | def get_unlabel_share_weight(domain_out, pred_shift, domain_temperature=...
function match_string (line 95) | def match_string(stra, strb):
function compute_class_weight (line 113) | def compute_class_weight(weight, label, class_weight):
class CAFA (line 119) | class CAFA(DeepModelMixin,InductiveEstimator,ClassifierMixin):
method __init__ (line 120) | def __init__(self,lambda_u=config.lambda_u,
method init_transform (line 245) | def init_transform(self):
method init_model (line 251) | def init_model(self):
method start_fit (line 268) | def start_fit(self, *args, **kwargs):
method train (line 284) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method init_optimizer (line 366) | def init_optimizer(self):
method init_scheduler (line 400) | def init_scheduler(self):
method optimize (line 415) | def optimize(self,loss,*args,**kwargs):
method end_fit_epoch (line 433) | def end_fit_epoch(self, *args, **kwargs):
method estimate (line 437) | def estimate(self, X, idx=None, *args, **kwargs):
method get_loss (line 442) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 452) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/Co_Training.py
class Co_Training (line 11) | class Co_Training(InductiveEstimator, ClassifierMixin):
method __init__ (line 13) | def __init__(
method fit (line 56) | def fit(self, X, y, unlabeled_X, X_2=None, unlabeled_X_2=None):
method predict (line 166) | def predict(self, X, X_2=None):
method predict_proba (line 171) | def predict_proba(self, X, X_2=None):
method evaluate (line 188) | def evaluate(self, X, y=None):
FILE: LAMDA_SSL/Algorithm/Classification/FixMatch.py
class FixMatch (line 11) | class FixMatch(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 12) | def __init__(self,
method init_transform (line 109) | def init_transform(self):
method train (line 115) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method get_loss (line 127) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 137) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/Fix_A_Step.py
function interleave (line 13) | def interleave(x, size):
function de_interleave (line 18) | def de_interleave(x, size):
class Fix_A_Step (line 22) | class Fix_A_Step(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 23) | def __init__(self,
method init_transform (line 126) | def init_transform(self):
method start_fit (line 134) | def start_fit(self):
method train (line 140) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method get_loss (line 232) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 236) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/FlexMatch.py
class FlexMatch (line 12) | class FlexMatch(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 13) | def __init__(self,
method init_transform (line 121) | def init_transform(self):
method start_fit (line 127) | def start_fit(self):
method train (line 141) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method get_loss (line 184) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 191) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/FreeMatch.py
class FreeMatch (line 12) | class FreeMatch(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 13) | def __init__(self,
method update_prob_t (line 117) | def update_prob_t(self, lb_probs, ulb_probs):
method calculate_mask (line 131) | def calculate_mask(self, probs):
method init_transform (line 138) | def init_transform(self):
method start_fit (line 144) | def start_fit(self):
method train (line 153) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method distribution_alignment (line 166) | def distribution_alignment(self, probs):
method cal_time_p_and_p_model (line 172) | def cal_time_p_and_p_model(self,logits_x_ulb_w, time_p, p_model, label...
method get_loss (line 191) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 212) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/GAT.py
class GAT (line 15) | class GAT(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 16) | def __init__(self,
method fit (line 60) | def fit(self,X=None,y=None,unlabeled_X=None,valid_X=None,valid_y=None,
method start_fit (line 69) | def start_fit(self):
method init_optimizer (line 84) | def init_optimizer(self):
method init_train_dataloader (line 93) | def init_train_dataloader(self):
method init_train_dataset (line 96) | def init_train_dataset(self, X=None, y=None, unlabeled_X=None,
method end_fit_epoch (line 137) | def end_fit_epoch(self, train_result,*args, **kwargs):
method fit_epoch_loop (line 141) | def fit_epoch_loop(self, valid_X=None, valid_y=None):
method train (line 162) | def train(self, lb_X=None, lb_y=None, ulb_X=None, lb_idx=None, ulb_idx...
method get_loss (line 168) | def get_loss(self,train_result,*args,**kwargs):
method init_estimate_dataloader (line 174) | def init_estimate_dataloader(self,valid=False):
method init_estimate_dataset (line 177) | def init_estimate_dataset(self, X=None, valid=False):
method predict_batch_loop (line 185) | def predict_batch_loop(self):
method predict (line 190) | def predict(self,X=None,valid=None):
method evaluate (line 195) | def evaluate(self,X=None,y=None,valid=False):
FILE: LAMDA_SSL/Algorithm/Classification/GCN.py
class GCN (line 15) | class GCN(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 16) | def __init__(self,
method fit (line 57) | def fit(self,X=None,y=None,unlabeled_X=None,valid_X=None,valid_y=None,
method start_fit (line 66) | def start_fit(self):
method init_optimizer (line 81) | def init_optimizer(self):
method init_train_dataloader (line 90) | def init_train_dataloader(self):
method init_train_dataset (line 93) | def init_train_dataset(self, X=None, y=None, unlabeled_X=None,
method end_fit_epoch (line 134) | def end_fit_epoch(self, train_result,*args, **kwargs):
method fit_epoch_loop (line 138) | def fit_epoch_loop(self, valid_X=None, valid_y=None):
method train (line 160) | def train(self, lb_X=None, lb_y=None, ulb_X=None, lb_idx=None, ulb_idx...
method get_loss (line 166) | def get_loss(self,train_result,*args,**kwargs):
method init_estimate_dataloader (line 172) | def init_estimate_dataloader(self,valid=False):
method init_estimate_dataset (line 175) | def init_estimate_dataset(self, X=None, valid=False):
method predict_batch_loop (line 183) | def predict_batch_loop(self):
method predict (line 188) | def predict(self,X=None,valid=None):
method evaluate (line 193) | def evaluate(self,X=None,y=None,valid=False):
FILE: LAMDA_SSL/Algorithm/Classification/ICT.py
class ICT (line 13) | class ICT(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 14) | def __init__(self,
method start_fit (line 105) | def start_fit(self):
method train (line 109) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method get_loss (line 131) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 141) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/ImprovedGAN.py
class ImprovedGAN (line 16) | class ImprovedGAN(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 17) | def __init__(self,
method start_fit (line 124) | def start_fit(self):
method fit_batch_loop (line 146) | def fit_batch_loop(self,valid_X=None,valid_y=None):
method init_optimizer (line 185) | def init_optimizer(self):
method init_scheduler (line 220) | def init_scheduler(self):
method init_ema (line 239) | def init_ema(self):
method get_loss_D (line 260) | def get_loss_D(self,train_result_D):
method get_loss_G (line 272) | def get_loss_G(self,train_result_G):
method end_fit_batch_D (line 277) | def end_fit_batch_D(self,train_result_D):
method end_fit_batch_G (line 281) | def end_fit_batch_G(self,train_result_G):
method optimize_D (line 285) | def optimize_D(self,loss):
method optimize_G (line 295) | def optimize_G(self,loss):
method log_sum_exp (line 305) | def log_sum_exp(self,x, axis=1):
method train_D (line 309) | def train_D(self,lb_X,lb_y,ulb_X):
method train_G (line 318) | def train_G(self, ulb_X):
method estimate (line 333) | def estimate(self, X, idx=None, *args, **kwargs):
method predict (line 338) | def predict(self,X=None,valid=None):
method generate (line 341) | def generate(self,num,z=None):
FILE: LAMDA_SSL/Algorithm/Classification/LabelPropagation.py
class LabelPropagation (line 8) | class LabelPropagation(TransductiveEstimator,ClassifierMixin):
method __init__ (line 9) | def __init__(
method fit (line 44) | def fit(self,X,y,unlabeled_X=None):
method predict (line 56) | def predict(self,X=None,Transductive=True):
method predict_proba (line 64) | def predict_proba(self,X=None,Transductive=True):
method evaluate (line 72) | def evaluate(self,X=None,y=None,Transductive=True):
FILE: LAMDA_SSL/Algorithm/Classification/LabelSpreading.py
class LabelSpreading (line 8) | class LabelSpreading(TransductiveEstimator,ClassifierMixin):
method __init__ (line 9) | def __init__(
method fit (line 44) | def fit(self,X,y,unlabeled_X=None):
method predict (line 56) | def predict(self,X=None,Transductive=True):
method predict_proba (line 64) | def predict_proba(self,X=None,Transductive=True):
method evaluate (line 72) | def evaluate(self,X=None,y=None,Transductive=True):
FILE: LAMDA_SSL/Algorithm/Classification/LadderNetwork.py
class LadderNetwork (line 15) | class LadderNetwork(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 16) | def __init__(self,
method start_fit (line 111) | def start_fit(self):
method init_optimizer (line 128) | def init_optimizer(self):
method train (line 133) | def train(self,lb_X=None,lb_y=None,ulb_X=None,lb_idx=None,ulb_idx=None...
method get_loss (line 169) | def get_loss(self,train_result,*args,**kwargs):
method optimize (line 179) | def optimize(self,loss,*args,**kwargs):
method end_fit_epoch (line 186) | def end_fit_epoch(self):
method estimate (line 191) | def estimate(self, X, idx=None, *args, **kwargs):
method predict (line 196) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/LapSVM.py
class LapSVM (line 12) | class LapSVM(InductiveEstimator,ClassifierMixin):
method __init__ (line 14) | def __init__(self,
method fit (line 51) | def fit(self,X,y,unlabeled_X):
method decision_function (line 149) | def decision_function(self,X):
method predict_proba (line 164) | def predict_proba(self,X):
method predict (line 171) | def predict(self,X):
method evaluate (line 179) | def evaluate(self,X,y=None):
FILE: LAMDA_SSL/Algorithm/Classification/MTCF.py
class MTCF (line 15) | class MTCF(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 16) | def __init__(self,
method start_fit (line 115) | def start_fit(self):
method init_transform (line 121) | def init_transform(self):
method interleave_offsets (line 127) | def interleave_offsets(self, batch, nu):
method interleave (line 137) | def interleave(self, xy, batch):
method train (line 145) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method get_loss (line 194) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 202) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/MeanTeacher.py
class MeanTeacher (line 13) | class MeanTeacher(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 14) | def __init__(self,
method init_transform (line 104) | def init_transform(self):
method train (line 110) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method get_loss (line 126) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 134) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/MixMatch.py
class MixMatch (line 16) | class MixMatch(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 17) | def __init__(self,
method init_transform (line 114) | def init_transform(self):
method start_fit (line 120) | def start_fit(self):
method train (line 126) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method interleave_offsets (line 168) | def interleave_offsets(self, batch, num):
method interleave (line 178) | def interleave(self, xy, batch):
method get_loss (line 186) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 194) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/PiModel.py
class PiModel (line 12) | class PiModel(DeepModelMixin,InductiveEstimator,ClassifierMixin):
method __init__ (line 13) | def __init__(self,lambda_u=config.lambda_u,
method init_transform (line 102) | def init_transform(self):
method train (line 108) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method get_loss (line 119) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 127) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/PseudoLabel.py
class PseudoLabel (line 11) | class PseudoLabel(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 12) | def __init__(self,
method train (line 99) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method get_loss (line 114) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 125) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/ReMixMatch.py
class ReMixMatch (line 16) | class ReMixMatch(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 17) | def __init__(self,
method init_transform (line 127) | def init_transform(self):
method start_fit (line 135) | def start_fit(self):
method train (line 144) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method interleave_offsets (line 199) | def interleave_offsets(self, batch, num):
method interleave (line 209) | def interleave(self, xy, batch):
method get_loss (line 218) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 228) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/S4L.py
class S4L (line 14) | class S4L(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 15) | def __init__(self,
method init_transform (line 117) | def init_transform(self):
method start_fit (line 121) | def start_fit(self):
method train (line 127) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method get_loss (line 165) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 172) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/SDNE.py
class SDNE (line 12) | class SDNE(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 13) | def __init__(self,
method fit (line 66) | def fit(self,X=None,y=None,unlabeled_X=None,valid_X=None,valid_y=None,
method start_fit (line 75) | def start_fit(self):
method init_train_dataloader (line 89) | def init_train_dataloader(self):
method init_train_dataset (line 92) | def init_train_dataset(self, X=None, y=None, unlabeled_X=None,
method estimator_fit (line 135) | def estimator_fit(self):
method create_adjacency_laplace_matrix (line 145) | def create_adjacency_laplace_matrix(self):
method end_fit_epoch (line 169) | def end_fit_epoch(self, train_result,*args, **kwargs):
method fit_epoch_loop (line 173) | def fit_epoch_loop(self, valid_X=None, valid_y=None):
method end_fit (line 192) | def end_fit(self):
method train (line 195) | def train(self, lb_X=None, lb_y=None, ulb_X=None, lb_idx=None, ulb_idx...
method get_loss (line 204) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 222) | def predict(self,X=None,valid=False):
method predict_proba (line 233) | def predict_proba(self, X=None, valid=False):
method evaluate (line 245) | def evaluate(self, X, y=None,valid=False):
FILE: LAMDA_SSL/Algorithm/Classification/SSGMM.py
class SSGMM (line 9) | class SSGMM(InductiveEstimator,ClassifierMixin):
method __init__ (line 10) | def __init__(self,tolerance=config.tolerance, max_iterations=config.ma...
method normfun (line 28) | def normfun(self,x, mu, sigma):
method fit (line 32) | def fit(self,X,y,unlabeled_X):
method predict_proba (line 107) | def predict_proba(self,X):
method predict (line 120) | def predict(self,X):
method evaluate (line 125) | def evaluate(self,X,y=None):
FILE: LAMDA_SSL/Algorithm/Classification/SSVAE.py
class SSVAE (line 14) | class SSVAE(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 15) | def __init__(self,
method start_fit (line 125) | def start_fit(self):
method loss_components_fn (line 147) | def loss_components_fn(self,x, y, z, p_y, p_z, p_x_yz, q_z_xy):
method train (line 153) | def train(self,lb_X=None,lb_y=None,ulb_X=None,lb_idx=None,ulb_idx=None...
method get_loss (line 187) | def get_loss(self,train_result,*args,**kwargs):
method optimize (line 206) | def optimize(self,loss,*args,**kwargs):
method estimate (line 212) | def estimate(self, X, idx=None, *args, **kwargs):
method predict (line 217) | def predict(self,X=None,valid=None):
method generate (line 220) | def generate(self,num,z=None,x=None,y=None):
FILE: LAMDA_SSL/Algorithm/Classification/SemiBoost.py
class SemiBoost (line 11) | class SemiBoost(InductiveEstimator,ClassifierMixin):
method __init__ (line 13) | def __init__(self, base_estimator = config.base_estimator,
method fit (line 44) | def fit(self, X, y,unlabeled_X):
method predict_proba (line 129) | def predict_proba(self, X):
method predict (line 135) | def predict(self, X):
method evaluate (line 145) | def evaluate(self,X,y=None):
FILE: LAMDA_SSL/Algorithm/Classification/SoftMatch.py
class SoftMatch (line 15) | class SoftMatch(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 16) | def __init__(self,
method update_prob_t (line 120) | def update_prob_t(self, lb_probs, ulb_probs):
method calculate_mask (line 134) | def calculate_mask(self, probs):
method init_transform (line 141) | def init_transform(self):
method start_fit (line 147) | def start_fit(self):
method train (line 157) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method distribution_alignment (line 170) | def distribution_alignment(self, probs):
method get_loss (line 176) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 194) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/Supervised.py
class Supervised (line 14) | class Supervised(DeepModelMixin,InductiveEstimator,ClassifierMixin):
method __init__ (line 15) | def __init__(self,lambda_u=config.lambda_u,
method init_transform (line 104) | def init_transform(self):
method train (line 112) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method get_loss (line 125) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 134) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/TSVM.py
class TSVM (line 9) | class TSVM(TransductiveEstimator,ClassifierMixin):
method __init__ (line 10) | def __init__(
method fit (line 89) | def fit(self,X,y,unlabeled_X):
method predict_proba (line 138) | def predict_proba(self, X=None, Transductive=True):
method predict (line 147) | def predict(self,X=None,Transductive=True):
method score (line 158) | def score(self,X=None, y=None,sample_weight=None,Transductive=True):
method evaluate (line 168) | def evaluate(self,X=None,y=None,Transductive=True):
FILE: LAMDA_SSL/Algorithm/Classification/TemporalEnsembling.py
class TemporalEnsembling (line 13) | class TemporalEnsembling(InductiveEstimator,DeepModelMixin):
method __init__ (line 14) | def __init__(self,
method start_fit (line 113) | def start_fit(self):
method end_fit_epoch (line 128) | def end_fit_epoch(self):
method create_soft_pslab (line 133) | def create_soft_pslab(self, num_samples, num_classes, dtype='rand'):
method update_ema_predictions (line 142) | def update_ema_predictions(self):
method train (line 146) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method optimize (line 161) | def optimize(self,loss,*args,**kwargs):
method get_loss (line 168) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 176) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/Tri_Training.py
class Tri_Training (line 9) | class Tri_Training(InductiveEstimator,ClassifierMixin):
method __init__ (line 10) | def __init__(self, base_estimator=config.base_estimator,base_estimator...
method fit (line 32) | def fit(self, X, y, unlabeled_X):
method predict_proba (line 72) | def predict_proba(self,X):
method predict (line 78) | def predict(self, X):
method measure_error (line 86) | def measure_error(self, X, y, j, k):
method evaluate (line 92) | def evaluate(self,X,y=None):
FILE: LAMDA_SSL/Algorithm/Classification/UASD.py
class UASD (line 14) | class UASD(InductiveEstimator,DeepModelMixin):
method __init__ (line 15) | def __init__(self,
method start_fit (line 116) | def start_fit(self):
method end_fit_epoch (line 129) | def end_fit_epoch(self):
method update_predictions (line 132) | def update_predictions(self):
method train (line 136) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method get_loss (line 152) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 162) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/UDA.py
class UDA (line 12) | class UDA(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 13) | def __init__(self,
method init_transform (line 110) | def init_transform(self):
method start_fit (line 116) | def start_fit(self):
method train (line 122) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method get_tsa (line 133) | def get_tsa(self):
method get_loss (line 152) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 165) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Classification/VAT.py
class VAT (line 14) | class VAT(InductiveEstimator,DeepModelMixin,ClassifierMixin):
method __init__ (line 15) | def __init__(self,
method start_fit (line 117) | def start_fit(self):
method train (line 121) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method get_loss (line 149) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 162) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Clustering/Constrained_Seed_k_means.py
class Constrained_Seed_k_means (line 8) | class Constrained_Seed_k_means(TransductiveEstimator, ClusterMixin):
method __init__ (line 9) | def __init__(self, k=config.k, tolerance=config.tolerance, max_iterati...
method fit (line 25) | def fit(self, X, y=None, unlabeled_X=None,clusters=None):
method predict (line 100) | def predict(self, X=None, Transductive=True):
method evaluate (line 110) | def evaluate(self, X=None, y=None,Transductive=True):
FILE: LAMDA_SSL/Algorithm/Clustering/Constrained_k_means.py
class Constrained_k_means (line 8) | class Constrained_k_means(TransductiveEstimator,ClusterMixin):
method __init__ (line 9) | def __init__(self,k=config.k, tolerance=config.tolerance, max_iteratio...
method fit (line 26) | def fit(self,X,y=None,unlabeled_X=None,cl=None,ml=None):
method violate_constraints (line 156) | def violate_constraints(self, data_index, cluster_index, ml, cl):
method predict (line 169) | def predict(self, X=None,Transductive=True):
method evaluate (line 179) | def evaluate(self,X=None,y=None,Transductive=True):
FILE: LAMDA_SSL/Algorithm/Regression/CoReg.py
class CoReg (line 10) | class CoReg(InductiveEstimator,RegressorMixin):
method __init__ (line 11) | def __init__(self, k1=config.k1, k2=config.k2, p1=config.p1, p2=config...
method fit (line 37) | def fit(self,X,y,unlabeled_X):
method predict (line 110) | def predict(self,X):
method evaluate (line 116) | def evaluate(self,X,y=None):
FILE: LAMDA_SSL/Algorithm/Regression/ICTReg.py
class ICTReg (line 15) | class ICTReg(DeepModelMixin,InductiveEstimator,RegressorMixin):
method __init__ (line 16) | def __init__(self,
method init_transform (line 111) | def init_transform(self):
method start_fit (line 115) | def start_fit(self):
method train (line 128) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method get_loss (line 151) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 161) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Regression/MeanTeacherReg.py
class MeanTeacherReg (line 14) | class MeanTeacherReg(DeepModelMixin,InductiveEstimator,RegressorMixin):
method __init__ (line 15) | def __init__(self,
method init_transform (line 109) | def init_transform(self):
method start_fit (line 115) | def start_fit(self):
method train (line 128) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method get_loss (line 144) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 152) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Algorithm/Regression/PiModelReg.py
class PiModelReg (line 13) | class PiModelReg(DeepModelMixin,InductiveEstimator,RegressorMixin):
method __init__ (line 14) | def __init__(self,
method init_transform (line 108) | def init_transform(self):
method start_fit (line 114) | def start_fit(self):
method train (line 127) | def train(self,lb_X,lb_y,ulb_X,lb_idx=None,ulb_idx=None,*args,**kwargs):
method get_loss (line 139) | def get_loss(self,train_result,*args,**kwargs):
method predict (line 147) | def predict(self,X=None,valid=None):
FILE: LAMDA_SSL/Augmentation/Graph/DropEdges.py
class DropEdges (line 6) | class DropEdges(Transformer):
method __init__ (line 7) | def __init__(self, num_drop, shuffle=True, random_state=None):
method transform (line 17) | def transform(self, X):
FILE: LAMDA_SSL/Augmentation/Graph/DropNodes.py
class DropNodes (line 4) | class DropNodes(Transformer):
method __init__ (line 5) | def __init__(self,num_drop,shuffle=True,random_state=None):
method transform (line 15) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Tabular/Noise.py
class Noise (line 8) | class Noise(Transformer):
method __init__ (line 9) | def __init__(self,noise_level=0.1):
method transform (line 15) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Text/RandomDeletion.py
class RandomDeletion (line 5) | class RandomDeletion(Transformer):
method __init__ (line 6) | def __init__(self,p=0.5,tokenizer=None):
method transform (line 14) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Text/RandomSwap.py
class RandomSwap (line 5) | class RandomSwap(Transformer):
method __init__ (line 6) | def __init__(self,n=1,tokenizer=None):
method swap (line 14) | def swap(self,X):
method transform (line 26) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Text/TFIDFReplacement.py
class TFIDFReplacement (line 6) | class TFIDFReplacement(Transformer):
method __init__ (line 7) | def __init__(self,text,p=0.7,tokenizer=None,cache_len=100000):
method reset_random_prob (line 49) | def reset_random_prob(self):
method reset_token_list (line 54) | def reset_token_list(self):
method get_random_prob (line 63) | def get_random_prob(self):
method get_random_token (line 71) | def get_random_token(self):
method get_replace_prob (line 79) | def get_replace_prob(self, X):
method replace_tokens (line 93) | def replace_tokens(self, word_list, replace_prob):
method transform (line 100) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/AutoContrast.py
class AutoContrast (line 7) | class AutoContrast(Transformer):
method __init__ (line 8) | def __init__(self):
method transform (line 12) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/Brightness.py
class Brightness (line 7) | class Brightness(Transformer):
method __init__ (line 8) | def __init__(self, min_v=0.05,max_v=0.95,num_bins=10,magnitude=5,v=None):
method transform (line 24) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/CenterCrop.py
class CenterCrop (line 8) | class CenterCrop(Transformer):
method __init__ (line 9) | def __init__(self):
method transform (line 22) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/Color.py
class Color (line 8) | class Color(Transformer):
method __init__ (line 9) | def __init__(self, min_v=0.05,max_v=0.95,num_bins=10,magnitude=5,v=None):
method transform (line 25) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/Contrast.py
class Contrast (line 7) | class Contrast(Transformer):
method __init__ (line 8) | def __init__(self, min_v=0.05,max_v=0.95,num_bins=10,magnitude=5,v=None):
method transform (line 24) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/Cutout.py
class Cutout (line 8) | class Cutout(Transformer):
method __init__ (line 9) | def __init__(self, v=0.5,fill=(127,127,127),random_v=True):
method transform (line 20) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/CutoutAbs.py
class CutoutAbs (line 12) | class CutoutAbs(Transformer):
method __init__ (line 13) | def __init__(self, v=16,fill=(127,127,127),random_v=True):
method transform (line 31) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/Equalize.py
class Equalize (line 7) | class Equalize(Transformer):
method __init__ (line 8) | def __init__(self,scale=255):
method transform (line 14) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/Identity.py
class Identity (line 3) | class Identity(Transformer):
method __init__ (line 4) | def __init__(self):
method transform (line 7) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/Invert.py
class Invert (line 7) | class Invert(Transformer):
method __init__ (line 8) | def __init__(self):
method transform (line 11) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/Mixup.py
class Mixup (line 4) | class Mixup(Transformer):
method __init__ (line 5) | def __init__(self, alpha=0.5):
method fit (line 13) | def fit(self,X,y=None,**fit_params):
method transform (line 18) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/Posterize.py
class Posterize (line 7) | class Posterize(Transformer):
method __init__ (line 8) | def __init__(self, min_v=4,max_v=8,num_bins=10,magnitude=5,v=None,scal...
method transform (line 27) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/RandAugment.py
function AutoContrast (line 6) | def AutoContrast(X, **kwarg):
function Brightness (line 10) | def Brightness(X, min_v, max_v,magnitude,num_bins=10):
function Color (line 15) | def Color(X, min_v, max_v,magnitude,num_bins=10):
function Contrast (line 20) | def Contrast(X, min_v, max_v,magnitude,num_bins=10):
function Equalize (line 24) | def Equalize(X, **kwarg):
function Identity (line 28) | def Identity(X, **kwarg):
function Invert (line 32) | def Invert(X, **kwarg):
function Posterize (line 36) | def Posterize(X, min_v, max_v,magnitude,num_bins=10):
function Rotate (line 41) | def Rotate(X, min_v, max_v,magnitude,num_bins=10):
function Sharpness (line 48) | def Sharpness(X, min_v, max_v,magnitude,num_bins=10):
function ShearX (line 53) | def ShearX(X, min_v, max_v,magnitude,num_bins=10):
function ShearY (line 60) | def ShearY(X, min_v, max_v,magnitude,num_bins=10):
function Solarize (line 67) | def Solarize(X, min_v, max_v,magnitude,num_bins=10):
function TranslateX (line 73) | def TranslateX(X, min_v, max_v,magnitude,num_bins=10):
function TranslateY (line 81) | def TranslateY(X, min_v, max_v,magnitude,num_bins=10):
class RandAugment (line 88) | class RandAugment(Transformer):
method __init__ (line 89) | def __init__(self, n=2, m=5, num_bins=10, random=True,augment_list=None):
method transform (line 116) | def transform(self, X):
FILE: LAMDA_SSL/Augmentation/Vision/RandomCrop.py
class RandomCrop (line 8) | class RandomCrop(Transformer):
method __init__ (line 9) | def __init__(self, padding=None, pad_if_needed=False, fill=0, padding_...
method transform (line 22) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/RandomHorizontalFlip.py
class RandomHorizontalFlip (line 7) | class RandomHorizontalFlip(Transformer):
method __init__ (line 8) | def __init__(self):
method transform (line 12) | def transform(self,X=None):
FILE: LAMDA_SSL/Augmentation/Vision/Rotate.py
class Rotate (line 8) | class Rotate(Transformer):
method __init__ (line 9) | def __init__(self, min_v=0,max_v=30,num_bins=10,magnitude=5,v=None):
method transform (line 27) | def transform(self,X,rand=False):
FILE: LAMDA_SSL/Augmentation/Vision/Sharpness.py
class Sharpness (line 8) | class Sharpness(Transformer):
method __init__ (line 9) | def __init__(self, min_v=0.05,max_v=0.95,num_bins=10,magnitude=5,v=None):
method transform (line 25) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/ShearX.py
class ShearX (line 9) | class ShearX(Transformer):
method __init__ (line 10) | def __init__(self, min_v=0,max_v=0.3,num_bins=10,magnitude=5,v=None):
method transform (line 25) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/ShearY.py
class ShearY (line 9) | class ShearY(Transformer):
method __init__ (line 10) | def __init__(self, min_v=0,max_v=0.3,num_bins=10,magnitude=5,v=None):
method transform (line 25) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/Solarize.py
class Solarize (line 7) | class Solarize(Transformer):
method __init__ (line 8) | def __init__(self, min_v=0,max_v=255,num_bins=10,magnitude=5,v=None,sc...
method transform (line 25) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/TranslateX.py
class TranslateX (line 8) | class TranslateX(Transformer):
method __init__ (line 9) | def __init__(self, min_v=0,max_v=0.3,num_bins=10,magnitude=5,v=None):
method transform (line 24) | def transform(self,X):
FILE: LAMDA_SSL/Augmentation/Vision/TranslateY.py
class TranslateY (line 8) | class TranslateY(Transformer):
method __init__ (line 9) | def __init__(self, min_v=0,max_v=0.3,num_bins=10,magnitude=5,v=None):
method transform (line 24) | def transform(self,X):
FILE: LAMDA_SSL/Base/BaseOptimizer.py
class BaseOptimizer (line 2) | class BaseOptimizer:
method __init__ (line 3) | def __init__(self,defaults):
method init_optimizer (line 6) | def init_optimizer(self,params):
FILE: LAMDA_SSL/Base/BaseSampler.py
class BaseSampler (line 2) | class BaseSampler:
method __init__ (line 3) | def __init__(self):
method init_sampler (line 5) | def init_sampler(self,data_source):
FILE: LAMDA_SSL/Base/BaseScheduler.py
class BaseScheduler (line 2) | class BaseScheduler:
method __init__ (line 3) | def __init__(self, last_epoch=-1, verbose=False):
method init_scheduler (line 10) | def init_scheduler(self,optimizer):
FILE: LAMDA_SSL/Base/ClassifierEvaluation.py
class ClassifierEvaluation (line 3) | class ClassifierEvaluation(ABC):
method __init__ (line 4) | def __init__(self):
method scoring (line 7) | def scoring(self,y_true,y_pred=None,y_score=None):
FILE: LAMDA_SSL/Base/ClusterEvaluation.py
class ClusterEvaluation (line 2) | class ClusterEvaluation(ABC):
method __init__ (line 3) | def __init__(self):
method scoring (line 6) | def scoring(self,y_true=None,clusters=None,X=None):
FILE: LAMDA_SSL/Base/DeepModelMixin.py
class DeepModelMixin (line 16) | class DeepModelMixin(SemiEstimator):
method __init__ (line 17) | def __init__(self, train_dataset=None,
method init_model (line 149) | def init_model(self):
method init_ema (line 160) | def init_ema(self):
method init_optimizer (line 167) | def init_optimizer(self):
method init_scheduler (line 179) | def init_scheduler(self):
method init_epoch (line 184) | def init_epoch(self):
method init_augmentation (line 192) | def init_augmentation(self):
method init_transform (line 210) | def init_transform(self):
method init_train_dataset (line 215) | def init_train_dataset(self,X=None,y=None,unlabeled_X=None, *args, **k...
method init_train_dataloader (line 224) | def init_train_dataloader(self):
method start_fit (line 249) | def start_fit(self, *args, **kwargs):
method start_fit_epoch (line 254) | def start_fit_epoch(self, *args, **kwargs):
method start_fit_batch (line 257) | def start_fit_batch(self, *args, **kwargs):
method train (line 260) | def train(self,lb_X=None,lb_y=None,ulb_X=None,lb_idx=None,ulb_idx=None...
method get_loss (line 263) | def get_loss(self,train_result,*args,**kwargs):
method optimize (line 266) | def optimize(self,loss,*args,**kwargs):
method end_fit_batch (line 275) | def end_fit_batch(self, train_result,*args, **kwargs):
method fit_batch_loop (line 279) | def fit_batch_loop(self,valid_X=None,valid_y=None):
method end_fit_epoch (line 300) | def end_fit_epoch(self, *args, **kwargs):
method fit_epoch_loop (line 303) | def fit_epoch_loop(self,valid_X=None,valid_y=None):
method end_fit (line 321) | def end_fit(self, *args, **kwargs):
method fit (line 324) | def fit(self,X=None,y=None,unlabeled_X=None,valid_X=None,valid_y=None):
method init_estimate_dataset (line 339) | def init_estimate_dataset(self, X=None,valid=False):
method init_estimate_dataloader (line 353) | def init_estimate_dataloader(self,valid=False):
method start_predict (line 369) | def start_predict(self, *args, **kwargs):
method start_predict_batch (line 375) | def start_predict_batch(self, *args, **kwargs):
method estimate (line 379) | def estimate(self, X, idx=None, *args, **kwargs):
method end_predict_batch (line 383) | def end_predict_batch(self, *args, **kwargs):
method predict_batch_loop (line 386) | def predict_batch_loop(self):
method get_predict_result (line 399) | def get_predict_result(self, y_est, *args, **kwargs):
method end_predict (line 411) | def end_predict(self, *args, **kwargs):
method predict (line 418) | def predict(self,X=None,valid=False):
method predict_proba (line 427) | def predict_proba(self,X=None,valid=False):
method evaluate (line 436) | def evaluate(self,X,y=None,valid=False):
FILE: LAMDA_SSL/Base/GraphMixin.py
class GraphMixin (line 4) | class GraphMixin:
method __init__ (line 5) | def __init__(self):
method init_default_transforms (line 8) | def init_default_transforms(self):
FILE: LAMDA_SSL/Base/InductiveEstimator.py
class InductiveEstimator (line 4) | class InductiveEstimator(SemiEstimator):
method predict (line 7) | def predict(self,X):
FILE: LAMDA_SSL/Base/LambdaLR.py
class LambdaLR (line 3) | class LambdaLR(BaseScheduler):
method __init__ (line 4) | def __init__(self, lr_lambda, last_epoch=-1,verbose=False):
method init_scheduler (line 13) | def init_scheduler(self,optimizer):
FILE: LAMDA_SSL/Base/RegressorEvaluation.py
class RegressorEvaluation (line 3) | class RegressorEvaluation(ABC):
method __init__ (line 4) | def __init__(self):
method scoring (line 7) | def scoring(self,y_true,y_pred=None):
FILE: LAMDA_SSL/Base/SemiEstimator.py
class SemiEstimator (line 3) | class SemiEstimator(ABC,BaseEstimator):
method fit (line 5) | def fit(self,X,y,unlabeled_X):
FILE: LAMDA_SSL/Base/TabularMixin.py
class TabularMixin (line 5) | class TabularMixin:
method __init__ (line 6) | def __init__(self):
method init_default_transforms (line 9) | def init_default_transforms(self):
FILE: LAMDA_SSL/Base/TextMixin.py
class TextMixin (line 7) | class TextMixin:
method __init__ (line 8) | def __init__(self,word_vocab=None,vectors=None,length=300,unk_token='<...
method init_default_transforms (line 30) | def init_default_transforms(self):
FILE: LAMDA_SSL/Base/TransductiveEstimator.py
class TransductiveEstimator (line 4) | class TransductiveEstimator(SemiEstimator):
method predict (line 7) | def predict(self,X=None,Transductive=True):
FILE: LAMDA_SSL/Base/Transformer.py
class Transformer (line 4) | class Transformer(BaseEstimator,TransformerMixin,ABC):
method __init__ (line 5) | def __init__(self):
method fit (line 8) | def fit(self,X,y=None,**fit_params):
method __call__ (line 14) | def __call__(self, X,y=None,**fit_params):
method transform (line 21) | def transform(self,X):
method fit_transform (line 26) | def fit_transform(self,X,y=None,**fit_params):
FILE: LAMDA_SSL/Base/VisionMixin.py
class VisionMixin (line 7) | class VisionMixin:
method __init__ (line 8) | def __init__(self,mean=None,std=None):
method init_default_transforms (line 14) | def init_default_transforms(self):
method show_image (line 34) | def show_image(self,img):
FILE: LAMDA_SSL/Dataloader/LabeledDataloader.py
class LabeledDataLoader (line 4) | class LabeledDataLoader:
method __init__ (line 5) | def __init__(self,
method init_dataloader (line 45) | def init_dataloader(self,dataset=None,sampler=None,batch_sampler=None):
FILE: LAMDA_SSL/Dataloader/TrainDataloader.py
class TrainDataLoader (line 7) | class TrainDataLoader:
method __init__ (line 8) | def __init__(self,
method init_dataloader (line 202) | def init_dataloader(self,dataset=None,labeled_dataset=None,unlabeled_d...
FILE: LAMDA_SSL/Dataloader/UnlabeledDataloader.py
class UnlabeledDataLoader (line 4) | class UnlabeledDataLoader:
method __init__ (line 5) | def __init__(self,batch_size= 1,
method init_dataloader (line 46) | def init_dataloader(self,dataset=None,sampler=None,batch_sampler=None):
FILE: LAMDA_SSL/Dataset/Graph/Cora.py
class Cora (line 11) | class Cora(SemiDataset,GraphMixin):
method __init__ (line 16) | def __init__(
method _init_dataset (line 79) | def _init_dataset(self):
FILE: LAMDA_SSL/Dataset/LabeledDataset.py
class LabeledDataset (line 6) | class LabeledDataset(Dataset):
method __init__ (line 7) | def __init__(self,
method init_dataset (line 29) | def init_dataset(self, X=None, y=None):
method _transforms (line 38) | def _transforms(self,X,y,transforms):
method to_list (line 55) | def to_list(self,l):
method insert (line 62) | def insert(self,l,pos,item):
method add_transform (line 70) | def add_transform(self,transform,dim=1,x=0,y=0):
method add_pre_transform (line 84) | def add_pre_transform(self,transform,dim=1,x=0,y=0):
method add_transforms (line 98) | def add_transforms(self,transforms,dim=1,x=0,y=0):
method add_target_transform (line 112) | def add_target_transform(self,target_transform,dim=1,x=0,y=0):
method _transform (line 126) | def _transform(self,X,transform):
method apply_transform (line 143) | def apply_transform(self,X,y):
method __getitem__ (line 213) | def __getitem__(self, i):
method __len__ (line 220) | def __len__(self):
FILE: LAMDA_SSL/Dataset/SemiDataset.py
class SemiDataset (line 9) | class SemiDataset(Dataset):
method __init__ (line 10) | def __init__(self,
method _init_dataset (line 90) | def _init_dataset(self):
method init_dataset (line 95) | def init_dataset(self,labeled_X=None,labeled_y=None,unlabeled_X=None,
method add_transform (line 192) | def add_transform(self,transform,dim,x,y=0):
method add_target_transform (line 195) | def add_target_transform(self,target_transform,dim,x,y=0):
method add_transforms (line 198) | def add_transforms(self,transforms,dim,x,y=0):
method add_unlabeled_transform (line 201) | def add_unlabeled_transform(self,unlabeled_transform,dim,x,y=0):
method add_valid_transform (line 204) | def add_valid_transform(self,valid_transform,dim,x,y=0):
method add_test_transform (line 207) | def add_test_transform(self,test_transform,dim,x,y=0):
method add_pre_transform (line 210) | def add_pre_transform(self,transform,dim,x,y=0):
method __getitem__ (line 215) | def __getitem__(self, i, test=False,valid=False,labeled=True):
method __len__ (line 226) | def __len__(self,test=False,valid=False,labeled=True):
FILE: LAMDA_SSL/Dataset/Tabular/Boston.py
class Boston (line 10) | class Boston(SemiDataset,TabularMixin):
method __init__ (line 11) | def __init__(
method _init_dataset (line 72) | def _init_dataset(self):
FILE: LAMDA_SSL/Dataset/Tabular/BreastCancer.py
class BreastCancer (line 10) | class BreastCancer(SemiDataset,TabularMixin):
method __init__ (line 11) | def __init__(
method _init_dataset (line 71) | def _init_dataset(self):
FILE: LAMDA_SSL/Dataset/Tabular/Wine.py
class Wine (line 10) | class Wine(SemiDataset,TabularMixin):
method __init__ (line 11) | def __init__(
method _init_dataset (line 71) | def _init_dataset(self):
FILE: LAMDA_SSL/Dataset/Text/IMDB.py
class IMDB (line 11) | class IMDB(SemiDataset,TextMixin):
method __init__ (line 21) | def __init__(self,root,
method download (line 64) | def download(self):
method _init_dataset (line 70) | def _init_dataset(self):
FILE: LAMDA_SSL/Dataset/Text/SST2.py
class SST2 (line 10) | class SST2(SemiDataset,TextMixin):
method __init__ (line 23) | def __init__(self,root,
method download (line 61) | def download(self):
method _init_dataset (line 67) | def _init_dataset(self):
FILE: LAMDA_SSL/Dataset/TrainDataset.py
class TrainDataset (line 6) | class TrainDataset(Dataset):
method __init__ (line 7) | def __init__(self,
method _init_dataset (line 55) | def _init_dataset(self):
method init_dataset (line 60) | def init_dataset(self,labeled_X=None,labeled_y=None,unlabeled_X=None,
method add_transform (line 99) | def add_transform(self,transform,dim,x,y):
method add_target_transform (line 102) | def add_target_transform(self,target_transform,dim,x,y=0):
method add_transforms (line 105) | def add_transforms(self,transforms,dim,x,y=0):
method add_pre_transform (line 108) | def add_pre_transform(self,transform,dim,x,y=0):
method add_unlabeled_transform (line 112) | def add_unlabeled_transform(self,unlabeled_transform,dim,x,y=0):
method get_dataset (line 115) | def get_dataset(self,labeled):
method __getitem__ (line 121) | def __getitem__(self, i, labeled=True):
method __len__ (line 128) | def __len__(self,labeled=True):
FILE: LAMDA_SSL/Dataset/UnlabeledDataset.py
class UnlabeledDataset (line 7) | class UnlabeledDataset(Dataset):
method __init__ (line 8) | def __init__(self,
method init_dataset (line 24) | def init_dataset(self, X=None, y=None):
method to_list (line 33) | def to_list(self,l):
method insert (line 40) | def insert(self,l,pos,item):
method add_transform (line 50) | def add_transform(self,transform,dim=1,x=0,y=0):
method add_pre_transform (line 64) | def add_pre_transform(self,transform,dim=1,x=0,y=0):
method _transform (line 78) | def _transform(self,X,transform):
method apply_transform (line 95) | def apply_transform(self,X,y=None):
method __getitem__ (line 135) | def __getitem__(self, i):
method __len__ (line 146) | def __len__(self):
FILE: LAMDA_SSL/Dataset/Vision/CIFAR10.py
class CIFAR10 (line 12) | class CIFAR10(SemiDataset,VisionMixin):
method __init__ (line 36) | def __init__(
method _load_meta (line 110) | def _load_meta(self) -> None:
method _check_integrity (line 119) | def _check_integrity(self) -> bool:
method download (line 128) | def download(self) -> None:
method _init_dataset (line 134) | def _init_dataset(self):
FILE: LAMDA_SSL/Dataset/Vision/ImageCLEF.py
function make_dataset_with_labels (line 4) | def make_dataset_with_labels(dir, classnames):
class ImageCLEF (line 24) | class ImageCLEF(Dataset):
method __init__ (line 25) | def __init__(self, root, domain='webcam', transform=None,classnames=cl...
method __getitem__ (line 44) | def __getitem__(self, index):
method make_dataset_classwise (line 51) | def make_dataset_classwise(self, category):
method __len__ (line 60) | def __len__(self):
FILE: LAMDA_SSL/Dataset/Vision/Mnist.py
class Mnist (line 11) | class Mnist(SemiDataset,VisionMixin):
method __init__ (line 12) | def __init__(
method init_default_transforms (line 75) | def init_default_transforms(self):
method _init_dataset (line 85) | def _init_dataset(self):
FILE: LAMDA_SSL/Dataset/Vision/Office31.py
function make_dataset_with_labels (line 4) | def make_dataset_with_labels(dir, classnames):
class Office31 (line 28) | class Office31(Dataset):
method __init__ (line 29) | def __init__(self, root, domain='webcam', transform=None,classnames=cl...
method __getitem__ (line 48) | def __getitem__(self, index):
method make_dataset_classwise (line 55) | def make_dataset_classwise(self, category):
method __len__ (line 64) | def __len__(self):
FILE: LAMDA_SSL/Dataset/Vision/VisDA.py
function make_dataset_with_labels (line 4) | def make_dataset_with_labels(dir, classnames):
class VisDA (line 33) | class VisDA(Dataset):
method __init__ (line 34) | def __init__(self, root, domain='train', transform=None,classnames=cla...
method __getitem__ (line 56) | def __getitem__(self, index):
method __len__ (line 74) | def __len__(self):
FILE: LAMDA_SSL/Distributed/DataParallel.py
class DataParallel (line 2) | class DataParallel:
method __init__ (line 3) | def __init__(self, device_ids=None, output_device=None, dim=0):
method init_parallel (line 11) | def init_parallel(self,module):
FILE: LAMDA_SSL/Distributed/DistributedDataParallel.py
class DistributedDataParallel (line 2) | class DistributedDataParallel:
method __init__ (line 3) | def __init__(
method init_parallel (line 31) | def init_parallel(self,module):
FILE: LAMDA_SSL/Evaluation/Classifier/AUC.py
class AUC (line 6) | class AUC(ClassifierEvaluation):
method __init__ (line 7) | def __init__(self,
method scoring (line 30) | def scoring(self,y_true,y_pred=None,y_score=None):
FILE: LAMDA_SSL/Evaluation/Classifier/Accuracy.py
class Accuracy (line 5) | class Accuracy(ClassifierEvaluation):
method __init__ (line 6) | def __init__(self,normalize=True, sample_weight=None):
method scoring (line 14) | def scoring(self,y_true,y_pred=None,y_score=None):
FILE: LAMDA_SSL/Evaluation/Classifier/Confusion_Matrix.py
class Confusion_Matrix (line 5) | class Confusion_Matrix(ClassifierEvaluation):
method __init__ (line 6) | def __init__(self,labels=None, sample_weight=None, normalize=None):
method scoring (line 21) | def scoring(self,y_true,y_pred=None,y_score=None):
FILE: LAMDA_SSL/Evaluation/Classifier/F1.py
class F1 (line 5) | class F1(ClassifierEvaluation):
method __init__ (line 6) | def __init__(self,
method scoring (line 27) | def scoring(self,y_true,y_pred=None,y_score=None):
FILE: LAMDA_SSL/Evaluation/Classifier/Precision.py
class Precision (line 5) | class Precision(ClassifierEvaluation):
method __init__ (line 6) | def __init__(self,labels=None,
method scoring (line 26) | def scoring(self,y_true,y_pred=None,y_score=None):
FILE: LAMDA_SSL/Evaluation/Classifier/Recall.py
class Recall (line 5) | class Recall(ClassifierEvaluation):
method __init__ (line 6) | def __init__(self,
method scoring (line 27) | def scoring(self,y_true,y_pred=None,y_score=None):
FILE: LAMDA_SSL/Evaluation/Classifier/Top_k_Accuracy.py
class Top_k_Accurary (line 6) | class Top_k_Accurary(ClassifierEvaluation):
method __init__ (line 7) | def __init__(self,k=2, normalize=True, sample_weight=None, labels=None):
method scoring (line 20) | def scoring(self,y_true,y_pred=None,y_score=None):
FILE: LAMDA_SSL/Evaluation/Cluster/Davies_Bouldin_Score.py
class Davies_Bouldin_Score (line 4) | class Davies_Bouldin_Score(ClusterEvaluation):
method __init__ (line 5) | def __init__(self):
method scoring (line 9) | def scoring(self,y_true=None,clusters=None,X=None):
FILE: LAMDA_SSL/Evaluation/Cluster/Fowlkes_Mallows_Score.py
class Fowlkes_Mallows_Score (line 5) | class Fowlkes_Mallows_Score(ClusterEvaluation):
method __init__ (line 6) | def __init__(self,sparse=False):
method scoring (line 12) | def scoring(self,y_true=None,clusters=None,X=None):
FILE: LAMDA_SSL/Evaluation/Cluster/Jaccard_Score.py
class Jaccard_Score (line 5) | class Jaccard_Score(ClusterEvaluation):
method __init__ (line 6) | def __init__(self, labels=None, pos_label=1,
method scoring (line 44) | def scoring(self,y_true=None,clusters=None,X=None):
FILE: LAMDA_SSL/Evaluation/Cluster/Rand_Score.py
class Rand_Score (line 4) | class Rand_Score(ClusterEvaluation):
method __init__ (line 5) | def __init__(self):
method scoring (line 8) | def scoring(self,y_true=None,clusters=None,X=None):
FILE: LAMDA_SSL/Evaluation/Cluster/Silhouette_Score.py
class Silhouette_Score (line 5) | class Silhouette_Score(ClusterEvaluation):
method __init__ (line 6) | def __init__(self, metric="euclidean", sample_size=None, random_state=...
method scoring (line 13) | def scoring(self,y_true=None,clusters=None,X=None):
FILE: LAMDA_SSL/Evaluation/Regressor/Mean_Absolute_Error.py
class Mean_Absolute_Error (line 5) | class Mean_Absolute_Error(RegressorEvaluation):
method __init__ (line 6) | def __init__(self,sample_weight=None, multioutput="uniform_average"):
method scoring (line 14) | def scoring(self,y_true,y_pred=None):
FILE: LAMDA_SSL/Evaluation/Regressor/Mean_Squared_Error.py
class Mean_Squared_Error (line 5) | class Mean_Squared_Error(RegressorEvaluation):
method __init__ (line 6) | def __init__(self,sample_weight=None, multioutput="uniform_average",sq...
method scoring (line 17) | def scoring(self,y_true,y_pred=None):
FILE: LAMDA_SSL/Evaluation/Regressor/Mean_Squared_Log_Error.py
class Mean_Squared_Log_Error (line 5) | class Mean_Squared_Log_Error(RegressorEvaluation):
method __init__ (line 6) | def __init__(self,sample_weight=None, multioutput="uniform_average",sq...
method scoring (line 16) | def scoring(self,y_true,y_pred=None):
FILE: LAMDA_SSL/Evaluation/Regressor/Median_Absolute_Error.py
class Median_Absolute_Error (line 5) | class Median_Absolute_Error(RegressorEvaluation):
method __init__ (line 6) | def __init__(self,sample_weight=None, multioutput="uniform_average"):
method scoring (line 14) | def scoring(self,y_true,y_pred=None):
FILE: LAMDA_SSL/Loss/Consistency.py
class Consistency (line 5) | class Consistency(nn.Module):
method __init__ (line 6) | def __init__(self,reduction='mean'):
method forward (line 12) | def forward(self,logits_1,logits_2):
FILE: LAMDA_SSL/Loss/Cross_Entropy.py
class Cross_Entropy (line 4) | class Cross_Entropy(nn.Module):
method __init__ (line 5) | def __init__(self, use_hard_labels=True, reduction='mean'):
method forward (line 13) | def forward(self,logits, targets):
FILE: LAMDA_SSL/Loss/EntMin.py
class EntMin (line 4) | class EntMin(nn.Module):
method __init__ (line 5) | def __init__(self, reduction='mean', activation=None):
method forward (line 12) | def forward(self,logits):
FILE: LAMDA_SSL/Loss/KL_Divergence.py
class KL_Divergence (line 4) | class KL_Divergence(nn.Module):
method __init__ (line 5) | def __init__(self,softmax_1=True,softmax_2=True,reduction='mean'):
method forward (line 14) | def forward(self,logits_1,logits_2): # KL(p||q)
FILE: LAMDA_SSL/Loss/MSE.py
class MSE (line 4) | class MSE(nn.Module):
method __init__ (line 5) | def __init__(self,reduction='mean',activation_1=None,activation_2=None):
method forward (line 14) | def forward(self,logits_1,logits_2):
FILE: LAMDA_SSL/Loss/Semi_Supervised_Loss.py
class Semi_Supervised_Loss (line 2) | class Semi_Supervised_Loss(nn.Module):
method __init__ (line 3) | def __init__(self,lambda_u=1.0):
method forward (line 7) | def forward(self,sup_loss,unsup_loss):
FILE: LAMDA_SSL/Network/AdversarialNet.py
class GradientReverseLayer (line 4) | class GradientReverseLayer(torch.autograd.Function):
method forward (line 18) | def forward(ctx, coeff, input):
method backward (line 24) | def backward(ctx, grad_outputs):
class GradientReverseModule (line 29) | class GradientReverseModule(nn.Module):
method __init__ (line 32) | def __init__(self, scheduler):
method forward (line 39) | def forward(self, x):
function aToBSheduler (line 45) | def aToBSheduler(step, A, B, gamma=10, max_iter=10000):
class AdversarialNet (line 49) | class AdversarialNet(nn.Module):
method __init__ (line 50) | def __init__(self, in_feature):
method forward (line 64) | def forward(self, x):
FILE: LAMDA_SSL/Network/FT_Transformer.py
class Tokenizer (line 10) | class Tokenizer(nn.Module):
method __init__ (line 13) | def __init__(
method n_tokens (line 43) | def n_tokens(self) -> int:
method forward (line 48) | def forward(self, x_num: Tensor, x_cat: ty.Optional[Tensor]) -> Tensor:
class MultiheadAttention (line 73) | class MultiheadAttention(nn.Module):
method __init__ (line 74) | def __init__(
method _reshape (line 97) | def _reshape(self, x: Tensor) -> Tensor:
method forward (line 106) | def forward(
class FT_Transformer (line 144) | class FT_Transformer(nn.Module):
method __init__ (line 145) | def __init__(
method _get_kv_compressions (line 255) | def _get_kv_compressions(self, layer):
method _start_residual (line 266) | def _start_residual(self, x, layer, norm_idx):
method _end_residual (line 274) | def _end_residual(self, x, x_residual, layer, norm_idx):
method forward (line 282) | def forward(self, x) -> Tensor:
FILE: LAMDA_SSL/Network/GAT.py
class GAT (line 5) | class GAT(torch.nn.Module):
method __init__ (line 6) | def __init__(self, dim_in, num_classes, dim_hidden=16, heads=8, dropo...
method forward (line 19) | def forward(self, data):
FILE: LAMDA_SSL/Network/GCN.py
class GCN (line 4) | class GCN(torch.nn.Module):
method __init__ (line 5) | def __init__(self,dim_in,num_classes,dim_hidden=16,normalize=False):
method forward (line 16) | def forward(self,data):
FILE: LAMDA_SSL/Network/ImprovedGAN.py
class LinearWeightNorm (line 9) | class LinearWeightNorm(torch.nn.Module):
method __init__ (line 10) | def __init__(self, in_features, out_features, bias=True, weight_scale=...
method forward (line 24) | def forward(self, x):
class Discriminator (line 28) | class Discriminator(nn.Module):
method __init__ (line 29) | def __init__(self, dim_in = 28 ** 2,hidden_dim=[1000,500,250,250,250],
method forward (line 49) | def forward(self, x):
class Generator (line 69) | class Generator(nn.Module):
method __init__ (line 70) | def __init__(self, dim_in = 28 ** 2,hidden_dim=[500,500],activations=[...
method forward (line 94) | def forward(self, batch_size=10,z=None):
class ImprovedGAN (line 104) | class ImprovedGAN(nn.Module):
method __init__ (line 105) | def __init__(self, G=None, D=None,dim_in = 28 ** 2,
method forward (line 137) | def forward(self, x):
FILE: LAMDA_SSL/Network/LadderNetwork.py
class Encoder (line 9) | class Encoder(torch.nn.Module):
method __init__ (line 10) | def __init__(self, dim_in, dim_out, activation,
method bn_gamma_beta (line 45) | def bn_gamma_beta(self, x):
method forward_clean (line 53) | def forward_clean(self, h):
method forward_noise (line 66) | def forward_noise(self, tilde_h):
class StackedEncoders (line 86) | class StackedEncoders(torch.nn.Module):
method __init__ (line 87) | def __init__(self, dim_in, num_classes,dim_encoders, activation_types,
method forward_clean (line 115) | def forward_clean(self, x):
method forward_noise (line 122) | def forward_noise(self, x):
method get_encoders_tilde_z (line 134) | def get_encoders_tilde_z(self, reverse=True):
method get_encoders_z_pre (line 144) | def get_encoders_z_pre(self, reverse=True):
method get_encoders_z (line 154) | def get_encoders_z(self, reverse=True):
class Decoder (line 164) | class Decoder(torch.nn.Module):
method __init__ (line 165) | def __init__(self, dim_in, dim_out,device='cpu'):
method g (line 195) | def g(self, tilde_z_l, u_l):
method forward (line 223) | def forward(self, tilde_z_l, u_l):
class StackedDecoders (line 237) | class StackedDecoders(torch.nn.Module):
method __init__ (line 238) | def __init__(self, dim_in, num_classes,dim_decoders, device='cpu'):
method forward (line 261) | def forward(self, tilde_z_layers, u_top, tilde_z_bottom):
method bn_hat_z_layers (line 276) | def bn_hat_z_layers(self, hat_z_layers, z_pre_layers):
class LadderNetwork (line 290) | class LadderNetwork(torch.nn.Module):
method __init__ (line 291) | def __init__(self, dim_encoder=[1000, 500, 250, 250, 250],
method forward_encoders_clean (line 317) | def forward_encoders_clean(self, data):
method forward_encoders_noise (line 320) | def forward_encoders_noise(self, data):
method forward_decoders (line 323) | def forward_decoders(self, tilde_z_layers, encoder_output, tilde_z_bot...
method get_encoders_tilde_z (line 326) | def get_encoders_tilde_z(self, reverse=True):
method get_encoders_z_pre (line 329) | def get_encoders_z_pre(self, reverse=True):
method get_encoder_tilde_z_bottom (line 332) | def get_encoder_tilde_z_bottom(self):
method get_encoders_z (line 335) | def get_encoders_z(self, reverse=True):
method decoder_bn_hat_z_layers (line 338) | def decoder_bn_hat_z_layers(self, hat_z_layers, z_pre_layers):
method forward (line 341) | def forward(self, data):
FILE: LAMDA_SSL/Network/MLPCLS.py
class MLPCLS (line 3) | class MLPCLS(torch.nn.Module):
method __init__ (line 5) | def __init__(self, dim_in = 28 ** 2,hidden_dim=[10],
method forward (line 29) | def forward(self, X):
FILE: LAMDA_SSL/Network/MLPReg.py
class MLPReg (line 3) | class MLPReg(torch.nn.Module):
method __init__ (line 5) | def __init__(self, dim_in = 28 ** 2,hidden_dim=[10],
method forward (line 28) | def forward(self, X):
FILE: LAMDA_SSL/Network/ResNet50.py
function conv3x3 (line 7) | def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: in...
function conv1x1 (line 13) | def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
class BasicBlock (line 18) | class BasicBlock(nn.Module):
method __init__ (line 21) | def __init__(
method forward (line 49) | def forward(self, x: Tensor) -> Tensor:
class Bottleneck (line 68) | class Bottleneck(nn.Module):
method __init__ (line 77) | def __init__(
method forward (line 103) | def forward(self, x: Tensor) -> Tensor:
class ResNet50 (line 126) | class ResNet50(nn.Module):
method __init__ (line 128) | def __init__(
method _make_layer (line 203) | def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], plan...
method _forward_impl (line 228) | def _forward_impl(self, x):
method forward (line 250) | def forward(self, x):
FILE: LAMDA_SSL/Network/ResNet50Fc.py
class ResNet50Fc (line 3) | class ResNet50Fc(nn.Module):
method __init__ (line 8) | def __init__(self, num_classes, output_feature=False):
method forward (line 28) | def forward(self, x):
method output_num (line 47) | def output_num(self):
FILE: LAMDA_SSL/Network/SDNE.py
class SDNE (line 3) | class SDNE(torch.nn.Module):
method __init__ (line 4) | def __init__(self, dim_in, hidden_layers, device="cpu"):
method forward (line 30) | def forward(self,X=None):
FILE: LAMDA_SSL/Network/SSVAE.py
class SSVAE (line 6) | class SSVAE(nn.Module):
method __init__ (line 16) | def __init__(self, dim_in,num_classes,dim_z,dim_hidden_de=[500,500],
method encode_z (line 104) | def encode_z(self, x, y):
method encode_y (line 110) | def encode_y(self, x):
method decode (line 114) | def decode(self, y, z):
method forward (line 120) | def forward(self, x):
FILE: LAMDA_SSL/Network/TextRCNN.py
class TextRCNN (line 5) | class TextRCNN(nn.Module):
method __init__ (line 7) | def __init__(self, n_vocab,embedding_dim=300,len_seq=300, padding_idx=...
method forward (line 32) | def forward(self, x):
FILE: LAMDA_SSL/Network/WideResNet.py
function mish (line 8) | def mish(x):
class PSBatchNorm2d (line 13) | class PSBatchNorm2d(nn.BatchNorm2d):
method __init__ (line 16) | def __init__(self, num_features, alpha=0.1, eps=1e-05, momentum=0.001,...
method forward (line 20) | def forward(self, x):
class BasicBlock (line 24) | class BasicBlock(nn.Module):
method __init__ (line 25) | def __init__(self, in_planes, out_planes, stride, drop_rate=0.0, activ...
method forward (line 41) | def forward(self, x):
class NetworkBlock (line 53) | class NetworkBlock(nn.Module):
method __init__ (line 54) | def __init__(self, nb_layers, in_planes, out_planes, block, stride, dr...
method _make_layer (line 59) | def _make_layer(self, block, in_planes, out_planes, nb_layers, stride,...
method forward (line 66) | def forward(self, x):
class WideResNet (line 70) | class WideResNet(nn.Module):
method __init__ (line 71) | def __init__(self, num_classes=10, depth=28, widen_factor=2, drop_rat...
method forward (line 118) | def forward(self, x):
FILE: LAMDA_SSL/Opitimizer/Adam.py
class Adam (line 3) | class Adam(BaseOptimizer):
method __init__ (line 4) | def __init__(self,lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
method init_optimizer (line 21) | def init_optimizer(self,params):
FILE: LAMDA_SSL/Opitimizer/SGD.py
class SGD (line 4) | class SGD(BaseOptimizer):
method __init__ (line 5) | def __init__(self, lr=0.01, momentum=0, dampening=0, weight_decay=0, n...
method init_optimizer (line 22) | def init_optimizer(self,params):
FILE: LAMDA_SSL/Sampler/BatchSampler.py
class BatchSampler (line 3) | class BatchSampler(BaseSampler):
method __init__ (line 4) | def __init__(self, batch_size: int, drop_last: bool):
method init_sampler (line 12) | def init_sampler(self,sampler):
FILE: LAMDA_SSL/Sampler/DistributedSampler.py
class DistributedSampler (line 3) | class DistributedSampler(BaseSampler):
method __init__ (line 4) | def __init__(self,num_replicas=None, rank=None, shuffle=True, seed=0, ...
method init_sampler (line 18) | def init_sampler(self,data_source):
FILE: LAMDA_SSL/Sampler/RandomSampler.py
class RandomSampler (line 3) | class RandomSampler(BaseSampler):
method __init__ (line 4) | def __init__(self,replacement: bool = False,
method init_sampler (line 15) | def init_sampler(self,data_source):
FILE: LAMDA_SSL/Sampler/SequentialSampler.py
class SequentialSampler (line 3) | class SequentialSampler(BaseSampler):
method __init__ (line 4) | def __init__(self):
method init_sampler (line 6) | def init_sampler(self,data_source):
FILE: LAMDA_SSL/Scheduler/CosineAnnealingLR.py
class CosineAnnealingLR (line 3) | class CosineAnnealingLR(BaseScheduler):
method __init__ (line 4) | def __init__(self, T_max, eta_min=0, last_epoch=-1, verbose=False):
method init_scheduler (line 13) | def init_scheduler(self,optimizer):
FILE: LAMDA_SSL/Scheduler/CosineWarmup.py
class CosineWarmup (line 3) | class CosineWarmup(LambdaLR):
method __init__ (line 4) | def __init__(self,
method _lr_lambda (line 22) | def _lr_lambda(self,current_step):
FILE: LAMDA_SSL/Scheduler/InverseDecaySheduler.py
class InverseDecaySheduler (line 3) | class InverseDecaySheduler(LambdaLR):
method __init__ (line 4) | def __init__(self, initial_lr, gamma=10, power=0.75, max_iter=1000):
method _lr_lambda (line 10) | def _lr_lambda(self, current_step):
FILE: LAMDA_SSL/Scheduler/LinearWarmup.py
class LinearWarmup (line 3) | class LinearWarmup(LambdaLR):
method __init__ (line 4) | def __init__(self,
method _lr_lambda (line 25) | def _lr_lambda(self,current_step):
FILE: LAMDA_SSL/Scheduler/StepLR.py
class StepLR (line 3) | class StepLR(BaseScheduler):
method __init__ (line 4) | def __init__(self, step_size, gamma=0.1, last_epoch=-1, verbose=False):
method init_scheduler (line 16) | def init_scheduler(self,optimizer):
FILE: LAMDA_SSL/Search/BayesSearchCV.py
function PI (line 23) | def PI(x,gp,y_max=1,xi=0.01,kappa=None):
function EI (line 28) | def EI(x,gp,y_max=1,xi=0.01,kappa=None):
function UCB (line 34) | def UCB(x,gp,y_max=None,xi=None,kappa=0.1):
class BayesSearchCV (line 38) | class BayesSearchCV(BaseSearchCV):
method __init__ (line 39) | def __init__(
method _run_search (line 97) | def _run_search(self, evaluate_candidates):
method fit (line 105) | def fit(self, X, y=None, *, groups=None, **fit_params):
FILE: LAMDA_SSL/Search/EvolutionaryStrategySearchCV.py
class Evolve (line 22) | class Evolve:
method __init__ (line 24) | def __init__(self, param_distributions, *, random_state=None,lam=5,anc...
method _is_all_lists (line 55) | def _is_all_lists(self):
method __iter__ (line 61) | def __iter__(self):
method __len__ (line 78) | def __len__(self):
class EvolutionaryStrategySearchCV (line 86) | class EvolutionaryStrategySearchCV(BaseSearchCV):
method __init__ (line 87) | def __init__(
method fit (line 133) | def fit(self, X, y=None, *, groups=None, **fit_params):
method _run_search (line 326) | def _run_search(self, evaluate_candidates):
FILE: LAMDA_SSL/Search/MetaLearnerSearchCV.py
class MetaLearnerSearchCV (line 21) | class MetaLearnerSearchCV(BaseSearchCV):
method __init__ (line 22) | def __init__(
method _run_search (line 73) | def _run_search(self, evaluate_candidates):
method fit (line 81) | def fit(self, X, y=None, *, groups=None, **fit_params):
FILE: LAMDA_SSL/Split/DataSplit.py
function get_split_num (line 8) | def get_split_num(X,size_split=0.1):
function get_split_index (line 31) | def get_split_index(y,num_1,num_2,stratified,shuffle,random_state=None):
function DataSplit (line 95) | def DataSplit(stratified=True,shuffle=True,random_state=None, X=None, y=...
FILE: LAMDA_SSL/Split/ViewSplit.py
function ViewSplit (line 5) | def ViewSplit(X,num_splits=2,axis=1,shuffle=True):
FILE: LAMDA_SSL/Transform/Graph/GCNNorm.py
class GCNNorm (line 3) | class GCNNorm(Transformer):
method __init__ (line 4) | def __init__(self,add_self_loops=True):
method transform (line 10) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Graph/GDC.py
class GDC (line 3) | class GDC(Transformer):
method __init__ (line 4) | def __init__(self,self_loop_weight=1, normalization_in='sym',
method transform (line 21) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Graph/NormalizeFeatures.py
class NormalizeFeatures (line 4) | class NormalizeFeatures(Transformer):
method __init__ (line 5) | def __init__(self,attrs=["x"]):
method transform (line 13) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Graph/SVDFeatureReduction.py
class SVDFeatureReduction (line 3) | class SVDFeatureReduction(Transformer):
method __init__ (line 4) | def __init__(self,out_channels):
method transform (line 10) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Tabular/MaxAbsScaler.py
class MaxAbsScaler (line 2) | class MaxAbsScaler(Transformer):
method __init__ (line 3) | def __init__(self,max_abs=None):
method transform (line 9) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Tabular/MinMaxScaler.py
class MinMaxScaler (line 2) | class MinMaxScaler(Transformer):
method __init__ (line 3) | def __init__(self,min_val=None,max_val=None):
method transform (line 11) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Tabular/StandarScaler.py
class StandardScaler (line 2) | class StandardScaler(Transformer):
method __init__ (line 3) | def __init__(self,mean=None,std=None):
method transform (line 11) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Text/AdjustLength.py
class AdjustLength (line 4) | class AdjustLength(Transformer):
method __init__ (line 5) | def __init__(self,length=300,pad_val=None,pos=0):
method transform (line 15) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Text/AutoTokenizer.py
class AutoTokenizer (line 6) | class AutoTokenizer(Transformer):
method __init__ (line 7) | def __init__(self,model_name='hfl/chinese-roberta-wwm-ext',padding='ma...
method transform (line 17) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Text/CharNGram.py
class CharNGram (line 5) | class CharNGram(Transformer):
method __init__ (line 6) | def __init__(self,lower_case_backup=True,unk_init=None,pad_init=None,p...
method transform (line 23) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Text/FastText.py
class FastText (line 5) | class FastText(Transformer):
method __init__ (line 6) | def __init__(self, language="en",lower_case_backup=True,unk_init=None,...
method transform (line 23) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Text/GloVe.py
class Glove (line 5) | class Glove(Transformer):
method __init__ (line 6) | def __init__(self,name="840B", dim=300,lower_case_backup=True,unk_init...
method transform (line 25) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Text/Lcut.py
class Lcut (line 6) | class Lcut(Transformer):
method __init__ (line 7) | def __init__(self):
method transform (line 10) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Text/PadSequence.py
class PadSequence (line 2) | class PadSequence(Transformer):
method __init__ (line 3) | def __init__(self,length=300,pad_val=None):
method transform (line 10) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Text/Split.py
class Split (line 3) | class Split(Transformer):
method __init__ (line 4) | def __init__(self):
method transform (line 7) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Text/SynonymsReplacement.py
function synonym_replacement (line 7) | def synonym_replacement(tokens, n=10):
class SynonymsReplacement (line 19) | class SynonymsReplacement(Transformer):
method __init__ (line 20) | def __init__(self,n=10):
method transform (line 24) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Text/Tokenizer.py
class Tokenizer (line 4) | class Tokenizer(Transformer):
method __init__ (line 5) | def __init__(self,tokenizer='basic_english',language='en'):
method transform (line 14) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Text/Truncate.py
class Truncate (line 2) | class Truncate(Transformer):
method __init__ (line 3) | def __init__(self,length=300,pos=0):
method transform (line 11) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Text/Vectors.py
class Vectors (line 4) | class Vectors(Transformer):
method __init__ (line 5) | def __init__(self,name='840B', cache=None, url=None, unk_init=None,pad...
method transform (line 29) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Text/Vocab.py
class Vocab (line 6) | class Vocab(Transformer):
method __init__ (line 7) | def __init__(self,word_vocab=None,vectors=None,text=None,min_freq=1,sp...
method transform (line 52) | def transform(self,X):
FILE: LAMDA_SSL/Transform/ToImage.py
class ToImage (line 8) | class ToImage(Transformer):
method __init__ (line 10) | def __init__(self,channels=3,channels_first=False):
method transform (line 18) | def transform(self,X):
FILE: LAMDA_SSL/Transform/ToNumpy.py
class ToNumpy (line 5) | class ToNumpy(Transformer):
method __init__ (line 6) | def __init__(self):
method transform (line 9) | def transform(self,X):
FILE: LAMDA_SSL/Transform/ToTensor.py
class ToTensor (line 6) | class ToTensor(Transformer):
method __init__ (line 7) | def __init__(self,dtype=None,image=False):
method transform (line 15) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Vision/Normalization.py
class Normalization (line 3) | class Normalization(Transformer):
method __init__ (line 4) | def __init__(self,mean=None,std=None):
method transform (line 13) | def transform(self,X):
FILE: LAMDA_SSL/Transform/Vision/Resize.py
class Resize (line 7) | class Resize(Transformer):
method __init__ (line 8) | def __init__(self,size, interpolation = InterpolationMode.BILINEAR,
method transform (line 26) | def transform(self,X):
FILE: LAMDA_SSL/utils.py
function is_pandas_ndframe (line 20) | def is_pandas_ndframe(x):
function indexing_none (line 23) | def indexing_none(data, i):
function indexing_dict (line 27) | def indexing_dict(data, i):
function indexing_list_tuple_of_data (line 31) | def indexing_list_tuple_of_data(data, i, indexings=None):
function indexing_sparse (line 37) | def indexing_sparse(data,i):
function indexing_ndframe (line 42) | def indexing_ndframe(data, i):
function indexing_other (line 50) | def indexing_other(data, i):
function indexing_dataset (line 55) | def indexing_dataset(data,i):
function get_indexing_method (line 61) | def get_indexing_method(data):
function normalize_numpy_indices (line 89) | def normalize_numpy_indices(i):
function indexing (line 98) | def indexing(data, i, indexing_method=None):
function flatten (line 107) | def flatten(arr):
function apply_to_data (line 114) | def apply_to_data(data, func, unpack_dict=False):
function is_sparse (line 128) | def is_sparse(x):
function _len (line 134) | def _len(data):
function get_len (line 146) | def get_len(data):
function is_torch_data_type (line 154) | def is_torch_data_type(x):
function to_device (line 158) | def to_device(X, device):
function to_numpy (line 202) | def to_numpy(X):
class partial (line 227) | class partial:
method __new__ (line 233) | def __new__(*args, **keywords):
method __call__ (line 258) | def __call__(*args, **keywords):
method change (line 265) | def change(self,**keywords):
method __repr__ (line 269) | def __repr__(self):
method __reduce__ (line 278) | def __reduce__(self):
method __setstate__ (line 282) | def __setstate__(self, state):
class EMA (line 306) | class EMA:
method __init__ (line 311) | def __init__(self, model, decay):
method load (line 317) | def load(self, ema_model):
method register (line 321) | def register(self):
method update (line 326) | def update(self):
method apply_shadow (line 333) | def apply_shadow(self):
method restore (line 340) | def restore(self):
class class_status (line 347) | class class_status:
method __init__ (line 348) | def __init__(self,y):
method classes (line 357) | def classes(self):
method y_indices (line 362) | def y_indices(self):
method num_classes (line 367) | def num_classes(self):
method class_counts (line 373) | def class_counts(self):
function _l2_normalize (line 379) | def _l2_normalize(d):
function one_hot (line 384) | def one_hot(targets, nClass,device):
class Bn_Controller (line 388) | class Bn_Controller:
method __init__ (line 389) | def __init__(self):
method freeze_bn (line 395) | def freeze_bn(self, model):
method unfreeze_bn (line 403) | def unfreeze_bn(self, model):
Condensed preview — 305 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (1,710K chars).
[
{
"path": "Examples/Assemble_BreastCancer.py",
"chars": 1687,
"preview": "from LAMDA_SSL.Algorithm.Classification.Assemble import Assemble\nfrom LAMDA_SSL.Dataset.Tabular.BreastCancer import Brea"
},
{
"path": "Examples/CIFAR10_imbalance.py",
"chars": 0,
"preview": ""
},
{
"path": "Examples/CoReg_Boston.py",
"chars": 1359,
"preview": "from LAMDA_SSL.Algorithm.Regression.CoReg import CoReg\nfrom LAMDA_SSL.Evaluation.Regressor.Mean_Absolute_Error import Me"
},
{
"path": "Examples/Co_Training_BreastCancer.py",
"chars": 1792,
"preview": "from LAMDA_SSL.Algorithm.Classification.Co_Training import Co_Training\nfrom LAMDA_SSL.Dataset.Tabular.Wine import Wine\nf"
},
{
"path": "Examples/Co_Training_Wine.py",
"chars": 1797,
"preview": "from LAMDA_SSL.Algorithm.Classification.Co_Training import Co_Training\nfrom LAMDA_SSL.Dataset.Tabular.Wine import Wine\nf"
},
{
"path": "Examples/Constrained_Seed_k_means_Wine.py",
"chars": 1562,
"preview": "from LAMDA_SSL.Algorithm.Clustering.Constrained_Seed_k_means import Constrained_Seed_k_means\nfrom LAMDA_SSL.Evaluation.C"
},
{
"path": "Examples/Constrained_k_means_Wine.py",
"chars": 1740,
"preview": "from LAMDA_SSL.Algorithm.Clustering.Constrained_k_means import Constrained_k_means\nfrom LAMDA_SSL.Evaluation.Cluster.Dav"
},
{
"path": "Examples/FixMatch_BreastCancer.py",
"chars": 2715,
"preview": "from LAMDA_SSL.Algorithm.Classification.FixMatch import FixMatch\nfrom LAMDA_SSL.Dataset.Tabular.BreastCancer import Brea"
},
{
"path": "Examples/FixMatch_CIFAR10.py",
"chars": 5274,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "Examples/FixMatch_SST2.py",
"chars": 4546,
"preview": "from LAMDA_SSL.Dataset.Text.SST2 import SST2\nfrom LAMDA_SSL.Opitimizer.SGD import SGD\nfrom LAMDA_SSL.Scheduler.CosineAnn"
},
{
"path": "Examples/FlexMatch_CIFAR10.py",
"chars": 5199,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "Examples/FlexMatch_SST2.py",
"chars": 4549,
"preview": "from LAMDA_SSL.Dataset.Text.SST2 import SST2\nfrom LAMDA_SSL.Opitimizer.SGD import SGD\nfrom LAMDA_SSL.Scheduler.CosineAnn"
},
{
"path": "Examples/GAT_Cora.py",
"chars": 1475,
"preview": "from LAMDA_SSL.Dataset.Graph.Cora import Cora\nfrom LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy\nfrom LAMDA_S"
},
{
"path": "Examples/GCN_Cora.py",
"chars": 1466,
"preview": "from LAMDA_SSL.Dataset.Graph.Cora import Cora\nfrom LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy\nfrom LAMDA_S"
},
{
"path": "Examples/ICTReg_Boston.py",
"chars": 4152,
"preview": "from LAMDA_SSL.Augmentation.Tabular.Noise import Noise\nimport torch.nn as nn\nfrom LAMDA_SSL.Dataset.Tabular.Boston impor"
},
{
"path": "Examples/ICT_CIFAR10.py",
"chars": 4638,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "Examples/ImprovedGAN_MNIST.py",
"chars": 4314,
"preview": "from LAMDA_SSL.Opitimizer.Adam import Adam\nfrom LAMDA_SSL.Transform.ToImage import ToImage\nfrom LAMDA_SSL.Dataloader.Unl"
},
{
"path": "Examples/LabelPropagation_BreastCancer.py",
"chars": 1663,
"preview": "from LAMDA_SSL.Algorithm.Classification.LabelPropagation import LabelPropagation\nfrom LAMDA_SSL.Evaluation.Classifier.Ac"
},
{
"path": "Examples/LabelSpreading_BreastCancer.py",
"chars": 1655,
"preview": "from LAMDA_SSL.Algorithm.Classification.LabelSpreading import LabelSpreading\nfrom LAMDA_SSL.Evaluation.Classifier.Accura"
},
{
"path": "Examples/LadderNetwork_MNIST.py",
"chars": 4204,
"preview": "from LAMDA_SSL.Algorithm.Classification.LadderNetwork import LadderNetwork\nfrom LAMDA_SSL.Opitimizer.Adam import Adam\nfr"
},
{
"path": "Examples/LapSVM_BreastCancer.py",
"chars": 1747,
"preview": "from LAMDA_SSL.Algorithm.Classification.LapSVM import LapSVM\nfrom LAMDA_SSL.Evaluation.Classifier.Recall import Recall\nf"
},
{
"path": "Examples/MeanTeacherReg_Boston.py",
"chars": 4169,
"preview": "from LAMDA_SSL.Augmentation.Tabular.Noise import Noise\nimport torch.nn as nn\nfrom LAMDA_SSL.Dataset.Tabular.Boston impor"
},
{
"path": "Examples/MeanTeacher_CIFAR10.py",
"chars": 4723,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "Examples/MixMatch_CIFAR10.py",
"chars": 4675,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "Examples/ParallelDistributed.py",
"chars": 5445,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "Examples/Parameter_Search.py",
"chars": 5413,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "Examples/PiModelReg_Boston.py",
"chars": 4162,
"preview": "from LAMDA_SSL.Augmentation.Tabular.Noise import Noise\nimport torch.nn as nn\nfrom LAMDA_SSL.Dataset.Tabular.Boston impor"
},
{
"path": "Examples/PiModel_CIFAR10.py",
"chars": 4626,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "Examples/PseudoLabel_CIFAR10.py",
"chars": 4748,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "Examples/ReMixMatch_CIFAR10.py",
"chars": 5401,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "Examples/S4L_CIFAR10.py",
"chars": 4665,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "Examples/SDNE_Cora.py",
"chars": 1623,
"preview": "from LAMDA_SSL.Dataset.Graph.Cora import Cora\nfrom LAMDA_SSL.Evaluation.Classifier.Precision import Precision\nfrom LAMDA"
},
{
"path": "Examples/SSGMM_BreastCancer.py",
"chars": 1578,
"preview": "from LAMDA_SSL.Algorithm.Classification.SSGMM import SSGMM\nfrom LAMDA_SSL.Evaluation.Classifier.F1 import F1\nfrom LAMDA_"
},
{
"path": "Examples/SSVAE_MNIST.py",
"chars": 4204,
"preview": "from LAMDA_SSL.Algorithm.Classification.SSVAE import SSVAE\nfrom LAMDA_SSL.Opitimizer.Adam import Adam\nfrom LAMDA_SSL.Tra"
},
{
"path": "Examples/Save_Load_Model.py",
"chars": 5410,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "Examples/SemiBoost_BreastCancer.py",
"chars": 1708,
"preview": "from LAMDA_SSL.Algorithm.Classification.SemiBoost import SemiBoost\nfrom LAMDA_SSL.Dataset.Tabular.BreastCancer import Br"
},
{
"path": "Examples/TSVM_BreastCancer.py",
"chars": 1579,
"preview": "from LAMDA_SSL.Algorithm.Classification.TSVM import TSVM\nfrom LAMDA_SSL.Evaluation.Classifier.Recall import Recall\nfrom "
},
{
"path": "Examples/TemporalEnsembling_CIFAR10.py",
"chars": 4891,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "Examples/Tri_Training_BreastCancer.py",
"chars": 1714,
"preview": "from LAMDA_SSL.Algorithm.Classification.Tri_Training import Tri_Training\nfrom LAMDA_SSL.Dataset.Tabular.BreastCancer imp"
},
{
"path": "Examples/UDA_CIFAR10.py",
"chars": 5176,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "Examples/VAT_CIFAR10.py",
"chars": 4592,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/Assemble.py",
"chars": 5036,
"preview": "import copy\nimport numbers\nimport numpy as np\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom skle"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/CAFA.py",
"chars": 22980,
"preview": "from LAMDA_SSL.Base.DeepModelMixin import DeepModelMixin\r\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimat"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/Co_Training.py",
"chars": 9190,
"preview": "from LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nimport numpy as np\nfrom sklearn.base import ClassifierM"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/FixMatch.py",
"chars": 7383,
"preview": "import copy\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_SSL.Base.DeepModelMixin import D"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/Fix_A_Step.py",
"chars": 11042,
"preview": "import copy\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_SSL.Base.DeepModelMixin import D"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/FlexMatch.py",
"chars": 10368,
"preview": "import copy\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_SSL.Base.DeepModelMixin import D"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/FreeMatch.py",
"chars": 10821,
"preview": "import copy\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_SSL.Base.DeepModelMixin import D"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/GAT.py",
"chars": 9918,
"preview": "import copy\n\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_SSL.Base.DeepModelMixin import "
},
{
"path": "LAMDA_SSL/Algorithm/Classification/GCN.py",
"chars": 9839,
"preview": "import copy\n\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_SSL.Base.DeepModelMixin import "
},
{
"path": "LAMDA_SSL/Algorithm/Classification/ICT.py",
"chars": 7388,
"preview": "from LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_SSL.Base.DeepModelMixin import DeepModelMixi"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/ImprovedGAN.py",
"chars": 16656,
"preview": "import copy\nimport torch.nn.functional as F\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/LabelPropagation.py",
"chars": 3895,
"preview": "from LAMDA_SSL.Base.TransductiveEstimator import TransductiveEstimator\nfrom sklearn.base import ClassifierMixin\nimport n"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/LabelSpreading.py",
"chars": 4013,
"preview": "from LAMDA_SSL.Base.TransductiveEstimator import TransductiveEstimator\nfrom sklearn.base import ClassifierMixin\nimport n"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/LadderNetwork.py",
"chars": 9586,
"preview": "import copy\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_SSL.Base.DeepModelMixin import D"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/LapSVM.py",
"chars": 8517,
"preview": "import numpy as np\nfrom scipy.optimize import minimize\nfrom sklearn.neighbors import kneighbors_graph\nfrom LAMDA_SSL.Bas"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/MTCF.py",
"chars": 9584,
"preview": "import copy\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_SSL.Base.DeepModelMixin import D"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/MeanTeacher.py",
"chars": 7284,
"preview": "from LAMDA_SSL.Base.DeepModelMixin import DeepModelMixin\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimato"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/MixMatch.py",
"chars": 9755,
"preview": "import copy\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_SSL.Base.DeepModelMixin import D"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/PiModel.py",
"chars": 6993,
"preview": "from LAMDA_SSL.Base.DeepModelMixin import DeepModelMixin\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimato"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/PseudoLabel.py",
"chars": 6605,
"preview": "from LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_SSL.Base.DeepModelMixin import DeepModelMixi"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/ReMixMatch.py",
"chars": 11852,
"preview": "import copy\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_SSL.Base.DeepModelMixin import D"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/S4L.py",
"chars": 8610,
"preview": "from LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_SSL.Base.DeepModelMixin import DeepModelMixi"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/SDNE.py",
"chars": 13598,
"preview": "import copy\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_SSL.Base.DeepModelMixin import D"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/SSGMM.py",
"chars": 5866,
"preview": "import copy\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom sklearn.base import ClassifierMixin\nim"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/SSVAE.py",
"chars": 11522,
"preview": "import copy\nimport torch.nn.functional as F\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/SemiBoost.py",
"chars": 7786,
"preview": "import numpy as np\nfrom sklearn import neighbors\nimport copy\nfrom sklearn.metrics.pairwise import rbf_kernel,linear_kern"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/SoftMatch.py",
"chars": 10004,
"preview": "import copy\r\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\r\nfrom LAMDA_SSL.Base.DeepModelMixin import"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/Supervised.py",
"chars": 7221,
"preview": "import torch\r\n\r\nfrom LAMDA_SSL.Base.DeepModelMixin import DeepModelMixin\r\nfrom LAMDA_SSL.Base.InductiveEstimator import "
},
{
"path": "LAMDA_SSL/Algorithm/Classification/TSVM.py",
"chars": 8792,
"preview": "import copy\nfrom LAMDA_SSL.Base.TransductiveEstimator import TransductiveEstimator\nfrom sklearn.base import ClassifierMi"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/TemporalEnsembling.py",
"chars": 8980,
"preview": "from LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_SSL.Base.DeepModelMixin import DeepModelMixi"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/Tri_Training.py",
"chars": 5127,
"preview": "import copy\nimport numpy as np\nimport sklearn\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom skle"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/UASD.py",
"chars": 8268,
"preview": "from LAMDA_SSL.Base.DeepModelMixin import DeepModelMixin\r\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimat"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/UDA.py",
"chars": 8600,
"preview": "import copy\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_SSL.Base.DeepModelMixin import D"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/VAT.py",
"chars": 7990,
"preview": "from LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_SSL.Base.DeepModelMixin import DeepModelMixi"
},
{
"path": "LAMDA_SSL/Algorithm/Classification/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "LAMDA_SSL/Algorithm/Clustering/Constrained_Seed_k_means.py",
"chars": 4950,
"preview": "import numpy as np\nfrom sklearn.base import ClusterMixin\nfrom LAMDA_SSL.Base.TransductiveEstimator import TransductiveEs"
},
{
"path": "LAMDA_SSL/Algorithm/Clustering/Constrained_k_means.py",
"chars": 7886,
"preview": "import numpy as np\nfrom sklearn.base import ClusterMixin\nfrom LAMDA_SSL.Base.TransductiveEstimator import TransductiveEs"
},
{
"path": "LAMDA_SSL/Algorithm/Clustering/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "LAMDA_SSL/Algorithm/Regression/CoReg.py",
"chars": 5954,
"preview": "import copy\nfrom sklearn.base import RegressorMixin\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfro"
},
{
"path": "LAMDA_SSL/Algorithm/Regression/ICTReg.py",
"chars": 8107,
"preview": "from LAMDA_SSL.Base.InductiveEstimator import InductiveEstimator\nfrom LAMDA_SSL.Base.DeepModelMixin import DeepModelMixi"
},
{
"path": "LAMDA_SSL/Algorithm/Regression/MeanTeacherReg.py",
"chars": 7873,
"preview": "from LAMDA_SSL.Base.DeepModelMixin import DeepModelMixin\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimato"
},
{
"path": "LAMDA_SSL/Algorithm/Regression/PiModelReg.py",
"chars": 7653,
"preview": "from LAMDA_SSL.Base.DeepModelMixin import DeepModelMixin\nfrom LAMDA_SSL.Base.InductiveEstimator import InductiveEstimato"
},
{
"path": "LAMDA_SSL/Algorithm/Regression/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "LAMDA_SSL/Algorithm/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "LAMDA_SSL/Augmentation/Graph/DropEdges.py",
"chars": 1337,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\nfrom sklearn.utils import check_random_state\nimport numpy as np\n\n\ncla"
},
{
"path": "LAMDA_SSL/Augmentation/Graph/DropNodes.py",
"chars": 2412,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\nfrom sklearn.utils import check_random_state\nimport numpy as np\nclass"
},
{
"path": "LAMDA_SSL/Augmentation/Graph/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "LAMDA_SSL/Augmentation/Tabular/Noise.py",
"chars": 928,
"preview": "import numbers\n\nimport torch\n\nfrom LAMDA_SSL.Base.Transformer import Transformer\nimport numpy as np\n\nclass Noise(Transfo"
},
{
"path": "LAMDA_SSL/Augmentation/Tabular/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "LAMDA_SSL/Augmentation/Text/RandomDeletion.py",
"chars": 1281,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\nimport random\nfrom LAMDA_SSL.Transform.Text.Tokenizer import Tokenize"
},
{
"path": "LAMDA_SSL/Augmentation/Text/RandomSwap.py",
"chars": 1210,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\nimport random\nfrom LAMDA_SSL.Transform.Text.Tokenizer import Tokenize"
},
{
"path": "LAMDA_SSL/Augmentation/Text/TFIDFReplacement.py",
"chars": 4373,
"preview": "import math\nimport numpy as np\nfrom LAMDA_SSL.Base.Transformer import Transformer\nfrom collections import Counter,defaul"
},
{
"path": "LAMDA_SSL/Augmentation/Text/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "LAMDA_SSL/Augmentation/Vision/AutoContrast.py",
"chars": 595,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\nimport torchvision.transforms.functional as F\nimport PIL\nimport torc"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/Brightness.py",
"chars": 1743,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\nimport torchvision.transforms.functional as F\nimport torch\nimport PIL"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/CenterCrop.py",
"chars": 2458,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\r\nfrom torchvision import transforms\r\nfrom LAMDA_SSL.utils import part"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/Color.py",
"chars": 1731,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\nimport torchvision.transforms.functional as F\nimport torch\nimport PIL"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/Contrast.py",
"chars": 1733,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\nimport torchvision.transforms.functional as F\nimport torch\nimport PIL"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/Cutout.py",
"chars": 985,
"preview": "import PIL,PIL.ImageEnhance\nimport torch\n\nfrom LAMDA_SSL.Base.Transformer import Transformer\nfrom LAMDA_SSL.Augmentation"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/CutoutAbs.py",
"chars": 3122,
"preview": "import copy\nimport numbers\nimport random\n\nimport PIL.Image\n\nfrom LAMDA_SSL.Base.Transformer import Transformer\n\nimport n"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/Equalize.py",
"chars": 740,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\nimport torchvision.transforms.functional as F\nimport PIL\nimport torch"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/Identity.py",
"chars": 271,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\n\nclass Identity(Transformer):\n def __init__(self):\n super()"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/Invert.py",
"chars": 575,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\nimport torchvision.transforms.functional as F\nimport PIL\nimport torch"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/Mixup.py",
"chars": 961,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\nimport numpy as np\n\nclass Mixup(Transformer):\n def __init__(self, "
},
{
"path": "LAMDA_SSL/Augmentation/Vision/Posterize.py",
"chars": 1480,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\nimport torchvision.transforms.functional as F\nimport torch\nimport PIL"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/RandAugment.py",
"chars": 3973,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\nimport numpy as np\nimport random\nimport PIL\n\ndef AutoContrast(X, **kw"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/RandomCrop.py",
"chars": 2564,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\nfrom torchvision import transforms\nfrom LAMDA_SSL.utils import partia"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/RandomHorizontalFlip.py",
"chars": 806,
"preview": "import torch\nimport numpy as np\nfrom LAMDA_SSL.Base.Transformer import Transformer\nfrom torchvision import transforms\nim"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/Rotate.py",
"chars": 1658,
"preview": "import torch\nimport numpy as np\nimport PIL\nfrom LAMDA_SSL.Base.Transformer import Transformer\nimport torchvision.transfo"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/Sharpness.py",
"chars": 1737,
"preview": "import numpy as np\n\nfrom LAMDA_SSL.Base.Transformer import Transformer\nimport torchvision.transforms.functional as F\nimp"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/ShearX.py",
"chars": 1781,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\nimport torchvision.transforms.functional as F\nimport math\nimport rand"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/ShearY.py",
"chars": 1782,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\nimport torchvision.transforms.functional as F\nimport math\nimport rand"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/Solarize.py",
"chars": 1454,
"preview": "import PIL.Image\nimport torch\nimport numpy as np\nfrom LAMDA_SSL.Base.Transformer import Transformer\nimport torchvision.t"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/TranslateX.py",
"chars": 1885,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\nimport torchvision.transforms.functional as F\nimport random\nimport to"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/TranslateY.py",
"chars": 1883,
"preview": "from LAMDA_SSL.Base.Transformer import Transformer\nimport torchvision.transforms.functional as F\nimport random\nimport to"
},
{
"path": "LAMDA_SSL/Augmentation/Vision/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "LAMDA_SSL/Augmentation/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "LAMDA_SSL/Base/BaseOptimizer.py",
"chars": 520,
"preview": "from torch.optim.optimizer import Optimizer\nclass BaseOptimizer:\n def __init__(self,defaults):\n # >> - default"
},
{
"path": "LAMDA_SSL/Base/BaseSampler.py",
"chars": 316,
"preview": "from torch.utils.data.sampler import Sampler\nclass BaseSampler:\n def __init__(self):\n pass\n def init_sample"
},
{
"path": "LAMDA_SSL/Base/BaseScheduler.py",
"chars": 616,
"preview": "from torch.optim import lr_scheduler\nclass BaseScheduler:\n def __init__(self, last_epoch=-1, verbose=False):\n "
},
{
"path": "LAMDA_SSL/Base/ClassifierEvaluation.py",
"chars": 473,
"preview": "from abc import ABC,abstractmethod\n\nclass ClassifierEvaluation(ABC):\n def __init__(self):\n pass\n @abstractm"
},
{
"path": "LAMDA_SSL/Base/ClusterEvaluation.py",
"chars": 452,
"preview": "from abc import ABC,abstractmethod\nclass ClusterEvaluation(ABC):\n def __init__(self):\n pass\n @abstractmetho"
},
{
"path": "LAMDA_SSL/Base/DeepModelMixin.py",
"chars": 21547,
"preview": "import copy\nfrom math import ceil\nimport torch\nfrom LAMDA_SSL.Base.SemiEstimator import SemiEstimator\nfrom torch.utils.d"
},
{
"path": "LAMDA_SSL/Base/GraphMixin.py",
"chars": 824,
"preview": "from LAMDA_SSL.Transform.Graph.NormalizeFeatures import NormalizeFeatures\nfrom sklearn.pipeline import Pipeline\n\nclass G"
},
{
"path": "LAMDA_SSL/Base/InductiveEstimator.py",
"chars": 377,
"preview": "from .SemiEstimator import SemiEstimator\nfrom abc import abstractmethod\n\nclass InductiveEstimator(SemiEstimator):\n __"
},
{
"path": "LAMDA_SSL/Base/LambdaLR.py",
"chars": 828,
"preview": "from torch.optim import lr_scheduler\nfrom LAMDA_SSL.Base.BaseScheduler import BaseScheduler\nclass LambdaLR(BaseScheduler"
},
{
"path": "LAMDA_SSL/Base/RegressorEvaluation.py",
"chars": 379,
"preview": "from abc import ABC,abstractmethod\n\nclass RegressorEvaluation(ABC):\n def __init__(self):\n pass\n @abstractme"
},
{
"path": "LAMDA_SSL/Base/SemiEstimator.py",
"chars": 464,
"preview": "from sklearn.base import BaseEstimator\nfrom abc import ABC,abstractmethod\nclass SemiEstimator(ABC,BaseEstimator):\n @a"
},
{
"path": "LAMDA_SSL/Base/TabularMixin.py",
"chars": 772,
"preview": "from sklearn.pipeline import Pipeline\nfrom sklearn import preprocessing\nfrom LAMDA_SSL.Transform.ToTensor import ToTenso"
},
{
"path": "LAMDA_SSL/Base/TextMixin.py",
"chars": 3347,
"preview": "import copy\nfrom LAMDA_SSL.Transform.Text.Vocab import Vocab\nfrom sklearn.pipeline import Pipeline\nfrom LAMDA_SSL.Transf"
},
{
"path": "LAMDA_SSL/Base/TransductiveEstimator.py",
"chars": 670,
"preview": "from .SemiEstimator import SemiEstimator\nfrom abc import abstractmethod\n\nclass TransductiveEstimator(SemiEstimator):\n "
},
{
"path": "LAMDA_SSL/Base/Transformer.py",
"chars": 1324,
"preview": "from abc import abstractmethod,ABC\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\nclass Transformer(BaseEstim"
},
{
"path": "LAMDA_SSL/Base/VisionMixin.py",
"chars": 1672,
"preview": "from LAMDA_SSL.Transform.Vision.Normalization import Normalization\nfrom LAMDA_SSL.Transform.ToTensor import ToTensor\nfro"
},
{
"path": "LAMDA_SSL/Base/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "LAMDA_SSL/Config/Assemble.py",
"chars": 754,
"preview": "from sklearn.svm import SVC\nfrom LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy\nfrom LAMDA_SSL.Evaluation.Clas"
},
{
"path": "LAMDA_SSL/Config/CAFA.py",
"chars": 4948,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "LAMDA_SSL/Config/CoReg.py",
"chars": 490,
"preview": "from LAMDA_SSL.Evaluation.Regressor.Mean_Absolute_Error import Mean_Absolute_Error\nfrom LAMDA_SSL.Evaluation.Regressor.M"
},
{
"path": "LAMDA_SSL/Config/Co_Training.py",
"chars": 290,
"preview": "from sklearn.svm import SVC\nrandom_state=None\nbase_estimator = SVC(C=1.0,kernel='rbf',probability=True,gamma='auto')\nbas"
},
{
"path": "LAMDA_SSL/Config/Constrained_Seed_k_means.py",
"chars": 680,
"preview": "from LAMDA_SSL.Evaluation.Cluster.Davies_Bouldin_Score import Davies_Bouldin_Score\nfrom LAMDA_SSL.Evaluation.Cluster.Fow"
},
{
"path": "LAMDA_SSL/Config/Constrained_k_means.py",
"chars": 680,
"preview": "from LAMDA_SSL.Evaluation.Cluster.Davies_Bouldin_Score import Davies_Bouldin_Score\nfrom LAMDA_SSL.Evaluation.Cluster.Fow"
},
{
"path": "LAMDA_SSL/Config/FixMatch.py",
"chars": 4944,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "LAMDA_SSL/Config/FlexMatch.py",
"chars": 5033,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "LAMDA_SSL/Config/GAT.py",
"chars": 994,
"preview": "from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy\nfrom LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import"
},
{
"path": "LAMDA_SSL/Config/GCN.py",
"chars": 976,
"preview": "from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy\nfrom LAMDA_SSL.Evaluation.Classifier.Top_k_Accuracy import"
},
{
"path": "LAMDA_SSL/Config/ICT.py",
"chars": 4336,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "LAMDA_SSL/Config/ICTReg.py",
"chars": 2801,
"preview": "from LAMDA_SSL.Opitimizer.SGD import SGD\nfrom LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR\nfrom LAMDA_"
},
{
"path": "LAMDA_SSL/Config/ImprovedGAN.py",
"chars": 3322,
"preview": "import torch.nn as nn\nfrom LAMDA_SSL.Opitimizer.Adam import Adam\nfrom LAMDA_SSL.Dataloader.UnlabeledDataloader import Un"
},
{
"path": "LAMDA_SSL/Config/LabelPropagation.py",
"chars": 721,
"preview": "from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy\nfrom LAMDA_SSL.Evaluation.Classifier.Precision import Prec"
},
{
"path": "LAMDA_SSL/Config/LabelSpreading.py",
"chars": 731,
"preview": "from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy\nfrom LAMDA_SSL.Evaluation.Classifier.Precision import Prec"
},
{
"path": "LAMDA_SSL/Config/LadderNetwork.py",
"chars": 3361,
"preview": "import torch.nn as nn\nfrom LAMDA_SSL.Opitimizer.Adam import Adam\nfrom LAMDA_SSL.Dataloader.UnlabeledDataloader import Un"
},
{
"path": "LAMDA_SSL/Config/LapSVM.py",
"chars": 831,
"preview": "from LAMDA_SSL.Evaluation.Classifier.Recall import Recall\nfrom LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy\n"
},
{
"path": "LAMDA_SSL/Config/MeanTeacher.py",
"chars": 4336,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "LAMDA_SSL/Config/MeanTeacherReg.py",
"chars": 2788,
"preview": "from LAMDA_SSL.Opitimizer.SGD import SGD\nfrom LAMDA_SSL.Scheduler.CosineAnnealingLR import CosineAnnealingLR\nfrom LAMDA_"
},
{
"path": "LAMDA_SSL/Config/MixMatch.py",
"chars": 4359,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "LAMDA_SSL/Config/PiModel.py",
"chars": 4321,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "LAMDA_SSL/Config/PiModelReg.py",
"chars": 2772,
"preview": "from LAMDA_SSL.Augmentation.Tabular.Noise import Noise\nfrom LAMDA_SSL.Opitimizer.SGD import SGD\nfrom LAMDA_SSL.Scheduler"
},
{
"path": "LAMDA_SSL/Config/PseudoLabel.py",
"chars": 4336,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "LAMDA_SSL/Config/ReMixMatch.py",
"chars": 5051,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "LAMDA_SSL/Config/S4L.py",
"chars": 4416,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "LAMDA_SSL/Config/SDNE.py",
"chars": 1220,
"preview": "from sklearn.linear_model import LogisticRegression\nfrom LAMDA_SSL.Evaluation.Classifier.Precision import Precision\nfrom"
},
{
"path": "LAMDA_SSL/Config/SSGMM.py",
"chars": 703,
"preview": "from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy\nfrom LAMDA_SSL.Evaluation.Classifier.Precision import Prec"
},
{
"path": "LAMDA_SSL/Config/SSVAE.py",
"chars": 3314,
"preview": "import torch.nn as nn\nfrom LAMDA_SSL.Opitimizer.Adam import Adam\nfrom LAMDA_SSL.Dataloader.UnlabeledDataloader import Un"
},
{
"path": "LAMDA_SSL/Config/SemiBoost.py",
"chars": 848,
"preview": "from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy\nfrom LAMDA_SSL.Evaluation.Classifier.Recall import Recall\n"
},
{
"path": "LAMDA_SSL/Config/TSVM.py",
"chars": 881,
"preview": "from LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy\nfrom LAMDA_SSL.Evaluation.Classifier.Precision import Prec"
},
{
"path": "LAMDA_SSL/Config/TemporalEnsembling.py",
"chars": 4380,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "LAMDA_SSL/Config/Tri_Training.py",
"chars": 872,
"preview": "from LAMDA_SSL.Evaluation.Classifier.F1 import F1\nfrom LAMDA_SSL.Evaluation.Classifier.Accuracy import Accuracy\nfrom LAM"
},
{
"path": "LAMDA_SSL/Config/UDA.py",
"chars": 4980,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "LAMDA_SSL/Config/VAT.py",
"chars": 4378,
"preview": "from LAMDA_SSL.Augmentation.Vision.RandomHorizontalFlip import RandomHorizontalFlip\nfrom LAMDA_SSL.Augmentation.Vision.R"
},
{
"path": "LAMDA_SSL/Config/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "LAMDA_SSL/Dataloader/LabeledDataloader.py",
"chars": 6401,
"preview": "from torch.utils.data.dataloader import DataLoader\nfrom LAMDA_SSL.Base.BaseSampler import BaseSampler\nfrom LAMDA_SSL.Sam"
},
{
"path": "LAMDA_SSL/Dataloader/TrainDataloader.py",
"chars": 27914,
"preview": "import copy\n\nfrom torch.utils.data.dataloader import DataLoader\nfrom LAMDA_SSL.Base.BaseSampler import BaseSampler\nfrom "
},
{
"path": "LAMDA_SSL/Dataloader/UnlabeledDataloader.py",
"chars": 6419,
"preview": "from torch.utils.data.dataloader import DataLoader\nfrom LAMDA_SSL.Base.BaseSampler import BaseSampler\nfrom LAMDA_SSL.Sam"
},
{
"path": "LAMDA_SSL/Dataloader/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "LAMDA_SSL/Dataset/Graph/Cora.py",
"chars": 7381,
"preview": "import numpy as np\nfrom LAMDA_SSL.Dataset.SemiDataset import SemiDataset\nfrom LAMDA_SSL.Split.DataSplit import DataSplit"
},
{
"path": "LAMDA_SSL/Dataset/Graph/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "LAMDA_SSL/Dataset/LabeledDataset.py",
"chars": 8448,
"preview": "import copy\nfrom LAMDA_SSL.utils import indexing\nfrom torch.utils.data.dataset import Dataset\nfrom LAMDA_SSL.utils impor"
},
{
"path": "LAMDA_SSL/Dataset/SemiDataset.py",
"chars": 11157,
"preview": "from torch.utils.data import Dataset\nfrom .TrainDataset import TrainDataset\nfrom .LabeledDataset import LabeledDataset\nf"
},
{
"path": "LAMDA_SSL/Dataset/Tabular/Boston.py",
"chars": 5602,
"preview": "from LAMDA_SSL.Dataset.SemiDataset import SemiDataset\nfrom LAMDA_SSL.Base.TabularMixin import TabularMixin\nfrom LAMDA_SS"
},
{
"path": "LAMDA_SSL/Dataset/Tabular/BreastCancer.py",
"chars": 5585,
"preview": "from LAMDA_SSL.Dataset.SemiDataset import SemiDataset\nfrom LAMDA_SSL.Base.TabularMixin import TabularMixin\nfrom LAMDA_SS"
},
{
"path": "LAMDA_SSL/Dataset/Tabular/Wine.py",
"chars": 5569,
"preview": "from LAMDA_SSL.Dataset.SemiDataset import SemiDataset\nfrom LAMDA_SSL.Base.TabularMixin import TabularMixin\nfrom LAMDA_SS"
},
{
"path": "LAMDA_SSL/Dataset/Tabular/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "LAMDA_SSL/Dataset/Text/IMDB.py",
"chars": 5708,
"preview": "from LAMDA_SSL.Dataset.SemiDataset import SemiDataset\nfrom LAMDA_SSL.Base.TextMixin import TextMixin\nfrom torchtext.util"
},
{
"path": "LAMDA_SSL/Dataset/Text/SST2.py",
"chars": 6056,
"preview": "from LAMDA_SSL.Dataset.SemiDataset import SemiDataset\nfrom LAMDA_SSL.Base.TextMixin import TextMixin\nfrom torchtext.util"
},
{
"path": "LAMDA_SSL/Dataset/Text/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "LAMDA_SSL/Dataset/TrainDataset.py",
"chars": 6245,
"preview": "from torch.utils.data import Dataset\nfrom .LabeledDataset import LabeledDataset\nfrom LAMDA_SSL.Dataset.UnlabeledDataset "
},
{
"path": "LAMDA_SSL/Dataset/UnlabeledDataset.py",
"chars": 4865,
"preview": "import copy\nimport torch\nfrom LAMDA_SSL.utils import indexing\nfrom torch.utils.data.dataset import Dataset\nfrom LAMDA_SS"
},
{
"path": "LAMDA_SSL/Dataset/Vision/CIFAR10.py",
"chars": 8464,
"preview": "import numpy as np\nfrom LAMDA_SSL.Dataset.SemiDataset import SemiDataset\nfrom LAMDA_SSL.Base.VisionMixin import VisionMi"
},
{
"path": "LAMDA_SSL/Dataset/Vision/ImageCLEF.py",
"chars": 2163,
"preview": "import os\r\nfrom PIL import Image\r\nfrom torch.utils.data.dataset import Dataset\r\ndef make_dataset_with_labels(dir, classn"
},
{
"path": "LAMDA_SSL/Dataset/Vision/Mnist.py",
"chars": 5769,
"preview": "from LAMDA_SSL.Dataset.SemiDataset import SemiDataset\nfrom LAMDA_SSL.Base.VisionMixin import VisionMixin\nfrom LAMDA_SSL."
},
{
"path": "LAMDA_SSL/Dataset/Vision/Office31.py",
"chars": 2554,
"preview": "import os\r\nfrom PIL import Image\r\nfrom torch.utils.data.dataset import Dataset\r\ndef make_dataset_with_labels(dir, classn"
},
{
"path": "LAMDA_SSL/Dataset/Vision/VisDA.py",
"chars": 2531,
"preview": "import os\r\nfrom PIL import Image\r\nfrom torch.utils.data.dataset import Dataset\r\ndef make_dataset_with_labels(dir, classn"
},
{
"path": "LAMDA_SSL/Dataset/Vision/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "LAMDA_SSL/Dataset/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "LAMDA_SSL/Distributed/DataParallel.py",
"chars": 604,
"preview": "from torch import nn\nclass DataParallel:\n def __init__(self, device_ids=None, output_device=None, dim=0):\n # >"
},
{
"path": "LAMDA_SSL/Distributed/DistributedDataParallel.py",
"chars": 3347,
"preview": "from torch.nn import parallel\nclass DistributedDataParallel:\n def __init__(\n self,\n device_ids=None,\n "
},
{
"path": "LAMDA_SSL/Distributed/__init__.py",
"chars": 0,
"preview": ""
},
{
"path": "LAMDA_SSL/Evaluation/Classifier/AUC.py",
"chars": 1571,
"preview": "from LAMDA_SSL.Base.ClassifierEvaluation import ClassifierEvaluation\nfrom sklearn.metrics import roc_auc_score\nfrom LAMD"
},
{
"path": "LAMDA_SSL/Evaluation/Classifier/Accuracy.py",
"chars": 984,
"preview": "from LAMDA_SSL.Base.ClassifierEvaluation import ClassifierEvaluation\nfrom sklearn.metrics import accuracy_score\nfrom LAM"
},
{
"path": "LAMDA_SSL/Evaluation/Classifier/Confusion_Matrix.py",
"chars": 1129,
"preview": "from LAMDA_SSL.utils import partial\nfrom LAMDA_SSL.Base.ClassifierEvaluation import ClassifierEvaluation\nfrom sklearn.me"
}
]
// ... and 105 more files (download for full content)
About this extraction
This page contains the full source code of the YGZWQZD/LAMDA-SSL GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction covers 305 files (1.5 MB, approximately 382.0k tokens) and includes a symbol index of 1,010 extracted functions, classes, methods, constants, and types. Use it with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — a free GitHub-repo-to-text converter for AI. Built by Nikandr Surkov.