# Tail of the link-prediction evaluate helper: every score at or above the k-th
# largest score (k = number of true edges) is predicted as an edge; the helper
# returns ROC-AUC, F1 of the thresholded predictions, and PR-AUC.
sorted_pred = prediction_list[:]
sorted_pred.sort()
threshold = sorted_pred[-len(true_edges)]
y_pred = np.zeros(len(prediction_list), dtype=np.int32)
for i in range(len(prediction_list)):
    if prediction_list[i] >= threshold:
        y_pred[i] = 1
y_true = np.array(true_list)
y_scores = np.array(prediction_list)
ps, rs, _ = precision_recall_curve(y_true, y_scores)
return roc_auc_score(y_true, y_scores), f1_score(y_true, y_pred), auc(rs, ps)
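# Example (not part of cogdl): a self-contained sketch of the same top-k
# evaluation on toy scores. Variable names mirror the snippet above; the
# numbers are invented purely for illustration.
import numpy as np
from sklearn.metrics import auc, f1_score, precision_recall_curve, roc_auc_score

true_list = [1, 1, 1, 0, 0, 0]                    # 1 = true edge, 0 = sampled negative
prediction_list = [0.9, 0.8, 0.4, 0.7, 0.3, 0.2]  # similarity scores
num_true = sum(true_list)                         # stands in for len(true_edges)

# Top-k rule: the k highest scores are predicted as edges.
threshold = sorted(prediction_list)[-num_true]
y_pred = (np.array(prediction_list) >= threshold).astype(np.int32)

y_true = np.array(true_list)
y_scores = np.array(prediction_list)
ps, rs, _ = precision_recall_curve(y_true, y_scores)
print(roc_auc_score(y_true, y_scores), f1_score(y_true, y_pred), auc(rs, ps))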
@register_task("link_prediction")
class LinkPrediction(BaseTask):
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument("--hidden-size", type=int, default=128)
parser.add_argument("--negative-ratio", type=int, default=5)
# fmt: on
def __init__(self, args):
super(LinkPrediction, self).__init__(args)
dataset = build_dataset(args)
data = dataset[0]
self.data = data
if hasattr(dataset, "num_features"):
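# Example (not part of cogdl): how the add_args hook above contributes its flags
# to a parser. cogdl wires this through its own options module; plain argparse is
# used here only to sketch the flag parsing itself.
import argparse

demo_parser = argparse.ArgumentParser("link_prediction demo")
LinkPrediction.add_args(demo_parser)
demo_args = demo_parser.parse_args(["--hidden-size", "64", "--negative-ratio", "3"])
print(demo_args.hidden_size, demo_args.negative_ratio)  # 64 3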
import warnings

from sklearn.metrics import f1_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.utils import shuffle as skshuffle
from tqdm import tqdm

from cogdl import options
from cogdl.data import Dataset, InMemoryDataset
from cogdl.datasets import build_dataset
from cogdl.models import build_model

from . import BaseTask, register_task

warnings.filterwarnings("ignore")


@register_task("unsupervised_node_classification")
class UnsupervisedNodeClassification(BaseTask):
    """Unsupervised node classification task."""

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        parser.add_argument("--hidden-size", type=int, default=128)
        parser.add_argument("--num-shuffle", type=int, default=5)
        # fmt: on

    def __init__(self, args):
        super(UnsupervisedNodeClassification, self).__init__(args)
        dataset = build_dataset(args)
        self.data = dataset[0]
        if issubclass(dataset.__class__.__bases__[0], InMemoryDataset):
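# Example (not part of cogdl): a toy illustration of the base-class test on the
# line above. dataset.__class__.__bases__[0] is the first direct base of the
# dataset's class; issubclass asks whether that base derives from InMemoryDataset.
# All classes here are invented stand-ins.
class InMemoryDatasetDemo:
    pass

class MatrixDatasetDemo(InMemoryDatasetDemo):
    pass

class ConcreteDatasetDemo(MatrixDatasetDemo):
    pass

demo_dataset = ConcreteDatasetDemo()
print(issubclass(demo_dataset.__class__.__bases__[0], InMemoryDatasetDemo))  # True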
# Same top-k evaluation tail as above, here shown ahead of the multiplex
# link-prediction task below.
sorted_pred = prediction_list[:]
sorted_pred.sort()
threshold = sorted_pred[-len(true_edges)]
y_pred = np.zeros(len(prediction_list), dtype=np.int32)
for i in range(len(prediction_list)):
    if prediction_list[i] >= threshold:
        y_pred[i] = 1
y_true = np.array(true_list)
y_scores = np.array(prediction_list)
ps, rs, _ = precision_recall_curve(y_true, y_scores)
return roc_auc_score(y_true, y_scores), f1_score(y_true, y_pred), auc(rs, ps)
@register_task("multiplex_link_prediction")
class MultiplexLinkPrediction(BaseTask):
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument("--hidden-size", type=int, default=200)
parser.add_argument("--negative-ratio", type=int, default=5)
parser.add_argument("--eval-type", type=str, default='all', nargs='+')
# fmt: on
def __init__(self, args):
super(MultiplexLinkPrediction, self).__init__(args)
dataset = build_dataset(args)
data = dataset[0]
self.data = data
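# Example (not part of cogdl): behaviour of the --eval-type flag above with
# nargs='+'. With no flag the string default 'all' is kept; with explicit edge
# types a list of strings is produced. Plain argparse, illustration only.
import argparse

mlp_parser = argparse.ArgumentParser("multiplex_link_prediction demo")
MultiplexLinkPrediction.add_args(mlp_parser)
print(mlp_parser.parse_args([]).eval_type)                         # 'all'
print(mlp_parser.parse_args(["--eval-type", "1", "2"]).eval_type)  # ['1', '2']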
import copy
import random

import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm

from cogdl import options
from cogdl.datasets import build_dataset
from cogdl.models import build_model

from . import BaseTask, register_task


@register_task("node_classification")
class NodeClassification(BaseTask):
    """Node classification task."""

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        # parser.add_argument("--num-features", type=int)
        # fmt: on

    def __init__(self, args):
        super(NodeClassification, self).__init__(args)
        dataset = build_dataset(args)
        self.data = dataset.data
        # Apply .cuda() to the graph's tensor attributes, moving them to the GPU.
        self.data.apply(lambda x: x.cuda())
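# Example (not part of cogdl): a defensive variant of the .cuda() call above.
# It assumes, as the snippet does, that data.apply runs the given function on the
# graph's tensor attributes; it falls back to CPU when no GPU is available.
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
data = dataset.data            # the same object assigned to self.data above
data.apply(lambda x: x.to(device))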
import copy
import random

import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm

from cogdl import options
from cogdl.datasets import build_dataset
from cogdl.models import build_model

from . import BaseTask, register_task


@register_task("node_classification_sample")
class NodeClassification(BaseTask):
    """Node classification task."""

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        parser.add_argument("--batch-size", type=int, default=256)
        # fmt: off
        # parser.add_argument("--num-features", type=int)
        # fmt: on

    def __init__(self, args):
        super(NodeClassification, self).__init__(args)
        dataset = build_dataset(args)
        data = dataset[0]
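# Example (not part of cogdl): what a --batch-size of 256 typically means for the
# sampled task above: node indices are shuffled and consumed in fixed-size chunks.
# num_nodes is a made-up graph size; the real sampler lives elsewhere in cogdl.
import torch

num_nodes = 1000
batch_size = 256
perm = torch.randperm(num_nodes)
batches = [perm[i:i + batch_size] for i in range(0, num_nodes, batch_size)]
print([len(b) for b in batches])  # [256, 256, 256, 232]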
import torch.nn as nn  # needed for nn.Module below


class CotrainingModel(nn.Module):
    def __init__(self, args):
        super(CotrainingModel, self).__init__()
        self.model_1 = build_model(args)
        self.model_2 = build_model(args)
        self.dropout = args.dropout

    def forward(self, x1, x2, A1, A2):
        x1 = F.dropout(x1, self.dropout, training=self.training)
        x2 = F.dropout(x2, self.dropout, training=self.training)
        return self.model_1(x1, A1), self.model_2(x2, A2)
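# Example (not part of cogdl): a self-contained toy of the two-branch pattern in
# CotrainingModel.forward, with nn.Linear standing in for build_model(args) and
# the graph arguments dropped. Each view gets its own dropout and its own model.
import torch
import torch.nn as nn
import torch.nn.functional as F

class ToyCotraining(nn.Module):
    def __init__(self, in_dim, out_dim, dropout=0.5):
        super().__init__()
        self.model_1 = nn.Linear(in_dim, out_dim)
        self.model_2 = nn.Linear(in_dim, out_dim)
        self.dropout = dropout

    def forward(self, x1, x2):
        x1 = F.dropout(x1, self.dropout, training=self.training)
        x2 = F.dropout(x2, self.dropout, training=self.training)
        return self.model_1(x1), self.model_2(x2)

out1, out2 = ToyCotraining(16, 7)(torch.randn(4, 16), torch.randn(4, 16))
print(out1.shape, out2.shape)  # torch.Size([4, 7]) torch.Size([4, 7])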
@register_task("node_classification_cotraining")
class NodeClassificationCotraining(BaseTask):
"""Node classification task with cotraining (NSGCN)."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument("--order", type=int, default=5)
# fmt: on
def __init__(self, args):
super(NodeClassificationCotraining, self).__init__(args)
dataset = build_dataset(args)
data = dataset[0]
self.data = data.cuda()
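# Example (not part of cogdl): a toy version of the decorator-based registry idea
# behind @register_task(...). This is not cogdl's implementation, only the pattern:
# the decorator records the class under a string key so tasks can later be built
# by name.
TASK_REGISTRY = {}

def register_task_demo(name):
    def wrapper(cls):
        TASK_REGISTRY[name] = cls
        return cls
    return wrapper

@register_task_demo("my_task")
class MyDemoTask:
    pass

print(TASK_REGISTRY["my_task"] is MyDemoTask)  # True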