def test_edge_batch():
    d = 10
    g = dgl.DGLGraph(nx.path_graph(20))
    nfeat = F.randn((g.number_of_nodes(), d))
    efeat = F.randn((g.number_of_edges(), d))
    g.ndata['x'] = nfeat
    g.edata['x'] = efeat
    # test all
    eid = utils.toindex(slice(0, g.number_of_edges()))
    u, v, _ = g._graph.edges('eid')
    src_data = g.get_n_repr(u)
    edge_data = g.get_e_repr(eid)
    dst_data = g.get_n_repr(v)
    ebatch = EdgeBatch((u, v, eid), src_data, edge_data, dst_data)
    assert F.shape(ebatch.src['x'])[0] == g.number_of_edges() and \
        F.shape(ebatch.src['x'])[1] == d
    assert F.shape(ebatch.dst['x'])[0] == g.number_of_edges() and \
        F.shape(ebatch.dst['x'])[1] == d
def test_local_var():
    g = DGLGraph(nx.path_graph(5))
    g.ndata['h'] = F.zeros((g.number_of_nodes(), 3))
    g.edata['w'] = F.zeros((g.number_of_edges(), 4))

    # test override: writes inside local_var() must not leak to the original graph
    def foo(g):
        g = g.local_var()
        g.ndata['h'] = F.ones((g.number_of_nodes(), 3))
        g.edata['w'] = F.ones((g.number_of_edges(), 4))
    foo(g)
    assert F.allclose(g.ndata['h'], F.zeros((g.number_of_nodes(), 3)))
    assert F.allclose(g.edata['w'], F.zeros((g.number_of_edges(), 4)))

    # test out-of-place update on a node/edge subset
    def foo(g):
        g = g.local_var()
        g.nodes[[2, 3]].data['h'] = F.ones((2, 3))
        g.edges[[2, 3]].data['w'] = F.ones((2, 4))
    foo(g)
    assert F.allclose(g.ndata['h'], F.zeros((g.number_of_nodes(), 3)))
    assert F.allclose(g.edata['w'], F.zeros((g.number_of_edges(), 4)))
def test_spmv_3d_feat():
    def src_mul_edge_udf(edges):
        return {'sum': edges.src['h'] * F.unsqueeze(F.unsqueeze(edges.data['h'], 1), 1)}

    def sum_udf(nodes):
        return {'h': F.sum(nodes.mailbox['sum'], 1)}

    n = 100
    p = 0.1
    a = sp.random(n, n, p, data_rvs=lambda n: np.ones(n))
    g = dgl.DGLGraph(a)
    m = g.number_of_edges()

    # test #1: v2v with adj data
    h = F.randn((n, 5, 5))
    e = F.randn((m,))

    g.ndata['h'] = h
    g.edata['h'] = e
    g.update_all(message_func=fn.src_mul_edge('h', 'h', 'sum'),
                 reduce_func=fn.sum('sum', 'h'))  # 1. builtin message + builtin reduce
    ans = g.ndata['h']

    g.ndata['h'] = h
    g.edata['h'] = e
    g.update_all(message_func=src_mul_edge_udf,
                 reduce_func=fn.sum('sum', 'h'))  # 2. UDF message + builtin reduce
    assert F.allclose(g.ndata['h'], ans)
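    # (not part of the original excerpt) the UDF reducer defined above can be
    # cross-checked the same way; a minimal sketch under the same setup, assuming
    # degree-bucketed UDF reduction matches the builtin sum:
    g.ndata['h'] = h
    g.edata['h'] = e
    g.update_all(message_func=src_mul_edge_udf, reduce_func=sum_udf)
    assert F.allclose(g.ndata['h'], ans)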
def test_batch_no_edge():
    g1 = dgl.DGLGraph()
    g1.add_nodes(6)
    g1.add_edges([4, 4, 2, 2, 0], [5, 3, 3, 1, 1])
    g2 = dgl.DGLGraph()
    g2.add_nodes(6)
    g2.add_edges([0, 1, 2, 5, 4, 5], [1, 2, 3, 4, 3, 0])
    g3 = dgl.DGLGraph()
    g3.add_nodes(1)  # no edges
    g = dgl.batch([g1, g3, g2])  # should not throw an error
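    # (illustrative check, not in the original) batching simply concatenates the
    # graphs, so the result should have 6 + 1 + 6 nodes and 5 + 0 + 6 edges:
    assert g.number_of_nodes() == 13
    assert g.number_of_edges() == 11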
def generate_graph():
    g = DGLGraph()
    for i in range(10):
        g.add_node(i, __REPR__=i+1)  # 10 nodes
    # create a graph where 0 is the source and 9 is the sink
    for i in range(1, 9):
        g.add_edge(0, i)
        g.add_edge(i, 9)
    return g
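# (illustrative usage, not in the original) the helper above yields 10 nodes and
# 16 edges: one edge from node 0 to each of nodes 1-8, and one from each into node 9.
g = generate_graph()
assert g.number_of_nodes() == 10 and g.number_of_edges() == 16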
def test_dfs_labeled_edges(example=False):
    dgl_g = dgl.DGLGraph()
    dgl_g.add_nodes(6)
    dgl_g.add_edges([0, 1, 0, 3, 3], [1, 2, 2, 4, 5])
    dgl_edges, dgl_labels = dgl.dfs_labeled_edges_generator(
        dgl_g, [0, 3], has_reverse_edge=True, has_nontree_edge=True)
    dgl_edges = [toset(t) for t in dgl_edges]
    dgl_labels = [toset(t) for t in dgl_labels]
    g1_solutions = [
        # edges            labels
        [[0, 1, 1, 0, 2], [0, 0, 1, 1, 2]],
        [[2, 2, 0, 1, 0], [0, 1, 0, 2, 1]],
    ]
    g2_solutions = [
        # edges         labels
        [[3, 3, 4, 4], [0, 1, 0, 1]],
        [[4, 4, 3, 3], [0, 1, 0, 1]],
    ]
def test_softmax_nodes():
    # test #1: basic
    g0 = dgl.DGLGraph(nx.path_graph(9))
    feat0 = F.randn((g0.number_of_nodes(), 10))
    g0.ndata['x'] = feat0
    ground_truth = F.softmax(feat0, dim=0)
    assert F.allclose(dgl.softmax_nodes(g0, 'x'), ground_truth)
    g0.ndata.pop('x')

    # test #2: batched graph (g3 is empty and contributes no nodes)
    g1 = dgl.DGLGraph(nx.path_graph(5))
    g2 = dgl.DGLGraph(nx.path_graph(3))
    g3 = dgl.DGLGraph()
    g4 = dgl.DGLGraph(nx.path_graph(10))
    bg = dgl.batch([g0, g1, g2, g3, g4])
    feat1 = F.randn((g1.number_of_nodes(), 10))
    feat2 = F.randn((g2.number_of_nodes(), 10))
    feat4 = F.randn((g4.number_of_nodes(), 10))
    bg.ndata['x'] = F.cat([feat0, feat1, feat2, feat4], 0)
    ground_truth = F.cat([
        F.softmax(feat0, 0),
        F.softmax(feat1, 0),
        F.softmax(feat2, 0),
        F.softmax(feat4, 0)
    ], 0)
    assert F.allclose(dgl.softmax_nodes(bg, 'x'), ground_truth)
print("""#Edges %d
  #Classes %d
  #Train samples %d
  #Val samples %d
  #Test samples %d""" %
      (n_edges, n_classes,
       n_train_samples,
       n_val_samples,
       n_test_samples))
# create GCN model
g = data.graph
if args.self_loop and not args.dataset.startswith('reddit'):
    g.remove_edges_from(nx.selfloop_edges(g))
    g.add_edges_from(zip(g.nodes(), g.nodes()))
    print("adding self-loop edges")
g = DGLGraph(g, readonly=True)
# set device for dataset tensors
if args.gpu < 0:
    cuda = False
else:
    cuda = True
    torch.cuda.set_device(args.gpu)
    features = features.cuda()
    labels = labels.cuda()
    train_mask = train_mask.cuda()
    val_mask = val_mask.cuda()
    test_mask = test_mask.cuda()
    print(torch.cuda.get_device_name(0))
g.ndata['features'] = features
if args.gpu < 0:
    cuda = False
else:
    cuda = True
    torch.cuda.set_device(args.gpu)
    features = features.cuda()
    labels = labels.cuda()
    train_mask = train_mask.cuda()
    val_mask = val_mask.cuda()
    test_mask = test_mask.cuda()

# graph preprocess and calculate normalization factor
g = data.graph
# add self loop
if args.self_loop:
    g.remove_edges_from(nx.selfloop_edges(g))
    g.add_edges_from(zip(g.nodes(), g.nodes()))
g = DGLGraph(g)
n_edges = g.number_of_edges()

# create TAGCN model
model = TAGCN(g,
              in_feats,
              args.n_hidden,
              n_classes,
              args.n_layers,
              F.relu,
              args.dropout)

if cuda:
    model.cuda()
loss_fcn = torch.nn.CrossEntropyLoss()
# use optimizer
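# (not part of the excerpt) the "use optimizer" comment is typically followed by an
# Adam optimizer; args.lr and args.weight_decay are assumed command-line arguments:
optimizer = torch.optim.Adam(model.parameters(),
                             lr=args.lr,
                             weight_decay=args.weight_decay)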
        # (inside the per-graph loop of the batching function) collect this graph's
        # edges, with node ids presumably offset by the running shift v_shift
        src, dst, etype = g.get_edges(v_shift=v_shift)
        row.append(src)
        col.append(dst)
        etypes.append(th.from_numpy(etype))
        # update shift
        v_shift += g.number_of_nodes
        e_shift += g.number_of_edges
    n = v_shift
    root_ids = th.tensor(root_ids)
    leaf_ids = th.cat(leaf_ids)
    pos_arr = th.cat(pos_arr)
    etypes = th.cat(etypes)
    row, col = map(np.concatenate, (row, col))
    coo = coo_matrix((np.zeros_like(row), (row, col)), shape=(n, n))
    g = dgl.DGLGraph(coo, readonly=True)
    g.set_n_initializer(dgl.init.zero_initializer)
    g.set_e_initializer(dgl.init.zero_initializer)
    data = th.cat(data)
    labels = th.cat(labels)
    g.edata['etype'] = etypes
    g.ndata['pos'] = pos_arr
    g.nodes[leaf_ids].data['x'] = data
    return Batch(g=g, readout_ids=root_ids, leaf_ids=leaf_ids, y=labels)
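# `Batch` is not defined in this excerpt; a minimal container that is consistent
# with the keyword arguments used above (a hypothetical sketch, not the original):
from collections import namedtuple
Batch = namedtuple('Batch', ['g', 'readout_ids', 'leaf_ids', 'y'])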