How to use spektral - 10 common examples

To help you get started, we’ve selected a few spektral examples based on popular ways the library is used in public projects.
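All of the snippets on this page come from the danielegrattarola/spektral repository itself, and they predate the library's 1.0 API: they rely on TensorFlow 1.x constructs such as tf.placeholder and K.get_session(). A minimal setup sketch, assuming a pre-1.0 spektral on TensorFlow 1.x (these module and layer names match the excerpts below, but most were renamed in spektral 1.0):

# pip install "spektral<1.0" "tensorflow<2.0"   (illustrative version pins)
from spektral.layers import GraphConv, GraphConvSkip, ARMAConv, MinCutPool, GlobalAvgPool
from spektral.datasets import citation
from spektral.utils import localpooling_filter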


danielegrattarola/spektral: examples/classification_BDGC_disjoint.py (view on GitHub)
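This example builds a graph-classification model in disjoint mode: three GraphConvSkip blocks are interleaved with two MinCutPool layers that progressively coarsen each graph to roughly half, then a quarter, of the average node count, and a GlobalAvgPool readout feeds a softmax Dense classifier. The training data (X_train, A_train, y_train) and the hyperparameters referenced below are defined earlier in the script.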
# Imports assumed by this excerpt (TensorFlow 1.x-era APIs):
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.metrics import categorical_accuracy
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2
from spektral.layers import GraphConvSkip, MinCutPool, GlobalAvgPool

# Parameters
F = X_train[0].shape[-1]      # Dimension of node features
n_out = y_train[0].shape[-1]  # Dimension of the target
average_N = np.ceil(np.mean([a.shape[-1] for a in A_train]))  # Average number of nodes in dataset

################################################################################
# BUILD MODEL
################################################################################
X_in = Input(tensor=tf.placeholder(tf.float32, shape=(None, F), name='X_in'))
A_in = Input(tensor=tf.sparse_placeholder(tf.float32, shape=(None, None)), sparse=True)
I_in = Input(tensor=tf.placeholder(tf.int32, shape=(None,), name='segment_ids_in'))
target = Input(tensor=tf.placeholder(tf.float32, shape=(None, n_out), name='target'))

# Block 1
gc1 = GraphConvSkip(n_channels,
                    activation=activ,
                    kernel_regularizer=l2(GNN_l2))([X_in, A_in])
X_1, A_1, I_1, M_1 = MinCutPool(k=int(average_N // 2),
                                h=mincut_H,
                                activation=activ,
                                kernel_regularizer=l2(pool_l2))([gc1, A_in, I_in])

# Block 2
gc2 = GraphConvSkip(n_channels,
                    activation=activ,
                    kernel_regularizer=l2(GNN_l2))([X_1, A_1])
X_2, A_2, I_2, M_2 = MinCutPool(k=int(average_N // 4),
                                h=mincut_H,
                                activation=activ,
                                kernel_regularizer=l2(pool_l2))([gc2, A_1, I_1])

# Block 3
X_3 = GraphConvSkip(n_channels,
                    activation=activ,
                    kernel_regularizer=l2(GNN_l2))([X_2, A_2])

# Output block
avgpool = GlobalAvgPool()([X_3, I_2])
output = Dense(n_out, activation='softmax')(avgpool)

# Build model
model = Model([X_in, A_in, I_in], output)
model.compile(optimizer='adam', loss='categorical_crossentropy', target_tensors=[target])
model.summary()

# Training setup: the model is trained with a manual TF 1.x session loop
sess = K.get_session()
loss = model.total_loss
acc = K.mean(categorical_accuracy(target, model.output))
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_step = opt.minimize(loss)

# Initialize all variables
init_op = tf.global_variables_initializer()
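
The script then trains with a manual session loop rather than model.fit. A minimal sketch of that pattern, where the batch arrays and the disjoint-mode batch generator are illustrative rather than part of the excerpt:

sess.run(init_op)
for epoch in range(epochs):
    # X_batch, A_batch (a tf.SparseTensorValue), I_batch and y_batch are
    # assumed to come from a disjoint-mode batch generator defined elsewhere
    outs = sess.run([train_step, loss, acc],
                    feed_dict={X_in: X_batch,
                               A_in: A_batch,
                               I_in: I_batch,
                               target: y_batch})
    print('Epoch {} - loss: {:.3f} - acc: {:.3f}'.format(epoch, outs[1], outs[2]))
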
danielegrattarola/spektral: tests/benchmarks/citation/citation.py (view on GitHub)
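This benchmark compares several spektral convolutional layers on the citation datasets. The excerpt picks up at the tail of the CONFIG list (its start appears in the hyperparameter excerpt further down) and shows the main loop: each run reloads the dataset with a random split, applies the configuration's preprocessing filter to the adjacency matrix, and begins assembling the model.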
# (tail of the CONFIG list; its start appears in the hyperparameter excerpt below)
        'n_layers': neighbourhood,
        'kwargs': {
            'mlp_channels': 32
        },
        'fltr': lambda A: A,
        'sparse': True
    }
]

results = {}
weights = []
for c in CONFIG:
    acc = []
    times = []
    for i in range(runs):
        A, X, y, train_mask, val_mask, test_mask = citation.load_data(
            dataset, random_split=True
        )

        # Parameters
        N = X.shape[0]          # Number of nodes in the graph
        F = X.shape[1]          # Original feature dimensionality
        n_classes = y.shape[1]  # Number of classes

        # Preprocessing operations
        fltr = c['fltr'](A)

        # Model definition
        X_in = Input(shape=(F, ))
        fltr_in = Input((N, ), sparse=c['sparse'])

        gc_1 = Dropout(dropout_rate)(X_in)
        # (the model definition continues in a later excerpt from this file)
danielegrattarola/spektral: tests/benchmarks/node_classification/node_classification.py (view on GitHub)
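The node-classification benchmark shares its structure with the citation benchmark above; the notable difference is support for the PPI dataset, which is loaded through the graphsage loader instead of the citation one.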
# (tail of the CONFIG list, as in the citation benchmark above)
        'sparse': True
    }
]

results = {}
weights = []
for c in CONFIG:
    acc = []
    times = []
    for i in range(runs):
        if dataset == 'ppi':
            A, X, y, train_mask, val_mask, test_mask = graphsage.load_data(
                dataset_name=dataset
            )
        else:
            A, X, y, train_mask, val_mask, test_mask = citation.load_data(
                dataset, random_split=True
            )
        # (the rest of the loop matches the citation benchmark above)
danielegrattarola/spektral: tests/test_datasets.py (view on GitHub)
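A test from the spektral test suite: it checks that each of the three citation datasets can be loaded, both with the standard split and with a random split.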
def test_citation():
    for dataset_name in ['cora', 'citeseer', 'pubmed']:
        citation.load_data(dataset_name)
        citation.load_data(dataset_name, random_split=True)
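
As the benchmark excerpts show, the loader returns the adjacency matrix, the node features, the one-hot labels, and the three boolean split masks. A minimal usage sketch:

from spektral.datasets import citation

A, X, y, train_mask, val_mask, test_mask = citation.load_data('cora')
print(A.shape, X.shape, y.shape)
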
danielegrattarola/spektral: tests/benchmarks/citation/citation.py (view on GitHub)
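This excerpt continues the citation benchmark's inner loop: the configured layer is stacked n_layers times, a final GraphConv with softmax activation produces the class probabilities, and the model is compiled with Adam and a weighted accuracy metric so that the split masks can act as sample weights. An identical block appears in tests/benchmarks/node_classification/node_classification.py.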
        # (continues the model definition from the earlier citation.py excerpt)
        for _ in range(c['n_layers']):
            gc_1 = c['layer'](**dict(base_kwargs, **c['kwargs']))([gc_1, fltr_in])
        gc_2 = Dropout(dropout_rate)(gc_1)
        gc_2 = GraphConv(n_classes, activation='softmax')([gc_2, fltr_in])

        # Build model
        model = Model(inputs=[X_in, fltr_in], outputs=gc_2)
        optimizer = Adam(lr=learning_rate)
        model.compile(optimizer=optimizer,
                      loss='categorical_crossentropy',
                      weighted_metrics=['acc'])
        if i == 0:
            weights.append((
                c['layer'].__name__, sum([w.size for w in model.get_weights()])
            ))

        # Callbacks
        callbacks = [
            EarlyStopping(monitor='val_weighted_acc',
                          patience=es_patience)
            # (further arguments truncated in the original listing)
        ]
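
Training then runs full-batch on the single graph. A sketch of the fit call under the canonical single-graph Keras pattern (the exact arguments are an assumption, not part of the excerpt):

        model.fit([X, fltr],
                  y,
                  sample_weight=train_mask,
                  epochs=epochs,
                  batch_size=N,           # the whole graph is one batch
                  validation_data=([X, fltr], y, val_mask),
                  shuffle=False,          # shuffling node rows would break the graph structure
                  callbacks=callbacks)
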
danielegrattarola/spektral: tests/benchmarks/citation/citation.py (view on GitHub)
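The hyperparameter block and the start of the CONFIG list that drive the loop above. Each entry names the layer class to benchmark, how many layers to stack, layer-specific keyword arguments, the preprocessing filter for the adjacency matrix, and whether the filter input is sparse.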
dropout_rate = 0.5      # Dropout rate applied to the input of GCN layers
l2_reg = 5e-4           # Regularization rate for l2
learning_rate = 1e-3    # Learning rate for Adam
epochs = 20000          # Number of training epochs
es_patience = 200       # Patience for early stopping
runs = 100              # Number of runs per configuration

base_kwargs = {
    'channels': 16,
    'activation': 'relu',
    'kernel_regularizer': l2(l2_reg),
}

# neighbourhood (the number of stacked layers) is set elsewhere in the script
CONFIG = [
    {
        'layer': GraphConv,
        'n_layers': neighbourhood,
        'kwargs': {},
        'fltr': lambda A: localpooling_filter(A),
        'sparse': True
    },
    {
        'layer': GraphConvSkip,
        'n_layers': neighbourhood,
        'kwargs': {},
        'fltr': lambda A: localpooling_filter(A),
        'sparse': True
    },
    {
        'layer': ARMAConv,
        'n_layers': 1,
        'kwargs': {
            # (remaining entries truncated in the original listing)
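
localpooling_filter implements the preprocessing from Kipf and Welling's GCN: it adds self-loops and symmetrically normalizes the adjacency matrix (D^(-1/2) (A + I) D^(-1/2)). Layers that normalize internally instead receive the raw adjacency, which is what the 'fltr': lambda A: A entry in the first benchmark excerpt does.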