How to use the pgl.layers module in pgl

To help you get started, we’ve selected a few pgl examples based on popular ways the library is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github PaddlePaddle / PGL / examples / static_gat / train.py View on Github external
# NOTE(review): truncated excerpt from PGL examples/static_gat/train.py —
# the first line has lost its leading indentation and the second gat() call
# is cut off mid-argument-list by the page extraction; not runnable as-is.
test_index = dataset.test_index
    # Labels/indices become column vectors (shape [N, 1]) for the feed dicts.
    test_label = np.expand_dims(dataset.y[test_index], -1)
    test_index = np.expand_dims(test_index, -1)

    # Pick the device and create separate fluid Programs for train/startup/test.
    place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
    train_program = fluid.Program()
    startup_program = fluid.Program()
    test_program = fluid.Program()
    hidden_size = 16

    with fluid.program_guard(train_program, startup_program):
        # Static wrapper receives the concrete graph at construction time
        # (graph=dataset.graph), unlike the feed-based GraphWrapper variant.
        gw = pgl.graph_wrapper.StaticGraphWrapper(
            name="graph", graph=dataset.graph, place=place)

        # Two-layer GAT: an 8-head ELU hidden layer over the "words" node
        # feature, then a single-head output layer sized to num_classes.
        output = pgl.layers.gat(gw,
                                gw.node_feat["words"],
                                hidden_size,
                                activation="elu",
                                name="gat_layer_1",
                                num_heads=8,
                                feat_drop=0.6,
                                attn_drop=0.6,
                                is_test=False)
        output = pgl.layers.gat(gw,
                                output,
                                dataset.num_classes,
                                num_heads=1,
                                activation=None,
                                name="gat_layer_2",
                                feat_drop=0.6,
                                attn_drop=0.6,
github PaddlePaddle / PGL / examples / gat / train.py View on Github external
# NOTE(review): truncated excerpt from PGL examples/gat/train.py — begins
# inside a function body (first line stripped of its indentation) and is cut
# off mid fluid.layers.data() call; not runnable as-is.
with fluid.program_guard(train_program, startup_program):
        # Feed-based wrapper: only the node-feature *schema* is declared here
        # (node_feat_info()); the actual graph data is fed at run time.
        gw = pgl.graph_wrapper.GraphWrapper(
            name="graph",
            place=place,
            node_feat=dataset.graph.node_feat_info())

        # Two-layer GAT: 8-head ELU hidden layer, then a single-head output
        # layer sized to the number of classes.
        output = pgl.layers.gat(gw,
                                gw.node_feat["words"],
                                hidden_size,
                                activation="elu",
                                name="gat_layer_1",
                                num_heads=8,
                                feat_drop=0.6,
                                attn_drop=0.6,
                                is_test=False)
        output = pgl.layers.gat(gw,
                                output,
                                dataset.num_classes,
                                num_heads=1,
                                activation=None,
                                name="gat_layer_2",
                                feat_drop=0.6,
                                attn_drop=0.6,
                                is_test=False)
        # Placeholders for node indices and labels: int64 column vectors
        # with a variable leading dimension.
        node_index = fluid.layers.data(
            "node_index",
            shape=[None, 1],
            dtype="int64",
            append_batch_size=False)
        node_label = fluid.layers.data(
            "node_label",
            shape=[None, 1],
github PaddlePaddle / PGL / examples / static_gat / train.py View on Github external
# NOTE(review): truncated excerpt from PGL examples/static_gat/train.py —
# begins inside a function body (first line stripped of its indentation).
hidden_size = 16

    with fluid.program_guard(train_program, startup_program):
        # Static wrapper: the concrete graph is supplied at construction time.
        gw = pgl.graph_wrapper.StaticGraphWrapper(
            name="graph", graph=dataset.graph, place=place)

        # Two-layer GAT: 8-head ELU hidden layer, then a single-head output
        # layer sized to the number of classes.
        output = pgl.layers.gat(gw,
                                gw.node_feat["words"],
                                hidden_size,
                                activation="elu",
                                name="gat_layer_1",
                                num_heads=8,
                                feat_drop=0.6,
                                attn_drop=0.6,
                                is_test=False)
        output = pgl.layers.gat(gw,
                                output,
                                dataset.num_classes,
                                num_heads=1,
                                activation=None,
                                name="gat_layer_2",
                                feat_drop=0.6,
                                attn_drop=0.6,
                                is_test=False)

    # Derive validation/test programs from the training graph definition
    # (clone(for_test=True) — see Paddle docs for its inference semantics).
    val_program = train_program.clone(for_test=True)
    test_program = train_program.clone(for_test=True)

    initializer = []
    with fluid.program_guard(train_program, startup_program):
        # Bake the training node indices into the program as an int64 constant;
        # the returned init callbacks are collected and presumably run later.
        train_node_index, init = paddle_helper.constant(
            "train_node_index", dtype="int64", value=train_index)
github PaddlePaddle / PGL / examples / gat / train.py View on Github external
def main(args):
    """Build a two-layer GAT network for the dataset named by ``args.dataset``.

    NOTE(review): truncated excerpt from PGL examples/gat/train.py — the
    second gat() call is cut off mid-argument-list by the page extraction,
    so this fragment is not runnable as-is.
    """
    dataset = load(args.dataset)
    # Pick the device and create separate fluid Programs for train/startup/test.
    place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
    train_program = fluid.Program()
    startup_program = fluid.Program()
    test_program = fluid.Program()
    hidden_size = 8

    with fluid.program_guard(train_program, startup_program):
        # Feed-based wrapper: only the node-feature schema is declared here.
        gw = pgl.graph_wrapper.GraphWrapper(
            name="graph",
            place=place,
            node_feat=dataset.graph.node_feat_info())

        # Two-layer GAT: 8-head ELU hidden layer over the "words" feature,
        # then a single-head output layer (truncated below).
        output = pgl.layers.gat(gw,
                                gw.node_feat["words"],
                                hidden_size,
                                activation="elu",
                                name="gat_layer_1",
                                num_heads=8,
                                feat_drop=0.6,
                                attn_drop=0.6,
                                is_test=False)
        output = pgl.layers.gat(gw,
                                output,
                                dataset.num_classes,
                                num_heads=1,
                                activation=None,
                                name="gat_layer_2",
                                feat_drop=0.6,
                                attn_drop=0.6,
github PaddlePaddle / PGL / examples / gcn / train.py View on Github external
# NOTE(review): truncated excerpt from PGL examples/gcn/train.py — begins
# inside a function body (first line stripped of its indentation).
with fluid.program_guard(train_program, startup_program):
        # Feed-based wrapper: only the node-feature schema is declared here.
        gw = pgl.graph_wrapper.GraphWrapper(
            name="graph",
            place=place,
            node_feat=dataset.graph.node_feat_info())

        # Two-layer GCN with a precomputed per-node "norm" feature; dropout
        # (upscale_in_train) is applied between the layers.
        output = pgl.layers.gcn(gw,
                                gw.node_feat["words"],
                                hidden_size,
                                activation="relu",
                                norm=gw.node_feat['norm'],
                                name="gcn_layer_1")
        output = fluid.layers.dropout(
            output, 0.5, dropout_implementation='upscale_in_train')
        output = pgl.layers.gcn(gw,
                                output,
                                dataset.num_classes,
                                activation=None,
                                norm=gw.node_feat['norm'],
                                name="gcn_layer_2")
        # Placeholders for node indices and labels: int64 column vectors.
        node_index = fluid.layers.data(
            "node_index",
            shape=[None, 1],
            dtype="int64",
            append_batch_size=False)
        node_label = fluid.layers.data(
            "node_label",
            shape=[None, 1],
            dtype="int64",
            append_batch_size=False)
github PaddlePaddle / PGL / examples / static_gcn / train.py View on Github external
# NOTE(review): truncated excerpt from PGL examples/static_gcn/train.py —
# begins inside a function body (first line stripped of its indentation).
val_index = np.expand_dims(val_index, -1)

    # Labels/indices become column vectors (shape [N, 1]) for the feed dicts.
    test_index = dataset.test_index
    test_label = np.expand_dims(dataset.y[test_index], -1)
    test_index = np.expand_dims(test_index, -1)

    # Pick the device and create separate fluid Programs for train/startup/test.
    place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
    train_program = fluid.Program()
    startup_program = fluid.Program()
    test_program = fluid.Program()
    hidden_size = 16

    with fluid.program_guard(train_program, startup_program):
        # Static wrapper: the concrete graph is supplied at construction time.
        gw = pgl.graph_wrapper.StaticGraphWrapper(
            name="graph", graph=dataset.graph, place=place)
        # Two-layer GCN with a precomputed "norm" feature; dropout between
        # the layers (upscale_in_train implementation).
        output = pgl.layers.gcn(gw,
                                gw.node_feat["words"],
                                hidden_size,
                                activation="relu",
                                norm=gw.node_feat['norm'],
                                name="gcn_layer_1")
        output = fluid.layers.dropout(
            output, 0.5, dropout_implementation='upscale_in_train')
        output = pgl.layers.gcn(gw,
                                output,
                                dataset.num_classes,
                                activation=None,
                                norm=gw.node_feat['norm'],
                                name="gcn_layer_2")

    # Derive validation/test programs from the training graph definition.
    val_program = train_program.clone(for_test=True)
    test_program = train_program.clone(for_test=True)
github PaddlePaddle / PGL / examples / gcn / train.py View on Github external
# NOTE(review): truncated excerpt from PGL examples/gcn/train.py — begins
# inside a function body (first line stripped of its indentation) and is cut
# off mid fluid.layers.data() call; not runnable as-is.
norm[indegree > 0] = np.power(indegree[indegree > 0], -0.5)
    # Store degree^(-1/2) (zero-degree nodes keep norm's prior value) as a
    # [N, 1] node feature consumed by the gcn layers below via norm=...
    dataset.graph.node_feat["norm"] = np.expand_dims(norm, -1)

    # Pick the device and create separate fluid Programs for train/startup/test.
    place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
    train_program = fluid.Program()
    startup_program = fluid.Program()
    test_program = fluid.Program()
    hidden_size = 16

    with fluid.program_guard(train_program, startup_program):
        # Feed-based wrapper: only the node-feature schema is declared here.
        gw = pgl.graph_wrapper.GraphWrapper(
            name="graph",
            place=place,
            node_feat=dataset.graph.node_feat_info())

        # Two-layer GCN with the "norm" feature; dropout between the layers.
        output = pgl.layers.gcn(gw,
                                gw.node_feat["words"],
                                hidden_size,
                                activation="relu",
                                norm=gw.node_feat['norm'],
                                name="gcn_layer_1")
        output = fluid.layers.dropout(
            output, 0.5, dropout_implementation='upscale_in_train')
        output = pgl.layers.gcn(gw,
                                output,
                                dataset.num_classes,
                                activation=None,
                                norm=gw.node_feat['norm'],
                                name="gcn_layer_2")
        node_index = fluid.layers.data(
            "node_index",
            shape=[None, 1],
github PaddlePaddle / PGL / examples / static_gcn / train.py View on Github external
# NOTE(review): truncated excerpt from PGL examples/static_gcn/train.py —
# begins inside a function body (first line stripped of its indentation) and
# is cut off mid paddle_helper.constant() call; not runnable as-is.
startup_program = fluid.Program()
    test_program = fluid.Program()
    hidden_size = 16

    with fluid.program_guard(train_program, startup_program):
        # Static wrapper: the concrete graph is supplied at construction time.
        gw = pgl.graph_wrapper.StaticGraphWrapper(
            name="graph", graph=dataset.graph, place=place)
        # Two-layer GCN with a precomputed "norm" feature; dropout between
        # the layers (upscale_in_train implementation).
        output = pgl.layers.gcn(gw,
                                gw.node_feat["words"],
                                hidden_size,
                                activation="relu",
                                norm=gw.node_feat['norm'],
                                name="gcn_layer_1")
        output = fluid.layers.dropout(
            output, 0.5, dropout_implementation='upscale_in_train')
        output = pgl.layers.gcn(gw,
                                output,
                                dataset.num_classes,
                                activation=None,
                                norm=gw.node_feat['norm'],
                                name="gcn_layer_2")

    # Derive validation/test programs from the training graph definition.
    val_program = train_program.clone(for_test=True)
    test_program = train_program.clone(for_test=True)

    initializer = []
    with fluid.program_guard(train_program, startup_program):
        # Bake training node indices/labels into the program as int64
        # constants, collecting the init callbacks for later execution.
        train_node_index, init = paddle_helper.constant(
            "train_node_index", dtype="int64", value=train_index)
        initializer.append(init)

        train_node_label, init = paddle_helper.constant(