How to use the pgl.utils.paddle_helper module in pgl

To help you get started, we’ve selected a few pgl examples based on popular ways paddle_helper is used in public projects.

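pgl.utils.paddle_helper is a collection of helpers for building static PaddlePaddle (fluid) programs. The call that appears in almost every excerpt below, paddle_helper.constant(name, dtype, value), registers a persistable tensor holding a numpy array and returns the tensor together with an initializer; the example scripts collect these initializers and run them once the startup program has executed. The following is a minimal sketch of that pattern, with an illustrative array value and the init(place) calling convention assumed from the excerpts below rather than taken from the library's documentation.

import numpy as np
import paddle.fluid as fluid
from pgl.utils import paddle_helper

train_program = fluid.Program()
startup_program = fluid.Program()

with fluid.program_guard(train_program, startup_program):
    # constant() returns (tensor, init_fn); init_fn copies the numpy
    # value into the persistable tensor after the startup program runs.
    node_index, init = paddle_helper.constant(
        "node_index", dtype="int64", value=np.arange(10, dtype="int64"))

place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
init(place)  # fill "node_index" with its numpy value (assumed calling convention)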

GitHub: PaddlePaddle / PGL / examples / static_gcn / train.py
output = pgl.layers.gcn(gw,
                                output,
                                dataset.num_classes,
                                activation=None,
                                norm=gw.node_feat['norm'],
                                name="gcn_layer_2")

    val_program = train_program.clone(for_test=True)
    test_program = train_program.clone(for_test=True)

    initializer = []
    with fluid.program_guard(train_program, startup_program):
        train_node_index, init = paddle_helper.constant(
            "train_node_index", dtype="int64", value=train_index)
        initializer.append(init)

        train_node_label, init = paddle_helper.constant(
            "train_node_label", dtype="int64", value=train_label)
        initializer.append(init)
        pred = fluid.layers.gather(output, train_node_index)
        train_loss_t = fluid.layers.softmax_with_cross_entropy(
            logits=pred, label=train_node_label)
        train_loss_t = fluid.layers.reduce_mean(train_loss_t)

        adam = fluid.optimizer.Adam(
            learning_rate=1e-2,
            regularization=fluid.regularizer.L2DecayRegularizer(
                regularization_coeff=0.0005))
        adam.minimize(train_loss_t)

    with fluid.program_guard(val_program, startup_program):
        val_node_index, init = paddle_helper.constant(
            "val_node_index", dtype="int64", value=val_index)

GitHub: PaddlePaddle / PGL / examples / static_gat / train.py
output = pgl.layers.gat(gw,
                                output,
                                dataset.num_classes,
                                num_heads=1,
                                activation=None,
                                name="gat_layer_2",
                                feat_drop=0.6,
                                attn_drop=0.6,
                                is_test=False)

    val_program = train_program.clone(for_test=True)
    test_program = train_program.clone(for_test=True)

    initializer = []
    with fluid.program_guard(train_program, startup_program):
        train_node_index, init = paddle_helper.constant(
            "train_node_index", dtype="int64", value=train_index)
        initializer.append(init)

        train_node_label, init = paddle_helper.constant(
            "train_node_label", dtype="int64", value=train_label)
        initializer.append(init)
        pred = fluid.layers.gather(output, train_node_index)
        train_loss_t = fluid.layers.softmax_with_cross_entropy(
            logits=pred, label=train_node_label)
        train_loss_t = fluid.layers.reduce_mean(train_loss_t)

        adam = fluid.optimizer.Adam(
            learning_rate=1e-2,
            regularization=fluid.regularizer.L2DecayRegularizer(
                regularization_coeff=0.0005))
        adam.minimize(train_loss_t)

GitHub: PaddlePaddle / PGL / pgl / graph_wrapper.py
self._initializers.append(init)

        self._edge_uniq_dst_count, init = paddle_helper.constant(
            name=self.__data_name_prefix + "/uniq_dst_count",
            dtype="int32",
            value=uniq_dst_count)
        self._initializers.append(init)

        node_ids_value = np.arange(0, graph.num_nodes, dtype="int64")
        self._node_ids, init = paddle_helper.constant(
            name=self.__data_name_prefix + "/node_ids",
            dtype="int64",
            value=node_ids_value)
        self._initializers.append(init)

        self._indegree, init = paddle_helper.constant(
            name=self.__data_name_prefix + "/indegree",
            dtype="int64",
            value=indegree)
        self._initializers.append(init)
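
This excerpt is from the graph wrapper internals rather than an example script: the graph structure itself (unique-destination counts, node ids, in-degrees) is stored through the same paddle_helper.constant mechanism, and each returned initializer is appended to self._initializers so the wrapper can run them all in one pass. Below is a hedged sketch of the wrapper-level API those constants back; the StaticGraphWrapper constructor arguments and the initialize(place) call are assumed from how the static example scripts use it.

import paddle.fluid as fluid
import pgl

place = fluid.CPUPlace()
train_program = fluid.Program()
startup_program = fluid.Program()

with fluid.program_guard(train_program, startup_program):
    # StaticGraphWrapper bakes a whole pgl.graph.Graph into the program
    # as constants like the ones created in the excerpt above.
    gw = pgl.graph_wrapper.StaticGraphWrapper(
        name="graph", graph=dataset.graph, place=place)  # dataset.graph: any pgl Graph

exe = fluid.Executor(place)
exe.run(startup_program)
gw.initialize(place)  # runs every collected initializer in self._initializers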

GitHub: PaddlePaddle / PGL / examples / static_gat / train.py
train_loss_t = fluid.layers.softmax_with_cross_entropy(
            logits=pred, label=train_node_label)
        train_loss_t = fluid.layers.reduce_mean(train_loss_t)

        adam = fluid.optimizer.Adam(
            learning_rate=1e-2,
            regularization=fluid.regularizer.L2DecayRegularizer(
                regularization_coeff=0.0005))
        adam.minimize(train_loss_t)

    with fluid.program_guard(val_program, startup_program):
        val_node_index, init = paddle_helper.constant(
            "val_node_index", dtype="int64", value=val_index)
        initializer.append(init)

        val_node_label, init = paddle_helper.constant(
            "val_node_label", dtype="int64", value=val_label)
        initializer.append(init)

        pred = fluid.layers.gather(output, val_node_index)
        val_loss_t, pred = fluid.layers.softmax_with_cross_entropy(
            logits=pred, label=val_node_label, return_softmax=True)
        val_acc_t = fluid.layers.accuracy(
            input=pred, label=val_node_label, k=1)
        val_loss_t = fluid.layers.reduce_mean(val_loss_t)

    with fluid.program_guard(test_program, startup_program):
        test_node_index, init = paddle_helper.constant(
            "test_node_index", dtype="int64", value=test_index)
        initializer.append(init)

        test_node_label, init = paddle_helper.constant(
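
val_program and test_program are clones of train_program built with for_test=True, so they share the trained parameters and differ only in which index/label constants they gather; evaluating them is a plain executor run with no optimizer step. A hedged sketch, reusing an executor set up as in the training sketch above:

val_loss, val_acc = exe.run(val_program, fetch_list=[val_loss_t, val_acc_t])
print("val loss %.4f  val acc %.4f" % (float(val_loss), float(val_acc)))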

GitHub: PaddlePaddle / PGL / examples / graphsage / train.py
log.info("Test Examples: %s" % len(data["test_index"]))
    log.info("Num nodes %s" % data["graph"].num_nodes)
    log.info("Num edges %s" % data["graph"].num_edges)
    log.info("Average Degree %s" % np.mean(data["graph"].indegree()))

    place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
    train_program = fluid.Program()
    startup_program = fluid.Program()
    samples = []
    if args.samples_1 > 0:
        samples.append(args.samples_1)
    if args.samples_2 > 0:
        samples.append(args.samples_2)

    with fluid.program_guard(train_program, startup_program):
        feature, feature_init = paddle_helper.constant(
            "feat",
            dtype=data['feature'].dtype,
            value=data['feature'],
            hide_batch_size=False)

        graph_wrapper = pgl.graph_wrapper.GraphWrapper(
            "sub_graph",
            fluid.CPUPlace(),
            node_feat=data['graph'].node_feat_info())
        model_loss, model_acc = build_graph_model(
            graph_wrapper,
            num_class=data["num_class"],
            feature=feature,
            hidden_size=args.hidden_size,
            graphsage_type=args.graphsage_type,
            k_hop=len(samples))
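
GraphSAGE departs from the fully static examples above: the node feature matrix is still loaded once through paddle_helper.constant (hide_batch_size=False keeps its real first dimension), but the graph goes through a dynamic GraphWrapper, so each mini-batch of sampled subgraphs is passed in through a feed dict. A hedged sketch of that feeding pattern follows; the minibatch_iter sampler and the "node_index"/"node_label" feed names are hypothetical placeholders, and graph_wrapper.to_feed is assumed to convert a sampled subgraph into feed tensors as in other PGL scripts.

exe = fluid.Executor(place)
exe.run(startup_program)
feature_init(place)   # fill the "feat" constant created above (assumed convention)

for subgraph, batch_index, batch_label in minibatch_iter():   # hypothetical sampler
    feed_dict = graph_wrapper.to_feed(subgraph)    # subgraph structure -> feed tensors
    feed_dict["node_index"] = batch_index          # hypothetical feed names
    feed_dict["node_label"] = batch_label
    loss, acc = exe.run(train_program, feed=feed_dict,
                        fetch_list=[model_loss, model_acc])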

GitHub: PaddlePaddle / PGL / pgl / graph_wrapper.py
def recv(dst, uniq_dst, bucketing_index, msg, reduce_function, node_ids):
    """Recv message from given msg to dst nodes.
    """
    if reduce_function == "sum":
        if isinstance(msg, dict):
            raise TypeError("The message for built-in function"
                            " should be Tensor not dict.")

        try:
            out_dims = msg.shape[-1]
            init_output = fluid.layers.fill_constant_batch_size_like(
                node_ids, shape=[1, out_dims], value=0, dtype="float32")
            init_output.stop_gradient = False
            output = paddle_helper.scatter_add(init_output, dst, msg)
            return output
        except TypeError as e:
            warnings.warn(
                "scatter_add is not supported with paddle version <= 1.5")

            def sum_func(message):
                return fluid.layers.sequence_pool(message, "sum")

            reduce_function = sum_func

    # convert msg into lodtensor
    bucketed_msg = op.nested_lod_reset(msg, bucketing_index)
    # Check dim for bucketed_msg equal to out_dims
    output = reduce_function(bucketed_msg)
    out_dims = output.shape[-1]
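
recv aggregates the messages sent along edges onto their destination nodes. With the built-in "sum" reducer it prefers paddle_helper.scatter_add and only falls back to a sequence_pool-based reduction on Paddle versions at or below 1.5. In plain numpy terms, that scatter-add over edges behaves like the following reference sketch (names and data are illustrative):

import numpy as np

def scatter_add_reference(num_nodes, dst, msg):
    """Reference semantics: out[dst[i]] += msg[i] for every edge i."""
    out = np.zeros((num_nodes, msg.shape[-1]), dtype=msg.dtype)
    np.add.at(out, dst, msg)      # accumulate, so repeated dst indices sum up
    return out

dst = np.array([0, 2, 2])                   # destination node of each edge
msg = np.ones((3, 4), dtype="float32")      # one message vector per edge
print(scatter_add_reference(3, dst, msg))   # node 2 receives the sum of two messages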

GitHub: PaddlePaddle / PGL / examples / static_gcn / train.py
name="gcn_layer_1")
        output = fluid.layers.dropout(
            output, 0.5, dropout_implementation='upscale_in_train')
        output = pgl.layers.gcn(gw,
                                output,
                                dataset.num_classes,
                                activation=None,
                                norm=gw.node_feat['norm'],
                                name="gcn_layer_2")

    val_program = train_program.clone(for_test=True)
    test_program = train_program.clone(for_test=True)

    initializer = []
    with fluid.program_guard(train_program, startup_program):
        train_node_index, init = paddle_helper.constant(
            "train_node_index", dtype="int64", value=train_index)
        initializer.append(init)

        train_node_label, init = paddle_helper.constant(
            "train_node_label", dtype="int64", value=train_label)
        initializer.append(init)
        pred = fluid.layers.gather(output, train_node_index)
        train_loss_t = fluid.layers.softmax_with_cross_entropy(
            logits=pred, label=train_node_label)
        train_loss_t = fluid.layers.reduce_mean(train_loss_t)

        adam = fluid.optimizer.Adam(
            learning_rate=1e-2,
            regularization=fluid.regularizer.L2DecayRegularizer(
                regularization_coeff=0.0005))
        adam.minimize(train_loss_t)

GitHub: PaddlePaddle / PGL / pgl / layers / conv.py
def reduce_attention(msg):
        alpha = msg["alpha"]  # lod-tensor (batch_size, seq_len, num_heads)
        h = msg["h"]
        alpha = paddle_helper.sequence_softmax(alpha)
        old_h = h
        h = fluid.layers.reshape(h, [-1, num_heads, hidden_size])
        alpha = fluid.layers.reshape(alpha, [-1, num_heads, 1])
        if attn_drop > 1e-15:
            alpha = fluid.layers.dropout(
                alpha,
                dropout_prob=attn_drop,
                is_test=is_test,
                dropout_implementation="upscale_in_train")
        h = h * alpha
        h = fluid.layers.reshape(h, [-1, num_heads * hidden_size])
        h = fluid.layers.lod_reset(h, old_h)
        return fluid.layers.sequence_pool(h, "sum")
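
In this GAT reducer, paddle_helper.sequence_softmax normalizes the attention logits separately for each LoD sequence, i.e. over the incoming edges of each destination node, before the messages are weighted and summed. The numpy reference below shows that per-sequence softmax (illustrative data, single attention head):

import numpy as np

def sequence_softmax_reference(alpha, seq_lens):
    """Softmax applied independently within each sequence (one per dst node)."""
    out = np.empty_like(alpha, dtype="float64")
    start = 0
    for n in seq_lens:                       # sequence = incoming edges of one node
        seg = alpha[start:start + n]
        e = np.exp(seg - seg.max(axis=0))    # numerically stable softmax
        out[start:start + n] = e / e.sum(axis=0)
        start += n
    return out

# node 0 has three incoming edges, node 1 has one
alpha = np.array([[1.0], [2.0], [3.0], [0.5]])
print(sequence_softmax_reference(alpha, seq_lens=[3, 1]))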