How to use the bigdl.util.common.to_list function in bigdl

To help you get started, we've selected a few examples of bigdl.util.common.to_list, drawn from popular ways it is used in public projects.
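to_list is a small normalization helper: it wraps a bare value in a one-element list and returns an existing list unchanged, which is why the APIs below can accept either a single object or a list of them. A minimal sketch of that behavior, assuming the usual wrap-if-not-list semantics:

from bigdl.util.common import to_list

# A bare value becomes a one-element list; a list passes
# through unchanged.
print(to_list(1))           # [1]
print(to_list([1, 2]))      # [1, 2]
print(to_list("accuracy"))  # ['accuracy']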

github intel-analytics / analytics-zoo / pyzoo / zoo / pipeline / api / net.py
        batch_size = self.dataset.batch_size

        sample_rdd = self.dataset.get_training_data()

        if val_outputs is not None and val_labels is not None:
            val_rdd = self.dataset.get_validation_data()
            if val_rdd is not None:
                val_method = [TFValidationMethod(m, len(val_outputs), len(val_labels))
                              for m in to_list(val_method)]
                training_rdd = sample_rdd

            elif val_split != 0.0:
                training_rdd, val_rdd = sample_rdd.randomSplit([1 - val_split, val_split])
                val_method = [TFValidationMethod(m, len(val_outputs), len(val_labels))
                              for m in to_list(val_method)]
            else:
                raise ValueError("Validation data is not specified. Please set " +
                                 "val rdd in TFDataset, or set val_split larger than zero")

            self.optimizer = Optimizer.create(self.training_helper_layer,
                                              training_rdd,
                                              IdentityCriterion(),
                                              batch_size=batch_size,
                                              optim_method=self.optim_method)
            self.optimizer.set_validation(self.dataset.batch_size,
                                          val_rdd,
                                          EveryEpoch(),
                                          val_method)
        else:
            training_rdd = sample_rdd
            self.optimizer = Optimizer.create(self.training_helper_layer,
                                              training_rdd,
                                              IdentityCriterion(),
                                              batch_size=batch_size,
                                              optim_method=self.optim_method)
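The val_split branch above delegates the train/validation split to Spark's RDD.randomSplit. A standalone illustration of that call (requires a running SparkContext; the data here is a placeholder):

from pyspark import SparkContext

sc = SparkContext.getOrCreate()
sample_rdd = sc.parallelize(range(100))
val_split = 0.2
training_rdd, val_rdd = sample_rdd.randomSplit([1 - val_split, val_split])
print(training_rdd.count(), val_rdd.count())  # roughly 80 / 20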
github intel-analytics / analytics-zoo / pyzoo / zoo / pipeline / api / net / tf_optimizer.py
            val_labels = model_targets
        else:
            val_outputs = None
            val_labels = None
            bigdl_val_methods = None

        tensor_with_value = {
            K.learning_phase(): [True, False]
        }

        updates = keras_model.updates

        metrics = None

        if bigdl_val_methods is not None:
            val_methods = to_list(bigdl_val_methods)
            metrics = {}
            for i, method in enumerate(val_methods):
                metrics['bigdl_metric_' + str(i)] = BigDLMetric(method, val_outputs, val_labels)

        tf_model = TFModel.create_for_unfreeze(loss, sess, inputs, grads, variables, loss.graph,
                                               tensor_with_value, session_config, metrics,
                                               updates, model_dir)

        return cls(tf_model, optim_method, sess=sess, dataset=dataset, val_split=val_split,
                   clip_norm=clip_norm, clip_value=clip_value)
github intel-analytics / BigDL / pyspark / bigdl / nn / layer.py
    def __init__(self,
                 inputs,
                 outputs,
                 bigdl_type="float", byte_order="little_endian", model_type="bigdl"):
        if model_type == "bigdl":
            super(Model, self).__init__(None, bigdl_type,
                                    to_list(inputs),
                                    to_list(outputs))
        else:
            from bigdl.util.tf_utils import convert
            model = convert(to_list(inputs), to_list(outputs), byte_order, bigdl_type)
            super(Model, self).__init__(model, bigdl_type)
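Because the constructor pushes inputs and outputs through to_list, a single node and a list of nodes are declared the same way. A hedged sketch of that usage (assumes a SparkContext and that init_engine() has been called; layer sizes are arbitrary):

from bigdl.nn.layer import Model, Linear, Input

inp = Input()
out = Linear(4, 2)(inp)

# Bare nodes and one-element lists build the same graph,
# since to_list normalizes both forms internally.
model_a = Model(inp, out)
model_b = Model([inp], [out])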
github intel-analytics / BigDL / pyspark / bigdl / optim / optimizer.py
"""
        Configure validation settings.


        :param batch_size: validation batch size
        :param val_rdd: validation dataset
        :param trigger: validation interval
        :param val_method: the ValidationMethod to use,e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
        """
        if val_method is None:
            val_method = [Top1Accuracy()]
        func_name = "setValidation"
        if isinstance(val_rdd, DataSet):
            func_name = "setValidationFromDataSet"
        callBigDlFunc(self.bigdl_type, func_name, self.value, batch_size,
                      trigger, val_rdd, to_list(val_method))
github intel-analytics / analytics-zoo / pyzoo / zoo / pipeline / api / net / tf_optimizer.py
        loss, optim_method, sess, dataset, inputs = args[:5]
        grads, variables, graph, val_outputs, val_labels, val_method = args[5:]
        if clip_value is not None:
            if isinstance(clip_value, (float, int)):
                if clip_value <= 0:
                    raise ValueError("The clip_value argument should be a positive number")
                clip_value = (-float(clip_value), float(clip_value))

            if not isinstance(clip_value, tuple):
                raise ValueError("The clip_value argument should be" +
                                 " a positive float/int which clips to" +
                                 " (-clip_value, clip_value); " +
                                 "or a tuple which clips to (min_value, max_value)")

        if val_method is not None:
            val_methods = to_list(val_method)
            if metrics is None:
                metrics = {}

            for i, method in enumerate(val_methods):
                metrics['bigdl_metric_' + str(i)] = BigDLMetric(method, val_outputs, val_labels)

        tf_model = TFModel.create_for_unfreeze(loss, sess, inputs, grads, variables, graph,
                                               tensor_with_value, session_config, metrics,
                                               updates, model_dir)

        return cls(tf_model, optim_method, sess=sess, dataset=dataset, val_split=val_split,
                   clip_norm=clip_norm, clip_value=clip_value)
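The scalar handling above is self-contained enough to test in isolation. A sketch of the same normalization with a hypothetical helper name:

def normalize_clip_value(clip_value):
    # A positive number c clips to the symmetric range (-c, c);
    # a (min_value, max_value) tuple passes through unchanged.
    if isinstance(clip_value, (float, int)):
        if clip_value <= 0:
            raise ValueError("clip_value should be a positive number")
        return (-float(clip_value), float(clip_value))
    if not isinstance(clip_value, tuple):
        raise ValueError("clip_value should be a positive float/int "
                         "or a (min_value, max_value) tuple")
    return clip_value

print(normalize_clip_value(5))            # (-5.0, 5.0)
print(normalize_clip_value((-1.0, 2.0)))  # (-1.0, 2.0)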
github intel-analytics / BigDL / pyspark / bigdl / optim / optimizer.py
    def set_validation(self, batch_size, X_val, Y_val, trigger, val_method=None):
        """
        Configure validation settings.

        :param batch_size: validation batch size
        :param X_val: features of validation dataset
        :param Y_val: label of validation dataset
        :param trigger: validation interval
        :param val_method: the ValidationMethod to use, e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
        """
        if val_method is None:
            val_method = [Top1Accuracy()]
        callBigDlFunc(self.bigdl_type, "setValidation", self.value, batch_size,
                      trigger, [JTensor.from_ndarray(X) for X in to_list(X_val)],
                      JTensor.from_ndarray(Y_val), to_list(val_method))
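The comprehension over to_list(X_val) is what lets single-input and multi-input models share this signature. The normalization step in isolation (no Spark required):

import numpy as np
from bigdl.util.common import to_list

X_single = np.zeros((4, 3))
X_multi = [np.zeros((4, 3)), np.ones((4, 2))]

# Either form becomes a list of ndarrays, ready for the
# JTensor.from_ndarray conversion above.
print([a.shape for a in to_list(X_single)])  # [(4, 3)]
print([a.shape for a in to_list(X_multi)])   # [(4, 3), (4, 2)]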
github intel-analytics / analytics-zoo / pyzoo / zoo / pipeline / api / keras / engine / topology.py
                if validation_data:
                    validation_data = to_sample_rdd(*validation_data)
            elif isinstance(x, (RDD, ImageSet, TextSet)) and not y:
                training_data = x
            else:
                raise TypeError("Unsupported training data type: %s" % type(x))
            callBigDlFunc(self.bigdl_type, "zooFit",
                          self.value,
                          training_data,
                          batch_size,
                          nb_epoch,
                          validation_data)
        else:
            if validation_data:
                val_x = [JTensor.from_ndarray(x) for x in to_list(validation_data[0])]
                val_y = JTensor.from_ndarray(validation_data[1])
            else:
                val_x, val_y = None, None
            callBigDlFunc(self.bigdl_type, "zooFit",
                          self.value,
                          [JTensor.from_ndarray(x) for x in to_list(x)],
                          JTensor.from_ndarray(y),
                          batch_size,
                          nb_epoch,
                          val_x,
                          val_y)
github intel-analytics / analytics-zoo / pyzoo / zoo / pipeline / api / net.py
    def __init__(self, input, output, jvalue=None, bigdl_type="float", **kwargs):
        super(BModel, self).__init__(jvalue,
                                     to_list(input),
                                     to_list(output),
                                     bigdl_type,
                                     **kwargs)
github intel-analytics / BigDL / pyspark / bigdl / keras / optimization.py
    def to_bigdl_metrics(metrics):
        metrics = to_list(metrics)
        bmetrics = []
        for metric in metrics:
            if metric == "accuracy":
                bmetrics.append(boptimizer.Top1Accuracy())
            else:
                unsupport_exp(metric)
        # TODO: add more metrics
        return bmetrics
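Thanks to the to_list call on its first line, a single metric name and a list of names behave identically; shown here as plain calls for illustration:

to_bigdl_metrics("accuracy")                # [Top1Accuracy()]
to_bigdl_metrics(["accuracy", "accuracy"])  # two Top1Accuracy instances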