How to use the bigdl.optim.optimizer.SGD function in bigdl

To help you get started, we've selected a few examples that show how the bigdl.optim.optimizer.SGD function is used in popular open-source projects.

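Before the project excerpts, here is a minimal sketch of constructing the optimizer directly. The keyword arguments mirror the ones that appear in the excerpts below (learningrate, learningrate_decay, momentum, nesterov); the values are illustrative:

from bigdl.optim.optimizer import SGD

# Plain SGD with only a learning rate
optim = SGD(learningrate=0.01)

# SGD with learning rate decay, momentum and Nesterov acceleration
optim = SGD(learningrate=0.01,
            learningrate_decay=0.0002,
            momentum=0.9,
            nesterov=True)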

github intel-analytics / analytics-zoo / pyzoo / zoo / examples / qaranker / qa_ranker.py
This excerpt from the QA ranker example builds train and validation TextSets, wraps a KNRM model in TimeDistributed, and compiles it with SGD and a rank hinge loss before training and evaluation.

    train_set = TextSet.from_relation_pairs(train_relations, q_set, a_set)
    validate_relations = Relations.read(options.data_path + "/relation_valid.csv",
                                        sc, int(options.partition_num))
    validate_set = TextSet.from_relation_lists(validate_relations, q_set, a_set)

    if options.model:
        knrm = KNRM.load_model(options.model)
    else:
        word_index = a_set.get_word_index()
        knrm = KNRM(int(options.question_length), int(options.answer_length),
                    options.embedding_file, word_index)
    model = Sequential().add(
        TimeDistributed(
            knrm,
            input_shape=(2, int(options.question_length) + int(options.answer_length))))
    model.compile(optimizer=SGD(learningrate=float(options.learning_rate)),
                  loss="rank_hinge")
    for _ in range(int(options.nb_epoch)):
        model.fit(train_set, batch_size=int(options.batch_size), nb_epoch=1)
        knrm.evaluate_ndcg(validate_set, 3)
        knrm.evaluate_ndcg(validate_set, 5)
        knrm.evaluate_map(validate_set)

    if options.output_path:
        knrm.save_model(options.output_path + "/knrm.model")
        a_set.save_word_index(options.output_path + "/word_index.txt")
        print("Trained model and word dictionary saved")
    sc.stop()
github intel-analytics / analytics-zoo / pyzoo / zoo / examples / autograd / custom.py
The custom autograd example fits a small model containing a user-defined Lambda layer and a custom loss, compiled with SGD at a fixed learning rate of 1e-2.

    parser.add_option("--nb_epoch", dest="nb_epoch", default="500")

    (options, args) = parser.parse_args(sys.argv)

    sc = init_nncontext(init_spark_conf().setMaster("local[4]"))

    data_len = 1000
    X_ = np.random.uniform(0, 1, (data_len, 2))
    Y_ = ((2 * X_).sum(1) + 0.4).reshape([data_len, 1])

    a = Input(shape=(2,))
    b = Dense(1)(a)
    c = Lambda(function=add_one_func)(b)
    model = Model(input=a, output=c)

    model.compile(optimizer=SGD(learningrate=1e-2),
                  loss=mean_absolute_error)

    model.set_tensorboard('./log', 'customized layer and loss')

    model.fit(x=X_,
              y=Y_,
              batch_size=32,
              nb_epoch=int(options.nb_epoch),
              distributed=False)

    model.save_graph_topology('./log')

    w = model.get_weights()
    print(w)
    pred = model.predict_local(X_)
github intel-analytics / analytics-zoo / pyzoo / zoo / pipeline / api / net / utils.py
This helper in utils.py converts TensorFlow optimizers into BigDL optim methods; both GradientDescentOptimizer and MomentumOptimizer map to BigDL's SGD.

    # ... (preceding Keras-optimizer branches elided from this excerpt) ...
    elif isinstance(koptim_method, tftrain.Optimizer):
        def get_value(v):
            if isinstance(v, (tf.Tensor, tf.SparseTensor, tf.Variable)):
                return float(K.eval(v))
            else:
                return float(v)

        if isinstance(koptim_method, tftrain.GradientDescentOptimizer):
            lr = get_value(koptim_method._learning_rate)
            return boptimizer.SGD(learningrate=lr)
        elif isinstance(koptim_method, tftrain.MomentumOptimizer):
            lr = get_value(koptim_method._learning_rate)
            momentum = get_value(koptim_method._momentum)
            use_nesterov = koptim_method._use_nesterov
            return boptimizer.SGD(learningrate=lr, momentum=momentum, nesterov=use_nesterov)
        elif isinstance(koptim_method, tftrain.AdagradOptimizer):
            lr = get_value(koptim_method._learning_rate)
            return boptimizer.Adagrad(learningrate=lr)
        elif isinstance(koptim_method, tftrain.AdamOptimizer):
            lr = get_value(koptim_method._lr)
            beta1 = get_value(koptim_method._beta1)
            beta2 = get_value(koptim_method._beta2)
            epsilon = get_value(koptim_method._epsilon)
            return boptimizer.Adam(learningrate=lr, beta1=beta1, beta2=beta2, epsilon=epsilon)
        elif isinstance(koptim_method, tftrain.RMSPropOptimizer):
            lr = get_value(koptim_method._learning_rate)
            decay = get_value(koptim_method._decay)
            momentum = get_value(koptim_method._momentum)
            epsilon = get_value(koptim_method._epsilon)
            centered = get_value(koptim_method._centered)
            if momentum != 0.0 or centered:
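Concretely, under the mapping above a TensorFlow momentum optimizer lands on BigDL's SGD. A short illustration (the optimizer values are hypothetical):

# tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9, use_nesterov=True)
# converts, via the MomentumOptimizer branch above, to:
boptimizer.SGD(learningrate=0.1, momentum=0.9, nesterov=True)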
github intel-analytics / analytics-zoo / pyzoo / zoo / pipeline / api / net.py
net.py carries the same conversion logic for Keras optimizers; a Keras SGD's learning rate, decay, momentum, and Nesterov flag map directly onto BigDL's SGD.

        import tensorflow as tf
        from tensorflow.python.keras.optimizers import TFOptimizer

        if isinstance(koptim_method, TFOptimizer):
            koptim_method = koptim_method.optimizer

        if isinstance(koptim_method, koptimizers.Optimizer):
            lr = float(K.eval(koptim_method.lr))
            decay = float(K.eval(koptim_method.decay))
            if isinstance(koptim_method, koptimizers.Adagrad):
                warnings.warn("For Adagrad, we don't support epsilon for now")
                return boptimizer.Adagrad(learningrate=lr,
                                          learningrate_decay=decay)
            elif isinstance(koptim_method, koptimizers.SGD):
                momentum = float(K.eval(koptim_method.momentum))
                return boptimizer.SGD(learningrate=lr,
                                      learningrate_decay=decay,
                                      momentum=momentum,
                                      nesterov=koptim_method.nesterov)
            elif isinstance(koptim_method, koptimizers.Adam):
                beta1 = float(K.eval(koptim_method.beta_1))
                beta2 = float(K.eval(koptim_method.beta_2))
                return boptimizer.Adam(learningrate=lr,
                                       learningrate_decay=decay,
                                       beta1=beta1,
                                       beta2=beta2,
                                       epsilon=koptim_method.epsilon)
            elif isinstance(koptim_method, koptimizers.RMSprop):
                rho = float(K.eval(koptim_method.rho))
                return boptimizer.RMSprop(learningrate=lr,
                                          learningrate_decay=decay,
                                          decayrate=rho,
github intel-analytics / analytics-zoo / pyzoo / zoo / pipeline / nnframes / nn_classifier.py
In the NNFrames pipeline API, HasOptimMethod initializes the optimization method to a plain SGD() by default.

    def __init__(self):
        super(HasOptimMethod, self).__init__()
        self.optimMethod = SGD()
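A hedged sketch of overriding that default, assuming the setOptimMethod setter that HasOptimMethod conventionally exposes; the model, criterion, and feature shape are placeholders:

from zoo.pipeline.nnframes import NNClassifier
from bigdl.optim.optimizer import SGD

# Replace the default SGD() with a tuned instance (illustrative values)
classifier = NNClassifier(model, criterion, [28, 28]) \
    .setOptimMethod(SGD(learningrate=0.05, momentum=0.9))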
github intel-analytics / analytics-zoo / pyzoo / zoo / pipeline / api / net / utils.py
The entry point of the conversion in utils.py: a TFOptimizer wrapper is unwrapped, an existing BigDL OptimMethod passes through unchanged, and Keras optimizers, including Keras SGD, are translated field by field.

    if isinstance(koptim_method, TFOptimizer):
        koptim_method = koptim_method.optimizer

    if isinstance(koptim_method, boptimizer.OptimMethod):
        return koptim_method
    elif isinstance(koptim_method, koptimizers.Optimizer):
        lr = float(K.eval(koptim_method.lr))
        decay = float(K.eval(koptim_method.decay))
        if isinstance(koptim_method, koptimizers.Adagrad):
            warnings.warn("For Adagrad, we don't support epsilon for now")
            return boptimizer.Adagrad(learningrate=lr,
                                      learningrate_decay=decay)
        elif isinstance(koptim_method, koptimizers.SGD):
            momentum = float(K.eval(koptim_method.momentum))
            return boptimizer.SGD(learningrate=lr,
                                  learningrate_decay=decay,
                                  momentum=momentum,
                                  nesterov=koptim_method.nesterov)
        elif isinstance(koptim_method, koptimizers.Adam):
            beta1 = float(K.eval(koptim_method.beta_1))
            beta2 = float(K.eval(koptim_method.beta_2))
            return boptimizer.Adam(learningrate=lr,
                                   learningrate_decay=decay,
                                   beta1=beta1,
                                   beta2=beta2,
                                   epsilon=koptim_method.epsilon)
        elif isinstance(koptim_method, koptimizers.RMSprop):
            rho = float(K.eval(koptim_method.rho))
            return boptimizer.RMSprop(learningrate=lr,
                                      learningrate_decay=decay,
                                      decayrate=rho,
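Taken together, these helpers mean a Keras or TensorFlow optimizer handed to Analytics Zoo is translated into a BigDL optim method automatically. A hedged usage sketch, assuming the conversion function shown above is exposed as to_bigdl_optim_method:

from keras.optimizers import SGD as KerasSGD
from zoo.pipeline.api.net.utils import to_bigdl_optim_method

# The Keras SGD fields map onto bigdl.optim.optimizer.SGD:
# lr -> learningrate, decay -> learningrate_decay, momentum and nesterov as-is
bopt = to_bigdl_optim_method(KerasSGD(lr=0.01, decay=1e-4, momentum=0.9, nesterov=True))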