How to use the elephas.optimizers.Optimizer class in elephas

To help you get started, we've selected a few elephas examples based on popular ways the library is used in public projects. All of the snippets below are excerpts from elephas/optimizers.py in the maxpumperla/elephas repository.
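
Before the excerpts, here is a minimal sketch of how these numpy-based optimizers are typically driven. It assumes the classes shown below can be imported from elephas.optimizers; the toy arrays and the identity constraint are invented for illustration.

import numpy as np
from elephas.optimizers import Adagrad  # assumed import path for the classes excerpted below

# params, constraints and grads are parallel lists: one weight array, one
# constraint callable and one gradient array per parameter, matching the
# get_updates(params, constraints, grads) signature seen in the excerpts.
params = [np.zeros((3, 2))]
constraints = [lambda w: w]            # identity constraint, purely illustrative
grads = [np.random.rand(3, 2)]

opt = Adagrad(lr=0.01)
new_weights = opt.get_updates(params, constraints, grads)
print(opt.get_config())                # e.g. {'class_name': 'Adagrad', 'lr': 0.01, 'epsilon': 1e-06}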


github maxpumperla / elephas / elephas / optimizers.py
            new_a = self.rho * a + (1 - self.rho) * g ** 2
            self.updates.append((a, new_a))

            new_p = p - self.lr * g / np.sqrt(new_a + self.epsilon)
            new_weights.append(c(new_p))

        return new_weights

    def get_config(self):
        return {"class_name": self.__class__.__name__,
                "lr": float(self.lr),
                "rho": float(self.rho),
                "epsilon": self.epsilon}


class Adagrad(Optimizer):
    """Reference: http://www.magicbroom.info/Papers/DuchiHaSi10.pdf
    """

    def __init__(self, lr=0.01, epsilon=1e-6, *args, **kwargs):
        super(Adagrad, self).__init__(**kwargs)
        self.__dict__.update(locals())
        self.lr = lr

    def get_updates(self, params, constraints, grads):
        accumulators = [np.zeros_like(p) for p in params]
        new_weights = []
        for p, g, a, c in zip(params, grads, accumulators, constraints):
            new_a = a + g ** 2
            new_p = p - self.lr * g / np.sqrt(new_a + self.epsilon)
            new_weights.append(new_p)
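
Note how the RMSprop loop above applies each constraint as a plain callable on the updated weights (c(new_p)), while the Adagrad loop appends new_p unmodified. A constraint is therefore just a function from a weight array to a weight array; the non-negativity projection below is an invented example, not something elephas ships.

import numpy as np

def non_negative(w):
    # Illustrative constraint: project the updated weights onto the non-negative orthant.
    return np.maximum(w, 0.0)

# One callable per parameter, in the order expected by get_updates(params, constraints, grads).
constraints = [non_negative]
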
github maxpumperla / elephas / elephas / optimizers.py
        if hasattr(self, 'clipnorm') and self.clipnorm > 0:
            norm = K.sqrt(sum([K.sum(g ** 2) for g in grads]))
            grads = [clip_norm(g, self.clipnorm, norm) for g in grads]

        if hasattr(self, 'clipvalue') and self.clipvalue > 0:
            grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]

        return K.shared(grads)

    def get_config(self):
        """ Get configuration dictionary """
        return {"class_name": self.__class__.__name__}


class SGD(Optimizer):
    """SGD, optionally with nesterov momentum """

    def __init__(self, lr=0.01, momentum=0., decay=0.,
                 nesterov=False, *args, **kwargs):
        super(SGD, self).__init__(**kwargs)
        self.__dict__.update(locals())
        self.iterations = 0
        self.lr = lr
        self.momentum = momentum
        self.decay = decay

    def get_updates(self, params, constraints, grads):
        lr = self.lr * (1.0 / (1.0 + self.decay * self.iterations))
        self.updates = [(self.iterations, self.iterations + 1.)]
        new_weights = []
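
The clipping branch above only runs when the optimizer instance carries a clipnorm or clipvalue attribute, and every subclass forwards **kwargs to the base Optimizer. Below is a hedged sketch of enabling both clips at construction time, assuming the base class stores unrecognised keyword arguments as attributes (the hasattr checks suggest this, but the excerpt does not show it).

from elephas.optimizers import SGD

# clipnorm rescales gradients whose global L2 norm exceeds 1.0;
# clipvalue then clamps individual gradient entries to [-0.5, 0.5].
# Assumes the base Optimizer keeps these keyword arguments as attributes.
opt = SGD(lr=0.01, momentum=0.9, nesterov=True, clipnorm=1.0, clipvalue=0.5)
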
github maxpumperla / elephas / elephas / optimizers.py
        accumulators = [np.zeros_like(p) for p in params]
        new_weights = []
        for p, g, a, c in zip(params, grads, accumulators, constraints):
            new_a = a + g ** 2
            new_p = p - self.lr * g / np.sqrt(new_a + self.epsilon)
            new_weights.append(new_p)

        return new_weights

    def get_config(self):
        return {"class_name": self.__class__.__name__,
                "lr": float(self.lr),
                "epsilon": self.epsilon}


class Adadelta(Optimizer):
    """Reference: http://arxiv.org/abs/1212.5701
    """

    def __init__(self, lr=1.0, rho=0.95, epsilon=1e-6, *args, **kwargs):
        super(Adadelta, self).__init__(**kwargs)
        self.__dict__.update(locals())
        self.lr = lr

    def get_updates(self, params, constraints, grads):
        accumulators = [np.zeros_like(p) for p in params]
        delta_accumulators = [np.zeros_like(p) for p in params]
        new_weights = []

        for p, g, a, d_a, c in zip(params, grads, accumulators,
                                   delta_accumulators, constraints):
            new_a = self.rho * a + (1 - self.rho) * g ** 2
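
The two accumulator rules visible in these excerpts behave quite differently: Adagrad's new_a = a + g ** 2 grows without bound, so the effective step size only ever shrinks, whereas the new_a = self.rho * a + (1 - self.rho) * g ** 2 rule shared by Adadelta and RMSprop is an exponential moving average that can forget old gradients. A self-contained numpy comparison with a made-up constant gradient:

import numpy as np

rho = 0.95
g = np.array([1.0])                          # constant unit gradient, purely illustrative
a_adagrad = np.zeros(1)
a_ema = np.zeros(1)

for step in range(100):
    a_adagrad = a_adagrad + g ** 2               # Adagrad: running sum of squared gradients
    a_ema = rho * a_ema + (1 - rho) * g ** 2     # Adadelta/RMSprop: EMA of squared gradients

print(a_adagrad)   # ~100.0: keeps growing, so lr / sqrt(a) keeps shrinking
print(a_ema)       # ~0.99: saturates near the mean squared gradient
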
github maxpumperla / elephas / elephas / optimizers.py
            div = np.sqrt(new_a + self.epsilon)
            update = g * np.sqrt(d_a + self.epsilon) / div
            new_p = p - self.lr * update
            self.updates.append((p, c(new_p)))  # apply constraints

            new_weights.append(new_p)
        return new_weights

    def get_config(self):
        return {"class_name": self.__class__.__name__,
                "lr": float(self.lr),
                "rho": self.rho,
                "epsilon": self.epsilon}


class Adam(Optimizer):
    """Reference: http://arxiv.org/abs/1412.6980v8
    Default parameters follow those provided in the original paper.
    """

    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-8, *args, **kwargs):
        super(Adam, self).__init__(**kwargs)
        self.__dict__.update(locals())
        self.iterations = 0
        self.lr = lr

    def get_updates(self, params, constraints, grads):
        new_weights = []

        t = self.iterations + 1
        lr_t = self.lr * np.sqrt(1-self.beta_2**t)/(1-self.beta_1**t)
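
The last line of the Adam excerpt is the bias-corrected step size from the referenced paper, lr_t = lr * sqrt(1 - beta_2**t) / (1 - beta_1**t). A quick numeric check with the default hyperparameters shown above:

import numpy as np

lr, beta_1, beta_2 = 0.001, 0.9, 0.999
for t in (1, 10, 1000):
    lr_t = lr * np.sqrt(1 - beta_2 ** t) / (1 - beta_1 ** t)
    print(t, lr_t)
# t=1    -> ~0.000316 (strong correction while the moment estimates are still biased toward zero)
# t=10   -> ~0.000153
# t=1000 -> ~0.000795 (tends toward the base lr of 0.001 as the bias vanishes)
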
github maxpumperla / elephas / elephas / optimizers.py
                new_p = p + self.momentum * v - lr * g
            else:
                new_p = p + v
            new_weights.append(c(new_p))

        return new_weights

    def get_config(self):
        return {"class_name": self.__class__.__name__,
                "lr": float(self.lr),
                "momentum": float(self.momentum),
                "decay": float(self.decay),
                "nesterov": self.nesterov}


class RMSprop(Optimizer):
    """Reference: www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf
    """

    def __init__(self, lr=0.001, rho=0.9, epsilon=1e-6, *args, **kwargs):
        super(RMSprop, self).__init__(**kwargs)
        self.__dict__.update(locals())
        self.lr = lr
        self.rho = rho

    def get_updates(self, params, constraints, grads):
        accumulators = [np.zeros_like(p) for p in params]
        new_weights = []

        for p, g, a, c in zip(params, grads, accumulators, constraints):
            new_a = self.rho * a + (1 - self.rho) * g ** 2
            self.updates.append((a, new_a))
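
The SGD pieces shown above combine optional Nesterov momentum with the time-based learning-rate decay from an earlier excerpt, lr = self.lr * (1.0 / (1.0 + self.decay * self.iterations)). A small sketch of how the decay alone evolves; the constructor arguments mirror the ones shown above, but the concrete numbers are invented.

from elephas.optimizers import SGD

opt = SGD(lr=0.1, momentum=0.9, decay=0.01, nesterov=True)

# Effective learning rate after a given number of parameter updates,
# reproducing the first line of SGD.get_updates above.
for iterations in (0, 10, 100):
    lr = opt.lr * (1.0 / (1.0 + opt.decay * iterations))
    print(iterations, lr)
# 0   -> 0.1
# 10  -> ~0.0909
# 100 -> 0.05
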
github maxpumperla / elephas / elephas / optimizers.py
                (it will be wrapped as a Keras Optimizer).
    # Returns
        A Keras Optimizer instance.
    # Raises
        ValueError: If `identifier` cannot be interpreted.
    """
    if K.backend() == 'tensorflow':
        # Wrap TF optimizer instances
        if isinstance(identifier, tf.train.Optimizer):
            return TFOptimizer(identifier)
    if isinstance(identifier, dict):
        return deserialize(identifier)
    elif isinstance(identifier, six.string_types):
        config = {'class_name': str(identifier), 'config': {}}
        return deserialize(config)
    if isinstance(identifier, Optimizer):
        return identifier
    else:
        raise ValueError('Could not interpret optimizer identifier:',
                         identifier)
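
The last excerpt starts mid-docstring, but the body makes the dispatch clear: dictionaries go through deserialize, strings are first wrapped into a {'class_name': ..., 'config': {}} dictionary, existing Optimizer instances are returned unchanged, and on the TensorFlow backend a raw tf.train.Optimizer is wrapped in TFOptimizer. A hedged usage sketch follows; the snippet cuts off before the def line, so the helper's name and import path are assumptions.

from elephas.optimizers import get  # assumed name and import path for the helper above

opt_a = get('SGD')                                  # by name: default configuration
opt_b = get({'class_name': 'SGD', 'config': {}})    # by configuration dictionary
opt_c = get(opt_b)                                  # an Optimizer instance passes through unchanged
# Anything else raises ValueError('Could not interpret optimizer identifier: ...')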