How to use the larq.quantizers module in larq

To help you get started, we've selected a few examples based on popular ways larq.quantizers is used in public projects.


From larq/zoo, larq_zoo/literature/xnornet.py:
import tensorflow as tf
import larq as lq


def xnor_weight_scale(x):
    """
    Clips the weights between -1 and +1 and then calculates a scale factor per
    weight filter. See https://arxiv.org/abs/1603.05279 for more details.
    """
    x = tf.clip_by_value(x, -1, 1)
    alpha = tf.reduce_mean(tf.abs(x), axis=[0, 1, 2], keepdims=True)
    return alpha * lq.quantizers.ste_sign(x)
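
A custom quantizer like this can be passed anywhere larq accepts one, for example as the kernel quantizer of a quantized layer. Below is a minimal sketch, assuming a single QuantConv2D layer; the filter count, kernel size, and input shape are arbitrary illustration values:

import tensorflow as tf
import larq as lq

# Sketch: plug the custom quantizer into a quantized convolution.
layer = lq.layers.QuantConv2D(
    filters=64,
    kernel_size=3,
    kernel_quantizer=xnor_weight_scale,
    kernel_constraint="weight_clip",  # keeps latent weights in [-1, 1]
    input_quantizer="ste_sign",
)
outputs = layer(tf.random.normal([1, 32, 32, 3]))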
From larq/larq, larq/layers_base.py:
def __init__(
        self,
        *args,
        input_quantizer=None,
        depthwise_quantizer=None,
        pointwise_quantizer=None,
        metrics=None,
        **kwargs,
    ):
        self.input_quantizer = quantizers.get(input_quantizer)
        self.depthwise_quantizer = quantizers.get(depthwise_quantizer)
        self.pointwise_quantizer = quantizers.get(pointwise_quantizer)
        self._custom_metrics = (
            metrics if metrics is not None else lq_metrics.get_training_metrics()
        )

        super().__init__(*args, **kwargs)
        if depthwise_quantizer and not self.depthwise_constraint:
            log.warning(
                "Using `depthwise_quantizer` without setting `depthwise_constraint` "
                "may result in starved weights (where the gradient is always zero)."
            )
        if pointwise_quantizer and not self.pointwise_constraint:
            log.warning(
                "Using `pointwise_quantizer` without setting `pointwise_constraint` "
                "may result in starved weights (where the gradient is always zero)."
From larq/zoo, larq_zoo/literature/resnet_e.py:
def kernel_quantizer(self):
    return lq.quantizers.SteSign(clip_value=1.25)
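
SteSign binarizes its input to ±1 in the forward pass and uses a clipped straight-through estimator in the backward pass; clip_value controls where the pass-through gradient is cut off. A small sketch of this behaviour, with illustrative input values:

import tensorflow as tf
import larq as lq

quantizer = lq.quantizers.SteSign(clip_value=1.25)

x = tf.constant([-2.0, -0.3, 0.0, 0.7, 2.0])
y = quantizer(x)  # forward pass: sign of each element -> [-1, -1, 1, 1, 1]

with tf.GradientTape() as tape:
    tape.watch(x)
    y = quantizer(x)
grad = tape.gradient(y, x)  # straight-through: 1 where |x| <= 1.25, else 0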