How to use the larq.layers module in larq

To help you get started, we've selected a few larq.layers examples based on popular ways the module is used in public projects. All snippets assume the usual aliases, import tensorflow as tf and import larq as lq.


From larq/larq, larq/testing_utils.py:
def get_small_bnn_model(input_dim, num_hidden, output_dim, trainable_bn=True):
    model = tf.keras.models.Sequential()
    model.add(
        lq.layers.QuantDense(
            units=num_hidden,
            kernel_quantizer="ste_sign",
            kernel_constraint="weight_clip",
            activation="relu",
            input_shape=(input_dim,),
            use_bias=False,
        )
    )
    model.add(tf.keras.layers.BatchNormalization(trainable=trainable_bn))
    model.add(
        lq.layers.QuantDense(
            units=output_dim,
            kernel_quantizer="ste_sign",
            kernel_constraint="weight_clip",
            input_quantizer="ste_sign",
            activation="softmax",
From larq/zoo, larq_zoo/literature/dorefanet.py:
def conv_block(
        self, x, filters, kernel_size, strides=1, pool=False, pool_padding="same"
    ):
        x = lq.layers.QuantConv2D(
            filters,
            kernel_size=kernel_size,
            strides=strides,
            padding="same",
            input_quantizer=self.input_quantizer,
            kernel_quantizer=self.kernel_quantizer,
            kernel_constraint=self.kernel_constraint,
            use_bias=False,
        )(x)
        x = tf.keras.layers.BatchNormalization(scale=False, momentum=0.9, epsilon=1e-4)(
            x
        )
        if pool:
            x = tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding=pool_padding)(
                x
            )
        return x
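Unlike the fully binarized models below, this block reads its quantizers from instance attributes rather than hard-coding "ste_sign". larq ships a DoReFa quantizer (lq.quantizers.DoReFaQuantizer), and the attributes above are plausibly bound to something like it; a hedged sketch, not the actual DoReFaNet configuration:

import larq as lq

# Hedged sketch: plausible values for the quantizer attributes used above
# (the exact bit widths and arguments in DoReFaNet may differ).
input_quantizer = lq.quantizers.DoReFaQuantizer(k_bit=2)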
From larq/zoo, larq_zoo/binarynet.py:
def conv_block(
        x,
        features,
        kernel_size,
        strides=1,
        pool=False,
        first_layer=False,
        no_inflation=False,
    ):
        x = lq.layers.QuantConv2D(
            features * (1 if no_inflation else hparams.inflation_ratio),
            kernel_size=kernel_size,
            strides=strides,
            padding="same",
            input_quantizer=None if first_layer else "ste_sign",
            kernel_quantizer="ste_sign",
            kernel_constraint="weight_clip",
            use_bias=False,
        )(x)
        if pool:
            x = tf.keras.layers.MaxPool2D(pool_size=3, strides=2)(x)
        x = tf.keras.layers.BatchNormalization(scale=False, momentum=0.9)(x)
        return x
From larq/zoo, larq_zoo/binarynet.py:
def dense_block(x, units):
        x = lq.layers.QuantDense(units, **kwhparams)(x)
        x = tf.keras.layers.BatchNormalization(scale=False, momentum=0.9)(x)
        return x
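kwhparams is defined elsewhere in binarynet.py. Judging from the conv_block in the same file, it plausibly bundles the shared binarization arguments; a hedged guess, not the actual definition:

# Hedged guess at kwhparams, inferred from conv_block above; the real
# definition in binarynet.py may differ.
kwhparams = dict(
    input_quantizer="ste_sign",
    kernel_quantizer="ste_sign",
    kernel_constraint="weight_clip",
    use_bias=False,
)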
From larq/zoo, larq_zoo/literature/binary_alex_net.py:
def dense_block(self, x: tf.Tensor, units: int) -> tf.Tensor:
        x = lq.layers.QuantDense(
            units,
            input_quantizer=self.input_quantizer,
            kernel_quantizer=self.kernel_quantizer,
            kernel_constraint=self.kernel_constraint,
            use_bias=False,
        )(x)
        return tf.keras.layers.BatchNormalization(scale=False, momentum=0.9)(x)
From larq/larq, larq/models.py:
import itertools
from dataclasses import dataclass

import numpy as np
import tensorflow.keras.layers as keras_layers
from terminaltables import AsciiTable

import larq.layers as lq_layers

__all__ = ["summary"]

op_count_supported_layer_types = (
    lq_layers.QuantConv2D,
    lq_layers.QuantSeparableConv2D,
    lq_layers.QuantDepthwiseConv2D,
    lq_layers.QuantDense,
    keras_layers.Conv2D,
    keras_layers.SeparableConv2D,
    keras_layers.DepthwiseConv2D,
    keras_layers.Dense,
    keras_layers.Flatten,
    keras_layers.BatchNormalization,
    keras_layers.MaxPool2D,
    keras_layers.AveragePooling2D,
)

mac_containing_layers = (
    lq_layers.QuantConv2D,
    lq_layers.QuantSeparableConv2D,
    # ... remaining entries truncated in this excerpt
)
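Elsewhere in models.py these tuples presumably drive isinstance checks while summary walks a model's layers; a minimal sketch of that pattern (the helper name here is hypothetical):

def _supports_op_count(layer) -> bool:
    # Hypothetical helper: only whitelisted layer types get op counts in
    # the printed summary table.
    return isinstance(layer, op_count_supported_layer_types)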
From larq/zoo, larq_zoo/literature/xnornet.py:
        # (excerpt starts mid-model: the opening of this first, full-precision
        # convolution on self.image_input is truncated)
        )(self.image_input)

        x = tf.keras.layers.BatchNormalization(momentum=0.9, scale=False, epsilon=1e-5)(
            x
        )
        x = tf.keras.layers.Activation("relu")(x)
        x = tf.keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2))(x)
        x = tf.keras.layers.BatchNormalization(momentum=0.9, scale=False, epsilon=1e-4)(
            x
        )
        x = lq.layers.QuantConv2D(256, (5, 5), padding="same", **quant_conv_kwargs)(x)
        x = tf.keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2))(x)
        x = tf.keras.layers.BatchNormalization(momentum=0.9, scale=False, epsilon=1e-4)(
            x
        )
        x = lq.layers.QuantConv2D(384, (3, 3), padding="same", **quant_conv_kwargs)(x)
        x = tf.keras.layers.BatchNormalization(momentum=0.9, scale=False, epsilon=1e-4)(
            x
        )
        x = lq.layers.QuantConv2D(384, (3, 3), padding="same", **quant_conv_kwargs)(x)
        x = tf.keras.layers.BatchNormalization(momentum=0.9, scale=False, epsilon=1e-4)(
            x
        )
        x = lq.layers.QuantConv2D(256, (3, 3), padding="same", **quant_conv_kwargs)(x)
        x = tf.keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2))(x)
        x = tf.keras.layers.BatchNormalization(momentum=0.9, scale=False, epsilon=1e-4)(
            x
        )
        x = lq.layers.QuantConv2D(4096, (6, 6), padding="valid", **quant_conv_kwargs)(x)
        x = tf.keras.layers.BatchNormalization(momentum=0.9, scale=False, epsilon=1e-4)(
            x
        )
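quant_conv_kwargs is defined earlier in xnornet.py and collects the recurring quantized-convolution arguments. A hedged guess at its shape, by analogy with the other models on this page; the real definition differs in detail, since XNOR-Net scales its binarized kernels:

# Hedged guess at quant_conv_kwargs; not the actual xnornet.py definition.
quant_conv_kwargs = dict(
    input_quantizer="ste_sign",
    kernel_quantizer="ste_sign",
    kernel_constraint="weight_clip",
    use_bias=False,
)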
From larq/zoo, larq_zoo/literature/resnet_e.py:
        if downsample:
            residual = tf.keras.layers.AvgPool2D(pool_size=2, strides=2)(x)
            residual = tf.keras.layers.Conv2D(
                filters,
                kernel_size=1,
                use_bias=False,
                kernel_initializer="glorot_normal",
            )(residual)
            residual = tf.keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5)(
                residual
            )
        else:
            residual = x

        x = lq.layers.QuantConv2D(
            filters,
            kernel_size=3,
            strides=strides,
            padding="same",
            input_quantizer=self.input_quantizer,
            kernel_quantizer=self.kernel_quantizer,
            kernel_constraint=self.kernel_constraint,
            kernel_initializer="glorot_normal",
            use_bias=False,
        )(x)
        x = tf.keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5)(x)

        return tf.keras.layers.add([x, residual])
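The shortcut branch exists to keep shapes in sync: average-pooling halves the spatial dimensions and the 1x1 convolution matches the filter count, so the residual can be added to the strided main path. A small self-contained illustration of that bookkeeping, with shapes chosen arbitrarily:

import tensorflow as tf

inputs = tf.keras.Input(shape=(56, 56, 64))
residual = tf.keras.layers.AvgPool2D(pool_size=2, strides=2)(inputs)
residual = tf.keras.layers.Conv2D(128, kernel_size=1, use_bias=False)(residual)
print(residual.shape)  # (None, 28, 28, 128), matching a strides=2 main path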
From larq/zoo, larq_zoo/literature/densenet.py:
def densely_connected_block(self, x: tf.Tensor, dilation_rate: int = 1):
        y = tf.keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5)(x)
        y = lq.layers.QuantConv2D(
            filters=self.growth_rate,
            kernel_size=3,
            dilation_rate=dilation_rate,
            input_quantizer=self.input_quantizer,
            kernel_quantizer=self.kernel_quantizer,
            kernel_initializer="glorot_normal",
            kernel_constraint=self.kernel_constraint,
            padding="same",
            use_bias=False,
        )(y)
        return tf.keras.layers.concatenate([x, y])
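Each call to this block concatenates growth_rate new feature maps onto its input, so the channel count grows linearly as blocks are stacked. A small illustration of that effect using plain Keras layers, with shapes chosen arbitrarily:

import tensorflow as tf

growth_rate = 64
inputs = tf.keras.Input(shape=(28, 28, 256))
new_features = tf.keras.layers.Conv2D(
    growth_rate, kernel_size=3, padding="same", use_bias=False
)(inputs)
out = tf.keras.layers.concatenate([inputs, new_features])
print(out.shape)  # (None, 28, 28, 320): input channels plus growth_rate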