How to use larq - 10 common examples

To help you get started, we’ve selected ten larq examples based on popular ways the library is used in public projects.
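Before working through the excerpts below, here is a minimal, self-contained sketch of the pattern most of them share: binarized QuantConv2D/QuantDense layers with "ste_sign" quantizers and "weight_clip" constraints, batch normalization in between, and lq.models.summary to inspect the result. The layer sizes, input shape, optimizer and loss are illustrative assumptions, not taken from any of the projects below.

import tensorflow as tf
import larq as lq

# Quantization settings shared by the binarized layers below.
quant_kwargs = dict(
    input_quantizer="ste_sign",
    kernel_quantizer="ste_sign",
    kernel_constraint="weight_clip",
    use_bias=False,
)

model = tf.keras.models.Sequential(
    [
        # First layer keeps real-valued inputs; only its kernel is quantized.
        lq.layers.QuantConv2D(
            32,
            3,
            kernel_quantizer="ste_sign",
            kernel_constraint="weight_clip",
            use_bias=False,
            input_shape=(28, 28, 1),
        ),
        tf.keras.layers.MaxPool2D(2),
        tf.keras.layers.BatchNormalization(scale=False),
        lq.layers.QuantConv2D(64, 3, padding="same", **quant_kwargs),
        tf.keras.layers.MaxPool2D(2),
        tf.keras.layers.BatchNormalization(scale=False),
        tf.keras.layers.Flatten(),
        lq.layers.QuantDense(10, activation="softmax", **quant_kwargs),
    ]
)

model.compile(
    optimizer="adam",
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"],
)
lq.models.summary(model)  # larq's layer-wise summary, used in several excerpts below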

github plumerai / rethinking-bnn-optimization / bnn_optimization / train.py
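    # Load the datasets on the CPU, build and compile the model inside a distribution
    # scope, print a larq summary, optionally resume from a checkpoint, then train.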
    # ... (construction of the callbacks list is truncated in this excerpt)

    with tf.device("/cpu:0"):
        train_data = dataset.train_data(hparams.batch_size)
        validation_data = dataset.validation_data(hparams.batch_size)

    with utils.get_distribution_scope(hparams.batch_size):
        model = build_model(hparams, **dataset.preprocessing.kwargs)
        model.compile(
            optimizer=hparams.optimizer,
            loss="categorical_crossentropy",
            metrics=["categorical_accuracy", "top_k_categorical_accuracy"],
        )
        lq.models.summary(model)

        if initial_epoch > 0:
            model.load_weights(model_path)
            click.echo(f"Loaded model from epoch {initial_epoch}")

    model.fit(
        train_data,
        epochs=hparams.epochs,
        steps_per_epoch=dataset.train_examples // hparams.batch_size,
        validation_data=validation_data,
        validation_steps=dataset.validation_examples // hparams.batch_size,
        verbose=2 if tensorboard else 1,
        initial_epoch=initial_epoch,
        callbacks=callbacks,
    )

github larq / larq / larq / testing_utils.py
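# Test helper: a small binarized Sequential model built from QuantDense layers with
# "ste_sign" quantizers and "weight_clip" constraints.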
def get_small_bnn_model(input_dim, num_hidden, output_dim, trainable_bn=True):
    model = tf.keras.models.Sequential()
    model.add(
        lq.layers.QuantDense(
            units=num_hidden,
            kernel_quantizer="ste_sign",
            kernel_constraint="weight_clip",
            activation="relu",
            input_shape=(input_dim,),
            use_bias=False,
        )
    )
    model.add(tf.keras.layers.BatchNormalization(trainable=trainable_bn))
    model.add(
        lq.layers.QuantDense(
            units=output_dim,
            kernel_quantizer="ste_sign",
            kernel_constraint="weight_clip",
            input_quantizer="ste_sign",
            activation="softmax",

github larq / zoo / larq_zoo / training / train.py
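        # Build the tf.data validation pipeline, compile the model inside a distribution
        # scope, print a larq summary, optionally resume from a checkpoint, then train.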
        # ... (the call that loads the raw datasets via self.preprocessing.decoders is truncated in this excerpt)
        validation_data = (
            validation_data.cache()
            .repeat()
            .map(self.preprocessing, num_parallel_calls=tf.data.experimental.AUTOTUNE)
            .batch(self.batch_size)
            .prefetch(1)
        )

        with utils.get_distribution_scope(self.batch_size):
            self.model.compile(
                optimizer=self.optimizer, loss=self.loss, metrics=self.metrics,
            )

            lq.models.summary(self.model)

            if initial_epoch > 0:
                self.model.load_weights(str(self.model_path))
                print(f"Loaded model from epoch {initial_epoch}.")

        click.secho(str(self))

        self.model.fit(
            train_data,
            epochs=self.epochs,
            steps_per_epoch=math.ceil(num_train_examples / self.batch_size),
            validation_data=validation_data,
            validation_steps=math.ceil(num_validation_examples / self.batch_size),
            validation_freq=self.validation_frequency,
            verbose=1 if self.use_progress_bar else 2,
            initial_epoch=initial_epoch,

github plumerai / rethinking-bnn-optimization / bnn_optimization / models / birealnet.py
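# Bi-Real-Net-style residual block: a QuantConv2D with an "approx_sign" input quantizer
# plus a real-valued shortcut that is downsampled when the filter count changes.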
def residual_block(x, double_filters=False, filters=None):
        assert not (double_filters and filters)

        # compute dimensions
        in_filters = x.get_shape().as_list()[-1]
        out_filters = filters or in_filters if not double_filters else 2 * in_filters

        shortcut = x
        if in_filters != out_filters:
            shortcut = tf.keras.layers.AvgPool2D(2, strides=2, padding="same")(shortcut)
            shortcut = tf.keras.layers.Conv2D(
                out_filters, 1, kernel_initializer="glorot_normal", use_bias=False,
            )(shortcut)
            shortcut = tf.keras.layers.BatchNormalization(momentum=0.8)(shortcut)

        x = lq.layers.QuantConv2D(
            out_filters,
            3,
            strides=1 if out_filters == in_filters else 2,
            padding="same",
            input_quantizer="approx_sign",
            kernel_quantizer=None,
            kernel_initializer="glorot_normal",
            kernel_constraint=None,
            use_bias=False,
        )(x)
        x = tf.keras.layers.BatchNormalization(momentum=0.8)(x)
        return tf.keras.layers.add([x, shortcut])

github larq / zoo / larq_zoo / literature / real_to_bin_nets.py
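        # Residual block from the real-to-binary networks: batch norm followed by a
        # QuantConv2D whose quantizers, constraint and regularizer come from instance
        # attributes; the shortcut connection handles any downsampling.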
        # ... (method signature and docstring truncated; the docstring notes that channel scaling follows Figure 1, Right)

        in_channels = x.shape[-1]
        out_channels = int(in_channels * 2 if downsample else in_channels)

        # Shortcut, which gets downsampled if necessary
        shortcut_add = self.shortcut_connection(x, name, in_channels, out_channels)

        # Batch Normalization
        conv_input = tf.keras.layers.BatchNormalization(
            momentum=self.momentum, name=f"{name}_batch_norm"
        )(x)

        # Convolution
        conv_output = lq.layers.QuantConv2D(
            out_channels,
            kernel_size=3,
            strides=2 if downsample else 1,
            padding="same",
            input_quantizer=self.input_quantizer,
            kernel_quantizer=self.kernel_quantizer,
            kernel_constraint=self.kernel_constraint,
            kernel_regularizer=self.kernel_regularizer
            if self.kernel_quantizer is None
            else None,
            kernel_initializer=self.kernel_initializer,
            use_bias=False,
            name=f"{name}_conv2d",
        )(conv_input)

        # binary convolution rescaling (truncated in this excerpt)

github plumerai / rethinking-bnn-optimization / bnn_optimization / models / binarynet.py
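# BinaryNet-style Sequential model; the first QuantConv2D quantizes only its kernel,
# leaving the input images real-valued.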
def binarynet(hparams, input_shape, num_classes):
    kwhparams = dict(
        input_quantizer="ste_sign",
        kernel_quantizer=hparams.kernel_quantizer,
        kernel_constraint=hparams.kernel_constraint,
        use_bias=False,
    )
    return tf.keras.models.Sequential(
        [
            # don't quantize inputs in first layer
            lq.layers.QuantConv2D(
                hparams.filters,
                hparams.kernel_size,
                kernel_quantizer=hparams.kernel_quantizer,
                kernel_constraint=hparams.kernel_constraint,
                use_bias=False,
                input_shape=input_shape,
            ),
            tf.keras.layers.BatchNormalization(scale=False),
            lq.layers.QuantConv2D(
                hparams.filters, hparams.kernel_size, padding="same", **kwhparams
            ),
            tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
            tf.keras.layers.BatchNormalization(scale=False),
            lq.layers.QuantConv2D(
                2 * hparams.filters, hparams.kernel_size, padding="same", **kwhparams
            ),

github larq / zoo / larq_zoo / literature / dorefanet.py
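# DoReFa-Net conv_block helper: QuantConv2D followed by batch normalization and
# optional max pooling.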
def conv_block(
        self, x, filters, kernel_size, strides=1, pool=False, pool_padding="same"
    ):
        x = lq.layers.QuantConv2D(
            filters,
            kernel_size=kernel_size,
            strides=strides,
            padding="same",
            input_quantizer=self.input_quantizer,
            kernel_quantizer=self.kernel_quantizer,
            kernel_constraint=self.kernel_constraint,
            use_bias=False,
        )(x)
        x = tf.keras.layers.BatchNormalization(scale=False, momentum=0.9, epsilon=1e-4)(
            x
        )
        if pool:
            x = tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding=pool_padding)(
                x
            )

github larq / zoo / larq_zoo / literature / birealnet.py
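        # Bi-Real Net residual block from larq-zoo, with an average-pooled 1x1 Conv2D
        # shortcut when the number of filters changes.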
        # ... (method signature truncated in this excerpt)
        in_filters = x.get_shape().as_list()[-1]
        out_filters = filters or in_filters if not double_filters else 2 * in_filters

        shortcut = x

        if in_filters != out_filters:
            shortcut = tf.keras.layers.AvgPool2D(2, strides=2, padding="same")(shortcut)
            shortcut = tf.keras.layers.Conv2D(
                out_filters,
                (1, 1),
                kernel_initializer=self.kernel_initializer,
                use_bias=False,
            )(shortcut)
            shortcut = tf.keras.layers.BatchNormalization(momentum=0.8)(shortcut)

        x = lq.layers.QuantConv2D(
            out_filters,
            (3, 3),
            strides=1 if out_filters == in_filters else 2,
            padding="same",
            input_quantizer=self.input_quantizer,
            kernel_quantizer=self.kernel_quantizer,
            kernel_initializer=self.kernel_initializer,
            kernel_constraint=self.kernel_constraint,
            use_bias=False,
        )(x)
        x = tf.keras.layers.BatchNormalization(momentum=0.8)(x)

        return tf.keras.layers.add([x, shortcut])

github larq / zoo / larq_zoo / literature / xnornet.py
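        # XNOR-Net feature extractor: stacked QuantConv2D + BatchNormalization blocks,
        # with 1x1 QuantConv2D layers standing in for dense layers in the classifier head.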
        # ... (earlier layers of the network are truncated in this excerpt)
        x = lq.layers.QuantConv2D(384, (3, 3), padding="same", **quant_conv_kwargs)(x)
        x = tf.keras.layers.BatchNormalization(momentum=0.9, scale=False, epsilon=1e-4)(
            x
        )
        x = lq.layers.QuantConv2D(384, (3, 3), padding="same", **quant_conv_kwargs)(x)
        x = tf.keras.layers.BatchNormalization(momentum=0.9, scale=False, epsilon=1e-4)(
            x
        )
        x = lq.layers.QuantConv2D(256, (3, 3), padding="same", **quant_conv_kwargs)(x)
        x = tf.keras.layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2))(x)
        x = tf.keras.layers.BatchNormalization(momentum=0.9, scale=False, epsilon=1e-4)(
            x
        )
        x = lq.layers.QuantConv2D(4096, (6, 6), padding="valid", **quant_conv_kwargs)(x)
        x = tf.keras.layers.BatchNormalization(momentum=0.9, scale=False, epsilon=1e-4)(
            x
        )

        if self.include_top:
            # Equivalent to a dense layer
            x = lq.layers.QuantConv2D(
                4096, (1, 1), strides=(1, 1), padding="valid", **quant_conv_kwargs
            )(x)
            x = tf.keras.layers.BatchNormalization(
                momentum=0.9, scale=False, epsilon=1e-3
            )(x)
            x = tf.keras.layers.Activation("relu")(x)
            x = tf.keras.layers.Flatten()(x)
            x = tf.keras.layers.Dense(
                self.num_classes,

github larq / zoo / larq_zoo / binarynet.py
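# conv_block helper that scales the filter count by an inflation ratio and skips
# input quantization in the first layer.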
def conv_block(
        x,
        features,
        kernel_size,
        strides=1,
        pool=False,
        first_layer=False,
        no_inflation=False,
    ):
        x = lq.layers.QuantConv2D(
            features * (1 if no_inflation else hparams.inflation_ratio),
            kernel_size=kernel_size,
            strides=strides,
            padding="same",
            input_quantizer=None if first_layer else "ste_sign",
            kernel_quantizer="ste_sign",
            kernel_constraint="weight_clip",
            use_bias=False,
        )(x)
        if pool:
            x = tf.keras.layers.MaxPool2D(pool_size=3, strides=2)(x)
        x = tf.keras.layers.BatchNormalization(scale=False, momentum=0.9)(x)
        return x