How to use the larq.quantized_variable.QuantizedVariable class in larq

To help you get started, we have selected a few larq examples based on popular ways QuantizedVariable is used in public projects.

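If you just want to wrap an existing variable, the typical entry point is QuantizedVariable.from_variable, which also appears in the snippets below. The following is a minimal sketch rather than verbatim library code: it assumes larq's SteSign quantizer, and whether a read returns the latent or the fake-quantized value depends on larq's quantized scope.

import tensorflow as tf
import larq as lq
from larq.quantized_variable import QuantizedVariable

# A float "latent" variable and a fake-quantized view of it.
latent = tf.Variable([[0.3, -0.7], [1.2, -0.1]], name="kernel")
quantized = QuantizedVariable.from_variable(latent, lq.quantizers.SteSign())

# The wrapper can be used wherever a tf.Tensor is expected; checkpointing is
# delegated to the float latent variable (see the snippet below).
y = tf.matmul(quantized, tf.ones([2, 1]))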

From larq/quantized_variable.py in the larq/larq repository on GitHub:
    def _gather_saveables_for_checkpoint(self):
        # By delegating to the wrapped variable, models checkpointed with
        # QuantizedVariables can be restored on models with normal variables,
        # and vice versa.
        return self.latent_variable._gather_saveables_for_checkpoint()

    # TODO: Maybe encode the fact that the variable is a QuantizedVariable in to_proto().
    def to_proto(self, *args, **kwargs):
        return self.latent_variable.to_proto(*args, **kwargs)

    def from_proto(self, *args, **kwargs):
        return self.latent_variable.from_proto(*args, **kwargs)

    def _as_graph_element(self):
        return self._quantize(self.latent_variable._as_graph_element())


QuantizedVariable._OverloadAllOperators()
tf.register_tensor_conversion_function(
    QuantizedVariable, QuantizedVariable._dense_var_to_tensor
)
ops.register_dense_tensor_like_type(QuantizedVariable)
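The delegation of _gather_saveables_for_checkpoint above is what makes checkpoints interchangeable between wrapped and plain variables. A hedged sketch of that behaviour (path and values are illustrative):

import tensorflow as tf
import larq as lq
from larq.quantized_variable import QuantizedVariable

latent = tf.Variable([0.3, -0.7])
qv = QuantizedVariable.from_variable(latent, lq.quantizers.SteSign())

# Saving through the wrapper writes the float latent values ...
path = tf.train.Checkpoint(w=qv).save("/tmp/quantized_ckpt")

# ... so the checkpoint can be restored into an ordinary variable, and vice versa.
plain = tf.Variable([0.0, 0.0])
tf.train.Checkpoint(w=plain).restore(path)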
From larq/layers_base.py in the larq/larq repository on GitHub:
        def getter(*args, **kwargs):
            # Create the variable with the layer's original getter, then wrap it
            # so reads can return fake-quantized values.
            variable = old_getter(*args, **kwargs)
            return QuantizedVariable.from_variable(variable, quantizer)
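This custom getter is what larq's quantized layers install when they create their weights, so in normal use you rarely construct a QuantizedVariable yourself. A sketch of the effect, assuming the standard larq layer and quantizer names (the isinstance check is illustrative):

import larq as lq
from larq.quantized_variable import QuantizedVariable

layer = lq.layers.QuantDense(4, kernel_quantizer="ste_sign", use_bias=False)
layer.build((None, 8))

# The getter wrapped the kernel at creation time: reads of layer.kernel can
# return fake-quantized values (depending on larq's quantized scope), while
# the float latent weights are what the optimizer updates.
assert isinstance(layer.kernel, QuantizedVariable)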
From larq/quantized_variable.py in the larq/larq repository on GitHub:
        value instead of a MirroredVariable. So we cannot properly wrap it in a
        QuantizedVariable. We return the original variable in that case.

        # Arguments
        variable: A tf.Variable or op.
        quantizer: An optional quantizer to transform the floating-point variable to a
            fake quantized variable.
        precision: An optional integer defining the precision of the quantized variable.
            If `None`, `quantizer.precision` is used.
        wrap: A boolean to define whether to wrap the variable in a QuantizedVariable.

        # Returns
        A QuantizedVariable if wrap is True and variable is a resource variable.
        """
        if wrap and resource_variable_ops.is_resource_variable(variable):
            return QuantizedVariable.from_variable(variable, quantizer, precision)
        return variable
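For completeness, a sketch of what the wrapping helper above implies for an ordinary TF2 (resource) variable; the latent_variable attribute follows the snippets above, and the exact API may vary between larq versions:

import tensorflow as tf
import larq as lq
from larq.quantized_variable import QuantizedVariable

latent = tf.Variable(tf.random.uniform((3, 3)))
qv = QuantizedVariable.from_variable(latent, lq.quantizers.SteSign(), precision=1)

assert isinstance(qv, QuantizedVariable)
# The wrapper keeps the float variable around for checkpointing and updates
# (assumed to be the same object that was passed in).
assert qv.latent_variable is latent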