How to use the tensorflowjs.quantization module in tensorflowjs

To help you get started, we’ve selected a few tensorflowjs.quantization examples, based on popular ways the module is used in public projects.

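Every example below leans on the tensorflowjs.quantization module, which maps float32 weights onto 1- or 2-byte unsigned integers and back. Before diving into the project snippets, here is a minimal round-trip sketch, assuming the quantize_weights/dequantize_weights signatures and the bytes-to-dtype mapping that the snippets below rely on:

import numpy as np
from tensorflowjs import quantization

weights = np.array([0, -0.1, 1.2], 'float32')

# Pick a target dtype by byte width; the mapping is assumed to contain
# {1: np.uint8, 2: np.uint16} in this version of tensorflowjs.
target_dtype = quantization.QUANTIZATION_BYTES_TO_DTYPES[1]

# Affine quantization: returns the integer array plus the (scale, min)
# pair needed to undo it.
quantized, scale, min_val = quantization.quantize_weights(weights, target_dtype)

# Map back to float32; values are recovered up to quantization error.
restored = quantization.dequantize_weights(quantized, scale, min_val, np.float32)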

github maru-labo / doodle / tools / convert_tfjs.py
      output_dir=output_dir,
      saved_model_tags=','.join(tags),
      quantization_dtype=quantization_dtype,
      skip_op_check=skip_op_check,
      strip_debug_ops=strip_debug_ops)

if __name__ == '__main__':
  import argparse
  p = argparse.ArgumentParser()
  p.add_argument('savedmodel_dir')
  p.add_argument('output_dir')
  p.add_argument('--tags', default=DEFAULT_TAGS)
  p.add_argument('--signature', default=DEFAULT_SIGNATURE)
  p.add_argument('--inputs', default=DEFAULT_INPUTS)
  p.add_argument('--outputs', default=DEFAULT_OUTPUTS)
  p.add_argument('--quantization_bytes', type=int,
                 choices=set(quantization.QUANTIZATION_BYTES_TO_DTYPES.keys()))
  # store_true makes these proper boolean switches rather than
  # string-valued options.
  p.add_argument('--skip_op_check', action='store_true', default=False)
  p.add_argument('--strip_debug_ops', action='store_true', default=True)
  args = p.parse_args()

  quantization_dtype = (
    quantization.QUANTIZATION_BYTES_TO_DTYPES[args.quantization_bytes]
    if args.quantization_bytes else None)
  convert_to_tfjs(
    args.savedmodel_dir,
    args.output_dir,
    args.tags,
    args.signature,
    args.inputs,
    args.outputs,
    quantization_dtype,
    args.skip_op_check,
    args.strip_debug_ops
  )
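
The --quantization_bytes choices above come straight from quantization.QUANTIZATION_BYTES_TO_DTYPES. A small sketch of inspecting that mapping (its exact contents are an assumption, not shown in this snippet):

import numpy as np
from tensorflowjs import quantization

# Expected to list the 1- and 2-byte options, e.g. uint8 and uint16.
for n_bytes, dtype in sorted(quantization.QUANTIZATION_BYTES_TO_DTYPES.items()):
  print('--quantization_bytes %d stores weights as %s'
        % (n_bytes, np.dtype(dtype).name))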
github tensorflow / tfjs-converter / python / tensorflowjs / converters / converter.py
    if args.output_format != common.TFJS_LAYERS_MODEL:
      raise ValueError(
          'The --weight_shard_size_bytes flag is only supported under '
          'output_format=tfjs_layers_model.')
    weight_shard_size_bytes = args.weight_shard_size_bytes

  if args.input_path is None:
    raise ValueError(
        'Error: The input_path argument must be set. '
        'Run with --help flag for usage information.')

  input_format, output_format = _standardize_input_output_formats(
      args.input_format, args.output_format)

  quantization_dtype = (
      quantization.QUANTIZATION_BYTES_TO_DTYPES[args.quantization_bytes]
      if args.quantization_bytes else None)

  if (args.signature_name and input_format not in
      (common.TF_SAVED_MODEL, common.TF_HUB_MODEL)):
    raise ValueError(
        'The --signature_name flag is applicable only to "tf_saved_model" and '
        '"tf_hub" input format, but the current input format is '
        '"%s".' % input_format)

  # TODO(cais, piyu): More conversion logic can be added as additional
  #   branches below.
  if (input_format == common.KERAS_MODEL and
      output_format == common.TFJS_LAYERS_MODEL):
    dispatch_keras_h5_to_tfjs_layers_model_conversion(
        args.input_path, output_dir=args.output_path,
        quantization_dtype=quantization_dtype,
github tensorflow / tfjs-converter / python / tensorflowjs / read_weights.py
      elif quant_info:
        # Quantized array.
        dtype = np.dtype(quant_info['dtype'])
      else:
        # Regular numeric array.
        dtype = np.dtype(weight['dtype'])
      shape = weight['shape']
      if dtype not in _INPUT_DTYPES:
        raise NotImplementedError('Unsupported data type: %s' % dtype)
      if weight['dtype'] == 'string':
        value, offset = _deserialize_string_array(data_buffer, offset, shape)
      else:
        value = _deserialize_numeric_array(data_buffer, offset, dtype, shape)
        offset += dtype.itemsize * value.size
      if quant_info:
        value = quantization.dequantize_weights(
            value, quant_info['scale'], quant_info['min'],
            np.dtype(weight['dtype']))
      out_group.append({'name': name, 'data': value})

    if flatten:
      out += out_group
    else:
      out.append(out_group)

  return out
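
The dequantization step above can also be exercised on its own. A sketch using the illustrative numbers from the write_weights docstring further down (quantized values [20, 0, 255] with their recorded scale and min):

import numpy as np
from tensorflowjs import quantization

quantized = np.array([20, 0, 255], np.uint8)
scale, min_val = 0.00509803940852, -0.10196078817

# Dequantization is the affine map: original ≈ quantized * scale + min.
restored = quantization.dequantize_weights(quantized, scale, min_val, np.float32)
print(restored)  # approximately [0.0, -0.1, 1.2]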
github tensorflow / tfjs / tfjs-converter / python / tensorflowjs / converters / converter.py
    if args.output_format != common.TFJS_LAYERS_MODEL:
      raise ValueError(
          'The --weight_shard_size_bytes flag is only supported under '
          'output_format=tfjs_layers_model.')
    weight_shard_size_bytes = args.weight_shard_size_bytes

  if args.input_path is None:
    raise ValueError(
        'Error: The input_path argument must be set. '
        'Run with --help flag for usage information.')

  input_format, output_format = _standardize_input_output_formats(
      args.input_format, args.output_format)

  quantization_dtype = (
      quantization.QUANTIZATION_BYTES_TO_DTYPES[args.quantization_bytes]
      if args.quantization_bytes else None)

  if (not args.output_node_names and input_format == common.TF_FROZEN_MODEL):
    raise ValueError(
        'The --output_node_names flag is required for "tf_frozen_model"')

  if (args.signature_name and input_format not in
      (common.TF_SAVED_MODEL, common.TF_HUB_MODEL)):
    raise ValueError(
        'The --signature_name flag is applicable only to "tf_saved_model" and '
        '"tf_hub" input format, but the current input format is '
        '"%s".' % input_format)

  # TODO(cais, piyu): More conversion logic can be added as additional
  #   branches below.
  if (input_format == common.KERAS_MODEL and
github tensorflow / tfjs / tfjs-converter / python / tensorflowjs / write_weights.py
        original_entry = {
          'name': 'weight1',
          'data': np.array([0, -0.1, 1.2], 'float32')
        }
        quantized_entry = {
          'name': 'weight1',
          'data': np.array([20, 0, 255], 'uint8'),
          'quantization': {'min': -0.10196078817, 'scale': 0.00509803940852,
                           'original_dtype': 'float32'}
        }
  """
  data = entry['data']
  # Only float32 tensors are quantized.
  if data.dtype != 'float32':
    return entry
  quantized_data, scale, min_val = quantization.quantize_weights(
      data, quantization_dtype)
  quantized_entry = entry.copy()
  quantized_entry['data'] = quantized_data
  quantized_entry['quantization'] = {
      'min': min_val, 'scale': scale, 'original_dtype': data.dtype.name}
  return quantized_entry
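
To see the entry format from the docstring in action without touching the private helper, the same transformation can be reproduced with quantize_weights directly (a sketch mirroring the code above):

import numpy as np
from tensorflowjs import quantization

entry = {'name': 'weight1', 'data': np.array([0, -0.1, 1.2], 'float32')}
quantized_data, scale, min_val = quantization.quantize_weights(
    entry['data'], np.uint8)
quantized_entry = {
    'name': entry['name'],
    'data': quantized_data,  # uint8, approximately [20, 0, 255]
    'quantization': {'min': min_val, 'scale': scale,
                     'original_dtype': entry['data'].dtype.name},
}

Storing min, scale, and original_dtype alongside the data is what lets read_weights (shown earlier) call dequantize_weights and restore the float32 values.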