How to use the datasets.dataset_utils.bytes_feature function in datasets

To help you get started, we’ve selected a few examples from the datasets package, based on popular ways the bytes_feature function is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github fisheess / modular_SSD_tensorflow / datasets / pascalvoc_to_tfrecords.py View on Github external
image_format = b'JPEG'
    example = tf.train.Example(features=tf.train.Features(feature={
            'image/height': int64_feature(shape[0]),
            'image/width': int64_feature(shape[1]),
            'image/channels': int64_feature(shape[2]),
            'image/shape': int64_feature(shape),
            'image/object/bbox/xmin': float_feature(xmin),
            'image/object/bbox/xmax': float_feature(xmax),
            'image/object/bbox/ymin': float_feature(ymin),
            'image/object/bbox/ymax': float_feature(ymax),
            'image/object/bbox/label': int64_feature(labels),
            'image/object/bbox/label_text': bytes_feature(labels_text),
            'image/object/bbox/difficult': int64_feature(difficult),
            'image/object/bbox/truncated': int64_feature(truncated),
            'image/format': bytes_feature(image_format),
            'image/filename': bytes_feature(name.encode('utf-8')),
            'image/encoded': bytes_feature(image_data)}))
    return example
github Shun14 / TextBoxes_plusplus_Tensorflow / datasets / xml_to_tfrecords.py View on Github external
'image/shape': int64_feature(shape),
            'image/filename': bytes_feature(filename.encode('utf-8')),
            'image/object/bbox/xmin': float_feature(xmin),
            'image/object/bbox/xmax': float_feature(xmax),
            'image/object/bbox/ymin': float_feature(ymin),
            'image/object/bbox/ymax': float_feature(ymax),
            'image/object/bbox/x1': float_feature(x1),
            'image/object/bbox/y1': float_feature(y1),
            'image/object/bbox/x2': float_feature(x2),
            'image/object/bbox/y2': float_feature(y2),
            'image/object/bbox/x3': float_feature(x3),
            'image/object/bbox/y3': float_feature(y3),
            'image/object/bbox/x4': float_feature(x4),
            'image/object/bbox/y4': float_feature(y4),
            'image/object/bbox/label': int64_feature(labels),
            'image/object/bbox/label_text': bytes_feature(labels_text),
            'image/object/bbox/difficult': int64_feature(difficult),
            'image/object/bbox/truncated': int64_feature(truncated),
            'image/object/bbox/ignored': int64_feature(ignored),
            'image/format': bytes_feature(image_format),
            'image/encoded': bytes_feature(image_data)}))
    return example
github balancap / SSD-Tensorflow / datasets / pascalvoc_to_tfrecords.py View on Github external
image_format = b'JPEG'
    example = tf.train.Example(features=tf.train.Features(feature={
            'image/height': int64_feature(shape[0]),
            'image/width': int64_feature(shape[1]),
            'image/channels': int64_feature(shape[2]),
            'image/shape': int64_feature(shape),
            'image/object/bbox/xmin': float_feature(xmin),
            'image/object/bbox/xmax': float_feature(xmax),
            'image/object/bbox/ymin': float_feature(ymin),
            'image/object/bbox/ymax': float_feature(ymax),
            'image/object/bbox/label': int64_feature(labels),
            'image/object/bbox/label_text': bytes_feature(labels_text),
            'image/object/bbox/difficult': int64_feature(difficult),
            'image/object/bbox/truncated': int64_feature(truncated),
            'image/format': bytes_feature(image_format),
            'image/encoded': bytes_feature(image_data)}))
    return example
github tensorflow / models / research / slim / datasets / download_and_convert_visualwakewords_lib.py View on Github external
xmin, xmax, ymin, ymax, area = [], [], [], [], []
  for obj in annotations['objects']:
    (x, y, width, height) = tuple(obj['bbox'])
    xmin.append(float(x) / image_width)
    xmax.append(float(x + width) / image_width)
    ymin.append(float(y) / image_height)
    ymax.append(float(y + height) / image_height)
    area.append(obj['area'])

  feature_dict = {
      'image/height':
          dataset_utils.int64_feature(image_height),
      'image/width':
          dataset_utils.int64_feature(image_width),
      'image/filename':
          dataset_utils.bytes_feature(filename.encode('utf8')),
      'image/source_id':
          dataset_utils.bytes_feature(str(image_id).encode('utf8')),
      'image/key/sha256':
          dataset_utils.bytes_feature(key.encode('utf8')),
      'image/encoded':
          dataset_utils.bytes_feature(encoded_jpg),
      'image/format':
          dataset_utils.bytes_feature('jpeg'.encode('utf8')),
      'image/class/label':
          dataset_utils.int64_feature(annotations['label']),
      'image/object/bbox/xmin':
          dataset_utils.float_list_feature(xmin),
      'image/object/bbox/xmax':
          dataset_utils.float_list_feature(xmax),
      'image/object/bbox/ymin':
          dataset_utils.float_list_feature(ymin),
github gxd1994 / TextBoxes-TensorFlow / datasets / data2record.py View on Github external
xmin = list(nbbox[:, 1])
	ymax = list(nbbox[:, 2])
	xmax = list(nbbox[:, 3])

	print 'shape: {}, height:{}, width:{}'.format(shape,shape[0],shape[1])
	example = tf.train.Example(features=tf.train.Features(feature={
			'image/height': int64_feature(shape[0]),
			'image/width': int64_feature(shape[1]),
			'image/channels': int64_feature(shape[2]),
			'image/shape': int64_feature(shape),
			'image/object/bbox/xmin': float_feature(xmin),
			'image/object/bbox/xmax': float_feature(xmax),
			'image/object/bbox/ymin': float_feature(ymin),
			'image/object/bbox/ymax': float_feature(ymax),
			'image/object/bbox/label': int64_feature(label),
			'image/format': bytes_feature('jpeg'),
			'image/encoded': bytes_feature(image_data),
			'image/name': bytes_feature(imname.tostring()),
			}))
	return example
github charliememory / Disentangled-Person-Image-Generation / datasets / convert_RCV.py View on Github external
#     _visualizePose(roi_mask_list_0[2], scipy.misc.imread(img_path_0))
        #     _visualizePose(roi_mask_list_0[3], scipy.misc.imread(img_path_0))
        #     _visualizePose(roi_mask_list_0[4], scipy.misc.imread(img_path_0))
        #     _visualizePose(roi_mask_list_0[5], scipy.misc.imread(img_path_0))
        #     _visualizePose(roi_mask_list_0[6], scipy.misc.imread(img_path_0))
        #     _visualizePose(roi_mask_list_0[7], scipy.misc.imread(img_path_0))
        #     _visualizePose(roi_mask_list_0[8], scipy.misc.imread(img_path_0))
        #     _visualizePose(roi_mask_list_0[9], scipy.misc.imread(img_path_0))
        # pdb.set_trace()
    else:
        return None

    example = tf.train.Example(features=tf.train.Features(feature={
            'image_name_0': dataset_utils.bytes_feature(pairs[i][0]),
            'image_name_1': dataset_utils.bytes_feature(pairs[i][1]),
            'image_raw_0': dataset_utils.bytes_feature(image_raw_0),
            'image_raw_1': dataset_utils.bytes_feature(image_raw_1),
            'label': dataset_utils.int64_feature(labels[i]),
            'id_0': dataset_utils.int64_feature(id_map[id_0]),
            'id_1': dataset_utils.int64_feature(id_map[id_1]),
            'cam_0': dataset_utils.int64_feature(-1),
            'cam_1': dataset_utils.int64_feature(-1),
            'image_format': dataset_utils.bytes_feature(_IMG_PATTERN),
            'image_height': dataset_utils.int64_feature(height),
            'image_width': dataset_utils.int64_feature(width),
            'real_data': dataset_utils.int64_feature(1),
            'attrs_0': dataset_utils.int64_feature(attrs_0),
            'attrs_1': dataset_utils.int64_feature(attrs_1),
            'pose_peaks_0_rcv': dataset_utils.float_feature(pose_peaks_0_rcv.flatten().tolist()),
            'pose_peaks_1_rcv': dataset_utils.float_feature(pose_peaks_1_rcv.flatten().tolist()),
            'pose_mask_r4_0': dataset_utils.int64_feature(pose_mask_r4_0.astype(np.int64).flatten().tolist()),
            'pose_mask_r4_1': dataset_utils.int64_feature(pose_mask_r4_1.astype(np.int64).flatten().tolist()),
github NVIDIAAICITYCHALLENGE / AICity_TeamUW / ssd-tensorflow / datasets / pascalvoc_to_tfrecords.py View on Github external
image_format = b'JPEG'
    example = tf.train.Example(features=tf.train.Features(feature={
            'image/height': int64_feature(shape[0]),
            'image/width': int64_feature(shape[1]),
            'image/channels': int64_feature(shape[2]),
            'image/shape': int64_feature(shape),
            'image/object/bbox/xmin': float_feature(xmin),
            'image/object/bbox/xmax': float_feature(xmax),
            'image/object/bbox/ymin': float_feature(ymin),
            'image/object/bbox/ymax': float_feature(ymax),
            'image/object/bbox/label': int64_feature(labels),
            'image/object/bbox/label_text': bytes_feature(labels_text),
            'image/object/bbox/difficult': int64_feature(difficult),
            'image/object/bbox/truncated': int64_feature(truncated),
            'image/format': bytes_feature(image_format),
            'image/encoded': bytes_feature(image_data)}))
    return example
github fisheess / modular_SSD_tensorflow / datasets / pascalvoc_to_tfrecords.py View on Github external
# pylint: disable=expression-not-assigned
        [l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]
        # pylint: enable=expression-not-assigned

    image_format = b'JPEG'
    example = tf.train.Example(features=tf.train.Features(feature={
            'image/height': int64_feature(shape[0]),
            'image/width': int64_feature(shape[1]),
            'image/channels': int64_feature(shape[2]),
            'image/shape': int64_feature(shape),
            'image/object/bbox/xmin': float_feature(xmin),
            'image/object/bbox/xmax': float_feature(xmax),
            'image/object/bbox/ymin': float_feature(ymin),
            'image/object/bbox/ymax': float_feature(ymax),
            'image/object/bbox/label': int64_feature(labels),
            'image/object/bbox/label_text': bytes_feature(labels_text),
            'image/object/bbox/difficult': int64_feature(difficult),
            'image/object/bbox/truncated': int64_feature(truncated),
            'image/format': bytes_feature(image_format),
            'image/filename': bytes_feature(name.encode('utf-8')),
            'image/encoded': bytes_feature(image_data)}))
    return example
github Zehaos / MobileNet / datasets / kitti_object_to_tfrecords.py View on Github external
example = tf.train.Example(features=tf.train.Features(feature={
            'image/height': int64_feature(shape[0]),
            'image/width': int64_feature(shape[1]),
            'image/channels': int64_feature(shape[2]),
            'image/shape': int64_feature(shape),
            'image/object/bbox/xmin': float_feature(bbox_x1_list),
            'image/object/bbox/xmax': float_feature(bbox_x2_list),
            'image/object/bbox/ymin': float_feature(bbox_y1_list),
            'image/object/bbox/ymax': float_feature(bbox_y2_list),
            'image/object/bbox/label': int64_feature(label_list),
            'image/object/bbox/label_text': bytes_feature(type_list),
            'image/object/bbox/occlusion': int64_feature(occl_list),
            'image/object/bbox/truncation': float_feature(trun_list),
            'image/object/observation/alpha': float_feature(alpha_list),
            'image/format': bytes_feature(image_format),
            'image/encoded': bytes_feature(image_data),
            'image/object/3Dbbox/h': float_feature(ddd_bbox_h_list),
            'image/object/3Dbbox/w': float_feature(ddd_bbox_w_list),
            'image/object/3Dbbox/l': float_feature(ddd_bbox_l_list),
            'image/object/3Dbbox/x': float_feature(ddd_bbox_x_list),
            'image/object/3Dbbox/y': float_feature(ddd_bbox_y_list),
            'image/object/3Dbbox/z': float_feature(ddd_bbox_z_list),
            'image/object/3Dbbox/ry': float_feature(ddd_bbox_ry_list)
    }))
    return example
github charliememory / Disentangled-Person-Image-Generation / datasets / convert_RCV.py View on Github external
#     _visualizePose(roi_mask_list_0[1], scipy.misc.imread(img_path_0))
        #     _visualizePose(roi_mask_list_0[2], scipy.misc.imread(img_path_0))
        #     _visualizePose(roi_mask_list_0[3], scipy.misc.imread(img_path_0))
        #     _visualizePose(roi_mask_list_0[4], scipy.misc.imread(img_path_0))
        #     _visualizePose(roi_mask_list_0[5], scipy.misc.imread(img_path_0))
        #     _visualizePose(roi_mask_list_0[6], scipy.misc.imread(img_path_0))
        #     _visualizePose(roi_mask_list_0[7], scipy.misc.imread(img_path_0))
        #     _visualizePose(roi_mask_list_0[8], scipy.misc.imread(img_path_0))
        #     _visualizePose(roi_mask_list_0[9], scipy.misc.imread(img_path_0))
        # pdb.set_trace()
    else:
        return None

    example = tf.train.Example(features=tf.train.Features(feature={
            'image_name_0': dataset_utils.bytes_feature(pairs[i][0]),
            'image_name_1': dataset_utils.bytes_feature(pairs[i][1]),
            'image_raw_0': dataset_utils.bytes_feature(image_raw_0),
            'image_raw_1': dataset_utils.bytes_feature(image_raw_1),
            'label': dataset_utils.int64_feature(labels[i]),
            'id_0': dataset_utils.int64_feature(id_map[id_0]),
            'id_1': dataset_utils.int64_feature(id_map[id_1]),
            'cam_0': dataset_utils.int64_feature(-1),
            'cam_1': dataset_utils.int64_feature(-1),
            'image_format': dataset_utils.bytes_feature(_IMG_PATTERN),
            'image_height': dataset_utils.int64_feature(height),
            'image_width': dataset_utils.int64_feature(width),
            'real_data': dataset_utils.int64_feature(1),
            'attrs_0': dataset_utils.int64_feature(attrs_0),
            'attrs_1': dataset_utils.int64_feature(attrs_1),
            'pose_peaks_0_rcv': dataset_utils.float_feature(pose_peaks_0_rcv.flatten().tolist()),
            'pose_peaks_1_rcv': dataset_utils.float_feature(pose_peaks_1_rcv.flatten().tolist()),
            'pose_mask_r4_0': dataset_utils.int64_feature(pose_mask_r4_0.astype(np.int64).flatten().tolist()),