How to use the labelme.utils.img_b64_to_arr function in labelme

To help you get started, we’ve selected a few labelme examples based on popular ways the library is used in public projects.

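Before the project snippets, here is a minimal sketch of the pattern they all share: load a labelme annotation JSON, read its base64-encoded imageData field, and decode it into a NumPy array with labelme.utils.img_b64_to_arr. The file name example.json below is a placeholder, not taken from any of the projects shown.

import json

import labelme

# 'example.json' stands in for any labelme annotation file whose
# 'imageData' field stores the base64-encoded source image.
with open('example.json') as f:
    data = json.load(f)

img = labelme.utils.img_b64_to_arr(data['imageData'])
print(img.shape, img.dtype)  # e.g. (height, width, 3) uint8

The reverse conversion is labelme.utils.img_arr_to_b64; the round trip between the two is exercised in the test_img_arr_to_b64 snippet below.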

github wkentaro / labelme / tests / test_utils.py
def _get_img_and_data():
    json_file = osp.join(data_dir, 'apc2016_obj3.json')
    data = json.load(open(json_file))
    img_b64 = data['imageData']
    img = labelme.utils.img_b64_to_arr(img_b64)
    return img, data

github wkentaro / labelme / tests / test_utils.py
def test_img_arr_to_b64():
    img_file = osp.join(data_dir, 'apc2016_obj3.jpg')
    img_arr = np.asarray(PIL.Image.open(img_file))
    img_b64 = labelme.utils.img_arr_to_b64(img_arr)
    img_arr2 = labelme.utils.img_b64_to_arr(img_b64)
    np.testing.assert_allclose(img_arr, img_arr2)

github veraposeidon / labelme2Datasets / bbox_labelme2voc.py
        out_img_file = osp.join(
            args.output_dir, 'JPEGImages', base + '.jpg')
        # annotation xml file
        out_xml_file = osp.join(
            args.output_dir, 'Annotations', base + '.xml')
        # visualize image file
        out_viz_file = osp.join(
            args.output_dir, 'AnnotationsVisualization', base + '.jpg')
        # color annotated image file
        out_colorize_file = osp.join(
            args.output_dir, 'AnnotationsVisualization', base + '_viz.jpg')

        # save source image
        imageData = data.get('imageData')  # a labelme annotation file may embed the source image data (serialized)
        if imageData:
            img = utils.img_b64_to_arr(imageData)
        else:
            img_file = osp.join(osp.dirname(label_file), data['imagePath'])
            img = np.asarray(PIL.Image.open(img_file))
        PIL.Image.fromarray(img).save(out_img_file)

        # generate voc format annotation file
        maker = lxml.builder.ElementMaker()
        xml = maker.annotation(
            # folder name
            maker.folder(""),
            # img path
            maker.filename(base + '.jpg'),
            # img source, ignore it
            maker.source(
                maker.database(""),
                maker.annotation(""),

github wkentaro / labelme / labelme / label_file.py
    def _check_image_height_and_width(imageData, imageHeight, imageWidth):
        img_arr = utils.img_b64_to_arr(imageData)
        if imageHeight is not None and img_arr.shape[0] != imageHeight:
            logger.error(
                'imageHeight does not match with imageData or imagePath, '
                'so getting imageHeight from actual image.'
            )
            imageHeight = img_arr.shape[0]
        if imageWidth is not None and img_arr.shape[1] != imageWidth:
            logger.error(
                'imageWidth does not match with imageData or imagePath, '
                'so getting imageWidth from actual image.'
            )
            imageWidth = img_arr.shape[1]
        return imageHeight, imageWidth

github veraposeidon / labelme2Datasets / segmentation_labelme2voc.py
        with open(label_file, "r", encoding='UTF-8') as f:
            base = osp.splitext(osp.basename(label_file))[0]
            out_img_file = osp.join(
                args.voc_dir, 'JPEGImages', base + '.jpg')
            out_lbl_file = osp.join(
                args.voc_dir, 'SegmentationClass', base + '.npy')
            out_png_file = osp.join(
                args.voc_dir, 'SegmentationClassPNG', base + '.png')
            out_viz_file = osp.join(
                args.voc_dir, 'SegmentationClassVisualization', base + '.jpg')

            data = json.load(f)

            imageData = data.get('imageData')  # a labelme annotation file may embed the source image data (serialized)
            if imageData:
                img = utils.img_b64_to_arr(imageData)
            else:
                img_file = osp.join(osp.dirname(label_file), data['imagePath'])
                img = np.asarray(PIL.Image.open(img_file))
            PIL.Image.fromarray(img).save(out_img_file)

            lbl = labelme.utils.shapes_to_label(
                img_shape=img.shape,
                shapes=data['shapes'],
                label_name_to_value=class_name_to_id,
            )
            labelme.utils.lblsave(out_png_file, lbl)

            np.save(out_lbl_file, lbl)

            viz = labelme.utils.draw_label(
                lbl, img, class_names, colormap=colormap)

github shuyucool / Labelme / json_to_dataset_copy.py
import json
import os
import os.path as osp
import warnings
import numpy as np
import PIL.Image
import yaml
import io
from labelme import utils

json_file = "D:\\Practice\\fusion_json\\41.json"  # jsonζ–‡δ»Άθ·―εΎ„
out_dir = "D:\\Practice\\fusion_json\\41"    #θΎ“ε‡Ίζ–‡δ»Άθ·―εΎ„


data = json.load(open(json_file))

img = utils.img_b64_to_arr(data['imageData'])

label_name_to_value = {'_background_': 0}
for shape in data['shapes']:
    label_name = shape['label']
    if label_name in label_name_to_value:
        label_value = label_name_to_value[label_name]
    else:
        label_value = len(label_name_to_value)
        label_name_to_value[label_name] = label_value

# label_values must be dense
label_values, label_names = [], []
for ln, lv in sorted(label_name_to_value.items(), key=lambda x: x[1]):
    label_values.append(lv)
    label_names.append(ln)
assert label_values == list(range(len(label_values)))

github wkentaro / labelme / labelme / cli / draw_json.py
    parser = argparse.ArgumentParser()
    parser.add_argument('json_file')
    args = parser.parse_args()

    json_file = args.json_file

    data = json.load(open(json_file))

    if data['imageData']:
        imageData = data['imageData']
    else:
        imagePath = os.path.join(os.path.dirname(json_file), data['imagePath'])
        with open(imagePath, 'rb') as f:
            imageData = f.read()
            imageData = base64.b64encode(imageData).decode('utf-8')
    img = utils.img_b64_to_arr(imageData)

    label_name_to_value = {'_background_': 0}
    for shape in sorted(data['shapes'], key=lambda x: x['label']):
        label_name = shape['label']
        if label_name in label_name_to_value:
            label_value = label_name_to_value[label_name]
        else:
            label_value = len(label_name_to_value)
            label_name_to_value[label_name] = label_value
    lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)

    label_names = [None] * (max(label_name_to_value.values()) + 1)
    for name, value in label_name_to_value.items():
        label_names[value] = name
    lbl_viz = imgviz.label2rgb(
        label=lbl,