How to use the labelme.utils.shapes_to_label function in labelme

To help you get started, we’ve selected a few examples of labelme.utils.shapes_to_label, based on popular ways it is used in public projects.

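Before the project examples below, here is a minimal sketch of the typical call pattern. The annotation file name (example.json) is hypothetical; img_b64_to_arr and lblsave are helpers from labelme.utils. Note that the return value of shapes_to_label depends on the labelme version: older releases return a single class-label array, while newer releases return a (cls, ins) tuple of class and instance label maps, which is why the sketch handles both cases.

import json

import labelme

# Load a labelme annotation file (hypothetical path).
with open('example.json') as f:
    data = json.load(f)

# Decode the embedded image so we know the target array shape.
img = labelme.utils.img_b64_to_arr(data['imageData'])

# Map each label name to an integer id, reserving 0 for the background.
label_name_to_value = {'_background_': 0}
for shape in data['shapes']:
    label_name = shape['label']
    if label_name not in label_name_to_value:
        label_name_to_value[label_name] = len(label_name_to_value)

# Rasterize the annotated shapes into a per-pixel label array.
result = labelme.utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)
lbl = result[0] if isinstance(result, tuple) else result  # older vs. newer labelme API

# Save the label map as a color-paletted PNG.
labelme.utils.lblsave('label.png', lbl)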

github wkentaro / labelme / tests / test_utils.py
def _get_img_and_lbl():
    img, data = _get_img_and_data()

    # Map each label name to an integer value, reserving 0 for the background.
    label_name_to_value = {'__background__': 0}
    for shape in data['shapes']:
        label_name = shape['label']
        label_value = len(label_name_to_value)
        label_name_to_value[label_name] = label_value

    # Build the inverse mapping: label_names[value] -> name.
    n_labels = max(label_name_to_value.values()) + 1
    label_names = [None] * n_labels
    for label_name, label_value in label_name_to_value.items():
        label_names[label_value] = label_name

    # Rasterize the annotated shapes into a per-pixel label array.
    lbl = labelme.utils.shapes_to_label(
        img.shape, data['shapes'], label_name_to_value)
    return img, lbl, label_names
github veraposeidon / labelme2Datasets / bbox_labelme2voc.py

        PIL.Image.fromarray(viz).save(out_viz_file)

        # another visualize format (colored mask in bbox)
        label_name_to_value = {'_background_': 0}
        for shape in sorted(data['shapes'], key=lambda x: x['label']):
            label_name = shape['label']

            if label_name in label_name_to_value:
                label_value = label_name_to_value[label_name]
            else:
                label_value = len(label_name_to_value)
                label_name_to_value[label_name] = label_value

        lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)
        label_names = [None] * (max(label_name_to_value.values()) + 1)
        for name, value in label_name_to_value.items():
            if args.label_dict is not None:
                name = fst2snd_dict[name]
            label_names[value] = name

        lbl_viz = utils.draw_label(lbl, img, label_names)
        PIL.Image.fromarray(lbl_viz).save(out_colorize_file)

        # save voc annotation to xml file
        with open(out_xml_file, 'wb') as f:
            f.write(lxml.etree.tostring(xml, pretty_print=True))
github veraposeidon / labelme2Datasets / labelme_json_to_dataset.py
    if not imageData:
        imagePath = os.path.join(os.path.dirname(json_file), data['imagePath'])
        with open(imagePath, 'rb') as f:
            imageData = f.read()
            imageData = base64.b64encode(imageData).decode('utf-8')
    img = utils.img_b64_to_arr(imageData)

    label_name_to_value = {'_background_': 0}
    for shape in sorted(data['shapes'], key=lambda x: x['label']):
        label_name = shape['label']
        if label_name in label_name_to_value:
            label_value = label_name_to_value[label_name]
        else:
            label_value = len(label_name_to_value)
            label_name_to_value[label_name] = label_value
    lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)

    label_names = [None] * (max(label_name_to_value.values()) + 1)
    for name, value in label_name_to_value.items():
        label_names[value] = name
    lbl_viz = utils.draw_label(lbl, img, label_names)

    PIL.Image.fromarray(img).save(osp.join(out_dir, 'img.png'))
    utils.lblsave(osp.join(out_dir, 'label.png'), lbl)
    PIL.Image.fromarray(lbl_viz).save(osp.join(out_dir, 'label_viz.png'))

    with open(osp.join(out_dir, 'label_names.txt'), 'w') as f:
        for lbl_name in label_names:
            f.write(lbl_name + '\n')

    print('Saved to: %s' % out_dir)
github wkentaro / labelme / examples / instance_segmentation / labelme2voc.py
            out_insp_file = osp.join(
                args.output_dir, 'SegmentationObjectPNG', base + '.png')
            if not args.noviz:
                out_insv_file = osp.join(
                    args.output_dir,
                    'SegmentationObjectVisualization',
                    base + '.jpg',
                )

            data = json.load(f)

            img_file = osp.join(osp.dirname(label_file), data['imagePath'])
            img = np.asarray(PIL.Image.open(img_file))
            PIL.Image.fromarray(img).save(out_img_file)

            cls, ins = labelme.utils.shapes_to_label(
                img_shape=img.shape,
                shapes=data['shapes'],
                label_name_to_value=class_name_to_id,
                type='instance',
            )
            ins[cls == -1] = 0  # ignore it.

            # class label
            labelme.utils.lblsave(out_clsp_file, cls)
            np.save(out_cls_file, cls)
            if not args.noviz:
                clsv = imgviz.label2rgb(
                    label=cls,
                    img=imgviz.rgb2gray(img),
                    label_names=class_names,
                    font_size=15,
github khanhha / crack_segmentation / preprocess / label2voc.py
            else:
                imagePath = os.path.join(os.path.dirname(label_file), data['imagePath'])
                with open(imagePath, 'rb') as f:
                    imageData = f.read()
                    imageData = base64.b64encode(imageData).decode('utf-8')
            img = utils.img_b64_to_arr(imageData)

            label_name_to_value = {'_background_': 0}
            for shape in sorted(data['shapes'], key=lambda x: x['label']):
                label_name = shape['label']
                if label_name in label_name_to_value:
                    label_value = label_name_to_value[label_name]
                else:
                    label_value = len(label_name_to_value)
                    label_name_to_value[label_name] = label_value
            lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)

            #lb = cv.imread(join(*[args.label_dir, f'{path.stem}.png']))
            #lb = cv.cvtColor(lb, cv.COLOR_BGR2GRAY)
            lbl = (lbl > 0).astype(np.uint8) * 255
            lbl = cv.morphologyEx(src=lbl, op=cv.MORPH_DILATE, kernel=cv.getStructuringElement(cv.MORPH_RECT, (20, 20)))

            results = random_crop(img, lbl, size=(448, 448), n_tries=10)
            #tq.update(1)
            for sub_img, sub_mask, crop_info in results:
                info = f'{crop_info[0]}_{crop_info[1]}_{crop_info[2]}_{crop_info[3]}'
                cv.imwrite(filename=os.path.join(*[args.output_dir, 'images', f'{base}_{info}.jpg']), img=sub_img)
                cv.imwrite(filename=os.path.join(*[args.output_dir, 'masks',  f'{base}_{info}.jpg']), img=sub_mask)
                #cnt += 1
                plt.clf()
                plt.imshow(sub_img)
                plt.imshow(sub_mask, alpha=0.4)
github wkentaro / labelme / examples / semantic_segmentation / labelme2voc.py
            out_png_file = osp.join(
                args.output_dir, 'SegmentationClassPNG', base + '.png')
            if not args.noviz:
                out_viz_file = osp.join(
                    args.output_dir,
                    'SegmentationClassVisualization',
                    base + '.jpg',
                )

            data = json.load(f)

            img_file = osp.join(osp.dirname(label_file), data['imagePath'])
            img = np.asarray(PIL.Image.open(img_file))
            PIL.Image.fromarray(img).save(out_img_file)

            lbl = labelme.utils.shapes_to_label(
                img_shape=img.shape,
                shapes=data['shapes'],
                label_name_to_value=class_name_to_id,
            )
            labelme.utils.lblsave(out_png_file, lbl)

            np.save(out_lbl_file, lbl)

            if not args.noviz:
                viz = imgviz.label2rgb(
                    label=lbl,
                    img=imgviz.rgb2gray(img),
                    font_size=15,
                    label_names=class_names,
                    loc='rb',
                )
github PaddlePaddle / PaddleSeg / pdseg / tools / labelme2seg.py
    color_map = get_color_map_list(256)

    for label_file in glob.glob(osp.join(args.input_dir, '*.json')):
        print('Generating dataset from:', label_file)
        with open(label_file) as f:
            base = osp.splitext(osp.basename(label_file))[0]
            out_png_file = osp.join(
                output_dir, base + '.png')

            data = json.load(f)

            img_file = osp.join(osp.dirname(label_file), data['imagePath'])
            img = np.asarray(PIL.Image.open(img_file))

            lbl = labelme.utils.shapes_to_label(
                img_shape=img.shape,
                shapes=data['shapes'],
                label_name_to_value=class_name_to_id,
            )

            if osp.splitext(out_png_file)[1] != '.png':
                out_png_file += '.png'
            # Assume label ranges [0, 255] for uint8,
            if lbl.min() >= 0 and lbl.max() <= 255:
                lbl_pil = PIL.Image.fromarray(lbl.astype(np.uint8), mode='P')
                lbl_pil.putpalette(color_map)
                lbl_pil.save(out_png_file)
            else:
                raise ValueError(
                    '[%s] Cannot save the pixel-wise class label as PNG. '
                    'Please consider using the .npy format.' % out_png_file
github wkentaro / labelme / labelme / cli / json_to_dataset.py
    if not imageData:
        imagePath = os.path.join(os.path.dirname(json_file), data['imagePath'])
        with open(imagePath, 'rb') as f:
            imageData = f.read()
            imageData = base64.b64encode(imageData).decode('utf-8')
    img = utils.img_b64_to_arr(imageData)

    label_name_to_value = {'_background_': 0}
    for shape in sorted(data['shapes'], key=lambda x: x['label']):
        label_name = shape['label']
        if label_name in label_name_to_value:
            label_value = label_name_to_value[label_name]
        else:
            label_value = len(label_name_to_value)
            label_name_to_value[label_name] = label_value
    lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)

    label_names = [None] * (max(label_name_to_value.values()) + 1)
    for name, value in label_name_to_value.items():
        label_names[value] = name
    lbl_viz = imgviz.label2rgb(
        label=lbl, img=imgviz.rgb2gray(img), label_names=label_names, loc='rb'
    )

    PIL.Image.fromarray(img).save(osp.join(out_dir, 'img.png'))
    utils.lblsave(osp.join(out_dir, 'label.png'), lbl)
    PIL.Image.fromarray(lbl_viz).save(osp.join(out_dir, 'label_viz.png'))

    with open(osp.join(out_dir, 'label_names.txt'), 'w') as f:
        for lbl_name in label_names:
            f.write(lbl_name + '\n')