How to use the mmcv.imread function in mmcv

To help you get started, we've selected a few mmcv.imread examples based on popular ways it is used in public projects.

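As a quick reference before the project snippets, here is a minimal sketch of the call patterns they use. The file names ('test.jpg', 'label.png') and the output path are placeholders, not taken from any of the projects below.

import mmcv

# Default flag ('color') decodes the file into a BGR NumPy array of shape (H, W, 3).
img = mmcv.imread('test.jpg')
# 'grayscale' yields a single-channel (H, W) array.
gray = mmcv.imread('test.jpg', flag='grayscale')
# 'unchanged' keeps the data exactly as stored, e.g. for segmentation label maps.
label = mmcv.imread('label.png', flag='unchanged')
# Round-trip: write the decoded array back to disk.
mmcv.imwrite(img, 'mmcv_copy.jpg')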

github open-mmlab / mmcv / tests / test_image.py
def test_imwrite(self):
        img = mmcv.imread(self.img_path)
        out_file = osp.join(tempfile.gettempdir(), 'mmcv_test.jpg')
        mmcv.imwrite(img, out_file)
        rewrite_img = mmcv.imread(out_file)
        os.remove(out_file)
        self.assert_img_equal(img, rewrite_img)
 
github hekesai / MMDetection-Introduce / mmdetection-inference / test_inference_time.py
def test_inference(config_file,model_file):

    model = init_detector(config_file,model_file,device='cuda:0')

    g_loop_count = 100
    image = 'test.jpg'
    input_image = mmcv.imread(image)

    start_time = time.time()

    for i in range(g_loop_count):
        result = inference_detector(model,input_image)

    end_time = time.time()

    avg_time = (end_time-start_time) / g_loop_count

    return avg_time
 
github xieenze / PolarMask / mmdet / datasets / coco_seg.py
# skip the image if there is no valid gt bbox
        if len(gt_bboxes) == 0 and self.skip_img_without_anno:
            warnings.warn('Skip the image "%s" that has no valid gt bbox' %
                          osp.join(self.img_prefix, img_info['filename']))
            return None

        # apply transforms
        flip = True if np.random.rand() < self.flip_ratio else False
        # randomly sample a scale
        img_scale = random_scale(self.img_scales, self.multiscale_mode)
        img, img_shape, pad_shape, scale_factor = self.img_transform(img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
        img = img.copy()
        if self.with_seg:
            gt_seg = mmcv.imread(
                osp.join(self.seg_prefix,
                         img_info['filename'].replace('jpg', 'png')),
                flag='unchanged')
            gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip)
            gt_seg = mmcv.imrescale(
                gt_seg, self.seg_scale_factor, interpolation='nearest')
            gt_seg = gt_seg[None, ...]
        if self.proposals is not None:
            proposals = self.bbox_transform(proposals, img_shape, scale_factor,
                                            flip)
            proposals = np.hstack([proposals, scores
                                   ]) if scores is not None else proposals
        gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor,
                                        flip)
        if self.with_crowd:
            gt_bboxes_ignore = self.bbox_transform(gt_bboxes_ignore, img_shape,
 
github ming71 / mmdetection-annotated / mmdet / apis / inference.py
def _inference_single(model, img, img_transform, device):
    img = mmcv.imread(img)
    data = _prepare_data(img, img_transform, model.cfg, device)
    with torch.no_grad():
        result = model(return_loss=False, rescale=True, **data)
    return result
 
github xinntao / BasicSR / basicsr / utils / lmdb.py
def read_img_worker(path, key, compress_level):
    """Read image worker.

    Args:
        path (str): Image path.
        key (str): Image key.
        compress_level (int): Compress level when encoding images.

    Returns:
        str: Image key.
        byte: Image byte.
        tuple[int]: Image shape.
    """

    img = mmcv.imread(path, flag='unchanged')
    if img.ndim == 2:
        h, w = img.shape
        c = 1
    else:
        h, w, c = img.shape
    _, img_byte = cv2.imencode('.png', img,
                               [cv2.IMWRITE_PNG_COMPRESSION, compress_level])
    return (key, img_byte, (h, w, c))
 
github lxy5513 / hrnet / lib / detector / mmdetection / high_api.py
def human_boxes_get(model, img, score_thr=0.5):
    if isinstance(img, str):
        img = mmcv.imread(img)
    result = inference_detector(model, img, cfg, device='cuda:0')
    bboxes, scores = re_result(result, score_thr=score_thr)
    return bboxes, scores
 
github open-mmlab / mmaction / mmaction / datasets / ava_dataset.py
def _load_image(self, directory, image_tmpl, modality, idx):
        if modality in ['RGB', 'RGBDiff']:
            return [mmcv.imread(osp.join(directory, image_tmpl.format(idx)))]
        elif modality == 'Flow':
            x_imgs = mmcv.imread(
                osp.join(directory, image_tmpl.format('x', idx)),
                flag='grayscale')
            y_imgs = mmcv.imread(
                osp.join(directory, image_tmpl.format('y', idx)),
                flag='grayscale')
            return [x_imgs, y_imgs]
        else:
            raise ValueError(
                'Not implemented yet; modality should be '
                '["RGB", "RGBDiff", "Flow"]')
 
github open-mmlab / mmdetection / mmdet / datasets / pipelines / loading.py
def _load_semantic_seg(self, results):
        results['gt_semantic_seg'] = mmcv.imread(
            osp.join(results['seg_prefix'], results['ann_info']['seg_map']),
            flag='unchanged').squeeze()
        results['seg_fields'].append('gt_semantic_seg')
        return results
 
github musket-ml / segmentation_training_pipeline / segmentation_pipeline / mmdetection.py
# extra augmentation
            if self.extra_aug is not None:
                #img = self.extra_aug(img)
                img, gt_bboxes, gt_labels = self.extra_aug(img, gt_bboxes,
                                                            gt_labels)

            # apply transforms
            flip = True if np.random.rand() < self.flip_ratio else False
            # randomly sample a scale
            img_scale = random_scale(self.img_scales, self.multiscale_mode)
            img, img_shape, pad_shape, scale_factor = self.img_transform(
                img, img_scale, flip, keep_ratio=self.resize_keep_ratio)
            img = img.copy()
            if self.with_seg:
                gt_seg = mmcv.imread(
                    osp.join(self.seg_prefix, img_info['file_name'].replace(
                        'jpg', 'png')),
                    flag='unchanged')
                gt_seg = self.seg_transform(gt_seg.squeeze(), img_scale, flip)
                gt_seg = mmcv.imrescale(
                    gt_seg, self.seg_scale_factor, interpolation='nearest')
                gt_seg = gt_seg[None, ...]
                pass
            if self.proposals is not None:
                proposals = self.bbox_transform(proposals, img_shape, scale_factor,
                                                flip)
                proposals = np.hstack(
                    [proposals, scores]) if scores is not None else proposals
            gt_bboxes = self.bbox_transform(gt_bboxes, img_shape, scale_factor,
                                            flip)
            if self.with_crowd: