How to use the tensorpack.dataflow.common.MapData class in tensorpack

To help you get started, we’ve selected a few tensorpack examples that show popular ways MapData is used in public projects.

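Before diving into the project snippets, here is a minimal, self-contained sketch of what MapData does: it wraps an existing DataFlow and applies a mapping function to every datapoint (a datapoint that maps to None is dropped). The DataFromList source and the doubling lambda below are illustrative placeholders, not taken from any of the projects shown on this page.

from tensorpack.dataflow import DataFromList, MapData

# Toy source dataflow: each datapoint is a list with a single integer component.
ds = DataFromList([[1], [2], [3], [4]], shuffle=False)

# MapData passes the whole datapoint (the list) to the function;
# returning None from the function would silently drop that datapoint.
ds = MapData(ds, lambda dp: [dp[0] * 2])

ds.reset_state()              # always call reset_state() before iterating a dataflow
for dp in ds:                 # older tensorpack versions iterate ds.get_data() instead
    print(dp)                 # [2], [4], [6], [8]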

github YangZeyu95 / unofficial-implement-of-openpose / pose_dataset.py
        ds = MapDataComponent(ds, pose_flip)
        ds = MapDataComponent(ds, pose_resize_shortestedge_random)
        ds = MapDataComponent(ds, pose_crop_random)
        ds = MapData(ds, pose_to_img)
        # augs = [
        #     imgaug.RandomApplyAug(imgaug.RandomChooseAug([
        #         imgaug.GaussianBlur(max_size=3)
        #     ]), 0.7)
        # ]
        # ds = AugmentImageComponent(ds, augs)
        ds = PrefetchData(ds, 1000, multiprocessing.cpu_count()-1)
    else:
        ds = MultiThreadMapData(ds, nr_thread=16, map_func=read_image_url, buffer_size=1000)
        ds = MapDataComponent(ds, pose_resize_shortestedge_fixed)
        ds = MapDataComponent(ds, pose_crop_center)
        ds = MapData(ds, pose_to_img)
        ds = PrefetchData(ds, 100, multiprocessing.cpu_count() // 4)

    return ds
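The snippet above mixes MapData with MapDataComponent: MapData hands the whole datapoint (a list of components) to its function, while MapDataComponent applies its function to a single component (index 0 by default). A minimal sketch of the difference, using a toy dataflow that is not part of the project above:

from tensorpack.dataflow import DataFromList, MapData, MapDataComponent

ds = DataFromList([[1, 'a'], [2, 'b']], shuffle=False)

# MapData sees the full datapoint [number, letter] ...
ds = MapData(ds, lambda dp: [dp[0] + 10, dp[1]])

# ... while MapDataComponent only transforms the component at `index`.
ds = MapDataComponent(ds, lambda x: x * 2, index=0)

ds.reset_state()
for dp in ds:                 # older tensorpack versions: ds.get_data()
    print(dp)                 # [22, 'a'], [24, 'b']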
github ataata107 / Research-Papers-Implementations / Single-shot multi person 3d body pose / training / dataset_inspect.py
meta.all_joints, 7.0, stride=8)

    pafmap = create_paf(JointsLoader.num_connections, 46, 46,
                           meta.all_joints, 1, stride=8)

    return [meta, mask_paf, mask_heatmap, pafmap, heatmap]


if __name__ == '__main__':
    batch_size = 10
    curr_dir = os.path.dirname(__file__)
    annot_path = os.path.join(curr_dir, '../dataset/annotations/person_keypoints_val2017.json')
    img_dir = os.path.abspath(os.path.join(curr_dir, '../dataset/val2017/'))
    df = CocoDataFlow((368, 368), annot_path, img_dir)#, select_ids=[1000])
    df.prepare()
    df = MapData(df, read_img)
    df = MapData(df, gen_mask)
    df = MapData(df, augment)
    df = MapData(df, apply_mask)
    df = MapData(df, build_debug_sample)
    df = PrefetchData(df, nr_prefetch=2, nr_proc=1)

    df.reset_state()
    gen = df.get_data()

    for g in gen:
        show_image_mask_center_of_main_person(g)
        #show_comparision_of_2_pafs(g, 3, 3, 2)
github ataata107 / Research-Papers-Implementations / Single-shot multi person 3d body pose / training / dataset.py
in your hardware
    """
    batch_size = 10
    curr_dir = os.path.dirname(__file__)
    annot_path = os.path.join(curr_dir, '../dataset/annotations/person_keypoints_val2017.json')
    img_dir = os.path.abspath(os.path.join(curr_dir, '../dataset/val2017/'))
    df = CocoDataFlow((368, 368), annot_path, img_dir)#, select_ids=[1000])
    df.prepare()
    df = MapData(df, read_img)
    df = MapData(df, gen_mask)
    df = MapData(df, augment)
    df = MapData(df, apply_mask)
    df = MapData(df, build_sample)
    df = PrefetchDataZMQ(df, nr_proc=4)
    df = BatchData(df, batch_size, use_list=False)
    df = MapData(df, lambda x: (
        [x[0], x[1], x[2]],
        [x[3], x[4]])#, x[3], x[4], x[3], x[4], x[3], x[4], x[3], x[4], x[3], x[4]])
    )

    TestDataSpeed(df, size=100).start()
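In the example above, BatchData turns each component into a batched array, and the final MapData appears to regroup the five batched components into an (inputs, targets) pair for a model with multiple inputs and outputs. A hedged, self-contained sketch of that regrouping pattern (the toy five-component dataflow below is illustrative, not the project's real data):

from tensorpack.dataflow import BatchData, DataFromList, MapData

# Toy dataflow with five components per datapoint, mirroring the structure above.
ds = DataFromList([[i, i, i, i, i] for i in range(8)], shuffle=False)
ds = BatchData(ds, 4, use_list=False)    # each component becomes a batched array
ds = MapData(ds, lambda x: ([x[0], x[1], x[2]], [x[3], x[4]]))   # (inputs, targets)

ds.reset_state()
for inputs, targets in ds:
    print(len(inputs), len(targets))     # 3 2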
github YangZeyu95 / unofficial-implement-of-openpose / dataset.py
def get_dataflow(coco_data_paths):
    """
    This function initializes the tensorpack dataflow and serves a generator
    for the training operation.

    :param coco_data_paths: paths to the coco files: annotation file and folder with images
    :return: dataflow object
    """
    df = CocoDataFlow((368, 368), coco_data_paths)
    df.prepare()
    df = MapData(df, read_img)
    # df = MapData(df, gen_mask)
    df = MapData(df, augment)
    df = MapData(df, apply_mask)
    df = MapData(df, build_sample)
    df = PrefetchDataZMQ(df, nr_proc=4)  # df = PrefetchData(df, 2, 1)

    return df
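A dataflow returned by a helper like this is consumed the same way as in the dataset_inspect.py example above: call reset_state() once, then iterate. A hedged sketch, assuming coco_data_paths has been built elsewhere (for example as a COCODataPaths object, as in the speed-test snippet further down); the loop body is a placeholder for the real training step:

df = get_dataflow(coco_data_paths)
df.reset_state()                   # must be called once before the first iteration
for sample in df.get_data():       # newer tensorpack versions: `for sample in df:`
    pass                           # feed `sample` to the training step here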
github AlexEMG / DeepLabCut / deeplabcut / pose_estimation_tensorflow / dataset / pose_dataset_tensorpack.py
def get_dataflow(self, cfg):

        df = Pose(cfg)
        df = MapData(df, self.augment)
        df = MapData(df, self.compute_target_part_scoremap)

        num_cores = multiprocessing.cpu_count()
        num_processes = num_cores * int(self.cfg['processratio'])
        if num_processes <= 1:
            num_processes = 2 # recommended to use more than one process for training
        if os.name == 'nt':
            df2 = MultiProcessRunner(df, num_proc = num_processes, num_prefetch = self.cfg['num_prefetch'])
        else:
            df2 = MultiProcessRunnerZMQ(df, num_proc = num_processes, hwm = self.cfg['num_prefetch'])
        return df2
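Note that this DeepLabCut example uses MultiProcessRunner and MultiProcessRunnerZMQ, which are the current tensorpack names for the PrefetchData and PrefetchDataZMQ classes seen in the other snippets; the MapData calls themselves work the same way in both APIs.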
github YangZeyu95 / unofficial-implement-of-openpose / dataset.py
"""
    Run this script to check speed of generating samples. Tweak the nr_proc
    parameter of PrefetchDataZMQ. Ideally it should reflect the number of cores 
    in your hardware
    """
    batch_size = 10
    curr_dir = os.path.dirname(__file__)
    # annot_path = os.path.join(curr_dir, '../dataset/annotations/person_keypoints_val2017.json')
    # img_dir = os.path.abspath(os.path.join(curr_dir, '../dataset/val2017/'))
    annot_path = '/run/user/1000/gvfs/smb-share:server=192.168.1.2,share=data/yzy/dataset/Realtime_Multi-Person_Pose_Estimation-master/training/dataset/COCO/annotations/person_keypoints_val2017.json'
    img_dir = '/run/user/1000/gvfs/smb-share:server=192.168.1.2,share=data/yzy/dataset/Realtime_Multi-Person_Pose_Estimation-master/training/dataset/COCO/images/val2017/'
    df = CocoDataFlow((368, 368), COCODataPaths(
        annot_path, img_dir))  # , select_ids=[1000])
    df.prepare()
    df = MapData(df, read_img)
    df = MapData(df, gen_mask)
    df = MapData(df, augment)
    df = MapData(df, apply_mask)
    df = MapData(df, build_sample)
    df = PrefetchDataZMQ(df, nr_proc=4)
    # df = BatchData(df, batch_size, use_list=False)
    # df = MapData(df, lambda x: (
    #     [x[0], x[1], x[2]],
    #     [x[3], x[4], x[3], x[4], x[3], x[4], x[3], x[4], x[3], x[4], x[3], x[4]])
    # )

    # TestDataSpeed(df, size=100).start()
    TFRecordSerializer.save(
        df, '/media/yzy/diskF/dynamic/unofficial-implement-for-openpose/dataset/COCO/tfrecord/val2017.tfrecord')
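Once the dataflow has been serialized, it can be read back as a plain dataflow with TFRecordSerializer.load (TensorFlow must be installed for the TFRecord backend). A minimal sketch, assuming a tensorpack version that provides load alongside save, and reusing the output path from the snippet above:

from tensorpack.dataflow import TFRecordSerializer

df = TFRecordSerializer.load(
    '/media/yzy/diskF/dynamic/unofficial-implement-for-openpose/dataset/COCO/tfrecord/val2017.tfrecord')
df.reset_state()
for sample in df:                  # older tensorpack versions: df.get_data()
    pass                           # each `sample` is one deserialized datapoint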
github YangZeyu95 / unofficial-implement-of-openpose / pose_dataset.py
def _get_dataflow_onlyread(path, is_train, img_path=None):
    ds = CocoPose(path, img_path, is_train)  # read data from lmdb
    ds = MapData(ds, read_image_url)
    ds = MapData(ds, pose_to_img)
    # ds = PrefetchData(ds, 1000, multiprocessing.cpu_count() * 4)
    return ds