How to use the reader.Settings function in reader

To help you get started, we’ve selected a few reader.Settings examples based on popular ways it is used in public projects.

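The examples below come from PaddlePaddle model repositories. In each, reader.Settings bundles dataset paths and preprocessing options into a single configuration object that is then handed to a train, eval, or infer entry point. As a minimal sketch of the pattern (the parameter values here are illustrative, drawn from the calls below, not guaranteed defaults):

import reader

# Illustrative values only; pick the ones your dataset needs.
data_args = reader.Settings(
    dataset='pascalvoc',            # or 'coco2014' / 'coco2017'
    data_dir='data/pascalvoc',      # dataset root directory
    label_file='label_list',        # class-name list, or a COCO annotation JSON
    resize_h=300,                   # assumed input size; match your model
    resize_w=300,
    mean_value=[104., 117., 123.],  # per-channel BGR means to subtract
    apply_distort=True,             # photometric augmentation (training only)
    apply_expand=True,              # expand augmentation (training only)
    ap_version='11point')           # AP metric variant used at evaluation

The resulting object is passed unchanged to the pipeline functions, as the project snippets below show.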

github PaddlePaddle / paddle-ce-latest-kpis / ce_models / object_detection / train.py (View on Github)
if __name__ == '__main__':
    args = parser.parse_args()
    print_arguments(args)

    data_dir = '/root/.cache/paddle/dataset/pascalvoc'
    train_file_list = 'trainval.txt'
    val_file_list = 'test.txt'
    label_file = 'label_list'
    model_save_dir = args.model_save_dir
    if args.dataset == 'coco':
        data_dir = './data/COCO17'
        train_file_list = 'annotations/instances_train2017.json'
        val_file_list = 'annotations/instances_val2017.json'
        label_file = 'label_list'

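    # Gather dataset layout and augmentation options into one Settings object.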
    data_args = reader.Settings(
        dataset=args.dataset,
        data_dir=data_dir,
        label_file=label_file,
        apply_distort=args.apply_distort,
        apply_expand=args.apply_expand,
        resize_h=args.resize_h,
        resize_w=args.resize_w,
        mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R],
        toy=args.is_toy)
    # method = parallel_do
    method = parallel_exe
    method(
        args,
        train_file_list=train_file_list,
        val_file_list=val_file_list,
        data_args=data_args)

github PaddlePaddle / models / fluid / PaddleCV / object_detection / train.py (View on Github)
    if dataset == 'coco2014':
        train_file_list = 'annotations/instances_train2014.json'
        val_file_list = 'annotations/instances_val2014.json'
    elif dataset == 'coco2017':
        train_file_list = 'annotations/instances_train2017.json'
        val_file_list = 'annotations/instances_val2017.json'

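    # Parse the comma-separated CLI strings into numeric lists.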
    mean_BGR = [float(m) for m in args.mean_BGR.split(",")]
    image_shape = [int(m) for m in args.image_shape.split(",")]
    train_parameters[dataset]['image_shape'] = image_shape
    train_parameters[dataset]['batch_size'] = args.batch_size
    train_parameters[dataset]['lr'] = args.learning_rate
    train_parameters[dataset]['epoc_num'] = args.epoc_num
    train_parameters[dataset]['ap_version'] = args.ap_version

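    # image_shape is [C, H, W]; mean subtraction expects BGR channel order.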
    data_args = reader.Settings(
        dataset=args.dataset,
        data_dir=data_dir,
        label_file=label_file,
        resize_h=image_shape[1],
        resize_w=image_shape[2],
        mean_value=mean_BGR,
        apply_distort=True,
        apply_expand=True,
        ap_version=args.ap_version)
    train(args,
          data_args,
          train_parameters[dataset],
          train_file_list=train_file_list,
          val_file_list=val_file_list)

github PaddlePaddle / models / PaddleCV / face_detection / profile.py (View on Github)
    run_time = run(num_iterations)
    end = time.time()
    total_time = end - start
    print("Total time: {0}, reader time: {1} s, run time: {2} s".format(
        total_time, total_time - np.sum(run_time), np.sum(run_time)))


if __name__ == '__main__':
    args = parser.parse_args()
    print_arguments(args)

    data_dir = os.path.join(args.data_dir, 'WIDER_train/images/')
    train_file_list = os.path.join(args.data_dir,
        'wider_face_split/wider_face_train_bbx_gt.txt')

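    # WIDER FACE training: fixed BGR channel means, 11-point AP metric.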
    config = reader.Settings(
        data_dir=data_dir,
        resize_h=args.resize_h,
        resize_w=args.resize_w,
        apply_expand=False,
        mean_value=[104., 117., 123.],
        ap_version='11point')
    train(args, config, train_file_list, optimizer_method="momentum")

github PaddlePaddle / models / PaddleCV / face_detection / widerface_eval.py (View on Github)
        max_shrink = max_shrink - 0.3
    elif max_shrink >= 4 and max_shrink < 5:
        max_shrink = max_shrink - 0.4
    elif max_shrink >= 5:
        max_shrink = max_shrink - 0.5
    elif max_shrink <= 0.1:
        max_shrink = 0.1

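    # Never upscale: cap the returned shrink factor at 1.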
    shrink = max_shrink if max_shrink < 1 else 1
    return shrink, max_shrink


if __name__ == '__main__':
    args = parser.parse_args()
    print_arguments(args)
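    # Only data_dir is given; the remaining Settings fields keep their defaults.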
    config = reader.Settings(data_dir=args.data_dir)

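    # Run on GPU 0 when --use_gpu is set, otherwise fall back to CPU.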
    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    main_program = fluid.Program()
    startup_program = fluid.Program()
    image_shape = [3, 1024, 1024]
    with fluid.program_guard(main_program, startup_program):
        network = PyramidBox(
            data_shape=image_shape,
            sub_network=args.use_pyramidbox,
            is_infer=True)
        infer_program, nmsed_out = network.infer(main_program)
        fetches = [nmsed_out]
        exe.run(startup_program)
        fluid.io.load_persistables(
            exe, args.model_dir, main_program=infer_program)

github PaddlePaddle / models / fluid / face_detection / infer.py (View on Github)
    predict, = exe.run(infer_program,
                       feed=feed,
                       fetch_list=fetches,
                       return_numpy=False)
    predict = np.array(predict)
    draw_bounding_box_on_image(args.image_path, predict, args.confs_threshold)


if __name__ == '__main__':
    args = parser.parse_args()
    print_arguments(args)

    data_dir = 'data/WIDERFACE/WIDER_val/images/'
    file_list = 'label/val_gt_widerface.res'

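    # Augmentation is disabled for inference; only resize and mean subtraction remain.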
    data_args = reader.Settings(
        data_dir=data_dir,
        resize_h=args.resize_h,
        resize_w=args.resize_w,
        mean_value=[104., 117., 123.],
        apply_distort=False,
        apply_expand=False,
        ap_version='11point')
    infer(args, data_args=data_args)

github PaddlePaddle / models / fluid / PaddleCV / object_detection / infer.py (View on Github)
if __name__ == '__main__':
    args = parser.parse_args()
    print_arguments(args)

    data_dir = 'data/pascalvoc'
    label_file = 'label_list'

    if not os.path.exists(args.model_dir):
        raise ValueError("The model path [%s] does not exist." %
                         (args.model_dir))
    if 'coco' in args.dataset:
        data_dir = 'data/coco'
        label_file = 'annotations/instances_val2014.json'

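    # Per-channel means are taken from the CLI in B, G, R order.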
    data_args = reader.Settings(
        dataset=args.dataset,
        data_dir=data_dir,
        label_file=label_file,
        resize_h=args.resize_h,
        resize_w=args.resize_w,
        mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R],
        apply_distort=False,
        apply_expand=False,
        ap_version='')
    infer(
        args,
        data_args=data_args,
        image_path=args.image_path,
        model_dir=args.model_dir)

github PaddlePaddle / models / legacy / neural_qa / train.py (View on Github)
def train(conf):
    if not os.path.exists(conf.model_save_dir):
        os.makedirs(conf.model_save_dir, mode=0o755)

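    # The neural_qa reader is text-based: its Settings takes a vocab and
    # sampling ratios rather than image preprocessing options.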
    settings = reader.Settings(
        vocab=conf.vocab,
        is_training=True,
        label_schema=conf.label_schema,
        negative_sample_ratio=conf.negative_sample_ratio,
        hit_ans_negative_sample_ratio=conf.hit_ans_negative_sample_ratio,
        keep_first_b=conf.keep_first_b,
        seed=conf.seed)
    samples_per_pass = conf.batch_size * conf.batches_per_pass
    train_reader = paddle.batch(
        paddle.reader.buffered(
            reader.create_reader(conf.train_data_path, settings,
                                 samples_per_pass),
            size=samples_per_pass),
        batch_size=conf.batch_size)

    # TODO(lipeng17) v2 API does not support parallel_nn yet. Therefore, we can

github PaddlePaddle / models / PaddleCV / ssd / eval.py (View on Github)
    data_dir = 'data/pascalvoc'
    test_list = 'test.txt'
    label_file = 'label_list'

    if not os.path.exists(args.model_dir):
        raise ValueError("The model path [%s] does not exist." %
                         (args.model_dir))
    if 'coco' in args.dataset:
        data_dir = 'data/coco'
        if '2014' in args.dataset:
            test_list = 'annotations/instances_val2014.json'
        elif '2017' in args.dataset:
            test_list = 'annotations/instances_val2017.json'

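    # Prefer CLI-supplied paths, falling back to the defaults chosen above.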
    data_args = reader.Settings(
        dataset=args.dataset,
        data_dir=args.data_dir if len(args.data_dir) > 0 else data_dir,
        label_file=label_file,
        resize_h=args.resize_h,
        resize_w=args.resize_w,
        mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R],
        apply_distort=False,
        apply_expand=False,
        ap_version=args.ap_version)
    eval(
        args,
        data_args=data_args,
        test_list=args.test_list if len(args.test_list) > 0 else test_list,
        batch_size=args.batch_size,
        model_dir=args.model_dir)