How to use the fastprogress.progress_bar function in fastprogress

To help you get started, we’ve selected a few fastprogress examples based on popular ways progress_bar is used in public projects.
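At its simplest, progress_bar wraps any sized iterable and yields its items unchanged while drawing and updating a bar in the console or notebook. A minimal, self-contained sketch, with a sleep call standing in for real per-item work:

from time import sleep
from fastprogress.fastprogress import progress_bar

items = ['a', 'b', 'c', 'd']
for item in progress_bar(items):   # yields each element while updating the bar
    sleep(0.1)                     # placeholder for real work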


github katsura-jp / extruct-video-feature / main.py
            visual_activity_concept = visual_activity_concept.cpu().detach().numpy()
            # convert matrix to vector : (1,outdim) --> (outdim,)
            visual_feature = visual_feature.reshape(visual_feature.shape[1],)
            visual_activity_concept = visual_activity_concept.reshape(visual_activity_concept.shape[1],)
            # save feature
            np.save(os.path.join(visual_feature_out_dir, name), visual_feature)
            np.save(os.path.join(activity_concept_out_dir, activity_name(name)), visual_activity_concept)
            if args.verbose:
                print('save {}.npy'.format(name))

    # get movie name
    movie_names = []
    for movie in root_dir:
        movie_names.append(movie.split('/')[-1])

    for movie_name in progress_bar(movie_names):
        pool_feature(movie_name, activity_concept_out_dir, pooled_activity_concept_out_dir)

github GilesStrong / lumin / lumin / optimisation / features.py
        if verbose: print("Optimising RF parameters")
        rfp, rf = get_opt_rf_params(train_df[train_feats], train_df[targ_name], val_df[train_feats], val_df[targ_name],
                                    objective, w_trn=w_trn, w_val=w_val, n_estimators=n_estimators, params=rf_params, verbose=False)
    else:
        rfp = rf_params
        rfp['n_estimators'] = n_estimators
        m = RandomForestClassifier if 'class' in objective.lower() else RandomForestRegressor
        rf = m(**rfp)
        rf.fit(X=train_df[train_feats], y=train_df[targ_name], sample_weight=w_trn)
    
    if verbose: print("Evalualting importances")
    fi = get_rf_feat_importance(rf, train_df[train_feats], train_df[targ_name], w_trn)
    orig_score = [rf.score(X=val_df[train_feats], y=val_df[targ_name], sample_weight=w_val)]
    if n_rfs > 1:
        m = RandomForestClassifier if 'class' in objective.lower() else RandomForestRegressor
        for _ in progress_bar(range(n_rfs-1)):
            rf = m(**rfp)
            rf.fit(X=train_df[train_feats], y=train_df[targ_name], sample_weight=w_trn)
            fi = pd.merge(fi, get_rf_feat_importance(rf, train_df[train_feats], train_df[targ_name], w_trn), on='Feature', how='left')
            orig_score.append(rf.score(X=val_df[train_feats], y=val_df[targ_name], sample_weight=w_val))
        fi['Importance']  = np.mean(fi[[f for f in fi.columns if 'Importance' in f]].values, axis=1)
        fi['Uncertainty'] = np.std(fi[[f for f in fi.columns if 'Importance' in f]].values, ddof=1, axis=1)/np.sqrt(n_rfs)
        fi.sort_values(by='Importance', ascending=False, inplace=True)
    orig_score = uncert_round(np.mean(orig_score), np.std(orig_score, ddof=1))
    if verbose: print("Top ten most important features:\n", fi[['Feature', 'Importance']][:min(len(fi), 10)])
    if plot_results: plot_importance(fi[:min(len(fi), n_max_display)], threshold=importance_cut, savename=savename, settings=plot_settings)

    top_feats = list(fi[fi.Importance >= importance_cut].Feature)
    if verbose: print(f"\n{len(top_feats)} features found with importance greater than {importance_cut}:\n", top_feats, '\n')
    if len(top_feats) == 0:
        if verbose: print(f"Model score: :\t{orig_score[0]}±{orig_score[1]}")
        print('No features found to be important, returning all training features. Good luck.')

github GilesStrong / lumin / lumin / nn / models / model.py
            pred_name: name of group to which to save predictions
            callbacks: list of any callbacks to use during evaluation
            verbose: whether to print average prediction timings
            bs: if not `None`, will run prediction in batches of specified size to save memory
        '''

        times = []
        mb = master_bar(range(len(fy)))
        for fold_idx in mb:
            fold_tmr = timeit.default_timer()
            if not fy.test_time_aug:
                fold = fy.get_fold(fold_idx)['inputs']
                pred = self.predict_array(fold, callbacks=callbacks, bs=bs)
            else:
                tmpPred = []
                pb = progress_bar(range(fy.aug_mult), parent=mb)
                for aug in pb:
                    fold = fy.get_test_fold(fold_idx, aug)['inputs']
                    tmpPred.append(self.predict_array(fold, callbacks=callbacks, bs=bs))
                pred = np.mean(tmpPred, axis=0)

            times.append((timeit.default_timer()-fold_tmr)/len(fold))
            if self.n_out > 1: fy.save_fold_pred(pred, fold_idx, pred_name=pred_name)
            else: fy.save_fold_pred(pred[:, 0], fold_idx, pred_name=pred_name)
        times = uncert_round(np.mean(times), np.std(times, ddof=1)/np.sqrt(len(times)))
        if verbose: print(f'Mean time per event = {times[0]}±{times[1]}')
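The snippet above nests a per-fold progress_bar inside a master_bar by passing parent=mb, so the inner bar renders beneath the outer one and is reset on each fold. A stripped-down sketch of that nesting pattern (loop sizes and the sleep call are placeholders):

from time import sleep
from fastprogress.fastprogress import master_bar, progress_bar

mb = master_bar(range(5))                              # outer bar, e.g. folds or epochs
for fold in mb:
    for step in progress_bar(range(20), parent=mb):    # inner bar attached to mb
        sleep(0.01)                                    # placeholder work
    mb.write(f'fold {fold} done')                      # print without breaking the bars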

github BPHO-Salk / PSSR / utils / utils.py
    if not max_imgs is None: times = min(max_imgs, times)

    imgs = []

    for i in range(times):
        im.seek(i)
        im.load()
        imgs.append(np.array(im))

    imgs, img_info = img_to_float(np.stack(imgs))

    preds = []

    x, y = im.size
    print(f'tif: x:{x} y:{y} t:{times}')
    for t in progress_bar(list(range(times))):
        img = imgs[t]
        img = img.copy()

    if len(preds) > 0:
        all_y = img_to_uint8(np.concatenate(preds))
        imageio.mimwrite(pred_out, all_y, bigtiff=True)
        shutil.copy(tif_in, orig_out)

github huggingface / transformers / examples / run_tf_squad.py
            gradient_accumulator(grads)

            return total_loss

        per_example_losses = strategy.experimental_run_v2(step_fn, args=(train_features, train_labels))
        mean_loss = strategy.reduce(tf.distribute.ReduceOp.MEAN, per_example_losses, axis=0)

        return mean_loss

    current_time = datetime.datetime.now()
    train_iterator = master_bar(range(args.num_train_epochs))
    global_step = 0
    logging_loss = 0.0

    for epoch in train_iterator:
        epoch_iterator = progress_bar(
            train_dataset, total=num_train_steps, parent=train_iterator, display=args.n_device > 1
        )
        step = 1

        with strategy.scope():
            for train_features, train_labels in epoch_iterator:
                loss = train_step(train_features, train_labels)

                if step % args.gradient_accumulation_steps == 0:
                    strategy.experimental_run_v2(apply_gradients)

                    loss_metric(loss)

                    global_step += 1

                    if args.logging_steps > 0 and global_step % args.logging_steps == 0:
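
Two fastprogress arguments in the snippet above are worth noting: total= supplies a length when the wrapped iterable has no len() (here a tf.data dataset), and display= lets the caller switch the bar off entirely (here it is only shown on multi-device runs). A standalone sketch of both arguments, using a plain generator as a stand-in for the dataset:

from fastprogress.fastprogress import progress_bar

def stream(n):                 # a generator has no __len__, so the bar needs total=
    for i in range(n):
        yield i

show_bar = True                # e.g. only display the bar on the main process
for x in progress_bar(stream(200), total=200, display=show_bar):
    pass                       # placeholder for the real training step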

github fastai / fastai_dev / dev / fastai2 / torch_core.py
    def done(): return (queue.get() for _ in progress_bar(items, leave=False))
    yield from run_procs(f, done, L(batches,idx).zip())
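
The leave=False flag used above tells fastprogress not to keep the bar on screen once the loop finishes, which keeps output tidy when many short-lived bars are created, as in this parallel helper. A minimal sketch of the flag on its own, with a sleep call standing in for real work:

from time import sleep
from fastprogress.fastprogress import progress_bar

for item in progress_bar(range(50), leave=False):   # bar is cleared after the loop
    sleep(0.01)                                     # placeholder work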

github GilesStrong / lumin / lumin / nn / ensemble / ensemble.py
        >>> preds = ensemble.predict_array(inputs)
        '''
        
        n_models = len(self.models) if n_models is None else n_models
        models = self.models[:n_models]
        weights = self.weights[:n_models]
        weights = weights/weights.sum()

        if isinstance(arr, tuple):
            arr = (to_device(Tensor(arr[0])),to_device(Tensor(arr[1])))
            pred = np.zeros((len(arr[0]), self.n_out))
        else:
            arr = to_device(Tensor(arr))
            pred = np.zeros((len(arr), self.n_out))

        for i, m in enumerate(progress_bar(models, parent=parent_bar, display=display)):
            tmp_pred = m.predict(arr, callbacks=callbacks, bs=bs)
            if self.output_pipe is not None: tmp_pred = self.output_pipe.inverse_transform(Xt=tmp_pred)
            pred += weights[i]*tmp_pred
        return pred

github GilesStrong / lumin / lumin / nn / ensemble / ensemble.py
            name: base name for saved objects
            feats: optional list of input features
            overwrite: if existing objects are found, whether to overwrite them
        
        Examples::
            >>> ensemble.save('weights/ensemble')
            >>>
            >>> ensemble.save('weights/ensemble', ['pt','eta','phi'])
        '''

        if (len(glob.glob(f"{name}*.json")) or len(glob.glob(f"{name}*.h5")) or len(glob.glob(f"{name}*.pkl"))) and not overwrite:
            raise FileExistsError("Ensemble already exists with that name, call with overwrite=True to force save")
        else:
            os.makedirs(name[:name.rfind('/')], exist_ok=True)
            os.system(f"rm {name}*.json {name}*.h5 {name}*.pkl")
            for i, model in enumerate(progress_bar(self.models)): model.save(f'{name}_{i}.h5')    
            with open(f'{name}_weights.pkl', 'wb')         as fout: pickle.dump(self.weights, fout)
            with open(f'{name}_results.pkl', 'wb')         as fout: pickle.dump(self.results, fout)
            with open(f'{name}_builder.pkl', 'wb')         as fout: pickle.dump(self.model_builder, fout)
            if self.input_pipe  is not None: 
                with open(f'{name}_input_pipe.pkl', 'wb')  as fout: pickle.dump(self.input_pipe, fout)
            if self.output_pipe is not None: 
                with open(f'{name}_output_pipe.pkl', 'wb') as fout: pickle.dump(self.output_pipe, fout)
            if feats            is not None: 
                with open(f'{name}_feats.pkl', 'wb')       as fout: pickle.dump(feats, fout)

github BPHO-Salk / PSSR / image_gen.py
                data = np.repeat(img_tif[None],5,axis=0).astype(np.float32)
            else:
                return []
        else:
            times = img_tif.n_frames
            img_tifs = []
            for i in range(times):
                img_tif.seek(i)
                img_tif.load()
                img_tifs.append(np.array(img_tif).copy())
            data = np.stack(img_tifs).astype(np.float32)

        data, img_info = img_to_float(data)
        img_tiffs = []
        time_range = list(range(offset_frames, times - offset_frames))
        for t in progress_bar(time_range):
            time_slice = slice(t-offset_frames, t+offset_frames+1)
            img = data[time_slice].copy()
            pred_img = proc_func(img, img_info=img_info, mode=mode)
            pred_img8 = (pred_img * np.iinfo(np.uint8).max).astype(np.uint8)
            img_tiffs.append(pred_img8[None])

        imgs = np.concatenate(img_tiffs)
        if processor!='bilinear':
            fldr_name = f'{out_fn.parent}/{processor}'
        else:
            fldr_name = out_fn.parent.parent.parent/processor/out_fn.parent.stem
        save_name = f'{fn.stem}_{processor}.tif'
        out_fldr = ensure_folder(out_fn.parent/processor)

        if imgs.size < 4e9:
            imageio.mimwrite(out_fldr/save_name, imgs)

github torchgan / torchgan / torchgan / trainer / base_trainer.py
            data_loader (torch.utils.data.DataLoader): A DataLoader for the trainer to iterate over
                and train the models.
        """
        for name in self.optimizer_names:
            getattr(self, name).zero_grad()

        master_bar_iter = master_bar(range(self.start_epoch, self.epochs))
        for epoch in master_bar_iter:

            start_time = time.time()
            master_bar_iter.first_bar.comment = f"Training Progress"

            for model in self.model_names:
                getattr(self, model).train()

            for data in progress_bar(data_loader, parent=master_bar_iter):

                master_bar_iter.child.comment = f"Epoch {epoch+1} Progress"

                if type(data) is tuple or type(data) is list:
                    self.real_inputs = data[0].to(self.device)
                    self.labels = data[1].to(self.device)
                elif type(data) is torch.Tensor:
                    self.real_inputs = data.to(self.device)
                else:
                    self.real_inputs = data

                lgen, ldis, gen_iter, dis_iter = self.train_iter()
                self.loss_information["generator_losses"] += lgen
                self.loss_information["discriminator_losses"] += ldis
                self.loss_information["generator_iters"] += gen_iter
                self.loss_information["discriminator_iters"] += dis_iter