How to use the tqdm.tqdm_notebook function in tqdm

To help you get started, we’ve selected a few tqdm.tqdm_notebook examples based on popular ways it is used in public projects.

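Before the project examples below, here is a minimal sketch of typical tqdm_notebook usage in a Jupyter notebook. The iterable and the sleep call are placeholders standing in for real per-item work; note that newer tqdm releases expose the same bar as tqdm.notebook.tqdm and keep tqdm.tqdm_notebook as a deprecated alias.

import tqdm
from time import sleep

# Wrap any iterable to render an HTML progress bar inside a Jupyter notebook.
for item in tqdm.tqdm_notebook(range(100), desc='processing', leave=False):
    sleep(0.01)  # placeholder for real per-item work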

github COSIMA / cosima-cookbook / cosima_cookbook / netcdf_index.py (View on GitHub)
        runs_to_index = list(runs_available - runs_already_seen)
        if len(runs_to_index) == 0:
            print('No new runs found in {}'.format(directoryToSearch))
            continue
        # 
        # print('{} new run directories found including... '.format(len(runs_to_index)))
        # 
        # for i in range(min(3, len(runs_to_index))):
        #     print(runs_to_index[i])
        # if len(runs_to_index) > 3:
        #     print('...')

        print('Finding files in {} run directories... '.format(len(runs_to_index)))
        ncfiles = []
        for run in tqdm.tqdm_notebook(runs_to_index, leave=False):
            try:
                results = subprocess.check_output(['find', run, '-name', '*.nc'])
                results = [s for s in results.decode('utf-8').split()]
                ncfiles.extend(results)
            except:
                print('{0} exception occurred while finding *.nc in {1}'.format(sys.exc_info()[0], run))

        # IPython.display.clear_output(wait=True)
        # NetCDF files found on disk not seen before:
        files_to_add = set(ncfiles) - files_already_seen

        print('Files found but not yet indexed: {}'.format(len(files_to_add)))

        # For these new files, we can determine their configuration, experiment, and run.
        # Using NetCDF4 to get list of all variables in each file.
github CalabreseLab / seekr / seekr / my_tqdm.py (View on GitHub)
from tqdm import tqdm, tqdm_notebook

def my_tqdm():
    return tqdm_notebook if _is_kernel() else tqdm
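The snippet above returns the notebook-aware bar only when a Jupyter kernel is detected, via a _is_kernel helper defined elsewhere in the module. A minimal sketch of how such a check is commonly written (an assumption for illustration, not seekr's actual implementation):

import sys

def _is_kernel():
    # Hypothetical helper: report True when running inside an IPython/Jupyter
    # kernel, where the HTML (widget-based) progress bar can be rendered.
    if 'IPython' not in sys.modules:
        return False
    from IPython import get_ipython
    return getattr(get_ipython(), 'kernel', None) is not None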
github worldveil / photomosaic / emosiac / utils / indexing.py (View on GitHub)
        height_aspect,
        width_aspect,
        vectorization_factor=1,
        precompute_target=None, 
        use_stabilization=True,
        stabilization_threshold=0.85,
        randomness=0.0,
        caching=True,
    ):
    scale2index = {}
    scale2mosaic = {}
    count = 0
    scales = range(min_scale, max_scale + 1, 1)
    aspect_ratio = height_aspect / float(width_aspect)

    with tqdm(total=len(scales)) as pbar:
        for scale in scales:
            print("Indexing scale=%d..." % scale)
            h, w = compute_hw(scale, height_aspect, width_aspect)
            tile_index, _, tile_images = index_images(
                paths='%s/*.jpg' % codebook_dir,
                aspect_ratio=aspect_ratio, 
                height=h, width=w,
                vectorization_scaling_factor=vectorization_factor,
                caching=True,
            )
            scale2index[scale] = (tile_index, tile_images)

            # then precompute the mosaic
            h, w = compute_hw(scale, height_aspect, width_aspect)

            # mosaic-ify & show it
github COSIMA / cosima-cookbook / cosima_cookbook / plots / overturning.py (View on GitHub)
def psi_avg(expts, n=10, GM=False, clev=np.arange(-20,20,2)):
    
    if not isinstance(expts, list):
        expts = [expts]
        
    # computing
    results = []
    for expt in tqdm_notebook(expts, leave=False, desc='experiments'):
        psi_avg = cc.diagnostics.psi_avg(expt, n, GM)
            
        result = {'psi_avg': psi_avg,
                  'expt': expt}
        results.append(result)
        
    IPython.display.clear_output()
   
    # plotting
    for result in results:
        psi_avg = result['psi_avg']
        expt = result['expt']
        
        plt.figure(figsize=(10, 5)) 
        plt.contourf(psi_avg.grid_yu_ocean, 
                 psi_avg.potrho, psi_avg,
github shayneobrien / explicit-gan-eval / utils.py (View on GitHub)
""" Find best results """

    # Get filenames
    dim_numsamples_names = [i for i in os.listdir(dirname) if i != '.DS_Store']
    lr_hdim_bsize_names = [i for i in os.listdir(dirname + dim_numsamples_names[0] + '/trial_1')
                           if '1024' in i]

    # For each number of dimensions and training samples
    for name in tqdm_notebook(dim_numsamples_names):

        # Initialize best dictionary
        best_path = '../best/multivariate/{0}/'.format(t)
        global_optimal = nested_pickle_dict()

        # For each hyperparameter setting
        for t in tqdm_notebook(lr_hdim_bsize_names):

            # Initialize a dictionary containing the best result
            optimal = nested_pickle_dict()
            results = []

            # Load in the results from each trial
            for trial in range(1, 21):
                path = dirname + '{0}/trial_{1}/{2}'.format(name, trial, t)

                data = []
                with open(path) as f:
                    for line in f:
                        data.append(json.loads(line))

                results.append(data[0])
github PIQuIL / QuCumber / qucumber / nn_states / neural_state.py (View on GitHub)
        :param callbacks: Callbacks to run while training.
        :type callbacks: list[qucumber.callbacks.CallbackBase]
        :param optimizer: The constructor of a torch optimizer.
        :type optimizer: torch.optim.Optimizer
        :param scheduler: The constructor of a torch scheduler
        :param optimizer_args: Arguments to pass to the optimizer
        :type optimizer_args: dict
        :param scheduler_args: Arguments to pass to the scheduler
        :type scheduler_args: dict
        :param \**kwargs: Ignored; exists for backwards compatibility.
        """
        if self.stop_training:  # terminate immediately if stop_training is true
            return

        disable_progbar = progbar is False
        progress_bar = tqdm_notebook if progbar == "notebook" else tqdm

        callbacks = CallbackList(callbacks if callbacks else [])
        if time:
            callbacks.append(Timer())

        neg_batch_size = neg_batch_size if neg_batch_size else pos_batch_size

        if isinstance(data, torch.Tensor):
            train_samples = (
                data.clone().detach().to(device=self.device, dtype=torch.double)
            )
        else:
            train_samples = torch.tensor(data, device=self.device, dtype=torch.double)

        all_params = [getattr(self, net).parameters() for net in self.networks]
        all_params = list(chain(*all_params))
github PIQuIL / QuCumber / tools / benchmarks / python / rbm.py (View on GitHub)
"adam", "nesterov", "momentum", or "sgd"
                  (default "sgd")
        l1_reg -- the l1 regularization parameter (default 0)
        l2_reg -- the l2 regularization parameter (default 0)
        log_every -- how often the validation statistics are recorded
                     in epochs (default 10)
        progbar -- whether to display a progress bar; can be a boolean
                   or "notebook" for displaying progress bars in a
                   jupyter notebook (default False)
        **kwargs -- extra keyword arguments passed to the parameter
                    update function; refer to `rbm_grad_updates.py`
                    for more info
        """
        nll_list, overlap_list = [], []
        disable_progbar = (progbar is False)
        prog_bar = tqdm_notebook if progbar == "notebook" else tqdm

        if not callable(lr):
            lr = schedulers.constant(lr)
        if not callable(momentum):
            momentum = schedulers.constant(momentum)

        updater, updater_data = rgu.get_updater(method,
                                                learning_rate=lr,
                                                momentum_param=momentum,
                                                **kwargs)

        pbatch = (self.rand_state.binomial(
                        1, 0.5, size=(batch_size, self.num_visible)
                    ).astype(np.float64)
                  if persistent
                  else None)
github analysiscenter / batchflow / batchflow / notifier.py (View on GitHub)
def __init__(self, *args, **kwargs):
        self._notifier = tqdm.tqdm_notebook(*args, **kwargs)
github COSIMA / cosima-cookbook / cosima_cookbook / plots / lineplots.py (View on GitHub)
    Plot Drake Passage transport.

    Parameters
    ----------
    expts : str or list of str
        Experiment name(s).
    """

    plt.figure(figsize=(12, 6))

    if not isinstance(expts, list):
        expts = [expts]

    # computing
    results = []
    for expt in tqdm_notebook(expts, leave=False, desc='experiments'):
        transport = cc.diagnostics.drake_passage(expt)
            
        result = {'transport': transport,
                  'expt': expt}
        results.append(result)
    
    IPython.display.clear_output()
    
    # plotting
    for result in results:
        transport = result['transport']
        expt = result['expt']
        transport.plot(label=expt)
        
    plt.title('Drake Passage Transport')
    plt.xlabel('Time')