How to use the tqdm.auto.tqdm function in tqdm

To help you get started, we've selected a few tqdm examples based on popular ways the function is used in public projects.
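
Note that tqdm.auto.tqdm resolves to the notebook widget bar when running under Jupyter and to the plain console bar otherwise, so the same import works in both environments. A minimal sketch:

import time
from tqdm.auto import tqdm  # notebook widget in Jupyter, console bar elsewhere

for _ in tqdm(range(100), desc="working"):
    time.sleep(0.01)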


github tardis-sn / tardis / tardis / io / util.py
import os

import requests
from tqdm.auto import tqdm


def download_from_url(url, dst):
    """
    kindly adapted from https://gist.github.com/wy193777/0e2a4932e81afc6aa4c8f7a2984f34e2
    @param url: URL of the file to download
    @param dst: destination path for the downloaded file
    """

    file_size = int(requests.head(url).headers["Content-Length"])
    if os.path.exists(dst):
        first_byte = os.path.getsize(dst)
    else:
        first_byte = 0
    if first_byte >= file_size:
        return file_size
    header = {"Range": "bytes=%s-%s" % (first_byte, file_size)}
    pbar = tqdm(
        total=file_size,
        initial=first_byte,
        unit="B",
        unit_scale=True,
        desc=url.split("/")[-1],
    )
    req = requests.get(url, headers=header, stream=True)
    with open(dst, "ab") as f:
        for chunk in req.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
                pbar.update(len(chunk))  # the final chunk may be shorter than 1024 bytes
    pbar.close()
    return file_size
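
Because the function seeds the bar with initial=first_byte and sends a Range header, an interrupted download resumes where it left off and the bar starts partially filled. A usage sketch (the URL and destination are placeholders):

file_size = download_from_url(
    "https://example.com/archive.zip",  # placeholder URL
    "/tmp/archive.zip",                 # placeholder destination
)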
github alanrkessler / savantscraper / savantscraper.py
    savant = sqlite3.connect(f"{db_name}.db")

    # Define teams to be iterated over
    if teams is None:
        teams = ['LAA', 'HOU', 'OAK', 'TOR', 'ATL', 'MIL', 'STL',
                 'CHC', 'ARI', 'LAD', 'SF', 'CLE', 'SEA', 'MIA',
                 'NYM', 'WSH', 'BAL', 'SD', 'PHI', 'PIT', 'TEX',
                 'TB', 'BOS', 'CIN', 'COL', 'KC', 'DET', 'MIN',
                 'CWS', 'NYY']

    locations = ['Home', 'Road']

    # Loop over seasons and teams
    # Append to statcast table at each iteration
    for season in tqdm(range(seasons[0], seasons[1]+1), desc='Seasons'):
        for team in tqdm(teams, desc='Teams'):
            for location in tqdm(locations, desc='Home/Road', leave=False):
                single_combination = savant_search(season, team, location)
                pd.io.sql.to_sql(single_combination, name='statcast',
                                 con=savant, if_exists='append')

    # Close connection
    savant.commit()
    savant.close()
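
The three nested tqdm calls render stacked progress bars, one per loop level; leave=False on the innermost bar erases it after each Home/Road pass so only the outer bars persist. A minimal sketch of the same pattern:

from tqdm.auto import tqdm

for season in tqdm(range(3), desc='Seasons'):
    for team in tqdm(['A', 'B'], desc='Teams'):
        for location in tqdm(['Home', 'Road'], desc='Home/Road', leave=False):
            pass  # the innermost bar clears itself on completion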
github MicroStrategy / mstrio-py / mstrio / report.py
    def __get_attr_elements(self, limit=200000):
        """Get elements of report attributes synchronously.
        Implements GET /reports/<report_id>/attributes/<attribute_id>/elements
        """

        attr_elements = []
        if self.attributes:
            pbar = tqdm(self.attributes, desc="Loading attribute elements",
                        leave=False, disable=(not self.progress_bar))
            # Fetch first chunk of attribute elements.
            for i, attr in enumerate(pbar):
                response = reports.report_single_attribute_elements(connection=self._connection,
                                                                    report_id=self._report_id,
                                                                    attribute_id=attr['id'],
                                                                    offset=0,
                                                                    limit=limit,
                                                                    verbose=helper.debug())
                # Get total number of rows from headers.
                total = int(response.headers['x-mstr-total-count'])
                # Get attribute elements from the response.
                elements = response.json()

                # If total number of elements is bigger than the chunk size (limit), fetch them incrementally.
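
The snippet is truncated at this point, but the pattern it sets up is a common one: page through a REST endpoint in limit-sized chunks while a tqdm bar tracks progress. A minimal sketch of that chunked-fetch loop, with fetch_page standing in for the real API call (hypothetical helper):

from tqdm.auto import tqdm

def fetch_all_elements(total, limit=200000):
    elements = []
    with tqdm(total=total, desc="Loading elements", leave=False) as pbar:
        for offset in range(0, total, limit):
            elements.extend(fetch_page(offset=offset, limit=limit))  # hypothetical API call
            pbar.update(min(limit, total - offset))
    return elements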
github dm3ll3n / ezpq / ezpq / Queue.py
"""

        if poll is None or poll <= 0:
            poll = self._poll
        else:
            assert poll >= self._poll

        n_pending = self.size(waiting=True, working=True)

        if n_pending > 0:
            
            from tqdm.auto import tqdm

            start = time.time()

            # The with-block closes the bar on exit, so no explicit close() is needed
            with tqdm(total=n_pending, unit='op') as pb:
                while n_pending > 0 and (timeout == 0 or time.time() - start < timeout):
                    time.sleep(poll)
                    tmp = self.size(waiting=True, working=True)
                    diff = n_pending - tmp
                    if diff > 0:
                        n_pending = tmp
                        pb.update(diff)

        return n_pending
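
Here the bar does not wrap an iterable at all: the queue is polled, and the bar advances by the measured drop in the pending-work counter. A minimal self-contained sketch of the same polling pattern:

import time
from tqdm.auto import tqdm

pending = 10
with tqdm(total=pending, unit='op') as pb:
    while pending > 0:
        time.sleep(0.1)
        done = 1          # stand-in for re-querying real queue state
        pending -= done
        pb.update(done)   # advance by the observed difference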
github YosefLab / scVI / simu_gaussian.py
    # (True, "IWELBO", "CUBO", None),
    # (True, "IWELBO", "VRMAX", None),
    # IWELBO and SLEEP updates
    # (True, "IWELBO", None, "SLEEPKL"),
    # Wake and sleep
    # IWELBO and SLEEP updates
    # (True, "IWELBO", "REVKL", "SLEEPKL"),
]

nus = np.geomspace(1e-4, 1e2, num=20)
n_hidden_ranges = [16, 32, 64, 128, 256, 512]
# n_hidden_ranges = [128]

df = []
for learn_var, loss_gen, loss_wvar, loss_svar, do_linear_encoder in scenarios:
    for n_hidden in tqdm(n_hidden_ranges):
        print(learn_var, loss_gen, loss_wvar, loss_svar)
        iwelbo = []
        cubo = []
        l1_gen_dis = []
        l1_gen_sign = []
        l1_post_dis = []
        l1_post_sign = []
        l1_err_ex_plugin = []
        l1_err_ex_is = []
        l2_ess = []
        l1_errs_is = []
        khat = []
        a_2 = []
        for t in tqdm(range(n_simu)):
            print(t)
            params_gen = None
github jkoutsikakis / pytorch-wrapper / pytorch_wrapper / training_callbacks.py
    def on_training_end(self, training_context):
        if training_context['_verbose']:
            tqdm.write("Epoch chosen: %d" % self._best_epoch)
        training_context['system'].load_model_state(self._best_state_filepath)
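
tqdm.write prints a line above any active bars instead of corrupting them, which is why it is used here in place of a bare print. For example:

import time
from tqdm.auto import tqdm

for i in tqdm(range(10)):
    time.sleep(0.1)
    if i == 5:
        tqdm.write("halfway there")  # printed cleanly above the bar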
github swansonk14 / p_tqdm / p_tqdm / p_tqdm.py
    # Determine num_cpus (cpu_count and Pool come from pathos, which p_tqdm builds on)
    if num_cpus is None:
        num_cpus = cpu_count()
    elif isinstance(num_cpus, float):
        num_cpus = int(round(num_cpus * cpu_count()))

    # Determine length of tqdm (equal to length of shortest iterable)
    length = min(len(iterable) for iterable in iterables if isinstance(iterable, Sized))

    # Create parallel generator
    map_type = 'imap' if ordered else 'uimap'
    pool = Pool(num_cpus)
    map_func = getattr(pool, map_type)

    for item in tqdm(map_func(function, *iterables), total=length, **kwargs):
        yield item

    pool.clear()
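
You rarely call this private generator directly; p_tqdm exposes it through public wrappers such as p_map, which runs a function over iterables in parallel with a progress bar:

from p_tqdm import p_map

if __name__ == '__main__':
    # Squares 0..999 in parallel while a tqdm bar tracks completion
    results = p_map(lambda x: x ** 2, list(range(1000)))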
github jkoutsikakis / pytorch-wrapper / pytorch_wrapper / system.py
        :param data_loader: DataLoader object that generates batches of data. Each batch must be a Dict that contains at
            least a Tensor or a list/tuple of Tensors containing the input(s) of the model(key=`batch_input_key`).
        :param batch_input_key: The key of the batches returned by the data_loader that contains the input of the
            model.
        :param keep_batches: If set to True then the method also returns a list of the batches returned by the
            dataloader.
        :param verbose: Whether to print progress info.
        :return: Dict containing a list of batched model outputs (key=`output_list`) and a list of batches as returned
            by the dataloader (key=`batch_list`) if keep_batches is set to True.
        """

        batch_list = []
        output_list = []

        with torch.no_grad():
            # auto_tqdm is tqdm.auto.tqdm imported under an alias; NCOLS is a module-level bar-width constant
            gen = partial(auto_tqdm, ncols=NCOLS) if verbose else lambda x: x
            for i, batch in enumerate(gen(data_loader)):
                if keep_batches:
                    batch_list.append(batch)
                output = self.predict_batch(batch[batch_input_key])
                output = self._pure_predict_convert_output(output)
                output_list.append(output)

        if keep_batches:
            return {'batch_list': batch_list, 'output_list': output_list}
        else:
            return {'output_list': output_list}
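
The conditional gen wrapper is one way to make progress output optional; tqdm also accepts a disable flag that achieves the same effect without branching. A minimal sketch:

from tqdm.auto import tqdm

def iterate(data_loader, verbose=True):
    # disable=True suppresses all bar output while iteration proceeds normally
    for batch in tqdm(data_loader, disable=not verbose):
        yield batch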
github schlegelp / skeletonizer / skeletor / skeletonizers.py
    # Produce weighted edges
    edges = np.concatenate((mesh.edges_unique,
                            mesh.edges_unique_length.reshape(mesh.edges_unique.shape[0], 1)),
                           axis=1)

    # Generate Graph (must be undirected)
    G = nx.Graph()
    G.add_weighted_edges_from(edges)

    # Run the graph traversal that groups vertices into spatial clusters
    not_visited = set(G.nodes)
    seen = set()
    clusters = []
    to_visit = len(not_visited)
    with tqdm(desc='Clustering', total=len(not_visited), disable=progress is False) as pbar:
        while not_visited:
            # Pick a random node
            start = not_visited.pop()
            # Get all nodes in the geodesic vicinity
            cl, seen = dfs(G, n=start, dist_traveled=0,
                           max_dist=sampling_dist, seen=seen)
            cl = set(cl)

            # Append this cluster and track visited/not-visited nodes
            clusters.append(cl)
            not_visited = not_visited - cl

            # Update progress bar
            pbar.update(to_visit - len(not_visited))
            to_visit = len(not_visited)
github theislab / scanpy / scanpy / readwrite.py
from pathlib import Path


def _download(url: str, path: Path):
    from tqdm.auto import tqdm
    from urllib.request import urlretrieve

    path.parent.mkdir(parents=True, exist_ok=True)
    with tqdm(unit='B', unit_scale=True, miniters=1, desc=path.name) as t:
        def update_to(b=1, bsize=1, tsize=None):
            if tsize is not None:
                t.total = tsize
            t.update(b * bsize - t.n)

        urlretrieve(url, str(path), reporthook=update_to)
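
urlretrieve invokes the reporthook with the block count b, the block size bsize, and the total size tsize. Since t.n holds the bar's current count, t.update(b * bsize - t.n) converts that absolute progress into the increment tqdm expects; this is the standard tqdm recipe for urlretrieve-style callbacks.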