How to use the tqdm.trange function in tqdm

To help you get started, we've selected a few tqdm.trange examples based on popular ways it is used in public projects.
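If you haven't used it before, trange(n) is shorthand for tqdm(range(n)) and accepts the same keyword arguments as tqdm itself. A minimal sketch you can run locally (the sleep call is just a stand-in for real per-iteration work):

from tqdm import trange
import time

for i in trange(100, desc="Processing"):
    time.sleep(0.01)  # stand-in for the actual work done each iteration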


github huggingface / transformers / examples / run_ner.py
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if os.path.exists(args.model_name_or_path):
        # set global_step to global_step of last saved checkpoint from model path
        global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
        epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
        steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)

        logger.info("  Continuing training from checkpoint, will skip to saved global_step")
        logger.info("  Continuing training from epoch %d", epochs_trained)
        logger.info("  Continuing training from global step %d", global_step)
        logger.info("  Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)

    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
    )
    set_seed(args)  # Added here for reproducibility
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):

            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue

            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
            if args.model_type != "distilbert":
github huggingface / transformers / examples / lm_finetuning / pregenerate_training_data.py
        docs.add_document(doc)  # If the last doc didn't end on a newline, make sure it still gets added
        if len(docs) <= 1:
            exit("ERROR: No document breaks were found in the input file! These are necessary to allow the script to "
                 "ensure that random NextSentences are not sampled from the same document. Please add blank lines to "
                 "indicate breaks between documents in your input file. If your dataset does not contain multiple "
                 "documents, blank lines can be inserted at any natural boundary, such as the ends of chapters, "
                 "sections or paragraphs.")

        args.output_dir.mkdir(exist_ok=True)

        if args.num_workers > 1:
            writer_workers = Pool(min(args.num_workers, args.epochs_to_generate))
            arguments = [(docs, vocab_list, args, idx) for idx in range(args.epochs_to_generate)]
            writer_workers.starmap(create_training_file, arguments)
        else:
            for epoch in trange(args.epochs_to_generate, desc="Epoch"):
                create_training_file(docs, vocab_list, args, epoch)
github sharadmv / deepx / examples / jax / mnist_classifier_fromscratch.py
            return list(nested_update(ps, gs) for
                        ps, gs in zip(params, grads))
        elif isinstance(params, dict):
            return {k:nested_update(params[k], grads[k]) for
                    k in params.keys()}
        return params - step_size * grads

    @jit
    def update(params, batch):
        grads = grad(loss)(params, batch)
        return nested_update(params, grads)

    params = network.parameters
    for epoch in range(num_epochs):
        start_time = time.time()
        for _ in tqdm.trange(num_batches):
            params = update(params, next(batches))
        epoch_time = time.time() - start_time

        # train_acc = accuracy(params, (train_images, train_labels))
        test_acc = accuracy(params, (test_images, test_labels))
        print("Epoch {} in {:0.2f} sec".format(epoch, epoch_time))
        #print("Training set accuracy {}".format(train_acc))
        print("Test set accuracy {}".format(test_acc))
github louishenrifranc / attention / attention / services / create_copy_task / create_copy_task.py
    def create_copy_task_files(self, context_filename, answer_filename, vocab_size, num_examples, max_sequence_length):
        with open(context_filename, 'w') as file:
            for _ in trange(num_examples):
                num_tokens = np.random.randint(2, max_sequence_length, 1)
                tokens = np.random.randint(0, vocab_size, num_tokens)
                file.write(" ".join([str(x) for x in list(tokens)]) + "\n")

        shutil.copyfile(context_filename, answer_filename)
github Yangyangii / DeepConvolutionalTTS-pytorch / synthesize.py
                prev_mel_hats[:, t+1, :] = mel_hats[:, t, :]
            total_mel_hats[step*batch_size:(step+1)*batch_size, :, :] = prev_mel_hats
            
            print('='*10, ' Alignment ', '='*10)
            alignments = A.cpu().detach().numpy()
            visual_texts = texts.cpu().detach().numpy()
            for idx in range(len(alignments)):
                text = [idx2char[ch] for ch in visual_texts[idx]]
                utils.plot_att(alignments[idx], text, args.global_step, path=os.path.join(args.sampledir, 'A'), name='{}.png'.format(idx))
            print('='*10, ' SSRN ', '='*10)
            # Mel --> Mag
            mags[step*batch_size:(step+1)*batch_size, :, :] = \
                ssrn(total_mel_hats[step*batch_size:(step+1)*batch_size, :, :]) # mag: (N, Ty, n_mags)
            mags = mags.cpu().detach().numpy()
        print('='*10, ' Vocoder ', '='*10)
        for idx in trange(len(mags), unit='B', ncols=70):
            wav = utils.spectrogram2wav(mags[idx])
            write(os.path.join(args.sampledir, '{}.wav'.format(idx+1)), args.sr, wav)
    return None
github mwydmuch / ViZDoom / examples / python / learning_theano.py
            score = game.get_total_reward()
            train_scores.append(score)
            game.new_episode()
            train_episodes_finished += 1

    print("%d training episodes played." % train_episodes_finished)

    train_scores = np.array(train_scores)

    print("Results: mean: %.1f±%.1f," % (train_scores.mean(), train_scores.std()), \
          "min: %.1f," % train_scores.min(), "max: %.1f," % train_scores.max())

    print("\nTesting...")
    test_scores = []
    for test_episode in trange(test_episodes_per_epoch):
        game.new_episode()
        while not game.is_episode_finished():
            state = preprocess(game.get_state().screen_buffer)
            best_action_index = get_best_action(state)

            game.make_action(actions[best_action_index], frame_repeat)
        r = game.get_total_reward()
        test_scores.append(r)

    test_scores = np.array(test_scores)
    print("Results: mean: %.1f±%.1f," % (
        test_scores.mean(), test_scores.std()), "min: %.1f" % test_scores.min(), "max: %.1f" % test_scores.max())

    print("Saving the network weigths to:", model_savefile)
    pickle.dump(get_all_param_values(net), open(model_savefile, "wb"))
github jjgoings / McMurchie-Davidson / molecule.py
    def two_electron_integrals(self):
        N = self.nbasis
        self.TwoE = np.zeros((N,N,N,N))  
        print "Two-electron integrals"
        for i in trange(N,desc='First loop'):
            for j in trange(N,desc='Second loop'):
                for k in trange(N,desc='Third loop'):
                    for l in trange(N,desc='Fourth loop'):
                        if i >= j:
                            if k >= l:
                                if (i*(i+1)//2 + j) >= (k*(k+1)//2 + l):
                                    val = ERI(self.bfs[i],self.bfs[j],self.bfs[k],self.bfs[l])
                                    self.TwoE[i,j,k,l] = val
                                    self.TwoE[k,l,i,j] = val
                                    self.TwoE[j,i,l,k] = val
                                    self.TwoE[l,k,j,i] = val
                                    self.TwoE[j,i,k,l] = val
                                    self.TwoE[l,k,i,j] = val
                                    self.TwoE[i,j,l,k] = val
                                    self.TwoE[k,l,j,i] = val
        print "\n"
    def SCF(self):
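The four nested trange calls above each draw their own progress bar, which can clutter the terminal. One common way to keep nested bars readable is to pass leave=False so inner bars are cleared when they finish; a minimal sketch with illustrative sizes, not code from the repository above:

from tqdm import trange

for i in trange(4, desc='outer'):
    for j in trange(10, desc='inner', leave=False):
        pass  # stand-in for the inner-loop work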
github lopuhin / transformer-lm / lm / gpt_2_tf2 / main.py
        @tf.function
        def distributed_validate():
            return strategy.experimental_run(valid_step, valid_iterator)

        def validate():
            valid_iterator.initialize()
            valid_loss.reset_states()
            for _ in tqdm.trange(valid_steps_per_epoch, desc='validate',
                                 leave=False, dynamic_ncols=True):
                distributed_validate()
            with valid_summary_writer.as_default():
                tf.summary.scalar('loss', valid_loss.result(),
                                  step=step * step_tokens)

        for epoch in tqdm.trange(1, epochs + 1, desc='epochs'):

            train_iterator.initialize()
            for _ in tqdm.trange(train_steps_per_epoch, desc=f'epoch {epoch}',
                                 dynamic_ncols=True):
                distributed_train()
                step += 1
                if step % log_every == 0:
                    with train_summary_writer.as_default():
                        tf.summary.scalar('loss', train_loss.result(),
                                          step=step * step_tokens)
                    train_loss.reset_states()
                if validate_every and step % validate_every == 0:
                    validate()
                if save_every and step % save_every == 0:
                    checkpoint.save(checkpoint_prefix)
github delve-team / delve / example_fc.py
for h in [10, 100, 300]:

    # Create random Tensors to hold inputs and outputs
    x = torch.randn(N, D_in)
    y = torch.randn(N, D_out)

    model = LayerCake(D_in, h, H2, H3, H4, H5, D_out)

    x, y, model = x.to(device), y.to(device), model.to(device)
    if not os.path.exists('regression'):
        os.mkdir('regression')
    stats = CheckLayerSat('regression/h{}'.format(h), 'csv', model, device=device, reset_covariance=True,)

    loss_fn = torch.nn.MSELoss(size_average=False)
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
    steps_iter = trange(2000, desc='steps', leave=True, position=0)
    steps_iter.write("{:^80}".format(
        "Regression - SixLayerNet - Hidden layer size {}".format(h)))
    for i in steps_iter:
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        steps_iter.set_description('loss=%g' % loss.data)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        stats.add_saturations()
        #stats.saturation()
    steps_iter.write('\n')
    stats.close()
    steps_iter.close()
github psychopa4 / PFNL / model / frvsr.py
        self.sr1 = self.forward(self.L, self.LP, self.est)
        if not reuse:
            config = tf.ConfigProto() 
            config.gpu_options.allow_growth = True
            sess = tf.Session(config=config)
            #sess=tf.Session()
            self.sess=sess
            sess.run(tf.global_variables_initializer())
            self.saver = tf.train.Saver(max_to_keep=100, keep_checkpoint_every_n_hours=1)
            self.load(sess, self.save_dir)
        
        print('Save at {}'.format(save_path))
        print('{} Inputs With Shape {}'.format(imgs.shape[0],imgs.shape[1:]))

        all_time=[]
        for i in trange(max_frame):
            st_time=time.time()
            if i==0:
                SR=self.sess.run(self.sr0,feed_dict={self.L : imgs[i:i+1]})
            else:
                SR=self.sess.run(self.sr1,feed_dict={self.L : imgs[i:i+1], self.LP : imgs[i-1:i], self.est : SR})
            all_time.append(time.time()-st_time)
            img=SR[0]*255.
            img=np.clip(img,0,255).astype(np.uint8)
            cv2_imsave(join(save_path, '{:0>4}.png'.format(i)),img)
        if max_frame>0:
            all_time=np.array(all_time)
            print('spent {} s in total and {} s in average'.format(np.sum(all_time),np.mean(all_time[1:])))
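Across these examples, trange accepts the same keyword arguments as tqdm: desc labels the bar, disable turns it off entirely (for example on non-primary ranks in distributed training), leave controls whether a finished bar persists, unit sets the item label, ncols and dynamic_ncols manage the bar width, and position stacks multiple bars. A minimal sketch combining several of these options (the loop bodies are placeholders):

from tqdm import trange

for epoch in trange(3, desc='epochs', position=0, dynamic_ncols=True):
    for step in trange(100, desc='epoch {}'.format(epoch), unit='batch',
                       leave=False, position=1):
        pass  # stand-in for a training step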