How to use the pyprind.ProgPercent class in PyPrind

To help you get started, we’ve selected a few PyPrind examples, based on popular ways it is used in public projects.
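
In its simplest form, ProgPercent is constructed with the expected number of iterations and advanced once per step. The minimal sketch below (the iteration count and loop body are placeholders) shows the basic pattern that the examples that follow build on.

import pyprind

n = 10000
perc = pyprind.ProgPercent(n)  # prints a live percentage indicator (stderr by default)
for i in range(n):
    # do some computation here
    perc.update()  # advance the indicator by one iteration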

github rasbt / pyprind / test / custom_stream.py
import sys
import pyprind

n = 1000000
mbar = pyprind.ProgBar(n, stream=sys.stdout)
for i in range(n):
    mbar.update()

mper = pyprind.ProgPercent(n, stream=sys.stdout)
for i in range(n):
    mper.update()

# 'test' is not a valid output stream; this exercises pyprind's invalid-stream handling
mbar2 = pyprind.ProgBar(n, stream='test')
for i in range(n):
    mbar2.update()

for i in pyprind.prog_bar(range(n), stream=sys.stdout):
    # do something
    pass

for i in pyprind.prog_percent(range(n), stream=sys.stdout):
    # do something
    pass

for i in pyprind.prog_bar(range(n), stream='test'):
    # do something
    pass

github rasbt / pyprind / test / percentage_indicator.py
import sys
import time

import pyprind

print('%s\n' % (80 * '='))
print('Testing Basic Percentage Indicator\n')

n = 100
sleeptime = 0.02

perc = pyprind.ProgPercent(n)
for i in range(n):
    time.sleep(sleeptime)
    perc.update()

print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing stdout Stream\n')

perc = pyprind.ProgPercent(n, stream=sys.stdout)
for i in range(n):
    time.sleep(sleeptime)
    perc.update()

print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing Percentage Indicator Generator\n')

for i in pyprind.prog_percent(range(n), stream=sys.stdout):
    time.sleep(sleeptime)


print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing monitor function\n')
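
The excerpt ends before the monitor test itself; as a rough sketch (assuming pyprind's optional monitor flag, which requires the psutil package), a monitor test reusing n and sleeptime from above would look something like this:

perc = pyprind.ProgPercent(n, monitor=True)  # monitor=True collects CPU and memory stats
for i in range(n):
    time.sleep(sleeptime)
    perc.update()
print(perc)  # report elapsed time plus the collected CPU and memory usage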

github rgtjf / Semantic-Texual-Similarity-Toolkits / stst / utils.py
    binary_lines = False  # flips to True if the file has to be re-read as bytes
    try:
        with io.open(emb_file, encoding="utf8") as f:
            lines = [line for line in f]
    # If there are malformed lines, read in binary mode
    # and manually decode each word from utf-8
    except:
        logger.warning("Could not read {} as UTF8 file, "
                        "reading file as bytes and skipping "
                        "words with malformed UTF8.".format(emb_file))
        with open(emb_file, 'rb') as f:
            lines = [line for line in f]
        binary_lines = True

    logger.info("Loading vectors from {}".format(emb_file))

    process_bar = pyprind.ProgPercent(len(lines))
    for line in lines:
        process_bar.update()
        # Explicitly splitting on " " is important, so we don't
        # get rid of Unicode non-breaking spaces in the vectors.
        entries = line.rstrip().split(b" " if binary_lines else " ")

        word, entries = entries[0], entries[1:]
        if dim is None and len(entries) > 1:
            dim = len(entries)
            # add pad_word
            itos.append(pad_word)
            vectors.extend(np.zeros(dim, ))
            # add unk_word
            itos.append(unk_word)
            vectors.extend(np.random.uniform(-0.25, 0.25, (dim, )))

github motazsaad / comparable-text-miner / textpro.py
	check_dir(output_path) # if directory does not exist, then create
		
	logging.info( 'aligning %s and %s Wikipedia documents using interlanguage links',  source_language, target_language)
	source_docs = split_wikipedia_docs_into_array(source_corpus_file)
	logging.info( 'source corpus is loaded')
	target_docs = split_wikipedia_docs_into_array(target_corpus_file)
	logging.info( 'target corpus is loaded')
	
	target_titles = [get_title_from_interlanguage_links(d, source_language) for d in target_docs]
	
	logging.info( 'start aligning...')
	source_out = open(output_path +  source_language + '-wiki.txt', 'w') 
	target_out = open(output_path +  target_language + '-wiki.txt', 'w') 
	count = 1
	
	my_prperc = pyprind.ProgPercent(len(source_docs)) 
	
	for i in range(len(source_docs)):
		my_prperc.update() # print progress 
		source_title = get_title_from_interlanguage_links(source_docs[i], source_language)
		try: 
			index = target_titles.index(source_title)
			text_out = source_docs[i] 
			# Python 2 print-to-file syntax: writes the encoded text to source_out
			print>>source_out, text_out.encode('utf-8')
			text_out = target_docs[index]
			print>>target_out, text_out.encode('utf-8')
			count += 1
		except: continue
				
				
	logging.info( 'aligning by document interlanguage links is done! ... \n %d documents are aligned', count)
##################################################################################

github kethort / TwitterLDATopicModeling / src / twitter_user_grabber.py
        twpy_api = auth.get_access_creds(args.creds)

        if not twpy_api:
            print('Error: Twitter developer access credentials denied')
            return

        working_dir = get_directory_of_file(args.filename)

        # gets the first 50 zip codes by city and state
        zip_search = SearchEngine()
        zipcodes = zip_search.by_city_and_state(args.city, args.state, returns=50)

        user_ids = []
        user_followers = []
        # gets the user ids at each geo-location for the retrieved zip codes
        bar = pyprind.ProgPercent(len(zipcodes), track_time=True, title='Finding user ids')
        for zipcode in zipcodes:
            bar.update(item_id='zip code:' + str(zipcode.zipcode) + '\t')
            user_ids.extend(get_user_ids(twpy_api, zipcode.lat, zipcode.lng, args.radius))
            write_json(args.filename, list(set(user_ids)))

    if args.mode == 'netx':
        user_followers = read_json(args.in_filename)
        pythonify_dict(user_followers)
        print("Number of followers: " + str(len(user_followers)))
        output_filename = args.out_filename + '.json'
        graph = build_netx_graph(user_followers)

        if args.gen_cliques:
            generate_cliques(graph, output_filename, args.min_size)
        if args.gen_comms:
            generate_communities(graph, output_filename, args.min_size)

github rasbt / pyprind / examples / ex1_percentage_indicator_stdout.py
def example_1():
    n = 1000000
    my_perc = pyprind.ProgPercent(n, stream=1)  # stream=1 writes to sys.stdout
    for i in range(n):
        # do some computation
        my_perc.update()

github rasbt / pyprind / examples / ex1_percentage_indicator_stderr.py
def example_1():
    n = 1000000
    my_perc = pyprind.ProgPercent(n, stream=2)  # stream=2 writes to sys.stderr (pyprind's default)
    for i in range(n):
        # do some computation
        my_perc.update()

github mdenil / txtnets / code / convolutional_sentence_model.py
    vocabulary_size = int(data['size_vocab'])
    max_epochs = 1

    train = data['train'] - 1
    train_sentence_lengths = data['train_lbl'][:,1]

    max_sentence_length = data['train'].shape[1]

    csm = load_testing_model("cnn-sm-gpu-kmax/DEBUGGING_MODEL.mat")

    n_batches_per_epoch = int(data['train'].shape[0] / batch_size)

    matlab_results = scipy.io.loadmat("cnn-sm-gpu-kmax/BATCH_RESULTS_ONE_PASS_ONE_LAYER_CHECK.mat")['batch_results']
    # matlab_results = scipy.io.loadmat("verify_forward_pass/data/batch_results_first_layer.mat")['batch_results']

    progress_bar = pyprind.ProgPercent(n_batches_per_epoch)

    total_errs = 0

    for batch_index in xrange(n_batches_per_epoch):

        if batch_index == 3:
            pass

        minibatch = train[batch_index*batch_size:(batch_index+1)*batch_size]

        meta = {'lengths': train_sentence_lengths[batch_index*batch_size:(batch_index+1)*batch_size]}

        # s1 = csm.fprop(minibatch, num_layers=1, meta=meta)
        # s2 = csm.fprop(minibatch, num_layers=2, meta=meta)
        # s3 = csm.fprop(minibatch, num_layers=3, meta=meta)
        # s4 = csm.fprop(minibatch, num_layers=4, meta=meta)

github mawanda-jun / TableTrainNet / dataset / img_to_jpeg.py
def img_to_jpeg(img_path):
    """
    Transform every image in path into a png one to make it compatible with TF pre-trained NNs
    :param img_path: path to image folder
    :return: None. It writes images to disk
    """
    img_found_counter = 0
    img_converted_counter = 0
    if not os.path.isdir(img_path):
        raise InputError('{} is not a valid path'.format(img_path))
    for (gen_path, bmp_paths, img_names) in os.walk(img_path):
        bar = pyprind.ProgPercent(len(img_names))
        # print(gen_path, bmp_paths, img_names)
        for file_name in img_names:
            if not file_name.endswith(IMAGES_EXTENSION):
                file_no_extension = os.path.splitext(file_name)[0]
                # file_no_extension = file_name.replace('.bmp', '')
                img_found_counter += 1
                # if (file_no_extension + IMAGES_EXTENSION) not in img_names:
                if True:
                    logger.info('Now processing: {}'.format(file_name))
                    file_path = os.path.join(gen_path, file_name)
                    if not os.path.isfile(file_path):
                        raise InputError('{} is not a valid image'.format(file_path))
                    with Image.open(file_path) as img:
                        img = img.convert('L')
                        img = uglify_image(img)
                        # path is valid as it has been checked before

github rgtjf / Semantic-Texual-Similarity-Toolkits / stst / data / data_utils.py
    parse_train_file = train_file.replace('./data', './generate/parse')

    if flag or not os.path.isfile(parse_train_file):

        print(train_file)
        if nlp is None:
            raise RuntimeError("nlp should be init by nlp = stst.StanfordNLP('http://localhost:9000')")

        ''' Parse Data '''
        data = load_STS(train_file)

        print('*' * 50)
        print("Parse Data, train_file=%s, n_train=%d\n" % (train_file, len(data)))

        parse_data = []
        process_bar = pyprind.ProgPercent(len(data))
        for (sa, sb, score) in data:
            process_bar.update()
            parse_sa = nlp.parse(sa)
            parse_sb = nlp.parse(sb)
            parse_data.append((parse_sa, parse_sb, score))

        ''' Write Data to File '''
        with utils.create_write_file(parse_train_file) as f_parse:
            for parse_instance in parse_data:
                line = json.dumps(parse_instance)
                print(line, file=f_parse)

    ''' Load Data from File '''
    print('*' * 50)
    parse_data = []
    with utils.create_read_file(parse_train_file) as f: