How to use the readchar.readchar function in readchar

To help you get started, we've selected a few readchar.readchar examples based on popular ways the function is used in public projects.
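In its simplest form, readchar.readchar() blocks until a single character is typed and returns it immediately, with no Enter required. A minimal sketch (the prompt text here is ours, not from any project below):

import readchar

print("Press any key to continue (q to quit)...")
ch = readchar.readchar()  # blocks until one character is read
if ch.lower() == "q":
    print("Quitting.")
else:
    print(f"You pressed: {ch!r}")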


github tyiannak / multimodalAnalysis / audio / example28.py
n_clusters = 4
k_means = sklearn.cluster.KMeans(n_clusters=n_clusters)
k_means.fit(mt_feats_norm.T)
cls = k_means.labels_
segs, c = labels_to_segments(cls, mt_step)  # convert flags to segment limits
for sp in range(n_clusters):                # play each cluster's segments
    for i in range(len(c)):
        if c[i] == sp and segs[i, 1]-segs[i, 0] > 0.5:
            # play long segments of current speaker
            print(c[i], segs[i, 0], segs[i, 1])
            cmd = "avconv -i {} -ss {} -t {} temp.wav " \
                  "-loglevel panic -y".format(input_file, segs[i, 0]+1,
                                              segs[i, 1]-segs[i, 0]-1)
            os.system(cmd)
            os.system("play temp.wav -q")
            readchar.readchar()  # wait for a keypress before the next segment
github lervag / apy / apy / note.py
    for x, y in actions.items():
        menu = click.style(x, fg='blue') + ': ' + y
        if column < 3:
            click.echo(f'{menu:28s}', nl=False)
        else:
            click.echo(menu)
        column = (column + 1) % 4

    width = os.get_terminal_size()[0]
    click.echo('')

    self.print()
else:
    refresh = True

choice = readchar.readchar()  # read one key as the menu choice
action = actions.get(choice)

if action == 'Continue':
    return True

if action == 'Edit':
    self.edit()
    continue

if action == 'Delete':
    if click.confirm('Are you sure you want to delete the note?'):
        self.delete()
    return True

if action == 'Show images':
    self.show_images()
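The snippet above maps one keypress to a named action through a dict. The same pattern, reduced to a minimal sketch with hypothetical key bindings:

import readchar

actions = {"c": "Continue", "e": "Edit", "d": "Delete"}  # hypothetical bindings

print("Action [c/e/d]: ")
choice = readchar.readchar()
action = actions.get(choice)
if action is None:
    print(f"unbound key: {choice!r}")
else:
    print(f"selected: {action}")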
github jprouty / mint-amazon-tagger / mintamazontagger / tagger.py
                t, new_transactions, ignore_category=args.no_tag_categories):
            stats['already_up_to_date'] += 1
            continue

        valid_prefixes = (
            args.amazon_domains.lower().split(',') + [prefix.lower()])
        if any(t.merchant.lower().startswith(pre) for pre in valid_prefixes):
            if args.prompt_retag:
                if args.num_updates > 0 and len(updates) >= args.num_updates:
                    break
                logger.info('\nTransaction already tagged:')
                print_dry_run(
                    [(t, new_transactions)],
                    ignore_category=args.no_tag_categories)
                logger.info('\nUpdate tag to proposed? [Yn] ')
                action = readchar.readchar()  # read the single-key response
                if action == '':
                    exit(1)
                if action not in ('Y', 'y', '\r', '\n'):
                    stats['user_skipped_retag'] += 1
                    continue
                stats['retag'] += 1
            elif not args.retag_changed:
                stats['no_retag'] += 1
                continue
            else:
                stats['retag'] += 1
        else:
            stats['new_tag'] += 1
        updates.append((t, new_transactions))

    if args.num_updates > 0:
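This excerpt shows a single-key yes/no confirmation: the prompt accepts Y, y, or Enter ('\r' or '\n') as agreement. A stripped-down sketch of the same check, with the prompt wording ours:

import readchar

print("Update tag to proposed? [Yn] ")
action = readchar.readchar()
if action in ("Y", "y", "\r", "\n"):  # Enter counts as yes
    print("confirmed")
else:
    print("skipped")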
github TsiakasK / sequence-learning / play.py
time.sleep(1)

## record EEG signals when user presses the buttons ##
out = open(dirname + "/user_" + str(turn), 'w')
server.f = out
######################################################

# start time to measure response time
start_time = time.time()

################### CHECK USER RESPONSE AND CALCULATE SCORE ####################
sig2 = 1
first = 0
while sig2:
    res.append(readchar.readchar().lower())  # capture each keypress as it arrives
    if first == 0:
        reaction_time = time.time() - start_time  # latency to the first keypress
        first = 1
    if len(res) == Dold:
        sig2 = 0

completion_time = time.time() - start_time
if seq != res:
    success = -1
    score = -1*(D.index(length)+1)
else:
    success = 1
    score = D.index(length) + 1
    correct += 1
#################################################################################
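Because readchar.readchar() returns as soon as a key arrives, bracketing it with time.time() calls measures response latency, as the snippet above does. A minimal self-contained sketch:

import time
import readchar

print("Press any key as fast as you can...")
start_time = time.time()
readchar.readchar()  # blocks until the first keypress
reaction_time = time.time() - start_time
print(f"Reaction time: {reaction_time:.3f}s")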
github lervag / apy / apy / note.py
    def show_cards(self):
        """Show cards for note"""
        for i, c in enumerate(self.n.cards()):
            number = f'{str(i) + ".":>3s}'
            name = c.template()['name']
            if c.flags > 0:
                name = click.style(name, fg='red')
            click.echo(f'  {click.style(number, fg="white")} {name}')

        click.secho('\nPress any key to continue ... ', fg='blue', nl=False)
        readchar.readchar()  # block until any key is pressed
github evildmp / BrachioGraph / pantograph.py
    def calibrate(self, pin, angle, description):

        adjustments = {"<": -100, ">": +100, "{": -10, "}": +10, "[": -1, "]": +1, "0": "done"}

        pw = 1350
        self.rpi.set_servo_pulsewidth(pin, pw)

        print("        Now use the controls to move the arm to {}˚ (i.e. {}).\n".format(angle, description))

        while True:
            key = readchar.readchar()  # read a single control key

            adjustment = adjustments.get(key, None)

            if adjustment:
                if adjustment == "done":
                    print("\n")
                    return pw
                else:
                    pw = pw + adjustment
                    print("        pulse width: {} ".format(pw), end="\r")
                    self.rpi.set_servo_pulsewidth(pin, pw)
github evildmp / BrachioGraph / brachiograph.py
print(f"See https://brachiograph.art/how-to/calibrate.html")
        print()
        self.rpi.set_servo_pulsewidth(pin, pw)
        print(f"The servo is now at {pw}µS, in the centre of its range of movement.")
        print("Attach the protractor to the base, with its centre at the axis of the servo.")

        print(f"Mount the arm at a position as close as possible to {texts['nominal-centre'][servo]}˚ {texts['mount-arm'][servo]}.")

        print("Now drive the arm to a known angle, as marked on the protractor.")
        print("When the arm reaches the angle, press 1 and record the angle. Do this for as many angles as possible.")
        print()
        print("When you have done all the angles, press 2.")
        print("Press 0 to exit at any time.")

        while True:
            key = readchar.readchar()  # single-key control loop

            if key == "0":
                return
            elif key == "1":
                angle = float(input("Enter the angle: "))
                servo_angle_pws.append([angle, pw])
            elif key == "2":
                break
            elif key == "a":
                pw = pw - 10
            elif key == "s":
                pw = pw + 10
            elif key == "A":
                pw = pw - 1
            elif key == "S":
                pw = pw + 1
github anuragranj / coma / lib / visualize_latent_space.py
def visualize_latent_space(model, facedata, mesh_path=None):
    if mesh_path is not None:
        normalized_mesh = facedata.get_normalized_meshes([mesh_path])
    else:
        normalized_mesh = np.array([facedata.vertices_test[0]])

    latent_vector = model.encode(normalized_mesh)
    viewer = MeshViewers(window_width=800, window_height=800, shape=[1, 1], titlebar='Meshes')

    while True:
        input_key = readchar.readchar()  # pick a latent dimension to scale
        if input_key == "q":
            latent_vector[0][0] = 1.01*latent_vector[0][0]
        elif input_key == "w":
            latent_vector[0][1] = 1.01*latent_vector[0][1]
        elif input_key == "e":
            latent_vector[0][2] = 1.01*latent_vector[0][2]
        elif input_key == "r":
            latent_vector[0][3] = 1.01*latent_vector[0][3]
        elif input_key == "t":
            latent_vector[0][4] = 1.01*latent_vector[0][4]
        elif input_key == "y":
            latent_vector[0][5] = 1.01*latent_vector[0][5]
        elif input_key == "u":
            latent_vector[0][6] = 1.01*latent_vector[0][6]
        elif input_key == "i":
            latent_vector[0][7] = 1.01*latent_vector[0][7]
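The q/w/e/r/t/y/u/i chain above scales one latent dimension per key. The same dispatch can be written as a key-to-index lookup; a behavior-equivalent sketch, with a stand-in array in place of the model's encoding:

import numpy as np
import readchar

latent_vector = np.ones((1, 8))  # stand-in for the encoder output in the snippet
key_to_dim = {k: i for i, k in enumerate("qwertyui")}  # same bindings as above

while True:
    input_key = readchar.readchar()
    dim = key_to_dim.get(input_key)
    if dim is None:
        break  # any unbound key exits this sketch
    latent_vector[0][dim] = 1.01 * latent_vector[0][dim]
    print(latent_vector[0])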
github ElevenPaths / HomePWN / modules / discovery / mdns.py
    def run(self):
        listener = Listener()
        print("Searching. Press q to stop")
        browser = zeroconf.ServiceBrowser(
            zeroconf.Zeroconf(), self.args["service"], listener)
        key = ""
        while key.lower() != "q":
            key = readchar.readchar()  # block for the next key; 'q' stops the search
        browser.cancel()
        print("")
github ElevenPaths / HomePWN / modules / capture / read-pcap.py
    def _show_range_packets(self, packets, limit):
        msg = "--show more-- (press q to exit)"

        count = 0
        for p in packets:
            count += 1
            if count == limit:
                count = 0
                print_formatted_text(HTML(f"{msg}"), end="")
                res = readchar.readchar()  # pause paging until a key is pressed
                # Deletes the last line
                print("\033[A")
                print(" "*len(msg))
                print("\033[A")
                if res.lower() == "q":
                    print("")
                    return
            p.show()
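Note that readchar.readchar() returns exactly one character, so multi-byte escape sequences such as arrow keys arrive in pieces. For whole keystrokes, the library also provides readchar.readkey() and the constants in readchar.key; a small sketch:

import readchar
from readchar import key

print("Use the arrow keys; press q to quit.")
while True:
    k = readchar.readkey()  # returns a complete keystroke, escape sequence included
    if k == key.UP:
        print("up")
    elif k == key.DOWN:
        print("down")
    elif k == "q":
        break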

readchar

Library to easily read single chars and key strokes

License: MIT