import sounddevice as sd
import soundfile as sf

def sync_record(filename, duration, fs, channels):
    """Synchronously record `duration` seconds of audio and write it to `filename`."""
    print('recording')
    myrecording = sd.rec(int(duration * fs), samplerate=fs, channels=channels)
    sd.wait()
    sf.write(filename, myrecording, fs)
    print('done recording')
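A minimal usage sketch for sync_record; the file name, the 10-second duration, the 44.1 kHz sample rate and the mono channel count below are illustrative assumptions, not values taken from the original snippet:

# Hypothetical call: record 10 seconds of mono audio at 44.1 kHz and save it as WAV
sync_record('test.wav', 10, 44100, 1)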
def record_data(filename, duration, fs, channels):
    # synchronous recording
    print('recording')
    myrecording = sd.rec(int(duration * fs), samplerate=fs, channels=channels)
    sd.wait()
    sf.write(filename, myrecording, fs)
    print('done')
    return filename
import os

import librosa
import numpy as np
import sounddevice as sd
import soundfile as sf

def record_data(filename, duration, fs, channels):
    # synchronous recording
    myrecording = sd.rec(int(duration * fs), samplerate=fs, channels=channels)
    sd.wait()
    sf.write(filename, myrecording, fs)
    y, sr = librosa.load(filename)
    # librosa.feature.rmse() was renamed to librosa.feature.rms() in librosa 0.7
    rmse = np.mean(librosa.feature.rms(y=y)[0])
    os.remove(filename)
    return rmse * 1000
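A hedged usage sketch for the RMSE variant of record_data above; the probe file name, the one-second window and the silence threshold are arbitrary illustrative choices:

# Hypothetical call: sample one second of mono audio and treat the scaled RMSE as a rough loudness estimate
loudness = record_data('probe.wav', 1, 44100, 1)
if loudness < 1.0:  # threshold picked for illustration only
    print('input looks silent, check the microphone')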
    def recording(self, duration=5):
        # read data from the microphone
        # duration is the length of time to record, in seconds
        self.duration = duration
        # frame count passed to sd.rec must be an integer
        self.voice = sd.rec(int(self.duration * self.fs), samplerate=self.fs, channels=self.ch, dtype='float64')
        sd.wait()
        # transpose to shape (channels, samples)
        self.voice = self.voice.T.copy()
def call_speech():
    s = input(colored("Press enter to start recording " + str(duration) + " seconds of audio", "cyan"))
    if s == "":
        # Record audio and write to file using sounddevice
        fs = 16000
        myrecording = sd.rec(int(duration * fs), samplerate=fs, channels=1, blocking=True)
        print(colored("Writing your audio to a file...", "magenta"))
        scipy.io.wavfile.write('test.wav', fs, myrecording)  # scipy has no top-level write()
        filename = 'speech-' + str(int(time.time())) + '.flac'
        ff = ffmpy.FFmpeg(
            inputs={'test.wav': None},
            outputs={filename: None}
        )
        ff.run()
        # Encode audio file and call the Speech API
        with io.open(filename, "rb") as speech:
            # Base64 encode the binary audio file for inclusion in the JSON request
            speech_content = base64.b64encode(speech.read())
        service = get_service('speech', 'v1beta1')
    def start_recording(self, seconds):
        print(seconds)
        # samplerate and channels fall back to sd.default settings here
        self.myrecording = sd.rec(int(seconds * self.default_samplerate))
parser.add_argument('-i', '--n_iter', type=int, default=20,
                    help='Number of iterations of the algorithm')
args = parser.parse_args()

## Prepare one-shot STFT
L = args.block
# Let's hard code the sampling frequency to avoid some problems
fs = 16000

## RECORD
if args.device is not None:
    sd.default.device[0] = args.device

## MIXING
print('* Recording started... ', end='')
mics_signals = sd.rec(int(args.duration * fs), samplerate=fs, channels=2, blocking=True)
print('done')

## STFT ANALYSIS
# shape == (n_chan, n_frames, n_freq)
X = pra.transform.analysis(mics_signals.T, L, L, zp_back=L // 2, zp_front=L // 2)

## Monitor convergence
it = 10
def cb_print(*args):
    global it
    print(' AuxIVA Iter', it)
    it += 10

## Run live BSS
print('* Starting BSS')
bss_type = args.algo
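The fragment above stops right after selecting the algorithm. A hedged sketch of how it might continue, assuming the AuxIVA implementation shipped with pyroomacoustics (pra.bss.auxiva); the expected shape convention for X should be verified against the installed version:

# Hypothetical continuation: separate the sources with AuxIVA, reporting progress through cb_print
if bss_type == 'auxiva':
    Y = pra.bss.auxiva(X, n_iter=args.n_iter, proj_back=True, callback=cb_print)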
def record_target(file_path, length, fs, channels=2, append=False):
    """Records audio and writes it to a file.

    Args:
        file_path: Path to the output file
        length: Audio recording length in samples
        fs: Sampling rate
        channels: Number of channels in the recording
        append: Add track(s) to an existing file? Silence will be added to the end of each track
            to make all tracks equal in length.

    Returns:
        None
    """
    recording = sd.rec(length, samplerate=fs, channels=channels, blocking=True)
    recording = np.transpose(recording)
    max_gain = 20 * np.log10(np.max(np.abs(recording)))
    if append and os.path.isfile(file_path):
        # Adding to an existing file, read the file
        _fs, data = read_wav(file_path, expand=True)
        # Zero pad the shorter one to the length of the longer
        if recording.shape[1] > data.shape[1]:
            n = recording.shape[1] - data.shape[1]
            data = np.pad(data, [(0, 0), (0, n)])
        elif data.shape[1] > recording.shape[1]:
            # Pad the new recording (not the existing data) up to the existing length
            recording = np.pad(recording, [(0, 0), (0, data.shape[1] - recording.shape[1])])
        # Add the recording to the end of the existing data
        recording = np.vstack([data, recording])
    write_wav(file_path, fs, recording)
    print(f'Headroom: {-1.0 * max_gain:.1f} dB')
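A minimal usage sketch for record_target; the file name, the 5-second length and the 48 kHz rate are illustrative assumptions, and the read_wav/write_wav helpers it relies on must come from the original project:

# Hypothetical call: record 5 seconds of stereo audio and append it as new tracks to an existing file
fs = 48000
record_target('recording.wav', length=5 * fs, fs=fs, channels=2, append=True)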