"""
assert blocksize != 0
q_in = asyncio.Queue()
q_out = queue.Queue()
loop = asyncio.get_event_loop()
def callback(indata, outdata, frame_count, time_info, status):
loop.call_soon_threadsafe(q_in.put_nowait, (indata.copy(), status))
outdata[:] = q_out.get_nowait()
# pre-fill output queue
for _ in range(pre_fill_blocks):
q_out.put(np.zeros((blocksize, channels), dtype=dtype))
stream = sd.Stream(blocksize=blocksize, callback=callback, dtype=dtype,
channels=channels, **kwargs)
with stream:
while True:
indata, status = await q_in.get()
outdata = np.empty((blocksize, channels), dtype=dtype)
yield indata, outdata, status
q_out.put_nowait(outdata)
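# Usage sketch (assumed consumer, not part of the snippet above): the generator is
# driven with ``async for``; copying each input block into the yielded output block
# gives a simple input-to-output wire.
async def wire_coro(**kwargs):
    async for indata, outdata, status in stream_generator(**kwargs):
        if status:
            print(status)
        outdata[:] = indata  # pass the recorded block straight back out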
parser.add_argument('-c', '--channels', type=int, default=2,
                    help='number of channels')
parser.add_argument('--dtype', help='audio data type')
parser.add_argument('--samplerate', type=float, help='sampling rate')
parser.add_argument('--blocksize', type=int, help='block size')
parser.add_argument('--latency', type=float, help='latency in seconds')
args = parser.parse_args(remaining)


def callback(indata, outdata, frames, time, status):
    if status:
        print(status)
    outdata[:] = indata


try:
    with sd.Stream(device=(args.input_device, args.output_device),
                   samplerate=args.samplerate, blocksize=args.blocksize,
                   dtype=args.dtype, latency=args.latency,
                   channels=args.channels, callback=callback):
        print('#' * 80)
        print('press Return to quit')
        print('#' * 80)
        input()
except KeyboardInterrupt:
    parser.exit('')
except Exception as e:
    parser.exit(type(e).__name__ + ': ' + str(e))
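# Context sketch (assumed, not shown in the snippet): ``parser`` would have been
# created earlier with arguments for the input and output devices, e.g.
#
#     parser = argparse.ArgumentParser(description='pass input directly to output')
#     parser.add_argument('-i', '--input-device', help='input device ID or name substring')
#     parser.add_argument('-o', '--output-device', help='output device ID or name substring')
#
# ``remaining`` would come from a first parsing pass (e.g. one that only handles a
# --list-devices flag) whose leftover arguments are parsed here.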
def audio_callback(indata, outdata, frames, time, status):
    # pause handling (assumed): block while recording is paused
    while self._get_recording_state() == RecordingState.PAUSED:
        self.recording_paused_changed.wait()

    if status:
        self.logger.warning('Recording callback status: {}'.format(
            str(status)))

    outdata[:] = indata

stream_index = None

try:
    import soundfile as sf
    import numpy

    stream_index = self._allocate_stream_index()
    stream = sd.Stream(samplerate=sample_rate, channels=channels,
                       blocksize=blocksize, latency=latency,
                       device=(input_device, output_device),
                       dtype=dtype, callback=audio_callback)
    self.start_recording()
    self._start_playback(stream_index=stream_index, stream=stream)

    self.logger.info('Started recording pass-through from device ' +
                     '[{}] to sound device [{}]'.format(input_device, output_device))

    recording_started_time = time.time()

    while self._get_recording_state() != RecordingState.STOPPED \
            and (duration is None or
                 time.time() - recording_started_time < duration):
def censor(self):
    """Censor audio chunks in a continuous stream, producing a clean version with explicit content removed."""
    # Start a thread that will analyze and censor recorded chunks
    processing_thread = threading.Thread(target=self.run)
    processing_thread.daemon = True
    processing_thread.start()

    try:
        # listen from Soundflower, play to speakers
        with sd.Stream(device=(2, 1),
                       samplerate=self.samplerate,
                       blocksize=int(self.samplerate * self.duration),
                       channels=1, callback=self.callback,
                       finished_callback=self.finished_callback):
            print('#' * 80)
            print('press Return to stop censoring')
            print('#' * 80)
            input()
    except KeyboardInterrupt:
        print('\nInterrupted by user')
        CensorRealtimeMac.running = False
    except Exception as e:
        print(type(e).__name__ + ': ' + str(e))
        CensorRealtimeMac.running = False
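# Hypothetical illustration (not this project's actual callback): with
# blocksize = samplerate * duration, each callback delivers one whole
# ``duration``-second chunk, so a duplex callback can queue the fresh chunk for
# analysis and play back the most recently censored one.
#
#     def callback(self, indata, outdata, frames, time, status):
#         self.raw_queue.put(indata.copy())             # hand chunk to the processing thread
#         try:
#             outdata[:] = self.clean_queue.get_nowait()  # play last censored chunk
#         except queue.Empty:
#             outdata.fill(0)                           # nothing censored yet: play silence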
def listen(_):  # listen until silence
    # read audio input for phrases until there is a phrase that is long enough
    elapsed_time = 0  # number of seconds of audio read
    buf = b""  # an empty buffer means the stream has ended and there is no data left to read
    _.energy_threshold = 300  # minimum audio energy to consider for recording
    _.stream = sounddevice.Stream(samplerate=_.sample_rate, channels=_.channels,
                                  dtype='int16')  # or dtype='float32'
    with _.stream:
        while True:
            frames = collections.deque()
            # store audio input until the phrase starts
            while True:
                # handle waiting too long for phrase by raising an exception
                elapsed_time += _.seconds_per_buffer
                if _.timeout and elapsed_time > _.timeout:
                    raise Exception("listening timed out while waiting for phrase to start")
                buf = _.stream.read(_.chunk)[0]
                # if len(buf) == 0: break  # reached end of the stream
                frames.append(buf)
                # keep only the needed amount of non-speaking buffers
                if len(frames) > _.non_speaking_buffer_count:
                    frames.popleft()
dynamic_energy_adjustment_damping = 0.15
dynamic_energy_ratio = 1.5
dynamic_energy_threshold = True
energy_threshold = 3000        # minimum audio energy to consider for recording
pause_threshold = 0.5          # seconds of non-speaking audio before a phrase is considered complete
phrase_threshold = 0.3         # minimum seconds of speaking audio before it counts as a phrase (filters out clicks and pops)
non_speaking_duration = 0.5    # seconds of non-speaking audio to keep on both sides of the recording
chunk = 1024                   # number of frames stored in each buffer
sample_rate = 16000            # sampling rate in Hertz
# pa_format = pyaudio.paInt16  # 16-bit int sampling
sample_width = 2               # size of each sample in bytes; pyaudio.get_sample_size(pa_format)
seconds_per_buffer = float(chunk) / sample_rate
pause_buffer_count = int(math.ceil(pause_threshold / seconds_per_buffer))        # buffers of non-speaking audio before a phrase is considered complete
phrase_buffer_count = int(math.ceil(phrase_threshold / seconds_per_buffer))      # minimum buffers of speaking audio to count as a phrase
non_speaking_buffer_count = int(math.ceil(non_speaking_duration / seconds_per_buffer))  # maximum buffers of non-speaking audio to retain before and after a phrase
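# Worked example with the values above (an illustrative calculation, not from the
# original source): seconds_per_buffer = 1024 / 16000 = 0.064 s, so
# pause_buffer_count = ceil(0.5 / 0.064) = 8, phrase_buffer_count = ceil(0.3 / 0.064) = 5,
# and non_speaking_buffer_count = ceil(0.5 / 0.064) = 8 buffers.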
stream = sounddevice.Stream(samplerate=sample_rate, channels=channels, dtype='int16')
with stream:
    while oa.alive:
        elapsed_time = 0  # number of seconds of audio read
        buf = b""  # an empty buffer means the stream has ended and there is no data left to read
        # energy_threshold = 300  # minimum audio energy to consider for recording
        while oa.alive:
            frames = collections.deque()
            # store audio input until the phrase starts
            while oa.alive:
                # handle waiting too long for phrase by raising an exception
                elapsed_time += seconds_per_buffer
                if timeout and elapsed_time > timeout:
                    raise Exception("listening timed out while waiting for phrase to start")
                buf = stream.read(chunk)[0]
def callback(indata, outdata, frames, time, status):
    if status:
        print(status, flush=True)
    global index
    index += frames
    returns = processing.proccesing_func(index, indata)
    index2, out = returns['main_output']
    if index2 is not None:
        outdata[:] = out
    else:
        outdata[:] = 0

latency = 'low'
stream = sd.Stream(channels=nb_channel, callback=callback, samplerate=sample_rate,
                   blocksize=chunksize, latency=latency, device=None, dtype='float32')

# run the audio stream for 10 seconds
stream.start()
time.sleep(10)
stream.stop()
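# Hypothetical stand-in for the external ``processing`` module used above (name and
# behaviour assumed), only to illustrate the contract the callback relies on: the
# function returns a dict whose 'main_output' entry is an (index, block) pair, and a
# None index means there is nothing to play yet.
def proccesing_func(index, indata):
    out = 0.5 * indata  # e.g. attenuate the input block
    return {'main_output': (index, out)}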
def _in():
    _config = DEFAULT_CONFIG.copy()
    seconds_per_buffer = _config.get("chunk") / _config.get("sample_rate")
    # Number of buffers of non-speaking audio during a phrase before the phrase should be considered complete.
    pause_buffer_count = math.ceil(_config.get("pause_threshold") / seconds_per_buffer)
    # Minimum number of buffers of speaking audio before we consider the speaking audio a phrase.
    phrase_buffer_count = math.ceil(_config.get("phrase_threshold") / seconds_per_buffer)
    # Maximum number of buffers of non-speaking audio to retain before and after a phrase.
    non_speaking_buffer_count = math.ceil(_config.get("non_speaking_duration") / seconds_per_buffer)

    stream = sounddevice.Stream(samplerate=_config.get("sample_rate"),
                                channels=_config.get("channels"), dtype='int16')
    with stream:
        while not oa.core.finished.is_set():
            elapsed_time = 0  # Number of seconds of audio read
            buf = b""  # An empty buffer means that the stream has ended and there is no data left to read.
            while not oa.core.finished.is_set():
                frames = collections.deque()
                # Store audio input until the phrase starts
                while not oa.core.finished.is_set():
                    # Handle waiting too long for phrase by raising an exception
                    elapsed_time += seconds_per_buffer
                    if _config.get("timeout") and elapsed_time > _config.get("timeout"):
                        raise Exception("Listening timed out while waiting for phrase to start.")
                    buf = stream.read(_config.get("chunk"))[0]
                    frames.append(buf)
def play_input_to_output(duration, device, sample_rate=44100, chunksize=1024, nb_channel=2):
    # duration is given in seconds, e.g. 5
    dev = sd.query_devices(device=device)
    sample_rate = dev['default_samplerate']  # override with the device's default rate
    print(dev)

    def callback(indata, outdata, frames, time, status):
        if status:
            print(status, flush=True)
        outdata[:] = indata

    with sd.Stream(device=device, channels=nb_channel, callback=callback,
                   samplerate=sample_rate):
        sd.sleep(int(duration * 1000))
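# Usage sketch (the device index is an assumption; list available devices with
# sd.query_devices() and pick a duplex one):
#
#     import sounddevice as sd
#     play_input_to_output(duration=5, device=0)  # wire device 0's input to its output for 5 s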