# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def __init__(
        self,
        text,
        lang='en',
        slow=False,
        lang_check=True,
        pre_processor_funcs=None,
        tokenizer_func=None,
):
    """Initialize the text-to-speech request.

    Args:
        text (str): Text to be spoken. Must be non-empty.
        lang (str): Language code (default ``'en'``).
        slow (bool): Speak more slowly (default ``False``).
        lang_check (bool): When True, validate ``lang`` against the
            supported-language list from ``tts_langs()``. A failure to
            fetch the list (``RuntimeError``) is logged, not raised.
        pre_processor_funcs (list): Pre-processing callables applied to
            the text. Defaults to tone marks, end-of-line,
            abbreviations and word substitutions.
        tokenizer_func (callable): Tokenizer function. Defaults to a
            ``Tokenizer`` handling tone marks, period/comma, colon and
            other punctuation.

    Raises:
        AssertionError: If ``text`` is empty.
        ValueError: If ``lang_check`` is True and ``lang`` is not in the
            fetched language list.
    """
    # Resolve default collaborators here rather than in the signature so
    # a shared mutable default list is never reused across instances
    # (classic mutable-default-argument pitfall).
    if pre_processor_funcs is None:
        pre_processor_funcs = [
            pre_processors.tone_marks,
            pre_processors.end_of_line,
            pre_processors.abbreviations,
            pre_processors.word_sub,
        ]
    if tokenizer_func is None:
        tokenizer_func = Tokenizer([
            tokenizer_cases.tone_marks,
            tokenizer_cases.period_comma,
            tokenizer_cases.colon,
            tokenizer_cases.other_punctuation,
        ]).run

    # Debug: log every constructor argument except the instance itself.
    for k, v in locals().items():
        if k == 'self':
            continue
        log.debug("%s: %s", k, v)

    # Text
    assert text, 'No text to speak'
    self.text = text

    # Language
    if lang_check:
        try:
            langs = tts_langs()
            if lang.lower() not in langs:
                raise ValueError("Language not supported: %s" % lang)
        except RuntimeError as e:
            # Could not fetch the language list (e.g. network failure):
            # warn and continue rather than failing the whole request.
            log.debug(str(e), exc_info=True)
            log.warning(str(e))
def __init__(
        self,
        text,
        lang='en',
        slow=False,
        lang_check=True,
        pre_processor_funcs=None,
        tokenizer_func=None,
):
    """Initialize the request: log arguments and store the text.

    Args:
        text (str): Text to be spoken. Must be non-empty.
        lang (str): Language code (default ``'en'``).
        slow (bool): Speak more slowly (default ``False``).
        lang_check (bool): Language-validation flag (not used in this
            fragment's visible body).
        pre_processor_funcs (list): Pre-processing callables; defaults
            to tone marks, end-of-line, abbreviations and word
            substitutions.
        tokenizer_func (callable): Tokenizer; defaults to a
            ``Tokenizer`` handling tone marks, period/comma, colon and
            other punctuation.

    Raises:
        AssertionError: If ``text`` is empty.
    """
    # None-sentinel defaults instead of a mutable list in the signature:
    # a signature-level list is created once at def time and shared by
    # every call (mutable-default-argument pitfall).
    if pre_processor_funcs is None:
        pre_processor_funcs = [
            pre_processors.tone_marks,
            pre_processors.end_of_line,
            pre_processors.abbreviations,
            pre_processors.word_sub,
        ]
    if tokenizer_func is None:
        tokenizer_func = Tokenizer([
            tokenizer_cases.tone_marks,
            tokenizer_cases.period_comma,
            tokenizer_cases.colon,
            tokenizer_cases.other_punctuation,
        ]).run

    # Debug: log every constructor argument except the instance itself.
    for k, v in locals().items():
        if k == 'self':
            continue
        log.debug("%s: %s", k, v)

    # Text
    assert text, 'No text to speak'
    self.text = text
def __init__(
        self,
        text,
        lang='en',
        slow=False,
        lang_check=True,
        pre_processor_funcs=None,
        tokenizer_func=None,
):
    """Initialize the request: log arguments, store the text, and
    optionally validate the language.

    Args:
        text (str): Text to be spoken. Must be non-empty.
        lang (str): Language code (default ``'en'``).
        slow (bool): Speak more slowly (default ``False``).
        lang_check (bool): When True, validate ``lang`` against the
            supported-language list; fetch failures are logged, not
            raised.
        pre_processor_funcs (list): Pre-processing callables; defaults
            to tone marks, end-of-line, abbreviations and word
            substitutions.
        tokenizer_func (callable): Tokenizer; defaults to a
            ``Tokenizer`` handling tone marks, period/comma and other
            punctuation (this variant omits the colon case).

    Raises:
        AssertionError: If ``text`` is empty.
        ValueError: If ``lang_check`` is True and ``lang`` is not
            supported.
    """
    # None-sentinel defaults avoid the mutable-default-argument pitfall
    # (a signature-level list is shared across all calls).
    if pre_processor_funcs is None:
        pre_processor_funcs = [
            pre_processors.tone_marks,
            pre_processors.end_of_line,
            pre_processors.abbreviations,
            pre_processors.word_sub,
        ]
    if tokenizer_func is None:
        tokenizer_func = Tokenizer([
            tokenizer_cases.tone_marks,
            tokenizer_cases.period_comma,
            tokenizer_cases.other_punctuation,
        ]).run

    # Debug: log every constructor argument except the instance itself.
    for k, v in locals().items():
        if k == 'self':
            continue
        log.debug("%s: %s", k, v)

    # Text
    assert text, 'No text to speak'
    self.text = text

    # Language
    if lang_check:
        # NOTE(review): the original block ended with a dangling
        # `if lang_check:` and no body (a syntax error). Reconstructed
        # from the identical validation branch elsewhere in this file —
        # confirm against upstream.
        try:
            langs = tts_langs()
            if lang.lower() not in langs:
                raise ValueError("Language not supported: %s" % lang)
        except RuntimeError as e:
            log.debug(str(e), exc_info=True)
            log.warning(str(e))
def __init__(
        self,
        text,
        lang='en',
        slow=False,
        lang_check=True,
        pre_processor_funcs=[
            pre_processors.tone_marks,
            pre_processors.end_of_line,
            pre_processors.abbreviations,
            pre_processors.word_sub
        ],
        tokenizer_func=Tokenizer([
            tokenizer_cases.tone_marks,
            tokenizer_cases.period_comma,
            tokenizer_cases.other_punctuation
        ]).run
):
    """Set up the request: log the received arguments and keep the text.

    ``text`` must be non-empty (``AssertionError`` otherwise); every
    argument except ``self`` is emitted at debug level.
    """
    # Debug: emit each argument except the instance itself.
    received = locals()
    for name in received:
        if name != 'self':
            log.debug("%s: %s", name, received[name])

    # Text
    assert text, 'No text to speak'
    self.text = text
    # Language
def __init__(
self,
text,
lang='en',
slow=False,
lang_check=True,
pre_processor_funcs=[
pre_processors.tone_marks,
pre_processors.end_of_line,
pre_processors.abbreviations,
pre_processors.word_sub
],
tokenizer_func=Tokenizer([
tokenizer_cases.tone_marks,
tokenizer_cases.period_comma,
tokenizer_cases.colon,
tokenizer_cases.other_punctuation
]).run
):
# Text
assert text, 'No text to speak'
self.text = text
# Language
if lang_check:
try:
langs = tts_langs()
if lang.lower() not in langs:
raise ValueError("Language not supported: %s" % lang)