Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def foo() -> int:
    """Return the constant 0, after a runtime return-type check.

    `check_return_type` is presumably typeguard's runtime checker
    (defined/imported elsewhere in this file) — confirm at the import site.
    """
    result = 0
    assert check_return_type(result)
    return result
# NOTE(review): headerless fragment — the enclosing `def` line is outside this
# view and the original indentation has been flattened; code is kept
# byte-identical. It reads a Kaldi-style "scp" file where each line is
# "<key> <value...>" and returns {key: value}; malformed or duplicated lines
# raise RuntimeError. `path` is presumably a str/PathLike — confirm at caller.
assert check_argument_types()
data = {}
with Path(path).open("r") as f:
# enumerate from 1 so error messages report human-friendly line numbers
for linenum, line in enumerate(f, 1):
# split on the FIRST whitespace run only, so the value may contain spaces
sps = line.rstrip().split(maxsplit=1)
if len(sps) != 2:
raise RuntimeError(
f"scp file must have two or more columns: "
f"{line} ({path}:{linenum})"
)
k, v = sps
# reject duplicate keys rather than silently overwriting
if k in data:
raise RuntimeError(f"{k} is duplicated ({path}:{linenum})")
# NOTE(review): `line` was already rstripped above, so this second
# rstrip on `v` is a no-op; harmless but redundant.
data[k] = v.rstrip()
assert check_return_type(data)
return data
# NOTE(review): headerless method fragment (references self/uid from the
# missing signature); indentation flattened, code kept byte-identical.
# Placeholder branch — data augmentation not implemented here.
# - Data augmentation
pass
# Convert raw text into an int64 token-ID array via the configured converter.
if self.text_name in data and self.text_converter is not None:
text = data[self.text_name]
text_ints = self.text_converter.text2ids(text)
data[self.text_name] = np.array(text_ints, dtype=np.int64)
# TODO(kamo): I couldn't find clear way to realize this
# [Option] Derive the shape
# Optionally record each array's shape as "d0,d1,..." under "<key>_shape".
# Assumes every value in `data` is shape-bearing (e.g. np.ndarray) at this
# point — TODO confirm against the caller.
if self.dir_writer is not None:
for k, v in data.items():
shape = ",".join(map(str, v.shape))
self.dir_writer[k + "_shape"][uid] = shape
assert check_return_type(data)
return data
# NOTE(review): headerless fragment — starts mid-branch (the isinstance check
# on `v` is presumably just above this view); code kept byte-identical.
# Coerces scalar-like tensors/arrays to Python scalars, then wraps them in a
# WeightedAverage or Average value object (project types).
# NOTE(review): the condition tests TOTAL element count == 1, while the
# message speaks of dimensions — a 1-D array of length 1 passes.
if np.prod(v.shape) != 1:
raise ValueError(f"v must be 0 or 1 dimension: {len(v.shape)}")
v = v.item()
if isinstance(weight, (torch.Tensor, np.ndarray)):
if np.prod(weight.shape) != 1:
raise ValueError(
f"weight must be 0 or 1 dimension: {len(weight.shape)}"
)
weight = weight.item()
# weight present -> weighted aggregation; otherwise plain average
if weight is not None:
retval = WeightedAverage(v, weight)
else:
retval = Average(v)
assert check_return_type(retval)
return retval
# NOTE(review): headerless fragment — dangling `elif` chain; earlier branches
# are outside this view. Maps a case-insensitive scheduler name to the
# corresponding torch.optim.lr_scheduler CLASS (not an instance).
elif name.lower() == "lambdalr":
retval = torch.optim.lr_scheduler.LambdaLR
elif name.lower() == "steplr":
retval = torch.optim.lr_scheduler.StepLR
elif name.lower() == "multisteplr":
retval = torch.optim.lr_scheduler.MultiStepLR
elif name.lower() == "exponentiallr":
retval = torch.optim.lr_scheduler.ExponentialLR
# NOTE(review): inconsistent with siblings — "CosineAnnealingLR".lower() is
# recomputed at runtime instead of writing the literal "cosineannealinglr";
# behavior is identical, consider normalizing for consistency.
elif name.lower() == "CosineAnnealingLR".lower():
retval = torch.optim.lr_scheduler.CosineAnnealingLR
else:
# Unknown name: report the valid choices from the class-level registry.
raise RuntimeError(
f"--escheduler must be one of "
f"{cls.epoch_scheduler_choices()}: --escheduler {name}"
)
assert check_return_type(retval)
return retval
# NOTE(review): headerless fragment; code kept byte-identical. Reads a
# 2-column text file and parses each value string into a numpy array
# (at least 1-D) using `dtype`/`delimiter` from the missing signature.
# 'uttb': np.ndarray([3, 4, 5])}
d = read_2column_text(path)
# Using for-loop instead of dict-comprehension for debuggability
retval = {}
for k, v in d.items():
try:
# ndmin=1 ensures even a single number becomes a 1-element array
retval[k] = np.loadtxt(
StringIO(v), ndmin=1, dtype=dtype, delimiter=delimiter
)
except ValueError:
# Log which file/key/value failed before re-raising the original error.
logging.error(
f'Error happened with path="{path}", ' f'id="{k}", value="{v}"'
)
raise
assert check_return_type(retval)
return retval
# NOTE(review): headerless fragment — dangling `else:` below belongs to an
# `if` outside this view (presumably "if a normalize class is configured");
# code kept byte-identical. Assembles the end-to-end TTS model from its
# configured components.
normalize = normalize_class(**args.normalize_conf)
else:
normalize = None
# 3. TTS
tts_class = cls.get_tts_class(args.tts)
# idim/odim: input vocabulary size and output feature dim — odim's origin is
# outside this view; confirm upstream.
tts = tts_class(idim=vocab_size, odim=odim, **args.tts_conf)
# 4. Build model
model = TTSE2E(
feats_extract=feats_extract,
normalize=normalize,
tts=tts,
**args.e2e_conf,
)
assert check_return_type(model)
return model
# NOTE(review): headerless method fragment (references `self` and `data` from
# the missing signature); code kept byte-identical. Feature branches are
# placeholders; the text branch converts a string into an int array.
assert check_argument_types()
if 'feats' in data:
# Nothing now: candidates:
# - STFT
# - Fbank
# - CMVN
# - Data augmentation
pass
if 'text' in data and self.converter is not None:
# `self.converter` is callable here (cf. the other fragment above, which
# instead calls .text2ids) — two different converter interfaces; verify.
text = data['text']
text_int_array = self.converter(text)
data['text'] = text_int_array
assert check_return_type(data)
return data
# NOTE(review): headerless fragment — dangling `elif` chain; earlier branches
# (adam, sgd, ...) are outside this view. Maps a case-insensitive optimizer
# name to the corresponding torch.optim CLASS (not an instance).
elif name.lower() == "adamax":
retval = torch.optim.Adamax
elif name.lower() == "asgd":
retval = torch.optim.ASGD
elif name.lower() == "lbfgs":
retval = torch.optim.LBFGS
elif name.lower() == "rmsprop":
retval = torch.optim.RMSprop
elif name.lower() == "rprop":
retval = torch.optim.Rprop
else:
# Unknown name: report the valid choices from the class-level registry.
raise RuntimeError(
f"--optim must be one of {cls.optimizer_choices()}: "
f"--optim {name}"
)
assert check_return_type(retval)
return retval
# NOTE(review): headerless fragment (`group`/`cls`/`parser` come from the
# missing enclosing method); code kept byte-identical. Registers the
# batch-scheduler CLI options and returns the (mutated) parser.
group.add_argument(
"--bscheduler",
# lower-case the value, mapping the string "none" (or empty) to None
type=lambda x: str_or_none(x.lower()),
default=None,
choices=cls.batch_scheduler_choices(),
help="The batch-scheduler-type",
)
group.add_argument(
"--bscheduler_conf",
# NestedDictAction presumably parses "a.b=c"-style keys into nested
# dicts — confirm against its definition.
action=NestedDictAction,
default=dict(),
help="The keyword arguments for the batch scheduler",
)
assert check_return_type(parser)
return parser