# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def build_model():
    """Instantiate the synthesis model named by ``Hparams.builder``.

    Looks up the builder callable on the ``builder`` module and invokes it
    with hyperparameters taken from the global ``Hparams`` object plus the
    frontend vocabulary size.

    Returns:
        The constructed model instance.
    """
    # Gather every constructor argument in one mapping so the call below
    # is a single unpacking.
    build_kwargs = dict(
        n_speakers=Hparams.n_speakers,
        speaker_embed_dim=Hparams.speaker_embed_dim,
        n_vocab=frontend.n_vocab,
        embed_dim=Hparams.text_embed_dim,
        mel_dim=Hparams.num_mels,
        # One-sided spectrum size for the given FFT length.
        linear_dim=Hparams.fft_size // 2 + 1,
        r=Hparams.outputs_per_step,
        padding_idx=Hparams.padding_idx,
        dropout=Hparams.dropout,
        kernel_size=Hparams.kernel_size,
        encoder_channels=Hparams.encoder_channels,
        decoder_channels=Hparams.decoder_channels,
        converter_channels=Hparams.converter_channels,
        use_memory_mask=Hparams.use_memory_mask,
        trainable_positional_encodings=Hparams.trainable_positional_encodings,
        force_monotonic_attention=Hparams.force_monotonic_attention,
        use_decoder_state_for_postnet_input=Hparams.use_decoder_state_for_postnet_input,
        max_positions=Hparams.max_positions,
        freeze_embedding=Hparams.freeze_embedding,
        window_ahead=Hparams.window_ahead,
        window_backward=Hparams.window_backward,
    )
    build_fn = getattr(builder, Hparams.builder)
    return build_fn(**build_kwargs)
def build_model():
    """Build the model selected by ``Hparams.builder``.

    The builder function is resolved dynamically from the ``builder``
    module; all configuration comes from the global ``Hparams`` plus the
    frontend's vocabulary size.

    Returns:
        The constructed model instance.
    """
    constructor = getattr(builder, Hparams.builder)
    # Keyword arguments are assembled first to keep the call readable.
    config = {
        "n_speakers": Hparams.n_speakers,
        "speaker_embed_dim": Hparams.speaker_embed_dim,
        "n_vocab": frontend.n_vocab,
        "embed_dim": Hparams.text_embed_dim,
        "mel_dim": Hparams.num_mels,
        # Number of linear-spectrogram bins (one-sided FFT).
        "linear_dim": Hparams.fft_size // 2 + 1,
        "r": Hparams.outputs_per_step,
        "padding_idx": Hparams.padding_idx,
        "dropout": Hparams.dropout,
        "kernel_size": Hparams.kernel_size,
        "encoder_channels": Hparams.encoder_channels,
        "decoder_channels": Hparams.decoder_channels,
        "converter_channels": Hparams.converter_channels,
        "use_memory_mask": Hparams.use_memory_mask,
        "trainable_positional_encodings": Hparams.trainable_positional_encodings,
        "force_monotonic_attention": Hparams.force_monotonic_attention,
        "use_decoder_state_for_postnet_input": Hparams.use_decoder_state_for_postnet_input,
        "max_positions": Hparams.max_positions,
        "freeze_embedding": Hparams.freeze_embedding,
        "window_ahead": Hparams.window_ahead,
        "window_backward": Hparams.window_backward,
    }
    return constructor(**config)
def build_model():
    """Construct and return the model configured via ``Hparams``.

    ``Hparams.builder`` names the factory on the ``builder`` module;
    the vocabulary size comes from ``frontend``.

    Returns:
        The newly built model.
    """
    factory = getattr(builder, Hparams.builder)
    instance = factory(
        n_speakers=Hparams.n_speakers,
        speaker_embed_dim=Hparams.speaker_embed_dim,
        n_vocab=frontend.n_vocab,
        embed_dim=Hparams.text_embed_dim,
        mel_dim=Hparams.num_mels,
        # One-sided FFT bin count for the linear output.
        linear_dim=Hparams.fft_size // 2 + 1,
        r=Hparams.outputs_per_step,
        padding_idx=Hparams.padding_idx,
        dropout=Hparams.dropout,
        kernel_size=Hparams.kernel_size,
        encoder_channels=Hparams.encoder_channels,
        decoder_channels=Hparams.decoder_channels,
        converter_channels=Hparams.converter_channels,
        use_memory_mask=Hparams.use_memory_mask,
        trainable_positional_encodings=Hparams.trainable_positional_encodings,
        force_monotonic_attention=Hparams.force_monotonic_attention,
        use_decoder_state_for_postnet_input=Hparams.use_decoder_state_for_postnet_input,
        max_positions=Hparams.max_positions,
        freeze_embedding=Hparams.freeze_embedding,
        window_ahead=Hparams.window_ahead,
        window_backward=Hparams.window_backward,
    )
    return instance
def build_model():
    """Instantiate the model named by ``Hparams.builder``.

    NOTE(review): this copy of the function was truncated in the source
    (it ended mid-argument-list); the missing ``window_backward`` argument,
    closing parenthesis, and ``return`` have been restored to match the
    intact duplicates of this function elsewhere in the file.

    Returns:
        The constructed model instance.
    """
    model = getattr(builder, Hparams.builder)(
        n_speakers=Hparams.n_speakers,
        speaker_embed_dim=Hparams.speaker_embed_dim,
        n_vocab=frontend.n_vocab,
        embed_dim=Hparams.text_embed_dim,
        mel_dim=Hparams.num_mels,
        # One-sided spectrum size for the configured FFT length.
        linear_dim=Hparams.fft_size // 2 + 1,
        r=Hparams.outputs_per_step,
        padding_idx=Hparams.padding_idx,
        dropout=Hparams.dropout,
        kernel_size=Hparams.kernel_size,
        encoder_channels=Hparams.encoder_channels,
        decoder_channels=Hparams.decoder_channels,
        converter_channels=Hparams.converter_channels,
        use_memory_mask=Hparams.use_memory_mask,
        trainable_positional_encodings=Hparams.trainable_positional_encodings,
        force_monotonic_attention=Hparams.force_monotonic_attention,
        use_decoder_state_for_postnet_input=Hparams.use_decoder_state_for_postnet_input,
        max_positions=Hparams.max_positions,
        freeze_embedding=Hparams.freeze_embedding,
        window_ahead=Hparams.window_ahead,
        window_backward=Hparams.window_backward,
    )
    return model
def build_model():
    """Resolve the configured builder and return a freshly built model.

    The factory is looked up by name (``Hparams.builder``) on the
    ``builder`` module; every argument is sourced from ``Hparams`` except
    the vocabulary size, which comes from ``frontend``.
    """
    return getattr(builder, Hparams.builder)(
        n_speakers=Hparams.n_speakers,
        speaker_embed_dim=Hparams.speaker_embed_dim,
        n_vocab=frontend.n_vocab,
        embed_dim=Hparams.text_embed_dim,
        mel_dim=Hparams.num_mels,
        # Linear output dimension: one-sided FFT bins.
        linear_dim=Hparams.fft_size // 2 + 1,
        r=Hparams.outputs_per_step,
        padding_idx=Hparams.padding_idx,
        dropout=Hparams.dropout,
        kernel_size=Hparams.kernel_size,
        encoder_channels=Hparams.encoder_channels,
        decoder_channels=Hparams.decoder_channels,
        converter_channels=Hparams.converter_channels,
        use_memory_mask=Hparams.use_memory_mask,
        trainable_positional_encodings=Hparams.trainable_positional_encodings,
        force_monotonic_attention=Hparams.force_monotonic_attention,
        use_decoder_state_for_postnet_input=Hparams.use_decoder_state_for_postnet_input,
        max_positions=Hparams.max_positions,
        freeze_embedding=Hparams.freeze_embedding,
        window_ahead=Hparams.window_ahead,
        window_backward=Hparams.window_backward,
    )
def build_model():
    """Instantiate the model named by ``Hparams.builder``.

    NOTE(review): in the source, this copy of the function was corrupted —
    it was cut off after ``force_monotonic_attention`` and fused with an
    unrelated, syntactically incomplete fragment of a DenseNet graph
    generator (orphaned ``bn_size=...`` keyword arguments referencing an
    undefined ``self``/``graph``). The function has been restored to match
    the intact duplicates elsewhere in this file, and the foreign fragment
    — which could never compile — has been removed.

    Returns:
        The constructed model instance.
    """
    model = getattr(builder, Hparams.builder)(
        n_speakers=Hparams.n_speakers,
        speaker_embed_dim=Hparams.speaker_embed_dim,
        n_vocab=frontend.n_vocab,
        embed_dim=Hparams.text_embed_dim,
        mel_dim=Hparams.num_mels,
        # One-sided spectrum size for the configured FFT length.
        linear_dim=Hparams.fft_size // 2 + 1,
        r=Hparams.outputs_per_step,
        padding_idx=Hparams.padding_idx,
        dropout=Hparams.dropout,
        kernel_size=Hparams.kernel_size,
        encoder_channels=Hparams.encoder_channels,
        decoder_channels=Hparams.decoder_channels,
        converter_channels=Hparams.converter_channels,
        use_memory_mask=Hparams.use_memory_mask,
        trainable_positional_encodings=Hparams.trainable_positional_encodings,
        force_monotonic_attention=Hparams.force_monotonic_attention,
        use_decoder_state_for_postnet_input=Hparams.use_decoder_state_for_postnet_input,
        max_positions=Hparams.max_positions,
        freeze_embedding=Hparams.freeze_embedding,
        window_ahead=Hparams.window_ahead,
        window_backward=Hparams.window_backward,
    )
    return model
def test_merge(tmp_dir):
    """A two-input GraphAutoModel joined by Merge trains and predicts."""
    features = np.random.rand(100, 33)
    targets = np.random.rand(100, 1)

    # Two independent input branches, each through a DenseBlock.
    in_a = ak.Input()
    in_b = ak.Input()
    branch_a = ak.DenseBlock()(in_a)
    branch_b = ak.DenseBlock()(in_b)
    head = ak.RegressionHead()(ak.Merge()([branch_a, branch_b]))

    auto_model = ak.GraphAutoModel([in_a, in_b],
                                   head,
                                   directory=tmp_dir,
                                   max_trials=1)
    auto_model.fit([features, features], targets,
                   epochs=1,
                   batch_size=100,
                   verbose=False,
                   validation_split=0.5)

    predictions = auto_model.predict([features, features])
    assert predictions.shape == (100, 1)
def test_preprocessing(_, tmp_dir):
    """GraphAutoModel with Normalization preprocessors trains end-to-end.

    NOTE(review): this copy was truncated in the source mid-way through the
    ``graph.fit(`` call; the missing keyword arguments and closing
    parenthesis have been restored to match the intact duplicate of this
    test elsewhere in the file.
    """
    x_train = np.random.rand(100, 33)
    y_train = np.random.rand(100, 1)
    input_node1 = ak.Input()
    temp_node1 = ak.Normalization()(input_node1)
    output_node1 = ak.DenseBlock()(temp_node1)
    # A second normalization applied on top of an already-normalized node.
    output_node3 = ak.Normalization()(temp_node1)
    output_node3 = ak.DenseBlock()(output_node3)
    input_node2 = ak.Input()
    output_node2 = ak.Normalization()(input_node2)
    output_node2 = ak.DenseBlock()(output_node2)
    output_node = ak.Merge()([output_node1, output_node2, output_node3])
    output_node = ak.RegressionHead()(output_node)
    graph = ak.GraphAutoModel([input_node1, input_node2],
                              output_node,
                              directory=tmp_dir,
                              max_trials=1)
    # NOTE(review): the sibling copy passes both validation_data and
    # validation_split; Keras ignores the split when explicit validation
    # data is supplied — kept as-is to match the intact duplicate.
    graph.fit([x_train, x_train], y_train,
              epochs=1,
              batch_size=100,
              validation_data=([x_train, x_train], y_train),
              validation_split=0.5,
              verbose=False)
def test_preprocessing(_, tmp_dir):
    """Graph with per-branch Normalization preprocessors fits cleanly."""
    features = np.random.rand(100, 33)
    targets = np.random.rand(100, 1)

    # Branch 1: normalize once, then a second normalization stacked on the
    # same intermediate node feeds branch 3.
    first_input = ak.Input()
    normalized = ak.Normalization()(first_input)
    branch_one = ak.DenseBlock()(normalized)
    branch_three = ak.DenseBlock()(ak.Normalization()(normalized))

    # Branch 2: independent input with its own normalization.
    second_input = ak.Input()
    branch_two = ak.DenseBlock()(ak.Normalization()(second_input))

    merged = ak.Merge()([branch_one, branch_two, branch_three])
    head = ak.RegressionHead()(merged)

    graph = ak.GraphAutoModel([first_input, second_input],
                              head,
                              directory=tmp_dir,
                              max_trials=1)
    # NOTE(review): both validation_data and validation_split are passed;
    # Keras documents that validation_split is ignored when validation_data
    # is provided — preserved as-is to keep behavior identical.
    graph.fit([features, features], targets,
              epochs=1,
              batch_size=100,
              validation_data=([features, features], targets),
              validation_split=0.5,
              verbose=False)