def set_multi_head_attention_v2(spec, variables, scope, self_attention=False):
    set_layer_norm(spec.layer_norm, variables, "%s/input_layer_norm" % scope)
    if self_attention:
        # Load the query, key and value projections separately, then fuse
        # them into a single linear layer so the runtime can compute all
        # three with one matrix multiplication.
        split_layers = [common_spec.LinearSpec() for _ in range(3)]
        set_linear(split_layers[0], variables, "%s/layer/linear_queries" % scope)
        set_linear(split_layers[1], variables, "%s/layer/linear_keys" % scope)
        set_linear(split_layers[2], variables, "%s/layer/linear_values" % scope)
        utils.fuse_linear(spec.linear[0], split_layers)
    else:
        # Cross-attention: the query projection stays on its own; only the
        # key and value projections are fused.
        set_linear(spec.linear[0], variables, "%s/layer/linear_queries" % scope)
        split_layers = [common_spec.LinearSpec() for _ in range(2)]
        set_linear(split_layers[0], variables, "%s/layer/linear_keys" % scope)
        set_linear(split_layers[1], variables, "%s/layer/linear_values" % scope)
        utils.fuse_linear(spec.linear[1], split_layers)
    set_linear(spec.linear[-1], variables, "%s/layer/linear_output" % scope)
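The utils.fuse_linear call merges the split projections into one layer. A minimal sketch of such a fusion, assuming each LinearSpec holds weight and bias arrays with weights stored as (output_dim, input_dim); this is illustrative, not the library's actual implementation:

import numpy as np

class LinearSpec:
    """Illustrative stand-in for common_spec.LinearSpec."""
    def __init__(self):
        self.weight = None
        self.bias = None

def fuse_linear(fused, split_layers):
    # Stack the split projections along the output dimension so one matmul
    # produces all of them; matrix multiplication distributes over the row
    # blocks, so the fused output is exactly the concatenation of the
    # separate outputs.
    fused.weight = np.concatenate([l.weight for l in split_layers], axis=0)
    fused.bias = np.concatenate([l.bias for l in split_layers], axis=0)

# Quick equivalence check for the sketch above.
q, k, v = LinearSpec(), LinearSpec(), LinearSpec()
for l in (q, k, v):
    l.weight = np.random.rand(8, 8).astype(np.float32)
    l.bias = np.random.rand(8).astype(np.float32)
out = LinearSpec()
fuse_linear(out, [q, k, v])
x = np.random.rand(2, 8).astype(np.float32)
assert np.allclose(
    x @ out.weight.T + out.bias,
    np.concatenate([x @ l.weight.T + l.bias for l in (q, k, v)], axis=-1))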
def set_multi_head_attention(spec, variables, scope, self_attention=False):
    # Same fusion logic as above, but for checkpoints that use
    # dot-separated variable names (e.g. "<scope>.linear_query").
    if self_attention:
        split_layers = [common_spec.LinearSpec() for _ in range(3)]
        set_linear(split_layers[0], variables, "%s.linear_query" % scope)
        set_linear(split_layers[1], variables, "%s.linear_keys" % scope)
        set_linear(split_layers[2], variables, "%s.linear_values" % scope)
        utils.fuse_linear(spec.linear[0], split_layers)
    else:
        set_linear(spec.linear[0], variables, "%s.linear_query" % scope)
        split_layers = [common_spec.LinearSpec() for _ in range(2)]
        set_linear(split_layers[0], variables, "%s.linear_keys" % scope)
        set_linear(split_layers[1], variables, "%s.linear_values" % scope)
        utils.fuse_linear(spec.linear[1], split_layers)
    set_linear(spec.linear[-1], variables, "%s.final_linear" % scope)
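The set_linear helper these functions call is not shown in the snippet. A hedged sketch of what it plausibly does, assuming `variables` is a flat dict mapping checkpoint variable names to arrays and the dot-separated naming used above; the key names are assumptions, not the converter's confirmed API:

def set_linear(spec, variables, scope):
    # Copy the named weight out of the checkpoint dict into the spec; the
    # bias is treated as optional since some projections have none.
    spec.weight = variables["%s.weight" % scope]
    bias = variables.get("%s.bias" % scope)
    if bias is not None:
        spec.bias = bias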
class TransformerDecoderSpec:  # class name assumed; the snippet only showed __init__
    def __init__(self, num_layers):
        self.embeddings = common_spec.EmbeddingsSpec()
        self.position_encodings = PositionEncoderSpec()
        self.layer_norm = common_spec.LayerNormSpec()
        self.projection = common_spec.LinearSpec()
        self.layer = [
            TransformerDecoderLayerSpec() for _ in range(num_layers)]
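The layer and attention specs instantiated here are also absent from the snippet. A stand-in consistent with the indexing in the attention functions above — spec.linear holds [fused_qkv, output] for self-attention and [query, fused_kv, output] for cross-attention — reusing the LinearSpec stand-in from the fusion sketch; the class and attribute names are assumptions:

class MultiHeadAttentionSpec:
    def __init__(self, self_attention=False):
        # Two linears when Q/K/V are fused into one, three when the query
        # stays separate and only K/V are fused.
        self.linear = [LinearSpec() for _ in range(2 if self_attention else 3)]

class TransformerDecoderLayerSpec:
    def __init__(self):
        self.self_attention = MultiHeadAttentionSpec(self_attention=True)
        self.attention = MultiHeadAttentionSpec()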
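Putting the pieces together, a hypothetical conversion loop could look like the following; the checkpoint scope names and the `variables` dict are assumptions about an OpenNMT-py-style layout, not verified behavior:

# `variables` is assumed to be a flat name-to-array dict loaded from a
# trained checkpoint.
spec = TransformerDecoderSpec(num_layers=6)
for i, layer in enumerate(spec.layer):
    scope = "decoder.transformer_layers.%d" % i  # hypothetical key prefix
    set_multi_head_attention(
        layer.self_attention, variables,
        "%s.self_attn" % scope, self_attention=True)
    set_multi_head_attention(
        layer.attention, variables, "%s.context_attn" % scope)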