        # Tail of the DenseNet generator's generate(): num_features and
        # db_input_node_id are produced by the stem layers earlier in the method.
        for i, num_layers in enumerate(self.block_config):
            db_input_node_id = self._dense_block(num_layers=num_layers, num_input_features=num_features,
                                                 bn_size=self.bn_size, growth_rate=self.growth_rate,
                                                 drop_rate=self.drop_rate,
                                                 graph=graph, input_node_id=db_input_node_id)
            num_features = num_features + num_layers * self.growth_rate
            # Every block except the last is followed by a transition layer
            # that halves the feature count.
            if i != len(self.block_config) - 1:
                db_input_node_id = self._transition(num_input_features=num_features,
                                                    num_output_features=num_features // 2,
                                                    graph=graph, input_node_id=db_input_node_id)
                num_features = num_features // 2
        # Final batch norm, ReLU, and global average pooling
        out = graph.add_layer(self.batch_norm(num_features), db_input_node_id)
        out = graph.add_layer(StubReLU(), out)
        out = graph.add_layer(self.adaptive_avg_pooling(), out)
        # Linear classifier head
        graph.add_layer(StubDense(num_features, self.n_output_node), out)
        return graph
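
# A minimal, self-contained sketch (not part of the original module) of the
# feature-count bookkeeping in the loop above: each dense block adds
# num_layers * growth_rate channels, and every transition except the last
# halves the count. The starting value 64 and the block configuration are
# illustrative assumptions in the style of DenseNet-121.
def densenet_feature_counts(block_config=(6, 12, 24, 16), growth_rate=32, num_features=64):
    counts = []
    for i, num_layers in enumerate(block_config):
        num_features = num_features + num_layers * growth_rate
        if i != len(block_config) - 1:
            num_features = num_features // 2
        counts.append(num_features)
    return counts

# densenet_feature_counts() == [128, 256, 512, 1024]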
def create_new_layer(layer, n_dim):
    input_shape = layer.output.shape
    dense_deeper_classes = [StubDense, get_dropout_class(n_dim), StubReLU]
    conv_deeper_classes = [get_conv_class(n_dim), get_batch_norm_class(n_dim), StubReLU]
    # Exclude the kind of `layer` itself from the candidates so the same kind
    # of layer is never stacked twice in a row.
    if is_layer(layer, LayerType.RELU):
        conv_deeper_classes = [get_conv_class(n_dim), get_batch_norm_class(n_dim)]
        dense_deeper_classes = [StubDense, get_dropout_class(n_dim)]
    elif is_layer(layer, LayerType.DROPOUT):
        dense_deeper_classes = [StubDense, StubReLU]
    elif is_layer(layer, LayerType.BATCH_NORM):
        conv_deeper_classes = [get_conv_class(n_dim), StubReLU]
    if len(input_shape) == 1:
        # It is in the dense layer part.
        layer_class = sample(dense_deeper_classes, 1)[0]
    else:
        # It is in the conv layer part.
        layer_class = sample(conv_deeper_classes, 1)[0]
    if layer_class == StubDense:
        new_layer = StubDense(input_shape[0], input_shape[0])
    elif layer_class == get_dropout_class(n_dim):
        new_layer = layer_class(Constant.DENSE_DROPOUT_RATE)
    elif layer_class == get_conv_class(n_dim):
        new_layer = layer_class(input_shape[-1], input_shape[-1], sample((1, 3, 5), 1)[0], stride=1)
    elif layer_class == get_batch_norm_class(n_dim):
        new_layer = layer_class(input_shape[-1])
    elif layer_class == get_pooling_class(n_dim):
        new_layer = layer_class(sample((1, 3, 5), 1)[0])
    else:
        new_layer = layer_class()
    return new_layer
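
# Minimal sketch of the sampling step above (assumption: `sample` is
# random.sample, imported elsewhere in this module):
#     sample((1, 3, 5), 1)[0]   # draws a single kernel/pool size from {1, 3, 5}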
def wider_pre_dense(layer, n_add, weighted=True):
    if not weighted:
        return StubDense(layer.input_units, layer.units + n_add)
    n_units2 = layer.units
    teacher_w, teacher_b = layer.get_weights()
    rand = np.random.randint(n_units2, size=n_add)
    student_w = teacher_w.copy()
    student_b = teacher_b.copy()
    # Target layer update: each added unit copies a randomly chosen teacher
    # unit's weights and bias, with noise added to break symmetry.
    for i in range(n_add):
        teacher_index = rand[i]
        new_weight = teacher_w[teacher_index, :]
        new_weight = new_weight[np.newaxis, :]
        student_w = np.concatenate((student_w, add_noise(new_weight, student_w)), axis=0)
        student_b = np.append(student_b, add_noise(teacher_b[teacher_index], student_b))
    new_pre_layer = StubDense(layer.input_units, n_units2 + n_add)
    new_pre_layer.set_weights((student_w, student_b))
    return new_pre_layer
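
# Lead-in: a NumPy-only sketch (illustrative, not part of the original module)
# of the row duplication performed by wider_pre_dense.
def _wider_pre_dense_sketch():
    """New output units copy randomly chosen teacher rows, giving a roughly
    function-preserving starting point before noise is added."""
    import numpy as np
    teacher_w = np.arange(6.0).reshape(3, 2)             # 3 units, 2 inputs
    rand = np.random.randint(3, size=2)                  # 2 teacher rows to copy
    student_w = np.concatenate((teacher_w, teacher_w[rand, :]), axis=0)
    assert student_w.shape == (5, 2)                     # 3 original + 2 new units
    return student_w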
def get_n_dim(layer):
    """Return the spatial dimensionality (1, 2, or 3) of a stub layer, or -1 if unknown."""
    if isinstance(layer, (StubConv1d, StubDropout1d, StubGlobalPooling1d, StubPooling1d, StubBatchNormalization1d)):
        return 1
    if isinstance(layer, (StubConv2d, StubDropout2d, StubGlobalPooling2d, StubPooling2d, StubBatchNormalization2d)):
        return 2
    if isinstance(layer, (StubConv3d, StubDropout3d, StubGlobalPooling3d, StubPooling3d, StubBatchNormalization3d)):
        return 3
    return -1
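
# Usage sketch for the get_n_dim/get_conv_class dispatch pattern. The layer
# construction is left abstract because the stub constructors' signatures are
# not shown in these snippets:
#     n_dim = get_n_dim(some_conv2d_layer)   # -> 2 for any 2d stub layer
#     conv_class = get_conv_class(n_dim)     # -> the matching 2d conv class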
class LayerType:
    """Each member is a tuple of stub base classes, so a single isinstance
    check covers the 1d/2d/3d variants of that layer kind."""
    INPUT = (StubInput,)
    CONV = (StubConv,)
    DENSE = (StubDense,)
    BATCH_NORM = (StubBatchNormalization,)
    CONCAT = (StubConcatenate,)
    ADD = (StubAdd,)
    POOL = (StubPooling,)
    DROPOUT = (StubDropout,)
    SOFTMAX = (StubSoftmax,)
    RELU = (StubReLU,)
    FLATTEN = (StubFlatten,)
    GLOBAL_POOL = (StubGlobalPooling,)


def is_layer(layer, layer_type):
    return isinstance(layer, layer_type)
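
# Illustrative check (not part of the original module). StubReLU() is
# constructed with no arguments elsewhere in these snippets, so this runs as-is
# once the stub layers are imported.
def _is_layer_sketch():
    assert is_layer(StubReLU(), LayerType.RELU)
    assert not is_layer(StubReLU(), LayerType.CONV)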
    def generate(self, model_len=None, model_width=None):
        """Generate a multi-layer perceptron graph.

        Returns:
            An instance of the class Graph. Represents the neural architecture graph of the generated model.
        """
        if model_len is None:
            model_len = Constant.MODEL_LEN
        if model_width is None:
            model_width = Constant.MODEL_WIDTH
        if isinstance(model_width, list) and not len(model_width) == model_len:
            raise ValueError("The length of 'model_width' does not match 'model_len'")
        elif isinstance(model_width, int):
            model_width = [model_width] * model_len
        graph = Graph(self.input_shape, False)
        output_node_id = 0
        n_nodes_prev_layer = self.input_shape[0]
        for width in model_width:
            # Each hidden layer is Dense -> Dropout -> ReLU.
            output_node_id = graph.add_layer(StubDense(n_nodes_prev_layer, width), output_node_id)
            output_node_id = graph.add_layer(StubDropout1d(Constant.MLP_DROPOUT_RATE), output_node_id)
            output_node_id = graph.add_layer(StubReLU(), output_node_id)
            n_nodes_prev_layer = width
        graph.add_layer(StubDense(n_nodes_prev_layer, self.n_output_node), output_node_id)
        return graph
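
# Illustrative call (assumption: this method lives on an MlpGenerator-style
# class taking n_output_node and input_shape; the names below are for
# illustration only):
#     graph = MlpGenerator(n_output_node=10, input_shape=(784,)).generate(
#         model_len=3, model_width=[128, 64, 32])
# The resulting graph chains Dense -> Dropout -> ReLU per hidden layer, then a
# final Dense(32, 10) classifier head.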
def wider_next_dense(layer, start_dim, total_dim, n_add, weighted=True):
    if not weighted:
        return StubDense(layer.input_units + n_add, layer.units)
    teacher_w, teacher_b = layer.get_weights()
    student_w = teacher_w.copy()
    n_units_each_channel = int(teacher_w.shape[1] / total_dim)
    # Splice zero-initialized (plus noise) columns in at the widened channel
    # positions so the layer's output is initially unaffected by the new inputs.
    new_weight = np.zeros((teacher_w.shape[0], n_add * n_units_each_channel))
    student_w = np.concatenate((student_w[:, :start_dim * n_units_each_channel],
                                add_noise(new_weight, student_w),
                                student_w[:, start_dim * n_units_each_channel:total_dim * n_units_each_channel]),
                               axis=1)
    new_layer = StubDense(layer.input_units + n_add, layer.units)
    new_layer.set_weights((student_w, teacher_b))
    return new_layer
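
# Lead-in: a NumPy-only sketch (illustrative, not part of the original module)
# of the column splice performed by wider_next_dense.
def _wider_next_dense_sketch():
    """Zero columns enter at the widened channel positions, so the new inputs
    initially contribute nothing to the layer's output."""
    import numpy as np
    teacher_w = np.ones((4, 6))              # 4 units; 3 channels * 2 units each
    start_dim, total_dim, n_add = 1, 3, 2
    n_units_each_channel = teacher_w.shape[1] // total_dim
    new_cols = np.zeros((4, n_add * n_units_each_channel))
    student_w = np.concatenate((teacher_w[:, :start_dim * n_units_each_channel],
                                new_cols,
                                teacher_w[:, start_dim * n_units_each_channel:]),
                               axis=1)
    assert student_w.shape == (4, 10)        # 4 zero columns inserted
    return student_w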