# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
return list(filter(lambda layer_id: is_layer(self.layer_list[layer_id], LayerType.CONV),
self.get_main_chain_layers()))
def to_deeper_model(self, target_id, new_layer):
    """Insert a relu-conv-bn block after the target block.

    Args:
        target_id: A convolutional layer ID. The new block should be inserted after the block.
        new_layer: An instance of StubLayer subclasses.
    """
    self.operation_history.append(('to_deeper_model', target_id, new_layer))
    node_in = self.layer_id_to_input_node_ids[target_id][0]
    node_out = self.layer_id_to_output_node_ids[target_id][0]
    if self.weighted:
        # Initialize the new layer's weights according to its type
        # (first matching type wins, mirroring the original if/elif chain).
        for kind, init_fn in ((LayerType.DENSE, init_dense_weight),
                              (LayerType.CONV, init_conv_weight),
                              (LayerType.BATCH_NORM, init_bn_weight)):
            if is_layer(new_layer, kind):
                init_fn(new_layer)
                break
    self._insert_new_layers([new_layer], node_in, node_out)
return list(filter(lambda layer_id: is_layer(self.layer_list[layer_id], type_str), range(self.n_layers)))
def _upper_layer_width(self, u):
    """Return the width of the nearest conv/dense layer above node ``u``.

    Follows the first incoming edge of ``u`` upwards. A concatenate layer
    contributes the sum of the widths of both of its inputs. When ``u`` has
    no incoming edges, falls back to the last dimension of the first node's
    shape.
    """
    for src, layer_id in self.reverse_adj_list[u]:
        candidate = self.layer_list[layer_id]
        if is_layer(candidate, LayerType.CONV) or is_layer(candidate, LayerType.DENSE):
            return layer_width(candidate)
        if is_layer(candidate, LayerType.CONCAT):
            left, right = self.layer_id_to_input_node_ids[layer_id][:2]
            return self._upper_layer_width(left) + self._upper_layer_width(right)
        # Pass-through layer: keep climbing from its input node.
        return self._upper_layer_width(src)
    return self.node_list[0].shape[-1]
def deep_layer_ids(self):
    """Collect main-chain layer IDs usable for a deepening operation.

    Walks the main chain in order, stopping at the first global-pooling
    layer; add/concatenate layers are skipped but do not stop the walk.
    """
    collected = []
    for lid in self.get_main_chain_layers():
        current = self.layer_list[lid]
        if is_layer(current, LayerType.GLOBAL_POOL):
            break
        is_merge = is_layer(current, LayerType.ADD) or is_layer(current, LayerType.CONCAT)
        if not is_merge:
            collected.append(lid)
    return collected
def _search(self, u, start_dim, total_dim, n_add):
    """Search the graph for all the layers to be widened caused by an operation.

    It is a recursive function with a duplication check to avoid deadlock.
    It searches from a starting node u until the corresponding layers have
    been widened.

    Args:
        u: The starting node ID.
        start_dim: The position to insert the additional dimensions.
        total_dim: The total number of dimensions the layer has before widening.
        n_add: The number of dimensions to add.
    """
    # NOTE(review): this region of the paste was corrupted — it contained a
    # duplicated partial copy of the loop body without its `for` header and a
    # CONCAT branch with two `else:` clauses (a SyntaxError). Reconstructed
    # from the intact copy below the corruption.
    # Memoize visited states so mutual recursion terminates on cyclic graphs.
    if (u, start_dim, total_dim, n_add) in self.vis:
        return
    self.vis[(u, start_dim, total_dim, n_add)] = True

    # Downstream pass: widen the input side of every layer fed by node u.
    for v, layer_id in self.adj_list[u]:
        layer = self.layer_list[layer_id]
        if is_layer(layer, LayerType.CONV):
            new_layer = wider_next_conv(layer, start_dim, total_dim, n_add, self.weighted)
            self._replace_layer(layer_id, new_layer)
        elif is_layer(layer, LayerType.DENSE):
            new_layer = wider_next_dense(layer, start_dim, total_dim, n_add, self.weighted)
            self._replace_layer(layer_id, new_layer)
        elif is_layer(layer, LayerType.BATCH_NORM):
            new_layer = wider_bn(layer, start_dim, total_dim, n_add, self.weighted)
            self._replace_layer(layer_id, new_layer)
            # Batch-norm does not absorb the widening; keep searching downstream.
            self._search(v, start_dim, total_dim, n_add)
        elif is_layer(layer, LayerType.CONCAT):
            if self.layer_id_to_input_node_ids[layer_id][1] == u:
                # u is on the right of the concat: shift the insert position
                # and the total width by the width of the left input.
                left_dim = self._upper_layer_width(self.layer_id_to_input_node_ids[layer_id][0])
                next_start_dim = start_dim + left_dim
                next_total_dim = total_dim + left_dim
            else:
                # u is on the left: insert position is unchanged, total width
                # grows by the width of the right input.
                next_start_dim = start_dim
                next_total_dim = total_dim + self._upper_layer_width(self.layer_id_to_input_node_ids[layer_id][1])
            self._search(v, next_start_dim, next_total_dim, n_add)
        else:
            self._search(v, start_dim, total_dim, n_add)

    # Upstream pass: widen the output side of every layer feeding node u.
    for v, layer_id in self.reverse_adj_list[u]:
        layer = self.layer_list[layer_id]
        if is_layer(layer, LayerType.CONV):
            new_layer = wider_pre_conv(layer, n_add, self.weighted)
            self._replace_layer(layer_id, new_layer)
        elif is_layer(layer, LayerType.DENSE):
            new_layer = wider_pre_dense(layer, n_add, self.weighted)
            self._replace_layer(layer_id, new_layer)
        elif is_layer(layer, LayerType.CONCAT):
            continue
        else:
            self._search(v, start_dim, total_dim, n_add)