How to use the nni.networkmorphism_tuner.layers.get_conv_class function in nni

To help you get started, we’ve selected a few nni examples based on popular ways get_conv_class is used in public projects.

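All of the snippets below follow the same pattern: get_conv_class(n_dim) looks up the stub convolution class that matches the number of spatial dimensions, and the returned class is then instantiated with the input channel count, the number of filters, and a kernel size (plus an optional stride). A minimal sketch of that pattern, with the argument order taken from the calls in the snippets and the concrete values chosen purely for illustration:

from nni.networkmorphism_tuner.layers import get_conv_class

n_dim = 2                           # two spatial dimensions (e.g. image data)
conv_cls = get_conv_class(n_dim)    # the stub conv class for 2-D data

# A 1x1 convolution mapping 32 input channels to 64 filters, mirroring calls such as
# get_conv_class(self.n_dim)(filters_start, filters_end, 1) in the snippets below.
new_conv_layer = conv_cls(32, 64, 1)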

github microsoft / nni / src / sdk / pynni / nni / networkmorphism_tuner / graph.py View on Github
    def to_add_skip_model(self, start_id, end_id):
        """Add an add (element-wise sum) skip-connection between two convolutional layers.
        Args:
            start_id: The convolutional layer ID, after which to start the skip-connection.
            end_id: The convolutional layer ID, after which to end the skip-connection.
        """
        self.operation_history.append(("to_add_skip_model", start_id, end_id))
        filters_end = self.layer_list[end_id].output.shape[-1]
        filters_start = self.layer_list[start_id].output.shape[-1]
        start_node_id = self.layer_id_to_output_node_ids[start_id][0]

        pre_end_node_id = self.layer_id_to_input_node_ids[end_id][0]
        end_node_id = self.layer_id_to_output_node_ids[end_id][0]

        skip_output_id = self._insert_pooling_layer_chain(
            start_node_id, end_node_id)

        # Add a 1x1 conv layer on the skip path so its channel count matches the end layer.
        new_conv_layer = get_conv_class(self.n_dim)(filters_start, filters_end, 1)
        skip_output_id = self.add_layer(new_conv_layer, skip_output_id)

        # Add the add layer.
        add_input_node_id = self._add_node(
            deepcopy(self.node_list[end_node_id]))
        add_layer = StubAdd()

        self._redirect_edge(pre_end_node_id, end_node_id, add_input_node_id)
        self._add_edge(add_layer, add_input_node_id, end_node_id)
        self._add_edge(add_layer, skip_output_id, end_node_id)
        add_layer.input = [
            self.node_list[add_input_node_id],
            self.node_list[skip_output_id],
        ]
        add_layer.output = self.node_list[end_node_id]
github microsoft / nni / src / sdk / pynni / nni / networkmorphism_tuner / graph.py View on Github
    def _insert_pooling_layer_chain(self, start_node_id, end_node_id):
        skip_output_id = start_node_id
        for layer in self._get_pooling_layers(start_node_id, end_node_id):
            new_layer = deepcopy(layer)
            if is_layer(new_layer, "Conv"):
                filters = self.node_list[start_node_id].shape[-1]
                new_layer = get_conv_class(self.n_dim)(
                    filters, filters, 1, layer.stride)
                if self.weighted:
                    init_conv_weight(new_layer)
            else:
                new_layer = deepcopy(layer)
            skip_output_id = self.add_layer(new_layer, skip_output_id)
        skip_output_id = self.add_layer(StubReLU(), skip_output_id)
        return skip_output_id
github microsoft / nni / src / sdk / pynni / nni / networkmorphism_tuner / graph.py View on Github
        # Inside to_concat_skip_model: join the skip path to the main path with a concatenate layer.
        concat_layer = StubConcatenate()
        concat_layer.input = [
            self.node_list[concat_input_node_id],
            self.node_list[skip_output_id],
        ]
        concat_output_node_id = self._add_node(Node(concat_layer.output_shape))
        self._add_edge(
            concat_layer,
            concat_input_node_id,
            concat_output_node_id)
        self._add_edge(concat_layer, skip_output_id, concat_output_node_id)
        concat_layer.output = self.node_list[concat_output_node_id]
        self.node_list[concat_output_node_id].shape = concat_layer.output_shape

        # Add a 1x1 conv layer after the concatenation to map back to filters_end channels.
        new_conv_layer = get_conv_class(self.n_dim)(
            filters_start + filters_end, filters_end, 1
        )
        self._add_edge(new_conv_layer, concat_output_node_id, end_node_id)
        new_conv_layer.input = self.node_list[concat_output_node_id]
        new_conv_layer.output = self.node_list[end_node_id]
        self.node_list[end_node_id].shape = new_conv_layer.output_shape

        if self.weighted:
            filter_shape = (1,) * self.n_dim
            weights = np.zeros((filters_end, filters_end) + filter_shape)
            for i in range(filters_end):
                filter_weight = np.zeros((filters_end,) + filter_shape)
                center_index = (i,) + (0,) * self.n_dim
                filter_weight[center_index] = 1
                weights[i, ...] = filter_weight
            weights = np.concatenate(
                (weights, np.zeros((filters_end, filters_start) + filter_shape)), axis=1)
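
After the concatenation the main path carries filters_end + filters_start channels, and the 1x1 conv above maps them back to filters_end. The weighted branch initializes that conv so the original filters_end channels pass straight through while the skip channels start at zero, which keeps the morphed network functionally unchanged at first. A shape-only sketch of that weight construction, with hypothetical channel counts:

import numpy as np

n_dim, filters_start, filters_end = 2, 16, 32
filter_shape = (1,) * n_dim

identity_part = np.zeros((filters_end, filters_end) + filter_shape)
for i in range(filters_end):
    identity_part[(i, i) + (0,) * n_dim] = 1      # pass original channel i straight through

skip_part = np.zeros((filters_end, filters_start) + filter_shape)   # skip channels contribute nothing at first
weights = np.concatenate((identity_part, skip_part), axis=1)

print(weights.shape)   # (32, 48, 1, 1): filters_start + filters_end inputs -> filters_end outputs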
github microsoft / nni / src / sdk / pynni / nni / networkmorphism_tuner / layer_transformer.py View on Github
def deeper_conv_block(conv_layer, kernel_size, weighted=True):
    '''Build a deeper conv block (conv + batch norm) to insert after conv_layer.
    When weighted, the block is initialized to an approximate identity mapping.
    '''
    n_dim = get_n_dim(conv_layer)
    filter_shape = (kernel_size,) * 2
    n_filters = conv_layer.filters
    weight = np.zeros((n_filters, n_filters) + filter_shape)
    center = tuple(map(lambda x: int((x - 1) / 2), filter_shape))
    for i in range(n_filters):
        filter_weight = np.zeros((n_filters,) + filter_shape)
        index = (i,) + center
        filter_weight[index] = 1
        weight[i, ...] = filter_weight
    bias = np.zeros(n_filters)
    new_conv_layer = get_conv_class(n_dim)(
        conv_layer.filters, n_filters, kernel_size=kernel_size
    )
    bn = get_batch_norm_class(n_dim)(n_filters)

    if weighted:
        new_conv_layer.set_weights(
            (add_noise(weight, np.array([0, 1])),
             add_noise(bias, np.array([0, 1])))
        )
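        # Scale 1, shift 0, running mean 0, running variance 1: the batch norm starts as an identity transform.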
        new_weights = [
            add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
            add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
            add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
            add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
        ]
        bn.set_weights(new_weights)
github microsoft / nni / src / sdk / pynni / nni / networkmorphism_tuner / graph_transformer.py View on Github
def create_new_layer(layer, n_dim):
    '''Create a new layer to insert into the graph.
    '''

    input_shape = layer.output.shape
    dense_deeper_classes = [StubDense, get_dropout_class(n_dim), StubReLU]
    conv_deeper_classes = [
        get_conv_class(n_dim),
        get_batch_norm_class(n_dim),
        StubReLU]
    if is_layer(layer, "ReLU"):
        conv_deeper_classes = [
            get_conv_class(n_dim),
            get_batch_norm_class(n_dim)]
        dense_deeper_classes = [StubDense, get_dropout_class(n_dim)]
    elif is_layer(layer, "Dropout"):
        dense_deeper_classes = [StubDense, StubReLU]
    elif is_layer(layer, "BatchNormalization"):
        conv_deeper_classes = [get_conv_class(n_dim), StubReLU]

    layer_class = None
    if len(input_shape) == 1:
        # It is in the dense layer part.
        layer_class = sample(dense_deeper_classes, 1)[0]
github microsoft / nni / src / sdk / pynni / nni / networkmorphism_tuner / graph.py View on Github
"""Initializer for Graph.
        """
        self.input_shape = input_shape
        self.weighted = weighted
        self.node_list = []
        self.layer_list = []
        # node ids start from 0
        self.node_to_id = {}
        self.layer_to_id = {}
        self.layer_id_to_input_node_ids = {}
        self.layer_id_to_output_node_ids = {}
        self.adj_list = {}
        self.reverse_adj_list = {}
        self.operation_history = []
        self.n_dim = len(input_shape) - 1
        self.conv = get_conv_class(self.n_dim)
        self.batch_norm = get_batch_norm_class(self.n_dim)

        self.vis = None
        self._add_node(Node(input_shape))
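
The constructor shows where the conv class comes from: the input shape is channels-last, so the number of spatial dimensions is len(input_shape) - 1, and get_conv_class turns that into the matching stub class used throughout the graph. A small illustration of the derivation (the concrete input shape is hypothetical):

input_shape = (32, 32, 3)           # height, width, channels (channels-last)
n_dim = len(input_shape) - 1        # -> 2 spatial dimensions
conv_cls = get_conv_class(n_dim)    # the 2-D stub convolution class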
github microsoft / nni / src / sdk / pynni / nni / networkmorphism_tuner / layer_transformer.py View on Github
def wider_next_conv(layer, start_dim, total_dim, n_add, weighted=True):
    '''Widen the conv layer that follows a widened layer so it accepts n_add extra input channels.
    '''
    n_dim = get_n_dim(layer)
    if not weighted:
        return get_conv_class(n_dim)(layer.input_channel + n_add,
                                     layer.filters,
                                     kernel_size=layer.kernel_size,
                                     stride=layer.stride)
    n_filters = layer.filters
    teacher_w, teacher_b = layer.get_weights()

    new_weight_shape = list(teacher_w.shape)
    new_weight_shape[1] = n_add
    new_weight = np.zeros(tuple(new_weight_shape))

    student_w = np.concatenate((teacher_w[:, :start_dim, ...].copy(),
                                add_noise(new_weight, teacher_w),
                                teacher_w[:, start_dim:total_dim, ...].copy()), axis=1)
    new_layer = get_conv_class(n_dim)(layer.input_channel + n_add,
                                      n_filters,
                                      kernel_size=layer.kernel_size,
                                      stride=layer.stride)
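
Widening the previous layer by n_add channels means this next conv layer must accept n_add extra input channels: a zero block (with noise added via add_noise) is spliced into the weight tensor at start_dim along axis 1, the input-channel axis. A shape-only sketch of that splice with hypothetical sizes (noise omitted):

import numpy as np

n_filters, in_channels, n_add, start_dim = 8, 4, 2, 2
teacher_w = np.random.randn(n_filters, in_channels, 3, 3)   # (filters, input_channel, k, k)

new_block = np.zeros((n_filters, n_add, 3, 3))
student_w = np.concatenate(
    (teacher_w[:, :start_dim], new_block, teacher_w[:, start_dim:in_channels]), axis=1)

print(student_w.shape)   # (8, 6, 3, 3): the layer now accepts n_add extra input channels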