How to use the batchflow.models.eager_torch.utils.get_shape function in batchflow

To help you get started, we've selected a few batchflow examples based on popular ways get_shape is used in public projects.

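Before the project snippets, here is a minimal, hypothetical sketch of the call itself. It assumes get_shape accepts a torch.Tensor and returns its shape as a plain tuple with the batch dimension first, which matches how the results are sliced and unpacked in the snippets below.

import torch
from batchflow.models.eager_torch.utils import get_shape

inputs = torch.zeros(8, 3, 64, 64)         # batch of 8 three-channel 64x64 tensors
shape = get_shape(inputs)                  # assumed to be (8, 3, 64, 64)

# The snippets below often blank out the batch dimension before comparing shapes
shape_without_batch = (None, *shape[1:])   # (None, 3, 64, 64)
print(shape, shape_without_batch)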

github analysiscenter / batchflow / batchflow / models / eager_torch / layers / resize.py View on Github external
def spatial_resize(self, inputs):
    """ Force the same shapes of the inputs, if needed. """
    shape_ = get_shape(inputs[0])
    dim_ = get_num_dims(inputs[0])
    spatial_shape_ = shape_[-dim_:]

    resized = []
    for item in inputs:
        shape = get_shape(item)
        dim = get_num_dims(item)
        spatial_shape = shape[-dim:]
        if dim > 0 and spatial_shape_ != tuple([1]*dim) and spatial_shape != spatial_shape_:
            item = Crop(inputs[0])(item)
        resized.append(item)
    return resized
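The same spatial-alignment idea can be sketched with plain torch slicing; the helpers below (spatial_shape, align_spatial) are hypothetical stand-ins, not part of batchflow.

import torch

def spatial_shape(tensor):
    """ Hypothetical helper: the spatial part of a tensor's shape (everything after batch and channels). """
    return tuple(tensor.shape[2:])

def align_spatial(inputs):
    """ Crop every tensor down to the spatial shape of the first one. """
    target = spatial_shape(inputs[0])
    resized = []
    for item in inputs:
        if spatial_shape(item) != target:
            slices = [slice(None), slice(None)] + [slice(0, size) for size in target]
            item = item[tuple(slices)]
        resized.append(item)
    return resized

a = torch.zeros(1, 4, 32, 32)
b = torch.zeros(1, 4, 34, 34)
a2, b2 = align_spatial([a, b])
assert a2.shape == b2.shape    # both are now (1, 4, 32, 32)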
github analysiscenter / batchflow / batchflow / models / eager_torch / layers / conv_block.py View on Github external
def __init__(self, *args, inputs=None, base_block=BaseConvBlock, n_repeats=1, n_branches=1, combine='+', **kwargs):
    super().__init__()
    base_block = kwargs.pop('base', None) or base_block
    self.input_shape, self.device = get_shape(inputs), inputs.device
    self.n_repeats, self.n_branches = n_repeats, n_branches
    self.base_block, self.combine = base_block, combine
    self.args, self.kwargs = args, kwargs

    self._make_modules(inputs)
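The constructor above caches the example input's shape and device so submodules can be built and placed right away. A self-contained sketch of that pattern (ShapeAwareBlock is hypothetical, not ConvBlock itself):

import torch
import torch.nn as nn

class ShapeAwareBlock(nn.Module):
    def __init__(self, inputs, out_channels=16):
        super().__init__()
        self.input_shape = tuple(inputs.shape)   # stand-in for get_shape(inputs)
        self.device = inputs.device
        in_channels = self.input_shape[1]
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1).to(self.device)

    def forward(self, x):
        return self.conv(x)

example = torch.zeros(2, 3, 32, 32)
block = ShapeAwareBlock(example)
print(block.input_shape, block(example).shape)   # (2, 3, 32, 32) torch.Size([2, 16, 32, 32])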
github analysiscenter / batchflow / batchflow / models / eager_torch / layers / conv_block.py View on Github external
layer = layer_class(**args).to(device)
                    skip = layer(inputs)
                    residuals.append(skip)

                    layer_desc = 'Layer {}, skip-letter "{}"; {} -> {}'.format(i, letter,
                                                                               get_shape(inputs),
                                                                               get_shape(skip))
                    layer = nn.Sequential(OrderedDict([(layer_desc, layer)]))
                    skip_modules.append(layer)

                elif letter in self.COMBINE_LETTERS:
                    args = self.fill_layer_params(layer_name, layer_class, inputs, layout_dict[letter_group])
                    args = {**args, 'inputs': [residuals.pop(), inputs], 'op': letter}
                    layer = layer_class(**args).to(device)
                    shape_before = get_shape(inputs)
                    inputs = layer(args['inputs'])
                    shape_after = get_shape(inputs)

                    shape_before, shape_after = (None, *shape_before[1:]), (None, *shape_after[1:])
                    layer_desc = 'Layer {}: combine; {} -> {}'.format(i, shape_before, shape_after)
                    layer = nn.Sequential(OrderedDict([(layer_desc, layer)]))
                    combine_modules.append(layer)
            else:
                layer_args = self.kwargs.get(layer_name, {})
                skip_layer = layer_args is False \
                             or isinstance(layer_args, dict) and layer_args.get('disable', False)

                # Create params for the layer call
                if skip_layer:
                    pass
                elif letter in self.DEFAULT_LETTERS:
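In the excerpt above, get_shape is used purely for bookkeeping: it records tensor shapes before and after each skip or combine step so the resulting module names are self-describing. A plain-torch sketch of that bookkeeping (the helper functions are hypothetical):

import torch

residuals = []

def apply_skip(inputs):
    """ Stash a skip tensor and describe its shapes. """
    skip = inputs                                    # identity skip, for the sketch only
    residuals.append(skip)
    desc = 'skip; {} -> {}'.format(tuple(inputs.shape), tuple(skip.shape))
    return inputs, desc

def apply_combine(inputs):
    """ Pop the last skip tensor, add it to the main branch, describe the shapes. """
    shape_before = tuple(inputs.shape)
    outputs = residuals.pop() + inputs               # '+' combine op
    shape_after = tuple(outputs.shape)
    desc = 'combine; {} -> {}'.format((None, *shape_before[1:]), (None, *shape_after[1:]))
    return outputs, desc

x = torch.ones(2, 8, 16, 16)
x, skip_desc = apply_skip(x)
x, combine_desc = apply_combine(x)
print(skip_desc)      # skip; (2, 8, 16, 16) -> (2, 8, 16, 16)
print(combine_desc)   # combine; (None, 8, 16, 16) -> (None, 8, 16, 16)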
github analysiscenter / batchflow / batchflow / models / eager_torch / layers / conv_block.py View on Github external
if letter in self.SKIP_LETTERS + self.COMBINE_LETTERS:
                if len(layers) >= 1:
                    module_layout += '_'
                    block_modules.append(nn.Sequential(OrderedDict(layers)))
                    layers = []
                module_layout += letter

                if letter in self.SKIP_LETTERS:
                    args = self.fill_layer_params(layer_name, layer_class, inputs, layout_dict[letter_group])

                    layer = layer_class(**args).to(device)
                    skip = layer(inputs)
                    residuals.append(skip)

                    layer_desc = 'Layer {}, skip-letter "{}"; {} -> {}'.format(i, letter,
                                                                               get_shape(inputs),
                                                                               get_shape(skip))
                    layer = nn.Sequential(OrderedDict([(layer_desc, layer)]))
                    skip_modules.append(layer)

                elif letter in self.COMBINE_LETTERS:
                    args = self.fill_layer_params(layer_name, layer_class, inputs, layout_dict[letter_group])
                    args = {**args, 'inputs': [residuals.pop(), inputs], 'op': letter}
                    layer = layer_class(**args).to(device)
                    shape_before = get_shape(inputs)
                    inputs = layer(args['inputs'])
                    shape_after = get_shape(inputs)

                    shape_before, shape_after = (None, *shape_before[1:]), (None, *shape_after[1:])
                    layer_desc = 'Layer {}: combine; {} -> {}'.format(i, shape_before, shape_after)
                    layer = nn.Sequential(OrderedDict([(layer_desc, layer)]))
                    combine_modules.append(layer)
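The shape strings built from get_shape end up as submodule names: wrapping a layer in nn.Sequential(OrderedDict([(desc, layer)])) makes the description show up in repr(model). A minimal, self-contained illustration:

from collections import OrderedDict

import torch
import torch.nn as nn

inputs = torch.zeros(2, 3, 28, 28)
layer = nn.Conv2d(3, 16, kernel_size=3, padding=1)
outputs = layer(inputs)

# The name embeds input and output shapes, just like the layer_desc strings above
desc = 'Layer 0; {} -> {}'.format(tuple(inputs.shape), tuple(outputs.shape))
named = nn.Sequential(OrderedDict([(desc, layer)]))
print(named)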
github analysiscenter / batchflow / batchflow / models / eager_torch / layers / resize.py View on Github external
shape = [slice(None, None)] * len(i_shape)
                shape[i + 2] = slice(None, r_shape_)
                output = output[shape]
            elif i_shape_ < r_shape_:
                # Increase input tensor's shape by zero padding
                zeros_shape = list(i_shape)
                zeros_shape[i + 2] = r_shape_
                zeros = torch.zeros(zeros_shape)

                shape = [slice(None, None)] * len(i_shape)
                shape[i + 2] = slice(None, i_shape_)
                zeros[shape] = output
                output = zeros
            else:
                pass
            i_shape = get_shape(output)
        return output
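The excerpt above crops or zero-pads each spatial axis of a tensor until it matches a reference shape. A standalone sketch of that logic with plain torch (crop_or_pad is a hypothetical helper):

import torch

def crop_or_pad(output, reference_spatial_shape):
    """ Make every spatial axis of `output` match the reference size: slice it down or zero-pad it up. """
    for i, r_size in enumerate(reference_spatial_shape):
        i_shape = tuple(output.shape)
        i_size = i_shape[i + 2]                      # axis i + 2: skip batch and channel dims
        if i_size > r_size:
            index = [slice(None)] * len(i_shape)
            index[i + 2] = slice(None, r_size)       # crop the axis down
            output = output[tuple(index)]
        elif i_size < r_size:
            zeros_shape = list(i_shape)
            zeros_shape[i + 2] = r_size
            zeros = torch.zeros(zeros_shape, dtype=output.dtype, device=output.device)
            index = [slice(None)] * len(i_shape)
            index[i + 2] = slice(None, i_size)       # copy the smaller tensor into the zero tensor
            zeros[tuple(index)] = output
            output = zeros
    return output

x = torch.ones(1, 2, 30, 34)
print(crop_or_pad(x, (32, 32)).shape)   # torch.Size([1, 2, 32, 32])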
github analysiscenter / batchflow / batchflow / models / eager_torch / encoder_decoder.py View on Github external
def head(cls, inputs, target_shape, classes, **kwargs):
    kwargs = cls.get_defaults('head', kwargs)
    layers = []
    layer = super().head(inputs, target_shape, classes, **kwargs)
    if layer is not None:
        inputs = layer(inputs)
        layers.append(layer)

    if target_shape:
        if get_shape(inputs) != target_shape:
            layer = Crop(resize_to=target_shape)
            inputs = layer(inputs)
            layers.append(layer)

            if get_shape(inputs)[1] != classes:
                layer = ConvBlock(inputs=inputs, layout='c', filters=classes, kernel_size=1)
                layers.append(layer)
    return nn.Sequential(*layers)
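Here get_shape checks whether the network output already matches the requested spatial shape and the number of classes. The channel-matching part can be sketched on its own (make_head is hypothetical, not EncoderDecoder's API):

import torch
import torch.nn as nn

def make_head(inputs, classes):
    """ Append a 1x1 convolution when the channel count differs from the number of classes. """
    layers = []
    if inputs.shape[1] != classes:                   # channel axis vs. number of classes
        layers.append(nn.Conv2d(inputs.shape[1], classes, kernel_size=1))
    return nn.Sequential(*layers)

features = torch.zeros(2, 64, 32, 32)
head = make_head(features, classes=10)
print(head(features).shape)   # torch.Size([2, 10, 32, 32])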