How to use the batchflow.models.tf.TFModel.default_config function in batchflow

To help you get started, we’ve selected a few batchflow examples based on popular ways the library is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github analysiscenter / batchflow / batchflow / models / tf / linknet.py View on Github external
def default_config(cls):
        """Assemble the default LinkNet configuration.

        Starts from ``TFModel.default_config`` and fills in the initial block,
        body (number of blocks and upsampling op), head and loss defaults.

        Returns
        -------
        config
            The config object produced by ``TFModel.default_config`` with
            LinkNet-specific entries set.
        """
        config = TFModel.default_config()

        initial_filters = 64  # filter count in the very first block

        config['initial_block'] = {'layout': 'cnap', 'filters': initial_filters,
                                   'kernel_size': 7, 'strides': 2,
                                   'pool_size': 3, 'pool_strides': 2}
        config['body/num_blocks'] = 4
        config['body/upsample'] = {'layout': 'tna', 'factor': 2, 'kernel_size': 3}

        config['head/filters'] = initial_filters // 2
        config['head/upsample1'] = {'layout': 'tna cna', 'factor': 2,
                                    'kernel_size': 3, 'strides': [2, 1]}
        config['head/upsample2'] = {'layout': 't', 'factor': 2}

        config['loss'] = 'ce'
        return config
github analysiscenter / batchflow / batchflow / models / tf / densenet.py View on Github external
def default_config(cls):
        """Assemble the default DenseNet configuration.

        Starts from ``TFModel.default_config`` and sets the initial block,
        dense-block and transition-layer parameters, head, loss and the
        optimizer/decay schedule.

        Returns
        -------
        config
            The config object produced by ``TFModel.default_config`` with
            DenseNet-specific entries set.
        """
        config = TFModel.default_config()
        config['common/conv/use_bias'] = False
        config['initial_block'] = dict(layout='cnap', filters=16, kernel_size=7, strides=2,
                                       pool_size=3, pool_strides=2)
        config['body/block'] = dict(layout='nacd', dropout_rate=.2, growth_rate=32, bottleneck=True, skip=True)
        config['body/transition_layer'] = dict(layout='nacv', kernel_size=1, strides=1,
                                               pool_size=2, pool_strides=2, reduction_factor=1)
        config['head'] = dict(layout='Vf')

        config['loss'] = 'ce'
        if is_best_practice('optimizer'):
            config['optimizer'].update(name='Adam')
        else:
            lr = 1e-1
            # boundaries - the number of iterations on the 150th and 225th epochs on CIFAR with batch size=64
            config['decay'] = ('const', dict(boundaries=[117300, 175950], values=[lr, lr/10, lr/100]))
            config['optimizer'] = ('Momentum', dict(momentum=.9))
        # FIX: the method previously fell off the end (returning None), unlike every
        # sibling default_config; return the assembled config to the caller.
        # NOTE(review): the missing return may be a copy/scrape truncation — confirm
        # against the upstream densenet.py.
        return config
github analysiscenter / batchflow / batchflow / models / tf / resattention.py View on Github external
def default_config(cls):
        """Assemble the default Residual-Attention-Network configuration.

        Starts from ``TFModel.default_config`` and sets the initial block,
        trunk/mask branch parameters, head, loss and a constant-step learning
        rate decay schedule.

        Returns
        -------
        config
            The config object produced by ``TFModel.default_config`` with
            attention-network-specific entries set.
        """
        config = TFModel.default_config()

        filters = 64   # number of filters in the first block
        config['initial_block'] = dict(layout='cnap', filters=filters, kernel_size=7, strides=2,
                                       pool_size=3, pool_strides=2)

        config['body'] = dict(bottleneck=True, downsample=False)
        config['body']['trunk'] = dict(bottleneck=True, downsample=False)
        config['body']['mask'] = dict(bottleneck=True, pool_size=3, pool_strides=2)
        config['body']['mask']['upsample'] = dict(layout='b', factor=2)

        config['head']['layout'] = 'Vf'

        config['loss'] = 'ce'
        config['common'] = dict(conv=dict(use_bias=False))
        lr = 1e-4
        config['decay'] = ('const', dict(boundaries=[200000, 400000, 500000], values=[lr, lr/10, lr/100, lr/1000]))
        # FIX: the method previously fell off the end (returning None), unlike every
        # sibling default_config; return the assembled config to the caller.
        # NOTE(review): the missing return may be a copy/scrape truncation — confirm
        # against the upstream resattention.py.
        return config
github analysiscenter / batchflow / batchflow / models / tf / encoder_decoder.py View on Github external
def default_config(cls):
        """Assemble the default encoder-decoder configuration.

        Starts from ``TFModel.default_config`` and declares the encoder,
        embedding and decoder sections, each built around ``cls.block``.

        Returns
        -------
        config
            The config object produced by ``TFModel.default_config`` with
            encoder/embedding/decoder entries set.
        """
        config = TFModel.default_config()

        # Encoder: per-stage order plus downsampling and block definitions.
        config['body/encoder'] = {'base': None, 'num_stages': None,
                                  'order': ['skip', 'block', 'downsampling']}
        config['body/encoder/downsample'] = {'layout': 'p', 'pool_size': 2, 'pool_strides': 2}
        config['body/encoder/blocks'] = {'base': cls.block}

        # Bottleneck embedding between the two halves.
        config['body/embedding'] = {'base': cls.block}

        # Decoder: mirrored stages with skip connections merged by concat.
        config['body/decoder'] = {'skip': True, 'num_stages': None, 'factor': None,
                                  'order': ['upsampling', 'block', 'combine']}
        config['body/decoder/upsample'] = {'layout': 'tna'}
        config['body/decoder/blocks'] = {'base': cls.block, 'combine_op': 'concat'}
        return config
github analysiscenter / batchflow / batchflow / models / tf / unet.py View on Github external
def default_config(cls):
        """Assemble the default U-Net configuration.

        Starts from ``TFModel.default_config`` and sets the encoder/decoder
        block layouts, per-level filter counts, head, loss and optimizer.

        Returns
        -------
        config
            The config object produced by ``TFModel.default_config`` with
            U-Net-specific entries set.
        """
        config = TFModel.default_config()

        config['common'] = {'conv': {'use_bias': False}}

        depth = 5
        config['body/num_blocks'] = depth
        # Filters double at every level: 64, 128, ..., 64 * 2**(depth-1).
        config['body/filters'] = [64 * 2 ** level for level in range(depth)]
        config['body/downsample'] = {'layout': 'p', 'pool_size': 2, 'pool_strides': 2}
        config['body/encoder'] = {'layout': 'cnacna', 'kernel_size': 3}
        config['body/upsample'] = {'layout': 'tna', 'kernel_size': 2, 'strides': 2}
        config['body/decoder'] = {'layout': 'cnacna', 'kernel_size': 3}
        config['head'] = {'layout': 'c', 'kernel_size': 1, 'strides': 1}

        config['loss'] = 'ce'
        # The article does not specify the initial learning rate. 1e-4 was chosen arbitrarily.
        config['optimizer'] = ('Momentum', {'learning_rate': 1e-4, 'momentum': .99})

        return config
github analysiscenter / batchflow / batchflow / models / tf / vnet.py View on Github external
def default_config(cls):
        """Assemble the default V-Net configuration.

        Starts from ``TFModel.default_config`` and sets the per-block layouts,
        doubling filter counts, upsampling op, head and loss.

        Returns
        -------
        config
            The config object produced by ``TFModel.default_config`` with
            V-Net-specific entries set.
        """
        config = TFModel.default_config()

        filters = 16   # number of filters in the first block
        # One layout string per block; derive the block count from this single list
        # instead of re-reading it back out of the config.
        layout = ['cna', 'cna'*2] + ['cna'*3] * 3
        config['body/layout'] = layout
        num_blocks = len(layout)
        # Convert to a plain list for consistency with the other models
        # (e.g. U-Net), rather than storing a numpy array in the config.
        config['body/filters'] = (2 ** np.arange(num_blocks) * filters).tolist()
        config['body/kernel_size'] = 5
        config['body/upsample'] = dict(layout='tna', factor=2)
        config['head'] = dict(layout='c', kernel_size=1)

        config['loss'] = 'ce'

        return config
github analysiscenter / batchflow / batchflow / models / tf / mobilenet.py View on Github external
def default_config(cls):
        """Assemble the default MobileNet-v2 configuration.

        Starts from ``TFModel.default_config`` and fills in the initial block,
        body layout, head, common activation (ReLU6) and loss.

        Returns
        -------
        config
            The config object produced by ``TFModel.default_config`` with
            MobileNet-specific entries set.
        """
        config = TFModel.default_config()
        config['initial_block'].update(layout='cna', filters=32, kernel_size=3, strides=2)
        config['body'].update(width_factor=1, layout=_V2_DEFAULT_BODY)
        config['head'].update(layout='cnacnV', filters=[1280, None], kernel_size=1)
        config['common'].update(activation=tf.nn.relu6)
        config['loss'] = 'ce'
        return config
github analysiscenter / batchflow / batchflow / models / tf / fcn.py View on Github external
def default_config(cls):
        """Assemble the default FCN configuration.

        Starts from ``TFModel.default_config`` and sets the VGG16 backbone,
        body/head upsampling, dropout, loss and optimizer defaults.

        Returns
        -------
        config
            The config object produced by ``TFModel.default_config`` with
            FCN-specific entries set.
        """
        config = TFModel.default_config()

        config['common/dropout_rate'] = .5
        config['initial_block/base_network'] = VGG16
        config['body/filters'] = 100
        config['body/upsample'] = {'layout': 't', 'kernel_size': 4}
        config['head/upsample'] = {'layout': 't'}

        config['loss'] = 'ce'
        config['optimizer'] = ('Momentum', {'learning_rate': 1e-4, 'momentum': .9})
        return config
github analysiscenter / batchflow / batchflow / models / tf / gcn.py View on Github external
def default_config(cls):
        """Assemble the default Global-Convolution-Network configuration.

        Starts from ``TFModel.default_config`` and sets the initial block, a
        ResNet101 encoder, GCN/BR block parameters, upsampling ops, loss and
        the optimizer (Adam under best practice, Momentum otherwise).

        Returns
        -------
        config
            The config object produced by ``TFModel.default_config`` with
            GCN-specific entries set.
        """
        config = TFModel.default_config()

        config['initial_block'] = {'layout': 'cna', 'filters': 64,
                                   'kernel_size': 7, 'strides': 2}
        config['body/encoder'] = {'base_class': ResNet101,
                                  'filters': [256, 512, 1024, 2048]}
        config['body/block'] = {'layout': 'cn cn', 'filters': 21, 'kernel_size': 11}
        config['body/res_block'] = False
        config['body/br'] = {'layout': 'ca c', 'kernel_size': 3,
                             'bottleneck': False, 'downsample': False}
        config['body/upsample'] = {'layout': 'tna', 'factor': 2, 'kernel_size': 4}

        config['head/upsample'] = {'layout': 'tna', 'factor': 2, 'kernel_size': 4}

        config['loss'] = 'ce'
        if not is_best_practice('optimizer'):
            config['optimizer'] = ('Momentum', {'learning_rate': 5e-4, 'momentum': .99})
        else:
            config['optimizer'].update(name='Adam')
        return config
github analysiscenter / batchflow / batchflow / models / tf / xception.py View on Github external
def default_config(cls):
        """Assemble the default Xception configuration.

        Starts from ``TFModel.default_config`` and declares the entry, middle
        and exit flows; stage counts and filters are left as ``None`` to be
        supplied by subclasses or the user config.

        Returns
        -------
        config
            The config object produced by ``TFModel.default_config`` with
            Xception-specific entries set.
        """
        config = TFModel.default_config()

        # Shared placeholders; each flow differs only in strides / combine op.
        common = {'num_stages': None, 'filters': None}
        config['body/entry'] = {**common, 'strides': 2, 'combine_op': 'softsum'}
        config['body/middle'] = {**common, 'strides': 1, 'combine_op': 'sum'}
        config['body/exit'] = {**common, 'strides': 1,
                               'depth_activation': True, 'combine_op': 'softsum'}
        return config