How to use the sparse.util.Flatten function in sparse

To help you get started, we've selected a few sparse examples based on popular ways it is used in public projects.
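
Every snippet on this page comes from the MaestroGraph/sparse-hyper experiments, where util.Flatten is dropped into an nn.Sequential just before the first nn.Linear layer, collapsing a convolutional feature map into a per-example vector. The sketch below shows the behaviour those examples rely on; it assumes util.Flatten flattens everything except the batch dimension (equivalent to torch.nn.Flatten()), which may differ in detail from the library's actual implementation.

import torch
import torch.nn as nn

# Minimal sketch of the Flatten module the examples below rely on.
# Assumption: sparse's util.Flatten keeps the batch dimension and
# collapses the rest, like torch.nn.Flatten(); the real implementation
# may differ in detail.
class Flatten(nn.Module):
    def forward(self, x):
        return x.view(x.size(0), -1)

model = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(2),
    Flatten(),                    # (batch, 16, 14, 14) -> (batch, 16 * 14 * 14)
    nn.Linear(16 * 14 * 14, 10),
)

out = model(torch.randn(8, 3, 28, 28))    # out.shape == (8, 10)

The examples that follow use this pattern repeatedly: convolution and pooling layers, then util.Flatten(), then one or more nn.Linear layers.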

github MaestroGraph / sparse-hyper / experiments / attention.py
            hyperlayer,
            util.Reshape((arg.num_glimpses * shape[0], arg.k, arg.k)), # Fold glimpses into channels
            nn.Conv2d(arg.num_glimpses * shape[0], ch1, kernel_size=3, padding=1),
            activation,
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(ch1, ch2, kernel_size=3, padding=1),
            activation,
            nn.Conv2d(ch2, ch2, kernel_size=3, padding=1),
            activation,
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(ch2, ch3, kernel_size=3, padding=1),
            activation,
            nn.Conv2d(ch3, ch3, kernel_size=3, padding=1),
            activation,
            nn.MaxPool2d(kernel_size=2),
            util.Flatten(),
            nn.Linear(h, 128),
            activation,
            nn.Linear(128, num_classes),
            nn.Softmax()
        )

        reinforce = False

    elif arg.modelname == 'ash-conv':
        """
        Model with a convolution head. More powerful classification, but more difficult to train on top of a hyperlayer.
        """

        hyperlayer = BoxAttentionLayer(
            glimpses=arg.num_glimpses,
            in_size=shape, k=arg.k,
github MaestroGraph / sparse-hyper / experiments / convolution.py
    def __init__(self, num_classes:int, num_groups:int=3, N:int=3, k:int=6, drop_p:float=0.0, start_nf:int=16, n_in_channels:int=3, **kwargs):
        super().__init__()

        n_channels = [start_nf]
        for i in range(num_groups): n_channels.append(start_nf*(2**i)*k)

        layers = [nn.Conv2d(n_in_channels, n_channels[0], 3, 1, padding=1)]  # conv1
        for i in range(num_groups):
            layers += _make_group(N, n_channels[i], n_channels[i+1], WideBlock, (1 if i==0 else 2), drop_p)

        layers += [
            nn.BatchNorm2d(n_channels[num_groups]), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), util.Flatten(),
            nn.Linear(n_channels[num_groups], num_classes)
        ]

        self.features = nn.Sequential(*layers)
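
In this excerpt util.Flatten() comes directly after nn.AdaptiveAvgPool2d(1), so the final nn.Linear always receives exactly n_channels[num_groups] features, whatever the input resolution. A rough sketch of that head with hypothetical channel and class counts (256 and 10 are placeholders, not values from the repository):

import torch
import torch.nn as nn

head = nn.Sequential(
    nn.BatchNorm2d(256), nn.ReLU(),
    nn.AdaptiveAvgPool2d(1),    # (batch, 256, H, W) -> (batch, 256, 1, 1)
    nn.Flatten(),               # stand-in for util.Flatten(): -> (batch, 256)
    nn.Linear(256, 10),
)

print(head(torch.randn(2, 256, 7, 7)).shape)     # torch.Size([2, 10])
print(head(torch.randn(2, 256, 14, 14)).shape)   # torch.Size([2, 10]) as well
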
github MaestroGraph / sparse-hyper / experiments / bias.py
        c, h, w = data_size
        cs = [c] + [2**(d+4) for d in range(depth)]

        div = 2 ** depth

        modules = []

        for d in range(depth):
            modules += [
                nn.Conv2d(cs[d], cs[d+1], 3, padding=1), nn.ReLU(),
                nn.Conv2d(cs[d+1], cs[d+1], 3, padding=1), nn.ReLU(),
                nn.MaxPool2d((2, 2))
            ]

        modules += [
            util.Flatten(),
            nn.Linear(cs[-1] * (h//div) * (w//div), 1024), nn.ReLU(),
            nn.Linear(1024, latent_size) # encoder produces a cont. index tuple (ln -1 for the means, 1 for the sigma)
        ]

        self.encoder = nn.Sequential(*modules)
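
Because this encoder pools with fixed MaxPool2d layers rather than adaptive pooling, the nn.Linear after util.Flatten() has to be sized by hand: each of the depth pooling stages halves the spatial dimensions, which gives cs[-1] * (h // div) * (w // div) flattened features. A quick check of that arithmetic with hypothetical input sizes:

# Hypothetical sizes for illustration: 3 x 64 x 64 inputs, depth = 3.
c, h, w = 3, 64, 64
depth = 3

cs = [c] + [2 ** (d + 4) for d in range(depth)]    # [3, 16, 32, 64]
div = 2 ** depth                                   # each MaxPool2d((2, 2)) halves h and w

flat_features = cs[-1] * (h // div) * (w // div)   # 64 * 8 * 8 = 4096
print(flat_features)   # in_features of the nn.Linear(flat_features, 1024) that follows util.Flatten()
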
github MaestroGraph / sparse-hyper / experiments / attention.py
        activation,
        nn.MaxPool2d(kernel_size=pool),
        CRest(ch1, ch1, kernel_size=3, padding=1),
        activation,
        nn.MaxPool2d(kernel_size=pool),
        CRest(ch1, ch2, kernel_size=3, padding=1),
        activation,
        nn.MaxPool2d(kernel_size=pool),
        CRest(ch2, ch2, kernel_size=3, padding=1),
        activation,
        nn.MaxPool2d(kernel_size=2),
        CRest(ch2, ch3, kernel_size=3, padding=1),
        activation,
        CRest(ch3, ch3, kernel_size=3, padding=1),
        activation,
        util.Flatten(),
        nn.Linear(hid, HIDLIN),
        activation,
        nn.Linear(HIDLIN, HIDLIN)
    ]
github MaestroGraph / sparse-hyper / experiments / attention.py
            base +
            [activation, nn.Linear(HIDLIN, num_classes),
            nn.Softmax()])
        )

        reinforce = False

    elif arg.modelname == 'reinforce':

        hyperlayer = ReinforceLayer(in_shape=shape, glimpses=arg.num_glimpses,
                glimpse_size=(28, 28),
                num_classes=num_classes, pool=arg.pool)

        model = nn.Sequential(
             hyperlayer,
             R(util.Flatten()),
             R(nn.Linear(28 * 28 * shape[0] * arg.num_glimpses, arg.hidden)),
             R(activation),
             R(nn.Linear(arg.hidden, num_classes)),
             R(nn.Softmax())
        )

        reinforce = True

    elif arg.modelname == 'ash':

        hyperlayer = BoxAttentionLayer(
            glimpses=arg.num_glimpses,
            in_size=shape, k=arg.k,
            gadditional=arg.gadditional, radditional=arg.radditional, region=(arg.region, arg.region),
            min_sigma=arg.min_sigma, pool=arg.pool
        )
github MaestroGraph / sparse-hyper / experiments / memory.py
        cs = [c] + [2**(d+4) for d in range(depth)]

        div = 2 ** depth

        modules = []

        for d in range(depth):
            modules += [
                nn.Conv2d(cs[d], cs[d+1], 3, padding=1), nn.ReLU(),
                nn.Conv2d(cs[d+1], cs[d+1], 3, padding=1), nn.ReLU(),
                nn.MaxPool2d((2, 2))
            ]

        modules += [
            util.Flatten(),
            nn.Linear(cs[-1] * (h//div) * (w//div), 1024), nn.ReLU(),
            nn.Linear(1024, len(latent_size)) # encoder produces a cont. index tuple (ln -1 for the means, 1 for the sigma)
        ]

        self.encoder = nn.Sequential(*modules)

        upmode = 'bilinear'
        cl = lambda x : int(math.ceil(x))



        modules = [
            nn.Linear(emb_size, cs[-1] * cl(h/div) * cl(w/div)), nn.ReLU(),
            util.Reshape( (cs[-1], cl(h/div), cl(w/div)) )
        ]
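
On the decoder side of this excerpt, util.Reshape plays the inverse role of util.Flatten: a linear layer expands the embedding and Reshape folds it back into a (channels, height, width) feature map. A rough round-trip sketch with hypothetical sizes, assuming both helpers are thin wrappers around Tensor.view:

import torch
import torch.nn as nn

class Flatten(nn.Module):           # assumed behaviour of util.Flatten
    def forward(self, x):
        return x.view(x.size(0), -1)

class Reshape(nn.Module):           # assumed behaviour of util.Reshape
    def __init__(self, shape):
        super().__init__()
        self.shape = shape
    def forward(self, x):
        return x.view(x.size(0), *self.shape)

# Hypothetical sizes: a 64-channel 4x4 feature map and a 32-dimensional embedding.
encode_tail = nn.Sequential(Flatten(), nn.Linear(64 * 4 * 4, 32))
decode_head = nn.Sequential(nn.Linear(32, 64 * 4 * 4), Reshape((64, 4, 4)))

z = encode_tail(torch.randn(2, 64, 4, 4))   # (2, 32)
x = decode_head(z)                          # back to (2, 64, 4, 4)
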
github MaestroGraph / sparse-hyper / experiments / attention.py
            base +
            [activation, nn.Linear(HIDLIN, num_classes),
            nn.Softmax()])
        )

        reinforce = False

    elif arg.modelname == 'reinforce':

        hyperlayer = ReinforceLayer(in_shape=shape, glimpses=arg.num_glimpses,
                glimpse_size=(28, 28),
                num_classes=num_classes)

        model = nn.Sequential(
             hyperlayer,
             R(util.Flatten()),
             R(nn.Linear(28 * 28 * shape[0] * arg.num_glimpses, arg.hidden)),
             R(activation),
             R(nn.Linear(arg.hidden, num_classes)),
             R(nn.Softmax())
        )

        reinforce = True

    elif arg.modelname == 'ash':

        hyperlayer = BoxAttentionLayer(
            glimpses=arg.num_glimpses,
            in_size=shape, k=arg.k,
            gadditional=arg.gadditional, radditional=arg.radditional, region=(arg.region, arg.region),
            min_sigma=arg.min_sigma
        )