How to use the `sparse.NASLayer` function from the sparse package

To help you get started, we’ve selected a few sparse examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

Source: MaestroGraph/sparse-hyper — experiments/sparsity.py (view on GitHub)
one = nn.Linear(util.prod(insize), arg.hidden)
        two = nn.Linear(arg.hidden, numcls)

        model = nn.Sequential(
            util.Flatten(),
            one, nn.Sigmoid(),
            two, nn.Softmax()
        )

    elif arg.method == 'nas':

        rng = (
            min(arg.hidden, arg.range), 1,
            arg.range, arg.range)

        one = NASLayer(
            in_size=insize, out_size=(arg.hidden,), k=points,
            fix_values=arg.fix_values,
            gadditional=arg.gadditional, radditional=arg.radditional, region=rng, has_bias=True,
            min_sigma=arg.min_sigma
        )

        two = nn.Linear(arg.hidden, numcls)

        model = nn.Sequential(
            one, nn.Sigmoid(),
            two, nn.Softmax()
        )
    elif arg.method == 'nas-temp':
        """
        Templated NAS model. Fixed in one dimension 
        """
Source: MaestroGraph/sparse-hyper — experiments/sparsity.py (view on GitHub)
one, nn.Sigmoid(),
            two, nn.Softmax()
        )
    elif arg.method == 'nas-temp':
        """
        Templated NAS model. Fixed in one dimension 
        """

        rng = (arg.range, arg.range)

        h, c = arg.hidden, arg.control+1

        template = torch.arange(h, dtype=torch.long)[:, None].expand(h, c).contiguous().view(h*c, 1)
        template = torch.cat([template, torch.zeros(h*c, 3, dtype=torch.long)], dim=1)

        one = NASLayer(
            in_size=insize, out_size=(arg.hidden,), k=points,
            gadditional=arg.gadditional, radditional=arg.radditional, region=rng, has_bias=True,
            fix_values=arg.fix_values,
            min_sigma=arg.min_sigma,
            template=template,
            learn_cols=(2, 3),
            chunk_size=c
        )

        two = nn.Linear(arg.hidden, numcls)

        model = nn.Sequential(
            one, nn.Sigmoid(),
            two, nn.Softmax()
        )
Source: MaestroGraph/sparse-hyper — experiments/sparsity-mlp.py (view on GitHub)
in_size=(h1,), out_size=(h2,), k=h2*c,
            gadditional=arg.gadditional[1], radditional=arg.radditional[1], region=rng, has_bias=True,
            fix_values=arg.fix_values,
            min_sigma=arg.min_sigma,
            template=template,
            learn_cols=(1,),
            chunk_size=c
        )

        rng = getrng(arg.range[2], (h2, ))
        c = arg.k[2]

        template = torch.arange(numcls, dtype=torch.long)[:, None].expand(numcls, c).contiguous().view(numcls * c, 1)
        template = torch.cat([template, torch.zeros(numcls*c, 1, dtype=torch.long)], dim=1)

        three = NASLayer(
            in_size=(h2,), out_size=(numcls,), k=numcls*c,
            gadditional=arg.gadditional[2], radditional=arg.radditional[2], region=rng, has_bias=True,
            fix_values=arg.fix_values,
            min_sigma=arg.min_sigma,
            template=template,
            learn_cols=(1,),
            chunk_size=c
        )

        model = nn.Sequential(
            one, nn.Sigmoid(),
            two, nn.Sigmoid(),
            three, nn.Softmax(),
        )
    elif arg.method == 'nas-conv':
        """
Source: MaestroGraph/sparse-hyper — experiments/sparsity-mlp.py (view on GitHub)
in_size=insize, out_size=(h1,), k=h1*c,
            gadditional=arg.gadditional[0], radditional=arg.radditional[0], region=rng, has_bias=True,
            fix_values=arg.fix_values,
            min_sigma=arg.min_sigma,
            template=template,
            learn_cols=(1, 2, 3) if insize[0] > 1 else (2, 3),
            chunk_size=c
        )

        rng = getrng(arg.range[1], (h1, ))
        c = arg.k[1]

        template = torch.arange(h2, dtype=torch.long)[:, None].expand(h2, c).contiguous().view(h2 * c, 1)
        template = torch.cat([template, torch.zeros(h2*c, 1, dtype=torch.long)], dim=1)

        two = NASLayer(
            in_size=(h1,), out_size=(h2,), k=h2*c,
            gadditional=arg.gadditional[1], radditional=arg.radditional[1], region=rng, has_bias=True,
            fix_values=arg.fix_values,
            min_sigma=arg.min_sigma,
            template=template,
            learn_cols=(1,),
            chunk_size=c
        )

        rng = getrng(arg.range[2], (h2, ))
        c = arg.k[2]

        template = torch.arange(numcls, dtype=torch.long)[:, None].expand(numcls, c).contiguous().view(numcls * c, 1)
        template = torch.cat([template, torch.zeros(numcls*c, 1, dtype=torch.long)], dim=1)

        three = NASLayer(
Source: MaestroGraph/sparse-hyper — experiments/sparsity-mlp.py (view on GitHub)
c = arg.k[1]

        two = NASLayer(
            in_size=(h1,), out_size=(h2,), k=h2*c,
            gadditional=arg.gadditional[1], radditional=arg.radditional[1], region=rng, has_bias=True,
            fix_values=arg.fix_values,
            min_sigma=arg.min_sigma,
            template=None,
            learn_cols=None,
            chunk_size=c
        )

        rng = getrng(arg.range[2], (numcls, h2))
        c = arg.k[2]

        three = NASLayer(
            in_size=(h2,), out_size=(numcls,), k=numcls*c,
            gadditional=arg.gadditional[2], radditional=arg.radditional[2], region=rng, has_bias=True,
            fix_values=arg.fix_values,
            min_sigma=arg.min_sigma,
            template=None,
            learn_cols=None,
            chunk_size=c
        )

        model = nn.Sequential(
            one, nn.Sigmoid(),
            two, nn.Sigmoid(),
            three, nn.Softmax(),
        )

    elif arg.method == 'nas-temp':