How to use the sparse.util.makedirs function in sparse

To help you get started, we’ve selected a few sparse examples based on popular ways the library is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github MaestroGraph / sparse-hyper / experiments / identity.py View on Github external
# NOTE(review): fragment scraped mid-function -- the enclosing `def` and the
# closing parenthesis of the sparse.NASLayer(...) call lie outside this snippet.
iterations = arg.iterations if arg.iterations is not None else arg.size * 3000
    # Number of extra sampled index tuples; defaults to n * floor(log2(n)).
    additional = arg.additional if arg.additional is not None else int(np.floor(np.log2(arg.size)) * arg.size)

    torch.manual_seed(arg.seed)

    # One logged data point every `dot_every` iterations.
    ndots = iterations // arg.dot_every

    # Result matrix: one row per repeat, one column per logged point.
    results = np.zeros((arg.reps, ndots))

    print('Starting size {} with {} additional samples (reinforce={})'.format(arg.size, additional, arg.reinforce))
    w = None
    for r in range(arg.reps):
        print('repeat {} of {}'.format(r, arg.reps))

        # Per-repeat output and tensorboard directories (util.makedirs is the
        # helper this example page illustrates; presumably an idempotent mkdir).
        util.makedirs('./identity/{}'.format(r))
        util.makedirs('./runs/identity/{}'.format(r))

        # Close the previous repeat's writer before opening a fresh one.
        if w is not None:
            w.close()
        w = SummaryWriter(log_dir='./runs/identity/{}/'.format(r))

        # Input and output of the identity task share the same 1-D shape.
        SHAPE = (arg.size,)

        if not arg.reinforce:
            # Backprop-based sparse layer (the REINFORCE branch is not shown here).
            model = sparse.NASLayer(
                SHAPE, SHAPE,
                k=arg.size,
                gadditional=additional,
                sigma_scale=arg.sigma_scale,
                has_bias=False,
                fix_values=arg.fix_values,
github MaestroGraph / sparse-hyper / experiments / identity.py View on Github external
# NOTE(review): duplicate of the previous snippet with one extra keyword
# argument visible (min_sigma); still truncated mid-call -- the enclosing
# `def` and the close of sparse.NASLayer(...) are outside this excerpt.
iterations = arg.iterations if arg.iterations is not None else arg.size * 3000
    # Extra sampled index tuples; defaults to n * floor(log2(n)).
    additional = arg.additional if arg.additional is not None else int(np.floor(np.log2(arg.size)) * arg.size)

    torch.manual_seed(arg.seed)

    # Number of logged data points over the whole run.
    ndots = iterations // arg.dot_every

    results = np.zeros((arg.reps, ndots))

    print('Starting size {} with {} additional samples (reinforce={})'.format(arg.size, additional, arg.reinforce))
    w = None
    for r in range(arg.reps):
        print('repeat {} of {}'.format(r, arg.reps))

        # Create per-repeat output directories with the util.makedirs helper.
        util.makedirs('./identity/{}'.format(r))
        util.makedirs('./runs/identity/{}'.format(r))

        # Recycle the SummaryWriter between repeats to avoid leaking handles.
        if w is not None:
            w.close()
        w = SummaryWriter(log_dir='./runs/identity/{}/'.format(r))

        SHAPE = (arg.size,)

        if not arg.reinforce:
            model = sparse.NASLayer(
                SHAPE, SHAPE,
                k=arg.size,
                gadditional=additional,
                sigma_scale=arg.sigma_scale,
                has_bias=False,
                fix_values=arg.fix_values,
                min_sigma=arg.min_sigma,
github MaestroGraph / sparse-hyper / experiments / attention.py View on Github external
# NOTE(review): fragment starts at a dangling `else:` -- the model-selection
# `if` chain and the enclosing function are outside this excerpt.
else:
        raise Exception('Model name {} not recognized'.format(arg.modelname))

    if arg.cuda:
        model.cuda()

    optimizer = optim.Adam(model.parameters(), lr=arg.lr)

    # Classification loss plus an MSE criterion (its target is not visible here).
    xent = nn.CrossEntropyLoss()
    mse = nn.MSELoss()

    step = 0

    # Accumulators -- presumably sigma/value statistics logged later; the
    # consuming code is outside this snippet.
    sigs, vals = [], []

    # Ensure the output directory exists before training starts.
    util.makedirs('./mnist/')

    for epoch in range(arg.epochs):

        model.train(True)

        for i, (inputs, labels) in tqdm(enumerate(trainloader, 0)):

            # if i> 2:
            #     break

            # Move the batch to the GPU when requested.
            if arg.cuda:
                inputs, labels = inputs.cuda(), labels.cuda()
            inputs, labels = Variable(inputs), Variable(labels)

            optimizer.zero_grad()
github MaestroGraph / sparse-hyper / experiments / transformer.py View on Github external
def go(arg):
    """Entry point of the transformer experiment (truncated in this excerpt).

    NOTE(review): the body continues past the end of this snippet.
    """

    # Create the plot output directory up front.
    util.makedirs('./transformer-plots/')

    # Negative seed means "pick one at random" (and print it for reproducibility);
    # otherwise seed torch deterministically.
    if arg.seed < 0:
        seed = random.randint(0, 1000000)
        print('random seed: ', seed)
    else:
        torch.manual_seed(arg.seed)

    dv = 'cuda' if arg.cuda else 'cpu'

    tbw = SummaryWriter(log_dir=arg.tb_dir)

    # load the data
    data_train, data_val, data_test = enwik8(arg.data)
    # Only evaluate on the real test split for final runs; otherwise validate.
    data_test = data_test if arg.final else data_val

    # create the model
github MaestroGraph / sparse-hyper / experiments / bias.py View on Github external
def go(arg):
    """Entry point of the bias experiment (truncated in this excerpt).

    NOTE(review): the body continues past the end of this snippet.
    """

    # `bins` may arrive as a string; keep the original value when it is not
    # a plain integer (e.g. a named binning strategy).
    try:
        arg.bins = int(arg.bins)
    except ValueError:
        pass

    # Ensure the output directory exists before checking for cached results.
    util.makedirs('./bias/')

    # Only recompute when no cached result file is present.
    if not os.path.exists('./bias/cached.npz'):

        # Negative seed means "pick one at random" and print it for reproducibility.
        if arg.seed < 0:
            seed = random.randint(0, 1000000)
            print('random seed: ', seed)
        else:
            torch.manual_seed(arg.seed)

        tbw = SummaryWriter(log_dir=arg.tb_dir)
        tfms = transforms.Compose([transforms.ToTensor()])

        if (arg.task == 'mnist'):

            # Single-channel 28x28 images, ten digit classes.
            shape = (1, 28, 28)
            num_classes = 10
github MaestroGraph / sparse-hyper / scripts / generate.mnist.py View on Github external
def go(arg):
    """Generate a rescaled MNIST dataset on disk (truncated in this excerpt).

    NOTE(review): the loop body continues past the end of this snippet.
    """

    # make directories
    # One output directory per digit class, for both train and test splits.
    for i in range(10):
        util.makedirs('./mnist-rsc/train/{}/'.format(i))
        util.makedirs('./mnist-rsc/test/{}/'.format(i))

    train = torchvision.datasets.MNIST(root=arg.data, train=True, download=True, transform=ToTensor())
    trainloader = torch.utils.data.DataLoader(train, batch_size=arg.batch, shuffle=True, num_workers=2)

    test = torchvision.datasets.MNIST(root=arg.data, train=False, download=True, transform=ToTensor())
    testloader = torch.utils.data.DataLoader(test, batch_size=arg.batch, shuffle=True, num_workers=2)

    # Tracks how many images have been written per class so far
    # (presumably used to build unique filenames -- not visible here).
    indices = Counter()

    for images, labels in tqdm(trainloader):

        batch_size = labels.size(0)

        # Transform and save each image of the batch individually.
        for b in range(batch_size):
            image = make_image(b, images, res=arg.res, noise=arg.noise, scale=arg.scale)
            label = int(labels[b].item())
github MaestroGraph / sparse-hyper / experiments / attention.py View on Github external
# NOTE(review): duplicate of the earlier attention.py snippet; it starts at a
# dangling `else:` and the enclosing function is outside this excerpt.
else:
        raise Exception('Model name {} not recognized'.format(arg.modelname))

    if arg.cuda:
        model.cuda()

    optimizer = optim.Adam(model.parameters(), lr=arg.lr)

    # Classification loss plus an MSE criterion (target not visible here).
    xent = nn.CrossEntropyLoss()
    mse = nn.MSELoss()

    step = 0

    # Accumulators whose consumers are outside this snippet.
    sigs, vals = [], []

    # Ensure the output directory exists before training starts.
    util.makedirs('./mnist/')

    for epoch in range(arg.epochs):

        model.train(True)

        for i, (inputs, labels) in tqdm(enumerate(trainloader, 0)):

            # if i> 2:
            #     break

            # Move the batch to the GPU when requested.
            if arg.cuda:
                inputs, labels = inputs.cuda(), labels.cuda()
            inputs, labels = Variable(inputs), Variable(labels)

            optimizer.zero_grad()