How to use the cupy.random.seed function in cupy

To help you get started, we’ve selected a few cupy.random.seed examples, based on popular ways it is used in public projects.

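Before diving into the project snippets, here is the basic contract of cupy.random.seed as a minimal sketch: reseeding the global random state with the same value reproduces the same draws (the shapes and seed value below are arbitrary).

import cupy as cp

# Seed, draw, reseed with the same value, draw again:
# identical seeds give identical sequences on the same device.
cp.random.seed(42)
a = cp.random.randn(3)

cp.random.seed(42)
b = cp.random.randn(3)

assert bool(cp.allclose(a, b))  # reproducible draws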

github musyoku / chainer-gqn / train_mn.py
        ELBO = log_px - kl_divergence

        # https://arxiv.org/abs/1604.08772 Section.2
        # https://www.reddit.com/r/MachineLearning/comments/56m5o2/discussion_calculation_of_bitsdims/
        bits_per_pixel = -(ELBO / num_pixels_per_batch - np.log(256)) / np.log(2)

        return ELBO, bits_per_pixel, negative_log_likelihood, kl_divergence

    #==============================================================================
    # Training iterations
    #==============================================================================
    dataset_size = len(dataset_train)
    random.seed(0)
    np.random.seed(0)
    cp.random.seed(0)

    for epoch in range(meter_train.epoch, args.epochs):
        _print("Epoch {}/{}:".format(
            epoch + 1,
            args.epochs,
        ))
        meter_train.next_epoch()

        subset_indices = list(range(len(dataset_train.subset_filenames)))
        subset_size_per_gpu = len(subset_indices) // comm.size
        if len(subset_indices) % comm.size != 0:
            subset_size_per_gpu += 1

        for subset_loop in range(subset_size_per_gpu):
            random.shuffle(subset_indices)
            subset_index = subset_indices[comm.rank]
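Seeding Python's random, NumPy, and CuPy together, as this script does, is common enough to pull into a helper. A minimal sketch (the name seed_everything is illustrative, not part of the project):

import random

import numpy as np
import cupy as cp

def seed_everything(seed: int) -> None:
    # Seed every RNG the training loop touches so runs are reproducible.
    random.seed(seed)     # Python-level shuffles (e.g. subset_indices above)
    np.random.seed(seed)  # host-side NumPy arrays
    cp.random.seed(seed)  # device-side CuPy arrays

seed_everything(0)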
github bwohlberg / sporco / tests / cupy / dictlrn / test_onlinecdl.py
    def setup_method(self, method):
        N = 16
        Nc = 3
        Nd = 5
        M = 4
        K = 3
        cp.random.seed(12345)
        self.D0 = cp.random.randn(Nd, Nd, Nc, M)
        self.S = cp.random.randn(N, N, Nc, K)
github bwohlberg / sporco / tests / cupy / admm / test_tvl2.py
    def setup_method(self, method):
        cp.random.seed(12345)
        self.D = cp.random.randn(16, 15)
github bwohlberg / sporco / tests / cupy / admm / test_bpdn.py
    def setup_method(self, method):
        cp.random.seed(12345)
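All three sporco test classes seed CuPy inside setup_method, so every test starts from identical pseudo-random fixtures regardless of execution order. A hedged pytest-style sketch of the same pattern (the class name and shapes are illustrative):

import cupy as cp

class TestSeededFixture:
    def setup_method(self, method):
        # Reseed before every test so fixtures are identical across runs.
        cp.random.seed(12345)
        self.S = cp.random.randn(16, 16)

    def test_fixture_is_deterministic(self):
        cp.random.seed(12345)
        assert bool(cp.allclose(cp.random.randn(16, 16), self.S))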
github NVIDIA / DeepLearningExamples / TensorFlow / Recommendation / NCF / ncf.py
def main():
    """
    Run training/evaluation
    """
    script_start = time.time()
    hvd_init()
    mpi_comm = MPI.COMM_WORLD
    args = parse_args()

    if hvd.rank() == 0:
        log_args(args)

    if args.seed is not None:
        tf.random.set_random_seed(args.seed)
        np.random.seed(args.seed)
        cp.random.seed(args.seed)

    if args.amp:
        os.environ["TF_ENABLE_AUTO_MIXED_PRECISION"] = "1"
    if "TF_ENABLE_AUTO_MIXED_PRECISION" in os.environ \
       and os.environ["TF_ENABLE_AUTO_MIXED_PRECISION"] == "1":
        args.fp16 = False

    # directory to store/read final checkpoint
    if args.mode == 'train' and hvd.rank() == 0:
        print("Saving best checkpoint to {}".format(args.checkpoint_dir))
    elif hvd.rank() == 0:
        print("Reading checkpoint: {}".format(args.checkpoint_dir))
    if not os.path.exists(args.checkpoint_dir) and args.checkpoint_dir != '':
        os.makedirs(args.checkpoint_dir, exist_ok=True)
    final_checkpoint_path = os.path.join(args.checkpoint_dir, 'model.ckpt')
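One multi-GPU caveat: cupy.random.seed seeds the random state of the currently active device, as far as CuPy's per-device random state goes. Under Horovod each process usually owns a single GPU, so the call above suffices; in single-process multi-GPU code you would seed each device explicitly, as in this hedged sketch:

import cupy as cp

seed = 0
for dev_id in range(cp.cuda.runtime.getDeviceCount()):
    with cp.cuda.Device(dev_id):
        # Offset by device id so each GPU gets a distinct stream.
        cp.random.seed(seed + dev_id)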
github hiroharu-kato / mesh_reconstruction / mesh_reconstruction / reconstruct.py
    # arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-eid', '--experiment_id', type=str)
    parser.add_argument('-d', '--directory', type=str, default=DIRECTORY)
    parser.add_argument('-i', '--input_image', type=str)
    parser.add_argument('-oi', '--output_image', type=str)
    parser.add_argument('-oo', '--output_obj', type=str)
    parser.add_argument('-s', '--seed', type=int, default=RANDOM_SEED)
    parser.add_argument('-g', '--gpu', type=int, default=GPU)
    args = parser.parse_args()
    directory_output = os.path.join(args.directory, args.experiment_id)

    # set random seed, gpu
    random.seed(args.seed)
    np.random.seed(args.seed)
    cp.random.seed(args.seed)
    chainer.cuda.get_device(args.gpu).use()

    # load dataset
    image_in = skimage.io.imread(args.input_image).astype('float32') / 255
    if image_in.ndim != 3 or image_in.shape[-1] != 4:
        raise Exception('Input must be an RGBA image.')
    images_in = image_in.transpose((2, 0, 1))[None, :, :, :]
    images_in = chainer.cuda.to_gpu(images_in)

    # setup model & optimizer
    model = models.Model()
    model.to_gpu()
    chainer.serializers.load_npz(os.path.join(directory_output, 'model.npz'), model)

    # reconstruct .obj
    vertices, faces = model.reconstruct(images_in)
github Pinafore / qb / qanta / buzzer / rnn_1.py
def main():
    np.random.seed(0)
    try: 
        import cupy
        cupy.random.seed(0)
    except Exception:
        pass

    option2id, all_guesses = load_quizbowl()
    train_iter = QuestionIterator(all_guesses[c.BUZZER_TRAIN_FOLD], option2id,
            batch_size=128, make_vector=dense_vector)
    dev_iter = QuestionIterator(all_guesses[c.BUZZER_DEV_FOLD], option2id,
            batch_size=128, make_vector=dense_vector)
    expo_iter = QuestionIterator(all_guesses['expo'], option2id,
            batch_size=128, make_vector=dense_vector)

    n_hidden = 300
    model_name = 'neo_1'
    model_dir = 'output/buzzer/neo/{}.npz'.format(model_name)
    model = RNN(train_iter.n_input, n_hidden, N_GUESSERS + 1)
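The try/except guard above makes CuPy seeding optional, so the same script also runs on CPU-only machines. A reusable sketch of that pattern (the helper name is illustrative):

import numpy as np

def seed_host_and_gpu(seed: int) -> None:
    # Always seed NumPy; seed CuPy only when a GPU stack is installed.
    np.random.seed(seed)
    try:
        import cupy
        cupy.random.seed(seed)
    except ImportError:
        pass  # CPU-only environment: nothing to seed on the device side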
github delira-dev / delira / delira / _backends.py
    import random
    random.seed(seed)

    if "torch" in sys.modules and "TORCH" in get_backends():
        import torch
        torch.random.manual_seed(seed)

    elif "tensorflow" in sys.modules and "TF" in get_backends():
        import tensorflow as tf
        tf.random.set_random_seed(seed)

    elif "chainer" in sys.modules and "CHAINER" in get_backends():
        try:
            import cupy
            cupy.random.seed(seed)
        except ImportError:
            pass
github rapidsai / dask-cuda / dask_cuda / benchmarks / local_cudf_merge.py
def generate_chunk(i_chunk, local_size, num_chunks, chunk_type, frac_match):
    # Set a seed that triggers the maximum amount of communication in the two-GPU case.
    cupy.random.seed(17561648246761420848)

    chunk_type = chunk_type or "build"
    frac_match = frac_match or 1.0
    if chunk_type == "build":
        # Build dataframe
        #
        # "key" column is a unique sample within [0, local_size * num_chunks)
        #
        # "shuffle" column is a random selection of partitions (used for shuffle)
        #
        # "payload" column is a random permutation of the chunk_size

        start = local_size * i_chunk
        stop = start + local_size

        parts_array = cupy.arange(num_chunks, dtype="int64")
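Note that the seed value used here lies well above 2**32. NumPy's legacy seeder rejects such values, while CuPy accepts it in this benchmark; treat the exact accepted range as CuPy-specific rather than guaranteed. A small sketch of the difference:

import numpy
import cupy

big_seed = 17561648246761420848  # the benchmark's seed, > 2**32

cupy.random.seed(big_seed)  # accepted by CuPy

try:
    numpy.random.seed(big_seed)
except ValueError:
    # numpy.random.seed requires 0 <= seed < 2**32; fold into range.
    numpy.random.seed(big_seed % (2**32))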
github knorth55 / chainer-fcis / examples / voc / train.py
    config = easydict.EasyDict(yaml.load(f))

    shutil.copy(cfgpath, osp.join(out, 'train.yaml'))

    min_size = config.min_size
    max_size = config.max_size
    random_seed = config.random_seed
    max_epoch = config.max_epoch
    lr = config.lr
    cooldown_epoch = config.cooldown_epoch
    lr_cooldown_factor = config.lr_cooldown_factor

    # set random seed
    np.random.seed(random_seed)
    cp.random.seed(random_seed)

    # dataset
    if config.use_sbd:
        dataset_class = SBDInstanceSegmentationDataset
    else:
        dataset_class = VOCInstanceSegmentationDataset
    train_dataset = dataset_class(split='train')
    test_dataset = dataset_class(split='val')
    train_dataset.aspect_grouping()

    # model
    n_class = len(voc_label_names)
    fcis_model = fcis.models.FCISResNet101(
        n_class,
        ratios=(0.5, 1.0, 2.0),
        anchor_scales=(8, 16, 32),