How to use the byteps.torch.broadcast_parameters function in byteps

To help you get started, we've selected a few byteps examples based on popular ways the library is used in public projects. All of the excerpts below come from the bytedance/byteps repository itself.

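The excerpts all follow the same pattern: wrap the local optimizer with bps.DistributedOptimizer, then call bps.broadcast_parameters and bps.broadcast_optimizer_state with root_rank=0 so every worker starts from identical weights and optimizer state. Here is a minimal, self-contained sketch of that pattern; the linear model, learning rate, and the torch.cuda.set_device call are our placeholders for a typical one-process-per-GPU setup rather than code taken from the excerpts, and the script is assumed to run under the BytePS launcher.

import torch
import torch.nn as nn
import torch.optim as optim
import byteps.torch as bps

# Initialize BytePS and pin this process to its local GPU.
bps.init()
torch.cuda.set_device(bps.local_rank())

model = nn.Linear(10, 1).cuda()                      # placeholder model
optimizer = optim.SGD(model.parameters(), lr=0.01 * bps.size())

# Wrap the optimizer so gradients are averaged via BytePS push-pull.
optimizer = bps.DistributedOptimizer(optimizer,
                                     named_parameters=model.named_parameters(),
                                     compression=bps.Compression.none)

# Broadcast parameters and optimizer state from rank 0 so all workers
# begin training from the same initial values.
bps.broadcast_parameters(model.state_dict(), root_rank=0)
bps.broadcast_optimizer_state(optimizer, root_rank=0)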

From bytedance/byteps: example/pytorch/transformer/train.py (view on GitHub)
print("src_vocab_size:",opt.src_vocab_size,",tgt_vocab_size:",opt.tgt_vocab_size,",share_weight:",(opt.proj_share_weight,opt.embs_share_weight))
    if hvd.local_rank() == 0:
        fo = open("transformer_model.csv", "w")
        for name, p in transformer.named_parameters():
            if p.requires_grad:
                size = 1
                for s in list(p.size()):
                    size = size * s
                print("name:",name,", size:",size)
                fo.write(name+", "+str(size)+"\n")
        fo.close()   
 
    torch_optimizer = optim.Adam(filter(lambda x: x.requires_grad, transformer.parameters()), betas=(0.9, 0.98), eps=1e-09)
    if use_horovod > 0:
        torch_optimizer = hvd.DistributedOptimizer(torch_optimizer, named_parameters=transformer.named_parameters())
        hvd.broadcast_parameters(transformer.state_dict(), root_rank=0)
        hvd.broadcast_optimizer_state(torch_optimizer, root_rank=0)
    #print("finish hvd preparation")
    #optimizer = ScheduledOptim(torch_optimizer, opt.d_model, opt.n_warmup_steps)
    optimizer = torch_optimizer
    '''
    optimizer = ScheduledOptim(
        optim.Adam(
            filter(lambda x: x.requires_grad, transformer.parameters()),
            betas=(0.9, 0.98), eps=1e-09),
        opt.d_model, opt.n_warmup_steps)
    '''
    train(transformer, training_data, validation_data, optimizer, device ,opt)
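
The transformer example above reaches BytePS through an hvd alias and gates the distributed path on use_horovod: byteps.torch deliberately mirrors Horovod's torch interface, so a Horovod training script can be ported by rebinding the hvd name. A minimal sketch of that aliasing; the hard-coded flag value is our illustration, the real script derives it from its own options.

use_horovod = 1  # illustrative; the script reads this from its command line

if use_horovod > 0:
    # byteps.torch exposes the same DistributedOptimizer / broadcast calls
    # as horovod.torch, so the rest of the script is unchanged.
    import byteps.torch as hvd
    hvd.init()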

From bytedance/byteps: example/pytorch/benchmark_byteps.py (view on GitHub)
if args.cuda:
    # Move model to GPU.
    model.cuda()

optimizer = optim.SGD(model.parameters(), lr=0.01)

# BytePS: (optional) compression algorithm.
compression = bps.Compression.fp16 if args.fp16_pushpull else bps.Compression.none

# BytePS: wrap optimizer with DistributedOptimizer.
optimizer = bps.DistributedOptimizer(optimizer,
                                     named_parameters=model.named_parameters(),
                                     compression=compression)

# BytePS: broadcast parameters & optimizer state.
bps.broadcast_parameters(model.state_dict(), root_rank=0)
bps.broadcast_optimizer_state(optimizer, root_rank=0)

# Set up fake data
datasets = []
for _ in range(100):
    data = torch.rand(args.batch_size, 3, 224, 224)
    target = torch.LongTensor(args.batch_size).random_() % 1000
    if args.cuda:
        data, target = data.cuda(), target.cuda()
    datasets.append(data)
data_index = 0

def benchmark_step():
    global data_index

    data = datasets[data_index%len(datasets)]
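
The benchmark excerpt stops inside benchmark_step. A common way to drive such a step function is to time repeated calls with timeit and report a per-worker throughput, as sketched below; the iteration counts are placeholders rather than values from the script, while args.batch_size, args.cuda, and benchmark_step come from the excerpt.

import timeit

num_iters = 10          # placeholder
batches_per_iter = 10   # placeholder
for _ in range(num_iters):
    # Run a handful of benchmark steps and convert the wall time to img/sec.
    elapsed = timeit.timeit(benchmark_step, number=batches_per_iter)
    img_sec = args.batch_size * batches_per_iter / elapsed
    print('%.1f img/sec per %s' % (img_sec, 'GPU' if args.cuda else 'CPU'))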

From bytedance/byteps: example/pytorch/train_mnist_byteps.py (view on GitHub)
# BytePS: scale learning rate by the number of GPUs.
optimizer = optim.SGD(model.parameters(), lr=args.lr * bps.size(),
                      momentum=args.momentum)

# BytePS: (optional) compression algorithm.
compression = bps.Compression.fp16 if args.fp16_pushpull else bps.Compression.none

# BytePS: wrap optimizer with DistributedOptimizer.
optimizer = bps.DistributedOptimizer(optimizer,
                                     named_parameters=model.named_parameters(),
                                     compression=compression)


# BytePS: broadcast parameters.
bps.broadcast_parameters(model.state_dict(), root_rank=0)
bps.broadcast_optimizer_state(optimizer, root_rank=0)

def train(epoch):
    model.train()
    # BytePS: set epoch to sampler for shuffling.
    train_sampler.set_epoch(epoch)
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            # BytePS: use train_sampler to determine the number of examples in
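
The MNIST excerpt uses a train_sampler and train_loader that are built earlier in the script. Below is a minimal sketch of how such a sampler is commonly constructed so each rank trains on its own shard of the data; train_dataset is a placeholder, and bps and args are the objects already used in the excerpt.

import torch.utils.data
import torch.utils.data.distributed

# Give each worker a disjoint shard of the training set; the train() loop
# above calls train_sampler.set_epoch(epoch) to reshuffle every epoch.
train_sampler = torch.utils.data.distributed.DistributedSampler(
    train_dataset, num_replicas=bps.size(), rank=bps.rank())
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=args.batch_size, sampler=train_sampler)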

From bytedance/byteps: example/pytorch/BERT/examples/run_classifier.py (view on GitHub)
    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)
        if use_horovod == 1:

            state_dict = optimizer.state_dict()
            pop_dict = {}
            for index, group in enumerate(state_dict['param_groups']):
                if 'schedule' in group:
                    pop_dict[index] = group.pop('schedule')
            optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())

            hvd.broadcast_parameters(model.state_dict(), root_rank=0)
            optimizer.load_state_dict(state_dict)
            hvd.broadcast_optimizer_state(optimizer, root_rank=0)
            for index, group in enumerate(state_dict['param_groups']):
                if index in pop_dict:
                    group['schedule'] = pop_dict[index]
            optimizer.load_state_dict(state_dict)

    global_step = 0
    nb_tr_steps = 0
    tr_loss = 0
    if args.do_train:
        train_features = convert_examples_to_features(
            train_examples, label_list, args.max_seq_length, tokenizer)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)

From bytedance/byteps: byteps/torch/cross_barrier.py (view on GitHub)
from byteps.torch.ops import size, local_size, rank, local_rank

import threading
import logging
try:
    import queue
except ImportError:
    import Queue as queue
import time
import math
import torch
import byteps.torch as bps

_DistributedOptimizer = bps._DistributedOptimizer
_bps_DistributedOptimizer = bps.DistributedOptimizer
broadcast_parameters = bps.broadcast_parameters
broadcast_optimizer_state = bps.broadcast_optimizer_state


class _CrossBarrier(_DistributedOptimizer):
    """An optimizer that wraps a _DistributedOptimizer, intercepting push-pull operations.
    This class enables overlapping gradient push-pull with both backward and forward propagation while maintaining
    correct dependencies. It can achieve even higher training performance than the default BytePS with proper system
    parameters. To understand the principles behind barrier crossing, check the paper
    https://dl.acm.org/citation.cfm?id=3359642
    """
    def __init__(self, model, byteps_opt, num_steps=10**6):
        """Construct a new ScheduledOptimizer, which uses byteps optimizer under the hood for averaging gradients
         across all workers.
        Args:
            model: The training model. BytePS uses the model object to register hooks.
            byteps_opt: Optimizer to use for averaging gradients and applying updates.
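
Note that cross_barrier simply re-exports the BytePS broadcast helpers through the module-level assignments shown above, so importing broadcast_parameters from either location reaches the same function. A small check, assuming byteps is installed:

import byteps.torch as bps
from byteps.torch.cross_barrier import broadcast_parameters, broadcast_optimizer_state

# Both names are plain aliases of the byteps.torch functions.
assert broadcast_parameters is bps.broadcast_parameters
assert broadcast_optimizer_state is bps.broadcast_optimizer_state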

From bytedance/byteps: example/pytorch/train_imagenet_resnet50_byteps.py (view on GitHub)
# BytePS: wrap optimizer with DistributedOptimizer.
optimizer = bps.DistributedOptimizer(
    optimizer, named_parameters=model.named_parameters(),
    compression=compression,
    backward_passes_per_step=args.batches_per_pushpull)

# Restore from a previous checkpoint, if initial_epoch is specified.
# BytePS: restore on the first worker which will broadcast weights to other workers.
if resume_from_epoch > 0 and bps.rank() == 0:
    filepath = args.checkpoint_format.format(epoch=resume_from_epoch)
    checkpoint = torch.load(filepath)
    model.load_state_dict(checkpoint['model'])
    optimizer.load_state_dict(checkpoint['optimizer'])

# BytePS: broadcast parameters & optimizer state.
bps.broadcast_parameters(model.state_dict(), root_rank=0)
bps.broadcast_optimizer_state(optimizer, root_rank=0)

def train(epoch):
    model.train()
    train_sampler.set_epoch(epoch)
    train_loss = Metric('train_loss')
    train_accuracy = Metric('train_accuracy')

    with tqdm(total=len(train_loader),
              desc='Train Epoch     #{}'.format(epoch + 1),
              disable=not verbose) as t:
        for batch_idx, (data, target) in enumerate(train_loader):
            adjust_learning_rate(epoch, batch_idx)

            if args.cuda:
                data, target = data.cuda(), target.cuda()
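
The ResNet-50 excerpt restores a checkpoint only on rank 0 and lets the two broadcast calls propagate the restored weights and optimizer state to every other worker. The matching save side is typically restricted to rank 0 as well, so workers do not race on the same file; a minimal sketch, with args.checkpoint_format taken from the excerpt and the function name ours.

def save_checkpoint(epoch):
    # Only the first worker writes checkpoints; every rank holds identical
    # state after the broadcasts, so nothing is lost.
    if bps.rank() == 0:
        filepath = args.checkpoint_format.format(epoch=epoch + 1)
        torch.save({'model': model.state_dict(),
                    'optimizer': optimizer.state_dict()}, filepath)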