How to use the wandb.run attribute in wandb

To help you get started, we’ve selected a few wandb.run examples, based on popular ways it is used in public projects. wandb.run is the module-level handle to the currently active run: it is None until wandb.init() is called.
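As a quick orientation before the real-world snippets, here is a minimal sketch of that lifecycle (the project and metric names are hypothetical):

import wandb

# wandb.run is None until wandb.init() creates a run
assert wandb.run is None

wandb.init(project="my-project")  # hypothetical project name

wandb.log({"loss": 0.25})              # appends a row to wandb.run.history
wandb.run.summary["best_loss"] = 0.25  # summary values overwrite in place
print(wandb.run.dir)                   # files saved here are synced to W&B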


From wandb/client, tests/test_wandb.py:
def test_log(wandb_init_run):
    history_row = {'stuff': 5}
    wandb.log(history_row)
    assert len(wandb.run.history.rows) == 1
    assert set(history_row.items()) <= set(wandb.run.history.rows[0].items())
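This test from the wandb client itself pins down the contract between wandb.log() and wandb.run.history: each log call appends exactly one row, and the logged keys and values can be read back from wandb.run.history.rows.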
From wandb/client, wandb/fastai/__init__.py:
    # Signature reconstructed from the parameters used in the body of this excerpt
    def __init__(self, learn, log="gradients", save_model=True, monitor=None,
                 mode='auto', input_type=None, validation_data=None,
                 predictions=36, seed=12345):
        # Check if wandb.init has been called
        if wandb.run is None:
            raise ValueError(
                'You must call wandb.init() before WandbCallback()')

        # Adapted from fast.ai "SaveModelCallback"
        if monitor is None:
            # use default TrackerCallback monitor value
            super().__init__(learn, mode=mode)
        else:
            super().__init__(learn, monitor=monitor, mode=mode)
        self.save_model = save_model
        self.model_path = Path(wandb.run.dir) / 'bestmodel.pth'

        self.log = log
        self.input_type = input_type
        self.best = None

        # Select items for sample predictions to see evolution along training
        self.validation_data = validation_data
        if input_type and not self.validation_data:
            wandbRandom = random.Random(seed)  # For repeatability
            predictions = min(predictions, len(learn.data.valid_ds))
            indices = wandbRandom.sample(range(len(learn.data.valid_ds)),
                                         predictions)
            self.validation_data = [learn.data.valid_ds[i] for i in indices]
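Two patterns in this callback are worth copying: guarding on wandb.run is None so the callback fails fast with a clear message when wandb.init() has not been called, and saving the best model under Path(wandb.run.dir) so the checkpoint is uploaded along with the run.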
From fastai/fastai_dev, dev/fastai2/callback/wandb.py:
def begin_fit(self):
        "Call watch method to log model topology, gradients & weights"
        self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds")
        if not self.run: return
        if not WandbCallback._wandb_watch_called:
            WandbCallback._wandb_watch_called = True
            # Logs model topology and optionally gradients and weights
            wandb.watch(self.learn.model, log=self.log)

        if hasattr(self, 'save_model'): self.save_model.add_save = Path(wandb.run.dir)/'bestmodel.pth'

        if self.log_preds and not self.valid_dl:
            # Initialize the set of validation samples used to log predictions
            wandbRandom = random.Random(self.seed)  # For repeatability
            self.n_preds = min(self.n_preds, len(self.dbunch.valid_ds))
            idxs = wandbRandom.sample(range(len(self.dbunch.valid_ds)), self.n_preds)

            items = [self.dbunch.valid_ds.items[i] for i in idxs]
            test_tls = [tl._new(items, split_idx=1) for tl in self.dbunch.valid_ds.tls]
            self.valid_dl = self.dbunch.valid_dl.new(DataSource(tls=test_tls), bs=self.n_preds)
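The fastai v2 version of the callback adds a class-level _wandb_watch_called flag so that wandb.watch(), which hooks the model to log topology and optionally gradients and weights, is invoked only once even if several callback instances are created. As in the v1 callback, anything written under Path(wandb.run.dir) travels with the run.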
From catalyst-team/catalyst, catalyst/contrib/runner/wandb.py:
logdir_files))

        for subdir in logdir_files:
            if subdir.is_dir():
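                # copytree() fails if the destination already exists, so the
                # makedirs/rmtree pair guarantees a clean target directory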
                os.makedirs(f"{logdir_dst}/{subdir.name}", exist_ok=True)
                shutil.rmtree(f"{logdir_dst}/{subdir.name}")
                shutil.copytree(
                    f"{str(subdir.absolute())}",
                    f"{logdir_dst}/{subdir.name}")
            else:
                shutil.copy2(
                    f"{str(subdir.absolute())}",
                    f"{logdir_dst}/{subdir.name}")

        checkpoints_src = logdir_src.joinpath("checkpoints")
        checkpoints_dst = Path(wandb.run.dir).joinpath("checkpoints")
        os.makedirs(checkpoints_dst, exist_ok=True)

        checkpoint_paths = []
        for glob in self.checkpoints_glob:
            checkpoint_paths.extend(list(checkpoints_src.glob(glob)))
        checkpoint_paths = list(set(checkpoint_paths))
        for checkpoint_path in checkpoint_paths:
            shutil.copy2(
                f"{str(checkpoint_path.absolute())}",
                f"{checkpoints_dst}/{checkpoint_path.name}")
From catalyst-team/catalyst, catalyst/rl/core/trainer.py:
def _save_wandb(self):
        if WANDB_ENABLED:
            logdir_src = Path(self.logdir)
            logdir_dst = Path(wandb.run.dir)

            events_src = list(logdir_src.glob("events.out.tfevents*"))
            if len(events_src) > 0:
                events_src = events_src[0]
                os.makedirs(f"{logdir_dst}/{logdir_src.name}", exist_ok=True)
                shutil.copy2(
                    f"{str(events_src.absolute())}",
                    f"{logdir_dst}/{logdir_src.name}/{events_src.name}")
From lukas/ml-class, lstm/attention/train.py:
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    prob_model = Model(inputs=input_, outputs=y_prob)
    return model, prob_model


model, prob_model = build_models(encoder_units=config.encoder_units,
                                 decoder_units=config.decoder_units)

# Configure the visualizer
viz = Visualizer(input_vocab, output_vocab)
viz.set_models(model, prob_model)

# Save the network to wandb
wandb.run.summary['graph'] = wandb.Graph.from_keras(model)

model.fit_generator(generator=training.generator(config.batch_size),
                    steps_per_epoch=100,
                    validation_data=validation.generator(config.batch_size),
                    validation_steps=10,
                    workers=1,
                    verbose=1,
                    callbacks=[Examples(viz)],
                    epochs=100)

print('Model training complete.')
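Unlike wandb.log(), which appends rows to the run's history, wandb.run.summary behaves like a dictionary of single values that overwrite in place; assigning wandb.Graph.from_keras(model) to it stores the network topology so it can be rendered on the run page.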
From lukas/ml-class, examples/keras-gan/gan.py:
        self.generator = self.build_generator()

        # The generator takes noise as input and generates imgs
        z = Input(shape=(self.latent_dim,))
        img = self.generator(z)

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # The discriminator takes generated images as input and determines validity
        validity = self.discriminator(img)

        # The combined model (stacked generator and discriminator)
        # Trains the generator to fool the discriminator
        self.combined = Model(z, validity)
        wandb.run.summary['graph'] = wandb.Graph.from_keras(self.combined)
        wandb.run._user_accessed_summary = False
        self.combined.summary()
        self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
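The same wandb.run.summary['graph'] trick is applied here to the stacked generator/discriminator model. Note that the assignment to wandb.run._user_accessed_summary touches a private attribute of the client, not public API, so it should not be imitated in new code.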
From wandb/client, wandb/jupyter.py:
def start(self):
        if self.paused:
            self.rm = RunManager(wandb.run, output=False, cloud=wandb.run.mode != "dryrun")
            wandb.run.api._file_stream_api = None
            self.rm.mirror_stdout_stderr()
            self.paused = False
            # Init will return the last step of a resumed run
            # we update the run's history._steps in extreme hack fashion
            # TODO: this deserves a bigtime refactor
            new_step = self.rm.init_run(dict(os.environ))
            if new_step:
                wandb.run.history._steps = new_step + 1
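This helper resumes syncing of a paused Jupyter run: it rebuilds the RunManager, re-mirrors stdout and stderr, and, since resuming returns the last logged step, advances wandb.run.history._steps so new rows continue from the correct index. As the comments concede, reaching into history._steps is a private-API hack.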
From wandb/client, wandb/mlflow/__init__.py:
def _get_run(run_id, only_run=True):
        run = RUNS.get(run_id, {}).get("run") if only_run else RUNS.get(run_id, {})
        if not run:
            wandb.termwarn("No run found for %s - cur: %s" % (run_id, wandb.run), force=True)
            run = MlflowClient().get_run(run_id)
            return _get_or_start_wandb_run(run)
        else:
            return run
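The MLflow shim keeps a RUNS mapping from MLflow run ids to wandb runs. On a cache miss it warns (mentioning the current wandb.run for context), fetches the run through MlflowClient, and starts or reuses a matching wandb run; otherwise it returns the cached entry.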