How to use the wandb.log function in wandb

To help you get started, we've selected a few wandb.log examples that show popular ways it is used in public projects.

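All of the snippets below share the same core pattern: call wandb.init() once at the start of a run, then call wandb.log() with a dictionary of named values whenever you want to record a step. Here is a minimal sketch of that pattern; the project name and metric values are placeholders:

import wandb

wandb.init(project="my-project")  # placeholder project name

for epoch in range(10):
    train_loss = 1.0 / (epoch + 1)  # stand-in for a real training metric
    # each wandb.log call records one step (one row of run history)
    wandb.log({"epoch": epoch, "train_loss": train_loss})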

github wandb / client / tests / test_torch.py View on Github
def test_unwatch(wandb_init_run):
    net = ConvNet()
    wandb.watch(net, log_freq=1, log="all")
    wandb.unwatch()  # stop logging gradients and parameters for this model
    for i in range(3):
        output = net(dummy_torch_tensor((64, 1, 28, 28)))
        grads = torch.ones(64, 10)
        output.backward(grads)
        # after unwatch, no gradient stats should accumulate in the current row
        assert(len(wandb_init_run.history.row) == 0)
        assert(
            wandb_init_run.history.row.get('gradients/fc2.bias') is None)
        wandb.log({"a": 2})
    assert(len(wandb_init_run.history.rows) == 3)
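By default, every wandb.log call commits one row of run history, which is why the three calls in the loop above leave exactly three entries in history.rows even though gradient logging was switched off by wandb.unwatch.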

github keras-rl / keras-rl / rl / callbacks.py View on Github
def on_episode_end(self, episode, logs):
        """ Compute and log training statistics of the episode when done """
        duration = timeit.default_timer() - self.episode_start[episode]
        episode_steps = len(self.observations[episode])

        metrics = np.array(self.metrics[episode])
        metrics_dict = {}
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            for idx, name in enumerate(self.metrics_names):
                try:
                    metrics_dict[name] = np.nanmean(metrics[:, idx])
                except Warning:
                    metrics_dict[name] = float('nan')

        wandb.log({
            'step': self.step,
            'episode': episode + 1,
            'duration': duration,
            'episode_steps': episode_steps,
            'sps': float(episode_steps) / duration,
            'episode_reward': np.sum(self.rewards[episode]),
            'reward_mean': np.mean(self.rewards[episode]),
            'reward_min': np.min(self.rewards[episode]),
            'reward_max': np.max(self.rewards[episode]),
            'action_mean': np.mean(self.actions[episode]),
            'action_min': np.min(self.actions[episode]),
            'action_max': np.max(self.actions[episode]),
            'obs_mean': np.mean(self.observations[episode]),
            'obs_min': np.min(self.observations[episode]),
            'obs_max': np.max(self.observations[episode]),
            **metrics_dict
        })
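Logging the global step and episode index as ordinary fields in the dictionary is a useful pattern: any logged field can later be chosen as the x-axis for charts on the wandb dashboard.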

github medipixel / rl_algorithms / algorithms / trpo / agent.py View on Github
def write_log(self, i: int, loss: np.ndarray, score: int):
        """Write log about loss and score"""
        total_loss = loss.sum()

        print(
            "[INFO] episode %d total score: %d, total loss: %f\n"
            "actor_loss: %.3f critic_loss: %.3f\n"
            % (i, score, total_loss, loss[0], loss[1])  # actor loss  # critic loss
        )

        if self.args.log:
            wandb.log(
                {
                    "score": score,
                    "total loss": total_loss,
                    "actor loss": loss[0],
                    "critic loss": loss[1],
                }
            )

github jstray / deepform / train-unet.py View on Github
# Thanks, StackOverflow. This "undoes" a 1D convolution by combining upsampling with a convolution.
def Conv1DTranspose(input_tensor, filters, kernel_size, strides=2, padding='same'):
		x = Lambda(lambda x: expand_dims(x, axis=2))(input_tensor)
		x = Conv2DTranspose(filters=filters, kernel_size=(kernel_size, 1), strides=(strides, 1), padding=padding)(x)
		x = Lambda(lambda x: squeeze(x, axis=2))(x)
		return x

# Configuration
read_docs = 10000 # how many docs to load, at most
max_doc_length = 4096
vocab_size = 5000
target_thresh = 0.9
augment_dims = 4 # number of features per token, other than token type

wandb.log({'algorithm':'U-net with position and dollar marker'})

# Generator that reads all our training data
# For each document, yields an array of dictionaries, each of which is a token
def input_docs(max_docs=None):
	incsv = csv.DictReader(open('data/training.csv', mode='r'))
    
	# Reconstruct documents by concatenating all rows with the same slug
	active_slug = None
	doc_rows = [] 
	num_docs = 0

	for row in incsv:	
		# throw out tokens that are too short, they won't help us
		token = row['token']
		if len(token) < 3:
			continue

github lukas / ml-class / keras-autoencoder / autoencoder.py View on Github
def on_epoch_end(self, epoch, logs):
        indices = np.random.randint(self.validation_data[0].shape[0], size=8)
        test_data = self.validation_data[0][indices]
        pred_data = self.model.predict(test_data)
        wandb.log({
            "examples": [
                wandb.Image(np.hstack([data, pred_data[i]]), caption=str(i))
                for i, data in enumerate(test_data)]
        }, commit=False)
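The commit=False argument merges these example images into the current step instead of finishing it; the row is written when a later wandb.log call commits the step, keeping the images aligned with the other metrics logged for that epoch.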

github medipixel / rl_algorithms / algorithms / common / abstract / agent.py View on Github
if self.args.render:
                    self.env.render()

                action = self.select_action(state)
                next_state, reward, done, _ = self.step(action)

                state = next_state
                score += reward
                step += 1

            print(
                "[INFO] test %d\tstep: %d\ttotal score: %d" % (i_episode, step, score)
            )

            if self.args.log:
                wandb.log({"test score": score})

github medipixel / rl_algorithms / algorithms / reinforce / agent.py View on Github
def write_log(self, i: int, score: int, policy_loss: float, value_loss: float):
        total_loss = policy_loss + value_loss

        print(
            "[INFO] episode %d\ttotal score: %d\ttotal loss: %f\n"
            "policy loss: %f\tvalue loss: %f\n"
            % (i, score, total_loss, policy_loss, value_loss)
        )

        if self.args.log:
            wandb.log(
                {
                    "total loss": total_loss,
                    "policy loss": policy_loss,
                    "value loss": value_loss,
                    "score": score,
                }
            )

github wandb / client / wandb / fastai / __init__.py View on Github
# we just log input data
                else:
                    pred_log.append(wandb.Image(x.data, caption='Input data'))

            wandb.log({"Prediction Samples": pred_log}, commit=False)

        # Log losses & metrics
        # Adapted from fast.ai "CSVLogger"
        logs = {
            name: stat
            for name, stat in list(
                zip(self.learn.recorder.names, [epoch, smooth_loss] +
                    last_metrics))
        }
        wandb.log(logs)

github medipixel / rl_algorithms / algorithms / ddpg / agent.py View on Github
"[INFO] episode %d, episode step: %d, total step: %d, total score: %d\n"
            "total loss: %f actor_loss: %.3f critic_loss: %.3f (spent %.6f sec/step)\n"
            % (
                i,
                self.episode_step,
                self.total_step,
                score,
                total_loss,
                loss[0],
                loss[1],
                avg_time_cost,
            )  # actor loss  # critic loss
        )

        if self.args.log:
            wandb.log(
                {
                    "score": score,
                    "total loss": total_loss,
                    "actor loss": loss[0],
                    "critic loss": loss[1],
                    "time per each step": avg_time_cost,
                }
            )

github lukas / ml-class / examples / mobile / tfjs-emotion / train.py View on Github
def on_epoch_end(self, epoch, logs):
        # time a prediction over the whole test set for average per-example cost
        start = time.time()
        self.model.predict(self.testX)
        end = time.time()
        # time a single-example prediction to measure latency
        self.model.predict(self.testX[:1])
        latency = time.time() - end
        # log both in milliseconds; commit=False folds them into the epoch's row
        wandb.log({"avg_inference_time": (end - start) /
                   len(self.testX) * 1000, "latency": latency * 1000}, commit=False)