How to use the wandb.config object in wandb

To help you get started, we've selected a few wandb.config examples, drawn from popular ways it is used in public projects.
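wandb.config is the run's configuration object: set hyperparameters on it as attributes, merge a whole dict with config.update(), or seed it through wandb.init(config=...). A minimal sketch before the examples (the names and values here are illustrative):

import wandb

# seed the config when the run starts
wandb.init(config={"epochs": 10})

# set individual values as attributes
wandb.config.batch_size = 32

print(wandb.config.epochs, wandb.config.batch_size)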

github medipixel / rl_algorithms / algorithms/ppo/agent.py (view on GitHub)
def train(self):
    """Train the agent."""
    # logger
    if self.args.log:
        wandb.init()
        wandb.config.update(self.hyper_params)
        # wandb.watch([self.actor, self.critic], log="parameters")

    score = 0
    i_episode_prev = 0
    loss = [0.0, 0.0, 0.0]
    state = self.env.reset()

    while self.i_episode <= self.args.episode_num:
        for _ in range(self.hyper_params["ROLLOUT_LEN"]):
            if self.args.render and self.i_episode >= self.args.render_after:
                self.env.render()

            action = self.select_action(state)
            next_state, reward, done, _ = self.step(action)
            self.episode_steps += 1
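The pattern above, wandb.init() followed by wandb.config.update(self.hyper_params), records the agent's whole hyperparameter dict on the run. A standalone sketch of the same idea (the dict contents are made up):

import wandb

hyper_params = {"ROLLOUT_LEN": 128, "GAMMA": 0.99}

wandb.init()
wandb.config.update(hyper_params)  # each key becomes a config field on the run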
github lukas / ml-class / keras-seq2seq/train.py (view on GitHub)
from keras.models import Sequential
from keras.layers import LSTM, TimeDistributed, RepeatVector, Dense
import numpy as np
import wandb
from wandb.keras import WandbCallback

wandb.init()
config = wandb.config

class CharacterTable(object):
    """Given a set of characters:
    + Encode them to a one hot integer representation
    + Decode the one hot integer representation to their character output
    + Decode a vector of probabilities to their character output
    """
    def __init__(self, chars):
        """Initialize character table.
        # Arguments
            chars: Characters that can appear in the input.
        """
        self.chars = sorted(set(chars))
        self.char_indices = dict((c, i) for i, c in enumerate(self.chars))
        self.indices_char = dict((i, c) for i, c in enumerate(self.chars))
github lukas / ml-class / keras-transfer/dogcat-bottleneck.py (view on GitHub)
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.callbacks import Callback
from keras.layers import Dropout, Flatten, Dense
from keras.applications.vgg16 import VGG16, preprocess_input
from dogcat_data import generators, get_nb_files
import os
import sys
import wandb
from wandb.keras import WandbCallback

wandb.init()
config = wandb.config

# dimensions of our images.
config.img_width = 224
config.img_height = 224
config.epochs = 50
config.batch_size = 40 

top_model_weights_path = 'bottleneck.h5'
train_dir = 'dogcat-data/train'
validation_dir = 'dogcat-data/validation'
nb_train_samples = 1000
nb_validation_samples = 1000

def save_bottleneck_features():
    if os.path.exists('bottleneck_features_train.npy') and (len(sys.argv) == 1 or sys.argv[1] != "--force"):
        print("Using saved features, pass --force to save new features")
github lukas / ml-class / examples/lstm/imdb-classifier/imdb-bow.py (view on GitHub)
import util
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing import text
import wandb

wandb.init()
config = wandb.config
config.vocab_size = 1000

(X_train, y_train), (X_test, y_test) = util.load_imdb()

tokenizer = text.Tokenizer(num_words=config.vocab_size)
tokenizer.fit_on_texts(X_train)
X_train = tokenizer.texts_to_matrix(X_train)
X_test = tokenizer.texts_to_matrix(X_test)

# one hot encode outputs
y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)

# create model
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(2, activation="softmax", input_shape=(config.vocab_size,)))
github medipixel / rl_algorithms / algorithms/dqn/agent.py (view on GitHub)
def train(self):
    """Train the agent."""
    # logger
    if self.args.log:
        wandb.init()
        wandb.config.update(self.hyper_params)
        # wandb.watch([self.dqn], log="parameters")

    # pre-training if needed
    self.pretrain()

    max_epsilon, min_epsilon, epsilon_decay = (
        self.hyper_params["MAX_EPSILON"],
        self.hyper_params["MIN_EPSILON"],
        self.hyper_params["EPSILON_DECAY"],
    )

    for self.i_episode in range(1, self.args.episode_num + 1):
        state = self.env.reset()
        self.episode_step = 0
        losses = list()
        done = False
github wandb / client / wandb/sweeps/examples/train-tune.py (view on GitHub)
def train():
    run = wandb.init(config=config_defaults)
    shorten = dict(width="w", height="h", activation="a")
    clean = lambda x: '{:0.1f}'.format(x) if isinstance(x, float) else x
    run.name = "run:" + ','.join([
        '{}={}'.format(shorten.get(k), clean(v)) for k, v in dict(run.config).items() if k in shorten])
    run.save()
    conf = dict(wandb.config)
    value = conf.get("width") + conf.get("height")
    wandb.log(dict(mean_loss=value))
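Here wandb.init(config=config_defaults) supplies fallback values, and a sweep controller overrides them per run; run.config then holds the merged result. A sketch of driving the train() function above with a sweep (the parameter grid is illustrative):

import wandb

# defaults used by train() above; the sweep overrides them per run
config_defaults = {"width": 1.0, "height": 1.0, "activation": "relu"}

# an illustrative grid over the two parameters train() reads
sweep_config = {
    "method": "grid",
    "parameters": {
        "width": {"values": [1.0, 2.0]},
        "height": {"values": [3.0, 4.0]},
    },
}

sweep_id = wandb.sweep(sweep_config)
wandb.agent(sweep_id, function=train)  # calls train() once per parameter combination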
github lukas / ml-class / amazon-reviews/amazon-bow.py (view on GitHub)
from keras.preprocessing import sequence
from keras.preprocessing import text
import amazon
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding, LSTM
from keras.layers import Conv1D, Flatten
import wandb
from wandb.keras import WandbCallback

wandb.init()
config = wandb.config

(train_summary, train_review_text, train_labels), (test_summary, test_review_text, test_labels) = amazon.load_amazon()

config.vocab_size = 1000
config.maxlen = 1000
config.batch_size = 32
config.embedding_dims = 50
config.filters = 250
config.kernel_size = 3
config.hidden_dims = 250
config.epochs = 10

(X_train, y_train), (X_test, y_test) = (train_summary, train_labels), (test_summary, test_labels)
print("Review", X_train[0])
print("Label", y_train[0])
github learnables / cherry / benchmarks/benchmark.py (view on GitHub)
    SEED = seed
    random.seed(seed)
    np.random.seed(seed)
    th.manual_seed(seed)

    # Wrap envs.Logger.log for live logging
    envs.Logger.log = benchmark_log(envs.Logger.log)
    envs.Logger._episodes_stats = benchmark_stats(envs.Logger._episodes_stats)

    # Train
    print('Benchmarks: Started training.')
    exec(main_code)

    # Update information about the environment
    if hasattr(env, 'spec'):
        wandb.config.update(env.spec)

    # Compute and log all rewards
    if hasattr(env, 'all_rewards'):
        print('Benchmarks: Computing rewards.')
        R = 0
        returns = []
        for i, (reward, done) in enumerate(zip(env.all_rewards,
                                               env.all_dones)):
            wandb.log({
                'all_rewards': reward,
                'all_dones': done,
            }, step=i)

            R += reward
            if bool(done):
                wandb.log({
github lukas / ml-class / keras-autoencoder/conditional_autoencoder.py (view on GitHub)
# assumed imports (defined earlier in the original file, along with img_size)
from keras.models import Model
from keras import layers
import wandb

def create_categorical_decoder():
    '''
    Create the decoder with an optional class appended to the input.
    '''
    decoder_input = layers.Input(shape=(wandb.config.latent_dim,))
    label_input = layers.Input(shape=(len(wandb.config.labels),))
    if wandb.config.conditional:
        x = layers.concatenate([decoder_input, label_input], axis=-1)
    else:
        x = decoder_input
    x = layers.Dense(512, activation='relu')(x)
    x = layers.Dense(img_size * img_size, activation='sigmoid')(x)
    x = layers.Reshape((img_size, img_size, 1))(x)

    return Model([decoder_input, label_input], x, name='decoder')