How to use smdebug.tensorflow.modes in smdebug

To help you get started, we've selected a few smdebug examples based on popular ways smdebug.tensorflow.modes is used in public projects. modes is an enum of job phases (TRAIN, EVAL, PREDICT, GLOBAL); passing a member to hook.set_mode() tells smdebug which phase the following steps belong to, so trial analysis can separate them.
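All of the snippets below follow the repository's convention of importing smdebug as smd and passing enum members to hook.set_mode(). A minimal sketch of the enum itself; the four members are smdebug's documented ModeKeys:

import smdebug.tensorflow as smd

# smdebug keeps a separate step count per mode, so analysis can tell
# training, evaluation, and prediction steps apart.
print(smd.modes.TRAIN, smd.modes.EVAL, smd.modes.PREDICT, smd.modes.GLOBAL)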

github awslabs/sagemaker-debugger — tests/zero_code_change/tensorflow_integration_tests.py (view on GitHub)
if script_mode:
    hook = smd.EstimatorHook(sim.out_dir)
    hook.set_mode(smd.modes.TRAIN)
    keras_estimator.train(input_fn=input_fn, steps=25, hooks=[hook])
    hook.set_mode(smd.modes.EVAL)
    eval_result = keras_estimator.evaluate(input_fn=input_fn, steps=10, hooks=[hook])
else:
    keras_estimator.train(input_fn=input_fn, steps=25)
    keras_estimator.evaluate(input_fn=input_fn, steps=10)

tr = smd.create_trial(sim.out_dir)
assert len(tr.tensor_names()) == 1
assert tr.steps() == [0, 25]
assert len(tr.steps(smd.modes.TRAIN)) == 1
assert len(tr.steps(smd.modes.EVAL)) == 1
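Note the two views in the assertions above: tr.steps() returns the global step numbers across all modes, while tr.steps(smd.modes.TRAIN) returns only the steps recorded under that mode. A short sketch of reading both views; the output directory is illustrative:

import smdebug.tensorflow as smd

tr = smd.create_trial("/tmp/smdebug_demo")  # illustrative path
print(tr.steps())                           # all saved steps, global numbering
print(tr.steps(smd.modes.TRAIN))            # steps saved while the mode was TRAIN
print(tr.steps(smd.modes.EVAL))             # steps saved while the mode was EVAL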
github awslabs/sagemaker-debugger — tests/tensorflow/hooks/test_save_config.py (view on GitHub)
from smdebug.tensorflow import modes
from smdebug.trials import create_trial


def helper_save_config_modes(trial_dir, hook):
    help_test_mnist(trial_dir, hook=hook, num_steps=2, num_eval_steps=3)
    tr = create_trial(trial_dir)
    for tname in tr.tensors(collection="weights"):
        t = tr.tensor(tname)
        assert len(t.steps(mode=modes.TRAIN)) == 2
        assert len(t.steps(mode=modes.EVAL)) == 1
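Beyond counting steps per mode, each saved tensor can be queried per mode. A sketch using smdebug's documented Trial API (value(step, mode=...)); the output directory is illustrative:

from smdebug.tensorflow import modes
from smdebug.trials import create_trial

tr = create_trial("/tmp/smdebug_demo")  # illustrative path
for tname in tr.tensors(collection="weights"):
    t = tr.tensor(tname)
    # step numbers restart at 0 within each mode, so the same index
    # can exist under both TRAIN and EVAL
    for step in t.steps(mode=modes.EVAL):
        print(tname, step, t.value(step, mode=modes.EVAL))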
github awslabs/sagemaker-debugger — tests/tensorflow/hooks/test_mirrored_strategy.py (view on GitHub)
for s in steps:
    if s == "train":
        print("Starting train")
        if not zcc:
            ts_hook.set_mode(smd.modes.TRAIN)
            # Train the model
            mnist_classifier.train(
                input_fn=input_fn_provider.train_input_fn, steps=num_steps, hooks=[ts_hook]
            )
        else:
            mnist_classifier.train(input_fn=input_fn_provider.train_input_fn, steps=num_steps)
    elif s == "eval":
        print("Starting eval")

        if not zcc:
            ts_hook.set_mode(smd.modes.EVAL)
            # Evaluate the model and print results
            mnist_classifier.evaluate(
                input_fn=input_fn_provider.eval_input_fn, steps=num_steps, hooks=[ts_hook]
            )
        else:
            mnist_classifier.evaluate(input_fn=input_fn_provider.eval_input_fn, steps=num_steps)
    elif s == "predict":
        print("Starting predict")
        if not zcc:
            ts_hook.set_mode(smd.modes.PREDICT)
            # Run prediction with the model
            p = mnist_classifier.predict(
                input_fn=input_fn_provider.eval_input_fn, hooks=[ts_hook]
            )
        else:
            p = mnist_classifier.predict(input_fn=input_fn_provider.eval_input_fn)
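The zcc branch above relies on smdebug creating and registering a hook automatically (zero code change). If you need a handle on that auto-created hook, for example to switch the mode yourself, smdebug exposes a singleton accessor; a sketch, assuming smd.get_hook() is available in your smdebug version:

import smdebug.tensorflow as smd

hook = smd.get_hook()  # assumed singleton accessor; returns the active hook or None
if hook is not None:
    hook.set_mode(smd.modes.PREDICT)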
github awslabs/sagemaker-debugger — examples/tensorflow/local/simple.py (view on GitHub)
global_step = tf.Variable(17, name="global_step", trainable=False)
increment_global_step_op = tf.assign(global_step, global_step + 1)

optimizer = tf.train.AdamOptimizer(args.lr)

# Wrap the optimizer with wrap_optimizer so smdebug can find gradients to save
optimizer = hook.wrap_optimizer(optimizer)

# use this wrapped optimizer to minimize loss
optimizer_op = optimizer.minimize(loss, global_step=increment_global_step_op)

# pass the hook to hooks parameter of monitored session
sess = tf.train.MonitoredSession(hooks=[hook])

# use this session for running the tensorflow model
hook.set_mode(smd.modes.TRAIN)
for i in range(args.steps):
    x_ = np.random.random((10, 2)) * args.scale
    _loss, opt, gstep = sess.run([loss, optimizer_op, increment_global_step_op], {x: x_})
    print(f"Step={i}, Loss={_loss}")

hook.set_mode(smd.modes.EVAL)
for i in range(args.steps):
    x_ = np.random.random((10, 2)) * args.scale
    sess.run([loss, increment_global_step_op], {x: x_})
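With tf.keras, the explicit mode switching above is usually unnecessary: smdebug's KerasHook is a Keras callback that moves between TRAIN, EVAL, and PREDICT as fit, evaluate, and predict run. A minimal sketch; the model, data, and output path are illustrative:

import numpy as np
import tensorflow as tf
import smdebug.tensorflow as smd

hook = smd.KerasHook(out_dir="/tmp/smdebug_keras")  # illustrative path

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(2,))])
model.compile(optimizer="adam", loss="mse")

x, y = np.random.random((64, 2)), np.random.random((64, 1))
model.fit(x, y, epochs=1, callbacks=[hook])  # steps recorded under modes.TRAIN
model.evaluate(x, y, callbacks=[hook])       # steps recorded under modes.EVAL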
github awslabs/sagemaker-debugger — examples/tensorflow/sagemaker_byoc/simple.py (view on GitHub)
optimizer = tf.train.AdamOptimizer(args.lr)

##### Enabling SageMaker Debugger ###########
# Wrap the optimizer with wrap_optimizer so smdebug can find gradients to save
optimizer = hook.wrap_optimizer(optimizer)

# use this wrapped optimizer to minimize loss
optimizer_op = optimizer.minimize(loss, global_step=increment_global_step_op)

##### Enabling SageMaker Debugger ###########
# pass the hook to hooks parameter of monitored session
sess = tf.train.MonitoredSession(hooks=[hook])

##### Enabling SageMaker Debugger ###########
# setting the mode of job so analysis can differentiate between TRAIN, EVAL, PREDICT
hook.set_mode(smd.modes.TRAIN)
for i in range(args.steps):
    x_ = np.random.random((10, 2)) * args.scale
    _loss, opt, gstep = sess.run([loss, optimizer_op, increment_global_step_op], {x: x_})
    print(f"Step={i}, Loss={_loss}")

##### Enabling SageMaker Debugger ###########
# setting the mode of job so analysis can differentiate between TRAIN, EVAL, PREDICT
hook.set_mode(smd.modes.EVAL)
for i in range(args.steps):
    x_ = np.random.random((10, 2)) * args.scale
    sess.run([loss, increment_global_step_op], {x: x_})
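In the SageMaker bring-your-own-container setting above, the hook parameters are usually not hard-coded; SageMaker writes a debugger JSON config into the container, and the hook can be built from it. A sketch using the documented create_from_json_file() factory:

import smdebug.tensorflow as smd

# out_dir, collections, and save intervals all come from the JSON config
# that the SageMaker job places in the container.
hook = smd.SessionHook.create_from_json_file()
hook.set_mode(smd.modes.TRAIN)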
github awslabs/sagemaker-debugger — examples/tensorflow/scripts/distributed_training/mirrored_strategy_mnist.py (view on GitHub)
# the leading SessionHook arguments mirror the parameter-server example below
ts_hook = smd.SessionHook(
    out_dir=FLAGS.smdebug_path,
    save_all=FLAGS.save_all,
    include_collections=["weights", "gradients", "losses", "biases"],
    save_config=smd.SaveConfig(save_interval=FLAGS.save_frequency),
    reduction_config=rdnc,
)

ts_hook.set_mode(smd.modes.TRAIN)

# Create the Estimator
# pass RunConfig
mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn, config=config)

# Train the model
mnist_classifier.train(
    input_fn=input_fn_provider.train_input_fn, steps=FLAGS.steps, hooks=[ts_hook]
)

ts_hook.set_mode(smd.modes.EVAL)
# Evaluate the model and print results
eval_results = mnist_classifier.evaluate(
    input_fn=input_fn_provider.eval_input_fn, hooks=[ts_hook]
)
print(eval_results)
github awslabs/sagemaker-debugger — examples/tensorflow/scripts/distributed_training/parameter_server_training/parameter_server_mnist.py (view on GitHub)
# save tensors as reductions if necessary
rdnc = (
    smd.ReductionConfig(reductions=["mean"], abs_reductions=["max"], norms=["l1"])
    if FLAGS.reductions
    else None
)

ts_hook = smd.SessionHook(
    out_dir=FLAGS.smdebug_path,
    save_all=FLAGS.save_all,
    include_collections=["weights", "gradients", "losses", "biases"],
    save_config=smd.SaveConfig(save_interval=FLAGS.save_frequency),
    reduction_config=rdnc,
)

ts_hook.set_mode(smd.modes.TRAIN)

# Create the Estimator
# pass RunConfig
mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn, config=config)

hooks = list()
hooks.append(ts_hook)

train_spec = tf.estimator.TrainSpec(
    input_fn=input_fn_provider.train_input_fn, max_steps=FLAGS.steps, hooks=hooks
)
eval_spec = tf.estimator.EvalSpec(
    input_fn=input_fn_provider.eval_input_fn, steps=FLAGS.steps, hooks=hooks
)

tf.estimator.train_and_evaluate(mnist_classifier, train_spec, eval_spec)
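When a ReductionConfig like the one above is active, full tensor values are not written; analysis reads the saved reductions instead, still split by mode. A sketch using smdebug's documented reduction_value API; the path and collection are illustrative:

from smdebug.tensorflow import modes
from smdebug.trials import create_trial

tr = create_trial("/tmp/smdebug_ps")  # illustrative path
t = tr.tensor(tr.tensor_names(collection="weights")[0])
for step in t.steps(mode=modes.TRAIN):
    print(step, t.reduction_value(step, "mean", mode=modes.TRAIN))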