How to use the tensorboard.backend.event_processing.event_accumulator.EventAccumulator class in tensorboard

To help you get started, we've selected a few tensorboard examples based on popular ways EventAccumulator is used in public projects.

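All of these projects follow the same basic pattern: construct an EventAccumulator pointed at an event file or log directory, call Reload() to read the events from disk, then query the tagged data. A minimal sketch (the path and the 'loss' tag are placeholders):

from tensorboard.backend.event_processing import event_accumulator

# Path is a placeholder; point it at a directory containing an
# events.out.tfevents.* file, or at the file itself.
ea = event_accumulator.EventAccumulator('logs/my_run')
ea.Reload()  # nothing is read from disk until Reload() is called

print(ea.Tags())  # dict of available tags keyed by type: 'scalars', 'histograms', ...
for event in ea.Scalars('loss'):  # each entry is a ScalarEvent(wall_time, step, value)
    print(event.step, event.value)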

github cainmagi / MDNT / utilities / tboard.py
# Method excerpt; the enclosing module needs:
#   from tensorboard.backend.event_processing import event_accumulator
def __init__(self, path, mode='scalars', size_guidance=None,
             compression_bps=event_accumulator.NORMAL_HISTOGRAM_BPS,
             purge_orphaned_data=True):
    '''Initialization
    see the docstring of this class.
    '''
    self.__curMode = None
    self.setDefaultMode(mode)
    self.accumulator = event_accumulator.EventAccumulator(
        path=path,
        size_guidance=size_guidance,
        compression_bps=compression_bps,
        purge_orphaned_data=purge_orphaned_data)
    self.accumulator.Reload()
    self.__keys = self.accumulator.Tags()
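The size_guidance and compression_bps arguments forwarded here control how much data the accumulator retains: size_guidance caps how many events of each type are kept (older events are reservoir-sampled away), and compression_bps gives the basis points at which histograms are compressed (NORMAL_HISTOGRAM_BPS is the module default). A sketch of a custom guidance, assuming only scalars matter:

from tensorboard.backend.event_processing import event_accumulator

# 0 means "keep everything" for that category; omitted categories keep their defaults.
acc = event_accumulator.EventAccumulator(
    'logs/my_run', size_guidance={event_accumulator.SCALARS: 0})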
github PPPLDeepLearning / plasma-python / examples / custom_plot.py
import numpy as np
from bokeh.plotting import figure, output_file, save  # , show

from tensorboard.backend.event_processing import event_accumulator

file_path = "/tigress/alexeys/worked_Graphs/Graph16_momSGD_new/"
ea1 = event_accumulator.EventAccumulator(
    file_path + "events.out.tfevents.1502649990.tiger-i19g10")
ea1.Reload()

ea2 = event_accumulator.EventAccumulator(
    file_path + "events.out.tfevents.1502652797.tiger-i19g10")
ea2.Reload()

histograms = ea1.Tags()['histograms']
# {'images': [], 'audio': [], 'histograms': ['input_2_out',
# 'time_distributed_1_out', 'lstm_1/kernel_0', 'lstm_1/kernel_0_grad',
# 'lstm_1/recurrent_kernel_0', 'lstm_1/recurrent_kernel_0_grad',
# 'lstm_1/bias_0', 'lstm_1/bias_0_grad', 'lstm_1_out', 'dropout_1_out',
# 'lstm_2/kernel_0', 'lstm_2/kernel_0_grad', 'lstm_2/recurrent_kernel_0',
# 'lstm_2/recurrent_kernel_0_grad', 'lstm_2/bias_0', 'lstm_2/bias_0_grad',
# 'lstm_2_out', 'dropout_2_out', 'time_distributed_2/kernel_0',
# 'time_distributed_2/kernel_0_grad', 'time_distributed_2/bias_0',
# 'time_distributed_2/bias_0_grad', 'time_distributed_2_out'], 'scalars':
# ['val_roc', 'val_loss', 'train_loss'], 'distributions': ['input_2_out',
# 'time_distributed_1_out', 'lstm_1/kernel_0', 'lstm_1/kernel_0_grad',
# 'lstm_1/recurrent_kernel_0', 'lstm_1/recurrent_kernel_0_grad',
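The commented block above is a (truncated) dump of a typical Tags() result. With the histogram tag names in hand, the events themselves come from Histograms(); each entry is a HistogramEvent named tuple of (wall_time, step, histogram_value). A sketch continuing from ea1 above:

for tag in histograms:
    events = ea1.Histograms(tag)  # list of HistogramEvent entries for this tag
    print(tag, len(events), events[-1].step)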
github tensorflow / tensorboard / tensorboard / backend / event_processing / event_multiplexer.py
"""
        name = name or path
        accumulator = None
        with self._accumulators_mutex:
            if name not in self._accumulators or self._paths[name] != path:
                if name in self._paths and self._paths[name] != path:
                    # TODO(@decentralion) - Make it impossible to overwrite an old path
                    # with a new path (just give the new path a distinct name)
                    logger.warn(
                        "Conflict for name %s: old path %s, new path %s",
                        name,
                        self._paths[name],
                        path,
                    )
                logger.info("Constructing EventAccumulator for %s", path)
                accumulator = event_accumulator.EventAccumulator(
                    path,
                    size_guidance=self._size_guidance,
                    purge_orphaned_data=self.purge_orphaned_data,
                )
                self._accumulators[name] = accumulator
                self._paths[name] = path
        if accumulator:
            if self._reload_called:
                accumulator.Reload()
        return self
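This is TensorBoard's own EventMultiplexer.AddRun, showing how the project itself keeps one EventAccumulator per named run and only reloads a newly added accumulator once Reload() has already been called. If you need several runs in your own code, driving the multiplexer directly is often simpler than juggling accumulators by hand; a sketch with placeholder run names, paths, and tag:

from tensorboard.backend.event_processing import event_multiplexer

multiplexer = event_multiplexer.EventMultiplexer(
    {'baseline': 'logs/baseline', 'tuned': 'logs/tuned'})
multiplexer.Reload()  # reloads every managed accumulator
events = multiplexer.Scalars('baseline', 'loss')  # arguments are (run name, tag)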
github dstamoulis / single-path-nas / nas-search / plot-progress / parse_search_output.py
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

def parse_indicators_single_path_nas(path, tf_size_guidance):

  event_acc = EventAccumulator(path, tf_size_guidance)
  event_acc.Reload()

  # All scalar tags in the log file.
  tags = event_acc.Tags()['scalars']
  labels = ['t5x5_', 't50c_', 't100c_']
  inds = []
  for idx in range(20):
    layer_row = []
    for label_ in labels:
      summary_label_ = label_ + str(idx + 1)
      decision_ij = event_acc.Scalars(summary_label_)
      layer_row.append(decision_ij[-1].value)
    inds.append(layer_row)
  return inds
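decision_ij[-1].value reads the value of the last ScalarEvent logged under each tag, i.e. the final state of each architecture decision. A hypothetical call (the path is a placeholder), keeping every scalar event so that the last entry really is the final step:

tf_size_guidance = {'scalars': 0}  # 0 = keep all scalar events (no reservoir sampling)
inds = parse_indicators_single_path_nas('runs/search', tf_size_guidance)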
github Detry322 / DeepRole / data / figures / data.py
import logging

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

def load_tensorboard_data(filename):
    # Suppress logging noise while the event file is parsed.
    logging.disable(logging.CRITICAL)
    event_acc = EventAccumulator(filename)
    event_acc.Reload()
    _, _, loss = zip(*event_acc.Scalars('epoch_val_loss'))
    logging.disable(logging.NOTSET)
    return loss
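Each element returned by Scalars() is a ScalarEvent named tuple of (wall_time, step, value), so the zip(*...) above unpacks the events into three parallel sequences, of which only the values are kept.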
github google-research / rl-reliability-metrics / rl_reliability_metrics / evaluation / data_loading.py
If the input data are sets of rollouts, each numpy array represents a single
    set of rollouts (e.g. n_rollouts for a single model checkpoint) and has dims
    [2 x n_rollouts]. rollouts[0, :] is just an index variable (e.g.
    range(0, n_rollouts)) and rollouts[1, :] are the performances per rollout.
    Each rollout set is sorted by the index variable.
  """

  if align_on_global_step:
    restart_determiner_x = 'step'
  else:
    restart_determiner_x = 'value'
  restart_determiner_y = 'step'

  curves = []
  for run_dir in run_dirs:
    accumulator = event_accumulator.EventAccumulator(
        run_dir, size_guidance=event_accumulator.STORE_EVERYTHING_SIZE_GUIDANCE)
    accumulator.Reload()

    # Load the dependent variable.
    y_vals, y_steps = extract_summary(accumulator, dependent_variable,
                                      restart_determiner_y)
    y_vals_dict = {step: val for step, val in zip(y_steps, y_vals)}

    # Load the timepoint variable.
    if timepoint_variable is None:
      # Load from the step values of y.
      steps_to_load = set(y_steps)
      x_steps = y_steps
      x_vals_dict = {step: step for step in x_steps}
    else:
      # Load from summaries.
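The snippet is cut off here. extract_summary is a helper defined elsewhere in the same file, which appears to pull a (values, steps) series for one tag while using the restart determiners to align data across training restarts. The EventAccumulator-specific detail is STORE_EVERYTHING_SIZE_GUIDANCE: it maps every category of data to 0, disabling reservoir sampling so that no events are dropped when reconstructing full training curves.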
github hysts / pytorch_image_classification / tools / extract_scalars.py
import argparse
import json
import pathlib

from tensorboard.backend.event_processing import event_accumulator

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', type=str, required=True)
    parser.add_argument('--outdir', type=str, required=True)
    args = parser.parse_args()

    event_acc = event_accumulator.EventAccumulator(args.path)
    event_acc.Reload()

    scalars = {}
    for tag in event_acc.Tags()['scalars']:
        events = event_acc.Scalars(tag)
        scalars[tag] = [event.value for event in events]

    outdir = pathlib.Path(args.outdir)
    outdir.mkdir(exist_ok=True, parents=True)

    outpath = outdir / 'all_scalars.json'
    with open(outpath, 'w') as fout:
        json.dump(scalars, fout)
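Invoked as, e.g., python extract_scalars.py --path <logdir> --outdir <outdir>, the script writes all_scalars.json mapping each scalar tag to its list of values. Note that only event.value is kept, so steps and wall times are discarded; if you need the step axis as well, a small variation (a sketch):

scalars[tag] = [(event.step, event.value) for event in events]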
github espnet / espnet / egs / an4 / asr1 / local / check_tblog.py
#!/usr/bin/env python3
# coding: utf-8

from glob import glob
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

for dirc in glob("tensorboard/*"):
    event_acc = EventAccumulator(dirc)
    event_acc.Reload()
    # Show all tags in the log file
    # print(event_acc.Tags())
    try:
        t = event_acc.Scalars("main/cer_ctc")
    except KeyError:
        t = {}
    try:
        v = event_acc.Scalars("validation/main/cer_ctc")
    except KeyError:
        v = {}

    print(f"{dirc}: #train: {len(t)}, #valid {len(v)}")
github belskikh / kekas / kekas / utils.py
from typing import Dict, List, Optional

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

def get_tensorboard_scalars(
    logdir: str, metrics: Optional[List[str]], step: str
) -> Dict[str, List]:
    event_acc = EventAccumulator(str(logdir))
    event_acc.Reload()

    if metrics is not None:
        scalar_names = [
            n for n in event_acc.Tags()["scalars"] if step in n and any(m in n for m in metrics)
        ]
    else:
        scalar_names = [n for n in event_acc.Tags()["scalars"] if step in n]

    scalars = {sn: event_acc.Scalars(sn) for sn in scalar_names}
    return scalars
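A hypothetical call, assuming kekas embeds the step type (e.g. "batch" or "epoch") in its scalar tag names; the path and metric names are placeholders:

# Collect epoch-level series whose names mention loss or accuracy.
scalars = get_tensorboard_scalars("logs/run1", metrics=["loss", "acc"], step="epoch")
for name, events in scalars.items():
    print(name, [e.value for e in events])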