How to use contextlib2 - 10 common examples

To help you get started, we’ve selected a few contextlib2 examples based on popular ways the library is used in public projects.

github IuryAlves / TryPython / docker / eval.py
def _get_console_out(to_eval, namespace):
    fake_out, fake_err = StringIO(), StringIO()
    console = code.InteractiveConsole(locals=namespace)
    with redirect_stdout(fake_out), redirect_stderr(fake_err):
        for function in namespace.get("functions", []):
            for statement in function.split("\\n"):
                console.push(statement)
        for statement in to_eval.split("\n"):
            if statement:
                console.push(statement)
            else:
                console.push('\n')
    return fake_out.getvalue(), fake_err.getvalue()
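
Stripped of the interactive-console machinery, the core of this example is contextlib2's redirect_stdout and redirect_stderr, which temporarily reroute the standard streams to any file-like object. A minimal, self-contained sketch of the same capture pattern (the function name and printed strings are illustrative only):

import sys
from io import StringIO

from contextlib2 import redirect_stdout, redirect_stderr

def capture_output():
    out, err = StringIO(), StringIO()
    # Both redirections stay active for the whole with block and are
    # undone in reverse order on exit.
    with redirect_stdout(out), redirect_stderr(err):
        print('sent to stdout')
        print('sent to stderr', file=sys.stderr)
    return out.getvalue(), err.getvalue()

print(capture_output())  # ('sent to stdout\n', 'sent to stderr\n')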
github enigmampc / catalyst / catalyst / gens / tradesimulation.py
if splits:
                    algo.blotter.process_splits(splits)
                    perf_tracker.position_tracker.handle_splits(splits)

        def handle_benchmark(date, benchmark_source=self.benchmark_source):
            algo.perf_tracker.all_benchmark_returns[date] = \
                benchmark_source.get_value(date)

        def on_exit():
            # Remove references to algo, data portal, et al to break cycles
            # and ensure deterministic cleanup of these objects when the
            # simulation finishes.
            self.algo = None
            self.benchmark_source = self.current_data = self.data_portal = None

        with ExitStack() as stack:
            stack.callback(on_exit)
            stack.enter_context(self.processor)
            stack.enter_context(ZiplineAPI(self.algo))

            if algo.data_frequency == 'minute':
                def execute_order_cancellation_policy():
                    algo.blotter.execute_cancel_policy(SESSION_END)

                def calculate_minute_capital_changes(dt):
                    # process any capital changes that came between the last
                    # and current minutes
                    return algo.calculate_capital_changes(
                        dt, emission_rate=emission_rate, is_interday=False)
            else:
                def execute_order_cancellation_policy():
                    pass
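
The contextlib2 piece of this excerpt is the ExitStack near the top: callback() registers a plain function to run when the stack closes, enter_context() enters a context manager and schedules its exit, and everything unwinds in last-in, first-out order. A minimal sketch of that ordering, with invented resource names rather than anything from catalyst:

from contextlib2 import ExitStack, contextmanager

@contextmanager
def resource(name):
    print('enter', name)
    try:
        yield name
    finally:
        print('exit', name)

def on_exit():
    print('cleanup callback')

with ExitStack() as stack:
    stack.callback(on_exit)            # registered first, so it runs last
    stack.enter_context(resource('a'))
    stack.enter_context(resource('b'))
    print('body')
# Output order: enter a, enter b, body, exit b, exit a, cleanup callback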
github broadinstitute / viral-ngs / tools / git_annex.py
def _in_tmp_worktree(self, worktree_group_id='', chdir=True, keep=False):
        """Create a temp branch in a temp worktree, chdir to it; yield the name of the branch and the temp worktree dir."""
        temp_branch = os.path.join('tmp_wtree', worktree_group_id, str(uuid.uuid4()))
        temp_worktree_dir = os.path.join('tmp', temp_branch)
        self.execute_git(['worktree', 'add', '--no-checkout', '-b', temp_branch, temp_worktree_dir, self.get_first_commit()])
        dot_git = os.path.join(temp_worktree_dir, '.git')
        save_dot_git = util.file.slurp_file(dot_git)
        self.execute_git(['checkout'], cwd=temp_worktree_dir)
        try:
            with contextlib2.ExitStack() as exit_stack:
                if chdir:
                    exit_stack.enter_context(util.file.pushd_popd(temp_worktree_dir))
                yield temp_branch, temp_worktree_dir
        finally:
            os.remove(dot_git)
            util.file.dump_file(dot_git, save_dot_git)
            if not keep and not util.file.keep_tmp():
                try:
                    self.execute_git(['worktree', 'remove', temp_worktree_dir])
                    self.execute_git(['branch', '-D', temp_branch])
                except Exception:
                    _log.warning('Could not remove temp worktree %s', temp_worktree_dir)
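
Here the ExitStack is used to enter a context manager only when a flag asks for it (pushd_popd when chdir is true), while the surrounding try/finally still runs unconditionally. A rough, self-contained sketch of that conditional-context pattern; pushd and work_in are illustrative stand-ins, not the viral-ngs helpers:

import os
import tempfile
from contextlib2 import ExitStack, contextmanager

@contextmanager
def pushd(path):
    """Temporarily chdir to `path`, restoring the previous cwd on exit."""
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield path
    finally:
        os.chdir(previous)

def work_in(path, chdir=True):
    with ExitStack() as stack:
        if chdir:
            # Entered only when requested; undone automatically either way.
            stack.enter_context(pushd(path))
        return os.getcwd()

print(work_in(tempfile.gettempdir()))               # the temp dir (possibly resolved)
print(work_in(tempfile.gettempdir(), chdir=False))  # the original working directory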
github apache / incubator-superset / superset / sql_lab.py
@contextmanager
def session_scope(nullpool):
    """Provide a transactional scope around a series of operations."""
    if nullpool:
        engine = sqlalchemy.create_engine(
            app.config["SQLALCHEMY_DATABASE_URI"], poolclass=NullPool
        )
        session_class = sessionmaker()
        session_class.configure(bind=engine)
        session = session_class()
    else:
        session = db.session()
        session.commit()  # HACK

    try:
        yield session
        session.commit()
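
The excerpt is cut off after the commit, but the shape is the classic generator-based context manager built with contextlib2's contextmanager decorator: set up, yield, commit on success, roll back on failure, clean up regardless. A generic sketch of that shape, not the actual Superset code:

from contextlib2 import contextmanager

@contextmanager
def transaction_scope(session_factory):
    """Commit on success, roll back on error, always close the session."""
    session = session_factory()
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()

# Usage, given some SQLAlchemy sessionmaker `Session`:
#     with transaction_scope(Session) as session:
#         session.add(obj)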
github pymedusa / Medusa / medusa / config.py
def convert_csv_string_to_list(value, delimiter=',', trim=False):
    """
    Convert comma or other character delimited strings to a list.

    :param value: The value to convert.
    :param delimiter: Optionally change the default delimiter ',' if required.
    :param trim: Optionally trim the individual list items.
    :return: The delimited value as a list.
    """

    if not isinstance(value, (string_types, text_type)):
        return value

    with suppress(AttributeError, ValueError):
        value = value.split(delimiter) if value else []
        if trim:
            value = [_.strip() for _ in value]

    return value
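
suppress here is contextlib2's suppress: the listed exception types are silently swallowed inside the with block, while anything else still propagates. A minimal sketch (to_int is an invented example, not from Medusa):

from contextlib2 import suppress

def to_int(value, default=0):
    # ValueError/TypeError from int() are swallowed; anything else still raises.
    with suppress(ValueError, TypeError):
        return int(value)
    return default

print(to_int('42'))    # 42
print(to_int('oops'))  # 0
print(to_int(None))    # 0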
github tensorflow / models / research / object_detection / dataset_tools / create_coco_tf_record.py
def _create_tf_record_from_coco_annotations(
    annotations_file, image_dir, output_path, include_masks, num_shards):
  """Loads COCO annotation json files and converts to tf.Record format.

  Args:
    annotations_file: JSON file containing bounding box annotations.
    image_dir: Directory containing the image files.
    output_path: Path to output tf.Record file.
    include_masks: Whether to include instance segmentation masks
      (PNG encoded) in the result. Default: False.
    num_shards: number of output file shards.
  """
  with contextlib2.ExitStack() as tf_record_close_stack, \
      tf.gfile.GFile(annotations_file, 'r') as fid:
    output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
        tf_record_close_stack, output_path, num_shards)
    groundtruth_data = json.load(fid)
    images = groundtruth_data['images']
    category_index = label_map_util.create_category_index(
        groundtruth_data['categories'])

    annotations_index = {}
    if 'annotations' in groundtruth_data:
      tf.logging.info(
          'Found groundtruth annotations. Building annotations index.')
      for annotation in groundtruth_data['annotations']:
        image_id = annotation['image_id']
        if image_id not in annotations_index:
          annotations_index[image_id] = []
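
The notable move is that the ExitStack is handed to open_sharded_output_tfrecords, which opens every shard and registers it on the caller's stack, so all shards are closed together when the with block ends. A framework-free sketch of that pass-the-stack-to-a-helper pattern, using plain files and invented names instead of the TensorFlow tooling:

import os
import tempfile
from contextlib2 import ExitStack

def open_sharded_outputs(stack, base_path, num_shards):
    """Open one file per shard and register each on the caller's ExitStack."""
    return [
        stack.enter_context(
            open('{}-{:05d}-of-{:05d}'.format(base_path, i, num_shards), 'w'))
        for i in range(num_shards)
    ]

base = os.path.join(tempfile.mkdtemp(), 'records')
with ExitStack() as close_stack:
    shards = open_sharded_outputs(close_stack, base, num_shards=3)
    for i, record in enumerate(['a', 'b', 'c', 'd']):
        # Stripe records across the shards round-robin.
        shards[i % len(shards)].write(record + '\n')
# Leaving the with block closes every shard file, most recently opened first.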
github deepmind / sonnet / sonnet / python / modules / base.py
Upon entering this context manager the module adds itself onto the top
    of the module call stack. Any variables created with `tf.get_variable()`
    inside `_build()` or `_enter_variable_scope()` while this module is on top
    of the call stack will be added to `self._all_variables`.

    Before exiting the context the module removes itself from the top of the
    call stack, and adds all of the variables in `self._all_variables` to its
    parent module (the new top) of the call stack.

    Yields:
      Nothing, the yield just transfers focus back to the inner context.
    """
    module_stack = get_module_stack()
    module_stack.append(self)
    try:
      with contextlib2.ExitStack() as stack:
        # Ideally move re-entering store into Template.variable_scope.
        template_store = getattr(self._template, "_template_store", None)
        if template_store is not None:
          # In eager mode, the template store keeps references to created
          # variables such that they survive even if there are no references to
          # them in Python code. Variables added to an eager template store are
          # also added to TensorFlow global collections (unlike regular
          # variables created in eager mode).
          stack.enter_context(template_store.as_default())

        stack.enter_context(
            util.notify_about_new_variables(self._all_variables.add))

        yield

        if self._original_name:
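
This excerpt nests an ExitStack inside a generator-based context manager so that a varying collection of contexts can wrap a single yield. Reduced to its essentials, with Sonnet's TensorFlow specifics removed, the pattern looks roughly like this (composed is an illustrative name, not Sonnet's API):

import tempfile
from contextlib2 import ExitStack, contextmanager

@contextmanager
def composed(*context_managers):
    """Enter an arbitrary collection of context managers around one yield."""
    with ExitStack() as stack:
        yield [stack.enter_context(cm) for cm in context_managers]
    # Every entered context manager has been exited here, in reverse order.

with composed(tempfile.TemporaryFile('w+'), tempfile.TemporaryFile('w+')) as (a, b):
    a.write('first')
    b.write('second')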
github IBM / MAX-Object-Detector / training / training_code / data / prepare_data_object_detection.py
def create_tf_record(output_filename, num_shards, examples):
        with contextlib2.ExitStack() as tf_record_close_stack:
            output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
                tf_record_close_stack,
                output_filename,
                num_shards)
            for idx, example in enumerate(examples):
                img_path = os.path.join(read_bucket, example)
                if not os.path.isfile(img_path):
                    continue                
                with tf.gfile.GFile(img_path, 'rb') as fid:
                    encoded_jpg = fid.read()
                encoded_jpg_io = io.BytesIO(encoded_jpg)
                image = PIL.Image.open(encoded_jpg_io)
                if image.format != 'JPEG':
                    raise ValueError('Image format not JPEG')
                key = hashlib.sha256(encoded_jpg).hexdigest()
github oulutan / ACAM_Demo / object_detection / models / research / object_detection / dataset_tools / create_oid_tf_record.py
]
  for flag_name in required_flags:
    if not getattr(FLAGS, flag_name):
      raise ValueError('Flag --{} is required'.format(flag_name))

  label_map = label_map_util.get_label_map_dict(FLAGS.input_label_map)
  all_annotations = pd.read_csv(FLAGS.input_annotations_csv)
  all_images = tf.gfile.Glob(
      os.path.join(FLAGS.input_images_directory, '*.jpg'))
  all_image_ids = [os.path.splitext(os.path.basename(v))[0] for v in all_images]
  all_image_ids = pd.DataFrame({'ImageID': all_image_ids})
  all_annotations = pd.concat([all_annotations, all_image_ids])

  tf.logging.log(tf.logging.INFO, 'Found %d images...', len(all_image_ids))

  with contextlib2.ExitStack() as tf_record_close_stack:
    output_tfrecords = oid_tfrecord_creation.open_sharded_output_tfrecords(
        tf_record_close_stack, FLAGS.output_tf_record_path_prefix,
        FLAGS.num_shards)

    for counter, image_data in enumerate(all_annotations.groupby('ImageID')):
      tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000,
                             counter)

      image_id, image_annotations = image_data
      # In OID image file names are formed by appending ".jpg" to the image ID.
      image_path = os.path.join(FLAGS.input_images_directory, image_id + '.jpg')
      with tf.gfile.Open(image_path) as image_file:
        encoded_image = image_file.read()

      tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
          image_annotations, label_map, encoded_image)
github google / nucleus / nucleus / io / tfrecord.py
This function writes serialized strings of each proto in protos to output_path
  in their original order. If output_path is a sharded file (e.g., foo@2), this
  function will write the protos spread out as evenly as possible among the
  individual components of the sharded spec (e.g., foo-00000-of-00002 and
  foo-00001-of-00002). Note that the order of records in the sharded files may
  differ from the order in protos due to the striping.

  Args:
    protos: An iterable of protobufs. The objects we want to write out.
    output_path: str. The filepath where we want to write protos.
    compression_type: 'GZIP', 'ZLIB', '' (uncompressed), or None to autodetect
      based on file extension.
  """
  if sharded_file_utils.is_sharded_file_spec(output_path):
    with contextlib2.ExitStack() as stack:
      _, n_shards, _ = sharded_file_utils.parse_sharded_file_spec(output_path)
      writers = [
          stack.enter_context(
              Writer(sharded_file_utils.sharded_filename(
                  output_path, i), compression_type=compression_type))
          for i in range(n_shards)
      ]
      for i, proto in enumerate(protos):
        writers[i % n_shards].write(proto)
  else:
    with Writer(output_path, compression_type=compression_type) as writer:
      for proto in protos:
        writer.write(proto)

contextlib2

Backports and enhancements for the standard library contextlib module. License: Python-2.0.