How to use the trains.debugging.log.LoggerRoot.get_base_logger function in trains

To help you get started, we’ve selected a few trains examples, based on popular ways LoggerRoot.get_base_logger is used in public projects.

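Before working through the snippets, here is a minimal sketch of calling the function directly. LoggerRoot.get_base_logger() returns a standard logging.Logger, so the usual logging API applies (the message text is just an illustration):

from trains.debugging.log import LoggerRoot

# get_base_logger() hands back the package-wide logging.Logger used by trains
logger = LoggerRoot.get_base_logger()
logger.warning('this message goes through the trains base logger')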

github allegroai / trains / trains / logger.py
        try:
            level = int(level)
        except (TypeError, ValueError):
            self._task.log.log(level=logging.ERROR,
                               msg='Logger failed casting log level "%s" to integer' % str(level))
            level = logging.INFO

        if not running_remotely():
            # noinspection PyBroadException
            try:
                record = self._task.log.makeRecord(
                    "console", level=level, fn='', lno=0, func='', msg=msg, args=args, exc_info=None
                )
                # find the task handler that matches our task
                if not self._task_handler:
                    self._task_handler = [h for h in LoggerRoot.get_base_logger().handlers
                                          if isinstance(h, TaskHandler) and h.task_id == self._task.id][0]
                self._task_handler.emit(record)
            except Exception:
                LoggerRoot.get_base_logger().warning(msg='Logger failed sending log: [level %s]: "%s"'
                                                         % (str(level), str(msg)))

        if not omit_console:
            # if we are here and we grabbed the stdout, we need to print the real thing
            if DevWorker.report_stdout and not running_remotely():
                # noinspection PyBroadException
                try:
                    # make sure we are writing to the original stdout
                    StdStreamPatch.stdout_original_write(str(msg)+'\n')
                except Exception:
                    pass
            else:
                print(str(msg))

        # if task was not started, we have to start it
        self._start_task_if_needed()
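The handler lookup in the snippet above is a reusable pattern. A minimal sketch, assuming TaskHandler lives at trains.backend_interface.task.log (the helper name find_task_handler is ours):

from trains.debugging.log import LoggerRoot
from trains.backend_interface.task.log import TaskHandler  # import path assumed

def find_task_handler(task_id):
    # scan the base logger's handlers for the TaskHandler bound to this task
    for handler in LoggerRoot.get_base_logger().handlers:
        if isinstance(handler, TaskHandler) and handler.task_id == task_id:
            return handler
    return None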
github allegroai / trains / trains / task.py
    def __get_last_used_task_id(cls, default_project_name, default_task_name, default_task_type):
        hash_key = cls.__get_hash_key(cls._get_api_server(), default_project_name, default_task_name, default_task_type)

        # check if we have a cached task_id we can reuse
        # it must be from within the last 24h and with the same project/name/type
        task_sessions = SessionCache.load_dict(str(cls))

        task_data = task_sessions.get(hash_key)
        if task_data is None:
            return None

        try:
            task_data['type'] = cls.TaskTypes(task_data['type'])
        except (ValueError, KeyError):
            LoggerRoot.get_base_logger().warning(
                "Corrupted session cache entry: {}. "
                "Unsupported task type: {}"
                "Creating a new task.".format(hash_key, task_data['type']),
            )

            return None

        return task_data
github allegroai / trains / trains / binding / artifacts.py
        # because sometimes that is the only change
        h = hashlib.sha256()
        file_hash = hashlib.sha256()
        b = bytearray(Artifacts._hash_block_size)
        mv = memoryview(b)
        try:
            with open(filename, 'rb', buffering=0) as f:
                # skip header
                if skip_header:
                    file_hash.update(f.read(skip_header))
                for n in iter(lambda: f.readinto(mv), 0):
                    h.update(mv[:n])
                    if skip_header:
                        file_hash.update(mv[:n])
        except Exception as e:
            LoggerRoot.get_base_logger().warning(str(e))
            return None, None

        return h.hexdigest(), file_hash.hexdigest() if skip_header else None
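The block-wise hashing pattern above, stripped of the trains specifics, looks like this (a standalone sketch; the function name and block size are illustrative):

import hashlib

def sha256_file(path, block_size=65536):
    # digest the file in fixed-size blocks so large files never sit in memory
    digest = hashlib.sha256()
    buf = bytearray(block_size)
    view = memoryview(buf)
    with open(path, 'rb', buffering=0) as f:
        for n in iter(lambda: f.readinto(view), 0):
            digest.update(view[:n])
    return digest.hexdigest()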
github allegroai / trains / trains / binding / frameworks / tensorflow_bind.py
            if Network is not None:
                Network._updated_config = _patched_call(Network._updated_config, PatchKerasModelIO._updated_config)
                if hasattr(Sequential.from_config, '__func__'):
                    Network.from_config = classmethod(_patched_call(Network.from_config.__func__,
                                                                    PatchKerasModelIO._from_config))
                else:
                    Network.from_config = _patched_call(Network.from_config, PatchKerasModelIO._from_config)
                Network.save = _patched_call(Network.save, PatchKerasModelIO._save)
                Network.save_weights = _patched_call(Network.save_weights, PatchKerasModelIO._save_weights)
                Network.load_weights = _patched_call(Network.load_weights, PatchKerasModelIO._load_weights)

            if keras_saving is not None:
                keras_saving.save_model = _patched_call(keras_saving.save_model, PatchKerasModelIO._save_model)
                keras_saving.load_model = _patched_call(keras_saving.load_model, PatchKerasModelIO._load_model)
        except Exception as ex:
            LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
github allegroai / trains / trains / binding / frameworks / tensorflow_bind.py
                self.trains_out_model = OutputModel(
                    task=PatchKerasModelIO.__main_task,
                    config_dict=config,
                    name=PatchKerasModelIO.__main_task.name + ' ' + model_name_id,
                    label_enumeration=PatchKerasModelIO.__main_task.get_labels_enumeration(),
                    framework=Framework.keras,
                )
            # check if we have output storage
            if self.trains_out_model.upload_storage_uri:
                self.trains_out_model.update_weights(weights_filename=filepath, auto_delete_file=False)
            else:
                self.trains_out_model.update_weights(weights_filename=None, register_uri=filepath)
            # if anyone asks, we were here
            self.trains_out_model._processed = True
        except Exception as ex:
            LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
github allegroai / trains / trains / binding / frameworks / tensorflow_bind.py
            except Exception as ex:
                LoggerRoot.get_base_logger(TensorflowBinding).debug(str(ex))

            if PatchSummaryToEventTransformer.__original_getattributeX is None:
                try:
                    # only patch once
                    if PatchSummaryToEventTransformer._original_add_eventX is None:
                        from tensorboardX.writer import FileWriter as FileWriterX
                        PatchSummaryToEventTransformer._original_add_eventX = FileWriterX.add_event
                        FileWriterX.add_event = PatchSummaryToEventTransformer._patched_add_eventX
                        setattr(FileWriterX, 'trains', None)
                except ImportError:
                    # this is a new version of tensorboardX
                    pass
                except Exception as ex:
                    LoggerRoot.get_base_logger(TensorflowBinding).debug(str(ex))
github allegroai / trains / trains / binding / frameworks / tensorflow_bind.py
                is_tf_keras = False
                callbacks = None
        # we have nothing, quit
        if not is_keras and not is_tf_keras:
            return

        try:
            # only patch once
            if PatchModelCheckPointCallback.__original_getattribute is None and callbacks is not None:
                PatchModelCheckPointCallback.__original_getattribute = callbacks.ModelCheckpoint.__getattribute__
                callbacks.ModelCheckpoint.__getattribute__ = PatchModelCheckPointCallback._patched_getattribute
                setattr(callbacks.ModelCheckpoint, 'trains',
                        property(PatchModelCheckPointCallback.trains_object))

        except Exception as ex:
            LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
github allegroai / trains / trains / backend_interface / task / task.py
        If the value for this option is false, we won't touch the current logger configuration regarding TaskHandler(s)
        :param replace_existing: If True and another task is already logging to the backend, replace the handler with
        a handler for this task.
        """
        # Make sure urllib3 is never in debug/info
        disable_urllib3_info = config.get('log.disable_urllib3_info', True)
        if disable_urllib3_info and logging.getLogger('urllib3').isEnabledFor(logging.INFO):
            logging.getLogger('urllib3').setLevel(logging.WARNING)

        log_to_backend = get_log_to_backend(default=default_log_to_backend) or self._log_to_backend
        if not log_to_backend:
            return

        # Handle the root logger and our own logger. We use set() to make sure we create no duplicates
        # in case these are the same logger...
        loggers = {logging.getLogger(), LoggerRoot.get_base_logger()}

        # Find all TaskHandler handlers for these loggers
        handlers = {logger: h for logger in loggers for h in logger.handlers if isinstance(h, TaskHandler)}

        if handlers and not replace_existing:
            # Handlers exist and we shouldn't replace them
            return

        # Remove all handlers, we'll add new ones
        for logger, handler in handlers.items():
            logger.removeHandler(handler)

        # Create a handler that will be used in all loggers. Since our handler is a buffering handler, using more
        # than one instance to report to the same task will result in out-of-order log reports (grouped by whichever
        # handler instance handled them)
        backend_handler = TaskHandler(self.session, self.task_id)
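The snippet cuts off right after the handler is built; the implied next step, per the comments above it, is attaching that single instance to every logger. A minimal sketch (the helper name attach_backend_handler is ours):

import logging

from trains.debugging.log import LoggerRoot

def attach_backend_handler(backend_handler):
    # one shared TaskHandler instance keeps the buffered reports in order
    for logger in {logging.getLogger(), LoggerRoot.get_base_logger()}:
        logger.addHandler(backend_handler)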
github allegroai / trains / trains / binding / frameworks / tensorflow_bind.py
            else:
                val = image.astype(np.uint8)
            if val.ndim == 3 and val.shape[2] == 3:
                if self._visualization_mode == 'BGR':
                    val = val[:, :, [2, 1, 0]]
                else:
                    val = val  # already RGB; keep channel order as-is
            elif (val.ndim == 2) or (val.ndim == 3 and val.shape[2] == 1):
                val = np.tile(np.atleast_3d(val), (1, 1, 3))
            elif val.ndim == 3 and val.shape[2] == 4:
                if self._visualization_mode == 'BGR':
                    val = val[:, :, [2, 1, 0]]
                else:
                    val = val[:, :, [0, 1, 2]]
        except Exception:
            LoggerRoot.get_base_logger(TensorflowBinding).warning(
                'Failed decoding debug image [%d, %d, %d]' % (width, height, color_channels))
            val = None
        return val
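The channel handling above reduces to two numpy idioms, shown here in isolation (the array shape is a toy value):

import numpy as np

gray = np.zeros((4, 4), dtype=np.uint8)        # H x W, single channel
rgb = np.tile(np.atleast_3d(gray), (1, 1, 3))  # replicate to H x W x 3
bgr = rgb[:, :, [2, 1, 0]]                     # swap the R and B channels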