How to use the loguru.logger object in loguru

To help you get started, we’ve selected a few loguru examples based on popular ways it is used in public projects.

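Before the project examples below, here is a minimal sketch of everyday loguru usage; the file name, rotation size, and messages are purely illustrative.

from loguru import logger

# The logger works out of the box: it ships with a pre-configured stderr sink
logger.info("Hello from loguru")

# add() attaches additional sinks; the file name and rotation size are illustrative
logger.add("app.log", rotation="10 MB", level="DEBUG")
logger.warning("This message goes to stderr and to app.log")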

github Delgan / loguru / tests / exceptions / source / backtrace / nested_decorator_catch_up.py
import sys
from loguru import logger

logger.remove()
# Two stderr sinks: identical except that the second enables extended backtraces
logger.add(sys.stderr, format="", colorize=False, backtrace=False, diagnose=False)
logger.add(sys.stderr, format="", colorize=False, backtrace=True, diagnose=False)


@logger.catch(ZeroDivisionError)  # only a ZeroDivisionError reaching foo() is caught and logged
def foo():
    bar()


@logger.catch(NotImplementedError)  # the ZeroDivisionError is not caught here, so it propagates to foo()
def bar():
    1 / 0


foo()
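
For comparison, a minimal sketch of the plain decorator form: used without an exception class, logger.catch logs any exception raised in the wrapped function instead of letting it escape (the failing function below is just an illustration).

from loguru import logger


@logger.catch
def risky():
    return 1 / 0


risky()  # the ZeroDivisionError is logged with a traceback instead of crashing the script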
github Delgan / loguru / tests / test_repr.py
def test_no_handler():
    assert repr(logger) == "<loguru.logger handlers=[]>"
github Delgan / loguru / tests / exceptions / source / backtrace / head_recursion.py
from loguru import logger


def c(n):
    if n:
        try:
            c(n - 1)
        except ZeroDivisionError:
            logger.exception("")  # logs the caught error with its traceback at ERROR severity
    1 / n  # raises ZeroDivisionError at the deepest call, where n == 0
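
As a standalone illustration of logger.exception(), which logs at ERROR severity and appends the current exception's traceback (roughly logger.opt(exception=True).error(...)), the small parser below is a hypothetical example:

from loguru import logger


def parse_int(value):
    try:
        return int(value)
    except ValueError:
        logger.exception("Could not parse {!r}", value)  # message plus full traceback
        return None


parse_int("not a number")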
github httprunner / httprunner / httprunner / cli.py
    # keep compatibility with v2
    extra_args = ensure_cli_args(extra_args)

    tests_path_list = []
    extra_args_new = []
    for item in extra_args:
        if not os.path.exists(item):
            # item is not file/folder path
            extra_args_new.append(item)
        else:
            # item is file/folder path
            tests_path_list.append(item)

    if len(tests_path_list) == 0:
        # has not specified any testcase path
        logger.error(f"No valid testcase path in cli arguments: {extra_args}")
        sys.exit(1)

    testcase_path_list = main_make(tests_path_list)
    if not testcase_path_list:
        logger.error("No valid testcases found, exit 1.")
        sys.exit(1)

    if "--tb=short" not in extra_args_new:
        extra_args_new.append("--tb=short")

    extra_args_new.extend(testcase_path_list)
    logger.info(f"start to run tests with pytest. HttpRunner version: {__version__}")
    return pytest.main(extra_args_new)
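
The snippet above formats messages with f-strings, which are evaluated eagerly. Loguru also accepts str.format-style placeholders with arguments, and opt(lazy=True) only evaluates callable arguments when the message will actually be emitted. A minimal sketch (the version string and computation are illustrative):

from loguru import logger

version = "4.0.0"  # illustrative value
logger.info("start to run tests with pytest. HttpRunner version: {}", version)

# With lazy=True, the lambda only runs if the DEBUG level is enabled for some sink
logger.opt(lazy=True).debug("expensive details: {}", lambda: sum(range(10**6)))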
github deepdrive / deepdrive-sim / .travis / cloud_build.py
@log.catch
@retry(tries=5, jitter=(0, 1), logger=log)
def get_job_status(job_id):
    status_resp = requests.post('https://sim.deepdrive.io/job/status',
                                json={'job_id': job_id})
    if not status_resp.ok:
        raise RuntimeError('Error getting job status')
    return status_resp
github pawamoy / aria2p / src / aria2p / downloads.py
        Examples:
            # download dir is /a/b.
            >>> self.files
            ["/a/b/c/1.txt", "/a/b/c/2.txt", "/a/b/3.txt"]
            >>> self.root_files_paths
            ["/a/b/c", "/a/b/3.txt"]
        """
        if not self._root_files_paths:
            paths = []
            for file in self.files:
                if file.is_metadata:
                    continue
                try:
                    relative_path = file.path.relative_to(self.dir)
                except ValueError as error:
                    logger.warning(f"Can't determine file path '{file.path}' relative to '{self.dir}'")
                    logger.opt(exception=True).trace(error)
                else:
                    path = self.dir / relative_path.parts[0]
                    if path not in paths:
                        paths.append(path)
            self._root_files_paths = paths
        return self._root_files_paths
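
The logger.opt(exception=True).trace(error) call above is a useful pattern: a short human-readable message at a visible level, with the full traceback logged only at TRACE severity. A minimal sketch of the same idea (the failing division is just an illustration):

from loguru import logger

try:
    1 / 0
except ZeroDivisionError as error:
    logger.warning(f"Computation failed: {error}")
    # the traceback is logged at TRACE severity, so only sinks configured for TRACE will show it
    logger.opt(exception=True).trace(error)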
github kdexd / probnmn-clevr / probnmn / evaluators / program_prior_evaluator.py
        eval_metrics = super().evaluate(num_batches)

        # ----------------------------------------------------------------------------------------
        # PRINT MODEL PREDICTIONS FOR FIVE EXAMPLES (OF FIRST BATCH)
        # ----------------------------------------------------------------------------------------
        self._program_prior.eval()
        for batch in self._dataloader:
            for key in batch:
                batch[key] = batch[key].to(self._device)
            break

        with torch.no_grad():
            output_dict = self._do_iteration(batch)["program_prior"]

        logger.info("\n")
        for inp, out in zip(batch["program"][:5], output_dict["predictions"][:5]):
            # Print only first five time-steps, these sequences can be really long.
            input_program = " ".join(
                self._vocabulary.get_token_from_index(i.item(), "programs") for i in inp[:6]
            )
            output_program = " ".join(
                self._vocabulary.get_token_from_index(o.item(), "programs") for o in out[:6]
            )
            logger.info(f"INPUT PROGRAM: {input_program} ...")
            logger.info(f"OUTPUT PROGRAM: {output_program} ...")
            logger.info("-" * 60)

        self._program_prior.train()
        # ----------------------------------------------------------------------------------------

        return eval_metrics
github aiogram / bot / app / services / healthcheck.py
async def on_startup(dispatcher: Dispatcher):
    from app.utils.executor import runner

    logger.info("Setup healthcheck")

    health.add_check(check_redis)
    health.add_check(check_postgres)
    health.add_check(check_webhook)
    runner.web_app.router.add_get("/healthcheck", health)
github httprunner / httprunner / httprunner / v3 / api.py
    def _run_suite(self, prepared_testcases: List[unittest.TestSuite]) -> List[TestCaseSummary]:
        """ run prepared testcases
        """
        tests_results: List[TestCaseSummary] = []

        for index, testcase in enumerate(prepared_testcases):
            log_handler = None
            if self.save_tests:
                logs_file_abs_path = utils.prepare_log_file_abs_path(
                    self.test_path, f"testcase_{index+1}.log"
                )
                log_handler = logger.add(logs_file_abs_path, level="DEBUG")

            logger.info(f"Start to run testcase: {testcase.config.name}")

            result = self.unittest_runner.run(testcase)
            testcase_summary = report.get_summary(result)
            testcase_summary.in_out.vars = testcase.config.variables
            testcase_summary.in_out.out = testcase.config.export

            if self.save_tests and log_handler:
                logger.remove(log_handler)
                logs_file_abs_path = utils.prepare_log_file_abs_path(
                    self.test_path, f"testcase_{index+1}.log"
                )
                testcase_summary.log = logs_file_abs_path

            if result.wasSuccessful():
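
The per-testcase log file above relies on logger.add() returning a handler id that can later be passed to logger.remove() to detach only that sink; a minimal standalone sketch (the file name is illustrative):

from loguru import logger

handler_id = logger.add("testcase_1.log", level="DEBUG")  # add() returns an id for this sink
logger.info("recorded in testcase_1.log as well as any other active sinks")
logger.remove(handler_id)  # detaches only this sink; other handlers keep working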
github MechWolf / MechWolf / mechwolf / __init__.py
import nest_asyncio

nest_asyncio.apply()

# to avoid circular import
from .core.apparatus import Apparatus
from .core.protocol import Protocol
from .components import *
from .core.experiment import Experiment

from . import zoo
from . import plugins

# deactivate logging (see https://loguru.readthedocs.io/en/stable/overview.html#suitable-for-scripts-and-libraries)
from loguru import logger

logger.remove()
logger.level("SUCCESS", icon="✅")
logger.level("ERROR", icon="❌")
logger.level("TRACE", icon="🔍")
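
Because loguru exposes a single global logger, a library that calls logger.remove() like this stays silent until the application adds a sink of its own. A minimal sketch of what application code might do after importing such a library (the level choice is illustrative):

import sys

from loguru import logger

# Re-attach a sink so messages emitted by the library (and the app) become visible again
logger.add(sys.stderr, level="INFO")
logger.success("logging is active again")  # SUCCESS is a built-in loguru level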