How to use the bentoml.config function in bentoml

To help you get started, we’ve selected a few bentoml examples based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github bentoml / BentoML / bentoml / repository / __init__.py View on Github external
def __init__(self, base_url=None):
        """Initialize the repository with an S3 or local filesystem backend.

        base_url: storage location for saved bundles; when omitted, the
        ``default_repository_base_url`` from the BentoML config is used.
        """
        # Resolve the storage location, falling back to the configured default.
        resolved = config().get('default_repository_base_url') if base_url is None else base_url

        # The URL scheme decides which backend implementation handles storage.
        backend = _S3BentoRepository if is_s3_url(resolved) else _LocalBentoRepository
        self._repo = backend(resolved)
github bentoml / BentoML / bentoml / server / middlewares.py View on Github external
def __init__(self, app, bento_service):
        """Attach Prometheus metrics for *bento_service* to the WSGI *app*.

        NOTE(review): this snippet is truncated — the final ``Gauge``
        construction is cut off mid-call in this view.
        """
        self.app = app
        self.bento_service = bento_service

        # Imported lazily so prometheus_client is only required when this
        # middleware is actually instantiated.
        from prometheus_client import Histogram, Counter, Gauge

        service_name = self.bento_service.name
        # Metric namespace comes from the [instrument] section of the config.
        namespace = config('instrument').get('default_namespace')

        # Per-request latency histogram, labelled by endpoint/version/status.
        self.metrics_request_duration = Histogram(
            name=service_name + '_request_duration_seconds',
            documentation=service_name + " API HTTP request duration in seconds",
            namespace=namespace,
            labelnames=['endpoint', 'service_version', 'http_response_code'],
        )
        # Monotonic counter of handled requests.  (The "Totoal" typo is in a
        # runtime string and deliberately left untouched here.)
        self.metrics_request_total = Counter(
            name=service_name + "_request_total",
            documentation='Totoal number of HTTP requests',
            namespace=namespace,
            labelnames=['endpoint', 'service_version', 'http_response_code'],
        )
        # Gauge of requests currently in flight (construction truncated).
        self.metrics_request_in_progress = Gauge(
            name=service_name + "_request_in_progress",
            documentation='Totoal number of HTTP requests in progress now',
github bentoml / BentoML / bentoml / server / bento_api_server.py View on Github external
And user defined BentoServiceAPI list into flask routes, e.g.:
        /classify
        /predict
        """

        # Root index page and machine-readable API documentation.
        self.app.add_url_rule("/", "index", self.index_view_func)
        self.app.add_url_rule(
            "/docs.json", "docs", partial(self.docs_view_func, self.bento_service)
        )
        # Health-check endpoint (conventional /healthz probe).
        self.app.add_url_rule("/healthz", "healthz", self.healthz_view_func)

        # Optional endpoints gated by flags in the [apiserver] config section.
        if config("apiserver").getboolean("enable_metrics"):
            self.app.add_url_rule("/metrics", "metrics", self.metrics_view_func)

        if config("apiserver").getboolean("enable_feedback"):
            self.app.add_url_rule(
                "/feedback",
                "feedback",
                partial(self.feedback_view_func, self.bento_service),
                methods=["POST", "GET"],
            )

        # Register one Flask route per user-defined BentoServiceAPI.
        self.setup_bento_service_api_routes()
github bentoml / BentoML / bentoml / utils / log.py View on Github external
def get_logging_config_dict(logging_level, base_log_directory):
    """Build a ``logging.config.dictConfig``-style dict from BentoML config.

    NOTE(review): this snippet is truncated — the returned dict is cut off
    after the "formatters" section.
    """
    conf = config("logging")  # proxy to logging section in bentoml config file

    # Console and development formatter strings.
    LOG_FORMAT = conf.get("LOG_FORMAT")
    DEV_LOG_FORMAT = conf.get("DEV_LOG_FORMAT")

    # Prediction log file name and its JSON-format flag.
    PREDICTION_LOG_FILENAME = conf.get("prediction_log_filename")
    PREDICTION_LOG_JSON_FORMAT = conf.get("prediction_log_json_format")

    # Feedback log file name and its JSON-format flag.
    FEEDBACK_LOG_FILENAME = conf.get("feedback_log_filename")
    FEEDBACK_LOG_JSON_FORMAT = conf.get("feedback_log_json_format")

    return {
        "version": 1,
        # Keep loggers created before this config is applied.
        "disable_existing_loggers": False,
        "formatters": {
            "console": {"format": LOG_FORMAT},
            "dev": {"format": DEV_LOG_FORMAT},
github bentoml / BentoML / bentoml / deployment / sagemaker / __init__.py View on Github external
):
    # NOTE(review): fragment of _create_sagemaker_model — the ``def`` line
    # with its parameter list is outside this view.
    execution_role_arn = get_arn_role_from_current_aws_user()

    # SageMaker CreateModel payload: a single primary container running the
    # BentoML API server image pushed to ECR.
    sagemaker_model_info = {
        "ModelName": sagemaker_model_name,
        "PrimaryContainer": {
            "ContainerHostname": sagemaker_model_name,
            "Image": ecr_image_path,
            "Environment": {
                "API_NAME": bento_service_api_name,
                # Server timeout comes from the BentoML [apiserver] config.
                "BENTO_SERVER_TIMEOUT": config().get('apiserver', 'default_timeout'),
            },
        },
        "ExecutionRoleArn": execution_role_arn,
    }
    # Only propagate a worker count when the config sets a positive value.
    default_worker_count = config().getint(
        'apiserver', 'default_gunicorn_workers_count'
    )
    if default_worker_count > 0:
        sagemaker_model_info['PrimaryContainer']['Environment'][
            'BENTO_SERVER_WORKERS'
        ] = default_worker_count

    logger.debug("Creating sagemaker model %s", sagemaker_model_name)
    try:
        create_model_response = sagemaker_client.create_model(**sagemaker_model_info)
    except ClientError as e:
        # Re-raise AWS client failures as a BentoML-level exception.
        raise _aws_client_error_to_bentoml_exception(
            e, "Failed to create sagemaker model"
        )
    logger.debug("AWS create model response: %s", create_model_response)
github bentoml / BentoML / bentoml / server / bento_api_server.py View on Github external
def api_func_wrapper():
            # NOTE(review): inner-closure fragment — the enclosing scope
            # (api, self, request, request_id) and the tail of the error
            # response are outside this view.
            image_paths = []
            if not config('logging').getboolean('disable_logging_image'):
                image_paths = self.log_image(request, request_id)

            # _request_to_json parses request as JSON; in case errors, it raises
            # a 400 exception. (consider 4xx before 5xx.)
            request_for_log = _request_to_json(request)

            # handle_request may raise 4xx or 5xx exception.
            try:
                response = api.handle_request(request)
            except BentoMLException as e:
                self.log_exception(sys.exc_info())

                # Client errors — except auth failures (401/403) — are turned
                # into a JSON error response for the caller.
                if 400 <= e.status_code < 500 and e.status_code not in (401, 403):
                    response = make_response(
                        jsonify(
                            message="BentoService error handling API request: %s"
github bentoml / BentoML / bentoml / deployment / sagemaker / __init__.py View on Github external
def _create_sagemaker_model(
    sagemaker_client, sagemaker_model_name, ecr_image_path, bento_service_api_name
):
    """Register a SageMaker model backed by the BentoML server ECR image.

    NOTE(review): this snippet is truncated — the ``except ClientError``
    handler body is cut off in this view.
    """
    execution_role_arn = get_arn_role_from_current_aws_user()

    # SageMaker CreateModel payload: a single primary container running the
    # BentoML API server image pushed to ECR.
    sagemaker_model_info = {
        "ModelName": sagemaker_model_name,
        "PrimaryContainer": {
            "ContainerHostname": sagemaker_model_name,
            "Image": ecr_image_path,
            "Environment": {
                "API_NAME": bento_service_api_name,
                # Server timeout comes from the BentoML [apiserver] config.
                "BENTO_SERVER_TIMEOUT": config().get('apiserver', 'default_timeout'),
            },
        },
        "ExecutionRoleArn": execution_role_arn,
    }
    # Only propagate a worker count when the config sets a positive value.
    default_worker_count = config().getint(
        'apiserver', 'default_gunicorn_workers_count'
    )
    if default_worker_count > 0:
        sagemaker_model_info['PrimaryContainer']['Environment'][
            'BENTO_SERVER_WORKERS'
        ] = default_worker_count

    logger.debug("Creating sagemaker model %s", sagemaker_model_name)
    try:
        create_model_response = sagemaker_client.create_model(**sagemaker_model_info)
    except ClientError as e:
github bentoml / BentoML / bentoml / utils / usage_stats.py View on Github external
def track(event_type, event_properties=None):
    """Report a usage-analytics event unless tracking is disabled.

    event_type: name of the event being reported.
    event_properties: optional dict of extra attributes; mutated in place
    when provided.
    """
    # Honor the user's opt-out flag before doing any work.
    if not config().getboolean("core", "usage_tracking"):
        return

    props = event_properties if event_properties is not None else {}

    bundle_key = 'bento_service_bundle_path'
    if bundle_key in props:
        # Expand the bundle into service properties, then drop the raw
        # filesystem path so it is never sent with the event.
        _get_bento_service_event_properties_from_bundle_path(props[bundle_key], props)
        del props[bundle_key]

    # Attach interpreter / library / OS information to every event.
    props['py_version'] = PY_VERSION
    props["bento_version"] = BENTOML_VERSION
    props["platform_info"] = PLATFORM

    return _send_amplitude_event(event_type, props)