How to use kfserving - common examples

To help you get started, we’ve selected a few kfserving examples based on popular ways the library is used in public projects.

github kubeflow / pipelines / components / kubeflow / kfserving / src / kfservingdeployer.py View on Github
def EndpointSpec(framework, storage_uri):
    if framework == 'tensorflow':
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(tensorflow=V1alpha2TensorflowSpec(storage_uri=storage_uri)))
    elif framework == 'pytorch':
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(pytorch=V1alpha2PyTorchSpec(storage_uri=storage_uri)))
    elif framework == 'sklearn':
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(sklearn=V1alpha2SKLearnSpec(storage_uri=storage_uri)))
    elif framework == 'xgboost':
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(xgboost=V1alpha2XGBoostSpec(storage_uri=storage_uri)))
    elif framework == 'onnx':
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(onnx=V1alpha2ONNXSpec(storage_uri=storage_uri)))
    elif framework == 'tensorrt':
        return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(tensorrt=V1alpha2TensorRTSpec(storage_uri=storage_uri)))
    else:
        raise("Error: No matching framework: " + framework)
github kubeflow / pipelines / components / kubeflow / kfserving / src / kfservingdeployer.py View on Github
def customEndpointSpec(custom_model_spec):
    env = (
        [client.V1EnvVar(name=i['name'], value=i['value']) for i in custom_model_spec['env']]
        if custom_model_spec.get('env')
        else None
    )
    ports = (
        [client.V1ContainerPort(container_port=int(custom_model_spec['port']))]
        if custom_model_spec.get('port')
        else None
    )
    containerSpec = client.V1Container(
        name=custom_model_spec.get('name', 'custom-container'),
        image=custom_model_spec['image'],
        env=env,
        ports=ports,
        command=custom_model_spec.get('command', None),
        args=custom_model_spec.get('args', None),
        image_pull_policy=custom_model_spec.get('image_pull_policy', None),
        working_dir=custom_model_spec.get('working_dir', None)
    )
    return V1alpha2EndpointSpec(predictor=V1alpha2PredictorSpec(custom=V1alpha2CustomSpec(container=containerSpec)))
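The custom_model_spec argument is a plain dict describing the serving container. A hedged sketch of one such dict (the image, port, and env values are placeholders, not from the source):

custom_model_spec = {
    'name': 'my-custom-model',  # optional; defaults to 'custom-container'
    'image': 'docker.io/example/my-model-server:latest',
    'port': '8080',
    'env': [{'name': 'MODEL_NAME', 'value': 'my-model'}],
}
endpoint = customEndpointSpec(custom_model_spec)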
github SeldonIO / alibi-detect / integrations / samples / kfserving / ad-signs / signs.py View on Github
return { "predictions":  preds.argmax(axis=1).tolist() }
        except Exception as e:
            raise Exception("Failed to predict %s" % e)

DEFAULT_MODEL_NAME = "model"
DEFAULT_LOCAL_MODEL_DIR = "/tmp/model"

parser = argparse.ArgumentParser(parents=[kfserving.kfserver.parser])
parser.add_argument('--model_dir', required=True,
                    help='A URI pointer to the model binary')
args, _ = parser.parse_known_args()

if __name__ == "__main__":
    model = SignsModel("signs", args.model_dir)
    # Set number of workers to 1 as model is quite large
    kfserving.KFServer(workers=1).start([model])
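The class whose predict tail appears above follows kfserving's custom-model pattern: subclass kfserving.KFModel, implement load() and predict(), and hand the instance to KFServer. A minimal sketch under that assumption (the loading helper is hypothetical, not taken from signs.py; the MNIST sample below follows the same pattern):

import kfserving

class SignsModel(kfserving.KFModel):
    def __init__(self, name, model_dir):
        super().__init__(name)
        self.model_dir = model_dir
        self.ready = False

    def load(self):
        # hypothetical: restore the trained classifier from model_dir
        self.model = load_my_classifier(self.model_dir)  # placeholder helper
        self.ready = True

    def predict(self, request):
        try:
            # 'instances' is the standard KFServing request payload key
            preds = self.model.predict(request["instances"])
            return {"predictions": preds.argmax(axis=1).tolist()}
        except Exception as e:
            raise Exception("Failed to predict %s" % e)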
github SeldonIO / alibi-detect / integrations / samples / kfserving / ad-mnist / mnist.py View on Github
return { "predictions":  preds.argmax(axis=1).tolist() }
        except Exception as e:
            raise Exception("Failed to predict %s" % e)

DEFAULT_MODEL_NAME = "model"
DEFAULT_LOCAL_MODEL_DIR = "/tmp/model"

parser = argparse.ArgumentParser(parents=[kfserving.kfserver.parser])
parser.add_argument('--model_dir', required=True,
                    help='A URI pointer to the model binary')
args, _ = parser.parse_known_args()

if __name__ == "__main__":
    model = MnistModel("mnist", args.model_dir)
    # Set number of workers to 1 as model is quite large
    kfserving.KFServer(workers=1).start([model])
github kubeflow / pipelines / components / kubeflow / kfserving / src / kfservingdeployer.py View on Github
def InferenceService(metadata, default_model_spec, canary_model_spec=None, canary_model_traffic=None):
    return V1alpha2InferenceService(
        api_version=constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION,
        kind=constants.KFSERVING_KIND,
        metadata=metadata,
        spec=V1alpha2InferenceServiceSpec(
            default=default_model_spec,
            canary=canary_model_spec,
            canary_traffic_percent=canary_model_traffic))
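Putting the helpers together: a hedged sketch of deploying a service with a 10% canary, reusing the EndpointSpec and InferenceService helpers above (the name, namespace, and storage URIs are placeholders):

from kubernetes import client
from kfserving import KFServingClient

metadata = client.V1ObjectMeta(name='flowers-sample', namespace='kubeflow')
default_spec = EndpointSpec('tensorflow', 'gs://my-bucket/models/flowers/v1')
canary_spec = EndpointSpec('tensorflow', 'gs://my-bucket/models/flowers/v2')
isvc = InferenceService(metadata, default_spec,
                        canary_model_spec=canary_spec,
                        canary_model_traffic=10)

# Assumes kubeconfig or in-cluster credentials are available
KFServingClient().create(isvc)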