How to use the lightgbm.__version__ attribute in lightgbm

To help you get started, we've selected a few lightgbm examples based on popular ways lightgbm.__version__ is used in public projects. The attribute is simply the version string of the installed package, so it appears wherever code needs to record, pin, or check the LightGBM release in use.

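Before digging into the project snippets below, here is the basic pattern as a minimal sketch: lightgbm.__version__ is a plain string attribute set when the package is imported, so reading it requires nothing more than the import itself.

import lightgbm

# __version__ is a version string such as "2.3.1"; the exact value depends on the installed release
print("lightgbm:", lightgbm.__version__)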

github mljar / mljar-supervised / supervised / models / learner_lightgbm.py (View on Github)
def __init__(self, params):
        super(LightgbmLearner, self).__init__(params)
        self.library_version = lgb.__version__
        self.model_file = self.uid + ".lgbm.model"
        self.model_file_path = os.path.join(storage_path, self.model_file)

        self.rounds = additional.get("one_step", 50)
        self.max_iters = additional.get("max_steps", 3)
        self.learner_params = {
            "boosting_type": "gbdt",
            "objective": "binary",
            "metric": self.params.get("metric", "binary_logloss"),
            "num_threads": multiprocessing.cpu_count(),
            "num_leaves": self.params.get("num_leaves", 16),
            "learning_rate": self.params.get("learning_rate", 0.01),
            "feature_fraction": self.params.get("feature_fraction", 0.7),
            "bagging_fraction": self.params.get("bagging_fraction", 0.7),
            "bagging_freq": self.params.get("bagging_freq", 1),
            "verbose": -1,

github onnx / sklearn-onnx / docs / examples / plot_pipeline_lightgbm.py (View on Github)
image = plt.imread("pipeline.dot.png")
fig, ax = plt.subplots(figsize=(40, 20))
ax.imshow(image)
ax.axis('off')

#################################
# **Versions used for this example**

print("numpy:", numpy.__version__)
print("scikit-learn:", sklearn.__version__)
print("onnx: ", onnx.__version__)
print("onnxruntime: ", onnxruntime.__version__)
print("skl2onnx: ", skl2onnx.__version__)
print("onnxmltools: ", onnxmltools.__version__)
print("lightgbm: ", lightgbm.__version__)
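
Printing the version of every package in the chain makes the example reproducible: anyone re-running it can compare their toolchain against the one that generated the documentation. A compact variant, assuming numpy, scikit-learn and lightgbm are installed:

import numpy, sklearn, lightgbm

# collect the versions into a single dict so they can be logged or embedded in a report
versions = {m.__name__: m.__version__ for m in (numpy, sklearn, lightgbm)}
print(versions)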

github nubank / fklearn / src / fklearn / training / regression.py (View on Github)
            shap_output = {"shap_values": shap_values,
                           "shap_expected_value": np.repeat(shap_expected_value, len(shap_values))}

            col_dict = merge(col_dict, shap_output)

        return new_df.assign(**col_dict)

    p.__doc__ = learner_pred_fn_docstring("lgbm_regression_learner", shap=True)

    log = {'lgbm_regression_learner': {
        'features': features,
        'target': target,
        'prediction_column': prediction_column,
        'package': "lightgbm",
        'package_version': lgbm.__version__,
        'parameters': assoc(params, "num_estimators", num_estimators),
        'feature_importance': dict(zip(features, bst.feature_importance().tolist())),
        'training_samples': len(df)},
        'object': bst}

    return p, p(df), log
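
Because the returned log records package_version, a caller can later compare the training-time version against the one installed at scoring time. A sketch of such a check, where log stands in for the dictionary returned above and the version string is only an example:

import warnings
import lightgbm as lgbm

log = {"lgbm_regression_learner": {"package_version": "2.3.1"}}  # stand-in for the learner's log

trained_with = log["lgbm_regression_learner"]["package_version"]
if trained_with != lgbm.__version__:
    warnings.warn("model trained with lightgbm %s but %s is installed" % (trained_with, lgbm.__version__))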

github mlflow / mlflow / mlflow / lightgbm.py (View on Github)
    # Save a LightGBM model
    lgb_model.save_model(model_data_path)

    conda_env_subpath = "conda.yaml"
    if conda_env is None:
        conda_env = get_default_conda_env()
    elif not isinstance(conda_env, dict):
        with open(conda_env, "r") as f:
            conda_env = yaml.safe_load(f)
    with open(os.path.join(path, conda_env_subpath), "w") as f:
        yaml.safe_dump(conda_env, stream=f, default_flow_style=False)

    pyfunc.add_to_model(mlflow_model, loader_module="mlflow.lightgbm",
                        data=model_data_subpath, env=conda_env_subpath)
    mlflow_model.add_flavor(FLAVOR_NAME, lgb_version=lgb.__version__, data=model_data_subpath)
    mlflow_model.save(os.path.join(path, "MLmodel"))
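
Recording lgb_version in the flavor configuration means the saved MLmodel file carries the LightGBM release that produced the artifact, so the version can be inspected later without having to load the model itself.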

github mlflow / mlflow / mlflow / lightgbm.py (View on Github)
def get_default_conda_env():
    """
    :return: The default Conda environment for MLflow Models produced by calls to
             :func:`save_model()` and :func:`log_model()`.
    """
    import lightgbm as lgb

    return _mlflow_conda_env(
        additional_conda_deps=None,
        # LightGBM is not yet available via the default conda channels, so we install it via pip
        additional_pip_deps=[
            "lightgbm=={}".format(lgb.__version__),
        ],
        additional_conda_channels=None)
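
Pinning lightgbm=={version} ensures that the environment MLflow recreates matches the exact release used during training. The same pinning idea without MLflow's helpers, as a small sketch (the output file name is illustrative):

import lightgbm as lgb

# write a pip requirement that pins the currently installed release
with open("requirements-lightgbm.txt", "w") as f:
    f.write("lightgbm=={}\n".format(lgb.__version__))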

github interpretml / interpret-community / python / interpret_community / mimic / models / lightgbm_model.py (View on Github)
from .tree_model_utils import _explain_local_tree_surrogate, _expected_values_tree_surrogate
from ...common.constants import ShapValuesOutput, LightGBMSerializationConstants, \
    ExplainableModelType, Extension
import json
import warnings
import logging
import inspect

with warnings.catch_warnings():
    warnings.filterwarnings('ignore', 'Starting from version 2.2.1', UserWarning)
    import shap
    try:
        from lightgbm import LGBMRegressor, LGBMClassifier, Booster
        import lightgbm
        from packaging import version
        if (version.parse(lightgbm.__version__) <= version.parse('2.2.1')):
            print("Using older than supported version of lightgbm, please upgrade to version greater than 2.2.1")
    except ImportError:
        print("Could not import lightgbm, required if using LGBMExplainableModel")

DEFAULT_RANDOM_STATE = 123
_N_FEATURES = '_n_features'
_N_CLASSES = '_n_classes'


class LGBMExplainableModel(BaseExplainableModel):
    available_explanations = [Extension.GLOBAL, Extension.LOCAL]
    explainer_type = Extension.GLASSBOX

    """LightGBM (fast, high performance framework based on decision tree) explainable model.

    Please see documentation for more details: https://github.com/Microsoft/LightGBM
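
The comparison above goes through packaging's version.parse rather than comparing the raw strings, because plain string comparison mis-orders versions as soon as a component reaches two digits:

from packaging import version

print("2.10.0" > "2.2.1")                                # False: string comparison gets the order wrong
print(version.parse("2.10.0") > version.parse("2.2.1"))  # True: parsed versions compare numerically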

github gilad-rubin / hypster / hypster / core.py (View on Github)
                            estimators.append("XGBLinearClassifier")
                        else:
                            estimators.append("XGBLinearRegressor")
                    if any(item.startswith('tree') for item in model_types):
                        if objective_type == "classification":
                            estimators.append("XGBTreeClassifier")
                        else:
                            estimators.append("XGBTreeRegressor")
            except:
                # TODO: log that xgboost is not installed in the right version
                continue

        if str.startswith(framework, ("lgb", "lightgbm")):
            try:
                import lightgbm as lgb
                lgb_ver = lgb.__version__
                if ge_version(lgb_ver, lgb_min_ver):
                    if any(item.startswith('tree') for item in model_types):
                        if objective_type == "classification":
                            estimators.append("LGBClassifier")
                        else:
                            estimators.append("LGBRegressor")
            except:
                # TODO: log that lightgbm is not installed in the right version
                continue

        if str.startswith(framework, ("sklearn", "scikit", "sci-kit")):
            try:
                import sklearn
                sklearn_ver = sklearn.__version__
                if ge_version(sklearn_ver, sklearn_min_ver):
                    if any(item.startswith('linear') for item in model_types):
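
ge_version is a helper defined elsewhere in hypster; a minimal stand-in with the same shape (hypster's actual implementation may differ) could look like this, with "2.3.0" standing in for the required minimum:

from packaging import version
import lightgbm as lgb

def ge_version(installed, minimum):
    # True when the installed version is at least the required minimum
    return version.parse(installed) >= version.parse(minimum)

print(ge_version(lgb.__version__, "2.3.0"))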

github NVIDIA / gbm-bench / runme.py (View on Github)
def print_sys_info(args):
    try:
        import xgboost  # pylint: disable=import-outside-toplevel
        print("Xgboost : %s" % xgboost.__version__)
    except ImportError:
        pass
    try:
        import lightgbm  # pylint: disable=import-outside-toplevel
        print("LightGBM: %s" % lightgbm.__version__)
    except (ImportError, OSError):
        pass
    try:
        import catboost  # pylint: disable=import-outside-toplevel
        print("Catboost: %s" % catboost.__version__)
    except ImportError:
        pass
    print("System  : %s" % sys.version)
    print("#jobs   : %d" % args.cpus)
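
Note that the lightgbm import is guarded against OSError as well as ImportError: the package loads a compiled lib_lightgbm library at import time, and that load can fail (for example when an OpenMP runtime is missing) with an OSError rather than an ImportError.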

github nubank / fklearn / src / fklearn / training / classification.py (View on Github)
                shap_values = list(shap_values)
                shap_output = {"shap_values": shap_values,
                               "shap_expected_value": np.repeat(shap_expected_value, len(shap_values))}

            col_dict = merge(col_dict, shap_output)

        return new_df.assign(**col_dict)

    p.__doc__ = learner_pred_fn_docstring("lgbm_classification_learner", shap=True)

    log = {'lgbm_classification_learner': {
        'features': features,
        'target': target,
        'prediction_column': prediction_column,
        'package': "lightgbm",
        'package_version': lgbm.__version__,
        'parameters': assoc(params, "num_estimators", num_estimators),
        'feature_importance': dict(zip(features, bst.feature_importance().tolist())),
        'training_samples': len(df)},
        'object': bst}

    return p, p(df), log