How to use the eli5.base.Explanation class in eli5

To help you get started, we’ve selected a few eli5.base.Explanation examples, based on popular ways the class is used in public projects.
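
Explanation is the result object that eli5’s explain_weights and explain_prediction entry points return, and the excerpts below construct it directly with keyword arguments. As a minimal hand-rolled sketch (not taken from the projects below, with illustrative estimator and feature names), you can build one yourself and render it with eli5.formatters:

from eli5.base import Explanation, TargetExplanation, FeatureWeights, FeatureWeight
from eli5.formatters import format_as_text

# A hand-built explanation with one target and a few feature weights.
expl = Explanation(
    estimator='LogisticRegression()',
    method='linear model',
    targets=[TargetExplanation(
        target='y=1',
        feature_weights=FeatureWeights(
            pos=[FeatureWeight('age', 0.42), FeatureWeight('<BIAS>', 0.10)],
            neg=[FeatureWeight('height', -0.07)],
        ),
    )],
)
print(format_as_text(expl))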


github TeamHG-Memex / eli5 / eli5 / sklearn / explain_weights.py
def explain_libsvm_linear_classifier_weights(clf, *args, **kwargs):
    if clf.kernel != 'linear':
        return Explanation(
            estimator=repr(clf),
            error="only kernel='linear' is currently supported for "
                  "libsvm-based classifiers",
        )
    if len(getattr(clf, 'classes_', [])) > 2:
        return Explanation(
            estimator=repr(clf),
            error="only binary libsvm-based classifiers are supported",
        )
    return explain_linear_classifier_weights(clf, *args, **kwargs)
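
A rough usage sketch (my own example, not part of the excerpt): calling the public eli5.explain_weights entry point on an SVC with a non-linear kernel goes through this fallback, so the caller gets an Explanation with its error field set rather than an exception:

import eli5
from sklearn.datasets import make_classification
from sklearn.svm import SVC

X, y = make_classification(n_samples=100, n_features=5, random_state=0)
clf = SVC(kernel='rbf').fit(X, y)

expl = eli5.explain_weights(clf)
print(expl.estimator)   # repr() of the classifier
print(expl.error)       # "only kernel='linear' is currently supported ..."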
github TeamHG-Memex / eli5 / eli5 / sklearn / explain_weights.py
        feature_names=feature_names,
        feature_filter=feature_filter,
        feature_re=feature_re,
    )
    _extra_caveats = "\n" + HASHING_CAVEATS if is_invhashing(vec) else ''

    def _features(target_id):
        coef = get_coef(reg, target_id, scale=coef_scale)
        if flt_indices is not None:
            coef = coef[flt_indices]
        return get_top_features(feature_names, coef, top)

    display_names = get_target_display_names(get_default_target_names(reg),
                                             target_names, targets)
    if is_multitarget_regressor(reg):
        return Explanation(
            targets=[
                TargetExplanation(
                    target=target_name,
                    feature_weights=_features(target_id)
                )
                for target_id, target_name in display_names
                ],
            description=DESCRIPTION_REGRESSION_MULTITARGET + _extra_caveats,
            estimator=repr(reg),
            method='linear model',
            is_regression=True,
        )
    else:
        return Explanation(
            targets=[TargetExplanation(
                target=display_names[0][1],
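
From the caller’s side, a sketch of what the multitarget branch produces, assuming a multi-output linear regressor (my example, not from the excerpt): each (target_id, target_name) pair from display_names becomes one TargetExplanation with its own feature weights.

import eli5
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge

X, y = make_regression(n_samples=100, n_features=4, n_targets=2, random_state=0)
reg = Ridge().fit(X, y)

expl = eli5.explain_weights(reg, top=3)
print(expl.method, expl.is_regression)      # 'linear model' True
for target in expl.targets:                 # default names are 'y0', 'y1'
    print(target.target)
    for fw in target.feature_weights.pos:   # top positive coefficients
        print('  %s %+.3f' % (fw.feature, fw.weight))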
github TeamHG-Memex / eli5 / eli5 / lightning.py
def explain_weights_lightning_not_supported(
        estimator, vec=None, top=20, target_names=None,
        targets=None, feature_names=None,
        coef_scale=None):
    return Explanation(
        estimator=repr(estimator),
        error="Error: estimator %r is not supported" % estimator,
    )
github TeamHG-Memex / eli5 / eli5 / lightning.py
def explain_prediction_lightning_not_supported(
        estimator, doc, vec=None, top=None,
        target_names=None, targets=None,
        feature_names=None, vectorized=False,
        coef_scale=None):
    return Explanation(
        estimator=repr(estimator),
        error="Error: estimator %r is not supported" % estimator,
    )
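
Both lightning fallbacks return the same kind of "error Explanation" instead of raising. A hypothetical convenience wrapper (not part of eli5) that turns that convention into an exception could look like this:

import eli5

def explain_weights_or_raise(estimator, **kwargs):
    # Hypothetical helper: surface eli5's "error Explanation" convention as an exception.
    expl = eli5.explain_weights(estimator, **kwargs)
    if expl.error:
        raise ValueError('eli5 cannot explain %r: %s' % (estimator, expl.error))
    return expl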
github TeamHG-Memex / eli5 / eli5 / explain.py
        Keyword arguments. All keyword arguments are passed to
        concrete explain_weights... implementations.

    Returns
    -------
    Explanation
        :class:`~Explanation` result. Use one of the formatting functions from
        :mod:`eli5.formatters` to print it in a human-readable form.

        Explanation instances have repr which works well with
        IPython notebook, but it can be a better idea to use
        :func:`eli5.show_weights` instead of :func:`eli5.explain_weights`
        if you work with IPython: :func:`eli5.show_weights` allows to customize
        formatting without a need to import :mod:`eli5.formatters` functions.
    """
    return Explanation(
        estimator=repr(estimator),
        error="estimator %r is not supported" % estimator,
    )
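
A short sketch of the two workflows this docstring describes (my own example, using a fitted LogisticRegression): explain_weights plus a formatter for scripts and logs, show_weights for IPython/Jupyter.

import eli5
from eli5.formatters import format_as_text
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=100, n_features=5, random_state=0)
clf = LogisticRegression(max_iter=1000).fit(X, y)

expl = eli5.explain_weights(clf, top=5)   # a generic Explanation object
print(format_as_text(expl))               # console-friendly rendering

# In an IPython/Jupyter notebook the HTML rendering is usually nicer:
# eli5.show_weights(clf, top=5)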
github TeamHG-Memex / eli5 / eli5 / sklearn / explain_prediction.py
is already vectorized.
    """
    vec, feature_names = handle_vec(clf, doc, vec, vectorized, feature_names)
    X = get_X(doc, vec=vec, vectorized=vectorized, to_dense=True)

    proba = predict_proba(clf, X)
    score, = clf.decision_function(X)

    if has_intercept(clf):
        X = add_intercept(X)
    x = get_X0(X)

    feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re, x)

    res = Explanation(
        estimator=repr(clf),
        method='linear model',
        targets=[],
    )
    assert res.targets is not None

    _weights = _linear_weights(clf, x, top, feature_names, flt_indices)
    classes = getattr(clf, "classes_", ["-1", "1"])  # OneClassSVM support
    display_names = get_target_display_names(classes, target_names,
                                             targets, top_targets, score)

    if is_multiclass_classifier(clf):
        for label_id, label in display_names:
            target_expl = TargetExplanation(
                target=label,
                feature_weights=_weights(label_id),
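
An end-to-end sketch for this multiclass branch (my example, not from the repo): the returned Explanation has one TargetExplanation per class, each carrying a decision score, a probability, and its own feature weights.

import eli5
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

X, y = load_iris(return_X_y=True)
clf = LogisticRegression(max_iter=1000).fit(X, y)

expl = eli5.explain_prediction(clf, X[0])
print(expl.method)                 # 'linear model'
for target in expl.targets:        # one entry per class for a multiclass model
    print(target.target, target.score, target.proba)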
github TeamHG-Memex / eli5 / eli5 / sklearn / explain_prediction.py
        feature_names.bias_name = '<BIAS>'

    score, = reg.predict(X)
    num_targets = getattr(reg, 'n_outputs_', 1)
    is_multitarget = num_targets > 1
    feature_weights = _trees_feature_weights(reg, X, feature_names, num_targets)
    x = get_X0(add_intercept(X))
    flt_feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re, x)

    def _weights(label_id, scale=1.0):
        weights = feature_weights[:, label_id]
        return get_top_features_filtered(x, flt_feature_names, flt_indices,
                                         weights, top, scale)

    res = Explanation(
        estimator=repr(reg),
        method='decision path',
        description=(DESCRIPTION_TREE_REG_MULTITARGET if is_multitarget
                     else DESCRIPTION_TREE_REG),
        targets=[],
        is_regression=True,
    )
    assert res.targets is not None

    names = get_default_target_names(reg, num_targets=num_targets)
    display_names = get_target_display_names(names, target_names, targets,
                                             top_targets, score)

    if is_multitarget:
        for label_id, label in display_names:
            target_expl = TargetExplanation(
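
And a usage sketch for the single-output case of this decision-path explanation (my example, not part of the excerpt):

import eli5
from sklearn.datasets import make_regression
from sklearn.tree import DecisionTreeRegressor

X, y = make_regression(n_samples=200, n_features=4, random_state=0)
reg = DecisionTreeRegressor(max_depth=3, random_state=0).fit(X, y)

expl = eli5.explain_prediction(reg, X[0])
print(expl.method)            # 'decision path'
target = expl.targets[0]      # a single 'y' target for single-output regressors
print(target.score)           # the value the tree predicts for this sample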
github TeamHG-Memex / eli5 / eli5 / keras / explain_prediction.py
* ``score`` value for predicted class.
    """
    if image is None:
        image = _extract_image(doc)
    _validate_doc(model, doc)
    activation_layer = _get_activation_layer(model, layer)

    # TODO: maybe do the sum / loss calculation in this function and pass it to gradcam.
    # This would be consistent with what is done in
    # https://github.com/ramprs/grad-cam/blob/master/misc/utils.lua
    # and https://github.com/ramprs/grad-cam/blob/master/classification.lua
    values = gradcam_backend(model, doc, targets, activation_layer)
    weights, activations, grads, predicted_idx, predicted_val = values
    heatmap = gradcam(weights, activations)

    return Explanation(
        model.name,
        description=DESCRIPTION_KERAS,
        error='',
        method='Grad-CAM',
        image=image,
        targets=[TargetExplanation(
            predicted_idx,
            score=predicted_val, # for now we keep the prediction in the .score field (not .proba)
            heatmap=heatmap, # 2D [0, 1] numpy array
        )],
        is_regression=False, # might be relevant later when explaining for regression tasks
        highlight_spaces=None, # might be relevant later when explaining text models
    )
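
A rough usage sketch, assuming the standalone keras package (which is what eli5's Grad-CAM support targets) and a pretrained MobileNetV2 as a convenient image classifier; the model choice, input shape and dummy input are my assumptions, not part of the excerpt:

import numpy as np
import eli5
from keras.applications.mobilenet_v2 import MobileNetV2

model = MobileNetV2(weights='imagenet')
doc = np.zeros((1, 224, 224, 3), dtype='float32')   # a batch with one (dummy) image

expl = eli5.explain_prediction(model, doc)
target = expl.targets[0]
print(target.target, target.score)   # predicted class index and its score
print(target.heatmap.shape)          # 2D Grad-CAM localization map in [0, 1]
# In a notebook, eli5.show_prediction(model, doc) overlays the heatmap on the image.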
github TeamHG-Memex / eli5 / eli5 / sklearn / explain_prediction.py
    else:
        score = None

    is_multiclass = clf.n_classes_ > 2
    feature_weights = _trees_feature_weights(
        clf, X, feature_names, clf.n_classes_)
    x = get_X0(add_intercept(X))
    flt_feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re, x)

    def _weights(label_id, scale=1.0):
        weights = feature_weights[:, label_id]
        return get_top_features_filtered(x, flt_feature_names, flt_indices,
                                         weights, top, scale)

    res = Explanation(
        estimator=repr(clf),
        method='decision path',
        targets=[],
        description=(DESCRIPTION_TREE_CLF_MULTICLASS if is_multiclass
                     else DESCRIPTION_TREE_CLF_BINARY),
    )
    assert res.targets is not None

    display_names = get_target_display_names(
        clf.classes_, target_names, targets, top_targets,
        score=score if score is not None else proba)

    if is_multiclass:
        for label_id, label in display_names:
            target_expl = TargetExplanation(
                target=label,
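
For the multiclass branch above, a sketch of my own that also exercises the top_targets argument this excerpt passes through to get_target_display_names:

import eli5
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier

X, y = load_iris(return_X_y=True)
clf = RandomForestClassifier(n_estimators=20, random_state=0).fit(X, y)

# keep only the two most probable classes in the explanation
expl = eli5.explain_prediction(clf, X[0], top_targets=2)
print(expl.method)                     # 'decision path'
for target in expl.targets:
    print(target.target, target.proba)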
github TeamHG-Memex / eli5 / eli5 / sklearn / explain_prediction.py
    if isinstance(reg, (SVR, NuSVR)) and reg.kernel != 'linear':
        return explain_prediction_sklearn_not_supported(reg, doc)

    vec, feature_names = handle_vec(reg, doc, vec, vectorized, feature_names)
    X = get_X(doc, vec=vec, vectorized=vectorized, to_dense=True)

    score, = reg.predict(X)

    if has_intercept(reg):
        X = add_intercept(X)
    x = get_X0(X)

    feature_names, flt_indices = feature_names.handle_filter(
        feature_filter, feature_re, x)

    res = Explanation(
        estimator=repr(reg),
        method='linear model',
        targets=[],
        is_regression=True,
    )
    assert res.targets is not None

    _weights = _linear_weights(reg, x, top, feature_names, flt_indices)
    names = get_default_target_names(reg)
    display_names = get_target_display_names(names, target_names, targets,
                                             top_targets, score)

    if is_multitarget_regressor(reg):
        for label_id, label in display_names:
            target_expl = TargetExplanation(
                target=label,
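
Finally, a sketch for the single-target linear regression case (my example), also showing the feature_names argument that this excerpt filters before building the explanation:

import eli5
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression

X, y = make_regression(n_samples=100, n_features=3, random_state=0)
reg = LinearRegression().fit(X, y)

expl = eli5.explain_prediction(reg, X[0], feature_names=['age', 'height', 'weight'])
print(expl.is_regression, expl.method)   # True 'linear model'
target = expl.targets[0]                 # 'y' for a single-output regressor
print(target.score)                      # the model's prediction for this row
for fw in target.feature_weights.pos:
    print(' ', fw.feature, round(fw.weight, 3))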