How to use the captum.attr._utils.attribution.GradientAttribution class in captum

To help you get started, we’ve selected a few captum examples, based on popular ways it is used in public projects.

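Every snippet below follows the same pattern: a GradientAttribution subclass is constructed from a forward function (or the model itself) and then queried through its attribute method. Here is a minimal sketch of that pattern, using the Saliency subclass and a made-up two-layer model purely for illustration:

import torch
import torch.nn as nn
from captum.attr import Saliency  # any GradientAttribution subclass works the same way

# Hypothetical toy model, used only to illustrate the calling convention.
model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 3))
inputs = torch.randn(4, 8, requires_grad=True)

saliency = Saliency(model)
attributions = saliency.attribute(inputs, target=0)  # gradient magnitudes of the class-0 output w.r.t. inputs
print(attributions.shape)  # matches inputs: torch.Size([4, 8])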

github pytorch / captum / captum / attr / _core / neuron / neuron_gradient_shap.py
    def __init__(self, forward_func, layer, device_ids=None):
        r"""
        Args:

            forward_func (callable):  The forward function of the model or any
                          modification of it
            layer (torch.nn.Module): Layer for which neuron attributions are computed.
                          The output size of the attribute method matches the
                          dimensions of the inputs or outputs of the neuron with
                          index `neuron_index` in this layer, depending on whether
                          we attribute to the inputs or outputs of the neuron.
                          Currently, it is assumed that the inputs or the outputs
                          of the neurons in this layer, depending on which one is
                          used for attribution, can only be a single tensor.
            device_ids (list(int)): Device ID list, necessary only if forward_func
                          applies a DataParallel model. This allows reconstruction of
                          intermediate outputs from batched results across devices.
                          If forward_func is given as the DataParallel model itself,
                          then it is not necessary to provide this argument.
        """
        NeuronAttribution.__init__(self, forward_func, layer, device_ids)
        GradientAttribution.__init__(self, forward_func)
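For context, a usage sketch for the class this constructor belongs to, NeuronGradientShap. The model, layer choice, and tensor shapes are made up, and the neuron index is passed positionally as described in the docstring above (later captum releases rename neuron_index to neuron_selector):

import torch
import torch.nn as nn
from captum.attr import NeuronGradientShap

# Made-up toy model; we attribute to a neuron in the first linear layer.
model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 3))
inputs = torch.randn(4, 8)
baselines = torch.randn(20, 8)  # distribution GradientShap samples references from

neuron_gs = NeuronGradientShap(model, model[0])
attributions = neuron_gs.attribute(inputs, 3, baselines=baselines)  # neuron index 3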
github pytorch / captum / captum / attr / _core / neuron / neuron_conductance.py
    def __init__(self, forward_func, layer, device_ids=None):
        r"""
        Args:

            forward_func (callable):  The forward function of the model or any
                          modification of it
            layer (torch.nn.Module): Layer for which neuron attributions are computed.
                          Output size of attribute matches this layer's input or
                          output dimensions, depending on whether we attribute to
                          the inputs or outputs of the layer, corresponding to
                          attribution of each neuron in the input or output of
                          this layer.
                          Currently, it is assumed that the inputs or the outputs
                          of the layer, depending on which one is used for
                          attribution, can only be a single tensor.
            device_ids (list(int)): Device ID list, necessary only if forward_func
                          applies a DataParallel model. This allows reconstruction of
                          intermediate outputs from batched results across devices.
                          If forward_func is given as the DataParallel model itself,
                          then it is not necessary to provide this argument.
        """
        NeuronAttribution.__init__(self, forward_func, layer, device_ids)
        GradientAttribution.__init__(self, forward_func)
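A corresponding sketch for NeuronConductance, again with a made-up model. Unlike the purely gradient-based neuron methods, conductance also needs a target output to decompose:

import torch
import torch.nn as nn
from captum.attr import NeuronConductance

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 3))
inputs = torch.randn(4, 8)

# Conductance of neuron 3 in the first layer's output toward output class 0,
# integrated over 50 steps from the default zero baseline.
neuron_cond = NeuronConductance(model, model[0])
attributions = neuron_cond.attribute(inputs, 3, target=0, n_steps=50)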
github pytorch / captum / captum / attr / _core / neuron / neuron_integrated_gradients.py
#!/usr/bin/env python3
from ..._utils.attribution import NeuronAttribution, GradientAttribution
from ..._utils.gradient import construct_neuron_grad_fn

from ..integrated_gradients import IntegratedGradients


class NeuronIntegratedGradients(NeuronAttribution, GradientAttribution):
    def __init__(self, forward_func, layer, device_ids=None):
        r"""
        Args:

            forward_func (callable):  The forward function of the model or any
                          modification of it
            layer (torch.nn.Module): Layer for which attributions are computed.
                          Output size of attribute matches this layer's input or
                          output dimensions, depending on whether we attribute to
                          the inputs or outputs of the layer, corresponding to
                          attribution of each neuron in the input or output of
                          this layer.
                          Currently, it is assumed that the inputs or the outputs
                          of the layer, depending on which one is used for
                          attribution, can only be a single tensor.
            device_ids (list(int)): Device ID list, necessary only if forward_func
                          applies a DataParallel model. This allows reconstruction of
                          intermediate outputs from batched results across devices.
                          If forward_func is given as the DataParallel model itself,
                          then it is not necessary to provide this argument.
        """
        NeuronAttribution.__init__(self, forward_func, layer, device_ids)
        GradientAttribution.__init__(self, forward_func)
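A usage sketch for NeuronIntegratedGradients under the same made-up setup; n_steps controls the resolution of the integral approximation:

import torch
import torch.nn as nn
from captum.attr import NeuronIntegratedGradients

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 3))
inputs = torch.randn(4, 8)

# Integrated gradients for neuron 3 of the first layer, zero baseline by default.
neuron_ig = NeuronIntegratedGradients(model, model[0])
attributions = neuron_ig.attribute(inputs, 3, n_steps=100)
print(attributions.shape)  # matches inputs: torch.Size([4, 8])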
github pytorch / captum / captum / attr / _core / input_x_gradient.py
#!/usr/bin/env python3
from .._utils.common import _format_input, _format_attributions
from .._utils.attribution import GradientAttribution
from .._utils.gradient import apply_gradient_requirements, undo_gradient_requirements


class InputXGradient(GradientAttribution):
    def __init__(self, forward_func):
        r"""
        Args:

            forward_func (callable):  The forward function of the model or any
                          modification of it
        """
        GradientAttribution.__init__(self, forward_func)

    def attribute(self, inputs, target=None, additional_forward_args=None):
        r""""
        A baseline approach for computing the attribution. It multiplies input with
        the gradient with respect to input.
        https://arxiv.org/abs/1611.07270

        Args:
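The method itself is a one-liner to use. A sketch with a made-up model, multiplying inputs by the gradients of the class-0 output:

import torch
import torch.nn as nn
from captum.attr import InputXGradient

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 3))
inputs = torch.randn(4, 8, requires_grad=True)

# Element-wise product of each input with the gradient of the target output.
input_x_grad = InputXGradient(model)
attributions = input_x_grad.attribute(inputs, target=0)
print(attributions.shape)  # same shape as inputs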
github pytorch / captum / captum / attr / _core / neuron / neuron_deep_lift.py
#!/usr/bin/env python3
from ..._utils.attribution import NeuronAttribution, GradientAttribution
from ..._utils.gradient import construct_neuron_grad_fn

from ..deep_lift import DeepLift, DeepLiftShap


class NeuronDeepLift(NeuronAttribution, GradientAttribution):
    def __init__(self, model, layer):
        r"""
        Args:

            model (torch.nn.Module):  The reference to PyTorch model instance.
            layer (torch.nn.Module): Layer for which neuron attributions are computed.
                          Attributions for a particular neuron for the input or output
                          of this layer are computed using the argument neuron_index
                          in the attribute method.
                          Currently, it is assumed that the inputs or the outputs
                          of the layer, depending on which one is used for
                          attribution, can only be a single tensor.
        """
        NeuronAttribution.__init__(self, model, layer)
        GradientAttribution.__init__(self, model)
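A usage sketch for NeuronDeepLift with a made-up model; note that it is constructed from the nn.Module itself rather than a bare forward function, since DeepLift needs to hook the model's layers:

import torch
import torch.nn as nn
from captum.attr import NeuronDeepLift

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 3))
inputs = torch.randn(4, 8)
baselines = torch.zeros(4, 8)  # DeepLift compares activations against this reference

neuron_dl = NeuronDeepLift(model, model[0])
attributions = neuron_dl.attribute(inputs, 3, baselines=baselines)  # neuron index 3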
github pytorch / captum / captum / attr / _core / integrated_gradients.py
from .._utils.approximation_methods import approximation_parameters
from .._utils.batching import _batched_operator
from .._utils.common import (
    _validate_input,
    _format_additional_forward_args,
    _format_attributions,
    _format_input_baseline,
    _reshape_and_sum,
    _expand_additional_forward_args,
    _expand_target,
)
from .._utils.attribution import GradientAttribution


class IntegratedGradients(GradientAttribution):
    r"""
    Integrated Gradients is an axiomatic model interpretability algorithm that
    assigns an importance score to each input feature by approximating the
    integral of gradients of the model's output with respect to the inputs
    along the path (straight line) from given baselines / references to inputs.

    Baselines can be provided as input arguments to the attribute method.
    To approximate the integral we can choose either a variant of the
    Riemann sum or the Gauss-Legendre quadrature rule.

    More details regarding the integrated gradients method can be found in the
    original paper:
    https://arxiv.org/abs/1703.01365

    """
github pytorch / captum / captum / attr / _core / layer / internal_influence.py
    def __init__(self, forward_func, layer, device_ids=None):
        r"""
        Args:

            forward_func (callable):  The forward function of the model or any
                          modification of it
            layer (torch.nn.Module): Layer for which attributions are computed.
                          Output size of attribute matches this layer's input or
                          output dimensions, depending on whether we attribute to
                          the inputs or outputs of the layer, corresponding to
                          attribution of each neuron in the input or output of
                          this layer.
                          Currently, it is assumed that the inputs or the outputs
                          of the layer, depending on which one is used for
                          attribution, can only be a single tensor.
            device_ids (list(int)): Device ID list, necessary only if forward_func
                          applies a DataParallel model. This allows reconstruction of
                          intermediate outputs from batched results across devices.
                          If forward_func is given as the DataParallel model itself,
                          then it is not necessary to provide this argument.
        """
        LayerAttribution.__init__(self, forward_func, layer, device_ids)
        GradientAttribution.__init__(self, forward_func)
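Finally, a sketch for the layer-level InternalInfluence under the same made-up setup; here the attribution shape follows the chosen layer's output rather than the inputs:

import torch
import torch.nn as nn
from captum.attr import InternalInfluence

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 3))
inputs = torch.randn(4, 8)

# Importance of each neuron in the first layer's output for the class-0 output,
# integrated along the straight-line path from a zero baseline.
layer_inf = InternalInfluence(model, model[0])
attributions = layer_inf.attribute(inputs, target=0, n_steps=50)
print(attributions.shape)  # matches the layer output: torch.Size([4, 16])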