How to use the captum.attr._utils.attribution.LayerAttribution class in captum

To help you get started, we’ve selected a few captum examples based on popular ways it is used in public projects.

github pytorch / captum / tests / attr / helpers / conductance_reference.py
"""
Note: This implementation of conductance exactly follows the procedure described in the
original paper (https://arxiv.org/abs/1805.12233): it computes gradients of the output with
respect to the hidden neurons and of each hidden neuron with respect to the input, then sums
appropriately. Computing the gradient of each neuron with respect to the input is
not necessary to compute the conductance of a given layer, so the main
implementation of conductance does not use this approach, in order to compute layer
conductance more efficiently (https://arxiv.org/pdf/1807.09946.pdf).
This implementation is used only for testing to verify that the output matches
that of the main implementation.
"""


class ConductanceReference(LayerAttribution):
    def __init__(self, forward_func, layer):
        r"""
        Args:

            forward_func:  The forward function of the model or any modification of it
            layer: Layer for which output attributions are computed.
                   Output size of attribute matches that of layer output.
        """
        super().__init__(forward_func, layer)

    def _conductance_grads(self, forward_fn, input, target_ind=None):
        with torch.autograd.set_grad_enabled(True):
            # Set a forward hook on specified module and run forward pass to
            # get output tensor size.
            saved_tensor = None
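
The reference class above is used only in captum's tests; everyday attribution goes through the efficient public equivalent, captum.attr.LayerConductance. A minimal usage sketch, where the two-layer model and shapes are illustrative assumptions rather than part of the snippet:

import torch
import torch.nn as nn
from captum.attr import LayerConductance

# Toy model; any nn.Module with an addressable layer works.
model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
model.eval()
inputs = torch.randn(3, 4)

# Attribute the class-0 output to the neurons of the first linear layer.
layer_cond = LayerConductance(model, model[0])
attributions = layer_cond.attribute(inputs, target=0)
print(attributions.shape)  # torch.Size([3, 8]): one value per hidden neuron
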
github pytorch / captum / captum / attr / _core / layer / layer_activation.py
#!/usr/bin/env python3
import torch

from ..._utils.attribution import LayerAttribution
from ..._utils.gradient import _forward_layer_eval


class LayerActivation(LayerAttribution):
    def __init__(self, forward_func, layer, device_ids=None):
        r"""
        Args:

            forward_func (callable):  The forward function of the model or any
                          modification of it
            layer (torch.nn.Module): Layer for which attributions are computed.
                          Output size of attribute matches this layer's input or
                          output dimensions, depending on whether we attribute to
                          the inputs or outputs of the layer, corresponding to
                          attribution of each neuron in the input or output of
                          this layer.
                          Currently, it is assumed that the inputs or the outputs
                          of the layer, depending on which one is used for
                          attribution, can only be a single tensor.
            device_ids (list(int)): Device ID list, necessary only if forward_func
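
LayerActivation simply evaluates the chosen layer on the given inputs, with no gradient computation involved. A minimal sketch, assuming an illustrative toy model:

import torch
import torch.nn as nn
from captum.attr import LayerActivation

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
inputs = torch.randn(3, 4)

# LayerActivation returns the layer's output (here, the ReLU activations).
layer_act = LayerActivation(model, model[1])
activations = layer_act.attribute(inputs)
print(activations.shape)  # torch.Size([3, 8])
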
github pytorch / captum / captum / attr / _core / layer / layer_conductance.py
import torch
from ..._utils.approximation_methods import approximation_parameters
from ..._utils.attribution import LayerAttribution, GradientAttribution
from ..._utils.batching import _batched_operator
from ..._utils.common import (
    _reshape_and_sum,
    _format_input_baseline,
    _format_additional_forward_args,
    _expand_additional_forward_args,
    _validate_input,
    _expand_target,
)
from ..._utils.gradient import compute_layer_gradients_and_eval


class LayerConductance(LayerAttribution, GradientAttribution):
    def __init__(self, forward_func, layer, device_ids=None):
        r"""
        Args:

            forward_func (callable):  The forward function of the model or any
                          modification of it
            layer (torch.nn.Module): Layer for which attributions are computed.
                          Output size of attribute matches this layer's input or
                          output dimensions, depending on whether we attribute to
                          the inputs or outputs of the layer, corresponding to
                          attribution of each neuron in the input or output of
                          this layer.
                          Currently, it is assumed that the inputs or the outputs
                          of the layer, depending on which one is used for
                          attribution, can only be a single tensor.
            device_ids (list(int)): Device ID list, necessary only if forward_func
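
Because LayerConductance mixes in GradientAttribution, attribute also accepts baselines, an integration step count, and an optional convergence delta. A hedged sketch with illustrative parameter values:

import torch
import torch.nn as nn
from captum.attr import LayerConductance

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
inputs = torch.randn(3, 4)
baselines = torch.zeros(3, 4)  # reference point for the integration path

layer_cond = LayerConductance(model, model[0])
# n_steps controls the resolution of the path integral approximation;
# the returned delta estimates the completeness error of that approximation.
attributions, delta = layer_cond.attribute(
    inputs,
    baselines=baselines,
    target=1,
    n_steps=100,
    return_convergence_delta=True,
)
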
github pytorch / captum / captum / attr / _core / layer / layer_gradient_shap.py
import numpy as np

from ..._utils.attribution import LayerAttribution
from ..._utils.gradient import compute_layer_gradients_and_eval, _forward_layer_eval

from ..gradient_shap import GradientShap, InputBaselineXGradient
from ..._utils.common import (
    _format_callable_baseline,
    _compute_conv_delta_and_format_attrs,
)

from ..noise_tunnel import NoiseTunnel


class LayerGradientShap(LayerAttribution, GradientShap):
    def __init__(self, forward_func, layer, device_ids=None):
        r"""
        Args:

            forward_func (callable):  The forward function of the model or any
                          modification of it
            layer (torch.nn.Module): Layer for which attributions are computed.
                          Output size of attribute matches this layer's input or
                          output dimensions, depending on whether we attribute to
                          the inputs or outputs of the layer, corresponding to
                          attribution of each neuron in the input or output of
                          this layer.
                          Currently, it is assumed that the inputs or the outputs
                          of the layer, depending on which one is used for
                          attribution, can only be a single tensor.
            device_ids (list(int)): Device ID list, necessary only if forward_func
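
LayerGradientShap samples baselines from a user-supplied distribution and averages gradients over noisy draws. A minimal sketch; the model, baseline set, and parameter values are illustrative assumptions:

import torch
import torch.nn as nn
from captum.attr import LayerGradientShap

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
inputs = torch.randn(3, 4)
baseline_dist = torch.randn(10, 4)  # baselines are sampled from this set

layer_gs = LayerGradientShap(model, model[0])
# n_samples random (baseline, noise) draws are averaged per input example.
attributions = layer_gs.attribute(
    inputs, baselines=baseline_dist, n_samples=20, stdevs=0.1, target=0
)
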
github pytorch / captum / captum / attr / _core / layer / internal_influence.py
import torch
from ..._utils.approximation_methods import approximation_parameters
from ..._utils.attribution import LayerAttribution, GradientAttribution
from ..._utils.batching import _batched_operator
from ..._utils.common import (
    _reshape_and_sum,
    _format_input_baseline,
    _validate_input,
    _format_additional_forward_args,
    _expand_additional_forward_args,
    _expand_target,
)
from ..._utils.gradient import compute_layer_gradients_and_eval


class InternalInfluence(LayerAttribution, GradientAttribution):
    def __init__(self, forward_func, layer, device_ids=None):
        r"""
        Args:

            forward_func (callable):  The forward function of the model or any
                          modification of it
            layer (torch.nn.Module): Layer for which attributions are computed.
                          Output size of attribute matches this layer's input or
                          output dimensions, depending on whether we attribute to
                          the inputs or outputs of the layer, corresponding to
                          attribution of each neuron in the input or output of
                          this layer.
                          Currently, it is assumed that the inputs or the outputs
                          of the layer, depending on which one is used for
                          attribution, can only be a single tensor.
            device_ids (list(int)): Device ID list, necessary only if forward_func
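
InternalInfluence follows the same constructor pattern and integrates layer gradients along a baseline-to-input path. A minimal sketch, again with an illustrative toy model:

import torch
import torch.nn as nn
from captum.attr import InternalInfluence

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
inputs = torch.randn(3, 4)

# Integrates layer gradients over n_steps points between baseline and input.
layer_inf = InternalInfluence(model, model[0])
attributions = layer_inf.attribute(inputs, target=0, n_steps=50)
print(attributions.shape)  # torch.Size([3, 8])
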
github pytorch / captum / captum / attr / _core / layer / grad_cam.py
#!/usr/bin/env python3
import torch
import torch.nn.functional as F

from ..._utils.attribution import LayerAttribution, GradientAttribution
from ..._utils.common import _format_input, _format_additional_forward_args
from ..._utils.gradient import compute_layer_gradients_and_eval


class LayerGradCam(LayerAttribution, GradientAttribution):
    def __init__(self, forward_func, layer, device_ids=None):
        r"""
        Args:

            forward_func (callable):  The forward function of the model or any
                          modification of it
            layer (torch.nn.Module): Layer for which attributions are computed.
                          Output size of attribute matches this layer's output
                          dimensions, except for dimension 2, which will be 1,
                          since GradCAM sums over channels.
                          Currently, only layers with a single tensor output are
                          supported.
            device_ids (list(int)): Device ID list, necessary only if forward_func
                          applies a DataParallel model. This allows reconstruction of
                          intermediate outputs from batched results across devices.
                          If forward_func is given as the DataParallel model itself,
                          then it is not necessary to provide this argument.
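
LayerGradCam yields a channel-summed spatial map, and LayerAttribution.interpolate (a static method of the class this page covers) can upsample it back to the input resolution. A sketch assuming an illustrative toy CNN:

import torch
import torch.nn as nn
from captum.attr import LayerGradCam, LayerAttribution

model = nn.Sequential(
    nn.Conv2d(3, 8, kernel_size=3, padding=1), nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Conv2d(8, 8, kernel_size=3, padding=1), nn.ReLU(),
    nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(8, 10),
)
inputs = torch.randn(1, 3, 32, 32)

# Attribute class 3 to the second conv layer; channels are summed,
# so the result has shape (1, 1, 16, 16).
grad_cam = LayerGradCam(model, model[3])
attributions = grad_cam.attribute(inputs, target=3)

# Upsample the attribution map to the input's spatial resolution.
upsampled = LayerAttribution.interpolate(attributions, (32, 32))
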