How to use the captum.attr._utils.attribution.NeuronAttribution class in captum

To help you get started, we've selected a few captum examples based on popular ways NeuronAttribution is used in public projects.

From pytorch/captum: captum/attr/_core/neuron/neuron_gradient_shap.py
#!/usr/bin/env python3

from ..gradient_shap import GradientShap
from ..._utils.attribution import NeuronAttribution, GradientAttribution
from ..._utils.gradient import construct_neuron_grad_fn


class NeuronGradientShap(NeuronAttribution, GradientAttribution):
    def __init__(self, forward_func, layer, device_ids=None):
        r"""
        Args:

            forward_func (callable):  The forward function of the model or any
                          modification of it
            layer (torch.nn.Module): Layer for which neuron attributions are computed.
                          The output size of the attribute method matches the
                          dimensions of the inputs or outputs of the neuron with
                          index `neuron_index` in this layer, depending on whether
                          we attribute to the inputs or outputs of the neuron.
                          Currently, it is assumed that the inputs or the outputs
                          of the neurons in this layer, depending on which one is
                          used for attribution, can only be a single tensor.
            device_ids (list(int)): Device ID list, necessary only if forward_func
                          applies a DataParallel model. This allows reconstruction of
                          intermediate outputs from batched results across devices.
                          If forward_func is given as the DataParallel model itself,
                          then it is not necessary to provide this argument.
        """
        NeuronAttribution.__init__(self, forward_func, layer, device_ids)
        GradientAttribution.__init__(self, forward_func)
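
To see how this class is used, here is a minimal sketch (the toy model, baselines, and n_samples value are illustrative assumptions; the captum version shown here selects the neuron with neuron_index, which newer releases rename to neuron_selector):

import torch
import torch.nn as nn
from captum.attr import NeuronGradientShap

# Hypothetical toy model; we attribute to a neuron in the first linear layer.
model = nn.Sequential(nn.Linear(10, 20), nn.ReLU(), nn.Linear(20, 2))
neuron_gs = NeuronGradientShap(model, model[0])

inputs = torch.randn(4, 10)
# GradientShap expects a distribution of baselines (several reference samples).
baselines = torch.randn(20, 10) * 0.001

# Attribution of each input feature to neuron 3 in the output of model[0].
attributions = neuron_gs.attribute(
    inputs, neuron_index=3, baselines=baselines, n_samples=10
)
print(attributions.shape)  # matches inputs: torch.Size([4, 10])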

From pytorch/captum: captum/attr/_core/neuron/neuron_gradient.py
#!/usr/bin/env python3
from ..._utils.attribution import NeuronAttribution, GradientAttribution
from ..._utils.common import (
    _format_input,
    _format_additional_forward_args,
    _format_attributions,
)
from ..._utils.gradient import (
    apply_gradient_requirements,
    undo_gradient_requirements,
    _forward_layer_eval_with_neuron_grads,
)


class NeuronGradient(NeuronAttribution, GradientAttribution):
    def __init__(self, forward_func, layer, device_ids=None):
        r"""
        Args:

            forward_func (callable):  The forward function of the model or any
                          modification of it
            layer (torch.nn.Module): Layer for which attributions are computed.
                          Output size of attribute matches this layer's input or
                          output dimensions, depending on whether we attribute to
                          the inputs or outputs of the layer, corresponding to
                          attribution of each neuron in the input or output of
                          this layer.
                          Currently, it is assumed that the inputs or the outputs
                          of the layer, depending on which one is used for
                          attribution, can only be a single tensor.
            device_ids (list(int)): Device ID list, necessary only if forward_func
                          applies a DataParallel model. This allows reconstruction of
                          intermediate outputs from batched results across devices.
                          If forward_func is given as the DataParallel model itself,
                          then it is not necessary to provide this argument.
        """
        NeuronAttribution.__init__(self, forward_func, layer, device_ids)
        GradientAttribution.__init__(self, forward_func)
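
A minimal usage sketch for NeuronGradient follows (the model and tensor shapes are illustrative assumptions):

import torch
import torch.nn as nn
from captum.attr import NeuronGradient

model = nn.Sequential(nn.Linear(10, 20), nn.ReLU(), nn.Linear(20, 2))
neuron_grad = NeuronGradient(model, model[0])

inputs = torch.randn(4, 10)
# Gradient of neuron 3 (in model[0]'s output) with respect to each input feature.
attributions = neuron_grad.attribute(inputs, neuron_index=3)
print(attributions.shape)  # torch.Size([4, 10])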

From pytorch/captum: captum/attr/_core/neuron/neuron_guided_backprop_deconvnet.py
#!/usr/bin/env python3
from ..._utils.attribution import NeuronAttribution, GradientAttribution
from ..._utils.gradient import construct_neuron_grad_fn
from ..guided_backprop_deconvnet import GuidedBackprop, Deconvolution


class NeuronDeconvolution(NeuronAttribution, GradientAttribution):
    def __init__(self, model, layer, device_ids=None):
        r"""
        Args:

            model (nn.Module):  A reference to the PyTorch model instance.
            layer (torch.nn.Module): Layer for which attributions are computed.
                          Output size of attribute matches this layer's input or
                          output dimensions, depending on whether we attribute to
                          the inputs or outputs of the layer, corresponding to
                          attribution of each neuron in the input or output of
                          this layer.
                          Currently, it is assumed that the inputs or the outputs
                          of the layer, depending on which one is used for
                          attribution, can only be a single tensor.
            device_ids (list(int)): Device ID list, necessary only if forward_func
                          applies a DataParallel model. This allows reconstruction of
                          intermediate outputs from batched results across devices.
                          If forward_func is given as the DataParallel model itself,
                          then it is not necessary to provide this argument.
        """
        NeuronAttribution.__init__(self, model, layer, device_ids)
        GradientAttribution.__init__(self, model)
        self.deconv = Deconvolution(model)
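
A minimal usage sketch for NeuronDeconvolution (the conv net and neuron index are illustrative assumptions; note that a multi-dimensional layer output is indexed with a tuple):

import torch
import torch.nn as nn
from captum.attr import NeuronDeconvolution

# Deconvolution overrides the backward pass of ReLU modules, so the model
# should use nn.ReLU modules rather than the functional form.
model = nn.Sequential(
    nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Flatten(), nn.Linear(8 * 30 * 30, 2)
)
neuron_deconv = NeuronDeconvolution(model, model[0])

inputs = torch.randn(2, 3, 32, 32)
# The conv output has shape (8, 30, 30), so a single neuron is addressed
# as (channel, height, width).
attributions = neuron_deconv.attribute(inputs, neuron_index=(0, 5, 5))
print(attributions.shape)  # torch.Size([2, 3, 32, 32])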

From pytorch/captum: captum/attr/_core/neuron/neuron_deep_lift.py
        # Tail of NeuronDeepLift.attribute: delegate to DeepLift, swapping in a
        # gradient function that targets the selected neuron.
        dl = DeepLift(self.forward_func)
        dl.gradient_func = construct_neuron_grad_fn(
            self.layer,
            neuron_index,
            attribute_to_neuron_input=attribute_to_neuron_input,
        )

        return dl.attribute(
            inputs,
            baselines,
            additional_forward_args=additional_forward_args,
            custom_attribution_func=custom_attribution_func,
        )


class NeuronDeepLiftShap(NeuronAttribution, GradientAttribution):
    def __init__(self, model, layer):
        r"""
        Args:

            model (torch.nn.Module):  A reference to the PyTorch model instance.
            layer (torch.nn.Module): Layer for which neuron attributions are computed.
                          Attributions for a particular neuron for the input or output
                          of this layer are computed using the argument neuron_index
                          in the attribute method.
                          Currently, only layers with a single tensor input and output
                          are supported.
        """
        NeuronAttribution.__init__(self, model, layer)
        GradientAttribution.__init__(self, model)

    def attribute(
        self,
        inputs,
        neuron_index,
        baselines,
        additional_forward_args=None,
        attribute_to_neuron_input=False,
        custom_attribution_func=None,
    ):
        # Mirrors NeuronDeepLift.attribute above: delegate to DeepLiftShap with
        # a gradient function that targets the selected neuron.
        dl_shap = DeepLiftShap(self.forward_func)
        dl_shap.gradient_func = construct_neuron_grad_fn(
            self.layer,
            neuron_index,
            attribute_to_neuron_input=attribute_to_neuron_input,
        )

        return dl_shap.attribute(
            inputs,
            baselines,
            additional_forward_args=additional_forward_args,
            custom_attribution_func=custom_attribution_func,
        )
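
A minimal usage sketch for NeuronDeepLiftShap (the model and baselines are illustrative assumptions; like DeepLiftShap, it expects a distribution of baselines):

import torch
import torch.nn as nn
from captum.attr import NeuronDeepLiftShap

model = nn.Sequential(nn.Linear(10, 20), nn.ReLU(), nn.Linear(20, 2))
neuron_dls = NeuronDeepLiftShap(model, model[0])

inputs = torch.randn(4, 10)
# A distribution of reference samples to average DeepLift attributions over.
baselines = torch.zeros(16, 10)
attributions = neuron_dls.attribute(inputs, neuron_index=3, baselines=baselines)
print(attributions.shape)  # torch.Size([4, 10])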

From pytorch/captum: captum/attr/_core/neuron/neuron_conductance.py
from ..._utils.attribution import NeuronAttribution, GradientAttribution
from ..._utils.batching import _batched_operator
from ..._utils.common import (
    _reshape_and_sum,
    _format_input_baseline,
    _format_additional_forward_args,
    _validate_input,
    _format_attributions,
    _expand_additional_forward_args,
    _expand_target,
    _verify_select_column,
)
from ..._utils.gradient import compute_layer_gradients_and_eval


class NeuronConductance(NeuronAttribution, GradientAttribution):
    def __init__(self, forward_func, layer, device_ids=None):
        r"""
        Args:

            forward_func (callable):  The forward function of the model or any
                          modification of it
            layer (torch.nn.Module): Layer for which neuron attributions are computed.
                          Attributions for a particular neuron in the input or output
                          of this layer are computed using the argument neuron_index
                          in the attribute method.
                          Currently, only layers with a single tensor input or output
                          are supported.
            device_ids (list(int)): Device ID list, necessary only if forward_func
                          applies a DataParallel model. This allows reconstruction of
                          intermediate outputs from batched results across devices.
                          If forward_func is given as the DataParallel model itself,
                          then it is not necessary to provide this argument.
        """
        NeuronAttribution.__init__(self, forward_func, layer, device_ids)
        GradientAttribution.__init__(self, forward_func)
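
Finally, a minimal usage sketch for NeuronConductance (the model, target, and step count are illustrative assumptions; target selects which model output the conductance is computed for):

import torch
import torch.nn as nn
from captum.attr import NeuronConductance

model = nn.Sequential(nn.Linear(10, 20), nn.ReLU(), nn.Linear(20, 2))
neuron_cond = NeuronConductance(model, model[0])

inputs = torch.randn(4, 10)
# Conductance of neuron 3 for output class 0, integrated over 50 steps
# from the (default zero) baseline to the inputs.
attributions = neuron_cond.attribute(inputs, neuron_index=3, target=0, n_steps=50)
print(attributions.shape)  # torch.Size([4, 10])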