How to use the captum.attr._core.integrated_gradients.IntegratedGradients function in captum

To help you get started, we've selected a few captum examples based on popular ways IntegratedGradients is used in public projects.
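
Before the project examples, here is a minimal, hedged sketch of the basic call pattern (the toy model below is illustrative and not taken from any of the projects listed):

import torch
import torch.nn as nn
from captum.attr import IntegratedGradients

# Toy classifier for illustration; any nn.Module or forward function works.
model = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 2))
model.eval()

ig = IntegratedGradients(model)
inp = torch.randn(4, 3)
# Attribute the class-0 output to each input feature; the optional
# return_convergence_delta=True also returns the completeness error.
attributions, delta = ig.attribute(inp, target=0, return_convergence_delta=True)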


From pytorch/captum on GitHub: tests/attr/test_integrated_gradients_classification.py
def _validate_completness(
        self,
        model,
        inputs,
        target,
        type="vanilla",
        approximation_method="gausslegendre",
        baseline=None,
    ):
        ig = IntegratedGradients(model.forward)
        model.zero_grad()
        if type == "vanilla":
            attributions, delta = ig.attribute(
                inputs,
                baselines=baseline,
                target=target,
                method=approximation_method,
                n_steps=200,
                return_convergence_delta=True,
            )
            delta_expected = ig.compute_convergence_delta(
                attributions, baseline, inputs, target
            )
            assertTensorAlmostEqual(self, delta_expected, delta)

            delta_condition = all(abs(delta.numpy().flatten()) < 0.005)
            self.assertTrue(
                delta_condition,
                "The sum of attribution values {} is not "
                "nearly equal to the difference between the "
                "endpoints for some samples".format(delta),
            )
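
Note: the convergence delta above measures how far the attributions are from satisfying the completeness axiom of integrated gradients, i.e. how far their sum is from the difference between the model output at the input and at the baseline; the test asserts that this error is near zero for every sample.
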
From pytorch/captum on GitHub: tests/attr/test_targets.py
def test_simple_target_nt_single_tensor(self):
        net = BasicModel_MultiLayer()
        inp = torch.randn(4, 3)
        self._target_batch_test_assert(
            NoiseTunnel,
            IntegratedGradients(net),
            inputs=inp,
            targets=torch.tensor([0]),
            stdevs=0.0,
            test_batches=True,
            splice_targets=False,
        )
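
In this and the following target tests, IntegratedGradients is wrapped in NoiseTunnel with stdevs=0.0, so no noise is actually added and the assertions exercise only the handling of the various target formats.
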
From pytorch/captum on GitHub: tests/attr/test_targets.py
def test_multi_target_nt(self):
        net = BasicModel_MultiLayer()
        inp = torch.randn(4, 3)
        self._target_batch_test_assert(
            NoiseTunnel,
            IntegratedGradients(net),
            inputs=inp,
            additional_forward_args=(None, True),
            stdevs=0.0,
            targets=[(1, 0, 0), (0, 1, 1), (1, 1, 1), (0, 0, 0)],
            test_batches=True,
        )
From pytorch/captum on GitHub: tests/attr/test_deeplift_classification.py
def _assert_attributions(
        self, model, attributions, inputs, baselines, delta, target=None
    ):
        self.assertEqual(inputs.shape, attributions.shape)

        delta_condition = all(abs(delta.numpy().flatten()) < 0.003)
        self.assertTrue(
            delta_condition,
            "The sum of attribution values {} is not "
            "nearly equal to the difference between the endpoint for "
            "some samples".format(delta),
        )
        # compare with integrated gradients
        if inputs.shape == baselines.shape:
            ig = IntegratedGradients(model)
            attributions_ig = ig.attribute(inputs, baselines=baselines, target=target)
            assertAttributionComparision(self, attributions, attributions_ig)
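
The cross-check at the end uses integrated gradients as a reference: since both DeepLift and IntegratedGradients satisfy the completeness property, their attributions are expected to agree closely on these test models.
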
From pytorch/captum on GitHub: tests/attr/test_gradient_shap.py
        with self.assertRaises(AssertionError):
            gradient_shap.compute_convergence_delta(
                attributions, inputs, baselines, target=target
            )
        # now, let's expand target and choose random baselines from `baselines` tensor
        rand_indices = np.random.choice(baselines.shape[0], inputs.shape[0]).tolist()
        chosen_baselines = baselines[rand_indices]

        target_extended = torch.tensor([1, 1])
        external_delta = gradient_shap.compute_convergence_delta(
            attributions, chosen_baselines, inputs, target=target_extended
        )
        )
        _assert_delta(self, external_delta)

        # Compare with integrated gradients
        ig = IntegratedGradients(model)
        baselines = torch.arange(0.0, num_in * 2.0).reshape(2, num_in)
        attributions_ig = ig.attribute(inputs, baselines=baselines, target=target)
        self._assert_shap_ig_comparision((attributions,), (attributions_ig,))
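
The assertRaises block at the top documents a shape constraint of compute_convergence_delta: the first call fails because the baselines and target do not line up with the batch of inputs, so the test then samples one baseline per input example and expands the target before calling it again.
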
From pytorch/captum on GitHub: tests/attr/test_deeplift_classification.py
def test_sigmoid_classification(self):
        num_in = 20
        input = torch.arange(0.0, num_in * 1.0, requires_grad=True).unsqueeze(0)
        baseline = 0 * input
        target = torch.tensor(0)
        # TODO add test cases for multiple different layers
        model = SigmoidDeepLiftModel(num_in, 5, 1)
        dl = DeepLift(model)
        model.zero_grad()
        attributions, delta = dl.attribute(
            input, baseline, target=target, return_convergence_delta=True
        )
        self._assert_attributions(model, attributions, input, baseline, delta, target)

        # compare with integrated gradients
        ig = IntegratedGradients(model)
        attributions_ig = ig.attribute(input, baseline, target=target)
        assertAttributionComparision(self, (attributions,), (attributions_ig,))
From pytorch/captum on GitHub: tests/attr/test_targets.py
def test_simple_target_nt(self):
        net = BasicModel_MultiLayer()
        inp = torch.randn(4, 3)
        self._target_batch_test_assert(
            NoiseTunnel,
            IntegratedGradients(net),
            inputs=inp,
            targets=[0, 1, 1, 0],
            stdevs=0.0,
            test_batches=True,
        )
From pytorch/captum on GitHub: captum/attr/_core/layer/layer_integrated_gradients.py
            forward_func (callable): The forward function of the model or any
                          modification of it.
            layer (torch.nn.Module): Layer for which attributions are computed.
                          Output size of attribute matches this layer's input or
                          output dimensions, depending on whether we attribute to
                          the inputs or outputs of the layer, corresponding to
                          the attribution of each neuron in the input or output
                          of this layer.
            device_ids (list(int)): Device ID list, necessary only if forward_func
                          applies a DataParallel model. This allows reconstruction of
                          intermediate outputs from batched results across devices.
                          If forward_func is given as the DataParallel model itself,
                          then it is not necessary to provide this argument.

        """
        LayerAttribution.__init__(self, forward_func, layer, device_ids=device_ids)
        IntegratedGradients.__init__(self, forward_func)
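
Since the excerpt above only shows the constructor, here is a minimal sketch of how LayerIntegratedGradients is typically used, assuming a toy two-layer model (the model is illustrative, not from the captum source):

import torch
import torch.nn as nn
from captum.attr import LayerIntegratedGradients

# Toy model; attributions are computed with respect to its first layer.
model = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 2))
lig = LayerIntegratedGradients(model, model[0])
inp = torch.randn(4, 3)
# Attributions match the output shape of model[0] (here 4x4),
# not the shape of the inputs.
layer_attributions = lig.attribute(inp, target=0)
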
From pytorch/captum on GitHub: captum/attr/_core/neuron/neuron_integrated_gradients.py
                >>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
                >>> # and returns an Nx10 tensor of class probabilities.
                >>> # It contains an attribute conv1, which is an instance of nn.conv2d,
                >>> # and the output of this layer has dimensions Nx12x32x32.
                >>> net = ImageClassifier()
                >>> neuron_ig = NeuronIntegratedGradients(net, net.conv1)
                >>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
                >>> # To compute neuron attribution, we need to provide the neuron
                >>> # index for which attribution is desired. Since the layer output
                >>> # is Nx12x32x32, we need a tuple in the form (0..11,0..31,0..31)
                >>> # which indexes a particular neuron in the layer output.
                >>> # For this example, we choose the index (4,1,2).
                >>> # Computes neuron integrated gradients for neuron with
                >>> # index (4,1,2).
                >>> attribution = neuron_ig.attribute(input, (4,1,2))
        """
        ig = IntegratedGradients(self.forward_func)
        ig.gradient_func = construct_neuron_grad_fn(
            self.layer, neuron_index, self.device_ids, attribute_to_neuron_input
        )
        # Return only attributions and not delta
        return ig.attribute(
            inputs,
            baselines,
            additional_forward_args=additional_forward_args,
            n_steps=n_steps,
            method=method,
            internal_batch_size=internal_batch_size,
        )
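
Note the design: the neuron variant reuses IntegratedGradients wholesale and only swaps in a gradient function built by construct_neuron_grad_fn, so gradients are taken with respect to the selected neuron's output rather than the model output; the convergence delta is not requested, so only the attributions are returned.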