How to use captum - 10 common examples

To help you get started, we’ve selected a few captum examples, based on popular ways it is used in public projects.
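All of the snippets below follow the same basic pattern: wrap a model (or its forward function) in an attribution class from captum.attr, then call attribute on the inputs. As a minimal, self-contained sketch of that pattern (the toy model and tensor shapes here are ours, not taken from the excerpts):

import torch
import torch.nn as nn
from captum.attr import Saliency

# Toy two-layer classifier; any callable that returns a tensor of scores works.
model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 3))
inputs = torch.randn(2, 4, requires_grad=True)

# Saliency scores each input feature by the absolute value of the gradient
# of the target output with respect to that feature.
saliency = Saliency(model)
attributions = saliency.attribute(inputs, target=0)
print(attributions.shape)  # matches the input shape: torch.Size([2, 4])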

github pytorch / captum / captum / attr / _core / noise_tunnel.py
        def compute_expected_attribution_and_sq(attribution):
            bsz = attribution.shape[0] // n_samples
            attribution_shape = (bsz, n_samples)
            if len(attribution.shape) > 1:
                attribution_shape += attribution.shape[1:]

            attribution = attribution.view(attribution_shape)
            expected_attribution = attribution.mean(dim=1, keepdim=False)
            expected_attribution_sq = torch.mean(attribution ** 2, dim=1, keepdim=False)
            return expected_attribution, expected_attribution_sq

        # Keeps track of whether the original input is a tuple or not
        # before converting it into a tuple.
        is_inputs_tuple = isinstance(inputs, tuple)

        inputs = _format_input(inputs)

        _validate_noise_tunnel_type(nt_type, SUPPORTED_NOISE_TUNNEL_TYPES)

        delta = 0
        inputs_with_noise = add_noise_to_inputs()
        # If the algorithm supports targets, baselines and/or additional_forward_args,
        # they will be expanded based on n_samples, and the corresponding kwargs
        # variables will be updated accordingly.
        expand_and_update_baselines()
        expand_and_update_additional_forward_args()
        expand_and_update_target()
        # smoothgrad_Attr(x) = 1 / n * sum(Attr(x + N(0, sigma^2)))
        attributions = self.attribution_method.attribute(inputs_with_noise, **kwargs)

        return_convergence_delta = (
            "return_convergence_delta" in kwargs and kwargs["return_convergence_delta"]
        )
github pytorch / captum / tests / attr / test_saliency.py
    def _saliency_base_assert(
        self, model, inputs, expected, additional_forward_args=None, nt_type="vanilla"
    ):
        saliency = Saliency(model)
        if nt_type == "vanilla":
            attributions = saliency.attribute(
                inputs, additional_forward_args=additional_forward_args
            )
        else:
            nt = NoiseTunnel(saliency)
            attributions = nt.attribute(
                inputs,
                nt_type=nt_type,
                n_samples=10,
                stdevs=0.0000002,
                additional_forward_args=additional_forward_args,
            )
        if isinstance(attributions, tuple):
            for input, attribution, expected_attr in zip(
                inputs, attributions, expected
            ):
                if nt_type == "vanilla":
                    self._assert_attribution(attribution, expected_attr)
                self.assertEqual(input.shape, attribution.shape)
        else:
            if nt_type == "vanilla":
                self._assert_attribution(attributions, expected)
            self.assertEqual(inputs.shape, attributions.shape)
github pytorch / captum / tests / attr / test_input_x_gradient.py
    def _input_x_gradient_base_assert(
        self,
        model,
        inputs,
        expected_grads,
        additional_forward_args=None,
        nt_type="vanilla",
    ):
        input_x_grad = InputXGradient(model)
        if nt_type == "vanilla":
            attributions = input_x_grad.attribute(
                inputs, additional_forward_args=additional_forward_args
            )
        else:
            nt = NoiseTunnel(input_x_grad)
            attributions = nt.attribute(
                inputs,
                nt_type=nt_type,
                n_samples=10,
                stdevs=0.0002,
                additional_forward_args=additional_forward_args,
            )

        if isinstance(attributions, tuple):
            for input, attribution, expected_grad in zip(
                inputs, attributions, expected_grads
            ):
                if nt_type == "vanilla":
                    assertArraysAlmostEqual(
                        attribution.reshape(-1), (expected_grad * input).reshape(-1)
                    )
                self.assertEqual(input.shape, attribution.shape)
        else:
            if nt_type == "vanilla":
                assertArraysAlmostEqual(
                    attributions.reshape(-1), (expected_grads * inputs).reshape(-1)
                )
            self.assertEqual(inputs.shape, attributions.shape)
github pytorch / captum / tests / attr / test_input_x_gradient.py
        target = torch.tensor(5)

        # 10-class classification model
        model = SoftmaxModel(num_in, 20, 10)
        input_x_grad = InputXGradient(model.forward)
        if nt_type == "vanilla":
            attributions = input_x_grad.attribute(input, target)
            output = model(input)[:, target]
            output.backward()
            expected = input.grad * input
            self.assertEqual(
                expected.detach().numpy().tolist(),
                attributions.detach().numpy().tolist(),
            )
        else:
            nt = NoiseTunnel(input_x_grad)
            attributions = nt.attribute(
                input, nt_type=nt_type, n_samples=10, stdevs=1.0, target=target
            )

        self.assertEqual(attributions.shape, input.shape)
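The vanilla branch above doubles as a recipe for checking InputXGradient by hand: the attribution should equal grad * input computed directly with autograd. The same check works outside the test harness; a minimal sketch on a toy model of our own:

import torch
import torch.nn as nn
from captum.attr import InputXGradient

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 3))
inp = torch.randn(1, 4, requires_grad=True)
target = torch.tensor(2)

attributions = InputXGradient(model).attribute(inp, target=target)

# Reproduce the attribution manually: d(output[target]) / d(input) * input.
out = model(inp)[:, target]
out.backward()
assert torch.allclose(attributions, inp.grad * inp)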
github pytorch / captum / tests / attr / test_saliency.py
        # 10-class classification model
        model = SoftmaxModel(num_in, 20, 10)
        saliency = Saliency(model)

        if nt_type == "vanilla":
            attributions = saliency.attribute(input, target)

            output = model(input)[:, target]
            output.backward()
            expected = torch.abs(input.grad)
            self.assertEqual(
                expected.detach().numpy().tolist(),
                attributions.detach().numpy().tolist(),
            )
        else:
            nt = NoiseTunnel(saliency)
            attributions = nt.attribute(
                input, nt_type=nt_type, n_samples=10, stdevs=0.0002, target=target
            )
        self.assertEqual(input.shape, attributions.shape)
github pytorch / captum / tests / attr / test_deeplift_basic.py
    def test_relu_linear_deeplift_batch(self):
        model = ReLULinearDeepLiftModel(inplace=True)
        x1 = torch.tensor([[-10.0, 1.0, -5.0], [2.0, 3.0, 4.0]], requires_grad=True)
        x2 = torch.tensor([[3.0, 3.0, 1.0], [2.3, 5.0, 4.0]], requires_grad=True)

        inputs = (x1, x2)
        baselines = (torch.zeros(1, 3), torch.rand(1, 3) * 0.001)
        # expected = [[[0.0, 0.0]], [[6.0, 2.0]]]
        self._deeplift_assert(model, DeepLift(model), inputs, baselines)
github pytorch / captum / tests / attr / test_deeplift_basic.py
    def test_reusable_modules(self):
        model = BasicModelWithReusableModules()
        input = torch.rand(1, 3)
        dl = DeepLift(model)
        with self.assertRaises(RuntimeError):
            dl.attribute(input, target=0)
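DeepLift relies on forward and backward hooks registered on each submodule, so a module instance that is reused within the forward pass (as in BasicModelWithReusableModules) would have its recorded activations overwritten; captum guards against this by raising a RuntimeError, which is exactly what this test pins down.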
github pytorch / captum / tests / attr / test_deeplift_classification.py
    def test_convnet_with_maxpool3d(self):
        input = 100 * torch.randn(2, 1, 10, 10, 10, requires_grad=True)
        baseline = 20 * torch.randn(2, 1, 10, 10, 10)

        model = BasicModel_ConvNet_MaxPool3d()
        dl = DeepLift(model)

        self.softmax_classification(model, dl, input, baseline, torch.tensor(2))
github pytorch / captum / tests / attr / test_deeplift_basic.py
    def test_relu_deeplift_batch(self):
        x1 = torch.tensor([[1.0], [1.0], [1.0], [1.0]], requires_grad=True)
        x2 = torch.tensor([[2.0], [2.0], [2.0], [2.0]], requires_grad=True)

        b1 = torch.tensor([[0.0], [0.0], [0.0], [0.0]], requires_grad=True)
        b2 = torch.tensor([[0.0], [0.0], [0.0], [0.0]], requires_grad=True)

        inputs = (x1, x2)
        baselines = (b1, b2)

        model = ReLUDeepLiftModel()
        self._deeplift_assert(model, DeepLift(model), inputs, baselines)
github pytorch / captum / tests / attr / test_deeplift_classification.py
    def test_softmax_classification_batch_multi_target(self):
        num_in = 40
        inputs = torch.arange(0.0, num_in * 3.0, requires_grad=True).reshape(3, num_in)
        baselines = torch.arange(1.0, num_in + 1).reshape(1, num_in)
        model = SoftmaxDeepLiftModel(num_in, 20, 10)
        dl = DeepLift(model)

        self.softmax_classification(
            model, dl, inputs, baselines, torch.tensor([2, 2, 2])
        )
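All of the DeepLift tests above share one calling convention: baselines are passed alongside the inputs (and may broadcast, with a first dimension of one, as in the zeros baselines above), and a batch of examples can take one target per row. A minimal sketch of that convention on a toy model of our own:

import torch
import torch.nn as nn
from captum.attr import DeepLift

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 3))
inputs = torch.randn(3, 4, requires_grad=True)
baselines = torch.zeros(1, 4)  # broadcast against all three examples

# DeepLift attributes the difference between the model output at the inputs
# and at the baselines; delta measures how far the attributions are from
# summing exactly to that difference (compare return_convergence_delta in
# the noise_tunnel.py excerpt above).
dl = DeepLift(model)
attributions, delta = dl.attribute(
    inputs,
    baselines=baselines,
    target=torch.tensor([2, 2, 2]),
    return_convergence_delta=True,
)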