How to use the captum.attr._core.deep_lift.DeepLiftShap function in captum

To help you get started, we’ve selected a few captum examples, based on popular ways it is used in public projects.
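Below is a minimal, self-contained sketch of the typical call pattern (the toy model, tensor shapes, and target index are illustrative assumptions, not taken from the examples that follow): construct DeepLiftShap with a model, then call attribute with a batch of inputs and a tensor of several reference samples as baselines.

import torch
import torch.nn as nn
from captum.attr import DeepLiftShap

# Toy network; any nn.Module works here.
model = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 2))
model.eval()

inputs = torch.randn(2, 3)      # batch of 2 examples
baselines = torch.randn(10, 3)  # 10 reference samples from a background distribution

dls = DeepLiftShap(model)
# Attribute the class-1 output score back to the input features.
attributions, delta = dls.attribute(
    inputs, baselines, target=1, return_convergence_delta=True
)
print(attributions.shape)  # torch.Size([2, 3]): one attribution per input feature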


Example from pytorch/captum, tests/attr/test_deeplift_classification.py:
def test_softmax_classification_batch_multi_baseline(self):
        num_in = 40
        input = torch.arange(0.0, num_in * 2.0, requires_grad=True).reshape(2, num_in)
        baselines = torch.randn(5, 40)

        model = SoftmaxDeepLiftModel(num_in, 20, 10)
        dl = DeepLiftShap(model)

        self.softmax_classification(model, dl, input, baselines, torch.tensor(2))
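Note that, unlike plain DeepLift, DeepLiftShap expects a distribution of baselines: here five reference samples are supplied for a batch of two inputs, and the final attributions average the DeepLift results over each input-baseline pair.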
Example from pytorch/captum, tests/attr/test_deeplift_basic.py:
def test_relu_deepliftshap_with_hypothetical_contrib_func(self):
        model = Conv1dDeepLiftModel()
        rand_seq_data = torch.abs(torch.randn(2, 4, 1000))
        rand_seq_ref = torch.abs(torch.randn(3, 4, 1000))
        dls = DeepLiftShap(model)
        attr = dls.attribute(
            rand_seq_data,
            rand_seq_ref,
            custom_attribution_func=_hypothetical_contrib_func,
            target=(0, 0),
        )
        self.assertEqual(attr.shape, rand_seq_data.shape)
Example from pytorch/captum, tests/attr/test_deeplift_basic.py:
def test_relu_deepliftshap_multi_ref(self):
        x1 = torch.tensor([[1.0]], requires_grad=True)
        x2 = torch.tensor([[2.0]], requires_grad=True)

        b1 = torch.tensor([[0.0], [0.0], [0.0], [0.0]], requires_grad=True)
        b2 = torch.tensor([[0.0], [0.0], [0.0], [0.0]], requires_grad=True)

        inputs = (x1, x2)
        baselines = (b1, b2)

        model = ReLUDeepLiftModel()
        self._deeplift_assert(model, DeepLiftShap(model), inputs, baselines)
Example from pytorch/captum, tests/attr/test_deeplift_basic.py:
def test_relu_deepliftshap_with_custom_attr_func(self):
        def custom_attr_func(multipliers, inputs, baselines):
            return tuple(multiplier * 0.0 for multiplier in multipliers)

        model = ReLULinearDeepLiftModel(inplace=True)
        x1 = torch.tensor([[-10.0, 1.0, -5.0]])
        x2 = torch.tensor([[3.0, 3.0, 1.0]])
        b1 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
        b2 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
        inputs = (x1, x2)
        baselines = (b1, b2)
        dls = DeepLiftShap(model)
        attr_w_func = dls.attribute(
            inputs, baselines, custom_attribution_func=custom_attr_func
        )

        assertTensorAlmostEqual(self, attr_w_func[0], [[0.0, 0.0, 0.0]], 0.0)
        assertTensorAlmostEqual(self, attr_w_func[1], [[0.0, 0.0, 0.0]], 0.0)
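Here custom_attribution_func receives the DeepLift multipliers together with the inputs and baselines and must return a tuple of reweighted contributions, one per input tensor; since this one zeroes every multiplier, the resulting attributions are expected to be all zeros.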
Example from pytorch/captum, tests/attr/test_deeplift_basic.py:
def test_relu_linear_deepliftshap_compare_inplace(self):
        model1 = ReLULinearDeepLiftModel(inplace=True)
        x1 = torch.tensor([[-10.0, 1.0, -5.0], [2.0, 3.0, 4.0]], requires_grad=True)
        x2 = torch.tensor([[3.0, 3.0, 1.0], [2.3, 5.0, 4.0]], requires_grad=True)
        inputs = (x1, x2)
        b1 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
        b2 = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
        baselines = (b1, b2)

        attributions1 = DeepLiftShap(model1).attribute(inputs, baselines)

        model2 = ReLULinearDeepLiftModel()
        attributions2 = DeepLiftShap(model2).attribute(inputs, baselines)
        assertTensorAlmostEqual(self, attributions1[0], attributions2[0])
        assertTensorAlmostEqual(self, attributions1[1], attributions2[1])
Example from pytorch/captum, tests/attr/test_deeplift_basic.py:
def test_relu_deepliftshap_batch_4D_input(self):
        x1 = torch.ones(4, 1, 1, 1)
        x2 = torch.tensor([[[[2.0]]]] * 4)

        b1 = torch.zeros(4, 1, 1, 1)
        b2 = torch.zeros(4, 1, 1, 1)

        inputs = (x1, x2)
        baselines = (b1, b2)

        model = ReLUDeepLiftModel()
        self._deeplift_assert(model, DeepLiftShap(model), inputs, baselines)
Example from pytorch/captum, captum/attr/_core/layer/layer_deep_lift.py:
        undo_gradient_requirements(inputs, gradient_mask)

        return _compute_conv_delta_and_format_attrs(
            self,
            return_convergence_delta,
            attributions,
            baselines,
            inputs,
            additional_forward_args,
            target,
            False,  # currently both the input and output of the layer can only be a tensor
        )


class LayerDeepLiftShap(LayerDeepLift, DeepLiftShap):
    def __init__(self, model, layer):
        r"""
        Args:

            model (torch.nn.Module): The reference to the PyTorch model instance.
            layer (torch.nn.Module): Layer for which attributions are computed.
                          The size and dimensionality of the attributions
                          correspond to the size and dimensionality of the layer's
                          input or output, depending on whether we attribute to the
                          inputs or outputs of the layer.
                          Currently, it is assumed that both the inputs and outputs of
                          the layer can only be a single tensor.
        """
        LayerDeepLift.__init__(self, model, layer)
        DeepLiftShap.__init__(self, model)
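Usage of the layer-wise variant mirrors DeepLiftShap, except that a specific layer is passed alongside the model. A minimal sketch (the toy model and the choice of layer are illustrative assumptions):

import torch
import torch.nn as nn
from captum.attr import LayerDeepLiftShap

# Hypothetical two-layer network; we attribute to the first Linear layer's output.
model = nn.Sequential(nn.Linear(3, 4), nn.ReLU(), nn.Linear(4, 2))
model.eval()

inputs = torch.randn(2, 3)
baselines = torch.randn(10, 3)  # multiple reference samples, as DeepLiftShap expects

ldls = LayerDeepLiftShap(model, model[0])
attributions = ldls.attribute(inputs, baselines, target=0)
print(attributions.shape)  # torch.Size([2, 4]): one value per unit of the chosen layer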