How to use the captum.attr._core.layer.layer_conductance.LayerConductance class in Captum

To help you get started, we’ve selected a few captum examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github.com/pytorch/captum — tests/attr/layer/test_layer_conductance.py (view on GitHub, external link)
def _conductance_reference_test_assert(
        self, model, target_layer, test_input, test_baseline=None
    ):
        """Check LayerConductance's convergence delta on *target_layer*.

        A forward hook captures the target layer's output during one forward
        pass; the target index is the class with the largest summed score.
        Attribution then runs with 300 Gauss-Legendre steps and the test
        asserts every convergence-delta entry is below 0.005.
        """
        captured_output = None

        def _capture(module, inputs, output):
            # Stash the target layer's forward output for later inspection.
            nonlocal captured_output
            captured_output = output

        handle = target_layer.register_forward_hook(_capture)
        model_output = model(test_input)
        handle.remove()

        # Attribute toward the output index with the highest total score.
        chosen_target = torch.argmax(torch.sum(model_output, 0))
        conductance = LayerConductance(model, target_layer)
        reference = ConductanceReference(model, target_layer)
        attributions, delta = conductance.attribute(
            test_input,
            baselines=test_baseline,
            target=chosen_target,
            n_steps=300,
            method="gausslegendre",
            return_convergence_delta=True,
        )
        within_tolerance = all(abs(delta.numpy().flatten()) < 0.005)
        self.assertTrue(
            within_tolerance,
            "Sum of attribution values does {} "
            " not match the difference of endpoints.".format(delta),
        )
github.com/pytorch/captum — tests/attr/layer/test_layer_conductance.py (view on GitHub, external link)
def _conductance_test_assert(
        self,
        model,
        target_layer,
        test_input,
        expected_conductance,
        baselines=None,
        additional_args=None,
    ):
        """Assert LayerConductance attributions converge for *model*.

        Runs attribution for target index 0 with 500 Gauss-Legendre steps
        under several internal batch sizes and checks that every entry of
        the convergence delta stays below 0.01.

        NOTE(review): this snippet is truncated by the page scrape — the
        assertTrue call's closing parentheses and any comparison of the
        attributions against ``expected_conductance`` are not visible here.
        """
        cond = LayerConductance(model, target_layer)
        # Convergence should hold no matter how the internal evaluation
        # batching splits the work.
        for internal_batch_size in (None, 1, 20):
            attributions, delta = cond.attribute(
                test_input,
                baselines=baselines,
                target=0,
                n_steps=500,
                method="gausslegendre",
                additional_forward_args=additional_args,
                internal_batch_size=internal_batch_size,
                return_convergence_delta=True,
            )
            delta_condition = all(abs(delta.numpy().flatten()) < 0.01)
            self.assertTrue(
                delta_condition,
                "Sum of attributions does {}"
                " not match the difference of endpoints.".format(delta),
github.com/pytorch/captum — tests/attr/neuron/test_neuron_conductance.py (view on GitHub, external link)
def _conductance_input_sum_test_assert(
        self, model, target_layer, test_input, test_baseline=None
    ):
        """Relate per-neuron conductance to layer-level conductance.

        Computes layer-level conductance once, then iterates every neuron
        index (i, j, k) of the attribution tensor (which here has four
        dimensions: batch plus three index axes) and attributes each neuron
        individually with NeuronConductance.

        NOTE(review): this snippet is truncated by the page scrape — the
        call continues past ``target=0,`` and the comparison between
        ``neuron_vals`` and ``attributions`` is not visible here.
        """
        layer_cond = LayerConductance(model, target_layer)
        attributions = layer_cond.attribute(
            test_input,
            baselines=test_baseline,
            target=0,
            n_steps=500,
            method="gausslegendre",
        )
        neuron_cond = NeuronConductance(model, target_layer)
        # Attribute each neuron of the layer output separately.
        for i in range(attributions.shape[1]):
            for j in range(attributions.shape[2]):
                for k in range(attributions.shape[3]):
                    neuron_vals = neuron_cond.attribute(
                        test_input,
                        (i, j, k),
                        baselines=test_baseline,
                        target=0,
github.com/pytorch/captum — tests/attr/test_data_parallel.py (view on GitHub, external link)
def test_simple_layer_conductance(self):
        """Run LayerConductance through the shared DataParallel check.

        Builds a small 4x3 input batch on the GPU and delegates to the
        data-parallel assertion helper, attributing target index 1 of the
        model's ReLU layer.
        """
        model = BasicModel_MultiLayer().cuda()
        batch = torch.tensor(
            [
                [0.0, 100.0, 0.0],
                [20.0, 100.0, 120.0],
                [30.0, 10.0, 0.0],
                [0.0, 0.0, 2.0],
            ]
        ).cuda()
        self._data_parallel_test_assert(
            LayerConductance, model, model.relu, inputs=batch, target=1
        )