How to use the captum.attr._utils.common._format_additional_forward_args function in captum

To help you get started, we’ve selected a few captum examples based on popular ways it is used in public projects.

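Every snippet below calls _format_additional_forward_args for the same reason: to normalize whatever was passed as additional_forward_args before it is expanded or batched. A minimal sketch of that normalization (an illustration of the behavior, not necessarily the exact captum source): a single non-tuple argument is wrapped into a one-element tuple, tuples pass through unchanged, and None stays None.

from typing import Any, Optional, Tuple

def format_additional_forward_args_sketch(
    additional_forward_args: Any,
) -> Optional[Tuple]:
    # Wrap a bare argument in a one-element tuple so downstream helpers can
    # always iterate over it; tuples and None pass through unchanged.
    if additional_forward_args is not None and not isinstance(
        additional_forward_args, tuple
    ):
        additional_forward_args = (additional_forward_args,)
    return additional_forward_args

assert format_additional_forward_args_sketch(None) is None
assert format_additional_forward_args_sketch(5) == (5,)
assert format_additional_forward_args_sketch((5, "flag")) == (5, "flag")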

github pytorch / captum / captum / attr / _core / layer / grad_cam.py
                >>> # use case for GradCAM.
                >>> net = ImageClassifier()
                >>> layer_gc = LayerGradCam(net, net.conv4)
                >>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
                >>> # Computes layer GradCAM for class 3.
                >>> # attribution size matches layer output except for dimension
                >>> # 1, so dimensions of attr would be Nx1x8x8.
                >>> attr = layer_gc.attribute(input, 3)
                >>> # GradCAM attributions are often upsampled and viewed as a
                >>> # mask to the input, since the convolutional layer output
                >>> # spatially matches the original input image.
                >>> # This can be done with LayerAttribution's interpolate method.
                >>> upsampled_attr = LayerAttribution.interpolate(attr, (32, 32))
        """
        inputs = _format_input(inputs)
        additional_forward_args = _format_additional_forward_args(
            additional_forward_args
        )
        # Returns gradient of output with respect to
        # hidden layer and hidden layer evaluated at each input.
        layer_gradients, layer_eval = compute_layer_gradients_and_eval(
            self.forward_func,
            self.layer,
            inputs,
            target,
            additional_forward_args,
            device_ids=self.device_ids,
            attribute_to_layer_input=attribute_to_layer_input,
        )
        summed_grads = torch.mean(
            layer_gradients,
            dim=tuple(x for x in range(2, len(layer_gradients.shape))),
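If the wrapped model's forward takes arguments beyond the main input, they can be supplied through additional_forward_args and are normalized by the same helper. Continuing the docstring example above, a hypothetical call where the forward signature is forward(x, mask) (the mask argument is invented for illustration):

# Hypothetical extra argument; a bare (non-tuple) value is accepted because
# _format_additional_forward_args wraps it into a tuple internally.
mask = torch.ones(2, 1, 32, 32)
attr = layer_gc.attribute(input, target=3, additional_forward_args=mask)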
github pytorch / captum / captum / attr / _core / layer / internal_influence.py
        inputs, baselines = _format_input_baseline(inputs, baselines)
        _validate_input(inputs, baselines, n_steps, method)

        # Retrieve step size and scaling factor for specified approximation method
        step_sizes_func, alphas_func = approximation_parameters(method)
        step_sizes, alphas = step_sizes_func(n_steps), alphas_func(n_steps)

        # Compute scaled inputs from baseline to final input.
        scaled_features_tpl = tuple(
            torch.cat(
                [baseline + alpha * (input - baseline) for alpha in alphas], dim=0
            ).requires_grad_()
            for input, baseline in zip(inputs, baselines)
        )

        additional_forward_args = _format_additional_forward_args(
            additional_forward_args
        )
        # apply number of steps to additional forward args
        # currently, number of steps is applied only to additional forward arguments
        # that are nd-tensors. It is assumed that the first dimension is
        # the number of batches.
        # dim -> (bsz * #steps x additional_forward_args[0].shape[1:], ...)
        input_additional_args = (
            _expand_additional_forward_args(additional_forward_args, n_steps)
            if additional_forward_args is not None
            else None
        )
        expanded_target = _expand_target(target, n_steps)

        # Returns gradient of output with respect to hidden layer.
        layer_gradients, _ = _batched_operator(
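The expansion referenced in the comment above tiles each tensor argument along dim 0 so its batch dimension matches the bsz * #steps scaled inputs. A rough sketch of the default repeat-style expansion, assuming non-tensor arguments are passed through unchanged:

import torch

def expand_args_sketch(additional_forward_args, n_steps):
    # Repeat each tensor argument n_steps times along the batch dimension;
    # non-tensor arguments are left as-is.
    return tuple(
        torch.cat([arg] * n_steps, dim=0) if isinstance(arg, torch.Tensor) else arg
        for arg in additional_forward_args
    )

# A (4, 7) tensor argument expanded for 50 steps becomes (200, 7).
expanded = expand_args_sketch((torch.randn(4, 7), "flag"), n_steps=50)
assert expanded[0].shape == (200, 7) and expanded[1] == "flag"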
github pytorch / captum / captum / attr / _core / integrated_gradients.py
        _validate_input(inputs, baselines, n_steps, method)

        # retrieve step size and scaling factor for specified approximation method
        step_sizes_func, alphas_func = approximation_parameters(method)
        step_sizes, alphas = step_sizes_func(n_steps), alphas_func(n_steps)

        # scale features and compute gradients. (batch size is abbreviated as bsz)
        # scaled_features' dim -> (bsz * #steps x inputs[0].shape[1:], ...)
        scaled_features_tpl = tuple(
            torch.cat(
                [baseline + alpha * (input - baseline) for alpha in alphas], dim=0
            ).requires_grad_()
            for input, baseline in zip(inputs, baselines)
        )

        additional_forward_args = _format_additional_forward_args(
            additional_forward_args
        )
        # apply number of steps to additional forward args
        # currently, number of steps is applied only to additional forward arguments
        # that are nd-tensors. It is assumed that the first dimension is
        # the number of batches.
        # dim -> (bsz * #steps x additional_forward_args[0].shape[1:], ...)
        input_additional_args = (
            _expand_additional_forward_args(additional_forward_args, n_steps)
            if additional_forward_args is not None
            else None
        )
        expanded_target = _expand_target(target, n_steps)

        # grads: dim -> (bsz * #steps x inputs[0].shape[1:], ...)
        grads = _batched_operator(
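The scaled_features_tpl construction above is a straight-line interpolation from baseline to input at each alpha, concatenated along the batch dimension. A toy check of the resulting layout:

import torch

inp = torch.tensor([[4.0, 8.0]])     # bsz = 1
baseline = torch.zeros_like(inp)
alphas = [0.0, 0.5, 1.0]             # stand-in for alphas_func(n_steps)

scaled = torch.cat([baseline + a * (inp - baseline) for a in alphas], dim=0)
# dim -> (bsz * #steps) x 2, stepping from the baseline to the input
assert torch.equal(scaled, torch.tensor([[0.0, 0.0], [2.0, 4.0], [4.0, 8.0]]))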
github pytorch / captum / captum / attr / _core / noise_tunnel.py
        def expand_and_update_additional_forward_args():
            if "additional_forward_args" not in kwargs:
                return
            additional_forward_args = kwargs["additional_forward_args"]
            additional_forward_args = _format_additional_forward_args(
                additional_forward_args
            )
            if additional_forward_args is None:
                return
            additional_forward_args = _expand_additional_forward_args(
                additional_forward_args,
                n_samples,
                expansion_type=ExpansionTypes.repeat_interleave,
            )
            # update kwargs with the expanded additional forward args
            kwargs["additional_forward_args"] = additional_forward_args
github pytorch / captum / captum / attr / _core / layer / layer_integrated_gradients.py
                >>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
                >>> # and returns an Nx10 tensor of class probabilities.
                >>> # It contains an attribute conv1, which is an instance of nn.conv2d,
                >>> # and the output of this layer has dimensions Nx12x32x32.
                >>> net = ImageClassifier()
                >>> lig = LayerIntegratedGradients(net, net.conv1)
                >>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
                >>> # Computes layer integrated gradients for class 3.
                >>> # attribution size matches layer output, Nx12x32x32
                >>> attribution = lig.attribute(input, target=3)
        """
        inps, baselines = _format_input_baseline(inputs, baselines)
        _validate_input(inps, baselines, n_steps, method)

        baselines = _tensorize_baseline(inps, baselines)
        additional_forward_args = _format_additional_forward_args(
            additional_forward_args
        )

        if self.device_ids is None:
            self.device_ids = getattr(self.forward_func, "device_ids", None)
        inputs_layer = _forward_layer_eval(
            self.forward_func,
            inps,
            self.layer,
            device_ids=self.device_ids,
            additional_forward_args=additional_forward_args,
            attribute_to_layer_input=attribute_to_layer_input,
        )

        baselines_layer = _forward_layer_eval(
            self.forward_func,
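The device_ids fallback in this snippet relies on torch.nn.DataParallel exposing a device_ids attribute, so a wrapped model works without passing IDs explicitly. The pattern in isolation:

import torch

model = torch.nn.DataParallel(torch.nn.Linear(4, 2))
# DataParallel carries a device_ids attribute; a plain nn.Module does not,
# so the getattr default of None covers both wrapped and unwrapped models.
device_ids = getattr(model, "device_ids", None)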
github pytorch / captum / captum / attr / _core / neuron / neuron_conductance.py
        num_examples = inputs[0].shape[0]
        total_batch = num_examples * n_steps

        # Retrieve scaling factors for specified approximation method
        step_sizes_func, alphas_func = approximation_parameters(method)
        step_sizes, alphas = step_sizes_func(n_steps), alphas_func(n_steps)

        # Compute scaled inputs from baseline to final input.
        scaled_features_tpl = tuple(
            torch.cat(
                [baseline + alpha * (input - baseline) for alpha in alphas], dim=0
            ).requires_grad_()
            for input, baseline in zip(inputs, baselines)
        )

        additional_forward_args = _format_additional_forward_args(
            additional_forward_args
        )
        # apply number of steps to additional forward args
        # currently, number of steps is applied only to additional forward arguments
        # that are nd-tensors. It is assumed that the first dimension is
        # the number of batches.
        # dim -> (#examples * #steps x additional_forward_args[0].shape[1:], ...)
        input_additional_args = (
            _expand_additional_forward_args(additional_forward_args, n_steps)
            if additional_forward_args is not None
            else None
        )
        expanded_target = _expand_target(target, n_steps)

        # Conductance Gradients - Returns gradient of output with respect to
        # hidden layer and hidden layer evaluated at each input.
github pytorch / captum / captum / attr / _utils / batching.py
def _batched_generator(
    inputs, additional_forward_args=None, target_ind=None, internal_batch_size=None
):
    """
    Returns a generator that yields corresponding chunks of size
    internal_batch_size from both inputs and additional_forward_args. If
    internal_batch_size is None, the generator yields only the original
    inputs and additional args.
    """
    assert internal_batch_size is None or (
        isinstance(internal_batch_size, int) and internal_batch_size > 0
    ), "Batch size must be greater than 0."
    inputs = _format_input(inputs)
    additional_forward_args = _format_additional_forward_args(additional_forward_args)
    num_examples = inputs[0].shape[0]
    if internal_batch_size is None:
        yield inputs, additional_forward_args, target_ind
    else:
        for current_total in range(0, num_examples, internal_batch_size):
            yield _tuple_splice_range(
                inputs, current_total, current_total + internal_batch_size
            ), _tuple_splice_range(
                additional_forward_args,
                current_total,
                current_total + internal_batch_size,
            ), target_ind[
                current_total : current_total + internal_batch_size
            ] if isinstance(
                target_ind, list
            ) or (
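In use, the generator yields aligned chunks of the inputs, the normalized extra arguments, and list-style targets. A hedged usage sketch, assuming the full captum implementation of _batched_generator:

import torch

inputs = (torch.randn(8, 3),)
extra = (torch.randn(8, 5),)              # one tensor of additional forward args
targets = list(range(8))

for inp_chunk, extra_chunk, tgt_chunk in _batched_generator(
    inputs, additional_forward_args=extra, target_ind=targets, internal_batch_size=3
):
    # chunks of 3, 3, and 2 examples; inputs, extra args, and targets stay aligned
    assert inp_chunk[0].shape[0] == extra_chunk[0].shape[0] == len(tgt_chunk)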
github pytorch / captum / captum / attr / _core / layer / layer_conductance.py
        num_examples = inputs[0].shape[0]

        # Retrieve scaling factors for specified approximation method
        step_sizes_func, alphas_func = approximation_parameters(method)
        alphas = alphas_func(n_steps + 1)

        # Compute scaled inputs from baseline to final input.
        scaled_features_tpl = tuple(
            torch.cat(
                [baseline + alpha * (input - baseline) for alpha in alphas], dim=0
            ).requires_grad_()
            for input, baseline in zip(inputs, baselines)
        )

        additional_forward_args = _format_additional_forward_args(
            additional_forward_args
        )
        # apply number of steps to additional forward args
        # currently, number of steps is applied only to additional forward arguments
        # that are nd-tensors. It is assumed that the first dimension is
        # the number of batches.
        # dim -> (#examples * #steps x additional_forward_args[0].shape[1:], ...)
        input_additional_args = (
            _expand_additional_forward_args(additional_forward_args, n_steps + 1)
            if additional_forward_args is not None
            else None
        )
        expanded_target = _expand_target(target, n_steps + 1)

        # Conductance Gradients - Returns gradient of output with respect to
        # hidden layer and hidden layer evaluated at each input.
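Note that LayerConductance, unlike the integrated-gradients snippets above, requests n_steps + 1 alphas and expands the extra arguments by n_steps + 1: conductance is assembled from differences between consecutive evaluations along the path, and n_steps differences need n_steps + 1 points. In miniature:

# n_steps differences require n_steps + 1 evaluation points on the path
alphas = [i / 4 for i in range(5)]                   # n_steps = 4 -> 5 points
diffs = [b - a for a, b in zip(alphas, alphas[1:])]
assert len(alphas) == 5 and len(diffs) == 4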
github pytorch / captum / captum / attr / _core / feature_ablation.py
            >>> # +---+---+---+---+
            >>> # With this mask, all inputs with the same value are ablated
            >>> # simultaneously, and the attribution for each input in the same
            >>> # group (0, 1, 2, and 3) per example are the same.
            >>> # The attributions can be calculated as follows:
            >>> # feature mask has dimensions 1 x 4 x 4
            >>> feature_mask = torch.tensor([[[0,0,1,1],[0,0,1,1],
            >>>                             [2,2,3,3],[2,2,3,3]]])
            >>> attr = ablator.attribute(input, target=1, feature_mask=feature_mask)
        """
        with torch.no_grad():
            # Keeps track of whether the original input is a tuple or not
            # before converting it into a tuple.
            is_inputs_tuple = isinstance(inputs, tuple)
            inputs, baselines = _format_input_baseline(inputs, baselines)
            additional_forward_args = _format_additional_forward_args(
                additional_forward_args
            )
            num_examples = inputs[0].shape[0]
            feature_mask = (
                _format_input(feature_mask) if feature_mask is not None else None
            )
            assert (
                isinstance(ablations_per_eval, int) and ablations_per_eval >= 1
            ), "Ablations per evaluation must be at least 1."

            # Computes initial evaluation with all features, which is compared
            # to each ablated result.
            initial_eval = _run_forward(
                self.forward_func, inputs, target, additional_forward_args
            )
            if isinstance(initial_eval, (int, float)) or (
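Downstream of the normalization, a forward pass like the _run_forward call above can simply unpack the tuple after the inputs. A simplified sketch of that call pattern (ignoring target selection, which the real helper also handles):

def run_forward_sketch(forward_func, inputs, additional_forward_args=None):
    # inputs is a tuple of tensors; additional_forward_args is None or a tuple
    # after _format_additional_forward_args, so unpacking is always safe.
    extra = additional_forward_args if additional_forward_args is not None else ()
    return forward_func(*inputs, *extra)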