How to use the saliency.GradientSaliency function in saliency

To help you get started, we've selected a few saliency.GradientSaliency examples based on popular ways the function is used in public projects.
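Before the examples, here is a minimal end-to-end sketch of the API they all share, assuming the TF1-style release of the PAIR-code saliency library (newer releases move this class under saliency.tf1); the toy model and tensor names below are illustrative only:

import numpy as np
import saliency
import tensorflow as tf  # TF 1.x assumed, as in the snippets below

graph = tf.Graph()
with graph.as_default():
    images = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
    logits = tf.layers.dense(tf.layers.flatten(images), 10)  # toy model
    neuron_selector = tf.placeholder(tf.int32)  # which class to explain
    y = logits[0][neuron_selector]              # pre-softmax scalar
    sess = tf.Session(graph=graph)
    sess.run(tf.global_variables_initializer())

# Construct the object; this only wires up the tf.gradients ops.
grad_saliency = saliency.GradientSaliency(graph, sess, y, images)

img = np.random.rand(28, 28, 1).astype(np.float32)  # single example, no batch dim
mask = grad_saliency.GetMask(img, feed_dict={neuron_selector: 3})
smooth = grad_saliency.GetSmoothedMask(img, feed_dict={neuron_selector: 3})
heatmap = np.abs(mask).max(axis=-1)  # collapse channels to a 2-D map for plotting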


github google-research / google-research / interpretability_benchmark / saliency_data_gen / saliency_helper.py (view on GitHub)
# Function header reconstructed; the snippet as published starts mid-docstring.
def get_saliency_image(graph, sess, y, image, saliency_method):
  """Generates a saliency object for the given image.

  Args:
    graph: the TensorFlow graph containing the model.
    sess: the current session.
    y: the pre-softmax activation we want to assess attribution with respect to.
    image: float32 image tensor with size [1, None, None].
    saliency_method: string indicating saliency map type to generate.

  Returns:
    a saliency object from which a saliency map and a smoothed saliency map
      can be generated.

  Raises:
    ValueError: if the saliency_method string does not match any included
      method.
  """
  if saliency_method == 'integrated_gradients':
    integrated_placeholder = saliency.IntegratedGradients(graph, sess, y, image)
    return integrated_placeholder
  elif saliency_method == 'gradient':
    gradient_placeholder = saliency.GradientSaliency(graph, sess, y, image)
    return gradient_placeholder
  elif saliency_method == 'guided_backprop':
    gb_placeholder = saliency.GuidedBackprop(graph, sess, y, image)
    return gb_placeholder
  else:
    raise ValueError('No saliency method matched. Verification of '
                     'input needed.')
github PreferredAI / tutorials / image-classification / face-emotion / src / visualize.py (view on GitHub)
# Restore trained model
    saver.restore(sess, tf.train.latest_checkpoint(FLAGS.checkpoint_dir))
    print("Model loaded!")

    # Visualize first convolutional layer filters
    vis_conv1_filters(sess)

    # Visualize activation maps from conv4 layer
    vis_conv4(sess, model, 'data/images/0/130.jpg')
    vis_conv4(sess, model, 'data/images/0/607.jpg')
    vis_conv4(sess, model, 'data/images/1/82.jpg')
    vis_conv4(sess, model, 'data/images/1/791.jpg')

    # Construct the saliency object. This doesn't yet compute the saliency
    # mask; it just sets up the necessary ops.
    grad_saliency = saliency.GradientSaliency(graph, sess, y, model.x)

    # Visualize using guided back-propagation
    vis_guided_backprop(model, grad_saliency, neuron_selector, 'data/images/0/130.jpg', 0)
    vis_guided_backprop(model, grad_saliency, neuron_selector, 'data/images/0/607.jpg', 0)
    vis_guided_backprop(model, grad_saliency, neuron_selector, 'data/images/1/82.jpg', 1)
    vis_guided_backprop(model, grad_saliency, neuron_selector, 'data/images/1/791.jpg', 1)
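vis_guided_backprop is the tutorial's own helper; a plausible sketch of what such a helper does with the saliency object and neuron selector (all names here are hypothetical, not the tutorial's actual code):

import numpy as np
import matplotlib.pyplot as plt

def vis_saliency_map(model, saliency_obj, neuron_selector, image_path, label):
    # `load_image` is an assumed loader returning a float32 HxWxC array.
    img = load_image(image_path)
    # Select the logit for `label`, then compute the gradient mask.
    mask = saliency_obj.GetMask(img, feed_dict={neuron_selector: label})
    # Collapse channels into a 2-D heatmap for display.
    heatmap = np.abs(mask).max(axis=-1) if mask.ndim == 3 else np.abs(mask)
    plt.imshow(heatmap, cmap='gray')
    plt.axis('off')
    plt.show()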
github danisbet / machine-lip-reading / vis / integrated_gradients.py (view on GitHub)

"""Utilities to compute an IntegratedGradients SaliencyMask."""

import numpy as np
from saliency import GradientSaliency

class IntegratedGradients(GradientSaliency):
    """A SaliencyMask class that implements the integrated gradients method.

    https://arxiv.org/abs/1703.01365
    """

    def GetMask(self, input_image, input_baseline=None, nsamples=100):
        """Returns an integrated gradients mask."""
        # `== None` on a numpy array compares elementwise; use `is None`.
        if input_baseline is None:
            input_baseline = np.zeros_like(input_image)

        assert input_baseline.shape == input_image.shape

        input_diff = input_image - input_baseline

        total_gradients = np.zeros_like(input_image)

        # Completion of the truncated snippet: average vanilla-gradient masks
        # at points along the straight line from baseline to input, then scale
        # by the input difference (Riemann approximation of the path integral).
        for alpha in np.linspace(0, 1, nsamples):
            step_input = input_baseline + alpha * input_diff
            total_gradients += super(IntegratedGradients, self).GetMask(step_input)

        return total_gradients * input_diff / nsamples
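The loop above is the standard Riemann-sum approximation of integrated gradients: IG_i(x) ~ (x_i - x'_i) * (1/m) * sum over k of dF/dx_i evaluated at x' + (k/m)(x - x'). A hedged usage sketch, with graph, session, and tensors assumed as in the earlier examples:

# Hypothetical usage; `graph`, `sess`, `y` and `x` are assumed to exist
# as in the GradientSaliency examples above.
ig = IntegratedGradients(graph, sess, y, x)
img = np.random.rand(28, 28, 1).astype(np.float32)  # stand-in input
ig_mask = ig.GetMask(img, nsamples=100)  # black-image baseline by default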
github PAIR-code / saliency / saliency.py (view on GitHub)
# Reconstructed context: this __init__ sits inside the GradientSaliency class.
import tensorflow as tf

class GradientSaliency(SaliencyMask):
    def __init__(self, graph, session, y, x):
        super(GradientSaliency, self).__init__(graph, session, y, x)
        self.gradients_node = tf.gradients(y, x)[0]  # d(y)/d(x), the saliency op
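A hedged sketch of the companion method that evaluates this node, modelled on the PAIR-code TF1 API (treat the exact signature as an assumption):

    def GetMask(self, x_value, feed_dict={}):
        # Feed the single example as a batch of one; the trailing [0]
        # strips the batch dimension off the returned gradients again.
        feed_dict[self.x] = [x_value]
        return self.session.run(self.gradients_node, feed_dict=feed_dict)[0]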