model.add(Activation('softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.fit(x_train[:1000, :], y_train[:1000, :],
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test[:1000, :], y_test[:1000, :]))
# explain by passing the tensorflow inputs and outputs
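# the (model.layers[0].input, model.layers[-1].input) pair maps the model
# input to the input of the final softmax Activation, so the attributions
# explain the pre-softmax logits rather than the output probabilities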
np.random.seed(0)
inds = np.random.choice(x_train.shape[0], 10, replace=False)
e = shap.DeepExplainer((model.layers[0].input, model.layers[-1].input), x_train[inds,:,:])
shap_values = e.shap_values(x_test[:1])
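
# sanity check: by SHAP's local-accuracy (completeness) property, the
# attributions for a sample should sum to the model output on that sample
# minus the mean output over the background samples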
sess = tf.keras.backend.get_session()  # on TF2, use tf.compat.v1.keras.backend.get_session()
diff = sess.run(model.layers[-1].input, feed_dict={model.layers[0].input: x_test[:1]}) - \
    sess.run(model.layers[-1].input, feed_dict={model.layers[0].input: x_train[inds, :, :]}).mean(0)
sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])
d = np.abs(sums - diff).sum()
assert d / np.abs(diff).sum() < 0.001, "Sum of SHAP values does not match difference! %f" % d

# the enclosing function and loop header are assumptions reconstructed from
# the train(...) call below; only the loop body appeared in the snippet
def train(model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.mse_loss(output.squeeze(1), target)
        loss.backward()
        optimizer.step()
        if batch_idx % 2 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
device = torch.device('cpu')
train(model, device, loader, optimizer, 1)
next_x, next_y = next(iter(loader))
np.random.seed(0)
inds = np.random.choice(next_x.shape[0], 20, replace=False)
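# the 20 sampled rows act as the background (reference) set; attributions are
# measured relative to the expected model output over these rows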
e = shap.DeepExplainer(model, next_x[inds, :])
test_x, test_y = next(iter(loader))
shap_values = e.shap_values(test_x[:1])
model.eval()
model.zero_grad()
with torch.no_grad():
    diff = (model(test_x[:1]) - model(next_x[inds, :])).detach().numpy().mean(0)
sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])
d = np.abs(sums - diff).sum()
assert d / np.abs(diff).sum() < 0.001, "Sum of SHAP values does not match difference! %f" % (
    d / np.abs(diff).sum())

# as above, the function header and forward pass are assumptions inferred
# from the three-tensor loader and the model(test_x1, test_x2) call below
def train(model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data1, data2, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = model(data1, data2)
        loss = F.mse_loss(output.squeeze(1), target)
        loss.backward()
        optimizer.step()
        if batch_idx % 2 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data1), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
device = torch.device('cpu')
train(model, device, loader, optimizer, 1)
next_x1, next_x2, next_y = next(iter(loader))
np.random.seed(0)
inds = np.random.choice(next_x1.shape[0], 20, replace=False)
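# for a multi-input model, the background is a list with one tensor per input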
background = [next_x1[inds, :], next_x2[inds, :]]
e = shap.DeepExplainer(model, background)
test_x1, test_x2, test_y = next(iter(loader))
shap_x1, shap_x2 = e.shap_values([test_x1[:1], test_x2[:1]])
model.eval()
model.zero_grad()
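# with two inputs, local accuracy holds across both inputs combined, so the
# check sums the SHAP values of shap_x1 and shap_x2 together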
with torch.no_grad():
    diff = (model(test_x1[:1], test_x2[:1]) - model(*background)).detach().numpy().mean(0)
sums = np.array([shap_x1[i].sum() + shap_x2[i].sum() for i in range(len(shap_x1))])
d = np.abs(sums - diff).sum()
assert d / np.abs(diff).sum() < 0.001, "Sum of SHAP values does not match difference! %f" % (
    d / np.abs(diff).sum())

print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
    epoch, batch_idx * len(data), len(train_loader.dataset),
    100. * batch_idx / len(train_loader), loss.item()))
if num_examples > cutoff:
    break
device = torch.device('cpu')
train(model, device, train_loader, optimizer, 1)
next_x, next_y = next(iter(train_loader))
np.random.seed(0)
inds = np.random.choice(next_x.shape[0], 20, replace=False)
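# passing a (model, layer) tuple asks DeepExplainer to attribute the output
# to the inputs of that intermediate layer (here the first conv layer)
# instead of to the raw model input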
if interim:
    e = shap.DeepExplainer((model, model.conv_layers[0]), next_x[inds, :, :, :])
else:
    e = shap.DeepExplainer(model, next_x[inds, :, :, :])
test_x, test_y = next(iter(test_loader))
input_tensor = test_x[:1]
input_tensor.requires_grad = True
shap_values = e.shap_values(input_tensor)
model.eval()
model.zero_grad()
with torch.no_grad():
    diff = (model(test_x[:1]) - model(next_x[inds, :, :, :])).detach().numpy().mean(0)
sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])
d = np.abs(sums - diff).sum()
assert d / np.abs(diff).sum() < 0.001, "Sum of SHAP values does not match difference! %f" % (
    d / np.abs(diff).sum())

background = torch.cat([background, next(iter(dataloader_val))[0]], 0)
X_test, y_test = next(iter(dataloader_val))
if torch.cuda.is_available():
    background = background.cuda()
    X_test = X_test.cuda()
if pred_out != 'none':
    if torch.cuda.is_available():
        model2 = model.cuda()
    y_test = out_transform[pred_out](model2(X_test)).detach().cpu()
    y_test = y_test.numpy()
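# 'deep' uses shap.DeepExplainer (DeepLIFT-style backprop rules); 'gradient'
# uses shap.GradientExplainer (expected gradients). When ranked_outputs is
# set, shap_values returns a (values, ranked-output-indexes) pair, which is
# unpacked below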
if method == 'deep':
    e = shap.DeepExplainer(model, background)
    s = e.shap_values(X_test, ranked_outputs=n_outputs)
elif method == 'gradient':
    e = shap.GradientExplainer(model, background, batch_size=batch_size, local_smoothing=local_smoothing)
    s = e.shap_values(X_test, ranked_outputs=n_outputs, nsamples=n_samples)
if y_test.shape[1] > 1:
    y_test = y_test.argmax(axis=1)
if n_outputs > 1:
    shap_values, idx = s
else:
    shap_values, idx = s, y_test
if num_targets == 1:

def __init__(self, *argv, **kwargs):
    """
    Initialize the shap DeepExplainer wrapper object.
    """
    super(DeepExplainer, self).__init__(*argv, **kwargs)
    self.explainer = shap.DeepExplainer(*argv, **kwargs)
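
# the helper below wraps DeepExplainer behind a plain attribution function,
# summarizing the background data with a single k-means centroid
# (shap.kmeans(data, 1)) so the explainer only needs one reference row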
def deep_shap(model, data):
    """ Deep SHAP (DeepLIFT)
    """
    if isinstance(model, KerasWrap):
        model = model.model
    explainer = DeepExplainer(model, kmeans(data, 1).data)

    def f(X):
        phi = explainer.shap_values(X)
        if type(phi) is list and len(phi) == 1:
            return phi[0]
        else:
            return phi

    return f
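
# hypothetical usage sketch (model, X_background, and X_test are illustrative
# names, not from the snippet above):
# f = deep_shap(model, X_background)
# phi = f(X_test)  # one row of attributions per sample in X_test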

        allow_all_transformations=allow_all_transformations)
super(DeepExplainer, self).__init__(model, initialization_examples, **kwargs)
self._logger.debug('Initializing DeepExplainer')
self._method = 'shap.deep'
self.features = features
self.classes = classes
self.nclusters = nclusters
self.explain_subset = explain_subset
self.transformations = transformations
self.model_task = model_task
self.framework = _get_dnn_model_framework(self.model)
summary = _get_summary_data(self.initialization_examples, nclusters, self.framework)
# Suppress warning message from Keras
with logger_redirector(self._logger):
    self.explainer = shap.DeepExplainer(self.model, summary)
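
# Putting the pattern together: a minimal, self-contained sketch of the
# DeepExplainer workflow the snippets above share. The toy model, shapes, and
# seed are illustrative assumptions; the exact container returned by
# shap_values (a bare array vs. a per-output list) varies across shap versions.
import numpy as np
import shap
import torch
import torch.nn as nn

torch.manual_seed(0)
model = nn.Sequential(nn.Linear(10, 16), nn.ReLU(), nn.Linear(16, 1))
X = torch.randn(100, 10)

background = X[:20]                    # reference set the expectation is taken over
e = shap.DeepExplainer(model, background)
shap_values = e.shap_values(X[20:21])  # attributions for a single sample

# local accuracy: the attributions should sum to f(x) - E[f(background)]
with torch.no_grad():
    expected_diff = model(X[20:21]) - model(background).mean(0)
print(np.array(shap_values).sum(), expected_diff.sum().item())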