How to use the alibi.utils.mapping.ohe_to_ord_shape function in alibi

To help you get started, we’ve selected a few alibi examples based on popular ways ohe_to_ord_shape is used in public projects.

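Before the project snippets, here is a minimal standalone sketch of what the function computes: given the shape of a one-hot encoded batch and a cat_vars mapping, ohe_to_ord_shape returns the shape the data would have after each one-hot block is collapsed into a single ordinal column. The feature layout, shapes, and cat_vars values below are made up purely for illustration.

from alibi.utils.mapping import ohe_to_ord_shape

# Illustrative layout: feature A one-hot encoded in columns 0-2 (3 categories),
# feature B one-hot encoded in columns 3-6 (4 categories), plus 2 numerical columns,
# giving 9 columns in total.
cat_vars_ohe = {0: 3, 3: 4}   # {first column of each OHE block: number of categories}
shape_ohe = (1, 9)            # batch of 1 instance

# collapsing each OHE block to one ordinal column leaves 2 categorical + 2 numerical = 4 columns
shape_ord = ohe_to_ord_shape(shape_ohe, cat_vars=cat_vars_ohe, is_ohe=True)
print(shape_ord)   # (1, 4)

# with is_ohe=False the data is treated as already ordinal and the shape is returned unchanged
print(ohe_to_ord_shape(shape_ohe, cat_vars=cat_vars_ohe, is_ohe=False))   # (1, 9)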

github SeldonIO / alibi / alibi / explainers / cfproto.py
                if c not in target_class:
                    continue
                dist_c, idx_c = self.kdtrees[c].query(X_num, k=k)
                dist_proto[c] = dist_c[0][-1]
                self.class_proto[c] = self.X_by_class[c][idx_c[0][-1]].reshape(1, -1)

        if self.enc_or_kdtree:
            self.id_proto = min(dist_proto, key=dist_proto.get)
            proto_val = self.class_proto[self.id_proto]
            if verbose:
                print('Prototype class: {}'.format(self.id_proto))
        else:  # no prototype loss term used
            proto_val = np.zeros(self.shape_enc)

        # set shape for perturbed instance and gradients
        pert_shape = ohe_to_ord_shape(self.shape, cat_vars=self.cat_vars, is_ohe=self.ohe)

        # set the lower and upper bounds for the constant 'c' to scale the attack loss term
        # these bounds are updated for each c_step iteration
        const_lb = np.zeros(self.batch_size)
        const = np.ones(self.batch_size) * self.c_init
        const_ub = np.ones(self.batch_size) * 1e10

        # init values for the best attack instances for each instance in the batch
        overall_best_dist = [1e10] * self.batch_size
        overall_best_attack = [np.zeros(self.shape[1:])] * self.batch_size
        overall_best_grad = (np.zeros(self.shape), np.zeros(self.shape))

        # keep track of counterfactual evolution
        self.cf_global = {i: [] for i in range(self.c_steps)}  # type: dict

        # iterate over nb of updates for 'c'

github SeldonIO / alibi / alibi / explainers / cfproto.py
        self.enc = enc_model
        self.cat_vars = cat_vars
        self.ohe = ohe
        self.use_kdtree = use_kdtree
        self.batch_size = shape[0]
        self.max_iterations = max_iterations
        self.c_init = c_init
        self.c_steps = c_steps
        self.feature_range = feature_range
        self.update_num_grad = update_num_grad
        self.eps = eps
        self.clip = clip
        self.write_dir = write_dir

        # compute dimensionality after conversion from OHE to ordinal encoding
        shape = ohe_to_ord_shape(shape, cat_vars=cat_vars, is_ohe=self.ohe)

        if self.is_cat:

            # define ragged tensor for mapping from categorical to numerical values
            self.map_cat_to_num = tf.ragged.constant([np.zeros(v) for _, v in self.cat_vars.items()])

            # define placeholder for mapping which can be fed after the fit step
            max_key = max(cat_vars, key=cat_vars.get)
            self.max_cat = cat_vars[max_key]
            cat_keys = list(cat_vars.keys())
            n_cat = len(cat_keys)
            self.assign_map = tf.placeholder(tf.float32, (n_cat, self.max_cat), name='assign_map')
            self.map_var = tf.Variable(np.zeros((n_cat, self.max_cat)), dtype=tf.float32, name='map_var')

            # update ragged mapping tensor
            lengths = [v for k, v in cat_vars.items()]
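
In this second snippet, the explainer's constructor converts the incoming (possibly one-hot) shape to its ordinal equivalent before building the TensorFlow graph. The sketch below, with a made-up feature layout, shows how that shape lines up with the data conversion helper ohe_to_ord from the same alibi.utils.mapping module, assuming ohe_to_ord returns the ordinal array together with the remapped cat_vars dictionary.

import numpy as np
from alibi.utils.mapping import ohe_to_ord, ohe_to_ord_shape

# Illustrative layout: feature A one-hot in columns 0-2, feature B in columns 3-6,
# numerical features in columns 7-8.
cat_vars_ohe = {0: 3, 3: 4}
X_ohe = np.array([[0., 1., 0.,          # A = category 1
                   0., 0., 0., 1.,      # B = category 3
                   0.5, -1.2]])         # numerical features

X_ord, cat_vars_ord = ohe_to_ord(X_ohe, cat_vars_ohe)
# ordinal columns hold the category indices (A -> 1, B -> 3); numerical columns pass through,
# and the keys of cat_vars_ord now refer to ordinal column positions, e.g. {0: 3, 1: 4}

# the converted data matches the shape predicted by ohe_to_ord_shape
assert X_ord.shape == ohe_to_ord_shape(X_ohe.shape, cat_vars=cat_vars_ohe, is_ohe=True)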