How to use the numpy.mean function

To help you get started, we've selected a few numpy.mean examples, based on popular ways the function is used in public projects.

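numpy.mean reduces an array to the average of its elements, either over the whole array or along one or more axes. A minimal sketch of the call patterns you will see in the snippets below (the array a is just illustrative data):

import numpy as np

a = np.arange(12, dtype=np.float64).reshape(3, 4)

np.mean(a)                          # grand mean over all elements -> scalar
np.mean(a, axis=0)                  # column means -> shape (4,)
np.mean(a, axis=1)                  # row means -> shape (3,)
np.mean(a, axis=(0, 1))             # mean over both axes -> scalar
np.mean(a, axis=0, keepdims=True)   # keep the reduced axis -> shape (1, 4)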

github ucbrise / clipper / containers / python / test_sklearn_cifar_container.py View on GitHub
import numpy as np
import pandas as pd


def load_cifar(cifar_location, cifar_filename="train.data", norm=False):
    cifar_path = cifar_location + "/" + cifar_filename
    print("Source file: %s" % cifar_path)
    df = pd.read_csv(cifar_path, sep=",", header=None)
    data = df.values
    print("Number of image files: %d" % len(data))
    y = data[:, 0]
    X = data[:, 1:]
    Z = X
    if norm:
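        # X.T has shape (features, samples), so axis-0 statistics are per image:
        # each image is centred and scaled by its own pixel mean and variance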
        mu = np.mean(X.T, 0)
        sigma = np.var(X.T, 0)
        Z = (X.T - mu) / np.array([np.sqrt(z) if z > 0 else 1. for z in sigma])
        Z = Z.T
    return (Z, y)
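The snippet above standardizes each image by its own pixel statistics via the transpose. A simpler per-column variant of the same np.mean/np.var pattern, shown as a hedged, stand-alone sketch (standardize_columns and X are illustrative names, not part of the Clipper code):

import numpy as np

def standardize_columns(X):
    """Zero-mean, unit-variance scaling of each feature column."""
    mu = np.mean(X, axis=0)                  # per-column mean, shape (n_features,)
    sigma = np.std(X, axis=0)                # per-column standard deviation
    sigma = np.where(sigma > 0, sigma, 1.0)  # guard constant columns, as the snippet does
    return (X - mu) / sigma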
github shallowtoil / SiamDW.pytorch / bin / track_siamfc.py View on GitHub
def init(im, target_pos, target_sz, model):
    state = dict()
    cfg = load_json('lib/utils/config.json')
    config = cfg[args.arch][args.dataset]
    p.update(config)

    net = model

    avg_chans = np.mean(im, axis=(0, 1))

    wc_z = target_sz[0] + p.context_amount * sum(target_sz)
    hc_z = target_sz[1] + p.context_amount * sum(target_sz)
    s_z = round(np.sqrt(wc_z * hc_z))
    scale_z = p.exemplar_size / s_z

    z_crop = get_subwindow_tracking(im, target_pos, p.exemplar_size, s_z, avg_chans)
   
    d_search = (p.instance_size - p.exemplar_size) / 2
    pad = d_search / scale_z
    s_x = s_z + 2 * pad
    min_s_x = 0.2 * s_x
    max_s_x = 5 * s_x
   
    s_x_serise = {'s_x': s_x, 'min_s_x': min_s_x, 'max_s_x': max_s_x}
    p.update(s_x_serise)
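The np.mean(im, axis=(0, 1)) call above is the standard way to get a per-channel mean of an H x W x C image: passing a tuple of axes collapses height and width and leaves one value per channel. A self-contained sketch with random data (image size and dtype are arbitrary here):

import numpy as np

im = np.random.randint(0, 256, size=(255, 255, 3)).astype(np.float32)  # H x W x C
avg_chans = np.mean(im, axis=(0, 1))  # shape (3,): mean of each colour channel
# Trackers such as SiamFC use these per-channel means to pad crops that
# extend beyond the image border.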
github WISDEM / WISDEM / wisdem / rotorse / rotor_fast.py View on GitHub
        cm_temp = cm[idx_low:idx_high]
        # indices of local minima of cm_temp, offset back into the full cm array
        idx_cm_min = [i + idx_low for i, local_min in enumerate(np.r_[True, cm_temp[1:] < cm_temp[:-1]] & np.r_[cm_temp[:-1] < cm_temp[1:], True]) if local_min]
        idx_high = idx_cm_min[-1]
        
        
        idx_Cn1 = find_breakpoint(alpha, cm, idx_alpha0, idx_high)
        unsteady['Cn1'] = cn[idx_Cn1]
    else:
        idx_Cn1 = np.argmin(abs(alpha-0.))
        unsteady['Cn1'] = 0.
    

    
    # Cn2
    if max(np.abs(np.gradient(cm)))>1.e-10:
        aoa_l = np.mean([alpha[idx_alpha0], alpha[idx_Cn1]])-30.
        idx_low  = np.argmin(abs(alpha-aoa_l))

        cm_temp = cm[idx_low:idx_high]
        # indices of local minima of cm_temp, offset back into the full cm array
        idx_cm_min = [i + idx_low for i, local_min in enumerate(np.r_[True, cm_temp[1:] < cm_temp[:-1]] & np.r_[cm_temp[:-1] < cm_temp[1:], True]) if local_min]
        idx_high = idx_cm_min[-1]
        
        idx_Cn2 = find_breakpoint(alpha, cm, idx_low, idx_alpha0, multi=0.)
        unsteady['Cn2'] = cn[idx_Cn2]
    else:
        idx_Cn2 = np.argmin(abs(alpha-0.))
        unsteady['Cn2'] = 0.

    # C_nalpha
    if max(np.abs(np.gradient(cm)))>1.e-10:
        # unsteady['C_nalpha'] = np.gradient(cn, alpha_rad)[idx_alpha0]
        unsteady['C_nalpha'] = max(np.gradient(cn[idx_alpha0:idx_Cn1], alpha_rad[idx_alpha0:idx_Cn1]))
github SudeepDasari / visual_foresight / visual_mpc / policy / cem_controllers / pixel_cost_controller.py View on GitHub
        for icam in range(self._n_cam):
            for p in range(self._n_desig):
                distance_grid = self._get_distancegrid(self._goal_pix[icam, p])
                score = self._expected_distance(icam, p, gen_distrib[:, :, icam, :, :, p], distance_grid,
                                                normalize=True)
                
                scores_per_task.append(score)
                self._logger.log(
                    'best flow score of task {} cam{}  :{}'.format(p, icam, np.min(scores_per_task[-1])))

        scores_per_task = np.stack(scores_per_task, axis=1)

        if self._hp.only_take_first_view:
            scores_per_task = scores_per_task[:, 0][:, None]

        scores = np.mean(scores_per_task, axis=1)

        bestind = scores.argsort()[0]
        for icam in range(self._n_cam):
            for p in range(self._n_desig):
                self._logger.log('flow score of best traj for task{} cam{} :{}'.format(p, icam, scores_per_task[
                    bestind, p + icam * self._n_desig]))

        if self._hp.predictor_propagation:
            if cem_itr == (self._hp.iterations - 1):
                # pick the prop distrib from the action actually chosen after the last iteration (i.e. self.indices[0])
                bestind = scores.argsort()[0]
                self._chosen_distrib = gen_distrib[bestind]
        return scores
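Here np.mean(scores_per_task, axis=1) collapses the per-task, per-camera columns into one score per candidate trajectory, which is then ranked with argsort. A hedged sketch of that reduction with made-up shapes:

import numpy as np

n_samples, n_tasks = 5, 3
scores_per_task = np.random.rand(n_samples, n_tasks)  # one column per (task, camera) pair

scores = np.mean(scores_per_task, axis=1)  # shape (n_samples,): average score per sample
bestind = scores.argsort()[0]              # lowest mean score wins (these are costs)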
github sisl / mechamodlearn / mechamodlearn / trainer.py View on GitHub
                method=self._integration_method)

            with utils.Timer() as gradtime:
                loss.backward()

            self.optimizer.step()

            loss_ls.append(loss.cpu().detach().numpy())
            loss_info_ls.append(loss_info)
            losstimer_ls.append(losstime.dt)
            gradtimer_ls.append(gradtime.dt)

        metrics = {}
        loss_info = nested.zip(*loss_info_ls)
        metrics['cpu_memory_MB'] = peak_mem_usage
        metrics['loss/mean'] = np.mean(loss_ls)
        metrics['loss/std'] = np.std(loss_ls)
        metrics['log10loss/mean'] = np.mean(np.log10(loss_ls))
        metrics['log10loss/std'] = np.std(np.log10(loss_ls))
        for k, val in loss_info.items():
            metrics['loss/{}/mean'.format(k)] = np.mean(val)
            metrics['loss/{}/std'.format(k)] = np.std(val)

        metrics['time/loss/mean'] = np.mean(losstimer_ls)
        metrics['time/loss/std'] = np.std(losstimer_ls)
        metrics['time/loss/max'] = np.max(losstimer_ls)
        metrics['time/loss/min'] = np.min(losstimer_ls)
        metrics['time/grad/mean'] = np.mean(gradtimer_ls)
        metrics['time/grad/std'] = np.std(gradtimer_ls)
        metrics['time/grad/max'] = np.max(gradtimer_ls)
        metrics['time/grad/min'] = np.min(gradtimer_ls)
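Collecting per-step scalars in plain Python lists and summarizing them with np.mean / np.std / np.max / np.min at the end of an epoch is a common bookkeeping pattern; np.mean happily accepts a list and converts it to an array. A compact sketch of the same idea (the loss values are made up):

import numpy as np

loss_ls = [0.91, 0.74, 0.55, 0.41]  # e.g. per-batch losses collected during an epoch

metrics = {
    'loss/mean': np.mean(loss_ls),
    'loss/std': np.std(loss_ls),
    # mean of the log-losses, which is not the same as the log of the mean loss
    'log10loss/mean': np.mean(np.log10(loss_ls)),
}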
github MaayanLab / geo2enrichr / g2e / signature_factory / soft_file_factory / cleaner.py View on GitHub
    # visualization:
    #
    #          Original        Ranked          Averaged        Re-ordered
    #          A  B  C  D      A  B  C  D      A  B  C  D      A  B  C  D
    # gene1    2  4  8  6      2  4  3  3      3  3  3  3      3  3  6  6
    # gene2    6  4  3  3      6  4  8  6      6  6  6  6      6  6  3  3
    #
    # Read more here: http://en.wikipedia.org/wiki/Quantile_normalization

    O = values

    # 1. Sorted by rank.
    M = np.sort(O, axis=0)

    # 2. Averaged.
    D = np.mean(M, axis=1)
    for i, avg in enumerate(D):
        M[i].fill(float(avg))

    # 3. Ranked by index:
    # First argsort gets the order. Second argsort gets the rank. See
    # http://stackoverflow.com/a/6266510/1830334.
    I = np.argsort(np.argsort(O, axis=0), axis=0)

    # 4. Move values back to their original locations.
    M = M.T
    I = I.T
    O = O.T
    for i in range(len(M)):
        O[i] = M[i][I[i]]

    return O.T
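Step 2 above is where np.mean does the real work of quantile normalization: every rank is replaced by the average of the values holding that rank across columns. The whole procedure can be written more compactly; this is a hedged sketch assuming values is a genes x samples array and NumPy 1.15+ (for np.take_along_axis):

import numpy as np

def quantile_normalize(values):
    O = np.asarray(values, dtype=float)
    M = np.sort(O, axis=0)                          # 1. sort each column
    M[:] = np.mean(M, axis=1, keepdims=True)        # 2. replace each rank by its row average
    I = np.argsort(np.argsort(O, axis=0), axis=0)   # 3. rank of each original entry
    return np.take_along_axis(M, I, axis=0)         # 4. map averaged values back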
github dstndstn / tractor / tractor / sdss.py View on GitHub
            iva.update(constantSkyAt=(
                int((x0 + x1) / 2.), int((y0 + y1) / 2.)))
        else:
            iva.update(constantSkyAt=(int(W / 2.), int(H / 2.)))
    invvar = frame.getInvvar(psfield, bandnum, **iva)
    invvar = invvar.astype(np.float32)
    if not invvarAtCenter:
        assert(invvar.shape == (H, W))

    # Could get this from photoField instead
    # http://data.sdss3.org/datamodel/files/BOSS_PHOTOOBJ/RERUN/RUN/photoField.html
    gain = psfield.getGain(bandnum)
    darkvar = psfield.getDarkVariance(bandnum)

    meansky = np.mean(frame.sky)
    meancalib = np.mean(calibvec)
    skysig = sqrt((meansky / gain) + darkvar) * meancalib

    info.update(sky=sky, skysig=skysig)
    zr = np.array(zrange) * skysig + sky
    info.update(zr=zr)

    # http://data.sdss3.org/datamodel/files/PHOTO_REDUX/RERUN/RUN/objcs/CAMCOL/fpM.html
    fpM = sdss.readFpM(run, camcol, field, bandname)

    if not hasroi:
        image = frame.getImage()

    else:
        roislice = (slice(y0, y1), slice(x0, x1))
        image = frame.getImageSlice(roislice).astype(np.float32)
        if invvarAtCenterImage:
github xfl15 / RecoGCN / train.py View on GitHub
				ts_loss.append(loss_value_ts)
				ts_mrr.append(mrr_ts)
				ts_ndcg.append(ndcg_ts)
				ts_hr5.append(hr5_ts)
				ts_hr10.append(hr10_ts)
				ts_hr3.append(hr3_ts)
				ts_hr1.append(hr1_ts)
				ts_step += 1

			with h5py.File(log_dir + test_num + str(lr)+str(lamb) + '_attn_test.hdf5', 'w') as f:
			 	f.create_dataset("user_coef", data=np.concatenate(user_coef_list))
			 	f.create_dataset("agent_coef", data=np.concatenate(agent_coef_list))
			 	f.create_dataset("item_coef", data=np.concatenate(item_coef_list))
				

			print('Test loss:', np.mean(ts_loss), '; Test mrr:', np.mean(ts_mrr), '; Test ndcg:', np.mean(ts_ndcg), '; Test hr1:', np.mean(ts_hr1), '; Test hr3:',np.mean(ts_hr3),'; Test hr5:', np.mean(ts_hr5), '; Test hr10:', np.mean(ts_hr10))
			logfile.write('Test loss:'+ str(np.mean(ts_loss))+ '; Test mrr:'+ str(np.mean(ts_mrr))+ '; Test ndcg:'+ str(np.mean(ts_ndcg))+ '; Test hr1:'+ str(np.mean(ts_hr1))+ '; Test hr3:'+ str(np.mean(ts_hr3))+ '; Test hr5:'+ str(np.mean(ts_hr5))+ '; Test hr10:'+ str(np.mean(ts_hr10))+'\n')

			logfile.close()
			sess.close()
github raamana / visualqc / visualqc / diffusion.py View on GitHub
def stats_over_b0(self, indices_b0):
        """Computes voxel-wise stats over B=0 volumes (no diffusion) data
            --> single volume over space.
        """

        # TODO connect this
        b0_subset = self.dw_volumes[:, :, :, self.b0_indices]
        mean_img = np.mean(b0_subset, axis=3)
        sd_img = np.std(b0_subset, axis=3)

        return mean_img, sd_img
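The method above reduces a 4-D (x, y, z, volume) array to voxel-wise 3-D statistics by averaging over the last axis. The same reduction on synthetic data (shapes and b0 indices are hypothetical):

import numpy as np

dw_volumes = np.random.rand(64, 64, 40, 12)  # x, y, z, diffusion volumes
b0_indices = [0, 6]                          # hypothetical indices of the b=0 volumes

b0_subset = dw_volumes[..., b0_indices]
mean_img = np.mean(b0_subset, axis=3)        # shape (64, 64, 40)
sd_img = np.std(b0_subset, axis=3)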
github urinieto / msaf / old_scripts / eval_old.py View on GitHub
        logging.error("You should pass at least a track id or a dataset name")
        return

    # Make sure that the results are stored in numpy arrays
    res = np.asarray(results)

    if track_id is not None:
        all_values = (track_id, res[5], res[3], res[4], res[2], res[0], res[1],
                      res[6], res[7], res[8], annot_beats, feature, "none",
                      trim)
        table = "%s_bounds" % alg_id
        select_where = "track_id=?"
        select_values = (track_id, annot_beats, feature, trim)
    elif ds_name is not None:
        # Aggregate results
        res = np.mean(res, axis=0)
        all_values = (alg_id, ds_name, res[5], res[3], res[4], res[2], res[0],
                      res[1], res[6], res[7], res[8], annot_beats, feature,
                      "none", trim)
        table = "boundaries"
        select_where = "algo_id=? AND ds_name=?"
        select_values = (alg_id, ds_name, annot_beats, feature, trim)

    # Check if exists
    cursor.execute("SELECT * FROM %s WHERE %s AND annot_beat=? AND "
                   "feature=? AND trim=?" % (table, select_where),
                   select_values)

    # Insert new if it doesn't exist
    if cursor.fetchone() is None:
        questions = "?," * len(all_values)
        sql_cmd = "INSERT INTO %s VALUES (%s)" % (table, questions[:-1])