How to use the numpy.max function

To help you get started, we’ve selected a few numpy.max examples, based on popular ways it is used in public projects.
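Before the project examples, here is a minimal, self-contained sketch of the most common numpy.max call patterns (the array a is illustrative):

import numpy as np

a = np.array([[1, 5, 3],
              [4, 2, 6]])

print(np.max(a))              # 6 -- maximum over the flattened array
print(np.max(a, axis=0))      # [4 5 6] -- column-wise maxima
print(np.max(a, axis=1))      # [5 6] -- row-wise maxima
print(np.max(np.array([]), initial=0))  # 0.0 -- initial= avoids a ValueError on empty input

Note that np.max is an alias for np.amax; for the element-wise maximum of two arrays, use np.maximum instead.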


github pwcazenave / PyFVCOM / PyFVCOM / validation.py
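In this FVCOM validation routine, np.min and np.max bound the model time indices matched to each observation day, so np.arange(np.min(this_day_index), np.max(this_day_index) + 1) selects a contiguous time slice to read.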
        current_modelfile_dt = [this_date.date() for this_date in FileReader(self.model_files[current_modelfile_ind]).time.datetime]

        unique_obs_days = np.unique([this_date.date() for this_date in self.ices_data['time_dt']])
        for counter_ind, this_day in enumerate(unique_obs_days):
            if self.noisy:
                print('Getting model data from day {} of {}'.format(counter_ind + 1, len(unique_obs_days)))

            if this_day > current_modelfile_dt[-1]:
                current_modelfile_ind += 1
                if current_modelfile_ind < len(self.model_files):
                    current_modelfile_dt = [this_date.date() for this_date in FileReader(self.model_files[current_modelfile_ind]).time.datetime]
                else:
                    return
            this_day_index = np.where(np.asarray(current_modelfile_dt) == this_day)[0]
            this_day_fr = FileReader(self.model_files[current_modelfile_ind], self.model_varkeys,
                                     dims={'time': np.arange(np.min(this_day_index), np.max(this_day_index) + 1)})
            this_day_obs_inds = np.where(np.asarray([this_dt.date() for this_dt in self.ices_data['time_dt']]) == this_day)[0]

            for this_record_ind in this_day_obs_inds:
                for key in self.var_keys:
                    if self.ices_data[key][this_record_ind] > -9.99e9:  # skip missing-value sentinels
                        this_model_key = self.ices_model_conversion[key]
                        space_ind = self.model_data['node_ind'][this_record_ind]
                        dep_ind = self.model_data['z_ind'][this_record_ind]
                        time_ind = this_day_fr.closest_time(self.ices_data['time_dt'][this_record_ind])

                        if "+" in this_model_key:
                            vlist = this_model_key.split('+')
                            mbuffer = 0
                            for v in vlist:
                                mbuffer += getattr(this_day_fr.data, v)[time_ind, dep_ind, space_ind]
                            self.model_data[key][this_record_ind] = mbuffer
github forcecore / Keras-GAN-Animeface-Character / data.py
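A sanity check for a GAN training set: np.max and np.min confirm that every colour channel of the HDF5 faces dataset lies in the [-1, 1] range.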
import h5py
import numpy as np

def test(hdff):
    '''
    Read an HDF5 file and check that pixels are scaled to the [-1, 1] range.
    '''
    with h5py.File(hdff, "r") as f:
        X = f.get("faces")
        print(np.min(X[:,:,:,0]))
        print(np.max(X[:,:,:,0]))
        print(np.min(X[:,:,:,1]))
        print(np.max(X[:,:,:,1]))
        print(np.min(X[:,:,:,2]))
        print(np.max(X[:,:,:,2]))
        print("Dataset size:", len(X))
        assert np.max(X) <= 1.0
        assert np.min(X) >= -1.0
github chalmersgit / SphericalHarmonics / sphericalHarmonics.py
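An OpenCV image-resizing helper; the excerpt is cut off mid-condition, so the body below is a plausible minimal sketch rather than the project's exact code.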
def resizeImage(img, width, height, interpolation=cv2.INTER_CUBIC):
	# Hypothetical completion: resize only when the dimensions differ.
	if img.shape[1] != width or img.shape[0] != height:
		img = cv2.resize(img, (width, height), interpolation=interpolation)
	return img
github mortcanty / CRCPython / src / CHAPTER9 / wishart_change.py
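In this Wishart change-detection code, np.min and np.max of the log-intensity band c11 give the limits used to stretch it to the 0-255 range for the change map.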
    idx = np.where(det2 <= 0.0)
    det2[idx] = 0.0001 
    idx = np.where(det3 <= 0.0)
    det3[idx] = 0.0001  
    lnQ = cst+m*np.log(det1)+n*np.log(det2)-(n+m)*np.log(det3)
#  test statistic    
    Z = -2*rho*lnQ
#  change probability
    P =  (1.-omega2)*stats.chi2.cdf(Z,[f])+omega2*stats.chi2.cdf(Z,[f+4])
    P =  ndimage.filters.median_filter(P, size = (3,3))
#  change map
    a255 = np.ones((rows,cols),dtype=np.byte)*255
    a0 = a255*0
    c11 = np.log(k1+0.0001)
    min1 = np.min(c11)
    max1 = np.max(c11)
    c11 = (c11-min1)*255.0/(max1-min1)  
    c11 = np.where(c11<0,a0,c11)  
    c11 = np.where(c11>255,a255,c11) 
    c11 = np.where(P>(1.0-sig),a0,c11)      
    cmap = np.where(P>(1.0-sig),a255,c11)
#  write to file system        
    driver = gdal.GetDriverByName(fmt)    
    outDataset = driver.Create(outfile,cols,rows,2,GDT_Float32)
    geotransform = inDataset1.GetGeoTransform()
    if geotransform is not None:
        outDataset.SetGeoTransform(geotransform)
    projection = inDataset1.GetProjection()        
    if projection is not None:
        outDataset.SetProjection(projection) 
    outBand = outDataset.GetRasterBand(1)
    outBand.WriteArray(Z,0,0)
github BorisMuzellec / EllipticalEmbeddings / wordnet_evaluation.py
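Here np.max over the hypernym index columns sizes the candidate set to rank, and the embedding distances are then computed in batches with CuPy.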
    vars = embeddings["vars"]

    words_to_idx = embeddings['word_to_idx']

    hypernym_df = pd.read_csv(hypernym_file, header=None, sep='\t')
    hypernym_df[2] = hypernym_df[0].apply(lambda x: words_to_idx[x])
    hypernym_df[3] = hypernym_df[1].apply(lambda x: words_to_idx[x])
    hypernym_couples = []

    print("Ranking distances")

    idxs1 = hypernym_df[2].values
    idxs2 = hypernym_df[3].values

    n = np.max(idxs1) + 1
    m = np.max(idxs2) + 1

    idxs = np.arange(m)

    ranks = []
    avg_precision_scores = []

    idxs1 = list(set(idxs1))
    np.random.shuffle(idxs1)

    for i, idx in tqdm.tqdm(enumerate(idxs1)):
        x = [idx] * m
        if args.metric == "bures_distance":
            dists_ = wb.batch_W2(cp.array(means[x]), cp.array(means[idxs]), cp.array(vars[x]), cp.array(vars[idxs]))[0]
            item_distances = cp.asnumpy(dists_)
        elif args.metric == "bures_cosine":
            scores_ = wb.bures_cosine(cp.array(means[x]), cp.array(means[idxs]), cp.array(vars[x]), cp.array(vars[idxs]))
github JGCRI / tethys / tethys / TemporalDownscaling / TemporalDownscaling.py
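Temporal downscaling of water demand: each monthly temperature profile is normalised by its range, np.max(monT) - np.min(monT), before weighting.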
    TDW = np.zeros((np.shape(data['tas'])[0], len(years)*12), dtype=float)
    
    for i in range(np.shape(data['tas'])[0]):
        R = data['DomesticR'][i]       
        for j in years:
            N = years.index(j)
            monT = data['tas'][i,N*12:(N+1)*12]
            monT[np.isnan(monT)] = 0
            if np.sum(monT) == 0:  # if tas data is unavailable for this grid, use the neighbor grid with the higher mean temp
                monT1 = data['tas'][i-1,N*12:(N+1)*12]
                monT2 = data['tas'][i+1,N*12:(N+1)*12]
                if np.mean(monT1) >= np.mean(monT2):
                    tmp = (monT1-np.mean(monT1))/(np.max(monT1)-np.min(monT1))*R+1
                else:
                    tmp = (monT2-np.mean(monT2))/(np.max(monT2)-np.min(monT2))*R+1
            else:
                tmp = (monT-np.mean(monT))/(np.max(monT)-np.min(monT))*R+1
            TDW[i,N*12:(N+1)*12] = W[i,N]*tmp/12
        
    return TDW
github schlegelp / navis / navis / core / neurons.py
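A compact bounding box: np.min and np.max with axis=0 return the per-coordinate extremes of the vertex array.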
def bbox(self) -> np.ndarray:
        """Bounding box."""
        mn = np.min(self.vertices, axis=0)
        mx = np.max(self.vertices, axis=0)
        return np.vstack((mn, mx)).T
github MTgeophysics / mtpy / mtpy / imaging / plotstrike2d.py
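np.max and np.min over the per-station period keys give the overall period range, which then bounds the logarithmic period axis.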
            # needs to be negative because measures clockwise
            tipr = -tip.angle_real[index_2d]

            tipr[np.where(tipr == 180.)] = 0.0
            tipr[np.where(tipr == -180.)] = 0.0

            # make sure the angle is between 0 and 360
            tipr = tipr % 360

            # make a dictionary of strikes with keys as period
            tiprdict = dict([(ff, jj)
                             for ff, jj in zip(mt.period[index_2d], tipr)])
            tiprlist.append(tiprdict)

        #--> get min and max period
        maxper = np.max([np.max(list(mm.keys())) for mm in ptlist if mm.keys()])
        minper = np.min([np.min(list(mm.keys())) for mm in ptlist if mm.keys()])

        # make empty arrays to put data into for easy manipulation
        medpt = np.zeros((nt, nc))
        medtipr = np.zeros((nt, nc))

        # make a list of periods from the longest period list
        plist = np.logspace(
            np.log10(minper),
            np.log10(maxper),
            num=nt,
            base=10)
        pdict = dict([(ii, jj) for jj, ii in enumerate(plist)])

        self._plist = plist
github fxgsell / GG-Edge-Inference / 2-face-detection / numpy / core / arrayprint.py
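An integer formatter from a vendored copy of numpy itself: the wider of the printed np.max and np.min values sets the field width.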
def __init__(self, data):
        if data.size > 0:
            max_str_len = max(len(str(np.max(data))),
                              len(str(np.min(data))))
        else:
            max_str_len = 0
        self.format = '%{}d'.format(max_str_len)
github metno / wxgen / wxgen / generator.py
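np.min and np.max summarise the candidate date range and the largest weight in the debug output of this weighted random draw.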
            I0 = np.where(weights_v < 1e-3)[0]
            I1 = np.where(weights_v >= 1e-3)[0]
            # Ensure we do not get too high weights
            weights_v[I1] = 1.0/weights_v[I1]
            weights_v[I0] = 1e3

        I_v = wxgen.util.random_weighted(weights_v, self.policy)
        I = Itime[I_v]

        # Do a weighted random choice of the weights
        wxgen.util.debug("Num candidates:  %d" % len(weights_v))
        wxgen.util.debug("Date range:  %d %d" % (wxgen.util.unixtime_to_date(np.min(self._database.inittimes[Itime])), wxgen.util.unixtime_to_date(np.max(self._database.inittimes[Itime]))))
        wxgen.util.debug("Found state:  %s" % ' '.join(["%0.2f" % x for x in self._database._data_matching[Istart, :, I]]))
        wxgen.util.debug("Found date: %s (%i)" % (wxgen.util.unixtime_to_date(self._database.inittimes[I]), I))
        wxgen.util.debug("Climate: %s" % (climate_state))
        wxgen.util.debug("Weight (max weight): %s (%s)" % (weights_v[I_v], np.max(weights_v)))
        tr = self._database.get(I)
        tr.indices = tr.indices[Istart:]
        return tr