How to use the numpy.arange function

To help you get started, we’ve selected a few numpy.arange examples, drawn from popular ways it is used in public projects.


From phulin/rebook, rebook/block.py (view on GitHub):
import cv2
import numpy as np
import scipy.spatial
import sys

import algorithm
import binarize
import lib
from lib import GREEN

N_values = np.array([64, 64, 64, 128, 128, 128, 256, 256, 256, 256])
k_values = np.array([5, 4, 3, 5, 4, 3, 5, 4, 3, 2])
s_values = N_values.astype(np.float64) / k_values

theta_values = np.arange(32) / np.pi

# radius of circle for "nearby" CCs projection
radius = 100
radius_sq = radius ** 2

epsilon = 2.8

def pack_label(s, theta):
    return theta * s_values.shape[0] + s

def unpack_label(label):
    s_len = s_values.shape[0]
    return label % s_len, label // s_len

def V_p(nearby_centroids, centroids_rotated, ellipses_sheared):
    result = np.zeros((centroids_rotated[0].shape[0],
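For context, np.arange(32) above simply generates the integers 0 through 31, which dividing by pi rescales into 32 evenly spaced candidate angles. A quick standalone check of that pattern (independent of rebook):

import numpy as np

# np.arange(32) -> the integers 0..31; dividing by pi turns them into a theta grid
theta_values = np.arange(32) / np.pi
print(theta_values.shape)     # (32,)
print(theta_values[:3])       # first three values: 0, 1/pi, 2/pi ~ 0, 0.318, 0.637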
From simpeg/simpeg, tests/em/new_fdem/forward/test_FDEM_primsec.py (view on GitHub):
# secondary mesh
h = [(csz, npadz-4, -pf), (csz, ncz), (csz, npadz-4, pf)]
meshs = Mesh.TensorMesh(3*[h], x0 = 'CCC')

# mappings
primaryMapping = (
    Maps.ExpMap(meshp) *
    Maps.SurjectFull(meshp) *
    Maps.Projection(nP=8, index=[0])
)

mapping = (
    Maps.ExpMap(meshs) *
    Maps.ParametrizedBlockInLayer(meshs) *
    Maps.Projection(
        nP=8, index=np.hstack([np.r_[0], np.arange(0, 8)])
    )
)

primaryMap2Meshs = (
    Maps.ExpMap(meshs) *
    Maps.SurjectFull(meshs) *
    Maps.Projection(nP=8, index=[0])
)


class PrimSecFDEMTest(object):

    # --------------------- Run some tests! --------------------- #
    def DataTest(self):
        print('\nTesting Data')
        dpred_primsec = self.secondaryProblem.dpred(
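In the mapping above, np.arange(0, 8) supplies the indices 0 through 7 for Maps.Projection, and np.hstack prepends an extra 0 so the first parameter is projected twice. A standalone sketch of just that index construction (independent of SimPEG):

import numpy as np

# np.r_[0] is a one-element array and np.arange(0, 8) is [0 1 ... 7];
# stacking them gives a 9-entry index vector that repeats the first entry.
index = np.hstack([np.r_[0], np.arange(0, 8)])
print(index)                          # [0 0 1 2 3 4 5 6 7]

params = np.linspace(10.0, 80.0, 8)   # dummy 8-entry parameter vector
print(params[index])                  # first value appears twice, then the rest in order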
From MobleyLab/alchemical-analysis, alchemical_analysis/alchemical_analysis.py (view on GitHub):
def plotdFvsLambda2(nb=10):
      """Plots the free energy differences evaluated for each pair of adjacent states for all methods.
      The layout is approximately 'nb' bars per subplot."""
      x = numpy.arange(len(df_allk))
      if len(x) < nb:
         return
      xs = numpy.array_split(x, len(x)/nb+1)
      mnb = max([len(i) for i in xs])
      fig = pl.figure(figsize = (8,6))
      width = 1./(len(P.methods)+1)
      elw = 30*width
      colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'}
      ndx = 1
      for x in xs:
         lines = tuple()
         ax = pl.subplot(len(xs), 1, ndx)
         for name in P.methods:
            y = [df_allk[i][name]/P.beta_report for i in x]
            ye = [ddf_allk[i][name]/P.beta_report for i in x]
            line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.05*elw, error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw))
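Here numpy.arange(len(df_allk)) enumerates the adjacent-state free energy differences, and numpy.array_split breaks those indices into chunks of roughly nb bars per subplot. A standalone sketch of the splitting, using an explicit integer division for the chunk count:

import numpy

df_allk = list(range(23))                  # stand-in for 23 adjacent-state differences
nb = 10
x = numpy.arange(len(df_allk))             # indices 0..22
xs = numpy.array_split(x, len(x) // nb + 1)
print([chunk.tolist() for chunk in xs])    # three chunks of 8, 8 and 7 indices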
From mmp2/megaman, megaman/relaxation/utils.py (view on GitHub):
save_init : bool
            whether to save Y0 and L before running relaxation.
    """
    new_relaxation_kwds = {
        'weights': np.array([],dtype=np.float64),
        'step_method': 'fixed',
        'linesearch': True,
        'verbose': False,
        'niter': 2000,
        'niter_trace': 0,
        'presave': False,
        'sqrd': True,
        'alpha': 0,
        'projected': False,
        'lossf': 'epsilon' if n_components > intrinsic_dim else 'rloss',
        'subset': np.arange(n_samples),
        'sub_dir': current_time_str(),
        'backup_base_dir': default_basedir,
        'saveiter': 10,
        'printiter': 1,
        'save_init': False,
    }

    new_relaxation_kwds.update(relaxation_kwds)

    backup_dir = os.path.join(new_relaxation_kwds['backup_base_dir'], new_relaxation_kwds['sub_dir'])
    new_relaxation_kwds['backup_dir'] = backup_dir
    create_output_dir(backup_dir)

    new_relaxation_kwds = convert_to_int(new_relaxation_kwds)

    if new_relaxation_kwds['weights'].shape[0] != 0:
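In the defaults above, np.arange(n_samples) is the usual way of saying "use every sample": it enumerates the row indices 0..n_samples-1 that a later subsetting step can index with. A minimal illustration (the variable names are illustrative, not megaman's API):

import numpy as np

n_samples = 5
subset = np.arange(n_samples)          # [0 1 2 3 4], i.e. all rows by default
data = np.random.rand(n_samples, 3)
print(data[subset].shape)              # (5, 3): the full data set is selected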
From goiosunsw/PyPeVoc, pypevoc/PVAnalysis.py (view on GitHub):
def dphase2freq(self, dph, nbin):
        '''
        Calculates the "instantaneous frequency" corresponding to the
        phase difference dph between two consecutive frames
        '''
        # Unwrapped phase
        # dphw = dph + self.wfbin[nbin] + np.array([-pi2, 0, pi2])
        dphw = dph + self.wfbin[nbin] + pi2*np.arange(-1, 2)
        # precise frequency options
        freq = dphw / self.dt / pi2
        # search among neighboring bins for the right freq
        df = self.fbin[nbin] - freq
        ii = np.argmin(abs(df))

        return freq[ii], df[ii]
        # return self.fbin[nbin]
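The key step is pi2*np.arange(-1, 2): since np.arange(-1, 2) is just [-1, 0, 1], it produces the three unwrapping candidates dph - 2*pi, dph and dph + 2*pi in one vectorized expression, and the candidate whose frequency lands closest to the bin centre is kept. A standalone sketch with toy numbers (omitting the wfbin term):

import numpy as np

pi2 = 2 * np.pi
dph = 0.3                 # measured phase difference between frames (toy value)
dt = 0.01                 # hop time between frames (toy value)
bin_freq = 110.0          # centre frequency of the analysis bin (toy value)

dphw = dph + pi2 * np.arange(-1, 2)      # candidates: dph - 2*pi, dph, dph + 2*pi
freq = dphw / dt / pi2                   # the corresponding frequencies
ii = np.argmin(abs(bin_freq - freq))     # pick the one nearest the bin centre
print(freq[ii])                          # ~104.8, the unwrapped estimate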
From visionegg/visionegg, VisionEgg/SphereMap.py (view on GitHub):
phi_stop = (i+1)/float(cp.num_samples_per_circle)*2*math.pi
                x_start,y_start,z_start = get_xyz(theta_start,phi_start,cp.radius)
                x_stop,y_stop,z_stop = get_xyz(theta_stop,phi_stop,cp.radius)
                gl.glVertex3f(x_start, y_start, z_start)
                gl.glVertex3f(x_stop, y_stop, z_stop)

        cp = self.constant_parameters
        # Weird range construction to be sure to include zero.
        azs_major = numpy.concatenate((
            numpy.arange(0.0,180.0,cp.az_major_spacing),
            -numpy.arange(0.0,180.0,cp.az_major_spacing)[1:]))
        azs_minor = numpy.concatenate((
            numpy.arange(0.0,180.0,cp.az_minor_spacing),
            -numpy.arange(0.0,180.0,cp.az_minor_spacing)[1:]))
        els_major = numpy.concatenate((
            numpy.arange(0.0,90.0,cp.el_major_spacing),
            -numpy.arange(0.0,90.0,cp.el_major_spacing)[1:]))
        els_minor = numpy.concatenate((
            numpy.arange(0.0,90.0,cp.el_minor_spacing),
            -numpy.arange(0.0,90.0,cp.el_minor_spacing)[1:]))

        gl.glNewList(self.cached_minor_lines_display_list,gl.GL_COMPILE)
        gl.glBegin(gl.GL_LINES)
        # az minor
        for az in azs_minor:
            if az in azs_major:
                continue # draw only once as major
            draw_half_great_circle(az)
        for el in els_minor:
            if el in els_major:
                continue # draw only once as major
            draw_iso_elevation_circle(el)
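The "weird range construction" works because numpy.arange(0.0, 180.0, spacing) yields the non-negative grid lines starting at 0, and the negated copy drops its first element ([1:]) so that zero appears exactly once. A standalone check with a 30-degree spacing:

import numpy

az_major_spacing = 30.0
azs_major = numpy.concatenate((
    numpy.arange(0.0, 180.0, az_major_spacing),          # 0, 30, ..., 150
    -numpy.arange(0.0, 180.0, az_major_spacing)[1:]))    # -30, ..., -150 (no duplicate 0)
print(azs_major)   # 0, 30, 60, 90, 120, 150, -30, -60, -90, -120, -150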
From prusa3d/PrusaControl, sceneRender.py (view on GitHub):
r0 = radius * 0.35
        r1 = radius * 0.7
        r2 = radius
        r3 = radius+0.05
        r4 = radius+0.15
        r5 = radius+0.25
        r6 = radius+0.4
        r7 = radius+1.0

        if picking:
            list_of_segments_6 = numpy.arange(0., 360., 1.)
            circle7 = numpy.array([[numpy.cos(numpy.radians(i)) * r7, numpy.sin(numpy.radians(i)) * r7] for i in list_of_segments_6])
        else:
            # calculate points for circles 0 and 1
            list_of_segments_0_1 = numpy.arange(0, 360., 360./8.)
            circle0 = numpy.array([[numpy.cos(numpy.radians(i)) * r0, numpy.sin(numpy.radians(i)) * r0] for i in list_of_segments_0_1])
            circle1 = numpy.array([[numpy.cos(numpy.radians(i)) * r1, numpy.sin(numpy.radians(i)) * r1] for i in list_of_segments_0_1])

            # calculate points for circle 2
            list_of_segments_2 = numpy.arange(0, 360., 360. / segments)
            circle2 = numpy.array([[numpy.cos(numpy.radians(i)) * r2, numpy.sin(numpy.radians(i)) * r2] for i in list_of_segments_2])

            # calculate points for circles 3, 4 and 5
            list_of_segments_3_4_5 = numpy.arange(0, 360., 360. / 72.)
            circle3 = numpy.array([[numpy.cos(numpy.radians(i)) * r3, numpy.sin(numpy.radians(i)) * r3] for i in list_of_segments_3_4_5])
            circle4 = numpy.array([[numpy.cos(numpy.radians(i)) * r4, numpy.sin(numpy.radians(i)) * r4] for i in list_of_segments_3_4_5])
            circle5 = numpy.array([[numpy.cos(numpy.radians(i)) * r5, numpy.sin(numpy.radians(i)) * r5] for i in list_of_segments_3_4_5])

        # calculate points for circle 6
        list_of_segments_6 = numpy.arange(0., 360., 1.)
        circle6 = numpy.array([[numpy.cos(numpy.radians(i)) * r6, numpy.sin(numpy.radians(i)) * r6] for i in list_of_segments_6])
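Each numpy.arange call above samples angles in degrees (one sample per degree for the full circles, coarser steps for the others), and the list comprehensions convert those angles into (x, y) points on a circle of the given radius. A vectorized equivalent of the circle6 construction, shown purely as an illustration rather than PrusaControl's code:

import numpy

r6 = 5.4                                          # toy radius
list_of_segments_6 = numpy.arange(0., 360., 1.)   # 360 angles, one per degree
theta = numpy.radians(list_of_segments_6)
circle6 = numpy.column_stack((numpy.cos(theta) * r6, numpy.sin(theta) * r6))
print(circle6.shape)                              # (360, 2)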
From sveitser/kaggle_diabetic, boost.py (view on GitHub):
def get_xgb(**kwargs):
    grid = {
        #'colsample_bytree': [0.0005, 0.001, 0.002, 0.005, 0.01, 0.02,
        #                     0.05],
        'colsample_bytree': [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2],
        #'colsample_bytree': [0.1, 0.2, 0.3, 0.5],
        #'colsample_bytree': [0.1, 0.2, 0.5],
        #'max_depth': [2, 3, 4],
        'learning_rate': [0.1],
        'n_estimators': [100],
        'seed': np.arange(kwargs.pop('n_iter', 1)) * 10 + 1,
    }
    args = {
        'subsample': 0.5,
        'colsample_bytree': 0.2,
        'learning_rate': 0.1,
        'seed': 99,
        'n_estimators': 100,
        'max_depth': 3,
        #'silent': False,
    }
    args.update(kwargs)
    pprint.pprint(args)
    p = Pipeline([
        ('scale', StandardScaler()),
        ('fit', XGBRegressor(**args))
    ])
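The 'seed' entry uses np.arange to expand a single n_iter count into a grid of distinct random seeds (1, 11, 21, ...), one per randomized-search draw. A quick standalone check:

import numpy as np

n_iter = 4
seeds = np.arange(n_iter) * 10 + 1
print(seeds)          # [ 1 11 21 31]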
From pmonta/GNSS-DSP-tools, gnsstools/gps/l5q.py (view on GitHub):
def make_l5q(prn):
  xb_offset = l5q_init[prn]
  n = code_length
  xb_shift = xb[np.mod(np.arange(xb_offset,xb_offset+n),n)]
  return np.logical_xor(xa,xb_shift)
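np.arange(xb_offset, xb_offset + n) % n produces the indices of a circular shift, so the fancy indexing returns xb rotated by xb_offset positions before it is XORed with xa. A standalone sketch with a toy sequence (not the real L5Q register contents):

import numpy as np

xb = np.array([1, 0, 1, 1, 0, 0, 1])    # toy code sequence
n = len(xb)
xb_offset = 3
xb_shift = xb[np.mod(np.arange(xb_offset, xb_offset + n), n)]
print(xb_shift)                          # [1 0 0 1 1 0 1]: xb rotated left by 3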
From Yangruipis/simple_ml, simple_ml/evaluation.py (view on GitHub):
def classify_auc(y_predict, y_true):
    """
    Computing the area under the ROC curve directly is cumbersome; its
    probabilistic interpretation gives an O(n) pass after sorting, where n is
    the total number of samples.
    ref: https://www.cnblogs.com/gatherstars/p/6084696.html
    Suppose there are M positive samples and N negative samples.
    - First sort by y_predict.
    - For the top-ranked positive sample, with rank rank_1, there are M-1
      positive samples with a smaller score, so (rank_1 - 1) - (M - 1) negative
      samples score below it.
    - For the positive sample with rank rank_i, there are M-i positive samples
      with a smaller score, so (rank_i - 1) - (M - i) negative samples score below it.
    - When i = M, the count of smaller negative samples is rank_i - 1 - (M - M) = rank_i - 1.
    - There are M*N positive-negative pairs in total, which gives the AUC formula:
      auc = \frac{\sum_{i \in positives} rank_i - M(M+1)/2}{M \cdot N}
    """
    _check_input(y_predict, y_true)
    length = len(y_true)
    pair = zip(y_predict, y_true)
    pair = sorted(pair, key=lambda x: x[0])
    rank_pair = np.column_stack((np.arange(1, length+1), pair))
    positive_pair = rank_pair[rank_pair[:, 2] == 1]
    positive_count = positive_pair.shape[0]
    return (np.sum(positive_pair[:, 0]) - positive_count * (positive_count + 1) / 2) / \
           (positive_count * (length - positive_count))
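np.arange(1, length + 1) supplies the ranks 1..N after sorting by score; attaching them with np.column_stack lets the closed-form rank-sum formula from the docstring be evaluated directly. A tiny worked example (independent of simple_ml), which agrees with sklearn's roc_auc_score on the same data:

import numpy as np

y_predict = np.array([0.1, 0.4, 0.35, 0.8])
y_true = np.array([0, 0, 1, 1])

length = len(y_true)
pair = sorted(zip(y_predict, y_true), key=lambda x: x[0])        # sort by score
rank_pair = np.column_stack((np.arange(1, length + 1), pair))    # prepend ranks 1..N
positive_pair = rank_pair[rank_pair[:, 2] == 1]                  # rows of positive samples
M = positive_pair.shape[0]
auc = (np.sum(positive_pair[:, 0]) - M * (M + 1) / 2) / (M * (length - M))
print(auc)   # 0.75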