How to use the scipy.sparse.csr_matrix class in scipy

To help you get started, we’ve selected a few scipy.sparse.csr_matrix examples, based on popular ways it is used in public projects.
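
Before the project snippets, here is a minimal, self-contained sketch (not taken from any of the projects below) of the most common way these examples create a CSR matrix: wrapping an existing dense array and converting back when needed.

import numpy as np
from scipy.sparse import csr_matrix

dense = np.array([[0.0, 1.0],
                  [2.0, 0.0]])
A = csr_matrix(dense)        # zero entries are simply not stored
print(A.nnz)                 # 2 stored values
print(A.toarray())           # back to a dense ndarray
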


github jvbalen / catchy / pitch_features.py View on Github
def one_melody_matrix(track_id):

    tchr, chroma, t, melody = aligned_pitch_features(track_id)
    melody = np.round(melody)
    pitched = melody > 0
    pitchclass = np.remainder(melody - 69, 12)
    framerate = 1.0/(t[1]-t[0])

    nmel = len(melody)

    vals = np.ones(nmel)[pitched]
    vals *= 1.0 / framerate
    rows = np.arange(nmel)[pitched]
    cols = pitchclass[pitched]
    melmat = csr_matrix((vals, (rows, cols)), shape=(nmel, 12))
    return t, melmat.todense()
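
The csr_matrix((vals, (rows, cols)), shape=...) call above is the coordinate-style constructor: vals[k] is placed at (rows[k], cols[k]), and values landing on the same coordinate are summed. A small standalone sketch of the same pattern, with illustrative values rather than real melody data:

import numpy as np
from scipy.sparse import csr_matrix

frame_len = 0.1                       # seconds per frame (illustrative)
rows = np.array([0, 1, 2])            # voiced frame indices
cols = np.array([9, 9, 11])           # pitch classes of those frames
vals = frame_len * np.ones(3)
melmat = csr_matrix((vals, (rows, cols)), shape=(3, 12))
print(melmat.toarray())               # one time-weighted entry per voiced frame
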
github YuwenXiong / py-R-FCN / lib / datasets / coco.py View on Github
    overlap = -1 (for all gt rois), which means they will be excluded from
    training.
    """
    for ix, entry in enumerate(roidb):
        overlaps = entry['gt_overlaps'].toarray()
        crowd_inds = np.where(overlaps.max(axis=1) == -1)[0]
        non_gt_inds = np.where(entry['gt_classes'] == 0)[0]
        if len(crowd_inds) == 0 or len(non_gt_inds) == 0:
            continue
        iscrowd = [int(True) for _ in xrange(len(crowd_inds))]
        crowd_boxes = ds_utils.xyxy_to_xywh(entry['boxes'][crowd_inds, :])
        non_gt_boxes = ds_utils.xyxy_to_xywh(entry['boxes'][non_gt_inds, :])
        ious = COCOmask.iou(non_gt_boxes, crowd_boxes, iscrowd)
        bad_inds = np.where(ious.max(axis=1) > crowd_thresh)[0]
        overlaps[non_gt_inds[bad_inds], :] = -1
        roidb[ix]['gt_overlaps'] = scipy.sparse.csr_matrix(overlaps)
    return roidb
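
The pattern in this snippet — pull the stored CSR matrix back out with .toarray(), edit whole rows, and re-wrap the result in a fresh csr_matrix — is typical when the sparsity pattern itself changes. A standalone sketch of just that round trip, with illustrative shapes:

import numpy as np
import scipy.sparse

overlaps = scipy.sparse.csr_matrix(np.eye(4, 3))   # stand-in for entry['gt_overlaps']
dense = overlaps.toarray()
dense[2, :] = -1                                   # e.g. mark a proposal for exclusion
overlaps = scipy.sparse.csr_matrix(dense)          # re-sparsify before storing it back
print(overlaps.toarray())
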
github rwl / PYPOWER / pypower / opf_setup.py View on Github
    Pg   = gen[:, PG] / baseMVA
    Qg   = gen[:, QG] / baseMVA
    Pmin = gen[:, PMIN] / baseMVA
    Pmax = gen[:, PMAX] / baseMVA
    Qmin = gen[:, QMIN] / baseMVA
    Qmax = gen[:, QMAX] / baseMVA

    if dc:               ## DC model
        ## more problem dimensions
        nv    = 0            ## number of voltage magnitude vars
        nq    = 0            ## number of Qg vars
        q1    = array([])    ## index of 1st Qg column in Ay

        ## power mismatch constraints
        B, Bf, Pbusinj, Pfinj = makeBdc(baseMVA, bus, branch)
        neg_Cg = sparse((-ones(ng), (gen[:, GEN_BUS], arange(ng))), (nb, ng))   ## Pbus w.r.t. Pg
        Amis = hstack([B, neg_Cg], 'csr')
        bmis = -(bus[:, PD] + bus[:, GS]) / baseMVA - Pbusinj

        ## branch flow constraints
        il = find((branch[:, RATE_A] != 0) & (branch[:, RATE_A] < 1e10))
        nl2 = len(il)         ## number of constrained lines
        lpf = -Inf * ones(nl2)
        upf = branch[il, RATE_A] / baseMVA - Pfinj[il]
        upt = branch[il, RATE_A] / baseMVA + Pfinj[il]

        user_vars = ['Va', 'Pg']
        ycon_vars = ['Pg', 'y']
    else:                ## AC model
        ## more problem dimensions
        nv    = nb           ## number of voltage magnitude vars
        nq    = ng           ## number of Qg vars
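
In this PYPOWER excerpt, sparse appears to be the project's alias for scipy.sparse.csr_matrix, and hstack is scipy.sparse.hstack, whose second argument selects the output format, so the mismatch block Amis comes back in CSR form. A minimal sketch of that block-stacking pattern, detached from the power-flow context (the numbers are illustrative):

import numpy as np
from scipy.sparse import csr_matrix, hstack

nb, ng = 4, 2                                    # buses and generators (illustrative)
B = csr_matrix(np.eye(nb))                       # stand-in for the DC B matrix
gen_bus = np.array([0, 2])                       # which bus each generator sits on
neg_Cg = csr_matrix((-np.ones(ng), (gen_bus, np.arange(ng))), shape=(nb, ng))

Amis = hstack([B, neg_Cg], 'csr')                # second argument picks the format
print(Amis.shape)                                # (4, 6), stored as CSR
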
github jinfagang / tfboys / pt / faster_rcnn / vendor / faster-rcnn.pytorch / lib / datasets / pascal_voc.py View on Github
            x1 = float(bbox.find('xmin').text) - 1
            y1 = float(bbox.find('ymin').text) - 1
            x2 = float(bbox.find('xmax').text) - 1
            y2 = float(bbox.find('ymax').text) - 1

            diffc = obj.find('difficult')
            difficult = 0 if diffc == None else int(diffc.text)
            ishards[ix] = difficult

            cls = self._class_to_ind[obj.find('name').text.lower().strip()]
            boxes[ix, :] = [x1, y1, x2, y2]
            gt_classes[ix] = cls
            overlaps[ix, cls] = 1.0
            seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)

        overlaps = scipy.sparse.csr_matrix(overlaps)

        return {'boxes': boxes,
                'gt_classes': gt_classes,
                'gt_ishard': ishards,
                'gt_overlaps': overlaps,
                'flipped': False,
                'seg_areas': seg_areas}
github simpeg / simpeg / SimPEG / Mesh / DiffOperators.py View on Github
def projDirichlet(n, bc):
            bc = checkBC(bc)
            ij = ([0, n], [0, 1])
            vals = [0, 0]
            if(bc[0] == 'dirichlet'):
                vals[0] = -1
            if(bc[1] == 'dirichlet'):
                vals[1] = 1
            return sp.csr_matrix((vals, ij), shape=(n+1, 2))
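
Here ij is the (rows, cols) pair of the coordinate-style constructor, so the projection has at most one nonzero entry in each of its two columns. A hedged standalone reconstruction for n = 4 with Dirichlet conditions on both ends (checkBC is part of the project and is not reproduced here):

import scipy.sparse as sp

n = 4
vals = [-1, 1]                       # both boundaries Dirichlet in this example
ij = ([0, n], [0, 1])                # rows [0, n], columns [0, 1]
P = sp.csr_matrix((vals, ij), shape=(n + 1, 2))
print(P.toarray())
# [[-1  0]
#  [ 0  0]
#  [ 0  0]
#  [ 0  0]
#  [ 0  1]]
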
github JenifferWuUCLA / pulmonary-nodules-MaskRCNN / Faster-RCNN-Git / Faster-RCNN_TF / lib / datasets / pascal_voc.py View on Github
        # Load object bounding boxes into a data frame.
        for ix, obj in enumerate(objs):
            bbox = obj.find('bndbox')
            # Make pixel indexes 0-based
            x1 = float(bbox.find('xmin').text) - 1
            y1 = float(bbox.find('ymin').text) - 1
            x2 = float(bbox.find('xmax').text) - 1
            y2 = float(bbox.find('ymax').text) - 1
            cls = self._class_to_ind[obj.find('name').text.lower().strip()]
            boxes[ix, :] = [x1, y1, x2, y2]
            gt_classes[ix] = cls
            overlaps[ix, cls] = 1.0
            seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)

        overlaps = scipy.sparse.csr_matrix(overlaps)

        return {'boxes' : boxes,
                'gt_classes': gt_classes,
                'gt_overlaps' : overlaps,
                'flipped' : False,
                'seg_areas' : seg_areas}
github Azure / cortana-intelligence-product-detection-from-images / technical_deployment / train_model / imdb_data.py View on Github
        gt_roidb = self.gt_roidb()
        ss_roidb = self._load_selective_search_roidb(gt_roidb)

        #add ground truth ROIs
        if self._boAddGroundTruthRois:
            roidb = self.merge_roidbs(gt_roidb, ss_roidb)
        else:
            roidb = ss_roidb

        #Keep max of e.g. 2000 rois
        if self._maxNrRois and self._maxNrRois > 0:
            print ("Only keeping the first %d ROIs.." % self._maxNrRois)
            for i in range(self.num_images):
                gt_overlaps = roidb[i]['gt_overlaps']
                gt_overlaps = gt_overlaps.todense()[:self._maxNrRois]
                gt_overlaps = scipy.sparse.csr_matrix(gt_overlaps)
                roidb[i]['gt_overlaps'] = gt_overlaps
                roidb[i]['boxes'] = roidb[i]['boxes'][:self._maxNrRois,:]
                roidb[i]['gt_classes'] = roidb[i]['gt_classes'][:self._maxNrRois]

        with open(cache_file, 'wb') as fid:
            cp.dump(roidb, fid, cp.HIGHEST_PROTOCOL)
        print ('wrote ss roidb to {}'.format(cache_file))

        return roidb
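
The loop above truncates each gt_overlaps by going dense (.todense()), slicing, and re-wrapping with csr_matrix. Since CSR supports row slicing directly, the same truncation can in principle stay sparse throughout; a small sketch under that assumption, with an illustrative stand-in matrix:

import numpy as np
import scipy.sparse

gt_overlaps = scipy.sparse.csr_matrix(np.eye(5000, 21))   # illustrative stand-in
max_rois = 2000

kept_dense = scipy.sparse.csr_matrix(gt_overlaps.todense()[:max_rois])  # as in the snippet
kept_sparse = gt_overlaps[:max_rois]                                    # direct row slice

assert (kept_dense != kept_sparse).nnz == 0     # identical contents
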
github slundberg / shap / shap / explainers / kernel.py View on Github
            if nnz == 0:
                self.synth_data = sp.sparse.csr_matrix(shape, dtype=self.data.data.dtype).tolil()
            else:
                data = self.data.data.data
                indices = self.data.data.indices
                indptr = self.data.data.indptr
                last_indptr_idx = indptr[len(indptr) - 1]
                indptr_wo_last = indptr[:-1]
                new_indptrs = []
                for i in range(0, self.nsamples - 1):
                    new_indptrs.append(indptr_wo_last + (i * last_indptr_idx))
                new_indptrs.append(indptr + ((self.nsamples - 1) * last_indptr_idx))
                new_indptr = np.concatenate(new_indptrs)
                new_data = np.tile(data, self.nsamples)
                new_indices = np.tile(indices, self.nsamples)
                self.synth_data = sp.sparse.csr_matrix((new_data, new_indices, new_indptr), shape=shape).tolil()
        else:
            self.synth_data = np.tile(self.data.data, (self.nsamples, 1))

        self.maskMatrix = np.zeros((self.nsamples, self.M))
        self.kernelWeights = np.zeros(self.nsamples)
        self.y = np.zeros((self.nsamples * self.N, self.D))
        self.ey = np.zeros((self.nsamples, self.D))
        self.lastMask = np.zeros(self.nsamples)
        self.nsamplesAdded = 0
        self.nsamplesRun = 0
        if self.keep_index:
            self.synth_data_index = np.tile(self.data.index_value, self.nsamples)
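
Unlike the coordinate-style examples above, this snippet uses the third csr_matrix constructor, which takes the raw CSR buffers (data, indices, indptr) directly; shap tiles the background matrix by repeating data and indices and offsetting indptr. A minimal sketch of that constructor alone, unrelated to shap's variables:

import numpy as np
from scipy.sparse import csr_matrix

# indptr[i]:indptr[i+1] delimits the slice of data/indices belonging to row i.
data = np.array([10.0, 20.0, 30.0])
indices = np.array([0, 2, 2])            # column of each stored value
indptr = np.array([0, 2, 3])             # row 0 -> data[0:2], row 1 -> data[2:3]
A = csr_matrix((data, indices, indptr), shape=(2, 3))
print(A.toarray())                       # [[10.  0. 20.]
                                         #  [ 0.  0. 30.]]
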
github fducau / ML2016_EDU / FeatureCreation / features.py View on Github
def skills_corr_counter_win(ds, sparse_matrix_input, window=None):
    # If window is not specified, do not use a window
    student_cfa = ds[['student_id', 'correct_first_attempt']]
    sparse_matrix = csr_matrix(sparse_matrix_input.shape)

    for col in xrange(sparse_matrix_input.shape[1]):

        skill_indices = np.array(sparse_matrix_input[:,col].nonzero()[0])

        s_cfa = student_cfa.ix[skill_indices]
        grouped = s_cfa.groupby('student_id')

        if window:
            sg = grouped.apply(cumsum_window, window)
            sg = sg.reset_index(level=0).drop('student_id',axis=1)
            if sg.shape[0]==1:
                sg = sg.transpose()
        else:
            sg = grouped.cumsum()
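
The csr_matrix(sparse_matrix_input.shape) call at the top builds an all-zero sparse matrix of that shape (the rest of the function, where it is presumably filled, is cut off in this excerpt). If a matrix really is filled entry by entry, SciPy emits a SparseEfficiencyWarning for CSR, and lil_matrix is the usual intermediate format; a short sketch of both points:

from scipy.sparse import csr_matrix, lil_matrix

empty = csr_matrix((1000, 50))       # shape tuple alone gives an all-zero matrix
print(empty.nnz)                     # 0

buf = lil_matrix((1000, 50))         # LIL is cheap to fill incrementally
buf[0, 3] = 1.0
result = buf.tocsr()                 # convert to CSR once the filling is done
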
github SmokinCaterpillar / pypet / pypet / parameter.py View on Github
        `data_list` needs to be formatted as the first result of
        :func:`~pypet.parameter.SparseParameter._serialize_matrix`

        """
        matrix_format = data_list[0]

        if matrix_format == 'csc':
            if data_list[1] == '__empty__':
                return spsp.csc_matrix(data_list[4])
            else:
                return spsp.csc_matrix(tuple(data_list[1:4]), shape=data_list[4])
        elif matrix_format == 'csr':
            if data_list[1] == '__empty__':
                return spsp.csr_matrix(data_list[4])
            else:
                return spsp.csr_matrix(tuple(data_list[1:4]), shape=data_list[4])
        elif matrix_format == 'bsr':
            if data_list[1] == '__empty__':
                # We have an empty matrix that cannot be built as in the else case
                return spsp.bsr_matrix(data_list[4])
            else:
                return spsp.bsr_matrix(tuple(data_list[1:4]), shape=data_list[4])
        elif matrix_format == 'dia':
            if data_list[1] == '__empty__':
                # We have an empty matrix that cannot be built as in the else case
                return spsp.dia_matrix(data_list[3])
            else:
                return spsp.dia_matrix(tuple(data_list[1:3]), shape=data_list[3])
        else:
            raise RuntimeError('You shall not pass!')
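
For the CSR branch, data_list is expected to hold [format, data, indices, indptr, shape], i.e. the same raw buffers used by the constructor shown in the shap example above. A hedged sketch of the serialization direction that would produce such a list for a CSR matrix (this mirrors, but is not, pypet's _serialize_matrix):

import numpy as np
import scipy.sparse as spsp

def serialize_csr(mat):
    """Flatten a CSR matrix into [format, data, indices, indptr, shape]."""
    if mat.nnz == 0:
        return ['csr', '__empty__', '__empty__', '__empty__', mat.shape]
    return ['csr', mat.data, mat.indices, mat.indptr, mat.shape]

mat = spsp.csr_matrix(np.array([[0.0, 1.0], [2.0, 0.0]]))
data_list = serialize_csr(mat)

# Round trip through the same reconstruction logic as the snippet above:
restored = spsp.csr_matrix(tuple(data_list[1:4]), shape=data_list[4])
assert (restored != mat).nnz == 0
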