How to use the scipy.io.loadmat function in scipy

To help you get started, we’ve selected a few scipy.io.loadmat examples, based on popular ways it is used in public projects.

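In the simplest case, scipy.io.loadmat takes a path to a .mat file and returns a dict mapping MATLAB variable names to NumPy arrays, plus a few '__'-prefixed metadata entries. A minimal, self-contained round trip (the file and variable names here are illustrative):

import numpy as np
import scipy.io as sio

# write a small .mat file so the example stands alone
sio.savemat('example.mat', {'A': np.eye(3)})

mat = sio.loadmat('example.mat')
print(sorted(mat.keys()))  # ['A', '__globals__', '__header__', '__version__']
print(mat['A'].shape)      # (3, 3)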

github peng-cao / mripy / test / CS_MRI / cs_MRF_CNN_IST_cuda.py
def test():
    # read rf and tr arrays from mat file
    mat_contents  = sio.loadmat(pathdat+'mrf_t1t2b0pd_mrf_randphasecyc_traintest.mat')
    far           = np.array(mat_contents["rf"].astype(np.complex128).squeeze())
    trr           = np.array(mat_contents["trr"].astype(np.float64).squeeze())
    # input MRF time courses
    mat_contents2 = sio.loadmat(pathdat+'datax1.mat')
    data_x        = np.array(mat_contents2["datax1"]).astype(np.float64)
    # prepare for sequence simulation, y->x_hat
    Nk            = far.shape[0]
    Nexample      = data_x.shape[0]
    ti            = 10 #ms
    M0            = np.array([0.0,0.0,1.0]).astype(np.float64)
    #image size
    nx            = 217
    ny            = 181
    # mask in ksp
    mask          = ut.mask3d( nx, ny, Nk, [15,15,0], 0.4)
    #FTm           = opts.FFT2d_kmask(mask) 
    FTm           = cuopts.FFT2d_cuda_kmask(mask)
    
    #initial timing
    timing        = utc.timing()
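The .astype(...).squeeze() chains in this example are typical when reading vectors: MATLAB has no true 1-D arrays, so loadmat returns them as 2-D row or column matrices. A small sketch of that behavior (file and variable names are made up):

import numpy as np
import scipy.io as sio

sio.savemat('vec.mat', {'rf': np.arange(5, dtype=np.complex128)})

rf = sio.loadmat('vec.mat')['rf']
print(rf.shape)            # (1, 5) -- a row vector, not (5,)
print(rf.squeeze().shape)  # (5,)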
github chappers / scikit-feature / skfeature / example / test_fisher_score.py
def main():
    # load data
    mat = scipy.io.loadmat('../data/COIL20.mat')
    X = mat['X']    # data
    X = X.astype(float)
    y = mat['Y']    # label
    y = y[:, 0]
    n_samples, n_features = X.shape    # number of samples and number of features

    # split data into 10 folds
    ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)  # note: removed in newer scikit-learn; use model_selection.KFold(n_splits=10, shuffle=True).split(X)

    # perform evaluation on classification task
    num_fea = 100    # number of selected features
    clf = svm.LinearSVC()    # linear SVM

    correct = 0
    for train, test in ss:
        # obtain the score of each feature on the training set
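When the variable names inside a file ('X' and 'Y' above) are not documented, scipy.io.whosmat lists them without loading any data, and the dict returned by loadmat can be filtered past its metadata entries. A short sketch against the same dataset:

import scipy.io

# (name, shape, dtype) for each variable, without reading the arrays
print(scipy.io.whosmat('../data/COIL20.mat'))

mat = scipy.io.loadmat('../data/COIL20.mat')
print([k for k in mat if not k.startswith('__')])  # e.g. ['X', 'Y']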
github balakg / posewarp-cvpr2018 / datareader.py
def makeVidInfoList(vid_list_file):
	
	f = open(vid_list_file)
	vids = f.read().splitlines()
	f.close()	
	n_vids = len(vids)

	vid_info = []
	
	for i in range(n_vids):

		path,vid_name = os.path.split(vids[i])
		info_name = path[:-6] + 'info/' + vid_name + '.mat'

		info = sio.loadmat(info_name)		
		box = info['data']['bbox'][0][0]
		X = info['data']['X'][0][0]

		vid_info.append([info,box,X,vids[i]])

		'''
		n_frames = X.shape[2]
		frames = np.random.choice(n_frames,2,replace=False)

		while(abs(frames[0] - frames[1])/(n_frames*1.0) <= 0.02):
			frames = np.random.choice(n_frames,2,replace=False)

		l = []
		l += getExampleInfo(vid_path,frames[0],box,X)
		l += getExampleInfo(vid_path,frames[1],box,X)
		#l.append(class_id)
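The info['data']['bbox'][0][0] chain above reflects loadmat's default handling of MATLAB structs: each struct arrives as a 1x1 structured array whose fields must be unwrapped by indexing. A sketch with hypothetical field names mirroring the example:

import numpy as np
import scipy.io as sio

# savemat writes nested dicts as MATLAB structs
sio.savemat('info.mat', {'data': {'bbox': np.zeros((4, 10)), 'X': np.ones((2, 2, 8))}})

info = sio.loadmat('info.mat')
print(info['data'].dtype.names)   # ('bbox', 'X')
box = info['data']['bbox'][0][0]  # unwrap the 1x1 struct array, as above
print(box.shape)                  # (4, 10)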
github takahiro-777 / nlp_tutorial / nlp100_Python / 99 / main.py
import pickle

import numpy as np
from scipy import io

from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans

fname_dict_index_t = 'dict_index_country'
fname_matrix_x300 = 'matrix_x300_country'


# load the dictionary
with open(fname_dict_index_t, 'rb') as data_file:
	dict_index_t = pickle.load(data_file)

# load the matrix
matrix_x300 = io.loadmat(fname_matrix_x300)['matrix_x300']

# t-SNE
t_sne = TSNE(perplexity=30, learning_rate=500).fit_transform(matrix_x300)
print(t_sne)

# KMeans clustering
predicts = KMeans(n_clusters=5).fit_predict(matrix_x300)

# plot the results
cmap = plt.get_cmap('Set1')
for index, label in enumerate(dict_index_t.keys()):
	cval = cmap(predicts[index] / 4)
	plt.scatter(t_sne[index, 0], t_sne[index, 1], marker='.', color=cval)
	plt.annotate(label, xy=(t_sne[index, 0], t_sne[index, 1]), color=cval)
plt.show()
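Note that fname_matrix_x300 has no .mat extension: loadmat appends one by default (appendmat=True), so 'matrix_x300_country' resolves to 'matrix_x300_country.mat'. Spelled out:

from scipy import io

# equivalent to the call above; appendmat=True is the default
matrix_x300 = io.loadmat('matrix_x300_country', appendmat=True)['matrix_x300']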
github nico / cvbook / sift.py
def read_features_from_file(filename):
  '''Returns feature locations, descriptors.'''
  f = sio.loadmat(filename + '.mat')['f']
  return f[:, :4], f[:, 4:]
github zhanghang1989 / PyTorch-Encoding / encoding / datasets / pascal_aug.py
def _load_mat(self, filename):
        mat = scipy.io.loadmat(filename, mat_dtype=True, squeeze_me=True, 
            struct_as_record=False)
        mask = mat['GTcls'].Segmentation
        return Image.fromarray(mask)
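This example leans on three keyword arguments: squeeze_me=True drops singleton dimensions (so no [0][0] unwrapping is needed), struct_as_record=False loads structs as mat_struct objects with attribute access (hence mat['GTcls'].Segmentation), and mat_dtype=True returns arrays in the dtype MATLAB would use rather than the compact dtype they were saved with. The same call, annotated, against a hypothetical file:

import scipy.io

mat = scipy.io.loadmat('segmentation.mat',      # hypothetical file containing a 'GTcls' struct
                       mat_dtype=True,          # MATLAB dtypes (often float64), not the saved dtypes
                       squeeze_me=True,         # drop 1x1 and singleton dimensions
                       struct_as_record=False)  # structs become mat_struct, enabling attribute access
mask = mat['GTcls'].Segmentation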
github lzrobots / LearningToCompare_ZSL / AwA2_RN.py
def main():
    # step 1: init dataset
    print("init dataset")
    
    dataroot = './data'
    dataset = 'AwA2_data'
    image_embedding = 'res101' 
    class_embedding = 'att'

    matcontent = sio.loadmat(dataroot + "/" + dataset + "/" + image_embedding + ".mat")
    feature = matcontent['features'].T
    label = matcontent['labels'].astype(int).squeeze() - 1
    matcontent = sio.loadmat(dataroot + "/" + dataset + "/" + class_embedding + "_splits.mat")
    # numpy array index starts from 0, matlab starts from 1
    trainval_loc = matcontent['trainval_loc'].squeeze() - 1
    test_seen_loc = matcontent['test_seen_loc'].squeeze() - 1
    test_unseen_loc = matcontent['test_unseen_loc'].squeeze() - 1
  
    attribute = matcontent['original_att'].T 

    x = feature[trainval_loc] # train_features
    train_label = label[trainval_loc].astype(int)  # train_label
    att = attribute[train_label] # train attributes
    
    x_test = feature[test_unseen_loc]  # test_feature
    test_label = label[test_unseen_loc].astype(int) # test_label
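The repeated '- 1' above is the usual fix-up when a .mat file stores indices: MATLAB counts from 1, NumPy from 0. For instance:

import numpy as np

locs_matlab = np.array([[1, 3, 5]])  # 1-based indices as stored in the .mat file
locs = locs_matlab.squeeze() - 1     # array([0, 2, 4]), valid NumPy indices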
github siconos / siconos / examples / Mechanics / Music / guitar.py
def compute_initial_state_modal(self, max_coords):
        """Set initial positions of the string,
        assuming a triangular shape, with u[imax] = umax
        and modal form.
        """
        if max_coords is None:
            assert self.matlab_input is not None
            inputfile = self.matlab_input + '_q2.mat'
            q0 = scipy.io.loadmat(inputfile)['q2'][:, 0].copy()
            self.u0 = np.dot(self.s_mat, q0)
            self.max_coords = (self.u0.max(), self.x[self.u0.argmax()])
            return q0
        else:
            self.u0 = self._compute_initial_state_std(max_coords)
            q0 = np.dot(self.s_mat.T, self.u0)
            coeff = self.length / (self.n_modes + 1)
            q0 *= coeff
            return npw.asrealarray(q0)
github neurodata / m2g / MROCPdjango / computation / scanstat_degr.py
G_fn - fibergraph full filename (.mat)
  G - the sparse matrix containing the graphs
  bin - binarize or not
  toDir - Directory where resulting array is placed
  N - Scan statistic number i.e 1 or 2 ONLY
  '''
  print('\nCalculating scan statistic %d...' % N)

  if G is not None:
    pass
  elif (lcc_fn):
    G = loadAdjMat(G_fn, lcc_fn)

  # test case
  else:
    G = sio.loadmat(G_fn)['fibergraph']

  numNodes = G.shape[0]
  vertxDeg = np.zeros(numNodes) # Vertex degrees of all vertices
  indSubgrEdgeNum = np.zeros(numNodes) # Induced subgraph edge number i.e scan statistic

  percNodes = int(numNodes*0.1)
  mulNodes = float(numNodes)

  start = time()
  for vertx in range (numNodes):
    if vertx > 0 and vertx % percNodes == 0:
      print(ceil((vertx / mulNodes) * 100), "% complete...")

    nbors = G[:,vertx].nonzero()[0]
    vertxDeg[vertx] = nbors.shape[0] # degree of each vertex
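Here 'fibergraph' is a sparse adjacency matrix: loadmat returns MATLAB sparse matrices as scipy.sparse csc_matrix objects, so the column slicing and nonzero() above work without conversion. A round-trip sketch with made-up names:

import scipy.io as sio
import scipy.sparse as sp

sio.savemat('graph.mat', {'fibergraph': sp.random(100, 100, density=0.05, format='csc')})

G = sio.loadmat('graph.mat')['fibergraph']
print(type(G))               # scipy.sparse csc_matrix
print(G[:, 0].nonzero()[0])  # neighbors of vertex 0, as in the loop above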
github takahiro-777 / nlp_tutorial / nlp100_Python / 87 / main.py
Cosine similarity
	'''
	norm_ab = np.linalg.norm(vec_a) * np.linalg.norm(vec_b)
	if norm_ab != 0:
		return np.dot(vec_a, vec_b) / norm_ab
	else:
		# if either vector's norm is 0 we cannot judge similarity at all, so return the minimum value
		return -1


# load the dictionary
with open(fname_dict_index_t, 'rb') as data_file:
	dict_index_t = pickle.load(data_file)

# load the matrix
matrix_x300 = io.loadmat(fname_matrix_x300)['matrix_x300']

# print the cosine similarity between 'United_States' and 'U.S'
vec_a = matrix_x300[dict_index_t['United_States']]
vec_b = matrix_x300[dict_index_t['U.S']]

print(cos_sim(vec_a, vec_b))