How to use the scipy.io.savemat function in scipy

To help you get started, we’ve selected a few scipy.io.savemat examples, based on popular ways the function is used in public projects.

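For orientation before the project excerpts below, here is a minimal sketch of the basic call: scipy.io.savemat takes a file name and a dict mapping MATLAB variable names to values, and writes them as a MATLAB-compatible .mat file; scipy.io.loadmat reads them back. The file name and variable names here are illustrative.

import numpy as np
from scipy.io import savemat, loadmat

# Keys of the dict become MATLAB variable names; values can be NumPy arrays,
# scalars, or nested dicts (which become structs).
results = {
    'scores': np.array([0.1, 0.5, 0.9]),
    'labels': np.array([0, 1, 1]),
}

# Write the variables to a MATLAB-compatible .mat file (illustrative path).
savemat('results.mat', results)

# Read the file back to check the round trip; loadmat returns arrays with at
# least two dimensions, so the 1-D 'scores' vector comes back as shape (1, 3).
check = loadmat('results.mat')
print(check['scores'].shape)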

github ZaixuCui / Pattern_Regression / ElasticNet / ElasticNet_CZ_RandomCV.py (View on GitHub)
                Inner_Corr[k, l] = Fold_l_Mat['Fold_Corr'][0][0]
                Inner_MAE_inv[k, l] = Fold_l_Mat['Fold_MAE_inv']
                os.remove(Fold_l_Mat_Path)
            
            Inner_Corr = np.nan_to_num(Inner_Corr)
        Inner_Corr_Mean[i, :] = np.mean(Inner_Corr, axis=0)
        Inner_MAE_inv_Mean[i, :] = np.mean(Inner_MAE_inv, axis=0)

    Inner_Corr_CVMean = np.mean(Inner_Corr_Mean, axis=0)
    Inner_MAE_inv_CVMean = np.mean(Inner_MAE_inv_Mean, axis=0)
    Inner_Corr_CVMean = (Inner_Corr_CVMean - np.mean(Inner_Corr_CVMean)) / np.std(Inner_Corr_CVMean)
    Inner_MAE_inv_CVMean = (Inner_MAE_inv_CVMean - np.mean(Inner_MAE_inv_CVMean)) / np.std(Inner_MAE_inv_CVMean)
    Inner_Evaluation = Inner_Corr_CVMean + Inner_MAE_inv_CVMean
    
    Inner_Evaluation_Mat = {'Inner_Corr':Inner_Corr, 'Inner_MAE_inv':Inner_MAE_inv, 'Inner_Corr_CVMean':Inner_Corr_CVMean, 'Inner_MAE_inv_CVMean':Inner_MAE_inv_CVMean, 'Inner_Evaluation':Inner_Evaluation}
    sio.savemat(ResultantFolder + '/Inner_Evaluation.mat', Inner_Evaluation_Mat)
    
    Optimal_Combination_Index = np.argmax(Inner_Evaluation) 
    
    Optimal_Alpha_Index = np.int64(np.ceil((Optimal_Combination_Index + 1) / len(L1_ratio_Range))) - 1
    Optimal_Alpha = Alpha_Range[Optimal_Alpha_Index]
    Optimal_L1_ratio_Index = np.mod(Optimal_Combination_Index, len(L1_ratio_Range))
    Optimal_L1_ratio = L1_ratio_Range[Optimal_L1_ratio_Index]
    return (Optimal_Alpha, Optimal_L1_ratio)
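
A note on the Fold_l_Mat['Fold_Corr'][0][0] indexing near the top of this excerpt: loadmat returns every numeric variable as an array with at least two dimensions, so a scalar written with savemat comes back as a 1x1 array and needs two indexing steps to recover the plain number. A small sketch with illustrative file and variable names:

import scipy.io as sio

# Illustrative file and variable names.
sio.savemat('fold_result.mat', {'Fold_Corr': 0.73})

Fold_l_Mat = sio.loadmat('fold_result.mat')
print(Fold_l_Mat['Fold_Corr'].shape)  # (1, 1): scalars come back as 1x1 arrays
corr = Fold_l_Mat['Fold_Corr'][0][0]  # hence the [0][0] to get the float back
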
github Conchylicultor / DeepLearningOnGraph / subsample_signals.py (View on GitHub)
        print('Testing...')
        for j in range(len(completeSignal)): # Iterate over the signal
            if completeSignal[j] == 1: # Candidate for subsampling
                decimatedSignal = np.zeros(completeSignal.shape) # Only zeroes
                decimatedSignal[j] = 1 # Except for the current signal
                utils.saveLabelList(decimatedSignal, dirOutSamplesTe + idSignal + '_' + str(j) + '.txt')
                sio.savemat(dirOutSamplesTe + idSignal + '_' + str(j) + '.mat', {'y':decimatedSignal})
                labelsListTe.append(categoryId) # Add the category of the label (warning: no category can be skipped!)
        
        categoryId += 1 # Next category

    # Save labels
    utils.saveLabelList(labelsListTr, dirOutSamples + 'trlabels.txt')
    utils.saveLabelList(labelsListTe, dirOutSamples + 'telabels.txt')
    sio.savemat(dirOutSamples + 'trlabels.mat', {'labels':np.array(labelsListTr)})
    sio.savemat(dirOutSamples + 'telabels.mat', {'labels':np.array(labelsListTe)})
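
Both label lists above are plain Python lists that get wrapped in np.array right at the savemat call, which makes the stored type explicit as a numeric vector. A minimal sketch with hypothetical label data:

import numpy as np
import scipy.io as sio

labelsList = [3, 1, 4, 1, 5]  # hypothetical category ids collected in a loop

# Wrap the list in np.array so the variable is written as a numeric row vector.
sio.savemat('labels.mat', {'labels': np.array(labelsList)})
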
github dsmbgu8 / image_annotate.py / image_annotate.py (View on GitHub)
        dat = pl.imread(datf)

        x,y,z = np.atleast_3d(dat).shape

        if verbose:
            print "Loaded %d x %d x %d image data from file %s"%(x,y,z,datf)

        if norm and not pathexists(normf):
            from scipy.io import savemat 
            print "Saving normalized data to file %s"%normf  
            dat = dat.reshape([x*y,z])   
            normv = np.apply_along_axis(np.linalg.norm,1,dat)
            normv[normv==0] = 1.0
            dat = (dat.T / normv.T).T            
            dat = dat.reshape([x,y,z])               
            savemat(normf,{'normed':dat})
            print "Finished saving normalized data"            

        if flipdat:
            dat = dat[::-1,:,:]
                
        if reff is None:
            ref = dat[:,:,refbands]
        else:
            rfile,rext = os.path.splitext(reff)
            if rext == '.mat':
                ref = load_matlab(reff,refkey)
                if len(ref)==0:
                    return -1
            else:
                ref = pl.imread(reff)
github hli1221 / imagefusion_densefuse / train_recons.py (View on GitHub)
        loss_data = Loss_all[:count_loss]
        scio.savemat('./models/loss/DeepDenseLossData'+str(ssim_weight)+'.mat',{'loss':loss_data})

        loss_ssim_data = Loss_ssim[:count_loss]
        scio.savemat('./models/loss/DeepDenseLossSSIMData'+str(ssim_weight)+'.mat', {'loss_ssim': loss_ssim_data})

        loss_pixel_data = Loss_pixel[:count_loss]
        scio.savemat('./models/loss/DeepDenseLossPixelData'+str(ssim_weight)+'.mat', {'loss_pixel': loss_pixel_data})

        # IS_Validation = True;
        if IS_Validation:
            validation_ssim_data = Val_ssim_data[:count_loss]
            scio.savemat('./models/val/Validation_ssim_Data' + str(ssim_weight) + '.mat', {'val_ssim': validation_ssim_data})
            validation_pixel_data = Val_pixel_data[:count_loss]
            scio.savemat('./models/val/Validation_pixel_Data' + str(ssim_weight) + '.mat', {'val_pixel': validation_pixel_data})


        if debug:
            elapsed_time = datetime.now() - start_time
            print('Done training! Elapsed time: %s' % elapsed_time)
            print('Model is saved to: %s' % save_path)
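
Each savemat call above writes a single variable to its own file. Since the second argument is just a dictionary, the same curves could also be bundled into one file holding several variables; a hypothetical consolidation, with placeholder arrays standing in for the slices above and a simplified output path:

import numpy as np
import scipy.io as scio

# Placeholder stand-ins for the sliced loss curves and the weight tag above.
ssim_weight = 10
loss_data = np.zeros(100)
loss_ssim_data = np.zeros(100)
loss_pixel_data = np.zeros(100)

# One file, several variables (illustrative file name).
scio.savemat('DeepDenseLoss' + str(ssim_weight) + '.mat',
             {'loss': loss_data,
              'loss_ssim': loss_ssim_data,
              'loss_pixel': loss_pixel_data})
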
github nico / cvbook / dsift.py (View on GitHub)
    x, y = numpy.meshgrid(range(steps, m, steps), range(steps, n, steps))
    xx, yy = x.flatten(), y.flatten()
    tmpframe = os.path.join(temp_dir, 'out_tmp.frame')
    frame = numpy.array([xx, yy, scale * numpy.ones(xx.shape[0]),
                                 numpy.zeros(xx.shape[0])])
    numpy.savetxt(tmpframe, frame.T, fmt='%03.3f')

    cmd = ['sift', imname, '--output=' + resultname,
           '--read-frames=' + tmpframe]
    if force_orientation:
      cmd += ['--orientations']
    os.system(' '.join(cmd))

    # Re-write as .mat file, which loads faster.
    f = numpy.loadtxt(resultname)
    sio.savemat(resultname + '.mat', {'f':f}, oned_as='row')
  finally:
    shutil.rmtree(temp_dir)
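
The oned_as='row' argument in the call above controls how 1-D arrays are written: 'row' stores a length-N vector as a 1xN matrix and 'column' stores it as Nx1 ('row' is the current default). A short sketch with illustrative file names:

import numpy as np
from scipy.io import savemat, loadmat

f = np.arange(4.0)  # a 1-D array of descriptor values (illustrative)

savemat('as_row.mat', {'f': f}, oned_as='row')
savemat('as_column.mat', {'f': f}, oned_as='column')

print(loadmat('as_row.mat')['f'].shape)     # (1, 4)
print(loadmat('as_column.mat')['f'].shape)  # (4, 1)
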
github JHUVisionLab / multi-modal-regression / evaluateSimpleBDModel.py (View on GitHub)
	# training step
	training()
	# validation
	ytest, yhat_test, test_labels = testing()
	tmp_val_loss = get_error2(ytest, yhat_test, test_labels, num_classes)
	print('\nMedErr: {0}'.format(tmp_val_loss))
	writer.add_scalar('val_loss', tmp_val_loss, count)
	val_loss.append(tmp_val_loss)
	# time and output
	toc = time.time() - tic
	print('Epoch: {0} done in time {1}s'.format(epoch, toc))
	# cleanup
	gc.collect()
writer.close()
val_loss = np.stack(val_loss)
spio.savemat(plots_file, {'val_loss': val_loss})
github nipy / nipype / nipype / interfaces / spm.py (View on GitHub)
        fprintf('SPM path: %s\\n',which('spm'));
        spm_defaults;
                  
        if strcmp(spm('ver'),'SPM8'), spm_jobman('initcfg');end\n
        """
        if self.mfile:
            if self.jobname in ['st','smooth','preproc','fmri_spec','fmri_est']:
                mscript += self._generate_job('jobs{1}.%s{1}.%s(1)' % 
                                             (self.jobtype,self.jobname), contents[0])
            else:
                mscript += self._generate_job('jobs{1}.%s{1}.%s{1}' % 
                                             (self.jobtype,self.jobname), contents[0])
        else:
            jobdef = {'jobs':[{self.jobtype:[{self.jobname:self.reformat_dict_for_savemat
                                         (contents[0])}]}]}
            savemat(os.path.join(cwd,'pyjobs_%s.mat'%self.jobname), jobdef)
            mscript += "load pyjobs_%s;\n\n" % self.jobname
        mscript += """ 
        if strcmp(spm('ver'),'SPM8'), 
           jobs=spm_jobman('spm5tospm8',{jobs});
        end 
        spm_jobman(\'run\',jobs);\n
        """
        if postscript is not None:
            mscript += postscript
        cmdline = self._gen_matlab_command(mscript, cwd=cwd,
                                           script_name='pyscript_%s' % self.jobname,
                                           mfile=self.mfile) 
        return cmdline, mscript
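
The jobdef dictionary above nests dicts inside lists inside a dict; savemat writes a dict as a MATLAB struct and a list of objects as a cell array, which is why the generated script can address the result as jobs{1}.<jobtype>{1}.<jobname>. A minimal sketch with made-up job fields:

from scipy.io import savemat

# Made-up job definition mirroring the nested shape built above:
# lists become cell arrays, dicts become structs.
jobdef = {'jobs': [{'temporal': [{'st': {'nslices': 32.0, 'tr': 2.0}}]}]}

savemat('pyjobs_st.mat', jobdef)
# After `load pyjobs_st` in MATLAB, the values should be reachable as
# jobs{1}.temporal{1}.st.nslices and jobs{1}.temporal{1}.st.tr
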
github OpenModelica / OMPython / PythonInterface / PySimulator / Plugins / Analysis / LinearSystemAnalysis / LinearizeFMU.py (View on GitHub)
        '''Write linearization data to MATLAB .mat file (with file name matFileName)'''
        try: 
            data = {}
            data['A'] = self.A
            data['B'] = self.B
            data['C'] = self.C
            data['D'] = self.D
            data['eigenValues']=self.eigenValues
            data['eigenVectors']=self.eigenVectors
            data['inputNames']=self.inputNames
            data['outputNames']=self.outputNames
            data['stateNames']=self.stateNames
            if self.p is not None:
                for name, value in self.p.items():
                    data[name] = value
            scipy.io.savemat(file_name=matFileName, mdict=data, oned_as='row')
        except Exception as info:
            print('Error in writeDataToMat()')
            print(info)
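
This call spells out the keyword names of the two positional parameters (file_name and mdict). The same signature also accepts do_compression, which compresses the stored variables on write, and appendmat, which by default appends a missing .mat extension to the file name. A short sketch with illustrative data:

import numpy as np
import scipy.io

# Illustrative linear-system data.
data = {'A': np.eye(3), 'eigenValues': np.array([1.0 + 2.0j, 1.0 - 2.0j, -0.5])}

# Keyword style as above, plus compression; '.mat' is appended to 'linsys'
# automatically because appendmat defaults to True.
scipy.io.savemat(file_name='linsys', mdict=data, oned_as='row',
                 do_compression=True)
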
github YadiraF / PRNet / demo.py (View on GitHub)
                pos_interpolated = pos.copy()
                texture = cv2.remap(image, pos_interpolated[:,:,:2].astype(np.float32), None, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT,borderValue=(0))
                if args.isMask:
                    vertices_vis = get_visibility(vertices, prn.triangles, h, w)
                    uv_mask = get_uv_mask(vertices_vis, prn.triangles, prn.uv_coords, h, w, prn.resolution_op)
                    uv_mask = resize(uv_mask, (args.texture_size, args.texture_size), preserve_range = True)
                    texture = texture*uv_mask[:,:,np.newaxis]
                write_obj_with_texture(os.path.join(save_folder, name + '.obj'), save_vertices, prn.triangles, texture, prn.uv_coords/prn.resolution_op)#save 3d face with texture(can open with meshlab)
            else:
                write_obj_with_colors(os.path.join(save_folder, name + '.obj'), save_vertices, prn.triangles, colors) #save 3d face(can open with meshlab)

        if args.isDepth:
            depth_image = get_depth_image(vertices, prn.triangles, h, w, True)
            depth = get_depth_image(vertices, prn.triangles, h, w)
            imsave(os.path.join(save_folder, name + '_depth.jpg'), depth_image)
            sio.savemat(os.path.join(save_folder, name + '_depth.mat'), {'depth':depth})

        if args.isMat:
            sio.savemat(os.path.join(save_folder, name + '_mesh.mat'), {'vertices': vertices, 'colors': colors, 'triangles': prn.triangles})

        if args.isKpt or args.isShow:
            # get landmarks
            kpt = prn.get_landmarks(pos)
            np.savetxt(os.path.join(save_folder, name + '_kpt.txt'), kpt)

        if args.isPose or args.isShow:
            # estimate pose
            camera_matrix, pose = estimate_pose(vertices)
            np.savetxt(os.path.join(save_folder, name + '_pose.txt'), pose) 
            np.savetxt(os.path.join(save_folder, name + '_camera_matrix.txt'), camera_matrix) 
