How to use the deeplabcut.evaluate_network function in deeplabcut

To help you get started, we've selected a few deeplabcut examples based on popular ways the function is used in public projects.

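In short, evaluate_network takes the path to a project's config.yaml and scores the trained network(s) against the labeled train/test split. A minimal sketch, assuming an already-trained project (the config path below is a placeholder):

import deeplabcut

# Placeholder path -- point this at your own project's config.yaml.
path_config_file = '/home/user/myproject-me-2021-01-01/config.yaml'

# Score the trained snapshots on the labeled train/test split; plotting=True also
# saves images of predicted vs. human-labeled body parts for visual inspection.
deeplabcut.evaluate_network(path_config_file, Shuffles=[1], plotting=True)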

From AlexEMG/DeepLabCut, examples/testscriptwffmpeg.py (view on GitHub):
# This excerpt assumes the preamble of the full script: `import os, subprocess, deeplabcut`,
# a loaded project config `cfg`, the project `path_config_file`, a `video` list, and the
# `read_config`/`write_config` helpers (presumably from deeplabcut.utils.auxiliaryfunctions).
# Build the path to the training pose_cfg.yaml for shuffle 1 and shorten training:
posefile=os.path.join(cfg['project_path'],'dlc-models/iteration-'+str(cfg['iteration'])+'/'+ cfg['Task'] + cfg['date'] + '-trainset' + str(int(cfg['TrainingFraction'][0] * 100)) + 'shuffle' + str(1),'train/pose_cfg.yaml')
DLC_config=read_config(posefile)
DLC_config['save_iters']=10
DLC_config['display_iters']=2
DLC_config['multi_step']=[[0.001,10]]

print("CHANGING training parameters to end quickly!")
write_config(posefile,DLC_config)

print("TRAIN")
deeplabcut.train_network(path_config_file)

#this is much easier now: deeplabcut.train_network(path_config_file,gputouse=0,max_snapshots_to_keep=None,saveiters=1)

print("EVALUATE")
deeplabcut.evaluate_network(path_config_file,plotting=True)

print("CUT SHORT VIDEO AND ANALYZE")

# Make super short video (so the analysis is quick!)
vname='brief'
newvideo=os.path.join(cfg['project_path'],'videos',vname+'.avi')

subprocess.call(['ffmpeg','-i',video[0],'-ss','00:00:00','-to','00:00:00.4','-c','copy',newvideo])
deeplabcut.analyze_videos(path_config_file,[newvideo])

print("CREATE VIDEO")
deeplabcut.create_labeled_video(path_config_file,[newvideo])


print("EXTRACT OUTLIERS")
deeplabcut.extract_outlier_frames(path_config_file,[newvideo],outlieralgorithm='jump',epsilon=0,automatic=True)
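The subprocess.call above, like the testscript.py example further down, assumes the ffmpeg command-line tool is installed. If you adapt this script, a small guard such as the sketch below (reusing the same video/newvideo variables as the snippet) avoids a confusing failure when ffmpeg is missing:

import shutil
import subprocess

# Only shell out to ffmpeg if it is actually available on PATH.
if shutil.which('ffmpeg') is not None:
    subprocess.call(['ffmpeg', '-i', video[0], '-ss', '00:00:00',
                     '-to', '00:00:00.4', '-c', 'copy', newvideo])
else:
    print("ffmpeg not found; install it or trim the video another way.")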

From AlexEMG/DeepLabCut, examples/testscript_openfielddata.py (view on GitHub):
# `shuffle` is an integer defined earlier in the full script.
deeplabcut.create_training_dataset(path_config_file,Shuffles=[shuffle])
cfg=deeplabcut.auxiliaryfunctions.read_config(path_config_file)
posefile=os.path.join(cfg['project_path'],'dlc-models/iteration-'+str(cfg['iteration'])+'/'+ cfg['Task'] + cfg['date'] + '-trainset' + str(int(cfg['TrainingFraction'][0] * 100)) + 'shuffle' + str(shuffle),'train/pose_cfg.yaml')

DLC_config=deeplabcut.auxiliaryfunctions.read_plainconfig(posefile)
DLC_config['save_iters']=10
DLC_config['display_iters']=2
DLC_config['multi_step']=[[0.005,15001]]
deeplabcut.auxiliaryfunctions.write_plainconfig(posefile,DLC_config)


print("TRAIN NETWORK")
deeplabcut.train_network(path_config_file, shuffle=shuffle,saveiters=15000,displayiters=1000,max_snapshots_to_keep=15)

print("EVALUATE")
deeplabcut.evaluate_network(path_config_file, Shuffles=[shuffle],plotting=True)
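The long os.path.join expression for train/pose_cfg.yaml is repeated in several of these scripts. A hypothetical helper like the sketch below factors out exactly that logic (some DeepLabCut 2.x releases also ship helpers for this, e.g. auxiliaryfunctions.GetModelFolder, so check what your installed version provides):

import os

def get_train_pose_cfg(cfg, shuffle=1, trainingsetindex=0):
    # Rebuild the path used in the scripts above from a loaded project config dict.
    trainset = int(cfg['TrainingFraction'][trainingsetindex] * 100)
    model_dir = (cfg['Task'] + cfg['date'] + '-trainset' + str(trainset)
                 + 'shuffle' + str(shuffle))
    return os.path.join(cfg['project_path'], 'dlc-models',
                        'iteration-' + str(cfg['iteration']),
                        model_dir, 'train', 'pose_cfg.yaml')

# e.g.: posefile = get_train_pose_cfg(cfg, shuffle=shuffle)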

From AlexEMG/DeepLabCut, examples/testscript.py (view on GitHub):
# `cfg` and `numiter` (a small iteration count so the test ends quickly) are defined earlier in the script.
posefile=os.path.join(cfg['project_path'],'dlc-models/iteration-'+str(cfg['iteration'])+'/'+ cfg['Task'] + cfg['date'] + '-trainset' + str(int(cfg['TrainingFraction'][0] * 100)) + 'shuffle' + str(1),'train/pose_cfg.yaml')

DLC_config=deeplabcut.auxiliaryfunctions.read_plainconfig(posefile)
DLC_config['save_iters']=numiter
DLC_config['display_iters']=2
DLC_config['multi_step']=[[0.001,numiter]]

print("CHANGING training parameters to end quickly!")
deeplabcut.auxiliaryfunctions.write_plainconfig(posefile,DLC_config)

print("TRAIN")
deeplabcut.train_network(path_config_file)

print("EVALUATE")
deeplabcut.evaluate_network(path_config_file,plotting=True)
#deeplabcut.evaluate_network(path_config_file,plotting=True,trainingsetindex=33)
print("CUT SHORT VIDEO AND ANALYZE (with dynamic cropping!)")

# Make super short video (so the analysis is quick!)

try: #you need ffmpeg command line interface
    #subprocess.call(['ffmpeg','-i',video[0],'-ss','00:00:00','-to','00:00:00.4','-c','copy',newvideo])
    newvideo=deeplabcut.ShortenVideo(video[0],start='00:00:00',stop='00:00:00.4',outsuffix='short',outpath=os.path.join(cfg['project_path'],'videos'))
    vname=Path(newvideo).stem
except: # if ffmpeg is broken
    vname='brief'
    newvideo=os.path.join(cfg['project_path'],'videos',vname+'.mp4')
    from moviepy.editor import VideoFileClip,VideoClip
    clip = VideoFileClip(video[0])
    clip.reader.initialize()
    def make_frame(t):
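The excerpt is cut off inside the moviepy fallback, which rebuilds the short clip frame by frame via VideoClip/make_frame. A simpler alternative, purely as a sketch and not what the original script does, is moviepy's subclip:

from moviepy.editor import VideoFileClip

# Trim the first 0.4 s of the source video and write it to the fallback path defined above.
short = VideoFileClip(video[0]).subclip(0, 0.4)
short.write_videofile(newvideo)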

From AlexEMG/DeepLabCut, examples/testscript_openfielddata_augmentationcomparison.py (view on GitHub):
# The block below is commented out in the original script (it sits inside a triple-quoted
# string whose opening quotes fall outside this excerpt); it shows the older, manual way
# of switching the augmentation backend by editing each shuffle's pose_cfg.yaml:
'''
posefile=os.path.join(cfg['project_path'],'dlc-models/iteration-'+str(cfg['iteration'])+'/'+ cfg['Task'] + cfg['date'] + '-trainset' + str(int(cfg['TrainingFraction'][0] * 100)) + 'shuffle' + str(shuffle),'train/pose_cfg.yaml')

DLC_config=deeplabcut.auxiliaryfunctions.read_plainconfig(posefile)
DLC_config['dataset_type']='tensorpack'
deeplabcut.auxiliaryfunctions.write_plainconfig(posefile,DLC_config)
'''

### Note that the new function in DLC 2.1 (used below) makes this much easier:
deeplabcut.create_training_model_comparison(path_config_file,num_shuffles=1,net_types=['resnet_50'],augmenter_types=['imgaug','default','tensorpack'])

# maxiters=5 below keeps each training run very short, for testing purposes only.
for shuffle in [2,3]:
	print("TRAIN NETWORK", shuffle)
	deeplabcut.train_network(path_config_file, shuffle=shuffle,saveiters=10000,displayiters=200,maxiters=5,max_snapshots_to_keep=11)

	print("EVALUATE")
	deeplabcut.evaluate_network(path_config_file, Shuffles=[shuffle],plotting=True)

	print("Analyze Video")

	videofile_path = os.path.join(os.getcwd(),'openfield-Pranav-2018-10-30','videos','m3v1mp4.mp4')

	deeplabcut.analyze_videos(path_config_file,[videofile_path], shuffle=shuffle)

	print("Create Labeled Video and plot")
	deeplabcut.create_labeled_video(path_config_file,[videofile_path], shuffle=shuffle)
	deeplabcut.plot_trajectories(path_config_file,[videofile_path], shuffle=shuffle)
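After a run like this, the train/test errors of the different shuffles can be compared side by side. evaluate_network stores its results in the project's evaluation-results folder; the CSV name below is an assumption (it varies across DeepLabCut versions), and cfg is the loaded project config as in the snippets above:

import os
import pandas as pd

# Assumed file name -- check the evaluation-results folder of your own project.
results_csv = os.path.join(cfg['project_path'], 'evaluation-results',
                           'iteration-' + str(cfg['iteration']),
                           'CombinedEvaluation-results.csv')
print(pd.read_csv(results_csv))   # train/test pixel errors per shuffle and snapshot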

From AlexEMG/DeepLabCut, deeplabcut/gui/evaluate_network.py (view on GitHub):
def evaluate_network(self,event):
        # wxPython button handler from the DeepLabCut GUI: read the user's choices
        # from the widgets, then forward them to the deeplabcut API call below.
        #shuffle = self.shuffle.GetValue()
        trainingsetindex = self.trainingset.GetValue()

        shuffle = [self.shuffles.GetValue()]
        if self.plot_choice.GetStringSelection() == "Yes":
            plotting = True
        else:
            plotting = False

        if len(self.bodyparts)==0:
            self.bodyparts='all'
        deeplabcut.evaluate_network(self.config,Shuffles=shuffle,trainingsetindex=trainingsetindex,plotting=plotting,show_errors=True,comparisonbodyparts=self.bodyparts,gputouse=None)
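
Outside the GUI, the handler above reduces to a single API call with the same keyword arguments; for example (the config path and shuffle number are placeholders):

import deeplabcut

config = '/home/user/myproject-me-2021-01-01/config.yaml'   # placeholder
deeplabcut.evaluate_network(config,
                            Shuffles=[1],
                            trainingsetindex=0,
                            plotting=True,
                            show_errors=True,
                            comparisonbodyparts='all',
                            gputouse=None)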