How to use the aicsimageio.omeTifWriter module in aicsimageio

To help you get started, we’ve selected a few aicsimageio examples, based on popular ways it is used in public projects.


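All of the examples below follow the same basic pattern: construct an OmeTifWriter with an output path, then call save() with a NumPy array. Here is a minimal, self-contained sketch of that pattern, assuming the legacy aicsimageio API in which omeTifWriter is a top-level module; the output path, array shape, and dtype are illustrative and not taken from any of the projects.

import numpy as np
from aicsimageio import omeTifWriter

# hypothetical single-channel Z-stack (ZYX order); shape and dtype are only for illustration
data = np.zeros((32, 512, 512), dtype=np.uint8)

# the constructor takes the output file path; save() writes the array as an OME-TIFF
writer = omeTifWriter.OmeTifWriter('example_output.ome.tif')
writer.save(data)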
github AllenInstitute / aics-ml-segmentation / aicsmlsegment / bin / exp_scheduler.py View on Github
            img = np.transpose(img,(1,0,2,3))
            img = img[args.InputCh,:,:,:]
            img = input_normalization(img, args)

            if len(args.ResizeRatio)>0:
                img = resize(img, (1, args.ResizeRatio[0], args.ResizeRatio[1], args.ResizeRatio[2]), method='cubic')
                for ch_idx in range(img.shape[0]):
                    struct_img = img[ch_idx,:,:,:] # note that struct_img is only a view of img, so changes made on struct_img also affect img
                    struct_img = (struct_img - struct_img.min())/(struct_img.max() - struct_img.min())
                    img[ch_idx,:,:,:] = struct_img

            # apply the model
            output_img = model_inference(model, img, softmax, args)

            for ch_idx in range(len(args.OutputCh)//2):
                writer = omeTifWriter.OmeTifWriter(args.OutputDir + pathlib.PurePosixPath(fn).stem +'_seg_'+ str(args.OutputCh[2*ch_idx])+'.ome.tif')
                if args.Threshold<0:
                    writer.save(output_img[ch_idx].astype(float))
                else:
                    out = output_img[ch_idx] > args.Threshold
                    out = out.astype(np.uint8)
                    out[out>0]=255
                    writer.save(out)
        
        print(f'Image {fn} has been segmented')
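The thresholding branch in the excerpt above is a common way to turn a floating-point probability map from the model into a binary uint8 mask before writing it out. A standalone sketch of just that step, with the array and the 0.5 cutoff assumed for illustration:

import numpy as np

probability_map = np.random.rand(16, 128, 128).astype(np.float32)  # stand-in for one output channel
threshold = 0.5  # illustrative cutoff; the scripts above read it from args.Threshold

# binarize, then rescale the foreground to 255 so the mask is easy to view as an image
mask = (probability_map > threshold).astype(np.uint8)
mask[mask > 0] = 255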
github AllenInstitute / aics-ml-segmentation / aicsmlsegment / bin / curator / curator_sorting.py View on Github
                cmap = np.ones(seg.shape, dtype=np.float32)
                if os.path.isfile(str(row['mask'])):
                    # load segmentation gt
                    reader = AICSImage(row['mask'])
                    img = reader.data
                    assert img.shape[0]==1 and img.shape[1]==1
                    mask = img[0,0,:,:,:]
                    cmap[mask>0]=0

                writer = omeTifWriter.OmeTifWriter(args.train_path + os.sep + 'img_' + f'{training_data_count:03}' + '.ome.tif')
                writer.save(struct_img)

                writer = omeTifWriter.OmeTifWriter(args.train_path + os.sep + 'img_' + f'{training_data_count:03}' + '_GT.ome.tif')
                writer.save(seg)

                writer = omeTifWriter.OmeTifWriter(args.train_path + os.sep + 'img_' + f'{training_data_count:03}' + '_CM.ome.tif')
                writer.save(cmap)
        print('training data is ready')
github AllenInstitute / aics-ml-segmentation / aicsmlsegment / bin / curator / curator_takeall.py View on Github
            # excluding mask
            cmap = np.ones(seg.shape, dtype=np.float32)
            mask_fn = args.mask_path + os.sep + os.path.basename(fn)[:-1*len(args.data_type)] + '_mask.tiff'
            if os.path.isfile(mask_fn):
                reader = AICSImage(mask_fn)
                img = reader.data
                assert img.shape[0]==1 and img.shape[1]==1
                mask = img[0,0,:,:,:]
                cmap[mask==0]=0


            writer = omeTifWriter.OmeTifWriter(args.train_path + os.sep + 'img_' + f'{training_data_count:03}' + '.ome.tif')
            writer.save(struct_img)

            writer = omeTifWriter.OmeTifWriter(args.train_path + os.sep + 'img_' + f'{training_data_count:03}' + '_GT.ome.tif')
            writer.save(seg)

            writer = omeTifWriter.OmeTifWriter(args.train_path + os.sep + 'img_' + f'{training_data_count:03}' + '_CM.ome.tif')
            writer.save(cmap)
github AllenInstitute / aics-ml-segmentation / aicsmlsegment / bin / exp_scheduler.py View on Github
        for fi, fn in enumerate(filenames):
            print(fn)
            # load data
            struct_img = load_single_image(args, fn, time_flag=False)

            print(struct_img.shape)

            # apply the model
            output_img = apply_on_image(model, struct_img, softmax, args)
            #output_img = model_inference(model, struct_img, softmax, args)

            #print(len(output_img))

            for ch_idx in range(len(args.OutputCh)//2):
                write = omeTifWriter.OmeTifWriter(args.OutputDir + pathlib.PurePosixPath(fn).stem + '_seg_'+ str(args.OutputCh[2*ch_idx])+'.ome.tif')
                if args.Threshold<0:
                    write.save(output_img[ch_idx].astype(float))
                else:
                    out = output_img[ch_idx] > args.Threshold
                    out = out.astype(np.uint8)
                    out[out>0]=255
                    write.save(out)
            
            print(f'Image {fn} has been segmented')

    elif args.mode == 'eval_file':

        fn = args.InputFile
        print(fn)
        data_reader = AICSImage(fn)
        img0 = data_reader.data
github AllenInstitute / aics-ml-segmentation / aicsmlsegment / bin / curator / curator_sorting.py View on Github
            seg = im_seg_full[0,0,:,:,:]

            score = gt_sorting(raw_img, seg)
            if score == 1:
                df['score'].iloc[index]=1
                need_mask = input('Do you need to add a mask for this image, enter y or n:  ')
                if need_mask == 'y':
                    create_mask(raw_img, seg.astype(np.uint8))
                    mask_fn = args.mask_path + os.sep + os.path.basename(row['raw'])[:-5] + '_mask.tiff'
                    crop_mask = np.zeros(seg.shape, dtype=np.uint8)
                    for zz in range(crop_mask.shape[0]):
                        crop_mask[zz,:,:] = draw_mask[:crop_mask.shape[1],:crop_mask.shape[2]]

                    crop_mask = crop_mask.astype(np.uint8)
                    crop_mask[crop_mask>0]=255
                    writer = omeTifWriter.OmeTifWriter(mask_fn)
                    writer.save(crop_mask)
                    df['mask'].iloc[index]=mask_fn
            else:
                df['score'].iloc[index]=0

            df.to_csv(args.csv_name, index=False)

        #########################################
        # generate training data:
        #  (we want to do this step after "sorting"
        #  mainly because we want to keep the sorting
        #  step as smooth as possible, even though
        #  this may waste i/o time on reloading images)
        # #######################################
        print('finish sorting, start building the training data ...')
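In the curator_sorting and curator_merging excerpts, a 2-D mask drawn by the user (draw_mask, produced by the interactive create_mask / create_merge_mask step, which is not shown in these snippets) is replicated across every Z slice to build a 3-D mask before it is written with OmeTifWriter. A standalone sketch of that replication, with the shapes assumed for illustration:

import numpy as np

draw_mask = np.zeros((600, 600), dtype=np.uint8)  # illustrative 2-D mask from the drawing step
seg_shape = (40, 512, 512)                        # illustrative ZYX shape of the segmentation

crop_mask = np.zeros(seg_shape, dtype=np.uint8)
for zz in range(crop_mask.shape[0]):
    # crop the drawn mask to the YX extent of the segmentation and copy it into every slice
    crop_mask[zz, :, :] = draw_mask[:crop_mask.shape[1], :crop_mask.shape[2]]
crop_mask[crop_mask > 0] = 255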
github AllenInstitute / aics-ml-segmentation / aicsmlsegment / bin / curator / curator_merging.py View on Github
            create_merge_mask(raw_img, seg1.astype(np.uint8), seg2.astype(np.uint8), 'merging_mask')

            if ignore_img:
                df['score'].iloc[index]=0
            else:
                df['score'].iloc[index]=1

                mask_fn = args.mask_path + os.sep + os.path.basename(row['raw'])[:-5] + '_mask.tiff'
                crop_mask = np.zeros(seg1.shape, dtype=np.uint8)
                for zz in range(crop_mask.shape[0]):
                    crop_mask[zz,:,:] = draw_mask[:crop_mask.shape[1],:crop_mask.shape[2]]

                crop_mask = crop_mask.astype(np.uint8)
                crop_mask[crop_mask>0]=255
                writer = omeTifWriter.OmeTifWriter(mask_fn)
                writer.save(crop_mask)
                df['merging_mask'].iloc[index]=mask_fn

                need_mask = input('Do you need to add an excluding mask for this image, enter y or n:  ')
                if need_mask == 'y':
                    create_merge_mask(raw_img, seg1.astype(np.uint8), seg2.astype(np.uint8), 'excluding mask')

                    mask_fn = args.ex_mask_path + os.sep + os.path.basename(row['raw'])[:-5] + '_mask.tiff'
                    crop_mask = np.zeros(seg1.shape, dtype=np.uint8)
                    for zz in range(crop_mask.shape[0]):
                        crop_mask[zz,:,:] = draw_mask[:crop_mask.shape[1],:crop_mask.shape[2]]

                    crop_mask = crop_mask.astype(np.uint8)
                    crop_mask[crop_mask>0]=255
                    writer = omeTifWriter.OmeTifWriter(mask_fn)
                    writer.save(crop_mask)
github AllenInstitute / aics-ml-segmentation / aicsmlsegment / bin / curator / curator_takeall.py View on Github
            seg = img[0,0,:,:,:]>0
            seg = seg.astype(np.uint8)
            seg[seg>0]=1

            # excluding mask
            cmap = np.ones(seg.shape, dtype=np.float32)
            mask_fn = args.mask_path + os.sep + os.path.basename(fn)[:-1*len(args.data_type)] + '_mask.tiff'
            if os.path.isfile(mask_fn):
                reader = AICSImage(mask_fn)
                img = reader.data
                assert img.shape[0]==1 and img.shape[1]==1
                mask = img[0,0,:,:,:]
                cmap[mask==0]=0


            writer = omeTifWriter.OmeTifWriter(args.train_path + os.sep + 'img_' + f'{training_data_count:03}' + '.ome.tif')
            writer.save(struct_img)

            writer = omeTifWriter.OmeTifWriter(args.train_path + os.sep + 'img_' + f'{training_data_count:03}' + '_GT.ome.tif')
            writer.save(seg)

            writer = omeTifWriter.OmeTifWriter(args.train_path + os.sep + 'img_' + f'{training_data_count:03}' + '_CM.ome.tif')
            writer.save(cmap)
github AllenInstitute / aics-ml-segmentation / aicsmlsegment / bin / curator / curator_merging.py View on Github
                cmap = np.ones(seg1.shape, dtype=np.float32)
                if os.path.isfile(str(row['excluding_mask'])):
                    reader = AICSImage(row['excluding_mask'])
                    img = reader.data
                    assert img.shape[0]==1 and img.shape[1]==1
                    ex_mask = img[0,0,:,:,:]>0
                    cmap[ex_mask>0]=0
 
                
                writer = omeTifWriter.OmeTifWriter(args.train_path + os.sep + 'img_' + f'{training_data_count:03}' + '.ome.tif')
                writer.save(struct_img)

                seg1 = seg1.astype(np.uint8)
                seg1[seg1>0]=1
                writer = omeTifWriter.OmeTifWriter(args.train_path + os.sep + 'img_' + f'{training_data_count:03}' + '_GT.ome.tif')
                writer.save(seg1)

                writer = omeTifWriter.OmeTifWriter(args.train_path + os.sep + 'img_' + f'{training_data_count:03}' + '_CM.ome.tif')
                writer.save(cmap)
        print('training data is ready')
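Each curator script ends with the same write pattern: for every accepted training sample it saves the raw structure image, the ground truth, and the cost map under a shared index. A small helper capturing that pattern might look like the sketch below; the function and argument names are hypothetical, while the img_NNN / _GT / _CM filename convention and the writer calls come directly from the examples above.

import os
from aicsimageio import omeTifWriter

def save_training_sample(train_path, index, struct_img, seg, cmap):
    # hypothetical helper: writes raw image, ground truth, and cost map under the shared index
    prefix = os.path.join(train_path, f'img_{index:03}')
    for suffix, data in (('.ome.tif', struct_img), ('_GT.ome.tif', seg), ('_CM.ome.tif', cmap)):
        writer = omeTifWriter.OmeTifWriter(prefix + suffix)
        writer.save(data)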