How to use the mahotas.imsave function in mahotas

To help you get started, we've selected a few mahotas.imsave examples based on popular ways the function is used in public projects.


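Before the project examples, here is a minimal standalone sketch of the call itself (the file names are made up). mahotas.imsave takes a destination path and a NumPy array; the output format is inferred from the file extension, and ordinary 8-bit images should be passed as np.uint8 arrays.

import numpy as np
import mahotas as mh

# Round-trip an existing image (hypothetical input file)
im = mh.imread('input.png')
mh.imsave('copy.jpeg', im)

# Save a synthetic 64x256 horizontal gradient
gradient = np.tile(np.arange(256, dtype=np.uint8), (64, 1))
mh.imsave('gradient.png', gradient)
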
github varshini24 / Electron-microscopic-image-segmentation-using-Deep-Learning / utils / input_image_maker.py
            #print "check3"
            # changing image extension from tiff to png
            #img = str(img).split('.')[0]+'.png'  #images are converted from 32-bit to 16-bit when changed to png
            img = str(img).split('.')[0]+'_ac4'+'.png'

        except IOError:
            print('Unrecognized image name format!')

        count = count + 1
        print('No. of images processed:', count, img)

        mh.imsave(os.path.join(save_path, img), final_img)
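The renaming step above matters because mh.imsave chooses the output format from the destination's extension, so writing under a .png name yields a PNG even when the source was a TIFF. A hedged sketch of the same idea, with made-up paths and os.path.splitext used instead of splitting on '.':

import os
import mahotas as mh

img = 'slice_0001.tiff'                        # hypothetical input name
png_name = os.path.splitext(img)[0] + '.png'   # swap the extension
final_img = mh.imread(os.path.join('raw', img))
mh.imsave(os.path.join('converted', png_name), final_img)
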
github luispedro / BuildingMachineLearningSystemsWithPython / ch10 / threshold.py
# Convert to greyscale
image = mh.colors.rgb2gray(image, dtype=np.uint8)

# Compute a threshold value:
thresh = mh.thresholding.otsu(image)
print('Otsu threshold is {0}'.format(thresh))

# Compute the thresholded image
otsubin = (image > thresh)
print('Saving thresholded image (with Otsu threshold) to otsu-threshold.jpeg')
mh.imsave('otsu-threshold.jpeg', otsubin.astype(np.uint8) * 255)

# Execute morphological opening to smooth out the edges
otsubin = mh.open(otsubin, np.ones((15, 15)))
mh.imsave('otsu-closed.jpeg', otsubin.astype(np.uint8) * 255)

# An alternative thresholding method:
thresh = mh.thresholding.rc(image)
print('Ridler-Calvard threshold is {0}'.format(thresh))
print('Saving thresholded image (with Ridler-Calvard threshold) to rc-threshold.jpeg')
mh.imsave('rc-threshold.jpeg', (image > thresh).astype(np.uint8) * 255)
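Note that both saves above first cast the boolean threshold mask to np.uint8 and scale it to 0/255; handing mh.imsave a raw boolean (or 0/1) array is likely to produce an image that looks entirely black. A small sketch of the same pattern, using a hypothetical input file:

import numpy as np
import mahotas as mh

image = mh.imread('scene.jpg', as_grey=True).astype(np.uint8)
mask = image > mh.thresholding.otsu(image)          # boolean array
mh.imsave('mask.png', mask.astype(np.uint8) * 255)  # scale to 0/255 so the result is viewable
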
github luispedro / luispedro_org / files / talks / 2014 / 09-tcmm2014 / jug-segmentation-tutorial / saveimages.py
from jugfile import method1
from matplotlib import cm
import numpy as np
import mahotas as mh  # needed for the mh.imread/mh.imsave calls below

im = mh.imread('images/dna-21.jpg')
mh.imsave('image_stretched.jpeg', mh.stretch(im.astype(float)**.01))
m1 = method1.f('images/dna-21.jpg', 2)
m1 = m1.astype(np.uint8)
color = ((cm.rainbow(m1.astype(float)/m1.max())[:,:,:3]).reshape(m1.shape+(3,)))
color[m1 == 0] = (0,0,0)
mh.imsave('image_method1.jpeg', mh.stretch(color))

ref = mh.imread('references/dna-21.png')
color = ((cm.rainbow(ref.astype(float)/ref.max())[:,:,:3]).reshape(m1.shape+(3,)))
color[ref == 0] = (0,0,0)
mh.imsave('image_reference.jpeg', mh.stretch(color))
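mh.stretch, used on every array handed to mh.imsave in this snippet, contrast-stretches its input to the 0 to 255 range and returns np.uint8, which is the safest dtype to pass to mh.imsave for float or colormapped data. A short sketch with a made-up float image:

import numpy as np
import mahotas as mh

data = np.random.random((128, 128)) ** 2       # float image in [0, 1]
mh.imsave('stretched.png', mh.stretch(data))   # mh.stretch maps it onto 0..255 as np.uint8
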
github Rhoana / dojo / _dojo / controller.py
    print(n)

    # remove small regions
    sizes = mh.labeled.labeled_size(seeds)
    min_seed_size = 5
    too_small = np.where(sizes < min_seed_size)
    seeds = mh.labeled.remove_regions(seeds, too_small).astype(np.uint8)


    #
    # run watershed
    #
    ws = mh.cwatershed(brush_image.max() - brush_image, seeds)

    mh.imsave('/tmp/end_points.tif', 50*end_points.astype(np.uint8))
    mh.imsave('/tmp/seeds_mask.tif', 50*seed_mask.astype(np.uint8))
    mh.imsave('/tmp/seeds.tif', 50*seeds.astype(np.uint8))
    mh.imsave('/tmp/ws.tif', 50*ws.astype(np.uint8))

    lines_array = np.zeros(ws.shape,dtype=np.uint8)
    lines = []

    print(label_id)

    # valid_labels = [label_id]

    # while label_id in self.__merge_table.values():
    #   label_id = self.__merge_table.values()[]
    #   valid_labels.append(label_id)

    for y in range(ws.shape[0]-1):
      for x in range(ws.shape[1]-1):
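The 50* factors in the mh.imsave calls above are only there to brighten small integer label values so the debug TIFFs are visible in an ordinary viewer; with just a handful of labels, the raw values would render as a nearly black image. A minimal sketch of the same trick on a toy label image:

import numpy as np
import mahotas as mh

seeds = np.zeros((64, 64), bool)
seeds[10:20, 10:20] = True
seeds[40:50, 40:50] = True
labels, n = mh.label(seeds)                                # two regions, labelled 1 and 2
mh.imsave('/tmp/labels_debug.tif', (50 * labels).astype(np.uint8))
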
github Rhoana / dojo / _dojo / scripts / finalizesplit.py
for c in i_js:
  s_tile[c[1], c[0]] = 0

label_image,n = mh.label(s_tile)

if n != 3:
  print('ERROR', n)

# check which label was selected
selected_label = label_image[click[1], click[0]]

for c in i_js:
  label_image[c[1], c[0]] = selected_label # the line belongs to the selected label


mh.imsave('/tmp/seg2.tif', 10*label_image.astype(np.uint8))


# update the segmentation data

new_id = 6184


label_image[label_image == 1] = 0 # should be zero then
label_image[label_image == 2] = new_id - label_id

tile = np.add(tile, label_image).astype(np.uint32)


#mh.imsave('/tmp/newtile.tif', tile.astype(np.uint32))

# split tile and save as hdf5
github luispedro / BuildingMachineLearningSystemsWithPython / ch10 / figure5_6.py
# Compute Gaussian filtered versions with increasing kernel widths
im8  = mh.gaussian_filter(image,  8)
im16 = mh.gaussian_filter(image, 16)
im32 = mh.gaussian_filter(image, 32)

# We now build a composite image with three panels:
#
# [ IM8 | | IM16 | | IM32 ]

h, w = im8.shape
canvas = np.ones((h, 3 * w + 256), np.uint8)
canvas *= 255
canvas[:, :w] = im8
canvas[:, w + 128:2 * w + 128] = im16
canvas[:, -w:] = im32
mh.imsave('../1400OS_10_05+.jpg', canvas[:, ::2])

# Threshold the image
# We need to first stretch it to convert to an integer image
im32 = mh.stretch(im32)
ot32 = mh.otsu(im32)

# Convert the boolean mask to a 0/255 np.uint8 image to match the other panels
im255 = 255 * (im32 > ot32).astype(np.uint8)
mh.imsave('../1400OS_10_06+.jpg', im255)
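The canvas above is plain NumPy slicing: allocate a white np.uint8 array wide enough for the three panels plus the gaps between them, copy each filtered image into its column slice, and pass the whole array to mh.imsave. A compact sketch of the same approach with a stand-in image and two panels:

import numpy as np
import mahotas as mh

im = np.random.randint(0, 256, (100, 150)).astype(np.uint8)       # stand-in greyscale image
panels = [mh.stretch(mh.gaussian_filter(im, s)) for s in (2, 8)]  # two blurred versions
gap = 32
h, w = im.shape
canvas = np.full((h, 2 * w + gap), 255, np.uint8)                 # white background
canvas[:, :w] = panels[0]
canvas[:, w + gap:] = panels[1]
mh.imsave('panels.jpeg', canvas)
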
github Rhoana / dojo / _dojo / controller.py
    label_image, n = mh.label(s_tile)

    if n != 3:
      print('ERROR', n)

    # check which label was selected
    selected_label = label_image[click[1], click[0]]

    print('selected', selected_label)

    for c in i_js:
      label_image[c[1], c[0]] = selected_label # the line belongs to the selected label


    mh.imsave('/tmp/seg2.tif', 10*label_image.astype(np.uint8))


    # update the segmentation data

    self.__largest_id += 1
    new_id = self.__largest_id

    # pick the other label: if selected_label is 1, the unselected label is 2, and vice versa

    if selected_label == 1:
      unselected_label = 2
    else:
      unselected_label = 1

    full_coords = np.where(label_image > 0)
    full_bbox = [min(full_coords[1]), min(full_coords[0]), max(full_coords[1]), max(full_coords[0])]