How to use the skimage.io module in skimage

To help you get started, we've selected a few skimage.io examples, based on popular ways it is used in public projects.
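As a quick orientation before the project snippets: the module's core entry points are io.imread and io.imsave, which move images between files and NumPy arrays. A minimal sketch (the file names are placeholders):

from skimage import io

img = io.imread('input.jpg')   # decode an image file into a NumPy array, e.g. shape (H, W, 3)
io.imsave('output.png', img)   # encode the array back to disk; format follows the extension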

github ndrplz / dreyeve / experiments / assessment / create_attentional_videos.py
from os.path import join

import numpy as np
from skimage import io
from skimage.transform import resize

# The listing starts mid-function; the signature below is reconstructed from the
# docstring (the original name is not shown). `dreyeve_root` is a module-level
# path configured elsewhere in the repository.
def read_frame(seq, idx):
    """
    Parameters
    ----------
    seq: int
        the sequence number.
    idx: int
        the frame number.

    Returns
    -------
    np.array
        the image.
    """
    seq_dir = join(dreyeve_root, 'DATA', '{:02d}'.format(seq), 'frames')

    img = io.imread(join(seq_dir, '{:06d}.jpg'.format(idx)))
    # preserve_range=True keeps the original 0-255 scale, so the uint8 cast below stays in range
    img = resize(img, output_shape=(1080 // 2, 1920 // 2), mode='constant', preserve_range=True)

    return np.uint8(img)
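With the reconstructed signature above, reading one half-resolution frame is a single call (the values are illustrative):

frame = read_frame(seq=6, idx=100)  # uint8 array of shape (540, 960, 3)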
github broadinstitute / keras-rcnn / keras_rcnn / datasets / create_malaria.py
import glob
import os

import skimage.io

# The listing starts mid-function; the wrapper name below is reconstructed so the
# snippet runs (get_data is a label-parsing helper defined elsewhere in the repo).
def make_dictionary():
    directory = '/data/research/object-detection/malaria/data/'
    dictionary = []

    for t in ['training', 'validation', 'test']:

        dictionary_t = []

        dir_ = os.path.join(directory, 'images', t)

        image_files = glob.glob(os.path.join(dir_, "*"))

        for image_file in image_files:
            x = {}
            x['filepath'] = os.path.basename(image_file)
            image = skimage.io.imread(image_file)
            # shape is (rows, cols, ...) == (height, width); the original snippet
            # had these two assignments swapped
            x['width'] = image.shape[1]
            x['height'] = image.shape[0]
            basename, imagename = image_file.split('/images/')
            # rsplit('.', 1) so extra dots in a filename are left intact
            label_file = os.path.join(basename, 'labels', imagename.rsplit('.', 1)[0] + '.xml')
            x['bboxes'] = get_data(label_file)

            dictionary_t.append(x)

        dictionary.append(dictionary_t)

    return dictionary
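The wrapper returns one list of per-image dicts per split, in the order given above (using the hypothetical name from the sketch):

training, validation, test = make_dictionary()
print(training[0].keys())  # dict_keys(['filepath', 'width', 'height', 'bboxes'])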
github GuohongLi / face-alignment-pytorch / inference.py
import argparse

import cv2
from skimage import io

# The first lines are cut off in the listing: the parser setup and the
# --modelfile argument below are reconstructed from how they are used, and
# FaceAlignment is this repository's alignment wrapper (its import is elided).
P = argparse.ArgumentParser()
P.add_argument('--modelfile', type=str, required=True, help='face alignment model file')
P.add_argument('--detectmodelfile', type=str, required=True, help='face detect model file')
P.add_argument('--input', type=str, required=True, help='input image file')
args = P.parse_args()
fa = FaceAlignment(modelfilename=args.modelfile, facedetectmodelfile=args.detectmodelfile)
if fa:
    img_in = io.imread(args.input)
    img = img_in
    preds, detected_faces, preds_in_crops, img_crops = fa.get_landmarks(img)
    for k, d in enumerate(detected_faces):
        cv2.rectangle(img_in, (d[0], d[1]), (d[2], d[3]), (255, 255, 255))
        landmark = preds[k]
        for i in range(landmark.shape[0]):
            pts = landmark[i]
            # OpenCV drawing wants integer pixel coordinates; landmarks may be floats
            pt = (int(pts[0]), int(pts[1]))
            cv2.circle(img_in, pt, 5, (0, 255, 0), -1, 8)
            # the color read (255, 2555, 255) in the original, a typo; channels are 0-255
            cv2.putText(img_in, str(i), pt, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))
    io.imsave('res.jpg', img_in)
else:
    print("FaceAlignment init error!")
github TenteEEEE / quiche_pantie_patch / src / models / masscat.py
def __init__(self, body='./body/body_masscat.png', **options):
    super().__init__('量産型のらきゃっと', body=body, pantie_position=[2590, 1047], **options)
    self.mask = io.imread('./mask/mask_masscat.png')
    try:
        self.with_skin = self.options['with_skin']
    except KeyError:  # narrowed from a bare except; only a missing option is expected here
        self.with_skin = self.ask(question='Overlay with skin?', default=True)
    if self.with_skin:
        self.skin = Image.open('./material/skin_masscat.png')
        try:
            self.with_socks = self.options['with_socks']
        except KeyError:
            self.with_socks = self.ask(question='Wear socks?', default=True)
        if self.with_socks:
            try:
                self.is_knee = self.options['is_knee']
            except KeyError:
                self.is_knee = self.ask(question='Knee socks?', default=True)
            if self.is_knee:
                pass  # the listing is truncated here; the knee-socks branch continues in the source file
github barloccia / MASK-CNN-for-actions-recognition- / utils.py
# A Dataset method; requires `import skimage.io` and `import skimage.color` at module level.
def load_image(self, image_id):
    """Load the specified image and return a [H,W,3] Numpy array.
    """
    print(self.image_info[image_id]['path'])
    # Load image
    image = skimage.io.imread(self.image_info[image_id]['path'])
    # If grayscale, convert to RGB for consistency.
    if image.ndim != 3:
        image = skimage.color.gray2rgb(image)
    return image
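gray2rgb simply stacks the single channel three times, so callers can always rely on a (H, W, 3) result:

import numpy as np
import skimage.color

gray = np.zeros((4, 4), dtype=np.uint8)
print(skimage.color.gray2rgb(gray).shape)  # (4, 4, 3)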
github broadinstitute / keras-rcnn / keras_rcnn / data / shape / shape.py
import os
import uuid

import skimage.draw
import skimage.io

# The listing starts mid-function; `groups` and the md5sum helper are defined
# elsewhere in the repository.
r, c = 224, 224

for group in groups:
    dictionaries = []

    for _ in range(256):
        identifier = uuid.uuid4()

        image, objects = skimage.draw.random_shapes((r, c), 32, 2, 32)

        filename = "{}.png".format(identifier)

        pathname = os.path.join("images", filename)

        skimage.io.imsave(pathname, image)

        if os.path.exists(pathname):
            dictionary = {
                "image": {
                    "checksum": md5sum(pathname),
                    "pathname": pathname,
                    "shape": {
                        "r": r,
                        "c": c,
                        "channels": 3
                    }
                },
                "objects": []
            }

            for category, (bounding_box_r, bounding_box_c) in objects:
                pass  # the listing is truncated here; the loop body continues in the source file
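The unpacking in that last loop mirrors what skimage.draw.random_shapes returns: alongside the image, a list of (label, ((r0, r1), (c0, c1))) pairs giving each shape's category and its bounding rows and columns. A quick standalone check:

import skimage.draw

image, labels = skimage.draw.random_shapes((128, 128), max_shapes=4, min_shapes=2, min_size=16)
for category, (bounding_box_r, bounding_box_c) in labels:
    print(category, bounding_box_r, bounding_box_c)  # e.g. circle (12, 40) (57, 93)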
github muggin / cbir-system / scripts / evaluation.py
# Fragment from deep inside nested evaluation loops; plt, io, os, K, avg_pre,
# recall, precision, relevance, cumsum_relevance, ranked_results, data_path,
# a, b and mean_avg_pre are all defined further up in the source file.
        fig = plt.subplot(2, 2, 2)
        fig.set_title("Evolution of recall and precision - P@{}={}".format(K, avg_pre[i]))
        ax = fig.axes
        ax.plot(recall, color='Red')
        ax.set_ylabel('Recall', color='Red')
        ax.tick_params(axis='y', colors='Red')
        ax = fig.axes.twinx()  # second y-axis so precision and recall share the x-axis
        ax.plot(precision, color='Blue')
        ax.set_ylabel('Precision', color='Blue')
        ax.tick_params(axis='y', colors='Blue', direction='out')

        # note: this loop shadows the outer index `i` used in the title above
        for i in range(10):
            fig = plt.subplot(4, 5, 11 + i)
            fig.set_title("result " + str(i + 1))
            fig = plt.imshow(io.imread(os.path.join(data_path, ranked_results[i][0])))
            fig.axes.get_xaxis().set_visible(False)
            fig.axes.get_yaxis().set_visible(False)

        # parenthesized so the prints are valid in both Python 2 and 3
        print(relevance)
        print(cumsum_relevance)
        print(precision)
        print(recall)

        plt.show()

    mean_avg_pre[a, b] = avg_pre.mean()

print(mean_avg_pre)
mean_avg_pre.tofile('map.csv', sep=',', format='%10.8f')
github awslabs / amazon-sagemaker-examples / sagemaker-python-sdk / mxnet_gluon_cifar10 / cifar10_utils.py
import numpy as np
from skimage import io

def read_image(filename):
    img = io.imread(filename)
    # HWC -> CHW, then add a leading batch dimension: (1, C, H, W)
    img = np.array(img).transpose(2, 0, 1)
    img = np.expand_dims(img, axis=0)

    return img
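For a 32x32 RGB CIFAR-style input this produces the NCHW layout the Gluon model expects (the path is a placeholder):

batch = read_image('some_image.png')
print(batch.shape)  # (1, 3, 32, 32) for a 32x32 RGB image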
github visionjo / FIW_KRT / src / scripts / dlib_cnn_face_detect_align.py
# '''
# Detector returns a mmod_rectangles object containing a list of mmod_rectangle objects, which are accessed by
#  iterating over the mmod_rectangles object. mmod_rectangle has 2 members, dlib.rectangle object & confidence score.
#
# It is possible to pass a list of images to the detector.
#     - like this: dets = cnn_face_detector([image list], upsample_num, batch_size = 128)
# In this case it will return a mmod_rectangless object.
# This object behaves just like a list of lists and can be iterated over.
# '''
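# A minimal sketch of that batched call, assuming `cnn_face_detector` is a
# dlib.cnn_face_detection_model_v1 and `image_list` is a (hypothetical) list of
# already-loaded images:
#
#     all_dets = cnn_face_detector(image_list, 1, batch_size=128)
#     for dets_per_image in all_dets:
#         for d in dets_per_image:
#             print(d.rect, d.confidence)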
predictor_path = "shape_predictor_68_face_landmarks.dat"
sp = dlib.shape_predictor(predictor_path)
print("Number of faces detected: {}".format(len(dets)))
counter = 0
for faces, prefix in zip(dets, f_prefix):
    # The original snippet called io.imread(counter), passing an int where a file
    # path is expected; an indexable list of image paths (the hypothetical
    # `image_paths`, built further up in the script) is presumably intended.
    img = io.imread(image_paths[counter])
    for i, d in enumerate(faces):
        f_name = prefix + str(i)
        df.loc[counter] = [fids[counter], pids[counter], i, f_name, d.rect.left(), d.rect.top(), d.rect.right(),
                           d.rect.bottom(), d.confidence]
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {} Confidence: {}".format(
            i, d.rect.left(), d.rect.top(), d.rect.right(), d.rect.bottom(), d.confidence))
        shape = sp(img, d)
        dlib.save_face_chip(img, shape, dir_det_out + f_name + ".jpg")

    counter += 1

df.to_csv("dnn_face_detections_bb_2.csv")
github pavelgonchar / neural-art-mini / run.py
import numpy as np
from skimage import io, transform

def PreprocessStyleImage(path, shape):
    img = io.imread(path)
    # transform.resize returns floats in [0, 1]; scale back up to roughly 0-255
    resized_img = transform.resize(img, (shape[2], shape[3]))
    sample = np.asarray(resized_img) * 256
    # HWC -> CHW via two axis swaps
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)

    # subtract the standard VGG/ImageNet per-channel RGB means
    sample[0, :] -= 123.68
    sample[1, :] -= 116.779
    sample[2, :] -= 103.939
    # element count is unchanged, so np.resize acts as a reshape to (1, 3, H, W)
    return np.resize(sample, (1, 3, sample.shape[1], sample.shape[2]))
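Called with the network's input blob shape, it returns a ready-to-feed array (the file name is a placeholder):

style = PreprocessStyleImage('style.jpg', shape=(1, 3, 512, 512))
print(style.shape)  # (1, 3, 512, 512)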