How to use the pulse2percept.utils.Watson2014Transform.ret2dva function in pulse2percept

To help you get started, we've selected a few pulse2percept examples based on popular ways the library is used in public projects.

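In its simplest form, Watson2014Transform.ret2dva maps retinal distances in micrometers to degrees of visual angle (dva) using the fits from Watson (2014); near the fovea, one degree corresponds to roughly 280 um on the retina. A minimal sketch of direct usage (exact return values depend on the installed pulse2percept version):

from pulse2percept.utils import Watson2014Transform

# Convert retinal distances (um) to degrees of visual angle.
# Scalars and array-likes are both accepted:
print(Watson2014Transform.ret2dva(280.0))            # close to 1 degree
print(Watson2014Transform.ret2dva([0, 1000, 5000]))  # elementwise conversion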

From pulse2percept/pulse2percept, pulse2percept/models/beyeler2019.py (view on GitHub):
def ret2dva(xret):
    """Convert retinal coords (um) to degrees of visual angle (dva)"""
    return Watson2014Transform.ret2dva(xret)
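
beyeler2019.py defines a matching dva2ret wrapper as well, so the two directions can be composed. A short round trip, assuming a dva2ret static method provided alongside ret2dva (the two polynomial fits are close but not exact inverses):

from pulse2percept.utils import Watson2014Transform

x_deg = Watson2014Transform.ret2dva(1500.0)  # retinal um -> dva
x_um = Watson2014Transform.dva2ret(x_deg)    # dva -> um, close to 1500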
From pulse2percept/pulse2percept, pulse2percept/viz/argus.py (view on GitHub):
    if isinstance(argus, ArgusI):
        px_argus = PX_ARGUS1
        img_argus = imread(PATH_ARGUS1)
    else:
        px_argus = PX_ARGUS2
        img_argus = imread(PATH_ARGUS2)

    # To simulate an implant in a left eye, flip the image left-right (along
    # with the electrode x-coordinates):
    if argus.eye == 'LE':
        img_argus = np.fliplr(img_argus)
        px_argus[:, 0] = img_argus.shape[1] - px_argus[:, 0] - 1

    # Add some padding to the output image so the array is not cut off:
    pad = 2000  # microns
    x_el = [e.x for e in argus.values()]
    y_el = [e.y for e in argus.values()]
    x_min = Watson2014Transform.ret2dva(np.min(x_el) - pad)
    x_max = Watson2014Transform.ret2dva(np.max(x_el) + pad)
    y_min = Watson2014Transform.ret2dva(np.min(y_el) - pad)
    y_max = Watson2014Transform.ret2dva(np.max(y_el) + pad)

    # Coordinate transform from degrees of visual angle to output, and from
    # image coordinates to output image:
    pts_in = []
    pts_dva = []
    pts_out = []
    out_shape = X.image.values[0].shape
    for xy, e in zip(px_argus, argus.values()):
        x_dva, y_dva = Watson2014Transform.ret2dva([e.x, e.y])
        x_out = (x_dva - x_min) / (x_max - x_min) * (out_shape[1] - 1)
        y_out = (y_dva - y_min) / (y_max - y_min) * (out_shape[0] - 1)
        pts_in.append(xy)
        pts_dva.append([x_dva, y_dva])
        pts_out.append([x_out, y_out])
    pts_in = np.array(pts_in)
    pts_dva = np.array(pts_dva)
    pts_out = np.array(pts_out)
    dva2out = img_transform('similarity', pts_dva, pts_out)
    argus2out = img_transform('similarity', pts_in, pts_out)

    # Top left, top right, bottom left, bottom right corners:
    x_range = X.img_x_dva
    y_range = X.img_y_dva
    pts_dva = [[x_range[0], y_range[0]], [x_range[0], y_range[1]],
               [x_range[1], y_range[0]], [x_range[1], y_range[1]]]

    # Calculate average drawings, but don't binarize:
    all_imgs = np.zeros(out_shape)
    num_imgs = X.groupby('electrode')['image'].count()
    for _, row in X.iterrows():
        e_pos = Watson2014Transform.ret2dva((argus[row['electrode']].x,
                                             argus[row['electrode']].y))
        align_center = dva2out(e_pos)[0]
        img_drawing = scale_phosphene(row['image'], scale)
        img_drawing = center_phosphene(img_drawing, loc=align_center)
        # We normalize by the number of phosphenes per electrode, so that if
        # all phosphenes are the same, their brightness would add up to 1:
        all_imgs += 1.0 / num_imgs[row['electrode']] * img_drawing
    all_imgs = 1 - all_imgs

    # Draw array schematic with specific alpha level:
    img_arr = img_warp(img_argus, argus2out.inverse, cval=1.0,
                       output_shape=out_shape)
    img_arr[:, :, 3] = alpha_bg

    # Replace pixels where drawings are dark enough, set alpha=1:
    rr, cc = np.unravel_index(np.where(all_imgs.ravel() < thresh_fg)[0],
                              all_imgs.shape)
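
The argus.py example above follows a reusable pattern: ret2dva converts the electrode bounding box (plus padding in microns) into degrees of visual angle, and matched point lists are then used to fit similarity transforms between the dva, implant-photo, and output-image coordinate frames. The img_transform call is presumably scikit-image's estimate_transform imported under an alias; a minimal sketch of that step, with made-up points:

import numpy as np
from skimage.transform import estimate_transform

# Hypothetical matched points: electrode positions in degrees of visual
# angle (source frame) and in output-image pixels (destination frame):
pts_dva = np.array([[-5.0, -3.0], [5.0, -3.0], [-5.0, 3.0], [5.0, 3.0]])
pts_out = np.array([[10.0, 20.0], [200.0, 20.0], [10.0, 150.0], [200.0, 150.0]])

# Fit a similarity transform (rotation, uniform scale, translation):
dva2out = estimate_transform('similarity', pts_dva, pts_out)

# Map the dva origin into output-image pixel coordinates:
print(dva2out(np.array([[0.0, 0.0]])))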