How to use the plantcv.plantcv.params object in plantcv

To help you get started, we’ve selected a few plantcv examples, based on popular ways it is used in public projects.
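All of the examples below read or update attributes of the shared pcv.params object rather than calling it: debug and debug_outdir control debug output, device numbers the debug files, and line_thickness sizes drawn annotations. A minimal sketch of configuring it before running a workflow, with placeholder paths:

from plantcv import plantcv as pcv

# "plot" displays intermediate images, "print" writes them to debug_outdir,
# and None (the default) disables debug output entirely.
pcv.params.debug = "print"
pcv.params.debug_outdir = "./debug_images"   # placeholder output directory

# Thickness (in pixels) used when debug images draw contours and rectangles.
pcv.params.line_thickness = 5

# Each plantcv call auto-increments params.device and uses it to prefix its
# debug image filenames, keeping the files in processing order.
print(pcv.params.device)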


github danforthcenter / plantcv / plantcv / plantcv / hyperspectral / extract_wavelength.py View on Github
    wl_dict = spectral_data.wavelength_dict
    print("The closest band found to " + str(wavelength) + spectral_data.wavelength_units + " is: " +
          str(list(wl_dict.keys())[band_index]))

    # Reshape
    index_array_raw = spectral_data.array_data[:, :, [band_index]]
    index_array_raw = np.transpose(np.transpose(index_array_raw)[0])

    # The resulting array is float32 from -1 to 1; transform it to uint8 for plotting
    all_positive = np.add(index_array_raw, np.ones(np.shape(index_array_raw)))
    normalized = all_positive.astype(np.float64) / 2  # normalize the data to 0 - 1
    index_array = (255 * normalized).astype(np.uint8)  # scale to 255

    # Plot out grayscale image

    if params.debug == "plot":
        plot_image(index_array)
    elif params.debug == "print":
        print_image(index_array,
                    os.path.join(params.debug_outdir, str(params.device) + str(wavelength) + "_index.png"))

    # Make a spectral data instance
    index_array = Spectral_data(array_data=index_array_raw, max_wavelength=wavelength,
                                min_wavelength=wavelength, d_type=np.uint8,
                                wavelength_dict={}, samples=spectral_data.samples,
                                lines=spectral_data.lines, interleave=spectral_data.interleave,
                                wavelength_units=spectral_data.wavelength_units,
                                array_type="index_" + str(wavelength),
                                pseudo_rgb=None, filename=spectral_data.filename, default_bands=None)

    return index_array
github danforthcenter / plantcv / plantcv / plantcv / crop.py View on Github
       x         = X coordinate of starting point
       y         = Y coordinate of starting point
       h         = Height
       w         = Width

       Returns:
       cropped   = cropped image

       :param img: numpy.ndarray
       :param x: int
       :param y: int
       :param h: int
       :param w: int
       :return cropped: numpy.ndarray
       """
    params.device += 1

    # Check the array data format; multi-band (hyperspectral) arrays use the first band as a 2-D reference image for debugging
    if len(np.shape(img)) > 2 and np.shape(img)[-1] > 3:
        ref_img = img[:, :, [0]]
        ref_img = np.transpose(np.transpose(ref_img)[0])
        cropped = img[y:y + h, x:x + w, :]
    else:
        ref_img = np.copy(img)
        cropped = img[y:y + h, x:x + w]

    # Create the rectangle contour vertices
    pt1 = (x, y)
    pt2 = (x + w - 1, y + h - 1)

    ref_img = cv2.rectangle(img=ref_img, pt1=pt1, pt2=pt2, color=(255, 0, 0), thickness=params.line_thickness)
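The excerpt above is the body of pcv.crop. A minimal, hedged usage sketch with a placeholder image path and coordinates, showing the params attributes it relies on:

from plantcv import plantcv as pcv

pcv.params.debug = "plot"        # the debug image outlines the crop region
pcv.params.line_thickness = 5    # thickness of that outline

img, path, filename = pcv.readimage(filename="plant.png")   # placeholder path
# Crop a 300 x 400 px region whose top-left corner is at (x=100, y=50)
cropped = pcv.crop(img=img, x=100, y=50, h=300, w=400)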
github danforthcenter / plantcv / plantcv / plantcv / naive_bayes_classifier.py View on Github
    # Initialize empty masks
    masks = {}
    for class_name in pdfs.keys():
        masks[class_name] = np.zeros([width, height], dtype=np.uint8)
    # Set pixel intensities to 255 (white) for the mask where the class has the highest probability
    for class_name in masks:
        background_classes = []
        for name in masks:
            if class_name != name:
                background_classes.append(px_p[name])
        background_class = np.maximum.reduce(background_classes)
        masks[class_name][np.where(px_p[class_name] > background_class)] = 255
    # mask[np.where(plant > bg)] = 255

    # Print or plot the mask if debug is not None
    if params.debug == "print":
        for class_name, mask in masks.items():
            print_image(mask, os.path.join(params.debug_outdir,
                                           str(params.device) + "_naive_bayes_" + class_name + "_mask.jpg"))
    elif params.debug == "plot":
        for class_name, mask in masks.items():
            plot_image(mask, cmap="gray")

    return masks
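This excerpt is from pcv.naive_bayes_classifier, which returns one binary mask per trained class. A hedged usage sketch; the image path and the PDF file (produced by plantcv's naive Bayes training step) are placeholders, and the class name "plant" is assumed:

from plantcv import plantcv as pcv

pcv.params.debug = "print"                   # writes "<device>_naive_bayes_<class>_mask.jpg"
pcv.params.debug_outdir = "./debug_images"   # placeholder output directory

img, path, filename = pcv.readimage(filename="plant.png")   # placeholder input image
masks = pcv.naive_bayes_classifier(rgb_img=img, pdf_file="naive_bayes_pdfs.txt")
plant_mask = masks["plant"]                  # assumes a class named "plant" was trained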
github danforthcenter / plantcv / plantcv / plantcv / sobel_filter.py View on Github
    gray_img = Grayscale image data
    dx       = derivative of x to analyze
    dy       = derivative of y to analyze
    ksize        = specifies the size of the kernel (must be an odd integer: 1,3,5, ... , 31)

    Returns:
    sb_img   = Sobel filtered image

    :param gray_img: numpy.ndarray
    :param dx: int
    :param dy: int
    :param ksize: int
    :return sb_img: numpy.ndarray
    """
    params.device += 1
    sb_img = cv2.Sobel(src=gray_img, ddepth=-1, dx=dx, dy=dy, ksize=ksize)

    if params.debug == 'print':
        name = os.path.join(params.debug_outdir,
                            str(params.device) + '_sb_img_dx' + str(dx) + '_dy' + str(dy) + '_kernel' + str(ksize) + '.png')
        print_image(sb_img, name)
    elif params.debug == 'plot':
        plot_image(sb_img, cmap='gray')
    return sb_img
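A hedged usage sketch of pcv.sobel_filter, whose body is shown above; the image path is a placeholder, and the debug file names follow the pattern built in the excerpt:

from plantcv import plantcv as pcv

pcv.params.debug = "print"                   # saves e.g. "<device>_sb_img_dx1_dy0_kernel3.png"
pcv.params.debug_outdir = "./debug_images"   # placeholder output directory

gray_img, path, filename = pcv.readimage(filename="plant.png", mode="gray")  # placeholder path
sb_x = pcv.sobel_filter(gray_img=gray_img, dx=1, dy=0, ksize=3)  # first derivative in x
sb_y = pcv.sobel_filter(gray_img=gray_img, dx=0, dy=1, ksize=3)  # first derivative in y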
github danforthcenter / plantcv / plantcv / plantcv / morphology / segment_angle.py View on Github
        text = "{:.2f}".format(segment_angles[i])
        cv2.putText(img=labeled_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=.55, color=(150, 150, 150), thickness=2)
        segment_label = "ID" + str(i)
        angle_header.append(segment_label)
    angle_data.extend(segment_angles)

    if 'morphology_data' not in outputs.measurements:
        outputs.measurements['morphology_data'] = {}
    outputs.measurements['morphology_data']['segment_angles'] = segment_angles

    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(labeled_img, os.path.join(params.debug_outdir, str(params.device) + '_segmented_angles.png'))
    elif params.debug == 'plot':
        plot_image(labeled_img)

    return angle_header, angle_data, labeled_img
github danforthcenter / plantcv / plantcv / plantcv / auto_crop.py View on Github
    padding_x = padding in the x direction
    padding_y = padding in the y direction
    color     = either 'black', 'white', or 'image'

    Returns:
    cropped   = cropped image

    :param img: numpy.ndarray
    :param obj: list
    :param padding_x: int
    :param padding_y: int
    :param color: str
    :return cropped: numpy.ndarray
    """

    params.device += 1
    img_copy = np.copy(img)
    img_copy2 = np.copy(img)

    x, y, w, h = cv2.boundingRect(obj)
    cv2.rectangle(img_copy, (x, y), (x + w, y + h), (0, 255, 0), 5)

    crop_img = img[y:y + h, x:x + w]

    offsetx = int(np.rint(padding_x))
    offsety = int(np.rint(padding_y))

    if color.upper() == 'BLACK':
        colorval = (0, 0, 0)
        cropped = cv2.copyMakeBorder(crop_img, offsety, offsety, offsetx, offsetx, cv2.BORDER_CONSTANT, value=colorval)
    elif color.upper() == 'WHITE':
        colorval = (255, 255, 255)
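A hedged usage sketch of pcv.auto_crop (plantcv 3.x API); the image and mask paths are placeholders, and the object contour is assumed to come from the usual find_objects/object_composition steps:

from plantcv import plantcv as pcv

pcv.params.debug = "print"
pcv.params.debug_outdir = "./debug_images"   # placeholder output directory

img, path, filename = pcv.readimage(filename="plant.png")                    # placeholder RGB image
mask, mpath, mname = pcv.readimage(filename="plant_mask.png", mode="gray")   # placeholder binary mask

objects, hierarchy = pcv.find_objects(img=img, mask=mask)
obj, obj_mask = pcv.object_composition(img=img, contours=objects, hierarchy=hierarchy)

# Crop to the object's bounding box with 20 px of black padding on each side
cropped = pcv.auto_crop(img=img, obj=obj, padding_x=20, padding_y=20, color="black")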
github danforthcenter / plantcv / plantcv / plantcv / hyperspectral / apply_mask_spectral.py View on Github
    # Take three wavelengths: the first, middle, and last available bands
    num_bands = np.shape(array)[2]
    med_band = int(num_bands / 2)
    pseudo_rgb = cv2.merge((array_data[:, :, [0]],
                            array_data[:, :, [med_band]],
                            array_data[:, :, [num_bands-1]]))

    # Gamma correct pseudo_rgb image
    pseudo_rgb = pseudo_rgb ** (1 / 2.2)

    if params.debug == "plot":
        # Plot the gamma-corrected pseudo-RGB image
        plot_image(pseudo_rgb)
    elif params.debug == "print":
        print_image(pseudo_rgb, os.path.join(params.debug_outdir, str(params.device) + "_masked_spectral.png"))

    return array_data
github danforthcenter / plantcv / plantcv / plantcv / morphology / find_branch_pts.py View on Github
    cv2.drawContours(branch_plot, skel_obj, -1, (150, 150, 150), params.line_thickness, lineType=8,
                         hierarchy=skel_hier)

    branch_objects, _ = find_objects(branch_pts_img, branch_pts_img)
    for i in branch_objects:
        x, y = i.ravel()[:2]
        cv2.circle(branch_plot, (x, y), params.line_thickness, (255, 0, 255), -1)

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(branch_plot, os.path.join(params.debug_outdir, str(params.device) + '_skeleton_branches.png'))
    elif params.debug == 'plot':
        plot_image(branch_plot)

    return branch_pts_img
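A hedged usage sketch of pcv.morphology.find_branch_pts, whose debug-drawing code is shown above; the mask path is a placeholder and the skeleton comes from plantcv's skeletonize step:

from plantcv import plantcv as pcv

pcv.params.debug = "print"
pcv.params.debug_outdir = "./debug_images"   # placeholder output directory
pcv.params.line_thickness = 3                # sizes the drawn skeleton and branch points above

mask, path, filename = pcv.readimage(filename="plant_mask.png", mode="gray")  # placeholder binary mask
skeleton = pcv.morphology.skeletonize(mask=mask)
branch_pts_img = pcv.morphology.find_branch_pts(skel_img=skeleton)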
github danforthcenter / plantcv / plantcv / plantcv / cluster_contour_splitimg.py View on Github
(likely list of genotypes)

    Returns:
    output_path             = array of paths to output images

    :param rgb_img: numpy.ndarray
    :param grouped_contour_indexes: list
    :param contours: list
    :param hierarchy: numpy.ndarray
    :param outdir: str
    :param file: str
    :param filenames: str
    :return output_path: str
    """

    params.device += 1

    sys.stderr.write(
        'This function has been updated to include object hierarchy so object holes can be included\n')

    # get names to split also to check the target number of objects

    i = datetime.now()
    timenow = i.strftime('%m-%d-%Y_%H:%M:%S')

    if file is None:
        filebase = timenow
    else:
        filebase = os.path.splitext(file)[0]

    if filenames is None:
        l = len(grouped_contour_indexes)
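A hedged usage sketch of pcv.cluster_contour_splitimg (plantcv 3.x API, matching the single return value documented in the excerpt above); the image and mask paths, the grid shape, and the output directory are placeholders, and the clustering inputs come from pcv.cluster_contours:

from plantcv import plantcv as pcv

pcv.params.debug = "print"
pcv.params.debug_outdir = "./debug_images"   # placeholder output directory

img, path, filename = pcv.readimage(filename="tray.png")                     # placeholder multi-plant image
mask, mpath, mname = pcv.readimage(filename="tray_mask.png", mode="gray")    # placeholder binary mask

# Detect objects and cluster them into a 4 x 6 grid of individual plants
objects, hierarchy = pcv.find_objects(img=img, mask=mask)
clusters, contours, hierarchies = pcv.cluster_contours(img=img, roi_objects=objects,
                                                       roi_obj_hierarchy=hierarchy,
                                                       nrow=4, ncol=6)

# Write one cropped image per cluster and collect the output paths
output_paths = pcv.cluster_contour_splitimg(rgb_img=img,
                                            grouped_contour_indexes=clusters,
                                            contours=contours,
                                            hierarchy=hierarchies,
                                            outdir="./output",
                                            file=filename,
                                            filenames=None)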