# PPDA2 / app.py
# NOTE(review): the lines above/below were Hugging Face Space page residue
# ("yashzambre's picture / Update app.py / 925d3e6") accidentally pasted into
# the source; kept here as a comment so the module parses.
import gradio as gr
from PIL import Image
import torch
import numpy as np
import cv2
from plantcv import plantcv as pcv
from skimage.feature import local_binary_pattern
from io import BytesIO
from skimage.feature import hog
import base64 # import the base64 module
import openpyxl
import pandas as pd
from gradio import themes
import gradio as gr
# Custom Gradio theme: violet primary / green secondary hues, with the
# dark-mode body background tied to the checkbox-label fill colour token.
theme = gr.themes.Base(primary_hue="violet", secondary_hue="green").set(
    body_background_fill_dark='*checkbox_label_background_fill',
)
def image_processing(image, input_type, input_choice):
    """Apply the selected morphology / feature-extraction operation to an image.

    Parameters
    ----------
    image : PIL.Image.Image or ndarray
        Input image from the Gradio widget (assumed RGB — TODO confirm).
    input_type : str
        Operation chosen in the first dropdown ("Tips", "Branches", "Both",
        "sort", "sift transform", "lbp transform", "hog transform",
        "compute all").
    input_choice : str
        Sub-operation; only consulted when ``input_type == "compute all"``.
        The "compute all" variants run the same operation but skip the
        ``.npz`` feature dump that the standalone variants perform.

    Returns
    -------
    PIL.Image.Image
        The processed image for display in Gradio.

    Raises
    ------
    ValueError
        If the requested operation / sub-choice is not recognized (the
        original code hit a NameError on ``output_image`` instead).
    """
    array = np.array(image).astype(np.float32)
    gray_img = cv2.cvtColor(array, cv2.COLOR_RGB2GRAY)

    # Fold the "compute all" sub-choices onto the primary operations so both
    # dropdowns share one dispatch path; only the standalone variants save
    # their features to .npz files (matches the original behavior).
    if input_type == "compute all":
        operation = {
            "compute_tips": "Tips",
            "compute_branches": "Branches",
            "compute_both": "Both",
            "compute_sift": "sift transform",
            "compute_lbp": "lbp transform",
            "compute_hog": "hog transform",
        }.get(input_choice)
        if operation is None:
            raise ValueError(f"Unknown 'compute all' choice: {input_choice!r}")
        save_features = False
    else:
        operation = input_type
        save_features = True

    if operation in ("Tips", "Branches", "Both"):
        # NOTE(review): skeletonize expects a binary mask but receives a
        # float grayscale image here, as in the original — verify upstream.
        skel = pcv.morphology.skeletonize(mask=gray_img)
        if operation == "Tips":
            output_image = pcv.morphology.find_tips(skel_img=skel, mask=None, label="default")
        elif operation == "Branches":
            output_image = pcv.morphology.find_branch_pts(skel_img=skel, mask=None, label="default")
        else:
            tips = pcv.morphology.find_tips(skel_img=skel, mask=None, label="default")
            branches = pcv.morphology.find_branch_pts(skel_img=skel, mask=None, label="default")
            output_image = np.zeros_like(skel)
            output_image[tips > 0] = 255      # tips rendered white
            output_image[branches > 0] = 128  # branch points rendered gray
    elif operation == "sort":
        skel = pcv.morphology.skeletonize(mask=gray_img)
        # BUG FIX: the original never assigned output_image in this branch
        # (NameError at Image.fromarray); show the pruned skeleton instead.
        output_image, edge_objects = pcv.morphology.prune(skel_img=skel, size=70, mask=None)
    elif operation == "sift transform":
        skel = pcv.morphology.skeletonize(mask=gray_img)
        sift = cv2.SIFT_create()
        kp, des = sift.detectAndCompute(skel, None)
        # BUG FIX: the third positional argument of drawKeypoints is the
        # *output image*, not the descriptor matrix; pass None so OpenCV
        # allocates it.
        output_image = cv2.drawKeypoints(skel, kp, None)
        if save_features:
            np.savez('sift_descriptors.npz', descriptors=des)
    elif operation == "lbp transform":
        radius = 1            # LBP feature radius
        n_points = 8 * radius  # number of LBP sampling points
        output_image = local_binary_pattern(gray_img, n_points, radius)
        if save_features:
            # Persist the LBP map as a NumPy array in .npz format.
            np.savez('lbp_transform.npz', lbp=output_image)
    elif operation == "hog transform":
        # BUG FIX: the original passed both multichannel=False and
        # channel_axis=-1, which contradict each other (and channel_axis=-1
        # is wrong for a 2-D input); gray_img is 2-D, so neither is needed.
        fd, output_image = hog(gray_img, orientations=10, pixels_per_cell=(16, 16),
                               cells_per_block=(1, 1), visualize=True)
        if save_features:
            np.savez('hog_transform.npz', hog=output_image)
    else:
        raise ValueError(f"Unknown operation: {operation!r}")

    # PIL cannot build an image from float64 data (LBP/HOG results are
    # float64); down-cast so fromarray yields a mode-'F' image, which the
    # existing branch below converts to RGB for Gradio display.
    output_image = np.asarray(output_image)
    if output_image.dtype == np.float64:
        output_image = output_image.astype(np.float32)
    img2 = Image.fromarray(output_image)
    if img2.mode == 'F':
        img2 = img2.convert('RGB')
    return img2
# HTML banner shown above the interface: the facility logo (linked to the
# facility home page), a one-line description, and a titled link back to the
# Texas A&M plant growth / phenotyping data-analysis pipeline.
body = "".join([
    "<center>",
    "<a href='https://precisiongreenhouse.tamu.edu/'>"
    "<img src='https://peepleslab.engr.tamu.edu/wp-content/uploads/sites/268/2023/04/AgriLife_Logo-e1681857158121.png' width=1650></a>",
    "<br>",
    "This demo extracts the plant statistics and the image features and also stores them. ",
    "<br>",
    "<a href ='https://precisiongreenhouse.tamu.edu/'>The Texas A&M Plant Growth and Phenotyping Facility Data Analysis Pipeline</a>",
    "</center>",
])
#examples = [["img1.png"],["img2.png"],["img3.png"]]
# BUG FIX: the original mixed removed Gradio 2 APIs (gr.inputs.Image,
# gr.outputs.Image, layout=, allow_screenshot=, allow_flagging=False) with
# Gradio 3+ APIs already used in this file (gr.themes.Base,
# gr.components.Dropdown), which fails on any Gradio version that provides
# gr.themes. Use the modern component classes and allow_flagging="never".
iface = gr.Interface(
    fn=image_processing,
    inputs=[
        gr.Image(label="Input Image"),
        gr.Dropdown(
            ["Tips", "Branches", "Both", "sort", "sift transform",
             "lbp transform", "hog transform", "compute all"],
            label="Choose the operation to be performed",
        ),
        gr.Dropdown(
            ["compute_branches", "compute_tips", "compute_both",
             "compute_sift", "compute_lbp", "compute_hog"],
            label="choose from compute all",
        ),
    ],
    outputs=gr.Image(type="pil", label="Processed Image"),
    #title="Plant Phenotyping and Data Analytics",
    description=body,
    allow_flagging="never",
    theme=theme,
    #examples=examples
)
iface.launch()