# Hugging Face Space: Plant Segmentation App for Cucumbers (Streamlit).
import os

import numpy as np
import streamlit as st
from huggingface_hub import hf_hub_download
from PIL import Image
from ultralytics import YOLO
def preprocess_image(image):
    """Convert an uploaded PIL image into an RGB NumPy array for YOLO inference.

    Generalized from the original RGBA-only check: ANY non-RGB mode
    (RGBA, palette "P", grayscale "L", CMYK, ...) is converted, so the
    models always receive a 3-channel array.

    Args:
        image: a PIL.Image.Image (anything exposing ``.mode``, ``.convert``,
            and array conversion works).

    Returns:
        numpy.ndarray holding the image pixels in RGB.
    """
    if image.mode != "RGB":
        # Without this, palette/grayscale/alpha uploads would reach the
        # models with the wrong channel count.
        image = image.convert("RGB")
    return np.array(image)
# HF access token; the Space stores it in a secret named "second".
hf_token = os.getenv("second")


@st.cache_resource
def _load_models(token):
    """Download and construct both YOLO models once per server process.

    Streamlit reruns the whole script on every widget interaction; without
    ``st.cache_resource`` the weights would be re-fetched and the models
    rebuilt on each rerun.

    Args:
        token: Hugging Face access token (or None for anonymous access).

    Returns:
        Tuple ``(organ_model, leaf_model)`` of loaded YOLO models.
    """
    organ_path = hf_hub_download(
        repo_id="majidsigrow/organ", filename="organ1.pt", token=token
    )
    leaf_path = hf_hub_download(
        repo_id="majidsigrow/leaf", filename="leaf.pt", token=token
    )
    return YOLO(organ_path), YOLO(leaf_path)


model1, model2 = _load_models(hf_token)

# Class-index -> label lookup tables for each model's detections.
model1_names = ['Cucumber Flowers', 'Cucumber Fruits', 'Cucumber Plant Head', 'Dry Leaf']
model2_names = ['Cucumber Leaves']
# --- Streamlit page: title, description, and uploader -----------------------
st.title("Plant Segmentation App for Cucumbers")
st.write("Upload an image, and the models will segment plant organs and leaves.")
st.write("Draft 1")


def _show_segmentation(results, class_names, image_caption, counts_header):
    """Render one model's segmentation overlay and per-class detection counts.

    Extracted because the original repeated this logic verbatim for both
    models.

    Args:
        results: list returned by ``YOLO.predict`` (first element is used).
        class_names: index -> label list for this model's classes.
        image_caption: caption shown under the rendered overlay.
        counts_header: header line written before the class counts.
    """
    st.image(results[0].plot(labels=False), caption=image_caption,
             use_container_width=True)
    # Class indices of every detected box, as a NumPy array on the CPU.
    class_ids = results[0].boxes.cls.cpu().numpy()
    unique_classes, counts = np.unique(class_ids, return_counts=True)
    st.write(counts_header)
    for cls, count in zip(unique_classes, counts):
        st.write(f"{class_names[int(cls)]}: {count}")


uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_container_width=True)
    st.write("Processing...")
    # RGB NumPy array for inference (handles alpha/palette uploads).
    input_image = preprocess_image(image)

    # Model 1: small-organ detector. Very low conf + high max_det because
    # small organs are numerous and faint at 1920 px.
    st.write("Processing with small organ model ...")
    results1 = model1.predict(
        source=input_image,
        save=False,
        imgsz=1920,
        rect=True,
        conf=0.02,
        iou=0.4,
        max_det=2000,
    )
    _show_segmentation(results1, model1_names,
                       "Segmented Image (Small Organ Model)",
                       "Class Counts (Small Organ Model):")

    # Model 2: leaf detector. Leaves are large, so a lower resolution and a
    # stricter confidence threshold suffice.
    st.write("Processing with Leaves Model ...")
    results2 = model2.predict(
        source=input_image,
        save=False,
        imgsz=960,
        rect=True,
        conf=0.35,
        iou=0.15,
        max_det=200,
    )
    _show_segmentation(results2, model2_names,
                       "Segmented Image (Leaves Model)",
                       "Class Counts (Leaves Model):")

    # NOTE(review): toggling this checkbox reruns the whole script, which
    # re-runs inference before saving — acceptable here, but a session-state
    # cache of the results would avoid it.
    save_option = st.checkbox("Save Results")
    if save_option:
        save_path1 = "segmented_output_model1.jpg"
        save_path2 = "segmented_output_model2.jpg"
        results1[0].save(save_path1)
        results2[0].save(save_path2)
        st.write(f"Results saved as {save_path1} and {save_path2}")
    st.write("Done!")