Esmaeilkiani committed on
Commit
d281fdb
·
verified ·
1 Parent(s): c113e69

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -41
app.py CHANGED
@@ -1,44 +1,45 @@
1
- import streamlit as st
2
- from PIL import Image
3
- import torch
4
- from ultralytics import YOLO
5
- import numpy as np
6
-
7
- # Load the YOLOv8 model
8
- @st.cache_resource
9
- def load_model():
10
- model = YOLO('yolov8n.pt') # Path to your YOLOv8 trained weights
11
- return model
12
-
13
- model = load_model()
14
 
15
- # Function to process image and detect weeds
16
- def detect_weeds(image):
17
- # Run inference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  results = model(image)
19
-
20
- # Process the results and draw bounding boxes
21
- result_img = image.copy() # Start with the original image
22
- for result in results:
23
- annotated_img = result.plot() # Plot boxes and labels
24
- result_img = np.array(annotated_img)
25
-
26
- return result_img
27
-
28
- # Streamlit app UI
29
- st.title("Weed Detection in Sugarcane Fields")
30
- st.write("Upload an image of the sugarcane field, and the model will detect weeds.")
31
-
32
- # Upload image
33
- uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
34
-
35
- if uploaded_file is not None:
36
- image = Image.open(uploaded_file)
37
- st.image(image, caption="Uploaded Image", use_column_width=True)
38
-
39
- st.write("Detecting weeds...")
40
- result_image = detect_weeds(np.array(image)) # Convert to array for processing
41
-
42
- # Display result
43
- st.image(result_image, caption="Detected Weeds", use_column_width=True)
44
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
 
2
import torch
from PIL import Image
import torchvision.transforms as transforms

# Load the pretrained YOLOv5-small model from the Ultralytics hub.
# NOTE: this downloads weights on first run and requires network access;
# `torch` must be imported explicitly (it was missing, causing a NameError).
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
7
+
8
# Image preprocessing: load from disk and convert to a batched tensor.
def preprocess_image(image_path):
    """Load an image file and return it as a batched float tensor.

    Args:
        image_path: Path to the image file on disk.

    Returns:
        A torch.Tensor of shape (1, 3, 640, 640) with values in [0, 1].
    """
    # Force RGB so grayscale or RGBA inputs don't produce a channel count
    # the 3-channel model cannot accept.
    image = Image.open(image_path).convert("RGB")
    transform = transforms.Compose([
        transforms.Resize((640, 640)),
        transforms.ToTensor(),
    ])
    # unsqueeze adds the batch dimension expected by the model.
    return transform(image).unsqueeze(0)
16
+
17
# Model training (skeleton).
def train_model(data_dir, epochs=10):
    """Fine-tune the module-level ``model`` on images from *data_dir*.

    NOTE(review): this is skeleton code — ``dataset`` and ``dataloader`` are
    Ellipsis placeholders, so calling this function as-is raises TypeError
    when the inner loop tries to iterate ``dataloader``. Fill in the data
    loading before use.

    Args:
        data_dir: Directory containing the training data (unused until the
            placeholder loading code is implemented).
        epochs: Number of passes over the dataloader (default 10).
    """
    # Data preparation
    dataset = ...  # read the data from data_dir
    dataloader = ...  # create a DataLoader

    # Training hyper-parameters
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    # NOTE(review): CrossEntropyLoss applied to raw YOLOv5 detection output
    # is unlikely to be the intended loss for an object detector — confirm
    # before enabling training.
    criterion = torch.nn.CrossEntropyLoss()

    for epoch in range(epochs):
        for images, labels in dataloader:
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
        # Reports only the last batch's loss for the epoch.
        print(f'Epoch {epoch+1}/{epochs}, Loss: {loss.item()}')
35
+
36
# Detect gap regions in a field image.
def detect_gaps(image_path):
    """Preprocess the image at *image_path* and return the raw model output."""
    tensor = preprocess_image(image_path)
    predictions = model(tensor)
    return predictions
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
# Example usage: run gap detection on a sample field image.
# Guarded so importing this module does not trigger inference.
if __name__ == "__main__":
    image_path = '/content/Sugarcane-Cultivation-in-Tamil-Nadu-1.jpg'
    results = detect_gaps(image_path)
    print(results)