edogarci committed on
Commit
110c98b
·
verified ·
1 Parent(s): d2ff043

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +48 -37
  2. requirements.txt +2 -1
app.py CHANGED
@@ -3,52 +3,63 @@ import requests
3
  import numpy as np
4
  from PIL import Image
5
  import io
 
 
6
 
7
# Placeholder hook for a remote Hugging Face inference endpoint.
def call_model(image):
    """Send *image* to a Hugging Face model endpoint (placeholder).

    NOTE(review): the URL and token below are unfilled placeholders, and
    the HTTP response is intentionally ignored — the original image is
    returned unchanged until real post-processing is implemented.
    """
    # TODO: Replace with your actual Hugging Face model endpoint
    API_URL = "YOUR_HUGGINGFACE_MODEL_ENDPOINT"
    headers = {"Authorization": "Bearer YOUR_API_TOKEN"}

    # Serialize the PIL image to PNG bytes for the request body.
    buffer = io.BytesIO()
    image.save(buffer, format='PNG')
    payload = buffer.getvalue()

    # Fire the API call; the result is deliberately unused for now.
    response = requests.post(API_URL, headers=headers, data=payload)

    # TODO: Implement your image modification logic based on model results
    # Placeholder: hand back the input image as-is.
    return image
25
 
26
  # Function to process the captured image
27
def process_image(image):
    """Normalize a captured frame and run it through the model hook.

    Returns None when nothing was captured; otherwise the (currently
    unmodified) image produced by `call_model`.
    """
    # Nothing captured yet — nothing to analyze.
    if image is None:
        return None

    # Gradio may deliver a numpy array; normalize to a PIL image first.
    if not isinstance(image, Image.Image):
        image = Image.fromarray(image)

    # Delegate to the model hook and hand back its output directly.
    return call_model(image)
39
 
40
  # Create the Gradio interface
41
  with gr.Blocks() as demo:
42
- # Title and main description
43
- gr.Markdown("# Target Analyzer")
44
- gr.Markdown("### Instructions:")
45
- gr.Markdown("""
46
- 1. Click the '📸 Take Photo' button in the camera view to capture an image
47
- 2. Click 'Analyze' to process the image
48
- 3. View the results in the 'Analyzed Image' section
49
- 4. Use 'Reset' to start over
50
- """)
51
-
52
  with gr.Row():
53
  # Left column for camera and controls
54
  with gr.Column(scale=1):
@@ -80,4 +91,4 @@ with gr.Blocks() as demo:
80
 
81
  # Launch the interface
82
  if __name__ == "__main__":
83
- demo.launch()
 
3
  import numpy as np
4
  from PIL import Image
5
  import io
6
+ from ultralytics import YOLO
7
+ import cv2
8
 
9
# Load the YOLO weights once at startup so every request reuses the same
# model instance. On failure we log and leave `model` as None so request
# handlers can detect the missing model instead of crashing at import time.
model = None
try:
    model = YOLO('modelo_epoch_50.pt')
except Exception as e:
    print(f"Error loading model: {str(e)}")
else:
    print("Model loaded successfully")
 
 
 
 
 
 
 
 
 
 
 
16
 
17
  # Function to process the captured image
18
def process_image(image):
    """Run YOLO detection on a captured PIL image and return an annotated copy.

    Raises:
        gr.Error: when no image was captured, or when the model failed to
            load at startup (``model`` is None in that case).

    Side effects: ``model.predict(save=True)`` writes prediction images under
    the ultralytics ``runs/`` directory, and the annotated result is also
    saved as ``FINAL.jpg`` in the current directory.
    """
    # Check if image is None
    if image is None:
        raise gr.Error("Please take a picture first before analyzing!")

    # Fix: if the startup load failed, `model` is None and calling
    # model.predict would crash with an opaque AttributeError. Surface a
    # clear, user-facing error instead.
    if model is None:
        raise gr.Error("Model is not loaded. Check the server logs and restart the app.")

    # Convert image to RGB if it's not already (3-channel input for YOLO).
    if image.mode != 'RGB':
        image = image.convert('RGB')

    # Run inference at a 0.5 confidence threshold; save=True also writes
    # the raw prediction image under runs/.
    results = model.predict(image, save=True, conf=0.5)

    # Print the results (server-side debugging aid).
    print("Model predictions:", results[0].boxes)

    # Convert PIL Image to numpy array for OpenCV drawing.
    image_cv = np.array(image)
    # Convert RGB to BGR (OpenCV uses BGR format).
    image_cv = cv2.cvtColor(image_cv, cv2.COLOR_RGB2BGR)

    # Draw each predicted bounding box with its class label + confidence.
    boxes = results[0].boxes
    for box in boxes:
        b = box.xyxy[0]        # bounding box corners (x1, y1, x2, y2)
        c = box.cls            # predicted class index
        confidence = box.conf  # confidence score

        x1, y1, x2, y2 = map(int, b)
        cv2.rectangle(image_cv, (x1, y1), (x2, y2), (255, 0, 0), 2)  # Blue for prediction

        label = f"{results[0].names[int(c)]} {confidence.item():.2f}"
        cv2.putText(image_cv, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)
        print(label)

    # Convert back to RGB for display if needed
    image_cv = cv2.cvtColor(image_cv, cv2.COLOR_BGR2RGB)
    # Convert back to PIL Image
    final_image = Image.fromarray(image_cv)
    # Save the final image to current directory, overwriting if exists
    final_image.save('FINAL.jpg', 'JPEG', quality=95)

    return final_image
60
 
61
  # Create the Gradio interface
62
  with gr.Blocks() as demo:
 
 
 
 
 
 
 
 
 
 
63
  with gr.Row():
64
  # Left column for camera and controls
65
  with gr.Column(scale=1):
 
91
 
92
  # Launch the interface
93
  if __name__ == "__main__":
94
+ demo.launch(share=True)
requirements.txt CHANGED
@@ -1,4 +1,5 @@
1
  gradio>=4.0.0
2
  Pillow>=10.0.0
3
  requests>=2.31.0
4
- numpy>=1.24.0
 
 
1
  gradio>=4.0.0
2
  Pillow>=10.0.0
3
  requests>=2.31.0
4
+ numpy>=1.24.0
5
+ ultralytics>=8.0.0
+ opencv-python>=4.8.0