import base64
import cv2
import numpy as np
from flask import Flask, request, jsonify, render_template
import sys
import io
from ultralytics import YOLO
import time
from PIL import Image
import torch
# Set the default encoding to utf-8
# Re-wrap stdout so prints of non-ASCII class names don't crash on
# consoles whose default encoding is not UTF-8 (common on Windows).
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
app = Flask(__name__)
# Load your pre-trained vegetable classification model
# NOTE(review): this list appears unused below — the routes read class
# names from `model.names` instead. Kept for reference; confirm before
# relying on its ordering.
vegetables = [
"banana", "beans broad", "beans cluster", "beans haricot", "beetroot",
"bitter guard", "bottle guard", "brinjal long", "brinjal[purple]", "cabbage",
"capsicum green", "carrot", "cauliflower", "chilli green", "colocasia arvi",
"corn", "cucumber", "drumstick", "garlic", "ginger", "ladies finger",
"lemons", "Onion red", "potato", "sweet potato", "tomato", "Zuchini"
]
# Vegetable classifier used by /predict (loads weights at import time).
model = YOLO('fresh_model.pt')
# Load the YOLO model
# General object detector used by /detect.
yolo_model = YOLO('another_model.pt') # Adjust the path as needed
# Minimum per-box confidence for /predict to report a detection.
CONFIDENCE_THRESHOLD=0.6
@app.route('/')
def index():
    """Serve the single-page front-end."""
    page = render_template('index.html')
    return page
@app.route('/predict', methods=['POST'])
def predict():
    """Classify vegetables in a base64-encoded image.

    Expects JSON of the form {"image": "data:image/jpeg;base64,<payload>"}
    (a bare base64 string is also accepted). Returns
    {"predictions": [<class name>, ...]} with one entry per detection whose
    confidence exceeds CONFIDENCE_THRESHOLD, or {"error": ...} with an
    appropriate status code on failure.
    """
    try:
        data = request.get_json(silent=True)
        if not data or 'image' not in data:
            return jsonify({'error': 'missing "image" field in JSON body'}), 400
        # Strip the "data:image/jpeg;base64," prefix if present; [-1] also
        # accepts a bare base64 string (the original [1] raised IndexError).
        image_data = data['image'].split(',')[-1]
        # Decode the base64 string into a NumPy array, then into a BGR image.
        nparr = np.frombuffer(base64.b64decode(image_data), np.uint8)
        image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        if image is None:
            # imdecode returns None (not an exception) for undecodable data;
            # without this check the model would fail with a cryptic error.
            return jsonify({'error': 'could not decode image data'}), 400
        # Perform inference using the YOLO model.
        results = model(image)
        detected_vegetables = []
        for box in results[0].boxes:
            # box.conf is a 1-element tensor; compare as a plain float.
            if float(box.conf[0]) > CONFIDENCE_THRESHOLD:
                class_index = int(box.cls[0].item())
                # Read the label from the model's own class-name mapping so
                # indices cannot drift from a hard-coded list.
                detected_vegetables.append(model.names[class_index])
        return jsonify({'predictions': detected_vegetables})
    except Exception as e:
        # Route-level boundary: surface unexpected failures to the client.
        return jsonify({'error': str(e)}), 500
@app.route('/detect', methods=['POST'])
def detect():
    """Detect objects in a base64-encoded image with the general YOLO model.

    Expects JSON of the form {"image": "data:image/jpeg;base64,<payload>"}
    (a bare base64 string is also accepted). Returns
    {"predictions": [<class index as str>, ...]} listing each unique class
    detected, or {"error": ...} with an appropriate status code on failure.
    """
    try:
        data = request.get_json(silent=True)
        if not data or 'image' not in data:
            return jsonify({'error': 'missing "image" field in JSON body'}), 400
        # Strip the "data:image/jpeg;base64," prefix if present.
        image_data = data['image'].split(',')[-1]
        # Decode the base64 string into a NumPy array, then into a BGR image.
        nparr = np.frombuffer(base64.b64decode(image_data), np.uint8)
        image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        if image is None:
            # imdecode signals failure by returning None, not by raising.
            return jsonify({'error': 'could not decode image data'}), 400
        # Perform inference using the YOLO model.
        results = yolo_model(image)
        # Extract the detected class indices and deduplicate them.
        class_indices = results[0].boxes.cls.cpu().numpy().astype(int)
        unique_classes = np.unique(class_indices)
        # Clients receive raw class indices as strings; to return labels
        # instead, map through yolo_model.names (kept as-is for
        # backward compatibility).
        detected_classes = [str(cls) for cls in unique_classes]
        return jsonify({'predictions': detected_classes})
    except Exception as e:
        # Route-level boundary: surface unexpected failures to the client.
        return jsonify({'error': str(e)}), 500
# Text-detection model used by /ocr (loads weights at import time).
modeltext = YOLO('model_2.pt')  # Replace with your model's path
# Minimum per-box confidence for /ocr to count a detection as text.
OCR_CONFIDENCE_THRESHOLD = 0.55

@app.route('/ocr', methods=['POST'])
def ocr():
    """Report whether the posted image contains detectable text.

    Expects JSON of the form {"image": "data:image/jpeg;base64,<payload>"}
    (a bare base64 string is also accepted). Returns
    {"text_present": bool}, true when at least one detection exceeds
    OCR_CONFIDENCE_THRESHOLD, or {"error": ...} on failure.
    """
    try:
        data = request.get_json(silent=True)
        if not data or 'image' not in data:
            return jsonify({'error': 'missing "image" field in JSON body'}), 400
        # Strip the "data:image/jpeg;base64," prefix if present.
        image_data = data['image'].split(',')[-1]
        nparr = np.frombuffer(base64.b64decode(image_data), np.uint8)
        image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        if image is None:
            # imdecode signals failure by returning None, not by raising.
            return jsonify({'error': 'could not decode image data'}), 400
        # Perform text detection using YOLO; any() short-circuits on the
        # first high-confidence box instead of building a full list.
        results = modeltext(image)
        text_present = any(
            float(box.conf[0]) > OCR_CONFIDENCE_THRESHOLD
            for box in results[0].boxes
        )
        return jsonify({'text_present': text_present})
    except Exception as e:
        # Route-level boundary: surface unexpected failures to the client.
        return jsonify({'error': str(e)}), 500
if __name__ == '__main__':
    # Listen on all interfaces on port 7860 (Hugging Face Spaces default).
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader —
    # do not ship this to production; gate it behind an env var instead.
    app.run(host='0.0.0.0', port=7860, debug=True)