SakibAhmed commited on
Commit
5c8508a
·
verified ·
1 Parent(s): 14bac29

Upload 3 files

Browse files
Files changed (3) hide show
  1. .env +11 -6
  2. app.py +61 -84
  3. processing.py +83 -0
.env CHANGED
@@ -1,6 +1,11 @@
1
- # Name of the first model (e.g., your original classifier)
2
- MODEL_1_NAME=best_new_EP382.pt
3
-
4
- # Name of the second model (for Tyre/Alloy classification)
5
- #MODEL_2_NAME=best_TA_377EP.pt
6
- MODEL_2_NAME=best_parts_EP336.pt
 
 
 
 
 
 
1
+ # # Name of the first model (e.g., your original classifier)
2
+ # MODEL_1_NAME=best_new_EP382.pt
3
+
4
+ # # Name of the second model (for Tyre/Alloy classification)
5
+ # #MODEL_2_NAME=best_TA_377EP.pt
6
+ # MODEL_2_NAME=best_parts_EP336.pt
7
+
8
+
9
+
10
+ PARTS_MODEL_NAME=best_parts_EP336.pt
11
+ DAMAGE_MODEL_NAME=best_new_EP382.pt
app.py CHANGED
@@ -8,6 +8,9 @@ from werkzeug.utils import secure_filename
8
  from ultralytics import YOLO
9
  from dotenv import load_dotenv
10
 
 
 
 
11
  # Load environment variables from .env file
12
  load_dotenv()
13
 
@@ -21,12 +24,13 @@ UPLOAD_FOLDER = 'static/uploads'
21
  MODELS_FOLDER = 'models'
22
  ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
23
 
24
- # --- NEW: Load model names from .env file, with fallback defaults ---
25
- MODEL_1_NAME = os.getenv('MODEL_1_NAME', 'best.pt')
26
- MODEL_2_NAME = os.getenv('MODEL_2_NAME', 'tyre_alloy.pt') # New model for Tyre/Alloy
 
27
 
28
- MODEL_1_PATH = os.path.join(MODELS_FOLDER, MODEL_1_NAME)
29
- MODEL_2_PATH = os.path.join(MODELS_FOLDER, MODEL_2_NAME)
30
 
31
  app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
32
  os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
@@ -37,30 +41,30 @@ os.makedirs('templates', exist_ok=True)
37
  device = "cuda" if torch.cuda.is_available() else "cpu"
38
  print(f"Using device: {device}")
39
 
40
- # --- NEW: Load multiple YOLO Models ---
41
- model1, model2 = None, None
42
 
43
- # Load Model 1
44
  try:
45
- if not os.path.exists(MODEL_1_PATH):
46
- print(f"Warning: Model file not found at {MODEL_1_PATH}")
47
  else:
48
- model1 = YOLO(MODEL_1_PATH)
49
- model1.to(device)
50
- print(f"Successfully loaded model '{MODEL_1_NAME}' on {device}.")
51
  except Exception as e:
52
- print(f"Error loading Model 1 ({MODEL_1_NAME}): {e}")
53
 
54
- # Load Model 2
55
  try:
56
- if not os.path.exists(MODEL_2_PATH):
57
- print(f"Warning: Model file not found at {MODEL_2_PATH}")
58
  else:
59
- model2 = YOLO(MODEL_2_PATH)
60
- model2.to(device)
61
- print(f"Successfully loaded model '{MODEL_2_NAME}' on {device}.")
62
  except Exception as e:
63
- print(f"Error loading Model 2 ({MODEL_2_NAME}): {e}")
64
 
65
 
66
  def allowed_file(filename):
@@ -68,23 +72,6 @@ def allowed_file(filename):
68
  return '.' in filename and \
69
  filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
70
 
71
- def run_inference(model, filepath):
72
- """Helper function to run inference and format the result."""
73
- if model is None:
74
- return None # Return None if the model isn't loaded
75
-
76
- results = model(filepath)
77
- result = results[0]
78
- probs = result.probs
79
- top1_index = probs.top1
80
- top1_confidence = float(probs.top1conf)
81
- class_name = model.names[top1_index]
82
-
83
- return {
84
- "class": class_name,
85
- "confidence": top1_confidence
86
- }
87
-
88
  @app.route('/')
89
  def home():
90
  """Serve the main HTML page."""
@@ -93,61 +80,51 @@ def home():
93
  @app.route('/predict', methods=['POST'])
94
  def predict():
95
  """
96
- Endpoint to receive an image and run classification based on the requested model type.
 
97
  """
98
- # 1. --- File Validation ---
99
  if 'file' not in request.files:
100
  return jsonify({"error": "No file part in the request"}), 400
101
- file = request.files['file']
102
- if file.filename == '':
103
- return jsonify({"error": "No selected file"}), 400
104
- if not file or not allowed_file(file.filename):
105
- return jsonify({"error": "File type not allowed"}), 400
106
-
107
- # --- NEW: Get the model type from the form data ---
108
- model_type = request.form.get('model_type', 'model1') # default to model1
109
 
110
- # 2. --- Save the File Temporarily ---
111
- filename = secure_filename(file.filename)
112
- filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
113
- file.save(filepath)
 
 
 
 
 
 
 
114
 
115
- # 3. --- Perform Inference based on model_type ---
 
 
 
116
  try:
117
- if model_type == 'model1':
118
- if model1 is None:
119
- return jsonify({"error": f"Model '{MODEL_1_NAME}' is not loaded. Check server logs."}), 500
120
- prediction = run_inference(model1, filepath)
121
- return jsonify(prediction)
122
-
123
- elif model_type == 'model2':
124
- if model2 is None:
125
- return jsonify({"error": f"Model '{MODEL_2_NAME}' is not loaded. Check server logs."}), 500
126
- prediction = run_inference(model2, filepath)
127
- return jsonify(prediction)
128
-
129
- elif model_type == 'combined':
130
- if model1 is None or model2 is None:
131
- return jsonify({"error": "One or more models required for combined mode are not loaded. Check server logs."}), 500
132
-
133
- pred1 = run_inference(model1, filepath)
134
- pred2 = run_inference(model2, filepath)
135
-
136
- combined_prediction = {
137
- "model1_result": pred1,
138
- "model2_result": pred2
139
- }
140
- return jsonify(combined_prediction)
141
-
142
- else:
143
- return jsonify({"error": "Invalid model type specified"}), 400
144
 
145
  except Exception as e:
146
- return jsonify({"error": f"An error occurred during inference: {str(e)}"}), 500
 
 
 
 
147
  finally:
148
- # 4. --- Cleanup ---
149
- if os.path.exists(filepath):
150
- os.remove(filepath)
 
151
 
152
  if __name__ == '__main__':
 
153
  app.run(host='0.0.0.0', port=7860, debug=True)
 
8
  from ultralytics import YOLO
9
  from dotenv import load_dotenv
10
 
11
+ # Import the new processing logic
12
+ from processing import process_images
13
+
14
  # Load environment variables from .env file
15
  load_dotenv()
16
 
 
24
  MODELS_FOLDER = 'models'
25
  ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
26
 
27
+ # --- Load model names from .env file ---
28
+ # Updated names to be more descriptive
29
+ PARTS_MODEL_NAME = os.getenv('PARTS_MODEL_NAME', 'best_parts_EP336.pt')
30
+ DAMAGE_MODEL_NAME = os.getenv('DAMAGE_MODEL_NAME', 'best_new_EP382.pt')
31
 
32
+ PARTS_MODEL_PATH = os.path.join(MODELS_FOLDER, PARTS_MODEL_NAME)
33
+ DAMAGE_MODEL_PATH = os.path.join(MODELS_FOLDER, DAMAGE_MODEL_NAME)
34
 
35
  app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
36
  os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
 
41
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

# --- Load YOLO Models ---
# The two loaders below were previously duplicated copy-paste try/except
# blocks; consolidated into one helper so the logic stays in sync.

def _load_yolo_model(path, name, label):
    """Load one YOLO model onto `device`; return None (and log why) on failure.

    path: filesystem path to the .pt weights file.
    name: model file name, used only in log messages.
    label: human-readable label ("Parts" / "Damage") for log messages.
    """
    try:
        if not os.path.exists(path):
            print(f"Warning: {label} model file not found at {path}")
            return None
        model = YOLO(path)
        model.to(device)
        print(f"Successfully loaded {label.lower()} model '{name}' on {device}.")
        return model
    except Exception as e:
        # A missing/corrupt weights file should not crash the server;
        # downstream code checks for None before inference.
        print(f"Error loading {label} Model ({name}): {e}")
        return None

parts_model = _load_yolo_model(PARTS_MODEL_PATH, PARTS_MODEL_NAME, "Parts")
damage_model = _load_yolo_model(DAMAGE_MODEL_PATH, DAMAGE_MODEL_NAME, "Damage")
68
 
69
 
70
  def allowed_file(filename):
 
72
  return '.' in filename and \
73
  filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
74
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
  @app.route('/')
76
  def home():
77
  """Serve the main HTML page."""
 
80
@app.route('/predict', methods=['POST'])
def predict():
    """
    Endpoint to receive one or more images, run the two-step prediction
    (part classification, then conditional damage check), and return the
    combined per-image results as JSON.
    """
    # 1. --- File Validation for Multiple Files ---
    if 'file' not in request.files:
        return jsonify({"error": "No file part in the request"}), 400

    files = request.files.getlist('file')

    if not files or all(f.filename == '' for f in files):
        return jsonify({"error": "No selected files"}), 400

    saved_filepaths = []

    for file in files:
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            # Bug fix: secure_filename() returns '' for names made entirely
            # of unsafe characters; saving would then target the upload
            # folder itself instead of a file.
            if not filename:
                print(f"Skipped file with unusable name: {file.filename}")
                continue
            filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(filepath)
            # Bug fix: duplicate names within one request overwrite the same
            # path; avoid queueing (and later removing) it twice.
            if filepath not in saved_filepaths:
                saved_filepaths.append(filepath)
        else:
            # You might want to log this or inform the user about skipped files
            print(f"Skipped invalid file: {file.filename}")

    if not saved_filepaths:
        return jsonify({"error": "No valid files were uploaded. Allowed types: png, jpg, jpeg"}), 400

    # 2. --- Perform Inference ---
    try:
        # Pass the models and file paths to the processing function
        results = process_images(parts_model, damage_model, saved_filepaths)
        return jsonify(results)
    except Exception as e:
        # Log the full error for debugging
        print(f"An error occurred during processing: {e}")
        import traceback
        traceback.print_exc()
        return jsonify({"error": f"An error occurred during processing: {str(e)}"}), 500
    finally:
        # 3. --- Cleanup: remove every temporary upload, success or failure ---
        for filepath in saved_filepaths:
            if os.path.exists(filepath):
                os.remove(filepath)
127
 
128
  if __name__ == '__main__':
129
+ # Setting debug=False is recommended for production
130
  app.run(host='0.0.0.0', port=7860, debug=True)
processing.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# processing.py
#
# Two-step vehicle-image pipeline: classify the car part first, then run a
# damage check only for the side views listed in DAMAGE_CHECK_PARTS.

import os
from ultralytics import YOLO  # NOTE(review): YOLO appears unused in this module — confirm before removing.

# --- Configuration ---
# These are the specific parts that require a subsequent damage check.
DAMAGE_CHECK_PARTS = {
    'driver_front_side',
    'driver_rear_side',
    'passenger_front_side',
    'passenger_rear_side',
}
14
+
15
def run_single_inference(model, filepath):
    """
    Run inference with a single YOLO model on one image.

    Returns {"class": str, "confidence": float (rounded to 4 places)},
    or None when the model is not loaded.
    """
    if model is None:
        return None  # Return None if the model isn't loaded

    results = model(filepath, verbose=False)  # verbose=False to keep logs clean
    result = results[0]

    # Check if it's a classification model with probabilities
    if result.probs is not None:
        probs = result.probs
        top1_index = probs.top1
        top1_confidence = float(probs.top1conf)
        class_name = model.names[top1_index]
    else:  # Fallback for detection models or if probs are not available
        # Assuming the first box is the prediction we need —
        # TODO(review): confirm boxes are confidence-sorted for this model.
        if len(result.boxes) > 0:
            # Bug fix: .int() on the tensor element returns a tensor, which
            # is not a valid key for the model.names dict — convert to a
            # plain Python int instead.
            top1_index = int(result.boxes.cls[0])
            top1_confidence = float(result.boxes.conf[0])
            class_name = model.names[top1_index]
        else:
            top1_confidence = 0.0
            class_name = "unknown"

    return {
        "class": class_name,
        "confidence": round(top1_confidence, 4)
    }
41
+
42
def process_images(parts_model, damage_model, image_paths):
    """
    Processes a list of images.

    1. Runs the 'parts_model' on every image.
    2. If the detected part is in DAMAGE_CHECK_PARTS, it then runs the 'damage_model'.
    3. Otherwise, the damage status defaults to 'correct'.

    Returns a list of dicts with keys "filename", "part_prediction",
    and "damage_prediction". Raises RuntimeError when either model is
    not loaded.
    """
    if parts_model is None or damage_model is None:
        raise RuntimeError("One or more models are not loaded. Check server logs.")

    final_results = []

    for filepath in image_paths:
        filename = os.path.basename(filepath)
        # Bug fix: the f-string had no placeholder, so every image logged
        # the same literal text instead of its own name.
        print(f"Processing {filename}...")

        # 1. First, predict the part
        part_prediction = run_single_inference(parts_model, filepath)
        predicted_part = part_prediction.get("class") if part_prediction else "unknown"

        damage_prediction = None
        # 2. Conditionally predict the damage
        if predicted_part in DAMAGE_CHECK_PARTS:
            print(f" -> Part '{predicted_part}' requires damage check. Running damage model...")
            damage_prediction = run_single_inference(damage_model, filepath)
        else:
            print(f" -> Part '{predicted_part}' does not require damage check. Defaulting to 'correct'.")
            # 3. For other parts, default to 'correct'
            damage_prediction = {
                "class": "correct",
                "confidence": 1.0,
                "note": "Result by default, not by model inference."
            }

        # Assemble the final result for this image
        final_results.append({
            "filename": filename,
            "part_prediction": part_prediction,
            "damage_prediction": damage_prediction
        })

    return final_results