tyrwh committed on
Commit
e3446e1
·
1 Parent(s): 1b2b626

Adding confidence threshold plotting, fixing image browser issues

Browse files
Files changed (7) hide show
  1. app.py +124 -220
  2. nemaquant.py +6 -9
  3. static/script.js +291 -85
  4. static/style.css +17 -1
  5. templates/index.html +43 -15
  6. templates/plotly_cdn.html +2 -0
  7. yolo_utils.py +25 -0
app.py CHANGED
@@ -1,7 +1,8 @@
1
- from flask import Flask, render_template, request, jsonify, send_from_directory, send_file
2
- import subprocess
3
- import os
4
  from pathlib import Path
 
 
5
  import uuid
6
  import pandas as pd
7
  from werkzeug.utils import secure_filename
@@ -14,6 +15,11 @@ import threading
14
  import time
15
  from datetime import datetime
16
  import zipfile
 
 
 
 
 
17
 
18
  app = Flask(__name__)
19
 
@@ -43,118 +49,69 @@ def allowed_file(filename):
43
  def index():
44
  return render_template('index.html')
45
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
  @app.route('/process', methods=['POST'])
47
  def process_images():
48
- global job_status
49
  try:
50
- if 'files' not in request.files:
51
- return jsonify({"error": "No file part"}), 400
52
-
53
  files = request.files.getlist('files')
54
- input_mode = request.form.get('input_mode', 'single')
55
- confidence = request.form.get('confidence_threshold', '0.6')
56
-
57
  if not files or files[0].filename == '':
58
- return jsonify({"error": "No selected file"}), 400
59
-
60
- job_id = str(uuid.uuid4())
61
- job_output_dir = RESULT_FOLDER / job_id
62
- job_output_dir.mkdir(parents=True, exist_ok=True)
63
-
64
- saved_files = []
65
- error_files = []
66
-
67
- # For keyence mode, validate XY subdirectory structure but flatten files
68
- if input_mode == 'keyence':
69
- xy_subdirs = set()
70
- # First pass: validate XY subdirectory structure
71
- for file in files:
72
- if file and file.filename:
73
- relative_path = file.filename
74
- if '/' in relative_path:
75
- first_dir = relative_path.split('/')[0]
76
- if re.match(r'^XY[0-9][0-9]$', first_dir):
77
- xy_subdirs.add(first_dir)
78
-
79
- # Validate that we found XY subdirectories
80
- if not xy_subdirs:
81
- return jsonify({
82
- "error": "Keyence mode requires folder structure with XY[0-9][0-9] subdirectories (e.g., XY01/, XY02/). No such subdirectories found in uploaded folder."
83
- }), 400
84
-
85
- # Second pass: save files with flattened structure
86
- for file in files:
87
- if file and allowed_file(file.filename):
88
- # Extract just the filename, ignoring the folder structure
89
- filename = secure_filename(Path(file.filename).name)
90
- save_path = job_output_dir / filename
91
- file.save(str(save_path))
92
- saved_files.append(save_path)
93
- elif file:
94
- error_files.append(file.filename)
95
-
96
- else:
97
- # Original file handling for non-keyence modes
98
- for file in files:
99
- if file and allowed_file(file.filename):
100
- filename = secure_filename(file.filename)
101
- save_path = job_output_dir / filename
102
- file.save(str(save_path))
103
- saved_files.append(save_path)
104
- elif file:
105
- error_files.append(file.filename)
106
-
107
- if not saved_files:
108
- return jsonify({"error": f"No valid files uploaded. Invalid files: {error_files}"}), 400
109
-
110
- if input_mode in ['files', 'folder']:
111
- input_target = str(job_output_dir)
112
- img_mode_arg = 'dir'
113
- elif input_mode == 'single':
114
- input_target = str(saved_files[0])
115
- img_mode_arg = 'file'
116
- elif input_mode == 'keyence':
117
- input_target = str(job_output_dir)
118
- img_mode_arg = 'keyence'
119
- else:
120
- return jsonify({"error": f"Invalid input mode: {input_mode}"}), 400
121
-
122
- output_csv = job_output_dir / "results.csv"
123
- annotated_dir = job_output_dir
124
-
125
- cmd = [
126
- sys.executable,
127
- str(APP_ROOT / 'nemaquant.py'),
128
- '-m', img_mode_arg,
129
- '-i', input_target,
130
- '-w', str(WEIGHTS_FILE),
131
- '-o', str(output_csv),
132
- '-a', str(annotated_dir)
133
- ]
134
-
135
- print(f"[{job_id}] Prepared command: {' '.join(cmd)}")
136
-
137
- job_status[job_id] = {
138
- "status": "starting",
139
- "progress": 0,
140
- "log": "Job initiated",
141
- "results": None,
142
- "error": None
143
- }
144
-
145
- thread = threading.Thread(target=run_nemaquant_background, args=(job_id, cmd, job_output_dir, output_csv, input_mode))
146
- thread.start()
147
-
148
- return jsonify({
149
- "status": "processing",
150
- "job_id": job_id,
151
- "initial_log": f"Job '{job_id}' started. Input mode: {input_mode}. Files saved in results/{job_id}/. Polling for progress..."
152
- })
153
 
 
 
 
 
 
 
154
  except Exception as e:
155
- error_message = f"Error starting process: {str(e)}\\n{traceback.format_exc()}"
156
- print(error_message)
157
- return jsonify({"error": "Failed to start processing", "log": error_message}), 500
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
158
 
159
  @app.route('/progress/<job_id>')
160
  def get_progress(job_id):
@@ -248,7 +205,7 @@ def export_images(job_id):
248
  zf.write(file_path, file_path.name)
249
 
250
  memory_file.seek(0)
251
- timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
252
 
253
  return send_file(
254
  memory_file,
@@ -262,119 +219,66 @@ def export_images(job_id):
262
  print(error_message)
263
  return jsonify({"error": "Server error", "log": error_message}), 500
264
 
265
- def run_nemaquant_background(job_id, cmd, job_output_dir, output_csv, input_mode):
266
- global job_status
267
  try:
268
- print(f"[{job_id}] Starting NemaQuant process...")
269
- job_status[job_id] = {
270
- "status": "running",
271
- "progress": 5,
272
- "log": "Starting NemaQuant...",
273
- "results": None,
274
- "error": None
275
- }
276
-
277
- process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1, universal_newlines=True)
278
-
279
- stdout_log = []
280
- stderr_log = []
281
- total_lines_estimate = 50
282
- lines_processed = 0
283
- last_reported_progress = 5
284
-
285
- if process.stdout:
286
- for line in iter(process.stdout.readline, ''):
287
- line_strip = line.strip()
288
- lines_processed += 1
289
- stdout_log.append(line_strip)
290
- print(f"[{job_id}] STDOUT: {line_strip}")
291
-
292
- if line_strip.startswith("PROGRESS:"):
293
- try:
294
- progress_val = int(line_strip.split(":")[1].strip())
295
- last_reported_progress = max(last_reported_progress, min(progress_val, 95))
296
- job_status[job_id]["progress"] = last_reported_progress
297
- except (IndexError, ValueError):
298
- progress_percent = min(90, 5 + int((lines_processed / total_lines_estimate) * 85))
299
- job_status[job_id]["progress"] = max(last_reported_progress, progress_percent)
300
- last_reported_progress = job_status[job_id]["progress"]
301
- else:
302
- progress_percent = min(90, 5 + int((lines_processed / total_lines_estimate) * 85))
303
- job_status[job_id]["progress"] = max(last_reported_progress, progress_percent)
304
- last_reported_progress = job_status[job_id]["progress"]
305
-
306
- job_status[job_id]["log"] = "\n".join(stdout_log[-5:])
307
-
308
- if process.stderr:
309
- for line in iter(process.stderr.readline, ''):
310
- stderr_log.append(line.strip())
311
- print(f"[{job_id}] STDERR: {line.strip()}")
312
-
313
- process.stdout.close()
314
- if process.stderr:
315
- process.stderr.close()
316
- return_code = process.wait()
317
-
318
- stdout_str = "\n".join(stdout_log)
319
- stderr_str = "\n".join(stderr_log)
320
- full_log = f"NemaQuant Output:\n{stdout_str}\nNemaQuant Errors:\n{stderr_str}"
321
- job_status[job_id]["log"] = full_log
322
-
323
- if return_code != 0:
324
- raise subprocess.CalledProcessError(return_code, cmd, output=stdout_str, stderr=stderr_str)
325
-
326
- job_status[job_id]["progress"] = 95
327
-
328
- if not output_csv.exists():
329
- csv_files = list(job_output_dir.glob('*.csv'))
330
- if csv_files:
331
- output_csv = csv_files[0]
332
- else:
333
- raise FileNotFoundError(f"Output CSV not found at {output_csv} and no alternatives found.")
334
 
335
- df = pd.read_csv(output_csv)
336
- results_list = []
337
-
338
- egg_count = None
339
- if input_mode == 'single':
340
- egg_count_match = re.search(r'n eggs:\\s*(\\d+)', full_log)
341
- if egg_count_match:
342
- egg_count = int(egg_count_match.group(1))
343
-
344
- for index, row in df.iterrows():
345
- original_filename = row.get('filename', '')
346
- num_eggs = egg_count if egg_count is not None else row.get('num_eggs', 'N/A')
347
- stem = Path(original_filename).stem
348
- suffix = Path(original_filename).suffix
349
- annotated_filename = f"{stem}_annotated{suffix}"
350
- annotated_path = job_output_dir / annotated_filename
351
-
352
- results_list.append({
353
- "filename": original_filename,
354
- "num_eggs": num_eggs,
355
- "annotated_filename": annotated_filename if annotated_path.exists() else None,
356
- })
357
-
358
- job_status[job_id] = {
359
- "status": "success",
360
- "progress": 100,
361
- "log": full_log,
362
- "results": results_list,
363
- "error": None
364
- }
365
-
366
- except subprocess.CalledProcessError as e:
367
- stdout_err = e.output if e.output else ""
368
- stderr_err = e.stderr if e.stderr else ""
369
- error_message = f"Error running NemaQuant:\nExit Code: {e.returncode}\nSTDOUT:\n{stdout_err}\nSTDERR:\n{stderr_err}"
370
- current_log = job_status[job_id].get("log", "")
371
- job_status[job_id] = {"status": "error", "progress": 100, "log": current_log, "results": None, "error": error_message}
372
- except FileNotFoundError as e:
373
- error_message = f"Error processing results: {e}"
374
- job_status[job_id] = {"status": "error", "progress": 100, "log": job_status[job_id].get("log", ""), "results": None, "error": error_message}
375
  except Exception as e:
376
- error_message = f"An unexpected error occurred: {str(e)}\\n{traceback.format_exc()}"
377
- job_status[job_id] = {"status": "error", "progress": 100, "log": job_status[job_id].get("log", ""), "results": None, "error": error_message}
 
378
 
379
  def print_startup_info():
380
  print("----- NemaQuant Flask App Starting -----")
@@ -422,4 +326,4 @@ def print_startup_info():
422
 
423
  if __name__ == '__main__':
424
  print_startup_info()
425
- app.run(host='0.0.0.0', port=7860, debug=True)
 
1
+ from flask import Flask, render_template, request, jsonify, send_from_directory, send_file, Response
2
+ from multiprocessing import Pool, cpu_count
 
3
  from pathlib import Path
4
+ import tempfile
5
+ import os
6
  import uuid
7
  import pandas as pd
8
  from werkzeug.utils import secure_filename
 
15
  import time
16
  from datetime import datetime
17
  import zipfile
18
+ import cv2
19
+ import numpy as np
20
+ import csv
21
+
22
+ from yolo_utils import load_model, detect_image
23
 
24
  app = Flask(__name__)
25
 
 
49
  def index():
50
  return render_template('index.html')
51
 
52
+ # Global model for each process
53
+ _model = None
54
+ def get_model():
55
+ global _model
56
+ if _model is None:
57
+ _model = load_model(WEIGHTS_FILE)
58
+ return _model
59
+
60
+ all_detections = {}
61
+
62
+ def process_image(args):
63
+ filename, image_bytes = args
64
+ model = get_model()
65
+ detections = detect_image(model, image_bytes, conf=0.05)
66
+ # Do NOT update all_detections here (worker process)
67
+ # Save original image to uploads for later annotation
68
+ img_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
69
+ with open(img_path, 'wb') as f:
70
+ f.write(image_bytes)
71
+ return {'filename': filename, 'detections': detections}
72
+
73
  @app.route('/process', methods=['POST'])
74
  def process_images():
 
75
  try:
 
 
 
76
  files = request.files.getlist('files')
 
 
 
77
  if not files or files[0].filename == '':
78
+ return jsonify({'error': 'No files uploaded'}), 400
79
+
80
+ # Read all files into memory as (filename, bytes)
81
+ file_data = [(secure_filename(f.filename), f.read()) for f in files]
82
+
83
+ # Use multiprocessing to process images in parallel
84
+ with Pool(processes=min(cpu_count(), len(file_data))) as pool:
85
+ results = pool.map(process_image, file_data)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
 
87
+ # Aggregate results in the main process
88
+ for result in results:
89
+ all_detections[result['filename']] = result['detections']
90
+
91
+ # Return all detections for all images
92
+ return jsonify({'results': results})
93
  except Exception as e:
94
+ print(f"Error in /process: {e}")
95
+ print(traceback.format_exc())
96
+ return jsonify({'error': str(e)}), 500
97
+
98
+ @app.route('/annotate', methods=['POST'])
99
+ def annotate_image():
100
+ data = request.json
101
+ filename = secure_filename(data['filename'])
102
+ threshold = float(data['confidence'])
103
+ img_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
104
+ img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
105
+ detections = all_detections.get(filename, [])
106
+ filtered = [d for d in detections if d['score'] >= threshold]
107
+ # Draw boxes
108
+ for det in filtered:
109
+ x1, y1, x2, y2 = map(int, det['bbox'])
110
+ print(f"[annotate_image] Drawing box: ({x1}, {y1}), ({x2}, {y2})")
111
+ cv2.rectangle(img, (x1, y1), (x2, y2), (0,0,255), 3)
112
+ temp_path = os.path.join(tempfile.gettempdir(), 'annotated.png')
113
+ cv2.imwrite(temp_path, img)
114
+ return send_file(temp_path, mimetype='image/png')
115
 
116
  @app.route('/progress/<job_id>')
117
  def get_progress(job_id):
 
205
  zf.write(file_path, file_path.name)
206
 
207
  memory_file.seek(0)
208
+ timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
209
 
210
  return send_file(
211
  memory_file,
 
219
  print(error_message)
220
  return jsonify({"error": "Server error", "log": error_message}), 500
221
 
222
+ @app.route('/export_csv', methods=['POST'])
223
+ def export_csv():
224
  try:
225
+ data = request.json
226
+ threshold = float(data.get('confidence', 0.5))
227
+ # all_detections: {filename: [detections]}
228
+ rows = []
229
+ for filename, detections in all_detections.items():
230
+ count = sum(1 for d in detections if d['score'] >= threshold)
231
+ rows.append({'Filename': filename, 'EggsDetected': count})
232
+ timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
233
+ output = io.StringIO()
234
+ writer = csv.DictWriter(output, fieldnames=['Filename', 'EggsDetected'])
235
+ writer.writeheader()
236
+ writer.writerows(rows)
237
+ output.seek(0)
238
+ return Response(
239
+ output.getvalue(),
240
+ mimetype='text/csv',
241
+ headers={
242
+ 'Content-Disposition': f'attachment; filename=nemaquant_results_{timestamp}.csv'
243
+ }
244
+ )
245
+ except Exception as e:
246
+ error_message = f"Error exporting CSV: {str(e)}"
247
+ print(error_message)
248
+ return jsonify({"error": "Server error", "log": error_message}), 500
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
249
 
250
+ @app.route('/export_images', methods=['POST'])
251
+ def export_images_post():
252
+ try:
253
+ data = request.json
254
+ threshold = float(data.get('confidence', 0.5))
255
+ # all_detections: {filename: [detections]}
256
+ memory_file = io.BytesIO()
257
+ with zipfile.ZipFile(memory_file, 'w', zipfile.ZIP_DEFLATED) as zf:
258
+ for filename, detections in all_detections.items():
259
+ filtered = [d for d in detections if d['score'] >= threshold]
260
+ img_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
261
+ img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
262
+ for det in filtered:
263
+ x1, y1, x2, y2 = map(int, det['bbox'])
264
+ cv2.rectangle(img, (x1, y1), (x2, y2), (0,0,255), 3)
265
+ # Save annotated image to memory
266
+ is_tiff = filename.lower().endswith(('.tif', '.tiff'))
267
+ out_name = f"{Path(filename).stem}.png"
268
+ _, img_bytes = cv2.imencode('.png', img)
269
+ zf.writestr(out_name, img_bytes.tobytes())
270
+ memory_file.seek(0)
271
+ timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
272
+ return send_file(
273
+ memory_file,
274
+ mimetype='application/zip',
275
+ as_attachment=True,
276
+ download_name=f'nemaquant_annotated_{timestamp}.zip'
277
+ )
 
 
 
 
 
 
 
 
 
 
 
 
278
  except Exception as e:
279
+ error_message = f"Error exporting images: {str(e)}"
280
+ print(error_message)
281
+ return jsonify({"error": "Server error", "log": error_message}), 500
282
 
283
  def print_startup_info():
284
  print("----- NemaQuant Flask App Starting -----")
 
326
 
327
  if __name__ == '__main__':
328
  print_startup_info()
329
+ app.run(host='0.0.0.0', port=7860, debug=True)
nemaquant.py CHANGED
@@ -1,7 +1,6 @@
1
  #!/usr/bin/env python
2
  # coding: utf-8
3
 
4
- import argparse
5
  import numpy as np
6
  import pandas as pd
7
  import cv2
@@ -11,7 +10,9 @@ from pathlib import Path
11
  from ultralytics import YOLO
12
  from glob import glob
13
  import re
14
- import sys
 
 
15
 
16
  def options():
17
  parser = argparse.ArgumentParser(description="Nematode egg image processing with YOLO11 model.")
@@ -127,9 +128,8 @@ def parse_key_file(keypath):
127
  key["keycol"] = key["keycol"].apply(lambda x: "_%s%s" % (re.findall("[A-H]",x)[0], re.findall("[0-9]+", x)[0].zfill(2)))
128
  return key
129
 
130
- def main():
131
- args = check_args()
132
- if args.key:
133
  key = parse_key_file(str(args.keypath))
134
  model = YOLO(args.weights)
135
  if cuda.is_available():
@@ -249,7 +249,4 @@ def main():
249
  outdf.to_csv(str(args.outpath), index=False)
250
  print('Saving output to %s...' % str(args.outpath))
251
  if args.annotated:
252
- print('Saving annotated images to %s...' % str(args.annotpath))
253
-
254
- if __name__ == '__main__':
255
- main()
 
1
  #!/usr/bin/env python
2
  # coding: utf-8
3
 
 
4
  import numpy as np
5
  import pandas as pd
6
  import cv2
 
10
  from ultralytics import YOLO
11
  from glob import glob
12
  import re
13
+
14
+ # YOLO model logic for backend inference
15
+ from yolo_utils import load_model, detect_image
16
 
17
  def options():
18
  parser = argparse.ArgumentParser(description="Nematode egg image processing with YOLO11 model.")
 
128
  key["keycol"] = key["keycol"].apply(lambda x: "_%s%s" % (re.findall("[A-H]",x)[0], re.findall("[0-9]+", x)[0].zfill(2)))
129
  return key
130
 
131
+ def detect_eggs(args, key=None):
132
+ if key:
 
133
  key = parse_key_file(str(args.keypath))
134
  model = YOLO(args.weights)
135
  if cuda.is_available():
 
249
  outdf.to_csv(str(args.outpath), index=False)
250
  print('Saving output to %s...' % str(args.outpath))
251
  if args.annotated:
252
+ print('Saving annotated images to %s...' % str(args.annotpath))
 
 
 
static/script.js CHANGED
@@ -50,6 +50,10 @@ document.addEventListener('DOMContentLoaded', () => {
50
  let currentSortField = null;
51
  let currentSortDirection = 'asc';
52
 
 
 
 
 
53
  // Input mode change
54
  inputMode.addEventListener('change', () => {
55
  const mode = inputMode.value;
@@ -326,8 +330,6 @@ document.addEventListener('DOMContentLoaded', () => {
326
  method: 'POST',
327
  body: formData,
328
  });
329
-
330
- // Handle non-JSON initial response or network errors
331
  if (!response.ok) {
332
  let errorText = `HTTP error! status: ${response.status}`;
333
  try {
@@ -340,27 +342,29 @@ document.addEventListener('DOMContentLoaded', () => {
340
  }
341
  throw new Error(errorText);
342
  }
343
-
344
  const data = await response.json();
345
-
346
- // Check for errors returned immediately by /process
347
  if (data.error) {
348
  logStatus(`Error starting process: ${data.error}`);
349
  if(data.log) logStatus(`Details: ${data.log}`);
350
  throw new Error(data.error); // Throw to trigger catch block
351
  }
352
-
353
- // If processing started successfully, begin polling
354
- if (data.status === 'processing' && data.job_id) {
355
- currentJobId = data.job_id;
356
- logStatus(data.initial_log || `Processing started with Job ID: ${currentJobId}. Polling for progress...`);
357
- updateProgress(1, 'Processing started...'); // Small initial progress
358
- pollProgress(currentJobId);
359
- } else {
360
- // Should not happen if backend is correct, but handle defensively
361
- throw new Error('Unexpected response from server after starting process.');
362
  }
363
-
 
 
 
 
 
 
364
  } catch (error) {
365
  logStatus(`Error: ${error.message}`);
366
  updateProgress(0, 'Error occurred');
@@ -373,6 +377,101 @@ document.addEventListener('DOMContentLoaded', () => {
373
  // Removed finally block here, setLoading(false) is handled by pollProgress or catch block
374
  });
375
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
376
  // --- New Polling Function ---
377
  function pollProgress(jobId) {
378
  if (progressInterval) {
@@ -700,67 +799,6 @@ document.addEventListener('DOMContentLoaded', () => {
700
  }
701
 
702
  // Image Preview
703
- function displayImage(index) {
704
- if (!currentResults[index]) return;
705
-
706
- currentImageIndex = index;
707
- const result = currentResults[index];
708
-
709
- if (result.annotated_filename) {
710
- const imageUrl = `/results/${currentJobId}/${result.annotated_filename}`;
711
-
712
- // Create a new image object to handle loading
713
- const tempImage = new Image();
714
- tempImage.onload = function() {
715
- previewImage.src = imageUrl;
716
- previewImage.alt = result.filename;
717
-
718
- // Update image info with the new function
719
- updateImageInfo();
720
-
721
- // Enable zoom controls
722
- zoomInBtn.disabled = false;
723
- zoomOutBtn.disabled = false;
724
-
725
- // Calculate which page this image should be on
726
- const targetPage = Math.floor(index / RESULTS_PER_PAGE) + 1;
727
-
728
- // If we're not on the correct page, switch to it
729
- if (currentPage !== targetPage) {
730
- currentPage = targetPage;
731
- displayResultsPage(currentPage);
732
- }
733
-
734
- // Remove selection from all rows
735
- document.querySelectorAll('.results-table tr').forEach(r => r.classList.remove('selected'));
736
-
737
- // Find and highlight the corresponding row
738
- const rows = Array.from(resultsTableBody.querySelectorAll('tr'));
739
- const targetRow = rows.find(row => parseInt(row.dataset.originalIndex, 10) === index);
740
- if (targetRow) {
741
- targetRow.classList.add('selected');
742
- }
743
-
744
- // Reset panning when a new image is displayed
745
- resetPanZoom();
746
- };
747
-
748
- tempImage.onerror = function() {
749
- console.error('Failed to load image:', imageUrl);
750
- clearPreview();
751
- };
752
-
753
- // Start loading the image
754
- tempImage.src = imageUrl;
755
- } else {
756
- clearPreview();
757
- }
758
-
759
- // Update navigation
760
- prevBtn.disabled = index <= 0;
761
- nextBtn.disabled = index >= currentResults.length - 1;
762
- }
763
-
764
  function clearPreview() {
765
  previewImage.src = '';
766
  previewImage.alt = 'No image selected';
@@ -947,16 +985,76 @@ document.addEventListener('DOMContentLoaded', () => {
947
  }
948
  }
949
 
950
- // Export Handlers
951
- exportCsvBtn.addEventListener('click', () => {
952
- if (!currentJobId) return;
953
- window.location.href = `/results/${currentJobId}/results.csv`;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
954
  });
955
 
956
- exportImagesBtn.addEventListener('click', () => {
957
- if (!currentJobId) return;
958
- logStatus('Downloading annotated images...');
959
- window.location.href = `/export_images/${currentJobId}`;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
960
  });
961
 
962
  // Add keyboard controls for panning (arrow keys)
@@ -1019,6 +1117,110 @@ document.addEventListener('DOMContentLoaded', () => {
1019
  imageInfo.innerHTML = infoText;
1020
  }
1021
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1022
  // Custom dialog for selecting among multiple CSV files
1023
  function showCsvSelectionDialog(csvFiles) {
1024
  // Create overlay
@@ -1200,6 +1402,10 @@ document.addEventListener('DOMContentLoaded', () => {
1200
  }
1201
 
1202
  // Initialize
 
1203
  updateUploadState();
1204
  logStatus('Application ready');
 
 
 
1205
  });
 
50
  let currentSortField = null;
51
  let currentSortDirection = 'asc';
52
 
53
+ // --- Store all detections for frontend filtering ---
54
+ let allDetections = [];
55
+ let allImageData = {};
56
+
57
  // Input mode change
58
  inputMode.addEventListener('change', () => {
59
  const mode = inputMode.value;
 
330
  method: 'POST',
331
  body: formData,
332
  });
 
 
333
  if (!response.ok) {
334
  let errorText = `HTTP error! status: ${response.status}`;
335
  try {
 
342
  }
343
  throw new Error(errorText);
344
  }
 
345
  const data = await response.json();
 
 
346
  if (data.error) {
347
  logStatus(`Error starting process: ${data.error}`);
348
  if(data.log) logStatus(`Details: ${data.log}`);
349
  throw new Error(data.error); // Throw to trigger catch block
350
  }
351
+ // Store all detections for frontend filtering
352
+ allDetections = data.results;
353
+ allImageData = {};
354
+ for (const file of files) {
355
+ const reader = new FileReader();
356
+ reader.onload = (e) => {
357
+ allImageData[file.name] = e.target.result;
358
+ };
359
+ reader.readAsDataURL(file);
 
360
  }
361
+ setTimeout(() => {
362
+ updateResultsTable();
363
+ setLoading(false);
364
+ updateProgress(100, 'Processing complete');
365
+ logStatus('Processing finished successfully.');
366
+ onProcessingComplete();
367
+ }, 500);
368
  } catch (error) {
369
  logStatus(`Error: ${error.message}`);
370
  updateProgress(0, 'Error occurred');
 
377
  // Removed finally block here, setLoading(false) is handled by pollProgress or catch block
378
  });
379
 
380
+ // --- Filtering and Table Update ---
381
+ function updateResultsTable() {
382
+ const threshold = parseFloat(confidenceSlider.value);
383
+ // Group detections by image
384
+ const grouped = {};
385
+ allDetections.forEach(imgResult => {
386
+ const filtered = imgResult.detections.filter(det => det.score >= threshold);
387
+ grouped[imgResult.filename] = filtered;
388
+ });
389
+ // Build results for table
390
+ const prevFilename = (currentImageIndex >= 0 && currentResults[currentImageIndex]) ? currentResults[currentImageIndex].filename : null;
391
+ currentResults = Object.keys(grouped).map(filename => ({
392
+ filename,
393
+ num_eggs: grouped[filename].length,
394
+ detections: grouped[filename]
395
+ }));
396
+ resultsTableBody.innerHTML = '';
397
+ currentSortField = null;
398
+ currentSortDirection = 'asc';
399
+ totalPages = Math.ceil(currentResults.length / RESULTS_PER_PAGE);
400
+ currentPage = 1;
401
+ displayResultsPage(currentPage);
402
+ exportCsvBtn.disabled = true;
403
+ exportImagesBtn.disabled = true;
404
+ // Try to restore previous image if it still exists
405
+ let newIndex = 0;
406
+ if (prevFilename) {
407
+ newIndex = currentResults.findIndex(r => r.filename === prevFilename);
408
+ if (newIndex === -1) newIndex = 0;
409
+ }
410
+ currentImageIndex = newIndex;
411
+ if (currentResults.length > 0) displayImage(currentImageIndex);
412
+
413
+ // Enable/disable export buttons based on results
414
+ if (currentResults.length > 0) {
415
+ exportCsvBtn.disabled = false;
416
+ exportImagesBtn.disabled = false;
417
+ } else {
418
+ exportCsvBtn.disabled = true;
419
+ exportImagesBtn.disabled = true;
420
+ }
421
+ }
422
+
423
+ confidenceSlider.addEventListener('input', () => {
424
+ confidenceValue.textContent = confidenceSlider.value;
425
+ updateResultsTable();
426
+ renderConfidencePlot();
427
+ if (currentImageIndex >= 0) displayImage(currentImageIndex);
428
+ });
429
+
430
+ // --- Replace displayImage to use backend-annotated PNG ---
431
+ async function displayImage(index) {
432
+ if (!currentResults[index]) return;
433
+ currentImageIndex = index;
434
+ const result = currentResults[index];
435
+ const filename = result.filename;
436
+ const confidence = parseFloat(confidenceSlider.value);
437
+ try {
438
+ const response = await fetch('/annotate', {
439
+ method: 'POST',
440
+ headers: { 'Content-Type': 'application/json' },
441
+ body: JSON.stringify({ filename, confidence })
442
+ });
443
+ if (response.ok) {
444
+ const blob = await response.blob();
445
+ previewImage.onload = function() {
446
+ updateImageInfo();
447
+ zoomInBtn.disabled = false;
448
+ zoomOutBtn.disabled = false;
449
+ // Calculate which page this image should be on
450
+ const targetPage = Math.floor(index / RESULTS_PER_PAGE) + 1;
451
+ if (currentPage !== targetPage) {
452
+ currentPage = targetPage;
453
+ displayResultsPage(currentPage);
454
+ }
455
+ document.querySelectorAll('.results-table tr').forEach(r => r.classList.remove('selected'));
456
+ const rows = Array.from(resultsTableBody.querySelectorAll('tr'));
457
+ const targetRow = rows.find(row => parseInt(row.dataset.originalIndex, 10) === index);
458
+ if (targetRow) {
459
+ targetRow.classList.add('selected');
460
+ }
461
+ resetPanZoom();
462
+ };
463
+ previewImage.src = URL.createObjectURL(blob);
464
+ previewImage.alt = filename;
465
+ } else {
466
+ clearPreview();
467
+ }
468
+ } catch (e) {
469
+ clearPreview();
470
+ }
471
+ prevBtn.disabled = index <= 0;
472
+ nextBtn.disabled = index >= currentResults.length - 1;
473
+ }
474
+
475
  // --- New Polling Function ---
476
  function pollProgress(jobId) {
477
  if (progressInterval) {
 
799
  }
800
 
801
  // Image Preview
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
802
  function clearPreview() {
803
  previewImage.src = '';
804
  previewImage.alt = 'No image selected';
 
985
  }
986
  }
987
 
988
+ // --- Export Handlers (updated for new workflow) ---
989
+ exportCsvBtn.addEventListener('click', async () => {
990
+ const threshold = parseFloat(confidenceSlider.value);
991
+ const timestamp = new Date().toISOString().replace(/[-:T.]/g, '').slice(0, 15);
992
+ const defaultName = `nemaquant_results_${timestamp}.csv`;
993
+ try {
994
+ const resp = await fetch('/export_csv', {
995
+ method: 'POST',
996
+ headers: { 'Content-Type': 'application/json' },
997
+ body: JSON.stringify({ confidence: threshold })
998
+ });
999
+ if (!resp.ok) throw new Error('Failed to export CSV');
1000
+ const blob = await resp.blob();
1001
+ if ('showSaveFilePicker' in window) {
1002
+ // File System Access API
1003
+ const handle = await window.showSaveFilePicker({
1004
+ suggestedName: defaultName,
1005
+ types: [{ description: 'CSV', accept: { 'text/csv': ['.csv'] } }]
1006
+ });
1007
+ const writable = await handle.createWritable();
1008
+ await writable.write(blob);
1009
+ await writable.close();
1010
+ } else {
1011
+ // Fallback: download link
1012
+ const url = URL.createObjectURL(blob);
1013
+ const a = document.createElement('a');
1014
+ a.href = url;
1015
+ a.download = defaultName;
1016
+ document.body.appendChild(a);
1017
+ a.click();
1018
+ setTimeout(() => { URL.revokeObjectURL(url); a.remove(); }, 1000);
1019
+ }
1020
+ } catch (err) {
1021
+ logStatus('Failed to export CSV: ' + err.message, true);
1022
+ }
1023
  });
1024
 
1025
+ exportImagesBtn.addEventListener('click', async () => {
1026
+ const threshold = parseFloat(confidenceSlider.value);
1027
+ const timestamp = new Date().toISOString().replace(/[-:T.]/g, '').slice(0, 15);
1028
+ const defaultName = `nemaquant_annotated_${timestamp}.zip`;
1029
+ try {
1030
+ logStatus('Preparing annotated images for download...');
1031
+ const resp = await fetch('/export_images', {
1032
+ method: 'POST',
1033
+ headers: { 'Content-Type': 'application/json' },
1034
+ body: JSON.stringify({ confidence: threshold })
1035
+ });
1036
+ if (!resp.ok) throw new Error('Failed to export images');
1037
+ const blob = await resp.blob();
1038
+ if ('showSaveFilePicker' in window) {
1039
+ const handle = await window.showSaveFilePicker({
1040
+ suggestedName: defaultName,
1041
+ types: [{ description: 'ZIP', accept: { 'application/zip': ['.zip'] } }]
1042
+ });
1043
+ const writable = await handle.createWritable();
1044
+ await writable.write(blob);
1045
+ await writable.close();
1046
+ } else {
1047
+ const url = URL.createObjectURL(blob);
1048
+ const a = document.createElement('a');
1049
+ a.href = url;
1050
+ a.download = defaultName;
1051
+ document.body.appendChild(a);
1052
+ a.click();
1053
+ setTimeout(() => { URL.revokeObjectURL(url); a.remove(); }, 1000);
1054
+ }
1055
+ } catch (err) {
1056
+ logStatus('Failed to export images: ' + err.message, true);
1057
+ }
1058
  });
1059
 
1060
  // Add keyboard controls for panning (arrow keys)
 
1117
  imageInfo.innerHTML = infoText;
1118
  }
1119
 
1120
+ // --- Plotly Dot Plot for Confidence Threshold ---
1121
+ function renderConfidencePlot() {
1122
+ const plotDiv = document.getElementById('confidence-plot');
1123
+ if (!allDetections || allDetections.length === 0) {
1124
+ Plotly.purge(plotDiv);
1125
+ plotDiv.style.display = 'none';
1126
+ return;
1127
+ }
1128
+ plotDiv.style.display = '';
1129
+ // Aggregate all detections for all images
1130
+ const bins = [];
1131
+ for (let x = 0.05; x <= 0.951; x += 0.05) {
1132
+ const conf = x;
1133
+ let count = 0;
1134
+ allDetections.forEach(imgResult => {
1135
+ count += imgResult.detections.filter(det => det.score >= conf).length;
1136
+ });
1137
+ bins.push({conf, count});
1138
+ }
1139
+ const xVals = bins.map(b => b.conf);
1140
+ const yVals = bins.map(b => b.count);
1141
+ const currentConf = parseFloat(confidenceSlider.value);
1142
+
1143
+ // Split points and lines by cutoff
1144
+ const leftX = [], leftY = [];
1145
+ const rightX = [], rightY = [];
1146
+ for (let i = 0; i < xVals.length; i++) {
1147
+ if (xVals[i] <= currentConf) {
1148
+ leftX.push(xVals[i]);
1149
+ leftY.push(yVals[i]);
1150
+ } else {
1151
+ rightX.push(xVals[i]);
1152
+ rightY.push(yVals[i]);
1153
+ }
1154
+ }
1155
+
1156
+ // For line segments, we need to split at the cutoff if necessary
1157
+ let splitIndex = xVals.findIndex(x => x > currentConf);
1158
+ if (splitIndex === -1) splitIndex = xVals.length;
1159
+
1160
+ // If the cutoff is between two points, interpolate a point at the cutoff for smooth color transition
1161
+ let interpX = null, interpY = null;
1162
+ if (splitIndex > 0 && splitIndex < xVals.length) {
1163
+ const x0 = xVals[splitIndex - 1], x1 = xVals[splitIndex];
1164
+ const y0 = yVals[splitIndex - 1], y1 = yVals[splitIndex];
1165
+ const t = (currentConf - x0) / (x1 - x0);
1166
+ interpX = currentConf;
1167
+ interpY = y0 + t * (y1 - y0);
1168
+ }
1169
+
1170
+ // Blue trace (left of or on cutoff)
1171
+ const blueTrace = {
1172
+ x: interpX !== null ? [...leftX, interpX] : leftX,
1173
+ y: interpY !== null ? [...leftY, interpY] : leftY,
1174
+ mode: 'markers+lines',
1175
+ marker: {size: 8, color: '#2563eb'},
1176
+ line: {shape: 'linear', color: '#2563eb'},
1177
+ type: 'scatter',
1178
+ hoverinfo: 'x+y',
1179
+ showlegend: false
1180
+ };
1181
+
1182
+ // Grey trace (right of cutoff)
1183
+ const greyTrace = {
1184
+ x: interpX !== null ? [interpX, ...rightX] : rightX,
1185
+ y: interpY !== null ? [interpY, ...rightY] : rightY,
1186
+ mode: 'markers+lines',
1187
+ marker: {size: 8, color: '#bbb'},
1188
+ line: {shape: 'linear', color: '#bbb'},
1189
+ type: 'scatter',
1190
+ hoverinfo: 'x+y',
1191
+ showlegend: false
1192
+ };
1193
+
1194
+ // Vertical line (drawn beneath dots/lines)
1195
+ const layout = {
1196
+ margin: {t: 20, r: 20, l: 40, b: 40},
1197
+ xaxis: {title: 'Threshold', dtick: 0.1, range: [0, 1]},
1198
+ yaxis: {title: 'Total Eggs Detected', rangemode: 'tozero'},
1199
+ showlegend: false,
1200
+ height: 320,
1201
+ shapes: [
1202
+ {
1203
+ type: 'line',
1204
+ x0: currentConf, x1: currentConf,
1205
+ y0: 0, y1: Math.max(...yVals, 1),
1206
+ line: {color: 'red', width: 2, dash: 'dot'},
1207
+ layer: 'below' // Draw beneath traces
1208
+ }
1209
+ ]
1210
+ };
1211
+
1212
+ Plotly.newPlot('confidence-plot', [greyTrace, blueTrace], layout, {
1213
+ displayModeBar: false,
1214
+ responsive: true,
1215
+ staticPlot: true // disables zoom/pan/drag
1216
+ });
1217
+ }
1218
+
1219
+ // Call this after processing completes
1220
+ function onProcessingComplete() {
1221
+ renderConfidencePlot();
1222
+ }
1223
+
1224
  // Custom dialog for selecting among multiple CSV files
1225
  function showCsvSelectionDialog(csvFiles) {
1226
  // Create overlay
 
1402
  }
1403
 
1404
  // Initialize
1405
+ inputMode.selectedIndex = 0; // Reset inputMode to default (first option)
1406
  updateUploadState();
1407
  logStatus('Application ready');
1408
+
1409
+ // Hide plot on page load and after clearing files
1410
+ document.getElementById('confidence-plot').style.display = 'none';
1411
  });
static/style.css CHANGED
@@ -659,4 +659,20 @@ body.processing progress::-moz-progress-bar {
659
 
660
  .results-table th.sort-asc .sort-icon {
661
  transform: translateY(-50%) rotate(180deg);
662
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
659
 
660
  .results-table th.sort-asc .sort-icon {
661
  transform: translateY(-50%) rotate(180deg);
662
+ }
663
+
664
/* Horizontal row pairing the confidence slider with its numeric readout. */
.range-with-value {
    width: 100%;
    display: flex;
    flex-direction: row;
    align-items: center;
    gap: 1em;
}

/* Live numeric readout of the slider value; the fixed min-width keeps the
   slider from shifting as the displayed number changes width. */
#confidence-value {
    font-size: 1.1em;
    min-width: 2.5em;
    text-align: right;
    margin-top: 0;
    align-self: auto;
}
templates/index.html CHANGED
@@ -6,6 +6,7 @@
6
  <title>NemaQuant - Nematode Egg Detection</title>
7
  <link rel="stylesheet" href="{{ url_for('static', filename='style.css') }}">
8
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/remixicon@3.5.0/fonts/remixicon.css">
 
9
  </head>
10
  <body>
11
  <h1>
@@ -48,18 +49,6 @@
48
  <!-- Processing -->
49
  <div class="card compact">
50
  <h2><i class="ri-settings-4-line"></i> Processing</h2>
51
- <div class="form-group">
52
- <label for="confidence-threshold">
53
- Confidence Threshold
54
- <i class="ri-information-line" data-tooltip="Recommended range: 0.4 - 0.7. Higher values produce fewer false positives but more false negatives, while lower values produce fewer false negatives but more false positives."></i>
55
- </label>
56
- <div class="range-with-value">
57
- <input type="range" id="confidence-threshold" name="confidence-threshold"
58
- min="0.05" max="0.95" step="0.05" value="0.6">
59
- <span id="confidence-value">0.6</span>
60
- </div>
61
- </div>
62
-
63
  <div class="progress-container">
64
  <div class="progress-info">
65
  <span id="progress-text">Ready to process</span>
@@ -70,18 +59,57 @@
70
  </div>
71
  <progress id="progress" value="0" max="100"></progress>
72
  </div>
73
-
74
  <button id="start-processing" class="btn-primary">
75
  <i class="ri-play-line"></i>
76
  Start Processing
77
  </button>
78
  </div>
79
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
  <!-- Status Log -->
81
  <div class="card compact">
82
  <h2>
83
  <i class="ri-terminal-box-line"></i>
84
- Processing Log
85
  <button id="clear-log" class="btn-secondary" style="margin-left: auto; padding: 0.25rem 0.5rem;">
86
  <i class="ri-delete-bin-line"></i>
87
  </button>
@@ -172,4 +200,4 @@
172
 
173
  <script src="{{ url_for('static', filename='script.js') }}"></script>
174
  </body>
175
- </html>
 
6
  <title>NemaQuant - Nematode Egg Detection</title>
7
  <link rel="stylesheet" href="{{ url_for('static', filename='style.css') }}">
8
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/remixicon@3.5.0/fonts/remixicon.css">
9
+ <script src="https://cdn.plot.ly/plotly-2.26.0.min.js"></script>
10
  </head>
11
  <body>
12
  <h1>
 
49
  <!-- Processing -->
50
  <div class="card compact">
51
  <h2><i class="ri-settings-4-line"></i> Processing</h2>
 
 
 
 
 
 
 
 
 
 
 
 
52
  <div class="progress-container">
53
  <div class="progress-info">
54
  <span id="progress-text">Ready to process</span>
 
59
  </div>
60
  <progress id="progress" value="0" max="100"></progress>
61
  </div>
 
62
  <button id="start-processing" class="btn-primary">
63
  <i class="ri-play-line"></i>
64
  Start Processing
65
  </button>
66
  </div>
67
 
68
+ <!-- Confidence Threshold -->
69
+ <div class="card compact">
70
+ <h2><i class="ri-equalizer-line"></i> Confidence Threshold
71
+ <i class="ri-information-line"
72
+ data-tooltip="Cutoff value used to filter egg detections.
73
+ Recommended range: 0.4 - 0.7.
74
+ Higher values reduce the number of false positives but produce more false negatives."
75
+ style="margin-left: 0.5rem;"></i>
76
+ </h2>
77
+ <div class="form-group">
78
+ <div class="range-with-value" style="width:100%; display:flex; flex-direction:row; align-items:center; gap:1em;">
79
+ <input type="range" id="confidence-threshold" name="confidence-threshold"
80
+ min="0.05" max="0.95" step="0.05" value="0.6" list="confidence-ticks" style="width:100%;">
81
+ <datalist id="confidence-ticks">
82
+ <option value="0.05" label="0.05"></option>
83
+ <option value="0.10"></option>
84
+ <option value="0.15"></option>
85
+ <option value="0.20"></option>
86
+ <option value="0.25"></option>
87
+ <option value="0.30"></option>
88
+ <option value="0.35"></option>
89
+ <option value="0.40"></option>
90
+ <option value="0.45"></option>
91
+ <option value="0.50" label="0.50"></option>
92
+ <option value="0.55"></option>
93
+ <option value="0.60"></option>
94
+ <option value="0.65"></option>
95
+ <option value="0.70"></option>
96
+ <option value="0.75"></option>
97
+ <option value="0.80"></option>
98
+ <option value="0.85"></option>
99
+ <option value="0.90"></option>
100
+ <option value="0.95" label="0.95"></option>
101
+ </datalist>
102
+ <span id="confidence-value" style="font-size:1.1em; min-width:2.5em; text-align:right;">0.6</span>
103
+ </div>
104
+ </div>
105
+ <div id="confidence-plot" style="margin-top:1.5rem;"></div>
106
+ </div>
107
+
108
  <!-- Status Log -->
109
  <div class="card compact">
110
  <h2>
111
  <i class="ri-terminal-box-line"></i>
112
+ Status Log
113
  <button id="clear-log" class="btn-secondary" style="margin-left: auto; padding: 0.25rem 0.5rem;">
114
  <i class="ri-delete-bin-line"></i>
115
  </button>
 
200
 
201
  <script src="{{ url_for('static', filename='script.js') }}"></script>
202
  </body>
203
+ </html>
templates/plotly_cdn.html ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ <!-- Plotly.js CDN -->
2
+ <script src="https://cdn.plot.ly/plotly-2.26.0.min.js"></script>
yolo_utils.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ultralytics import YOLO
2
+ import cv2
3
+ import numpy as np
4
+
5
def load_model(weights_path):
    """Load a trained YOLO model from a weights file.

    Args:
        weights_path: Path to the trained YOLO weights (e.g. a ``.pt`` file).

    Returns:
        An ultralytics ``YOLO`` model ready for inference.
    """
    model = YOLO(weights_path)
    return model
7
+
8
def detect_image(model, image_bytes, conf=0.05):
    """Run YOLO inference on one encoded image and return its egg detections.

    Args:
        model: Model returned by :func:`load_model`.
        image_bytes: Raw encoded image data (bytes-like, e.g. from a Flask
            file upload).
        conf: Minimum confidence passed to the model; YOLO discards boxes
            below this score. Defaults to 0.05 so callers can re-filter at
            higher thresholds later without re-running inference.

    Returns:
        list[dict]: One dict per detection of class ``'egg'`` with keys
        ``'bbox'`` ([x1, y1, x2, y2] floats), ``'score'`` (float confidence)
        and ``'class'`` (str class name).

    Raises:
        ValueError: If ``image_bytes`` cannot be decoded as an image.
    """
    arr = np.frombuffer(image_bytes, np.uint8)
    img = cv2.imdecode(arr, cv2.IMREAD_COLOR)
    if img is None:
        # cv2.imdecode returns None (no exception) for undecodable data;
        # fail loudly here instead of crashing inside model.predict().
        raise ValueError("Could not decode image data")
    results = model.predict(img, imgsz=1440, max_det=1000, verbose=False, conf=conf)
    boxes = results[0].boxes
    names = results[0].names
    # Pull all box data off the device in one shot rather than indexing the
    # conf/cls tensors per detection.
    coords = boxes.xyxy.cpu().numpy()
    scores = boxes.conf.cpu().numpy()
    class_ids = boxes.cls.cpu().numpy().astype(int)
    detections = []
    for xyxy, score, class_id in zip(coords, scores, class_ids):
        class_name = names[int(class_id)]
        if class_name == 'egg':
            detections.append({
                'bbox': [float(x) for x in xyxy],
                'score': float(score),
                'class': class_name,
            })
    return detections