hkai20000 commited on
Commit
f66fde9
·
verified ·
1 Parent(s): d6e33e8

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +305 -299
main.py CHANGED
@@ -1,299 +1,305 @@
1
- from fastapi import FastAPI, UploadFile, File, Form
2
- from fastapi.responses import JSONResponse
3
- from fastapi.middleware.cors import CORSMiddleware
4
- from transformers import pipeline, AutoTokenizer, AutoModelForTokenClassification
5
- from doctr.io import DocumentFile
6
- from doctr.models import ocr_predictor
7
- import cv2
8
- import numpy as np
9
- from PIL import Image
10
- import io
11
- from typing import Dict, Any, Optional
12
-
13
- app = FastAPI(title="ScanAssured OCR & NER API")
14
-
15
- # Enable CORS for Flutter app
16
- app.add_middleware(
17
- CORSMiddleware,
18
- allow_origins=["*"],
19
- allow_credentials=True,
20
- allow_methods=["*"],
21
- allow_headers=["*"],
22
- )
23
-
24
- # --- OCR PRESETS ---
25
- OCR_PRESETS = {
26
- "high_accuracy": {
27
- "det": "db_resnet50",
28
- "reco": "crnn_vgg16_bn",
29
- "name": "High Accuracy",
30
- "description": "Best quality, slower processing"
31
- },
32
- "balanced": {
33
- "det": "db_resnet50",
34
- "reco": "crnn_mobilenet_v3_small",
35
- "name": "Balanced (Recommended)",
36
- "description": "Good quality and speed"
37
- },
38
- "fast": {
39
- "det": "db_mobilenet_v3_large",
40
- "reco": "crnn_mobilenet_v3_small",
41
- "name": "Fast",
42
- "description": "Fastest processing, slightly lower quality"
43
- },
44
- }
45
-
46
- OCR_DETECTION_MODELS = ["db_resnet50", "db_mobilenet_v3_large", "linknet_resnet18"]
47
- OCR_RECOGNITION_MODELS = ["crnn_vgg16_bn", "crnn_mobilenet_v3_small", "parseq"]
48
-
49
- # --- NER MODELS ---
50
- NER_MODELS = {
51
- "alvaroalon2/biobert_chemical_ner": {
52
- "name": "Chemicals & Diseases",
53
- "description": "Identifies chemical compounds and disease names",
54
- "entities": ["CHEM", "DIS"]
55
- },
56
- "d4data/biomedical-ner-all": {
57
- "name": "Comprehensive Biomedical",
58
- "description": "80+ biomedical entity types including genes, proteins, cells",
59
- "entities": ["GENE", "PROTEIN", "CELL", "DISEASE", "CHEMICAL", "SPECIES", "PATHWAY"]
60
- },
61
- "samrawal/bert-base-uncased_clinical-ner": {
62
- "name": "Clinical Notes",
63
- "description": "Optimized for clinical/medical notes",
64
- "entities": ["PROBLEM", "TREATMENT", "TEST"]
65
- },
66
- "ukkendane/bert-finetuned-ner-bio": {
67
- "name": "Biomedical General",
68
- "description": "General biomedical entities from research papers",
69
- "entities": ["GENE", "PROTEIN", "DNA", "RNA", "CELL_LINE", "CELL_TYPE"]
70
- },
71
- }
72
-
73
- # --- GLOBAL MODEL CACHES ---
74
- ner_model_cache: Dict[str, Any] = {}
75
- ocr_model_cache: Dict[str, Any] = {}
76
-
77
- # --- OCR MODEL LOADING ---
78
- def get_ocr_predictor(det_arch: str, reco_arch: str):
79
- """Retrieves a loaded OCR predictor from cache or loads it if necessary."""
80
- cache_key = f"{det_arch}_{reco_arch}"
81
-
82
- if cache_key in ocr_model_cache:
83
- print(f"Using cached OCR model: {cache_key}")
84
- return ocr_model_cache[cache_key]
85
-
86
- try:
87
- print(f"Loading OCR model: det={det_arch}, reco={reco_arch}...")
88
- predictor = ocr_predictor(
89
- det_arch=det_arch,
90
- reco_arch=reco_arch,
91
- pretrained=True,
92
- assume_straight_pages=True
93
- )
94
- ocr_model_cache[cache_key] = predictor
95
- print(f"OCR model {cache_key} loaded successfully!")
96
- return predictor
97
- except Exception as e:
98
- print(f"ERROR: Failed to load OCR model {cache_key}: {e}")
99
- return None
100
-
101
- # --- NER MODEL LOADING ---
102
- def get_ner_pipeline(model_id: str):
103
- """Retrieves a loaded NER pipeline from cache or loads it if necessary."""
104
- if model_id not in NER_MODELS:
105
- raise ValueError(f"Unknown NER model ID: {model_id}")
106
-
107
- if model_id in ner_model_cache:
108
- print(f"Using cached NER model: {model_id}")
109
- return ner_model_cache[model_id]
110
-
111
- try:
112
- print(f"Loading NER model: {model_id}...")
113
- tokenizer = AutoTokenizer.from_pretrained(model_id)
114
- model = AutoModelForTokenClassification.from_pretrained(model_id)
115
-
116
- ner_pipeline = pipeline(
117
- "ner",
118
- model=model,
119
- tokenizer=tokenizer,
120
- aggregation_strategy="simple"
121
- )
122
- ner_model_cache[model_id] = ner_pipeline
123
- print(f"NER model {model_id} loaded successfully!")
124
- return ner_pipeline
125
- except Exception as e:
126
- print(f"ERROR: Failed to load NER model {model_id}: {e}")
127
- return None
128
-
129
- # --- IMAGE PREPROCESSING ---
130
- def deskew_image(image: np.ndarray) -> np.ndarray:
131
- """Deskew image using projection profile method."""
132
- try:
133
- gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if len(image.shape) == 3 else image
134
- edges = cv2.Canny(gray, 50, 150, apertureSize=3)
135
- lines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold=100, minLineLength=100, maxLineGap=10)
136
-
137
- if lines is not None and len(lines) > 0:
138
- angles = []
139
- for line in lines:
140
- x1, y1, x2, y2 = line[0]
141
- angle = np.arctan2(y2 - y1, x2 - x1) * 180 / np.pi
142
- if abs(angle) < 45:
143
- angles.append(angle)
144
-
145
- if angles:
146
- median_angle = np.median(angles)
147
- if abs(median_angle) > 0.5:
148
- (h, w) = image.shape[:2]
149
- center = (w // 2, h // 2)
150
- M = cv2.getRotationMatrix2D(center, median_angle, 1.0)
151
- rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
152
- return rotated
153
-
154
- return image
155
- except Exception as e:
156
- print(f"Deskew warning: {e}")
157
- return image
158
-
159
- def preprocess_for_doctr(file_content: bytes) -> np.ndarray:
160
- """Automatic preprocessing pipeline optimized for docTR."""
161
- nparr = np.frombuffer(file_content, np.uint8)
162
- img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
163
-
164
- if img is None:
165
- raise ValueError("Failed to decode image")
166
-
167
- img = deskew_image(img)
168
-
169
- lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
170
- clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
171
- lab[:, :, 0] = clahe.apply(lab[:, :, 0])
172
- img = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
173
-
174
- img = cv2.fastNlMeansDenoisingColored(img, None, 6, 6, 7, 21)
175
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
176
-
177
- return img
178
-
179
- def basic_cleanup(text: str) -> str:
180
- """Clean up OCR text for NER processing."""
181
- text = " ".join(text.split())
182
- return text
183
-
184
- # --- FastAPI Routes ---
185
-
186
- @app.get("/")
187
- async def root():
188
- """Health check endpoint."""
189
- return {"status": "running", "message": "ScanAssured OCR & NER API"}
190
-
191
- @app.get("/models")
192
- async def get_available_models():
193
- """Return all available OCR and NER models."""
194
- return {
195
- "ocr_presets": [
196
- {
197
- "id": preset_id,
198
- "name": preset_data["name"],
199
- "description": preset_data["description"]
200
- }
201
- for preset_id, preset_data in OCR_PRESETS.items()
202
- ],
203
- "ocr_detection_models": OCR_DETECTION_MODELS,
204
- "ocr_recognition_models": OCR_RECOGNITION_MODELS,
205
- "ner_models": {
206
- model_id: {
207
- "name": model_data["name"],
208
- "description": model_data["description"],
209
- "entities": model_data["entities"]
210
- }
211
- for model_id, model_data in NER_MODELS.items()
212
- }
213
- }
214
-
215
- @app.post("/process")
216
- async def process_image(
217
- file: UploadFile = File(...),
218
- ner_model_id: str = Form(...),
219
- ocr_preset: str = Form("balanced"),
220
- ocr_det_model: Optional[str] = Form(None),
221
- ocr_reco_model: Optional[str] = Form(None),
222
- ):
223
- """Process an image with OCR and NER."""
224
-
225
- # Determine OCR models
226
- if ocr_det_model and ocr_reco_model:
227
- det_arch = ocr_det_model
228
- reco_arch = ocr_reco_model
229
- else:
230
- preset = OCR_PRESETS.get(ocr_preset, OCR_PRESETS["balanced"])
231
- det_arch = preset["det"]
232
- reco_arch = preset["reco"]
233
-
234
- # Validate NER model
235
- if ner_model_id not in NER_MODELS:
236
- return JSONResponse(
237
- status_code=400,
238
- content={"detail": f"Unknown NER model: {ner_model_id}"}
239
- )
240
-
241
- # Get OCR predictor
242
- ocr_predictor_instance = get_ocr_predictor(det_arch, reco_arch)
243
- if not ocr_predictor_instance:
244
- return JSONResponse(
245
- status_code=503,
246
- content={"detail": f"Failed to load OCR model: {det_arch}/{reco_arch}"}
247
- )
248
-
249
- # Get NER pipeline
250
- ner_pipeline = get_ner_pipeline(ner_model_id)
251
- if not ner_pipeline:
252
- return JSONResponse(
253
- status_code=503,
254
- content={"detail": f"Failed to load NER model: {ner_model_id}"}
255
- )
256
-
257
- try:
258
- # Read and preprocess image
259
- file_content = await file.read()
260
- preprocessed_img = preprocess_for_doctr(file_content)
261
-
262
- # Perform OCR with docTR
263
- print("Running docTR OCR...")
264
- doc = DocumentFile.from_images([preprocessed_img])
265
- result = ocr_predictor_instance(doc)
266
- raw_text = result.render()
267
- cleaned_text = basic_cleanup(raw_text)
268
-
269
- print(f"OCR Text (first 200 chars): {cleaned_text[:200]}...")
270
-
271
- # Perform NER
272
- print("Running NER...")
273
- entities = ner_pipeline(cleaned_text)
274
-
275
- # Filter and structure entities
276
- structured_entities = []
277
- for entity in entities:
278
- if entity.get('score', 0.0) > 0.6:
279
- structured_entities.append({
280
- 'entity_group': entity['entity_group'],
281
- 'score': float(entity['score']),
282
- 'word': entity['word'].strip(),
283
- })
284
-
285
- return {
286
- "cleaned_text": cleaned_text,
287
- "medical_entities": structured_entities,
288
- "model_id": NER_MODELS[ner_model_id]["name"],
289
- "ocr_model": f"{det_arch} + {reco_arch}"
290
- }
291
-
292
- except Exception as e:
293
- print(f"Processing error: {e}")
294
- import traceback
295
- traceback.print_exc()
296
- return JSONResponse(
297
- status_code=500,
298
- content={"detail": f"An error occurred during processing: {str(e)}"}
299
- )
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, UploadFile, File, Form
2
+ from fastapi.responses import JSONResponse
3
+ from fastapi.middleware.cors import CORSMiddleware
4
+ from transformers import pipeline, AutoTokenizer, AutoModelForTokenClassification
5
+ from doctr.io import DocumentFile
6
+ from doctr.models import ocr_predictor
7
+ import cv2
8
+ import numpy as np
9
+ from PIL import Image
10
+ import io
11
+ from typing import Dict, Any, Optional
12
+
# FastAPI application instance; the title appears in the auto-generated docs.
app = FastAPI(title="ScanAssured OCR & NER API")

# Enable CORS for Flutter app
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# very permissive — consider restricting origins in production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
23
+
# --- OCR PRESETS ---
# Named docTR model combinations exposed to clients. Each preset maps to a
# detection architecture ("det") and a recognition architecture ("reco")
# accepted by doctr.models.ocr_predictor; "name"/"description" are for UI display.
OCR_PRESETS = {
    "high_accuracy": {
        "det": "db_resnet50",
        "reco": "crnn_vgg16_bn",
        "name": "High Accuracy",
        "description": "Best quality, slower processing"
    },
    "balanced": {
        "det": "db_resnet50",
        "reco": "crnn_mobilenet_v3_small",
        "name": "Balanced (Recommended)",
        "description": "Good quality and speed"
    },
    "fast": {
        "det": "db_mobilenet_v3_large",
        "reco": "crnn_mobilenet_v3_small",
        "name": "Fast",
        "description": "Fastest processing, slightly lower quality"
    },
}

# Individual docTR architectures selectable when the client overrides presets
# via the ocr_det_model / ocr_reco_model form fields of /process.
OCR_DETECTION_MODELS = ["db_resnet50", "db_mobilenet_v3_large", "linknet_resnet18"]
OCR_RECOGNITION_MODELS = ["crnn_vgg16_bn", "crnn_mobilenet_v3_small", "parseq"]
48
+
# --- NER MODELS ---
# Catalog of supported Hugging Face token-classification models, keyed by
# hub model id. "entities" lists the entity groups each model is expected
# to emit (display metadata for the client; not enforced by the server).
NER_MODELS = {
    "alvaroalon2/biobert_chemical_ner": {
        "name": "Chemicals & Diseases",
        "description": "Identifies chemical compounds and disease names",
        "entities": ["CHEM", "DIS"]
    },
    "d4data/biomedical-ner-all": {
        "name": "Comprehensive Biomedical",
        "description": "80+ biomedical entity types including genes, proteins, cells",
        "entities": ["GENE", "PROTEIN", "CELL", "DISEASE", "CHEMICAL", "SPECIES", "PATHWAY"]
    },
    "samrawal/bert-base-uncased_clinical-ner": {
        "name": "Clinical Notes",
        "description": "Optimized for clinical/medical notes",
        "entities": ["PROBLEM", "TREATMENT", "TEST"]
    },
    "ukkendane/bert-finetuned-ner-bio": {
        "name": "Biomedical General",
        "description": "General biomedical entities from research papers",
        "entities": ["GENE", "PROTEIN", "DNA", "RNA", "CELL_LINE", "CELL_TYPE"]
    },
}
72
+
# --- GLOBAL MODEL CACHES ---
# Loaded models are kept for the process lifetime so repeated requests do
# not re-download or re-initialize weights.
ner_model_cache: Dict[str, Any] = {}  # model_id -> transformers NER pipeline
ocr_model_cache: Dict[str, Any] = {}  # "{det}_{reco}" key -> docTR predictor
76
+
77
# --- OCR MODEL LOADING ---
def get_ocr_predictor(det_arch: str, reco_arch: str):
    """Return a docTR OCR predictor for the given architectures, cached.

    The predictor is loaded (pretrained weights) on first use and stored
    in ocr_model_cache under a "{det}_{reco}" key. Returns None when
    loading fails.
    """
    cache_key = f"{det_arch}_{reco_arch}"

    cached = ocr_model_cache.get(cache_key)
    if cached is not None:
        print(f"Using cached OCR model: {cache_key}")
        return cached

    try:
        print(f"Loading OCR model: det={det_arch}, reco={reco_arch}...")
        predictor = ocr_predictor(
            det_arch=det_arch,
            reco_arch=reco_arch,
            pretrained=True,
            assume_straight_pages=True,
        )
    except Exception as e:
        # Loading can fail on bad arch names or download problems; callers
        # translate a None result into an HTTP 503.
        print(f"ERROR: Failed to load OCR model {cache_key}: {e}")
        return None

    ocr_model_cache[cache_key] = predictor
    print(f"OCR model {cache_key} loaded successfully!")
    return predictor
100
+
101
# --- NER MODEL LOADING ---
def get_ner_pipeline(model_id: str):
    """Return a token-classification pipeline for *model_id*, cached.

    Raises ValueError for ids not listed in NER_MODELS. Returns None when
    the model fails to download or initialize.
    """
    if model_id not in NER_MODELS:
        raise ValueError(f"Unknown NER model ID: {model_id}")

    cached = ner_model_cache.get(model_id)
    if cached is not None:
        print(f"Using cached NER model: {model_id}")
        return cached

    try:
        print(f"Loading NER model: {model_id}...")
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForTokenClassification.from_pretrained(model_id)

        # aggregation_strategy="simple" merges sub-word tokens into whole
        # entity spans with an "entity_group" key.
        ner_pipeline = pipeline(
            "ner",
            model=model,
            tokenizer=tokenizer,
            aggregation_strategy="simple",
        )
    except Exception as e:
        # A None result is translated into an HTTP 503 by the caller.
        print(f"ERROR: Failed to load NER model {model_id}: {e}")
        return None

    ner_model_cache[model_id] = ner_pipeline
    print(f"NER model {model_id} loaded successfully!")
    return ner_pipeline
128
+
129
# --- IMAGE PREPROCESSING ---
def deskew_image(image: np.ndarray) -> np.ndarray:
    """Deskew an image using the median angle of detected Hough lines.

    (Fixed docstring: the previous one claimed a "projection profile
    method", but the implementation is Hough-transform based.)

    Detects line segments with a probabilistic Hough transform on Canny
    edges, takes the median angle of the near-horizontal ones, and rotates
    the image to counteract skew greater than 0.5 degrees. Returns the
    input unchanged when no correction is needed or detection fails.
    """
    try:
        # Work in grayscale; the input may already be single-channel.
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if len(image.shape) == 3 else image
        edges = cv2.Canny(gray, 50, 150, apertureSize=3)
        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=100, minLineLength=100, maxLineGap=10)

        if lines is not None and len(lines) > 0:
            angles = []
            for line in lines:
                x1, y1, x2, y2 = line[0]
                angle = np.arctan2(y2 - y1, x2 - x1) * 180 / np.pi
                # Keep only near-horizontal segments; document text lines
                # are expected to be roughly horizontal.
                if abs(angle) < 45:
                    angles.append(angle)

            if angles:
                median_angle = np.median(angles)
                # Skip tiny corrections: warping slightly blurs the image.
                if abs(median_angle) > 0.5:
                    (h, w) = image.shape[:2]
                    center = (w // 2, h // 2)
                    M = cv2.getRotationMatrix2D(center, median_angle, 1.0)
                    rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
                    return rotated

        return image
    except Exception as e:
        # Deskewing is best-effort; on any failure fall back to the original.
        print(f"Deskew warning: {e}")
        return image
158
+
159
def preprocess_for_doctr(file_content: bytes) -> np.ndarray:
    """Automatic preprocessing pipeline optimized for docTR.

    Decodes raw image bytes, deskews, boosts local contrast, denoises,
    and converts BGR -> RGB. Returns an RGB uint8 array.

    Raises:
        ValueError: if the bytes cannot be decoded as an image.
    """
    nparr = np.frombuffer(file_content, np.uint8)
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

    if img is None:
        raise ValueError("Failed to decode image")

    # Straighten skewed scans before any further processing.
    img = deskew_image(img)

    # Contrast-limited adaptive histogram equalization (CLAHE) on the LAB
    # lightness channel only, so colors are preserved while local contrast
    # improves.
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    lab[:, :, 0] = clahe.apply(lab[:, :, 0])
    img = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)

    # Remove scan/sensor noise, then convert to RGB channel order for the
    # downstream PIL/docTR consumers (OpenCV decodes as BGR).
    img = cv2.fastNlMeansDenoisingColored(img, None, 6, 6, 7, 21)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    return img
178
+
179
def basic_cleanup(text: str) -> str:
    """Normalize whitespace: collapse any run of whitespace (including
    newlines/tabs) into a single space and strip the ends."""
    return " ".join(text.split())
183
+
184
# --- FastAPI Routes ---

@app.get("/")
async def root():
    """Health check endpoint."""
    return dict(status="running", message="ScanAssured OCR & NER API")
190
+
191
@app.get("/models")
async def get_available_models():
    """Return all available OCR and NER models.

    The response mirrors the module-level catalogs: OCR presets (id plus
    display metadata), the raw architecture lists, and the NER model
    catalog keyed by hub model id.
    """
    presets = []
    for preset_id, preset_data in OCR_PRESETS.items():
        presets.append({
            "id": preset_id,
            "name": preset_data["name"],
            "description": preset_data["description"],
        })

    ner_catalog = {}
    for model_id, model_data in NER_MODELS.items():
        ner_catalog[model_id] = {
            "name": model_data["name"],
            "description": model_data["description"],
            "entities": model_data["entities"],
        }

    return {
        "ocr_presets": presets,
        "ocr_detection_models": OCR_DETECTION_MODELS,
        "ocr_recognition_models": OCR_RECOGNITION_MODELS,
        "ner_models": ner_catalog,
    }
214
+
215
@app.post("/process")
async def process_image(
    file: UploadFile = File(...),
    ner_model_id: str = Form(...),
    ocr_preset: str = Form("balanced"),
    ocr_det_model: Optional[str] = Form(None),
    ocr_reco_model: Optional[str] = Form(None),
):
    """Process an image with OCR and NER.

    Pipeline: preprocess the uploaded image, run docTR OCR, clean the
    recognized text, then extract entities with the selected Hugging Face
    NER model.

    Form fields:
        file: the image upload to process.
        ner_model_id: key into NER_MODELS selecting the NER pipeline.
        ocr_preset: key into OCR_PRESETS; unknown values fall back to
            "balanced". Ignored when both explicit models are given.
        ocr_det_model / ocr_reco_model: explicit docTR architectures;
            BOTH must be provided to override the preset.

    Returns:
        JSON with the cleaned OCR text, entities filtered to score > 0.6,
        and the model names used. Errors map to 400 (bad NER id),
        503 (model failed to load), or 500 (processing failure).
    """

    # Determine OCR models: an explicit det+reco pair overrides the preset.
    if ocr_det_model and ocr_reco_model:
        det_arch = ocr_det_model
        reco_arch = ocr_reco_model
    else:
        preset = OCR_PRESETS.get(ocr_preset, OCR_PRESETS["balanced"])
        det_arch = preset["det"]
        reco_arch = preset["reco"]

    # Validate NER model before doing any heavy loading.
    if ner_model_id not in NER_MODELS:
        return JSONResponse(
            status_code=400,
            content={"detail": f"Unknown NER model: {ner_model_id}"}
        )

    # Get OCR predictor (cached; None signals a load failure).
    ocr_predictor_instance = get_ocr_predictor(det_arch, reco_arch)
    if not ocr_predictor_instance:
        return JSONResponse(
            status_code=503,
            content={"detail": f"Failed to load OCR model: {det_arch}/{reco_arch}"}
        )

    # Get NER pipeline (cached; None signals a load failure).
    ner_pipeline = get_ner_pipeline(ner_model_id)
    if not ner_pipeline:
        return JSONResponse(
            status_code=503,
            content={"detail": f"Failed to load NER model: {ner_model_id}"}
        )

    try:
        # Read and preprocess image (deskew, CLAHE, denoise, BGR->RGB).
        file_content = await file.read()
        preprocessed_img = preprocess_for_doctr(file_content)

        # Perform OCR with docTR
        print("Running docTR OCR...")
        # Convert numpy array to bytes for docTR
        pil_img = Image.fromarray(preprocessed_img)
        img_byte_arr = io.BytesIO()
        pil_img.save(img_byte_arr, format='PNG')
        img_bytes = img_byte_arr.getvalue()

        doc = DocumentFile.from_images([img_bytes])
        result = ocr_predictor_instance(doc)
        raw_text = result.render()
        cleaned_text = basic_cleanup(raw_text)

        print(f"OCR Text (first 200 chars): {cleaned_text[:200]}...")

        # Perform NER
        print("Running NER...")
        entities = ner_pipeline(cleaned_text)

        # Filter and structure entities; drop low-confidence hits.
        structured_entities = []
        for entity in entities:
            if entity.get('score', 0.0) > 0.6:
                structured_entities.append({
                    'entity_group': entity['entity_group'],
                    # cast to plain float so the value is JSON-serializable
                    'score': float(entity['score']),
                    'word': entity['word'].strip(),
                })

        return {
            "cleaned_text": cleaned_text,
            "medical_entities": structured_entities,
            "model_id": NER_MODELS[ner_model_id]["name"],
            "ocr_model": f"{det_arch} + {reco_arch}"
        }

    except Exception as e:
        # Catch-all boundary: log the traceback and return a 500 payload.
        print(f"Processing error: {e}")
        import traceback
        traceback.print_exc()
        return JSONResponse(
            status_code=500,
            content={"detail": f"An error occurred during processing: {str(e)}"}
        )