Hassan73 committed on
Commit
486bedb
·
verified ·
1 Parent(s): 294bc3c

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -13
app.py CHANGED
@@ -39,29 +39,35 @@ def read_root():
39
  @app.post("/analyze")
40
  async def analyze_image(
41
  prompt: str = Form("Describe this medical image and give a preliminary analysis."),
42
- file: UploadFile = File(...)
43
  ):
44
  if pipe is None:
45
  return {"error": "Model not loaded properly. Check logs."}
46
 
47
- # Read the uploaded image
48
- contents = await file.read()
49
- image = Image.open(io.BytesIO(contents)).convert("RGB")
50
 
51
- # Format messages for MedGemma
52
  messages = [
53
  {
54
  "role": "system",
55
- "content": [{"type": "text", "text": "You are an expert radiologist and medical consultant."}]
56
- },
57
- {
58
- "role": "user",
59
- "content": [
60
- {"type": "text", "text": prompt},
61
- {"type": "image", "image": image}
62
- ]
63
  }
64
  ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
 
66
  # Inference
67
  output = pipe(text=messages, max_new_tokens=250)
 
39
  @app.post("/analyze")
40
  async def analyze_image(
41
  prompt: str = Form("Describe this medical image and give a preliminary analysis."),
42
+ file: UploadFile = File(None)
43
  ):
44
  if pipe is None:
45
  return {"error": "Model not loaded properly. Check logs."}
46
 
47
+ # Format messages for MedGemma with Arabic instructions
48
+ system_prompt = "أنت خبير طبي ومستشار رقمي. يجب أن تكون إجابتك باللغة العربية بشكل أساسي. إذا وجدت مصطلحات طبية معقدة أو كلمات ليس لها ترجمة شائعة، فاذكرها بالإنجليزية بين أقواس. قدم إجابة علمية دقيقة بناءً على المعطيات."
 
49
 
 
50
  messages = [
51
  {
52
  "role": "system",
53
+ "content": [{"type": "text", "text": system_prompt}]
 
 
 
 
 
 
 
54
  }
55
  ]
56
+
57
+ # Handle image if provided
58
+ user_content = [{"type": "text", "text": prompt}]
59
+ if file is not None and file.filename != "":
60
+ try:
61
+ contents = await file.read()
62
+ image = Image.open(io.BytesIO(contents)).convert("RGB")
63
+ user_content.append({"type": "image", "image": image})
64
+ except Exception as e:
65
+ return {"error": f"Failed to process image: {str(e)}"}
66
+
67
+ messages.append({
68
+ "role": "user",
69
+ "content": user_content
70
+ })
71
 
72
  # Inference
73
  output = pipe(text=messages, max_new_tokens=250)