nusaibah0110 committed on
Commit
bd8d470
·
1 Parent(s): cba5d7a

Add model listing and try more compatible model names for v1beta API

Browse files
Files changed (1) hide show
  1. backend/app.py +28 -15
backend/app.py CHANGED
@@ -74,6 +74,16 @@ class SPAStaticFiles(StaticFiles):
74
  @app.get("/health")
75
  async def health_check():
76
  """Health check endpoint"""
 
 
 
 
 
 
 
 
 
 
77
  return {
78
  "status": "healthy",
79
  "service": "Pathora Colposcopy API",
@@ -83,7 +93,8 @@ async def health_check():
83
  },
84
  "llm": {
85
  "gemini_available": GEMINI_AVAILABLE,
86
- "api_key_configured": bool(GEMINI_API_KEY)
 
87
  }
88
  }
89
 
@@ -119,8 +130,13 @@ Be professional, evidence-based, and concise."""
119
 
120
  # Try different model names for compatibility
121
  model_names = [
122
- "gemini-1.5-flash",
123
- "gemini-1.5-flash-latest",
 
 
 
 
 
124
  "gemini-pro",
125
  ]
126
 
@@ -200,20 +216,13 @@ async def generate_report_endpoint(request: ReportGenerationRequest):
200
  detail="Gemini AI is not available. Install google-generativeai package."
201
  )
202
 
203
- # Re-fetch API key on each request (for HF Spaces)
204
- api_key = os.getenv("GEMINI_API_KEY") or os.getenv("VITE_GEMINI_API_KEY")
205
-
206
- if not api_key:
207
  raise HTTPException(
208
  status_code=503,
209
- detail="GEMINI_API_KEY not configured. Please add it to HF Space secrets or .env file."
210
  )
211
 
212
  try:
213
- # Reconfigure Gemini with fresh API key
214
- genai.configure(api_key=api_key)
215
- print(f"✅ Gemini configured for report generation with key: {api_key[:10]}...")
216
-
217
  # Use system prompt or default
218
  system_prompt = request.system_prompt or """You are an expert colposcopy AI assistant acting as a specialist gynaecologist.
219
  Analyse ALL the clinical data and the attached colposcopy images to generate a professional, evidence-based colposcopy report conclusion."""
@@ -239,10 +248,14 @@ Analyse ALL the clinical data and the attached colposcopy images to generate a p
239
 
240
  # Try different model names for compatibility
241
  model_names = [
242
- "gemini-1.5-flash",
243
- "gemini-1.5-flash-latest",
 
 
 
 
 
244
  "gemini-pro",
245
- "models/gemini-1.5-flash",
246
  ]
247
 
248
  response_text = None
 
74
  @app.get("/health")
75
  async def health_check():
76
  """Health check endpoint"""
77
+ available_models = []
78
+ if GEMINI_AVAILABLE and GEMINI_API_KEY:
79
+ try:
80
+ # List available Gemini models
81
+ for model in genai.list_models():
82
+ if 'generateContent' in model.supported_generation_methods:
83
+ available_models.append(model.name)
84
+ except Exception as e:
85
+ print(f"⚠️ Could not list models: {e}")
86
+
87
  return {
88
  "status": "healthy",
89
  "service": "Pathora Colposcopy API",
 
93
  },
94
  "llm": {
95
  "gemini_available": GEMINI_AVAILABLE,
96
+ "api_key_configured": bool(GEMINI_API_KEY),
97
+ "available_models": available_models
98
  }
99
  }
100
 
 
130
 
131
  # Try different model names for compatibility
132
  model_names = [
133
+ "models/gemini-1.5-pro-latest",
134
+ "models/gemini-1.5-pro",
135
+ "models/gemini-1.0-pro-latest",
136
+ "models/gemini-1.0-pro",
137
+ "models/gemini-pro",
138
+ "gemini-1.5-pro-latest",
139
+ "gemini-1.5-pro",
140
  "gemini-pro",
141
  ]
142
 
 
216
  detail="Gemini AI is not available. Install google-generativeai package."
217
  )
218
 
219
+ if not GEMINI_API_KEY:
 
 
 
220
  raise HTTPException(
221
  status_code=503,
222
+ detail="GEMINI_API_KEY not configured in environment variables"
223
  )
224
 
225
  try:
 
 
 
 
226
  # Use system prompt or default
227
  system_prompt = request.system_prompt or """You are an expert colposcopy AI assistant acting as a specialist gynaecologist.
228
  Analyse ALL the clinical data and the attached colposcopy images to generate a professional, evidence-based colposcopy report conclusion."""
 
248
 
249
  # Try different model names for compatibility
250
  model_names = [
251
+ "models/gemini-1.5-pro-latest",
252
+ "models/gemini-1.5-pro",
253
+ "models/gemini-1.0-pro-latest",
254
+ "models/gemini-1.0-pro",
255
+ "models/gemini-pro",
256
+ "gemini-1.5-pro-latest",
257
+ "gemini-1.5-pro",
258
  "gemini-pro",
 
259
  ]
260
 
261
  response_text = None