kaitongg commited on
Commit
6222e95
·
verified ·
1 Parent(s): de1976d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -5
app.py CHANGED
@@ -85,8 +85,20 @@ GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
85
 
86
  if GEMINI_API_KEY:
87
  genai.configure(api_key=GEMINI_API_KEY)
88
- gemini_model = genai.GenerativeModel('gemini-1.5-flash')
89
- print("✓ Gemini API initialized successfully!")
 
 
 
 
 
 
 
 
 
 
 
 
90
  else:
91
  gemini_model = None
92
  print("⚠️ Warning: GEMINI_API_KEY not found in environment variables")
@@ -245,11 +257,36 @@ def generate_feedback_from_prompt(prompt_input: str) -> str:
245
  top_p=0.9,
246
  )
247
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
248
  response = gemini_model.generate_content(
249
  prompt_input,
250
- generation_config=generation_config
 
251
  )
252
 
 
 
 
 
253
  llm_response_text = response.text.strip()
254
 
255
  # Post-processing: Remove meta-commentary
@@ -265,8 +302,18 @@ def generate_feedback_from_prompt(prompt_input: str) -> str:
265
  return llm_response_text
266
 
267
  except Exception as e:
268
- print(f"❌ Error during Gemini interaction: {e}")
269
- return f"Error generating feedback: {str(e)}"
 
 
 
 
 
 
 
 
 
 
270
 
271
  # ============================================
272
  # 9. GRADIO INTERFACE
 
# Module-level Gemini client setup.
# Defines `gemini_model`: a genai.GenerativeModel on success, or None when the
# API key is missing or no candidate model is actually reachable.
if GEMINI_API_KEY:
    genai.configure(api_key=GEMINI_API_KEY)
    # NOTE: genai.GenerativeModel(name) is a lazy constructor — it performs no
    # network call and does not raise for an unknown or retired model name, so
    # wrapping the constructor alone in try/except cannot detect an
    # unavailable model (the failure would only surface later, inside
    # generate_content). We therefore probe each candidate with a minimal
    # count_tokens() request, which does hit the API and raises (e.g. 404)
    # for models the key cannot access.
    gemini_model = None
    # Prefer the current model; 'gemini-pro' (legacy name) is kept only as a
    # last-resort fallback since it 404s on current API versions.
    for _model_name in ('gemini-1.5-flash-latest', 'gemini-pro'):
        try:
            _candidate = genai.GenerativeModel(_model_name)
            _candidate.count_tokens("ping")  # cheap availability probe
            gemini_model = _candidate
            print(f"✓ Gemini API initialized successfully with {_model_name}!")
            break
        except Exception as e:
            print(f"Failed to load {_model_name}: {e}")
else:
    gemini_model = None
    print("⚠️ Warning: GEMINI_API_KEY not found in environment variables")
 
257
  top_p=0.9,
258
  )
259
 
260
+ # Add safety settings to prevent blocking
261
+ safety_settings = [
262
+ {
263
+ "category": "HARM_CATEGORY_HARASSMENT",
264
+ "threshold": "BLOCK_NONE"
265
+ },
266
+ {
267
+ "category": "HARM_CATEGORY_HATE_SPEECH",
268
+ "threshold": "BLOCK_NONE"
269
+ },
270
+ {
271
+ "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
272
+ "threshold": "BLOCK_NONE"
273
+ },
274
+ {
275
+ "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
276
+ "threshold": "BLOCK_NONE"
277
+ }
278
+ ]
279
+
280
  response = gemini_model.generate_content(
281
  prompt_input,
282
+ generation_config=generation_config,
283
+ safety_settings=safety_settings
284
  )
285
 
286
+ # Check if response was blocked
287
+ if not response.text:
288
+ return f"⚠️ Response blocked by safety filters. Prompt feedback: {response.prompt_feedback}"
289
+
290
  llm_response_text = response.text.strip()
291
 
292
  # Post-processing: Remove meta-commentary
 
302
  return llm_response_text
303
 
304
  except Exception as e:
305
+ error_msg = str(e)
306
+ print(f"Error during Gemini interaction: {error_msg}")
307
+
308
+ # Provide helpful error messages
309
+ if "404" in error_msg or "not found" in error_msg:
310
+ return f"⚠️ Model not available. Please check your API key and model access. Error: {error_msg}"
311
+ elif "API key" in error_msg:
312
+ return "⚠️ Invalid API key. Please check your GEMINI_API_KEY in Spaces secrets."
313
+ elif "quota" in error_msg.lower():
314
+ return "⚠️ API quota exceeded. Please try again later or upgrade your API plan."
315
+ else:
316
+ return f"❌ Error generating feedback: {error_msg}"
317
 
318
  # ============================================
319
  # 9. GRADIO INTERFACE