Spaces:
Running
Running
Pulastya B
committed on
Commit
·
f8fdbd6
1
Parent(s):
8cbfaa1
fix: Add Gemini safety settings and handle blocked responses
Browse files
- Add BLOCK_NONE safety settings for all categories
- Handle StopCandidateException (finish_reason: 12)
- Retry with simplified prompt when response blocked
- Fixes content filter blocking on Render deployment
- Safety settings allow data science content to pass
- src/orchestrator.py +42 -21
src/orchestrator.py
CHANGED
|
@@ -176,9 +176,19 @@ class DataScienceCopilot:
|
|
| 176 |
|
| 177 |
genai.configure(api_key=api_key)
|
| 178 |
self.model = os.getenv("GEMINI_MODEL", "gemini-2.0-flash-exp")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 179 |
self.gemini_model = genai.GenerativeModel(
|
| 180 |
self.model,
|
| 181 |
-
generation_config={"temperature": 0.1}
|
|
|
|
| 182 |
)
|
| 183 |
self.groq_client = None
|
| 184 |
print(f"🤖 Initialized with Gemini provider - Model: {self.model}")
|
|
@@ -1359,28 +1369,39 @@ You are a DOER. Complete workflows based on user intent."""
|
|
| 1359 |
|
| 1360 |
elif self.provider == "gemini":
|
| 1361 |
# Send messages WITHOUT tools parameter (tools already configured on model)
|
| 1362 |
-
|
| 1363 |
-
|
| 1364 |
-
|
| 1365 |
-
|
| 1366 |
-
|
| 1367 |
-
|
| 1368 |
-
|
| 1369 |
-
|
| 1370 |
-
|
| 1371 |
-
|
| 1372 |
-
|
| 1373 |
-
|
| 1374 |
-
|
| 1375 |
-
|
| 1376 |
-
|
|
|
|
|
|
|
| 1377 |
)
|
| 1378 |
-
|
| 1379 |
-
|
| 1380 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1381 |
else:
|
| 1382 |
-
|
| 1383 |
-
response = gemini_chat.send_message("Continue with the next step.")
|
| 1384 |
|
| 1385 |
self.api_calls_made += 1
|
| 1386 |
self.last_api_call_time = time.time()
|
|
|
|
| 176 |
|
| 177 |
genai.configure(api_key=api_key)
|
| 178 |
self.model = os.getenv("GEMINI_MODEL", "gemini-2.0-flash-exp")
|
| 179 |
+
|
| 180 |
+
# Configure safety settings to be more permissive for data science content
|
| 181 |
+
safety_settings = [
|
| 182 |
+
{"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
|
| 183 |
+
{"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
|
| 184 |
+
{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
|
| 185 |
+
{"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
|
| 186 |
+
]
|
| 187 |
+
|
| 188 |
self.gemini_model = genai.GenerativeModel(
|
| 189 |
self.model,
|
| 190 |
+
generation_config={"temperature": 0.1},
|
| 191 |
+
safety_settings=safety_settings
|
| 192 |
)
|
| 193 |
self.groq_client = None
|
| 194 |
print(f"🤖 Initialized with Gemini provider - Model: {self.model}")
|
|
|
|
| 1369 |
|
| 1370 |
elif self.provider == "gemini":
|
| 1371 |
# Send messages WITHOUT tools parameter (tools already configured on model)
|
| 1372 |
+
try:
|
| 1373 |
+
if iteration == 1:
|
| 1374 |
+
# First iteration: send system + user message
|
| 1375 |
+
combined_message = f"{messages[0]['content']}\n\n{messages[1]['content']}"
|
| 1376 |
+
response = gemini_chat.send_message(combined_message)
|
| 1377 |
+
else:
|
| 1378 |
+
# Subsequent iterations: send function responses
|
| 1379 |
+
last_tool_msg = messages[-1]
|
| 1380 |
+
if last_tool_msg.get("role") == "tool":
|
| 1381 |
+
# Send function response back to Gemini
|
| 1382 |
+
from google.ai.generativelanguage_v1beta.types import content as glm_content
|
| 1383 |
+
|
| 1384 |
+
function_response_part = glm_content.Part(
|
| 1385 |
+
function_response=glm_content.FunctionResponse(
|
| 1386 |
+
name=last_tool_msg["name"],
|
| 1387 |
+
response={"result": last_tool_msg["content"]}
|
| 1388 |
+
)
|
| 1389 |
)
|
| 1390 |
+
|
| 1391 |
+
response = gemini_chat.send_message(function_response_part)
|
| 1392 |
+
else:
|
| 1393 |
+
# Fallback
|
| 1394 |
+
response = gemini_chat.send_message("Continue with the next step.")
|
| 1395 |
+
except Exception as gemini_error:
|
| 1396 |
+
# Handle StopCandidateException (finish_reason: 12 = blocked/filtered)
|
| 1397 |
+
error_str = str(gemini_error)
|
| 1398 |
+
if "finish_reason" in error_str or "StopCandidateException" in str(type(gemini_error)):
|
| 1399 |
+
print(f"⚠️ Gemini response blocked (safety filter/content policy). Retrying with simplified prompt...")
|
| 1400 |
+
# Retry with a much shorter message
|
| 1401 |
+
simplified_msg = "Please provide the next step in data analysis using available tools."
|
| 1402 |
+
response = gemini_chat.send_message(simplified_msg)
|
| 1403 |
else:
|
| 1404 |
+
raise
|
|
|
|
| 1405 |
|
| 1406 |
self.api_calls_made += 1
|
| 1407 |
self.last_api_call_time = time.time()
|