Spaces:
Running
Running
Pulastya B
committed on
Commit
·
eeeb8b7
1
Parent(s):
f8fdbd6
fix: Remove system_instruction parameter from chat endpoint
Browse files
- system_instruction not supported in google-generativeai SDK
- Prepend system message to first user message instead
- Add safety settings to chat endpoint
- Fixes: TypeError unexpected keyword argument 'system_instruction'
- Chat endpoint now works with current Gemini SDK
- src/api/app.py +21 -3
src/api/app.py
CHANGED
|
@@ -378,18 +378,36 @@ async def chat(request: ChatRequest) -> JSONResponse:
|
|
| 378 |
logger.info(f"Configuring Gemini with API key (length: {len(api_key)})")
|
| 379 |
genai.configure(api_key=api_key)
|
| 380 |
|
| 381 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 382 |
model = genai.GenerativeModel(
|
| 383 |
model_name=os.getenv("GEMINI_MODEL", "gemini-2.5-flash-lite"),
|
| 384 |
-
|
|
|
|
| 385 |
)
|
| 386 |
|
|
|
|
|
|
|
|
|
|
| 387 |
# Convert messages to Gemini format (exclude system message, just conversation)
|
| 388 |
chat_history = []
|
|
|
|
| 389 |
for msg in request.messages[:-1]: # Exclude the latest message
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 390 |
chat_history.append({
|
| 391 |
"role": "user" if msg.role == "user" else "model",
|
| 392 |
-
"parts": [
|
| 393 |
})
|
| 394 |
|
| 395 |
# Start chat with history
|
|
|
|
| 378 |
logger.info(f"Configuring Gemini with API key (length: {len(api_key)})")
|
| 379 |
genai.configure(api_key=api_key)
|
| 380 |
|
| 381 |
+
# Safety settings for data science content
|
| 382 |
+
safety_settings = [
|
| 383 |
+
{"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
|
| 384 |
+
{"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
|
| 385 |
+
{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
|
| 386 |
+
{"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
|
| 387 |
+
]
|
| 388 |
+
|
| 389 |
+
# Initialize Gemini model (system_instruction not supported in this SDK version)
|
| 390 |
model = genai.GenerativeModel(
|
| 391 |
model_name=os.getenv("GEMINI_MODEL", "gemini-2.5-flash-lite"),
|
| 392 |
+
generation_config={"temperature": 0.7},
|
| 393 |
+
safety_settings=safety_settings
|
| 394 |
)
|
| 395 |
|
| 396 |
+
# System message will be prepended to first user message
|
| 397 |
+
system_msg = "You are a Senior Data Science Autonomous Agent. You help users with end-to-end machine learning, data profiling, visualization, and strategic insights. Use a professional, technical yet accessible tone. Provide code snippets in Python if requested. You have access to tools for data analysis, ML training, visualization, and more.\\n\\n"
|
| 398 |
+
|
| 399 |
# Convert messages to Gemini format (exclude system message, just conversation)
|
| 400 |
chat_history = []
|
| 401 |
+
first_user_msg = True
|
| 402 |
for msg in request.messages[:-1]: # Exclude the latest message
|
| 403 |
+
content = msg.content
|
| 404 |
+
# Prepend system instruction to first user message
|
| 405 |
+
if first_user_msg and msg.role == "user":
|
| 406 |
+
content = system_msg + content
|
| 407 |
+
first_user_msg = False
|
| 408 |
chat_history.append({
|
| 409 |
"role": "user" if msg.role == "user" else "model",
|
| 410 |
+
"parts": [content]
|
| 411 |
})
|
| 412 |
|
| 413 |
# Start chat with history
|