Spaces:
Sleeping
Sleeping
Update src/llm_client.py
Browse files — src/llm_client.py (+1 −1)
src/llm_client.py
CHANGED
|
@@ -27,7 +27,7 @@ def ask_llm(query, context, mode="Executive Summary", model_provider="Gemini"):
|
|
| 27 |
|
| 28 |
# NEW SYNTAX: Call generate_content via the 'models' attribute
|
| 29 |
response = client.models.generate_content(
|
| 30 |
-
model='gemini-
|
| 31 |
contents=full_prompt,
|
| 32 |
config=types.GenerateContentConfig(
|
| 33 |
system_instruction=system_instruction,
|
|
|
|
| 27 |
|
| 28 |
# NEW SYNTAX: Call generate_content via the 'models' attribute
|
| 29 |
response = client.models.generate_content(
|
| 30 |
+
model='gemini-2.0-flash',  # or another Gemini model (e.g. 'gemini-2.5-flash') if available to you
|
| 31 |
contents=full_prompt,
|
| 32 |
config=types.GenerateContentConfig(
|
| 33 |
system_instruction=system_instruction,
|