Update app.py
app.py CHANGED

@@ -121,7 +121,24 @@ def ask_llm(user_message, model='llama-3.3-70b-versatile', system_prompt="You ar
     return response.choices[0].message.content
 
 def ask_ollama(user_message, model='llama-3.3-70b-versatile', system_prompt=search_prompt):
-
+    client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
+
+    response = client.chat.completions.create(
+        model=model,
+        messages=[
+            {
+                "role": "system",
+                "content": system_prompt
+            },
+            {
+                "role": "user",
+                "content": user_message
+            }
+        ],
+        stream=False,
+    )
+
+    ai_reply = response.choices[0].message.content
     print(f"AI REPLY json:\n{ai_reply}")
 
     # Process the response to ensure we return valid JSON
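For context, a minimal usage sketch of the new ask_ollama helper as it could be called from elsewhere in app.py. It assumes GROQ_API_KEY is set in the environment and that search_prompt (the default system prompt) is defined earlier in the file; the example message is a placeholder, and the final return value depends on the JSON post-processing that follows this hunk.

# Minimal usage sketch, not part of the commit: call the new helper from
# elsewhere in app.py. Assumes GROQ_API_KEY is exported and search_prompt
# is defined above, as the diff implies.
reply = ask_ollama(
    "Summarize the latest release notes.",   # placeholder user message
    model="llama-3.3-70b-versatile",         # same default model as the diff
)
# reply holds whatever the function returns after the "valid JSON" processing
# that follows this hunk; the raw completion is also printed inside it.
print(reply)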