Update src/models.py
src/models.py  +17 -5  CHANGED
@@ -18,16 +18,28 @@ claude = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.3", token=anthr
 
 def get_gpt_completion(prompt, system_message):
     try:
-        response = openai.text_generation(
-
+        response = openai.text_generation(
+            model=openai,
+            messages=[
+                {"role": "system", "content": system_message},
+                {"role": "user", "content": prompt}
+            ],
+            stream=False,
+        )
+        return response.choices[0].message.content
     except Exception as e:
         print(f"GPT error: {e}")
         raise
 
 def get_claude_completion(prompt, system_message):
     try:
-        response = claude.text_generation(
-
+        response = claude.text_generation(
+            model=claude,
+            max_tokens=2000,
+            system=system_message,
+            messages=[{"role": "user", "content": prompt}]
+        )
+        return result.content[0].text
     except Exception as e:
         print(f"Claude error: {e}")
-        raise
+        raise
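As committed, the new code still has two problems: InferenceClient.text_generation expects a plain prompt string, not model= and messages= keyword arguments, and get_claude_completion returns result, which is never assigned (the call is stored in response). A minimal sketch of how the two helpers could be written instead, not part of this commit, assuming a recent huggingface_hub release that provides InferenceClient.chat_completion (which does accept an OpenAI-style messages list) and the openai and claude client instances created at the top of src/models.py:

def get_gpt_completion(prompt, system_message):
    try:
        # chat_completion takes a chat-style messages list and uses the model
        # the InferenceClient was constructed with, so no model= argument here.
        response = openai.chat_completion(
            messages=[
                {"role": "system", "content": system_message},
                {"role": "user", "content": prompt},
            ],
            stream=False,
        )
        return response.choices[0].message.content
    except Exception as e:
        print(f"GPT error: {e}")
        raise

def get_claude_completion(prompt, system_message):
    try:
        # The system prompt goes in as a "system" message; max_tokens caps the reply.
        response = claude.chat_completion(
            messages=[
                {"role": "system", "content": system_message},
                {"role": "user", "content": prompt},
            ],
            max_tokens=2000,
        )
        # Return the variable that was actually assigned, not an undefined "result".
        return response.choices[0].message.content
    except Exception as e:
        print(f"Claude error: {e}")
        raise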