Commit view — "Update src/models.py"
File changed: src/models.py (+2 lines, −14 lines)
|
@@ -38,13 +38,7 @@ def hf_inference_request(token, model, prompt, max_new_tokens=200):
|
|
| 38 |
|
| 39 |
def get_gpt_completion(prompt, system_message):
    """Request a GPT completion for *prompt* guided by *system_message*.

    Joins the system message and prompt with a newline and forwards the
    combined text to ``hf_inference_request`` using the OpenAI credentials
    and model configured at module level.

    Args:
        prompt: User prompt; coerced to ``str`` if it is not one already.
        system_message: System instruction; likewise coerced to ``str``.

    Returns:
        The raw response from ``hf_inference_request``, or ``None`` when the
        request fails for any reason (the error is printed, not raised).
    """
    try:
        # str() is a no-op on str input, so the former isinstance guards
        # were redundant; coerce unconditionally.
        system_message = str(system_message)
        prompt = str(prompt)
        full_prompt = f"{system_message}\n{prompt}"
        # NOTE(review): openai_api_key / OPENAI_MODEL are module-level
        # globals defined elsewhere in this file — confirm before reuse.
        response = hf_inference_request(openai_api_key, OPENAI_MODEL, full_prompt, max_new_tokens=200)
        return response
    except Exception as e:
        # Best-effort: report and degrade to None rather than crash the caller.
        print(f"GPT error: {e}")
        return None  # explicit, previously implicit
|
|
@@ -52,13 +46,7 @@ def get_gpt_completion(prompt, system_message):
|
|
| 52 |
|
| 53 |
def get_claude_completion(prompt, system_message):
    """Request a Claude completion for *prompt* guided by *system_message*.

    Joins the system message and prompt with a newline and forwards the
    combined text to ``hf_inference_request`` using the Anthropic
    credentials and model configured at module level.

    Args:
        prompt: User prompt; coerced to ``str`` if it is not one already.
        system_message: System instruction; likewise coerced to ``str``.

    Returns:
        The raw response from ``hf_inference_request``, or ``None`` when the
        request fails for any reason (the error is printed, not raised).
    """
    try:
        # str() is a no-op on str input, so the former isinstance guards
        # were redundant; coerce unconditionally.
        system_message = str(system_message)
        prompt = str(prompt)
        full_prompt = f"{system_message}\n{prompt}"
        # NOTE(review): anthropic_api_key / CLAUDE_MODEL are module-level
        # globals defined elsewhere in this file — confirm before reuse.
        response = hf_inference_request(anthropic_api_key, CLAUDE_MODEL, full_prompt, max_new_tokens=200)
        return response
    except Exception as e:
        # Best-effort: report and degrade to None rather than crash the caller.
        print(f"Claude error: {e}")
        return None  # explicit, previously implicit
|
|
|
|
| 38 |
|
| 39 |
def get_gpt_completion(prompt, system_message):
    """Request a GPT completion for *prompt* guided by *system_message*.

    Builds a single newline-joined prompt from the system message and user
    prompt and sends it via ``openai.text_generation``.

    Args:
        prompt: User prompt text.
        system_message: System instruction prepended to the prompt.

    Returns:
        The raw response from ``openai.text_generation``, or ``None`` when
        the request fails for any reason (the error is printed, not raised).
    """
    try:
        # NOTE(review): `openai` is presumably a module-level inference
        # client (e.g. huggingface_hub.InferenceClient) — confirm; it is
        # not defined in this block.
        full_prompt = f"{system_message}\n{prompt}"
        response = openai.text_generation(prompt=full_prompt, max_new_tokens=200)
        return response
    except Exception as e:
        # Best-effort: report and degrade to None rather than crash the caller.
        print(f"GPT error: {e}")
        return None  # explicit, previously implicit
|
|
|
|
| 46 |
|
| 47 |
def get_claude_completion(prompt, system_message):
    """Request a Claude completion for *prompt* guided by *system_message*.

    Builds a single newline-joined prompt from the system message and user
    prompt and sends it via ``claude.text_generation``.

    Args:
        prompt: User prompt text.
        system_message: System instruction prepended to the prompt.

    Returns:
        The raw response from ``claude.text_generation``, or ``None`` when
        the request fails for any reason (the error is printed, not raised).
    """
    try:
        # NOTE(review): `claude` is presumably a module-level inference
        # client (e.g. huggingface_hub.InferenceClient) — confirm; it is
        # not defined in this block.
        full_prompt = f"{system_message}\n{prompt}"
        response = claude.text_generation(prompt=full_prompt, max_new_tokens=200)
        return response
    except Exception as e:
        # Best-effort: report and degrade to None rather than crash the caller.
        print(f"Claude error: {e}")
        return None  # explicit, previously implicit
|