Spaces:
Sleeping
Sleeping
Delete gpt_request.py
Browse files- gpt_request.py +0 -26
gpt_request.py
DELETED
|
@@ -1,26 +0,0 @@
|
|
| 1 |
-
# Third-party clients and retry helpers.
from openai import OpenAI
from tenacity import retry, wait_random_exponential, stop_after_attempt

# Standard library.
import random
import json
import os

# Shared OpenAI client; reads credentials (OPENAI_API_KEY) from the environment.
client = OpenAI()
|
| 8 |
-
|
| 9 |
-
# openai sent
|
| 10 |
-
# Send one chat request to the OpenAI API, retrying transient failures.
@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
def chat_completion_request(messages, tools=None, tool_choice=None, model="gpt-3.5-turbo-0125", temperature=0.6):
    """Call the OpenAI chat-completions endpoint and parse its JSON reply.

    Args:
        messages: Chat history in the OpenAI messages format.
        tools: Optional tool/function definitions passed through to the API.
        tool_choice: Optional tool-selection directive passed through to the API.
        model: Model name (default "gpt-3.5-turbo-0125").
        temperature: Sampling temperature (default 0.6).

    Returns:
        A 3-tuple: (parsed JSON object from the reply content,
        completion token count, prompt token count).

    Raises:
        Exception: re-raised after logging so the @retry decorator can act;
        after 3 failed attempts tenacity propagates the failure to the caller.
    """
    try:
        response = client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            tools=tools,
            tool_choice=tool_choice,
            # Force a JSON-object reply so json.loads below is safe.
            response_format={"type": "json_object"},
        )
        return (
            json.loads(response.choices[0].message.content),
            response.usage.completion_tokens,
            response.usage.prompt_tokens,
        )
    except Exception as e:
        print("Unable to generate ChatCompletion response")
        print(f"Exception: {e}")
        # Re-raise instead of `return e`: returning the exception object broke
        # tuple-unpacking at call sites and prevented tenacity from ever
        # retrying, since no exception escaped the function body.
        raise
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|