Spaces:
Sleeping
Sleeping
Alexander Hux
committed on
Commit
·
5229c2b
1
Parent(s):
31fbac4
Update app.py
Browse files
app.py
CHANGED
|
@@ -14,11 +14,11 @@ def load_conversation():
|
|
| 14 |
return json.load(f)
|
| 15 |
except FileNotFoundError:
|
| 16 |
return []
|
|
|
|
| 17 |
messages = load_conversation()
|
| 18 |
|
| 19 |
if not messages:
|
| 20 |
-
messages.append({"role": "system", "content": "You are a
|
| 21 |
-
|
| 22 |
|
| 23 |
def CustomChatGPT(user_input):
|
| 24 |
messages.append({"role": "user", "content": user_input})
|
|
@@ -26,19 +26,13 @@ def CustomChatGPT(user_input):
|
|
| 26 |
# Ensure the conversation fits within the model's maximum token limit
|
| 27 |
conversation = messages[-4096:]
|
| 28 |
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
except openai.error.RequestError as e:
|
| 37 |
-
# Code to run if the exception is raised
|
| 38 |
-
print(f"Received error from OpenAI: {e}")
|
| 39 |
-
return "I'm sorry, but I'm unable to generate a response at this time."
|
| 40 |
-
|
| 41 |
-
except openai.error.RequestError as e:
|
| 42 |
print(f"Received error from OpenAI: {e}")
|
| 43 |
return "I'm sorry, but I'm unable to generate a response at this time."
|
| 44 |
|
|
@@ -52,7 +46,7 @@ except openai.error.RequestError as e:
|
|
| 52 |
interface = gr.Interface(fn=CustomChatGPT,
|
| 53 |
inputs="textbox",
|
| 54 |
outputs="textbox",
|
| 55 |
-
title="
|
| 56 |
-
description="
|
| 57 |
|
| 58 |
interface.launch()
|
|
|
|
| 14 |
return json.load(f)
|
| 15 |
except FileNotFoundError:
|
| 16 |
return []
|
| 17 |
+
|
| 18 |
messages = load_conversation()
|
| 19 |
|
| 20 |
if not messages:
|
| 21 |
+
messages.append({"role": "system", "content": "You are a knowledgeable assistant specialized in recruiting and hiring, and familiar with ADP Workforce Now Recruitment and various hiring and CRM tools."})
|
|
|
|
| 22 |
|
| 23 |
def CustomChatGPT(user_input):
|
| 24 |
messages.append({"role": "user", "content": user_input})
|
|
|
|
| 26 |
# Ensure the conversation fits within the model's maximum token limit
|
| 27 |
conversation = messages[-4096:]
|
| 28 |
|
| 29 |
+
try:
|
| 30 |
+
response = openai.ChatCompletion.create(
|
| 31 |
+
model="gpt-3.5-turbo",
|
| 32 |
+
messages=conversation,
|
| 33 |
+
max_tokens=1000,
|
| 34 |
+
temperature=0.7)
|
| 35 |
+
except openai.api_resources.request_error.RequestError as e:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
print(f"Received error from OpenAI: {e}")
|
| 37 |
return "I'm sorry, but I'm unable to generate a response at this time."
|
| 38 |
|
|
|
|
| 46 |
interface = gr.Interface(fn=CustomChatGPT,
|
| 47 |
inputs="textbox",
|
| 48 |
outputs="textbox",
|
| 49 |
+
title="VIP-GPT",
|
| 50 |
+
description="Chat with a specialized assistant that can answer questions about recruiting, hiring, and various HR and CRM tools. Developed by A. Leschik.")
|
| 51 |
|
| 52 |
interface.launch()
|