BMCVRN committed on
Commit
0c0cda3
·
verified ·
1 Parent(s): 7e9fed7

Fixed users sharing the same thread

Browse files
Files changed (1) hide show
  1. demo.py +68 -47
demo.py CHANGED
@@ -1,47 +1,68 @@
1
- from openai import OpenAI, AssistantEventHandler
2
- from dotenv import load_dotenv
3
- import os
4
- import gradio as gr
5
-
6
- # Load environment variables from .env file
7
- load_dotenv()
8
-
9
- # Load env variables
10
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
11
- ASSISTANT_ID = os.getenv("ASSISTANT_ID")
12
-
13
- client = OpenAI(api_key=OPENAI_API_KEY)
14
- history = []
15
-
16
- thread = client.beta.threads.create()
17
-
18
- def predict(user_message, history):
19
- # Append the new user message to the history
20
- history.append({"role": "user", "content": user_message})
21
-
22
- # Send the user message to the OpenAI API
23
- client.beta.threads.messages.create(
24
- thread_id=thread.id,
25
- role="user",
26
- content=user_message
27
- )
28
-
29
- response = []
30
- # Stream the assistant's response
31
- with client.beta.threads.runs.stream(
32
- thread_id=thread.id,
33
- assistant_id=ASSISTANT_ID
34
- ) as stream:
35
- for event in stream:
36
- if event.event == "thread.message.delta" and event.data.delta.content:
37
- assistant_message = event.data.delta.content[0].text
38
- response.append(assistant_message.value)
39
- yield {"role": "assistant", "content": ''.join(response)}
40
-
41
- # Append the assistant's response to the history
42
- history.append({"role": "assistant", "content": ''.join(response)})
43
-
44
- # Launch the Gradio chat interface
45
- gr.ChatInterface(predict, type="messages", title="Hair Library Shopping Assistant Demo").launch(share=True)
46
-
47
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from openai import OpenAI, AssistantEventHandler
2
+ from dotenv import load_dotenv
3
+ import os
4
+ import gradio as gr
5
+
6
+ # Load environment variables from .env file
7
+ load_dotenv()
8
+
9
+ # Load env variables
10
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
11
+ ASSISTANT_ID = os.getenv("ASSISTANT_ID")
12
+
13
+ client = OpenAI(api_key=OPENAI_API_KEY)
14
+
def create_thread():
    """Create and return a fresh OpenAI Assistants thread.

    Used as a gr.State factory so every browser session gets its own
    thread instead of all users sharing one conversation.
    """
    new_thread = client.beta.threads.create()
    return new_thread
def predict(user_message, history, thread):
    """Stream the assistant's reply to *user_message* on the session's thread.

    Parameters
    ----------
    user_message : str
        The text the user just submitted.
    history : list[dict]
        Chat history in Gradio "messages" format ({"role", "content"} dicts).
        It already contains the new user turn (appended by the input handler).
    thread : Thread
        The per-session OpenAI Assistants thread held in gr.State, so that
        concurrent users do not share a conversation.

    Yields
    ------
    tuple[list[dict], Thread]
        The updated chat history and the (unchanged) thread, emitted
        incrementally while the response streams, then once more when done.
    """
    # Record the user's message on the per-session thread.
    client.beta.threads.messages.create(
        thread_id=thread.id,
        role="user",
        content=user_message
    )

    response = []
    # Stream the assistant's response delta-by-delta.
    with client.beta.threads.runs.stream(
        thread_id=thread.id,
        assistant_id=ASSISTANT_ID
    ) as stream:
        for event in stream:
            if event.event == "thread.message.delta" and event.data.delta.content:
                assistant_message = event.data.delta.content[0].text
                response.append(assistant_message.value)
                # BUG FIX: yield the FULL history plus the partial assistant
                # turn.  The previous code yielded only a one-element list
                # containing the assistant message, which replaced the whole
                # chatbot value and blanked earlier messages (including the
                # user's own turn) until the final yield restored them.
                yield history + [{"role": "assistant", "content": ''.join(response)}], thread

    # Append the completed assistant turn and emit the final history.
    history.append({"role": "assistant", "content": ''.join(response)})
    yield history, thread
# Launch the Gradio chat interface.  Every piece of per-user state lives in
# gr.State: the OpenAI thread (created per session via the create_thread
# factory) is what fixes users sharing the same conversation.
with gr.Blocks(title="Hair Library Shopping Assistant Demo") as demo:
    chatbot = gr.Chatbot(type='messages', label="Hair Library Shopping Assistant Demo")
    msg = gr.Textbox(placeholder="Type your message here...")
    user_content = gr.State("")
    thread = gr.State(create_thread)  # per-session thread, not module-level
    history = gr.State([])
    submit = gr.Button("Submit")

    def user_input(user_message, history):
        """Clear the textbox, append the user's turn to the chat, and stash
        the raw message text for the follow-up predict() call.

        Returns (new_textbox_value, new_chatbot_value, user_content_state).
        """
        # Return user_message directly; the previous version copied it into a
        # redundant local first, which added nothing.
        return "", history + [{"role": "user", "content": user_message}], user_message

    # Enter key and Submit button run the same two-step pipeline:
    # 1) record the user turn and clear the box, 2) stream the reply.
    msg.submit(user_input, [msg, chatbot], [msg, chatbot, user_content], queue=False).then(
        predict, [user_content, chatbot, thread], [chatbot, thread]
    )
    submit.click(user_input, [msg, chatbot], [msg, chatbot, user_content], queue=False).then(
        predict, [user_content, chatbot, thread], [chatbot, thread]
    )

demo.launch(share=False)