whymath committed on
Commit
33018a6
·
1 Parent(s): 3c5a8b3

Switch to using LangChain Runnable for the OpenAI client

Browse files
Files changed (1) hide show
  1. app.py +38 -28
app.py CHANGED
@@ -5,6 +5,7 @@ import utils
5
 
6
  from openai import AsyncOpenAI
7
  import time
 
8
 
9
 
10
  load_dotenv()
@@ -14,9 +15,15 @@ start_msg = "Teach2Learn Virtual Student by Jerry Chiang and Yohan Mathew\n\nYou
14
  # instructions = "You are a helpful assistant"
15
  instructions = "You are a virtual student being taught by the user. You can ask clarifying questions to better understand the user's explanation. Your goal is to ensure that the user understands the concept they are explaining. You can also ask questions to help the user elaborate on their explanation. You can ask questions like 'Can you explain that in simpler terms?' or 'Can you provide an example?'."
16
  client = AsyncOpenAI()
17
- assistant = client.beta.assistants.create(
 
 
 
 
 
18
  name="T2L Virtual Student",
19
  instructions=instructions,
 
20
  model="gpt-3.5-turbo",
21
  )
22
  print("assistant =", assistant)
@@ -49,7 +56,7 @@ async def start_chat():
49
 
50
  # Send a welcome message with an action button
51
  actions = [
52
- cl.Action(name="upload_pdf", value="upload_pdf_value", label="Upload a PDF")
53
  ]
54
  await cl.Message(content=start_msg, actions=actions).send()
55
 
@@ -76,36 +83,39 @@ async def main(message: cl.Message):
76
  print("Using RAQA chain to generate response")
77
  query_response = raqa_chain.invoke({"question" : user_query})
78
  query_answer = query_response["response"].content
79
- print('query_answer =', query_answer)
80
  else:
81
  print("Using OpenAI assistant to generate response")
82
- message = client.beta.threads.messages.create(
83
- thread_id=thread.id,
84
- role="user",
85
- content=user_query
86
- )
87
- print("message =", message)
88
- run = client.beta.threads.runs.create(
89
- thread_id=thread.id,
90
- assistant_id=assistant.id,
91
- instructions=instructions
92
- )
93
- print("run =", run)
94
- while run.status == "in_progress" or run.status == "queued":
95
- time.sleep(1)
96
- run = client.beta.threads.runs.retrieve(
97
- thread_id=thread.id,
98
- run_id=run.id
99
- )
100
- print("run.status =", run.status)
101
- messages = client.beta.threads.messages.list(
102
- thread_id=thread.id
103
- )
104
- # print("messages =", messages)
105
- print("messages.data =", messages.data)
106
- query_answer = messages.data[0].content
 
 
 
107
 
108
  # Create and send the message stream
 
109
  msg = cl.Message(content=query_answer)
110
  await msg.send()
111
 
 
5
 
6
  from openai import AsyncOpenAI
7
  import time
8
+ from langchain.agents.openai_assistant import OpenAIAssistantRunnable
9
 
10
 
11
  load_dotenv()
 
15
  # instructions = "You are a helpful assistant"
16
  instructions = "You are a virtual student being taught by the user. You can ask clarifying questions to better understand the user's explanation. Your goal is to ensure that the user understands the concept they are explaining. You can also ask questions to help the user elaborate on their explanation. You can ask questions like 'Can you explain that in simpler terms?' or 'Can you provide an example?'."
17
  client = AsyncOpenAI()
18
+ # assistant = client.beta.assistants.create(
19
+ # name="T2L Virtual Student",
20
+ # instructions=instructions,
21
+ # model="gpt-3.5-turbo",
22
+ # )
23
+ assistant = OpenAIAssistantRunnable.create_assistant(
24
  name="T2L Virtual Student",
25
  instructions=instructions,
26
+ # tools=[{"type": "code_interpreter"}],
27
  model="gpt-3.5-turbo",
28
  )
29
  print("assistant =", assistant)
 
56
 
57
  # Send a welcome message with an action button
58
  actions = [
59
+ cl.Action(name="upload_pdf", value="upload_pdf_value", label="Upload a PDF", description="Upload a PDF")
60
  ]
61
  await cl.Message(content=start_msg, actions=actions).send()
62
 
 
83
  print("Using RAQA chain to generate response")
84
  query_response = raqa_chain.invoke({"question" : user_query})
85
  query_answer = query_response["response"].content
 
86
  else:
87
  print("Using OpenAI assistant to generate response")
88
+ # message = client.beta.threads.messages.create(
89
+ # thread_id=thread.id,
90
+ # role="user",
91
+ # content=user_query
92
+ # )
93
+ # print("message =", message)
94
+ # run = client.beta.threads.runs.create(
95
+ # thread_id=thread.id,
96
+ # assistant_id=assistant.id,
97
+ # instructions=instructions
98
+ # )
99
+ # print("run =", run)
100
+ # while run.status == "in_progress" or run.status == "queued":
101
+ # time.sleep(1)
102
+ # run = client.beta.threads.runs.retrieve(
103
+ # thread_id=thread.id,
104
+ # run_id=run.id
105
+ # )
106
+ # print("run.status =", run.status)
107
+ # messages = client.beta.threads.messages.list(
108
+ # thread_id=thread.id
109
+ # )
110
+ # # print("messages =", messages)
111
+ # print("messages.data =", messages.data)
112
+ # query_answer = messages.data[0].content
113
+
114
+ query_response = assistant.invoke({"content": user_query})
115
+ query_answer = query_response["response"].content
116
 
117
  # Create and send the message stream
118
+ print('query_answer =', query_answer)
119
  msg = cl.Message(content=query_answer)
120
  await msg.send()
121