jess committed on
Commit
0316613
·
1 Parent(s): 2fc09e8

add: hacky agentic chatbot

Browse files
Files changed (2) hide show
  1. agent_chat_example.py +3 -3
  2. sample_chat.py +51 -27
agent_chat_example.py CHANGED
@@ -65,13 +65,13 @@ image_generation_tool = Tool.from_space( # type: ignore
65
  api_name="/infer",
66
  )
67
 
68
- testing_tool = GenerateQuestionsTool()
69
 
70
- question_generator = Tool.from_gradio(testing_tool)
71
 
72
  llm_engine = HfApiEngine("Qwen/Qwen2.5-Coder-32B-Instruct")
73
  # Initialize the agent with both tools and engine
74
- agent = ReactCodeAgent(tools=[question_generator], llm_engine=llm_engine)
75
 
76
 
77
  def interact_with_agent(prompt, history):
 
65
  api_name="/infer",
66
  )
67
 
68
+ # testing_tool = GenerateQuestionsTool()
69
 
70
+ # question_generator = Tool.from_gradio(testing_tool)
71
 
72
  llm_engine = HfApiEngine("Qwen/Qwen2.5-Coder-32B-Instruct")
73
  # Initialize the agent with both tools and engine
74
+ agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)
75
 
76
 
77
  def interact_with_agent(prompt, history):
sample_chat.py CHANGED
@@ -1,12 +1,19 @@
1
  import gradio as gr
2
  from gradio import ChatMessage
3
  import time
 
 
 
 
4
 
5
- sleep_time = 0.2
 
 
6
 
7
  # ... existing code ...
8
 
9
- def get_client_information_questions():
 
10
  """Return client information gathering questions."""
11
  return """
12
  # Client Information Gathering Questions
@@ -33,13 +40,16 @@ def get_client_information_questions():
33
  11. Are there any parts of the process that are particularly time-consuming/ prone to error?
34
  """
35
 
36
- def simulate_thinking_chat(message, history):
 
 
37
  start_time = time.time()
38
  response = ChatMessage(
39
  content="",
40
  metadata={"title": "_Thinking_ step-by-step", "id": 0, "status": "pending"}
41
  )
42
- yield response
 
43
 
44
  thoughts = [
45
  "First, I need to understand the core aspects of the query...",
@@ -48,43 +58,57 @@ def simulate_thinking_chat(message, history):
48
  "Finally, structuring the response for clarity and completeness..."
49
  ]
50
 
 
 
 
 
51
  accumulated_thoughts = ""
52
- for thought in thoughts:
53
- time.sleep(sleep_time)
 
 
 
 
54
  accumulated_thoughts += f"- {thought}\n\n"
55
  response.content = accumulated_thoughts.strip()
56
- yield response
 
 
 
 
57
 
58
  response.metadata["status"] = "done"
59
  response.metadata["duration"] = time.time() - start_time
60
- yield response
 
61
 
62
- # Prepare the final response list
63
  response_list = [
64
  response,
65
- ChatMessage(
66
- content=get_client_information_questions()
67
- )
68
  ]
69
  print(f"Message: {message},\n Len: {len(history)}, \nHistory: {history}")
70
 
71
- # Print the response list to the console
72
- # print(response_list)
73
-
74
- yield response_list
75
-
76
- chatbot = gr.Chatbot(height=650 ,elem_classes=["chatbot-container"])
77
 
 
78
 
79
  with gr.Blocks(fill_height=True) as demo:
80
- gr.ChatInterface(
81
- simulate_thinking_chat,
82
- title="Thinking LLM Chat Interface 🤔",
83
- type="messages",
84
- fill_height=True,
85
- chatbot= chatbot,
86
- # show_progress= 'minimal',
87
- # save_history= True
88
- )
 
 
 
 
 
 
 
89
 
90
  demo.launch()
 
1
  import gradio as gr
2
  from gradio import ChatMessage
3
  import time
4
+ import asyncio
5
+ from functools import partial
6
+ import random
7
+ import logging
8
 
9
+ logging.basicConfig(level=logging.INFO)
10
+
11
+ sleep_time = random.randint(1, 3)
12
 
13
  # ... existing code ...
14
 
15
+ async def get_client_information_questions():
16
+ await asyncio.sleep(10) # Convert to async sleep
17
  """Return client information gathering questions."""
18
  return """
19
  # Client Information Gathering Questions
 
40
  11. Are there any parts of the process that are particularly time-consuming/ prone to error?
41
  """
42
 
43
+ async def simulate_thinking_chat(message, history):
44
+ logging.info(f"Received message: {message}")
45
+ logging.info(f"Initial history: {history}")
46
  start_time = time.time()
47
  response = ChatMessage(
48
  content="",
49
  metadata={"title": "_Thinking_ step-by-step", "id": 0, "status": "pending"}
50
  )
51
+ yield response, ""
52
+ # yield response
53
 
54
  thoughts = [
55
  "First, I need to understand the core aspects of the query...",
 
58
  "Finally, structuring the response for clarity and completeness..."
59
  ]
60
 
61
+ # Create a task for getting client information
62
+ client_info_task = asyncio.create_task(get_client_information_questions())
63
+
64
+ # Keep showing thoughts until client_info_task is done
65
  accumulated_thoughts = ""
66
+ thought_index = 0
67
+ while not client_info_task.done():
68
+ thought = thoughts[thought_index % len(thoughts)]
69
+ thought_index += 1
70
+
71
+ await asyncio.sleep(sleep_time)
72
  accumulated_thoughts += f"- {thought}\n\n"
73
  response.content = accumulated_thoughts.strip()
74
+ yield response, ""
75
+ # yield response
76
+
77
+ # Get the result from the completed task
78
+ client_info = await client_info_task
79
 
80
  response.metadata["status"] = "done"
81
  response.metadata["duration"] = time.time() - start_time
82
+ yield response, ""
83
+ # yield response
84
 
 
85
  response_list = [
86
  response,
87
+ ChatMessage(content=client_info)
 
 
88
  ]
89
  print(f"Message: {message},\n Len: {len(history)}, \nHistory: {history}")
90
 
91
+ # yield response_list
92
+ yield response_list, client_info
 
 
 
 
93
 
94
+ chatbot = gr.Chatbot(height=650 ,elem_classes=["chatbot-container"], label="Project Questions")
95
 
96
  with gr.Blocks(fill_height=True) as demo:
97
+ with gr.Row():
98
+ with gr.Column(scale=1):
99
+ # output = gr.Textbox(label="Output")
100
+ current_question = gr.Textbox(label="Edit Area", lines=30)
101
+ # submit_btn = gr.Button("Submit")
102
+ # clear_btn = gr.Button("Clear Chat")
103
+ with gr.Column(scale=1):
104
+ gr.ChatInterface(
105
+ simulate_thinking_chat,
106
+ chatbot= chatbot,
107
+ type="messages",
108
+ fill_height=True,
109
+ additional_outputs= [current_question]
110
+ # show_progress= 'minimal',
111
+ # save_history= True
112
+ )
113
 
114
  demo.launch()