jess committed on
Commit
04d9d9e
·
1 Parent(s): e220267

add: sample agentic chat experiment

Browse files
Files changed (1) hide show
  1. sample_chat.py +87 -115
sample_chat.py CHANGED
@@ -1,118 +1,90 @@
1
  import gradio as gr
2
- from dataclasses import asdict
3
- from transformers import Tool, ReactCodeAgent # type: ignore
4
- from transformers.agents import stream_to_gradio, HfApiEngine # type: ignore
5
-
6
- from gradio_tools import GradioTool # assuming you have gradio_tool installed
7
-
8
class GenerateQuestionsTool(GradioTool):
    """Generate general deployment/integration questions via a prompt.

    Wraps the "generate_general_questions" prompt behind the GradioTool
    interface so an agent can call it as a tool.

    Input: (optional) project detail as a string.
    Output: generated questions as a string.
    """

    def __init__(
        self,
        name="GenerateQuestions",
        description=(
            "A tool that generates general deployment/integration questions "
            "by executing a prompt with project details. "
            "Input: a string with project detail (optional). Output: a string with generated questions."
        ),
        src="",  # Replace with your actual Gradio space id or URL if needed
    ):
        super().__init__(name, description, src)
        # Optionally, you could initialize any state or dependencies here

    def create_job(self, query: str):
        """Run the prompt, using *query* as the project detail when provided.

        Falls back to ``self.get_project_detail()`` for an empty/blank query.

        NOTE(review): neither ``get_project_detail()`` nor ``execute_prompt()``
        is defined on this class or (as far as visible here) on GradioTool —
        confirm they exist on the concrete instance before shipping.
        """
        # Fix: guard against query=None, which would crash on .strip().
        project_detail = query if query and query.strip() else self.get_project_detail()
        try:
            # Execute the prompt with the provided project detail.
            result = self.execute_prompt("generate_general_questions", {"project_detail": project_detail})
        except Exception as e:
            # Deliberate best-effort: surface the failure as the tool's output
            # string instead of crashing the agent loop.
            result = f"Error during prompt execution: {str(e)}"
        return result

    def postprocess(self, output) -> str:
        """Coerce the job output into a string the LLM can consume."""
        return str(output)

    def _block_input(self, gr):
        """Gradio input component: an optional project-detail textbox."""
        return gr.Textbox(label="Project Detail (optional)", placeholder="Enter project detail or leave empty to use default")

    def _block_output(self, gr):
        """Gradio output component: a textbox holding the generated questions."""
        return gr.Textbox(label="Generated Questions")
62
-
63
-
64
from langchain.agents import initialize_agent
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from dotenv import load_dotenv

# Load OPENAI_API_KEY (and friends) from .env before constructing the LLM.
load_dotenv()

llm = OpenAI(temperature=0)
tools = [GenerateQuestionsTool().langchain]  # assuming your tool is properly integrated

# Fix: "conversational agent" is not a registered LangChain agent identifier,
# so initialize_agent raised ValueError. The conversational ReAct agent is
# "conversational-react-description" and requires a memory object with the
# "chat_history" key.
memory = ConversationBufferMemory(memory_key="chat_history")
agent = initialize_agent(
    tools,
    llm,
    agent="conversational-react-description",
    memory=memory,
    verbose=True,
)
output = agent.run(input="Please generate integration questions for my project")
print(output)
77
-
78
- # # Import tool from Hub
79
- # image_generation_tool = Tool.from_space( # type: ignore
80
- # space_id="black-forest-labs/FLUX.1-schnell",
81
- # name="image_generator",
82
- # description="Generates an image following your prompt. Returns a PIL Image.",
83
- # api_name="/infer",
84
- # )
85
-
86
- # llm_engine = HfApiEngine("Qwen/Qwen2.5-Coder-32B-Instruct")
87
- # # Initialize the agent with both tools and engine
88
- # agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)
89
-
90
-
91
- # def interact_with_agent(prompt, history):
92
- # messages = []
93
- # yield messages
94
- # for msg in stream_to_gradio(agent, prompt):
95
- # messages.append(asdict(msg)) # type: ignore
96
- # yield messages
97
- # yield messages
98
-
99
-
100
- # demo = gr.ChatInterface(
101
- # interact_with_agent,
102
- # chatbot= gr.Chatbot(
103
- # label="Agent",
104
- # type="messages",
105
- # avatar_images=(
106
- # None,
107
- # "https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png",
108
- # ),
109
- # ),
110
- # examples=[
111
- # ["Generate an image of an astronaut riding an alligator"],
112
- # ["I am writing a children's book for my daughter. Can you help me with some illustrations?"],
113
- # ],
114
- # type="messages",
115
- # )
116
 
117
- # if __name__ == "__main__":
118
- # demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ from gradio import ChatMessage
3
+ import time
4
+
5
+ sleep_time = 0.2
6
+
7
+ # ... existing code ...
8
+
9
def get_client_information_questions():
    """Markdown question list used to gather client information.

    Takes no arguments and has no side effects; always returns the same
    fixed markdown string.
    """
    questions = """
# Client Information Gathering Questions

### Company Background and Industry
1. Can you provide some background about your company?
2. Which industry do you operate in, and what is your company's niche or specialization?
3. Who are your primary customers?
4. What are the main objectives you want to achieve?
5. What key features or functionalities do you need?

### Current Challenges
6. What are the biggest challenges your firm is currently facing?
7. Can you describe your current processes?

### Workflow and System Impact
8. How will this solution benefit your firm as a whole?

### Existing Workflow or System
9. Can you describe your current workflow or system?

### Pain Point Identification
10. Where is your current system falling short or causing delays?
11. Are there any parts of the process that are particularly time-consuming/ prone to error?
"""
    return questions
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
 
36
def simulate_thinking_chat(message, history):
    """Stream a simulated 'thinking' trace, then the client-question list.

    Yields a pending ChatMessage whose content grows one thought at a
    time, marks it done with its elapsed duration, and finally yields
    that message together with the canned question list.
    """
    started = time.time()

    thinking = ChatMessage(
        content="",
        metadata={"title": "_Thinking_ step-by-step", "id": 0, "status": "pending"},
    )
    yield thinking

    thoughts = [
        "First, I need to understand the core aspects of the query...",
        "Now, considering the broader context and implications...",
        "Analyzing potential approaches to formulate a comprehensive answer...",
        "Finally, structuring the response for clarity and completeness...",
    ]

    # Reveal the thoughts one at a time, re-yielding the same message
    # object so the UI updates the pending bubble in place.
    revealed = []
    for thought in thoughts:
        time.sleep(sleep_time)
        revealed.append(f"- {thought}\n\n")
        thinking.content = "".join(revealed).strip()
        yield thinking

    # Close out the thinking bubble with status and timing.
    thinking.metadata["status"] = "done"
    thinking.metadata["duration"] = time.time() - started
    yield thinking

    # Final turn: the finished thinking trace plus the question list.
    final_turn = [
        thinking,
        ChatMessage(content=get_client_information_questions()),
    ]
    print(f"Message: {message},\n Len: {len(history)}, \nHistory: {history}")

    yield final_turn
76
# Shared chatbot component so height/styling can be tweaked in one place.
chatbot = gr.Chatbot(height=650, elem_classes=["chatbot-container"])


with gr.Blocks(fill_height=True) as demo:
    gr.ChatInterface(
        simulate_thinking_chat,
        title="Thinking LLM Chat Interface 🤔",
        type="messages",
        fill_height=True,
        chatbot=chatbot,
        # show_progress='minimal',
        # save_history=True
    )

# Fix: the launch was unconditional, so merely importing this module
# (e.g. from tests or another app) started a server as a side effect.
if __name__ == "__main__":
    demo.launch()