genaitiwari commited on
Commit
7734f80
·
1 Parent(s): 4ba7a4d
.vscode/launch.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "0.2.0",
3
+ "configurations": [
4
+ {
5
+ "name": "debug streamlit",
6
+ "type": "debugpy",
7
+ "request": "launch",
8
+ "program": "./.venv/Lib/site-packages/streamlit", // Windows venv layout; on Linux/macOS use ./.venv/bin/streamlit
9
+ "args": [
10
+ "run",
11
+ "app.py"
12
+ ],
13
+ "justMyCode": false
14
+ }
15
+ ]
16
+ }
README.md CHANGED
@@ -13,3 +13,8 @@ license: apache-2.0
13
  Autogen Multiagent
14
 
15
 
 
 
 
 
 
 
13
  Autogen Multiagent
14
 
15
 
16
+ ## Code execution
17
+
18
+ ![Multi-agent code execution demo screenshot](image.png)
19
+
20
+
app.py CHANGED
@@ -3,6 +3,7 @@ import streamlit as st
3
  from configfile import Config
4
  from src.streamlitui.loadui import LoadStreamlitUI
5
  from src.usecases.multiagentschat import MultiAgentChat
 
6
  from src.usecases.withllamaIndex import WithLlamaIndexMultiAgentChat
7
  from src.usecases.agentchatsqlspider import AgentChatSqlSpider
8
  from src.LLMS.groqllm import GroqLLM
@@ -28,7 +29,12 @@ if __name__ == "__main__":
28
 
29
  if problem:
30
  # start multichat
31
- if user_input['selected_usecase'] == "MultiAgent Chat":
 
 
 
 
 
32
  obj_usecases_multichat = MultiAgentChat(assistant_name='Assistant', user_proxy_name='Userproxy', llm_config=llm_config,
33
  problem=problem)
34
  obj_usecases_multichat.run()
 
3
  from configfile import Config
4
  from src.streamlitui.loadui import LoadStreamlitUI
5
  from src.usecases.multiagentschat import MultiAgentChat
6
+ from src.usecases.multiagentcodeexecution import MultiAgentCodeExecution
7
  from src.usecases.withllamaIndex import WithLlamaIndexMultiAgentChat
8
  from src.usecases.agentchatsqlspider import AgentChatSqlSpider
9
  from src.LLMS.groqllm import GroqLLM
 
29
 
30
  if problem:
31
  # start multichat
32
+ if user_input['selected_usecase'] == "MultiAgent Code Execution":
33
+ obj_usecases_multichat = MultiAgentCodeExecution(assistant_name=['Assistant',"Product_Manager"], user_proxy_name='Userproxy', llm_config=llm_config,
34
+ problem=problem)
35
+ obj_usecases_multichat.run()
36
+
37
+ elif user_input['selected_usecase'] == "MultiAgent Chat":
38
  obj_usecases_multichat = MultiAgentChat(assistant_name='Assistant', user_proxy_name='Userproxy', llm_config=llm_config,
39
  problem=problem)
40
  obj_usecases_multichat.run()
codegen/tmp_code_3e1806a0bf22b99c6c5d2b77650fe9a8.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import tkinter as tk


class TodoApp:
    """Minimal to-do list GUI: a text entry, an "Add Task" button and a
    listbox showing the tasks added so far."""

    def __init__(self, root):
        """Build the widgets inside *root* (a tk.Tk or tk.Frame)."""
        self.root = root
        self.tasks = []  # plain-string task descriptions, in insertion order

        # Text input field for the task description.
        self.task_entry = tk.Entry(self.root, width=50)
        self.task_entry.grid(row=0, column=0, padx=(20, 0), pady=(20, 0))

        # Button that appends the entry's text to the task list.
        self.add_button = tk.Button(self.root, text="Add Task", command=self.add_task)
        self.add_button.grid(row=0, column=1, padx=(10, 20), pady=(20, 0))

        # Listbox displaying the added tasks.
        self.task_listbox = tk.Listbox(self.root, width=50, height=10)
        self.task_listbox.grid(row=1, column=0, columnspan=2, padx=(20, 0), pady=(10, 20))

    def add_task(self):
        """Read the entry, store the task, clear the entry and show the task.

        Robustness fix: empty/whitespace-only input is ignored (the original
        happily added blank rows to both the list and the listbox).
        """
        task = self.task_entry.get()
        if not task.strip():
            return
        self.tasks.append(task)
        self.task_entry.delete(0, tk.END)
        self.task_listbox.insert(tk.END, task)


if __name__ == "__main__":
    # Fixes vs. the generated original: the script entry is now guarded (it
    # previously opened a Tk window at import time), and the stray trailing
    # `TERMINATE` token — a NameError if ever reached — has been removed.
    root = tk.Tk()
    app = TodoApp(root)
    root.mainloop()
configfile.ini CHANGED
@@ -1,6 +1,6 @@
1
  [DEFAULT]
2
  PAGE_TITLE = AUTOGEN IN ACTION
3
  LLM_OPTIONS = Groq, Huggingface
4
- USECASE_OPTIONS = MultiAgent Chat, RAG Chat, With LLamaIndex Tool, AgentChat Sql Spider
5
  GROQ_MODEL_OPTIONS = mixtral-8x7b-32768, llama3-8b-8192, llama3-70b-8192, gemma-7b-i
6
 
 
1
  [DEFAULT]
2
  PAGE_TITLE = AUTOGEN IN ACTION
3
  LLM_OPTIONS = Groq, Huggingface
4
+ USECASE_OPTIONS = MultiAgent Code Execution, MultiAgent Chat, RAG Chat, With LLamaIndex Tool, AgentChat Sql Spider
5
  GROQ_MODEL_OPTIONS = mixtral-8x7b-32768, llama3-8b-8192, llama3-70b-8192, gemma-7b-i
6
 
image.png ADDED
src/usecases/multiagentcodeexecution.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import asyncio

import streamlit as st
import autogen

from src.agents.assistantagent import TrackableAssistantAgent
from src.agents.userproxyagent import TrackableUserProxyAgent


class MultiAgentCodeExecution:
    """Three-agent AutoGen group chat (coder + product manager + user proxy)
    that can execute the code the agents generate.

    The user proxy runs generated code locally in ./codegen (no Docker) and
    the conversation stops when a message ends with "TERMINATE".
    """

    def __init__(self, assistant_name, user_proxy_name, llm_config, problem):
        """
        Args:
            assistant_name: two-element sequence — [coder_name, product_manager_name].
            user_proxy_name: name for the human-admin proxy agent.
            llm_config: AutoGen LLM configuration dict shared by all agents.
            problem: the task/prompt that seeds the group chat.
        """
        # Code-writing assistant.
        self.coder = TrackableAssistantAgent(
            name=assistant_name[0],
            system_message="You are a helpful assistant, efficient at writing code.",
            human_input_mode="NEVER",
            llm_config=llm_config,
        )

        # Product-manager assistant; it is the agent instructed to emit the
        # TERMINATE sentinel. (Prompt wording fixed — the original read
        # "efficient in Creative in software product ideas".)
        self.pm = TrackableAssistantAgent(
            name=assistant_name[1],
            system_message='You are creative and efficient at software product ideas. '
                           'Reply "TERMINATE" at the end when everything is done.',
            human_input_mode="NEVER",
            llm_config=llm_config,
        )

        # Human-admin proxy: executes generated code from the last 2 messages
        # in ./codegen without Docker, and detects the termination sentinel.
        self.user_proxy = TrackableUserProxyAgent(
            name=user_proxy_name,
            system_message="You are human Admin",
            human_input_mode="NEVER",
            llm_config=llm_config,
            code_execution_config={
                "last_n_messages": 2,
                "work_dir": "./codegen",
                "use_docker": False,
            },
            # BUG FIX: a message's "content" can be present but None (e.g.
            # tool/function-call messages), in which case dict.get("content", "")
            # still returns None and .strip() raised AttributeError. `or ""`
            # guards that case.
            is_termination_msg=lambda x: (x.get("content") or "").strip().endswith("TERMINATE"),
        )

        self.groupchat = autogen.GroupChat(
            agents=[self.user_proxy, self.coder, self.pm],
            messages=[],
            max_round=12,
        )
        self.manager = autogen.GroupChatManager(groupchat=self.groupchat, llm_config=llm_config)
        self.problem = problem

        # Streamlit reruns the script per interaction; a dedicated event loop
        # lets the synchronous run() below drive the async chat.
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

    async def initiate_chat(self):
        """Kick off the async group chat, honoring the Streamlit history toggle."""
        await self.user_proxy.a_initiate_chat(
            self.manager,
            message=self.problem,
            clear_history=st.session_state["chat_with_history"],
        )

    def run(self):
        """Synchronous entry point: block until the group chat finishes."""
        self.loop.run_until_complete(self.initiate_chat())