genaitiwari commited on
Commit
3328745
Β·
1 Parent(s): 67b7750

refinements of usecases and readme file updated

Browse files
README.md CHANGED
@@ -51,16 +51,27 @@ Requirements
51
  ### LLM_OPTIONS
52
  Groq
53
  ### USECASE_OPTIONS
54
- #### MultiAgent Code Execution
55
- ![alt text](image.png)
56
- ![alt text](image-3.png)
57
 
58
  #### MultiAgent Chat
 
 
 
 
 
 
 
59
 
60
  #### RAG Chat
 
 
 
61
 
62
  #### With LLamaIndex Tool
63
  #### AgentChat Sql Spider
 
64
  ### GROQ_MODEL_OPTIONS
65
  mixtral-8x7b-32768
66
  llama3-8b-8192
 
51
  ### LLM_OPTIONS
52
  Groq
53
  ### USECASE_OPTIONS
54
+
55
+ #### Basic Example
56
+ ![alt text](basic_example.png)
57
 
58
  #### MultiAgent Chat
59
+ prompt : As a user, create an ASP.NET form with a Razor view page for a health insurance feedback page
60
+ ![alt text](multiagent_chat.png)
61
+
62
+ #### MultiAgent Code Execution
63
+
64
+ ![alt text](multiagent_code_execution.png)
65
+ ![alt text](image-3.png)
66
 
67
  #### RAG Chat
68
+ prompt : Explain
69
+ docs or filename path : https://github.com/microsoft/autogen/blob/main/python/samples/agentchat_chainlit/README.md
70
+ ![alt text](rag_chat.png)
71
 
72
  #### With LLamaIndex Tool
73
  #### AgentChat Sql Spider
74
+
75
  ### GROQ_MODEL_OPTIONS
76
  mixtral-8x7b-32768
77
  llama3-8b-8192
app.py CHANGED
@@ -8,6 +8,7 @@ from src.usecases.withllamaIndex import WithLlamaIndexMultiAgentChat
8
  from src.usecases.agentchatsqlspider import AgentChatSqlSpider
9
  from src.LLMS.groqllm import GroqLLM
10
  from src.usecases.multiagentragchat import MultiAgentRAGChat
 
11
 
12
 
13
  # MAIN Function START
@@ -58,5 +59,12 @@ if __name__ == "__main__":
58
  problem=problem)
59
 
60
  obj_sql_spider.run()
 
 
 
 
 
 
 
61
 
62
 
 
8
  from src.usecases.agentchatsqlspider import AgentChatSqlSpider
9
  from src.LLMS.groqllm import GroqLLM
10
  from src.usecases.multiagentragchat import MultiAgentRAGChat
11
+ from src.usecases.basicexample import BasicExample
12
 
13
 
14
  # MAIN Function START
 
59
  problem=problem)
60
 
61
  obj_sql_spider.run()
62
+
63
+ elif user_input['selected_usecase'] == "Basic Example":
64
+ obj_basic_example = BasicExample(assistant_name="Assistant", user_proxy_name='Userproxy',
65
+ llm_config=llm_config,
66
+ problem=problem)
67
+ obj_basic_example.run()
68
+
69
 
70
 
basic_example.png ADDED
configfile.ini CHANGED
@@ -1,6 +1,6 @@
1
  [DEFAULT]
2
  PAGE_TITLE = AUTOGEN IN ACTION
3
  LLM_OPTIONS = Groq, Huggingface
4
- USECASE_OPTIONS = MultiAgent Code Execution, MultiAgent Chat, RAG Chat, With LLamaIndex Tool, AgentChat Sql Spider
5
  GROQ_MODEL_OPTIONS = mixtral-8x7b-32768, llama3-8b-8192, llama3-70b-8192, gemma-7b-i
6
 
 
1
  [DEFAULT]
2
  PAGE_TITLE = AUTOGEN IN ACTION
3
  LLM_OPTIONS = Groq, Huggingface
4
+ USECASE_OPTIONS = Basic Example, MultiAgent Chat, MultiAgent Code Execution, RAG Chat, With LLamaIndex Tool, AgentChat Sql Spider
5
  GROQ_MODEL_OPTIONS = mixtral-8x7b-32768, llama3-8b-8192, llama3-70b-8192, gemma-7b-i
6
 
multiagent_chat.png ADDED
image.png β†’ multiagent_code_execution.png RENAMED
File without changes
rag_chat.png ADDED
src/agents/assistantagent.py CHANGED
@@ -5,8 +5,11 @@ from autogen.agentchat.contrib.llamaindex_conversable_agent import LLamaIndexCon
5
 
6
  class TrackableAssistantAgent(AssistantAgent):
7
  def _process_received_message(self, message, sender, silent):
8
- with st.chat_message(sender.name):
9
- st.write(message)
 
 
 
10
  return super()._process_received_message(message, sender, silent)
11
 
12
 
 
5
 
6
class TrackableAssistantAgent(AssistantAgent):
    """AssistantAgent that mirrors plain-text user-proxy messages into the Streamlit chat UI."""

    def _process_received_message(self, message, sender, silent):
        # Render only non-empty plain-text messages coming from the user proxy;
        # structured (dict) messages and other senders are passed through silently.
        # isinstance() is the idiomatic type check (was: type(message) == str).
        if message and isinstance(message, str) and sender.name == "Userproxy":
            with st.chat_message("user"):
                st.write(message)
        return super()._process_received_message(message, sender, silent)
14
 
15
 
src/agents/qdrantretrieveuserproxyagent.py CHANGED
@@ -5,6 +5,6 @@ from autogen.agentchat.contrib.qdrant_retrieve_user_proxy_agent import QdrantRet
5
 
6
  class TrackableQdrantRetrieveUserProxyAgent(QdrantRetrieveUserProxyAgent):
7
  def _process_received_message(self, message, sender, silent):
8
- with st.chat_message("user"):
9
- st.write(message)
10
  return super()._process_received_message(message, sender, silent)
 
5
 
6
class TrackableQdrantRetrieveUserProxyAgent(QdrantRetrieveUserProxyAgent):
    """Qdrant retrieve user proxy that shows each received message as an AI chat bubble."""

    def _process_received_message(self, message, sender, silent):
        # assumes autogen dict-style message with a "content" key — TODO confirm
        content = message["content"]
        with st.chat_message("ai"):
            st.write(content)
        return super()._process_received_message(message, sender, silent)
src/agents/retrieveassistantagent.py CHANGED
@@ -7,6 +7,7 @@ from autogen.agentchat.contrib.qdrant_retrieve_user_proxy_agent import QdrantRet
7
 
8
  class TrackableRetrieveAssistantAgent(RetrieveAssistantAgent):
9
  def _process_received_message(self, message, sender, silent):
10
- with st.chat_message(sender.name):
11
- st.write(message)
 
12
  return super()._process_received_message(message, sender, silent)
 
7
 
8
class TrackableRetrieveAssistantAgent(RetrieveAssistantAgent):
    """RetrieveAssistantAgent that echoes plain-text user-proxy messages into the Streamlit chat."""

    def _process_received_message(self, message, sender, silent):
        # isinstance() is the idiomatic type check (was: type(message) == str).
        # Only text from the user proxy is displayed; dict messages are skipped.
        if isinstance(message, str) and sender.name == "Userproxy":
            with st.chat_message("user"):
                st.write(message)
        return super()._process_received_message(message, sender, silent)
src/agents/userproxyagent.py CHANGED
@@ -4,6 +4,6 @@ import streamlit as st
4
 
5
  class TrackableUserProxyAgent(UserProxyAgent):
6
  def _process_received_message(self, message, sender, silent):
7
- with st.chat_message("user"):
8
- st.write(message)
9
  return super()._process_received_message(message, sender, silent)
 
4
 
5
class TrackableUserProxyAgent(UserProxyAgent):
    """UserProxyAgent that renders each received message under the sender's (lower-cased) name."""

    def _process_received_message(self, message, sender, silent):
        role = sender.name.lower()
        with st.chat_message(role):
            # assumes dict-style message with a "content" key — TODO confirm
            st.write(message['content'])
        return super()._process_received_message(message, sender, silent)
src/usecases/basicexample.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ from src.agents.assistantagent import TrackableAssistantAgent
3
+ from src.agents.userproxyagent import TrackableUserProxyAgent
4
+ import streamlit as st
5
+
6
+
7
class BasicExample:
    """Simplest usecase: one assistant and one user proxy exchange up to two turns."""

    def __init__(self, assistant_name, user_proxy_name, llm_config, problem):
        # Assistant is instructed to end its final reply with "TERMINATE" so the
        # proxy's termination predicate can stop the conversation.
        self.assistant = TrackableAssistantAgent(
            name=assistant_name,
            system_message="""you are helpful assistant. Reply "TERMINATE" in
                                                  the end when everything is done """,
            human_input_mode="NEVER",
            llm_config=llm_config,
        )
        self.user_proxy = TrackableUserProxyAgent(
            name=user_proxy_name,
            system_message="You are Admin",
            human_input_mode="NEVER",
            llm_config=llm_config,
            code_execution_config=False,
            # Stop as soon as the assistant signals completion.
            is_termination_msg=lambda x: x.get("content", "").strip().endswith("TERMINATE"),
        )
        self.problem = problem
        # NOTE(review): a fresh event loop is created per instance and never closed —
        # consider closing it after run(); confirm how usecase objects are recreated.
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

    async def initiate_chat(self):
        # clear_history is driven by the Streamlit session-state toggle.
        await self.user_proxy.a_initiate_chat(
            self.assistant,
            max_turns=2,
            message=self.problem,
            clear_history=st.session_state["chat_with_history"],
        )

    def run(self):
        """Drive the async chat to completion on this usecase's event loop."""
        self.loop.run_until_complete(self.initiate_chat())
src/usecases/multiagentragchat.py CHANGED
@@ -20,7 +20,8 @@ class MultiAgentRAGChat:
20
  max_consecutive_auto_reply=4,
21
  retrieve_config={
22
  "task": "code",
23
- "docs_path": self.list_files(st.session_state["docs_path"]),
 
24
  "chunk_token_size": 500,
25
  "model": llm_config["config_list"][0]["model"],
26
  "client": QdrantClient(":memory:"),
@@ -44,10 +45,18 @@ class MultiAgentRAGChat:
44
  # Ensure the directory path ends with a slash
45
  if not directory.endswith('/'):
46
  directory += '/'
47
-
48
- # Use glob to get the list of files
49
- files = glob.glob(os.path.join(directory, '*'))
50
- file_list = [path.replace('\\', '/') for path in files]
 
 
 
 
 
 
 
 
51
 
52
  return file_list
53
 
 
20
  max_consecutive_auto_reply=4,
21
  retrieve_config={
22
  "task": "code",
23
+ #"docs_path": self.list_files(st.session_state["docs_path"]), This is for loading custom files from a directory - logic implemented
24
+ "docs_path": st.session_state["docs_path"],
25
  "chunk_token_size": 500,
26
  "model": llm_config["config_list"][0]["model"],
27
  "client": QdrantClient(":memory:"),
 
45
  # Ensure the directory path ends with a slash
46
  if not directory.endswith('/'):
47
  directory += '/'
48
+ try :
49
+
50
+ # Use glob to get the list of files
51
+ files = glob.glob(os.path.join(directory, '*'))
52
+ file_list = [path.replace('\\', '/') for path in files]
53
+
54
+ if file_list.count == 0:
55
+ raise ValueError('list of files is zero')
56
+
57
+ except Exception as ex:
58
+ raise ValueError('issue with file path')
59
+
60
 
61
  return file_list
62
 
src/usecases/multiagentschat.py CHANGED
@@ -24,7 +24,7 @@ class MultiAgentChat:
24
  asyncio.set_event_loop(self.loop)
25
 
26
  async def initiate_chat(self):
27
- await self.user_proxy.a_initiate_chat(self.assistant, message=self.problem, clear_history=st.session_state["chat_with_history"])
28
 
29
  def run(self):
30
  self.loop.run_until_complete(self.initiate_chat())
 
24
  asyncio.set_event_loop(self.loop)
25
 
26
  async def initiate_chat(self):
27
+ await self.user_proxy.a_initiate_chat(self.assistant, max_turns=2, message=self.problem, clear_history=st.session_state["chat_with_history"])
28
 
29
  def run(self):
30
  self.loop.run_until_complete(self.initiate_chat())