Commit 4583e4d
Parent(s): 3392ab1
teachable agent
Files changed:
- .gitignore +5 -0
- README.md +6 -0
- app.py +15 -0
- configfile.ini +1 -1
- src/streamlitui/loadui.py +17 -10
- src/usecases/cag_chat.py +2 -2
- src/usecases/teachableagent.py +46 -0
- teachable_agent.png +0 -0
.gitignore
CHANGED
@@ -7,3 +7,8 @@ codegen/tmp_code_3e1806a0bf22b99c6c5d2b77650fe9a8.py
 /tmp/chromadb
 /tmp/db
 /.cache
+teachability_db/2cff7ca5-81fd-4f89-a76a-2d5bfc940455/data_level0.bin
+teachability_db/2cff7ca5-81fd-4f89-a76a-2d5bfc940455/header.bin
+*.bin
+*.sqlite3
+*.pkl
README.md
CHANGED
@@ -55,6 +55,12 @@ Requirements
 #### Basic Example
 ![AUTOGEN CHAT](basicexample.png)
 
+#### Teachable Agent
+Prompt 1: who is Sachin Tiwari
+Prompt 2: Sachin is from jharkhand working in uk
+Prompt 3: who is sachin
+![AUTOGEN CHAT](teachable_agent.png)
+
 ### Chat with CAG
 prompt1: what is dotnet
 prompt2: what is python
app.py
CHANGED
@@ -11,6 +11,7 @@ from src.LLMS.groqllm import GroqLLM
 from src.usecases.multiagentragchat import MultiAgentRAGChat
 from src.usecases.basicexample import BasicExample
 from src.usecases.cag_chat import CAGLLMChat
+from src.usecases.teachableagent import TeachableAgent
 
 
 # MAIN Function START
@@ -79,5 +80,19 @@ if __name__ == "__main__":
             obj_cag_llm.process_cag_llm()
 
 
+        elif user_input['selected_usecase'] == "Teachable Agent":
+
+            obj_chat = TeachableAgent(llm_config=llm_config,problem=problem)
+            response = obj_chat.start_chat()
+            with st.chat_message("user"):
+                st.write(problem)
+            with st.chat_message("ai"):
+                st.markdown(response.summary)
+
+
+
+
+
+
 
 
configfile.ini
CHANGED
@@ -1,6 +1,6 @@
 [DEFAULT]
 PAGE_TITLE = AUTOGEN IN ACTION
 LLM_OPTIONS = Groq, Huggingface
-USECASE_OPTIONS = Basic Example, Chat with CAG, MultiAgent Chat, MultiAgent Code Execution, RAG Chat, With LLamaIndex Tool
+USECASE_OPTIONS = Basic Example, Chat with CAG, Teachable Agent, MultiAgent Chat, MultiAgent Code Execution, RAG Chat, With LLamaIndex Tool
 GROQ_MODEL_OPTIONS = llama-3.3-70b-versatile, mixtral-8x7b-32768, llama3-8b-8192, llama3-70b-8192, gemma2-9b-it
 
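For context, a minimal sketch of how a UI layer could turn USECASE_OPTIONS into the sidebar list. This assumes the repo parses configfile.ini with Python's standard configparser; the helper name load_usecase_options is hypothetical, not from the repo:

import configparser

def load_usecase_options(path: str = "configfile.ini") -> list[str]:
    # Values live in the [DEFAULT] section as a comma-separated string.
    config = configparser.ConfigParser()
    config.read(path)
    raw = config["DEFAULT"].get("USECASE_OPTIONS", "")
    return [option.strip() for option in raw.split(",") if option.strip()]

# After this commit, the returned list includes "Teachable Agent",
# which is how the new use case appears in the selector.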
src/streamlitui/loadui.py
CHANGED
@@ -32,17 +32,24 @@ class LoadStreamlitUI:
             st.session_state["docs_path"] = st.text_input("Enter Docs path or filename")
 
 
-
-        if st.sidebar.toggle("Chat With History"):
-            st.session_state["chat_with_history"]=False
-        else :
-            st.session_state["chat_with_history"]=True
-
-        if st.sidebar.toggle("LLM Caching"):
-            st.session_state["Cache_Seed"]=True
-        else :
-            st.session_state["Cache_Seed"]=False
+        if self.user_controls['selected_usecase'] == "Teachable Agent":
+            selected_chat_purpose = st.selectbox("Do you want me to ask or teach?",
+                                                 ("Ask", "Teach"))
+            if selected_chat_purpose == "Ask":
+                st.session_state["Chat_Purpose"]=True
+
+            else :
+                st.session_state["Chat_Purpose"]=False
+        else:
+            if st.sidebar.toggle("Chat With History"):
+                st.session_state["chat_with_history"]=False
+            else :
+                st.session_state["chat_with_history"]=True
 
+            if st.sidebar.toggle("LLM Caching"):
+                st.session_state["Cache_Seed"]=True
+            else :
+                st.session_state["Cache_Seed"]=False
 
         if self.user_controls['selected_usecase'] == "With LLamaIndex Tool":
             st.subheader("🏝️ Trip Advisor Specialist using wikipedia")
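The Ask/Teach selector above drives the Chat_Purpose flag that teachableagent.py later passes to initiate_chat as clear_history. A condensed, hypothetical restatement of that branch (the session-state key matches the diff; the helper itself is not in the repo):

import streamlit as st

def set_chat_purpose() -> None:
    # "Ask" clears the transcript so answers must come from the
    # teachability database; "Teach" keeps it so new facts can be stored.
    choice = st.selectbox("Do you want me to ask or teach?", ("Ask", "Teach"))
    st.session_state["Chat_Purpose"] = (choice == "Ask")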
src/usecases/cag_chat.py
CHANGED
@@ -10,7 +10,7 @@ class CAGLLMChat:
 
 
     def start_chat(self):
-        llm_config=
+        llm_config= st.session_state['llm_config']
         problem = self.problem
         assistant = AssistantAgent("assistant", llm_config=llm_config,code_execution_config=False,human_input_mode='NEVER')
         user_proxy = UserProxyAgent("user_proxy", code_execution_config=False,human_input_mode='NEVER')
@@ -19,7 +19,7 @@ class CAGLLMChat:
         response = user_proxy.initiate_chat(
             assistant,
             message=problem,
-            max_turns=
+            max_turns=2,
             clear_history=st.session_state["chat_with_history"]
         )
         return response
src/usecases/teachableagent.py
ADDED
@@ -0,0 +1,46 @@
+import os
+from autogen import AssistantAgent, UserProxyAgent
+import streamlit as st
+from autogen import ConversableAgent, UserProxyAgent
+from autogen.agentchat.contrib.capabilities.teachability import Teachability
+
+
+class TeachableAgent:
+    def __init__(self,llm_config,problem):
+        self.llm_config = llm_config
+        self.problem = problem
+
+
+    def start_chat(self):
+        llm_config= st.session_state['llm_config']
+        problem = self.problem
+        # Start by instantiating any agent that inherits from ConversableAgent.
+        teachable_agent = ConversableAgent(
+            name="teachable_agent",  # The name is flexible, but should not contain spaces to work in group chat.
+            llm_config=llm_config
+        )
+
+        # Instantiate the Teachability capability. Its parameters are all optional.
+        teachability = Teachability(
+            verbosity=0,  # 0 for basic info, 1 to add memory operations, 2 for analyzer messages, 3 for memo lists.
+            reset_db=False,
+            path_to_db_dir="./teachability_db",
+            recall_threshold=1.5,  # Higher numbers allow more (but less relevant) memos to be recalled.
+        )
+
+        # Now add the Teachability capability to the agent.
+        teachability.add_to_agent(teachable_agent)
+
+        # Instantiate a UserProxyAgent to represent the user. All user input will be simulated.
+        user = UserProxyAgent(
+            name="user",
+            human_input_mode="NEVER",
+            is_termination_msg=lambda x: True if "TERMINATE" in x.get("content") else False,
+            max_consecutive_auto_reply=0,
+            code_execution_config={
+                "use_docker": False
+            },  # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.
+        )
+        # clear_history = False -> Teach
+        response = user.initiate_chat(teachable_agent, message=problem, clear_history=st.session_state["Chat_Purpose"])
+        return response
teachable_agent.png
ADDED
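As a closing note, the teach-then-ask flow that the README prompts demonstrate can be reproduced outside Streamlit. A minimal standalone sketch, assuming pyautogen with the teachability extra installed; the llm_config values and prompts are illustrative, not taken from this repo:

from autogen import ConversableAgent, UserProxyAgent
from autogen.agentchat.contrib.capabilities.teachability import Teachability

# Illustrative config; substitute your own provider/model settings.
llm_config = {"config_list": [{"model": "llama-3.3-70b-versatile", "api_key": "..."}]}

agent = ConversableAgent(name="teachable_agent", llm_config=llm_config)
Teachability(reset_db=True, path_to_db_dir="./teachability_db").add_to_agent(agent)

user = UserProxyAgent(name="user", human_input_mode="NEVER",
                      max_consecutive_auto_reply=0,
                      code_execution_config={"use_docker": False})

# Teach: keep history, mirroring the repo's Chat_Purpose=False branch,
# so Teachability can analyze the exchange and store the new fact.
user.initiate_chat(agent, message="Sachin is from jharkhand working in uk", clear_history=False)

# Ask: clear history (Chat_Purpose=True), so any recall must come from
# the memo store persisted under ./teachability_db.
result = user.initiate_chat(agent, message="who is sachin", clear_history=True)
print(result.summary)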