File size: 4,029 Bytes
10231de
 
3392ab1
10231de
 
 
7734f80
40fd4e5
3f4dbc7
10231de
7ddd05c
3328745
3392ab1
4583e4d
10231de
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7734f80
7ddd05c
7734f80
7ddd05c
7734f80
 
40fd4e5
 
 
7ddd05c
 
 
 
 
40fd4e5
 
ac85c1d
40fd4e5
 
 
 
 
3f4dbc7
ac85c1d
 
 
 
3f4dbc7
ac85c1d
3328745
 
 
 
 
 
 
3392ab1
 
 
 
 
 
 
 
 
 
4583e4d
 
 
 
 
 
 
 
 
 
 
 
 
 
3f4dbc7
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
import streamlit as st

from src.cag.main import CAGLLM
from configfile import Config
from src.streamlitui.loadui import LoadStreamlitUI
from src.usecases.multiagentschat import MultiAgentChat
from src.usecases.multiagentcodeexecution import MultiAgentCodeExecution
from src.usecases.withllamaIndex import WithLlamaIndexMultiAgentChat
from src.usecases.agentchatsqlspider import AgentChatSqlSpider
from src.LLMS.groqllm import GroqLLM
from src.usecases.multiagentragchat import MultiAgentRAGChat
from src.usecases.basicexample import BasicExample
from src.usecases.cag_chat import CAGLLMChat
from src.usecases.teachableagent import TeachableAgent


# --- Application entry point ---
def main() -> None:
    """Streamlit entry point.

    Loads app config, renders the UI, configures the Groq LLM, then reads a
    chat prompt and dispatches it to the use case the user selected in the UI.
    """
    # Application configuration (constructed for its side effects / later use).
    obj_config = Config()

    # Render the Streamlit controls and collect the user's selections.
    ui = LoadStreamlitUI()
    user_input = ui.load_streamlit_ui()

    # Configure the LLM. groq_llm_config() is expected to store the resulting
    # config in st.session_state['llm_config'] — TODO confirm against GroqLLM.
    obj_llm_config = GroqLLM(user_controls_input=user_input)
    obj_llm_config.groq_llm_config()
    llm_config = st.session_state['llm_config']

    # Chat prompt (label kept verbatim, including the trailing space).
    problem = st.chat_input("Start Chat ")
    if not problem:
        # Nothing typed yet — nothing to dispatch.
        return

    # Hoist the repeated dict lookup out of the branch chain.
    usecase = user_input['selected_usecase']

    if usecase == "MultiAgent Code Execution":
        MultiAgentCodeExecution(
            assistant_name=['Assistant', "Product_Manager"],
            user_proxy_name='Userproxy',
            llm_config=llm_config,
            problem=problem,
        ).run()

    elif usecase == "MultiAgent Chat":
        MultiAgentChat(
            assistant_name='Assistant',
            user_proxy_name='Userproxy',
            llm_config=llm_config,
            problem=problem,
        ).run()

    elif usecase == "RAG Chat":
        MultiAgentRAGChat(
            assistant_name='Assistant',
            user_proxy_name='Userproxy',
            llm_config=llm_config,
            problem=problem,
        ).run()

    elif usecase == "With LLamaIndex Tool":
        WithLlamaIndexMultiAgentChat(
            assistant_name='Assistant',
            user_proxy_name='Userproxy',
            llm_config=llm_config,
            problem=problem,
            user_input=user_input,
        ).run()

    # NOTE(review): the "AgentChat Sql Spider" use case (AgentChatSqlSpider)
    # is currently disabled; its import is retained at the top of the file.

    elif usecase == "Basic Example":
        BasicExample(
            assistant_name="Assistant",
            user_proxy_name='Userproxy',
            llm_config=llm_config,
            problem=problem,
        ).run()

    elif usecase == "Chat with CAG":
        # First get a chat response, then feed prompt + response into the
        # CAG pipeline for processing/rendering.
        obj_chat = CAGLLMChat(llm_config=llm_config, problem=problem)
        response = obj_chat.start_chat()
        obj_cag_llm = CAGLLM(problem, response)
        obj_cag_llm.process_cag_llm()

    elif usecase == "Teachable Agent":
        obj_chat = TeachableAgent(llm_config=llm_config, problem=problem)
        response = obj_chat.start_chat()
        # Render the exchange in the chat transcript.
        with st.chat_message("user"):
            st.write(problem)
        with st.chat_message("ai"):
            st.markdown(response.summary)


if __name__ == "__main__":
    main()