File size: 5,371 Bytes
4fff95e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36bebda
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4fff95e
 
 
 
 
 
678f1bc
 
 
 
c029b60
 
 
 
 
678f1bc
 
4fff95e
678f1bc
4fff95e
678f1bc
 
4fff95e
678f1bc
4fff95e
678f1bc
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
from langchain_groq import ChatGroq
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate
from langchain_community.document_loaders import YoutubeLoader, WebBaseLoader
from langchain.chains.summarize import load_summarize_chain
from langchain_core.tools import Tool
from langchain_community.tools import DuckDuckGoSearchRun
from langchain.agents import create_react_agent
from langchain.agents import AgentExecutor
from langchain_community.callbacks.streamlit import StreamlitCallbackHandler
from langchain_community.utilities import WikipediaAPIWrapper, ArxivAPIWrapper
import streamlit as st

def groq_chatbot(model_params, question, api_key, chat_history):
    """Stream a conversational answer from a Groq-hosted chat model.

    Args:
        model_params: dict with 'model', 'temperature' and 'max_tokens' keys.
        question: the latest user question.
        api_key: Groq API key used to authenticate the request.
        chat_history: prior conversation messages, injected via a
            MessagesPlaceholder so the model can resolve references.

    Returns:
        An iterator of response text chunks (output of ``chain.stream``),
        suitable for incremental rendering in the UI.
    """
    llm = ChatGroq(
        model=model_params['model'],
        api_key=api_key,
        temperature=model_params["temperature"],
        max_tokens=model_params['max_tokens'],
    )

    system_template = (
    """Given a chat history and the latest user question 
    which might reference context in the chat history, 
    Answer the user question in a polite and professional manner."""
)   
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_template),
            MessagesPlaceholder(variable_name="chat_history"),
            # Bug fix: label was misspelled "Questioin" in the model-facing prompt.
            ("user", "Question: {question}"),
        ]
    )
    chain = prompt | llm | StrOutputParser()

    # Stream so the caller (Streamlit) can render tokens as they arrive.
    return chain.stream({"question": question, "chat_history": chat_history})


def get_prompt():
    """Build the ReAct-style prompt template used by the tool-calling agent.

    The template expects the variables ``tools``, ``tool_names``,
    ``chat_history``, ``input`` and ``agent_scratchpad`` (the standard
    set required by ``create_react_agent``).

    Returns:
        ChatPromptTemplate: the agent prompt.
    """
    # Bug fix: "relavant" -> "relevant" in the instruction text the model reads.
    prompt = ChatPromptTemplate.from_template("""
Answer the following user questions as best you can. Use the available tools to find the answer.
You have access to the following tools:\n
{tools}\n\n
To use a tool, please use the following format:
```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```
If one tool doesn't give the relevant information, use another tool.
When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
                                              
```
Thought: Do I need to use a tool? No
Final Answer: [your response here]
```
Begin!
                                              
Previous conversation history:
{chat_history}
New input: {input}

{agent_scratchpad}
""")
    return prompt


def create_groq_agent(model_params, api_key, tools, question, chat_history):
    """Run a ReAct agent over the given tools and return its final answer.

    Args:
        model_params: dict with 'model' and 'temperature' keys.
        api_key: Groq API key.
        tools: list of Tool objects the agent may call.
        question: the user's input.
        chat_history: prior conversation, passed into the prompt.

    Returns:
        str: the agent's final answer text.
    """
    model = ChatGroq(
        model=model_params['model'],
        api_key=api_key,
        temperature=model_params["temperature"],
    )

    react_agent = create_react_agent(model, tools, get_prompt())
    executor = AgentExecutor(
        agent=react_agent,
        tools=tools,
        verbose=True,
        handle_parsing_errors=True,  # retry instead of crashing on malformed LLM output
        max_iterations=7,            # cap tool-use loops
    )

    # Render intermediate thoughts/actions live in the Streamlit UI.
    callback = StreamlitCallbackHandler(st.container())
    result = executor.invoke(
        {"input": question, "chat_history": chat_history},
        {"callbacks": [callback]},
    )
    return result['output']


def get_tools(selected_tools):
    """Build Tool objects for the user-selected tool names.

    Args:
        selected_tools: iterable of tool names; each must be one of
            "Wikipedia", "ArXiv", or "DuckDuckGo Search".

    Returns:
        list: Tool objects in the order the names were given.

    Raises:
        KeyError: if a name is not a known tool (same as the original
            direct dict lookup).
    """
    # Factories defer construction so API wrappers are only instantiated
    # for tools the user actually selected (the original built all of them
    # on every call).
    factories = {
        "Wikipedia": lambda: Tool(
            name="Wikipedia",
            func=WikipediaAPIWrapper(top_k_results=2, doc_content_chars_max=500).run,
            description="A useful tool for searching the Internet to find information on world events, issues, dates, years, etc."
        ),
        "ArXiv": lambda: Tool(
            name="ArXiv",
            func=ArxivAPIWrapper(top_k_results=2, doc_content_chars_max=500).run,
            description="A useful tool for searching scientific and research papers."
        ),
        "DuckDuckGo Search": lambda: Tool(
            name="DuckDuckGo Search",
            func=DuckDuckGoSearchRun().run,
            description="Useful for when you need to search the internet to find latest information, facts and figures that another tool can't find."
        ),
    }

    # Instantiate only the selected tools, preserving selection order.
    return [factories[tool_name]() for tool_name in selected_tools]


def summarizer_model(model_params, api_key, url):
    """Summarize the content behind a URL (YouTube video or web page).

    Args:
        model_params: dict with 'model', 'temperature' and 'max_tokens' keys.
        api_key: Groq API key.
        url: a YouTube link (transcript is fetched) or any other web URL
            (page text is fetched with WebBaseLoader).

    Returns:
        str | None: markdown summary, or None if loading/summarizing failed
        (the error is reported to the UI via st.error).
    """
    llm = ChatGroq(
        model=model_params['model'],
        api_key=api_key,
        temperature=model_params["temperature"],
        max_tokens=model_params['max_tokens'],
    )
    try:
        if "youtube.com" in url or "youtu.be" in url:
            # Normalize any YouTube URL form (shorts, youtu.be, etc.) to a
            # canonical watch URL via the extracted video id.
            video_id = YoutubeLoader.extract_video_id(url)

            loader = YoutubeLoader.from_youtube_url("https://www.youtube.com/watch?v=" + video_id,
                                                    add_video_info=False,
                                                    language=["en", "hi"],
                                                    translation="en",
                                                    continue_on_failure=True)
        else:
            loader = WebBaseLoader(web_path=url)

        data = loader.load()

        prompt_template = """Provide a summary of the following content in proper markdown:
        Content:\n{text}"""

        prompt = PromptTemplate(input_variables=["text"], template=prompt_template)

        # "stuff" chain: all documents are passed to the model in one prompt.
        chain = load_summarize_chain(llm=llm, chain_type="stuff", prompt=prompt)
        output = chain.run(data)
        return output
    except Exception as e:
        # Bug fix: the original always showed a doubled, video-specific
        # message ("An error occurred:An error occurred: Could not retrieve
        # a transcript...") even for web-page failures, hiding the real
        # cause. Report the actual exception instead.
        st.error(f"An error occurred: {e}", icon="❌")