File size: 2,927 Bytes
47d0213
 
 
 
 
 
 
 
 
19cc297
 
47d0213
 
 
 
 
 
 
19cc297
 
 
47d0213
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
# -*- coding: utf-8 -*-
"""Context-Aware Smart.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1zrqp56QXy-rRJtCXU_zSc6EPd1rgBg9l
"""

#!pip install langchain langchain-community gradio python-dotenv
#!pip install groq

from langchain.llms.base import LLM
import gradio as gr
from langchain.agents import initialize_agent, AgentType


from llm import llm
from web_search_tool import WebSearchTool
from Context_Relevance_Splitter import Context_Relevance_Splitter_tool
from context_presence_judge import context_tool




# --- Agent initialization with all tools ---
# Tools the agent may pick from, in registration order: context presence
# judging, question/context splitting, and web search as a fallback.
tools = [
    context_tool,
    Context_Relevance_Splitter_tool,
    WebSearchTool,
]

# Build a ZERO_SHOT_REACT agent over the registered tools. Parsing errors
# from the LLM are recovered from rather than raised, and the reasoning
# loop is capped at 5 iterations to avoid runaway tool calls.
agent = initialize_agent(
    llm=llm,
    tools=tools,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    max_iterations=5,
    handle_parsing_errors=True,
    verbose=True,
)

# --- Enhanced Agent Runner Function ---
def run_agent(user_input):
    """Pre-process *user_input* through the context splitter, then run the agent.

    The splitter may return either a string (an explicit rejection marked
    with 🚫/⚠, or other text we treat as "no usable split") or a dict with
    'core_question' and 'background' keys. Background, when present, is
    prepended to the question before the agent sees it.

    Returns the agent's answer, or a user-facing error string on failure.
    """
    try:
        split = Context_Relevance_Splitter_tool(user_input)

        if isinstance(split, str):
            # String result: a flagged rejection goes straight back to the
            # user; anything else means we fall back to the raw input.
            if "🚫" in split or "⚠" in split:
                return split
            question, background = user_input, ""
        else:
            # Dict result: a successfully split question/context pair.
            question = split.get('core_question', user_input)
            background = split.get('background', '')

        # Assemble the final prompt for the agent.
        if background:
            agent_input = f"""
            Context Background: {background}
            Question: {question}
            """
        else:
            agent_input = question

        return agent.run(agent_input)

    except Exception as e:
        # Top-level UI boundary: surface a friendly message instead of a traceback.
        return f"⛔ Unexpected error occurred: {str(e)}\nPlease rephrase your question or try again later."

# --- Enhanced Gradio Interface ---
# Single-textbox UI wired to run_agent. Users may append external context
# after a "||" separator (the splitter tool parses that format — TODO
# confirm against Context_Relevance_Splitter implementation).
interface = gr.Interface(
    fn=run_agent,
    inputs=gr.Textbox(
        lines=3,
        placeholder="Enter your question here...\nTo add context use format: Question||Context\nExample: What's France's capital||Speaking about a European country"
    ),
    outputs="text",
    title="🤖 Context-Aware Smart Agent",
    description="""
    Advanced system for understanding complex questions:
    - Supports external context using ||
    - Automatically get the answer from context
    - Answers directly or searches when needed
    """,
    # Arabic examples: (1) context about Saudi Arabia, question about Egypt
    # (context irrelevant); (2) context answers the question directly.
    examples=[
        ["العاصمة السعودية هي الرياض||ما هي عاصمة مصر؟"],
        ["العاصمة السعودية هي الرياض||ما هي عاصمة السعودية؟"]
    ]
)

# --- Launch Interface ---
# Start the Gradio server only when executed as a script, not on import.
if __name__ == "__main__":
    interface.launch()