Spaces:
Runtime error
Runtime error
Commit ·
3b298be
1
Parent(s): b616794
Initial commit
Browse files- .gitignore +79 -0
- .python-version +1 -0
- README.md +50 -12
- agent.py +219 -0
- app.py +126 -0
- assistant_avatar.png +0 -0
- config.yaml +33 -0
- pyproject.toml +19 -0
- tools/tavily_tool.py +29 -0
- tools/wikipedia_tool.py +23 -0
- uv.lock +0 -0
.gitignore
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Python
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
*.so
|
| 6 |
+
.Python
|
| 7 |
+
|
| 8 |
+
# Virtual environments
|
| 9 |
+
.venv/
|
| 10 |
+
venv/
|
| 11 |
+
ENV/
|
| 12 |
+
env/
|
| 13 |
+
|
| 14 |
+
# Environment variables and secrets
|
| 15 |
+
local/
|
| 16 |
+
.env
|
| 17 |
+
*.env
|
| 18 |
+
|
| 19 |
+
# IDE
|
| 20 |
+
.vscode/
|
| 21 |
+
.idea/
|
| 22 |
+
*.swp
|
| 23 |
+
*.swo
|
| 24 |
+
*~
|
| 25 |
+
|
| 26 |
+
# Jupyter Notebook
|
| 27 |
+
.ipynb_checkpoints
|
| 28 |
+
*.ipynb
|
| 29 |
+
|
| 30 |
+
# Distribution / packaging
|
| 31 |
+
.Python
|
| 32 |
+
build/
|
| 33 |
+
develop-eggs/
|
| 34 |
+
dist/
|
| 35 |
+
downloads/
|
| 36 |
+
eggs/
|
| 37 |
+
.eggs/
|
| 38 |
+
lib/
|
| 39 |
+
lib64/
|
| 40 |
+
parts/
|
| 41 |
+
sdist/
|
| 42 |
+
var/
|
| 43 |
+
wheels/
|
| 44 |
+
*.egg-info/
|
| 45 |
+
.installed.cfg
|
| 46 |
+
*.egg
|
| 47 |
+
|
| 48 |
+
# PyInstaller
|
| 49 |
+
*.manifest
|
| 50 |
+
*.spec
|
| 51 |
+
|
| 52 |
+
# Unit test / coverage reports
|
| 53 |
+
htmlcov/
|
| 54 |
+
.tox/
|
| 55 |
+
.coverage
|
| 56 |
+
.coverage.*
|
| 57 |
+
.cache
|
| 58 |
+
nosetests.xml
|
| 59 |
+
coverage.xml
|
| 60 |
+
*.cover
|
| 61 |
+
.hypothesis/
|
| 62 |
+
.pytest_cache/
|
| 63 |
+
|
| 64 |
+
# UV lock file (optional - uncomment if you want to ignore it)
|
| 65 |
+
# uv.lock
|
| 66 |
+
|
| 67 |
+
# Temporary files
|
| 68 |
+
*.log
|
| 69 |
+
*.tmp
|
| 70 |
+
*.temp
|
| 71 |
+
message.txt
|
| 72 |
+
debug_agent.py
|
| 73 |
+
|
| 74 |
+
# OS
|
| 75 |
+
.DS_Store
|
| 76 |
+
Thumbs.db
|
| 77 |
+
|
| 78 |
+
# Project specific
|
| 79 |
+
main.py
|
.python-version
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
3.12
|
README.md
CHANGED
|
@@ -1,12 +1,50 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generative AI Application
|
| 2 |
+
|
| 3 |
+
This is a simple generative AI application built using OpenAI's GPT model, Gradio for the user interface, and tools for Wikipedia and Tavily web search.
|
| 4 |
+
|
| 5 |
+
## Features
|
| 6 |
+
- Chat with OpenAI's GPT model.
|
| 7 |
+
- Use Wikipedia for quick information retrieval.
|
| 8 |
+
- Use Tavily for web search.
|
| 9 |
+
|
| 10 |
+
## Setup Instructions
|
| 11 |
+
|
| 12 |
+
### Prerequisites
|
| 13 |
+
- Python 3.10 or higher
|
| 14 |
+
- `uv` for package management
|
| 15 |
+
|
| 16 |
+
### Installation
1. Clone the repository and change into the project directory:
   ```bash
   git clone <repository-url>
   cd CAS_generative_AI
   ```
|
| 19 |
+
|
| 20 |
+
2. Create a `local/.env` file and add your API keys (the app loads this path via `python-dotenv`):
   ```
   OPENAI_API_KEY=<your OpenAI key>
   TAVILY_API_KEY=<your Tavily key>
   ```
|
| 22 |
+
|
| 23 |
+
3. Install dependencies using `uv`:
|
| 24 |
+
```bash
|
| 25 |
+
uv sync
|
| 26 |
+
```
|
| 27 |
+
|
| 28 |
+
### Running the Application
|
| 29 |
+
Launch the Gradio app from the project root:
|
| 30 |
+
**API keys are managed via a `.env` file and loaded automatically using `python-dotenv`.**
|
| 31 |
+
```bash
|
| 32 |
+
python app.py
|
| 33 |
+
```
|
| 34 |
+
|
| 35 |
+
The app will be available at `http://0.0.0.0:7860`.
|
| 36 |
+
|
| 37 |
+
## Configuration
|
| 38 |
+
All configurations are stored in `config.yaml`. Update this file to modify settings such as API keys, model parameters, and tool options.
|
| 39 |
+
|
| 40 |
+
## Tools
|
| 41 |
+
### Wikipedia Tool
|
| 42 |
+
- Retrieves summaries from Wikipedia.
|
| 43 |
+
- Configurable language and number of sentences.
|
| 44 |
+
|
| 45 |
+
### Tavily Tool
|
| 46 |
+
- Performs web searches using the Tavily API.
|
| 47 |
+
- Configurable search depth and result limits.
|
| 48 |
+
|
| 49 |
+
## License
|
| 50 |
+
This project is licensed under the MIT License.
|
agent.py
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Generative AI Agent with Tool Calling Capabilities
|
| 3 |
+
|
| 4 |
+
This module provides an intelligent agent that can use multiple tools (Wikipedia, Tavily)
|
| 5 |
+
to answer user queries with up-to-date and accurate information.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import os
|
| 9 |
+
import yaml
|
| 10 |
+
import logging
|
| 11 |
+
from dotenv import load_dotenv
|
| 12 |
+
|
| 13 |
+
from langchain_openai import ChatOpenAI
|
| 14 |
+
from langchain_core.tools import Tool
|
| 15 |
+
from tools.wikipedia_tool import WikipediaTool
|
| 16 |
+
from langchain_tavily import TavilySearch
|
| 17 |
+
from langgraph.prebuilt import create_react_agent
|
| 18 |
+
|
| 19 |
+
# Configure logging for the whole module: INFO level surfaces the agent's
# lifecycle messages (init, tool registration, per-query progress).
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class GenerativeAIAgent:
    """
    An intelligent agent that uses LangGraph's ReAct pattern to answer queries.

    The agent can dynamically select and use tools (Wikipedia, Tavily Search) based on
    the user's query to provide accurate and up-to-date information.

    Attributes:
        llm: The language model (ChatOpenAI)
        tools: List of available tools for the agent
        agent_executor: The LangGraph ReAct agent executor
    """

    def __init__(self, config_path: str = "config.yaml"):
        """
        Initialize the GenerativeAIAgent with configuration and tools.

        Args:
            config_path: Path to the YAML configuration file

        Raises:
            FileNotFoundError: If the config file does not exist.
            yaml.YAMLError: If the config file is not valid YAML.
        """
        logger.info("Initializing GenerativeAIAgent...")

        # API keys live outside the repo (see .gitignore: "local/").
        load_dotenv(dotenv_path="local/.env")

        # Load configuration first so the tools below can be config-driven.
        self.config = self._load_config(config_path)

        # Initialize tool backends. max_results now honours config.yaml's
        # "tavily" section (previously hard-coded; 5 remains the fallback).
        self.wikipedia_tool = WikipediaTool(config_path)
        tavily_cfg = self.config.get("tavily", {})
        self.tavily_search = TavilySearch(max_results=tavily_cfg.get("max_results", 5))

        # Wrap the backends as LangChain Tool objects the agent can call.
        self.tools = self._initialize_tools()

        # Initialize language model.
        self.llm = self._initialize_llm()

        # Create the ReAct (Reason + Act) agent executor.
        self.agent_executor = create_react_agent(self.llm, self.tools)

        logger.info("Agent initialized successfully with %d tools", len(self.tools))

    def _load_config(self, config_path: str) -> dict:
        """Load and return the YAML configuration; log and re-raise load errors."""
        try:
            with open(config_path, "r") as file:
                config = yaml.safe_load(file)
            logger.info("Configuration loaded from %s", config_path)
            return config
        except FileNotFoundError:
            logger.error("Config file not found: %s", config_path)
            raise
        except yaml.YAMLError as e:
            logger.error("Error parsing config file: %s", e)
            raise

    def _initialize_tools(self) -> list:
        """Initialize and return the list of tools available to the agent."""
        tools = [
            Tool(
                name="Wikipedia",
                description=(
                    "Search Wikipedia for factual, encyclopedic information. "
                    "Best for: historical facts, scientific concepts, biographies, "
                    "general knowledge. Input should be a clear search query."
                ),
                func=self.wikipedia_tool.search
            ),
            Tool(
                name="Tavily",
                description=(
                    "Search the web for current information and latest news. "
                    "Best for: recent events, breaking news, current trends, "
                    "real-time data. Input should be a search query."
                ),
                func=self.tavily_search.invoke
            )
        ]
        logger.info("Initialized tools: %s", [tool.name for tool in tools])
        return tools

    # String annotation: lets the class be defined without evaluating the
    # langchain type at class-creation time.
    def _initialize_llm(self) -> "ChatOpenAI":
        """Build the chat model from the "openai" section of the config.

        The fallback defaults mirror config.yaml ("gpt-4o-mini"); the previous
        fallback of "gpt-5" disagreed with the shipped configuration and README.
        """
        model_config = self.config.get("openai", {})

        llm = ChatOpenAI(
            model=model_config.get("model", "gpt-4o-mini"),
            temperature=model_config.get("temperature", 0.7),
            max_tokens=model_config.get("max_tokens", 1000),
            api_key=os.getenv("OPENAI_API_KEY")
        )

        logger.info("LLM initialized: %s", model_config.get("model"))
        return llm

    def generate_response(self, user_input: str) -> str:
        """
        Generate a response to the user's input using the agent.

        The agent will automatically select and use appropriate tools based on the query,
        following the ReAct (Reasoning + Acting) pattern.

        Args:
            user_input: The user's question or query

        Returns:
            str: The agent's response, or a user-friendly error message.
        """
        if not user_input or not user_input.strip():
            logger.warning("Empty input received")
            return "Please provide a valid question or query."

        try:
            # Truncate long queries in the log line only, never in the request.
            logger.info("Processing query: %s", user_input[:50] + "..." if len(user_input) > 50 else user_input)

            # Get system prompt from config (with a sensible default).
            system_prompt = self.config.get("app", {}).get(
                "system_prompt",
                "You are a helpful AI assistant with access to Wikipedia and web search tools."
            )

            # Prepare messages for the agent.
            messages = [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_input}
            ]

            # The executor runs the full think/act/observe loop and returns
            # the whole message trace.
            response = self.agent_executor.invoke({"messages": messages})

            # Extract the final answer from the response.
            final_answer = self._extract_final_answer(response)

            logger.info("Response generated successfully")
            return final_answer

        except Exception as e:
            # Catch-all boundary: the UI should receive readable text, not a traceback.
            logger.error("Error generating response: %s", str(e), exc_info=True)
            return self._format_error_message(str(e))

    def _extract_final_answer(self, response: dict) -> str:
        """
        Extract the final answer from the agent's response.

        Scans the message trace backwards and returns the last message whose
        content is a non-empty string. Message content may also be a list of
        content blocks for some outputs; those are skipped rather than
        crashing on ``.strip()`` (the original called ``.strip()`` on any
        truthy content).

        Args:
            response: The response dictionary from the agent executor

        Returns:
            str: The extracted final answer
        """
        if isinstance(response, dict) and "messages" in response:
            # Iterate through messages in reverse to find the last message
            # carrying a usable string answer.
            for msg in reversed(response["messages"]):
                content = getattr(msg, "content", None)
                if isinstance(content, str) and content.strip():
                    return content.strip()

        # Fallback
        logger.warning("Could not extract proper answer from response")
        return "I apologize, but I couldn't generate a proper response. Please try rephrasing your question."

    def _format_error_message(self, error: str) -> str:
        """
        Format error messages in a user-friendly way.

        Args:
            error: The error message

        Returns:
            str: A formatted error message
        """
        if "rate limit" in error.lower():
            return "⚠️ Rate limit reached. Please wait a moment and try again."
        elif "api key" in error.lower():
            return "⚠️ API authentication error. Please check your API keys."
        elif "timeout" in error.lower():
            return "⚠️ Request timed out. Please try again."
        else:
            return f"⚠️ An error occurred: {error}\n\nPlease try rephrasing your question or try again later."

    def get_available_tools(self) -> list:
        """
        Get a list of available tools and their descriptions.

        Returns:
            list: List of dictionaries containing tool information
        """
        return [
            {
                "name": tool.name,
                "description": tool.description
            }
            for tool in self.tools
        ]
app.py
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
import gradio as gr
|
| 3 |
+
import os
|
| 4 |
+
from agent import GenerativeAIAgent
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
# Initialize the agent once at import time; every chat request reuses this
# single instance (its config, tools, and LLM client).
agent = GenerativeAIAgent()
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def chat_with_agent(user_input, history):
    """Run the agent on *user_input* and append the exchange to *history*.

    Returns the updated history twice — once for the chatbot widget and once
    for the session state. Blank/whitespace-only input leaves history untouched.
    """
    if user_input.strip():
        reply = agent.generate_response(user_input)
        history.append((user_input, reply))
    return history, history
| 19 |
+
|
| 20 |
+
# Create fancy Gradio interface with custom theme.
# NOTE: the whole UI is built at import time; demo.launch() at the bottom
# starts the server.
with gr.Blocks(
    theme=gr.themes.Soft(
        primary_hue="blue",
        secondary_hue="indigo",
        neutral_hue="slate",
    ),
    # Custom CSS: app-wide font, rounded/shadowed chat box, and the default
    # Gradio footer hidden.
    css="""
    .gradio-container {
        font-family: 'Inter', sans-serif;
    }
    .chat-container {
        border-radius: 15px;
        box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
    }
    footer {
        display: none !important;
    }
    """
) as demo:
    # Page header.
    gr.Markdown(
        """
        # 🤖 AI Assistant with Tool Integration

        **Powered by GPT-4o-mini** • Ask anything and I'll use Wikipedia or web search to find accurate answers!

        ---
        """,
        elem_classes="header"
    )

    with gr.Row():
        # Left column: static sidebar with tool list and example prompts.
        with gr.Column(scale=1):
            gr.Markdown(
                """
                ### 🛠️ Available Tools

                - 📚 **Wikipedia**: For factual information
                - 🌐 **Tavily Search**: For latest news & web content

                ### 💡 Try asking:
                - "What's the latest news about AI?"
                - "Tell me about quantum computing"
                - "Recent developments in climate change"
                """,
                elem_classes="sidebar"
            )

        # Right column: chat history, input box, and action buttons.
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(
                label="Conversation",
                height=500,
                bubble_full_width=False,
                avatar_images=(None, "assistant_avatar.png"),  # (user, assistant)
                elem_classes="chat-container"
            )

            with gr.Row():
                user_input = gr.Textbox(
                    label="Your Message",
                    placeholder="Type your question here... 💬",
                    lines=2,
                    scale=4
                )
                submit_btn = gr.Button("Send 🚀", variant="primary", scale=1)

            with gr.Row():
                clear_btn = gr.Button("Clear Chat 🗑️", variant="secondary")

    # Store conversation history (list of (user, assistant) pairs per session).
    state = gr.State([])

    # Event handlers: the Send button and pressing Enter in the textbox both
    # run the agent, then clear the input box afterwards.
    submit_btn.click(
        fn=chat_with_agent,
        inputs=[user_input, state],
        outputs=[chatbot, state]
    ).then(
        lambda: "",
        outputs=[user_input]
    )

    user_input.submit(
        fn=chat_with_agent,
        inputs=[user_input, state],
        outputs=[chatbot, state]
    ).then(
        lambda: "",
        outputs=[user_input]
    )

    # Clearing resets both the visible chat and the stored history.
    clear_btn.click(
        lambda: ([], []),
        outputs=[chatbot, state]
    )

    # Page footer.
    gr.Markdown(
        """
        ---

        🔒 **Privacy**: Your conversations are not stored • ⚡ **Fast**: Powered by OpenAI's latest models
        """,
        elem_classes="footer"
    )

# Launch the app.
# NOTE(review): config.yaml defines gradio server_name/server_port/show_api,
# but they are not passed to launch() here — confirm whether they should be.
demo.launch(share=False)
assistant_avatar.png
ADDED
|
|
config.yaml
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Configuration for Generative AI Application
|
| 2 |
+
# All API keys and parameters are externalized following Configuration-Driven Architecture
|
| 3 |
+
|
| 4 |
+
# OpenAI Configuration
|
| 5 |
+
openai:
|
| 6 |
+
api_key: "${OPENAI_API_KEY}"  # Placeholder only — YAML does not expand ${...}; the app reads OPENAI_API_KEY from the environment (local/.env)
|
| 7 |
+
model: "gpt-4o-mini"
|
| 8 |
+
temperature: 0.7
|
| 9 |
+
max_tokens: 1000
|
| 10 |
+
|
| 11 |
+
# Tavily Search Configuration
|
| 12 |
+
tavily:
|
| 13 |
+
api_key: "${TAVILY_API_KEY}" # Set via environment variable
|
| 14 |
+
max_results: 5
|
| 15 |
+
search_depth: "basic" # Options: basic, advanced
|
| 16 |
+
|
| 17 |
+
# Wikipedia Configuration
|
| 18 |
+
wikipedia:
|
| 19 |
+
language: "en"
|
| 20 |
+
sentences: 3
|
| 21 |
+
auto_suggest: true
|
| 22 |
+
|
| 23 |
+
# Gradio Configuration
|
| 24 |
+
gradio:
|
| 25 |
+
server_name: "0.0.0.0"
|
| 26 |
+
server_port: 7860
|
| 27 |
+
share: false
|
| 28 |
+
show_api: false
|
| 29 |
+
|
| 30 |
+
# Application Settings
|
| 31 |
+
app:
|
| 32 |
+
system_prompt: "You are a helpful AI assistant with access to Wikipedia and web search tools. Use these tools when you need current information or facts to answer user questions."
|
| 33 |
+
max_conversation_history: 10
|
pyproject.toml
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[project]
|
| 2 |
+
name = "cas-generative-ai"
|
| 3 |
+
version = "0.1.0"
|
| 4 |
+
description = "Add your description here"
|
| 5 |
+
readme = "README.md"
|
| 6 |
+
requires-python = ">=3.12"
|
| 7 |
+
dependencies = [
|
| 8 |
+
"gradio>=5.49.1",
|
| 9 |
+
"langchain>=1.0.2",
|
| 10 |
+
"langchain-community>=0.4",
|
| 11 |
+
"langchain-core>=1.0.1",
|
| 12 |
+
"langchain-openai>=1.0.1",
|
| 13 |
+
"langchain-tavily>=0.2.12",
|
| 14 |
+
"openai>=2.6.1",
|
| 15 |
+
"python-dotenv>=1.1.1",
|
| 16 |
+
"pyyaml>=6.0.3",
|
| 17 |
+
"tavily-python>=0.7.12",
|
| 18 |
+
"wikipedia>=1.4.0",
|
| 19 |
+
]
|
tools/tavily_tool.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
import yaml
|
| 3 |
+
import os
|
| 4 |
+
from dotenv import load_dotenv
|
| 5 |
+
|
| 6 |
+
class TavilyTool:
    """Thin client for the Tavily web-search REST API.

    Result count and search depth come from the ``tavily`` section of
    ``config.yaml``; the API key comes from the ``TAVILY_API_KEY`` environment
    variable (loaded via python-dotenv).
    """

    def __init__(self, config_path="config.yaml"):
        """Load search settings from *config_path* and the API key from the env."""
        load_dotenv()
        with open(config_path, "r") as file:
            config = yaml.safe_load(file)
        self.max_results = config["tavily"]["max_results"]
        self.search_depth = config["tavily"]["search_depth"]
        self.api_key = os.getenv("TAVILY_API_KEY")

    def search(self, query):
        """Run a web search for *query*.

        Returns the parsed JSON response on success, or an error string on any
        network/HTTP failure (callers can display either as text).
        """
        url = "https://api.tavily.com/search"
        # Tavily's /search endpoint is a POST taking a JSON body; the
        # documented field names are "search_depth" and "max_results".
        # (The original issued a GET with query params and the undocumented
        # key "depth", which does not match the API.)
        payload = {
            "query": query,
            "max_results": self.max_results,
            "search_depth": self.search_depth
        }
        headers = {"Authorization": f"Bearer {self.api_key}"}

        try:
            response = requests.post(url, json=payload, headers=headers)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            return f"An error occurred: {str(e)}"
tools/wikipedia_tool.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import wikipedia
|
| 2 |
+
import yaml
|
| 3 |
+
|
| 4 |
+
class WikipediaTool:
    """Wikipedia lookup helper configured from ``config.yaml``.

    The ``wikipedia`` section of the config supplies the lookup language,
    summary length (in sentences), and the auto-suggest flag.
    """

    def __init__(self, config_path="config.yaml"):
        """Read the ``wikipedia`` config section and set the lookup language."""
        with open(config_path, "r") as file:
            settings = yaml.safe_load(file)["wikipedia"]
        self.language = settings["language"]
        self.sentences = settings["sentences"]
        self.auto_suggest = settings["auto_suggest"]

        wikipedia.set_lang(self.language)

    def search(self, query):
        """Return a short Wikipedia summary for *query*.

        Failures never propagate: disambiguation pages, missing pages, and any
        other error are converted to a human-readable string.
        """
        try:
            return wikipedia.summary(
                query,
                sentences=self.sentences,
                auto_suggest=self.auto_suggest,
            )
        except wikipedia.exceptions.DisambiguationError as e:
            return f"Disambiguation error: {e.options}"
        except wikipedia.exceptions.PageError:
            return "Page not found."
        except Exception as e:
            return f"An error occurred: {str(e)}"
uv.lock
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|