Commit ·
954371b
1
Parent(s): 741c3da
name of the agent changed
Browse files
- .python-version +1 -1
- README.md +7 -3
- __pycache__/agent.cpython-313.pyc +0 -0
- agent.py +2 -2
- app.py +15 -15
- pyproject.toml +6 -1
- runtime.txt +1 -0
.python-version
CHANGED
|
@@ -1 +1 @@
|
|
| 1 |
-
3.13
|
|
|
|
| 1 |
+
3.11
|
README.md
CHANGED
|
@@ -1,6 +1,6 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
emoji:
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: blue
|
| 6 |
sdk: gradio
|
|
@@ -9,7 +9,11 @@ app_file: app.py
|
|
| 9 |
pinned: false
|
| 10 |
license: apache-2.0
|
| 11 |
python_version: "3.13.3"
|
| 12 |
-
short_description:
|
| 13 |
---
|
| 14 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 15 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
---
|
| 2 |
+
title: LlamaIndex Report Generation Agent
|
| 3 |
+
emoji: 🤖
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: blue
|
| 6 |
sdk: gradio
|
|
|
|
| 9 |
pinned: false
|
| 10 |
license: apache-2.0
|
| 11 |
python_version: "3.13.3"
|
| 12 |
+
short_description: LlamaIndex-based report generation agent using teacher-student methodology
|
| 13 |
---
|
| 14 |
|
| 15 |
+
# LlamaIndex Report Generation Agent
|
| 16 |
+
|
| 17 |
+
A multi-agent workflow built with LlamaIndex that uses teacher-student methodology to generate comprehensive reports. The system employs three specialized agents (Research, Write, Review) that collaborate to research topics, write reports, and ensure quality through iterative feedback.
|
| 18 |
+
|
| 19 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
__pycache__/agent.cpython-313.pyc
CHANGED
|
Binary files a/__pycache__/agent.cpython-313.pyc and b/__pycache__/agent.cpython-313.pyc differ
|
|
|
agent.py
CHANGED
|
@@ -10,7 +10,7 @@ from llama_index.core.workflow import Context
|
|
| 10 |
|
| 11 |
load_dotenv(os.path.join(os.path.dirname(__file__), 'env.local'))
|
| 12 |
|
| 13 |
-
class TeacherStudentAgentWorkflow:
|
| 14 |
def __init__(self):
|
| 15 |
self.llm = HuggingFaceInferenceAPI(
|
| 16 |
model_name="microsoft/Phi-3.5-mini-instruct",
|
|
@@ -167,7 +167,7 @@ class TeacherStudentAgentWorkflow:
|
|
| 167 |
|
| 168 |
if __name__ == "__main__":
|
| 169 |
import asyncio
|
| 170 |
-
agent = TeacherStudentAgentWorkflow()
|
| 171 |
user_msg = input("Enter the topic or instructions for the report (leave blank for default): ").strip()
|
| 172 |
if not user_msg:
|
| 173 |
user_msg = None
|
|
|
|
| 10 |
|
| 11 |
load_dotenv(os.path.join(os.path.dirname(__file__), 'env.local'))
|
| 12 |
|
| 13 |
+
class LlamaIndexReportAgent:
|
| 14 |
def __init__(self):
|
| 15 |
self.llm = HuggingFaceInferenceAPI(
|
| 16 |
model_name="microsoft/Phi-3.5-mini-instruct",
|
|
|
|
| 167 |
|
| 168 |
if __name__ == "__main__":
|
| 169 |
import asyncio
|
| 170 |
+
agent = LlamaIndexReportAgent()
|
| 171 |
user_msg = input("Enter the topic or instructions for the report (leave blank for default): ").strip()
|
| 172 |
if not user_msg:
|
| 173 |
user_msg = None
|
app.py
CHANGED
|
@@ -4,7 +4,7 @@ import asyncio
|
|
| 4 |
import json
|
| 5 |
import hashlib
|
| 6 |
from datetime import datetime
|
| 7 |
-
from agent import TeacherStudentAgentWorkflow
|
| 8 |
from tools.simple_tools import get_workflow_state
|
| 9 |
from llama_index.core.agent.workflow import (
|
| 10 |
AgentInput,
|
|
@@ -21,7 +21,7 @@ agent_workflow = None
|
|
| 21 |
def get_agent_workflow():
|
| 22 |
global agent_workflow
|
| 23 |
if agent_workflow is None:
|
| 24 |
-
agent_workflow = TeacherStudentAgentWorkflow()
|
| 25 |
return agent_workflow
|
| 26 |
|
| 27 |
async def chat_with_agent(message, history):
|
|
@@ -199,16 +199,16 @@ def format_structured_report_display(structured_report_data):
|
|
| 199 |
)
|
| 200 |
|
| 201 |
# Create the Gradio interface
|
| 202 |
-
with gr.Blocks(title="Teacher-Student Agent Workflow", theme=gr.themes.Soft()) as demo:
|
| 203 |
gr.Markdown("""
|
| 204 |
-
# 🤖
|
| 205 |
|
| 206 |
-
|
| 207 |
-
- **ResearchAgent**: Searches the web and records notes
|
| 208 |
-
- **WriteAgent**:
|
| 209 |
-
- **ReviewAgent**: Reviews and provides feedback
|
| 210 |
|
| 211 |
-
Enter
|
| 212 |
""")
|
| 213 |
|
| 214 |
chatbot = gr.Chatbot(
|
|
@@ -250,13 +250,13 @@ with gr.Blocks(title="Teacher-Student Agent Workflow", theme=gr.themes.Soft()) a
|
|
| 250 |
)
|
| 251 |
|
| 252 |
gr.Markdown("""
|
| 253 |
-
### How
|
| 254 |
-
1. **ResearchAgent** searches for information and takes notes
|
| 255 |
-
2. **WriteAgent** creates a report based on the research
|
| 256 |
-
3. **ReviewAgent** reviews the report and provides feedback
|
| 257 |
-
4. The process
|
| 258 |
|
| 259 |
-
Watch the real-time collaboration between agents as they
|
| 260 |
""")
|
| 261 |
|
| 262 |
# Event handlers
|
|
|
|
| 4 |
import json
|
| 5 |
import hashlib
|
| 6 |
from datetime import datetime
|
| 7 |
+
from agent import LlamaIndexReportAgent
|
| 8 |
from tools.simple_tools import get_workflow_state
|
| 9 |
from llama_index.core.agent.workflow import (
|
| 10 |
AgentInput,
|
|
|
|
| 21 |
def get_agent_workflow():
|
| 22 |
global agent_workflow
|
| 23 |
if agent_workflow is None:
|
| 24 |
+
agent_workflow = LlamaIndexReportAgent()
|
| 25 |
return agent_workflow
|
| 26 |
|
| 27 |
async def chat_with_agent(message, history):
|
|
|
|
| 199 |
)
|
| 200 |
|
| 201 |
# Create the Gradio interface
|
| 202 |
+
with gr.Blocks(title="LlamaIndex Report Generation Agent", theme=gr.themes.Soft()) as demo:
|
| 203 |
gr.Markdown("""
|
| 204 |
+
# 🤖 LlamaIndex Report Generation Agent
|
| 205 |
|
| 206 |
+
A multi-agent workflow built with LlamaIndex that uses teacher-student methodology to generate comprehensive reports. The system employs three specialized agents that collaborate step by step:
|
| 207 |
+
- **ResearchAgent**: Searches the web and records research notes
|
| 208 |
+
- **WriteAgent**: Creates structured reports based on research findings
|
| 209 |
+
- **ReviewAgent**: Reviews reports and provides iterative feedback for improvement
|
| 210 |
|
| 211 |
+
Enter any topic below to see the LlamaIndex agents collaborate using teacher-student methodology!
|
| 212 |
""")
|
| 213 |
|
| 214 |
chatbot = gr.Chatbot(
|
|
|
|
| 250 |
)
|
| 251 |
|
| 252 |
gr.Markdown("""
|
| 253 |
+
### How the LlamaIndex Teacher-Student Agent Works:
|
| 254 |
+
1. **ResearchAgent** searches for information and takes comprehensive notes
|
| 255 |
+
2. **WriteAgent** creates a structured report based on the research findings
|
| 256 |
+
3. **ReviewAgent** reviews the report and provides constructive feedback
|
| 257 |
+
4. The process iterates until the report meets quality standards
|
| 258 |
|
| 259 |
+
Watch the real-time collaboration between LlamaIndex agents as they employ teacher-student methodology!
|
| 260 |
""")
|
| 261 |
|
| 262 |
# Event handlers
|
pyproject.toml
CHANGED
|
@@ -3,7 +3,7 @@ name = "teacher-student-agent"
|
|
| 3 |
version = "0.1.0"
|
| 4 |
description = "Add your description here"
|
| 5 |
readme = "README.md"
|
| 6 |
-
requires-python = ">=3.13"
|
| 7 |
dependencies = [
|
| 8 |
"asyncio>=3.4.3",
|
| 9 |
"dotenv>=0.9.9",
|
|
@@ -12,3 +12,8 @@ dependencies = [
|
|
| 12 |
"llama-index-llms-huggingface-api>=0.5.0",
|
| 13 |
"tavily-python>=0.7.5",
|
| 14 |
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
version = "0.1.0"
|
| 4 |
description = "Add your description here"
|
| 5 |
readme = "README.md"
|
| 6 |
+
requires-python = ">=3.11,<3.12"
|
| 7 |
dependencies = [
|
| 8 |
"asyncio>=3.4.3",
|
| 9 |
"dotenv>=0.9.9",
|
|
|
|
| 12 |
"llama-index-llms-huggingface-api>=0.5.0",
|
| 13 |
"tavily-python>=0.7.5",
|
| 14 |
]
|
| 15 |
+
|
| 16 |
+
[tool.uv]
|
| 17 |
+
# Configure uv for faster builds in cloud environments
|
| 18 |
+
no-cache = true
|
| 19 |
+
index-strategy = "unsafe-best-match"
|
runtime.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
python-3.11.10
|