# Provenance (from the hosting page header, not original source):
# commit 104b883 — "Add user token input - users provide their own HF token" (sebasmos)
"""
Gradio app for AI Project Assistant.
"""
import os
import re
from datetime import datetime
from pathlib import Path

import gradio as gr
from dotenv import load_dotenv
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint

from src.agent import ProjectAgent
from src.rag import ProjectRAG
# Load environment variables from a local .env file, if present.
load_dotenv()
# Global state - Initialize RAG only (not agent).
# `rag` is populated by initialize_rag() below; None means "not initialized".
rag = None
def initialize_rag():
    """Build and index the RAG system (embeddings only — no LLM required).

    Returns:
        True when ./data exists and indexing succeeded, False otherwise.
    """
    global rag
    corpus_root = Path("./data")
    # Nothing to index without the data directory.
    if not corpus_root.exists():
        return False
    try:
        rag = ProjectRAG(corpus_root)
        rag.load_and_index()
    except Exception as exc:
        print(f"RAG initialization error: {exc}")
        return False
    return True


# Initialize RAG once at import time so the UI can list projects immediately.
initialize_rag()
def chat(message, history, project_filter, hf_token):
    """Answer a chat message via the project agent; yields a single response.

    `history` is accepted for the Gradio chat signature but not used here.
    """
    token = (hf_token or "").strip()
    if not token:
        yield "⚠️ Please enter your HuggingFace token first (get one at https://huggingface.co/settings/tokens)"
        return
    if not rag:
        yield "⚠️ System not initialized. Please check the data directory."
        return
    try:
        # Expose the user's token so downstream HF calls can pick it up.
        os.environ["HF_TOKEN"] = token
        # A fresh agent per request, built with the caller's token.
        assistant = ProjectAgent(rag)
        # Prefix the prompt with the selected project, unless querying all.
        prompt = message
        if project_filter and project_filter != "All Projects":
            prompt = f"[Project: {project_filter}] {message}"
        yield assistant.query(prompt)
    except Exception as e:
        yield f"❌ Error: {str(e)}\n\nMake sure your HuggingFace token is valid."
def get_projects():
    """Return dropdown choices: 'All Projects' plus every indexed project."""
    choices = ["All Projects"]
    if rag:
        choices += rag.get_all_projects()
    return choices
def structure_meeting(project_name, meeting_title, meeting_date, participants, meeting_text, hf_token):
    """Structure raw meeting notes with an LLM and save them under ./data.

    Args:
        project_name: Target project folder name under ./data (user-controlled
            in "Create New Project" mode — validated below).
        meeting_title: Optional title; becomes part of the saved file name.
        meeting_date: Date string (expected YYYY-MM-DD); used in the file name.
        participants: Optional comma-separated participant names.
        meeting_text: Raw, unstructured meeting notes.
        hf_token: User-supplied HuggingFace API token.

    Returns:
        A markdown status string: the structured notes on success, or an
        "❌ ..." error message on failure.
    """
    if not hf_token or hf_token.strip() == "":
        return "❌ Please enter your HuggingFace token first"
    if not project_name or not meeting_text:
        return "❌ Please provide both project name and meeting notes"
    # Reject project names that could escape ./data (path traversal): anything
    # containing a path separator, or the special "." / ".." entries.
    if project_name in {".", ".."} or Path(project_name).name != project_name:
        return "❌ Error: invalid project name"
    try:
        # Use HF Inference API with the user's token (no server-side secret).
        endpoint = HuggingFaceEndpoint(
            repo_id="meta-llama/Llama-3.2-3B-Instruct",
            temperature=0.3,
            max_new_tokens=1024,
            huggingfacehub_api_token=hf_token.strip()
        )
        llm = ChatHuggingFace(llm=endpoint)
        system_prompt = """You are a meeting notes structuring assistant.
Convert unstructured meeting notes into a well-formatted markdown document with these sections:
1. # Meeting: [title]
2. Date: [date]
3. Participants: [list]
4. ## Discussion (key points discussed)
5. ## Decisions (decisions made)
6. ## Action Items (as checkboxes with assignee and deadline if mentioned)
7. ## Blockers (any blockers or issues raised)
Format action items as:
- [ ] Person: Task description by deadline
or
- [ ] Task description (if no person/deadline mentioned)
Extract all relevant information from the raw notes."""
        user_prompt = f"""Structure these meeting notes:
Raw Notes:
{meeting_text}
Meeting Details:
- Title: {meeting_title or 'Meeting'}
- Date: {meeting_date}
- Participants: {participants or 'Not specified'}
"""
        messages = [
            SystemMessage(content=system_prompt),
            HumanMessage(content=user_prompt)
        ]
        response = llm.invoke(messages)
        structured_md = response.content
        # Save to file. Sanitize the user-controlled title/date so they cannot
        # inject path separators or otherwise unsafe characters into the name.
        slug = "meeting"
        if meeting_title:
            slug = re.sub(r"[^a-z0-9_-]+", "-", meeting_title.lower().replace(" ", "-")).strip("-") or "meeting"
        safe_date = re.sub(r"[^A-Za-z0-9_-]+", "-", str(meeting_date or ""))
        project_dir = Path("data") / project_name / "meetings"
        project_dir.mkdir(parents=True, exist_ok=True)
        file_path = project_dir / f"{safe_date}-{slug}.md"
        # Explicit UTF-8: model output is markdown that may contain non-ASCII
        # characters, and the platform default encoding may not be UTF-8.
        file_path.write_text(structured_md, encoding="utf-8")
        return f"✅ Meeting structured and saved to `{file_path}`\n\n---\n\n{structured_md}"
    except Exception as e:
        return f"❌ Error: {str(e)}"
# Create Gradio interface with custom CSS.
# Styles the chat and example panels, and stacks the two columns vertically
# on narrow (<=768px) screens for mobile layouts.
custom_css = """
.chatbot-container {
background-color: #f7f7f8;
border-radius: 8px;
padding: 10px;
}
.example-panel {
background-color: #f0f2f6;
border-radius: 8px;
padding: 15px;
height: 100%;
}
/* Mobile responsiveness */
@media (max-width: 768px) {
.row {
flex-direction: column !important;
}
.chatbot-container {
margin-top: 10px;
}
}
"""
# UI definition. Two tabs: a project Q&A chat and a meeting-notes uploader.
# NOTE(review): indentation was reconstructed from a whitespace-mangled
# source — verify the nesting matches the original file.
with gr.Blocks(title="Sherlock: AI Project Assistant", theme=gr.themes.Soft(), css=custom_css) as demo:
    gr.Markdown("""
# 🤖 Sherlock: AI Project Assistant
Your intelligent assistant for managing multiple projects through meeting summaries.
""")
    # Main tabs
    with gr.Tabs():
        # Chat tab
        with gr.Tab("💬 Chat"):
            gr.Markdown("### Ask questions about your projects")
            # HuggingFace Token input — each user supplies their own token.
            with gr.Row():
                hf_token_chat = gr.Textbox(
                    label="🔑 HuggingFace Token (Required)",
                    placeholder="Enter your HF token from https://huggingface.co/settings/tokens",
                    type="password",
                    scale=3
                )
                gr.Markdown("""
**Get a free token:**
1. Go to [huggingface.co/settings/tokens](https://huggingface.co/settings/tokens)
2. Create a new token with "Read" permissions
3. Paste it here
""", scale=1)
            # Project selection dropdown (choices fixed at app start-up).
            project_dropdown = gr.Dropdown(
                label="Select Project",
                choices=get_projects(),
                value="All Projects",
                interactive=True
            )
            # Chat interface with example queries on the side
            with gr.Row(elem_classes="row"):
                # Left panel - Example queries (same width as right panel chat box)
                with gr.Column(scale=1, elem_classes="example-panel"):
                    gr.Markdown("""
### 📖 How to Use
1. Select the project you want to query from the dropdown above
2. Type your question in the chat box or use one of the examples below
3. Press Enter or click Send
### 💡 Example Queries
- What are the open action items?
- What blockers do we have?
- What decisions were made?
- What should I focus on next?
- Summarize the project status
""")
                # Right panel - Chat (same width as left panel)
                with gr.Column(scale=1, elem_classes="chatbot-container"):
                    chatbot = gr.Chatbot(
                        label="Chat",
                        height=350,
                        show_label=False
                    )
                    msg = gr.Textbox(
                        label="Your Message",
                        placeholder="What are the open action items?",
                        lines=2,
                        show_label=False
                    )
                    with gr.Row():
                        submit_btn = gr.Button("Send", variant="primary", scale=1)
                        clear_btn = gr.Button("Clear", scale=1)

            def respond(message, chat_history, project, token):
                """Drain the `chat` generator and append the final answer to history."""
                if not message:
                    return chat_history, ""
                # Get bot response — keep only the last chunk yielded.
                bot_message = ""
                for response_chunk in chat(message, chat_history, project, token):
                    bot_message = response_chunk
                # Add to history as a (user, bot) tuple.
                # NOTE(review): tuple-style history is the legacy Chatbot
                # format — confirm the installed Gradio version still accepts it.
                chat_history.append((message, bot_message))
                return chat_history, ""

            # Both the Send button and pressing Enter submit the message;
            # the textbox is cleared on return.
            submit_btn.click(
                fn=respond,
                inputs=[msg, chatbot, project_dropdown, hf_token_chat],
                outputs=[chatbot, msg]
            )
            msg.submit(
                fn=respond,
                inputs=[msg, chatbot, project_dropdown, hf_token_chat],
                outputs=[chatbot, msg]
            )
            # Clear resets the chat history to an empty list.
            clear_btn.click(fn=lambda: [], outputs=chatbot)

        # Upload Meeting tab
        with gr.Tab("📤 Upload Meeting"):
            gr.Markdown("### Upload plain text meeting notes and let AI structure them")
            # HuggingFace Token input (separate field from the Chat tab).
            hf_token_upload = gr.Textbox(
                label="🔑 HuggingFace Token (Required)",
                placeholder="Enter your HF token from https://huggingface.co/settings/tokens",
                type="password"
            )
            # Project selection with toggle between existing and new projects.
            with gr.Row():
                with gr.Column():
                    project_mode = gr.Radio(
                        choices=["Use Existing Project", "Create New Project"],
                        value="Use Existing Project",
                        label="Project Selection"
                    )
                    # Existing project dropdown (shown when "Use Existing" is selected)
                    existing_project = gr.Dropdown(
                        label="Select Existing Project",
                        choices=get_projects()[1:],  # Exclude "All Projects"
                        visible=True
                    )
                    # New project textbox (shown when "Create New" is selected)
                    new_project = gr.Textbox(
                        label="New Project Name",
                        placeholder="e.g., mobile_app_redesign",
                        visible=False
                    )
                    upload_title = gr.Textbox(
                        label="Meeting Title",
                        placeholder="e.g., Sprint Planning"
                    )
                with gr.Column():
                    upload_date = gr.Textbox(
                        label="Meeting Date (YYYY-MM-DD)",
                        value=datetime.now().strftime("%Y-%m-%d"),
                        placeholder="2025-01-15"
                    )
                    upload_participants = gr.Textbox(
                        label="Participants (comma-separated)",
                        placeholder="e.g., Alice, Bob, Charlie"
                    )

            # Toggle visibility based on project mode: exactly one of the two
            # project inputs is visible at any time.
            def toggle_project_input(mode):
                if mode == "Use Existing Project":
                    return gr.update(visible=True), gr.update(visible=False)
                else:
                    return gr.update(visible=False), gr.update(visible=True)

            project_mode.change(
                fn=toggle_project_input,
                inputs=[project_mode],
                outputs=[existing_project, new_project]
            )
            upload_text = gr.Textbox(
                label="Meeting Notes (plain text)",
                placeholder="""Example:
We discussed the new feature requirements.
Alice will implement the login page by next Friday.
Bob raised a concern about the database migration.
We decided to use PostgreSQL instead of MySQL.
Charlie is blocked waiting for API credentials.""",
                lines=10
            )
            structure_btn = gr.Button("🤖 Structure Meeting with AI", variant="primary")
            structure_output = gr.Markdown(label="Structured Output")

            def structure_meeting_wrapper(mode, existing_proj, new_proj, title, date, participants, text, token):
                """Wrapper to handle both project modes."""
                # Determine which project name to use based on the radio mode.
                project_name = existing_proj if mode == "Use Existing Project" else new_proj
                return structure_meeting(project_name, title, date, participants, text, token)

            structure_btn.click(
                fn=structure_meeting_wrapper,
                inputs=[project_mode, existing_project, new_project, upload_title, upload_date, upload_participants, upload_text, hf_token_upload],
                outputs=structure_output
            )
# Launch the app when run as a script.
if __name__ == "__main__":
    # 0.0.0.0 exposes the server beyond localhost; 7860 is Gradio's default
    # port (typical for containerized / Spaces hosting).
    demo.launch(server_name="0.0.0.0", server_port=7860)