Commit · b5a0b96
Parent(s): cd2cbb5
update app, storage, core_logic, requirements

Files changed:
- app.py +38 -33
- core_logic.py +26 -31
- requirements.txt +1 -1
- storage.py +69 -15
app.py
CHANGED
@@ -5,56 +5,61 @@
 The Interface Skeleton - The code sets up the navigation panel and the multimodal chat interface
 """
 
 import gradio as gr
 from core_logic import chat_function
-from storage import save_chat, load_history
+from storage import save_chat, load_history, get_chat_content
 
-with gr.Blocks(css="styles.css", theme=gr.themes.Soft()) as demo:
-    # State variables for persistence
-    current_chat_id = gr.State("")
-    session_history = gr.State([])
+with gr.Blocks() as demo:
+    chat_id_state = gr.State("")
+
     with gr.Row():
+        # --- Sidebar ---
         with gr.Column(scale=1, variant="secondary"):
-            gr.Markdown("## 🛠️
+            gr.Markdown("### 🛠️ Silicon Architect")
             new_btn = gr.Button("➕ New Chat", variant="primary")
 
-            gr.Markdown("### 🕒 Recent Conversations")
-            # This would be populated from storage.py
             history_list = gr.Dataset(
                 components=[gr.Textbox(visible=False)],
-                label="",
+                label="Recent Conversations",
                 samples=load_history()
             )
 
-            download_format = gr.Radio([".txt", ".pdf", ".docx"], label="Export Format")
-            export_btn = gr.Button("📥 Download Current Chat")
-
-        # --- MAIN CHAT INTERFACE ---
+        # --- Main Chat ---
         with gr.Column(scale=4):
+            # Gradio 6: 'type' is removed, 'messages' is default
             chatbot = gr.Chatbot(
                 show_label=False,
-                multimodal=True
+                height=700,
+                multimodal=True,
+                autoscroll=True
             )
 
-                file_count="multiple"
-            )
+            chat_input = gr.MultimodalTextbox(
+                interactive=True,
+                placeholder="Discuss architecture or upload code...",
+                show_label=False
+            )
 
+    # --- Event Logic ---
+    def bot_response(message, history):
+        # chat_function yields plain strings; wrap them into the messages-format history
+        history.append({"role": "user", "content": message["text"]})
+        # Placeholder for assistant message
+        history.append({"role": "assistant", "content": ""})
+
+        for partial_resp in chat_function(message, history[:-2]):
+            history[-1]["content"] = partial_resp
+            yield history
+
+    chat_input.submit(bot_response, [chat_input, chatbot], [chatbot]).then(
+        lambda h: save_chat(None, h), [chatbot], None
+    )
+
+    new_btn.click(lambda: ([], ""), None, [chatbot, chat_id_state])
+
+# Move theme and CSS here for Gradio 6.0
demo.launch(
    theme=gr.themes.Soft(),
    css="styles.css"
)
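Note that this hunk imports get_chat_content but never wires the sidebar Dataset to it, so clicking a past conversation does nothing yet. A minimal sketch of what that wiring could look like, assuming gr.Dataset's click event passes the clicked sample (here a one-element list holding the chat ID) when the Dataset is used as an input; the handler name load_selected is illustrative, not part of this commit:

    # Sketch only — not in this commit. Assumes the clicked sample arrives as
    # a one-element list like ["20240101_120000"] and that get_chat_content
    # returns a messages-format history (list of role/content dicts).
    def load_selected(sample):
        chat_id = sample[0]
        return get_chat_content(chat_id), chat_id

    history_list.click(
        load_selected,
        inputs=[history_list],   # the clicked sample is passed as the value
        outputs=[chatbot, chat_id_state]
    )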
core_logic.py
CHANGED
@@ -17,6 +17,9 @@ SYSTEM_PROMPT = """
 You are the 'Silicon Architect'—a master-stroke creative genius in AI Engineering and Technical Architecture.
 Your goal is to provide production-grade, highly optimized solutions for web and mobile AI applications.
 
+Expertise: Python 3.12, Agentic Loops, FastAPI, and Scalable Architecture.
+Provide production-ready code and rigorous technical research.
+
 CORE DIRECTIVES:
 1. ARCHITECTURAL RIGOR: Always consider scalability, async patterns, and state management.
 2. AGENTIC EXPERTISE: You understand recurrent-depth simulations, tool-calling, and autonomous loops.
@@ -28,41 +31,33 @@ When a user provides files, analyze the code structure and logic before proposin
 """
 
 def chat_function(message, history):
+    # message is now a dict: {"text": "...", "files": [...]}
+    user_text = message.get("text", "")
+    files = message.get("files", [])
 
+    # Process Files
+    context_from_files = ""
+    for f in files:
+        # f can be a filepath string or a dict depending on upload status
+        path = f["path"] if isinstance(f, dict) else f
+        context_from_files += parse_file(path)
 
-        web_results = web_search(search_query)
-        full_user_input = f"RESEARCH RESULTS:\n{web_results}\n\nUSER FILES:\n{files_content}\n\nUSER QUESTION: {user_text}"
+    # Research Trigger
+    if any(keyword in user_text.lower() for keyword in ["search", "docs", "latest"]):
+        research_context = web_search(user_text)
+        prompt = f"RESEARCH:\n{research_context}\n\nFILES:\n{context_from_files}\n\nUSER: {user_text}"
     else:
+        prompt = f"FILES:\n{context_from_files}\n\nUSER: {user_text}"
 
+    # Build Gradio 6.0 compliant messages
     messages = [{"role": "system", "content": SYSTEM_PROMPT}]
-    messages.append({"role": "user", "content": full_user_input})
+    # 'history' is already a list of dicts in Gradio 6
+    messages.extend(history)
+    messages.append({"role": "user", "content": prompt})
 
-        messages,
-        max_tokens=2048,
-        stream=True,
-        temperature=0.2,  # Low temperature for high technical precision
-    ):
-        token = message_chunk.choices[0].delta.content
+    response_text = ""
+    for chunk in client.chat_completion(messages, max_tokens=2048, stream=True, temperature=0.2):
+        token = chunk.choices[0].delta.content
         if token:
+            response_text += token
+            yield response_text
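For reference, the message dict chat_function now receives mirrors what gr.MultimodalTextbox submits. A small self-contained sketch of the payload shape and of the two normalization steps the function performs (the text, path, and values here are illustrative only):

    # Illustrative payload shape from gr.MultimodalTextbox:
    message = {
        "text": "search the latest FastAPI docs",
        "files": ["/tmp/gradio/upload/app.py"],  # entries may also be dicts with a "path" key
    }

    # The research trigger fires because "search" and "latest" appear in the text:
    assert any(k in message["text"].lower() for k in ["search", "docs", "latest"])

    # File entries are normalized the same way chat_function does it:
    for f in message["files"]:
        path = f["path"] if isinstance(f, dict) else f
        print(path)  # -> /tmp/gradio/upload/app.py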
requirements.txt
CHANGED
@@ -1,4 +1,4 @@
-gradio
+gradio>=6.0.0
 huggingface_hub
 python-dotenv
 duckduckgo-search
storage.py
CHANGED
@@ -7,29 +7,83 @@ Persistence Layer - Handles the "Save/Load" functionality using Hugging Face Dat
 
 import json
 import os
+from datetime import datetime
 from huggingface_hub import HfApi, hf_hub_download
 
+# --- CONFIGURATION ---
+REPO_ID = "prashantmatlani/chathistorycoderg"
+HISTORY_DIR = "./chathistory"
+
+# Initialize the API with your token
 api = HfApi(token=os.getenv("HF_TOKEN"))
 
 def save_chat(chat_id, history):
-    """
+    """Saves chat to local subdirectory and syncs to Hugging Face Dataset."""
+    if not os.path.exists(HISTORY_DIR):
+        os.makedirs(HISTORY_DIR)
+
+    # Generate a unique ID if none exists (e.g., for a brand new chat)
+    if not chat_id:
+        chat_id = datetime.now().strftime("%Y%m%d_%H%M%S")
+
+    filename = f"{chat_id}.json"
+    local_path = os.path.join(HISTORY_DIR, filename)
+
+    # 1. Save Locally
+    with open(local_path, "w", encoding="utf-8") as f:
+        json.dump(history, f, indent=4)
+
+    # 2. Sync to Hugging Face Dataset (Master Stroke Persistence)
+    try:
+        api.upload_file(
+            path_or_fileobj=local_path,
+            path_in_repo=f"chats/{filename}",
+            repo_id=REPO_ID,
+            repo_type="dataset"
+        )
+    except Exception as e:
+        print(f"Cloud Sync Warning: {e}")
+
+    return chat_id
 
 def load_history():
-    """
+    """Retrieves list of chat IDs from the Hub to populate the sidebar."""
     try:
+        # We pull the list from the Hub so the sidebar reflects all saved sessions
         files = api.list_repo_files(repo_id=REPO_ID, repo_type="dataset")
-        chat_files = [
+        chat_files = [
+            f.split("/")[-1].replace(".json", "")
+            for f in files if f.startswith("chats/")
+        ]
+        # Return as list of lists for Gradio Dataset component
+        return [[f] for f in sorted(chat_files, reverse=True)]
+    except Exception as e:
+        print(f"Error loading history from Hub: {e}")
         return []
+
+def get_chat_content(chat_id):
+    """Loads a specific chat's content from the Hub or local cache."""
+    filename = f"chats/{chat_id}.json"
+    local_path = os.path.join(HISTORY_DIR, f"{chat_id}.json")
+
+    try:
+        # Ensure local dir exists
+        if not os.path.exists(HISTORY_DIR):
+            os.makedirs(HISTORY_DIR)
+
+        # Download from Hub to keep local state fresh
+        downloaded_path = hf_hub_download(
+            repo_id=REPO_ID,
+            repo_type="dataset",
+            filename=filename,
+            token=os.getenv("HF_TOKEN")
+        )
+        with open(downloaded_path, "r", encoding="utf-8") as f:
+            return json.load(f)
+    except Exception:
+        # Fallback to local if Hub is unreachable
+        if os.path.exists(local_path):
+            with open(local_path, "r", encoding="utf-8") as f:
+                return json.load(f)
+        return []