# Paperweb / app.py — Hugging Face Space source (page-header residue from scrape)
# Author: Seriki — "Update app.py", commit 698506e (verified)
# app.py
import gradio as gr
import requests
from datetime import datetime
import json
import os
import uuid
# Hugging Face API token — read from the environment instead of hard-coding a
# secret in source control; falls back to the original placeholder so behavior
# is unchanged when the variable is unset.
HF_API_TOKEN = os.environ.get("HF_API_TOKEN", "YOUR_HF_API_TOKEN")
headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}

# Folder holding one JSON conversation-history file per session id.
HISTORY_FOLDER = "histories"
os.makedirs(HISTORY_FOLDER, exist_ok=True)

# Display name (shown in the dropdown) -> Hugging Face model id for the
# Inference API.
MODELS = {
    "GPT-2": "gpt2",
    "GPT-J": "EleutherAI/gpt-j-6B",
    "GPT-NeoX": "EleutherAI/gpt-neox-20b",
}
def load_user_history(session_id):
    """Return the stored conversation list for *session_id*, or [] if none.

    Uses EAFP instead of an exists-then-open check (avoids the TOCTOU race),
    reads with an explicit UTF-8 encoding, and treats a corrupted or
    half-written history file as empty rather than crashing the request.
    """
    path = os.path.join(HISTORY_FOLDER, f"{session_id}.json")
    try:
        with open(path, "r", encoding="utf-8") as f:
            return json.load(f)
    except FileNotFoundError:
        # New session: no history yet.
        return []
    except json.JSONDecodeError:
        # Corrupted file — start the session over instead of failing.
        return []
def save_user_history(session_id, history):
    """Persist *history* (a list of "User: ..."/"AI: ..." strings) as JSON.

    Writes with an explicit UTF-8 encoding so the on-disk format does not
    depend on the platform's default locale encoding.
    """
    path = os.path.join(HISTORY_FOLDER, f"{session_id}.json")
    with open(path, "w", encoding="utf-8") as f:
        json.dump(history, f, indent=2)
def ai_assistant(user_text, uploaded_file, model_name, session_id=None):
    """Handle one chat turn: build the prompt, call the HF Inference API,
    persist the session history, and return the display payload.

    Returns a 3-tuple: (markdown response with timestamp, word-count string,
    session id) — matching the three Gradio outputs.
    """
    # BUG FIX: a blank Gradio Textbox passes "" (not None), so an `is None`
    # check made every blank-field user share the same "".json history file.
    # A falsy check starts a fresh session in both cases.
    if not session_id:
        session_id = str(uuid.uuid4())

    conversation_history = load_user_history(session_id)

    # Fold uploaded file content into the prompt. Depending on the Gradio
    # version, gr.File yields a filepath string or a file-like object with a
    # .name attribute — handle both instead of assuming .read() exists.
    if uploaded_file:
        file_path = (
            uploaded_file
            if isinstance(uploaded_file, str)
            else getattr(uploaded_file, "name", None)
        )
        if file_path:
            with open(file_path, "r", encoding="utf-8", errors="replace") as fh:
                file_content = fh.read()
        else:
            # Legacy path: an open binary file object.
            file_content = uploaded_file.read().decode("utf-8")
        user_text = f"{user_text}\n\nFile content:\n{file_content}"

    conversation_history.append(f"User: {user_text}")

    # The full history is replayed as plain-text context for the model.
    prompt = "\n".join(conversation_history) + "\nAI:"

    payload = {"inputs": prompt}
    model_id = MODELS.get(model_name, "gpt2")
    try:
        response = requests.post(
            f"https://api-inference.huggingface.co/models/{model_id}",
            headers=headers,
            json=payload,
            timeout=60,  # don't hang the UI forever on a stalled inference call
        )
    except requests.RequestException as exc:
        ai_text = f"⚠️ Error: request failed - {exc}"
    else:
        if response.status_code == 200:
            result = response.json()
            ai_text = (
                result[0]["generated_text"]
                if isinstance(result, list)
                else str(result)
            )
            # The text-generation endpoint echoes the prompt in
            # generated_text; strip it so the chat shows only the completion.
            if ai_text.startswith(prompt):
                ai_text = ai_text[len(prompt):].strip()
        else:
            ai_text = f"⚠️ Error: {response.status_code} - {response.text}"

    conversation_history.append(f"AI: {ai_text}")
    save_user_history(session_id, conversation_history)

    # Decorate the reply with a timestamp and a word count for the UI.
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    word_count = len(ai_text.split())
    display_text = f"{ai_text}\n\n🕒 Timestamp: {timestamp}"
    return display_text, f"Word count: {word_count}", session_id
# Gradio UI wiring — the four inputs map positionally onto ai_assistant's
# parameters, and the three outputs onto its return tuple.
demo = gr.Interface(
    fn=ai_assistant,
    inputs=[
        gr.Textbox(label="Enter text", placeholder="Type your message here..."),
        gr.File(label="Upload a text file (optional)"),
        gr.Dropdown(label="Select AI Model", choices=list(MODELS.keys()), value="GPT-2"),
        # NOTE(review): `optional=True` is not a valid gr.Textbox argument in
        # current Gradio and raised TypeError at startup; a blank field already
        # passes "" through to the handler, which then generates a session id.
        gr.Textbox(label="Session ID (auto-generated)", placeholder="Leave blank for new session"),
    ],
    outputs=[
        gr.Markdown(label="AI Response"),
        gr.Textbox(label="Word Count"),
        gr.Textbox(label="Session ID"),
    ],
    title="Session-Based Stateful GPT Assistant",
    description=(
        "Chat with an AI assistant with automatic session tracking. "
        "Each session has its own conversation memory. Upload files, choose models, "
        "and see timestamps & word counts. Session ID is returned for returning users."
    ),
)

if __name__ == "__main__":
    demo.launch()