| | |
| | import gradio as gr |
| | import requests |
| | from datetime import datetime |
| | import json |
| | import os |
| | import uuid |
| |
|
| | |
# Hugging Face Inference API credentials.
# Read the token from the environment so it is never committed to source
# control; the placeholder default preserves the old behavior when unset.
HF_API_TOKEN = os.environ.get("HF_API_TOKEN", "YOUR_HF_API_TOKEN")
headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}

# Directory where per-session conversation histories are persisted as JSON.
HISTORY_FOLDER = "histories"
os.makedirs(HISTORY_FOLDER, exist_ok=True)

# UI display name -> Hugging Face model id used in the inference URL.
MODELS = {
    "GPT-2": "gpt2",
    "GPT-J": "EleutherAI/gpt-j-6B",
    "GPT-NeoX": "EleutherAI/gpt-neox-20b",
}
| |
|
def load_user_history(session_id):
    """Return the stored conversation history for *session_id*.

    The history is a list of "User: ..." / "AI: ..." strings. Returns an
    empty list when no history file exists yet, or when the file is
    unreadable/corrupt — starting a fresh session is safer than crashing
    the UI on a bad file.
    """
    path = os.path.join(HISTORY_FOLDER, f"{session_id}.json")
    try:
        # Explicit UTF-8: the default encoding is platform-dependent and
        # would corrupt non-ASCII chat text on some systems.
        with open(path, "r", encoding="utf-8") as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError, OSError):
        return []
| |
|
def save_user_history(session_id, history):
    """Persist *history* (list of chat-line strings) for *session_id* as JSON.

    Written as UTF-8 with ``ensure_ascii=False`` so non-ASCII chat text
    (emoji, accented characters) is stored readably and round-trips exactly.
    """
    path = os.path.join(HISTORY_FOLDER, f"{session_id}.json")
    with open(path, "w", encoding="utf-8") as f:
        json.dump(history, f, indent=2, ensure_ascii=False)
| |
|
def ai_assistant(user_text, uploaded_file, model_name, session_id=None):
    """Run one chat turn against a Hugging Face hosted model.

    Parameters
    ----------
    user_text : str
        The user's message for this turn.
    uploaded_file : str | file-like | None
        Optional text file whose content is appended to the prompt.
        Depending on the Gradio version this is a path string or a
        file-like object — both are handled.
    model_name : str
        Display name looked up in ``MODELS`` (falls back to ``"gpt2"``).
    session_id : str | None
        Existing session to continue; a blank or ``None`` value starts a
        new session with a fresh UUID.

    Returns
    -------
    tuple[str, str, str]
        (response text with timestamp, word-count label, session id).
    """
    # Gradio passes "" (not None) for an empty textbox, so test truthiness.
    # The old `is None` check silently wrote every anonymous chat to
    # "histories/.json", merging all anonymous users into one session.
    if not session_id:
        session_id = str(uuid.uuid4())

    conversation_history = load_user_history(session_id)

    # Fold an uploaded text file into this turn's message.
    if uploaded_file:
        try:
            if isinstance(uploaded_file, str):
                # Gradio 4-style: component yields a file path.
                with open(uploaded_file, "r", encoding="utf-8", errors="replace") as fh:
                    file_content = fh.read()
            else:
                # Older Gradio: component yields a file-like object that may
                # return bytes or str from read().
                raw = uploaded_file.read()
                file_content = raw.decode("utf-8", errors="replace") if isinstance(raw, bytes) else raw
            user_text = f"{user_text}\n\nFile content:\n{file_content}"
        except OSError as exc:
            user_text = f"{user_text}\n\n(Could not read uploaded file: {exc})"

    conversation_history.append(f"User: {user_text}")

    # The inference API is stateless: replay the whole transcript each turn.
    prompt = "\n".join(conversation_history) + "\nAI:"

    payload = {"inputs": prompt}
    model_id = MODELS.get(model_name, "gpt2")
    try:
        response = requests.post(
            f"https://api-inference.huggingface.co/models/{model_id}",
            headers=headers,
            json=payload,
            timeout=60,  # the original call had no timeout and could hang the UI forever
        )
        if response.status_code == 200:
            result = response.json()
            ai_text = result[0]["generated_text"] if isinstance(result, list) else str(result)
        else:
            ai_text = f"⚠️ Error: {response.status_code} - {response.text}"
    except requests.RequestException as exc:
        # Network failure / DNS error / timeout: surface it in the chat
        # instead of crashing the Gradio handler.
        ai_text = f"⚠️ Error: request failed - {exc}"

    conversation_history.append(f"AI: {ai_text}")
    save_user_history(session_id, conversation_history)

    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    word_count = len(ai_text.split())
    display_text = f"{ai_text}\n\n🕒 Timestamp: {timestamp}"

    return display_text, f"Word count: {word_count}", session_id
| |
|
| | |
# Gradio UI definition. One input row per ai_assistant parameter, in order.
demo = gr.Interface(
    fn=ai_assistant,
    inputs=[
        gr.Textbox(label="Enter text", placeholder="Type your message here..."),
        gr.File(label="Upload a text file (optional)"),
        gr.Dropdown(label="Select AI Model", choices=list(MODELS.keys()), value="GPT-2"),
        # NOTE(review): the original passed `optional=True`, which is not a
        # valid gr.Textbox argument in Gradio 3+/4 and can raise TypeError at
        # startup. A blank textbox already passes "" through, which
        # ai_assistant treats as "start a new session".
        gr.Textbox(label="Session ID (auto-generated)", placeholder="Leave blank for new session"),
    ],
    outputs=[
        gr.Markdown(label="AI Response"),
        gr.Textbox(label="Word Count"),
        gr.Textbox(label="Session ID"),
    ],
    title="Session-Based Stateful GPT Assistant",
    description=(
        "Chat with an AI assistant with automatic session tracking. "
        "Each session has its own conversation memory. Upload files, choose models, "
        "and see timestamps & word counts. Session ID is returned for returning users."
    ),
)
| |
|
# Launch the Gradio web server only when run as a script, not on import.
if __name__ == "__main__":
    demo.launch()