# NOTE(review): the three lines that were here ("Spaces:" / "Sleeping" /
# "Sleeping") were Hugging Face Space UI status text accidentally scraped
# into the file; they are not part of the program.
# --- Imports ---
import json
import os

import gradio as gr
from huggingface_hub import InferenceClient

# --- 1. SETUP ---
# The Hugging Face access token must be supplied via the Space's secrets;
# fail fast at startup rather than on the first inference call.
token = os.getenv("HF_TOKEN")
if not token:
    raise ValueError("HF_TOKEN is missing! Add it in Space Settings -> Secrets")

# --- CLIENTS ---
# One client for text chat, one for text-to-image, both backed by the
# HF Inference API.
chat_client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct", token=token)
image_client = InferenceClient("black-forest-labs/FLUX.1-schnell", token=token)
# --- MEMORY SYSTEM ---
# Chat turns are persisted to a JSON file on local disk so the conversation
# survives across requests within the same Space instance.
HISTORY_FILE = "chat_history.json"


def load_history():
    """Return the saved chat history.

    Returns:
        list: Saved turns shaped ``[{"user": ..., "bot": ...}, ...]``;
        an empty list when the file is absent, unreadable, or not valid JSON.
    """
    if not os.path.exists(HISTORY_FILE):
        return []
    try:
        with open(HISTORY_FILE, "r") as f:
            return json.load(f)
    # Narrowed from a bare `except:`, which would also have swallowed
    # KeyboardInterrupt/SystemExit. json.JSONDecodeError is a ValueError,
    # so corrupt or unreadable history still degrades to an empty list.
    except (OSError, ValueError):
        return []


def save_history(history):
    """Overwrite HISTORY_FILE with *history* serialized as JSON."""
    with open(HISTORY_FILE, "w") as f:
        json.dump(history, f)


def clear_memory():
    """Delete the persisted history file, if present, and report success."""
    if os.path.exists(HISTORY_FILE):
        os.remove(HISTORY_FILE)
    return "Memory Cleared!"
# --- LOGIC ---
def chat_engine(message):
    """Answer *message* with the chat model, using the persisted history
    as conversational context, and append the new turn to that history.

    Returns the assistant's reply string, or an "⚠️ Error: ..." string
    when the backend call (or persisting the turn) fails.
    """
    past_turns = load_history()

    # Rebuild the whole conversation for the model: system prompt first,
    # then every stored exchange, then the incoming user message.
    conversation = [{"role": "system", "content": "You are a helpful AI assistant."}]
    for turn in past_turns:
        conversation.extend([
            {"role": "user", "content": str(turn['user'])},
            {"role": "assistant", "content": str(turn['bot'])},
        ])
    conversation.append({"role": "user", "content": str(message)})

    try:
        result = chat_client.chat_completion(
            conversation, max_tokens=512, stream=False, temperature=0.7
        )
        reply = result.choices[0].message.content
        past_turns.append({"user": message, "bot": reply})
        save_history(past_turns)
        return reply
    except Exception as e:
        # Surface backend failures to the caller instead of crashing the UI.
        return f"⚠️ Error: {str(e)}"
def generate_image(prompt):
    """Render *prompt* with the image model and return the path of the
    saved PNG, or None when generation fails (the UI shows an empty slot).
    """
    out_path = "generated_image.png"
    try:
        picture = image_client.text_to_image(prompt)
        picture.save(out_path)
    except Exception:
        # Best effort: any backend failure results in no image.
        return None
    return out_path
# --- INTERFACE ---
# NOTE: component creation order is kept as-is — in Gradio it defines layout.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# Nexus AI Backend")

    with gr.Tab("Chat"):
        # ChatInterface passes (message, history); server-side memory is
        # used instead of the widget's history, so only the message matters.
        gr.ChatInterface(fn=lambda msg, hist: chat_engine(msg))

        # --- MEMORY CONTROL ---
        # api_name exposes this as a callable endpoint ("clear_memory").
        wipe_button = gr.Button("🗑️ Clear Server Memory")
        wipe_status = gr.Markdown("")
        wipe_button.click(
            fn=clear_memory,
            inputs=[],
            outputs=[wipe_status],
            api_name="clear_memory",
        )

        # HIDDEN API — invisible widgets whose only purpose is to expose
        # chat_engine as the "chat_engine" API endpoint.
        api_in = gr.Textbox(visible=False)
        api_out = gr.Textbox(visible=False)
        api_trigger = gr.Button(visible=False)
        api_trigger.click(
            fn=chat_engine,
            inputs=[api_in],
            outputs=[api_out],
            api_name="chat_engine",
        )

    with gr.Tab("Image"):
        prompt_box = gr.Textbox(label="Prompt")
        result_image = gr.Image(label="Result")
        go_button = gr.Button("Generate")
        go_button.click(
            fn=generate_image,
            inputs=prompt_box,
            outputs=result_image,
            api_name="image",
        )
if __name__ == "__main__":
    # Bind to all interfaces on port 7860 — the port HF Spaces routes to.
    demo.launch(server_name="0.0.0.0", server_port=7860)