# Spaces: "Runtime error" / "Runtime error"
# (Hugging Face Spaces build-status text carried over from the page this
# file was copied from; kept here as a comment so the file parses.)
import os
from typing import Dict, List, Optional

import gradio as gr
import requests  # was referenced by chat() but never imported — caused a NameError at runtime
| # Function to handle chat messages | |
| def chat(message: str, history: List[Dict[str, str]], model_name: str, temperature: float, max_tokens: int) -> List[Dict[str, str]]: | |
| """ | |
| Generate a response using the Hugging Face Inference API. | |
| Args: | |
| message: The user's current message | |
| history: Chat history with previous messages | |
| model_name: The Hugging Face model to use | |
| temperature: Sampling temperature for generation | |
| max_tokens: Maximum tokens to generate | |
| Returns: | |
| Updated chat history | |
| """ | |
| try: | |
| # Build the conversation context | |
| conversation = [] | |
| # Add system message | |
| conversation.append({ | |
| "role": "system", | |
| "content": "You are a helpful, friendly AI assistant. Provide concise, accurate responses." | |
| }) | |
| # Add conversation history | |
| for user_msg, assistant_msg in history: | |
| conversation.append({"role": "user", "content": user_msg}) | |
| if assistant_msg: | |
| conversation.append({"role": "assistant", "content": assistant_msg}) | |
| # Add current message | |
| conversation.append({"role": "user", "content": message}) | |
| # Make API call to Hugging Face | |
| headers = {"Authorization": f"Bearer {os.environ.get('HF_TOKEN', '')}"} | |
| api_url = f"https://api-inference.huggingface.co/models/{model_name}" | |
| response = requests.post( | |
| api_url, | |
| headers=headers, | |
| json={"inputs": conversation, "parameters": {"temperature": temperature, "max_new_tokens": max_tokens}}, | |
| timeout=60 | |
| ) | |
| response.raise_for_status() | |
| # Parse the response | |
| result = response.json()[0] | |
| # Extract assistant's response | |
| if isinstance(result, list) and len(result) > 0: | |
| if isinstance(result[0], list) and len(result[0]) > 0: | |
| assistant_response = result[0][0].get("generated_text", "") | |
| else: | |
| assistant_response = str(result[0]) | |
| else: | |
| assistant_response = str(result) | |
| # Add assistant response to history | |
| history.append({"role": "user", "content": message}) | |
| history.append({"role": "assistant", "content": assistant_response}) | |
| return history | |
| except requests.exceptions.RequestException as e: | |
| error_msg = f"Error communicating with Hugging Face API: {str(e)}" | |
| history.append({"role": "user", "content": message}) | |
| history.append({"role": "assistant", "content": error_msg}) | |
| return history | |
| except Exception as e: | |
| error_msg = f"An unexpected error occurred: {str(e)}" | |
| history.append({"role": "user", "content": message}) | |
| history.append({"role": "assistant", "content": error_msg}) | |
| return history | |
| # Create the Gradio interface | |
| with gr.Blocks( | |
| title="AI Chatbot", | |
| description="Chat with various AI models powered by Hugging Face", | |
| theme=gr.themes.Soft( | |
| primary_hue="blue", | |
| secondary_hue="indigo", | |
| neutral_hue="slate", | |
| font=gr.themes.GoogleFont("Inter"), | |
| text_size="lg", | |
| spacing_size="lg", | |
| radius_size="md" | |
| ).set( | |
| button_primary_background_fill="*primary_600", | |
| button_primary_background_fill_hover="*primary_700", | |
| block_title_text_weight="600", | |
| ) | |
| ) as demo: | |
| # Header section | |
| gr.Markdown( | |
| """ | |
| # 🤖 AI Chatbot | |
| Chat with powerful AI models from Hugging Face | |
| """ | |
| ) | |
| # Model selection and settings | |
| with gr.Accordion("⚙️ Model Settings", open=False): | |
| with gr.Row(): | |
| with gr.Column(): | |
| model_dropdown = gr.Dropdown( | |
| choices=[ | |
| "mistralai/Mistral-7B-Instruct-v0.2", | |
| "meta-llama/Llama-2-7b-chat-hf", | |
| "tiiuae/falcon-7b-instruct", | |
| "bigscience/bloom-560m", | |
| "google/flan-t5-large", | |
| "gpt2-xl" | |
| ], | |
| value="mistralai/Mistral-7B-Instruct-v0.2", | |
| label="Select Model", | |
| info="Choose an AI model from Hugging Face" | |
| ) | |
| with gr.Column(): | |
| temperature_slider = gr.Slider( | |
| minimum=0.0, | |
| maximum=1.0, | |
| value=0.7, | |
| step=0.1, | |
| label="Temperature", | |
| info="Lower = more focused, Higher = more creative" | |
| ) | |
| with gr.Column(): | |
| max_tokens_slider = gr.Slider( | |
| minimum=50, | |
| maximum=2048, | |
| value=512, | |
| step=50, | |
| label="Max Tokens", | |
| info="Maximum length of response" | |
| ) | |
| # API Token input | |
| with gr.Row(): | |
| api_token = gr.Textbox( | |
| label="Hugging Face API Token", | |
| placeholder="Enter your HF_TOKEN environment variable or paste token here", | |
| type="password", | |
| info="Required for private models. Leave empty if using public models." | |
| ) | |
| # Chat interface | |
| with gr.Row(): | |
| with gr.Column(scale=3): | |
| chatbot = gr.Chatbot( | |
| label="Chat History", | |
| height=500, | |
| avatar_images=( | |
| "https://api.dicebear.com/7.x/avataaars/svg?seed=AI", | |
| "https://api.dicebear.com/7.x/avataaars/svg?seed=User" | |
| ), | |
| bubble_full_width=False | |
| ) | |
| with gr.Column(scale=1): | |
| gr.Markdown( | |
| """ | |
| ### 📝 Tips | |
| - Enter your API token for better performance | |
| - Try different models for different responses | |
| - Adjust temperature for more creative outputs | |
| - Clear chat to start fresh | |
| """ | |
| ) | |
| clear_button = gr.Button( | |
| "🗑️ Clear Chat", | |
| variant="secondary", | |
| size="lg" | |
| ) | |
| # User input | |
| with gr.Row(): | |
| user_input = gr.Textbox( | |
| label="Your Message", | |
| placeholder="Type your message here...", | |
| scale=4, | |
| show_label=False | |
| ) | |
| send_button = gr.Button( | |
| "Send", | |
| variant="primary", | |
| scale=1, | |
| size="lg" | |
| ) | |
| # Example prompts | |
| gr.Examples( | |
| examples=[ | |
| ["What is machine learning?"], | |
| ["Explain quantum computing in simple terms"], | |
| ["Write a short poem about AI"], | |
| ["Help me debug this code"], | |
| ["What are the benefits of renewable energy?"], | |
| ["Tell me a fun fact about space"], | |
| ["How do I make a good cup of coffee?"], | |
| ["What's the difference between Python 2 and 3?"], | |
| ], | |
| inputs=user_input, | |
| label="💡 Example Prompts" | |
| ) | |
| # Event handlers | |
| send_button.click( | |
| fn=chat, | |
| inputs=[user_input, chatbot, model_dropdown, temperature_slider, max_tokens_slider], | |
| outputs=[chatbot] | |
| ) | |
| user_input.submit( | |
| fn=chat, | |
| inputs=[user_input, chatbot, model_dropdown, temperature_slider, max_tokens_slider], | |
| outputs=[chatbot] | |
| ) | |
| clear_button.click( | |
| fn=lambda: [], | |
| outputs=[chatbot] | |
| ) | |
| # Footer | |
| gr.Markdown( | |
| """ | |
| --- | |
| Built with **[anycoder](https://huggingface.co/spaces/akhaliq/anycoder)** | |
| """ | |
| ) | |
| # Launch the application | |
| if __name__ == "__main__": | |
| demo.launch( | |
| theme=gr.themes.Soft( | |
| primary_hue="blue", | |
| secondary_hue="indigo", | |
| neutral_hue="slate", | |
| font=gr.themes.GoogleFont("Inter"), | |
| text_size="lg", | |
| spacing_size="lg", | |
| radius_size="md" | |
| ).set( | |
| button_primary_background_fill="*primary_600", | |
| button_primary_background_fill_hover="*primary_700", | |
| block_title_text_weight="600", | |
| ), | |
| css=""" | |
| .chatbot-container { | |
| max-height: 600px !important; | |
| } | |
| .chatbot-message { | |
| margin: 8px 0; | |
| padding: 12px 16px; | |
| border-radius: 12px; | |
| max-width: 80%; | |
| } | |
| .chatbot-message.user { | |
| background-color: #e3f2fd; | |
| margin-left: auto; | |
| } | |
| .chatbot-message.assistant { | |
| background-color: #f5f5f5; | |
| } | |
| """, | |
| footer_links=[ | |
| {"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"} | |
| ] | |
| ) |