# Xylaria Chat — Gradio app for a Hugging Face Space (multilingual chatbot
# with optional image generation via the HF Inference API).
# --- Standard-library imports ---
import io
import os
import re
import time

# --- Third-party imports ---
import gradio as gr
import langdetect
import requests
from PIL import Image
from deep_translator import GoogleTranslator
from huggingface_hub import InferenceClient, HfApi
from indic_transliteration import sanscript
from indic_transliteration.detect import detect as detect_script
from indic_transliteration.sanscript import transliterate

# Get secrets from Hugging Face Space; fail fast when the token is missing.
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("Please set the HF_TOKEN secret in your HuggingFace Space")

# Chat-model client: Zephyr 7B served through the HF Inference API.
text_client = InferenceClient(
    "HuggingFaceH4/zephyr-7b-beta",
    token=HF_TOKEN,
)

# Image-generation endpoint (RealVisXL), called directly over HTTP.
API_URL = "https://api-inference.huggingface.co/models/SG161222/RealVisXL_V4.0"
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
def detect_language(text):
    """Return the ISO-639-1 language code detected for *text*.

    Falls back to 'en' on any failure (langdetect raises e.g.
    LangDetectException on empty or undecidable input), so the chat
    pipeline never crashes on odd input.
    """
    try:
        return langdetect.detect(text)
    except Exception:  # was a bare `except:` — let KeyboardInterrupt etc. propagate
        return 'en'
def detect_and_transliterate(text):
    """Romanize *text* to IAST when it is written in a detectable Indic script.

    Best-effort: if script detection or transliteration fails for any
    reason, the original text is returned unchanged.
    """
    try:
        script = detect_script(text)
        if script:
            # Transliterate to Latin (IAST) so downstream translation works.
            return transliterate(text, script, sanscript.IAST)
    except Exception:  # was a bare `except:` — keep best-effort, but stop
        pass           # swallowing SystemExit/KeyboardInterrupt
    return text
def translate_to_english(text):
    """Translate *text* to English via Google Translate when it isn't already.

    Best-effort: if language detection or translation fails for any
    reason, the original text is returned unchanged.
    """
    try:
        lang = detect_language(text)
        if lang != 'en':
            translator = GoogleTranslator(source=lang, target='en')
            return translator.translate(text)
    except Exception:  # was a bare `except:`; narrowed, fallback unchanged
        pass
    return text
def check_for_image_generation(message):
    """Return True when *message* reads like a request to produce an image."""
    # Trigger phrases, as regexes; matched against the lowercased message.
    triggers = (
        r"generate (?:an? )?image",
        r"create (?:an? )?image",
        r"draw",
        r"show me",
        r"visualize",
        r"make (?:an? )?picture",
    )
    lowered = message.lower()
    return any(re.search(pattern, lowered) for pattern in triggers)
def user_message(message, history):
    """Record the user's message in the chat history and clear the textbox.

    Returns a ("", history) pair: the empty string resets the input box,
    and the history gains one {"role": "user", "content": ...} entry.
    Blank or missing messages leave the history untouched.
    """
    # Nothing typed: keep the history exactly as received.
    if message is None or message == "":
        return "", history
    chat_log = history or []  # tolerate a None history from Gradio
    chat_log.append({"role": "user", "content": message})
    return "", chat_log
def bot_response(history, system_msg, max_tokens, temperature, top_p):
    """Produce the assistant's reply (text, or an image) for the chat.

    Args:
        history: list of {"role", "content"} message dicts; mutated in place.
        system_msg: system prompt prepended to the conversation.
        max_tokens, temperature, top_p: sampling parameters for the LLM.

    Returns:
        (history, image): the updated history, plus a PIL image when the
        user asked for one, otherwise None.
    """
    if not history:
        return history, None

    def _generate_image(prompt):
        # POST the prompt to the RealVisXL inference endpoint and decode the
        # returned bytes into a PIL image. (BUG FIX: the original code called
        # `generate_image`, which was never defined anywhere in the file, so
        # every image request raised NameError.)
        resp = requests.post(API_URL, headers=headers, json={"inputs": prompt}, timeout=120)
        resp.raise_for_status()
        return Image.open(io.BytesIO(resp.content))

    try:
        # Normalize the latest user message: romanize Indic scripts, then
        # translate to English so trigger detection and the model see English.
        last_user_message = history[-1]["content"]
        processed_message = detect_and_transliterate(last_user_message)
        translated_message = translate_to_english(processed_message)

        # Image path: generate and return the picture alongside a stock reply.
        if check_for_image_generation(translated_message):
            image = _generate_image(translated_message)
            history.append({
                "role": "assistant",
                "content": "I've generated an image based on your request.",
            })
            return history, image

        # Text path: system prompt followed by the full conversation.
        messages = [{"role": "system", "content": system_msg}, *history]

        # NOTE(review): str(messages) sends a Python repr of the dicts as the
        # prompt; Zephyr would likely do better with its chat template, but
        # the original prompt format is kept to avoid changing model output.
        response = text_client.text_generation(
            prompt=str(messages),
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            repetition_penalty=1.1,
        )
        history.append({"role": "assistant", "content": response})
    except Exception as e:
        # Surface failures in-chat instead of crashing the UI.
        error_message = f"I apologize, but I encountered an error: {str(e)}"
        history.append({"role": "assistant", "content": error_message})
    return history, None
def create_chat_interface():
    """Build and return the Gradio Blocks app for Xylaria Chat.

    Lays out the chat window, an (initially hidden) image output, the
    message input with Send/Clear buttons, and an advanced-settings
    accordion, then wires up the event handlers.
    """
    # Custom CSS for better styling with Inter font and animations
    custom_css = """
    @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap');
    * {
        font-family: 'Inter', sans-serif !important;
    }
    .container {
        max-width: 850px !important;
        margin: auto;
    }
    .chat-window {
        height: 600px !important;
        overflow-y: auto;
        border-radius: 15px !important;
        box-shadow: 0 8px 16px rgba(0, 0, 0, 0.1) !important;
        transition: all 0.3s ease !important;
    }
    .chat-window:hover {
        box-shadow: 0 12px 20px rgba(0, 0, 0, 0.15) !important;
    }
    .chat-message {
        padding: 1rem !important;
        margin: 0.5rem !important;
        border-radius: 12px !important;
        transition: all 0.2s ease !important;
        opacity: 0;
        animation: messageSlide 0.3s ease forwards;
    }
    @keyframes messageSlide {
        from {
            opacity: 0;
            transform: translateY(10px);
        }
        to {
            opacity: 1;
            transform: translateY(0);
        }
    }
    .user-message {
        background: linear-gradient(135deg, #6366f1 0%, #8b5cf6 100%) !important;
        color: white !important;
        margin-left: 2rem !important;
    }
    .bot-message {
        background: linear-gradient(135deg, #f3f4f6 0%, #e5e7eb 100%) !important;
        margin-right: 2rem !important;
    }
    /* Button Styles */
    button.primary {
        background: linear-gradient(135deg, #6366f1 0%, #8b5cf6 100%) !important;
        border: none !important;
        color: white !important;
        padding: 0.75rem 1.5rem !important;
        border-radius: 12px !important;
        font-weight: 600 !important;
        transition: all 0.3s ease !important;
        transform: translateY(0);
        box-shadow: 0 4px 6px rgba(99, 102, 241, 0.2) !important;
    }
    button.primary:hover {
        transform: translateY(-2px);
        box-shadow: 0 8px 12px rgba(99, 102, 241, 0.3) !important;
    }
    button.primary:active {
        transform: translateY(0);
    }
    button.secondary {
        background: #f3f4f6 !important;
        border: 2px solid #e5e7eb !important;
        color: #4b5563 !important;
        padding: 0.75rem 1.5rem !important;
        border-radius: 12px !important;
        font-weight: 600 !important;
        transition: all 0.3s ease !important;
    }
    button.secondary:hover {
        background: #e5e7eb !important;
        border-color: #d1d5db !important;
    }
    /* Input Styles */
    .input-container {
        position: relative;
        margin-bottom: 1rem;
    }
    textarea {
        border: 2px solid #e5e7eb !important;
        border-radius: 12px !important;
        padding: 1rem !important;
        transition: all 0.3s ease !important;
        font-size: 1rem !important;
        line-height: 1.5 !important;
        box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05) !important;
    }
    textarea:focus {
        border-color: #6366f1 !important;
        box-shadow: 0 4px 6px rgba(99, 102, 241, 0.1) !important;
    }
    /* Settings Panel */
    .settings-block {
        background: white !important;
        border-radius: 15px !important;
        padding: 1.5rem !important;
        margin-top: 1rem !important;
        box-shadow: 0 4px 6px rgba(0, 0, 0, 0.05) !important;
        transition: all 0.3s ease !important;
    }
    .settings-block:hover {
        box-shadow: 0 6px 8px rgba(0, 0, 0, 0.08) !important;
    }
    /* Slider Styles */
    .gr-slider {
        height: 4px !important;
        background: #e5e7eb !important;
        border-radius: 2px !important;
    }
    .gr-slider .handle {
        width: 16px !important;
        height: 16px !important;
        border: 2px solid #6366f1 !important;
        background: white !important;
        border-radius: 50% !important;
        box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1) !important;
        transition: all 0.2s ease !important;
    }
    .gr-slider .handle:hover {
        transform: scale(1.1);
    }
    /* Loading Animation */
    @keyframes pulse {
        0% { opacity: 1; }
        50% { opacity: 0.5; }
        100% { opacity: 1; }
    }
    .loading {
        animation: pulse 1.5s ease-in-out infinite;
    }
    """
    # Create the interface with custom theme
    with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
        # Header
        with gr.Row():
            gr.HTML("""
                <div style="text-align: center; margin-bottom: 2rem; padding: 2rem;">
                    <h1 style="font-size: 3rem; font-weight: 700; color: #4f46e5; margin-bottom: 0.5rem;">
                        ✨ Xylaria Chat
                    </h1>
                    <p style="color: #6b7280; font-size: 1.2rem; font-weight: 500;">
                        Your Intelligent Multilingual Assistant
                    </p>
                </div>
            """)
        # Main chat interface: message log plus a hidden image slot that is
        # revealed only when an image has been generated.
        with gr.Row():
            with gr.Column(scale=4):
                chatbot = gr.Chatbot(
                    height=500,
                    show_label=False,
                    container=True,
                    elem_classes=["chat-window"],
                    type='messages'
                )
                image_output = gr.Image(
                    type="pil",
                    label="Generated Image",
                    visible=False,
                    elem_classes=["generated-image"]
                )
        # Input row: textbox plus Send/Clear buttons.
        with gr.Row():
            with gr.Column(scale=8):
                txt = gr.Textbox(
                    show_label=False,
                    placeholder="Type your message here...",
                    container=False,
                    elem_classes=["input-textbox"]
                )
            with gr.Column(scale=1):
                send_btn = gr.Button(
                    "Send",
                    variant="primary",
                    elem_classes=["primary"]
                )
            with gr.Column(scale=1):
                clear_btn = gr.Button(
                    "Clear",
                    variant="secondary",
                    elem_classes=["secondary"]
                )
        # Settings panel
        with gr.Accordion(
            "⚙️ Advanced Settings",
            open=False,
            elem_classes=["settings-accordion"]
        ):
            with gr.Row():
                with gr.Column():
                    system_msg = gr.Textbox(
                        value="You are a friendly Chatbot who always responds in English unless the user specifically uses another language.",
                        label="System Message",
                        lines=2
                    )
                    max_tokens = gr.Slider(
                        minimum=1,
                        maximum=2048,
                        value=512,
                        step=1,
                        label="Max Tokens"
                    )
                with gr.Column():
                    temperature = gr.Slider(
                        minimum=0.1,
                        maximum=4.0,
                        value=0.7,
                        step=0.1,
                        label="Temperature"
                    )
                    top_p = gr.Slider(
                        minimum=0.1,
                        maximum=1.0,
                        value=0.95,
                        step=0.05,
                        label="Top-p (nucleus sampling)"
                    )
        # Event wiring: Enter in the textbox or clicking Send first records
        # the user message (user_message), then generates the reply
        # (bot_response, defined at module level).
        send_event = txt.submit(
            user_message,
            [txt, chatbot],
            [txt, chatbot],
            queue=False
        ).then(
            bot_response,
            [chatbot, system_msg, max_tokens, temperature, top_p],
            [chatbot, image_output]
        )
        send_btn.click(
            user_message,
            [txt, chatbot],
            [txt, chatbot],
            queue=False
        ).then(
            bot_response,
            [chatbot, system_msg, max_tokens, temperature, top_p],
            [chatbot, image_output]
        )
        # Clear resets both the chat log and the image panel.
        clear_btn.click(
            lambda: (None, None),
            None,
            [chatbot, image_output],
            queue=False
        )
        # Show the image panel only when a submit actually produced an image.
        # NOTE(review): this is chained only onto txt.submit; the send_btn
        # click path does not toggle visibility — confirm if intentional.
        send_event.then(
            lambda img: gr.update(visible=img is not None),
            image_output,
            image_output
        )
    return demo
# Create and launch the interface.
# `demo` must exist at module level: Hugging Face Spaces imports this file
# and serves the `demo` object directly.
demo = create_chat_interface()
if __name__ == "__main__":
    # share=True additionally exposes a public *.gradio.live URL when run locally.
    demo.launch(share=True)