🤖 AI Assistant
Powered by HuggingFace Hub • Advanced AI Models
""" AI Assistant - Gradio Application Powered by HuggingFace Hub """ from random import random import gradio as gr import os import json from datetime import datetime from hf_api import HuggingFaceAPI from utils import load_settings, save_settings import warnings import warnings import torch # Suppress the specific deprecation warning warnings.filterwarnings("ignore", message=".*torch.distributed.reduce_op.*", category=FutureWarning) # Optional import for Google Translate Gemma try: from google_translate import GoogleTranslateGemma GOOGLE_TRANSLATE_AVAILABLE = True except ImportError as e: print(f"Warning: Google Translate Gemma not available: {str(e)}") GOOGLE_TRANSLATE_AVAILABLE = False GoogleTranslateGemma = None # Translation testing functions def test_translategemma(text, source_lang, target_lang): """Test Google Translate Gemma model directly""" if not GOOGLE_TRANSLATE_AVAILABLE: print("β Google Translate Gemma not available. Using chat completion fallback.") return test_chat_completion_translation(text, source_lang, target_lang) print(f"π§ͺ Testing Google Translate Gemma") print(f" Text: {text}") print(f" Source: {source_lang}") print(f" Target: {target_lang}") print("-" * 50) try: # Initialize the model translator = GoogleTranslateGemma() # Perform translation translation = translator.translate(text, source_lang, target_lang) if translation: print(f"β Translation: {translation}") print(" β Google Translate Gemma working correctly!") return translation else: print("β No translation returned") return None except Exception as e: print(f"β Error: {str(e)}") print(" β οΈ Falling back to chat completion translation...") return test_chat_completion_translation(text, source_lang, target_lang) def test_chat_completion_translation(text, source_lang, target_lang): """Test translation using chat completion fallback""" if not hf_api: print("β No HuggingFace API available. 
Please set your token first.") return None # Test models in order of preference models_to_test = [ "google/translategemma-12b-it", "meta-llama/Llama-3.2-3B-Instruct", "microsoft/Phi-3-mini-4k-instruct", "google/gemma-2-2b-it" ] print(f"π§ͺ Testing translation with chat completion") print(f" Text: {text}") print(f" Source: {source_lang}") print(f" Target: {target_lang}") print("-" * 50) for model_id in models_to_test: print(f"\nπ Testing with model: {model_id}") try: # Use the same translation logic as in the main translation function if "translategemma" in model_id.lower() and not GOOGLE_TRANSLATE_AVAILABLE: print(" β οΈ Google Translate Gemma not available, skipping...") continue # Dynamic system prompt based on target and source language source_info = f" from {source_lang}" if source_lang != "Auto-detect" else "" system_prompt = f"You are a professional translator specializing in translating{source_info} to {target_lang}. Translate the given text accurately while preserving the original meaning and tone. Only provide the translation without any additional explanations." prompt = f"Translate the following text{source_info} to {target_lang}: {text}" messages = [ { "role": "system", "content": system_prompt }, { "role": "user", "content": prompt } ] response = hf_api.chat_completion( model=model_id, messages=messages, max_tokens=1024, temperature=0.3 ) translation = response["choices"][0]["message"]["content"].strip() print(f"β Translation: {translation}") print(f" β Success with {model_id}!") return translation # Return first successful translation except Exception as e: print(f" β Error with {model_id}: {str(e)}") continue print("\nβ All models failed. 
Please check your token and model availability.") return None def run_multiple_translation_tests(): """Run multiple translation test scenarios""" test_cases = [ { "text": "Hello, how are you today?", "source": "English", "target": "Spanish", "description": "English to Spanish" }, { "text": "V nejhorΕ‘Γm pΕΓpadΔ i k prasknutΓ ΔoΔky.", "source": "Czech", "target": "German", "description": "Czech to German" }, { "text": "Bonjour, comment allez-vous?", "source": "French", "target": "English", "description": "French to English" }, { "text": "θΏζ―δΈδΈͺζ΅θ―γ", "source": "Chinese (Simplified)", "target": "English", "description": "Chinese to English" }, { "text": "Β‘Hola! ΒΏCΓ³mo estΓ‘s?", "source": "Spanish", "target": "Japanese", "description": "Spanish to Japanese" } ] results = [] for i, case in enumerate(test_cases, 1): print(f"\nπ Test {i}: {case['description']}") print(f" Source ({case['source']}): {case['text']}") # Map language names to codes lang_code_map = { "English": "en", "Spanish": "es", "French": "fr", "German": "de-DE", "Chinese (Simplified)": "zh-CN", "Chinese (Traditional)": "zh-TW", "Japanese": "ja", "Korean": "ko", "Italian": "it", "Portuguese": "pt", "Russian": "ru", "Arabic": "ar", "Hindi": "hi", "Dutch": "nl", "Turkish": "tr", "Polish": "pl", "Vietnamese": "vi", "Thai": "th", "Indonesian": "id", "Greek": "el", "Hebrew": "he", "Czech": "cs", "Swedish": "sv", "Danish": "da", "Norwegian": "no", "Finnish": "fi" } source_code = lang_code_map.get(case['source'], 'en') target_code = lang_code_map.get(case['target'], 'en') translation = test_translategemma( text=case['text'], source_lang=source_code, target_lang=target_code ) if translation: print(f" Target ({case['target']}): {translation}") results.append({ 'case': case['description'], 'original': case['text'], 'translation': translation, 'success': True }) else: results.append({ 'case': case['description'], 'original': case['text'], 'translation': None, 'success': False }) # Summary successful = sum(1 
for r in results if r['success']) total = len(results) summary = f""" π Test Summary {"=" * 60} Total tests: {total} Successful: {successful} Failed: {total - successful} Success rate: {successful/total*100:.1f}% """ if successful < total: summary += "β Some tests failed. Check your HuggingFace token and model availability." else: summary += "β All tests passed successfully!" return results, summary # Settings paths SETTINGS_DIR = os.path.join(os.path.dirname(__file__), 'settings') MODELS_SETTINGS_FILE = os.path.join(SETTINGS_DIR, 'models.json') FIREBASE_SETTINGS_FILE = os.path.join(SETTINGS_DIR, 'firebase.json') APP_SETTINGS_FILE = os.path.join(SETTINGS_DIR, 'app.json') # Load initial settings model_settings = load_settings(MODELS_SETTINGS_FILE) HF_TOKEN = model_settings.get('huggingfaceToken', '') hf_api = HuggingFaceAPI(token=HF_TOKEN) if HF_TOKEN else None def reinit_api(token: str): """Reinitialize HuggingFace API with new token""" global hf_api hf_api = HuggingFaceAPI(token=token) def get_saved_models(): """Get list of saved models""" settings = load_settings(MODELS_SETTINGS_FILE) models = settings.get('models', []) return [(m.get('name', m.get('modelId', 'Unknown')), m.get('modelId', '')) for m in models if m.get('enabled', True)] def get_model_choices(): """Get model choices for dropdown""" models = get_saved_models() if not models: return ["meta-llama/Llama-3.2-3B-Instruct"] return [m[1] for m in models] # ============ Chat Functions ============ def chat_response(message: str, history: list, model_id: str, temperature: float, max_tokens: int, system_prompt: str): """Generate chat response""" if not hf_api: return "Please set your HuggingFace token in Settings first." 
if not message.strip(): return "" try: # Build messages with system prompt messages = [] if system_prompt.strip(): messages.append({"role": "system", "content": system_prompt}) # Add history from message format for msg in history: if isinstance(msg, dict): messages.append(msg) elif isinstance(msg, tuple) and len(msg) == 2: # Handle legacy tuple format user_msg, assistant_msg = msg if user_msg: messages.append({"role": "user", "content": user_msg}) if assistant_msg: messages.append({"role": "assistant", "content": assistant_msg}) # Add current message messages.append({"role": "user", "content": message}) response = hf_api.chat_completion( model=model_id, messages=messages, max_tokens=max_tokens, temperature=temperature ) return response["choices"][0]["message"]["content"] except Exception as e: error_str = str(e) # Check if it's a model not supported error if "model_not_supported" in error_str or "not supported by any provider" in error_str: # Try to get fallback models try: fallback_models = hf_api._find_fallback_models(model_id) if fallback_models: fallback_list = "\n".join([f"- {m['id']}" for m in fallback_models[:3]]) return f"Error: Model {model_id} is not supported. Try one of these models instead:\n{fallback_list}\n\nOriginal error: {error_str}" else: return f"Error: Model {model_id} is not supported and no fallback models are available.\n\nOriginal error: {error_str}" except: return f"Error: Model {model_id} is not supported. Please try a different model.\n\nOriginal error: {error_str}" else: return f"Error: {error_str}" def text_generation(prompt: str, model_id: str, temperature: float, max_tokens: int, top_p: float): """Generate text from prompt""" if not hf_api: return "Please set your HuggingFace token in Settings first." 
if not prompt.strip(): return "" try: # Check model settings for task support model_settings = load_settings(MODELS_SETTINGS_FILE) models = model_settings.get('models', []) # Find the model in settings model_info = None for m in models: if m.get('modelId') == model_id: model_info = m break # Check if model recommends chat_completion if model_info and model_info.get('recommendedMethod') == 'chat_completion': # Use chat completion for conversational models messages = [{"role": "user", "content": prompt}] response = hf_api.chat_completion( model=model_id, messages=messages, max_tokens=max_tokens, temperature=temperature ) return response["choices"][0]["message"]["content"] else: # Use text generation for other models response = hf_api.text_generation( model=model_id, prompt=prompt, max_new_tokens=max_tokens, temperature=temperature, top_p=top_p ) return response.get("generated_text", "") except Exception as e: error_str = str(e) # Check if it's a model not supported error if "model_not_supported" in error_str or "not supported by any provider" in error_str: # Try to get fallback models try: fallback_models = hf_api._find_fallback_models(model_id) if fallback_models: fallback_list = "\n".join([f"- {m['id']}" for m in fallback_models[:3]]) return f"Error: Model {model_id} is not supported. Try one of these models instead:\n{fallback_list}\n\nOriginal error: {error_str}" else: return f"Error: Model {model_id} is not supported and no fallback models are available.\n\nOriginal error: {error_str}" except: return f"Error: Model {model_id} is not supported. Please try a different model.\n\nOriginal error: {error_str}" else: return f"Error: {error_str}" def summarize_text(text: str, model_id: str, max_length: int, min_length: int): """Summarize text""" if not hf_api: return "Please set your HuggingFace token in Settings first." 
if not text.strip(): return "" try: response = hf_api.summarization( model=model_id, text=text, max_length=max_length, min_length=min_length ) if isinstance(response, list) and len(response) > 0: return response[0].get('summary_text', '') elif isinstance(response, dict): return response.get('summary_text', str(response)) return str(response) except Exception as e: return f"Error: {str(e)}" def translate_text(text: str, model_id: str, target_language: str = "", source_language: str = "Auto-detect"): """Translate text""" if not hf_api: return "Please set your HuggingFace token in Settings first." if not text.strip(): return "" try: # Use Google Translate Gemma module for Google TranslateGemma model if "translategemma" in model_id.lower(): if not GOOGLE_TRANSLATE_AVAILABLE: # If Google Translate is not available, fall back to chat completion print("Google Translate Gemma not available, falling back to chat completion") source_info = f" from {source_language}" if source_language != "Auto-detect" else "" system_prompt = f"You are a professional translator specializing in translating{source_info} to {target_language}. Translate the given text accurately while preserving the original meaning and tone. Only provide the translation without any additional explanations." 
prompt = f"Translate the following text{source_info} to {target_language}: {text}" messages = [ {"role": "system", "content": system_prompt}, {"role": "user", "content": prompt} ] response = hf_api.chat_completion( model=model_id, messages=messages, max_tokens=1024, temperature=0.3 ) return response["choices"][0]["message"]["content"].strip() try: # Initialize the translator translator = GoogleTranslateGemma() # Map language names to language codes lang_code_map = { "English": "en", "Spanish": "es", "French": "fr", "German": "de-DE", "Chinese (Simplified)": "zh-CN", "Chinese (Traditional)": "zh-TW", "Japanese": "ja", "Korean": "ko", "Italian": "it", "Portuguese": "pt", "Russian": "ru", "Arabic": "ar", "Hindi": "hi", "Dutch": "nl", "Turkish": "tr", "Polish": "pl", "Vietnamese": "vi", "Thai": "th", "Indonesian": "id", "Greek": "el", "Hebrew": "he", "Czech": "cs", "Swedish": "sv", "Danish": "da", "Norwegian": "no", "Finnish": "fi" } # Get source language code source_lang = "en" # Default to English if source_language != "Auto-detect" and source_language in lang_code_map: source_lang = lang_code_map[source_language] # Get target language code target_lang = "en" # Default to English if target_language in lang_code_map: target_lang = lang_code_map[target_language] # Perform translation translated = translator.translate_text( text=text, source_lang=source_lang, target_lang=target_lang ) return translated except Exception as gemma_e: # If Google Translate Gemma fails, fall back to chat completion print(f"Google Translate Gemma failed, falling back to chat completion: {str(gemma_e)}") # Use chat completion as fallback source_info = f" from {source_language}" if source_language != "Auto-detect" else "" system_prompt = f"You are a professional translator specializing in translating{source_info} to {target_language}. Translate the given text accurately while preserving the original meaning and tone. Only provide the translation without any additional explanations." 
prompt = f"Translate the following text{source_info} to {target_language}: {text}" messages = [ {"role": "system", "content": system_prompt}, {"role": "user", "content": prompt} ] response = hf_api.chat_completion( model=model_id, messages=messages, max_tokens=1024, temperature=0.3 ) return response["choices"][0]["message"]["content"].strip() # For models that support chat completion (like Llama and Mistral) elif "llama" in model_id.lower() or "mistral" in model_id.lower(): # Use chat completion for translation # Dynamic system prompt based on target and source language if target_language: source_info = f" from {source_language}" if source_language != "Auto-detect" else "" system_prompt = f"You are a professional translator specializing in translating{source_info} to {target_language}. Translate the given text accurately while preserving the original meaning and tone. Only provide the translation without any additional explanations." prompt = f"Translate the following text{source_info} to {target_language}: {text}" else: system_prompt = "You are a professional translator. Translate the given text accurately while preserving the original meaning and tone. Only provide the translation without any additional explanations." 
prompt = f"Translate this text: {text}" messages = [ { "role": "system", "content": system_prompt }, { "role": "user", "content": prompt } ] response = hf_api.chat_completion( model=model_id, messages=messages, max_tokens=1024, temperature=0.3 ) return response["choices"][0]["message"]["content"].strip() else: # Use the standard translation endpoint for other models response = hf_api.translation( model=model_id, text=text ) if isinstance(response, list) and len(response) > 0: return response[0].get('translation_text', '') elif isinstance(response, dict): return response.get('translation_text', str(response)) return str(response) except Exception as e: # Handle specific model errors with better fallback options error_str = str(e).lower() if "model_not_supported" in error_str or "not supported by any provider" in error_str or "inference api enabled" in error_str: # Try fallback models for translation fallback_models = [ "Helsinki-NLP/opus-mt-en-es", # English to Spanish "Helsinki-NLP/opus-mt-en-fr", # English to French "Helsinki-NLP/opus-mt-en-de", # English to German "Helsinki-NLP/opus-mt-en-zh", # English to Chinese "Helsinki-NLP/opus-mt-en-ja", # English to Japanese "meta-llama/Llama-3.2-3B-Instruct" # Llama as general fallback ] # Try each fallback model for fallback_model in fallback_models: try: # For Llama models, use chat completion if "llama" in fallback_model.lower(): system_prompt = "You are a professional translator. Translate the given text accurately while preserving the original meaning and tone. Only provide the translation without any additional explanations." 
# If target language is specified, include it in the instruction if target_language: prompt = f"Translate the following text to {target_language}: {text}" else: prompt = f"Translate this text: {text}" messages = [ { "role": "system", "content": system_prompt }, { "role": "user", "content": prompt } ] response = hf_api.chat_completion( model=fallback_model, messages=messages, max_tokens=1024, temperature=0.3 ) return f"{response['choices'][0]['message']['content'].strip()}" # For Helsinki models, use standard translation else: response = hf_api.translation( model=fallback_model, text=text ) if isinstance(response, list) and len(response) > 0: return f"{response[0].get('translation_text', '')}" elif isinstance(response, dict): return f"{response.get('translation_text', str(response))}" except Exception as fallback_e: continue # Try next fallback model # If all fallbacks fail, return original error with suggestions return f"Error: {str(e)}. Tried fallback models but none worked. Please try a different model or check your HuggingFace token." else: return f"Error: {str(e)}" def translate_image(image_path: str, model_id: str, target_language: str = "", source_language: str = "Auto-detect"): """Translate text from image""" if not image_path: return "Please upload an image first." # Only Google TranslateGemma supports image translation if "translategemma" not in model_id.lower(): return "Image translation is only supported with Google TranslateGemma model. Please select 'google/translategemma-12b-it' from the model dropdown." if not GOOGLE_TRANSLATE_AVAILABLE: return "Google Translate Gemma is not available. Please check your installation of transformers, torch, and torchvision." 
try: # Initialize the translator translator = GoogleTranslateGemma() # Map language names to language codes lang_code_map = { "English": "en", "Spanish": "es", "French": "fr", "German": "de-DE", "Chinese (Simplified)": "zh-CN", "Chinese (Traditional)": "zh-TW", "Japanese": "ja", "Korean": "ko", "Italian": "it", "Portuguese": "pt", "Russian": "ru", "Arabic": "ar", "Hindi": "hi", "Dutch": "nl", "Turkish": "tr", "Polish": "pl", "Vietnamese": "vi", "Thai": "th", "Indonesian": "id", "Greek": "el", "Hebrew": "he", "Czech": "cs", "Swedish": "sv", "Danish": "da", "Norwegian": "no", "Finnish": "fi" } # Get source language code source_lang = "en" # Default to English if source_language != "Auto-detect" and source_language in lang_code_map: source_lang = lang_code_map[source_language] # Get target language code target_lang = "en" # Default to English if target_language in lang_code_map: target_lang = lang_code_map[target_language] # Translate from image (now supports local files) translated = translator.translate_image( image_input=image_path, source_lang=source_lang, target_lang=target_lang ) return translated except Exception as e: return f"Error: {str(e)}" def answer_question(question: str, context: str, model_id: str): """Answer question based on context""" if not hf_api: return "Please set your HuggingFace token in Settings first.", 0.0 if not question.strip() or not context.strip(): return "", 0.0 try: response = hf_api.question_answering( model=model_id, question=question, context=context ) answer = response.get('answer', '') score = response.get('score', 0.0) return answer, round(score, 4) except Exception as e: return f"Error: {str(e)}", 0.0 def generate_image(prompt: str, model_id: str, negative_prompt: str, num_steps: int): """Generate image from prompt""" if not hf_api: return None if not prompt.strip(): return None try: image_bytes = hf_api.image_generation( model=model_id, prompt=prompt, negative_prompt=negative_prompt if negative_prompt.strip() else None, 
num_inference_steps=num_steps ) # Save to temp file and return path import tempfile temp_path = os.path.join(tempfile.gettempdir(), "generated_image.png") with open(temp_path, "wb") as f: f.write(image_bytes) return temp_path except Exception as e: gr.Warning(f"Image generation error: {str(e)}") return None # ============ Model Management Functions ============ def search_hf_models(query: str, task: str, limit: int): """Search HuggingFace models""" if not hf_api: return [] if not query.strip(): return [] try: models = list(hf_api.list_models( search=query, pipeline_tag=task, sort="downloads", direction=-1, limit=limit )) results = [] for model in models: downloads = model.downloads or 0 likes = model.likes or 0 downloads_str = f"{downloads/1000000:.1f}M" if downloads >= 1000000 else f"{downloads/1000:.1f}K" if downloads >= 1000 else str(downloads) results.append([ model.id, model.author or '', model.pipeline_tag or '', downloads_str, likes ]) return results except Exception as e: gr.Warning(f"Search error: {str(e)}") return [] def get_model_info(model_id: str): """Get detailed model information""" if not hf_api or not model_id.strip(): return "No model ID provided" try: info = hf_api.model_info(model_id) downloads = info.downloads or 0 likes = info.likes or 0 result = f"""### {model_id} **Author:** {info.author or 'Unknown'} **Pipeline:** {info.pipeline_tag or 'N/A'} **Library:** {info.library_name or 'N/A'} **Downloads:** {downloads:,} **Likes:** {likes:,} **Tags:** {', '.join(info.tags[:15]) if info.tags else 'None'} **Created:** {str(info.created_at)[:10] if info.created_at else 'Unknown'} """ return result except Exception as e: return f"Error fetching model info: {str(e)}" def add_model_to_settings(model_id: str, name: str, role: str, temperature: float, max_tokens: int, system_prompt: str): """Add a model to settings""" if not model_id.strip(): return "Model ID is required", get_models_table() settings = load_settings(MODELS_SETTINGS_FILE) if 'models' not in 
settings: settings['models'] = [] # Generate unique ID unique_id = f"model-{int(datetime.now().timestamp() * 1000)}" model_data = { "id": unique_id, "name": name or model_id.split('/')[-1], "modelId": model_id, "role": role, "temperature": temperature, "maxTokens": max_tokens, "systemPrompt": system_prompt, "keywords": [], "enabled": True, "createdAt": int(datetime.now().timestamp() * 1000), "updatedAt": int(datetime.now().timestamp() * 1000) } settings['models'].append(model_data) save_settings(MODELS_SETTINGS_FILE, settings) return f"Model '{name or model_id}' added successfully!", get_models_table() def get_models_table(): """Get models as table data""" settings = load_settings(MODELS_SETTINGS_FILE) models = settings.get('models', []) table_data = [] for m in models: table_data.append([ m.get('id', ''), m.get('name', ''), m.get('modelId', ''), m.get('role', ''), m.get('temperature', 0.3), m.get('maxTokens', 500), "β" if m.get('enabled', True) else "β" ]) return table_data def delete_model(model_id: str): """Delete a model from settings""" if not model_id.strip(): return "No model selected", get_models_table() settings = load_settings(MODELS_SETTINGS_FILE) if 'models' in settings: settings['models'] = [m for m in settings['models'] if m['id'] != model_id] save_settings(MODELS_SETTINGS_FILE, settings) return f"Model deleted", get_models_table() return "Model not found", get_models_table() def toggle_model(model_id: str, enabled: bool): """Toggle model enabled state""" settings = load_settings(MODELS_SETTINGS_FILE) if 'models' in settings: for m in settings['models']: if m['id'] == model_id: m['enabled'] = enabled m['updatedAt'] = int(datetime.now().timestamp() * 1000) break save_settings(MODELS_SETTINGS_FILE, settings) return get_models_table() # ============ Settings Functions ============ def save_hf_token(token: str): """Save HuggingFace token""" settings = load_settings(MODELS_SETTINGS_FILE) settings['huggingfaceToken'] = token 
save_settings(MODELS_SETTINGS_FILE, settings) reinit_api(token) return "Token saved successfully!" def get_hf_token(): """Get current HuggingFace token""" settings = load_settings(MODELS_SETTINGS_FILE) return settings.get('huggingfaceToken', '') def get_account_info(): """Get HuggingFace account info""" if not hf_api: return "No API token configured" try: info = hf_api.hf_api.whoami() return f"""### Account Info **Username:** {info.get('name', 'Unknown')} **Email:** {info.get('email', 'Not available')} **Organizations:** {len(info.get('orgs', []))} """ except Exception as e: return f"Error: {str(e)}" # ============ Gradio Interface ============ # Custom CSS for professional dark theme custom_css = """ /* Dark theme with modern design */ .gradio-container { background: linear-gradient(135deg, #0f0f23 0%, #1a1a2e 100%) !important; color: #e0e0e0 !important; font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif !important; } /* Header styling */ .main-header { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important; padding: 2rem !important; border-radius: 16px !important; margin-bottom: 2rem !important; box-shadow: 0 10px 30px rgba(102, 126, 234, 0.3) !important; border: 1px solid rgba(255, 255, 255, 0.1) !important; } .main-header h1 { color: white !important; margin: 0 !important; font-size: 2.5rem !important; font-weight: 700 !important; text-shadow: 0 2px 4px rgba(0,0,0,0.3) !important; } .main-header p { color: rgba(255,255,255,0.9) !important; margin: 0.5rem 0 0 0 !important; font-size: 1.1rem !important; } /* Tab styling */ .tabs { background: transparent !important; border-radius: 12px !important; overflow: hidden !important; } .tab-nav { background: rgba(255, 255, 255, 0.05) !important; border: 1px solid rgba(255, 255, 255, 0.1) !important; border-radius: 12px !important; padding: 0.5rem !important; margin-bottom: 1.5rem !important; } .tab-nav button { background: transparent !important; color: #a0a0a0 
!important; border: none !important; padding: 0.75rem 1.5rem !important; margin: 0 0.25rem !important; border-radius: 8px !important; transition: all 0.3s ease !important; font-weight: 500 !important; } .tab-nav button:hover { background: rgba(255, 255, 255, 0.1) !important; color: #ffffff !important; } .tab-nav button.selected { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important; color: white !important; box-shadow: 0 4px 12px rgba(102, 126, 234, 0.3) !important; } /* Card styling */ .gradio-box { background: rgba(255, 255, 255, 0.05) !important; border: 1px solid rgba(255, 255, 255, 0.1) !important; border-radius: 12px !important; padding: 1.5rem !important; backdrop-filter: blur(10px) !important; transition: all 0.3s ease !important; } .gradio-box:hover { background: rgba(255, 255, 255, 0.08) !important; border-color: rgba(255, 255, 255, 0.15) !important; transform: translateY(-2px) !important; box-shadow: 0 8px 24px rgba(0, 0, 0, 0.2) !important; } /* Button styling */ .gradio-button { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important; color: white !important; border: none !important; padding: 0.75rem 1.5rem !important; border-radius: 8px !important; font-weight: 600 !important; transition: all 0.3s ease !important; box-shadow: 0 4px 12px rgba(102, 126, 234, 0.3) !important; } .gradio-button:hover { transform: translateY(-2px) !important; box-shadow: 0 6px 20px rgba(102, 126, 234, 0.4) !important; } .gradio-button.secondary { background: rgba(255, 255, 255, 0.1) !important; color: #e0e0e0 !important; border: 1px solid rgba(255, 255, 255, 0.2) !important; } /* Input styling */ .gradio-textbox, .gradio-dropdown { background: rgba(255, 255, 255, 0.05) !important; border: 1px solid rgba(255, 255, 255, 0.2) !important; border-radius: 8px !important; color: #e0e0e0 !important; transition: all 0.3s ease !important; } .gradio-textbox:focus, .gradio-dropdown:focus { border-color: #667eea !important; box-shadow: 0 0 0 3px 
rgba(102, 126, 234, 0.2) !important; background: rgba(255, 255, 255, 0.08) !important; } .gradio-textbox::placeholder { color: rgba(255, 255, 255, 0.5) !important; } /* Slider styling */ .gradio-slider { background: rgba(255, 255, 255, 0.1) !important; } .gradio-slider .slider-track { background: linear-gradient(90deg, #667eea 0%, #764ba2 100%) !important; } /* Chatbot styling */ .gradio-chatbot { background: rgba(255, 255, 255, 0.03) !important; border: 1px solid rgba(255, 255, 255, 0.1) !important; border-radius: 12px !important; } .gradio-chatbot .message { background: rgba(255, 255, 255, 0.05) !important; border-radius: 8px !important; margin: 0.5rem !important; padding: 1rem !important; } .gradio-chatbot .message.user { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important; color: white !important; } /* Dataframe styling */ .gradio-dataframe { background: rgba(255, 255, 255, 0.05) !important; border: 1px solid rgba(255, 255, 255, 0.1) !important; border-radius: 8px !important; } .gradio-dataframe table { color: #e0e0e0 !important; } .gradio-dataframe th { background: rgba(255, 255, 255, 0.1) !important; border-bottom: 1px solid rgba(255, 255, 255, 0.2) !important; } .gradio-dataframe td { border-bottom: 1px solid rgba(255, 255, 255, 0.05) !important; } /* Markdown styling */ .gradio-markdown { color: #e0e0e0 !important; } .gradio-markdown h1, .gradio-markdown h2, .gradio-markdown h3 { color: #ffffff !important; margin-top: 1.5rem !important; } .gradio-markdown a { color: #667eea !important; } /* Footer styling */ footer { background: rgba(255, 255, 255, 0.03) !important; border-top: 1px solid rgba(255, 255, 255, 0.1) !important; padding: 1.5rem !important; text-align: center !important; color: rgba(255, 255, 255, 0.6) !important; } /* Loading animation */ .loading { display: inline-block; width: 20px; height: 20px; border: 3px solid rgba(255, 255, 255, 0.3); border-radius: 50%; border-top-color: #667eea; animation: spin 1s ease-in-out 
infinite; } @keyframes spin { to { transform: rotate(360deg); } } /* Responsive design */ @media (max-width: 768px) { .main-header h1 { font-size: 2rem !important; } .gradio-box { padding: 1rem !important; } .tab-nav button { padding: 0.5rem 1rem !important; font-size: 0.9rem !important; } } /* Custom component styles */ .chatbot-container { background: rgba(255, 255, 255, 0.03) !important; border: 1px solid rgba(255, 255, 255, 0.1) !important; border-radius: 12px !important; overflow: hidden !important; } .chat-input textarea { background: rgba(255, 255, 255, 0.05) !important; border: 1px solid rgba(255, 255, 255, 0.2) !important; border-radius: 8px !important; resize: none !important; transition: all 0.3s ease !important; } .chat-input textarea:focus { border-color: #667eea !important; box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.2) !important; } .send-button { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important; border: none !important; height: 100% !important; min-height: 40px !important; } .settings-group { background: rgba(255, 255, 255, 0.05) !important; border: 1px solid rgba(255, 255, 255, 0.1) !important; border-radius: 12px !important; padding: 1.5rem !important; } .input-group, .output-group { background: rgba(255, 255, 255, 0.05) !important; border: 1px solid rgba(255, 255, 255, 0.1) !important; border-radius: 12px !important; padding: 1.5rem !important; height: 100% !important; } .output-textarea { background: rgba(255, 255, 255, 0.03) !important; border: 1px solid rgba(255, 255, 255, 0.1) !important; border-radius: 8px !important; font-family: 'Inter', monospace !important; line-height: 1.6 !important; } .translation-input, .translation-output { background: rgba(255, 255, 255, 0.05) !important; border: 1px solid rgba(255, 255, 255, 0.1) !important; border-radius: 12px !important; padding: 1.5rem !important; } .translation-result { background: rgba(102, 126, 234, 0.1) !important; border: 1px solid rgba(102, 126, 234, 0.3) 
!important; border-radius: 8px !important; font-weight: 500 !important; } .image-controls, .image-output { background: rgba(255, 255, 255, 0.05) !important; border: 1px solid rgba(255, 255, 255, 0.1) !important; border-radius: 12px !important; padding: 1.5rem !important; } .generated-image { border-radius: 8px !important; overflow: hidden !important; box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2) !important; } /* Animation classes */ .fade-in { animation: fadeIn 0.5s ease-in-out; } @keyframes fadeIn { from { opacity: 0; transform: translateY(10px); } to { opacity: 1; transform: translateY(0); } } .slide-up { animation: slideUp 0.3s ease-out; } @keyframes slideUp { from { transform: translateY(20px); opacity: 0; } to { transform: translateY(0); opacity: 1; } } /* Custom scrollbar */ ::-webkit-scrollbar { width: 8px; height: 8px; } ::-webkit-scrollbar-track { background: rgba(255, 255, 255, 0.05); border-radius: 4px; } ::-webkit-scrollbar-thumb { background: rgba(102, 126, 234, 0.5); border-radius: 4px; } ::-webkit-scrollbar-thumb:hover { background: rgba(102, 126, 234, 0.7); } """ # Build the Gradio app with gr.Blocks( title="AI Assistant - HuggingFace", theme=gr.themes.Soft( primary_hue="purple", secondary_hue="blue", neutral_hue="slate", font=["Inter", "system-ui", "sans-serif"] ), css=custom_css ) as app: # Header with modern design gr.HTML("""
Powered by HuggingFace Hub • Advanced AI Models
Interactive chat with AI models
Generate creative text, stories, articles, and more with AI
Translate text between multiple languages with advanced AI models
✨ New: Target language selection now works with all models!
Note: If a model is not available, the system will automatically try fallback models.
Upload an image containing text to extract and translate it
Test and validate translation functionality with comprehensive test scenarios
Choose between single translation test or comprehensive test suite
Create stunning images from text descriptions using advanced AI models