# app.py — NewAI chat Space: Gradio UI over the Hugging Face Inference API.
import gradio as gr
from huggingface_hub import InferenceClient
import os
# Top 50 models available for selection.
# Maps a display label (shown in the dropdown) to a Hugging Face repo id
# passed to InferenceClient. Labels include emoji and must stay unique.
MODELS: dict[str, str] = {
    "🚀 LLaMA-3-70B": "meta-llama/Meta-Llama-3-70B-Instruct",
    "⚡ LLaMA-3-8B": "meta-llama/Meta-Llama-3-8B-Instruct",
    "🤖 Mistral-7B": "mistralai/Mistral-7B-Instruct-v0.3",
    "🌪️ Mixtral-8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "💬 Mixtral-8x22B": "mistralai/Mixtral-8x22B-Instruct-v0.1",
    "🧠 Qwen-2.5-72B": "Qwen/Qwen2.5-72B-Instruct",
    "📚 Qwen-2.5-7B": "Qwen/Qwen2.5-7B-Instruct",
    "⚙️ Gemma-2-27B": "google/gemma-2-27b-it",
    "🔧 Gemma-2-9B": "google/gemma-2-9b-it",
    "🎯 Gemma-2B": "google/gemma-2b-it",
    "🌟 Command-R+": "CohereForAI/c4ai-command-r-plus",
    "📝 Command-R": "CohereForAI/c4ai-command-r-v01",
    "🔥 DeepSeek-V3": "deepseek-ai/DeepSeek-V3",
    "⚡ DeepSeek-R1": "deepseek-ai/DeepSeek-R1",
    "🎨 DeepSeek-Coder": "deepseek-ai/DeepSeek-Coder-33B-instruct",
    "💡 Phi-3-mini": "microsoft/Phi-3-mini-4k-instruct",
    "📖 Phi-3-small": "microsoft/Phi-3-small-8k-instruct",
    "🏆 Phi-3-medium": "microsoft/Phi-3-medium-4k-instruct",
    "🚅 Starling-LM": "berkeley-nest/Starling-LM-7B-alpha",
    "✨ Neural-7B": "Intel/neural-chat-7b-v3-3",
    "🔮 SOLAR-10.7B": "upstage/SOLAR-10.7B-Instruct-v1.0",
    "🌊 Yi-34B": "01-ai/Yi-34B-Chat",
    "🌿 Yi-6B": "01-ai/Yi-6B-Chat",
    "💫 Zephyr-7B": "HuggingFaceH4/zephyr-7b-beta",
    "🔥 Tulu-2-70B": "allenai/tulu-2-70b",
    "⚡ Tulu-2-13B": "allenai/tulu-2-13b",
    "🎯 OpenHermes-2.5": "teknium/OpenHermes-2.5-Mistral-7B",
    "🚀 Nous-Hermes-2": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
    "💬 Vicuna-33B": "lmsys/vicuna-33b-v1.3",
    "🧪 WizardLM-70B": "WizardLM/WizardLM-70B-V1.0",
    "📊 WizardCoder-15B": "WizardLM/WizardCoder-15B-V1.0",
    "🎓 WizardMath-70B": "WizardLM/WizardMath-70B-V1.0",
    "🔬 Falcon-40B": "tiiuae/falcon-40b-instruct",
    "🌍 Falcon-7B": "tiiuae/falcon-7b-instruct",
    "⚡ MPT-30B": "mosaicml/mpt-30b-chat",
    "🔥 MPT-7B": "mosaicml/mpt-7b-chat",
    "💫 Dolly-v2-12B": "databricks/dolly-v2-12b",
    "✨ Dolly-v2-3B": "databricks/dolly-v2-3b",
    "🌟 Pythia-12B": "EleutherAI/pythia-12b-deduped",
    "🚀 RedPajama-3B": "togethercomputer/RedPajama-INCITE-Chat-3B-v1",
    "🎯 Guanaco-65B": "timdettmers/guanaco-65b",
    "📚 Guanaco-33B": "timdettmers/guanaco-33b",
    "🧠 Guanaco-13B": "timdettmers/guanaco-13b",
    "⚡ Guanaco-7B": "timdettmers/guanaco-7b",
    "💡 Cerebras-GPT-13B": "cerebras/Cerebras-GPT-13B",
    "🔧 GPT-NeoX-20B": "EleutherAI/gpt-neox-20b",
    "🌊 GPT-J-6B": "EleutherAI/gpt-j-6b",
    "🎨 BLIP-2": "Salesforce/blip2-opt-2.7b",
    "📝 FLAN-T5-XXL": "google/flan-t5-xxl",
    "⚙️ FLAN-UL2": "google/flan-ul2"
}
def respond(message, history, model, system_message, max_tokens, temperature, top_p, hf_token):
    """Stream a chat completion from the selected Hugging Face model.

    Args:
        message: Latest user message text.
        history: Prior turns as a list of {"role", "content"} dicts
            (assumes gr.ChatInterface messages format — TODO confirm
            against the installed Gradio version).
        model: Display label; must be a key of the module-level MODELS dict.
        system_message: System prompt prepended to the conversation.
        max_tokens: Generation cap forwarded to the API.
        temperature: Sampling temperature forwarded to the API.
        top_p: Nucleus-sampling parameter forwarded to the API.
        hf_token: Either an OAuth-token-like object exposing ``.token``
            (what gr.LoginButton provides), a raw token string, or None.

    Yields:
        The accumulated assistant response after each streamed chunk, or a
        single "❌ ..." error string when no token is available or the API
        call fails.
    """
    try:
        # Normalize the token: LoginButton yields an object carrying
        # .token, while direct callers may pass a plain string.
        token = None
        if hf_token:
            if hasattr(hf_token, 'token'):
                token = hf_token.token
            elif isinstance(hf_token, str):
                token = hf_token
        # Fall back to the environment whenever normalization produced
        # nothing. (The original only consulted HF_TOKEN when hf_token was
        # falsy, so a truthy-but-unusable object skipped the fallback.)
        if not token:
            token = os.getenv("HF_TOKEN")
        if not token:
            yield "❌ Please login with your Hugging Face account or set HF_TOKEN"
            return
        client = InferenceClient(token=token, model=MODELS[model])
        messages = [{"role": "system", "content": system_message}]
        for msg in history:
            # Skip turns without text content (e.g. file attachments),
            # which the chat-completion API would reject.
            if msg.get("content") is not None:
                messages.append({"role": msg["role"], "content": msg["content"]})
        messages.append({"role": "user", "content": message})
        response = ""
        for chunk in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p
        ):
            # Some stream chunks carry no delta text (role-only deltas,
            # finish events); only accumulate real content.
            if chunk.choices and chunk.choices[0].delta.content:
                response += chunk.choices[0].delta.content
                yield response
    except Exception as e:  # UI boundary: surface any failure in the chat.
        yield f"❌ Error: {str(e)}"
# Custom CSS injected into gr.Blocks: gradient page background, card-style
# chat bubbles with hover lift, gradient primary buttons, frosted sidebar.
css = """
:root {
--primary: #8b5cf6;
--secondary: #ec4899;
--dark: #1f2937;
}
.gradio-container {
max-width: 1200px !important;
margin: auto !important;
font-family: 'Inter', sans-serif !important;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
min-height: 100vh;
}
.chat-message {
border-radius: 20px !important;
border: none !important;
box-shadow: 0 10px 40px rgba(0,0,0,0.1) !important;
transition: all 0.3s ease !important;
}
.chat-message:hover {
transform: translateY(-2px);
box-shadow: 0 15px 50px rgba(0,0,0,0.15) !important;
}
.user-message {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
color: white !important;
}
.bot-message {
background: white !important;
color: #333 !important;
}
button.primary {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
border: none !important;
border-radius: 12px !important;
padding: 10px 25px !important;
font-weight: 600 !important;
transition: all 0.3s ease !important;
}
button.primary:hover {
transform: scale(1.05);
box-shadow: 0 10px 30px rgba(102,126,234,0.4) !important;
}
.sidebar {
background: rgba(255,255,255,0.95) !important;
border-radius: 20px !important;
padding: 20px !important;
backdrop-filter: blur(10px);
}
"""
# Build the interface: a sidebar (login, model picker, advanced settings)
# next to a ChatInterface wired to respond(). Component construction order
# matters — sidebar widgets must exist before they are listed as
# additional_inputs below.
with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
    with gr.Sidebar(open=True):
        gr.Markdown("""
        # 🤖 AI Chat Hub
        ### 50+ Models • Instant Answers
        """)
        # OAuth login; its token object is forwarded to respond() as the
        # last additional input.
        login_btn = gr.LoginButton()
        model_dropdown = gr.Dropdown(
            choices=list(MODELS.keys()),
            value="🚀 LLaMA-3-70B",
            label="🎯 Select Model",
            interactive=True
        )
        with gr.Accordion("⚙️ Advanced Settings", open=False):
            system_msg = gr.Textbox(
                value="You are a helpful AI assistant.",
                label="System Prompt",
                lines=2
            )
            with gr.Row():
                max_tokens = gr.Slider(100, 4096, 1024, step=100, label="Max Tokens")
                temperature = gr.Slider(0.1, 2.0, 0.7, step=0.1, label="Temperature")
                top_p = gr.Slider(0.1, 1.0, 0.95, step=0.05, label="Top P")
        gr.Markdown("""
        💡 **Tips:**
        - **Higher temperature** = more creative
        - **Lower temperature** = more focused
        - **Top P** controls diversity
        """)
    # Main chat. additional_inputs order must match respond()'s parameters
    # after (message, history): model, system prompt, sampling knobs, token.
    chatbot = gr.ChatInterface(
        respond,
        additional_inputs=[
            model_dropdown,
            system_msg,
            max_tokens,
            temperature,
            top_p,
            login_btn
        ],
        examples=[
            ["Explain quantum computing in simple terms"],
            ["Write a poem about AI"],
            ["How to learn programming?"],
            ["Tell me a joke"],
            ["What's the meaning of life?"],
        ],
        cache_examples=False,
    )
if __name__ == "__main__":
    # Bind to all interfaces so the app is reachable from outside the
    # container (standard setup for Hugging Face Spaces / Docker).
    demo.launch(server_port=7860, server_name="0.0.0.0")