Update app.py
app.py CHANGED
@@ -7,34 +7,30 @@ from groq import Groq
 from mistralai import Mistral
 import google.generativeai as genai
 
-# --- CONFIGURATION
+# --- CONFIGURATION ---
 
-# 1. LOCAL (H200
+# 1. LOCAL (H200)
 LOCAL_MODEL_ID = "Qwen/Qwen2.5-Coder-32B-Instruct"
 local_model = None
 local_tokenizer = None
 
-# 2.
+# 2. CLOUD CLIENTS
 groq_key = os.environ.get("GROQ_API_KEY")
 groq_client = Groq(api_key=groq_key) if groq_key else None
 
-# 3. MISTRAL CLIENT
 mistral_key = os.environ.get("MISTRAL_API_KEY")
 mistral_client = Mistral(api_key=mistral_key) if mistral_key else None
 
-# 4. GEMINI CLIENT (NEW!)
 gemini_key = os.environ.get("GEMINI_API_KEY")
 if gemini_key:
     genai.configure(api_key=gemini_key)
 
-# ---
-
-# A. Local H200 function (limited quota)
+# --- FUNCTION 1: LOCAL H200 (ZeroGPU) ---
 @spaces.GPU(duration=60)
 def run_local_h200(messages):
     global local_model, local_tokenizer
     if local_model is None:
-        print(f"🐢
+        print(f"🐢 Loading {LOCAL_MODEL_ID}...")
         local_tokenizer = AutoTokenizer.from_pretrained(LOCAL_MODEL_ID)
         local_model = AutoModelForCausalLM.from_pretrained(
             LOCAL_MODEL_ID, torch_dtype=torch.bfloat16, device_map="cuda"
@@ -45,90 +41,88 @@ def run_local_h200(messages):
     outputs = local_model.generate(**inputs, max_new_tokens=2048, temperature=0.6, do_sample=True)
     return local_tokenizer.decode(outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
 
-#
+# --- FUNCTION 2: GROQ ---
 def run_groq(messages, model_id):
-    if not groq_client: return "❌ Error:
+    if not groq_client: return "❌ Error: Configure the GROQ_API_KEY."
     try:
         completion = groq_client.chat.completions.create(
             model=model_id, messages=messages, temperature=0.7, max_tokens=4096
         )
         return completion.choices[0].message.content
-    except Exception as e: return f"❌
+    except Exception as e: return f"❌ Groq Error: {e}"
 
-#
+# --- FUNCTION 3: MISTRAL (new IDs from your list!) ---
 def run_mistral(messages, model_id):
-    if not mistral_client: return "❌ Error:
+    if not mistral_client: return "❌ Error: Configure the MISTRAL_API_KEY."
     try:
+        print(f"🇫🇷 Mistral Target: {model_id}")
         res = mistral_client.chat.complete(model=model_id, messages=messages)
         return res.choices[0].message.content
-    except Exception as e: return f"❌
+    except Exception as e: return f"❌ Mistral Error: {e}"
 
-#
+# --- FUNCTION 4: GEMINI ("hacker" mode - accepts whatever you send) ---
 def run_gemini(messages, model_id):
-    if not gemini_key: return "❌ Error:
+    if not gemini_key: return "❌ Error: Configure the GEMINI_API_KEY."
     try:
         model = genai.GenerativeModel(model_id)
-
-        # Gemini works best with a raw string or a formatted history
-        # Convert the chat history into a single prompt to preserve context
-        full_prompt = ""
-        for m in messages:
-            role = "User" if m['role'] == 'user' else "Model"
-            full_prompt += f"{role}: {m['content']}\n"
-
+        full_prompt = "\n".join([f"{m['role'].title()}: {m['content']}" for m in messages])
         response = model.generate_content(full_prompt)
         return response.text
-    except Exception as e: return f"❌
+    except Exception as e: return f"❌ Gemini Error ({model_id}): {e}"
 
-# --- ROUTER
+# --- ROUTER ---
 def router(message, history, model_selector):
-    # Format the history
     messages = []
     for user_msg, bot_msg in history:
         if user_msg: messages.append({"role": "user", "content": user_msg})
         if bot_msg: messages.append({"role": "assistant", "content": bot_msg})
     messages.append({"role": "user", "content": message})
 
-
-
-    # --- SELECTION ---
-    if "H200" in model_selector:
-        return run_local_h200(messages)
+    # --- MODEL MAPPING ---
 
-
-
-
-
+    # Google route (your "Future" models)
+    if "Gemini 3" in model_selector:
+        return run_gemini(messages, "gemini-3.0-pro-exp")  # ID guess
+    elif "Gemini 2.5 Flash" in model_selector:
+        return run_gemini(messages, "gemini-2.0-flash-exp")  # the current experimental one
 
+    # Groq route
     elif "Llama 3.3" in model_selector:
         return run_groq(messages, "llama-3.3-70b-versatile")
-    elif "Llama 3.1 8B" in model_selector:
-        return run_groq(messages, "llama-3.1-8b-instant")
 
-
-
-
-
+    # Mistral route (your VIP list)
+    elif "Large 2512" in model_selector:
+        return run_mistral(messages, "mistral-large-2512")
+    elif "Pixtral Large" in model_selector:
+        return run_mistral(messages, "pixtral-large-latest")  # Multimodal!
+    elif "Magistral Medium" in model_selector:
+        return run_mistral(messages, "magistral-medium-latest")  # Exclusive
+    elif "Codestral 2508" in model_selector:
+        return run_mistral(messages, "codestral-2508")
 
+    # Local route
+    elif "H200" in model_selector:
+        return run_local_h200(messages)
     else:
-        return "⚠️ Model not
+        return "⚠️ Model not configured in the router."
 
-# ---
+# --- UI ---
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
-    gr.Markdown("# 🔀 APIDOST Router
+    gr.Markdown("# 🔀 APIDOST Router V5: Unleashed")
 
     with gr.Row():
         model_dropdown = gr.Dropdown(
             choices=[
-                "✨ Google: Gemini
-                "✨ Google: Gemini
-                "☁️ Groq: Llama 3.3 70B
-                "
-                "🇫🇷 Mistral: Large
-                "🇫🇷 Mistral:
-                "
+                "✨ Google: Gemini 3 (Experimental)",
+                "✨ Google: Gemini 2.5 Flash",
+                "☁️ Groq: Llama 3.3 70B",
+                "🇫🇷 Mistral: Large 2512 (Dec/25)",
+                "🇫🇷 Mistral: Pixtral Large (Vision)",
+                "🇫🇷 Mistral: Magistral Medium (VIP)",
+                "🇫🇷 Mistral: Codestral 2508 (Code)",
+                "🔥 Local H200: Qwen 2.5 Coder"
            ],
-            value="
+            value="🇫🇷 Mistral: Large 2512 (Dec/25)",
            label="Choose the Brain",
            interactive=True
        )
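
The diff is cut off at the dropdown definition, before the chat components and event wiring. For reference, here is a minimal sketch of how the remaining hookup could look, assuming a plain gr.Chatbot/gr.Textbox pair in the default tuple history format (which matches router iterating over (user_msg, bot_msg) pairs); the names chatbot, msg_box, and respond are hypothetical and not part of this commit:

    # Hypothetical continuation inside the same `with gr.Blocks(...) as demo:` block.
    chatbot = gr.Chatbot(label="Chat")
    msg_box = gr.Textbox(label="Message", placeholder="Type here...")

    def respond(message, chat_history, selected_model):
        # Delegate to router() and append the new (user, bot) pair to the history.
        reply = router(message, chat_history or [], selected_model)
        return "", (chat_history or []) + [(message, reply)]

    # Submitting the textbox clears it and refreshes the chatbot.
    msg_box.submit(respond, [msg_box, chatbot, model_dropdown], [msg_box, chatbot])

demo.launch()

A gr.ChatInterface with additional_inputs=[model_dropdown] would work as well, since router(message, history, model_selector) already has the signature that interface expects.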