Madras1 committed
Commit d5d784c · verified · 1 Parent(s): 5b2f8cd

Update app.py

Files changed (1): app.py +53 -30
app.py CHANGED
@@ -4,13 +4,14 @@ import torch
 import os
 import time
 import base64
+from PIL import Image
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from groq import Groq
 from mistralai import Mistral
 import google.generativeai as genai
 from huggingface_hub import snapshot_download
 
-# --- 1. SECURITY (ANTI-SPAM) ---
+# --- 1. SECURITY (ANTI-SPAM - Berta's shield) ---
 MAX_REQUESTS_PER_MINUTE = 15
 BLOCK_TIME_SECONDS = 60
 ip_tracker = {}
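Note: `check_spam` is called by the router further down but is defined in an unchanged part of section 1, so it never appears in this diff. A minimal sketch of a per-IP sliding-window limiter that fits the constants above; the body is an assumption, not the Space's actual code:

import time

ip_tracker = {}
MAX_REQUESTS_PER_MINUTE = 15
BLOCK_TIME_SECONDS = 60

def check_spam(request):
    # gr.Request proxies the underlying Starlette request, so the caller's
    # address should be reachable as request.client.host (an assumption here).
    ip = request.client.host if request and request.client else "unknown"
    now = time.time()
    recent = [t for t in ip_tracker.get(ip, []) if now - t < BLOCK_TIME_SECONDS]
    if len(recent) >= MAX_REQUESTS_PER_MINUTE:
        ip_tracker[ip] = recent
        return False  # over the limit: reject until the window drains
    recent.append(now)
    ip_tracker[ip] = recent
    return True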
@@ -35,8 +36,10 @@ LOCAL_MODEL_ID = "Qwen/Qwen2.5-Coder-32B-Instruct"
 local_model = None
 local_tokenizer = None
 
+# API clients (Berta checks that the keys exist to avoid an ugly crash)
 groq_client = Groq(api_key=os.environ.get("GROQ_API_KEY")) if os.environ.get("GROQ_API_KEY") else None
 mistral_client = Mistral(api_key=os.environ.get("MISTRAL_API_KEY")) if os.environ.get("MISTRAL_API_KEY") else None
+
 if os.environ.get("GEMINI_API_KEY"):
     genai.configure(api_key=os.environ.get("GEMINI_API_KEY"))
 
@@ -47,16 +50,17 @@ def encode_image(image_path):
         return base64.b64encode(image_file.read()).decode('utf-8')
     except: return None
 
-# --- 4. EXECUTORS ---
+# --- 4. EXECUTORS (Berta's workers) ---
 
 @spaces.GPU(duration=120)
 def run_local_h200(messages):
     global local_model, local_tokenizer
+    # Critical check: a plain-text local model cannot read images directly here
     for m in messages:
-        if isinstance(m['content'], list): return "⚠️ The local model does not support images. Use Gemini/Pixtral."
+        if isinstance(m['content'], list): return "⚠️ Berta warns: the local model does not support images. Use Gemini or Pixtral."
 
     if local_model is None:
-        print(f"🐢 Loading {LOCAL_MODEL_ID}...")
+        print(f"🐢 Berta is loading {LOCAL_MODEL_ID}... Be patient, dear.")
         local_tokenizer = AutoTokenizer.from_pretrained(LOCAL_MODEL_ID)
         local_model = AutoModelForCausalLM.from_pretrained(
             LOCAL_MODEL_ID, torch_dtype=torch.bfloat16, device_map="cuda"
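The hunk above cuts off inside the `from_pretrained` call. For context, a hypothetical sketch of how `run_local_h200` typically continues for a Qwen-style instruct checkpoint; the parameter values are illustrative, not taken from this commit:

# Assumed continuation: apply the chat template, generate, then decode only
# the newly produced tokens (everything after the prompt).
inputs = local_tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(local_model.device)
output_ids = local_model.generate(inputs, max_new_tokens=1024)
return local_tokenizer.decode(output_ids[0][inputs.shape[-1]:], skip_special_tokens=True)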
@@ -69,8 +73,8 @@ def run_local_h200(messages):
 
 def run_groq(messages, model_id):
     for m in messages:
-        if isinstance(m['content'], list): return "⚠️ Groq does not support images. Use Gemini/Pixtral."
-    if not groq_client: return "❌ Error: GROQ_API_KEY missing."
+        if isinstance(m['content'], list): return "⚠️ Berta warns: Groq does not yet support sending images directly from this script."
+    if not groq_client: return "❌ Error: GROQ_API_KEY is missing, my angel."
     clean_msgs = [{"role": m['role'], "content": m['content']} for m in messages]
     try:
         completion = groq_client.chat.completions.create(
@@ -80,7 +84,7 @@ def run_groq(messages, model_id):
     except Exception as e: return f"❌ Groq Error: {e}"
 
 def run_mistral(messages, model_id):
-    if not mistral_client: return "❌ Error: MISTRAL_API_KEY missing."
+    if not mistral_client: return "❌ Error: MISTRAL_API_KEY is missing, prince."
     formatted_msgs = []
     for m in messages:
         content = m['content']
@@ -102,10 +106,13 @@ def run_mistral(messages, model_id):
     except Exception as e: return f"❌ Mistral Error: {e}"
 
 def run_gemini(messages, model_id):
-    if not os.environ.get("GEMINI_API_KEY"): return "❌ Error: GEMINI_API_KEY missing."
+    if not os.environ.get("GEMINI_API_KEY"): return "❌ Error: GEMINI_API_KEY is missing."
     try:
+        # Instantiate the model with the specific requested ID
         model = genai.GenerativeModel(model_id)
         chat_history = []
+
+        # Build the history (everything except the last message)
         for m in messages[:-1]:
             role = "user" if m['role'] == "user" else "model"
             parts = []
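For clarity, a worked example of the conversion this function performs: OpenAI-style messages become the `{"role": ..., "parts": [...]}` list that `start_chat` expects, with `assistant` renamed to `model` and parts free to mix strings and `PIL.Image` objects. The sample strings are illustrative:

# OpenAI-style input...
messages = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello! How can I help?"},
    {"role": "user", "content": "Now describe this image"},
]
# ...is rebuilt as Gemini chat history (all but the last message):
chat_history = [
    {"role": "user", "parts": ["Hi"]},
    {"role": "model", "parts": ["Hello! How can I help?"]},
]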
@@ -119,6 +126,7 @@ def run_gemini(messages, model_id):
                     if os.path.exists(path): parts.append(Image.open(path))
             if parts: chat_history.append({"role": role, "parts": parts})
 
+        # Prepare the last message (the current prompt)
         last_parts = []
         lc = messages[-1]['content']
         if isinstance(lc, str): last_parts.append(lc)
@@ -132,14 +140,14 @@ def run_gemini(messages, model_id):
         chat = model.start_chat(history=chat_history)
         response = chat.send_message(last_parts)
         return response.text
-    except Exception as e: return f"❌ Gemini Error: {e}"
+    except Exception as e: return f"❌ Gemini Error ({model_id}): {e}"
 
-# --- 5. CENTRAL ROUTER ---
+# --- 5. CENTRAL ROUTER (the brain of the operation) ---
 def router(message, history, model_selector, request: gr.Request):
     if not check_spam(request):
-        return "⛔ BLOCKED: Message limit exceeded. Please wait."
+        return "⛔ BLOCKED: You are going too fast, dear. Take a breather."
 
-    # Normalize history
+    # Normalize the history into OpenAI format
     messages = []
     if history:
         for turn in history:
@@ -165,15 +173,25 @@ def router(message, history, model_selector, request: gr.Request):
     else:
         messages.append({"role": "user", "content": str(message)})
 
-    # Selection
-    if "Gemini" in model_selector:
+    # --- MODEL SELECTION ---
+
+    # Google route: Gemini / LearnLM / Gemma
+    if any(k in model_selector for k in ["Gemini", "LearnLM", "Gemma"]):
+        # Default IDs
         tid = "gemini-1.5-flash"
+
+        # Smart mapping
         if "3.0" in model_selector: tid = "gemini-3.0-pro-preview"
         elif "2.5 Pro" in model_selector: tid = "gemini-2.5-pro"
+        elif "2.5 Flash Lite" in model_selector: tid = "gemini-2.5-flash-lite"  # 🆕 New
         elif "2.5 Flash" in model_selector: tid = "gemini-2.5-flash"
-        elif "2.0" in model_selector: tid = "gemini-2.0-flash-exp"
+        elif "2.0" in model_selector and "LearnLM" not in model_selector: tid = "gemini-2.0-flash-exp"
+        elif "LearnLM" in model_selector: tid = "learnlm-2.0-flash-experimental"  # 🆕 New
+        elif "Gemma 3" in model_selector: tid = "gemma-3-27b"  # 🆕 New (check whether the API accepts this exact ID)
+
         return run_gemini(messages, tid)
 
+    # Mistral route
     elif "Mistral" in model_selector:
         tid = "mistral-large-latest"
         if "Pixtral" in model_selector: tid = "pixtral-large-latest"
@@ -182,30 +200,36 @@ def router(message, history, model_selector, request: gr.Request):
         elif "Codestral" in model_selector: tid = "codestral-2508"
         return run_mistral(messages, tid)
 
+    # Groq route
     elif "Groq" in model_selector:
         tid = "llama-3.3-70b-versatile"
         if "120B" in model_selector: tid = "openai/gpt-oss-120b"
         elif "20B" in model_selector: tid = "openai/gpt-oss-20b"
         return run_groq(messages, tid)
 
+    # Local route
     elif "H200" in model_selector:
         return run_local_h200(messages)
 
-    return "⚠️ Model not recognized."
+    return "⚠️ Model not recognized. Check the selector."
 
 # --- 6. INTERFACE ---
-with gr.Blocks() as demo:
-    gr.Markdown("# 🔀 APIDOST v9 (API Bridge Fixed)")
+with gr.Blocks(theme=gr.themes.Soft()) as demo:
+    gr.Markdown("# 🔀 APIDOST v10 - Berta Edition")
+    gr.Markdown(f"### Hello Gabriel! Serving your models with care.")
 
     models_list = [
+        "✨ Google: LearnLM 1.5 Pro (Experimental) 📚",  # LearnLM
+        "✨ Google: Gemma 3 27B (Preview) 💎",  # Gemma 3
         "✨ Google: Gemini 3.0 Pro (Experimental)",
         "✨ Google: Gemini 2.5 Pro",
         "✨ Google: Gemini 2.5 Flash",
+        "✨ Google: Gemini 2.5 Flash Lite ⚡",  # Flash Lite
         "✨ Google: Gemini 2.0 Flash",
-        "☁️ Groq: GPT OSS 120B (OpenAI) 🆕",
-        "☁️ Groq: GPT OSS 20B (OpenAI) 🆕",
+        "☁️ Groq: GPT OSS 120B (OpenAI)",
+        "☁️ Groq: GPT OSS 20B (OpenAI)",
         "☁️ Groq: Llama 3.3 70B",
-        "🇫🇷 Mistral: Magistral Medium 2509 🆕",
+        "🇫🇷 Mistral: Magistral Medium 2509",
         "🇫🇷 Mistral: Pixtral Large (Vision) 🖼️",
         "🇫🇷 Mistral: Large 2512 (Dec/25)",
         "🇫🇷 Mistral: Codestral 2508",
@@ -213,27 +237,26 @@ with gr.Blocks() as demo:
     ]
 
     with gr.Row():
-        model_dropdown = gr.Dropdown(choices=models_list, value=models_list[-1], label="Brain", interactive=True)
+        model_dropdown = gr.Dropdown(choices=models_list, value=models_list[0], label="Choose the Brain", interactive=True)
 
-    # 1. Visual interface (for debugging in the Space)
+    # 1. Visual interface
     chat = gr.ChatInterface(
         fn=router,
         additional_inputs=[model_dropdown],
-        multimodal=True
+        multimodal=True,
+        description="Chat with Berta and her AI friends."
     )
 
-    # 2. API BRIDGE (THE JAVASCRIPT FIX)
-    # Use gr.JSON instead of gr.State for 'history' so no return value is required.
-    # This is what creates the "/chat" route correctly.
+    # 2. API BRIDGE
     api_bridge = gr.Interface(
         fn=router,
         inputs=[
             gr.MultimodalTextbox(label="message"),
-            gr.JSON(value=[], label="history"),  # <--- THE SECRET IS HERE. JSON accepts a list and doesn't hang.
-            gr.Dropdown(choices=models_list, label="model_selector", value=models_list[-1])
+            gr.JSON(value=[], label="history"),
+            gr.Dropdown(choices=models_list, label="model_selector", value=models_list[0])
         ],
         outputs=[gr.Textbox(label="response")],
-        api_name="chat"  # <--- The JavaScript hits this endpoint.
+        api_name="chat"
     )
 
 if __name__ == "__main__":
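With `api_name="chat"` in place, external callers can reach the bridge through the `/chat` route. A minimal sketch using the official `gradio_client` package; the Space URL is a placeholder, and the three positional inputs mirror the `gr.Interface` definition above:

from gradio_client import Client

client = Client("https://your-space.hf.space")  # placeholder URL
reply = client.predict(
    {"text": "Hello, Berta!", "files": []},  # message: MultimodalTextbox payload
    [],                                      # history: the gr.JSON input
    "✨ Google: Gemini 2.5 Flash",           # model_selector: one of the dropdown labels
    api_name="/chat",
)
print(reply)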
 