Madras1 committed
Commit 5b2f8cd · verified · 1 Parent(s): 32fd3d5

Update app.py

Files changed (1):
  1. app.py +40 -33

app.py CHANGED
@@ -10,7 +10,7 @@ from mistralai import Mistral
 import google.generativeai as genai
 from huggingface_hub import snapshot_download
 
-# --- 1. SECURITY (RATE LIMIT) ---
+# --- 1. SECURITY (ANTI-SPAM) ---
 MAX_REQUESTS_PER_MINUTE = 15
 BLOCK_TIME_SECONDS = 60
 ip_tracker = {}
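The body of check_spam is not part of this diff, but these constants and the ip_tracker dict imply a per-IP sliding-window limiter. A minimal sketch consistent with what the hunk shows (the commit's actual implementation may differ):

import time

def check_spam(request):
    # Hypothetical reconstruction: track request timestamps per client IP.
    ip = request.client.host if request else "unknown"
    now = time.time()
    # Keep only timestamps inside the 60-second window.
    recent = [t for t in ip_tracker.get(ip, []) if now - t < BLOCK_TIME_SECONDS]
    if len(recent) >= MAX_REQUESTS_PER_MINUTE:
        ip_tracker[ip] = recent
        return False  # over the limit: the router returns the "⛔ BLOCKED" message
    recent.append(now)
    ip_tracker[ip] = recent
    return True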
@@ -40,19 +40,18 @@ mistral_client = Mistral(api_key=os.environ.get("MISTRAL_API_KEY")) if os.enviro
 if os.environ.get("GEMINI_API_KEY"):
     genai.configure(api_key=os.environ.get("GEMINI_API_KEY"))
 
-# --- 3. HELPER (IMAGE) ---
+# --- 3. IMAGE HELPER ---
 def encode_image(image_path):
     try:
         with open(image_path, "rb") as image_file:
             return base64.b64encode(image_file.read()).decode('utf-8')
     except: return None
 
-# --- 4. EXECUTION FUNCTIONS ---
+# --- 4. EXECUTORS ---
 
 @spaces.GPU(duration=120)
 def run_local_h200(messages):
     global local_model, local_tokenizer
-
     for m in messages:
         if isinstance(m['content'], list): return "⚠️ Local model does not support images. Use Gemini/Pixtral."
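encode_image returns a bare base64 string, or None on any failure (the bare except swallows everything, including a missing file); callers are expected to wrap the result in a data URL, as run_mistral does further down. For example:

# Assumes a local file "photo.jpg"; encode_image returns None if it can't be read.
b64 = encode_image("photo.jpg")
if b64:
    image_part = {"type": "image_url", "image_url": f"data:image/jpeg;base64,{b64}"}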
 
@@ -72,7 +71,6 @@ def run_groq(messages, model_id):
     for m in messages:
         if isinstance(m['content'], list): return "⚠️ Groq does not support images. Use Gemini/Pixtral."
     if not groq_client: return "❌ Error: GROQ_API_KEY missing."
-
     clean_msgs = [{"role": m['role'], "content": m['content']} for m in messages]
     try:
         completion = groq_client.chat.completions.create(
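The create(...) call is truncated by the hunk boundary. Since the Groq SDK mirrors the OpenAI client, the rest of run_groq is presumably along the lines of this sketch (the error text is illustrative, not the commit's exact wording):

def run_groq_sketch(messages, model_id, groq_client):
    # Text-only path: Groq's chat endpoint takes OpenAI-style messages.
    clean_msgs = [{"role": m["role"], "content": m["content"]} for m in messages]
    try:
        completion = groq_client.chat.completions.create(model=model_id, messages=clean_msgs)
        return completion.choices[0].message.content
    except Exception as e:
        return f"❌ Groq error: {e}"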
@@ -83,7 +81,6 @@ def run_groq(messages, model_id):
 
 def run_mistral(messages, model_id):
     if not mistral_client: return "❌ Error: MISTRAL_API_KEY missing."
-
     formatted_msgs = []
     for m in messages:
         content = m['content']
@@ -99,7 +96,6 @@ def run_mistral(messages, model_id):
                 new_content.append({"type": "image_url", "image_url": f"data:image/jpeg;base64,{b64}"})
             else: new_content.append(item)
         formatted_msgs.append({"role": m['role'], "content": new_content})
-
     try:
         res = mistral_client.chat.complete(model=model_id, messages=formatted_msgs)
         return res.choices[0].message.content
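The visible tail shows the payload shape run_mistral builds for vision models: base64 data URLs inside a content list, sent through mistral_client.chat.complete. A standalone example of that shape (the model id and image path are placeholders, and the image file is assumed to exist):

# Placeholder model id and image path; mistral_client as configured above.
b64 = encode_image("chart.png")
msgs = [{
    "role": "user",
    "content": [
        {"type": "text", "text": "Describe this image."},
        {"type": "image_url", "image_url": f"data:image/jpeg;base64,{b64}"},
    ],
}]
res = mistral_client.chat.complete(model="pixtral-large-latest", messages=msgs)
print(res.choices[0].message.content)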
@@ -110,7 +106,6 @@ def run_gemini(messages, model_id):
     try:
         model = genai.GenerativeModel(model_id)
         chat_history = []
-
         for m in messages[:-1]:
             role = "user" if m['role'] == "user" else "model"
             parts = []
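run_gemini's continuation falls outside the hunk. With google.generativeai, a converted history of this shape typically feeds a chat session as in this sketch (text-only history assumed; not the commit's exact code, and it assumes genai is configured as above):

def run_gemini_sketch(messages, model_id):
    model = genai.GenerativeModel(model_id)
    history = []
    for m in messages[:-1]:
        # Gemini uses "model" where OpenAI-style messages say "assistant".
        role = "user" if m['role'] == "user" else "model"
        if isinstance(m['content'], str):  # assumption: prior turns are text-only
            history.append({"role": role, "parts": [m['content']]})
    chat = model.start_chat(history=history)
    return chat.send_message(messages[-1]['content']).text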
@@ -144,6 +139,7 @@ def router(message, history, model_selector, request: gr.Request):
     if not check_spam(request):
         return "⛔ BLOCKED: Message limit exceeded. Please wait."
 
+    # Normalize history
     messages = []
     if history:
         for turn in history:
@@ -156,6 +152,7 @@ def router(message, history, model_selector, request: gr.Request):
         elif isinstance(turn, dict):
             messages.append(turn)
 
+    # Process current message
     current_content = []
     if isinstance(message, dict):
         text = message.get("text", "")
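The tuple branch of the history loop sits between these two hunks. Gradio hands history over either as (user, assistant) pairs or as OpenAI-style dicts depending on version and configuration, so the normalization is presumably along these lines (a sketch; only the dict branch is visible in the diff):

messages = []
if history:
    for turn in history:
        if isinstance(turn, (list, tuple)) and len(turn) == 2:  # inferred branch
            user_msg, bot_msg = turn
            if user_msg: messages.append({"role": "user", "content": user_msg})
            if bot_msg: messages.append({"role": "assistant", "content": bot_msg})
        elif isinstance(turn, dict):  # branch visible in the diff
            messages.append(turn)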
@@ -168,8 +165,7 @@ def router(message, history, model_selector, request: gr.Request):
     else:
         messages.append({"role": "user", "content": str(message)})
 
-    print(f"🔀 Routing to: {model_selector}")
-
+    # Selection
     if "Gemini" in model_selector:
         tid = "gemini-1.5-flash"
         if "3.0" in model_selector: tid = "gemini-3.0-pro-preview"
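The remaining branches fall outside the hunk; routing is plain keyword matching on the dropdown label. A sketch of the dispatch (only the two Gemini ids are confirmed by the diff; the other model ids are illustrative):

def dispatch_sketch(messages, model_selector):
    if "Gemini" in model_selector:
        tid = "gemini-1.5-flash"
        if "3.0" in model_selector: tid = "gemini-3.0-pro-preview"
        return run_gemini(messages, tid)
    if "Groq" in model_selector:
        return run_groq(messages, "llama-3.3-70b-versatile")  # illustrative id
    if "Mistral" in model_selector:
        return run_mistral(messages, "mistral-large-latest")  # illustrative id
    if "Local" in model_selector:
        return run_local_h200(messages)
    return "⚠️ Model not recognized."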
@@ -197,39 +193,50 @@ def router(message, history, model_selector, request: gr.Request):
 
     return "⚠️ Model not recognized."
 
-# --- 6. INTERFACE (NO THEME - FIX) ---
-# Berta: I removed theme=gr.themes.Soft() because your Gradio version doesn't support it.
+# --- 6. INTERFACE ---
 with gr.Blocks() as demo:
-    gr.Markdown("# 🔀 APIDOST v8: The Arsenal")
+    gr.Markdown("# 🔀 APIDOST v9 (API Bridge Fixed)")
+
+    models_list = [
+        "✨ Google: Gemini 3.0 Pro (Experimental)",
+        "✨ Google: Gemini 2.5 Pro",
+        "✨ Google: Gemini 2.5 Flash",
+        "✨ Google: Gemini 2.0 Flash",
+        "☁️ Groq: GPT OSS 120B (OpenAI) 🆕",
+        "☁️ Groq: GPT OSS 20B (OpenAI) 🆕",
+        "☁️ Groq: Llama 3.3 70B",
+        "🇫🇷 Mistral: Magistral Medium 2509 🆕",
+        "🇫🇷 Mistral: Pixtral Large (Vision) 🖼️",
+        "🇫🇷 Mistral: Large 2512 (Dec/25)",
+        "🇫🇷 Mistral: Codestral 2508",
+        "🔥 Local H200: Qwen 2.5 Coder 32B"
+    ]
+
     with gr.Row():
-        model_dropdown = gr.Dropdown(
-            choices=[
-                "✨ Google: Gemini 3.0 Pro (Experimental)",
-                "✨ Google: Gemini 2.5 Pro",
-                "✨ Google: Gemini 2.5 Flash",
-                "✨ Google: Gemini 2.0 Flash",
-                "☁️ Groq: GPT OSS 120B (OpenAI) 🆕",
-                "☁️ Groq: GPT OSS 20B (OpenAI) 🆕",
-                "☁️ Groq: Llama 3.3 70B",
-                "🇫🇷 Mistral: Magistral Medium 2509 🆕",
-                "🇫🇷 Mistral: Pixtral Large (Vision) 🖼️",
-                "🇫🇷 Mistral: Large 2512 (Dec/25)",
-                "🇫🇷 Mistral: Codestral 2508",
-                "🔥 Local H200: Qwen 2.5 Coder 32B"
-            ],
-            value="🔥 Local H200: Qwen 2.5 Coder 32B",
-            label="Choose the Brain",
-            interactive=True
-        )
+        model_dropdown = gr.Dropdown(choices=models_list, value=models_list[-1], label="Brain", interactive=True)
 
+    # 1. Visual interface (for debugging in the Space)
     chat = gr.ChatInterface(
         fn=router,
         additional_inputs=[model_dropdown],
         multimodal=True
     )
 
+    # 2. API BRIDGE (THE JAVASCRIPT FIX)
+    # Use gr.JSON instead of gr.State for 'history' so no return value is required.
+    # This creates the "/chat" route correctly.
+    api_bridge = gr.Interface(
+        fn=router,
+        inputs=[
+            gr.MultimodalTextbox(label="message"),
+            gr.JSON(value=[], label="history"),  # <--- THE SECRET IS HERE. JSON accepts a list and doesn't choke.
+            gr.Dropdown(choices=models_list, label="model_selector", value=models_list[-1])
+        ],
+        outputs=[gr.Textbox(label="response")],
+        api_name="chat"  # <--- The JavaScript hits this endpoint.
+    )
+
 if __name__ == "__main__":
     try: snapshot_download(repo_id=LOCAL_MODEL_ID)
     except: pass
-    demo.queue().launch()
+    demo.queue(api_open=True).launch(server_name="0.0.0.0", server_port=7860)
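The api_name="chat" route is the point of this commit: it gives external JavaScript a stable endpoint. From Python the bridge can be exercised with gradio_client; the Space id below is a placeholder, and the exact MultimodalTextbox payload shape may vary slightly across Gradio versions:

from gradio_client import Client  # pip install gradio_client

client = Client("user/space-name")  # placeholder Space id
result = client.predict(
    {"text": "Hello!", "files": []},        # message (MultimodalTextbox payload)
    [],                                     # history (the gr.JSON input)
    "🔥 Local H200: Qwen 2.5 Coder 32B",    # model_selector
    api_name="/chat",
)
print(result)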
 