jorgeiv500 committed
Commit b71f9aa · verified · 1 Parent(s): ac0510e

Update app.py

Files changed (1)
  1. app.py +135 -89
app.py CHANGED
@@ -1,10 +1,13 @@
- # app.py — DeepSeek-OCR + BioMedLM-7B (local GGUF via llama.cpp, ZeroGPU-safe) — Gradio 5
- # - OCR with DeepSeek-OCR (GPU only inside @spaces.GPU)
- # - Chat with BioMedLM-7B GGUF via llama.cpp (GPU only inside @spaces.GPU)
- # - Reinforced few-shot prompt and deterministic decoding
- # - Configurable via environment variables: GGUF_REPO, GGUF_FILE, N_CTX, N_BATCH, N_GPU_LAYERS
-
- import os, re, json, tempfile, traceback
  import gradio as gr
  import torch
  from PIL import Image
@@ -14,12 +17,12 @@ from huggingface_hub import hf_hub_download
  from llama_cpp import Llama

  # =========================
- # CONFIG (env)
  # =========================
- # --- llama.cpp (BioMedLM-7B GGUF) ---
- GGUF_REPO = os.getenv("GGUF_REPO", "").strip()  # e.g. "theuser/biomedlm-7b-gguf" (use your own)
- GGUF_FILE = os.getenv("GGUF_FILE", "").strip()  # e.g. "BioMedLM-7B.Q4_K_M.gguf"
- # default candidates if GGUF_FILE is not set
  _GGUF_CANDIDATES = [
      "BioMedLM-7B.Q4_K_M.gguf",
      "BioMedLM-7B.Q5_K_M.gguf",
@@ -32,36 +35,37 @@ _GGUF_CANDIDATES = [
  ]
  GGUF_CANDIDATES = [GGUF_FILE] if GGUF_FILE else _GGUF_CANDIDATES

- # performance / memory
  N_CTX = int(os.getenv("N_CTX", "4096"))
  N_THREADS = int(os.getenv("N_THREADS", str(os.cpu_count() or 4)))
- N_GPU_LAYERS = int(os.getenv("N_GPU_LAYERS", "35"))  # a 7B model has ~32 layers; 35 = "all of them"
  N_BATCH = int(os.getenv("N_BATCH", "512"))  # raise to 1024 if your GPU allows it

- # deterministic generation for instruction-following
  GEN_TEMPERATURE = float(os.getenv("TEMPERATURE", "0.0"))
  GEN_TOP_P = float(os.getenv("TOP_P", "1.0"))
  GEN_MAX_NEW_TOKENS = int(os.getenv("MAX_NEW_TOKENS", "384"))
  STOP_SEQS = ["\n###", "\nUser:", "\nAssistant:", "\nUsuario:", "\nAsistente:"]

- # optional DeepSeek-OCR revision pin to avoid unexpected changes
- DS_OCR_REV = os.getenv("DS_OCR_REV", None)  # e.g. a commit hash

  # =========================
- # Global state (only inside GPU workers)
  # =========================
- _llm = None
- _llm_name = None
-
  def _truncate(s: str, n=3000):
      s = (s or "")
      return s if len(s) <= n else s[:n]

  def _clean_ocr(s: str) -> str:
-     if not s: return ""
-     s = re.sub(r'[^\S\r\n]+', ' ', s)  # collapse spaces
-     s = re.sub(r'(\{#Sec\d+\}|#+\w*)', ' ', s)  # stray anchors/headers
-     s = re.sub(r'\s{2,}', ' ', s)
      lines = []
      for par in s.splitlines():
          par = par.strip()
@@ -110,75 +114,92 @@ def build_user_prompt(ocr_md, ocr_txt, user_msg):
      )
      return prompt

  # =========================
  # BioMedLM-7B GGUF — llama.cpp (GPU only inside the worker)
  # =========================
  def _download_gguf_path():
      last_err = None
      if GGUF_REPO:
          for fname in GGUF_CANDIDATES:
              try:
-                 path = hf_hub_download(repo_id=GGUF_REPO, filename=fname)
                  return path, f"{GGUF_REPO}:{fname}"
              except Exception as e:
                  last_err = e
-     # fallback: a .gguf uploaded to the Space itself (repo folder)
      for fname in GGUF_CANDIDATES:
          local_path = os.path.join(os.getcwd(), fname)
          if os.path.exists(local_path):
              return local_path, f"./{fname}"
-     raise RuntimeError(f"No se pudo localizar el GGUF. Configura GGUF_REPO/GGUF_FILE o sube el .gguf. Último error: {last_err}")

- @spaces.GPU
- def biomedlm_warmup():
-     """Initialize llama.cpp inside the GPU worker (avoids CUDA in the main process)."""
      global _llm, _llm_name
      if _llm is not None:
-         return f"OK::warm (reusing {_llm_name})"
-     gguf_path, used = _download_gguf_path()
-     _llm = Llama(
-         model_path=gguf_path,
-         n_ctx=N_CTX,
-         n_threads=N_THREADS,
-         n_gpu_layers=N_GPU_LAYERS,
-         n_batch=N_BATCH,
-         # default decoding: greedy (no sampling)
-         verbose=False,
-     )
-     _llm_name = used
-     return f"OK::loaded {used}"

- def _to_chatml(system_prompt, user_prompt):
-     # simple ChatML-compatible format for llama.cpp
-     return [
-         {"role": "system", "content": system_prompt},
-         {"role": "user", "content": user_prompt},
-     ]

  @spaces.GPU
- def biomedlm_chat(ocr_md, ocr_txt, user_msg, temperature=GEN_TEMPERATURE, top_p=GEN_TOP_P, max_tokens=GEN_MAX_NEW_TOKENS):
-     """Generate inside the GPU worker with the LLM already initialized."""
-     global _llm
-     if _llm is None:
-         status = biomedlm_warmup()
-         if not str(status).startswith("OK::"):
-             return "ERR::No se pudo inicializar el modelo GGUF"
-     prompt_user = build_user_prompt(ocr_md, ocr_txt, user_msg)
-     messages = _to_chatml(SYSTEM_INSTR, prompt_user)
      try:
-         out = _llm.create_chat_completion(
-             messages=messages,
-             temperature=temperature,
-             top_p=top_p,
-             max_tokens=max_tokens,
-         )
-         ans = out["choices"][0]["message"]["content"]
-         return "OK::" + (ans or "").strip()
      except Exception as e:
-         return f"ERR::[{e.__class__.__name__}] {str(e) or repr(e)}"

  # =========================
- # DeepSeek-OCR (GPU only inside the worker)
  # =========================
  def _load_ocr_model():
      model_name = "deepseek-ai/DeepSeek-OCR"
@@ -186,7 +207,7 @@ def _load_ocr_model():
      kwargs = dict(
          _attn_implementation=os.getenv("OCR_ATTN_IMPL", "flash_attention_2"),
          trust_remote_code=True,
-         use_safetensors=True
      )
      if DS_OCR_REV:
          kwargs["revision"] = DS_OCR_REV
@@ -194,6 +215,7 @@ def _load_ocr_model():
          mdl = AutoModel.from_pretrained(model_name, **kwargs).eval()
          return tok, mdl
      except Exception as e:
          if any(k in str(e).lower() for k in ["flash_attn", "flashattention2", "flash_attention_2"]):
              kwargs["_attn_implementation"] = "eager"
              mdl = AutoModel.from_pretrained(model_name, **kwargs).eval()
@@ -206,7 +228,8 @@ tokenizer, model = _load_ocr_model()
  def process_image(image, model_size, task_type, is_eval_mode):
      if image is None:
          return None, "Please upload an image first.", "Please upload an image first."
-     # move to the GPU ONLY inside the worker
      if torch.cuda.is_available():
          dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16
          model_device = model.to(dtype).to("cuda")
@@ -251,36 +274,46 @@ def process_image(image, model_size, task_type, is_eval_mode):

      result_image = None
      if os.path.exists(image_result_path):
-         result_image = Image.open(image_result_path); result_image.load()

      text_result = plain_text if plain_text else markdown_content
      return result_image, markdown_content, text_result

  # =========================
- # Chat wrapper for the UI
  # =========================
  def biomedlm_reply(user_msg, chat_msgs, ocr_md, ocr_txt):
      try:
-         res = biomedlm_chat(ocr_md, ocr_txt, user_msg, temperature=GEN_TEMPERATURE, top_p=GEN_TOP_P, max_tokens=GEN_MAX_NEW_TOKENS)
-         if str(res).startswith("OK::"):
-             answer = res[4:]
              updated = (chat_msgs or []) + [
                  {"role": "user", "content": user_msg or "(analizar solo OCR)"},
-                 {"role": "assistant", "content": answer}
              ]
              return updated, "", gr.update(value="")
          else:
-             err_msg = res[5:] if str(res).startswith("ERR::") else str(res)
              updated = (chat_msgs or []) + [
                  {"role": "user", "content": user_msg or ""},
-                 {"role": "assistant", "content": "⚠️ Error LLM (local). Revisa el panel de debug."}
              ]
              return updated, "", gr.update(value=err_msg)
      except Exception as e:
          tb = traceback.format_exc(limit=2)
          updated = (chat_msgs or []) + [
              {"role": "user", "content": user_msg or ""},
-             {"role": "assistant", "content": f"⚠️ Error LLM: {e}"}
          ]
          return updated, "", gr.update(value=f"{e}\n{tb}")
@@ -293,9 +326,9 @@ def clear_chat():
  with gr.Blocks(title="OpScanIA — DeepSeek-OCR + BioMedLM-7B (GGUF)", theme=gr.themes.Soft()) as demo:
      gr.Markdown(
          """
- # DeepSeek-OCR → Chat Clínico con **BioMedLM-7B (GGUF local)**
  1) **Sube una imagen** y corre **OCR** (imagen anotada, Markdown y texto).
- 2) **Chatea** con **BioMedLM-7B GGUF (llama.cpp)** usando automáticamente el **OCR** como contexto.
  *Uso educativo; no reemplaza consejo médico.*
  """
      )
@@ -305,17 +338,26 @@ with gr.Blocks(title="OpScanIA — DeepSeek-OCR + BioMedLM-7B (GGUF)", theme=gr.

      with gr.Row():
          with gr.Column(scale=1):
-             image_input = gr.Image(type="pil", label="Upload Image", sources=["upload", "clipboard", "webcam"])
              model_size = gr.Dropdown(
                  choices=["Tiny", "Small", "Base", "Large", "Gundam (Recommended)"],
-                 value="Gundam (Recommended)", label="Model Size"
              )
              task_type = gr.Dropdown(
                  choices=["Free OCR", "Convert to Markdown"],
-                 value="Convert to Markdown", label="Task Type"
              )
-             eval_mode_checkbox = gr.Checkbox(value=False, label="Enable Evaluation Mode",
-                 info="Solo texto (más rápido). Desmárcalo para ver imagen anotada y markdown.")
              submit_btn = gr.Button("Process Image", variant="primary")
              warm_btn = gr.Button("Warmup BioMedLM-7B (GGUF)")
@@ -335,7 +377,11 @@ with gr.Blocks(title="OpScanIA — DeepSeek-OCR + BioMedLM-7B (GGUF)", theme=gr.
      with gr.Row():
          with gr.Column(scale=2):
              chatbot = gr.Chatbot(label="Asistente OCR (BioMedLM-7B GGUF)", type="messages", height=420)
-             user_in = gr.Textbox(label="Mensaje", placeholder="Escribe tu consulta… (vacío = analiza solo el OCR)", lines=2)
              with gr.Row():
                  send_btn = gr.Button("Enviar", variant="primary")
                  clear_btn = gr.Button("Limpiar")
@@ -353,7 +399,7 @@ with gr.Blocks(title="OpScanIA — DeepSeek-OCR + BioMedLM-7B (GGUF)", theme=gr.
          outputs=[ocr_md_state, ocr_txt_state, md_preview, txt_preview],
      )

-     # Warm up the LLM (downloads/creates the Llama object on GPU)
      warm_btn.click(fn=biomedlm_warmup, outputs=[debug_box])

      # Chat

+ # app.py — OpScanIA: DeepSeek-OCR + BioMedLM-7B (local GGUF via llama.cpp, ZeroGPU-safe) — Gradio 5
+ # -----------------------------------------------------------------------------------------------
+ # • OCR: DeepSeek-OCR (GPU ONLY inside the @spaces.GPU worker; no CUDA init in the main process).
+ # • Chat: BioMedLM-7B in GGUF format via llama.cpp (also ONLY inside the GPU worker).
+ #   No nested GPU calls; everything wrapped in try/except to avoid opaque RuntimeErrors.
+ # • Reinforced Spanish prompt and deterministic generation (grounded in the OCR, no hallucinations).
+ # • Configurable via environment variables: GGUF_REPO, GGUF_FILE, N_CTX, N_BATCH, N_GPU_LAYERS, etc.
+ # -----------------------------------------------------------------------------------------------
+
+ import os, re, tempfile, traceback
  import gradio as gr
  import torch
  from PIL import Image
 
  from llama_cpp import Llama

  # =========================
+ # CONFIG (environment)
  # =========================
+ # --- BioMedLM-7B (GGUF / llama.cpp) ---
+ GGUF_REPO = os.getenv("GGUF_REPO", "").strip()  # e.g. "your_user/biomedlm-7b-gguf" (if hosted on HF)
+ GGUF_FILE = os.getenv("GGUF_FILE", "").strip()  # e.g. "BioMedLM-7B.Q4_K_M.gguf" (exact file name)
+ # Common candidates if GGUF_FILE is not set:
  _GGUF_CANDIDATES = [
      "BioMedLM-7B.Q4_K_M.gguf",
      "BioMedLM-7B.Q5_K_M.gguf",
 
  ]
  GGUF_CANDIDATES = [GGUF_FILE] if GGUF_FILE else _GGUF_CANDIDATES

+ # Performance / memory (tune for the Space's GPU: T4 / A10G)
  N_CTX = int(os.getenv("N_CTX", "4096"))
  N_THREADS = int(os.getenv("N_THREADS", str(os.cpu_count() or 4)))
+ N_GPU_LAYERS = int(os.getenv("N_GPU_LAYERS", "35"))  # a 7B model has ~32 layers; 35 offloads them all
  N_BATCH = int(os.getenv("N_BATCH", "512"))  # raise to 1024 if your GPU allows it

+ # Decoding (deterministic by default)
  GEN_TEMPERATURE = float(os.getenv("TEMPERATURE", "0.0"))
  GEN_TOP_P = float(os.getenv("TOP_P", "1.0"))
  GEN_MAX_NEW_TOKENS = int(os.getenv("MAX_NEW_TOKENS", "384"))
  STOP_SEQS = ["\n###", "\nUser:", "\nAssistant:", "\nUsuario:", "\nAsistente:"]

+ # Token for private HF repos (optional)
+ HF_TOKEN = os.getenv("HF_TOKEN")
+
+ # DeepSeek-OCR: optionally pin a revision/commit to avoid unexpected upstream changes
+ DS_OCR_REV = os.getenv("DS_OCR_REV", None)  # e.g., "2b6f6c2..."

  # =========================
+ # Text / prompt utilities
  # =========================

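For reference, a minimal sketch of plausible values for these knobs (the repo and file names are hypothetical; on a Space they belong in Settings → Variables and secrets, not in code):

    # Hypothetical example configuration — adjust to your own GGUF hosting:
    #   GGUF_REPO=your-user/biomedlm-7b-gguf   # HF repo that holds the .gguf
    #   GGUF_FILE=BioMedLM-7B.Q4_K_M.gguf      # exact file name inside that repo
    #   N_GPU_LAYERS=35                        # offload everything on a 7B; lower it if VRAM is tight
    #   N_BATCH=1024                           # only if the GPU has headroom
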
  def _truncate(s: str, n=3000):
      s = (s or "")
      return s if len(s) <= n else s[:n]

  def _clean_ocr(s: str) -> str:
+     if not s:
+         return ""
+     s = re.sub(r"[^\S\r\n]+", " ", s)  # collapse spaces/tabs (keeps line breaks)
+     s = re.sub(r"(\{#Sec\d+\}|#+\w*)", " ", s)  # stray anchors/headers
+     s = re.sub(r"\s{2,}", " ", s)
      lines = []
      for par in s.splitlines():
          par = par.strip()

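A quick illustration of the three regexes above on a made-up OCR fragment (the first pass collapses runs of spaces/tabs while keeping line breaks; the second drops `{#SecN}` anchors and stray `#` headers):

    # Illustration only — hypothetical OCR fragment:
    # _clean_ocr("Valor:    12.3   g/dL {#Sec2}")  ≈  "Valor: 12.3 g/dL"
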
      )
      return prompt

+ def _to_chatml(system_prompt, user_prompt):
+     # Minimal ChatML-style message list for llama.cpp
+     return [
+         {"role": "system", "content": system_prompt},
+         {"role": "user", "content": user_prompt},
+     ]
+
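On a toy input the helper simply yields the two-message list that llama.cpp's chat API consumes:

    # _to_chatml(SYSTEM_INSTR, "OCR:\n… Pregunta: …") returns:
    # [{"role": "system", "content": SYSTEM_INSTR},
    #  {"role": "user",   "content": "OCR:\n… Pregunta: …"}]
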
  # =========================
  # BioMedLM-7B GGUF — llama.cpp (GPU only inside the worker)
  # =========================
+ _llm = None
+ _llm_name = None
+
  def _download_gguf_path():
+     """Look for the .gguf on HF (with a token if needed) or locally (the Space's Files)."""
      last_err = None
      if GGUF_REPO:
          for fname in GGUF_CANDIDATES:
              try:
+                 path = hf_hub_download(repo_id=GGUF_REPO, filename=fname, token=HF_TOKEN)
                  return path, f"{GGUF_REPO}:{fname}"
              except Exception as e:
                  last_err = e
+     # Fallback: a file uploaded to the Space (Files tab)
      for fname in GGUF_CANDIDATES:
          local_path = os.path.join(os.getcwd(), fname)
          if os.path.exists(local_path):
              return local_path, f"./{fname}"
+     raise RuntimeError(f"No se encontró el GGUF. Configura GGUF_REPO/GGUF_FILE o sube el .gguf. Último error: {last_err}")

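Since `hf_hub_download` caches under the standard Hugging Face cache directory, the download cost is paid once per machine; an optional sketch (hypothetical repo/file names) for pre-fetching the weights outside the GPU worker:

    from huggingface_hub import hf_hub_download
    path = hf_hub_download(
        repo_id="your-user/biomedlm-7b-gguf",  # hypothetical repo
        filename="BioMedLM-7B.Q4_K_M.gguf",    # hypothetical file
        token=os.getenv("HF_TOKEN"),           # only needed for private repos
    )
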
+ def _ensure_llm():
+     """Initialize llama.cpp in the SAME worker; never lets an exception escape."""
      global _llm, _llm_name
      if _llm is not None:
+         return True, f"warm (reusing {_llm_name})"
+     try:
+         gguf_path, used = _download_gguf_path()
+         _llm = Llama(
+             model_path=gguf_path,
+             n_ctx=N_CTX,
+             n_threads=N_THREADS,
+             n_gpu_layers=N_GPU_LAYERS,
+             n_batch=N_BATCH,
+             verbose=False,
+         )
+         _llm_name = used
+         return True, f"loaded {used}"
+     except Exception as e:
+         return False, f"[{e.__class__.__name__}] {str(e) or repr(e)}"

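If hard-coding 35 layers feels brittle, llama-cpp-python also accepts `n_gpu_layers=-1` to mean "offload every layer"; a one-line variant of the constructor above, under that assumption:

    _llm = Llama(model_path=gguf_path, n_ctx=N_CTX, n_gpu_layers=-1, n_batch=N_BATCH, verbose=False)
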
+ @spaces.GPU
+ def biomedlm_warmup():
+     """Optional (manual) warmup — NOT called from another GPU function."""
+     ok, msg = _ensure_llm()
+     return ("OK::" if ok else "ERR::") + msg

  @spaces.GPU
+ def biomedlm_chat(ocr_md, ocr_txt, user_msg,
+                   temperature=GEN_TEMPERATURE, top_p=GEN_TOP_P, max_tokens=GEN_MAX_NEW_TOKENS):
+     """Chat on the GPU; everything wrapped in try/except to avoid opaque worker RuntimeErrors."""
      try:
+         ok, msg = _ensure_llm()
+         if not ok:
+             return "ERR::No se pudo inicializar el modelo GGUF -> " + msg
+
+         prompt_user = build_user_prompt(ocr_md, ocr_txt, user_msg)
+         messages = _to_chatml(SYSTEM_INSTR, prompt_user)
+
+         try:
+             out = _llm.create_chat_completion(
+                 messages=messages,
+                 temperature=temperature,
+                 top_p=top_p,
+                 max_tokens=max_tokens,
+                 stop=STOP_SEQS,
+             )
+             ans = (out["choices"][0]["message"]["content"] or "").strip()
+             return "OK::" + ans
+         except Exception as e:
+             return f"ERR::[Inferencia] {e.__class__.__name__}: {str(e) or repr(e)}"
+
      except Exception as e:
+         return f"ERR::[Worker] {e.__class__.__name__}: {str(e) or repr(e)}"

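Note the convention: the `@spaces.GPU` functions return plain "OK::"/"ERR::"-prefixed strings instead of raising, so the non-GPU caller can branch safely; an illustrative consumer:

    res = biomedlm_chat(ocr_md, ocr_txt, "¿Qué indica este resultado?")  # illustrative inputs
    if str(res).startswith("OK::"):
        answer = res[4:]
    else:
        detail = res[5:]  # text after "ERR::", surfaced in the Debug panel
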
  # =========================
+ # DeepSeek-OCR (GPU only inside the worker)
  # =========================
  def _load_ocr_model():
      model_name = "deepseek-ai/DeepSeek-OCR"
 
      kwargs = dict(
          _attn_implementation=os.getenv("OCR_ATTN_IMPL", "flash_attention_2"),
          trust_remote_code=True,
+         use_safetensors=True,
      )
      if DS_OCR_REV:
          kwargs["revision"] = DS_OCR_REV
 
          mdl = AutoModel.from_pretrained(model_name, **kwargs).eval()
          return tok, mdl
      except Exception as e:
+         # Fallback if FlashAttention-2 is unavailable
          if any(k in str(e).lower() for k in ["flash_attn", "flashattention2", "flash_attention_2"]):
              kwargs["_attn_implementation"] = "eager"
              mdl = AutoModel.from_pretrained(model_name, **kwargs).eval()
 
  def process_image(image, model_size, task_type, is_eval_mode):
      if image is None:
          return None, "Please upload an image first.", "Please upload an image first."
+
+     # Move to the GPU ONLY inside the worker
      if torch.cuda.is_available():
          dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16
          model_device = model.to(dtype).to("cuda")
 

      result_image = None
      if os.path.exists(image_result_path):
+         result_image = Image.open(image_result_path)
+         result_image.load()

      text_result = plain_text if plain_text else markdown_content
      return result_image, markdown_content, text_result
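Aside: `Image.open` is lazy in PIL, so the explicit `load()` forces the pixel data to be decoded while the temporary result file still exists:

    img = Image.open(image_result_path)  # lazy: only the header is read here
    img.load()                           # full decode; the temp file may be removed afterwards
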

  # =========================
+ # Chat orchestrator (NOT GPU)
  # =========================
  def biomedlm_reply(user_msg, chat_msgs, ocr_md, ocr_txt):
      try:
+         res = biomedlm_chat(
+             ocr_md,
+             ocr_txt,
+             user_msg,
+             temperature=GEN_TEMPERATURE,
+             top_p=GEN_TOP_P,
+             max_tokens=GEN_MAX_NEW_TOKENS,
+         )
+         s = str(res)
+         if s.startswith("OK::"):
+             answer = s[4:]
              updated = (chat_msgs or []) + [
                  {"role": "user", "content": user_msg or "(analizar solo OCR)"},
+                 {"role": "assistant", "content": answer},
              ]
              return updated, "", gr.update(value="")
          else:
+             # Surface the worker's FULL error message in the Debug panel
+             err_msg = s[5:] if s.startswith("ERR::") else s
              updated = (chat_msgs or []) + [
                  {"role": "user", "content": user_msg or ""},
+                 {"role": "assistant", "content": "⚠️ Error LLM (local). Revisa el panel de debug."},
              ]
              return updated, "", gr.update(value=err_msg)
      except Exception as e:
          tb = traceback.format_exc(limit=2)
          updated = (chat_msgs or []) + [
              {"role": "user", "content": user_msg or ""},
+             {"role": "assistant", "content": f"⚠️ Error LLM: {e}"},
          ]
          return updated, "", gr.update(value=f"{e}\n{tb}")
 
 
  with gr.Blocks(title="OpScanIA — DeepSeek-OCR + BioMedLM-7B (GGUF)", theme=gr.themes.Soft()) as demo:
      gr.Markdown(
          """
+ # DeepSeek-OCR → Chat Clínico con **BioMedLM-7B (GGUF local, llama.cpp)**
  1) **Sube una imagen** y corre **OCR** (imagen anotada, Markdown y texto).
+ 2) **Chatea** con **BioMedLM-7B GGUF** usando automáticamente el **OCR** como contexto.
  *Uso educativo; no reemplaza consejo médico.*
  """
      )
 

      with gr.Row():
          with gr.Column(scale=1):
+             image_input = gr.Image(
+                 type="pil",
+                 label="Upload Image",
+                 sources=["upload", "clipboard", "webcam"]
+             )
              model_size = gr.Dropdown(
                  choices=["Tiny", "Small", "Base", "Large", "Gundam (Recommended)"],
+                 value="Gundam (Recommended)",
+                 label="Model Size"
              )
              task_type = gr.Dropdown(
                  choices=["Free OCR", "Convert to Markdown"],
+                 value="Convert to Markdown",
+                 label="Task Type"
+             )
+             eval_mode_checkbox = gr.Checkbox(
+                 value=False,
+                 label="Enable Evaluation Mode",
+                 info="Solo texto (más rápido). Desmárcalo para ver imagen anotada y markdown."
              )
              submit_btn = gr.Button("Process Image", variant="primary")
              warm_btn = gr.Button("Warmup BioMedLM-7B (GGUF)")
 
 
      with gr.Row():
          with gr.Column(scale=2):
              chatbot = gr.Chatbot(label="Asistente OCR (BioMedLM-7B GGUF)", type="messages", height=420)
+             user_in = gr.Textbox(
+                 label="Mensaje",
+                 placeholder="Escribe tu consulta… (vacío = analiza solo el OCR)",
+                 lines=2
+             )
              with gr.Row():
                  send_btn = gr.Button("Enviar", variant="primary")
                  clear_btn = gr.Button("Limpiar")
 
          outputs=[ocr_md_state, ocr_txt_state, md_preview, txt_preview],
      )

+     # Warm up the LLM (downloads/loads the GGUF and creates the Llama object on GPU)
      warm_btn.click(fn=biomedlm_warmup, outputs=[debug_box])

      # Chat