Geoeasy committed
Commit 8d23077 · verified · Parent: a4b10db

Update app.py

Files changed (1): app.py (+96, -59)

app.py CHANGED
@@ -5,7 +5,7 @@ import io
 import time
 import json
 from collections import Counter
-#
+
 import gradio as gr
 import numpy as np
 import faiss
@@ -73,7 +73,7 @@ index = faiss.read_index(INDEX_FILE)
 chunks = np.load(CHUNKS_FILE, allow_pickle=True)
 
 # ========= Embeddings =========
-embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
+embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
 
 # (NEW) precompute the CV's mean embedding (for the global match score)
 _cv_emb_mean = None
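
Note: the next hunk adds normalize_embeddings=True to the query encoder, which gives cosine scores only if the FAISS index itself stores normalized vectors. A minimal sketch of a compatible index build, assumed here since the build script is not part of this commit:

import faiss
import numpy as np
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("all-MiniLM-L6-v2")
texts = ["CV chunk one...", "CV chunk two..."]  # placeholder chunks
emb = model.encode(texts, convert_to_numpy=True, normalize_embeddings=True)
index = faiss.IndexFlatIP(emb.shape[1])  # inner product == cosine on unit vectors
index.add(emb.astype(np.float32))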
@@ -85,52 +85,80 @@ def _ensure_cv_mean():
     return _cv_emb_mean
 
 def retrieve_context(query: str, k: int = 4) -> str:
-    q_emb = embedding_model.encode([query], convert_to_numpy=True)
+    q_emb = embedding_model.encode([query], convert_to_numpy=True, normalize_embeddings=True)
     _, I = index.search(q_emb, k)
     return "\n---\n".join(chunks[i] for i in I[0])
 
 # ========= Chat state =========
-dialog_history: list[tuple[str, str]] = []
+# Now OpenAI-style message dicts, compatible with gr.Chatbot(type="messages")
+dialog_history: list[dict] = []
 
 # ========= Helpers – NVIDIA chat =========
 def nv_stream(messages, temperature, top_p, max_tokens):
-    """Streaming for the main chat."""
+    """Robust streaming (skips chunks without choices and deltas without content)."""
     assistant_reply = ""
     stream = client.chat.completions.create(
-        model=CHAT_MODEL, messages=messages,
-        temperature=temperature, top_p=top_p, max_tokens=max_tokens, stream=True
+        model=CHAT_MODEL,
+        messages=messages,
+        temperature=temperature,
+        top_p=top_p,
+        max_tokens=max_tokens,
+        stream=True,
     )
+
     for chunk in stream:
-        delta = chunk.choices[0].delta
-        if hasattr(delta, "content") and delta.content:
-            assistant_reply += delta.content
+        # Some chunks arrive without "choices" (keep-alive / metadata)
+        choices = getattr(chunk, "choices", None)
+        if not choices:
+            continue
+        if len(choices) == 0:
+            continue
+
+        choice0 = choices[0]
+        delta = getattr(choice0, "delta", None)
+        if delta is None:
+            continue
+
+        content = getattr(delta, "content", None)
+        if content:
+            assistant_reply += content
             yield assistant_reply
 
+        finish_reason = getattr(choice0, "finish_reason", None)
+        if finish_reason in ("stop", "length"):
+            break
+
 def nv_complete(messages, temperature, top_p, max_tokens) -> str:
     """One-shot completion (for PDFs and utilities)."""
     resp = client.chat.completions.create(
-        model=CHAT_MODEL, messages=messages,
-        temperature=temperature, top_p=top_p, max_tokens=max_tokens, stream=False
+        model=CHAT_MODEL,
+        messages=messages,
+        temperature=temperature,
+        top_p=top_p,
+        max_tokens=max_tokens,
+        stream=False,
    )
     return resp.choices[0].message.content.strip()
 
 # ========= PDF utils (NEW) =========
 def _to_pdf_bytes(title: str, body: str) -> bytes:
-    # uses a minimal PDF without extra dependencies (plain text on one "sheet");
-    # for better typography, swap in reportlab.
     from reportlab.pdfgen import canvas
     from reportlab.lib.pagesizes import A4
     from reportlab.lib.utils import simpleSplit
+
     buf = io.BytesIO()
     c = canvas.Canvas(buf, pagesize=A4)
     w, h = A4
     margin = 50
+
     c.setTitle(title)
     c.setFont("Helvetica-Bold", 14)
     c.drawString(margin, h - margin, title)
+
     c.setFont("Helvetica", 11)
     y = h - margin - 30
     lines = simpleSplit(body, "Helvetica", 11, w - 2 * margin)
+
     for line in lines:
         if y < margin:
             c.showPage()
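
The rewritten nv_stream guards every attribute access before use. The same guard pattern can be exercised without an API client, using stub objects (illustrative only, not part of the commit):

from types import SimpleNamespace as NS

stream = [
    NS(choices=None),  # keep-alive chunk without choices
    NS(choices=[NS(delta=NS(content="Olá"), finish_reason=None)]),
    NS(choices=[NS(delta=NS(content=None), finish_reason="stop")]),
]
reply = ""
for chunk in stream:
    choices = getattr(chunk, "choices", None)
    if not choices:
        continue
    delta = getattr(choices[0], "delta", None)
    content = getattr(delta, "content", None) if delta else None
    if content:
        reply += content
    if getattr(choices[0], "finish_reason", None) in ("stop", "length"):
        break
print(reply)  # -> Olá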
@@ -138,6 +166,7 @@ def _to_pdf_bytes(title: str, body: str) -> bytes:
             y = h - margin
         c.drawString(margin, y, line)
         y -= 15
+
     c.showPage()
     c.save()
     buf.seek(0)
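
Since gr.File components take a file path, the bytes returned by _to_pdf_bytes presumably get written to a temporary file before reaching the UI; that glue is not shown in this diff. A sketch under that assumption (hypothetical helper):

import tempfile

def _bytes_to_tmp_pdf(pdf_bytes: bytes) -> str:
    # hypothetical helper: persist the in-memory PDF and return its path
    tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
    tmp.write(pdf_bytes)
    tmp.close()
    return tmp.name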
@@ -156,23 +185,28 @@ def chatbot(user_input: str, temperature: float, top_p: float, max_tokens: int):
             "You are an assistant specialized in the candidate's CV. "
             "Use ONLY the retrieved context to answer. If you don't know, say you don't know.\n\n"
             f"=== Retrieved Context ===\n{context}\n\n"
-        )
+        ),
     }
 
-    messages = [system_msg]
-    for u, a in dialog_history:
-        messages.extend([{"role": "user", "content": u}, {"role": "assistant", "content": a}])
-    messages.append({"role": "user", "content": user_input})
+    # messages = system + history + user
+    messages = [system_msg] + dialog_history + [{"role": "user", "content": user_input}]
 
-    # Stream (keeps the current UX)
     reply_full = ""
     try:
         for partial in nv_stream(messages, temperature, top_p, max_tokens):
             reply_full = partial
-        dialog_history.append((user_input, reply_full))
+
+        dialog_history.extend([
+            {"role": "user", "content": user_input},
+            {"role": "assistant", "content": reply_full},
+        ])
+
     except OpenAIError as e:
         reply_full = f"⚠️ API Error: {e.__class__.__name__}: {e}"
-        dialog_history.append((user_input, reply_full))
+        dialog_history.extend([
+            {"role": "user", "content": user_input},
+            {"role": "assistant", "content": reply_full},
+        ])
 
     return dialog_history, ""
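
After this change the same dialog_history list serves double duty: it is spliced into the API payload and returned directly as the gr.Chatbot(type="messages") value. One completed turn looks like:

dialog_history = [
    {"role": "user", "content": "Which technologies does the candidate use?"},
    {"role": "assistant", "content": "Python, FAISS, PyTorch..."},
]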

@@ -185,13 +219,13 @@ def clear_history():
 MINI_BIO_STYLES = {
     "Acadêmico": "Estilo acadêmico, objetivo, cite publicações/projetos e área de pesquisa.",
     "Corporativo": "Tom profissional para negócios, destaque resultados, KPIs e liderança.",
-    "Pitch curto": "3-4 frases diretas, chamando atenção para conquistas-chave."
+    "Pitch curto": "3-4 frases diretas, chamando atenção para conquistas-chave.",
 }
 
 def generate_mini_bio(style_key: str, temperature: float, top_p: float, max_tokens: int):
     if style_key not in MINI_BIO_STYLES:
         return None, "Selecione um formato de mini-bio."
-    # use the top "generic" contexts: a neutral query to cover the CV
+
     context = retrieve_context("resumo do currículo, principais resultados e tecnologias", k=8)
     system_msg = {
         "role": "system",
@@ -199,12 +233,13 @@ def generate_mini_bio(style_key: str, temperature: float, top_p: float, max_tokens: int):
             "Use apenas o contexto do CV para gerar uma mini-bio. "
             "Não invente fatos. Seja fiel ao conteúdo.\n\n"
             f"=== Contexto do CV ===\n{context}\n"
-        )
+        ),
     }
     user_msg = {
         "role": "user",
-        "content": f"Produza uma mini-bio em português. Estilo: {MINI_BIO_STYLES[style_key]} (150-220 palavras)."
+        "content": f"Produza uma mini-bio em português. Estilo: {MINI_BIO_STYLES[style_key]} (150-220 palavras).",
     }
+
     try:
         text = nv_complete([system_msg, user_msg], temperature, top_p, max_tokens)
         pdf_bytes = _to_pdf_bytes(f"Mini-bio ({style_key})", text)
@@ -219,6 +254,7 @@ def generate_mini_bio(style_key: str, temperature: float, top_p: float, max_tokens: int):
 def generate_cover_letter(job_desc: str, temperature: float, top_p: float, max_tokens: int):
     if not job_desc or not job_desc.strip():
         return None, "Cole a descrição da vaga primeiro."
+
     context = retrieve_context(job_desc, k=8)
     sys = {
         "role": "system",
@@ -226,9 +262,13 @@ def generate_cover_letter(job_desc: str, temperature: float, top_p: float, max_tokens: int):
             "Gere uma carta de motivação baseada SOMENTE no CV (contexto) e na vaga. "
             "Inclua 2-3 conquistas mensuráveis e tecnologias relevantes. 250-350 palavras.\n\n"
             f"=== Contexto (CV) ===\n{context}\n"
-        )
+        ),
     }
-    usr = {"role": "user", "content": f"Descrição da vaga:\n{job_desc}\n\nGerar carta em PT-BR/PT-PT, tom profissional."}
+    usr = {
+        "role": "user",
+        "content": f"Descrição da vaga:\n{job_desc}\n\nGerar carta em PT-BR/PT-PT, tom profissional.",
+    }
+
     try:
         text = nv_complete([sys, usr], temperature, top_p, max_tokens)
         pdf_bytes = _to_pdf_bytes("Carta de Motivação", text)
@@ -250,8 +290,8 @@ def compute_match_score(job_desc: str):
     # Global similarity
     cv_mean = _ensure_cv_mean()
     job_emb = embedding_model.encode([job_desc], convert_to_numpy=True, normalize_embeddings=True)[0]
-    sim = float(np.dot(cv_mean, job_emb))  # [-1,1] with normalized embeddings
-    sim_norm = max(0.0, min(1.0, (sim + 1) / 2))  # [0,1]
+    sim = float(np.dot(cv_mean, job_emb))         # [-1,1]
+    sim_norm = max(0.0, min(1.0, (sim + 1) / 2))  # [0,1]
 
     # Requirements/coverage (simple heuristic)
     req_tokens = re.findall(r"[a-zA-ZÀ-ÿ0-9\-\+#\.]{3,}", job_desc.lower())
@@ -261,7 +301,7 @@ def compute_match_score(job_desc: str):
 
     retrieved = retrieve_context(job_desc, k=8).lower()
     hits = sum(1 for w in most_common if w in retrieved)
-    coverage = hits / max(1, len(most_common))  # [0,1]
+    coverage = hits / max(1, len(most_common))
 
     score = int(round(100 * (0.6 * sim_norm + 0.4 * coverage)))
     explain = (
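
For intuition, the final score blends the two signals 60/40. With made-up numbers:

sim = 0.40                               # raw cosine similarity
sim_norm = (sim + 1) / 2                 # -> 0.70 (clamp omitted; already in [0,1])
coverage = 5 / 10                        # 5 of 10 keywords found -> 0.50
score = int(round(100 * (0.6 * sim_norm + 0.4 * coverage)))  # -> 62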
@@ -272,30 +312,30 @@ def compute_match_score(job_desc: str):
     return explain
 
 # ========= (NEW) CV metrics =========
-TECH_HINTS = ["python","r","faiss","qdrant","pytorch","tensorflow","scikit","gradio","streamlit",
-              "gis","qgis","gdal","grass","sentinel","landsat","process mining","rag","vit","mask2former"]
-
-COUNTRY_HINTS = ["portugal","brasil","germany","alemanh", "spain","espanha","europe","europa","france","italy","uk","usa"]
+TECH_HINTS = [
+    "python","r","faiss","qdrant","pytorch","tensorflow","scikit","gradio","streamlit",
+    "gis","qgis","gdal","grass","sentinel","landsat","process mining","rag","vit","mask2former"
+]
+COUNTRY_HINTS = ["portugal","brasil","germany","alemanh","spain","espanha","europe","europa","france","italy","uk","usa"]
 
 def extract_metrics():
     text_all = " \n".join(map(str, chunks))
-    # Approximate publication count: keyword search
+
     pubs = len(re.findall(r"\b(publica(?:ç(?:ões|ao|ão)|dos?)|paper|article|artigo|ieee|springer|acm)\b", text_all, flags=re.I))
-    # Years present (to sketch the timeline)
     years = sorted(set(re.findall(r"\b(20\d{2}|19\d{2})\b", text_all)))
-    # Technologies (simple counts)
+
     tech_counts = {t: len(re.findall(re.escape(t), text_all, flags=re.I)) for t in TECH_HINTS}
-    top_tech = sorted([k for k,v in tech_counts.items() if v>0], key=lambda k: tech_counts[k], reverse=True)[:8]
-    # International presence
+    top_tech = sorted([k for k,v in tech_counts.items() if v > 0], key=lambda k: tech_counts[k], reverse=True)[:8]
+
     intl_hits = sum(len(re.findall(c, text_all, flags=re.I)) for c in COUNTRY_HINTS)
 
     md = [
         "### Métricas do CV (estimativas)\n",
         f"- **Publicações (sinalizadas)**: ~{pubs}",
-        f"- **Anos mencionados**: {', '.join(years[:12])}{'…' if len(years)>12 else ''}",
+        f"- **Anos mencionados**: {', '.join(years[:12])}{'…' if len(years) > 12 else ''}",
         f"- **Tecnologias mais citadas**: {', '.join(top_tech) if top_tech else '—'}",
         f"- **Menções internacionais**: ~{intl_hits}",
-        "\n> Observação: estimativas baseadas em busca por palavras-chave nos trechos indexados."
+        "\n> Observação: estimativas baseadas em busca por palavras-chave nos trechos indexados.",
     ]
     return "\n".join(md)
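
A quick, illustrative sanity check of the publication regex above (not part of the commit):

import re
sample = "Artigo publicado na IEEE; another paper in Springer."
pat = r"\b(publica(?:ç(?:ões|ao|ão)|dos?)|paper|article|artigo|ieee|springer|acm)\b"
print(len(re.findall(pat, sample, flags=re.I)))  # 5: Artigo, publicado, IEEE, paper, Springer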

@@ -314,15 +354,15 @@ with gr.Blocks(title=APP_TITLE, css=custom_css, theme=gr.themes.Base()) as demo:
     with gr.Row():
         # Main chat
         with gr.Column(scale=3):
-            chatbot_ui = gr.Chatbot(type="tuples", elem_id="chat-window")
-            txt = gr.Textbox(placeholder="Digite sua pergunta…", lines=2)
-            btn_send = gr.Button("Enviar", variant="primary")
-            btn_clear = gr.Button("Limpar")
+            chatbot_ui = gr.Chatbot(type="messages", elem_id="chat-window")
+            txt = gr.Textbox(placeholder="Digite sua pergunta…", lines=2)
+            btn_send = gr.Button("Enviar", variant="primary")
+            btn_clear = gr.Button("Limpar")
 
             with gr.Accordion("Parâmetros avançados", open=False):
                 temperature = gr.Slider(0, 1, value=0.6, label="Temperature")
-                top_p = gr.Slider(0, 1, value=0.95, label="Top-p")
-                max_tokens = gr.Slider(64, 2048, value=512, step=64, label="Max Tokens")
+                top_p = gr.Slider(0, 1, value=0.95, label="Top-p")
+                max_tokens = gr.Slider(64, 2048, value=512, step=64, label="Max Tokens")
 
             btn_send.click(chatbot, [txt, temperature, top_p, max_tokens], [chatbot_ui, txt])
             txt.submit(chatbot, [txt, temperature, top_p, max_tokens], [chatbot_ui, txt])
@@ -337,7 +377,6 @@ with gr.Blocks(title=APP_TITLE, css=custom_css, theme=gr.themes.Base()) as demo:
             for q in SUGGESTION_QUESTIONS:
                 gr.Button(q).click(lambda suggestion=q: suggestion, outputs=[txt])
 
-            # (NEW) suggestions by theme
             gr.Markdown("---")
             gr.Markdown("### Sugestões por tema")
             for theme, qs in SUGGESTIONS_THEMES.items():
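
Aside: the suggestion buttons rely on the default-argument idiom (suggestion=q, s=q) to pin each button to its own question. Without it, Python's late binding would make every button paste the last question (illustration, not part of the commit):

make_bad = [lambda: q for q in ["a", "b"]]
make_ok = [lambda q=q: q for q in ["a", "b"]]
print(make_bad[0](), make_bad[1]())  # b b  (late binding)
print(make_ok[0](), make_ok[1]())    # a b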
@@ -345,34 +384,32 @@ with gr.Blocks(title=APP_TITLE, css=custom_css, theme=gr.themes.Base()) as demo:
                 for q in qs:
                     gr.Button(q).click(lambda s=q: s, outputs=[txt])
 
-            # (NEW) Mini-bios
             gr.Markdown("---")
             gr.Markdown("### Exportação rápida – Mini-bio (PDF)")
             bio_style = gr.Dropdown(choices=list(MINI_BIO_STYLES.keys()), value="Corporativo", label="Formato")
-            btn_bio = gr.Button("Gerar Mini-bio (PDF)")
-            bio_file = gr.File(label="Mini-bio gerada")
-            bio_msg = gr.Markdown()
+            btn_bio = gr.Button("Gerar Mini-bio (PDF)")
+            bio_file = gr.File(label="Mini-bio gerada")
+            bio_msg = gr.Markdown()
             btn_bio.click(generate_mini_bio, [bio_style, temperature, top_p, max_tokens], [bio_file, bio_msg])
 
-            # (NEW) Application assistant
             gr.Markdown("---")
             gr.Markdown("### Assistente de candidatura")
-            job_desc = gr.Textbox(label="Cole a descrição da vaga", lines=8, placeholder="Cole aqui a JD…")
+            job_desc = gr.Textbox(label="Cole a descrição da vaga", lines=8, placeholder="Cole aqui a JD…")
             with gr.Row():
                 btn_cover = gr.Button("Gerar Carta (PDF)")
                 btn_match = gr.Button("Calcular Match Score")
+
             cover_file = gr.File(label="Carta gerada")
-            cover_msg = gr.Markdown()
-            match_out = gr.Markdown()
+            cover_msg = gr.Markdown()
+            match_out = gr.Markdown()
 
             btn_cover.click(generate_cover_letter, [job_desc, temperature, top_p, max_tokens], [cover_file, cover_msg])
             btn_match.click(lambda jd: compute_match_score(jd), [job_desc], [match_out])
 
-            # (NEW) CV metrics
             gr.Markdown("---")
             gr.Markdown("### Métricas do CV")
             btn_metrics = gr.Button("Recalcular métricas")
-            metrics_md = gr.Markdown(value=extract_metrics())
+            metrics_md = gr.Markdown(value=extract_metrics())
             btn_metrics.click(lambda: extract_metrics(), [], [metrics_md])
 
             gr.Markdown("---")
 