Geoeasy committed on
Commit
91afbbb
·
verified ·
1 Parent(s): 8929e97

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +280 -91
app.py CHANGED
@@ -1,31 +1,26 @@
1
  import os
2
  from pathlib import Path
 
 
 
 
 
 
3
  import gradio as gr
4
  import numpy as np
5
  import faiss
6
  from sentence_transformers import SentenceTransformer
7
  from openai import OpenAI, OpenAIError
8
 
9
- # ----------------------------
10
- # NVIDIA API Key (via Space Variables & Secrets)
11
- # ----------------------------
12
  NV_API_KEY = os.environ.get("NV_API_KEY")
13
  if not NV_API_KEY:
14
- raise RuntimeError(
15
- "🔒 NV_API_KEY not set. "
16
- "Configure it in Settings → Variables & Secrets."
17
- )
18
 
19
- # NVIDIA-compatible OpenAI client for chat
20
- client = OpenAI(
21
- base_url="https://integrate.api.nvidia.com/v1",
22
- api_key=NV_API_KEY
23
- )
24
  CHAT_MODEL = "meta/llama3-8b-instruct"
25
 
26
- # ----------------------------
27
- # Application configuration
28
- # ----------------------------
29
  APP_TITLE = "CVchat – Ronaldo Menezes"
30
  INTRO = (
31
  "👋 Olá! Eu sou o CVchat do Ronaldo Menezes.\n"
@@ -45,113 +40,271 @@ SUGGESTION_QUESTIONS = [
45
  "Certificações?",
46
  ]
47
 
48
- # Paths for FAISS index and chunks
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  INDEX_FILE = "r_docs.index"
50
  CHUNKS_FILE = "r_chunks.npy"
51
  PDF_PATH = "CV-Ronaldo_Menezes_2025_06.pdf"
52
 
53
- # Verify that the index files exist
54
  if not Path(INDEX_FILE).exists() or not Path(CHUNKS_FILE).exists():
55
- raise FileNotFoundError(
56
- "Index not found. Run build_index.py to generate r_docs.index and r_chunks.npy."
57
- )
58
 
59
- # Load FAISS index and chunks
60
  index = faiss.read_index(INDEX_FILE)
61
  chunks = np.load(CHUNKS_FILE, allow_pickle=True)
62
 
63
- # ----------------------------
64
- # Context retrieval (local embeddings)
65
- # ----------------------------
66
  embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
67
 
 
 
 
 
 
 
 
 
 
68
  def retrieve_context(query: str, k: int = 4) -> str:
69
  q_emb = embedding_model.encode([query], convert_to_numpy=True)
70
  _, I = index.search(q_emb, k)
71
  return "\n---\n".join(chunks[i] for i in I[0])
72
 
73
- # ----------------------------
74
- # Chatbot state
75
- # ----------------------------
76
  dialog_history: list[tuple[str, str]] = []
77
 
78
- # ----------------------------
79
- # Chat function
80
- # ----------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  def chatbot(user_input: str, temperature: float, top_p: float, max_tokens: int):
82
  global dialog_history
83
  if not user_input:
84
  return dialog_history, ""
85
 
86
- # Retrieve context
87
  context = retrieve_context(user_input)
88
  system_msg = {
89
  "role": "system",
90
  "content": (
91
- "You are an assistant specialized in R packages. "
92
- "Use only the context below to answer. If you don't know, say so.\n\n"
93
  f"=== Retrieved Context ===\n{context}\n\n"
94
  )
95
  }
96
 
97
- # Build messages list
98
  messages = [system_msg]
99
  for u, a in dialog_history:
100
- messages.extend([
101
- {"role": "user", "content": u},
102
- {"role": "assistant", "content": a}
103
- ])
104
  messages.append({"role": "user", "content": user_input})
105
 
106
- # Call NVIDIA chat API in streaming mode
107
- assistant_reply = ""
108
  try:
109
- stream = client.chat.completions.create(
110
- model=CHAT_MODEL,
111
- messages=messages,
112
- temperature=temperature,
113
- top_p=top_p,
114
- max_tokens=max_tokens,
115
- stream=True
116
- )
117
- for chunk in stream:
118
- delta = chunk.choices[0].delta
119
- if hasattr(delta, "content") and delta.content:
120
- assistant_reply += delta.content
121
  except OpenAIError as e:
122
- assistant_reply = f"⚠️ API Error: {e.__class__.__name__}: {e}"
 
123
 
124
- # Update history and return
125
- dialog_history.append((user_input, assistant_reply))
126
  return dialog_history, ""
127
 
128
- # ----------------------------
129
- # Clear history
130
- # ----------------------------
131
  def clear_history():
132
  global dialog_history
133
  dialog_history = []
134
  return [], ""
135
 
136
- # ----------------------------
137
- # Gradio UI
138
- # ----------------------------
139
- custom_css = r"""
140
- :root {
141
- --primary: #4a90e2;
142
- --accent: #50e3c2;
143
- --bg-light: #f9f9f9;
144
- --txt-dark: #333333;
145
- --radius: 8px;
146
- --spacing: 1rem;
147
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
148
  body { background: var(--bg-light); color: var(--txt-dark); font-family: 'Helvetica Neue', sans-serif; }
149
  #chat-window { height: 65vh; overflow-y: auto; padding: var(--spacing); border: 1px solid #ddd; border-radius: var(--radius); }
150
- #input-area { display: flex; margin-top: var(--spacing); }
151
- #user-input { flex: 1; padding: 0.6rem; border: 1px solid #ccc; border-radius: var(--radius) 0 0 var(--radius); }
152
- #send-button { padding: 0 1rem; background: var(--primary); color: white; border: none; border-radius: 0 var(--radius) var(--radius) 0; cursor: pointer; }
153
  .sidebar { background: var(--bg-light); padding: var(--spacing); border-left: 1px solid #eee; }
154
- .sidebar h3 { margin-top: 0; }
155
  """
156
 
157
  with gr.Blocks(title=APP_TITLE, css=custom_css, theme=gr.themes.Base()) as demo:
@@ -159,37 +312,73 @@ with gr.Blocks(title=APP_TITLE, css=custom_css, theme=gr.themes.Base()) as demo:
159
  gr.Markdown(INTRO)
160
 
161
  with gr.Row():
162
- # Main chat column
163
  with gr.Column(scale=3):
164
  chatbot_ui = gr.Chatbot(type="tuples", elem_id="chat-window")
165
- txt = gr.Textbox(placeholder="Digite sua pergunta…", lines=2, elem_id="user-input")
166
- btn = gr.Button("Enviar", elem_id="send-button")
167
- # Advanced settings sliders
168
- temperature = gr.Slider(0, 1, value=0.6, label="Temperature")
169
- top_p = gr.Slider(0, 1, value=0.95, label="Top-p")
170
- max_tokens = gr.Slider(64, 2048, value=512, step=64, label="Max Tokens")
171
-
172
- # Bind events
173
- btn.click(chatbot, [txt, temperature, top_p, max_tokens], [chatbot_ui, txt])
 
174
  txt.submit(chatbot, [txt, temperature, top_p, max_tokens], [chatbot_ui, txt])
175
- gr.Button("Limpar").click(clear_history, [], [chatbot_ui, txt])
176
 
177
- # Sidebar with PDF & suggestions
178
- with gr.Column(scale=1, elem_classes="sidebar"):
179
  if Path(PDF_PATH).exists():
180
  gr.Markdown(f"[📄 Baixar CV em PDF](/file={PDF_PATH})")
 
181
  gr.Markdown("### Sugestões de Perguntas")
182
  for q in SUGGESTION_QUESTIONS:
183
  gr.Button(q).click(lambda suggestion=q: suggestion, outputs=[txt])
 
 
184
  gr.Markdown("---")
185
- gr.Markdown("### Dicas de Exploração do PDF")
186
- gr.Markdown("• Use palavras-chave como 'Process Mining' ou 'GIS' para ir direto à seção relevante.")
187
- gr.Markdown("• Peça detalhes de projetos com financiamento público (CNPQ, QREN).")
 
 
188
 
189
- if __name__ == "__main__":
190
- demo.launch(server_name="0.0.0.0", server_port=7860)
 
 
 
 
 
 
191
 
 
 
 
 
 
 
 
 
 
 
192
 
 
 
193
 
 
 
 
 
 
 
194
 
 
 
 
 
195
 
 
 
 
1
  import os
2
  from pathlib import Path
3
+ import re
4
+ import io
5
+ import time
6
+ import json
7
+ from collections import Counter
8
+
9
  import gradio as gr
10
  import numpy as np
11
  import faiss
12
  from sentence_transformers import SentenceTransformer
13
  from openai import OpenAI, OpenAIError
14
 
15
# ========= NVIDIA API =========
# The key is injected via the Space's "Variables & Secrets" settings; fail fast
# at startup so the app never runs half-configured.
NV_API_KEY = os.environ.get("NV_API_KEY")
if not NV_API_KEY:
    # NOTE: message fixed — previous revision accidentally mixed in the
    # Portuguese "em" ("Configure it em Settings"); restored to English "in".
    raise RuntimeError("🔒 NV_API_KEY not set. Configure it in Settings → Variables & Secrets.")

# OpenAI-compatible client pointed at NVIDIA's hosted inference endpoint.
client = OpenAI(base_url="https://integrate.api.nvidia.com/v1", api_key=NV_API_KEY)
CHAT_MODEL = "meta/llama3-8b-instruct"
22
 
23
+ # ========= App config =========
 
 
24
  APP_TITLE = "CVchat – Ronaldo Menezes"
25
  INTRO = (
26
  "👋 Olá! Eu sou o CVchat do Ronaldo Menezes.\n"
 
40
  "Certificações?",
41
  ]
42
 
43
# (NEW) Theme-grouped question suggestions rendered as sidebar accordions.
# Keys are accordion titles; values are the clickable prompt buttons inside.
SUGGESTIONS_THEMES = {
    "Projetos financiados": [
        "Liste projetos com financiamento público (CNPq, QREN, UE) e resultados.",
        "Qual foi o impacto de projetos financiados (KPIs, prazos, orçamento)?",
    ],
    "Artigos & Publicações": [
        "Quais artigos/publicações mais relevantes e onde foram publicados?",
        "Resumo de publicações sobre sensoriamento remoto (Landsat/Sentinel).",
    ],
    "Habilidades técnicas": [
        "Stack técnica principal (linguagens, libs, cloud, bancos).",
        "Experiência com FAISS, RAG e LLMs na prática.",
    ],
    "Liderança & Gestão": [
        "Experiência liderando equipes/projetos e responsabilidades.",
        "Exemplos de melhorias de processo e resultados mensuráveis.",
    ],
}
62
+
63
# ========= Paths =========
# Artifacts produced offline by build_index.py; the Space only reads them.
INDEX_FILE = "r_docs.index"
CHUNKS_FILE = "r_chunks.npy"
PDF_PATH = "CV-Ronaldo_Menezes_2025_06.pdf"

# Fail fast when the prebuilt index artifacts are absent.
if not (Path(INDEX_FILE).exists() and Path(CHUNKS_FILE).exists()):
    raise FileNotFoundError("Index not found. Run build_index.py to generate r_docs.index and r_chunks.npy.")

# Load the FAISS index and the text chunks it was built from.
index = faiss.read_index(INDEX_FILE)
chunks = np.load(CHUNKS_FILE, allow_pickle=True)
74
 
75
# ========= Embeddings =========
embedding_model = SentenceTransformer('all-MiniLM-L6-v2')

# (NEW) Mean embedding over every CV chunk, used for the global job-match
# score. Computed lazily on first use and cached at module level.
_cv_emb_mean = None

def _ensure_cv_mean():
    """Return the cached mean of the normalized chunk embeddings, computing it once."""
    global _cv_emb_mean
    if _cv_emb_mean is not None:
        return _cv_emb_mean
    all_vecs = embedding_model.encode(list(chunks), convert_to_numpy=True, normalize_embeddings=True)
    _cv_emb_mean = all_vecs.mean(axis=0)
    return _cv_emb_mean
86
+
87
def retrieve_context(query: str, k: int = 4) -> str:
    """Embed *query* and return the top-*k* nearest CV chunks, '---'-separated."""
    # NOTE(review): query embeddings are not normalized here, unlike
    # _ensure_cv_mean — presumably the index was built unnormalized; confirm.
    query_vec = embedding_model.encode([query], convert_to_numpy=True)
    _, neighbor_ids = index.search(query_vec, k)
    hits = [chunks[i] for i in neighbor_ids[0]]
    return "\n---\n".join(hits)
91
 
92
# ========= Chat state =========
# (user, assistant) turn pairs for the current session; mutated in place by
# chatbot() and reset by clear_history(). Shared across all visitors.
dialog_history: list[tuple[str, str]] = []
94
 
95
# ========= Helpers – NVIDIA chat =========
def nv_stream(messages, temperature, top_p, max_tokens):
    """Stream a chat completion, yielding the accumulated reply after each chunk."""
    reply = ""
    stream = client.chat.completions.create(
        model=CHAT_MODEL,
        messages=messages,
        temperature=temperature,
        top_p=top_p,
        max_tokens=max_tokens,
        stream=True,
    )
    for event in stream:
        piece = getattr(event.choices[0].delta, "content", None)
        if piece:
            reply += piece
        yield reply
108
+
109
def nv_complete(messages, temperature, top_p, max_tokens) -> str:
    """Run a single non-streaming completion (used for PDFs and utilities)."""
    response = client.chat.completions.create(
        model=CHAT_MODEL,
        messages=messages,
        temperature=temperature,
        top_p=top_p,
        max_tokens=max_tokens,
        stream=False,
    )
    return response.choices[0].message.content.strip()
116
+
117
# ========= PDF utils (NEW) =========
def _to_pdf_bytes(title: str, body: str) -> bytes:
    """Render *title* and *body* as a simple single-column A4 PDF; return its bytes."""
    # reportlab is imported lazily so the rest of the app works even if the
    # PDF-export features are never exercised.
    from reportlab.pdfgen import canvas
    from reportlab.lib.pagesizes import A4
    from reportlab.lib.utils import simpleSplit

    buf = io.BytesIO()
    pdf = canvas.Canvas(buf, pagesize=A4)
    page_w, page_h = A4
    margin = 50

    pdf.setTitle(title)
    pdf.setFont("Helvetica-Bold", 14)
    pdf.drawString(margin, page_h - margin, title)

    pdf.setFont("Helvetica", 11)
    y = page_h - margin - 30
    for text_line in simpleSplit(body, "Helvetica", 11, page_w - 2 * margin):
        if y < margin:
            # Cursor ran off the bottom: start a fresh page and reset the font.
            pdf.showPage()
            pdf.setFont("Helvetica", 11)
            y = page_h - margin
        pdf.drawString(margin, y, text_line)
        y -= 15
    pdf.showPage()
    pdf.save()
    buf.seek(0)
    return buf.read()
145
+
146
# ========= Main chat =========
def chatbot(user_input: str, temperature: float, top_p: float, max_tokens: int):
    """Answer *user_input* via RAG over the CV and record the turn.

    Returns the updated (user, assistant) history for the Chatbot widget plus
    an empty string that clears the input textbox.
    """
    global dialog_history
    if not user_input:
        return dialog_history, ""

    retrieved = retrieve_context(user_input)
    system_msg = {
        "role": "system",
        "content": (
            "You are an assistant specialized in the candidate's CV. "
            "Use ONLY the retrieved context to answer. If you don't know, say you don't know.\n\n"
            f"=== Retrieved Context ===\n{retrieved}\n\n"
        ),
    }

    # Replay prior turns so the model keeps conversational context.
    messages = [system_msg]
    for past_user, past_assistant in dialog_history:
        messages.append({"role": "user", "content": past_user})
        messages.append({"role": "assistant", "content": past_assistant})
    messages.append({"role": "user", "content": user_input})

    # Drain the stream; only the final accumulated reply is surfaced to the UI.
    reply_full = ""
    try:
        for partial in nv_stream(messages, temperature, top_p, max_tokens):
            reply_full = partial
        dialog_history.append((user_input, reply_full))
    except OpenAIError as e:
        reply_full = f"⚠️ API Error: {e.__class__.__name__}: {e}"
        dialog_history.append((user_input, reply_full))

    return dialog_history, ""
178
 
 
 
 
179
def clear_history():
    """Drop all recorded turns; returns empties for the chat window and textbox."""
    global dialog_history
    dialog_history = []
    return [], ""
183
 
184
# ========= (NEW) Multi-format mini-bio =========
# Dropdown label -> style instruction passed verbatim into the LLM prompt.
MINI_BIO_STYLES = {
    "Acadêmico": "Estilo acadêmico, objetivo, cite publicações/projetos e área de pesquisa.",
    "Corporativo": "Tom profissional para negócios, destaque resultados, KPIs e liderança.",
    "Pitch curto": "3-4 frases diretas, chamando atenção para conquistas-chave."
}
190
+
191
def generate_mini_bio(style_key: str, temperature: float, top_p: float, max_tokens: int):
    """Generate a mini-bio PDF in the chosen style; returns (filename, status message)."""
    if style_key not in MINI_BIO_STYLES:
        return None, "Selecione um formato de mini-bio."
    # Deliberately generic retrieval query so the context spans the whole CV.
    cv_context = retrieve_context("resumo do currículo, principais resultados e tecnologias", k=8)
    prompt = [
        {
            "role": "system",
            "content": (
                "Use apenas o contexto do CV para gerar uma mini-bio. "
                "Não invente fatos. Seja fiel ao conteúdo.\n\n"
                f"=== Contexto do CV ===\n{cv_context}\n"
            ),
        },
        {
            "role": "user",
            "content": f"Produza uma mini-bio em português. Estilo: {MINI_BIO_STYLES[style_key]} (150-220 palavras)."
        },
    ]
    try:
        bio_text = nv_complete(prompt, temperature, top_p, max_tokens)
        # Timestamped filename avoids collisions between successive exports.
        filename = f"mini_bio_{style_key.replace(' ','_').lower()}_{int(time.time())}.pdf"
        with open(filename, "wb") as out:
            out.write(_to_pdf_bytes(f"Mini-bio ({style_key})", bio_text))
        return filename, "Mini-bio gerada com sucesso."
    except OpenAIError as e:
        return None, f"⚠️ API Error: {e}"
217
+
218
# ========= (NEW) Cover letter + match score =========
def generate_cover_letter(job_desc: str, temperature: float, top_p: float, max_tokens: int):
    """Draft a cover letter PDF for *job_desc*; returns (filename, status message)."""
    if not job_desc or not job_desc.strip():
        return None, "Cole a descrição da vaga primeiro."
    # Retrieve CV chunks most relevant to the job description itself.
    cv_context = retrieve_context(job_desc, k=8)
    prompt = [
        {
            "role": "system",
            "content": (
                "Gere uma carta de motivação baseada SOMENTE no CV (contexto) e na vaga. "
                "Inclua 2-3 conquistas mensuráveis e tecnologias relevantes. 250-350 palavras.\n\n"
                f"=== Contexto (CV) ===\n{cv_context}\n"
            ),
        },
        {
            "role": "user",
            "content": f"Descrição da vaga:\n{job_desc}\n\nGerar carta em PT-BR/PT-PT, tom profissional."
        },
    ]
    try:
        letter_text = nv_complete(prompt, temperature, top_p, max_tokens)
        filename = f"carta_{int(time.time())}.pdf"
        with open(filename, "wb") as out:
            out.write(_to_pdf_bytes("Carta de Motivação", letter_text))
        return filename, "Carta gerada com sucesso."
    except OpenAIError as e:
        return None, f"⚠️ API Error: {e}"
241
+
242
def compute_match_score(job_desc: str):
    """
    Markdown match-score summary: 0-100 = 60% global similarity (job embedding
    vs. mean CV embedding) + 40% requirement coverage, where requirements are
    the most frequent non-stopword tokens of the job description and coverage
    is the fraction of them found in the retrieved CV context.
    """
    if not job_desc or not job_desc.strip():
        return "Cole a descrição da vaga para calcular o match score."

    # --- Global similarity -------------------------------------------------
    cv_mean = _ensure_cv_mean()
    job_vec = embedding_model.encode([job_desc], convert_to_numpy=True, normalize_embeddings=True)[0]
    raw_sim = float(np.dot(cv_mean, job_vec))          # ~[-1, 1] with normalized embeddings
    sim_norm = min(1.0, max(0.0, (raw_sim + 1) / 2))   # rescale into [0, 1]

    # --- Requirement coverage (simple keyword heuristic) -------------------
    tokens = re.findall(r"[a-zA-ZÀ-ÿ0-9\-\+#\.]{3,}", job_desc.lower())
    stop = set(["com","para","dos","das","uma","um","de","da","do","and","the","with","sem","em","na","no","os","as","que"])
    keywords = [t for t in tokens if t not in stop]
    top_keywords = [w for w, _ in Counter(keywords).most_common(20)]

    retrieved = retrieve_context(job_desc, k=8).lower()
    found = sum(1 for w in top_keywords if w in retrieved)
    coverage = found / max(1, len(top_keywords))       # [0, 1]

    score = int(round(100 * (0.6 * sim_norm + 0.4 * coverage)))
    return (
        f"Similaridade global: {int(sim_norm*100)}% | "
        f"Cobertura de requisitos: {int(coverage*100)}% | "
        f"→ Match score: **{score}/100**"
    )
273
+
274
+ # ========= (NEW) Métricas do CV =========
275
+ TECH_HINTS = ["python","r","faiss","qdrant","pytorch","tensorflow","scikit","gradio","streamlit",
276
+ "gis","qgis","gdal","grass","sentinel","landsat","process mining","rag","vit","mask2former"]
277
+
278
+ COUNTRY_HINTS = ["portugal","brasil","germany","alemanh", "spain","espanha","europe","europa","france","italy","uk","usa"]
279
+
280
+ def extract_metrics():
281
+ text_all = " \n".join(map(str, chunks))
282
+ # Publicações aproximadas: procura palavras-chave
283
+ pubs = len(re.findall(r"\b(publica(?:ç(?:ões|ao|ão)|dos?)|paper|article|artigo|ieee|springer|acm)\b", text_all, flags=re.I))
284
+ # Anos presentes (para dar noção de linha do tempo)
285
+ years = sorted(set(re.findall(r"\b(20\d{2}|19\d{2})\b", text_all)))
286
+ # Tecnologias (contagem simples)
287
+ tech_counts = {t: len(re.findall(re.escape(t), text_all, flags=re.I)) for t in TECH_HINTS}
288
+ top_tech = sorted([k for k,v in tech_counts.items() if v>0], key=lambda k: tech_counts[k], reverse=True)[:8]
289
+ # Presença internacional
290
+ intl_hits = sum(len(re.findall(c, text_all, flags=re.I)) for c in COUNTRY_HINTS)
291
+
292
+ md = [
293
+ "### Métricas do CV (estimativas)\n",
294
+ f"- **Publicações (sinalizadas)**: ~{pubs}",
295
+ f"- **Anos mencionados**: {', '.join(years[:12])}{'…' if len(years)>12 else ''}",
296
+ f"- **Tecnologias mais citadas**: {', '.join(top_tech) if top_tech else '—'}",
297
+ f"- **Menções internacionais**: ~{intl_hits}",
298
+ "\n> Observação: estimativas baseadas em busca por palavras-chave nos trechos indexados."
299
+ ]
300
+ return "\n".join(md)
301
+
302
+ # ========= UI =========
303
+ custom_css = r"""
304
+ :root { --primary:#4a90e2; --bg-light:#f9f9f9; --txt-dark:#333; --radius:8px; --spacing:1rem; }
305
  body { background: var(--bg-light); color: var(--txt-dark); font-family: 'Helvetica Neue', sans-serif; }
306
  #chat-window { height: 65vh; overflow-y: auto; padding: var(--spacing); border: 1px solid #ddd; border-radius: var(--radius); }
 
 
 
307
  .sidebar { background: var(--bg-light); padding: var(--spacing); border-left: 1px solid #eee; }
 
308
  """
309
 
310
  with gr.Blocks(title=APP_TITLE, css=custom_css, theme=gr.themes.Base()) as demo:
 
312
  gr.Markdown(INTRO)
313
 
314
  with gr.Row():
315
+ # Main chat
316
  with gr.Column(scale=3):
317
  chatbot_ui = gr.Chatbot(type="tuples", elem_id="chat-window")
318
+ txt = gr.Textbox(placeholder="Digite sua pergunta…", lines=2)
319
+ btn_send = gr.Button("Enviar", variant="primary")
320
+ btn_clear = gr.Button("Limpar")
321
+
322
+ with gr.Accordion("Parâmetros avançados", open=False):
323
+ temperature = gr.Slider(0, 1, value=0.6, label="Temperature")
324
+ top_p = gr.Slider(0, 1, value=0.95, label="Top-p")
325
+ max_tokens = gr.Slider(64, 2048, value=512, step=64, label="Max Tokens")
326
+
327
+ btn_send.click(chatbot, [txt, temperature, top_p, max_tokens], [chatbot_ui, txt])
328
  txt.submit(chatbot, [txt, temperature, top_p, max_tokens], [chatbot_ui, txt])
329
+ btn_clear.click(clear_history, [], [chatbot_ui, txt])
330
 
331
+ # Sidebar
332
+ with gr.Column(scale=2, elem_classes="sidebar"):
333
  if Path(PDF_PATH).exists():
334
  gr.Markdown(f"[📄 Baixar CV em PDF](/file={PDF_PATH})")
335
+
336
  gr.Markdown("### Sugestões de Perguntas")
337
  for q in SUGGESTION_QUESTIONS:
338
  gr.Button(q).click(lambda suggestion=q: suggestion, outputs=[txt])
339
+
340
+ # (NEW) sugestões por tema
341
  gr.Markdown("---")
342
+ gr.Markdown("### Sugestões por tema")
343
+ for theme, qs in SUGGESTIONS_THEMES.items():
344
+ with gr.Accordion(theme, open=False):
345
+ for q in qs:
346
+ gr.Button(q).click(lambda s=q: s, outputs=[txt])
347
 
348
+ # (NEW) Mini-bios
349
+ gr.Markdown("---")
350
+ gr.Markdown("### Exportação rápida – Mini-bio (PDF)")
351
+ bio_style = gr.Dropdown(choices=list(MINI_BIO_STYLES.keys()), value="Corporativo", label="Formato")
352
+ btn_bio = gr.Button("Gerar Mini-bio (PDF)")
353
+ bio_file = gr.File(label="Mini-bio gerada")
354
+ bio_msg = gr.Markdown()
355
+ btn_bio.click(generate_mini_bio, [bio_style, temperature, top_p, max_tokens], [bio_file, bio_msg])
356
 
357
+ # (NEW) Assistente de candidatura
358
+ gr.Markdown("---")
359
+ gr.Markdown("### Assistente de candidatura")
360
+ job_desc = gr.Textbox(label="Cole a descrição da vaga", lines=8, placeholder="Cole aqui a JD…")
361
+ with gr.Row():
362
+ btn_cover = gr.Button("Gerar Carta (PDF)")
363
+ btn_match = gr.Button("Calcular Match Score")
364
+ cover_file = gr.File(label="Carta gerada")
365
+ cover_msg = gr.Markdown()
366
+ match_out = gr.Markdown()
367
 
368
+ btn_cover.click(generate_cover_letter, [job_desc, temperature, top_p, max_tokens], [cover_file, cover_msg])
369
+ btn_match.click(lambda jd: compute_match_score(jd), [job_desc], [match_out])
370
 
371
+ # (NEW) Métricas do CV
372
+ gr.Markdown("---")
373
+ gr.Markdown("### Métricas do CV")
374
+ btn_metrics = gr.Button("Recalcular métricas")
375
+ metrics_md = gr.Markdown(value=extract_metrics())
376
+ btn_metrics.click(lambda: extract_metrics(), [], [metrics_md])
377
 
378
+ gr.Markdown("---")
379
+ gr.Markdown("### Dicas de Exploração do PDF")
380
+ gr.Markdown("• Use palavras-chave como 'Process Mining', 'GIS', 'Sentinel' para ir direto à seção relevante.")
381
+ gr.Markdown("• Peça detalhes de projetos financiados (CNPq, QREN, UE) e resultados mensuráveis.")
382
 
383
+ if __name__ == "__main__":
384
+ demo.launch(server_name="0.0.0.0", server_port=7860)