ViniciusKhan committed on
Commit
4ced332
·
1 Parent(s): 8e6ffde

Add application file

Browse files
__pycache__/app.cpython-311.pyc ADDED
Binary file (21.8 kB). View file
 
__pycache__/llm_client.cpython-311.pyc ADDED
Binary file (3.36 kB). View file
 
__pycache__/models_schemas.cpython-311.pyc ADDED
Binary file (4.63 kB). View file
 
__pycache__/parsers.cpython-311.pyc ADDED
Binary file (1.47 kB). View file
 
__pycache__/prompts.cpython-311.pyc ADDED
Binary file (2.88 kB). View file
 
app.py CHANGED
@@ -1,21 +1,109 @@
1
  import os
2
  import uvicorn
3
- from typing import List, Optional
 
 
 
 
 
4
  from fastapi import FastAPI, UploadFile, File, Form, HTTPException, Request
5
  from fastapi.middleware.cors import CORSMiddleware
6
  from fastapi.responses import JSONResponse, HTMLResponse, RedirectResponse
7
 
 
8
  from models_schemas import AnalyzeResponse, AnalyzeBatchRequest
9
  from llm_client import analyze_cv_with_llm
10
  from parsers import extract_text_from_pdf
11
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  # -----------------------------------------------------------------------------
13
  # App & Middlewares
14
  # -----------------------------------------------------------------------------
15
  app = FastAPI(
16
  title="RecrAI API",
17
- version="1.0.0",
18
- description="API de triagem e análise de currículos com Groq LLM."
19
  )
20
 
21
  app.add_middleware(
@@ -51,21 +139,15 @@ def home():
51
  <h2>Endpoints</h2>
52
  <ul>
53
  <li><code>GET /health</code> – status básico</li>
54
- <li><code>GET /info</code> – informações não sensíveis do runtime</li>
55
- <li><code>POST /analyze_cv</code> análise de 1 currículo (multipart)</li>
 
 
 
56
  <li><code>POST /analyze_cv_batch</code> – análise em lote (JSON)</li>
57
  </ul>
58
  <p>Documentação: <a href="/docs">Swagger UI</a> | <a href="/redoc">ReDoc</a></p>
59
  </div>
60
-
61
- <div class="card">
62
- <h2>Exemplo rápido (cURL)</h2>
63
- <pre>
64
- curl -X POST https://&lt;seu-space&gt;.hf.space/analyze_cv ^
65
- -F "job=Desenvolvedor(a) Full Stack..." ^
66
- -F "cv_text=Fulano trabalhou com Python e React ..."
67
- </pre>
68
- </div>
69
  </body>
70
  </html>
71
  """
@@ -85,7 +167,7 @@ def info():
85
  """Informações não sensíveis do ambiente (sem expor segredos)."""
86
  return {
87
  "app": "RecrAI API",
88
- "version": "1.0.0",
89
  "model_id": os.getenv("GROQ_MODEL_ID", "deepseek-r1-distill-llama-70b"),
90
  "temperature": float(os.getenv("TEMPERATURE", "0.7")),
91
  "env": "spaces" if os.getenv("HF_SPACE_ID") else "local"
@@ -105,21 +187,93 @@ async def unhandled_exception_handler(_: Request, exc: Exception):
105
  return JSONResponse(status_code=500, content={"detail": "Erro interno do servidor."})
106
 
107
  # -----------------------------------------------------------------------------
108
- # Endpoints principais
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
109
  # -----------------------------------------------------------------------------
110
  @app.post("/analyze_cv", response_model=AnalyzeResponse, summary="Analisa um currículo (PDF ou texto)")
111
  async def analyze_cv_endpoint(
112
- job: str = Form(..., description="Descrição completa da vaga"),
 
113
  cv_text: Optional[str] = Form(None, description="Texto do currículo (alternativa a PDF)"),
114
  file: Optional[UploadFile] = File(None, description="Arquivo PDF do currículo")
115
  ):
116
  """
117
  Envie **cv_text** OU **file** (PDF).
118
  - Se PDF for enviado, o texto é extraído automaticamente.
 
119
  """
120
  if not cv_text and not file:
121
  raise HTTPException(status_code=400, detail="Envie 'cv_text' ou 'file' (PDF).")
122
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
123
  if file:
124
  if not file.filename.lower().endswith(".pdf"):
125
  raise HTTPException(status_code=415, detail="Apenas PDF é suportado no 'file'.")
@@ -129,19 +283,61 @@ async def analyze_cv_endpoint(
129
  if not cv_text or not cv_text.strip():
130
  raise HTTPException(status_code=422, detail="Não foi possível extrair texto do currículo.")
131
 
132
- result = analyze_cv_with_llm(cv_text=cv_text, job_details=job)
133
- return result
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
134
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
135
  @app.post("/analyze_cv_batch", response_model=List[AnalyzeResponse], summary="Analisa múltiplos currículos (JSON)")
136
  async def analyze_cv_batch_endpoint(payload: AnalyzeBatchRequest):
137
- """
138
- Recebe uma lista de itens, cada um com:
139
- - `job` (str)
140
- - `cv_text` (opcional)
141
- - `cv_pdf_b64` (opcional, base64 do PDF)
142
-
143
- Pelo menos um dos campos `cv_text` ou `cv_pdf_b64` deve existir.
144
- """
145
  results: List[AnalyzeResponse] = []
146
  for item in payload.items:
147
  if not item.cv_text and not item.cv_pdf_b64:
@@ -149,23 +345,34 @@ async def analyze_cv_batch_endpoint(payload: AnalyzeBatchRequest):
149
 
150
  cv_text = item.cv_text
151
  if not cv_text and item.cv_pdf_b64:
152
- import base64
153
  try:
154
  pdf_bytes = base64.b64decode(item.cv_pdf_b64)
 
155
  except Exception:
156
  raise HTTPException(status_code=422, detail="cv_pdf_b64 inválido (base64).")
157
- cv_text = extract_text_from_pdf(pdf_bytes)
158
 
159
  if not cv_text or not cv_text.strip():
160
  raise HTTPException(status_code=422, detail="Não foi possível extrair texto de um dos currículos.")
161
 
162
  res = analyze_cv_with_llm(cv_text=cv_text, job_details=item.job)
163
- results.append(res)
 
164
 
165
  return results
166
 
 
 
 
 
 
 
 
 
 
 
 
167
  # -----------------------------------------------------------------------------
168
- # Main (útil localmente; no HF Space o CMD do Dockerfile já executa uvicorn)
169
  # -----------------------------------------------------------------------------
170
  if __name__ == "__main__":
171
  uvicorn.run("app:app", host="0.0.0.0", port=int(os.getenv("PORT", "7860")))
 
1
  import os
2
  import uvicorn
3
+ import uuid
4
+ import json
5
+ import base64
6
+ from datetime import datetime
7
+ from typing import List, Optional, Dict, Any
8
+
9
  from fastapi import FastAPI, UploadFile, File, Form, HTTPException, Request
10
  from fastapi.middleware.cors import CORSMiddleware
11
  from fastapi.responses import JSONResponse, HTMLResponse, RedirectResponse
12
 
13
+ # Usa seus módulos existentes
14
  from models_schemas import AnalyzeResponse, AnalyzeBatchRequest
15
  from llm_client import analyze_cv_with_llm
16
  from parsers import extract_text_from_pdf
17
 
18
+ # -----------------------------------------------------------------------------
19
+ # Helpers de persistência (usa /data do Space) - sem arquivos extras
20
+ # -----------------------------------------------------------------------------
21
+ DATA_DIR = os.path.join(os.getcwd(), "data")
22
+ JOBS_PATH = os.path.join(DATA_DIR, "jobs.json")
23
+ CVS_PATH = os.path.join(DATA_DIR, "cvs.json")
24
+
25
+ def _ensure_data_dir():
26
+ os.makedirs(DATA_DIR, exist_ok=True)
27
+
28
+ def _read_json(path: str):
29
+ _ensure_data_dir()
30
+ if not os.path.exists(path):
31
+ return []
32
+ try:
33
+ with open(path, "r", encoding="utf-8") as f:
34
+ return json.load(f)
35
+ except Exception:
36
+ return []
37
+
38
+ def _write_json(path: str, data: Any):
39
+ _ensure_data_dir()
40
+ with open(path, "w", encoding="utf-8") as f:
41
+ json.dump(data, f, indent=2, ensure_ascii=False)
42
+
43
+ def _list_jobs() -> List[Dict[str, Any]]:
44
+ data = _read_json(JOBS_PATH)
45
+ return data if isinstance(data, list) else [data]
46
+
47
+ def _create_job(title: str, description: str, details: str, requirements: Optional[List[str]] = None) -> Dict[str, Any]:
48
+ jobs = _list_jobs()
49
+ rec = {
50
+ "id": str(uuid.uuid4()),
51
+ "title": title,
52
+ "description": description,
53
+ "details": details,
54
+ "requirements": requirements or [],
55
+ "created_at": datetime.utcnow().isoformat()
56
+ }
57
+ jobs.append(rec)
58
+ _write_json(JOBS_PATH, jobs)
59
+ return rec
60
+
61
+ def _get_job(job_id: str) -> Optional[Dict[str, Any]]:
62
+ for j in _list_jobs():
63
+ if j.get("id") == job_id:
64
+ return j
65
+ return None
66
+
67
+ def _list_cvs() -> List[Dict[str, Any]]:
68
+ data = _read_json(CVS_PATH)
69
+ return data if isinstance(data, list) else [data]
70
+
71
+ def _save_cv_result(result: Dict[str, Any], job: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
72
+ cvs = _list_cvs()
73
+ rec = {
74
+ "id": str(uuid.uuid4()),
75
+ "name": result.get("name") or "",
76
+ "area": result.get("area") or "",
77
+ "summary": result.get("summary") or "",
78
+ "skills": result.get("skills") or [],
79
+ "education": result.get("education") or "",
80
+ "interview_questions": result.get("interview_questions") or [],
81
+ "strengths": result.get("strengths") or [],
82
+ "areas_for_development": result.get("areas_for_development") or [],
83
+ "important_considerations": result.get("important_considerations") or [],
84
+ "final_recommendations": result.get("final_recommendations") or "",
85
+ "score": float(result.get("score") or 0.0),
86
+ "created_at": datetime.utcnow().isoformat(),
87
+ "job_id": job.get("id") if job else None,
88
+ "job_title": job.get("title") if job else None
89
+ }
90
+ cvs.append(rec)
91
+ _write_json(CVS_PATH, cvs)
92
+ return rec
93
+
94
+ def _get_cv(cv_id: str) -> Optional[Dict[str, Any]]:
95
+ for c in _list_cvs():
96
+ if c.get("id") == cv_id:
97
+ return c
98
+ return None
99
+
100
  # -----------------------------------------------------------------------------
101
  # App & Middlewares
102
  # -----------------------------------------------------------------------------
103
  app = FastAPI(
104
  title="RecrAI API",
105
+ version="1.2.0",
106
+ description="API de triagem e análise de currículos com Groq LLM (compatível com o front)."
107
  )
108
 
109
  app.add_middleware(
 
139
  <h2>Endpoints</h2>
140
  <ul>
141
  <li><code>GET /health</code> – status básico</li>
142
+ <li><code>GET /info</code> – informações do runtime</li>
143
+ <li><code>GET /jobs</code> &bull; <code>POST /jobs</code></li>
144
+ <li><code>GET /cvs</code> &bull; <code>GET /cvs/{{id}}</code></li>
145
+ <li><code>POST /analyze_cv</code> – 1 currículo (multipart)</li>
146
+ <li><code>POST /analyze_cv_batch_multipart</code> – vários PDFs (multipart: files[])</li>
147
  <li><code>POST /analyze_cv_batch</code> – análise em lote (JSON)</li>
148
  </ul>
149
  <p>Documentação: <a href="/docs">Swagger UI</a> | <a href="/redoc">ReDoc</a></p>
150
  </div>
 
 
 
 
 
 
 
 
 
151
  </body>
152
  </html>
153
  """
 
167
  """Informações não sensíveis do ambiente (sem expor segredos)."""
168
  return {
169
  "app": "RecrAI API",
170
+ "version": "1.2.0",
171
  "model_id": os.getenv("GROQ_MODEL_ID", "deepseek-r1-distill-llama-70b"),
172
  "temperature": float(os.getenv("TEMPERATURE", "0.7")),
173
  "env": "spaces" if os.getenv("HF_SPACE_ID") else "local"
 
187
  return JSONResponse(status_code=500, content={"detail": "Erro interno do servidor."})
188
 
189
  # -----------------------------------------------------------------------------
190
+ # Jobs (compatível com teu front)
191
+ # -----------------------------------------------------------------------------
192
@app.get("/jobs", summary="Lista vagas cadastradas")
def list_jobs():
    """Return every job record from the local JSON store."""
    jobs = _list_jobs()
    return jobs
195
+
196
@app.post("/jobs", summary="Cria uma vaga")
def create_job(payload: Dict[str, Any]):
    """Create a job from a loose JSON payload.

    Required keys: title, description, details. 'requirements' may be
    either a list of strings or a single comma-separated string.
    Returns the created record wrapped in a confirmation message.
    """
    def _clean(key: str) -> str:
        # Missing keys and None collapse to the empty string before stripping.
        return (payload.get(key) or "").strip()

    title = _clean("title")
    description = _clean("description")
    details = _clean("details")
    if not (title and description and details):
        raise HTTPException(status_code=400, detail="Campos obrigatórios: title, description, details.")

    requirements = payload.get("requirements") or []
    if isinstance(requirements, str):
        # Accept "a, b, c" from the front-end and normalize to a list.
        requirements = [part.strip() for part in requirements.split(",") if part.strip()]

    rec = _create_job(title, description, details, requirements)
    return {"message": "Vaga criada com sucesso.", "job": rec}
208
+
209
+ # -----------------------------------------------------------------------------
210
+ # CVs (listagem/detalhe) para a seção de resultados do front
211
+ # -----------------------------------------------------------------------------
212
@app.get("/cvs", summary="Lista currículos analisados")
def list_cvs():
    """Summarize stored CV analyses for the front-end results list.

    Returns only the lightweight fields (name, area, summary, score,
    timestamps, job link) rather than the full analysis record.
    """
    # Comprehension replaces the manual append loop (same output order/fields).
    return [
        {
            "id": c.get("id"),
            "name": c.get("name", ""),
            "area": c.get("area", ""),
            "summary": c.get("summary", ""),
            "score": c.get("score", 0.0),
            "created_at": c.get("created_at"),
            "job_id": c.get("job_id"),
            "job_title": c.get("job_title")
        }
        for c in _list_cvs()
    ]
229
+
230
@app.get("/cvs/{cv_id}", summary="Detalhe do currículo analisado", response_model=AnalyzeResponse)
def get_cv(cv_id: str):
    """Return the full stored analysis for *cv_id*, or 404 when unknown."""
    record = _get_cv(cv_id)
    if not record:
        raise HTTPException(status_code=404, detail="Currículo não encontrado.")
    # The stored record's fields match AnalyzeResponse, so it validates directly.
    return AnalyzeResponse(**record)
237
+
238
+ # -----------------------------------------------------------------------------
239
+ # Analyze (single) — aceita job_id OU job texto
240
  # -----------------------------------------------------------------------------
241
  @app.post("/analyze_cv", response_model=AnalyzeResponse, summary="Analisa um currículo (PDF ou texto)")
242
  async def analyze_cv_endpoint(
243
+ job: Optional[str] = Form(None, description="Descrição completa da vaga (alternativa a job_id)"),
244
+ job_id: Optional[str] = Form(None, description="ID de uma vaga previamente criada"),
245
  cv_text: Optional[str] = Form(None, description="Texto do currículo (alternativa a PDF)"),
246
  file: Optional[UploadFile] = File(None, description="Arquivo PDF do currículo")
247
  ):
248
  """
249
  Envie **cv_text** OU **file** (PDF).
250
  - Se PDF for enviado, o texto é extraído automaticamente.
251
+ - Informe **job_id** ou **job** (texto). Se nenhum for informado, a avaliação será genérica.
252
  """
253
  if not cv_text and not file:
254
  raise HTTPException(status_code=400, detail="Envie 'cv_text' ou 'file' (PDF).")
255
 
256
+ # Resolve job details
257
+ job_rec = None
258
+ if job_id:
259
+ job_rec = _get_job(job_id)
260
+ if not job_rec:
261
+ raise HTTPException(status_code=404, detail="job_id não encontrado.")
262
+ job_details = f"""**Vaga: {job_rec.get('title')}**
263
+
264
+ **Descrição:**
265
+ {job_rec.get('description')}
266
+
267
+ **Detalhes:**
268
+ {job_rec.get('details')}
269
+
270
+ **Requisitos:**
271
+ {", ".join(job_rec.get("requirements") or [])}
272
+ """
273
+ else:
274
+ job_details = job or "Vaga não especificada."
275
+
276
+ # Extrai texto do PDF se necessário
277
  if file:
278
  if not file.filename.lower().endswith(".pdf"):
279
  raise HTTPException(status_code=415, detail="Apenas PDF é suportado no 'file'.")
 
283
  if not cv_text or not cv_text.strip():
284
  raise HTTPException(status_code=422, detail="Não foi possível extrair texto do currículo.")
285
 
286
+ # Chama LLM
287
+ result = analyze_cv_with_llm(cv_text=cv_text, job_details=job_details)
288
+ # Persiste e devolve
289
+ saved = _save_cv_result(result.dict(), job=job_rec)
290
+ return AnalyzeResponse(**saved)
291
+
292
+ # -----------------------------------------------------------------------------
293
+ # Analyze (batch) — multipart: files[] (compatível com teu front)
294
+ # -----------------------------------------------------------------------------
295
# -----------------------------------------------------------------------------
# Analyze (batch) — multipart: files[] (front-end compatible)
# -----------------------------------------------------------------------------
def _job_details_markdown(job_rec: Optional[Dict[str, Any]]) -> str:
    """Render a job record as the markdown block passed to the LLM prompt."""
    if not job_rec:
        return "Vaga não especificada."
    return f"""**Vaga: {job_rec.get('title')}**

**Descrição:**
{job_rec.get('description')}

**Detalhes:**
{job_rec.get('details')}

**Requisitos:**
{", ".join(job_rec.get("requirements") or [])}
"""

@app.post("/analyze_cv_batch_multipart", summary="Analisa vários PDFs via multipart (files[])")
async def analyze_cv_batch_multipart(
    job_id: Optional[str] = Form(None),
    files: List[UploadFile] = File(...)
):
    """Analyze several uploaded PDF CVs against an optional stored job.

    Each successfully parsed PDF is analyzed, persisted and returned;
    PDFs that yield no extractable text are skipped (best-effort batch).
    Raises 404 for an unknown job_id, 415 for non-PDF uploads.
    """
    job_rec = _get_job(job_id) if job_id else None
    if job_id and not job_rec:
        raise HTTPException(status_code=404, detail="job_id não encontrado.")

    if not files:
        raise HTTPException(status_code=400, detail="Envie 'files[]' com pelo menos 1 PDF.")

    job_details = _job_details_markdown(job_rec)

    results = []
    for f in files:
        # f.filename can be None on malformed multipart parts; reject those too
        # (previously this crashed with AttributeError → 500).
        if not f.filename or not f.filename.lower().endswith(".pdf"):
            raise HTTPException(status_code=415, detail=f"Apenas PDF é suportado. Arquivo inválido: {f.filename}")
        pdf_bytes = await f.read()
        cv_text = extract_text_from_pdf(pdf_bytes)
        # Guard against None as well as whitespace-only extraction results.
        if not cv_text or not cv_text.strip():
            continue
        res = analyze_cv_with_llm(cv_text=cv_text, job_details=job_details)
        saved = _save_cv_result(res.dict(), job=job_rec)
        results.append(saved)

    return {"message": f"Analisados {len(results)} currículo(s).", "results": results}
335
+
336
+ # -----------------------------------------------------------------------------
337
+ # Analyze (batch) — JSON (já existia; mantido para compatibilidade)
338
+ # -----------------------------------------------------------------------------
339
  @app.post("/analyze_cv_batch", response_model=List[AnalyzeResponse], summary="Analisa múltiplos currículos (JSON)")
340
  async def analyze_cv_batch_endpoint(payload: AnalyzeBatchRequest):
 
 
 
 
 
 
 
 
341
  results: List[AnalyzeResponse] = []
342
  for item in payload.items:
343
  if not item.cv_text and not item.cv_pdf_b64:
 
345
 
346
  cv_text = item.cv_text
347
  if not cv_text and item.cv_pdf_b64:
 
348
  try:
349
  pdf_bytes = base64.b64decode(item.cv_pdf_b64)
350
+ cv_text = extract_text_from_pdf(pdf_bytes)
351
  except Exception:
352
  raise HTTPException(status_code=422, detail="cv_pdf_b64 inválido (base64).")
 
353
 
354
  if not cv_text or not cv_text.strip():
355
  raise HTTPException(status_code=422, detail="Não foi possível extrair texto de um dos currículos.")
356
 
357
  res = analyze_cv_with_llm(cv_text=cv_text, job_details=item.job)
358
+ saved = _save_cv_result(res.dict(), job=None)
359
+ results.append(AnalyzeResponse(**saved))
360
 
361
  return results
362
 
363
# --- DEBUG: list registered routes at startup ---
# NOTE(review): @app.on_event is deprecated in newer FastAPI — consider the
# lifespan handler when upgrading.
@app.on_event("startup")
async def _print_routes():
    """Print every HTTP route once at startup (debug aid for the Space logs)."""
    print("\n=== ROTAS CARREGADAS ===")
    for route in app.router.routes:
        methods = getattr(route, "methods", [])
        path = getattr(route, "path", "")
        if methods and path:
            # sorted() accepts the set directly; the list() wrapper was redundant.
            print(f"{sorted(methods)} {path}")
    print("========================\n")
373
+
374
  # -----------------------------------------------------------------------------
375
+ # Main (útil localmente; no HF o CMD do Dockerfile já executa uvicorn)
376
  # -----------------------------------------------------------------------------
377
  if __name__ == "__main__":
378
  uvicorn.run("app:app", host="0.0.0.0", port=int(os.getenv("PORT", "7860")))
models_schemas.py CHANGED
@@ -1,7 +1,11 @@
1
  from typing import List, Optional
2
  from pydantic import BaseModel, Field, validator
 
 
 
3
 
4
  class AnalyzeResponse(BaseModel):
 
5
  name: Optional[str] = ""
6
  area: Optional[str] = ""
7
  summary: Optional[str] = ""
@@ -13,6 +17,9 @@ class AnalyzeResponse(BaseModel):
13
  important_considerations: List[str] = Field(default_factory=list)
14
  final_recommendations: Optional[str] = ""
15
  score: float = 0.0
 
 
 
16
 
17
  @validator("score", pre=True, always=True)
18
  def coerce_score(cls, v):
@@ -28,3 +35,38 @@ class AnalyzeBatchItem(BaseModel):
28
 
29
  class AnalyzeBatchRequest(BaseModel):
30
  items: List[AnalyzeBatchItem]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  from typing import List, Optional
2
  from pydantic import BaseModel, Field, validator
3
+ from datetime import datetime
4
+
5
+ # ----------- CV / Analyze -----------
6
 
7
  class AnalyzeResponse(BaseModel):
8
+ id: Optional[str] = None # id salvo na base local
9
  name: Optional[str] = ""
10
  area: Optional[str] = ""
11
  summary: Optional[str] = ""
 
17
  important_considerations: List[str] = Field(default_factory=list)
18
  final_recommendations: Optional[str] = ""
19
  score: float = 0.0
20
+ created_at: Optional[str] = None
21
+ job_id: Optional[str] = None
22
+ job_title: Optional[str] = None
23
 
24
  @validator("score", pre=True, always=True)
25
  def coerce_score(cls, v):
 
35
 
36
  class AnalyzeBatchRequest(BaseModel):
37
  items: List[AnalyzeBatchItem]
38
+
39
+ # ----------- Jobs -----------
40
+
41
class JobCreateRequest(BaseModel):
    """Request body for POST /jobs: a new job opening to persist."""
    title: str
    description: str
    details: str
    # Defaults to an empty list when the client omits requirements.
    requirements: List[str] = Field(default_factory=list)
46
+
47
class Job(BaseModel):
    """A persisted job record as stored in jobs.json."""
    id: str  # UUID assigned at creation time
    title: str
    description: str
    details: str
    requirements: List[str] = Field(default_factory=list)
    created_at: str  # ISO-8601 timestamp string
54
+
55
+ # ----------- CV records (listagem) -----------
56
+
57
class CvRecord(BaseModel):
    """Lightweight stored-CV summary used by the /cvs listing endpoint."""
    id: str
    name: Optional[str] = ""
    area: Optional[str] = ""
    summary: Optional[str] = ""
    score: float = 0.0
    created_at: str
    job_id: Optional[str] = None
    job_title: Optional[str] = None

    @validator("score", pre=True, always=True)
    def cv_score_coerce(cls, v):
        """Coerce any incoming score to float, defaulting to 0.0 on failure."""
        try:
            return float(v)
        except Exception:
            # Broad on purpose — mirrors AnalyzeResponse.coerce_score.
            return 0.0
storage.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Local JSON persistence for jobs and analyzed CVs (./data on the Space)."""
import os
import json
import uuid
from typing import List, Dict, Any, Optional
from datetime import datetime

DATA_DIR = os.path.join(os.getcwd(), "data")
JOBS_PATH = os.path.join(DATA_DIR, "jobs.json")
CVS_PATH = os.path.join(DATA_DIR, "cvs.json")

def _ensure_data_dir():
    """Create the ./data directory if needed (idempotent)."""
    os.makedirs(DATA_DIR, exist_ok=True)

def _read_json(path: str) -> Any:
    """Load JSON from *path*; missing or unreadable files yield an empty list.

    (Previously non-".json" paths returned None — dead branch, since every
    caller passes a *.json path; unified to always return [].)
    """
    _ensure_data_dir()
    if not os.path.exists(path):
        return []
    with open(path, "r", encoding="utf-8") as f:
        try:
            return json.load(f)
        except Exception:
            # Corrupt store: treat as empty instead of failing the request.
            return []

def _write_json(path: str, data: Any):
    """Persist *data* as pretty-printed UTF-8 JSON at *path*."""
    _ensure_data_dir()
    with open(path, "w", encoding="utf-8") as f:
        json.dump(data, f, indent=2, ensure_ascii=False)

def _coerce_score(value: Any) -> float:
    """Best-effort float conversion (matches AnalyzeResponse.coerce_score)."""
    try:
        return float(value or 0.0)
    except Exception:
        return 0.0

# ---------------- Jobs ----------------

def list_jobs() -> List[Dict[str, Any]]:
    """Return all stored jobs, always as a list."""
    data = _read_json(JOBS_PATH)
    if isinstance(data, dict):
        data = [data]
    return data or []

def create_job(title: str, description: str, details: str, requirements: Optional[list] = None) -> Dict[str, Any]:
    """Append a new job record to jobs.json and return it."""
    jobs = list_jobs()
    rec = {
        "id": str(uuid.uuid4()),
        "title": title,
        "description": description,
        "details": details,
        "requirements": requirements or [],
        # NOTE(review): naive UTC timestamp kept for compatibility with stored data.
        "created_at": datetime.utcnow().isoformat()
    }
    jobs.append(rec)
    _write_json(JOBS_PATH, jobs)
    return rec

def get_job(job_id: str) -> Optional[Dict[str, Any]]:
    """Return the job with *job_id*, or None if absent."""
    for j in list_jobs():
        if j.get("id") == job_id:
            return j
    return None

# ---------------- CVs ----------------

def list_cvs() -> List[Dict[str, Any]]:
    """Return all stored CV records, always as a list."""
    data = _read_json(CVS_PATH)
    if isinstance(data, dict):
        data = [data]
    return data or []

def save_cv_result(result: Dict[str, Any], job: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Normalize an analysis dict, persist it to cvs.json and return the record.

    The score is coerced to float here (it previously could be stored as a
    raw string), keeping storage consistent with the app.py helper and the
    AnalyzeResponse validator.
    """
    cvs = list_cvs()
    rec = {
        "id": str(uuid.uuid4()),
        "name": result.get("name") or "",
        "area": result.get("area") or "",
        "summary": result.get("summary") or "",
        "skills": result.get("skills") or [],
        "education": result.get("education") or "",
        "interview_questions": result.get("interview_questions") or [],
        "strengths": result.get("strengths") or [],
        "areas_for_development": result.get("areas_for_development") or [],
        "important_considerations": result.get("important_considerations") or [],
        "final_recommendations": result.get("final_recommendations") or "",
        "score": _coerce_score(result.get("score")),
        "created_at": datetime.utcnow().isoformat(),
        "job_id": job.get("id") if job else None,
        "job_title": job.get("title") if job else None
    }
    cvs.append(rec)
    _write_json(CVS_PATH, cvs)
    return rec

def get_cv(cv_id: str) -> Optional[Dict[str, Any]]:
    """Return the CV record with *cv_id*, or None if absent."""
    for c in list_cvs():
        if c.get("id") == cv_id:
            return c
    return None