Spaces:
Sleeping
Sleeping
Update main.py
Browse files
main.py
CHANGED
|
@@ -45,6 +45,33 @@ bucket = storage.bucket()
|
|
| 45 |
|
| 46 |
# --- Gemini Client ---
|
| 47 |
client = genai.Client(api_key=os.getenv("Gemini"))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 48 |
|
| 49 |
# --- FAISS Setup ---
|
| 50 |
INDEX_PATH = "vector.index"
|
|
@@ -427,6 +454,91 @@ def financial_statement():
|
|
| 427 |
|
| 428 |
return jsonify({"report": report, "cached": False}), 200
|
| 429 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 430 |
# --------- Run the App ---------
|
| 431 |
if __name__ == "__main__":
|
| 432 |
app.run(host="0.0.0.0", port=7860, debug=True)
|
|
|
|
| 45 |
|
| 46 |
# --- Gemini Client ---
# API key is read from the environment variable literally named "Gemini".
# NOTE(review): os.getenv returns None if the variable is unset, so a missing
# key only surfaces later, at the first generate_content call — confirm the
# Space secret is actually named "Gemini".
client = genai.Client(api_key=os.getenv("Gemini"))
# Model used by the /api/evaluate endpoints.
# NOTE(review): "-exp" marks an experimental model id that Google may retire
# without notice — verify availability before relying on it in production.
model_name = "gemini-2.0-flash-thinking-exp"
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class GenericEvaluator:
    """Build holistic evaluation prompts for incubator applicants and parse
    the model's JSON reply.

    Stateless: both methods depend only on their arguments, so a single
    instance can be shared across requests.
    """

    def generate_prompt(self, participant_info: dict) -> str:
        """Return an open-ended evaluation prompt for Gemini.

        Args:
            participant_info: Arbitrary applicant fields; embedded verbatim
                as pretty-printed JSON.

        Returns:
            The full prompt string instructing the model to answer with a
            JSON dictionary.
        """
        prompt = f"""
You are an expert evaluator for a small business incubator in South Africa, reviewing candidate applications. Use your own expertise, critical thinking, and judgment to assess the following applicant. There are no predefined criteria or weights — your evaluation should be holistic and based on the information provided.

Participant Info:
{json.dumps(participant_info, indent=2)}

Based on your assessment, provide:
1. "AI Recommendation": either "Accept" or "Reject"
2. "AI Score": a score out of 100 reflecting overall business quality or readiness
3. "Justification": a brief explanation for your decision
4. "Recommended Interventions": A description of the interventions or what help the business should get from the incubator to take them to the next level"
Return your output strictly as a JSON dictionary with these keys: "AI Recommendation", "AI Score" and "Justification", "Interventions".
"""
        return prompt

    def parse_gemini_response(self, response_text: str) -> dict:
        """Parse the model reply as JSON, tolerating Markdown code fences.

        Gemini frequently wraps its JSON answer in ```json ... ``` fences even
        when told to return raw JSON; the original implementation passed the
        raw text straight to json.loads and therefore failed on every fenced
        reply. Fences are stripped before parsing.

        Returns:
            The decoded dict, or {"error": ..., "raw_response": ...} when the
            text is not valid JSON (preserving the original fallback shape).
        """
        cleaned = response_text.strip()
        if cleaned.startswith("```"):
            # Drop the opening fence line (``` or ```json) ...
            cleaned = cleaned.split("\n", 1)[1] if "\n" in cleaned else ""
            # ... and the closing fence, if present.
            cleaned = cleaned.rstrip()
            if cleaned.endswith("```"):
                cleaned = cleaned[:-3]
        try:
            return json.loads(cleaned)
        except Exception as e:
            # Keep the raw text so callers can inspect what the model sent.
            return {"error": str(e), "raw_response": response_text}
|
| 73 |
+
|
| 74 |
+
|
| 75 |
|
| 76 |
# --- FAISS Setup ---
# Relative path where the FAISS vector index is persisted on disk.
# NOTE(review): relative to the process working directory — on Spaces this
# storage is ephemeral across restarts; confirm that is acceptable.
INDEX_PATH = "vector.index"
|
|
|
|
| 454 |
|
| 455 |
return jsonify({"report": report, "cached": False}), 200
|
| 456 |
|
| 457 |
+
# AI Screening endpoint
@app.route('/api/evaluate', methods=['POST'])
def evaluate_participant():
    """Evaluate a single applicant with Gemini.

    Expects a JSON body: {"participantId": ..., "participantInfo": {...}}.

    Returns:
        200 {"status": "success", "participantId": ..., "evaluation": {...}}
        400 when the body is missing or not a JSON object
        500 on model/transport failures

    Fix over the original: `request.json` raises on an absent or non-JSON
    body, which the broad `except` turned into a misleading 500; the body is
    now validated explicitly and rejected with a 400.
    """
    try:
        # silent=True returns None instead of raising on a bad/missing body.
        data = request.get_json(silent=True)
        if not isinstance(data, dict):
            return jsonify({
                "status": "error",
                "message": "Request body must be a JSON object"
            }), 400

        participant_id = data.get("participantId")
        participant_info = data.get("participantInfo", {})

        evaluator = GenericEvaluator()
        prompt = evaluator.generate_prompt(participant_info)

        response = client.models.generate_content(
            model=model_name,
            contents=prompt
        )

        evaluation = evaluator.parse_gemini_response(response.text)

        return jsonify({
            "status": "success",
            "participantId": participant_id,
            "evaluation": evaluation
        })

    except Exception as e:
        # Model/transport failures surface as a 500 with the error text.
        return jsonify({
            "status": "error",
            "message": str(e)
        }), 500
|
| 486 |
+
|
| 487 |
+
|
| 488 |
+
@app.route('/api/batch-evaluate', methods=['POST'])
def batch_evaluate():
    """Evaluate many applicants in one call.

    Expects a JSON body: {"participants": [{"participantId": ...,
    "participantInfo": {...}}, ...]}.

    Returns:
        200 {"status": "success", "evaluations": [...]}  — one entry per
            participant, in input order
        500 only when the request itself cannot be processed

    Fix over the original: a single failing generate_content call aborted the
    whole batch and discarded every already-computed result. Failures are now
    isolated per participant — the error is recorded in that participant's
    "evaluation" and the loop continues.
    """
    try:
        participants = request.json.get('participants', [])
        results = []

        # One stateless evaluator reused for the whole batch.
        evaluator = GenericEvaluator()

        for item in participants:
            participant_id = item.get("participantId")
            participant_info = item.get("participantInfo", {})
            try:
                prompt = evaluator.generate_prompt(participant_info)

                response = client.models.generate_content(
                    model=model_name,
                    contents=prompt
                )

                evaluation = evaluator.parse_gemini_response(response.text)
            except Exception as item_err:
                # Per-item failure: record it and keep processing the rest.
                evaluation = {"error": str(item_err)}

            results.append({
                "participantId": participant_id,
                "evaluation": evaluation
            })

        return jsonify({
            "status": "success",
            "evaluations": results
        })

    except Exception as e:
        return jsonify({
            "status": "error",
            "message": str(e)
        }), 500
|
| 523 |
+
|
| 524 |
+
|
| 525 |
+
@app.route('/api/shortlist', methods=['GET'])
def get_shortlist():
    """Return the current applicant shortlist.

    Placeholder implementation: always answers with an empty list until the
    real selection logic is wired in.
    """
    try:
        # Placeholder logic
        payload = {
            "status": "success",
            "shortlist": []
        }
        return jsonify(payload)
    except Exception as e:
        error_payload = {
            "status": "error",
            "message": str(e)
        }
        return jsonify(error_payload), 500
|
| 538 |
+
|
| 539 |
+
|
| 540 |
+
|
| 541 |
+
|
| 542 |
# --------- Run the App ---------
if __name__ == "__main__":
    # Bind on all interfaces; port 7860 is the conventional HF Spaces port.
    # NOTE(review): debug=True enables the Werkzeug interactive debugger,
    # which allows arbitrary code execution if the port is publicly
    # reachable — disable for any deployment beyond local development.
    app.run(host="0.0.0.0", port=7860, debug=True)
|