Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -6,7 +6,8 @@ import json
|
|
| 6 |
import gradio as gr
|
| 7 |
import atexit
|
| 8 |
import datetime
|
| 9 |
-
import re
|
|
|
|
| 10 |
|
| 11 |
# --- New libraries for file processing ---
|
| 12 |
import pypdf
|
|
@@ -20,8 +21,14 @@ BASE_URL = "https://api.deepinfra.com/v1/openai"
|
|
| 20 |
WEAVIATE_URL = "maf5cvz1saelnti3k34a.c0.europe-west3.gcp.weaviate.cloud"
|
| 21 |
WEAVIATE_API_KEY = "cHFZK1JOaEg3K2p6K3JnQl9ZM1FEQ2NhMVU1SnBRVUpYWCtCVHlVU0J2Qmx1Mk9SaktpT09UQTNiU1hRPV92MjAw"
|
| 22 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
# --- Helper function to create schemas ---
|
| 24 |
def create_application_schema(client: weaviate.WeaviateClient):
|
|
|
|
| 25 |
collection_name = "Application"
|
| 26 |
if not client.collections.exists(collection_name):
|
| 27 |
print(f"Creating collection: {collection_name}")
|
|
@@ -29,16 +36,20 @@ def create_application_schema(client: weaviate.WeaviateClient):
|
|
| 29 |
name=collection_name,
|
| 30 |
properties=[
|
| 31 |
weaviate.classes.config.Property(name="job_id", data_type=weaviate.classes.config.DataType.TEXT),
|
|
|
|
| 32 |
weaviate.classes.config.Property(name="cv_content", data_type=weaviate.classes.config.DataType.TEXT),
|
| 33 |
weaviate.classes.config.Property(name="cover_letter_content", data_type=weaviate.classes.config.DataType.TEXT),
|
| 34 |
weaviate.classes.config.Property(name="submission_date", data_type=weaviate.classes.config.DataType.DATE),
|
|
|
|
| 35 |
]
|
| 36 |
)
|
| 37 |
print(f"✅ Collection '{collection_name}' created successfully.")
|
| 38 |
else:
|
| 39 |
print(f"✅ Collection '{collection_name}' already exists.")
|
| 40 |
|
|
|
|
| 41 |
def create_project_schema(client: weaviate.WeaviateClient):
|
|
|
|
| 42 |
collection_name = "Project"
|
| 43 |
if not client.collections.exists(collection_name):
|
| 44 |
print(f"Creating collection: {collection_name}")
|
|
@@ -49,6 +60,7 @@ def create_project_schema(client: weaviate.WeaviateClient):
|
|
| 49 |
weaviate.classes.config.Property(name="description", data_type=weaviate.classes.config.DataType.TEXT),
|
| 50 |
weaviate.classes.config.Property(name="required_skills", data_type=weaviate.classes.config.DataType.TEXT_ARRAY),
|
| 51 |
weaviate.classes.config.Property(name="team_members", data_type=weaviate.classes.config.DataType.TEXT_ARRAY),
|
|
|
|
| 52 |
weaviate.classes.config.Property(name="max_team_size", data_type=weaviate.classes.config.DataType.NUMBER),
|
| 53 |
weaviate.classes.config.Property(name="creator_id", data_type=weaviate.classes.config.DataType.TEXT),
|
| 54 |
weaviate.classes.config.Property(name="is_recruiting", data_type=weaviate.classes.config.DataType.BOOL),
|
|
@@ -58,6 +70,22 @@ def create_project_schema(client: weaviate.WeaviateClient):
|
|
| 58 |
else:
|
| 59 |
print(f"✅ Collection '{collection_name}' already exists.")
|
| 60 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 61 |
# --- 2. CHATBOT CLASS ---
|
| 62 |
class WeaviateChatbot:
|
| 63 |
def __init__(self, weaviate_url, weaviate_api_key, llm_api_key, llm_base_url):
|
|
@@ -70,22 +98,26 @@ class WeaviateChatbot:
|
|
| 70 |
self.weaviate_client.connect()
|
| 71 |
print("✅ Successfully connected to Weaviate.")
|
| 72 |
|
| 73 |
-
# Create schemas on startup
|
| 74 |
create_application_schema(self.weaviate_client)
|
| 75 |
create_project_schema(self.weaviate_client)
|
|
|
|
| 76 |
|
| 77 |
self.llm_client = OpenAI(api_key=llm_api_key, base_url=llm_base_url)
|
| 78 |
print("✅ Successfully connected to LLM client (DeepInfra).")
|
| 79 |
|
| 80 |
-
self.collection_names = ["Job", "Opportunities", "Project"]
|
| 81 |
-
|
|
|
|
| 82 |
def _embed_text(self, text: str) -> list[float]:
|
| 83 |
resp = self.llm_client.embeddings.create(model=EMBEDDING_MODEL_NAME, input=text, encoding_format="float")
|
| 84 |
return resp.data[0].embedding
|
| 85 |
|
| 86 |
-
def _search_database(self, query_vector: list[float], limit: int = 5) -> str:
|
|
|
|
| 87 |
all_results = []
|
| 88 |
-
|
|
|
|
|
|
|
| 89 |
try:
|
| 90 |
collection = self.weaviate_client.collections.get(name)
|
| 91 |
response = collection.query.near_vector(near_vector=query_vector, limit=limit)
|
|
@@ -94,24 +126,20 @@ class WeaviateChatbot:
|
|
| 94 |
except Exception as e:
|
| 95 |
print(f"Could not query collection '{name}'. Error: {e}")
|
| 96 |
return "\n---\n".join(all_results) if all_results else "No relevant information found in the database."
|
| 97 |
-
|
| 98 |
def _generate_response(self, query: str, context: str) -> str:
|
| 99 |
prompt = f"""
|
| 100 |
-
You are EduNatives Assistant
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
* If the user says they want to apply for a job (e.g., "I want to apply for job_021"), you MUST respond ONLY with the exact phrase:
|
| 109 |
-
`STARTING_APPLICATION_PROCESS:job_021`
|
| 110 |
-
* If the user wants to create a project (e.g., "create a new project"), respond ONLY with the exact phrase:
|
| 111 |
-
`STARTING_PROJECT_CREATION`.
|
| 112 |
--- CONTEXT FROM DATABASE START ---
|
| 113 |
{context}
|
| 114 |
-
--- CONTEXT
|
| 115 |
|
| 116 |
User Question: {query}
|
| 117 |
|
|
@@ -120,22 +148,118 @@ Answer:
|
|
| 120 |
response = self.llm_client.chat.completions.create(model=MODEL_NAME, messages=[{"role": "user", "content": prompt}], max_tokens=4096)
|
| 121 |
return response.choices[0].message.content.strip()
|
| 122 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 123 |
def ask(self, query: str):
|
|
|
|
| 124 |
print(f"\nProcessing query: '{query}'")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 125 |
query_vector = self._embed_text(query)
|
| 126 |
context = self._search_database(query_vector)
|
| 127 |
-
|
| 128 |
-
return answer
|
| 129 |
|
| 130 |
-
def save_application(self, application_data: dict):
|
|
|
|
| 131 |
print("Saving application to Weaviate...")
|
| 132 |
try:
|
| 133 |
applications = self.weaviate_client.collections.get("Application")
|
| 134 |
app_uuid = applications.data.insert({
|
| 135 |
"job_id": application_data.get("job_id"),
|
|
|
|
| 136 |
"cv_content": application_data.get("cv_content"),
|
| 137 |
"cover_letter_content": application_data.get("cover_letter_content"),
|
| 138 |
-
"submission_date": datetime.datetime.now(datetime.timezone.utc)
|
|
|
|
| 139 |
})
|
| 140 |
print(f"✅ Application saved with UUID: {app_uuid}")
|
| 141 |
return True
|
|
@@ -147,8 +271,9 @@ Answer:
|
|
| 147 |
if self.weaviate_client.is_connected():
|
| 148 |
self.weaviate_client.close()
|
| 149 |
print("\nWeaviate connection closed.")
|
| 150 |
-
|
| 151 |
def _get_job_details(self, job_id: str) -> dict:
|
|
|
|
| 152 |
try:
|
| 153 |
jobs = self.weaviate_client.collections.get("Job")
|
| 154 |
response = jobs.query.fetch_objects(
|
|
@@ -162,64 +287,48 @@ Answer:
|
|
| 162 |
return None
|
| 163 |
|
| 164 |
def generate_cover_letter(self, cv_content: str, job_id: str) -> str:
|
|
|
|
| 165 |
print(f"Generating Cover Letter for job: {job_id}")
|
| 166 |
job_details = self._get_job_details(job_id)
|
| 167 |
|
| 168 |
if not job_details:
|
| 169 |
-
# Fallback prompt: Generate a generic letter if job details are not found
|
| 170 |
print(f"⚠️ Job details for '{job_id}' not found. Generating a generic cover letter based on CV.")
|
| 171 |
prompt = f"""
|
| 172 |
You are an expert career coach. A user has provided their CV but the specific job details for job '{job_id}' could not be found.
|
| 173 |
-
|
| 174 |
**Goal:** Write a strong, general-purpose cover letter based ONLY on the user's CV.
|
| 175 |
-
|
| 176 |
**Instructions:**
|
| 177 |
-
1.
|
| 178 |
-
2.
|
| 179 |
-
3.
|
| 180 |
-
|
| 181 |
-
- Maintain a professional and enthusiastic tone.
|
| 182 |
-
- Highlight 2-3 key skills or projects from the CV.
|
| 183 |
-
- Conclude by expressing a strong interest in discussing their qualifications further.
|
| 184 |
-
- **Important:** Add a note at the end: "[This is a general cover letter as the specific job details for '{job_id}' were not found. For a more targeted letter, please ensure the job exists in the database.]"
|
| 185 |
-
|
| 186 |
--- USER CV CONTENT START ---
|
| 187 |
{cv_content}
|
| 188 |
--- USER CV CONTENT END ---
|
| 189 |
-
|
| 190 |
Now, write the general-purpose cover letter.
|
| 191 |
"""
|
| 192 |
else:
|
| 193 |
-
# Original, preferred prompt: Use specific job details to write a targeted letter
|
| 194 |
prompt = f"""
|
| 195 |
-
You are an expert career coach specializing in crafting impactful cover letters.
|
| 196 |
-
|
| 197 |
-
**Goal:** Write a professional, personalized, and enthusiastic cover letter.
|
| 198 |
-
|
| 199 |
**Instructions:**
|
| 200 |
-
1.
|
| 201 |
-
2.
|
| 202 |
-
3.
|
| 203 |
-
4.
|
| 204 |
-
- Start with "Dear Hiring Manager,".
|
| 205 |
-
- Maintain a professional and enthusiastic tone.
|
| 206 |
-
- Structure the letter with a clear introduction, body (where you make the connections), and conclusion.
|
| 207 |
-
- Do not invent skills or experiences the user does not have. Base everything strictly on the provided texts.
|
| 208 |
-
|
| 209 |
--- JOB DESCRIPTION START ---
|
| 210 |
{json.dumps(job_details, indent=2)}
|
| 211 |
--- JOB DESCRIPTION END ---
|
| 212 |
-
|
| 213 |
--- USER CV CONTENT START ---
|
| 214 |
{cv_content}
|
| 215 |
--- USER CV CONTENT END ---
|
| 216 |
-
|
| 217 |
-
Now, write the cover letter that perfectly bridges the candidate's CV with the job description.
|
| 218 |
"""
|
| 219 |
response = self.llm_client.chat.completions.create(model=MODEL_NAME, messages=[{"role": "user", "content": prompt}], max_tokens=2048)
|
| 220 |
return response.choices[0].message.content.strip()
|
| 221 |
|
| 222 |
-
|
|
|
|
|
|
|
| 223 |
print("Saving new project to Weaviate...")
|
| 224 |
try:
|
| 225 |
projects = self.weaviate_client.collections.get("Project")
|
|
@@ -228,18 +337,203 @@ Now, write the cover letter that perfectly bridges the candidate's CV with the j
|
|
| 228 |
"description": project_data.get("description"),
|
| 229 |
"required_skills": project_data.get("required_skills"),
|
| 230 |
"max_team_size": project_data.get("max_team_size"),
|
| 231 |
-
"creator_id":
|
| 232 |
"is_recruiting": True,
|
| 233 |
-
"team_members": []
|
|
|
|
| 234 |
})
|
| 235 |
print(f"✅ Project saved with UUID: {project_uuid}")
|
| 236 |
-
return True
|
| 237 |
except Exception as e:
|
| 238 |
print(f"❌ Failed to save project: {e}")
|
| 239 |
-
return False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 240 |
|
| 241 |
# --- Helper to extract text from uploaded files ---
|
| 242 |
def _extract_text_from_file(file_path):
|
|
|
|
| 243 |
print(f"Extracting text from: {file_path}")
|
| 244 |
if file_path.endswith('.pdf'):
|
| 245 |
try:
|
|
@@ -270,67 +564,129 @@ atexit.register(chatbot_instance.close_connections)
|
|
| 270 |
def chat_interface_func(message: str, history: list, app_state: dict, file_obj: object):
|
| 271 |
history = history or []
|
| 272 |
current_mode = app_state.get("mode", "GENERAL")
|
| 273 |
-
|
| 274 |
-
# After any interaction, the examples will be hidden
|
| 275 |
hide_examples = gr.update(visible=False)
|
| 276 |
|
| 277 |
# --- Part 1: Handle File Uploads ---
|
| 278 |
if file_obj is not None:
|
| 279 |
-
|
| 280 |
-
text = _extract_text_from_file(file_path)
|
| 281 |
-
|
| 282 |
-
if current_mode == "APPLYING_CV":
|
| 283 |
-
app_state["cv_content"] = text
|
| 284 |
-
bot_message = (f"📄 CV '{os.path.basename(file_path)}' uploaded. "
|
| 285 |
-
f"Would you like me to help you write a cover letter for job **{app_state.get('job_id')}**, "
|
| 286 |
-
"or would you prefer to upload your own?")
|
| 287 |
-
history.append((None, bot_message))
|
| 288 |
-
app_state["mode"] = "APPLYING_COVER_LETTER_CHOICE"
|
| 289 |
-
return history, app_state, gr.update(visible=True, value=None), hide_examples
|
| 290 |
|
| 291 |
-
|
| 292 |
-
|
| 293 |
-
|
|
|
|
| 294 |
|
| 295 |
-
|
| 296 |
-
|
| 297 |
-
|
| 298 |
-
|
| 299 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 300 |
|
| 301 |
# --- Part 2: Handle Text Messages ---
|
| 302 |
if message:
|
| 303 |
history.append((message, None))
|
| 304 |
|
| 305 |
-
# ---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 306 |
if current_mode == "CREATING_PROJECT_NAME":
|
| 307 |
app_state["project_name"] = message
|
| 308 |
app_state["mode"] = "CREATING_PROJECT_DESC"
|
| 309 |
history.append((None, "Great! Now, please provide a short description for your project."))
|
| 310 |
-
return history, app_state, gr.update(visible=False), hide_examples
|
|
|
|
| 311 |
elif current_mode == "CREATING_PROJECT_DESC":
|
| 312 |
app_state["description"] = message
|
| 313 |
app_state["mode"] = "CREATING_PROJECT_SKILLS"
|
| 314 |
-
history.append((None, "What skills are required
|
| 315 |
-
return history, app_state, gr.update(visible=False), hide_examples
|
| 316 |
elif current_mode == "CREATING_PROJECT_SKILLS":
|
| 317 |
app_state["required_skills"] = [skill.strip() for skill in message.split(',')]
|
| 318 |
app_state["mode"] = "CREATING_PROJECT_SIZE"
|
| 319 |
-
history.append((None, "Perfect. How many members are you looking for in the team?
|
| 320 |
-
return history, app_state, gr.update(visible=False), hide_examples
|
| 321 |
elif current_mode == "CREATING_PROJECT_SIZE":
|
| 322 |
try:
|
| 323 |
app_state["max_team_size"] = int(message)
|
| 324 |
-
success = chatbot_instance.create_project(app_state)
|
| 325 |
-
bot_message = f"🚀 Fantastic! Your project **'{app_state.get('project_name')}'** has been created." if success else "❌ Sorry, there was an error creating your project."
|
| 326 |
history.append((None, bot_message))
|
| 327 |
app_state = {"mode": "GENERAL"}
|
| 328 |
-
return history, app_state, gr.update(visible=False), hide_examples
|
| 329 |
except ValueError:
|
| 330 |
history.append((None, "Please enter a valid number for the team size."))
|
| 331 |
-
return history, app_state, gr.update(visible=False), hide_examples
|
| 332 |
-
|
| 333 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 334 |
if current_mode == "APPLYING_COVER_LETTER_CHOICE":
|
| 335 |
positive_keywords = ["help", "generate", "write", "yes", "ok", "sure", "please"]
|
| 336 |
|
|
@@ -340,55 +696,64 @@ def chat_interface_func(message: str, history: list, app_state: dict, file_obj:
|
|
| 340 |
history.append((None, f"Here is a draft for your cover letter:\n\n---\n{cover_letter}\n\n---\n\nIf you are happy with this, please type 'submit' to send the application."))
|
| 341 |
app_state["cover_letter_content"] = cover_letter
|
| 342 |
app_state["mode"] = "CONFIRM_SUBMISSION"
|
| 343 |
-
return history, app_state, gr.update(visible=False), hide_examples
|
| 344 |
elif "upload" in message.lower():
|
| 345 |
history.append((None, "Okay, please upload your cover letter file."))
|
| 346 |
app_state["mode"] = "APPLYING_COVER_LETTER_UPLOAD"
|
| 347 |
-
return history, app_state, gr.update(visible=True), hide_examples
|
| 348 |
else:
|
| 349 |
history.append((None, "I'm sorry, I didn't quite understand. Do you want me to **write** a letter for you, or would you prefer to **upload** your own?"))
|
| 350 |
-
return history, app_state, gr.update(visible=True), hide_examples
|
| 351 |
|
| 352 |
if current_mode == "CONFIRM_SUBMISSION":
|
| 353 |
if "submit" in message.lower():
|
| 354 |
history.append((None, "Thank you! Submitting your application now..."))
|
| 355 |
-
success = chatbot_instance.save_application(app_state)
|
| 356 |
final_message = f"✅ Your application for job **{app_state.get('job_id')}** has been submitted successfully!" if success else "❌ Sorry, there was an error submitting your application."
|
| 357 |
history.append((None, final_message))
|
| 358 |
app_state = {"mode": "GENERAL"}
|
| 359 |
-
return history, app_state, gr.update(visible=False), hide_examples
|
| 360 |
else:
|
| 361 |
history.append((None, "Please type 'submit' to confirm and send your application."))
|
| 362 |
-
return history, app_state, gr.update(visible=False), hide_examples
|
| 363 |
|
| 364 |
# --- General Chat & Starting New Flows ---
|
| 365 |
response = chatbot_instance.ask(message)
|
| 366 |
|
| 367 |
-
print(f"DEBUG: LLM Raw Response was -> '{response}'") # For debugging
|
| 368 |
-
|
| 369 |
app_match = re.search(r"STARTING_APPLICATION_PROCESS:([\w-]+)", response)
|
| 370 |
-
|
| 371 |
-
|
| 372 |
if app_match:
|
| 373 |
job_id = app_match.group(1)
|
| 374 |
app_state["mode"] = "APPLYING_CV"
|
| 375 |
app_state["job_id"] = job_id
|
| 376 |
bot_message = f"Starting application for job **{job_id}**. Please upload your CV."
|
| 377 |
history.append((None, bot_message))
|
| 378 |
-
return history, app_state, gr.update(visible=True), hide_examples
|
| 379 |
|
| 380 |
-
elif
|
| 381 |
app_state["mode"] = "CREATING_PROJECT_NAME"
|
| 382 |
bot_message = "Awesome! Let's create a new project. What would you like to name it?"
|
| 383 |
history.append((None, bot_message))
|
| 384 |
-
return history, app_state, gr.update(visible=False), hide_examples
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 385 |
|
| 386 |
else:
|
| 387 |
history.append((None, response))
|
| 388 |
-
return history, app_state, gr.update(visible=False), hide_examples
|
| 389 |
|
| 390 |
-
# Default return
|
| 391 |
-
return history, app_state, gr.update(visible=False), gr.update()
|
| 392 |
|
| 393 |
|
| 394 |
# --- 5. BUILD GRADIO UI ---
|
|
@@ -396,7 +761,8 @@ with gr.Blocks(theme=gr.themes.Soft(), title="EduNatives Assistant") as demo:
|
|
| 396 |
|
| 397 |
initial_state = {
|
| 398 |
"mode": "GENERAL", "job_id": None, "cv_content": None, "cover_letter_content": None,
|
| 399 |
-
"project_name": None, "description": None, "required_skills": None, "max_team_size": None
|
|
|
|
| 400 |
}
|
| 401 |
application_state = gr.State(initial_state)
|
| 402 |
|
|
@@ -409,14 +775,13 @@ with gr.Blocks(theme=gr.themes.Soft(), title="EduNatives Assistant") as demo:
|
|
| 409 |
|
| 410 |
chatbot_window = gr.Chatbot(height=450, label="Chat Window", bubble_full_width=False)
|
| 411 |
|
| 412 |
-
# The container for the example buttons, will be hidden after first use
|
| 413 |
with gr.Column() as examples_container:
|
| 414 |
examples_list = [
|
| 415 |
-
"
|
| 416 |
-
"
|
| 417 |
"I want to create a new project",
|
| 418 |
-
"
|
| 419 |
-
"
|
| 420 |
]
|
| 421 |
with gr.Row():
|
| 422 |
btn1 = gr.Button(examples_list[0], variant='secondary')
|
|
@@ -428,16 +793,12 @@ with gr.Blocks(theme=gr.themes.Soft(), title="EduNatives Assistant") as demo:
|
|
| 428 |
|
| 429 |
example_buttons = [btn1, btn2, btn3, btn4, btn5]
|
| 430 |
|
| 431 |
-
# The main input bar
|
| 432 |
with gr.Row() as main_input_row:
|
| 433 |
-
text_input = gr.Textbox(placeholder="Ask your question
|
| 434 |
submit_btn = gr.Button("Send", variant="primary", scale=1)
|
| 435 |
|
| 436 |
-
|
| 437 |
-
file_uploader = gr.File(label="Upload Document", file_types=['.pdf', '.docx', '.txt'], visible=False)
|
| 438 |
|
| 439 |
-
# --- Event Handlers ---
|
| 440 |
-
# The list of outputs now includes the examples_container to control its visibility
|
| 441 |
outputs_list = [chatbot_window, application_state, file_uploader, examples_container]
|
| 442 |
|
| 443 |
submit_btn.click(
|
|
|
|
| 6 |
import gradio as gr
|
| 7 |
import atexit
|
| 8 |
import datetime
|
| 9 |
+
import re
|
| 10 |
+
import uuid
|
| 11 |
|
| 12 |
# --- New libraries for file processing ---
|
| 13 |
import pypdf
|
|
|
|
| 21 |
WEAVIATE_URL = "maf5cvz1saelnti3k34a.c0.europe-west3.gcp.weaviate.cloud"
|
| 22 |
WEAVIATE_API_KEY = "cHFZK1JOaEg3K2p6K3JnQl9ZM1FEQ2NhMVU1SnBRVUpYWCtCVHlVU0J2Qmx1Mk9SaktpT09UQTNiU1hRPV92MjAw"
|
| 23 |
|
| 24 |
+
# --- SIMULATED USER FOR TESTING ---
|
| 25 |
+
# In a real application, this would come from a login system.
|
| 26 |
+
CURRENT_USER_ID = "recruiter_001"
|
| 27 |
+
# To test student features, change to "student_007"
|
| 28 |
+
|
| 29 |
# --- Helper function to create schemas ---
|
| 30 |
def create_application_schema(client: weaviate.WeaviateClient):
|
| 31 |
+
# ... (code unchanged)
|
| 32 |
collection_name = "Application"
|
| 33 |
if not client.collections.exists(collection_name):
|
| 34 |
print(f"Creating collection: {collection_name}")
|
|
|
|
| 36 |
name=collection_name,
|
| 37 |
properties=[
|
| 38 |
weaviate.classes.config.Property(name="job_id", data_type=weaviate.classes.config.DataType.TEXT),
|
| 39 |
+
weaviate.classes.config.Property(name="user_id", data_type=weaviate.classes.config.DataType.TEXT),
|
| 40 |
weaviate.classes.config.Property(name="cv_content", data_type=weaviate.classes.config.DataType.TEXT),
|
| 41 |
weaviate.classes.config.Property(name="cover_letter_content", data_type=weaviate.classes.config.DataType.TEXT),
|
| 42 |
weaviate.classes.config.Property(name="submission_date", data_type=weaviate.classes.config.DataType.DATE),
|
| 43 |
+
weaviate.classes.config.Property(name="status", data_type=weaviate.classes.config.DataType.TEXT),
|
| 44 |
]
|
| 45 |
)
|
| 46 |
print(f"✅ Collection '{collection_name}' created successfully.")
|
| 47 |
else:
|
| 48 |
print(f"✅ Collection '{collection_name}' already exists.")
|
| 49 |
|
| 50 |
+
|
| 51 |
def create_project_schema(client: weaviate.WeaviateClient):
|
| 52 |
+
# ... (code unchanged)
|
| 53 |
collection_name = "Project"
|
| 54 |
if not client.collections.exists(collection_name):
|
| 55 |
print(f"Creating collection: {collection_name}")
|
|
|
|
| 60 |
weaviate.classes.config.Property(name="description", data_type=weaviate.classes.config.DataType.TEXT),
|
| 61 |
weaviate.classes.config.Property(name="required_skills", data_type=weaviate.classes.config.DataType.TEXT_ARRAY),
|
| 62 |
weaviate.classes.config.Property(name="team_members", data_type=weaviate.classes.config.DataType.TEXT_ARRAY),
|
| 63 |
+
weaviate.classes.config.Property(name="pending_members", data_type=weaviate.classes.config.DataType.TEXT_ARRAY),
|
| 64 |
weaviate.classes.config.Property(name="max_team_size", data_type=weaviate.classes.config.DataType.NUMBER),
|
| 65 |
weaviate.classes.config.Property(name="creator_id", data_type=weaviate.classes.config.DataType.TEXT),
|
| 66 |
weaviate.classes.config.Property(name="is_recruiting", data_type=weaviate.classes.config.DataType.BOOL),
|
|
|
|
| 70 |
else:
|
| 71 |
print(f"✅ Collection '{collection_name}' already exists.")
|
| 72 |
|
| 73 |
+
def create_user_schema(client: weaviate.WeaviateClient):
|
| 74 |
+
# ... (code unchanged)
|
| 75 |
+
collection_name = "User"
|
| 76 |
+
if not client.collections.exists(collection_name):
|
| 77 |
+
print(f"Creating collection: {collection_name}")
|
| 78 |
+
client.collections.create(
|
| 79 |
+
name=collection_name,
|
| 80 |
+
properties=[
|
| 81 |
+
weaviate.classes.config.Property(name="user_id", data_type=weaviate.classes.config.DataType.TEXT),
|
| 82 |
+
weaviate.classes.config.Property(name="cv_content", data_type=weaviate.classes.config.DataType.TEXT),
|
| 83 |
+
]
|
| 84 |
+
)
|
| 85 |
+
print(f"✅ Collection '{collection_name}' created successfully.")
|
| 86 |
+
else:
|
| 87 |
+
print(f"✅ Collection '{collection_name}' already exists.")
|
| 88 |
+
|
| 89 |
# --- 2. CHATBOT CLASS ---
|
| 90 |
class WeaviateChatbot:
|
| 91 |
def __init__(self, weaviate_url, weaviate_api_key, llm_api_key, llm_base_url):
|
|
|
|
| 98 |
self.weaviate_client.connect()
|
| 99 |
print("✅ Successfully connected to Weaviate.")
|
| 100 |
|
|
|
|
| 101 |
create_application_schema(self.weaviate_client)
|
| 102 |
create_project_schema(self.weaviate_client)
|
| 103 |
+
create_user_schema(self.weaviate_client)
|
| 104 |
|
| 105 |
self.llm_client = OpenAI(api_key=llm_api_key, base_url=llm_base_url)
|
| 106 |
print("✅ Successfully connected to LLM client (DeepInfra).")
|
| 107 |
|
| 108 |
+
self.collection_names = ["Job", "Opportunities", "Project", "User"]
|
| 109 |
+
|
| 110 |
+
# --- Core Methods ---
|
| 111 |
def _embed_text(self, text: str) -> list[float]:
|
| 112 |
resp = self.llm_client.embeddings.create(model=EMBEDDING_MODEL_NAME, input=text, encoding_format="float")
|
| 113 |
return resp.data[0].embedding
|
| 114 |
|
| 115 |
+
def _search_database(self, query_vector: list[float], limit: int = 5, collection_name: str = None) -> str:
|
| 116 |
+
# ... (code unchanged)
|
| 117 |
all_results = []
|
| 118 |
+
collections_to_search = [collection_name] if collection_name else self.collection_names
|
| 119 |
+
|
| 120 |
+
for name in collections_to_search:
|
| 121 |
try:
|
| 122 |
collection = self.weaviate_client.collections.get(name)
|
| 123 |
response = collection.query.near_vector(near_vector=query_vector, limit=limit)
|
|
|
|
| 126 |
except Exception as e:
|
| 127 |
print(f"Could not query collection '{name}'. Error: {e}")
|
| 128 |
return "\n---\n".join(all_results) if all_results else "No relevant information found in the database."
|
| 129 |
+
|
| 130 |
def _generate_response(self, query: str, context: str) -> str:
|
| 131 |
prompt = f"""
|
| 132 |
+
You are *EduNatives Assistant*. You are a helpful assistant for jobs and projects.
|
| 133 |
+
- **IMPORTANT**: Respond in the same language as the user's query.
|
| 134 |
+
- Answer the user's question based on the CONTEXT.
|
| 135 |
+
- If the user wants to apply for a job, respond ONLY with: `STARTING_APPLICATION_PROCESS:job_id`.
|
| 136 |
+
- If the user wants to create a project, respond ONLY with: `STARTING_PROJECT_CREATION`.
|
| 137 |
+
- If the user wants to analyze their CV, respond ONLY with: `INTENT_ANALYZE_CV`.
|
| 138 |
+
- If the user wants to rerank or evaluate CVs, respond ONLY with: `INTENT_START_RERANK`.
|
| 139 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
| 140 |
--- CONTEXT FROM DATABASE START ---
|
| 141 |
{context}
|
| 142 |
+
--- CONTEXT FROM DATABASE END ---
|
| 143 |
|
| 144 |
User Question: {query}
|
| 145 |
|
|
|
|
| 148 |
response = self.llm_client.chat.completions.create(model=MODEL_NAME, messages=[{"role": "user", "content": prompt}], max_tokens=4096)
|
| 149 |
return response.choices[0].message.content.strip()
|
| 150 |
|
| 151 |
+
def _get_query_intent(self, query: str) -> dict:
|
| 152 |
+
# ... (code unchanged)
|
| 153 |
+
prompt = f"""
|
| 154 |
+
Analyze the user's query to understand their intent and extract key entities. Your goal is to route the query to the correct function.
|
| 155 |
+
Respond with a JSON object containing "intent", "entity_type", and "entity_id".
|
| 156 |
+
|
| 157 |
+
- 'intent' can be one of: ["get_details", "get_applicants", "general_query", "other_action"].
|
| 158 |
+
- 'entity_type' can be one of: ["job", "project", "user", "unknown"].
|
| 159 |
+
- 'entity_id' should be the specific identifier mentioned (e.g., "job_022", "user_010", "PROJ-007", "Blockchain-Based Academic Credential System").
|
| 160 |
+
|
| 161 |
+
If the query is a command to the chatbot (like "analyze my CV" or "join a project"), set intent to "other_action".
|
| 162 |
+
If the query is a general question without a specific ID, set intent to "general_query".
|
| 163 |
+
|
| 164 |
+
Examples:
|
| 165 |
+
- Query: "Who applied for job_022?" -> {{"intent": "get_applicants", "entity_type": "job", "entity_id": "job_022"}}
|
| 166 |
+
- Query: "Show me details about project_003" -> {{"intent": "get_details", "entity_type": "project", "entity_id": "project_003"}}
|
| 167 |
+
- Query: "tell me about user_010" -> {{"intent": "get_details", "entity_type": "user", "entity_id": "user_010"}}
|
| 168 |
+
- Query: "Show me details about PROJ-0007" -> {{"intent": "get_details", "entity_type": "project", "entity_id": "PROJ-0007"}}
|
| 169 |
+
- Query: "find me jobs in marketing" -> {{"intent": "general_query", "entity_type": "job", "entity_id": null}}
|
| 170 |
+
- Query: "join the AI project" -> {{"intent": "other_action", "entity_type": "project", "entity_id": null}}
|
| 171 |
+
|
| 172 |
+
User Query: "{query}"
|
| 173 |
+
|
| 174 |
+
JSON Response:
|
| 175 |
+
"""
|
| 176 |
+
try:
|
| 177 |
+
response = self.llm_client.chat.completions.create(
|
| 178 |
+
model=MODEL_NAME,
|
| 179 |
+
messages=[{"role": "user", "content": prompt}],
|
| 180 |
+
max_tokens=256,
|
| 181 |
+
response_format={"type": "json_object"} # Use JSON mode
|
| 182 |
+
)
|
| 183 |
+
result = json.loads(response.choices[0].message.content.strip())
|
| 184 |
+
print(f"DEBUG: Intent recognized -> {result}")
|
| 185 |
+
return result
|
| 186 |
+
except Exception as e:
|
| 187 |
+
print(f"❌ Error in intent recognition: {e}")
|
| 188 |
+
return {"intent": "general_query", "entity_type": "unknown", "entity_id": None}
|
| 189 |
+
|
| 190 |
+
def _get_details_by_id(self, collection_name: str, property_name: str, entity_id: str):
|
| 191 |
+
# ... (code unchanged)
|
| 192 |
+
try:
|
| 193 |
+
collection = self.weaviate_client.collections.get(collection_name)
|
| 194 |
+
response = collection.query.fetch_objects(
|
| 195 |
+
limit=1,
|
| 196 |
+
filters=weaviate.classes.query.Filter.by_property(property_name).equal(entity_id)
|
| 197 |
+
)
|
| 198 |
+
if response.objects:
|
| 199 |
+
return f"Details for {collection_name} '{entity_id}':\n\n{json.dumps(response.objects[0].properties, indent=2, default=str)}"
|
| 200 |
+
else:
|
| 201 |
+
return f"🔍 Sorry, I couldn't find any details for {collection_name} with the ID '{entity_id}'."
|
| 202 |
+
except Exception as e:
|
| 203 |
+
print(f"Error fetching details for {entity_id}: {e}")
|
| 204 |
+
return f"❌ An error occurred while searching for '{entity_id}'."
|
| 205 |
+
|
| 206 |
+
def _get_applicants_by_job_id(self, job_id: str):
|
| 207 |
+
# ... (code unchanged)
|
| 208 |
+
try:
|
| 209 |
+
applications = self.weaviate_client.collections.get("Application")
|
| 210 |
+
response = applications.query.fetch_objects(
|
| 211 |
+
filters=weaviate.classes.query.Filter.by_property("job_id").equal(job_id)
|
| 212 |
+
)
|
| 213 |
+
if response.objects:
|
| 214 |
+
user_ids = [obj.properties.get("user_id", "Unknown User") for obj in response.objects]
|
| 215 |
+
return f"Applicants for job '{job_id}':\n- " + "\n- ".join(user_ids)
|
| 216 |
+
else:
|
| 217 |
+
return f"I couldn't find any applicants for job '{job_id}'."
|
| 218 |
+
except Exception as e:
|
| 219 |
+
print(f"Error fetching applicants for {job_id}: {e}")
|
| 220 |
+
return f"❌ An error occurred while searching for applicants for job '{job_id}'."
|
| 221 |
+
|
| 222 |
def ask(self, query: str):
|
| 223 |
+
# ... (code unchanged)
|
| 224 |
print(f"\nProcessing query: '{query}'")
|
| 225 |
+
|
| 226 |
+
intent_data = self._get_query_intent(query)
|
| 227 |
+
intent = intent_data.get("intent")
|
| 228 |
+
entity_type = intent_data.get("entity_type")
|
| 229 |
+
entity_id = intent_data.get("entity_id")
|
| 230 |
+
|
| 231 |
+
if intent == "get_details" and entity_id:
|
| 232 |
+
entity_type_lower = entity_type.lower()
|
| 233 |
+
if entity_type_lower == "job":
|
| 234 |
+
return self._get_details_by_id("Job", "job_id", entity_id)
|
| 235 |
+
elif entity_type_lower == "user":
|
| 236 |
+
return self._get_details_by_id("User", "user_id", entity_id)
|
| 237 |
+
elif entity_type_lower == "project":
|
| 238 |
+
project_obj = self._get_project_by_name(entity_id) # Use smart search for project names
|
| 239 |
+
if project_obj:
|
| 240 |
+
return f"Details for project '{project_obj.properties.get('project_name')}':\n\n{json.dumps(project_obj.properties, indent=2, default=str)}"
|
| 241 |
+
else:
|
| 242 |
+
return f"🔍 Sorry, I couldn't find any details for a project named '{entity_id}'."
|
| 243 |
+
|
| 244 |
+
elif intent == "get_applicants" and entity_id and entity_type.lower() == "job":
|
| 245 |
+
return self._get_applicants_by_job_id(entity_id)
|
| 246 |
+
|
| 247 |
query_vector = self._embed_text(query)
|
| 248 |
context = self._search_database(query_vector)
|
| 249 |
+
return self._generate_response(query, context)
|
|
|
|
| 250 |
|
| 251 |
+
def save_application(self, application_data: dict, user_id: str):
|
| 252 |
+
# ... (code unchanged)
|
| 253 |
print("Saving application to Weaviate...")
|
| 254 |
try:
|
| 255 |
applications = self.weaviate_client.collections.get("Application")
|
| 256 |
app_uuid = applications.data.insert({
|
| 257 |
"job_id": application_data.get("job_id"),
|
| 258 |
+
"user_id": user_id,
|
| 259 |
"cv_content": application_data.get("cv_content"),
|
| 260 |
"cover_letter_content": application_data.get("cover_letter_content"),
|
| 261 |
+
"submission_date": datetime.datetime.now(datetime.timezone.utc),
|
| 262 |
+
"status": "Submitted"
|
| 263 |
})
|
| 264 |
print(f"✅ Application saved with UUID: {app_uuid}")
|
| 265 |
return True
|
|
|
|
| 271 |
if self.weaviate_client.is_connected():
|
| 272 |
self.weaviate_client.close()
|
| 273 |
print("\nWeaviate connection closed.")
|
| 274 |
+
|
| 275 |
def _get_job_details(self, job_id: str) -> dict:
|
| 276 |
+
# ... (code unchanged)
|
| 277 |
try:
|
| 278 |
jobs = self.weaviate_client.collections.get("Job")
|
| 279 |
response = jobs.query.fetch_objects(
|
|
|
|
| 287 |
return None
|
| 288 |
|
| 289 |
def generate_cover_letter(self, cv_content: str, job_id: str) -> str:
    """Draft a cover letter for `job_id` from the user's CV text.

    If the job's details can be fetched, the prompt tailors the letter to
    them; otherwise a general-purpose letter is generated with an explicit
    trailing note that the job details were unavailable.
    """
    print(f"Generating Cover Letter for job: {job_id}")
    job_details = self._get_job_details(job_id)

    if not job_details:
        # Fallback: job lookup failed — write a generic letter from the CV alone.
        print(f"⚠️ Job details for '{job_id}' not found. Generating a generic cover letter based on CV.")
        prompt = f"""
You are an expert career coach. A user has provided their CV but the specific job details for job '{job_id}' could not be found.
**Goal:** Write a strong, general-purpose cover letter based ONLY on the user's CV.
**Instructions:**
1. Analyze the User's CV to identify their key skills, main role, and accomplishments.
2. Write a cover letter that showcases these strengths for a typical role in their field.
3. Start with "Dear Hiring Manager,". Maintain a professional and enthusiastic tone.
4. **Important:** Add a note at the end: "[This is a general cover letter as the specific job details for '{job_id}' were not found.]"
--- USER CV CONTENT START ---
{cv_content}
--- USER CV CONTENT END ---
Now, write the general-purpose cover letter.
"""
    else:
        # Happy path: personalize the letter against the fetched job description.
        prompt = f"""
You are an expert career coach specializing in crafting impactful cover letters.
**Goal:** Write a professional, personalized cover letter that bridges a candidate's CV and a job's requirements.
**Instructions:**
1. Analyze the Job Description for key responsibilities and skills.
2. Analyze the User's CV for relevant experiences.
3. Explicitly connect the user's qualifications to the job requirements.
4. Start with "Dear Hiring Manager,". Maintain a professional tone.
--- JOB DESCRIPTION START ---
{json.dumps(job_details, indent=2)}
--- JOB DESCRIPTION END ---
--- USER CV CONTENT START ---
{cv_content}
--- USER CV CONTENT END ---
Now, write the cover letter.
"""
    response = self.llm_client.chat.completions.create(model=MODEL_NAME, messages=[{"role": "user", "content": prompt}], max_tokens=2048)
    return response.choices[0].message.content.strip()
|
| 328 |
|
| 329 |
+
# --- Project Management Methods ---
|
| 330 |
+
def create_project(self, project_data: dict, creator_id: str):
|
| 331 |
+
# ... (code unchanged)
|
| 332 |
print("Saving new project to Weaviate...")
|
| 333 |
try:
|
| 334 |
projects = self.weaviate_client.collections.get("Project")
|
|
|
|
| 337 |
"description": project_data.get("description"),
|
| 338 |
"required_skills": project_data.get("required_skills"),
|
| 339 |
"max_team_size": project_data.get("max_team_size"),
|
| 340 |
+
"creator_id": creator_id,
|
| 341 |
"is_recruiting": True,
|
| 342 |
+
"team_members": [creator_id], # Creator is the first member
|
| 343 |
+
"pending_members": []
|
| 344 |
})
|
| 345 |
print(f"✅ Project saved with UUID: {project_uuid}")
|
| 346 |
+
return True, "Project created successfully!"
|
| 347 |
except Exception as e:
|
| 348 |
print(f"❌ Failed to save project: {e}")
|
| 349 |
+
return False, "Sorry, there was an error creating your project. Please try again later."
|
| 350 |
+
|
| 351 |
+
def _get_project_by_name(self, project_name: str):
|
| 352 |
+
# ... (code unchanged)
|
| 353 |
+
try:
|
| 354 |
+
projects = self.weaviate_client.collections.get("Project")
|
| 355 |
+
response = projects.query.hybrid(
|
| 356 |
+
query=project_name,
|
| 357 |
+
limit=1,
|
| 358 |
+
query_properties=["project_name", "description"]
|
| 359 |
+
)
|
| 360 |
+
return response.objects[0] if response.objects else None
|
| 361 |
+
except Exception as e:
|
| 362 |
+
print(f"Error fetching project '{project_name}': {e}")
|
| 363 |
+
return None
|
| 364 |
+
|
| 365 |
+
def request_to_join_project(self, project_name: str, user_id: str):
    """File a join request for a project found by fuzzy name match.

    Returns:
        (success, message) — success is True only when the request was newly
        recorded in the project's pending_members list.
    """
    project = self._get_project_by_name(project_name)
    if project is None:
        return False, f"🔍 Sorry, I couldn't find a project named '{project_name}'. Please check the name and try again."

    props = project.properties
    actual_project_name = props.get('project_name')

    # Reject duplicates: already on the team, or already waiting.
    if user_id in props.get("team_members", []):
        return False, f"You are already a member of the '{actual_project_name}' project."
    if user_id in props.get("pending_members", []):
        return False, f"You have already sent a request to join '{actual_project_name}'."

    try:
        collection = self.weaviate_client.collections.get("Project")
        updated_pending = [*props.get("pending_members", []), user_id]
        collection.data.update(uuid=project.uuid, properties={"pending_members": updated_pending})
    except Exception as e:
        print(f"❌ Failed to update project join requests: {e}")
        return False, "Sorry, there was an error sending your request."
    return True, f"✅ Your request to join '{actual_project_name}' has been sent!"
|
| 387 |
+
|
| 388 |
+
def get_project_requests(self, project_name: str, user_id: str):
    """Return the pending join requests for a project (creator only).

    Consistency fix: `project_name` is resolved with fuzzy hybrid search, so
    the matched project may differ from the raw query; messages now report
    the matched project's actual name, as `request_to_join_project` does.
    """
    project = self._get_project_by_name(project_name)
    if not project:
        return f"🔍 Sorry, I couldn't find a project named '{project_name}'."
    if project.properties.get("creator_id") != user_id:
        return "You are not the creator of this project, so you cannot view its requests."

    actual_project_name = project.properties.get("project_name")
    pending = project.properties.get("pending_members", [])
    if not pending:
        return f"There are currently no pending requests for '{actual_project_name}'."

    return f"Pending requests for '{actual_project_name}':\n- " + "\n- ".join(pending)
|
| 401 |
+
|
| 402 |
+
def accept_project_member(self, project_name: str, member_id: str, user_id: str):
    """Move a pending applicant onto the project's team (creator only).

    Returns a user-facing status string for every outcome.
    """
    project = self._get_project_by_name(project_name)
    if project is None:
        return f"🔍 Sorry, I couldn't find a project named '{project_name}'."

    props = project.properties
    if props.get("creator_id") != user_id:
        return "You are not the creator of this project."

    pending_list = props.get("pending_members", [])
    if member_id not in pending_list:
        return f"User '{member_id}' has not requested to join this project."

    try:
        collection = self.weaviate_client.collections.get("Project")
        pending_list.remove(member_id)  # drop the first occurrence from the queue
        collection.data.update(uuid=project.uuid, properties={
            "pending_members": pending_list,
            "team_members": props.get("team_members", []) + [member_id],
        })
    except Exception as e:
        print(f"❌ Failed to accept member: {e}")
        return "Sorry, there was an error accepting this member."
    return f"✅ User '{member_id}' has been added to the '{project_name}' team!"
|
| 427 |
+
|
| 428 |
+
# --- Smart CV & Job Matching Methods ---
|
| 429 |
+
def analyze_cv(self, cv_content: str, user_id: str):
    """Persist the user's CV text, then ask the LLM for structured feedback.

    The Weaviate write is best-effort: a failure is logged and the analysis
    still proceeds. Returns the LLM's feedback text.
    """
    print(f"Analyzing CV for user: {user_id}")

    # Best-effort upsert of the CV onto the user's record.
    try:
        user_collection = self.weaviate_client.collections.get("User")
        match = user_collection.query.fetch_objects(limit=1, filters=weaviate.classes.query.Filter.by_property("user_id").equal(user_id))
        if match.objects:
            user_collection.data.update(uuid=match.objects[0].uuid, properties={"cv_content": cv_content})
        else:
            user_collection.data.insert({"user_id": user_id, "cv_content": cv_content})
    except Exception as e:
        print(f"❌ Could not save CV for user {user_id}: {e}")

    prompt = f"""
You are an expert career coach and CV reviewer. Analyze the following CV and provide constructive feedback.
Focus on:
1. **Clarity and Conciseness:** Is the language clear? Are the sentences too long?
2. **Impactful Language:** Suggest stronger action verbs (e.g., instead of "worked on," suggest "developed," "engineered," "managed").
3. **Keywords:** Are there relevant industry keywords missing? Suggest some based on the content.
4. **Structure and Formatting:** Comment on the overall layout and readability.
Provide the feedback in a structured format with clear headings. Respond in the same language as the CV content.
--- CV CONTENT START ---
{cv_content}
--- CV CONTENT END ---
"""
    completion = self.llm_client.chat.completions.create(model=MODEL_NAME, messages=[{"role": "user", "content": prompt}], max_tokens=2048)
    return completion.choices[0].message.content.strip()
|
| 457 |
+
|
| 458 |
+
def match_jobs_to_cv(self, cv_content: str):
    """Extract skills from a CV via the LLM, then vector-search Jobs with them.

    Returns a user-facing string: the top matches, or an explanatory notice.
    """
    print("Matching jobs to CV content...")

    skill_prompt = f"Extract a list of key technical and soft skills from this CV. Return them as a single, comma-separated string. CV: {cv_content}"
    completion = self.llm_client.chat.completions.create(model=MODEL_NAME, messages=[{"role": "user", "content": skill_prompt}], max_tokens=512)
    skills_text = completion.choices[0].message.content.strip()

    if not skills_text:
        return "Could not extract skills from the CV to match jobs."

    print(f"Extracted skills: {skills_text}")

    # Embed the extracted skill list and look for the closest Job entries.
    skills_vector = self._embed_text(skills_text)
    matches = self._search_database(skills_vector, limit=3, collection_name="Job")

    if "No relevant information" in matches:
        return "🔍 I couldn't find any jobs that closely match the skills in your CV right now."
    return f"Here are the top 3 jobs that match the skills in your CV:\n\n{matches}"
|
| 476 |
+
|
| 477 |
+
# --- NEW: CV Reranking Engine ---
|
| 478 |
+
def rerank_cvs(self, requirements: str, cv_files: list):
    """Rank candidate CVs against a set of job requirements using the LLM.

    Args:
        requirements: Free-text description of the role's requirements.
        cv_files: List of dicts with "name" (filename) and "content"
            (extracted CV text) keys.

    Returns:
        A markdown-formatted ranking, or a user-facing error message.
    """
    print(f"Starting CV reranking process for requirements: {requirements}")

    # Robustness fix: nothing to rank — bail out before a pointless LLM call.
    if not cv_files:
        return "I didn't receive any CVs to rank. Please upload at least one CV."

    # Concatenate every CV, delimited by its filename, for the prompt.
    # (Idiom fix: the original used enumerate() with an unused index.)
    cv_contents_str = "".join(
        f"\n--- CV FILENAME: {cv['name']} ---\n{cv['content']}\n" for cv in cv_files
    )

    prompt = f"""
You are an expert AI-powered HR Recruiter. Your task is to analyze and rank multiple CVs based on a specific set of job requirements.
Provide a score from 1 to 100 for each CV, where 100 is a perfect match. Also, provide a brief, crisp justification for your score.

**Job Requirements:**
{requirements}

**CVs to Analyze:**
{cv_contents_str}

**Instructions:**
1. Carefully read each CV and compare it against the job requirements.
2. Assign a score based on skills, experience, and overall fit.
3. Write a short justification explaining the score.
4. Return the final result as a single JSON array of objects. Each object must have three keys: "cv_name", "score", and "justification".
5. **Important**: The JSON array should be sorted with the highest score first.

JSON Response:
"""
    try:
        response = self.llm_client.chat.completions.create(
            model=MODEL_NAME,
            messages=[{"role": "user", "content": prompt}],
            max_tokens=4096,  # Allow for longer responses with multiple CVs
            response_format={"type": "json_object"}
        )
        # JSON mode may wrap the array in an object (e.g. {"results": [...]}).
        result_data = json.loads(response.choices[0].message.content.strip())

        # Handle both list and dict responses
        ranked_list = result_data if isinstance(result_data, list) else result_data.get("results", [])

        if not ranked_list:
            return "I couldn't generate a ranking for the provided CVs. Please try again."

        # Render a numbered markdown summary, highest score first.
        output = "### CV Reranking Results\nHere are the CVs ranked by suitability:\n\n"
        for rank, item in enumerate(ranked_list, start=1):
            output += f"**{rank}. {item.get('cv_name')}**\n"
            output += f"   - **Score:** {item.get('score')}/100\n"
            output += f"   - **Justification:** {item.get('justification')}\n\n"

        return output

    except Exception as e:
        print(f"❌ Error during CV reranking: {e}")
        return "❌ An error occurred while trying to rerank the CVs. Please check the file formats and try again."
|
| 532 |
+
|
| 533 |
|
| 534 |
# --- Helper to extract text from uploaded files ---
|
| 535 |
def _extract_text_from_file(file_path):
|
| 536 |
+
# ... (code unchanged)
|
| 537 |
print(f"Extracting text from: {file_path}")
|
| 538 |
if file_path.endswith('.pdf'):
|
| 539 |
try:
|
|
|
|
| 564 |
def chat_interface_func(message: str, history: list, app_state: dict, file_obj: object):
|
| 565 |
history = history or []
|
| 566 |
current_mode = app_state.get("mode", "GENERAL")
|
|
|
|
|
|
|
| 567 |
hide_examples = gr.update(visible=False)
|
| 568 |
|
| 569 |
# --- Part 1: Handle File Uploads ---
|
| 570 |
if file_obj is not None:
|
| 571 |
+
files = file_obj if isinstance(file_obj, list) else [file_obj]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 572 |
|
| 573 |
+
# Standard single file uploads
|
| 574 |
+
if len(files) == 1:
|
| 575 |
+
file_path = files[0].name
|
| 576 |
+
text = _extract_text_from_file(file_path)
|
| 577 |
|
| 578 |
+
if current_mode == "APPLYING_CV":
|
| 579 |
+
app_state["cv_content"] = text
|
| 580 |
+
bot_message = (f"📄 CV '{os.path.basename(file_path)}' uploaded. "
|
| 581 |
+
f"Would you like me to help you write a cover letter for job **{app_state.get('job_id')}**, "
|
| 582 |
+
"or would you prefer to upload your own?")
|
| 583 |
+
history.append((None, bot_message))
|
| 584 |
+
app_state["mode"] = "APPLYING_COVER_LETTER_CHOICE"
|
| 585 |
+
return history, app_state, gr.update(visible=True, value=None, file_count="single"), hide_examples
|
| 586 |
+
|
| 587 |
+
elif current_mode == "APPLYING_COVER_LETTER_UPLOAD":
|
| 588 |
+
app_state["cover_letter_content"] = text
|
| 589 |
+
history.append((f"📄 Cover Letter '{os.path.basename(file_path)}' uploaded.", "Thank you! Submitting your application now..."))
|
| 590 |
+
success = chatbot_instance.save_application(app_state, CURRENT_USER_ID)
|
| 591 |
+
final_message = f"✅ Your application for job **{app_state.get('job_id')}** has been submitted successfully!" if success else "❌ Sorry, there was an error submitting your application."
|
| 592 |
+
history.append((None, final_message))
|
| 593 |
+
app_state = {"mode": "GENERAL"}
|
| 594 |
+
return history, app_state, gr.update(visible=False, value=None, file_count="single"), hide_examples
|
| 595 |
+
|
| 596 |
+
elif current_mode == "AWAITING_CV_FOR_ANALYSIS":
|
| 597 |
+
history.append((f"📄 CV '{os.path.basename(file_path)}' received. Analyzing now...", None))
|
| 598 |
+
feedback = chatbot_instance.analyze_cv(text, CURRENT_USER_ID)
|
| 599 |
+
job_matches = chatbot_instance.match_jobs_to_cv(text)
|
| 600 |
+
full_response = f"### CV Analysis & Feedback\n\n{feedback}\n\n---\n\n### Top Job Matches For You\n\n{job_matches}"
|
| 601 |
+
history.append((None, full_response))
|
| 602 |
+
app_state["mode"] = "GENERAL"
|
| 603 |
+
return history, app_state, gr.update(visible=False, value=None, file_count="single"), hide_examples
|
| 604 |
+
|
| 605 |
+
# NEW: Multi-file upload for Reranking
|
| 606 |
+
if current_mode == "AWAITING_CVs_FOR_RERANK":
|
| 607 |
+
history.append((f"📄 Received {len(files)} CVs. Starting the reranking process now...", None))
|
| 608 |
+
cv_files_data = []
|
| 609 |
+
for file in files:
|
| 610 |
+
cv_files_data.append({
|
| 611 |
+
"name": os.path.basename(file.name),
|
| 612 |
+
"content": _extract_text_from_file(file.name)
|
| 613 |
+
})
|
| 614 |
+
|
| 615 |
+
requirements = app_state.get("rerank_requirements")
|
| 616 |
+
ranked_results = chatbot_instance.rerank_cvs(requirements, cv_files_data)
|
| 617 |
+
|
| 618 |
+
history.append((None, ranked_results))
|
| 619 |
+
app_state["mode"] = "GENERAL"
|
| 620 |
+
return history, app_state, gr.update(visible=False, value=None, file_count="single"), hide_examples
|
| 621 |
+
|
| 622 |
|
| 623 |
# --- Part 2: Handle Text Messages ---
|
| 624 |
if message:
|
| 625 |
history.append((message, None))
|
| 626 |
|
| 627 |
+
# --- Multi-step Conversation Flows ---
|
| 628 |
+
if current_mode == "AWAITING_REQUIREMENTS_FOR_RERANK":
|
| 629 |
+
app_state["rerank_requirements"] = message
|
| 630 |
+
app_state["mode"] = "AWAITING_CVs_FOR_RERANK"
|
| 631 |
+
bot_message = "Great. Now, please upload all the CVs you want me to rank based on these requirements."
|
| 632 |
+
history.append((None, bot_message))
|
| 633 |
+
return history, app_state, gr.update(visible=True, file_count="multiple"), hide_examples
|
| 634 |
+
|
| 635 |
if current_mode == "CREATING_PROJECT_NAME":
|
| 636 |
app_state["project_name"] = message
|
| 637 |
app_state["mode"] = "CREATING_PROJECT_DESC"
|
| 638 |
history.append((None, "Great! Now, please provide a short description for your project."))
|
| 639 |
+
return history, app_state, gr.update(visible=False, file_count="single"), hide_examples
|
| 640 |
+
# ... (rest of multi-step flows are similar)
|
| 641 |
elif current_mode == "CREATING_PROJECT_DESC":
|
| 642 |
app_state["description"] = message
|
| 643 |
app_state["mode"] = "CREATING_PROJECT_SKILLS"
|
| 644 |
+
history.append((None, "What skills are required? (e.g., Python, UI/UX, Marketing)"))
|
| 645 |
+
return history, app_state, gr.update(visible=False, file_count="single"), hide_examples
|
| 646 |
elif current_mode == "CREATING_PROJECT_SKILLS":
|
| 647 |
app_state["required_skills"] = [skill.strip() for skill in message.split(',')]
|
| 648 |
app_state["mode"] = "CREATING_PROJECT_SIZE"
|
| 649 |
+
history.append((None, "Perfect. How many members are you looking for in the team?"))
|
| 650 |
+
return history, app_state, gr.update(visible=False, file_count="single"), hide_examples
|
| 651 |
elif current_mode == "CREATING_PROJECT_SIZE":
|
| 652 |
try:
|
| 653 |
app_state["max_team_size"] = int(message)
|
| 654 |
+
success, bot_message = chatbot_instance.create_project(app_state, CURRENT_USER_ID)
|
|
|
|
| 655 |
history.append((None, bot_message))
|
| 656 |
app_state = {"mode": "GENERAL"}
|
| 657 |
+
return history, app_state, gr.update(visible=False, file_count="single"), hide_examples
|
| 658 |
except ValueError:
|
| 659 |
history.append((None, "Please enter a valid number for the team size."))
|
| 660 |
+
return history, app_state, gr.update(visible=False, file_count="single"), hide_examples
|
| 661 |
+
|
| 662 |
+
elif current_mode == "AWAITING_PROJECT_TO_JOIN":
|
| 663 |
+
project_name = message
|
| 664 |
+
success, bot_message = chatbot_instance.request_to_join_project(project_name, CURRENT_USER_ID)
|
| 665 |
+
history.append((None, bot_message))
|
| 666 |
+
if success:
|
| 667 |
+
app_state["mode"] = "GENERAL"
|
| 668 |
+
return history, app_state, gr.update(visible=False, file_count="single"), hide_examples
|
| 669 |
+
|
| 670 |
+
elif current_mode == "AWAITING_PROJECT_TO_VIEW":
|
| 671 |
+
project_name = message
|
| 672 |
+
bot_message = chatbot_instance.get_project_requests(project_name, CURRENT_USER_ID)
|
| 673 |
+
history.append((None, bot_message))
|
| 674 |
+
app_state["mode"] = "GENERAL"
|
| 675 |
+
return history, app_state, gr.update(visible=False, file_count="single"), hide_examples
|
| 676 |
+
|
| 677 |
+
elif current_mode == "AWAITING_MEMBER_TO_ACCEPT":
|
| 678 |
+
app_state["member_to_accept"] = message
|
| 679 |
+
app_state["mode"] = "AWAITING_PROJECT_FOR_ACCEPT"
|
| 680 |
+
history.append((None, f"Which project do you want to accept '{message}' for?"))
|
| 681 |
+
return history, app_state, gr.update(visible=False, file_count="single"), hide_examples
|
| 682 |
+
elif current_mode == "AWAITING_PROJECT_FOR_ACCEPT":
|
| 683 |
+
project_name = message
|
| 684 |
+
member_id = app_state.get("member_to_accept")
|
| 685 |
+
bot_message = chatbot_instance.accept_project_member(project_name, member_id, CURRENT_USER_ID)
|
| 686 |
+
history.append((None, bot_message))
|
| 687 |
+
app_state = {"mode": "GENERAL"}
|
| 688 |
+
return history, app_state, gr.update(visible=False, file_count="single"), hide_examples
|
| 689 |
+
|
| 690 |
if current_mode == "APPLYING_COVER_LETTER_CHOICE":
|
| 691 |
positive_keywords = ["help", "generate", "write", "yes", "ok", "sure", "please"]
|
| 692 |
|
|
|
|
| 696 |
history.append((None, f"Here is a draft for your cover letter:\n\n---\n{cover_letter}\n\n---\n\nIf you are happy with this, please type 'submit' to send the application."))
|
| 697 |
app_state["cover_letter_content"] = cover_letter
|
| 698 |
app_state["mode"] = "CONFIRM_SUBMISSION"
|
| 699 |
+
return history, app_state, gr.update(visible=False, file_count="single"), hide_examples
|
| 700 |
elif "upload" in message.lower():
|
| 701 |
history.append((None, "Okay, please upload your cover letter file."))
|
| 702 |
app_state["mode"] = "APPLYING_COVER_LETTER_UPLOAD"
|
| 703 |
+
return history, app_state, gr.update(visible=True, file_count="single"), hide_examples
|
| 704 |
else:
|
| 705 |
history.append((None, "I'm sorry, I didn't quite understand. Do you want me to **write** a letter for you, or would you prefer to **upload** your own?"))
|
| 706 |
+
return history, app_state, gr.update(visible=True, file_count="single"), hide_examples
|
| 707 |
|
| 708 |
if current_mode == "CONFIRM_SUBMISSION":
|
| 709 |
if "submit" in message.lower():
|
| 710 |
history.append((None, "Thank you! Submitting your application now..."))
|
| 711 |
+
success = chatbot_instance.save_application(app_state, CURRENT_USER_ID)
|
| 712 |
final_message = f"✅ Your application for job **{app_state.get('job_id')}** has been submitted successfully!" if success else "❌ Sorry, there was an error submitting your application."
|
| 713 |
history.append((None, final_message))
|
| 714 |
app_state = {"mode": "GENERAL"}
|
| 715 |
+
return history, app_state, gr.update(visible=False, file_count="single"), hide_examples
|
| 716 |
else:
|
| 717 |
history.append((None, "Please type 'submit' to confirm and send your application."))
|
| 718 |
+
return history, app_state, gr.update(visible=False, file_count="single"), hide_examples
|
| 719 |
|
| 720 |
# --- General Chat & Starting New Flows ---
|
| 721 |
response = chatbot_instance.ask(message)
|
| 722 |
|
|
|
|
|
|
|
| 723 |
app_match = re.search(r"STARTING_APPLICATION_PROCESS:([\w-]+)", response)
|
| 724 |
+
|
|
|
|
| 725 |
if app_match:
|
| 726 |
job_id = app_match.group(1)
|
| 727 |
app_state["mode"] = "APPLYING_CV"
|
| 728 |
app_state["job_id"] = job_id
|
| 729 |
bot_message = f"Starting application for job **{job_id}**. Please upload your CV."
|
| 730 |
history.append((None, bot_message))
|
| 731 |
+
return history, app_state, gr.update(visible=True, file_count="single"), hide_examples
|
| 732 |
|
| 733 |
+
elif "STARTING_PROJECT_CREATION" in response:
|
| 734 |
app_state["mode"] = "CREATING_PROJECT_NAME"
|
| 735 |
bot_message = "Awesome! Let's create a new project. What would you like to name it?"
|
| 736 |
history.append((None, bot_message))
|
| 737 |
+
return history, app_state, gr.update(visible=False, file_count="single"), hide_examples
|
| 738 |
+
|
| 739 |
+
elif "INTENT_ANALYZE_CV" in response:
|
| 740 |
+
app_state["mode"] = "AWAITING_CV_FOR_ANALYSIS"
|
| 741 |
+
bot_message = "Of course! I can help with that. Please upload your CV, and I'll provide feedback and match you with the best jobs."
|
| 742 |
+
history.append((None, bot_message))
|
| 743 |
+
return history, app_state, gr.update(visible=True, file_count="single"), hide_examples
|
| 744 |
+
|
| 745 |
+
elif "INTENT_START_RERANK" in response:
|
| 746 |
+
app_state["mode"] = "AWAITING_REQUIREMENTS_FOR_RERANK"
|
| 747 |
+
bot_message = "I can definitely help with that. Please provide the job requirements or the key skills you are looking for."
|
| 748 |
+
history.append((None, bot_message))
|
| 749 |
+
return history, app_state, gr.update(visible=False, file_count="single"), hide_examples
|
| 750 |
|
| 751 |
else:
|
| 752 |
history.append((None, response))
|
| 753 |
+
return history, app_state, gr.update(visible=False, file_count="single"), hide_examples
|
| 754 |
|
| 755 |
+
# Default return
|
| 756 |
+
return history, app_state, gr.update(visible=False, file_count="single"), gr.update()
|
| 757 |
|
| 758 |
|
| 759 |
# --- 5. BUILD GRADIO UI ---
|
|
|
|
| 761 |
|
| 762 |
initial_state = {
|
| 763 |
"mode": "GENERAL", "job_id": None, "cv_content": None, "cover_letter_content": None,
|
| 764 |
+
"project_name": None, "description": None, "required_skills": None, "max_team_size": None,
|
| 765 |
+
"rerank_requirements": None
|
| 766 |
}
|
| 767 |
application_state = gr.State(initial_state)
|
| 768 |
|
|
|
|
| 775 |
|
| 776 |
chatbot_window = gr.Chatbot(height=450, label="Chat Window", bubble_full_width=False)
|
| 777 |
|
|
|
|
| 778 |
with gr.Column() as examples_container:
|
| 779 |
examples_list = [
|
| 780 |
+
"Analyze my CV",
|
| 781 |
+
"Rerank CVs for a job",
|
| 782 |
"I want to create a new project",
|
| 783 |
+
"Who applied for job_022?",
|
| 784 |
+
"Show me details about user_010"
|
| 785 |
]
|
| 786 |
with gr.Row():
|
| 787 |
btn1 = gr.Button(examples_list[0], variant='secondary')
|
|
|
|
| 793 |
|
| 794 |
example_buttons = [btn1, btn2, btn3, btn4, btn5]
|
| 795 |
|
|
|
|
| 796 |
with gr.Row() as main_input_row:
|
| 797 |
+
text_input = gr.Textbox(placeholder="Ask your question or try an example from above...", container=False, scale=7)
|
| 798 |
submit_btn = gr.Button("Send", variant="primary", scale=1)
|
| 799 |
|
| 800 |
+
file_uploader = gr.File(label="Upload Document(s)", file_types=['.pdf', '.docx', '.txt'], visible=False)
|
|
|
|
| 801 |
|
|
|
|
|
|
|
| 802 |
outputs_list = [chatbot_window, application_state, file_uploader, examples_container]
|
| 803 |
|
| 804 |
submit_btn.click(
|