Update app.py

app.py CHANGED
@@ -9,15 +9,15 @@ from functools import lru_cache
 # CONFIG
 # ----------------------------
 JSON_FILE = "form-submissions-1.json"
-#
+# Using a suitable generative LLM (Flan-T5 Large)
 MODEL_ID = "google/flan-t5-large"
 HF_API_TOKEN = os.environ.get("HF_API_TOKEN")
+# NOTE: Keeping these temp files for the filtering step, though output format changes
 FILTERED_CSV = "/tmp/filtered_candidates.csv"
 OUTPUT_FILE = "/tmp/outputs.csv"
 BATCH_SIZE = 50

 if not HF_API_TOKEN:
-    # This check is good, but ensure the token is set in your environment (or space secrets)
     raise ValueError("HF_API_TOKEN not found in environment. Add it in Space Secrets.")

 CATEGORIES = {
@@ -46,7 +46,16 @@ Candidate JSON: {candidate_str}
 Respond only 'Yes' if suitable, otherwise 'No'.
 """
     headers = {"Authorization": f"Bearer {HF_API_TOKEN}", "Content-Type": "application/json"}
-
+
+    # Adding generation parameters for strict, short output
+    payload = {
+        "inputs": prompt,
+        "parameters": {
+            "max_new_tokens": 10,
+            "return_full_text": False
+        }
+    }
+
     try:
         response = requests.post(
             f"https://api-inference.huggingface.co/models/{MODEL_ID}",
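Note: the added payload follows the standard Hugging Face Inference API request shape, "inputs" plus a "parameters" dict. As a rough standalone sanity check of the endpoint (a sketch, not part of the Space; it assumes HF_API_TOKEN is exported locally, and backends that do not use return_full_text simply ignore it):

    import os
    import requests

    MODEL_ID = "google/flan-t5-large"
    API_URL = f"https://api-inference.huggingface.co/models/{MODEL_ID}"
    HEADERS = {"Authorization": f"Bearer {os.environ['HF_API_TOKEN']}"}

    payload = {
        "inputs": "Answer Yes or No: is the sky blue?",
        # Cap the reply length; the prompt already demands a one-word answer.
        "parameters": {"max_new_tokens": 10, "return_full_text": False},
    }

    resp = requests.post(API_URL, headers=HEADERS, json=payload, timeout=30)
    resp.raise_for_status()
    print(resp.json())  # expected shape: [{"generated_text": "..."}]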
@@ -57,16 +66,13 @@ Respond only 'Yes' if suitable, otherwise 'No'.
         response.raise_for_status()
         result = response.json()

-        # Check for API error structure
         if isinstance(result, dict) and "error" in result:
             print(f"LLM API Error: {result.get('error')}")
             return "No"

-        # Extract the generated text safely and clean it up
         generated_text = result[0].get("generated_text", "No").strip().lower()

-        #
-        # We look for 'yes' or 'no' anywhere in the response and prioritize 'yes' if found.
+        # Robust check for 'yes' (in case the model is not perfectly following instructions)
         if "yes" in generated_text:
             return "Yes"
         elif "no" in generated_text:
@@ -76,6 +82,7 @@ Respond only 'Yes' if suitable, otherwise 'No'.

     except Exception as e:
         print("LLM call failed:", e)
+        # In case of API failure, it should not be cached as a negative result (but the lru_cache will cache the 'No')
         return "No"

 # ----------------------------
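Note: the new comment above flags a real issue. call_llm is wrapped in functools.lru_cache (see the hunk context), so returning "No" on a network error memoizes a transient failure as a permanent rejection for that candidate. A minimal sketch of one fix, assuming callers are updated to go through the wrapper: let the cached function raise (lru_cache stores nothing when a call raises) and fail closed only at the call site.

    import requests
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def cached_verdict(api_url, token, prompt):
        # If raise_for_status() raises, lru_cache records no entry,
        # so the next call for the same candidate retries the API.
        resp = requests.post(api_url, headers={"Authorization": f"Bearer {token}"},
                             json={"inputs": prompt}, timeout=30)
        resp.raise_for_status()
        text = resp.json()[0].get("generated_text", "no").strip().lower()
        return "Yes" if "yes" in text else "No"

    def safe_verdict(api_url, token, prompt):
        try:
            return cached_verdict(api_url, token, prompt)
        except Exception as e:
            print("LLM call failed (result not cached):", e)
            return "No"  # fail closed, but without poisoning the cache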
@@ -83,13 +90,11 @@ Respond only 'Yes' if suitable, otherwise 'No'.
 # ----------------------------
 def filter_by_roles(category_name):
     job_titles = CATEGORIES[category_name]
-    # NOTE: Assuming 'form-submissions-1.json' exists in the current directory
     try:
         with open(JSON_FILE, encoding="utf-8") as f:
             data = json.load(f)
     except FileNotFoundError:
-
-        return pd.DataFrame(), None
+        return pd.DataFrame(), f"Error: JSON file '{JSON_FILE}' not found."

     filtered = []

@@ -97,7 +102,6 @@ def filter_by_roles(category_name):
         work_exps = person.get("work_experiences", [])
         if not work_exps:
             continue
-        # Improved: Check if roleName is not None before calling .lower()
         non_fullstack_roles = [
            exp.get("roleName") for exp in work_exps
            if exp.get("roleName") and "full stack developer" not in exp.get("roleName").lower()
@@ -105,7 +109,6 @@ def filter_by_roles(category_name):
         if not non_fullstack_roles:
             continue

-        # Check for role match in the list of titles
         if any(role in job_titles for role in non_fullstack_roles):
             filtered.append({
                 "Name": person.get("name"),
@@ -117,41 +120,41 @@ def filter_by_roles(category_name):
                 "Salary": person.get("annual_salary_expectation", {}).get("full-time","N/A"),
                 "Category": category_name
             })
+
     if not filtered:
-
+        # Return a message instead of the CSV path
+        return pd.DataFrame(), f"No candidates found matching roles for category '{category_name}'."

     df = pd.DataFrame(filtered)
     df.to_csv(FILTERED_CSV, index=False)
-
+    # Return a success message
+    return df, f"{len(df)} candidates filtered by role for category '{category_name}'. Ready for LLM check."

 # ----------------------------
-# Step 2: LLM recommendations
+# Step 2: LLM recommendations (Modified to return text)
 # ----------------------------
 def llm_recommendations(category_name):
     job_titles = CATEGORIES[category_name]

     if not os.path.exists(FILTERED_CSV):
-        #
-        df_filtered,
+        # Rerun filtering to ensure the CSV exists
+        df_filtered, msg = filter_by_roles(category_name)
         if df_filtered.empty:
-            return
-
-
-
+            return msg # Return the error/no-match message
+
+    df_filtered = pd.read_csv(FILTERED_CSV)
+    df_filtered = df_filtered[df_filtered["Category"] == category_name]

     if df_filtered.empty:
-        return
+        return f"No filtered candidates found for category '{category_name}'. Run Step 1 first."

     recommended = []
-    # Drop N/A values before converting to dict, otherwise json.dumps might fail if they are NaN
     df_filtered_clean = df_filtered.fillna('N/A')
     filtered_candidates = df_filtered_clean.to_dict(orient="records")

-    # The batching loop is fine, we will rely on the improved call_llm
     for i in range(0, len(filtered_candidates), BATCH_SIZE):
         batch = filtered_candidates[i:i+BATCH_SIZE]
         for person in batch:
-            # Only send necessary info to save context length and cost
             candidate_info = {
                 "Name": person.get("Name"),
                 "Roles": person.get("Roles"),
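Note: after this hunk, filter_by_roles always returns a (DataFrame, status_message) pair, which is what lets the Gradio wiring at the bottom of the file fan the result out to the filtered_df and filter_status components. A quick headless sketch of that contract, run inside app.py's context; "Data Science" is a hypothetical stand-in for whatever keys CATEGORIES actually defines:

    df, status = filter_by_roles("Data Science")  # hypothetical category key
    print(status)         # success summary, "No candidates found ...", or a file error
    if not df.empty:
        print(df.head())  # the same preview the Dataframe component shows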
@@ -160,18 +163,17 @@ def llm_recommendations(category_name):
             candidate_str = json.dumps(candidate_info)
             response = call_llm(candidate_str, category_name, tuple(job_titles))

-            # 🚩 IMPROVEMENT: The call_llm function now returns a clean 'Yes' or 'No'
             if response == "Yes":
                 recommended.append(person)

     if not recommended:
-        return
+        return f"LLM determined no candidates are suitable for the '{category_name}' category."

     df_rec = pd.DataFrame(recommended)
-
+
+    # Sort by numeric salary to get the top 5 with lowest expected salary first
     def parse_salary(s):
         try:
-            # Remove currency symbols, commas, and convert to float
             return float(str(s).replace("$","").replace(",","").replace("N/A", str(float('inf'))))
         except:
             return float('inf')
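Note: in parse_salary above, the replace("N/A", str(float('inf'))) step is redundant, because an unparseable "N/A" already falls through to the except branch. An equivalent, slightly tidier sketch with a narrower exception:

    def parse_salary(s):
        # Strip currency formatting; anything unparseable sorts last.
        try:
            return float(str(s).replace("$", "").replace(",", ""))
        except ValueError:
            return float("inf")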
@@ -179,15 +181,26 @@ def llm_recommendations(category_name):
     df_rec["Salary_sort"] = df_rec["Salary"].apply(parse_salary)
     df_rec = df_rec.sort_values("Salary_sort").drop(columns=["Salary_sort"])
     df_top5 = df_rec.head(5)
-
+
+    # 🚩 NEW: Generate Text Output
+    candidate_names = df_top5["Name"].tolist()
+
+    if not candidate_names:
+        return f"LLM check passed, but sorting resulted in an empty list (unexpected). No recommendations to display."

-
+    output_text = f"Top {len(candidate_names)} Recommended Candidates for the '{category_name}' Category:\n\n"
+
+    for i, name in enumerate(candidate_names):
+        output_text += f"{i+1}. {name}\n"
+
+    output_text += "\nThese candidates were selected based on relevance by the LLM and sorted by lowest expected salary."
+
+    return output_text

 # ----------------------------
 # Show first 5 raw JSON candidates
 # ----------------------------
 def show_first_candidates():
-    # NOTE: Assuming 'form-submissions-1.json' exists
     try:
         with open(JSON_FILE, encoding="utf-8") as f:
             data = json.load(f)
@@ -198,30 +211,31 @@ def show_first_candidates():
         return pd.DataFrame({"Error": [f"Failed to load JSON: {e}"]})

 # ----------------------------
-# Gradio interface
+# Gradio interface (Updated)
 # ----------------------------
 with gr.Blocks() as app:
-    gr.Markdown("
+    gr.Markdown("# Candidate Recommendation Engine")

     gr.Markdown("#### Raw JSON Preview: First 5 Candidates")
     gr.Dataframe(show_first_candidates(), label="First 5 JSON Entries")

     gr.Markdown("---")
-    category_dropdown = gr.Dropdown(list(CATEGORIES.keys()), label="Select Category")
+    category_dropdown = gr.Dropdown(list(CATEGORIES.keys()), label="1. Select Category")

     # Step 1: Filter by roles
-    filter_button = gr.Button("
-    filtered_df = gr.Dataframe(label="Filtered Candidates
-
-
+    filter_button = gr.Button("2. Filter Candidates by Roles")
+    filtered_df = gr.Dataframe(label="Filtered Candidates (Preview)")
+    # 🚩 CHANGE: Display a status message for filtering
+    filter_status = gr.Textbox(label="Filter Status", placeholder="Click 'Filter Candidates by Roles' to start.")
+    filter_button.click(filter_by_roles, inputs=[category_dropdown], outputs=[filtered_df, filter_status])

     gr.Markdown("---")

     # Step 2: LLM Recommendations
-    llm_button = gr.Button("
-
-
-    llm_button.click(llm_recommendations, inputs=[category_dropdown], outputs=[
+    llm_button = gr.Button("3. Get LLM Recommendations (Text Summary)")
+    # 🚩 CHANGE: Output is now a Textbox
+    llm_output_text = gr.Textbox(label="Top Candidate Recommendations Summary", lines=10, placeholder="Click 'Get LLM Recommendations' after Step 2 completes.")
+    llm_button.click(llm_recommendations, inputs=[category_dropdown], outputs=[llm_output_text])

 if __name__ == "__main__":
     app.launch()
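Note: with both handlers now returning plain values (a DataFrame-plus-message pair and a text summary), the whole pipeline can be smoke-tested without launching the UI. A sketch, assuming HF_API_TOKEN is set and the hypothetical "Data Science" is swapped for a real CATEGORIES key:

    if __name__ == "__main__":
        df, status = filter_by_roles("Data Science")    # Step 1: writes FILTERED_CSV
        print(status)
        if not df.empty:
            print(llm_recommendations("Data Science"))  # Step 2: prints the summary
        # app.launch()  # re-enable for the interactive Space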