"""
Input Processing Layer
Parses and normalizes raw inputs (job description, company context, resume)
into structured intermediate representations for feature extraction.
"""
import json
import re
from dataclasses import dataclass, field, asdict
from typing import Optional
@dataclass
class CompanyContext:
    # Maturity stage: seed, series_a, series_b, series_c, growth, public,
    # enterprise, government, or nonprofit (see validate()).
    stage: str
    industry: str
    # Free-form band description, e.g. "$150K-$200K" or "L5 band".
    compensation_band: str
    location: str
    # One of: onsite, hybrid, remote, flexible.
    remote_type: str
    headcount: Optional[int] = None
    glassdoor_rating: Optional[float] = None

    def validate(self) -> list[str]:
        """Check the categorical fields and return error messages.

        Comparisons are case-insensitive; field values are not modified.
        An empty list means the context passed validation.
        """
        known_stages = {
            "seed", "series_a", "series_b", "series_c", "growth",
            "public", "enterprise", "government", "nonprofit",
        }
        known_remote = {"onsite", "hybrid", "remote", "flexible"}
        problems: list[str] = []
        if self.stage.lower() not in known_stages:
            problems.append(f"Unknown company stage: {self.stage}")
        if self.remote_type.lower() not in known_remote:
            problems.append(f"Unknown remote type: {self.remote_type}")
        return problems
@dataclass
class ProcessedInput:
    """Structured bundle of normalized inputs plus derived metadata."""
    job_description: str            # cleaned JD text
    company_context: "CompanyContext"
    resume_text: str                # cleaned resume text
    jd_sections: dict = field(default_factory=dict)
    resume_sections: dict = field(default_factory=dict)
    data_quality_score: float = 0.0  # 0-1 completeness score
    warnings: list = field(default_factory=list)

    def to_dict(self) -> dict:
        """Serialize to a plain dict; company_context becomes a nested dict.

        Section dicts and the warnings list are returned by reference,
        not copied (which is why this is not simply asdict(self)).
        """
        payload: dict = {}
        payload["job_description"] = self.job_description
        payload["company_context"] = asdict(self.company_context)
        payload["resume_text"] = self.resume_text
        payload["jd_sections"] = self.jd_sections
        payload["resume_sections"] = self.resume_sections
        payload["data_quality_score"] = self.data_quality_score
        payload["warnings"] = self.warnings
        return payload
class InputProcessor:
    """Processes raw inputs into structured representations.

    Normalizes whitespace, heuristically segments the job description and
    resume into labeled sections via regex, and scores input completeness
    (0-1) so downstream consumers can gate on data quality.
    """

    def process(
        self,
        job_description: str,
        company_context: dict,
        resume_text: str,
    ) -> "ProcessedInput":
        """Parse and normalize raw inputs into a ProcessedInput.

        Args:
            job_description: Raw job description text.
            company_context: Field-name -> value mapping forwarded directly
                to the CompanyContext constructor.
            resume_text: Raw resume text.

        Returns:
            ProcessedInput carrying cleaned text, heuristic sections, a
            0-1 data-quality score, and accumulated warnings.

        Raises:
            TypeError: If company_context has keys CompanyContext does not
                accept (dataclass constructor rejects them).
        """
        ctx = CompanyContext(**company_context)
        warnings = ctx.validate()
        jd_clean = self._clean_text(job_description)
        resume_clean = self._clean_text(resume_text)
        jd_sections = self._segment_jd(jd_clean)
        resume_sections = self._segment_resume(resume_clean)
        data_quality = self._assess_data_quality(jd_clean, resume_clean, ctx)
        # Below 0.3 means fewer than a third of the richness signals fired;
        # warn rather than fail so callers decide how to proceed.
        if data_quality < 0.3:
            warnings.append("LOW_DATA_QUALITY: Inputs may be too sparse for reliable scoring")
        return ProcessedInput(
            job_description=jd_clean,
            company_context=ctx,
            resume_text=resume_clean,
            jd_sections=jd_sections,
            resume_sections=resume_sections,
            data_quality_score=data_quality,
            warnings=warnings,
        )

    def _clean_text(self, text: str) -> str:
        """Normalize line endings, collapse space/tab runs and blank lines."""
        # \r\n? also catches bare \r (classic-Mac or mangled pastes); the
        # previous \r\n-only pattern left stray carriage returns behind,
        # which broke the \n-anchored section regexes below.
        text = re.sub(r"\r\n?", "\n", text)
        text = re.sub(r"[ \t]+", " ", text)       # collapse horizontal whitespace
        text = re.sub(r"\n{3,}", "\n\n", text)    # cap runs of blank lines at one
        return text.strip()

    def _segment_jd(self, jd: str) -> dict:
        """Heuristic segmentation of job description into sections.

        Returns a dict with fixed keys (title, responsibilities,
        requirements, preferred, benefits, about, full_text); sections
        whose heading is not found stay "".
        """
        sections = {
            "title": "",
            "responsibilities": "",
            "requirements": "",
            "preferred": "",
            "benefits": "",
            "about": "",
            "full_text": jd,
        }
        # Each pattern captures from a known heading up to the next known
        # heading or end of text; DOTALL lets the body span newlines.
        patterns = {
            "responsibilities": r"(?:responsibilities|what you.?ll do|the role|job duties)[:\s]*\n(.*?)(?=\n(?:requirements|qualifications|what we|preferred|benefits|about)|$)",
            "requirements": r"(?:requirements|qualifications|what we.?re looking for|must have|minimum)[:\s]*\n(.*?)(?=\n(?:preferred|nice to have|benefits|about|responsibilities)|$)",
            "preferred": r"(?:preferred|nice to have|bonus|ideal)[:\s]*\n(.*?)(?=\n(?:benefits|about|responsibilities|requirements)|$)",
            "benefits": r"(?:benefits|perks|what we offer|compensation)[:\s]*\n(.*?)(?=\n(?:about|responsibilities|requirements)|$)",
        }
        for section, pattern in patterns.items():
            match = re.search(pattern, jd, re.IGNORECASE | re.DOTALL)
            if match:
                sections[section] = match.group(1).strip()
        # Extract title from first non-empty line
        lines = [l.strip() for l in jd.split("\n") if l.strip()]
        if lines:
            sections["title"] = lines[0]
        return sections

    def _segment_resume(self, resume: str) -> dict:
        """Heuristic segmentation of resume.

        Same heading-to-next-heading capture strategy as _segment_jd;
        unmatched sections stay "".
        """
        sections = {
            "contact": "",
            "summary": "",
            "experience": "",
            "education": "",
            "skills": "",
            "projects": "",
            "certifications": "",
            "full_text": resume,
        }
        patterns = {
            "summary": r"(?:summary|profile|objective|about)[:\s]*\n(.*?)(?=\n(?:experience|education|skills|projects|work)|$)",
            "experience": r"(?:experience|work history|employment|professional background)[:\s]*\n(.*?)(?=\n(?:education|skills|projects|certifications)|$)",
            "education": r"(?:education|academic|degrees?)[:\s]*\n(.*?)(?=\n(?:skills|projects|certifications|experience)|$)",
            "skills": r"(?:skills|technical skills|technologies|competencies)[:\s]*\n(.*?)(?=\n(?:projects|certifications|education|experience)|$)",
        }
        for section, pattern in patterns.items():
            match = re.search(pattern, resume, re.IGNORECASE | re.DOTALL)
            if match:
                sections[section] = match.group(1).strip()
        return sections

    def _assess_data_quality(self, jd: str, resume: str, ctx: "CompanyContext") -> float:
        """Score 0-1 representing input completeness and richness.

        Counts up to 10 binary signals: 3 from the JD (length tiers,
        presence of section keywords), 4 from the resume (length tiers,
        years, quantified achievements), 3 from the company context
        (non-"unknown" compensation band, industry, stage).
        """
        signals = 0
        total = 10
        # JD quality
        if len(jd) > 200:
            signals += 1
        if len(jd) > 500:
            signals += 1
        if any(kw in jd.lower() for kw in ["requirements", "qualifications", "responsibilities"]):
            signals += 1
        # Resume quality
        if len(resume) > 300:
            signals += 1
        if len(resume) > 800:
            signals += 1
        if re.search(r"\d{4}", resume):  # Contains years
            signals += 1
        if re.search(r"\d+%|\$\d+|\d+\s*(users|customers|team|engineers)", resume, re.IGNORECASE):
            signals += 1  # Quantified achievements
        # Company context quality
        if ctx.compensation_band and ctx.compensation_band != "unknown":
            signals += 1
        if ctx.industry and ctx.industry != "unknown":
            signals += 1
        if ctx.stage and ctx.stage != "unknown":
            signals += 1
        return signals / total