File size: 4,839 Bytes
7f9f518 a63012b 7f9f518 a63012b 7f9f518 a63012b 7f9f518 a63012b 7f9f518 a63012b 7f9f518 a63012b 7f9f518 a63012b 7f9f518 a63012b 7f9f518 a63012b 7f9f518 a63012b 7f9f518 a63012b 7f9f518 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 |
import { SectionKey, SECTIONS } from "@/lib/sections";
/**
* Keep scoring independent from question-module export shape to avoid build breaks.
* We only rely on fields actually used for scoring.
*/
type SelectedQuestion = {
baseId: string; // stable question id; also the key used to look up the answer in AnswerMap
section: SectionKey; // exam section (A..F) this item is scored under
type: "likert" | "mcq" | "text"; // text items are not scored in this module (AI-scored elsewhere)
prompt: string; // question text; section-A prompts are inspected for "attention check"
reverse?: boolean; // reverse-keyed likert item: scale is flipped (1<->5, 2<->4)
mcqScores?: Record<string, number>; // mcq only: option label -> raw score (rescaled from a 0..5 scale)
};
// Candidate identity captured alongside the exam; not referenced by the scoring logic in this file.
export type CandidateMeta = {
candidateName: string;
passportNumber: string; // renamed from candidateId
jobTitle: string;
};
// Answers keyed by question baseId; value is the chosen option label (likert/mcq) or free text.
export type AnswerMap = Record<string, string>; // baseId -> selected option / text
export type SectionScore = {
rawPct: number; // unweighted section average, 0..100 (rounded)
weighted: number; // rawPct scaled by the section weight from SECTIONS (rounded)
flags: string[]; // threshold warnings, e.g. "Section score below 50."
};
export type ExamScore = {
sections: Record<SectionKey, SectionScore>; // per-section raw/weighted scores plus threshold flags
overall: number; // 0..100, sum of per-section weighted scores
decision: "Strong Hire" | "Hire" | "Proceed with Conditions" | "Do Not Proceed";
validity: {
completionPct: number; // percentage of selected questions that received a non-blank answer
attentionFlags: number; // count of attention-check items answered with something other than "Agree"
inconsistentFlags: number; // incremented when reverse-keyed items are answered "high" too often
};
};
// 5-point Likert response labels mapped to 1..5; item scoring rescales this to 0..100.
const LIKERT_MAP: Record<string, number> = {
"Strongly Disagree": 1,
"Disagree": 2,
"Neutral": 3,
"Agree": 4,
"Strongly Agree": 5
};
/** Constrain `n` to the inclusive range [a, b]. */
function clamp(n: number, a: number, b: number) {
  const cappedAbove = Math.min(b, n);
  return Math.max(a, cappedAbove);
}
/** Arithmetic mean of `xs`; returns 0 for an empty list. */
function avg(xs: number[]) {
  if (xs.length === 0) return 0;
  let total = 0;
  for (const x of xs) total += x;
  return total / xs.length;
}
/**
 * Score a Likert answer onto 0..100, or null when the answer is blank
 * or not one of the recognized LIKERT_MAP labels.
 */
function scoreItemLikert(ans: string, reverse?: boolean): number | null {
  if (!ans) return null;
  const base = LIKERT_MAP[ans];
  if (!base) return null;
  // Reverse-keyed items flip the 1..5 scale (1<->5, 2<->4).
  const effective = reverse ? 6 - base : base;
  // Map 1..5 onto 0..100 in steps of 25 (exact for integer inputs).
  return clamp(25 * (effective - 1), 0, 100);
}
/**
 * Score an MCQ answer onto 0..100 via its option score table.
 * Returns null when the answer is blank, no table exists, or the
 * chosen option has no numeric score.
 */
function scoreItemMCQ(ans: string, mcqScores?: Record<string, number>): number | null {
  if (!ans || !mcqScores) return null;
  const points = mcqScores[ans];
  // Option scores live on a 0..5 scale; rescale to a percentage.
  return typeof points === "number" ? clamp((points / 5) * 100, 0, 100) : null;
}
/**
 * Score a full exam from the selected questions and the candidate's answers.
 *
 * Likert and MCQ items are scored to 0..100 per item and averaged per section;
 * text items are excluded here (scored by AI separately). Each section average
 * is scaled by its weight from SECTIONS and the weighted values are summed into
 * `overall`. Decision bands: >=80 Strong Hire, 70..79 Hire, 60..69 Proceed with
 * Conditions, otherwise Do Not Proceed; failing a critical section (B or F raw
 * score < 60) forces Do Not Proceed regardless of overall.
 *
 * Fix vs previous version: `completionPct` was NaN when `selected` was empty
 * (division by zero); it is now reported as 0.
 *
 * @param selected questions chosen for this exam (may be empty)
 * @param answers  baseId -> answer text map
 * @returns per-section scores, weighted overall (0..100), decision, and validity stats
 */
export function scoreExam(selected: SelectedQuestion[], answers: AnswerMap): ExamScore {
  const sections: Record<SectionKey, SectionScore> = {
    A: { rawPct: 0, weighted: 0, flags: [] },
    B: { rawPct: 0, weighted: 0, flags: [] },
    C: { rawPct: 0, weighted: 0, flags: [] },
    D: { rawPct: 0, weighted: 0, flags: [] },
    E: { rawPct: 0, weighted: 0, flags: [] },
    F: { rawPct: 0, weighted: 0, flags: [] }
  };
  const perSectionScores: Record<SectionKey, number[]> = { A: [], B: [], C: [], D: [], E: [], F: [] };
  let answered = 0;

  // Validity signals.
  let attentionFlags = 0;
  let inconsistentFlags = 0;
  // Simple inconsistency heuristic: reverse-keyed items answered "high" too often.
  let reverseHighCount = 0;
  let reverseCount = 0;

  for (const q of selected) {
    const a = (answers[q.baseId] ?? "").trim();
    if (a) answered++;

    let s: number | null = null;
    if (q.type === "likert") s = scoreItemLikert(a, q.reverse);
    if (q.type === "mcq") s = scoreItemMCQ(a, q.mcqScores);
    // Text items are scored by AI separately (not included directly here).
    if (s !== null) perSectionScores[q.section].push(s);

    // Attention check: section-A prompts containing "attention check" must be answered "Agree".
    // Unanswered attention checks are not flagged (completion is tracked separately).
    if (q.section === "A" && q.prompt.toLowerCase().includes("attention check")) {
      if (a && a !== "Agree") attentionFlags++;
    }

    if (q.reverse) {
      reverseCount++;
      const v = LIKERT_MAP[a] ?? 0;
      if (v >= 4) reverseHighCount++;
    }
  }

  // Only judge inconsistency with a meaningful sample of reverse-keyed items.
  if (reverseCount >= 10 && reverseHighCount / reverseCount > 0.6) inconsistentFlags++;

  for (const sec of Object.keys(sections) as SectionKey[]) {
    // avg() returns 0 for an empty list, so the average is always finite; guard defensively
    // anyway so both rawPct AND weighted stay numeric (previously only rawPct was guarded).
    const rawAvg = Math.round(avg(perSectionScores[sec]));
    const raw = Number.isFinite(rawAvg) ? rawAvg : 0;
    const def = SECTIONS.find(s => s.key === sec)!; // all six keys are statically present in SECTIONS
    const weighted = Math.round(raw * def.weight);
    const flags: string[] = [];
    if (sec === "B" && raw < 60) flags.push("Capability threshold not met (<60).");
    if (sec === "F" && raw < 60) flags.push("Functional readiness threshold not met (<60).");
    if (raw < 50) flags.push("Section score below 50.");
    sections[sec] = { rawPct: raw, weighted, flags };
  }

  const overall = Math.round(Object.values(sections).reduce((s, x) => s + x.weighted, 0));

  // Critical sections: Capability (B) and Functional readiness (F) must each reach 60.
  const failCritical = sections.B.rawPct < 60 || sections.F.rawPct < 60;
  let decision: ExamScore["decision"] = "Do Not Proceed";
  if (!failCritical) {
    if (overall >= 80) decision = "Strong Hire";
    else if (overall >= 70) decision = "Hire";
    else if (overall >= 60) decision = "Proceed with Conditions";
  }

  // Guard against division by zero when no questions were selected.
  const completionPct = selected.length ? Math.round((answered / selected.length) * 100) : 0;

  return {
    sections,
    overall,
    decision,
    validity: { completionPct, attentionFlags, inconsistentFlags }
  };
}
|