Commit · 4b29117
1 Parent(s): 6b71f21

Base directory setup

Files changed:
- app.py (+325 / -60)
- colin-tts.md (+1544 / -0)
- requirements.txt (+7 / -0)
- structured-cv.json (+129 / -0)
app.py
CHANGED
@@ -2,69 +2,334 @@ import gradio as gr
 from huggingface_hub import InferenceClient


-
-
-
-
-
-
-
-
-):
-    """
-    For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-    """
-    client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
-
-    messages = [{"role": "system", "content": system_message}]
-
-    messages.extend(history)
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        choices = message.choices
-        token = ""
-        if len(choices) and choices[0].delta.content:
-            token = choices[0].delta.content
-
-        response += token
-        yield response


-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
 )

-
-
-
-


-
-demo.launch()
 from huggingface_hub import InferenceClient


+import gradio as gr
+import json
+import os
+import re
+import subprocess
+import uuid
+from pathlib import Path
+from typing import List, Dict, Any

+import llama_cpp
+from huggingface_hub import hf_hub_download
+from qdrant_client import QdrantClient
+from qdrant_client.models import Distance, VectorParams, PointStruct

+# =========================
+# CONFIG: FILL THESE IN
+# =========================
+REPO_EMBED = "mixedbread-ai/mxbai-embed-large-v1"
+REPO_LLM = "unsloth/Phi-4-mini-instruct-GGUF"
+REPO_PIPER = "nardocolin/nardocolin-pipertts"
+
+EMBED_FILE = "gguf/mxbai-embed-large-v1-f16.gguf"
+LLM_FILE = "Phi-4-mini-instruct.Q8_0.gguf"
+PIPER_ONNX = "high/colin-voice_high.onnx"
+PIPER_JSON = "high/colin-voice_high.onnx.json"
+
+EMBED_DIM = 1024
+COLLECTION_NAME = "data"
+
+# =========================
+# PATHS / DIRECTORIES
+# =========================
+SPACE_DIR = Path(__file__).parent
+DATA_DIR = SPACE_DIR / "data"
+EMB_DIR = SPACE_DIR / "embeddings"
+AUDIO_DIR = SPACE_DIR / "audio"
+DATA_DIR.mkdir(exist_ok=True)
+EMB_DIR.mkdir(exist_ok=True)
+AUDIO_DIR.mkdir(exist_ok=True)
+
+STRUCTURED_JSON = DATA_DIR / "structured-cv.json"
+
+# =========================
+# DOWNLOAD WEIGHTS
+# =========================
+embed_path = hf_hub_download(REPO_EMBED, EMBED_FILE)
+llm_path = hf_hub_download(REPO_LLM, LLM_FILE)
+piper_onnx = hf_hub_download(REPO_PIPER, PIPER_ONNX)
+piper_json = hf_hub_download(REPO_PIPER, PIPER_JSON)
+
+# =========================
+# LOAD MODELS (CPU)
+# =========================
+embedding_llm = llama_cpp.Llama(
+    model_path=embed_path,
+    embedding=True,
+    verbose=False
+)
+
+llm = llama_cpp.Llama(
+    model_path=llm_path,
+    n_ctx=8192,
+    verbose=False
 )

+# =========================
+# QDRANT (LOCAL, FILE BACKEND)
+# =========================
+client = QdrantClient(path=str(EMB_DIR))
+
+def qdrant_collection_exists() -> bool:
+    try:
+        cols = client.get_collections().collections
+        return any(c.name == COLLECTION_NAME for c in cols)
+    except Exception:
+        return False
+
+def ensure_collection():
+    if qdrant_collection_exists():
+        return
+    client.create_collection(
+        collection_name=COLLECTION_NAME,
+        vectors_config=VectorParams(size=EMBED_DIM, distance=Distance.COSINE),
+    )
+
+# =========================
+# RAG BUILD FROM STRUCTURED JSON
+# =========================
+def _extract_texts_from_structured_json(d: Dict[str, Any]) -> List[str]:
+    texts: List[str] = []
+
+    # summary
+    if d.get("summary"):
+        texts.append(d["summary"])
+
+    # professional_focus
+    pf = d.get("professional_focus", {})
+    for lst_key in ("problem_solving_style", "leadership_and_teamwork"):
+        for item in pf.get(lst_key, []) or []:
+            texts.append(item)
+
+    # technical_philosophy
+    tp = d.get("technical_philosophy", {})
+    if tp.get("title"):
+        texts.append(tp["title"])
+    for pt in tp.get("points", []) or []:
+        texts.append(pt)
+
+    # education details
+    edu = d.get("education", {})
+    if edu.get("degree"):
+        texts.append(f"{edu.get('institution','')} – {edu['degree']}")
+    for det in edu.get("details", []) or []:
+        texts.append(det)
+
+    # projects
+    for p in d.get("projects", []) or []:
+        if p.get("title"):
+            texts.append(p["title"])
+        if p.get("organization"):
+            texts.append(p["organization"])
+        for c in p.get("contributions", []) or []:
+            texts.append(c)
+        if p.get("key_takeaways"):
+            texts.append(p["key_takeaways"])
+        if p.get("technical_deep_dive"):
+            texts.append(p["technical_deep_dive"])
+
+    # experience
+    for e in d.get("experience", []) or []:
+        if e.get("role") and e.get("company"):
+            texts.append(f"{e['role']} @ {e['company']}")
+        if e.get("description"):
+            texts.append(e["description"])
+
+    # skills (flatten)
+    skills = d.get("skills", {})
+    for k, v in skills.items():
+        if isinstance(v, list):
+            for item in v:
+                if isinstance(item, dict):
+                    # spoken_languages entries
+                    lang = item.get("language")
+                    prof = item.get("proficiency")
+                    if lang and prof:
+                        texts.append(f"{lang} – {prof}")
+                else:
+                    texts.append(str(item))
+
+    # personal_info (light)
+    pi = d.get("personal_info", {})
+    for key in ("name", "email", "linkedin", "website"):
+        if pi.get(key):
+            texts.append(str(pi[key]))
+
+    # Deduplicate & trim
+    final = []
+    seen = set()
+    for t in texts:
+        t = (t or "").strip()
+        if not t:
+            continue
+        if t in seen:
+            continue
+        seen.add(t)
+        final.append(t)
+    return final
+
+def build_rag_from_structured_json() -> int:
+    """(Re)builds Qdrant from data/structured-cv.json. Returns number of points."""
+    if not STRUCTURED_JSON.exists():
+        raise FileNotFoundError("structured-cv.json not found in ./data")
+
+    with open(STRUCTURED_JSON, "r", encoding="utf-8") as f:
+        data = json.load(f)
+
+    texts = _extract_texts_from_structured_json(data)
+    if not texts:
+        return 0
+
+    # Compute embeddings (batch)
+    points: List[PointStruct] = []
+    batch = 32
+    for i in range(0, len(texts), batch):
+        chunk = texts[i:i+batch]
+        embs = embedding_llm.create_embedding(chunk)["data"]
+        for t, e in zip(chunk, embs):
+            points.append(PointStruct(
+                id=str(uuid.uuid4()),
+                vector=e["embedding"],
+                payload={"text": t}
+            ))
+
+    # Reset collection to avoid dupes
+    try:
+        client.delete_collection(collection_name=COLLECTION_NAME)
+    except Exception:
+        pass
+    ensure_collection()
+    client.upsert(collection_name=COLLECTION_NAME, points=points, wait=True)
+    return len(points)
+
+# =========================
+# LLM + RAG INFERENCE
+# =========================
+def clean_text_for_tts(text: str) -> str:
+    text = re.sub(r"[^\x00-\x7F]+", "", text)
+    text = re.sub(r"\s+", " ", text)
+    return text.strip()
+
+PERSONA_TEMPLATE = """You are Colin Salvatore Nardo, a Computing Science student at the University of Glasgow. Your persona is friendly, down-to-earth, and you must always speak in the first person ("I", "my", "me"). Your entire knowledge and experiences are defined EXCLUSIVELY by the "Knowledge Base" below.
+
+CRITICAL RULES
+1) ZERO FABRICATION. Only use the Knowledge Base; otherwise say you don't have those details.
+2) You ARE Colin. Never reveal you are an AI.
+3) Keep answers concise and natural.
+
+Knowledge Base:
+{context}
+
+Question: {question}
+"""
+
+def rag_context(query: str, k: int = 5) -> str:
+    try:
+        vec = embedding_llm.create_embedding(query)["data"][0]["embedding"]
+        hits = client.search(collection_name=COLLECTION_NAME, query_vector=vec, limit=k)
+        if not hits:
+            return ""
+        return "\n\n".join([h.payload.get("text", "") for h in hits])
+    except Exception:
+        return ""
+
+def llm_answer(question: str, history: List[Dict[str, str]]) -> str:
+    ctx = rag_context(question, k=5)
+    system_msg = PERSONA_TEMPLATE.format(context=ctx, question=question)
+
+    messages = [{"role": "system", "content": system_msg}]
+    # (Optional) include short history
+    for m in history[-8:]:
+        messages.append(m)
+    messages.append({"role": "user", "content": question})
+
+    out = llm.create_chat_completion(messages=messages, stream=False)
+    return out["choices"][0]["message"]["content"].strip()
+
+def synthesize_tts(text: str) -> str | None:
+    text = clean_text_for_tts(text)
+    wav_path = AUDIO_DIR / f"resp_{uuid.uuid4().hex}.wav"
+    cmd = [
+        "piper",
+        "--model", piper_onnx,
+        "--config", piper_json,
+        "--output_file", str(wav_path)
+    ]
+    try:
+        proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, text=True)
+        proc.communicate(text + "\n", timeout=60)
+        if proc.returncode == 0 and wav_path.exists():
+            return str(wav_path)
+    except Exception:
+        pass
+    return None
+
+# =========================
+# BOOTSTRAP: ensure RAG exists (build once)
+# =========================
+try:
+    if not qdrant_collection_exists():
+        n = build_rag_from_structured_json()
+        print(f"[RAG] Built collection with {n} chunks.")
+    else:
+        print("[RAG] Existing collection found; skipping rebuild.")
+except Exception as e:
+    print(f"[RAG] Skipped build: {e}")
+
+# =========================
+# GRADIO UI
+# =========================
+with gr.Blocks(title="Colin-AI (CPU) — Local LLM + RAG + TTS") as demo:
+    gr.Markdown("### Colin-AI — CPU-only demo (phi-4-mini + Qdrant RAG + Piper TTS)")
+
+    with gr.Row():
+        chat = gr.Chatbot(height=360)
+    with gr.Row():
+        q = gr.Textbox(label="Ask Colin", placeholder="Ask something…", scale=4)
+        send = gr.Button("Send", scale=1)
+    with gr.Row():
+        tts_toggle = gr.Checkbox(value=True, label="Speak reply (Piper)")
+        audio_out = gr.Audio(label="TTS", type="filepath")
+
+    state = gr.State([])
+    last_answer = gr.State("")
+
+    def respond(user_msg, history):
+        if not user_msg or not user_msg.strip():
+            return history, None, history
+        ans = llm_answer(user_msg, history)
+        history = history + [{"role": "user", "content": user_msg}, {"role": "assistant", "content": ans}]
+        pairs = []
+        for i in range(0, len(history), 2):
+            u = history[i]["content"] if i < len(history) else ""
+            a = history[i + 1]["content"] if i + 1 < len(history) else ""
+            pairs.append((u, a))
+        return pairs, ans, history
+
+    def maybe_tts(answer_text, tts_on):
+        if not tts_on or not answer_text:
+            return None
+        return synthesize_tts(answer_text)
+
+    send.click(respond, [q, state], [chat, last_answer, state]) \
+        .then(maybe_tts, [last_answer, tts_toggle], [audio_out])
+
+    q.submit(respond, [q, state], [chat, last_answer, state]) \
+        .then(maybe_tts, [last_answer, tts_toggle], [audio_out])
+
+    gr.Markdown("---")
+    rebuild_btn = gr.Button("Build / Refresh RAG from structured-cv.json")
+    rebuild_log = gr.Markdown()
+
+    def rebuild():
+        try:
+            n = build_rag_from_structured_json()
+            return f"✅ RAG rebuilt with {n} chunks."
+        except Exception as e:
+            return f"❌ RAG rebuild failed: {e}"

+    rebuild_btn.click(fn=rebuild, inputs=None, outputs=rebuild_log)

+demo.launch()
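Taken together, the added code is a self-contained RAG loop: the question is embedded with mxbai-embed-large, the top-k CV chunks come back from the local Qdrant collection, and they are injected into the persona prompt before Phi-4-mini answers; Piper then reads the reply aloud. A minimal sketch of that round trip, using only the functions defined in the new app.py (the question string is illustrative, not part of the commit):

```python
# Minimal sketch of the RAG round trip from app.py above.
# Run in the same process as app.py, so the models and Qdrant client are loaded;
# the question text here is an illustrative assumption.
n = build_rag_from_structured_json()   # embed CV chunks and upsert into Qdrant
print(f"[RAG] indexed {n} chunks")

question = "What did you study at Glasgow?"
print(rag_context(question, k=5))      # the top-5 chunks the LLM will see
answer = llm_answer(question, history=[])
wav_path = synthesize_tts(answer)      # path to a WAV file, or None if piper failed
print(answer, wav_path)
```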
colin-tts.md
ADDED
@@ -0,0 +1,1544 @@
<file_map>
C:/Users/User/Desktop/colin-tts
├── backend
│   ├── archive
│   │   ├── build_index.py
│   │   ├── data_creation.py
│   │   ├── data.json
│   │   └── local_testing.py
│   ├── analyze_test_results.py
│   ├── auth.py
│   ├── database.py
│   ├── llm_service.py
│   ├── main.py
│   ├── models.py
│   ├── structured_build_index.py
│   └── utils.py
└── run.txt

</file_map>

<file_contents>
File: C:/Users/User/Desktop/colin-tts/backend/analyze_test_results.py
```python
#!/usr/bin/env python
"""
Model Testing Data Analyzer

This script provides comprehensive analysis tools for the model testing results.
It can generate detailed reports, comparisons, and visualizations from the test data.
"""

import sqlite3
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
from typing import Dict, List, Optional
import argparse
from rich.console import Console
from rich.table import Table

BASE_DIR = Path(__file__).parent.parent
RESULTS_DIR = BASE_DIR / "test_results"
console = Console()

class ModelTestAnalyzer:
    """Analyze model testing results"""

    def __init__(self, db_path: Optional[Path] = None):
        self.db_path = db_path or RESULTS_DIR / "model_tests.db"
        self.df = None

    def load_data(self, session_id: Optional[str] = None) -> pd.DataFrame:
        """Load data from SQLite database"""
        if not self.db_path.exists():
            console.print(f"[red]Database not found: {self.db_path}[/red]")
            return pd.DataFrame()

        query = "SELECT * FROM test_results"
        params = []

        if session_id:
            query += " WHERE session_id = ?"
            params.append(session_id)

        query += " ORDER BY timestamp"

        with sqlite3.connect(self.db_path) as conn:
            self.df = pd.read_sql_query(query, conn, params=params)

        # Convert timestamp to datetime
        self.df['timestamp'] = pd.to_datetime(self.df['timestamp'])

        console.print(f"[green]Loaded {len(self.df)} test results[/green]")
        return self.df

    def get_sessions(self) -> List[str]:
        """Get all available session IDs"""
        if not self.db_path.exists():
            return []

        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.execute("SELECT DISTINCT session_id FROM test_results ORDER BY timestamp DESC")
            return [row[0] for row in cursor.fetchall()]

    def model_performance_summary(self) -> pd.DataFrame:
        """Generate model performance summary"""
        if self.df is None or self.df.empty:
            return pd.DataFrame()

        # Filter successful tests only
        successful = self.df[self.df['error_occurred'] == False]

        summary = successful.groupby('model_name').agg({
            'model_size_mb': 'first',
            'inference_time': ['mean', 'std', 'min', 'max'],
            'tokens_per_second': ['mean', 'std', 'min', 'max'],
            'memory_increase_mb': ['mean', 'std'],
            'cpu_usage_avg': ['mean', 'std'],
            'follows_persona': ['mean', 'count'],
            'response_relevance_score': ['mean', 'std'],
            'error_occurred': 'sum'
        }).round(3)

        # Flatten column names
        summary.columns = ['_'.join(col).strip('_') for col in summary.columns]

        # Calculate success rate
        total_tests = self.df.groupby('model_name').size()
        summary['success_rate'] = (1 - summary['error_occurred_sum'] / total_tests) * 100

        return summary

    def question_category_analysis(self) -> pd.DataFrame:
        """Analyze performance by question category"""
        if self.df is None or self.df.empty:
            return pd.DataFrame()

        # We need to categorize questions based on content
        def categorize_question(question):
            question_lower = question.lower()
            if any(greeting in question_lower for greeting in ['hi', 'hello', 'how are you', 'good morning']):
                return 'greeting'
            elif any(tech in question_lower for tech in ['backend', 'react', 'programming', 'database', 'project']):
                return 'technical'
            elif any(contact in question_lower for contact in ['contact', 'email', 'portfolio', 'available']):
                return 'contact'
            elif any(personal in question_lower for personal in ['favourite', 'favorite', 'free time', 'from']):
                return 'personal'
            else:
                return 'edge_case'

        self.df['question_category'] = self.df['question'].apply(categorize_question)

        successful = self.df[self.df['error_occurred'] == False]

        category_analysis = successful.groupby(['model_name', 'question_category']).agg({
            'inference_time': 'mean',
            'tokens_per_second': 'mean',
            'follows_persona': 'mean',
            'response_relevance_score': 'mean'
        }).round(3)

        return category_analysis

    def generate_visualizations(self, output_dir: Optional[Path] = None):
        """Generate visualization plots"""
        if self.df is None or self.df.empty:
            console.print("[red]No data to visualize[/red]")
            return

        output_dir = output_dir or RESULTS_DIR / "visualizations"
        output_dir.mkdir(exist_ok=True)

        # Set style
        plt.style.use('seaborn-v0_8')
        sns.set_palette("husl")

        successful = self.df[self.df['error_occurred'] == False]

        # 1. Performance comparison
        fig, axes = plt.subplots(2, 2, figsize=(15, 10))
        fig.suptitle('Model Performance Comparison', fontsize=16)

        # Inference time
        sns.boxplot(data=successful, x='model_name', y='inference_time', ax=axes[0,0])
        axes[0,0].set_title('Inference Time Distribution')
        axes[0,0].tick_params(axis='x', rotation=45)

        # Tokens per second
        sns.boxplot(data=successful, x='model_name', y='tokens_per_second', ax=axes[0,1])
        axes[0,1].set_title('Tokens per Second Distribution')
        axes[0,1].tick_params(axis='x', rotation=45)

        # Memory usage
        sns.boxplot(data=successful, x='model_name', y='memory_increase_mb', ax=axes[1,0])
        axes[1,0].set_title('Memory Usage Distribution')
        axes[1,0].tick_params(axis='x', rotation=45)

        # CPU usage
        sns.boxplot(data=successful, x='model_name', y='cpu_usage_avg', ax=axes[1,1])
        axes[1,1].set_title('CPU Usage Distribution')
        axes[1,1].tick_params(axis='x', rotation=45)

        plt.tight_layout()
        plt.savefig(output_dir / 'performance_comparison.png', dpi=300, bbox_inches='tight')
        plt.close()

        # 2. Model size vs performance
        model_summary = self.model_performance_summary()
        if not model_summary.empty:
            fig, axes = plt.subplots(1, 2, figsize=(12, 5))

            # Size vs Speed
            axes[0].scatter(model_summary['model_size_mb_first'],
                            model_summary['inference_time_mean'])
            axes[0].set_xlabel('Model Size (MB)')
            axes[0].set_ylabel('Average Inference Time (s)')
            axes[0].set_title('Model Size vs Inference Speed')

            # Size vs Efficiency
            axes[1].scatter(model_summary['model_size_mb_first'],
                            model_summary['tokens_per_second_mean'])
            axes[1].set_xlabel('Model Size (MB)')
            axes[1].set_ylabel('Average Tokens per Second')
            axes[1].set_title('Model Size vs Token Generation Efficiency')

            plt.tight_layout()
            plt.savefig(output_dir / 'size_vs_performance.png', dpi=300, bbox_inches='tight')
            plt.close()

        # 3. Response quality metrics
        quality_metrics = ['follows_persona', 'response_relevance_score']
        fig, axes = plt.subplots(1, len(quality_metrics), figsize=(12, 5))

        for i, metric in enumerate(quality_metrics):
            sns.barplot(data=successful, x='model_name', y=metric, ax=axes[i])
            axes[i].set_title(f'{metric.replace("_", " ").title()}')
            axes[i].tick_params(axis='x', rotation=45)

        plt.tight_layout()
        plt.savefig(output_dir / 'quality_metrics.png', dpi=300, bbox_inches='tight')
        plt.close()

        console.print(f"[green]Visualizations saved to {output_dir}[/green]")

    def export_detailed_report(self, output_file: Optional[Path] = None):
        """Export detailed analysis report"""
        if self.df is None or self.df.empty:
            console.print("[red]No data to export[/red]")
            return

        output_file = output_file or RESULTS_DIR / f"detailed_analysis_{pd.Timestamp.now().strftime('%Y%m%d_%H%M%S')}.xlsx"

        with pd.ExcelWriter(output_file, engine='openpyxl') as writer:
            # Raw data
            self.df.to_excel(writer, sheet_name='Raw Data', index=False)

            # Model summary
            model_summary = self.model_performance_summary()
            if not model_summary.empty:
                model_summary.to_excel(writer, sheet_name='Model Summary')

            # Category analysis
            category_analysis = self.question_category_analysis()
            if not category_analysis.empty:
                category_analysis.to_excel(writer, sheet_name='Category Analysis')

            # Error analysis
            errors = self.df[self.df['error_occurred'] == True]
            if not errors.empty:
                error_summary = errors.groupby('model_name')['error_message'].value_counts()
                error_summary.to_excel(writer, sheet_name='Error Analysis')

        console.print(f"[green]Detailed report exported to {output_file}[/green]")

    def print_summary_report(self):
        """Print a summary report to console"""
        if self.df is None or self.df.empty:
            console.print("[red]No data to analyze[/red]")
            return

        console.print("\n[bold blue]═══ MODEL TESTING ANALYSIS REPORT ═══[/bold blue]")

        # Basic statistics
        total_tests = len(self.df)
        successful_tests = len(self.df[self.df['error_occurred'] == False])
        unique_models = self.df['model_name'].nunique()

        console.print(f"\n[bold]Overview:[/bold]")
        console.print(f"  Total tests: {total_tests}")
        console.print(f"  Successful tests: {successful_tests} ({successful_tests/total_tests*100:.1f}%)")
        console.print(f"  Models tested: {unique_models}")

        # Model performance summary
        model_summary = self.model_performance_summary()
        if not model_summary.empty:
            console.print(f"\n[bold]Model Performance Summary:[/bold]")

            table = Table()
            table.add_column("Model", style="cyan")
            table.add_column("Size (MB)", justify="right")
            table.add_column("Avg Time (s)", justify="right", style="green")
            table.add_column("Avg Tokens/sec", justify="right", style="green")
            table.add_column("Success Rate (%)", justify="right", style="yellow")
            table.add_column("Persona Score", justify="right", style="magenta")

            for model_name in model_summary.index:
                row = model_summary.loc[model_name]
                table.add_row(
                    model_name,
                    f"{row['model_size_mb_first']:.1f}",
                    f"{row['inference_time_mean']:.2f}",
                    f"{row['tokens_per_second_mean']:.1f}",
                    f"{row['success_rate']:.1f}",
                    f"{row['follows_persona_mean']*100:.1f}%"
                )

            console.print(table)

        # Best performers
        if not model_summary.empty:
            console.print(f"\n[bold green]🏆 Best Performers:[/bold green]")
            fastest = model_summary['inference_time_mean'].idxmin()
            most_efficient = model_summary['tokens_per_second_mean'].idxmax()
            best_persona = model_summary['follows_persona_mean'].idxmax()

            console.print(f"  Fastest: [bold]{fastest}[/bold] ({model_summary.loc[fastest, 'inference_time_mean']:.2f}s)")
            console.print(f"  Most Efficient: [bold]{most_efficient}[/bold] ({model_summary.loc[most_efficient, 'tokens_per_second_mean']:.1f} tokens/sec)")
            console.print(f"  Best Persona: [bold]{best_persona}[/bold] ({model_summary.loc[best_persona, 'follows_persona_mean']*100:.1f}%)")

def main():
    parser = argparse.ArgumentParser(description="Analyze model testing results")
    parser.add_argument("--session", help="Specific session ID to analyze")
    parser.add_argument("--list-sessions", action="store_true", help="List all available sessions")
    parser.add_argument("--visualize", action="store_true", help="Generate visualizations")
    parser.add_argument("--export", action="store_true", help="Export detailed Excel report")
    parser.add_argument("--db", help="Path to SQLite database file")

    args = parser.parse_args()

    analyzer = ModelTestAnalyzer(Path(args.db) if args.db else None)

    if args.list_sessions:
        sessions = analyzer.get_sessions()
        console.print("[bold]Available sessions:[/bold]")
        for session in sessions:
            console.print(f"  {session}")
        return

    # Load data
    analyzer.load_data(args.session)

    if analyzer.df is None or analyzer.df.empty:
        console.print("[red]No data found[/red]")
        return

    # Print summary report
    analyzer.print_summary_report()

    # Generate visualizations if requested
    if args.visualize:
        analyzer.generate_visualizations()

    # Export detailed report if requested
    if args.export:
        analyzer.export_detailed_report()

if __name__ == "__main__":
    main()
```
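The analyzer above can be driven through its argparse flags or imported directly. A short sketch of the programmatic path, assuming it runs from the backend directory so the module import resolves (the explicit db path is illustrative; by default it falls back to test_results/model_tests.db at the project root):

```python
# Programmatic use of ModelTestAnalyzer from analyze_test_results.py.
# Assumption: run from the backend directory so the import resolves;
# the explicit database path below is illustrative.
from pathlib import Path
from analyze_test_results import ModelTestAnalyzer

analyzer = ModelTestAnalyzer(Path("test_results/model_tests.db"))
analyzer.load_data()                 # all sessions; pass a session_id to filter
analyzer.print_summary_report()      # rich table plus best-performer call-outs
analyzer.generate_visualizations()   # PNGs under test_results/visualizations
analyzer.export_detailed_report()    # timestamped .xlsx workbook
```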
File: C:/Users/User/Desktop/colin-tts/backend/main.py
```python
from fastapi import FastAPI, Request, HTTPException, Depends, BackgroundTasks
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import Response
from sqlalchemy.orm import Session
from typing import List, Optional
from datetime import datetime
import csv
from io import StringIO
import uuid
from collections import defaultdict
from backend.database import get_db, engine
from backend.models import Base, Conversation
import os
from pydantic import BaseModel, Field
import base64
from backend.llm_service import get_answer, cleanup_file
from backend.auth import verify_admin_credentials

# Store conversation histories in memory {session_id: [messages]}
conversation_histories = defaultdict(list)
MAX_HISTORY_LENGTH = 10  # Keep last 5 exchanges (10 messages)

# Create database tables
Base.metadata.create_all(bind=engine)

app = FastAPI()

# --- IMPORTANT: UPDATED CORS MIDDLEWARE ---
# This allows your new Svelte/Vercel site to make requests to the backend.
app.add_middleware(
    CORSMiddleware,
    allow_origins=[
        "http://localhost:5173",  # SvelteKit default dev port
        "https://portfolio-eight-taupe-21.vercel.app",  # IMPORTANT: Replace with your actual Vercel deployment URL
        "https://nardocol.in",  # Your final custom domain for the portfolio
        "https://www.nardocol.in",
    ],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*", "X-Session-ID"],
    expose_headers=["X-Session-ID"],
)

class QuestionRequest(BaseModel):
    question: str = Field(..., min_length=1, example="What is your favorite programming language?")

class ConversationResponse(BaseModel):
    id: int
    timestamp: str
    user_message: str
    ai_response: str
    tts_reference: Optional[str] = None

class ConversationCountResponse(BaseModel):
    total: int

# --- THIS IS THE ENDPOINT YOUR SVELTE APP WILL NOW USE ---
@app.post("/ask_tts")
async def ask_question_tts(payload: QuestionRequest, request: Request, response: Response, background_tasks: BackgroundTasks, db: Session = Depends(get_db)):
    """Ask a question and get both text answer and WAV audio as base64."""
    question = payload.question.strip()
    if not question:
        raise HTTPException(status_code=400, detail="Question cannot be empty")

    session_id = request.headers.get("X-Session-ID")
    is_new_session = False
    if not session_id or session_id not in conversation_histories:
        session_id = str(uuid.uuid4())
        is_new_session = True

    history = conversation_histories[session_id]
    answer_text, audio_path = await get_answer(question, history)

    if not audio_path:
        if is_new_session and session_id in conversation_histories:
            del conversation_histories[session_id]
        raise HTTPException(status_code=500, detail="Failed to generate audio")

    try:
        with open(audio_path, "rb") as f:
            audio_base64 = base64.b64encode(f.read()).decode("utf-8")

        file_size = os.path.getsize(audio_path)
        background_tasks.add_task(cleanup_file, audio_path)

        history.append({"role": "user", "content": question})
        history.append({"role": "assistant", "content": answer_text})

        if len(history) > MAX_HISTORY_LENGTH:
            conversation_histories[session_id] = history[-MAX_HISTORY_LENGTH:]
        else:
            conversation_histories[session_id] = history

        try:
            new_conversation = Conversation(
                user_message=question,
                ai_response=answer_text
            )
            db.add(new_conversation)
            db.commit()
            db.refresh(new_conversation)
        except Exception as db_error:
            print(f"[ERROR] Failed to log conversation to database: {str(db_error)}")
            db.rollback()

        response.headers["X-Session-ID"] = session_id

        return {
            "answer": answer_text,
            "audio": audio_base64,
            "size": file_size
        }
    except Exception as e:
        print(f"[ERROR] Error in /ask_tts endpoint: {str(e)}")
        background_tasks.add_task(cleanup_file, audio_path)
        raise HTTPException(status_code=500, detail=f"Error processing audio: {str(e)}")


@app.get("/admin/conversations/count", response_model=ConversationCountResponse)
async def get_conversations_count(
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    db: Session = Depends(get_db),
    username: str = Depends(verify_admin_credentials)
):
    """Get the total count of past conversations with optional date filtering."""
    query = db.query(Conversation)

    # Apply date filters if provided
    if start_date:
        try:
            start_datetime = datetime.fromisoformat(start_date)
            query = query.filter(Conversation.timestamp >= start_datetime)
        except ValueError:
            raise HTTPException(status_code=400, detail="Invalid start_date format. Use ISO format (YYYY-MM-DDTHH:MM:SS)")

    if end_date:
        try:
            end_datetime = datetime.fromisoformat(end_date)
            query = query.filter(Conversation.timestamp <= end_datetime)
        except ValueError:
            raise HTTPException(status_code=400, detail="Invalid end_date format. Use ISO format (YYYY-MM-DDTHH:MM:SS)")

    count = query.count()
    return {"total": count}


@app.get("/admin/conversations", response_model=List[ConversationResponse])
async def get_conversations(
    skip: int = 0,
    limit: int = 100,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    db: Session = Depends(get_db),
    username: str = Depends(verify_admin_credentials)
):
    """Get a list of past conversations with optional date filtering."""
    query = db.query(Conversation)

    # Apply date filters if provided
    if start_date:
        try:
            start_datetime = datetime.fromisoformat(start_date)
            query = query.filter(Conversation.timestamp >= start_datetime)
        except ValueError:
            raise HTTPException(status_code=400, detail="Invalid start_date format. Use ISO format (YYYY-MM-DDTHH:MM:SS)")

    if end_date:
        try:
            end_datetime = datetime.fromisoformat(end_date)
            query = query.filter(Conversation.timestamp <= end_datetime)
        except ValueError:
            raise HTTPException(status_code=400, detail="Invalid end_date format. Use ISO format (YYYY-MM-DDTHH:MM:SS)")

    # Apply pagination and return results
    conversations = query.order_by(Conversation.timestamp.desc()).offset(skip).limit(limit).all()

    return [
        ConversationResponse(
            id=conv.id,
            timestamp=conv.timestamp.isoformat(),
            user_message=conv.user_message,
            ai_response=conv.ai_response,
            tts_reference=conv.tts_reference
        )
        for conv in conversations
    ]

@app.get("/admin/conversations/export")
async def export_conversations(
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    db: Session = Depends(get_db),
    username: str = Depends(verify_admin_credentials)
):
    """Export conversations to CSV file."""
    query = db.query(Conversation)

    # Apply date filters if provided
    if start_date:
        try:
            start_datetime = datetime.fromisoformat(start_date)
            query = query.filter(Conversation.timestamp >= start_datetime)
        except ValueError:
            raise HTTPException(status_code=400, detail="Invalid start_date format. Use ISO format (YYYY-MM-DDTHH:MM:SS)")

    if end_date:
        try:
            end_datetime = datetime.fromisoformat(end_date)
            query = query.filter(Conversation.timestamp <= end_datetime)
        except ValueError:
            raise HTTPException(status_code=400, detail="Invalid end_date format. Use ISO format (YYYY-MM-DDTHH:MM:SS)")

    # Get all conversations matching the criteria
    conversations = query.order_by(Conversation.timestamp.desc()).all()

    # Create CSV content
    output = StringIO()
    csv_writer = csv.writer(output)

    # Write header
    csv_writer.writerow(["ID", "Timestamp", "User Message", "AI Response", "Audio Reference"])

    # Write data
    for conv in conversations:
        csv_writer.writerow([
            conv.id,
            conv.timestamp.isoformat(),
            conv.user_message,
            conv.ai_response,
            conv.tts_reference or ""
        ])

    # Create response with CSV content
    today = datetime.now().strftime("%Y-%m-%d")
    filename = f"conversations_export_{today}.csv"

    return Response(
        content=output.getvalue(),
        media_type="text/csv",
        headers={
            "Content-Disposition": f"attachment; filename={filename}"
        }
    )
"""
# --- NEW STATIC FILE SERVING LOGIC ---
# This new setup replaces the single `app.mount(...)` at the end of the file.

# 1. Mount the static asset directories explicitly
# This handles all the JS, CSS, and other assets generated by Next.js
app.mount("/_next", StaticFiles(directory="nextjs-frontend/out/_next"), name="next-assets")
# This handles other static assets like your CV
app.mount("/cv", StaticFiles(directory="nextjs-frontend/out/cv"), name="cv-assets")
# If you have other static folders (e.g., /images), mount them here too.


# 2. Add a catch-all route to serve the correct HTML page
@app.get("/{full_path:path}")
async def serve_nextjs_app(request: Request, full_path: str):
    # The base directory for your Next.js export
    base_dir = "nextjs-frontend/out"

    # If the path is empty, it's the root, so serve index.html
    if not full_path:
        full_path = "index.html"

    # Construct the potential file path
    file_path = os.path.join(base_dir, full_path)

    # If the path points to a directory, assume it wants an index.html inside
    if os.path.isdir(file_path):
        file_path = os.path.join(file_path, "index.html")

    # If the requested path doesn't end in .html and doesn't exist,
    # try adding .html to the end. This handles /admin -> /admin.html
    if not os.path.exists(file_path) and not full_path.endswith(".html"):
        html_file_path = file_path + ".html"
        if os.path.exists(html_file_path):
            return FileResponse(html_file_path)

    # If the file exists, serve it
    if os.path.exists(file_path):
        return FileResponse(file_path)

    # If no file is found, serve the custom 404 page from Next.js
    not_found_path = os.path.join(base_dir, "404.html")
    if os.path.exists(not_found_path):
        return FileResponse(not_found_path, status_code=404)

    # As a final fallback, raise a server 404
    raise starlette.exceptions.HTTPException(status_code=404, detail="Page not found.")
"""
```
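A caller of /ask_tts POSTs a question, echoes the returned X-Session-ID header on follow-ups so the server keeps its rolling 10-message history, and base64-decodes the WAV payload. A sketch of such a client with `requests`; the base URL and output filename are assumptions, not part of the commit:

```python
# Sketch of a /ask_tts client. BASE_URL and "reply.wav" are illustrative;
# the response shape ({"answer", "audio", "size"}) and the X-Session-ID
# header match main.py above.
import base64
import requests

BASE_URL = "http://localhost:8000"  # assumption: local uvicorn default
session_id = None

def ask(question: str) -> str:
    global session_id
    headers = {"X-Session-ID": session_id} if session_id else {}
    r = requests.post(f"{BASE_URL}/ask_tts", json={"question": question}, headers=headers)
    r.raise_for_status()
    session_id = r.headers.get("X-Session-ID", session_id)  # reuse for follow-ups
    body = r.json()
    with open("reply.wav", "wb") as f:
        f.write(base64.b64decode(body["audio"]))  # decode the spoken reply
    return body["answer"]

print(ask("What are you studying?"))
```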

File: C:/Users/User/Desktop/colin-tts/backend/archive/local_testing.py
```python
#!/usr/bin/env python
"""
llm_monitor.py

This file demonstrates how to monitor the speed and quality of your LLM inference by measuring:
- Total inference time (in seconds)
- CPU usage (average before and after inference)
- Memory usage (change in MB)
- Token count and tokens per second (as a rough estimate)

It also logs these metrics along with the model name, question, and answer to a CSV file.
Additionally, it automatically runs a set of predefined questions.
"""

import time
import csv
from datetime import datetime
from pathlib import Path

import psutil
import llama_cpp
from qdrant_client import QdrantClient
from rich.console import Console

# Set up directories relative to this file.
BASE_DIR = Path(__file__).parent.parent
EMBEDDINGS_PATH = BASE_DIR / "embeddings"

# Initialise a console for coloured logging.
console = Console()

# --------------------------------------------------------------------
# Instantiate your LLMs using llama_cpp.
# --------------------------------------------------------------------
# Define the model path and model name.
MODEL_PATH = BASE_DIR / "models/EXAONE-3.5-2.4B-Instruct-Q6_K.gguf"
MODEL_NAME = MODEL_PATH.name

# Instantiate the embedding model.
embedding_model_path = BASE_DIR / "models/mxbai-embed-large-v1-f16.gguf"
embedding_llm = llama_cpp.Llama(
    model_path=str(embedding_model_path),
    embedding=True,
    verbose=False
)

# Instantiate the main LLM.
llm = llama_cpp.Llama(
    model_path=str(MODEL_PATH),
    n_ctx=8192,   # Using the full context window capacity of the model
    verbose=True  # Enable verbose output (e.g. tokens per second)
)

# Initialise the Qdrant client (local collection).
client = QdrantClient(path=str(EMBEDDINGS_PATH))

# Template for the prompt – feel free to adjust as needed.
template = """You are Colin Salvatore Nardo, a Computing Science student at the University of Glasgow. Your persona is friendly, down-to-earth, and you must always speak in the first person (e.g., "I," "my," "me"). Your entire knowledge and all your experiences are defined EXCLUSIVELY by the "Knowledge Base" provided below.

**CRITICAL RULES - YOU MUST FOLLOW THESE:**

1. **THE GOLDEN RULE: ZERO FABRICATION.** Your absolute highest priority is to use ONLY the information explicitly provided in the Knowledge Base. You are strictly forbidden from inventing, fabricating, or hallucinating ANY details, no matter how small. This includes, but is not limited to:
    * Personal anecdotes or activities (e.g., "going to the beach," "my weekend").
    * Company names, unless the name is explicitly listed for a specific role in the Knowledge Base.
    * Project details or technologies not listed.
    * If the information is not in the Knowledge Base, you MUST state that you don't have the details (see Rule 7).

2. **EMBODY THE PERSONA:** You ARE Colin. Every word of your response must come from Colin's perspective. Never reveal you are an AI or language model.

3. **SYNTHESIZE WHEN RELEVANT:** For substantive questions about my background, skills, or experience, connect information from different sections (e.g., "Projects," "Experience") to form a complete, natural-sounding answer. For simple greetings or social questions, a brief, direct answer is better.

4. **ACCURACY IS KEY:** When you describe a project, you must only mention details, technologies, and challenges that are explicitly listed for THAT SPECIFIC project in the Knowledge Base.

5. **NO LISTING SKILLS:** Do not end your answers with a long list of your skills. Integrate your key skills naturally into the sentences where you describe your projects.

6. **HANDLE OPINIONS & SUBJECTIVITY:** If asked about preferences, frame your answer around the experiences listed. Say something like, "I've found all my projects rewarding in their own way." Do NOT invent a favorite.

7. **GRACEFUL UNCERTAINTY:** If the information needed to answer a question is not in the Knowledge Base, respond naturally as Colin. Say something like, "That's not something I have the details on right now," or "I haven't had a chance to work on that specifically yet." This is the ONLY correct way to handle missing information.

8. **TONE & STYLE:** Your primary goal is a natural conversation. Keep responses concise and match the length of your answer to the user's question. Do not use any emojis or special characters.

    * **GOOD (for a simple greeting):** User: "Hi" -> You: "Hey, how's it going?"
    * **BAD (for a simple greeting):** User: "Hi" -> You: "Hi! I'm Colin, a Computing Science student..."

Here’s your Knowledge Base:
{context}

Based ONLY on the Knowledge Base above, and speaking as Colin, answer the following question.
Remember, you are Colin. Your answer should be a direct, first-person response to the question.

Question: {question}"""

# --------------------------------------------------------------------
# Helper functions for performance monitoring.
# --------------------------------------------------------------------
def monitor_cpu() -> float:
    """
    Return the CPU usage percentage measured over a 1-second interval.
    """
    return psutil.cpu_percent(interval=1)

def monitor_memory() -> float:
    """
    Return the used memory in megabytes.
    """
    mem = psutil.virtual_memory()
    return mem.used / (1024 * 1024)

def log_metrics(model_name: str, question: str, answer: str, inference_time: float,
                avg_cpu_usage: float, memory_increase: float, token_count: int, tokens_per_sec: float):
    """
    Append the current performance metrics and LLM response details to a CSV file.
    """
    log_file = BASE_DIR / "llm_performance_log.csv"
    file_exists = log_file.exists()

    with open(log_file, mode="a", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        if not file_exists:
            # Write the header if the file does not exist.
            writer.writerow([
                "Timestamp", "Model Name", "Question", "Answer",
                "Inference Time (s)", "Average CPU Usage (%)",
                "Memory Increase (MB)", "Token Count", "Tokens per Second"
            ])
        writer.writerow([
            datetime.now().isoformat(),
            model_name,
            question,
            answer,
            f"{inference_time:.3f}",
            f"{avg_cpu_usage:.2f}",
            f"{memory_increase:.2f}",
            token_count,
            f"{tokens_per_sec:.2f}"
        ])

# --------------------------------------------------------------------
# Main function to process a question and monitor performance.
# --------------------------------------------------------------------
def get_answer_with_metrics(question: str) -> str:
    """
    Processes a question by:
    - Creating an embedding for the query,
    - Searching the Qdrant vector store for context,
    - Building a prompt and generating a response via the LLM,
    - Measuring inference time, CPU and memory usage,
    - Calculating token count and tokens per second,
    - Logging all metrics to a CSV file.

    Returns the generated answer text.
    """
    try:
        console.print(f"\n[bold blue]Processing question:[/bold blue] {question}")
        # Record CPU and memory usage before inference.
        cpu_before = monitor_cpu()
        memory_before = monitor_memory()

        # Start the timer.
        start_time = time.time()

        # Create a query embedding.
        query_embedding_result = embedding_llm.create_embedding(question)
        query_vector = query_embedding_result['data'][0]['embedding']

        # Search Qdrant for relevant context.
        search_result = client.search(
            collection_name="data",
            query_vector=query_vector,
            limit=5
        )
        context_text = "\n\n".join([hit.payload['text'] for hit in search_result])
        user_prompt = template.format(context=context_text, question=question)

        # Generate the LLM response.
        response = llm.create_chat_completion(
            messages=[{"role": "user", "content": user_prompt}],
            stream=False
        )
        answer_text = response['choices'][0]['message']['content'].strip()

        # End the timer.
        end_time = time.time()
        inference_time = end_time - start_time

        # Record CPU and memory usage after inference.
        cpu_after = monitor_cpu()
        memory_after = monitor_memory()

        # Calculate average CPU usage and memory increase.
        avg_cpu_usage = (cpu_before + cpu_after) / 2
        memory_increase = memory_after - memory_before

        # Calculate token count (approximation) and tokens per second.
        token_count = len(answer_text.split())
        tokens_per_sec = token_count / inference_time if inference_time > 0 else 0

        # Output the performance metrics.
        console.print(f"[bold green]LLM Inference Time:[/bold green] {inference_time:.3f} seconds")
        console.print(f"[bold green]Average CPU Usage:[/bold green] {avg_cpu_usage:.2f}%")
        console.print(f"[bold green]Memory Increase:[/bold green] {memory_increase:.2f} MB")
        console.print(f"[bold green]Token Count:[/bold green] {token_count}")
        console.print(f"[bold green]Tokens per Second:[/bold green] {tokens_per_sec:.2f}")

        # Log the metrics to a CSV file.
        log_metrics(MODEL_NAME, question, answer_text, inference_time, avg_cpu_usage, memory_increase, token_count, tokens_per_sec)

        return answer_text

    except Exception as e:
        console.print(f"[bold red]Error processing question:[/bold red] {e}")
        return "Sorry, I encountered an error processing your request."

# --------------------------------------------------------------------
# Main block: run the performance monitor on a set of predefined questions.
# --------------------------------------------------------------------
if __name__ == "__main__":
    console.print(f"[bold magenta]Testing model:[/bold magenta] {MODEL_NAME}")

    # Predefined list of questions for testing.
    questions = [
        "Hi",
        "What's your backend experience?",
        "What's your favourite animal?",
        "How can I contact you?",
        "What's your experience with React?"
    ]

    for idx, question in enumerate(questions, start=1):
        console.print(f"\n[bold yellow]Question {idx}: {question}[/bold yellow]")
        answer = get_answer_with_metrics(question)
        console.print(f"[bold cyan]Answer:[/bold cyan] {answer}\n")
        # Optional: add a small delay between questions if needed.
        time.sleep(1)

```

File: C:/Users/User/Desktop/colin-tts/backend/llm_service.py
```python
import os
import json
import subprocess
import uuid
import re
from pathlib import Path
import asyncio

import llama_cpp
from qdrant_client import QdrantClient
from rich.console import Console
from fastapi.concurrency import run_in_threadpool

# --------------------------------------
# Piper TTS Setup
# --------------------------------------
BASE_DIR = Path(__file__).parent.parent
PIPER_DIR = BASE_DIR / "piper"
PIPER_BINARY = PIPER_DIR / "piper.exe"
MODEL_PATH = PIPER_DIR / "colin-voice_high.onnx"
MODEL_CONFIG = PIPER_DIR / "colin-voice_high.onnx.json"

def cleanup_file(path: str):
    """Safely remove a file, ignoring errors if it doesn't exist."""
    try:
        os.unlink(path)
        print(f"[CLEANUP] Deleted temporary audio file: {path}")
    except OSError as e:
        # The file might already be gone, which is fine.
        print(f"[CLEANUP_ERROR] Could not delete {path}: {e}")

def clean_text_for_tts(text):
    """
    Clean text to remove emojis and other special characters that might cause
    encoding issues with the TTS system.
    """
    # Remove emojis and other non-ASCII characters.
    # This pattern matches any character outside the ASCII range.
    cleaned_text = re.sub(r'[^\x00-\x7F]+', '', text)

    # Replace multiple spaces with a single space.
    cleaned_text = re.sub(r'\s+', ' ', cleaned_text)

    return cleaned_text.strip()

def synthesize_speech(text):
    """Generate a WAV audio file from text using Piper TTS."""
    # Use a unique filename to prevent race conditions.
    output_wav_path = PIPER_DIR / f"response_{uuid.uuid4().hex}.wav"

    try:
        cleaned_text = clean_text_for_tts(text)

        # Generate the WAV file.
        process = subprocess.Popen(
            [str(PIPER_BINARY), "--model", str(MODEL_PATH), "--output_file", str(output_wav_path)],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            encoding='utf-8'  # Be explicit with encoding
        )
        stdout, stderr = process.communicate(cleaned_text + "\n")

        if process.returncode != 0:
            print(f"Error running Piper: {stderr}")
            return None

        if output_wav_path.exists():
            return str(output_wav_path)  # Return the path as a string

    except Exception as e:
        print(f"Error generating speech: {e}")
        # If the file was created but an error occurred, try to clean it up.
        if output_wav_path.exists():
            cleanup_file(str(output_wav_path))
        return None

    return None

llm_semaphore = asyncio.Semaphore(1)


# --------------------------------------
# LLM and Qdrant Setup
# --------------------------------------
c = Console()

# Embedding model for queries
embedding_llm = llama_cpp.Llama(
    model_path=str(BASE_DIR / "models/mxbai-embed-large-v1-f16.gguf"),
    embedding=True,
    verbose=False
)

# LLM for answering
llm = llama_cpp.Llama(
    model_path=str(BASE_DIR / "models/Phi-4-mini-instruct.Q8_0.gguf"),
    n_ctx=8192,  # Using the full context window capacity of the model
    verbose=False
)

# Qdrant client (local)
client = QdrantClient(path=str(BASE_DIR / "embeddings"))

template = """You are Colin Salvatore Nardo, a Computing Science student at the University of Glasgow. Your persona is friendly, down-to-earth, and you must always speak in the first person (e.g., "I," "my," "me"). Your entire knowledge and all your experiences are defined EXCLUSIVELY by the "Knowledge Base" provided below.

**CRITICAL RULES - YOU MUST FOLLOW THESE:**

1. **THE GOLDEN RULE: ZERO FABRICATION.** Your absolute highest priority is to use ONLY the information explicitly provided in the Knowledge Base. You are strictly forbidden from inventing, fabricating, or hallucinating ANY details, no matter how small. This includes, but is not limited to:
    * Personal anecdotes or activities (e.g., "going to the beach," "my weekend").
    * Company names, unless the name is explicitly listed for a specific role in the Knowledge Base.
    * Project details or technologies not listed.
    * If the information is not in the Knowledge Base, you MUST state that you don't have the details (see Rule 7).

2. **EMBODY THE PERSONA:** You ARE Colin. Every word of your response must come from Colin's perspective. Never reveal you are an AI or language model.

3. **SYNTHESIZE WHEN RELEVANT:** For substantive questions about my background, skills, or experience, connect information from different sections (e.g., "Projects," "Experience") to form a complete, natural-sounding answer. For simple greetings or social questions, a brief, direct answer is better.

4. **ACCURACY IS KEY:** When you describe a project, you must only mention details, technologies, and challenges that are explicitly listed for THAT SPECIFIC project in the Knowledge Base.

5. **NO LISTING SKILLS:** Do not end your answers with a long list of your skills. Integrate your key skills naturally into the sentences where you describe your projects.

6. **HANDLE OPINIONS & SUBJECTIVITY:** If asked about preferences, frame your answer around the experiences listed. Say something like, "I've found all my projects rewarding in their own way." Do NOT invent a favorite.

7. **GRACEFUL UNCERTAINTY:** If the information needed to answer a question is not in the Knowledge Base, respond naturally as Colin. Say something like, "That's not something I have the details on right now," or "I haven't had a chance to work on that specifically yet." This is the ONLY correct way to handle missing information.

8. **TONE & STYLE:** Your primary goal is a natural conversation. Keep responses concise and match the length of your answer to the user's question. Do not use any emojis or special characters.

    * **GOOD (for a simple greeting):** User: "Hi" -> You: "Hey, how's it going?"
    * **BAD (for a simple greeting):** User: "Hi" -> You: "Hi! I'm Colin, a Computing Science student..."

Here’s your Knowledge Base:
{context}

Based ONLY on the Knowledge Base above, and speaking as Colin, answer the following question.
Remember, you are Colin. Your answer should be a direct, first-person response to the question.

Question: {question}"""

def _get_answer_sync(question: str, history: list = None):
    """The original synchronous function, renamed."""
    try:
        # Create the query embedding.
        query_vector = embedding_llm.create_embedding(question)['data'][0]['embedding']

        # Search Qdrant for context.
        search_result = client.search(
            collection_name="data",
            query_vector=query_vector,
            limit=5
        )

        # Prepare the context.
        context_text = "\n\n".join([hit.payload['text'] for hit in search_result])

        # Prepare messages for the LLM.
        messages = []

        # Add the system message with context; formatting once means braces
        # inside the retrieved context cannot break a second formatting pass.
        system_prompt = template.format(context=context_text, question=question)
        messages.append({"role": "system", "content": system_prompt})

        # Add conversation history if available.
        if history and len(history) > 0:
            messages.extend(history)

        # Add the current question.
        messages.append({"role": "user", "content": question})

        # Get the LLM response.
        response = llm.create_chat_completion(
            messages=messages,
            stream=False
        )

        answer_text = response['choices'][0]['message']['content'].strip()

        # Generate audio.
        audio_path = synthesize_speech(answer_text)

        return answer_text, audio_path

    except Exception as e:
        print(f"Error processing question: {e}")
        return "Sorry, I encountered an error processing your request.", None

async def get_answer(question: str, history: list = None):
    """
    Asynchronously run the synchronous LLM processing in a thread pool.
    """
    async with llm_semaphore:
        return await run_in_threadpool(_get_answer_sync, question=question, history=history)

```
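Because `get_answer` serialises LLM access behind `llm_semaphore` and offloads the blocking llama.cpp call with `run_in_threadpool`, a route handler can simply await it. A minimal sketch of such an endpoint (the `/chat` path, `ChatRequest` model, and `app` instance are illustrative, not taken from the project's actual entry point, and the import path assumes the app is run as a package):

```python
from fastapi import FastAPI
from pydantic import BaseModel

from backend.llm_service import get_answer  # module shown above

app = FastAPI()

class ChatRequest(BaseModel):
    question: str
    history: list | None = None  # prior chat turns as OpenAI-style dicts

@app.post("/chat")
async def chat(req: ChatRequest):
    # Awaiting keeps the event loop free; the semaphore inside get_answer
    # ensures only one llama.cpp generation runs at a time.
    answer_text, audio_path = await get_answer(req.question, req.history)
    return {"answer": answer_text, "audio_path": audio_path}
```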

File: C:/Users/User/Desktop/colin-tts/backend/archive/data_creation.py
```python
import json

# Paste the multi-line CV text here
cv_text_string = """Colin Salvatore Nardo
Phone: 07784310064 | Email: colin.nardo@gmail.com | LinkedIn: www.linkedin.com/in/colin-salvatore-nardo | Website: nardocol.in

Summary:
Computing Science student with a passion for problem-solving and a strong foundation in software development. Currently pursuing a degree at the University of Glasgow while gaining hands-on experience in AI and programming projects. Committed to continuous learning and applying innovative technologies to real-world challenges.

EXPERIENCE

Minted Ice Cream - Team Member (Glasgow, Scotland)
Dates: June 2023 - Present
- Managed production process from milk pasteurization to flavour selection to meet high demand during peak hours, ensuring consistent product quality.

Saint Storage - Storage Administrator (St. Andrews, Scotland)
Dates: August 2023 – August 2024
- Assisted in optimizing logistics operations through the integration of Storage IQ, a web app built primarily on Ruby on Rails, enhancing warehouse efficiency and delivery accuracy.

Premier Inn - Chef (St. Andrews, Scotland)
Dates: September 2021 – June 2022
- Managed the kitchen staff to ensure the quality and timeliness of food orders, resulting in improved customer experience.

Young Professionals - Student Intern (Virtual)
Dates: June 2021
- Participated in a week-long virtual internship, engaging with industry leaders from IBM, Rolls Royce, Capgemini, and other major companies.
- Developed an understanding of key industry trends and future work skills through sessions with PwC, EY, and CIMA.

EDUCATION

University of Glasgow - BSc in Computing Science
Dates: September 2022 - August 2026
- Core courses included Object-Oriented Programming, Networks and Operating Systems, Algorithms and Data Structures and Web Application Development.
- Developed strong skills in programming, algorithm development and general teamwork through hands-on assignments and projects.

ACHIEVEMENTS & PROJECTS

Course Content Mapping Web Application Development (Organization: Learning Innovation Support Unit at University of Glasgow)
Dates: September 2024 - Present
- Developing a course content mapping web app, incorporating Azure AD for secure login.
- Designed a cloud-based SQL database for real-time management of course data, including learning hours and activities.
- Contributing to full-stack development (React.js, Node.js/Express), with features like drag-and-drop functionality, real-time visualization, and collaborative access for academic staff.

Glasgow University Artificial Intelligence Society President (Organization: Glasgow University Artificial Intelligence Society)
Dates: May 2024 - Present
- Lead and coordinate workshops and events focused on AI technologies, aimed at equipping students with practical skills.
- Host talks with industry leaders and academic professionals to provide members with insights into AI applications and trends.
- Manage collaborations with external organizations to provide students with networking opportunities and internships in AI-related fields.

Personal Voice Clone Chatbot (CPU-Only Inference)
Dates: April 2025 – October 2025
- Built a local chatbot leveraging quantized LLM models, enabling 24/7 deployment on small PCs and avoiding the need of GPUs while maintaining an acceptable response speed.
- Employed a vector database (Qdrant) to store and retrieve context from embedded text chunks, ensuring relevant responses and minimal inference overhead.
- Fine-tuned Piper TTS using over 200 personal voice recordings, achieving a realistic voice clone for fully offline text-to-speech responses and CPU friendly inference times.
- Deployed via Cloudflare tunnels to handle dynamic IP routing, integrating a Qdrant vector database for context retrieval and robust performance.

Glasgow University Tech Society Hackathon (Organization: Glasgow University Tech Society)
Dates: Undated
- Led the development of a multiplayer web game using Three.js, featuring planes navigating a 3D globe.
- Created dynamic hazards (e.g., earthquakes and hurricanes) that players must avoid, integrating real-time gameplay elements.
- Modelled all low-poly assets in Blender from scratch for a cohesive design.

Django Web Application Development (Tennr)
Dates: September 2023 – April 2024
- Collaborated on Tennr, a Django-based platform connecting service providers and customers for freelance opportunities.
- Designed an account management system with distinct roles for buyers and creators, with personalized recommendations.
- Implemented search functionality with filters for categories and price ranges and added a rating and commenting system.

SKILLS

Technical Skills: Python, Java, C, JavaScript, Ruby, Django, React, Three.js, Tensorflow, PyTorch, Node.js, Express.js, SQL, Azure AD, Qdrant, Piper TTS, Blender, HTML, CSS, Ruby on Rails

Languages: English (native), Italian (native), French (fluent), Spanish (intermediate), Sicilian (native)

CERTIFICATES & TRAINING

- Open University M140 – Introducing Statistics
- ZTM ThreeJS Bootcamp
"""

# Create the dictionary structure the indexing script expects.
data_for_json_file = {"text": cv_text_string}

# Write it to data/data.json. With indent=None the output is a single
# compact line, but the "text" value keeps the embedded newlines that
# the indexing script relies on.
with open("data/data.json", "w") as f:
    json.dump(data_for_json_file, f, ensure_ascii=False, indent=None)

print("data/data.json has been created/updated.")
```

File: C:/Users/User/Desktop/colin-tts/backend/archive/data.json
```json
{"text": "Colin Salvatore Nardo\nPhone: 07784310064 | Email: colin.nardo@gmail.com | LinkedIn: www.linkedin.com/in/colin-salvatore-nardo | Website: nardocol.in\n\nSummary:\nComputing Science student with a passion for problem-solving and a strong foundation in software development. Currently pursuing a degree at the University of Glasgow while gaining hands-on experience in AI and programming projects. Committed to continuous learning and applying innovative technologies to real-world challenges.\n\nEXPERIENCE\n\nMinted Ice Cream - Team Member (Glasgow, Scotland)\nDates: June 2023 - Present\n- Managed production process from milk pasteurization to flavour selection to meet high demand during peak hours, ensuring consistent product quality.\n\nSaint Storage - Storage Administrator (St. Andrews, Scotland)\nDates: August 2023 - August 2024\n- Assisted in optimizing logistics operations through the integration of Storage IQ, a web app built primarily on Ruby on Rails, enhancing warehouse efficiency and delivery accuracy.\n\nPremier Inn - Chef (St. Andrews, Scotland)\nDates: September 2021 - June 2022\n- Managed the kitchen staff to ensure the quality and timeliness of food orders, resulting in improved customer experience.\n\nYoung Professionals - Student Intern (Virtual)\nDates: June 2021\n- Participated in a week-long virtual internship, engaging with industry leaders from IBM, Rolls Royce, Capgemini, and other major companies.\n- Developed an understanding of key industry trends and future work skills through sessions with PwC, EY, and CIMA.\n\nEDUCATION\n\nUniversity of Glasgow - BSc in Computing Science\nDates: September 2022 - August 2026\n- Core courses included Object-Oriented Programming, Networks and Operating Systems, Algorithms and Data Structures and Web Application Development.\n- Developed strong skills in programming, algorithm development and general teamwork through hands-on assignments and projects.\n\nACHIEVEMENTS & PROJECTS\n\nCourse Content Mapping Web Application Development (Organization: Learning Innovation Support Unit at University of Glasgow)\nDates: September 2024 - Present\n- Developing a course content mapping web app, incorporating Azure AD for secure login.\n- Designed a cloud-based SQL database for real-time management of course data, including learning hours and activities.\n- Contributing to full-stack development (React.js, Node.js/Express), with features like drag-and-drop functionality, real-time visualization, and collaborative access for academic staff.\n\nGlasgow University Artificial Intelligence Society President (Organization: Glasgow University Artificial Intelligence Society)\nDates: May 2024 - Present\n- Lead and coordinate workshops and events focused on AI technologies, aimed at equipping students with practical skills.\n- Host talks with industry leaders and academic professionals to provide members with insights into AI applications and trends.\n- Manage collaborations with external organizations to provide students with networking opportunities and internships in AI-related fields.\n\nPersonal Voice Clone Chatbot (CPU-Only Inference)\nDates: April 2025 - October 2025\n- Built a local chatbot leveraging quantized LLM models, enabling 24/7 deployment on small PCs and avoiding the need of GPUs while maintaining an acceptable response speed.\n- Employed a vector database (Qdrant) to store and retrieve context from embedded text chunks, ensuring relevant responses and minimal inference overhead.\n- Fine-tuned Piper TTS using over 200 personal voice recordings, achieving a realistic voice clone for fully offline text-to-speech responses and CPU friendly inference times.\n- Deployed via Cloudflare tunnels to handle dynamic IP routing, integrating a Qdrant vector database for context retrieval and robust performance.\n\nGlasgow University Tech Society Hackathon (Organization: Glasgow University Tech Society)\nDates: Undated\n- Led the development of a multiplayer web game using Three.js, featuring planes navigating a 3D globe.\n- Created dynamic hazards (e.g., earthquakes and hurricanes) that players must avoid, integrating real-time gameplay elements.\n- Modelled all low-poly assets in Blender from scratch for a cohesive design.\n\nDjango Web Application Development (Tennr)\nDates: September 2023 - April 2024\n- Collaborated on Tennr, a Django-based platform connecting service providers and customers for freelance opportunities.\n- Designed an account management system with distinct roles for buyers and creators, with personalized recommendations.\n- Implemented search functionality with filters for categories and price ranges and added a rating and commenting system.\n\nSKILLS\n\nTechnical Skills: Python, Java, C, JavaScript, Ruby, Django, React, Three.js, Tensorflow, PyTorch, Node.js, Express.js, SQL, Azure AD, Qdrant, Piper TTS, Blender, HTML, CSS, Ruby on Rails\n\nLanguages: English (native), Italian (native), French (fluent), Spanish (intermediate), Sicilian (native)\n\nCERTIFICATES & TRAINING\n\n- Open University M140 - Introducing Statistics\n- ZTM ThreeJS Bootcamp\n"}
```

File: C:/Users/User/Desktop/colin-tts/backend/structured_build_index.py
```python
import uuid
import json
import time
import llama_cpp
from pathlib import Path

from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams, PointStruct

from langchain_core.documents import Document

from utils import chunk

# Define base directory for consistent path resolution
BASE_DIR = Path(__file__).parent.parent

# --- 1. LOAD THE NEW STRUCTURED FILE ---
file = str(BASE_DIR / "backend/data/structured-cv.json")
with open(file, "r") as f:
    data = json.load(f)

# --- 2. THE HEART OF THE CHANGE: PARSE THE STRUCTURE INTO DOCUMENTS ---
documents = []

# Process projects
for project in data.get("projects", []):
    for contribution in project.get("contributions", []):
        doc = Document(
            page_content=contribution,
            metadata={
                "source": "project",
                "title": project.get("title"),
                "dates": project.get("dates"),
                "technologies": ", ".join(project.get("technologies", []))
            }
        )
        documents.append(doc)
    if project.get("personal_learnings"):
        doc = Document(
            page_content=project.get("personal_learnings"),
            metadata={"source": "project_learnings", "title": project.get("title")}
        )
        documents.append(doc)
    if project.get("challenges_faced"):
        doc = Document(
            page_content=project.get("challenges_faced"),
            metadata={"source": "project_challenges", "title": project.get("title")}
        )
        documents.append(doc)

# --- THIS IS THE CORRECTED SECTION ---
# Process professional experience, handling both description and contributions
for experience in data.get("experience", []):
    # Case 1: The experience has a single "description" string
    if "description" in experience and experience["description"]:
        doc = Document(
            page_content=experience.get("description"),
            metadata={
                "source": "experience",
                "role": experience.get("role"),
                "company": experience.get("company"),
                "dates": experience.get("dates")
            }
        )
        documents.append(doc)
    # Case 2: The experience has a list of "contributions"
    elif "contributions" in experience:
        for contribution in experience.get("contributions", []):
            doc = Document(
                page_content=contribution,
                metadata={
                    "source": "experience",
                    "role": experience.get("role"),
                    "company": experience.get("company"),
                    "dates": experience.get("dates")
                }
            )
            documents.append(doc)

# Process the summary
summary_doc = Document(page_content=data.get("summary"), metadata={"source": "summary"})
documents.append(summary_doc)

print(f"Number of structured documents created: {len(documents)}")

# --- 3. EMBED THE DOCUMENTS (No changes here) ---
llm = llama_cpp.Llama(
    model_path=str(BASE_DIR / "models/mxbai-embed-large-v1-f16.gguf"),
    embedding=True,
    verbose=False
)

batch_size = 100
documents_embeddings = []
batches = list(chunk(documents, batch_size))

start = time.time()
for batch in batches:
    embeddings = llm.create_embedding([item.page_content for item in batch])
    documents_embeddings.extend(
        [
            (document, emb['embedding'])
            for document, emb in zip(batch, embeddings['data'])
        ]
    )
end = time.time()
char_per_second = len(''.join([item.page_content for item in documents])) / (end - start)
print(f"Time taken: {end - start:.2f} seconds / {char_per_second:,.2f} chars/sec")


# --- 4. STORE IN QDRANT (Added clearer print statements) ---
client = QdrantClient(path=str(BASE_DIR / "embeddings"))

try:
    client.delete_collection(collection_name="data")
    print("Existing Qdrant collection 'data' cleared.")
except Exception:
    print("No existing Qdrant collection to clear, starting fresh.")

client.create_collection(
    collection_name="data",
    vectors_config=VectorParams(size=1024, distance=Distance.COSINE),
)

points = [
    PointStruct(
        id=str(uuid.uuid4()),
        vector=embed,
        payload={**doc.metadata, "text": doc.page_content}
    )
    for doc, embed in documents_embeddings
]

operation_info = client.upsert(
    collection_name="data",
    wait=True,
    points=points
)

print("Index build complete. Your structured CV content is now stored in Qdrant.")
```
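Because each point's payload carries the document metadata (`source`, `title`, `dates`, and so on) alongside the text, retrieval can be narrowed by those fields. A minimal sketch of a filtered query against the same local collection (the query string and filter value are illustrative; run it only while no other process holds the local Qdrant path open):

```python
import llama_cpp
from pathlib import Path
from qdrant_client import QdrantClient
from qdrant_client.models import Filter, FieldCondition, MatchValue

BASE_DIR = Path(__file__).parent.parent
embedder = llama_cpp.Llama(
    model_path=str(BASE_DIR / "models/mxbai-embed-large-v1-f16.gguf"),
    embedding=True,
    verbose=False,
)
client = QdrantClient(path=str(BASE_DIR / "embeddings"))

query = "What did you build for the AI voice clone project?"
vector = embedder.create_embedding(query)['data'][0]['embedding']

# Restrict hits to chunks that were indexed with source="project".
hits = client.search(
    collection_name="data",
    query_vector=vector,
    query_filter=Filter(must=[FieldCondition(key="source", match=MatchValue(value="project"))]),
    limit=3,
)
for hit in hits:
    print(hit.payload["title"], "->", hit.payload["text"][:80])
```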

File: C:/Users/User/Desktop/colin-tts/backend/archive/build_index.py
```python
import uuid
import json
import time
import llama_cpp
from pathlib import Path

from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams, PointStruct

from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter

from utils import chunk

# Define base directory for consistent path resolution
BASE_DIR = Path(__file__).parent.parent

# Use absolute path for data file
file = str(BASE_DIR / "backend/data/data.json")

# Load the single JSON file containing the CV text
with open(file, "r") as f:
    data = json.load(f)

# Extract the text content
text = data["text"]

# Split the text into smaller chunks for embedding
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=300,
    chunk_overlap=50,
    length_function=len,
    is_separator_regex=False,
)

documents = text_splitter.create_documents([text])
print(f"Number of document chunks: {len(documents)}")

# Initialize the embedding model via llama.cpp
llm = llama_cpp.Llama(
    model_path=str(BASE_DIR / "models/mxbai-embed-large-v1-f16.gguf"),
    embedding=True,
    verbose=False
)

batch_size = 100
documents_embeddings = []
batches = list(chunk(documents, batch_size))

start = time.time()
for batch in batches:
    # Get embeddings for each chunk in the batch
    embeddings = llm.create_embedding([item.page_content for item in batch])
    documents_embeddings.extend(
        [
            (document, emb['embedding'])
            for document, emb in zip(batch, embeddings['data'])
        ]
    )
end = time.time()
char_per_second = len(''.join([item.page_content for item in documents])) / (end - start)
print(f"Time taken: {end - start:.2f} seconds / {char_per_second:,.2f} chars/sec")

# Initialize Qdrant (local, SQLite-based) and create/recreate collection
# Use absolute path to ensure consistency with other files
client = QdrantClient(path=str(BASE_DIR / "embeddings"))

# Try to delete the collection if it exists
try:
    client.delete_collection(collection_name="data")
except Exception:
    pass  # Collection might not exist yet, that's fine

# Create fresh collection
client.create_collection(
    collection_name="data",
    vectors_config=VectorParams(size=1024, distance=Distance.COSINE),
)

# Insert embeddings and text into Qdrant
points = [
    PointStruct(
        id=str(uuid.uuid4()),
        vector=embed,
        payload={"text": doc.page_content}
    )
    for doc, embed in documents_embeddings
]

operation_info = client.upsert(
    collection_name="data",
    wait=True,
    points=points
)

print("Index build complete. Your CV content is now stored in Qdrant with embeddings.")

```

File: C:/Users/User/Desktop/colin-tts/backend/auth.py
```python
from fastapi import HTTPException, Depends
from fastapi.security import HTTPBasic, HTTPBasicCredentials
import secrets
import os
from starlette.status import HTTP_401_UNAUTHORIZED
from dotenv import load_dotenv

# Load environment variables from .env file
load_dotenv()

# Create a security instance for HTTP Basic Auth
security = HTTPBasic()

# Get admin credentials from environment variables
ADMIN_USERNAME = os.getenv("ADMIN_USERNAME")
ADMIN_PASSWORD = os.getenv("ADMIN_PASSWORD")

# Check if credentials are set
if not ADMIN_USERNAME or not ADMIN_PASSWORD:
    raise ValueError("ADMIN_USERNAME and ADMIN_PASSWORD must be set in the environment or a .env file")

def verify_admin_credentials(credentials: HTTPBasicCredentials = Depends(security)):
    """Verify admin credentials for protected endpoints."""
    is_correct_username = secrets.compare_digest(credentials.username, ADMIN_USERNAME)
    is_correct_password = secrets.compare_digest(credentials.password, ADMIN_PASSWORD)

    if not (is_correct_username and is_correct_password):
        raise HTTPException(
            status_code=HTTP_401_UNAUTHORIZED,
            detail="Invalid credentials",
            headers={"WWW-Authenticate": "Basic"},
        )

    return credentials.username

```
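To protect a route, the dependency is attached with `Depends`; FastAPI then challenges the client for Basic credentials before the handler runs. A minimal sketch (the `/admin/conversations` path and `app` instance are illustrative, and the import path assumes the app is run as a package):

```python
from fastapi import FastAPI, Depends

from backend.auth import verify_admin_credentials

app = FastAPI()

@app.get("/admin/conversations")
def list_conversations(admin: str = Depends(verify_admin_credentials)):
    # Runs only after Basic Auth succeeds; `admin` is the verified username.
    return {"admin": admin, "conversations": []}
```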

File: C:/Users/User/Desktop/colin-tts/backend/database.py
```python
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import os

# Define database directory and file path
DB_DIRECTORY = "data"
DB_FILE = "conversations.db"
DB_PATH = os.path.join(DB_DIRECTORY, DB_FILE)

# Create the data directory if it doesn't exist
os.makedirs(DB_DIRECTORY, exist_ok=True)

# Create SQLite database engine
SQLALCHEMY_DATABASE_URL = f"sqlite:///{DB_PATH}"
engine = create_engine(
    SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)

# Create sessionmaker
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

# Create base class for models
Base = declarative_base()

def get_db():
    """Get database session."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()

```
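The `get_db` generator is meant to be wired into endpoints as a FastAPI dependency, so each request gets its own session that is closed by the `finally` block afterwards. A minimal sketch (the `/history` route is illustrative; the imports follow the `backend.` package convention used in models.py below):

```python
from fastapi import FastAPI, Depends
from sqlalchemy.orm import Session

from backend.database import get_db
from backend.models import Conversation

app = FastAPI()

@app.get("/history")
def history(db: Session = Depends(get_db)):
    # A fresh session per request, closed automatically when the request ends.
    rows = db.query(Conversation).order_by(Conversation.timestamp.desc()).limit(10).all()
    return [{"user": r.user_message, "ai": r.ai_response} for r in rows]
```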

File: C:/Users/User/Desktop/colin-tts/backend/models.py
```python
from sqlalchemy import Column, Integer, String, Text, DateTime
from sqlalchemy.sql import func
from backend.database import Base

class Conversation(Base):
    """Model for storing conversation history."""
    __tablename__ = "conversations"

    id = Column(Integer, primary_key=True, index=True)
    timestamp = Column(DateTime(timezone=True), server_default=func.now(), index=True)
    user_message = Column(Text, nullable=False)
    ai_response = Column(Text, nullable=False)
    tts_reference = Column(String, nullable=True)

```

File: C:/Users/User/Desktop/colin-tts/backend/utils.py
```python
from itertools import islice


def chunk(arr_range, chunk_size):
    """Yield successive lists of up to chunk_size items from any iterable."""
    arr_range = iter(arr_range)
    # islice pulls the next chunk_size items; iter() with the sentinel []
    # keeps calling the lambda until the underlying iterator is exhausted.
    return iter(lambda: list(islice(arr_range, chunk_size)), [])
```
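For instance, the batching loops in the index-building scripts rely on this helper behaving as follows:

```python
from utils import chunk

batches = list(chunk(range(7), 3))
print(batches)  # [[0, 1, 2], [3, 4, 5], [6]]
```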

File: C:/Users/User/Desktop/colin-tts/run.txt
```text

```

</file_contents>

requirements.txt
ADDED
@@ -0,0 +1,7 @@
gradio>=4.44.0
llama-cpp-python==0.2.90
qdrant-client>=1.9.1
huggingface_hub>=0.24.0
piper-tts>=1.2.0
onnxruntime>=1.18.0
rich>=13.7.0
structured-cv.json
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"personal_info": {
|
| 3 |
+
"name": "Colin Salvatore Nardo",
|
| 4 |
+
"email": "colin.nardo@gmail.com",
|
| 5 |
+
"linkedin": "www.linkedin.com/in/colin-salvatore-nardo",
|
| 6 |
+
"website": "nardocol.in"
|
| 7 |
+
},
|
| 8 |
+
"summary": "I am a dedicated MSci Computing Science student at the University of Glasgow, on track for a First-Class Honours. My experience revolves around building and deploying full-stack AI applications, leading collaborative technical projects, and translating complex requirements into practical software solutions. As President of the University's AI Society, I actively foster a community of learning and innovation. I am driven by the opportunity to apply my skills in software engineering and artificial intelligence to solve challenging, real-world problems in results-focused, collaborative environments.",
|
| 9 |
+
"professional_focus": {
|
| 10 |
+
"problem_solving_style": [
|
| 11 |
+
"I thrive on delivering concrete results, a mindset proven by my 1st place victory in the GUTS Code Olympics, a high-pressure competitive programming challenge. This mirrors my focus on achieving tangible outcomes in all my project work.",
|
| 12 |
+
"My approach involves designing efficient, production-ready solutions, even within significant constraints. A key example is my AI Chatbot, which I engineered specifically for CPU-only inference to ensure it was accessible and performant without requiring expensive hardware.",
|
| 13 |
+
"I excel at bridging the gap between user needs and technical implementation. In the Course Content Mapping project, I worked directly with university staff, translating their complex Excel-based workflows into an intuitive, interactive web application."
|
| 14 |
+
],
|
| 15 |
+
"leadership_and_teamwork": [
|
| 16 |
+
"In my role as President of the Glasgow University AI Society, I lead a diverse committee to organize weekly workshops and industry talks for over 100 members, featuring speakers from major companies like Google, Hugging Face, and JP Morgan.",
|
| 17 |
+
"This leadership experience has honed my communication and organizational skills, teaching me how to coordinate with industry professionals, manage event logistics, and delegate responsibilities effectively to ensure the smooth execution of our initiatives.",
|
| 18 |
+
"I am committed to fostering an inclusive learning environment where collaboration and knowledge-sharing are paramount. I believe that diverse perspectives lead to stronger outcomes, a value I bring to every team I am a part of."
|
| 19 |
+
]
|
| 20 |
+
},
|
| 21 |
+
"technical_philosophy": {
|
| 22 |
+
"title": "My Approach to Software Engineering",
|
| 23 |
+
"points": [
|
| 24 |
+
"Client-Focused Delivery: I believe that understanding client needs and adapting solutions is key to building lasting impact. I practice this by running sprint reviews and actively negotiating scope changes to ensure the final product aligns with stakeholder goals.",
|
| 25 |
+
"Building Scalable & Trusted Systems: My work on my Minecraft mods, which reached over 30,000 downloads, strengthened my ability to build scalable, event-driven systems that are used and trusted by a large community.",
|
| 26 |
+
"Innovation with a Purpose: I am motivated by the chance to contribute to impactful systems and am passionate about pushing technical boundaries to create practical, forward-looking solutions that solve real-world problems."
|
| 27 |
+
]
|
| 28 |
+
},
|
| 29 |
+
"education": {
|
| 30 |
+
"institution": "University of Glasgow",
|
| 31 |
+
"degree": "MSci in Computing Science",
|
| 32 |
+
"dates": "September 2022 - May 2027",
|
| 33 |
+
"details": [
|
| 34 |
+
"I am currently on track for a First-class Honours (1st) prediction.",
|
| 35 |
+
"My core coursework has provided a strong foundation in both theory and practice, with key courses including Object-Oriented Programming, Algorithms and Data Structures, Operating Systems, Artificial Intelligence, and Practical Natural Language Processing."
|
| 36 |
+
]
|
| 37 |
+
},
|
| 38 |
+
"projects": [
|
| 39 |
+
{
|
| 40 |
+
"title": "AI Voice Clone & Chatbot",
|
| 41 |
+
"organization": "Personal Project",
|
| 42 |
+
"dates": "October 2024 - April 2025",
|
| 43 |
+
"technologies": ["Python", "FastAPI", "Next.js", "React", "TypeScript", "Qdrant", "RAG", "GGUF", "Piper TTS"],
|
| 44 |
+
"contributions": [
|
| 45 |
+
"I single-handedly developed and deployed this full-stack conversational AI, which is live on my portfolio. It functions as an AI clone of me, capable of answering questions about my experience.",
|
| 46 |
+
"I architected the entire system, building a robust Python FastAPI backend to handle the AI logic and a responsive Next.js and TypeScript frontend for the user interface.",
|
| 47 |
+
"The core of the AI is a Retrieval-Augmented Generation (RAG) pipeline I engineered to provide it with a custom knowledge base (my CV), ensuring its answers are accurate and relevant.",
|
| 48 |
+
"A key achievement was optimizing the entire system for CPU-only inference. This involved using quantized GGUF language models and a lightweight Piper TTS engine, making it accessible 24/7 on small PCs without needing a GPU.",
|
| 49 |
+
"To create a realistic and personal voice, I fine-tuned the Text-to-Speech engine on over 300 of my own voice recordings, enabling fully offline, natural-sounding speech synthesis."
|
| 50 |
+
],
|
| 51 |
+
"key_takeaways": "This end-to-end project demonstrates my ability to design and build efficient, production-ready solutions, even in resource-constrained settings. It required creative problem-solving and iterative refinement to push technical boundaries and deliver a complete, polished application.",
|
| 52 |
+
"technical_deep_dive": "The RAG pipeline uses a Qdrant vector database to store and retrieve context from embedded text chunks with low latency. The backend is deployed via Cloudflare Tunnels to handle dynamic IP routing, ensuring robust and secure performance."
|
| 53 |
+
},
|
| 54 |
+
{
|
| 55 |
+
"title": "Course Content Mapping Web Application",
|
| 56 |
+
"organization": "University of Glasgow (LISU)",
|
| 57 |
+
"dates": "September 2024 - April 2025",
|
| 58 |
+
"technologies": ["Python", "FastAPI", "React", "TypeScript", "SQLModel", "SQLAlchemy", "Docker", "Nginx", "ApexCharts"],
|
| 59 |
+
"contributions": [
|
| 60 |
+
"I led the frontend development for this web application, designed to modernize the course planning process for university professors by replacing a cumbersome Excel-based system.",
|
| 61 |
+
"Using React and TypeScript, I engineered an interactive Single Page Application with an intuitive drag-and-drop interface for organizing course activities.",
|
| 62 |
+
"I was the main point of contact for the clients, organizing monthly sprint reviews to present progress, gather feedback on evolving requirements, and negotiate the scope for upcoming development cycles.",
|
| 63 |
+
"The application features data visualization dashboards using ApexCharts to track learning hours and graduate attribute coverage, providing valuable insights to academic staff.",
|
| 64 |
+
"The backend is a robust REST API built with Python and FastAPI, using SQLModel for database interaction and object validation, all containerized with Docker."
|
| 65 |
+
],
|
| 66 |
+
"key_takeaways": "This project gave me critical experience in translating complex client requirements into clear specifications and a user-friendly product. Collaborating with teammates in agile sprints improved my ability to deliver iterative updates and ensure the application met both technical and regulatory needs.",
|
| 67 |
+
"technical_deep_dive": "The application is fully containerized using Docker and orchestrated with Docker Compose. An Nginx server acts as a reverse proxy, handling requests and directing traffic to the appropriate frontend or backend service. The backend includes role-based permissions and session authentication for security."
|
| 68 |
+
},
|
| 69 |
+
    {
      "title": "Java Mod Development for Minecraft (CobblePass)",
      "organization": "Personal Project",
      "dates": "December 2024 - May 2025",
      "technologies": ["Java", "Fabric API", "Gradle", "JSON"],
      "contributions": [
        "I developed and published two popular server-side mods for Minecraft from scratch using Java and the Fabric API, which have achieved over 30,000 combined downloads.",
        "The primary mod, CobblePass, is a highly flexible battle pass system designed to drive player engagement through a dual-track reward system (free and premium).",
        "I designed and implemented dynamic user interfaces for players and robust, schema-based JSON configurations, allowing server administrators to deeply customize seasons, rewards, and GUI layouts without restarting the server.",
        "The mod features a rich suite of in-game admin commands for live management of seasons and player data."
      ],
      "key_takeaways": "This project strengthened my ability to build scalable, event-driven systems that are used and trusted by a large and active community. It was a deep dive into object-oriented programming, data persistence, and API integration in a real-world application.",
      "technical_deep_dive": "I engineered an event-driven architecture to track in-game player actions, such as catching a creature, which then grants experience. The system manages persistent user data across sessions and integrates with third-party APIs like GooeyLibs for the GUI and the Impactor API for in-game economy features."
    },
    {
      "title": "Glasgow University Artificial Intelligence Society President",
      "organization": "Glasgow University Artificial Intelligence Society",
      "dates": "May 2024 - Present",
      "technologies": ["Leadership", "Event Management", "Public Speaking", "Networking", "Community Building"],
      "contributions": [
        "As President, I lead the society's mission to equip students with practical AI skills. I organize and coordinate a full calendar of events, including weekly hands-on technical workshops.",
        "I am responsible for industry outreach, successfully hosting guest talks with leading professionals from companies like JP Morgan, Hugging Face, and Google to provide members with insights into real-world AI applications and trends.",
        "I manage collaborations with external organizations to create valuable networking opportunities and source internships for our members in AI-related fields."
      ],
      "key_takeaways": "This role has been instrumental in developing my leadership, communication, and organizational skills. It has taught me how to foster an inclusive environment, delegate responsibilities effectively, and coordinate complex events, all of which are essential for working in multi-disciplinary teams."
    },
    {
      "title": "GUTS Code Olympics & Hackathons",
      "organization": "Glasgow University Tech Society",
      "dates": "October 2024, February 2025",
      "technologies": ["Competitive Programming", "Algorithms", "Data Structures", "Three.js", "JavaScript", "Blender"],
      "contributions": [
        "I achieved 1st place in the GUTS Code Olympics, a competitive programming challenge sponsored by J.P. Morgan, BlackRock, and SAS, where I secured over 80% of the total available points.",
        "At a separate GUTS hackathon, I led a team in the rapid development of a multiplayer 3D web game using Three.js, which featured airplanes navigating a 3D globe.",
        "For the game, I created dynamic hazards like earthquakes and hurricanes that players had to avoid in real-time. I also personally modelled all the low-poly 3D assets from scratch in Blender to ensure a cohesive visual design."
      ],
      "key_takeaways": "These competitions demonstrated my ability to solve complex problems under pressure and to rapidly prototype functional applications. The hackathon in particular was a great exercise in teamwork, 3D graphics programming, and creative design."
    }
  ],
"experience": [
|
| 109 |
+
{
|
| 110 |
+
"role": "Storage Administrator",
|
| 111 |
+
"company": "Saint Storage",
|
| 112 |
+
"location": "St. Andrews, Scotland",
|
| 113 |
+
"dates": "August 2023 - August 2024",
|
| 114 |
+
"description": "In this role, I assisted in optimizing the company's logistics operations. My main contribution was supporting the integration of Storage IQ, a web application built with Ruby on Rails, which helped to enhance warehouse efficiency and improve the accuracy of deliveries."
|
| 115 |
+
}
|
| 116 |
+
],
|
| 117 |
+
"skills": {
|
| 118 |
+
"languages": ["Python", "Java", "TypeScript/JavaScript", "SQL"],
|
| 119 |
+
"frameworks_and_libraries": ["FastAPI", "React", "Next.js", "Three.js", "PyTorch", "SQLAlchemy", "SvelteKit", "GSAP"],
|
| 120 |
+
"databases_and_data": ["SQL", "Qdrant (Vector DB)", "RAG Pipelines", "Docker", "Sanity CMS"],
|
| 121 |
+
"spoken_languages": [
|
| 122 |
+
{ "language": "English", "proficiency": "Native" },
|
| 123 |
+
{ "language": "Italian", "proficiency": "Native" },
|
| 124 |
+
{ "language": "French", "proficiency": "Fluent" },
|
| 125 |
+
{ "language": "Spanish", "proficiency": "Intermediate" },
|
| 126 |
+
{ "language": "Sicilian", "proficiency": "Native" }
|
| 127 |
+
]
|
| 128 |
+
}
|
| 129 |
+
}
|
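
The technical_deep_dive for the Course Content Mapping project describes a FastAPI backend that uses SQLModel for both database interaction and object validation. Below is a minimal sketch of that pattern; it is illustrative only, not the project's actual code, and the Activity model, the /activities routes, and the database URL are all invented for the example.

from typing import Optional

from fastapi import Depends, FastAPI
from sqlmodel import Field, Session, SQLModel, create_engine, select


class Activity(SQLModel, table=True):
    # One SQLModel class serves as both the ORM table and the
    # request/response validation schema.
    id: Optional[int] = Field(default=None, primary_key=True)
    title: str
    learning_hours: float = 0.0


engine = create_engine("sqlite:///courses.db")  # assumed database URL
app = FastAPI()


def get_session():
    # Yield a per-request session so each endpoint gets a clean unit of work.
    with Session(engine) as session:
        yield session


@app.on_event("startup")
def create_tables():
    SQLModel.metadata.create_all(engine)


@app.post("/activities", response_model=Activity)
def create_activity(activity: Activity, session: Session = Depends(get_session)):
    # Validation happens on the way in; persistence is a plain add/commit.
    session.add(activity)
    session.commit()
    session.refresh(activity)
    return activity


@app.get("/activities", response_model=list[Activity])
def list_activities(session: Session = Depends(get_session)):
    return session.exec(select(Activity)).all()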
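
The CobblePass technical_deep_dive describes an event-driven architecture: in-game actions such as catching a creature emit events, a subscriber grants battle-pass experience, and the result is persisted per player. The actual mod is written in Java against the Fabric API; the sketch below only illustrates the general pattern in Python, and every name in it is invented.

from collections import defaultdict
from typing import Callable


class EventBus:
    # Minimal publish/subscribe hub: handlers register per event name and
    # are invoked with the event payload when that event is emitted.
    def __init__(self) -> None:
        self._handlers: dict[str, list[Callable[..., None]]] = defaultdict(list)

    def subscribe(self, event: str, handler: Callable[..., None]) -> None:
        self._handlers[event].append(handler)

    def emit(self, event: str, **payload) -> None:
        for handler in self._handlers[event]:
            handler(**payload)


xp_store: dict[str, int] = {}  # stands in for persistent per-player data


def grant_catch_xp(player: str, species: str) -> None:
    # Subscriber: award experience for the action and record it.
    xp_store[player] = xp_store.get(player, 0) + 50  # assumed XP value
    print(f"{player} caught {species}: now {xp_store[player]} XP")


bus = EventBus()
bus.subscribe("creature_caught", grant_catch_xp)
bus.emit("creature_caught", player="Steve", species="Pidgey")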