MBilal-72 committed on
Commit
da10b2d
·
verified ·
1 Parent(s): 4cf525d

Delete backend

Browse files
backend/__init__.py DELETED
File without changes
backend/agents/matcher.py DELETED
@@ -1,26 +0,0 @@
1
- # matcher.py
2
-
3
- from sentence_transformers import SentenceTransformer, util
4
- import re
5
- from collections import Counter
6
-
7
# Small English sentence-embedding model, loaded once at import time and
# shared by compute_match below.
model = SentenceTransformer("BAAI/bge-small-en-v1.5")
8
-
9
def compute_match(resume_text: str, job_desc: str) -> float:
    """Cosine similarity between a resume and a job description.

    Both texts are embedded with the shared module-level model using
    normalized embeddings, so cosine similarity equals the dot product.

    Returns:
        Similarity score rounded to three decimal places.
    """
    resume_vec = model.encode(
        resume_text, convert_to_tensor=True, normalize_embeddings=True
    )
    job_vec = model.encode(
        job_desc, convert_to_tensor=True, normalize_embeddings=True
    )
    similarity = util.cos_sim(resume_vec, job_vec)
    return round(float(similarity.item()), 3)
14
-
15
# Common English words that carry no signal for keyword extraction.
# Hoisted to module level so the set is built once, not on every call.
_STOPWORDS = frozenset({
    "the", "and", "a", "an", "to", "of", "in", "for", "on", "with",
    "at", "by", "from", "or", "is", "are", "as", "this", "that",
    "your", "you", "be", "has", "have", "will", "can", "may", "our",
})

# Strips everything except letters, digits, and whitespace; compiled once.
_NON_ALNUM_RE = re.compile(r"[^a-zA-Z0-9\s]")


def extract_keywords(text, top_n=20):
    """Return up to *top_n* most frequent keywords found in *text*.

    Tokens are lowercased, punctuation is stripped, and stopwords as well
    as tokens of length <= 2 are discarded. Ties are broken by first
    appearance (Counter.most_common is insertion-stable).

    Args:
        text: Free-form text to mine for keywords.
        top_n: Maximum number of keywords to return (default 20).

    Returns:
        List of keyword strings, most frequent first.
    """
    cleaned = _NON_ALNUM_RE.sub("", text)
    words = [
        w for w in cleaned.lower().split()
        if w not in _STOPWORDS and len(w) > 2
    ]
    return [word for word, _ in Counter(words).most_common(top_n)]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backend/agents/normalizer.py DELETED
@@ -1,13 +0,0 @@
1
- from typing import List, Dict
2
-
3
def normalize_job(raw: Dict, source: str) -> Dict:
    """Map a provider-specific job payload onto the common job schema.

    Falls back to "position" when "title" is absent or falsy, and to
    "tags" when "skills" is absent or falsy. Every missing field
    defaults to an empty string (empty list for skills), and the id is
    always coerced to a string.
    """
    title = raw.get("title") or raw.get("position", "")
    skills = raw.get("skills") or raw.get("tags", [])
    normalized = {
        "id": str(raw.get("id", "")),
        "source": source,
        "title": title,
        "company": raw.get("company", ""),
        "description": raw.get("description", ""),
        "skills": skills,
        "url": raw.get("url", ""),
        "date": raw.get("date", ""),
    }
    return normalized
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backend/agents/rapidapi_freelancer.py DELETED
@@ -1,28 +0,0 @@
1
- import os
2
- import requests
3
- from .normalizer import normalize_job
4
-
5
# RapidAPI credential; read from the environment, never hard-coded.
RAPIDAPI_KEY = os.getenv("RAPIDAPI_KEY")


def fetch_freelancer_jobs(query="AI", limit=5):
    """Fetch up to *limit* Freelancer.com projects matching *query*.

    Calls the RapidAPI Freelancer search endpoint and returns the
    results normalized via normalize_job().

    Raises:
        requests.HTTPError: on a non-2xx API response.
        requests.Timeout: if the API does not answer within 30s.
    """
    url = "https://freelancer-com.p.rapidapi.com/projects/search"  # Example endpoint
    headers = {
        "X-RapidAPI-Key": RAPIDAPI_KEY,
        "X-RapidAPI-Host": "freelancer-com.p.rapidapi.com"
    }
    params = {"q": query, "limit": limit}
    # timeout prevents a hung connection from blocking the caller forever;
    # raise_for_status surfaces auth/quota errors instead of parsing an error body.
    response = requests.get(url, headers=headers, params=params, timeout=30)
    response.raise_for_status()
    jobs = response.json().get("projects", [])

    normalized = []
    for job in jobs[:limit]:
        normalized.append(normalize_job({
            "id": job.get("id"),
            "title": job.get("title"),
            "company": "Freelancer Client",
            "description": job.get("description"),
            # .get per entry so one malformed skill dict doesn't abort the batch
            "skills": [s.get("name", "") for s in job.get("jobs", [])],
            "url": f"https://www.freelancer.com/projects/{job.get('seo_url')}",
            "date": job.get("submitdate")
        }, "Freelancer"))
    return normalized
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backend/agents/rapidapi_linkedin.py DELETED
@@ -1,28 +0,0 @@
1
- import os
2
- import requests
3
- from .normalizer import normalize_job
4
-
5
# RapidAPI credential; read from the environment, never hard-coded.
RAPIDAPI_KEY = os.getenv("RAPIDAPI_KEY")


def fetch_linkedin_jobs(query="software engineer", limit=5):
    """Fetch up to *limit* LinkedIn job postings matching *query*.

    POSTs to the RapidAPI LinkedIn Jobs Search endpoint (remote jobs,
    first page only) and returns the results normalized via
    normalize_job().

    Raises:
        requests.HTTPError: on a non-2xx API response.
        requests.Timeout: if the API does not answer within 30s.
    """
    url = "https://linkedin-jobs-search.p.rapidapi.com/"  # Example endpoint
    headers = {
        "X-RapidAPI-Key": RAPIDAPI_KEY,
        "X-RapidAPI-Host": "linkedin-jobs-search.p.rapidapi.com"
    }
    payload = {"search_terms": query, "location": "remote", "page": "1"}
    # timeout prevents a hung connection from blocking the caller forever;
    # raise_for_status surfaces auth/quota errors before JSON parsing.
    response = requests.post(url, headers=headers, json=payload, timeout=30)
    response.raise_for_status()
    jobs = response.json()

    normalized = []
    for job in jobs[:limit]:
        normalized.append(normalize_job({
            "id": job.get("job_id"),
            "title": job.get("job_title"),
            "company": job.get("company_name"),
            "description": job.get("job_description"),
            "skills": job.get("job_skills", []),
            "url": job.get("linkedin_url"),
            "date": job.get("posted_date")
        }, "LinkedIn"))
    return normalized
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backend/agents/rapidapi_upwork.py DELETED
@@ -1,28 +0,0 @@
1
- import os
2
- import requests
3
- from .normalizer import normalize_job
4
-
5
# RapidAPI credential; read from the environment, never hard-coded.
RAPIDAPI_KEY = os.getenv("RAPIDAPI_KEY")


def fetch_upwork_jobs(query="python developer", limit=5):
    """Fetch up to *limit* Upwork job postings matching *query*.

    Calls the RapidAPI Upwork search endpoint (first page only) and
    returns the results normalized via normalize_job().

    Raises:
        requests.HTTPError: on a non-2xx API response.
        requests.Timeout: if the API does not answer within 30s.
    """
    url = "https://upwork-api.p.rapidapi.com/search/jobs"  # Example endpoint
    headers = {
        "X-RapidAPI-Key": RAPIDAPI_KEY,
        "X-RapidAPI-Host": "upwork-api.p.rapidapi.com"
    }
    params = {"q": query, "page": "1"}
    # timeout prevents a hung connection from blocking the caller forever;
    # raise_for_status surfaces auth/quota errors instead of parsing an error body.
    response = requests.get(url, headers=headers, params=params, timeout=30)
    response.raise_for_status()
    jobs = response.json().get("jobs", [])

    normalized = []
    for job in jobs[:limit]:
        normalized.append(normalize_job({
            "id": job.get("id"),
            "title": job.get("title"),
            "company": "Upwork Client",
            "description": job.get("description"),
            "skills": job.get("skills", []),
            "url": f"https://www.upwork.com/jobs/{job.get('id')}",
            "date": job.get("date_created")
        }, "Upwork"))
    return normalized
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backend/agents/remoteok_agent.py DELETED
@@ -1,8 +0,0 @@
1
- import requests
2
- from .normalizer import normalize_job
3
-
4
def fetch_remoteok_jobs(limit=10):
    """Fetch up to *limit* jobs from the public RemoteOK API.

    RemoteOK requires a User-Agent header, and the first element of the
    JSON response is API metadata rather than a job, so it is skipped.

    Raises:
        requests.HTTPError: on a non-2xx response (e.g. rate limiting).
        requests.Timeout: if the API does not answer within 30s.
    """
    url = "https://remoteok.com/api"
    # timeout prevents a hung connection from blocking the caller forever
    res = requests.get(url, headers={"User-Agent": "MATCHHIVE"}, timeout=30)
    res.raise_for_status()  # fail loudly on rate-limit / outage
    jobs = res.json()[1:]  # skip metadata
    return [normalize_job(job, "RemoteOK") for job in jobs[:limit]]
 
 
 
 
 
 
 
 
 
backend/agents/resume_gen.py DELETED
File without changes
backend/agents/resume_parser.py DELETED
@@ -1,10 +0,0 @@
1
- import pdfplumber, docx, os
2
-
3
def extract_text(file_path: str) -> str:
    """Extract plain text from a .pdf or .docx file.

    Extension matching is case-insensitive, so "CV.PDF" and "cv.Docx"
    are handled too (the original check missed uppercase extensions).

    Args:
        file_path: Path to the document on disk.

    Returns:
        The extracted text with segments joined by single spaces, or
        "" for unsupported extensions.
    """
    lower = file_path.lower()
    if lower.endswith(".pdf"):
        with pdfplumber.open(file_path) as pdf:
            # extract_text() can return None for image-only pages
            return " ".join(page.extract_text() or "" for page in pdf.pages)
    elif lower.endswith(".docx"):
        doc = docx.Document(file_path)
        return " ".join(p.text for p in doc.paragraphs)
    return ""
- return ""