Alpha108 committed
Commit ad8c88c · verified · 1 Parent(s): 0b9ee4b

Update backend/agents/rapidapi_linkedin.py

Files changed (1)
  1. backend/agents/rapidapi_linkedin.py +17 -42
backend/agents/rapidapi_linkedin.py CHANGED
@@ -1,23 +1,15 @@
 import requests
-from .normalizer import normalize_job_data
+from normalizer import normalize_job_data  # absolute import
 import os
 
-# --- THIS FILE IS NOW CONFIGURED FOR SCRAPINGDOG API ---
-# IMPORTANT: You must add your ScrapingDog API key to Hugging Face secrets.
-# Secret Name: SCRAPINGDOG_API_KEY
-
+# --- CONFIGURATION FOR SCRAPINGDOG API ---
 try:
     import streamlit as st
-    # Fetch the new secret for ScrapingDog
     SCRAPINGDOG_API_KEY = st.secrets.get("SCRAPINGDOG_API_KEY")
 except (ImportError, KeyError, FileNotFoundError):
-    # Fallback for local testing
-    SCRAPINGDOG_API_KEY = os.getenv("SCRAPINGDOG_API_KEY", "68d8076313776e5f15a57b26")
+    SCRAPINGDOG_API_KEY = os.getenv("SCRAPINGDOG_API_KEY")
 
 def fetch_linkedin_jobs_stub(query="python developer", location="remote", limit=5):
-    """
-    Skeleton function returning mock data. This remains unchanged for testing purposes.
-    """
     print("LinkedIn agent is a stub. Returning mock data.")
     mock_linkedin_jobs = [
         {
@@ -31,44 +23,29 @@ def fetch_linkedin_jobs_stub(query="python developer", location="remote", limit=
     ]
     return [normalize_job_data(job, "LinkedIn (Stub)") for job in mock_linkedin_jobs]
 
-
 def fetch_linkedin_jobs_real(query="python developer", location="remote", limit=10):
-    """
-    Fetches jobs from the ScrapingDog LinkedIn API.
-
-    Args:
-        query (str): The job title or keyword to search for (e.g., "Software Engineer").
-        location (str): The geographical location to search in.
-        limit (int): The number of results to fetch. (Note: ScrapingDog may not have a limit param, this is illustrative).
-
-    Returns:
-        list: A list of normalized job dictionaries.
-    """
-    if not SCRAPINGDOG_API_KEY or SCRAPINGDOG_API_KEY == "YOUR_SCRAPINGDOG_API_KEY_HERE":
+    if not SCRAPINGDOG_API_KEY:
         print("SCRAPINGDOG_API_KEY not set. Cannot fetch real data from LinkedIn.")
         return []
 
-    url = "https://api.scrapingdog.com/linkedinjobs"
-
+    url = "https://api.scrapingdog.com/linkedinjobs/"
+
     params = {
         "api_key": SCRAPINGDOG_API_KEY,
-        "field": query,
+        "query": query,
         "location": location,
+        "num": limit
     }
 
     try:
         response = requests.get(url, params=params)
-        response.raise_for_status()  # Raise an error for bad responses (4xx or 5xx)
-
+        response.raise_for_status()
         jobs = response.json()
-
+
         if not isinstance(jobs, list):
-            print("ScrapingDog API did not return a list of jobs. Check the response format.")
+            print("Unexpected API response format.")
             return []
 
-        # IMPORTANT: We are assuming the field names from the ScrapingDog API.
-        # You may need to inspect the actual response and adjust the keys below (e.g., 'job_title' might be 'title').
-        # This pre-mapping step adapts the ScrapingDog response to what our normalizer expects.
         adapted_jobs = []
         for job in jobs:
             adapted_job = {
@@ -76,18 +53,16 @@ def fetch_linkedin_jobs_real(query="python developer", location="remote", limit=
                 'company_name': job.get('company_name', 'N/A'),
                 'location': job.get('job_location', 'Remote'),
                 'description': job.get('job_description', ''),
-                'posted_at': job.get('posted_at', 'N/A'),  # Adjust if date format is different
+                'posted_at': job.get('posted_at', 'N/A'),
                 'job_url': job.get('job_url', '')
             }
             adapted_jobs.append(adapted_job)
-
-        # The source name is kept as "LinkedIn" for display consistency in the UI
-        return [normalize_job_data(job, "LinkedIn (Stub)") for job in adapted_jobs]
+
+        return [normalize_job_data(job, "LinkedIn") for job in adapted_jobs]
 
     except requests.exceptions.RequestException as e:
-        print(f"Error fetching jobs from ScrapingDog LinkedIn API: {e}")
+        print(f"Error fetching jobs from ScrapingDog API: {e}")
         return []
-    except ValueError as e:  # Catches JSON decoding errors
-        print(f"Error parsing JSON from ScrapingDog API: {e}")
+    except ValueError as e:
+        print(f"Error parsing JSON: {e}")
         return []
-
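
A note on the normalization step this commit keeps: the adapter pre-maps ScrapingDog's response fields before handing each job to normalize_job_data, and normalizer.py itself is not part of this diff. Below is a minimal sketch of the shape that helper would need for the adapter above to work; the output keys are assumptions inferred from the calling code, not the project's actual implementation.

# Hypothetical sketch of normalizer.normalize_job_data (the real module is not in this diff).
def normalize_job_data(job: dict, source: str) -> dict:
    # Assumed common schema: every agent's output is reduced to the same keys,
    # with the originating job board recorded under "source".
    return {
        "title": job.get("title", "N/A"),
        "company": job.get("company_name", "N/A"),
        "location": job.get("location", "Remote"),
        "description": job.get("description", ""),
        "posted_at": job.get("posted_at", "N/A"),
        "url": job.get("job_url", ""),
        "source": source,
    }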
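
Dropping the hard-coded fallback key also changes local behavior: with no SCRAPINGDOG_API_KEY in the environment, os.getenv now returns None and fetch_linkedin_jobs_real exits early with []. A quick smoke test outside Streamlit might look like the hypothetical script below; run it from backend/agents/ so the new absolute import resolves, and note that the query and num parameter names are taken from this diff and are worth verifying against ScrapingDog's current docs, since the previous code used field.

# smoke_test.py (hypothetical) -- run from backend/agents/ with the key set in the shell:
#   SCRAPINGDOG_API_KEY=... python smoke_test.py
from rapidapi_linkedin import fetch_linkedin_jobs_real

jobs = fetch_linkedin_jobs_real(query="python developer", location="remote", limit=5)
print(f"Fetched {len(jobs)} jobs")
for job in jobs:
    print(job)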
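
Since every real request spends ScrapingDog credits, the adapter and error paths can be exercised offline by stubbing requests.get. Here is a sketch using pytest's monkeypatch (pytest is an assumed dev dependency); the fake payload uses only the field names the adapter reads in this diff, and the assertion stays shape-agnostic because normalize_job_data's output is not shown.

# test_rapidapi_linkedin.py (hypothetical) -- run with: pytest
import rapidapi_linkedin


class FakeResponse:
    def raise_for_status(self):
        pass  # simulate a 200 response

    def json(self):
        # Only keys the adapter reads via job.get(...) in this diff.
        return [{
            "company_name": "Acme",
            "job_location": "Remote",
            "job_description": "Build things.",
            "posted_at": "2024-01-01",
            "job_url": "https://www.linkedin.com/jobs/view/123",
        }]


def test_fetch_returns_one_normalized_job(monkeypatch):
    monkeypatch.setattr(rapidapi_linkedin, "SCRAPINGDOG_API_KEY", "test-key")
    monkeypatch.setattr(rapidapi_linkedin.requests, "get",
                        lambda *args, **kwargs: FakeResponse())
    jobs = rapidapi_linkedin.fetch_linkedin_jobs_real(limit=1)
    assert len(jobs) == 1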