Spaces:
Sleeping
Sleeping
File size: 2,468 Bytes
0f01725 e6f086e ad8c88c 0b9ee4b ad8c88c 0f01725 0b9ee4b 0f01725 ad8c88c 0b9ee4b 0f01725 ad8c88c 0b9ee4b ad8c88c 0b9ee4b ad8c88c 0f01725 0b9ee4b ad8c88c 0b9ee4b ad8c88c 0b9ee4b ad8c88c 0b9ee4b ad8c88c 0b9ee4b ad8c88c 0b9ee4b 0f01725 ad8c88c 0f01725 ad8c88c 0b9ee4b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 |
import requests
import os
# Relative import (same folder)
from .normalizer import normalize_job_data
# --- CONFIGURATION FOR SCRAPINGDOG API ---
# Resolve the ScrapingDog API key: prefer Streamlit secrets when available,
# then fall back to the environment. The original code only consulted the
# environment when the `try` body raised — but `st.secrets.get(...)` returns
# None (no exception) when the secret is simply absent, so the env-var
# fallback was unreachable in that case. Fixed by falling through whenever
# the key is still unset.
try:
    import streamlit as st
    SCRAPINGDOG_API_KEY = st.secrets.get("SCRAPINGDOG_API_KEY")
except (ImportError, KeyError, FileNotFoundError):
    # No streamlit installed, or no secrets file present at all.
    SCRAPINGDOG_API_KEY = None
if not SCRAPINGDOG_API_KEY:
    SCRAPINGDOG_API_KEY = os.getenv("SCRAPINGDOG_API_KEY")
def fetch_linkedin_jobs_stub(query="python developer", location="remote", limit=5):
    """Return a canned list of mock LinkedIn postings.

    Placeholder for a real LinkedIn integration: the query/location/limit
    arguments are accepted for interface parity but ignored. Each mock
    posting is passed through ``normalize_job_data`` so the output shape
    matches the real fetcher.
    """
    print("LinkedIn agent is a stub. Returning mock data.")
    sample_posting = {
        'title': 'Senior Python Engineer (Stub)',
        'company_name': 'Innovate Inc.',
        'location': 'Remote',
        'description': 'Looking for a senior python developer with experience in Django and cloud services. This is a mock job from a stub function.',
        'posted_at': '2025-09-27',
        'job_url': 'https://www.linkedin.com/jobs'
    }
    normalized = []
    for posting in (sample_posting,):
        normalized.append(normalize_job_data(posting, "LinkedIn (Stub)"))
    return normalized
def fetch_linkedin_jobs_real(query="python developer", location="remote", limit=10):
    """Fetch live job postings from LinkedIn via the ScrapingDog API.

    Args:
        query: Search keywords for the job query.
        location: Location filter passed to the API.
        limit: Maximum number of results requested (``num`` API parameter).

    Returns:
        A list of job dicts normalized by ``normalize_job_data`` with the
        source tag "LinkedIn". Returns ``[]`` when the API key is missing,
        on any network/HTTP error, on malformed JSON, or when the response
        is not a list.
    """
    if not SCRAPINGDOG_API_KEY:
        print("SCRAPINGDOG_API_KEY not set. Cannot fetch real data from LinkedIn.")
        return []
    url = "https://api.scrapingdog.com/linkedinjobs/"
    params = {
        "api_key": SCRAPINGDOG_API_KEY,
        "query": query,
        "location": location,
        "num": limit
    }
    try:
        # Bug fix: the original call had no timeout, so a stalled
        # connection could block the caller indefinitely.
        response = requests.get(url, params=params, timeout=30)
        response.raise_for_status()
        jobs = response.json()
    except requests.exceptions.RequestException as e:
        print(f"Error fetching jobs from ScrapingDog API: {e}")
        return []
    except ValueError as e:
        # requests raises a ValueError subclass on invalid JSON bodies.
        print(f"Error parsing JSON: {e}")
        return []
    if not isinstance(jobs, list):
        print("Unexpected API response format.")
        return []
    # Map the ScrapingDog field names onto our internal job schema,
    # defaulting any missing fields.
    adapted_jobs = [
        {
            'title': job.get('job_title', 'N/A'),
            'company_name': job.get('company_name', 'N/A'),
            'location': job.get('job_location', 'Remote'),
            'description': job.get('job_description', ''),
            'posted_at': job.get('posted_at', 'N/A'),
            'job_url': job.get('job_url', '')
        }
        for job in jobs
    ]
    return [normalize_job_data(job, "LinkedIn") for job in adapted_jobs]
|