# olympiads-ref / CEMC / download_script / download_pfc.py
# beichen0426's picture
# Duplicate from AI-MO/olympiads-ref
# 63c5bce verified
# -----------------------------------------------------------------------------
# Author: Jiawei Liu
# Date: 2024-11-27
# -----------------------------------------------------------------------------
'''
Download script for CEMC PFC (Pascal, Fermat, Cayley) contests.
To run:
`python CEMC_Euclid/download_script/download_pfc.py`
'''
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
from pathlib import Path
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from urllib.parse import urljoin
def build_session(
    max_retries: int = 3,
    backoff_factor: float = 2,
    session: requests.Session = None
) -> requests.Session:
    """
    Build a requests session that retries failed requests with exponential backoff.

    Retries cover connection errors AND transient HTTP status codes
    (429 / 500 / 502 / 503 / 504) — the default ``Retry`` only retries
    connection-level failures, which is not enough for a crawler that
    may be rate-limited.

    Args:
        max_retries (int, optional): Number of retries. Defaults to 3.
        backoff_factor (float, optional): Backoff factor between retries.
            Defaults to 2.
        session (requests.Session, optional): Existing session to configure;
            a new one is created when None. Defaults to None.

    Returns:
        requests.Session: The configured session.
    """
    session = session or requests.Session()
    retry = Retry(
        total=max_retries,
        backoff_factor=backoff_factor,
        # Also retry on rate limiting and transient server errors;
        # GET is already in Retry's default allowed_methods.
        status_forcelist=(429, 500, 502, 503, 504),
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount("http://", adapter)
    session.mount("https://", adapter)
    # Browser-like User-Agent: some sites block the default python-requests UA.
    session.headers.update({
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
    })
    return session
def main():
    """
    Crawl the CEMC past-contests listing (contest_category=14, the PFC
    contests) page by page and download every `*Contest.pdf` /
    `*Solution.pdf` into ``<repo>/raw/pfc``.

    Already-downloaded files are skipped, so an interrupted run can be
    resumed by simply re-running the script.
    """
    base_url = "https://cemc.uwaterloo.ca/resources/past-contests?grade=All&academic_year=All&contest_category=14"
    req_session = build_session()
    output_dir = Path(__file__).parent.parent / "raw" / "pfc"
    output_dir.mkdir(parents=True, exist_ok=True)
    while True:
        # Timeouts keep the crawl from hanging forever on a dead connection;
        # raise_for_status stops us from parsing an HTML error page as a listing.
        resp = req_session.get(base_url, timeout=30)
        resp.raise_for_status()
        soup = BeautifulSoup(resp.text, 'html.parser')
        problem_pdf = soup.find_all('a', href=lambda x: x and x.endswith('Contest.pdf'))
        solution_pdf = soup.find_all('a', href=lambda x: x and x.endswith('Solution.pdf'))
        # hrefs on the page are relative; resolve them against the page URL.
        pdf_uris = [urljoin(base_url, pdf['href']) for pdf in problem_pdf + solution_pdf]
        for pdf_uri in tqdm(pdf_uris):
            output_file = output_dir / f"en-{Path(pdf_uri).name}"
            # Skip files already downloaded on a previous run.
            if output_file.exists():
                continue
            pdf_resp = req_session.get(pdf_uri, timeout=60)
            if pdf_resp.status_code != 200:
                print(f"Failed to download {pdf_uri}")
                continue
            output_file.write_bytes(pdf_resp.content)
        # Follow the pager's "next" link until the last page.
        next_page = soup.find('li', class_='pager__item--next')
        if not next_page:
            break
        next_anchor = next_page.find("a")
        if next_anchor is None or not next_anchor.get('href'):
            # Defensive: a pager item without a usable link would otherwise
            # raise TypeError here; treat it as the end of pagination.
            break
        base_url = urljoin(base_url, next_anchor['href'])
        print(f"Next page: {base_url}")
# Run the downloader only when executed as a script, not on import.
if __name__ == "__main__":
    main()