import requests
from bs4 import BeautifulSoup
import json
import time
from concurrent.futures import ThreadPoolExecutor
import argparse

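# Scrapes cosppi.net: collects user profile links from the ranking listings,
# then walks each user's image pages and writes the image URLs together with
# their like/retweet counts to a JSON file.
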
BASE = "https://cosppi.net"
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:108.0) Gecko/20100101 Firefox/108.0"
RETRY_MAX = 3
TIMEOUT = 20

# Ranking categories accepted by the --sort option.
SORTS = ["all-rank", "china-rank", "new-add", "follower-rank"]

def process_url(url):
    if url.startswith("//"):
        return f"https:{url}"
    elif url.startswith("/"):
        return f"{BASE}{url}"
    else:
        return url

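# Illustrative behaviour of process_url (example inputs, not taken from the site):
#   process_url("//pbs.twimg.com/media/x.jpg") -> "https://pbs.twimg.com/media/x.jpg"
#   process_url("/user/example")               -> "https://cosppi.net/user/example"
#   process_url("https://example.com/a.jpg")   -> returned unchanged
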
def get_user_urls(debug, sort, page):
    found_urls = []
    url = f"{BASE}/sort/{sort}/page/{page}"
    print(f"Fetching {url}...")
    for i in range(RETRY_MAX):
        try:
            response = requests.get(
                url,
                headers={"User-Agent": USER_AGENT},
                timeout=TIMEOUT,
            )
            # A 404 just means the listing has run out of pages, so don't retry it.
            if response.status_code != 404:
                response.raise_for_status()
            break
        except requests.exceptions.RequestException as e:
            print(f"Error: {e}")
            if i < RETRY_MAX - 1:
                print("Retrying after 3 seconds...")
                time.sleep(3)
            else:
                print("Max retries reached.")
                return None

    if response.status_code == 404 or (debug and page >= 2):
        print(f"Page {page} not found!")
        return None

    print(f"Fetched page {page}")
    soup = BeautifulSoup(response.text, "html.parser")
    main_section = soup.find("main")
    users = main_section.find_all("a", {"class": "sort_prof_link"})
    if not users:
        return None
    for user in users:
        raw_url = user.get("href")
        processed_url = process_url(raw_url)
        found_urls.append(processed_url)
        print(f"User URL found: {processed_url}")
    return found_urls

def get_all_user_urls(debug, sort, thread_count):
    # Try listing pages 1-49; missing pages return None and are dropped below.
    pages = range(1, 50)

    with ThreadPoolExecutor(max_workers=thread_count) as executor:
        user_urls = executor.map(get_user_urls, [debug] * len(pages), [sort] * len(pages), pages)

    # Flatten the per-page lists, skipping pages that returned None.
    user_urls = [url for sublist in user_urls if sublist for url in sublist]

    print(f"\nTotal number of user pages: {len(user_urls)}\n")
    return user_urls

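# Returned list shape (illustrative values):
#   ["https://cosppi.net/user/some_user", "https://cosppi.net/user/another_user", ...]
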
def get_user_images(user_url):
    page = 1
    images = []

    while True:
        response = requests.get(
            f"{user_url}/page/{page}",
            headers={"User-Agent": USER_AGENT},
            timeout=TIMEOUT,
        )
        if response.status_code == 404:
            print(f"User {user_url} not found!")
            break
        print(f"Fetching user images {user_url}...")

        soup = BeautifulSoup(response.text, "html.parser")
        img_wrapper_outer = soup.find("div", {"class": "img_wrapper_outer"})
        if img_wrapper_outer is None:
            print(f"User {user_url} has no images!")
            break
        image_wrappers = img_wrapper_outer.find_all("div", {"class": "img_wrapper"})

        if not image_wrappers:
            print(f"User {user_url} has no images!")
            break

        print(f"Found {len(image_wrappers)} images on page {page}...")

        for image_wrapper in image_wrappers:
            raw_src = image_wrapper.find("img").get("data-src")
            # Strip a trailing ":size" suffix (e.g. ":small") from the image URL,
            # leaving URLs that carry no such suffix untouched.
            base, sep, suffix = raw_src.rpartition(":")
            url = base if sep and "/" not in suffix else raw_src

            span_tags = image_wrapper.find("div", {"class": "img_num"}).find_all("span")
            likes, retweets = [int(span_tag.text) for span_tag in span_tags]

            image_item = {
                "url": url,
                "likes": likes,
                "retweets": retweets,
            }
            images.append(image_item)

        page += 1

    print(f"Images found in article {user_url}: {len(images)}")

    username = user_url.split("/")[-1]

    return {
        "username": username,
        "images": images,
    }

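# Shape of the returned dict (illustrative values):
#   {"username": "some_user",
#    "images": [{"url": "https://pbs.twimg.com/media/x.jpg", "likes": 12, "retweets": 3}, ...]}
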
def get_image_urls(user_urls, thread_count):
    # Fetch every user's image list in parallel; map preserves the input order.
    with ThreadPoolExecutor(max_workers=thread_count) as executor:
        results = executor.map(get_user_images, user_urls)

    return list(results)

def main(debug, thread_count, output_path, sort):
    user_urls = get_all_user_urls(debug, sort, thread_count)

    print(f"\nFetching images from {len(user_urls)} user pages...")
    print(user_urls)

    users_and_images = get_image_urls(user_urls, thread_count)

    print(f"\nTotal number of users: {len(users_and_images)}")
    num_images = sum(len(user["images"]) for user in users_and_images)
    print(f"Total number of images: {num_images}")

    print("\nPreview:")
    for user in users_and_images[:3]:
        print(f"Username: {user['username']}")
        print(f"Number of images: {len(user['images'])}")
        if user["images"]:
            print(f"First image: {user['images'][0]['url']}")
            print(f"Last image: {user['images'][-1]['url']}")
        print()

    print(f"\nWriting to {output_path}...")
    with open(output_path, "w", encoding="utf-8") as output_file:
        json.dump(users_and_images, output_file, indent=4)
    print("Done!")

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true", help="Enable debug mode (only process the first listing page)")
    parser.add_argument("--threads", type=int, default=5, help="Number of threads to use for parallel processing")
    parser.add_argument("--output", type=str, default="output.json", help="Output file name for JSON data")
    parser.add_argument("--sort", type=str, default="all-rank", choices=SORTS, help="Ranking category to scrape")
    args = parser.parse_args()

    main(args.debug, args.threads, args.output, args.sort)
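
# Example invocation (the script filename here is assumed):
#   python cosppi_scraper.py --sort follower-rank --threads 8 --output cosppi.json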