import requests from requests.adapters import HTTPAdapter from urllib3.util.retry import Retry import json import logging import sqlite3 import time from datetime import datetime import urllib3 import os from typing import List, Dict, Any, Optional, Set # Disable SSL warnings urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) class FollowingCrawlerV5: def __init__(self, api_key: str, input_db: str, output_db: str = None): self.api_key = api_key self.base_url = "https://api.tweetscout.io/v2" self.input_db = input_db self.output_db = output_db or "following_expanded.db" self.progress_db = "crawler_progress.db" self.headers = { "ApiKey": api_key, "Accept": "application/json" } self.session = self._create_session() self._setup_logging() self._init_databases() def _create_session(self) -> requests.Session: session = requests.Session() retry_strategy = Retry( total=3, backoff_factor=10, # 基础等待时间 status_forcelist=[429, 500, 502, 503, 504, 520, 521, 522, 523, 524, 525, 526, 527, 528, 598, 599], # 增加更多状态码 allowed_methods=["GET", "POST", "HEAD", "PUT", "DELETE", "OPTIONS", "TRACE"], respect_retry_after_header=True, raise_on_status=False ) adapter = HTTPAdapter( max_retries=retry_strategy, pool_maxsize=10, pool_block=True ) session.mount("http://", adapter) session.mount("https://", adapter) session.verify = False # 设置更长的超时 session.timeout = (60, 120) # 连接超时60秒,读取超时180秒 return session def _setup_logging(self): logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', handlers=[ logging.FileHandler('crawler_expanded.log'), logging.StreamHandler() ] ) self.logger = logging.getLogger("FollowingCrawlerV5") def _init_databases(self): # Initialize progress tracking database self.progress_conn = sqlite3.connect(self.progress_db) self.progress_cursor = self.progress_conn.cursor() self.progress_cursor.execute(''' CREATE TABLE IF NOT EXISTS processed_users ( user_id TEXT PRIMARY KEY, processed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ) ''') self.progress_conn.commit() # Initialize output database if not os.path.exists(self.output_db): self.output_conn = sqlite3.connect(self.output_db) self.output_cursor = self.output_conn.cursor() self.output_cursor.executescript(''' CREATE TABLE IF NOT EXISTS users ( user_id TEXT PRIMARY KEY, name TEXT, screen_name TEXT, description TEXT, followers_count INTEGER, friends_count INTEGER, tweets_count INTEGER, register_date TEXT, avatar TEXT, banner TEXT, verified BOOLEAN, can_dm BOOLEAN, last_updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP ); CREATE TABLE IF NOT EXISTS following_relationships ( id INTEGER PRIMARY KEY AUTOINCREMENT, user_id TEXT, following_of TEXT, collected_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, UNIQUE(user_id, following_of), FOREIGN KEY(user_id) REFERENCES users(user_id) ); CREATE INDEX IF NOT EXISTS idx_user_id ON users(user_id); CREATE INDEX IF NOT EXISTS idx_followers_count ON users(followers_count); CREATE INDEX IF NOT EXISTS idx_following_rel ON following_relationships(user_id, following_of); ''') self.output_conn.commit() else: self.output_conn = sqlite3.connect(self.output_db) self.output_cursor = self.output_conn.cursor() def get_unprocessed_users(self, batch_size: int = 100) -> List[Dict[str, Any]]: input_conn = sqlite3.connect(self.input_db) input_cursor = input_conn.cursor() try: self.logger.info("Starting to fetch unprocessed users...") self.progress_cursor.execute("ATTACH DATABASE ? AS input_db", (self.input_db,)) # 检查用户处理状态 self.progress_cursor.execute( "SELECT user_id FROM processed_users WHERE user_id IN (SELECT user_id FROM input_db.users WHERE screen_name = ?)", ('vc_btxcap',) ) result = self.progress_cursor.fetchone() self.logger.info(f"User vc_btxcap processed status: {'Already processed' if result else 'Not processed'}") # 获取未处理的用户 query = ''' SELECT DISTINCT u.user_id, u.screen_name, u.followers_count FROM input_db.users u WHERE u.user_id NOT IN ( SELECT user_id FROM processed_users ) LIMIT ? ''' self.progress_cursor.execute(query, (batch_size,)) users = [ { "user_id": row[0], "screen_name": row[1], "followers_count": row[2] } for row in self.progress_cursor.fetchall() ] # 断开数据库连接 self.progress_cursor.execute("DETACH DATABASE input_db") self.logger.info(f"Found {len(users)} unprocessed users") if users: self.logger.info(f"First unprocessed user: {users[0]['screen_name']}") return users except Exception as e: self.logger.error(f"Error getting unprocessed users: {str(e)}") return [] finally: input_conn.close() def get_user_following(self, user: Dict[str, Any], retries: int = 1) -> List[Dict[str, Any]]: # 增加重试次数 all_following = [] retry_count = 0 cursor = None base_wait_time = 20 # 基础等待时间 while True: try: url = f"{self.base_url}/follows" params = { "user_id": user['user_id'], "limit": 20 } if cursor: params['cursor'] = cursor # 增加请求前的等待时间 # wait_time = base_wait_time + (len(all_following) // 100) * 10 # 每100条数据增加10秒等待 self.logger.info(f"Waiting {base_wait_time} seconds before next request...") time.sleep(base_wait_time) response = self.session.get( url, headers=self.headers, params=params, timeout=(30, 60) # 匹配session的超时设置 ) if response.status_code == 200: data = response.json() if isinstance(data, list): retry_count = 0 cursor = response.headers.get('x-pagination-cursor') all_following.extend(data) self.logger.info( f"Retrieved batch of {len(data)} following for {user['screen_name']}, " f"total: {len(all_following)}" ) if not cursor: break elif response.status_code == 429: retry_after = int(response.headers.get('Retry-After', 30)) # 默认等待时间增30秒 self.logger.warning(f"Rate limited. Waiting {retry_after} seconds...") time.sleep(retry_after + 5) # 额外等待5秒 continue else: raise requests.exceptions.RequestException( f"Status {response.status_code}, {response.text}" ) except Exception as e: retry_count += 1 self.logger.error(f"Error getting following for {user['screen_name']}: {str(e)}") if retry_count >= retries: self.logger.warning( f"Max retries reached for {user['screen_name']}, recording failure and moving on..." ) # 记录失败的用户 with open('failed_users.txt', 'a') as f: f.write(f"{user['screen_name']},{user['user_id']},{datetime.now()}\n") return [] # 直接返回空列表,跳过这个用户 wait_time = min(120, base_wait_time * (2 ** retry_count)) # 最多等待2分钟 self.logger.info(f"Waiting {wait_time} seconds before retry {retry_count}") time.sleep(wait_time) filtered_following = [ f for f in all_following if isinstance(f.get('followers_count'), int) and f.get('followers_count', 0) > 5000 ] self.logger.info( f"Retrieved {len(filtered_following)} filtered following (>5000) " f"for {user['screen_name']} out of {len(all_following)} total" ) return filtered_following def save_following(self, user: Dict[str, Any], following_of: str) -> None: try: # Insert or update user information self.output_cursor.execute(''' INSERT OR REPLACE INTO users (user_id, name, screen_name, description, followers_count, friends_count, tweets_count, register_date, avatar, banner, verified, can_dm, last_updated) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, CURRENT_TIMESTAMP) ''', ( user.get('id'), user.get('name'), user.get('screen_name'), user.get('description'), user.get('followers_count', 0), user.get('friends_count', 0), user.get('tweets_count', 0), user.get('register_date'), user.get('avatar'), user.get('banner'), user.get('verified', False), user.get('can_dm', False) )) # Insert following relationship self.output_cursor.execute(''' INSERT OR IGNORE INTO following_relationships (user_id, following_of) VALUES (?, ?) ''', (user.get('id'), following_of)) self.output_conn.commit() except sqlite3.Error as e: self.logger.error(f"Database error saving following: {str(e)}") self.output_conn.rollback() except Exception as e: self.logger.error(f"Error saving following: {str(e)}") self.output_conn.rollback() def mark_user_processed(self, user_id: str) -> None: try: self.progress_cursor.execute(''' INSERT OR REPLACE INTO processed_users (user_id) VALUES (?) ''', (user_id,)) self.progress_conn.commit() except sqlite3.Error as e: self.logger.error(f"Error marking user as processed: {str(e)}") self.progress_conn.rollback() def process_user_batch(self, users: List[Dict[str, Any]]) -> None: for user in users: try: self.logger.info(f"Processing user: {user['screen_name']}") following_users = self.get_user_following(user) for idx, following_user in enumerate(following_users, 1): self.save_following(following_user, user['screen_name']) if idx % 100 == 0: self.logger.info( f"Processed {idx}/{len(following_users)} following " f"for {user['screen_name']}" ) time.sleep(0.5) # Increased delay between saves self.mark_user_processed(user['user_id']) self.logger.info( f"Completed processing {len(following_users)} following " f"for {user['screen_name']}" ) # Longer pause between users time.sleep(30) # Increased to 30 seconds except Exception as e: self.logger.error(f"Error processing user {user['screen_name']}: {str(e)}") continue def get_progress_stats(self) -> None: try: # Get total users to process from input database input_conn = sqlite3.connect(self.input_db) input_cursor = input_conn.cursor() input_cursor.execute("SELECT COUNT(DISTINCT user_id) FROM users") total_users = input_cursor.fetchone()[0] input_conn.close() # Get processed users count self.progress_cursor.execute("SELECT COUNT(*) FROM processed_users") processed_users = self.progress_cursor.fetchone()[0] # Get collected relationships self.output_cursor.execute("SELECT COUNT(*) FROM following_relationships") total_relationships = self.output_cursor.fetchone()[0] self.logger.info(f"\nProgress Statistics:") self.logger.info(f"Total users to process: {total_users}") self.logger.info(f"Processed users: {processed_users}") self.logger.info(f"Progress: {(processed_users/total_users)*100:.2f}%") self.logger.info(f"Total following relationships collected: {total_relationships}") except sqlite3.Error as e: self.logger.error(f"Error getting progress stats: {str(e)}") def run(self, batch_size: int = 15): max_retries = 3 retry_count = 0 while retry_count < max_retries: try: self.logger.info("Starting expanded following crawler") # 读取上次的进度 last_processed_user = None if os.path.exists('checkpoint.txt'): with open('checkpoint.txt', 'r') as f: last_processed_user = f.read().strip() self.logger.info(f"Resuming from user: {last_processed_user}") while True: users = self.get_unprocessed_users(batch_size) if users: self.logger.info("Debug - All users in batch:") for user in users: self.logger.info(f" - {user['screen_name']}") if not users: break # 如果有断点,跳过直到找到上次处理的用户 if last_processed_user: for idx, user in enumerate(users): self.logger.info(f"Checking user: {user['screen_name']}") if user['screen_name'] == last_processed_user: users = users[idx+1:] last_processed_user = None self.logger.info("Found checkpoint user!") break self.process_user_batch(users) # 记录最后处理的用户 with open('checkpoint.txt', 'w') as f: f.write(users[-1]['screen_name']) self.get_progress_stats() batch_delay = 180 # 3分钟 self.logger.info(f"Waiting {batch_delay} seconds before next batch...") time.sleep(batch_delay) break # 成功完成,退出重试循环 except Exception as e: retry_count += 1 self.logger.error(f"Error in main run loop (attempt {retry_count}/{max_retries}): {str(e)}") if retry_count < max_retries: wait_time = 60 * retry_count # 5分钟 * 重试次数 self.logger.info(f"Waiting {wait_time} seconds before retrying...") time.sleep(wait_time) else: self.logger.error("Max retries reached, stopping crawler") finally: self.progress_conn.close() self.output_conn.close() self.logger.info("Crawler finished, database connections closed") def main(): crawler = FollowingCrawlerV5( api_key="Enter your API KEY", input_db="following.db", output_db="following_expanded.db" ) crawler.run(batch_size=15) # Process 15 users at a time if __name__ == "__main__": main()