Datasets:

Modalities:
Text
Formats:
text
Size:
< 1K
Libraries:
Datasets
License:
naseele committed on
Commit
bc33227
·
1 Parent(s): e3f30ae

Upload 03GetTweetsFor800Users

Browse files
02CommonConcern20to50/2.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sqlite3
2
+ import pandas as pd
3
+ import os
4
+ from datetime import datetime
5
+ import json
6
+
7
def process_following_data(source_db_path, target_db_path, min_followers=20, max_followers=50):
    """Filter users by distinct-follower count and copy them to a new database.

    Reads the ``following_relationships`` table from ``source_db_path``,
    counts each user's *distinct* followers, keeps the users whose count lies
    within ``[min_followers, max_followers]``, and writes their ``users`` rows
    plus a ``user_followers`` table (follower list serialized as JSON) into a
    SQLite database at ``target_db_path``.

    Args:
        source_db_path: Path to the source SQLite database.
        target_db_path: Path of the target SQLite database (created on demand).
        min_followers: Inclusive lower bound on the distinct-follower count.
        max_followers: Inclusive upper bound on the distinct-follower count.

    Returns:
        Number of users written to the target database (0 if none matched).

    Raises:
        FileNotFoundError: If ``source_db_path`` does not exist.
    """
    if not os.path.exists(source_db_path):
        raise FileNotFoundError(f"找不到数据库文件: {source_db_path}")

    conn = sqlite3.connect(source_db_path)
    try:
        print("正在读取关注关系数据...")
        df_relationships = pd.read_sql_query("""
            SELECT user_id, following_of
            FROM following_relationships
        """, conn)

        print("正在统计关注者数量...")
        grouped = (
            df_relationships
            .groupby('user_id')['following_of']
            .agg(lambda x: list(set(x)))  # deduplicate followers per user
            .reset_index()
        )
        grouped['follower_count'] = grouped['following_of'].apply(len)

        # .copy() so the JSON conversion below operates on an independent
        # frame, not a view of `grouped` (avoids SettingWithCopyWarning and
        # the risk of the assignment silently not sticking).
        filtered = grouped[
            (grouped['follower_count'] >= min_followers) &
            (grouped['follower_count'] <= max_followers)
        ].copy()

        print("正在获取用户详细信息...")
        target_users = tuple(filtered['user_id'].tolist())

        if not target_users:
            print("没有找到符合条件的用户")
            return 0

        # SQLite has no list type; store the follower list as a JSON string.
        filtered['following_of'] = filtered['following_of'].apply(json.dumps)

        users_df = pd.read_sql_query("""
            SELECT *
            FROM users
            WHERE user_id IN ({})
        """.format(','.join(['?' for _ in target_users])), conn, params=target_users)

        # Inner merge keeps only user rows that passed the follower filter.
        result_df = pd.merge(
            users_df,
            filtered[['user_id', 'follower_count']],
            on='user_id',
            how='inner'
        )
    finally:
        # Close the source DB even when a query above raises.
        conn.close()

    print(f"正在保存符合条件的 {len(result_df)} 个用户信息到新数据库...")

    target_conn = sqlite3.connect(target_db_path)
    try:
        # The follower list lives in its own table; drop it from the main one.
        if 'following_of' in result_df.columns:
            result_df = result_df.drop('following_of', axis=1)

        result_df.to_sql('users', target_conn, index=False, if_exists='replace')

        target_conn.execute("CREATE INDEX IF NOT EXISTS idx_user_id ON users(user_id)")
        # NOTE(review): assumes the source `users` table carries a
        # `followers_count` column — confirm against the source schema.
        target_conn.execute("CREATE INDEX IF NOT EXISTS idx_followers_count ON users(followers_count)")

        # Create the follower table with its explicit schema, then *append*
        # into it. The original used if_exists='replace', which dropped this
        # table and lost the PRIMARY KEY / FOREIGN KEY constraints, and wrote
        # a `following_of` column instead of the declared `followers`.
        target_conn.execute("""
            CREATE TABLE IF NOT EXISTS user_followers (
                user_id TEXT PRIMARY KEY,
                followers TEXT,
                follower_count INTEGER,
                FOREIGN KEY(user_id) REFERENCES users(user_id)
            )
        """)
        # Make reruns idempotent: clear previous rows before appending.
        target_conn.execute("DELETE FROM user_followers")

        followers_data = filtered[['user_id', 'following_of', 'follower_count']].rename(
            columns={'following_of': 'followers'}
        )
        followers_data.to_sql('user_followers', target_conn, index=False, if_exists='append')
        target_conn.commit()

        print("\n符合条件的用户数据预览:")
        preview_df = result_df[['user_id', 'name', 'followers_count', 'friends_count']].head()
        print(preview_df)
    finally:
        target_conn.close()

    return len(result_df)
100
+
101
if __name__ == "__main__":
    # Input DB is fixed; the output name carries a timestamp so repeated
    # runs never overwrite an earlier result.
    source_db = "following_expanded.db"
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    target_db = f"filtered_users_{timestamp}.db"

    try:
        result_count = process_following_data(source_db, target_db)
        print(f"\n处理完成!已将 {result_count} 个用户的信息保存到 {target_db}")
    except Exception as e:
        print(f"处理过程中出现错误: {str(e)}")
        # Dump the full traceback for debugging.
        import traceback
        print("\n详细错误信息:")
        print(traceback.format_exc())
02CommonConcern20to50/requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ requests>=2.25.0
2
+ urllib3>=2.0.0
3
+ pandas>=1.5.0
4
+ # sqlite3 and typing are part of the Python standard library; no pip pins needed
02CommonConcern20to50/result/filtered_users.db ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:297b29029aaf2b55c5018c0e115d0e7677bf0bc38ebe531248be56af0e344f34
3
+ size 3432448
03GetTweetsFor800Users/3.py ADDED
@@ -0,0 +1,516 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import aiohttp
2
+ import asyncio
3
+ import json
4
+ import logging
5
+ import sqlite3
6
+ import pandas as pd
7
+ from datetime import datetime, timedelta
8
+ from email.utils import parsedate_to_datetime
9
+ from typing import List, Dict, Set, Optional, Tuple
10
+ from pathlib import Path
11
+
12
class QuotaLowException(Exception):
    """Raised when the remaining API quota drops below the safe reserve."""
14
+
15
class ArticleCrawler:
    """Asynchronously collects tweets for users listed in a source SQLite DB.

    Workflow: load candidate users from ``source_db``, fetch each user's
    recent tweets from the TweetScout v2 API, and persist them into
    ``output_db``. Progress (processed users / users with no tweets) is
    checkpointed to JSON files so a restarted run skips finished users.
    The log file is rotated twice a day (noon and midnight).
    """

    def __init__(self, api_key: str, source_db: str, target_user_count: int = 800, output_db: str = "tweets.db"):
        # API credentials and endpoint.
        self.api_key = api_key
        self.base_url = "https://api.tweetscout.io/v2"
        # Input user DB, number of users to collect, and output DB path.
        self.source_db = source_db
        self.target_user_count = target_user_count
        self.db_path = output_db
        self.headers = {
            "ApiKey": api_key,
            "Accept": "application/json"
        }
        # Checkpoint files: restarts skip users recorded in these sets.
        self.processed_users_file = "processed_users.json"
        self.no_tweets_users_file = "no_tweets_users.json"
        self.processed_users: Set[str] = self._load_processed_users()
        self.no_tweets_users: Set[str] = self._load_no_tweets_users()
        self.api_call_count = 0
        self.warning_threshold = 5500  # call count after which quota is re-checked (original comment said ~5000)
        self.last_save_time = datetime.now()
        self.save_interval = timedelta(minutes=5)  # checkpoint progress every 5 minutes
        self._setup_logging()
        self._init_database()

    def _load_processed_users(self) -> Set[str]:
        """Load the set of already-processed user ids from the checkpoint file."""
        if Path(self.processed_users_file).exists():
            try:
                with open(self.processed_users_file, 'r') as f:
                    return set(json.load(f))
            except Exception as e:
                # A corrupt checkpoint is logged and treated as empty.
                logging.error(f"Error loading processed users: {e}")
                return set()
        return set()

    def _load_no_tweets_users(self) -> Set[str]:
        """Load the set of user ids known to have no tweets from its checkpoint."""
        if Path(self.no_tweets_users_file).exists():
            try:
                with open(self.no_tweets_users_file, 'r') as f:
                    return set(json.load(f))
            except Exception as e:
                logging.error(f"Error loading no tweets users: {e}")
                return set()
        return set()

    def _save_processed_users(self):
        """Persist the processed-user checkpoint; failures are logged, not raised."""
        try:
            with open(self.processed_users_file, 'w') as f:
                json.dump(list(self.processed_users), f)
        except Exception as e:
            logging.error(f"Error saving processed users: {e}")

    def _save_no_tweets_users(self):
        """Persist the no-tweets checkpoint; failures are logged, not raised."""
        try:
            with open(self.no_tweets_users_file, 'w') as f:
                json.dump(list(self.no_tweets_users), f)
        except Exception as e:
            logging.error(f"Error saving no tweets users: {e}")

    def _get_next_rotation_time(self):
        """Compute the next log-rotation time (noon today or midnight tomorrow)."""
        now = datetime.now()
        if now.hour < 12:
            # Before noon: next rotation at 12:00 today.
            next_rotation = now.replace(hour=12, minute=0, second=0, microsecond=0)
        else:
            # At/after noon: next rotation at 00:00 tomorrow.
            next_rotation = (now + timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
        return next_rotation

    def get_progress_stats(self) -> Dict:
        """Return crawl progress statistics (user/tweet counts, first/last save times)."""
        try:
            # Before _init_database has run there is no cursor yet; return zeros.
            if not hasattr(self, 'cursor') or self.cursor is None:
                return {
                    'user_count': 0,
                    'tweet_count': 0,
                    'start_time': None,
                    'last_update': None,
                    'processed_users': len(self.processed_users),
                    'no_tweets_users': len(self.no_tweets_users)
                }

            self.cursor.execute("""
                SELECT
                    COUNT(DISTINCT author_id) as user_count,
                    COUNT(*) as tweet_count,
                    MIN(collected_at) as start_time,
                    MAX(collected_at) as last_update
                FROM tweets
            """)
            stats = dict(zip(['user_count', 'tweet_count', 'start_time', 'last_update'],
                             self.cursor.fetchone()))
            stats['processed_users'] = len(self.processed_users)
            stats['no_tweets_users'] = len(self.no_tweets_users)
            return stats
        except Exception as e:
            self.logger.error(f"Error getting progress stats: {e}")
            # Any DB error degrades to a zeroed snapshot rather than raising.
            return {
                'user_count': 0,
                'tweet_count': 0,
                'start_time': None,
                'last_update': None,
                'processed_users': len(self.processed_users),
                'no_tweets_users': len(self.no_tweets_users)
            }

    def _rotate_log_file(self):
        """Switch logging to a fresh timestamped file and log a progress snapshot."""
        # Close and detach the current file handler(s); console handler stays.
        for handler in self.logger.handlers[:]:
            if isinstance(handler, logging.FileHandler):
                handler.close()
                self.logger.removeHandler(handler)

        # New file handler, UTF-8 so non-ASCII log text is safe.
        new_log_file = f"crawler_{datetime.now().strftime('%Y%m%d_%H%M')}.log"
        file_handler = logging.FileHandler(new_log_file, encoding='utf-8')
        file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
        self.logger.addHandler(file_handler)

        # Schedule the next rotation.
        self.next_log_rotation = self._get_next_rotation_time()

        # Record a progress snapshot at each rotation boundary.
        stats = self.get_progress_stats()
        self.logger.info(f"Rotated to new log file: {new_log_file}")
        self.logger.info(f"Progress Stats: {json.dumps(stats, default=str)}")

    def _setup_logging(self):
        """Create the crawler logger with a console handler and an initial log file."""
        self.logger = logging.getLogger("ArticleCrawler")
        self.logger.setLevel(logging.INFO)

        # Console output mirrors the file log.
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
        self.logger.addHandler(console_handler)

        # Open the first log file and schedule the next rotation.
        self.next_log_rotation = self._get_next_rotation_time()
        self._rotate_log_file()

    def _init_database(self):
        """Open the output SQLite DB in WAL mode and create the tweets schema."""
        self.conn = sqlite3.connect(
            self.db_path,
            detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES,
            timeout=30
        )
        # WAL + NORMAL sync: better write throughput for a long-running crawl.
        self.conn.execute("PRAGMA journal_mode=WAL")
        self.conn.execute("PRAGMA synchronous=NORMAL")
        self.cursor = self.conn.cursor()

        self.cursor.executescript('''
            CREATE TABLE IF NOT EXISTS tweets (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                tweet_id TEXT UNIQUE,
                author_id TEXT,
                author_name TEXT,
                full_text TEXT,
                created_at TIMESTAMP,
                likes_count INTEGER DEFAULT 0,
                retweets_count INTEGER DEFAULT 0,
                replies_count INTEGER DEFAULT 0,
                views_count INTEGER DEFAULT 0,
                quote_count INTEGER DEFAULT 0,
                is_quote_status BOOLEAN DEFAULT 0,
                conversation_id TEXT,
                in_reply_to_status_id TEXT,
                quoted_status TEXT, -- 引用推文信息,存储为JSON
                retweeted_status TEXT, -- 转发推文信息,存储为JSON
                entities TEXT, -- 推文中的链接、媒体等实体,存储为JSON
                author_avatar TEXT, -- 作者头像URL
                author_description TEXT, -- 作者简介
                author_followers_count INT, -- 作者粉丝数
                author_friends_count INT, -- 作者关注数
                collected_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            );

            CREATE INDEX IF NOT EXISTS idx_tweet_author ON tweets(author_id);
            CREATE INDEX IF NOT EXISTS idx_tweet_id ON tweets(tweet_id);
            CREATE INDEX IF NOT EXISTS idx_tweet_created ON tweets(created_at);
        ''')
        self.conn.commit()

    def get_successful_user_count(self) -> int:
        """Return how many distinct users already have tweets stored in the output DB."""
        try:
            self.cursor.execute(
                "SELECT COUNT(DISTINCT author_id) FROM tweets"
            )
            return self.cursor.fetchone()[0]
        except Exception as e:
            self.logger.error(f"Error getting successful user count: {e}")
            # Fall back to the checkpoint set size if the DB query fails.
            return len(self.processed_users)

    def load_users(self) -> List[Dict]:
        """Load the next batch of unprocessed candidate users from the source DB.

        Returns a list of {'user_id', 'name'} dicts; empty when the target
        user count has already been reached or no candidates remain.
        """
        source_conn = sqlite3.connect(self.source_db)

        # How many more successfully-crawled users are still needed.
        successful_count = self.get_successful_user_count()
        remaining_target = max(0, self.target_user_count - successful_count)

        # Target already reached: nothing to load.
        if remaining_target <= 0:
            self.logger.info(f"Already reached target user count: {self.target_user_count}")
            source_conn.close()
            return []

        # Over-fetch 2x because some users may turn out to have no tweets.
        fetch_limit = remaining_target * 2

        # Exclude users already processed or known to have no tweets.
        excluded_users = self.processed_users.union(self.no_tweets_users)
        if excluded_users:
            placeholders = ','.join(['?' for _ in excluded_users])
            query = f"""
                SELECT user_id, name
                FROM users
                WHERE user_id NOT IN ({placeholders})
                LIMIT {fetch_limit}
            """
            df = pd.read_sql_query(query, source_conn, params=list(excluded_users))
        else:
            query = f"""
                SELECT user_id, name
                FROM users
                LIMIT {fetch_limit}
            """
            df = pd.read_sql_query(query, source_conn)

        source_conn.close()
        users = df.to_dict('records')
        self.logger.info(
            f"Loaded {len(users)} new users to process. "
            f"Current successful users: {successful_count}, Target: {self.target_user_count}"
        )
        return users

    async def check_api_quota(self) -> bool:
        """Query the remaining API quota; False when low or on request error."""
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(f"{self.base_url}/quota", headers=self.headers) as response:
                    if response.status == 200:
                        quota_data = await response.json()
                        remaining = quota_data.get('remaining', 0)
                        self.logger.info(f"Current API quota remaining: {remaining}")
                        return remaining >= 100  # keep a reserve of 100 calls
        except Exception as e:
            self.logger.error(f"Error checking API quota: {e}")
            return False
        # NOTE(review): a non-200 response falls through to here and is
        # treated as "quota OK" — confirm that is the intended behavior.
        return True

    async def get_user_tweets(self, user: Dict, max_tweets: int = 200) -> List[Dict]:
        """Fetch up to ``max_tweets`` recent tweets for ``user``, paging by cursor.

        Returns the collected tweets (possibly fewer than requested); raises
        QuotaLowException when the API quota check reports exhaustion.
        """
        try:
            url = f"{self.base_url}/user-tweets"
            all_tweets = []
            cursor = ""
            retries = 3

            while len(all_tweets) < max_tweets:
                # Once past the call threshold, periodically re-check quota.
                # NOTE(review): indentation reconstructed from a mangled
                # source — the nesting of these two quota checks is ambiguous
                # in the original; verify against the upstream file.
                if self.api_call_count >= self.warning_threshold:
                    quota_ok = await self.check_api_quota()
                    if not quota_ok and self.api_call_count % 100 == 0:
                        self.logger.warning(f"API calls: {self.api_call_count}, checking quota...")
                        if not await self.check_api_quota():
                            raise QuotaLowException("API quota running low")

                data = {
                    "user_id": user['user_id'],
                    "cursor": cursor,
                    "limit": 200
                }

                got_response = False
                for attempt in range(retries):
                    try:
                        async with aiohttp.ClientSession() as session:
                            async with session.post(url, headers=self.headers, json=data, timeout=30) as response:
                                if response.status == 200:
                                    got_response = True
                                    self.api_call_count += 1
                                    response_data = await response.json()
                                    tweets = response_data.get('tweets', [])

                                    if not tweets:
                                        self.logger.info(
                                            f"No more tweets available for user {user['name']}. "
                                            f"Collected {len(all_tweets)} tweets."
                                        )
                                        return all_tweets

                                    # Tag each tweet with its type for later analysis.
                                    for tweet in tweets:
                                        if tweet.get('retweeted_status'):
                                            tweet['tweet_type'] = 'retweet'
                                        elif tweet.get('is_quote_status'):
                                            tweet['tweet_type'] = 'quote'
                                        else:
                                            tweet['tweet_type'] = 'original'

                                    all_tweets.extend(tweets)

                                    # Per-type counts, for logging only.
                                    type_counts = {
                                        'original': len([t for t in tweets if t['tweet_type'] == 'original']),
                                        'retweet': len([t for t in tweets if t['tweet_type'] == 'retweet']),
                                        'quote': len([t for t in tweets if t['tweet_type'] == 'quote'])
                                    }

                                    self.logger.info(
                                        f"User {user['name']}: Got {len(tweets)} tweets "
                                        f"(Original: {type_counts['original']}, "
                                        f"Retweet: {type_counts['retweet']}, "
                                        f"Quote: {type_counts['quote']}). "
                                        f"Current total: {len(all_tweets)}"
                                    )

                                    cursor = response_data.get('next_cursor')
                                    if not cursor or len(all_tweets) >= max_tweets:
                                        # No more pages, or target reached: truncate to the cap.
                                        return all_tweets[:max_tweets]

                                    break  # got data; leave the retry loop

                                elif response.status == 429:
                                    # Rate limited: honor Retry-After (default 60s).
                                    wait_time = int(response.headers.get('Retry-After', 60))
                                    self.logger.warning(f"Rate limited, waiting {wait_time} seconds")
                                    await asyncio.sleep(wait_time)
                                elif response.status == 403:
                                    self.logger.error(f"Access forbidden for user {user['name']}")
                                    return []  # access forbidden: give up on this user
                                elif response.status == 404:
                                    self.logger.error(f"User {user['name']} not found")
                                    return []  # user does not exist
                                else:
                                    self.logger.error(
                                        f"Failed to get tweets for {user['name']}: "
                                        f"Status {response.status}, Response: {await response.text()}"
                                    )
                                    if attempt == retries - 1:
                                        if got_response:
                                            return all_tweets  # return whatever was already fetched
                                        return []  # otherwise give back nothing
                    except Exception as e:
                        self.logger.error(f"Request error: {str(e)}")
                        if attempt == retries - 1:
                            if got_response:
                                return all_tweets  # return whatever was already fetched
                            return []  # otherwise give back nothing
                        await asyncio.sleep(5)

                await asyncio.sleep(3)  # pause between page requests

            return all_tweets  # reached the requested number of tweets

        except QuotaLowException:
            raise
        except Exception as e:
            self.logger.error(f"Error getting tweets for {user['name']}: {str(e)}")
            return []

    def save_tweet(self, user: Dict, tweet_data: Dict) -> bool:
        """Insert (or replace) one tweet row in the output DB; True on success."""
        try:
            created_at = parsedate_to_datetime(tweet_data.get('created_at'))

            # Author profile embedded in the tweet payload.
            user_info = tweet_data.get('user', {})

            # Quoted / retweeted payloads are stored as JSON blobs.
            quoted_status = tweet_data.get('quoted_status')
            retweeted_status = tweet_data.get('retweeted_status')

            self.cursor.execute('''
                INSERT OR REPLACE INTO tweets
                (tweet_id, author_id, author_name, full_text, created_at,
                likes_count, retweets_count, replies_count, views_count,
                quote_count, is_quote_status, conversation_id, in_reply_to_status_id,
                quoted_status, retweeted_status, entities,
                author_avatar, author_description, author_followers_count,
                author_friends_count, collected_at)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, CURRENT_TIMESTAMP)
            ''', (
                tweet_data.get('id_str'),
                user['user_id'],
                user['name'],
                tweet_data.get('full_text', ''),
                created_at,
                tweet_data.get('favorite_count', 0),
                tweet_data.get('retweet_count', 0),
                tweet_data.get('reply_count', 0),
                tweet_data.get('view_count', 0),
                tweet_data.get('quote_count', 0),
                tweet_data.get('is_quote_status', False),
                tweet_data.get('conversation_id_str'),
                tweet_data.get('in_reply_to_status_id_str'),
                json.dumps(quoted_status) if quoted_status else None,
                json.dumps(retweeted_status) if retweeted_status else None,
                json.dumps(tweet_data.get('entities', {})),
                user_info.get('avatar'),
                user_info.get('description'),
                user_info.get('followers_count'),
                user_info.get('friends_count')
            ))
            self.conn.commit()
            return True
        except Exception as e:
            self.logger.error(f"Error saving tweet: {str(e)}")
            self.conn.rollback()
            return False

    async def process_user(self, user: Dict):
        """Fetch and store one user's tweets, updating checkpoints and logs."""
        try:
            # Rotate the log file once its scheduled time has passed.
            if datetime.now() >= self.next_log_rotation:
                self._rotate_log_file()

            # Periodic checkpoint save (every save_interval).
            if datetime.now() - self.last_save_time > self.save_interval:
                self._save_processed_users()
                self._save_no_tweets_users()
                self.last_save_time = datetime.now()

            if user['user_id'] in self.processed_users or user['user_id'] in self.no_tweets_users:
                self.logger.info(f"Skipping already processed user {user['name']}")
                return

            tweets = await self.get_user_tweets(user, max_tweets=100)

            if not tweets:
                # Remember tweet-less users so they are never retried.
                self.logger.info(f"No tweets found for user {user['name']}")
                self.no_tweets_users.add(user['user_id'])
                self._save_no_tweets_users()
                return

            self.logger.info(f"Processing {len(tweets)} tweets for user {user['name']}")

            # Only mark the user processed if every tweet saved cleanly.
            success = True
            for tweet in tweets:
                if not self.save_tweet(user, tweet):
                    success = False
                    break

            if success:
                self.processed_users.add(user['user_id'])
                self._save_processed_users()
                self.logger.info(f"Successfully processed user {user['name']}")

        except QuotaLowException:
            raise
        except Exception as e:
            self.logger.error(f"Error processing user {user['name']}: {str(e)}")

    async def run(self):
        """Main loop: process users until the target count or quota exhaustion.

        Always saves checkpoints and closes the output DB on the way out.
        """
        try:
            while True:
                users = self.load_users()
                if not users:
                    self.logger.info("No more users to process or reached target count")
                    break

                for user in users:
                    try:
                        await self.process_user(user)
                        # Stop once enough users have been collected.
                        if self.get_successful_user_count() >= self.target_user_count:
                            self.logger.info(f"Reached target user count: {self.target_user_count}")
                            return
                    except QuotaLowException:
                        self.logger.info(f"API quota running low, stopping at user {user['name']}")
                        return

                    # Throttle request rate between users.
                    # NOTE(review): placement reconstructed from a mangled
                    # source — could also have been per-batch; verify upstream.
                    await asyncio.sleep(2)

        except Exception as e:
            self.logger.error(f"Unexpected error in main loop: {str(e)}")
            raise  # re-raise so the caller can handle it
        finally:
            try:
                self._save_processed_users()
                self._save_no_tweets_users()
                self.conn.close()
            except Exception as e:
                self.logger.error(f"Error during cleanup: {str(e)}")
496
+
497
async def main():
    """Entry point: build the crawler, run it, and always release the DB handle."""
    crawler = None
    try:
        # Hard-coded configuration for the 800-user collection pass.
        crawler = ArticleCrawler(
            api_key="Enter your API KEY",
            source_db="filtered_users.db",
            target_user_count=800,
            output_db="tweetsFor800.db",
        )
        await crawler.run()
    except KeyboardInterrupt:
        print("\nReceived shutdown signal, cleaning up...")
    except Exception as e:
        print(f"Critical error: {str(e)}")
    finally:
        # run() normally closes the connection itself; this is a backstop for
        # failures between construction and run().
        if crawler is not None:
            crawler.conn.close()
514
+
515
# Script entry point: drive the async crawler on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
03GetTweetsFor800Users/filtered_users.db ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:297b29029aaf2b55c5018c0e115d0e7677bf0bc38ebe531248be56af0e344f34
3
+ size 3432448
03GetTweetsFor800Users/requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ aiohttp>=3.8.0
2
+ pandas>=1.5.0
3
+ python-dateutil>=2.8.0
03GetTweetsFor800Users/result/tweetsFor800.db ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c6e634208bccd92717e3790f3abce488515c4f57054bcf5ac0a88764130a30e
3
+ size 111566848