Synced repo using 'sync_with_huggingface' Github Action
Browse files- comment_scraper +0 -0
- comment_scraper.py +73 -0
- output_comments_after2022.csv +0 -0
- output_posts.csv +0 -0
- post_scraper.py +93 -0
comment_scraper
ADDED
|
File without changes
|
comment_scraper.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import praw
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import os
|
| 4 |
+
from datetime import datetime
|
| 5 |
+
|
| 6 |
+
# Output CSV of scraped comments; also read at startup to skip already-saved ids.
FILENAME_COMMENTS = 'output_comments_after2022.csv'
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def create_reddit_instance():
    """Build an authenticated PRAW client from environment variables.

    Reads REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET and REDDIT_USER_AGENT;
    any unset variable comes back as None and PRAW will reject it later.
    """
    credentials = {
        'client_id': os.getenv('REDDIT_CLIENT_ID'),
        'client_secret': os.getenv('REDDIT_CLIENT_SECRET'),
        'user_agent': os.getenv('REDDIT_USER_AGENT'),
    }
    return praw.Reddit(**credentials)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def get_subreddit(reddit, subreddit_name):
    """Return the Subreddit handle for *subreddit_name* from *reddit*."""
    return reddit.subreddit(subreddit_name)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def load_existing_data(file_name):
    """Load previously scraped rows from *file_name* if it exists.

    Returns a (DataFrame, ids) pair: the loaded frame plus its 'id'
    column as a list, or an empty DataFrame and [] when the file is
    not present yet.
    """
    if not os.path.exists(file_name):
        return pd.DataFrame(), []
    frame = pd.read_csv(file_name)
    return frame, frame['id'].tolist()
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def get_new_comment_row(comment):
    """Flatten a PRAW comment into a dict row for the output CSV.

    NOTE(review): 'date' is derived from comment.created, while main()
    filters on created_utc — confirm which timestamp the CSV should use.
    """
    return {
        "id": comment.id,
        "url": "https://www.reddit.com" + comment.permalink,
        "score": comment.score,
        "body": comment.body,
        "date": datetime.fromtimestamp(comment.created),
    }
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def save_data(df, file_name):
    """Write *df* to *file_name* as CSV, omitting the index column."""
    df.to_csv(file_name, index=False)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def main():
    """Scrape the newest comments from r/tunisia and append them to the CSV.

    Skips comments already present in the output file and comments created
    before epoch 1672444800.  The CSV is rewritten after every accepted
    comment so progress survives an interruption.
    """
    reddit = create_reddit_instance()
    subreddit = get_subreddit(reddit, 'tunisia')
    df_comments, existing_comment_ids = load_existing_data(FILENAME_COMMENTS)
    # Membership is tested once per fetched comment; a set makes each test O(1).
    existing_comment_ids = set(existing_comment_ids)

    print('Starting to scrape comments')

    # Fetch 1000 newest comments
    new_comments = list(subreddit.comments(limit=1000))

    for comment in new_comments:
        # 1672444800 == 2022-12-31 00:00:00 UTC.  (The original inline
        # comment said "before 2022", which mis-described this cutoff.)
        if comment.id in existing_comment_ids or comment.created_utc < 1672444800:
            print(f'Skipped comment {comment.id}')
            continue
        new_row = get_new_comment_row(comment)
        # pd.concat replaces DataFrame._append: a private API that was
        # removed in pandas 2.0, so the original crashes on current pandas.
        df_comments = pd.concat([df_comments, pd.DataFrame([new_row])],
                                ignore_index=True)
        save_data(df_comments, FILENAME_COMMENTS)
        print(f'Loaded comment {comment.id}')

    print('Finished scraping')
    print("Data saved to ", FILENAME_COMMENTS)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
# Script entry point: run the scraper only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
output_comments_after2022.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
output_posts.csv
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|
post_scraper.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import time
|
| 2 |
+
import praw
|
| 3 |
+
import prawcore
|
| 4 |
+
import pandas as pd
|
| 5 |
+
import os
|
| 6 |
+
from datetime import datetime
|
| 7 |
+
|
| 8 |
+
# Output CSV of scraped posts; read at startup so already-saved ids are skipped.
FILENAME_POSTS = 'output_posts_after2022.csv'
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def create_reddit_instance():
    """Return a PRAW Reddit client configured from the environment.

    Expects REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET and REDDIT_USER_AGENT
    to be set; missing variables come through as None.
    """
    env = os.getenv
    return praw.Reddit(
        client_id=env('REDDIT_CLIENT_ID'),
        client_secret=env('REDDIT_CLIENT_SECRET'),
        user_agent=env('REDDIT_USER_AGENT'),
    )
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def get_subreddit(reddit, subreddit_name):
    """Fetch the Subreddit object named *subreddit_name* via *reddit*."""
    return reddit.subreddit(subreddit_name)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def load_existing_data(file_name):
    """Read previously saved rows from *file_name*.

    Returns (df, ids); a missing file yields an empty DataFrame and [].
    """
    if os.path.exists(file_name):
        frame = pd.read_csv(file_name)
        return frame, frame['id'].tolist()
    return pd.DataFrame(), []
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def get_top_comments(submission):
    """Return the bodies of up to five top-level comments on *submission*.

    Fixes two PRAW usage bugs in the original:
    - the documented way to request 'top' ordering is setting
      submission.comment_sort BEFORE the comment forest is fetched;
      assigning submission.comments.sort_by after the fetch has no effect.
    - slicing the forest can yield MoreComments placeholders, which have
      no .body attribute; replace_more(limit=0) removes them first.
    """
    submission.comment_sort = 'top'
    submission.comments.replace_more(limit=0)
    return [comment.body for comment in submission.comments[:5]]
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def get_new_post_row(submission, top_comments):
    """Flatten a submission plus up to five comment bodies into a CSV row.

    Columns top_comment1..top_comment5 always exist; missing entries are None.
    NOTE(review): 'date' comes from submission.created while the comment
    scraper filters on created_utc — confirm which timestamp is intended.
    """
    row = {
        "id": submission.id,
        "url": submission.url,
        "score": submission.score,
        "title": submission.title,
        "body": submission.selftext,
    }
    # Pad to exactly five entries so every top_commentN key is present.
    padded = list(top_comments[:5]) + [None] * (5 - min(len(top_comments), 5))
    for position, text in enumerate(padded, start=1):
        row[f"top_comment{position}"] = text
    row["date"] = datetime.fromtimestamp(submission.created)
    return row
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def save_data(df, file_name):
    """Persist *df* as a CSV at *file_name* with no index column."""
    df.to_csv(file_name, index=False)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def main():
    """Scrape the newest posts from r/tunisia and append them to the CSV.

    Posts already in the output file are skipped.  On a Reddit rate-limit
    error the loop sleeps 60s and retries the same submission (up to three
    attempts) instead of silently dropping it, as the original did.  The
    combined DataFrame is written once, after the loop.
    """
    reddit = create_reddit_instance()
    subreddit = get_subreddit(reddit, 'tunisia')
    df_posts, existing_post_ids = load_existing_data(FILENAME_POSTS)
    # Membership is tested once per fetched post; a set makes each test O(1).
    existing_post_ids = set(existing_post_ids)

    print('Starting to scrape posts')

    # Fetch 1000 newest posts
    new_posts = list(subreddit.new(limit=1000))

    for submission in new_posts:
        if submission.id in existing_post_ids:
            print(f'Skipped post {submission.id}')
            continue
        for _attempt in range(3):
            try:
                top_comments = get_top_comments(submission)
                new_row = get_new_post_row(submission, top_comments)
                # pd.concat replaces DataFrame._append: a private API that
                # was removed in pandas 2.0.
                df_posts = pd.concat([df_posts, pd.DataFrame([new_row])],
                                     ignore_index=True)
                break
            except prawcore.exceptions.TooManyRequests:
                # Original slept but then lost the post; retry it instead.
                print("Hit rate limit, sleeping .....")
                time.sleep(60)

    save_data(df_posts, FILENAME_POSTS)
    print('Finished scraping')
    print("Data saved to ", FILENAME_POSTS)
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
# Script entry point: run the scraper only when executed directly, not on import.
if __name__ == "__main__":
    main()
|