# reddit_climate_comment/reddit_climate_comment.py
# Uploaded by cathw to the Hugging Face Hub (commit a823de1, verified; 5.37 kB).
import csv
import json
import os
from datasets import GeneratorBasedBuilder, Features, Value, Sequence, SplitGenerator, BuilderConfig, DatasetInfo, Split
import logging
import pandas as pd
from typing import Dict
CITATION = ""
_DESCRIPTION = "Demo"
_URL = ""
_HOMEPAGE = ""
_LICENSE = ""
_URL = "https://github.com/catherine-ywang/reddit_climate_comment_data/raw/main/climate_comments.csv.zip"
class NewDataset(GeneratorBasedBuilder):
    """Hugging Face dataset builder for Reddit climate-change comments.

    The source is a zipped CSV with one row per comment; examples are
    regrouped so each yielded record is one post together with its nested
    comments and (where present) their replies.
    """

    def _info(self):
        """Declare the nested post -> comments -> replies feature schema."""
        return DatasetInfo(
            description=_DESCRIPTION,
            features=Features({
                "id": Value("string"),
                "post_title": Value("string"),
                "post_author": Value("string"),
                "post_body": Value("string"),
                "post_url": Value("string"),
                "post_pic": Value("string"),
                "subreddit": Value("string"),
                "post_timestamp": Value("string"),
                "post_upvotes": Value("int32"),
                "post_permalink": Value("string"),
                "comments": Sequence({
                    "CommentID": Value("string"),
                    "CommentAuthor": Value("string"),
                    "CommentBody": Value("string"),
                    "CommentTimestamp": Value("string"),
                    "CommentUpvotes": Value("int32"),
                    "CommentPermalink": Value("string"),
                    "replies": Sequence({
                        "ReplyID": Value("string"),
                        "ReplyAuthor": Value("string"),
                        "ReplyBody": Value("string"),
                        "ReplyTimestamp": Value("string"),
                        "ReplyUpvotes": Value("int32"),
                        "ReplyPermalink": Value("string"),
                    }),
                }),
            }),
            homepage=_HOMEPAGE,
            # FIX: the module-level citation/license constants were defined
            # but never wired into DatasetInfo.
            citation=CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the zip archive; expose a single TRAIN split."""
        path = dl_manager.download_and_extract(_URL)
        # os.path.join instead of '+' keeps the path portable.
        csv_path = os.path.join(path, "climate_comments.csv")
        return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepath": csv_path})]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs, one nested example per Reddit post.

        The CSV is flat (one row per comment); rows are grouped by PostID and
        folded into the nested structure declared in _info().
        """
        df = pd.read_csv(filepath)
        for post_id, group in df.groupby('PostID'):
            # Post-level columns repeat on every row of the group, so the
            # first row carries all of the post metadata.
            post = group.iloc[0]
            comments = []
            # NOTE(review): the 'replies' column is assumed to hold an
            # iterable of reply IDs per row — confirm against the CSV
            # producer (pd.read_csv would normally yield plain strings here).
            reply_lists = group['replies'].tolist()
            comment_rows = zip(
                group['CommentID'].tolist(),
                group['CommentAuthor'].tolist(),
                group['CommentBody'].tolist(),
                group['CommentTimestamp'].tolist(),
                group['CommentUpvotes'].tolist(),
                group['CommentPermalink'].tolist(),
                reply_lists,
            )
            for cid, author, body, ts, upvotes, permalink, raw_replies in comment_rows:
                # BUG FIX: test for NaN *before* str() conversion. The
                # original computed str(cid) first, and str(nan) == "nan"
                # is never NaN, so rows without a comment were never skipped.
                if pd.isna(cid):
                    continue
                comment = {
                    "CommentID": str(cid),
                    "CommentAuthor": author,
                    "CommentBody": body,
                    "CommentTimestamp": ts,
                    "CommentUpvotes": upvotes,
                    "CommentPermalink": permalink,
                    "replies": [],  # filled below when reply IDs exist
                }
                if raw_replies is not None and not pd.isna(raw_replies):
                    # Drop NaN entries, then look each reply up by its ID.
                    for reply_id in (str(r) for r in raw_replies if not pd.isna(r)):
                        reply_row = group[group['ReplyID'] == reply_id].iloc[0]
                        comment["replies"].append({
                            "ReplyID": reply_id,
                            "ReplyAuthor": reply_row['ReplyAuthor'],
                            "ReplyBody": reply_row['ReplyBody'],
                            "ReplyTimestamp": reply_row['ReplyTimestamp'],
                            "ReplyUpvotes": reply_row['ReplyUpvotes'],
                            "ReplyPermalink": reply_row['ReplyPermalink'],
                        })
                comments.append(comment)
            yield post_id, {
                "id": post_id,
                "post_title": post['PostTitle'],
                "post_author": post['PostAuthor'],
                "post_body": post['PostBody'],
                "post_url": post['PostUrl'],
                "post_pic": post['PostPic'],
                "subreddit": post['Subreddit'],
                "post_timestamp": post['PostTimestamp'],
                "post_upvotes": post['PostUpvotes'],
                "post_permalink": post['PostPermalink'],
                "comments": comments,
            }