|
|
--- |
|
|
license: unlicense |
|
|
dataset_info: |
|
|
features: |
|
|
- name: title |
|
|
dtype: string |
|
|
- name: selftext |
|
|
dtype: string |
|
|
- name: top_comment |
|
|
dtype: string |
|
|
- name: subreddit |
|
|
dtype: string |
|
|
splits: |
|
|
- name: train |
|
|
num_bytes: 12747912959 |
|
|
num_examples: 15689260 |
|
|
download_size: 7773494765 |
|
|
dataset_size: 12747912959 |
|
|
configs: |
|
|
- config_name: default |
|
|
data_files: |
|
|
- split: train |
|
|
path: data/train-* |
|
|
--- |
|
|
|
|
|
## Top comments from subreddits
|
|
|
|
|
These are comments from a select group of subreddits (see below); the posts were filtered to keep only the top comment on each post.
|
|
|
|
|
The filter criterion was that a comment must have received at least one upvote (the processing script skips comments with a score of 1 or less).
|
|
|
|
|
It covers posts from 2005 through 2022.
|
|
|
|
|
I picked the most popular subreddits. I did not include NSFW subreddits, but there is probably some NSFW language in here, so be aware.
|
|
|
|
|
The subreddits in the dataset are: |
|
|
|
|
|
* AskReddit |
|
|
* worldnews |
|
|
* todayilearned |
|
|
* Music |
|
|
* movies |
|
|
* science |
|
|
* Showerthoughts |
|
|
* Jokes |
|
|
* space |
|
|
* books |
|
|
* WritingPrompts |
|
|
* tifu |
|
|
* wallstreetbets |
|
|
* explainlikeimfive |
|
|
* askscience |
|
|
* history |
|
|
* technology |
|
|
* relationship_advice |
|
|
* relationships |
|
|
* Damnthatsinteresting |
|
|
* CryptoCurrency |
|
|
* television |
|
|
* politics |
|
|
* Parenting |
|
|
* Bitcoin |
|
|
* creepy |
|
|
* nosleep |
|
|
|
|
|
## Loading the dataset |
|
|
|
|
|
Each entry in the dataset includes the following columns: |
|
|
- **title**: The title of the Reddit post. |
|
|
- **selftext**: The body text of the Reddit post. |
|
|
- **top_comment**: The top comment on the Reddit post. |
|
|
- **subreddit**: The subreddit where the post was made. |
|
|
|
|
|
### 1. Loading the Entire Dataset |
|
|
|
|
|
To load the entire dataset, use the following code: |
|
|
|
|
|
```python |
|
|
from datasets import load_dataset |
|
|
|
|
|
# Load the dataset |
|
|
dataset = load_dataset("cowWhySo/reddit_top_comments") |
|
|
``` |
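Once loaded, each example is a plain dictionary whose keys match the four columns described above. A quick sanity check:

```python
from datasets import load_dataset

dataset = load_dataset("cowWhySo/reddit_top_comments")

# Peek at the first example; the keys match the schema above
example = dataset["train"][0]
print(example["subreddit"], example["title"])
print(example["top_comment"])
```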
|
|
|
|
|
### 2. Loading a Specific Split

This dataset ships with a single `train` split (see the metadata above), so `train` is the only split name you can request:

```python
from datasets import load_dataset

# Load the train split (the only split in this dataset)
train_dataset = load_dataset("cowWhySo/reddit_top_comments", split="train")
```
|
|
|
|
|
### 3. Streaming the Dataset |
|
|
You can also stream the dataset, which avoids downloading the full ~7.8 GB up front:
|
|
|
|
|
```python |
|
|
from datasets import load_dataset |
|
|
|
|
|
# Stream the train split |
|
|
train_streaming = load_dataset("cowWhySo/reddit_top_comments", split="train", streaming=True) |
|
|
|
|
|
# Iterate through the dataset |
|
|
for example in train_streaming: |
|
|
print(example) |
|
|
break # Just print the first example for demonstration |
|
|
``` |
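A streaming dataset is an `IterableDataset`, so you can also shuffle it with a small in-memory buffer and take a fixed-size sample without materializing anything on disk:

```python
from datasets import load_dataset

train_streaming = load_dataset("cowWhySo/reddit_top_comments", split="train", streaming=True)

# Approximate shuffle using a 10,000-example buffer, then take three examples
shuffled = train_streaming.shuffle(seed=42, buffer_size=10_000)
for example in shuffled.take(3):
    print(example["subreddit"], example["title"])
```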
|
|
|
|
|
### 4. Loading a Specific Slice |
|
|
To load a specific portion of the dataset: |
|
|
|
|
|
```python |
|
|
from datasets import load_dataset |
|
|
|
|
|
# Load the first 10% of the train split |
|
|
train_slice = load_dataset("cowWhySo/reddit_top_comments", split="train[:10%]")
|
|
|
|
|
# Print the first few examples |
|
|
print(train_slice[:5]) |
|
|
``` |
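The same split syntax accepts absolute row ranges and arbitrary percentage ranges as well, for example:

```python
from datasets import load_dataset

# First 1,000 rows
head = load_dataset("cowWhySo/reddit_top_comments", split="train[:1000]")

# Rows between the 40% and 60% marks
middle = load_dataset("cowWhySo/reddit_top_comments", split="train[40%:60%]")

print(len(head), len(middle))
```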
|
|
|
|
|
## Code to download subreddits
|
|
|
|
|
dl_reddit_comments.sh:
|
|
```bash
|
|
#!/bin/bash |
|
|
# |
|
|
# Directions: |
|
|
#./dl_reddit_comments.sh submissions |
|
|
# or |
|
|
#./dl_reddit_comments.sh comments |
|
|
|
|
|
# Check if the argument is provided and valid |
|
|
if [ "$#" -ne 1 ] || { [ "$1" != "submissions" ] && [ "$1" != "comments" ]; }; then |
|
|
echo "Usage: $0 <submissions|comments>" |
|
|
exit 1 |
|
|
fi |
|
|
|
|
|
# Create the reddit_data folder if it doesn't exist |
|
|
mkdir -p reddit_data |
|
|
|
|
|
# Base URL |
|
|
base_url="https://the-eye.eu/redarcs/files/" |
|
|
|
|
|
# Array of subreddit names |
|
|
subreddits=( |
|
|
"AskReddit" |
|
|
"worldnews" |
|
|
"todayilearned" |
|
|
"Music" |
|
|
"movies" |
|
|
"science" |
|
|
"Showerthoughts" |
|
|
"Jokes" |
|
|
"space" |
|
|
"books" |
|
|
"WritingPrompts" |
|
|
"tifu" |
|
|
"wallstreetbets" |
|
|
"explainlikeimfive" |
|
|
"askscience" |
|
|
"history" |
|
|
"technology" |
|
|
"relationship_advice" |
|
|
"relationships" |
|
|
"Damnthatsinteresting" |
|
|
"CryptoCurrency" |
|
|
"television" |
|
|
"politics" |
|
|
"Parenting" |
|
|
"Bitcoin" |
|
|
"creepy" |
|
|
"nosleep" |
|
|
) |
|
|
|
|
|
# Export base_url so it can be used by xargs |
|
|
export base_url |
|
|
|
|
|
# Argument to determine whether to download comments or submissions |
|
|
type=$1 |
|
|
|
|
|
# Generate file names based on the argument |
|
|
file_names=() |
|
|
for subreddit in "${subreddits[@]}"; do |
|
|
file_names+=("${subreddit}_${type}.zst") |
|
|
done |
|
|
|
|
|
# Download each file using wget in parallel |
|
|
printf "%s\n" "${file_names[@]}" | xargs -n 1 -P 8 -I {} wget -P reddit_data "${base_url}{}" |
|
|
``` |
|
|
|
|
|
## Code to process for top comments |
|
|
|
|
|
This may need some work. The files are read and decoded in chunks because some of the comment archives are very large.
|
|
|
|
|
The AskReddit subreddit alone was about 50 GB of comments, so processing it to a CSV was a bit painful.
|
|
|
|
|
```python
|
|
import zstandard |
|
|
import os |
|
|
import json |
|
|
import sys |
|
|
import csv |
|
|
from datetime import datetime |
|
|
import logging |
|
|
from concurrent.futures import ProcessPoolExecutor |
|
|
|
|
|
log = logging.getLogger("bot") |
|
|
log.setLevel(logging.DEBUG) |
|
|
log.addHandler(logging.StreamHandler()) |
|
|
|
|
|
def read_and_decode(reader, chunk_size, max_window_size, previous_chunk=None, bytes_read=0): |
|
|
chunk = reader.read(chunk_size) |
|
|
bytes_read += chunk_size |
|
|
if previous_chunk is not None: |
|
|
chunk = previous_chunk + chunk |
|
|
try: |
|
|
return chunk.decode() |
|
|
except UnicodeDecodeError: |
|
|
if bytes_read > max_window_size: |
|
|
raise UnicodeError(f"Unable to decode frame after reading {bytes_read:,} bytes") |
|
|
log.info(f"Decoding error with {bytes_read:,} bytes, reading another chunk") |
|
|
return read_and_decode(reader, chunk_size, max_window_size, chunk, bytes_read) |
|
|
|
|
|
def read_lines_zst(file_name): |
|
|
with open(file_name, 'rb') as file_handle: |
|
|
buffer = '' |
|
|
reader = zstandard.ZstdDecompressor(max_window_size=2**31).stream_reader(file_handle) |
|
|
while True: |
|
|
chunk = read_and_decode(reader, 2**27, (2**29) * 2) |
|
|
if not chunk: |
|
|
break |
|
|
lines = (buffer + chunk).split("\n") |
|
|
for line in lines[:-1]: |
|
|
yield line, file_handle.tell() |
|
|
buffer = lines[-1] |
|
|
reader.close() |
|
|
|
|
|
def process_file(input_file, output_folder): |
|
|
output_file_path = os.path.join(output_folder, os.path.splitext(os.path.basename(input_file))[0] + '.csv') |
|
|
log.info(f"Processing {input_file} to {output_file_path}") |
|
|
|
|
|
is_submission = "submission" in input_file |
|
|
if is_submission: |
|
|
fields = ["author", "title", "score", "created", "link", "text", "url"] |
|
|
else: |
|
|
fields = ["author", "score", "created", "link", "body"] |
|
|
|
|
|
file_size = os.stat(input_file).st_size |
|
|
file_lines, bad_lines = 0, 0 |
|
|
line, created = None, None |
|
|
|
|
|
# Dictionary to store the top comment for each post |
|
|
top_comments = {} |
|
|
|
|
|
with open(output_file_path, "w", encoding='utf-8', newline="") as output_file: |
|
|
writer = csv.DictWriter(output_file, fieldnames=fields, quoting=csv.QUOTE_MINIMAL, quotechar='"', escapechar='\\') |
|
|
writer.writeheader() |
|
|
|
|
|
try: |
|
|
for line, file_bytes_processed in read_lines_zst(input_file): |
|
|
try: |
|
|
obj = json.loads(line) |
|
|
if is_submission: |
|
|
# Process submission data |
|
|
submission = { |
|
|
'author': f"u/{obj['author']}", |
|
|
'title': obj.get('title', ''), |
|
|
'score': obj.get('score', 0), |
|
|
'created': datetime.fromtimestamp(int(obj['created_utc'])).strftime("%Y-%m-%d %H:%M"), |
|
|
'link': f"https://www.reddit.com/r/{obj['subreddit']}/comments/{obj['id']}/", |
|
|
'text': obj.get('selftext', ''), |
|
|
'url': obj.get('url', ''), |
|
|
} |
|
|
writer.writerow(submission) |
|
|
else: |
|
|
# Process comment data and look for top comments |
|
|
post_id = obj['link_id'] |
|
|
score = obj.get('score', 0) |
|
|
body = obj.get('body', '') |
|
|
|
|
|
if "[deleted]" in body or score <= 1: |
|
|
continue |
|
|
|
|
|
comment = { |
|
|
'author': f"u/{obj['author']}", |
|
|
'score': score, |
|
|
'created': datetime.fromtimestamp(int(obj['created_utc'])).strftime("%Y-%m-%d %H:%M"), |
|
|
'link': f"https://www.reddit.com/r/{obj['subreddit']}/comments/{obj['link_id'][3:]}/_/{obj['id']}/", |
|
|
'body': body, |
|
|
} |
|
|
|
|
|
                        # NOTE: a row is written every time a higher-scoring comment appears
                        # for a post, so the CSV can hold several rows per post; keep the
                        # highest-scoring row when deduplicating downstream.
                        if post_id not in top_comments or score > top_comments[post_id]['score']:
|
|
top_comments[post_id] = comment |
|
|
writer.writerow(comment) |
|
|
|
|
|
created = datetime.utcfromtimestamp(int(obj['created_utc'])) |
|
|
except json.JSONDecodeError as err: |
|
|
bad_lines += 1 |
|
|
file_lines += 1 |
|
|
if file_lines % 100000 == 0: |
|
|
log.info(f"{created.strftime('%Y-%m-%d %H:%M:%S')} : {file_lines:,} : {bad_lines:,} : {(file_bytes_processed / file_size) * 100:.0f}%") |
|
|
except KeyError as err: |
|
|
log.info(f"Object has no key: {err}") |
|
|
log.info(line) |
|
|
except Exception as err: |
|
|
log.info(err) |
|
|
log.info(line) |
|
|
|
|
|
log.info(f"Complete : {file_lines:,} : {bad_lines:,}") |
|
|
|
|
|
def convert_to_csv(input_folder, output_folder): |
|
|
input_files = [] |
|
|
for subdir, dirs, files in os.walk(input_folder): |
|
|
for filename in files: |
|
|
input_path = os.path.join(subdir, filename) |
|
|
if input_path.endswith(".zst"): |
|
|
input_files.append(input_path) |
|
|
|
|
|
with ProcessPoolExecutor() as executor: |
|
|
futures = [executor.submit(process_file, input_file, output_folder) for input_file in input_files] |
|
|
for future in futures: |
|
|
future.result() |
|
|
|
|
|
if __name__ == "__main__": |
|
|
if len(sys.argv) < 3: |
|
|
print("Usage: python script.py <input_folder> <output_folder>") |
|
|
sys.exit(1) |
|
|
input_folder = sys.argv[1] |
|
|
output_folder = sys.argv[2] |
|
|
convert_to_csv(input_folder, output_folder) |
|
|
``` |
|
|
|
|
|
## Combining into one dataset |
|
|
|
|
|
After processing finished, the per-subreddit CSVs were combined into one Parquet file. Note that the script below assumes each CSV already contains `title`, `selftext`, and `top_comment_body` columns, i.e., submissions joined with their top comments (that join step is not shown here):
|
|
|
|
|
```python
|
|
import pandas as pd |
|
|
import os |
|
|
|
|
|
# Define the folder containing the CSV files |
|
|
folder_path = 'csv' |
|
|
|
|
|
# List of files in the folder |
|
|
files = os.listdir(folder_path) |
|
|
|
|
|
# Initialize an empty list to store dataframes |
|
|
dfs = [] |
|
|
|
|
|
# Process each file |
|
|
for file in files: |
|
|
if file.endswith('.csv'): |
|
|
# Extract subreddit name from the file name |
|
|
subreddit = file.split('_')[0] |
|
|
|
|
|
# Read the CSV file |
|
|
df = pd.read_csv(os.path.join(folder_path, file)) |
|
|
|
|
|
# Add the subreddit name as a new column |
|
|
df['subreddit'] = subreddit |
|
|
|
|
|
# Keep only the required columns and rename them |
|
|
df = df[['title', 'selftext', 'top_comment_body', 'subreddit']] |
|
|
df.columns = ['title', 'selftext', 'top_comment', 'subreddit'] |
|
|
|
|
|
# Append the dataframe to the list |
|
|
dfs.append(df) |
|
|
|
|
|
# Concatenate all dataframes |
|
|
combined_df = pd.concat(dfs, ignore_index=True) |
|
|
|
|
|
# Save the combined dataframe to a Parquet file |
|
|
combined_df.to_parquet('reddit_top_comments.parquet', index=False) |
|
|
``` |
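As a quick sanity check (a minimal sketch, assuming the Parquet file was written to the working directory), you can read the file back and confirm the schema:

```python
import pandas as pd

# Read the combined file back and confirm the four expected columns
df = pd.read_parquet('reddit_top_comments.parquet')
print(df.columns.tolist())  # expected: ['title', 'selftext', 'top_comment', 'subreddit']
print(f"{len(df):,} rows")
```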
|
|
|
|
|
## Source |
|
|
|
|
|
https://the-eye.eu/redarcs/ |
|
|
|