File size: 4,229 Bytes
5943094
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c498bcc
5943094
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
be8e5ea
5943094
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
import csv
import json
import os
import praw
import random
import re
import sys

# Load Reddit API credentials from the local .env file (JSON with
# "app_id", "secret", and "user_agent" keys). Use a context manager so
# the file handle is closed promptly instead of leaking.
with open('.env', 'r', encoding='utf-8') as _env_file:
    env = json.load(_env_file)

# Module-level read-only PRAW client, shared by main().
reddit = praw.Reddit(
    client_id=env["app_id"],
    client_secret=env["secret"],
    user_agent=env["user_agent"]
)

def main(post_id):
    post = reddit.submission(id=post_id)
    post.comments.replace_more(limit=0)
    sorted_comments = sorted(post.comments, key=lambda c: c.score, reverse=True)

    if post.link_flair_text is None:
        raise Exception("Must be a gender or non-binary fashion flair post")
    elif "Men's" in post.link_flair_text:
        gender = "M"
    elif "Women's" in post.link_flair_text or "Ladies'" in post.link_flair_text:
        gender = "F"
    elif "Non Binary" in post.link_flair_text:
        gender = "X"
    else:
        raise Exception("Must be a gender or non-binary fashion flair post")

    data = {
        "post_id": post_id,
        "gender_flair": gender,
        "title": post.title,
        "selftext": post.selftext,
        "images": [],
    }

    image_urls = []
    if post.is_gallery:
        metadata = post.media_metadata
        for item in post.gallery_data['items']:
            url = metadata[item['media_id']]['s']['u'].replace("&", "&")
            image_urls.append(url)
    else:
        raise Exception("Need multiple images for there to be a multimodal eval question")

    lead_choices = {}
    firstChoice = None
    secondChoice = None
    for c in sorted_comments:
        text = c.body.strip().lower().replace('first', '1st').replace('second', '2nd').replace('third', '3rd').replace('fourth', '4th').replace('fifth', '5th')
        text = text.replace('last', str(len(image_urls)))
        selections = re.findall(r'\d', text)
        if len(selections) == 0:
            print("Skipped comment without digit")
            continue
        elif len(set(selections)) > 1:
            print(text)
            raise Exception("Encountered a top comment with multiple digits, complex")
        selection = int(selections[0])
        if selection not in lead_choices:
            print(f"Found comment with {c.score} votes: {text}")
            lead_choices[selection] = c.score
            data["images"].append(image_urls[selection - 1])
            if firstChoice is None:
                firstChoice = selection
            elif secondChoice is None:
                secondChoice = selection
                break
        else:
            print(f"Found repeat comment with {c.score} votes")
            lead_choices[selection] += c.score
            if lead_choices[selection] > 100:
                print("Overwhelming support")
                if selection == 1:
                   secondChoice = 2
                else:
                   secondChoice = selection - 1
                lead_choices[secondChoice] = 1
                data["images"].append(image_urls[secondChoice - 1])
                break
    if len(lead_choices.keys()) != 2:
        raise Exception("Did not find two distinct comments with single outfit suggestions")

    data["firstChoiceVotes"] = lead_choices[firstChoice]
    data["secondChoice"] = lead_choices[secondChoice]
    if len(image_urls) > 2:
        print("We are assuming that there is one image of each outfit")
        used_set = set(image_urls)
        remaining = [img for img in used_set if img not in data["images"]]
        extra_images = random.sample(remaining, min(len(remaining), 2))
        data["images"] += extra_images

    # print(json.dumps(data, indent=2))
    fieldnames = [
        "post_id",
        "gender_flair",
        "title",
        "selftext",
        "images",
        "firstChoiceVotes",
        "secondChoice"
    ]

    filename = "dataset.csv"
    file_exists = os.path.isfile(filename)
    with open(filename, mode="a", newline="", encoding="utf-8") as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        if not file_exists:
            writer.writeheader()
        row = data.copy()
        row["images"] = ",".join(data["images"])
        writer.writerow(row)


if __name__ == "__main__":
    # Expect exactly one CLI argument: the Reddit post id to scrape.
    args = sys.argv[1:]
    if len(args) != 1:
        print("Usage: python outfit_scraper.py <reddit_post_id>")
        sys.exit(1)
    main(args[0])