bumbledeep committed on
Commit
3a10f04
·
verified ·
1 Parent(s): 524e643

script to extract reviews

Browse files
Files changed (1) hide show
  1. extract_reviews_distrowatch.py +122 -0
extract_reviews_distrowatch.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# -*- coding: utf-8 -*-
"""extract_reviews_distrowatch.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1RZzzKmKsBL3KzWLmtgQh7cJvIVmQYFSO

# SENTIMENT ANALYSIS OF DISTROWATCH REVIEWS.

## Part 1: Web scraping and data collection
"""

import subprocess

from google.colab import drive
drive.mount('/content/drive')

# Commented out IPython magic to ensure Python compatibility.
# %cd /content/drive/MyDrive/Colab_Notebooks/distrowatch NLP

import requests
from pickle import dump as pickle_dump, load as pickle_load
import pandas as pd

# Install scrapy on demand.  The original cell had
#     except:
#         ModuleNotFoundError
# i.e. a bare `except` that swallowed *every* error, followed by a no-op
# expression statement -- fixed to catch the intended exception only.
# The original also used `!uv pip install ...`, which is IPython-only
# syntax and a SyntaxError in an exported .py file; a subprocess call to
# the same `uv pip install` command keeps the behavior in plain Python.
try:
    import scrapy
except ModuleNotFoundError:
    subprocess.run(["uv", "pip", "install", "scrapy", "--quiet"], check=True)
    import scrapy

# NOTE(review): selenium is installed but never imported in this script --
# presumably used in a later notebook part; kept for parity with the
# original cell.  Confirm it is still needed.
subprocess.run(["uv", "pip", "install", "selenium", "--quiet"], check=True)
"""Now with more distros"""

# Browser-like User-Agent so DistroWatch serves the regular HTML page
# instead of blocking the default requests UA.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36"
}

# Fetch the popularity page and collect every distro slug linked from it.
url_popularity = 'https://distrowatch.com/dwres.php?resource=popularity'
popularity_html = requests.get(url_popularity, headers=headers).text
sel = scrapy.Selector(text=popularity_html)

# De-duplicate the hrefs; going through set() loses page order, exactly
# as the original did.
slug_hrefs = sel.xpath('//td[@class = "phr2"]/a/@href').extract()
distros = list(set(slug_hrefs))
distros[:10]  # notebook-style peek; a no-op when run as a script

# with open("distro_names", "wb") as f:
#     pickle_dump(distros, f)
# distros[:10]

# with open("distro_names", "rb") as f:
#     distros = pickle_load(f)
# distros[:10]

# if we want to focus only on some distros we can provide them in a list
# distros = ['ubuntu','fedora','opensuse','mint','manjaro','debian','kali','endeavour','zorin','parrot','mx']
# Scrape the ratings page of every distro and accumulate one DataFrame
# per distro in `dfs`.  Distros that fail to download or have no reviews
# are skipped with a console message.
dfs = []

for distro in distros:
    url_rating = f'https://distrowatch.com/dwres.php?resource=ratings&distro={distro}'
    try:
        # timeout=(60, 60): 60 s to connect, 60 s per read.
        response = requests.get(url_rating, headers=headers, timeout=(60, 60))
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        # Best-effort scraping: report and move on to the next distro.
        print(f"{distro}: failed with error {e}")
        continue

    sel = scrapy.Selector(text=response.text)

    # Each review row has two cells: td[1] holds the metadata as a stack
    # of text nodes, td[2] holds the review body.  The text() positions
    # 3..7 are tied to DistroWatch's current markup -- brittle; if the
    # site layout changes these will silently pick up the wrong fields.
    project = sel.xpath('//td[@class = "News1"]//table[2]//tr/td[1]/text()[3]').extract()
    version = sel.xpath('//td[@class = "News1"]//table[2]//tr/td[1]/text()[4]').extract()
    rating = sel.xpath('//td[@class = "News1"]//table[2]//tr/td[1]/text()[5]').extract()
    date_review = sel.xpath('//td[@class = "News1"]//table[2]//tr/td[1]/text()[6]').extract()
    votes = sel.xpath('//td[@class = "News1"]//table[2]//tr/td[1]/text()[7]').extract()
    reviews = sel.xpath('//td[@class = "News1"]//table[2]//tr/td[2]/text()').extract()

    if not reviews:
        print(distro + ': no reviews')
        continue

    # Different paragraphs in the same review are separated by `\r`, but different reviews are separated by `\n`.
    # Also, there is another `\n` character at the beginning of the next review.
    # In order to split the reviews correctly we can first "glue" all reviews and then use the pattern `'\n\n'` to separate reviews.
    reviews = ''.join(reviews).split('\n\n')
    # NOTE(review): pd.DataFrame raises ValueError if the metadata lists
    # and `reviews` end up with different lengths -- that is the current
    # (unhandled) failure mode when the page layout drifts.
    df_distro = pd.DataFrame({'date': date_review,
                              'project': project,
                              'version': version,
                              'rating': rating,
                              'votes': votes,
                              'review': reviews})

    dfs.append(df_distro)
# Combine the per-distro frames into one table and coerce the scraped
# text columns to proper dtypes.
df = pd.concat(dfs)
df['date'] = pd.to_datetime(df['date'])
df['votes'] = pd.to_numeric(df['votes'])
df['rating'] = pd.to_numeric(df['rating'])
df = df.reset_index(drop=True)

df.to_csv('distrowatch_2.csv')  # 6th April 2025

df.head()  # notebook-style peek; no effect as a script

# Rating statistics per project, most-reviewed projects first.
df.groupby('project')['rating'].describe().sort_values(by='count', ascending=False)
# Merge the scrape snapshots on disk (distrowatch_1.csv and
# distrowatch_2.csv) into a single de-duplicated dataset.
df_list = []
for snapshot in range(1, 3):
    df_list.append(
        pd.read_csv(f'distrowatch_{snapshot}.csv', index_col=0, lineterminator='\n')
    )

full_df = (
    pd.concat(df_list)
    .drop_duplicates()
    .reset_index(drop=True)
)

# Normalise dtypes and whitespace; runs of '\r' inside a review mark
# paragraph breaks and are collapsed to single spaces.
full_df['date'] = pd.to_datetime(full_df['date'])
full_df['review'] = full_df['review'].str.strip().str.replace('\r+', ' ', regex=True)
full_df['project'] = full_df['project'].str.strip()

full_df  # notebook-style peek

full_df.info()

full_df.iloc[0, 5]  # sample value from the last column (the review text)

full_df.to_csv('full_distrowatch.csv', index=False)