Rachel74mx committed on
Commit 035b9a8 · verified · 1 Parent(s): f086bc2

Upload crawler

Files changed (2):
  1. 1.1 reptile_of_weibo.py +114 -0
  2. 1.2 reptile_of_comment.py +108 -0
1.1 reptile_of_weibo.py ADDED
@@ -0,0 +1,114 @@
+ '''
+ Author id author_id; author url author_url;
+ post time text_time; content text_content; post url text_url; post/comment flag text_type;
+ like count like_num; comment count comment_num;
+ '''
+
+ from selenium import webdriver
+ from selenium.webdriver.common.by import By
+ import pandas as pd
+ import datetime as dt
+ import time
+
+ # Log in to Weibo manually; poll the page title until it shows the logged-in home page
+ def login():
+     driver.get('https://weibo.com/login.php')
+     driver.maximize_window()
+     time.sleep(3)
+     title = driver.title
+     print(title)
+     while title != "微博 – 随时随地发现新鲜事":
+         time.sleep(1)
+         title = driver.title
+         print(title)
+     time.sleep(1)
+
+ # Build one hot-search URL per hour for each day strictly between begin and end;
+ # the caller appends the page number to each URL
+ def get_url(_key, begin, end):
+     url_list = []
+     t0 = "https://s.weibo.com/weibo?q=" + _key + "&xsort=hot&suball=1&timescope=custom:"
+     be = dt.datetime.strptime(str(begin), '%Y/%m/%d')
+     en = dt.datetime.strptime(str(end), '%Y/%m/%d')
+     delta = dt.timedelta(days=1)
+     i = be + delta
+     while i < en:
+         for j in range(0, 24):
+             t = t0 + i.strftime('%Y-%m-%d') + '-' + str(j) + ":"
+             t = t + i.strftime('%Y-%m-%d') + '-' + str(j+1) + "&page="
+             url_list.append(t)
+         i += delta
+     return url_list
+
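+ # For illustration (hypothetical arguments): get_url("key", "2023/1/1", "2023/1/4")
+ # yields one URL per hour of 2023-01-02 and 2023-01-03, e.g.
+ #   https://s.weibo.com/weibo?q=key&xsort=hot&suball=1&timescope=custom:2023-01-02-0:2023-01-02-1&page=
+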
+ # Open Chrome and log in to Weibo
+ driver = webdriver.Chrome()
+ login()
+
+ pd_data = pd.read_csv("data/data_case.csv", encoding="gbk")
+
+ for name, key1, key2, key3, begin, end, fbegin, fend, csvname, have_reptiled in pd_data.values:
+     if have_reptiled <= 1:   # skip cases according to the have_reptiled flag
+         continue
+     url_list = []
+     time.sleep(3)
+     # build the hourly search URLs for every non-empty keyword
+     if str(key1) != "nan":
+         url_list += get_url(str(key1), begin, end)
+     if str(key2) != "nan":
+         url_list += get_url(str(key2), begin, end)
+     if str(key3) != "nan":
+         url_list += get_url(str(key3), begin, end)
+     print(url_list)
+
+     k = 0
+     author_id = []
+     text_time = []
+     fp = open(str(csvname), 'w', encoding='utf-8')
+     fp.writelines('author_id,author_url,text_time,text_content,text_url,text_type,like_num,comment_num\n')
+     for u in url_list:
+         err = 0
+         for p in range(1, 51):   # iterate result pages (tries up to 50 pages per URL)
+             driver.get(u + str(p))
+             time.sleep(1)
+             div_list = driver.find_elements(By.XPATH, "/html/body/div[1]/div[2]/div/div[2]/div[1]/div[2]/div")
+
+             # empty page: retry once, then give up on this URL
+             if len(div_list) == 0:
+                 print("Something Error")
+                 err = err + 1
+                 if err >= 2:
+                     break
+                 else:
+                     continue
+
+             # scrape the fields of each post
+             for div in div_list:
+                 print("No. ", k)
+                 now = div.find_element(By.XPATH, "./div/div[1]/div[2]/div/div[2]/a")
+                 aid = now.text
+                 aurl = now.get_attribute('href')
+                 print("author_id: ", aid)
+                 print("author_url: ", aurl)
+                 now = div.find_element(By.XPATH, "./div/div[1]/div[2]/div[2]/a")
+
+                 _time = now.text
+                 turl = now.get_attribute('href')
+                 print("text_time: ", _time)
+                 print("text_url: ", turl)
+                 # skip posts already seen (same author and timestamp)
+                 if aid in author_id and _time in text_time:
+                     print("Already crawled!")
+                     continue
+                 now = div.find_element(By.XPATH, "./div/div[1]/div[2]/p")
+                 content = now.text
+                 content = content.replace('\n', '').replace(',', ',')   # strip newlines; full-width commas keep the CSV intact
+                 print("text_content: ", content)
+                 comment = div.find_element(By.XPATH, "./div/div[2]/ul/li[2]/a").text
+                 like = div.find_element(By.XPATH, "./div/div[2]/ul/li[3]/a").text
+                 print("comment_num: ", comment)
+                 print("like_num: ", like)
+
+                 author_id.append(aid)
+                 text_time.append(_time)
+                 k = k + 1
+                 fp.writelines(aid + "," + aurl + "," + _time + "," + content + "," + turl + ",1," + like + "," + comment + "\n")
+     fp.close()
+
+ # Alternative AJAX endpoint for a user's timeline (left unused by the script):
+ # requests.get("https://weibo.com/ajax/statuses/mymblog?uid=6723451248&page=2&feature=0&since_id=4963035557659776kp2")
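For reference, 1.1 reptile_of_weibo.py expects data/data_case.csv (GBK-encoded) to hold one row per case with the ten columns unpacked in its main loop. The header text is not part of this commit, so the sketch below infers the column names from the unpacking and uses hypothetical values; begin/end must match the '%Y/%m/%d' format that get_url parses, and have_reptiled must exceed 1 for a case to be crawled:

  name,key1,key2,key3,begin,end,fbegin,fend,csvname,have_reptiled
  case0,keywordA,keywordB,nan,2023/1/1,2023/1/4,2023/1/1,2023/1/4,weibo_case0.csv,2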
1.2 reptile_of_comment.py ADDED
@@ -0,0 +1,108 @@
+ '''
+ Author id author_id; author url author_url;
+ post time text_time; content text_content; post url text_url; post/comment flag text_type;
+ like count like_num; comment count comment_num;
+ '''
+ from selenium import webdriver
+ from selenium.webdriver.common.by import By
+ import selenium.webdriver.support.ui as ui
+ import pandas as pd
+ import time
+ import re
+ import os
+
+
+ # Log in to Weibo manually; poll the page title until it shows the logged-in home page
+ def login():
+     driver.get('https://weibo.com/login.php')
+     driver.maximize_window()
+     time.sleep(3)
+     title = driver.title
+     print(title)
+     while title != "微博 – 随时随地发现新鲜事":
+         time.sleep(1)
+         title = driver.title
+         print(title)
+     time.sleep(1)
+
+
+ # Open Chrome and log in to Weibo
+ driver = webdriver.Chrome()
+ wait = ui.WebDriverWait(driver, 10)
+ kk = re.compile(r'\d+')   # regex used below to extract digits from reply counts
+ login()
+
+
+ for i in range(28):
+     # load the post CSV written by 1.1 reptile_of_weibo.py (weibo_case0.csv .. weibo_case27.csv)
+     csv_name = "weibo_case" + str(i) + ".csv"
+     print(csv_name)
+     if not os.path.exists(csv_name):
+         continue
+     pd_data = pd.read_csv(csv_name, encoding="utf-8")
+     urls = pd_data["text_url"].tolist()
+     comments = pd_data["comment_num"].tolist()
+     Num = len(urls) + 1   # running count of scraped rows
+     fp = open(csv_name, 'a', encoding='utf-8')
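+     # Comment rows are appended below the existing post rows in the same file,
+     # with text_type 2 and an empty text_url field (see fp.writelines below)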
+
+     for k in range(len(urls)):
+         print(k, "_of_", len(urls))
+         time.sleep(1)
+         # skip posts without a usable comment count (the bare "评论" placeholder)
+         if comments[k] == "评论":
+             continue
+         # if int(comments[k]) < 100:   # optional threshold: skip posts with few comments
+         #     continue
+         url_name = urls[k]
+         driver.get(url_name)
+         time.sleep(2)
+         # start scraping this post's comments
+         author_id = []
+         text_comment = []
+         no_fresh = 0   # consecutive scroll rounds that produced nothing new
+         for _ in range(2000):   # scroll/collect rounds
+             # all comment divs currently rendered in the window
+             div_list = driver.find_elements(By.XPATH, '//*[@id="scroller"]/div[1]/div')
+             no_fresh += 1
+             for div in div_list:
+                 _time = div.find_element(By.XPATH, './div/div/div/div[1]/div[2]/div[2]/div[1]').text   # comment time
+                 name = div.find_element(By.XPATH, './div/div/div/div[1]/div[2]/div[1]/a[1]').text   # commenter id
+                 aurl = div.find_element(By.XPATH, './div/div/div/div[1]/div[2]/div[1]/a[1]').get_attribute('href')   # commenter url
+                 comment = div.find_element(By.XPATH, './div/div/div/div[1]/div[2]/div[1]/span').text   # comment text
+                 comment = comment.replace(',', ',')   # full-width commas keep the CSV intact
+                 if ((name in author_id) and (comment in text_comment)) or (len(name) <= 1):   # dedupe
+                     # print("Already crawled!")
+                     continue
+
+                 ele = div.find_elements(By.XPATH, './div/div/div/div[1]/div[2]/div[2]/div[2]/div[4]/button/span[2]')   # like count
+                 if len(ele) == 1:
+                     like = ele[0].text
+                 else:
+                     like = 0
+
+                 ele = div.find_elements(By.XPATH, './div/div/div/div[2]/div/div/div/span')   # reply count
+                 reply = 0
+                 if len(ele) == 1:
+                     x = re.findall(kk, ele[0].text)   # pull the digits out with the regex
+                     if len(x) == 1:
+                         reply = int(x[0])
+
+                 print("No. ", Num, "(", k, "_of_", len(urls), ")")
+                 print("Time:", _time)
+                 print("Name:", name)
+                 print("Comment:", comment)
+                 print("Like:", like)
+                 print("Reply:", reply)
+
+                 # record the scraped comment and append it to the case CSV
+                 text_comment.append(comment)
+                 author_id.append(name)
+                 Num += 1
+                 no_fresh = 0
+                 fp.writelines(name + "," + aurl + "," + _time + "," + comment + ",,2," + str(like) + "," + str(reply) + "\n")
+
+             # stop after five scroll rounds with nothing new, otherwise keep scrolling
+             if no_fresh >= 5:
+                 break
+             else:
+                 driver.execute_script("window.scrollBy(0,500)")   # scroll down to load more comments
+                 time.sleep(2)
+     fp.close()
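Both scripts write into the same per-case CSV, so a finished file mixes post rows (text_type 1) and comment rows (text_type 2). A minimal read-back sketch, assuming pandas ≥ 1.3 for on_bad_lines (rows whose free text smuggled raw commas past the full-width replacement are simply skipped):

  import pandas as pd

  df = pd.read_csv("weibo_case0.csv", encoding="utf-8", on_bad_lines="skip")
  posts = df[df["text_type"] == 1]      # rows written by 1.1 reptile_of_weibo.py
  comments = df[df["text_type"] == 2]   # rows appended by 1.2 reptile_of_comment.py
  print(len(posts), "posts,", len(comments), "comments")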