izuemon committed on
Commit
490219d
·
verified ·
1 Parent(s): 80d2524

Update watcher.py

Browse files
Files changed (1) hide show
  1. watcher.py +120 -85
watcher.py CHANGED
@@ -6,7 +6,7 @@ import requests
6
  from datetime import datetime, timezone
7
  from bs4 import BeautifulSoup
8
 
9
- # ===== Channel.io =====
10
  GET_URL = "https://desk-api.channel.io/desk/channels/200605/groups/519217/messages"
11
  POST_URL = GET_URL
12
 
@@ -34,14 +34,13 @@ HEADERS_POST = {
34
  }
35
 
36
  # ===== ssyoutube =====
37
- DETAIL_URL = "https://ssyoutube.online/yt-video-detail/"
38
- MERGE_API = "https://ssyoutube.online/wp-admin/admin-ajax.php"
39
 
40
  # ===== Utils =====
41
  def parse_updated_at(value):
42
  if isinstance(value, (int, float)):
43
  return datetime.fromtimestamp(value / 1000, tz=timezone.utc)
44
- if isinstance(value, str):
45
  return datetime.fromisoformat(value.replace("Z", "+00:00"))
46
  return None
47
 
@@ -56,125 +55,143 @@ def extract_youtube_id(text):
56
  return m.group(1)
57
  return None
58
 
59
- # ===== ssyoutube =====
60
- def fetch_nonce():
61
- res = requests.get(DETAIL_URL, headers={"User-Agent": "Mozilla/5.0"}, timeout=30)
62
- res.raise_for_status()
63
- soup = BeautifulSoup(res.text, "lxml")
64
-
65
- nonce = soup.select_one('input[name="nonce"]')
66
- if nonce:
67
- return nonce["value"]
68
-
69
- m = re.search(r'"nonce":"([a-f0-9]+)"', res.text)
70
- if m:
71
- return m.group(1)
72
-
73
- raise RuntimeError("nonce が取得できません")
74
-
75
- def fetch_download_links(youtube_url):
76
  res = requests.post(
77
- DETAIL_URL,
78
  data={"videoURL": youtube_url},
 
79
  headers={
80
  "User-Agent": "Mozilla/5.0",
81
- "Referer": DETAIL_URL,
82
- },
83
- timeout=30,
84
  )
85
  res.raise_for_status()
86
 
87
  soup = BeautifulSoup(res.text, "lxml")
 
 
 
 
 
88
  buttons = soup.select("button[data-url]")
 
89
 
90
- results = []
91
  for btn in buttons:
92
  url = btn.get("data-url")
93
  quality = btn.get("data-quality")
94
  has_audio = btn.get("data-has-audio")
95
- if url:
96
- results.append({
97
- "url": url,
98
- "quality": quality or "audio",
99
- "has_audio": has_audio,
100
- })
101
- return results
102
 
103
- def pick_best_streams(items):
104
- videos = [i for i in items if i["has_audio"] == "false"]
105
- audios = [i for i in items if i["has_audio"] != "false"]
 
 
 
 
 
 
 
 
 
 
 
106
 
107
- def vkey(x):
108
- m = re.search(r"(\d+)p", x["quality"])
109
- return int(m.group(1)) if m else 0
 
 
 
110
 
111
- best_video = sorted(videos, key=vkey, reverse=True)[0]
112
- best_audio = audios[0]
 
 
 
113
 
114
  return best_video, best_audio
115
 
116
- def start_merge(video, audio, youtube_id, quality, nonce):
117
- payload = {
 
 
118
  "id": f"{youtube_id}_{quality}",
119
  "ttl": 3600000,
120
  "inputs": [
121
- {"url": audio["url"], "ext": "mp4"},
122
- {"url": video["url"], "ext": "mp4"},
 
 
 
 
 
 
 
 
123
  ],
124
  "output": {
125
  "ext": "mp4",
126
  "downloadName": f"{youtube_id}_{quality}.mp4",
 
127
  },
128
- "operation": {"type": "replace_audio_in_video"},
129
  }
130
 
131
  files = {
132
  "action": (None, "process_video_merge"),
133
  "nonce": (None, nonce),
134
- "request_data": (None, json.dumps(payload)),
135
  }
136
 
137
- res = requests.post(
138
- MERGE_API,
139
- files=files,
140
- headers={"User-Agent": "Mozilla/5.0"},
141
- timeout=30,
142
- )
143
  res.raise_for_status()
144
  return res.json()
145
 
146
- def wait_merge_done(monitor_url):
147
  while True:
148
- res = requests.get(monitor_url, timeout=15)
149
  res.raise_for_status()
150
  data = res.json()
151
-
152
  if data["result"]["status"] == "done":
153
  return data["result"]["output"]["url"]
154
-
155
  time.sleep(5)
156
 
157
- def build_links(items, merged_url, quality):
 
158
  lines = []
159
- for i in items:
160
- lines.append(
161
- f'<link type="url" value="{i["url"]}"> {i["quality"]}</link>'
162
- )
163
- lines.append(
164
- f'\n<link type="url" value="{merged_url}"> 🔊音声付き {quality}</link>'
165
- )
 
 
 
 
 
 
 
166
  return "\n".join(lines)
167
 
168
  def send_to_channel(text):
169
  payload = {
170
- "requestId": f"desk-{int(time.time()*1000)}",
171
  "blocks": [{"type": "text", "value": text}],
 
 
 
 
 
172
  }
 
173
  res = requests.post(
174
  POST_URL,
175
  headers=HEADERS_POST,
176
  data=json.dumps(payload),
177
- timeout=30,
178
  )
179
  res.raise_for_status()
180
 
@@ -184,39 +201,57 @@ def main():
184
  try:
185
  res = requests.get(GET_URL, headers=HEADERS_GET, params=PARAMS, timeout=30)
186
  res.raise_for_status()
 
187
  messages = res.json().get("messages", [])
 
 
 
 
 
 
 
 
188
 
189
- latest = max(
190
- (
191
- m for m in messages
192
- if m.get("plainText") and m.get("updatedAt")
193
- ),
194
- key=lambda m: parse_updated_at(m["updatedAt"]),
195
- default=None,
196
- )
197
 
198
- if not latest:
 
 
 
 
199
  time.sleep(10)
200
  continue
201
 
202
- youtube_id = extract_youtube_id(latest["plainText"])
 
203
  if not youtube_id:
204
  time.sleep(10)
205
  continue
206
 
207
  youtube_url = f"https://www.youtube.com/watch?v={youtube_id}"
208
 
209
- items = fetch_download_links(youtube_url)
210
- best_video, best_audio = pick_best_streams(items)
 
 
 
 
 
 
 
 
 
211
 
212
- nonce = fetch_nonce()
213
- merge = start_merge(best_video, best_audio, youtube_id, best_video["quality"], nonce)
 
214
 
215
- monitor = merge["data"]["result"]["monitor"]["http"]
216
- merged_url = wait_merge_done(monitor)
217
 
218
- message = build_links(items, merged_url, best_video["quality"])
219
- send_to_channel(message)
220
  print("送信完了")
221
 
222
  except Exception as e:
 
6
  from datetime import datetime, timezone
7
  from bs4 import BeautifulSoup
8
 
9
+ # ===== Channel.io 設定 =====
10
  GET_URL = "https://desk-api.channel.io/desk/channels/200605/groups/519217/messages"
11
  POST_URL = GET_URL
12
 
 
34
  }
35
 
36
  # ===== ssyoutube =====
37
+ SSYOUTUBE_URL = "https://ssyoutube.online/yt-video-detail/"
 
38
 
39
  # ===== Utils =====
40
def parse_updated_at(value):
    """Convert Channel.io's ``updatedAt`` field to an aware UTC datetime.

    Accepts either epoch milliseconds (int/float) or an ISO-8601 string
    (a trailing "Z" is normalized to "+00:00"); any other type yields None.
    """
    if isinstance(value, str):
        return datetime.fromisoformat(value.replace("Z", "+00:00"))
    if isinstance(value, (int, float)):
        # Channel.io timestamps are in milliseconds, hence the / 1000.
        return datetime.fromtimestamp(value / 1000, tz=timezone.utc)
    return None
46
 
 
55
  return m.group(1)
56
  return None
57
 
58
+ # ===== ssyoutube HTML解析 & nonce取得 =====
59
# ===== ssyoutube HTML parsing & nonce retrieval =====
def fetch_download_links_and_nonce(youtube_url):
    """POST *youtube_url* to ssyoutube's video-detail page and scrape it.

    Returns a tuple ``(items, nonce)`` where *items* is a list of dicts
    with keys "url", "quality", "has_audio" (one per download button),
    and *nonce* is the WordPress ajax nonce needed for the merge request.

    Raises:
        requests.HTTPError: on a non-2xx response.
        RuntimeError: when no nonce element is present in the page.
    """
    res = requests.post(
        SSYOUTUBE_URL,
        data={"videoURL": youtube_url},
        timeout=30,
        headers={
            "User-Agent": "Mozilla/5.0",
            "Referer": "https://ssyoutube.online/",
        },
    )
    res.raise_for_status()

    soup = BeautifulSoup(res.text, "lxml")

    # The nonce appears either as <meta name="wp-nonce" content=...>
    # or as <input name="nonce" value=...>.
    nonce_el = soup.select_one('meta[name="wp-nonce"], input[name="nonce"]')
    if nonce_el is None:
        # Fail with a clear message instead of crashing with
        # "TypeError: 'NoneType' object is not subscriptable".
        raise RuntimeError("nonce が取得できません")
    nonce = nonce_el["content"] if nonce_el.has_attr("content") else nonce_el["value"]

    items = []
    for btn in soup.select("button[data-url]"):
        url = btn.get("data-url")
        if not url:
            continue
        items.append({
            "url": url,
            "quality": btn.get("data-quality"),
            "has_audio": btn.get("data-has-audio"),
        })

    return items, nonce
95
+
96
def pick_best_video_and_audio(items):
    """Choose the best video-only stream and an audio stream from *items*.

    *items* is the list produced by fetch_download_links_and_nonce:
    dicts with keys "url", "quality", "has_audio".  A video candidate has
    has_audio == "false" and a quality label containing digits (e.g. "720p");
    an audio candidate has quality None or "audio".

    Returns (best_video, best_audio) — highest numeric quality video and
    the first audio item — or (None, None) when either side is missing.
    """
    video_candidates = []
    audio_candidates = []

    for item in items:
        if item["has_audio"] == "false" and item["quality"]:
            digits = re.sub(r"\D", "", item["quality"])
            # Skip labels with no digits (e.g. "HD") instead of crashing
            # with ValueError on int("").
            if digits:
                video_candidates.append((int(digits), item))
        if item["quality"] in (None, "audio"):
            audio_candidates.append(item)

    if not video_candidates or not audio_candidates:
        return None, None

    # max() keeps the first item among equal qualities, matching the
    # previous stable sort + [0] selection.
    best_video = max(video_candidates, key=lambda pair: pair[0])[1]
    best_audio = audio_candidates[0]

    return best_video, best_audio
114
 
115
def merge_video_audio(video, audio, nonce, youtube_id, quality):
    """Submit a merge job to ssyoutube's admin-ajax endpoint.

    Muxes *audio* into *video* server-side via the "replace_audio_in_video"
    operation and returns the parsed JSON response (which carries the
    monitor URL for polling).
    """
    endpoint = "https://ssyoutube.online/wp-admin/admin-ajax.php"
    job_name = f"{youtube_id}_{quality}"
    # Both inputs use identical chunked-download settings.
    chunk_download = {"type": "header", "size": 52428800, "concurrency": 3}

    job_spec = {
        "id": job_name,
        "ttl": 3600000,
        "inputs": [
            {"url": audio["url"], "ext": "mp4", "chunkDownload": chunk_download},
            {"url": video["url"], "ext": "mp4", "chunkDownload": chunk_download},
        ],
        "output": {
            "ext": "mp4",
            "downloadName": f"{job_name}.mp4",
            "chunkUpload": {"size": 104857600, "concurrency": 3},
        },
        "operation": {"type": "replace_audio_in_video"},
    }

    # WordPress ajax expects a multipart form; (None, value) sends
    # plain fields without filenames.
    multipart = {
        "action": (None, "process_video_merge"),
        "nonce": (None, nonce),
        "request_data": (None, json.dumps(job_spec)),
    }

    response = requests.post(endpoint, files=multipart, timeout=30)
    response.raise_for_status()
    return response.json()
150
 
151
def wait_for_merge_done(monitor_http, poll_interval=5, max_wait=None):
    """Poll *monitor_http* until the merge job reports status "done".

    Args:
        monitor_http: URL returned by the merge request for job status.
        poll_interval: seconds between polls (default 5, as before).
        max_wait: overall time budget in seconds; None (default) waits
            forever, preserving the original behavior.

    Returns the merged file's output URL.

    Raises:
        TimeoutError: when max_wait is set and the job has not finished.
        requests.HTTPError: on a non-2xx monitor response.
    """
    deadline = None if max_wait is None else time.monotonic() + max_wait

    while True:
        res = requests.get(monitor_http, timeout=15)
        res.raise_for_status()
        data = res.json()

        if data["result"]["status"] == "done":
            return data["result"]["output"]["url"]

        # Without a deadline this loops forever if the job never
        # succeeds server-side; max_wait gives callers an escape hatch.
        if deadline is not None and time.monotonic() >= deadline:
            raise TimeoutError(f"merge did not finish within {max_wait}s")

        time.sleep(poll_interval)
159
 
160
+ # ===== Build & Send =====
161
# ===== Build & Send =====
def build_links(items):
    """Render each download item as one Channel.io <link> tag per line."""
    rendered = []

    for entry in items:
        flag = entry["has_audio"]
        if flag == "false":
            suffix = "(映像のみ)"
        elif flag == "true":
            suffix = "(音声付き)"
        else:
            suffix = ""

        rendered.append(
            f'<link type="url" value="{entry["url"]}"> {entry["quality"]} {suffix}</link>'
        )

    return "\n".join(rendered)
178
 
179
def send_to_channel(text):
    """POST *text* as a single text block to the Channel.io group."""
    # requestId must be unique per message; epoch millis suffices.
    body = {
        "requestId": f"desk-web-{int(time.time() * 1000)}",
        "blocks": [{"type": "text", "value": text}],
        "buttons": None,
        "form": None,
        "webPage": None,
        "files": None,
        "customPayload": None,
    }

    response = requests.post(
        POST_URL,
        headers=HEADERS_POST,
        data=json.dumps(body),
        timeout=30,
    )
    response.raise_for_status()
197
 
 
201
  try:
202
  res = requests.get(GET_URL, headers=HEADERS_GET, params=PARAMS, timeout=30)
203
  res.raise_for_status()
204
+
205
  messages = res.json().get("messages", [])
206
+ latest_msg = None
207
+ latest_time = None
208
+
209
+ for msg in messages:
210
+ plain_text = msg.get("plainText")
211
+ updated_at = msg.get("updatedAt")
212
+ if not plain_text or updated_at is None:
213
+ continue
214
 
215
+ t = parse_updated_at(updated_at)
216
+ if not t:
217
+ continue
 
 
 
 
 
218
 
219
+ if latest_time is None or t > latest_time:
220
+ latest_time = t
221
+ latest_msg = msg
222
+
223
+ if not latest_msg:
224
  time.sleep(10)
225
  continue
226
 
227
+ text = latest_msg["plainText"]
228
+ youtube_id = extract_youtube_id(text)
229
  if not youtube_id:
230
  time.sleep(10)
231
  continue
232
 
233
  youtube_url = f"https://www.youtube.com/watch?v={youtube_id}"
234
 
235
+ items, nonce = fetch_download_links_and_nonce(youtube_url)
236
+ if not items:
237
+ print("ダウンロードリンクが取得できませんでした")
238
+ time.sleep(10)
239
+ continue
240
+
241
+ video, audio = pick_best_video_and_audio(items)
242
+ if not video or not audio:
243
+ print("動画または音声が取得できませんでした")
244
+ time.sleep(10)
245
+ continue
246
 
247
+ merge_res = merge_video_audio(video, audio, nonce, youtube_id, video["quality"])
248
+ monitor_http = merge_res["data"]["result"]["monitor"]["http"]
249
+ final_video_url = wait_for_merge_done(monitor_http)
250
 
251
+ message_text = build_links(items)
252
+ message_text += f'\n<link type="url" value="{final_video_url}">🎬 音声付き {video["quality"]}</link>'
253
 
254
+ send_to_channel(message_text)
 
255
  print("送信完了")
256
 
257
  except Exception as e: