Spaces:
Paused
Paused
GilangAlRusliadi committed on
Commit ·
4691839
1
Parent(s): f86ff85
Anjay
Browse files- app.py +1 -1
- bunkrr.py +35 -0
- iwara.py +353 -0
- mega.py +27 -0
- other.py +249 -2
- paipancon.py +91 -0
- requirements.txt +5 -1
- rule34.py +81 -0
- sankaku.py +27 -0
- trailer.py +80 -0
app.py
CHANGED
|
@@ -6,7 +6,7 @@ from pornhub import pornhub
|
|
| 6 |
from other import cut_video, session, convert_size
|
| 7 |
|
| 8 |
# Navigasi Sidebar
|
| 9 |
-
options = ['Youtube', 'Pornhub', 'Iwara', 'Mega', 'Rule34', 'Paipancon', 'Trailer']
|
| 10 |
with st.sidebar:
|
| 11 |
selected = option_menu("Video Downloader", options,
|
| 12 |
icons=['play', 'fire', 'star', 'moon','gear', 'house', 'lightning'], menu_icon="cast", default_index=0)
|
|
|
|
| 6 |
from other import cut_video, session, convert_size
|
| 7 |
|
| 8 |
# Navigasi Sidebar
|
| 9 |
+
options = ['Youtube', 'Pornhub (Not Work)', 'Iwara (Not Work)', 'Mega', 'Rule34', 'Paipancon', 'Trailer']
|
| 10 |
with st.sidebar:
|
| 11 |
selected = option_menu("Video Downloader", options,
|
| 12 |
icons=['play', 'fire', 'star', 'moon','gear', 'house', 'lightning'], menu_icon="cast", default_index=0)
|
bunkrr.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from bs4 import BeautifulSoup
|
| 2 |
+
import re
|
| 3 |
+
|
| 4 |
+
def get_info_bunkrr(soup: "BeautifulSoup"):
    """Extract the video title and direct .mp4 download link from a bunkrr page.

    Returns:
        (video_title, link_download) — both '' when the <title> element is missing;
        link_download is '' when no matching <source> is found.
    """
    # The video title lives in the <title> element.
    title = soup.find("title")
    if not title:
        print("Tidak ditemukan elemen <title>")
        return '', ''

    # Bugfix: decode the HTML entity for '&' — the previous replace('&', '&')
    # was a no-op.
    video_title = title.text.replace('&amp;', '&')

    # If there is a '-', drop everything from the LAST '-' onward
    # (site-name suffix), then turn remaining dashes into spaces.
    if '-' in video_title:
        last_dash_index = video_title.rfind('-')
        video_title = video_title[:last_dash_index]
        video_title = video_title.replace('-', ' ')

    # Download link: a <source> whose src starts with https and ends with .mp4.
    link_download = soup.find("source", src=re.compile(r'^https.*\.mp4$'))
    if link_download:
        # Same entity decode as above for the URL.
        link_download = link_download['src'].replace('&amp;', '&')
        print(link_download)
    else:
        link_download = ''

    return video_title, link_download
|
iwara.py
ADDED
|
@@ -0,0 +1,353 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests, hashlib, os
|
| 2 |
+
|
| 3 |
+
api_url = 'https://api.iwara.tv'
|
| 4 |
+
file_url = 'https://files.iwara.tv'
|
| 5 |
+
|
| 6 |
+
class BearerAuth(requests.auth.AuthBase):
    """requests auth hook that attaches a Bearer token to each request."""

    def __init__(self, token):
        self.token = token

    def __call__(self, r):
        # Inject the Authorization header before the request is sent.
        r.headers['Authorization'] = 'Bearer ' + self.token
        return r
|
| 14 |
+
|
| 15 |
+
class ApiClient:
    """Minimal iwara.tv API client: login, video listing, and video/thumbnail download."""

    def __init__(self, email, password):
        self.email = email
        self.password = password

        # API endpoints and timeouts.
        self.api_url = api_url
        self.file_url = file_url
        self.timeout = 30
        self.download_timeout = 300
        self.token = None  # populated by login()

    def login(self) -> requests.Response:
        """POST credentials to /user/login; remember the bearer token on success."""
        url = self.api_url + '/user/login'
        json = {'email': self.email, 'password': self.password}
        r = requests.post(url, json=json, timeout=self.timeout)
        try:
            self.token = r.json()['token']
            print('API Login success')
        except (ValueError, KeyError):
            # Bugfix: narrowed from a bare except — body was not JSON, or had no token.
            print('API Login failed')
        return r

    # NOTE: the 'limit' query parameter is currently ignored by the API.
    def get_videos(self, sort='date', rating='all', page=0, limit=32, subscribed=False) -> requests.Response:
        """Get new videos from iwara.tv.

        - sort: date, trending, popularity, views, likes
        - rating: all, general, ecchi
        """
        url = self.api_url + '/videos'
        params = {
            'sort': sort,
            'rating': rating,
            'page': page,
            'limit': limit,
            'subscribed': 'true' if subscribed else 'false',
        }
        if self.token is None:
            r = requests.get(url, params=params, timeout=self.timeout)
        else:
            r = requests.get(url, params=params, auth=BearerAuth(self.token), timeout=self.timeout)

        # Debug
        print("[DEBUG] get_videos response:", r)
        return r

    def get_video(self, video_id) -> requests.Response:
        """Get a single video's metadata from iwara.tv."""
        url = self.api_url + '/video/' + video_id
        if self.token is None:
            r = requests.get(url, timeout=self.timeout)
        else:
            r = requests.get(url, auth=BearerAuth(self.token), timeout=self.timeout)

        # Debug
        print("[DEBUG] get_video response:", r)
        return r

    def download_video_thumbnail(self, video_id) -> str:
        """Download a video's thumbnail to '<video_id>.jpg'; return the filename."""
        video = self.get_video(video_id).json()
        file_id = video['file']['id']
        thumbnail_id = video['thumbnail']
        url = self.file_url + '/image/original/' + file_id + '/thumbnail-{:02d}.jpg'.format(thumbnail_id)

        thumbnail_file_name = video_id + '.jpg'
        if os.path.exists(thumbnail_file_name):
            print(f"Video ID {video_id} thumbnail already downloaded, skipped downloading. ")
            return thumbnail_file_name

        print(f"Downloading thumbnail for video ID: {video_id} ...")
        with open(thumbnail_file_name, "wb") as f:
            for chunk in requests.get(url).iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
                    f.flush()
        return thumbnail_file_name

    def download_video(self, video_id) -> str:
        """Download a video (Source quality) to '<video_id>.<ext>'; return the filename.

        Raises:
            Exception: if metadata can't be fetched, the download fails, or no
                'Source' quality resource exists.
        """
        try:
            video = self.get_video(video_id).json()
        except Exception as e:
            raise Exception(f"Failed to get video info for video ID: {video_id}, error: {e}")

        # Debug
        print(video)

        url = video['fileUrl']
        file_id = video['file']['id']
        # 'expires' is the value of the first query parameter embedded in fileUrl.
        expires = url.split('/')[4].split('?')[1].split('&')[0].split('=')[1]

        # IMPORTANT: This signing postfix might change in the future.
        SHA_postfix = "_5nFp9kmbNnHdAFhaqMvt"
        SHA_key = file_id + "_" + expires + SHA_postfix
        # Renamed from 'hash' to avoid shadowing the builtin.
        signature = hashlib.sha1(SHA_key.encode('utf-8')).hexdigest()
        headers = {"X-Version": signature}

        resources = requests.get(url, headers=headers, auth=BearerAuth(self.token), timeout=self.timeout).json()

        # Debug
        print(resources)

        # Slot 0 is reserved for 'Source'; other qualities are currently unused.
        resources_by_quality = [None for _ in range(10)]
        for resource in resources:
            if resource['name'] == 'Source':
                resources_by_quality[0] = resource

        for resource in resources_by_quality:
            if resource is not None:
                # Debug
                print(resource)

                download_link = "https:" + resource['src']['download']
                file_type = resource['type'].split('/')[1]
                video_file_name = video_id + '.' + file_type

                if os.path.exists(video_file_name):
                    print(f"Video ID {video_id} Already downloaded, skipped downloading. ")
                    return video_file_name

                print(f"Downloading video ID: {video_id} ...")
                try:
                    with open(video_file_name, "wb") as f:
                        for chunk in requests.get(download_link).iter_content(chunk_size=1024):
                            if chunk:
                                f.write(chunk)
                                f.flush()
                    return video_file_name
                except Exception as e:
                    # Remove the partial file before propagating the failure.
                    os.remove(video_file_name)
                    raise Exception(f"Failed to download video ID: {video_id}, error: {e}")

        raise Exception("No video with Source quality found")
|
| 221 |
+
|
| 222 |
+
# -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
# -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
| 227 |
+
|
| 228 |
+
### download video from iwara.tv
|
| 229 |
+
### usage: python iwara [url]
|
| 230 |
+
### by AngelBottomless @ github
|
| 231 |
+
# download from iwara page
|
| 232 |
+
import requests
|
| 233 |
+
# use selenium to get video url
|
| 234 |
+
from selenium import webdriver
|
| 235 |
+
import argparse
|
| 236 |
+
|
| 237 |
+
def download_video(url):
    """Download an iwara video by driving a browser to resolve the media URL."""
    # Output filename comes from the last URL path segment.
    filename = url.split('/')[-1] + '.mp4'
    # Drive the page: accept the terms dialog, start playback, read the stream URL.
    driver = run_webdriver(url)
    click_accept(driver)
    driver.implicitly_wait(2)
    click_play(driver)
    url = find_video_url(driver)
    # Fetch the stream and persist it to disk.
    response = requests.get(url)
    with open(filename, 'wb') as out:
        out.write(response.content)
    # Tear down the browser.
    driver.close()
|
| 252 |
+
|
| 253 |
+
def download_with_retry(url, retry=3):
    """Attempt download_video(url) up to `retry` times.

    Returns:
        True on the first success, False if every attempt fails.
    """
    for _ in range(retry):
        try:
            download_video(url)
            return True
        except Exception:
            # Bugfix: narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate instead of being swallowed by the retry loop.
            print('download failed, retrying...')
            continue
    return False
|
| 263 |
+
|
| 264 |
+
def run_webdriver(url):
    """Open `url` in a muted Chrome instance and return the driver."""
    opts = webdriver.ChromeOptions()
    # Mute the browser so playback is silent.
    opts.add_argument("--mute-audio")
    driver = webdriver.Chrome(options=opts)
    driver.get(url)
    driver.implicitly_wait(4)
    return driver
|
| 274 |
+
|
| 275 |
+
def click_accept(driver):
    """Dismiss the site dialog by clicking its accept button (fixed xpath)."""
    driver.find_element('xpath', '/html/body/div[3]/div/div[2]/button[1]').click()
|
| 279 |
+
def click_play(driver):
    """Start playback by clicking the video.js play button."""
    driver.find_element('xpath', '//*[@id="vjs_video_3"]/button').click()
|
| 283 |
+
|
| 284 |
+
def find_video_url(driver):
    """Return the 'src' attribute of the page's <video> element."""
    element = driver.find_element('xpath', '//*[@id="vjs_video_3_html5_api"]')
    return element.get_attribute('src')
|
| 290 |
+
|
| 291 |
+
def track_clipboard():
    """Poll the clipboard every second; download any iwara.tv URL copied to it.

    Runs until interrupted with Ctrl-C; returns 0 on exit.
    """
    import pyperclip
    import time
    import subprocess
    # (removed unused failed_urls / success_urls locals)
    print('tracking clipboard...')
    # Loop: whenever the clipboard changes and contains an iwara URL, spawn a
    # background download, then remember the value to avoid re-triggering.
    previous = ''
    try:
        while True:
            clipboard = pyperclip.paste()
            if clipboard != previous:
                if 'iwara.tv' in clipboard:
                    print('url detected, downloading...')
                    # Download in a subprocess so polling keeps running.
                    subprocess.Popen(['python', '-m', 'iwara', clipboard])
                    print('download complete')
                previous = clipboard
            time.sleep(1)
    except KeyboardInterrupt:
        print('exiting...')
        return 0
|
| 320 |
+
|
| 321 |
+
if __name__ == '__main__':
    failed_urls = []
    success_urls = set()
    import sys
    # CLI: either track the clipboard (-t) or download one iwara URL.
    parser = argparse.ArgumentParser()
    # When 'track' is used, a positional URL is not required.
    parser.add_argument('-t', '--track', action='store_true', help='track clipboard for iwara url')
    parser.add_argument('url', nargs='?', default='', help='iwara url')
    args = parser.parse_args()

    if args.track:
        track_clipboard()
    elif 'iwara.tv' in args.url:
        if not download_with_retry(args.url):
            print('download failed')
            failed_urls.append(args.url)
        else:
            print('download complete')
            success_urls.add(args.url)
        if len(failed_urls) > 0:
            print('failed urls:')
            for url in failed_urls:
                print(url)
                # Record the failure in ./failed.txt for later inspection.
                with open('failed.txt', 'a') as f:
                    f.write(url + '\n')
            sys.exit(1)
    else:
        print('invalid url')
        sys.exit(1)
|
mega.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import shutil
|
| 3 |
+
from mega import Mega
|
| 4 |
+
from other import convert_videos
|
| 5 |
+
|
| 6 |
+
def download_mega(name, directory, url):
    """Download a MEGA file as `name`, move it into `directory`, return the final path."""
    if not os.path.exists(directory):
        os.makedirs(directory)

    # Anonymous MEGA session.
    client = Mega().login()

    # Fetch the file into the working directory first...
    downloaded = client.download_url(url, dest_filename=name)

    # ...then move it to its final location.
    destination = os.path.join(directory, downloaded)
    shutil.move(downloaded, destination)

    return destination
|
| 21 |
+
|
| 22 |
+
def mega(url, judul):
    """Download a MEGA video titled `judul`, convert it to 720p, return the output path."""
    judul = judul + '.mp4'
    download = '/home/user/app/Mega'
    # Fetch into the download folder. (Removed the unused 'filename' local:
    # convert_videos scans the folder itself.)
    download_mega(judul, download, url)
    output_file = convert_videos(720, download)
    return output_file
|
other.py
CHANGED
|
@@ -1,12 +1,22 @@
|
|
| 1 |
import os
|
| 2 |
import requests
|
| 3 |
import time
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
from datetime import datetime, timedelta
|
| 5 |
from moviepy.editor import VideoFileClip
|
| 6 |
import streamlit as st
|
| 7 |
from streamlit_option_menu import option_menu
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
-
def
|
| 10 |
start_time = None
|
| 11 |
start_time = time.time()
|
| 12 |
if start_time is not None:
|
|
@@ -69,4 +79,241 @@ def convert_size(size_bytes):
|
|
| 69 |
while size_bytes >= 1024 and index < len(units) - 1:
|
| 70 |
size_bytes /= 1024
|
| 71 |
index += 1
|
| 72 |
-
return f"{size_bytes:.2f} {units[index]}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import os
|
| 2 |
import requests
|
| 3 |
import time
|
| 4 |
+
import math
|
| 5 |
+
import re
|
| 6 |
+
import shutil
|
| 7 |
+
import json
|
| 8 |
+
import subprocess
|
| 9 |
+
import mimetypes
|
| 10 |
from datetime import datetime, timedelta
|
| 11 |
from moviepy.editor import VideoFileClip
|
| 12 |
import streamlit as st
|
| 13 |
from streamlit_option_menu import option_menu
|
| 14 |
+
from collections import defaultdict
|
| 15 |
+
from tqdm import tqdm
|
| 16 |
+
from googletrans import Translator
|
| 17 |
+
from bs4 import BeautifulSoup
|
| 18 |
|
| 19 |
+
def start_clock():
|
| 20 |
start_time = None
|
| 21 |
start_time = time.time()
|
| 22 |
if start_time is not None:
|
|
|
|
| 79 |
while size_bytes >= 1024 and index < len(units) - 1:
|
| 80 |
size_bytes /= 1024
|
| 81 |
index += 1
|
| 82 |
+
return f"{size_bytes:.2f} {units[index]}"
|
| 83 |
+
|
| 84 |
+
def add_space(name):
    """Insert a space at every lower→upper case boundary (e.g. 'fooBar' -> 'foo Bar')."""
    pieces = []
    previous = ''
    for ch in name:
        # A boundary is an uppercase char directly preceded by a lowercase one.
        if previous.islower() and ch.isupper():
            pieces.append(' ')
        pieces.append(ch)
        previous = ch
    return ''.join(pieces)
|
| 91 |
+
|
| 92 |
+
def download_file(url, new_name, directory):
    """Stream `url` into `directory`/`new_name`.<ext> with a progress bar; return the path.

    The extension is guessed from the server-reported Content-Type.
    """
    response = requests.get(url, stream=True)
    content_type = response.headers.get("Content-Type")
    extension = mimetypes.guess_extension(content_type)

    if not os.path.exists(directory):
        os.makedirs(directory)

    filename = f"{directory}/{new_name}{extension}"
    with open(filename, 'wb') as file:
        # Progress bar sized from Content-Length (0 if the header is absent).
        bar = tqdm(total=int(response.headers.get("Content-Length", 0)),
                   unit="B", unit_scale=True, ncols=80)
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:
                file.write(chunk)
                bar.update(len(chunk))
        bar.close()

    return filename
|
| 113 |
+
|
| 114 |
+
def convert_japanese_to_romaji(text):
    """Return the romaji pronunciation of Japanese `text` via Google Translate."""
    translator = Translator(service_urls=['translate.google.com'])
    # A ja->ja "translation" still populates the pronunciation field.
    return translator.translate(text, src='ja', dest='ja').pronunciation
|
| 118 |
+
|
| 119 |
+
def translate_japanese_to_english(text):
    """Translate Japanese `text` to English via Google Translate."""
    translator = Translator(service_urls=['translate.google.com'])
    return translator.translate(text, src='ja', dest='en').text
|
| 123 |
+
|
| 124 |
+
def get_video_resolution(input_file):
    """Return (width, height) of a video via ffprobe, or None on failure."""
    command = ['ffprobe', '-v', 'error', '-select_streams', 'v:0',
               '-show_entries', 'stream=width,height', '-of', 'json', input_file]
    result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if result.returncode != 0:
        print(f'Error getting video resolution: {result.stderr}')
        return None
    stream = json.loads(result.stdout)['streams'][0]
    return (stream['width'], stream['height'])
|
| 135 |
+
|
| 136 |
+
def get_video_info(soup: "BeautifulSoup"):
    """Extract (actress, series, title_tag, cleaned_title) from the twitter:title meta tag.

    Returns ('', '', None, '') when the meta tag is missing.
    """
    # Title lives in <meta name="twitter:title" content="...">.
    title = soup.find("meta", attrs={"name": "twitter:title"})
    if title:
        video_title = title['content']

        # Strip any bracketed chunks, e.g. '[HD]' or '[1080p]'.
        video_title = re.sub(r'\[[^\]]*\]', '', video_title)

        # Drop the 'Cosplay ' prefix word.
        video_title = video_title.replace('Cosplay ', '')

        # Swap the two halves around ' - ' so the series comes first.
        if ' - ' in video_title:
            parts = video_title.split(' - ')
            video_title = f"{parts[1]} - {parts[0]}"
            actress = parts[0].lstrip()
            series = parts[1].lstrip()
        else:
            actress = ''
            series = ''

        # Collapse runs of whitespace to one space.
        # (Bugfix: the replacement used to be '', which deleted the spaces entirely.)
        video_title = re.sub(r'\s{2,}', ' ', video_title)

        print(f"Artis: {actress} dan Series: {series}")
        return actress, series, title, video_title
    else:
        print("Tidak ditemukan elemen meta dengan name twitter:title")
        # Bugfix: match the success branch's 4-tuple arity so callers that
        # unpack four values don't crash on the missing-tag path.
        return '', '', None, ''
|
| 166 |
+
|
| 167 |
+
def get_digits(series: str, thumbnail_url: str):
    """Pull the numeric scene code out of a thumbnail URL, per series; '' if absent."""
    # Each supported series encodes its code differently in the URL.
    patterns = {
        'Cospuri': r'/0(\d{3})',
        'Fellatio Japan': r'/(\d+)_',
    }
    pattern = patterns.get(series)
    if pattern is None:
        return ''
    match = re.search(pattern, thumbnail_url)
    if not match:
        return ''
    digits = match.group(1)
    print(f"Kode Digit: {digits}")
    return digits
|
| 188 |
+
|
| 189 |
+
def find_image_file(name, directory):
    """Search `directory` recursively for '<name>.jpg'; return its path or None."""
    target = f'{name}.jpg'
    for root, _dirs, files in os.walk(directory):
        if target in files:
            return os.path.join(root, target)
    return None
|
| 195 |
+
|
| 196 |
+
def extract_number(video_file):
|
| 197 |
+
# Use a regular expression to find the first group of digits in the video file name
|
| 198 |
+
match = re.search(r'\d+', video_file)
|
| 199 |
+
if match:
|
| 200 |
+
# If a group of digits is found, return it as an integer
|
| 201 |
+
return int(match.group())
|
| 202 |
+
else:
|
| 203 |
+
# If no group of digits is found, return a large number to ensure that this video file is processed last
|
| 204 |
+
return float('inf')
|
| 205 |
+
|
| 206 |
+
def get_video_resolution(input_file):
    """Return (width, height) of a video via ffprobe, or None on failure.

    NOTE(review): duplicate of the identically-named function defined earlier
    in this file; this later definition is the one bound at import time.
    """
    command = ['ffprobe', '-v', 'error', '-select_streams', 'v:0',
               '-show_entries', 'stream=width,height', '-of', 'json', input_file]
    result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if result.returncode == 0:
        info = json.loads(result.stdout)
        stream = info['streams'][0]
        return (stream['width'], stream['height'])
    print(f'Error getting video resolution: {result.stderr}')
    return None
|
| 217 |
+
|
| 218 |
+
def convert_videos(height, download_folder):
    """Re-encode every video in `download_folder` to target `height` (720/480/360),
    writing results to '/home/user/app/Hasil Konversi' via ffmpeg.

    Returns:
        The output path of the last file actually converted, or None when
        nothing was converted.

    Bugfixes: 'command' was unbound (NameError) for unsupported target heights,
    and 'output_file' was unbound (UnboundLocalError) for an empty folder or set
    even for files that were skipped.
    """
    konversi_folder = '/home/user/app/Hasil Konversi'
    if not os.path.exists(konversi_folder):
        os.makedirs(konversi_folder)

    video_extensions = ['.mp4', '.avi', '.mov', '.wmv', '.flv', '.mkv', '.webm', '.mpeg', '.3gp', '.m4v', '.mpg', '.vob', '.ts', '.asf', '.rm', '.swf', '.ogv']
    output_file = None
    for file in os.listdir(download_folder):
        file_extension = os.path.splitext(file)[1].lower()
        if file_extension not in video_extensions:
            continue
        input_file = os.path.join(download_folder, file)
        candidate = os.path.join(konversi_folder, file)

        # Probe the source resolution; skip files ffprobe can't read.
        resolution = get_video_resolution(input_file)
        if resolution is None:
            continue
        width_ori, height_ori = resolution

        command = None
        if height_ori < width_ori:
            # Landscape: keep aspect ratio, rounding the computed width up.
            width = int((width_ori / height_ori) * height)
            if (width_ori / height_ori) * height % 1 != 0:
                width += 1

            if height == 720:
                command = f'ffmpeg -i "{input_file}" -s {width}x{height} -b:v 850k -bufsize 1000k -r 30 -b:a 160k -ar 44100 -ac 2 "{candidate}"'
            elif height == 480:
                command = f'ffmpeg -i "{input_file}" -s {width}x{height} -b:v 600k -bufsize 700k -r 24 -b:a 95k -ar 44100 -ac 2 "{candidate}"'
            elif height == 360:
                command = f'ffmpeg -i "{input_file}" -s {width}x{height} -b:v 250k -bufsize 300k -r 24 -b:a 50k -ar 44100 -ac 2 "{candidate}"'
        else:
            # Portrait/square: the requested 'height' acts as the target width.
            width = height
            height = int((height_ori / width_ori) * width)
            if (height_ori / width_ori) * width % 1 != 0:
                height += 1

            if width == 720:
                command = f'ffmpeg -i "{input_file}" -s {width}x{height} -b:v 850k -bufsize 1000k -r 30 -b:a 160k -ar 44100 -ac 2 "{candidate}"'
            elif width == 480:
                command = f'ffmpeg -i "{input_file}" -s {width}x{height} -b:v 600k -bufsize 700k -r 24 -b:a 95k -ar 44100 -ac 2 "{candidate}"'

        if command is not None:
            os.system(command)
            output_file = candidate

    return output_file
|
| 266 |
+
|
| 267 |
+
def join_video():
    """Prepend a 1-second intro (thumbnail image + audio sting) to every
    converted video, writing the joined result to '/home/user/app/Hasil Join'.

    For each video in the conversion directory, the thumbnail with the same
    base name is turned into a 1-second intro clip with ffmpeg, concatenated
    with the video via the concat demuxer, and the source/intermediate files
    are removed afterwards.

    Returns:
        str | None: path of the last joined output file, or None when no
        video had a matching thumbnail (previously this raised NameError).
    """
    # Working directories for thumbnails, converted videos and raw trailers
    image_dir = '/home/user/app/Trailer/Thumbnail'
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    video_dir = '/home/user/app/Hasil Konversi'
    if not os.path.exists(video_dir):
        os.makedirs(video_dir)
    video_path = '/home/user/app/Trailer/Video'
    if not os.path.exists(video_path):
        os.makedirs(video_path)

    # Audio track used for the 1-second intro
    audio_file = './sound.wav'

    # All converted videos waiting to be joined
    video_files = os.listdir(video_dir)

    # BUG FIX: output_file was undefined when the loop produced no output,
    # causing a NameError at the final return.
    output_file = None

    for video_file in video_files:
        # The base name (no extension) links a video to its thumbnail
        name = os.path.splitext(video_file)[0]
        # Locate the matching thumbnail image (helper defined elsewhere)
        image_file = find_image_file(name, image_dir)
        if image_file:
            output_dir = '/home/user/app/Hasil Join'
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            output_file = os.path.join(output_dir, f'{name}.mp4')

            # BUG FIX: the paths contain spaces ("Hasil Konversi", titles
            # with spaces) — every path handed to the shell must be quoted,
            # otherwise ffmpeg receives broken arguments.
            os.system(
                f'ffmpeg -loop 1 -i "{image_file}" -i "{audio_file}" '
                f'-c:v libx264 -b:v 850k -bufsize 1000k -r 30 -b:a 160k '
                f'-ar 44100 -ac 2 -t 1 -pix_fmt yuv420p -vf scale=1280:720 '
                f'-shortest "{name}.mp4"'
            )
            # File list consumed by the ffmpeg concat demuxer
            with open('input.txt', 'w') as f:
                f.write(f"file '{name}.mp4'\n")
                f.write(f"file '{os.path.join(video_dir, video_file)}'\n")
            os.system(
                f'ffmpeg -f concat -safe 0 -i input.txt -c:v libx264 '
                f'-b:v 850k -bufsize 1000k -r 30 -b:a 160k -ar 44100 -ac 2 '
                f'-c:a aac -pix_fmt yuv420p -vf scale=1280:720 "{output_file}"'
            )
            print(f"Video intro dan video asli berhasil digabungkan menjadi {output_file}")

            # Report the output size (convert_size defined elsewhere)
            output_file_size = os.path.getsize(output_file)
            print(f"Ukuran file output: {convert_size(output_file_size)}")

            # Clean up the source video and the temporary intro clip
            os.remove(os.path.join(video_dir, video_file))
            os.remove(f'{name}.mp4')

    return output_file
paipancon.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
import re
|
| 3 |
+
|
| 4 |
+
from other import convert_japanese_to_romaji
|
| 5 |
+
|
| 6 |
+
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def get_info_paipancon(link):
    """Scrape a paipancon page and extract the video metadata.

    Fetches the page at *link* and pulls out the title, series name, actress
    name (converted to romaji), the digit code embedded in the title, and the
    two streaming links (standard server and HQ).

    Returns:
        tuple: (title, series, actress, digit, server_link, hq_link);
        *digit* is None when the title carries no numeric code.
    """
    page = requests.get(link, headers=headers).text

    def spaced(text):
        # Re-insert spaces lost in the page markup (camelCase joins, dashes).
        text = re.sub(r'([a-z])([A-Z])', r'\1 \2', text)
        return re.sub(r'([A-Za-z])-([A-Za-z])', r'\1 - \2', text)

    def section(marker, terminator):
        # Text between *marker* and the next *terminator* occurrence.
        begin = page.find(marker) + len(marker)
        return page[begin:page.find(terminator, begin)]

    # Title: add spacing, then capitalise the first letter of every word
    # (the rest of each word is left untouched).
    title = spaced(section('"text-align:center;font-size:x-large;"><a>', '</a>'))
    title = ' '.join(w[0].upper() + w[1:] for w in title.split())

    # Digit code inside the title, e.g. "ABC-1234" or a leading number.
    match = re.search(r'(?<=-)\d[\d_]*|^\d[\d_]*', title)
    digit = match.group() if match else match
    # Drop a single leading zero on 4-digit codes.
    if digit and len(digit) == 4 and digit[0] == '0':
        digit = digit[1:]

    # Series name (after the Japanese "series" label), spaced and title-cased.
    series = spaced(section('>系列:', '</a>')).title()

    # Actress: romaji conversion, spacing, title case, '&' between names.
    actress = spaced(convert_japanese_to_romaji(section('女優:', '</a>')))
    actress = actress.title().replace(', ', ' & ')

    # Streaming links; these slices start at the URL itself, not after a label.
    server_begin = page.find('https://op1')
    server_link = page[server_begin:page.find("' type=", server_begin)]
    hq_begin = page.find('https://fl')
    hq_link = page[hq_begin:page.find("' type=", hq_begin)]

    return title, series, actress, digit, server_link, hq_link
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
|
requirements.txt
CHANGED
|
@@ -3,4 +3,8 @@ streamlit_option_menu
|
|
| 3 |
pytube
|
| 4 |
youtube-dl
|
| 5 |
moviepy
|
| 6 |
-
tqdm
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
pytube
|
| 4 |
youtube-dl
|
| 5 |
moviepy
|
| 6 |
+
tqdm
|
| 7 |
+
selenium
|
| 8 |
+
pyperclip
|
| 9 |
+
googletrans==4.0.0-rc1
|
| 10 |
+
mega.py
|
rule34.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from bs4 import BeautifulSoup
|
| 2 |
+
import json
|
| 3 |
+
|
| 4 |
+
def get_info_rule34(soup: BeautifulSoup):
    """Extract video metadata from a parsed rule34 video page.

    Args:
        soup: BeautifulSoup document of the video page.

    Returns:
        tuple: (video_title, artist, thumbnail_url, video_url). Any element
        is None when the corresponding information is missing from the page
        (previously a missing element caused a NameError at the return).
    """
    # BUG FIX: every result defaults to None so the final return can never
    # reference an undefined name when a lookup below fails.
    video_title = None
    artist = None
    thumbnail_url = None
    video_url = None

    # Video title lives in the element with class "title_video"
    title = soup.find(class_="title_video")
    if title:
        video_title = title.text.strip().replace('/', ' -')
        # Insert a space after a closing ']' glued directly to a letter
        idx = video_title.find(']')
        if idx != -1 and idx + 1 < len(video_title) and video_title[idx + 1].isalpha():
            video_title = video_title[:idx + 1] + ' ' + video_title[idx + 1:]
        video_title = video_title.title()
        print(f"Judul Video: {video_title}")
    else:
        print("Judul Video tidak ditemukan")

    # Artist name: scan every "col" element for an "Artist:" label
    cols = soup.find_all(class_="col")
    if cols:
        for col in cols:
            label = col.find(class_="label", string="Artist:")
            if label:
                # The artist name sits in label's sibling .item > .name
                item = label.find_next_sibling(class_="item")
                if item:
                    name = item.find(class_="name")
                    if name:
                        artist = name.text.strip()
                        print(f"Nama Artist: {artist}")
                        break  # first artist found wins
        else:
            # for/else: the loop finished without break -> no artist found
            print("Nama Artist tidak ditemukan")
    else:
        print("Elemen col tidak ditemukan")

    # Thumbnail URL from the structured-data (ld+json) script tag
    script = soup.find("script", type="application/ld+json")
    if script:
        data = json.loads(script.string)
        if "thumbnailUrl" in data:
            thumbnail_url = data['thumbnailUrl']
            print(f"Thumbnail URL: {thumbnail_url}")
        else:
            print("Tidak ditemukan thumbnail URL")
    else:
        print("Tidak ditemukan elemen script dengan type application/ld+json")

    # List the advertised MP4 resolutions (informational only)
    resolutions = []
    for a in soup.find_all('a'):
        if 'MP4' in a.text and 'p' in a.text:
            resolutions.append(a.text.split()[1])
    if resolutions:
        print("Resolusi yang tersedia: " + ", ".join(resolutions))
    else:
        print("Tidak ditemukan resolusi yang tersedia")

    # Prefer the 720p download link, falling back to 480p
    video_quality_elements = soup.find_all("a", class_="tag_item")
    video_quality_720p = None
    video_quality_480p = None
    for element in video_quality_elements:
        if "720p" in element.text:
            video_quality_720p = element['href']
        elif "480p" in element.text:
            video_quality_480p = element['href']

    if video_quality_720p:
        print(f"Video kualitas 720p: {video_quality_720p}")
        video_url = video_quality_720p
    elif video_quality_480p:
        print(f"Video kualitas 480p: {video_quality_480p}")
        video_url = video_quality_480p
    else:
        print("Tidak ditemukan video kualitas 720p atau 480p")
        video_url = None

    return video_title, artist, thumbnail_url, video_url
sankaku.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from bs4 import BeautifulSoup
|
| 2 |
+
|
| 3 |
+
def get_info_sankaku(soup: BeautifulSoup):
    """Pull artist, series, character names and the media URL from a parsed
    sankaku post page.

    Tag names are taken from the last '=' segment of each tag link's href and
    normalised from snake_case to Title Case. Raises AttributeError when an
    expected tag block is absent from the page (same as the original).

    Returns:
        tuple: (artist, series, characters, link) where *characters* is a
        comma-separated string and *link* is an absolute https URL.
    """
    def pretty(anchor):
        # href ends with "...=<snake_case_name>"; make it human readable.
        return anchor['href'].split('=')[-1].replace('_', ' ').title()

    # Artist and series each come from the first anchor of their tag <li>.
    artist = pretty(soup.find('li', class_='tag-type-artist').find('a'))
    series = pretty(soup.find('li', class_='tag-type-copyright').find('a'))

    # Every character tag on the post, joined into one display string.
    characters = ', '.join(
        pretty(li.find('a'))
        for li in soup.find_all('li', class_='tag-type-character')
    )

    # Media source: protocol-relative URL on the <source> element.
    link = 'https:' + soup.find('source')['src']

    return artist, series, characters, link
|
trailer.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from bs4 import BeautifulSoup
|
| 2 |
+
import requests
|
| 3 |
+
import os
|
| 4 |
+
import json
|
| 5 |
+
from other import get_video_info, get_digits, download_file, convert_videos, join_video
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def trailer(url):
    """Download a trailer page's thumbnail and sample video, then convert
    and join them into the final output file.

    The page at *url* is scraped for metadata (actress, series, title), the
    thumbnail is saved under the series directory, the trailer video is
    downloaded, renamed, converted to 720p and joined with its intro clip.

    Args:
        url: URL of the trailer page to process.
    """
    # Thumbnail working directory
    thumbnail_dir = "/home/user/app/Trailer/Thumbnail"
    if not os.path.exists(thumbnail_dir):
        os.makedirs(thumbnail_dir)

    # Where the raw downloads land
    video_dir = "/home/user/app/Hasil Download"
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')

    # Metadata used for naming the files (helper defined elsewhere)
    actress, series, title, video_title = get_video_info(soup)

    # BUG FIX: digits was only assigned inside the successful-thumbnail
    # branch, so a missing/failed thumbnail raised NameError further down.
    digits = None

    # Thumbnail URL comes from the twitter:image meta element
    thumbnail = soup.find("meta", attrs={"name": "twitter:image"})
    if thumbnail:
        thumbnail_url = thumbnail['content']
        print(f"Thumbnail URL: {thumbnail_url}")
    else:
        print("Tidak ditemukan elemen meta dengan name twitter:image")

    if title and thumbnail and thumbnail_url:
        response = requests.get(thumbnail_url)
        if response.status_code == 200:
            thumbnail_directory = f"{thumbnail_dir}/{series}"
            if not os.path.exists(thumbnail_directory):
                os.makedirs(thumbnail_directory)

            # Digit code reused in both the thumbnail and video file names
            digits = get_digits(series, thumbnail_url)
            thumbnail_name = f"{series} {digits} - {actress}.jpg"

            with open(f"{thumbnail_directory}/{thumbnail_name}", 'wb') as f:
                f.write(response.content)
            print(f"Gambar thumbnail berhasil diunduh dengan nama file {thumbnail_name}")
        else:
            print(f"Gagal mengunduh gambar thumbnail dari {thumbnail_url}")
    else:
        print("Tidak dapat mengunduh gambar thumbnail karena tidak ditemukan thumbnail URL atau judul video")

    # Video URL from the structured-data (ld+json) script.
    # BUG FIX: guard against a missing script tag (previously an
    # AttributeError on script_tag.string).
    script_tag = soup.find('script', type='application/ld+json')
    selected_video_url = None
    if script_tag:
        data = json.loads(script_tag.string)
        for item in data['@graph']:
            if item['@type'] == 'VideoObject':
                selected_video_url = item['contentURL']
                break

    # Final video file name (digits may be None if the thumbnail step failed)
    video_name = f"{series} {digits} - {actress}.mp4"

    if selected_video_url:
        download_file(selected_video_url, video_title, video_dir)
        print(f"Video trailer berhasil diunduh dengan nama file {video_name}")

        # download_file saves under the page title; rename to the final name
        for filename in os.listdir(video_dir):
            if video_title in filename and filename.endswith('.mp4'):
                os.rename(os.path.join(video_dir, filename), os.path.join(video_dir, video_name))

    else:
        print("Tidak dapat mengunduh video karena tidak ditemukan URL atau judul video")

    print("==================================================================================")
    # Convert the downloaded video to 720p
    convert_videos(720)
    print("==================================================================================")
    # Join the converted video with its intro clip
    output_file = join_video()
    print("==================================================================================")