Spaces:
Paused
Paused
Upload 20 files
Browse files- Dockerfile +15 -0
- LICENSE +21 -0
- animeworld.py +146 -0
- config.json +40 -0
- config.py +31 -0
- convert.py +17 -0
- convert_date.py +39 -0
- cool.py +145 -0
- dictionaries.py +30 -0
- example.env +3 -0
- filmpertutti.py +136 -0
- info.py +145 -0
- loadenv.py +22 -0
- lordchannel.py +104 -0
- okru.py +19 -0
- requirements.txt +9 -0
- run.py +190 -0
- streamingcommunity.py +219 -0
- streamingwatch.py +103 -0
- tantifilm.py +267 -0
Dockerfile
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Use an official Python runtime as a parent image
FROM python:3.10-slim-buster

# Set the working directory in the container to /app
WORKDIR /app

# Install dependencies first: COPY only requirements.txt so this (slow) layer
# is cached and only rebuilt when the requirements change, not on every code edit.
# (COPY is preferred over ADD for plain files — ADD's URL/tar magic is not needed here.)
COPY requirements.txt /app/
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the sources (run.py, filmpertutti.py, ...) into the container at /app
COPY . /app

#EXPOSE the port, for now default is 8080 cause it's the only one really allowed by HuggingFace
EXPOSE 8080

# Run run.py when the container launches
CMD ["python", "run.py"]
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2024 Urlo30
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
animeworld.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import httpx
|
| 2 |
+
import asyncio
|
| 3 |
+
|
| 4 |
+
from bs4 import BeautifulSoup
|
| 5 |
+
import datetime
|
| 6 |
+
import json
|
| 7 |
+
from info import get_info_kitsu
|
| 8 |
+
import config
|
| 9 |
+
import re
|
| 10 |
+
# Italian -> English month names: AnimeWorld publishes release dates in
# Italian, while datetime.strptime's %B directive only understands English.
months = {
    "Gennaio": "January", "Febbraio": "February", "Marzo": "March",
    "Aprile": "April", "Maggio": "May", "Giugno": "June",
    "Luglio": "July", "Agosto": "August", "Settembre": "September",
    "Ottobre": "October", "Novembre": "November", "Dicembre": "December"
}
# Titles whose Kitsu (English) name differs from the title used on AnimeWorld;
# keys are the names returned by get_info_kitsu.
showname_replace = {
    "Attack on Titan": "L'attacco dei Giganti"
}

# TLD of the current AnimeWorld mirror, read from config.json ("AnimeWorld" -> "domain").
AW_DOMAIN = config.AW_DOMAIN
|
| 21 |
+
async def get_mp4(anime_url,ismovie,episode,client):
    """Resolve the direct .mp4 download URL for one episode of an AnimeWorld title.

    Walks the title page to the requested episode's player page, reads the
    "alternative download" anchor, and HEAD-checks it. Returns the URL, or
    None when the file no longer exists (HEAD returns 404).
    """
    listing = await client.get(anime_url, follow_redirects=True)
    listing_soup = BeautifulSoup(listing.text, 'lxml')
    # Episodes are anchors tagged with their number in data-episode-num.
    episode_anchor = listing_soup.find('a', {'data-episode-num': episode})
    player_url = f'https://animeworld.{AW_DOMAIN}{episode_anchor["href"]}'

    player = await client.get(player_url, follow_redirects=True)
    player_soup = BeautifulSoup(player.text, 'lxml')
    download_anchor = player_soup.find(
        'a', {'id': 'alternativeDownloadLink', 'class': 'm-1 btn btn-sm btn-primary'})
    mp4_url = download_anchor['href']

    # Probe the file before handing the URL out; dead links come back 404.
    probe = await client.head(mp4_url)
    if probe.status_code == 404:
        return None
    return mp4_url
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
async def _search_matches(showname, date, ismovie, episode, client, cookies, headers):
    """POST one query to AnimeWorld's JSON search API and return the mp4 URLs
    of the first result whose release date equals *date* (YYYY-MM-DD)."""
    params = {
        'keyword': showname,
    }
    response = await client.post(f'https://www.animeworld.{AW_DOMAIN}/api/search/v2', params=params, cookies=cookies, headers=headers, follow_redirects=True)
    data = json.loads(response.text)
    final_urls = []
    for anime in data["animes"]:
        # Release dates come back in Italian ("12 Gennaio 2024"); translate the
        # month name so strptime's English-only %B directive can parse it.
        release_date = anime["release"]
        for ita, eng in months.items():
            release_date = release_date.replace(ita, eng)
        release_date = datetime.datetime.strptime(release_date, "%d %B %Y")
        release_date = release_date.strftime("%Y-%m-%d")
        if release_date == date:
            identifier = anime["identifier"]
            link = anime["link"]
            anime_url = f'https://animeworld.{AW_DOMAIN}/play/{link}.{identifier}'
            final_url = await get_mp4(anime_url, ismovie, episode, client)
            final_urls.append(final_url)
            break
    return final_urls


async def old_search(showname, date, ismovie, episode, client):
    """Legacy search path: query AnimeWorld's search API for *showname* and
    again for its "(ITA)" dub variant, collecting mp4 URLs of date matches.

    Returns a list of URLs (entries may be None when the HEAD check in
    get_mp4 fails — preserved from the original behavior).
    """
    # NOTE(review): the session cookie and CSRF token below are hard-coded and
    # presumably expire — confirm they are still accepted before relying on
    # this path.
    cookies = {
        'sessionId': 's%3AtGSRfYcsIoaeV0nqFJgN69Zxixb_-uJU.fcNz%2FsJBiiP8v8TwthMN9%2FmynWFciI5gezZuz8CltyQ',
    }

    headers = {
        'authority': f'www.animeworld.{AW_DOMAIN}',
        'accept': 'application/json, text/javascript, */*; q=0.01',
        'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
        'csrf-token': 'oKFK43s4-BzfqPX27RlAORUd-iyiAfXyfDAo',
        'origin': f'https://www.animeworld.{AW_DOMAIN}',
        'referer': f'https://www.animeworld.{AW_DOMAIN}/',
        'sec-ch-ua': '"Not-A.Brand";v="99", "Chromium";v="124"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Android"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
        'x-requested-with': 'XMLHttpRequest',
    }

    # The site lists dubbed entries separately under "<name> (ITA)"; search
    # both spellings. (Was two near-identical inline copies of the same loop —
    # factored into _search_matches.)
    final_urls = await _search_matches(showname, date, ismovie, episode, client, cookies, headers)
    final_urls += await _search_matches(showname + " (ITA)", date, ismovie, episode, client, cookies, headers)
    return final_urls
|
| 106 |
+
|
| 107 |
+
async def search(showname, date, ismovie, episode, client):
    """Search AnimeWorld's filter page for *showname* released on *date*
    (YYYY-MM-DD) and return the list of resolved mp4 URLs.

    For each candidate poster, the tooltip page is fetched and its
    "Data di uscita" field compared against *date*; matches are resolved to
    direct links via get_mp4 (None results are dropped).
    """
    search_year = date[:4]
    # CONSISTENCY FIX: this URL hard-coded the ".so" mirror; every other
    # request in this module uses the configured AW_DOMAIN.
    response = await client.get(
        f'https://www.animeworld.{AW_DOMAIN}/filter?year={search_year}&sort=2&keyword={showname}',
        follow_redirects=True)
    soup = BeautifulSoup(response.text, 'lxml')
    anime_list = soup.find_all('a', class_=['poster', 'tooltipstered'])
    final_urls = []
    for anime in anime_list:
        anime_info_url = f'https://www.animeworld.{AW_DOMAIN}/{anime["data-tip"]}'
        response = await client.get(anime_info_url, follow_redirects=True)
        pattern = r'<label>Data di uscita:</label>\s*<span>\s*(.*?)\s*</span>'
        match = re.search(pattern, response.text, re.S)
        if not match:
            # Tooltip markup changed or an error page came back: skip this
            # candidate instead of crashing the whole search on match.group().
            continue
        release_date = match.group(1).strip()
        # Translate Italian month names so strptime's English %B can parse.
        for ita, eng in months.items():
            release_date = release_date.replace(ita, eng)
        release_date = datetime.datetime.strptime(release_date, "%d %B %Y")
        release_date = release_date.strftime("%Y-%m-%d")
        print(release_date)
        if release_date == date:
            anime_url = f'https://www.animeworld.{AW_DOMAIN}{anime["href"]}'
            final_url = await get_mp4(anime_url, ismovie, episode, client)
            if final_url:
                final_urls.append(final_url)

    return final_urls
|
| 131 |
+
|
| 132 |
+
async def animeworld(id, client):
    """Provider entry point: resolve AnimeWorld mp4 URLs for a Kitsu-style id
    ("kitsu:<id>" for movies, "kitsu:<id>:<episode>" for series).

    Returns the list of candidate URLs, or None on any failure.
    """
    try:
        print(id)
        parts = id.split(":")
        kitsu_id = parts[1]
        # A two-part id carries no episode component and is treated as a movie.
        ismovie = 1 if len(parts) == 2 else 0
        # BUG FIX: the episode index was read unconditionally, so movie ids
        # (only two parts) always raised IndexError and the lookup failed.
        # Movies fall back to episode "1" — assumed to match AnimeWorld's
        # single-episode listing for films; TODO confirm.
        episode = parts[2] if len(parts) > 2 else "1"
        showname, date = await get_info_kitsu(kitsu_id, client)
        if showname in showname_replace:
            showname = showname_replace[showname]
        final_urls = await search(showname, date, ismovie, episode, client)
        return final_urls
    except Exception as e:
        # Broad on purpose: one provider failing must not break the caller,
        # but surface the reason instead of a silent bare except.
        print("Animeworld failed", e)
        return None
|
| 146 |
+
|
config.json
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"Siti": {
|
| 3 |
+
"StreamingCommunity": {
|
| 4 |
+
"enabled": "1",
|
| 5 |
+
"domain": "buzz",
|
| 6 |
+
"fast_search": "0"
|
| 7 |
+
},
|
| 8 |
+
"Filmpertutti": {
|
| 9 |
+
"enabled": "0",
|
| 10 |
+
"domain": "diy"
|
| 11 |
+
},
|
| 12 |
+
"Tuttifilm":{
|
| 13 |
+
"enabled": "0",
|
| 14 |
+
"domain": "bond",
|
| 15 |
+
"fast_search": "0"
|
| 16 |
+
},
|
| 17 |
+
"Mysterius":{
|
| 18 |
+
"enabled": "0"
|
| 19 |
+
},
|
| 20 |
+
"LordChannel":{
|
| 21 |
+
"enabled":"1",
|
| 22 |
+
"domain":"com"
|
| 23 |
+
},
|
| 24 |
+
"StreamingWatch":{
|
| 25 |
+
"enabled":"1",
|
| 26 |
+
"domain":"org"
|
| 27 |
+
},
|
| 28 |
+
"AnimeWorld":{
|
| 29 |
+
"enabled":"1",
|
| 30 |
+
"domain":"so"
|
| 31 |
+
}
|
| 32 |
+
},
|
| 33 |
+
"General":{
|
| 34 |
+
"load_env": "0",
|
| 35 |
+
"HOST": "0.0.0.0",
|
| 36 |
+
"PORT": "8080",
|
| 37 |
+
"HF": "1"
|
| 38 |
+
}
|
| 39 |
+
}
|
| 40 |
+
|
config.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#LOAD THE CONFIG
# Flat settings module: reads config.json once at import time and exposes the
# values as module-level constants (all kept as the raw strings from the file).
import json

# Open the configuration file
with open('config.json') as f:
    # Load JSON data from file
    config = json.load(f)

# Accessing SC_DOMAIN
# Per-site settings: each provider has at least "enabled" ("0"/"1") and, for
# most, the "domain" (TLD) of its current mirror.
SITE = config["Siti"]
FT_DOMAIN = SITE["Filmpertutti"]['domain']
SC_DOMAIN = SITE["StreamingCommunity"]['domain']
TF_DOMAIN = SITE["Tuttifilm"]['domain']
LC_DOMAIN = SITE["LordChannel"]['domain']
SW_DOMAIN = SITE["StreamingWatch"]['domain']
AW_DOMAIN = SITE['AnimeWorld']['domain']
# Enabled flags stay "0"/"1" strings — callers compare against the string,
# not a boolean.
FILMPERTUTTI = SITE["Filmpertutti"]['enabled']
STREAMINGCOMMUNITY = SITE["StreamingCommunity"]['enabled']
MYSTERIUS = SITE["Mysterius"]['enabled']
TUTTIFILM = SITE["Tuttifilm"]['enabled']
LORDCHANNEL = SITE["LordChannel"]['enabled']
STREAMINGWATCH = SITE["StreamingWatch"]['enabled']
ANIMEWORLD = SITE['AnimeWorld']['enabled']
SC_FAST_SEARCH = SITE["StreamingCommunity"]['fast_search']
TF_FAST_SEARCH = SITE["Tuttifilm"]['fast_search']
#General
# Server/runtime settings: bind host and port, plus "load_env" and "HF" flags.
GENERAL = config['General']
dotenv = GENERAL["load_env"]
HOST = GENERAL["HOST"]
PORT = GENERAL["PORT"]
HF = GENERAL["HF"]
|
convert.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
from tmdbv3api import TMDb, Movie, TV
from loadenv import load_env
import config
# Read .env-style variables once at import time (see loadenv.load_env).
env_vars = load_env()

# TMDb API key used by the /find lookup below.
TMDB_KEY = env_vars.get('TMDB_KEY')
|
| 8 |
+
async def get_TMDb_id_from_IMDb_id(imdb_id,client):
    """Translate an IMDb id into a TMDb id via TMDb's /find endpoint.

    Prefers a movie match over a TV match; returns None when the id maps to
    neither.
    """
    lookup = await client.get(
        f'https://api.themoviedb.org/3/find/{imdb_id}',
        params={'external_source': 'imdb_id', 'api_key': f'{TMDB_KEY}'})
    payload = lookup.json()
    # /find buckets results by media type; take the first hit, movies first.
    for bucket in ('movie_results', 'tv_results'):
        if payload[bucket]:
            return payload[bucket][0]['id']
    return None
|
convert_date.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
def convert_US_date(date):
    """Return the most recent US release day (YYYY-MM-DD) from a TMDb
    release_dates payload.

    Theatrical releases (type 3) are preferred; digital (type 4) is the
    fallback. Returns None when there is no US entry or no date of either
    type.
    """
    us_data = next((entry for entry in date['results'] if entry["iso_3166_1"] == "US"), None)
    if us_data is None:
        return None
    for release_type, label in (
        (3, 'Latest US theatrical release date:'),
        (4, 'Latest US theatrical release date (type 4):'),
    ):
        candidates = [rd for rd in us_data['release_dates'] if rd['type'] == release_type]
        if candidates:
            # ISO-8601 timestamps sort lexicographically, so max() on the raw
            # string picks the latest release.
            newest = max(candidates, key=lambda rd: rd['release_date'])
            day = newest['release_date'].split('T')[0]
            print(label, day)
            return day
    return None
|
| 21 |
+
def convert_IT_date(date):
    """Return the most recent Italian release day (YYYY-MM-DD) from a TMDb
    release_dates payload.

    Theatrical releases (type 3) are preferred; digital (type 4) is the
    fallback. Returns None when there is no IT entry or no date of either
    type.
    """
    it_data = next((entry for entry in date['results'] if entry["iso_3166_1"] == "IT"), None)
    if it_data is None:
        return None
    for release_type, label in (
        (3, 'Latest IT theatrical release date:'),
        (4, 'Latest IT theatrical release date (type 4):'),
    ):
        candidates = [rd for rd in it_data['release_dates'] if rd['type'] == release_type]
        if candidates:
            # ISO-8601 timestamps sort lexicographically, so max() on the raw
            # string picks the latest release.
            newest = max(candidates, key=lambda rd: rd['release_date'])
            day = newest['release_date'].split('T')[0]
            print(label, day)
            return day
    return None
|
cool.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
import json
from info import get_info_tmdb,is_movie
from convert import get_TMDb_id_from_IMDb_id
from loadenv import load_env
# Read .env-style variables once at import time.
env_vars = load_env()
# API key for the private cookie/auth helper service used by get_links().
MYSTERIUS_KEY = env_vars.get('MYSTERIUS_KEY')
|
| 8 |
+
async def get_links(slug,season,episode,ismovie,client):
    """Fetch the stream map for a title from altadefinizione-originale.

    First obtains fresh auth material (a bearer token and an ap_session
    cookie) from a key-protected helper service, then queries the site's
    stream endpoint for the movie (ismovie == 1) or the given 0-based
    season/episode (ismovie == 0).

    Returns a dict mapping lowercase resolution name -> mp4 URL, or None on
    any failure.
    """
    try:
        headers = {
            "x-api-key": MYSTERIUS_KEY
        }
        # Helper service hands back {'cookie': <bearer token>, 'auth': <ap_session>}.
        response = await client.get("https://mammamia-urlo-ulala12431.hf.space/api/cookie", headers=headers)
        Auths = response.json()
        Bearer = Auths.get('cookie')
        ap_session = Auths.get('auth')

        cookies = {'ap_session': ap_session}

        headers = {
            'accept': 'application/json, text/plain, */*',
            'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
            'authorization': f'Bearer {Bearer}',
            'referer': f'https://altadefinizione-originale.com/play/{slug}',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 OPR/111.0.0.0',
            'x-requested-with': 'XMLHttpRequest',
        }
        if ismovie == 1:

            response = await client.get(f'https://altadefinizione-originale.com/api/post/urls/stream/{slug}',cookies=cookies,headers=headers)
        elif ismovie == 0:
            print("HERE SEASON",season)
            print("HERE EPISODE",episode)
            request_url =f'https://altadefinizione-originale.com/api/post/urls/stream/{slug}/{season}/{episode}'
            print(request_url)
            response = await client.get(request_url,cookies=cookies,headers=headers)
        try:
            video_data = response.json() # Assuming this is the JSON response containing video streams
            if 'streams' not in video_data:
                print("Invalid JSON format: 'streams' key not found or incorrect structure")
                return None

            streams = video_data['streams']

            resolutions = {}

            for stream in streams:
                resolution_name = stream['resolution']['name'].lower() # Convert resolution name to lowercase
                url = stream['url']

                # Remove everything after '.mp4' in the URL
                mp4_index = url.find('.mp4')
                if mp4_index != -1:
                    url = url[:mp4_index + 4] # +4 to include '.mp4' in the substring

                resolutions[resolution_name] = url

            return resolutions

        except KeyError as e:
            print(f"KeyError: {e}")
            return None
        except json.JSONDecodeError as e:
            print(f"JSONDecodeError: {e}")
            return None

    except requests.RequestException as e:
        # NOTE(review): callers appear to pass an httpx-style client; httpx
        # errors do not derive from requests.RequestException, so this handler
        # may never fire — confirm which client type is used.
        print(f"Request error: {e}")
        return None
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
# Example usage: Fetch video links
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
# Print the dictionary
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
async def search_imdb(showname,tmdba,client):
    """Look *showname* up on the site's search API and return the slug of the
    entry whose tmdb_id equals *tmdba*, or None when nothing matches.
    """
    wanted = str(tmdba)
    response = await client.get(
        f'https://altadefinizione-originale.com/api/search?search={showname}&page=1',
        follow_redirects=True)
    if response.status_code != 200:
        return None
    payload = response.json()
    # Results live under the 'data' key; compare tmdb_id as strings.
    for entry in payload.get('data', ()):
        if entry.get('tmdb_id') == wanted:
            slug = entry.get('slug')
            print(slug)
            return slug
    return None
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def parse_links(resolution_links):
    """Rewrite the site CDN hostname to its reachable alias and echo each
    resolution -> link pair.

    Returns a new {resolution: link} dict, or None (after logging) when
    *resolution_links* is empty/None.
    """
    if not resolution_links:
        print("Failed to fetch video links")
        return None
    print("Video links:")
    results = {}
    for quality, link in resolution_links.items():
        # str.replace is a no-op when the host isn't present, so no guard needed.
        link = link.replace("cdn.altadefinizione-originale.com", "protectlinknt.b-cdn.net")
        print(f"{quality}: {link}")
        results[quality] = link
    return results
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
async def cool(imdb, client):
    """Provider entry point for altadefinizione-originale ("Cool").

    *imdb* is either "tt..." (optionally ":season:episode") or a "tmdb:..."
    id. Returns a {resolution: url} dict via parse_links, or None on failure.
    """
    try:
        type = "Cool"
        general = is_movie(imdb)
        ismovie = general[0]
        imdb_id = general[1]
        if ismovie == 0:
            season = int(general[2])
            episode = int(general[3])

        if "tt" in imdb:
            # IMDb ids must be translated to TMDb ids first.
            tmdba = await get_TMDb_id_from_IMDb_id(imdb_id, client)
        else:
            tmdba = imdb_id.replace("tmdb:", "")

        showname = get_info_tmdb(tmdba, ismovie, type)

        slug = await search_imdb(showname, tmdba, client)
        print(ismovie)
        if ismovie == 1:
            season = None
            episode = None
            # BUG FIX: arguments were passed as (slug, episode, season); keep
            # the (slug, season, episode) order the get_links signature declares.
            resolution_links = await get_links(slug, season, episode, ismovie, client)
            results = parse_links(resolution_links)
            return results
        elif ismovie == 0:
            # The stream API is 0-indexed for seasons and episodes.
            season = season - 1
            episode = episode - 1
            # BUG FIX: this call was missing both `await` and the `client`
            # argument, so every series lookup raised and returned None.
            resolution_links = await get_links(slug, season, episode, ismovie, client)
            results = parse_links(resolution_links)
            return results
    except Exception as e:
        print("Cool Error", e)
        return None
|
dictionaries.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Hard-coded ok.ru embed players, keyed by channel id; alternative sources for
# the matching channels declared in STREAM below.
okru = {
    "rai1": "https://ok.ru/videoembed/7703488765552?nochat=1",
    "rai2": "https://ok.ru/videoembed/7805618364016?nochat=1"
}

# Static live-TV catalogue: one entry per channel carrying its id, display
# metadata ("title", "name", "poster") and the stream playlist URL ("url").
STREAM = {
    "channels": [
        {
            "id": "la7",
            "title": "LA7",
            "name": "LA7",
            "poster": "https://static.wikia.nocookie.net/logopedia/images/0/02/LA7_-_Logo_2011.svg/revision/latest?cb=20190728152519",
            "url": "https://d3749synfikwkv.cloudfront.net/v1/master/3722c60a815c199d9c0ef36c5b73da68a62b09d1/cc-74ylxpgd78bpb/Live.m3u8"
        },
        {
            "id": "rai1",
            "title": "Rai 1",
            "name": "Full HD",
            "poster": "https://upload.wikimedia.org/wikipedia/commons/thumb/f/fa/Rai_1_-_Logo_2016.svg/1280px-Rai_1_-_Logo_2016.svg.png",
            "url": "http://173.208.52.200/rai1/index.m3u8"
        },
        {
            "id": "rai2",
            "title": "Rai 2",
            "name": "Full HD",
            "poster": "https://upload.wikimedia.org/wikipedia/commons/thumb/9/99/Rai_2_-_Logo_2016.svg/1280px-Rai_2_-_Logo_2016.svg.png",
            "url": "http://173.208.52.200/rai2/index.m3u8"
        }
    ]
}
|
example.env
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
TMDB_KEY= "INSERT YOUR API KEY HERE"
|
| 2 |
+
|
| 3 |
+
|
filmpertutti.py
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
from tmdbv3api import TMDb, Movie, TV
|
| 3 |
+
import requests
|
| 4 |
+
from bs4 import BeautifulSoup,SoupStrainer
|
| 5 |
+
import string
|
| 6 |
+
import re
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
import dateparser
|
| 9 |
+
from convert import get_TMDb_id_from_IMDb_id
|
| 10 |
+
from info import get_info_tmdb, is_movie, get_info_imdb
|
| 11 |
+
from convert_date import convert_US_date
|
| 12 |
+
import logging
|
| 13 |
+
import config
|
| 14 |
+
|
| 15 |
+
FT_DOMAIN = config.FT_DOMAIN
|
| 16 |
+
|
| 17 |
+
#Some basic headers
# Desktop Chrome user agent; some hosts refuse requests with no/odd UA.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.10; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
    'Accept-Language': 'en-US,en;q=0.5'
}
#Map months to check if date = date
# English month abbreviations -> Italian month names; search() rewrites the
# scraped date with these before handing it to dateparser with languages=['it'].
month_mapping = {
    'Jan': 'Gennaio', 'Feb': 'Febbraio', 'Mar': 'Marzo', 'Apr': 'Aprile',
    'May': 'Maggio', 'Jun': 'Giugno', 'Jul': 'Luglio', 'Aug': 'Agosto',
    'Sep': 'Settembre', 'Oct': 'Ottobre', 'Nov': 'Novembre', 'Dec': 'Dicembre'
}
|
| 28 |
+
|
| 29 |
+
async def search(query, date, client):
    """Query Filmpertutti's WP REST search endpoint (*query*) and return the
    (link, post id) pair of the first post whose release date equals *date*
    (YYYY-MM-DD).

    Returns None implicitly when no post matches.
    """
    # BUG FIX: the original did `await client.get(query).json()`, which calls
    # .json() on the coroutine object itself and raises AttributeError.
    # Await the response first, then decode it.
    response = await client.get(query)
    posts = response.json()
    #Get link tid of every item and then open the link to see if the date = date
    for post in posts:  # renamed from `json`, which shadowed the builtin module
        link = post['link']
        tid = post['id']
        series_response = await client.get(link, headers=headers, follow_redirects=True)
        series_soup = BeautifulSoup(series_response.text, 'lxml')
        release_span = series_soup.find('span', class_='released')
        if release_span:
            if release_span.text != "Data di uscita: N/A":
                date_string = release_span.text.split(': ')[-1]  # Get the date part
                # Translate English month abbreviations to Italian so dateparser
                # can parse the string with languages=['it'].
                for eng, ita in month_mapping.items():
                    date_string = re.sub(rf'\b{eng}\b', ita, date_string)

                # Swap to YYYY-MM-DD formatting using dateparser
                release_date = dateparser.parse(date_string, languages=['it']).strftime("%Y-%m-%d")
                if release_date == date:
                    return link, tid
                else:
                    print("Date are not equals")
|
| 52 |
+
|
| 53 |
+
def get_episode_link(season, episode, tid, url):
    """Build the "show_video" request URL for one episode of post *tid*.

    *season*/*episode* are 1-based here; the site expects 0-based ids, hence
    the -1 on both.
    """
    query = (
        "?show_video=true"
        f"&post_id={tid}"
        f"&season_id={season - 1}"
        f"&episode_id={episode - 1}"
    )
    return url + query
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def get_film(url):
    """Build the "show_video" request URL for a film page."""
    return f"{url}?show_video=true"
|
| 63 |
+
|
| 64 |
+
async def get_real_link(tlink,client):
    """Open the "show_video" page at *tlink* and return the MixDrop embed URL
    advertised by its MIXDROP button, or None when the button is absent.
    """
    #Some basic code to get the mixdrop link
    page = await client.get(tlink, headers=headers, follow_redirects=True)
    # BUG FIX: HTTP responses expose the body via the `content` property
    # (bytes) in both requests and httpx; `page.content()` raised TypeError.
    soup = BeautifulSoup(page.content, features="lxml", parse_only=SoupStrainer('iframe'))
    iframe_src = soup.find('iframe')['src']

    iframe_page = await client.get(iframe_src, headers=headers, follow_redirects=True)
    iframe_soup = BeautifulSoup(iframe_page.content, features="lxml")

    # The host button carries the embed URL in its "meta-link" attribute.
    mega_button = iframe_soup.find('div', attrs={'class': 'megaButton', 'rel': 'nofollow'}, string='MIXDROP')
    if mega_button:
        real_link = mega_button.get('meta-link')
        return real_link
|
| 77 |
+
|
| 78 |
+
async def get_true_link(real_link,client):
    """Decode MixDrop's packed ("eval(function(p,a,c,k,e,d)...)") player script
    and return the direct https delivery URL of the video.
    """
    response = await client.get(real_link, headers=headers, follow_redirects=True)
    # Group 1 is the packed source, group 2 the pipe-separated substitution
    # dictionary the packer splits on.
    [s1, s2] = re.search(r"\}\('(.+)',.+,'(.+)'\.split", response.text).group(1, 2)
    # The third statement of the packed source holds the obfuscated URL
    # (sliced to drop its surrounding assignment and quotes).
    schema = s1.split(";")[2][5:-1]
    terms = s2.split("|")
    # Packer tokens are base-62 "digits" (0-9, a-z, A-Z) indexing into terms;
    # an empty term means the token stands for itself.
    charset = string.digits + string.ascii_letters
    d = dict()
    for i in range(len(terms)):
        d[charset[i]] = terms[i] or charset[i]
    # Re-expand the schema token by token into the final URL.
    s = 'https:'
    for c in schema:
        s += d[c] if c in d else c
    return s
|
| 91 |
+
|
| 92 |
+
async def filmpertutti(imdb,client):
    """Provider entry point for Filmpertutti.

    *imdb* is either "tt..." (optionally ":season:episode") or a "tmdb:..."
    id. Resolves the show's name and release date, finds the matching post
    via the WP REST API, then follows it to the MixDrop delivery link.
    Returns the streaming URL, or None when no post matches.
    """
    general = is_movie(imdb)
    ismovie = general[0]
    imdb_id = general[1]
    type = "Filmpertutti"
    if ismovie == 0 :
        season = int(general[2])
        episode = int(general[3])
    if "tt" in imdb:
        if ismovie == 0:
            #Get showname and date
            showname,date = await get_info_imdb(imdb_id,ismovie,type,client)
        else:
            #THIS IS needed cause the only way to get all releases dates is by giving a tmdb ID not a IMDB
            tmdba = await get_TMDb_id_from_IMDb_id(imdb_id,client)
            showname,date = get_info_tmdb(tmdba,ismovie,type)

    elif "tmdb" in imdb:
        #Get showname and date
        tmdba = imdb_id.replace("tmdb:","")
        showname,date = get_info_tmdb(tmdba,ismovie,type)
    # Spaces and dashes become '+' for the search query string.
    showname = showname.replace(" ", "+").replace("–", "+").replace("—","+")
    #Build the query
    query = f'https://filmpertutti.{FT_DOMAIN}/wp-json/wp/v2/posts?search={showname}&page=1&_fields=link,id'
    try:
        url,tid = await search(query,date,client)
    except:
        print("No results found")
        return None
    if ismovie == 0:
        episode_link = get_episode_link(season,episode,tid,url)
        #Let's get mixdrop link
        real_link = await get_real_link(episode_link,client)
        #let's get delivery link, streaming link
        streaming_link = await get_true_link(real_link,client)
        print(streaming_link)
        return streaming_link
    elif ismovie == 1:
        film_link = get_film(url)
        #Let's get mixdrop link
        real_link = await get_real_link(film_link,client)
        #let's get delivery link, streaming link
        streaming_link = await get_true_link(real_link,client)
        print(streaming_link)
        return streaming_link
|
info.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from loadenv import load_env
|
| 2 |
+
from tmdbv3api import TMDb, Movie, TV
|
| 3 |
+
from convert_date import convert_US_date, convert_IT_date
|
| 4 |
+
import requests
|
| 5 |
+
import config
|
| 6 |
+
import json
|
| 7 |
+
SC_FAST_SEARCH = config.SC_FAST_SEARCH
|
| 8 |
+
TF_FAST_SEARCH = config.TF_FAST_SEARCH
|
| 9 |
+
env_vars = load_env()
|
| 10 |
+
TMDB_KEY = env_vars.get('TMDB_KEY')
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def get_info_tmdb(tmbda,ismovie,type):
    """Fetch a title's name (and, per scraper, a date) from TMDb.

    tmbda   -- TMDb numeric id (the parameter name is a historical typo)
    ismovie -- 0 = TV show, 1 = movie
    type    -- name of the scraper the result is for. Each branch returns
               either `showname` alone or `(showname, date)`; callers must
               unpack according to their own branch's arity.
    """
    tmdb = TMDb()
    tmdb.api_key = f'{TMDB_KEY}'
    # Italian locale: the scraped sites index Italian titles.
    tmdb.language = 'it'
    if ismovie == 0:
        tv = TV()
        show= tv.details(tmbda)
        showname = show.name
        if type == "Filmpertutti":
            # Filmpertutti is matched on the full first-air date string.
            date= show.first_air_date
            print("Real date",date)
            return showname,date
        elif type == "StreamingCommunity":
            if SC_FAST_SEARCH == "0":
                n_season = show.number_of_seasons  # NOTE(review): computed but unused
                full_date = show.first_air_date
                # Slow search only compares the first-air year.
                date = full_date.split("-")[0]
                print(date)
                return showname,date
            else:
                return showname
        elif type == "Tuttifilm":
            if TF_FAST_SEARCH == "0":
                date = show.first_air_date
                date = date.split("-")[0]
                print("Real date",date)
                return showname,date
            else:
                return showname
        elif type == "Cool":
            return showname
        elif type == "LordChannel":
            # LordChannel cards show only a year, so compare years.
            date = show.first_air_date
            date = date.split("-")[0]
            print("Real date",date)
            return showname,date
        elif type == "StreamingWatch":
            date = show.first_air_date
            date = date.split("-")[0]
            print("Real date",date)
            return showname,date

    elif ismovie == 1:
        movie = Movie()
        show= movie.details(tmbda)
        showname= show.title
        #Get all release dates
        if type == "Filmpertutti":
            date = show.release_dates
            #GET US RELEASE DATE because filmpertutti somewhy uses US release date
            date = convert_US_date(date)
            return showname,date
        elif type == "StreamingCommunity":
            return showname
        elif type == "Tuttifilm":
            if TF_FAST_SEARCH == "0":
                date = show.release_date
                date = date.split("-")[0]
                print("Real date",date)
                return showname,date
            else:
                return showname
        elif type == "Cool":
            return showname
        elif type == "LordChannel":
            date = show.release_date
            date = date.split("-")[0]
            print("Real date",date)
            return showname,date
        elif type == "StreamingWatch":
            date = show.release_date
            date = date.split("-")[0]
            print("Real date",date)
            return showname,date
| 87 |
+
|
| 88 |
+
async def get_info_imdb(imdb_id, ismovie, type,client):
    """Resolve an IMDb id through TMDb's /find endpoint.

    Mirrors get_info_tmdb's per-scraper return arity: some branches return
    `showname`, others `(showname, date)`. Raises IndexError when TMDb has
    no match for the id (empty results list).
    """
    resp = await client.get(f'https://api.themoviedb.org/3/find/{imdb_id}?api_key={TMDB_KEY}&language=it&external_source=imdb_id')
    data = resp.json()
    if ismovie == 0:
        showname = data['tv_results'][0]['name']
        if type == "Filmpertutti":
            date= data['tv_results'][0]['first_air_date']
            print("Real date",date)
            return showname, date
        elif type == "StreamingCommunity":
            return showname
        elif type == "Tuttifilm":
            if TF_FAST_SEARCH == "0":
                date = data['tv_results'][0]['first_air_date']
                # Only the first-air year is used for matching.
                date = date.split("-")[0]
                return showname,date
            elif TF_FAST_SEARCH == "1":
                return showname
        elif type == "Cool":
            return showname

    elif ismovie == 1:
        showname= data['movie_results'][0]['title']
        if type == "Filmpertutti":
            # NOTE(review): bare return -> None. filmpertutti() resolves
            # movie dates via get_info_tmdb instead, so this path looks
            # unused — confirm before relying on it.
            return
        elif type == "StreamingCommunity":
            return showname
        elif type == "Tuttifilm":
            date = data['movie_results'][0]['release_date']
            date = date.split("-")[0]
            return showname,date
        elif type == "Cool":
            return showname
|
| 122 |
+
|
| 123 |
+
async def get_info_kitsu(kitsu_id, client):
    """Fetch title and start date for a Kitsu anime id.

    Returns a ``(canonical_title, start_date)`` tuple taken from the
    Kitsu public API payload.
    """
    response = await client.get(f'https://kitsu.io/api/edge/anime/{kitsu_id}')
    attributes = json.loads(response.text)['data']['attributes']
    return attributes['canonicalTitle'], attributes['startDate']
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def is_movie(imdb_id):
    """Classify a Stremio media id.

    Returns ``(1, base_id)`` for a movie id, or
    ``(0, base_id, season, episode)`` when the id carries
    ``:season:episode`` suffixes. A leading ``tmdb:`` prefix is stripped.
    """
    if "tmdb:" in imdb_id:
        imdb_id = imdb_id.replace("tmdb:", "")
    if ":" not in imdb_id:
        return 1, imdb_id
    parts = imdb_id.split(":")
    # parts[1] is the season; the last segment is the episode.
    return 0, parts[0], parts[1], parts[-1]
|
loadenv.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import config
|
| 3 |
+
import config
|
| 4 |
+
MYSTERIUS = config.MYSTERIUS
|
| 5 |
+
dotenv = config.dotenv
|
| 6 |
+
TUTTIFILM = config.TUTTIFILM
|
| 7 |
+
HF = config.HF
|
| 8 |
+
#You need to keep dotenv disabled on remote servers
|
| 9 |
+
if dotenv == "1":
|
| 10 |
+
from dotenv import load_dotenv
|
| 11 |
+
load_dotenv(".env")
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def load_env():
    """Collect the environment variables required by the enabled providers.

    Always includes TMDB_KEY; MYSTERIUS_KEY and PROXY_CREDENTIALS are
    added only when the corresponding feature flags are set in config.
    """
    env_vars = {'TMDB_KEY': os.getenv('TMDB_KEY')}
    if MYSTERIUS == "1":
        env_vars['MYSTERIUS_KEY'] = os.getenv('MYSTERIUS_KEY')
    # The proxy is only needed for Tuttifilm when hosted on Hugging Face.
    if TUTTIFILM == "1" and HF == "1":
        env_vars['PROXY_CREDENTIALS'] = os.getenv('PROXY')
    return env_vars
|
lordchannel.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from tmdbv3api import TMDb, Movie, TV
|
| 2 |
+
import requests
|
| 3 |
+
import logging
|
| 4 |
+
from bs4 import BeautifulSoup,SoupStrainer
|
| 5 |
+
from datetime import datetime
|
| 6 |
+
import dateparser
|
| 7 |
+
from convert import get_TMDb_id_from_IMDb_id
|
| 8 |
+
from info import get_info_tmdb, is_movie, get_info_imdb
|
| 9 |
+
import config
|
| 10 |
+
import re
|
| 11 |
+
import json
|
| 12 |
+
LC_DOMAIN = config.LC_DOMAIN
|
| 13 |
+
async def search(showname,date,season,episode,ismovie,client):
    """Search LordChannel's live-search endpoint and return (video_url, quality).

    Iterates the JSON search results, opens each card page, and accepts
    the first entry whose card year matches *date*. Returns None
    implicitly when nothing matches.
    """
    cookies = {
        'csrftoken': '7lvc502CZe8Zbx7iSX1xkZOBA1NbDxJZ',
    }

    headers = {
        'authority': f'lordchannel.{LC_DOMAIN}',
        'accept': '*/*',
        'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
        # 'cookie': 'csrftoken=7lvc502CZe8Zbx7iSX1xkZOBA1NbDxJZ',
        'referer': f'https://lordchannel.{LC_DOMAIN}/anime/anime-ita/',
        'sec-ch-ua': '"Not-A.Brand";v="99", "Chromium";v="124"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Android"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
        'x-requested-with': 'XMLHttpRequest',
    }

    params = {
        'media': showname,
        '_': '1724421723999',
    }

    response = await client.get(f'https://lordchannel.{LC_DOMAIN}/live_search/', params=params, cookies=cookies, headers=headers, follow_redirects=True)
    data = json.loads(response.text)
    for entry in data['data']:
        if entry is not None: # check if the a_tag exists
            href = entry['url']
            quality = entry['qualit\u00e0_video']
            link = f'https://lordchannel.{LC_DOMAIN}{href}'
            response = await client.get(link, follow_redirects=True)
            soup2 = BeautifulSoup(response.text,'lxml')
            # Second <li> of the card metadata ends with the release year.
            li_tag = soup2.select_one("ul.card__meta li:nth-of-type(2)")
            if li_tag is not None: # check if the li_tag exists
                card_date = li_tag.text[-4:]
                if card_date == date:
                    if ismovie == 1:
                        video_url = soup2.find('a', class_="btn-streaming streaming_btn")
                        video_url = video_url['href']
                        return video_url,quality
                    elif ismovie == 0:
                        # Season accordion panel: id="collapse<season>".
                        div = soup2.find('div', id=f'collapse{season}')
                        episode = episode -1 #Index start from 0 so I need to subtract 1
                        # NOTE(review): the zero-based index computed above is
                        # immediately overwritten by a hard-coded row index 2,
                        # so the same table row is returned regardless of the
                        # requested episode — looks like a bug; confirm against
                        # the site's table layout.
                        episode = div.select('tr')[2] # index is 2 because we want the correct element
                        video_url = href = episode.find('a').get('href')
                        return video_url,quality
                else:
                    print("Sadly date are not equals")
                    continue
|
| 65 |
+
|
| 66 |
+
async def get_m3u8(video_url, client):
    """Extract the first element of the page's `videoData` JS array.

    Returns the raw first entry (still quoted as in the JS literal),
    or None when the array is not present on the page.
    """
    page = await client.get(video_url, follow_redirects=True)
    found = re.search(r'const videoData = \[(.*?)\];', page.text)
    if found is None:
        return None
    entries = found.group(1).strip().split(', ')
    return entries[0]
|
| 75 |
+
|
| 76 |
+
async def lordchannel(imdb, client):
    """Resolve a Stremio id to a LordChannel stream.

    Returns ``(m3u8_url, quality)`` on success, or ``(None, None)`` on any
    failure so the caller can silently skip this provider.
    """
    try:
        general = is_movie(imdb)
        ismovie = general[0]
        imdb_id = general[1]
        type = "LordChannel"
        if ismovie == 0:
            season = int(general[2])
            episode = int(general[3])
            if "tt" in imdb:
                tmdba = await get_TMDb_id_from_IMDb_id(imdb_id,client)
            else:
                tmdba = imdb_id
        else:
            # Movies carry no season/episode; keep placeholders for search().
            season = None
            episode = None
            if "tt" in imdb:
                tmdba = await get_TMDb_id_from_IMDb_id(imdb_id,client)
            else:
                tmdba = imdb_id
        showname,date = get_info_tmdb(tmdba,ismovie,type)
        video_url,quality = await search(showname,date,season,episode,ismovie,client)
        url = await get_m3u8(video_url,client)
        # Strip the JS string quotes captured from the videoData literal.
        url = url.replace('"','')
        print(url)
        return url,quality
    except Exception:
        # Narrowed from a bare `except:`; this provider stays best-effort,
        # but KeyboardInterrupt/SystemExit now propagate correctly.
        print("Lordchannel Failed")
        return None,None
|
okru.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
from bs4 import BeautifulSoup
|
| 3 |
+
import json
|
| 4 |
+
from dictionaries import okru
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
async def okru_get_url(id, client):
    """Resolve an ok.ru channel id to its HLS master playlist URL.

    Looks up the embed page from the `okru` mapping, then reads the
    player metadata embedded in the OKVideo div's data-options JSON.
    """
    embed_link = okru[id]
    print(embed_link)
    page = await client.get(embed_link, follow_redirects=True)
    markup = BeautifulSoup(page.text, 'lxml')
    player_div = markup.find('div', {'data-module': 'OKVideo'})
    # data-options is JSON whose flashvars.metadata field is itself JSON.
    options = json.loads(player_div.get('data-options'))
    metadata = json.loads(options['flashvars']['metadata'])
    m3u8_link = metadata['hlsMasterPlaylistUrl']
    print(m3u8_link)
    return m3u8_link
|
requirements.txt
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
bs4
|
| 2 |
+
tmdbv3api
|
| 3 |
+
dateparser
|
| 4 |
+
python-dotenv
|
| 5 |
+
fastapi
|
| 6 |
+
uvicorn
|
| 7 |
+
tzdata
|
| 8 |
+
lxml
|
| 9 |
+
httpx
|
run.py
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import FastAPI, HTTPException
|
| 2 |
+
from fastapi.responses import JSONResponse
|
| 3 |
+
from filmpertutti import filmpertutti
|
| 4 |
+
from streamingcommunity import streaming_community
|
| 5 |
+
from tantifilm import tantifilm
|
| 6 |
+
from lordchannel import lordchannel
|
| 7 |
+
from streamingwatch import streamingwatch
|
| 8 |
+
import json
|
| 9 |
+
import config
|
| 10 |
+
import logging
|
| 11 |
+
from okru import okru_get_url
|
| 12 |
+
from animeworld import animeworld
|
| 13 |
+
from dictionaries import okru,STREAM
|
| 14 |
+
import httpx
|
| 15 |
+
# Configure logging
|
| 16 |
+
FILMPERTUTTI = config.FILMPERTUTTI
|
| 17 |
+
STREAMINGCOMMUNITY = config.STREAMINGCOMMUNITY
|
| 18 |
+
MYSTERIUS = config.MYSTERIUS
|
| 19 |
+
TUTTIFILM = config.TUTTIFILM
|
| 20 |
+
TF_DOMAIN = config.TF_DOMAIN
|
| 21 |
+
LORDCHANNEL = config.LORDCHANNEL
|
| 22 |
+
STREAMINGWATCH= config.STREAMINGWATCH
|
| 23 |
+
ANIMEWORLD = config.ANIMEWORLD
|
| 24 |
+
HOST = config.HOST
|
| 25 |
+
PORT = int(config.PORT)
|
| 26 |
+
HF = config.HF
|
| 27 |
+
if HF == "1":
|
| 28 |
+
HF = "🤗️"
|
| 29 |
+
#Cool code to set the hugging face if the service is hosted there.
|
| 30 |
+
else:
|
| 31 |
+
HF = ""
|
| 32 |
+
if MYSTERIUS == "1":
|
| 33 |
+
from cool import cool
|
| 34 |
+
|
| 35 |
+
app = FastAPI()
|
| 36 |
+
MANIFEST = {
|
| 37 |
+
"id": "org.stremio.mammamia",
|
| 38 |
+
"version": "1.0.5",
|
| 39 |
+
"catalogs": [
|
| 40 |
+
{"type": "tv", "id": "tv_channels", "name": "TV Channels"}
|
| 41 |
+
],
|
| 42 |
+
"resources": ["stream", "catalog","meta"],
|
| 43 |
+
"types": ["movie", "series", "tv"],
|
| 44 |
+
"name": "Mamma Mia",
|
| 45 |
+
"description": "Addon providing HTTPS Streams for Italian Movies,Series and Live TV! Note that you need to have Kitsu Addon installed in order to watch Anime",
|
| 46 |
+
"logo": "https://creazilla-store.fra1.digitaloceanspaces.com/emojis/49647/pizza-emoji-clipart-md.png"
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def respond_with(data):
    """Serialize *data* as JSON and attach permissive CORS headers."""
    response = JSONResponse(data)
    for header in ('Access-Control-Allow-Origin', 'Access-Control-Allow-Headers'):
        response.headers[header] = '*'
    return response
|
| 56 |
+
|
| 57 |
+
@app.get('/manifest.json')
def addon_manifest():
    """Serve the static Stremio addon manifest with CORS headers."""
    return respond_with(MANIFEST)
|
| 60 |
+
|
| 61 |
+
@app.get('/')
def root():
    """Landing page with install instructions for the addon."""
    return "Hello, this is a Stremio Addon providing HTTPS Stream for Italian Movies/Series, to install it add /manifest.json to the url and then add it into the Stremio search bar"
|
| 64 |
+
|
| 65 |
+
@app.get('/catalog/{type}/{id}.json')
def addon_catalog(type, id):
    """Return the TV-channel catalog for Stremio.

    Fix: the route used Flask-style `<type>`/`<id>` placeholders, which
    FastAPI does not recognize as path parameters — it requires `{type}`
    and `{id}` (as the /stream route already uses), so this endpoint
    could never receive its arguments.
    """
    if type != "tv":
        raise HTTPException(status_code=404)

    catalogs = {"metas": []}
    for channel in STREAM["channels"]:
        catalogs["metas"].append({
            "id": channel["id"],
            "type": "tv",
            "name": channel["title"],
            "poster": channel["poster"],  # Add poster URL if available
            "description": f"Watch {channel['title']}"
        })

    return respond_with(catalogs)
|
| 81 |
+
|
| 82 |
+
@app.get('/meta/{type}/{id}.json')
def addon_meta(type, id):
    """Return Stremio meta for a single TV channel, 404 otherwise.

    Fix: the route used Flask-style `<type>`/`<id>` placeholders, which
    FastAPI does not treat as path parameters — converted to the
    `{type}`/`{id}` syntax used by the /stream route.
    """
    if type != "tv":
        raise HTTPException(status_code=404)

    for channel in STREAM["channels"]:
        if channel["id"] == id:
            meta = {
                "id": id,
                "type": "tv",
                "name": channel["title"],
                "poster": channel["poster"],  # Add poster URL if available
                "description": f"Watch {channel['title']}",
                "background": "",  # Add background image URL if available
                "logo": "",  # Add logo URL if available
                "videos": [{
                    "title": channel["title"],
                    "streams": [{
                        "title": channel["title"],
                        "url": channel["url"]
                    }]
                }]
            }
            return respond_with({"meta": meta})

    raise HTTPException(status_code=404)
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
@app.get('/stream/{type}/{id}.json')
async def addon_stream(type, id):
    """Aggregate streams from every enabled provider for a Stremio id.

    For type "tv" the streams come from the static STREAM mapping (plus
    ok.ru resolution); for movies/series each provider flag in config
    gates an async scrape. Raises 404 when nothing is found.
    """
    if type not in MANIFEST['types']:
        raise HTTPException(status_code=404)
    streams = {'streams': []}
    async with httpx.AsyncClient() as client:
        if type == "tv":
            for channel in STREAM["channels"]:
                if channel["id"] == id:
                    # NOTE(review): uses channel['name'] while the catalog and
                    # meta endpoints read channel["title"] — confirm the STREAM
                    # schema actually has both keys.
                    streams['streams'].append({
                        'title': channel['name'],
                        'url': channel['url']
                    })
                    if id in okru:
                        channel_url = await okru_get_url(id,client)
                        streams['streams'].append({
                            'title': channel['name'] + "OKRU",
                            'url': channel_url
                        })
            if not streams['streams']:
                raise HTTPException(status_code=404)
            return respond_with(streams)
        else:
            logging.debug(f"Handling movie or series: {id}")
            if "kitsu" in id:
                # Anime ids are served exclusively by Animeworld.
                if ANIMEWORLD == "1":
                    animeworld_urls = await animeworld(id,client)
                    print(animeworld_urls)
                    if animeworld_urls:
                        i = 0
                        for url in animeworld_urls:
                            if url:
                                # First link is the original audio, second Italian.
                                if i == 0:
                                    title = "Original"
                                elif i == 1:
                                    title = "Italian"
                                streams['streams'].append({'title': f'{HF}Animeworld {title}', 'url': url})
                                i+=1
            else:
                if MYSTERIUS == "1":
                    results = await cool(id,client)
                    if results:
                        for resolution, link in results.items():
                            streams['streams'].append({'title': f'{HF}Mysterious {resolution}', 'url': link})
                if STREAMINGCOMMUNITY == "1":
                    url_streaming_community,url_720_streaming_community,quality_sc = await streaming_community(id,client)
                    if url_streaming_community is not None:
                        if quality_sc == "1080":
                            streams['streams'].append({'title': f'{HF}StreamingCommunity 1080p Max', 'url': url_streaming_community})
                            streams['streams'].append({'title': f'{HF}StreamingCommunity 720p Max', 'url': url_720_streaming_community})
                        else:
                            streams['streams'].append({'title': f'{HF}StreamingCommunity 720p Max', 'url': url_streaming_community})
                if LORDCHANNEL == "1":
                    url_lordchannel,quality_lordchannel = await lordchannel(id,client)
                    if quality_lordchannel == "FULL HD" and url_lordchannel != None:
                        streams['streams'].append({'title': f'{HF}LordChannel 1080p', 'url': url_lordchannel})
                    elif url_lordchannel != None:
                        streams['streams'].append({'title': f'{HF}LordChannel 720p', 'url': url_lordchannel})
                if FILMPERTUTTI == "1":
                    url_filmpertutti = await filmpertutti(id,client)
                    if url_filmpertutti is not None:
                        streams['streams'].append({'title': 'Filmpertutti', 'url': url_filmpertutti})
                if TUTTIFILM == "1":
                    url_tuttifilm = await tantifilm(id,client)
                    if url_tuttifilm:
                        # Tantifilm returns a dict of title->url on success.
                        if not isinstance(url_tuttifilm, str):
                            for title, url in url_tuttifilm.items():
                                streams['streams'].append({'title': f'{HF}Tantifilm {title}', 'url': url, 'behaviorHints': {'proxyHeaders': {"request": {"Referer": "https://d000d.com/"}}, 'notWebReady': True}})
                if STREAMINGWATCH == "1":
                    url_streamingwatch = await streamingwatch(id,client)
                    if url_streamingwatch:
                        streams['streams'].append({'title': f'{HF}StreamingWatch 720p', 'url': url_streamingwatch})
            if not streams['streams']:
                raise HTTPException(status_code=404)

            return respond_with(streams)
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
if __name__ == '__main__':
    import uvicorn
    # Serve the FastAPI app defined above; HOST/PORT come from config.
    uvicorn.run("run:app", host=HOST, port=PORT, log_level="info")
|
streamingcommunity.py
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from tmdbv3api import TMDb, Movie, TV
|
| 2 |
+
import logging
|
| 3 |
+
from bs4 import BeautifulSoup,SoupStrainer
|
| 4 |
+
from datetime import datetime
|
| 5 |
+
import dateparser
|
| 6 |
+
from convert import get_TMDb_id_from_IMDb_id
|
| 7 |
+
from info import get_info_tmdb, is_movie, get_info_imdb
|
| 8 |
+
import config
|
| 9 |
+
import json
|
| 10 |
+
import re
|
| 11 |
+
from urllib.parse import urlparse, parse_qs
|
| 12 |
+
|
| 13 |
+
#Get domain
|
| 14 |
+
SC_DOMAIN= config.SC_DOMAIN
|
| 15 |
+
SC_FAST_SEARCH = config.SC_FAST_SEARCH
|
| 16 |
+
|
| 17 |
+
headers = {
|
| 18 |
+
'User-Agent': 'Mozilla/5.0 (Windows NT 10.10; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
|
| 19 |
+
'Accept-Language': 'en-US,en;q=0.5'
|
| 20 |
+
}
|
| 21 |
+
|
| 22 |
+
#GET VERSION OF STREAMING COMMUNITY:
|
| 23 |
+
async def get_version(client):
    """Return StreamingCommunity's current Inertia build version.

    The version is embedded in the `data-page` JSON attribute of the
    `#app` div on any site page; API calls must echo it back in the
    `x-inertia-version` header. Falls back to a known historical value
    when scraping fails.
    """
    try:
        base_url = f'https://streamingcommunity.{SC_DOMAIN}/richiedi-un-titolo'
        response = await client.get(base_url, headers=headers, follow_redirects=True)
        # Soup the response and extract the Inertia page payload.
        soup = BeautifulSoup(response.text, "lxml")
        version = json.loads(soup.find("div", {"id": "app"}).get("data-page"))['version']
        return version
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate; any scraping failure falls back to the cached version.
        print("Couldn't find the version")
        version = "65e52dcf34d64173542cd2dc6b8bb75b"
        return version
|
| 40 |
+
|
| 41 |
+
async def search(query,date,ismovie, client):
    """Query StreamingCommunity's search API; return (tid, slug) of the match.

    Candidates must match *ismovie* (API type "tv" -> 0, "movie" -> 1).
    With SC_FAST_SEARCH == "0" a TV candidate is additionally verified by
    scraping its title page and comparing the first-air year to *date*.
    Returns None implicitly when nothing matches.
    """
    #Do a request to get the ID of serie/move and it's slug in the URL
    response = await client.get(query, follow_redirects=True)
    response = response.json()

    for item in response['data']:
        tid = item['id']
        slug = item['slug']
        type = item['type']
        if type == "tv":
            type = 0
        elif type == "movie":
            type = 1
        if type == ismovie:
            #Added a Check to see if the result is what it is supposed to be
            if SC_FAST_SEARCH == "0":
                if ismovie == 0:
                    response = await client.get ( f'https://streamingcommunity.{SC_DOMAIN}/titles/{tid}-{slug}', follow_redirects=True)
                    # The "features" div's first span holds the air-date range.
                    pattern = r'<div[^>]*class="features"[^>]*>.*?<span[^>]*>(.*?)<\/span>'
                    match = re.search(pattern, response.text)
                    print(match.group(1).split("-")[0])
                    first_air_year = match.group(1).split("-")[0]
                    date = int(date)
                    first_air_year = int(first_air_year)
                    if first_air_year == date:
                        return tid,slug
                elif ismovie == 1:
                    return tid,slug
            elif SC_FAST_SEARCH == "1":
                return tid,slug
    else:
        # for/else: reached only when no candidate returned above.
        print("Couldn't find anything")
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
async def get_film(tid,version,client):
    """Build the vixcloud playlist URLs for movie *tid*.

    Returns (full_quality_url, url720, quality): the tokenized playlist
    URL, a bare 720p fallback URL, and the advertised max quality.
    """
    headers = {
        'user-agent': "Mozilla/5.0 (Windows NT 10.10; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537",
        'x-inertia': 'true',
        #Version of streaming community
        'x-inertia-version': version
    }
    #Access the iframe
    url = f'https://streamingcommunity.{SC_DOMAIN}/iframe/{tid}'
    response = await client.get(url, headers=headers, follow_redirects=True)
    iframe = BeautifulSoup(response.text, 'lxml')
    #Get the link of iframe
    iframe = iframe.find('iframe').get("src")
    #Get the ID containted in the src of iframe
    vixid = iframe.split("/embed/")[1].split("?")[0]
    parsed_url = urlparse(iframe)
    query_params = parse_qs(parsed_url.query)
    #Get real token and expires by looking at the page in the iframe, vixcloud/embed
    resp = await client.get(iframe, headers = headers, follow_redirects=True)
    soup= BeautifulSoup(resp.text, "lxml")
    script = soup.find("body").find("script").text
    token = re.search(r"'token':\s*'(\w+)'", script).group(1)
    expires = re.search(r"'expires':\s*'(\d+)'", script).group(1)
    quality = re.search(r'"quality":(\d+)', script).group(1)
    #Example url https://vixcloud.co/playlist/231315?b=1&token=bce060eec3dc9d1965a5d258dc78c964&expires=1728995040&rendition=1080p
    url = f'https://vixcloud.co/playlist/{vixid}?token={token}&expires={expires}'
    # Propagate the FHD / bitrate flags from the embed URL's query string.
    if 'canPlayFHD' in query_params:
        canPlayFHD = 'h=1'
        url += "&h=1"
    if 'b' in query_params:
        b = 'b=1'
        url += "&b=1"
    url720 = f'https://vixcloud.co/playlist/{vixid}'
    return url,url720,quality,
|
| 109 |
+
|
| 110 |
+
async def get_season_episode_id(tid, slug, season, episode, version, client):
    """Return the internal id of *episode* within *season* for title *tid*.

    Queries the season page via the Inertia JSON API and scans the
    episode list; returns None when the episode number is not found.
    """
    headers = {
        'user-agent': "Mozilla/5.0 (Windows NT 10.10; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3",
        'x-inertia': 'true',
        # Version of streaming community
        'x-inertia-version': version
    }
    season_url = f'https://streamingcommunity.{SC_DOMAIN}/titles/{tid}-{slug}/stagione-{season}'
    response = await client.get(season_url, headers=headers, follow_redirects=True)
    episodes = response.json().get('props', {}).get('loadedSeason', {}).get('episodes', [])
    return next((entry['id'] for entry in episodes if entry['number'] == episode), None)
|
| 125 |
+
|
| 126 |
+
async def get_episode_link(episode_id,tid,version,client):
    """Build the vixcloud playlist URLs for one series episode.

    Mirrors get_film() but targets the episode-specific iframe.
    Returns (full_quality_url, url720, quality).
    """
    #The parameters for the request
    params = {
        'episode_id': episode_id,
        'next_episode': '1'
    }
    #Let's try to get the link from iframe source
    # Make a request to get iframe source
    response = await client.get(f"https://streamingcommunity.{SC_DOMAIN}/iframe/{tid}", params=params, follow_redirects=True)

    # Parse response with BeautifulSoup to get iframe source
    soup = BeautifulSoup(response.text, "lxml")
    iframe = soup.find("iframe").get("src")
    vixid = iframe.split("/embed/")[1].split("?")[0]
    headers = {
        'user-agent': "Mozilla/5.0 (Windows NT 10.10; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537",
        'x-inertia': 'true',
        #Version of streaming community
        'x-inertia-version': version
    }
    parsed_url = urlparse(iframe)
    query_params = parse_qs(parsed_url.query)
    #Get real token and expires by looking at the page in the iframe, vixcloud/embed
    resp = await client.get(iframe, headers = headers, follow_redirects=True)
    soup= BeautifulSoup(resp.text, "lxml")
    script = soup.find("body").find("script").text
    token = re.search(r"'token':\s*'(\w+)'", script).group(1)
    expires = re.search(r"'expires':\s*'(\d+)'", script).group(1)
    quality = re.search(r'"quality":(\d+)', script).group(1)
    #Example url https://vixcloud.co/playlist/231315?b=1&token=bce060eec3dc9d1965a5d258dc78c964&expires=1728995040&rendition=1080p
    url = f'https://vixcloud.co/playlist/{vixid}?token={token}&expires={expires}'
    # Propagate the FHD / bitrate flags from the embed URL's query string.
    if 'canPlayFHD' in query_params:
        canPlayFHD = 'h=1'
        url += "&h=1"
    if 'b' in query_params:
        b = 'b=1'
        url += "&b=1"
    url720 = f'https://vixcloud.co/playlist/{vixid}'
    return url,url720,quality
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
async def streaming_community(imdb,client):
    """Entry point: resolve a Stremio id to StreamingCommunity playlist URLs.

    Returns (url, url720, quality), or (None, None, None) on any failure.
    """
    try:
        general = is_movie(imdb)
        ismovie = general[0]
        imdb_id = general[1]
        type = "StreamingCommunity"
        if ismovie == 0 :
            season = int(general[2])
            episode = int(general[3])
            #Check if fast search is enabled or disabled
            if SC_FAST_SEARCH == "1":
                if "tt" in imdb:
                    #Get showname
                    showname = await get_info_imdb(imdb_id,ismovie,type,client)
                    date = None
                else:
                    #I just set n season to None to avoid bugs, but it is not needed if Fast search is enabled
                    date = None
                    #else just equals them
                    tmdba = imdb_id.replace("tmdb:","")
                    showname = get_info_tmdb(tmdba,ismovie,type)
            elif SC_FAST_SEARCH == "0":
                tmdba = await get_TMDb_id_from_IMDb_id(imdb_id,client)
                showname,date = get_info_tmdb(tmdba,ismovie,type)
        #HERE THE CASE IF IT IS A MOVIE
        else:
            if "tt" in imdb:
                #Get showname
                date = None
                showname = await get_info_imdb(imdb_id,ismovie,type,client)
            else:
                date = None
                tmdba = imdb_id.replace("tmdb:","")
                showname = get_info_tmdb(tmdba,ismovie,type)

        # The search API expects '+' separators; normalize dashes too.
        showname = showname.replace(" ", "+").replace("–", "+").replace("—","+")
        query = f'https://streamingcommunity.{SC_DOMAIN}/api/search?q={showname}'
        tid,slug = await search(query,date,ismovie,client)
        version = await get_version(client)
        if ismovie == 1:
            #TID means temporaly ID
            url,url720,quality = await get_film(tid,version,client)
            print(url)
            return url,url720,quality
        if ismovie == 0:
            #Uid = URL ID
            episode_id = await get_season_episode_id(tid,slug,season,episode,version,client)
            url,url720,quality = await get_episode_link(episode_id,tid,version,client)
            print(url)
            return url,url720,quality
    except Exception as e:
        print("StreamingCommunity failed")
        return None,None,None
|
streamingwatch.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from tmdbv3api import TMDb, Movie, TV
|
| 2 |
+
import requests
|
| 3 |
+
import logging
|
| 4 |
+
from bs4 import BeautifulSoup,SoupStrainer
|
| 5 |
+
from datetime import datetime
|
| 6 |
+
import dateparser
|
| 7 |
+
from convert import get_TMDb_id_from_IMDb_id
|
| 8 |
+
from info import get_info_tmdb, is_movie, get_info_imdb
|
| 9 |
+
import config
|
| 10 |
+
import re
|
| 11 |
+
import json
|
| 12 |
+
SW_DOMAIN = config.SW_DOMAIN
|
| 13 |
+
async def search(showname, season, episode, date, ismovie, client):
    """Locate the hdplayer embed URL for a title on streamingwatch.

    Movies are found through the site's WordPress admin-ajax live search
    and matched by release year; series are resolved via the WP REST API
    (category lookup, then episode slug matching).

    Returns the embed URL string, or implicitly None when the year does
    not match / no episode slug is found.
    """
    if ismovie == 1:
        query = f'https://www.streamingwatch.{SW_DOMAIN}/wp-admin/admin-ajax.php'
        # Browser-like XHR headers; the ajax endpoint expects a same-origin
        # XMLHttpRequest-shaped POST.
        headers = {
            'authority': f'www.streamingwatch.{SW_DOMAIN}',
            'accept': '*/*',
            'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
            'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
            # 'cookie': 'wordpress_test_cookie=WP%20Cookie%20check',
            'origin': f'https://www.streamingwatch.{SW_DOMAIN}',
            'referer': f'https://www.streamingwatch.{SW_DOMAIN}',
            'sec-ch-ua': '"Not-A.Brand";v="99", "Chromium";v="124"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Android"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
            'x-requested-with': 'XMLHttpRequest',
        }
        # NOTE(review): the _wpnonce is hard-coded — WordPress nonces
        # expire, so this value likely needs periodic refreshing.
        data = {
            'action': 'data_fetch',
            'keyword': showname,
            '_wpnonce': '648328b831',
        }
        cookies = {
            'wordpress_test_cookie': 'WP%20Cookie%20check',
        }
        response = await client.post(query, cookies=cookies, headers=headers, data=data)
        soup = BeautifulSoup(response.content, 'lxml')
        # Only the first result's release year is compared against `date`.
        page_date = soup.find(id='search-cat-year').text.strip()
        if page_date == date:
            href = soup.find('a')['href']
            response = await client.get(href, follow_redirects=True)
            soup = BeautifulSoup(response.text, 'lxml', parse_only=SoupStrainer('iframe'))
            iframe = soup.find('iframe')
            # The player iframe is lazy-loaded: the real URL sits in
            # data-lazy-src rather than src.
            hdplayer = iframe.get('data-lazy-src')

            return hdplayer
    elif ismovie == 0:
        # Some series are listed under an English name, so first resolve
        # the category id by name, then enumerate that category's posts.
        id_response = await client.get(f'https://streamingwatch.{SW_DOMAIN}/wp-json/wp/v2/categories?search={showname}&_fields=id', follow_redirects=True)
        data = json.loads(id_response.text)
        category_id = data[0]['id']
        query = f'https://streamingwatch.{SW_DOMAIN}/wp-json/wp/v2/posts?categories={category_id}&per_page=100'
        response = await client.get(query, follow_redirects=True)
        data_json = response.text
        data = json.loads(data_json)
        for entry in data:
            # Episodes are identified by slug, e.g. "...stagione-1-episodio-2..."
            if f"stagione-{season}-episodio-{episode}" in entry["slug"]:
                content = entry["content"]["rendered"]
                # Example payload:
                # "content":{
                #   "rendered":"<p><!--baslik:PRO--><iframe loading=\"lazy\" src=\"https:\/\/hdplayer.gives\/embed\/YErLVq64uNTZRNz\" ...><\/iframe><\/p>\n","protected":false}
                # Extract the iframe URL between src=" and the closing quote.
                start = content.find('src="') + len('src="')  # start of url
                end = content.find('"', start)  # end of url
                hdplayer = content[start:end]
                return hdplayer
| 70 |
+
async def hls_url(hdplayer, client):
    """Extract the HLS manifest URL from an hdplayer embed page."""
    page = await client.get(hdplayer, follow_redirects=True)
    # The player inlines its config as JS: sources: [{file: "<m3u8 url>", ...}]
    found = re.search(r'sources:\s*\[\s*\{\s*file\s*:\s*"([^"]*)"', page.text)
    stream_url = found.group(1)
    print(stream_url)
    return stream_url
|
| 76 |
+
async def streamingwatch(imdb, client):
    """Entry point: resolve an id to a direct HLS URL via StreamingWatch.

    Args:
        imdb: "tt..." IMDb id or "tmdb:..." id, with ":season:episode"
            appended for series.
        client: async HTTP client shared by every request.

    Returns:
        The stream URL string, or None on any failure.
    """
    try:
        general = is_movie(imdb)
        ismovie = general[0]
        imdb_id = general[1]
        provider = "StreamingWatch"
        if ismovie == 0:
            season = int(general[2])
            episode = int(general[3])
        else:
            season = None
            episode = None
        # Both branches need a TMDb id: convert IMDb ids, pass TMDb ids
        # through. (Hoisted — the original duplicated this check per branch.)
        if "tt" in imdb:
            tmdba = await get_TMDb_id_from_IMDb_id(imdb_id, client)
        else:
            # CONSISTENCY FIX: strip the "tmdb:" prefix as the sibling
            # providers do before calling get_info_tmdb (the original
            # passed the prefixed id straight through).
            tmdba = imdb_id.replace("tmdb:", "")
        showname, date = get_info_tmdb(tmdba, ismovie, provider)
        # Site search expects '+'-separated words; also normalize dashes.
        showname = showname.replace(" ", "+").replace("–", "+").replace("—", "+")
        hdplayer = await search(showname, season, episode, date, ismovie, client)
        url = await hls_url(hdplayer, client)
        return url
    except Exception as e:
        # FIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit propagate, and the actual error is reported.
        print("StreamingWatch Failed:", e)
        return None
|
tantifilm.py
ADDED
|
@@ -0,0 +1,267 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
from bs4 import BeautifulSoup,SoupStrainer
|
| 3 |
+
import re
|
| 4 |
+
import time
|
| 5 |
+
from info import is_movie,get_info_imdb,get_info_tmdb
|
| 6 |
+
import config
|
| 7 |
+
from loadenv import load_env
|
| 8 |
+
TF_FAST_SEARCH = config.TF_FAST_SEARCH
|
| 9 |
+
TF_DOMAIN = config.TF_DOMAIN
|
| 10 |
+
HF = config.HF
|
| 11 |
+
env_vars = load_env()
|
| 12 |
+
PROXY_CREDENTIALS = env_vars.get('PROXY_CREDENTIALS')
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
async def search(showname, ismovie, date, client):
    """Search tanti.<domain> and return the first result whose release
    year matches `date`.

    Args:
        showname: '+'-separated title query.
        ismovie: 1 for movies, 0 for series.
        date: release year as a string, compared against the page.
        client: async HTTP client.

    Returns:
        (tid, url) for movies, (url, embed_id) for series; implicitly
        None when no result matches the requested year.
    """
    url = f'https://www.tanti.{TF_DOMAIN}/search/{showname}'
    response = await client.get(url, follow_redirects=True)
    soup = BeautifulSoup(response.text, "lxml")
    if ismovie == 1:
        all_link = soup.select('#movies .col .list-media')
        for link in all_link:
            url = link['href']
            # BUG FIX: this request was missing `await`, so `response` was
            # a coroutine and the regex below could never match.
            response = await client.get(url, follow_redirects=True)
            # Release year sits in a "Data di rilascio" detail row.
            pattern = r'Data di rilascio\s*</div>\s*<div class="text">\s*(\d{4})\s*</div>'
            found_date = re.search(pattern, response.text)
            release_date = str(found_date.group(1))
            if release_date == date:
                # NOTE(review): fast_search derives the id with
                # split('-')[1]; this branch uses [-1] — verify which the
                # site's URL scheme actually requires.
                tid = url.split('-')[-1]
                return tid, url
    elif ismovie == 0:
        all_link = soup.select('#series .col .list-media')
        for link in all_link:
            base_url = link['href']
            # Season 1 / episode 1 page is enough to read series metadata.
            url = f'{base_url}-1-season-1-episode'
            response = await client.get(url, follow_redirects=True)
            pattern = r'Data di rilascio\s*</div>\s*<div class="text">\s*(\d{4})\s*</div>'
            found_date = re.search(pattern, response.text)
            release_date = str(found_date.group(1))
            if release_date == date:
                soup = BeautifulSoup(response.text, 'lxml')
                # The currently selected player button carries the embed id.
                a_tag = soup.find('a', class_='dropdown-toggle btn-service selected')
                embed_id = a_tag['data-embed']
                return url, embed_id
|
| 48 |
+
|
| 49 |
+
async def fast_search(showname, ismovie, client):
    """Take the first search hit, skipping the release-date verification.

    Returns (tid, url) for movies, (url, embed_id) for series.
    """
    query_url = f'https://www.tanti.{TF_DOMAIN}/search/{showname}'
    page = await client.get(query_url, follow_redirects=True)
    results = BeautifulSoup(page.text, "lxml")
    if ismovie == 1:
        hit = results.select_one('#movies .col .list-media')
        movie_url = hit['href']
        # NOTE(review): search() extracts the id with split('-')[-1] while
        # this uses [1]; kept as-is to preserve behavior — verify.
        tid = movie_url.split('-')[1]
        return tid, movie_url
    elif ismovie == 0:
        series_hit = results.select_one('#series .col .list-media')
        base_url = series_hit['href']
        # Season 1 / episode 1 page exposes the player's embed id.
        episode_url = f'{base_url}-1-season-1-episode'
        episode_page = await client.get(episode_url, follow_redirects=True)
        episode_soup = BeautifulSoup(episode_page.text, 'lxml')
        selected = episode_soup.find('a', class_='dropdown-toggle btn-service selected')
        return episode_url, selected['data-embed']
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
async def get_protect_link(id, url, client):
    """Return the 'protect' iframe link for a movie, or — when the default
    player does not expose one — a dict of {mirror_title: resolved_url}.

    Args:
        id: numeric id extracted from the movie URL.
        url: the movie page URL (fallback source and Referer).
        client: async HTTP client.
    """
    # Default player page; its iframe usually points straight at /protect/.
    response = await client.get(f"https://p.hdplayer.casa/myadmin/play.php?id={id}", follow_redirects=True)
    soup = BeautifulSoup(response.text, "lxml", parse_only=SoupStrainer('iframe'))
    protect_link = soup.iframe['src']
    if "protect" in protect_link:
        return protect_link
    else:
        # Fallback (e.g. when the title has a 3D/extended variant): go
        # through the site's ajax embed endpoint and walk every mirror.
        response = await client.get(url, follow_redirects=True)
        soup = BeautifulSoup(response.text, 'lxml')
        a_tag = soup.find('a', class_='dropdown-toggle btn-service selected')
        embed_id = a_tag['data-embed']
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36',
            'Referer': url
        }
        # The only parameter the endpoint needs is the embed id.
        data = {
            'id': embed_id
        }
        ajax_url = f"https://www.tanti.{TF_DOMAIN}/ajax/embed"
        response = await client.post(ajax_url, headers=headers, data=data)
        # Strip the fixed HTML wrapper around the player URL.
        # NOTE(review): slice offsets are brittle — verify against the
        # endpoint's actual response shape.
        hdplayer = response.text[43:-27]
        response = await client.get(hdplayer, follow_redirects=True)
        soup = BeautifulSoup(response.text, 'lxml')
        links_dict = {}
        li_tags = soup.select('ul.nav.navbar-nav li.dropdown')
        for li_tag in li_tags:
            a_tag = li_tag.find('a')
            if a_tag:
                title = a_tag.text.strip()
                # Skip the broken native Tantifilm player entries.
                if title == "1" or "Tantifilm" in title:
                    continue
                href = a_tag['href']
                response = await client.get(href, follow_redirects=True)
                soup = BeautifulSoup(response.text, "lxml", parse_only=SoupStrainer('iframe'))
                protect_link = soup.iframe['src']
                if "protect" in protect_link:
                    # BUG FIX: true_url is async and requires the client;
                    # the original called it without `await` and without
                    # `client`, storing a coroutine / raising TypeError.
                    links_dict[title] = await true_url(protect_link, client)
        return links_dict
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
async def get_nuovo_indirizzo_and_protect_link(url, embed_id, season, episode, client):
    """Resolve a series episode down to its 'protect' iframe link.

    Posts the embed id to the ajax endpoint to obtain the player URL,
    navigates to the requested season/episode, and returns the src of the
    resulting protect iframe.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36',
        'Referer': url
    }
    # The only parameter the endpoint needs is the embed id.
    data = {
        'id': embed_id
    }
    ajax_url = f"https://www.tanti.{TF_DOMAIN}/ajax/embed"
    response = await client.post(ajax_url, headers=headers, data=data)
    # Strip the fixed HTML wrapper around the player URL.
    # NOTE(review): slice offsets are brittle — verify against the response.
    nuovo_indirizzo = response.text[43:-27]
    response = await client.get(nuovo_indirizzo, follow_redirects=True)
    soup = BeautifulSoup(response.text, 'lxml')
    # Seasons are 1-based in the caller; the dropdown list is 0-indexed.
    season = season - 1
    li_tags = soup.select('ul.nav.navbar-nav > li.dropdown')
    if len(li_tags) != 1:
        # Multiple seasons: pick the matching season dropdown first.
        link = li_tags[season].find('a')['href']
        response = await client.get(link, follow_redirects=True)
        soup = BeautifulSoup(response.text, 'lxml')
        # nth-of-type is 1-based, so `episode` is used as-is here.
        option_tag = soup.select(f'select[name="ep_select"] > option:nth-of-type({episode})')[0]
        link = option_tag['value']
        # Follow the episode link and pull the protect iframe out of it.
        response = await client.get(link, follow_redirects=True)
        soup = BeautifulSoup(response.text, "lxml", parse_only=SoupStrainer('iframe'))
        protect_link = soup.iframe['src']
        return protect_link

    else:
        # Single season: the episode <select> is on this page already.
        # NOTE(review): this branch indexes with `episode` 0-based while
        # the branch above uses 1-based nth-of-type — possible off-by-one
        # unless the first <option> is a placeholder; confirm on the site.
        option_tag = soup.select('select.dynamic_select > option')[episode]
        link = option_tag['value']
        # Follow the episode link and pull the protect iframe out of it.
        response = await client.get(link, follow_redirects=True)
        soup = BeautifulSoup(response.text, "lxml", parse_only=SoupStrainer('iframe'))
        protect_link = soup.iframe['src']
        return protect_link
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
async def true_url(protect_link, client):
    """Follow a /protect/ link to doodstream and build the final media URL.

    Returns the playable URL string, or None when the page does not
    contain the expected pass_md5/token markup or the request fails.
    """
    print(protect_link)
    # doodstream requires a Range header and its own Referer to serve the
    # pass_md5 endpoint.
    headers = {
        "Range": "bytes=0-",
        "Referer": "https://d000d.com/",
    }
    if HF == "1":
        # Running on HuggingFace: route the request through the proxy.
        # NOTE(review): per-request `proxies=` is a requests-style kwarg —
        # confirm the async client supports it (httpx configures proxies
        # on the client object, not per call).
        proxy = PROXY_CREDENTIALS
        proxies = {
            "http": proxy,
            "https": proxy
        }
        response = await client.get(protect_link, proxies=proxies, follow_redirects=True)
    else:
        response = await client.get(protect_link, follow_redirects=True)
    # NOTE(review): `link` is assigned but never used afterwards.
    link = response.url


    if response.status_code == 200:
        # Unique timestamp appended to the final URL (doodstream scheme).
        real_time = str(int(time.time()))

        # Capture the /pass_md5/ path (group 1) and the token/expiry
        # query fragment (group 2) from the player page's script.
        pattern = r"(\/pass_md5\/.*?)'.*(\?token=.*?expiry=)"

        match = re.search(pattern, response.text, re.DOTALL)

        if match:
            # Build the pass_md5 request URL from the captured path.
            url = f'https://d000d.com{match[1]}'
            print("MD5: ", url)
            rebobo = await client.get(url, headers=headers, follow_redirects=True)
            # Final URL = base returned by pass_md5 + fixed filler +
            # token query fragment + timestamp.
            real_url = f'{rebobo.text}123456789{match[2]}{real_time}'
            print(real_url)
            return real_url
        else:
            print("No match found in the text.")
            return None

    print("Error: Could not get the response.")
    return None
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
#Get temporaly ID
|
| 204 |
+
async def tantifilm(imdb, client):
    """Entry point: resolve an id to a playable URL via Tantifilm.

    Args:
        imdb: "tt..." IMDb id or "tmdb:..." id, with ":season:episode"
            appended for series.
        client: async HTTP client shared by every request.

    Returns:
        A direct URL string, a {mirror_title: url} dict when the movie
        exposes multiple mirrors, or None on failure.
    """
    urls = None
    try:
        general = is_movie(imdb)
        ismovie = general[0]
        imdb_id = general[1]
        # NOTE(review): reads "Tuttifilm" in the original — possibly a typo
        # for "Tantifilm"; kept as-is since the info helpers may key on
        # this exact string.
        provider = "Tuttifilm"
        if ismovie == 0:
            season = int(general[2])
            episode = int(general[3])
            if "tt" in imdb:
                if TF_FAST_SEARCH == "0":
                    showname, date = await get_info_imdb(imdb_id, ismovie, provider, client)
                    url, embed_id = await search(showname, ismovie, date, client)
                elif TF_FAST_SEARCH == "1":
                    showname = await get_info_imdb(imdb_id, ismovie, provider, client)
                    url, embed_id = await fast_search(showname, ismovie, client)
            else:
                tmdba = imdb_id.replace("tmdb:", "")
                if TF_FAST_SEARCH == "0":
                    showname, date = get_info_tmdb(tmdba, ismovie, provider)
                    url, embed_id = await search(showname, ismovie, date, client)
                elif TF_FAST_SEARCH == "1":
                    showname = get_info_tmdb(tmdba, ismovie, provider)
                    url, embed_id = await fast_search(showname, ismovie, client)
            protect_link = await get_nuovo_indirizzo_and_protect_link(url, embed_id, season, episode, client)
            url = await true_url(protect_link, client)
            return url
        elif ismovie == 1:
            if "tt" in imdb:
                if TF_FAST_SEARCH == "0":
                    showname, date = await get_info_imdb(imdb_id, ismovie, provider, client)
                    tid, url = await search(showname, ismovie, date, client)
                elif TF_FAST_SEARCH == "1":
                    showname = await get_info_imdb(imdb_id, ismovie, provider, client)
                    date = None
                    tid, url = await fast_search(showname, ismovie, client)
            else:
                # CONSISTENCY FIX: strip the "tmdb:" prefix like the series
                # branch above (the original passed the raw `imdb` id,
                # prefix included, to get_info_tmdb).
                tmdba = imdb_id.replace("tmdb:", "")
                if TF_FAST_SEARCH == "0":
                    showname, date = get_info_tmdb(tmdba, ismovie, provider)
                    tid, url = await search(showname, ismovie, date, client)
                elif TF_FAST_SEARCH == "1":
                    showname = get_info_tmdb(tmdba, ismovie, provider)
                    tid, url = await fast_search(showname, ismovie, client)
            protect_link = await get_protect_link(tid, url, client)
            if not isinstance(protect_link, str):
                # get_protect_link fell back to the mirror dict.
                urls = protect_link
                if urls:
                    return urls
                else:
                    print("Tantifilm Error v2")
            else:
                url = await true_url(protect_link, client)
                if url:
                    return url

    except Exception as e:
        print("Tantifilm Error: ", e)
        return None
|