File size: 4,614 Bytes
5b63e44
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
import requests
import re
from bs4 import BeautifulSoup, SoupStrainer
from Src.Utilities.info import get_info_imdb, is_movie, get_info_tmdb

# IPS4 (Invision Community) session cookies used to authenticate every
# request to ddlstreamitaly.co.
# NOTE(review): these are literal credentials committed to source control —
# they will expire and are a security/maintenance concern; consider loading
# them from configuration or environment variables instead.
Cookies =  {
	"ips4_member_id": "4029329",
	"ips4_device_key": "b6f084be79c28bb53ecfc556fd7a2a70",
	"ips4_login_key": "1725525639",
	"ips4_IPSSessionFront": "1fe0cb08a08c5689a9ac93b94faf769e",
}

async def search_series(client, id, season, episode, query=None):
    """Search ddlstreamitaly.co for the series page matching database id `id`
    and season `season`.

    Each search result page is visited and its outbound
    ``rel="external nofollow"`` links are checked for the wanted id; a page
    matches when its meta description also mentions ``Stagione {season}``.

    Args:
        client: async HTTP client exposing ``await client.get(url, ...)``.
        id: external database id to match against the 5th path segment of
            each outbound link.
        season: season number; matched literally as "Stagione {season}".
        episode: unused here (episode selection happens in get_episode);
            kept for interface stability.
        query: optional full search URL. Defaults to the historical
            hard-coded query — NOTE(review): callers should build this URL
            from the actual show title.

    Returns:
        The matching page URL, or None when nothing matches.
    """
    # BUG FIX: the original contained a debug leftover `season = 5` that
    # overwrote the caller's season on every call.
    if query is None:
        query = "https://ddlstreamitaly.co/search/?&q=La%20casa%20di%20carta%203%20Streaming&type=videobox_video&quick=1&nodes=11&search_and_or=and&search_in=titles&sortby=relevancy"
    response = await client.get(query)
    # Only anchor tags are needed from the search results page.
    results = BeautifulSoup(response.text, 'lxml', parse_only=SoupStrainer('a'))
    for a in results.find_all('a', {'data-linktype': 'link'}):
        href = a['href']
        page = await client.get(href, cookies=Cookies)
        page_soup = BeautifulSoup(page.text, 'lxml')
        for external in page_soup.find_all('a', {'rel': 'external nofollow'}):
            real_id = external['href'].split('/')[4]
            if real_id != id:
                continue
            meta_description = page_soup.find('meta', {'name': 'description'})
            # Pages are per-season: the season number appears in the meta
            # description as "Stagione N".
            if meta_description and f"Stagione {season}" in meta_description['content']:
                return href
    return None

async def get_episode(client, link, episode):
    """Fetch a series page and extract the direct link for one episode.

    The page lists episodes as anchors labelled "Part <n>"; the href of the
    anchor whose label matches `episode` is returned.

    Args:
        client: async HTTP client supporting ``impersonate=`` (curl_cffi).
        link: series page URL as returned by search_series.
        episode: episode number (string or int) to select.

    Returns:
        The episode URL with HTML entities decoded, or None when the episode
        anchor is not found on the page.
    """
    # BUG FIX: the original contained a debug leftover `episode = "6"` that
    # overwrote the caller's episode, and a redundant local copy of the
    # module-level Cookies dict.
    response = await client.get(link + "?area=online", cookies=Cookies, impersonate="chrome120")
    pattern = rf'<a\s+href="([^"]+)"[^>]*>\s*(Part\s+{episode})\s*</a>'
    match = re.search(pattern, response.text)
    if match is None:
        # Episode anchor missing (wrong page, or the site layout changed);
        # the original crashed with AttributeError here.
        return None
    # The href comes straight from raw HTML, so decode "&amp;" back to "&".
    # BUG FIX: the old two-step replace ("&amp"->"" then ";"->"&") also
    # clobbered any genuine ';' character in the URL.
    return match.group(1).replace("&amp;", "&")

async def search_movie(client=None, id=None):
    """Search ddlstreamitaly.co for the movie page matching database id `id`.

    Args:
        client: optional async HTTP client. When omitted, falls back to the
            synchronous ``requests`` module (this blocks the event loop —
            kept only for backward compatibility with the zero-arg call).
        id: external database id to match against each outbound link.
            When omitted no result can ever match and None is returned —
            this mirrors the original code, which accidentally compared
            against the ``id`` *builtin* and therefore never matched.

    Returns:
        The matching page URL, or None.
    """
    # NOTE(review): the query is hard-coded to one specific film; it should
    # be built from the movie title.
    link = "https://ddlstreamitaly.co/search/?&q=%20Noi%2C%20i%20ragazzi%20dello%20zoo%20di%20Berlino%20Streaming&type=videobox_video&quick=1&nodes=11&search_and_or=and&sortby=relevancy"
    if client is not None:
        response = await client.get(link, impersonate="chrome120")
    else:
        # BUG FIX: plain `requests` does not accept curl_cffi's
        # `impersonate` keyword (the original raised TypeError here).
        response = requests.get(link)
    results = BeautifulSoup(response.text, 'lxml', parse_only=SoupStrainer('a'))
    for a in results.find_all('a', {'data-linktype': 'link'}):
        href = a['href']
        if client is not None:
            page = await client.get(href, cookies=Cookies)
        else:
            page = requests.get(href, cookies=Cookies)
        page_soup = BeautifulSoup(page.text, 'lxml', parse_only=SoupStrainer('a'))
        for external in page_soup.find_all('a', {'rel': 'external nofollow'}):
            real_id = external['href'].split('/')[4]
            if real_id == id:
                return href
    return None



async def get_mp4(client, link):
    """Fetch the player page at `link` and pull the stream out of its
    <source> tag.

    Returns:
        A ``(url, resolution)`` tuple, where resolution is the tag's
        ``res`` attribute (None when absent).
    """
    page = await client.get(link, cookies=Cookies)
    only_sources = SoupStrainer('source')
    tag = BeautifulSoup(page.text, 'lxml', parse_only=only_sources).find('source')
    return tag['src'], tag.get('res')














async def ddlstream(imdb, client):
    """Resolve an IMDB/TMDB id to a DDLStreamItaly stream.

    Args:
        imdb: id string — e.g. "tt6468322" for a movie or "tt6468322:5:1"
            for a series season/episode (parsed by is_movie).
        client: async HTTP client shared by the scraper helpers.

    Returns:
        The result of get_mp4 — a ``(url, resolution)`` tuple — or
        ``(None, None)`` on any failure.
    """
    try:
        general = is_movie(imdb)
        ismovie = general[0]
        content_id = general[1]  # renamed: `id` shadowed the builtin
        provider = "DDLStream"   # renamed: `type` shadowed the builtin
        if "tt" in imdb:
            showname = await get_info_imdb(content_id, ismovie, provider, client)
            print(showname)
        else:
            showname = get_info_tmdb(content_id, ismovie, provider)
        if ismovie == 0:
            season = general[2]
            episode = general[3]
            page_link = await search_series(client, content_id, season, episode)
            mp4_link = await get_episode(client, page_link, episode)
        else:
            page_link = await search_movie()
            mp4_link = page_link + "?area=online"
        final_url = await get_mp4(client, mp4_link)
        print(final_url)
        # BUG FIX: the original printed the result but implicitly returned
        # None on success; return it so callers actually get the stream.
        return final_url
    except Exception as e:
        print(f"MammaMia: DDLStream Failed {e}")
        return None, None
    
'''
async def test_animeworld():
    from curl_cffi.requests import AsyncSession
    async with AsyncSession() as client:
        test_id = "tt6468322:5:1"  # This is an example ID format
        results = await ddlstream(test_id, client)
        print(results)

if __name__ == "__main__":
    import asyncio
    asyncio.run(test_animeworld())


    #python3 -m Src.API.ddlstream
'''