import os
import re
import json
import time
import httpx
import random
import asyncio
import urllib.parse
import mimetypes
from bs4 import BeautifulSoup
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import StreamingResponse
from fake_useragent import UserAgent
import uvicorn
# Google Auth Libraries
from google.oauth2 import service_account
import google.auth.transport.requests
# --- FastAPI application and module-level configuration ---
app = FastAPI()

# --- Configurations ---
# Access key required on /download requests; defaults to "0000" for local dev.
ACCESS_KEY = os.getenv("ACCESS_KEY", "0000")
# JSON (a single object or a list of objects) holding Google service-account
# credentials; parsed lazily by get_all_service_accounts().
SERVICE_ACCOUNT_JSON_STR = os.getenv("GOOGLE_SERVICE_ACCOUNT_JSON")

# Random User-Agent generator with a static Chrome string as fallback.
ua = UserAgent(fallback='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36')

# MediaFire Cache: maps page URL -> {'link': direct_url, 'time': scraped_at}.
MEDIAFIRE_CACHE = {}
CACHE_TTL = 1800  # seconds a cached MediaFire direct link stays valid

# Cap on concurrent MediaFire scrape/stream operations.
MAX_CONCURRENT_REQUESTS = 30
semaphore = asyncio.Semaphore(MAX_CONCURRENT_REQUESTS)

# Shared HTTP client for streaming: read timeout disabled (read=None) so long
# media transfers are not cut off mid-stream.
client = httpx.AsyncClient(
    timeout=httpx.Timeout(30.0, read=None),
    follow_redirects=True,
    limits=httpx.Limits(max_connections=500, max_keepalive_connections=100)
)
@app.get("/")
async def index():
    """Health-check endpoint; reports that the service is up."""
    status_payload = {"status": "Online"}
    return status_payload
def get_all_service_accounts():
    """Parse GOOGLE_SERVICE_ACCOUNT_JSON into a list of credential dicts.

    The env var may hold either a single service-account object or a JSON
    list of them; always returns a list ([] when unset or unparsable).
    """
    if not SERVICE_ACCOUNT_JSON_STR:
        return []
    try:
        data = json.loads(SERVICE_ACCOUNT_JSON_STR)
    except (json.JSONDecodeError, TypeError):
        # Malformed credential JSON: treat as "no accounts configured"
        # instead of crashing the request (was a bare except).
        return []
    return data if isinstance(data, list) else [data]
async def get_token_for_account(cred_dict):
    """Exchange one service-account credential dict for an OAuth2 token.

    Returns the bearer token string, or None when the refresh fails for any
    reason (bad credentials, network error, ...); callers fall through to
    the next account on None.
    """
    try:
        scopes = ['https://www.googleapis.com/auth/drive.readonly']
        creds = service_account.Credentials.from_service_account_info(cred_dict, scopes=scopes)
        # creds.refresh() performs blocking network I/O; run it in a worker
        # thread so the event loop is not stalled.  get_running_loop() is the
        # correct call inside a coroutine (get_event_loop() is deprecated here).
        loop = asyncio.get_running_loop()
        auth_req = google.auth.transport.requests.Request()
        await loop.run_in_executor(None, creds.refresh, auth_req)
        return creds.token
    except Exception:
        # Best-effort token fetch; narrow from bare except so task
        # cancellation still propagates.
        return None
def get_google_file_id(url):
    """Extract a Google Drive file id from a share URL.

    Handles the path forms /file/d/<id> and /d/<id>, plus the legacy query
    form open?id=<id>.  Returns the id string or None.

    Fix: the old pattern required a '/' after 'open?id=', so real
    'open?id=<id>' links never matched.
    """
    path_match = re.search(r'/(?:file/)?d/([a-zA-Z0-9_-]+)', url)
    if path_match:
        return path_match.group(1)
    # Legacy share links carry the id as a query parameter instead.
    query_match = re.search(r'[?&]id=([a-zA-Z0-9_-]+)', url)
    return query_match.group(1) if query_match else None
def get_clean_filename(url):
    """Derive a filename from a URL's last path segment.

    Percent-decodes the URL, drops any query string, and falls back to
    "video.mp4" when the segment is empty or has no extension.
    """
    decoded = urllib.parse.unquote(url)
    # Last path segment with the query string stripped off.
    candidate = decoded.rsplit('/', 1)[-1].partition('?')[0]
    if candidate and '.' in candidate:
        return candidate
    return "video.mp4"
async def scrape_mediafire(url):
    """Scrape a MediaFire file page for its direct-download URL.

    Returns an absolute https download link, or None when the page cannot
    be fetched or no link is found.  Best-effort: all failures map to None.
    """
    try:
        headers = {
            'User-Agent': ua.random,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
            'Referer': 'https://www.mediafire.com/'
        }
        async with httpx.AsyncClient(headers=headers, follow_redirects=True, timeout=15.0) as temp_client:
            r = await temp_client.get(url)
            if r.status_code != 200:
                return None
            # Direct link search: look for a download*.mediafire.com URL in
            # the raw HTML first (cheapest path).
            target = None
            match = re.search(r'https?://download[^\s"\']+mediafire\.com/[^\s"\']+', r.text)
            if match:
                target = match.group(0).strip().replace("'", "").replace('"', '')
            # Fallback: parse the page and pull the download button's href.
            if not target:
                soup = BeautifulSoup(r.text, 'html.parser')
                btn = soup.find('a', {'id': 'downloadButton'}) or soup.find('a', {'aria-label': re.compile(r'Download', re.I)})
                if btn:
                    target = btn.get('href')
            # --- URL Fix Section ---
            if target:
                # Protocol-relative link ("//...") -> prepend https:
                if target.startswith("//"):
                    target = f"https:{target}"
                # Relative path ("/file/...") -> prepend the MediaFire domain.
                elif target.startswith("/"):
                    target = f"https://www.mediafire.com{target}"
            return target
    except Exception:
        # Narrowed from bare except so cancellation still propagates.
        pass
    return None
@app.get("/download")
async def download_proxy(request: Request, url: str, key: str = None):
    """Proxy-stream a remote file (MediaFire, Google Drive, or direct URL).

    Requires the correct access `key` (403 otherwise).  Forwards the
    client's Range header so players can seek.  Raises 400/404/500
    HTTPExceptions on the various failure paths.
    """
    if key != ACCESS_KEY:
        raise HTTPException(status_code=403, detail="Access Denied")
    clean_url = urllib.parse.unquote(url)
    filename = get_clean_filename(clean_url)
    range_header = request.headers.get('range')
    current_time = time.time()

    # --- MediaFire Section ---
    if "mediafire.com" in clean_url:
        async with semaphore:
            # Reuse a previously scraped direct link while it is still fresh.
            cached = MEDIAFIRE_CACHE.get(clean_url)
            target_link = cached['link'] if (cached and (current_time - cached['time']) < CACHE_TTL) else None
            if not target_link:
                target_link = await scrape_mediafire(clean_url)
                if target_link:
                    MEDIAFIRE_CACHE[clean_url] = {'link': target_link, 'time': current_time}
            if not target_link:
                raise HTTPException(status_code=404, detail="Direct link not found.")
            try:
                return await stream_file(target_link, range_header, filename, referer=clean_url)
            except HTTPException as e:
                # 415 means MediaFire answered with an HTML block page: the
                # cached link has expired, so re-scrape once and retry.
                if e.status_code == 415:
                    if clean_url in MEDIAFIRE_CACHE:
                        del MEDIAFIRE_CACHE[clean_url]
                    new_link = await scrape_mediafire(clean_url)
                    if new_link:
                        return await stream_file(new_link, range_header, filename, referer=clean_url)
                raise e
            except Exception:
                raise HTTPException(status_code=500, detail="Streaming process error.")
    # --- Google Drive Section ---
    elif "drive.google.com" in clean_url:
        file_id = get_google_file_id(clean_url)
        if not file_id:
            raise HTTPException(status_code=400, detail="Invalid GD Link")
        accounts = get_all_service_accounts()
        # Shuffle so quota usage is spread across service accounts.
        random.shuffle(accounts)
        for account in accounts:
            token = await get_token_for_account(account)
            if not token:
                continue
            api_link = f"https://www.googleapis.com/drive/v3/files/{file_id}?alt=media"
            headers = {"Authorization": f"Bearer {token}"}
            if range_header:
                headers['Range'] = range_header
            try:
                req = client.build_request("GET", api_link, headers=headers)
                r = await client.send(req, stream=True)
                if r.status_code in [200, 206]:
                    return await process_response(r, filename)
                # Not streamable via this account; release the connection.
                await r.aclose()
            except Exception:
                # Narrowed from bare except so asyncio.CancelledError (client
                # disconnect) is not swallowed by the account loop.
                continue
        # All accounts failed: fall back to the public download endpoint.
        public_url = f"https://drive.google.com/uc?export=download&id={file_id}"
        return await stream_file(public_url, range_header, filename)
    # --- Any other URL: proxy it directly ---
    else:
        return await stream_file(clean_url, range_header, filename)
async def stream_file(target_url, range_header, filename, referer=None):
    """Open a streaming GET to target_url and relay it via process_response.

    Raises HTTPException 500 for a malformed (non-http) URL, and 415 when
    the upstream answers 200 with an HTML page (MediaFire's block page)
    instead of file bytes.
    """
    # Ensure full URL for httpx
    if not target_url.startswith("http"):
        raise HTTPException(status_code=500, detail=f"Malformed URL: {target_url}")
    headers = {'User-Agent': ua.random}
    if range_header: headers['Range'] = range_header
    if referer: headers['Referer'] = referer
    # stream=True defers body download; process_response relays it chunkwise.
    req = client.build_request("GET", target_url, headers=headers)
    r = await client.send(req, stream=True)
    # A successful text/html response here is an interstitial/block page,
    # not media -- close it and signal the caller to re-scrape (415).
    if "text/html" in r.headers.get("Content-Type", "").lower() and r.status_code == 200:
        await r.aclose()
        raise HTTPException(status_code=415, detail="Blocked by MediaFire.")
    return await process_response(r, filename)
async def process_response(r, filename):
    """Wrap an open httpx streaming response in a FastAPI StreamingResponse.

    Forwards the upstream status code plus length/range headers, coerces
    generic MIME guesses to a video type, and guarantees the upstream
    response is closed when the client stops reading.
    """
    mime_type, _ = mimetypes.guess_type(filename)
    # Unknown or generic "application/*" guesses (e.g. octet-stream) are
    # forced to a video type so in-browser players treat the stream as media.
    if not mime_type or 'application' in mime_type:
        mime_type = 'video/mp4' if filename.lower().endswith('.mp4') else 'video/x-matroska'
    res_headers = {
        'Content-Type': mime_type,
        'Accept-Ranges': 'bytes',
        'Content-Disposition': f'inline; filename="{urllib.parse.quote(filename)}"',
        'Cache-Control': 'no-cache'
    }
    # Pass through size/range info so clients can seek within the file.
    if 'content-length' in r.headers: res_headers['Content-Length'] = r.headers['content-length']
    if 'content-range' in r.headers: res_headers['Content-Range'] = r.headers['content-range']
    async def stream_generator():
        # Relay upstream bytes in 128 KiB chunks; the finally block releases
        # the upstream connection even if the client disconnects mid-stream.
        try:
            async for chunk in r.aiter_bytes(chunk_size=131072):
                yield chunk
        finally:
            await r.aclose()
    return StreamingResponse(
        stream_generator(),
        status_code=r.status_code,
        headers=res_headers,
        media_type=mime_type
    )
# Run a local development server when executed directly.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)