Spaces:
Paused
Paused
Upload 22 files
Browse files- .dockerignore +38 -0
- .github/FUNDING.yml +2 -0
- .github/dependabot.yml +8 -0
- .github/workflows/main.yml +40 -0
- .gitignore +140 -0
- mediaflow_proxy/__init__.py +0 -0
- mediaflow_proxy/configs.py +14 -0
- mediaflow_proxy/drm/__init__.py +11 -0
- mediaflow_proxy/drm/decrypter.py +778 -0
- mediaflow_proxy/handlers.py +268 -0
- mediaflow_proxy/main.py +51 -0
- mediaflow_proxy/mpd_processor.py +202 -0
- mediaflow_proxy/routes.py +148 -0
- mediaflow_proxy/utils/__init__.py +0 -0
- mediaflow_proxy/utils/cache_utils.py +58 -0
- mediaflow_proxy/utils/http_utils.py +220 -0
- mediaflow_proxy/utils/m3u8_processor.py +82 -0
- mediaflow_proxy/utils/mpd_utils.py +555 -0
- poetry.lock +578 -0
- pyproject.toml +29 -0
- static/index.html +76 -0
- static/logo.png +0 -0
.dockerignore
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
*.py[cod]
|
| 3 |
+
*$py.class
|
| 4 |
+
|
| 5 |
+
# C extensions
|
| 6 |
+
*.so
|
| 7 |
+
|
| 8 |
+
# Distribution / packaging
|
| 9 |
+
.Python
|
| 10 |
+
build/
|
| 11 |
+
develop-eggs/
|
| 12 |
+
dist/
|
| 13 |
+
downloads/
|
| 14 |
+
eggs/
|
| 15 |
+
.eggs/
|
| 16 |
+
lib/
|
| 17 |
+
lib64/
|
| 18 |
+
parts/
|
| 19 |
+
sdist/
|
| 20 |
+
var/
|
| 21 |
+
wheels/
|
| 22 |
+
pip-wheel-metadata/
|
| 23 |
+
share/python-wheels/
|
| 24 |
+
*.egg-info/
|
| 25 |
+
.installed.cfg
|
| 26 |
+
*.egg
|
| 27 |
+
MANIFEST
|
| 28 |
+
|
| 29 |
+
.env
|
| 30 |
+
.idea/
|
| 31 |
+
*.service
|
| 32 |
+
|
| 33 |
+
# Ignore all files under drm folder
|
| 34 |
+
mediaflow_proxy/drm/*
|
| 35 |
+
|
| 36 |
+
# Unignore specific files
|
| 37 |
+
!mediaflow_proxy/drm/__init__.py
|
| 38 |
+
!mediaflow_proxy/drm/decrypter.py
|
.github/FUNDING.yml
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
github: [mhdzumair]
|
.github/dependabot.yml
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version: 2
|
| 2 |
+
updates:
|
| 3 |
+
- package-ecosystem: "pip"
|
| 4 |
+
directory: "/"
|
| 5 |
+
schedule:
|
| 6 |
+
interval: "weekly"
|
| 7 |
+
commit-message:
|
| 8 |
+
prefix: "dependabot"
|
.github/workflows/main.yml
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: MediaFlow Proxy CI/CD
|
| 2 |
+
|
| 3 |
+
on:
|
| 4 |
+
release:
|
| 5 |
+
types: [ created ]
|
| 6 |
+
|
| 7 |
+
jobs:
|
| 8 |
+
mediaflow_proxy_docker_build:
|
| 9 |
+
runs-on: ubuntu-latest
|
| 10 |
+
|
| 11 |
+
steps:
|
| 12 |
+
- name: Checkout
|
| 13 |
+
uses: actions/checkout@v4
|
| 14 |
+
|
| 15 |
+
- name: Set up QEMU
|
| 16 |
+
uses: docker/setup-qemu-action@v3
|
| 17 |
+
|
| 18 |
+
- name: Set up Docker Buildx
|
| 19 |
+
uses: docker/setup-buildx-action@v3
|
| 20 |
+
|
| 21 |
+
- name: Login to Docker Hub
|
| 22 |
+
uses: docker/login-action@v3
|
| 23 |
+
with:
|
| 24 |
+
username: mhdzumair
|
| 25 |
+
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
| 26 |
+
|
| 27 |
+
- name: Build and push
|
| 28 |
+
id: docker_build
|
| 29 |
+
uses: docker/build-push-action@v5
|
| 30 |
+
with:
|
| 31 |
+
context: .
|
| 32 |
+
file: Dockerfile
|
| 33 |
+
platforms: linux/amd64,linux/arm64
|
| 34 |
+
push: true
|
| 35 |
+
tags: |
|
| 36 |
+
mhdzumair/mediaflow-proxy:v${{ github.ref_name }}
|
| 37 |
+
mhdzumair/mediaflow-proxy:latest
|
| 38 |
+
|
| 39 |
+
- name: Image digest
|
| 40 |
+
run: echo ${{ steps.docker_build.outputs.digest }}
|
.gitignore
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
pip-wheel-metadata/
|
| 24 |
+
share/python-wheels/
|
| 25 |
+
*.egg-info/
|
| 26 |
+
.installed.cfg
|
| 27 |
+
*.egg
|
| 28 |
+
MANIFEST
|
| 29 |
+
|
| 30 |
+
# PyInstaller
|
| 31 |
+
# Usually these files are written by a python script from a template
|
| 32 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 33 |
+
*.manifest
|
| 34 |
+
*.spec
|
| 35 |
+
|
| 36 |
+
# Installer logs
|
| 37 |
+
pip-log.txt
|
| 38 |
+
pip-delete-this-directory.txt
|
| 39 |
+
|
| 40 |
+
# Unit test / coverage reports
|
| 41 |
+
htmlcov/
|
| 42 |
+
.tox/
|
| 43 |
+
.nox/
|
| 44 |
+
.coverage
|
| 45 |
+
.coverage.*
|
| 46 |
+
.cache
|
| 47 |
+
nosetests.xml
|
| 48 |
+
coverage.xml
|
| 49 |
+
*.cover
|
| 50 |
+
*.py,cover
|
| 51 |
+
.hypothesis/
|
| 52 |
+
.pytest_cache/
|
| 53 |
+
|
| 54 |
+
# Translations
|
| 55 |
+
*.mo
|
| 56 |
+
*.pot
|
| 57 |
+
|
| 58 |
+
# Django stuff:
|
| 59 |
+
*.log
|
| 60 |
+
local_settings.py
|
| 61 |
+
db.sqlite3
|
| 62 |
+
db.sqlite3-journal
|
| 63 |
+
|
| 64 |
+
# Flask stuff:
|
| 65 |
+
instance/
|
| 66 |
+
.webassets-cache
|
| 67 |
+
|
| 68 |
+
# Scrapy stuff:
|
| 69 |
+
.scrapy
|
| 70 |
+
|
| 71 |
+
# Sphinx documentation
|
| 72 |
+
docs/_build/
|
| 73 |
+
|
| 74 |
+
# PyBuilder
|
| 75 |
+
target/
|
| 76 |
+
|
| 77 |
+
# Jupyter Notebook
|
| 78 |
+
.ipynb_checkpoints
|
| 79 |
+
|
| 80 |
+
# IPython
|
| 81 |
+
profile_default/
|
| 82 |
+
ipython_config.py
|
| 83 |
+
|
| 84 |
+
# pyenv
|
| 85 |
+
.python-version
|
| 86 |
+
|
| 87 |
+
# pipenv
|
| 88 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 89 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 90 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 91 |
+
# install all needed dependencies.
|
| 92 |
+
#Pipfile.lock
|
| 93 |
+
|
| 94 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
| 95 |
+
__pypackages__/
|
| 96 |
+
|
| 97 |
+
# Celery stuff
|
| 98 |
+
celerybeat-schedule
|
| 99 |
+
celerybeat.pid
|
| 100 |
+
|
| 101 |
+
# SageMath parsed files
|
| 102 |
+
*.sage.py
|
| 103 |
+
|
| 104 |
+
# Environments
|
| 105 |
+
.env
|
| 106 |
+
.venv
|
| 107 |
+
env/
|
| 108 |
+
venv/
|
| 109 |
+
ENV/
|
| 110 |
+
env.bak/
|
| 111 |
+
venv.bak/
|
| 112 |
+
|
| 113 |
+
# Spyder project settings
|
| 114 |
+
.spyderproject
|
| 115 |
+
.spyproject
|
| 116 |
+
|
| 117 |
+
# Rope project settings
|
| 118 |
+
.ropeproject
|
| 119 |
+
|
| 120 |
+
# mkdocs documentation
|
| 121 |
+
/site
|
| 122 |
+
|
| 123 |
+
# mypy
|
| 124 |
+
.mypy_cache/
|
| 125 |
+
.dmypy.json
|
| 126 |
+
dmypy.json
|
| 127 |
+
|
| 128 |
+
# Pyre type checker
|
| 129 |
+
.pyre/
|
| 130 |
+
|
| 131 |
+
.idea/
|
| 132 |
+
*.service
|
| 133 |
+
|
| 134 |
+
# Ignore all files under drm folder
|
| 135 |
+
mediaflow_proxy/drm/*
|
| 136 |
+
|
| 137 |
+
# Unignore specific files
|
| 138 |
+
!mediaflow_proxy/drm/__init__.py
|
| 139 |
+
!mediaflow_proxy/drm/decrypter.py
|
| 140 |
+
|
mediaflow_proxy/__init__.py
ADDED
|
File without changes
|
mediaflow_proxy/configs.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic_settings import BaseSettings
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class Settings(BaseSettings):
|
| 5 |
+
api_password: str # The password for accessing the API endpoints.
|
| 6 |
+
proxy_url: str | None = None # The URL of the proxy server to route requests through.
|
| 7 |
+
mpd_live_stream_delay: int = 30 # The delay in seconds for live MPD streams.
|
| 8 |
+
|
| 9 |
+
class Config:
|
| 10 |
+
env_file = ".env"
|
| 11 |
+
extra = "ignore"
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
settings = Settings()
|
mediaflow_proxy/drm/__init__.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import tempfile
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
async def create_temp_file(suffix: str, content: bytes = None, prefix: str = None) -> tempfile.NamedTemporaryFile:
|
| 6 |
+
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=suffix, prefix=prefix)
|
| 7 |
+
temp_file.delete_file = lambda: os.unlink(temp_file.name)
|
| 8 |
+
if content:
|
| 9 |
+
temp_file.write(content)
|
| 10 |
+
temp_file.close()
|
| 11 |
+
return temp_file
|
mediaflow_proxy/drm/decrypter.py
ADDED
|
@@ -0,0 +1,778 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import struct
|
| 3 |
+
import sys
|
| 4 |
+
|
| 5 |
+
from Crypto.Cipher import AES
|
| 6 |
+
from collections import namedtuple
|
| 7 |
+
import array
|
| 8 |
+
|
| 9 |
+
CENCSampleAuxiliaryDataFormat = namedtuple("CENCSampleAuxiliaryDataFormat", ["is_encrypted", "iv", "sub_samples"])
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class MP4Atom:
|
| 13 |
+
"""
|
| 14 |
+
Represents an MP4 atom, which is a basic unit of data in an MP4 file.
|
| 15 |
+
Each atom contains a header (size and type) and data.
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
__slots__ = ("atom_type", "size", "data")
|
| 19 |
+
|
| 20 |
+
def __init__(self, atom_type: bytes, size: int, data: memoryview | bytearray):
|
| 21 |
+
"""
|
| 22 |
+
Initializes an MP4Atom instance.
|
| 23 |
+
|
| 24 |
+
Args:
|
| 25 |
+
atom_type (bytes): The type of the atom.
|
| 26 |
+
size (int): The size of the atom.
|
| 27 |
+
data (memoryview | bytearray): The data contained in the atom.
|
| 28 |
+
"""
|
| 29 |
+
self.atom_type = atom_type
|
| 30 |
+
self.size = size
|
| 31 |
+
self.data = data
|
| 32 |
+
|
| 33 |
+
def __repr__(self):
|
| 34 |
+
return f"<MP4Atom type={self.atom_type}, size={self.size}>"
|
| 35 |
+
|
| 36 |
+
def pack(self):
|
| 37 |
+
"""
|
| 38 |
+
Packs the atom into binary data.
|
| 39 |
+
|
| 40 |
+
Returns:
|
| 41 |
+
bytes: Packed binary data with size, type, and data.
|
| 42 |
+
"""
|
| 43 |
+
return struct.pack(">I", self.size) + self.atom_type + self.data
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class MP4Parser:
|
| 47 |
+
"""
|
| 48 |
+
Parses MP4 data to extract atoms and their structure.
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
def __init__(self, data: memoryview):
|
| 52 |
+
"""
|
| 53 |
+
Initializes an MP4Parser instance.
|
| 54 |
+
|
| 55 |
+
Args:
|
| 56 |
+
data (memoryview): The binary data of the MP4 file.
|
| 57 |
+
"""
|
| 58 |
+
self.data = data
|
| 59 |
+
self.position = 0
|
| 60 |
+
|
| 61 |
+
def read_atom(self) -> MP4Atom | None:
|
| 62 |
+
"""
|
| 63 |
+
Reads the next atom from the data.
|
| 64 |
+
|
| 65 |
+
Returns:
|
| 66 |
+
MP4Atom | None: MP4Atom object or None if no more atoms are available.
|
| 67 |
+
"""
|
| 68 |
+
pos = self.position
|
| 69 |
+
if pos + 8 > len(self.data):
|
| 70 |
+
return None
|
| 71 |
+
|
| 72 |
+
size, atom_type = struct.unpack_from(">I4s", self.data, pos)
|
| 73 |
+
pos += 8
|
| 74 |
+
|
| 75 |
+
if size == 1:
|
| 76 |
+
if pos + 8 > len(self.data):
|
| 77 |
+
return None
|
| 78 |
+
size = struct.unpack_from(">Q", self.data, pos)[0]
|
| 79 |
+
pos += 8
|
| 80 |
+
|
| 81 |
+
if size < 8 or pos + size - 8 > len(self.data):
|
| 82 |
+
return None
|
| 83 |
+
|
| 84 |
+
atom_data = self.data[pos : pos + size - 8]
|
| 85 |
+
self.position = pos + size - 8
|
| 86 |
+
return MP4Atom(atom_type, size, atom_data)
|
| 87 |
+
|
| 88 |
+
def list_atoms(self) -> list[MP4Atom]:
|
| 89 |
+
"""
|
| 90 |
+
Lists all atoms in the data.
|
| 91 |
+
|
| 92 |
+
Returns:
|
| 93 |
+
list[MP4Atom]: List of MP4Atom objects.
|
| 94 |
+
"""
|
| 95 |
+
atoms = []
|
| 96 |
+
original_position = self.position
|
| 97 |
+
self.position = 0
|
| 98 |
+
while self.position + 8 <= len(self.data):
|
| 99 |
+
atom = self.read_atom()
|
| 100 |
+
if not atom:
|
| 101 |
+
break
|
| 102 |
+
atoms.append(atom)
|
| 103 |
+
self.position = original_position
|
| 104 |
+
return atoms
|
| 105 |
+
|
| 106 |
+
def _read_atom_at(self, pos: int, end: int) -> MP4Atom | None:
|
| 107 |
+
if pos + 8 > end:
|
| 108 |
+
return None
|
| 109 |
+
|
| 110 |
+
size, atom_type = struct.unpack_from(">I4s", self.data, pos)
|
| 111 |
+
pos += 8
|
| 112 |
+
|
| 113 |
+
if size == 1:
|
| 114 |
+
if pos + 8 > end:
|
| 115 |
+
return None
|
| 116 |
+
size = struct.unpack_from(">Q", self.data, pos)[0]
|
| 117 |
+
pos += 8
|
| 118 |
+
|
| 119 |
+
if size < 8 or pos + size - 8 > end:
|
| 120 |
+
return None
|
| 121 |
+
|
| 122 |
+
atom_data = self.data[pos : pos + size - 8]
|
| 123 |
+
return MP4Atom(atom_type, size, atom_data)
|
| 124 |
+
|
| 125 |
+
def print_atoms_structure(self, indent: int = 0):
|
| 126 |
+
"""
|
| 127 |
+
Prints the structure of all atoms in the data.
|
| 128 |
+
|
| 129 |
+
Args:
|
| 130 |
+
indent (int): The indentation level for printing.
|
| 131 |
+
"""
|
| 132 |
+
pos = 0
|
| 133 |
+
end = len(self.data)
|
| 134 |
+
while pos + 8 <= end:
|
| 135 |
+
atom = self._read_atom_at(pos, end)
|
| 136 |
+
if not atom:
|
| 137 |
+
break
|
| 138 |
+
self.print_single_atom_structure(atom, pos, indent)
|
| 139 |
+
pos += atom.size
|
| 140 |
+
|
| 141 |
+
def print_single_atom_structure(self, atom: MP4Atom, parent_position: int, indent: int):
|
| 142 |
+
"""
|
| 143 |
+
Prints the structure of a single atom.
|
| 144 |
+
|
| 145 |
+
Args:
|
| 146 |
+
atom (MP4Atom): The atom to print.
|
| 147 |
+
parent_position (int): The position of the parent atom.
|
| 148 |
+
indent (int): The indentation level for printing.
|
| 149 |
+
"""
|
| 150 |
+
try:
|
| 151 |
+
atom_type = atom.atom_type.decode("utf-8")
|
| 152 |
+
except UnicodeDecodeError:
|
| 153 |
+
atom_type = repr(atom.atom_type)
|
| 154 |
+
print(" " * indent + f"Type: {atom_type}, Size: {atom.size}")
|
| 155 |
+
|
| 156 |
+
child_pos = 0
|
| 157 |
+
child_end = len(atom.data)
|
| 158 |
+
while child_pos + 8 <= child_end:
|
| 159 |
+
child_atom = self._read_atom_at(parent_position + 8 + child_pos, parent_position + 8 + child_end)
|
| 160 |
+
if not child_atom:
|
| 161 |
+
break
|
| 162 |
+
self.print_single_atom_structure(child_atom, parent_position, indent + 2)
|
| 163 |
+
child_pos += child_atom.size
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
class MP4Decrypter:
|
| 167 |
+
"""
|
| 168 |
+
Class to handle the decryption of CENC encrypted MP4 segments.
|
| 169 |
+
|
| 170 |
+
Attributes:
|
| 171 |
+
key_map (dict[bytes, bytes]): Mapping of track IDs to decryption keys.
|
| 172 |
+
current_key (bytes | None): Current decryption key.
|
| 173 |
+
trun_sample_sizes (array.array): Array of sample sizes from the 'trun' box.
|
| 174 |
+
current_sample_info (list): List of sample information from the 'senc' box.
|
| 175 |
+
encryption_overhead (int): Total size of encryption-related boxes.
|
| 176 |
+
"""
|
| 177 |
+
|
| 178 |
+
def __init__(self, key_map: dict[bytes, bytes]):
|
| 179 |
+
"""
|
| 180 |
+
Initializes the MP4Decrypter with a key map.
|
| 181 |
+
|
| 182 |
+
Args:
|
| 183 |
+
key_map (dict[bytes, bytes]): Mapping of track IDs to decryption keys.
|
| 184 |
+
"""
|
| 185 |
+
self.key_map = key_map
|
| 186 |
+
self.current_key = None
|
| 187 |
+
self.trun_sample_sizes = array.array("I")
|
| 188 |
+
self.current_sample_info = []
|
| 189 |
+
self.encryption_overhead = 0
|
| 190 |
+
|
| 191 |
+
def decrypt_segment(self, combined_segment: bytes) -> bytes:
|
| 192 |
+
"""
|
| 193 |
+
Decrypts a combined MP4 segment.
|
| 194 |
+
|
| 195 |
+
Args:
|
| 196 |
+
combined_segment (bytes): Combined initialization and media segment.
|
| 197 |
+
|
| 198 |
+
Returns:
|
| 199 |
+
bytes: Decrypted segment content.
|
| 200 |
+
"""
|
| 201 |
+
data = memoryview(combined_segment)
|
| 202 |
+
parser = MP4Parser(data)
|
| 203 |
+
atoms = parser.list_atoms()
|
| 204 |
+
|
| 205 |
+
atom_process_order = [b"moov", b"moof", b"sidx", b"mdat"]
|
| 206 |
+
|
| 207 |
+
processed_atoms = {}
|
| 208 |
+
for atom_type in atom_process_order:
|
| 209 |
+
if atom := next((a for a in atoms if a.atom_type == atom_type), None):
|
| 210 |
+
processed_atoms[atom_type] = self._process_atom(atom_type, atom)
|
| 211 |
+
|
| 212 |
+
result = bytearray()
|
| 213 |
+
for atom in atoms:
|
| 214 |
+
if atom.atom_type in processed_atoms:
|
| 215 |
+
processed_atom = processed_atoms[atom.atom_type]
|
| 216 |
+
result.extend(processed_atom.pack())
|
| 217 |
+
else:
|
| 218 |
+
result.extend(atom.pack())
|
| 219 |
+
|
| 220 |
+
return bytes(result)
|
| 221 |
+
|
| 222 |
+
def _process_atom(self, atom_type: bytes, atom: MP4Atom) -> MP4Atom:
|
| 223 |
+
"""
|
| 224 |
+
Processes an MP4 atom based on its type.
|
| 225 |
+
|
| 226 |
+
Args:
|
| 227 |
+
atom_type (bytes): Type of the atom.
|
| 228 |
+
atom (MP4Atom): The atom to process.
|
| 229 |
+
|
| 230 |
+
Returns:
|
| 231 |
+
MP4Atom: Processed atom.
|
| 232 |
+
"""
|
| 233 |
+
match atom_type:
|
| 234 |
+
case b"moov":
|
| 235 |
+
return self._process_moov(atom)
|
| 236 |
+
case b"moof":
|
| 237 |
+
return self._process_moof(atom)
|
| 238 |
+
case b"sidx":
|
| 239 |
+
return self._process_sidx(atom)
|
| 240 |
+
case b"mdat":
|
| 241 |
+
return self._decrypt_mdat(atom)
|
| 242 |
+
case _:
|
| 243 |
+
return atom
|
| 244 |
+
|
| 245 |
+
def _process_moov(self, moov: MP4Atom) -> MP4Atom:
|
| 246 |
+
"""
|
| 247 |
+
Processes the 'moov' (Movie) atom, which contains metadata about the entire presentation.
|
| 248 |
+
This includes information about tracks, media data, and other movie-level metadata.
|
| 249 |
+
|
| 250 |
+
Args:
|
| 251 |
+
moov (MP4Atom): The 'moov' atom to process.
|
| 252 |
+
|
| 253 |
+
Returns:
|
| 254 |
+
MP4Atom: Processed 'moov' atom with updated track information.
|
| 255 |
+
"""
|
| 256 |
+
parser = MP4Parser(moov.data)
|
| 257 |
+
new_moov_data = bytearray()
|
| 258 |
+
|
| 259 |
+
for atom in iter(parser.read_atom, None):
|
| 260 |
+
if atom.atom_type == b"trak":
|
| 261 |
+
new_trak = self._process_trak(atom)
|
| 262 |
+
new_moov_data.extend(new_trak.pack())
|
| 263 |
+
elif atom.atom_type != b"pssh":
|
| 264 |
+
# Skip PSSH boxes as they are not needed in the decrypted output
|
| 265 |
+
new_moov_data.extend(atom.pack())
|
| 266 |
+
|
| 267 |
+
return MP4Atom(b"moov", len(new_moov_data) + 8, new_moov_data)
|
| 268 |
+
|
| 269 |
+
def _process_moof(self, moof: MP4Atom) -> MP4Atom:
|
| 270 |
+
"""
|
| 271 |
+
Processes the 'moov' (Movie) atom, which contains metadata about the entire presentation.
|
| 272 |
+
This includes information about tracks, media data, and other movie-level metadata.
|
| 273 |
+
|
| 274 |
+
Args:
|
| 275 |
+
moov (MP4Atom): The 'moov' atom to process.
|
| 276 |
+
|
| 277 |
+
Returns:
|
| 278 |
+
MP4Atom: Processed 'moov' atom with updated track information.
|
| 279 |
+
"""
|
| 280 |
+
parser = MP4Parser(moof.data)
|
| 281 |
+
new_moof_data = bytearray()
|
| 282 |
+
|
| 283 |
+
for atom in iter(parser.read_atom, None):
|
| 284 |
+
if atom.atom_type == b"traf":
|
| 285 |
+
new_traf = self._process_traf(atom)
|
| 286 |
+
new_moof_data.extend(new_traf.pack())
|
| 287 |
+
else:
|
| 288 |
+
new_moof_data.extend(atom.pack())
|
| 289 |
+
|
| 290 |
+
return MP4Atom(b"moof", len(new_moof_data) + 8, new_moof_data)
|
| 291 |
+
|
| 292 |
+
def _process_traf(self, traf: MP4Atom) -> MP4Atom:
|
| 293 |
+
"""
|
| 294 |
+
Processes the 'traf' (Track Fragment) atom, which contains information about a track fragment.
|
| 295 |
+
This includes sample information, sample encryption data, and other track-level metadata.
|
| 296 |
+
|
| 297 |
+
Args:
|
| 298 |
+
traf (MP4Atom): The 'traf' atom to process.
|
| 299 |
+
|
| 300 |
+
Returns:
|
| 301 |
+
MP4Atom: Processed 'traf' atom with updated sample information.
|
| 302 |
+
"""
|
| 303 |
+
parser = MP4Parser(traf.data)
|
| 304 |
+
new_traf_data = bytearray()
|
| 305 |
+
tfhd = None
|
| 306 |
+
sample_count = 0
|
| 307 |
+
sample_info = []
|
| 308 |
+
|
| 309 |
+
atoms = parser.list_atoms()
|
| 310 |
+
|
| 311 |
+
# calculate encryption_overhead earlier to avoid dependency on trun
|
| 312 |
+
self.encryption_overhead = sum(a.size for a in atoms if a.atom_type in {b"senc", b"saiz", b"saio"})
|
| 313 |
+
|
| 314 |
+
for atom in atoms:
|
| 315 |
+
if atom.atom_type == b"tfhd":
|
| 316 |
+
tfhd = atom
|
| 317 |
+
new_traf_data.extend(atom.pack())
|
| 318 |
+
elif atom.atom_type == b"trun":
|
| 319 |
+
sample_count = self._process_trun(atom)
|
| 320 |
+
new_trun = self._modify_trun(atom)
|
| 321 |
+
new_traf_data.extend(new_trun.pack())
|
| 322 |
+
elif atom.atom_type == b"senc":
|
| 323 |
+
# Parse senc but don't include it in the new decrypted traf data and similarly don't include saiz and saio
|
| 324 |
+
sample_info = self._parse_senc(atom, sample_count)
|
| 325 |
+
elif atom.atom_type not in {b"saiz", b"saio"}:
|
| 326 |
+
new_traf_data.extend(atom.pack())
|
| 327 |
+
|
| 328 |
+
if tfhd:
|
| 329 |
+
tfhd_track_id = struct.unpack_from(">I", tfhd.data, 4)[0]
|
| 330 |
+
self.current_key = self._get_key_for_track(tfhd_track_id)
|
| 331 |
+
self.current_sample_info = sample_info
|
| 332 |
+
|
| 333 |
+
return MP4Atom(b"traf", len(new_traf_data) + 8, new_traf_data)
|
| 334 |
+
|
| 335 |
+
def _decrypt_mdat(self, mdat: MP4Atom) -> MP4Atom:
|
| 336 |
+
"""
|
| 337 |
+
Decrypts the 'mdat' (Media Data) atom, which contains the actual media data (audio, video, etc.).
|
| 338 |
+
The decryption is performed using the current decryption key and sample information.
|
| 339 |
+
|
| 340 |
+
Args:
|
| 341 |
+
mdat (MP4Atom): The 'mdat' atom to decrypt.
|
| 342 |
+
|
| 343 |
+
Returns:
|
| 344 |
+
MP4Atom: Decrypted 'mdat' atom with decrypted media data.
|
| 345 |
+
"""
|
| 346 |
+
if not self.current_key or not self.current_sample_info:
|
| 347 |
+
return mdat # Return original mdat if we don't have decryption info
|
| 348 |
+
|
| 349 |
+
decrypted_samples = bytearray()
|
| 350 |
+
mdat_data = mdat.data
|
| 351 |
+
position = 0
|
| 352 |
+
|
| 353 |
+
for i, info in enumerate(self.current_sample_info):
|
| 354 |
+
if position >= len(mdat_data):
|
| 355 |
+
break # No more data to process
|
| 356 |
+
|
| 357 |
+
sample_size = self.trun_sample_sizes[i] if i < len(self.trun_sample_sizes) else len(mdat_data) - position
|
| 358 |
+
sample = mdat_data[position : position + sample_size]
|
| 359 |
+
position += sample_size
|
| 360 |
+
decrypted_sample = self._process_sample(sample, info, self.current_key)
|
| 361 |
+
decrypted_samples.extend(decrypted_sample)
|
| 362 |
+
|
| 363 |
+
return MP4Atom(b"mdat", len(decrypted_samples) + 8, decrypted_samples)
|
| 364 |
+
|
| 365 |
+
def _parse_senc(self, senc: MP4Atom, sample_count: int) -> list[CENCSampleAuxiliaryDataFormat]:
|
| 366 |
+
"""
|
| 367 |
+
Parses the 'senc' (Sample Encryption) atom, which contains encryption information for samples.
|
| 368 |
+
This includes initialization vectors (IVs) and sub-sample encryption data.
|
| 369 |
+
|
| 370 |
+
Args:
|
| 371 |
+
senc (MP4Atom): The 'senc' atom to parse.
|
| 372 |
+
sample_count (int): The number of samples.
|
| 373 |
+
|
| 374 |
+
Returns:
|
| 375 |
+
list[CENCSampleAuxiliaryDataFormat]: List of sample auxiliary data formats with encryption information.
|
| 376 |
+
"""
|
| 377 |
+
data = memoryview(senc.data)
|
| 378 |
+
version_flags = struct.unpack_from(">I", data, 0)[0]
|
| 379 |
+
version, flags = version_flags >> 24, version_flags & 0xFFFFFF
|
| 380 |
+
position = 4
|
| 381 |
+
|
| 382 |
+
if version == 0:
|
| 383 |
+
sample_count = struct.unpack_from(">I", data, position)[0]
|
| 384 |
+
position += 4
|
| 385 |
+
|
| 386 |
+
sample_info = []
|
| 387 |
+
for _ in range(sample_count):
|
| 388 |
+
if position + 8 > len(data):
|
| 389 |
+
break
|
| 390 |
+
|
| 391 |
+
iv = data[position : position + 8].tobytes()
|
| 392 |
+
position += 8
|
| 393 |
+
|
| 394 |
+
sub_samples = []
|
| 395 |
+
if flags & 0x000002 and position + 2 <= len(data): # Check if subsample information is present
|
| 396 |
+
subsample_count = struct.unpack_from(">H", data, position)[0]
|
| 397 |
+
position += 2
|
| 398 |
+
|
| 399 |
+
for _ in range(subsample_count):
|
| 400 |
+
if position + 6 <= len(data):
|
| 401 |
+
clear_bytes, encrypted_bytes = struct.unpack_from(">HI", data, position)
|
| 402 |
+
position += 6
|
| 403 |
+
sub_samples.append((clear_bytes, encrypted_bytes))
|
| 404 |
+
else:
|
| 405 |
+
break
|
| 406 |
+
|
| 407 |
+
sample_info.append(CENCSampleAuxiliaryDataFormat(True, iv, sub_samples))
|
| 408 |
+
|
| 409 |
+
return sample_info
|
| 410 |
+
|
| 411 |
+
def _get_key_for_track(self, track_id: int) -> bytes:
|
| 412 |
+
"""
|
| 413 |
+
Retrieves the decryption key for a given track ID from the key map.
|
| 414 |
+
|
| 415 |
+
Args:
|
| 416 |
+
track_id (int): The track ID.
|
| 417 |
+
|
| 418 |
+
Returns:
|
| 419 |
+
bytes: The decryption key for the specified track ID.
|
| 420 |
+
"""
|
| 421 |
+
if len(self.key_map) == 1:
|
| 422 |
+
return next(iter(self.key_map.values()))
|
| 423 |
+
key = self.key_map.get(track_id.pack(4, "big"))
|
| 424 |
+
if not key:
|
| 425 |
+
raise ValueError(f"No key found for track ID {track_id}")
|
| 426 |
+
return key
|
| 427 |
+
|
| 428 |
+
@staticmethod
|
| 429 |
+
def _process_sample(
|
| 430 |
+
sample: memoryview, sample_info: CENCSampleAuxiliaryDataFormat, key: bytes
|
| 431 |
+
) -> memoryview | bytearray | bytes:
|
| 432 |
+
"""
|
| 433 |
+
Processes and decrypts a sample using the provided sample information and decryption key.
|
| 434 |
+
This includes handling sub-sample encryption if present.
|
| 435 |
+
|
| 436 |
+
Args:
|
| 437 |
+
sample (memoryview): The sample data.
|
| 438 |
+
sample_info (CENCSampleAuxiliaryDataFormat): The sample auxiliary data format with encryption information.
|
| 439 |
+
key (bytes): The decryption key.
|
| 440 |
+
|
| 441 |
+
Returns:
|
| 442 |
+
memoryview | bytearray | bytes: The decrypted sample.
|
| 443 |
+
"""
|
| 444 |
+
if not sample_info.is_encrypted:
|
| 445 |
+
return sample
|
| 446 |
+
|
| 447 |
+
# pad IV to 16 bytes
|
| 448 |
+
iv = sample_info.iv + b"\x00" * (16 - len(sample_info.iv))
|
| 449 |
+
cipher = AES.new(key, AES.MODE_CTR, initial_value=iv, nonce=b"")
|
| 450 |
+
|
| 451 |
+
if not sample_info.sub_samples:
|
| 452 |
+
# If there are no sub_samples, decrypt the entire sample
|
| 453 |
+
return cipher.decrypt(sample)
|
| 454 |
+
|
| 455 |
+
result = bytearray()
|
| 456 |
+
offset = 0
|
| 457 |
+
for clear_bytes, encrypted_bytes in sample_info.sub_samples:
|
| 458 |
+
result.extend(sample[offset : offset + clear_bytes])
|
| 459 |
+
offset += clear_bytes
|
| 460 |
+
result.extend(cipher.decrypt(sample[offset : offset + encrypted_bytes]))
|
| 461 |
+
offset += encrypted_bytes
|
| 462 |
+
|
| 463 |
+
# If there's any remaining data, treat it as encrypted
|
| 464 |
+
if offset < len(sample):
|
| 465 |
+
result.extend(cipher.decrypt(sample[offset:]))
|
| 466 |
+
|
| 467 |
+
return result
|
| 468 |
+
|
| 469 |
+
def _process_trun(self, trun: MP4Atom) -> int:
    """
    Processes the 'trun' (Track Fragment Run) atom, which contains information about the samples in a track fragment.
    This includes sample sizes, durations, flags, and composition time offsets.

    Side effect: populates self.trun_sample_sizes with one entry per sample
    (0 when the sample-size-present flag is not set, since array.array cannot
    hold None).

    Args:
        trun (MP4Atom): The 'trun' atom to process.

    Returns:
        int: The number of samples in the 'trun' atom.
    """
    # First 32-bit word is version(8) | tr_flags(24). The flag bits tested
    # below all live in the low 24 bits, so the version byte never collides
    # with the masks.
    trun_flags, sample_count = struct.unpack_from(">II", trun.data, 0)
    data_offset = 8

    if trun_flags & 0x000001:  # data-offset-present flag
        data_offset += 4
    if trun_flags & 0x000004:  # first-sample-flags-present flag
        data_offset += 4

    # Unsigned 32-bit array of per-sample sizes, rebuilt for each fragment.
    self.trun_sample_sizes = array.array("I")

    for _ in range(sample_count):
        if trun_flags & 0x000100:  # sample-duration-present flag
            data_offset += 4
        if trun_flags & 0x000200:  # sample-size-present flag
            sample_size = struct.unpack_from(">I", trun.data, data_offset)[0]
            self.trun_sample_sizes.append(sample_size)
            data_offset += 4
        else:
            self.trun_sample_sizes.append(0)  # Using 0 instead of None for uniformity in the array
        if trun_flags & 0x000400:  # sample-flags-present flag
            data_offset += 4
        if trun_flags & 0x000800:  # sample-composition-time-offsets-present flag
            data_offset += 4

    return sample_count
|
| 505 |
+
|
| 506 |
+
def _modify_trun(self, trun: MP4Atom) -> MP4Atom:
    """
    Modifies the 'trun' (Track Fragment Run) atom to update the data offset.
    This is necessary to account for the encryption overhead removed from the
    fragment during decryption.

    Args:
        trun (MP4Atom): The 'trun' atom to modify.

    Returns:
        MP4Atom: Modified 'trun' atom with updated data offset.
    """
    payload = bytearray(trun.data)
    # Low 24 bits of the first word hold tr_flags; strip the version byte.
    flags = struct.unpack_from(">I", payload, 0)[0] & 0xFFFFFF

    # data-offset-present: shift the (signed) data offset back by the bytes of
    # encryption metadata that were stripped out.
    if flags & 0x000001:
        old_offset = struct.unpack_from(">i", payload, 8)[0]
        struct.pack_into(">i", payload, 8, old_offset - self.encryption_overhead)

    return MP4Atom(b"trun", len(payload) + 8, payload)
|
| 526 |
+
|
| 527 |
+
def _process_sidx(self, sidx: MP4Atom) -> MP4Atom:
    """
    Processes the 'sidx' (Segment Index) atom, which contains indexing information for media segments.
    The referenced size of the first reference entry is reduced by the
    encryption overhead removed during decryption.

    Args:
        sidx (MP4Atom): The 'sidx' atom to process.

    Returns:
        MP4Atom: Processed 'sidx' atom with updated segment references.
    """
    sidx_data = bytearray(sidx.data)

    # Offset 32 is the first reference entry's {reference_type(1) |
    # referenced_size(31)} word.
    # NOTE(review): this assumes a version-0 sidx (32-bit
    # earliest_presentation_time / first_offset) and only rewrites the FIRST
    # reference — confirm inputs never use version 1 or multiple references.
    current_size = struct.unpack_from(">I", sidx_data, 32)[0]
    reference_type = current_size >> 31
    current_referenced_size = current_size & 0x7FFFFFFF

    # Remove encryption overhead from referenced size
    new_referenced_size = current_referenced_size - self.encryption_overhead
    new_size = (reference_type << 31) | new_referenced_size
    struct.pack_into(">I", sidx_data, 32, new_size)

    return MP4Atom(b"sidx", len(sidx_data) + 8, sidx_data)
|
| 550 |
+
|
| 551 |
+
def _process_trak(self, trak: MP4Atom) -> MP4Atom:
    """
    Processes the 'trak' (Track) atom, which contains information about a single track in the movie.
    Child 'mdia' atoms are rewritten recursively; every other child is copied
    through unchanged.

    Args:
        trak (MP4Atom): The 'trak' atom to process.

    Returns:
        MP4Atom: Processed 'trak' atom with updated track information.
    """
    reader = MP4Parser(trak.data)
    rebuilt = bytearray()

    while (child := reader.read_atom()) is not None:
        if child.atom_type == b"mdia":
            rebuilt.extend(self._process_mdia(child).pack())
        else:
            rebuilt.extend(child.pack())

    return MP4Atom(b"trak", len(rebuilt) + 8, rebuilt)
|
| 573 |
+
|
| 574 |
+
def _process_mdia(self, mdia: MP4Atom) -> MP4Atom:
    """
    Processes the 'mdia' (Media) atom, which contains media information for a track.
    Child 'minf' atoms are rewritten recursively; every other child is copied
    through unchanged.

    Args:
        mdia (MP4Atom): The 'mdia' atom to process.

    Returns:
        MP4Atom: Processed 'mdia' atom with updated media information.
    """
    reader = MP4Parser(mdia.data)
    rebuilt = bytearray()

    while (child := reader.read_atom()) is not None:
        if child.atom_type == b"minf":
            rebuilt.extend(self._process_minf(child).pack())
        else:
            rebuilt.extend(child.pack())

    return MP4Atom(b"mdia", len(rebuilt) + 8, rebuilt)
|
| 596 |
+
|
| 597 |
+
def _process_minf(self, minf: MP4Atom) -> MP4Atom:
    """
    Processes the 'minf' (Media Information) atom, which contains information about the media data in a track.
    Child 'stbl' atoms are rewritten recursively; every other child is copied
    through unchanged.

    Args:
        minf (MP4Atom): The 'minf' atom to process.

    Returns:
        MP4Atom: Processed 'minf' atom with updated media information.
    """
    reader = MP4Parser(minf.data)
    rebuilt = bytearray()

    while (child := reader.read_atom()) is not None:
        if child.atom_type == b"stbl":
            rebuilt.extend(self._process_stbl(child).pack())
        else:
            rebuilt.extend(child.pack())

    return MP4Atom(b"minf", len(rebuilt) + 8, rebuilt)
|
| 619 |
+
|
| 620 |
+
def _process_stbl(self, stbl: MP4Atom) -> MP4Atom:
    """
    Processes the 'stbl' (Sample Table) atom, which contains information about the samples in a track.
    Child 'stsd' atoms are rewritten recursively; every other child is copied
    through unchanged.

    Args:
        stbl (MP4Atom): The 'stbl' atom to process.

    Returns:
        MP4Atom: Processed 'stbl' atom with updated sample information.
    """
    reader = MP4Parser(stbl.data)
    rebuilt = bytearray()

    while (child := reader.read_atom()) is not None:
        if child.atom_type == b"stsd":
            rebuilt.extend(self._process_stsd(child).pack())
        else:
            rebuilt.extend(child.pack())

    return MP4Atom(b"stbl", len(rebuilt) + 8, rebuilt)
|
| 642 |
+
|
| 643 |
+
def _process_stsd(self, stsd: MP4Atom) -> MP4Atom:
    """
    Processes the 'stsd' (Sample Description) atom, which contains descriptions of the sample entries in a track.
    Each declared sample entry is rewritten via _process_sample_entry; the
    8-byte full-box header (version/flags + entry_count) is copied verbatim.

    Args:
        stsd (MP4Atom): The 'stsd' atom to process.

    Returns:
        MP4Atom: Processed 'stsd' atom with updated sample descriptions.
    """
    parser = MP4Parser(stsd.data)
    # entry_count sits at offset 4, right after the 4-byte version/flags word.
    entry_count = struct.unpack_from(">I", parser.data, 4)[0]
    new_stsd_data = bytearray(stsd.data[:8])

    parser.position = 8  # Move past version_flags and entry_count

    for _ in range(entry_count):
        sample_entry = parser.read_atom()
        if not sample_entry:
            # Fewer entries than declared — stop rather than fail.
            break

        processed_entry = self._process_sample_entry(sample_entry)
        new_stsd_data.extend(processed_entry.pack())

    return MP4Atom(b"stsd", len(new_stsd_data) + 8, new_stsd_data)
|
| 669 |
+
|
| 670 |
+
def _process_sample_entry(self, entry: MP4Atom) -> MP4Atom:
    """
    Processes a sample entry atom, which contains information about a specific type of sample.
    Encryption-related children ('sinf', 'schi', 'tenc', 'schm') are stripped,
    and the entry's fourcc is replaced with the original codec format recorded
    in the 'sinf'/'frma' atom (e.g. 'enca' -> original audio codec).

    Args:
        entry (MP4Atom): The sample entry atom to process.

    Returns:
        MP4Atom: Processed sample entry atom with updated information.
    """
    # Determine the size of fixed fields based on sample entry type
    if entry.atom_type in {b"mp4a", b"enca"}:
        fixed_size = 28  # 8 bytes for size, type and reserved, 20 bytes for fixed fields in Audio Sample Entry.
    elif entry.atom_type in {b"mp4v", b"encv", b"avc1", b"hev1", b"hvc1"}:
        fixed_size = 78  # 8 bytes for size, type and reserved, 70 bytes for fixed fields in Video Sample Entry.
    else:
        fixed_size = 16  # 8 bytes for size, type and reserved, 8 bytes for fixed fields in other Sample Entries.

    # Copy the fixed fields verbatim, then re-parse the trailing child atoms.
    new_entry_data = bytearray(entry.data[:fixed_size])
    parser = MP4Parser(entry.data[fixed_size:])
    codec_format = None

    for atom in iter(parser.read_atom, None):
        if atom.atom_type in {b"sinf", b"schi", b"tenc", b"schm"}:
            if atom.atom_type == b"sinf":
                codec_format = self._extract_codec_format(atom)
            continue  # Skip encryption-related atoms
        new_entry_data.extend(atom.pack())

    # Replace the atom type with the extracted codec format
    new_type = codec_format if codec_format else entry.atom_type
    return MP4Atom(new_type, len(new_entry_data) + 8, new_entry_data)
|
| 703 |
+
|
| 704 |
+
def _extract_codec_format(self, sinf: MP4Atom) -> bytes | None:
    """
    Extracts the codec format from the 'sinf' (Protection Scheme Information) atom.
    The 'frma' child carries the original (pre-encryption) sample entry fourcc.

    Args:
        sinf (MP4Atom): The 'sinf' atom to extract from.

    Returns:
        bytes | None: The codec format or None if not found.
    """
    reader = MP4Parser(sinf.data)
    while (child := reader.read_atom()) is not None:
        if child.atom_type == b"frma":
            return child.data
    return None
|
| 720 |
+
|
| 721 |
+
|
| 722 |
+
def decrypt_segment(init_segment: bytes, segment_content: bytes, key_id: str, key: str) -> bytes:
    """
    Decrypts a CENC encrypted MP4 segment.

    Args:
        init_segment (bytes): Initialization segment data.
        segment_content (bytes): Encrypted segment content.
        key_id (str): Key ID in hexadecimal format.
        key (str): Key in hexadecimal format.

    Returns:
        bytes: The decrypted segment data.
    """
    # MP4Decrypter keys its map on the raw 16-byte key id.
    decrypter = MP4Decrypter({bytes.fromhex(key_id): bytes.fromhex(key)})
    # The decrypter expects init and media data as one contiguous buffer.
    return decrypter.decrypt_segment(init_segment + segment_content)
|
| 736 |
+
|
| 737 |
+
|
| 738 |
+
def cli():
    """
    Command line interface for decrypting a CENC encrypted MP4 segment.

    NOTE: relies on the module-level ``args`` namespace created in the
    ``__main__`` block below; calling cli() before argument parsing raises
    NameError. Exits the process with status 1 on usage or decryption errors.
    """
    init_segment = b""

    # Either separate init + media files, or one combined file.
    if args.init and args.segment:
        with open(args.init, "rb") as f:
            init_segment = f.read()
        with open(args.segment, "rb") as f:
            segment_content = f.read()
    elif args.combined_segment:
        with open(args.combined_segment, "rb") as f:
            segment_content = f.read()
    else:
        print("Usage: python mp4decrypt.py --help")
        sys.exit(1)

    try:
        decrypted_segment = decrypt_segment(init_segment, segment_content, args.key_id, args.key)
        print(f"Decrypted content size is {len(decrypted_segment)} bytes")
        with open(args.output, "wb") as f:
            f.write(decrypted_segment)
        print(f"Decrypted segment written to {args.output}")
    except Exception as e:
        # Broad catch is intentional here: this is the CLI boundary.
        print(f"Error: {e}")
        sys.exit(1)
|
| 765 |
+
|
| 766 |
+
|
| 767 |
+
if __name__ == "__main__":
    # Argument parsing happens at module scope so cli() can read the shared
    # ``args`` namespace.
    arg_parser = argparse.ArgumentParser(description="Decrypts a MP4 init and media segment using CENC encryption.")
    arg_parser.add_argument("--init", help="Path to the init segment file", required=False)
    arg_parser.add_argument("--segment", help="Path to the media segment file", required=False)
    arg_parser.add_argument(
        "--combined_segment", help="Path to the combined init and media segment file", required=False
    )
    arg_parser.add_argument("--key_id", help="Key ID in hexadecimal format", required=True)
    arg_parser.add_argument("--key", help="Key in hexadecimal format", required=True)
    arg_parser.add_argument("--output", help="Path to the output file", required=True)
    args = arg_parser.parse_args()
    cli()
|
mediaflow_proxy/handlers.py
ADDED
|
@@ -0,0 +1,268 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import base64
|
| 2 |
+
import logging
|
| 3 |
+
from ipaddress import ip_address
|
| 4 |
+
|
| 5 |
+
import httpx
|
| 6 |
+
from fastapi import Request, Response, HTTPException
|
| 7 |
+
from fastapi.responses import StreamingResponse
|
| 8 |
+
from pydantic import HttpUrl
|
| 9 |
+
from starlette.background import BackgroundTask
|
| 10 |
+
|
| 11 |
+
from .configs import settings
|
| 12 |
+
from .mpd_processor import process_manifest, process_playlist, process_segment
|
| 13 |
+
from .utils.cache_utils import get_cached_mpd, get_cached_init_segment
|
| 14 |
+
from .utils.http_utils import Streamer, DownloadError, download_file_with_retry, request_with_retry
|
| 15 |
+
from .utils.m3u8_processor import M3U8Processor
|
| 16 |
+
from .utils.mpd_utils import pad_base64
|
| 17 |
+
|
| 18 |
+
logger = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
async def handle_hls_stream_proxy(request: Request, destination: str, headers: dict, key_url: HttpUrl = None):
    """
    Handles the HLS stream proxy request, fetching and processing the m3u8 playlist or streaming the content.

    Args:
        request (Request): The incoming HTTP request.
        destination (str): The destination URL to fetch the content from.
        headers (dict): The headers to include in the request.
        key_url (HttpUrl, optional): The HLS Key URL to replace the original key URL. Defaults to None.

    Returns:
        Response: The HTTP response with the processed m3u8 playlist or streamed content.
    """
    # Heuristic: treat .m3u/.m3u8 extensions or an mpegurl Accept header as a playlist.
    looks_like_playlist = destination.endswith((".m3u", ".m3u8")) or "mpegurl" in headers.get("accept", "").lower()
    try:
        if looks_like_playlist:
            return await fetch_and_process_m3u8(destination, headers, request, key_url)
        return await handle_stream_request(request.method, destination, headers)
    except httpx.HTTPStatusError as e:
        logger.error(f"HTTP error while fetching m3u8: {e}")
        return Response(status_code=e.response.status_code, content=str(e))
    except Exception as e:
        logger.exception(f"Error in live_stream_proxy: {str(e)}")
        return Response(status_code=500, content=f"Internal server error: {str(e)}")
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
async def proxy_stream(method: str, video_url: str, headers: dict):
    """
    Proxies the stream request to the given video URL.

    Thin convenience wrapper that delegates straight to handle_stream_request.

    Args:
        method (str): The HTTP method (e.g., GET, HEAD).
        video_url (str): The URL of the video to stream.
        headers (dict): The headers to include in the request.

    Returns:
        Response: The HTTP response with the streamed content.
    """
    return await handle_stream_request(method, video_url, headers)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
async def handle_stream_request(method: str, video_url: str, headers: dict):
    """
    Handles the stream request, fetching the content from the video URL and streaming it.

    The httpx client is closed on every non-streaming exit path; for the
    streaming (GET) path it is closed by the StreamingResponse background task
    once the body has been fully sent.

    Args:
        method (str): The HTTP method (e.g., GET, HEAD).
        video_url (str): The URL of the video to stream.
        headers (dict): The headers to include in the request.

    Returns:
        Response: The HTTP response with the streamed content.
    """
    client = httpx.AsyncClient(
        follow_redirects=True,
        timeout=httpx.Timeout(30.0),
        limits=httpx.Limits(max_keepalive_connections=10, max_connections=20),
        proxy=settings.proxy_url,
    )
    streamer = Streamer(client)
    try:
        response = await streamer.head(video_url, headers)
        if method == "HEAD":
            await streamer.close()
            return Response(headers=response.headers, status_code=response.status_code)
        else:
            return StreamingResponse(
                streamer.stream_content(video_url, headers),
                headers=response.headers,
                background=BackgroundTask(streamer.close),
            )
    except httpx.HTTPStatusError as e:
        logger.error(f"Upstream service error while handling {method} request: {e}")
        await client.aclose()
        return Response(status_code=e.response.status_code, content=f"Upstream service error: {e}")
    except DownloadError as e:
        logger.error(f"Error downloading {video_url}: {e}")
        # BUG FIX: this branch previously returned without closing the client,
        # leaking the connection pool on every failed download.
        await client.aclose()
        return Response(status_code=502, content=str(e))
    except Exception as e:
        logger.error(f"Internal server error while handling {method} request: {e}")
        await client.aclose()
        return Response(status_code=502, content=f"Internal server error: {e}")
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
async def fetch_and_process_m3u8(url: str, headers: dict, request: Request, key_url: HttpUrl = None):
    """
    Fetches and processes the m3u8 playlist, converting it to an HLS playlist.

    Args:
        url (str): The URL of the m3u8 playlist.
        headers (dict): The headers to include in the request.
        request (Request): The incoming HTTP request.
        key_url (HttpUrl, optional): The HLS Key URL to replace the original key URL. Defaults to None.

    Returns:
        Response: The HTTP response with the processed m3u8 playlist.
    """
    client_options = dict(
        follow_redirects=True,
        timeout=httpx.Timeout(30.0),
        limits=httpx.Limits(max_keepalive_connections=10, max_connections=20),
        proxy=settings.proxy_url,
    )
    async with httpx.AsyncClient(**client_options) as client:
        try:
            downloader = Streamer(client)
            playlist_text = await downloader.get_text(url, headers)
            # Rewrite URIs relative to the final (post-redirect) URL.
            rewritten = await M3U8Processor(request, key_url).process_m3u8(
                playlist_text, str(downloader.response.url)
            )
            return Response(
                content=rewritten,
                media_type="application/vnd.apple.mpegurl",
                headers={
                    "Content-Disposition": "inline",
                    "Accept-Ranges": "none",
                },
            )
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error while fetching m3u8: {e}")
            return Response(status_code=e.response.status_code, content=str(e))
        except DownloadError as e:
            logger.error(f"Error downloading m3u8: {url}")
            return Response(status_code=502, content=str(e))
        except Exception as e:
            logger.exception(f"Unexpected error while processing m3u8: {e}")
            return Response(status_code=502, content=str(e))
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
async def handle_drm_key_data(key_id, key, drm_info):
    """
    Handles the DRM key data, retrieving the key ID and key from the DRM info if not provided.

    Args:
        key_id (str): The DRM key ID.
        key (str): The DRM key.
        drm_info (dict | None): The DRM information from the MPD manifest.

    Returns:
        tuple: The key ID and key, or (None, None) for non-protected content.

    Raises:
        HTTPException: If the keys cannot be determined from the DRM info.
    """
    # Robustness fix: a None drm_info previously crashed with TypeError on the
    # membership tests below; normalize to an empty dict so the caller gets a
    # clean 400 instead.
    drm_info = drm_info or {}
    if drm_info and not drm_info.get("isDrmProtected"):
        return None, None

    if not key_id or not key:
        if "keyId" in drm_info and "key" in drm_info:
            key_id = drm_info["keyId"]
            key = drm_info["key"]
        elif "laUrl" in drm_info and "keyId" in drm_info:
            raise HTTPException(status_code=400, detail="LA URL is not supported yet")
        else:
            raise HTTPException(
                status_code=400, detail="Unable to determine key_id and key, and they were not provided"
            )

    return key_id, key
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
async def get_manifest(request: Request, mpd_url: str, headers: dict, key_id: str = None, key: str = None):
    """
    Retrieves and processes the MPD manifest, converting it to an HLS manifest.

    Args:
        request (Request): The incoming HTTP request.
        mpd_url (str): The URL of the MPD manifest.
        headers (dict): The headers to include in the request.
        key_id (str, optional): The DRM key ID. Defaults to None.
        key (str, optional): The DRM key. Defaults to None.

    Returns:
        Response: The HTTP response with the HLS manifest.

    Raises:
        HTTPException: If the MPD download fails or DRM keys cannot be resolved.
    """
    try:
        # Parse DRM info out of the MPD only when the caller supplied no keys.
        mpd_dict = await get_cached_mpd(mpd_url, headers=headers, parse_drm=not key_id and not key)
    except DownloadError as e:
        raise HTTPException(status_code=e.status_code, detail=f"Failed to download MPD: {e.message}")
    drm_info = mpd_dict.get("drmInfo", {})

    if drm_info and not drm_info.get("isDrmProtected"):
        # For non-DRM protected MPD, we still create an HLS manifest
        return await process_manifest(request, mpd_dict, None, None)

    key_id, key = await handle_drm_key_data(key_id, key, drm_info)

    # check if the provided key_id and key are valid
    # 32 hex chars == 16 bytes; any other length is assumed to be (url-safe)
    # base64 and is normalized to hex.
    if key_id and len(key_id) != 32:
        key_id = base64.urlsafe_b64decode(pad_base64(key_id)).hex()
    if key and len(key) != 32:
        key = base64.urlsafe_b64decode(pad_base64(key)).hex()

    return await process_manifest(request, mpd_dict, key_id, key)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
async def get_playlist(
    request: Request, mpd_url: str, profile_id: str, headers: dict, key_id: str = None, key: str = None
):
    """
    Retrieves and processes the MPD manifest, converting it to an HLS playlist for a specific profile.

    Args:
        request (Request): The incoming HTTP request.
        mpd_url (str): The URL of the MPD manifest.
        profile_id (str): The profile ID to generate the playlist for.
        headers (dict): The headers to include in the request.
        key_id (str, optional): The DRM key ID. Defaults to None.
        key (str, optional): The DRM key. Defaults to None.

    Returns:
        Response: The HTTP response with the HLS playlist.
    """
    # Only parse DRM info from the MPD when the caller supplied no keys.
    parse_drm = not (key_id or key)
    mpd_dict = await get_cached_mpd(
        mpd_url, headers=headers, parse_drm=parse_drm, parse_segment_profile_id=profile_id
    )
    return await process_playlist(request, mpd_dict, profile_id)
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
async def get_segment(
    init_url: str, segment_url: str, mimetype: str, headers: dict, key_id: str = None, key: str = None
):
    """
    Retrieves and processes a media segment, decrypting it if necessary.

    Args:
        init_url (str): The URL of the initialization segment.
        segment_url (str): The URL of the media segment.
        mimetype (str): The MIME type of the segment.
        headers (dict): The headers to include in the request.
        key_id (str, optional): The DRM key ID. Defaults to None.
        key (str, optional): The DRM key. Defaults to None.

    Returns:
        Response: The HTTP response with the processed segment.

    Raises:
        HTTPException: If either download fails.
    """
    try:
        # Init segments are cached; media segments are fetched fresh with retries.
        initialization = await get_cached_init_segment(init_url, headers)
        media_data = await download_file_with_retry(segment_url, headers)
    except DownloadError as err:
        raise HTTPException(status_code=err.status_code, detail=f"Failed to download segment: {err.message}")
    return await process_segment(initialization, media_data, mimetype, key_id, key)
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
async def get_public_ip():
    """
    Retrieves the public IP address of the MediaFlow proxy.

    Returns:
        dict: The parsed JSON payload containing the public IP address.
    """
    # ipify echoes back the caller's public address as JSON.
    response = await request_with_retry("GET", "https://api.ipify.org?format=json", {})
    return response.json()
|
mediaflow_proxy/main.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
|
| 3 |
+
from fastapi import FastAPI, Depends, Security, HTTPException
|
| 4 |
+
from fastapi.security import APIKeyQuery, APIKeyHeader
|
| 5 |
+
from starlette.responses import RedirectResponse
|
| 6 |
+
from starlette.staticfiles import StaticFiles
|
| 7 |
+
|
| 8 |
+
from mediaflow_proxy.configs import settings
|
| 9 |
+
from mediaflow_proxy.routes import proxy_router
|
| 10 |
+
|
| 11 |
+
# Root logger configuration for the whole proxy application.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
app = FastAPI()
# The API password may arrive as a query parameter or a header;
# auto_error=False so a missing credential falls through to verify_api_key
# instead of failing immediately.
api_password_query = APIKeyQuery(name="api_password", auto_error=False)
api_password_header = APIKeyHeader(name="api_password", auto_error=False)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
async def verify_api_key(api_key: str = Security(api_password_query), api_key_alt: str = Security(api_password_header)):
    """
    Verifies the API key for the request.

    Accepts the password from either the query parameter or the header;
    a single match is sufficient.

    Args:
        api_key (str): The API key to validate.
        api_key_alt (str): The alternative API key to validate.

    Raises:
        HTTPException: If the API key is invalid.
    """
    if settings.api_password not in (api_key, api_key_alt):
        raise HTTPException(status_code=403, detail="Could not validate credentials")
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@app.get("/health")
async def health_check():
    """Liveness probe; reports healthy whenever the process is serving."""
    return {"status": "healthy"}
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
@app.get("/favicon.ico")
async def get_favicon():
    """Serve the site logo as the favicon by redirecting to the static file."""
    return RedirectResponse(url="/logo.png")
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# All /proxy routes require the API password dependency.
app.include_router(proxy_router, prefix="/proxy", tags=["proxy"], dependencies=[Depends(verify_api_key)])
# Mounted last so the catch-all static handler does not shadow the API routes.
app.mount("/", StaticFiles(directory="static", html=True), name="static")


if __name__ == "__main__":
    import uvicorn

    # Local development entry point only; bind stays on localhost.
    uvicorn.run(app, host="127.0.0.1", port=8888)
|
mediaflow_proxy/mpd_processor.py
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import math
|
| 3 |
+
import time
|
| 4 |
+
from datetime import datetime, timezone, timedelta
|
| 5 |
+
|
| 6 |
+
from fastapi import Request, Response, HTTPException
|
| 7 |
+
|
| 8 |
+
from mediaflow_proxy.configs import settings
|
| 9 |
+
from mediaflow_proxy.drm.decrypter import decrypt_segment
|
| 10 |
+
from mediaflow_proxy.utils.http_utils import encode_mediaflow_proxy_url
|
| 11 |
+
|
| 12 |
+
logger = logging.getLogger(__name__)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
async def process_manifest(request: Request, mpd_dict: dict, key_id: str = None, key: str = None) -> Response:
    """
    Convert a parsed MPD manifest into an HLS master manifest response.

    Args:
        request (Request): The incoming HTTP request.
        mpd_dict (dict): The parsed MPD manifest data.
        key_id (str, optional): The DRM key ID. Defaults to None.
        key (str, optional): The DRM key. Defaults to None.

    Returns:
        Response: The HLS manifest as an HTTP response.
    """
    manifest = build_hls(mpd_dict, request, key_id, key)
    return Response(content=manifest, media_type="application/vnd.apple.mpegurl")
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
async def process_playlist(request: Request, mpd_dict: dict, profile_id: str) -> Response:
    """
    Build an HLS media playlist for a single profile of a parsed MPD manifest.

    Args:
        request (Request): The incoming HTTP request.
        mpd_dict (dict): The parsed MPD manifest data.
        profile_id (str): The profile ID to generate the playlist for.

    Returns:
        Response: The HLS playlist as an HTTP response.

    Raises:
        HTTPException: If no profile with ``profile_id`` exists in the manifest.
    """
    selected = [profile for profile in mpd_dict["profiles"] if profile["id"] == profile_id]
    if not selected:
        raise HTTPException(status_code=404, detail="Profile not found")

    playlist = build_hls_playlist(mpd_dict, selected, request)
    return Response(content=playlist, media_type="application/vnd.apple.mpegurl")
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
async def process_segment(
    init_content: bytes,
    segment_content: bytes,
    mimetype: str,
    key_id: str = None,
    key: str = None,
) -> Response:
    """
    Assemble (and, for DRM content, decrypt) a media segment.

    Args:
        init_content (bytes): The initialization segment content.
        segment_content (bytes): The media segment content.
        mimetype (str): The MIME type of the segment.
        key_id (str, optional): The DRM key ID. Defaults to None.
        key (str, optional): The DRM key. Defaults to None.

    Returns:
        Response: The (decrypted) segment as an HTTP response.
    """
    if key_id and key:
        # DRM-protected content: decrypt init + media together and log timing.
        started = time.time()
        payload = decrypt_segment(init_content, segment_content, key_id, key)
        logger.info(f"Decryption of {mimetype} segment took {time.time() - started:.4f} seconds")
    else:
        # Clear content only needs the init segment prepended.
        payload = init_content + segment_content

    return Response(content=payload, media_type=mimetype)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def build_hls(mpd_dict: dict, request: Request, key_id: str = None, key: str = None) -> str:
    """
    Build an HLS master manifest string from a parsed MPD manifest.

    Args:
        mpd_dict (dict): The parsed MPD manifest data.
        request (Request): The incoming HTTP request.
        key_id (str, optional): The DRM key ID. Defaults to None.
        key (str, optional): The DRM key. Defaults to None.

    Returns:
        str: The HLS master manifest as a string.
    """
    lines = ["#EXTM3U", "#EXT-X-VERSION:6"]
    params = dict(request.query_params)

    video_profiles = {}
    audio_profiles = {}
    for profile in mpd_dict["profiles"]:
        # Each profile gets its own proxied playlist URL carrying the DRM keys.
        params["profile_id"] = profile["id"]
        params["key_id"] = key_id or ""
        params["key"] = key or ""
        playlist_url = encode_mediaflow_proxy_url(
            str(request.url_for("playlist_endpoint")),
            query_params=params,
        )

        if "video" in profile["mimeType"]:
            video_profiles[profile["id"]] = (profile, playlist_url)
        elif "audio" in profile["mimeType"]:
            audio_profiles[profile["id"]] = (profile, playlist_url)

    # Audio renditions: the first one becomes the default track.
    for index, (profile, playlist_url) in enumerate(audio_profiles.values()):
        default_flag = "YES" if index == 0 else "NO"
        lines.append(
            f'#EXT-X-MEDIA:TYPE=AUDIO,GROUP-ID="audio",NAME="{profile["id"]}",DEFAULT={default_flag},'
            f'AUTOSELECT={default_flag},LANGUAGE="{profile.get("lang", "und")}",URI="{playlist_url}"'
        )

    # Video variants, each referencing the shared "audio" group.
    for profile, playlist_url in video_profiles.values():
        lines.append(
            f'#EXT-X-STREAM-INF:BANDWIDTH={profile["bandwidth"]},RESOLUTION={profile["width"]}x{profile["height"]},'
            f'CODECS="{profile["codecs"]}",FRAME-RATE={profile["frameRate"]},AUDIO="audio"'
        )
        lines.append(playlist_url)

    return "\n".join(lines)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def build_hls_playlist(mpd_dict: dict, profiles: list[dict], request: Request) -> str:
    """
    Builds an HLS media playlist from the MPD manifest for specific profiles.

    Args:
        mpd_dict (dict): The MPD manifest data.
        profiles (list[dict]): The profiles to include in the playlist.
        request (Request): The incoming HTTP request.

    Returns:
        str: The HLS playlist as a string.
    """
    hls = ["#EXTM3U", "#EXT-X-VERSION:6"]

    added_segments = 0
    current_time = datetime.now(timezone.utc)
    # Segments newer than (now - delay) are withheld for live streams so the
    # playlist stays a safe distance behind the live edge.
    live_stream_delay = timedelta(seconds=settings.mpd_live_stream_delay)
    target_end_time = current_time - live_stream_delay
    for index, profile in enumerate(profiles):
        segments = profile["segments"]
        if not segments:
            logger.warning(f"No segments found for profile {profile['id']}")
            continue

        # Playlist-wide header tags come from the first profile only.
        if index == 0:
            sequence = segments[0]["number"]
            extinf_values = [f["extinf"] for f in segments if "extinf" in f]
            # Fall back to 3 seconds when no segment advertises a duration.
            target_duration = math.ceil(max(extinf_values)) if extinf_values else 3
            hls.extend(
                [
                    f"#EXT-X-TARGETDURATION:{target_duration}",
                    f"#EXT-X-MEDIA-SEQUENCE:{sequence}",
                ]
            )
            if mpd_dict["isLive"]:
                hls.append("#EXT-X-PLAYLIST-TYPE:EVENT")
            else:
                hls.append("#EXT-X-PLAYLIST-TYPE:VOD")

        init_url = profile["initUrl"]

        # Forward the caller's query params, minus the ones re-set per segment.
        query_params = dict(request.query_params)
        query_params.pop("profile_id", None)
        query_params.pop("d", None)

        for segment in segments:
            if mpd_dict["isLive"]:
                # Skip segments that end after the delayed live edge.
                if segment["end_time"] > target_end_time:
                    continue
                hls.append(f"#EXT-X-PROGRAM-DATE-TIME:{segment['program_date_time']}")
            hls.append(f'#EXTINF:{segment["extinf"]:.3f},')
            query_params.update(
                {"init_url": init_url, "segment_url": segment["media"], "mime_type": profile["mimeType"]}
            )
            hls.append(
                encode_mediaflow_proxy_url(
                    str(request.url_for("segment_endpoint")),
                    query_params=query_params,
                )
            )
            added_segments += 1

    if not mpd_dict["isLive"]:
        hls.append("#EXT-X-ENDLIST")

    logger.info(f"Added {added_segments} segments to HLS playlist")
    return "\n".join(hls)
|
mediaflow_proxy/routes.py
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import Request, Depends, APIRouter
|
| 2 |
+
from pydantic import HttpUrl
|
| 3 |
+
|
| 4 |
+
from .handlers import handle_hls_stream_proxy, proxy_stream, get_manifest, get_playlist, get_segment, get_public_ip
|
| 5 |
+
|
| 6 |
+
proxy_router = APIRouter()
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def get_proxy_headers(request: Request) -> dict:
    """
    Collect upstream headers encoded in the request's query string.

    Any query parameter named ``h_<header>`` becomes a ``<header>`` entry in
    the returned mapping; all other parameters are ignored.

    Args:
        request (Request): The incoming HTTP request.

    Returns:
        dict: A dictionary of proxy headers.
    """
    prefix = "h_"
    headers = {}
    for name, value in request.query_params.items():
        if name.startswith(prefix):
            headers[name[len(prefix):]] = value
    return headers
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@proxy_router.head("/hls")
@proxy_router.get("/hls")
async def hls_stream_proxy(
    request: Request,
    d: HttpUrl,
    headers: dict = Depends(get_proxy_headers),
    key_url: HttpUrl | None = None,
):
    """
    Proxy an HLS stream: fetch and rewrite the m3u8 playlist, or relay the
    media content itself.

    Args:
        request (Request): The incoming HTTP request.
        d (HttpUrl): The destination URL to fetch the content from.
        headers (dict): The headers to include in the upstream request.
        key_url (HttpUrl, optional): The HLS key URL to replace the original key URL.
            Defaults to None. (Useful for bypassing some sneaky protection.)

    Returns:
        Response: The processed m3u8 playlist or the streamed content.
    """
    target = str(d)
    return await handle_hls_stream_proxy(request, target, headers, key_url)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
@proxy_router.head("/stream")
@proxy_router.get("/stream")
async def proxy_stream_endpoint(request: Request, d: HttpUrl, headers: dict = Depends(get_proxy_headers)):
    """
    Proxy a direct media stream request to the given video URL.

    Args:
        request (Request): The incoming HTTP request.
        d (HttpUrl): The URL of the video to stream.
        headers (dict): The headers to include in the upstream request.

    Returns:
        Response: The HTTP response with the streamed content.
    """
    # Ask for the full byte range unless the client already forwarded one.
    headers.setdefault("range", "bytes=0-")
    return await proxy_stream(request.method, str(d), headers)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
@proxy_router.get("/mpd/manifest")
async def manifest_endpoint(
    request: Request,
    d: HttpUrl,
    headers: dict = Depends(get_proxy_headers),
    key_id: str = None,
    key: str = None,
):
    """
    Fetch an MPD manifest and return it converted to an HLS master manifest.

    Args:
        request (Request): The incoming HTTP request.
        d (HttpUrl): The URL of the MPD manifest.
        headers (dict): The headers to include in the upstream request.
        key_id (str, optional): The DRM key ID. Defaults to None.
        key (str, optional): The DRM key. Defaults to None.

    Returns:
        Response: The HTTP response with the HLS manifest.
    """
    mpd_url = str(d)
    return await get_manifest(request, mpd_url, headers, key_id, key)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
@proxy_router.get("/mpd/playlist")
async def playlist_endpoint(
    request: Request,
    d: HttpUrl,
    profile_id: str,
    headers: dict = Depends(get_proxy_headers),
    key_id: str = None,
    key: str = None,
):
    """
    Fetch an MPD manifest and return an HLS media playlist for one profile.

    Args:
        request (Request): The incoming HTTP request.
        d (HttpUrl): The URL of the MPD manifest.
        profile_id (str): The profile ID to generate the playlist for.
        headers (dict): The headers to include in the upstream request.
        key_id (str, optional): The DRM key ID. Defaults to None.
        key (str, optional): The DRM key. Defaults to None.

    Returns:
        Response: The HTTP response with the HLS playlist.
    """
    mpd_url = str(d)
    return await get_playlist(request, mpd_url, profile_id, headers, key_id, key)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
@proxy_router.get("/mpd/segment")
async def segment_endpoint(
    init_url: HttpUrl,
    segment_url: HttpUrl,
    mime_type: str,
    headers: dict = Depends(get_proxy_headers),
    key_id: str = None,
    key: str = None,
):
    """
    Fetch a media segment (plus its init segment) and return it, decrypted
    when DRM keys are supplied.

    Args:
        init_url (HttpUrl): The URL of the initialization segment.
        segment_url (HttpUrl): The URL of the media segment.
        mime_type (str): The MIME type of the segment.
        headers (dict): The headers to include in the upstream request.
        key_id (str, optional): The DRM key ID. Defaults to None.
        key (str, optional): The DRM key. Defaults to None.

    Returns:
        Response: The HTTP response with the processed segment.
    """
    init_target = str(init_url)
    segment_target = str(segment_url)
    return await get_segment(init_target, segment_target, mime_type, headers, key_id, key)
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
@proxy_router.get("/ip")
async def get_mediaflow_proxy_public_ip():
    """
    Report the public IP address the MediaFlow proxy server egresses from.

    Returns:
        Response: JSON object of the form ``{"ip": "xxx.xxx.xxx.xxx"}``.
    """
    return await get_public_ip()
|
mediaflow_proxy/utils/__init__.py
ADDED
|
File without changes
|
mediaflow_proxy/utils/cache_utils.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import datetime
|
| 2 |
+
import logging
|
| 3 |
+
|
| 4 |
+
from cachetools import TTLCache
|
| 5 |
+
|
| 6 |
+
from .http_utils import download_file_with_retry
|
| 7 |
+
from .mpd_utils import parse_mpd, parse_mpd_dict
|
| 8 |
+
|
| 9 |
+
logger = logging.getLogger(__name__)
|
| 10 |
+
|
| 11 |
+
# cache dictionary
|
| 12 |
+
mpd_cache = TTLCache(maxsize=100, ttl=300) # 5 minutes default TTL
|
| 13 |
+
init_segment_cache = TTLCache(maxsize=100, ttl=3600) # 1 hour default TTL
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
async def get_cached_mpd(
    mpd_url: str, headers: dict, parse_drm: bool, parse_segment_profile_id: str | None = None
) -> dict:
    """
    Return the parsed MPD manifest for ``mpd_url``, downloading only when the
    cached copy is missing or past its own expiry timestamp.

    Args:
        mpd_url (str): The URL of the MPD manifest.
        headers (dict): The headers to include in the request.
        parse_drm (bool): Whether to parse DRM information.
        parse_segment_profile_id (str, optional): The profile ID to parse segments for.
            Defaults to None.

    Returns:
        dict: The parsed MPD manifest data.
    """
    cached = mpd_cache.get(mpd_url)
    if cached is not None and cached["expires"] > datetime.datetime.now(datetime.UTC):
        logger.info(f"Using cached MPD for {mpd_url}")
        # Re-parse from the cached raw manifest so per-call options apply.
        return parse_mpd_dict(cached["mpd"], mpd_url, parse_drm, parse_segment_profile_id)

    raw_mpd = parse_mpd(await download_file_with_retry(mpd_url, headers))
    parsed = parse_mpd_dict(raw_mpd, mpd_url, parse_drm, parse_segment_profile_id)
    # Honour the manifest's own refresh interval, defaulting to 5 minutes.
    ttl_seconds = parsed.get("minimumUpdatePeriod", 300)
    expires_at = datetime.datetime.now(datetime.UTC) + datetime.timedelta(seconds=ttl_seconds)
    mpd_cache[mpd_url] = {"mpd": raw_mpd, "expires": expires_at}
    return parsed
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
async def get_cached_init_segment(init_url: str, headers: dict) -> bytes:
    """
    Retrieve the initialization segment for ``init_url``, caching downloads.

    Args:
        init_url (str): The URL of the initialization segment.
        headers (dict): The headers to include in the request.

    Returns:
        bytes: The initialization segment content.
    """
    # Return the locally held value rather than re-reading the cache: with a
    # TTLCache an eviction between the store and a second lookup would raise
    # KeyError. A single get() also avoids the membership-check double lookup.
    init_content = init_segment_cache.get(init_url)
    if init_content is None:
        init_content = await download_file_with_retry(init_url, headers)
        init_segment_cache[init_url] = init_content
    return init_content
|
mediaflow_proxy/utils/http_utils.py
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from urllib import parse
|
| 3 |
+
|
| 4 |
+
import httpx
|
| 5 |
+
import tenacity
|
| 6 |
+
from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
|
| 7 |
+
|
| 8 |
+
from mediaflow_proxy.configs import settings
|
| 9 |
+
|
| 10 |
+
logger = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class DownloadError(Exception):
    """Raised when an upstream download fails; carries an HTTP-style status code."""

    def __init__(self, status_code, message):
        """
        Args:
            status_code (int): Status code describing the failure.
            message (str): Human-readable error description.
        """
        super().__init__(message)
        self.status_code = status_code
        self.message = message
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    retry=retry_if_exception_type(DownloadError),
)
async def fetch_with_retry(client, method, url, headers, follow_redirects=True, **kwargs):
    """
    Fetches a URL, retrying up to 3 times (exponential backoff) on DownloadError.

    Args:
        client (httpx.AsyncClient): The HTTP client to use for the request.
        method (str): The HTTP method to use (e.g., GET, POST).
        url (str): The URL to fetch.
        headers (dict): The headers to include in the request.
        follow_redirects (bool, optional): Whether to follow redirects. Defaults to True.
        **kwargs: Additional arguments to pass to the request.

    Returns:
        httpx.Response: The HTTP response.

    Raises:
        DownloadError: On timeout or non-2xx status (retried by the decorator;
            surfaces as tenacity.RetryError once attempts are exhausted).
    """
    try:
        response = await client.request(method, url, headers=headers, follow_redirects=follow_redirects, **kwargs)
        response.raise_for_status()
        return response
    except httpx.TimeoutException:
        logger.warning(f"Timeout while downloading {url}")
        # NOTE(review): 409 is "Conflict"; a timeout is conventionally reported
        # as 504 — confirm before changing, callers may match on 409.
        raise DownloadError(409, f"Timeout while downloading {url}")
    except httpx.HTTPStatusError as e:
        logger.error(f"HTTP error {e.response.status_code} while downloading {url}")
        raise DownloadError(e.response.status_code, f"HTTP error {e.response.status_code} while downloading {url}")
    except Exception as e:
        # Unexpected errors are logged and re-raised untouched (not retried).
        logger.error(f"Error downloading {url}: {e}")
        raise
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
class Streamer:
    # Thin wrapper around an httpx.AsyncClient that remembers the last
    # response so close() can release both response and client.

    def __init__(self, client):
        """
        Initializes the Streamer with an HTTP client.

        Args:
            client (httpx.AsyncClient): The HTTP client to use for streaming.
        """
        self.client = client
        # Most recent upstream response; set by the request methods below.
        self.response = None

    async def stream_content(self, url: str, headers: dict):
        """
        Streams content from a URL.

        Args:
            url (str): The URL to stream content from.
            headers (dict): The headers to include in the request.

        Yields:
            bytes: Chunks of the streamed content.
        """
        # The streamed response is stored on self so close() can reach it if
        # the generator is abandoned before the context manager exits.
        async with self.client.stream("GET", url, headers=headers, follow_redirects=True) as self.response:
            self.response.raise_for_status()
            async for chunk in self.response.aiter_bytes():
                yield chunk

    async def head(self, url: str, headers: dict):
        """
        Sends a HEAD request to a URL.

        Args:
            url (str): The URL to send the HEAD request to.
            headers (dict): The headers to include in the request.

        Returns:
            httpx.Response: The HTTP response.
        """
        try:
            self.response = await fetch_with_retry(self.client, "HEAD", url, headers)
        except tenacity.RetryError as e:
            # Unwrap the final attempt's exception; result() re-raises it, and
            # that exception (not RetryError) is what propagates to the caller.
            raise e.last_attempt.result()
        return self.response

    async def get_text(self, url: str, headers: dict):
        """
        Sends a GET request to a URL and returns the response text.

        Args:
            url (str): The URL to send the GET request to.
            headers (dict): The headers to include in the request.

        Returns:
            str: The response text.
        """
        try:
            self.response = await fetch_with_retry(self.client, "GET", url, headers)
        except tenacity.RetryError as e:
            # Same unwrapping as head(): surface the underlying failure.
            raise e.last_attempt.result()
        return self.response.text

    async def close(self):
        """
        Closes the HTTP client and response.
        """
        if self.response:
            await self.response.aclose()
        await self.client.aclose()
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
async def download_file_with_retry(url: str, headers: dict, timeout: float = 10.0):
    """
    Downloads a file with retry logic.

    Args:
        url (str): The URL of the file to download.
        headers (dict): The headers to include in the request.
        timeout (float, optional): The request timeout. Defaults to 10.0.

    Returns:
        bytes: The downloaded file content.

    Raises:
        DownloadError: If the download fails after retries.
    """
    # A fresh client per download; settings.proxy_url routes traffic through
    # the configured outbound proxy (None disables it).
    async with httpx.AsyncClient(follow_redirects=True, timeout=timeout, proxy=settings.proxy_url) as client:
        try:
            response = await fetch_with_retry(client, "GET", url, headers)
            return response.content
        except DownloadError as e:
            logger.error(f"Failed to download file: {e}")
            raise e
        except tenacity.RetryError as e:
            # NOTE(review): last_attempt.result() re-raises the underlying
            # exception while the f-string is being built, so the original
            # error (not this 502 DownloadError) is what propagates — confirm
            # whether wrapping in 502 was the intended behavior.
            raise DownloadError(502, f"Failed to download file: {e.last_attempt.result()}")
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
async def request_with_retry(method: str, url: str, headers: dict, timeout: float = 10.0, **kwargs):
    """
    Send an HTTP request through a short-lived client, with retry logic.

    Args:
        method (str): The HTTP method to use (e.g., GET, POST).
        url (str): The URL to send the request to.
        headers (dict): The headers to include in the request.
        timeout (float, optional): The request timeout. Defaults to 10.0.
        **kwargs: Additional arguments to pass to the request.

    Returns:
        httpx.Response: The HTTP response.

    Raises:
        DownloadError: If the request fails after retries.
    """
    async with httpx.AsyncClient(follow_redirects=True, timeout=timeout, proxy=settings.proxy_url) as client:
        try:
            return await fetch_with_retry(client, method, url, headers, **kwargs)
        except DownloadError as err:
            # Log at this boundary, then let the caller decide how to react.
            logger.error(f"Failed to download file: {err}")
            raise
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def encode_mediaflow_proxy_url(
|
| 184 |
+
mediaflow_proxy_url: str,
|
| 185 |
+
endpoint: str | None = None,
|
| 186 |
+
destination_url: str | None = None,
|
| 187 |
+
query_params: dict | None = None,
|
| 188 |
+
request_headers: dict | None = None,
|
| 189 |
+
) -> str:
|
| 190 |
+
"""
|
| 191 |
+
Encodes a MediaFlow proxy URL with query parameters and headers.
|
| 192 |
+
|
| 193 |
+
Args:
|
| 194 |
+
mediaflow_proxy_url (str): The base MediaFlow proxy URL.
|
| 195 |
+
endpoint (str, optional): The endpoint to append to the base URL. Defaults to None.
|
| 196 |
+
destination_url (str, optional): The destination URL to include in the query parameters. Defaults to None.
|
| 197 |
+
query_params (dict, optional): Additional query parameters to include. Defaults to None.
|
| 198 |
+
request_headers (dict, optional): Headers to include as query parameters. Defaults to None.
|
| 199 |
+
|
| 200 |
+
Returns:
|
| 201 |
+
str: The encoded MediaFlow proxy URL.
|
| 202 |
+
"""
|
| 203 |
+
query_params = query_params or {}
|
| 204 |
+
if destination_url is not None:
|
| 205 |
+
query_params["d"] = destination_url
|
| 206 |
+
|
| 207 |
+
# Add headers if provided
|
| 208 |
+
if request_headers:
|
| 209 |
+
query_params.update(
|
| 210 |
+
{key if key.startswith("h_") else f"h_{key}": value for key, value in request_headers.items()}
|
| 211 |
+
)
|
| 212 |
+
# Encode the query parameters
|
| 213 |
+
encoded_params = parse.urlencode(query_params, quote_via=parse.quote)
|
| 214 |
+
|
| 215 |
+
# Construct the full URL
|
| 216 |
+
if endpoint is None:
|
| 217 |
+
return f"{mediaflow_proxy_url}?{encoded_params}"
|
| 218 |
+
|
| 219 |
+
base_url = parse.urljoin(mediaflow_proxy_url, endpoint)
|
| 220 |
+
return f"{base_url}?{encoded_params}"
|
mediaflow_proxy/utils/m3u8_processor.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
from urllib import parse
|
| 3 |
+
|
| 4 |
+
from pydantic import HttpUrl
|
| 5 |
+
|
| 6 |
+
from mediaflow_proxy.utils.http_utils import encode_mediaflow_proxy_url
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class M3U8Processor:
    # Rewrites an m3u8 playlist so every segment and key URI points back
    # through the MediaFlow proxy.

    def __init__(self, request, key_url: HttpUrl = None):
        """
        Initializes the M3U8Processor with the request and URL prefix.

        Args:
            request (Request): The incoming HTTP request.
            key_url (HttpUrl, optional): The URL of the key server. Defaults to None.
        """
        self.request = request
        self.key_url = key_url

    async def process_m3u8(self, content: str, base_url: str) -> str:
        """
        Processes the m3u8 content, proxying URLs and handling key lines.

        Args:
            content (str): The m3u8 content to process.
            base_url (str): The base URL to resolve relative URLs.

        Returns:
            str: The processed m3u8 content.
        """
        lines = content.splitlines()
        processed_lines = []
        for line in lines:
            if "URI=" in line:
                # Tag lines with an embedded URI (e.g. #EXT-X-KEY) need only
                # the URI attribute rewritten, not the whole line.
                processed_lines.append(await self.process_key_line(line, base_url))
            elif not line.startswith("#") and line.strip():
                # Non-comment, non-blank lines are segment/sub-playlist URLs.
                processed_lines.append(await self.proxy_url(line, base_url))
            else:
                processed_lines.append(line)
        return "\n".join(processed_lines)

    async def process_key_line(self, line: str, base_url: str) -> str:
        """
        Processes a key line in the m3u8 content, proxying the URI.

        Args:
            line (str): The key line to process.
            base_url (str): The base URL to resolve relative URLs.

        Returns:
            str: The processed key line.
        """
        uri_match = re.search(r'URI="([^"]+)"', line)
        if uri_match:
            original_uri = uri_match.group(1)
            uri = parse.urlparse(original_uri)
            if self.key_url:
                # NOTE(review): key_url.host drops any explicit port from the
                # replacement key server URL — confirm that key servers on
                # non-default ports are not expected here.
                uri = uri._replace(scheme=self.key_url.scheme, netloc=self.key_url.host)
            new_uri = await self.proxy_url(uri.geturl(), base_url)
            line = line.replace(f'URI="{original_uri}"', f'URI="{new_uri}"')
        return line

    async def proxy_url(self, url: str, base_url: str) -> str:
        """
        Proxies a URL, encoding it with the MediaFlow proxy URL.

        Args:
            url (str): The URL to proxy (may be relative).
            base_url (str): The base URL to resolve relative URLs.

        Returns:
            str: The proxied URL.
        """
        full_url = parse.urljoin(base_url, url)

        # Forward the caller's query params so headers/keys survive the hop.
        return encode_mediaflow_proxy_url(
            str(self.request.url_for("hls_stream_proxy")),
            "",
            full_url,
            query_params=dict(self.request.query_params),
        )
|
mediaflow_proxy/utils/mpd_utils.py
ADDED
|
@@ -0,0 +1,555 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import math
|
| 3 |
+
import re
|
| 4 |
+
from datetime import datetime, timedelta, timezone
|
| 5 |
+
from typing import List, Dict
|
| 6 |
+
from urllib.parse import urljoin
|
| 7 |
+
|
| 8 |
+
import xmltodict
|
| 9 |
+
|
| 10 |
+
logger = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def parse_mpd(mpd_content: str) -> dict:
    """
    Parses the MPD content into a dictionary.

    Args:
        mpd_content (str): The MPD XML content as a string.

    Returns:
        dict: The parsed MPD content as a dictionary (xmltodict layout:
        attributes are prefixed with "@", text nodes live under "#text").
    """
    # xmltodict yields a bare dict for single child elements and a list for
    # repeated ones; downstream helpers normalize both shapes.
    return xmltodict.parse(mpd_content)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def parse_mpd_dict(
    mpd_dict: dict, mpd_url: str, parse_drm: bool = True, parse_segment_profile_id: str | None = None
) -> dict:
    """
    Parses the MPD dictionary and extracts relevant information.

    Args:
        mpd_dict (dict): The MPD content as a dictionary (output of parse_mpd).
        mpd_url (str): The URL of the MPD manifest.
        parse_drm (bool, optional): Whether to parse DRM information. Defaults to True.
        parse_segment_profile_id (str, optional): The profile ID to parse segments for. Defaults to None.

    Returns:
        dict: The parsed MPD information including profiles and DRM info.

    This function processes the MPD dictionary to extract profiles, DRM information, and other relevant data.
    It handles both live and static MPD manifests.
    """
    profiles = []
    parsed_dict = {}
    # Base URL for resolving relative segment paths: the manifest URL minus its
    # last path component.
    source = "/".join(mpd_url.split("/")[:-1])

    # MPD@type "dynamic" marks a live stream; "static" (the default) a VOD asset.
    is_live = mpd_dict["MPD"].get("@type", "static").lower() == "dynamic"
    parsed_dict["isLive"] = is_live

    media_presentation_duration = mpd_dict["MPD"].get("@mediaPresentationDuration")

    # Parse additional MPD attributes for live streams
    if is_live:
        parsed_dict["minimumUpdatePeriod"] = parse_duration(mpd_dict["MPD"].get("@minimumUpdatePeriod", "PT0S"))
        parsed_dict["timeShiftBufferDepth"] = parse_duration(mpd_dict["MPD"].get("@timeShiftBufferDepth", "PT2M"))
        # NOTE(review): @availabilityStartTime is accessed unconditionally and
        # @publishTime falls back to "" — a dynamic manifest missing either
        # attribute raises (KeyError / ValueError from fromisoformat). Confirm
        # all targeted sources always provide both.
        parsed_dict["availabilityStartTime"] = datetime.fromisoformat(
            mpd_dict["MPD"]["@availabilityStartTime"].replace("Z", "+00:00")
        )
        parsed_dict["publishTime"] = datetime.fromisoformat(
            mpd_dict["MPD"].get("@publishTime", "").replace("Z", "+00:00")
        )

    # xmltodict returns a dict for a single Period and a list for several.
    periods = mpd_dict["MPD"]["Period"]
    periods = periods if isinstance(periods, list) else [periods]

    for period in periods:
        # NOTE(review): "PeriodStart" is overwritten on every iteration, so for
        # multi-period manifests only the last period's start survives — verify
        # multi-period content is out of scope.
        parsed_dict["PeriodStart"] = parse_duration(period.get("@start", "PT0S"))
        for adaptation in period["AdaptationSet"]:
            representations = adaptation["Representation"]
            representations = representations if isinstance(representations, list) else [representations]

            for representation in representations:
                profile = parse_representation(
                    parsed_dict,
                    representation,
                    adaptation,
                    source,
                    media_presentation_duration,
                    parse_segment_profile_id,
                )
                # parse_representation returns None for non-audio/video tracks.
                if profile:
                    profiles.append(profile)
    parsed_dict["profiles"] = profiles

    if parse_drm:
        drm_info = extract_drm_info(periods, mpd_url)
    else:
        drm_info = {}
    parsed_dict["drmInfo"] = drm_info

    return parsed_dict
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def pad_base64(encoded_key_id: str) -> str:
    """
    Pads a base64 encoded key ID to make its length a multiple of 4.

    Args:
        encoded_key_id (str): The base64 encoded key ID, possibly without padding.

    Returns:
        str: The padded base64 encoded key ID (unchanged when already aligned).
    """
    # -len % 4 yields 0 when the length is already a multiple of 4. The previous
    # expression `4 - len % 4` appended four "=" in that case, producing an
    # invalid base64 string.
    return encoded_key_id + "=" * (-len(encoded_key_id) % 4)
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def extract_drm_info(periods: List[Dict], mpd_url: str) -> Dict:
    """
    Collects DRM information (system, pssh, key id, license URL) from MPD periods.

    Args:
        periods (List[Dict]): The list of Period nodes from the parsed MPD.
        mpd_url (str): The manifest URL, used to absolutize relative license URLs.

    Returns:
        Dict: DRM info; always contains "isDrmProtected", plus system-specific
        keys when ContentProtection elements are present.
    """

    def as_list(node):
        # xmltodict yields a bare dict for single children and a list otherwise.
        return node if isinstance(node, list) else [node]

    drm_info = {"isDrmProtected": False}

    for period in periods:
        for adaptation_set in as_list(period.get("AdaptationSet", [])):
            # ContentProtection may appear at AdaptationSet level...
            process_content_protection(adaptation_set.get("ContentProtection", []), drm_info)

            # ...or nested inside each Representation.
            for representation in as_list(adaptation_set.get("Representation", [])):
                process_content_protection(representation.get("ContentProtection", []), drm_info)

    # A relative license acquisition URL is resolved against the manifest URL.
    la_url = drm_info.get("laUrl")
    if la_url is not None and not la_url.startswith(("http://", "https://")):
        drm_info["laUrl"] = urljoin(mpd_url, la_url)

    return drm_info
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def process_content_protection(content_protection: list[dict] | dict, drm_info: dict):
|
| 149 |
+
"""
|
| 150 |
+
Processes the ContentProtection elements to extract DRM information.
|
| 151 |
+
|
| 152 |
+
Args:
|
| 153 |
+
content_protection (list[dict] | dict): The ContentProtection elements.
|
| 154 |
+
drm_info (dict): The dictionary to store DRM information.
|
| 155 |
+
|
| 156 |
+
This function updates the drm_info dictionary with DRM system information found in the ContentProtection elements.
|
| 157 |
+
"""
|
| 158 |
+
if not isinstance(content_protection, list):
|
| 159 |
+
content_protection = [content_protection]
|
| 160 |
+
|
| 161 |
+
for protection in content_protection:
|
| 162 |
+
drm_info["isDrmProtected"] = True
|
| 163 |
+
scheme_id_uri = protection.get("@schemeIdUri", "").lower()
|
| 164 |
+
|
| 165 |
+
if "clearkey" in scheme_id_uri:
|
| 166 |
+
drm_info["drmSystem"] = "clearkey"
|
| 167 |
+
if "clearkey:Laurl" in protection:
|
| 168 |
+
la_url = protection["clearkey:Laurl"].get("#text")
|
| 169 |
+
if la_url and "laUrl" not in drm_info:
|
| 170 |
+
drm_info["laUrl"] = la_url
|
| 171 |
+
|
| 172 |
+
elif "widevine" in scheme_id_uri or "edef8ba9-79d6-4ace-a3c8-27dcd51d21ed" in scheme_id_uri:
|
| 173 |
+
drm_info["drmSystem"] = "widevine"
|
| 174 |
+
pssh = protection.get("cenc:pssh", {}).get("#text")
|
| 175 |
+
if pssh:
|
| 176 |
+
drm_info["pssh"] = pssh
|
| 177 |
+
|
| 178 |
+
elif "playready" in scheme_id_uri or "9a04f079-9840-4286-ab92-e65be0885f95" in scheme_id_uri:
|
| 179 |
+
drm_info["drmSystem"] = "playready"
|
| 180 |
+
|
| 181 |
+
if "@cenc:default_KID" in protection:
|
| 182 |
+
key_id = protection["@cenc:default_KID"].replace("-", "")
|
| 183 |
+
if "keyId" not in drm_info:
|
| 184 |
+
drm_info["keyId"] = key_id
|
| 185 |
+
|
| 186 |
+
if "ms:laurl" in protection:
|
| 187 |
+
la_url = protection["ms:laurl"].get("@licenseUrl")
|
| 188 |
+
if la_url and "laUrl" not in drm_info:
|
| 189 |
+
drm_info["laUrl"] = la_url
|
| 190 |
+
|
| 191 |
+
return drm_info
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def parse_representation(
    parsed_dict: dict,
    representation: dict,
    adaptation: dict,
    source: str,
    media_presentation_duration: str,
    parse_segment_profile_id: str | None,
) -> dict | None:
    """
    Parses a representation and extracts profile information.

    Args:
        parsed_dict (dict): The parsed MPD data.
        representation (dict): The representation data.
        adaptation (dict): The adaptation set data.
        source (str): The source URL.
        media_presentation_duration (str): The media presentation duration.
        parse_segment_profile_id (str, optional): The profile ID to parse segments for. Defaults to None.

    Returns:
        dict | None: The parsed profile information, or None for tracks that are
        neither audio nor video (e.g. subtitles).
    """
    # NOTE(review): when @mimeType is absent this falls back to sniffing
    # representation["@codecs"] — a representation with neither attribute raises
    # KeyError. Confirm targeted manifests always carry one of the two.
    mime_type = _get_key(adaptation, representation, "@mimeType") or (
        "video/mp4" if "avc" in representation["@codecs"] else "audio/mp4"
    )
    if "video" not in mime_type and "audio" not in mime_type:
        return None

    profile = {
        "id": representation.get("@id") or adaptation.get("@id"),
        "mimeType": mime_type,
        "lang": representation.get("@lang") or adaptation.get("@lang"),
        "codecs": representation.get("@codecs") or adaptation.get("@codecs"),
        # NOTE(review): int(None) raises if @bandwidth is missing on both levels.
        "bandwidth": int(representation.get("@bandwidth") or adaptation.get("@bandwidth")),
        "startWithSAP": (_get_key(adaptation, representation, "@startWithSAP") or "1") == "1",
        "mediaPresentationDuration": media_presentation_duration,
    }

    if "audio" in profile["mimeType"]:
        profile["audioSamplingRate"] = representation.get("@audioSamplingRate") or adaptation.get("@audioSamplingRate")
        profile["channels"] = representation.get("AudioChannelConfiguration", {}).get("@value", "2")
    else:
        profile["width"] = int(representation["@width"])
        profile["height"] = int(representation["@height"])
        # Frame rate may be "num/den" or a plain number; normalize to a fraction.
        frame_rate = representation.get("@frameRate") or adaptation.get("@maxFrameRate") or "30000/1001"
        frame_rate = frame_rate if "/" in frame_rate else f"{frame_rate}/1"
        profile["frameRate"] = round(int(frame_rate.split("/")[0]) / int(frame_rate.split("/")[1]), 3)
        profile["sar"] = representation.get("@sar", "1:1")

    # Segments are expensive to expand; only do it for the explicitly requested profile.
    if parse_segment_profile_id is None or profile["id"] != parse_segment_profile_id:
        return profile

    item = adaptation.get("SegmentTemplate") or representation.get("SegmentTemplate")
    if item:
        profile["segments"] = parse_segment_template(parsed_dict, item, profile, source)
    else:
        profile["segments"] = parse_segment_base(representation, source)

    return profile
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
def _get_key(adaptation: dict, representation: dict, key: str) -> str | None:
|
| 256 |
+
"""
|
| 257 |
+
Retrieves a key from the representation or adaptation set.
|
| 258 |
+
|
| 259 |
+
Args:
|
| 260 |
+
adaptation (dict): The adaptation set data.
|
| 261 |
+
representation (dict): The representation data.
|
| 262 |
+
key (str): The key to retrieve.
|
| 263 |
+
|
| 264 |
+
Returns:
|
| 265 |
+
str | None: The value of the key or None if not found.
|
| 266 |
+
"""
|
| 267 |
+
return representation.get(key, adaptation.get(key, None))
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
def parse_segment_template(parsed_dict: dict, item: dict, profile: dict, source: str) -> List[Dict]:
    """
    Extracts segment information from a SegmentTemplate node.

    Args:
        parsed_dict (dict): The parsed MPD data.
        item (dict): The SegmentTemplate node.
        profile (dict): The profile information (mutated: "initUrl" may be added).
        source (str): The base URL used for relative media templates.

    Returns:
        List[Dict]: The parsed segments (empty when neither a SegmentTimeline
        nor an @duration attribute is present).
    """
    timescale = int(item.get("@timescale", 1))

    # Resolve the initialization segment URL, if declared.
    if "@initialization" in item:
        init_url = item["@initialization"]
        init_url = init_url.replace("$RepresentationID$", profile["id"])
        init_url = init_url.replace("$Bandwidth$", str(profile["bandwidth"]))
        if not init_url.startswith("http"):
            init_url = f"{source}/{init_url}"
        profile["initUrl"] = init_url

    # A SegmentTimeline takes precedence over a fixed @duration.
    if "SegmentTimeline" in item:
        return list(parse_segment_timeline(parsed_dict, item, profile, source, timescale))
    if "@duration" in item:
        return list(parse_segment_duration(parsed_dict, item, profile, source, timescale))
    return []
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def parse_segment_timeline(parsed_dict: dict, item: dict, profile: dict, source: str, timescale: int) -> List[Dict]:
    """
    Extracts segments from a SegmentTemplate's SegmentTimeline.

    Args:
        parsed_dict (dict): The parsed MPD data (must contain "availabilityStartTime").
        item (dict): The SegmentTemplate node holding the SegmentTimeline.
        profile (dict): The profile information.
        source (str): The base URL used for relative media templates.
        timescale (int): Units-per-second timescale of the timeline.

    Returns:
        List[Dict]: The parsed segments.
    """
    entries = item["SegmentTimeline"]["S"]
    if not isinstance(entries, list):
        entries = [entries]

    # NOTE(review): "availabilityStartTime" is only populated for dynamic
    # manifests — confirm static manifests never reach this code path.
    period_start = parsed_dict["availabilityStartTime"] + timedelta(seconds=parsed_dict.get("PeriodStart", 0))
    time_offset = int(item.get("@presentationTimeOffset", 0))
    first_number = int(item.get("@startNumber", 1))

    expanded = preprocess_timeline(entries, first_number, period_start, time_offset, timescale)
    return [create_segment_data(entry, item, profile, source, timescale) for entry in expanded]
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
def preprocess_timeline(
    timelines: List[Dict], start_number: int, period_start: datetime, presentation_time_offset: int, timescale: int
) -> List[Dict]:
    """
    Expands SegmentTimeline S entries (with @r repeats) into one record per segment.

    Args:
        timelines (List[Dict]): The S entries ("@d" duration, optional "@t" start, "@r" repeat).
        start_number (int): The number assigned to the first expanded segment.
        period_start (datetime): The absolute start time of the period.
        presentation_time_offset (int): Offset subtracted from each @t, in timescale units.
        timescale (int): Units-per-second timescale.

    Returns:
        List[Dict]: One dict per segment with number, absolute start/end times,
        duration and raw presentation time (all durations/times in timescale units
        except the datetimes).
    """
    expanded = []
    next_number = start_number
    running_time = 0

    for entry in timelines:
        occurrences = int(entry.get("@r", 0)) + 1
        duration = int(entry["@d"])
        # @t restarts the clock; otherwise segments continue back-to-back.
        t = int(entry.get("@t", running_time))

        for _ in range(occurrences):
            begins_at = period_start + timedelta(seconds=(t - presentation_time_offset) / timescale)
            expanded.append(
                {
                    "number": next_number,
                    "start_time": begins_at,
                    "end_time": begins_at + timedelta(seconds=duration / timescale),
                    "duration": duration,
                    "time": t,
                }
            )
            t += duration
            next_number += 1

        running_time = t

    return expanded
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def parse_segment_duration(parsed_dict: dict, item: dict, profile: dict, source: str, timescale: int) -> List[Dict]:
    """
    Extracts segments from a SegmentTemplate that declares a fixed @duration.
    Handles both live and static manifests.

    Args:
        parsed_dict (dict): The parsed MPD data.
        item (dict): The SegmentTemplate node.
        profile (dict): The profile information.
        source (str): The base URL used for relative media templates.
        timescale (int): Units-per-second timescale.

    Returns:
        List[Dict]: The parsed segments.
    """
    duration_units = int(item["@duration"])
    first_number = int(item.get("@startNumber", 1))

    # Live windows are anchored to wall-clock time; VOD enumerates the full asset.
    if parsed_dict["isLive"]:
        raw_segments = generate_live_segments(parsed_dict, duration_units / timescale, first_number)
    else:
        raw_segments = generate_vod_segments(profile, duration_units, timescale, first_number)

    return [create_segment_data(raw, item, profile, source, timescale) for raw in raw_segments]
|
| 399 |
+
|
| 400 |
+
|
| 401 |
+
def generate_live_segments(parsed_dict: dict, segment_duration_sec: float, start_number: int) -> List[Dict]:
    """
    Generates the live segment window implied by the time-shift buffer.
    Used for dynamic MPD manifests with a fixed segment duration.

    Args:
        parsed_dict (dict): The parsed MPD data ("availabilityStartTime" required,
            "timeShiftBufferDepth" optional, defaults to 60 seconds).
        segment_duration_sec (float): The segment duration in seconds.
        start_number (int): The starting segment number.

    Returns:
        List[Dict]: The generated live segments.
    """
    buffer_seconds = timedelta(seconds=parsed_dict.get("timeShiftBufferDepth", 60)).total_seconds()
    window_size = math.ceil(buffer_seconds / segment_duration_sec)

    # Locate the live edge relative to availabilityStartTime, then step back one
    # full time-shift window (never before the very first segment).
    now = datetime.now(tz=timezone.utc)
    elapsed = (now - parsed_dict["availabilityStartTime"]).total_seconds()
    live_edge_number = start_number + math.floor(elapsed / segment_duration_sec)
    first_number = max(live_edge_number - window_size, start_number)

    window = []
    for number in range(first_number, first_number + window_size):
        offset = (number - start_number) * segment_duration_sec
        window.append(
            {
                "number": number,
                "start_time": parsed_dict["availabilityStartTime"] + timedelta(seconds=offset),
                "duration": segment_duration_sec,
            }
        )
    return window
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
def generate_vod_segments(profile: dict, duration: int, timescale: int, start_number: int) -> List[Dict]:
    """
    Enumerates all segments of a static (VOD) manifest with a fixed duration.

    Args:
        profile (dict): The profile information ("mediaPresentationDuration" may be
            an ISO 8601 string, a number of seconds, or absent).
        duration (int): The per-segment duration in timescale units.
        timescale (int): Units-per-second timescale.
        start_number (int): The starting segment number.

    Returns:
        List[Dict]: The generated VOD segments.
    """
    raw_total = profile.get("mediaPresentationDuration") or 0
    total_seconds = parse_duration(raw_total) if isinstance(raw_total, str) else raw_total

    segment_count = math.ceil(total_seconds * timescale / duration)
    segment_seconds = duration / timescale

    return [
        {"number": number, "duration": segment_seconds}
        for number in range(start_number, start_number + segment_count)
    ]
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
def create_segment_data(segment: Dict, item: dict, profile: dict, source: str, timescale: int | None = None) -> Dict:
|
| 458 |
+
"""
|
| 459 |
+
Creates segment data based on the segment information. This includes the segment URL and metadata.
|
| 460 |
+
|
| 461 |
+
Args:
|
| 462 |
+
segment (Dict): The segment information.
|
| 463 |
+
item (dict): The segment template data.
|
| 464 |
+
profile (dict): The profile information.
|
| 465 |
+
source (str): The source URL.
|
| 466 |
+
timescale (int, optional): The timescale for the segments. Defaults to None.
|
| 467 |
+
|
| 468 |
+
Returns:
|
| 469 |
+
Dict: The created segment data.
|
| 470 |
+
"""
|
| 471 |
+
media_template = item["@media"]
|
| 472 |
+
media = media_template.replace("$RepresentationID$", profile["id"])
|
| 473 |
+
media = media.replace("$Number%04d$", f"{segment['number']:04d}")
|
| 474 |
+
media = media.replace("$Number$", str(segment["number"]))
|
| 475 |
+
media = media.replace("$Bandwidth$", str(profile["bandwidth"]))
|
| 476 |
+
|
| 477 |
+
if "time" in segment and timescale is not None:
|
| 478 |
+
media = media.replace("$Time$", str(int(segment["time"] * timescale)))
|
| 479 |
+
|
| 480 |
+
if not media.startswith("http"):
|
| 481 |
+
media = f"{source}/{media}"
|
| 482 |
+
|
| 483 |
+
segment_data = {
|
| 484 |
+
"type": "segment",
|
| 485 |
+
"media": media,
|
| 486 |
+
"number": segment["number"],
|
| 487 |
+
}
|
| 488 |
+
|
| 489 |
+
if "start_time" in segment and "end_time" in segment:
|
| 490 |
+
segment_data.update(
|
| 491 |
+
{
|
| 492 |
+
"start_time": segment["start_time"],
|
| 493 |
+
"end_time": segment["end_time"],
|
| 494 |
+
"extinf": (segment["end_time"] - segment["start_time"]).total_seconds(),
|
| 495 |
+
"program_date_time": segment["start_time"].isoformat() + "Z",
|
| 496 |
+
}
|
| 497 |
+
)
|
| 498 |
+
elif "start_time" in segment and "duration" in segment:
|
| 499 |
+
duration = segment["duration"]
|
| 500 |
+
segment_data.update(
|
| 501 |
+
{
|
| 502 |
+
"start_time": segment["start_time"],
|
| 503 |
+
"end_time": segment["start_time"] + timedelta(seconds=duration),
|
| 504 |
+
"extinf": duration,
|
| 505 |
+
"program_date_time": segment["start_time"].isoformat() + "Z",
|
| 506 |
+
}
|
| 507 |
+
)
|
| 508 |
+
elif "duration" in segment:
|
| 509 |
+
segment_data["extinf"] = segment["duration"]
|
| 510 |
+
|
| 511 |
+
return segment_data
|
| 512 |
+
|
| 513 |
+
|
| 514 |
+
def parse_segment_base(representation: dict, source: str) -> List[Dict]:
    """
    Parses SegmentBase information for single-segment representations.

    Args:
        representation (dict): The representation data (must contain "SegmentBase"
            with "@indexRange", and "BaseURL").
        source (str): The base URL used to absolutize the BaseURL.

    Returns:
        List[Dict]: A single segment covering the byte range from the start of the
        initialization data (when declared) to the end of the index.
    """
    segment_base = representation["SegmentBase"]
    range_start, range_end = (int(part) for part in segment_base["@indexRange"].split("-"))

    # Widen the range so the single request also fetches the init data.
    if "Initialization" in segment_base:
        range_start = int(segment_base["Initialization"]["@range"].split("-")[0])

    return [
        {
            "type": "segment",
            "range": f"{range_start}-{range_end}",
            "media": f"{source}/{representation['BaseURL']}",
        }
    ]
|
| 537 |
+
|
| 538 |
+
|
| 539 |
+
def parse_duration(duration_str: str) -> float:
    """
    Converts an ISO 8601 duration string (e.g. "PT1H30M") into seconds.

    Args:
        duration_str (str): The duration string to parse.

    Returns:
        float: The duration in seconds.

    Raises:
        ValueError: If the string does not begin with a parseable "P..." duration.
    """
    pattern = re.compile(r"P(?:(\d+)Y)?(?:(\d+)M)?(?:(\d+)D)?T?(?:(\d+)H)?(?:(\d+)M)?(?:(\d+(?:\.\d+)?)S)?")
    match = pattern.match(duration_str)
    if not match:
        raise ValueError(f"Invalid duration format: {duration_str}")

    # Seconds per unit: year (365 days), month (30 days), day, hour, minute, second.
    unit_seconds = (31536000, 2592000, 86400, 3600, 60, 1)
    return sum(float(value) * scale for value, scale in zip(match.groups(), unit_seconds) if value)
|
poetry.lock
ADDED
|
@@ -0,0 +1,578 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
|
| 2 |
+
|
| 3 |
+
[[package]]
|
| 4 |
+
name = "annotated-types"
|
| 5 |
+
version = "0.7.0"
|
| 6 |
+
description = "Reusable constraint types to use with typing.Annotated"
|
| 7 |
+
optional = false
|
| 8 |
+
python-versions = ">=3.8"
|
| 9 |
+
files = [
|
| 10 |
+
{file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
|
| 11 |
+
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
|
| 12 |
+
]
|
| 13 |
+
|
| 14 |
+
[[package]]
|
| 15 |
+
name = "anyio"
|
| 16 |
+
version = "4.4.0"
|
| 17 |
+
description = "High level compatibility layer for multiple asynchronous event loop implementations"
|
| 18 |
+
optional = false
|
| 19 |
+
python-versions = ">=3.8"
|
| 20 |
+
files = [
|
| 21 |
+
{file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"},
|
| 22 |
+
{file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"},
|
| 23 |
+
]
|
| 24 |
+
|
| 25 |
+
[package.dependencies]
|
| 26 |
+
idna = ">=2.8"
|
| 27 |
+
sniffio = ">=1.1"
|
| 28 |
+
|
| 29 |
+
[package.extras]
|
| 30 |
+
doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
|
| 31 |
+
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
|
| 32 |
+
trio = ["trio (>=0.23)"]
|
| 33 |
+
|
| 34 |
+
[[package]]
|
| 35 |
+
name = "black"
|
| 36 |
+
version = "24.8.0"
|
| 37 |
+
description = "The uncompromising code formatter."
|
| 38 |
+
optional = false
|
| 39 |
+
python-versions = ">=3.8"
|
| 40 |
+
files = [
|
| 41 |
+
{file = "black-24.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:09cdeb74d494ec023ded657f7092ba518e8cf78fa8386155e4a03fdcc44679e6"},
|
| 42 |
+
{file = "black-24.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:81c6742da39f33b08e791da38410f32e27d632260e599df7245cccee2064afeb"},
|
| 43 |
+
{file = "black-24.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:707a1ca89221bc8a1a64fb5e15ef39cd755633daa672a9db7498d1c19de66a42"},
|
| 44 |
+
{file = "black-24.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d6417535d99c37cee4091a2f24eb2b6d5ec42b144d50f1f2e436d9fe1916fe1a"},
|
| 45 |
+
{file = "black-24.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fb6e2c0b86bbd43dee042e48059c9ad7830abd5c94b0bc518c0eeec57c3eddc1"},
|
| 46 |
+
{file = "black-24.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:837fd281f1908d0076844bc2b801ad2d369c78c45cf800cad7b61686051041af"},
|
| 47 |
+
{file = "black-24.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:62e8730977f0b77998029da7971fa896ceefa2c4c4933fcd593fa599ecbf97a4"},
|
| 48 |
+
{file = "black-24.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:72901b4913cbac8972ad911dc4098d5753704d1f3c56e44ae8dce99eecb0e3af"},
|
| 49 |
+
{file = "black-24.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7c046c1d1eeb7aea9335da62472481d3bbf3fd986e093cffd35f4385c94ae368"},
|
| 50 |
+
{file = "black-24.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:649f6d84ccbae73ab767e206772cc2d7a393a001070a4c814a546afd0d423aed"},
|
| 51 |
+
{file = "black-24.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b59b250fdba5f9a9cd9d0ece6e6d993d91ce877d121d161e4698af3eb9c1018"},
|
| 52 |
+
{file = "black-24.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:6e55d30d44bed36593c3163b9bc63bf58b3b30e4611e4d88a0c3c239930ed5b2"},
|
| 53 |
+
{file = "black-24.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:505289f17ceda596658ae81b61ebbe2d9b25aa78067035184ed0a9d855d18afd"},
|
| 54 |
+
{file = "black-24.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b19c9ad992c7883ad84c9b22aaa73562a16b819c1d8db7a1a1a49fb7ec13c7d2"},
|
| 55 |
+
{file = "black-24.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f13f7f386f86f8121d76599114bb8c17b69d962137fc70efe56137727c7047e"},
|
| 56 |
+
{file = "black-24.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:f490dbd59680d809ca31efdae20e634f3fae27fba3ce0ba3208333b713bc3920"},
|
| 57 |
+
{file = "black-24.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eab4dd44ce80dea27dc69db40dab62d4ca96112f87996bca68cd75639aeb2e4c"},
|
| 58 |
+
{file = "black-24.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3c4285573d4897a7610054af5a890bde7c65cb466040c5f0c8b732812d7f0e5e"},
|
| 59 |
+
{file = "black-24.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e84e33b37be070ba135176c123ae52a51f82306def9f7d063ee302ecab2cf47"},
|
| 60 |
+
{file = "black-24.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:73bbf84ed136e45d451a260c6b73ed674652f90a2b3211d6a35e78054563a9bb"},
|
| 61 |
+
{file = "black-24.8.0-py3-none-any.whl", hash = "sha256:972085c618ee94f402da1af548a4f218c754ea7e5dc70acb168bfaca4c2542ed"},
|
| 62 |
+
{file = "black-24.8.0.tar.gz", hash = "sha256:2500945420b6784c38b9ee885af039f5e7471ef284ab03fa35ecdde4688cd83f"},
|
| 63 |
+
]
|
| 64 |
+
|
| 65 |
+
[package.dependencies]
|
| 66 |
+
click = ">=8.0.0"
|
| 67 |
+
mypy-extensions = ">=0.4.3"
|
| 68 |
+
packaging = ">=22.0"
|
| 69 |
+
pathspec = ">=0.9.0"
|
| 70 |
+
platformdirs = ">=2"
|
| 71 |
+
|
| 72 |
+
[package.extras]
|
| 73 |
+
colorama = ["colorama (>=0.4.3)"]
|
| 74 |
+
d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"]
|
| 75 |
+
jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
|
| 76 |
+
uvloop = ["uvloop (>=0.15.2)"]
|
| 77 |
+
|
| 78 |
+
[[package]]
|
| 79 |
+
name = "cachetools"
|
| 80 |
+
version = "5.5.0"
|
| 81 |
+
description = "Extensible memoizing collections and decorators"
|
| 82 |
+
optional = false
|
| 83 |
+
python-versions = ">=3.7"
|
| 84 |
+
files = [
|
| 85 |
+
{file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"},
|
| 86 |
+
{file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"},
|
| 87 |
+
]
|
| 88 |
+
|
| 89 |
+
[[package]]
|
| 90 |
+
name = "certifi"
|
| 91 |
+
version = "2024.7.4"
|
| 92 |
+
description = "Python package for providing Mozilla's CA Bundle."
|
| 93 |
+
optional = false
|
| 94 |
+
python-versions = ">=3.6"
|
| 95 |
+
files = [
|
| 96 |
+
{file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"},
|
| 97 |
+
{file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"},
|
| 98 |
+
]
|
| 99 |
+
|
| 100 |
+
[[package]]
|
| 101 |
+
name = "click"
|
| 102 |
+
version = "8.1.7"
|
| 103 |
+
description = "Composable command line interface toolkit"
|
| 104 |
+
optional = false
|
| 105 |
+
python-versions = ">=3.7"
|
| 106 |
+
files = [
|
| 107 |
+
{file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
|
| 108 |
+
{file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
|
| 109 |
+
]
|
| 110 |
+
|
| 111 |
+
[package.dependencies]
|
| 112 |
+
colorama = {version = "*", markers = "platform_system == \"Windows\""}
|
| 113 |
+
|
| 114 |
+
[[package]]
|
| 115 |
+
name = "colorama"
|
| 116 |
+
version = "0.4.6"
|
| 117 |
+
description = "Cross-platform colored terminal text."
|
| 118 |
+
optional = false
|
| 119 |
+
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
|
| 120 |
+
files = [
|
| 121 |
+
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
|
| 122 |
+
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
|
| 123 |
+
]
|
| 124 |
+
|
| 125 |
+
[[package]]
|
| 126 |
+
name = "fastapi"
|
| 127 |
+
version = "0.112.1"
|
| 128 |
+
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
|
| 129 |
+
optional = false
|
| 130 |
+
python-versions = ">=3.8"
|
| 131 |
+
files = [
|
| 132 |
+
{file = "fastapi-0.112.1-py3-none-any.whl", hash = "sha256:bcbd45817fc2a1cd5da09af66815b84ec0d3d634eb173d1ab468ae3103e183e4"},
|
| 133 |
+
{file = "fastapi-0.112.1.tar.gz", hash = "sha256:b2537146f8c23389a7faa8b03d0bd38d4986e6983874557d95eed2acc46448ef"},
|
| 134 |
+
]
|
| 135 |
+
|
| 136 |
+
[package.dependencies]
|
| 137 |
+
pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0"
|
| 138 |
+
starlette = ">=0.37.2,<0.39.0"
|
| 139 |
+
typing-extensions = ">=4.8.0"
|
| 140 |
+
|
| 141 |
+
[package.extras]
|
| 142 |
+
all = ["email_validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.7)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
|
| 143 |
+
standard = ["email_validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "jinja2 (>=2.11.2)", "python-multipart (>=0.0.7)", "uvicorn[standard] (>=0.12.0)"]
|
| 144 |
+
|
| 145 |
+
[[package]]
|
| 146 |
+
name = "gunicorn"
|
| 147 |
+
version = "23.0.0"
|
| 148 |
+
description = "WSGI HTTP Server for UNIX"
|
| 149 |
+
optional = false
|
| 150 |
+
python-versions = ">=3.7"
|
| 151 |
+
files = [
|
| 152 |
+
{file = "gunicorn-23.0.0-py3-none-any.whl", hash = "sha256:ec400d38950de4dfd418cff8328b2c8faed0edb0d517d3394e457c317908ca4d"},
|
| 153 |
+
{file = "gunicorn-23.0.0.tar.gz", hash = "sha256:f014447a0101dc57e294f6c18ca6b40227a4c90e9bdb586042628030cba004ec"},
|
| 154 |
+
]
|
| 155 |
+
|
| 156 |
+
[package.dependencies]
|
| 157 |
+
packaging = "*"
|
| 158 |
+
|
| 159 |
+
[package.extras]
|
| 160 |
+
eventlet = ["eventlet (>=0.24.1,!=0.36.0)"]
|
| 161 |
+
gevent = ["gevent (>=1.4.0)"]
|
| 162 |
+
setproctitle = ["setproctitle"]
|
| 163 |
+
testing = ["coverage", "eventlet", "gevent", "pytest", "pytest-cov"]
|
| 164 |
+
tornado = ["tornado (>=0.2)"]
|
| 165 |
+
|
| 166 |
+
[[package]]
|
| 167 |
+
name = "h11"
|
| 168 |
+
version = "0.14.0"
|
| 169 |
+
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
|
| 170 |
+
optional = false
|
| 171 |
+
python-versions = ">=3.7"
|
| 172 |
+
files = [
|
| 173 |
+
{file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
|
| 174 |
+
{file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
|
| 175 |
+
]
|
| 176 |
+
|
| 177 |
+
[[package]]
|
| 178 |
+
name = "httpcore"
|
| 179 |
+
version = "1.0.5"
|
| 180 |
+
description = "A minimal low-level HTTP client."
|
| 181 |
+
optional = false
|
| 182 |
+
python-versions = ">=3.8"
|
| 183 |
+
files = [
|
| 184 |
+
{file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"},
|
| 185 |
+
{file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"},
|
| 186 |
+
]
|
| 187 |
+
|
| 188 |
+
[package.dependencies]
|
| 189 |
+
certifi = "*"
|
| 190 |
+
h11 = ">=0.13,<0.15"
|
| 191 |
+
|
| 192 |
+
[package.extras]
|
| 193 |
+
asyncio = ["anyio (>=4.0,<5.0)"]
|
| 194 |
+
http2 = ["h2 (>=3,<5)"]
|
| 195 |
+
socks = ["socksio (==1.*)"]
|
| 196 |
+
trio = ["trio (>=0.22.0,<0.26.0)"]
|
| 197 |
+
|
| 198 |
+
[[package]]
|
| 199 |
+
name = "httpx"
|
| 200 |
+
version = "0.27.0"
|
| 201 |
+
description = "The next generation HTTP client."
|
| 202 |
+
optional = false
|
| 203 |
+
python-versions = ">=3.8"
|
| 204 |
+
files = [
|
| 205 |
+
{file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"},
|
| 206 |
+
{file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"},
|
| 207 |
+
]
|
| 208 |
+
|
| 209 |
+
[package.dependencies]
|
| 210 |
+
anyio = "*"
|
| 211 |
+
certifi = "*"
|
| 212 |
+
httpcore = "==1.*"
|
| 213 |
+
idna = "*"
|
| 214 |
+
sniffio = "*"
|
| 215 |
+
socksio = {version = "==1.*", optional = true, markers = "extra == \"socks\""}
|
| 216 |
+
|
| 217 |
+
[package.extras]
|
| 218 |
+
brotli = ["brotli", "brotlicffi"]
|
| 219 |
+
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
|
| 220 |
+
http2 = ["h2 (>=3,<5)"]
|
| 221 |
+
socks = ["socksio (==1.*)"]
|
| 222 |
+
|
| 223 |
+
[[package]]
|
| 224 |
+
name = "idna"
|
| 225 |
+
version = "3.8"
|
| 226 |
+
description = "Internationalized Domain Names in Applications (IDNA)"
|
| 227 |
+
optional = false
|
| 228 |
+
python-versions = ">=3.6"
|
| 229 |
+
files = [
|
| 230 |
+
{file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"},
|
| 231 |
+
{file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"},
|
| 232 |
+
]
|
| 233 |
+
|
| 234 |
+
[[package]]
|
| 235 |
+
name = "mypy-extensions"
|
| 236 |
+
version = "1.0.0"
|
| 237 |
+
description = "Type system extensions for programs checked with the mypy type checker."
|
| 238 |
+
optional = false
|
| 239 |
+
python-versions = ">=3.5"
|
| 240 |
+
files = [
|
| 241 |
+
{file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
|
| 242 |
+
{file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
|
| 243 |
+
]
|
| 244 |
+
|
| 245 |
+
[[package]]
|
| 246 |
+
name = "packaging"
|
| 247 |
+
version = "24.1"
|
| 248 |
+
description = "Core utilities for Python packages"
|
| 249 |
+
optional = false
|
| 250 |
+
python-versions = ">=3.8"
|
| 251 |
+
files = [
|
| 252 |
+
{file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
|
| 253 |
+
{file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
|
| 254 |
+
]
|
| 255 |
+
|
| 256 |
+
[[package]]
|
| 257 |
+
name = "pathspec"
|
| 258 |
+
version = "0.12.1"
|
| 259 |
+
description = "Utility library for gitignore style pattern matching of file paths."
|
| 260 |
+
optional = false
|
| 261 |
+
python-versions = ">=3.8"
|
| 262 |
+
files = [
|
| 263 |
+
{file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"},
|
| 264 |
+
{file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"},
|
| 265 |
+
]
|
| 266 |
+
|
| 267 |
+
[[package]]
|
| 268 |
+
name = "platformdirs"
|
| 269 |
+
version = "4.2.2"
|
| 270 |
+
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`."
|
| 271 |
+
optional = false
|
| 272 |
+
python-versions = ">=3.8"
|
| 273 |
+
files = [
|
| 274 |
+
{file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"},
|
| 275 |
+
{file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"},
|
| 276 |
+
]
|
| 277 |
+
|
| 278 |
+
[package.extras]
|
| 279 |
+
docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"]
|
| 280 |
+
test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"]
|
| 281 |
+
type = ["mypy (>=1.8)"]
|
| 282 |
+
|
| 283 |
+
[[package]]
|
| 284 |
+
name = "pycryptodome"
|
| 285 |
+
version = "3.20.0"
|
| 286 |
+
description = "Cryptographic library for Python"
|
| 287 |
+
optional = false
|
| 288 |
+
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
|
| 289 |
+
files = [
|
| 290 |
+
{file = "pycryptodome-3.20.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:f0e6d631bae3f231d3634f91ae4da7a960f7ff87f2865b2d2b831af1dfb04e9a"},
|
| 291 |
+
{file = "pycryptodome-3.20.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:baee115a9ba6c5d2709a1e88ffe62b73ecc044852a925dcb67713a288c4ec70f"},
|
| 292 |
+
{file = "pycryptodome-3.20.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:417a276aaa9cb3be91f9014e9d18d10e840a7a9b9a9be64a42f553c5b50b4d1d"},
|
| 293 |
+
{file = "pycryptodome-3.20.0-cp27-cp27m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a1250b7ea809f752b68e3e6f3fd946b5939a52eaeea18c73bdab53e9ba3c2dd"},
|
| 294 |
+
{file = "pycryptodome-3.20.0-cp27-cp27m-musllinux_1_1_aarch64.whl", hash = "sha256:d5954acfe9e00bc83ed9f5cb082ed22c592fbbef86dc48b907238be64ead5c33"},
|
| 295 |
+
{file = "pycryptodome-3.20.0-cp27-cp27m-win32.whl", hash = "sha256:06d6de87c19f967f03b4cf9b34e538ef46e99a337e9a61a77dbe44b2cbcf0690"},
|
| 296 |
+
{file = "pycryptodome-3.20.0-cp27-cp27m-win_amd64.whl", hash = "sha256:ec0bb1188c1d13426039af8ffcb4dbe3aad1d7680c35a62d8eaf2a529b5d3d4f"},
|
| 297 |
+
{file = "pycryptodome-3.20.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:5601c934c498cd267640b57569e73793cb9a83506f7c73a8ec57a516f5b0b091"},
|
| 298 |
+
{file = "pycryptodome-3.20.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d29daa681517f4bc318cd8a23af87e1f2a7bad2fe361e8aa29c77d652a065de4"},
|
| 299 |
+
{file = "pycryptodome-3.20.0-cp27-cp27mu-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3427d9e5310af6680678f4cce149f54e0bb4af60101c7f2c16fdf878b39ccccc"},
|
| 300 |
+
{file = "pycryptodome-3.20.0-cp27-cp27mu-musllinux_1_1_aarch64.whl", hash = "sha256:3cd3ef3aee1079ae44afaeee13393cf68b1058f70576b11439483e34f93cf818"},
|
| 301 |
+
{file = "pycryptodome-3.20.0-cp35-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac1c7c0624a862f2e53438a15c9259d1655325fc2ec4392e66dc46cdae24d044"},
|
| 302 |
+
{file = "pycryptodome-3.20.0-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:76658f0d942051d12a9bd08ca1b6b34fd762a8ee4240984f7c06ddfb55eaf15a"},
|
| 303 |
+
{file = "pycryptodome-3.20.0-cp35-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f35d6cee81fa145333137009d9c8ba90951d7d77b67c79cbe5f03c7eb74d8fe2"},
|
| 304 |
+
{file = "pycryptodome-3.20.0-cp35-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76cb39afede7055127e35a444c1c041d2e8d2f1f9c121ecef573757ba4cd2c3c"},
|
| 305 |
+
{file = "pycryptodome-3.20.0-cp35-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49a4c4dc60b78ec41d2afa392491d788c2e06edf48580fbfb0dd0f828af49d25"},
|
| 306 |
+
{file = "pycryptodome-3.20.0-cp35-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fb3b87461fa35afa19c971b0a2b7456a7b1db7b4eba9a8424666104925b78128"},
|
| 307 |
+
{file = "pycryptodome-3.20.0-cp35-abi3-musllinux_1_1_i686.whl", hash = "sha256:acc2614e2e5346a4a4eab6e199203034924313626f9620b7b4b38e9ad74b7e0c"},
|
| 308 |
+
{file = "pycryptodome-3.20.0-cp35-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:210ba1b647837bfc42dd5a813cdecb5b86193ae11a3f5d972b9a0ae2c7e9e4b4"},
|
| 309 |
+
{file = "pycryptodome-3.20.0-cp35-abi3-win32.whl", hash = "sha256:8d6b98d0d83d21fb757a182d52940d028564efe8147baa9ce0f38d057104ae72"},
|
| 310 |
+
{file = "pycryptodome-3.20.0-cp35-abi3-win_amd64.whl", hash = "sha256:9b3ae153c89a480a0ec402e23db8d8d84a3833b65fa4b15b81b83be9d637aab9"},
|
| 311 |
+
{file = "pycryptodome-3.20.0-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:4401564ebf37dfde45d096974c7a159b52eeabd9969135f0426907db367a652a"},
|
| 312 |
+
{file = "pycryptodome-3.20.0-pp27-pypy_73-win32.whl", hash = "sha256:ec1f93feb3bb93380ab0ebf8b859e8e5678c0f010d2d78367cf6bc30bfeb148e"},
|
| 313 |
+
{file = "pycryptodome-3.20.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:acae12b9ede49f38eb0ef76fdec2df2e94aad85ae46ec85be3648a57f0a7db04"},
|
| 314 |
+
{file = "pycryptodome-3.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f47888542a0633baff535a04726948e876bf1ed880fddb7c10a736fa99146ab3"},
|
| 315 |
+
{file = "pycryptodome-3.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e0e4a987d38cfc2e71b4a1b591bae4891eeabe5fa0f56154f576e26287bfdea"},
|
| 316 |
+
{file = "pycryptodome-3.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c18b381553638414b38705f07d1ef0a7cf301bc78a5f9bc17a957eb19446834b"},
|
| 317 |
+
{file = "pycryptodome-3.20.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a60fedd2b37b4cb11ccb5d0399efe26db9e0dd149016c1cc6c8161974ceac2d6"},
|
| 318 |
+
{file = "pycryptodome-3.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:405002eafad114a2f9a930f5db65feef7b53c4784495dd8758069b89baf68eab"},
|
| 319 |
+
{file = "pycryptodome-3.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ab6ab0cb755154ad14e507d1df72de9897e99fd2d4922851a276ccc14f4f1a5"},
|
| 320 |
+
{file = "pycryptodome-3.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:acf6e43fa75aca2d33e93409f2dafe386fe051818ee79ee8a3e21de9caa2ac9e"},
|
| 321 |
+
{file = "pycryptodome-3.20.0.tar.gz", hash = "sha256:09609209ed7de61c2b560cc5c8c4fbf892f8b15b1faf7e4cbffac97db1fffda7"},
|
| 322 |
+
]
|
| 323 |
+
|
| 324 |
+
[[package]]
|
| 325 |
+
name = "pydantic"
|
| 326 |
+
version = "2.8.2"
|
| 327 |
+
description = "Data validation using Python type hints"
|
| 328 |
+
optional = false
|
| 329 |
+
python-versions = ">=3.8"
|
| 330 |
+
files = [
|
| 331 |
+
{file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"},
|
| 332 |
+
{file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"},
|
| 333 |
+
]
|
| 334 |
+
|
| 335 |
+
[package.dependencies]
|
| 336 |
+
annotated-types = ">=0.4.0"
|
| 337 |
+
pydantic-core = "2.20.1"
|
| 338 |
+
typing-extensions = [
|
| 339 |
+
{version = ">=4.12.2", markers = "python_version >= \"3.13\""},
|
| 340 |
+
{version = ">=4.6.1", markers = "python_version < \"3.13\""},
|
| 341 |
+
]
|
| 342 |
+
|
| 343 |
+
[package.extras]
|
| 344 |
+
email = ["email-validator (>=2.0.0)"]
|
| 345 |
+
|
| 346 |
+
[[package]]
|
| 347 |
+
name = "pydantic-core"
|
| 348 |
+
version = "2.20.1"
|
| 349 |
+
description = "Core functionality for Pydantic validation and serialization"
|
| 350 |
+
optional = false
|
| 351 |
+
python-versions = ">=3.8"
|
| 352 |
+
files = [
|
| 353 |
+
{file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"},
|
| 354 |
+
{file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"},
|
| 355 |
+
{file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"},
|
| 356 |
+
{file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"},
|
| 357 |
+
{file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"},
|
| 358 |
+
{file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"},
|
| 359 |
+
{file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"},
|
| 360 |
+
{file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"},
|
| 361 |
+
{file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"},
|
| 362 |
+
{file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"},
|
| 363 |
+
{file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"},
|
| 364 |
+
{file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"},
|
| 365 |
+
{file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"},
|
| 366 |
+
{file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"},
|
| 367 |
+
{file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"},
|
| 368 |
+
{file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"},
|
| 369 |
+
{file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"},
|
| 370 |
+
{file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"},
|
| 371 |
+
{file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"},
|
| 372 |
+
{file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"},
|
| 373 |
+
{file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"},
|
| 374 |
+
{file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"},
|
| 375 |
+
{file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"},
|
| 376 |
+
{file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"},
|
| 377 |
+
{file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"},
|
| 378 |
+
{file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"},
|
| 379 |
+
{file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"},
|
| 380 |
+
{file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"},
|
| 381 |
+
{file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"},
|
| 382 |
+
{file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"},
|
| 383 |
+
{file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"},
|
| 384 |
+
{file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"},
|
| 385 |
+
{file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"},
|
| 386 |
+
{file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"},
|
| 387 |
+
{file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"},
|
| 388 |
+
{file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"},
|
| 389 |
+
{file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"},
|
| 390 |
+
{file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"},
|
| 391 |
+
{file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"},
|
| 392 |
+
{file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"},
|
| 393 |
+
{file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"},
|
| 394 |
+
{file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"},
|
| 395 |
+
{file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"},
|
| 396 |
+
{file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"},
|
| 397 |
+
{file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"},
|
| 398 |
+
{file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"},
|
| 399 |
+
{file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"},
|
| 400 |
+
{file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"},
|
| 401 |
+
{file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"},
|
| 402 |
+
{file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"},
|
| 403 |
+
{file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"},
|
| 404 |
+
{file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"},
|
| 405 |
+
{file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"},
|
| 406 |
+
{file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"},
|
| 407 |
+
{file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"},
|
| 408 |
+
{file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"},
|
| 409 |
+
{file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"},
|
| 410 |
+
{file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"},
|
| 411 |
+
{file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"},
|
| 412 |
+
{file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"},
|
| 413 |
+
{file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"},
|
| 414 |
+
{file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"},
|
| 415 |
+
{file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"},
|
| 416 |
+
{file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"},
|
| 417 |
+
{file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"},
|
| 418 |
+
{file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"},
|
| 419 |
+
{file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"},
|
| 420 |
+
{file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"},
|
| 421 |
+
{file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"},
|
| 422 |
+
{file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"},
|
| 423 |
+
{file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"},
|
| 424 |
+
{file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"},
|
| 425 |
+
{file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"},
|
| 426 |
+
{file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"},
|
| 427 |
+
{file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"},
|
| 428 |
+
{file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"},
|
| 429 |
+
{file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"},
|
| 430 |
+
{file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"},
|
| 431 |
+
{file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"},
|
| 432 |
+
{file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"},
|
| 433 |
+
{file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"},
|
| 434 |
+
{file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"},
|
| 435 |
+
{file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"},
|
| 436 |
+
{file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"},
|
| 437 |
+
{file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"},
|
| 438 |
+
{file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"},
|
| 439 |
+
{file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"},
|
| 440 |
+
{file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"},
|
| 441 |
+
{file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"},
|
| 442 |
+
]
|
| 443 |
+
|
| 444 |
+
[package.dependencies]
|
| 445 |
+
typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
|
| 446 |
+
|
| 447 |
+
[[package]]
|
| 448 |
+
name = "pydantic-settings"
|
| 449 |
+
version = "2.4.0"
|
| 450 |
+
description = "Settings management using Pydantic"
|
| 451 |
+
optional = false
|
| 452 |
+
python-versions = ">=3.8"
|
| 453 |
+
files = [
|
| 454 |
+
{file = "pydantic_settings-2.4.0-py3-none-any.whl", hash = "sha256:bb6849dc067f1687574c12a639e231f3a6feeed0a12d710c1382045c5db1c315"},
|
| 455 |
+
{file = "pydantic_settings-2.4.0.tar.gz", hash = "sha256:ed81c3a0f46392b4d7c0a565c05884e6e54b3456e6f0fe4d8814981172dc9a88"},
|
| 456 |
+
]
|
| 457 |
+
|
| 458 |
+
[package.dependencies]
|
| 459 |
+
pydantic = ">=2.7.0"
|
| 460 |
+
python-dotenv = ">=0.21.0"
|
| 461 |
+
|
| 462 |
+
[package.extras]
|
| 463 |
+
azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"]
|
| 464 |
+
toml = ["tomli (>=2.0.1)"]
|
| 465 |
+
yaml = ["pyyaml (>=6.0.1)"]
|
| 466 |
+
|
| 467 |
+
[[package]]
|
| 468 |
+
name = "python-dotenv"
|
| 469 |
+
version = "1.0.1"
|
| 470 |
+
description = "Read key-value pairs from a .env file and set them as environment variables"
|
| 471 |
+
optional = false
|
| 472 |
+
python-versions = ">=3.8"
|
| 473 |
+
files = [
|
| 474 |
+
{file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"},
|
| 475 |
+
{file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"},
|
| 476 |
+
]
|
| 477 |
+
|
| 478 |
+
[package.extras]
|
| 479 |
+
cli = ["click (>=5.0)"]
|
| 480 |
+
|
| 481 |
+
[[package]]
|
| 482 |
+
name = "sniffio"
|
| 483 |
+
version = "1.3.1"
|
| 484 |
+
description = "Sniff out which async library your code is running under"
|
| 485 |
+
optional = false
|
| 486 |
+
python-versions = ">=3.7"
|
| 487 |
+
files = [
|
| 488 |
+
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
|
| 489 |
+
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
|
| 490 |
+
]
|
| 491 |
+
|
| 492 |
+
[[package]]
|
| 493 |
+
name = "socksio"
|
| 494 |
+
version = "1.0.0"
|
| 495 |
+
description = "Sans-I/O implementation of SOCKS4, SOCKS4A, and SOCKS5."
|
| 496 |
+
optional = false
|
| 497 |
+
python-versions = ">=3.6"
|
| 498 |
+
files = [
|
| 499 |
+
{file = "socksio-1.0.0-py3-none-any.whl", hash = "sha256:95dc1f15f9b34e8d7b16f06d74b8ccf48f609af32ab33c608d08761c5dcbb1f3"},
|
| 500 |
+
{file = "socksio-1.0.0.tar.gz", hash = "sha256:f88beb3da5b5c38b9890469de67d0cb0f9d494b78b106ca1845f96c10b91c4ac"},
|
| 501 |
+
]
|
| 502 |
+
|
| 503 |
+
[[package]]
|
| 504 |
+
name = "starlette"
|
| 505 |
+
version = "0.38.2"
|
| 506 |
+
description = "The little ASGI library that shines."
|
| 507 |
+
optional = false
|
| 508 |
+
python-versions = ">=3.8"
|
| 509 |
+
files = [
|
| 510 |
+
{file = "starlette-0.38.2-py3-none-any.whl", hash = "sha256:4ec6a59df6bbafdab5f567754481657f7ed90dc9d69b0c9ff017907dd54faeff"},
|
| 511 |
+
{file = "starlette-0.38.2.tar.gz", hash = "sha256:c7c0441065252160993a1a37cf2a73bb64d271b17303e0b0c1eb7191cfb12d75"},
|
| 512 |
+
]
|
| 513 |
+
|
| 514 |
+
[package.dependencies]
|
| 515 |
+
anyio = ">=3.4.0,<5"
|
| 516 |
+
|
| 517 |
+
[package.extras]
|
| 518 |
+
full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7)", "pyyaml"]
|
| 519 |
+
|
| 520 |
+
[[package]]
|
| 521 |
+
name = "tenacity"
|
| 522 |
+
version = "9.0.0"
|
| 523 |
+
description = "Retry code until it succeeds"
|
| 524 |
+
optional = false
|
| 525 |
+
python-versions = ">=3.8"
|
| 526 |
+
files = [
|
| 527 |
+
{file = "tenacity-9.0.0-py3-none-any.whl", hash = "sha256:93de0c98785b27fcf659856aa9f54bfbd399e29969b0621bc7f762bd441b4539"},
|
| 528 |
+
{file = "tenacity-9.0.0.tar.gz", hash = "sha256:807f37ca97d62aa361264d497b0e31e92b8027044942bfa756160d908320d73b"},
|
| 529 |
+
]
|
| 530 |
+
|
| 531 |
+
[package.extras]
|
| 532 |
+
doc = ["reno", "sphinx"]
|
| 533 |
+
test = ["pytest", "tornado (>=4.5)", "typeguard"]
|
| 534 |
+
|
| 535 |
+
[[package]]
|
| 536 |
+
name = "typing-extensions"
|
| 537 |
+
version = "4.12.2"
|
| 538 |
+
description = "Backported and Experimental Type Hints for Python 3.8+"
|
| 539 |
+
optional = false
|
| 540 |
+
python-versions = ">=3.8"
|
| 541 |
+
files = [
|
| 542 |
+
{file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
|
| 543 |
+
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
|
| 544 |
+
]
|
| 545 |
+
|
| 546 |
+
[[package]]
|
| 547 |
+
name = "uvicorn"
|
| 548 |
+
version = "0.30.6"
|
| 549 |
+
description = "The lightning-fast ASGI server."
|
| 550 |
+
optional = false
|
| 551 |
+
python-versions = ">=3.8"
|
| 552 |
+
files = [
|
| 553 |
+
{file = "uvicorn-0.30.6-py3-none-any.whl", hash = "sha256:65fd46fe3fda5bdc1b03b94eb634923ff18cd35b2f084813ea79d1f103f711b5"},
|
| 554 |
+
{file = "uvicorn-0.30.6.tar.gz", hash = "sha256:4b15decdda1e72be08209e860a1e10e92439ad5b97cf44cc945fcbee66fc5788"},
|
| 555 |
+
]
|
| 556 |
+
|
| 557 |
+
[package.dependencies]
|
| 558 |
+
click = ">=7.0"
|
| 559 |
+
h11 = ">=0.8"
|
| 560 |
+
|
| 561 |
+
[package.extras]
|
| 562 |
+
standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"]
|
| 563 |
+
|
| 564 |
+
[[package]]
|
| 565 |
+
name = "xmltodict"
|
| 566 |
+
version = "0.13.0"
|
| 567 |
+
description = "Makes working with XML feel like you are working with JSON"
|
| 568 |
+
optional = false
|
| 569 |
+
python-versions = ">=3.4"
|
| 570 |
+
files = [
|
| 571 |
+
{file = "xmltodict-0.13.0-py2.py3-none-any.whl", hash = "sha256:aa89e8fd76320154a40d19a0df04a4695fb9dc5ba977cbb68ab3e4eb225e7852"},
|
| 572 |
+
{file = "xmltodict-0.13.0.tar.gz", hash = "sha256:341595a488e3e01a85a9d8911d8912fd922ede5fecc4dce437eb4b6c8d037e56"},
|
| 573 |
+
]
|
| 574 |
+
|
| 575 |
+
[metadata]
|
| 576 |
+
lock-version = "2.0"
|
| 577 |
+
python-versions = "^3.12"
|
| 578 |
+
content-hash = "8cfbb5ac5e9e2098578646c06fbc895ae04531d66c9985635f06ee2b787e3c75"
|
pyproject.toml
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[tool.poetry]
|
| 2 |
+
name = "mediaflow proxy"
|
| 3 |
+
version = "1.0.0"
|
| 4 |
+
description = "A high-performance proxy server for streaming media, supporting HTTP(S), HLS, and MPEG-DASH with real-time DRM decryption."
|
| 5 |
+
authors = ["mhdzumair <mhdzumair@gmail.com>"]
|
| 6 |
+
readme = "README.md"
|
| 7 |
+
|
| 8 |
+
[tool.poetry.dependencies]
|
| 9 |
+
python = "^3.12"
|
| 10 |
+
fastapi = "^0.112.0"
|
| 11 |
+
httpx = {extras = ["socks"], version = "^0.27.0"}
|
| 12 |
+
tenacity = "^9.0.0"
|
| 13 |
+
xmltodict = "^0.13.0"
|
| 14 |
+
cachetools = "^5.4.0"
|
| 15 |
+
pydantic-settings = "^2.4.0"
|
| 16 |
+
gunicorn = "^23.0.0"
|
| 17 |
+
pycryptodome = "^3.20.0"
|
| 18 |
+
uvicorn = "^0.30.6"
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
[tool.poetry.group.dev.dependencies]
|
| 22 |
+
black = "^24.8.0"
|
| 23 |
+
|
| 24 |
+
[build-system]
|
| 25 |
+
requires = ["poetry-core"]
|
| 26 |
+
build-backend = "poetry.core.masonry.api"
|
| 27 |
+
|
| 28 |
+
[tool.black]
|
| 29 |
+
line-length = 120
|
static/index.html
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
<head>
|
| 4 |
+
<meta charset="UTF-8">
|
| 5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
| 6 |
+
<title>MediaFlow Proxy</title>
|
| 7 |
+
<link rel="icon" href="/logo.png" type="image/x-icon">
|
| 8 |
+
<style>
|
| 9 |
+
body {
|
| 10 |
+
font-family: Arial, sans-serif;
|
| 11 |
+
line-height: 1.6;
|
| 12 |
+
color: #333;
|
| 13 |
+
max-width: 800px;
|
| 14 |
+
margin: 0 auto;
|
| 15 |
+
padding: 20px;
|
| 16 |
+
background-color: #f9f9f9;
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
header {
|
| 20 |
+
background-color: #90aacc;
|
| 21 |
+
color: #fff;
|
| 22 |
+
padding: 10px 0;
|
| 23 |
+
text-align: center;
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
header img {
|
| 27 |
+
width: 200px;
|
| 28 |
+
height: 200px;
|
| 29 |
+
vertical-align: middle;
|
| 30 |
+
border-radius: 15px;
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
header h1 {
|
| 34 |
+
display: inline;
|
| 35 |
+
margin-left: 20px;
|
| 36 |
+
font-size: 36px;
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
.feature {
|
| 40 |
+
background-color: #f4f4f4;
|
| 41 |
+
border-left: 4px solid #3498db;
|
| 42 |
+
padding: 10px;
|
| 43 |
+
margin-bottom: 10px;
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
a {
|
| 47 |
+
color: #3498db;
|
| 48 |
+
}
|
| 49 |
+
</style>
|
| 50 |
+
</head>
|
| 51 |
+
<body>
|
| 52 |
+
<header>
|
| 53 |
+
<img src="/logo.png" alt="MediaFlow Proxy Logo">
|
| 54 |
+
<h1>MediaFlow Proxy</h1>
|
| 55 |
+
</header>
|
| 56 |
+
<p>A high-performance proxy server for streaming media, supporting HTTP(S), HLS, and MPEG-DASH with real-time DRM decryption.</p>
|
| 57 |
+
|
| 58 |
+
<h2>Key Features</h2>
|
| 59 |
+
<div class="feature">Convert MPEG-DASH streams (DRM-protected and non-protected) to HLS</div>
|
| 60 |
+
<div class="feature">Support for Clear Key DRM-protected MPD DASH streams</div>
|
| 61 |
+
<div class="feature">Handle both live and video-on-demand (VOD) DASH streams</div>
|
| 62 |
+
<div class="feature">Proxy HTTP/HTTPS links with custom headers</div>
|
| 63 |
+
<div class="feature">Proxy and modify HLS (M3U8) streams in real-time with custom headers and key URL modifications for bypassing some sneaky restrictions.</div>
|
| 64 |
+
<div class="feature">Protect against unauthorized access and network bandwidth abuses</div>
|
| 65 |
+
|
| 66 |
+
<h2>Getting Started</h2>
|
| 67 |
+
<p>Visit the <a href="https://github.com/mhdzumair/mediaflow-proxy">GitHub repository</a> for installation instructions and documentation.</p>
|
| 68 |
+
|
| 69 |
+
<h2>Premium Hosted Service</h2>
|
| 70 |
+
<p>For a hassle-free experience, check out <a href="https://store.elfhosted.com/product/mediaflow-proxy">premium hosted service on ElfHosted</a>.</p>
|
| 71 |
+
|
| 72 |
+
<h2>API Documentation</h2>
|
| 73 |
+
<p>Explore the <a href="/docs">Swagger UI</a> for comprehensive details about the API endpoints and their usage.</p>
|
| 74 |
+
|
| 75 |
+
</body>
|
| 76 |
+
</html>
|
static/logo.png
ADDED
|