repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_pot/test_pot_director.py
test/test_pot/test_pot_director.py
from __future__ import annotations import abc import base64 import dataclasses import hashlib import json import time import pytest from yt_dlp.extractor.youtube.pot._provider import BuiltinIEContentProvider, IEContentProvider from yt_dlp.extractor.youtube.pot.provider import ( PoTokenRequest, PoTokenContext, PoTokenProviderError, PoTokenProviderRejectedRequest, ) from yt_dlp.extractor.youtube.pot._director import ( PoTokenCache, validate_cache_spec, clean_pot, validate_response, PoTokenRequestDirector, provider_display_list, ) from yt_dlp.extractor.youtube.pot.cache import ( PoTokenCacheSpec, PoTokenCacheSpecProvider, PoTokenCacheProvider, CacheProviderWritePolicy, PoTokenCacheProviderError, ) from yt_dlp.extractor.youtube.pot.provider import ( PoTokenResponse, PoTokenProvider, ) class BaseMockPoTokenProvider(PoTokenProvider, abc.ABC): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.available_called_times = 0 self.request_called_times = 0 self.close_called = False def is_available(self) -> bool: self.available_called_times += 1 return True def request_pot(self, *args, **kwargs): self.request_called_times += 1 return super().request_pot(*args, **kwargs) def close(self): self.close_called = True super().close() class ExamplePTP(BaseMockPoTokenProvider): PROVIDER_NAME = 'example' PROVIDER_VERSION = '0.0.1' BUG_REPORT_LOCATION = 'https://example.com/issues' _SUPPORTED_CLIENTS = ('WEB',) _SUPPORTED_CONTEXTS = (PoTokenContext.GVS, ) def _real_request_pot(self, request: PoTokenRequest) -> PoTokenResponse: if request.data_sync_id == 'example': return PoTokenResponse(request.video_id) return PoTokenResponse(EXAMPLE_PO_TOKEN) def success_ptp(response: PoTokenResponse | None = None, key: str | None = None): class SuccessPTP(BaseMockPoTokenProvider): PROVIDER_NAME = 'success' PROVIDER_VERSION = '0.0.1' BUG_REPORT_LOCATION = 'https://success.example.com/issues' _SUPPORTED_CLIENTS = ('WEB',) _SUPPORTED_CONTEXTS = (PoTokenContext.GVS,) def 
_real_request_pot(self, request: PoTokenRequest) -> PoTokenResponse: return response or PoTokenResponse(EXAMPLE_PO_TOKEN) if key: SuccessPTP.PROVIDER_KEY = key return SuccessPTP @pytest.fixture def pot_provider(ie, logger): return success_ptp()(ie=ie, logger=logger, settings={}) class UnavailablePTP(BaseMockPoTokenProvider): PROVIDER_NAME = 'unavailable' BUG_REPORT_LOCATION = 'https://unavailable.example.com/issues' _SUPPORTED_CLIENTS = None _SUPPORTED_CONTEXTS = None def is_available(self) -> bool: super().is_available() return False def _real_request_pot(self, request: PoTokenRequest) -> PoTokenResponse: raise PoTokenProviderError('something went wrong') class UnsupportedPTP(BaseMockPoTokenProvider): PROVIDER_NAME = 'unsupported' BUG_REPORT_LOCATION = 'https://unsupported.example.com/issues' _SUPPORTED_CLIENTS = None _SUPPORTED_CONTEXTS = None def _real_request_pot(self, request: PoTokenRequest) -> PoTokenResponse: raise PoTokenProviderRejectedRequest('unsupported request') class ErrorPTP(BaseMockPoTokenProvider): PROVIDER_NAME = 'error' BUG_REPORT_LOCATION = 'https://error.example.com/issues' _SUPPORTED_CLIENTS = None _SUPPORTED_CONTEXTS = None def _real_request_pot(self, request: PoTokenRequest) -> PoTokenResponse: expected = request.video_id == 'expected' raise PoTokenProviderError('an error occurred', expected=expected) class UnexpectedErrorPTP(BaseMockPoTokenProvider): PROVIDER_NAME = 'unexpected_error' BUG_REPORT_LOCATION = 'https://unexpected.example.com/issues' _SUPPORTED_CLIENTS = None _SUPPORTED_CONTEXTS = None def _real_request_pot(self, request: PoTokenRequest) -> PoTokenResponse: raise ValueError('an unexpected error occurred') class InvalidPTP(BaseMockPoTokenProvider): PROVIDER_NAME = 'invalid' BUG_REPORT_LOCATION = 'https://invalid.example.com/issues' _SUPPORTED_CLIENTS = None _SUPPORTED_CONTEXTS = None def _real_request_pot(self, request: PoTokenRequest) -> PoTokenResponse: if request.video_id == 'invalid_type': return 'invalid-response' else: 
return PoTokenResponse('example-token?', expires_at='123') class BaseMockCacheSpecProvider(PoTokenCacheSpecProvider, abc.ABC): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.generate_called_times = 0 self.is_available_called_times = 0 self.close_called = False def is_available(self) -> bool: self.is_available_called_times += 1 return super().is_available() def generate_cache_spec(self, request: PoTokenRequest): self.generate_called_times += 1 def close(self): self.close_called = True super().close() class ExampleCacheSpecProviderPCSP(BaseMockCacheSpecProvider): PROVIDER_NAME = 'example' PROVIDER_VERSION = '0.0.1' BUG_REPORT_LOCATION = 'https://example.com/issues' def generate_cache_spec(self, request: PoTokenRequest): super().generate_cache_spec(request) return PoTokenCacheSpec( key_bindings={'v': request.video_id, 'e': None}, default_ttl=60, ) class UnavailableCacheSpecProviderPCSP(BaseMockCacheSpecProvider): PROVIDER_NAME = 'unavailable' PROVIDER_VERSION = '0.0.1' def is_available(self) -> bool: super().is_available() return False def generate_cache_spec(self, request: PoTokenRequest): super().generate_cache_spec(request) return None class UnsupportedCacheSpecProviderPCSP(BaseMockCacheSpecProvider): PROVIDER_NAME = 'unsupported' PROVIDER_VERSION = '0.0.1' def generate_cache_spec(self, request: PoTokenRequest): super().generate_cache_spec(request) return None class InvalidSpecCacheSpecProviderPCSP(BaseMockCacheSpecProvider): PROVIDER_NAME = 'invalid' PROVIDER_VERSION = '0.0.1' def generate_cache_spec(self, request: PoTokenRequest): super().generate_cache_spec(request) return 'invalid-spec' class ErrorSpecCacheSpecProviderPCSP(BaseMockCacheSpecProvider): PROVIDER_NAME = 'invalid' PROVIDER_VERSION = '0.0.1' def generate_cache_spec(self, request: PoTokenRequest): super().generate_cache_spec(request) raise ValueError('something went wrong') class BaseMockCacheProvider(PoTokenCacheProvider, abc.ABC): BUG_REPORT_MESSAGE = 'example bug report 
message' def __init__(self, *args, available=True, **kwargs): super().__init__(*args, **kwargs) self.store_calls = 0 self.delete_calls = 0 self.get_calls = 0 self.available_called_times = 0 self.available = available def is_available(self) -> bool: self.available_called_times += 1 return self.available def store(self, *args, **kwargs): self.store_calls += 1 def delete(self, *args, **kwargs): self.delete_calls += 1 def get(self, *args, **kwargs): self.get_calls += 1 def close(self): self.close_called = True super().close() class ErrorPCP(BaseMockCacheProvider): PROVIDER_NAME = 'error' def store(self, *args, **kwargs): super().store(*args, **kwargs) raise PoTokenCacheProviderError('something went wrong') def get(self, *args, **kwargs): super().get(*args, **kwargs) raise PoTokenCacheProviderError('something went wrong') class UnexpectedErrorPCP(BaseMockCacheProvider): PROVIDER_NAME = 'unexpected_error' def store(self, *args, **kwargs): super().store(*args, **kwargs) raise ValueError('something went wrong') def get(self, *args, **kwargs): super().get(*args, **kwargs) raise ValueError('something went wrong') class MockMemoryPCP(BaseMockCacheProvider): PROVIDER_NAME = 'memory' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.cache = {} def store(self, key, value, expires_at): super().store(key, value, expires_at) self.cache[key] = (value, expires_at) def delete(self, key): super().delete(key) self.cache.pop(key, None) def get(self, key): super().get(key) return self.cache.get(key, [None])[0] def create_memory_pcp(ie, logger, provider_key='memory', provider_name='memory', available=True): cache = MockMemoryPCP(ie, logger, {}, available=available) cache.PROVIDER_KEY = provider_key cache.PROVIDER_NAME = provider_name return cache @pytest.fixture def memorypcp(ie, logger) -> MockMemoryPCP: return create_memory_pcp(ie, logger) @pytest.fixture def pot_cache(ie, logger): class MockPoTokenCache(PoTokenCache): def __init__(self, *args, **kwargs): 
super().__init__(*args, **kwargs) self.get_calls = 0 self.store_calls = 0 self.close_called = False def get(self, *args, **kwargs): self.get_calls += 1 return super().get(*args, **kwargs) def store(self, *args, **kwargs): self.store_calls += 1 return super().store(*args, **kwargs) def close(self): self.close_called = True super().close() return MockPoTokenCache( cache_providers=[MockMemoryPCP(ie, logger, {})], cache_spec_providers=[ExampleCacheSpecProviderPCSP(ie, logger, settings={})], logger=logger, ) EXAMPLE_PO_TOKEN = base64.urlsafe_b64encode(b'example-token').decode() class TestPoTokenCache: def test_cache_success(self, memorypcp, pot_request, ie, logger): cache = PoTokenCache( cache_providers=[memorypcp], cache_spec_providers=[ExampleCacheSpecProviderPCSP(ie=ie, logger=logger, settings={})], logger=logger, ) response = PoTokenResponse(EXAMPLE_PO_TOKEN) assert cache.get(pot_request) is None cache.store(pot_request, response) cached_response = cache.get(pot_request) assert cached_response is not None assert cached_response.po_token == EXAMPLE_PO_TOKEN assert cached_response.expires_at is not None assert cache.get(dataclasses.replace(pot_request, video_id='another-video-id')) is None def test_unsupported_cache_spec_no_fallback(self, memorypcp, pot_request, ie, logger): unsupported_provider = UnsupportedCacheSpecProviderPCSP(ie=ie, logger=logger, settings={}) cache = PoTokenCache( cache_providers=[memorypcp], cache_spec_providers=[unsupported_provider], logger=logger, ) response = PoTokenResponse(EXAMPLE_PO_TOKEN) assert cache.get(pot_request) is None assert unsupported_provider.generate_called_times == 1 cache.store(pot_request, response) assert len(memorypcp.cache) == 0 assert unsupported_provider.generate_called_times == 2 assert cache.get(pot_request) is None assert unsupported_provider.generate_called_times == 3 assert len(logger.messages.get('error', [])) == 0 def test_unsupported_cache_spec_fallback(self, memorypcp, pot_request, ie, logger): 
unsupported_provider = UnsupportedCacheSpecProviderPCSP(ie=ie, logger=logger, settings={}) example_provider = ExampleCacheSpecProviderPCSP(ie=ie, logger=logger, settings={}) cache = PoTokenCache( cache_providers=[memorypcp], cache_spec_providers=[unsupported_provider, example_provider], logger=logger, ) response = PoTokenResponse(EXAMPLE_PO_TOKEN) assert cache.get(pot_request) is None assert unsupported_provider.generate_called_times == 1 assert example_provider.generate_called_times == 1 cache.store(pot_request, response) assert unsupported_provider.generate_called_times == 2 assert example_provider.generate_called_times == 2 cached_response = cache.get(pot_request) assert unsupported_provider.generate_called_times == 3 assert example_provider.generate_called_times == 3 assert cached_response is not None assert cached_response.po_token == EXAMPLE_PO_TOKEN assert cached_response.expires_at is not None assert len(logger.messages.get('error', [])) == 0 def test_invalid_cache_spec_no_fallback(self, memorypcp, pot_request, ie, logger): cache = PoTokenCache( cache_providers=[memorypcp], cache_spec_providers=[InvalidSpecCacheSpecProviderPCSP(ie=ie, logger=logger, settings={})], logger=logger, ) response = PoTokenResponse(EXAMPLE_PO_TOKEN) assert cache.get(pot_request) is None cache.store(pot_request, response) assert cache.get(pot_request) is None assert 'PoTokenCacheSpecProvider "InvalidSpecCacheSpecProvider" generate_cache_spec() returned invalid spec invalid-spec; please report this issue to the provider developer at (developer has not provided a bug report location) .' 
in logger.messages['error'] def test_invalid_cache_spec_fallback(self, memorypcp, pot_request, ie, logger): invalid_provider = InvalidSpecCacheSpecProviderPCSP(ie=ie, logger=logger, settings={}) example_provider = ExampleCacheSpecProviderPCSP(ie=ie, logger=logger, settings={}) cache = PoTokenCache( cache_providers=[memorypcp], cache_spec_providers=[invalid_provider, example_provider], logger=logger, ) response = PoTokenResponse(EXAMPLE_PO_TOKEN) assert cache.get(pot_request) is None assert invalid_provider.generate_called_times == example_provider.generate_called_times == 1 cache.store(pot_request, response) assert invalid_provider.generate_called_times == example_provider.generate_called_times == 2 cached_response = cache.get(pot_request) assert invalid_provider.generate_called_times == example_provider.generate_called_times == 3 assert cached_response is not None assert cached_response.po_token == EXAMPLE_PO_TOKEN assert cached_response.expires_at is not None assert 'PoTokenCacheSpecProvider "InvalidSpecCacheSpecProvider" generate_cache_spec() returned invalid spec invalid-spec; please report this issue to the provider developer at (developer has not provided a bug report location) .' 
in logger.messages['error'] def test_unavailable_cache_spec_no_fallback(self, memorypcp, pot_request, ie, logger): unavailable_provider = UnavailableCacheSpecProviderPCSP(ie=ie, logger=logger, settings={}) cache = PoTokenCache( cache_providers=[memorypcp], cache_spec_providers=[unavailable_provider], logger=logger, ) response = PoTokenResponse(EXAMPLE_PO_TOKEN) assert cache.get(pot_request) is None cache.store(pot_request, response) assert cache.get(pot_request) is None assert unavailable_provider.generate_called_times == 0 def test_unavailable_cache_spec_fallback(self, memorypcp, pot_request, ie, logger): unavailable_provider = UnavailableCacheSpecProviderPCSP(ie=ie, logger=logger, settings={}) example_provider = ExampleCacheSpecProviderPCSP(ie=ie, logger=logger, settings={}) cache = PoTokenCache( cache_providers=[memorypcp], cache_spec_providers=[unavailable_provider, example_provider], logger=logger, ) response = PoTokenResponse(EXAMPLE_PO_TOKEN) assert cache.get(pot_request) is None assert unavailable_provider.generate_called_times == 0 assert unavailable_provider.is_available_called_times == 1 assert example_provider.generate_called_times == 1 cache.store(pot_request, response) assert unavailable_provider.generate_called_times == 0 assert unavailable_provider.is_available_called_times == 2 assert example_provider.generate_called_times == 2 cached_response = cache.get(pot_request) assert unavailable_provider.generate_called_times == 0 assert unavailable_provider.is_available_called_times == 3 assert example_provider.generate_called_times == 3 assert example_provider.is_available_called_times == 3 assert cached_response is not None assert cached_response.po_token == EXAMPLE_PO_TOKEN assert cached_response.expires_at is not None def test_unexpected_error_cache_spec(self, memorypcp, pot_request, ie, logger): error_provider = ErrorSpecCacheSpecProviderPCSP(ie=ie, logger=logger, settings={}) cache = PoTokenCache( cache_providers=[memorypcp], 
cache_spec_providers=[error_provider], logger=logger, ) response = PoTokenResponse(EXAMPLE_PO_TOKEN) assert cache.get(pot_request) is None cache.store(pot_request, response) assert cache.get(pot_request) is None assert error_provider.generate_called_times == 3 assert error_provider.is_available_called_times == 3 assert 'Error occurred with "invalid" PO Token cache spec provider: ValueError(\'something went wrong\'); please report this issue to the provider developer at (developer has not provided a bug report location) .' in logger.messages['error'] def test_unexpected_error_cache_spec_fallback(self, memorypcp, pot_request, ie, logger): error_provider = ErrorSpecCacheSpecProviderPCSP(ie=ie, logger=logger, settings={}) example_provider = ExampleCacheSpecProviderPCSP(ie=ie, logger=logger, settings={}) cache = PoTokenCache( cache_providers=[memorypcp], cache_spec_providers=[error_provider, example_provider], logger=logger, ) response = PoTokenResponse(EXAMPLE_PO_TOKEN) assert cache.get(pot_request) is None assert error_provider.generate_called_times == 1 assert error_provider.is_available_called_times == 1 assert example_provider.generate_called_times == 1 cache.store(pot_request, response) assert error_provider.generate_called_times == 2 assert error_provider.is_available_called_times == 2 assert example_provider.generate_called_times == 2 cached_response = cache.get(pot_request) assert error_provider.generate_called_times == 3 assert error_provider.is_available_called_times == 3 assert example_provider.generate_called_times == 3 assert example_provider.is_available_called_times == 3 assert cached_response is not None assert cached_response.po_token == EXAMPLE_PO_TOKEN assert cached_response.expires_at is not None assert 'Error occurred with "invalid" PO Token cache spec provider: ValueError(\'something went wrong\'); please report this issue to the provider developer at (developer has not provided a bug report location) .' 
in logger.messages['error'] def test_key_bindings_spec_provider(self, memorypcp, pot_request, ie, logger): class ExampleProviderPCSP(PoTokenCacheSpecProvider): PROVIDER_NAME = 'example' def generate_cache_spec(self, request: PoTokenRequest): return PoTokenCacheSpec( key_bindings={'v': request.video_id}, default_ttl=60, ) class ExampleProviderTwoPCSP(ExampleProviderPCSP): pass example_provider = ExampleProviderPCSP(ie=ie, logger=logger, settings={}) example_provider_two = ExampleProviderTwoPCSP(ie=ie, logger=logger, settings={}) response = PoTokenResponse(EXAMPLE_PO_TOKEN) cache = PoTokenCache( cache_providers=[memorypcp], cache_spec_providers=[example_provider], logger=logger, ) assert cache.get(pot_request) is None cache.store(pot_request, response) assert len(memorypcp.cache) == 1 assert hashlib.sha256( f"{{'_dlp_cache': 'v1', '_p': 'ExampleProvider', 'v': '{pot_request.video_id}'}}".encode()).hexdigest() in memorypcp.cache # The second spec provider returns the exact same key bindings as the first one, # however the PoTokenCache should use the provider key to differentiate between them cache = PoTokenCache( cache_providers=[memorypcp], cache_spec_providers=[example_provider_two], logger=logger, ) assert cache.get(pot_request) is None cache.store(pot_request, response) assert len(memorypcp.cache) == 2 assert hashlib.sha256( f"{{'_dlp_cache': 'v1', '_p': 'ExampleProviderTwo', 'v': '{pot_request.video_id}'}}".encode()).hexdigest() in memorypcp.cache def test_cache_provider_preferences(self, pot_request, ie, logger): pcp_one = create_memory_pcp(ie, logger, provider_key='memory_pcp_one') pcp_two = create_memory_pcp(ie, logger, provider_key='memory_pcp_two') cache = PoTokenCache( cache_providers=[pcp_one, pcp_two], cache_spec_providers=[ExampleCacheSpecProviderPCSP(ie=ie, logger=logger, settings={})], logger=logger, ) cache.store(pot_request, PoTokenResponse(EXAMPLE_PO_TOKEN), write_policy=CacheProviderWritePolicy.WRITE_FIRST) assert len(pcp_one.cache) == 1 assert 
len(pcp_two.cache) == 0 assert cache.get(pot_request) assert pcp_one.get_calls == 1 assert pcp_two.get_calls == 0 standard_preference_called = False pcp_one_preference_claled = False def standard_preference(provider, request, *_, **__): nonlocal standard_preference_called standard_preference_called = True assert isinstance(provider, PoTokenCacheProvider) assert isinstance(request, PoTokenRequest) return 1 def pcp_one_preference(provider, request, *_, **__): nonlocal pcp_one_preference_claled pcp_one_preference_claled = True assert isinstance(provider, PoTokenCacheProvider) assert isinstance(request, PoTokenRequest) if provider.PROVIDER_KEY == pcp_one.PROVIDER_KEY: return -100 return 0 # test that it can hanldle multiple preferences cache.cache_provider_preferences.append(standard_preference) cache.cache_provider_preferences.append(pcp_one_preference) cache.store(pot_request, PoTokenResponse(EXAMPLE_PO_TOKEN), write_policy=CacheProviderWritePolicy.WRITE_FIRST) assert cache.get(pot_request) assert len(pcp_one.cache) == len(pcp_two.cache) == 1 assert pcp_two.get_calls == pcp_one.get_calls == 1 assert pcp_one.store_calls == pcp_two.store_calls == 1 assert standard_preference_called assert pcp_one_preference_claled def test_secondary_cache_provider_hit(self, pot_request, ie, logger): pcp_one = create_memory_pcp(ie, logger, provider_key='memory_pcp_one') pcp_two = create_memory_pcp(ie, logger, provider_key='memory_pcp_two') cache = PoTokenCache( cache_providers=[pcp_two], cache_spec_providers=[ExampleCacheSpecProviderPCSP(ie=ie, logger=logger, settings={})], logger=logger, ) # Given the lower priority provider has the cache hit, store the response in the higher priority provider cache.store(pot_request, PoTokenResponse(EXAMPLE_PO_TOKEN)) assert cache.get(pot_request) cache.cache_providers[pcp_one.PROVIDER_KEY] = pcp_one def pcp_one_pref(provider, *_, **__): if provider.PROVIDER_KEY == pcp_one.PROVIDER_KEY: return 1 return -1 
cache.cache_provider_preferences.append(pcp_one_pref) assert cache.get(pot_request) assert pcp_one.get_calls == 1 assert pcp_two.get_calls == 2 # Should write back to pcp_one (now the highest priority cache provider) assert pcp_one.store_calls == pcp_two.store_calls == 1 assert 'Writing PO Token response to highest priority cache provider' in logger.messages['trace'] def test_cache_provider_no_hits(self, pot_request, ie, logger): pcp_one = create_memory_pcp(ie, logger, provider_key='memory_pcp_one') pcp_two = create_memory_pcp(ie, logger, provider_key='memory_pcp_two') cache = PoTokenCache( cache_providers=[pcp_one, pcp_two], cache_spec_providers=[ExampleCacheSpecProviderPCSP(ie=ie, logger=logger, settings={})], logger=logger, ) assert cache.get(pot_request) is None assert pcp_one.get_calls == pcp_two.get_calls == 1 def test_get_invalid_po_token_response(self, pot_request, ie, logger): # Test various scenarios where the po token response stored in the cache provider is invalid pcp_one = create_memory_pcp(ie, logger, provider_key='memory_pcp_one') pcp_two = create_memory_pcp(ie, logger, provider_key='memory_pcp_two') cache = PoTokenCache( cache_providers=[pcp_one, pcp_two], cache_spec_providers=[ExampleCacheSpecProviderPCSP(ie=ie, logger=logger, settings={})], logger=logger, ) valid_response = PoTokenResponse(EXAMPLE_PO_TOKEN) cache.store(pot_request, valid_response) assert len(pcp_one.cache) == len(pcp_two.cache) == 1 # Overwrite the valid response with an invalid one in the cache pcp_one.store(next(iter(pcp_one.cache.keys())), json.dumps(dataclasses.asdict(PoTokenResponse(None))), int(time.time() + 1000)) assert cache.get(pot_request).po_token == valid_response.po_token assert pcp_one.get_calls == pcp_two.get_calls == 1 assert pcp_one.delete_calls == 1 # Invalid response should be deleted from cache assert pcp_one.store_calls == 3 # Since response was fetched from second cache provider, it should be stored in the first one assert len(pcp_one.cache) == 1 assert 
'Invalid PO Token response retrieved from cache provider "memory": {"po_token": null, "expires_at": null}; example bug report message' in logger.messages['error'] # Overwrite the valid response with an invalid json in the cache pcp_one.store(next(iter(pcp_one.cache.keys())), 'invalid-json', int(time.time() + 1000)) assert cache.get(pot_request).po_token == valid_response.po_token assert pcp_one.get_calls == pcp_two.get_calls == 2 assert pcp_one.delete_calls == 2 assert pcp_one.store_calls == 5 # 3 + 1 store we made in the test + 1 store from lower priority cache provider assert len(pcp_one.cache) == 1 assert 'Invalid PO Token response retrieved from cache provider "memory": invalid-json; example bug report message' in logger.messages['error'] # Valid json, but missing required fields pcp_one.store(next(iter(pcp_one.cache.keys())), '{"unknown_param": 0}', int(time.time() + 1000)) assert cache.get(pot_request).po_token == valid_response.po_token assert pcp_one.get_calls == pcp_two.get_calls == 3 assert pcp_one.delete_calls == 3 assert pcp_one.store_calls == 7 # 5 + 1 store from test + 1 store from lower priority cache provider assert len(pcp_one.cache) == 1 assert 'Invalid PO Token response retrieved from cache provider "memory": {"unknown_param": 0}; example bug report message' in logger.messages['error'] def test_store_invalid_po_token_response(self, pot_request, ie, logger): # Should not store an invalid po token response pcp_one = create_memory_pcp(ie, logger, provider_key='memory_pcp_one') cache = PoTokenCache( cache_providers=[pcp_one], cache_spec_providers=[ExampleCacheSpecProviderPCSP(ie=ie, logger=logger, settings={})], logger=logger, ) cache.store(pot_request, PoTokenResponse(po_token=EXAMPLE_PO_TOKEN, expires_at=80)) assert cache.get(pot_request) is None assert pcp_one.store_calls == 0 assert 'Invalid PO Token response provided to PoTokenCache.store()' in logger.messages['error'][0] def test_store_write_policy(self, pot_request, ie, logger): pcp_one = 
create_memory_pcp(ie, logger, provider_key='memory_pcp_one') pcp_two = create_memory_pcp(ie, logger, provider_key='memory_pcp_two') cache = PoTokenCache( cache_providers=[pcp_one, pcp_two], cache_spec_providers=[ExampleCacheSpecProviderPCSP(ie=ie, logger=logger, settings={})], logger=logger, ) cache.store(pot_request, PoTokenResponse(EXAMPLE_PO_TOKEN), write_policy=CacheProviderWritePolicy.WRITE_FIRST) assert pcp_one.store_calls == 1 assert pcp_two.store_calls == 0 cache.store(pot_request, PoTokenResponse(EXAMPLE_PO_TOKEN), write_policy=CacheProviderWritePolicy.WRITE_ALL) assert pcp_one.store_calls == 2 assert pcp_two.store_calls == 1 def test_store_write_first_policy_cache_spec(self, pot_request, ie, logger): pcp_one = create_memory_pcp(ie, logger, provider_key='memory_pcp_one') pcp_two = create_memory_pcp(ie, logger, provider_key='memory_pcp_two') class WriteFirstPCSP(BaseMockCacheSpecProvider): def generate_cache_spec(self, request: PoTokenRequest): super().generate_cache_spec(request) return PoTokenCacheSpec( key_bindings={'v': request.video_id, 'e': None}, default_ttl=60, write_policy=CacheProviderWritePolicy.WRITE_FIRST, ) cache = PoTokenCache( cache_providers=[pcp_one, pcp_two], cache_spec_providers=[WriteFirstPCSP(ie=ie, logger=logger, settings={})], logger=logger, ) cache.store(pot_request, PoTokenResponse(EXAMPLE_PO_TOKEN)) assert pcp_one.store_calls == 1 assert pcp_two.store_calls == 0 def test_store_write_all_policy_cache_spec(self, pot_request, ie, logger): pcp_one = create_memory_pcp(ie, logger, provider_key='memory_pcp_one') pcp_two = create_memory_pcp(ie, logger, provider_key='memory_pcp_two') class WriteAllPCSP(BaseMockCacheSpecProvider): def generate_cache_spec(self, request: PoTokenRequest): super().generate_cache_spec(request) return PoTokenCacheSpec( key_bindings={'v': request.video_id, 'e': None}, default_ttl=60, write_policy=CacheProviderWritePolicy.WRITE_ALL, ) cache = PoTokenCache( cache_providers=[pcp_one, pcp_two], 
cache_spec_providers=[WriteAllPCSP(ie=ie, logger=logger, settings={})], logger=logger, ) cache.store(pot_request, PoTokenResponse(EXAMPLE_PO_TOKEN)) assert pcp_one.store_calls == 1 assert pcp_two.store_calls == 1 def test_expires_at_pot_response(self, pot_request, memorypcp, ie, logger): cache = PoTokenCache( cache_providers=[memorypcp], cache_spec_providers=[ExampleCacheSpecProviderPCSP(ie=ie, logger=logger, settings={})], logger=logger, ) response = PoTokenResponse(EXAMPLE_PO_TOKEN, expires_at=10000000000) cache.store(pot_request, response) assert next(iter(memorypcp.cache.values()))[1] == 10000000000 def test_expires_at_default_spec(self, pot_request, memorypcp, ie, logger): class TtlPCSP(BaseMockCacheSpecProvider): def generate_cache_spec(self, request: PoTokenRequest): super().generate_cache_spec(request) return PoTokenCacheSpec( key_bindings={'v': request.video_id, 'e': None}, default_ttl=10000000000, ) cache = PoTokenCache( cache_providers=[memorypcp], cache_spec_providers=[TtlPCSP(ie=ie, logger=logger, settings={})], logger=logger, )
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_pot/conftest.py
test/test_pot/conftest.py
import collections import pytest from yt_dlp import YoutubeDL from yt_dlp.cookies import YoutubeDLCookieJar from yt_dlp.extractor.common import InfoExtractor from yt_dlp.extractor.youtube.pot._provider import IEContentProviderLogger from yt_dlp.extractor.youtube.pot.provider import PoTokenRequest, PoTokenContext from yt_dlp.utils.networking import HTTPHeaderDict class MockLogger(IEContentProviderLogger): log_level = IEContentProviderLogger.LogLevel.TRACE def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.messages = collections.defaultdict(list) def trace(self, message: str): self.messages['trace'].append(message) def debug(self, message: str): self.messages['debug'].append(message) def info(self, message: str): self.messages['info'].append(message) def warning(self, message: str, *, once=False): self.messages['warning'].append(message) def error(self, message: str): self.messages['error'].append(message) @pytest.fixture def ie() -> InfoExtractor: ydl = YoutubeDL() return ydl.get_info_extractor('Youtube') @pytest.fixture def logger() -> MockLogger: return MockLogger() @pytest.fixture() def pot_request() -> PoTokenRequest: return PoTokenRequest( context=PoTokenContext.GVS, innertube_context={'client': {'clientName': 'WEB'}}, innertube_host='youtube.com', session_index=None, player_url=None, is_authenticated=False, video_webpage=None, visitor_data='example-visitor-data', data_sync_id='example-data-sync-id', video_id='example-video-id', request_cookiejar=YoutubeDLCookieJar(), request_proxy=None, request_headers=HTTPHeaderDict(), request_timeout=None, request_source_address=None, request_verify_tls=True, bypass_cache=False, )
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_pot/test_pot_builtin_memorycache.py
test/test_pot/test_pot_builtin_memorycache.py
import threading import time from collections import OrderedDict import pytest from yt_dlp.extractor.youtube.pot._provider import IEContentProvider, BuiltinIEContentProvider from yt_dlp.utils import bug_reports_message from yt_dlp.extractor.youtube.pot._builtin.memory_cache import MemoryLRUPCP, memorylru_preference, initialize_global_cache from yt_dlp.version import __version__ from yt_dlp.extractor.youtube.pot._registry import _pot_cache_providers, _pot_memory_cache class TestMemoryLRUPCS: def test_base_type(self): assert issubclass(MemoryLRUPCP, IEContentProvider) assert issubclass(MemoryLRUPCP, BuiltinIEContentProvider) @pytest.fixture def pcp(self, ie, logger) -> MemoryLRUPCP: return MemoryLRUPCP(ie, logger, {}, initialize_cache=lambda max_size: (OrderedDict(), threading.Lock(), max_size)) def test_is_registered(self): assert _pot_cache_providers.value.get('MemoryLRU') == MemoryLRUPCP def test_initialization(self, pcp): assert pcp.PROVIDER_NAME == 'memory' assert pcp.PROVIDER_VERSION == __version__ assert pcp.BUG_REPORT_MESSAGE == bug_reports_message(before='') assert pcp.is_available() def test_store_and_get(self, pcp): pcp.store('key1', 'value1', int(time.time()) + 60) assert pcp.get('key1') == 'value1' assert len(pcp.cache) == 1 def test_store_ignore_expired(self, pcp): pcp.store('key1', 'value1', int(time.time()) - 1) assert len(pcp.cache) == 0 assert pcp.get('key1') is None assert len(pcp.cache) == 0 def test_store_override_existing_key(self, ie, logger): MAX_SIZE = 2 pcp = MemoryLRUPCP(ie, logger, {}, initialize_cache=lambda max_size: (OrderedDict(), threading.Lock(), MAX_SIZE)) pcp.store('key1', 'value1', int(time.time()) + 60) pcp.store('key2', 'value2', int(time.time()) + 60) assert len(pcp.cache) == 2 pcp.store('key1', 'value2', int(time.time()) + 60) # Ensure that the override key gets added to the end of the cache instead of in the same position pcp.store('key3', 'value3', int(time.time()) + 60) assert pcp.get('key1') == 'value2' def 
test_store_ignore_expired_existing_key(self, pcp): pcp.store('key1', 'value2', int(time.time()) + 60) pcp.store('key1', 'value1', int(time.time()) - 1) assert len(pcp.cache) == 1 assert pcp.get('key1') == 'value2' assert len(pcp.cache) == 1 def test_get_key_expired(self, pcp): pcp.store('key1', 'value1', int(time.time()) + 60) assert pcp.get('key1') == 'value1' assert len(pcp.cache) == 1 pcp.cache['key1'] = ('value1', int(time.time()) - 1) assert pcp.get('key1') is None assert len(pcp.cache) == 0 def test_lru_eviction(self, ie, logger): MAX_SIZE = 2 provider = MemoryLRUPCP(ie, logger, {}, initialize_cache=lambda max_size: (OrderedDict(), threading.Lock(), MAX_SIZE)) provider.store('key1', 'value1', int(time.time()) + 5) provider.store('key2', 'value2', int(time.time()) + 5) assert len(provider.cache) == 2 assert provider.get('key1') == 'value1' provider.store('key3', 'value3', int(time.time()) + 5) assert len(provider.cache) == 2 assert provider.get('key2') is None provider.store('key4', 'value4', int(time.time()) + 5) assert len(provider.cache) == 2 assert provider.get('key1') is None assert provider.get('key3') == 'value3' assert provider.get('key4') == 'value4' def test_delete(self, pcp): pcp.store('key1', 'value1', int(time.time()) + 5) assert len(pcp.cache) == 1 assert pcp.get('key1') == 'value1' pcp.delete('key1') assert len(pcp.cache) == 0 assert pcp.get('key1') is None def test_use_global_cache_default(self, ie, logger): pcp = MemoryLRUPCP(ie, logger, {}) assert pcp.max_size == _pot_memory_cache.value['max_size'] == 25 assert pcp.cache is _pot_memory_cache.value['cache'] assert pcp.lock is _pot_memory_cache.value['lock'] pcp2 = MemoryLRUPCP(ie, logger, {}) assert pcp.max_size == pcp2.max_size == _pot_memory_cache.value['max_size'] == 25 assert pcp.cache is pcp2.cache is _pot_memory_cache.value['cache'] assert pcp.lock is pcp2.lock is _pot_memory_cache.value['lock'] def test_fail_max_size_change_global(self, ie, logger): pcp = MemoryLRUPCP(ie, logger, {}) 
assert pcp.max_size == _pot_memory_cache.value['max_size'] == 25 with pytest.raises(ValueError, match='Cannot change max_size of initialized global memory cache'): initialize_global_cache(50) assert pcp.max_size == _pot_memory_cache.value['max_size'] == 25 def test_memory_lru_preference(self, pcp, ie, pot_request): assert memorylru_preference(pcp, pot_request) == 10000
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/testdata/plugin_packages/testpackage/yt_dlp_plugins/extractor/package.py
test/testdata/plugin_packages/testpackage/yt_dlp_plugins/extractor/package.py
from yt_dlp.extractor.common import InfoExtractor class PackagePluginIE(InfoExtractor): _VALID_URL = 'package' pass
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/testdata/netrc/print_netrc.py
test/testdata/netrc/print_netrc.py
with open('./test/testdata/netrc/netrc', encoding='utf-8') as fp: print(fp.read())
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/testdata/reload_plugins/yt_dlp_plugins/extractor/normal.py
test/testdata/reload_plugins/yt_dlp_plugins/extractor/normal.py
from yt_dlp.extractor.common import InfoExtractor class NormalPluginIE(InfoExtractor): _VALID_URL = 'normal' REPLACED = True class _IgnoreUnderscorePluginIE(InfoExtractor): pass
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/testdata/reload_plugins/yt_dlp_plugins/postprocessor/normal.py
test/testdata/reload_plugins/yt_dlp_plugins/postprocessor/normal.py
from yt_dlp.postprocessor.common import PostProcessor class NormalPluginPP(PostProcessor): REPLACED = True
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/testdata/yt_dlp_plugins/extractor/overridetwo.py
test/testdata/yt_dlp_plugins/extractor/overridetwo.py
from yt_dlp.extractor.generic import GenericIE class _UnderscoreOverrideGenericIE(GenericIE, plugin_name='underscore-override'): SECONDARY_TEST_FIELD = 'underscore-override'
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/testdata/yt_dlp_plugins/extractor/_ignore.py
test/testdata/yt_dlp_plugins/extractor/_ignore.py
from yt_dlp.extractor.common import InfoExtractor class IgnorePluginIE(InfoExtractor): pass
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/testdata/yt_dlp_plugins/extractor/ignore.py
test/testdata/yt_dlp_plugins/extractor/ignore.py
from yt_dlp.extractor.common import InfoExtractor class IgnoreNotInAllPluginIE(InfoExtractor): pass class InAllPluginIE(InfoExtractor): _VALID_URL = 'inallpluginie' pass __all__ = ['InAllPluginIE']
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/testdata/yt_dlp_plugins/extractor/override.py
test/testdata/yt_dlp_plugins/extractor/override.py
from yt_dlp.extractor.generic import GenericIE class OverrideGenericIE(GenericIE, plugin_name='override'): TEST_FIELD = 'override'
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/testdata/yt_dlp_plugins/extractor/normal.py
test/testdata/yt_dlp_plugins/extractor/normal.py
from yt_dlp.extractor.common import InfoExtractor class NormalPluginIE(InfoExtractor): _VALID_URL = 'normalpluginie' REPLACED = False class _IgnoreUnderscorePluginIE(InfoExtractor): _VALID_URL = 'ignoreunderscorepluginie' pass
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/testdata/yt_dlp_plugins/postprocessor/normal.py
test/testdata/yt_dlp_plugins/postprocessor/normal.py
from yt_dlp.postprocessor.common import PostProcessor class NormalPluginPP(PostProcessor): REPLACED = False
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/testdata/zipped_plugins/yt_dlp_plugins/extractor/zipped.py
test/testdata/zipped_plugins/yt_dlp_plugins/extractor/zipped.py
from yt_dlp.extractor.common import InfoExtractor class ZippedPluginIE(InfoExtractor): _VALID_URL = 'zippedpluginie' pass
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/testdata/zipped_plugins/yt_dlp_plugins/postprocessor/zipped.py
test/testdata/zipped_plugins/yt_dlp_plugins/postprocessor/zipped.py
from yt_dlp.postprocessor.common import PostProcessor class ZippedPluginPP(PostProcessor): pass
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_jsc/conftest.py
test/test_jsc/conftest.py
import re import pathlib import pytest import yt_dlp.globals from yt_dlp import YoutubeDL from yt_dlp.extractor.common import InfoExtractor _TESTDATA_PATH = pathlib.Path(__file__).parent.parent / 'testdata/sigs' _player_re = re.compile(r'^.+/player/(?P<id>[a-zA-Z0-9_/.-]+)\.js$') _player_id_trans = str.maketrans(dict.fromkeys('/.-', '_')) @pytest.fixture def ie() -> InfoExtractor: runtime_names = yt_dlp.globals.supported_js_runtimes.value ydl = YoutubeDL({'js_runtimes': {key: {} for key in runtime_names}}) ie = ydl.get_info_extractor('Youtube') def _load_player(video_id, player_url, fatal=True): match = _player_re.match(player_url) test_id = match.group('id').translate(_player_id_trans) cached_file = _TESTDATA_PATH / f'player-{test_id}.js' if cached_file.exists(): return cached_file.read_text() if code := ie._download_webpage(player_url, video_id, fatal=fatal): _TESTDATA_PATH.mkdir(exist_ok=True, parents=True) cached_file.write_text(code) return code return None ie._load_player = _load_player return ie class MockLogger: def trace(self, message: str): print(f'trace: {message}') def debug(self, message: str, *, once=False): print(f'debug: {message}') def info(self, message: str): print(f'info: {message}') def warning(self, message: str, *, once=False): print(f'warning: {message}') def error(self, message: str): print(f'error: {message}') @pytest.fixture def logger(): return MockLogger()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_jsc/test_ejs_integration.py
test/test_jsc/test_ejs_integration.py
from __future__ import annotations import dataclasses import enum import importlib.util import json import pytest from yt_dlp.extractor.youtube.jsc.provider import ( JsChallengeRequest, JsChallengeType, JsChallengeProviderResponse, JsChallengeResponse, NChallengeInput, NChallengeOutput, SigChallengeInput, SigChallengeOutput, ) from yt_dlp.extractor.youtube.jsc._builtin.bun import BunJCP from yt_dlp.extractor.youtube.jsc._builtin.deno import DenoJCP from yt_dlp.extractor.youtube.jsc._builtin.node import NodeJCP from yt_dlp.extractor.youtube.jsc._builtin.quickjs import QuickJSJCP _has_ejs = bool(importlib.util.find_spec('yt_dlp_ejs')) pytestmark = pytest.mark.skipif(not _has_ejs, reason='yt-dlp-ejs not available') class Variant(enum.Enum): main = 'player_ias.vflset/en_US/base.js' tcc = 'player_ias_tcc.vflset/en_US/base.js' tce = 'player_ias_tce.vflset/en_US/base.js' es5 = 'player_es5.vflset/en_US/base.js' es6 = 'player_es6.vflset/en_US/base.js' tv = 'tv-player-ias.vflset/tv-player-ias.js' tv_es6 = 'tv-player-es6.vflset/tv-player-es6.js' phone = 'player-plasma-ias-phone-en_US.vflset/base.js' tablet = 'player-plasma-ias-tablet-en_US.vflset/base.js' @dataclasses.dataclass class Challenge: player: str variant: Variant type: JsChallengeType values: dict[str, str] = dataclasses.field(default_factory=dict) def url(self, /): return f'https://www.youtube.com/s/player/{self.player}/{self.variant.value}' CHALLENGES: list[Challenge] = [ Challenge('3d3ba064', Variant.tce, JsChallengeType.N, { 'ZdZIqFPQK-Ty8wId': 'qmtUsIz04xxiNW', '4GMrWHyKI5cEvhDO': 'N9gmEX7YhKTSmw', }), Challenge('3d3ba064', Variant.tce, JsChallengeType.SIG, { 'gN7a-hudCuAuPH6fByOk1_GNXN0yNMHShjZXS2VOgsEItAJz0tipeavEOmNdYN-wUtcEqD3bCXjc0iyKfAyZxCBGgIARwsSdQfJ2CJtt': 'ttJC2JfQdSswRAIgGBCxZyAfKyi0cjXCb3gqEctUw-NYdNmOEvaepit0zJAtIEsgOV2SXZjhSHMNy0NXNG_1kNyBf6HPuAuCduh-a7O', }), Challenge('5ec65609', Variant.tce, JsChallengeType.N, { '0eRGgQWJGfT5rFHFj': '4SvMpDQH-vBJCw', }), Challenge('5ec65609', Variant.tce, 
JsChallengeType.SIG, { 'AAJAJfQdSswRQIhAMG5SN7-cAFChdrE7tLA6grH0rTMICA1mmDc0HoXgW3CAiAQQ4=CspfaF_vt82XH5yewvqcuEkvzeTsbRuHssRMyJQ=I': 'AJfQdSswRQIhAMG5SN7-cAFChdrE7tLA6grI0rTMICA1mmDc0HoXgW3CAiAQQ4HCspfaF_vt82XH5yewvqcuEkvzeTsbRuHssRMyJQ==', }), Challenge('6742b2b9', Variant.tce, JsChallengeType.N, { '_HPB-7GFg1VTkn9u': 'qUAsPryAO_ByYg', 'K1t_fcB6phzuq2SF': 'Y7PcOt3VE62mog', }), Challenge('6742b2b9', Variant.tce, JsChallengeType.SIG, { 'MMGZJMUucirzS_SnrSPYsc85CJNnTUi6GgR5NKn-znQEICACojE8MHS6S7uYq4TGjQX_D4aPk99hNU6wbTvorvVVMgIARwsSdQfJAA': 'AJfQdSswRAIgMVVvrovTbw6UNh99kPa4D_XQjGT4qYu7S6SHM8EjoCACIEQnz-nKN5RgG6iUTnNJC58csYPSrnS_SzricuUMJZGM', }), Challenge('2b83d2e0', Variant.main, JsChallengeType.N, { '0eRGgQWJGfT5rFHFj': 'euHbygrCMLksxd', }), Challenge('2b83d2e0', Variant.main, JsChallengeType.SIG, { 'MMGZJMUucirzS_SnrSPYsc85CJNnTUi6GgR5NKn-znQEICACojE8MHS6S7uYq4TGjQX_D4aPk99hNU6wbTvorvVVMgIARwsSdQfJA': '-MGZJMUucirzS_SnrSPYsc85CJNnTUi6GgR5NKnMznQEICACojE8MHS6S7uYq4TGjQX_D4aPk99hNU6wbTvorvVVMgIARwsSdQfJ', }), Challenge('638ec5c6', Variant.main, JsChallengeType.N, { 'ZdZIqFPQK-Ty8wId': '1qov8-KM-yH', }), Challenge('638ec5c6', Variant.main, JsChallengeType.SIG, { 'gN7a-hudCuAuPH6fByOk1_GNXN0yNMHShjZXS2VOgsEItAJz0tipeavEOmNdYN-wUtcEqD3bCXjc0iyKfAyZxCBGgIARwsSdQfJ2CJtt': 'MhudCuAuP-6fByOk1_GNXN7gNHHShjyXS2VOgsEItAJz0tipeav0OmNdYN-wUtcEqD3bCXjc0iyKfAyZxCBGgIARwsSdQfJ2CJtt', }), ] requests: list[JsChallengeRequest] = [] responses: list[JsChallengeProviderResponse] = [] for test in CHALLENGES: input_type, output_type = { JsChallengeType.N: (NChallengeInput, NChallengeOutput), JsChallengeType.SIG: (SigChallengeInput, SigChallengeOutput), }[test.type] request = JsChallengeRequest(test.type, input_type(test.url(), list(test.values.keys())), test.player) requests.append(request) responses.append(JsChallengeProviderResponse(request, JsChallengeResponse(test.type, output_type(test.values)))) @pytest.fixture(params=[BunJCP, DenoJCP, NodeJCP, QuickJSJCP]) def jcp(request, ie, 
logger): obj = request.param(ie, logger, None) if not obj.is_available(): pytest.skip(f'{obj.PROVIDER_NAME} is not available') obj.is_dev = True return obj @pytest.mark.download def test_bulk_requests(jcp): assert list(jcp.bulk_solve(requests)) == responses @pytest.mark.download def test_using_cached_player(jcp): first_player_requests = requests[:3] player = jcp._get_player(first_player_requests[0].video_id, first_player_requests[0].input.player_url) initial = json.loads(jcp._run_js_runtime(jcp._construct_stdin(player, False, first_player_requests))) preprocessed = initial.pop('preprocessed_player') result = json.loads(jcp._run_js_runtime(jcp._construct_stdin(preprocessed, True, first_player_requests))) assert initial == result
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/test/test_jsc/test_provider.py
test/test_jsc/test_provider.py
import pytest from yt_dlp.extractor.youtube.jsc.provider import ( JsChallengeProvider, JsChallengeRequest, JsChallengeProviderResponse, JsChallengeProviderRejectedRequest, JsChallengeType, JsChallengeResponse, NChallengeOutput, NChallengeInput, JsChallengeProviderError, register_provider, register_preference, ) from yt_dlp.extractor.youtube.pot._provider import IEContentProvider from yt_dlp.utils import ExtractorError from yt_dlp.extractor.youtube.jsc._registry import _jsc_preferences, _jsc_providers class ExampleJCP(JsChallengeProvider): PROVIDER_NAME = 'example-provider' PROVIDER_VERSION = '0.0.1' BUG_REPORT_LOCATION = 'https://example.com/issues' _SUPPORTED_TYPES = [JsChallengeType.N] def is_available(self) -> bool: return True def _real_bulk_solve(self, requests): for request in requests: results = dict.fromkeys(request.input.challenges, 'example-solution') response = JsChallengeResponse( type=request.type, output=NChallengeOutput(results=results)) yield JsChallengeProviderResponse(request=request, response=response) PLAYER_URL = 'https://example.com/player.js' class TestJsChallengeProvider: # note: some test covered in TestPoTokenProvider which shares the same base class def test_base_type(self): assert issubclass(JsChallengeProvider, IEContentProvider) def test_create_provider_missing_bulk_solve_method(self, ie, logger): class MissingMethodsJCP(JsChallengeProvider): def is_available(self) -> bool: return True with pytest.raises(TypeError, match='bulk_solve'): MissingMethodsJCP(ie=ie, logger=logger, settings={}) def test_create_provider_missing_available_method(self, ie, logger): class MissingMethodsJCP(JsChallengeProvider): def _real_bulk_solve(self, requests): raise JsChallengeProviderRejectedRequest('Not implemented') with pytest.raises(TypeError, match='is_available'): MissingMethodsJCP(ie=ie, logger=logger, settings={}) def test_barebones_provider(self, ie, logger): class BarebonesProviderJCP(JsChallengeProvider): def is_available(self) -> bool: return 
True def _real_bulk_solve(self, requests): raise JsChallengeProviderRejectedRequest('Not implemented') provider = BarebonesProviderJCP(ie=ie, logger=logger, settings={}) assert provider.PROVIDER_NAME == 'BarebonesProvider' assert provider.PROVIDER_KEY == 'BarebonesProvider' assert provider.PROVIDER_VERSION == '0.0.0' assert provider.BUG_REPORT_MESSAGE == 'please report this issue to the provider developer at (developer has not provided a bug report location) .' def test_example_provider_success(self, ie, logger): provider = ExampleJCP(ie=ie, logger=logger, settings={}) request = JsChallengeRequest( type=JsChallengeType.N, input=NChallengeInput(player_url=PLAYER_URL, challenges=['example-challenge'])) request_two = JsChallengeRequest( type=JsChallengeType.N, input=NChallengeInput(player_url=PLAYER_URL, challenges=['example-challenge-2'])) responses = list(provider.bulk_solve([request, request_two])) assert len(responses) == 2 assert all(isinstance(r, JsChallengeProviderResponse) for r in responses) assert responses == [ JsChallengeProviderResponse( request=request, response=JsChallengeResponse( type=JsChallengeType.N, output=NChallengeOutput(results={'example-challenge': 'example-solution'}), ), ), JsChallengeProviderResponse( request=request_two, response=JsChallengeResponse( type=JsChallengeType.N, output=NChallengeOutput(results={'example-challenge-2': 'example-solution'}), ), ), ] def test_provider_unsupported_challenge_type(self, ie, logger): provider = ExampleJCP(ie=ie, logger=logger, settings={}) request_supported = JsChallengeRequest( type=JsChallengeType.N, input=NChallengeInput(player_url=PLAYER_URL, challenges=['example-challenge'])) request_unsupported = JsChallengeRequest( type=JsChallengeType.SIG, input=NChallengeInput(player_url=PLAYER_URL, challenges=['example-challenge'])) responses = list(provider.bulk_solve([request_supported, request_unsupported, request_supported])) assert len(responses) == 3 # Requests are validated first before continuing to 
_real_bulk_solve assert isinstance(responses[0], JsChallengeProviderResponse) assert isinstance(responses[0].error, JsChallengeProviderRejectedRequest) assert responses[0].request is request_unsupported assert str(responses[0].error) == 'JS Challenge type "JsChallengeType.SIG" is not supported by example-provider' assert responses[1:] == [ JsChallengeProviderResponse( request=request_supported, response=JsChallengeResponse( type=JsChallengeType.N, output=NChallengeOutput(results={'example-challenge': 'example-solution'}), ), ), JsChallengeProviderResponse( request=request_supported, response=JsChallengeResponse( type=JsChallengeType.N, output=NChallengeOutput(results={'example-challenge': 'example-solution'}), ), ), ] def test_provider_get_player(self, ie, logger): ie._load_player = lambda video_id, player_url, fatal: (video_id, player_url, fatal) provider = ExampleJCP(ie=ie, logger=logger, settings={}) assert provider._get_player('video123', PLAYER_URL) == ('video123', PLAYER_URL, True) def test_provider_get_player_error(self, ie, logger): def raise_error(video_id, player_url, fatal): raise ExtractorError('Failed to load player') ie._load_player = raise_error provider = ExampleJCP(ie=ie, logger=logger, settings={}) with pytest.raises(JsChallengeProviderError, match='Failed to load player for JS challenge'): provider._get_player('video123', PLAYER_URL) def test_require_class_end_with_suffix(self, ie, logger): class InvalidSuffix(JsChallengeProvider): PROVIDER_NAME = 'invalid-suffix' def _real_bulk_solve(self, requests): raise JsChallengeProviderRejectedRequest('Not implemented') def is_available(self) -> bool: return True provider = InvalidSuffix(ie=ie, logger=logger, settings={}) with pytest.raises(AssertionError): provider.PROVIDER_KEY # noqa: B018 def test_register_provider(ie): @register_provider class UnavailableProviderJCP(JsChallengeProvider): def is_available(self) -> bool: return False def _real_bulk_solve(self, requests): raise 
JsChallengeProviderRejectedRequest('Not implemented') assert _jsc_providers.value.get('UnavailableProvider') == UnavailableProviderJCP _jsc_providers.value.pop('UnavailableProvider') def test_register_preference(ie): before = len(_jsc_preferences.value) @register_preference(ExampleJCP) def unavailable_preference(*args, **kwargs): return 1 assert len(_jsc_preferences.value) == before + 1
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/tomlparse.py
devscripts/tomlparse.py
#!/usr/bin/env python3 """ Simple parser for spec compliant toml files A simple toml parser for files that comply with the spec. Should only be used to parse `pyproject.toml` for `install_deps.py`. IMPORTANT: INVALID FILES OR MULTILINE STRINGS ARE NOT SUPPORTED! """ from __future__ import annotations import datetime as dt import json import re WS = r'(?:[\ \t]*)' STRING_RE = re.compile(r'"(?:\\.|[^\\"\n])*"|\'[^\'\n]*\'') SINGLE_KEY_RE = re.compile(rf'{STRING_RE.pattern}|[A-Za-z0-9_-]+') KEY_RE = re.compile(rf'{WS}(?:{SINGLE_KEY_RE.pattern}){WS}(?:\.{WS}(?:{SINGLE_KEY_RE.pattern}){WS})*') EQUALS_RE = re.compile(rf'={WS}') WS_RE = re.compile(WS) _SUBTABLE = rf'(?P<subtable>^\[(?P<is_list>\[)?(?P<path>{KEY_RE.pattern})\]\]?)' EXPRESSION_RE = re.compile(rf'^(?:{_SUBTABLE}|{KEY_RE.pattern}=)', re.MULTILINE) LIST_WS_RE = re.compile(rf'{WS}((#[^\n]*)?\n{WS})*') LEFTOVER_VALUE_RE = re.compile(r'[^,}\]\t\n#]+') def parse_key(value: str): for match in SINGLE_KEY_RE.finditer(value): if match[0][0] == '"': yield json.loads(match[0]) elif match[0][0] == '\'': yield match[0][1:-1] else: yield match[0] def get_target(root: dict, paths: list[str], is_list=False): target = root for index, key in enumerate(paths, 1): use_list = is_list and index == len(paths) result = target.get(key) if result is None: result = [] if use_list else {} target[key] = result if isinstance(result, dict): target = result elif use_list: target = {} result.append(target) else: target = result[-1] assert isinstance(target, dict) return target def parse_enclosed(data: str, index: int, end: str, ws_re: re.Pattern): index += 1 if match := ws_re.match(data, index): index = match.end() while data[index] != end: index = yield True, index if match := ws_re.match(data, index): index = match.end() if data[index] == ',': index += 1 if match := ws_re.match(data, index): index = match.end() assert data[index] == end yield False, index + 1 def parse_value(data: str, index: int): if data[index] == '[': result = [] 
indices = parse_enclosed(data, index, ']', LIST_WS_RE) valid, index = next(indices) while valid: index, value = parse_value(data, index) result.append(value) valid, index = indices.send(index) return index, result if data[index] == '{': result = {} indices = parse_enclosed(data, index, '}', WS_RE) valid, index = next(indices) while valid: valid, index = indices.send(parse_kv_pair(data, index, result)) return index, result if match := STRING_RE.match(data, index): return match.end(), json.loads(match[0]) if match[0][0] == '"' else match[0][1:-1] match = LEFTOVER_VALUE_RE.match(data, index) assert match value = match[0].strip() for func in [ int, float, dt.time.fromisoformat, dt.date.fromisoformat, dt.datetime.fromisoformat, {'true': True, 'false': False}.get, ]: try: value = func(value) break except Exception: pass return match.end(), value def parse_kv_pair(data: str, index: int, target: dict): match = KEY_RE.match(data, index) if not match: return None *keys, key = parse_key(match[0]) match = EQUALS_RE.match(data, match.end()) assert match index = match.end() index, value = parse_value(data, index) get_target(target, keys)[key] = value return index def parse_toml(data: str): root = {} target = root index = 0 while True: match = EXPRESSION_RE.search(data, index) if not match: break if match.group('subtable'): index = match.end() path, is_list = match.group('path', 'is_list') target = get_target(root, list(parse_key(path)), bool(is_list)) continue index = parse_kv_pair(data, match.start(), target) assert index is not None return root def main(): import argparse from pathlib import Path parser = argparse.ArgumentParser() parser.add_argument('infile', type=Path, help='The TOML file to read as input') args = parser.parse_args() with args.infile.open('r', encoding='utf-8') as file: data = file.read() def default(obj): if isinstance(obj, (dt.date, dt.time, dt.datetime)): return obj.isoformat() print(json.dumps(parse_toml(data), default=default)) if __name__ == 
'__main__': main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/set-variant.py
devscripts/set-variant.py
#!/usr/bin/env python3 # Allow direct execution import os import sys sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import argparse import functools import re from devscripts.utils import compose_functions, read_file, write_file VERSION_FILE = 'yt_dlp/version.py' def parse_options(): parser = argparse.ArgumentParser(description='Set the build variant of the package') parser.add_argument('variant', help='Name of the variant') parser.add_argument('-M', '--update-message', default=None, help='Message to show in -U') return parser.parse_args() def property_setter(name, value): return functools.partial(re.sub, rf'(?m)^{name}\s*=\s*.+$', f'{name} = {value!r}') opts = parse_options() transform = compose_functions( property_setter('VARIANT', opts.variant), property_setter('UPDATE_HINT', opts.update_message), ) write_file(VERSION_FILE, transform(read_file(VERSION_FILE)))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/make_contributing.py
devscripts/make_contributing.py
#!/usr/bin/env python3 import optparse import re def main(): return # This is unused in yt-dlp parser = optparse.OptionParser(usage='%prog INFILE OUTFILE') _, args = parser.parse_args() if len(args) != 2: parser.error('Expected an input and an output filename') infile, outfile = args with open(infile, encoding='utf-8') as inf: readme = inf.read() bug_text = re.search( r'(?s)#\s*BUGS\s*[^\n]*\s*(.*?)#\s*COPYRIGHT', readme).group(1) dev_text = re.search( r'(?s)(#\s*DEVELOPER INSTRUCTIONS.*?)#\s*EMBEDDING yt-dlp', readme).group(1) out = bug_text + dev_text with open(outfile, 'w', encoding='utf-8') as outf: outf.write(out) if __name__ == '__main__': main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/make_readme.py
devscripts/make_readme.py
#!/usr/bin/env python3 """ yt-dlp --help | make_readme.py This must be run in a console of correct width """ # Allow direct execution import os import sys sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import functools import re from devscripts.utils import read_file, write_file README_FILE = 'README.md' OPTIONS_START = 'General Options:' OPTIONS_END = 'CONFIGURATION' EPILOG_START = 'See full documentation' ALLOWED_OVERSHOOT = 2 DISABLE_PATCH = object() def take_section(text, start=None, end=None, *, shift=0): return text[ text.index(start) + shift if start else None: text.index(end) + shift if end else None ] def apply_patch(text, patch): return text if patch[0] is DISABLE_PATCH else re.sub(*patch, text) options = take_section(sys.stdin.read(), f'\n {OPTIONS_START}', f'\n{EPILOG_START}', shift=1) max_width = max(map(len, options.split('\n'))) switch_col_width = len(re.search(r'(?m)^\s{5,}', options).group()) delim = f'\n{" " * switch_col_width}' PATCHES = ( ( # Standardize `--update` message r'(?m)^( -U, --update\s+).+(\n \s.+)*$', r'\1Update this program to the latest version', ), ( # Headings r'(?m)^ (\w.+\n)( (?=\w))?', r'## \1', ), ( # Fixup `--date` formatting rf'(?m)( --date DATE.+({delim}[^\[]+)*)\[.+({delim}.+)*$', (rf'\1[now|today|yesterday][-N[day|week|month|year]].{delim}' f'E.g. "--date today-2weeks" downloads only{delim}' 'videos uploaded on the same day two weeks ago'), ), ( # Do not split URLs rf'({delim[:-1]})? 
(?P<label>\[\S+\] )?(?P<url>https?({delim})?:({delim})?/({delim})?/(({delim})?\S+)+)\s', lambda mobj: ''.join((delim, mobj.group('label') or '', re.sub(r'\s+', '', mobj.group('url')), '\n')), ), ( # Do not split "words" rf'(?m)({delim}\S+)+$', lambda mobj: ''.join((delim, mobj.group(0).replace(delim, ''))), ), ( # Allow overshooting last line rf'(?m)^(?P<prev>.+)${delim}(?P<current>.+)$(?!{delim})', lambda mobj: (mobj.group().replace(delim, ' ') if len(mobj.group()) - len(delim) + 1 <= max_width + ALLOWED_OVERSHOOT else mobj.group()), ), ( # Avoid newline when a space is available b/w switch and description DISABLE_PATCH, # This creates issues with prepare_manpage r'(?m)^(\s{4}-.{%d})(%s)' % (switch_col_width - 6, delim), r'\1 ', ), ( # Replace brackets with a Markdown link r'SponsorBlock API \((http.+)\)', r'[SponsorBlock API](\1)', ), ) readme = read_file(README_FILE) write_file(README_FILE, ''.join(( take_section(readme, end=f'## {OPTIONS_START}'), functools.reduce(apply_patch, PATCHES, options), take_section(readme, f'# {OPTIONS_END}'), )))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/prepare_manpage.py
devscripts/prepare_manpage.py
#!/usr/bin/env python3 # Allow direct execution import os import sys sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import os.path import re from devscripts.utils import ( compose_functions, get_filename_args, read_file, write_file, ) ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) README_FILE = os.path.join(ROOT_DIR, 'README.md') PREFIX = r'''%yt-dlp(1) # NAME yt\-dlp \- A feature\-rich command\-line audio/video downloader # SYNOPSIS **yt-dlp** \[OPTIONS\] URL [URL...] # DESCRIPTION ''' def filter_excluded_sections(readme): EXCLUDED_SECTION_BEGIN_STRING = re.escape('<!-- MANPAGE: BEGIN EXCLUDED SECTION -->') EXCLUDED_SECTION_END_STRING = re.escape('<!-- MANPAGE: END EXCLUDED SECTION -->') return re.sub( rf'(?s){EXCLUDED_SECTION_BEGIN_STRING}.+?{EXCLUDED_SECTION_END_STRING}\n', '', readme) def _convert_code_blocks(readme): current_code_block = None for line in readme.splitlines(True): if current_code_block: if line == current_code_block: current_code_block = None yield '\n' else: yield f' {line}' elif line.startswith('```'): current_code_block = line.count('`') * '`' + '\n' yield '\n' else: yield line def convert_code_blocks(readme): return ''.join(_convert_code_blocks(readme)) def move_sections(readme): MOVE_TAG_TEMPLATE = '<!-- MANPAGE: MOVE "%s" SECTION HERE -->' sections = re.findall(r'(?m)^%s$' % ( re.escape(MOVE_TAG_TEMPLATE).replace(r'\%', '%') % '(.+)'), readme) for section_name in sections: move_tag = MOVE_TAG_TEMPLATE % section_name if readme.count(move_tag) > 1: raise Exception(f'There is more than one occurrence of "{move_tag}". 
This is unexpected') sections = re.findall(rf'(?sm)(^# {re.escape(section_name)}.+?)(?=^# )', readme) if len(sections) < 1: raise Exception(f'The section {section_name} does not exist') elif len(sections) > 1: raise Exception(f'There are multiple occurrences of section {section_name}, this is unhandled') readme = readme.replace(sections[0], '', 1).replace(move_tag, sections[0], 1) return readme def filter_options(readme): section = re.search(r'(?sm)^# USAGE AND OPTIONS\n.+?(?=^# )', readme).group(0) section_new = section.replace('*', R'\*') options = '# OPTIONS\n' for line in section_new.split('\n')[1:]: mobj = re.fullmatch(r'''(?x) \s{4}(?P<opt>-(?:,\s|[^\s])+) (?:\s(?P<meta>(?:[^\s]|\s(?!\s))+))? (\s{2,}(?P<desc>.+))? ''', line) if not mobj: options += f'{line.lstrip()}\n' continue option, metavar, description = mobj.group('opt', 'meta', 'desc') # Pandoc's definition_lists. See http://pandoc.org/README.html option = f'{option} *{metavar}*' if metavar else option description = f'{description}\n' if description else '' options += f'\n{option}\n: {description}' continue return readme.replace(section, options, 1) TRANSFORM = compose_functions(filter_excluded_sections, convert_code_blocks, move_sections, filter_options) def main(): write_file(get_filename_args(), PREFIX + TRANSFORM(read_file(README_FILE))) if __name__ == '__main__': main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/lazy_load_template.py
devscripts/lazy_load_template.py
import importlib import random import re from ..utils import ( age_restricted, bug_reports_message, classproperty, variadic, write_string, ) # These bloat the lazy_extractors, so allow them to passthrough silently ALLOWED_CLASSMETHODS = {'extract_from_webpage', 'get_testcases', 'get_webpage_testcases'} _WARNED = False class LazyLoadMetaClass(type): def __getattr__(cls, name): global _WARNED if ('_real_class' not in cls.__dict__ and name not in ALLOWED_CLASSMETHODS and not _WARNED): _WARNED = True write_string('WARNING: Falling back to normal extractor since lazy extractor ' f'{cls.__name__} does not have attribute {name}{bug_reports_message()}\n') return getattr(cls.real_class, name) class LazyLoadExtractor(metaclass=LazyLoadMetaClass): @classproperty def real_class(cls): if '_real_class' not in cls.__dict__: cls._real_class = getattr(importlib.import_module(cls._module), cls.__name__) return cls._real_class def __new__(cls, *args, **kwargs): instance = cls.real_class.__new__(cls.real_class) instance.__init__(*args, **kwargs) return instance
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/bash-completion.py
devscripts/bash-completion.py
#!/usr/bin/env python3 # Allow direct execution import os import sys sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import yt_dlp BASH_COMPLETION_FILE = 'completions/bash/yt-dlp' BASH_COMPLETION_TEMPLATE = 'devscripts/bash-completion.in' def build_completion(opt_parser): opts_flag = [] for group in opt_parser.option_groups: for option in group.option_list: # for every long flag opts_flag.append(option.get_opt_string()) with open(BASH_COMPLETION_TEMPLATE) as f: template = f.read() with open(BASH_COMPLETION_FILE, 'w') as f: # just using the special char filled_template = template.replace('{{flags}}', ' '.join(opts_flag)) f.write(filled_template) parser = yt_dlp.parseOpts(ignore_config_files=True)[0] build_completion(parser)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/zsh-completion.py
devscripts/zsh-completion.py
#!/usr/bin/env python3 # Allow direct execution import os import sys sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import yt_dlp ZSH_COMPLETION_FILE = 'completions/zsh/_yt-dlp' ZSH_COMPLETION_TEMPLATE = 'devscripts/zsh-completion.in' def build_completion(opt_parser): opts = [opt for group in opt_parser.option_groups for opt in group.option_list] opts_file = [opt for opt in opts if opt.metavar == 'FILE'] opts_dir = [opt for opt in opts if opt.metavar == 'DIR'] opts_path = [opt for opt in opts if opt.metavar == 'PATH'] fileopts = [] for opt in opts_file: if opt._short_opts: fileopts.extend(opt._short_opts) if opt._long_opts: fileopts.extend(opt._long_opts) for opt in opts_path: if opt._short_opts: fileopts.extend(opt._short_opts) if opt._long_opts: fileopts.extend(opt._long_opts) diropts = [] for opt in opts_dir: if opt._short_opts: diropts.extend(opt._short_opts) if opt._long_opts: diropts.extend(opt._long_opts) flags = [opt.get_opt_string() for opt in opts] with open(ZSH_COMPLETION_TEMPLATE) as f: template = f.read() template = template.replace('{{fileopts}}', '|'.join(fileopts)) template = template.replace('{{diropts}}', '|'.join(diropts)) template = template.replace('{{flags}}', ' '.join(flags)) with open(ZSH_COMPLETION_FILE, 'w') as f: f.write(template) parser = yt_dlp.parseOpts(ignore_config_files=True)[0] build_completion(parser)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/make_issue_template.py
devscripts/make_issue_template.py
#!/usr/bin/env python3 # Allow direct execution import os import sys sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import re from devscripts.utils import get_filename_args, read_file, write_file VERBOSE = ''' - type: checkboxes id: verbose attributes: label: Provide verbose output that clearly demonstrates the problem description: | This is mandatory unless absolutely impossible to provide. If you are unable to provide the output, please explain why. options: - label: Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`) required: true - label: "If using API, add `'verbose': True` to `YoutubeDL` params instead" required: false - label: Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below required: true - type: textarea id: log attributes: label: Complete Verbose Output description: | It should start like this: placeholder: | [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc'] [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8 [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp-nightly-builds [1a176d874] (win_exe) [debug] Python 3.10.11 (CPython AMD64 64bit) - Windows-10-10.0.20348-SP0 (OpenSSL 1.1.1t 7 Feb 2023) [debug] exe versions: ffmpeg 7.0.2 (setts), ffprobe 7.0.2 [debug] Optional libraries: Cryptodome-3.21.0, brotli-1.1.0, certifi-2024.08.30, curl_cffi-0.5.10, mutagen-1.47.0, requests-2.32.3, sqlite3-3.40.1, urllib3-2.2.3, websockets-13.1 [debug] Proxy map: {} [debug] Request Handlers: urllib, requests, websockets, curl_cffi [debug] Loaded 1838 extractors [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest Latest version: nightly@... from yt-dlp/yt-dlp-nightly-builds yt-dlp is up to date (nightly@... 
from yt-dlp/yt-dlp-nightly-builds) [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc <more lines> render: shell validations: required: true '''.strip() NO_SKIP = ''' - type: markdown attributes: value: | > [!IMPORTANT] > Not providing the required (*) information or removing the template will result in your issue being closed and ignored. '''.strip() def main(): fields = { 'no_skip': NO_SKIP, 'verbose': VERBOSE, 'verbose_optional': re.sub(r'(\n\s+validations:)?\n\s+required: true', '', VERBOSE), } infile, outfile = get_filename_args(has_infile=True) write_file(outfile, read_file(infile) % fields) if __name__ == '__main__': main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/fish-completion.py
devscripts/fish-completion.py
#!/usr/bin/env python3 # Allow direct execution import os import sys sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import optparse import yt_dlp from yt_dlp.utils import shell_quote FISH_COMPLETION_FILE = 'completions/fish/yt-dlp.fish' FISH_COMPLETION_TEMPLATE = 'devscripts/fish-completion.in' EXTRA_ARGS = { 'remux-video': ['--arguments', 'mp4 mkv', '--exclusive'], 'recode-video': ['--arguments', 'mp4 flv ogg webm mkv', '--exclusive'], # Options that need a file parameter 'download-archive': ['--require-parameter'], 'cookies': ['--require-parameter'], 'load-info': ['--require-parameter'], 'batch-file': ['--require-parameter'], } def build_completion(opt_parser): commands = [] for group in opt_parser.option_groups: for option in group.option_list: long_option = option.get_opt_string().strip('-') complete_cmd = ['complete', '--command', 'yt-dlp', '--long-option', long_option] if option._short_opts: complete_cmd += ['--short-option', option._short_opts[0].strip('-')] if option.help != optparse.SUPPRESS_HELP: complete_cmd += ['--description', option.help] complete_cmd.extend(EXTRA_ARGS.get(long_option, [])) commands.append(shell_quote(complete_cmd)) with open(FISH_COMPLETION_TEMPLATE) as f: template = f.read() filled_template = template.replace('{{commands}}', '\n'.join(commands)) with open(FISH_COMPLETION_FILE, 'w') as f: f.write(filled_template) parser = yt_dlp.parseOpts(ignore_config_files=True)[0] build_completion(parser)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/generate_third_party_licenses.py
devscripts/generate_third_party_licenses.py
import requests from dataclasses import dataclass from pathlib import Path import hashlib DEFAULT_OUTPUT = 'THIRD_PARTY_LICENSES.txt' CACHE_LOCATION = '.license_cache' HEADER = '''THIRD-PARTY LICENSES This file aggregates license texts of third-party components included with the yt-dlp PyInstaller-bundled executables. yt-dlp itself is licensed under the Unlicense (see LICENSE file). Source code for bundled third-party components is available from the original projects. If you cannot obtain it, the maintainers will provide it as per license obligation; maintainer emails are listed in pyproject.toml.''' @dataclass(frozen=True) class Dependency: name: str license_url: str project_url: str = '' license: str = '' comment: str = '' DEPENDENCIES: list[Dependency] = [ # Core runtime environment components Dependency( name='Python', license='PSF-2.0', license_url='https://raw.githubusercontent.com/python/cpython/refs/heads/main/LICENSE', project_url='https://www.python.org/', ), Dependency( name='Microsoft Distributable Code', license_url='https://raw.githubusercontent.com/python/cpython/refs/heads/main/PC/crtlicense.txt', comment='Only included in Windows builds', ), Dependency( name='bzip2', license='bzip2-1.0.6', license_url='https://gitlab.com/federicomenaquintero/bzip2/-/raw/master/COPYING', project_url='https://sourceware.org/bzip2/', ), Dependency( name='libffi', license='MIT', license_url='https://raw.githubusercontent.com/libffi/libffi/refs/heads/master/LICENSE', project_url='https://sourceware.org/libffi/', ), Dependency( name='OpenSSL 3.0+', license='Apache-2.0', license_url='https://raw.githubusercontent.com/openssl/openssl/refs/heads/master/LICENSE.txt', project_url='https://www.openssl.org/', ), Dependency( name='SQLite', license='Public Domain', # Technically does not need to be included license_url='https://sqlite.org/src/raw/e108e1e69ae8e8a59e93c455654b8ac9356a11720d3345df2a4743e9590fb20d?at=LICENSE.md', project_url='https://www.sqlite.org/', ), Dependency( 
name='liblzma', license='0BSD', # Technically does not need to be included license_url='https://raw.githubusercontent.com/tukaani-project/xz/refs/heads/master/COPYING', project_url='https://tukaani.org/xz/', ), Dependency( name='mpdecimal', license='BSD-2-Clause', # No official repo URL license_url='https://gist.githubusercontent.com/seproDev/9e5dbfc08af35c3f2463e64eb9b27161/raw/61f5a98bc1a4ad7d48b1c793fc3314d4d43c2ab1/mpdecimal_COPYRIGHT.txt', project_url='https://www.bytereef.org/mpdecimal/', ), Dependency( name='zlib', license='zlib', license_url='https://raw.githubusercontent.com/madler/zlib/refs/heads/develop/LICENSE', project_url='https://zlib.net/', ), Dependency( name='Expat', license='MIT', license_url='https://raw.githubusercontent.com/libexpat/libexpat/refs/heads/master/COPYING', project_url='https://libexpat.github.io/', ), Dependency( name='ncurses', license='X11-distribute-modifications-variant', license_url='https://raw.githubusercontent.com/mirror/ncurses/refs/heads/master/COPYING', comment='Only included in Linux/macOS builds', project_url='https://invisible-island.net/ncurses/', ), Dependency( name='GNU Readline', license='GPL-3.0-or-later', license_url='https://tiswww.case.edu/php/chet/readline/COPYING', comment='Only included in Linux builds', project_url='https://www.gnu.org/software/readline/', ), Dependency( name='libstdc++', license='GPL-3.0-with-GCC-exception', license_url='https://raw.githubusercontent.com/gcc-mirror/gcc/refs/heads/master/COPYING.RUNTIME', comment='Only included in Linux builds', project_url='https://gcc.gnu.org/onlinedocs/libstdc++/', ), Dependency( name='libgcc', license='GPL-3.0-with-GCC-exception', license_url='https://raw.githubusercontent.com/gcc-mirror/gcc/refs/heads/master/COPYING.RUNTIME', comment='Only included in Linux builds', project_url='https://gcc.gnu.org/', ), Dependency( name='libuuid', license='BSD-3-Clause', license_url='https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/plain/lib/uuid/COPYING', 
comment='Only included in Linux builds', project_url='https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/tree/lib/uuid', ), Dependency( name='libintl', license='LGPL-2.1-or-later', license_url='https://raw.githubusercontent.com/autotools-mirror/gettext/refs/heads/master/gettext-runtime/intl/COPYING.LIB', comment='Only included in macOS builds', project_url='https://www.gnu.org/software/gettext/', ), Dependency( name='libidn2', license='LGPL-3.0-or-later', license_url='https://gitlab.com/libidn/libidn2/-/raw/master/COPYING.LESSERv3', comment='Only included in macOS builds', project_url='https://www.gnu.org/software/libidn/', ), Dependency( name='libidn2 (Unicode character data files)', license='Unicode-TOU AND Unicode-DFS-2016', license_url='https://gitlab.com/libidn/libidn2/-/raw/master/COPYING.unicode', comment='Only included in macOS builds', project_url='https://www.gnu.org/software/libidn/', ), Dependency( name='libunistring', license='LGPL-3.0-or-later', license_url='https://gitweb.git.savannah.gnu.org/gitweb/?p=libunistring.git;a=blob_plain;f=COPYING.LIB;hb=HEAD', comment='Only included in macOS builds', project_url='https://www.gnu.org/software/libunistring/', ), Dependency( name='librtmp', license='LGPL-2.1-or-later', # No official repo URL license_url='https://gist.githubusercontent.com/seproDev/31d8c691ccddebe37b8b379307cb232d/raw/053408e98547ea8c7d9ba3a80c965f33e163b881/librtmp_COPYING.txt', comment='Only included in macOS builds', project_url='https://rtmpdump.mplayerhq.hu/', ), Dependency( name='zstd', license='BSD-3-Clause', license_url='https://raw.githubusercontent.com/facebook/zstd/refs/heads/dev/LICENSE', comment='Only included in macOS builds', project_url='https://facebook.github.io/zstd/', ), # Python packages Dependency( name='brotli', license='MIT', license_url='https://raw.githubusercontent.com/google/brotli/refs/heads/master/LICENSE', project_url='https://brotli.org/', ), Dependency( name='curl_cffi', license='MIT', 
license_url='https://raw.githubusercontent.com/lexiforest/curl_cffi/refs/heads/main/LICENSE', comment='Not included in `yt-dlp_x86` and `yt-dlp_musllinux_aarch64` builds', project_url='https://curl-cffi.readthedocs.io/', ), # Dependency of curl_cffi Dependency( name='curl-impersonate', license='MIT', license_url='https://raw.githubusercontent.com/lexiforest/curl-impersonate/refs/heads/main/LICENSE', comment='Not included in `yt-dlp_x86` and `yt-dlp_musllinux_aarch64` builds', project_url='https://github.com/lexiforest/curl-impersonate', ), Dependency( name='cffi', license='MIT-0', # Technically does not need to be included license_url='https://raw.githubusercontent.com/python-cffi/cffi/refs/heads/main/LICENSE', project_url='https://cffi.readthedocs.io/', ), # Dependecy of cffi Dependency( name='pycparser', license='BSD-3-Clause', license_url='https://raw.githubusercontent.com/eliben/pycparser/refs/heads/main/LICENSE', project_url='https://github.com/eliben/pycparser', ), Dependency( name='mutagen', license='GPL-2.0-or-later', license_url='https://raw.githubusercontent.com/quodlibet/mutagen/refs/heads/main/COPYING', project_url='https://mutagen.readthedocs.io/', ), Dependency( name='PyCryptodome', license='Public Domain and BSD-2-Clause', license_url='https://raw.githubusercontent.com/Legrandin/pycryptodome/refs/heads/master/LICENSE.rst', project_url='https://www.pycryptodome.org/', ), Dependency( name='certifi', license='MPL-2.0', license_url='https://raw.githubusercontent.com/certifi/python-certifi/refs/heads/master/LICENSE', project_url='https://github.com/certifi/python-certifi', ), Dependency( name='requests', license='Apache-2.0', license_url='https://raw.githubusercontent.com/psf/requests/refs/heads/main/LICENSE', project_url='https://requests.readthedocs.io/', ), # Dependency of requests Dependency( name='charset-normalizer', license='MIT', license_url='https://raw.githubusercontent.com/jawah/charset_normalizer/refs/heads/master/LICENSE', 
project_url='https://charset-normalizer.readthedocs.io/', ), # Dependency of requests Dependency( name='idna', license='BSD-3-Clause', license_url='https://raw.githubusercontent.com/kjd/idna/refs/heads/master/LICENSE.md', project_url='https://github.com/kjd/idna', ), Dependency( name='urllib3', license='MIT', license_url='https://raw.githubusercontent.com/urllib3/urllib3/refs/heads/main/LICENSE.txt', project_url='https://urllib3.readthedocs.io/', ), Dependency( name='SecretStorage', license='BSD-3-Clause', license_url='https://raw.githubusercontent.com/mitya57/secretstorage/refs/heads/master/LICENSE', comment='Only included in Linux builds', project_url='https://secretstorage.readthedocs.io/', ), # Dependency of SecretStorage Dependency( name='cryptography', license='Apache-2.0', # Also available as BSD-3-Clause license_url='https://raw.githubusercontent.com/pyca/cryptography/refs/heads/main/LICENSE.APACHE', comment='Only included in Linux builds', project_url='https://cryptography.io/', ), # Dependency of SecretStorage Dependency( name='Jeepney', license='MIT', license_url='https://gitlab.com/takluyver/jeepney/-/raw/master/LICENSE', comment='Only included in Linux builds', project_url='https://jeepney.readthedocs.io/', ), Dependency( name='websockets', license='BSD-3-Clause', license_url='https://raw.githubusercontent.com/python-websockets/websockets/refs/heads/main/LICENSE', project_url='https://websockets.readthedocs.io/', ), # Dependencies of yt-dlp-ejs Dependency( name='Meriyah', license='ISC', license_url='https://raw.githubusercontent.com/meriyah/meriyah/refs/heads/main/LICENSE.md', project_url='https://github.com/meriyah/meriyah', ), Dependency( name='Astring', license='MIT', license_url='https://raw.githubusercontent.com/davidbonnet/astring/refs/heads/main/LICENSE', project_url='https://github.com/davidbonnet/astring/', ), ] def fetch_text(dep: Dependency) -> str: cache_dir = Path(CACHE_LOCATION) cache_dir.mkdir(exist_ok=True) url_hash = 
hashlib.sha256(dep.license_url.encode('utf-8')).hexdigest() cache_file = cache_dir / f'{url_hash}.txt' if cache_file.exists(): return cache_file.read_text() # UA needed since some domains block requests default UA req = requests.get(dep.license_url, headers={'User-Agent': 'yt-dlp license fetcher'}) req.raise_for_status() text = req.text cache_file.write_text(text) return text def build_output() -> str: lines = [HEADER] for d in DEPENDENCIES: lines.append('\n') lines.append('-' * 80) header = f'{d.name}' if d.license: header += f' | {d.license}' if d.comment: header += f'\nNote: {d.comment}' if d.project_url: header += f'\nURL: {d.project_url}' lines.append(header) lines.append('-' * 80) text = fetch_text(d) lines.append(text.strip('\n') + '\n') return '\n'.join(lines) if __name__ == '__main__': content = build_output() Path(DEFAULT_OUTPUT).write_text(content)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/generate_aes_testdata.py
devscripts/generate_aes_testdata.py
#!/usr/bin/env python3 # Allow direct execution import os import sys sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import codecs import subprocess from yt_dlp.aes import aes_encrypt, key_expansion secret_msg = b'Secret message goes here' def hex_str(int_list): return codecs.encode(bytes(int_list), 'hex') def openssl_encode(algo, key, iv): cmd = ['openssl', 'enc', '-e', '-' + algo, '-K', hex_str(key), '-iv', hex_str(iv)] prog = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE) out, _ = prog.communicate(secret_msg) return out iv = key = [0x20, 0x15] + 14 * [0] r = openssl_encode('aes-128-cbc', key, iv) print('aes_cbc_decrypt') print(repr(r)) password = key new_key = aes_encrypt(password, key_expansion(password)) r = openssl_encode('aes-128-ctr', new_key, iv) print('aes_decrypt_text 16') print(repr(r)) password = key + 16 * [0] new_key = aes_encrypt(password, key_expansion(password)) * (32 // 16) r = openssl_encode('aes-256-ctr', new_key, iv) print('aes_decrypt_text 32') print(repr(r))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/check-porn.py
devscripts/check-porn.py
#!/usr/bin/env python3 """ This script employs a VERY basic heuristic ('porn' in webpage.lower()) to check if we are not 'age_limit' tagging some porn site A second approach implemented relies on a list of porn domains, to activate it pass the list filename as the only argument """ # Allow direct execution import os import sys sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import urllib.parse import urllib.request from test.helper import gettestcases if len(sys.argv) > 1: METHOD = 'LIST' LIST = open(sys.argv[1]).read().decode('utf8').strip() else: METHOD = 'EURISTIC' for test in gettestcases(): if METHOD == 'EURISTIC': try: webpage = urllib.request.urlopen(test['url'], timeout=10).read() except Exception: print('\nFail: {}'.format(test['name'])) continue webpage = webpage.decode('utf8', 'replace') RESULT = 'porn' in webpage.lower() elif METHOD == 'LIST': domain = urllib.parse.urlparse(test['url']).netloc if not domain: print('\nFail: {}'.format(test['name'])) continue domain = '.'.join(domain.split('.')[-2:]) RESULT = ('.' + domain + '\n' in LIST or '\n' + domain + '\n' in LIST) if RESULT and ('info_dict' not in test or 'age_limit' not in test['info_dict'] or test['info_dict']['age_limit'] != 18): print('\nPotential missing age_limit check: {}'.format(test['name'])) elif not RESULT and ('info_dict' in test and 'age_limit' in test['info_dict'] and test['info_dict']['age_limit'] == 18): print('\nPotential false negative: {}'.format(test['name'])) else: sys.stdout.write('.') sys.stdout.flush() print()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/setup_variables.py
devscripts/setup_variables.py
# Allow direct execution import os import sys sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import datetime as dt import json from devscripts.utils import calculate_version STABLE_REPOSITORY = 'yt-dlp/yt-dlp' def setup_variables(environment): """ `environment` must contain these keys: REPOSITORY, INPUTS, PROCESSED, PUSH_VERSION_COMMIT, PYPI_PROJECT, SOURCE_PYPI_PROJECT, SOURCE_PYPI_SUFFIX, TARGET_PYPI_PROJECT, TARGET_PYPI_SUFFIX, SOURCE_ARCHIVE_REPO, TARGET_ARCHIVE_REPO, HAS_ARCHIVE_REPO_TOKEN `INPUTS` must contain these keys: prerelease `PROCESSED` must contain these keys: source_repo, source_tag, target_repo, target_tag """ REPOSITORY = environment['REPOSITORY'] INPUTS = json.loads(environment['INPUTS']) PROCESSED = json.loads(environment['PROCESSED']) source_channel = None pypi_project = None pypi_suffix = None source_repo = PROCESSED['source_repo'] source_tag = PROCESSED['source_tag'] if source_repo == 'stable': source_repo = STABLE_REPOSITORY if not source_repo: source_repo = REPOSITORY elif environment['SOURCE_ARCHIVE_REPO']: source_channel = environment['SOURCE_ARCHIVE_REPO'] elif not source_tag and '/' not in source_repo: source_tag = source_repo source_repo = REPOSITORY resolved_source = source_repo if source_tag: resolved_source = f'{resolved_source}@{source_tag}' elif source_repo == STABLE_REPOSITORY: resolved_source = 'stable' revision = None if INPUTS['prerelease'] or not environment['PUSH_VERSION_COMMIT']: revision = dt.datetime.now(tz=dt.timezone.utc).strftime('%H%M%S') version = calculate_version(INPUTS.get('version') or revision) target_repo = PROCESSED['target_repo'] target_tag = PROCESSED['target_tag'] if target_repo: if target_repo == 'stable': target_repo = STABLE_REPOSITORY if not target_tag: if target_repo == STABLE_REPOSITORY: target_tag = version elif environment['TARGET_ARCHIVE_REPO']: target_tag = source_tag or version else: target_tag = target_repo target_repo = REPOSITORY if target_repo != REPOSITORY: 
target_repo = environment['TARGET_ARCHIVE_REPO'] pypi_project = environment['TARGET_PYPI_PROJECT'] or None pypi_suffix = environment['TARGET_PYPI_SUFFIX'] or None else: target_tag = source_tag or version if source_channel: target_repo = source_channel pypi_project = environment['SOURCE_PYPI_PROJECT'] or None pypi_suffix = environment['SOURCE_PYPI_SUFFIX'] or None else: target_repo = REPOSITORY if target_repo != REPOSITORY and not json.loads(environment['HAS_ARCHIVE_REPO_TOKEN']): return None if target_repo == REPOSITORY and not INPUTS['prerelease']: pypi_project = environment['PYPI_PROJECT'] or None return { 'channel': resolved_source, 'version': version, 'target_repo': target_repo, 'target_tag': target_tag, 'pypi_project': pypi_project, 'pypi_suffix': pypi_suffix, } def process_inputs(inputs): outputs = {} for key in ('source', 'target'): repo, _, tag = inputs.get(key, '').partition('@') outputs[f'{key}_repo'] = repo outputs[f'{key}_tag'] = tag return outputs if __name__ == '__main__': if not os.getenv('GITHUB_OUTPUT'): print('This script is only intended for use with GitHub Actions', file=sys.stderr) sys.exit(1) if 'process_inputs' in sys.argv: inputs = json.loads(os.environ['INPUTS']) print('::group::Inputs') print(json.dumps(inputs, indent=2)) print('::endgroup::') outputs = process_inputs(inputs) print('::group::Processed') print(json.dumps(outputs, indent=2)) print('::endgroup::') with open(os.environ['GITHUB_OUTPUT'], 'a') as f: f.write('\n'.join(f'{key}={value}' for key, value in outputs.items())) sys.exit(0) outputs = setup_variables(dict(os.environ)) if not outputs: print('::error::Repository access secret ARCHIVE_REPO_TOKEN not found') sys.exit(1) print('::group::Output variables') print(json.dumps(outputs, indent=2)) print('::endgroup::') with open(os.environ['GITHUB_OUTPUT'], 'a') as f: f.write('\n'.join(f'{key}={value or ""}' for key, value in outputs.items()))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/utils.py
devscripts/utils.py
import argparse import datetime as dt import functools import re import subprocess def read_file(fname): with open(fname, encoding='utf-8') as f: return f.read() def write_file(fname, content, mode='w'): with open(fname, mode, encoding='utf-8') as f: return f.write(content) def read_version(fname='yt_dlp/version.py', varname='__version__'): """Get the version without importing the package""" items = {} exec(compile(read_file(fname), fname, 'exec'), items) return items[varname] def calculate_version(version=None, fname='yt_dlp/version.py'): if version and '.' in version: return version revision = version version = dt.datetime.now(dt.timezone.utc).strftime('%Y.%m.%d') if revision: assert re.fullmatch(r'[0-9]+', revision), 'Revision must be numeric' else: old_version = read_version(fname=fname).split('.') if version.split('.') == old_version[:3]: revision = str(int(([*old_version, 0])[3]) + 1) return f'{version}.{revision}' if revision else version def get_filename_args(has_infile=False, default_outfile=None): parser = argparse.ArgumentParser() if has_infile: parser.add_argument('infile', help='Input file') kwargs = {'nargs': '?', 'default': default_outfile} if default_outfile else {} parser.add_argument('outfile', **kwargs, help='Output file') opts = parser.parse_args() if has_infile: return opts.infile, opts.outfile return opts.outfile def compose_functions(*functions): return lambda x: functools.reduce(lambda y, f: f(y), functions, x) def run_process(*args, **kwargs): kwargs.setdefault('text', True) kwargs.setdefault('check', True) kwargs.setdefault('capture_output', True) if kwargs['text']: kwargs.setdefault('encoding', 'utf-8') kwargs.setdefault('errors', 'replace') return subprocess.run(args, **kwargs)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/update-version.py
devscripts/update-version.py
#!/usr/bin/env python3 # Allow direct execution import os import sys sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import argparse import contextlib import sys from devscripts.utils import calculate_version, run_process, write_file def get_git_head(): with contextlib.suppress(Exception): return run_process('git', 'rev-parse', 'HEAD').stdout.strip() VERSION_TEMPLATE = '''\ # Autogenerated by devscripts/update-version.py __version__ = {version!r} RELEASE_GIT_HEAD = {git_head!r} VARIANT = None UPDATE_HINT = None CHANNEL = {channel!r} ORIGIN = {origin!r} _pkg_version = {package_version!r} ''' if __name__ == '__main__': parser = argparse.ArgumentParser(description='Update the version.py file') parser.add_argument( '-c', '--channel', default='stable', help='Select update channel (default: %(default)s)') parser.add_argument( '-r', '--origin', default='local', help='Select origin/repository (default: %(default)s)') parser.add_argument( '-s', '--suffix', default='', help='Add an alphanumeric suffix to the package version, e.g. "dev"') parser.add_argument( '-o', '--output', default='yt_dlp/version.py', help='The output file to write to (default: %(default)s)') parser.add_argument( 'version', nargs='?', default=None, help='A version or revision to use instead of generating one') args = parser.parse_args() git_head = get_git_head() version = calculate_version(args.version) write_file(args.output, VERSION_TEMPLATE.format( version=version, git_head=git_head, channel=args.channel, origin=args.origin, package_version=f'{version}{args.suffix}')) print(f'version={version} ({args.channel}), head={git_head}')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/update_changelog.py
devscripts/update_changelog.py
#!/usr/bin/env python3 # Allow direct execution import os import sys sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from pathlib import Path from devscripts.make_changelog import create_changelog, create_parser from devscripts.utils import read_file, read_version, write_file # Always run after devscripts/update-version.py, and run before `make doc|pypi-files|tar|all` if __name__ == '__main__': parser = create_parser() parser.description = 'Update an existing changelog file with an entry for a new release' parser.add_argument( '--changelog-path', type=Path, default=Path(__file__).parent.parent / 'Changelog.md', help='path to the Changelog file') args = parser.parse_args() header, sep, changelog = read_file(args.changelog_path).partition('\n### ') current_version = read_version() if current_version != changelog.splitlines()[0]: new_entry = create_changelog(args) write_file(args.changelog_path, f'{header}{sep}{current_version}\n{new_entry}\n{sep}{changelog}')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/run_tests.py
devscripts/run_tests.py
#!/usr/bin/env python3 import argparse import functools import os import re import shlex import subprocess import sys from pathlib import Path fix_test_name = functools.partial(re.compile(r'IE(_all|_\d+)?$').sub, r'\1') def parse_args(): parser = argparse.ArgumentParser(description='Run selected yt-dlp tests') parser.add_argument( 'test', help='an extractor test, test path, or one of "core" or "download"', nargs='*') parser.add_argument( '--flaky', action='store_true', default=None, help='Allow running flaky tests. (default: run, unless in CI)', ) parser.add_argument( '--no-flaky', action='store_false', dest='flaky', help=argparse.SUPPRESS, ) parser.add_argument( '-k', help='run a test matching EXPRESSION. Same as "pytest -k"', metavar='EXPRESSION') parser.add_argument( '--pytest-args', help='arguments to passthrough to pytest') return parser.parse_args() def run_tests(*tests, pattern=None, ci=False, flaky: bool | None = None): # XXX: hatch uses `tests` if no arguments are passed run_core = 'core' in tests or 'tests' in tests or (not pattern and not tests) run_download = 'download' in tests run_flaky = flaky or (flaky is None and not ci) pytest_args = args.pytest_args or os.getenv('HATCH_TEST_ARGS', '') arguments = ['pytest', '-Werror', '--tb=short', *shlex.split(pytest_args)] if ci: arguments.append('--color=yes') if pattern: arguments.extend(['-k', pattern]) if run_core: arguments.extend(['-m', 'not download']) elif run_download: arguments.extend(['-m', 'download']) else: arguments.extend( test if '/' in test else f'test/test_download.py::TestDownload::test_{fix_test_name(test)}' for test in tests) if not run_flaky: arguments.append('--disallow-flaky') print(f'Running {arguments}', flush=True) try: return subprocess.call(arguments) except FileNotFoundError: pass arguments = [sys.executable, '-Werror', '-m', 'unittest'] if pattern: arguments.extend(['-k', pattern]) if run_core: print('"pytest" needs to be installed to run core tests', file=sys.stderr, flush=True) 
return 1 elif run_download: arguments.append('test.test_download') else: arguments.extend( f'test.test_download.TestDownload.test_{test}' for test in tests) print(f'Running {arguments}', flush=True) return subprocess.call(arguments) if __name__ == '__main__': try: args = parse_args() os.chdir(Path(__file__).parent.parent) sys.exit(run_tests( *args.test, pattern=args.k, ci=bool(os.getenv('CI')), flaky=args.flaky, )) except KeyboardInterrupt: pass
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/install_deps.py
devscripts/install_deps.py
#!/usr/bin/env python3

# Allow execution from anywhere
import os
import sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import argparse
import re
import subprocess
from pathlib import Path

from devscripts.tomlparse import parse_toml
from devscripts.utils import read_file


def parse_args():
    """Build and evaluate the command-line interface of the installer."""
    parser = argparse.ArgumentParser(description='Install dependencies for yt-dlp')
    parser.add_argument(
        'input', nargs='?', metavar='TOMLFILE', default=Path(__file__).parent.parent / 'pyproject.toml',
        help='input file (default: %(default)s)')
    parser.add_argument(
        '-e', '--exclude-dependency', metavar='DEPENDENCY', action='append',
        help='exclude a dependency (can be used multiple times)')
    parser.add_argument(
        '-i', '--include-extra', metavar='EXTRA', action='append',
        help='include an extra/optional-dependencies list (can be used multiple times)')
    parser.add_argument(
        '-c', '--cherry-pick', metavar='DEPENDENCY', action='append',
        help=(
            'only include a specific dependency from the resulting dependency list '
            '(can be used multiple times)'))
    parser.add_argument(
        '-o', '--omit-default', action='store_true',
        help='omit the "default" extra unless it is explicitly included (it is included by default)')
    parser.add_argument(
        '-p', '--print', action='store_true',
        help='only print requirements to stdout')
    parser.add_argument(
        '-u', '--user', action='store_true',
        help='install with pip as --user')
    return parser.parse_args()


def uniq(arg) -> dict[str, None]:
    """Lower-case *arg* into an ordered, duplicate-free mapping of names."""
    return dict.fromkeys(item.lower() for item in (arg or ()))


def main():
    """Resolve the requested dependency set and print or pip-install it."""
    args = parse_args()
    project_table = parse_toml(read_file(args.input))['project']
    # Matches self-referential extras such as "yt-dlp[default]"
    recursive_pattern = re.compile(rf'{project_table["name"]}\[(?P<extra_name>[\w-]+)\]')
    extras = project_table['optional-dependencies']
    excludes = uniq(args.exclude_dependency)
    only_includes = uniq(args.cherry_pick)
    include_extras = uniq(args.include_extra)

    def expand(extra):
        # Inline one level of "project[extra]" self-references
        for dep in extra:
            mobj = recursive_pattern.fullmatch(dep)
            if mobj is None:
                yield dep
            else:
                yield from extras.get(mobj.group('extra_name'), ())

    targets = {}
    if not args.omit_default:
        # legacy: 'dependencies' is empty now
        targets.update(dict.fromkeys(project_table['dependencies']))
        targets.update(dict.fromkeys(expand(extras['default'])))

    for include in filter(None, map(extras.get, include_extras)):
        targets.update(dict.fromkeys(expand(include)))

    def wanted(requirement):
        # First run of word characters/dashes is the distribution name
        name = re.match(r'[\w-]+', requirement).group(0).lower()
        if name in excludes:
            return False
        return not only_includes or name in only_includes

    selected = [requirement for requirement in targets if wanted(requirement)]

    if args.print:
        for requirement in selected:
            print(requirement)
        return

    pip_command = [sys.executable, '-m', 'pip', 'install', '-U']
    if args.user:
        pip_command.append('--user')
    pip_command.extend(selected)
    return subprocess.call(pip_command)


if __name__ == '__main__':
    sys.exit(main())
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/__init__.py
devscripts/__init__.py
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/make_changelog.py
devscripts/make_changelog.py
"""Generate a Markdown changelog from a git commit range."""
from __future__ import annotations

# Allow direct execution
import os
import sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import enum
import itertools
import json
import logging
import re
from collections import defaultdict
from dataclasses import dataclass
from functools import lru_cache
from pathlib import Path

from devscripts.utils import read_file, run_process, write_file

BASE_URL = 'https://github.com'
LOCATION_PATH = Path(__file__).parent
# Number of hex digits shown for abbreviated commit hashes
HASH_LENGTH = 7

logger = logging.getLogger(__name__)


class CommitGroup(enum.Enum):
    """Changelog section a commit belongs to; iteration order is output order."""
    PRIORITY = 'Important'
    CORE = 'Core'
    EXTRACTOR = 'Extractor'
    DOWNLOADER = 'Downloader'
    POSTPROCESSOR = 'Postprocessor'
    NETWORKING = 'Networking'
    MISC = 'Misc.'

    @classmethod
    @lru_cache
    def subgroup_lookup(cls):
        """Map known sub-prefix names (e.g. 'ci', 'rh') to their group."""
        return {
            name: group
            for group, names in {
                cls.MISC: {
                    'build',
                    'ci',
                    'cleanup',
                    'devscripts',
                    'docs',
                    'test',
                },
                cls.NETWORKING: {
                    'rh',
                },
            }.items()
            for name in names
        }

    @classmethod
    @lru_cache
    def group_lookup(cls):
        """Map prefix aliases ('fd', 'ie', ...) and member names to groups."""
        result = {
            'fd': cls.DOWNLOADER,
            'ie': cls.EXTRACTOR,
            'pp': cls.POSTPROCESSOR,
            'upstream': cls.CORE,
        }
        result.update({item.name.lower(): item for item in iter(cls)})
        return result

    @classmethod
    def get(cls, value: str) -> tuple[CommitGroup | None, str | None]:
        """Resolve a 'group/subgroup' prefix to (group, detail) — either may be None."""
        group, _, subgroup = (group.strip().lower() for group in value.partition('/'))

        if result := cls.group_lookup().get(group):
            return result, subgroup or None

        if subgroup:
            # Unknown group with an explicit subgroup: keep the raw value as detail
            return None, value

        return cls.subgroup_lookup().get(group), group or None


@dataclass
class Commit:
    """A single commit: hash (None for synthesized entries), subject line, authors."""
    hash: str | None
    short: str
    authors: list[str]

    def __str__(self):
        result = f'{self.short!r}'

        if self.hash:
            result += f' ({self.hash[:HASH_LENGTH]})'

        if self.authors:
            authors = ', '.join(self.authors)
            result += f' by {authors}'

        return result


@dataclass
class CommitInfo:
    """A parsed commit ready for changelog rendering."""
    details: str | None
    sub_details: tuple[str, ...]
    message: str
    issues: list[str]
    commit: Commit
    fixes: list[Commit]

    def key(self):
        # Sort key: case-insensitive detail, then sub-details, then message
        return ((self.details or '').lower(), self.sub_details, self.message)


def unique(items):
    """De-duplicate case-insensitively (keeping the last spelling) and sort."""
    return sorted({item.strip().lower(): item for item in items if item}.values())


class Changelog:
    """Renders grouped CommitInfo entries as a Markdown changelog."""
    MISC_RE = re.compile(r'(?:^|\b)(?:lint(?:ing)?|misc|format(?:ting)?|fixes)(?:\b|$)', re.IGNORECASE)
    ALWAYS_SHOWN = (CommitGroup.PRIORITY,)

    def __init__(self, groups, repo, collapsible=False):
        self._groups = groups
        self._repo = repo
        self._collapsible = collapsible

    def __str__(self):
        return '\n'.join(self._format_groups(self._groups)).replace('\t', '    ')

    def _format_groups(self, groups):
        """Yield one Markdown section per non-empty group, in enum order."""
        first = True
        for item in CommitGroup:
            # Everything after the always-shown groups goes inside a <details> fold
            if self._collapsible and item not in self.ALWAYS_SHOWN and first:
                first = False
                yield '\n<details><summary><h3>Changelog</h3></summary>\n'

            if group := groups[item]:
                yield self.format_module(item.value, group)

        if self._collapsible:
            yield '\n</details>'

    def format_module(self, name, group):
        """Render one group section with its heading."""
        result = f'\n#### {name} changes\n' if name else '\n'
        return result + '\n'.join(self._format_group(group))

    def _format_group(self, group):
        """Yield Markdown bullet lines, nested by detail and sub-detail."""
        sorted_group = sorted(group, key=CommitInfo.key)
        detail_groups = itertools.groupby(sorted_group, lambda item: (item.details or '').lower())
        for _, items in detail_groups:
            items = list(items)
            details = items[0].details

            if details == 'cleanup':
                items = self._prepare_cleanup_misc_items(items)

            prefix = '-'
            if details:
                if len(items) == 1:
                    # Single entry: fold the detail into the bullet itself
                    prefix = f'- **{details}**:'
                else:
                    yield f'- **{details}**'
                    prefix = '\t-'

            sub_detail_groups = itertools.groupby(items, lambda item: tuple(map(str.lower, item.sub_details)))
            for sub_details, entries in sub_detail_groups:
                if not sub_details:
                    for entry in entries:
                        yield f'{prefix} {self.format_single_change(entry)}'
                    continue

                entries = list(entries)
                sub_prefix = f'{prefix} {", ".join(entries[0].sub_details)}'
                if len(entries) == 1:
                    yield f'{sub_prefix}: {self.format_single_change(entries[0])}'
                    continue

                yield sub_prefix
                for entry in entries:
                    yield f'\t{prefix} {self.format_single_change(entry)}'

    def _prepare_cleanup_misc_items(self, items):
        """Merge misc/lint cleanup commits into one entry per author set."""
        cleanup_misc_items = defaultdict(list)
        sorted_items = []
        for item in items:
            if self.MISC_RE.search(item.message):
                cleanup_misc_items[tuple(item.commit.authors)].append(item)
            else:
                sorted_items.append(item)

        for commit_infos in cleanup_misc_items.values():
            # Synthesize a single "Miscellaneous" entry linking all merged commits
            sorted_items.append(CommitInfo(
                'cleanup', ('Miscellaneous',), ', '.join(
                    self._format_message_link(None, info.commit.hash)
                    for info in sorted(commit_infos, key=lambda item: item.commit.hash or '')),
                [], Commit(None, '', commit_infos[0].commit.authors), []))

        return sorted_items

    def format_single_change(self, info: CommitInfo):
        """Render one entry: message link, issues, authors and follow-up fixes."""
        message, sep, rest = info.message.partition('\n')
        if '[' not in message:
            # If the message doesn't already contain markdown links, try to add a link to the commit
            message = self._format_message_link(message, info.commit.hash)

        if info.issues:
            message = f'{message} ({self._format_issues(info.issues)})'

        if info.commit.authors:
            message = f'{message} by {self._format_authors(info.commit.authors)}'

        if info.fixes:
            fix_message = ', '.join(f'{self._format_message_link(None, fix.hash)}' for fix in info.fixes)

            authors = sorted({author for fix in info.fixes for author in fix.authors}, key=str.casefold)
            if authors != info.commit.authors:
                fix_message = f'{fix_message} by {self._format_authors(authors)}'

            message = f'{message} (With fixes in {fix_message})'

        # Re-attach any additional message lines untouched
        return message if not sep else f'{message}{sep}{rest}'

    def _format_message_link(self, message, commit_hash):
        assert message or commit_hash, 'Improperly defined commit message or override'
        message = message if message else commit_hash[:HASH_LENGTH]
        return f'[{message}]({self.repo_url}/commit/{commit_hash})' if commit_hash else message

    def _format_issues(self, issues):
        return ', '.join(f'[#{issue}]({self.repo_url}/issues/{issue})' for issue in issues)

    @staticmethod
    def _format_authors(authors):
        return ', '.join(f'[{author}]({BASE_URL}/{author})' for author in authors)

    @property
    def repo_url(self):
        return f'{BASE_URL}/{self._repo}'


class CommitRange:
    """Reads and parses `git log` output for a commit range."""
    COMMAND = 'git'
    COMMIT_SEPARATOR = '-----'

    AUTHOR_INDICATOR_RE = re.compile(r'Authored by:? ', re.IGNORECASE)
    MESSAGE_RE = re.compile(r'''
        (?:\[(?P<prefix>[^\]]+)\]\ )?
        (?:(?P<sub_details>`?[\w.-]+`?): )?
        (?P<message>.+?)
        (?:\ \((?P<issues>\#\d+(?:,\ \#\d+)*)\))?
        ''', re.VERBOSE | re.DOTALL)
    EXTRACTOR_INDICATOR_RE = re.compile(r'(?:Fix|Add)\s+Extractors?', re.IGNORECASE)
    REVERT_RE = re.compile(r'(?:\[[^\]]+\]\s+)?(?i:Revert)\s+([\da-f]{40})')
    FIXES_RE = re.compile(r'''
        (?i:
            (?:bug\s*)?fix(?:es)?(?:
                \s+(?:bugs?|regression(?:\s+introduced)?)
            )?(?:\s+(?:in|for|from|by))?
            |Improve
        )\s+([\da-f]{40})''', re.VERBOSE)
    UPSTREAM_MERGE_RE = re.compile(r'Update to ytdl-commit-([\da-f]+)')

    def __init__(self, start, end, default_author=None):
        self._start, self._end = start, end
        self._commits, self._fixes = self._get_commits_and_fixes(default_author)
        self._commits_added = []

    def __iter__(self):
        return iter(itertools.chain(self._commits.values(), self._commits_added))

    def __len__(self):
        return len(self._commits) + len(self._commits_added)

    def __contains__(self, commit):
        # Accepts either a Commit object or a raw hash string
        if isinstance(commit, Commit):
            if not commit.hash:
                return False
            commit = commit.hash

        return commit in self._commits

    def _get_commits_and_fixes(self, default_author):
        """Parse `git log`; returns (commits by hash, fix-commits by fixed hash)."""
        result = run_process(
            self.COMMAND, 'log', f'--format=%H%n%s%n%b%n{self.COMMIT_SEPARATOR}',
            f'{self._start}..{self._end}' if self._start else self._end).stdout

        commits, reverts = {}, {}
        fixes = defaultdict(list)
        lines = iter(result.splitlines(False))
        for i, commit_hash in enumerate(lines):
            short = next(lines)
            # Release/version-bump commits delimit the range when no start is given
            skip = short.startswith('Release ') or short == '[version] update'

            fix_commitish = None
            if match := self.FIXES_RE.search(short):
                fix_commitish = match.group(1)

            authors = [default_author] if default_author else []
            for line in iter(lambda: next(lines), self.COMMIT_SEPARATOR):
                if match := self.AUTHOR_INDICATOR_RE.match(line):
                    authors = sorted(map(str.strip, line[match.end():].split(',')), key=str.casefold)

                if not fix_commitish and (match := self.FIXES_RE.fullmatch(line)):
                    fix_commitish = match.group(1)

            commit = Commit(commit_hash, short, authors)
            if skip and (self._start or not i):
                logger.debug(f'Skipped commit: {commit}')
                continue
            elif skip:
                logger.debug(f'Reached Release commit, breaking: {commit}')
                break

            if match := self.REVERT_RE.fullmatch(commit.short):
                reverts[match.group(1)] = commit
                continue

            if fix_commitish:
                fixes[fix_commitish].append(commit)

            commits[commit.hash] = commit

        for commitish, revert_commit in reverts.items():
            # A revert cancels its target if both are in range; otherwise it is kept
            if reverted := commits.pop(commitish, None):
                logger.debug(f'{commitish} fully reverted {reverted}')
            else:
                commits[revert_commit.hash] = revert_commit

        for commitish, fix_commits in fixes.items():
            if commitish in commits:
                hashes = ', '.join(commit.hash[:HASH_LENGTH] for commit in fix_commits)
                logger.info(f'Found fix(es) for {commitish[:HASH_LENGTH]}: {hashes}')
                for fix_commit in fix_commits:
                    del commits[fix_commit.hash]
            else:
                logger.debug(f'Commit with fixes not in changes: {commitish[:HASH_LENGTH]}')

        return commits, fixes

    def apply_overrides(self, overrides):
        """Apply add/remove/change overrides from changelog_override.json."""
        for override in overrides:
            when = override.get('when')
            if when and when not in self and when != self._start:
                logger.debug(f'Ignored {when!r} override')
                continue

            override_hash = override.get('hash') or when
            if override['action'] == 'add':
                commit = Commit(override.get('hash'), override['short'], override.get('authors') or [])
                logger.info(f'ADD    {commit}')
                self._commits_added.append(commit)

            elif override['action'] == 'remove':
                if override_hash in self._commits:
                    logger.info(f'REMOVE {self._commits[override_hash]}')
                    del self._commits[override_hash]

            elif override['action'] == 'change':
                if override_hash not in self._commits:
                    continue
                commit = Commit(override_hash, override['short'], override.get('authors') or [])
                logger.info(f'CHANGE {self._commits[commit.hash]} -> {commit}')
                if match := self.FIXES_RE.search(commit.short):
                    # The changed message may now mark this commit as a fix
                    fix_commitish = match.group(1)
                    if fix_commitish in self._commits:
                        del self._commits[commit.hash]
                        self._fixes[fix_commitish].append(commit)
                        logger.info(f'Found fix for {fix_commitish[:HASH_LENGTH]}: {commit.hash[:HASH_LENGTH]}')
                        continue
                self._commits[commit.hash] = commit

        # git log is newest-first; render oldest-first
        self._commits = dict(reversed(self._commits.items()))

    def groups(self):
        """Parse every commit subject and bucket the results by CommitGroup."""
        group_dict = defaultdict(list)
        for commit in self:
            upstream_re = self.UPSTREAM_MERGE_RE.search(commit.short)
            if upstream_re:
                commit.short = f'[upstream] Merged with youtube-dl {upstream_re.group(1)}'

            match = self.MESSAGE_RE.fullmatch(commit.short)
            if not match:
                logger.error(f'Error parsing short commit message: {commit.short!r}')
                continue

            prefix, sub_details_alt, message, issues = match.groups()
            issues = [issue.strip()[1:] for issue in issues.split(',')] if issues else []

            if prefix:
                # A commit may carry several comma-separated prefixes
                groups, details, sub_details = zip(*map(self.details_from_prefix, prefix.split(',')), strict=True)
                group = next(iter(filter(None, groups)), None)
                details = ', '.join(unique(details))
                sub_details = list(itertools.chain.from_iterable(sub_details))
            else:
                group = CommitGroup.CORE
                details = None
                sub_details = []

            if sub_details_alt:
                sub_details.append(sub_details_alt)
            sub_details = tuple(unique(sub_details))

            if not group:
                if self.EXTRACTOR_INDICATOR_RE.search(commit.short):
                    group = CommitGroup.EXTRACTOR
                    logger.error(f'Assuming [ie] group for {commit.short!r}')
                else:
                    group = CommitGroup.CORE

            commit_info = CommitInfo(
                details, sub_details, message.strip(),
                issues, commit, self._fixes[commit.hash])

            logger.debug(f'Resolved {commit.short!r} to {commit_info!r}')
            group_dict[group].append(commit_info)

        return group_dict

    @staticmethod
    def details_from_prefix(prefix):
        """Split one '[group/detail:sub]' prefix into (group, detail, sub_details)."""
        if not prefix:
            return CommitGroup.CORE, None, ()

        prefix, *sub_details = prefix.split(':')

        group, details = CommitGroup.get(prefix)
        if group is CommitGroup.PRIORITY and details:
            details = details.partition('/')[2].strip()

        if details and '/' in details:
            logger.error(f'Prefix is overnested, using first part: {prefix}')
            details = details.partition('/')[0].strip()

        if details == 'common':
            details = None
        elif group is CommitGroup.NETWORKING and details == 'rh':
            details = 'Request Handler'

        return group, details, sub_details


def get_new_contributors(contributors_path, commits):
    """Return authors in *commits* that are not yet listed in CONTRIBUTORS."""
    contributors = set()
    if contributors_path.exists():
        for line in read_file(contributors_path).splitlines():
            author, _, _ = line.strip().partition(' (')
            authors = author.split('/')
            contributors.update(map(str.casefold, authors))

    new_contributors = set()
    for commit in commits:
        for author in commit.authors:
            author_folded = author.casefold()
            if author_folded not in contributors:
                contributors.add(author_folded)
                new_contributors.add(author)

    return sorted(new_contributors, key=str.casefold)


def create_changelog(args):
    """Build a Changelog for the requested range, applying overrides."""
    logging.basicConfig(
        datefmt='%Y-%m-%d %H-%M-%S', format='{asctime} | {levelname:<8} | {message}',
        level=logging.WARNING - 10 * args.verbosity, style='{', stream=sys.stderr)

    commits = CommitRange(None, args.commitish, args.default_author)

    if not args.no_override:
        if args.override_path.exists():
            overrides = json.loads(read_file(args.override_path))
            commits.apply_overrides(overrides)
        else:
            logger.warning(f'File {args.override_path.as_posix()} does not exist')

    logger.info(f'Loaded {len(commits)} commits')

    if new_contributors := get_new_contributors(args.contributors_path, commits):
        if args.contributors:
            write_file(args.contributors_path, '\n'.join(new_contributors) + '\n', mode='a')
        logger.info(f'New contributors: {", ".join(new_contributors)}')

    return Changelog(commits.groups(), args.repo, args.collapsible)


def create_parser():
    """Build the command-line interface for this script."""
    import argparse

    parser = argparse.ArgumentParser(
        description='Create a changelog markdown from a git commit range')
    parser.add_argument(
        'commitish', default='HEAD', nargs='?',
        help='The commitish to create the range from (default: %(default)s)')
    parser.add_argument(
        '-v', '--verbosity', action='count', default=0,
        help='increase verbosity (can be used twice)')
    parser.add_argument(
        '-c', '--contributors', action='store_true',
        help='update CONTRIBUTORS file (default: %(default)s)')
    parser.add_argument(
        '--contributors-path', type=Path, default=LOCATION_PATH.parent / 'CONTRIBUTORS',
        help='path to the CONTRIBUTORS file')
    parser.add_argument(
        '--no-override', action='store_true',
        help='skip override json in commit generation (default: %(default)s)')
    parser.add_argument(
        '--override-path', type=Path, default=LOCATION_PATH / 'changelog_override.json',
        help='path to the changelog_override.json file')
    parser.add_argument(
        '--default-author', default='pukkandan',
        help='the author to use without a author indicator (default: %(default)s)')
    parser.add_argument(
        '--repo', default='yt-dlp/yt-dlp',
        help='the github repository to use for the operations (default: %(default)s)')
    parser.add_argument(
        '--collapsible', action='store_true',
        help='make changelog collapsible (default: %(default)s)')

    return parser


if __name__ == '__main__':
    print(create_changelog(create_parser().parse_args()))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/update_ejs.py
devscripts/update_ejs.py
#!/usr/bin/env python3
from __future__ import annotations

import contextlib
import io
import json
import hashlib
import pathlib
import urllib.request
import zipfile

TEMPLATE = '''\
# This file is generated by devscripts/update_ejs.py. DO NOT MODIFY!
VERSION = {version!r}
HASHES = {{
{hash_mapping}
}}
'''
PREFIX = '    "yt-dlp-ejs=='
BASE_PATH = pathlib.Path(__file__).parent.parent
PYPROJECT_PATH = BASE_PATH / 'pyproject.toml'
PACKAGE_PATH = BASE_PATH / 'yt_dlp/extractor/youtube/jsc/_builtin/vendor'
RELEASE_URL = 'https://api.github.com/repos/yt-dlp/ejs/releases/latest'
# Asset name -> whether the file is vendored into PACKAGE_PATH
ASSETS = {
    'yt.solver.lib.js': False,
    'yt.solver.lib.min.js': False,
    'yt.solver.deno.lib.js': True,
    'yt.solver.bun.lib.js': True,
    'yt.solver.core.min.js': False,
    'yt.solver.core.js': True,
}
MAKEFILE_PATH = BASE_PATH / 'Makefile'


def request(url: str):
    """Open *url*, yielding a response that is closed on context exit."""
    return contextlib.closing(urllib.request.urlopen(url))


def makefile_variables(
    version: str | None = None,
    name: str | None = None,
    digest: str | None = None,
    data: bytes | None = None,
    keys_only: bool = False,
) -> dict[str, str | None]:
    """Build the EJS_* Makefile variable mapping for a wheel.

    With ``keys_only=True``, returns the keys mapped to None (used to
    harvest the current values from the Makefile).
    """
    assert keys_only or all(arg is not None for arg in (version, name, digest, data))
    if keys_only:
        return dict.fromkeys((
            'EJS_VERSION', 'EJS_WHEEL_NAME', 'EJS_WHEEL_HASH',
            'EJS_PY_FOLDERS', 'EJS_PY_FILES', 'EJS_JS_FOLDERS', 'EJS_JS_FILES'))
    return {
        'EJS_VERSION': version,
        'EJS_WHEEL_NAME': name,
        'EJS_WHEEL_HASH': digest,
        'EJS_PY_FOLDERS': list_wheel_contents(data, 'py', files=False),
        'EJS_PY_FILES': list_wheel_contents(data, 'py', folders=False),
        'EJS_JS_FOLDERS': list_wheel_contents(data, 'js', files=False),
        'EJS_JS_FILES': list_wheel_contents(data, 'js', folders=False),
    }


def list_wheel_contents(
    wheel_data: bytes,
    suffix: str | None = None,
    folders: bool = True,
    files: bool = True,
) -> str:
    """Space-join the yt_dlp_ejs/ paths (folders and/or files) inside a wheel."""
    assert folders or files, 'at least one of "folders" or "files" must be True'
    with zipfile.ZipFile(io.BytesIO(wheel_data)) as zipf:
        files_list = [
            zinfo.filename for zinfo in zipf.infolist()
            if zinfo.filename.startswith('yt_dlp_ejs/')
            and (not suffix or zinfo.filename.endswith(f'.{suffix}'))
        ]
    if not folders:
        return ' '.join(files_list)
    # Parent directories, de-duplicated while preserving order
    folders_list = list(dict.fromkeys(path.rpartition('/')[0] for path in files_list))
    if not files:
        return ' '.join(folders_list)
    return ' '.join(folders_list + files_list)


def main():
    """Sync the vendored yt-dlp-ejs assets and pins with the latest release."""
    # Find the currently pinned yt-dlp-ejs version in pyproject.toml
    current_version = None
    with PYPROJECT_PATH.open() as fh:
        for line in fh:
            if line.startswith(PREFIX):
                current_version, _, _ = line.removeprefix(PREFIX).partition('"')
    if not current_version:
        print('yt-dlp-ejs dependency line could not be found')
        return

    # Collect the current values of the EJS_* variables from the Makefile
    makefile_info = makefile_variables(keys_only=True)
    prefixes = tuple(f'{key} = ' for key in makefile_info)
    with MAKEFILE_PATH.open() as fh:
        for line in fh:
            if line.startswith(prefixes):
                key, _, value = line.partition(' = ')
                makefile_info[key] = value.rstrip()

    with request(RELEASE_URL) as response:
        release = json.load(response)

    version = release['tag_name']
    if version == current_version:
        print(f'yt-dlp-ejs is up to date! ({version})')
        return
    print(f'Updating yt-dlp-ejs from {current_version} to {version}')

    hashes = []
    wheel_info = {}
    for asset in release['assets']:
        name = asset['name']
        is_wheel = name.startswith('yt_dlp_ejs-') and name.endswith('.whl')
        if not is_wheel and name not in ASSETS:
            continue
        with request(asset['browser_download_url']) as response:
            payload = response.read()
        # verify digest from github
        digest = asset['digest']
        algo, _, expected = digest.partition(':')
        hexdigest = hashlib.new(algo, payload).hexdigest()
        assert hexdigest == expected, f'downloaded attest mismatch ({hexdigest!r} != {expected!r})'
        if is_wheel:
            wheel_info = makefile_variables(version, name, digest, payload)
            continue
        # calculate sha3-512 digest
        asset_hash = hashlib.sha3_512(payload).hexdigest()
        hashes.append(f'    {name!r}: {asset_hash!r},')
        if ASSETS[name]:
            (PACKAGE_PATH / name).write_bytes(payload)

    hash_mapping = '\n'.join(hashes)
    for asset_name in ASSETS:
        assert asset_name in hash_mapping, f'{asset_name} not found in release'
    assert all(wheel_info.get(key) for key in makefile_info), 'wheel info not found in release'

    (PACKAGE_PATH / '_info.py').write_text(TEMPLATE.format(
        version=version,
        hash_mapping=hash_mapping,
    ))

    # Bump the pinned version in pyproject.toml
    pyproject = PYPROJECT_PATH.read_text()
    PYPROJECT_PATH.write_text(pyproject.replace(PREFIX + current_version, PREFIX + version))

    # Rewrite the EJS_* variables in the Makefile with the new wheel info
    makefile = MAKEFILE_PATH.read_text()
    for key in wheel_info:
        makefile = makefile.replace(f'{key} = {makefile_info[key]}', f'{key} = {wheel_info[key]}')
    MAKEFILE_PATH.write_text(makefile)


if __name__ == '__main__':
    main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/setup_variables_tests.py
devscripts/setup_variables_tests.py
"""Self-tests for devscripts.setup_variables (release channel/version resolution)."""
import os
import sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import datetime as dt
import json

from devscripts.setup_variables import STABLE_REPOSITORY, process_inputs, setup_variables
from devscripts.utils import calculate_version

# Sentinel: when passed as `expected`, _test prints the actual result
# (formatted as a dict literal) instead of asserting, to help write new cases
GENERATE_TEST_DATA = object()


def _test(github_repository, note, repo_vars, repo_secrets, inputs, expected, ignore_revision=False):
    """Run setup_variables with a simulated Actions environment and check the result.

    github_repository: value of the GITHUB_REPOSITORY context
    note:              human-readable case description used in assertion messages
    repo_vars:         simulated repository variables (keys upper-cased before lookup)
    repo_secrets:      simulated repository secrets (only ARCHIVE_REPO_TOKEN presence matters)
    inputs:            workflow_dispatch inputs; linux_armv7l/prerelease get defaults
    expected:          expected result dict, None for expected failure,
                       or GENERATE_TEST_DATA to print the actual result
    ignore_revision:   compare only the date part of the version (the
                       HHMMSS revision is time-dependent)
    """
    inp = inputs.copy()
    inp.setdefault('linux_armv7l', True)
    inp.setdefault('prerelease', False)
    processed = process_inputs(inp)
    source_repo = processed['source_repo'].upper()
    target_repo = processed['target_repo'].upper()
    variables = {k.upper(): v for k, v in repo_vars.items()}
    secrets = {k.upper(): v for k, v in repo_secrets.items()}
    env = {
        # Keep this in sync with prepare.setup_variables in release.yml
        'INPUTS': json.dumps(inp),
        'PROCESSED': json.dumps(processed),
        'REPOSITORY': github_repository,
        'PUSH_VERSION_COMMIT': variables.get('PUSH_VERSION_COMMIT') or '',
        'PYPI_PROJECT': variables.get('PYPI_PROJECT') or '',
        'SOURCE_PYPI_PROJECT': variables.get(f'{source_repo}_PYPI_PROJECT') or '',
        'SOURCE_PYPI_SUFFIX': variables.get(f'{source_repo}_PYPI_SUFFIX') or '',
        'TARGET_PYPI_PROJECT': variables.get(f'{target_repo}_PYPI_PROJECT') or '',
        'TARGET_PYPI_SUFFIX': variables.get(f'{target_repo}_PYPI_SUFFIX') or '',
        'SOURCE_ARCHIVE_REPO': variables.get(f'{source_repo}_ARCHIVE_REPO') or '',
        'TARGET_ARCHIVE_REPO': variables.get(f'{target_repo}_ARCHIVE_REPO') or '',
        'HAS_ARCHIVE_REPO_TOKEN': json.dumps(bool(secrets.get('ARCHIVE_REPO_TOKEN'))),
    }
    result = setup_variables(env)
    if expected is GENERATE_TEST_DATA:
        # Emit a ready-to-paste expected dict for a new test case
        print('    {\n' + '\n'.join(f'        {k!r}: {v!r},' for k, v in result.items()) + '\n    }')
        return
    if expected is None:
        assert result is None, f'expected error/None but got dict: {github_repository} {note}'
        return
    exp = expected.copy()
    if ignore_revision:
        # Only verify that *some* revision is present, then strip it from both
        # sides (and from target_tag when the tag is the version itself)
        assert len(result['version']) == len(exp['version']), f'revision missing: {github_repository} {note}'
        version_is_tag = result['version'] == result['target_tag']
        for dct in (result, exp):
            dct['version'] = '.'.join(dct['version'].split('.')[:3])
            if version_is_tag:
                dct['target_tag'] = dct['version']
    assert result == exp, f'unexpected result: {github_repository} {note}'


def test_setup_variables():
    """Exercise official-repo, fork, and misconfiguration release scenarios."""
    # Version with a time-based revision component (YYYY.MM.DD.HHMMSS)
    DEFAULT_VERSION_WITH_REVISION = dt.datetime.now(tz=dt.timezone.utc).strftime('%Y.%m.%d.%H%M%S')
    DEFAULT_VERSION = calculate_version()
    # Variables/secrets mirroring the official yt-dlp repository configuration
    BASE_REPO_VARS = {
        'MASTER_ARCHIVE_REPO': 'yt-dlp/yt-dlp-master-builds',
        'NIGHTLY_ARCHIVE_REPO': 'yt-dlp/yt-dlp-nightly-builds',
        'NIGHTLY_PYPI_PROJECT': 'yt-dlp',
        'NIGHTLY_PYPI_SUFFIX': 'dev',
        'PUSH_VERSION_COMMIT': '1',
        'PYPI_PROJECT': 'yt-dlp',
    }
    BASE_REPO_SECRETS = {
        'ARCHIVE_REPO_TOKEN': '1',
    }
    FORK_REPOSITORY = 'fork/yt-dlp'
    FORK_ORG = FORK_REPOSITORY.partition('/')[0]

    # --- Official repository with full vars/secrets ---
    _test(
        STABLE_REPOSITORY, 'official vars/secrets, stable',
        BASE_REPO_VARS, BASE_REPO_SECRETS, {}, {
            'channel': 'stable',
            'version': DEFAULT_VERSION,
            'target_repo': STABLE_REPOSITORY,
            'target_tag': DEFAULT_VERSION,
            'pypi_project': 'yt-dlp',
            'pypi_suffix': None,
        })
    _test(
        STABLE_REPOSITORY, 'official vars/secrets, nightly (w/o target)',
        BASE_REPO_VARS, BASE_REPO_SECRETS, {
            'source': 'nightly',
            'prerelease': True,
        }, {
            'channel': 'nightly',
            'version': DEFAULT_VERSION_WITH_REVISION,
            'target_repo': 'yt-dlp/yt-dlp-nightly-builds',
            'target_tag': DEFAULT_VERSION_WITH_REVISION,
            'pypi_project': 'yt-dlp',
            'pypi_suffix': 'dev',
        }, ignore_revision=True)
    _test(
        STABLE_REPOSITORY, 'official vars/secrets, nightly',
        BASE_REPO_VARS, BASE_REPO_SECRETS, {
            'source': 'nightly',
            'target': 'nightly',
            'prerelease': True,
        }, {
            'channel': 'nightly',
            'version': DEFAULT_VERSION_WITH_REVISION,
            'target_repo': 'yt-dlp/yt-dlp-nightly-builds',
            'target_tag': DEFAULT_VERSION_WITH_REVISION,
            'pypi_project': 'yt-dlp',
            'pypi_suffix': 'dev',
        }, ignore_revision=True)
    _test(
        STABLE_REPOSITORY, 'official vars/secrets, master (w/o target)',
        BASE_REPO_VARS, BASE_REPO_SECRETS, {
            'source': 'master',
            'prerelease': True,
        }, {
            'channel': 'master',
            'version': DEFAULT_VERSION_WITH_REVISION,
            'target_repo': 'yt-dlp/yt-dlp-master-builds',
            'target_tag': DEFAULT_VERSION_WITH_REVISION,
            'pypi_project': None,
            'pypi_suffix': None,
        }, ignore_revision=True)
    _test(
        STABLE_REPOSITORY, 'official vars/secrets, master',
        BASE_REPO_VARS, BASE_REPO_SECRETS, {
            'source': 'master',
            'target': 'master',
            'prerelease': True,
        }, {
            'channel': 'master',
            'version': DEFAULT_VERSION_WITH_REVISION,
            'target_repo': 'yt-dlp/yt-dlp-master-builds',
            'target_tag': DEFAULT_VERSION_WITH_REVISION,
            'pypi_project': None,
            'pypi_suffix': None,
        }, ignore_revision=True)
    # Special (non-version) tags on the stable repo keep the stable channel
    _test(
        STABLE_REPOSITORY, 'official vars/secrets, special tag, updates to stable',
        BASE_REPO_VARS, BASE_REPO_SECRETS, {
            'target': f'{STABLE_REPOSITORY}@experimental',
            'prerelease': True,
        }, {
            'channel': 'stable',
            'version': DEFAULT_VERSION_WITH_REVISION,
            'target_repo': STABLE_REPOSITORY,
            'target_tag': 'experimental',
            'pypi_project': None,
            'pypi_suffix': None,
        }, ignore_revision=True)
    _test(
        STABLE_REPOSITORY, 'official vars/secrets, special tag, "stable" as target repo',
        BASE_REPO_VARS, BASE_REPO_SECRETS, {
            'target': 'stable@experimental',
            'prerelease': True,
        }, {
            'channel': 'stable',
            'version': DEFAULT_VERSION_WITH_REVISION,
            'target_repo': STABLE_REPOSITORY,
            'target_tag': 'experimental',
            'pypi_project': None,
            'pypi_suffix': None,
        }, ignore_revision=True)

    # --- Fork without any configuration ---
    _test(
        FORK_REPOSITORY, 'fork w/o vars/secrets, stable',
        {}, {}, {}, {
            'channel': FORK_REPOSITORY,
            'version': DEFAULT_VERSION_WITH_REVISION,
            'target_repo': FORK_REPOSITORY,
            'target_tag': DEFAULT_VERSION_WITH_REVISION,
            'pypi_project': None,
            'pypi_suffix': None,
        }, ignore_revision=True)
    _test(
        FORK_REPOSITORY, 'fork w/o vars/secrets, prerelease',
        {}, {}, {'prerelease': True}, {
            'channel': FORK_REPOSITORY,
            'version': DEFAULT_VERSION_WITH_REVISION,
            'target_repo': FORK_REPOSITORY,
            'target_tag': DEFAULT_VERSION_WITH_REVISION,
            'pypi_project': None,
            'pypi_suffix': None,
        }, ignore_revision=True)
    _test(
        FORK_REPOSITORY, 'fork w/o vars/secrets, nightly',
        {}, {}, {
            'prerelease': True,
            'source': 'nightly',
            'target': 'nightly',
        }, {
            'channel': f'{FORK_REPOSITORY}@nightly',
            'version': DEFAULT_VERSION_WITH_REVISION,
            'target_repo': FORK_REPOSITORY,
            'target_tag': 'nightly',
            'pypi_project': None,
            'pypi_suffix': None,
        }, ignore_revision=True)
    _test(
        FORK_REPOSITORY, 'fork w/o vars/secrets, master',
        {}, {}, {
            'prerelease': True,
            'source': 'master',
            'target': 'master',
        }, {
            'channel': f'{FORK_REPOSITORY}@master',
            'version': DEFAULT_VERSION_WITH_REVISION,
            'target_repo': FORK_REPOSITORY,
            'target_tag': 'master',
            'pypi_project': None,
            'pypi_suffix': None,
        }, ignore_revision=True)
    # Explicit revision input replaces the time-based revision
    _test(
        FORK_REPOSITORY, 'fork w/o vars/secrets, revision',
        {}, {}, {'version': '123'}, {
            'channel': FORK_REPOSITORY,
            'version': f'{DEFAULT_VERSION[:10]}.123',
            'target_repo': FORK_REPOSITORY,
            'target_tag': f'{DEFAULT_VERSION[:10]}.123',
            'pypi_project': None,
            'pypi_suffix': None,
        })

    # --- Fork with partial configuration ---
    _test(
        FORK_REPOSITORY, 'fork w/ PUSH_VERSION_COMMIT, stable',
        {'PUSH_VERSION_COMMIT': '1'}, {}, {}, {
            'channel': FORK_REPOSITORY,
            'version': DEFAULT_VERSION,
            'target_repo': FORK_REPOSITORY,
            'target_tag': DEFAULT_VERSION,
            'pypi_project': None,
            'pypi_suffix': None,
        })
    _test(
        FORK_REPOSITORY, 'fork w/ PUSH_VERSION_COMMIT, prerelease',
        {'PUSH_VERSION_COMMIT': '1'}, {}, {'prerelease': True}, {
            'channel': FORK_REPOSITORY,
            'version': DEFAULT_VERSION_WITH_REVISION,
            'target_repo': FORK_REPOSITORY,
            'target_tag': DEFAULT_VERSION_WITH_REVISION,
            'pypi_project': None,
            'pypi_suffix': None,
        }, ignore_revision=True)
    _test(
        FORK_REPOSITORY, 'fork, nightly',
        {
            'NIGHTLY_ARCHIVE_REPO': f'{FORK_ORG}/yt-dlp-nightly-builds',
            'PYPI_PROJECT': 'yt-dlp-test',
        }, BASE_REPO_SECRETS, {
            'source': f'{FORK_ORG}/yt-dlp-nightly-builds',
            'target': 'nightly',
            'prerelease': True,
        }, {
            'channel': f'{FORK_ORG}/yt-dlp-nightly-builds',
            'version': DEFAULT_VERSION_WITH_REVISION,
            'target_repo': f'{FORK_ORG}/yt-dlp-nightly-builds',
            'target_tag': DEFAULT_VERSION_WITH_REVISION,
            'pypi_project': None,
            'pypi_suffix': None,
        }, ignore_revision=True)
    _test(
        FORK_REPOSITORY, 'fork, master',
        {
            'MASTER_ARCHIVE_REPO': f'{FORK_ORG}/yt-dlp-master-builds',
            'MASTER_PYPI_PROJECT': 'yt-dlp-test',
            'MASTER_PYPI_SUFFIX': 'dev',
        }, BASE_REPO_SECRETS, {
            'source': f'{FORK_ORG}/yt-dlp-master-builds',
            'target': 'master',
            'prerelease': True,
        }, {
            'channel': f'{FORK_ORG}/yt-dlp-master-builds',
            'version': DEFAULT_VERSION_WITH_REVISION,
            'target_repo': f'{FORK_ORG}/yt-dlp-master-builds',
            'target_tag': DEFAULT_VERSION_WITH_REVISION,
            'pypi_project': 'yt-dlp-test',
            'pypi_suffix': 'dev',
        }, ignore_revision=True)
    _test(
        FORK_REPOSITORY, 'fork, non-numeric tag',
        {}, {}, {'source': 'experimental'}, {
            'channel': f'{FORK_REPOSITORY}@experimental',
            'version': DEFAULT_VERSION_WITH_REVISION,
            'target_repo': FORK_REPOSITORY,
            'target_tag': 'experimental',
            'pypi_project': None,
            'pypi_suffix': None,
        }, ignore_revision=True)
    _test(
        FORK_REPOSITORY, 'fork, non-numeric tag, updates to stable',
        {}, {}, {
            'prerelease': True,
            'source': 'stable',
            'target': 'experimental',
        }, {
            'channel': 'stable',
            'version': DEFAULT_VERSION_WITH_REVISION,
            'target_repo': FORK_REPOSITORY,
            'target_tag': 'experimental',
            'pypi_project': None,
            'pypi_suffix': None,
        }, ignore_revision=True)

    # --- Misconfiguration: archive target without the token must fail ---
    _test(
        STABLE_REPOSITORY, 'official vars but no ARCHIVE_REPO_TOKEN, nightly',
        BASE_REPO_VARS, {}, {
            'source': 'nightly',
            'target': 'nightly',
            'prerelease': True,
        }, None)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/cli_to_api.py
devscripts/cli_to_api.py
#!/usr/bin/env python3
"""Translate yt-dlp CLI arguments into the equivalent ``YoutubeDL`` API options."""

# Allow direct execution
import os
import sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import yt_dlp
import yt_dlp.options

create_parser = yt_dlp.options.create_parser


def parse_patched_options(opts):
    """Parse *opts* with CLI-only defaults neutralized to their API values."""
    parser = create_parser()
    # These options default differently on the CLI than in the API
    parser.defaults.update({
        'ignoreerrors': False,
        'retries': 0,
        'fragment_retries': 0,
        'extract_flat': False,
        'concat_playlist': 'never',
        'update_self': False,
    })
    # Temporarily swap in the patched parser for the duration of parse_options
    yt_dlp.options.create_parser = lambda: parser
    try:
        return yt_dlp.parse_options(opts)
    finally:
        yt_dlp.options.create_parser = create_parser


# Baseline: what an empty command line parses to (with patched defaults)
default_opts = parse_patched_options([]).ydl_opts


def cli_to_api(opts, cli_defaults=False):
    """Return only the ydl_opts for *opts* that differ from the defaults.

    When *cli_defaults* is true, the unpatched CLI defaults are used.
    """
    parse = yt_dlp.parse_options if cli_defaults else parse_patched_options
    parsed = parse(opts).ydl_opts

    diff = {name: value for name, value in parsed.items() if default_opts[name] != value}
    if 'postprocessors' in diff:
        # Keep only postprocessors that are not already present by default
        default_pps = default_opts['postprocessors']
        diff['postprocessors'] = [pp for pp in diff['postprocessors'] if pp not in default_pps]
    return diff


if __name__ == '__main__':
    from pprint import pprint

    print('\nThe arguments passed translate to:\n')
    pprint(cli_to_api(sys.argv[1:]))
    print('\nCombining these with the CLI defaults gives:\n')
    pprint(cli_to_api(sys.argv[1:], True))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/make_supportedsites.py
devscripts/make_supportedsites.py
#!/usr/bin/env python3
"""Generate the "Supported sites" document from the extractor list."""

# Allow direct execution
import os
import sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from devscripts.utils import get_filename_args, write_file
from yt_dlp.extractor import list_extractor_classes

TEMPLATE = '''\
# Supported sites

Below is a list of all extractors that are currently included with yt-dlp.
If a site is not listed here, it might still be supported by yt-dlp's embed extraction or generic extractor.
Not all sites listed here are guaranteed to work; websites are constantly changing and sometimes this breaks yt-dlp's support for them.
The only reliable way to check if a site is supported is to try it.

{ie_list}
'''


def main():
    # Extractors with IE_DESC set to False are intentionally hidden from the list
    descriptions = (
        extractor.description()
        for extractor in list_extractor_classes()
        if extractor.IE_DESC is not False
    )
    write_file(get_filename_args(), TEMPLATE.format(ie_list='\n'.join(descriptions)))


if __name__ == '__main__':
    main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/devscripts/make_lazy_extractors.py
devscripts/make_lazy_extractors.py
#!/usr/bin/env python3
"""Generate yt_dlp/extractor/lazy_extractors.py: lightweight stand-in classes
that defer importing the real extractor modules until actually needed."""

# Allow direct execution
import os
import sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from inspect import getsource

from devscripts.utils import get_filename_args, read_file, write_file
from yt_dlp.extractor import import_extractors
from yt_dlp.extractor.common import InfoExtractor, SearchInfoExtractor
from yt_dlp.globals import extractors

# Sentinel meaning "attribute not present on the base" (None is a valid value)
NO_ATTR = object()
STATIC_CLASS_PROPERTIES = [
    'IE_NAME', '_ENABLED', '_VALID_URL',  # Used for URL matching
    '_WORKING', 'IE_DESC', '_NETRC_MACHINE', 'SEARCH_KEY',  # Used for --extractor-descriptions
    'age_limit',  # Used for --age-limit (evaluated)
    '_RETURN_TYPE',  # Accessed in CLI only with instance (evaluated)
]
CLASS_METHODS = [
    'ie_key', 'suitable', '_match_valid_url',  # Used for URL matching
    'working', 'get_temp_id', '_match_id',  # Accessed just before instance creation
    'description',  # Used for --extractor-descriptions
    'is_suitable',  # Used for --age-limit
    'supports_login', 'is_single_video',  # Accessed in CLI only with instance
]
IE_TEMPLATE = '''
class {name}({bases}):
    _module = {module!r}
'''
MODULE_TEMPLATE = read_file('devscripts/lazy_load_template.py')


def main():
    """Build the lazy extractor module source and write it to the output file."""
    # Avoid plugins and any pre-existing lazy extractors influencing the build
    os.environ['YTDLP_NO_PLUGINS'] = 'true'
    os.environ['YTDLP_NO_LAZY_EXTRACTORS'] = 'true'

    lazy_extractors_filename = get_filename_args(default_outfile='yt_dlp/extractor/lazy_extractors.py')

    import_extractors()

    # Dummy base used as the attribute baseline: only attributes that differ
    # from it get copied onto each lazy class
    DummyInfoExtractor = type('InfoExtractor', (InfoExtractor,), {'IE_NAME': NO_ATTR})
    module_src = '\n'.join((
        MODULE_TEMPLATE,
        '    _module = None',
        *extra_ie_code(DummyInfoExtractor),
        '\nclass LazyLoadSearchExtractor(LazyLoadExtractor):\n    pass\n',
        *build_ies(list(extractors.value.values()), (InfoExtractor, SearchInfoExtractor), DummyInfoExtractor),
    ))

    write_file(lazy_extractors_filename, f'{module_src}\n')


def extra_ie_code(ie, base=None):
    """Yield source lines for the properties/methods of *ie* that differ from *base*."""
    for var in STATIC_CLASS_PROPERTIES:
        val = getattr(ie, var)
        if val != (getattr(base, var) if base else NO_ATTR):
            yield f'    {var} = {val!r}'
    yield ''

    for name in CLASS_METHODS:
        f = getattr(ie, name)
        # Only copy methods the class actually overrides relative to the base
        if not base or f.__func__ != getattr(base, name).__func__:
            yield getsource(f)


def build_ies(ies, bases, attr_base):
    """Yield class source for every extractor plus the final _CLASS_LOOKUP table."""
    names = []
    for ie in sort_ies(ies, bases):
        yield build_lazy_ie(ie, ie.__name__, attr_base)
        # sort_ies may also yield intermediate base classes that are not
        # extractors themselves; only real extractors go into the lookup
        if ie in ies:
            names.append(ie.__name__)

    yield '\n_CLASS_LOOKUP = {%s}' % ', '.join(f'{name!r}: {name}' for name in names)


def sort_ies(ies, ignored_bases):
    """find the correct sorting and add the required base classes so that subclasses can be correctly created"""
    classes, returned_classes = ies[:-1], set()
    assert ies[-1].__name__ == 'GenericIE', 'Last IE must be GenericIE'

    while classes:
        for c in classes[:]:
            bases = set(c.__bases__) - {object, *ignored_bases}
            restart = False
            for b in sorted(bases, key=lambda x: x.__name__):
                # Pull in intermediate bases that are not extractors themselves
                if b not in classes and b not in returned_classes:
                    assert b.__name__ != 'GenericIE', 'Cannot inherit from GenericIE'
                    classes.insert(0, b)
                    restart = True
            if restart:
                break
            # Emit a class only once all of its bases have been emitted
            if bases <= returned_classes:
                yield c
                returned_classes.add(c)
                classes.remove(c)
                break
    yield ies[-1]


def build_lazy_ie(ie, name, attr_base):
    """Return the generated source of the lazy stand-in class for *ie*."""
    # Map the two known roots onto their lazy counterparts; other bases keep their name
    bases = ', '.join({
        'InfoExtractor': 'LazyLoadExtractor',
        'SearchInfoExtractor': 'LazyLoadSearchExtractor',
    }.get(base.__name__, base.__name__) for base in ie.__bases__)

    s = IE_TEMPLATE.format(name=name, module=ie.__module__, bases=bases)
    return s + '\n'.join(extra_ie_code(ie, attr_base))


if __name__ == '__main__':
    main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/bundle/__init__.py
bundle/__init__.py
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/bundle/pyinstaller.py
bundle/pyinstaller.py
#!/usr/bin/env python3
"""Build a standalone yt-dlp executable with PyInstaller."""

# Allow direct execution
import os
import sys

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import platform

from PyInstaller.__main__ import run as run_pyinstaller

from devscripts.utils import read_version

# Platform identifiers used to name the artifact; ARCH is '32' or '64'
OS_NAME, MACHINE, ARCH = sys.platform, platform.machine().lower(), platform.architecture()[0][:2]
if OS_NAME == 'linux' and platform.libc_ver()[0] != 'glibc':
    # Non-glibc Linux (e.g. musl) builds get their own artifact name
    OS_NAME = 'musllinux'
if MACHINE in ('x86', 'x86_64', 'amd64', 'i386', 'i686'):
    # 64-bit x86 is the unmarked default; 32-bit x86 is tagged 'x86'
    MACHINE = 'x86' if ARCH == '32' else ''


def main():
    """Assemble the PyInstaller argument list, run the build, then stamp version info."""
    opts, version = parse_options(), read_version()

    # Default to a one-file build unless the caller asked for one-dir
    onedir = '--onedir' in opts or '-D' in opts
    if not onedir and '-F' not in opts and '--onefile' not in opts:
        opts.append('--onefile')

    name, final_file = exe(onedir)
    print(f'Building yt-dlp v{version} for {OS_NAME} {platform.machine()} with options {opts}')
    print('Remember to update the version using "devscripts/update-version.py"')
    if not os.path.isfile('yt_dlp/extractor/lazy_extractors.py'):
        print('WARNING: Building without lazy_extractors. Run '
              '"devscripts/make_lazy_extractors.py" to build lazy extractors', file=sys.stderr)
    print(f'Destination: {final_file}\n')

    opts = [
        f'--name={name}',
        '--icon=devscripts/logo.ico',
        '--upx-exclude=vcruntime140.dll',
        # Ref: https://github.com/yt-dlp/yt-dlp/issues/13311
        # https://github.com/pyinstaller/pyinstaller/issues/9149
        '--exclude-module=pkg_resources',
        '--noconfirm',
        '--additional-hooks-dir=yt_dlp/__pyinstaller',
        *opts,  # user-supplied options may override the defaults above
        'yt_dlp/__main__.py',
    ]
    print(f'Running PyInstaller with {opts}')
    run_pyinstaller(opts)
    set_version_info(final_file, version)


def parse_options():
    """Return the CLI arguments, handling the legacy leading '32'/'64' argument."""
    # Compatibility with older arguments
    opts = sys.argv[1:]
    if opts[0:1] in (['32'], ['64']):
        if ARCH != opts[0]:
            raise Exception(f'{opts[0]}bit executable cannot be built on a {ARCH}bit system')
        opts = opts[1:]
    return opts


def exe(onedir):
    """@returns (name, path)"""
    platform_name, machine, extension = {
        'win32': (None, MACHINE, '.exe'),
        'darwin': ('macos', None, None),
    }.get(OS_NAME, (OS_NAME, MACHINE, None))

    name = '_'.join(filter(None, (
        'yt-dlp',
        platform_name,
        machine,
    )))

    return name, ''.join(filter(None, (
        'dist/',
        onedir and f'{name}/',
        name,
        extension,
    )))


def version_to_list(version):
    # Windows version resources require exactly four numeric components
    version_list = version.split('.')
    return list(map(int, version_list)) + [0] * (4 - len(version_list))


def set_version_info(exe, version):
    # Version resources only exist in the PE format, so Windows only
    if OS_NAME == 'win32':
        windows_set_version(exe, version)


def windows_set_version(exe, version):
    """Embed a Windows VERSIONINFO resource into the built executable."""
    from PyInstaller.utils.win32.versioninfo import (
        FixedFileInfo,
        StringFileInfo,
        StringStruct,
        StringTable,
        VarFileInfo,
        VarStruct,
        VSVersionInfo,
    )

    try:
        from PyInstaller.utils.win32.versioninfo import SetVersion
    except ImportError:  # Pyinstaller >= 5.8
        from PyInstaller.utils.win32.versioninfo import write_version_info_to_executable as SetVersion

    version_list = version_to_list(version)
    suffix = MACHINE and f'_{MACHINE}'
    SetVersion(exe, VSVersionInfo(
        ffi=FixedFileInfo(
            filevers=version_list,
            prodvers=version_list,
            mask=0x3F,
            flags=0x0,
            OS=0x4,
            fileType=0x1,
            subtype=0x0,
            date=(0, 0),
        ),
        kids=[
            StringFileInfo([StringTable('040904B0', [
                StringStruct('Comments', f'yt-dlp{suffix} Command Line Interface'),
                StringStruct('CompanyName', 'https://github.com/yt-dlp'),
                StringStruct('FileDescription', 'yt-dlp%s' % (MACHINE and f' ({MACHINE})')),
                StringStruct('FileVersion', version),
                StringStruct('InternalName', f'yt-dlp{suffix}'),
                StringStruct('OriginalFilename', f'yt-dlp{suffix}.exe'),
                StringStruct('ProductName', f'yt-dlp{suffix}'),
                StringStruct(
                    'ProductVersion',
                    f'{version}{suffix} on Python {platform.python_version()}'),
            ])]),
            VarFileInfo([VarStruct('Translation', [0, 1200])]),
        ],
    ))


if __name__ == '__main__':
    main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/cookies.py
yt_dlp/cookies.py
import base64 import collections import contextlib import datetime as dt import functools import glob import hashlib import http.cookiejar import http.cookies import io import json import os import re import shutil import struct import subprocess import sys import tempfile import time import urllib.request from enum import Enum, auto from .aes import ( aes_cbc_decrypt_bytes, aes_gcm_decrypt_and_verify_bytes, unpad_pkcs7, ) from .dependencies import ( _SECRETSTORAGE_UNAVAILABLE_REASON, secretstorage, sqlite3, ) from .minicurses import MultilinePrinter, QuietMultilinePrinter from .utils import ( DownloadError, YoutubeDLError, Popen, error_to_str, expand_path, is_path_like, sanitize_url, str_or_none, try_call, write_string, ) from .utils._utils import _YDLLogger from .utils.networking import normalize_url CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi', 'whale'} SUPPORTED_BROWSERS = CHROMIUM_BASED_BROWSERS | {'firefox', 'safari'} class YDLLogger(_YDLLogger): def warning(self, message, only_once=False): # compat return super().warning(message, once=only_once) class ProgressBar(MultilinePrinter): _DELAY, _timer = 0.1, 0 def print(self, message): if time.time() - self._timer > self._DELAY: self.print_at_line(f'[Cookies] {message}', 0) self._timer = time.time() def progress_bar(self): """Return a context manager with a print method. 
(Optional)""" # Do not print to files/pipes, loggers, or when --no-progress is used if not self._ydl or self._ydl.params.get('noprogress') or self._ydl.params.get('logger'): return file = self._ydl._out_files.error try: if not file.isatty(): return except BaseException: return return self.ProgressBar(file, preserve_output=False) def _create_progress_bar(logger): if hasattr(logger, 'progress_bar'): printer = logger.progress_bar() if printer: return printer printer = QuietMultilinePrinter() printer.print = lambda _: None return printer class CookieLoadError(YoutubeDLError): pass def load_cookies(cookie_file, browser_specification, ydl): try: cookie_jars = [] if browser_specification is not None: browser_name, profile, keyring, container = _parse_browser_specification(*browser_specification) cookie_jars.append( extract_cookies_from_browser(browser_name, profile, YDLLogger(ydl), keyring=keyring, container=container)) if cookie_file is not None: is_filename = is_path_like(cookie_file) if is_filename: cookie_file = expand_path(cookie_file) jar = YoutubeDLCookieJar(cookie_file) if not is_filename or os.access(cookie_file, os.R_OK): jar.load() cookie_jars.append(jar) return _merge_cookie_jars(cookie_jars) except Exception: raise CookieLoadError('failed to load cookies') def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(), *, keyring=None, container=None): if browser_name == 'firefox': return _extract_firefox_cookies(profile, container, logger) elif browser_name == 'safari': return _extract_safari_cookies(profile, logger) elif browser_name in CHROMIUM_BASED_BROWSERS: return _extract_chrome_cookies(browser_name, profile, keyring, logger) else: raise ValueError(f'unknown browser: {browser_name}') def _extract_firefox_cookies(profile, container, logger): MAX_SUPPORTED_DB_SCHEMA_VERSION = 17 logger.info('Extracting cookies from firefox') if not sqlite3: logger.warning('Cannot extract cookies from firefox without sqlite3 support. 
' 'Please use a Python interpreter compiled with sqlite3 support') return YoutubeDLCookieJar() if profile is None: search_roots = list(_firefox_browser_dirs()) elif _is_path(profile): search_roots = [profile] else: search_roots = [os.path.join(path, profile) for path in _firefox_browser_dirs()] search_root = ', '.join(map(repr, search_roots)) cookie_database_path = _newest(_firefox_cookie_dbs(search_roots)) if cookie_database_path is None: raise FileNotFoundError(f'could not find firefox cookies database in {search_root}') logger.debug(f'Extracting cookies from: "{cookie_database_path}"') container_id = None if container not in (None, 'none'): containers_path = os.path.join(os.path.dirname(cookie_database_path), 'containers.json') if not os.path.isfile(containers_path) or not os.access(containers_path, os.R_OK): raise FileNotFoundError(f'could not read containers.json in {search_root}') with open(containers_path, encoding='utf8') as containers: identities = json.load(containers).get('identities', []) container_id = next((context.get('userContextId') for context in identities if container in ( context.get('name'), try_call(lambda: re.fullmatch(r'userContext([^\.]+)\.label', context['l10nID']).group()), )), None) if not isinstance(container_id, int): raise ValueError(f'could not find firefox container "{container}" in containers.json') with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir: cursor = _open_database_copy(cookie_database_path, tmpdir) with contextlib.closing(cursor.connection): db_schema_version = cursor.execute('PRAGMA user_version;').fetchone()[0] if db_schema_version > MAX_SUPPORTED_DB_SCHEMA_VERSION: logger.warning(f'Possibly unsupported firefox cookies database version: {db_schema_version}') else: logger.debug(f'Firefox cookies database version: {db_schema_version}') if isinstance(container_id, int): logger.debug( f'Only loading cookies from firefox container "{container}", ID {container_id}') cursor.execute( 'SELECT host, name, value, path, 
expiry, isSecure FROM moz_cookies WHERE originAttributes LIKE ? OR originAttributes LIKE ?', (f'%userContextId={container_id}', f'%userContextId={container_id}&%')) elif container == 'none': logger.debug('Only loading cookies not belonging to any container') cursor.execute( 'SELECT host, name, value, path, expiry, isSecure FROM moz_cookies WHERE NOT INSTR(originAttributes,"userContextId=")') else: cursor.execute('SELECT host, name, value, path, expiry, isSecure FROM moz_cookies') jar = YoutubeDLCookieJar() with _create_progress_bar(logger) as progress_bar: table = cursor.fetchall() total_cookie_count = len(table) for i, (host, name, value, path, expiry, is_secure) in enumerate(table): progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}') # FF142 upgraded cookies DB to schema version 16 and started using milliseconds for cookie expiry # Ref: https://github.com/mozilla-firefox/firefox/commit/5869af852cd20425165837f6c2d9971f3efba83d if db_schema_version >= 16 and expiry is not None: expiry /= 1000 cookie = http.cookiejar.Cookie( version=0, name=name, value=value, port=None, port_specified=False, domain=host, domain_specified=bool(host), domain_initial_dot=host.startswith('.'), path=path, path_specified=bool(path), secure=is_secure, expires=expiry, discard=False, comment=None, comment_url=None, rest={}) jar.set_cookie(cookie) logger.info(f'Extracted {len(jar)} cookies from firefox') return jar def _firefox_browser_dirs(): if sys.platform in ('cygwin', 'win32'): yield from map(os.path.expandvars, ( R'%APPDATA%\Mozilla\Firefox\Profiles', R'%LOCALAPPDATA%\Packages\Mozilla.Firefox_n80bbvh6b1yt2\LocalCache\Roaming\Mozilla\Firefox\Profiles', )) elif sys.platform == 'darwin': yield os.path.expanduser('~/Library/Application Support/Firefox/Profiles') else: yield from map(os.path.expanduser, ( # New installations of FF147+ respect the XDG base directory specification # Ref: https://bugzilla.mozilla.org/show_bug.cgi?id=259356 os.path.join(_config_home(), 
'mozilla/firefox'), # Existing FF version<=146 installations '~/.mozilla/firefox', # Flatpak XDG: https://docs.flatpak.org/en/latest/conventions.html#xdg-base-directories '~/.var/app/org.mozilla.firefox/config/mozilla/firefox', '~/.var/app/org.mozilla.firefox/.mozilla/firefox', # Snap installations do not respect the XDG base directory specification '~/snap/firefox/common/.mozilla/firefox', )) def _firefox_cookie_dbs(roots): for root in map(os.path.abspath, roots): for pattern in ('', '*/', 'Profiles/*/'): yield from glob.iglob(os.path.join(root, pattern, 'cookies.sqlite')) def _get_chromium_based_browser_settings(browser_name): # https://chromium.googlesource.com/chromium/src/+/HEAD/docs/user_data_dir.md if sys.platform in ('cygwin', 'win32'): appdata_local = os.path.expandvars('%LOCALAPPDATA%') appdata_roaming = os.path.expandvars('%APPDATA%') browser_dir = { 'brave': os.path.join(appdata_local, R'BraveSoftware\Brave-Browser\User Data'), 'chrome': os.path.join(appdata_local, R'Google\Chrome\User Data'), 'chromium': os.path.join(appdata_local, R'Chromium\User Data'), 'edge': os.path.join(appdata_local, R'Microsoft\Edge\User Data'), 'opera': os.path.join(appdata_roaming, R'Opera Software\Opera Stable'), 'vivaldi': os.path.join(appdata_local, R'Vivaldi\User Data'), 'whale': os.path.join(appdata_local, R'Naver\Naver Whale\User Data'), }[browser_name] elif sys.platform == 'darwin': appdata = os.path.expanduser('~/Library/Application Support') browser_dir = { 'brave': os.path.join(appdata, 'BraveSoftware/Brave-Browser'), 'chrome': os.path.join(appdata, 'Google/Chrome'), 'chromium': os.path.join(appdata, 'Chromium'), 'edge': os.path.join(appdata, 'Microsoft Edge'), 'opera': os.path.join(appdata, 'com.operasoftware.Opera'), 'vivaldi': os.path.join(appdata, 'Vivaldi'), 'whale': os.path.join(appdata, 'Naver/Whale'), }[browser_name] else: config = _config_home() browser_dir = { 'brave': os.path.join(config, 'BraveSoftware/Brave-Browser'), 'chrome': os.path.join(config, 
'google-chrome'), 'chromium': os.path.join(config, 'chromium'), 'edge': os.path.join(config, 'microsoft-edge'), 'opera': os.path.join(config, 'opera'), 'vivaldi': os.path.join(config, 'vivaldi'), 'whale': os.path.join(config, 'naver-whale'), }[browser_name] # Linux keyring names can be determined by snooping on dbus while opening the browser in KDE: # dbus-monitor "interface='org.kde.KWallet'" "type=method_return" keyring_name = { 'brave': 'Brave', 'chrome': 'Chrome', 'chromium': 'Chromium', 'edge': 'Microsoft Edge' if sys.platform == 'darwin' else 'Chromium', 'opera': 'Opera' if sys.platform == 'darwin' else 'Chromium', 'vivaldi': 'Vivaldi' if sys.platform == 'darwin' else 'Chrome', 'whale': 'Whale', }[browser_name] browsers_without_profiles = {'opera'} return { 'browser_dir': browser_dir, 'keyring_name': keyring_name, 'supports_profiles': browser_name not in browsers_without_profiles, } def _extract_chrome_cookies(browser_name, profile, keyring, logger): logger.info(f'Extracting cookies from {browser_name}') if not sqlite3: logger.warning(f'Cannot extract cookies from {browser_name} without sqlite3 support. 
' 'Please use a Python interpreter compiled with sqlite3 support') return YoutubeDLCookieJar() config = _get_chromium_based_browser_settings(browser_name) if profile is None: search_root = config['browser_dir'] elif _is_path(profile): search_root = profile config['browser_dir'] = os.path.dirname(profile) if config['supports_profiles'] else profile else: if config['supports_profiles']: search_root = os.path.join(config['browser_dir'], profile) else: logger.error(f'{browser_name} does not support profiles') search_root = config['browser_dir'] cookie_database_path = _newest(_find_files(search_root, 'Cookies', logger)) if cookie_database_path is None: raise FileNotFoundError(f'could not find {browser_name} cookies database in "{search_root}"') logger.debug(f'Extracting cookies from: "{cookie_database_path}"') with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir: cursor = None try: cursor = _open_database_copy(cookie_database_path, tmpdir) # meta_version is necessary to determine if we need to trim the hash prefix from the cookies # Ref: https://chromium.googlesource.com/chromium/src/+/b02dcebd7cafab92770734dc2bc317bd07f1d891/net/extras/sqlite/sqlite_persistent_cookie_store.cc#223 meta_version = int(cursor.execute('SELECT value FROM meta WHERE key = "version"').fetchone()[0]) decryptor = get_cookie_decryptor( config['browser_dir'], config['keyring_name'], logger, keyring=keyring, meta_version=meta_version) cursor.connection.text_factory = bytes column_names = _get_column_names(cursor, 'cookies') secure_column = 'is_secure' if 'is_secure' in column_names else 'secure' cursor.execute(f'SELECT host_key, name, value, encrypted_value, path, expires_utc, {secure_column} FROM cookies') jar = YoutubeDLCookieJar() failed_cookies = 0 unencrypted_cookies = 0 with _create_progress_bar(logger) as progress_bar: table = cursor.fetchall() total_cookie_count = len(table) for i, line in enumerate(table): progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}') 
is_encrypted, cookie = _process_chrome_cookie(decryptor, *line) if not cookie: failed_cookies += 1 continue elif not is_encrypted: unencrypted_cookies += 1 jar.set_cookie(cookie) if failed_cookies > 0: failed_message = f' ({failed_cookies} could not be decrypted)' else: failed_message = '' logger.info(f'Extracted {len(jar)} cookies from {browser_name}{failed_message}') counts = decryptor._cookie_counts.copy() counts['unencrypted'] = unencrypted_cookies logger.debug(f'cookie version breakdown: {counts}') return jar except PermissionError as error: if os.name == 'nt' and error.errno == 13: message = 'Could not copy Chrome cookie database. See https://github.com/yt-dlp/yt-dlp/issues/7271 for more info' logger.error(message) raise DownloadError(message) # force exit raise finally: if cursor is not None: cursor.connection.close() def _process_chrome_cookie(decryptor, host_key, name, value, encrypted_value, path, expires_utc, is_secure): host_key = host_key.decode() name = name.decode() value = value.decode() path = path.decode() is_encrypted = not value and encrypted_value if is_encrypted: value = decryptor.decrypt(encrypted_value) if value is None: return is_encrypted, None # In chrome, session cookies have expires_utc set to 0 # In our cookie-store, cookies that do not expire should have expires set to None if not expires_utc: expires_utc = None return is_encrypted, http.cookiejar.Cookie( version=0, name=name, value=value, port=None, port_specified=False, domain=host_key, domain_specified=bool(host_key), domain_initial_dot=host_key.startswith('.'), path=path, path_specified=bool(path), secure=is_secure, expires=expires_utc, discard=False, comment=None, comment_url=None, rest={}) class ChromeCookieDecryptor: """ Overview: Linux: - cookies are either v10 or v11 - v10: AES-CBC encrypted with a fixed key - also attempts empty password if decryption fails - v11: AES-CBC encrypted with an OS protected key (keyring) - also attempts empty password if decryption fails - v11 
keys can be stored in various places depending on the activate desktop environment [2] Mac: - cookies are either v10 or not v10 - v10: AES-CBC encrypted with an OS protected key (keyring) and more key derivation iterations than linux - not v10: 'old data' stored as plaintext Windows: - cookies are either v10 or not v10 - v10: AES-GCM encrypted with a key which is encrypted with DPAPI - not v10: encrypted with DPAPI Sources: - [1] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/ - [2] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/sync/key_storage_linux.cc - KeyStorageLinux::CreateService """ _cookie_counts = {} def decrypt(self, encrypted_value): raise NotImplementedError('Must be implemented by sub classes') def get_cookie_decryptor(browser_root, browser_keyring_name, logger, *, keyring=None, meta_version=None): if sys.platform == 'darwin': return MacChromeCookieDecryptor(browser_keyring_name, logger, meta_version=meta_version) elif sys.platform in ('win32', 'cygwin'): return WindowsChromeCookieDecryptor(browser_root, logger, meta_version=meta_version) return LinuxChromeCookieDecryptor(browser_keyring_name, logger, keyring=keyring, meta_version=meta_version) class LinuxChromeCookieDecryptor(ChromeCookieDecryptor): def __init__(self, browser_keyring_name, logger, *, keyring=None, meta_version=None): self._logger = logger self._v10_key = self.derive_key(b'peanuts') self._empty_key = self.derive_key(b'') self._cookie_counts = {'v10': 0, 'v11': 0, 'other': 0} self._browser_keyring_name = browser_keyring_name self._keyring = keyring self._meta_version = meta_version or 0 @functools.cached_property def _v11_key(self): password = _get_linux_keyring_password(self._browser_keyring_name, self._keyring, self._logger) return None if password is None else self.derive_key(password) @staticmethod def derive_key(password): # values from # 
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/sync/os_crypt_linux.cc return pbkdf2_sha1(password, salt=b'saltysalt', iterations=1, key_length=16) def decrypt(self, encrypted_value): """ following the same approach as the fix in [1]: if cookies fail to decrypt then attempt to decrypt with an empty password. The failure detection is not the same as what chromium uses so the results won't be perfect References: - [1] https://chromium.googlesource.com/chromium/src/+/bbd54702284caca1f92d656fdcadf2ccca6f4165%5E%21/ - a bugfix to try an empty password as a fallback """ version = encrypted_value[:3] ciphertext = encrypted_value[3:] if version == b'v10': self._cookie_counts['v10'] += 1 return _decrypt_aes_cbc_multi( ciphertext, (self._v10_key, self._empty_key), self._logger, hash_prefix=self._meta_version >= 24) elif version == b'v11': self._cookie_counts['v11'] += 1 if self._v11_key is None: self._logger.warning('cannot decrypt v11 cookies: no key found', only_once=True) return None return _decrypt_aes_cbc_multi( ciphertext, (self._v11_key, self._empty_key), self._logger, hash_prefix=self._meta_version >= 24) else: self._logger.warning(f'unknown cookie version: "{version}"', only_once=True) self._cookie_counts['other'] += 1 return None class MacChromeCookieDecryptor(ChromeCookieDecryptor): def __init__(self, browser_keyring_name, logger, meta_version=None): self._logger = logger password = _get_mac_keyring_password(browser_keyring_name, logger) self._v10_key = None if password is None else self.derive_key(password) self._cookie_counts = {'v10': 0, 'other': 0} self._meta_version = meta_version or 0 @staticmethod def derive_key(password): # values from # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/sync/os_crypt_mac.mm return pbkdf2_sha1(password, salt=b'saltysalt', iterations=1003, key_length=16) def decrypt(self, encrypted_value): version = encrypted_value[:3] ciphertext = encrypted_value[3:] if 
version == b'v10': self._cookie_counts['v10'] += 1 if self._v10_key is None: self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True) return None return _decrypt_aes_cbc_multi( ciphertext, (self._v10_key,), self._logger, hash_prefix=self._meta_version >= 24) else: self._cookie_counts['other'] += 1 # other prefixes are considered 'old data' which were stored as plaintext # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/sync/os_crypt_mac.mm return encrypted_value class WindowsChromeCookieDecryptor(ChromeCookieDecryptor): def __init__(self, browser_root, logger, meta_version=None): self._logger = logger self._v10_key = _get_windows_v10_key(browser_root, logger) self._cookie_counts = {'v10': 0, 'other': 0} self._meta_version = meta_version or 0 def decrypt(self, encrypted_value): version = encrypted_value[:3] ciphertext = encrypted_value[3:] if version == b'v10': self._cookie_counts['v10'] += 1 if self._v10_key is None: self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True) return None # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/sync/os_crypt_win.cc # kNonceLength nonce_length = 96 // 8 # boringssl # EVP_AEAD_AES_GCM_TAG_LEN authentication_tag_length = 16 raw_ciphertext = ciphertext nonce = raw_ciphertext[:nonce_length] ciphertext = raw_ciphertext[nonce_length:-authentication_tag_length] authentication_tag = raw_ciphertext[-authentication_tag_length:] return _decrypt_aes_gcm( ciphertext, self._v10_key, nonce, authentication_tag, self._logger, hash_prefix=self._meta_version >= 24) else: self._cookie_counts['other'] += 1 # any other prefix means the data is DPAPI encrypted # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/sync/os_crypt_win.cc return _decrypt_windows_dpapi(encrypted_value, self._logger).decode() def _extract_safari_cookies(profile, logger): if sys.platform not in ('darwin', 'ios'): raise 
ValueError(f'unsupported platform: {sys.platform}') if profile: cookies_path = os.path.expanduser(profile) if not os.path.isfile(cookies_path): raise FileNotFoundError('custom safari cookies database not found') else: cookies_path = os.path.expanduser('~/Library/Cookies/Cookies.binarycookies') if not os.path.isfile(cookies_path): logger.debug('Trying secondary cookie location') cookies_path = os.path.expanduser('~/Library/Containers/com.apple.Safari/Data/Library/Cookies/Cookies.binarycookies') if not os.path.isfile(cookies_path): raise FileNotFoundError('could not find safari cookies database') with open(cookies_path, 'rb') as f: cookies_data = f.read() jar = parse_safari_cookies(cookies_data, logger=logger) logger.info(f'Extracted {len(jar)} cookies from safari') return jar class ParserError(Exception): pass class DataParser: def __init__(self, data, logger): self._data = data self.cursor = 0 self._logger = logger def read_bytes(self, num_bytes): if num_bytes < 0: raise ParserError(f'invalid read of {num_bytes} bytes') end = self.cursor + num_bytes if end > len(self._data): raise ParserError('reached end of input') data = self._data[self.cursor:end] self.cursor = end return data def expect_bytes(self, expected_value, message): value = self.read_bytes(len(expected_value)) if value != expected_value: raise ParserError(f'unexpected value: {value} != {expected_value} ({message})') def read_uint(self, big_endian=False): data_format = '>I' if big_endian else '<I' return struct.unpack(data_format, self.read_bytes(4))[0] def read_double(self, big_endian=False): data_format = '>d' if big_endian else '<d' return struct.unpack(data_format, self.read_bytes(8))[0] def read_cstring(self): buffer = [] while True: c = self.read_bytes(1) if c == b'\x00': return b''.join(buffer).decode() else: buffer.append(c) def skip(self, num_bytes, description='unknown'): if num_bytes > 0: self._logger.debug(f'skipping {num_bytes} bytes ({description}): {self.read_bytes(num_bytes)!r}') elif 
num_bytes < 0: raise ParserError(f'invalid skip of {num_bytes} bytes') def skip_to(self, offset, description='unknown'): self.skip(offset - self.cursor, description) def skip_to_end(self, description='unknown'): self.skip_to(len(self._data), description) def _mac_absolute_time_to_posix(timestamp): return int((dt.datetime(2001, 1, 1, 0, 0, tzinfo=dt.timezone.utc) + dt.timedelta(seconds=timestamp)).timestamp()) def _parse_safari_cookies_header(data, logger): p = DataParser(data, logger) p.expect_bytes(b'cook', 'database signature') number_of_pages = p.read_uint(big_endian=True) page_sizes = [p.read_uint(big_endian=True) for _ in range(number_of_pages)] return page_sizes, p.cursor def _parse_safari_cookies_page(data, jar, logger): p = DataParser(data, logger) p.expect_bytes(b'\x00\x00\x01\x00', 'page signature') number_of_cookies = p.read_uint() record_offsets = [p.read_uint() for _ in range(number_of_cookies)] if number_of_cookies == 0: logger.debug(f'a cookies page of size {len(data)} has no cookies') return p.skip_to(record_offsets[0], 'unknown page header field') with _create_progress_bar(logger) as progress_bar: for i, record_offset in enumerate(record_offsets): progress_bar.print(f'Loading cookie {i: 6d}/{number_of_cookies: 6d}') p.skip_to(record_offset, 'space between records') record_length = _parse_safari_cookies_record(data[record_offset:], jar, logger) p.read_bytes(record_length) p.skip_to_end('space in between pages') def _parse_safari_cookies_record(data, jar, logger): p = DataParser(data, logger) record_size = p.read_uint() p.skip(4, 'unknown record field 1') flags = p.read_uint() is_secure = bool(flags & 0x0001) p.skip(4, 'unknown record field 2') domain_offset = p.read_uint() name_offset = p.read_uint() path_offset = p.read_uint() value_offset = p.read_uint() p.skip(8, 'unknown record field 3') expiration_date = _mac_absolute_time_to_posix(p.read_double()) _creation_date = _mac_absolute_time_to_posix(p.read_double()) # noqa: F841 try: 
p.skip_to(domain_offset) domain = p.read_cstring() p.skip_to(name_offset) name = p.read_cstring() p.skip_to(path_offset) path = p.read_cstring() p.skip_to(value_offset) value = p.read_cstring() except UnicodeDecodeError: logger.warning('failed to parse Safari cookie because UTF-8 decoding failed', only_once=True) return record_size p.skip_to(record_size, 'space at the end of the record') cookie = http.cookiejar.Cookie( version=0, name=name, value=value, port=None, port_specified=False, domain=domain, domain_specified=bool(domain), domain_initial_dot=domain.startswith('.'), path=path, path_specified=bool(path), secure=is_secure, expires=expiration_date, discard=False, comment=None, comment_url=None, rest={}) jar.set_cookie(cookie) return record_size def parse_safari_cookies(data, jar=None, logger=YDLLogger()): """ References: - https://github.com/libyal/dtformats/blob/main/documentation/Safari%20Cookies.asciidoc - this data appears to be out of date but the important parts of the database structure is the same - there are a few bytes here and there which are skipped during parsing """ if jar is None: jar = YoutubeDLCookieJar() page_sizes, body_start = _parse_safari_cookies_header(data, logger) p = DataParser(data[body_start:], logger) for page_size in page_sizes: _parse_safari_cookies_page(p.read_bytes(page_size), jar, logger) p.skip_to_end('footer') return jar class _LinuxDesktopEnvironment(Enum): """ https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.h DesktopEnvironment """ OTHER = auto() CINNAMON = auto() DEEPIN = auto() GNOME = auto() KDE3 = auto() KDE4 = auto() KDE5 = auto() KDE6 = auto() PANTHEON = auto() UKUI = auto() UNITY = auto() XFCE = auto() LXQT = auto() class _LinuxKeyring(Enum): """ https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/sync/key_storage_util_linux.h SelectedLinuxBackend """ KWALLET = auto() # KDE4 KWALLET5 = auto() KWALLET6 = auto() GNOMEKEYRING = auto() BASICTEXT = auto() 
SUPPORTED_KEYRINGS = _LinuxKeyring.__members__.keys() def _get_linux_desktop_environment(env, logger): """ https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.cc GetDesktopEnvironment """ xdg_current_desktop = env.get('XDG_CURRENT_DESKTOP', None) desktop_session = env.get('DESKTOP_SESSION', '') if xdg_current_desktop is not None: for part in map(str.strip, xdg_current_desktop.split(':')): if part == 'Unity': if 'gnome-fallback' in desktop_session: return _LinuxDesktopEnvironment.GNOME else: return _LinuxDesktopEnvironment.UNITY elif part == 'Deepin': return _LinuxDesktopEnvironment.DEEPIN elif part == 'GNOME': return _LinuxDesktopEnvironment.GNOME elif part == 'X-Cinnamon': return _LinuxDesktopEnvironment.CINNAMON elif part == 'KDE':
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/version.py
yt_dlp/version.py
# Autogenerated by devscripts/update-version.py __version__ = '2025.12.08' RELEASE_GIT_HEAD = '7a52ff29d86efc8f3adeba977b2009ce40b8e52e' VARIANT = None UPDATE_HINT = None CHANNEL = 'stable' ORIGIN = 'yt-dlp/yt-dlp' _pkg_version = '2025.12.08'
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/YoutubeDL.py
yt_dlp/YoutubeDL.py
import collections import contextlib import copy import datetime as dt import errno import fileinput import functools import http.cookiejar import io import itertools import json import locale import operator import os import random import re import shutil import string import subprocess import sys import tempfile import time import tokenize import traceback import unicodedata from .cache import Cache from .compat import urllib # isort: split from .compat import urllib_req_to_req from .cookies import CookieLoadError, LenientSimpleCookie, load_cookies from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name from .downloader.rtmp import rtmpdump_version from .extractor import gen_extractor_classes, get_info_extractor, import_extractors from .extractor.common import UnsupportedURLIE from .extractor.openload import PhantomJSwrapper from .globals import ( IN_CLI, LAZY_EXTRACTORS, WINDOWS_VT_MODE, plugin_ies, plugin_ies_overrides, plugin_pps, all_plugins_loaded, plugin_dirs, supported_js_runtimes, supported_remote_components, ) from .minicurses import format_text from .networking import HEADRequest, Request, RequestDirector from .networking.common import _REQUEST_HANDLERS, _RH_PREFERENCES from .networking.exceptions import ( HTTPError, NoSupportingHandlers, RequestError, SSLError, network_exceptions, ) from .networking.impersonate import ImpersonateRequestHandler, ImpersonateTarget from .plugins import directories as plugin_directories, load_all_plugins from .postprocessor import ( EmbedThumbnailPP, FFmpegFixupDuplicateMoovPP, FFmpegFixupDurationPP, FFmpegFixupM3u8PP, FFmpegFixupM4aPP, FFmpegFixupStretchedPP, FFmpegFixupTimestampPP, FFmpegMergerPP, FFmpegPostProcessor, FFmpegVideoConvertorPP, MoveFilesAfterDownloadPP, get_postprocessor, ) from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping from .update import ( REPOSITORY, _get_system_deprecation, _get_outdated_warning, _make_label, current_git_head, detect_variant, ) from 
.utils import ( DEFAULT_OUTTMPL, IDENTITY, LINK_TEMPLATES, MEDIA_EXTENSIONS, NO_DEFAULT, NUMBER_RE, OUTTMPL_TYPES, POSTPROCESS_WHEN, STR_FORMAT_RE_TMPL, STR_FORMAT_TYPES, ContentTooShortError, DateRange, DownloadCancelled, DownloadError, EntryNotInPlaylist, ExistingVideoReached, ExtractorError, FormatSorter, GeoRestrictedError, ISO3166Utils, LazyList, MaxDownloadsReached, Namespace, PagedList, PlaylistEntries, Popen, PostProcessingError, ReExtractInfo, RejectedVideoReached, SameFileError, UnavailableVideoError, UserNotLive, YoutubeDLError, age_restricted, bug_reports_message, date_from_str, deprecation_warning, determine_ext, determine_protocol, encode_compat_str, escapeHTML, expand_path, extract_basic_auth, filter_dict, float_or_none, format_bytes, format_decimal_suffix, format_field, formatSeconds, get_compatible_ext, get_domain, int_or_none, iri_to_uri, is_path_like, join_nonempty, locked_file, make_archive_id, make_dir, number_of_digits, orderedSet, orderedSet_from_options, parse_filesize, preferredencoding, prepend_extension, remove_terminal_sequences, render_table, replace_extension, sanitize_filename, sanitize_path, sanitize_url, shell_quote, str_or_none, strftime_or_none, subtitles_filename, supports_terminal_sequences, system_identifier, filesize_from_tbr, timetuple_from_msec, to_high_limit_path, traverse_obj, try_call, try_get, url_basename, variadic, windows_enable_vt_mode, write_json_file, write_string, ) from .utils._utils import _UnsafeExtensionError, _YDLLogger, _ProgressState from .utils.networking import ( HTTPHeaderDict, clean_headers, clean_proxies, std_headers, ) from .version import CHANNEL, ORIGIN, RELEASE_GIT_HEAD, VARIANT, __version__ if os.name == 'nt': import ctypes def _catch_unsafe_extension_error(func): @functools.wraps(func) def wrapper(self, *args, **kwargs): try: return func(self, *args, **kwargs) except _UnsafeExtensionError as error: self.report_error( f'The extracted extension ({error.extension!r}) is unusual ' 'and will be 
skipped for safety reasons. ' f'If you believe this is an error{bug_reports_message(",")}') return wrapper class YoutubeDL: """YoutubeDL class. YoutubeDL objects are the ones responsible of downloading the actual video file and writing it to disk if the user has requested it, among some other tasks. In most cases there should be one per program. As, given a video URL, the downloader doesn't know how to extract all the needed information, task that InfoExtractors do, it has to pass the URL to one of them. For this, YoutubeDL objects have a method that allows InfoExtractors to be registered in a given order. When it is passed a URL, the YoutubeDL object handles it to the first InfoExtractor it finds that reports being able to handle it. The InfoExtractor extracts all the information about the video or videos the URL refers to, and YoutubeDL process the extracted information, possibly using a File Downloader to download the video. YoutubeDL objects accept a lot of parameters. In order not to saturate the object constructor with arguments, it receives a dictionary of options instead. These options are available through the params attribute for the InfoExtractors to use. The YoutubeDL also registers itself as the downloader in charge for the InfoExtractors that are added to it, so this is a "mutual registration". Available options: username: Username for authentication purposes. password: Password for authentication purposes. videopassword: Password for accessing a video. ap_mso: Adobe Pass multiple-system operator identifier. ap_username: Multiple-system operator account username. ap_password: Multiple-system operator account password. usenetrc: Use netrc for authentication instead. netrc_location: Location of the netrc file. Defaults to ~/.netrc. netrc_cmd: Use a shell command to get credentials verbose: Print additional info to stdout. quiet: Do not print messages to stdout. no_warnings: Do not print out anything for warnings. 
forceprint: A dict with keys WHEN mapped to a list of templates to print to stdout. The allowed keys are video or any of the items in utils.POSTPROCESS_WHEN. For compatibility, a single list is also accepted print_to_file: A dict with keys WHEN (same as forceprint) mapped to a list of tuples with (template, filename) forcejson: Force printing info_dict as JSON. dump_single_json: Force printing the info_dict of the whole playlist (or video) as a single JSON line. force_write_download_archive: Force writing download archive regardless of 'skip_download' or 'simulate'. simulate: Do not download the video files. If unset (or None), simulate only if listsubtitles, listformats or list_thumbnails is used format: Video format code. see "FORMAT SELECTION" for more details. You can also pass a function. The function takes 'ctx' as argument and returns the formats to download. See "build_format_selector" for an implementation allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded. ignore_no_formats_error: Ignore "No video formats" error. Usefull for extracting metadata even if the video is not actually available for download (experimental) format_sort: A list of fields by which to sort the video formats. See "Sorting Formats" for more details. format_sort_force: Force the given format_sort. see "Sorting Formats" for more details. prefer_free_formats: Whether to prefer video formats with free containers over non-free ones of the same quality. allow_multiple_video_streams: Allow multiple video streams to be merged into a single file allow_multiple_audio_streams: Allow multiple audio streams to be merged into a single file check_formats Whether to test if the formats are downloadable. Can be True (check all), False (check none), 'selected' (check selected formats), or None (check only if requested by extractor) paths: Dictionary of output paths. 
The allowed keys are 'home' 'temp' and the keys of OUTTMPL_TYPES (in utils/_utils.py) outtmpl: Dictionary of templates for output names. Allowed keys are 'default' and the keys of OUTTMPL_TYPES (in utils/_utils.py). For compatibility with youtube-dl, a single string can also be used outtmpl_na_placeholder: Placeholder for unavailable meta fields. restrictfilenames: Do not allow "&" and spaces in file names trim_file_name: Limit length of filename (extension excluded) windowsfilenames: True: Force filenames to be Windows compatible False: Sanitize filenames only minimally This option has no effect when running on Windows ignoreerrors: Do not stop on download/postprocessing errors. Can be 'only_download' to ignore only download errors. Default is 'only_download' for CLI, but False for API skip_playlist_after_errors: Number of allowed failures until the rest of the playlist is skipped allowed_extractors: List of regexes to match against extractor names that are allowed overwrites: Overwrite all video and metadata files if True, overwrite only non-video files if None and don't overwrite any file if False playlist_items: Specific indices of playlist to download. playlistrandom: Download playlist items in random order. lazy_playlist: Process playlist entries as they are received. matchtitle: Download only matching titles. rejecttitle: Reject downloads for matching titles. logger: A class having a `debug`, `warning` and `error` function where each has a single string parameter, the message to be logged. For compatibility reasons, both debug and info messages are passed to `debug`. A debug message will have a prefix of `[debug] ` to discern it from info messages. logtostderr: Print everything to stderr instead of stdout. consoletitle: Display progress in the console window's titlebar. 
writedescription: Write the video description to a .description file writeinfojson: Write the video description to a .info.json file clean_infojson: Remove internal metadata from the infojson getcomments: Extract video comments. This will not be written to disk unless writeinfojson is also given writethumbnail: Write the thumbnail image to a file allow_playlist_files: Whether to write playlists' description, infojson etc also to disk when using the 'write*' options write_all_thumbnails: Write all thumbnail formats to files writelink: Write an internet shortcut file, depending on the current platform (.url/.webloc/.desktop) writeurllink: Write a Windows internet shortcut file (.url) writewebloclink: Write a macOS internet shortcut file (.webloc) writedesktoplink: Write a Linux internet shortcut file (.desktop) writesubtitles: Write the video subtitles to a file writeautomaticsub: Write the automatically generated subtitles to a file listsubtitles: Lists all available subtitles for the video subtitlesformat: The format code for subtitles subtitleslangs: List of languages of the subtitles to download (can be regex). The list may contain "all" to refer to all the available subtitles. The language can be prefixed with a "-" to exclude it from the requested languages, e.g. ['all', '-live_chat'] keepvideo: Keep the video file after post-processing daterange: A utils.DateRange object, download only if the upload_date is in the range. skip_download: Skip the actual download of the video file cachedir: Location of the cache files in the filesystem. False to disable filesystem cache. noplaylist: Download single video instead of a playlist if in doubt. age_limit: An integer representing the user's age in years. Unsuitable videos for the given age are skipped. min_views: An integer representing the minimum view count the video must have in order to not be skipped. Videos without view count information are always downloaded. None for no limit. 
max_views: An integer representing the maximum view count. Videos that are more popular than that are not downloaded. Videos without view count information are always downloaded. None for no limit. download_archive: A set, or the name of a file where all downloads are recorded. Videos already present in the file are not downloaded again. break_on_existing: Stop the download process after attempting to download a file that is in the archive. break_per_url: Whether break_on_reject and break_on_existing should act on each input URL as opposed to for the entire queue cookiefile: File name or text stream from where cookies should be read and dumped to cookiesfrombrowser: A tuple containing the name of the browser, the profile name/path from where cookies are loaded, the name of the keyring, and the container name, e.g. ('chrome', ) or ('vivaldi', 'default', 'BASICTEXT') or ('firefox', 'default', None, 'Meta') legacyserverconnect: Explicitly allow HTTPS connection to servers that do not support RFC 5746 secure renegotiation nocheckcertificate: Do not verify SSL certificates client_certificate: Path to client certificate file in PEM format. May include the private key client_certificate_key: Path to private key file for client certificate client_certificate_password: Password for client certificate private key, if encrypted. If not provided and the key is encrypted, yt-dlp will ask interactively prefer_insecure: Use HTTP instead of HTTPS to retrieve information. (Only supported by some extractors) enable_file_urls: Enable file:// URLs. This is disabled by default for security reasons. http_headers: A dictionary of custom headers to be used for all requests proxy: URL of the proxy server to use geo_verification_proxy: URL of the proxy to use for IP address verification on geo-restricted sites. 
socket_timeout: Time to wait for unresponsive hosts, in seconds bidi_workaround: Work around buggy terminals without bidirectional text support, using fridibi debug_printtraffic:Print out sent and received HTTP traffic default_search: Prepend this string if an input url is not valid. 'auto' for elaborate guessing encoding: Use this encoding instead of the system-specified. extract_flat: Whether to resolve and process url_results further * False: Always process. Default for API * True: Never process * 'in_playlist': Do not process inside playlist/multi_video * 'discard': Always process, but don't return the result from inside playlist/multi_video * 'discard_in_playlist': Same as "discard", but only for playlists (not multi_video). Default for CLI wait_for_video: If given, wait for scheduled streams to become available. The value should be a tuple containing the range (min_secs, max_secs) to wait between retries postprocessors: A list of dictionaries, each with an entry * key: The name of the postprocessor. See yt_dlp/postprocessor/__init__.py for a list. * when: When to run the postprocessor. Allowed values are the entries of utils.POSTPROCESS_WHEN Assumed to be 'post_process' if not given progress_hooks: A list of functions that get called on download progress, with a dictionary with the entries * status: One of "downloading", "error", or "finished". Check this first and ignore unknown values. * info_dict: The extracted info_dict If status is one of "downloading", or "finished", the following properties may also be present: * filename: The final filename (always present) * tmpfilename: The filename we're currently writing to * downloaded_bytes: Bytes on disk * total_bytes: Size of the whole file, None if unknown * total_bytes_estimate: Guess of the eventual file size, None if unavailable. * elapsed: The number of seconds since download started. 
* eta: The estimated time in seconds, None if unknown * speed: The download speed in bytes/second, None if unknown * fragment_index: The counter of the currently downloaded video fragment. * fragment_count: The number of fragments (= individual files that will be merged) Progress hooks are guaranteed to be called at least once (with status "finished") if the download is successful. postprocessor_hooks: A list of functions that get called on postprocessing progress, with a dictionary with the entries * status: One of "started", "processing", or "finished". Check this first and ignore unknown values. * postprocessor: Name of the postprocessor * info_dict: The extracted info_dict Progress hooks are guaranteed to be called at least twice (with status "started" and "finished") if the processing is successful. merge_output_format: "/" separated list of extensions to use when merging formats. final_ext: Expected final extension; used to detect when the file was already downloaded and converted fixup: Automatically correct known faults of the file. One of: - "never": do nothing - "warn": only emit a warning - "detect_or_warn": check whether we can do anything about it, warn otherwise (default) source_address: Client-side IP address to bind to. impersonate: Client to impersonate for requests. An ImpersonateTarget (from yt_dlp.networking.impersonate) sleep_interval_requests: Number of seconds to sleep between requests during extraction sleep_interval: Number of seconds to sleep before each download when used alone or a lower bound of a range for randomized sleep before each download (minimum possible number of seconds to sleep) when used along with max_sleep_interval. max_sleep_interval:Upper bound of a range for randomized sleep before each download (maximum possible number of seconds to sleep). Must only be used along with sleep_interval. Actual sleep time will be a random float from range [sleep_interval; max_sleep_interval]. 
sleep_interval_subtitles: Number of seconds to sleep before each subtitle download listformats: Print an overview of available video formats and exit. list_thumbnails: Print a table of all thumbnails and exit. match_filter: A function that gets called for every video with the signature (info_dict, *, incomplete: bool) -> Optional[str] For backward compatibility with youtube-dl, the signature (info_dict) -> Optional[str] is also allowed. - If it returns a message, the video is ignored. - If it returns None, the video is downloaded. - If it returns utils.NO_DEFAULT, the user is interactively asked whether to download the video. - Raise utils.DownloadCancelled(msg) to abort remaining downloads when a video is rejected. match_filter_func in utils/_utils.py is one example for this. color: A Dictionary with output stream names as keys and their respective color policy as values. Can also just be a single color policy, in which case it applies to all outputs. Valid stream names are 'stdout' and 'stderr'. Valid color policies are one of 'always', 'auto', 'no_color', 'never', 'auto-tty' or 'no_color-tty'. geo_bypass: Bypass geographic restriction via faking X-Forwarded-For HTTP header geo_bypass_country: Two-letter ISO 3166-2 country code that will be used for explicit geographic restriction bypassing via faking X-Forwarded-For HTTP header geo_bypass_ip_block: IP range in CIDR notation that will be used similarly to geo_bypass_country external_downloader: A dictionary of protocol keys and the executable of the external downloader to use for it. The allowed protocols are default|http|ftp|m3u8|dash|rtsp|rtmp|mms. Set the value to 'native' to use the native downloader compat_opts: Compatibility options. See "Differences in default behavior". 
The following options do not work when used through the API: filename, abort-on-error, multistreams, no-live-chat, format-sort, no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json, allow-unsafe-ext, prefer-vp9-sort, mtime-by-default. Refer __init__.py for their implementation progress_template: Dictionary of templates for progress outputs. Allowed keys are 'download', 'postprocess', 'download-title' (console title) and 'postprocess-title'. The template is mapped on a dictionary with keys 'progress' and 'info' retry_sleep_functions: Dictionary of functions that takes the number of attempts as argument and returns the time to sleep in seconds. Allowed keys are 'http', 'fragment', 'file_access', 'extractor' download_ranges: A callback function that gets called for every video with the signature (info_dict, ydl) -> Iterable[Section]. Only the returned sections will be downloaded. Each Section is a dict with the following keys: * start_time: Start time of the section in seconds * end_time: End time of the section in seconds * title: Section title (Optional) * index: Section number (Optional) force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts noprogress: Do not print the progress bar live_from_start: Whether to download livestreams videos from the start warn_when_outdated: Emit a warning if the yt-dlp version is older than 90 days The following parameters are not used by YoutubeDL itself, they are used by the downloader (see yt_dlp/downloader/common.py): nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize, max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries, continuedl, hls_use_mpegts, http_chunk_size, external_downloader_args, concurrent_fragment_downloads, progress_delta. The following options are used by the post processors: ffmpeg_location: Location of the ffmpeg binary; either the path to the binary or its containing directory. 
postprocessor_args: A dictionary of postprocessor/executable keys (in lower case) and a list of additional command-line arguments for the postprocessor/executable. The dict can also have "PP+EXE" keys which are used when the given exe is used by the given PP. Use 'default' as the name for arguments to passed to all PP For compatibility with youtube-dl, a single list of args can also be used The following options are used by the extractors: extractor_retries: Number of times to retry for known errors (default: 3) dynamic_mpd: Whether to process dynamic DASH manifests (default: True) hls_split_discontinuity: Split HLS playlists into different formats at discontinuities such as ad breaks (default: False) extractor_args: A dictionary of arguments to be passed to the extractors. See "EXTRACTOR ARGUMENTS" for details. Argument values must always be a list of string(s). E.g. {'youtube': {'skip': ['dash', 'hls']}} js_runtimes: A dictionary of JavaScript runtime keys (in lower case) to enable and a dictionary of additional configuration for the runtime. Currently supported runtimes are 'deno', 'node', 'bun', and 'quickjs'. If None, the default runtime of "deno" will be enabled. The runtime configuration dictionary can have the following keys: - path: Path to the executable (optional) E.g. {'deno': {'path': '/path/to/deno'} remote_components: A list of remote components that are allowed to be fetched when required. Supported components: - ejs:npm (external JavaScript components from npm) - ejs:github (external JavaScript components from yt-dlp-ejs GitHub) By default, no remote components are allowed to be fetched. mark_watched: Mark videos watched (even with --simulate). Only for YouTube The following options are deprecated and may be removed in the future: break_on_reject: Stop the download process when encountering a video that has been filtered out. 
- `raise DownloadCancelled(msg)` in match_filter instead force_generic_extractor: Force downloader to use the generic extractor - Use allowed_extractors = ['generic', 'default'] playliststart: - Use playlist_items Playlist item to start at. playlistend: - Use playlist_items Playlist item to end at. playlistreverse: - Use playlist_items Download playlist items in reverse order. forceurl: - Use forceprint Force printing final URL. forcetitle: - Use forceprint Force printing title. forceid: - Use forceprint Force printing ID. forcethumbnail: - Use forceprint Force printing thumbnail URL. forcedescription: - Use forceprint Force printing description. forcefilename: - Use forceprint Force printing final filename. forceduration: - Use forceprint Force printing duration. allsubtitles: - Use subtitleslangs = ['all'] Downloads all the subtitles of the video (requires writesubtitles or writeautomaticsub) post_hooks: - Register a custom postprocessor A list of functions that get called as the final step for each video file, after all postprocessors have been called. The filename will be passed as the only argument. hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}. Use the native HLS downloader instead of ffmpeg if True, otherwise use ffmpeg if False, otherwise use downloader suggested by extractor if None. 
no_color: Same as `color='no_color'` no_overwrites: Same as `overwrites=False` """ _NUMERIC_FIELDS = { 'width', 'height', 'asr', 'audio_channels', 'fps', 'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx', 'timestamp', 'release_timestamp', 'available_at', 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count', 'save_count', 'average_rating', 'comment_count', 'age_limit', 'start_time', 'end_time', 'chapter_number', 'season_number', 'episode_number', 'track_number', 'disc_number', 'release_year', } _format_fields = { # NB: Keep in sync with the docstring of extractor/common.py 'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note', 'available_at', 'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels', 'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns', 'hls_media_playlist_data', 'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start', 'is_dash_periods', 'request_data', 'preference', 'language', 'language_preference', 'quality', 'source_preference', 'cookies', 'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'extra_param_to_key_url', 'hls_aes', 'downloader_options', 'impersonate', 'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time', } _deprecated_multivalue_fields = { 'album_artist': 'album_artists', 'artist': 'artists', 'composer': 'composers', 'creator': 'creators',
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/__main__.py
yt_dlp/__main__.py
#!/usr/bin/env python3 # Execute with # $ python3 -m yt_dlp import sys if __package__ is None and not getattr(sys, 'frozen', False): # direct call of __main__.py import os.path path = os.path.realpath(os.path.abspath(__file__)) sys.path.insert(0, os.path.dirname(os.path.dirname(path))) import yt_dlp if __name__ == '__main__': yt_dlp.main()
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/update.py
yt_dlp/update.py
from __future__ import annotations import atexit import contextlib import datetime as dt import functools import hashlib import json import os import platform import re import subprocess import sys from dataclasses import dataclass from zipimport import zipimporter from .networking import Request from .networking.exceptions import HTTPError, network_exceptions from .utils import ( NO_DEFAULT, Popen, deprecation_warning, format_field, remove_end, shell_quote, system_identifier, version_tuple, ) from .version import ( CHANNEL, ORIGIN, RELEASE_GIT_HEAD, UPDATE_HINT, VARIANT, __version__, ) UPDATE_SOURCES = { 'stable': 'yt-dlp/yt-dlp', 'nightly': 'yt-dlp/yt-dlp-nightly-builds', 'master': 'yt-dlp/yt-dlp-master-builds', } REPOSITORY = UPDATE_SOURCES['stable'] _INVERSE_UPDATE_SOURCES = {value: key for key, value in UPDATE_SOURCES.items()} _VERSION_RE = re.compile(r'(\d+\.)*\d+') _HASH_PATTERN = r'[\da-f]{40}' _COMMIT_RE = re.compile(rf'Generated from: https://(?:[^/?#]+/){{3}}commit/(?P<hash>{_HASH_PATTERN})') API_BASE_URL = 'https://api.github.com/repos' # Backwards compatibility variables for the current channel API_URL = f'{API_BASE_URL}/{REPOSITORY}/releases' @functools.cache def _get_variant_and_executable_path(): """@returns (variant, executable_path)""" if getattr(sys, 'frozen', False): path = sys.executable # py2exe: No longer officially supported, but still identify it to block updates if not hasattr(sys, '_MEIPASS'): return 'py2exe', path # staticx builds: sys.executable returns a /tmp/ path # No longer officially supported, but still identify them to block updates # Ref: https://staticx.readthedocs.io/en/latest/usage.html#run-time-information if static_exe_path := os.getenv('STATICX_PROG_PATH'): return 'linux_static_exe', static_exe_path # We know it's a PyInstaller bundle, but is it "onedir" or "onefile"? 
if ( # PyInstaller >= 6.0.0 sets sys._MEIPASS for onedir to its `_internal` subdirectory # Ref: https://pyinstaller.org/en/v6.0.0/CHANGES.html#incompatible-changes sys._MEIPASS == f'{os.path.dirname(path)}/_internal' # compat: PyInstaller < 6.0.0 or sys._MEIPASS == os.path.dirname(path) ): suffix = 'dir' else: suffix = 'exe' system_platform = remove_end(sys.platform, '32') if system_platform == 'darwin': # darwin_legacy_exe is no longer supported, but still identify it to block updates machine = '_legacy' if version_tuple(platform.mac_ver()[0]) < (10, 15) else '' return f'darwin{machine}_{suffix}', path if system_platform == 'linux' and platform.libc_ver()[0] != 'glibc': system_platform = 'musllinux' machine = f'_{platform.machine().lower()}' is_64bits = sys.maxsize > 2**32 # Ref: https://en.wikipedia.org/wiki/Uname#Examples if machine[1:] in ('x86', 'x86_64', 'amd64', 'i386', 'i686'): machine = '_x86' if not is_64bits else '' # platform.machine() on 32-bit raspbian OS may return 'aarch64', so check "64-bitness" # See: https://github.com/yt-dlp/yt-dlp/issues/11813 elif machine[1:] == 'aarch64' and not is_64bits: machine = '_armv7l' return f'{system_platform}{machine}_{suffix}', path path = os.path.dirname(__file__) if isinstance(__loader__, zipimporter): return 'zip', os.path.join(path, '..') elif (os.path.basename(sys.argv[0]) in ('__main__.py', '-m') and os.path.exists(os.path.join(path, '../.git/HEAD'))): return 'source', path return 'unknown', path def detect_variant(): return VARIANT or _get_variant_and_executable_path()[0] @functools.cache def current_git_head(): if detect_variant() != 'source': return with contextlib.suppress(Exception): stdout, _, _ = Popen.run( ['git', 'rev-parse', '--short', 'HEAD'], text=True, cwd=os.path.dirname(os.path.abspath(__file__)), stdout=subprocess.PIPE, stderr=subprocess.PIPE) if re.fullmatch('[0-9a-f]+', stdout.strip()): return stdout.strip() _FILE_SUFFIXES = { 'zip': '', 'win_exe': '.exe', 'win_x86_exe': '_x86.exe', 
'win_arm64_exe': '_arm64.exe', 'darwin_exe': '_macos', 'linux_exe': '_linux', 'linux_aarch64_exe': '_linux_aarch64', 'musllinux_exe': '_musllinux', 'musllinux_aarch64_exe': '_musllinux_aarch64', } _NON_UPDATEABLE_REASONS = { **dict.fromkeys(_FILE_SUFFIXES), # Updatable **dict.fromkeys( ['linux_armv7l_dir', *(f'{variant[:-4]}_dir' for variant in _FILE_SUFFIXES if variant.endswith('_exe'))], 'Auto-update is not supported for unpackaged executables; Re-download the latest release'), 'py2exe': 'py2exe is no longer supported by yt-dlp; This executable cannot be updated', 'source': 'You cannot update when running from source code; Use git to pull the latest changes', 'unknown': 'You installed yt-dlp from a manual build or with a package manager; Use that to update', 'other': 'You are using an unofficial build of yt-dlp; Build the executable again', } def is_non_updateable(): if UPDATE_HINT: return UPDATE_HINT return _NON_UPDATEABLE_REASONS.get( detect_variant(), _NON_UPDATEABLE_REASONS['unknown' if VARIANT else 'other']) def _get_binary_name(): return format_field(_FILE_SUFFIXES, detect_variant(), template='yt-dlp%s', ignore=None, default=None) def _get_system_deprecation(): MIN_SUPPORTED, MIN_RECOMMENDED = (3, 10), (3, 10) if sys.version_info > MIN_RECOMMENDED: return None major, minor = sys.version_info[:2] PYTHON_MSG = f'Please update to Python {".".join(map(str, MIN_RECOMMENDED))} or above' if sys.version_info < MIN_SUPPORTED: return f'Python version {major}.{minor} is no longer supported! {PYTHON_MSG}' return f'Support for Python version {major}.{minor} has been deprecated. 
{PYTHON_MSG}' def _get_outdated_warning(): # Only yt-dlp guarantees a stable release at least every 90 days if not ORIGIN.startswith('yt-dlp/'): return None with contextlib.suppress(Exception): last_updated = dt.date(*version_tuple(__version__)[:3]) if last_updated < dt.datetime.now(dt.timezone.utc).date() - dt.timedelta(days=90): return ('\n '.join(( f'Your yt-dlp version ({__version__}) is older than 90 days!', 'It is strongly recommended to always use the latest version.', f'{is_non_updateable() or """Run "yt-dlp --update" or "yt-dlp -U" to update"""}.', 'To suppress this warning, add --no-update to your command/config.'))) return None def _sha256_file(path): h = hashlib.sha256() mv = memoryview(bytearray(128 * 1024)) with open(os.path.realpath(path), 'rb', buffering=0) as f: for n in iter(lambda: f.readinto(mv), 0): h.update(mv[:n]) return h.hexdigest() def _make_label(origin, tag, version=None): if tag != version: if version: return f'{origin}@{tag} build {version}' return f'{origin}@{tag}' if channel := _INVERSE_UPDATE_SOURCES.get(origin): return f'{channel}@{tag} from {origin}' return f'{origin}@{tag}' @dataclass class UpdateInfo: """ Update target information Can be created by `query_update()` or manually. Attributes: tag The release tag that will be updated to. If from query_update, the value is after API resolution and update spec processing. The only property that is required. version The actual numeric version (if available) of the binary to be updated to, after API resolution and update spec processing. (default: None) requested_version Numeric version of the binary being requested (if available), after API resolution only. (default: None) commit Commit hash (if available) of the binary to be updated to, after API resolution and update spec processing. (default: None) This value will only match the RELEASE_GIT_HEAD of prerelease builds. binary_name Filename of the binary to be updated to. 
(default: current binary name) checksum Expected checksum (if available) of the binary to be updated to. (default: None) """ tag: str version: str | None = None requested_version: str | None = None commit: str | None = None binary_name: str | None = _get_binary_name() # noqa: RUF009 # Always returns the same value checksum: str | None = None class Updater: # XXX: use class variables to simplify testing _channel = CHANNEL _origin = ORIGIN _update_sources = UPDATE_SOURCES def __init__(self, ydl, target: str | None = None): self.ydl = ydl # For backwards compat, target needs to be treated as if it could be None self.requested_channel, sep, self.requested_tag = (target or self._channel).rpartition('@') # Check if requested_tag is actually the requested repo/channel if not sep and ('/' in self.requested_tag or self.requested_tag in self._update_sources): self.requested_channel = self.requested_tag self.requested_tag: str = None # type: ignore (we set it later) elif not self.requested_channel: # User did not specify a channel, so we are requesting the default channel self.requested_channel = self._channel.partition('@')[0] # --update should not be treated as an exact tag request even if CHANNEL has a @tag self._exact = bool(target) and target != self._channel if not self.requested_tag: # User did not specify a tag, so we request 'latest' and track that no exact tag was passed self.requested_tag = 'latest' self._exact = False if '/' in self.requested_channel: # requested_channel is actually a repository self.requested_repo = self.requested_channel if not self.requested_repo.startswith('yt-dlp/') and self.requested_repo != self._origin: self.ydl.report_warning( f'You are switching to an {self.ydl._format_err("unofficial", "red")} executable ' f'from {self.ydl._format_err(self.requested_repo, self.ydl.Styles.EMPHASIS)}. 
' f'Run {self.ydl._format_err("at your own risk", "light red")}') self._block_restart('Automatically restarting into custom builds is disabled for security reasons') else: # Check if requested_channel resolves to a known repository or else raise self.requested_repo = self._update_sources.get(self.requested_channel) if not self.requested_repo: self._report_error( f'Invalid update channel {self.requested_channel!r} requested. ' f'Valid channels are {", ".join(self._update_sources)}', True) self._identifier = f'{detect_variant()} {system_identifier()}' @property def current_version(self): """Current version""" return __version__ @property def current_commit(self): """Current commit hash""" return RELEASE_GIT_HEAD def _download_asset(self, name, tag=None): if not tag: tag = self.requested_tag path = 'latest/download' if tag == 'latest' else f'download/{tag}' url = f'https://github.com/{self.requested_repo}/releases/{path}/{name}' self.ydl.write_debug(f'Downloading {name} from {url}') return self.ydl.urlopen(url).read() def _call_api(self, tag): tag = f'tags/{tag}' if tag != 'latest' else tag url = f'{API_BASE_URL}/{self.requested_repo}/releases/{tag}' self.ydl.write_debug(f'Fetching release info: {url}') return json.loads(self.ydl.urlopen(Request(url, headers={ 'Accept': 'application/vnd.github+json', 'User-Agent': 'yt-dlp', 'X-GitHub-Api-Version': '2022-11-28', })).read().decode()) def _get_version_info(self, tag: str) -> tuple[str | None, str | None]: if _VERSION_RE.fullmatch(tag): return tag, None api_info = self._call_api(tag) if tag == 'latest': requested_version = api_info['tag_name'] else: match = re.search(rf'\s+(?P<version>{_VERSION_RE.pattern})$', api_info.get('name', '')) requested_version = match.group('version') if match else None if re.fullmatch(_HASH_PATTERN, api_info.get('target_commitish', '')): target_commitish = api_info['target_commitish'] else: match = _COMMIT_RE.match(api_info.get('body', '')) target_commitish = match.group('hash') if match else 
None if not (requested_version or target_commitish): self._report_error('One of either version or commit hash must be available on the release', expected=True) return requested_version, target_commitish def _download_update_spec(self, source_tags): for tag in source_tags: try: return self._download_asset('_update_spec', tag=tag).decode() except network_exceptions as error: if isinstance(error, HTTPError) and error.status == 404: continue self._report_network_error(f'fetch update spec: {error}') return None self._report_error( f'The requested tag {self.requested_tag} does not exist for {self.requested_repo}', True) return None def _process_update_spec(self, lockfile: str, resolved_tag: str): lines = lockfile.splitlines() is_version2 = any(line.startswith('lockV2 ') for line in lines) for line in lines: if is_version2: if not line.startswith(f'lockV2 {self.requested_repo} '): continue _, _, tag, pattern = line.split(' ', 3) else: if not line.startswith('lock '): continue _, tag, pattern = line.split(' ', 2) if re.match(pattern, self._identifier): if _VERSION_RE.fullmatch(tag): if not self._exact: return tag elif self._version_compare(tag, resolved_tag): return resolved_tag elif tag != resolved_tag: continue self._report_error( f'yt-dlp cannot be updated to {resolved_tag} since you are on an older Python version ' 'or your operating system is not compatible with the requested build', True) return None return resolved_tag def _version_compare(self, a: str, b: str): """ Compare two version strings This function SHOULD NOT be called if self._exact == True """ if _VERSION_RE.fullmatch(f'{a}.{b}'): return version_tuple(a) >= version_tuple(b) return a == b def query_update(self, *, _output=False) -> UpdateInfo | None: """Fetches info about the available update @returns An `UpdateInfo` if there is an update available, else None """ if not self.requested_repo: self._report_error('No target repository could be determined from input') return None try: requested_version, 
target_commitish = self._get_version_info(self.requested_tag) except network_exceptions as e: self._report_network_error(f'obtain version info ({e})', delim='; Please try again later or') return None if self._exact and self._origin != self.requested_repo: has_update = True elif requested_version: if self._exact: has_update = self.current_version != requested_version else: has_update = not self._version_compare(self.current_version, requested_version) elif target_commitish: has_update = target_commitish != self.current_commit else: has_update = False resolved_tag = requested_version if self.requested_tag == 'latest' else self.requested_tag current_label = _make_label(self._origin, self._channel.partition('@')[2] or self.current_version, self.current_version) requested_label = _make_label(self.requested_repo, resolved_tag, requested_version) latest_or_requested = f'{"Latest" if self.requested_tag == "latest" else "Requested"} version: {requested_label}' if not has_update: if _output: self.ydl.to_screen(f'{latest_or_requested}\nyt-dlp is up to date ({current_label})') return None update_spec = self._download_update_spec(('latest', None) if requested_version else (None,)) if not update_spec: return None # `result_` prefixed vars == post-_process_update_spec() values result_tag = self._process_update_spec(update_spec, resolved_tag) if not result_tag or result_tag == self.current_version: return None elif result_tag == resolved_tag: result_version = requested_version elif _VERSION_RE.fullmatch(result_tag): result_version = result_tag else: # actual version being updated to is unknown result_version = None checksum = None # Non-updateable variants can get update_info but need to skip checksum if not is_non_updateable(): try: hashes = self._download_asset('SHA2-256SUMS', result_tag) except network_exceptions as error: if not isinstance(error, HTTPError) or error.status != 404: self._report_network_error(f'fetch checksums: {error}') return None self.ydl.report_warning('No 
hash information found for the release, skipping verification') else: for ln in hashes.decode().splitlines(): if ln.endswith(_get_binary_name()): checksum = ln.split()[0] break if not checksum: self.ydl.report_warning('The hash could not be found in the checksum file, skipping verification') if _output: update_label = _make_label(self.requested_repo, result_tag, result_version) self.ydl.to_screen( f'Current version: {current_label}\n{latest_or_requested}' + (f'\nUpgradable to: {update_label}' if update_label != requested_label else '')) return UpdateInfo( tag=result_tag, version=result_version, requested_version=requested_version, commit=target_commitish if result_tag == resolved_tag else None, checksum=checksum) def update(self, update_info=NO_DEFAULT): """Update yt-dlp executable to the latest version @param update_info `UpdateInfo | None` as returned by query_update() """ if update_info is NO_DEFAULT: update_info = self.query_update(_output=True) if not update_info: return False err = is_non_updateable() if err: self._report_error(err, True) return False self.ydl.to_screen(f'Current Build Hash: {_sha256_file(self.filename)}') update_label = _make_label(self.requested_repo, update_info.tag, update_info.version) self.ydl.to_screen(f'Updating to {update_label} ...') directory = os.path.dirname(self.filename) if not os.access(self.filename, os.W_OK): return self._report_permission_error(self.filename) elif not os.access(directory, os.W_OK): return self._report_permission_error(directory) new_filename, old_filename = f'{self.filename}.new', f'{self.filename}.old' if detect_variant() == 'zip': # Can be replaced in-place new_filename, old_filename = self.filename, None try: if os.path.exists(old_filename or ''): os.remove(old_filename) except OSError: return self._report_error('Unable to remove the old version') try: newcontent = self._download_asset(update_info.binary_name, update_info.tag) except network_exceptions as e: if isinstance(e, HTTPError) and e.status == 
404: return self._report_error( f'The requested tag {self.requested_repo}@{update_info.tag} does not exist', True) return self._report_network_error(f'fetch updates: {e}', tag=update_info.tag) if not update_info.checksum: self._block_restart('Automatically restarting into unverified builds is disabled for security reasons') elif hashlib.sha256(newcontent).hexdigest() != update_info.checksum: return self._report_network_error('verify the new executable', tag=update_info.tag) try: with open(new_filename, 'wb') as outf: outf.write(newcontent) except OSError: return self._report_permission_error(new_filename) if old_filename: mask = os.stat(self.filename).st_mode try: os.rename(self.filename, old_filename) except OSError: return self._report_error('Unable to move current version') try: os.rename(new_filename, self.filename) except OSError: self._report_error('Unable to overwrite current version') return os.rename(old_filename, self.filename) variant = detect_variant() if variant.startswith('win'): atexit.register(Popen, f'ping 127.0.0.1 -n 5 -w 1000 & del /F "{old_filename}"', shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) elif old_filename: try: os.remove(old_filename) except OSError: self._report_error('Unable to remove the old version') try: os.chmod(self.filename, mask) except OSError: return self._report_error( f'Unable to set permissions. 
Run: sudo chmod a+rx {shell_quote(self.filename)}') self.ydl.to_screen(f'Updated yt-dlp to {update_label}') return True @functools.cached_property def filename(self): """Filename of the executable""" return os.path.realpath(_get_variant_and_executable_path()[1]) @functools.cached_property def cmd(self): """The command-line to run the executable, if known""" argv = sys.orig_argv # sys.orig_argv can be [] when frozen if not argv and getattr(sys, 'frozen', False): argv = sys.argv # linux_static exe's argv[0] will be /tmp/staticx-NNNN/yt-dlp_linux if we don't fixup here if argv and os.getenv('STATICX_PROG_PATH'): argv = [self.filename, *argv[1:]] return argv def restart(self): """Restart the executable""" assert self.cmd, 'Unable to determine argv' self.ydl.write_debug(f'Restarting: {shell_quote(self.cmd)}') _, _, returncode = Popen.run(self.cmd) return returncode def _block_restart(self, msg): def wrapper(): self._report_error(f'{msg}. Restart yt-dlp to use the updated version', expected=True) return self.ydl._download_retcode self.restart = wrapper def _report_error(self, msg, expected=False): self.ydl.report_error(msg, tb=False if expected else None) self.ydl._download_retcode = 100 def _report_permission_error(self, file): self._report_error(f'Unable to write to {file}; try running as administrator', True) def _report_network_error(self, action, delim=';', tag=None): if not tag: tag = self.requested_tag path = tag if tag == 'latest' else f'tag/{tag}' self._report_error( f'Unable to {action}{delim} visit ' f'https://github.com/{self.requested_repo}/releases/{path}', True) def run_update(ydl): """Update the program file with the latest version from the repository @returns Whether there was a successful update (No update = False) """ deprecation_warning( '"yt_dlp.update.run_update(ydl)" is deprecated and may be removed in a future version. ' 'Use "yt_dlp.update.Updater(ydl).update()" instead') return Updater(ydl).update() __all__ = ['Updater']
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/socks.py
yt_dlp/socks.py
# Public Domain SOCKS proxy protocol implementation # Adapted from https://gist.github.com/bluec0re/cafd3764412967417fd3 # References: # SOCKS4 protocol http://www.openssh.com/txt/socks4.protocol # SOCKS4A protocol http://www.openssh.com/txt/socks4a.protocol # SOCKS5 protocol https://tools.ietf.org/html/rfc1928 # SOCKS5 username/password authentication https://tools.ietf.org/html/rfc1929 import collections import socket import struct from .compat import compat_ord __author__ = 'Timo Schmid <coding@timoschmid.de>' SOCKS4_VERSION = 4 SOCKS4_REPLY_VERSION = 0x00 # Excerpt from SOCKS4A protocol: # if the client cannot resolve the destination host's domain name to find its # IP address, it should set the first three bytes of DSTIP to NULL and the last # byte to a non-zero value. SOCKS4_DEFAULT_DSTIP = struct.pack('!BBBB', 0, 0, 0, 0xFF) SOCKS5_VERSION = 5 SOCKS5_USER_AUTH_VERSION = 0x01 SOCKS5_USER_AUTH_SUCCESS = 0x00 class Socks4Command: CMD_CONNECT = 0x01 CMD_BIND = 0x02 class Socks5Command(Socks4Command): CMD_UDP_ASSOCIATE = 0x03 class Socks5Auth: AUTH_NONE = 0x00 AUTH_GSSAPI = 0x01 AUTH_USER_PASS = 0x02 AUTH_NO_ACCEPTABLE = 0xFF # For server response class Socks5AddressType: ATYP_IPV4 = 0x01 ATYP_DOMAINNAME = 0x03 ATYP_IPV6 = 0x04 class ProxyError(OSError): ERR_SUCCESS = 0x00 def __init__(self, code=None, msg=None): if code is not None and msg is None: msg = self.CODES.get(code) or 'unknown error' super().__init__(code, msg) class InvalidVersionError(ProxyError): def __init__(self, expected_version, got_version): msg = (f'Invalid response version from server. 
Expected {expected_version:02x} got ' f'{got_version:02x}') super().__init__(0, msg) class Socks4Error(ProxyError): ERR_SUCCESS = 90 CODES = { 91: 'request rejected or failed', 92: 'request rejected because SOCKS server cannot connect to identd on the client', 93: 'request rejected because the client program and identd report different user-ids', } class Socks5Error(ProxyError): ERR_GENERAL_FAILURE = 0x01 CODES = { 0x01: 'general SOCKS server failure', 0x02: 'connection not allowed by ruleset', 0x03: 'Network unreachable', 0x04: 'Host unreachable', 0x05: 'Connection refused', 0x06: 'TTL expired', 0x07: 'Command not supported', 0x08: 'Address type not supported', 0xFE: 'unknown username or invalid password', 0xFF: 'all offered authentication methods were rejected', } class ProxyType: SOCKS4 = 0 SOCKS4A = 1 SOCKS5 = 2 Proxy = collections.namedtuple('Proxy', ( 'type', 'host', 'port', 'username', 'password', 'remote_dns')) class sockssocket(socket.socket): def __init__(self, *args, **kwargs): self._proxy = None super().__init__(*args, **kwargs) def setproxy(self, proxytype, addr, port, rdns=True, username=None, password=None): assert proxytype in (ProxyType.SOCKS4, ProxyType.SOCKS4A, ProxyType.SOCKS5) self._proxy = Proxy(proxytype, addr, port, username, password, rdns) def recvall(self, cnt): data = b'' while len(data) < cnt: cur = self.recv(cnt - len(data)) if not cur: raise EOFError(f'{cnt - len(data)} bytes missing') data += cur return data def _recv_bytes(self, cnt): data = self.recvall(cnt) return struct.unpack(f'!{cnt}B', data) @staticmethod def _len_and_data(data): return struct.pack('!B', len(data)) + data def _check_response_version(self, expected_version, got_version): if got_version != expected_version: self.close() raise InvalidVersionError(expected_version, got_version) def _resolve_address(self, destaddr, default, use_remote_dns, family=None): for f in (family,) if family else (socket.AF_INET, socket.AF_INET6): try: return f, socket.inet_pton(f, destaddr) 
except OSError: continue if use_remote_dns and self._proxy.remote_dns: return 0, default else: res = socket.getaddrinfo(destaddr, None, family=family or 0) f, _, _, _, ipaddr = res[0] return f, socket.inet_pton(f, ipaddr[0]) def _setup_socks4(self, address, is_4a=False): destaddr, port = address _, ipaddr = self._resolve_address(destaddr, SOCKS4_DEFAULT_DSTIP, use_remote_dns=is_4a, family=socket.AF_INET) packet = struct.pack('!BBH', SOCKS4_VERSION, Socks4Command.CMD_CONNECT, port) + ipaddr username = (self._proxy.username or '').encode() packet += username + b'\x00' if is_4a and self._proxy.remote_dns and ipaddr == SOCKS4_DEFAULT_DSTIP: packet += destaddr.encode() + b'\x00' self.sendall(packet) version, resp_code, dstport, dsthost = struct.unpack('!BBHI', self.recvall(8)) self._check_response_version(SOCKS4_REPLY_VERSION, version) if resp_code != Socks4Error.ERR_SUCCESS: self.close() raise Socks4Error(resp_code) return (dsthost, dstport) def _setup_socks4a(self, address): self._setup_socks4(address, is_4a=True) def _socks5_auth(self): packet = struct.pack('!B', SOCKS5_VERSION) auth_methods = [Socks5Auth.AUTH_NONE] if self._proxy.username and self._proxy.password: auth_methods.append(Socks5Auth.AUTH_USER_PASS) packet += struct.pack('!B', len(auth_methods)) packet += struct.pack(f'!{len(auth_methods)}B', *auth_methods) self.sendall(packet) version, method = self._recv_bytes(2) self._check_response_version(SOCKS5_VERSION, version) if method == Socks5Auth.AUTH_NO_ACCEPTABLE or ( method == Socks5Auth.AUTH_USER_PASS and (not self._proxy.username or not self._proxy.password)): self.close() raise Socks5Error(Socks5Auth.AUTH_NO_ACCEPTABLE) if method == Socks5Auth.AUTH_USER_PASS: username = self._proxy.username.encode() password = self._proxy.password.encode() packet = struct.pack('!B', SOCKS5_USER_AUTH_VERSION) packet += self._len_and_data(username) + self._len_and_data(password) self.sendall(packet) version, status = self._recv_bytes(2) 
self._check_response_version(SOCKS5_USER_AUTH_VERSION, version) if status != SOCKS5_USER_AUTH_SUCCESS: self.close() raise Socks5Error(Socks5Error.ERR_GENERAL_FAILURE) def _setup_socks5(self, address): destaddr, port = address family, ipaddr = self._resolve_address(destaddr, None, use_remote_dns=True) self._socks5_auth() reserved = 0 packet = struct.pack('!BBB', SOCKS5_VERSION, Socks5Command.CMD_CONNECT, reserved) if ipaddr is None: destaddr = destaddr.encode() packet += struct.pack('!B', Socks5AddressType.ATYP_DOMAINNAME) packet += self._len_and_data(destaddr) elif family == socket.AF_INET: packet += struct.pack('!B', Socks5AddressType.ATYP_IPV4) + ipaddr elif family == socket.AF_INET6: packet += struct.pack('!B', Socks5AddressType.ATYP_IPV6) + ipaddr packet += struct.pack('!H', port) self.sendall(packet) version, status, reserved, atype = self._recv_bytes(4) self._check_response_version(SOCKS5_VERSION, version) if status != Socks5Error.ERR_SUCCESS: self.close() raise Socks5Error(status) if atype == Socks5AddressType.ATYP_IPV4: destaddr = self.recvall(4) elif atype == Socks5AddressType.ATYP_DOMAINNAME: alen = compat_ord(self.recv(1)) destaddr = self.recvall(alen) elif atype == Socks5AddressType.ATYP_IPV6: destaddr = self.recvall(16) destport = struct.unpack('!H', self.recvall(2))[0] return (destaddr, destport) def _make_proxy(self, connect_func, address): if not self._proxy: return connect_func(self, address) result = connect_func(self, (self._proxy.host, self._proxy.port)) if result != 0 and result is not None: return result setup_funcs = { ProxyType.SOCKS4: self._setup_socks4, ProxyType.SOCKS4A: self._setup_socks4a, ProxyType.SOCKS5: self._setup_socks5, } setup_funcs[self._proxy.type](address) return result def connect(self, address): self._make_proxy(socket.socket.connect, address) def connect_ex(self, address): return self._make_proxy(socket.socket.connect_ex, address)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/plugins.py
yt_dlp/plugins.py
import contextlib import dataclasses import functools import importlib import importlib.abc import importlib.machinery import importlib.util import inspect import itertools import os import pkgutil import sys import traceback from pathlib import Path from zipfile import ZipFile from .globals import ( Indirect, plugin_dirs, all_plugins_loaded, plugin_specs, ) from .utils import ( get_executable_path, get_system_config_dirs, get_user_config_dirs, merge_dicts, orderedSet, write_string, ) PACKAGE_NAME = 'yt_dlp_plugins' COMPAT_PACKAGE_NAME = 'ytdlp_plugins' _BASE_PACKAGE_PATH = Path(__file__).parent # Please Note: Due to necessary changes and the complex nature involved, # no backwards compatibility is guaranteed for the plugin system API. # However, we will still try our best. __all__ = [ 'COMPAT_PACKAGE_NAME', 'PACKAGE_NAME', 'PluginSpec', 'directories', 'load_all_plugins', 'load_plugins', 'register_plugin_spec', ] @dataclasses.dataclass class PluginSpec: module_name: str suffix: str destination: Indirect plugin_destination: Indirect class PluginLoader(importlib.abc.Loader): """Dummy loader for virtual namespace packages""" def exec_module(self, module): return None @functools.cache def dirs_in_zip(archive): try: with ZipFile(archive) as zip_: return set(itertools.chain.from_iterable( Path(file).parents for file in zip_.namelist())) except FileNotFoundError: pass except Exception as e: write_string(f'WARNING: Could not read zip file {archive}: {e}\n') return () def default_plugin_paths(): def _get_package_paths(*root_paths, containing_folder): for config_dir in orderedSet(map(Path, root_paths), lazy=True): # We need to filter the base path added when running __main__.py directly if config_dir == _BASE_PACKAGE_PATH: continue with contextlib.suppress(OSError): yield from (config_dir / containing_folder).iterdir() # Load from yt-dlp config folders yield from _get_package_paths( *get_user_config_dirs('yt-dlp'), *get_system_config_dirs('yt-dlp'), 
containing_folder='plugins', ) # Load from yt-dlp-plugins folders yield from _get_package_paths( get_executable_path(), *get_user_config_dirs(''), *get_system_config_dirs(''), containing_folder='yt-dlp-plugins', ) # Load from PYTHONPATH directories yield from (path for path in map(Path, sys.path) if path != _BASE_PACKAGE_PATH) def candidate_plugin_paths(candidate): candidate_path = Path(candidate) if not candidate_path.is_dir(): raise ValueError(f'Invalid plugin directory: {candidate_path}') yield from candidate_path.iterdir() class PluginFinder(importlib.abc.MetaPathFinder): """ This class provides one or multiple namespace packages. It searches in sys.path and yt-dlp config folders for the existing subdirectories from which the modules can be imported """ def __init__(self, *packages): self._zip_content_cache = {} self.packages = set( itertools.chain.from_iterable( itertools.accumulate(name.split('.'), lambda a, b: '.'.join((a, b))) for name in packages)) def search_locations(self, fullname): candidate_locations = itertools.chain.from_iterable( default_plugin_paths() if candidate == 'default' else candidate_plugin_paths(candidate) for candidate in plugin_dirs.value ) parts = Path(*fullname.split('.')) for path in orderedSet(candidate_locations, lazy=True): candidate = path / parts try: if candidate.is_dir(): yield candidate elif path.suffix in ('.zip', '.egg', '.whl') and path.is_file(): if parts in dirs_in_zip(path): yield candidate except PermissionError as e: write_string(f'Permission error while accessing modules in "{e.filename}"\n') def find_spec(self, fullname, path=None, target=None): if fullname not in self.packages: return None search_locations = list(map(str, self.search_locations(fullname))) if not search_locations: # Prevent using built-in meta finders for searching plugins. 
raise ModuleNotFoundError(fullname) spec = importlib.machinery.ModuleSpec(fullname, PluginLoader(), is_package=True) spec.submodule_search_locations = search_locations return spec def invalidate_caches(self): dirs_in_zip.cache_clear() for package in self.packages: if package in sys.modules: del sys.modules[package] def directories(): with contextlib.suppress(ModuleNotFoundError): if spec := importlib.util.find_spec(PACKAGE_NAME): return list(spec.submodule_search_locations) return [] def iter_modules(subpackage): fullname = f'{PACKAGE_NAME}.{subpackage}' with contextlib.suppress(ModuleNotFoundError): pkg = importlib.import_module(fullname) yield from pkgutil.iter_modules(path=pkg.__path__, prefix=f'{fullname}.') def get_regular_classes(module, module_name, suffix): # Find standard public plugin classes (not overrides) return inspect.getmembers(module, lambda obj: ( inspect.isclass(obj) and obj.__name__.endswith(suffix) and obj.__module__.startswith(module_name) and not obj.__name__.startswith('_') and obj.__name__ in getattr(module, '__all__', [obj.__name__]) and getattr(obj, 'PLUGIN_NAME', None) is None )) def load_plugins(plugin_spec: PluginSpec): name, suffix = plugin_spec.module_name, plugin_spec.suffix regular_classes = {} if os.environ.get('YTDLP_NO_PLUGINS') or not plugin_dirs.value: return regular_classes for finder, module_name, _ in iter_modules(name): if any(x.startswith('_') for x in module_name.split('.')): continue try: spec = finder.find_spec(module_name) module = importlib.util.module_from_spec(spec) sys.modules[module_name] = module spec.loader.exec_module(module) except Exception: write_string( f'Error while importing module {module_name!r}\n{traceback.format_exc(limit=-1)}', ) continue regular_classes.update(get_regular_classes(module, module_name, suffix)) # Compat: old plugin system using __init__.py # Note: plugins imported this way do not show up in directories() # nor are considered part of the yt_dlp_plugins namespace package if 'default' 
in plugin_dirs.value: with contextlib.suppress(FileNotFoundError): spec = importlib.util.spec_from_file_location( name, Path(get_executable_path(), COMPAT_PACKAGE_NAME, name, '__init__.py'), ) plugins = importlib.util.module_from_spec(spec) sys.modules[spec.name] = plugins spec.loader.exec_module(plugins) regular_classes.update(get_regular_classes(plugins, spec.name, suffix)) # Add the classes into the global plugin lookup for that type plugin_spec.plugin_destination.value = regular_classes # We want to prepend to the main lookup for that type plugin_spec.destination.value = merge_dicts(regular_classes, plugin_spec.destination.value) return regular_classes def load_all_plugins(): for plugin_spec in plugin_specs.value.values(): load_plugins(plugin_spec) all_plugins_loaded.value = True def register_plugin_spec(plugin_spec: PluginSpec): # If the plugin spec for a module is already registered, it will not be added again if plugin_spec.module_name not in plugin_specs.value: plugin_specs.value[plugin_spec.module_name] = plugin_spec sys.meta_path.insert(0, PluginFinder(f'{PACKAGE_NAME}.{plugin_spec.module_name}'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/jsinterp.py
yt_dlp/jsinterp.py
import collections import contextlib import itertools import json import math import operator import re from .utils import ( NO_DEFAULT, ExtractorError, function_with_repr, js_to_json, remove_quotes, truncate_string, unified_timestamp, write_string, ) def _js_bit_op(op): def zeroise(x): if x in (None, JS_Undefined): return 0 with contextlib.suppress(TypeError): if math.isnan(x): # NB: NaN cannot be checked by membership return 0 return int(float(x)) def wrapped(a, b): return op(zeroise(a), zeroise(b)) & 0xffffffff return wrapped def _js_arith_op(op): def wrapped(a, b): if JS_Undefined in (a, b): return float('nan') return op(a or 0, b or 0) return wrapped def _js_div(a, b): if JS_Undefined in (a, b) or not (a or b): return float('nan') return (a or 0) / b if b else float('inf') def _js_mod(a, b): if JS_Undefined in (a, b) or not b: return float('nan') return (a or 0) % b def _js_exp(a, b): if not b: return 1 # even 0 ** 0 !! elif JS_Undefined in (a, b): return float('nan') return (a or 0) ** b def _js_eq_op(op): def wrapped(a, b): if {a, b} <= {None, JS_Undefined}: return op(a, a) return op(a, b) return wrapped def _js_comp_op(op): def wrapped(a, b): if JS_Undefined in (a, b): return False if isinstance(a, str) or isinstance(b, str): return op(str(a or 0), str(b or 0)) return op(a or 0, b or 0) return wrapped def _js_ternary(cndn, if_true=True, if_false=False): """Simulate JS's ternary operator (cndn?if_true:if_false)""" if cndn in (False, None, 0, '', JS_Undefined): return if_false with contextlib.suppress(TypeError): if math.isnan(cndn): # NB: NaN cannot be checked by membership return if_false return if_true # Ref: https://es5.github.io/#x9.8.1 def js_number_to_string(val: float, radix: int = 10): if radix in (JS_Undefined, None): radix = 10 assert radix in range(2, 37), 'radix must be an integer at least 2 and no greater than 36' if math.isnan(val): return 'NaN' if val == 0: return '0' if math.isinf(val): return '-Infinity' if val < 0 else 'Infinity' if radix 
== 10: # TODO: implement special cases ... ALPHABET = b'0123456789abcdefghijklmnopqrstuvwxyz.-' result = collections.deque() sign = val < 0 val = abs(val) fraction, integer = math.modf(val) delta = max(math.nextafter(.0, math.inf), math.ulp(val) / 2) if fraction >= delta: result.append(-2) # `.` while fraction >= delta: delta *= radix fraction, digit = math.modf(fraction * radix) result.append(int(digit)) # if we need to round, propagate potential carry through fractional part needs_rounding = fraction > 0.5 or (fraction == 0.5 and int(digit) & 1) if needs_rounding and fraction + delta > 1: for index in reversed(range(1, len(result))): if result[index] + 1 < radix: result[index] += 1 break result.pop() else: integer += 1 break integer, digit = divmod(int(integer), radix) result.appendleft(digit) while integer > 0: integer, digit = divmod(integer, radix) result.appendleft(digit) if sign: result.appendleft(-1) # `-` return bytes(ALPHABET[digit] for digit in result).decode('ascii') # Ref: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Operator_Precedence _OPERATORS = { # None => Defined in JSInterpreter._operator '?': None, '??': None, '||': None, '&&': None, '|': _js_bit_op(operator.or_), '^': _js_bit_op(operator.xor), '&': _js_bit_op(operator.and_), '===': operator.is_, '!==': operator.is_not, '==': _js_eq_op(operator.eq), '!=': _js_eq_op(operator.ne), '<=': _js_comp_op(operator.le), '>=': _js_comp_op(operator.ge), '<': _js_comp_op(operator.lt), '>': _js_comp_op(operator.gt), '>>': _js_bit_op(operator.rshift), '<<': _js_bit_op(operator.lshift), '+': _js_arith_op(operator.add), '-': _js_arith_op(operator.sub), '*': _js_arith_op(operator.mul), '%': _js_mod, '/': _js_div, '**': _js_exp, } _COMP_OPERATORS = {'===', '!==', '==', '!=', '<=', '>=', '<', '>'} _NAME_RE = r'[a-zA-Z_$][\w$]*' _MATCHING_PARENS = dict(zip(*zip('()', '{}', '[]', strict=True), strict=True)) _QUOTES = '\'"/' _NESTED_BRACKETS = r'[^[\]]+(?:\[[^[\]]+(?:\[[^\]]+\])?\])?' 
class JS_Undefined: pass class JS_Break(ExtractorError): def __init__(self): ExtractorError.__init__(self, 'Invalid break') class JS_Continue(ExtractorError): def __init__(self): ExtractorError.__init__(self, 'Invalid continue') class JS_Throw(ExtractorError): def __init__(self, e): self.error = e ExtractorError.__init__(self, f'Uncaught exception {e}') class LocalNameSpace(collections.ChainMap): def __setitem__(self, key, value): for scope in self.maps: if key in scope: scope[key] = value return self.maps[0][key] = value def __delitem__(self, key): raise NotImplementedError('Deleting is not supported') def set_local(self, key, value): self.maps[0][key] = value def get_local(self, key): if key in self.maps[0]: return self.maps[0][key] return JS_Undefined class Debugger: import sys ENABLED = False and 'pytest' in sys.modules @staticmethod def write(*args, level=100): write_string(f'[debug] JS: {" " * (100 - level)}' f'{" ".join(truncate_string(str(x), 50, 50) for x in args)}\n') @classmethod def wrap_interpreter(cls, f): def interpret_statement(self, stmt, local_vars, allow_recursion, *args, **kwargs): if cls.ENABLED and stmt.strip(): cls.write(stmt, level=allow_recursion) try: ret, should_ret = f(self, stmt, local_vars, allow_recursion, *args, **kwargs) except Exception as e: if cls.ENABLED: if isinstance(e, ExtractorError): e = e.orig_msg cls.write('=> Raises:', e, '<-|', stmt, level=allow_recursion) raise if cls.ENABLED and stmt.strip(): if should_ret or repr(ret) != stmt: cls.write(['->', '=>'][should_ret], repr(ret), '<-|', stmt, level=allow_recursion) return ret, should_ret return interpret_statement class JSInterpreter: __named_object_counter = 0 _RE_FLAGS = { # special knowledge: Python's re flags are bitmask values, current max 128 # invent new bitmask values well above that for literal parsing # TODO: new pattern class to execute matches with these flags 'd': 1024, # Generate indices for substring matches 'g': 2048, # Global search 'i': re.I, # 
Case-insensitive search 'm': re.M, # Multi-line search 's': re.S, # Allows . to match newline characters 'u': re.U, # Treat a pattern as a sequence of unicode code points 'y': 4096, # Perform a "sticky" search that matches starting at the current position in the target string } def __init__(self, code, objects=None): self.code, self._functions = code, {} self._objects = {} if objects is None else objects self._undefined_varnames = set() class Exception(ExtractorError): # noqa: A001 def __init__(self, msg, expr=None, *args, **kwargs): if expr is not None: msg = f'{msg.rstrip()} in: {truncate_string(expr, 50, 50)}' super().__init__(msg, *args, **kwargs) def _named_object(self, namespace, obj): self.__named_object_counter += 1 name = f'__yt_dlp_jsinterp_obj{self.__named_object_counter}' if callable(obj) and not isinstance(obj, function_with_repr): obj = function_with_repr(obj, f'F<{self.__named_object_counter}>') namespace[name] = obj return name @classmethod def _regex_flags(cls, expr): flags = 0 if not expr: return flags, expr for idx, ch in enumerate(expr): # noqa: B007 if ch not in cls._RE_FLAGS: break flags |= cls._RE_FLAGS[ch] return flags, expr[idx + 1:] @staticmethod def _separate(expr, delim=',', max_split=None): OP_CHARS = '+-*/%&|^=<>!,;{}:[' if not expr: return counters = dict.fromkeys(_MATCHING_PARENS.values(), 0) start, splits, pos, delim_len = 0, 0, 0, len(delim) - 1 in_quote, escaping, after_op, in_regex_char_group = None, False, True, False for idx, char in enumerate(expr): if not in_quote and char in _MATCHING_PARENS: counters[_MATCHING_PARENS[char]] += 1 elif not in_quote and char in counters: # Something's wrong if we get negative, but ignore it anyway if counters[char]: counters[char] -= 1 elif not escaping: if char in _QUOTES and in_quote in (char, None): if in_quote or after_op or char != '/': in_quote = None if in_quote and not in_regex_char_group else char elif in_quote == '/' and char in '[]': in_regex_char_group = char == '[' escaping = not 
escaping and in_quote and char == '\\' in_unary_op = (not in_quote and not in_regex_char_group and after_op not in (True, False) and char in '-+') after_op = char if (not in_quote and char in OP_CHARS) else (char.isspace() and after_op) if char != delim[pos] or any(counters.values()) or in_quote or in_unary_op: pos = 0 continue elif pos != delim_len: pos += 1 continue yield expr[start: idx - delim_len] start, pos = idx + 1, 0 splits += 1 if max_split and splits >= max_split: break yield expr[start:] @classmethod def _separate_at_paren(cls, expr, delim=None): if delim is None: delim = expr and _MATCHING_PARENS[expr[0]] separated = list(cls._separate(expr, delim, 1)) if len(separated) < 2: raise cls.Exception(f'No terminating paren {delim}', expr) return separated[0][1:].strip(), separated[1].strip() def _operator(self, op, left_val, right_expr, expr, local_vars, allow_recursion): if op in ('||', '&&'): if (op == '&&') ^ _js_ternary(left_val): return left_val # short circuiting elif op == '??': if left_val not in (None, JS_Undefined): return left_val elif op == '?': right_expr = _js_ternary(left_val, *self._separate(right_expr, ':', 1)) right_val = self.interpret_expression(right_expr, local_vars, allow_recursion) if not _OPERATORS.get(op): return right_val try: return _OPERATORS[op](left_val, right_val) except Exception as e: raise self.Exception(f'Failed to evaluate {left_val!r} {op} {right_val!r}', expr, cause=e) def _index(self, obj, idx, allow_undefined=False): if idx == 'length': return len(obj) try: return obj[int(idx)] if isinstance(obj, list) else obj[idx] except Exception as e: if allow_undefined: return JS_Undefined raise self.Exception(f'Cannot get index {idx}', repr(obj), cause=e) def _dump(self, obj, namespace): try: return json.dumps(obj) except TypeError: return self._named_object(namespace, obj) @Debugger.wrap_interpreter def interpret_statement(self, stmt, local_vars, allow_recursion=100, _is_var_declaration=False): if allow_recursion < 0: raise 
self.Exception('Recursion limit reached') allow_recursion -= 1 should_return = False sub_statements = list(self._separate(stmt, ';')) or [''] expr = stmt = sub_statements.pop().strip() for sub_stmt in sub_statements: ret, should_return = self.interpret_statement(sub_stmt, local_vars, allow_recursion) if should_return: return ret, should_return m = re.match(r'(?P<var>(?:var|const|let)\s)|return(?:\s+|(?=["\'])|$)|(?P<throw>throw\s+)', stmt) if m: expr = stmt[len(m.group(0)):].strip() if m.group('throw'): raise JS_Throw(self.interpret_expression(expr, local_vars, allow_recursion)) should_return = not m.group('var') _is_var_declaration = _is_var_declaration or bool(m.group('var')) if not expr: return None, should_return if expr[0] in _QUOTES: inner, outer = self._separate(expr, expr[0], 1) if expr[0] == '/': flags, outer = self._regex_flags(outer) # We don't support regex methods yet, so no point compiling it inner = f'{inner}/{flags}' # Avoid https://github.com/python/cpython/issues/74534 # inner = re.compile(inner[1:].replace('[[', r'[\['), flags=flags) else: inner = json.loads(js_to_json(f'{inner}{expr[0]}', strict=True)) if not outer: return inner, should_return expr = self._named_object(local_vars, inner) + outer if expr.startswith('new '): obj = expr[4:] if obj.startswith('Date('): left, right = self._separate_at_paren(obj[4:]) date = unified_timestamp( self.interpret_expression(left, local_vars, allow_recursion), False) if date is None: raise self.Exception(f'Failed to parse date {left!r}', expr) expr = self._dump(int(date * 1000), local_vars) + right else: raise self.Exception(f'Unsupported object {obj}', expr) if expr.startswith('void '): left = self.interpret_expression(expr[5:], local_vars, allow_recursion) return None, should_return if expr.startswith('{'): inner, outer = self._separate_at_paren(expr) # try for object expression (Map) sub_expressions = [list(self._separate(sub_expr.strip(), ':', 1)) for sub_expr in self._separate(inner)] if 
all(len(sub_expr) == 2 for sub_expr in sub_expressions): def dict_item(key, val): val = self.interpret_expression(val, local_vars, allow_recursion) if re.match(_NAME_RE, key): return key, val return self.interpret_expression(key, local_vars, allow_recursion), val return dict(dict_item(k, v) for k, v in sub_expressions), should_return inner, should_abort = self.interpret_statement(inner, local_vars, allow_recursion) if not outer or should_abort: return inner, should_abort or should_return else: expr = self._dump(inner, local_vars) + outer if expr.startswith('('): inner, outer = self._separate_at_paren(expr) inner, should_abort = self.interpret_statement(inner, local_vars, allow_recursion) if not outer or should_abort: return inner, should_abort or should_return else: expr = self._dump(inner, local_vars) + outer if expr.startswith('['): inner, outer = self._separate_at_paren(expr) name = self._named_object(local_vars, [ self.interpret_expression(item, local_vars, allow_recursion) for item in self._separate(inner)]) expr = name + outer m = re.match(r'''(?x) (?P<try>try)\s*\{| (?P<if>if)\s*\(| (?P<switch>switch)\s*\(| (?P<for>for)\s*\( ''', expr) md = m.groupdict() if m else {} if md.get('if'): cndn, expr = self._separate_at_paren(expr[m.end() - 1:]) if_expr, expr = self._separate_at_paren(expr.lstrip()) # TODO: "else if" is not handled else_expr = None m = re.match(r'else\s*{', expr) if m: else_expr, expr = self._separate_at_paren(expr[m.end() - 1:]) cndn = _js_ternary(self.interpret_expression(cndn, local_vars, allow_recursion)) ret, should_abort = self.interpret_statement( if_expr if cndn else else_expr, local_vars, allow_recursion) if should_abort: return ret, True if md.get('try'): try_expr, expr = self._separate_at_paren(expr[m.end() - 1:]) err = None try: ret, should_abort = self.interpret_statement(try_expr, local_vars, allow_recursion) if should_abort: return ret, True except Exception as e: # XXX: This works for now, but makes debugging future issues very 
hard err = e pending = (None, False) m = re.match(fr'catch\s*(?P<err>\(\s*{_NAME_RE}\s*\))?\{{', expr) if m: sub_expr, expr = self._separate_at_paren(expr[m.end() - 1:]) if err: catch_vars = {} if m.group('err'): catch_vars[m.group('err')] = err.error if isinstance(err, JS_Throw) else err catch_vars = local_vars.new_child(catch_vars) err, pending = None, self.interpret_statement(sub_expr, catch_vars, allow_recursion) m = re.match(r'finally\s*\{', expr) if m: sub_expr, expr = self._separate_at_paren(expr[m.end() - 1:]) ret, should_abort = self.interpret_statement(sub_expr, local_vars, allow_recursion) if should_abort: return ret, True ret, should_abort = pending if should_abort: return ret, True if err: raise err elif md.get('for'): constructor, remaining = self._separate_at_paren(expr[m.end() - 1:]) if remaining.startswith('{'): body, expr = self._separate_at_paren(remaining) else: switch_m = re.match(r'switch\s*\(', remaining) # FIXME: ? if switch_m: switch_val, remaining = self._separate_at_paren(remaining[switch_m.end() - 1:]) body, expr = self._separate_at_paren(remaining, '}') body = 'switch(%s){%s}' % (switch_val, body) else: body, expr = remaining, '' start, cndn, increment = self._separate(constructor, ';') self.interpret_expression(start, local_vars, allow_recursion) while True: if not _js_ternary(self.interpret_expression(cndn, local_vars, allow_recursion)): break try: ret, should_abort = self.interpret_statement(body, local_vars, allow_recursion) if should_abort: return ret, True except JS_Break: break except JS_Continue: pass self.interpret_expression(increment, local_vars, allow_recursion) elif md.get('switch'): switch_val, remaining = self._separate_at_paren(expr[m.end() - 1:]) switch_val = self.interpret_expression(switch_val, local_vars, allow_recursion) body, expr = self._separate_at_paren(remaining, '}') items = body.replace('default:', 'case default:').split('case ')[1:] for default in (False, True): matched = False for item in items: case, stmt 
= (i.strip() for i in self._separate(item, ':', 1)) if default: matched = matched or case == 'default' elif not matched: matched = (case != 'default' and switch_val == self.interpret_expression(case, local_vars, allow_recursion)) if not matched: continue try: ret, should_abort = self.interpret_statement(stmt, local_vars, allow_recursion) if should_abort: return ret except JS_Break: break if matched: break if md: ret, should_abort = self.interpret_statement(expr, local_vars, allow_recursion) return ret, should_abort or should_return # Comma separated statements sub_expressions = list(self._separate(expr)) if len(sub_expressions) > 1: for sub_expr in sub_expressions: ret, should_abort = self.interpret_statement( sub_expr, local_vars, allow_recursion, _is_var_declaration=_is_var_declaration) if should_abort: return ret, True return ret, False m = re.match(fr'''(?x) (?P<out>{_NAME_RE})(?:\[(?P<index>{_NESTED_BRACKETS})\])?\s* (?P<op>{"|".join(map(re.escape, set(_OPERATORS) - _COMP_OPERATORS))})? 
=(?!=)(?P<expr>.*)$ ''', expr) if m: # We are assigning a value to a variable left_val = local_vars.get(m.group('out')) if not m.group('index'): eval_result = self._operator( m.group('op'), left_val, m.group('expr'), expr, local_vars, allow_recursion) if _is_var_declaration: local_vars.set_local(m.group('out'), eval_result) else: local_vars[m.group('out')] = eval_result return local_vars[m.group('out')], should_return elif left_val in (None, JS_Undefined): raise self.Exception(f'Cannot index undefined variable {m.group("out")}', expr) idx = self.interpret_expression(m.group('index'), local_vars, allow_recursion) if not isinstance(idx, (int, float)): raise self.Exception(f'List index {idx} must be integer', expr) idx = int(idx) left_val[idx] = self._operator( m.group('op'), self._index(left_val, idx), m.group('expr'), expr, local_vars, allow_recursion) return left_val[idx], should_return for m in re.finditer(rf'''(?x) (?P<pre_sign>\+\+|--)(?P<var1>{_NAME_RE})| (?P<var2>{_NAME_RE})(?P<post_sign>\+\+|--)''', expr): var = m.group('var1') or m.group('var2') start, end = m.span() sign = m.group('pre_sign') or m.group('post_sign') ret = local_vars[var] local_vars[var] += 1 if sign[0] == '+' else -1 if m.group('pre_sign'): ret = local_vars[var] expr = expr[:start] + self._dump(ret, local_vars) + expr[end:] if not expr: return None, should_return m = re.match(fr'''(?x) (?P<return> (?!if|return|true|false|null|undefined|NaN)(?P<name>{_NAME_RE})$ )|(?P<attribute> (?P<var>{_NAME_RE})(?: (?P<nullish>\?)?\.(?P<member>[^(]+)| \[(?P<member2>{_NESTED_BRACKETS})\] )\s* )|(?P<indexing> (?P<in>{_NAME_RE})\[(?P<idx>.+)\]$ )|(?P<function> (?P<fname>{_NAME_RE})\((?P<args>.*)\)$ )''', expr) if expr.isdigit(): return int(expr), should_return elif expr == 'break': raise JS_Break elif expr == 'continue': raise JS_Continue elif expr == 'undefined': return JS_Undefined, should_return elif expr == 'NaN': return float('NaN'), should_return elif m and m.group('return'): var = m.group('name') # 
Declared variables if _is_var_declaration: ret = local_vars.get_local(var) # Register varname in local namespace # Set value as JS_Undefined or its pre-existing value local_vars.set_local(var, ret) else: ret = local_vars.get(var, NO_DEFAULT) if ret is NO_DEFAULT: ret = JS_Undefined self._undefined_varnames.add(var) return ret, should_return with contextlib.suppress(ValueError): return json.loads(js_to_json(expr, strict=True)), should_return if m and m.group('indexing'): val = local_vars[m.group('in')] idx = self.interpret_expression(m.group('idx'), local_vars, allow_recursion) return self._index(val, idx), should_return for op in _OPERATORS: separated = list(self._separate(expr, op)) right_expr = separated.pop() while True: if op in '?<>*-' and len(separated) > 1 and not separated[-1].strip(): separated.pop() elif not (separated and op == '?' and right_expr.startswith('.')): break right_expr = f'{op}{right_expr}' if op != '-': right_expr = f'{separated.pop()}{op}{right_expr}' if not separated: continue left_val = self.interpret_expression(op.join(separated), local_vars, allow_recursion) return self._operator(op, left_val, right_expr, expr, local_vars, allow_recursion), should_return if m and m.group('attribute'): variable, member, nullish = m.group('var', 'member', 'nullish') if not member: member = self.interpret_expression(m.group('member2'), local_vars, allow_recursion) arg_str = expr[m.end():] if arg_str.startswith('('): arg_str, remaining = self._separate_at_paren(arg_str) else: arg_str, remaining = None, arg_str def assertion(cndn, msg): """ assert, but without risk of getting optimized out """ if not cndn: raise self.Exception(f'{member} {msg}', expr) def eval_method(): nonlocal member if (variable, member) == ('console', 'debug'): if Debugger.ENABLED: Debugger.write(self.interpret_expression(f'[{arg_str}]', local_vars, allow_recursion)) return types = { 'String': str, 'Math': float, 'Array': list, } obj = local_vars.get(variable, types.get(variable, 
NO_DEFAULT)) if obj is NO_DEFAULT: if variable not in self._objects: try: self._objects[variable] = self.extract_object(variable, local_vars) except self.Exception: if not nullish: raise obj = self._objects.get(variable, JS_Undefined) if nullish and obj is JS_Undefined: return JS_Undefined # Member access if arg_str is None: return self._index(obj, member, nullish) # Function call argvals = [ self.interpret_expression(v, local_vars, allow_recursion) for v in self._separate(arg_str)] # Fixup prototype call if isinstance(obj, type) and member.startswith('prototype.'): new_member, _, func_prototype = member.partition('.')[2].partition('.') assertion(argvals, 'takes one or more arguments') assertion(isinstance(argvals[0], obj), f'needs binding to type {obj}') if func_prototype == 'call': obj, *argvals = argvals elif func_prototype == 'apply': assertion(len(argvals) == 2, 'takes two arguments') obj, argvals = argvals assertion(isinstance(argvals, list), 'second argument needs to be a list') else: raise self.Exception(f'Unsupported Function method {func_prototype}', expr) member = new_member if obj is str: if member == 'fromCharCode': assertion(argvals, 'takes one or more arguments') return ''.join(map(chr, argvals)) raise self.Exception(f'Unsupported String method {member}', expr) elif obj is float: if member == 'pow': assertion(len(argvals) == 2, 'takes two arguments') return argvals[0] ** argvals[1] raise self.Exception(f'Unsupported Math method {member}', expr) if member == 'split': assertion(argvals, 'takes one or more arguments') assertion(len(argvals) == 1, 'with limit argument is not implemented') return obj.split(argvals[0]) if argvals[0] else list(obj) elif member == 'join': assertion(isinstance(obj, list), 'must be applied on a list') assertion(len(argvals) == 1, 'takes exactly one argument') return argvals[0].join(obj) elif member == 'reverse': assertion(not argvals, 'does not take any arguments') obj.reverse() return obj elif member == 'slice': 
assertion(isinstance(obj, (list, str)), 'must be applied on a list or string') assertion(len(argvals) <= 2, 'takes between 0 and 2 arguments') return obj[slice(*argvals, None)] elif member == 'splice': assertion(isinstance(obj, list), 'must be applied on a list') assertion(argvals, 'takes one or more arguments') index, how_many = map(int, ([*argvals, len(obj)])[:2]) if index < 0: index += len(obj) add_items = argvals[2:] res = [] for _ in range(index, min(index + how_many, len(obj))): res.append(obj.pop(index)) for i, item in enumerate(add_items): obj.insert(index + i, item) return res elif member == 'unshift': assertion(isinstance(obj, list), 'must be applied on a list') assertion(argvals, 'takes one or more arguments') for item in reversed(argvals): obj.insert(0, item) return obj elif member == 'pop': assertion(isinstance(obj, list), 'must be applied on a list') assertion(not argvals, 'does not take any arguments') if not obj: return return obj.pop() elif member == 'push': assertion(argvals, 'takes one or more arguments') obj.extend(argvals) return obj
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/__init__.py
yt_dlp/__init__.py
import sys if sys.version_info < (3, 10): raise ImportError( f'You are using an unsupported version of Python. Only Python versions 3.10 and above are supported by yt-dlp') # noqa: F541 __license__ = 'The Unlicense' import collections import getpass import itertools import optparse import os import re import traceback from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS, CookieLoadError from .downloader.external import get_external_downloader from .extractor import list_extractor_classes from .extractor.adobepass import MSO_INFO from .networking.impersonate import ImpersonateTarget from .globals import IN_CLI, plugin_dirs from .options import parseOpts from .plugins import load_all_plugins as _load_all_plugins from .postprocessor import ( FFmpegExtractAudioPP, FFmpegMergerPP, FFmpegPostProcessor, FFmpegSubtitlesConvertorPP, FFmpegThumbnailsConvertorPP, FFmpegVideoConvertorPP, FFmpegVideoRemuxerPP, MetadataFromFieldPP, MetadataParserPP, ) from .update import Updater from .utils import ( NO_DEFAULT, POSTPROCESS_WHEN, DateRange, DownloadCancelled, DownloadError, FormatSorter, GeoUtils, PlaylistEntries, SameFileError, download_range_func, expand_path, float_or_none, format_field, int_or_none, join_nonempty, match_filter_func, parse_bytes, parse_duration, preferredencoding, read_batch_urls, read_stdin, render_table, setproctitle, shell_quote, variadic, write_string, ) from .utils._utils import _UnsafeExtensionError from .utils._jsruntime import ( BunJsRuntime as _BunJsRuntime, DenoJsRuntime as _DenoJsRuntime, NodeJsRuntime as _NodeJsRuntime, QuickJsRuntime as _QuickJsRuntime, ) from .YoutubeDL import YoutubeDL def _exit(status=0, *args): for msg in args: sys.stderr.write(msg) raise SystemExit(status) def get_urls(urls, batchfile, verbose): """ @param verbose -1: quiet, 0: normal, 1: verbose """ batch_urls = [] if batchfile is not None: try: batch_urls = read_batch_urls( read_stdin(None if verbose == -1 else 'URLs') if batchfile == '-' else 
open(expand_path(batchfile), encoding='utf-8', errors='ignore')) if verbose == 1: write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n') except OSError: _exit(f'ERROR: batch file {batchfile} could not be read') _enc = preferredencoding() return [ url.strip().decode(_enc, 'ignore') if isinstance(url, bytes) else url.strip() for url in batch_urls + urls] def print_extractor_information(opts, urls): out = '' if opts.list_extractors: # Importing GenericIE is currently slow since it imports YoutubeIE from .extractor.generic import GenericIE urls = dict.fromkeys(urls, False) for ie in list_extractor_classes(opts.age_limit): out += ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie.working() else '') + '\n' if ie == GenericIE: matched_urls = [url for url, matched in urls.items() if not matched] else: matched_urls = tuple(filter(ie.suitable, urls.keys())) urls.update(dict.fromkeys(matched_urls, True)) out += ''.join(f' {url}\n' for url in matched_urls) elif opts.list_extractor_descriptions: _SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny', 'burping cow') out = '\n'.join( ie.description(markdown=False, search_examples=_SEARCHES) for ie in list_extractor_classes(opts.age_limit) if ie.working() and ie.IE_DESC is not False) elif opts.ap_list_mso: out = 'Supported TV Providers:\n{}\n'.format(render_table( ['mso', 'mso name'], [[mso_id, mso_info['name']] for mso_id, mso_info in MSO_INFO.items()])) else: return False write_string(out, out=sys.stdout) return True def set_compat_opts(opts): def _unused_compat_opt(name): if name not in opts.compat_opts: return False opts.compat_opts.discard(name) opts.compat_opts.update([f'*{name}']) return True def set_default_compat(compat_name, opt_name, default=True, remove_compat=True): attr = getattr(opts, opt_name) if compat_name in opts.compat_opts: if attr is None: setattr(opts, opt_name, not default) return True else: if remove_compat: 
_unused_compat_opt(compat_name) return False elif attr is None: setattr(opts, opt_name, default) return None set_default_compat('abort-on-error', 'ignoreerrors', 'only_download') set_default_compat('no-playlist-metafiles', 'allow_playlist_files') set_default_compat('no-clean-infojson', 'clean_infojson') if 'no-attach-info-json' in opts.compat_opts: if opts.embed_infojson: _unused_compat_opt('no-attach-info-json') else: opts.embed_infojson = False if 'format-sort' in opts.compat_opts: opts.format_sort.extend(FormatSorter.ytdl_default) elif 'prefer-vp9-sort' in opts.compat_opts: FormatSorter.default = FormatSorter._prefer_vp9_sort if 'mtime-by-default' in opts.compat_opts: if opts.updatetime is None: opts.updatetime = True else: _unused_compat_opt('mtime-by-default') _video_multistreams_set = set_default_compat('multistreams', 'allow_multiple_video_streams', False, remove_compat=False) _audio_multistreams_set = set_default_compat('multistreams', 'allow_multiple_audio_streams', False, remove_compat=False) if _video_multistreams_set is False and _audio_multistreams_set is False: _unused_compat_opt('multistreams') if 'filename' in opts.compat_opts: if opts.outtmpl.get('default') is None: opts.outtmpl.update({'default': '%(title)s-%(id)s.%(ext)s'}) else: _unused_compat_opt('filename') def validate_options(opts): def validate(cndn, name, value=None, msg=None): if cndn: return True raise ValueError((msg or 'invalid {name} "{value}" given').format(name=name, value=value)) def validate_in(name, value, items, msg=None): return validate(value is None or value in items, name, value, msg) def validate_regex(name, value, regex): return validate(value is None or re.match(regex, value), name, value) def validate_positive(name, value, strict=False): return validate(value is None or value > 0 or (not strict and value == 0), name, value, '{name} "{value}" must be positive' + ('' if strict else ' or 0')) def validate_minmax(min_val, max_val, min_name, max_name=None): if max_val is None 
or min_val is None or max_val >= min_val: return if not max_name: min_name, max_name = f'min {min_name}', f'max {min_name}' raise ValueError(f'{max_name} "{max_val}" must be must be greater than or equal to {min_name} "{min_val}"') # Usernames and passwords validate(sum(map(bool, (opts.usenetrc, opts.netrc_cmd, opts.username))) <= 1, '.netrc', msg='{name}, netrc command and username/password are mutually exclusive options') validate(opts.password is None or opts.username is not None, 'account username', msg='{name} missing') validate(opts.ap_password is None or opts.ap_username is not None, 'TV Provider account username', msg='{name} missing') validate_in('TV Provider', opts.ap_mso, MSO_INFO, 'Unsupported {name} "{value}", use --ap-list-mso to get a list of supported TV Providers') # Numbers validate_positive('autonumber start', opts.autonumber_start) validate_positive('autonumber size', opts.autonumber_size, True) validate_positive('concurrent fragments', opts.concurrent_fragment_downloads, True) validate_positive('playlist start', opts.playliststart, True) if opts.playlistend != -1: validate_minmax(opts.playliststart, opts.playlistend, 'playlist start', 'playlist end') # Time ranges validate_positive('subtitles sleep interval', opts.sleep_interval_subtitles) validate_positive('requests sleep interval', opts.sleep_interval_requests) validate_positive('sleep interval', opts.sleep_interval) validate_positive('max sleep interval', opts.max_sleep_interval) if opts.sleep_interval is None: validate( opts.max_sleep_interval is None, 'min sleep interval', msg='{name} must be specified; use --min-sleep-interval') elif opts.max_sleep_interval is None: opts.max_sleep_interval = opts.sleep_interval else: validate_minmax(opts.sleep_interval, opts.max_sleep_interval, 'sleep interval') if opts.wait_for_video is not None: min_wait, max_wait, *_ = map(parse_duration, [*opts.wait_for_video.split('-', 1), None]) validate(min_wait is not None and not (max_wait is None and '-' in 
opts.wait_for_video), 'time range to wait for video', opts.wait_for_video) validate_minmax(min_wait, max_wait, 'time range to wait for video') opts.wait_for_video = (min_wait, max_wait) # Format sort for f in opts.format_sort: validate_regex('format sorting', f, FormatSorter.regex) # Postprocessor formats if opts.convertsubtitles == 'none': opts.convertsubtitles = None if opts.convertthumbnails == 'none': opts.convertthumbnails = None validate_regex('merge output format', opts.merge_output_format, r'({0})(/({0}))*'.format('|'.join(map(re.escape, FFmpegMergerPP.SUPPORTED_EXTS)))) validate_regex('audio format', opts.audioformat, FFmpegExtractAudioPP.FORMAT_RE) validate_in('subtitle format', opts.convertsubtitles, FFmpegSubtitlesConvertorPP.SUPPORTED_EXTS) validate_regex('thumbnail format', opts.convertthumbnails, FFmpegThumbnailsConvertorPP.FORMAT_RE) validate_regex('recode video format', opts.recodevideo, FFmpegVideoConvertorPP.FORMAT_RE) validate_regex('remux video format', opts.remuxvideo, FFmpegVideoRemuxerPP.FORMAT_RE) if opts.audioquality: opts.audioquality = opts.audioquality.strip('k').strip('K') # int_or_none prevents inf, nan validate_positive('audio quality', int_or_none(float_or_none(opts.audioquality), default=0)) # Retries def parse_retries(name, value): if value is None: return None elif value in ('inf', 'infinite'): return float('inf') try: int_value = int(value) except (TypeError, ValueError): validate(False, f'{name} retry count', value) validate_positive(f'{name} retry count', int_value) return int_value opts.retries = parse_retries('download', opts.retries) opts.fragment_retries = parse_retries('fragment', opts.fragment_retries) opts.extractor_retries = parse_retries('extractor', opts.extractor_retries) opts.file_access_retries = parse_retries('file access', opts.file_access_retries) # Retry sleep function def parse_sleep_func(expr): NUMBER_RE = r'\d+(?:\.\d+)?' 
op, start, limit, step, *_ = (*tuple(re.fullmatch( rf'(?:(linear|exp)=)?({NUMBER_RE})(?::({NUMBER_RE})?)?(?::({NUMBER_RE}))?', expr.strip()).groups()), None, None) if op == 'exp': return lambda n: min(float(start) * (float(step or 2) ** n), float(limit or 'inf')) else: default_step = start if op or limit else 0 return lambda n: min(float(start) + float(step or default_step) * n, float(limit or 'inf')) for key, expr in opts.retry_sleep.items(): if not expr: del opts.retry_sleep[key] continue try: opts.retry_sleep[key] = parse_sleep_func(expr) except AttributeError: raise ValueError(f'invalid {key} retry sleep expression {expr!r}') # Bytes def validate_bytes(name, value, strict_positive=False): if value is None: return None numeric_limit = parse_bytes(value) validate(numeric_limit is not None, name, value) if strict_positive: validate_positive(name, numeric_limit, True) return numeric_limit opts.ratelimit = validate_bytes('rate limit', opts.ratelimit, True) opts.throttledratelimit = validate_bytes('throttled rate limit', opts.throttledratelimit) opts.min_filesize = validate_bytes('min filesize', opts.min_filesize) opts.max_filesize = validate_bytes('max filesize', opts.max_filesize) opts.buffersize = validate_bytes('buffer size', opts.buffersize, True) opts.http_chunk_size = validate_bytes('http chunk size', opts.http_chunk_size) # Output templates def validate_outtmpl(tmpl, msg): err = YoutubeDL.validate_outtmpl(tmpl) if err: raise ValueError(f'invalid {msg} "{tmpl}": {err}') for k, tmpl in opts.outtmpl.items(): validate_outtmpl(tmpl, f'{k} output template') for type_, tmpl_list in opts.forceprint.items(): for tmpl in tmpl_list: validate_outtmpl(tmpl, f'{type_} print template') for type_, tmpl_list in opts.print_to_file.items(): for tmpl, file in tmpl_list: validate_outtmpl(tmpl, f'{type_} print to file template') validate_outtmpl(file, f'{type_} print to file filename') validate_outtmpl(opts.sponsorblock_chapter_title, 'SponsorBlock chapter title') for k, tmpl in 
opts.progress_template.items(): k = f'{k[:-6]} console title' if '-title' in k else f'{k} progress' validate_outtmpl(tmpl, f'{k} template') outtmpl_default = opts.outtmpl.get('default') if outtmpl_default == '': opts.skip_download = None del opts.outtmpl['default'] def parse_chapters(name, value, advanced=False): parse_timestamp = lambda x: float('inf') if x in ('inf', 'infinite') else parse_duration(x) TIMESTAMP_RE = r'''(?x)(?: (?P<start_sign>-?)(?P<start>[^-]+) )?\s*-\s*(?: (?P<end_sign>-?)(?P<end>[^-]+) )?''' chapters, ranges, from_url = [], [], False for regex in value or []: if advanced and regex == '*from-url': from_url = True continue elif not regex.startswith('*'): try: chapters.append(re.compile(regex)) except re.error as err: raise ValueError(f'invalid {name} regex "{regex}" - {err}') continue for range_ in map(str.strip, regex[1:].split(',')): mobj = range_ != '-' and re.fullmatch(TIMESTAMP_RE, range_) dur = mobj and [parse_timestamp(mobj.group('start') or '0'), parse_timestamp(mobj.group('end') or 'inf')] signs = mobj and (mobj.group('start_sign'), mobj.group('end_sign')) err = None if None in (dur or [None]): err = 'Must be of the form "*start-end"' elif not advanced and any(signs): err = 'Negative timestamps are not allowed' else: dur[0] *= -1 if signs[0] else 1 dur[1] *= -1 if signs[1] else 1 if dur[1] == float('-inf'): err = '"-inf" is not a valid end' if err: raise ValueError(f'invalid {name} time range "{regex}". {err}') ranges.append(dur) return chapters, ranges, from_url opts.remove_chapters, opts.remove_ranges, _ = parse_chapters('--remove-chapters', opts.remove_chapters) opts.download_ranges = download_range_func(*parse_chapters('--download-sections', opts.download_ranges, True)) # Cookies from browser if opts.cookiesfrombrowser: container = None mobj = re.fullmatch(r'''(?x) (?P<name>[^+:]+) (?:\s*\+\s*(?P<keyring>[^:]+))? (?:\s*:\s*(?!:)(?P<profile>.+?))? (?:\s*::\s*(?P<container>.+))? 
''', opts.cookiesfrombrowser) if mobj is None: raise ValueError(f'invalid cookies from browser arguments: {opts.cookiesfrombrowser}') browser_name, keyring, profile, container = mobj.group('name', 'keyring', 'profile', 'container') browser_name = browser_name.lower() if browser_name not in SUPPORTED_BROWSERS: raise ValueError(f'unsupported browser specified for cookies: "{browser_name}". ' f'Supported browsers are: {", ".join(sorted(SUPPORTED_BROWSERS))}') if keyring is not None: keyring = keyring.upper() if keyring not in SUPPORTED_KEYRINGS: raise ValueError(f'unsupported keyring specified for cookies: "{keyring}". ' f'Supported keyrings are: {", ".join(sorted(SUPPORTED_KEYRINGS))}') opts.cookiesfrombrowser = (browser_name, profile, keyring, container) if opts.impersonate is not None: opts.impersonate = ImpersonateTarget.from_str(opts.impersonate.lower()) # MetadataParser def metadataparser_actions(f): if isinstance(f, str): cmd = f'--parse-metadata {shell_quote(f)}' try: actions = [MetadataFromFieldPP.to_action(f)] except Exception as err: raise ValueError(f'{cmd} is invalid; {err}') else: cmd = f'--replace-in-metadata {shell_quote(f)}' actions = ((MetadataParserPP.Actions.REPLACE, x, *f[1:]) for x in f[0].split(',')) for action in actions: try: MetadataParserPP.validate_action(*action) except Exception as err: raise ValueError(f'{cmd} is invalid; {err}') yield action if opts.metafromtitle is not None: opts.parse_metadata.setdefault('pre_process', []).append(f'title:{opts.metafromtitle}') opts.parse_metadata = { k: list(itertools.chain(*map(metadataparser_actions, v))) for k, v in opts.parse_metadata.items() } # Other options opts.plugin_dirs = opts.plugin_dirs if opts.plugin_dirs is None: opts.plugin_dirs = ['default'] if opts.playlist_items is not None: try: tuple(PlaylistEntries.parse_playlist_items(opts.playlist_items)) except Exception as err: raise ValueError(f'Invalid playlist-items {opts.playlist_items!r}: {err}') opts.geo_bypass_country, 
opts.geo_bypass_ip_block = None, None if opts.geo_bypass.lower() not in ('default', 'never'): try: GeoUtils.random_ipv4(opts.geo_bypass) except Exception: raise ValueError(f'Unsupported --xff "{opts.geo_bypass}"') if len(opts.geo_bypass) == 2: opts.geo_bypass_country = opts.geo_bypass else: opts.geo_bypass_ip_block = opts.geo_bypass opts.geo_bypass = opts.geo_bypass.lower() != 'never' opts.match_filter = match_filter_func(opts.match_filter, opts.breaking_match_filter) if opts.download_archive is not None: opts.download_archive = expand_path(opts.download_archive) if opts.ffmpeg_location is not None: opts.ffmpeg_location = expand_path(opts.ffmpeg_location) if opts.user_agent is not None: opts.headers.setdefault('User-Agent', opts.user_agent) if opts.referer is not None: opts.headers.setdefault('Referer', opts.referer) if opts.no_sponsorblock: opts.sponsorblock_mark = opts.sponsorblock_remove = set() default_downloader = None for proto, path in opts.external_downloader.items(): if path == 'native': continue ed = get_external_downloader(path) if ed is None: raise ValueError( f'No such {format_field(proto, None, "%s ", ignore="default")}external downloader "{path}"') elif ed and proto == 'default': default_downloader = ed.get_basename() for policy in opts.color.values(): if policy not in ('always', 'auto', 'auto-tty', 'no_color', 'no_color-tty', 'never'): raise ValueError(f'"{policy}" is not a valid color policy') warnings, deprecation_warnings = [], [] # Common mistake: -f best if opts.format == 'best': warnings.append('.\n '.join(( '"-f best" selects the best pre-merged format which is often not the best option', 'To let yt-dlp download and merge the best available formats, simply do not pass any format selection', 'If you know what you are doing and want only the best pre-merged format, use "-f b" instead to suppress this warning'))) # Common mistake: -f mp4 if opts.format == 'mp4': warnings.append('.\n '.join(( '"-f mp4" selects the best pre-merged mp4 format which 
is often not what\'s intended', 'Pre-merged mp4 formats are not available from all sites, or may only be available in lower quality', 'To prioritize the best h264 video and aac audio in an mp4 container, use "-t mp4" instead', 'If you know what you are doing and want a pre-merged mp4 format, use "-f b[ext=mp4]" instead to suppress this warning'))) # --(postprocessor/downloader)-args without name def report_args_compat(name, value, key1, key2=None, where=None): if key1 in value and key2 not in value: warnings.append(f'{name.title()} arguments given without specifying name. ' f'The arguments will be given to {where or f"all {name}s"}') return True return False if report_args_compat('external downloader', opts.external_downloader_args, 'default', where=default_downloader) and default_downloader: # Compat with youtube-dl's behavior. See https://github.com/ytdl-org/youtube-dl/commit/49c5293014bc11ec8c009856cd63cffa6296c1e1 opts.external_downloader_args.setdefault(default_downloader, opts.external_downloader_args.pop('default')) if report_args_compat('post-processor', opts.postprocessor_args, 'default-compat', 'default'): opts.postprocessor_args['default'] = opts.postprocessor_args.pop('default-compat') def report_conflict(arg1, opt1, arg2='--allow-unplayable-formats', opt2='allow_unplayable_formats', val1=NO_DEFAULT, val2=NO_DEFAULT, default=False): if val2 is NO_DEFAULT: val2 = getattr(opts, opt2) if not val2: return if val1 is NO_DEFAULT: val1 = getattr(opts, opt1) if val1: warnings.append(f'{arg1} is ignored since {arg2} was given') setattr(opts, opt1, default) # Conflicting options report_conflict('--playlist-reverse', 'playlist_reverse', '--playlist-random', 'playlist_random') report_conflict('--playlist-reverse', 'playlist_reverse', '--lazy-playlist', 'lazy_playlist') report_conflict('--playlist-random', 'playlist_random', '--lazy-playlist', 'lazy_playlist') report_conflict('--dateafter', 'dateafter', '--date', 'date', default=None) report_conflict('--datebefore', 
'datebefore', '--date', 'date', default=None) report_conflict('--exec-before-download', 'exec_before_dl_cmd', '"--exec before_dl:"', 'exec_cmd', val2=opts.exec_cmd.get('before_dl')) report_conflict('--id', 'useid', '--output', 'outtmpl', val2=opts.outtmpl.get('default')) report_conflict('--remux-video', 'remuxvideo', '--recode-video', 'recodevideo') # Conflicts with --allow-unplayable-formats report_conflict('--embed-metadata', 'addmetadata') report_conflict('--embed-chapters', 'addchapters') report_conflict('--embed-info-json', 'embed_infojson') report_conflict('--embed-subs', 'embedsubtitles') report_conflict('--embed-thumbnail', 'embedthumbnail') report_conflict('--extract-audio', 'extractaudio') report_conflict('--fixup', 'fixup', val1=opts.fixup not in (None, 'never', 'ignore'), default='never') report_conflict('--recode-video', 'recodevideo') report_conflict('--remove-chapters', 'remove_chapters', default=[]) report_conflict('--remux-video', 'remuxvideo') report_conflict('--sponsorblock-remove', 'sponsorblock_remove', default=set()) report_conflict('--xattrs', 'xattrs') if hasattr(opts, '_deprecated_options'): deprecation_warnings.append( f'The following options have been deprecated: {", ".join(opts._deprecated_options)}\n' 'Please remove them from your command/configuration to avoid future errors.\n' 'See https://github.com/yt-dlp/yt-dlp/issues/14198 for more details') del opts._deprecated_options # Dependent options opts.date = DateRange.day(opts.date) if opts.date else DateRange(opts.dateafter, opts.datebefore) if opts.exec_before_dl_cmd: opts.exec_cmd['before_dl'] = opts.exec_before_dl_cmd if opts.useid: # --id is not deprecated in youtube-dl opts.outtmpl['default'] = '%(id)s.%(ext)s' if opts.overwrites: # --force-overwrites implies --no-continue opts.continue_dl = False if (opts.addmetadata or opts.sponsorblock_mark) and opts.addchapters is None: # Add chapters when adding metadata or marking sponsors opts.addchapters = True if opts.extractaudio and not 
opts.keepvideo and opts.format is None: # Do not unnecessarily download audio opts.format = 'bestaudio/best' if opts.getcomments and opts.writeinfojson is None and not opts.embed_infojson: # If JSON is not printed anywhere, but comments are requested, save it to file if not opts.dumpjson or opts.print_json or opts.dump_single_json: opts.writeinfojson = True if opts.allsubtitles and not (opts.embedsubtitles or opts.writeautomaticsub): # --all-sub automatically sets --write-sub if --write-auto-sub is not given opts.writesubtitles = True if opts.addmetadata and opts.embed_infojson is None: # If embedding metadata and infojson is present, embed it opts.embed_infojson = 'if_exists' # Ask for passwords if opts.username is not None and opts.password is None: opts.password = getpass.getpass('Type account password and press [Return]: ') if opts.ap_username is not None and opts.ap_password is None: opts.ap_password = getpass.getpass('Type TV provider account password and press [Return]: ') # compat option changes global state destructively; only allow from cli if 'allow-unsafe-ext' in opts.compat_opts: warnings.append( 'Using allow-unsafe-ext opens you up to potential attacks. 
' 'Use with great care!') _UnsafeExtensionError.sanitize_extension = lambda x, prepend=False: x return warnings, deprecation_warnings def get_postprocessors(opts): yield from opts.add_postprocessors for when, actions in opts.parse_metadata.items(): yield { 'key': 'MetadataParser', 'actions': actions, 'when': when, } sponsorblock_query = opts.sponsorblock_mark | opts.sponsorblock_remove if sponsorblock_query: yield { 'key': 'SponsorBlock', 'categories': sponsorblock_query, 'api': opts.sponsorblock_api, 'when': 'after_filter', } if opts.convertsubtitles: yield { 'key': 'FFmpegSubtitlesConvertor', 'format': opts.convertsubtitles, 'when': 'before_dl', } if opts.convertthumbnails: yield { 'key': 'FFmpegThumbnailsConvertor', 'format': opts.convertthumbnails, 'when': 'before_dl', } if opts.extractaudio: yield { 'key': 'FFmpegExtractAudio', 'preferredcodec': opts.audioformat, 'preferredquality': opts.audioquality, 'nopostoverwrites': opts.nopostoverwrites, } if opts.remuxvideo: yield { 'key': 'FFmpegVideoRemuxer', 'preferedformat': opts.remuxvideo, } if opts.recodevideo: yield { 'key': 'FFmpegVideoConvertor', 'preferedformat': opts.recodevideo, } # If ModifyChapters is going to remove chapters, subtitles must already be in the container. 
if opts.embedsubtitles: keep_subs = 'no-keep-subs' not in opts.compat_opts yield { 'key': 'FFmpegEmbedSubtitle', # already_have_subtitle = True prevents the file from being deleted after embedding 'already_have_subtitle': opts.writesubtitles and keep_subs, } if not opts.writeautomaticsub and keep_subs: opts.writesubtitles = True # ModifyChapters must run before FFmpegMetadataPP if opts.remove_chapters or sponsorblock_query: yield { 'key': 'ModifyChapters', 'remove_chapters_patterns': opts.remove_chapters, 'remove_sponsor_segments': opts.sponsorblock_remove, 'remove_ranges': opts.remove_ranges, 'sponsorblock_chapter_title': opts.sponsorblock_chapter_title, 'force_keyframes': opts.force_keyframes_at_cuts, } # FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and # FFmpegExtractAudioPP as containers before conversion may not support # metadata (3gp, webm, etc.) # By default ffmpeg preserves metadata applicable for both # source and target containers. From this point the container won't change, # so metadata can be added here. 
if opts.addmetadata or opts.addchapters or opts.embed_infojson: yield { 'key': 'FFmpegMetadata', 'add_chapters': opts.addchapters, 'add_metadata': opts.addmetadata, 'add_infojson': opts.embed_infojson, } if opts.embedthumbnail: yield { 'key': 'EmbedThumbnail', # already_have_thumbnail = True prevents the file from being deleted after embedding 'already_have_thumbnail': opts.writethumbnail, } if not opts.writethumbnail: opts.writethumbnail = True opts.outtmpl['pl_thumbnail'] = '' if opts.split_chapters: yield { 'key': 'FFmpegSplitChapters', 'force_keyframes': opts.force_keyframes_at_cuts, } # XAttrMetadataPP should be run after post-processors that may change file contents if opts.xattrs: yield {'key': 'XAttrMetadata'} if opts.concat_playlist != 'never': yield { 'key': 'FFmpegConcat', 'only_multi_video': opts.concat_playlist != 'always', 'when': 'playlist', } # Exec must be the last PP of each category for when, exec_cmd in opts.exec_cmd.items(): yield { 'key': 'Exec', 'exec_cmd': exec_cmd, 'when': when, } ParsedOptions = collections.namedtuple('ParsedOptions', ('parser', 'options', 'urls', 'ydl_opts')) def parse_options(argv=None): """@returns ParsedOptions(parser, opts, urls, ydl_opts)""" parser, opts, urls = parseOpts(argv) urls = get_urls(urls, opts.batchfile, -1 if opts.quiet and not opts.verbose else opts.verbose) set_compat_opts(opts) try: warnings, deprecation_warnings = validate_options(opts) except ValueError as err: parser.error(f'{err}\n') postprocessors = list(get_postprocessors(opts)) print_only = bool(opts.forceprint) and all(k not in opts.forceprint for k in POSTPROCESS_WHEN[3:]) any_getting = any(getattr(opts, k) for k in ( 'dumpjson', 'dump_single_json', 'getdescription', 'getduration', 'getfilename', 'getformat', 'getid', 'getthumbnail', 'gettitle', 'geturl', )) if opts.quiet is None: opts.quiet = any_getting or opts.print_json or bool(opts.forceprint) playlist_pps = [pp for pp in postprocessors if pp.get('when') == 'playlist'] 
write_playlist_infojson = (opts.writeinfojson and not opts.clean_infojson and opts.allow_playlist_files and opts.outtmpl.get('pl_infojson') != '') if not any(( opts.extract_flat,
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/minicurses.py
yt_dlp/minicurses.py
import functools from threading import Lock from .utils import supports_terminal_sequences, write_string CONTROL_SEQUENCES = { 'DOWN': '\n', 'UP': '\033[A', 'ERASE_LINE': '\033[K', 'RESET': '\033[0m', } _COLORS = { 'BLACK': '0', 'RED': '1', 'GREEN': '2', 'YELLOW': '3', 'BLUE': '4', 'PURPLE': '5', 'CYAN': '6', 'WHITE': '7', } _TEXT_STYLES = { 'NORMAL': '0', 'BOLD': '1', 'UNDERLINED': '4', } def format_text(text, f): ''' @param f String representation of formatting to apply in the form: [style] [light] font_color [on [light] bg_color] E.g. "red", "bold green on light blue" ''' f = f.upper() tokens = f.strip().split() bg_color = '' if 'ON' in tokens: if tokens[-1] == 'ON': raise SyntaxError(f'Empty background format specified in {f!r}') if tokens[-1] not in _COLORS: raise SyntaxError(f'{tokens[-1]} in {f!r} must be a color') bg_color = f'4{_COLORS[tokens.pop()]}' if tokens[-1] == 'LIGHT': bg_color = f'0;10{bg_color[1:]}' tokens.pop() if tokens[-1] != 'ON': raise SyntaxError(f'Invalid format {f.split(" ON ", 1)[1]!r} in {f!r}') bg_color = f'\033[{bg_color}m' tokens.pop() if not tokens: fg_color = '' elif tokens[-1] not in _COLORS: raise SyntaxError(f'{tokens[-1]} in {f!r} must be a color') else: fg_color = f'3{_COLORS[tokens.pop()]}' if tokens and tokens[-1] == 'LIGHT': fg_color = f'9{fg_color[1:]}' tokens.pop() fg_style = tokens.pop() if tokens and tokens[-1] in _TEXT_STYLES else 'NORMAL' fg_color = f'\033[{_TEXT_STYLES[fg_style]};{fg_color}m' if tokens: raise SyntaxError(f'Invalid format {" ".join(tokens)!r} in {f!r}') if fg_color or bg_color: text = text.replace(CONTROL_SEQUENCES['RESET'], f'{fg_color}{bg_color}') return f'{fg_color}{bg_color}{text}{CONTROL_SEQUENCES["RESET"]}' else: return text class MultilinePrinterBase: def __init__(self, stream=None, lines=1): self.stream = stream self.maximum = lines - 1 self._HAVE_FULLCAP = supports_terminal_sequences(stream) def __enter__(self): return self def __exit__(self, *args): self.end() def print_at_line(self, text, 
pos): pass def end(self): pass def _add_line_number(self, text, line): if self.maximum: return f'{line + 1}: {text}' return text def write(self, *text): write_string(''.join(text), self.stream) class QuietMultilinePrinter(MultilinePrinterBase): pass class MultilineLogger(MultilinePrinterBase): def write(self, *text): self.stream.debug(''.join(text)) def print_at_line(self, text, pos): # stream is the logger object, not an actual stream self.write(self._add_line_number(text, pos)) class BreaklineStatusPrinter(MultilinePrinterBase): def print_at_line(self, text, pos): self.write(self._add_line_number(text, pos), '\n') class MultilinePrinter(MultilinePrinterBase): def __init__(self, stream=None, lines=1, preserve_output=True): super().__init__(stream, lines) self.preserve_output = preserve_output self._lastline = self._lastlength = 0 self._movelock = Lock() def lock(func): @functools.wraps(func) def wrapper(self, *args, **kwargs): with self._movelock: return func(self, *args, **kwargs) return wrapper def _move_cursor(self, dest): current = min(self._lastline, self.maximum) yield '\r' distance = dest - current if distance < 0: yield CONTROL_SEQUENCES['UP'] * -distance elif distance > 0: yield CONTROL_SEQUENCES['DOWN'] * distance self._lastline = dest @lock def print_at_line(self, text, pos): if self._HAVE_FULLCAP: self.write(*self._move_cursor(pos), CONTROL_SEQUENCES['ERASE_LINE'], text) return text = self._add_line_number(text, pos) textlen = len(text) if self._lastline == pos: # move cursor at the start of progress when writing to same line prefix = '\r' if self._lastlength > textlen: text += ' ' * (self._lastlength - textlen) self._lastlength = textlen else: # otherwise, break the line prefix = '\n' self._lastlength = textlen self.write(prefix, text) self._lastline = pos @lock def end(self): # move cursor to the end of the last line, and write line break # so that other to_screen calls can precede text = self._move_cursor(self.maximum) if self._HAVE_FULLCAP else [] 
if self.preserve_output: self.write(*text, '\n') return if self._HAVE_FULLCAP: self.write( *text, CONTROL_SEQUENCES['ERASE_LINE'], f'{CONTROL_SEQUENCES["UP"]}{CONTROL_SEQUENCES["ERASE_LINE"]}' * self.maximum) else: self.write('\r', ' ' * self._lastlength, '\r')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/aes.py
yt_dlp/aes.py
import base64 from math import ceil from .compat import compat_ord from .dependencies import Cryptodome if Cryptodome.AES: def aes_cbc_decrypt_bytes(data, key, iv): """ Decrypt bytes with AES-CBC using pycryptodome """ return Cryptodome.AES.new(key, Cryptodome.AES.MODE_CBC, iv).decrypt(data) def aes_gcm_decrypt_and_verify_bytes(data, key, tag, nonce): """ Decrypt bytes with AES-GCM using pycryptodome """ return Cryptodome.AES.new(key, Cryptodome.AES.MODE_GCM, nonce).decrypt_and_verify(data, tag) else: def aes_cbc_decrypt_bytes(data, key, iv): """ Decrypt bytes with AES-CBC using native implementation since pycryptodome is unavailable """ return bytes(aes_cbc_decrypt(*map(list, (data, key, iv)))) def aes_gcm_decrypt_and_verify_bytes(data, key, tag, nonce): """ Decrypt bytes with AES-GCM using native implementation since pycryptodome is unavailable """ return bytes(aes_gcm_decrypt_and_verify(*map(list, (data, key, tag, nonce)))) def aes_cbc_encrypt_bytes(data, key, iv, **kwargs): return bytes(aes_cbc_encrypt(*map(list, (data, key, iv)), **kwargs)) BLOCK_SIZE_BYTES = 16 def unpad_pkcs7(data): return data[:-compat_ord(data[-1])] def pkcs7_padding(data): """ PKCS#7 padding @param {int[]} data cleartext @returns {int[]} padding data """ remaining_length = BLOCK_SIZE_BYTES - len(data) % BLOCK_SIZE_BYTES return data + [remaining_length] * remaining_length def pad_block(block, padding_mode): """ Pad a block with the given padding mode @param {int[]} block block to pad @param padding_mode padding mode """ padding_size = BLOCK_SIZE_BYTES - len(block) PADDING_BYTE = { 'pkcs7': padding_size, 'iso7816': 0x0, 'whitespace': 0x20, 'zero': 0x0, } if padding_size < 0: raise ValueError('Block size exceeded') elif padding_mode not in PADDING_BYTE: raise NotImplementedError(f'Padding mode {padding_mode} is not implemented') if padding_mode == 'iso7816' and padding_size: block = [*block, 0x80] # NB: += mutates list padding_size -= 1 return block + [PADDING_BYTE[padding_mode]] * 
padding_size def aes_ecb_encrypt(data, key, iv=None): """ Encrypt with aes in ECB mode. Using PKCS#7 padding @param {int[]} data cleartext @param {int[]} key 16/24/32-Byte cipher key @param {int[]} iv Unused for this mode @returns {int[]} encrypted data """ expanded_key = key_expansion(key) block_count = ceil(len(data) / BLOCK_SIZE_BYTES) encrypted_data = [] for i in range(block_count): block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES] encrypted_data += aes_encrypt(pkcs7_padding(block), expanded_key) return encrypted_data def aes_ecb_decrypt(data, key, iv=None): """ Decrypt with aes in ECB mode @param {int[]} data cleartext @param {int[]} key 16/24/32-Byte cipher key @param {int[]} iv Unused for this mode @returns {int[]} decrypted data """ expanded_key = key_expansion(key) block_count = ceil(len(data) / BLOCK_SIZE_BYTES) encrypted_data = [] for i in range(block_count): block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES] encrypted_data += aes_decrypt(block, expanded_key) return encrypted_data[:len(data)] def aes_ctr_decrypt(data, key, iv): """ Decrypt with aes in counter mode @param {int[]} data cipher @param {int[]} key 16/24/32-Byte cipher key @param {int[]} iv 16-Byte initialization vector @returns {int[]} decrypted data """ return aes_ctr_encrypt(data, key, iv) def aes_ctr_encrypt(data, key, iv): """ Encrypt with aes in counter mode @param {int[]} data cleartext @param {int[]} key 16/24/32-Byte cipher key @param {int[]} iv 16-Byte initialization vector @returns {int[]} encrypted data """ expanded_key = key_expansion(key) block_count = ceil(len(data) / BLOCK_SIZE_BYTES) counter = iter_vector(iv) encrypted_data = [] for i in range(block_count): counter_block = next(counter) block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES] block += [0] * (BLOCK_SIZE_BYTES - len(block)) cipher_counter_block = aes_encrypt(counter_block, expanded_key) encrypted_data += xor(block, cipher_counter_block) return encrypted_data[:len(data)] def 
aes_cbc_decrypt(data, key, iv): """ Decrypt with aes in CBC mode @param {int[]} data cipher @param {int[]} key 16/24/32-Byte cipher key @param {int[]} iv 16-Byte IV @returns {int[]} decrypted data """ expanded_key = key_expansion(key) block_count = ceil(len(data) / BLOCK_SIZE_BYTES) decrypted_data = [] previous_cipher_block = iv for i in range(block_count): block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES] block += [0] * (BLOCK_SIZE_BYTES - len(block)) decrypted_block = aes_decrypt(block, expanded_key) decrypted_data += xor(decrypted_block, previous_cipher_block) previous_cipher_block = block return decrypted_data[:len(data)] def aes_cbc_encrypt(data, key, iv, *, padding_mode='pkcs7'): """ Encrypt with aes in CBC mode @param {int[]} data cleartext @param {int[]} key 16/24/32-Byte cipher key @param {int[]} iv 16-Byte IV @param padding_mode Padding mode to use @returns {int[]} encrypted data """ expanded_key = key_expansion(key) block_count = ceil(len(data) / BLOCK_SIZE_BYTES) encrypted_data = [] previous_cipher_block = iv for i in range(block_count): block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES] block = pad_block(block, padding_mode) mixed_block = xor(block, previous_cipher_block) encrypted_block = aes_encrypt(mixed_block, expanded_key) encrypted_data += encrypted_block previous_cipher_block = encrypted_block return encrypted_data def aes_gcm_decrypt_and_verify(data, key, tag, nonce): """ Decrypt with aes in GBM mode and checks authenticity using tag @param {int[]} data cipher @param {int[]} key 16-Byte cipher key @param {int[]} tag authentication tag @param {int[]} nonce IV (recommended 12-Byte) @returns {int[]} decrypted data """ # XXX: check aes, gcm param hash_subkey = aes_encrypt([0] * BLOCK_SIZE_BYTES, key_expansion(key)) if len(nonce) == 12: j0 = [*nonce, 0, 0, 0, 1] else: fill = (BLOCK_SIZE_BYTES - (len(nonce) % BLOCK_SIZE_BYTES)) % BLOCK_SIZE_BYTES + 8 ghash_in = nonce + [0] * fill + list((8 * len(nonce)).to_bytes(8, 'big')) j0 
= ghash(hash_subkey, ghash_in) # TODO: add nonce support to aes_ctr_decrypt # nonce_ctr = j0[:12] iv_ctr = inc(j0) decrypted_data = aes_ctr_decrypt(data, key, iv_ctr + [0] * (BLOCK_SIZE_BYTES - len(iv_ctr))) pad_len = (BLOCK_SIZE_BYTES - (len(data) % BLOCK_SIZE_BYTES)) % BLOCK_SIZE_BYTES s_tag = ghash( hash_subkey, data + [0] * pad_len # pad + list((0 * 8).to_bytes(8, 'big') # length of associated data + ((len(data) * 8).to_bytes(8, 'big'))), # length of data ) if tag != aes_ctr_encrypt(s_tag, key, j0): raise ValueError('Mismatching authentication tag') return decrypted_data def aes_encrypt(data, expanded_key): """ Encrypt one block with aes @param {int[]} data 16-Byte state @param {int[]} expanded_key 176/208/240-Byte expanded key @returns {int[]} 16-Byte cipher """ rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1 data = xor(data, expanded_key[:BLOCK_SIZE_BYTES]) for i in range(1, rounds + 1): data = sub_bytes(data) data = shift_rows(data) if i != rounds: data = list(iter_mix_columns(data, MIX_COLUMN_MATRIX)) data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]) return data def aes_decrypt(data, expanded_key): """ Decrypt one block with aes @param {int[]} data 16-Byte cipher @param {int[]} expanded_key 176/208/240-Byte expanded key @returns {int[]} 16-Byte state """ rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1 for i in range(rounds, 0, -1): data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]) if i != rounds: data = list(iter_mix_columns(data, MIX_COLUMN_MATRIX_INV)) data = shift_rows_inv(data) data = sub_bytes_inv(data) return xor(data, expanded_key[:BLOCK_SIZE_BYTES]) def aes_decrypt_text(data, password, key_size_bytes): """ Decrypt text - The first 8 Bytes of decoded 'data' are the 8 high Bytes of the counter - The cipher key is retrieved by encrypting the first 16 Byte of 'password' with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's) - Mode of operation is 'counter' 
@param {str} data Base64 encoded string @param {str,unicode} password Password (will be encoded with utf-8) @param {int} key_size_bytes Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit @returns {str} Decrypted data """ NONCE_LENGTH_BYTES = 8 data = list(base64.b64decode(data)) password = list(password.encode()) key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password)) key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES) nonce = data[:NONCE_LENGTH_BYTES] cipher = data[NONCE_LENGTH_BYTES:] decrypted_data = aes_ctr_decrypt(cipher, key, nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)) return bytes(decrypted_data) RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36) SBOX = (0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76, 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0, 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15, 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75, 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84, 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF, 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8, 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2, 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73, 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB, 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79, 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08, 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 
0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A, 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16) SBOX_INV = (0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d) MIX_COLUMN_MATRIX = ((0x2, 0x3, 0x1, 0x1), (0x1, 0x2, 0x3, 0x1), (0x1, 0x1, 0x2, 0x3), (0x3, 0x1, 0x1, 0x2)) 
MIX_COLUMN_MATRIX_INV = ((0xE, 0xB, 0xD, 0x9), (0x9, 0xE, 0xB, 0xD), (0xD, 0x9, 0xE, 0xB), (0xB, 0xD, 0x9, 0xE)) RIJNDAEL_EXP_TABLE = (0x01, 0x03, 0x05, 0x0F, 0x11, 0x33, 0x55, 0xFF, 0x1A, 0x2E, 0x72, 0x96, 0xA1, 0xF8, 0x13, 0x35, 0x5F, 0xE1, 0x38, 0x48, 0xD8, 0x73, 0x95, 0xA4, 0xF7, 0x02, 0x06, 0x0A, 0x1E, 0x22, 0x66, 0xAA, 0xE5, 0x34, 0x5C, 0xE4, 0x37, 0x59, 0xEB, 0x26, 0x6A, 0xBE, 0xD9, 0x70, 0x90, 0xAB, 0xE6, 0x31, 0x53, 0xF5, 0x04, 0x0C, 0x14, 0x3C, 0x44, 0xCC, 0x4F, 0xD1, 0x68, 0xB8, 0xD3, 0x6E, 0xB2, 0xCD, 0x4C, 0xD4, 0x67, 0xA9, 0xE0, 0x3B, 0x4D, 0xD7, 0x62, 0xA6, 0xF1, 0x08, 0x18, 0x28, 0x78, 0x88, 0x83, 0x9E, 0xB9, 0xD0, 0x6B, 0xBD, 0xDC, 0x7F, 0x81, 0x98, 0xB3, 0xCE, 0x49, 0xDB, 0x76, 0x9A, 0xB5, 0xC4, 0x57, 0xF9, 0x10, 0x30, 0x50, 0xF0, 0x0B, 0x1D, 0x27, 0x69, 0xBB, 0xD6, 0x61, 0xA3, 0xFE, 0x19, 0x2B, 0x7D, 0x87, 0x92, 0xAD, 0xEC, 0x2F, 0x71, 0x93, 0xAE, 0xE9, 0x20, 0x60, 0xA0, 0xFB, 0x16, 0x3A, 0x4E, 0xD2, 0x6D, 0xB7, 0xC2, 0x5D, 0xE7, 0x32, 0x56, 0xFA, 0x15, 0x3F, 0x41, 0xC3, 0x5E, 0xE2, 0x3D, 0x47, 0xC9, 0x40, 0xC0, 0x5B, 0xED, 0x2C, 0x74, 0x9C, 0xBF, 0xDA, 0x75, 0x9F, 0xBA, 0xD5, 0x64, 0xAC, 0xEF, 0x2A, 0x7E, 0x82, 0x9D, 0xBC, 0xDF, 0x7A, 0x8E, 0x89, 0x80, 0x9B, 0xB6, 0xC1, 0x58, 0xE8, 0x23, 0x65, 0xAF, 0xEA, 0x25, 0x6F, 0xB1, 0xC8, 0x43, 0xC5, 0x54, 0xFC, 0x1F, 0x21, 0x63, 0xA5, 0xF4, 0x07, 0x09, 0x1B, 0x2D, 0x77, 0x99, 0xB0, 0xCB, 0x46, 0xCA, 0x45, 0xCF, 0x4A, 0xDE, 0x79, 0x8B, 0x86, 0x91, 0xA8, 0xE3, 0x3E, 0x42, 0xC6, 0x51, 0xF3, 0x0E, 0x12, 0x36, 0x5A, 0xEE, 0x29, 0x7B, 0x8D, 0x8C, 0x8F, 0x8A, 0x85, 0x94, 0xA7, 0xF2, 0x0D, 0x17, 0x39, 0x4B, 0xDD, 0x7C, 0x84, 0x97, 0xA2, 0xFD, 0x1C, 0x24, 0x6C, 0xB4, 0xC7, 0x52, 0xF6, 0x01) RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7, 0x1b, 0x68, 0x33, 0xee, 0xdf, 0x03, 0x64, 0x04, 0xe0, 0x0e, 0x34, 0x8d, 0x81, 0xef, 0x4c, 0x71, 0x08, 0xc8, 0xf8, 0x69, 0x1c, 0xc1, 0x7d, 0xc2, 0x1d, 0xb5, 0xf9, 0xb9, 0x27, 0x6a, 0x4d, 0xe4, 0xa6, 0x72, 0x9a, 0xc9, 0x09, 0x78, 0x65, 0x2f, 0x8a, 
0x05, 0x21, 0x0f, 0xe1, 0x24, 0x12, 0xf0, 0x82, 0x45, 0x35, 0x93, 0xda, 0x8e, 0x96, 0x8f, 0xdb, 0xbd, 0x36, 0xd0, 0xce, 0x94, 0x13, 0x5c, 0xd2, 0xf1, 0x40, 0x46, 0x83, 0x38, 0x66, 0xdd, 0xfd, 0x30, 0xbf, 0x06, 0x8b, 0x62, 0xb3, 0x25, 0xe2, 0x98, 0x22, 0x88, 0x91, 0x10, 0x7e, 0x6e, 0x48, 0xc3, 0xa3, 0xb6, 0x1e, 0x42, 0x3a, 0x6b, 0x28, 0x54, 0xfa, 0x85, 0x3d, 0xba, 0x2b, 0x79, 0x0a, 0x15, 0x9b, 0x9f, 0x5e, 0xca, 0x4e, 0xd4, 0xac, 0xe5, 0xf3, 0x73, 0xa7, 0x57, 0xaf, 0x58, 0xa8, 0x50, 0xf4, 0xea, 0xd6, 0x74, 0x4f, 0xae, 0xe9, 0xd5, 0xe7, 0xe6, 0xad, 0xe8, 0x2c, 0xd7, 0x75, 0x7a, 0xeb, 0x16, 0x0b, 0xf5, 0x59, 0xcb, 0x5f, 0xb0, 0x9c, 0xa9, 0x51, 0xa0, 0x7f, 0x0c, 0xf6, 0x6f, 0x17, 0xc4, 0x49, 0xec, 0xd8, 0x43, 0x1f, 0x2d, 0xa4, 0x76, 0x7b, 0xb7, 0xcc, 0xbb, 0x3e, 0x5a, 0xfb, 0x60, 0xb1, 0x86, 0x3b, 0x52, 0xa1, 0x6c, 0xaa, 0x55, 0x29, 0x9d, 0x97, 0xb2, 0x87, 0x90, 0x61, 0xbe, 0xdc, 0xfc, 0xbc, 0x95, 0xcf, 0xcd, 0x37, 0x3f, 0x5b, 0xd1, 0x53, 0x39, 0x84, 0x3c, 0x41, 0xa2, 0x6d, 0x47, 0x14, 0x2a, 0x9e, 0x5d, 0x56, 0xf2, 0xd3, 0xab, 0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5, 0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07) def key_expansion(data): """ Generate key schedule @param {int[]} data 16/24/32-Byte cipher key @returns {int[]} 176/208/240-Byte expanded key """ data = data[:] # copy rcon_iteration = 1 key_size_bytes = len(data) expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES while len(data) < expanded_key_size_bytes: temp = data[-4:] temp = key_schedule_core(temp, rcon_iteration) rcon_iteration += 1 data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes]) for _ in range(3): temp = data[-4:] data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes]) if key_size_bytes == 32: temp = data[-4:] temp = sub_bytes(temp) data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes]) for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 
else 0): temp = data[-4:] data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes]) return data[:expanded_key_size_bytes] def iter_vector(iv): while True: yield iv iv = inc(iv) def sub_bytes(data): return [SBOX[x] for x in data] def sub_bytes_inv(data): return [SBOX_INV[x] for x in data] def rotate(data): return [*data[1:], data[0]] def key_schedule_core(data, rcon_iteration): data = rotate(data) data = sub_bytes(data) data[0] = data[0] ^ RCON[rcon_iteration] return data def xor(data1, data2): return [x ^ y for x, y in zip(data1, data2, strict=False)] def iter_mix_columns(data, matrix): for i in (0, 4, 8, 12): for row in matrix: mixed = 0 for j in range(4): # xor is (+) and (-) mixed ^= (0 if data[i:i + 4][j] == 0 or row[j] == 0 else RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[data[i + j]] + RIJNDAEL_LOG_TABLE[row[j]]) % 0xFF]) yield mixed def shift_rows(data): return [data[((column + row) & 0b11) * 4 + row] for column in range(4) for row in range(4)] def shift_rows_inv(data): return [data[((column - row) & 0b11) * 4 + row] for column in range(4) for row in range(4)] def shift_block(data): data_shifted = [] bit = 0 for n in data: if bit: n |= 0x100 bit = n & 1 n >>= 1 data_shifted.append(n) return data_shifted def inc(data): data = data[:] # copy for i in range(len(data) - 1, -1, -1): if data[i] == 255: data[i] = 0 else: data[i] = data[i] + 1 break return data def block_product(block_x, block_y): # NIST SP 800-38D, Algorithm 1 if len(block_x) != BLOCK_SIZE_BYTES or len(block_y) != BLOCK_SIZE_BYTES: raise ValueError(f'Length of blocks need to be {BLOCK_SIZE_BYTES} bytes') block_r = [0xE1] + [0] * (BLOCK_SIZE_BYTES - 1) block_v = block_y[:] block_z = [0] * BLOCK_SIZE_BYTES for i in block_x: for bit in range(7, -1, -1): if i & (1 << bit): block_z = xor(block_z, block_v) do_xor = block_v[-1] & 1 block_v = shift_block(block_v) if do_xor: block_v = xor(block_v, block_r) return block_z def ghash(subkey, data): # NIST SP 800-38D, Algorithm 2 if len(data) % BLOCK_SIZE_BYTES: 
raise ValueError(f'Length of data should be {BLOCK_SIZE_BYTES} bytes') last_y = [0] * BLOCK_SIZE_BYTES for i in range(0, len(data), BLOCK_SIZE_BYTES): block = data[i: i + BLOCK_SIZE_BYTES] last_y = block_product(xor(last_y, block), subkey) return last_y __all__ = [ 'aes_cbc_decrypt', 'aes_cbc_decrypt_bytes', 'aes_cbc_encrypt', 'aes_cbc_encrypt_bytes', 'aes_ctr_decrypt', 'aes_ctr_encrypt', 'aes_decrypt', 'aes_decrypt_text', 'aes_ecb_decrypt', 'aes_ecb_encrypt', 'aes_encrypt', 'aes_gcm_decrypt_and_verify', 'aes_gcm_decrypt_and_verify_bytes', 'key_expansion', 'pad_block', 'pkcs7_padding', 'unpad_pkcs7', ]
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/globals.py
yt_dlp/globals.py
from __future__ import annotations import os from collections import defaultdict # Please Note: Due to necessary changes and the complex nature involved in the plugin/globals system, # no backwards compatibility is guaranteed for the plugin system API. # However, we will still try our best. class Indirect: def __init__(self, initial, /): self.value = initial def __repr__(self, /): return f'{type(self).__name__}({self.value!r})' postprocessors = Indirect({}) extractors = Indirect({}) # Plugins all_plugins_loaded = Indirect(False) plugin_specs = Indirect({}) plugin_dirs = Indirect(['default']) plugin_ies = Indirect({}) plugin_pps = Indirect({}) plugin_ies_overrides = Indirect(defaultdict(list)) # Misc IN_CLI = Indirect(False) LAZY_EXTRACTORS = Indirect(None) # `False`=force, `None`=disabled, `True`=enabled WINDOWS_VT_MODE = Indirect(False if os.name == 'nt' else None) # JS Runtimes # If adding support for another runtime, register it here to allow `js_runtimes` option to accept it. # key is the runtime name, value a JsRuntime subclass (internal-only) or None supported_js_runtimes = Indirect({}) # List of remote components supported with --remote-components option supported_remote_components = Indirect([])
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/cache.py
yt_dlp/cache.py
import contextlib import json import os import re import shutil import traceback import urllib.parse from .utils import expand_path, traverse_obj, version_tuple, write_json_file from .version import __version__ class Cache: def __init__(self, ydl): self._ydl = ydl def _get_root_dir(self): res = self._ydl.params.get('cachedir') if res is None: cache_root = os.getenv('XDG_CACHE_HOME', '~/.cache') res = os.path.join(cache_root, 'yt-dlp') return expand_path(res) def _get_cache_fn(self, section, key, dtype): assert re.match(r'^[\w.-]+$', section), f'invalid section {section!r}' key = urllib.parse.quote(key, safe='').replace('%', ',') # encode non-ascii characters return os.path.join(self._get_root_dir(), section, f'{key}.{dtype}') @property def enabled(self): return self._ydl.params.get('cachedir') is not False def store(self, section, key, data, dtype='json'): assert dtype in ('json',) if not self.enabled: return fn = self._get_cache_fn(section, key, dtype) try: os.makedirs(os.path.dirname(fn), exist_ok=True) self._ydl.write_debug(f'Saving {section}.{key} to cache') write_json_file({'yt-dlp_version': __version__, 'data': data}, fn) except Exception: tb = traceback.format_exc() self._ydl.report_warning(f'Writing cache to {fn!r} failed: {tb}') def _validate(self, data, min_ver): version = traverse_obj(data, 'yt-dlp_version') if not version: # Backward compatibility data, version = {'data': data}, '2022.08.19' if not min_ver or version_tuple(version) >= version_tuple(min_ver): return data['data'] self._ydl.write_debug(f'Discarding old cache from version {version} (needs {min_ver})') def load(self, section, key, dtype='json', default=None, *, min_ver=None): assert dtype in ('json',) if not self.enabled: return default cache_fn = self._get_cache_fn(section, key, dtype) with contextlib.suppress(OSError): try: with open(cache_fn, encoding='utf-8') as cachef: self._ydl.write_debug(f'Loading {section}.{key} from cache') return self._validate(json.load(cachef), min_ver) except 
(ValueError, KeyError): try: file_size = os.path.getsize(cache_fn) except OSError as oe: file_size = str(oe) self._ydl.report_warning(f'Cache retrieval from {cache_fn} failed ({file_size})') return default def remove(self): if not self.enabled: self._ydl.to_screen('Cache is disabled (Did you combine --no-cache-dir and --rm-cache-dir?)') return cachedir = self._get_root_dir() if not any((term in cachedir) for term in ('cache', 'tmp')): raise Exception(f'Not removing directory {cachedir} - this does not look like a cache dir') self._ydl.to_screen( f'Removing cache dir {cachedir} .', skip_eol=True) if os.path.exists(cachedir): self._ydl.to_screen('.', skip_eol=True) shutil.rmtree(cachedir) self._ydl.to_screen('.')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/options.py
yt_dlp/options.py
import collections import contextlib import optparse import os.path import re import shlex import shutil import string import sys from .compat import compat_expanduser from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS from .downloader.external import list_external_downloaders from .postprocessor import ( FFmpegExtractAudioPP, FFmpegMergerPP, FFmpegSubtitlesConvertorPP, FFmpegThumbnailsConvertorPP, FFmpegVideoRemuxerPP, SponsorBlockPP, ) from .postprocessor.modify_chapters import DEFAULT_SPONSORBLOCK_CHAPTER_TITLE from .update import UPDATE_SOURCES, detect_variant, is_non_updateable from .utils import ( OUTTMPL_TYPES, POSTPROCESS_WHEN, Config, deprecation_warning, expand_path, format_field, get_executable_path, get_system_config_dirs, get_user_config_dirs, join_nonempty, orderedSet_from_options, remove_end, variadic, write_string, ) from .version import CHANNEL, __version__ def parseOpts(overrideArguments=None, ignore_config_files='if_override'): # noqa: N803 PACKAGE_NAME = 'yt-dlp' root = Config(create_parser()) if ignore_config_files == 'if_override': ignore_config_files = overrideArguments is not None def read_config(*paths): path = os.path.join(*paths) conf = Config.read_file(path, default=None) if conf is not None: return conf, path def _load_from_config_dirs(config_dirs): for config_dir in config_dirs: head, tail = os.path.split(config_dir) assert tail == PACKAGE_NAME or config_dir == os.path.join(compat_expanduser('~'), f'.{PACKAGE_NAME}') yield read_config(head, f'{PACKAGE_NAME}.conf') if tail.startswith('.'): # ~/.PACKAGE_NAME yield read_config(head, f'{PACKAGE_NAME}.conf.txt') yield read_config(config_dir, 'config') yield read_config(config_dir, 'config.txt') def add_config(label, path=None, func=None): """ Adds config and returns whether to continue """ if root.parse_known_args()[0].ignoreconfig: return False elif func: assert path is None args, current_path = next( filter(None, _load_from_config_dirs(func(PACKAGE_NAME))), (None, None)) else: 
current_path = os.path.join(path, 'yt-dlp.conf') args = Config.read_file(current_path, default=None) if args is not None: root.append_config(args, current_path, label=label) return True def load_configs(): yield not ignore_config_files yield add_config('Portable', get_executable_path()) yield add_config('Home', expand_path(root.parse_known_args()[0].paths.get('home', '')).strip()) yield add_config('User', func=get_user_config_dirs) yield add_config('System', func=get_system_config_dirs) opts = optparse.Values({'verbose': True, 'print_help': False}) try: try: if overrideArguments is not None: root.append_config(overrideArguments, label='Override') else: root.append_config(sys.argv[1:], label='Command-line') loaded_all_configs = all(load_configs()) except ValueError as err: raise root.parser.error(err) if loaded_all_configs: # If ignoreconfig is found inside the system configuration file, # the user configuration is removed if root.parse_known_args()[0].ignoreconfig: user_conf = next((i for i, conf in enumerate(root.configs) if conf.label == 'User'), None) if user_conf is not None: root.configs.pop(user_conf) try: root.configs[0].load_configs() # Resolve any aliases using --config-location except ValueError as err: raise root.parser.error(err) opts, args = root.parse_args() except optparse.OptParseError: with contextlib.suppress(optparse.OptParseError): opts, _ = root.parse_known_args(strict=False) raise except (SystemExit, KeyboardInterrupt): opts.verbose = False raise finally: verbose = opts.verbose and f'\n{root}'.replace('\n| ', '\n[debug] ')[1:] if verbose: write_string(f'{verbose}\n') if opts.print_help: if verbose: write_string('\n') root.parser.print_help() if opts.print_help: sys.exit() return root.parser, opts, args class _YoutubeDLHelpFormatter(optparse.IndentedHelpFormatter): def __init__(self): # No need to wrap help messages if we're on a wide console max_width = shutil.get_terminal_size().columns or 80 # The % is chosen to get a pretty output in 
README.md super().__init__(width=max_width, max_help_position=int(0.45 * max_width)) @staticmethod def format_option_strings(option): """ ('-o', '--option') -> -o, --format METAVAR """ opts = join_nonempty( option._short_opts and option._short_opts[0], option._long_opts and option._long_opts[0], delim=', ') if option.takes_value(): opts += f' {option.metavar}' return opts _PRESET_ALIASES = { 'mp3': ['-f', 'ba[acodec^=mp3]/ba/b', '-x', '--audio-format', 'mp3'], 'aac': ['-f', 'ba[acodec^=aac]/ba[acodec^=mp4a.40.]/ba/b', '-x', '--audio-format', 'aac'], 'mp4': ['--merge-output-format', 'mp4', '--remux-video', 'mp4', '-S', 'vcodec:h264,lang,quality,res,fps,hdr:12,acodec:aac'], 'mkv': ['--merge-output-format', 'mkv', '--remux-video', 'mkv'], 'sleep': ['--sleep-subtitles', '5', '--sleep-requests', '0.75', '--sleep-interval', '10', '--max-sleep-interval', '20'], } class _YoutubeDLOptionParser(optparse.OptionParser): # optparse is deprecated since Python 3.2. So assume a stable interface even for private methods ALIAS_DEST = '_triggered_aliases' ALIAS_TRIGGER_LIMIT = 100 def __init__(self): super().__init__( prog='yt-dlp' if detect_variant() == 'source' else None, version=__version__, usage='%prog [OPTIONS] URL [URL...]', epilog='See full documentation at https://github.com/yt-dlp/yt-dlp#readme', formatter=_YoutubeDLHelpFormatter(), conflict_handler='resolve', ) self.set_default(self.ALIAS_DEST, collections.defaultdict(int)) _UNKNOWN_OPTION = (optparse.BadOptionError, optparse.AmbiguousOptionError) _BAD_OPTION = optparse.OptionValueError def parse_known_args(self, args=None, values=None, strict=True): """Same as parse_args, but ignore unknown switches. 
Similar to argparse.parse_known_args""" self.rargs, self.largs = self._get_args(args), [] self.values = values or self.get_default_values() while self.rargs: arg = self.rargs[0] try: if arg == '--': del self.rargs[0] break elif arg.startswith('--'): self._process_long_opt(self.rargs, self.values) elif arg.startswith('-') and arg != '-': self._process_short_opts(self.rargs, self.values) elif self.allow_interspersed_args: self.largs.append(self.rargs.pop(0)) else: break except optparse.OptParseError as err: if isinstance(err, self._UNKNOWN_OPTION): self.largs.append(err.opt_str) elif strict: if isinstance(err, self._BAD_OPTION): self.error(str(err)) raise return self.check_values(self.values, self.largs) def _generate_error_message(self, msg): msg = f'{self.get_prog_name()}: error: {str(msg).strip()}\n' return f'{self.get_usage()}\n{msg}' if self.usage else msg def error(self, msg): raise optparse.OptParseError(self._generate_error_message(msg)) def _get_args(self, args): return sys.argv[1:] if args is None else list(args) def _match_long_opt(self, opt): """Improve ambiguous argument resolution by comparing option objects instead of argument strings""" try: return super()._match_long_opt(opt) except optparse.AmbiguousOptionError as e: if len({self._long_opt[p] for p in e.possibilities}) == 1: return e.possibilities[0] raise def format_option_help(self, formatter=None): assert formatter, 'Formatter can not be None' formatted_help = super().format_option_help(formatter=formatter) formatter.indent() heading = formatter.format_heading('Preset Aliases') formatter.indent() description = formatter.format_description( 'Predefined aliases for convenience and ease of use. 
Note that future versions of yt-dlp ' 'may add or adjust presets, but the existing preset names will not be changed or removed') result = [] for name, args in _PRESET_ALIASES.items(): option = optparse.Option('-t', help=shlex.join(args)) formatter.option_strings[option] = f'-t {name}' result.append(formatter.format_option(option)) formatter.dedent() formatter.dedent() help_lines = '\n'.join(result) return f'{formatted_help}\n{heading}{description}\n{help_lines}' def create_parser(): def _list_from_options_callback(option, opt_str, value, parser, append=True, delim=',', process=str.strip): # append can be True, False or -1 (prepend) current = list(getattr(parser.values, option.dest)) if append else [] value = list(filter(None, [process(value)] if delim is None else map(process, value.split(delim)))) setattr( parser.values, option.dest, current + value if append is True else value + current) def _set_from_options_callback( option, opt_str, value, parser, allowed_values, delim=',', aliases={}, process=lambda x: x.lower().strip()): values = [process(value)] if delim is None else map(process, value.split(delim)) try: requested = orderedSet_from_options(values, collections.ChainMap(aliases, {'all': allowed_values}), start=getattr(parser.values, option.dest)) except ValueError as e: raise optparse.OptionValueError(f'wrong {option.metavar} for {opt_str}: {e.args[0]}') setattr(parser.values, option.dest, set(requested)) def _dict_from_options_callback( option, opt_str, value, parser, allowed_keys=r'[\w-]+', delimiter=':', default_key=None, process=None, multiple_keys=True, process_key=str.lower, append=False): out_dict = dict(getattr(parser.values, option.dest)) multiple_args = not isinstance(value, str) if multiple_keys: allowed_keys = fr'({allowed_keys})(,({allowed_keys}))*' mobj = re.match( fr'(?is)(?P<keys>{allowed_keys}){delimiter}(?P<val>.*)$', value[0] if multiple_args else value) if mobj is not None: keys, val = mobj.group('keys').split(','), mobj.group('val') if 
multiple_args: val = [val, *value[1:]] elif default_key is not None: keys, val = variadic(default_key), value else: raise optparse.OptionValueError( f'wrong {opt_str} formatting; it should be {option.metavar}, not "{value}"') try: keys = map(process_key, keys) if process_key else keys val = process(val) if process else val except Exception as err: raise optparse.OptionValueError(f'wrong {opt_str} formatting; {err}') for key in keys: out_dict[key] = [*out_dict.get(key, []), val] if append else val setattr(parser.values, option.dest, out_dict) def when_prefix(default): return { 'default': {}, 'type': 'str', 'action': 'callback', 'callback': _dict_from_options_callback, 'callback_kwargs': { 'allowed_keys': '|'.join(map(re.escape, POSTPROCESS_WHEN)), 'default_key': default, 'multiple_keys': False, 'append': True, }, } parser = _YoutubeDLOptionParser() alias_group = optparse.OptionGroup(parser, 'Aliases') Formatter = string.Formatter() def _create_alias(option, opt_str, value, parser): aliases, opts = value try: nargs = len({i if f == '' else f for i, (_, f, _, _) in enumerate(Formatter.parse(opts)) if f is not None}) opts.format(*map(str, range(nargs))) # validate except Exception as err: raise optparse.OptionValueError(f'wrong {opt_str} OPTIONS formatting; {err}') if alias_group not in parser.option_groups: parser.add_option_group(alias_group) aliases = (x if x.startswith('-') else f'--{x}' for x in map(str.strip, aliases.split(','))) try: args = [f'ARG{i}' for i in range(nargs)] alias_group.add_option( *aliases, nargs=nargs, dest=parser.ALIAS_DEST, type='str' if nargs else None, metavar=' '.join(args), help=opts.format(*args), action='callback', callback=_alias_callback, callback_kwargs={'opts': opts, 'nargs': nargs}) except Exception as err: raise optparse.OptionValueError(f'wrong {opt_str} formatting; {err}') def _alias_callback(option, opt_str, value, parser, opts, nargs): counter = getattr(parser.values, option.dest) counter[opt_str] += 1 if counter[opt_str] > 
parser.ALIAS_TRIGGER_LIMIT: raise optparse.OptionValueError(f'Alias {opt_str} exceeded invocation limit') if nargs == 1: value = [value] assert (nargs == 0 and value is None) or len(value) == nargs parser.rargs[:0] = shlex.split( opts if value is None else opts.format(*map(shlex.quote, value))) def _preset_alias_callback(option, opt_str, value, parser): if not value: return if value not in _PRESET_ALIASES: raise optparse.OptionValueError(f'Unknown preset alias: {value}') parser.rargs[:0] = _PRESET_ALIASES[value] general = optparse.OptionGroup(parser, 'General Options') general.add_option( '-h', '--help', dest='print_help', action='store_true', help='Print this help text and exit') general.add_option( '--version', action='version', help='Print program version and exit') general.add_option( '-U', '--update', action='store_const', dest='update_self', const=CHANNEL, help=format_field( is_non_updateable(), None, 'Check if updates are available. %s', default=f'Update this program to the latest {CHANNEL} version')) general.add_option( '--no-update', action='store_false', dest='update_self', help='Do not check for updates (default)') general.add_option( '--update-to', action='store', dest='update_self', metavar='[CHANNEL]@[TAG]', help=( 'Upgrade/downgrade to a specific version. CHANNEL can be a repository as well. ' f'CHANNEL and TAG default to "{CHANNEL.partition("@")[0]}" and "latest" respectively if omitted; ' f'See "UPDATE" for details. Supported channels: {", ".join(UPDATE_SOURCES)}')) general.add_option( '-i', '--ignore-errors', action='store_true', dest='ignoreerrors', help='Ignore download and postprocessing errors. The download will be considered successful even if the postprocessing fails') general.add_option( '--no-abort-on-error', action='store_const', dest='ignoreerrors', const='only_download', help='Continue with next video on download errors; e.g. 
to skip unavailable videos in a playlist (default)') general.add_option( '--abort-on-error', '--no-ignore-errors', action='store_false', dest='ignoreerrors', help='Abort downloading of further videos if an error occurs (Alias: --no-ignore-errors)') general.add_option( '--list-extractors', action='store_true', dest='list_extractors', default=False, help='List all supported extractors and exit') general.add_option( '--extractor-descriptions', action='store_true', dest='list_extractor_descriptions', default=False, help='Output descriptions of all supported extractors and exit') general.add_option( '--use-extractors', '--ies', action='callback', dest='allowed_extractors', metavar='NAMES', type='str', default=[], callback=_list_from_options_callback, help=( 'Extractor names to use separated by commas. ' 'You can also use regexes, "all", "default" and "end" (end URL matching); ' 'e.g. --ies "holodex.*,end,youtube". ' 'Prefix the name with a "-" to exclude it, e.g. --ies default,-generic. ' 'Use --list-extractors for a list of extractor names. (Alias: --ies)')) general.add_option( '--force-generic-extractor', action='store_true', dest='force_generic_extractor', default=False, help=optparse.SUPPRESS_HELP) general.add_option( '--default-search', dest='default_search', metavar='PREFIX', help=( 'Use this prefix for unqualified URLs. ' 'E.g. "gvsearch2:python" downloads two videos from google videos for the search term "python". ' 'Use the value "auto" to let yt-dlp guess ("auto_warning" to emit a warning when guessing). ' '"error" just throws an error. The default value "fixup_error" repairs broken URLs, ' 'but emits an error if this is not possible instead of searching')) general.add_option( '--ignore-config', '--no-config', action='store_true', dest='ignoreconfig', help=( 'Don\'t load any more configuration files except those given to --config-locations. 
' 'For backward compatibility, if this option is found inside the system configuration file, the user configuration is not loaded. ' '(Alias: --no-config)')) general.add_option( '--no-config-locations', action='store_const', dest='config_locations', const=None, help=( 'Do not load any custom configuration files (default). When given inside a ' 'configuration file, ignore all previous --config-locations defined in the current file')) general.add_option( '--config-locations', dest='config_locations', metavar='PATH', action='append', help=( 'Location of the main configuration file; either the path to the config or its containing directory ' '("-" for stdin). Can be used multiple times and inside other configuration files')) general.add_option( '--plugin-dirs', metavar='DIR', dest='plugin_dirs', action='callback', callback=_list_from_options_callback, type='str', callback_kwargs={'delim': None}, default=['default'], help=( 'Path to an additional directory to search for plugins. ' 'This option can be used multiple times to add multiple directories. ' 'Use "default" to search the default plugin directories (default)')) general.add_option( '--no-plugin-dirs', dest='plugin_dirs', action='store_const', const=[], help='Clear plugin directories to search, including defaults and those provided by previous --plugin-dirs') general.add_option( '--js-runtimes', metavar='RUNTIME[:PATH]', dest='js_runtimes', action='callback', callback=_list_from_options_callback, type='str', callback_kwargs={'delim': None}, default=['deno'], help=( 'Additional JavaScript runtime to enable, with an optional location for the runtime ' '(either the path to the binary or its containing directory). ' 'This option can be used multiple times to enable multiple runtimes. ' 'Supported runtimes are (in order of priority, from highest to lowest): deno, node, quickjs, bun. ' 'Only "deno" is enabled by default. The highest priority runtime that is both enabled and ' 'available will be used. 
In order to use a lower priority runtime when "deno" is available, ' '--no-js-runtimes needs to be passed before enabling other runtimes')) general.add_option( '--no-js-runtimes', dest='js_runtimes', action='store_const', const=[], help='Clear JavaScript runtimes to enable, including defaults and those provided by previous --js-runtimes') general.add_option( '--remote-components', metavar='COMPONENT', dest='remote_components', action='callback', callback=_list_from_options_callback, type='str', callback_kwargs={'delim': None}, default=[], help=( 'Remote components to allow yt-dlp to fetch when required. ' 'This option is currently not needed if you are using an official executable ' 'or have the requisite version of the yt-dlp-ejs package installed. ' 'You can use this option multiple times to allow multiple components. ' 'Supported values: ejs:npm (external JavaScript components from npm), ' 'ejs:github (external JavaScript components from yt-dlp-ejs GitHub). ' 'By default, no remote components are allowed')) general.add_option( '--no-remote-components', dest='remote_components', action='store_const', const=[], help='Disallow fetching of all remote components, including any previously allowed by --remote-components or defaults.') general.add_option( '--flat-playlist', action='store_const', dest='extract_flat', const='in_playlist', default=False, help=( 'Do not extract a playlist\'s URL result entries; ' 'some entry metadata may be missing and downloading may be bypassed')) general.add_option( '--no-flat-playlist', action='store_false', dest='extract_flat', help='Fully extract the videos of a playlist (default)') general.add_option( '--live-from-start', action='store_true', dest='live_from_start', help='Download livestreams from the start. 
Currently experimental and only supported for YouTube and Twitch') general.add_option( '--no-live-from-start', action='store_false', dest='live_from_start', help='Download livestreams from the current time (default)') general.add_option( '--wait-for-video', dest='wait_for_video', metavar='MIN[-MAX]', default=None, help=( 'Wait for scheduled streams to become available. ' 'Pass the minimum number of seconds (or range) to wait between retries')) general.add_option( '--no-wait-for-video', dest='wait_for_video', action='store_const', const=None, help='Do not wait for scheduled streams (default)') general.add_option( '--mark-watched', action='store_true', dest='mark_watched', default=False, help='Mark videos watched (even with --simulate)') general.add_option( '--no-mark-watched', action='store_false', dest='mark_watched', help='Do not mark videos watched (default)') general.add_option( '--no-colors', '--no-colours', action='store_const', dest='color', const={ 'stdout': 'no_color', 'stderr': 'no_color', }, help=optparse.SUPPRESS_HELP) general.add_option( '--color', dest='color', metavar='[STREAM:]POLICY', default={}, type='str', action='callback', callback=_dict_from_options_callback, callback_kwargs={ 'allowed_keys': 'stdout|stderr', 'default_key': ['stdout', 'stderr'], 'process': str.strip, }, help=( 'Whether to emit color codes in output, optionally prefixed by ' 'the STREAM (stdout or stderr) to apply the setting to. ' 'Can be one of "always", "auto" (default), "never", or ' '"no_color" (use non color terminal sequences). ' 'Use "auto-tty" or "no_color-tty" to decide based on terminal support only. 
' 'Can be used multiple times')) general.add_option( '--compat-options', metavar='OPTS', dest='compat_opts', default=set(), type='str', action='callback', callback=_set_from_options_callback, callback_kwargs={ 'allowed_values': { 'filename', 'filename-sanitization', 'format-sort', 'abort-on-error', 'format-spec', 'no-playlist-metafiles', 'multistreams', 'no-live-chat', 'playlist-index', 'list-formats', 'no-direct-merge', 'playlist-match-filter', 'no-attach-info-json', 'embed-thumbnail-atomicparsley', 'no-external-downloader-progress', 'embed-metadata', 'seperate-video-versions', 'no-clean-infojson', 'no-keep-subs', 'no-certifi', 'no-youtube-channel-redirect', 'no-youtube-unavailable-videos', 'no-youtube-prefer-utc-upload-date', 'prefer-legacy-http-handler', 'manifest-filesize-approx', 'allow-unsafe-ext', 'prefer-vp9-sort', 'mtime-by-default', }, 'aliases': { 'youtube-dl': ['all', '-multistreams', '-playlist-match-filter', '-manifest-filesize-approx', '-allow-unsafe-ext', '-prefer-vp9-sort'], 'youtube-dlc': ['all', '-no-youtube-channel-redirect', '-no-live-chat', '-playlist-match-filter', '-manifest-filesize-approx', '-allow-unsafe-ext', '-prefer-vp9-sort'], '2021': ['2022', 'no-certifi', 'filename-sanitization'], '2022': ['2023', 'no-external-downloader-progress', 'playlist-match-filter', 'prefer-legacy-http-handler', 'manifest-filesize-approx'], '2023': ['2024', 'prefer-vp9-sort'], '2024': ['mtime-by-default'], }, }, help=( 'Options that can help keep compatibility with youtube-dl or youtube-dlc ' 'configurations by reverting some of the changes made in yt-dlp. ' 'See "Differences in default behavior" for details')) general.add_option( '--alias', metavar='ALIASES OPTIONS', dest='_', type='str', nargs=2, action='callback', callback=_create_alias, help=( 'Create aliases for an option string. Unless an alias starts with a dash "-", it is prefixed with "--". ' 'Arguments are parsed according to the Python string formatting mini-language. ' 'E.g. 
--alias get-audio,-X "-S aext:{0},abr -x --audio-format {0}" creates options ' '"--get-audio" and "-X" that takes an argument (ARG0) and expands to ' '"-S aext:ARG0,abr -x --audio-format ARG0". All defined aliases are listed in the --help output. ' 'Alias options can trigger more aliases; so be careful to avoid defining recursive options. ' f'As a safety measure, each alias may be triggered a maximum of {_YoutubeDLOptionParser.ALIAS_TRIGGER_LIMIT} times. ' 'This option can be used multiple times')) general.add_option( '-t', '--preset-alias', metavar='PRESET', dest='_', type='str', action='callback', callback=_preset_alias_callback, help=( 'Applies a predefined set of options. e.g. --preset-alias mp3. ' f'The following presets are available: {", ".join(_PRESET_ALIASES)}. ' 'See the "Preset Aliases" section at the end for more info. ' 'This option can be used multiple times')) network = optparse.OptionGroup(parser, 'Network Options') network.add_option( '--proxy', dest='proxy', default=None, metavar='URL', help=( 'Use the specified HTTP/HTTPS/SOCKS proxy. To enable SOCKS proxy, specify a proper scheme, ' 'e.g. socks5://user:pass@127.0.0.1:1080/. Pass in an empty string (--proxy "") for direct connection')) network.add_option( '--socket-timeout', dest='socket_timeout', type=float, default=None, metavar='SECONDS', help='Time to wait before giving up, in seconds') network.add_option( '--source-address', metavar='IP', dest='source_address', default=None, help='Client-side IP address to bind to', ) network.add_option( '--impersonate', metavar='CLIENT[:OS]', dest='impersonate', default=None, help=( 'Client to impersonate for requests. E.g. chrome, chrome-110, chrome:windows-10. ' 'Pass --impersonate="" to impersonate any client. 
Note that forcing impersonation ' 'for all requests may have a detrimental impact on download speed and stability'), ) network.add_option( '--list-impersonate-targets', dest='list_impersonate_targets', default=False, action='store_true', help='List available clients to impersonate.', ) network.add_option( '-4', '--force-ipv4', action='store_const', const='0.0.0.0', dest='source_address', help='Make all connections via IPv4', ) network.add_option( '-6', '--force-ipv6', action='store_const', const='::', dest='source_address', help='Make all connections via IPv6', ) network.add_option( '--enable-file-urls', action='store_true', dest='enable_file_urls', default=False, help='Enable file:// URLs. This is disabled by default for security reasons.', ) geo = optparse.OptionGroup(parser, 'Geo-restriction') geo.add_option( '--geo-verification-proxy', dest='geo_verification_proxy', default=None, metavar='URL', help=( 'Use this proxy to verify the IP address for some geo-restricted sites. ' 'The default proxy specified by --proxy (or none, if the option is not present) is used for the actual downloading')) geo.add_option( '--xff', metavar='VALUE', dest='geo_bypass', default='default', help=( 'How to fake X-Forwarded-For HTTP header to try bypassing geographic restriction. 
' 'One of "default" (only when known to be useful), "never", ' 'an IP block in CIDR notation, or a two-letter ISO 3166-2 country code')) geo.add_option( '--geo-bypass', action='store_const', dest='geo_bypass', const='default', help=optparse.SUPPRESS_HELP) geo.add_option( '--no-geo-bypass', action='store_const', dest='geo_bypass', const='never', help=optparse.SUPPRESS_HELP) geo.add_option( '--geo-bypass-country', metavar='CODE', dest='geo_bypass', help=optparse.SUPPRESS_HELP) geo.add_option( '--geo-bypass-ip-block', metavar='IP_BLOCK', dest='geo_bypass', help=optparse.SUPPRESS_HELP) selection = optparse.OptionGroup(parser, 'Video Selection') selection.add_option( '--playlist-start', dest='playliststart', metavar='NUMBER', default=1, type=int, help=optparse.SUPPRESS_HELP) selection.add_option( '--playlist-end', dest='playlistend', metavar='NUMBER', default=None, type=int, help=optparse.SUPPRESS_HELP) selection.add_option( '-I', '--playlist-items', dest='playlist_items', metavar='ITEM_SPEC', default=None, help=( 'Comma-separated playlist_index of the items to download. ' 'You can specify a range using "[START]:[STOP][:STEP]". For backward compatibility, START-STOP is also supported. ' 'Use negative indices to count from the right and negative STEP to download in reverse order. ' 'E.g. "-I 1:3,7,-5::2" used on a playlist of size 15 will download the items at index 1,2,3,7,11,13,15')) selection.add_option( '--match-title', dest='matchtitle', metavar='REGEX', help=optparse.SUPPRESS_HELP)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
true
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/webvtt.py
yt_dlp/webvtt.py
""" A partial parser for WebVTT segments. Interprets enough of the WebVTT stream to be able to assemble a single stand-alone subtitle file, suitably adjusting timestamps on the way, while everything else is passed through unmodified. Regular expressions based on the W3C WebVTT specification <https://www.w3.org/TR/webvtt1/>. The X-TIMESTAMP-MAP extension is described in RFC 8216 §3.5 <https://tools.ietf.org/html/rfc8216#section-3.5>. """ import io import re from .utils import int_or_none, timetuple_from_msec class _MatchParser: """ An object that maintains the current parsing position and allows conveniently advancing it as syntax elements are successfully parsed. """ def __init__(self, string): self._data = string self._pos = 0 def match(self, r): if isinstance(r, re.Pattern): return r.match(self._data, self._pos) if isinstance(r, str): if self._data.startswith(r, self._pos): return len(r) return None raise ValueError(r) def advance(self, by): if by is None: amt = 0 elif isinstance(by, re.Match): amt = len(by.group(0)) elif isinstance(by, str): amt = len(by) elif isinstance(by, int): amt = by else: raise ValueError(by) self._pos += amt return by def consume(self, r): return self.advance(self.match(r)) def child(self): return _MatchChildParser(self) class _MatchChildParser(_MatchParser): """ A child parser state, which advances through the same data as its parent, but has an independent position. This is useful when advancing through syntax elements we might later want to backtrack from. """ def __init__(self, parent): super().__init__(parent._data) self.__parent = parent self._pos = parent._pos def commit(self): """ Advance the parent state to the current position of this child state. 
""" self.__parent._pos = self._pos return self.__parent class ParseError(Exception): def __init__(self, parser): data = parser._data[parser._pos:parser._pos + 100] super().__init__(f'Parse error at position {parser._pos} (near {data!r})') # While the specification <https://www.w3.org/TR/webvtt1/#webvtt-timestamp> # prescribes that hours must be *2 or more* digits, timestamps with a single # digit for the hour part has been seen in the wild. # See https://github.com/yt-dlp/yt-dlp/issues/921 _REGEX_TS = re.compile(r'''(?x) (?:([0-9]{1,}):)? ([0-9]{2}): ([0-9]{2})\. ([0-9]{3})? ''') _REGEX_EOF = re.compile(r'\Z') _REGEX_NL = re.compile(r'(?:\r\n|[\r\n]|$)') _REGEX_BLANK = re.compile(r'(?:\r\n|[\r\n])+') _REGEX_OPTIONAL_WHITESPACE = re.compile(r'[ \t]*') def _parse_ts(ts): """ Convert a parsed WebVTT timestamp (a re.Match obtained from _REGEX_TS) into an MPEG PES timestamp: a tick counter at 90 kHz resolution. """ return 90 * sum( int(part or 0) * mult for part, mult in zip(ts.groups(), (3600_000, 60_000, 1000, 1), strict=True)) def _format_ts(ts): """ Convert an MPEG PES timestamp into a WebVTT timestamp. This will lose sub-millisecond precision. """ return '%02u:%02u:%02u.%03u' % timetuple_from_msec(int((ts + 45) // 90)) class Block: """ An abstract WebVTT block. """ def __init__(self, **kwargs): for key, val in kwargs.items(): setattr(self, key, val) @classmethod def parse(cls, parser): m = parser.match(cls._REGEX) if not m: return None parser.advance(m) return cls(raw=m.group(0)) def write_into(self, stream): stream.write(self.raw) class HeaderBlock(Block): """ A WebVTT block that may only appear in the header part of the file, i.e. before any cue blocks. 
""" pass class Magic(HeaderBlock): _REGEX = re.compile(r'\ufeff?WEBVTT([ \t][^\r\n]*)?(?:\r\n|[\r\n])') # XXX: The X-TIMESTAMP-MAP extension is described in RFC 8216 §3.5 # <https://tools.ietf.org/html/rfc8216#section-3.5>, but the RFC # doesn't specify the exact grammar nor where in the WebVTT # syntax it should be placed; the below has been devised based # on usage in the wild # # And strictly speaking, the presence of this extension violates # the W3C WebVTT spec. Oh well. _REGEX_TSMAP = re.compile(r'X-TIMESTAMP-MAP=') _REGEX_TSMAP_LOCAL = re.compile(r'LOCAL:') _REGEX_TSMAP_MPEGTS = re.compile(r'MPEGTS:([0-9]+)') _REGEX_TSMAP_SEP = re.compile(r'[ \t]*,[ \t]*') # This was removed from the spec in the 2017 revision; # the last spec draft to describe this syntax element is # <https://www.w3.org/TR/2015/WD-webvtt1-20151208/#webvtt-metadata-header>. # Nevertheless, YouTube keeps serving those _REGEX_META = re.compile(r'(?:(?!-->)[^\r\n])+:(?:(?!-->)[^\r\n])+(?:\r\n|[\r\n])') @classmethod def __parse_tsmap(cls, parser): parser = parser.child() while True: m = parser.consume(cls._REGEX_TSMAP_LOCAL) if m: m = parser.consume(_REGEX_TS) if m is None: raise ParseError(parser) local = _parse_ts(m) if local is None: raise ParseError(parser) else: m = parser.consume(cls._REGEX_TSMAP_MPEGTS) if m: mpegts = int_or_none(m.group(1)) if mpegts is None: raise ParseError(parser) else: raise ParseError(parser) if parser.consume(cls._REGEX_TSMAP_SEP): continue if parser.consume(_REGEX_NL): break raise ParseError(parser) parser.commit() return local, mpegts @classmethod def parse(cls, parser): parser = parser.child() m = parser.consume(cls._REGEX) if not m: raise ParseError(parser) extra = m.group(1) local, mpegts, meta = None, None, '' while not parser.consume(_REGEX_NL): if parser.consume(cls._REGEX_TSMAP): local, mpegts = cls.__parse_tsmap(parser) continue m = parser.consume(cls._REGEX_META) if m: meta += m.group(0) continue raise ParseError(parser) parser.commit() return 
cls(extra=extra, mpegts=mpegts, local=local, meta=meta) def write_into(self, stream): stream.write('WEBVTT') if self.extra is not None: stream.write(self.extra) stream.write('\n') if self.local or self.mpegts: stream.write('X-TIMESTAMP-MAP=LOCAL:') stream.write(_format_ts(self.local if self.local is not None else 0)) stream.write(',MPEGTS:') stream.write(str(self.mpegts if self.mpegts is not None else 0)) stream.write('\n') if self.meta: stream.write(self.meta) stream.write('\n') class StyleBlock(HeaderBlock): _REGEX = re.compile(r'''(?x) STYLE[\ \t]*(?:\r\n|[\r\n]) ((?:(?!-->)[^\r\n])+(?:\r\n|[\r\n]))* (?:\r\n|[\r\n]) ''') class RegionBlock(HeaderBlock): _REGEX = re.compile(r'''(?x) REGION[\ \t]* ((?:(?!-->)[^\r\n])+(?:\r\n|[\r\n]))* (?:\r\n|[\r\n]) ''') class CommentBlock(Block): _REGEX = re.compile(r'''(?x) NOTE(?:\r\n|[\ \t\r\n]) ((?:(?!-->)[^\r\n])+(?:\r\n|[\r\n]))* (?:\r\n|[\r\n]) ''') class CueBlock(Block): """ A cue block. The payload is not interpreted. """ _REGEX_ID = re.compile(r'((?:(?!-->)[^\r\n])+)(?:\r\n|[\r\n])') _REGEX_ARROW = re.compile(r'[ \t]+-->[ \t]+') _REGEX_SETTINGS = re.compile(r'[ \t]+((?:(?!-->)[^\r\n])+)') _REGEX_PAYLOAD = re.compile(r'[^\r\n]+(?:\r\n|[\r\n])?') @classmethod def parse(cls, parser): parser = parser.child() id_ = None m = parser.consume(cls._REGEX_ID) if m: id_ = m.group(1) m0 = parser.consume(_REGEX_TS) if not m0: return None if not parser.consume(cls._REGEX_ARROW): return None m1 = parser.consume(_REGEX_TS) if not m1: return None m2 = parser.consume(cls._REGEX_SETTINGS) parser.consume(_REGEX_OPTIONAL_WHITESPACE) if not parser.consume(_REGEX_NL): return None start = _parse_ts(m0) end = _parse_ts(m1) settings = m2.group(1) if m2 is not None else None text = io.StringIO() while True: m = parser.consume(cls._REGEX_PAYLOAD) if not m: break text.write(m.group(0)) parser.commit() return cls( id=id_, start=start, end=end, settings=settings, text=text.getvalue(), ) def write_into(self, stream): if self.id is not None: 
stream.write(self.id) stream.write('\n') stream.write(_format_ts(self.start)) stream.write(' --> ') stream.write(_format_ts(self.end)) if self.settings is not None: stream.write(' ') stream.write(self.settings) stream.write('\n') stream.write(self.text) stream.write('\n') @property def as_json(self): return { 'id': self.id, 'start': self.start, 'end': self.end, 'text': self.text, 'settings': self.settings, } def __eq__(self, other): return self.as_json == other.as_json @classmethod def from_json(cls, json): return cls( id=json['id'], start=json['start'], end=json['end'], text=json['text'], settings=json['settings'], ) def hinges(self, other): if self.text != other.text: return False if self.settings != other.settings: return False return self.start <= self.end == other.start <= other.end def parse_fragment(frag_content): """ A generator that yields (partially) parsed WebVTT blocks when given a bytes object containing the raw contents of a WebVTT file. """ parser = _MatchParser(frag_content.decode()) yield Magic.parse(parser) while not parser.match(_REGEX_EOF): if parser.consume(_REGEX_BLANK): continue block = RegionBlock.parse(parser) if block: yield block continue block = StyleBlock.parse(parser) if block: yield block continue block = CommentBlock.parse(parser) if block: yield block # XXX: or skip continue break while not parser.match(_REGEX_EOF): if parser.consume(_REGEX_BLANK): continue block = CommentBlock.parse(parser) if block: yield block # XXX: or skip continue block = CueBlock.parse(parser) if block: yield block continue raise ParseError(parser)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/odnoklassniki.py
yt_dlp/extractor/odnoklassniki.py
import urllib.parse from .common import InfoExtractor from ..compat import compat_etree_fromstring from ..networking import HEADRequest from ..utils import ( ExtractorError, float_or_none, int_or_none, qualities, smuggle_url, traverse_obj, unescapeHTML, unified_strdate, unsmuggle_url, url_or_none, urlencode_postdata, ) class OdnoklassnikiIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?:(?:www|m|mobile)\.)? (?:odnoklassniki|ok)\.ru/ (?: video(?P<embed>embed)?/| web-api/video/moviePlayer/| live/| dk\?.*?st\.mvId= ) (?P<id>[\d-]+) ''' _EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:odnoklassniki|ok)\.ru/videoembed/.+?)\1'] _TESTS = [{ 'note': 'Coub embedded', 'url': 'http://ok.ru/video/1484130554189', 'info_dict': { 'id': '1keok9', 'ext': 'mp4', 'timestamp': 1545580896, 'view_count': int, 'thumbnail': r're:^https?://.*\.jpg$', 'title': 'Народная забава', 'uploader': 'Nevata', 'upload_date': '20181223', 'age_limit': 0, 'uploader_id': 'nevata.s', 'like_count': int, 'duration': 8.08, 'repost_count': int, }, }, { 'note': 'vk.com embedded', 'url': 'https://ok.ru/video/3568183087575', 'info_dict': { 'id': '-165101755_456243749', 'ext': 'mp4', 'uploader_id': '-165101755', 'duration': 132, 'timestamp': 1642869935, 'upload_date': '20220122', 'thumbnail': str, 'title': str, 'uploader': str, }, 'skip': 'vk extractor error', }, { # metadata in JSON, webm_dash with Firefox UA 'url': 'http://ok.ru/video/20079905452', 'md5': '8f477d8931c531374a3e36daec617b2c', 'info_dict': { 'id': '20079905452', 'ext': 'webm', 'title': 'Культура меняет нас (прекрасный ролик!))', 'thumbnail': str, 'duration': 100, 'upload_date': '20141207', 'uploader_id': '330537914540', 'uploader': 'Виталий Добровольский', 'like_count': int, 'age_limit': 0, }, 'params': { 'format': 'bv[ext=webm]', 'http_headers': {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; rv:102.0) Gecko/20100101 Firefox/102.0'}, }, }, { # metadataUrl 'url': 'http://ok.ru/video/63567059965189-0?fromTime=5', 'md5': 
'2bae2f58eefe1b3d26f3926c4a64d2f3', 'info_dict': { 'id': '63567059965189-0', 'ext': 'mp4', 'title': 'Девушка без комплексов ...', 'thumbnail': str, 'duration': 191, 'upload_date': '20150518', 'uploader_id': '534380003155', 'uploader': '☭ Андрей Мещанинов ☭', 'like_count': int, 'age_limit': 0, 'start_time': 5, }, 'params': {'skip_download': 'm3u8'}, }, { # YouTube embed (metadataUrl, provider == USER_YOUTUBE) 'url': 'https://ok.ru/video/3952212382174', 'md5': '5fb5f83ce16cb212d6bf887282b5da53', 'info_dict': { 'id': '5axVgHHDBvU', 'ext': 'mp4', 'title': 'Youtube-dl 101: What is it and HOW to use it! Full Download Walkthrough and Guide', 'description': 'md5:b57209eeb9d5c2f20c984dfb58862097', 'uploader': 'Lod Mer', 'uploader_id': '575186401502', 'duration': 1529, 'age_limit': 0, 'upload_date': '20210405', 'comment_count': int, 'live_status': 'not_live', 'view_count': int, 'thumbnail': 'https://i.mycdn.me/i?r=AEHujHvw2RjEbemUCNEorZbxYpb_p_9AcN2FmGik64Krkcmz37YtlY093oAM5-HIEAt7Zi9s0CiBOSDmbngC-I-k&fn=external_8', 'uploader_url': 'https://www.youtube.com/@MrKewlkid94', 'channel_follower_count': int, 'tags': ['youtube-dl', 'youtube playlists', 'download videos', 'download audio'], 'channel_id': 'UCVGtvURtEURYHtJFUegdSug', 'like_count': int, 'availability': 'public', 'channel_url': 'https://www.youtube.com/channel/UCVGtvURtEURYHtJFUegdSug', 'categories': ['Education'], 'playable_in_embed': True, 'channel': 'BornToReact', }, }, { # YouTube embed (metadata, provider == USER_YOUTUBE, no metadata.movie.title field) 'url': 'http://ok.ru/video/62036049272859-0', 'info_dict': { 'id': '62036049272859-0', 'ext': 'mp4', 'title': 'МУЗЫКА ДОЖДЯ .', 'description': 'md5:6f1867132bd96e33bf53eda1091e8ed0', 'upload_date': '20120106', 'uploader_id': '473534735899', 'uploader': 'МARINA D', 'age_limit': 0, }, 'params': { 'skip_download': True, }, 'skip': 'Video has not been found', }, { 'note': 'Only available in mobile webpage', 'url': 'https://m.ok.ru/video/2361249957145', 'info_dict': { 
'id': '2361249957145', 'ext': 'mp4', 'title': 'Быковское крещение', 'duration': 3038.181, 'thumbnail': r're:^https?://i\.mycdn\.me/videoPreview\?.+', }, }, { 'note': 'subtitles', 'url': 'https://ok.ru/video/4249587550747', 'info_dict': { 'id': '4249587550747', 'ext': 'mp4', 'title': 'Small Country An African Childhood (2020) (1080p) +subtitle', 'uploader': 'Sunflower Movies', 'uploader_id': '595802161179', 'upload_date': '20220816', 'duration': 6728, 'age_limit': 0, 'thumbnail': r're:^https?://i\.mycdn\.me/videoPreview\?.+', 'like_count': int, 'subtitles': dict, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://ok.ru/web-api/video/moviePlayer/20079905452', 'only_matching': True, }, { 'url': 'http://www.ok.ru/video/20648036891', 'only_matching': True, }, { 'url': 'http://www.ok.ru/videoembed/20648036891', 'only_matching': True, }, { 'url': 'http://m.ok.ru/video/20079905452', 'only_matching': True, }, { 'url': 'http://mobile.ok.ru/video/20079905452', 'only_matching': True, }, { 'url': 'https://www.ok.ru/live/484531969818', 'only_matching': True, }, { 'url': 'https://m.ok.ru/dk?st.cmd=movieLayer&st.discId=863789452017&st.retLoc=friend&st.rtu=%2Fdk%3Fst.cmd%3DfriendMovies%26st.mode%3Down%26st.mrkId%3D%257B%2522uploadedMovieMarker%2522%253A%257B%2522marker%2522%253A%25221519410114503%2522%252C%2522hasMore%2522%253Atrue%257D%252C%2522sharedMovieMarker%2522%253A%257B%2522marker%2522%253Anull%252C%2522hasMore%2522%253Afalse%257D%257D%26st.friendId%3D561722190321%26st.frwd%3Don%26_prevCmd%3DfriendMovies%26tkn%3D7257&st.discType=MOVIE&st.mvId=863789452017&_prevCmd=friendMovies&tkn=3648#lst#', 'only_matching': True, }, { # Paid video 'url': 'https://ok.ru/video/954886983203', 'only_matching': True, }, { 'url': 'https://ok.ru/videoembed/2932705602075', 'info_dict': { 'id': '2932705602075', 'ext': 'mp4', 'thumbnail': 'https://i.mycdn.me/videoPreview?id=1369902483995&type=37&idx=2&tkn=fqlnoQD_xwq5ovIlKfgNyU08qmM&fn=external_8', 'title': 'Boosty для тебя!', 
'uploader_id': '597811038747', 'like_count': 0, 'duration': 35, }, }] _WEBPAGE_TESTS = [{ 'url': 'https://boosty.to/ikakprosto/posts/56cedaca-b56a-4dfd-b3ed-98c79cfa0167', 'info_dict': { 'id': '3950343629563', 'ext': 'mp4', 'thumbnail': 'https://i.mycdn.me/videoPreview?id=2776238394107&type=37&idx=11&tkn=F3ejkUFcpuI4DnMRxrDGcH5YcmM&fn=external_8', 'title': 'Заяц Бусти.mp4', 'uploader_id': '571368965883', 'like_count': 0, 'duration': 10444, }, 'skip': 'Site no longer embeds', }] def _clear_cookies(self, cdn_url): # Direct http downloads will fail if CDN cookies are set # so we need to reset them after each format extraction self.cookiejar.clear(domain='.mycdn.me') self.cookiejar.clear(domain=urllib.parse.urlparse(cdn_url).hostname) @classmethod def _extract_embed_urls(cls, url, webpage): for x in super()._extract_embed_urls(url, webpage): yield smuggle_url(x, {'referrer': url}) def _real_extract(self, url): try: return self._extract_desktop(url) except ExtractorError as e: try: return self._extract_mobile(url) except ExtractorError: # error message of desktop webpage is in English raise e def _extract_desktop(self, url): start_time = int_or_none(urllib.parse.parse_qs( urllib.parse.urlparse(url).query).get('fromTime', [None])[0]) url, smuggled = unsmuggle_url(url, {}) video_id, is_embed = self._match_valid_url(url).group('id', 'embed') mode = 'videoembed' if is_embed else 'video' webpage = self._download_webpage( f'https://ok.ru/{mode}/{video_id}', video_id, note='Downloading desktop webpage', headers={'Referer': smuggled['referrer']} if smuggled.get('referrer') else {}) error = self._search_regex( r'[^>]+class="vp_video_stub_txt"[^>]*>([^<]+)<', webpage, 'error', default=None) # Direct link from boosty if (error == 'The author of this video has not been found or is blocked' and not smuggled.get('referrer') and mode == 'videoembed'): return self._extract_desktop(smuggle_url(url, {'referrer': 'https://boosty.to'})) elif error: raise ExtractorError(error, 
expected=True) elif '>Access to this video is restricted</div>' in webpage: self.raise_login_required() player = self._parse_json( unescapeHTML(self._search_regex( rf'data-options=(?P<quote>["\'])(?P<player>{{.+?{video_id}.+?}})(?P=quote)', webpage, 'player', group='player')), video_id) # embedded external player if player.get('isExternalPlayer') and player.get('url'): return self.url_result(player['url']) flashvars = player['flashvars'] metadata = flashvars.get('metadata') if metadata: metadata = self._parse_json(metadata, video_id) else: data = {} st_location = flashvars.get('location') if st_location: data['st.location'] = st_location metadata = self._download_json( urllib.parse.unquote(flashvars['metadataUrl']), video_id, 'Downloading metadata JSON', data=urlencode_postdata(data)) movie = metadata['movie'] # Some embedded videos may not contain title in movie dict (e.g. # http://ok.ru/video/62036049272859-0) thus we allow missing title # here and it's going to be extracted later by an extractor that # will process the actual embed. 
provider = metadata.get('provider') title = movie['title'] if provider == 'UPLOADED_ODKL' else movie.get('title') thumbnail = movie.get('poster') duration = int_or_none(movie.get('duration')) author = metadata.get('author', {}) uploader_id = author.get('id') uploader = author.get('name') upload_date = unified_strdate(self._html_search_meta( 'ya:ovs:upload_date', webpage, 'upload date', default=None)) age_limit = None adult = self._html_search_meta( 'ya:ovs:adult', webpage, 'age limit', default=None) if adult: age_limit = 18 if adult == 'true' else 0 like_count = int_or_none(metadata.get('likeCount')) subtitles = {} for sub in traverse_obj(metadata, ('movie', 'subtitleTracks', ...), expected_type=dict): sub_url = sub.get('url') if not sub_url: continue subtitles.setdefault(sub.get('language') or 'en', []).append({ 'url': sub_url, 'ext': 'vtt', }) info = { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'duration': duration, 'upload_date': upload_date, 'uploader': uploader, 'uploader_id': uploader_id, 'like_count': like_count, 'age_limit': age_limit, 'start_time': start_time, 'subtitles': subtitles, } if provider == 'USER_YOUTUBE': info.update({ '_type': 'url_transparent', 'url': movie['contentId'], }) return info assert title if provider == 'LIVE_TV_APP': info['title'] = title quality = qualities(('4', '0', '1', '2', '3', '5', '6', '7')) formats = [{ 'url': f['url'], 'ext': 'mp4', 'format_id': f.get('name'), } for f in traverse_obj(metadata, ('videos', lambda _, v: url_or_none(v['url'])))] m3u8_url = traverse_obj(metadata, 'hlsManifestUrl', 'ondemandHls') if m3u8_url: formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) self._clear_cookies(m3u8_url) for mpd_id, mpd_key in [('dash', 'ondemandDash'), ('webm', 'metadataWebmUrl')]: mpd_url = metadata.get(mpd_key) if mpd_url: formats.extend(self._extract_mpd_formats( mpd_url, video_id, mpd_id=mpd_id, fatal=False)) self._clear_cookies(mpd_url) 
dash_manifest = metadata.get('metadataEmbedded') if dash_manifest: formats.extend(self._parse_mpd_formats( compat_etree_fromstring(dash_manifest), 'mpd')) for fmt in formats: fmt_type = self._search_regex( r'\btype[/=](\d)', fmt['url'], 'format type', default=None) if fmt_type: fmt['quality'] = quality(fmt_type) # Live formats m3u8_url = metadata.get('hlsMasterPlaylistUrl') if m3u8_url: formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', m3u8_id='hls', fatal=False)) self._clear_cookies(m3u8_url) rtmp_url = metadata.get('rtmpUrl') if rtmp_url: formats.append({ 'url': rtmp_url, 'format_id': 'rtmp', 'ext': 'flv', }) if not formats: payment_info = metadata.get('paymentInfo') if payment_info: self.raise_no_formats('This video is paid, subscribe to download it', expected=True) info['formats'] = formats return info def _extract_mobile(self, url): video_id = self._match_id(url) webpage = self._download_webpage( f'https://m.ok.ru/video/{video_id}', video_id, note='Downloading mobile webpage') error = self._search_regex( r'видео</a>\s*<div\s+class="empty">(.+?)</div>', webpage, 'error', default=None) if error: raise ExtractorError(error, expected=True) json_data = self._search_regex( r'data-video="(.+?)"', webpage, 'json data') json_data = self._parse_json(unescapeHTML(json_data), video_id) or {} redirect_url = self._request_webpage(HEADRequest( json_data['videoSrc']), video_id, 'Requesting download URL').url self._clear_cookies(redirect_url) return { 'id': video_id, 'title': json_data.get('videoName'), 'duration': float_or_none(json_data.get('videoDuration'), scale=1000), 'thumbnail': json_data.get('videoPosterSrc'), 'formats': [{ 'format_id': 'mobile', 'url': redirect_url, 'ext': 'mp4', }], }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/thisvid.py
yt_dlp/extractor/thisvid.py
import itertools import re import urllib.parse from .common import InfoExtractor from ..utils import ( clean_html, get_element_by_class, int_or_none, url_or_none, urljoin, ) class ThisVidIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?thisvid\.com/(?P<type>videos|embed)/(?P<id>[A-Za-z0-9-]+)' _TESTS = [{ 'url': 'https://thisvid.com/videos/sitting-on-ball-tight-jeans/', 'md5': '839becb572995687e11a69dc4358a386', 'info_dict': { 'id': '3533241', 'ext': 'mp4', 'title': 'Sitting on ball tight jeans', 'description': 'md5:372353bb995883d1b65fddf507489acd', 'thumbnail': r're:https?://\w+\.thisvid\.com/(?:[^/]+/)+3533241/preview\.jpg', 'uploader_id': '150629', 'uploader': 'jeanslevisjeans', 'display_id': 'sitting-on-ball-tight-jeans', 'age_limit': 18, }, }, { 'url': 'https://thisvid.com/embed/3533241/', 'md5': '839becb572995687e11a69dc4358a386', 'info_dict': { 'id': '3533241', 'ext': 'mp4', 'title': 'Sitting on ball tight jeans', 'thumbnail': r're:https?://\w+\.thisvid\.com/(?:[^/]+/)+3533241/preview\.jpg', 'uploader_id': '150629', 'uploader': 'jeanslevisjeans', 'display_id': 'sitting-on-ball-tight-jeans', 'age_limit': 18, }, }] def _real_extract(self, url): main_id, type_ = re.match(self._VALID_URL, url).group('id', 'type') webpage = self._download_webpage(url, main_id) title = self._html_search_regex( r'<title\b[^>]*?>(?:Video:\s+)?(.+?)(?:\s+-\s+ThisVid(?:\.com| tube))?</title>', webpage, 'title') if type_ == 'embed': # look for more metadata video_alt_url = url_or_none(self._search_regex( rf'''video_alt_url\s*:\s+'({self._VALID_URL}/)',''', webpage, 'video_alt_url', default=None)) if video_alt_url and video_alt_url != url: webpage = self._download_webpage( video_alt_url, main_id, note='Redirecting embed to main page', fatal=False) or webpage video_holder = get_element_by_class('video-holder', webpage) or '' if '>This video is a private video' in video_holder: self.raise_login_required( (clean_html(video_holder) or 'Private video').partition('\n')[0]) uploader = 
self._html_search_regex( r'''(?s)<span\b[^>]*>Added by:\s*</span><a\b[^>]+\bclass\s*=\s*["']author\b[^>]+\bhref\s*=\s*["']https://thisvid\.com/members/([0-9]+/.{3,}?)\s*</a>''', webpage, 'uploader', default='') uploader = re.split(r'''/["'][^>]*>\s*''', uploader) if len(uploader) == 2: # id must be non-empty, uploader could be '' uploader_id, uploader = uploader uploader = uploader or None else: uploader_id = uploader = None return self.url_result( url, ie='Generic', url_transparent=True, title=title, age_limit=18, uploader=uploader, uploader_id=uploader_id) class ThisVidPlaylistBaseIE(InfoExtractor): _PLAYLIST_URL_RE = None @classmethod def _find_urls(cls, html): for m in re.finditer(rf'''<a\b[^>]+\bhref\s*=\s*["'](?P<url>{cls._PLAYLIST_URL_RE}\b)[^>]+>''', html): yield m.group('url') def _generate_playlist_entries(self, url, playlist_id, html=None): page_url = url for page in itertools.count(1): if not html: html = self._download_webpage( page_url, playlist_id, note=f'Downloading page {page}', fatal=False) or '' yield from self._find_urls(html) next_page = get_element_by_class('pagination-next', html) or '' if next_page: # member list page next_page = urljoin(url, self._search_regex( r'''<a\b[^>]+\bhref\s*=\s*("|')(?P<url>(?!#)(?:(?!\1).)+)''', next_page, 'next page link', group='url', default=None)) # in case a member page should have pagination-next with empty link, not just `else:` if next_page is None: # playlist page parsed_url = urllib.parse.urlparse(page_url) base_path, _, num = parsed_url.path.rpartition('/') num = int_or_none(num) if num is None: base_path, num = parsed_url.path.rstrip('/'), 1 parsed_url = parsed_url._replace(path=f'{base_path}/{num + 1}') next_page = urllib.parse.urlunparse(parsed_url) if page_url == next_page: next_page = None if not next_page: return page_url, html = next_page, None def _make_playlist_result(self, url): playlist_id = self._match_id(url) webpage = self._download_webpage(url, playlist_id) title = re.split( 
r'(?i)\s*\|\s*ThisVid\.com\s*$', self._og_search_title(webpage, default=None) or self._html_search_regex(r'(?s)<title\b[^>]*>(.+?)</title', webpage, 'title', fatal=False) or '', maxsplit=1)[0] or None return self.playlist_from_matches( self._generate_playlist_entries(url, playlist_id, webpage), playlist_id=playlist_id, playlist_title=title, ie=ThisVidIE) class ThisVidMemberIE(ThisVidPlaylistBaseIE): _VALID_URL = r'https?://thisvid\.com/members/(?P<id>\d+)' _TESTS = [{ 'url': 'https://thisvid.com/members/2140501/', 'info_dict': { 'id': '2140501', 'title': 'Rafflesia\'s Profile', }, 'playlist_mincount': 16, }, { 'url': 'https://thisvid.com/members/2140501/favourite_videos/', 'info_dict': { 'id': '2140501', 'title': 'Rafflesia\'s Favourite Videos', }, 'playlist_mincount': 15, }, { 'url': 'https://thisvid.com/members/636468/public_videos/', 'info_dict': { 'id': '636468', 'title': 'Happymouth\'s Public Videos', }, 'playlist_mincount': 196, }] _PLAYLIST_URL_RE = ThisVidIE._VALID_URL def _real_extract(self, url): return self._make_playlist_result(url) class ThisVidPlaylistIE(ThisVidPlaylistBaseIE): _VALID_URL = r'https?://thisvid\.com/playlist/(?P<id>\d+)/video/(?P<video_id>[A-Za-z0-9-]+)' _TESTS = [{ 'url': 'https://thisvid.com/playlist/6615/video/big-italian-booty-28/', 'info_dict': { 'id': '6615', 'title': 'Underwear Stuff', }, 'playlist_mincount': 200, }, { 'url': 'https://thisvid.com/playlist/6615/video/big-italian-booty-28/', 'info_dict': { 'id': '1072387', 'ext': 'mp4', 'title': 'Big Italian Booty 28', 'description': 'md5:1bccf7b13765e18fb27bf764dba7ede2', 'uploader_id': '367912', 'uploader': 'Jcmusclefun', 'age_limit': 18, 'display_id': 'big-italian-booty-28', 'thumbnail': r're:https?://\w+\.thisvid\.com/(?:[^/]+/)+1072387/preview\.jpg', }, 'params': { 'noplaylist': True, }, }] _PLAYLIST_URL_RE = _VALID_URL def _generate_playlist_entries(self, url, playlist_id, html=None): for wrapped_url in super()._generate_playlist_entries(url, playlist_id, html): video_id = 
re.match(self._VALID_URL, wrapped_url).group('video_id') yield urljoin(url, f'/videos/{video_id}/') def _real_extract(self, url): playlist_id, video_id = self._match_valid_url(url).group('id', 'video_id') if not self._yes_playlist(playlist_id, video_id): redirect_url = urljoin(url, f'/videos/{video_id}/') return self.url_result(redirect_url, ThisVidIE) result = self._make_playlist_result(url) # Fix duplicated title (`the title - the title` => `the title`) title = result['title'] t_len = len(title) if t_len > 5 and t_len % 2 != 0: t_len = t_len // 2 if title[t_len] == '-': first, second = map(str.strip, (title[:t_len], title[t_len + 1:])) if first and first == second: result['title'] = first return result
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ivideon.py
yt_dlp/extractor/ivideon.py
import urllib.parse from .common import InfoExtractor from ..utils import qualities class IvideonIE(InfoExtractor): IE_NAME = 'ivideon' IE_DESC = 'Ivideon TV' _VALID_URL = r'https?://(?:www\.)?ivideon\.com/tv/(?:[^/]+/)*camera/(?P<id>\d+-[\da-f]+)/(?P<camera_id>\d+)' _TESTS = [{ 'url': 'https://www.ivideon.com/tv/camera/100-916ca13b5c4ad9f564266424a026386d/0/', 'info_dict': { 'id': '100-916ca13b5c4ad9f564266424a026386d', 'ext': 'flv', 'title': 're:^Касса [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': 'Основное предназначение - запись действий кассиров. Плюс общий вид.', 'is_live': True, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.ivideon.com/tv/camera/100-c4ee4cb9ede885cf62dfbe93d7b53783/589824/?lang=ru', 'only_matching': True, }, { 'url': 'https://www.ivideon.com/tv/map/22.917923/-31.816406/16/camera/100-e7bc16c7d4b5bbd633fd5350b66dfa9a/0', 'only_matching': True, }] _QUALITIES = ('low', 'mid', 'hi') def _real_extract(self, url): mobj = self._match_valid_url(url) server_id, camera_id = mobj.group('id'), mobj.group('camera_id') camera_name, description = None, None camera_url = urllib.parse.urljoin( url, f'/tv/camera/{server_id}/{camera_id}/') webpage = self._download_webpage(camera_url, server_id, fatal=False) if webpage: config_string = self._search_regex( r'var\s+config\s*=\s*({.+?});', webpage, 'config', default=None) if config_string: config = self._parse_json(config_string, server_id, fatal=False) camera_info = config.get('ivTvAppOptions', {}).get('currentCameraInfo') if camera_info: camera_name = camera_info.get('camera_name') description = camera_info.get('misc', {}).get('description') if not camera_name: camera_name = self._html_search_meta( 'name', webpage, 'camera name', default=None) or self._search_regex( r'<h1[^>]+class="b-video-title"[^>]*>([^<]+)', webpage, 'camera name', default=None) quality = qualities(self._QUALITIES) formats = [{ 'url': 
'https://streaming.ivideon.com/flv/live?{}'.format(urllib.parse.urlencode({ 'server': server_id, 'camera': camera_id, 'sessionId': 'demo', 'q': quality(format_id), })), 'format_id': format_id, 'ext': 'flv', 'quality': quality(format_id), } for format_id in self._QUALITIES] return { 'id': server_id, 'title': camera_name or server_id, 'description': description, 'is_live': True, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/amadeustv.py
yt_dlp/extractor/amadeustv.py
from .common import InfoExtractor from ..utils import ( ExtractorError, float_or_none, int_or_none, parse_iso8601, url_or_none, ) from ..utils.traversal import traverse_obj class AmadeusTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?amadeus\.tv/library/(?P<id>[\da-f]+)' _TESTS = [{ 'url': 'http://www.amadeus.tv/library/65091a87ff85af59d9fc54c3', 'info_dict': { 'id': '5576678021301411311', 'ext': 'mp4', 'title': 'Jieon Park - 第五届珠海莫扎特国际青少年音乐周小提琴C组第三轮', 'thumbnail': 'http://1253584441.vod2.myqcloud.com/a0046a27vodtransbj1253584441/7db4af535576678021301411311/coverBySnapshot_10_0.jpg', 'duration': 1264.8, 'upload_date': '20230918', 'timestamp': 1695034800, 'display_id': '65091a87ff85af59d9fc54c3', 'view_count': int, 'description': 'md5:a0357b9c215489e2067cbae0b777bb95', }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) nuxt_data = self._search_nuxt_data(webpage, display_id, traverse=('fetch', '0')) video_id = traverse_obj(nuxt_data, ('item', 'video', {str})) if not video_id: raise ExtractorError('Unable to extract actual video ID') video_data = self._download_json( f'http://playvideo.qcloud.com/getplayinfo/v2/1253584441/{video_id}', video_id, headers={'Referer': 'http://www.amadeus.tv/'}) formats = [] for video in traverse_obj(video_data, ('videoInfo', ('sourceVideo', ('transcodeList', ...)), {dict})): if not url_or_none(video.get('url')): continue formats.append({ **traverse_obj(video, { 'url': 'url', 'format_id': ('definition', {lambda x: f'http-{x or "0"}'}), 'width': ('width', {int_or_none}), 'height': ('height', {int_or_none}), 'filesize': (('totalSize', 'size'), {int_or_none}), 'vcodec': ('videoStreamList', 0, 'codec'), 'acodec': ('audioStreamList', 0, 'codec'), 'fps': ('videoStreamList', 0, 'fps', {float_or_none}), }, get_all=False), 'http_headers': {'Referer': 'http://www.amadeus.tv/'}, }) return { 'id': video_id, 'display_id': display_id, 'formats': formats, 
**traverse_obj(video_data, { 'title': ('videoInfo', 'basicInfo', 'name', {str}), 'thumbnail': ('coverInfo', 'coverUrl', {url_or_none}), 'duration': ('videoInfo', 'sourceVideo', ('floatDuration', 'duration'), {float_or_none}), }, get_all=False), **traverse_obj(nuxt_data, ('item', { 'title': (('title', 'title_en', 'title_cn'), {str}), 'description': (('description', 'description_en', 'description_cn'), {str}), 'timestamp': ('date', {parse_iso8601}), 'view_count': ('view', {int_or_none}), }), get_all=False), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/litv.py
yt_dlp/extractor/litv.py
import json import uuid from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, join_nonempty, smuggle_url, traverse_obj, try_call, unsmuggle_url, urljoin, ) class LiTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?litv\.tv/(?:[^/?#]+/watch/|vod/[^/?#]+/content\.do\?content_id=)(?P<id>[\w-]+)' _URL_TEMPLATE = 'https://www.litv.tv/%s/watch/%s' _GEO_COUNTRIES = ['TW'] _TESTS = [{ 'url': 'https://www.litv.tv/drama/watch/VOD00041610', 'info_dict': { 'id': 'VOD00041606', 'title': '花千骨', }, 'playlist_count': 51, # 50 episodes + 1 trailer }, { 'url': 'https://www.litv.tv/drama/watch/VOD00041610', 'md5': 'b90ff1e9f1d8f5cfcd0a44c3e2b34c7a', 'info_dict': { 'id': 'VOD00041610', 'ext': 'mp4', 'title': '花千骨第1集', 'thumbnail': r're:https?://.*\.jpg$', 'description': '《花千骨》陸劇線上看。十六年前,平靜的村莊內,一名女嬰隨異相出生,途徑此地的蜀山掌門清虛道長算出此女命運非同一般,她體內散發的異香易招惹妖魔。一念慈悲下,他在村莊周邊設下結界阻擋妖魔入侵,讓其年滿十六後去蜀山,並賜名花千骨。', 'categories': ['奇幻', '愛情', '仙俠', '古裝'], 'episode': 'Episode 1', 'episode_number': 1, }, 'params': { 'noplaylist': True, }, }, { 'url': 'https://www.litv.tv/drama/watch/VOD00044841', 'md5': '88322ea132f848d6e3e18b32a832b918', 'info_dict': { 'id': 'VOD00044841', 'ext': 'mp4', 'title': '芈月傳第1集 霸星芈月降世楚國', 'description': '楚威王二年,太史令唐昧夜觀星象,發現霸星即將現世。王后得知霸星的預言後,想盡辦法不讓孩子順利出生,幸得莒姬相護化解危機。沒想到眾人期待下出生的霸星卻是位公主,楚威王對此失望至極。楚王后命人將女嬰丟棄河中,居然奇蹟似的被少司命像攔下,楚威王認為此女非同凡響,為她取名芈月。', }, 'skip': 'No longer exists', }] def _extract_playlist(self, playlist_data, content_type): all_episodes = [ self.url_result(smuggle_url( self._URL_TEMPLATE % (content_type, episode['content_id']), {'force_noplaylist': True})) # To prevent infinite recursion for episode in traverse_obj(playlist_data, ('seasons', ..., 'episodes', lambda _, v: v['content_id']))] return self.playlist_result(all_episodes, playlist_data['content_id'], playlist_data.get('title')) def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) vod_data = 
self._search_nextjs_data(webpage, video_id)['props']['pageProps'] program_info = traverse_obj(vod_data, ('programInformation', {dict})) or {} playlist_data = traverse_obj(vod_data, ('seriesTree')) if playlist_data and self._yes_playlist(program_info.get('series_id'), video_id, smuggled_data): return self._extract_playlist(playlist_data, program_info.get('content_type')) asset_id = traverse_obj(program_info, ('assets', 0, 'asset_id', {str})) if asset_id: # This is a VOD media_type = 'vod' else: # This is a live stream asset_id = program_info['content_id'] media_type = program_info['content_type'] puid = try_call(lambda: self._get_cookies('https://www.litv.tv/')['PUID'].value) if puid: endpoint = 'get-urls' else: puid = str(uuid.uuid4()) endpoint = 'get-urls-no-auth' video_data = self._download_json( f'https://www.litv.tv/api/{endpoint}', video_id, data=json.dumps({'AssetId': asset_id, 'MediaType': media_type, 'puid': puid}).encode(), headers={'Content-Type': 'application/json'}) if error := traverse_obj(video_data, ('error', {dict})): error_msg = traverse_obj(error, ('message', {str})) if error_msg and 'OutsideRegionError' in error_msg: self.raise_geo_restricted('This video is available in Taiwan only') elif error_msg: raise ExtractorError(f'{self.IE_NAME} said: {error_msg}', expected=True) raise ExtractorError(f'Unexpected error from {self.IE_NAME}') formats = self._extract_m3u8_formats( video_data['result']['AssetURLs'][0], video_id, ext='mp4', m3u8_id='hls') for a_format in formats: # LiTV HLS segments doesn't like compressions a_format.setdefault('http_headers', {})['Accept-Encoding'] = 'identity' return { 'id': video_id, 'formats': formats, 'title': join_nonempty('title', 'secondary_mark', delim='', from_dict=program_info), **traverse_obj(program_info, { 'description': ('description', {str}), 'thumbnail': ('picture', {urljoin('https://p-cdnstatic.svc.litv.tv/')}), 'categories': ('genres', ..., 'name', {str}), 'episode_number': ('episode', {int_or_none}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/gdcvault.py
yt_dlp/extractor/gdcvault.py
import re from .common import InfoExtractor from .kaltura import KalturaIE from ..networking import HEADRequest, Request from ..utils import remove_start, smuggle_url, urlencode_postdata class GDCVaultIE(InfoExtractor): _WORKING = False _VALID_URL = r'https?://(?:www\.)?gdcvault\.com/play/(?P<id>\d+)(?:/(?P<name>[\w-]+))?' _NETRC_MACHINE = 'gdcvault' _TESTS = [ { 'url': 'http://www.gdcvault.com/play/1019721/Doki-Doki-Universe-Sweet-Simple', 'md5': '7ce8388f544c88b7ac11c7ab1b593704', 'info_dict': { 'id': '201311826596_AWNY', 'display_id': 'Doki-Doki-Universe-Sweet-Simple', 'ext': 'mp4', 'title': 'Doki-Doki Universe: Sweet, Simple and Genuine (GDC Next 10)', }, }, { 'url': 'http://www.gdcvault.com/play/1015683/Embracing-the-Dark-Art-of', 'info_dict': { 'id': '201203272_1330951438328RSXR', 'display_id': 'Embracing-the-Dark-Art-of', 'ext': 'flv', 'title': 'Embracing the Dark Art of Mathematical Modeling in AI', }, 'params': { 'skip_download': True, # Requires rtmpdump }, }, { 'url': 'http://www.gdcvault.com/play/1015301/Thexder-Meets-Windows-95-or', 'md5': 'a5eb77996ef82118afbbe8e48731b98e', 'info_dict': { 'id': '1015301', 'display_id': 'Thexder-Meets-Windows-95-or', 'ext': 'flv', 'title': 'Thexder Meets Windows 95, or Writing Great Games in the Windows 95 Environment', }, 'skip': 'Requires login', }, { 'url': 'http://gdcvault.com/play/1020791/', 'only_matching': True, }, { # Hard-coded hostname 'url': 'http://gdcvault.com/play/1023460/Tenacious-Design-and-The-Interface', 'md5': 'a8efb6c31ed06ca8739294960b2dbabd', 'info_dict': { 'id': '840376_BQRC', 'ext': 'mp4', 'display_id': 'Tenacious-Design-and-The-Interface', 'title': 'Tenacious Design and The Interface of \'Destiny\'', }, }, { # Multiple audios 'url': 'http://www.gdcvault.com/play/1014631/Classic-Game-Postmortem-PAC', 'info_dict': { 'id': '12396_1299111843500GMPX', 'ext': 'mp4', 'title': 'How to Create a Good Game - From My Experience of Designing Pac-Man', }, # 'params': { # 'skip_download': True, # Requires 
rtmpdump # 'format': 'jp', # The japanese audio # } }, { # gdc-player.html 'url': 'http://www.gdcvault.com/play/1435/An-American-engine-in-Tokyo', 'info_dict': { 'id': '9350_1238021887562UHXB', 'display_id': 'An-American-engine-in-Tokyo', 'ext': 'mp4', 'title': 'An American Engine in Tokyo:/nThe collaboration of Epic Games and Square Enix/nFor THE LAST REMINANT', }, }, { # Kaltura Embed 'url': 'https://www.gdcvault.com/play/1026180/Mastering-the-Apex-of-Scaling', 'info_dict': { 'id': '0_h1fg8j3p', 'ext': 'mp4', 'title': 'Mastering the Apex of Scaling Game Servers (Presented by Multiplay)', 'timestamp': 1554401811, 'upload_date': '20190404', 'uploader_id': 'joe@blazestreaming.com', }, 'params': { 'format': 'mp4-408', }, }, { # Kaltura embed, whitespace between quote and embedded URL in iframe's src 'url': 'https://www.gdcvault.com/play/1025699', 'info_dict': { 'id': '0_zagynv0a', 'ext': 'mp4', 'title': 'Tech Toolbox', 'upload_date': '20190408', 'uploader_id': 'joe@blazestreaming.com', 'timestamp': 1554764629, }, 'params': { 'skip_download': True, }, }, { # HTML5 video 'url': 'http://www.gdcvault.com/play/1014846/Conference-Keynote-Shigeru', 'only_matching': True, }, ] def _login(self, webpage_url, display_id): username, password = self._get_login_info() if username is None or password is None: self.report_warning('It looks like ' + webpage_url + ' requires a login. 
Try specifying a username and password and try again.') return None mobj = re.match(r'(?P<root_url>https?://.*?/).*', webpage_url) login_url = mobj.group('root_url') + 'api/login.php' logout_url = mobj.group('root_url') + 'logout' login_form = { 'email': username, 'password': password, } request = Request(login_url, urlencode_postdata(login_form)) request.headers['Content-Type'] = 'application/x-www-form-urlencoded' self._download_webpage(request, display_id, 'Logging in') start_page = self._download_webpage(webpage_url, display_id, 'Getting authenticated video page') self._download_webpage(logout_url, display_id, 'Logging out') return start_page def _real_extract(self, url): video_id, name = self._match_valid_url(url).groups() display_id = name or video_id webpage_url = 'http://www.gdcvault.com/play/' + video_id start_page = self._download_webpage(webpage_url, display_id) direct_url = self._search_regex( r's1\.addVariable\("file",\s*encodeURIComponent\("(/[^"]+)"\)\);', start_page, 'url', default=None) if direct_url: title = self._html_search_regex( r'<td><strong>Session Name:?</strong></td>\s*<td>(.*?)</td>', start_page, 'title') video_url = 'http://www.gdcvault.com' + direct_url # resolve the url so that we can detect the correct extension video_url = self._request_webpage( HEADRequest(video_url), video_id).url return { 'id': video_id, 'display_id': display_id, 'url': video_url, 'title': title, } embed_url = KalturaIE._extract_url(start_page) if embed_url: embed_url = smuggle_url(embed_url, {'source_url': url}) ie_key = 'Kaltura' else: PLAYER_REGEX = r'<iframe src="(?P<xml_root>.+?)/(?:gdc-)?player.*?\.html.*?".*?</iframe>' xml_root = self._html_search_regex( PLAYER_REGEX, start_page, 'xml root', default=None) if xml_root is None: # Probably need to authenticate login_res = self._login(webpage_url, display_id) if login_res is None: self.report_warning('Could not login.') else: start_page = login_res # Grab the url from the authenticated page xml_root = 
self._html_search_regex( PLAYER_REGEX, start_page, 'xml root') xml_name = self._html_search_regex( r'<iframe src=".*?\?xml(?:=|URL=xml/)(.+?\.xml).*?".*?</iframe>', start_page, 'xml filename', default=None) if not xml_name: info = self._parse_html5_media_entries(url, start_page, video_id)[0] info.update({ 'title': remove_start(self._search_regex( r'>Session Name:\s*<.*?>\s*<td>(.+?)</td>', start_page, 'title', default=None) or self._og_search_title( start_page, default=None), 'GDC Vault - '), 'id': video_id, 'display_id': display_id, }) return info embed_url = f'{xml_root}/xml/{xml_name}' ie_key = 'DigitallySpeaking' return { '_type': 'url_transparent', 'id': video_id, 'display_id': display_id, 'url': embed_url, 'ie_key': ie_key, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/streaks.py
yt_dlp/extractor/streaks.py
import json import urllib.parse from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, filter_dict, float_or_none, join_nonempty, mimetype2ext, parse_iso8601, unsmuggle_url, update_url_query, url_or_none, ) from ..utils.traversal import traverse_obj class StreaksBaseIE(InfoExtractor): _API_URL_TEMPLATE = 'https://{}.api.streaks.jp/v1/projects/{}/medias/{}{}' _GEO_BYPASS = False _GEO_COUNTRIES = ['JP'] def _extract_from_streaks_api(self, project_id, media_id, headers=None, query=None, ssai=False): try: response = self._download_json( self._API_URL_TEMPLATE.format('playback', project_id, media_id, ''), media_id, 'Downloading STREAKS playback API JSON', headers={ 'Accept': 'application/json', 'Origin': 'https://players.streaks.jp', **self.geo_verification_headers(), **(headers or {}), }) except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status in (403, 404): error = self._parse_json(e.cause.response.read().decode(), media_id, fatal=False) message = traverse_obj(error, ('message', {str})) code = traverse_obj(error, ('code', {str})) error_id = traverse_obj(error, ('id', {int})) if code == 'REQUEST_FAILED': if error_id == 124: self.raise_geo_restricted(countries=self._GEO_COUNTRIES) elif error_id == 126: raise ExtractorError('Access is denied (possibly due to invalid/missing API key)') if code == 'MEDIA_NOT_FOUND': raise ExtractorError(join_nonempty(code, message, delim=': '), expected=True) if code or message: raise ExtractorError(join_nonempty(code, error_id, message, delim=': ')) raise streaks_id = response['id'] live_status = { 'clip': 'was_live', 'file': 'not_live', 'linear': 'is_live', 'live': 'is_live', }.get(response.get('type')) formats, subtitles = [], {} drm_formats = False for source in traverse_obj(response, ('sources', lambda _, v: v['src'])): if source.get('key_systems'): drm_formats = True continue src_url = source['src'] is_live = live_status == 'is_live' ext = 
mimetype2ext(source.get('type')) if ext != 'm3u8': self.report_warning(f'Unsupported stream type: {ext}') continue if is_live and ssai: session_params = traverse_obj(self._download_json( self._API_URL_TEMPLATE.format('ssai', project_id, streaks_id, '/ssai/session'), media_id, 'Downloading session parameters', headers={'Content-Type': 'application/json', 'Accept': 'application/json'}, data=json.dumps({'id': source['id']}).encode(), ), (0, 'query', {urllib.parse.parse_qs})) src_url = update_url_query(src_url, session_params) fmts, subs = self._extract_m3u8_formats_and_subtitles( src_url, media_id, 'mp4', m3u8_id='hls', fatal=False, live=is_live, query=query) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) if not formats and drm_formats: self.report_drm(media_id) self._remove_duplicate_formats(formats) for subs in traverse_obj(response, ( 'tracks', lambda _, v: v['kind'] in ('captions', 'subtitles') and url_or_none(v['src']), )): lang = traverse_obj(subs, ('srclang', {str.lower})) or 'ja' subtitles.setdefault(lang, []).append({'url': subs['src']}) return { 'id': streaks_id, 'display_id': media_id, 'formats': formats, 'live_status': live_status, 'subtitles': subtitles, 'uploader_id': project_id, **traverse_obj(response, { 'title': ('name', {str}), 'description': ('description', {str}, filter), 'duration': ('duration', {float_or_none}), 'modified_timestamp': ('updated_at', {parse_iso8601}), 'tags': ('tags', ..., {str}), 'thumbnails': (('poster', 'thumbnail'), 'src', {'url': {url_or_none}}), 'timestamp': ('created_at', {parse_iso8601}), }), } class StreaksIE(StreaksBaseIE): _VALID_URL = [ r'https?://players\.streaks\.jp/(?P<project_id>[\w-]+)/[\da-f]+/index\.html\?(?:[^#]+&)?m=(?P<id>(?:ref:)?[\w-]+)', r'https?://playback\.api\.streaks\.jp/v1/projects/(?P<project_id>[\w-]+)/medias/(?P<id>(?:ref:)?[\w-]+)', ] _EMBED_REGEX = [rf'<iframe\s+[^>]*\bsrc\s*=\s*["\'](?P<url>{_VALID_URL[0]})'] _TESTS = [{ 'url': 
'https://players.streaks.jp/tipness/08155cd19dc14c12bebefb69b92eafcc/index.html?m=dbdf2df35b4d483ebaeeaeb38c594647', 'info_dict': { 'id': 'dbdf2df35b4d483ebaeeaeb38c594647', 'ext': 'mp4', 'title': '3shunenCM_edit.mp4', 'display_id': 'dbdf2df35b4d483ebaeeaeb38c594647', 'duration': 47.533, 'live_status': 'not_live', 'modified_date': '20230726', 'modified_timestamp': 1690356180, 'timestamp': 1690355996, 'upload_date': '20230726', 'uploader_id': 'tipness', }, }, { 'url': 'https://players.streaks.jp/ktv-web/0298e8964c164ab384c07ef6e08c444b/index.html?m=ref:mycoffeetime_250317', 'info_dict': { 'id': 'dccdc079e3fd41f88b0c8435e2d453ab', 'ext': 'mp4', 'title': 'わたしの珈琲時間_250317', 'display_id': 'ref:mycoffeetime_250317', 'duration': 122.99, 'live_status': 'not_live', 'modified_date': '20250310', 'modified_timestamp': 1741586302, 'thumbnail': r're:https?://.+\.jpg', 'timestamp': 1741585839, 'upload_date': '20250310', 'uploader_id': 'ktv-web', }, }, { 'url': 'https://playback.api.streaks.jp/v1/projects/ktv-web/medias/b5411938e1e5435dac71edf829dd4813', 'info_dict': { 'id': 'b5411938e1e5435dac71edf829dd4813', 'ext': 'mp4', 'title': 'KANTELE_SYUSEi_0630', 'display_id': 'b5411938e1e5435dac71edf829dd4813', 'live_status': 'not_live', 'modified_date': '20250122', 'modified_timestamp': 1737522999, 'thumbnail': r're:https?://.+\.jpg', 'timestamp': 1735205137, 'upload_date': '20241226', 'uploader_id': 'ktv-web', }, }, { # TVer Olympics: website already down, but api remains accessible 'url': 'https://playback.api.streaks.jp/v1/projects/tver-olympic/medias/ref:sp_240806_1748_dvr', 'info_dict': { 'id': 'c10f7345adb648cf804d7578ab93b2e3', 'ext': 'mp4', 'title': 'サッカー 男子 準決勝_dvr', 'display_id': 'ref:sp_240806_1748_dvr', 'duration': 12960.0, 'live_status': 'was_live', 'modified_date': '20240805', 'modified_timestamp': 1722896263, 'timestamp': 1722777618, 'upload_date': '20240804', 'uploader_id': 'tver-olympic', }, }, { # TBS FREE: 24-hour stream 'url': 
'https://playback.api.streaks.jp/v1/projects/tbs/medias/ref:simul-02', 'info_dict': { 'id': 'c4e83a7b48f4409a96adacec674b4e22', 'ext': 'mp4', 'title': str, 'display_id': 'ref:simul-02', 'live_status': 'is_live', 'modified_date': '20241031', 'modified_timestamp': 1730339858, 'timestamp': 1705466840, 'upload_date': '20240117', 'uploader_id': 'tbs', }, }, { # DRM protected 'url': 'https://players.streaks.jp/sp-jbc/a12d7ee0f40c49d6a0a2bff520639677/index.html?m=5f89c62f37ee4a68be8e6e3b1396c7d8', 'only_matching': True, }] _WEBPAGE_TESTS = [{ 'url': 'https://event.play.jp/playnext2023/', 'info_dict': { 'id': '2d975178293140dc8074a7fc536a7604', 'ext': 'mp4', 'title': 'PLAY NEXTキームービー(本番)', 'uploader_id': 'play', 'duration': 17.05, 'thumbnail': r're:https?://.+\.jpg', 'timestamp': 1668387517, 'upload_date': '20221114', 'modified_timestamp': 1739411523, 'modified_date': '20250213', 'live_status': 'not_live', }, }, { 'url': 'https://wowshop.jp/Page/special/cooking_goods/?bid=wowshop&srsltid=AfmBOor_phUNoPEE_UCPiGGSCMrJE5T2US397smvsbrSdLqUxwON0el4', 'playlist_mincount': 2, 'info_dict': { 'id': '?bid=wowshop&srsltid=AfmBOor_phUNoPEE_UCPiGGSCMrJE5T2US397smvsbrSdLqUxwON0el4', 'title': 'ワンランク上の料理道具でとびきりの“おいしい”を食卓へ|wowshop', 'description': 'md5:914b5cb8624fc69274c7fb7b2342958f', 'age_limit': 0, 'thumbnail': 'https://wowshop.jp/Page/special/cooking_goods/images/ogp.jpg', }, }] def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) project_id, media_id = self._match_valid_url(url).group('project_id', 'id') return self._extract_from_streaks_api( project_id, media_id, headers=filter_dict({ 'X-Streaks-Api-Key': smuggled_data.get('api_key'), }))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/funker530.py
yt_dlp/extractor/funker530.py
from .common import InfoExtractor from .rumble import RumbleEmbedIE from .youtube import YoutubeIE from ..utils import ExtractorError, clean_html, get_element_by_class, strip_or_none class Funker530IE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?funker530\.com/video/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://funker530.com/video/azov-patrol-caught-in-open-under-automatic-grenade-launcher-fire/', 'md5': '085f50fea27523a388bbc22e123e09c8', 'info_dict': { 'id': 'v2qbmu4', 'ext': 'mp4', 'title': 'Azov Patrol Caught In Open Under Automatic Grenade Launcher Fire', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'Funker530', 'channel': 'Funker530', 'channel_url': 'https://rumble.com/c/c-1199543', 'width': 1280, 'height': 720, 'fps': 25, 'duration': 27, 'upload_date': '20230608', 'timestamp': 1686241321, 'live_status': 'not_live', 'description': 'md5:bea2e1f458095414e04b5ac189c2f980', }, }, { 'url': 'https://funker530.com/video/my-friends-joined-the-russians-civdiv/', 'md5': 'a42c2933391210662e93e867d7124b70', 'info_dict': { 'id': 'k-pk4bOvoac', 'ext': 'mp4', 'view_count': int, 'channel': 'Civ Div', 'comment_count': int, 'channel_follower_count': int, 'thumbnail': 'https://i.ytimg.com/vi/k-pk4bOvoac/maxresdefault.jpg', 'uploader_id': '@CivDiv', 'duration': 357, 'channel_url': 'https://www.youtube.com/channel/UCgsCiwJ88up-YyMHo7hL5-A', 'tags': [], 'uploader_url': 'https://www.youtube.com/@CivDiv', 'channel_id': 'UCgsCiwJ88up-YyMHo7hL5-A', 'like_count': int, 'description': 'md5:aef75ec3f59c07a0e39400f609b24429', 'live_status': 'not_live', 'age_limit': 0, 'uploader': 'Civ Div', 'categories': ['People & Blogs'], 'title': 'My “Friends” joined the Russians.', 'availability': 'public', 'upload_date': '20230608', 'playable_in_embed': True, 'heatmap': 'count:100', }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) info = {} rumble_url = list(RumbleEmbedIE._extract_embed_urls(url, webpage)) if rumble_url: 
info = {'url': rumble_url[0], 'ie_key': RumbleEmbedIE.ie_key()} else: youtube_url = list(YoutubeIE._extract_embed_urls(url, webpage)) if youtube_url: info = {'url': youtube_url[0], 'ie_key': YoutubeIE.ie_key()} if not info: raise ExtractorError('No videos found on webpage', expected=True) return { **info, '_type': 'url_transparent', 'description': strip_or_none(self._search_regex( r'(?s)(.+)About the Author', clean_html(get_element_by_class('video-desc-paragraph', webpage)), 'description', default=None)), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/stretchinternet.py
yt_dlp/extractor/stretchinternet.py
from .common import InfoExtractor class StretchInternetIE(InfoExtractor): _VALID_URL = r'https?://portal\.stretchinternet\.com/[^/]+/(?:portal|full)\.htm\?.*?\beventId=(?P<id>\d+)' _TEST = { 'url': 'https://portal.stretchinternet.com/umary/portal.htm?eventId=573272&streamType=video', 'info_dict': { 'id': '573272', 'ext': 'mp4', 'title': 'UNIVERSITY OF MARY WRESTLING VS UPPER IOWA', # 'timestamp': 1575668361, # 'upload_date': '20191206', 'uploader_id': '99997', }, } def _real_extract(self, url): video_id = self._match_id(url) media_url = self._download_json( 'https://core.stretchlive.com/trinity/event/tcg/' + video_id, video_id)[0]['media'][0]['url'] event = self._download_json( 'https://neo-client.stretchinternet.com/portal-ws/getEvent.json', video_id, query={'eventID': video_id, 'token': 'asdf'})['event'] return { 'id': video_id, 'title': event['title'], # TODO: parse US timezone abbreviations # 'timestamp': event.get('dateTimeString'), 'url': 'https://' + media_url, 'uploader_id': event.get('ownerID'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/theplatform.py
yt_dlp/extractor/theplatform.py
import hashlib import hmac import re import time from .adobepass import AdobePassIE from ..networking import HEADRequest, Request from ..utils import ( ExtractorError, determine_ext, find_xpath_attr, float_or_none, int_or_none, mimetype2ext, parse_age_limit, parse_qs, traverse_obj, unsmuggle_url, update_url, update_url_query, url_or_none, urlhandle_detect_ext, xpath_with_ns, ) default_ns = 'http://www.w3.org/2005/SMIL21/Language' _x = lambda p: xpath_with_ns(p, {'smil': default_ns}) class ThePlatformBaseIE(AdobePassIE): _TP_TLD = 'com' def _extract_theplatform_smil(self, smil_url, video_id, note='Downloading SMIL data'): meta = self._download_xml( smil_url, video_id, note=note, query={'format': 'SMIL'}, headers=self.geo_verification_headers()) error_element = find_xpath_attr(meta, _x('.//smil:ref'), 'src') if error_element is not None: exception = find_xpath_attr( error_element, _x('.//smil:param'), 'name', 'exception') if exception is not None: if exception.get('value') == 'GeoLocationBlocked': self.raise_geo_restricted(error_element.attrib['abstract']) elif error_element.attrib['src'].startswith( f'http://link.theplatform.{self._TP_TLD}/s/errorFiles/Unavailable.'): raise ExtractorError( error_element.attrib['abstract'], expected=True) smil_formats, subtitles = self._parse_smil_formats_and_subtitles( meta, smil_url, video_id, namespace=default_ns, # the parameters are from syfy.com, other sites may use others, # they also work for nbc.com f4m_params={'g': 'UXWGVKRWHFSP', 'hdcore': '3.0.3'}, transform_rtmp_url=lambda streamer, src: (streamer, 'mp4:' + src)) formats = [] for _format in smil_formats: media_url = _format['url'] if determine_ext(media_url) == 'm3u8': hdnea2 = self._get_cookies(media_url).get('hdnea2') if hdnea2: _format['url'] = update_url_query(media_url, {'hdnea3': hdnea2.value}) formats.append(_format) return formats, subtitles def _download_theplatform_metadata(self, path, video_id, fatal=True): return self._download_json( 
f'https://link.theplatform.{self._TP_TLD}/s/{path}', video_id, fatal=fatal, query={'format': 'preview'}) or {} @staticmethod def _parse_theplatform_metadata(tp_metadata): def site_specific_filter(*fields): return lambda k, v: v and k.endswith(tuple(f'${f}' for f in fields)) info = traverse_obj(tp_metadata, { 'title': ('title', {str}), 'episode': ('title', {str}), 'description': ('description', {str}), 'thumbnail': ('defaultThumbnailUrl', {url_or_none}), 'duration': ('duration', {float_or_none(scale=1000)}), 'timestamp': ('pubDate', {float_or_none(scale=1000)}), 'uploader': ('billingCode', {str}), 'creators': ('author', {str}, filter, all, filter), 'categories': ( 'categories', lambda _, v: v.get('label') in ['category', None], 'name', {str}, filter, all, filter), 'tags': ('keywords', {str}, filter, {lambda x: re.split(r'[;,]\s?', x)}, filter), 'age_limit': ('ratings', ..., 'rating', {parse_age_limit}, any), 'season_number': (site_specific_filter('seasonNumber'), {int_or_none}, any), 'episode_number': (site_specific_filter('episodeNumber', 'airOrder'), {int_or_none}, any), 'series': (site_specific_filter('show', 'seriesTitle', 'seriesShortTitle'), (None, ...), {str}, any), 'location': (site_specific_filter('region'), {str}, any), 'media_type': (site_specific_filter('programmingType', 'type'), {str}, any), }) chapters = traverse_obj(tp_metadata, ('chapters', ..., { 'start_time': ('startTime', {float_or_none(scale=1000)}), 'end_time': ('endTime', {float_or_none(scale=1000)}), })) # Ignore pointless single chapters from short videos that span the entire video's duration if len(chapters) > 1 or traverse_obj(chapters, (0, 'end_time')): info['chapters'] = chapters info['subtitles'] = {} for caption in traverse_obj(tp_metadata, ('captions', lambda _, v: url_or_none(v['src']))): info['subtitles'].setdefault(caption.get('lang') or 'en', []).append({ 'url': caption['src'], 'ext': mimetype2ext(caption.get('type')), }) return info def _extract_theplatform_metadata(self, path, 
video_id): info = self._download_theplatform_metadata(path, video_id) return self._parse_theplatform_metadata(info) class ThePlatformIE(ThePlatformBaseIE): _VALID_URL = r'''(?x) (?:https?://(?:link|player)\.theplatform\.com/[sp]/(?P<provider_id>[^/]+)/ (?:(?:(?:[^/]+/)+select/)?(?P<media>media/(?:guid/\d+/)?)?|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))? |theplatform:)(?P<id>[^/\?&]+)''' _EMBED_REGEX = [ r'''(?x) <meta\s+ property=(["'])(?:og:video(?::(?:secure_)?url)?|twitter:player)\1\s+ content=(["'])(?P<url>https?://player\.theplatform\.com/p/.+?)\2''', r'(?s)<(?:iframe|script)[^>]+src=(["\'])(?P<url>(?:https?:)?//player\.theplatform\.com/p/.+?)\1', ] _TESTS = [{ # from http://www.metacafe.com/watch/cb-e9I_cZgTgIPd/blackberrys_big_bold_z30/ 'url': 'http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd/meta.smil?format=smil&Tracking=true&mbr=true', 'info_dict': { 'id': 'e9I_cZgTgIPd', 'ext': 'flv', 'title': 'Blackberry\'s big, bold Z30', 'description': 'The Z30 is Blackberry\'s biggest, baddest mobile messaging device yet.', 'duration': 247, 'timestamp': 1383239700, 'upload_date': '20131031', 'uploader': 'CBSI-NEW', }, 'params': { # rtmp download 'skip_download': True, }, 'skip': '404 Not Found', }, { # from http://www.cnet.com/videos/tesla-model-s-a-second-step-towards-a-cleaner-motoring-future/ 'url': 'http://link.theplatform.com/s/kYEXFC/22d_qsQ6MIRT', 'info_dict': { 'id': '22d_qsQ6MIRT', 'ext': 'flv', 'description': 'md5:ac330c9258c04f9d7512cf26b9595409', 'title': 'Tesla Model S: A second step towards a cleaner motoring future', 'timestamp': 1426176191, 'upload_date': '20150312', 'uploader': 'CBSI-NEW', }, 'params': { # rtmp download 'skip_download': True, }, 'skip': 'CNet no longer uses ThePlatform', }, { 'url': 'https://player.theplatform.com/p/D6x-PC/pulse_preview/embed/select/media/yMBg9E8KFxZD', 'info_dict': { 'id': 'yMBg9E8KFxZD', 'ext': 'mp4', 'description': 'md5:644ad9188d655b742f942bf2e06b002d', 'title': 'HIGHLIGHTS: USA bag first ever series 
Cup win', 'uploader': 'EGSM', }, 'skip': 'Dead link', }, { 'url': 'http://player.theplatform.com/p/NnzsPC/widget/select/media/4Y0TlYUr_ZT7', 'only_matching': True, }, { 'url': 'http://player.theplatform.com/p/2E2eJC/nbcNewsOffsite?guid=tdy_or_siri_150701', 'md5': 'fb96bb3d85118930a5b055783a3bd992', 'info_dict': { 'id': 'tdy_or_siri_150701', 'ext': 'mp4', 'title': 'iPhone Siri’s sassy response to a math question has people talking', 'description': 'md5:a565d1deadd5086f3331d57298ec6333', 'duration': 83.0, 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1435752600, 'upload_date': '20150701', 'uploader': 'NBCU-NEWS', }, 'skip': 'Error: Player PID "nbcNewsOffsite" is disabled', }, { # From http://www.nbc.com/the-blacklist/video/sir-crispin-crandall/2928790?onid=137781#vc137781=1 # geo-restricted (US), HLS encrypted with AES-128 'url': 'http://player.theplatform.com/p/NnzsPC/onsite_universal/select/media/guid/2410887629/2928790?fwsitesection=nbc_the_blacklist_video_library&autoPlay=true&carouselID=137781', 'only_matching': True, }] @classmethod def _extract_embed_urls(cls, url, webpage): # Are whitespaces ignored in URLs? 
# https://github.com/ytdl-org/youtube-dl/issues/12044 for embed_url in super()._extract_embed_urls(url, webpage): yield re.sub(r'\s', '', embed_url) @staticmethod def _sign_url(url, sig_key, sig_secret, life=600, include_qs=False): flags = '10' if include_qs else '00' expiration_date = '%x' % (int(time.time()) + life) def str_to_hex(str_data): return str_data.encode('ascii').hex() relative_path = re.match(r'https?://link\.theplatform\.com/s/([^?]+)', url).group(1) clear_text = bytes.fromhex(flags + expiration_date + str_to_hex(relative_path)) checksum = hmac.new(sig_key.encode('ascii'), clear_text, hashlib.sha1).hexdigest() sig = flags + expiration_date + checksum + str_to_hex(sig_secret) return f'{url}&sig={sig}' def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) self._initialize_geo_bypass({ 'countries': smuggled_data.get('geo_countries'), }) mobj = self._match_valid_url(url) provider_id = mobj.group('provider_id') video_id = mobj.group('id') if not provider_id: provider_id = 'dJ5BDC' path = provider_id + '/' if mobj.group('media'): path += mobj.group('media') path += video_id qs_dict = parse_qs(url) if 'guid' in qs_dict: webpage = self._download_webpage(url, video_id) scripts = re.findall(r'<script[^>]+src="([^"]+)"', webpage) feed_id = None # feed id usually locates in the last script. 
# Seems there's no pattern for the interested script filename, so # I try one by one for script in reversed(scripts): feed_script = self._download_webpage( self._proto_relative_url(script, 'http:'), video_id, 'Downloading feed script') feed_id = self._search_regex( r'defaultFeedId\s*:\s*"([^"]+)"', feed_script, 'default feed id', default=None) if feed_id is not None: break if feed_id is None: raise ExtractorError('Unable to find feed id') return self.url_result('http://feed.theplatform.com/f/{}/{}?byGuid={}'.format( provider_id, feed_id, qs_dict['guid'][0])) if smuggled_data.get('force_smil_url', False): smil_url = url # Explicitly specified SMIL (see https://github.com/ytdl-org/youtube-dl/issues/7385) elif '/guid/' in url: headers = {} source_url = smuggled_data.get('source_url') if source_url: headers['Referer'] = source_url request = Request(url, headers=headers) webpage = self._download_webpage(request, video_id) smil_url = self._search_regex( r'<link[^>]+href=(["\'])(?P<url>.+?)\1[^>]+type=["\']application/smil\+xml', webpage, 'smil url', group='url') path = self._search_regex( r'link\.theplatform\.com/s/((?:[^/?#&]+/)+[^/?#&]+)', smil_url, 'path') smil_url += '?' if '?' 
not in smil_url else '&' + 'formats=m3u,mpeg4' elif mobj.group('config'): config_url = url + '&form=json' config_url = config_url.replace('swf/', 'config/') config_url = config_url.replace('onsite/', 'onsite/config/') config = self._download_json(config_url, video_id, 'Downloading config') release_url = config.get('releaseUrl') or f'http://link.theplatform.com/s/{path}?mbr=true' smil_url = release_url + '&formats=MPEG4&manifest=f4m' else: smil_url = f'http://link.theplatform.com/s/{path}?mbr=true' sig = smuggled_data.get('sig') if sig: smil_url = self._sign_url(smil_url, sig['key'], sig['secret']) formats, subtitles = self._extract_theplatform_smil(smil_url, video_id) # With some sites, manifest URL must be forced to extract HLS formats if not traverse_obj(formats, lambda _, v: v['format_id'].startswith('hls')): m3u8_url = update_url(url, query='mbr=true&manifest=m3u', fragment=None) urlh = self._request_webpage( HEADRequest(m3u8_url), video_id, 'Checking for HLS formats', 'No HLS formats found', fatal=False) if urlh and urlhandle_detect_ext(urlh) == 'm3u8': m3u8_fmts, m3u8_subs = self._extract_m3u8_formats_and_subtitles( m3u8_url, video_id, m3u8_id='hls', fatal=False) formats.extend(m3u8_fmts) self._merge_subtitles(m3u8_subs, target=subtitles) ret = self._extract_theplatform_metadata(path, video_id) combined_subtitles = self._merge_subtitles(ret.get('subtitles', {}), subtitles) ret.update({ 'id': video_id, 'formats': formats, 'subtitles': combined_subtitles, }) return ret class ThePlatformFeedIE(ThePlatformBaseIE): _URL_TEMPLATE = '%s//feed.theplatform.com/f/%s/%s?form=json&%s' _VALID_URL = r'https?://feed\.theplatform\.com/f/(?P<provider_id>[^/]+)/(?P<feed_id>[^?/]+)\?(?:[^&]+&)*(?P<filter>by(?:Gui|I)d=(?P<id>[^&]+))' _TESTS = [{ # From http://player.theplatform.com/p/7wvmTC/MSNBCEmbeddedOffSite?guid=n_hardball_5biden_140207 'url': 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207', 'md5': 
'6e32495b5073ab414471b615c5ded394', 'info_dict': { 'id': 'n_hardball_5biden_140207', 'ext': 'mp4', 'title': 'The Biden factor: will Joe run in 2016?', 'description': 'Could Vice President Joe Biden be preparing a 2016 campaign? Mark Halperin and Sam Stein weigh in.', 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20140208', 'timestamp': 1391824260, 'duration': 467.0, 'categories': ['MSNBC/Issues/Democrats', 'MSNBC/Issues/Elections/Election 2016'], 'uploader': 'NBCU-NEWS', }, }, { 'url': 'http://feed.theplatform.com/f/2E2eJC/nnd_NBCNews?byGuid=nn_netcast_180306.Copy.01', 'only_matching': True, }] def _extract_feed_info(self, provider_id, feed_id, filter_query, video_id, custom_fields=None, asset_types_query={}, account_id=None): real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, filter_query) entry = self._download_json(real_url, video_id)['entries'][0] main_smil_url = 'http://link.theplatform.com/s/%s/media/guid/%d/%s' % (provider_id, account_id, entry['guid']) if account_id else entry.get('plmedia$publicUrl') formats = [] subtitles = {} first_video_id = None duration = None asset_types = [] for item in entry['media$content']: smil_url = item['plfile$url'] cur_video_id = ThePlatformIE._match_id(smil_url) if first_video_id is None: first_video_id = cur_video_id duration = float_or_none(item.get('plfile$duration')) file_asset_types = item.get('plfile$assetTypes') or parse_qs(smil_url)['assetTypes'] for asset_type in file_asset_types: if asset_type in asset_types: continue asset_types.append(asset_type) query = { 'mbr': 'true', 'formats': item['plfile$format'], 'assetTypes': asset_type, } if asset_type in asset_types_query: query.update(asset_types_query[asset_type]) cur_formats, cur_subtitles = self._extract_theplatform_smil(update_url_query( main_smil_url or smil_url, query), video_id, f'Downloading SMIL data for {asset_type}') formats.extend(cur_formats) subtitles = self._merge_subtitles(subtitles, cur_subtitles) thumbnails = [{ 
'url': thumbnail['plfile$url'], 'width': int_or_none(thumbnail.get('plfile$width')), 'height': int_or_none(thumbnail.get('plfile$height')), } for thumbnail in entry.get('media$thumbnails', [])] timestamp = int_or_none(entry.get('media$availableDate'), scale=1000) categories = [item['media$name'] for item in entry.get('media$categories', [])] ret = self._extract_theplatform_metadata(f'{provider_id}/{first_video_id}', video_id) subtitles = self._merge_subtitles(subtitles, ret['subtitles']) ret.update({ 'id': video_id, 'formats': formats, 'subtitles': subtitles, 'thumbnails': thumbnails, 'duration': duration, 'timestamp': timestamp, 'categories': categories, }) if custom_fields: ret.update(custom_fields(entry)) return ret def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') provider_id = mobj.group('provider_id') feed_id = mobj.group('feed_id') filter_query = mobj.group('filter') return self._extract_feed_info(provider_id, feed_id, filter_query, video_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/formula1.py
yt_dlp/extractor/formula1.py
from .common import InfoExtractor class Formula1IE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?formula1\.com/en/latest/video\.[^.]+\.(?P<id>\d+)\.html' _TEST = { 'url': 'https://www.formula1.com/en/latest/video.race-highlights-spain-2016.6060988138001.html', 'md5': 'be7d3a8c2f804eb2ab2aa5d941c359f8', 'info_dict': { 'id': '6060988138001', 'ext': 'mp4', 'title': 'Race highlights - Spain 2016', 'timestamp': 1463332814, 'upload_date': '20160515', 'uploader_id': '6057949432001', }, 'add_ie': ['BrightcoveNew'], } BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/6057949432001/S1WMrhjlh_default/index.html?videoId=%s' def _real_extract(self, url): bc_id = self._match_id(url) return self.url_result( self.BRIGHTCOVE_URL_TEMPLATE % bc_id, 'BrightcoveNew', bc_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/maariv.py
yt_dlp/extractor/maariv.py
from .common import InfoExtractor from ..utils import ( int_or_none, parse_resolution, unified_timestamp, url_or_none, ) from ..utils.traversal import traverse_obj class MaarivIE(InfoExtractor): IE_NAME = 'maariv.co.il' _VALID_URL = r'https?://player\.maariv\.co\.il/public/player\.html\?(?:[^#]+&)?media=(?P<id>\d+)' _EMBED_REGEX = [rf'<iframe[^>]+\bsrc=[\'"](?P<url>{_VALID_URL})'] _TESTS = [{ 'url': 'https://player.maariv.co.il/public/player.html?player=maariv-desktop&media=3611585', 'info_dict': { 'id': '3611585', 'duration': 75, 'ext': 'mp4', 'upload_date': '20231009', 'title': 'מבצע חרבות ברזל', 'timestamp': 1696851301, }, }] _WEBPAGE_TESTS = [{ 'url': 'https://www.maariv.co.il/news/law/Article-1044008', 'info_dict': { 'id': '3611585', 'duration': 75, 'ext': 'mp4', 'upload_date': '20231009', 'title': 'מבצע חרבות ברזל', 'timestamp': 1696851301, }, }] def _real_extract(self, url): video_id = self._match_id(url) data = self._download_json( f'https://dal.walla.co.il/media/{video_id}?origin=player.maariv.co.il', video_id)['data'] formats = [] if hls_url := traverse_obj(data, ('video', 'url', {url_or_none})): formats.extend(self._extract_m3u8_formats(hls_url, video_id, m3u8_id='hls', fatal=False)) for http_format in traverse_obj(data, ('video', 'stream_urls', ..., 'stream_url', {url_or_none})): formats.append({ 'url': http_format, 'format_id': 'http', **parse_resolution(http_format), }) return { 'id': video_id, **traverse_obj(data, { 'title': 'title', 'duration': ('video', 'duration', {int_or_none}), 'timestamp': ('upload_date', {unified_timestamp}), }), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/gotostage.py
yt_dlp/extractor/gotostage.py
import json from .common import InfoExtractor from ..utils import try_get, url_or_none class GoToStageIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?gotostage\.com/channel/[a-z0-9]+/recording/(?P<id>[a-z0-9]+)/watch' _TESTS = [{ 'url': 'https://www.gotostage.com/channel/8901680603948959494/recording/60bb55548d434f21b9ce4f0e225c4895/watch', 'md5': 'ca72ce990cdcd7a2bd152f7217e319a2', 'info_dict': { 'id': '60bb55548d434f21b9ce4f0e225c4895', 'ext': 'mp4', 'title': 'What is GoToStage?', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 93.924711, }, }, { 'url': 'https://www.gotostage.com/channel/bacc3d3535b34bafacc3f4ef8d4df78a/recording/831e74cd3e0042be96defba627b6f676/watch?source=HOMEPAGE', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) metadata = self._download_json( f'https://api.gotostage.com/contents?ids={video_id}', video_id, note='Downloading video metadata', errnote='Unable to download video metadata')[0] registration_data = { 'product': metadata['product'], 'resourceType': metadata['contentType'], 'productReferenceKey': metadata['productRefKey'], 'firstName': 'foo', 'lastName': 'bar', 'email': 'foobar@example.com', } registration_response = self._download_json( 'https://api-registrations.logmeininc.com/registrations', video_id, data=json.dumps(registration_data).encode(), expected_status=409, headers={'Content-Type': 'application/json'}, note='Register user', errnote='Unable to register user') content_response = self._download_json( f'https://api.gotostage.com/contents/{video_id}/asset', video_id, headers={'x-registrantkey': registration_response['registrationKey']}, note='Get download url', errnote='Unable to get download url') return { 'id': video_id, 'title': try_get(metadata, lambda x: x['title'], str), 'url': try_get(content_response, lambda x: x['cdnLocation'], str), 'ext': 'mp4', 'thumbnail': url_or_none(try_get(metadata, lambda x: x['thumbnail']['location'])), 'duration': try_get(metadata, lambda x: 
x['duration'], float), 'categories': [try_get(metadata, lambda x: x['category'], str)], 'is_live': False, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/canalc2.py
yt_dlp/extractor/canalc2.py
import re from .common import InfoExtractor from ..utils import parse_duration class Canalc2IE(InfoExtractor): IE_NAME = 'canalc2.tv' _VALID_URL = r'https?://(?:(?:www\.)?canalc2\.tv/video/|archives-canalc2\.u-strasbg\.fr/video\.asp\?.*\bidVideo=)(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.canalc2.tv/video/12163', 'md5': '060158428b650f896c542dfbb3d6487f', 'info_dict': { 'id': '12163', 'ext': 'mp4', 'title': 'Terrasses du Numérique', 'duration': 122, }, }, { 'url': 'http://archives-canalc2.u-strasbg.fr/video.asp?idVideo=11427&voir=oui', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( f'http://www.canalc2.tv/video/{video_id}', video_id) title = self._html_search_regex( r'(?s)class="[^"]*col_description[^"]*">.*?<h3>(.+?)</h3>', webpage, 'title') formats = [] for _, video_url in re.findall(r'file\s*=\s*(["\'])(.+?)\1', webpage): if video_url.startswith('rtmp://'): rtmp = re.search( r'^(?P<url>rtmp://[^/]+/(?P<app>.+/))(?P<play_path>mp4:.+)$', video_url) formats.append({ 'url': rtmp.group('url'), 'format_id': 'rtmp', 'ext': 'flv', 'app': rtmp.group('app'), 'play_path': rtmp.group('play_path'), 'page_url': url, }) else: formats.append({ 'url': video_url, 'format_id': 'http', }) if formats: info = { 'formats': formats, } else: info = self._parse_html5_media_entries(url, webpage, url)[0] info.update({ 'id': video_id, 'title': title, 'duration': parse_duration(self._search_regex( r'id=["\']video_duree["\'][^>]*>([^<]+)', webpage, 'duration', fatal=False)), }) return info
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/nzonscreen.py
yt_dlp/extractor/nzonscreen.py
from .common import InfoExtractor from ..utils import ( float_or_none, int_or_none, remove_end, strip_or_none, traverse_obj, url_or_none, ) class NZOnScreenIE(InfoExtractor): _VALID_URL = r'https?://www\.nzonscreen\.com/title/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.nzonscreen.com/title/shoop-shoop-diddy-wop-cumma-cumma-wang-dang-1982', 'info_dict': { 'id': '726ed6585c6bfb30', 'ext': 'mp4', 'format_id': 'hi', 'display_id': 'shoop-shoop-diddy-wop-cumma-cumma-wang-dang-1982', 'title': 'Monte Video - "Shoop Shoop, Diddy Wop"', 'description': 'Monte Video - "Shoop Shoop, Diddy Wop"', 'alt_title': 'Shoop Shoop Diddy Wop Cumma Cumma Wang Dang | Music Video', 'thumbnail': r're:https://www\.nzonscreen\.com/content/images/.+\.jpg', 'duration': 158, }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.nzonscreen.com/title/shes-a-mod-1964?collection=best-of-the-60s', 'info_dict': { 'id': '3dbe709ff03c36f1', 'ext': 'mp4', 'format_id': 'hi', 'display_id': 'shes-a-mod-1964', 'title': 'Ray Columbus - \'She\'s A Mod\'', 'description': 'Ray Columbus - \'She\'s A Mod\'', 'alt_title': 'She\'s a Mod | Music Video', 'thumbnail': r're:https://www\.nzonscreen\.com/content/images/.+\.jpg', 'duration': 130, }, 'params': {'skip_download': 'm3u8'}, }, { 'url': 'https://www.nzonscreen.com/title/puha-and-pakeha-1968/overview', 'info_dict': { 'id': 'f86342544385ad8a', 'ext': 'mp4', 'format_id': 'hi', 'display_id': 'puha-and-pakeha-1968', 'title': 'Looking At New Zealand - Puha and Pakeha', 'alt_title': 'Looking at New Zealand - \'Pūhā and Pākehā\' | Television', 'description': 'An excerpt from this television programme.', 'duration': 212, 'thumbnail': r're:https://www\.nzonscreen\.com/content/images/.+\.jpg', }, 'params': {'skip_download': 'm3u8'}, }] def _extract_formats(self, playlist): for quality, (id_, url) in enumerate(traverse_obj( playlist, ('h264', {'lo': 'lo_res', 'hi': 'hi_res'}), expected_type=url_or_none).items()): yield { 'url': url, 'format_id': id_, 'ext': 
'mp4', 'quality': quality, 'height': int_or_none(playlist.get('height')) if id_ == 'hi' else None, 'width': int_or_none(playlist.get('width')) if id_ == 'hi' else None, 'filesize_approx': float_or_none(traverse_obj(playlist, ('h264', f'{id_}_res_mb')), invscale=1024**2), } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) playlist = self._parse_json(self._html_search_regex( r'data-video-config=\'([^\']+)\'', webpage, 'media data'), video_id) return { 'id': playlist['uuid'], 'display_id': video_id, 'title': strip_or_none(playlist.get('label')), 'description': strip_or_none(playlist.get('description')), 'alt_title': strip_or_none(remove_end( self._html_extract_title(webpage, default=None) or self._og_search_title(webpage), ' | NZ On Screen')), 'thumbnail': traverse_obj(playlist, ('thumbnail', 'path')), 'duration': float_or_none(playlist.get('duration')), 'formats': list(self._extract_formats(playlist)), 'http_headers': { 'Referer': 'https://www.nzonscreen.com/', 'Origin': 'https://www.nzonscreen.com/', }, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mux.py
yt_dlp/extractor/mux.py
import re from .common import InfoExtractor from ..utils import ( extract_attributes, filter_dict, parse_qs, smuggle_url, unsmuggle_url, update_url_query, ) from ..utils.traversal import traverse_obj class MuxIE(InfoExtractor): _VALID_URL = r'https?://(?:stream\.new/v|player\.mux\.com)/(?P<id>[A-Za-z0-9-]+)' _EMBED_REGEX = [r'<iframe\b[^>]+\bsrc=["\'](?P<url>(?:https?:)?//(?:stream\.new/v|player\.mux\.com)/(?P<id>[A-Za-z0-9-]+)[^"\']+)'] _TESTS = [{ 'url': 'https://stream.new/v/OCtRWZiZqKvLbnZ32WSEYiGNvHdAmB01j/embed', 'info_dict': { 'ext': 'mp4', 'id': 'OCtRWZiZqKvLbnZ32WSEYiGNvHdAmB01j', 'title': 'OCtRWZiZqKvLbnZ32WSEYiGNvHdAmB01j', }, }, { 'url': 'https://player.mux.com/OCtRWZiZqKvLbnZ32WSEYiGNvHdAmB01j', 'info_dict': { 'ext': 'mp4', 'id': 'OCtRWZiZqKvLbnZ32WSEYiGNvHdAmB01j', 'title': 'OCtRWZiZqKvLbnZ32WSEYiGNvHdAmB01j', }, }] _WEBPAGE_TESTS = [{ # iframe embed 'url': 'https://www.redbrickai.com/blog/2025-07-14-FAST-brush', 'info_dict': { 'ext': 'mp4', 'id': 'cXhzAiW1AmsHY01eRbEYFcTEAn0102aGN8sbt8JprP6Dfw', 'title': 'cXhzAiW1AmsHY01eRbEYFcTEAn0102aGN8sbt8JprP6Dfw', }, }, { # mux-player embed 'url': 'https://muxvideo.2coders.com/download/', 'info_dict': { 'ext': 'mp4', 'id': 'JBuasdg35Hw7tYmTe9k68QLPQKixL300YsWHDz5Flit8', 'title': 'JBuasdg35Hw7tYmTe9k68QLPQKixL300YsWHDz5Flit8', }, }, { # mux-player with title metadata 'url': 'https://datastar-todomvc.cross.stream/', 'info_dict': { 'ext': 'mp4', 'id': 'KX01ZSZ8CXv5SVfVwMZKJTcuBcUQmo1ReS9U5JjoHm4k', 'title': 'TodoMVC with Datastar Tutorial', }, }] @classmethod def _extract_embed_urls(cls, url, webpage): yield from super()._extract_embed_urls(url, webpage) for mux_player in re.findall(r'<mux-(?:player|video)\b[^>]*\bplayback-id=[^>]+>', webpage): attrs = extract_attributes(mux_player) playback_id = attrs.get('playback-id') if not playback_id: continue token = attrs.get('playback-token') or traverse_obj(playback_id, ({parse_qs}, 'token', -1)) playback_id = playback_id.partition('?')[0] embed_url = update_url_query( 
f'https://player.mux.com/{playback_id}', filter_dict({'playback-token': token})) if title := attrs.get('metadata-video-title'): embed_url = smuggle_url(embed_url, {'title': title}) yield embed_url def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) video_id = self._match_id(url) token = traverse_obj(parse_qs(url), ('playback-token', -1)) formats, subtitles = self._extract_m3u8_formats_and_subtitles( f'https://stream.mux.com/{video_id}.m3u8', video_id, 'mp4', query=filter_dict({'token': token})) return { 'id': video_id, 'title': smuggled_data.get('title') or video_id, 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/msn.py
yt_dlp/extractor/msn.py
from .common import InfoExtractor from ..utils import ( ExtractorError, clean_html, determine_ext, int_or_none, parse_iso8601, url_or_none, ) from ..utils.traversal import traverse_obj class MSNIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www|preview)\.)?msn\.com/(?P<locale>[a-z]{2}-[a-z]{2})/(?:[^/?#]+/)+(?P<display_id>[^/?#]+)/[a-z]{2}-(?P<id>[\da-zA-Z]+)' _TESTS = [{ 'url': 'https://www.msn.com/en-gb/video/news/president-macron-interrupts-trump-over-ukraine-funding/vi-AA1zMcD7', 'info_dict': { 'id': 'AA1zMcD7', 'ext': 'mp4', 'display_id': 'president-macron-interrupts-trump-over-ukraine-funding', 'title': 'President Macron interrupts Trump over Ukraine funding', 'description': 'md5:5fd3857ac25849e7a56cb25fbe1a2a8b', 'uploader': 'k! News UK', 'uploader_id': 'BB1hz5Rj', 'duration': 59, 'thumbnail': 'https://img-s-msn-com.akamaized.net/tenant/amp/entityid/AA1zMagX.img', 'tags': 'count:14', 'timestamp': 1740510914, 'upload_date': '20250225', 'release_timestamp': 1740513600, 'release_date': '20250225', 'modified_timestamp': 1741413241, 'modified_date': '20250308', }, }, { 'url': 'https://www.msn.com/en-gb/video/watch/films-success-saved-adam-pearsons-acting-career/vi-AA1znZGE?ocid=hpmsn', 'info_dict': { 'id': 'AA1znZGE', 'ext': 'mp4', 'display_id': 'films-success-saved-adam-pearsons-acting-career', 'title': "Films' success saved Adam Pearson's acting career", 'description': 'md5:98c05f7bd9ab4f9c423400f62f2d3da5', 'uploader': 'Sky News', 'uploader_id': 'AA2eki', 'duration': 52, 'thumbnail': 'https://img-s-msn-com.akamaized.net/tenant/amp/entityid/AA1zo7nU.img', 'timestamp': 1739993965, 'upload_date': '20250219', 'release_timestamp': 1739977753, 'release_date': '20250219', 'modified_timestamp': 1742076259, 'modified_date': '20250315', }, }, { 'url': 'https://www.msn.com/en-us/entertainment/news/rock-frontman-replacements-you-might-not-know-happened/vi-AA1yLVcD', 'info_dict': { 'id': 'AA1yLVcD', 'ext': 'mp4', 'display_id': 
'rock-frontman-replacements-you-might-not-know-happened', 'title': 'Rock Frontman Replacements You Might Not Know Happened', 'description': 'md5:451a125496ff0c9f6816055bb1808da9', 'uploader': 'Grunge (Video)', 'uploader_id': 'BB1oveoV', 'duration': 596, 'thumbnail': 'https://img-s-msn-com.akamaized.net/tenant/amp/entityid/AA1yM4OJ.img', 'timestamp': 1739223456, 'upload_date': '20250210', 'release_timestamp': 1739219731, 'release_date': '20250210', 'modified_timestamp': 1741427272, 'modified_date': '20250308', }, }, { # Dailymotion Embed 'url': 'https://www.msn.com/de-de/nachrichten/other/the-first-descendant-gameplay-trailer-zu-serena-der-neuen-gefl%C3%BCgelten-nachfahrin/vi-AA1B1d06', 'info_dict': { 'id': 'x9g6oli', 'ext': 'mp4', 'title': 'The First Descendant: Gameplay-Trailer zu Serena, der neuen geflügelten Nachfahrin', 'description': '', 'uploader': 'MeinMMO', 'uploader_id': 'x2mvqi4', 'view_count': int, 'like_count': int, 'age_limit': 0, 'duration': 60, 'thumbnail': 'https://s1.dmcdn.net/v/Y3fO61drj56vPB9SS/x1080', 'tags': ['MeinMMO', 'The First Descendant'], 'timestamp': 1742124877, 'upload_date': '20250316', }, }, { # Youtube Embed 'url': 'https://www.msn.com/en-gb/video/webcontent/web-content/vi-AA1ybFaJ', 'info_dict': { 'id': 'kQSChWu95nE', 'ext': 'mp4', 'title': '7 Daily Habits to Nurture Your Personal Growth', 'description': 'md5:6f233c68341b74dee30c8c121924e827', 'uploader': 'TopThink', 'uploader_id': '@TopThink', 'uploader_url': 'https://www.youtube.com/@TopThink', 'channel': 'TopThink', 'channel_id': 'UCMlGmHokrQRp-RaNO7aq4Uw', 'channel_url': 'https://www.youtube.com/channel/UCMlGmHokrQRp-RaNO7aq4Uw', 'channel_is_verified': True, 'channel_follower_count': int, 'comment_count': int, 'view_count': int, 'like_count': int, 'age_limit': 0, 'duration': 705, 'thumbnail': 'https://i.ytimg.com/vi/kQSChWu95nE/maxresdefault.jpg', 'categories': ['Howto & Style'], 'tags': ['topthink', 'top think', 'personal growth'], 'timestamp': 1722711620, 'upload_date': 
'20240803', 'playable_in_embed': True, 'availability': 'public', 'live_status': 'not_live', }, }, { # Article with social embed 'url': 'https://www.msn.com/en-in/news/techandscience/watch-earth-sets-and-rises-behind-moon-in-breathtaking-blue-ghost-video/ar-AA1zKoAc', 'info_dict': { 'id': 'AA1zKoAc', 'title': 'Watch: Earth sets and rises behind Moon in breathtaking Blue Ghost video', 'description': 'md5:0ad51cfa77e42e7f0c46cf98a619dbbf', 'uploader': 'India Today', 'uploader_id': 'AAyFWG', 'tags': 'count:11', 'timestamp': 1740485034, 'upload_date': '20250225', 'release_timestamp': 1740484875, 'release_date': '20250225', 'modified_timestamp': 1740488561, 'modified_date': '20250225', }, 'playlist_count': 1, }] def _real_extract(self, url): locale, display_id, page_id = self._match_valid_url(url).group('locale', 'display_id', 'id') json_data = self._download_json( f'https://assets.msn.com/content/view/v2/Detail/{locale}/{page_id}', page_id) common_metadata = traverse_obj(json_data, { 'title': ('title', {str}), 'description': (('abstract', ('body', {clean_html})), {str}, filter, any), 'timestamp': ('createdDateTime', {parse_iso8601}), 'release_timestamp': ('publishedDateTime', {parse_iso8601}), 'modified_timestamp': ('updatedDateTime', {parse_iso8601}), 'thumbnail': ('thumbnail', 'image', 'url', {url_or_none}), 'duration': ('videoMetadata', 'playTime', {int_or_none}), 'tags': ('keywords', ..., {str}), 'uploader': ('provider', 'name', {str}), 'uploader_id': ('provider', 'id', {str}), }) page_type = json_data['type'] source_url = traverse_obj(json_data, ('sourceHref', {url_or_none})) if page_type == 'video': if traverse_obj(json_data, ('thirdPartyVideoPlayer', 'enabled')) and source_url: return self.url_result(source_url) formats = [] subtitles = {} for file in traverse_obj(json_data, ('videoMetadata', 'externalVideoFiles', lambda _, v: url_or_none(v['url']))): file_url = file['url'] ext = determine_ext(file_url) if ext == 'm3u8': fmts, subs = 
self._extract_m3u8_formats_and_subtitles( file_url, page_id, 'mp4', m3u8_id='hls', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) elif ext == 'mpd': fmts, subs = self._extract_mpd_formats_and_subtitles( file_url, page_id, mpd_id='dash', fatal=False) formats.extend(fmts) self._merge_subtitles(subs, target=subtitles) else: formats.append( traverse_obj(file, { 'url': 'url', 'format_id': ('format', {str}), 'filesize': ('fileSize', {int_or_none}), 'height': ('height', {int_or_none}), 'width': ('width', {int_or_none}), })) for caption in traverse_obj(json_data, ('videoMetadata', 'closedCaptions', lambda _, v: url_or_none(v['href']))): lang = caption.get('locale') or 'en-us' subtitles.setdefault(lang, []).append({ 'url': caption['href'], 'ext': 'ttml', }) return { 'id': page_id, 'display_id': display_id, 'formats': formats, 'subtitles': subtitles, **common_metadata, } elif page_type == 'webcontent': if not source_url: raise ExtractorError('Could not find source URL') return self.url_result(source_url) elif page_type == 'article': entries = [] for embed_url in traverse_obj(json_data, ('socialEmbeds', ..., 'postUrl', {url_or_none})): entries.append(self.url_result(embed_url)) return self.playlist_result(entries, page_id, **common_metadata) raise ExtractorError(f'Unsupported page type: {page_type}')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/tv2hu.py
yt_dlp/extractor/tv2hu.py
from .common import InfoExtractor from ..utils import ( UnsupportedError, traverse_obj, ) class TV2HuIE(InfoExtractor): IE_NAME = 'tv2play.hu' _VALID_URL = r'https?://(?:www\.)?tv2play\.hu/(?!szalag/)(?P<id>[^#&?]+)' _TESTS = [{ 'url': 'https://tv2play.hu/mintaapak/mintaapak_213_epizod_resz', 'info_dict': { 'id': '249240', 'ext': 'mp4', 'title': 'Mintaapák - 213. epizód', 'series': 'Mintaapák', 'duration': 2164, 'description': 'md5:7350147e75485a59598e806c47967b07', 'thumbnail': r're:^https?://.*\.jpg$', 'release_date': '20210825', 'episode_number': 213, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://tv2play.hu/taxi_2', 'md5': '585e58e2e090f34603804bb2c48e98d8', 'info_dict': { 'id': '199363', 'ext': 'mp4', 'title': 'Taxi 2', 'series': 'Taxi 2', 'duration': 5087, 'description': 'md5:47762155dc9a50241797ded101b1b08c', 'thumbnail': r're:^https?://.*\.jpg$', 'release_date': '20210118', }, 'params': { 'skip_download': True, }, }] def _real_extract(self, url): video_id = self._match_id(url) json_data = self._download_json(f'https://tv2play.hu/api/search/{video_id}', video_id) if json_data['contentType'] == 'showpage': ribbon_ids = traverse_obj(json_data, ('pages', ..., 'tabs', ..., 'ribbonIds'), get_all=False, expected_type=list) entries = [self.url_result(f'https://tv2play.hu/szalag/{ribbon_id}', ie=TV2HuSeriesIE.ie_key(), video_id=ribbon_id) for ribbon_id in ribbon_ids] return self.playlist_result(entries, playlist_id=video_id) elif json_data['contentType'] != 'video': raise UnsupportedError(url) video_id = str(json_data['id']) player_id = json_data.get('playerId') series_json = json_data.get('seriesInfo', {}) video_json_url = self._download_json(f'https://tv2play.hu/api/streaming-url?playerId={player_id}', video_id)['url'] video_json = self._download_json(video_json_url, video_id) m3u8_url = self._proto_relative_url(traverse_obj(video_json, ('bitrates', 'hls'))) formats, subtitles = self._extract_m3u8_formats_and_subtitles(m3u8_url, video_id) return { 
'id': video_id, 'title': json_data['title'], 'series': json_data.get('seriesTitle'), 'duration': json_data.get('length'), 'description': json_data.get('description'), 'thumbnail': 'https://tv2play.hu' + json_data.get('thumbnailUrl'), 'release_date': json_data.get('uploadedAt').replace('.', ''), 'season_number': series_json.get('seasonNr'), 'episode_number': series_json.get('episodeNr'), 'formats': formats, 'subtitles': subtitles, } class TV2HuSeriesIE(InfoExtractor): IE_NAME = 'tv2playseries.hu' _VALID_URL = r'https?://(?:www\.)?tv2play\.hu/szalag/(?P<id>[^#&?]+)' _TESTS = [{ 'url': 'https://tv2play.hu/szalag/59?rendezes=nepszeruseg', 'playlist_mincount': 284, 'info_dict': { 'id': '59', }, }] def _real_extract(self, url): playlist_id = self._match_id(url) json_data = self._download_json(f'https://tv2play.hu/api/ribbons/{playlist_id}/0?size=100000', playlist_id) entries = [] for card in json_data.get('cards', []): video_id = card.get('slug') if video_id: entries.append(self.url_result( f'https://tv2play.hu/{video_id}', TV2HuIE, video_id)) return self.playlist_result(entries, playlist_id=playlist_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/mainstreaming.py
yt_dlp/extractor/mainstreaming.py
import re from .common import InfoExtractor from ..utils import ( int_or_none, js_to_json, parse_duration, traverse_obj, try_get, urljoin, ) class MainStreamingIE(InfoExtractor): _VALID_URL = r'https?://(?:webtools-?)?(?P<host>[A-Za-z0-9-]*\.msvdn\.net)/(?:embed|amp_embed|content)/(?P<id>\w+)' _EMBED_REGEX = [rf'<iframe[^>]+?src=["\']?(?P<url>{_VALID_URL})["\']?'] IE_DESC = 'MainStreaming Player' _TESTS = [{ # Live stream offline, has alternative content id 'url': 'https://webtools-e18da6642b684f8aa9ae449862783a56.msvdn.net/embed/53EN6GxbWaJC', 'info_dict': { 'id': '53EN6GxbWaJC', 'title': 'Diretta homepage 2021-12-31 12:00', 'description': '', 'live_status': 'was_live', 'ext': 'mp4', 'thumbnail': r're:https?://[\w-]+\.msvdn\.net/image/\w+/poster', }, 'expected_warnings': [ 'Ignoring alternative content ID: WDAF1KOWUpH3', 'MainStreaming said: Live event is OFFLINE', ], 'skip': 'live stream offline', }, { # playlist 'url': 'https://webtools-e18da6642b684f8aa9ae449862783a56.msvdn.net/embed/WDAF1KOWUpH3', 'info_dict': { 'id': 'WDAF1KOWUpH3', 'title': 'Playlist homepage', }, 'playlist_mincount': 2, }, { # livestream 'url': 'https://webtools-859c1818ed614cc5b0047439470927b0.msvdn.net/embed/tDoFkZD3T1Lw', 'info_dict': { 'id': 'tDoFkZD3T1Lw', 'title': str, 'live_status': 'is_live', 'ext': 'mp4', 'thumbnail': r're:https?://[\w-]+\.msvdn\.net/image/\w+/poster', }, 'skip': 'live stream', }, { 'url': 'https://webtools-f5842579ff984c1c98d63b8d789673eb.msvdn.net/embed/EUlZfGWkGpOd?autoPlay=false', 'info_dict': { 'id': 'EUlZfGWkGpOd', 'title': 'La Settimana ', 'description': '03 Ottobre ore 02:00', 'ext': 'mp4', 'live_status': 'not_live', 'thumbnail': r're:https?://[\w-]+\.msvdn\.net/image/\w+/poster', 'duration': 1512, }, 'skip': 'Invalid URL', }, { # video without webtools- prefix 'url': 'https://f5842579ff984c1c98d63b8d789673eb.msvdn.net/embed/MfuWmzL2lGkA?autoplay=false&T=1635860445', 'info_dict': { 'id': 'MfuWmzL2lGkA', 'title': 'TG Mattina', 'description': '06 Ottobre ore 
08:00', 'ext': 'mp4', 'live_status': 'not_live', 'thumbnail': r're:https?://[\w-]+\.msvdn\.net/image/\w+/poster', 'duration': 789.04, }, 'skip': 'Invalid URL', }, { # always-on livestream with DVR 'url': 'https://webtools-f5842579ff984c1c98d63b8d789673eb.msvdn.net/embed/HVvPMzy', 'info_dict': { 'id': 'HVvPMzy', 'title': str, 'description': 'canale all news', 'live_status': 'is_live', 'ext': 'mp4', 'thumbnail': r're:https?://[\w-]+\.msvdn\.net/image/\w+/poster', }, 'params': {'skip_download': 'm3u8'}, }, { # no host 'url': 'https://webtools.msvdn.net/embed/MfuWmzL2lGkA', 'only_matching': True, }, { 'url': 'https://859c1818ed614cc5b0047439470927b0.msvdn.net/amp_embed/tDoFkZD3T1Lw', 'only_matching': True, }, { 'url': 'https://859c1818ed614cc5b0047439470927b0.msvdn.net/content/tDoFkZD3T1Lw#', 'only_matching': True, }] _WEBPAGE_TESTS = [{ # FIXME: Embed detection 'url': 'https://www.lacplay.it/video/in-evidenza_728/lac-storie-p-250-i-santi-pietro-e-paolo_77297/', 'info_dict': { 'id': 'u7kiX5DUaHYr', 'ext': 'mp4', 'title': 'I Santi Pietro e Paolo', 'description': 'md5:ff6be24916ba6b9ae990bf5f3df4911e', 'duration': 1700.0, 'thumbnail': r're:https?://.+', 'tags': '06/07/2025', 'live_status': 'not_live', }, }] def _playlist_entries(self, host, playlist_content): for entry in playlist_content: content_id = entry.get('contentID') yield { '_type': 'url', 'ie_key': MainStreamingIE.ie_key(), 'id': content_id, 'duration': int_or_none(traverse_obj(entry, ('duration', 'totalSeconds'))), 'title': entry.get('title'), 'url': f'https://{host}/embed/{content_id}', } @staticmethod def _get_webtools_host(host): if not host.startswith('webtools'): host = 'webtools' + ('-' if not host.startswith('.') else '') + host return host def _get_webtools_base_url(self, host): return f'{self.http_scheme()}//{self._get_webtools_host(host)}' def _call_api(self, host: str, path: str, item_id: str, query=None, note='Downloading API JSON', fatal=False): # JSON API, does not appear to be documented return 
self._call_webtools_api(host, '/api/v2/' + path, item_id, query, note, fatal) def _call_webtools_api(self, host: str, path: str, item_id: str, query=None, note='Downloading webtools API JSON', fatal=False): # webtools docs: https://webtools.msvdn.net/ return self._download_json( urljoin(self._get_webtools_base_url(host), path), item_id, query=query, note=note, fatal=fatal) def _real_extract(self, url): host, video_id = self._match_valid_url(url).groups() content_info = try_get( self._call_api( host, f'content/{video_id}', video_id, note='Downloading content info API JSON'), lambda x: x['playerContentInfo']) # Fallback if not content_info: webpage = self._download_webpage(url, video_id) player_config = self._parse_json( self._search_regex( r'config\s*=\s*({.+?})\s*;', webpage, 'mainstreaming player config', default='{}', flags=re.DOTALL), video_id, transform_source=js_to_json, fatal=False) or {} content_info = player_config['contentInfo'] host = content_info.get('host') or host video_id = content_info.get('contentID') or video_id title = content_info.get('title') description = traverse_obj(content_info, 'longDescription', 'shortDescription', expected_type=str) live_status = 'not_live' if content_info.get('drmEnabled'): self.report_drm(video_id) alternative_content_id = content_info.get('alternativeContentID') if alternative_content_id: self.report_warning(f'Ignoring alternative content ID: {alternative_content_id}') content_type = int_or_none(content_info.get('contentType')) format_base_url = None formats = [] subtitles = {} # Live content if content_type == 20: dvr_enabled = traverse_obj(content_info, ('playerSettings', 'dvrEnabled'), expected_type=bool) format_base_url = f"https://{host}/live/{content_info['liveSourceID']}/{video_id}/%s{'?DVR' if dvr_enabled else ''}" live_status = 'is_live' heartbeat = self._call_api(host, f'heartbeat/{video_id}', video_id, note='Checking stream status') or {} if heartbeat.get('heartBeatUp') is False: 
self.raise_no_formats(f'MainStreaming said: {heartbeat.get("responseMessage")}', expected=True) live_status = 'was_live' # Playlist elif content_type == 31: return self.playlist_result( self._playlist_entries(host, content_info.get('playlistContents')), video_id, title, description) # Normal video content? elif content_type == 10: format_base_url = f'https://{host}/vod/{video_id}/%s' # Progressive format # Note: in https://webtools.msvdn.net/loader/playerV2.js there is mention of original.mp3 format, # however it seems to be the same as original.mp4? formats.append({'url': format_base_url % 'original.mp4', 'format_note': 'original', 'quality': 1}) else: self.raise_no_formats(f'Unknown content type {content_type}') if format_base_url: m3u8_formats, m3u8_subs = self._extract_m3u8_formats_and_subtitles( format_base_url % 'playlist.m3u8', video_id=video_id, fatal=False) mpd_formats, mpd_subs = self._extract_mpd_formats_and_subtitles( format_base_url % 'manifest.mpd', video_id=video_id, fatal=False) subtitles = self._merge_subtitles(m3u8_subs, mpd_subs) formats.extend(m3u8_formats + mpd_formats) return { 'id': video_id, 'title': title, 'description': description, 'formats': formats, 'live_status': live_status, 'duration': parse_duration(content_info.get('duration')), 'tags': content_info.get('tags'), 'subtitles': subtitles, 'thumbnail': urljoin(self._get_webtools_base_url(host), f'image/{video_id}/poster'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/bpb.py
yt_dlp/extractor/bpb.py
import re from .common import InfoExtractor from ..utils import ( clean_html, extract_attributes, join_nonempty, js_to_json, mimetype2ext, parse_resolution, unified_strdate, url_or_none, urljoin, ) from ..utils.traversal import ( find_element, traverse_obj, ) class BpbIE(InfoExtractor): IE_DESC = 'Bundeszentrale für politische Bildung' _VALID_URL = r'https?://(?:www\.|m\.)?bpb\.de/(?:[^/?#]+/)*(?P<id>\d+)(?:[/?#]|$)' _TESTS = [{ 'url': 'http://www.bpb.de/mediathek/297/joachim-gauck-zu-1989-und-die-erinnerung-an-die-ddr', 'info_dict': { 'id': '297', 'ext': 'mp4', 'creators': ['Kooperative Berlin'], 'description': r're:Joachim Gauck, .*\n\nKamera: .*', 'release_date': '20150716', 'series': 'Interview auf dem Geschichtsforum 1989 | 2009', 'tags': [], 'thumbnail': r're:https?://www\.bpb\.de/cache/images/7/297_teaser_16x9_1240\.jpg.*', 'title': 'Joachim Gauck zu 1989 und die Erinnerung an die DDR', 'uploader': 'Bundeszentrale für politische Bildung', }, }, { 'url': 'https://www.bpb.de/mediathek/video/522184/krieg-flucht-und-falschmeldungen-wirstattdesinformation-2/', 'info_dict': { 'id': '522184', 'ext': 'mp4', 'creators': ['Institute for Strategic Dialogue Germany gGmbH (ISD)'], 'description': 'md5:f83c795ff8f825a69456a9e51fc15903', 'release_date': '20230621', 'series': 'Narrative über den Krieg Russlands gegen die Ukraine (NUK)', 'tags': [], 'thumbnail': r're:https://www\.bpb\.de/cache/images/4/522184_teaser_16x9_1240\.png.*', 'title': 'md5:9b01ccdbf58dbf9e5c9f6e771a803b1c', 'uploader': 'Bundeszentrale für politische Bildung', }, }, { 'url': 'https://www.bpb.de/lernen/bewegtbild-und-politische-bildung/webvideo/518789/krieg-flucht-und-falschmeldungen-wirstattdesinformation-1/', 'info_dict': { 'id': '518789', 'ext': 'mp4', 'creators': ['Institute for Strategic Dialogue Germany gGmbH (ISD)'], 'description': 'md5:85228aed433e84ff0ff9bc582abd4ea8', 'release_date': '20230302', 'series': 'Narrative über den Krieg Russlands gegen die Ukraine (NUK)', 'tags': [], 'thumbnail': 
r're:https://www\.bpb\.de/cache/images/9/518789_teaser_16x9_1240\.jpeg.*', 'title': 'md5:3e956f264bb501f6383f10495a401da4', 'uploader': 'Bundeszentrale für politische Bildung', }, }, { 'url': 'https://www.bpb.de/mediathek/podcasts/apuz-podcast/539727/apuz-20-china/', 'only_matching': True, }, { 'url': 'https://www.bpb.de/mediathek/audio/315813/folge-1-eine-einfuehrung/', 'info_dict': { 'id': '315813', 'ext': 'mp3', 'creators': ['Axel Schröder'], 'description': 'md5:eda9d1af34e5912efef5baf54fba4427', 'release_date': '20200921', 'series': 'Auf Endlagersuche. Der deutsche Weg zu einem sicheren Atommülllager', 'tags': ['Atomenergie', 'Endlager', 'hoch-radioaktiver Abfall', 'Endlagersuche', 'Atommüll', 'Atomendlager', 'Gorleben', 'Deutschland'], 'thumbnail': r're:https://www\.bpb\.de/cache/images/3/315813_teaser_16x9_1240\.png.*', 'title': 'Folge 1: Eine Einführung', 'uploader': 'Bundeszentrale für politische Bildung', }, }, { 'url': 'https://www.bpb.de/517806/die-weltanschauung-der-neuen-rechten/', 'info_dict': { 'id': '517806', 'ext': 'mp3', 'creators': ['Bundeszentrale für politische Bildung'], 'description': 'md5:594689600e919912aade0b2871cc3fed', 'release_date': '20230127', 'series': 'Vorträge des Fachtags "Modernisierer. Grenzgänger. Anstifter. 
Sechs Jahrzehnte \'Neue Rechte\'"', 'tags': ['Rechtsextremismus', 'Konservatismus', 'Konservativismus', 'neue Rechte', 'Rechtspopulismus', 'Schnellroda', 'Deutschland'], 'thumbnail': r're:https://www\.bpb\.de/cache/images/6/517806_teaser_16x9_1240\.png.*', 'title': 'Die Weltanschauung der "Neuen Rechten"', 'uploader': 'Bundeszentrale für politische Bildung', }, }, { 'url': 'https://www.bpb.de/mediathek/reihen/zahlen-und-fakten-soziale-situation-filme/520153/zahlen-und-fakten-die-soziale-situation-in-deutschland-migration/', 'only_matching': True, }] _TITLE_RE = re.compile('(?P<title>[^<]*)<[^>]+>(?P<series>[^<]*)') def _parse_vue_attributes(self, name, string, video_id): attributes = extract_attributes(self._search_regex(rf'(<{name}(?:"[^"]*?"|[^>])*>)', string, name)) for key, value in attributes.items(): if key.startswith(':'): attributes[key] = self._parse_json(value, video_id, transform_source=js_to_json, fatal=False) return attributes def _process_source(self, source): url = url_or_none(source['src']) if not url: return None source_type = source.get('type', '') extension = mimetype2ext(source_type) note = self._search_regex(r'[_-]([a-z]+)\.[\da-z]+(?:$|\?)', url, 'note', default=None) return { 'url': url, 'ext': extension, 'vcodec': None if source_type.startswith('video') else 'none', 'quality': 10 if note == 'high' else 0, 'format_note': note, 'format_id': join_nonempty(extension, note), **parse_resolution(source.get('label')), } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title_result = traverse_obj(webpage, ({find_element(cls='opening-header__title')}, {self._TITLE_RE.match})) json_lds = list(self._yield_json_ld(webpage, video_id, fatal=False)) return { 'id': video_id, 'title': traverse_obj(title_result, ('title', {str.strip})) or None, # This metadata could be interpreted otherwise, but it fits "series" the most 'series': traverse_obj(title_result, ('series', {str.strip})) or None, 
'description': join_nonempty(*traverse_obj(webpage, [( {find_element(cls='opening-intro')}, [{find_element(tag='bpb-accordion-item')}, {find_element(cls='text-content')}], ), {clean_html}]), delim='\n\n') or None, 'creators': traverse_obj(self._html_search_meta('author', webpage), all), 'uploader': self._html_search_meta('publisher', webpage), 'release_date': unified_strdate(self._html_search_meta('date', webpage)), 'tags': traverse_obj(json_lds, (..., 'keywords', {lambda x: x.split(',')}, ...)), **traverse_obj(self._parse_vue_attributes('bpb-player', webpage, video_id), { 'formats': (':sources', ..., {self._process_source}), 'thumbnail': ('poster', {urljoin(url)}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/ettutv.py
yt_dlp/extractor/ettutv.py
from .common import InfoExtractor from ..utils import bool_or_none, traverse_obj, unified_timestamp, url_or_none class EttuTvIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?ettu\.tv/[^?#]+/playerpage/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'https://www.ettu.tv/en-int/playerpage/1573849', 'md5': '5874b7639a2aa866d1f6c3a4037c7c09', 'info_dict': { 'id': '1573849', 'title': 'Ni Xia Lian - Shao Jieni', 'description': 'ITTF Europe Top 16 Cup', 'timestamp': 1677348600, 'upload_date': '20230225', 'thumbnail': r're:^https?://.*\.(?:jpg|png)', 'ext': 'mp4', }, }, { 'url': 'https://www.ettu.tv/en-int/playerpage/1573753', 'md5': '1fc094bf96cf2d5ec0f434d3a6dec9aa', 'info_dict': { 'id': '1573753', 'title': 'Qiu Dang - Jorgic Darko', 'description': 'ITTF Europe Top 16 Cup', 'timestamp': 1677423600, 'upload_date': '20230226', 'thumbnail': r're:^https?://.*\.(?:jpg|png)', 'ext': 'mp4', }, }] def _real_extract(self, url): video_id = self._match_id(url) player_settings = self._download_json( f'https://www.ettu.tv/api/v3/contents/{video_id}/player-settings', video_id, query={ 'language': 'en', 'showTitle': 'true', 'device': 'desktop', }) stream_response = self._download_json(player_settings['streamAccess'], video_id, data=b'') formats, subtitles = self._extract_m3u8_formats_and_subtitles( stream_response['data']['stream'], video_id, 'mp4') return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, **traverse_obj(player_settings, { 'title': 'title', 'description': ('metaInformation', 'competition'), 'thumbnail': ('image', {url_or_none}), 'timestamp': ('date', {unified_timestamp}), 'is_live': ('isLivestream', {bool_or_none}), }), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/rtl2.py
yt_dlp/extractor/rtl2.py
import re from .common import InfoExtractor from ..utils import int_or_none class RTL2IE(InfoExtractor): IE_NAME = 'rtl2' _VALID_URL = r'https?://(?:www\.)?rtl2\.de/sendung/[^/]+/(?:video/(?P<vico_id>\d+)[^/]+/(?P<vivi_id>\d+)-|folge/)(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'http://www.rtl2.de/sendung/grip-das-motormagazin/folge/folge-203-0', 'info_dict': { 'id': 'folge-203-0', 'ext': 'f4v', 'title': 'GRIP sucht den Sommerkönig', 'description': 'md5:e3adbb940fd3c6e76fa341b8748b562f', }, 'params': { # rtmp download 'skip_download': True, }, 'expected_warnings': ['Unable to download f4m manifest', 'Failed to download m3u8 information'], }, { 'url': 'http://www.rtl2.de/sendung/koeln-50667/video/5512-anna/21040-anna-erwischt-alex/', 'info_dict': { 'id': 'anna-erwischt-alex', 'ext': 'mp4', 'title': 'Anna erwischt Alex!', 'description': 'Anna nimmt ihrem Vater nicht ab, dass er nicht spielt. Und tatsächlich erwischt sie ihn auf frischer Tat.', }, 'params': { # rtmp download 'skip_download': True, }, 'expected_warnings': ['Unable to download f4m manifest', 'Failed to download m3u8 information'], }] def _real_extract(self, url): vico_id, vivi_id, display_id = self._match_valid_url(url).groups() if not vico_id: webpage = self._download_webpage(url, display_id) mobj = re.search( r'data-collection="(?P<vico_id>\d+)"[^>]+data-video="(?P<vivi_id>\d+)"', webpage) if mobj: vico_id = mobj.group('vico_id') vivi_id = mobj.group('vivi_id') else: vico_id = self._html_search_regex( r'vico_id\s*:\s*([0-9]+)', webpage, 'vico_id') vivi_id = self._html_search_regex( r'vivi_id\s*:\s*([0-9]+)', webpage, 'vivi_id') info = self._download_json( 'https://service.rtl2.de/api-player-vipo/video.php', display_id, query={ 'vico_id': vico_id, 'vivi_id': vivi_id, }) video_info = info['video'] title = video_info['titel'] formats = [] rtmp_url = video_info.get('streamurl') if rtmp_url: rtmp_url = rtmp_url.replace('\\', '') stream_url = 'mp4:' + self._html_search_regex(r'/ondemand/(.+)', rtmp_url, 'stream 
URL') rtmp_conn = ['S:connect', 'O:1', 'NS:pageUrl:' + url, 'NB:fpad:0', 'NN:videoFunction:1', 'O:0'] formats.append({ 'format_id': 'rtmp', 'url': rtmp_url, 'play_path': stream_url, 'player_url': 'https://www.rtl2.de/sites/default/modules/rtl2/jwplayer/jwplayer-7.6.0/jwplayer.flash.swf', 'page_url': url, 'flash_version': 'LNX 11,2,202,429', 'rtmp_conn': rtmp_conn, 'no_resume': True, 'quality': 1, }) m3u8_url = video_info.get('streamurl_hls') if m3u8_url: formats.extend(self._extract_akamai_formats(m3u8_url, display_id)) return { 'id': display_id, 'title': title, 'thumbnail': video_info.get('image'), 'description': video_info.get('beschreibung'), 'duration': int_or_none(video_info.get('duration')), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/agora.py
yt_dlp/extractor/agora.py
import functools import uuid from .common import InfoExtractor from ..utils import ( ExtractorError, OnDemandPagedList, int_or_none, month_by_name, parse_duration, try_call, ) class WyborczaVideoIE(InfoExtractor): # this id is not an article id, it has to be extracted from the article _VALID_URL = r'(?:wyborcza:video:|https?://wyborcza\.pl/(?:api-)?video/)(?P<id>\d+)' IE_NAME = 'wyborcza:video' _TESTS = [{ 'url': 'wyborcza:video:26207634', 'info_dict': { 'id': '26207634', 'ext': 'mp4', 'title': '- Polska w 2020 r. jest innym państwem niż w 2015 r. Nie zmieniła się konstytucja, ale jest to już inny ustrój - mówi Adam Bodnar', 'description': ' ', 'uploader': 'Dorota Roman', 'duration': 2474, 'thumbnail': r're:https://.+\.jpg', }, }, { 'url': 'https://wyborcza.pl/video/26207634', 'only_matching': True, }, { 'url': 'https://wyborcza.pl/api-video/26207634', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) meta = self._download_json(f'https://wyborcza.pl/api-video/{video_id}', video_id) formats = [] base_url = meta['redirector'].replace('http://', 'https://') + meta['basePath'] for quality in ('standard', 'high'): if not meta['files'].get(quality): continue formats.append({ 'url': base_url + meta['files'][quality], 'height': int_or_none( self._search_regex( r'p(\d+)[a-z]+\.mp4$', meta['files'][quality], 'mp4 video height', default=None)), 'format_id': quality, }) if meta['files'].get('dash'): formats.extend(self._extract_mpd_formats(base_url + meta['files']['dash'], video_id)) return { 'id': video_id, 'formats': formats, 'title': meta.get('title'), 'description': meta.get('lead'), 'uploader': meta.get('signature'), 'thumbnail': meta.get('imageUrl'), 'duration': meta.get('duration'), } class WyborczaPodcastIE(InfoExtractor): _VALID_URL = r'''(?x) https?://(?:www\.)?(?: wyborcza\.pl/podcast(?:/0,172673\.html)?| wysokieobcasy\.pl/wysokie-obcasy/0,176631\.html )(?:\?(?:[^&#]+?&)*podcast=(?P<id>\d+))? 
''' _TESTS = [{ 'url': 'https://wyborcza.pl/podcast/0,172673.html?podcast=100720#S.main_topic-K.C-B.6-L.1.podcast', 'info_dict': { 'id': '100720', 'ext': 'mp3', 'title': 'Cyfrodziewczyny. Kim były pionierki polskiej informatyki ', 'uploader': 'Michał Nogaś ', 'upload_date': '20210117', 'description': 'md5:49f0a06ffc4c1931210d3ab1416a651d', 'duration': 3684.0, 'thumbnail': r're:https://.+\.jpg', }, }, { 'url': 'https://www.wysokieobcasy.pl/wysokie-obcasy/0,176631.html?podcast=100673', 'info_dict': { 'id': '100673', 'ext': 'mp3', 'title': 'Czym jest ubóstwo menstruacyjne i dlaczego dotyczy każdej i każdego z nas?', 'uploader': 'Agnieszka Urazińska ', 'upload_date': '20210115', 'description': 'md5:c161dc035f8dbb60077011fc41274899', 'duration': 1803.0, 'thumbnail': r're:https://.+\.jpg', }, }, { 'url': 'https://wyborcza.pl/podcast', 'info_dict': { 'id': '334', 'title': 'Gościnnie: Wyborcza, 8:10', 'series': 'Gościnnie: Wyborcza, 8:10', }, 'playlist_mincount': 370, }, { 'url': 'https://www.wysokieobcasy.pl/wysokie-obcasy/0,176631.html', 'info_dict': { 'id': '395', 'title': 'Gościnnie: Wysokie Obcasy', 'series': 'Gościnnie: Wysokie Obcasy', }, 'playlist_mincount': 12, }] def _real_extract(self, url): podcast_id = self._match_id(url) if not podcast_id: # playlist podcast_id = '395' if 'wysokieobcasy.pl/' in url else '334' return self.url_result(TokFMAuditionIE._create_url(podcast_id), TokFMAuditionIE, podcast_id) meta = self._download_json('https://wyborcza.pl/api/podcast', podcast_id, query={'guid': podcast_id, 'type': 'wo' if 'wysokieobcasy.pl/' in url else None}) day, month, year = self._search_regex(r'^(\d\d?) 
(\w+) (\d{4})$', meta.get('publishedDate'), 'upload date', group=(1, 2, 3), default=(None, None, None)) return { 'id': podcast_id, 'url': meta['url'], 'title': meta.get('title'), 'description': meta.get('description'), 'thumbnail': meta.get('imageUrl'), 'duration': parse_duration(meta.get('duration')), 'uploader': meta.get('author'), 'upload_date': try_call(lambda: f'{year}{month_by_name(month, lang="pl"):0>2}{day:0>2}'), } class TokFMPodcastIE(InfoExtractor): _VALID_URL = r'(?:https?://audycje\.tokfm\.pl/podcast/|tokfm:podcast:)(?P<id>\d+),?' IE_NAME = 'tokfm:podcast' _TESTS = [{ 'url': 'https://audycje.tokfm.pl/podcast/91275,-Systemowy-rasizm-Czy-zamieszki-w-USA-po-morderstwie-w-Minneapolis-doprowadza-do-zmian-w-sluzbach-panstwowych', 'info_dict': { 'id': '91275', 'ext': 'mp3', 'title': 'md5:a9b15488009065556900169fb8061cce', 'episode': 'md5:a9b15488009065556900169fb8061cce', 'series': 'Analizy', }, }] def _real_extract(self, url): media_id = self._match_id(url) # in case it breaks see this but it returns a lot of useless data # https://api.podcast.radioagora.pl/api4/getPodcasts?podcast_id=100091&with_guests=true&with_leaders_for_mobile=true metadata = self._download_json( f'https://audycje.tokfm.pl/getp/3{media_id}', media_id, 'Downloading podcast metadata') if not metadata: raise ExtractorError('No such podcast', expected=True) metadata = metadata[0] mp3_url = self._download_json( 'https://api.podcast.radioagora.pl/api4/getSongUrl', media_id, 'Downloading podcast mp3 URL', query={ 'podcast_id': media_id, 'device_id': str(uuid.uuid4()), 'ppre': 'false', 'audio': 'mp3', })['link_ssl'] return { 'id': media_id, 'url': mp3_url, 'vcodec': 'none', 'ext': 'mp3', 'title': metadata.get('podcast_name'), 'series': metadata.get('series_name'), 'episode': metadata.get('podcast_name'), } class TokFMAuditionIE(InfoExtractor): _VALID_URL = r'(?:https?://audycje\.tokfm\.pl/audycja/|tokfm:audition:)(?P<id>\d+),?' 
IE_NAME = 'tokfm:audition' _TESTS = [{ 'url': 'https://audycje.tokfm.pl/audycja/218,Analizy', 'info_dict': { 'id': '218', 'title': 'Analizy', 'series': 'Analizy', }, 'playlist_count': 1635, }] _PAGE_SIZE = 30 _HEADERS = { 'User-Agent': 'Mozilla/5.0 (Linux; Android 9; Redmi 3S Build/PQ3A.190801.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.101 Mobile Safari/537.36', } @staticmethod def _create_url(video_id): return f'https://audycje.tokfm.pl/audycja/{video_id}' def _real_extract(self, url): audition_id = self._match_id(url) data = self._download_json( f'https://api.podcast.radioagora.pl/api4/getSeries?series_id={audition_id}', audition_id, 'Downloading audition metadata', headers=self._HEADERS) if not data: raise ExtractorError('No such audition', expected=True) data = data[0] entries = OnDemandPagedList(functools.partial( self._fetch_page, audition_id, data), self._PAGE_SIZE) return { '_type': 'playlist', 'id': audition_id, 'title': data.get('series_name'), 'series': data.get('series_name'), 'entries': entries, } def _fetch_page(self, audition_id, data, page): for retry in self.RetryManager(): podcast_page = self._download_json( f'https://api.podcast.radioagora.pl/api4/getPodcasts?series_id={audition_id}&limit=30&offset={page}&with_guests=true&with_leaders_for_mobile=true', audition_id, f'Downloading podcast list page {page + 1}', headers=self._HEADERS) if not podcast_page: retry.error = ExtractorError('Agora returned empty page', expected=True) for podcast in podcast_page: yield { '_type': 'url_transparent', 'url': podcast['podcast_sharing_url'], 'ie_key': TokFMPodcastIE.ie_key(), 'title': podcast.get('podcast_name'), 'episode': podcast.get('podcast_name'), 'description': podcast.get('podcast_description'), 'timestamp': int_or_none(podcast.get('podcast_timestamp')), 'series': data.get('series_name'), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/matchtv.py
yt_dlp/extractor/matchtv.py
from .common import InfoExtractor class MatchTVIE(InfoExtractor): _VALID_URL = [ r'https?://matchtv\.ru/on-air/?(?:$|[?#])', r'https?://video\.matchtv\.ru/iframe/channel/106/?(?:$|[?#])', ] _TESTS = [{ 'url': 'http://matchtv.ru/on-air/', 'info_dict': { 'id': 'matchtv-live', 'ext': 'mp4', 'title': r're:^Матч ТВ - Прямой эфир \d{4}-\d{2}-\d{2} \d{2}:\d{2}$', 'live_status': 'is_live', }, 'params': { 'skip_download': True, }, }, { 'url': 'https://video.matchtv.ru/iframe/channel/106', 'only_matching': True, }] def _real_extract(self, url): video_id = 'matchtv-live' webpage = self._download_webpage('https://video.matchtv.ru/iframe/channel/106', video_id) video_url = self._html_search_regex( r'data-config="config=(https?://[^?"]+)[?"]', webpage, 'video URL').replace('/feed/', '/media/') + '.m3u8' return { 'id': video_id, 'title': 'Матч ТВ - Прямой эфир', 'is_live': True, 'formats': self._extract_m3u8_formats(video_url, video_id, 'mp4', live=True), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sproutvideo.py
yt_dlp/extractor/sproutvideo.py
import base64 import urllib.parse from .common import InfoExtractor from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, int_or_none, qualities, remove_start, smuggle_url, unsmuggle_url, update_url_query, url_or_none, urlencode_postdata, ) from ..utils.traversal import traverse_obj class SproutVideoIE(InfoExtractor): _NO_SCHEME_RE = r'//videos\.sproutvideo\.com/embed/(?P<id>[\da-f]+)/[\da-f]+' _VALID_URL = rf'https?:{_NO_SCHEME_RE}' _EMBED_REGEX = [rf'<iframe [^>]*\bsrc=["\'](?P<url>(?:https?:)?{_NO_SCHEME_RE}[^"\']*)["\']'] _TESTS = [{ 'url': 'https://videos.sproutvideo.com/embed/4c9dddb01910e3c9c4/0fc24387c4f24ee3', 'md5': '1343ce1a6cb39d67889bfa07c7b02b0e', 'info_dict': { 'id': '4c9dddb01910e3c9c4', 'ext': 'mp4', 'title': 'Adrien Labaeye : Berlin, des communautés aux communs', 'duration': 576, 'thumbnail': r're:https?://images\.sproutvideo\.com/.+\.jpg', }, }, { 'url': 'https://videos.sproutvideo.com/embed/a79fdcb21f1be2c62e/93bf31e41e39ca27', 'md5': 'cebae5cf558cca83271917cf4ec03f26', 'info_dict': { 'id': 'a79fdcb21f1be2c62e', 'ext': 'mp4', 'title': 'HS_01_Live Stream 2023-01-14 10:00', 'duration': 703, 'thumbnail': r're:https?://images\.sproutvideo\.com/.+\.jpg', }, 'skip': 'Account Disabled', }, { # http formats 'sd' and 'hd' are available 'url': 'https://videos.sproutvideo.com/embed/119cd6bc1a18e6cd98/30751a1761ae5b90', 'md5': 'f368c78df07e78a749508b221528672c', 'info_dict': { 'id': '119cd6bc1a18e6cd98', 'ext': 'mp4', 'title': '3. 
Updating your Partner details', 'thumbnail': r're:https?://images\.sproutvideo\.com/.+\.jpg', 'duration': 60, }, 'params': {'format': 'hd'}, }, { # subtitles 'url': 'https://videos.sproutvideo.com/embed/119dd8ba121ee0cc98/4ee50c88a343215d?type=hd', 'md5': '7f6798f037d7a3e3e07e67959de68fc6', 'info_dict': { 'id': '119dd8ba121ee0cc98', 'ext': 'mp4', 'title': 'Recipients Setup - Domestic Wire Only', 'thumbnail': r're:https?://images\.sproutvideo\.com/.+\.jpg', 'duration': 77, 'subtitles': {'en': 'count:1'}, }, }] _WEBPAGE_TESTS = [{ 'url': 'https://www.solidarum.org/vivre-ensemble/adrien-labaeye-berlin-des-communautes-aux-communs', 'info_dict': { 'id': '4c9dddb01910e3c9c4', 'ext': 'mp4', 'title': 'Adrien Labaeye : Berlin, des communautés aux communs', 'duration': 576, 'thumbnail': r're:https?://images\.sproutvideo\.com/.+\.jpg', }, }] _M3U8_URL_TMPL = 'https://{base}.videos.sproutvideo.com/{s3_user_hash}/{s3_video_hash}/video/index.m3u8' _QUALITIES = ('hd', 'uhd', 'source') # Exclude 'sd' to prioritize hls formats above it @staticmethod def _policy_to_qs(policy, signature_key, as_string=False): query = {} for key, value in policy['signatures'][signature_key].items(): query[remove_start(key, 'CloudFront-')] = value query['sessionID'] = policy['sessionID'] return urllib.parse.urlencode(query, doseq=True) if as_string else query @classmethod def _extract_embed_urls(cls, url, webpage): for embed_url in super()._extract_embed_urls(url, webpage): if embed_url.startswith('//'): embed_url = f'https:{embed_url}' yield smuggle_url(embed_url, {'referer': url}) def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) video_id = self._match_id(url) webpage = self._download_webpage( url, video_id, headers=traverse_obj(smuggled_data, {'Referer': 'referer'})) data = self._search_json( r'(?:window\.|(?:var|const|let)\s+)(?:dat|(?:player|video)Info|)\s*=\s*["\']', webpage, 'player info', video_id, contains_pattern=r'[A-Za-z0-9+/=]+', end_pattern=r'["\'];', 
transform_source=lambda x: base64.b64decode(x).decode()) # SproutVideo may send player info for 'SMPTE Color Monitor Test' [a791d7b71b12ecc52e] # e.g. if the user-agent we used with the webpage request is too old video_uid = data['videoUid'] if video_id != video_uid: raise ExtractorError(f'{self.IE_NAME} sent the wrong video data ({video_uid})') formats, subtitles = [], {} headers = { 'Accept': '*/*', 'Origin': 'https://videos.sproutvideo.com', 'Referer': url, } # HLS extraction is fatal; only attempt it if the JSON data says it's available if traverse_obj(data, 'hls'): manifest_query = self._policy_to_qs(data, 'm') fragment_query = self._policy_to_qs(data, 't', as_string=True) key_query = self._policy_to_qs(data, 'k', as_string=True) formats.extend(self._extract_m3u8_formats( self._M3U8_URL_TMPL.format(**data), video_id, 'mp4', m3u8_id='hls', headers=headers, query=manifest_query)) for fmt in formats: fmt.update({ 'url': update_url_query(fmt['url'], manifest_query), 'extra_param_to_segment_url': fragment_query, 'extra_param_to_key_url': key_query, }) if downloads := traverse_obj(data, ('downloads', {dict.items}, lambda _, v: url_or_none(v[1]))): quality = qualities(self._QUALITIES) acodec = 'none' if data.get('has_audio') is False else None formats.extend([{ 'format_id': str(format_id), 'url': format_url, 'ext': 'mp4', 'quality': quality(format_id), 'acodec': acodec, } for format_id, format_url in downloads]) for sub_data in traverse_obj(data, ('subtitleData', lambda _, v: url_or_none(v['src']))): subtitles.setdefault(sub_data.get('srclang', 'en'), []).append({ 'url': sub_data['src'], }) return { 'id': video_id, 'formats': formats, 'subtitles': subtitles, 'http_headers': headers, **traverse_obj(data, { 'title': ('title', {str}), 'duration': ('duration', {int_or_none}), 'thumbnail': ('posterframe_url', {url_or_none}), }), } class VidsIoIE(InfoExtractor): IE_NAME = 'vids.io' _VALID_URL = r'https?://[\w-]+\.vids\.io/videos/(?P<id>[\da-f]+)/(?P<display_id>[\w-]+)' 
_TESTS = [{ 'url': 'https://how-to-video.vids.io/videos/799cd8b11c10efc1f0/how-to-video-live-streaming', 'md5': '9bbbb2c0c0739eb163b80f87b8d77c9e', 'info_dict': { 'id': '799cd8b11c10efc1f0', 'ext': 'mp4', 'title': 'How to Video: Live Streaming', 'duration': 2787, 'thumbnail': r're:https?://images\.sproutvideo\.com/.+\.jpg', }, }] def _real_extract(self, url): video_id, display_id = self._match_valid_url(url).group('id', 'display_id') webpage, urlh = self._download_webpage_handle(url, display_id, expected_status=403) if urlh.status == 403: password = self.get_param('videopassword') if not password: raise ExtractorError( 'This video is password-protected; use the --video-password option', expected=True) try: webpage = self._download_webpage( url, display_id, 'Submitting video password', data=urlencode_postdata({ 'password': password, **self._hidden_inputs(webpage), })) # Requests with user's session cookie `_sproutvideo_session` are now authorized except ExtractorError as e: if isinstance(e.cause, HTTPError) and e.cause.status == 403: raise ExtractorError('Incorrect password', expected=True) raise if embed_url := next(SproutVideoIE._extract_embed_urls(url, webpage), None): return self.url_result(embed_url, SproutVideoIE, video_id) raise ExtractorError('Unable to extract any SproutVideo embed url')
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/lumni.py
yt_dlp/extractor/lumni.py
from .francetv import FranceTVBaseInfoExtractor class LumniIE(FranceTVBaseInfoExtractor): _VALID_URL = r'https?://(?:www\.)?lumni\.fr/video/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://www.lumni.fr/video/l-homme-et-son-environnement-dans-la-revolution-industrielle', 'md5': '960e8240c4f2c7a20854503a71e52f5e', 'info_dict': { 'id': 'd2b9a4e5-a526-495b-866c-ab72737e3645', 'ext': 'mp4', 'title': "L'homme et son environnement dans la révolution industrielle - L'ère de l'homme", 'thumbnail': 'https://assets.webservices.francetelevisions.fr/v1/assets/images/a7/17/9f/a7179f5f-63a5-4e11-8d4d-012ab942d905.jpg', 'duration': 230, }, }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) video_id = self._html_search_regex( r'<div[^>]+data-factoryid\s*=\s*["\']([^"\']+)', webpage, 'video id') return self._make_url_result(video_id, url=url)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/musicdex.py
yt_dlp/extractor/musicdex.py
from .common import InfoExtractor from ..utils import ( date_from_str, format_field, try_get, unified_strdate, ) class MusicdexBaseIE(InfoExtractor): def _return_info(self, track_json, album_json, video_id): return { 'id': str(video_id), 'title': track_json.get('name'), 'track': track_json.get('name'), 'description': track_json.get('description'), 'track_number': track_json.get('number'), 'url': format_field(track_json, 'url', 'https://www.musicdex.org/%s'), 'duration': track_json.get('duration'), 'genres': [genre.get('name') for genre in track_json.get('genres') or []], 'like_count': track_json.get('likes_count'), 'view_count': track_json.get('plays'), 'artists': [artist.get('name') for artist in track_json.get('artists') or []], 'album_artists': [artist.get('name') for artist in album_json.get('artists') or []], 'thumbnail': format_field(album_json, 'image', 'https://www.musicdex.org/%s'), 'album': album_json.get('name'), 'release_year': try_get(album_json, lambda x: date_from_str(unified_strdate(x['release_date'])).year), 'extractor_key': MusicdexSongIE.ie_key(), 'extractor': 'MusicdexSong', } class MusicdexSongIE(MusicdexBaseIE): _VALID_URL = r'https?://(?:www\.)?musicdex\.org/track/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.musicdex.org/track/306/dual-existence', 'info_dict': { 'id': '306', 'ext': 'mp3', 'title': 'dual existence', 'description': '#NIPPONSEI @ IRC.RIZON.NET', 'track': 'dual existence', 'track_number': 1, 'duration': 266000, 'genres': ['Anime'], 'like_count': int, 'view_count': int, 'artists': ['fripSide'], 'album_artists': ['fripSide'], 'thumbnail': 'https://www.musicdex.org/storage/album/9iDIam1DHTVqUG4UclFIEq1WAFGXfPW4y0TtZa91.png', 'album': 'To Aru Kagaku no Railgun T OP2 Single - dual existence', 'release_year': 2020, }, 'params': {'skip_download': True}, }] def _real_extract(self, url): video_id = self._match_id(url) data_json = self._download_json( f'https://www.musicdex.org/secure/tracks/{video_id}?defaultRelations=true', 
video_id)['track'] return self._return_info(data_json, data_json.get('album') or {}, video_id) class MusicdexAlbumIE(MusicdexBaseIE): _VALID_URL = r'https?://(?:www\.)?musicdex\.org/album/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.musicdex.org/album/56/tenmon-and-eiichiro-yanagi-minori/ef-a-tale-of-memories-original-soundtrack-2-fortissimo', 'playlist_mincount': 28, 'info_dict': { 'id': '56', 'genres': ['OST'], 'view_count': int, 'artists': ['TENMON & Eiichiro Yanagi / minori'], 'title': 'ef - a tale of memories Original Soundtrack 2 ~fortissimo~', 'release_year': 2008, 'thumbnail': 'https://www.musicdex.org/storage/album/2rSHkyYBYfB7sbvElpEyTMcUn6toY7AohOgJuDlE.jpg', }, }] def _real_extract(self, url): playlist_id = self._match_id(url) data_json = self._download_json( f'https://www.musicdex.org/secure/albums/{playlist_id}?defaultRelations=true', playlist_id)['album'] entries = [self._return_info(track, data_json, track['id']) for track in data_json.get('tracks') or [] if track.get('id')] return { '_type': 'playlist', 'id': playlist_id, 'title': data_json.get('name'), 'description': data_json.get('description'), 'genres': [genre.get('name') for genre in data_json.get('genres') or []], 'view_count': data_json.get('plays'), 'artists': [artist.get('name') for artist in data_json.get('artists') or []], 'thumbnail': format_field(data_json, 'image', 'https://www.musicdex.org/%s'), 'release_year': try_get(data_json, lambda x: date_from_str(unified_strdate(x['release_date'])).year), 'entries': entries, } class MusicdexPageIE(MusicdexBaseIE): # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor def _entries(self, playlist_id): next_page_url = self._API_URL % playlist_id while next_page_url: data_json = self._download_json(next_page_url, playlist_id)['pagination'] yield from data_json.get('data') or [] next_page_url = data_json.get('next_page_url') class MusicdexArtistIE(MusicdexPageIE): _VALID_URL = 
r'https?://(?:www\.)?musicdex\.org/artist/(?P<id>\d+)' _API_URL = 'https://www.musicdex.org/secure/artists/%s/albums?page=1' _TESTS = [{ 'url': 'https://www.musicdex.org/artist/11/fripside', 'playlist_mincount': 28, 'info_dict': { 'id': '11', 'view_count': int, 'title': 'fripSide', 'thumbnail': 'https://www.musicdex.org/storage/artist/ZmOz0lN2vsweegB660em3xWffCjLPmTQHqJls5Xx.jpg', }, }] def _real_extract(self, url): playlist_id = self._match_id(url) data_json = self._download_json(f'https://www.musicdex.org/secure/artists/{playlist_id}', playlist_id)['artist'] entries = [] for album in self._entries(playlist_id): entries.extend(self._return_info(track, album, track['id']) for track in album.get('tracks') or [] if track.get('id')) return { '_type': 'playlist', 'id': playlist_id, 'title': data_json.get('name'), 'view_count': data_json.get('plays'), 'thumbnail': format_field(data_json, 'image_small', 'https://www.musicdex.org/%s'), 'entries': entries, } class MusicdexPlaylistIE(MusicdexPageIE): _VALID_URL = r'https?://(?:www\.)?musicdex\.org/playlist/(?P<id>\d+)' _API_URL = 'https://www.musicdex.org/secure/playlists/%s/tracks?perPage=10000&page=1' _TESTS = [{ 'url': 'https://www.musicdex.org/playlist/9/test', 'playlist_mincount': 73, 'info_dict': { 'id': '9', 'view_count': int, 'title': 'Test', 'thumbnail': 'https://www.musicdex.org/storage/album/jXATI79f0IbQ2sgsKYOYRCW3zRwF3XsfHhzITCuJ.jpg', 'description': 'Test 123 123 21312 32121321321321312', }, }] def _real_extract(self, url): playlist_id = self._match_id(url) data_json = self._download_json(f'https://www.musicdex.org/secure/playlists/{playlist_id}', playlist_id)['playlist'] entries = [self._return_info(track, track.get('album') or {}, track['id']) for track in self._entries(playlist_id) or [] if track.get('id')] return { '_type': 'playlist', 'id': playlist_id, 'title': data_json.get('name'), 'description': data_json.get('description'), 'view_count': data_json.get('plays'), 'thumbnail': format_field(data_json, 
'image', 'https://www.musicdex.org/%s'), 'entries': entries, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/blerp.py
yt_dlp/extractor/blerp.py
import json from .common import InfoExtractor from ..utils import strip_or_none, traverse_obj class BlerpIE(InfoExtractor): IE_NAME = 'blerp' _VALID_URL = r'https?://(?:www\.)?blerp\.com/soundbites/(?P<id>[0-9a-zA-Z]+)' _TESTS = [{ 'url': 'https://blerp.com/soundbites/6320fe8745636cb4dd677a5a', 'info_dict': { 'id': '6320fe8745636cb4dd677a5a', 'title': 'Samsung Galaxy S8 Over the Horizon Ringtone 2016', 'uploader': 'luminousaj', 'uploader_id': '5fb81e51aa66ae000c395478', 'ext': 'mp3', 'tags': ['samsung', 'galaxy', 's8', 'over the horizon', '2016', 'ringtone'], }, }, { 'url': 'https://blerp.com/soundbites/5bc94ef4796001000498429f', 'info_dict': { 'id': '5bc94ef4796001000498429f', 'title': 'Yee', 'uploader': '179617322678353920', 'uploader_id': '5ba99cf71386730004552c42', 'ext': 'mp3', 'tags': ['YEE', 'YEET', 'wo ha haah catchy tune yee', 'yee'], }, }] _GRAPHQL_OPERATIONNAME = 'webBitePageGetBite' _GRAPHQL_QUERY = ( '''query webBitePageGetBite($_id: MongoID!) { web { biteById(_id: $_id) { ...bitePageFrag __typename } __typename } } fragment bitePageFrag on Bite { _id title userKeywords keywords color visibility isPremium owned price extraReview isAudioExists image { filename original { url __typename } __typename } userReactions { _id reactions createdAt __typename } topReactions totalSaveCount saved blerpLibraryType license licenseMetaData playCount totalShareCount totalFavoriteCount totalAddedToBoardCount userCategory userAudioQuality audioCreationState transcription userTranscription description createdAt updatedAt author listingType ownerObject { _id username profileImage { filename original { url __typename } __typename } __typename } transcription favorited visibility isCurated sourceUrl audienceRating strictAudienceRating ownerId reportObject { reportedContentStatus __typename } giphy { mp4 gif __typename } audio { filename original { url __typename } mp3 { url __typename } __typename } __typename } ''') def _real_extract(self, url): audio_id = 
self._match_id(url) data = { 'operationName': self._GRAPHQL_OPERATIONNAME, 'query': self._GRAPHQL_QUERY, 'variables': { '_id': audio_id, }, } headers = { 'Content-Type': 'application/json', } json_result = self._download_json( 'https://api.blerp.com/graphql', audio_id, data=json.dumps(data).encode(), headers=headers) bite_json = json_result['data']['web']['biteById'] return { 'id': bite_json['_id'], 'url': bite_json['audio']['mp3']['url'], 'title': bite_json['title'], 'uploader': traverse_obj(bite_json, ('ownerObject', 'username'), expected_type=strip_or_none), 'uploader_id': traverse_obj(bite_json, ('ownerObject', '_id'), expected_type=strip_or_none), 'ext': 'mp3', 'tags': list(filter(None, map(strip_or_none, (traverse_obj(bite_json, 'userKeywords', expected_type=list) or []))) or None), }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sbs.py
yt_dlp/extractor/sbs.py
from .common import InfoExtractor from ..networking import HEADRequest from ..utils import ( float_or_none, int_or_none, parse_duration, parse_iso8601, traverse_obj, update_url_query, url_or_none, ) class SBSIE(InfoExtractor): IE_DESC = 'sbs.com.au' _VALID_URL = r'''(?x) https?://(?:www\.)?sbs\.com\.au/(?: ondemand(?: /video/(?:single/)?| /(?:movie|tv-program)/[^/]+/| /(?:tv|news)-series/(?:[^/]+/){3}| .*?\bplay=|/watch/ )|news/(?:embeds/)?video/ )(?P<id>[0-9]+)''' _EMBED_REGEX = [r'''(?x)] (?: <meta\s+property="og:video"\s+content=| <iframe[^>]+?src= ) (["\'])(?P<url>https?://(?:www\.)?sbs\.com\.au/ondemand/video/.+?)\1'''] _TESTS = [{ # Original URL is handled by the generic IE which finds the iframe: # http://www.sbs.com.au/thefeed/blog/2014/08/21/dingo-conservation 'url': 'http://www.sbs.com.au/ondemand/video/single/320403011771/?source=drupal&vertical=thefeed', 'md5': '31f84a7a19b53635db63c73f8ab0c4a7', 'info_dict': { 'id': '320403011771', # '_rFBPRPO4pMR', 'ext': 'mp4', 'title': 'Dingo Conservation (The Feed)', 'description': 'md5:f250a9856fca50d22dec0b5b8015f8a5', 'thumbnail': r're:https?://.*\.jpg', 'duration': 308, 'timestamp': 1408613220, 'upload_date': '20140821', 'uploader': 'SBSC', }, 'expected_warnings': ['Unable to download JSON metadata'], }, { 'url': 'http://www.sbs.com.au/ondemand/video/320403011771/Dingo-Conservation-The-Feed', 'only_matching': True, }, { 'url': 'http://www.sbs.com.au/news/video/471395907773/The-Feed-July-9', 'only_matching': True, }, { 'url': 'https://www.sbs.com.au/ondemand/?play=1836638787723', 'only_matching': True, }, { 'url': 'https://www.sbs.com.au/ondemand/program/inside-windsor-castle?play=1283505731842', 'only_matching': True, }, { 'url': 'https://www.sbs.com.au/news/embeds/video/1840778819866', 'only_matching': True, }, { 'url': 'https://www.sbs.com.au/ondemand/watch/1698704451971', 'only_matching': True, }, { 'url': 'https://www.sbs.com.au/ondemand/movie/coherence/1469404227931', 'only_matching': True, }, { 'note': 
'Live stream', 'url': 'https://www.sbs.com.au/ondemand/video/1726824003663/sbs-24x7-live-stream-nsw', 'only_matching': True, }, { 'url': 'https://www.sbs.com.au/ondemand/news-series/dateline/dateline-2022/dateline-s2022-ep26/2072245827515', 'only_matching': True, }, { 'url': 'https://www.sbs.com.au/ondemand/tv-series/the-handmaids-tale/season-5/the-handmaids-tale-s5-ep1/2065631811776', 'only_matching': True, }, { 'url': 'https://www.sbs.com.au/ondemand/tv-program/autun-romes-forgotten-sister/2116212803602', 'only_matching': True, }] _GEO_COUNTRIES = ['AU'] _AUS_TV_PARENTAL_GUIDELINES = { 'P': 0, 'C': 7, 'G': 0, 'PG': 0, 'M': 14, 'MA15+': 15, 'MAV15+': 15, 'R18+': 18, } _PLAYER_API = 'https://www.sbs.com.au/api/v3' def _real_extract(self, url): video_id = self._match_id(url) formats, subtitles = self._extract_smil_formats_and_subtitles( update_url_query(f'{self._PLAYER_API}/video_smil', {'id': video_id}), video_id) if not formats: urlh = self._request_webpage( HEADRequest('https://sbs-vod-prod-01.akamaized.net/'), video_id, note='Checking geo-restriction', fatal=False, expected_status=403) if urlh: error_reasons = urlh.headers.get_all('x-error-reason') or [] if 'geo-blocked' in error_reasons: self.raise_geo_restricted(countries=['AU']) self.raise_no_formats('No formats are available', video_id=video_id) media = traverse_obj(self._download_json( f'{self._PLAYER_API}/video_stream', video_id, fatal=False, query={'id': video_id, 'context': 'tv'}), ('video_object', {dict})) or {} media.update(self._download_json( f'https://catalogue.pr.sbsod.com/mpx-media/{video_id}', video_id, fatal=not media) or {}) # For named episodes, use the catalogue's title to set episode, rather than generic 'Episode N'. 
if traverse_obj(media, ('partOfSeries', {dict})): media['epName'] = traverse_obj(media, ('title', {str})) # Need to set different language for forced subs or else they have priority over full subs fixed_subtitles = {} for lang, subs in subtitles.items(): for sub in subs: fixed_lang = lang if sub['url'].lower().endswith('_fe.vtt'): fixed_lang += '-forced' fixed_subtitles.setdefault(fixed_lang, []).append(sub) return { 'id': video_id, **traverse_obj(media, { 'title': ('name', {str}), 'description': ('description', {str}), 'channel': ('taxonomy', 'channel', 'name', {str}), 'series': ((('partOfSeries', 'name'), 'seriesTitle'), {str}), 'series_id': ((('partOfSeries', 'uuid'), 'seriesID'), {str}), 'season_number': ('seasonNumber', {int_or_none}), 'episode': ('epName', {str}), 'episode_number': ('episodeNumber', {int_or_none}), 'timestamp': (('datePublished', ('publication', 'startDate')), {parse_iso8601}), 'release_year': ('releaseYear', {int_or_none}), 'duration': ('duration', ({float_or_none}, {parse_duration})), 'is_live': ('liveStream', {bool}), 'age_limit': (('classificationID', 'contentRating'), {str.upper}, { lambda x: self._AUS_TV_PARENTAL_GUIDELINES.get(x)}), # dict.get is unhashable in py3.7 }, get_all=False), **traverse_obj(media, { 'categories': (('genres', ...), ('taxonomy', ('genre', 'subgenre'), 'name'), {str}), 'tags': (('consumerAdviceTexts', ('sbsSubCertification', 'consumerAdvice')), ..., {str}), 'thumbnails': ('thumbnails', lambda _, v: url_or_none(v['contentUrl']), { 'id': ('name', {str}), 'url': 'contentUrl', 'width': ('width', {int_or_none}), 'height': ('height', {int_or_none}), }), }), 'formats': formats, 'subtitles': fixed_subtitles, 'uploader': 'SBSC', }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/pyvideo.py
yt_dlp/extractor/pyvideo.py
import re from .common import InfoExtractor from ..utils import int_or_none class PyvideoIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?pyvideo\.org/(?P<category>[^/]+)/(?P<id>[^/?#&.]+)' _TESTS = [{ 'url': 'http://pyvideo.org/pycon-us-2013/become-a-logging-expert-in-30-minutes.html', 'info_dict': { 'id': 'become-a-logging-expert-in-30-minutes', }, 'playlist_count': 2, }, { 'url': 'http://pyvideo.org/pygotham-2012/gloriajw-spotifywitherikbernhardsson182m4v.html', 'md5': '5fe1c7e0a8aa5570330784c847ff6d12', 'info_dict': { 'id': '2542', 'ext': 'm4v', 'title': 'Gloriajw-SpotifyWithErikBernhardsson182.m4v', }, }] def _real_extract(self, url): mobj = self._match_valid_url(url) category = mobj.group('category') video_id = mobj.group('id') entries = [] data = self._download_json( f'https://raw.githubusercontent.com/pyvideo/data/master/{category}/videos/{video_id}.json', video_id, fatal=False) if data: for video in data['videos']: video_url = video.get('url') if video_url: if video.get('type') == 'youtube': entries.append(self.url_result(video_url, 'Youtube')) else: entries.append({ 'id': str(data.get('id') or video_id), 'url': video_url, 'title': data['title'], 'description': data.get('description') or data.get('summary'), 'thumbnail': data.get('thumbnail_url'), 'duration': int_or_none(data.get('duration')), }) else: webpage = self._download_webpage(url, video_id) title = self._og_search_title(webpage) media_urls = self._search_regex( r'(?s)Media URL:(.+?)</li>', webpage, 'media urls') for m in re.finditer( r'<a[^>]+href=(["\'])(?P<url>http.+?)\1', media_urls): media_url = m.group('url') if re.match(r'https?://www\.youtube\.com/watch\?v=.*', media_url): entries.append(self.url_result(media_url, 'Youtube')) else: entries.append({ 'id': video_id, 'url': media_url, 'title': title, }) return self.playlist_result(entries, video_id)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/awaan.py
yt_dlp/extractor/awaan.py
import base64 import urllib.parse from .common import InfoExtractor from ..utils import ( format_field, int_or_none, parse_iso8601, smuggle_url, unsmuggle_url, urlencode_postdata, ) class AWAANIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:awaan|dcndigital)\.ae/(?:#/)?show/(?P<show_id>\d+)/[^/]+(?:/(?P<id>\d+)/(?P<season_id>\d+))?' def _real_extract(self, url): show_id, video_id, season_id = self._match_valid_url(url).groups() if video_id and int(video_id) > 0: return self.url_result( f'http://awaan.ae/media/{video_id}', 'AWAANVideo') elif season_id and int(season_id) > 0: return self.url_result(smuggle_url( f'http://awaan.ae/program/season/{season_id}', {'show_id': show_id}), 'AWAANSeason') else: return self.url_result( f'http://awaan.ae/program/{show_id}', 'AWAANSeason') class AWAANBaseIE(InfoExtractor): def _parse_video_data(self, video_data, video_id, is_live): title = video_data.get('title_en') or video_data['title_ar'] img = video_data.get('img') return { 'id': video_id, 'title': title, 'description': video_data.get('description_en') or video_data.get('description_ar'), 'thumbnail': format_field(img, None, 'http://admin.mangomolo.com/analytics/%s'), 'duration': int_or_none(video_data.get('duration')), 'timestamp': parse_iso8601(video_data.get('create_time'), ' '), 'is_live': is_live, 'uploader_id': video_data.get('user_id'), } class AWAANVideoIE(AWAANBaseIE): IE_NAME = 'awaan:video' _VALID_URL = r'https?://(?:www\.)?(?:awaan|dcndigital)\.ae/(?:#/)?(?:video(?:/[^/]+)?|media|catchup/[^/]+/[^/]+)/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.dcndigital.ae/#/video/%D8%B1%D8%AD%D9%84%D8%A9-%D8%A7%D9%84%D8%B9%D9%85%D8%B1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1/17375', 'md5': '5f61c33bfc7794315c671a62d43116aa', 'info_dict': { 'id': '17375', 'ext': 'mp4', 'title': 'رحلة العمر : الحلقة 1', 'description': 'md5:0156e935d870acb8ef0a66d24070c6d6', 'duration': 2041, 'timestamp': 1227504126, 'upload_date': '20081124', 'uploader_id': '71', }, }, { 'url': 
'http://awaan.ae/video/26723981/%D8%AF%D8%A7%D8%B1-%D8%A7%D9%84%D8%B3%D9%84%D8%A7%D9%85:-%D8%AE%D9%8A%D8%B1-%D8%AF%D9%88%D8%B1-%D8%A7%D9%84%D8%A3%D9%86%D8%B5%D8%A7%D8%B1', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_json( f'http://admin.mangomolo.com/analytics/index.php/plus/video?id={video_id}', video_id, headers={'Origin': 'http://awaan.ae'}) info = self._parse_video_data(video_data, video_id, False) embed_url = 'http://admin.mangomolo.com/analytics/index.php/customers/embed/video?' + urllib.parse.urlencode({ 'id': video_data['id'], 'user_id': video_data['user_id'], 'signature': video_data['signature'], 'countries': 'Q0M=', 'filter': 'DENY', }) info.update({ '_type': 'url_transparent', 'url': embed_url, 'ie_key': 'MangomoloVideo', }) return info class AWAANLiveIE(AWAANBaseIE): IE_NAME = 'awaan:live' _VALID_URL = r'https?://(?:www\.)?(?:awaan|dcndigital)\.ae/(?:#/)?live/(?P<id>\d+)' _TEST = { 'url': 'http://awaan.ae/live/6/dubai-tv', 'info_dict': { 'id': '6', 'ext': 'mp4', 'title': 're:Dubai Al Oula [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'upload_date': '20150107', 'timestamp': 1420588800, 'uploader_id': '71', }, 'params': { # m3u8 download 'skip_download': True, }, } def _real_extract(self, url): channel_id = self._match_id(url) channel_data = self._download_json( f'http://admin.mangomolo.com/analytics/index.php/plus/getchanneldetails?channel_id={channel_id}', channel_id, headers={'Origin': 'http://awaan.ae'}) info = self._parse_video_data(channel_data, channel_id, True) embed_url = 'http://admin.mangomolo.com/analytics/index.php/customers/embed/index?' 
+ urllib.parse.urlencode({ 'id': base64.b64encode(channel_data['user_id'].encode()).decode(), 'channelid': base64.b64encode(channel_data['id'].encode()).decode(), 'signature': channel_data['signature'], 'countries': 'Q0M=', 'filter': 'DENY', }) info.update({ '_type': 'url_transparent', 'url': embed_url, 'ie_key': 'MangomoloLive', }) return info class AWAANSeasonIE(InfoExtractor): IE_NAME = 'awaan:season' _VALID_URL = r'https?://(?:www\.)?(?:awaan|dcndigital)\.ae/(?:#/)?program/(?:(?P<show_id>\d+)|season/(?P<season_id>\d+))' _TEST = { 'url': 'http://dcndigital.ae/#/program/205024/%D9%85%D8%AD%D8%A7%D8%B6%D8%B1%D8%A7%D8%AA-%D8%A7%D9%84%D8%B4%D9%8A%D8%AE-%D8%A7%D9%84%D8%B4%D8%B9%D8%B1%D8%A7%D9%88%D9%8A', 'info_dict': { 'id': '7910', 'title': 'محاضرات الشيخ الشعراوي', }, 'playlist_mincount': 27, } def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) show_id, season_id = self._match_valid_url(url).groups() data = {} if season_id: data['season'] = season_id show_id = smuggled_data.get('show_id') if show_id is None: season = self._download_json( f'http://admin.mangomolo.com/analytics/index.php/plus/season_info?id={season_id}', season_id, headers={'Origin': 'http://awaan.ae'}) show_id = season['id'] data['show_id'] = show_id show = self._download_json( 'http://admin.mangomolo.com/analytics/index.php/plus/show', show_id, data=urlencode_postdata(data), headers={ 'Origin': 'http://awaan.ae', 'Content-Type': 'application/x-www-form-urlencoded', }) if not season_id: season_id = show['default_season'] for season in show['seasons']: if season['id'] == season_id: title = season.get('title_en') or season['title_ar'] entries = [] for video in show['videos']: video_id = str(video['id']) entries.append(self.url_result( f'http://awaan.ae/media/{video_id}', 'AWAANVideo', video_id)) return self.playlist_result(entries, season_id, title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/shiey.py
yt_dlp/extractor/shiey.py
import json from .common import InfoExtractor from .vimeo import VimeoIE from ..utils import extract_attributes from ..utils.traversal import find_element, traverse_obj class ShieyIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?shiey\.com/videos/v/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.shiey.com/videos/v/train-journey-to-edge-of-serbia-ep-2', 'info_dict': { 'id': '1103409448', 'ext': 'mp4', 'title': 'Train Journey To Edge of Serbia (Ep. 2)', 'uploader': 'shiey', 'uploader_url': '', 'duration': 1364, 'thumbnail': r're:^https?://.+', }, 'params': {'skip_download': True}, 'expected_warnings': ['Failed to parse XML: not well-formed'], }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) oembed_html = traverse_obj(webpage, ( {find_element(attr='data-controller', value='VideoEmbed', html=True)}, {extract_attributes}, 'data-config-embed-video', {json.loads}, 'oembedHtml', {str})) return self.url_result(VimeoIE._extract_url(url, oembed_html), VimeoIE)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/sina.py
yt_dlp/extractor/sina.py
from .common import InfoExtractor from ..networking import HEADRequest from ..utils import ( ExtractorError, clean_html, get_element_by_attribute, int_or_none, qualities, update_url_query, ) class SinaIE(InfoExtractor): _VALID_URL = r'''(?x)https?://(?:[^/?#]+\.)?video\.sina\.com\.cn/ (?: (?:view/|.*\#)(?P<id>\d+)| .+?/(?P<pseudo_id>[^/?#]+)(?:\.s?html)| # This is used by external sites like Weibo api/sinawebApi/outplay.php/(?P<token>.+?)\.swf ) ''' _TESTS = [ { 'url': 'http://video.sina.com.cn/news/spj/topvideoes20160504/?opsubject_id=top1#250576622', 'md5': 'd38433e2fc886007729735650ae4b3e9', 'info_dict': { 'id': '250576622', 'ext': 'mp4', 'title': '现场:克鲁兹宣布退选 特朗普将稳获提名', }, }, { 'url': 'http://video.sina.com.cn/v/b/101314253-1290078633.html', 'info_dict': { 'id': '101314253', 'ext': 'flv', 'title': '军方提高对朝情报监视级别', }, 'skip': 'the page does not exist or has been deleted', }, { 'url': 'http://video.sina.com.cn/view/250587748.html', 'md5': '3d1807a25c775092aab3bc157fff49b4', 'info_dict': { 'id': '250587748', 'ext': 'mp4', 'title': '瞬间泪目:8年前汶川地震珍贵视频首曝光', }, }, ] def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') if not video_id: if mobj.group('token') is not None: # The video id is in the redirected url self.to_screen('Getting video id') request = HEADRequest(url) _, urlh = self._download_webpage_handle(request, 'NA', False) return self._real_extract(urlh.url) else: pseudo_id = mobj.group('pseudo_id') webpage = self._download_webpage(url, pseudo_id) error = get_element_by_attribute('class', 'errtitle', webpage) if error: raise ExtractorError(f'{self.IE_NAME} said: {clean_html(error)}', expected=True) video_id = self._search_regex( r"video_id\s*:\s*'(\d+)'", webpage, 'video id') video_data = self._download_json( 'http://s.video.sina.com.cn/video/h5play', video_id, query={'video_id': video_id}) if video_data['code'] != 1: raise ExtractorError('{} said: {}'.format( self.IE_NAME, video_data['message']), expected=True) else: 
video_data = video_data['data'] title = video_data['title'] description = video_data.get('description') if description: description = description.strip() preference = qualities(['cif', 'sd', 'hd', 'fhd', 'ffd']) formats = [] for quality_id, quality in video_data.get('videos', {}).get('mp4', {}).items(): file_api = quality.get('file_api') file_id = quality.get('file_id') if not file_api or not file_id: continue formats.append({ 'format_id': quality_id, 'url': update_url_query(file_api, {'vid': file_id}), 'quality': preference(quality_id), 'ext': 'mp4', }) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': video_data.get('image'), 'duration': int_or_none(video_data.get('length')), 'timestamp': int_or_none(video_data.get('create_time')), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/spankbang.py
yt_dlp/extractor/spankbang.py
import re from .common import InfoExtractor from ..utils import ( ExtractorError, determine_ext, merge_dicts, parse_duration, parse_resolution, str_to_int, url_or_none, urlencode_postdata, urljoin, ) class SpankBangIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?:[^/]+\.)?spankbang\.com/ (?: (?P<id>[\da-z]+)/(?:video|play|embed)\b| [\da-z]+-(?P<id_2>[\da-z]+)/playlist/[^/?#&]+ ) ''' _TESTS = [{ 'url': 'https://spankbang.com/56b3d/video/the+slut+maker+hmv', 'md5': '2D13903DE4ECC7895B5D55930741650A', 'info_dict': { 'id': '56b3d', 'ext': 'mp4', 'title': 'The Slut Maker HMV', 'description': 'Girls getting converted into cock slaves.', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'Mindself', 'uploader_id': 'mindself', 'timestamp': 1617109572, 'upload_date': '20210330', 'age_limit': 18, }, }, { # 480p only 'url': 'http://spankbang.com/1vt0/video/solvane+gangbang', 'only_matching': True, }, { # no uploader 'url': 'http://spankbang.com/lklg/video/sex+with+anyone+wedding+edition+2', 'only_matching': True, }, { # mobile page 'url': 'http://m.spankbang.com/1o2de/video/can+t+remember+her+name', 'only_matching': True, }, { # 4k 'url': 'https://spankbang.com/1vwqx/video/jade+kush+solo+4k', 'only_matching': True, }, { 'url': 'https://m.spankbang.com/3vvn/play/fantasy+solo/480p/', 'only_matching': True, }, { 'url': 'https://m.spankbang.com/3vvn/play', 'only_matching': True, }, { 'url': 'https://spankbang.com/2y3td/embed/', 'only_matching': True, }, { 'url': 'https://spankbang.com/2v7ik-7ecbgu/playlist/latina+booty', 'only_matching': True, }] def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') or mobj.group('id_2') country = self.get_param('geo_bypass_country') or 'US' self._set_cookie('.spankbang.com', 'country', country.upper()) webpage = self._download_webpage( url.replace(f'/{video_id}/embed', f'/{video_id}/video'), video_id, impersonate=True) if re.search(r'<[^>]+\b(?:id|class)=["\']video_removed', webpage): raise 
ExtractorError( f'Video {video_id} is not available', expected=True) formats = [] def extract_format(format_id, format_url): f_url = url_or_none(format_url) if not f_url: return f = parse_resolution(format_id) ext = determine_ext(f_url) if format_id.startswith('m3u8') or ext == 'm3u8': formats.extend(self._extract_m3u8_formats( f_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) elif format_id.startswith('mpd') or ext == 'mpd': formats.extend(self._extract_mpd_formats( f_url, video_id, mpd_id='dash', fatal=False)) elif ext == 'mp4' or f.get('width') or f.get('height'): f.update({ 'url': f_url, 'format_id': format_id, }) formats.append(f) STREAM_URL_PREFIX = 'stream_url_' for mobj in re.finditer( rf'{STREAM_URL_PREFIX}(?P<id>[^\s=]+)\s*=\s*(["\'])(?P<url>(?:(?!\2).)+)\2', webpage): extract_format(mobj.group('id', 'url')) if not formats: stream_key = self._search_regex( r'data-streamkey\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'stream key', group='value') stream = self._download_json( 'https://spankbang.com/api/videos/stream', video_id, 'Downloading stream JSON', data=urlencode_postdata({ 'id': stream_key, 'data': 0, }), headers={ 'Referer': url, 'X-Requested-With': 'XMLHttpRequest', }) for format_id, format_url in stream.items(): if format_url and isinstance(format_url, list): format_url = format_url[0] extract_format(format_id, format_url) info = self._search_json_ld(webpage, video_id, default={}) title = self._html_search_regex( r'(?s)<h1[^>]+\btitle=["\']([^"]+)["\']>', webpage, 'title', default=None) description = self._search_regex( r'<div[^>]+\bclass=["\']bottom[^>]+>\s*<p>[^<]*</p>\s*<p>([^<]+)', webpage, 'description', default=None) thumbnail = self._og_search_thumbnail(webpage, default=None) uploader = self._html_search_regex( r'<svg[^>]+\bclass="(?:[^"]*?user[^"]*?)">.*?</svg>([^<]+)', webpage, 'uploader', default=None) uploader_id = self._html_search_regex( r'<a[^>]+href="/profile/([^"]+)"', webpage, 'uploader_id', 
default=None) duration = parse_duration(self._search_regex( r'<div[^>]+\bclass=["\']right_side[^>]+>\s*<span>([^<]+)', webpage, 'duration', default=None)) view_count = str_to_int(self._search_regex( r'([\d,.]+)\s+plays', webpage, 'view count', default=None)) age_limit = self._rta_search(webpage) return merge_dicts({ 'id': video_id, 'title': title or video_id, 'description': description, 'thumbnail': thumbnail, 'uploader': uploader, 'uploader_id': uploader_id, 'duration': duration, 'view_count': view_count, 'formats': formats, 'age_limit': age_limit, }, info, ) class SpankBangPlaylistIE(InfoExtractor): _VALID_URL = r'https?://(?:[^/]+\.)?spankbang\.com/(?P<id>[\da-z]+)/playlist/(?P<display_id>[^/]+)' _TEST = { 'url': 'https://spankbang.com/ug0k/playlist/big+ass+titties', 'info_dict': { 'id': 'ug0k', 'title': 'Big Ass Titties', }, 'playlist_mincount': 40, } def _real_extract(self, url): mobj = self._match_valid_url(url) playlist_id = mobj.group('id') webpage = self._download_webpage( url, playlist_id, headers={'Cookie': 'country=US; mobile=on'}) entries = [self.url_result( urljoin(url, mobj.group('path')), ie=SpankBangIE.ie_key(), video_id=mobj.group('id')) for mobj in re.finditer( r'<a[^>]+\bhref=(["\'])(?P<path>/?[\da-z]+-(?P<id>[\da-z]+)/playlist/[^"\'](?:(?!\1).)*)\1', webpage)] title = self._html_search_regex( r'<em>([^<]+)</em>\s+playlist\s*<', webpage, 'playlist title', fatal=False) return self.playlist_result(entries, playlist_id, title)
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/popcorntv.py
yt_dlp/extractor/popcorntv.py
from .common import InfoExtractor from ..utils import ( extract_attributes, int_or_none, unified_timestamp, ) class PopcornTVIE(InfoExtractor): _VALID_URL = r'https?://[^/]+\.popcorntv\.it/guarda/(?P<display_id>[^/]+)/(?P<id>\d+)' _TESTS = [{ 'url': 'https://animemanga.popcorntv.it/guarda/food-wars-battaglie-culinarie-episodio-01/9183', 'md5': '47d65a48d147caf692ab8562fe630b45', 'info_dict': { 'id': '9183', 'display_id': 'food-wars-battaglie-culinarie-episodio-01', 'ext': 'mp4', 'title': 'Food Wars, Battaglie Culinarie | Episodio 01', 'description': 'md5:b8bea378faae4651d3b34c6e112463d0', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1497610857, 'upload_date': '20170616', 'duration': 1440, 'view_count': int, }, }, { 'url': 'https://cinema.popcorntv.it/guarda/smash-cut/10433', 'only_matching': True, }] def _real_extract(self, url): mobj = self._match_valid_url(url) display_id, video_id = mobj.group('display_id', 'id') webpage = self._download_webpage(url, display_id) m3u8_url = extract_attributes( self._search_regex( r'(<link[^>]+itemprop=["\'](?:content|embed)Url[^>]*>)', webpage, 'content', ))['href'] formats = self._extract_m3u8_formats( m3u8_url, display_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') title = self._search_regex( r'<h1[^>]+itemprop=["\']name[^>]*>([^<]+)', webpage, 'title', default=None) or self._og_search_title(webpage) description = self._html_search_regex( r'(?s)<article[^>]+itemprop=["\']description[^>]*>(.+?)</article>', webpage, 'description', fatal=False) thumbnail = self._og_search_thumbnail(webpage) timestamp = unified_timestamp(self._html_search_meta( 'uploadDate', webpage, 'timestamp')) duration = int_or_none(self._html_search_meta( 'duration', webpage), invscale=60) view_count = int_or_none(self._html_search_meta( 'interactionCount', webpage, 'view count')) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'timestamp': timestamp, 'duration': duration, 
'view_count': view_count, 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/acast.py
yt_dlp/extractor/acast.py
from .common import InfoExtractor from ..utils import ( clean_html, clean_podcast_url, int_or_none, parse_iso8601, ) class ACastBaseIE(InfoExtractor): def _extract_episode(self, episode, show_info): title = episode['title'] info = { 'id': episode['id'], 'display_id': episode.get('episodeUrl'), 'url': clean_podcast_url(episode['url']), 'title': title, 'description': clean_html(episode.get('description') or episode.get('summary')), 'thumbnail': episode.get('image'), 'timestamp': parse_iso8601(episode.get('publishDate')), 'duration': int_or_none(episode.get('duration')), 'filesize': int_or_none(episode.get('contentLength')), 'season_number': int_or_none(episode.get('season')), 'episode': title, 'episode_number': int_or_none(episode.get('episode')), } info.update(show_info) return info def _extract_show_info(self, show): return { 'creator': show.get('author'), 'series': show.get('title'), } def _call_api(self, path, video_id, query=None): return self._download_json( 'https://feeder.acast.com/api/v1/shows/' + path, video_id, query=query) class ACastIE(ACastBaseIE): IE_NAME = 'acast' _VALID_URL = r'''(?x: https?:// (?: (?:(?:embed|www|shows)\.)?acast\.com/| play\.acast\.com/s/ ) (?P<channel>[^/?#]+)/(?:episodes/)?(?P<id>[^/#?"]+) )''' _EMBED_REGEX = [rf'(?x)<iframe[^>]+\bsrc=[\'"](?P<url>{_VALID_URL})'] _TESTS = [{ 'url': 'https://shows.acast.com/sparpodcast/episodes/2.raggarmordet-rosterurdetforflutna', 'info_dict': { 'id': '2a92b283-1a75-4ad8-8396-499c641de0d9', 'ext': 'mp3', 'title': '2. Raggarmordet - Röster ur det förflutna', 'description': 'md5:013959207e05011ad14a222cf22278cc', 'timestamp': 1477346700, 'upload_date': '20161024', 'duration': 2766, 'creators': ['Third Ear Studio'], 'series': 'Spår', 'episode': '2. 
Raggarmordet - Röster ur det förflutna', 'thumbnail': 'https://assets.pippa.io/shows/616ebe1886d7b1398620b943/616ebe33c7e6e70013cae7da.jpg', 'episode_number': 2, 'display_id': '2.raggarmordet-rosterurdetforflutna', 'season_number': 4, 'season': 'Season 4', }, }, { 'url': 'http://embed.acast.com/adambuxton/ep.12-adam-joeschristmaspodcast2015', 'only_matching': True, }, { 'url': 'https://play.acast.com/s/rattegangspodden/s04e09styckmordetihelenelund-del2-2', 'only_matching': True, }, { 'url': 'https://www.acast.com/sparpodcast/2.raggarmordet-rosterurdetforflutna', 'only_matching': True, }, { 'url': 'https://play.acast.com/s/sparpodcast/2a92b283-1a75-4ad8-8396-499c641de0d9', 'only_matching': True, }] _WEBPAGE_TESTS = [{ 'url': 'https://ausi.anu.edu.au/news/democracy-sausage-episode-can-labor-be-long-form-government', 'info_dict': { 'id': '646c68fb21fbf20011e9c651', 'ext': 'mp3', 'creator': 'The Australian National University', 'display_id': 'can-labor-be-a-long-form-government', 'duration': 2618, 'thumbnail': 'https://assets.pippa.io/shows/6113e8578b4903809f16f7e5/1684821529295-515b9520db9ce53275b995eb302f941c.jpeg', 'title': 'Can Labor be a long-form government?', 'episode': 'Can Labor be a long-form government?', 'upload_date': '20230523', 'series': 'Democracy Sausage with Mark Kenny', 'timestamp': 1684826362, 'description': 'md5:feabe1fc5004c78ee59c84a46bf4ba16', }, }] def _real_extract(self, url): channel, display_id = self._match_valid_url(url).groups() episode = self._call_api( f'{channel}/episodes/{display_id}', display_id, {'showInfo': 'true'}) return self._extract_episode( episode, self._extract_show_info(episode.get('show') or {})) class ACastChannelIE(ACastBaseIE): IE_NAME = 'acast:channel' _VALID_URL = r'''(?x) https?:// (?: (?:(?:www|shows)\.)?acast\.com/| play\.acast\.com/s/ ) (?P<id>[^/#?]+) ''' _TESTS = [{ 'url': 'https://www.acast.com/todayinfocus', 'info_dict': { 'id': '4efc5294-5385-4847-98bd-519799ce5786', 'title': 'Today in Focus', 'description': 
'md5:feca253de9947634605080cd9eeea2bf', }, 'playlist_mincount': 200, }, { 'url': 'http://play.acast.com/s/ft-banking-weekly', 'only_matching': True, }, { 'url': 'https://shows.acast.com/sparpodcast', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if ACastIE.suitable(url) else super().suitable(url) def _real_extract(self, url): show_slug = self._match_id(url) show = self._call_api(show_slug, show_slug) show_info = self._extract_show_info(show) entries = [] for episode in (show.get('episodes') or []): entries.append(self._extract_episode(episode, show_info)) return self.playlist_result( entries, show.get('id'), show.get('title'), show.get('description'))
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/filmarchiv.py
yt_dlp/extractor/filmarchiv.py
from .common import InfoExtractor from ..utils import clean_html from ..utils.traversal import ( find_element, find_elements, traverse_obj, ) class FilmArchivIE(InfoExtractor): IE_DESC = 'FILMARCHIV ON' _VALID_URL = r'https?://(?:www\.)?filmarchiv\.at/de/filmarchiv-on/video/(?P<id>f_[0-9a-zA-Z]{5,})' _TESTS = [{ 'url': 'https://www.filmarchiv.at/de/filmarchiv-on/video/f_0305p7xKrXUPBwoNE9x6mh', 'md5': '54a6596f6a84624531866008a77fa27a', 'info_dict': { 'id': 'f_0305p7xKrXUPBwoNE9x6mh', 'ext': 'mp4', 'title': 'Der Wurstelprater zur Kaiserzeit', 'description': 'md5:9843f92df5cc9a4975cee7aabcf6e3b2', 'thumbnail': r're:https://cdn\.filmarchiv\.at/f_0305/p7xKrXUPBwoNE9x6mh_v1/poster\.jpg', }, }, { 'url': 'https://www.filmarchiv.at/de/filmarchiv-on/video/f_0306vI3wO0tJIsfrqYFQXF', 'md5': '595385d7f54cb6529140ee8de7d1c3c7', 'info_dict': { 'id': 'f_0306vI3wO0tJIsfrqYFQXF', 'ext': 'mp4', 'title': 'Vor 70 Jahren: Wettgehen der Briefträger in Wien', 'description': 'md5:b2a2e4230923cd1969d471c552e62811', 'thumbnail': r're:https://cdn\.filmarchiv\.at/f_0306/vI3wO0tJIsfrqYFQXF_v1/poster\.jpg', }, }] def _real_extract(self, url): media_id = self._match_id(url) webpage = self._download_webpage(url, media_id) path = '/'.join((media_id[:6], media_id[6:])) formats, subtitles = self._extract_m3u8_formats_and_subtitles( f'https://cdn.filmarchiv.at/{path}_v1_sv1/playlist.m3u8', media_id) return { 'id': media_id, 'title': traverse_obj(webpage, ({find_element(tag='title-div')}, {clean_html})), 'description': traverse_obj(webpage, ( {find_elements(tag='div', attr='class', value=r'.*\bborder-base-content\b', regex=True)}, ..., {find_elements(tag='div', attr='class', value=r'.*\bprose\b', html=False, regex=True)}, ..., {clean_html}, any)), 'thumbnail': f'https://cdn.filmarchiv.at/{path}_v1/poster.jpg', 'formats': formats, 'subtitles': subtitles, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/qingting.py
yt_dlp/extractor/qingting.py
from .common import InfoExtractor from ..utils import traverse_obj class QingTingIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.|m\.)?(?:qingting\.fm|qtfm\.cn)/v?channels/(?P<channel>\d+)/programs/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.qingting.fm/channels/378005/programs/22257411/', 'md5': '47e6a94f4e621ed832c316fd1888fb3c', 'info_dict': { 'id': '22257411', 'title': '用了十年才修改,谁在乎教科书?', 'channel_id': '378005', 'channel': '睡前消息', 'uploader': '马督工', 'ext': 'm4a', }, }, { 'url': 'https://m.qtfm.cn/vchannels/378005/programs/23023573/', 'md5': '2703120b6abe63b5fa90b975a58f4c0e', 'info_dict': { 'id': '23023573', 'title': '【睡前消息488】重庆山火之后,有图≠真相', 'channel_id': '378005', 'channel': '睡前消息', 'uploader': '马督工', 'ext': 'm4a', }, }] def _real_extract(self, url): channel_id, pid = self._match_valid_url(url).group('channel', 'id') webpage = self._download_webpage( f'https://m.qtfm.cn/vchannels/{channel_id}/programs/{pid}/', pid) info = self._search_json(r'window\.__initStores\s*=', webpage, 'program info', pid) return { 'id': pid, 'title': traverse_obj(info, ('ProgramStore', 'programInfo', 'title')), 'channel_id': channel_id, 'channel': traverse_obj(info, ('ProgramStore', 'channelInfo', 'title')), 'uploader': traverse_obj(info, ('ProgramStore', 'podcasterInfo', 'podcaster', 'nickname')), 'url': traverse_obj(info, ('ProgramStore', 'programInfo', 'audioUrl')), 'vcodec': 'none', 'acodec': 'm4a', 'ext': 'm4a', }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false
yt-dlp/yt-dlp
https://github.com/yt-dlp/yt-dlp/blob/5a481d65fa99862110bb84d10a2f15f0cb47cab3/yt_dlp/extractor/izlesene.py
yt_dlp/extractor/izlesene.py
import urllib.parse from .common import InfoExtractor from ..utils import ( determine_ext, float_or_none, get_element_by_id, int_or_none, parse_iso8601, str_to_int, ) class IzleseneIE(InfoExtractor): _VALID_URL = r'''(?x) https?://(?:(?:www|m)\.)?izlesene\.com/ (?:video|embedplayer)/(?:[^/]+/)?(?P<id>[0-9]+) ''' _TESTS = [ { 'url': 'http://www.izlesene.com/video/sevincten-cildirtan-dogum-gunu-hediyesi/7599694', 'md5': '4384f9f0ea65086734b881085ee05ac2', 'info_dict': { 'id': '7599694', 'ext': 'mp4', 'title': 'Sevinçten Çıldırtan Doğum Günü Hediyesi', 'description': 'md5:253753e2655dde93f59f74b572454f6d', 'thumbnail': r're:^https?://.*\.jpg', 'uploader_id': 'pelikzzle', 'timestamp': int, 'upload_date': '20140702', 'duration': 95.395, 'age_limit': 0, }, }, { 'url': 'http://www.izlesene.com/video/tarkan-dortmund-2006-konseri/17997', 'md5': '97f09b6872bffa284cb7fa4f6910cb72', 'info_dict': { 'id': '17997', 'ext': 'mp4', 'title': 'Tarkan Dortmund 2006 Konseri', 'thumbnail': r're:^https://.*\.jpg', 'uploader_id': 'parlayankiz', 'timestamp': int, 'upload_date': '20061112', 'duration': 253.666, 'age_limit': 0, }, }, ] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(f'http://www.izlesene.com/video/{video_id}', video_id) video = self._parse_json( self._search_regex( r'videoObj\s*=\s*({.+?})\s*;\s*\n', webpage, 'streams'), video_id) title = video.get('videoTitle') or self._og_search_title(webpage) formats = [] for stream in video['media']['level']: source_url = stream.get('source') if not source_url or not isinstance(source_url, str): continue ext = determine_ext(url, 'mp4') quality = stream.get('value') height = int_or_none(quality) formats.append({ 'format_id': f'{quality}p' if quality else 'sd', 'url': urllib.parse.unquote(source_url), 'ext': ext, 'height': height, }) description = self._og_search_description(webpage, default=None) thumbnail = video.get('posterURL') or self._proto_relative_url( self._og_search_thumbnail(webpage), 
scheme='http:') uploader = self._html_search_regex( r"adduserUsername\s*=\s*'([^']+)';", webpage, 'uploader', fatal=False) timestamp = parse_iso8601(self._html_search_meta( 'uploadDate', webpage, 'upload date')) duration = float_or_none(video.get('duration') or self._html_search_regex( r'videoduration["\']?\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'duration', fatal=False, group='value'), scale=1000) view_count = str_to_int(get_element_by_id('videoViewCount', webpage)) comment_count = self._html_search_regex( r'comment_count\s*=\s*\'([^\']+)\';', webpage, 'comment_count', fatal=False) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'uploader_id': uploader, 'timestamp': timestamp, 'duration': duration, 'view_count': int_or_none(view_count), 'comment_count': int_or_none(comment_count), 'age_limit': self._family_friendly_search(webpage), 'formats': formats, }
python
Unlicense
5a481d65fa99862110bb84d10a2f15f0cb47cab3
2026-01-04T14:38:15.430780Z
false