repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/data/processor/processor_utils.py
src/llamafactory/data/processor/processor_utils.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import bisect from abc import ABC, abstractmethod from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Optional if TYPE_CHECKING: from transformers import PreTrainedTokenizer, ProcessorMixin from ...hparams import DataArguments from ..template import Template @dataclass class DatasetProcessor(ABC): r"""A class for data processors.""" template: "Template" tokenizer: "PreTrainedTokenizer" processor: Optional["ProcessorMixin"] data_args: "DataArguments" @abstractmethod def preprocess_dataset(self, examples: dict[str, list[Any]]) -> dict[str, list[Any]]: r"""Build model inputs from the examples.""" ... @abstractmethod def print_data_example(self, example: dict[str, list[int]]) -> None: r"""Print a data example to stdout.""" ... 
def search_for_fit(numbers: list[int], capacity: int) -> int:
    r"""Find the index of the largest number that fits into the knapsack with the given capacity.

    Args:
        numbers: numbers sorted in ascending order.
        capacity: the remaining capacity of the knapsack.

    Returns:
        Index of the largest number not exceeding ``capacity``, or ``-1`` if none fits.
    """
    index = bisect.bisect(numbers, capacity)
    return -1 if index == 0 else (index - 1)


def greedy_knapsack(numbers: list[int], capacity: int) -> list[list[int]]:
    r"""Implement efficient greedy algorithm with binary search for the knapsack problem.

    Args:
        numbers: the item sizes to pack. The input list is left unmodified.
        capacity: the maximum capacity of each knapsack.

    Returns:
        A list of knapsacks; each is a list of numbers packed greedily (largest-fit first).
        A number larger than ``capacity`` is placed alone in its own knapsack.
    """
    # Work on a sorted copy so the caller's list is not consumed in place
    # (the previous implementation sorted and popped the argument itself).
    remaining = sorted(numbers)  # ascending order, required by search_for_fit
    knapsacks = []
    while remaining:
        current_knapsack = []
        remaining_capacity = capacity
        while True:
            index = search_for_fit(remaining, remaining_capacity)
            if index == -1:
                break  # no more numbers fit in this knapsack

            remaining_capacity -= remaining[index]  # update the remaining capacity
            current_knapsack.append(remaining.pop(index))  # add the number to knapsack

        if not current_knapsack:
            # Every remaining number exceeds the capacity: pack the smallest one alone
            # to guarantee progress (the previous implementation looped forever here,
            # appending empty knapsacks without ever draining the list).
            current_knapsack.append(remaining.pop(0))

        knapsacks.append(current_knapsack)

    return knapsacks


def infer_seqlen(source_len: int, target_len: int, cutoff_len: int) -> tuple[int, int]:
    r"""Compute the real sequence length after truncation by the cutoff_len.

    Args:
        source_len: length of the prompt tokens.
        target_len: length of the response tokens.
        cutoff_len: maximum total length allowed.

    Returns:
        A ``(new_source_len, new_target_len)`` pair with sum at most ``cutoff_len``.
    """
    if target_len * 2 < cutoff_len:  # truncate source
        max_target_len = cutoff_len
    elif source_len * 2 < cutoff_len:  # truncate target
        max_target_len = cutoff_len - source_len
    else:  # truncate both, proportionally to their original lengths
        max_target_len = int(cutoff_len * (target_len / (source_len + target_len)))

    new_target_len = min(max_target_len, target_len)
    max_source_len = max(cutoff_len - new_target_len, 0)
    new_source_len = min(max_source_len, source_len)
    return new_source_len, new_target_len
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/data/processor/unsupervised.py
src/llamafactory/data/processor/unsupervised.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import defaultdict
from typing import TYPE_CHECKING, Any, Optional

from ...extras import logging
from ..data_utils import Role
from .processor_utils import DatasetProcessor, infer_seqlen


if TYPE_CHECKING:
    from ..mm_plugin import AudioInput, ImageInput, VideoInput


logger = logging.get_logger(__name__)


class UnsupervisedDatasetProcessor(DatasetProcessor):
    r"""Processor that encodes examples as prompt-only inputs plus (possibly empty) target labels."""

    def _encode_data_example(
        self,
        prompt: list[dict[str, str]],
        response: list[dict[str, str]],
        system: Optional[str],
        tools: Optional[str],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
    ) -> tuple[list[int], list[int]]:
        r"""Encode one example into ``(input_ids, labels)`` token id lists, truncated to cutoff_len."""
        # Use the single provided response if any; otherwise append an empty assistant
        # turn so the template still renders a complete conversation.
        if len(response) == 1:
            messages = prompt + response
        else:
            messages = prompt + [{"role": Role.ASSISTANT.value, "content": ""}]

        messages = self.template.mm_plugin.process_messages(messages, images, videos, audios, self.processor)
        input_ids, labels = self.template.encode_oneturn(self.tokenizer, messages, system, tools)
        if self.template.efficient_eos:
            # Template omits EOS during encoding; append it to the labels here.
            labels += [self.tokenizer.eos_token_id]

        input_ids, _ = self.template.mm_plugin.process_token_ids(
            input_ids, None, images, videos, audios, self.tokenizer, self.processor
        )
        # Truncate source/target jointly so the total stays within cutoff_len.
        source_len, target_len = infer_seqlen(len(input_ids), len(labels), self.data_args.cutoff_len)
        input_ids = input_ids[:source_len]
        labels = labels[:target_len]
        return input_ids, labels

    def preprocess_dataset(self, examples: dict[str, list[Any]]) -> dict[str, list[Any]]:
        # build inputs with format `<bos> X` and labels with format `Y <eos>`
        model_inputs = defaultdict(list)
        for i in range(len(examples["_prompt"])):
            # A valid prompt has an odd number of turns; drop anything else.
            if len(examples["_prompt"][i]) % 2 != 1:
                logger.warning_rank0(
                    "Dropped invalid example: {}".format(examples["_prompt"][i] + examples["_response"][i])
                )
                continue

            input_ids, labels = self._encode_data_example(
                prompt=examples["_prompt"][i],
                response=examples["_response"][i],
                system=examples["_system"][i],
                tools=examples["_tools"][i],
                images=examples["_images"][i] or [],
                videos=examples["_videos"][i] or [],
                audios=examples["_audios"][i] or [],
            )
            model_inputs["input_ids"].append(input_ids)
            model_inputs["attention_mask"].append([1] * len(input_ids))
            model_inputs["labels"].append(labels)
            model_inputs["images"].append(examples["_images"][i])
            model_inputs["videos"].append(examples["_videos"][i])
            model_inputs["audios"].append(examples["_audios"][i])

        return model_inputs

    def print_data_example(self, example: dict[str, list[int]]) -> None:
        r"""Print the raw token ids and their decoded text for one encoded example."""
        print("input_ids:\n{}".format(example["input_ids"]))
        print("inputs:\n{}".format(self.tokenizer.decode(example["input_ids"], skip_special_tokens=False)))
        print("label_ids:\n{}".format(example["labels"]))
        print("labels:\n{}".format(self.tokenizer.decode(example["labels"], skip_special_tokens=False)))
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/data/processor/__init__.py
src/llamafactory/data/processor/__init__.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .feedback import FeedbackDatasetProcessor from .pairwise import PairwiseDatasetProcessor from .pretrain import PretrainDatasetProcessor from .processor_utils import DatasetProcessor from .supervised import PackedSupervisedDatasetProcessor, SupervisedDatasetProcessor from .unsupervised import UnsupervisedDatasetProcessor __all__ = [ "DatasetProcessor", "FeedbackDatasetProcessor", "PackedSupervisedDatasetProcessor", "PairwiseDatasetProcessor", "PretrainDatasetProcessor", "SupervisedDatasetProcessor", "UnsupervisedDatasetProcessor", ]
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/data/processor/pairwise.py
src/llamafactory/data/processor/pairwise.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import defaultdict
from typing import TYPE_CHECKING, Any, Optional

from ...extras import logging
from ...extras.constants import IGNORE_INDEX
from .processor_utils import DatasetProcessor, infer_seqlen


if TYPE_CHECKING:
    from ..mm_plugin import AudioInput, ImageInput, VideoInput


logger = logging.get_logger(__name__)


class PairwiseDatasetProcessor(DatasetProcessor):
    r"""Processor for preference data: encodes a shared prompt with a chosen and a rejected response."""

    def _encode_data_example(
        self,
        prompt: list[dict[str, str]],
        response: list[dict[str, str]],
        system: Optional[str],
        tools: Optional[str],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
    ) -> tuple[list[int], list[int], list[int], list[int]]:
        r"""Encode one pair as ``(chosen_input_ids, chosen_labels, rejected_input_ids, rejected_labels)``.

        ``response[0]`` is the chosen answer and ``response[1]`` the rejected one; prompt
        tokens in the label sequences are masked with ``IGNORE_INDEX``.
        """
        chosen_messages = self.template.mm_plugin.process_messages(
            prompt + [response[0]], images, videos, audios, self.processor
        )
        rejected_messages = self.template.mm_plugin.process_messages(
            prompt + [response[1]], images, videos, audios, self.processor
        )
        prompt_ids, chosen_ids = self.template.encode_oneturn(self.tokenizer, chosen_messages, system, tools)
        _, rejected_ids = self.template.encode_oneturn(self.tokenizer, rejected_messages, system, tools)

        if self.template.efficient_eos:
            # Template omits EOS during encoding; append it to both responses here.
            chosen_ids += [self.tokenizer.eos_token_id]
            rejected_ids += [self.tokenizer.eos_token_id]

        prompt_ids, _ = self.template.mm_plugin.process_token_ids(
            prompt_ids, None, images, videos, audios, self.tokenizer, self.processor
        )
        # consider the response is more important
        source_len, target_len = infer_seqlen(
            len(prompt_ids), max(len(chosen_ids), len(rejected_ids)), self.data_args.cutoff_len
        )
        prompt_ids = prompt_ids[:source_len]
        chosen_ids = chosen_ids[:target_len]
        rejected_ids = rejected_ids[:target_len]

        chosen_input_ids = prompt_ids + chosen_ids
        chosen_labels = [IGNORE_INDEX] * source_len + chosen_ids
        rejected_input_ids = prompt_ids + rejected_ids
        rejected_labels = [IGNORE_INDEX] * source_len + rejected_ids
        return chosen_input_ids, chosen_labels, rejected_input_ids, rejected_labels

    def preprocess_dataset(self, examples: dict[str, list[Any]]) -> dict[str, list[Any]]:
        # build input pairs with format `<bos> X`, `Y1 <eos>` and `Y2 <eos>`
        model_inputs = defaultdict(list)
        for i in range(len(examples["_prompt"])):
            # Require an odd number of prompt turns and at least two responses (chosen + rejected).
            if len(examples["_prompt"][i]) % 2 != 1 or len(examples["_response"][i]) < 2:
                logger.warning_rank0(
                    "Dropped invalid example: {}".format(examples["_prompt"][i] + examples["_response"][i])
                )
                continue

            chosen_input_ids, chosen_labels, rejected_input_ids, rejected_labels = self._encode_data_example(
                prompt=examples["_prompt"][i],
                response=examples["_response"][i],
                system=examples["_system"][i],
                tools=examples["_tools"][i],
                images=examples["_images"][i] or [],
                videos=examples["_videos"][i] or [],
                audios=examples["_audios"][i] or [],
            )
            model_inputs["chosen_input_ids"].append(chosen_input_ids)
            model_inputs["chosen_attention_mask"].append([1] * len(chosen_input_ids))
            model_inputs["chosen_labels"].append(chosen_labels)
            model_inputs["rejected_input_ids"].append(rejected_input_ids)
            model_inputs["rejected_attention_mask"].append([1] * len(rejected_input_ids))
            model_inputs["rejected_labels"].append(rejected_labels)
            model_inputs["images"].append(examples["_images"][i])
            model_inputs["videos"].append(examples["_videos"][i])
            model_inputs["audios"].append(examples["_audios"][i])

        return model_inputs

    def print_data_example(self, example: dict[str, list[int]]) -> None:
        r"""Print token ids and decoded text for both the chosen and rejected sequences."""
        # Strip the IGNORE_INDEX mask before decoding the label text.
        valid_chosen_labels = list(filter(lambda x: x != IGNORE_INDEX, example["chosen_labels"]))
        valid_rejected_labels = list(filter(lambda x: x != IGNORE_INDEX, example["rejected_labels"]))
        print("chosen_input_ids:\n{}".format(example["chosen_input_ids"]))
        print(
            "chosen_inputs:\n{}".format(self.tokenizer.decode(example["chosen_input_ids"], skip_special_tokens=False))
        )
        print("chosen_label_ids:\n{}".format(example["chosen_labels"]))
        print(f"chosen_labels:\n{self.tokenizer.decode(valid_chosen_labels, skip_special_tokens=False)}")
        print("rejected_input_ids:\n{}".format(example["rejected_input_ids"]))
        print(
            "rejected_inputs:\n{}".format(
                self.tokenizer.decode(example["rejected_input_ids"], skip_special_tokens=False)
            )
        )
        print("rejected_label_ids:\n{}".format(example["rejected_labels"]))
        print(f"rejected_labels:\n{self.tokenizer.decode(valid_rejected_labels, skip_special_tokens=False)}")
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/extras/constants.py
src/llamafactory/extras/constants.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from collections import OrderedDict, defaultdict from enum import Enum, unique from peft.utils import SAFETENSORS_WEIGHTS_NAME as SAFE_ADAPTER_WEIGHTS_NAME from peft.utils import WEIGHTS_NAME as ADAPTER_WEIGHTS_NAME from transformers.utils import SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME AUDIO_PLACEHOLDER = os.getenv("AUDIO_PLACEHOLDER", "<audio>") CHECKPOINT_NAMES = { SAFE_ADAPTER_WEIGHTS_NAME, ADAPTER_WEIGHTS_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, } CHOICES = ["A", "B", "C", "D"] DATA_CONFIG = "dataset_info.json" DEFAULT_TEMPLATE = defaultdict(str) FILEEXT2TYPE = { "arrow": "arrow", "csv": "csv", "json": "json", "jsonl": "json", "parquet": "parquet", "txt": "text", } IGNORE_INDEX = -100 IMAGE_PLACEHOLDER = os.getenv("IMAGE_PLACEHOLDER", "<image>") LAYERNORM_NAMES = {"norm", "ln"} LLAMABOARD_CONFIG = "llamaboard_config.yaml" MCA_SUPPORTED_MODELS = { "deepseek_v3", "llama", "mistral", "mixtral", "qwen2", "qwen2_vl", "qwen2_5_vl", "qwen3_vl", "qwen3", "qwen3_moe", "qwen3_next", } METHODS = ["full", "freeze", "lora", "oft"] MOD_SUPPORTED_MODELS = {"bloom", "falcon", "gemma", "llama", "mistral", "mixtral", "phi", "starcoder2"} MULTIMODAL_SUPPORTED_MODELS = set() PEFT_METHODS = {"lora", "oft"} RUNNING_LOG = "running_log.txt" SUBJECTS = ["Average", "STEM", "Social Sciences", "Humanities", "Other"] 
SUPPORTED_MODELS = OrderedDict() TRAINER_LOG = "trainer_log.jsonl" TRAINING_ARGS = "training_args.yaml" TRAINING_STAGES = { "Supervised Fine-Tuning": "sft", "Reward Modeling": "rm", "PPO": "ppo", "DPO": "dpo", "KTO": "kto", "Pre-Training": "pt", } STAGES_USE_PAIR_DATA = {"rm", "dpo"} SUPPORTED_CLASS_FOR_S2ATTN = {"llama"} SWANLAB_CONFIG = "swanlab_public_config.json" VIDEO_PLACEHOLDER = os.getenv("VIDEO_PLACEHOLDER", "<video>") V_HEAD_WEIGHTS_NAME = "value_head.bin" V_HEAD_SAFE_WEIGHTS_NAME = "value_head.safetensors" class AttentionFunction(str, Enum): AUTO = "auto" DISABLED = "disabled" SDPA = "sdpa" FA2 = "fa2" FA3 = "fa3" class EngineName(str, Enum): HF = "huggingface" VLLM = "vllm" SGLANG = "sglang" KT = "ktransformers" class DownloadSource(str, Enum): DEFAULT = "hf" MODELSCOPE = "ms" OPENMIND = "om" @unique class QuantizationMethod(str, Enum): r"""Borrowed from `transformers.utils.quantization_config.QuantizationMethod`.""" BNB = "bnb" GPTQ = "gptq" AWQ = "awq" AQLM = "aqlm" QUANTO = "quanto" EETQ = "eetq" HQQ = "hqq" MXFP4 = "mxfp4" FP8 = "fp8" class RopeScaling(str, Enum): LINEAR = "linear" DYNAMIC = "dynamic" YARN = "yarn" LLAMA3 = "llama3" def register_model_group( models: dict[str, dict[DownloadSource, str]], template: str | None = None, multimodal: bool = False, ) -> None: for name, path in models.items(): SUPPORTED_MODELS[name] = path if template is not None and ( any(suffix in name for suffix in ("-Chat", "-Distill", "-Instruct", "-Thinking")) or multimodal ): DEFAULT_TEMPLATE[name] = template if multimodal: MULTIMODAL_SUPPORTED_MODELS.add(name) register_model_group( models={ "Aya-23-8B-Chat": { DownloadSource.DEFAULT: "CohereForAI/aya-23-8B", }, "Aya-23-35B-Chat": { DownloadSource.DEFAULT: "CohereForAI/aya-23-35B", }, }, template="cohere", ) register_model_group( models={ "Baichuan-7B-Base": { DownloadSource.DEFAULT: "baichuan-inc/Baichuan-7B", DownloadSource.MODELSCOPE: "baichuan-inc/baichuan-7B", }, "Baichuan-13B-Base": { DownloadSource.DEFAULT: 
"baichuan-inc/Baichuan-13B-Base", DownloadSource.MODELSCOPE: "baichuan-inc/Baichuan-13B-Base", }, "Baichuan-13B-Chat": { DownloadSource.DEFAULT: "baichuan-inc/Baichuan-13B-Chat", DownloadSource.MODELSCOPE: "baichuan-inc/Baichuan-13B-Chat", }, }, template="baichuan", ) register_model_group( models={ "Baichuan2-7B-Base": { DownloadSource.DEFAULT: "baichuan-inc/Baichuan2-7B-Base", DownloadSource.MODELSCOPE: "baichuan-inc/Baichuan2-7B-Base", }, "Baichuan2-13B-Base": { DownloadSource.DEFAULT: "baichuan-inc/Baichuan2-13B-Base", DownloadSource.MODELSCOPE: "baichuan-inc/Baichuan2-13B-Base", DownloadSource.OPENMIND: "Baichuan/Baichuan2_13b_base_pt", }, "Baichuan2-7B-Chat": { DownloadSource.DEFAULT: "baichuan-inc/Baichuan2-7B-Chat", DownloadSource.MODELSCOPE: "baichuan-inc/Baichuan2-7B-Chat", DownloadSource.OPENMIND: "Baichuan/Baichuan2_7b_chat_pt", }, "Baichuan2-13B-Chat": { DownloadSource.DEFAULT: "baichuan-inc/Baichuan2-13B-Chat", DownloadSource.MODELSCOPE: "baichuan-inc/Baichuan2-13B-Chat", DownloadSource.OPENMIND: "Baichuan/Baichuan2_13b_chat_pt", }, }, template="baichuan2", ) register_model_group( models={ "BLOOM-560M": { DownloadSource.DEFAULT: "bigscience/bloom-560m", DownloadSource.MODELSCOPE: "AI-ModelScope/bloom-560m", }, "BLOOM-3B": { DownloadSource.DEFAULT: "bigscience/bloom-3b", DownloadSource.MODELSCOPE: "AI-ModelScope/bloom-3b", }, "BLOOM-7B1": { DownloadSource.DEFAULT: "bigscience/bloom-7b1", DownloadSource.MODELSCOPE: "AI-ModelScope/bloom-7b1", }, }, ) register_model_group( models={ "BLOOMZ-560M": { DownloadSource.DEFAULT: "bigscience/bloomz-560m", DownloadSource.MODELSCOPE: "AI-ModelScope/bloomz-560m", }, "BLOOMZ-3B": { DownloadSource.DEFAULT: "bigscience/bloomz-3b", DownloadSource.MODELSCOPE: "AI-ModelScope/bloomz-3b", }, "BLOOMZ-7B1-mt": { DownloadSource.DEFAULT: "bigscience/bloomz-7b1-mt", DownloadSource.MODELSCOPE: "AI-ModelScope/bloomz-7b1-mt", }, }, ) register_model_group( models={ "BlueLM-7B-Base": { DownloadSource.DEFAULT: "vivo-ai/BlueLM-7B-Base", 
DownloadSource.MODELSCOPE: "vivo-ai/BlueLM-7B-Base", }, "BlueLM-7B-Chat": { DownloadSource.DEFAULT: "vivo-ai/BlueLM-7B-Chat", DownloadSource.MODELSCOPE: "vivo-ai/BlueLM-7B-Chat", }, }, template="bluelm", ) register_model_group( models={ "Breeze-7B": { DownloadSource.DEFAULT: "MediaTek-Research/Breeze-7B-Base-v1_0", }, "Breeze-7B-Instruct": { DownloadSource.DEFAULT: "MediaTek-Research/Breeze-7B-Instruct-v1_0", }, }, template="breeze", ) register_model_group( models={ "ChatGLM2-6B-Chat": { DownloadSource.DEFAULT: "zai-org/chatglm2-6b", DownloadSource.MODELSCOPE: "ZhipuAI/chatglm2-6b", } }, template="chatglm2", ) register_model_group( models={ "ChatGLM3-6B-Base": { DownloadSource.DEFAULT: "zai-org/chatglm3-6b-base", DownloadSource.MODELSCOPE: "ZhipuAI/chatglm3-6b-base", }, "ChatGLM3-6B-Chat": { DownloadSource.DEFAULT: "zai-org/chatglm3-6b", DownloadSource.MODELSCOPE: "ZhipuAI/chatglm3-6b", }, }, template="chatglm3", ) register_model_group( models={ "Chinese-Llama-2-1.3B": { DownloadSource.DEFAULT: "hfl/chinese-llama-2-1.3b", DownloadSource.MODELSCOPE: "AI-ModelScope/chinese-llama-2-1.3b", }, "Chinese-Llama-2-7B": { DownloadSource.DEFAULT: "hfl/chinese-llama-2-7b", DownloadSource.MODELSCOPE: "AI-ModelScope/chinese-llama-2-7b", }, "Chinese-Llama-2-13B": { DownloadSource.DEFAULT: "hfl/chinese-llama-2-13b", DownloadSource.MODELSCOPE: "AI-ModelScope/chinese-llama-2-13b", }, "Chinese-Alpaca-2-1.3B-Chat": { DownloadSource.DEFAULT: "hfl/chinese-alpaca-2-1.3b", DownloadSource.MODELSCOPE: "AI-ModelScope/chinese-alpaca-2-1.3b", }, "Chinese-Alpaca-2-7B-Chat": { DownloadSource.DEFAULT: "hfl/chinese-alpaca-2-7b", DownloadSource.MODELSCOPE: "AI-ModelScope/chinese-alpaca-2-7b", }, "Chinese-Alpaca-2-13B-Chat": { DownloadSource.DEFAULT: "hfl/chinese-alpaca-2-13b", DownloadSource.MODELSCOPE: "AI-ModelScope/chinese-alpaca-2-13b", }, }, template="llama2_zh", ) register_model_group( models={ "CodeGeeX4-9B-Chat": { DownloadSource.DEFAULT: "zai-org/codegeex4-all-9b", 
DownloadSource.MODELSCOPE: "ZhipuAI/codegeex4-all-9b", }, }, template="codegeex4", ) register_model_group( models={ "CodeGemma-7B": { DownloadSource.DEFAULT: "google/codegemma-7b", }, "CodeGemma-7B-Instruct": { DownloadSource.DEFAULT: "google/codegemma-7b-it", DownloadSource.MODELSCOPE: "AI-ModelScope/codegemma-7b-it", }, "CodeGemma-1.1-2B": { DownloadSource.DEFAULT: "google/codegemma-1.1-2b", }, "CodeGemma-1.1-7B-Instruct": { DownloadSource.DEFAULT: "google/codegemma-1.1-7b-it", }, }, template="gemma", ) register_model_group( models={ "Codestral-22B-v0.1-Chat": { DownloadSource.DEFAULT: "mistralai/Codestral-22B-v0.1", DownloadSource.MODELSCOPE: "swift/Codestral-22B-v0.1", }, }, template="mistral", ) register_model_group( models={ "CommandR-35B-Chat": { DownloadSource.DEFAULT: "CohereForAI/c4ai-command-r-v01", DownloadSource.MODELSCOPE: "AI-ModelScope/c4ai-command-r-v01", }, "CommandR-Plus-104B-Chat": { DownloadSource.DEFAULT: "CohereForAI/c4ai-command-r-plus", DownloadSource.MODELSCOPE: "AI-ModelScope/c4ai-command-r-plus", }, "CommandR-35B-4bit-Chat": { DownloadSource.DEFAULT: "CohereForAI/c4ai-command-r-v01-4bit", DownloadSource.MODELSCOPE: "mirror013/c4ai-command-r-v01-4bit", }, "CommandR-Plus-104B-4bit-Chat": { DownloadSource.DEFAULT: "CohereForAI/c4ai-command-r-plus-4bit", }, }, template="cohere", ) register_model_group( models={ "DBRX-132B-Base": { DownloadSource.DEFAULT: "databricks/dbrx-base", DownloadSource.MODELSCOPE: "AI-ModelScope/dbrx-base", }, "DBRX-132B-Instruct": { DownloadSource.DEFAULT: "databricks/dbrx-instruct", DownloadSource.MODELSCOPE: "AI-ModelScope/dbrx-instruct", }, }, template="dbrx", ) register_model_group( models={ "DeepSeek-LLM-7B-Base": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-llm-7b-base", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-llm-7b-base", }, "DeepSeek-LLM-67B-Base": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-llm-67b-base", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-llm-67b-base", }, 
"DeepSeek-LLM-7B-Chat": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-llm-7b-chat", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-llm-7b-chat", }, "DeepSeek-LLM-67B-Chat": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-llm-67b-chat", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-llm-67b-chat", }, "DeepSeek-Math-7B-Base": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-math-7b-base", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-math-7b-base", }, "DeepSeek-Math-7B-Instruct": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-math-7b-instruct", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-math-7b-instruct", }, "DeepSeek-MoE-16B-Base": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-moe-16b-base", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-moe-16b-base", }, "DeepSeek-MoE-16B-Chat": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-moe-16b-chat", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-moe-16b-chat", }, "DeepSeek-V2-16B-Base": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V2-Lite", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V2-Lite", }, "DeepSeek-V2-236B-Base": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V2", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V2", }, "DeepSeek-V2-16B-Chat": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V2-Lite-Chat", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V2-Lite-Chat", }, "DeepSeek-V2-236B-Chat": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V2-Chat", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V2-Chat", }, "DeepSeek-Coder-V2-16B-Base": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-Coder-V2-Lite-Base", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-Coder-V2-Lite-Base", }, "DeepSeek-Coder-V2-236B-Base": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-Coder-V2-Base", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-Coder-V2-Base", }, "DeepSeek-Coder-V2-16B-Instruct": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct", DownloadSource.MODELSCOPE: 
"deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct", }, "DeepSeek-Coder-V2-236B-Instruct": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-Coder-V2-Instruct", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-Coder-V2-Instruct", }, }, template="deepseek", ) register_model_group( models={ "DeepSeek-Coder-6.7B-Base": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-6.7b-base", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-coder-6.7b-base", }, "DeepSeek-Coder-7B-Base": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-7b-base-v1.5", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-coder-7b-base-v1.5", }, "DeepSeek-Coder-33B-Base": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-33b-base", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-coder-33b-base", }, "DeepSeek-Coder-6.7B-Instruct": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-6.7b-instruct", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-coder-6.7b-instruct", }, "DeepSeek-Coder-7B-Instruct": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-7b-instruct-v1.5", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-coder-7b-instruct-v1.5", }, "DeepSeek-Coder-33B-Instruct": { DownloadSource.DEFAULT: "deepseek-ai/deepseek-coder-33b-instruct", DownloadSource.MODELSCOPE: "deepseek-ai/deepseek-coder-33b-instruct", }, }, template="deepseekcoder", ) register_model_group( models={ "DeepSeek-V2-0628-236B-Chat": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V2-Chat-0628", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V2-Chat-0628", }, "DeepSeek-V2.5-236B-Chat": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V2.5", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V2.5", }, "DeepSeek-V2.5-1210-236B-Chat": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V2.5-1210", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V2.5-1210", }, "DeepSeek-V3-671B-Base": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V3-Base", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V3-Base", }, "DeepSeek-V3-671B-Chat": { 
DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V3", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V3", }, "DeepSeek-V3-0324-671B-Chat": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-V3-0324", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-V3-0324", }, }, template="deepseek3", ) register_model_group( models={ "DeepSeek-R1-1.5B-Distill": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", }, "DeepSeek-R1-7B-Distill": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B", }, "DeepSeek-R1-8B-Distill": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", }, "DeepSeek-R1-14B-Distill": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B", }, "DeepSeek-R1-32B-Distill": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", }, "DeepSeek-R1-70B-Distill": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-R1-Distill-Llama-70B", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-R1-Distill-Llama-70B", }, "DeepSeek-R1-671B-Chat-Zero": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-R1-Zero", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-R1-Zero", }, "DeepSeek-R1-671B-Chat": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-R1", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-R1", }, "DeepSeek-R1-0528-8B-Distill": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-R1-0528-Qwen3-8B", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-R1-0528-Qwen3-8B", }, "DeepSeek-R1-0528-671B-Chat": { DownloadSource.DEFAULT: "deepseek-ai/DeepSeek-R1-0528", DownloadSource.MODELSCOPE: "deepseek-ai/DeepSeek-R1-0528", }, }, template="deepseekr1", ) register_model_group( 
models={ "Devstral-Small-2507-Instruct": { DownloadSource.DEFAULT: "mistralai/Devstral-Small-2507", DownloadSource.MODELSCOPE: "mistralai/Devstral-Small-2507", }, }, template="mistral_small", ) register_model_group( models={ "dots.ocr": { DownloadSource.DEFAULT: "rednote-hilab/dots.ocr", DownloadSource.MODELSCOPE: "rednote-hilab/dots.ocr", }, }, template="dots_ocr", multimodal=True, ) register_model_group( models={ "ERNIE-4.5-21B-A3B-Thinking": { DownloadSource.DEFAULT: "baidu/ERNIE-4.5-21B-A3B-Thinking", DownloadSource.MODELSCOPE: "PaddlePaddle/ERNIE-4.5-21B-A3B-Thinking", }, }, template="ernie", ) register_model_group( models={ "ERNIE-4.5-0.3B-PT": { DownloadSource.DEFAULT: "baidu/ERNIE-4.5-0.3B-PT", DownloadSource.MODELSCOPE: "PaddlePaddle/ERNIE-4.5-0.3B-PT", }, "ERNIE-4.5-21B-A3B-PT": { DownloadSource.DEFAULT: "baidu/ERNIE-4.5-21B-A3B-PT", DownloadSource.MODELSCOPE: "PaddlePaddle/ERNIE-4.5-21B-A3B-PT", }, "ERNIE-4.5-300B-A47B-PT": { DownloadSource.DEFAULT: "baidu/ERNIE-4.5-300B-A47B-PT", DownloadSource.MODELSCOPE: "PaddlePaddle/ERNIE-4.5-300B-A47B-PT", }, }, template="ernie_nothink", ) register_model_group( models={ "ERNIE-4.5-VL-28B-A3B-PT": { DownloadSource.DEFAULT: "baidu/ERNIE-4.5-VL-28B-A3B-PT", DownloadSource.MODELSCOPE: "PaddlePaddle/ERNIE-4.5-VL-28B-A3B-PT", }, "ERNIE-4.5-VL-28B-A3B-Thinking": { DownloadSource.DEFAULT: "baidu/ERNIE-4.5-VL-28B-A3B-Thinking", DownloadSource.MODELSCOPE: "PaddlePaddle/ERNIE-4.5-VL-28B-A3B-Thinking", }, "ERNIE-4.5-VL-424B-A47B-Base-PT": { DownloadSource.DEFAULT: "baidu/ERNIE-4.5-VL-424B-A47B-PT", DownloadSource.MODELSCOPE: "PaddlePaddle/ERNIE-4.5-VL-424B-A47B-PT", }, }, template="ernie_vl", multimodal=True, ) register_model_group( models={ "EXAONE-3.0-7.8B-Instruct": { DownloadSource.DEFAULT: "LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct", }, }, template="exaone", ) register_model_group( models={ "Falcon-7B": { DownloadSource.DEFAULT: "tiiuae/falcon-7b", DownloadSource.MODELSCOPE: "AI-ModelScope/falcon-7b", }, "Falcon-11B": { 
DownloadSource.DEFAULT: "tiiuae/falcon-11B", DownloadSource.MODELSCOPE: "tiiuae/falcon-11B", }, "Falcon-40B": { DownloadSource.DEFAULT: "tiiuae/falcon-40b", DownloadSource.MODELSCOPE: "AI-ModelScope/falcon-40b", }, "Falcon-180B": { DownloadSource.DEFAULT: "tiiuae/falcon-180b", DownloadSource.MODELSCOPE: "modelscope/falcon-180B", }, "Falcon-7B-Instruct": { DownloadSource.DEFAULT: "tiiuae/falcon-7b-instruct", DownloadSource.MODELSCOPE: "AI-ModelScope/falcon-7b-instruct", }, "Falcon-40B-Instruct": { DownloadSource.DEFAULT: "tiiuae/falcon-40b-instruct", DownloadSource.MODELSCOPE: "AI-ModelScope/falcon-40b-instruct", }, "Falcon-180B-Chat": { DownloadSource.DEFAULT: "tiiuae/falcon-180b-chat", DownloadSource.MODELSCOPE: "modelscope/falcon-180B-chat", }, }, template="falcon", ) register_model_group( models={ "Falcon-H1-0.5B-Base": { DownloadSource.DEFAULT: "tiiuae/Falcon-H1-0.5B-Base", DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-0.5B-Base", }, "Falcon-H1-1.5B-Base": { DownloadSource.DEFAULT: "tiiuae/Falcon-H1-1.5B-Base", DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-1.5B-Base", }, "Falcon-H1-1.5B-Deep-Base": { DownloadSource.DEFAULT: "tiuae/Falcon-H1-1.5B-Deep-Base", DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-1.5B-Deep-Base", }, "Falcon-H1-3B-Base": { DownloadSource.DEFAULT: "tiiuae/Falcon-H1-3B-Base", DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-3B-Base", }, "Falcon-H1-7B-Base": { DownloadSource.DEFAULT: "tiiuae/Falcon-H1-7B-Base", DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-7B-Base", }, "Falcon-H1-34B-Base": { DownloadSource.DEFAULT: "tiiuae/Falcon-H1-34B-Base", DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-34B-Base", }, "Falcon-H1-0.5B-Instruct": { DownloadSource.DEFAULT: "tiiuae/Falcon-H1-0.5B-Instruct", DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-0.5B-Instruct", }, "Falcon-H1-1.5B-Instruct": { DownloadSource.DEFAULT: "tiiuae/Falcon-H1-1.5B-Instruct", DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-1.5B-Instruct", }, "Falcon-H1-1.5B-Deep-Instruct": { 
DownloadSource.DEFAULT: "tiiuae/Falcon-H1-1.5B-Deep-Instruct", DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-1.5B-Deep-Instruct", }, "Falcon-H1-3B-Instruct": { DownloadSource.DEFAULT: "tiiuae/Falcon-H1-3B-Instruct", DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-3B-Instruct", }, "Falcon-H1-7B-Instruct": { DownloadSource.DEFAULT: "tiiuae/Falcon-H1-7B-Instruct", DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-7B-Instruct", }, "Falcon-H1-34B-Instruct": { DownloadSource.DEFAULT: "tiiuae/Falcon-H1-34B-Instruct", DownloadSource.MODELSCOPE: "tiiuae/Falcon-H1-34B-Instruct", }, }, template="falcon_h1", ) register_model_group( models={ "Gemma-2B": { DownloadSource.DEFAULT: "google/gemma-2b", DownloadSource.MODELSCOPE: "AI-ModelScope/gemma-2b", }, "Gemma-7B": { DownloadSource.DEFAULT: "google/gemma-7b", DownloadSource.MODELSCOPE: "AI-ModelScope/gemma-2b-it", }, "Gemma-2B-Instruct": { DownloadSource.DEFAULT: "google/gemma-2b-it", DownloadSource.MODELSCOPE: "AI-ModelScope/gemma-7b", }, "Gemma-7B-Instruct": { DownloadSource.DEFAULT: "google/gemma-7b-it", DownloadSource.MODELSCOPE: "AI-ModelScope/gemma-7b-it", }, "Gemma-1.1-2B-Instruct": { DownloadSource.DEFAULT: "google/gemma-1.1-2b-it", }, "Gemma-1.1-7B-Instruct": { DownloadSource.DEFAULT: "google/gemma-1.1-7b-it", }, }, template="gemma", ) register_model_group( models={ "Gemma-2-2B": { DownloadSource.DEFAULT: "google/gemma-2-2b", DownloadSource.MODELSCOPE: "LLM-Research/gemma-2-2b", }, "Gemma-2-9B": { DownloadSource.DEFAULT: "google/gemma-2-9b", DownloadSource.MODELSCOPE: "LLM-Research/gemma-2-9b", }, "Gemma-2-27B": { DownloadSource.DEFAULT: "google/gemma-2-27b", DownloadSource.MODELSCOPE: "LLM-Research/gemma-2-27b", }, "Gemma-2-2B-Instruct": { DownloadSource.DEFAULT: "google/gemma-2-2b-it", DownloadSource.MODELSCOPE: "LLM-Research/gemma-2-2b-it", DownloadSource.OPENMIND: "LlamaFactory/gemma-2-2b-it", }, "Gemma-2-9B-Instruct": { DownloadSource.DEFAULT: "google/gemma-2-9b-it", DownloadSource.MODELSCOPE: "LLM-Research/gemma-2-9b-it", 
DownloadSource.OPENMIND: "LlamaFactory/gemma-2-9b-it", }, "Gemma-2-27B-Instruct": { DownloadSource.DEFAULT: "google/gemma-2-27b-it", DownloadSource.MODELSCOPE: "LLM-Research/gemma-2-27b-it", }, "Gemma-3-270M": { DownloadSource.DEFAULT: "google/gemma-3-270m", DownloadSource.MODELSCOPE: "LLM-Research/gemma-3-270m", }, "Gemma-3-1B": { DownloadSource.DEFAULT: "google/gemma-3-1b-pt", DownloadSource.MODELSCOPE: "LLM-Research/gemma-3-1b-pt", }, "Gemma-3-270M-Instruct": { DownloadSource.DEFAULT: "google/gemma-3-270m-it", DownloadSource.MODELSCOPE: "LLM-Research/gemma-3-270m-it", }, "Gemma-3-1B-Instruct": { DownloadSource.DEFAULT: "google/gemma-3-1b-it", DownloadSource.MODELSCOPE: "LLM-Research/gemma-3-1b-it", }, "MedGemma-27B-Instruct": { DownloadSource.DEFAULT: "google/medgemma-27b-text-it", DownloadSource.MODELSCOPE: "google/medgemma-27b-text-it", }, }, template="gemma2", ) register_model_group( models={ "Gemma-3-4B": { DownloadSource.DEFAULT: "google/gemma-3-4b-pt", DownloadSource.MODELSCOPE: "LLM-Research/gemma-3-4b-pt", }, "Gemma-3-12B": { DownloadSource.DEFAULT: "google/gemma-3-12b-pt", DownloadSource.MODELSCOPE: "LLM-Research/gemma-3-12b-pt", }, "Gemma-3-27B": { DownloadSource.DEFAULT: "google/gemma-3-27b-pt", DownloadSource.MODELSCOPE: "LLM-Research/gemma-3-27b-pt", }, "Gemma-3-4B-Instruct": { DownloadSource.DEFAULT: "google/gemma-3-4b-it", DownloadSource.MODELSCOPE: "LLM-Research/gemma-3-4b-it", }, "Gemma-3-12B-Instruct": { DownloadSource.DEFAULT: "google/gemma-3-12b-it", DownloadSource.MODELSCOPE: "LLM-Research/gemma-3-12b-it", }, "Gemma-3-27B-Instruct": { DownloadSource.DEFAULT: "google/gemma-3-27b-it", DownloadSource.MODELSCOPE: "LLM-Research/gemma-3-27b-it", }, "MedGemma-4B": { DownloadSource.DEFAULT: "google/medgemma-4b-pt", DownloadSource.MODELSCOPE: "google/medgemma-4b-pt", }, "MedGemma-4B-Instruct": { DownloadSource.DEFAULT: "google/medgemma-4b-it", DownloadSource.MODELSCOPE: "google/medgemma-4b-it", }, "MedGemma-27B-Instruct": { DownloadSource.DEFAULT: 
"google/medgemma-27b-text-it", DownloadSource.MODELSCOPE: "google/medgemma-27b-text-it", }, }, template="gemma3", multimodal=True, ) register_model_group( models={ "Gemma-3n-E2B": { DownloadSource.DEFAULT: "google/gemma-3n-E2B", DownloadSource.MODELSCOPE: "LLM-Research/gemma-3n-E2B", }, "Gemma-3n-E4B": { DownloadSource.DEFAULT: "google/gemma-3n-E4B", DownloadSource.MODELSCOPE: "LLM-Research/gemma-3n-E4B", }, "Gemma-3n-E2B-Instruct": { DownloadSource.DEFAULT: "google/gemma-3n-E2B-it", DownloadSource.MODELSCOPE: "LLM-Research/gemma-3n-E2B-it", }, "Gemma-3n-E4B-Instruct": { DownloadSource.DEFAULT: "google/gemma-3n-E4B-it", DownloadSource.MODELSCOPE: "LLM-Research/gemma-3n-E4B-it", }, }, template="gemma3n", multimodal=True, ) register_model_group( models={ "GLM-4-9B": { DownloadSource.DEFAULT: "zai-org/glm-4-9b", DownloadSource.MODELSCOPE: "ZhipuAI/glm-4-9b", }, "GLM-4-9B-Chat": { DownloadSource.DEFAULT: "zai-org/glm-4-9b-chat", DownloadSource.MODELSCOPE: "ZhipuAI/glm-4-9b-chat", DownloadSource.OPENMIND: "LlamaFactory/glm-4-9b-chat", }, "GLM-4-9B-1M-Chat": { DownloadSource.DEFAULT: "zai-org/glm-4-9b-chat-1m", DownloadSource.MODELSCOPE: "ZhipuAI/glm-4-9b-chat-1m", }, "GLM-4-0414-9B-Chat": { DownloadSource.DEFAULT: "zai-org/GLM-4-9B-0414", DownloadSource.MODELSCOPE: "ZhipuAI/GLM-4-9B-0414", }, "GLM-4-0414-32B-Base": { DownloadSource.DEFAULT: "zai-org/GLM-4-32B-Base-0414", DownloadSource.MODELSCOPE: "ZhipuAI/GLM-4-32B-Base-0414", }, "GLM-4-0414-32B-Chat": { DownloadSource.DEFAULT: "zai-org/GLM-4-32B-0414", DownloadSource.MODELSCOPE: "ZhipuAI/GLM-4-32B-0414", }, }, template="glm4", ) register_model_group( models={ "GLM-4.1V-9B-Base": { DownloadSource.DEFAULT: "zai-org/GLM-4.1V-9B-Base", DownloadSource.MODELSCOPE: "ZhipuAI/GLM-4.1V-9B-Base", }, "GLM-4.1V-9B-Thinking": { DownloadSource.DEFAULT: "zai-org/GLM-4.1V-9B-Thinking", DownloadSource.MODELSCOPE: "ZhipuAI/GLM-4.1V-9B-Thinking", }, }, template="glm4v", multimodal=True, ) register_model_group( models={ 
"GLM-4.5-Air-Base": { DownloadSource.DEFAULT: "zai-org/GLM-4.5-Air-Base", DownloadSource.MODELSCOPE: "ZhipuAI/GLM-4.5-Air-Base", }, "GLM-4.5-Base": { DownloadSource.DEFAULT: "zai-org/GLM-4.5-Base", DownloadSource.MODELSCOPE: "ZhipuAI/GLM-4.5-Base", },
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
true
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/extras/misc.py
src/llamafactory/extras/misc.py
# Copyright 2025 HuggingFace Inc. and the LlamaFactory team. # # This code is inspired by the HuggingFace's PEFT library. # https://github.com/huggingface/peft/blob/v0.10.0/src/peft/peft_model.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import os import socket from typing import TYPE_CHECKING, Any, Literal, Optional, Union import torch import torch.distributed as dist import transformers.dynamic_module_utils from huggingface_hub.utils import WeakFileLock from transformers import InfNanRemoveLogitsProcessor, LogitsProcessorList from transformers.dynamic_module_utils import get_relative_imports from transformers.utils import ( is_torch_bf16_gpu_available, is_torch_cuda_available, is_torch_mps_available, is_torch_npu_available, is_torch_xpu_available, ) from transformers.utils.versions import require_version from . 
import logging _is_fp16_available = is_torch_npu_available() or is_torch_cuda_available() try: _is_bf16_available = is_torch_bf16_gpu_available() or (is_torch_npu_available() and torch.npu.is_bf16_supported()) except Exception: _is_bf16_available = False if TYPE_CHECKING: from numpy.typing import NDArray from ..hparams import ModelArguments logger = logging.get_logger(__name__) class AverageMeter: r"""Compute and store the average and current value.""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def check_version(requirement: str, mandatory: bool = False) -> None: r"""Optionally check the package version.""" if is_env_enabled("DISABLE_VERSION_CHECK") and not mandatory: logger.warning_rank0_once("Version checking has been disabled, may lead to unexpected behaviors.") return if "gptmodel" in requirement or "autoawq" in requirement: pip_command = f"pip install {requirement} --no-build-isolation" else: pip_command = f"pip install {requirement}" if mandatory: hint = f"To fix: run `{pip_command}`." else: hint = f"To fix: run `{pip_command}` or set `DISABLE_VERSION_CHECK=1` to skip this check." 
require_version(requirement, hint) def check_dependencies() -> None: r"""Check the version of the required packages.""" check_version("transformers>=4.51.0,<=4.57.1") check_version("datasets>=2.16.0,<=4.0.0") check_version("accelerate>=1.3.0,<=1.11.0") check_version("peft>=0.14.0,<=0.17.1") check_version("trl>=0.18.0,<=0.24.0") def calculate_tps(dataset: list[dict[str, Any]], metrics: dict[str, float], stage: Literal["sft", "rm"]) -> float: r"""Calculate effective tokens per second.""" effective_token_num = 0 for data in dataset: if stage == "sft": effective_token_num += len(data["input_ids"]) elif stage == "rm": effective_token_num += len(data["chosen_input_ids"]) + len(data["rejected_input_ids"]) result = effective_token_num * metrics["epoch"] / metrics["train_runtime"] return result / dist.get_world_size() if dist.is_initialized() else result def count_parameters(model: "torch.nn.Module") -> tuple[int, int]: r"""Return the number of trainable parameters and number of all parameters in the model.""" trainable_params, all_param = 0, 0 for param in model.parameters(): num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): num_params = param.ds_numel # Due to the design of 4bit linear layers from bitsandbytes, multiply the number of parameters by itemsize if param.__class__.__name__ == "Params4bit": if hasattr(param, "quant_storage") and hasattr(param.quant_storage, "itemsize"): num_bytes = param.quant_storage.itemsize elif hasattr(param, "element_size"): # for older pytorch version num_bytes = param.element_size() else: num_bytes = 1 num_params = num_params * 2 * num_bytes all_param += num_params if param.requires_grad: trainable_params += num_params return trainable_params, all_param def get_current_device() -> "torch.device": r"""Get the current available device.""" if is_torch_xpu_available(): device = "xpu:{}".format(os.getenv("LOCAL_RANK", "0")) elif is_torch_npu_available(): 
device = "npu:{}".format(os.getenv("LOCAL_RANK", "0")) elif is_torch_mps_available(): device = "mps:{}".format(os.getenv("LOCAL_RANK", "0")) elif is_torch_cuda_available(): device = "cuda:{}".format(os.getenv("LOCAL_RANK", "0")) else: device = "cpu" return torch.device(device) def get_device_count() -> int: r"""Get the number of available devices.""" if is_torch_xpu_available(): return torch.xpu.device_count() elif is_torch_npu_available(): return torch.npu.device_count() elif is_torch_mps_available(): return torch.mps.device_count() elif is_torch_cuda_available(): return torch.cuda.device_count() else: return 0 def get_logits_processor() -> "LogitsProcessorList": r"""Get logits processor that removes NaN and Inf logits.""" logits_processor = LogitsProcessorList() logits_processor.append(InfNanRemoveLogitsProcessor()) return logits_processor def get_current_memory() -> tuple[int, int]: r"""Get the available and total memory for the current device (in Bytes).""" if is_torch_xpu_available(): return torch.xpu.mem_get_info() elif is_torch_npu_available(): return torch.npu.mem_get_info() elif is_torch_mps_available(): return torch.mps.current_allocated_memory(), torch.mps.recommended_max_memory() elif is_torch_cuda_available(): return torch.cuda.mem_get_info() else: return 0, -1 def get_peak_memory() -> tuple[int, int]: r"""Get the peak memory usage (allocated, reserved) for the current device (in Bytes).""" if is_torch_xpu_available(): return torch.xpu.max_memory_allocated(), torch.xpu.max_memory_reserved() elif is_torch_npu_available(): return torch.npu.max_memory_allocated(), torch.npu.max_memory_reserved() elif is_torch_mps_available(): return torch.mps.current_allocated_memory(), -1 elif is_torch_cuda_available(): return torch.cuda.max_memory_allocated(), torch.cuda.max_memory_reserved() else: return 0, -1 def has_tokenized_data(path: "os.PathLike") -> bool: r"""Check if the path has a tokenized dataset.""" return os.path.isdir(path) and len(os.listdir(path)) > 0 
def infer_optim_dtype(model_dtype: Optional["torch.dtype"]) -> "torch.dtype": r"""Infer the optimal dtype according to the model_dtype and device compatibility.""" if _is_bf16_available and (model_dtype == torch.bfloat16 or model_dtype is None): return torch.bfloat16 elif _is_fp16_available: return torch.float16 else: return torch.float32 def is_accelerator_available() -> bool: r"""Check if the accelerator is available.""" return ( is_torch_xpu_available() or is_torch_npu_available() or is_torch_mps_available() or is_torch_cuda_available() ) def is_env_enabled(env_var: str, default: str = "0") -> bool: r"""Check if the environment variable is enabled.""" return os.getenv(env_var, default).lower() in ["true", "y", "1"] def numpify(inputs: Union["NDArray", "torch.Tensor"]) -> "NDArray": r"""Cast a torch tensor or a numpy array to a numpy array.""" if isinstance(inputs, torch.Tensor): inputs = inputs.cpu() if inputs.dtype == torch.bfloat16: # numpy does not support bfloat16 until 1.21.4 inputs = inputs.to(torch.float32) inputs = inputs.numpy() return inputs def skip_check_imports() -> None: r"""Avoid flash attention import error in custom model files.""" if not is_env_enabled("FORCE_CHECK_IMPORTS"): transformers.dynamic_module_utils.check_imports = get_relative_imports def torch_gc() -> None: r"""Collect the device memory.""" gc.collect() if is_torch_xpu_available(): torch.xpu.empty_cache() elif is_torch_npu_available(): torch.npu.empty_cache() elif is_torch_mps_available(): torch.mps.empty_cache() elif is_torch_cuda_available(): torch.cuda.empty_cache() def try_download_model_from_other_hub(model_args: "ModelArguments") -> str: if (not use_modelscope() and not use_openmind()) or os.path.exists(model_args.model_name_or_path): return model_args.model_name_or_path if use_modelscope(): check_version("modelscope>=1.14.0", mandatory=True) from modelscope import snapshot_download # type: ignore from modelscope.hub.api import HubApi # type: ignore if model_args.ms_hub_token: 
api = HubApi() api.login(model_args.ms_hub_token) revision = "master" if model_args.model_revision == "main" else model_args.model_revision with WeakFileLock(os.path.abspath(os.path.expanduser("~/.cache/llamafactory/modelscope.lock"))): model_path = snapshot_download( model_args.model_name_or_path, revision=revision, cache_dir=model_args.cache_dir, ) return model_path if use_openmind(): check_version("openmind>=0.8.0", mandatory=True) from openmind.utils.hub import snapshot_download # type: ignore with WeakFileLock(os.path.abspath(os.path.expanduser("~/.cache/llamafactory/openmind.lock"))): model_path = snapshot_download( model_args.model_name_or_path, revision=model_args.model_revision, cache_dir=model_args.cache_dir, ) return model_path def use_modelscope() -> bool: return is_env_enabled("USE_MODELSCOPE_HUB") def use_openmind() -> bool: return is_env_enabled("USE_OPENMIND_HUB") def use_ray() -> bool: return is_env_enabled("USE_RAY") def use_kt() -> bool: return is_env_enabled("USE_KT") def find_available_port() -> int: r"""Find an available port on the local machine.""" sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.bind(("", 0)) port = sock.getsockname()[1] sock.close() return port def fix_proxy(ipv6_enabled: bool = False) -> None: r"""Fix proxy settings for gradio ui.""" os.environ["no_proxy"] = "localhost,127.0.0.1,0.0.0.0" if ipv6_enabled: os.environ.pop("http_proxy", None) os.environ.pop("HTTP_PROXY", None) os.environ.pop("https_proxy", None) os.environ.pop("HTTPS_PROXY", None) os.environ.pop("all_proxy", None) os.environ.pop("ALL_PROXY", None)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/extras/logging.py
src/llamafactory/extras/logging.py
# Copyright 2025 Optuna, HuggingFace Inc. and the LlamaFactory team. # # This code is inspired by the HuggingFace's transformers library. # https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/utils/logging.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import threading from concurrent.futures import ThreadPoolExecutor from functools import lru_cache from typing import Optional from .constants import RUNNING_LOG _thread_lock = threading.RLock() _default_handler: Optional["logging.Handler"] = None _default_log_level: "logging._Level" = logging.INFO class LoggerHandler(logging.Handler): r"""Redirect the logging output to the logging file for LLaMA Board.""" def __init__(self, output_dir: str) -> None: super().__init__() self._formatter = logging.Formatter( fmt="[%(levelname)s|%(asctime)s] %(filename)s:%(lineno)s >> %(message)s", datefmt="%Y-%m-%d %H:%M:%S", ) self.setLevel(logging.INFO) os.makedirs(output_dir, exist_ok=True) self.running_log = os.path.join(output_dir, RUNNING_LOG) if os.path.exists(self.running_log): os.remove(self.running_log) self.thread_pool = ThreadPoolExecutor(max_workers=1) def _write_log(self, log_entry: str) -> None: with open(self.running_log, "a", encoding="utf-8") as f: f.write(log_entry + "\n") def emit(self, record) -> None: if record.name == "httpx": return log_entry = self._formatter.format(record) self.thread_pool.submit(self._write_log, log_entry) def close(self) -> None: 
self.thread_pool.shutdown(wait=True) return super().close() class _Logger(logging.Logger): r"""A logger that supports rank0 logging.""" def info_rank0(self, *args, **kwargs) -> None: self.info(*args, **kwargs) def warning_rank0(self, *args, **kwargs) -> None: self.warning(*args, **kwargs) def warning_rank0_once(self, *args, **kwargs) -> None: self.warning(*args, **kwargs) def _get_default_logging_level() -> "logging._Level": r"""Return the default logging level.""" env_level_str = os.getenv("LLAMAFACTORY_VERBOSITY", None) if env_level_str: if env_level_str.upper() in logging._nameToLevel: return logging._nameToLevel[env_level_str.upper()] else: raise ValueError(f"Unknown logging level: {env_level_str}.") return _default_log_level def _get_library_name() -> str: return __name__.split(".")[0] def _get_library_root_logger() -> "_Logger": return logging.getLogger(_get_library_name()) def _configure_library_root_logger() -> None: r"""Configure root logger using a stdout stream handler with an explicit format.""" global _default_handler with _thread_lock: if _default_handler: # already configured return formatter = logging.Formatter( fmt="[%(levelname)s|%(asctime)s] %(name)s:%(lineno)s >> %(message)s", datefmt="%Y-%m-%d %H:%M:%S", ) _default_handler = logging.StreamHandler(sys.stdout) _default_handler.setFormatter(formatter) library_root_logger = _get_library_root_logger() library_root_logger.addHandler(_default_handler) library_root_logger.setLevel(_get_default_logging_level()) library_root_logger.propagate = False def get_logger(name: str | None = None) -> "_Logger": r"""Return a logger with the specified name. 
It it not supposed to be accessed externally.""" if name is None: name = _get_library_name() _configure_library_root_logger() return logging.getLogger(name) def add_handler(handler: "logging.Handler") -> None: r"""Add a handler to the root logger.""" _configure_library_root_logger() _get_library_root_logger().addHandler(handler) def remove_handler(handler: logging.Handler) -> None: r"""Remove a handler to the root logger.""" _configure_library_root_logger() _get_library_root_logger().removeHandler(handler) def info_rank0(self: "logging.Logger", *args, **kwargs) -> None: if int(os.getenv("LOCAL_RANK", "0")) == 0: self.info(*args, **kwargs) def warning_rank0(self: "logging.Logger", *args, **kwargs) -> None: if int(os.getenv("LOCAL_RANK", "0")) == 0: self.warning(*args, **kwargs) @lru_cache(None) def warning_rank0_once(self: "logging.Logger", *args, **kwargs) -> None: if int(os.getenv("LOCAL_RANK", "0")) == 0: self.warning(*args, **kwargs) logging.Logger.info_rank0 = info_rank0 logging.Logger.warning_rank0 = warning_rank0 logging.Logger.warning_rank0_once = warning_rank0_once
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/extras/__init__.py
src/llamafactory/extras/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/extras/ploting.py
src/llamafactory/extras/ploting.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import math import os from typing import Any from transformers.trainer import TRAINER_STATE_NAME from . import logging from .packages import is_matplotlib_available if is_matplotlib_available(): import matplotlib.figure import matplotlib.pyplot as plt logger = logging.get_logger(__name__) def smooth(scalars: list[float]) -> list[float]: r"""EMA implementation according to TensorBoard.""" if len(scalars) == 0: return [] last = scalars[0] smoothed = [] weight = 1.8 * (1 / (1 + math.exp(-0.05 * len(scalars))) - 0.5) # a sigmoid function for next_val in scalars: smoothed_val = last * weight + (1 - weight) * next_val smoothed.append(smoothed_val) last = smoothed_val return smoothed def gen_loss_plot(trainer_log: list[dict[str, Any]]) -> "matplotlib.figure.Figure": r"""Plot loss curves in LlamaBoard.""" plt.close("all") plt.switch_backend("agg") fig = plt.figure() ax = fig.add_subplot(111) steps, losses = [], [] for log in trainer_log: if log.get("loss", None): steps.append(log["current_steps"]) losses.append(log["loss"]) ax.plot(steps, losses, color="#1f77b4", alpha=0.4, label="original") ax.plot(steps, smooth(losses), color="#1f77b4", label="smoothed") ax.legend() ax.set_xlabel("step") ax.set_ylabel("loss") return fig def plot_loss(save_dictionary: str, keys: list[str] = ["loss"]) -> None: r"""Plot loss curves and saves the image.""" plt.switch_backend("agg") with 
open(os.path.join(save_dictionary, TRAINER_STATE_NAME), encoding="utf-8") as f: data = json.load(f) for key in keys: steps, metrics = [], [] for i in range(len(data["log_history"])): if key in data["log_history"][i]: steps.append(data["log_history"][i]["step"]) metrics.append(data["log_history"][i][key]) if len(metrics) == 0: logger.warning_rank0(f"No metric {key} to plot.") continue plt.figure() plt.plot(steps, metrics, color="#1f77b4", alpha=0.4, label="original") plt.plot(steps, smooth(metrics), color="#1f77b4", label="smoothed") plt.title(f"training {key} of {save_dictionary}") plt.xlabel("step") plt.ylabel(key) plt.legend() figure_path = os.path.join(save_dictionary, "training_{}.png".format(key.replace("/", "_"))) plt.savefig(figure_path, format="png", dpi=100) print("Figure saved at:", figure_path)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/extras/env.py
src/llamafactory/extras/env.py
# Copyright 2025 HuggingFace Inc. and the LlamaFactory team. # # This code is inspired by the HuggingFace's transformers library. # https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/commands/env.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import OrderedDict VERSION = "0.9.5.dev0" def print_env() -> None: import os import platform import accelerate import datasets import peft import torch import transformers from transformers.utils import is_torch_cuda_available, is_torch_npu_available info = OrderedDict( { "`llamafactory` version": VERSION, "Platform": platform.platform(), "Python version": platform.python_version(), "PyTorch version": torch.__version__, "Transformers version": transformers.__version__, "Datasets version": datasets.__version__, "Accelerate version": accelerate.__version__, "PEFT version": peft.__version__, } ) if is_torch_cuda_available(): info["PyTorch version"] += " (GPU)" info["GPU type"] = torch.cuda.get_device_name() info["GPU number"] = torch.cuda.device_count() info["GPU memory"] = f"{torch.cuda.mem_get_info()[1] / (1024**3):.2f}GB" if is_torch_npu_available(): info["PyTorch version"] += " (NPU)" info["NPU type"] = torch.npu.get_device_name() info["CANN version"] = torch.version.cann try: import trl # type: ignore info["TRL version"] = trl.__version__ except Exception: pass try: import deepspeed # type: ignore info["DeepSpeed version"] = deepspeed.__version__ except Exception: pass try: import bitsandbytes # 
type: ignore info["Bitsandbytes version"] = bitsandbytes.__version__ except Exception: pass try: import vllm info["vLLM version"] = vllm.__version__ except Exception: pass try: import subprocess commit_info = subprocess.run(["git", "rev-parse", "HEAD"], capture_output=True, text=True, check=True) commit_hash = commit_info.stdout.strip() info["Git commit"] = commit_hash except Exception: pass if os.path.exists("data"): info["Default data directory"] = "detected" else: info["Default data directory"] = "not detected" print("\n" + "\n".join([f"- {key}: {value}" for key, value in info.items()]) + "\n")
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/extras/packages.py
src/llamafactory/extras/packages.py
# Copyright 2025 HuggingFace Inc. and the LlamaFactory team. # # This code is inspired by the HuggingFace's transformers library. # https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/utils/import_utils.py # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib.metadata import importlib.util from functools import lru_cache from typing import TYPE_CHECKING from packaging import version if TYPE_CHECKING: from packaging.version import Version def _is_package_available(name: str) -> bool: return importlib.util.find_spec(name) is not None def _get_package_version(name: str) -> "Version": try: return version.parse(importlib.metadata.version(name)) except Exception: return version.parse("0.0.0") def is_pyav_available(): return _is_package_available("av") def is_librosa_available(): return _is_package_available("librosa") def is_fastapi_available(): return _is_package_available("fastapi") def is_galore_available(): return _is_package_available("galore_torch") def is_apollo_available(): return _is_package_available("apollo_torch") def is_jieba_available(): return _is_package_available("jieba") def is_gradio_available(): return _is_package_available("gradio") def is_matplotlib_available(): return _is_package_available("matplotlib") def is_mcore_adapter_available(): return _is_package_available("mcore_adapter") def is_pillow_available(): return _is_package_available("PIL") def is_ray_available(): return _is_package_available("ray") def is_kt_available(): return 
_is_package_available("ktransformers") def is_requests_available(): return _is_package_available("requests") def is_rouge_available(): return _is_package_available("rouge_chinese") def is_safetensors_available(): return _is_package_available("safetensors") def is_sglang_available(): return _is_package_available("sglang") def is_starlette_available(): return _is_package_available("sse_starlette") @lru_cache def is_transformers_version_greater_than(content: str): return _get_package_version("transformers") >= version.parse(content) @lru_cache def is_torch_version_greater_than(content: str): return _get_package_version("torch") >= version.parse(content) def is_uvicorn_available(): return _is_package_available("uvicorn") def is_vllm_available(): return _is_package_available("vllm")
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/chatter.py
src/llamafactory/webui/chatter.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from collections.abc import Generator from contextlib import contextmanager from typing import TYPE_CHECKING, Any from transformers.utils import is_torch_npu_available from ..chat import ChatModel from ..data import Role from ..extras.constants import PEFT_METHODS from ..extras.misc import torch_gc from ..extras.packages import is_gradio_available from .common import get_save_dir, load_config from .locales import ALERTS if TYPE_CHECKING: from ..chat import BaseEngine from .manager import Manager if is_gradio_available(): import gradio as gr def _escape_html(text: str) -> str: r"""Escape HTML characters.""" return text.replace("<", "&lt;").replace(">", "&gt;") def _format_response(text: str, lang: str, escape_html: bool, thought_words: tuple[str, str]) -> str: r"""Post-process the response text. 
Based on: https://huggingface.co/spaces/Lyte/DeepSeek-R1-Distill-Qwen-1.5B-Demo-GGUF/blob/main/app.py """ if thought_words[0] not in text: return _escape_html(text) if escape_html else text text = text.replace(thought_words[0], "") result = text.split(thought_words[1], maxsplit=1) if len(result) == 1: summary = ALERTS["info_thinking"][lang] thought, answer = text, "" else: summary = ALERTS["info_thought"][lang] thought, answer = result if escape_html: thought, answer = _escape_html(thought), _escape_html(answer) return ( f"<details open><summary class='thinking-summary'><span>{summary}</span></summary>\n\n" f"<div class='thinking-container'>\n{thought}\n</div>\n</details>{answer}" ) @contextmanager def update_attr(obj: Any, name: str, value: Any): old_value = getattr(obj, name, None) setattr(obj, name, value) yield setattr(obj, name, old_value) class WebChatModel(ChatModel): def __init__(self, manager: "Manager", demo_mode: bool = False, lazy_init: bool = True) -> None: self.manager = manager self.demo_mode = demo_mode self.engine: BaseEngine | None = None if not lazy_init: # read arguments from command line super().__init__() if demo_mode and os.getenv("DEMO_MODEL") and os.getenv("DEMO_TEMPLATE"): # load demo model model_name_or_path = os.getenv("DEMO_MODEL") template = os.getenv("DEMO_TEMPLATE") infer_backend = os.getenv("DEMO_BACKEND", "huggingface") super().__init__( dict(model_name_or_path=model_name_or_path, template=template, infer_backend=infer_backend) ) @property def loaded(self) -> bool: return self.engine is not None def load_model(self, data) -> Generator[str, None, None]: get = lambda elem_id: data[self.manager.get_elem_by_id(elem_id)] lang, model_name, model_path = get("top.lang"), get("top.model_name"), get("top.model_path") finetuning_type, checkpoint_path = get("top.finetuning_type"), get("top.checkpoint_path") user_config = load_config() error = "" if self.loaded: error = ALERTS["err_exists"][lang] elif not model_name: error = 
ALERTS["err_no_model"][lang] elif not model_path: error = ALERTS["err_no_path"][lang] elif self.demo_mode: error = ALERTS["err_demo"][lang] try: json.loads(get("infer.extra_args")) except json.JSONDecodeError: error = ALERTS["err_json_schema"][lang] if error: gr.Warning(error) yield error return yield ALERTS["info_loading"][lang] args = dict( model_name_or_path=model_path, cache_dir=user_config.get("cache_dir", None), finetuning_type=finetuning_type, template=get("top.template"), rope_scaling=get("top.rope_scaling") if get("top.rope_scaling") != "none" else None, flash_attn="fa2" if get("top.booster") == "flashattn2" else "auto", use_unsloth=(get("top.booster") == "unsloth"), enable_liger_kernel=(get("top.booster") == "liger_kernel"), infer_backend=get("infer.infer_backend"), infer_dtype=get("infer.infer_dtype"), trust_remote_code=True, ) args.update(json.loads(get("infer.extra_args"))) # checkpoints if checkpoint_path: if finetuning_type in PEFT_METHODS: # list args["adapter_name_or_path"] = ",".join( [get_save_dir(model_name, finetuning_type, adapter) for adapter in checkpoint_path] ) else: # str args["model_name_or_path"] = get_save_dir(model_name, finetuning_type, checkpoint_path) # quantization if get("top.quantization_bit") != "none": args["quantization_bit"] = int(get("top.quantization_bit")) args["quantization_method"] = get("top.quantization_method") args["double_quantization"] = not is_torch_npu_available() super().__init__(args) yield ALERTS["info_loaded"][lang] def unload_model(self, data) -> Generator[str, None, None]: lang = data[self.manager.get_elem_by_id("top.lang")] if self.demo_mode: gr.Warning(ALERTS["err_demo"][lang]) yield ALERTS["err_demo"][lang] return yield ALERTS["info_unloading"][lang] self.engine = None torch_gc() yield ALERTS["info_unloaded"][lang] @staticmethod def append( chatbot: list[dict[str, str]], messages: list[dict[str, str]], role: str, query: str, escape_html: bool, ) -> tuple[list[dict[str, str]], list[dict[str, str]], str]: 
r"""Add the user input to chatbot. Inputs: infer.chatbot, infer.messages, infer.role, infer.query, infer.escape_html Output: infer.chatbot, infer.messages, infer.query """ return ( chatbot + [{"role": "user", "content": _escape_html(query) if escape_html else query}], messages + [{"role": role, "content": query}], "", ) def stream( self, chatbot: list[dict[str, str]], messages: list[dict[str, str]], lang: str, system: str, tools: str, image: Any | None, video: Any | None, audio: Any | None, max_new_tokens: int, top_p: float, temperature: float, skip_special_tokens: bool, escape_html: bool, enable_thinking: bool, ) -> Generator[tuple[list[dict[str, str]], list[dict[str, str]]], None, None]: r"""Generate output text in stream. Inputs: infer.chatbot, infer.messages, infer.system, infer.tools, infer.image, infer.video, ... Output: infer.chatbot, infer.messages """ with update_attr(self.engine.template, "enable_thinking", enable_thinking): chatbot.append({"role": "assistant", "content": ""}) response = "" for new_text in self.stream_chat( messages, system, tools, images=[image] if image else None, videos=[video] if video else None, audios=[audio] if audio else None, max_new_tokens=max_new_tokens, top_p=top_p, temperature=temperature, skip_special_tokens=skip_special_tokens, ): response += new_text if tools: result = self.engine.template.extract_tool(response) else: result = response if isinstance(result, list): tool_calls = [{"name": tool.name, "arguments": json.loads(tool.arguments)} for tool in result] tool_calls = json.dumps(tool_calls, ensure_ascii=False) output_messages = messages + [{"role": Role.FUNCTION.value, "content": tool_calls}] bot_text = "```json\n" + tool_calls + "\n```" else: output_messages = messages + [{"role": Role.ASSISTANT.value, "content": result}] bot_text = _format_response(result, lang, escape_html, self.engine.template.thought_words) chatbot[-1] = {"role": "assistant", "content": bot_text} yield chatbot, output_messages
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/runner.py
src/llamafactory/webui/runner.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from collections.abc import Generator from copy import deepcopy from subprocess import PIPE, Popen, TimeoutExpired from typing import TYPE_CHECKING, Any from transformers.utils import is_torch_npu_available from ..extras.constants import LLAMABOARD_CONFIG, MULTIMODAL_SUPPORTED_MODELS, PEFT_METHODS, TRAINING_STAGES from ..extras.misc import is_accelerator_available, torch_gc from ..extras.packages import is_gradio_available from .common import ( DEFAULT_CACHE_DIR, DEFAULT_CONFIG_DIR, abort_process, calculate_pixels, gen_cmd, get_save_dir, load_args, load_config, load_eval_results, save_args, save_cmd, ) from .control import get_trainer_info from .locales import ALERTS, LOCALES if is_gradio_available(): import gradio as gr if TYPE_CHECKING: from gradio.components import Component from .manager import Manager class Runner: r"""A class to manage the running status of the trainers.""" def __init__(self, manager: "Manager", demo_mode: bool = False) -> None: r"""Init a runner.""" self.manager = manager self.demo_mode = demo_mode """ Resume """ self.trainer: Popen | None = None self.do_train = True self.running_data: dict[Component, Any] = None """ State """ self.aborted = False self.running = False def set_abort(self) -> None: self.aborted = True if self.trainer is not None: abort_process(self.trainer.pid) def _initialize(self, data: dict["Component", Any], do_train: bool, 
from_preview: bool) -> str: r"""Validate the configuration.""" get = lambda elem_id: data[self.manager.get_elem_by_id(elem_id)] lang, model_name, model_path = get("top.lang"), get("top.model_name"), get("top.model_path") dataset = get("train.dataset") if do_train else get("eval.dataset") if self.running: return ALERTS["err_conflict"][lang] if not model_name: return ALERTS["err_no_model"][lang] if not model_path: return ALERTS["err_no_path"][lang] if not dataset: return ALERTS["err_no_dataset"][lang] if not from_preview and self.demo_mode: return ALERTS["err_demo"][lang] if do_train: if not get("train.output_dir"): return ALERTS["err_no_output_dir"][lang] try: json.loads(get("train.extra_args")) except json.JSONDecodeError: return ALERTS["err_json_schema"][lang] stage = TRAINING_STAGES[get("train.training_stage")] if stage == "ppo" and not get("train.reward_model"): return ALERTS["err_no_reward_model"][lang] else: if not get("eval.output_dir"): return ALERTS["err_no_output_dir"][lang] if not from_preview and not is_accelerator_available(): gr.Warning(ALERTS["warn_no_cuda"][lang]) return "" def _finalize(self, lang: str, finish_info: str) -> None: r"""Clean the cached memory and resets the runner.""" finish_info = ALERTS["info_aborted"][lang] if self.aborted else finish_info gr.Info(finish_info) self.trainer = None self.aborted = False self.running = False self.running_data = None torch_gc() def _parse_train_args(self, data: dict["Component", Any]) -> dict[str, Any]: r"""Build and validate the training arguments.""" get = lambda elem_id: data[self.manager.get_elem_by_id(elem_id)] model_name, finetuning_type = get("top.model_name"), get("top.finetuning_type") user_config = load_config() args = dict( stage=TRAINING_STAGES[get("train.training_stage")], do_train=True, model_name_or_path=get("top.model_path"), cache_dir=user_config.get("cache_dir", None), preprocessing_num_workers=16, finetuning_type=finetuning_type, template=get("top.template"), 
rope_scaling=get("top.rope_scaling") if get("top.rope_scaling") != "none" else None, flash_attn="fa2" if get("top.booster") == "flashattn2" else "auto", use_unsloth=(get("top.booster") == "unsloth"), enable_liger_kernel=(get("top.booster") == "liger_kernel"), dataset_dir=get("train.dataset_dir"), dataset=",".join(get("train.dataset")), cutoff_len=get("train.cutoff_len"), learning_rate=float(get("train.learning_rate")), num_train_epochs=float(get("train.num_train_epochs")), max_samples=int(get("train.max_samples")), per_device_train_batch_size=get("train.batch_size"), gradient_accumulation_steps=get("train.gradient_accumulation_steps"), lr_scheduler_type=get("train.lr_scheduler_type"), max_grad_norm=float(get("train.max_grad_norm")), logging_steps=get("train.logging_steps"), save_steps=get("train.save_steps"), warmup_steps=get("train.warmup_steps"), neftune_noise_alpha=get("train.neftune_alpha") or None, packing=get("train.packing") or get("train.neat_packing"), neat_packing=get("train.neat_packing"), train_on_prompt=get("train.train_on_prompt"), mask_history=get("train.mask_history"), resize_vocab=get("train.resize_vocab"), use_llama_pro=get("train.use_llama_pro"), enable_thinking=get("train.enable_thinking"), report_to=get("train.report_to"), use_galore=get("train.use_galore"), use_apollo=get("train.use_apollo"), use_badam=get("train.use_badam"), use_swanlab=get("train.use_swanlab"), output_dir=get_save_dir(model_name, finetuning_type, get("train.output_dir")), fp16=(get("train.compute_type") == "fp16"), bf16=(get("train.compute_type") == "bf16"), pure_bf16=(get("train.compute_type") == "pure_bf16"), plot_loss=True, trust_remote_code=True, ddp_timeout=180000000, include_num_input_tokens_seen=True, ) args.update(json.loads(get("train.extra_args"))) # checkpoints if get("top.checkpoint_path"): if finetuning_type in PEFT_METHODS: # list args["adapter_name_or_path"] = ",".join( [get_save_dir(model_name, finetuning_type, adapter) for adapter in 
get("top.checkpoint_path")] ) else: # str args["model_name_or_path"] = get_save_dir(model_name, finetuning_type, get("top.checkpoint_path")) # quantization if get("top.quantization_bit") != "none": args["quantization_bit"] = int(get("top.quantization_bit")) args["quantization_method"] = get("top.quantization_method") args["double_quantization"] = not is_torch_npu_available() # freeze config if args["finetuning_type"] == "freeze": args["freeze_trainable_layers"] = get("train.freeze_trainable_layers") args["freeze_trainable_modules"] = get("train.freeze_trainable_modules") args["freeze_extra_modules"] = get("train.freeze_extra_modules") or None # lora config if args["finetuning_type"] == "lora": args["lora_rank"] = get("train.lora_rank") args["lora_alpha"] = get("train.lora_alpha") args["lora_dropout"] = get("train.lora_dropout") args["loraplus_lr_ratio"] = get("train.loraplus_lr_ratio") or None args["create_new_adapter"] = get("train.create_new_adapter") args["use_rslora"] = get("train.use_rslora") args["use_dora"] = get("train.use_dora") args["pissa_init"] = get("train.use_pissa") args["pissa_convert"] = get("train.use_pissa") args["lora_target"] = get("train.lora_target") or "all" args["additional_target"] = get("train.additional_target") or None if args["use_llama_pro"]: args["freeze_trainable_layers"] = get("train.freeze_trainable_layers") # rlhf config if args["stage"] == "ppo": if finetuning_type in PEFT_METHODS: args["reward_model"] = ",".join( [get_save_dir(model_name, finetuning_type, adapter) for adapter in get("train.reward_model")] ) else: args["reward_model"] = get_save_dir(model_name, finetuning_type, get("train.reward_model")) args["reward_model_type"] = "lora" if finetuning_type == "lora" else "full" args["ppo_score_norm"] = get("train.ppo_score_norm") args["ppo_whiten_rewards"] = get("train.ppo_whiten_rewards") args["top_k"] = 0 args["top_p"] = 0.9 elif args["stage"] in ["dpo", "kto"]: args["pref_beta"] = get("train.pref_beta") args["pref_ftx"] = 
get("train.pref_ftx") args["pref_loss"] = get("train.pref_loss") # multimodal config if model_name in MULTIMODAL_SUPPORTED_MODELS: args["freeze_vision_tower"] = get("train.freeze_vision_tower") args["freeze_multi_modal_projector"] = get("train.freeze_multi_modal_projector") args["freeze_language_model"] = get("train.freeze_language_model") args["image_max_pixels"] = calculate_pixels(get("train.image_max_pixels")) args["image_min_pixels"] = calculate_pixels(get("train.image_min_pixels")) args["video_max_pixels"] = calculate_pixels(get("train.video_max_pixels")) args["video_min_pixels"] = calculate_pixels(get("train.video_min_pixels")) # galore config if args["use_galore"]: args["galore_rank"] = get("train.galore_rank") args["galore_update_interval"] = get("train.galore_update_interval") args["galore_scale"] = get("train.galore_scale") args["galore_target"] = get("train.galore_target") # apollo config if args["use_apollo"]: args["apollo_rank"] = get("train.apollo_rank") args["apollo_update_interval"] = get("train.apollo_update_interval") args["apollo_scale"] = get("train.apollo_scale") args["apollo_target"] = get("train.apollo_target") # badam config if args["use_badam"]: args["badam_mode"] = get("train.badam_mode") args["badam_switch_mode"] = get("train.badam_switch_mode") args["badam_switch_interval"] = get("train.badam_switch_interval") args["badam_update_ratio"] = get("train.badam_update_ratio") # swanlab config if get("train.use_swanlab"): args["swanlab_project"] = get("train.swanlab_project") args["swanlab_run_name"] = get("train.swanlab_run_name") args["swanlab_workspace"] = get("train.swanlab_workspace") args["swanlab_api_key"] = get("train.swanlab_api_key") args["swanlab_mode"] = get("train.swanlab_mode") # eval config if get("train.val_size") > 1e-6 and args["stage"] != "ppo": args["val_size"] = get("train.val_size") args["eval_strategy"] = "steps" args["eval_steps"] = args["save_steps"] args["per_device_eval_batch_size"] = 
args["per_device_train_batch_size"] # ds config if get("train.ds_stage") != "none": ds_stage = get("train.ds_stage") ds_offload = "offload_" if get("train.ds_offload") else "" args["deepspeed"] = os.path.join(DEFAULT_CACHE_DIR, f"ds_z{ds_stage}_{ds_offload}config.json") return args def _parse_eval_args(self, data: dict["Component", Any]) -> dict[str, Any]: r"""Build and validate the evaluation arguments.""" get = lambda elem_id: data[self.manager.get_elem_by_id(elem_id)] model_name, finetuning_type = get("top.model_name"), get("top.finetuning_type") user_config = load_config() args = dict( stage="sft", model_name_or_path=get("top.model_path"), cache_dir=user_config.get("cache_dir", None), preprocessing_num_workers=16, finetuning_type=finetuning_type, quantization_method=get("top.quantization_method"), template=get("top.template"), rope_scaling=get("top.rope_scaling") if get("top.rope_scaling") != "none" else None, flash_attn="fa2" if get("top.booster") == "flashattn2" else "auto", use_unsloth=(get("top.booster") == "unsloth"), dataset_dir=get("eval.dataset_dir"), eval_dataset=",".join(get("eval.dataset")), cutoff_len=get("eval.cutoff_len"), max_samples=int(get("eval.max_samples")), per_device_eval_batch_size=get("eval.batch_size"), predict_with_generate=True, report_to="none", max_new_tokens=get("eval.max_new_tokens"), top_p=get("eval.top_p"), temperature=get("eval.temperature"), output_dir=get_save_dir(model_name, finetuning_type, get("eval.output_dir")), trust_remote_code=True, ddp_timeout=180000000, ) if get("eval.predict"): args["do_predict"] = True else: args["do_eval"] = True # checkpoints if get("top.checkpoint_path"): if finetuning_type in PEFT_METHODS: # list args["adapter_name_or_path"] = ",".join( [get_save_dir(model_name, finetuning_type, adapter) for adapter in get("top.checkpoint_path")] ) else: # str args["model_name_or_path"] = get_save_dir(model_name, finetuning_type, get("top.checkpoint_path")) # quantization if get("top.quantization_bit") != 
"none": args["quantization_bit"] = int(get("top.quantization_bit")) args["quantization_method"] = get("top.quantization_method") args["double_quantization"] = not is_torch_npu_available() return args def _preview(self, data: dict["Component", Any], do_train: bool) -> Generator[dict["Component", str], None, None]: r"""Preview the training commands.""" output_box = self.manager.get_elem_by_id("{}.output_box".format("train" if do_train else "eval")) error = self._initialize(data, do_train, from_preview=True) if error: gr.Warning(error) yield {output_box: error} else: args = self._parse_train_args(data) if do_train else self._parse_eval_args(data) yield {output_box: gen_cmd(args)} def _launch(self, data: dict["Component", Any], do_train: bool) -> Generator[dict["Component", Any], None, None]: r"""Start the training process.""" output_box = self.manager.get_elem_by_id("{}.output_box".format("train" if do_train else "eval")) error = self._initialize(data, do_train, from_preview=False) if error: gr.Warning(error) yield {output_box: error} else: self.do_train, self.running_data = do_train, data args = self._parse_train_args(data) if do_train else self._parse_eval_args(data) os.makedirs(args["output_dir"], exist_ok=True) save_args(os.path.join(args["output_dir"], LLAMABOARD_CONFIG), self._build_config_dict(data)) env = deepcopy(os.environ) env["LLAMABOARD_ENABLED"] = "1" env["LLAMABOARD_WORKDIR"] = args["output_dir"] if args.get("deepspeed", None) is not None: env["FORCE_TORCHRUN"] = "1" # NOTE: DO NOT USE shell=True to avoid security risk self.trainer = Popen(["llamafactory-cli", "train", save_cmd(args)], env=env, stderr=PIPE, text=True) yield from self.monitor() def _build_config_dict(self, data: dict["Component", Any]) -> dict[str, Any]: r"""Build a dictionary containing the current training configuration.""" config_dict = {} skip_ids = ["top.lang", "top.model_path", "train.output_dir", "train.config_path"] for elem, value in data.items(): elem_id = 
self.manager.get_id_by_elem(elem) if elem_id not in skip_ids: config_dict[elem_id] = value return config_dict def preview_train(self, data): yield from self._preview(data, do_train=True) def preview_eval(self, data): yield from self._preview(data, do_train=False) def run_train(self, data): yield from self._launch(data, do_train=True) def run_eval(self, data): yield from self._launch(data, do_train=False) def monitor(self): r"""Monitorgit the training progress and logs.""" self.aborted = False self.running = True get = lambda elem_id: self.running_data[self.manager.get_elem_by_id(elem_id)] lang, model_name, finetuning_type = get("top.lang"), get("top.model_name"), get("top.finetuning_type") output_dir = get("{}.output_dir".format("train" if self.do_train else "eval")) output_path = get_save_dir(model_name, finetuning_type, output_dir) output_box = self.manager.get_elem_by_id("{}.output_box".format("train" if self.do_train else "eval")) progress_bar = self.manager.get_elem_by_id("{}.progress_bar".format("train" if self.do_train else "eval")) loss_viewer = self.manager.get_elem_by_id("train.loss_viewer") if self.do_train else None swanlab_link = self.manager.get_elem_by_id("train.swanlab_link") if self.do_train else None running_log = "" return_code = -1 while return_code == -1: if self.aborted: yield { output_box: ALERTS["info_aborting"][lang], progress_bar: gr.Slider(visible=False), } else: running_log, running_progress, running_info = get_trainer_info(lang, output_path, self.do_train) return_dict = { output_box: running_log, progress_bar: running_progress, } if "loss_viewer" in running_info: return_dict[loss_viewer] = running_info["loss_viewer"] if "swanlab_link" in running_info: return_dict[swanlab_link] = running_info["swanlab_link"] yield return_dict try: stderr = self.trainer.communicate(timeout=2)[1] return_code = self.trainer.returncode except TimeoutExpired: continue if return_code == 0 or self.aborted: finish_info = ALERTS["info_finished"][lang] if 
self.do_train: finish_log = ALERTS["info_finished"][lang] + "\n\n" + running_log else: finish_log = load_eval_results(os.path.join(output_path, "all_results.json")) + "\n\n" + running_log else: print(stderr) finish_info = ALERTS["err_failed"][lang] finish_log = ALERTS["err_failed"][lang] + f" Exit code: {return_code}\n\n```\n{stderr}\n```\n" self._finalize(lang, finish_info) return_dict = {output_box: finish_log, progress_bar: gr.Slider(visible=False)} yield return_dict def save_args(self, data): r"""Save the training configuration to config path.""" output_box = self.manager.get_elem_by_id("train.output_box") error = self._initialize(data, do_train=True, from_preview=True) if error: gr.Warning(error) return {output_box: error} lang = data[self.manager.get_elem_by_id("top.lang")] config_path = data[self.manager.get_elem_by_id("train.config_path")] os.makedirs(DEFAULT_CONFIG_DIR, exist_ok=True) save_path = os.path.join(DEFAULT_CONFIG_DIR, config_path) save_args(save_path, self._build_config_dict(data)) return {output_box: ALERTS["info_config_saved"][lang] + save_path} def load_args(self, lang: str, config_path: str): r"""Load the training configuration from config path.""" output_box = self.manager.get_elem_by_id("train.output_box") config_dict = load_args(os.path.join(DEFAULT_CONFIG_DIR, config_path)) if config_dict is None: gr.Warning(ALERTS["err_config_not_found"][lang]) return {output_box: ALERTS["err_config_not_found"][lang]} output_dict: dict[Component, Any] = {output_box: ALERTS["info_config_loaded"][lang]} for elem_id, value in config_dict.items(): output_dict[self.manager.get_elem_by_id(elem_id)] = value return output_dict def check_output_dir(self, lang: str, model_name: str, finetuning_type: str, output_dir: str): r"""Restore the training status if output_dir exists.""" output_box = self.manager.get_elem_by_id("train.output_box") output_dict: dict[Component, Any] = {output_box: LOCALES["output_box"][lang]["value"]} if model_name and output_dir and 
os.path.isdir(get_save_dir(model_name, finetuning_type, output_dir)): gr.Warning(ALERTS["warn_output_dir_exists"][lang]) output_dict[output_box] = ALERTS["warn_output_dir_exists"][lang] output_dir = get_save_dir(model_name, finetuning_type, output_dir) config_dict = load_args(os.path.join(output_dir, LLAMABOARD_CONFIG)) # load llamaboard config for elem_id, value in config_dict.items(): output_dict[self.manager.get_elem_by_id(elem_id)] = value return output_dict
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/common.py
src/llamafactory/webui/common.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import signal from collections import defaultdict from datetime import datetime from typing import Any from psutil import Process from yaml import safe_dump, safe_load from ..extras import logging from ..extras.constants import ( DATA_CONFIG, DEFAULT_TEMPLATE, MULTIMODAL_SUPPORTED_MODELS, SUPPORTED_MODELS, TRAINING_ARGS, DownloadSource, ) from ..extras.misc import use_modelscope, use_openmind logger = logging.get_logger(__name__) DEFAULT_CACHE_DIR = "llamaboard_cache" DEFAULT_CONFIG_DIR = "llamaboard_config" DEFAULT_DATA_DIR = "data" DEFAULT_SAVE_DIR = "saves" USER_CONFIG = "user_config.yaml" def abort_process(pid: int) -> None: r"""Abort the processes recursively in a bottom-up way.""" try: children = Process(pid).children() if children: for child in children: abort_process(child.pid) os.kill(pid, signal.SIGABRT) except Exception: pass def get_save_dir(*paths: str) -> os.PathLike: r"""Get the path to saved model checkpoints.""" if os.path.sep in paths[-1]: logger.warning_rank0("Found complex path, some features may be not available.") return paths[-1] paths = (path.replace(" ", "").strip() for path in paths) return os.path.join(DEFAULT_SAVE_DIR, *paths) def _get_config_path() -> os.PathLike: r"""Get the path to user config.""" return os.path.join(DEFAULT_CACHE_DIR, USER_CONFIG) def load_config() -> dict[str, str | dict[str, Any]]: r"""Load user config if exists.""" try: 
with open(_get_config_path(), encoding="utf-8") as f: return safe_load(f) except Exception: return {"lang": None, "hub_name": None, "last_model": None, "path_dict": {}, "cache_dir": None} def save_config( lang: str, hub_name: str | None = None, model_name: str | None = None, model_path: str | None = None ) -> None: r"""Save user config.""" os.makedirs(DEFAULT_CACHE_DIR, exist_ok=True) user_config = load_config() user_config["lang"] = lang or user_config["lang"] if hub_name: user_config["hub_name"] = hub_name if model_name: user_config["last_model"] = model_name if model_name and model_path: user_config["path_dict"][model_name] = model_path with open(_get_config_path(), "w", encoding="utf-8") as f: safe_dump(user_config, f) def get_model_path(model_name: str) -> str: r"""Get the model path according to the model name.""" user_config = load_config() path_dict: dict[DownloadSource, str] = SUPPORTED_MODELS.get(model_name, defaultdict(str)) model_path = user_config["path_dict"].get(model_name, "") or path_dict.get(DownloadSource.DEFAULT, "") if ( use_modelscope() and path_dict.get(DownloadSource.MODELSCOPE) and model_path == path_dict.get(DownloadSource.DEFAULT) ): # replace hf path with ms path model_path = path_dict.get(DownloadSource.MODELSCOPE) if ( use_openmind() and path_dict.get(DownloadSource.OPENMIND) and model_path == path_dict.get(DownloadSource.DEFAULT) ): # replace hf path with om path model_path = path_dict.get(DownloadSource.OPENMIND) return model_path def get_template(model_name: str) -> str: r"""Get the template name if the model is a chat/distill/instruct model.""" return DEFAULT_TEMPLATE.get(model_name, "default") def get_time() -> str: r"""Get current date and time.""" return datetime.now().strftime(r"%Y-%m-%d-%H-%M-%S") def is_multimodal(model_name: str) -> bool: r"""Judge if the model is a vision language model.""" return model_name in MULTIMODAL_SUPPORTED_MODELS def load_dataset_info(dataset_dir: str) -> dict[str, dict[str, Any]]: r"""Load 
dataset_info.json.""" if dataset_dir == "ONLINE" or dataset_dir.startswith("REMOTE:"): logger.info_rank0(f"dataset_dir is {dataset_dir}, using online dataset.") return {} try: with open(os.path.join(dataset_dir, DATA_CONFIG), encoding="utf-8") as f: return json.load(f) except Exception as err: logger.warning_rank0(f"Cannot open {os.path.join(dataset_dir, DATA_CONFIG)} due to {str(err)}.") return {} def load_args(config_path: str) -> dict[str, Any] | None: r"""Load the training configuration from config path.""" try: with open(config_path, encoding="utf-8") as f: return safe_load(f) except Exception: return None def save_args(config_path: str, config_dict: dict[str, Any]) -> None: r"""Save the training configuration to config path.""" with open(config_path, "w", encoding="utf-8") as f: safe_dump(config_dict, f) def _clean_cmd(args: dict[str, Any]) -> dict[str, Any]: r"""Remove args with NoneType or False or empty string value.""" no_skip_keys = [ "packing", "enable_thinking", "use_reentrant_gc", "double_quantization", "freeze_vision_tower", "freeze_multi_modal_projector", ] return {k: v for k, v in args.items() if (k in no_skip_keys) or (v is not None and v is not False and v != "")} def gen_cmd(args: dict[str, Any]) -> str: r"""Generate CLI commands for previewing.""" cmd_lines = ["llamafactory-cli train "] for k, v in _clean_cmd(args).items(): if isinstance(v, dict): cmd_lines.append(f" --{k} {json.dumps(v, ensure_ascii=False)} ") elif isinstance(v, list): cmd_lines.append(f" --{k} {' '.join(map(str, v))} ") else: cmd_lines.append(f" --{k} {str(v)} ") if os.name == "nt": cmd_text = "`\n".join(cmd_lines) else: cmd_text = "\\\n".join(cmd_lines) cmd_text = f"```bash\n{cmd_text}\n```" return cmd_text def save_cmd(args: dict[str, Any]) -> str: r"""Save CLI commands to launch training.""" output_dir = args["output_dir"] os.makedirs(output_dir, exist_ok=True) with open(os.path.join(output_dir, TRAINING_ARGS), "w", encoding="utf-8") as f: safe_dump(_clean_cmd(args), f) 
return os.path.join(output_dir, TRAINING_ARGS) def load_eval_results(path: os.PathLike) -> str: r"""Get scores after evaluation.""" with open(path, encoding="utf-8") as f: result = json.dumps(json.load(f), indent=4) return f"```json\n{result}\n```\n" def calculate_pixels(pixels: str) -> int: r"""Calculate the number of pixels from the expression.""" if "*" in pixels: return int(pixels.split("*")[0]) * int(pixels.split("*")[1]) else: return int(pixels) def create_ds_config() -> None: r"""Create deepspeed config in the current directory.""" os.makedirs(DEFAULT_CACHE_DIR, exist_ok=True) ds_config = { "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "gradient_accumulation_steps": "auto", "gradient_clipping": "auto", "zero_allow_untested_optimizer": True, "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1, }, "bf16": {"enabled": "auto"}, } offload_config = { "device": "cpu", "pin_memory": True, } ds_config["zero_optimization"] = { "stage": 2, "allgather_partitions": True, "allgather_bucket_size": 5e8, "overlap_comm": False, "reduce_scatter": True, "reduce_bucket_size": 5e8, "contiguous_gradients": True, "round_robin_gradients": True, } with open(os.path.join(DEFAULT_CACHE_DIR, "ds_z2_config.json"), "w", encoding="utf-8") as f: json.dump(ds_config, f, indent=2) ds_config["zero_optimization"]["offload_optimizer"] = offload_config with open(os.path.join(DEFAULT_CACHE_DIR, "ds_z2_offload_config.json"), "w", encoding="utf-8") as f: json.dump(ds_config, f, indent=2) ds_config["zero_optimization"] = { "stage": 3, "overlap_comm": False, "contiguous_gradients": True, "sub_group_size": 1e9, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": True, } with open(os.path.join(DEFAULT_CACHE_DIR, 
"ds_z3_config.json"), "w", encoding="utf-8") as f: json.dump(ds_config, f, indent=2) ds_config["zero_optimization"]["offload_optimizer"] = offload_config ds_config["zero_optimization"]["offload_param"] = offload_config with open(os.path.join(DEFAULT_CACHE_DIR, "ds_z3_offload_config.json"), "w", encoding="utf-8") as f: json.dump(ds_config, f, indent=2)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/locales.py
src/llamafactory/webui/locales.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. LOCALES = { "title": { "en": { "value": "<h1><center>🦙🏭LLaMA Factory: Unified Efficient Fine-Tuning of 100+ LLMs</center></h1>", }, "ru": { "value": "<h1><center>🦙🏭LLaMA Factory: Унифицированная эффективная тонкая настройка 100+ LLMs</center></h1>", }, "zh": { "value": "<h1><center>🦙🏭LLaMA Factory: 一站式大模型高效微调平台</center></h1>", }, "ko": { "value": "<h1><center>🦙🏭LLaMA Factory: 100+ LLMs를 위한 통합 효율적인 튜닝</center></h1>", }, "ja": { "value": "<h1><center>🦙🏭LLaMA Factory: 100+ LLMs の統合効率的なチューニング</center></h1>", }, }, "subtitle": { "en": { "value": ( "<h3><center>Visit <a href='https://github.com/hiyouga/LLaMA-Factory' target='_blank'>" "GitHub Page</a> <a href='https://llamafactory.readthedocs.io/en/latest/' target='_blank'>" "Documentation</a> <a href='https://blog.llamafactory.net/en/' target='_blank'>" "Blog</a></center></h3>" ), }, "ru": { "value": ( "<h3><center>Посетить <a href='https://github.com/hiyouga/LLaMA-Factory' target='_blank'>" "страницу GitHub</a> <a href='https://llamafactory.readthedocs.io/en/latest/' target='_blank'>" "Документацию</a> <a href='https://blog.llamafactory.net/en/' target='_blank'>" "Блог</a></center></h3>" ), }, "zh": { "value": ( "<h3><center>访问 <a href='https://github.com/hiyouga/LLaMA-Factory' target='_blank'>" "GitHub 主页</a> <a href='https://llamafactory.readthedocs.io/zh-cn/latest/' target='_blank'>" "官方文档</a> <a href='https://blog.llamafactory.net/' 
target='_blank'>" "博客</a></center></h3>" ), }, "ko": { "value": ( "<h3><center><a href='https://github.com/hiyouga/LLaMA-Factory' target='_blank'>" "GitHub 페이지</a> <a href='https://llamafactory.readthedocs.io/en/latest/' target='_blank'>" "공식 문서</a> <a href='https://blog.llamafactory.net/en/' target='_blank'>" "블로그</a>를 방문하세요.</center></h3>" ), }, "ja": { "value": ( "<h3><center><a href='https://github.com/hiyouga/LLaMA-Factory' target='_blank'>" "GitHub ページ</a> <a href='https://llamafactory.readthedocs.io/en/latest/' target='_blank'>" "ドキュメント</a> <a href='https://blog.llamafactory.net/en/' target='_blank'>" "ブログ</a>にアクセスする</center></h3>" ), }, }, "lang": { "en": { "label": "Language", }, "ru": { "label": "Язык", }, "zh": { "label": "语言", }, "ko": { "label": "언어", }, "ja": { "label": "言語", }, }, "model_name": { "en": { "label": "Model name", "info": "Input the initial name to search for the model.", }, "ru": { "label": "Название модели", "info": "Введите начальное имя для поиска модели.", }, "zh": { "label": "模型名称", "info": "输入首单词以检索模型。", }, "ko": { "label": "모델 이름", "info": "모델을 검색할 초기 이름을 입력하세요.", }, "ja": { "label": "モデル名", "info": "モデルを検索するための初期名を入力してください。", }, }, "model_path": { "en": { "label": "Model path", "info": "Path to pretrained model or model identifier from Hugging Face.", }, "ru": { "label": "Путь к модели", "info": "Путь к предварительно обученной модели или идентификатор модели от Hugging Face.", }, "zh": { "label": "模型路径", "info": "本地模型的文件路径或 Hugging Face 的模型标识符。", }, "ko": { "label": "모델 경로", "info": "사전 훈련된 모델의 경로 또는 Hugging Face의 모델 식별자.", }, "ja": { "label": "モデルパス", "info": "事前学習済みモデルへのパス、または Hugging Face のモデル識別子。", }, }, "hub_name": { "en": { "label": "Hub name", "info": "Choose the model download source.", }, "ru": { "label": "Имя хаба", "info": "Выберите источник загрузки модели.", }, "zh": { "label": "模型下载源", "info": "选择模型下载源。(网络受限环境推荐使用 ModelScope)", }, "ko": { "label": "모델 다운로드 소스", "info": "모델 다운로드 소스를 선택하세요.", }, "ja": { "label": 
"モデルダウンロードソース", "info": "モデルをダウンロードするためのソースを選択してください。", }, }, "finetuning_type": { "en": { "label": "Finetuning method", }, "ru": { "label": "Метод дообучения", }, "zh": { "label": "微调方法", }, "ko": { "label": "파인튜닝 방법", }, "ja": { "label": "ファインチューニング方法", }, }, "checkpoint_path": { "en": { "label": "Checkpoint path", }, "ru": { "label": "Путь контрольной точки", }, "zh": { "label": "检查点路径", }, "ko": { "label": "체크포인트 경로", }, "ja": { "label": "チェックポイントパス", }, }, "quantization_bit": { "en": { "label": "Quantization bit", "info": "Enable quantization (QLoRA).", }, "ru": { "label": "Уровень квантования", "info": "Включить квантование (QLoRA).", }, "zh": { "label": "量化等级", "info": "启用量化(QLoRA)。", }, "ko": { "label": "양자화 비트", "info": "양자화 활성화 (QLoRA).", }, "ja": { "label": "量子化ビット", "info": "量子化を有効にする (QLoRA)。", }, }, "quantization_method": { "en": { "label": "Quantization method", "info": "Quantization algorithm to use.", }, "ru": { "label": "Метод квантования", "info": "Алгоритм квантования, который следует использовать.", }, "zh": { "label": "量化方法", "info": "使用的量化算法。", }, "ko": { "label": "양자화 방법", "info": "사용할 양자화 알고리즘.", }, "ja": { "label": "量子化方法", "info": "使用する量子化アルゴリズム。", }, }, "template": { "en": { "label": "Chat template", "info": "The chat template used in constructing prompts.", }, "ru": { "label": "Шаблон чата", "info": "Шаблон чата используемый для составления подсказок.", }, "zh": { "label": "对话模板", "info": "构建提示词时使用的模板。", }, "ko": { "label": "채팅 템플릿", "info": "프롬프트 작성에 사용되는 채팅 템플릿.", }, "ja": { "label": "チャットテンプレート", "info": "プロンプトの構築に使用されるチャットテンプレート。", }, }, "rope_scaling": { "en": { "label": "RoPE scaling", "info": "RoPE scaling method to use.", }, "ru": { "label": "Масштабирование RoPE", "info": "Метод масштабирования RoPE для использования.", }, "zh": {"label": "RoPE 插值方法", "info": "RoPE 插值时使用的方法。"}, "ko": { "label": "RoPE 스케일링", "info": "사용할 RoPE 스케일링 방법.", }, "ja": { "label": "RoPE スケーリング", "info": "使用する RoPE スケーリング方法。", }, }, "booster": { "en": { 
"label": "Booster", "info": "Approach used to boost training speed.", }, "ru": { "label": "Ускоритель", "info": "Подход, используемый для ускорения обучения.", }, "zh": {"label": "加速方式", "info": "使用的加速方法。"}, "ko": { "label": "부스터", "info": "훈련 속도를 향상시키기 위해 사용된 접근 방식.", }, "ja": { "label": "ブースター", "info": "トレーニング速度を向上させるためのアプローチ。", }, }, "training_stage": { "en": { "label": "Stage", "info": "The stage to perform in training.", }, "ru": { "label": "Этап", "info": "Этап выполнения обучения.", }, "zh": { "label": "训练阶段", "info": "目前采用的训练方式。", }, "ko": { "label": "학습 단계", "info": "수행할 학습 방법.", }, "ja": { "label": "ステージ", "info": "トレーニングで実行するステージ。", }, }, "dataset_dir": { "en": { "label": "Data dir", "info": "Path to the data directory.", }, "ru": { "label": "Директория данных", "info": "Путь к директории данных.", }, "zh": { "label": "数据路径", "info": "数据文件夹的路径。", }, "ko": { "label": "데이터 디렉토리", "info": "데이터 디렉토리의 경로.", }, "ja": { "label": "データディレクトリ", "info": "データディレクトリへのパス。", }, }, "dataset": { "en": { "label": "Dataset", }, "ru": { "label": "Набор данных", }, "zh": { "label": "数据集", }, "ko": { "label": "데이터셋", }, "ja": { "label": "データセット", }, }, "data_preview_btn": { "en": { "value": "Preview dataset", }, "ru": { "value": "Просмотреть набор данных", }, "zh": { "value": "预览数据集", }, "ko": { "value": "데이터셋 미리보기", }, "ja": { "value": "データセットをプレビュー", }, }, "preview_count": { "en": { "label": "Count", }, "ru": { "label": "Количество", }, "zh": { "label": "数量", }, "ko": { "label": "개수", }, "ja": { "label": "カウント", }, }, "page_index": { "en": { "label": "Page", }, "ru": { "label": "Страница", }, "zh": { "label": "页数", }, "ko": { "label": "페이지", }, "ja": { "label": "ページ", }, }, "prev_btn": { "en": { "value": "Prev", }, "ru": { "value": "Предыдущая", }, "zh": { "value": "上一页", }, "ko": { "value": "이전", }, "ja": { "value": "前へ", }, }, "next_btn": { "en": { "value": "Next", }, "ru": { "value": "Следующая", }, "zh": { "value": "下一页", }, "ko": { "value": "다음", }, "ja": { "value": 
"次へ", }, }, "close_btn": { "en": { "value": "Close", }, "ru": { "value": "Закрыть", }, "zh": { "value": "关闭", }, "ko": { "value": "닫기", }, "ja": { "value": "閉じる", }, }, "preview_samples": { "en": { "label": "Samples", }, "ru": { "label": "Примеры", }, "zh": { "label": "样例", }, "ko": { "label": "샘플", }, "ja": { "label": "サンプル", }, }, "learning_rate": { "en": { "label": "Learning rate", "info": "Initial learning rate for AdamW.", }, "ru": { "label": "Скорость обучения", "info": "Начальная скорость обучения для AdamW.", }, "zh": { "label": "学习率", "info": "AdamW 优化器的初始学习率。", }, "ko": { "label": "학습률", "info": "AdamW의 초기 학습률.", }, "ja": { "label": "学習率", "info": "AdamW の初期学習率。", }, }, "num_train_epochs": { "en": { "label": "Epochs", "info": "Total number of training epochs to perform.", }, "ru": { "label": "Эпохи", "info": "Общее количество эпох обучения.", }, "zh": { "label": "训练轮数", "info": "需要执行的训练总轮数。", }, "ko": { "label": "에포크", "info": "수행할 총 학습 에포크 수.", }, "ja": { "label": "エポック数", "info": "実行するトレーニングの総エポック数。", }, }, "max_grad_norm": { "en": { "label": "Maximum gradient norm", "info": "Norm for gradient clipping.", }, "ru": { "label": "Максимальная норма градиента", "info": "Норма для обрезки градиента.", }, "zh": { "label": "最大梯度范数", "info": "用于梯度裁剪的范数。", }, "ko": { "label": "최대 그레디언트 노름(norm)", "info": "그레디언트 클리핑을 위한 노름(norm).", }, "ja": { "label": "最大勾配ノルム", "info": "勾配クリッピングのためのノルム。", }, }, "max_samples": { "en": { "label": "Max samples", "info": "Maximum samples per dataset.", }, "ru": { "label": "Максимальное количество образцов", "info": "Максимальное количество образцов на набор данных.", }, "zh": { "label": "最大样本数", "info": "每个数据集的最大样本数。", }, "ko": { "label": "최대 샘플 수", "info": "데이터셋 당 최대 샘플 수.", }, "ja": { "label": "最大サンプル数", "info": "データセットごとの最大サンプル数。", }, }, "compute_type": { "en": { "label": "Compute type", "info": "Whether to use mixed precision training.", }, "ru": { "label": "Тип вычислений", "info": "Использовать ли обучение смешанной точности.", 
}, "zh": { "label": "计算类型", "info": "是否使用混合精度训练。", }, "ko": { "label": "연산 유형", "info": "혼합 정밀도 훈련을 사용할지 여부.", }, "ja": { "label": "計算タイプ", "info": "混合精度トレーニングを使用するかどうか。", }, }, "cutoff_len": { "en": { "label": "Cutoff length", "info": "Max tokens in input sequence.", }, "ru": { "label": "Длина обрезки", "info": "Максимальное количество токенов во входной последовательности.", }, "zh": { "label": "截断长度", "info": "输入序列分词后的最大长度。", }, "ko": { "label": "컷오프 길이", "info": "입력 시퀀스의 최대 토큰 수.", }, "ja": { "label": "カットオフ長", "info": "入力シーケンスの最大トークン数。", }, }, "batch_size": { "en": { "label": "Batch size", "info": "Number of samples processed on each GPU.", }, "ru": { "label": "Размер пакета", "info": "Количество образцов для обработки на каждом GPU.", }, "zh": { "label": "批处理大小", "info": "每个 GPU 处理的样本数量。", }, "ko": { "label": "배치 크기", "info": "각 GPU에서 처리되는 샘플 수.", }, "ja": { "label": "バッチサイズ", "info": "各 GPU で処理されるサンプル数。", }, }, "gradient_accumulation_steps": { "en": { "label": "Gradient accumulation", "info": "Number of steps for gradient accumulation.", }, "ru": { "label": "Накопление градиента", "info": "Количество шагов накопления градиента.", }, "zh": { "label": "梯度累积", "info": "梯度累积的步数。", }, "ko": { "label": "그레디언트 누적", "info": "그레디언트 누적 단계 수.", }, "ja": { "label": "勾配累積", "info": "勾配累積のステップ数。", }, }, "val_size": { "en": { "label": "Val size", "info": "Percentage of validation set from the entire dataset.", }, "ru": { "label": "Размер валидации", "info": "Пропорция данных в наборе для разработки.", }, "zh": { "label": "验证集比例", "info": "验证集占全部样本的百分比。", }, "ko": { "label": "검증 데이터셋 크기", "info": "개발 데이터셋에서 검증 데이터의 비율.", }, "ja": { "label": "検証セットサイズ", "info": "データセット全体に対する検証セットの割合。", }, }, "lr_scheduler_type": { "en": { "label": "LR scheduler", "info": "Name of the learning rate scheduler.", }, "ru": { "label": "Планировщик скорости обучения", "info": "Название планировщика скорости обучения.", }, "zh": { "label": "学习率调节器", "info": "学习率调度器的名称。", }, "ko": { "label": "LR 
스케줄러", "info": "학습률 스케줄러의 이름.", }, "ja": { "label": "学習率スケジューラ", "info": "学習率スケジューラの名前。", }, }, "extra_tab": { "en": { "label": "Extra configurations", }, "ru": { "label": "Дополнительные конфигурации", }, "zh": { "label": "其它参数设置", }, "ko": { "label": "추가 구성(configuration)", }, "ja": { "label": "追加設定", }, }, "logging_steps": { "en": { "label": "Logging steps", "info": "Number of steps between two logs.", }, "ru": { "label": "Шаги логирования", "info": "Количество шагов между двумя записями в журнале.", }, "zh": { "label": "日志间隔", "info": "每两次日志输出间的更新步数。", }, "ko": { "label": "로깅 스텝", "info": "이전 로깅과 다음 로깅 간 스텝 수.", }, "ja": { "label": "ロギングステップ", "info": "2 つのログ間のステップ数。", }, }, "save_steps": { "en": { "label": "Save steps", "info": "Number of steps between two checkpoints.", }, "ru": { "label": "Шаги сохранения", "info": "Количество шагов между двумя контрольными точками.", }, "zh": { "label": "保存间隔", "info": "每两次断点保存间的更新步数。", }, "ko": { "label": "저장 스텝", "info": "이전 체크포인트와 다음 체크포인트 사이의 스텝 수.", }, "ja": { "label": "保存ステップ", "info": "2 つのチェックポイント間のステップ数。", }, }, "warmup_steps": { "en": { "label": "Warmup steps", "info": "Number of steps used for warmup.", }, "ru": { "label": "Шаги прогрева", "info": "Количество шагов, используемых для прогрева.", }, "zh": { "label": "预热步数", "info": "学习率预热采用的步数。", }, "ko": { "label": "Warmup 스텝", "info": "Warmup에 사용되는 스텝 수.", }, "ja": { "label": "ウォームアップステップ", "info": "ウォームアップに使用されるステップ数。", }, }, "neftune_alpha": { "en": { "label": "NEFTune alpha", "info": "Magnitude of noise adding to embedding vectors.", }, "ru": { "label": "NEFTune alpha", "info": "Величина шума, добавляемого к векторам вложений.", }, "zh": { "label": "NEFTune 噪声参数", "info": "嵌入向量所添加的噪声大小。", }, "ko": { "label": "NEFTune 알파", "info": "임베딩 벡터에 추가되는 노이즈의 크기.", }, "ja": { "label": "NEFTune alpha", "info": "埋め込みベクトルに追加されるノイズの大きさ。", }, }, "extra_args": { "en": { "label": "Extra arguments", "info": "Extra arguments passed to the trainer in JSON format.", }, "ru": { 
"label": "Дополнительные аргументы", "info": "Дополнительные аргументы, которые передаются тренеру в формате JSON.", }, "zh": { "label": "额外参数", "info": "以 JSON 格式传递给训练器的额外参数。", }, "ko": { "label": "추가 인수", "info": "JSON 형식으로 트레이너에게 전달할 추가 인수입니다.", }, "ja": { "label": "追加引数", "info": "JSON 形式でトレーナーに渡される追加引数。", }, }, "packing": { "en": { "label": "Pack sequences", "info": "Pack sequences into samples of fixed length.", }, "ru": { "label": "Упаковка последовательностей", "info": "Упаковка последовательностей в образцы фиксированной длины.", }, "zh": { "label": "序列打包", "info": "将序列打包为等长样本。", }, "ko": { "label": "시퀀스 패킹", "info": "고정된 길이의 샘플로 시퀀스를 패킹합니다.", }, "ja": { "label": "シーケンスパッキング", "info": "シーケンスを固定長のサンプルにパッキングします。", }, }, "neat_packing": { "en": { "label": "Use neat packing", "info": "Avoid cross-attention between packed sequences.", }, "ru": { "label": "Используйте аккуратную упаковку", "info": "избегайте перекрестного внимания между упакованными последовательностями.", }, "zh": { "label": "使用无污染打包", "info": "避免打包后的序列产生交叉注意力。", }, "ko": { "label": "니트 패킹 사용", "info": "패킹된 시퀀스 간의 크로스 어텐션을 피합니다.", }, "ja": { "label": "無汚染パッキングを使用", "info": "パッキング後のシーケンス間のクロスアテンションを避けます。", }, }, "train_on_prompt": { "en": { "label": "Train on prompt", "info": "Disable the label mask on the prompt (only for SFT).", }, "ru": { "label": "Тренировка на подсказке", "info": "Отключить маску меток на подсказке (только для SFT).", }, "zh": { "label": "学习提示词", "info": "不在提示词的部分添加掩码(仅适用于 SFT)。", }, "ko": { "label": "프롬프트도 학습", "info": "프롬프트에서 라벨 마스킹을 비활성화합니다 (SFT에만 해당).", }, "ja": { "label": "プロンプトで学習", "info": "プロンプト部分にマスクを追加しない(SFT のみ)。", }, }, "mask_history": { "en": { "label": "Mask history", "info": "Train on the last turn only (only for SFT).", }, "ru": { "label": "История масок", "info": "Тренироваться только на последнем шаге (только для SFT).", }, "zh": { "label": "不学习历史对话", "info": "仅学习最后一轮对话(仅适用于 SFT)。", }, "ko": { "label": "히스토리 마스킹", "info": "대화 데이터의 마지막 턴만 학습합니다 (SFT에만 
해당).", }, "ja": { "label": "履歴をマスク", "info": "最後のターンのみを学習する(SFT のみ)。", }, }, "resize_vocab": { "en": { "label": "Resize token embeddings", "info": "Resize the tokenizer vocab and the embedding layers.", }, "ru": { "label": "Изменение размера токенных эмбеддингов", "info": "Изменить размер словаря токенизатора и слоев эмбеддинга.", }, "zh": { "label": "更改词表大小", "info": "更改分词器词表和嵌入层的大小。", }, "ko": { "label": "토큰 임베딩의 사이즈 조정", "info": "토크나이저 어휘와 임베딩 레이어의 크기를 조정합니다.", }, "ja": { "label": "トークン埋め込みのサイズ変更", "info": "トークナイザーの語彙と埋め込み層のサイズを変更します。", }, }, "use_llama_pro": { "en": { "label": "Enable LLaMA Pro", "info": "Make the parameters in the expanded blocks trainable.", }, "ru": { "label": "Включить LLaMA Pro", "info": "Сделать параметры в расширенных блоках обучаемыми.", }, "zh": { "label": "使用 LLaMA Pro", "info": "仅训练块扩展后的参数。", }, "ko": { "label": "LLaMA Pro 사용", "info": "확장된 블록의 매개변수를 학습 가능하게 만듭니다.", }, "ja": { "label": "LLaMA Pro を有効化", "info": "拡張ブロックのパラメータのみをトレーニングします。", }, }, "enable_thinking": { "en": { "label": "Enable thinking", "info": "Whether or not to enable thinking mode for reasoning models.", }, "ru": { "label": "Включить мысли", "info": "Включить режим мысли для моделей решающего характера.", }, "zh": { "label": "启用思考模式", "info": "是否启用推理模型的思考模式。", }, "ko": { "label": "생각 모드 활성화", "info": "추론 모델의 생각 모드를 활성화할지 여부.", }, "ja": { "label": "思考モードを有効化",
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
true
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/control.py
src/llamafactory/webui/control.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from typing import Any from transformers.trainer_utils import get_last_checkpoint from ..extras.constants import ( CHECKPOINT_NAMES, PEFT_METHODS, RUNNING_LOG, STAGES_USE_PAIR_DATA, SWANLAB_CONFIG, TRAINER_LOG, TRAINING_STAGES, ) from ..extras.packages import is_gradio_available, is_matplotlib_available from ..extras.ploting import gen_loss_plot from ..model import QuantizationMethod from .common import DEFAULT_CONFIG_DIR, DEFAULT_DATA_DIR, get_model_path, get_save_dir, get_template, load_dataset_info from .locales import ALERTS if is_gradio_available(): import gradio as gr def switch_hub(hub_name: str) -> None: r"""Switch model hub. Inputs: top.hub_name """ os.environ["USE_MODELSCOPE_HUB"] = "1" if hub_name == "modelscope" else "0" os.environ["USE_OPENMIND_HUB"] = "1" if hub_name == "openmind" else "0" def can_quantize(finetuning_type: str) -> "gr.Dropdown": r"""Judge if the quantization is available in this finetuning type. Inputs: top.finetuning_type Outputs: top.quantization_bit """ if finetuning_type not in PEFT_METHODS: return gr.Dropdown(value="none", interactive=False) else: return gr.Dropdown(interactive=True) def can_quantize_to(quantization_method: str) -> "gr.Dropdown": r"""Get the available quantization bits. 
Inputs: top.quantization_method Outputs: top.quantization_bit """ if quantization_method == QuantizationMethod.BNB: available_bits = ["none", "8", "4"] elif quantization_method == QuantizationMethod.HQQ: available_bits = ["none", "8", "6", "5", "4", "3", "2", "1"] elif quantization_method == QuantizationMethod.EETQ: available_bits = ["none", "8"] return gr.Dropdown(choices=available_bits) def change_stage(training_stage: str = list(TRAINING_STAGES.keys())[0]) -> tuple[list[str], bool]: r"""Modify states after changing the training stage. Inputs: train.training_stage Outputs: train.dataset, train.packing """ return [], TRAINING_STAGES[training_stage] == "pt" def get_model_info(model_name: str) -> tuple[str, str]: r"""Get the necessary information of this model. Inputs: top.model_name Outputs: top.model_path, top.template """ return get_model_path(model_name), get_template(model_name) def check_template(lang: str, template: str) -> None: r"""Check if an instruct model is used. Please use queue=True to show the warning message. Inputs: top.lang, top.template """ if template == "default": gr.Warning(ALERTS["warn_no_instruct"][lang]) def get_trainer_info(lang: str, output_path: os.PathLike, do_train: bool) -> tuple[str, "gr.Slider", dict[str, Any]]: r"""Get training infomation for monitor. 
If do_train is True: Inputs: top.lang, train.output_path Outputs: train.output_box, train.progress_bar, train.loss_viewer, train.swanlab_link If do_train is False: Inputs: top.lang, eval.output_path Outputs: eval.output_box, eval.progress_bar, None, None """ running_log = "" running_progress = gr.Slider(visible=False) running_info = {} running_log_path = os.path.join(output_path, RUNNING_LOG) if os.path.isfile(running_log_path): with open(running_log_path, encoding="utf-8") as f: running_log = "```\n" + f.read()[-20000:] + "\n```\n" # avoid lengthy log trainer_log_path = os.path.join(output_path, TRAINER_LOG) if os.path.isfile(trainer_log_path): trainer_log: list[dict[str, Any]] = [] with open(trainer_log_path, encoding="utf-8") as f: for line in f: trainer_log.append(json.loads(line)) if len(trainer_log) != 0: latest_log = trainer_log[-1] percentage = latest_log["percentage"] label = "Running {:d}/{:d}: {} < {}".format( latest_log["current_steps"], latest_log["total_steps"], latest_log["elapsed_time"], latest_log["remaining_time"], ) running_progress = gr.Slider(label=label, value=percentage, visible=True) if do_train and is_matplotlib_available(): running_info["loss_viewer"] = gr.Plot(gen_loss_plot(trainer_log)) swanlab_config_path = os.path.join(output_path, SWANLAB_CONFIG) if os.path.isfile(swanlab_config_path): with open(swanlab_config_path, encoding="utf-8") as f: swanlab_public_config = json.load(f) swanlab_link = swanlab_public_config["cloud"]["experiment_url"] if swanlab_link is not None: running_info["swanlab_link"] = gr.Markdown( ALERTS["info_swanlab_link"][lang] + swanlab_link, visible=True ) return running_log, running_progress, running_info def list_checkpoints(model_name: str, finetuning_type: str) -> "gr.Dropdown": r"""List all available checkpoints. 
Inputs: top.model_name, top.finetuning_type Outputs: top.checkpoint_path """ checkpoints = [] if model_name: save_dir = get_save_dir(model_name, finetuning_type) if save_dir and os.path.isdir(save_dir): for checkpoint in os.listdir(save_dir): if os.path.isdir(os.path.join(save_dir, checkpoint)) and any( os.path.isfile(os.path.join(save_dir, checkpoint, name)) for name in CHECKPOINT_NAMES ): checkpoints.append(checkpoint) if finetuning_type in PEFT_METHODS: return gr.Dropdown(value=[], choices=checkpoints, multiselect=True) else: return gr.Dropdown(value=None, choices=checkpoints, multiselect=False) def list_config_paths(current_time: str) -> "gr.Dropdown": r"""List all the saved configuration files. Inputs: train.current_time Outputs: train.config_path """ config_files = [f"{current_time}.yaml"] if os.path.isdir(DEFAULT_CONFIG_DIR): for file_name in os.listdir(DEFAULT_CONFIG_DIR): if file_name.endswith(".yaml") and file_name not in config_files: config_files.append(file_name) return gr.Dropdown(choices=config_files) def list_datasets(dataset_dir: str = None, training_stage: str = list(TRAINING_STAGES.keys())[0]) -> "gr.Dropdown": r"""List all available datasets in the dataset dir for the training stage. Inputs: *.dataset_dir, *.training_stage Outputs: *.dataset """ dataset_info = load_dataset_info(dataset_dir if dataset_dir is not None else DEFAULT_DATA_DIR) ranking = TRAINING_STAGES[training_stage] in STAGES_USE_PAIR_DATA datasets = [k for k, v in dataset_info.items() if v.get("ranking", False) == ranking] return gr.Dropdown(choices=datasets) def list_output_dirs(model_name: str | None, finetuning_type: str, current_time: str) -> "gr.Dropdown": r"""List all the directories that can resume from. 
Inputs: top.model_name, top.finetuning_type, train.current_time Outputs: train.output_dir """ output_dirs = [f"train_{current_time}"] if model_name: save_dir = get_save_dir(model_name, finetuning_type) if save_dir and os.path.isdir(save_dir): for folder in os.listdir(save_dir): output_dir = os.path.join(save_dir, folder) if os.path.isdir(output_dir) and get_last_checkpoint(output_dir) is not None: output_dirs.append(folder) return gr.Dropdown(choices=output_dirs)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/css.py
src/llamafactory/webui/css.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. CSS = r""" .duplicate-button { margin: auto !important; color: white !important; background: black !important; border-radius: 100vh !important; } .thinking-summary { padding: 8px !important; } .thinking-summary span { border-radius: 4px !important; padding: 4px !important; cursor: pointer !important; font-size: 14px !important; background: rgb(245, 245, 245) !important; } .dark .thinking-summary span { background: rgb(73, 73, 73) !important; } .thinking-container { border-left: 2px solid #a6a6a6 !important; padding-left: 10px !important; margin: 4px 0 !important; } .thinking-container p { color: #a6a6a6 !important; } .modal-box { position: fixed !important; top: 50%; left: 50%; transform: translate(-50%, -50%); /* center horizontally */ max-width: 1000px; max-height: 750px; overflow-y: auto; background-color: var(--input-background-fill); flex-wrap: nowrap !important; border: 2px solid black !important; z-index: 1000; padding: 10px; } .dark .modal-box { border: 2px solid white !important; } """
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/interface.py
src/llamafactory/webui/interface.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import platform from ..extras.misc import fix_proxy, is_env_enabled from ..extras.packages import is_gradio_available from .common import save_config from .components import ( create_chat_box, create_eval_tab, create_export_tab, create_footer, create_infer_tab, create_top, create_train_tab, ) from .css import CSS from .engine import Engine if is_gradio_available(): import gradio as gr def create_ui(demo_mode: bool = False) -> "gr.Blocks": engine = Engine(demo_mode=demo_mode, pure_chat=False) hostname = os.getenv("HOSTNAME", os.getenv("COMPUTERNAME", platform.node())).split(".")[0] with gr.Blocks(title=f"LLaMA Factory ({hostname})", css=CSS) as demo: title = gr.HTML() subtitle = gr.HTML() if demo_mode: gr.DuplicateButton(value="Duplicate Space for private use", elem_classes="duplicate-button") engine.manager.add_elems("head", {"title": title, "subtitle": subtitle}) engine.manager.add_elems("top", create_top()) lang: gr.Dropdown = engine.manager.get_elem_by_id("top.lang") with gr.Tab("Train"): engine.manager.add_elems("train", create_train_tab(engine)) with gr.Tab("Evaluate & Predict"): engine.manager.add_elems("eval", create_eval_tab(engine)) with gr.Tab("Chat"): engine.manager.add_elems("infer", create_infer_tab(engine)) if not demo_mode: with gr.Tab("Export"): engine.manager.add_elems("export", create_export_tab(engine)) engine.manager.add_elems("footer", create_footer()) 
demo.load(engine.resume, outputs=engine.manager.get_elem_list(), concurrency_limit=None) lang.change(engine.change_lang, [lang], engine.manager.get_elem_list(), queue=False) lang.input(save_config, inputs=[lang], queue=False) return demo def create_web_demo() -> "gr.Blocks": engine = Engine(pure_chat=True) hostname = os.getenv("HOSTNAME", os.getenv("COMPUTERNAME", platform.node())).split(".")[0] with gr.Blocks(title=f"LLaMA Factory Web Demo ({hostname})", css=CSS) as demo: lang = gr.Dropdown(choices=["en", "ru", "zh", "ko", "ja"], scale=1) engine.manager.add_elems("top", dict(lang=lang)) _, _, chat_elems = create_chat_box(engine, visible=True) engine.manager.add_elems("infer", chat_elems) demo.load(engine.resume, outputs=engine.manager.get_elem_list(), concurrency_limit=None) lang.change(engine.change_lang, [lang], engine.manager.get_elem_list(), queue=False) lang.input(save_config, inputs=[lang], queue=False) return demo def run_web_ui() -> None: gradio_ipv6 = is_env_enabled("GRADIO_IPV6") gradio_share = is_env_enabled("GRADIO_SHARE") server_name = os.getenv("GRADIO_SERVER_NAME", "[::]" if gradio_ipv6 else "0.0.0.0") print("Visit http://ip:port for Web UI, e.g., http://127.0.0.1:7860") fix_proxy(ipv6_enabled=gradio_ipv6) create_ui().queue().launch(share=gradio_share, server_name=server_name, inbrowser=True) def run_web_demo() -> None: gradio_ipv6 = is_env_enabled("GRADIO_IPV6") gradio_share = is_env_enabled("GRADIO_SHARE") server_name = os.getenv("GRADIO_SERVER_NAME", "[::]" if gradio_ipv6 else "0.0.0.0") print("Visit http://ip:port for Web UI, e.g., http://127.0.0.1:7860") fix_proxy(ipv6_enabled=gradio_ipv6) create_web_demo().queue().launch(share=gradio_share, server_name=server_name, inbrowser=True)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/__init__.py
src/llamafactory/webui/__init__.py
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/manager.py
src/llamafactory/webui/manager.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections.abc import Generator from typing import TYPE_CHECKING if TYPE_CHECKING: from gradio.components import Component class Manager: r"""A class to manage all the gradio components in Web UI.""" def __init__(self) -> None: self._id_to_elem: dict[str, Component] = {} self._elem_to_id: dict[Component, str] = {} def add_elems(self, tab_name: str, elem_dict: dict[str, "Component"]) -> None: r"""Add elements to manager.""" for elem_name, elem in elem_dict.items(): elem_id = f"{tab_name}.{elem_name}" self._id_to_elem[elem_id] = elem self._elem_to_id[elem] = elem_id def get_elem_list(self) -> list["Component"]: r"""Return the list of all elements.""" return list(self._id_to_elem.values()) def get_elem_iter(self) -> Generator[tuple[str, "Component"], None, None]: r"""Return an iterator over all elements with their names.""" for elem_id, elem in self._id_to_elem.items(): yield elem_id.split(".")[-1], elem def get_elem_by_id(self, elem_id: str) -> "Component": r"""Get element by id. 
Example: top.lang, train.dataset """ return self._id_to_elem[elem_id] def get_id_by_elem(self, elem: "Component") -> str: r"""Get id by element.""" return self._elem_to_id[elem] def get_base_elems(self) -> set["Component"]: r"""Get the base elements that are commonly used.""" return { self._id_to_elem["top.lang"], self._id_to_elem["top.model_name"], self._id_to_elem["top.model_path"], self._id_to_elem["top.finetuning_type"], self._id_to_elem["top.checkpoint_path"], self._id_to_elem["top.quantization_bit"], self._id_to_elem["top.quantization_method"], self._id_to_elem["top.template"], self._id_to_elem["top.rope_scaling"], self._id_to_elem["top.booster"], }
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/engine.py
src/llamafactory/webui/engine.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING, Any from .chatter import WebChatModel from .common import create_ds_config, get_time, load_config from .locales import LOCALES from .manager import Manager from .runner import Runner if TYPE_CHECKING: from gradio.components import Component class Engine: r"""A general engine to control the behaviors of Web UI.""" def __init__(self, demo_mode: bool = False, pure_chat: bool = False) -> None: self.demo_mode = demo_mode self.pure_chat = pure_chat self.manager = Manager() self.runner = Runner(self.manager, demo_mode) self.chatter = WebChatModel(self.manager, demo_mode, lazy_init=(not pure_chat)) if not demo_mode: create_ds_config() def _update_component(self, input_dict: dict[str, dict[str, Any]]) -> dict["Component", "Component"]: r"""Update gradio components according to the (elem_id, properties) mapping.""" output_dict: dict[Component, Component] = {} for elem_id, elem_attr in input_dict.items(): elem = self.manager.get_elem_by_id(elem_id) output_dict[elem] = elem.__class__(**elem_attr) return output_dict def resume(self): r"""Get the initial value of gradio components and restores training status if necessary.""" user_config = load_config() if not self.demo_mode else {} # do not use config in demo mode lang = user_config.get("lang") or "en" init_dict = {"top.lang": {"value": lang}, "infer.chat_box": {"visible": self.chatter.loaded}} if not self.pure_chat: 
current_time = get_time() hub_name = user_config.get("hub_name") or "huggingface" init_dict["top.hub_name"] = {"value": hub_name} init_dict["train.current_time"] = {"value": current_time} init_dict["train.output_dir"] = {"value": f"train_{current_time}"} init_dict["train.config_path"] = {"value": f"{current_time}.yaml"} init_dict["eval.output_dir"] = {"value": f"eval_{current_time}"} init_dict["infer.mm_box"] = {"visible": False} if user_config.get("last_model", None): init_dict["top.model_name"] = {"value": user_config["last_model"]} yield self._update_component(init_dict) if self.runner.running and not self.demo_mode and not self.pure_chat: yield {elem: elem.__class__(value=value) for elem, value in self.runner.running_data.items()} if self.runner.do_train: yield self._update_component({"train.resume_btn": {"value": True}}) else: yield self._update_component({"eval.resume_btn": {"value": True}}) def change_lang(self, lang: str): r"""Update the displayed language of gradio components.""" return { elem: elem.__class__(**LOCALES[elem_name][lang]) for elem_name, elem in self.manager.get_elem_iter() if elem_name in LOCALES }
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/components/train.py
src/llamafactory/webui/components/train.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from transformers.trainer_utils import SchedulerType from ...extras.constants import TRAINING_STAGES from ...extras.misc import get_device_count from ...extras.packages import is_gradio_available from ..common import DEFAULT_DATA_DIR from ..control import change_stage, list_checkpoints, list_config_paths, list_datasets, list_output_dirs from .data import create_preview_box if is_gradio_available(): import gradio as gr if TYPE_CHECKING: from gradio.components import Component from ..engine import Engine def create_train_tab(engine: "Engine") -> dict[str, "Component"]: input_elems = engine.manager.get_base_elems() elem_dict = dict() with gr.Row(): stages = list(TRAINING_STAGES.keys()) training_stage = gr.Dropdown(choices=stages, value=stages[0], scale=1) dataset_dir = gr.Textbox(value=DEFAULT_DATA_DIR, scale=1) dataset = gr.Dropdown(multiselect=True, allow_custom_value=True, scale=4) preview_elems = create_preview_box(dataset_dir, dataset) input_elems.update({training_stage, dataset_dir, dataset}) elem_dict.update(dict(training_stage=training_stage, dataset_dir=dataset_dir, dataset=dataset, **preview_elems)) with gr.Row(): learning_rate = gr.Textbox(value="5e-5") num_train_epochs = gr.Textbox(value="3.0") max_grad_norm = gr.Textbox(value="1.0") max_samples = gr.Textbox(value="100000") compute_type = gr.Dropdown(choices=["bf16", "fp16", "fp32", "pure_bf16"], 
value="bf16") input_elems.update({learning_rate, num_train_epochs, max_grad_norm, max_samples, compute_type}) elem_dict.update( dict( learning_rate=learning_rate, num_train_epochs=num_train_epochs, max_grad_norm=max_grad_norm, max_samples=max_samples, compute_type=compute_type, ) ) with gr.Row(): cutoff_len = gr.Slider(minimum=4, maximum=131072, value=2048, step=1) batch_size = gr.Slider(minimum=1, maximum=1024, value=2, step=1) gradient_accumulation_steps = gr.Slider(minimum=1, maximum=1024, value=8, step=1) val_size = gr.Slider(minimum=0, maximum=1, value=0, step=0.001) lr_scheduler_type = gr.Dropdown(choices=[scheduler.value for scheduler in SchedulerType], value="cosine") input_elems.update({cutoff_len, batch_size, gradient_accumulation_steps, val_size, lr_scheduler_type}) elem_dict.update( dict( cutoff_len=cutoff_len, batch_size=batch_size, gradient_accumulation_steps=gradient_accumulation_steps, val_size=val_size, lr_scheduler_type=lr_scheduler_type, ) ) with gr.Accordion(open=False) as extra_tab: with gr.Row(): logging_steps = gr.Slider(minimum=1, maximum=1000, value=5, step=5) save_steps = gr.Slider(minimum=10, maximum=5000, value=100, step=10) warmup_steps = gr.Slider(minimum=0, maximum=5000, value=0, step=1) neftune_alpha = gr.Slider(minimum=0, maximum=10, value=0, step=0.1) extra_args = gr.Textbox(value='{"optim": "adamw_torch"}') with gr.Row(): with gr.Column(): packing = gr.Checkbox() neat_packing = gr.Checkbox() with gr.Column(): train_on_prompt = gr.Checkbox() mask_history = gr.Checkbox() with gr.Column(): resize_vocab = gr.Checkbox() use_llama_pro = gr.Checkbox() with gr.Column(): enable_thinking = gr.Checkbox(value=True) report_to = gr.Dropdown( choices=["none", "wandb", "mlflow", "neptune", "tensorboard", "all"], value="none", allow_custom_value=True, ) input_elems.update( { logging_steps, save_steps, warmup_steps, neftune_alpha, extra_args, packing, neat_packing, train_on_prompt, mask_history, resize_vocab, use_llama_pro, enable_thinking, 
report_to, } ) elem_dict.update( dict( extra_tab=extra_tab, logging_steps=logging_steps, save_steps=save_steps, warmup_steps=warmup_steps, neftune_alpha=neftune_alpha, extra_args=extra_args, packing=packing, neat_packing=neat_packing, train_on_prompt=train_on_prompt, mask_history=mask_history, resize_vocab=resize_vocab, use_llama_pro=use_llama_pro, enable_thinking=enable_thinking, report_to=report_to, ) ) with gr.Accordion(open=False) as freeze_tab: with gr.Row(): freeze_trainable_layers = gr.Slider(minimum=-128, maximum=128, value=2, step=1) freeze_trainable_modules = gr.Textbox(value="all") freeze_extra_modules = gr.Textbox() input_elems.update({freeze_trainable_layers, freeze_trainable_modules, freeze_extra_modules}) elem_dict.update( dict( freeze_tab=freeze_tab, freeze_trainable_layers=freeze_trainable_layers, freeze_trainable_modules=freeze_trainable_modules, freeze_extra_modules=freeze_extra_modules, ) ) with gr.Accordion(open=False) as lora_tab: with gr.Row(): lora_rank = gr.Slider(minimum=1, maximum=1024, value=8, step=1) lora_alpha = gr.Slider(minimum=1, maximum=2048, value=16, step=1) lora_dropout = gr.Slider(minimum=0, maximum=1, value=0, step=0.01) loraplus_lr_ratio = gr.Slider(minimum=0, maximum=64, value=0, step=0.01) create_new_adapter = gr.Checkbox() with gr.Row(): use_rslora = gr.Checkbox() use_dora = gr.Checkbox() use_pissa = gr.Checkbox() lora_target = gr.Textbox(scale=2) additional_target = gr.Textbox(scale=2) input_elems.update( { lora_rank, lora_alpha, lora_dropout, loraplus_lr_ratio, create_new_adapter, use_rslora, use_dora, use_pissa, lora_target, additional_target, } ) elem_dict.update( dict( lora_tab=lora_tab, lora_rank=lora_rank, lora_alpha=lora_alpha, lora_dropout=lora_dropout, loraplus_lr_ratio=loraplus_lr_ratio, create_new_adapter=create_new_adapter, use_rslora=use_rslora, use_dora=use_dora, use_pissa=use_pissa, lora_target=lora_target, additional_target=additional_target, ) ) with gr.Accordion(open=False) as rlhf_tab: with gr.Row(): 
pref_beta = gr.Slider(minimum=0, maximum=1, value=0.1, step=0.01) pref_ftx = gr.Slider(minimum=0, maximum=10, value=0, step=0.01) pref_loss = gr.Dropdown(choices=["sigmoid", "hinge", "ipo", "kto_pair", "orpo", "simpo"], value="sigmoid") reward_model = gr.Dropdown(multiselect=True, allow_custom_value=True) with gr.Column(): ppo_score_norm = gr.Checkbox() ppo_whiten_rewards = gr.Checkbox() input_elems.update({pref_beta, pref_ftx, pref_loss, reward_model, ppo_score_norm, ppo_whiten_rewards}) elem_dict.update( dict( rlhf_tab=rlhf_tab, pref_beta=pref_beta, pref_ftx=pref_ftx, pref_loss=pref_loss, reward_model=reward_model, ppo_score_norm=ppo_score_norm, ppo_whiten_rewards=ppo_whiten_rewards, ) ) with gr.Accordion(open=False) as mm_tab: with gr.Row(): freeze_vision_tower = gr.Checkbox(value=True) freeze_multi_modal_projector = gr.Checkbox(value=True) freeze_language_model = gr.Checkbox(value=False) with gr.Row(): image_max_pixels = gr.Textbox(value="768*768") image_min_pixels = gr.Textbox(value="32*32") video_max_pixels = gr.Textbox(value="256*256") video_min_pixels = gr.Textbox(value="16*16") input_elems.update( { freeze_vision_tower, freeze_multi_modal_projector, freeze_language_model, image_max_pixels, image_min_pixels, video_max_pixels, video_min_pixels, } ) elem_dict.update( dict( mm_tab=mm_tab, freeze_vision_tower=freeze_vision_tower, freeze_multi_modal_projector=freeze_multi_modal_projector, freeze_language_model=freeze_language_model, image_max_pixels=image_max_pixels, image_min_pixels=image_min_pixels, video_max_pixels=video_max_pixels, video_min_pixels=video_min_pixels, ) ) with gr.Accordion(open=False) as galore_tab: with gr.Row(): use_galore = gr.Checkbox() galore_rank = gr.Slider(minimum=1, maximum=1024, value=16, step=1) galore_update_interval = gr.Slider(minimum=1, maximum=2048, value=200, step=1) galore_scale = gr.Slider(minimum=0, maximum=100, value=2.0, step=0.1) galore_target = gr.Textbox(value="all") input_elems.update({use_galore, galore_rank, 
galore_update_interval, galore_scale, galore_target}) elem_dict.update( dict( galore_tab=galore_tab, use_galore=use_galore, galore_rank=galore_rank, galore_update_interval=galore_update_interval, galore_scale=galore_scale, galore_target=galore_target, ) ) with gr.Accordion(open=False) as apollo_tab: with gr.Row(): use_apollo = gr.Checkbox() apollo_rank = gr.Slider(minimum=1, maximum=1024, value=16, step=1) apollo_update_interval = gr.Slider(minimum=1, maximum=2048, value=200, step=1) apollo_scale = gr.Slider(minimum=0, maximum=100, value=32.0, step=0.1) apollo_target = gr.Textbox(value="all") input_elems.update({use_apollo, apollo_rank, apollo_update_interval, apollo_scale, apollo_target}) elem_dict.update( dict( apollo_tab=apollo_tab, use_apollo=use_apollo, apollo_rank=apollo_rank, apollo_update_interval=apollo_update_interval, apollo_scale=apollo_scale, apollo_target=apollo_target, ) ) with gr.Accordion(open=False) as badam_tab: with gr.Row(): use_badam = gr.Checkbox() badam_mode = gr.Dropdown(choices=["layer", "ratio"], value="layer") badam_switch_mode = gr.Dropdown(choices=["ascending", "descending", "random", "fixed"], value="ascending") badam_switch_interval = gr.Slider(minimum=1, maximum=1024, value=50, step=1) badam_update_ratio = gr.Slider(minimum=0, maximum=1, value=0.05, step=0.01) input_elems.update({use_badam, badam_mode, badam_switch_mode, badam_switch_interval, badam_update_ratio}) elem_dict.update( dict( badam_tab=badam_tab, use_badam=use_badam, badam_mode=badam_mode, badam_switch_mode=badam_switch_mode, badam_switch_interval=badam_switch_interval, badam_update_ratio=badam_update_ratio, ) ) with gr.Accordion(open=False) as swanlab_tab: with gr.Row(): use_swanlab = gr.Checkbox() swanlab_project = gr.Textbox(value="llamafactory") swanlab_run_name = gr.Textbox() swanlab_workspace = gr.Textbox() swanlab_api_key = gr.Textbox() swanlab_mode = gr.Dropdown(choices=["cloud", "local"], value="cloud") swanlab_link = gr.Markdown(visible=False) 
input_elems.update( { use_swanlab, swanlab_project, swanlab_run_name, swanlab_workspace, swanlab_api_key, swanlab_mode, swanlab_link, } ) elem_dict.update( dict( swanlab_tab=swanlab_tab, use_swanlab=use_swanlab, swanlab_project=swanlab_project, swanlab_run_name=swanlab_run_name, swanlab_workspace=swanlab_workspace, swanlab_api_key=swanlab_api_key, swanlab_mode=swanlab_mode, swanlab_link=swanlab_link, ) ) with gr.Row(): cmd_preview_btn = gr.Button() arg_save_btn = gr.Button() arg_load_btn = gr.Button() start_btn = gr.Button(variant="primary") stop_btn = gr.Button(variant="stop") with gr.Row(): with gr.Column(scale=3): with gr.Row(): current_time = gr.Textbox(visible=False, interactive=False) output_dir = gr.Dropdown(allow_custom_value=True) config_path = gr.Dropdown(allow_custom_value=True) with gr.Row(): device_count = gr.Textbox(value=str(get_device_count() or 1), interactive=False) ds_stage = gr.Dropdown(choices=["none", "2", "3"], value="none") ds_offload = gr.Checkbox() with gr.Row(): resume_btn = gr.Checkbox(visible=False, interactive=False) progress_bar = gr.Slider(visible=False, interactive=False) with gr.Row(): output_box = gr.Markdown() with gr.Column(scale=1): loss_viewer = gr.Plot() input_elems.update({output_dir, config_path, ds_stage, ds_offload}) elem_dict.update( dict( cmd_preview_btn=cmd_preview_btn, arg_save_btn=arg_save_btn, arg_load_btn=arg_load_btn, start_btn=start_btn, stop_btn=stop_btn, current_time=current_time, output_dir=output_dir, config_path=config_path, device_count=device_count, ds_stage=ds_stage, ds_offload=ds_offload, resume_btn=resume_btn, progress_bar=progress_bar, output_box=output_box, loss_viewer=loss_viewer, ) ) output_elems = [output_box, progress_bar, loss_viewer, swanlab_link] cmd_preview_btn.click(engine.runner.preview_train, input_elems, output_elems, concurrency_limit=None) start_btn.click(engine.runner.run_train, input_elems, output_elems) stop_btn.click(engine.runner.set_abort) resume_btn.change(engine.runner.monitor, 
outputs=output_elems, concurrency_limit=None) lang = engine.manager.get_elem_by_id("top.lang") model_name: gr.Dropdown = engine.manager.get_elem_by_id("top.model_name") finetuning_type: gr.Dropdown = engine.manager.get_elem_by_id("top.finetuning_type") arg_save_btn.click(engine.runner.save_args, input_elems, output_elems, concurrency_limit=None) arg_load_btn.click( engine.runner.load_args, [lang, config_path], list(input_elems) + [output_box], concurrency_limit=None ) dataset.focus(list_datasets, [dataset_dir, training_stage], [dataset], queue=False) training_stage.change(change_stage, [training_stage], [dataset, packing], queue=False) reward_model.focus(list_checkpoints, [model_name, finetuning_type], [reward_model], queue=False) model_name.change(list_output_dirs, [model_name, finetuning_type, current_time], [output_dir], queue=False) finetuning_type.change(list_output_dirs, [model_name, finetuning_type, current_time], [output_dir], queue=False) output_dir.change( list_output_dirs, [model_name, finetuning_type, current_time], [output_dir], concurrency_limit=None ) output_dir.input( engine.runner.check_output_dir, [lang, model_name, finetuning_type, output_dir], list(input_elems) + [output_box], concurrency_limit=None, ) config_path.change(list_config_paths, [current_time], [config_path], queue=False) return elem_dict
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/components/export.py
src/llamafactory/webui/components/export.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from collections.abc import Generator from typing import TYPE_CHECKING from ...extras.constants import PEFT_METHODS from ...extras.misc import torch_gc from ...extras.packages import is_gradio_available from ...train.tuner import export_model from ..common import get_save_dir, load_config from ..locales import ALERTS if is_gradio_available(): import gradio as gr if TYPE_CHECKING: from gradio.components import Component from ..engine import Engine GPTQ_BITS = ["8", "4", "3", "2"] def can_quantize(checkpoint_path: str | list[str]) -> "gr.Dropdown": if isinstance(checkpoint_path, list) and len(checkpoint_path) != 0: return gr.Dropdown(value="none", interactive=False) else: return gr.Dropdown(interactive=True) def save_model( lang: str, model_name: str, model_path: str, finetuning_type: str, checkpoint_path: str | list[str], template: str, export_size: int, export_quantization_bit: str, export_quantization_dataset: str, export_device: str, export_legacy_format: bool, export_dir: str, export_hub_model_id: str, extra_args: str, ) -> Generator[str, None, None]: user_config = load_config() error = "" if not model_name: error = ALERTS["err_no_model"][lang] elif not model_path: error = ALERTS["err_no_path"][lang] elif not export_dir: error = ALERTS["err_no_export_dir"][lang] elif export_quantization_bit in GPTQ_BITS and not export_quantization_dataset: error = ALERTS["err_no_dataset"][lang] 
elif export_quantization_bit not in GPTQ_BITS and not checkpoint_path: error = ALERTS["err_no_adapter"][lang] elif export_quantization_bit in GPTQ_BITS and checkpoint_path and isinstance(checkpoint_path, list): error = ALERTS["err_gptq_lora"][lang] try: json.loads(extra_args) except json.JSONDecodeError: error = ALERTS["err_json_schema"][lang] if error: gr.Warning(error) yield error return args = dict( model_name_or_path=model_path, cache_dir=user_config.get("cache_dir", None), finetuning_type=finetuning_type, template=template, export_dir=export_dir, export_hub_model_id=export_hub_model_id or None, export_size=export_size, export_quantization_bit=int(export_quantization_bit) if export_quantization_bit in GPTQ_BITS else None, export_quantization_dataset=export_quantization_dataset, export_device=export_device, export_legacy_format=export_legacy_format, trust_remote_code=True, ) args.update(json.loads(extra_args)) if checkpoint_path: if finetuning_type in PEFT_METHODS: # list args["adapter_name_or_path"] = ",".join( [get_save_dir(model_name, finetuning_type, adapter) for adapter in checkpoint_path] ) else: # str args["model_name_or_path"] = get_save_dir(model_name, finetuning_type, checkpoint_path) yield ALERTS["info_exporting"][lang] export_model(args) torch_gc() yield ALERTS["info_exported"][lang] def create_export_tab(engine: "Engine") -> dict[str, "Component"]: with gr.Row(): export_size = gr.Slider(minimum=1, maximum=100, value=5, step=1) export_quantization_bit = gr.Dropdown(choices=["none"] + GPTQ_BITS, value="none") export_quantization_dataset = gr.Textbox(value="data/c4_demo.jsonl") export_device = gr.Radio(choices=["cpu", "auto"], value="cpu") export_legacy_format = gr.Checkbox() with gr.Row(): export_dir = gr.Textbox() export_hub_model_id = gr.Textbox() extra_args = gr.Textbox(value="{}") checkpoint_path: gr.Dropdown = engine.manager.get_elem_by_id("top.checkpoint_path") checkpoint_path.change(can_quantize, [checkpoint_path], [export_quantization_bit], 
queue=False) export_btn = gr.Button() info_box = gr.Textbox(show_label=False, interactive=False) export_btn.click( save_model, [ engine.manager.get_elem_by_id("top.lang"), engine.manager.get_elem_by_id("top.model_name"), engine.manager.get_elem_by_id("top.model_path"), engine.manager.get_elem_by_id("top.finetuning_type"), engine.manager.get_elem_by_id("top.checkpoint_path"), engine.manager.get_elem_by_id("top.template"), export_size, export_quantization_bit, export_quantization_dataset, export_device, export_legacy_format, export_dir, export_hub_model_id, extra_args, ], [info_box], ) return dict( export_size=export_size, export_quantization_bit=export_quantization_bit, export_quantization_dataset=export_quantization_dataset, export_device=export_device, export_legacy_format=export_legacy_format, export_dir=export_dir, export_hub_model_id=export_hub_model_id, extra_args=extra_args, export_btn=export_btn, info_box=info_box, )
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/components/infer.py
src/llamafactory/webui/components/infer.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...extras.packages import is_gradio_available from ..common import is_multimodal from .chatbot import create_chat_box if is_gradio_available(): import gradio as gr if TYPE_CHECKING: from gradio.components import Component from ..engine import Engine def create_infer_tab(engine: "Engine") -> dict[str, "Component"]: input_elems = engine.manager.get_base_elems() elem_dict = dict() with gr.Row(): infer_backend = gr.Dropdown(choices=["huggingface", "vllm", "sglang"], value="huggingface") infer_dtype = gr.Dropdown(choices=["auto", "float16", "bfloat16", "float32"], value="auto") extra_args = gr.Textbox(value='{"vllm_enforce_eager": true}') with gr.Row(): load_btn = gr.Button() unload_btn = gr.Button() info_box = gr.Textbox(show_label=False, interactive=False) input_elems.update({infer_backend, infer_dtype, extra_args}) elem_dict.update( dict( infer_backend=infer_backend, infer_dtype=infer_dtype, extra_args=extra_args, load_btn=load_btn, unload_btn=unload_btn, info_box=info_box, ) ) chatbot, messages, chat_elems = create_chat_box(engine, visible=False) elem_dict.update(chat_elems) load_btn.click(engine.chatter.load_model, input_elems, [info_box]).then( lambda: gr.Column(visible=engine.chatter.loaded), outputs=[chat_elems["chat_box"]] ) unload_btn.click(engine.chatter.unload_model, input_elems, [info_box]).then( lambda: ([], []), outputs=[chatbot, messages] 
).then(lambda: gr.Column(visible=engine.chatter.loaded), outputs=[chat_elems["chat_box"]]) engine.manager.get_elem_by_id("top.model_name").change( lambda model_name: gr.Column(visible=is_multimodal(model_name)), [engine.manager.get_elem_by_id("top.model_name")], [chat_elems["mm_box"]], ) return elem_dict
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/components/eval.py
src/llamafactory/webui/components/eval.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...extras.packages import is_gradio_available from ..common import DEFAULT_DATA_DIR from ..control import list_datasets from .data import create_preview_box if is_gradio_available(): import gradio as gr if TYPE_CHECKING: from gradio.components import Component from ..engine import Engine def create_eval_tab(engine: "Engine") -> dict[str, "Component"]: input_elems = engine.manager.get_base_elems() elem_dict = dict() with gr.Row(): dataset_dir = gr.Textbox(value=DEFAULT_DATA_DIR, scale=2) dataset = gr.Dropdown(multiselect=True, allow_custom_value=True, scale=4) preview_elems = create_preview_box(dataset_dir, dataset) input_elems.update({dataset_dir, dataset}) elem_dict.update(dict(dataset_dir=dataset_dir, dataset=dataset, **preview_elems)) with gr.Row(): cutoff_len = gr.Slider(minimum=4, maximum=131072, value=1024, step=1) max_samples = gr.Textbox(value="100000") batch_size = gr.Slider(minimum=1, maximum=1024, value=2, step=1) predict = gr.Checkbox(value=True) input_elems.update({cutoff_len, max_samples, batch_size, predict}) elem_dict.update(dict(cutoff_len=cutoff_len, max_samples=max_samples, batch_size=batch_size, predict=predict)) with gr.Row(): max_new_tokens = gr.Slider(minimum=8, maximum=4096, value=512, step=1) top_p = gr.Slider(minimum=0.01, maximum=1, value=0.7, step=0.01) temperature = gr.Slider(minimum=0.01, maximum=1.5, value=0.95, step=0.01) 
output_dir = gr.Textbox() input_elems.update({max_new_tokens, top_p, temperature, output_dir}) elem_dict.update(dict(max_new_tokens=max_new_tokens, top_p=top_p, temperature=temperature, output_dir=output_dir)) with gr.Row(): cmd_preview_btn = gr.Button() start_btn = gr.Button(variant="primary") stop_btn = gr.Button(variant="stop") with gr.Row(): resume_btn = gr.Checkbox(visible=False, interactive=False) progress_bar = gr.Slider(visible=False, interactive=False) with gr.Row(): output_box = gr.Markdown() elem_dict.update( dict( cmd_preview_btn=cmd_preview_btn, start_btn=start_btn, stop_btn=stop_btn, resume_btn=resume_btn, progress_bar=progress_bar, output_box=output_box, ) ) output_elems = [output_box, progress_bar] cmd_preview_btn.click(engine.runner.preview_eval, input_elems, output_elems, concurrency_limit=None) start_btn.click(engine.runner.run_eval, input_elems, output_elems) stop_btn.click(engine.runner.set_abort) resume_btn.change(engine.runner.monitor, outputs=output_elems, concurrency_limit=None) dataset.focus(list_datasets, [dataset_dir], [dataset], queue=False) return elem_dict
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/components/footer.py
src/llamafactory/webui/components/footer.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...extras.misc import get_current_memory from ...extras.packages import is_gradio_available if is_gradio_available(): import gradio as gr if TYPE_CHECKING: from gradio.components import Component def get_device_memory() -> "gr.Slider": free, total = get_current_memory() if total != -1: used = round((total - free) / (1024**3), 2) total = round(total / (1024**3), 2) return gr.Slider(minimum=0, maximum=total, value=used, step=0.01, visible=True) else: return gr.Slider(visible=False) def create_footer() -> dict[str, "Component"]: with gr.Row(): device_memory = gr.Slider(visible=False, interactive=False) timer = gr.Timer(value=5) timer.tick(get_device_memory, outputs=[device_memory], queue=False) return dict(device_memory=device_memory)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/components/chatbot.py
src/llamafactory/webui/components/chatbot.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import json from typing import TYPE_CHECKING from ...data import Role from ...extras.packages import is_gradio_available from ..locales import ALERTS if is_gradio_available(): import gradio as gr if TYPE_CHECKING: from gradio.components import Component from ..engine import Engine def check_json_schema(text: str, lang: str) -> None: r"""Check if the json schema is valid.""" try: tools = json.loads(text) if tools: assert isinstance(tools, list) for tool in tools: if "name" not in tool: raise NotImplementedError("Name not found.") except NotImplementedError: gr.Warning(ALERTS["err_tool_name"][lang]) except Exception: gr.Warning(ALERTS["err_json_schema"][lang]) def create_chat_box( engine: "Engine", visible: bool = False ) -> tuple["Component", "Component", dict[str, "Component"]]: lang = engine.manager.get_elem_by_id("top.lang") with gr.Column(visible=visible) as chat_box: kwargs = {} if "show_copy_button" in inspect.signature(gr.Chatbot.__init__).parameters: kwargs["show_copy_button"] = True if "resizable" in inspect.signature(gr.Chatbot.__init__).parameters: kwargs["resizable"] = True chatbot = gr.Chatbot(type="messages", **kwargs) messages = gr.State([]) with gr.Row(): with gr.Column(scale=4): with gr.Row(): with gr.Column(): role = gr.Dropdown(choices=[Role.USER.value, Role.OBSERVATION.value], value=Role.USER.value) system = gr.Textbox(show_label=False) tools = 
gr.Textbox(show_label=False, lines=3) with gr.Column() as mm_box: with gr.Tab("Image"): image = gr.Image(type="pil") with gr.Tab("Video"): video = gr.Video() with gr.Tab("Audio"): audio = gr.Audio(type="filepath") query = gr.Textbox(show_label=False, lines=8) submit_btn = gr.Button(variant="primary") with gr.Column(scale=1): max_new_tokens = gr.Slider(minimum=8, maximum=8192, value=1024, step=1) top_p = gr.Slider(minimum=0.01, maximum=1.0, value=0.7, step=0.01) temperature = gr.Slider(minimum=0.01, maximum=1.5, value=0.95, step=0.01) skip_special_tokens = gr.Checkbox(value=True) escape_html = gr.Checkbox(value=True) enable_thinking = gr.Checkbox(value=True) clear_btn = gr.Button() tools.input(check_json_schema, inputs=[tools, engine.manager.get_elem_by_id("top.lang")]) submit_btn.click( engine.chatter.append, [chatbot, messages, role, query, escape_html], [chatbot, messages, query], ).then( engine.chatter.stream, [ chatbot, messages, lang, system, tools, image, video, audio, max_new_tokens, top_p, temperature, skip_special_tokens, escape_html, enable_thinking, ], [chatbot, messages], ) clear_btn.click(lambda: ([], []), outputs=[chatbot, messages]) return ( chatbot, messages, dict( chat_box=chat_box, role=role, system=system, tools=tools, mm_box=mm_box, image=image, video=video, audio=audio, query=query, submit_btn=submit_btn, max_new_tokens=max_new_tokens, top_p=top_p, temperature=temperature, skip_special_tokens=skip_special_tokens, escape_html=escape_html, enable_thinking=enable_thinking, clear_btn=clear_btn, ), )
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/components/__init__.py
src/llamafactory/webui/components/__init__.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .chatbot import create_chat_box from .eval import create_eval_tab from .export import create_export_tab from .footer import create_footer from .infer import create_infer_tab from .top import create_top from .train import create_train_tab __all__ = [ "create_chat_box", "create_eval_tab", "create_export_tab", "create_footer", "create_infer_tab", "create_top", "create_train_tab", ]
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/components/top.py
src/llamafactory/webui/components/top.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...data import TEMPLATES from ...extras.constants import METHODS, SUPPORTED_MODELS from ...extras.misc import use_modelscope, use_openmind from ...extras.packages import is_gradio_available from ..common import save_config from ..control import can_quantize, can_quantize_to, check_template, get_model_info, list_checkpoints, switch_hub if is_gradio_available(): import gradio as gr if TYPE_CHECKING: from gradio.components import Component def create_top() -> dict[str, "Component"]: with gr.Row(): lang = gr.Dropdown(choices=["en", "ru", "zh", "ko", "ja"], value=None, scale=1) available_models = list(SUPPORTED_MODELS.keys()) + ["Custom"] model_name = gr.Dropdown(choices=available_models, value=None, scale=2) model_path = gr.Textbox(scale=2) default_hub = "modelscope" if use_modelscope() else "openmind" if use_openmind() else "huggingface" hub_name = gr.Dropdown(choices=["huggingface", "modelscope", "openmind"], value=default_hub, scale=2) with gr.Row(): finetuning_type = gr.Dropdown(choices=METHODS, value="lora", scale=1) checkpoint_path = gr.Dropdown(multiselect=True, allow_custom_value=True, scale=6) with gr.Row(): quantization_bit = gr.Dropdown(choices=["none", "8", "4"], value="none", allow_custom_value=True) quantization_method = gr.Dropdown(choices=["bnb", "hqq", "eetq"], value="bnb") template = gr.Dropdown(choices=list(TEMPLATES.keys()), value="default") 
rope_scaling = gr.Dropdown(choices=["none", "linear", "dynamic", "yarn", "llama3"], value="none") booster = gr.Dropdown(choices=["auto", "flashattn2", "unsloth", "liger_kernel"], value="auto") model_name.change(get_model_info, [model_name], [model_path, template], queue=False).then( list_checkpoints, [model_name, finetuning_type], [checkpoint_path], queue=False ).then(check_template, [lang, template]) model_name.input(save_config, inputs=[lang, hub_name, model_name], queue=False) model_path.input(save_config, inputs=[lang, hub_name, model_name, model_path], queue=False) finetuning_type.change(can_quantize, [finetuning_type], [quantization_bit], queue=False).then( list_checkpoints, [model_name, finetuning_type], [checkpoint_path], queue=False ) checkpoint_path.focus(list_checkpoints, [model_name, finetuning_type], [checkpoint_path], queue=False) quantization_method.change(can_quantize_to, [quantization_method], [quantization_bit], queue=False) hub_name.change(switch_hub, inputs=[hub_name], queue=False).then( get_model_info, [model_name], [model_path, template], queue=False ).then(list_checkpoints, [model_name, finetuning_type], [checkpoint_path], queue=False).then( check_template, [lang, template] ) hub_name.input(save_config, inputs=[lang, hub_name], queue=False) return dict( lang=lang, model_name=model_name, model_path=model_path, hub_name=hub_name, finetuning_type=finetuning_type, checkpoint_path=checkpoint_path, quantization_bit=quantization_bit, quantization_method=quantization_method, template=template, rope_scaling=rope_scaling, booster=booster, )
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/src/llamafactory/webui/components/data.py
src/llamafactory/webui/components/data.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from typing import TYPE_CHECKING, Any from ...extras.constants import DATA_CONFIG from ...extras.packages import is_gradio_available if is_gradio_available(): import gradio as gr if TYPE_CHECKING: from gradio.components import Component PAGE_SIZE = 2 def prev_page(page_index: int) -> int: return page_index - 1 if page_index > 0 else page_index def next_page(page_index: int, total_num: int) -> int: return page_index + 1 if (page_index + 1) * PAGE_SIZE < total_num else page_index def can_preview(dataset_dir: str, dataset: list) -> "gr.Button": r"""Check if the dataset is a local dataset.""" try: with open(os.path.join(dataset_dir, DATA_CONFIG), encoding="utf-8") as f: dataset_info = json.load(f) except Exception: return gr.Button(interactive=False) if len(dataset) == 0 or "file_name" not in dataset_info[dataset[0]]: return gr.Button(interactive=False) data_path = os.path.join(dataset_dir, dataset_info[dataset[0]]["file_name"]) if os.path.isfile(data_path) or (os.path.isdir(data_path) and os.listdir(data_path)): return gr.Button(interactive=True) else: return gr.Button(interactive=False) def _load_data_file(file_path: str) -> list[Any]: with open(file_path, encoding="utf-8") as f: if file_path.endswith(".json"): return json.load(f) elif file_path.endswith(".jsonl"): return [json.loads(line) for line in f] else: return list(f) def get_preview(dataset_dir: str, dataset: list, 
page_index: int) -> tuple[int, list, "gr.Column"]: r"""Get the preview samples from the dataset.""" with open(os.path.join(dataset_dir, DATA_CONFIG), encoding="utf-8") as f: dataset_info = json.load(f) data_path = os.path.join(dataset_dir, dataset_info[dataset[0]]["file_name"]) if os.path.isfile(data_path): data = _load_data_file(data_path) else: data = [] for file_name in os.listdir(data_path): data.extend(_load_data_file(os.path.join(data_path, file_name))) return len(data), data[PAGE_SIZE * page_index : PAGE_SIZE * (page_index + 1)], gr.Column(visible=True) def create_preview_box(dataset_dir: "gr.Textbox", dataset: "gr.Dropdown") -> dict[str, "Component"]: data_preview_btn = gr.Button(interactive=False, scale=1) with gr.Column(visible=False, elem_classes="modal-box") as preview_box: with gr.Row(): preview_count = gr.Number(value=0, interactive=False, precision=0) page_index = gr.Number(value=0, interactive=False, precision=0) with gr.Row(): prev_btn = gr.Button() next_btn = gr.Button() close_btn = gr.Button() with gr.Row(): preview_samples = gr.JSON() dataset.change(can_preview, [dataset_dir, dataset], [data_preview_btn], queue=False).then( lambda: 0, outputs=[page_index], queue=False ) data_preview_btn.click( get_preview, [dataset_dir, dataset, page_index], [preview_count, preview_samples, preview_box], queue=False ) prev_btn.click(prev_page, [page_index], [page_index], queue=False).then( get_preview, [dataset_dir, dataset, page_index], [preview_count, preview_samples, preview_box], queue=False ) next_btn.click(next_page, [page_index, preview_count], [page_index], queue=False).then( get_preview, [dataset_dir, dataset, page_index], [preview_count, preview_samples, preview_box], queue=False ) close_btn.click(lambda: gr.Column(visible=False), outputs=[preview_box], queue=False) return dict( data_preview_btn=data_preview_btn, preview_count=preview_count, page_index=page_index, prev_btn=prev_btn, next_btn=next_btn, close_btn=close_btn, 
preview_samples=preview_samples, )
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/check_license.py
tests/check_license.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from pathlib import Path KEYWORDS = ("Copyright", "2025", "LlamaFactory") def main(): path_list: list[Path] = [] for check_dir in sys.argv[1:]: path_list.extend(Path(check_dir).glob("**/*.py")) for path in path_list: with open(path.absolute(), encoding="utf-8") as f: file_content = f.read().strip().split("\n") if not file_content[0]: continue print(f"Check license: {path}") assert all(keyword in file_content[0] for keyword in KEYWORDS), f"File {path} does not contain license." if __name__ == "__main__": main()
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/conftest.py
tests/conftest.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """LlamaFactory test configuration. Contains shared fixtures, pytest configuration, and custom markers. """ import os from typing import Optional import pytest import torch import torch.distributed as dist from pytest import Config, FixtureRequest, Item, MonkeyPatch from llamafactory.extras.misc import get_current_device, get_device_count, is_env_enabled from llamafactory.extras.packages import is_transformers_version_greater_than from llamafactory.train.test_utils import patch_valuehead_model CURRENT_DEVICE = get_current_device().type def pytest_configure(config: Config): """Register custom pytest markers.""" config.addinivalue_line( "markers", "slow: marks tests as slow (deselect with '-m \"not slow\"' or set RUN_SLOW=1 to run)", ) config.addinivalue_line( "markers", "runs_on: test requires specific device type, e.g., @pytest.mark.runs_on(['cuda'])", ) config.addinivalue_line( "markers", "require_distributed(num_devices): allow multi-device execution (default: 2)", ) def _handle_runs_on(items: list[Item]): """Skip tests on specified device TYPES (cpu/cuda/npu).""" for item in items: marker = item.get_closest_marker("runs_on") if not marker: continue devices = marker.args[0] if isinstance(devices, str): devices = [devices] if CURRENT_DEVICE not in devices: item.add_marker(pytest.mark.skip(reason=f"test requires one of {devices} (current: {CURRENT_DEVICE})")) def _handle_slow_tests(items: 
list[Item]): """Skip slow tests unless RUN_SLOW is enabled.""" if not is_env_enabled("RUN_SLOW"): skip_slow = pytest.mark.skip(reason="slow test (set RUN_SLOW=1 to run)") for item in items: if "slow" in item.keywords: item.add_marker(skip_slow) def _get_visible_devices_env() -> Optional[str]: """Return device visibility env var name.""" if CURRENT_DEVICE == "cuda": return "CUDA_VISIBLE_DEVICES" elif CURRENT_DEVICE == "npu": return "ASCEND_RT_VISIBLE_DEVICES" else: return None def _handle_device_visibility(items: list[Item]): """Handle device visibility based on test markers.""" env_key = _get_visible_devices_env() if env_key is None or CURRENT_DEVICE in ("cpu", "mps"): return # Parse visible devices visible_devices_env = os.environ.get(env_key) if visible_devices_env is None: available = get_device_count() else: visible_devices = [v for v in visible_devices_env.split(",") if v != ""] available = len(visible_devices) for item in items: marker = item.get_closest_marker("require_distributed") if not marker: continue required = marker.args[0] if marker.args else 2 if available < required: item.add_marker(pytest.mark.skip(reason=f"test requires {required} devices, but only {available} visible")) def pytest_collection_modifyitems(config: Config, items: list[Item]): """Modify test collection based on markers and environment.""" # Handle version compatibility (from HEAD) skip_bc = pytest.mark.skip(reason="Skip backward compatibility tests") for item in items: if "tests_v1" in str(item.fspath) and not is_transformers_version_greater_than("4.57.0"): item.add_marker(skip_bc) _handle_slow_tests(items) _handle_runs_on(items) _handle_device_visibility(items) @pytest.fixture(autouse=True) def _cleanup_distributed_state(): """Cleanup distributed state after each test.""" yield if dist.is_initialized(): dist.destroy_process_group() @pytest.fixture(autouse=True) def _manage_distributed_env(request: FixtureRequest, monkeypatch: MonkeyPatch) -> None: """Set environment variables for 
distributed tests if specific devices are requested.""" env_key = _get_visible_devices_env() if not env_key: return # Save old environment for logic checks, monkeypatch handles restoration old_value = os.environ.get(env_key) marker = request.node.get_closest_marker("require_distributed") if marker: # distributed test required = marker.args[0] if marker.args else 2 specific_devices = marker.args[1] if len(marker.args) > 1 else None if specific_devices: devices_str = ",".join(map(str, specific_devices)) else: devices_str = ",".join(str(i) for i in range(required)) monkeypatch.setenv(env_key, devices_str) else: # non-distributed test if old_value: visible_devices = [v for v in old_value.split(",") if v != ""] monkeypatch.setenv(env_key, visible_devices[0] if visible_devices else "0") else: monkeypatch.setenv(env_key, "0") if CURRENT_DEVICE == "cuda": monkeypatch.setattr(torch.cuda, "device_count", lambda: 1) elif CURRENT_DEVICE == "npu": monkeypatch.setattr(torch.npu, "device_count", lambda: 1) @pytest.fixture def fix_valuehead_cpu_loading(): """Fix valuehead model loading.""" patch_valuehead_model()
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/train/test_sft_trainer.py
tests/train/test_sft_trainer.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from dataclasses import dataclass, field from typing import Any import pytest from transformers import DataCollatorWithPadding from llamafactory.data import get_dataset, get_template_and_fix_tokenizer from llamafactory.hparams import get_train_args from llamafactory.model import load_model, load_tokenizer from llamafactory.train.sft.trainer import CustomSeq2SeqTrainer DEMO_DATA = os.getenv("DEMO_DATA", "llamafactory/demo_data") TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") TRAIN_ARGS = { "model_name_or_path": TINY_LLAMA3, "stage": "sft", "do_train": True, "finetuning_type": "lora", "dataset": "llamafactory/tiny-supervised-dataset", "dataset_dir": "ONLINE", "template": "llama3", "cutoff_len": 1024, "overwrite_output_dir": True, "per_device_train_batch_size": 1, "max_steps": 1, "report_to": "none", } @dataclass class DataCollatorWithVerbose(DataCollatorWithPadding): verbose_list: list[dict[str, Any]] = field(default_factory=list) def __call__(self, features: list[dict[str, Any]]) -> dict[str, Any]: features = [ {k: v for k, v in feature.items() if k in ["input_ids", "attention_mask", "labels"]} for feature in features ] self.verbose_list.extend(features) batch = super().__call__(features) return {k: v[:, :1] for k, v in batch.items()} # truncate input length @pytest.mark.parametrize("disable_shuffling", [False, True]) def test_shuffle(disable_shuffling: 
bool): model_args, data_args, training_args, finetuning_args, _ = get_train_args( { "output_dir": os.path.join("output", f"shuffle{str(disable_shuffling).lower()}"), "disable_shuffling": disable_shuffling, **TRAIN_ARGS, } ) tokenizer_module = load_tokenizer(model_args) tokenizer = tokenizer_module["tokenizer"] template = get_template_and_fix_tokenizer(tokenizer, data_args) dataset_module = get_dataset(template, model_args, data_args, training_args, stage="sft", **tokenizer_module) model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train) data_collator = DataCollatorWithVerbose(tokenizer=tokenizer) trainer = CustomSeq2SeqTrainer( model=model, args=training_args, finetuning_args=finetuning_args, data_collator=data_collator, **dataset_module, **tokenizer_module, ) trainer.train() if disable_shuffling: assert data_collator.verbose_list[0]["input_ids"] == dataset_module["train_dataset"][0]["input_ids"] else: assert data_collator.verbose_list[0]["input_ids"] != dataset_module["train_dataset"][0]["input_ids"]
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/model/test_pissa.py
tests/model/test_pissa.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest from llamafactory.train.test_utils import compare_model, load_infer_model, load_reference_model, load_train_model TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") TINY_LLAMA_PISSA = os.getenv("TINY_LLAMA_ADAPTER", "llamafactory/tiny-random-Llama-3-pissa") TRAIN_ARGS = { "model_name_or_path": TINY_LLAMA3, "stage": "sft", "do_train": True, "finetuning_type": "lora", "pissa_init": True, "pissa_iter": -1, "dataset": "llamafactory/tiny-supervised-dataset", "dataset_dir": "ONLINE", "template": "llama3", "cutoff_len": 1024, "output_dir": "dummy_dir", "overwrite_output_dir": True, "fp16": True, } INFER_ARGS = { "model_name_or_path": TINY_LLAMA_PISSA, "adapter_name_or_path": TINY_LLAMA_PISSA, "adapter_folder": "pissa_init", "finetuning_type": "lora", "template": "llama3", "infer_dtype": "float16", } @pytest.mark.xfail(reason="PiSSA initialization is not stable in different platform.") def test_pissa_train(): model = load_train_model(**TRAIN_ARGS) ref_model = load_reference_model(TINY_LLAMA_PISSA, TINY_LLAMA_PISSA, use_pissa=True, is_trainable=True) compare_model(model, ref_model) @pytest.mark.xfail(reason="Known connection error.") def test_pissa_inference(): model = load_infer_model(**INFER_ARGS) ref_model = load_reference_model(TINY_LLAMA_PISSA, TINY_LLAMA_PISSA, use_pissa=True, is_trainable=False) ref_model = ref_model.merge_and_unload() 
compare_model(model, ref_model)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/model/test_base.py
tests/model/test_base.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest from llamafactory.train.test_utils import compare_model, load_infer_model, load_reference_model TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") TINY_LLAMA_VALUEHEAD = os.getenv("TINY_LLAMA_VALUEHEAD", "llamafactory/tiny-random-Llama-3-valuehead") INFER_ARGS = { "model_name_or_path": TINY_LLAMA3, "template": "llama3", "infer_dtype": "float16", } def test_base(): model = load_infer_model(**INFER_ARGS) ref_model = load_reference_model(TINY_LLAMA3) compare_model(model, ref_model) @pytest.mark.usefixtures("fix_valuehead_cpu_loading") def test_valuehead(): model = load_infer_model(add_valuehead=True, **INFER_ARGS) ref_model = load_reference_model(TINY_LLAMA_VALUEHEAD, add_valuehead=True) compare_model(model, ref_model)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/model/test_full.py
tests/model/test_full.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import torch from llamafactory.train.test_utils import load_infer_model, load_train_model TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") TRAIN_ARGS = { "model_name_or_path": TINY_LLAMA3, "stage": "sft", "do_train": True, "finetuning_type": "full", "dataset": "llamafactory/tiny-supervised-dataset", "dataset_dir": "ONLINE", "template": "llama3", "cutoff_len": 1024, "output_dir": "dummy_dir", "overwrite_output_dir": True, "fp16": True, } INFER_ARGS = { "model_name_or_path": TINY_LLAMA3, "finetuning_type": "full", "template": "llama3", "infer_dtype": "float16", } def test_full_train(): model = load_train_model(**TRAIN_ARGS) for param in model.parameters(): assert param.requires_grad is True assert param.dtype == torch.float32 def test_full_inference(): model = load_infer_model(**INFER_ARGS) for param in model.parameters(): assert param.requires_grad is False assert param.dtype == torch.float16
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/model/test_freeze.py
tests/model/test_freeze.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import torch from llamafactory.train.test_utils import load_infer_model, load_train_model TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") TRAIN_ARGS = { "model_name_or_path": TINY_LLAMA3, "stage": "sft", "do_train": True, "finetuning_type": "freeze", "dataset": "llamafactory/tiny-supervised-dataset", "dataset_dir": "ONLINE", "template": "llama3", "cutoff_len": 1024, "output_dir": "dummy_dir", "overwrite_output_dir": True, "fp16": True, } INFER_ARGS = { "model_name_or_path": TINY_LLAMA3, "finetuning_type": "freeze", "template": "llama3", "infer_dtype": "float16", } def test_freeze_train_all_modules(): model = load_train_model(freeze_trainable_layers=1, **TRAIN_ARGS) for name, param in model.named_parameters(): if name.startswith("model.layers.1."): assert param.requires_grad is True assert param.dtype == torch.float32 else: assert param.requires_grad is False assert param.dtype == torch.float16 def test_freeze_train_extra_modules(): model = load_train_model(freeze_trainable_layers=1, freeze_extra_modules="embed_tokens,lm_head", **TRAIN_ARGS) for name, param in model.named_parameters(): if name.startswith("model.layers.1.") or any(module in name for module in ["embed_tokens", "lm_head"]): assert param.requires_grad is True assert param.dtype == torch.float32 else: assert param.requires_grad is False assert param.dtype == torch.float16 def 
test_freeze_inference(): model = load_infer_model(**INFER_ARGS) for param in model.parameters(): assert param.requires_grad is False assert param.dtype == torch.float16
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/model/test_lora.py
tests/model/test_lora.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest import torch from llamafactory.train.test_utils import ( check_lora_model, compare_model, load_infer_model, load_reference_model, load_train_model, ) TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") TINY_LLAMA_ADAPTER = os.getenv("TINY_LLAMA_ADAPTER", "llamafactory/tiny-random-Llama-3-lora") TINY_LLAMA_VALUEHEAD = os.getenv("TINY_LLAMA_VALUEHEAD", "llamafactory/tiny-random-Llama-3-valuehead") TRAIN_ARGS = { "model_name_or_path": TINY_LLAMA3, "stage": "sft", "do_train": True, "finetuning_type": "lora", "dataset": "llamafactory/tiny-supervised-dataset", "dataset_dir": "ONLINE", "template": "llama3", "cutoff_len": 1024, "output_dir": "dummy_dir", "overwrite_output_dir": True, "fp16": True, } INFER_ARGS = { "model_name_or_path": TINY_LLAMA3, "adapter_name_or_path": TINY_LLAMA_ADAPTER, "finetuning_type": "lora", "template": "llama3", "infer_dtype": "float16", } def test_lora_train_qv_modules(): model = load_train_model(lora_target="q_proj,v_proj", **TRAIN_ARGS) linear_modules, _ = check_lora_model(model) assert linear_modules == {"q_proj", "v_proj"} def test_lora_train_all_modules(): model = load_train_model(lora_target="all", **TRAIN_ARGS) linear_modules, _ = check_lora_model(model) assert linear_modules == {"q_proj", "k_proj", "v_proj", "o_proj", "up_proj", "gate_proj", "down_proj"} def test_lora_train_extra_modules(): model = 
load_train_model(additional_target="embed_tokens,lm_head", **TRAIN_ARGS) _, extra_modules = check_lora_model(model) assert extra_modules == {"embed_tokens", "lm_head"} def test_lora_train_old_adapters(): model = load_train_model(adapter_name_or_path=TINY_LLAMA_ADAPTER, create_new_adapter=False, **TRAIN_ARGS) ref_model = load_reference_model(TINY_LLAMA3, TINY_LLAMA_ADAPTER, use_lora=True, is_trainable=True) compare_model(model, ref_model) def test_lora_train_new_adapters(): model = load_train_model(adapter_name_or_path=TINY_LLAMA_ADAPTER, create_new_adapter=True, **TRAIN_ARGS) ref_model = load_reference_model(TINY_LLAMA3, TINY_LLAMA_ADAPTER, use_lora=True, is_trainable=True) compare_model( model, ref_model, diff_keys=["q_proj", "k_proj", "v_proj", "o_proj", "up_proj", "gate_proj", "down_proj"] ) @pytest.mark.usefixtures("fix_valuehead_cpu_loading") def test_lora_train_valuehead(): model = load_train_model(add_valuehead=True, **TRAIN_ARGS) ref_model = load_reference_model(TINY_LLAMA_VALUEHEAD, is_trainable=True, add_valuehead=True) state_dict = model.state_dict() ref_state_dict = ref_model.state_dict() assert torch.allclose(state_dict["v_head.summary.weight"], ref_state_dict["v_head.summary.weight"]) assert torch.allclose(state_dict["v_head.summary.bias"], ref_state_dict["v_head.summary.bias"]) def test_lora_inference(): model = load_infer_model(**INFER_ARGS) ref_model = load_reference_model(TINY_LLAMA3, TINY_LLAMA_ADAPTER, use_lora=True).merge_and_unload() compare_model(model, ref_model)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/model/model_utils/test_checkpointing.py
tests/model/model_utils/test_checkpointing.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest import torch from llamafactory.extras.misc import get_current_device from llamafactory.train.test_utils import load_train_model TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") TRAIN_ARGS = { "model_name_or_path": TINY_LLAMA3, "stage": "sft", "do_train": True, "finetuning_type": "lora", "lora_target": "all", "dataset": "llamafactory/tiny-supervised-dataset", "dataset_dir": "ONLINE", "template": "llama3", "cutoff_len": 1024, "output_dir": "dummy_dir", "overwrite_output_dir": True, "fp16": True, } @pytest.mark.parametrize("disable_gradient_checkpointing", [False, True]) def test_vanilla_checkpointing(disable_gradient_checkpointing: bool): model = load_train_model(disable_gradient_checkpointing=disable_gradient_checkpointing, **TRAIN_ARGS) for module in filter(lambda m: hasattr(m, "gradient_checkpointing"), model.modules()): assert getattr(module, "gradient_checkpointing") != disable_gradient_checkpointing def test_unsloth_gradient_checkpointing(): model = load_train_model(use_unsloth_gc=True, **TRAIN_ARGS) for module in filter(lambda m: hasattr(m, "gradient_checkpointing"), model.modules()): assert module._gradient_checkpointing_func.__self__.__name__ == "UnslothGradientCheckpointing" def test_upcast_layernorm(): model = load_train_model(upcast_layernorm=True, **TRAIN_ARGS) for name, param in model.named_parameters(): if param.ndim == 1 and "norm" 
in name: assert param.dtype == torch.float32 def test_upcast_lmhead_output(): model = load_train_model(upcast_lmhead_output=True, **TRAIN_ARGS) inputs = torch.randn((1, 16), dtype=torch.float16, device=get_current_device()) outputs: torch.Tensor = model.get_output_embeddings()(inputs) assert outputs.dtype == torch.float32
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/model/model_utils/test_visual.py
tests/model/model_utils/test_visual.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest import torch from transformers import AutoConfig, AutoModelForVision2Seq from llamafactory.extras.packages import is_transformers_version_greater_than from llamafactory.hparams import FinetuningArguments, ModelArguments from llamafactory.model.adapter import init_adapter @pytest.mark.parametrize("freeze_vision_tower", (False, True)) @pytest.mark.parametrize("freeze_multi_modal_projector", (False, True)) @pytest.mark.parametrize("freeze_language_model", (False, True)) def test_visual_full(freeze_vision_tower: bool, freeze_multi_modal_projector: bool, freeze_language_model: bool): model_args = ModelArguments(model_name_or_path="Qwen/Qwen2-VL-2B-Instruct") finetuning_args = FinetuningArguments( finetuning_type="full", freeze_vision_tower=freeze_vision_tower, freeze_multi_modal_projector=freeze_multi_modal_projector, freeze_language_model=freeze_language_model, ) config = AutoConfig.from_pretrained(model_args.model_name_or_path) with torch.device("meta"): model = AutoModelForVision2Seq.from_config(config) model = init_adapter(config, model, model_args, finetuning_args, is_trainable=True) for name, param in model.named_parameters(): if any(key in name for key in ["visual.patch_embed", "visual.blocks"]): assert param.requires_grad != freeze_vision_tower elif "visual.merger" in name: assert param.requires_grad != freeze_multi_modal_projector else: assert param.requires_grad != 
freeze_language_model @pytest.mark.parametrize("freeze_vision_tower,freeze_language_model", ((False, False), (False, True), (True, False))) def test_visual_lora(freeze_vision_tower: bool, freeze_language_model: bool): model_args = ModelArguments(model_name_or_path="Qwen/Qwen2-VL-2B-Instruct") finetuning_args = FinetuningArguments( finetuning_type="lora", freeze_vision_tower=freeze_vision_tower, freeze_language_model=freeze_language_model ) config = AutoConfig.from_pretrained(model_args.model_name_or_path) with torch.device("meta"): model = AutoModelForVision2Seq.from_config(config) model = init_adapter(config, model, model_args, finetuning_args, is_trainable=True) trainable_params, frozen_params = set(), set() for name, param in model.named_parameters(): if param.requires_grad: trainable_params.add(name) else: frozen_params.add(name) if is_transformers_version_greater_than("4.52.0"): visual_param_name = "base_model.model.model.visual.blocks.0.attn.qkv.lora_A.default.weight" language_param_name = "base_model.model.model.language_model.layers.0.self_attn.q_proj.lora_A.default.weight" merger_param_name = "base_model.model.model.visual.merger.lora_A.default.weight" else: visual_param_name = "base_model.model.visual.blocks.0.attn.qkv.lora_A.default.weight" language_param_name = "base_model.model.model.layers.0.self_attn.q_proj.lora_A.default.weight" merger_param_name = "base_model.model.visual.merger.lora_A.default.weight" assert (visual_param_name in trainable_params) != freeze_vision_tower assert (language_param_name in trainable_params) != freeze_language_model assert (merger_param_name in trainable_params) is False def test_visual_model_save_load(): # check VLM's state dict: https://github.com/huggingface/transformers/pull/38385 model_args = ModelArguments(model_name_or_path="Qwen/Qwen2-VL-2B-Instruct") finetuning_args = FinetuningArguments(finetuning_type="full") config = AutoConfig.from_pretrained(model_args.model_name_or_path) with torch.device("meta"): model = 
AutoModelForVision2Seq.from_config(config) model = init_adapter(config, model, model_args, finetuning_args, is_trainable=False) loaded_model_weight = dict(model.named_parameters()) model.save_pretrained(os.path.join("output", "qwen2_vl"), max_shard_size="10GB", safe_serialization=False) saved_model_weight = torch.load(os.path.join("output", "qwen2_vl", "pytorch_model.bin"), weights_only=False) if is_transformers_version_greater_than("4.52.0"): assert "model.language_model.layers.0.self_attn.q_proj.weight" in loaded_model_weight else: assert "model.layers.0.self_attn.q_proj.weight" in loaded_model_weight assert "model.layers.0.self_attn.q_proj.weight" in saved_model_weight
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/model/model_utils/test_add_tokens.py
tests/model/model_utils/test_add_tokens.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest from llamafactory.hparams import ModelArguments from llamafactory.model import load_tokenizer TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") UNUSED_TOKEN = "<|UNUSED_TOKEN|>" @pytest.mark.parametrize("special_tokens", [False, True]) def test_add_tokens(special_tokens: bool): if special_tokens: model_args = ModelArguments(model_name_or_path=TINY_LLAMA3, add_special_tokens=UNUSED_TOKEN) else: model_args = ModelArguments(model_name_or_path=TINY_LLAMA3, add_tokens=UNUSED_TOKEN) tokenizer = load_tokenizer(model_args)["tokenizer"] encoded_ids = tokenizer.encode(UNUSED_TOKEN, add_special_tokens=False) assert len(encoded_ids) == 1 decoded_str = tokenizer.decode(encoded_ids, skip_special_tokens=True) if special_tokens: assert decoded_str == "" else: assert decoded_str == UNUSED_TOKEN if __name__ == "__main__": pytest.main([__file__])
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/model/model_utils/test_attention.py
tests/model/model_utils/test_attention.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest from transformers.utils import is_flash_attn_2_available # Compatible with Transformers v4 and Transformers v5 try: from transformers.utils import is_torch_sdpa_available except ImportError: def is_torch_sdpa_available(): return True from llamafactory.extras.packages import is_transformers_version_greater_than from llamafactory.train.test_utils import load_infer_model TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") INFER_ARGS = { "model_name_or_path": TINY_LLAMA3, "template": "llama3", } @pytest.mark.xfail(is_transformers_version_greater_than("4.48"), reason="Attention refactor.") def test_attention(): attention_available = ["disabled"] if is_torch_sdpa_available(): attention_available.append("sdpa") if is_flash_attn_2_available(): attention_available.append("fa2") llama_attention_classes = { "disabled": "LlamaAttention", "sdpa": "LlamaSdpaAttention", "fa2": "LlamaFlashAttention2", } for requested_attention in attention_available: model = load_infer_model(flash_attn=requested_attention, **INFER_ARGS) for module in model.modules(): if "Attention" in module.__class__.__name__: assert module.__class__.__name__ == llama_attention_classes[requested_attention]
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/model/model_utils/test_packing.py
tests/model/model_utils/test_packing.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import torch from llamafactory.model.model_utils.packing import get_seqlens_in_batch, get_unpad_data @pytest.mark.parametrize( "attention_mask,golden_seq_lens", [ ( [ [1, 1, 2, 2, 2, 0], [1, 2, 2, 3, 3, 3], ], [2, 3, 1, 2, 3], ), ( [[1]], [1], ), ], ) def test_get_seqlens_in_batch(attention_mask, golden_seq_lens): attention_mask_with_indices = torch.tensor(attention_mask) seqlens_in_batch = get_seqlens_in_batch(attention_mask_with_indices) assert torch.all(seqlens_in_batch == torch.tensor(golden_seq_lens)) @pytest.mark.parametrize( "attention_mask,golden_indices,golden_cu_seqlens,golden_max_seqlen", [ ( [ [1, 1, 2, 2, 2, 0], [1, 2, 2, 3, 3, 3], ], [0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 11], [0, 2, 5, 6, 8, 11], 3, ), ( [[1]], [0], [0, 1], 1, ), ], ) def test_get_unpad_data(attention_mask, golden_indices, golden_cu_seqlens, golden_max_seqlen): attention_mask_with_indices = torch.tensor(attention_mask) indices, cu_seqlens, max_seqlen_in_batch = get_unpad_data(attention_mask_with_indices) assert torch.all(indices == torch.tensor(golden_indices)) assert torch.all(cu_seqlens == torch.tensor(golden_cu_seqlens, dtype=torch.int32)) assert max_seqlen_in_batch == golden_max_seqlen
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/model/model_utils/test_misc.py
tests/model/model_utils/test_misc.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest import torch from transformers import AutoConfig, AutoModelForCausalLM from llamafactory.model.model_utils.misc import find_expanded_modules HF_TOKEN = os.getenv("HF_TOKEN") @pytest.mark.skipif(not HF_TOKEN, reason="Gated model.") def test_expanded_modules(): config = AutoConfig.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct") with torch.device("meta"): model = AutoModelForCausalLM.from_config(config) expanded_modules = find_expanded_modules(model, ["q_proj", "v_proj"], num_layer_trainable=4) assert expanded_modules == [ "model.layers.7.self_attn.q_proj", "model.layers.7.self_attn.v_proj", "model.layers.15.self_attn.q_proj", "model.layers.15.self_attn.v_proj", "model.layers.23.self_attn.q_proj", "model.layers.23.self_attn.v_proj", "model.layers.31.self_attn.q_proj", "model.layers.31.self_attn.v_proj", ]
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/e2e/test_sglang.py
tests/e2e/test_sglang.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import pytest from llamafactory.chat import ChatModel from llamafactory.extras.packages import is_sglang_available MODEL_NAME = "Qwen/Qwen2.5-0.5B" INFER_ARGS = { "model_name_or_path": MODEL_NAME, "finetuning_type": "lora", "template": "llama3", "infer_dtype": "float16", "infer_backend": "sglang", "do_sample": False, "max_new_tokens": 1, } MESSAGES = [ {"role": "user", "content": "Hi"}, ] @pytest.mark.runs_on(["cuda"]) @pytest.mark.skipif(not is_sglang_available(), reason="SGLang is not installed") def test_chat(): r"""Test the SGLang engine's basic chat functionality.""" chat_model = ChatModel(INFER_ARGS) response = chat_model.chat(MESSAGES)[0] # TODO: Change to EXPECTED_RESPONSE print(response.response_text) @pytest.mark.runs_on(["cuda"]) @pytest.mark.skipif(not is_sglang_available(), reason="SGLang is not installed") def test_stream_chat(): r"""Test the SGLang engine's streaming chat functionality.""" chat_model = ChatModel(INFER_ARGS) response = "" for token in chat_model.stream_chat(MESSAGES): response += token print("Complete response:", response) assert response, "Should receive a non-empty response" # Run tests if executed directly if __name__ == "__main__": if not is_sglang_available(): print("SGLang is not available. Please install it.") sys.exit(1) test_chat() test_stream_chat()
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/e2e/test_chat.py
tests/e2e/test_chat.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest from llamafactory.chat import ChatModel TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") INFER_ARGS = { "model_name_or_path": TINY_LLAMA3, "finetuning_type": "lora", "template": "llama3", "infer_dtype": "float16", "do_sample": False, "max_new_tokens": 1, } MESSAGES = [ {"role": "user", "content": "Hi"}, ] EXPECTED_RESPONSE = "_rho" @pytest.mark.runs_on(["cpu", "mps"]) def test_chat(): chat_model = ChatModel(INFER_ARGS) assert chat_model.chat(MESSAGES)[0].response_text == EXPECTED_RESPONSE @pytest.mark.runs_on(["cpu", "mps"]) def test_stream_chat(): chat_model = ChatModel(INFER_ARGS) response = "" for token in chat_model.stream_chat(MESSAGES): response += token assert response == EXPECTED_RESPONSE
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/e2e/test_train.py
tests/e2e/test_train.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest from llamafactory.train.tuner import export_model, run_exp DEMO_DATA = os.getenv("DEMO_DATA", "llamafactory/demo_data") TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") TINY_LLAMA_ADAPTER = os.getenv("TINY_LLAMA_ADAPTER", "llamafactory/tiny-random-Llama-3-lora") TRAIN_ARGS = { "model_name_or_path": TINY_LLAMA3, "do_train": True, "finetuning_type": "lora", "dataset_dir": "REMOTE:" + DEMO_DATA, "template": "llama3", "cutoff_len": 1, "overwrite_output_dir": True, "per_device_train_batch_size": 1, "max_steps": 1, "report_to": "none", } INFER_ARGS = { "model_name_or_path": TINY_LLAMA3, "adapter_name_or_path": TINY_LLAMA_ADAPTER, "finetuning_type": "lora", "template": "llama3", "infer_dtype": "float16", } OS_NAME = os.getenv("OS_NAME", "") @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.parametrize( "stage,dataset", [ ("pt", "c4_demo"), ("sft", "alpaca_en_demo"), ("dpo", "dpo_en_demo"), ("kto", "kto_en_demo"), pytest.param("rm", "dpo_en_demo", marks=pytest.mark.xfail(OS_NAME.startswith("windows"), reason="OS error.")), ], ) def test_run_exp(stage: str, dataset: str): output_dir = os.path.join("output", f"train_{stage}") run_exp({"stage": stage, "dataset": dataset, "output_dir": output_dir, **TRAIN_ARGS}) assert os.path.exists(output_dir) @pytest.mark.runs_on(["cpu", "mps"]) def test_export(): export_dir = os.path.join("output", "llama3_export") 
export_model({"export_dir": export_dir, **INFER_ARGS}) assert os.path.exists(export_dir)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/eval/test_eval_template.py
tests/eval/test_eval_template.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from llamafactory.eval.template import get_eval_template @pytest.mark.runs_on(["cpu", "mps"]) def test_eval_template_en(): support_set = [ { "question": "Fewshot question", "A": "Fewshot1", "B": "Fewshot2", "C": "Fewshot3", "D": "Fewshot4", "answer": "B", } ] example = { "question": "Target question", "A": "Target1", "B": "Target2", "C": "Target3", "D": "Target4", "answer": "C", } template = get_eval_template(name="en") messages = template.format_example(example, support_set=support_set, subject_name="SubName") assert messages == [ { "role": "user", "content": ( "The following are multiple choice questions (with answers) about SubName.\n\n" "Fewshot question\nA. Fewshot1\nB. Fewshot2\nC. Fewshot3\nD. Fewshot4\nAnswer:" ), }, {"role": "assistant", "content": "B"}, { "role": "user", "content": "Target question\nA. Target1\nB. Target2\nC. Target3\nD. 
Target4\nAnswer:", }, {"role": "assistant", "content": "C"}, ] @pytest.mark.runs_on(["cpu", "mps"]) def test_eval_template_zh(): support_set = [ { "question": "示例问题", "A": "示例答案1", "B": "示例答案2", "C": "示例答案3", "D": "示例答案4", "answer": "B", } ] example = { "question": "目标问题", "A": "目标答案1", "B": "目标答案2", "C": "目标答案3", "D": "目标答案4", "answer": "C", } template = get_eval_template(name="zh") messages = template.format_example(example, support_set=support_set, subject_name="主题") assert messages == [ { "role": "user", "content": ( "以下是中国关于主题考试的单项选择题,请选出其中的正确答案。\n\n" "示例问题\nA. 示例答案1\nB. 示例答案2\nC. 示例答案3\nD. 示例答案4\n答案:" ), }, {"role": "assistant", "content": "B"}, { "role": "user", "content": "目标问题\nA. 目标答案1\nB. 目标答案2\nC. 目标答案3\nD. 目标答案4\n答案:", }, {"role": "assistant", "content": "C"}, ]
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/data/test_loader.py
tests/data/test_loader.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest from llamafactory.train.test_utils import load_dataset_module DEMO_DATA = os.getenv("DEMO_DATA", "llamafactory/demo_data") TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") TINY_DATA = os.getenv("TINY_DATA", "llamafactory/tiny-supervised-dataset") TRAIN_ARGS = { "model_name_or_path": TINY_LLAMA3, "stage": "sft", "do_train": True, "finetuning_type": "full", "template": "llama3", "dataset": TINY_DATA, "dataset_dir": "ONLINE", "cutoff_len": 8192, "output_dir": "dummy_dir", "overwrite_output_dir": True, "fp16": True, } @pytest.mark.runs_on(["cpu", "mps"]) def test_load_train_only(): dataset_module = load_dataset_module(**TRAIN_ARGS) assert dataset_module.get("train_dataset") is not None assert dataset_module.get("eval_dataset") is None @pytest.mark.runs_on(["cpu", "mps"]) def test_load_val_size(): dataset_module = load_dataset_module(val_size=0.1, **TRAIN_ARGS) assert dataset_module.get("train_dataset") is not None assert dataset_module.get("eval_dataset") is not None @pytest.mark.runs_on(["cpu", "mps"]) def test_load_eval_data(): dataset_module = load_dataset_module(eval_dataset=TINY_DATA, **TRAIN_ARGS) assert dataset_module.get("train_dataset") is not None assert dataset_module.get("eval_dataset") is not None
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/data/test_template.py
tests/data/test_template.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from typing import TYPE_CHECKING import pytest from transformers import AutoTokenizer from llamafactory.data import get_template_and_fix_tokenizer from llamafactory.data.template import parse_template from llamafactory.hparams import DataArguments if TYPE_CHECKING: from transformers import PreTrainedTokenizer HF_TOKEN = os.getenv("HF_TOKEN") TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") TINY_LLAMA4 = os.getenv("TINY_LLAMA4", "llamafactory/tiny-random-Llama-4") MESSAGES = [ {"role": "user", "content": "How are you"}, {"role": "assistant", "content": "I am fine!"}, {"role": "user", "content": "你好"}, {"role": "assistant", "content": "很高兴认识你!"}, ] MESSAGES_WITH_THOUGHT = [ {"role": "user", "content": "How are you"}, {"role": "assistant", "content": "<think>\nModel thought here\n</think>\n\nI am fine!"}, {"role": "user", "content": "你好"}, {"role": "assistant", "content": "<think>\n模型思考内容\n</think>\n\n很高兴认识你!"}, ] def _check_tokenization( tokenizer: "PreTrainedTokenizer", batch_input_ids: list[list[int]], batch_text: list[str] ) -> None: r"""Check token ids and texts. 
encode(text) == token_ids decode(token_ids) == text """ for input_ids, text in zip(batch_input_ids, batch_text): assert tokenizer.encode(text, add_special_tokens=False) == input_ids assert tokenizer.decode(input_ids) == text def _check_template( model_id: str, template_name: str, prompt_str: str, answer_str: str, use_fast: bool, messages: list[dict[str, str]] = MESSAGES, ) -> None: r"""Check template. Args: model_id: the model id on hugging face hub. template_name: the template name. prompt_str: the string corresponding to the prompt part. answer_str: the string corresponding to the answer part. use_fast: whether to use fast tokenizer. messages: the list of messages. """ tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=use_fast, token=HF_TOKEN) content_str = tokenizer.apply_chat_template(messages, tokenize=False) content_ids = tokenizer.apply_chat_template(messages, tokenize=True) template = get_template_and_fix_tokenizer(tokenizer, DataArguments(template=template_name)) prompt_ids, answer_ids = template.encode_oneturn(tokenizer, messages) assert content_str == prompt_str + answer_str assert content_ids == prompt_ids + answer_ids _check_tokenization(tokenizer, (prompt_ids, answer_ids), (prompt_str, answer_str)) @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.parametrize("use_fast", [True, False]) def test_encode_oneturn(use_fast: bool): tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3, use_fast=use_fast) template = get_template_and_fix_tokenizer(tokenizer, DataArguments(template="llama3")) prompt_ids, answer_ids = template.encode_oneturn(tokenizer, MESSAGES) prompt_str = ( "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\nHow are you<|eot_id|>" "<|start_header_id|>assistant<|end_header_id|>\n\nI am fine!<|eot_id|>" "<|start_header_id|>user<|end_header_id|>\n\n你好<|eot_id|>" "<|start_header_id|>assistant<|end_header_id|>\n\n" ) answer_str = "很高兴认识你!<|eot_id|>" _check_tokenization(tokenizer, (prompt_ids, answer_ids), (prompt_str, 
answer_str)) @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.parametrize("use_fast", [True, False]) def test_encode_multiturn(use_fast: bool): tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3, use_fast=use_fast) template = get_template_and_fix_tokenizer(tokenizer, DataArguments(template="llama3")) encoded_pairs = template.encode_multiturn(tokenizer, MESSAGES) prompt_str_1 = ( "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\nHow are you<|eot_id|>" "<|start_header_id|>assistant<|end_header_id|>\n\n" ) answer_str_1 = "I am fine!<|eot_id|>" prompt_str_2 = ( "<|start_header_id|>user<|end_header_id|>\n\n你好<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" ) answer_str_2 = "很高兴认识你!<|eot_id|>" _check_tokenization( tokenizer, (encoded_pairs[0][0], encoded_pairs[0][1], encoded_pairs[1][0], encoded_pairs[1][1]), (prompt_str_1, answer_str_1, prompt_str_2, answer_str_2), ) @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.parametrize("use_fast", [True, False]) @pytest.mark.parametrize("cot_messages", [True, False]) @pytest.mark.parametrize("enable_thinking", [True, False, None]) def test_reasoning_encode_oneturn(use_fast: bool, cot_messages: bool, enable_thinking: bool): tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B", use_fast=use_fast) data_args = DataArguments(template="qwen3", enable_thinking=enable_thinking) template = get_template_and_fix_tokenizer(tokenizer, data_args) prompt_ids, answer_ids = template.encode_oneturn(tokenizer, MESSAGES_WITH_THOUGHT if cot_messages else MESSAGES) prompt_str = ( f"<|im_start|>user\n{MESSAGES[0]['content']}<|im_end|>\n<|im_start|>assistant\n" f"{MESSAGES[1]['content']}<|im_end|>\n" f"<|im_start|>user\n{MESSAGES[2]['content']}<|im_end|>\n<|im_start|>assistant\n" ) if not cot_messages or enable_thinking is False: answer_str = f"{MESSAGES[3]['content']}<|im_end|>\n" if enable_thinking: answer_str = "<think>\n\n</think>\n\n" + answer_str else: prompt_str = prompt_str + "<think>\n\n</think>\n\n" else: 
answer_str = f"{MESSAGES_WITH_THOUGHT[3]['content']}<|im_end|>\n" _check_tokenization(tokenizer, (prompt_ids, answer_ids), (prompt_str, answer_str)) @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.parametrize("use_fast", [True, False]) @pytest.mark.parametrize("cot_messages", [True, False]) @pytest.mark.parametrize("enable_thinking", [True, False, None]) def test_reasoning_encode_multiturn(use_fast: bool, cot_messages: bool, enable_thinking: bool): tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B", use_fast=use_fast) data_args = DataArguments(template="qwen3", enable_thinking=enable_thinking) template = get_template_and_fix_tokenizer(tokenizer, data_args) encoded_pairs = template.encode_multiturn(tokenizer, MESSAGES_WITH_THOUGHT if cot_messages else MESSAGES) messages = MESSAGES if not cot_messages or enable_thinking is False else MESSAGES_WITH_THOUGHT prompt_str_1 = f"<|im_start|>user\n{MESSAGES[0]['content']}<|im_end|>\n<|im_start|>assistant\n" answer_str_1 = f"{messages[1]['content']}<|im_end|>\n" prompt_str_2 = f"<|im_start|>user\n{MESSAGES[2]['content']}<|im_end|>\n<|im_start|>assistant\n" answer_str_2 = f"{messages[3]['content']}<|im_end|>\n" if not cot_messages or enable_thinking is False: if enable_thinking: answer_str_1 = "<think>\n\n</think>\n\n" + answer_str_1 answer_str_2 = "<think>\n\n</think>\n\n" + answer_str_2 else: prompt_str_1 = prompt_str_1 + "<think>\n\n</think>\n\n" prompt_str_2 = prompt_str_2 + "<think>\n\n</think>\n\n" _check_tokenization( tokenizer, (encoded_pairs[0][0], encoded_pairs[0][1], encoded_pairs[1][0], encoded_pairs[1][1]), (prompt_str_1, answer_str_1, prompt_str_2, answer_str_2), ) @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.parametrize("use_fast", [True, False]) def test_jinja_template(use_fast: bool): tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3, use_fast=use_fast) ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3, use_fast=use_fast) template = get_template_and_fix_tokenizer(tokenizer, 
DataArguments(template="llama3")) tokenizer.chat_template = template._get_jinja_template(tokenizer) # llama3 template no replace assert tokenizer.chat_template != ref_tokenizer.chat_template assert tokenizer.apply_chat_template(MESSAGES) == ref_tokenizer.apply_chat_template(MESSAGES) @pytest.mark.runs_on(["cpu", "mps"]) def test_ollama_modelfile(): tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3) template = get_template_and_fix_tokenizer(tokenizer, DataArguments(template="llama3")) assert template.get_ollama_modelfile(tokenizer) == ( "# ollama modelfile auto-generated by llamafactory\n\n" "FROM .\n\n" 'TEMPLATE """<|begin_of_text|>' "{{ if .System }}<|start_header_id|>system<|end_header_id|>\n\n{{ .System }}<|eot_id|>{{ end }}" '{{ range .Messages }}{{ if eq .Role "user" }}<|start_header_id|>user<|end_header_id|>\n\n{{ .Content }}' "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" '{{ else if eq .Role "assistant" }}{{ .Content }}<|eot_id|>{{ end }}{{ end }}"""\n\n' 'PARAMETER stop "<|eom_id|>"\n' 'PARAMETER stop "<|eot_id|>"\n' "PARAMETER num_ctx 4096\n" ) @pytest.mark.runs_on(["cpu", "mps"]) def test_get_stop_token_ids(): tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3) template = get_template_and_fix_tokenizer(tokenizer, DataArguments(template="llama3")) assert set(template.get_stop_token_ids(tokenizer)) == {128008, 128009} @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.skipif(not HF_TOKEN, reason="Gated model.") @pytest.mark.parametrize("use_fast", [True, False]) def test_gemma_template(use_fast: bool): prompt_str = ( f"<bos><start_of_turn>user\n{MESSAGES[0]['content']}<end_of_turn>\n" f"<start_of_turn>model\n{MESSAGES[1]['content']}<end_of_turn>\n" f"<start_of_turn>user\n{MESSAGES[2]['content']}<end_of_turn>\n" "<start_of_turn>model\n" ) answer_str = f"{MESSAGES[3]['content']}<end_of_turn>\n" _check_template("google/gemma-3-4b-it", "gemma", prompt_str, answer_str, use_fast) @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.skipif(not 
HF_TOKEN, reason="Gated model.") @pytest.mark.parametrize("use_fast", [True, False]) def test_gemma2_template(use_fast: bool): prompt_str = ( f"<bos><start_of_turn>user\n{MESSAGES[0]['content']}<end_of_turn>\n" f"<start_of_turn>model\n{MESSAGES[1]['content']}<end_of_turn>\n" f"<start_of_turn>user\n{MESSAGES[2]['content']}<end_of_turn>\n" "<start_of_turn>model\n" ) answer_str = f"{MESSAGES[3]['content']}<end_of_turn>\n" _check_template("google/gemma-2-2b-it", "gemma2", prompt_str, answer_str, use_fast) @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.skipif(not HF_TOKEN, reason="Gated model.") @pytest.mark.parametrize("use_fast", [True, False]) def test_llama3_template(use_fast: bool): prompt_str = ( f"<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{MESSAGES[0]['content']}<|eot_id|>" f"<|start_header_id|>assistant<|end_header_id|>\n\n{MESSAGES[1]['content']}<|eot_id|>" f"<|start_header_id|>user<|end_header_id|>\n\n{MESSAGES[2]['content']}<|eot_id|>" "<|start_header_id|>assistant<|end_header_id|>\n\n" ) answer_str = f"{MESSAGES[3]['content']}<|eot_id|>" _check_template("meta-llama/Meta-Llama-3-8B-Instruct", "llama3", prompt_str, answer_str, use_fast) @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.parametrize( "use_fast", [True, pytest.param(False, marks=pytest.mark.xfail(reason="Llama 4 has no slow tokenizer."))] ) def test_llama4_template(use_fast: bool): prompt_str = ( f"<|begin_of_text|><|header_start|>user<|header_end|>\n\n{MESSAGES[0]['content']}<|eot|>" f"<|header_start|>assistant<|header_end|>\n\n{MESSAGES[1]['content']}<|eot|>" f"<|header_start|>user<|header_end|>\n\n{MESSAGES[2]['content']}<|eot|>" "<|header_start|>assistant<|header_end|>\n\n" ) answer_str = f"{MESSAGES[3]['content']}<|eot|>" _check_template(TINY_LLAMA4, "llama4", prompt_str, answer_str, use_fast) @pytest.mark.parametrize( "use_fast", [ pytest.param(True, marks=pytest.mark.xfail(not HF_TOKEN, reason="Authorization.")), pytest.param(False, 
marks=pytest.mark.xfail(reason="Phi-4 slow tokenizer is broken.")), ], ) @pytest.mark.runs_on(["cpu", "mps"]) def test_phi4_template(use_fast: bool): prompt_str = ( f"<|im_start|>user<|im_sep|>{MESSAGES[0]['content']}<|im_end|>" f"<|im_start|>assistant<|im_sep|>{MESSAGES[1]['content']}<|im_end|>" f"<|im_start|>user<|im_sep|>{MESSAGES[2]['content']}<|im_end|>" "<|im_start|>assistant<|im_sep|>" ) answer_str = f"{MESSAGES[3]['content']}<|im_end|>" _check_template("microsoft/phi-4", "phi4", prompt_str, answer_str, use_fast) @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.xfail(not HF_TOKEN, reason="Authorization.") @pytest.mark.parametrize("use_fast", [True, False]) def test_qwen2_5_template(use_fast: bool): prompt_str = ( "<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n" f"<|im_start|>user\n{MESSAGES[0]['content']}<|im_end|>\n" f"<|im_start|>assistant\n{MESSAGES[1]['content']}<|im_end|>\n" f"<|im_start|>user\n{MESSAGES[2]['content']}<|im_end|>\n" "<|im_start|>assistant\n" ) answer_str = f"{MESSAGES[3]['content']}<|im_end|>\n" _check_template("Qwen/Qwen2.5-7B-Instruct", "qwen", prompt_str, answer_str, use_fast) @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.parametrize("use_fast", [True, False]) @pytest.mark.parametrize("cot_messages", [True, False]) def test_qwen3_template(use_fast: bool, cot_messages: bool): prompt_str = ( f"<|im_start|>user\n{MESSAGES[0]['content']}<|im_end|>\n" f"<|im_start|>assistant\n{MESSAGES[1]['content']}<|im_end|>\n" f"<|im_start|>user\n{MESSAGES[2]['content']}<|im_end|>\n" "<|im_start|>assistant\n" ) if not cot_messages: answer_str = f"<think>\n\n</think>\n\n{MESSAGES[3]['content']}<|im_end|>\n" messages = MESSAGES else: answer_str = f"{MESSAGES_WITH_THOUGHT[3]['content']}<|im_end|>\n" messages = MESSAGES_WITH_THOUGHT _check_template("Qwen/Qwen3-8B", "qwen3", prompt_str, answer_str, use_fast, messages=messages) @pytest.mark.runs_on(["cpu", "mps"]) def test_parse_llama3_template(): 
tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3, token=HF_TOKEN) template = parse_template(tokenizer) assert template.format_user.slots == [ "<|start_header_id|>user<|end_header_id|>\n\n{{content}}<|eot_id|>" "<|start_header_id|>assistant<|end_header_id|>\n\n" ] assert template.format_assistant.slots == ["{{content}}<|eot_id|>"] assert template.format_system.slots == ["<|start_header_id|>system<|end_header_id|>\n\n{{content}}<|eot_id|>"] assert template.format_prefix.slots == ["<|begin_of_text|>"] assert template.default_system == "" @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.xfail(not HF_TOKEN, reason="Authorization.") def test_parse_qwen_template(): tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B-Instruct", token=HF_TOKEN) template = parse_template(tokenizer) assert template.__class__.__name__ == "Template" assert template.format_user.slots == ["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] assert template.format_assistant.slots == ["{{content}}<|im_end|>\n"] assert template.format_system.slots == ["<|im_start|>system\n{{content}}<|im_end|>\n"] assert template.format_prefix.slots == [] assert template.default_system == "You are Qwen, created by Alibaba Cloud. You are a helpful assistant." @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.xfail(not HF_TOKEN, reason="Authorization.") def test_parse_qwen3_template(): tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B", token=HF_TOKEN) template = parse_template(tokenizer) assert template.__class__.__name__ == "ReasoningTemplate" assert template.format_user.slots == ["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"] assert template.format_assistant.slots == ["{{content}}<|im_end|>\n"] assert template.format_system.slots == ["<|im_start|>system\n{{content}}<|im_end|>\n"] assert template.format_prefix.slots == [] assert template.default_system == ""
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/data/test_mm_plugin.py
tests/data/test_mm_plugin.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from typing import TYPE_CHECKING, Any import numpy as np import pytest import torch from PIL import Image from llamafactory.data.mm_plugin import get_mm_plugin from llamafactory.extras.packages import is_transformers_version_greater_than from llamafactory.hparams import get_infer_args from llamafactory.model import load_tokenizer if TYPE_CHECKING: from transformers import PreTrainedTokenizer, ProcessorMixin from transformers.image_processing_utils import BaseImageProcessor from llamafactory.data.mm_plugin import BasePlugin from llamafactory.model.loader import TokenizerModule HF_TOKEN = os.getenv("HF_TOKEN") TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") TINY_LLAMA4 = os.getenv("TINY_LLAMA4", "llamafactory/tiny-random-Llama-4") MM_MESSAGES = [ {"role": "user", "content": "<image>What is in this image?"}, {"role": "assistant", "content": "A cat."}, ] OMNI_MESSAGES = [ {"role": "user", "content": "<image>What is in this image?"}, {"role": "assistant", "content": "A cat."}, {"role": "user", "content": "<audio>What is in this audio?"}, {"role": "assistant", "content": "Nothing."}, ] TEXT_MESSAGES = [ {"role": "user", "content": "How are you"}, {"role": "assistant", "content": "I am fine!"}, ] VIDEO_MESSAGES = [ {"role": "user", "content": "<video>What is in this viode?"}, {"role": "assistant", "content": "A cat."}, ] AUDIOS = [np.zeros(1600)] IMAGES = 
[Image.new("RGB", (32, 32), (255, 255, 255))] VIDEOS = [[Image.new("RGB", (32, 32), (255, 255, 255))] * 4] NO_IMAGES = [] NO_VIDEOS = [] NO_AUDIOS = [] IMGLENS = [1] AUDLENS = [1] NO_IMGLENS = [0] NO_VIDLENS = [0] NO_AUDLENS = [0] INPUT_IDS = [0, 1, 2, 3, 4] LABELS = [0, 1, 2, 3, 4] BATCH_IDS = [[1] * 1024] def _get_mm_inputs(processor: "ProcessorMixin") -> dict[str, "torch.Tensor"]: image_processor: BaseImageProcessor = getattr(processor, "image_processor") return image_processor(images=IMAGES, return_tensors="pt") def _get_omni_inputs(processor: "ProcessorMixin") -> dict[str, "torch.Tensor"]: mm_inputs = {} image_processor: BaseImageProcessor = getattr(processor, "image_processor", None) feature_extractor = getattr(processor, "feature_extractor", None) mm_inputs.update(image_processor(IMAGES, return_tensors="pt")) mm_inputs.update( feature_extractor( AUDIOS, sampling_rate=getattr(processor, "audio_sampling_rate", 16000), return_attention_mask=True, padding="max_length", return_tensors="pt", ) ) mm_inputs["feature_attention_mask"] = mm_inputs.pop("attention_mask") return mm_inputs def _is_close(batch_a: dict[str, Any], batch_b: dict[str, Any]) -> None: assert batch_a.keys() == batch_b.keys() for key in batch_a.keys(): if isinstance(batch_a[key], torch.Tensor): assert torch.allclose(batch_a[key], batch_b[key], rtol=1e-4, atol=1e-5) elif isinstance(batch_a[key], list) and all(isinstance(item, torch.Tensor) for item in batch_a[key]): assert len(batch_a[key]) == len(batch_b[key]) for tensor_a, tensor_b in zip(batch_a[key], batch_b[key]): assert torch.allclose(tensor_a, tensor_b, rtol=1e-4, atol=1e-5) else: assert batch_a[key] == batch_b[key] def _load_tokenizer_module(model_name_or_path: str) -> "TokenizerModule": model_args, *_ = get_infer_args({"model_name_or_path": model_name_or_path, "template": "default"}) return load_tokenizer(model_args) def _check_plugin( plugin: "BasePlugin", tokenizer: "PreTrainedTokenizer", processor: "ProcessorMixin", expected_mm_messages: 
list[dict[str, str]] = MM_MESSAGES, expected_input_ids: list[int] = INPUT_IDS, expected_labels: list[int] = LABELS, expected_mm_inputs: dict[str, Any] = {}, expected_no_mm_inputs: dict[str, Any] = {}, ) -> None: if plugin.__class__.__name__ == "Qwen2OmniPlugin": # test omni_messages assert plugin.process_messages(OMNI_MESSAGES, IMAGES, NO_VIDEOS, AUDIOS, processor) == expected_mm_messages assert plugin.process_token_ids(INPUT_IDS, LABELS, IMAGES, NO_VIDEOS, AUDIOS, tokenizer, processor) == ( expected_input_ids, expected_labels, ) _is_close( plugin.get_mm_inputs(IMAGES, NO_VIDEOS, AUDIOS, IMGLENS, NO_VIDLENS, AUDLENS, BATCH_IDS, processor), expected_mm_inputs, ) elif plugin.__class__.__name__ == "Qwen3VLPlugin": # only check replacement assert plugin.process_messages(VIDEO_MESSAGES, NO_IMAGES, VIDEOS, NO_AUDIOS, processor) == expected_mm_messages elif plugin.__class__.__name__ != "BasePlugin": # test mm_messages assert plugin.process_messages(MM_MESSAGES, IMAGES, NO_VIDEOS, NO_AUDIOS, processor) == expected_mm_messages assert plugin.process_token_ids(INPUT_IDS, LABELS, IMAGES, NO_VIDEOS, NO_AUDIOS, tokenizer, processor) == ( expected_input_ids, expected_labels, ) _is_close( plugin.get_mm_inputs(IMAGES, NO_VIDEOS, NO_AUDIOS, IMGLENS, NO_VIDLENS, NO_AUDLENS, BATCH_IDS, processor), expected_mm_inputs, ) # test text_messages assert plugin.process_messages(TEXT_MESSAGES, NO_IMAGES, NO_VIDEOS, NO_AUDIOS, processor) == TEXT_MESSAGES assert plugin.process_token_ids(INPUT_IDS, LABELS, NO_IMAGES, NO_VIDEOS, NO_AUDIOS, tokenizer, processor) == ( INPUT_IDS, LABELS, ) _is_close( plugin.get_mm_inputs( NO_IMAGES, NO_VIDEOS, NO_AUDIOS, NO_IMGLENS, NO_VIDLENS, NO_AUDLENS, BATCH_IDS, processor ), expected_no_mm_inputs, ) @pytest.mark.runs_on(["cpu", "mps"]) def test_base_plugin(): tokenizer_module = _load_tokenizer_module(model_name_or_path=TINY_LLAMA3) base_plugin = get_mm_plugin(name="base") check_inputs = {"plugin": base_plugin, **tokenizer_module} _check_plugin(**check_inputs) 
@pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.skipif(not HF_TOKEN, reason="Gated model.") @pytest.mark.skipif(not is_transformers_version_greater_than("4.50.0"), reason="Requires transformers>=4.50.0") def test_gemma3_plugin(): image_seqlen = 256 tokenizer_module = _load_tokenizer_module(model_name_or_path="google/gemma-3-4b-it") gemma3_plugin = get_mm_plugin(name="gemma3", image_token="<image_soft_token>") image_tokens_expanded = "<image_soft_token>" * image_seqlen check_inputs = {"plugin": gemma3_plugin, **tokenizer_module} check_inputs["expected_mm_messages"] = [ { key: value.replace("<image>", f"\n\n<start_of_image>{image_tokens_expanded}<end_of_image>\n\n") for key, value in message.items() } for message in MM_MESSAGES ] check_inputs["expected_mm_inputs"] = _get_mm_inputs(tokenizer_module["processor"]) check_inputs["expected_mm_inputs"].pop("num_crops") check_inputs["expected_mm_inputs"]["token_type_ids"] = [[0] * 1024] check_inputs["expected_no_mm_inputs"] = {"token_type_ids": [[0] * 1024]} _check_plugin(**check_inputs) @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.skipif(not is_transformers_version_greater_than("4.52.0"), reason="Requires transformers>=4.52.0") def test_internvl_plugin(): image_seqlen = 256 tokenizer_module = _load_tokenizer_module(model_name_or_path="OpenGVLab/InternVL3-1B-hf") internvl_plugin = get_mm_plugin("intern_vl", image_token="<image>", video_token="<video>") check_inputs = {"plugin": internvl_plugin, **tokenizer_module} check_inputs["expected_mm_messages"] = [ { key: value.replace("<image>", f"<img>{'<IMG_CONTEXT>' * image_seqlen * 1}</img>") for key, value in message.items() } for message in MM_MESSAGES ] check_inputs["expected_mm_inputs"] = _get_mm_inputs(tokenizer_module["processor"]) check_inputs["expected_mm_inputs"].pop("num_patches", None) _check_plugin(**check_inputs) @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.skipif(not is_transformers_version_greater_than("4.51.0"), reason="Requires transformers>=4.51.0") 
def test_llama4_plugin(): tokenizer_module = _load_tokenizer_module(model_name_or_path=TINY_LLAMA4) processor = tokenizer_module["processor"] llama4_plugin = get_mm_plugin(name="llama4", image_token="<|image|>") check_inputs = {"plugin": llama4_plugin, **tokenizer_module} mm_inputs = _get_mm_inputs(tokenizer_module["processor"]) image_height, image_width = mm_inputs["pixel_values"][0].shape[-2:] num_patches_per_chunk = int( (image_height // processor.patch_size) * (image_width // processor.patch_size) // processor.downsample_ratio ) aspect_ratios = mm_inputs.pop("aspect_ratios") tokens_for_this_image = processor._prompt_split_image(aspect_ratios[0], num_patches_per_chunk) check_inputs["expected_mm_messages"] = [ {key: value.replace("<image>", tokens_for_this_image) for key, value in message.items()} for message in MM_MESSAGES ] check_inputs["expected_mm_inputs"] = mm_inputs _check_plugin(**check_inputs) @pytest.mark.runs_on(["cpu", "mps"]) def test_llava_plugin(): image_seqlen = 576 tokenizer_module = _load_tokenizer_module(model_name_or_path="llava-hf/llava-1.5-7b-hf") llava_plugin = get_mm_plugin(name="llava", image_token="<image>") check_inputs = {"plugin": llava_plugin, **tokenizer_module} check_inputs["expected_mm_messages"] = [ {key: value.replace("<image>", "<image>" * image_seqlen) for key, value in message.items()} for message in MM_MESSAGES ] check_inputs["expected_mm_inputs"] = _get_mm_inputs(tokenizer_module["processor"]) _check_plugin(**check_inputs) @pytest.mark.runs_on(["cpu", "mps"]) def test_llava_next_plugin(): image_seqlen = 1176 tokenizer_module = _load_tokenizer_module(model_name_or_path="llava-hf/llava-v1.6-vicuna-7b-hf") llava_next_plugin = get_mm_plugin(name="llava_next", image_token="<image>") check_inputs = {"plugin": llava_next_plugin, **tokenizer_module} check_inputs["expected_mm_messages"] = [ {key: value.replace("<image>", "<image>" * image_seqlen) for key, value in message.items()} for message in MM_MESSAGES ] 
check_inputs["expected_mm_inputs"] = _get_mm_inputs(tokenizer_module["processor"]) _check_plugin(**check_inputs) @pytest.mark.runs_on(["cpu", "mps"]) def test_llava_next_video_plugin(): image_seqlen = 1176 tokenizer_module = _load_tokenizer_module(model_name_or_path="llava-hf/LLaVA-NeXT-Video-7B-hf") llava_next_video_plugin = get_mm_plugin(name="llava_next_video", image_token="<image>", video_token="<video>") check_inputs = {"plugin": llava_next_video_plugin, **tokenizer_module} check_inputs["expected_mm_messages"] = [ {key: value.replace("<image>", "<image>" * image_seqlen) for key, value in message.items()} for message in MM_MESSAGES ] check_inputs["expected_mm_inputs"] = _get_mm_inputs(tokenizer_module["processor"]) _check_plugin(**check_inputs) @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.skipif(not HF_TOKEN, reason="Gated model.") def test_paligemma_plugin(): image_seqlen = 256 tokenizer_module = _load_tokenizer_module(model_name_or_path="google/paligemma-3b-pt-224") paligemma_plugin = get_mm_plugin(name="paligemma", image_token="<image>") check_inputs = {"plugin": paligemma_plugin, **tokenizer_module} check_inputs["expected_mm_messages"] = [ {key: value.replace("<image>", "") for key, value in message.items()} for message in MM_MESSAGES ] check_inputs["expected_input_ids"] = [ tokenizer_module["tokenizer"].convert_tokens_to_ids(paligemma_plugin.image_token) ] * image_seqlen + INPUT_IDS check_inputs["expected_labels"] = [-100] * image_seqlen + LABELS check_inputs["expected_mm_inputs"] = _get_mm_inputs(tokenizer_module["processor"]) check_inputs["expected_mm_inputs"]["token_type_ids"] = [[0] * image_seqlen + [1] * (1024 - image_seqlen)] check_inputs["expected_no_mm_inputs"] = {"token_type_ids": [[1] * 1024]} _check_plugin(**check_inputs) @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.skipif(not is_transformers_version_greater_than("4.50.0"), reason="Requires transformers>=4.50.0") def test_pixtral_plugin(): image_slice_height, image_slice_width = 2, 2 
tokenizer_module = _load_tokenizer_module(model_name_or_path="mistral-community/pixtral-12b") pixtral_plugin = get_mm_plugin(name="pixtral", image_token="[IMG]") check_inputs = {"plugin": pixtral_plugin, **tokenizer_module} check_inputs["expected_mm_messages"] = [ { key: value.replace( "<image>", ("{}[IMG_BREAK]".format("[IMG]" * image_slice_width) * image_slice_height).rsplit("[IMG_BREAK]", 1)[0] + "[IMG_END]", ) for key, value in message.items() } for message in MM_MESSAGES ] check_inputs["expected_mm_inputs"] = _get_mm_inputs(tokenizer_module["processor"]) check_inputs["expected_mm_inputs"]["pixel_values"] = check_inputs["expected_mm_inputs"]["pixel_values"][0] _check_plugin(**check_inputs) @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.skipif(not is_transformers_version_greater_than("4.52.0"), reason="Requires transformers>=4.52.0") def test_qwen2_omni_plugin(): image_seqlen, audio_seqlen = 4, 2 tokenizer_module = _load_tokenizer_module(model_name_or_path="Qwen/Qwen2.5-Omni-7B") qwen2_omni_plugin = get_mm_plugin( name="qwen2_omni", image_token="<|IMAGE|>", video_token="<|VIDEO|>", audio_token="<|AUDIO|>", vision_bos_token="<|vision_bos|>", vision_eos_token="<|vision_eos|>", audio_bos_token="<|audio_bos|>", audio_eos_token="<|audio_eos|>", ) check_inputs = {"plugin": qwen2_omni_plugin, **tokenizer_module} check_inputs["expected_mm_messages"] = [ { key: ( value.replace("<image>", f"<|vision_bos|>{'<|IMAGE|>' * image_seqlen}<|vision_eos|>").replace( "<audio>", f"<|audio_bos|>{'<|AUDIO|>' * audio_seqlen}<|audio_eos|>" ) ) for key, value in message.items() } for message in OMNI_MESSAGES ] check_inputs["expected_mm_inputs"] = _get_omni_inputs(tokenizer_module["processor"]) _check_plugin(**check_inputs) @pytest.mark.runs_on(["cpu", "mps"]) def test_qwen2_vl_plugin(): image_seqlen = 4 tokenizer_module = _load_tokenizer_module(model_name_or_path="Qwen/Qwen2-VL-7B-Instruct") qwen2_vl_plugin = get_mm_plugin(name="qwen2_vl", image_token="<|image_pad|>") check_inputs = 
{"plugin": qwen2_vl_plugin, **tokenizer_module} check_inputs["expected_mm_messages"] = [ { key: value.replace("<image>", "<|vision_start|>{}<|vision_end|>".format("<|image_pad|>" * image_seqlen)) for key, value in message.items() } for message in MM_MESSAGES ] check_inputs["expected_mm_inputs"] = _get_mm_inputs(tokenizer_module["processor"]) _check_plugin(**check_inputs) @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.skipif(not is_transformers_version_greater_than("4.57.0"), reason="Requires transformers>=4.57.0") def test_qwen3_vl_plugin(): frame_seqlen = 1 tokenizer_module = _load_tokenizer_module(model_name_or_path="Qwen/Qwen3-VL-30B-A3B-Instruct") qwen3_vl_plugin = get_mm_plugin(name="qwen3_vl", video_token="<|video_pad|>") check_inputs = {"plugin": qwen3_vl_plugin, **tokenizer_module} check_inputs["expected_mm_messages"] = [ { key: value.replace( "<video>", # little different with original processor for default `fps=2` in our repo "<0.2 seconds><|vision_start|>{}<|vision_end|><1.2 seconds><|vision_start|>{}<|vision_end|>".format( "<|video_pad|>" * frame_seqlen, "<|video_pad|>" * frame_seqlen ), ) for key, value in message.items() } for message in VIDEO_MESSAGES ] _check_plugin(**check_inputs) @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.skipif(not is_transformers_version_greater_than("4.47.0"), reason="Requires transformers>=4.47.0") def test_video_llava_plugin(): image_seqlen = 256 tokenizer_module = _load_tokenizer_module(model_name_or_path="LanguageBind/Video-LLaVA-7B-hf") video_llava_plugin = get_mm_plugin(name="video_llava", image_token="<image>", video_token="<video>") check_inputs = {"plugin": video_llava_plugin, **tokenizer_module} check_inputs["expected_mm_messages"] = [ {key: value.replace("<image>", "<image>" * image_seqlen) for key, value in message.items()} for message in MM_MESSAGES ] check_inputs["expected_mm_inputs"] = _get_mm_inputs(tokenizer_module["processor"]) _check_plugin(**check_inputs)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/data/test_converter.py
tests/data/test_converter.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from llamafactory.data import Role from llamafactory.data.converter import get_dataset_converter from llamafactory.data.parser import DatasetAttr from llamafactory.hparams import DataArguments @pytest.mark.runs_on(["cpu", "mps"]) def test_alpaca_converter(): dataset_attr = DatasetAttr("hf_hub", "llamafactory/tiny-supervised-dataset") data_args = DataArguments() example = { "instruction": "Solve the math problem.", "input": "3 + 4", "output": "The answer is 7.", } dataset_converter = get_dataset_converter("alpaca", dataset_attr, data_args) assert dataset_converter(example) == { "_prompt": [{"role": Role.USER.value, "content": "Solve the math problem.\n3 + 4"}], "_response": [{"role": Role.ASSISTANT.value, "content": "The answer is 7."}], "_system": "", "_tools": "", "_images": None, "_videos": None, "_audios": None, } @pytest.mark.runs_on(["cpu", "mps"]) def test_sharegpt_converter(): dataset_attr = DatasetAttr("hf_hub", "llamafactory/tiny-supervised-dataset") data_args = DataArguments() example = { "conversations": [ {"from": "system", "value": "You are a helpful assistant."}, {"from": "human", "value": "Solve the math problem.\n3 + 4"}, {"from": "gpt", "value": "The answer is 7."}, ] } dataset_converter = get_dataset_converter("sharegpt", dataset_attr, data_args) assert dataset_converter(example) == { "_prompt": [{"role": Role.USER.value, "content": "Solve the math problem.\n3 + 
4"}], "_response": [{"role": Role.ASSISTANT.value, "content": "The answer is 7."}], "_system": "You are a helpful assistant.", "_tools": "", "_images": None, "_videos": None, "_audios": None, }
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/data/test_collator.py
tests/data/test_collator.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest import torch from PIL import Image from transformers import AutoConfig, AutoModelForVision2Seq from llamafactory.data import get_template_and_fix_tokenizer from llamafactory.data.collator import MultiModalDataCollatorForSeq2Seq, prepare_4d_attention_mask from llamafactory.extras.constants import IGNORE_INDEX from llamafactory.hparams import get_infer_args from llamafactory.model import load_tokenizer TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") @pytest.mark.runs_on(["cpu", "mps"]) def test_base_collator(): model_args, data_args, *_ = get_infer_args({"model_name_or_path": TINY_LLAMA3, "template": "default"}) tokenizer_module = load_tokenizer(model_args) template = get_template_and_fix_tokenizer(tokenizer_module["tokenizer"], data_args) data_collator = MultiModalDataCollatorForSeq2Seq( template=template, pad_to_multiple_of=8, label_pad_token_id=IGNORE_INDEX, **tokenizer_module, ) p = tokenizer_module["tokenizer"].pad_token_id q = IGNORE_INDEX features = [ { "input_ids": [0, 1, 2, 3, 4, 5], "attention_mask": [1, 1, 1, 1, 1, 1], "labels": [q, q, 2, 3, 4, 5], }, { "input_ids": [6, 7], "attention_mask": [1, 1], "labels": [q, 7], }, ] batch_input = data_collator(features) expected_input = { "input_ids": [ [0, 1, 2, 3, 4, 5, p, p], [6, 7, p, p, p, p, p, p], ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0], ], "labels": [ [q, 
q, 2, 3, 4, 5, q, q], [q, 7, q, q, q, q, q, q], ], } for k in batch_input.keys(): assert batch_input[k].eq(torch.tensor(expected_input[k])).all() @pytest.mark.runs_on(["cpu", "mps"]) def test_multimodal_collator(): model_args, data_args, *_ = get_infer_args( {"model_name_or_path": "Qwen/Qwen2-VL-2B-Instruct", "template": "qwen2_vl"} ) tokenizer_module = load_tokenizer(model_args) template = get_template_and_fix_tokenizer(tokenizer_module["tokenizer"], data_args) config = AutoConfig.from_pretrained(model_args.model_name_or_path) with torch.device("meta"): model = AutoModelForVision2Seq.from_config(config) data_collator = MultiModalDataCollatorForSeq2Seq( template=template, model=model, pad_to_multiple_of=4, label_pad_token_id=IGNORE_INDEX, **tokenizer_module, ) p = tokenizer_module["tokenizer"].pad_token_id q = IGNORE_INDEX s = tokenizer_module["tokenizer"].convert_tokens_to_ids("<|vision_start|>") e = tokenizer_module["tokenizer"].convert_tokens_to_ids("<|vision_end|>") m = tokenizer_module["tokenizer"].convert_tokens_to_ids("<|image_pad|>") fake_image = Image.new("RGB", (64, 64), (255, 255, 255)) features = [ { "input_ids": [0, 1, 2, 3], "attention_mask": [1, 1, 1, 1], "labels": [0, 1, 2, 3], }, ] batch_input = data_collator(features) expected_input = { "input_ids": [ [0, 1, 2, 3, s, m, m, m, m, e, p, p], ], "attention_mask": [ [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], ], "labels": [ [0, 1, 2, 3, q, q, q, q, q, q, q, q], ], "position_ids": [ [[0, 1, 2, 3, 1, 1, 1, 1, 1, 1, 1, 1]], [[0, 1, 2, 3, 1, 1, 1, 1, 1, 1, 1, 1]], [[0, 1, 2, 3, 1, 1, 1, 1, 1, 1, 1, 1]], ], "rope_deltas": [[-8]], **tokenizer_module["processor"].image_processor(fake_image), } assert batch_input.keys() == expected_input.keys() for k in batch_input.keys(): assert batch_input[k].eq(torch.tensor(expected_input[k])).all() @pytest.mark.runs_on(["cpu"]) def test_4d_attention_mask(): o = 0.0 x = torch.finfo(torch.float16).min attention_mask_with_indices = torch.tensor( [ [1, 1, 2, 2, 2, 0], [1, 2, 2, 3, 
3, 3], ] ) attention_mask_computed = prepare_4d_attention_mask(attention_mask_with_indices, torch.float16) attention_mask_expected = torch.tensor( [ [ [ [o, x, x, x, x, x], [o, o, x, x, x, x], [x, x, o, x, x, x], [x, x, o, o, x, x], [x, x, o, o, o, x], [x, x, x, x, x, x], ] ], [ [ [o, x, x, x, x, x], [x, o, x, x, x, x], [x, o, o, x, x, x], [x, x, x, o, x, x], [x, x, x, o, o, x], [x, x, x, o, o, o], ] ], ], dtype=torch.float16, ) assert list(attention_mask_computed.size()) == [2, 1, 6, 6] assert torch.all(attention_mask_computed == attention_mask_expected) if __name__ == "__main__": test_multimodal_collator()
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/data/test_formatter.py
tests/data/test_formatter.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from datetime import datetime import pytest from llamafactory.data.formatter import EmptyFormatter, FunctionFormatter, StringFormatter, ToolFormatter FUNCTION = {"name": "tool_name", "arguments": {"foo": "bar", "size": 10}} TOOLS = [ { "name": "test_tool", "description": "tool_desc", "parameters": { "type": "object", "properties": { "foo": {"type": "string", "description": "foo_desc"}, "bar": {"type": "number", "description": "bar_desc"}, }, "required": ["foo"], }, } ] @pytest.mark.runs_on(["cpu", "mps"]) def test_empty_formatter(): formatter = EmptyFormatter(slots=["\n"]) assert formatter.apply() == ["\n"] @pytest.mark.runs_on(["cpu", "mps"]) def test_string_formatter(): formatter = StringFormatter(slots=["<s>", "Human: {{content}}\nAssistant:"]) assert formatter.apply(content="Hi") == ["<s>", "Human: Hi\nAssistant:"] @pytest.mark.runs_on(["cpu", "mps"]) def test_function_formatter(): formatter = FunctionFormatter(slots=["{{content}}", "</s>"], tool_format="default") tool_calls = json.dumps(FUNCTION) assert formatter.apply(content=tool_calls) == [ """Action: tool_name\nAction Input: {"foo": "bar", "size": 10}""", "</s>", ] @pytest.mark.runs_on(["cpu", "mps"]) def test_multi_function_formatter(): formatter = FunctionFormatter(slots=["{{content}}", "</s>"], tool_format="default") tool_calls = json.dumps([FUNCTION] * 2) assert formatter.apply(content=tool_calls) == [ """Action: 
tool_name\nAction Input: {"foo": "bar", "size": 10}\n""" """Action: tool_name\nAction Input: {"foo": "bar", "size": 10}""", "</s>", ] @pytest.mark.runs_on(["cpu", "mps"]) def test_default_tool_formatter(): formatter = ToolFormatter(tool_format="default") assert formatter.apply(content=json.dumps(TOOLS)) == [ "You have access to the following tools:\n" "> Tool Name: test_tool\n" "Tool Description: tool_desc\n" "Tool Args:\n" " - foo (string, required): foo_desc\n" " - bar (number): bar_desc\n\n" "Use the following format if using a tool:\n" "```\n" "Action: tool name (one of [test_tool])\n" "Action Input: the input to the tool, in a JSON format representing the kwargs " """(e.g. ```{"input": "hello world", "num_beams": 5}```)\n""" "```\n" ] @pytest.mark.runs_on(["cpu", "mps"]) def test_default_tool_extractor(): formatter = ToolFormatter(tool_format="default") result = """Action: test_tool\nAction Input: {"foo": "bar", "size": 10}""" assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")] @pytest.mark.runs_on(["cpu", "mps"]) def test_default_multi_tool_extractor(): formatter = ToolFormatter(tool_format="default") result = ( """Action: test_tool\nAction Input: {"foo": "bar", "size": 10}\n""" """Action: another_tool\nAction Input: {"foo": "job", "size": 2}""" ) assert formatter.extract(result) == [ ("test_tool", """{"foo": "bar", "size": 10}"""), ("another_tool", """{"foo": "job", "size": 2}"""), ] @pytest.mark.runs_on(["cpu", "mps"]) def test_glm4_function_formatter(): formatter = FunctionFormatter(slots=["{{content}}"], tool_format="glm4") tool_calls = json.dumps(FUNCTION) assert formatter.apply(content=tool_calls) == ["""tool_name\n{"foo": "bar", "size": 10}"""] @pytest.mark.runs_on(["cpu", "mps"]) def test_glm4_tool_formatter(): formatter = ToolFormatter(tool_format="glm4") assert formatter.apply(content=json.dumps(TOOLS)) == [ "你是一个名为 ChatGLM 的人工智能助手。你是基于智谱 AI 公司训练的语言模型 GLM-4 模型开发的," "你的任务是针对用户的问题和要求提供适当的答复和支持。\n\n# 可用工具\n\n" f"## 
test_tool\n\n{json.dumps(TOOLS[0], indent=4, ensure_ascii=False)}\n" "在调用上述函数时,请使用 Json 格式表示调用的参数。" ] @pytest.mark.runs_on(["cpu", "mps"]) def test_glm4_tool_extractor(): formatter = ToolFormatter(tool_format="glm4") result = """test_tool\n{"foo": "bar", "size": 10}\n""" assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")] @pytest.mark.runs_on(["cpu", "mps"]) def test_llama3_function_formatter(): formatter = FunctionFormatter(slots=["{{content}}<|eot_id|>"], tool_format="llama3") tool_calls = json.dumps(FUNCTION) assert formatter.apply(content=tool_calls) == [ """{"name": "tool_name", "parameters": {"foo": "bar", "size": 10}}<|eot_id|>""" ] @pytest.mark.runs_on(["cpu", "mps"]) def test_llama3_multi_function_formatter(): formatter = FunctionFormatter(slots=["{{content}}<|eot_id|>"], tool_format="llama3") tool_calls = json.dumps([FUNCTION] * 2) assert formatter.apply(content=tool_calls) == [ """[{"name": "tool_name", "parameters": {"foo": "bar", "size": 10}}, """ """{"name": "tool_name", "parameters": {"foo": "bar", "size": 10}}]""" """<|eot_id|>""" ] @pytest.mark.runs_on(["cpu", "mps"]) def test_llama3_tool_formatter(): formatter = ToolFormatter(tool_format="llama3") date = datetime.now().strftime("%d %b %Y") wrapped_tool = {"type": "function", "function": TOOLS[0]} assert formatter.apply(content=json.dumps(TOOLS)) == [ f"Cutting Knowledge Date: December 2023\nToday Date: {date}\n\n" "You have access to the following functions. " "To call a function, please respond with JSON for a function call. " """Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}. 
""" f"Do not use variables.\n\n{json.dumps(wrapped_tool, indent=4, ensure_ascii=False)}\n\n" ] @pytest.mark.runs_on(["cpu", "mps"]) def test_llama3_tool_extractor(): formatter = ToolFormatter(tool_format="llama3") result = """{"name": "test_tool", "parameters": {"foo": "bar", "size": 10}}\n""" assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")] @pytest.mark.runs_on(["cpu", "mps"]) def test_llama3_multi_tool_extractor(): formatter = ToolFormatter(tool_format="llama3") result = ( """[{"name": "test_tool", "parameters": {"foo": "bar", "size": 10}}, """ """{"name": "another_tool", "parameters": {"foo": "job", "size": 2}}]""" ) assert formatter.extract(result) == [ ("test_tool", """{"foo": "bar", "size": 10}"""), ("another_tool", """{"foo": "job", "size": 2}"""), ] @pytest.mark.runs_on(["cpu", "mps"]) def test_mistral_function_formatter(): formatter = FunctionFormatter(slots=["[TOOL_CALLS] {{content}}", "</s>"], tool_format="mistral") tool_calls = json.dumps(FUNCTION) assert formatter.apply(content=tool_calls) == [ "[TOOL_CALLS] " """[{"name": "tool_name", "arguments": {"foo": "bar", "size": 10}}]""", "</s>", ] @pytest.mark.runs_on(["cpu", "mps"]) def test_mistral_multi_function_formatter(): formatter = FunctionFormatter(slots=["[TOOL_CALLS] {{content}}", "</s>"], tool_format="mistral") tool_calls = json.dumps([FUNCTION] * 2) assert formatter.apply(content=tool_calls) == [ "[TOOL_CALLS] " """[{"name": "tool_name", "arguments": {"foo": "bar", "size": 10}}, """ """{"name": "tool_name", "arguments": {"foo": "bar", "size": 10}}]""", "</s>", ] @pytest.mark.runs_on(["cpu", "mps"]) def test_mistral_tool_formatter(): formatter = ToolFormatter(tool_format="mistral") wrapped_tool = {"type": "function", "function": TOOLS[0]} assert formatter.apply(content=json.dumps(TOOLS)) == [ "[AVAILABLE_TOOLS] " + json.dumps([wrapped_tool], ensure_ascii=False) + "[/AVAILABLE_TOOLS]" ] @pytest.mark.runs_on(["cpu", "mps"]) def test_mistral_tool_extractor(): 
formatter = ToolFormatter(tool_format="mistral") result = """{"name": "test_tool", "arguments": {"foo": "bar", "size": 10}}""" assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")] @pytest.mark.runs_on(["cpu", "mps"]) def test_mistral_multi_tool_extractor(): formatter = ToolFormatter(tool_format="mistral") result = ( """[{"name": "test_tool", "arguments": {"foo": "bar", "size": 10}}, """ """{"name": "another_tool", "arguments": {"foo": "job", "size": 2}}]""" ) assert formatter.extract(result) == [ ("test_tool", """{"foo": "bar", "size": 10}"""), ("another_tool", """{"foo": "job", "size": 2}"""), ] @pytest.mark.runs_on(["cpu", "mps"]) def test_qwen_function_formatter(): formatter = FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen") tool_calls = json.dumps(FUNCTION) assert formatter.apply(content=tool_calls) == [ """<tool_call>\n{"name": "tool_name", "arguments": {"foo": "bar", "size": 10}}\n</tool_call><|im_end|>\n""" ] @pytest.mark.runs_on(["cpu", "mps"]) def test_qwen_multi_function_formatter(): formatter = FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen") tool_calls = json.dumps([FUNCTION] * 2) assert formatter.apply(content=tool_calls) == [ """<tool_call>\n{"name": "tool_name", "arguments": {"foo": "bar", "size": 10}}\n</tool_call>\n""" """<tool_call>\n{"name": "tool_name", "arguments": {"foo": "bar", "size": 10}}\n</tool_call>""" "<|im_end|>\n" ] @pytest.mark.runs_on(["cpu", "mps"]) def test_qwen_tool_formatter(): formatter = ToolFormatter(tool_format="qwen") wrapped_tool = {"type": "function", "function": TOOLS[0]} assert formatter.apply(content=json.dumps(TOOLS)) == [ "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\n" "You are provided with function signatures within <tools></tools> XML tags:\n<tools>" f"\n{json.dumps(wrapped_tool, ensure_ascii=False)}" "\n</tools>\n\nFor each function call, return a json object with function name and arguments 
within " """<tool_call></tool_call> XML tags:\n<tool_call>\n{"name": <function-name>, """ """"arguments": <args-json-object>}\n</tool_call>""" ] @pytest.mark.runs_on(["cpu", "mps"]) def test_qwen_tool_extractor(): formatter = ToolFormatter(tool_format="qwen") result = """<tool_call>\n{"name": "test_tool", "arguments": {"foo": "bar", "size": 10}}\n</tool_call>""" assert formatter.extract(result) == [("test_tool", """{"foo": "bar", "size": 10}""")] @pytest.mark.runs_on(["cpu", "mps"]) def test_qwen_multi_tool_extractor(): formatter = ToolFormatter(tool_format="qwen") result = ( """<tool_call>\n{"name": "test_tool", "arguments": {"foo": "bar", "size": 10}}\n</tool_call>\n""" """<tool_call>\n{"name": "another_tool", "arguments": {"foo": "job", "size": 2}}\n</tool_call>""" ) assert formatter.extract(result) == [ ("test_tool", """{"foo": "bar", "size": 10}"""), ("another_tool", """{"foo": "job", "size": 2}"""), ]
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/data/processor/test_processor_utils.py
tests/data/processor/test_processor_utils.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from llamafactory.data.processor.processor_utils import infer_seqlen @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.parametrize( "test_input,test_output", [ ((3000, 2000, 1000), (600, 400)), ((2000, 3000, 1000), (400, 600)), ((1000, 100, 1000), (900, 100)), ((100, 1000, 1000), (100, 900)), ((100, 500, 1000), (100, 500)), ((500, 100, 1000), (500, 100)), ((10, 10, 1000), (10, 10)), ], ) def test_infer_seqlen(test_input: tuple[int, int, int], test_output: tuple[int, int]): assert test_output == infer_seqlen(*test_input)
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/data/processor/test_unsupervised.py
tests/data/processor/test_unsupervised.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import random import pytest from datasets import load_dataset from transformers import AutoTokenizer from llamafactory.train.test_utils import load_dataset_module DEMO_DATA = os.getenv("DEMO_DATA", "llamafactory/demo_data") TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") TINY_DATA = os.getenv("TINY_DATA", "llamafactory/tiny-supervised-dataset") TRAIN_ARGS = { "model_name_or_path": TINY_LLAMA3, "stage": "ppo", "do_train": True, "finetuning_type": "full", "reward_model": "", "reward_model_type": "full", "dataset": "system_chat", "dataset_dir": "REMOTE:" + DEMO_DATA, "template": "llama3", "cutoff_len": 8192, "output_dir": "dummy_dir", "overwrite_output_dir": True, "fp16": True, "report_to": "none", # transfromers compatibility } @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.parametrize("num_samples", [16]) def test_unsupervised_data(num_samples: int): train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"] ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3) original_data = load_dataset(DEMO_DATA, name="system_chat", split="train") indexes = random.choices(range(len(original_data)), k=num_samples) for index in indexes: messages = original_data["messages"][index] ref_ids = ref_tokenizer.apply_chat_template(messages) ref_input_ids = ref_tokenizer.apply_chat_template(messages[:-1], add_generation_prompt=True) ref_labels = 
ref_ids[len(ref_input_ids) :] assert train_dataset["input_ids"][index] == ref_input_ids assert train_dataset["labels"][index] == ref_labels
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/data/processor/test_supervised.py
tests/data/processor/test_supervised.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import random import pytest from datasets import load_dataset from transformers import AutoTokenizer from llamafactory.extras.constants import IGNORE_INDEX from llamafactory.train.test_utils import load_dataset_module DEMO_DATA = os.getenv("DEMO_DATA", "llamafactory/demo_data") TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") TINY_DATA = os.getenv("TINY_DATA", "llamafactory/tiny-supervised-dataset") TRAIN_ARGS = { "model_name_or_path": TINY_LLAMA3, "stage": "sft", "do_train": True, "finetuning_type": "full", "template": "llama3", "cutoff_len": 8192, "output_dir": "dummy_dir", "overwrite_output_dir": True, "fp16": True, } @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.parametrize("num_samples", [16]) def test_supervised_single_turn(num_samples: int): train_dataset = load_dataset_module(dataset_dir="ONLINE", dataset=TINY_DATA, **TRAIN_ARGS)["train_dataset"] ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3) original_data = load_dataset(TINY_DATA, split="train") indexes = random.choices(range(len(original_data)), k=num_samples) for index in indexes: prompt = original_data["instruction"][index] if original_data["input"][index]: prompt += "\n" + original_data["input"][index] messages = [ {"role": "user", "content": prompt}, {"role": "assistant", "content": original_data["output"][index]}, ] ref_input_ids = ref_tokenizer.apply_chat_template(messages) 
assert train_dataset["input_ids"][index] == ref_input_ids @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.parametrize("num_samples", [8]) def test_supervised_multi_turn(num_samples: int): train_dataset = load_dataset_module(dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", **TRAIN_ARGS)[ "train_dataset" ] ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3) original_data = load_dataset(DEMO_DATA, name="system_chat", split="train") indexes = random.choices(range(len(original_data)), k=num_samples) for index in indexes: ref_input_ids = ref_tokenizer.apply_chat_template(original_data["messages"][index]) assert train_dataset["input_ids"][index] == ref_input_ids @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.parametrize("num_samples", [4]) def test_supervised_train_on_prompt(num_samples: int): train_dataset = load_dataset_module( dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", train_on_prompt=True, **TRAIN_ARGS )["train_dataset"] ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3) original_data = load_dataset(DEMO_DATA, name="system_chat", split="train") indexes = random.choices(range(len(original_data)), k=num_samples) for index in indexes: ref_ids = ref_tokenizer.apply_chat_template(original_data["messages"][index]) assert train_dataset["input_ids"][index] == ref_ids assert train_dataset["labels"][index] == ref_ids @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.parametrize("num_samples", [4]) def test_supervised_mask_history(num_samples: int): train_dataset = load_dataset_module( dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", mask_history=True, **TRAIN_ARGS )["train_dataset"] ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3) original_data = load_dataset(DEMO_DATA, name="system_chat", split="train") indexes = random.choices(range(len(original_data)), k=num_samples) for index in indexes: messages = original_data["messages"][index] ref_input_ids = ref_tokenizer.apply_chat_template(messages) prompt_len = 
len(ref_tokenizer.apply_chat_template(messages[:-1], add_generation_prompt=True)) ref_label_ids = [IGNORE_INDEX] * prompt_len + ref_input_ids[prompt_len:] assert train_dataset["input_ids"][index] == ref_input_ids assert train_dataset["labels"][index] == ref_label_ids
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/data/processor/test_feedback.py
tests/data/processor/test_feedback.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import random import pytest from datasets import load_dataset from transformers import AutoTokenizer from llamafactory.extras.constants import IGNORE_INDEX from llamafactory.train.test_utils import load_dataset_module DEMO_DATA = os.getenv("DEMO_DATA", "llamafactory/demo_data") TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") TRAIN_ARGS = { "model_name_or_path": TINY_LLAMA3, "stage": "kto", "do_train": True, "finetuning_type": "full", "dataset": "kto_en_demo", "dataset_dir": "REMOTE:" + DEMO_DATA, "template": "llama3", "cutoff_len": 8192, "output_dir": "dummy_dir", "overwrite_output_dir": True, "fp16": True, } @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.parametrize("num_samples", [16]) def test_feedback_data(num_samples: int): train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"] ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3) original_data = load_dataset(DEMO_DATA, name="kto_en_demo", split="train") indexes = random.choices(range(len(original_data)), k=num_samples) for index in indexes: messages = original_data["messages"][index] ref_input_ids = ref_tokenizer.apply_chat_template(messages) prompt_len = len(ref_tokenizer.apply_chat_template(messages[:-1], add_generation_prompt=True)) ref_labels = [IGNORE_INDEX] * prompt_len + ref_input_ids[prompt_len:] assert train_dataset["input_ids"][index] == ref_input_ids assert 
train_dataset["labels"][index] == ref_labels assert train_dataset["kto_tags"][index] == original_data["label"][index]
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
hiyouga/LlamaFactory
https://github.com/hiyouga/LlamaFactory/blob/f60a6e3d015962198b7c626936f117e83260bde9/tests/data/processor/test_pairwise.py
tests/data/processor/test_pairwise.py
# Copyright 2025 the LlamaFactory team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import random import pytest from datasets import load_dataset from transformers import AutoTokenizer from llamafactory.extras.constants import IGNORE_INDEX from llamafactory.train.test_utils import load_dataset_module DEMO_DATA = os.getenv("DEMO_DATA", "llamafactory/demo_data") TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3") TRAIN_ARGS = { "model_name_or_path": TINY_LLAMA3, "stage": "rm", "do_train": True, "finetuning_type": "full", "dataset": "dpo_en_demo", "dataset_dir": "REMOTE:" + DEMO_DATA, "template": "llama3", "cutoff_len": 8192, "output_dir": "dummy_dir", "overwrite_output_dir": True, "fp16": True, } def _convert_sharegpt_to_openai(messages: list[dict[str, str]]) -> list[dict[str, str]]: role_mapping = {"human": "user", "gpt": "assistant", "system": "system"} new_messages = [] for message in messages: new_messages.append({"role": role_mapping[message["from"]], "content": message["value"]}) return new_messages @pytest.mark.runs_on(["cpu", "mps"]) @pytest.mark.parametrize("num_samples", [16]) def test_pairwise_data(num_samples: int): train_dataset = load_dataset_module(**TRAIN_ARGS)["train_dataset"] ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3) original_data = load_dataset(DEMO_DATA, name="dpo_en_demo", split="train") indexes = random.choices(range(len(original_data)), k=num_samples) for index in indexes: chosen_messages = 
original_data["conversations"][index] + [original_data["chosen"][index]] rejected_messages = original_data["conversations"][index] + [original_data["rejected"][index]] chosen_messages = _convert_sharegpt_to_openai(chosen_messages) rejected_messages = _convert_sharegpt_to_openai(rejected_messages) ref_chosen_input_ids = ref_tokenizer.apply_chat_template(chosen_messages) chosen_prompt_len = len(ref_tokenizer.apply_chat_template(chosen_messages[:-1], add_generation_prompt=True)) ref_chosen_labels = [IGNORE_INDEX] * chosen_prompt_len + ref_chosen_input_ids[chosen_prompt_len:] ref_rejected_input_ids = ref_tokenizer.apply_chat_template(rejected_messages) rejected_prompt_len = len( ref_tokenizer.apply_chat_template(rejected_messages[:-1], add_generation_prompt=True) ) ref_rejected_labels = [IGNORE_INDEX] * rejected_prompt_len + ref_rejected_input_ids[rejected_prompt_len:] assert train_dataset["chosen_input_ids"][index] == ref_chosen_input_ids assert train_dataset["chosen_labels"][index] == ref_chosen_labels assert train_dataset["rejected_input_ids"][index] == ref_rejected_input_ids assert train_dataset["rejected_labels"][index] == ref_rejected_labels
python
Apache-2.0
f60a6e3d015962198b7c626936f117e83260bde9
2026-01-04T14:38:25.134237Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/setup.py
setup.py
#!/usr/bin/env python # coding: utf-8 from __future__ import print_function import os.path import warnings import sys try: from setuptools import setup, Command setuptools_available = True except ImportError: from distutils.core import setup, Command setuptools_available = False from distutils.spawn import spawn try: # This will create an exe that needs Microsoft Visual C++ 2008 # Redistributable Package import py2exe except ImportError: if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe': print('Cannot import py2exe', file=sys.stderr) exit(1) py2exe_options = { 'bundle_files': 1, 'compressed': 1, 'optimize': 2, 'dist_dir': '.', 'dll_excludes': ['w9xpopen.exe', 'crypt32.dll'], } # Get the version from youtube_dl/version.py without importing the package exec(compile(open('youtube_dl/version.py').read(), 'youtube_dl/version.py', 'exec')) DESCRIPTION = 'YouTube video downloader' LONG_DESCRIPTION = 'Command-line program to download videos from YouTube.com and other video sites' py2exe_console = [{ 'script': './youtube_dl/__main__.py', 'dest_base': 'youtube-dl', 'version': __version__, 'description': DESCRIPTION, 'comments': LONG_DESCRIPTION, 'product_name': 'youtube-dl', 'product_version': __version__, }] py2exe_params = { 'console': py2exe_console, 'options': {'py2exe': py2exe_options}, 'zipfile': None } if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe': params = py2exe_params else: files_spec = [ ('etc/bash_completion.d', ['youtube-dl.bash-completion']), ('etc/fish/completions', ['youtube-dl.fish']), ('share/doc/youtube_dl', ['README.txt']), ('share/man/man1', ['youtube-dl.1']) ] root = os.path.dirname(os.path.abspath(__file__)) data_files = [] for dirname, files in files_spec: resfiles = [] for fn in files: if not os.path.exists(fn): warnings.warn('Skipping file %s since it is not present. Type make to build all automatically generated files.' 
% fn) else: resfiles.append(fn) data_files.append((dirname, resfiles)) params = { 'data_files': data_files, } if setuptools_available: params['entry_points'] = {'console_scripts': ['youtube-dl = youtube_dl:main']} else: params['scripts'] = ['bin/youtube-dl'] class build_lazy_extractors(Command): description = 'Build the extractor lazy loading module' user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): spawn( [sys.executable, 'devscripts/make_lazy_extractors.py', 'youtube_dl/extractor/lazy_extractors.py'], dry_run=self.dry_run, ) setup( name='youtube_dl', version=__version__, description=DESCRIPTION, long_description=LONG_DESCRIPTION, url='https://github.com/ytdl-org/youtube-dl', author='Ricardo Garcia', author_email='ytdl@yt-dl.org', maintainer='Sergey M.', maintainer_email='dstftw@gmail.com', license='Unlicense', packages=[ 'youtube_dl', 'youtube_dl.extractor', 'youtube_dl.downloader', 'youtube_dl.postprocessor'], # Provokes warning on most systems (why?!) 
# test_suite = 'nose.collector', # test_requires = ['nosetest'], classifiers=[ 'Topic :: Multimedia :: Video', 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'License :: Public Domain', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: Implementation', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: IronPython', 'Programming Language :: Python :: Implementation :: Jython', 'Programming Language :: Python :: Implementation :: PyPy', ], cmdclass={'build_lazy_extractors': build_lazy_extractors}, **params )
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/compat.py
youtube_dl/compat.py
# coding: utf-8 from __future__ import unicode_literals from __future__ import division import base64 import binascii import collections import ctypes import datetime import email import getpass import io import itertools import optparse import os import platform import re import shlex import socket import struct import subprocess import sys import types import xml.etree.ElementTree _IDENTITY = lambda x: x # naming convention # 'compat_' + Python3_name.replace('.', '_') # other aliases exist for convenience and/or legacy # wrap disposable test values in type() to reclaim storage # deal with critical unicode/str things first: # compat_str, compat_basestring, compat_chr try: # Python 2 compat_str, compat_basestring, compat_chr = ( unicode, basestring, unichr ) except NameError: compat_str, compat_basestring, compat_chr = ( str, (str, bytes), chr ) # compat_casefold try: compat_str.casefold compat_casefold = lambda s: s.casefold() except AttributeError: from .casefold import _casefold as compat_casefold # compat_collections_abc try: import collections.abc as compat_collections_abc except ImportError: compat_collections_abc = collections # compat_urllib_request try: import urllib.request as compat_urllib_request except ImportError: # Python 2 import urllib2 as compat_urllib_request # Also fix up lack of method arg in old Pythons try: type(compat_urllib_request.Request('http://127.0.0.1', method='GET')) except TypeError: def _add_init_method_arg(cls): init = cls.__init__ def wrapped_init(self, *args, **kwargs): method = kwargs.pop('method', 'GET') init(self, *args, **kwargs) if any(callable(x.__dict__.get('get_method')) for x in (self.__class__, self) if x != cls): # allow instance or its subclass to override get_method() return if self.has_data() and method == 'GET': method = 'POST' self.get_method = types.MethodType(lambda _: method, self) cls.__init__ = wrapped_init _add_init_method_arg(compat_urllib_request.Request) del _add_init_method_arg # compat_urllib_error 
try: import urllib.error as compat_urllib_error except ImportError: # Python 2 import urllib2 as compat_urllib_error # compat_urllib_parse try: import urllib.parse as compat_urllib_parse except ImportError: # Python 2 import urllib as compat_urllib_parse import urlparse as _urlparse for a in dir(_urlparse): if not hasattr(compat_urllib_parse, a): setattr(compat_urllib_parse, a, getattr(_urlparse, a)) del _urlparse # unfavoured aliases compat_urlparse = compat_urllib_parse compat_urllib_parse_urlparse = compat_urllib_parse.urlparse # compat_urllib_response try: import urllib.response as compat_urllib_response except ImportError: # Python 2 import urllib as compat_urllib_response # compat_urllib_response.addinfourl try: compat_urllib_response.addinfourl.status except AttributeError: # .getcode() is deprecated in Py 3. compat_urllib_response.addinfourl.status = property(lambda self: self.getcode()) # compat_http_cookiejar try: import http.cookiejar as compat_cookiejar except ImportError: # Python 2 import cookielib as compat_cookiejar compat_http_cookiejar = compat_cookiejar if sys.version_info[0] == 2: class compat_cookiejar_Cookie(compat_cookiejar.Cookie): def __init__(self, version, name, value, *args, **kwargs): if isinstance(name, compat_str): name = name.encode() if isinstance(value, compat_str): value = value.encode() compat_cookiejar.Cookie.__init__(self, version, name, value, *args, **kwargs) else: compat_cookiejar_Cookie = compat_cookiejar.Cookie compat_http_cookiejar_Cookie = compat_cookiejar_Cookie # compat_http_cookies try: import http.cookies as compat_cookies except ImportError: # Python 2 import Cookie as compat_cookies compat_http_cookies = compat_cookies # compat_http_cookies_SimpleCookie if sys.version_info[0] == 2 or sys.version_info < (3, 3): class compat_cookies_SimpleCookie(compat_cookies.SimpleCookie): def load(self, rawdata): must_have_value = 0 if not isinstance(rawdata, dict): if sys.version_info[:2] != (2, 7) or 
sys.platform.startswith('java'): # attribute must have value for parsing rawdata, must_have_value = re.subn( r'(?i)(;\s*)(secure|httponly)(\s*(?:;|$))', r'\1\2=\2\3', rawdata) if sys.version_info[0] == 2: if isinstance(rawdata, compat_str): rawdata = str(rawdata) super(compat_cookies_SimpleCookie, self).load(rawdata) if must_have_value > 0: for morsel in self.values(): for attr in ('secure', 'httponly'): if morsel.get(attr): morsel[attr] = True else: compat_cookies_SimpleCookie = compat_cookies.SimpleCookie compat_http_cookies_SimpleCookie = compat_cookies_SimpleCookie # compat_html_entities, probably useless now try: import html.entities as compat_html_entities except ImportError: # Python 2 import htmlentitydefs as compat_html_entities # compat_html_entities_html5 try: # Python >= 3.3 compat_html_entities_html5 = compat_html_entities.html5 except AttributeError: # Copied from CPython 3.5.1 html/entities.py compat_html_entities_html5 = { 'Aacute': '\xc1', 'aacute': '\xe1', 'Aacute;': '\xc1', 'aacute;': '\xe1', 'Abreve;': '\u0102', 'abreve;': '\u0103', 'ac;': '\u223e', 'acd;': '\u223f', 'acE;': '\u223e\u0333', 'Acirc': '\xc2', 'acirc': '\xe2', 'Acirc;': '\xc2', 'acirc;': '\xe2', 'acute': '\xb4', 'acute;': '\xb4', 'Acy;': '\u0410', 'acy;': '\u0430', 'AElig': '\xc6', 'aelig': '\xe6', 'AElig;': '\xc6', 'aelig;': '\xe6', 'af;': '\u2061', 'Afr;': '\U0001d504', 'afr;': '\U0001d51e', 'Agrave': '\xc0', 'agrave': '\xe0', 'Agrave;': '\xc0', 'agrave;': '\xe0', 'alefsym;': '\u2135', 'aleph;': '\u2135', 'Alpha;': '\u0391', 'alpha;': '\u03b1', 'Amacr;': '\u0100', 'amacr;': '\u0101', 'amalg;': '\u2a3f', 'AMP': '&', 'amp': '&', 'AMP;': '&', 'amp;': '&', 'And;': '\u2a53', 'and;': '\u2227', 'andand;': '\u2a55', 'andd;': '\u2a5c', 'andslope;': '\u2a58', 'andv;': '\u2a5a', 'ang;': '\u2220', 'ange;': '\u29a4', 'angle;': '\u2220', 'angmsd;': '\u2221', 'angmsdaa;': '\u29a8', 'angmsdab;': '\u29a9', 'angmsdac;': '\u29aa', 'angmsdad;': '\u29ab', 'angmsdae;': '\u29ac', 'angmsdaf;': '\u29ad', 
'angmsdag;': '\u29ae', 'angmsdah;': '\u29af', 'angrt;': '\u221f', 'angrtvb;': '\u22be', 'angrtvbd;': '\u299d', 'angsph;': '\u2222', 'angst;': '\xc5', 'angzarr;': '\u237c', 'Aogon;': '\u0104', 'aogon;': '\u0105', 'Aopf;': '\U0001d538', 'aopf;': '\U0001d552', 'ap;': '\u2248', 'apacir;': '\u2a6f', 'apE;': '\u2a70', 'ape;': '\u224a', 'apid;': '\u224b', 'apos;': "'", 'ApplyFunction;': '\u2061', 'approx;': '\u2248', 'approxeq;': '\u224a', 'Aring': '\xc5', 'aring': '\xe5', 'Aring;': '\xc5', 'aring;': '\xe5', 'Ascr;': '\U0001d49c', 'ascr;': '\U0001d4b6', 'Assign;': '\u2254', 'ast;': '*', 'asymp;': '\u2248', 'asympeq;': '\u224d', 'Atilde': '\xc3', 'atilde': '\xe3', 'Atilde;': '\xc3', 'atilde;': '\xe3', 'Auml': '\xc4', 'auml': '\xe4', 'Auml;': '\xc4', 'auml;': '\xe4', 'awconint;': '\u2233', 'awint;': '\u2a11', 'backcong;': '\u224c', 'backepsilon;': '\u03f6', 'backprime;': '\u2035', 'backsim;': '\u223d', 'backsimeq;': '\u22cd', 'Backslash;': '\u2216', 'Barv;': '\u2ae7', 'barvee;': '\u22bd', 'Barwed;': '\u2306', 'barwed;': '\u2305', 'barwedge;': '\u2305', 'bbrk;': '\u23b5', 'bbrktbrk;': '\u23b6', 'bcong;': '\u224c', 'Bcy;': '\u0411', 'bcy;': '\u0431', 'bdquo;': '\u201e', 'becaus;': '\u2235', 'Because;': '\u2235', 'because;': '\u2235', 'bemptyv;': '\u29b0', 'bepsi;': '\u03f6', 'bernou;': '\u212c', 'Bernoullis;': '\u212c', 'Beta;': '\u0392', 'beta;': '\u03b2', 'beth;': '\u2136', 'between;': '\u226c', 'Bfr;': '\U0001d505', 'bfr;': '\U0001d51f', 'bigcap;': '\u22c2', 'bigcirc;': '\u25ef', 'bigcup;': '\u22c3', 'bigodot;': '\u2a00', 'bigoplus;': '\u2a01', 'bigotimes;': '\u2a02', 'bigsqcup;': '\u2a06', 'bigstar;': '\u2605', 'bigtriangledown;': '\u25bd', 'bigtriangleup;': '\u25b3', 'biguplus;': '\u2a04', 'bigvee;': '\u22c1', 'bigwedge;': '\u22c0', 'bkarow;': '\u290d', 'blacklozenge;': '\u29eb', 'blacksquare;': '\u25aa', 'blacktriangle;': '\u25b4', 'blacktriangledown;': '\u25be', 'blacktriangleleft;': '\u25c2', 'blacktriangleright;': '\u25b8', 'blank;': '\u2423', 'blk12;': '\u2592', 
'blk14;': '\u2591', 'blk34;': '\u2593', 'block;': '\u2588', 'bne;': '=\u20e5', 'bnequiv;': '\u2261\u20e5', 'bNot;': '\u2aed', 'bnot;': '\u2310', 'Bopf;': '\U0001d539', 'bopf;': '\U0001d553', 'bot;': '\u22a5', 'bottom;': '\u22a5', 'bowtie;': '\u22c8', 'boxbox;': '\u29c9', 'boxDL;': '\u2557', 'boxDl;': '\u2556', 'boxdL;': '\u2555', 'boxdl;': '\u2510', 'boxDR;': '\u2554', 'boxDr;': '\u2553', 'boxdR;': '\u2552', 'boxdr;': '\u250c', 'boxH;': '\u2550', 'boxh;': '\u2500', 'boxHD;': '\u2566', 'boxHd;': '\u2564', 'boxhD;': '\u2565', 'boxhd;': '\u252c', 'boxHU;': '\u2569', 'boxHu;': '\u2567', 'boxhU;': '\u2568', 'boxhu;': '\u2534', 'boxminus;': '\u229f', 'boxplus;': '\u229e', 'boxtimes;': '\u22a0', 'boxUL;': '\u255d', 'boxUl;': '\u255c', 'boxuL;': '\u255b', 'boxul;': '\u2518', 'boxUR;': '\u255a', 'boxUr;': '\u2559', 'boxuR;': '\u2558', 'boxur;': '\u2514', 'boxV;': '\u2551', 'boxv;': '\u2502', 'boxVH;': '\u256c', 'boxVh;': '\u256b', 'boxvH;': '\u256a', 'boxvh;': '\u253c', 'boxVL;': '\u2563', 'boxVl;': '\u2562', 'boxvL;': '\u2561', 'boxvl;': '\u2524', 'boxVR;': '\u2560', 'boxVr;': '\u255f', 'boxvR;': '\u255e', 'boxvr;': '\u251c', 'bprime;': '\u2035', 'Breve;': '\u02d8', 'breve;': '\u02d8', 'brvbar': '\xa6', 'brvbar;': '\xa6', 'Bscr;': '\u212c', 'bscr;': '\U0001d4b7', 'bsemi;': '\u204f', 'bsim;': '\u223d', 'bsime;': '\u22cd', 'bsol;': '\\', 'bsolb;': '\u29c5', 'bsolhsub;': '\u27c8', 'bull;': '\u2022', 'bullet;': '\u2022', 'bump;': '\u224e', 'bumpE;': '\u2aae', 'bumpe;': '\u224f', 'Bumpeq;': '\u224e', 'bumpeq;': '\u224f', 'Cacute;': '\u0106', 'cacute;': '\u0107', 'Cap;': '\u22d2', 'cap;': '\u2229', 'capand;': '\u2a44', 'capbrcup;': '\u2a49', 'capcap;': '\u2a4b', 'capcup;': '\u2a47', 'capdot;': '\u2a40', 'CapitalDifferentialD;': '\u2145', 'caps;': '\u2229\ufe00', 'caret;': '\u2041', 'caron;': '\u02c7', 'Cayleys;': '\u212d', 'ccaps;': '\u2a4d', 'Ccaron;': '\u010c', 'ccaron;': '\u010d', 'Ccedil': '\xc7', 'ccedil': '\xe7', 'Ccedil;': '\xc7', 'ccedil;': '\xe7', 'Ccirc;': '\u0108', 
'ccirc;': '\u0109', 'Cconint;': '\u2230', 'ccups;': '\u2a4c', 'ccupssm;': '\u2a50', 'Cdot;': '\u010a', 'cdot;': '\u010b', 'cedil': '\xb8', 'cedil;': '\xb8', 'Cedilla;': '\xb8', 'cemptyv;': '\u29b2', 'cent': '\xa2', 'cent;': '\xa2', 'CenterDot;': '\xb7', 'centerdot;': '\xb7', 'Cfr;': '\u212d', 'cfr;': '\U0001d520', 'CHcy;': '\u0427', 'chcy;': '\u0447', 'check;': '\u2713', 'checkmark;': '\u2713', 'Chi;': '\u03a7', 'chi;': '\u03c7', 'cir;': '\u25cb', 'circ;': '\u02c6', 'circeq;': '\u2257', 'circlearrowleft;': '\u21ba', 'circlearrowright;': '\u21bb', 'circledast;': '\u229b', 'circledcirc;': '\u229a', 'circleddash;': '\u229d', 'CircleDot;': '\u2299', 'circledR;': '\xae', 'circledS;': '\u24c8', 'CircleMinus;': '\u2296', 'CirclePlus;': '\u2295', 'CircleTimes;': '\u2297', 'cirE;': '\u29c3', 'cire;': '\u2257', 'cirfnint;': '\u2a10', 'cirmid;': '\u2aef', 'cirscir;': '\u29c2', 'ClockwiseContourIntegral;': '\u2232', 'CloseCurlyDoubleQuote;': '\u201d', 'CloseCurlyQuote;': '\u2019', 'clubs;': '\u2663', 'clubsuit;': '\u2663', 'Colon;': '\u2237', 'colon;': ':', 'Colone;': '\u2a74', 'colone;': '\u2254', 'coloneq;': '\u2254', 'comma;': ',', 'commat;': '@', 'comp;': '\u2201', 'compfn;': '\u2218', 'complement;': '\u2201', 'complexes;': '\u2102', 'cong;': '\u2245', 'congdot;': '\u2a6d', 'Congruent;': '\u2261', 'Conint;': '\u222f', 'conint;': '\u222e', 'ContourIntegral;': '\u222e', 'Copf;': '\u2102', 'copf;': '\U0001d554', 'coprod;': '\u2210', 'Coproduct;': '\u2210', 'COPY': '\xa9', 'copy': '\xa9', 'COPY;': '\xa9', 'copy;': '\xa9', 'copysr;': '\u2117', 'CounterClockwiseContourIntegral;': '\u2233', 'crarr;': '\u21b5', 'Cross;': '\u2a2f', 'cross;': '\u2717', 'Cscr;': '\U0001d49e', 'cscr;': '\U0001d4b8', 'csub;': '\u2acf', 'csube;': '\u2ad1', 'csup;': '\u2ad0', 'csupe;': '\u2ad2', 'ctdot;': '\u22ef', 'cudarrl;': '\u2938', 'cudarrr;': '\u2935', 'cuepr;': '\u22de', 'cuesc;': '\u22df', 'cularr;': '\u21b6', 'cularrp;': '\u293d', 'Cup;': '\u22d3', 'cup;': '\u222a', 'cupbrcap;': '\u2a48', 
'CupCap;': '\u224d', 'cupcap;': '\u2a46', 'cupcup;': '\u2a4a', 'cupdot;': '\u228d', 'cupor;': '\u2a45', 'cups;': '\u222a\ufe00', 'curarr;': '\u21b7', 'curarrm;': '\u293c', 'curlyeqprec;': '\u22de', 'curlyeqsucc;': '\u22df', 'curlyvee;': '\u22ce', 'curlywedge;': '\u22cf', 'curren': '\xa4', 'curren;': '\xa4', 'curvearrowleft;': '\u21b6', 'curvearrowright;': '\u21b7', 'cuvee;': '\u22ce', 'cuwed;': '\u22cf', 'cwconint;': '\u2232', 'cwint;': '\u2231', 'cylcty;': '\u232d', 'Dagger;': '\u2021', 'dagger;': '\u2020', 'daleth;': '\u2138', 'Darr;': '\u21a1', 'dArr;': '\u21d3', 'darr;': '\u2193', 'dash;': '\u2010', 'Dashv;': '\u2ae4', 'dashv;': '\u22a3', 'dbkarow;': '\u290f', 'dblac;': '\u02dd', 'Dcaron;': '\u010e', 'dcaron;': '\u010f', 'Dcy;': '\u0414', 'dcy;': '\u0434', 'DD;': '\u2145', 'dd;': '\u2146', 'ddagger;': '\u2021', 'ddarr;': '\u21ca', 'DDotrahd;': '\u2911', 'ddotseq;': '\u2a77', 'deg': '\xb0', 'deg;': '\xb0', 'Del;': '\u2207', 'Delta;': '\u0394', 'delta;': '\u03b4', 'demptyv;': '\u29b1', 'dfisht;': '\u297f', 'Dfr;': '\U0001d507', 'dfr;': '\U0001d521', 'dHar;': '\u2965', 'dharl;': '\u21c3', 'dharr;': '\u21c2', 'DiacriticalAcute;': '\xb4', 'DiacriticalDot;': '\u02d9', 'DiacriticalDoubleAcute;': '\u02dd', 'DiacriticalGrave;': '`', 'DiacriticalTilde;': '\u02dc', 'diam;': '\u22c4', 'Diamond;': '\u22c4', 'diamond;': '\u22c4', 'diamondsuit;': '\u2666', 'diams;': '\u2666', 'die;': '\xa8', 'DifferentialD;': '\u2146', 'digamma;': '\u03dd', 'disin;': '\u22f2', 'div;': '\xf7', 'divide': '\xf7', 'divide;': '\xf7', 'divideontimes;': '\u22c7', 'divonx;': '\u22c7', 'DJcy;': '\u0402', 'djcy;': '\u0452', 'dlcorn;': '\u231e', 'dlcrop;': '\u230d', 'dollar;': '$', 'Dopf;': '\U0001d53b', 'dopf;': '\U0001d555', 'Dot;': '\xa8', 'dot;': '\u02d9', 'DotDot;': '\u20dc', 'doteq;': '\u2250', 'doteqdot;': '\u2251', 'DotEqual;': '\u2250', 'dotminus;': '\u2238', 'dotplus;': '\u2214', 'dotsquare;': '\u22a1', 'doublebarwedge;': '\u2306', 'DoubleContourIntegral;': '\u222f', 'DoubleDot;': '\xa8', 
'DoubleDownArrow;': '\u21d3', 'DoubleLeftArrow;': '\u21d0', 'DoubleLeftRightArrow;': '\u21d4', 'DoubleLeftTee;': '\u2ae4', 'DoubleLongLeftArrow;': '\u27f8', 'DoubleLongLeftRightArrow;': '\u27fa', 'DoubleLongRightArrow;': '\u27f9', 'DoubleRightArrow;': '\u21d2', 'DoubleRightTee;': '\u22a8', 'DoubleUpArrow;': '\u21d1', 'DoubleUpDownArrow;': '\u21d5', 'DoubleVerticalBar;': '\u2225', 'DownArrow;': '\u2193', 'Downarrow;': '\u21d3', 'downarrow;': '\u2193', 'DownArrowBar;': '\u2913', 'DownArrowUpArrow;': '\u21f5', 'DownBreve;': '\u0311', 'downdownarrows;': '\u21ca', 'downharpoonleft;': '\u21c3', 'downharpoonright;': '\u21c2', 'DownLeftRightVector;': '\u2950', 'DownLeftTeeVector;': '\u295e', 'DownLeftVector;': '\u21bd', 'DownLeftVectorBar;': '\u2956', 'DownRightTeeVector;': '\u295f', 'DownRightVector;': '\u21c1', 'DownRightVectorBar;': '\u2957', 'DownTee;': '\u22a4', 'DownTeeArrow;': '\u21a7', 'drbkarow;': '\u2910', 'drcorn;': '\u231f', 'drcrop;': '\u230c', 'Dscr;': '\U0001d49f', 'dscr;': '\U0001d4b9', 'DScy;': '\u0405', 'dscy;': '\u0455', 'dsol;': '\u29f6', 'Dstrok;': '\u0110', 'dstrok;': '\u0111', 'dtdot;': '\u22f1', 'dtri;': '\u25bf', 'dtrif;': '\u25be', 'duarr;': '\u21f5', 'duhar;': '\u296f', 'dwangle;': '\u29a6', 'DZcy;': '\u040f', 'dzcy;': '\u045f', 'dzigrarr;': '\u27ff', 'Eacute': '\xc9', 'eacute': '\xe9', 'Eacute;': '\xc9', 'eacute;': '\xe9', 'easter;': '\u2a6e', 'Ecaron;': '\u011a', 'ecaron;': '\u011b', 'ecir;': '\u2256', 'Ecirc': '\xca', 'ecirc': '\xea', 'Ecirc;': '\xca', 'ecirc;': '\xea', 'ecolon;': '\u2255', 'Ecy;': '\u042d', 'ecy;': '\u044d', 'eDDot;': '\u2a77', 'Edot;': '\u0116', 'eDot;': '\u2251', 'edot;': '\u0117', 'ee;': '\u2147', 'efDot;': '\u2252', 'Efr;': '\U0001d508', 'efr;': '\U0001d522', 'eg;': '\u2a9a', 'Egrave': '\xc8', 'egrave': '\xe8', 'Egrave;': '\xc8', 'egrave;': '\xe8', 'egs;': '\u2a96', 'egsdot;': '\u2a98', 'el;': '\u2a99', 'Element;': '\u2208', 'elinters;': '\u23e7', 'ell;': '\u2113', 'els;': '\u2a95', 'elsdot;': '\u2a97', 'Emacr;': 
'\u0112', 'emacr;': '\u0113', 'empty;': '\u2205', 'emptyset;': '\u2205', 'EmptySmallSquare;': '\u25fb', 'emptyv;': '\u2205', 'EmptyVerySmallSquare;': '\u25ab', 'emsp13;': '\u2004', 'emsp14;': '\u2005', 'emsp;': '\u2003', 'ENG;': '\u014a', 'eng;': '\u014b', 'ensp;': '\u2002', 'Eogon;': '\u0118', 'eogon;': '\u0119', 'Eopf;': '\U0001d53c', 'eopf;': '\U0001d556', 'epar;': '\u22d5', 'eparsl;': '\u29e3', 'eplus;': '\u2a71', 'epsi;': '\u03b5', 'Epsilon;': '\u0395', 'epsilon;': '\u03b5', 'epsiv;': '\u03f5', 'eqcirc;': '\u2256', 'eqcolon;': '\u2255', 'eqsim;': '\u2242', 'eqslantgtr;': '\u2a96', 'eqslantless;': '\u2a95', 'Equal;': '\u2a75', 'equals;': '=', 'EqualTilde;': '\u2242', 'equest;': '\u225f', 'Equilibrium;': '\u21cc', 'equiv;': '\u2261', 'equivDD;': '\u2a78', 'eqvparsl;': '\u29e5', 'erarr;': '\u2971', 'erDot;': '\u2253', 'Escr;': '\u2130', 'escr;': '\u212f', 'esdot;': '\u2250', 'Esim;': '\u2a73', 'esim;': '\u2242', 'Eta;': '\u0397', 'eta;': '\u03b7', 'ETH': '\xd0', 'eth': '\xf0', 'ETH;': '\xd0', 'eth;': '\xf0', 'Euml': '\xcb', 'euml': '\xeb', 'Euml;': '\xcb', 'euml;': '\xeb', 'euro;': '\u20ac', 'excl;': '!', 'exist;': '\u2203', 'Exists;': '\u2203', 'expectation;': '\u2130', 'ExponentialE;': '\u2147', 'exponentiale;': '\u2147', 'fallingdotseq;': '\u2252', 'Fcy;': '\u0424', 'fcy;': '\u0444', 'female;': '\u2640', 'ffilig;': '\ufb03', 'fflig;': '\ufb00', 'ffllig;': '\ufb04', 'Ffr;': '\U0001d509', 'ffr;': '\U0001d523', 'filig;': '\ufb01', 'FilledSmallSquare;': '\u25fc', 'FilledVerySmallSquare;': '\u25aa', 'fjlig;': 'fj', 'flat;': '\u266d', 'fllig;': '\ufb02', 'fltns;': '\u25b1', 'fnof;': '\u0192', 'Fopf;': '\U0001d53d', 'fopf;': '\U0001d557', 'ForAll;': '\u2200', 'forall;': '\u2200', 'fork;': '\u22d4', 'forkv;': '\u2ad9', 'Fouriertrf;': '\u2131', 'fpartint;': '\u2a0d', 'frac12': '\xbd', 'frac12;': '\xbd', 'frac13;': '\u2153', 'frac14': '\xbc', 'frac14;': '\xbc', 'frac15;': '\u2155', 'frac16;': '\u2159', 'frac18;': '\u215b', 'frac23;': '\u2154', 'frac25;': '\u2156', 
'frac34': '\xbe', 'frac34;': '\xbe', 'frac35;': '\u2157', 'frac38;': '\u215c', 'frac45;': '\u2158', 'frac56;': '\u215a', 'frac58;': '\u215d', 'frac78;': '\u215e', 'frasl;': '\u2044', 'frown;': '\u2322', 'Fscr;': '\u2131', 'fscr;': '\U0001d4bb', 'gacute;': '\u01f5', 'Gamma;': '\u0393', 'gamma;': '\u03b3', 'Gammad;': '\u03dc', 'gammad;': '\u03dd', 'gap;': '\u2a86', 'Gbreve;': '\u011e', 'gbreve;': '\u011f', 'Gcedil;': '\u0122', 'Gcirc;': '\u011c', 'gcirc;': '\u011d', 'Gcy;': '\u0413', 'gcy;': '\u0433', 'Gdot;': '\u0120', 'gdot;': '\u0121', 'gE;': '\u2267', 'ge;': '\u2265', 'gEl;': '\u2a8c', 'gel;': '\u22db', 'geq;': '\u2265', 'geqq;': '\u2267', 'geqslant;': '\u2a7e', 'ges;': '\u2a7e', 'gescc;': '\u2aa9', 'gesdot;': '\u2a80', 'gesdoto;': '\u2a82', 'gesdotol;': '\u2a84', 'gesl;': '\u22db\ufe00', 'gesles;': '\u2a94', 'Gfr;': '\U0001d50a', 'gfr;': '\U0001d524', 'Gg;': '\u22d9', 'gg;': '\u226b', 'ggg;': '\u22d9', 'gimel;': '\u2137', 'GJcy;': '\u0403', 'gjcy;': '\u0453', 'gl;': '\u2277', 'gla;': '\u2aa5', 'glE;': '\u2a92', 'glj;': '\u2aa4', 'gnap;': '\u2a8a', 'gnapprox;': '\u2a8a', 'gnE;': '\u2269', 'gne;': '\u2a88', 'gneq;': '\u2a88', 'gneqq;': '\u2269', 'gnsim;': '\u22e7', 'Gopf;': '\U0001d53e', 'gopf;': '\U0001d558', 'grave;': '`', 'GreaterEqual;': '\u2265', 'GreaterEqualLess;': '\u22db', 'GreaterFullEqual;': '\u2267', 'GreaterGreater;': '\u2aa2', 'GreaterLess;': '\u2277', 'GreaterSlantEqual;': '\u2a7e', 'GreaterTilde;': '\u2273', 'Gscr;': '\U0001d4a2', 'gscr;': '\u210a', 'gsim;': '\u2273', 'gsime;': '\u2a8e', 'gsiml;': '\u2a90', 'GT': '>', 'gt': '>', 'GT;': '>', 'Gt;': '\u226b', 'gt;': '>', 'gtcc;': '\u2aa7', 'gtcir;': '\u2a7a', 'gtdot;': '\u22d7', 'gtlPar;': '\u2995', 'gtquest;': '\u2a7c', 'gtrapprox;': '\u2a86', 'gtrarr;': '\u2978', 'gtrdot;': '\u22d7', 'gtreqless;': '\u22db', 'gtreqqless;': '\u2a8c', 'gtrless;': '\u2277', 'gtrsim;': '\u2273', 'gvertneqq;': '\u2269\ufe00', 'gvnE;': '\u2269\ufe00', 'Hacek;': '\u02c7', 'hairsp;': '\u200a', 'half;': '\xbd', 'hamilt;': 
'\u210b', 'HARDcy;': '\u042a', 'hardcy;': '\u044a', 'hArr;': '\u21d4', 'harr;': '\u2194', 'harrcir;': '\u2948', 'harrw;': '\u21ad', 'Hat;': '^', 'hbar;': '\u210f', 'Hcirc;': '\u0124', 'hcirc;': '\u0125', 'hearts;': '\u2665', 'heartsuit;': '\u2665', 'hellip;': '\u2026', 'hercon;': '\u22b9', 'Hfr;': '\u210c', 'hfr;': '\U0001d525', 'HilbertSpace;': '\u210b', 'hksearow;': '\u2925', 'hkswarow;': '\u2926', 'hoarr;': '\u21ff', 'homtht;': '\u223b', 'hookleftarrow;': '\u21a9', 'hookrightarrow;': '\u21aa', 'Hopf;': '\u210d', 'hopf;': '\U0001d559', 'horbar;': '\u2015', 'HorizontalLine;': '\u2500', 'Hscr;': '\u210b', 'hscr;': '\U0001d4bd', 'hslash;': '\u210f', 'Hstrok;': '\u0126', 'hstrok;': '\u0127', 'HumpDownHump;': '\u224e', 'HumpEqual;': '\u224f', 'hybull;': '\u2043', 'hyphen;': '\u2010', 'Iacute': '\xcd', 'iacute': '\xed', 'Iacute;': '\xcd', 'iacute;': '\xed', 'ic;': '\u2063', 'Icirc': '\xce', 'icirc': '\xee', 'Icirc;': '\xce', 'icirc;': '\xee', 'Icy;': '\u0418', 'icy;': '\u0438', 'Idot;': '\u0130', 'IEcy;': '\u0415', 'iecy;': '\u0435', 'iexcl': '\xa1', 'iexcl;': '\xa1', 'iff;': '\u21d4', 'Ifr;': '\u2111', 'ifr;': '\U0001d526', 'Igrave': '\xcc', 'igrave': '\xec', 'Igrave;': '\xcc', 'igrave;': '\xec', 'ii;': '\u2148', 'iiiint;': '\u2a0c', 'iiint;': '\u222d', 'iinfin;': '\u29dc', 'iiota;': '\u2129', 'IJlig;': '\u0132', 'ijlig;': '\u0133', 'Im;': '\u2111', 'Imacr;': '\u012a', 'imacr;': '\u012b', 'image;': '\u2111', 'ImaginaryI;': '\u2148', 'imagline;': '\u2110', 'imagpart;': '\u2111', 'imath;': '\u0131', 'imof;': '\u22b7', 'imped;': '\u01b5', 'Implies;': '\u21d2', 'in;': '\u2208', 'incare;': '\u2105', 'infin;': '\u221e', 'infintie;': '\u29dd', 'inodot;': '\u0131', 'Int;': '\u222c', 'int;': '\u222b', 'intcal;': '\u22ba', 'integers;': '\u2124', 'Integral;': '\u222b', 'intercal;': '\u22ba', 'Intersection;': '\u22c2', 'intlarhk;': '\u2a17', 'intprod;': '\u2a3c', 'InvisibleComma;': '\u2063', 'InvisibleTimes;': '\u2062', 'IOcy;': '\u0401', 'iocy;': '\u0451', 'Iogon;': '\u012e', 
'iogon;': '\u012f', 'Iopf;': '\U0001d540', 'iopf;': '\U0001d55a', 'Iota;': '\u0399', 'iota;': '\u03b9', 'iprod;': '\u2a3c', 'iquest': '\xbf', 'iquest;': '\xbf', 'Iscr;': '\u2110', 'iscr;': '\U0001d4be', 'isin;': '\u2208', 'isindot;': '\u22f5', 'isinE;': '\u22f9', 'isins;': '\u22f4', 'isinsv;': '\u22f3', 'isinv;': '\u2208', 'it;': '\u2062', 'Itilde;': '\u0128', 'itilde;': '\u0129', 'Iukcy;': '\u0406', 'iukcy;': '\u0456', 'Iuml': '\xcf', 'iuml': '\xef', 'Iuml;': '\xcf', 'iuml;': '\xef', 'Jcirc;': '\u0134', 'jcirc;': '\u0135', 'Jcy;': '\u0419', 'jcy;': '\u0439', 'Jfr;': '\U0001d50d', 'jfr;': '\U0001d527', 'jmath;': '\u0237', 'Jopf;': '\U0001d541', 'jopf;': '\U0001d55b', 'Jscr;': '\U0001d4a5', 'jscr;': '\U0001d4bf', 'Jsercy;': '\u0408', 'jsercy;': '\u0458', 'Jukcy;': '\u0404', 'jukcy;': '\u0454', 'Kappa;': '\u039a', 'kappa;': '\u03ba', 'kappav;': '\u03f0', 'Kcedil;': '\u0136', 'kcedil;': '\u0137', 'Kcy;': '\u041a', 'kcy;': '\u043a', 'Kfr;': '\U0001d50e', 'kfr;': '\U0001d528', 'kgreen;': '\u0138', 'KHcy;': '\u0425', 'khcy;': '\u0445', 'KJcy;': '\u040c', 'kjcy;': '\u045c', 'Kopf;': '\U0001d542', 'kopf;': '\U0001d55c', 'Kscr;': '\U0001d4a6', 'kscr;': '\U0001d4c0', 'lAarr;': '\u21da', 'Lacute;': '\u0139', 'lacute;': '\u013a', 'laemptyv;': '\u29b4', 'lagran;': '\u2112', 'Lambda;': '\u039b', 'lambda;': '\u03bb', 'Lang;': '\u27ea', 'lang;': '\u27e8', 'langd;': '\u2991', 'langle;': '\u27e8', 'lap;': '\u2a85', 'Laplacetrf;': '\u2112', 'laquo': '\xab', 'laquo;': '\xab', 'Larr;': '\u219e', 'lArr;': '\u21d0', 'larr;': '\u2190', 'larrb;': '\u21e4', 'larrbfs;': '\u291f', 'larrfs;': '\u291d', 'larrhk;': '\u21a9', 'larrlp;': '\u21ab', 'larrpl;': '\u2939', 'larrsim;': '\u2973', 'larrtl;': '\u21a2', 'lat;': '\u2aab', 'lAtail;': '\u291b', 'latail;': '\u2919', 'late;': '\u2aad', 'lates;': '\u2aad\ufe00', 'lBarr;': '\u290e', 'lbarr;': '\u290c', 'lbbrk;': '\u2772', 'lbrace;': '{', 'lbrack;': '[', 'lbrke;': '\u298b', 'lbrksld;': '\u298f', 'lbrkslu;': '\u298d', 'Lcaron;': '\u013d', 'lcaron;': 
'\u013e', 'Lcedil;': '\u013b', 'lcedil;': '\u013c', 'lceil;': '\u2308', 'lcub;': '{', 'Lcy;': '\u041b', 'lcy;': '\u043b', 'ldca;': '\u2936', 'ldquo;': '\u201c', 'ldquor;': '\u201e', 'ldrdhar;': '\u2967', 'ldrushar;': '\u294b', 'ldsh;': '\u21b2', 'lE;': '\u2266', 'le;': '\u2264', 'LeftAngleBracket;': '\u27e8', 'LeftArrow;': '\u2190', 'Leftarrow;': '\u21d0', 'leftarrow;': '\u2190', 'LeftArrowBar;': '\u21e4', 'LeftArrowRightArrow;': '\u21c6', 'leftarrowtail;': '\u21a2', 'LeftCeiling;': '\u2308', 'LeftDoubleBracket;': '\u27e6', 'LeftDownTeeVector;': '\u2961', 'LeftDownVector;': '\u21c3', 'LeftDownVectorBar;': '\u2959', 'LeftFloor;': '\u230a',
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
true
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/version.py
youtube_dl/version.py
from __future__ import unicode_literals __version__ = '2025.04.07'
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/YoutubeDL.py
youtube_dl/YoutubeDL.py
#!/usr/bin/env python # coding: utf-8 from __future__ import absolute_import, unicode_literals import collections import copy import datetime import errno import functools import io import itertools import json import locale import operator import os import platform import re import shutil import subprocess import socket import sys import time import tokenize import traceback import random try: from ssl import OPENSSL_VERSION except ImportError: # Must be Python 2.6, should be built against 1.0.2 OPENSSL_VERSION = 'OpenSSL 1.0.2(?)' from string import ascii_letters from .compat import ( compat_basestring, compat_collections_chain_map as ChainMap, compat_filter as filter, compat_get_terminal_size, compat_http_client, compat_http_cookiejar_Cookie, compat_http_cookies_SimpleCookie, compat_integer_types, compat_kwargs, compat_map as map, compat_numeric_types, compat_open as open, compat_os_name, compat_str, compat_tokenize_tokenize, compat_urllib_error, compat_urllib_parse, compat_urllib_request, compat_urllib_request_DataHandler, ) from .utils import ( _UnsafeExtensionError, age_restricted, args_to_str, bug_reports_message, ContentTooShortError, date_from_str, DateRange, DEFAULT_OUTTMPL, determine_ext, determine_protocol, DownloadError, encode_compat_str, encodeFilename, error_to_compat_str, expand_path, ExtractorError, format_bytes, formatSeconds, GeoRestrictedError, int_or_none, ISO3166Utils, join_nonempty, locked_file, LazyList, make_HTTPS_handler, MaxDownloadsReached, orderedSet, PagedList, parse_filesize, PerRequestProxyHandler, platform_name, PostProcessingError, preferredencoding, prepend_extension, process_communicate_or_kill, register_socks_protocols, render_table, replace_extension, SameFileError, sanitize_filename, sanitize_path, sanitize_url, sanitized_Request, std_headers, str_or_none, subtitles_filename, traverse_obj, UnavailableVideoError, url_basename, version_tuple, write_json_file, write_string, YoutubeDLCookieJar, YoutubeDLCookieProcessor, 
YoutubeDLHandler, YoutubeDLRedirectHandler, ytdl_is_updateable, ) from .cache import Cache from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER from .extractor.openload import PhantomJSwrapper from .downloader import get_suitable_downloader from .downloader.rtmp import rtmpdump_version from .postprocessor import ( FFmpegFixupM3u8PP, FFmpegFixupM4aPP, FFmpegFixupStretchedPP, FFmpegMergerPP, FFmpegPostProcessor, get_postprocessor, ) from .version import __version__ if compat_os_name == 'nt': import ctypes def _catch_unsafe_file_extension(func): @functools.wraps(func) def wrapper(self, *args, **kwargs): try: return func(self, *args, **kwargs) except _UnsafeExtensionError as error: self.report_error( '{0} found; to avoid damaging your system, this value is disallowed.' ' If you believe this is an error{1}'.format( error_to_compat_str(error), bug_reports_message(','))) return wrapper class YoutubeDL(object): """YoutubeDL class. YoutubeDL objects are the ones responsible of downloading the actual video file and writing it to disk if the user has requested it, among some other tasks. In most cases there should be one per program. As, given a video URL, the downloader doesn't know how to extract all the needed information, task that InfoExtractors do, it has to pass the URL to one of them. For this, YoutubeDL objects have a method that allows InfoExtractors to be registered in a given order. When it is passed a URL, the YoutubeDL object handles it to the first InfoExtractor it finds that reports being able to handle it. The InfoExtractor extracts all the information about the video or videos the URL refers to, and YoutubeDL process the extracted information, possibly using a File Downloader to download the video. YoutubeDL objects accept a lot of parameters. In order not to saturate the object constructor with arguments, it receives a dictionary of options instead. 
These options are available through the params attribute for the InfoExtractors to use. The YoutubeDL also registers itself as the downloader in charge for the InfoExtractors that are added to it, so this is a "mutual registration". Available options: username: Username for authentication purposes. password: Password for authentication purposes. videopassword: Password for accessing a video. ap_mso: Adobe Pass multiple-system operator identifier. ap_username: Multiple-system operator account username. ap_password: Multiple-system operator account password. usenetrc: Use netrc for authentication instead. verbose: Print additional info to stdout. quiet: Do not print messages to stdout. no_warnings: Do not print out anything for warnings. forceurl: Force printing final URL. forcetitle: Force printing title. forceid: Force printing ID. forcethumbnail: Force printing thumbnail URL. forcedescription: Force printing description. forcefilename: Force printing final filename. forceduration: Force printing duration. forcejson: Force printing info_dict as JSON. dump_single_json: Force printing the info_dict of the whole playlist (or video) as a single JSON line. simulate: Do not download the video files. format: Video format code. See options.py for more information. outtmpl: Template for output names. outtmpl_na_placeholder: Placeholder for unavailable meta fields. restrictfilenames: Do not allow "&" and spaces in file names ignoreerrors: Do not stop on download errors. force_generic_extractor: Force downloader to use the generic extractor nooverwrites: Prevent overwriting files. playliststart: Playlist item to start at. playlistend: Playlist item to end at. playlist_items: Specific indices of playlist to download. playlistreverse: Download playlist items in reverse order. playlistrandom: Download playlist items in random order. matchtitle: Download only matching titles. rejecttitle: Reject downloads for matching titles. logger: Log messages to a logging.Logger instance. 
logtostderr: Log messages to stderr instead of stdout. writedescription: Write the video description to a .description file writeinfojson: Write the video description to a .info.json file writeannotations: Write the video annotations to a .annotations.xml file writethumbnail: Write the thumbnail image to a file write_all_thumbnails: Write all thumbnail formats to files writesubtitles: Write the video subtitles to a file writeautomaticsub: Write the automatically generated subtitles to a file allsubtitles: Downloads all the subtitles of the video (requires writesubtitles or writeautomaticsub) listsubtitles: Lists all available subtitles for the video subtitlesformat: The format code for subtitles subtitleslangs: List of languages of the subtitles to download keepvideo: Keep the video file after post-processing daterange: A DateRange object, download only if the upload_date is in the range. skip_download: Skip the actual download of the video file cachedir: Location of the cache files in the filesystem. False to disable filesystem cache. noplaylist: Download single video instead of a playlist if in doubt. age_limit: An integer representing the user's age in years. Unsuitable videos for the given age are skipped. min_views: An integer representing the minimum view count the video must have in order to not be skipped. Videos without view count information are always downloaded. None for no limit. max_views: An integer representing the maximum view count. Videos that are more popular than that are not downloaded. Videos without view count information are always downloaded. None for no limit. download_archive: File name of a file where all downloads are recorded. Videos already present in the file are not downloaded again. cookiefile: File name where cookies should be read from and dumped to. nocheckcertificate:Do not verify SSL certificates prefer_insecure: Use HTTP instead of HTTPS to retrieve information. At the moment, this is only supported by YouTube. 
proxy: URL of the proxy server to use geo_verification_proxy: URL of the proxy to use for IP address verification on geo-restricted sites. socket_timeout: Time to wait for unresponsive hosts, in seconds bidi_workaround: Work around buggy terminals without bidirectional text support, using fridibi debug_printtraffic:Print out sent and received HTTP traffic include_ads: Download ads as well default_search: Prepend this string if an input url is not valid. 'auto' for elaborate guessing encoding: Use this encoding instead of the system-specified. extract_flat: Do not resolve URLs, return the immediate result. Pass in 'in_playlist' to only show this behavior for playlist items. postprocessors: A list of dictionaries, each with an entry * key: The name of the postprocessor. See youtube_dl/postprocessor/__init__.py for a list. as well as any further keyword arguments for the postprocessor. progress_hooks: A list of functions that get called on download progress, with a dictionary with the entries * status: One of "downloading", "error", or "finished". Check this first and ignore unknown values. If status is one of "downloading", or "finished", the following properties may also be present: * filename: The final filename (always present) * tmpfilename: The filename we're currently writing to * downloaded_bytes: Bytes on disk * total_bytes: Size of the whole file, None if unknown * total_bytes_estimate: Guess of the eventual file size, None if unavailable. * elapsed: The number of seconds since download started. * eta: The estimated time in seconds, None if unknown * speed: The download speed in bytes/second, None if unknown * fragment_index: The counter of the currently downloaded video fragment. * fragment_count: The number of fragments (= individual files that will be merged) Progress hooks are guaranteed to be called at least once (with status "finished") if the download is successful. merge_output_format: Extension to use when merging formats. 
fixup: Automatically correct known faults of the file. One of: - "never": do nothing - "warn": only emit a warning - "detect_or_warn": check whether we can do anything about it, warn otherwise (default) source_address: Client-side IP address to bind to. call_home: Boolean, true iff we are allowed to contact the youtube-dl servers for debugging. sleep_interval: Number of seconds to sleep before each download when used alone or a lower bound of a range for randomized sleep before each download (minimum possible number of seconds to sleep) when used along with max_sleep_interval. max_sleep_interval:Upper bound of a range for randomized sleep before each download (maximum possible number of seconds to sleep). Must only be used along with sleep_interval. Actual sleep time will be a random float from range [sleep_interval; max_sleep_interval]. listformats: Print an overview of available video formats and exit. list_thumbnails: Print a table of all thumbnails and exit. match_filter: A function that gets called with the info_dict of every video. If it returns a message, the video is ignored. If it returns None, the video is downloaded. match_filter_func in utils.py is one example for this. no_color: Do not emit color codes in output. geo_bypass: Bypass geographic restriction via faking X-Forwarded-For HTTP header geo_bypass_country: Two-letter ISO 3166-2 country code that will be used for explicit geographic restriction bypassing via faking X-Forwarded-For HTTP header geo_bypass_ip_block: IP range in CIDR notation that will be used similarly to geo_bypass_country The following options determine which downloader is picked: external_downloader: Executable of the external downloader to call. None or unset for standard (built-in) downloader. hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv if True, otherwise use ffmpeg/avconv if False, otherwise use downloader suggested by extractor if None. 
The following parameters are not used by YoutubeDL itself, they are used by the downloader (see youtube_dl/downloader/common.py): nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test, noresizebuffer, retries, continuedl, noprogress, consoletitle, xattr_set_filesize, external_downloader_args, hls_use_mpegts, http_chunk_size. The following options are used by the post processors: prefer_ffmpeg: If False, use avconv instead of ffmpeg if both are available, otherwise prefer ffmpeg. ffmpeg_location: Location of the ffmpeg/avconv binary; either the path to the binary or its containing directory. postprocessor_args: A list of additional command-line arguments for the postprocessor. The following options are used by the Youtube extractor: youtube_include_dash_manifest: If True (default), DASH manifests and related data will be downloaded and processed by extractor. You can reduce network I/O by disabling it if you don't care about DASH. """ _NUMERIC_FIELDS = set(( 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx', 'timestamp', 'upload_year', 'upload_month', 'upload_day', 'available_at', 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count', 'average_rating', 'comment_count', 'age_limit', 'start_time', 'end_time', 'chapter_number', 'season_number', 'episode_number', 'track_number', 'disc_number', 'release_year', 'playlist_index', )) params = None _ies = [] _pps = [] _download_retcode = None _num_downloads = None _playlist_level = 0 _playlist_urls = set() _screen_file = None def __init__(self, params=None, auto_init=True): """Create a FileDownloader object with the given options.""" if params is None: params = {} self._ies = [] self._ies_instances = {} self._pps = [] self._progress_hooks = [] self._download_retcode = 0 self._num_downloads = 0 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] self._err_file = sys.stderr self.params = { # Default parameters 
'nocheckcertificate': False, } self.params.update(params) self.cache = Cache(self) self._header_cookies = [] self._load_cookies_from_headers(self.params.get('http_headers')) def check_deprecated(param, option, suggestion): if self.params.get(param) is not None: self.report_warning( '%s is deprecated. Use %s instead.' % (option, suggestion)) return True return False if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'): if self.params.get('geo_verification_proxy') is None: self.params['geo_verification_proxy'] = self.params['cn_verification_proxy'] check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N in the number of digits') check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"') check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"') if params.get('bidi_workaround', False): try: import pty master, slave = pty.openpty() width = compat_get_terminal_size().columns if width is None: width_args = [] else: width_args = ['-w', str(width)] sp_kwargs = dict( stdin=subprocess.PIPE, stdout=slave, stderr=self._err_file) try: self._output_process = subprocess.Popen( ['bidiv'] + width_args, **sp_kwargs ) except OSError: self._output_process = subprocess.Popen( ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs) self._output_channel = os.fdopen(master, 'rb') except OSError as ose: if ose.errno == errno.ENOENT: self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.') else: raise if (sys.platform != 'win32' and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and not params.get('restrictfilenames', False)): # Unicode filesystem API will throw errors (#1474, #13027) self.report_warning( 'Assuming --restrict-filenames since file system encoding ' 'cannot encode all characters. 
' 'Set the LC_ALL environment variable to fix this.') self.params['restrictfilenames'] = True if isinstance(params.get('outtmpl'), bytes): self.report_warning( 'Parameter outtmpl is bytes, but should be a unicode string. ' 'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.') self._setup_opener() if auto_init: self.print_debug_header() self.add_default_info_extractors() for pp_def_raw in self.params.get('postprocessors', []): pp_class = get_postprocessor(pp_def_raw['key']) pp_def = dict(pp_def_raw) del pp_def['key'] pp = pp_class(self, **compat_kwargs(pp_def)) self.add_post_processor(pp) for ph in self.params.get('progress_hooks', []): self.add_progress_hook(ph) register_socks_protocols() def warn_if_short_id(self, argv): # short YouTube ID starting with dash? idxs = [ i for i, a in enumerate(argv) if re.match(r'^-[0-9A-Za-z_-]{10}$', a)] if idxs: correct_argv = ( ['youtube-dl'] + [a for i, a in enumerate(argv) if i not in idxs] + ['--'] + [argv[i] for i in idxs] ) self.report_warning( 'Long argument string detected. ' 'Use -- to separate parameters and URLs, like this:\n%s\n' % args_to_str(correct_argv)) def add_info_extractor(self, ie): """Add an InfoExtractor object to the end of the list.""" self._ies.append(ie) if not isinstance(ie, type): self._ies_instances[ie.ie_key()] = ie ie.set_downloader(self) def get_info_extractor(self, ie_key): """ Get an instance of an IE with name ie_key, it will try to get one from the _ies list, if there's no instance it will create a new one and add it to the extractor list. 
""" ie = self._ies_instances.get(ie_key) if ie is None: ie = get_info_extractor(ie_key)() self.add_info_extractor(ie) return ie def add_default_info_extractors(self): """ Add the InfoExtractors returned by gen_extractors to the end of the list """ for ie in gen_extractor_classes(): self.add_info_extractor(ie) def add_post_processor(self, pp): """Add a PostProcessor object to the end of the chain.""" self._pps.append(pp) pp.set_downloader(self) def add_progress_hook(self, ph): """Add the progress hook (currently only for the file downloader)""" self._progress_hooks.append(ph) def _bidi_workaround(self, message): if not hasattr(self, '_output_channel'): return message assert hasattr(self, '_output_process') assert isinstance(message, compat_str) line_count = message.count('\n') + 1 self._output_process.stdin.write((message + '\n').encode('utf-8')) self._output_process.stdin.flush() res = ''.join(self._output_channel.readline().decode('utf-8') for _ in range(line_count)) return res[:-len('\n')] def to_screen(self, message, skip_eol=False): """Print message to stdout if not in quiet mode.""" return self.to_stdout(message, skip_eol, check_quiet=True) def _write_string(self, s, out=None, only_once=False, _cache=set()): if only_once and s in _cache: return write_string(s, out=out, encoding=self.params.get('encoding')) if only_once: _cache.add(s) def to_stdout(self, message, skip_eol=False, check_quiet=False, only_once=False): """Print message to stdout if not in quiet mode.""" if self.params.get('logger'): self.params['logger'].debug(message) elif not check_quiet or not self.params.get('quiet', False): message = self._bidi_workaround(message) terminator = ['\n', ''][skip_eol] output = message + terminator self._write_string(output, self._screen_file, only_once=only_once) def to_stderr(self, message, only_once=False): """Print message to stderr.""" assert isinstance(message, compat_str) if self.params.get('logger'): self.params['logger'].error(message) else: message = 
self._bidi_workaround(message) output = message + '\n' self._write_string(output, self._err_file, only_once=only_once) def to_console_title(self, message): if not self.params.get('consoletitle', False): return if compat_os_name == 'nt': if ctypes.windll.kernel32.GetConsoleWindow(): # c_wchar_p() might not be necessary if `message` is # already of type unicode() ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) elif 'TERM' in os.environ: self._write_string('\033]0;%s\007' % message, self._screen_file) def save_console_title(self): if not self.params.get('consoletitle', False): return if self.params.get('simulate', False): return if compat_os_name != 'nt' and 'TERM' in os.environ: # Save the title on stack self._write_string('\033[22;0t', self._screen_file) def restore_console_title(self): if not self.params.get('consoletitle', False): return if self.params.get('simulate', False): return if compat_os_name != 'nt' and 'TERM' in os.environ: # Restore the title from stack self._write_string('\033[23;0t', self._screen_file) def __enter__(self): self.save_console_title() return self def __exit__(self, *args): self.restore_console_title() if self.params.get('cookiefile') is not None: self.cookiejar.save(ignore_discard=True, ignore_expires=True) def trouble(self, *args, **kwargs): """Determine action to take when a download problem appears. Depending on if the downloader has been configured to ignore download errors or not, this method may throw an exception or not when errors are found, after printing the message. tb, if given, is additional traceback information. 
""" # message=None, tb=None, is_error=True message = args[0] if len(args) > 0 else kwargs.get('message', None) tb = args[1] if len(args) > 1 else kwargs.get('tb', None) is_error = args[2] if len(args) > 2 else kwargs.get('is_error', True) if message is not None: self.to_stderr(message) if self.params.get('verbose'): if tb is None: if sys.exc_info()[0]: # if .trouble has been called from an except block tb = '' if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info)) tb += encode_compat_str(traceback.format_exc()) else: tb_data = traceback.format_list(traceback.extract_stack()) tb = ''.join(tb_data) if tb: self.to_stderr(tb) if not is_error: return if not self.params.get('ignoreerrors', False): if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: exc_info = sys.exc_info()[1].exc_info else: exc_info = sys.exc_info() raise DownloadError(message, exc_info) self._download_retcode = 1 def report_warning(self, message, only_once=False): ''' Print the message to stderr, it will be prefixed with 'WARNING:' If stderr is a tty file the 'WARNING:' will be colored ''' if self.params.get('logger') is not None: self.params['logger'].warning(message) else: if self.params.get('no_warnings'): return if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt': _msg_header = '\033[0;33mWARNING:\033[0m' else: _msg_header = 'WARNING:' warning_message = '%s %s' % (_msg_header, message) self.to_stderr(warning_message, only_once=only_once) def report_error(self, message, *args, **kwargs): ''' Do the same as trouble, but prefixes the message with 'ERROR:', colored in red if stderr is a tty file. 
''' if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt': _msg_header = '\033[0;31mERROR:\033[0m' else: _msg_header = 'ERROR:' kwargs['message'] = '%s %s' % (_msg_header, message) self.trouble(*args, **kwargs) def write_debug(self, message, only_once=False): '''Log debug message or Print message to stderr''' if not self.params.get('verbose', False): return message = '[debug] {0}'.format(message) if self.params.get('logger'): self.params['logger'].debug(message) else: self.to_stderr(message, only_once) def report_unscoped_cookies(self, *args, **kwargs): # message=None, tb=False, is_error=False if len(args) <= 2: kwargs.setdefault('is_error', False) if len(args) <= 0: kwargs.setdefault( 'message', 'Unscoped cookies are not allowed: please specify some sort of scoping') self.report_error(*args, **kwargs) def report_file_already_downloaded(self, file_name): """Report file has already been fully downloaded.""" try: self.to_screen('[download] %s has already been downloaded' % file_name) except UnicodeEncodeError: self.to_screen('[download] The file has already been downloaded') def prepare_filename(self, info_dict): """Generate the output filename.""" try: template_dict = dict(info_dict) template_dict['epoch'] = int(time.time()) autonumber_size = self.params.get('autonumber_size') if autonumber_size is None: autonumber_size = 5 template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads if template_dict.get('resolution') is None: if template_dict.get('width') and template_dict.get('height'): template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height']) elif template_dict.get('height'): template_dict['resolution'] = '%sp' % template_dict['height'] elif template_dict.get('width'): template_dict['resolution'] = '%dx?' 
% template_dict['width'] sanitize = lambda k, v: sanitize_filename( compat_str(v), restricted=self.params.get('restrictfilenames'), is_id=(k == 'id' or k.endswith('_id'))) template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v)) for k, v in template_dict.items() if v is not None and not isinstance(v, (list, tuple, dict))) template_dict = collections.defaultdict(lambda: self.params.get('outtmpl_na_placeholder', 'NA'), template_dict) outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL) # For fields playlist_index and autonumber convert all occurrences # of %(field)s to %(field)0Nd for backward compatibility field_size_compat_map = { 'playlist_index': len(str(template_dict['n_entries'])), 'autonumber': autonumber_size, } FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s' mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl) if mobj: outtmpl = re.sub( FIELD_SIZE_COMPAT_RE, r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')], outtmpl) # Missing numeric fields used together with integer presentation types # in format specification will break the argument substitution since
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
true
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/__main__.py
youtube_dl/__main__.py
#!/usr/bin/env python from __future__ import unicode_literals # Execute with # $ python youtube_dl/__main__.py (2.6+) # $ python -m youtube_dl (2.7+) import sys if __package__ is None and not hasattr(sys, 'frozen'): # direct call of __main__.py import os.path path = os.path.realpath(os.path.abspath(__file__)) sys.path.insert(0, os.path.dirname(os.path.dirname(path))) import youtube_dl if __name__ == '__main__': youtube_dl.main()
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/update.py
youtube_dl/update.py
from __future__ import unicode_literals import json import traceback import hashlib import os import subprocess import sys from zipimport import zipimporter from .compat import ( compat_open as open, compat_realpath, ) from .utils import encode_compat_str from .version import __version__ def rsa_verify(message, signature, key): from hashlib import sha256 assert isinstance(message, bytes) byte_size = (len(bin(key[0])) - 2 + 8 - 1) // 8 signature = ('%x' % pow(int(signature, 16), key[1], key[0])).encode() signature = (byte_size * 2 - len(signature)) * b'0' + signature asn1 = b'3031300d060960864801650304020105000420' asn1 += sha256(message).hexdigest().encode() if byte_size < len(asn1) // 2 + 11: return False expected = b'0001' + (byte_size - len(asn1) // 2 - 3) * b'ff' + b'00' + asn1 return expected == signature def update_self(to_screen, verbose, opener): """Update the program file with the latest version from the repository""" UPDATE_URL = 'https://yt-dl.org/update/' VERSION_URL = UPDATE_URL + 'LATEST_VERSION' JSON_URL = UPDATE_URL + 'versions.json' UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537) if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, 'frozen'): to_screen('It looks like you installed youtube-dl with a package manager, pip, setup.py or a tarball. Please use that to update.') return # Check if there is a new version try: newversion = opener.open(VERSION_URL).read().decode('utf-8').strip() except Exception: if verbose: to_screen(encode_compat_str(traceback.format_exc())) to_screen('ERROR: can\'t find the current version. 
Please try again later.') return if newversion == __version__: to_screen('youtube-dl is up-to-date (' + __version__ + ')') return # Download and check versions info try: versions_info = opener.open(JSON_URL).read().decode('utf-8') versions_info = json.loads(versions_info) except Exception: if verbose: to_screen(encode_compat_str(traceback.format_exc())) to_screen('ERROR: can\'t obtain versions info. Please try again later.') return if 'signature' not in versions_info: to_screen('ERROR: the versions file is not signed or corrupted. Aborting.') return signature = versions_info['signature'] del versions_info['signature'] if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY): to_screen('ERROR: the versions file signature is invalid. Aborting.') return version_id = versions_info['latest'] def version_tuple(version_str): return tuple(map(int, version_str.split('.'))) if version_tuple(__version__) >= version_tuple(version_id): to_screen('youtube-dl is up to date (%s)' % __version__) return to_screen('Updating to version ' + version_id + ' ...') version = versions_info['versions'][version_id] print_notes(to_screen, versions_info['versions']) # sys.executable is set to the full pathname of the exe-file for py2exe # though symlinks are not followed so that we need to do this manually # with help of realpath filename = compat_realpath(sys.executable if hasattr(sys, 'frozen') else sys.argv[0]) if not os.access(filename, os.W_OK): to_screen('ERROR: no write permissions on %s' % filename) return # Py2EXE if hasattr(sys, 'frozen'): exe = filename directory = os.path.dirname(exe) if not os.access(directory, os.W_OK): to_screen('ERROR: no write permissions on %s' % directory) return try: urlh = opener.open(version['exe'][0]) newcontent = urlh.read() urlh.close() except (IOError, OSError): if verbose: to_screen(encode_compat_str(traceback.format_exc())) to_screen('ERROR: unable to download latest version') return newcontent_hash = 
hashlib.sha256(newcontent).hexdigest() if newcontent_hash != version['exe'][1]: to_screen('ERROR: the downloaded file hash does not match. Aborting.') return try: with open(exe + '.new', 'wb') as outf: outf.write(newcontent) except (IOError, OSError): if verbose: to_screen(encode_compat_str(traceback.format_exc())) to_screen('ERROR: unable to write the new version') return try: bat = os.path.join(directory, 'youtube-dl-updater.bat') with open(bat, 'w') as batfile: batfile.write(''' @echo off echo Waiting for file handle to be closed ... ping 127.0.0.1 -n 5 -w 1000 > NUL move /Y "%s.new" "%s" > NUL echo Updated youtube-dl to version %s. start /b "" cmd /c del "%%~f0"&exit /b" \n''' % (exe, exe, version_id)) subprocess.Popen([bat]) # Continues to run in the background return # Do not show premature success messages except (IOError, OSError): if verbose: to_screen(encode_compat_str(traceback.format_exc())) to_screen('ERROR: unable to overwrite current version') return # Zip unix package elif isinstance(globals().get('__loader__'), zipimporter): try: urlh = opener.open(version['bin'][0]) newcontent = urlh.read() urlh.close() except (IOError, OSError): if verbose: to_screen(encode_compat_str(traceback.format_exc())) to_screen('ERROR: unable to download latest version') return newcontent_hash = hashlib.sha256(newcontent).hexdigest() if newcontent_hash != version['bin'][1]: to_screen('ERROR: the downloaded file hash does not match. Aborting.') return try: with open(filename, 'wb') as outf: outf.write(newcontent) except (IOError, OSError): if verbose: to_screen(encode_compat_str(traceback.format_exc())) to_screen('ERROR: unable to overwrite current version') return to_screen('Updated youtube-dl. 
Restart youtube-dl to use the new version.') def get_notes(versions, fromVersion): notes = [] for v, vdata in sorted(versions.items()): if v > fromVersion: notes.extend(vdata.get('notes', [])) return notes def print_notes(to_screen, versions, fromVersion=__version__): notes = get_notes(versions, fromVersion) if notes: to_screen('PLEASE NOTE:') for note in notes: to_screen(note)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/socks.py
youtube_dl/socks.py
# Public Domain SOCKS proxy protocol implementation # Adapted from https://gist.github.com/bluec0re/cafd3764412967417fd3 from __future__ import unicode_literals # References: # SOCKS4 protocol http://www.openssh.com/txt/socks4.protocol # SOCKS4A protocol http://www.openssh.com/txt/socks4a.protocol # SOCKS5 protocol https://tools.ietf.org/html/rfc1928 # SOCKS5 username/password authentication https://tools.ietf.org/html/rfc1929 import collections import socket from .compat import ( compat_ord, compat_struct_pack, compat_struct_unpack, ) __author__ = 'Timo Schmid <coding@timoschmid.de>' SOCKS4_VERSION = 4 SOCKS4_REPLY_VERSION = 0x00 # Excerpt from SOCKS4A protocol: # if the client cannot resolve the destination host's domain name to find its # IP address, it should set the first three bytes of DSTIP to NULL and the last # byte to a non-zero value. SOCKS4_DEFAULT_DSTIP = compat_struct_pack('!BBBB', 0, 0, 0, 0xFF) SOCKS5_VERSION = 5 SOCKS5_USER_AUTH_VERSION = 0x01 SOCKS5_USER_AUTH_SUCCESS = 0x00 class Socks4Command(object): CMD_CONNECT = 0x01 CMD_BIND = 0x02 class Socks5Command(Socks4Command): CMD_UDP_ASSOCIATE = 0x03 class Socks5Auth(object): AUTH_NONE = 0x00 AUTH_GSSAPI = 0x01 AUTH_USER_PASS = 0x02 AUTH_NO_ACCEPTABLE = 0xFF # For server response class Socks5AddressType(object): ATYP_IPV4 = 0x01 ATYP_DOMAINNAME = 0x03 ATYP_IPV6 = 0x04 class ProxyError(socket.error): ERR_SUCCESS = 0x00 def __init__(self, code=None, msg=None): if code is not None and msg is None: msg = self.CODES.get(code) or 'unknown error' super(ProxyError, self).__init__(code, msg) class InvalidVersionError(ProxyError): def __init__(self, expected_version, got_version): msg = ('Invalid response version from server. 
Expected {0:02x} got ' '{1:02x}'.format(expected_version, got_version)) super(InvalidVersionError, self).__init__(0, msg) class Socks4Error(ProxyError): ERR_SUCCESS = 90 CODES = { 91: 'request rejected or failed', 92: 'request rejected because SOCKS server cannot connect to identd on the client', 93: 'request rejected because the client program and identd report different user-ids' } class Socks5Error(ProxyError): ERR_GENERAL_FAILURE = 0x01 CODES = { 0x01: 'general SOCKS server failure', 0x02: 'connection not allowed by ruleset', 0x03: 'Network unreachable', 0x04: 'Host unreachable', 0x05: 'Connection refused', 0x06: 'TTL expired', 0x07: 'Command not supported', 0x08: 'Address type not supported', 0xFE: 'unknown username or invalid password', 0xFF: 'all offered authentication methods were rejected' } class ProxyType(object): SOCKS4 = 0 SOCKS4A = 1 SOCKS5 = 2 Proxy = collections.namedtuple('Proxy', ( 'type', 'host', 'port', 'username', 'password', 'remote_dns')) class sockssocket(socket.socket): def __init__(self, *args, **kwargs): self._proxy = None super(sockssocket, self).__init__(*args, **kwargs) def setproxy(self, proxytype, addr, port, rdns=True, username=None, password=None): assert proxytype in (ProxyType.SOCKS4, ProxyType.SOCKS4A, ProxyType.SOCKS5) self._proxy = Proxy(proxytype, addr, port, username, password, rdns) def recvall(self, cnt): data = b'' while len(data) < cnt: cur = self.recv(cnt - len(data)) if not cur: raise EOFError('{0} bytes missing'.format(cnt - len(data))) data += cur return data def _recv_bytes(self, cnt): data = self.recvall(cnt) return compat_struct_unpack('!{0}B'.format(cnt), data) @staticmethod def _len_and_data(data): return compat_struct_pack('!B', len(data)) + data def _check_response_version(self, expected_version, got_version): if got_version != expected_version: self.close() raise InvalidVersionError(expected_version, got_version) def _resolve_address(self, destaddr, default, use_remote_dns): try: return 
socket.inet_aton(destaddr) except socket.error: if use_remote_dns and self._proxy.remote_dns: return default else: return socket.inet_aton(socket.gethostbyname(destaddr)) def _setup_socks4(self, address, is_4a=False): destaddr, port = address ipaddr = self._resolve_address(destaddr, SOCKS4_DEFAULT_DSTIP, use_remote_dns=is_4a) packet = compat_struct_pack('!BBH', SOCKS4_VERSION, Socks4Command.CMD_CONNECT, port) + ipaddr username = (self._proxy.username or '').encode('utf-8') packet += username + b'\x00' if is_4a and self._proxy.remote_dns: packet += destaddr.encode('utf-8') + b'\x00' self.sendall(packet) version, resp_code, dstport, dsthost = compat_struct_unpack('!BBHI', self.recvall(8)) self._check_response_version(SOCKS4_REPLY_VERSION, version) if resp_code != Socks4Error.ERR_SUCCESS: self.close() raise Socks4Error(resp_code) return (dsthost, dstport) def _setup_socks4a(self, address): self._setup_socks4(address, is_4a=True) def _socks5_auth(self): packet = compat_struct_pack('!B', SOCKS5_VERSION) auth_methods = [Socks5Auth.AUTH_NONE] if self._proxy.username and self._proxy.password: auth_methods.append(Socks5Auth.AUTH_USER_PASS) packet += compat_struct_pack('!B', len(auth_methods)) packet += compat_struct_pack('!{0}B'.format(len(auth_methods)), *auth_methods) self.sendall(packet) version, method = self._recv_bytes(2) self._check_response_version(SOCKS5_VERSION, version) if method == Socks5Auth.AUTH_NO_ACCEPTABLE or ( method == Socks5Auth.AUTH_USER_PASS and (not self._proxy.username or not self._proxy.password)): self.close() raise Socks5Error(Socks5Auth.AUTH_NO_ACCEPTABLE) if method == Socks5Auth.AUTH_USER_PASS: username = self._proxy.username.encode('utf-8') password = self._proxy.password.encode('utf-8') packet = compat_struct_pack('!B', SOCKS5_USER_AUTH_VERSION) packet += self._len_and_data(username) + self._len_and_data(password) self.sendall(packet) version, status = self._recv_bytes(2) self._check_response_version(SOCKS5_USER_AUTH_VERSION, version) if 
status != SOCKS5_USER_AUTH_SUCCESS: self.close() raise Socks5Error(Socks5Error.ERR_GENERAL_FAILURE) def _setup_socks5(self, address): destaddr, port = address ipaddr = self._resolve_address(destaddr, None, use_remote_dns=True) self._socks5_auth() reserved = 0 packet = compat_struct_pack('!BBB', SOCKS5_VERSION, Socks5Command.CMD_CONNECT, reserved) if ipaddr is None: destaddr = destaddr.encode('utf-8') packet += compat_struct_pack('!B', Socks5AddressType.ATYP_DOMAINNAME) packet += self._len_and_data(destaddr) else: packet += compat_struct_pack('!B', Socks5AddressType.ATYP_IPV4) + ipaddr packet += compat_struct_pack('!H', port) self.sendall(packet) version, status, reserved, atype = self._recv_bytes(4) self._check_response_version(SOCKS5_VERSION, version) if status != Socks5Error.ERR_SUCCESS: self.close() raise Socks5Error(status) if atype == Socks5AddressType.ATYP_IPV4: destaddr = self.recvall(4) elif atype == Socks5AddressType.ATYP_DOMAINNAME: alen = compat_ord(self.recv(1)) destaddr = self.recvall(alen) elif atype == Socks5AddressType.ATYP_IPV6: destaddr = self.recvall(16) destport = compat_struct_unpack('!H', self.recvall(2))[0] return (destaddr, destport) def _make_proxy(self, connect_func, address): if not self._proxy: return connect_func(self, address) result = connect_func(self, (self._proxy.host, self._proxy.port)) if result != 0 and result is not None: return result setup_funcs = { ProxyType.SOCKS4: self._setup_socks4, ProxyType.SOCKS4A: self._setup_socks4a, ProxyType.SOCKS5: self._setup_socks5, } setup_funcs[self._proxy.type](address) return result def connect(self, address): self._make_proxy(socket.socket.connect, address) def connect_ex(self, address): return self._make_proxy(socket.socket.connect_ex, address)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/utils.py
youtube_dl/utils.py
#!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals import base64 import binascii import calendar import codecs import collections import contextlib import ctypes import datetime import email.utils import email.header import errno import functools import inspect import io import itertools import json import locale import math import operator import os import platform import random import re import socket import ssl import subprocess import sys import tempfile import time import traceback import unicodedata import xml.etree.ElementTree import zlib from .compat import ( compat_HTMLParseError, compat_HTMLParser, compat_basestring, compat_brotli as brotli, compat_casefold, compat_chr, compat_collections_abc, compat_contextlib_suppress, compat_cookiejar, compat_ctypes_WINFUNCTYPE, compat_datetime_timedelta_total_seconds, compat_etree_Element, compat_etree_fromstring, compat_etree_iterfind, compat_expanduser, compat_filter as filter, compat_filter_fns, compat_html_entities, compat_html_entities_html5, compat_http_client, compat_http_cookies, compat_integer_types, compat_kwargs, compat_ncompress as ncompress, compat_os_name, compat_re_Match, compat_re_Pattern, compat_shlex_quote, compat_str, compat_struct_pack, compat_struct_unpack, compat_urllib_error, compat_urllib_HTTPError, compat_urllib_parse, compat_urllib_parse_parse_qs as compat_parse_qs, compat_urllib_parse_urlencode, compat_urllib_parse_urlparse, compat_urllib_parse_unquote_plus, compat_urllib_request, compat_xpath, ) from .socks import ( ProxyType, sockssocket, ) def register_socks_protocols(): # "Register" SOCKS protocols # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904 # URLs with protocols not in urlparse.uses_netloc are not handled correctly for scheme in ('socks', 'socks4', 'socks4a', 'socks5'): if scheme not in compat_urllib_parse.uses_netloc: compat_urllib_parse.uses_netloc.append(scheme) # Unfavoured alias compiled_regex_type = 
compat_re_Pattern def random_user_agent(): _USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36' _CHROME_VERSIONS = ( '74.0.3729.129', '76.0.3780.3', '76.0.3780.2', '74.0.3729.128', '76.0.3780.1', '76.0.3780.0', '75.0.3770.15', '74.0.3729.127', '74.0.3729.126', '76.0.3779.1', '76.0.3779.0', '75.0.3770.14', '74.0.3729.125', '76.0.3778.1', '76.0.3778.0', '75.0.3770.13', '74.0.3729.124', '74.0.3729.123', '73.0.3683.121', '76.0.3777.1', '76.0.3777.0', '75.0.3770.12', '74.0.3729.122', '76.0.3776.4', '75.0.3770.11', '74.0.3729.121', '76.0.3776.3', '76.0.3776.2', '73.0.3683.120', '74.0.3729.120', '74.0.3729.119', '74.0.3729.118', '76.0.3776.1', '76.0.3776.0', '76.0.3775.5', '75.0.3770.10', '74.0.3729.117', '76.0.3775.4', '76.0.3775.3', '74.0.3729.116', '75.0.3770.9', '76.0.3775.2', '76.0.3775.1', '76.0.3775.0', '75.0.3770.8', '74.0.3729.115', '74.0.3729.114', '76.0.3774.1', '76.0.3774.0', '75.0.3770.7', '74.0.3729.113', '74.0.3729.112', '74.0.3729.111', '76.0.3773.1', '76.0.3773.0', '75.0.3770.6', '74.0.3729.110', '74.0.3729.109', '76.0.3772.1', '76.0.3772.0', '75.0.3770.5', '74.0.3729.108', '74.0.3729.107', '76.0.3771.1', '76.0.3771.0', '75.0.3770.4', '74.0.3729.106', '74.0.3729.105', '75.0.3770.3', '74.0.3729.104', '74.0.3729.103', '74.0.3729.102', '75.0.3770.2', '74.0.3729.101', '75.0.3770.1', '75.0.3770.0', '74.0.3729.100', '75.0.3769.5', '75.0.3769.4', '74.0.3729.99', '75.0.3769.3', '75.0.3769.2', '75.0.3768.6', '74.0.3729.98', '75.0.3769.1', '75.0.3769.0', '74.0.3729.97', '73.0.3683.119', '73.0.3683.118', '74.0.3729.96', '75.0.3768.5', '75.0.3768.4', '75.0.3768.3', '75.0.3768.2', '74.0.3729.95', '74.0.3729.94', '75.0.3768.1', '75.0.3768.0', '74.0.3729.93', '74.0.3729.92', '73.0.3683.117', '74.0.3729.91', '75.0.3766.3', '74.0.3729.90', '75.0.3767.2', '75.0.3767.1', '75.0.3767.0', '74.0.3729.89', '73.0.3683.116', '75.0.3766.2', '74.0.3729.88', '75.0.3766.1', '75.0.3766.0', '74.0.3729.87', 
'73.0.3683.115', '74.0.3729.86', '75.0.3765.1', '75.0.3765.0', '74.0.3729.85', '73.0.3683.114', '74.0.3729.84', '75.0.3764.1', '75.0.3764.0', '74.0.3729.83', '73.0.3683.113', '75.0.3763.2', '75.0.3761.4', '74.0.3729.82', '75.0.3763.1', '75.0.3763.0', '74.0.3729.81', '73.0.3683.112', '75.0.3762.1', '75.0.3762.0', '74.0.3729.80', '75.0.3761.3', '74.0.3729.79', '73.0.3683.111', '75.0.3761.2', '74.0.3729.78', '74.0.3729.77', '75.0.3761.1', '75.0.3761.0', '73.0.3683.110', '74.0.3729.76', '74.0.3729.75', '75.0.3760.0', '74.0.3729.74', '75.0.3759.8', '75.0.3759.7', '75.0.3759.6', '74.0.3729.73', '75.0.3759.5', '74.0.3729.72', '73.0.3683.109', '75.0.3759.4', '75.0.3759.3', '74.0.3729.71', '75.0.3759.2', '74.0.3729.70', '73.0.3683.108', '74.0.3729.69', '75.0.3759.1', '75.0.3759.0', '74.0.3729.68', '73.0.3683.107', '74.0.3729.67', '75.0.3758.1', '75.0.3758.0', '74.0.3729.66', '73.0.3683.106', '74.0.3729.65', '75.0.3757.1', '75.0.3757.0', '74.0.3729.64', '73.0.3683.105', '74.0.3729.63', '75.0.3756.1', '75.0.3756.0', '74.0.3729.62', '73.0.3683.104', '75.0.3755.3', '75.0.3755.2', '73.0.3683.103', '75.0.3755.1', '75.0.3755.0', '74.0.3729.61', '73.0.3683.102', '74.0.3729.60', '75.0.3754.2', '74.0.3729.59', '75.0.3753.4', '74.0.3729.58', '75.0.3754.1', '75.0.3754.0', '74.0.3729.57', '73.0.3683.101', '75.0.3753.3', '75.0.3752.2', '75.0.3753.2', '74.0.3729.56', '75.0.3753.1', '75.0.3753.0', '74.0.3729.55', '73.0.3683.100', '74.0.3729.54', '75.0.3752.1', '75.0.3752.0', '74.0.3729.53', '73.0.3683.99', '74.0.3729.52', '75.0.3751.1', '75.0.3751.0', '74.0.3729.51', '73.0.3683.98', '74.0.3729.50', '75.0.3750.0', '74.0.3729.49', '74.0.3729.48', '74.0.3729.47', '75.0.3749.3', '74.0.3729.46', '73.0.3683.97', '75.0.3749.2', '74.0.3729.45', '75.0.3749.1', '75.0.3749.0', '74.0.3729.44', '73.0.3683.96', '74.0.3729.43', '74.0.3729.42', '75.0.3748.1', '75.0.3748.0', '74.0.3729.41', '75.0.3747.1', '73.0.3683.95', '75.0.3746.4', '74.0.3729.40', '74.0.3729.39', '75.0.3747.0', '75.0.3746.3', 
'75.0.3746.2', '74.0.3729.38', '75.0.3746.1', '75.0.3746.0', '74.0.3729.37', '73.0.3683.94', '75.0.3745.5', '75.0.3745.4', '75.0.3745.3', '75.0.3745.2', '74.0.3729.36', '75.0.3745.1', '75.0.3745.0', '75.0.3744.2', '74.0.3729.35', '73.0.3683.93', '74.0.3729.34', '75.0.3744.1', '75.0.3744.0', '74.0.3729.33', '73.0.3683.92', '74.0.3729.32', '74.0.3729.31', '73.0.3683.91', '75.0.3741.2', '75.0.3740.5', '74.0.3729.30', '75.0.3741.1', '75.0.3741.0', '74.0.3729.29', '75.0.3740.4', '73.0.3683.90', '74.0.3729.28', '75.0.3740.3', '73.0.3683.89', '75.0.3740.2', '74.0.3729.27', '75.0.3740.1', '75.0.3740.0', '74.0.3729.26', '73.0.3683.88', '73.0.3683.87', '74.0.3729.25', '75.0.3739.1', '75.0.3739.0', '73.0.3683.86', '74.0.3729.24', '73.0.3683.85', '75.0.3738.4', '75.0.3738.3', '75.0.3738.2', '75.0.3738.1', '75.0.3738.0', '74.0.3729.23', '73.0.3683.84', '74.0.3729.22', '74.0.3729.21', '75.0.3737.1', '75.0.3737.0', '74.0.3729.20', '73.0.3683.83', '74.0.3729.19', '75.0.3736.1', '75.0.3736.0', '74.0.3729.18', '73.0.3683.82', '74.0.3729.17', '75.0.3735.1', '75.0.3735.0', '74.0.3729.16', '73.0.3683.81', '75.0.3734.1', '75.0.3734.0', '74.0.3729.15', '73.0.3683.80', '74.0.3729.14', '75.0.3733.1', '75.0.3733.0', '75.0.3732.1', '74.0.3729.13', '74.0.3729.12', '73.0.3683.79', '74.0.3729.11', '75.0.3732.0', '74.0.3729.10', '73.0.3683.78', '74.0.3729.9', '74.0.3729.8', '74.0.3729.7', '75.0.3731.3', '75.0.3731.2', '75.0.3731.0', '74.0.3729.6', '73.0.3683.77', '73.0.3683.76', '75.0.3730.5', '75.0.3730.4', '73.0.3683.75', '74.0.3729.5', '73.0.3683.74', '75.0.3730.3', '75.0.3730.2', '74.0.3729.4', '73.0.3683.73', '73.0.3683.72', '75.0.3730.1', '75.0.3730.0', '74.0.3729.3', '73.0.3683.71', '74.0.3729.2', '73.0.3683.70', '74.0.3729.1', '74.0.3729.0', '74.0.3726.4', '73.0.3683.69', '74.0.3726.3', '74.0.3728.0', '74.0.3726.2', '73.0.3683.68', '74.0.3726.1', '74.0.3726.0', '74.0.3725.4', '73.0.3683.67', '73.0.3683.66', '74.0.3725.3', '74.0.3725.2', '74.0.3725.1', '74.0.3724.8', '74.0.3725.0', 
'73.0.3683.65', '74.0.3724.7', '74.0.3724.6', '74.0.3724.5', '74.0.3724.4', '74.0.3724.3', '74.0.3724.2', '74.0.3724.1', '74.0.3724.0', '73.0.3683.64', '74.0.3723.1', '74.0.3723.0', '73.0.3683.63', '74.0.3722.1', '74.0.3722.0', '73.0.3683.62', '74.0.3718.9', '74.0.3702.3', '74.0.3721.3', '74.0.3721.2', '74.0.3721.1', '74.0.3721.0', '74.0.3720.6', '73.0.3683.61', '72.0.3626.122', '73.0.3683.60', '74.0.3720.5', '72.0.3626.121', '74.0.3718.8', '74.0.3720.4', '74.0.3720.3', '74.0.3718.7', '74.0.3720.2', '74.0.3720.1', '74.0.3720.0', '74.0.3718.6', '74.0.3719.5', '73.0.3683.59', '74.0.3718.5', '74.0.3718.4', '74.0.3719.4', '74.0.3719.3', '74.0.3719.2', '74.0.3719.1', '73.0.3683.58', '74.0.3719.0', '73.0.3683.57', '73.0.3683.56', '74.0.3718.3', '73.0.3683.55', '74.0.3718.2', '74.0.3718.1', '74.0.3718.0', '73.0.3683.54', '74.0.3717.2', '73.0.3683.53', '74.0.3717.1', '74.0.3717.0', '73.0.3683.52', '74.0.3716.1', '74.0.3716.0', '73.0.3683.51', '74.0.3715.1', '74.0.3715.0', '73.0.3683.50', '74.0.3711.2', '74.0.3714.2', '74.0.3713.3', '74.0.3714.1', '74.0.3714.0', '73.0.3683.49', '74.0.3713.1', '74.0.3713.0', '72.0.3626.120', '73.0.3683.48', '74.0.3712.2', '74.0.3712.1', '74.0.3712.0', '73.0.3683.47', '72.0.3626.119', '73.0.3683.46', '74.0.3710.2', '72.0.3626.118', '74.0.3711.1', '74.0.3711.0', '73.0.3683.45', '72.0.3626.117', '74.0.3710.1', '74.0.3710.0', '73.0.3683.44', '72.0.3626.116', '74.0.3709.1', '74.0.3709.0', '74.0.3704.9', '73.0.3683.43', '72.0.3626.115', '74.0.3704.8', '74.0.3704.7', '74.0.3708.0', '74.0.3706.7', '74.0.3704.6', '73.0.3683.42', '72.0.3626.114', '74.0.3706.6', '72.0.3626.113', '74.0.3704.5', '74.0.3706.5', '74.0.3706.4', '74.0.3706.3', '74.0.3706.2', '74.0.3706.1', '74.0.3706.0', '73.0.3683.41', '72.0.3626.112', '74.0.3705.1', '74.0.3705.0', '73.0.3683.40', '72.0.3626.111', '73.0.3683.39', '74.0.3704.4', '73.0.3683.38', '74.0.3704.3', '74.0.3704.2', '74.0.3704.1', '74.0.3704.0', '73.0.3683.37', '72.0.3626.110', '72.0.3626.109', '74.0.3703.3', 
'74.0.3703.2', '73.0.3683.36', '74.0.3703.1', '74.0.3703.0', '73.0.3683.35', '72.0.3626.108', '74.0.3702.2', '74.0.3699.3', '74.0.3702.1', '74.0.3702.0', '73.0.3683.34', '72.0.3626.107', '73.0.3683.33', '74.0.3701.1', '74.0.3701.0', '73.0.3683.32', '73.0.3683.31', '72.0.3626.105', '74.0.3700.1', '74.0.3700.0', '73.0.3683.29', '72.0.3626.103', '74.0.3699.2', '74.0.3699.1', '74.0.3699.0', '73.0.3683.28', '72.0.3626.102', '73.0.3683.27', '73.0.3683.26', '74.0.3698.0', '74.0.3696.2', '72.0.3626.101', '73.0.3683.25', '74.0.3696.1', '74.0.3696.0', '74.0.3694.8', '72.0.3626.100', '74.0.3694.7', '74.0.3694.6', '74.0.3694.5', '74.0.3694.4', '72.0.3626.99', '72.0.3626.98', '74.0.3694.3', '73.0.3683.24', '72.0.3626.97', '72.0.3626.96', '72.0.3626.95', '73.0.3683.23', '72.0.3626.94', '73.0.3683.22', '73.0.3683.21', '72.0.3626.93', '74.0.3694.2', '72.0.3626.92', '74.0.3694.1', '74.0.3694.0', '74.0.3693.6', '73.0.3683.20', '72.0.3626.91', '74.0.3693.5', '74.0.3693.4', '74.0.3693.3', '74.0.3693.2', '73.0.3683.19', '74.0.3693.1', '74.0.3693.0', '73.0.3683.18', '72.0.3626.90', '74.0.3692.1', '74.0.3692.0', '73.0.3683.17', '72.0.3626.89', '74.0.3687.3', '74.0.3691.1', '74.0.3691.0', '73.0.3683.16', '72.0.3626.88', '72.0.3626.87', '73.0.3683.15', '74.0.3690.1', '74.0.3690.0', '73.0.3683.14', '72.0.3626.86', '73.0.3683.13', '73.0.3683.12', '74.0.3689.1', '74.0.3689.0', '73.0.3683.11', '72.0.3626.85', '73.0.3683.10', '72.0.3626.84', '73.0.3683.9', '74.0.3688.1', '74.0.3688.0', '73.0.3683.8', '72.0.3626.83', '74.0.3687.2', '74.0.3687.1', '74.0.3687.0', '73.0.3683.7', '72.0.3626.82', '74.0.3686.4', '72.0.3626.81', '74.0.3686.3', '74.0.3686.2', '74.0.3686.1', '74.0.3686.0', '73.0.3683.6', '72.0.3626.80', '74.0.3685.1', '74.0.3685.0', '73.0.3683.5', '72.0.3626.79', '74.0.3684.1', '74.0.3684.0', '73.0.3683.4', '72.0.3626.78', '72.0.3626.77', '73.0.3683.3', '73.0.3683.2', '72.0.3626.76', '73.0.3683.1', '73.0.3683.0', '72.0.3626.75', '71.0.3578.141', '73.0.3682.1', '73.0.3682.0', 
'72.0.3626.74', '71.0.3578.140', '73.0.3681.4', '73.0.3681.3', '73.0.3681.2', '73.0.3681.1', '73.0.3681.0', '72.0.3626.73', '71.0.3578.139', '72.0.3626.72', '72.0.3626.71', '73.0.3680.1', '73.0.3680.0', '72.0.3626.70', '71.0.3578.138', '73.0.3678.2', '73.0.3679.1', '73.0.3679.0', '72.0.3626.69', '71.0.3578.137', '73.0.3678.1', '73.0.3678.0', '71.0.3578.136', '73.0.3677.1', '73.0.3677.0', '72.0.3626.68', '72.0.3626.67', '71.0.3578.135', '73.0.3676.1', '73.0.3676.0', '73.0.3674.2', '72.0.3626.66', '71.0.3578.134', '73.0.3674.1', '73.0.3674.0', '72.0.3626.65', '71.0.3578.133', '73.0.3673.2', '73.0.3673.1', '73.0.3673.0', '72.0.3626.64', '71.0.3578.132', '72.0.3626.63', '72.0.3626.62', '72.0.3626.61', '72.0.3626.60', '73.0.3672.1', '73.0.3672.0', '72.0.3626.59', '71.0.3578.131', '73.0.3671.3', '73.0.3671.2', '73.0.3671.1', '73.0.3671.0', '72.0.3626.58', '71.0.3578.130', '73.0.3670.1', '73.0.3670.0', '72.0.3626.57', '71.0.3578.129', '73.0.3669.1', '73.0.3669.0', '72.0.3626.56', '71.0.3578.128', '73.0.3668.2', '73.0.3668.1', '73.0.3668.0', '72.0.3626.55', '71.0.3578.127', '73.0.3667.2', '73.0.3667.1', '73.0.3667.0', '72.0.3626.54', '71.0.3578.126', '73.0.3666.1', '73.0.3666.0', '72.0.3626.53', '71.0.3578.125', '73.0.3665.4', '73.0.3665.3', '72.0.3626.52', '73.0.3665.2', '73.0.3664.4', '73.0.3665.1', '73.0.3665.0', '72.0.3626.51', '71.0.3578.124', '72.0.3626.50', '73.0.3664.3', '73.0.3664.2', '73.0.3664.1', '73.0.3664.0', '73.0.3663.2', '72.0.3626.49', '71.0.3578.123', '73.0.3663.1', '73.0.3663.0', '72.0.3626.48', '71.0.3578.122', '73.0.3662.1', '73.0.3662.0', '72.0.3626.47', '71.0.3578.121', '73.0.3661.1', '72.0.3626.46', '73.0.3661.0', '72.0.3626.45', '71.0.3578.120', '73.0.3660.2', '73.0.3660.1', '73.0.3660.0', '72.0.3626.44', '71.0.3578.119', '73.0.3659.1', '73.0.3659.0', '72.0.3626.43', '71.0.3578.118', '73.0.3658.1', '73.0.3658.0', '72.0.3626.42', '71.0.3578.117', '73.0.3657.1', '73.0.3657.0', '72.0.3626.41', '71.0.3578.116', '73.0.3656.1', '73.0.3656.0', 
'72.0.3626.40', '71.0.3578.115', '73.0.3655.1', '73.0.3655.0', '72.0.3626.39', '71.0.3578.114', '73.0.3654.1', '73.0.3654.0', '72.0.3626.38', '71.0.3578.113', '73.0.3653.1', '73.0.3653.0', '72.0.3626.37', '71.0.3578.112', '73.0.3652.1', '73.0.3652.0', '72.0.3626.36', '71.0.3578.111', '73.0.3651.1', '73.0.3651.0', '72.0.3626.35', '71.0.3578.110', '73.0.3650.1', '73.0.3650.0', '72.0.3626.34', '71.0.3578.109', '73.0.3649.1', '73.0.3649.0', '72.0.3626.33', '71.0.3578.108', '73.0.3648.2', '73.0.3648.1', '73.0.3648.0', '72.0.3626.32', '71.0.3578.107', '73.0.3647.2', '73.0.3647.1', '73.0.3647.0', '72.0.3626.31', '71.0.3578.106', '73.0.3635.3', '73.0.3646.2', '73.0.3646.1', '73.0.3646.0', '72.0.3626.30', '71.0.3578.105', '72.0.3626.29', '73.0.3645.2', '73.0.3645.1', '73.0.3645.0', '72.0.3626.28', '71.0.3578.104', '72.0.3626.27', '72.0.3626.26', '72.0.3626.25', '72.0.3626.24', '73.0.3644.0', '73.0.3643.2', '72.0.3626.23', '71.0.3578.103', '73.0.3643.1', '73.0.3643.0', '72.0.3626.22', '71.0.3578.102', '73.0.3642.1', '73.0.3642.0', '72.0.3626.21', '71.0.3578.101', '73.0.3641.1', '73.0.3641.0', '72.0.3626.20', '71.0.3578.100', '72.0.3626.19', '73.0.3640.1', '73.0.3640.0', '72.0.3626.18', '73.0.3639.1', '71.0.3578.99', '73.0.3639.0', '72.0.3626.17', '73.0.3638.2', '72.0.3626.16', '73.0.3638.1', '73.0.3638.0', '72.0.3626.15', '71.0.3578.98', '73.0.3635.2', '71.0.3578.97', '73.0.3637.1', '73.0.3637.0', '72.0.3626.14', '71.0.3578.96', '71.0.3578.95', '72.0.3626.13', '71.0.3578.94', '73.0.3636.2', '71.0.3578.93', '73.0.3636.1', '73.0.3636.0', '72.0.3626.12', '71.0.3578.92', '73.0.3635.1', '73.0.3635.0', '72.0.3626.11', '71.0.3578.91', '73.0.3634.2', '73.0.3634.1', '73.0.3634.0', '72.0.3626.10', '71.0.3578.90', '71.0.3578.89', '73.0.3633.2', '73.0.3633.1', '73.0.3633.0', '72.0.3610.4', '72.0.3626.9', '71.0.3578.88', '73.0.3632.5', '73.0.3632.4', '73.0.3632.3', '73.0.3632.2', '73.0.3632.1', '73.0.3632.0', '72.0.3626.8', '71.0.3578.87', '73.0.3631.2', '73.0.3631.1', '73.0.3631.0', 
'72.0.3626.7', '71.0.3578.86', '72.0.3626.6', '73.0.3630.1', '73.0.3630.0', '72.0.3626.5', '71.0.3578.85', '72.0.3626.4', '73.0.3628.3', '73.0.3628.2', '73.0.3629.1', '73.0.3629.0', '72.0.3626.3', '71.0.3578.84', '73.0.3628.1', '73.0.3628.0', '71.0.3578.83', '73.0.3627.1', '73.0.3627.0', '72.0.3626.2', '71.0.3578.82', '71.0.3578.81', '71.0.3578.80', '72.0.3626.1', '72.0.3626.0', '71.0.3578.79', '70.0.3538.124', '71.0.3578.78', '72.0.3623.4', '72.0.3625.2', '72.0.3625.1', '72.0.3625.0', '71.0.3578.77', '70.0.3538.123', '72.0.3624.4', '72.0.3624.3', '72.0.3624.2', '71.0.3578.76', '72.0.3624.1', '72.0.3624.0', '72.0.3623.3', '71.0.3578.75', '70.0.3538.122', '71.0.3578.74', '72.0.3623.2', '72.0.3610.3', '72.0.3623.1', '72.0.3623.0', '72.0.3622.3', '72.0.3622.2', '71.0.3578.73', '70.0.3538.121', '72.0.3622.1', '72.0.3622.0', '71.0.3578.72', '70.0.3538.120', '72.0.3621.1', '72.0.3621.0', '71.0.3578.71', '70.0.3538.119', '72.0.3620.1', '72.0.3620.0', '71.0.3578.70', '70.0.3538.118', '71.0.3578.69', '72.0.3619.1', '72.0.3619.0', '71.0.3578.68', '70.0.3538.117', '71.0.3578.67', '72.0.3618.1', '72.0.3618.0', '71.0.3578.66', '70.0.3538.116', '72.0.3617.1', '72.0.3617.0', '71.0.3578.65', '70.0.3538.115', '72.0.3602.3', '71.0.3578.64', '72.0.3616.1', '72.0.3616.0', '71.0.3578.63', '70.0.3538.114', '71.0.3578.62', '72.0.3615.1', '72.0.3615.0', '71.0.3578.61', '70.0.3538.113', '72.0.3614.1', '72.0.3614.0', '71.0.3578.60', '70.0.3538.112', '72.0.3613.1', '72.0.3613.0', '71.0.3578.59', '70.0.3538.111', '72.0.3612.2', '72.0.3612.1', '72.0.3612.0', '70.0.3538.110', '71.0.3578.58', '70.0.3538.109', '72.0.3611.2', '72.0.3611.1', '72.0.3611.0', '71.0.3578.57', '70.0.3538.108', '72.0.3610.2', '71.0.3578.56', '71.0.3578.55', '72.0.3610.1', '72.0.3610.0', '71.0.3578.54', '70.0.3538.107', '71.0.3578.53', '72.0.3609.3', '71.0.3578.52', '72.0.3609.2', '71.0.3578.51', '72.0.3608.5', '72.0.3609.1', '72.0.3609.0', '71.0.3578.50', '70.0.3538.106', '72.0.3608.4', '72.0.3608.3', '72.0.3608.2', 
'71.0.3578.49', '72.0.3608.1', '72.0.3608.0', '70.0.3538.105', '71.0.3578.48', '72.0.3607.1', '72.0.3607.0', '71.0.3578.47', '70.0.3538.104', '72.0.3606.2', '72.0.3606.1', '72.0.3606.0', '71.0.3578.46', '70.0.3538.103', '70.0.3538.102', '72.0.3605.3', '72.0.3605.2', '72.0.3605.1', '72.0.3605.0', '71.0.3578.45', '70.0.3538.101', '71.0.3578.44', '71.0.3578.43', '70.0.3538.100', '70.0.3538.99', '71.0.3578.42', '72.0.3604.1', '72.0.3604.0', '71.0.3578.41', '70.0.3538.98', '71.0.3578.40', '72.0.3603.2', '72.0.3603.1', '72.0.3603.0', '71.0.3578.39', '70.0.3538.97', '72.0.3602.2', '71.0.3578.38', '71.0.3578.37', '72.0.3602.1', '72.0.3602.0', '71.0.3578.36', '70.0.3538.96', '72.0.3601.1', '72.0.3601.0', '71.0.3578.35', '70.0.3538.95', '72.0.3600.1', '72.0.3600.0', '71.0.3578.34', '70.0.3538.94', '72.0.3599.3', '72.0.3599.2', '72.0.3599.1', '72.0.3599.0', '71.0.3578.33', '70.0.3538.93', '72.0.3598.1', '72.0.3598.0', '71.0.3578.32', '70.0.3538.87', '72.0.3597.1', '72.0.3597.0', '72.0.3596.2', '71.0.3578.31', '70.0.3538.86', '71.0.3578.30', '71.0.3578.29', '72.0.3596.1', '72.0.3596.0', '71.0.3578.28', '70.0.3538.85', '72.0.3595.2', '72.0.3591.3', '72.0.3595.1', '72.0.3595.0', '71.0.3578.27', '70.0.3538.84', '72.0.3594.1', '72.0.3594.0', '71.0.3578.26', '70.0.3538.83', '72.0.3593.2', '72.0.3593.1', '72.0.3593.0', '71.0.3578.25', '70.0.3538.82', '72.0.3589.3', '72.0.3592.2', '72.0.3592.1', '72.0.3592.0', '71.0.3578.24', '72.0.3589.2', '70.0.3538.81', '70.0.3538.80', '72.0.3591.2', '72.0.3591.1', '72.0.3591.0', '71.0.3578.23', '70.0.3538.79', '71.0.3578.22', '72.0.3590.1', '72.0.3590.0', '71.0.3578.21', '70.0.3538.78', '70.0.3538.77', '72.0.3589.1', '72.0.3589.0', '71.0.3578.20', '70.0.3538.76', '71.0.3578.19', '70.0.3538.75', '72.0.3588.1', '72.0.3588.0', '71.0.3578.18', '70.0.3538.74', '72.0.3586.2', '72.0.3587.0', '71.0.3578.17', '70.0.3538.73', '72.0.3586.1', '72.0.3586.0', '71.0.3578.16', '70.0.3538.72', '72.0.3585.1', '72.0.3585.0', '71.0.3578.15', '70.0.3538.71', 
'71.0.3578.14', '72.0.3584.1', '72.0.3584.0', '71.0.3578.13', '70.0.3538.70', '72.0.3583.2', '71.0.3578.12', '72.0.3583.1', '72.0.3583.0', '71.0.3578.11', '70.0.3538.69', '71.0.3578.10', '72.0.3582.0', '72.0.3581.4', '71.0.3578.9', '70.0.3538.67', '72.0.3581.3', '72.0.3581.2', '72.0.3581.1', '72.0.3581.0', '71.0.3578.8', '70.0.3538.66', '72.0.3580.1', '72.0.3580.0', '71.0.3578.7', '70.0.3538.65', '71.0.3578.6', '72.0.3579.1', '72.0.3579.0', '71.0.3578.5', '70.0.3538.64', '71.0.3578.4', '71.0.3578.3', '71.0.3578.2', '71.0.3578.1', '71.0.3578.0', '70.0.3538.63', '69.0.3497.128', '70.0.3538.62', '70.0.3538.61', '70.0.3538.60', '70.0.3538.59', '71.0.3577.1', '71.0.3577.0', '70.0.3538.58', '69.0.3497.127', '71.0.3576.2', '71.0.3576.1', '71.0.3576.0', '70.0.3538.57', '70.0.3538.56', '71.0.3575.2', '70.0.3538.55', '69.0.3497.126', '70.0.3538.54', '71.0.3575.1', '71.0.3575.0', '71.0.3574.1', '71.0.3574.0', '70.0.3538.53', '69.0.3497.125', '70.0.3538.52', '71.0.3573.1', '71.0.3573.0', '70.0.3538.51', '69.0.3497.124', '71.0.3572.1', '71.0.3572.0', '70.0.3538.50', '69.0.3497.123', '71.0.3571.2', '70.0.3538.49', '69.0.3497.122', '71.0.3571.1', '71.0.3571.0', '70.0.3538.48', '69.0.3497.121', '71.0.3570.1', '71.0.3570.0', '70.0.3538.47', '69.0.3497.120', '71.0.3568.2', '71.0.3569.1', '71.0.3569.0', '70.0.3538.46', '69.0.3497.119', '70.0.3538.45', '71.0.3568.1', '71.0.3568.0', '70.0.3538.44', '69.0.3497.118', '70.0.3538.43', '70.0.3538.42', '71.0.3567.1', '71.0.3567.0', '70.0.3538.41', '69.0.3497.117', '71.0.3566.1', '71.0.3566.0', '70.0.3538.40', '69.0.3497.116', '71.0.3565.1', '71.0.3565.0', '70.0.3538.39', '69.0.3497.115', '71.0.3564.1', '71.0.3564.0', '70.0.3538.38', '69.0.3497.114', '71.0.3563.0', '71.0.3562.2', '70.0.3538.37', '69.0.3497.113', '70.0.3538.36', '70.0.3538.35', '71.0.3562.1', '71.0.3562.0', '70.0.3538.34', '69.0.3497.112', '70.0.3538.33', '71.0.3561.1', '71.0.3561.0', '70.0.3538.32', '69.0.3497.111', '71.0.3559.6', '71.0.3560.1', '71.0.3560.0', '71.0.3559.5', 
'71.0.3559.4', '70.0.3538.31', '69.0.3497.110', '71.0.3559.3', '70.0.3538.30', '69.0.3497.109', '71.0.3559.2', '71.0.3559.1', '71.0.3559.0', '70.0.3538.29', '69.0.3497.108', '71.0.3558.2', '71.0.3558.1', '71.0.3558.0', '70.0.3538.28', '69.0.3497.107', '71.0.3557.2', '71.0.3557.1', '71.0.3557.0', '70.0.3538.27', '69.0.3497.106',
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
true
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/swfinterp.py
youtube_dl/swfinterp.py
from __future__ import unicode_literals import collections import io import zlib from .compat import ( compat_str, compat_struct_unpack, ) from .utils import ( ExtractorError, ) def _extract_tags(file_contents): if file_contents[1:3] != b'WS': raise ExtractorError( 'Not an SWF file; header is %r' % file_contents[:3]) if file_contents[:1] == b'C': content = zlib.decompress(file_contents[8:]) else: raise NotImplementedError( 'Unsupported compression format %r' % file_contents[:1]) # Determine number of bits in framesize rectangle framesize_nbits = compat_struct_unpack('!B', content[:1])[0] >> 3 framesize_len = (5 + 4 * framesize_nbits + 7) // 8 pos = framesize_len + 2 + 2 while pos < len(content): header16 = compat_struct_unpack('<H', content[pos:pos + 2])[0] pos += 2 tag_code = header16 >> 6 tag_len = header16 & 0x3f if tag_len == 0x3f: tag_len = compat_struct_unpack('<I', content[pos:pos + 4])[0] pos += 4 assert pos + tag_len <= len(content), \ ('Tag %d ends at %d+%d - that\'s longer than the file (%d)' % (tag_code, pos, tag_len, len(content))) yield (tag_code, content[pos:pos + tag_len]) pos += tag_len class _AVMClass_Object(object): def __init__(self, avm_class): self.avm_class = avm_class def __repr__(self): return '%s#%x' % (self.avm_class.name, id(self)) class _ScopeDict(dict): def __init__(self, avm_class): super(_ScopeDict, self).__init__() self.avm_class = avm_class def __repr__(self): return '%s__Scope(%s)' % ( self.avm_class.name, super(_ScopeDict, self).__repr__()) class _AVMClass(object): def __init__(self, name_idx, name, static_properties=None): self.name_idx = name_idx self.name = name self.method_names = {} self.method_idxs = {} self.methods = {} self.method_pyfunctions = {} self.static_properties = static_properties if static_properties else {} self.variables = _ScopeDict(self) self.constants = {} def make_object(self): return _AVMClass_Object(self) def __repr__(self): return '_AVMClass(%s)' % (self.name) def register_methods(self, methods): 
self.method_names.update(methods.items()) self.method_idxs.update(dict( (idx, name) for name, idx in methods.items())) class _Multiname(object): def __init__(self, kind): self.kind = kind def __repr__(self): return '[MULTINAME kind: 0x%x]' % self.kind def _read_int(reader): res = 0 shift = 0 for _ in range(5): buf = reader.read(1) assert len(buf) == 1 b = compat_struct_unpack('<B', buf)[0] res = res | ((b & 0x7f) << shift) if b & 0x80 == 0: break shift += 7 return res def _u30(reader): res = _read_int(reader) assert res & 0xf0000000 == 0 return res _u32 = _read_int def _s32(reader): v = _read_int(reader) if v & 0x80000000 != 0: v = - ((v ^ 0xffffffff) + 1) return v def _s24(reader): bs = reader.read(3) assert len(bs) == 3 last_byte = b'\xff' if (ord(bs[2:3]) >= 0x80) else b'\x00' return compat_struct_unpack('<i', bs + last_byte)[0] def _read_string(reader): slen = _u30(reader) resb = reader.read(slen) assert len(resb) == slen return resb.decode('utf-8') def _read_bytes(count, reader): assert count >= 0 resb = reader.read(count) assert len(resb) == count return resb def _read_byte(reader): resb = _read_bytes(1, reader=reader) res = compat_struct_unpack('<B', resb)[0] return res StringClass = _AVMClass('(no name idx)', 'String') ByteArrayClass = _AVMClass('(no name idx)', 'ByteArray') TimerClass = _AVMClass('(no name idx)', 'Timer') TimerEventClass = _AVMClass('(no name idx)', 'TimerEvent', {'TIMER': 'timer'}) _builtin_classes = { StringClass.name: StringClass, ByteArrayClass.name: ByteArrayClass, TimerClass.name: TimerClass, TimerEventClass.name: TimerEventClass, } class _Undefined(object): def __bool__(self): return False __nonzero__ = __bool__ def __hash__(self): return 0 def __str__(self): return 'undefined' __repr__ = __str__ undefined = _Undefined() class SWFInterpreter(object): def __init__(self, file_contents): self._patched_functions = { (TimerClass, 'addEventListener'): lambda params: undefined, } code_tag = next(tag for tag_code, tag in 
_extract_tags(file_contents) if tag_code == 82) p = code_tag.index(b'\0', 4) + 1 code_reader = io.BytesIO(code_tag[p:]) # Parse ABC (AVM2 ByteCode) # Define a couple convenience methods u30 = lambda *args: _u30(*args, reader=code_reader) s32 = lambda *args: _s32(*args, reader=code_reader) u32 = lambda *args: _u32(*args, reader=code_reader) read_bytes = lambda *args: _read_bytes(*args, reader=code_reader) read_byte = lambda *args: _read_byte(*args, reader=code_reader) # minor_version + major_version read_bytes(2 + 2) # Constant pool int_count = u30() self.constant_ints = [0] for _c in range(1, int_count): self.constant_ints.append(s32()) self.constant_uints = [0] uint_count = u30() for _c in range(1, uint_count): self.constant_uints.append(u32()) double_count = u30() read_bytes(max(0, (double_count - 1)) * 8) string_count = u30() self.constant_strings = [''] for _c in range(1, string_count): s = _read_string(code_reader) self.constant_strings.append(s) namespace_count = u30() for _c in range(1, namespace_count): read_bytes(1) # kind u30() # name ns_set_count = u30() for _c in range(1, ns_set_count): count = u30() for _c2 in range(count): u30() multiname_count = u30() MULTINAME_SIZES = { 0x07: 2, # QName 0x0d: 2, # QNameA 0x0f: 1, # RTQName 0x10: 1, # RTQNameA 0x11: 0, # RTQNameL 0x12: 0, # RTQNameLA 0x09: 2, # Multiname 0x0e: 2, # MultinameA 0x1b: 1, # MultinameL 0x1c: 1, # MultinameLA } self.multinames = [''] for _c in range(1, multiname_count): kind = u30() assert kind in MULTINAME_SIZES, 'Invalid multiname kind %r' % kind if kind == 0x07: u30() # namespace_idx name_idx = u30() self.multinames.append(self.constant_strings[name_idx]) elif kind == 0x09: name_idx = u30() u30() self.multinames.append(self.constant_strings[name_idx]) else: self.multinames.append(_Multiname(kind)) for _c2 in range(MULTINAME_SIZES[kind]): u30() # Methods method_count = u30() MethodInfo = collections.namedtuple( 'MethodInfo', ['NEED_ARGUMENTS', 'NEED_REST']) method_infos = [] for 
method_id in range(method_count): param_count = u30() u30() # return type for _ in range(param_count): u30() # param type u30() # name index (always 0 for youtube) flags = read_byte() if flags & 0x08 != 0: # Options present option_count = u30() for c in range(option_count): u30() # val read_bytes(1) # kind if flags & 0x80 != 0: # Param names present for _ in range(param_count): u30() # param name mi = MethodInfo(flags & 0x01 != 0, flags & 0x04 != 0) method_infos.append(mi) # Metadata metadata_count = u30() for _c in range(metadata_count): u30() # name item_count = u30() for _c2 in range(item_count): u30() # key u30() # value def parse_traits_info(): trait_name_idx = u30() kind_full = read_byte() kind = kind_full & 0x0f attrs = kind_full >> 4 methods = {} constants = None if kind == 0x00: # Slot u30() # Slot id u30() # type_name_idx vindex = u30() if vindex != 0: read_byte() # vkind elif kind == 0x06: # Const u30() # Slot id u30() # type_name_idx vindex = u30() vkind = 'any' if vindex != 0: vkind = read_byte() if vkind == 0x03: # Constant_Int value = self.constant_ints[vindex] elif vkind == 0x04: # Constant_UInt value = self.constant_uints[vindex] else: return {}, None # Ignore silently for now constants = {self.multinames[trait_name_idx]: value} elif kind in (0x01, 0x02, 0x03): # Method / Getter / Setter u30() # disp_id method_idx = u30() methods[self.multinames[trait_name_idx]] = method_idx elif kind == 0x04: # Class u30() # slot_id u30() # classi elif kind == 0x05: # Function u30() # slot_id function_idx = u30() methods[function_idx] = self.multinames[trait_name_idx] else: raise ExtractorError('Unsupported trait kind %d' % kind) if attrs & 0x4 != 0: # Metadata present metadata_count = u30() for _c3 in range(metadata_count): u30() # metadata index return methods, constants # Classes class_count = u30() classes = [] for class_id in range(class_count): name_idx = u30() cname = self.multinames[name_idx] avm_class = _AVMClass(name_idx, cname) classes.append(avm_class) 
u30() # super_name idx flags = read_byte() if flags & 0x08 != 0: # Protected namespace is present u30() # protected_ns_idx intrf_count = u30() for _c2 in range(intrf_count): u30() u30() # iinit trait_count = u30() for _c2 in range(trait_count): trait_methods, trait_constants = parse_traits_info() avm_class.register_methods(trait_methods) if trait_constants: avm_class.constants.update(trait_constants) assert len(classes) == class_count self._classes_by_name = dict((c.name, c) for c in classes) for avm_class in classes: avm_class.cinit_idx = u30() trait_count = u30() for _c2 in range(trait_count): trait_methods, trait_constants = parse_traits_info() avm_class.register_methods(trait_methods) if trait_constants: avm_class.constants.update(trait_constants) # Scripts script_count = u30() for _c in range(script_count): u30() # init trait_count = u30() for _c2 in range(trait_count): parse_traits_info() # Method bodies method_body_count = u30() Method = collections.namedtuple('Method', ['code', 'local_count']) self._all_methods = [] for _c in range(method_body_count): method_idx = u30() u30() # max_stack local_count = u30() u30() # init_scope_depth u30() # max_scope_depth code_length = u30() code = read_bytes(code_length) m = Method(code, local_count) self._all_methods.append(m) for avm_class in classes: if method_idx in avm_class.method_idxs: avm_class.methods[avm_class.method_idxs[method_idx]] = m exception_count = u30() for _c2 in range(exception_count): u30() # from u30() # to u30() # target u30() # exc_type u30() # var_name trait_count = u30() for _c2 in range(trait_count): parse_traits_info() assert p + code_reader.tell() == len(code_tag) def patch_function(self, avm_class, func_name, f): self._patched_functions[(avm_class, func_name)] = f def extract_class(self, class_name, call_cinit=True): try: res = self._classes_by_name[class_name] except KeyError: raise ExtractorError('Class %r not found' % class_name) if call_cinit and hasattr(res, 'cinit_idx'): 
res.register_methods({'$cinit': res.cinit_idx}) res.methods['$cinit'] = self._all_methods[res.cinit_idx] cinit = self.extract_function(res, '$cinit') cinit([]) return res def extract_function(self, avm_class, func_name): p = self._patched_functions.get((avm_class, func_name)) if p: return p if func_name in avm_class.method_pyfunctions: return avm_class.method_pyfunctions[func_name] if func_name in self._classes_by_name: return self._classes_by_name[func_name].make_object() if func_name not in avm_class.methods: raise ExtractorError('Cannot find function %s.%s' % ( avm_class.name, func_name)) m = avm_class.methods[func_name] def resfunc(args): # Helper functions coder = io.BytesIO(m.code) s24 = lambda: _s24(coder) u30 = lambda: _u30(coder) registers = [avm_class.variables] + list(args) + [None] * m.local_count stack = [] scopes = collections.deque([ self._classes_by_name, avm_class.constants, avm_class.variables]) while True: opcode = _read_byte(coder) if opcode == 9: # label pass # Spec says: "Do nothing." 
elif opcode == 16: # jump offset = s24() coder.seek(coder.tell() + offset) elif opcode == 17: # iftrue offset = s24() value = stack.pop() if value: coder.seek(coder.tell() + offset) elif opcode == 18: # iffalse offset = s24() value = stack.pop() if not value: coder.seek(coder.tell() + offset) elif opcode == 19: # ifeq offset = s24() value2 = stack.pop() value1 = stack.pop() if value2 == value1: coder.seek(coder.tell() + offset) elif opcode == 20: # ifne offset = s24() value2 = stack.pop() value1 = stack.pop() if value2 != value1: coder.seek(coder.tell() + offset) elif opcode == 21: # iflt offset = s24() value2 = stack.pop() value1 = stack.pop() if value1 < value2: coder.seek(coder.tell() + offset) elif opcode == 32: # pushnull stack.append(None) elif opcode == 33: # pushundefined stack.append(undefined) elif opcode == 36: # pushbyte v = _read_byte(coder) stack.append(v) elif opcode == 37: # pushshort v = u30() stack.append(v) elif opcode == 38: # pushtrue stack.append(True) elif opcode == 39: # pushfalse stack.append(False) elif opcode == 40: # pushnan stack.append(float('NaN')) elif opcode == 42: # dup value = stack[-1] stack.append(value) elif opcode == 44: # pushstring idx = u30() stack.append(self.constant_strings[idx]) elif opcode == 48: # pushscope new_scope = stack.pop() scopes.append(new_scope) elif opcode == 66: # construct arg_count = u30() args = list(reversed( [stack.pop() for _ in range(arg_count)])) obj = stack.pop() res = obj.avm_class.make_object() stack.append(res) elif opcode == 70: # callproperty index = u30() mname = self.multinames[index] arg_count = u30() args = list(reversed( [stack.pop() for _ in range(arg_count)])) obj = stack.pop() if obj == StringClass: if mname == 'String': assert len(args) == 1 assert isinstance(args[0], ( int, compat_str, _Undefined)) if args[0] == undefined: res = 'undefined' else: res = compat_str(args[0]) stack.append(res) continue else: raise NotImplementedError( 'Function String.%s is not yet implemented' % mname) 
elif isinstance(obj, _AVMClass_Object): func = self.extract_function(obj.avm_class, mname) res = func(args) stack.append(res) continue elif isinstance(obj, _AVMClass): func = self.extract_function(obj, mname) res = func(args) stack.append(res) continue elif isinstance(obj, _ScopeDict): if mname in obj.avm_class.method_names: func = self.extract_function(obj.avm_class, mname) res = func(args) else: res = obj[mname] stack.append(res) continue elif isinstance(obj, compat_str): if mname == 'split': assert len(args) == 1 assert isinstance(args[0], compat_str) if args[0] == '': res = list(obj) else: res = obj.split(args[0]) stack.append(res) continue elif mname == 'charCodeAt': assert len(args) <= 1 idx = 0 if len(args) == 0 else args[0] assert isinstance(idx, int) res = ord(obj[idx]) stack.append(res) continue elif isinstance(obj, list): if mname == 'slice': assert len(args) == 1 assert isinstance(args[0], int) res = obj[args[0]:] stack.append(res) continue elif mname == 'join': assert len(args) == 1 assert isinstance(args[0], compat_str) res = args[0].join(obj) stack.append(res) continue raise NotImplementedError( 'Unsupported property %r on %r' % (mname, obj)) elif opcode == 71: # returnvoid res = undefined return res elif opcode == 72: # returnvalue res = stack.pop() return res elif opcode == 73: # constructsuper # Not yet implemented, just hope it works without it arg_count = u30() args = list(reversed( [stack.pop() for _ in range(arg_count)])) obj = stack.pop() elif opcode == 74: # constructproperty index = u30() arg_count = u30() args = list(reversed( [stack.pop() for _ in range(arg_count)])) obj = stack.pop() mname = self.multinames[index] assert isinstance(obj, _AVMClass) # We do not actually call the constructor for now; # we just pretend it does nothing stack.append(obj.make_object()) elif opcode == 79: # callpropvoid index = u30() mname = self.multinames[index] arg_count = u30() args = list(reversed( [stack.pop() for _ in range(arg_count)])) obj = stack.pop() 
if isinstance(obj, _AVMClass_Object): func = self.extract_function(obj.avm_class, mname) res = func(args) assert res is undefined continue if isinstance(obj, _ScopeDict): assert mname in obj.avm_class.method_names func = self.extract_function(obj.avm_class, mname) res = func(args) assert res is undefined continue if mname == 'reverse': assert isinstance(obj, list) obj.reverse() else: raise NotImplementedError( 'Unsupported (void) property %r on %r' % (mname, obj)) elif opcode == 86: # newarray arg_count = u30() arr = [] for i in range(arg_count): arr.append(stack.pop()) arr = arr[::-1] stack.append(arr) elif opcode == 93: # findpropstrict index = u30() mname = self.multinames[index] for s in reversed(scopes): if mname in s: res = s break else: res = scopes[0] if mname not in res and mname in _builtin_classes: stack.append(_builtin_classes[mname]) else: stack.append(res[mname]) elif opcode == 94: # findproperty index = u30() mname = self.multinames[index] for s in reversed(scopes): if mname in s: res = s break else: res = avm_class.variables stack.append(res) elif opcode == 96: # getlex index = u30() mname = self.multinames[index] for s in reversed(scopes): if mname in s: scope = s break else: scope = avm_class.variables if mname in scope: res = scope[mname] elif mname in _builtin_classes: res = _builtin_classes[mname] else: # Assume uninitialized # TODO warn here res = undefined stack.append(res) elif opcode == 97: # setproperty index = u30() value = stack.pop() idx = self.multinames[index] if isinstance(idx, _Multiname): idx = stack.pop() obj = stack.pop() obj[idx] = value elif opcode == 98: # getlocal index = u30() stack.append(registers[index]) elif opcode == 99: # setlocal index = u30() value = stack.pop() registers[index] = value elif opcode == 102: # getproperty index = u30() pname = self.multinames[index] if pname == 'length': obj = stack.pop() assert isinstance(obj, (compat_str, list)) stack.append(len(obj)) elif isinstance(pname, compat_str): # Member 
access obj = stack.pop() if isinstance(obj, _AVMClass): res = obj.static_properties[pname] stack.append(res) continue assert isinstance(obj, (dict, _ScopeDict)), \ 'Accessing member %r on %r' % (pname, obj) res = obj.get(pname, undefined) stack.append(res) else: # Assume attribute access idx = stack.pop() assert isinstance(idx, int) obj = stack.pop() assert isinstance(obj, list) stack.append(obj[idx]) elif opcode == 104: # initproperty index = u30() value = stack.pop() idx = self.multinames[index] if isinstance(idx, _Multiname): idx = stack.pop() obj = stack.pop() obj[idx] = value elif opcode == 115: # convert_ value = stack.pop() intvalue = int(value) stack.append(intvalue) elif opcode == 128: # coerce u30() elif opcode == 130: # coerce_a value = stack.pop() # um, yes, it's any value stack.append(value) elif opcode == 133: # coerce_s assert isinstance(stack[-1], (type(None), compat_str)) elif opcode == 147: # decrement value = stack.pop() assert isinstance(value, int) stack.append(value - 1) elif opcode == 149: # typeof value = stack.pop() return { _Undefined: 'undefined', compat_str: 'String', int: 'Number', float: 'Number', }[type(value)] elif opcode == 160: # add value2 = stack.pop() value1 = stack.pop() res = value1 + value2 stack.append(res) elif opcode == 161: # subtract value2 = stack.pop() value1 = stack.pop() res = value1 - value2 stack.append(res) elif opcode == 162: # multiply value2 = stack.pop() value1 = stack.pop() res = value1 * value2 stack.append(res) elif opcode == 164: # modulo value2 = stack.pop() value1 = stack.pop() res = value1 % value2 stack.append(res) elif opcode == 168: # bitand value2 = stack.pop() value1 = stack.pop() assert isinstance(value1, int) assert isinstance(value2, int) res = value1 & value2 stack.append(res) elif opcode == 171: # equals value2 = stack.pop() value1 = stack.pop() result = value1 == value2 stack.append(result) elif opcode == 175: # greaterequals value2 = stack.pop() value1 = stack.pop() result = value1 >= value2 
stack.append(result) elif opcode == 192: # increment_i value = stack.pop() assert isinstance(value, int) stack.append(value + 1) elif opcode == 208: # getlocal_0 stack.append(registers[0]) elif opcode == 209: # getlocal_1 stack.append(registers[1]) elif opcode == 210: # getlocal_2 stack.append(registers[2]) elif opcode == 211: # getlocal_3 stack.append(registers[3]) elif opcode == 212: # setlocal_0 registers[0] = stack.pop() elif opcode == 213: # setlocal_1 registers[1] = stack.pop() elif opcode == 214: # setlocal_2 registers[2] = stack.pop() elif opcode == 215: # setlocal_3 registers[3] = stack.pop() else: raise NotImplementedError( 'Unsupported opcode %d' % opcode) avm_class.method_pyfunctions[func_name] = resfunc return resfunc
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/jsinterp.py
youtube_dl/jsinterp.py
# coding: utf-8 from __future__ import unicode_literals import calendar import itertools import json import operator import re import time from functools import update_wrapper, wraps from .utils import ( error_to_compat_str, ExtractorError, float_or_none, int_or_none, js_to_json, remove_quotes, str_or_none, unified_timestamp, variadic, write_string, ) from .compat import ( compat_basestring, compat_chr, compat_collections_chain_map as ChainMap, compat_contextlib_suppress, compat_filter as filter, compat_int, compat_integer_types, compat_itertools_zip_longest as zip_longest, compat_map as map, compat_numeric_types, compat_str, ) # name JS functions class function_with_repr(object): # from yt_dlp/utils.py, but in this module # repr_ is always set def __init__(self, func, repr_): update_wrapper(self, func) self.func, self.__repr = func, repr_ def __call__(self, *args, **kwargs): return self.func(*args, **kwargs) def __repr__(self): return self.__repr # name JS operators def wraps_op(op): def update_and_rename_wrapper(w): f = update_wrapper(w, op) # fn names are str in both Py 2/3 f.__name__ = str('JS_') + f.__name__ return f return update_and_rename_wrapper # NB In principle NaN cannot be checked by membership. # Here all NaN values are actually this one, so _NaN is _NaN, # although _NaN != _NaN. Ditto Infinity. 
_NaN = float('nan') _Infinity = float('inf') class JS_Undefined(object): pass def _js_bit_op(op, is_shift=False): def zeroise(x, is_shift_arg=False): if isinstance(x, compat_integer_types): return (x % 32) if is_shift_arg else (x & 0xffffffff) try: x = float(x) if is_shift_arg: x = int(x % 32) elif x < 0: x = -compat_int(-x % 0xffffffff) else: x = compat_int(x % 0xffffffff) except (ValueError, TypeError): # also here for int(NaN), including float('inf') % 32 x = 0 return x @wraps_op(op) def wrapped(a, b): return op(zeroise(a), zeroise(b, is_shift)) & 0xffffffff return wrapped def _js_arith_op(op, div=False): @wraps_op(op) def wrapped(a, b): if JS_Undefined in (a, b): return _NaN # null, "" --> 0 a, b = (float_or_none( (x.strip() if isinstance(x, compat_basestring) else x) or 0, default=_NaN) for x in (a, b)) if _NaN in (a, b): return _NaN try: return op(a, b) except ZeroDivisionError: return _NaN if not (div and (a or b)) else _Infinity return wrapped _js_arith_add = _js_arith_op(operator.add) def _js_add(a, b): if not (isinstance(a, compat_basestring) or isinstance(b, compat_basestring)): return _js_arith_add(a, b) if not isinstance(a, compat_basestring): a = _js_toString(a) elif not isinstance(b, compat_basestring): b = _js_toString(b) return operator.concat(a, b) _js_mod = _js_arith_op(operator.mod) __js_exp = _js_arith_op(operator.pow) def _js_exp(a, b): if not b: return 1 # even 0 ** 0 !! 
return __js_exp(a, b) def _js_to_primitive(v): return ( ','.join(map(_js_toString, v)) if isinstance(v, list) else '[object Object]' if isinstance(v, dict) else compat_str(v) if not isinstance(v, ( compat_numeric_types, compat_basestring)) else v ) # more exact: yt-dlp/yt-dlp#12110 def _js_toString(v): return ( 'undefined' if v is JS_Undefined else 'Infinity' if v == _Infinity else 'NaN' if v is _NaN else 'null' if v is None # bool <= int: do this first else ('false', 'true')[v] if isinstance(v, bool) else re.sub(r'(?<=\d)\.?0*$', '', '{0:.7f}'.format(v)) if isinstance(v, compat_numeric_types) else _js_to_primitive(v)) _nullish = frozenset((None, JS_Undefined)) def _js_eq(a, b): # NaN != any if _NaN in (a, b): return False # Object is Object if isinstance(a, type(b)) and isinstance(b, (dict, list)): return operator.is_(a, b) # general case if a == b: return True # null == undefined a_b = set((a, b)) if a_b & _nullish: return a_b <= _nullish a, b = _js_to_primitive(a), _js_to_primitive(b) if not isinstance(a, compat_basestring): a, b = b, a # Number to String: convert the string to a number # Conversion failure results in ... 
false if isinstance(a, compat_basestring): return float_or_none(a) == b return a == b def _js_neq(a, b): return not _js_eq(a, b) def _js_id_op(op): @wraps_op(op) def wrapped(a, b): if _NaN in (a, b): return op(_NaN, None) if not isinstance(a, (compat_basestring, compat_numeric_types)): a, b = b, a # strings are === if == # why 'a' is not 'a': https://stackoverflow.com/a/1504848 if isinstance(a, (compat_basestring, compat_numeric_types)): return a == b if op(0, 0) else a != b return op(a, b) return wrapped def _js_comp_op(op): @wraps_op(op) def wrapped(a, b): if JS_Undefined in (a, b): return False if isinstance(a, compat_basestring): b = compat_str(b or 0) elif isinstance(b, compat_basestring): a = compat_str(a or 0) return op(a or 0, b or 0) return wrapped def _js_ternary(cndn, if_true=True, if_false=False): """Simulate JS's ternary operator (cndn?if_true:if_false)""" if cndn in (False, None, 0, '', JS_Undefined, _NaN): return if_false return if_true def _js_unary_op(op): @wraps_op(op) def wrapped(a, _): return op(a) return wrapped # https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/typeof def _js_typeof(expr): with compat_contextlib_suppress(TypeError, KeyError): return { JS_Undefined: 'undefined', _NaN: 'number', _Infinity: 'number', True: 'boolean', False: 'boolean', None: 'object', }[expr] for t, n in ( (compat_basestring, 'string'), (compat_numeric_types, 'number'), ): if isinstance(expr, t): return n if callable(expr): return 'function' # TODO: Symbol, BigInt return 'object' # (op, definition) in order of binding priority, tightest first # avoid dict to maintain order # definition None => Defined in JSInterpreter._operator _OPERATORS = ( ('>>', _js_bit_op(operator.rshift, True)), ('<<', _js_bit_op(operator.lshift, True)), ('+', _js_add), ('-', _js_arith_op(operator.sub)), ('*', _js_arith_op(operator.mul)), ('%', _js_mod), ('/', _js_arith_op(operator.truediv, div=True)), ('**', _js_exp), ) _LOG_OPERATORS = ( ('|', 
_js_bit_op(operator.or_)), ('^', _js_bit_op(operator.xor)), ('&', _js_bit_op(operator.and_)), ) _SC_OPERATORS = ( ('?', None), ('??', None), ('||', None), ('&&', None), ) _UNARY_OPERATORS_X = ( ('void', _js_unary_op(lambda _: JS_Undefined)), ('typeof', _js_unary_op(_js_typeof)), # avoid functools.partial here since Py2 update_wrapper(partial) -> no __module__ ('!', _js_unary_op(lambda x: _js_ternary(x, if_true=False, if_false=True))), ) _COMP_OPERATORS = ( ('===', _js_id_op(operator.is_)), ('!==', _js_id_op(operator.is_not)), ('==', _js_eq), ('!=', _js_neq), ('<=', _js_comp_op(operator.le)), ('>=', _js_comp_op(operator.ge)), ('<', _js_comp_op(operator.lt)), ('>', _js_comp_op(operator.gt)), ) _OPERATOR_RE = '|'.join(map(lambda x: re.escape(x[0]), _OPERATORS + _LOG_OPERATORS + _SC_OPERATORS)) _NAME_RE = r'[a-zA-Z_$][\w$]*' _MATCHING_PARENS = dict(zip(*zip('()', '{}', '[]'))) _QUOTES = '\'"/' _NESTED_BRACKETS = r'[^[\]]+(?:\[[^[\]]+(?:\[[^\]]+\])?\])?' class JS_Break(ExtractorError): def __init__(self): ExtractorError.__init__(self, 'Invalid break') class JS_Continue(ExtractorError): def __init__(self): ExtractorError.__init__(self, 'Invalid continue') class JS_Throw(ExtractorError): def __init__(self, e): self.error = e ExtractorError.__init__(self, 'Uncaught exception ' + error_to_compat_str(e)) class LocalNameSpace(ChainMap): def __getitem__(self, key): try: return super(LocalNameSpace, self).__getitem__(key) except KeyError: return JS_Undefined def __setitem__(self, key, value): for scope in self.maps: if key in scope: scope[key] = value return self.maps[0][key] = value def __delitem__(self, key): raise NotImplementedError('Deleting is not supported') def __repr__(self): return 'LocalNameSpace({0!r})'.format(self.maps) class Debugger(object): ENABLED = False @staticmethod def write(*args, **kwargs): level = kwargs.get('level', 100) def truncate_string(s, left, right=0): if s is None or len(s) <= left + right: return s return '...'.join((s[:left - 3], s[-right:] if 
right else '')) write_string('[debug] JS: {0}{1}\n'.format( ' ' * (100 - level), ' '.join(truncate_string(compat_str(x), 50, 50) for x in args))) @classmethod def wrap_interpreter(cls, f): if not cls.ENABLED: return f @wraps(f) def interpret_statement(self, stmt, local_vars, allow_recursion, *args, **kwargs): if cls.ENABLED and stmt.strip(): cls.write(stmt, level=allow_recursion) try: ret, should_ret = f(self, stmt, local_vars, allow_recursion, *args, **kwargs) except Exception as e: if cls.ENABLED: if isinstance(e, ExtractorError): e = e.orig_msg cls.write('=> Raises:', e, '<-|', stmt, level=allow_recursion) raise if cls.ENABLED and stmt.strip(): if should_ret or repr(ret) != stmt: cls.write(['->', '=>'][bool(should_ret)], repr(ret), '<-|', stmt, level=allow_recursion) return ret, should_ret return interpret_statement class JSInterpreter(object): __named_object_counter = 0 _OBJ_NAME = '__youtube_dl_jsinterp_obj' OP_CHARS = None def __init__(self, code, objects=None): self.code, self._functions = code, {} self._objects = {} if objects is None else objects if type(self).OP_CHARS is None: type(self).OP_CHARS = self.OP_CHARS = self.__op_chars() class Exception(ExtractorError): def __init__(self, msg, *args, **kwargs): expr = kwargs.pop('expr', None) msg = str_or_none(msg, default='"None"') if expr is not None: msg = '{0} in: {1!r:.100}'.format(msg.rstrip(), expr) super(JSInterpreter.Exception, self).__init__(msg, *args, **kwargs) class JS_Object(object): def __getitem__(self, key): if hasattr(self, key): return getattr(self, key) raise KeyError(key) def dump(self): """Serialise the instance""" raise NotImplementedError class JS_RegExp(JS_Object): RE_FLAGS = { # special knowledge: Python's re flags are bitmask values, current max 128 # invent new bitmask values well above that for literal parsing # JS 'u' flag is effectively always set (surrogate pairs aren't seen), # but \u{...} and \p{...} escapes aren't handled); no additional JS 'v' # features are supported # TODO: 
execute matches with these flags (remaining: d, y) 'd': 1024, # Generate indices for substring matches 'g': 2048, # Global search 'i': re.I, # Case-insensitive search 'm': re.M, # Multi-line search 's': re.S, # Allows . to match newline characters 'u': re.U, # Treat a pattern as a sequence of unicode code points 'v': re.U, # Like 'u' with extended character class and \p{} syntax 'y': 4096, # Perform a "sticky" search that matches starting at the current position in the target string } def __init__(self, pattern_txt, flags=0): if isinstance(flags, compat_str): flags, _ = self.regex_flags(flags) self.__self = None pattern_txt = str_or_none(pattern_txt) or '(?:)' # escape unintended embedded flags pattern_txt = re.sub( r'(\(\?)([aiLmsux]*)(-[imsx]+:|(?<!\?)\))', lambda m: ''.join( (re.escape(m.group(1)), m.group(2), re.escape(m.group(3))) if m.group(3) == ')' else ('(?:', m.group(2), m.group(3))), pattern_txt) # Avoid https://github.com/python/cpython/issues/74534 self.source = pattern_txt.replace('[[', r'[\[') self.__flags = flags def __instantiate(self): if self.__self: return self.__self = re.compile(self.source, self.__flags) # Thx: https://stackoverflow.com/questions/44773522/setattr-on-python2-sre-sre-pattern for name in dir(self.__self): # Only these? Obviously __class__, __init__. # PyPy creates a __weakref__ attribute with value None # that can't be setattr'd but also can't need to be copied. 
if name in ('__class__', '__init__', '__weakref__'): continue if name == 'flags': setattr(self, name, getattr(self.__self, name, self.__flags)) else: setattr(self, name, getattr(self.__self, name)) def __getattr__(self, name): self.__instantiate() if name == 'pattern': self.pattern = self.source return self.pattern elif hasattr(self.__self, name): v = getattr(self.__self, name) setattr(self, name, v) return v elif name in ('groupindex', 'groups'): return 0 if name == 'groupindex' else {} else: flag_attrs = ( # order by 2nd elt ('hasIndices', 'd'), ('global', 'g'), ('ignoreCase', 'i'), ('multiline', 'm'), ('dotAll', 's'), ('unicode', 'u'), ('unicodeSets', 'v'), ('sticky', 'y'), ) for k, c in flag_attrs: if name == k: return bool(self.RE_FLAGS[c] & self.__flags) else: if name == 'flags': return ''.join( (c if self.RE_FLAGS[c] & self.__flags else '') for _, c in flag_attrs) raise AttributeError('{0} has no attribute named {1}'.format(self, name)) @classmethod def regex_flags(cls, expr): flags = 0 if not expr: return flags, expr for idx, ch in enumerate(expr): if ch not in cls.RE_FLAGS: break flags |= cls.RE_FLAGS[ch] return flags, expr[idx + 1:] def dump(self): return '(/{0}/{1})'.format( re.sub(r'(?<!\\)/', r'\/', self.source), self.flags) @staticmethod def escape(string_): return re.escape(string_) class JS_Date(JS_Object): _t = None @staticmethod def __ymd_etc(*args, **kw_is_utc): # args: year, monthIndex, day, hours, minutes, seconds, milliseconds is_utc = kw_is_utc.get('is_utc', False) args = list(args[:7]) args += [0] * (9 - len(args)) args[1] += 1 # month 0..11 -> 1..12 ms = args[6] for i in range(6, 9): args[i] = -1 # don't know if is_utc: args[-1] = 1 # TODO: [MDN] When a segment overflows or underflows its expected # range, it usually "carries over to" or "borrows from" the higher segment. 
try: mktime = calendar.timegm if is_utc else time.mktime return mktime(time.struct_time(args)) * 1000 + ms except (OverflowError, ValueError): return None @classmethod def UTC(cls, *args): t = cls.__ymd_etc(*args, is_utc=True) return _NaN if t is None else t @staticmethod def parse(date_str, **kw_is_raw): is_raw = kw_is_raw.get('is_raw', False) t = unified_timestamp(str_or_none(date_str), False) return int(t * 1000) if t is not None else t if is_raw else _NaN @staticmethod def now(**kw_is_raw): is_raw = kw_is_raw.get('is_raw', False) t = time.time() return int(t * 1000) if t is not None else t if is_raw else _NaN def __init__(self, *args): if not args: args = [self.now(is_raw=True)] if len(args) == 1: if isinstance(args[0], JSInterpreter.JS_Date): self._t = int_or_none(args[0].valueOf(), default=None) else: arg_type = _js_typeof(args[0]) if arg_type == 'string': self._t = self.parse(args[0], is_raw=True) elif arg_type == 'number': self._t = int(args[0]) else: self._t = self.__ymd_etc(*args) def toString(self): try: return time.strftime('%a %b %0d %Y %H:%M:%S %Z%z', self._t).rstrip() except TypeError: return "Invalid Date" def valueOf(self): return _NaN if self._t is None else self._t def dump(self): return '(new Date({0}))'.format(self.toString()) @classmethod def __op_chars(cls): op_chars = set(';,[') for op in cls._all_operators(): if op[0].isalpha(): continue op_chars.update(op[0]) return op_chars def _named_object(self, namespace, obj): self.__named_object_counter += 1 name = '%s%d' % (self._OBJ_NAME, self.__named_object_counter) if callable(obj) and not isinstance(obj, function_with_repr): obj = function_with_repr(obj, 'F<%s>' % (self.__named_object_counter, )) namespace[name] = obj return name @classmethod def _separate(cls, expr, delim=',', max_split=None, skip_delims=None): if not expr: return # collections.Counter() is ~10% slower in both 2.7 and 3.9 counters = dict((k, 0) for k in _MATCHING_PARENS.values()) start, splits, pos, delim_len = 0, 0, 0, 
len(delim) - 1 in_quote, escaping, after_op, in_regex_char_group = None, False, True, False skipping = 0 if skip_delims: skip_delims = variadic(skip_delims) skip_txt = None for idx, char in enumerate(expr): if skip_txt and idx <= skip_txt[1]: continue paren_delta = 0 if not in_quote: if char == '/' and expr[idx:idx + 2] == '/*': # skip a comment skip_txt = expr[idx:].find('*/', 2) skip_txt = [idx, idx + skip_txt + 1] if skip_txt >= 2 else None if skip_txt: continue if char in _MATCHING_PARENS: counters[_MATCHING_PARENS[char]] += 1 paren_delta = 1 elif char in counters: counters[char] -= 1 paren_delta = -1 if not escaping: if char in _QUOTES and in_quote in (char, None): if in_quote or after_op or char != '/': in_quote = None if in_quote and not in_regex_char_group else char elif in_quote == '/' and char in '[]': in_regex_char_group = char == '[' escaping = not escaping and in_quote and char == '\\' after_op = not in_quote and (char in cls.OP_CHARS or paren_delta > 0 or (after_op and char.isspace())) if char != delim[pos] or any(counters.values()) or in_quote: pos = skipping = 0 continue elif skipping > 0: skipping -= 1 continue elif pos == 0 and skip_delims: here = expr[idx:] for s in skip_delims: if here.startswith(s) and s: skipping = len(s) - 1 break if skipping > 0: continue if pos < delim_len: pos += 1 continue if skip_txt and skip_txt[0] >= start and skip_txt[1] <= idx - delim_len: yield expr[start:skip_txt[0]] + expr[skip_txt[1] + 1: idx - delim_len] else: yield expr[start: idx - delim_len] skip_txt = None start, pos = idx + 1, 0 splits += 1 if max_split and splits >= max_split: break if skip_txt and skip_txt[0] >= start: yield expr[start:skip_txt[0]] + expr[skip_txt[1] + 1:] else: yield expr[start:] @classmethod def _separate_at_paren(cls, expr, delim=None): if delim is None: delim = expr and _MATCHING_PARENS[expr[0]] separated = list(cls._separate(expr, delim, 1)) if len(separated) < 2: raise cls.Exception('No terminating paren {delim} in 
{expr!r:.5500}'.format(**locals())) return separated[0][1:].strip(), separated[1].strip() @staticmethod def _all_operators(_cached=[]): if not _cached: _cached.extend(itertools.chain( # Ref: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Operator_Precedence _SC_OPERATORS, _LOG_OPERATORS, _COMP_OPERATORS, _OPERATORS, _UNARY_OPERATORS_X)) return _cached def _separate_at_op(self, expr, max_split=None): for op, _ in self._all_operators(): # hackety: </> have higher priority than <</>>, but don't confuse them skip_delim = (op + op) if op in '<>*?' else None if op == '?': skip_delim = (skip_delim, '?.') separated = list(self._separate(expr, op, skip_delims=skip_delim)) if len(separated) < 2: continue right_expr = separated.pop() # handle operators that are both unary and binary, minimal BODMAS if op in ('+', '-'): # simplify/adjust consecutive instances of these operators undone = 0 separated = [s.strip() for s in separated] while len(separated) > 1 and not separated[-1]: undone += 1 separated.pop() if op == '-' and undone % 2 != 0: right_expr = op + right_expr elif op == '+': while len(separated) > 1 and set(separated[-1]) <= self.OP_CHARS: right_expr = separated.pop() + right_expr if separated[-1][-1:] in self.OP_CHARS: right_expr = separated.pop() + right_expr # hanging op at end of left => unary + (strip) or - (push right) separated.append(right_expr) dm_ops = ('*', '%', '/', '**') dm_chars = set(''.join(dm_ops)) def yield_terms(s): skip = False for i, term in enumerate(s[:-1]): if skip: skip = False continue if not (dm_chars & set(term)): yield term continue for dm_op in dm_ops: bodmas = list(self._separate(term, dm_op, skip_delims=skip_delim)) if len(bodmas) > 1 and not bodmas[-1].strip(): bodmas[-1] = (op if op == '-' else '') + s[i + 1] yield dm_op.join(bodmas) skip = True break else: if term: yield term if not skip and s[-1]: yield s[-1] separated = list(yield_terms(separated)) right_expr = separated.pop() if len(separated) > 1 else 
None expr = op.join(separated) if right_expr is None: continue return op, separated, right_expr def _operator(self, op, left_val, right_expr, expr, local_vars, allow_recursion): if op in ('||', '&&'): if (op == '&&') ^ _js_ternary(left_val): return left_val # short circuiting elif op == '??': if left_val not in (None, JS_Undefined): return left_val elif op == '?': right_expr = _js_ternary(left_val, *self._separate(right_expr, ':', 1)) right_val = self.interpret_expression(right_expr, local_vars, allow_recursion) if right_expr else left_val opfunc = op and next((v for k, v in self._all_operators() if k == op), None) if not opfunc: return right_val try: # print('Eval:', opfunc.__name__, left_val, right_val) return opfunc(left_val, right_val) except Exception as e: raise self.Exception('Failed to evaluate {left_val!r:.50} {op} {right_val!r:.50}'.format(**locals()), expr, cause=e) def _index(self, obj, idx, allow_undefined=None): if idx == 'length' and isinstance(obj, list): return len(obj) try: return obj[int(idx)] if isinstance(obj, list) else obj[compat_str(idx)] except (TypeError, KeyError, IndexError, ValueError) as e: # allow_undefined is None gives correct behaviour if allow_undefined or ( allow_undefined is None and not isinstance(e, TypeError)): return JS_Undefined raise self.Exception('Cannot get index {idx!r:.100}'.format(**locals()), expr=repr(obj), cause=e) def _dump(self, obj, namespace): if obj is JS_Undefined: return 'undefined' try: return json.dumps(obj) except TypeError: return self._named_object(namespace, obj) # used below _VAR_RET_THROW_RE = re.compile(r'''(?x) (?:(?P<var>var|const|let)\s+|(?P<ret>return)(?:\s+|(?=["'])|$)|(?P<throw>throw)\s+) ''') _COMPOUND_RE = re.compile(r'''(?x) (?P<try>try)\s*\{| (?P<if>if)\s*\(| (?P<switch>switch)\s*\(| (?P<for>for)\s*\(| (?P<while>while)\s*\( ''') _FINALLY_RE = re.compile(r'finally\s*\{') _SWITCH_RE = re.compile(r'switch\s*\(') def _eval_operator(self, op, left_expr, right_expr, expr, local_vars, 
allow_recursion): left_val = self.interpret_expression(left_expr, local_vars, allow_recursion) return self._operator(op, left_val, right_expr, expr, local_vars, allow_recursion) @Debugger.wrap_interpreter def interpret_statement(self, stmt, local_vars, allow_recursion=100): if allow_recursion < 0: raise self.Exception('Recursion limit reached') allow_recursion -= 1 # print('At: ' + stmt[:60]) should_return = False # fails on (eg) if (...) stmt1; else stmt2; sub_statements = list(self._separate(stmt, ';')) or [''] expr = stmt = sub_statements.pop().strip() for sub_stmt in sub_statements: ret, should_return = self.interpret_statement(sub_stmt, local_vars, allow_recursion) if should_return: return ret, should_return m = self._VAR_RET_THROW_RE.match(stmt) if m: expr = stmt[len(m.group(0)):].strip() if m.group('throw'): raise JS_Throw(self.interpret_expression(expr, local_vars, allow_recursion)) should_return = 'return' if m.group('ret') else False if not expr: return None, should_return if expr[0] in _QUOTES: inner, outer = self._separate(expr, expr[0], 1) if expr[0] == '/': flags, outer = self.JS_RegExp.regex_flags(outer) inner = self.JS_RegExp(inner[1:], flags=flags) else: inner = json.loads(js_to_json(inner + expr[0])) # , strict=True)) if not outer: return inner, should_return expr = self._named_object(local_vars, inner) + outer new_kw, _, obj = expr.partition('new ') if not new_kw: for klass, konstr in (('Date', lambda *x: self.JS_Date(*x).valueOf()), ('RegExp', self.JS_RegExp), ('Error', self.Exception)): if not obj.startswith(klass + '('): continue left, right = self._separate_at_paren(obj[len(klass):]) argvals = self.interpret_iter(left, local_vars, allow_recursion) expr = konstr(*argvals) if expr is None: raise self.Exception('Failed to parse {klass} {left!r:.100}'.format(**locals()), expr=expr) expr = self._dump(expr, local_vars) + right break else: raise self.Exception('Unsupported object {obj:.100}'.format(**locals()), expr=expr) # apply unary operators 
(see new above) for op, _ in _UNARY_OPERATORS_X: if not expr.startswith(op): continue operand = expr[len(op):] if not operand or (op.isalpha() and operand[0] != ' '): continue separated = self._separate_at_op(operand, max_split=1) if separated: next_op, separated, right_expr = separated separated.append(right_expr) operand = next_op.join(separated) return self._eval_operator(op, operand, '', expr, local_vars, allow_recursion), should_return if expr.startswith('{'): inner, outer = self._separate_at_paren(expr) # try for object expression (Map) sub_expressions = [list(self._separate(sub_expr.strip(), ':', 1)) for sub_expr in self._separate(inner)] if all(len(sub_expr) == 2 for sub_expr in sub_expressions): return dict( (key_expr if re.match(_NAME_RE, key_expr) else key_expr, self.interpret_expression(val_expr, local_vars, allow_recursion)) for key_expr, val_expr in sub_expressions), should_return # or statement list inner, should_abort = self.interpret_statement(inner, local_vars, allow_recursion) if not outer or should_abort: return inner, should_abort or should_return else: expr = self._dump(inner, local_vars) + outer if expr.startswith('('): m = re.match(r'\((?P<d>[a-z])%(?P<e>[a-z])\.length\+(?P=e)\.length\)%(?P=e)\.length', expr) if m: # short-cut eval of frequently used `(d%e.length+e.length)%e.length`, worth ~6% on `pytest -k test_nsig` outer = None inner, should_abort = self._offset_e_by_d(m.group('d'), m.group('e'), local_vars) else:
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
true
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/__init__.py
youtube_dl/__init__.py
#!/usr/bin/env python # coding: utf-8 from __future__ import unicode_literals __license__ = 'Public Domain' import io import os import random import sys from .options import ( parseOpts, ) from .compat import ( compat_getpass, compat_register_utf8, compat_shlex_split, _workaround_optparse_bug9161, ) from .utils import ( _UnsafeExtensionError, DateRange, decodeOption, DEFAULT_OUTTMPL, DownloadError, expand_path, match_filter_func, MaxDownloadsReached, preferredencoding, read_batch_urls, SameFileError, setproctitle, std_headers, write_string, render_table, ) from .update import update_self from .downloader import ( FileDownloader, ) from .extractor import gen_extractors, list_extractors from .extractor.adobepass import MSO_INFO from .YoutubeDL import YoutubeDL def _real_main(argv=None): # Compatibility fix for Windows compat_register_utf8() _workaround_optparse_bug9161() setproctitle('youtube-dl') parser, opts, args = parseOpts(argv) # Set user agent if opts.user_agent is not None: std_headers['User-Agent'] = opts.user_agent # Set referer if opts.referer is not None: std_headers['Referer'] = opts.referer # Custom HTTP headers if opts.headers is not None: for h in opts.headers: if ':' not in h: parser.error('wrong header formatting, it should be key:value, not "%s"' % h) key, value = h.split(':', 1) if opts.verbose: write_string('[debug] Adding header from command line option %s:%s\n' % (key, value)) std_headers[key] = value # Dump user agent if opts.dump_user_agent: write_string(std_headers['User-Agent'] + '\n', out=sys.stdout) sys.exit(0) # Batch file verification batch_urls = [] if opts.batchfile is not None: try: if opts.batchfile == '-': batchfd = sys.stdin else: batchfd = io.open( expand_path(opts.batchfile), 'r', encoding='utf-8', errors='ignore') batch_urls = read_batch_urls(batchfd) if opts.verbose: write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n') except IOError: sys.exit('ERROR: batch file %s could not be read' % opts.batchfile) all_urls = 
batch_urls + [url.strip() for url in args] # batch_urls are already striped in read_batch_urls _enc = preferredencoding() all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls] if opts.list_extractors: for ie in list_extractors(opts.age_limit): write_string(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else '') + '\n', out=sys.stdout) matchedUrls = [url for url in all_urls if ie.suitable(url)] for mu in matchedUrls: write_string(' ' + mu + '\n', out=sys.stdout) sys.exit(0) if opts.list_extractor_descriptions: for ie in list_extractors(opts.age_limit): if not ie._WORKING: continue desc = getattr(ie, 'IE_DESC', ie.IE_NAME) if desc is False: continue if hasattr(ie, 'SEARCH_KEY'): _SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny', 'burping cow') _COUNTS = ('', '5', '10', 'all') desc += ' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES)) write_string(desc + '\n', out=sys.stdout) sys.exit(0) if opts.ap_list_mso: table = [[mso_id, mso_info['name']] for mso_id, mso_info in MSO_INFO.items()] write_string('Supported TV Providers:\n' + render_table(['mso', 'mso name'], table) + '\n', out=sys.stdout) sys.exit(0) # Conflicting, missing and erroneous options if opts.usenetrc and (opts.username is not None or opts.password is not None): parser.error('using .netrc conflicts with giving username/password') if opts.password is not None and opts.username is None: parser.error('account username missing\n') if opts.ap_password is not None and opts.ap_username is None: parser.error('TV Provider account username missing\n') if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid): parser.error('using output template conflicts with using title, video ID or auto number') if opts.autonumber_size is not None: if opts.autonumber_size <= 0: parser.error('auto number size must be positive') if 
opts.autonumber_start is not None: if opts.autonumber_start < 0: parser.error('auto number start must be positive or 0') if opts.usetitle and opts.useid: parser.error('using title conflicts with using video ID') if opts.username is not None and opts.password is None: opts.password = compat_getpass('Type account password and press [Return]: ') if opts.ap_username is not None and opts.ap_password is None: opts.ap_password = compat_getpass('Type TV provider account password and press [Return]: ') if opts.ratelimit is not None: numeric_limit = FileDownloader.parse_bytes(opts.ratelimit) if numeric_limit is None: parser.error('invalid rate limit specified') opts.ratelimit = numeric_limit if opts.min_filesize is not None: numeric_limit = FileDownloader.parse_bytes(opts.min_filesize) if numeric_limit is None: parser.error('invalid min_filesize specified') opts.min_filesize = numeric_limit if opts.max_filesize is not None: numeric_limit = FileDownloader.parse_bytes(opts.max_filesize) if numeric_limit is None: parser.error('invalid max_filesize specified') opts.max_filesize = numeric_limit if opts.sleep_interval is not None: if opts.sleep_interval < 0: parser.error('sleep interval must be positive or 0') if opts.max_sleep_interval is not None: if opts.max_sleep_interval < 0: parser.error('max sleep interval must be positive or 0') if opts.sleep_interval is None: parser.error('min sleep interval must be specified, use --min-sleep-interval') if opts.max_sleep_interval < opts.sleep_interval: parser.error('max sleep interval must be greater than or equal to min sleep interval') else: opts.max_sleep_interval = opts.sleep_interval if opts.ap_mso and opts.ap_mso not in MSO_INFO: parser.error('Unsupported TV Provider, use --ap-list-mso to get a list of supported TV Providers') if opts.no_check_extensions: _UnsafeExtensionError.lenient = True def parse_retries(retries): if retries in ('inf', 'infinite'): parsed_retries = float('inf') else: try: parsed_retries = int(retries) except 
(TypeError, ValueError): parser.error('invalid retry count specified') return parsed_retries if opts.retries is not None: opts.retries = parse_retries(opts.retries) if opts.fragment_retries is not None: opts.fragment_retries = parse_retries(opts.fragment_retries) if opts.buffersize is not None: numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize) if numeric_buffersize is None: parser.error('invalid buffer size specified') opts.buffersize = numeric_buffersize if opts.http_chunk_size is not None: numeric_chunksize = FileDownloader.parse_bytes(opts.http_chunk_size) if not numeric_chunksize: parser.error('invalid http chunk size specified') opts.http_chunk_size = numeric_chunksize if opts.playliststart <= 0: raise ValueError('Playlist start must be positive') if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart: raise ValueError('Playlist end must be greater than playlist start') if opts.extractaudio: if opts.audioformat not in ['best', 'aac', 'flac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']: parser.error('invalid audio format specified') if opts.audioquality: opts.audioquality = opts.audioquality.strip('k').strip('K') if not opts.audioquality.isdigit(): parser.error('invalid audio quality specified') if opts.recodevideo is not None: if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv', 'avi']: parser.error('invalid video recode format specified') if opts.convertsubtitles is not None: if opts.convertsubtitles not in ['srt', 'vtt', 'ass', 'lrc']: parser.error('invalid subtitle format specified') if opts.date is not None: date = DateRange.day(opts.date) else: date = DateRange(opts.dateafter, opts.datebefore) # Do not download videos when there are audio-only formats if opts.extractaudio and not opts.keepvideo and opts.format is None: opts.format = 'bestaudio/best' # --all-sub automatically sets --write-sub if --write-auto-sub is not given # this was the old behaviour if only --all-sub was given. 
if opts.allsubtitles and not opts.writeautomaticsub: opts.writesubtitles = True outtmpl = ((opts.outtmpl is not None and opts.outtmpl) or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s') or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s') or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s') or (opts.usetitle and '%(title)s-%(id)s.%(ext)s') or (opts.useid and '%(id)s.%(ext)s') or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s') or DEFAULT_OUTTMPL) if not os.path.splitext(outtmpl)[1] and opts.extractaudio: parser.error('Cannot download a video and extract audio into the same' ' file! Use "{0}.%(ext)s" instead of "{0}" as the output' ' template'.format(outtmpl)) any_getting = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json any_printing = opts.print_json download_archive_fn = expand_path(opts.download_archive) if opts.download_archive is not None else opts.download_archive # PostProcessors postprocessors = [] if opts.metafromtitle: postprocessors.append({ 'key': 'MetadataFromTitle', 'titleformat': opts.metafromtitle }) if opts.extractaudio: postprocessors.append({ 'key': 'FFmpegExtractAudio', 'preferredcodec': opts.audioformat, 'preferredquality': opts.audioquality, 'nopostoverwrites': opts.nopostoverwrites, }) if opts.recodevideo: postprocessors.append({ 'key': 'FFmpegVideoConvertor', 'preferedformat': opts.recodevideo, }) # FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and # FFmpegExtractAudioPP as containers before conversion may not support # metadata (3gp, webm, etc.) # And this post-processor should be placed before other metadata # manipulating post-processors (FFmpegEmbedSubtitle) to prevent loss of # extra metadata. By default ffmpeg preserves metadata applicable for both # source and target containers. 
From this point the container won't change, # so metadata can be added here. if opts.addmetadata: postprocessors.append({'key': 'FFmpegMetadata'}) if opts.convertsubtitles: postprocessors.append({ 'key': 'FFmpegSubtitlesConvertor', 'format': opts.convertsubtitles, }) if opts.embedsubtitles: postprocessors.append({ 'key': 'FFmpegEmbedSubtitle', }) if opts.embedthumbnail: already_have_thumbnail = opts.writethumbnail or opts.write_all_thumbnails postprocessors.append({ 'key': 'EmbedThumbnail', 'already_have_thumbnail': already_have_thumbnail }) if not already_have_thumbnail: opts.writethumbnail = True # XAttrMetadataPP should be run after post-processors that may change file # contents if opts.xattrs: postprocessors.append({'key': 'XAttrMetadata'}) # Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way. # So if the user is able to remove the file before your postprocessor runs it might cause a few problems. if opts.exec_cmd: postprocessors.append({ 'key': 'ExecAfterDownload', 'exec_cmd': opts.exec_cmd, }) external_downloader_args = None if opts.external_downloader_args: external_downloader_args = compat_shlex_split(opts.external_downloader_args) postprocessor_args = None if opts.postprocessor_args: postprocessor_args = compat_shlex_split(opts.postprocessor_args) match_filter = ( None if opts.match_filter is None else match_filter_func(opts.match_filter)) ydl_opts = { 'usenetrc': opts.usenetrc, 'username': opts.username, 'password': opts.password, 'twofactor': opts.twofactor, 'videopassword': opts.videopassword, 'ap_mso': opts.ap_mso, 'ap_username': opts.ap_username, 'ap_password': opts.ap_password, 'quiet': (opts.quiet or any_getting or any_printing), 'no_warnings': opts.no_warnings, 'forceurl': opts.geturl, 'forcetitle': opts.gettitle, 'forceid': opts.getid, 'forcethumbnail': opts.getthumbnail, 'forcedescription': opts.getdescription, 'forceduration': opts.getduration, 'forcefilename': opts.getfilename, 
'forceformat': opts.getformat, 'forcejson': opts.dumpjson or opts.print_json, 'dump_single_json': opts.dump_single_json, 'simulate': opts.simulate or any_getting, 'skip_download': opts.skip_download, 'format': opts.format, 'listformats': opts.listformats, 'outtmpl': outtmpl, 'outtmpl_na_placeholder': opts.outtmpl_na_placeholder, 'autonumber_size': opts.autonumber_size, 'autonumber_start': opts.autonumber_start, 'restrictfilenames': opts.restrictfilenames, 'ignoreerrors': opts.ignoreerrors, 'force_generic_extractor': opts.force_generic_extractor, 'ratelimit': opts.ratelimit, 'nooverwrites': opts.nooverwrites, 'retries': opts.retries, 'fragment_retries': opts.fragment_retries, 'skip_unavailable_fragments': opts.skip_unavailable_fragments, 'keep_fragments': opts.keep_fragments, 'buffersize': opts.buffersize, 'noresizebuffer': opts.noresizebuffer, 'http_chunk_size': opts.http_chunk_size, 'continuedl': opts.continue_dl, 'noprogress': opts.noprogress, 'progress_with_newline': opts.progress_with_newline, 'playliststart': opts.playliststart, 'playlistend': opts.playlistend, 'playlistreverse': opts.playlist_reverse, 'playlistrandom': opts.playlist_random, 'noplaylist': opts.noplaylist, 'logtostderr': opts.outtmpl == '-', 'consoletitle': opts.consoletitle, 'nopart': opts.nopart, 'updatetime': opts.updatetime, 'writedescription': opts.writedescription, 'writeannotations': opts.writeannotations, 'writeinfojson': opts.writeinfojson, 'writethumbnail': opts.writethumbnail, 'write_all_thumbnails': opts.write_all_thumbnails, 'writesubtitles': opts.writesubtitles, 'writeautomaticsub': opts.writeautomaticsub, 'allsubtitles': opts.allsubtitles, 'listsubtitles': opts.listsubtitles, 'subtitlesformat': opts.subtitlesformat, 'subtitleslangs': opts.subtitleslangs, 'matchtitle': decodeOption(opts.matchtitle), 'rejecttitle': decodeOption(opts.rejecttitle), 'max_downloads': opts.max_downloads, 'prefer_free_formats': opts.prefer_free_formats, 'verbose': opts.verbose, 'dump_intermediate_pages': 
opts.dump_intermediate_pages, 'write_pages': opts.write_pages, 'test': opts.test, 'keepvideo': opts.keepvideo, 'min_filesize': opts.min_filesize, 'max_filesize': opts.max_filesize, 'min_views': opts.min_views, 'max_views': opts.max_views, 'daterange': date, 'cachedir': opts.cachedir, 'youtube_print_sig_code': opts.youtube_print_sig_code, 'age_limit': opts.age_limit, 'download_archive': download_archive_fn, 'cookiefile': opts.cookiefile, 'nocheckcertificate': opts.no_check_certificate, 'prefer_insecure': opts.prefer_insecure, 'proxy': opts.proxy, 'socket_timeout': opts.socket_timeout, 'bidi_workaround': opts.bidi_workaround, 'debug_printtraffic': opts.debug_printtraffic, 'prefer_ffmpeg': opts.prefer_ffmpeg, 'include_ads': opts.include_ads, 'default_search': opts.default_search, 'youtube_include_dash_manifest': opts.youtube_include_dash_manifest, 'youtube_player_js_version': opts.youtube_player_js_version, 'youtube_player_js_variant': opts.youtube_player_js_variant, 'encoding': opts.encoding, 'extract_flat': opts.extract_flat, 'mark_watched': opts.mark_watched, 'merge_output_format': opts.merge_output_format, 'postprocessors': postprocessors, 'fixup': opts.fixup, 'source_address': opts.source_address, 'call_home': opts.call_home, 'sleep_interval': opts.sleep_interval, 'max_sleep_interval': opts.max_sleep_interval, 'external_downloader': opts.external_downloader, 'list_thumbnails': opts.list_thumbnails, 'playlist_items': opts.playlist_items, 'xattr_set_filesize': opts.xattr_set_filesize, 'match_filter': match_filter, 'no_color': opts.no_color, 'ffmpeg_location': opts.ffmpeg_location, 'hls_prefer_native': opts.hls_prefer_native, 'hls_use_mpegts': opts.hls_use_mpegts, 'external_downloader_args': external_downloader_args, 'postprocessor_args': postprocessor_args, 'cn_verification_proxy': opts.cn_verification_proxy, 'geo_verification_proxy': opts.geo_verification_proxy, 'config_location': opts.config_location, 'geo_bypass': opts.geo_bypass, 'geo_bypass_country': 
opts.geo_bypass_country, 'geo_bypass_ip_block': opts.geo_bypass_ip_block, # just for deprecation check 'autonumber': opts.autonumber if opts.autonumber is True else None, 'usetitle': opts.usetitle if opts.usetitle is True else None, } with YoutubeDL(ydl_opts) as ydl: # Update version if opts.update_self: update_self(ydl.to_screen, opts.verbose, ydl._opener) # Remove cache dir if opts.rm_cachedir: ydl.cache.remove() # Maybe do nothing if (len(all_urls) < 1) and (opts.load_info_filename is None): if opts.update_self or opts.rm_cachedir: sys.exit() ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv) parser.error( 'You must provide at least one URL.\n' 'Type youtube-dl --help to see a list of all options.') try: if opts.load_info_filename is not None: retcode = ydl.download_with_info_file(expand_path(opts.load_info_filename)) else: retcode = ydl.download(all_urls) except MaxDownloadsReached: ydl.to_screen('--max-download limit reached, aborting.') retcode = 101 sys.exit(retcode) def main(argv=None): try: _real_main(argv) except DownloadError: sys.exit(1) except SameFileError: sys.exit('ERROR: fixed output name but more than one file to download') except KeyboardInterrupt: sys.exit('\nERROR: Interrupted by user') __all__ = ['main', 'YoutubeDL', 'gen_extractors', 'list_extractors']
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/aes.py
youtube_dl/aes.py
from __future__ import unicode_literals

from math import ceil

from .compat import compat_b64decode
from .utils import bytes_to_intlist, intlist_to_bytes

# Pure-Python AES primitives operating on lists of ints (one byte per entry).
# The 16-byte AES state is kept as a flat 16-element list; judging by the
# indexing in shift_rows/mix_columns the layout appears column-major
# (4 columns of 4 bytes) — NOTE(review): confirm against FIPS-197 if modifying.
BLOCK_SIZE_BYTES = 16


def pkcs7_padding(data):
    """
    PKCS#7 padding

    @param {int[]} data        cleartext
    @returns {int[]}           padding data
    """
    # Pad up to the next multiple of the block size; a full extra block is
    # appended when len(data) is already a multiple (remaining_length == 16).
    remaining_length = BLOCK_SIZE_BYTES - len(data) % BLOCK_SIZE_BYTES
    return data + [remaining_length] * remaining_length


def aes_ctr_decrypt(data, key, counter):
    """
    Decrypt with aes in counter mode

    @param {int[]} data        cipher
    @param {int[]} key         16/24/32-Byte cipher key
    @param {instance} counter  Instance whose next_value function (@returns {int[]} 16-Byte block)
                               returns the next counter block
    @returns {int[]}           decrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    decrypted_data = []
    for i in range(block_count):
        counter_block = counter.next_value()
        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        # Zero-pad a short final block so aes_encrypt always sees 16 bytes.
        block += [0] * (BLOCK_SIZE_BYTES - len(block))

        # CTR mode: encrypt the counter, XOR keystream with the ciphertext.
        cipher_counter_block = aes_encrypt(counter_block, expanded_key)
        decrypted_data += xor(block, cipher_counter_block)
    # Trim keystream bytes that only covered the zero padding.
    decrypted_data = decrypted_data[:len(data)]

    return decrypted_data


def aes_cbc_decrypt(data, key, iv):
    """
    Decrypt with aes in CBC mode

    @param {int[]} data        cipher
    @param {int[]} key         16/24/32-Byte cipher key
    @param {int[]} iv          16-Byte IV
    @returns {int[]}           decrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    decrypted_data = []
    previous_cipher_block = iv
    for i in range(block_count):
        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        block += [0] * (BLOCK_SIZE_BYTES - len(block))

        decrypted_block = aes_decrypt(block, expanded_key)
        # CBC: XOR with the previous ciphertext block (IV for the first one).
        decrypted_data += xor(decrypted_block, previous_cipher_block)
        previous_cipher_block = block
    # Note: no PKCS#7 unpadding here — callers get the raw trimmed plaintext.
    decrypted_data = decrypted_data[:len(data)]

    return decrypted_data


def aes_cbc_encrypt(data, key, iv):
    """
    Encrypt with aes in CBC mode. Using PKCS#7 padding

    @param {int[]} data        cleartext
    @param {int[]} key         16/24/32-Byte cipher key
    @param {int[]} iv          16-Byte IV
    @returns {int[]}           encrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    encrypted_data = []
    previous_cipher_block = iv
    for i in range(block_count):
        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        # Pads every block; only the final (possibly short) block actually
        # gains padding bytes since earlier blocks are already 16 bytes long.
        block = pkcs7_padding(block)
        mixed_block = xor(block, previous_cipher_block)

        encrypted_block = aes_encrypt(mixed_block, expanded_key)
        encrypted_data += encrypted_block

        previous_cipher_block = encrypted_block

    return encrypted_data


def aes_ecb_encrypt(data, key):
    """
    Encrypt with aes in ECB mode. Using PKCS#7 padding

    @param {int[]} data        cleartext
    @param {int[]} key         16/24/32-Byte cipher key
    @returns {int[]}           encrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    encrypted_data = []
    for i in range(block_count):
        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        block = pkcs7_padding(block)

        encrypted_block = aes_encrypt(block, expanded_key)
        encrypted_data += encrypted_block

    return encrypted_data


def key_expansion(data):
    """
    Generate key schedule

    @param {int[]} data        16/24/32-Byte cipher key
    @returns {int[]}           176/208/240-Byte expanded key
    """
    data = data[:]  # copy, so the caller's key list is not mutated
    rcon_iteration = 1
    key_size_bytes = len(data)
    # rounds + 1 subkeys of 16 bytes: 11/13/15 for AES-128/192/256.
    expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES

    while len(data) < expanded_key_size_bytes:
        # First word of each key-size chunk: rotate + S-box + round constant.
        temp = data[-4:]
        temp = key_schedule_core(temp, rcon_iteration)
        rcon_iteration += 1
        # data[-key_size_bytes: 4 - key_size_bytes] is the word one key-length
        # back, i.e. W[i - Nk] in FIPS-197 terms.
        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

        for _ in range(3):
            temp = data[-4:]
            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

        # AES-256 only: extra SubWord step on the word at position Nk/2.
        if key_size_bytes == 32:
            temp = data[-4:]
            temp = sub_bytes(temp)
            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

        # Remaining plain words per chunk: 3 for AES-256, 2 for AES-192,
        # 0 for AES-128 (its 4 words are already emitted above).
        for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
            temp = data[-4:]
            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
    # The loop may overshoot by part of a chunk; trim to the exact size.
    data = data[:expanded_key_size_bytes]

    return data


def aes_encrypt(data, expanded_key):
    """
    Encrypt one block with aes

    @param {int[]} data          16-Byte state
    @param {int[]} expanded_key  176/208/240-Byte expanded key
    @returns {int[]}             16-Byte cipher
    """
    rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1

    # Initial AddRoundKey.
    data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
    for i in range(1, rounds + 1):
        data = sub_bytes(data)
        data = shift_rows(data)
        if i != rounds:  # the final round skips MixColumns (FIPS-197)
            data = mix_columns(data)
        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])

    return data


def aes_decrypt(data, expanded_key):
    """
    Decrypt one block with aes

    @param {int[]} data          16-Byte cipher
    @param {int[]} expanded_key  176/208/240-Byte expanded key
    @returns {int[]}             16-Byte state
    """
    rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1

    # Inverse of aes_encrypt: same subkeys consumed in reverse order.
    for i in range(rounds, 0, -1):
        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
        if i != rounds:  # mirror of the skipped MixColumns in the last round
            data = mix_columns_inv(data)
        data = shift_rows_inv(data)
        data = sub_bytes_inv(data)
    data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])

    return data


def aes_decrypt_text(data, password, key_size_bytes):
    """
    Decrypt text
    - The first 8 Bytes of decoded 'data' are the 8 high Bytes of the counter
    - The cipher key is retrieved by encrypting the first 16 Byte of 'password'
      with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's)
    - Mode of operation is 'counter'

    @param {str} data                    Base64 encoded string
    @param {str,unicode} password        Password (will be encoded with utf-8)
    @param {int} key_size_bytes          Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit
    @returns {str}                       Decrypted data
    """
    NONCE_LENGTH_BYTES = 8

    data = bytes_to_intlist(compat_b64decode(data))
    password = bytes_to_intlist(password.encode('utf-8'))

    # Zero-pad the password out to the key size, then derive the actual key by
    # self-encrypting its first block (repeated to fill key_size_bytes).
    key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
    key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)

    nonce = data[:NONCE_LENGTH_BYTES]
    cipher = data[NONCE_LENGTH_BYTES:]

    class Counter(object):
        # 8-byte nonce in the high half, 8-byte big-endian counter (via inc())
        # in the low half.
        __value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)

        def next_value(self):
            temp = self.__value
            self.__value = inc(self.__value)
            return temp

    decrypted_data = aes_ctr_decrypt(cipher, key, Counter())
    plaintext = intlist_to_bytes(decrypted_data)

    return plaintext


# Round constants for the key schedule (index 0 is unused; iteration starts at 1).
RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)

# AES forward S-box (SubBytes lookup table).
SBOX = (0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
        0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
        0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
        0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
        0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
        0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
        0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
        0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
        0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
        0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
        0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
        0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
        0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
        0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
        0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
        0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16)
# AES inverse S-box (InvSubBytes lookup table).
SBOX_INV = (0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
            0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
            0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
            0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
            0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
            0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
            0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
            0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
            0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
            0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
            0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
            0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
            0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
            0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
            0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
            0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d)
# MixColumns coefficient matrices (GF(2^8) multipliers) and their inverse.
MIX_COLUMN_MATRIX = ((0x2, 0x3, 0x1, 0x1),
                     (0x1, 0x2, 0x3, 0x1),
                     (0x1, 0x1, 0x2, 0x3),
                     (0x3, 0x1, 0x1, 0x2))
MIX_COLUMN_MATRIX_INV = ((0xE, 0xB, 0xD, 0x9),
                         (0x9, 0xE, 0xB, 0xD),
                         (0xD, 0x9, 0xE, 0xB),
                         (0xB, 0xD, 0x9, 0xE))
# Antilog table for GF(2^8): RIJNDAEL_EXP_TABLE[i] = generator 0x03 ** i.
RIJNDAEL_EXP_TABLE = (0x01, 0x03, 0x05, 0x0F, 0x11, 0x33, 0x55, 0xFF, 0x1A, 0x2E, 0x72, 0x96, 0xA1, 0xF8, 0x13, 0x35,
                      0x5F, 0xE1, 0x38, 0x48, 0xD8, 0x73, 0x95, 0xA4, 0xF7, 0x02, 0x06, 0x0A, 0x1E, 0x22, 0x66, 0xAA,
                      0xE5, 0x34, 0x5C, 0xE4, 0x37, 0x59, 0xEB, 0x26, 0x6A, 0xBE, 0xD9, 0x70, 0x90, 0xAB, 0xE6, 0x31,
                      0x53, 0xF5, 0x04, 0x0C, 0x14, 0x3C, 0x44, 0xCC, 0x4F, 0xD1, 0x68, 0xB8, 0xD3, 0x6E, 0xB2, 0xCD,
                      0x4C, 0xD4, 0x67, 0xA9, 0xE0, 0x3B, 0x4D, 0xD7, 0x62, 0xA6, 0xF1, 0x08, 0x18, 0x28, 0x78, 0x88,
                      0x83, 0x9E, 0xB9, 0xD0, 0x6B, 0xBD, 0xDC, 0x7F, 0x81, 0x98, 0xB3, 0xCE, 0x49, 0xDB, 0x76, 0x9A,
                      0xB5, 0xC4, 0x57, 0xF9, 0x10, 0x30, 0x50, 0xF0, 0x0B, 0x1D, 0x27, 0x69, 0xBB, 0xD6, 0x61, 0xA3,
                      0xFE, 0x19, 0x2B, 0x7D, 0x87, 0x92, 0xAD, 0xEC, 0x2F, 0x71, 0x93, 0xAE, 0xE9, 0x20, 0x60, 0xA0,
                      0xFB, 0x16, 0x3A, 0x4E, 0xD2, 0x6D, 0xB7, 0xC2, 0x5D, 0xE7, 0x32, 0x56, 0xFA, 0x15, 0x3F, 0x41,
                      0xC3, 0x5E, 0xE2, 0x3D, 0x47, 0xC9, 0x40, 0xC0, 0x5B, 0xED, 0x2C, 0x74, 0x9C, 0xBF, 0xDA, 0x75,
                      0x9F, 0xBA, 0xD5, 0x64, 0xAC, 0xEF, 0x2A, 0x7E, 0x82, 0x9D, 0xBC, 0xDF, 0x7A, 0x8E, 0x89, 0x80,
                      0x9B, 0xB6, 0xC1, 0x58, 0xE8, 0x23, 0x65, 0xAF, 0xEA, 0x25, 0x6F, 0xB1, 0xC8, 0x43, 0xC5, 0x54,
                      0xFC, 0x1F, 0x21, 0x63, 0xA5, 0xF4, 0x07, 0x09, 0x1B, 0x2D, 0x77, 0x99, 0xB0, 0xCB, 0x46, 0xCA,
                      0x45, 0xCF, 0x4A, 0xDE, 0x79, 0x8B, 0x86, 0x91, 0xA8, 0xE3, 0x3E, 0x42, 0xC6, 0x51, 0xF3, 0x0E,
                      0x12, 0x36, 0x5A, 0xEE, 0x29, 0x7B, 0x8D, 0x8C, 0x8F, 0x8A, 0x85, 0x94, 0xA7, 0xF2, 0x0D, 0x17,
                      0x39, 0x4B, 0xDD, 0x7C, 0x84, 0x97, 0xA2, 0xFD, 0x1C, 0x24, 0x6C, 0xB4, 0xC7, 0x52, 0xF6, 0x01)
# Log table for GF(2^8), base 0x03 (entries 0 and 1 share the value 0x00;
# rijndael_mul special-cases operand 0 so index 0 is never meaningful).
RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7, 0x1b, 0x68, 0x33, 0xee, 0xdf, 0x03,
                      0x64, 0x04, 0xe0, 0x0e, 0x34, 0x8d, 0x81, 0xef, 0x4c, 0x71, 0x08, 0xc8, 0xf8, 0x69, 0x1c, 0xc1,
                      0x7d, 0xc2, 0x1d, 0xb5, 0xf9, 0xb9, 0x27, 0x6a, 0x4d, 0xe4, 0xa6, 0x72, 0x9a, 0xc9, 0x09, 0x78,
                      0x65, 0x2f, 0x8a, 0x05, 0x21, 0x0f, 0xe1, 0x24, 0x12, 0xf0, 0x82, 0x45, 0x35, 0x93, 0xda, 0x8e,
                      0x96, 0x8f, 0xdb, 0xbd, 0x36, 0xd0, 0xce, 0x94, 0x13, 0x5c, 0xd2, 0xf1, 0x40, 0x46, 0x83, 0x38,
                      0x66, 0xdd, 0xfd, 0x30, 0xbf, 0x06, 0x8b, 0x62, 0xb3, 0x25, 0xe2, 0x98, 0x22, 0x88, 0x91, 0x10,
                      0x7e, 0x6e, 0x48, 0xc3, 0xa3, 0xb6, 0x1e, 0x42, 0x3a, 0x6b, 0x28, 0x54, 0xfa, 0x85, 0x3d, 0xba,
                      0x2b, 0x79, 0x0a, 0x15, 0x9b, 0x9f, 0x5e, 0xca, 0x4e, 0xd4, 0xac, 0xe5, 0xf3, 0x73, 0xa7, 0x57,
                      0xaf, 0x58, 0xa8, 0x50, 0xf4, 0xea, 0xd6, 0x74, 0x4f, 0xae, 0xe9, 0xd5, 0xe7, 0xe6, 0xad, 0xe8,
                      0x2c, 0xd7, 0x75, 0x7a, 0xeb, 0x16, 0x0b, 0xf5, 0x59, 0xcb, 0x5f, 0xb0, 0x9c, 0xa9, 0x51, 0xa0,
                      0x7f, 0x0c, 0xf6, 0x6f, 0x17, 0xc4, 0x49, 0xec, 0xd8, 0x43, 0x1f, 0x2d, 0xa4, 0x76, 0x7b, 0xb7,
                      0xcc, 0xbb, 0x3e, 0x5a, 0xfb, 0x60, 0xb1, 0x86, 0x3b, 0x52, 0xa1, 0x6c, 0xaa, 0x55, 0x29, 0x9d,
                      0x97, 0xb2, 0x87, 0x90, 0x61, 0xbe, 0xdc, 0xfc, 0xbc, 0x95, 0xcf, 0xcd, 0x37, 0x3f, 0x5b, 0xd1,
                      0x53, 0x39, 0x84, 0x3c, 0x41, 0xa2, 0x6d, 0x47, 0x14, 0x2a, 0x9e, 0x5d, 0x56, 0xf2, 0xd3, 0xab,
                      0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
                      0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)


def sub_bytes(data):
    # SubBytes: per-byte S-box substitution.
    return [SBOX[x] for x in data]


def sub_bytes_inv(data):
    # InvSubBytes: per-byte inverse S-box substitution.
    return [SBOX_INV[x] for x in data]


def rotate(data):
    # RotWord: rotate a 4-byte word left by one byte.
    return data[1:] + [data[0]]


def key_schedule_core(data, rcon_iteration):
    # Key-schedule g() function: RotWord, SubWord, then XOR the round constant
    # into the first byte.
    data = rotate(data)
    data = sub_bytes(data)
    data[0] = data[0] ^ RCON[rcon_iteration]

    return data


def xor(data1, data2):
    # Byte-wise XOR; result is as long as the shorter input (zip semantics).
    return [x ^ y for x, y in zip(data1, data2)]


def rijndael_mul(a, b):
    # GF(2^8) multiplication via log/antilog tables: exp(log a + log b).
    if (a == 0 or b == 0):
        return 0
    # Modulo 0xFF (255), not 0x100: the multiplicative group has order 255.
    return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]


def mix_column(data, matrix):
    # Multiply one 4-byte column by the coefficient matrix in GF(2^8).
    data_mixed = []
    for row in range(4):
        mixed = 0
        for column in range(4):
            # xor is (+) and (-)
            mixed ^= rijndael_mul(data[column], matrix[row][column])
        data_mixed.append(mixed)
    return data_mixed


def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
    # MixColumns over the whole state: each consecutive 4-byte slice is one
    # column of the state.
    data_mixed = []
    for i in range(4):
        column = data[i * 4: (i + 1) * 4]
        data_mixed += mix_column(column, matrix)
    return data_mixed


def mix_columns_inv(data):
    # InvMixColumns: same routine with the inverse coefficient matrix.
    return mix_columns(data, MIX_COLUMN_MATRIX_INV)


def shift_rows(data):
    # ShiftRows: row r of the state is rotated left by r positions; the state
    # is addressed as data[column * 4 + row] (column-major flat layout).
    data_shifted = []
    for column in range(4):
        for row in range(4):
            data_shifted.append(data[((column + row) & 0b11) * 4 + row])
    return data_shifted


def shift_rows_inv(data):
    # InvShiftRows: row r rotated right by r positions (mirror of shift_rows).
    data_shifted = []
    for column in range(4):
        for row in range(4):
            data_shifted.append(data[((column - row) & 0b11) * 4 + row])
    return data_shifted


def inc(data):
    # Big-endian increment of a byte list with wrap-around carry; used as the
    # CTR-mode counter step.
    data = data[:]  # copy
    for i in range(len(data) - 1, -1, -1):
        if data[i] == 255:
            data[i] = 0
        else:
            data[i] = data[i] + 1
            break
    return data


__all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_cbc_decrypt', 'aes_decrypt_text']
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/casefold.py
youtube_dl/casefold.py
# coding: utf-8 from __future__ import unicode_literals from .compat import ( compat_str, compat_chr, ) # Below is included the text of icu/CaseFolding.txt retrieved from # https://github.com/unicode-org/icu/blob/main/icu4c/source/data/unidata/CaseFolding.txt # In case newly foldable Unicode characters are defined, paste the new version # of the text inside the ''' marks. # The text is expected to have only blank lines and lines with 1st character #, # all ignored, and fold definitions like this: # `from_hex_code; status; space_separated_to_hex_code_list; comment` # Only `status` C/F are used. _map_str = ''' # CaseFolding-15.0.0.txt # Date: 2022-02-02, 23:35:35 GMT # © 2022 Unicode®, Inc. # Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries. # For terms of use, see https://www.unicode.org/terms_of_use.html # # Unicode Character Database # For documentation, see https://www.unicode.org/reports/tr44/ # # Case Folding Properties # # This file is a supplement to the UnicodeData file. # It provides a case folding mapping generated from the Unicode Character Database. # If all characters are mapped according to the full mapping below, then # case differences (according to UnicodeData.txt and SpecialCasing.txt) # are eliminated. # # The data supports both implementations that require simple case foldings # (where string lengths don't change), and implementations that allow full case folding # (where string lengths may grow). Note that where they can be supported, the # full case foldings are superior: for example, they allow "MASSE" and "Maße" to match. # # All code points not listed in this file map to themselves. # # NOTE: case folding does not preserve normalization formats! # # For information on case folding, including how to have case folding # preserve normalization formats, see Section 3.13 Default Case Algorithms in # The Unicode Standard. 
# # ================================================================================ # Format # ================================================================================ # The entries in this file are in the following machine-readable format: # # <code>; <status>; <mapping>; # <name> # # The status field is: # C: common case folding, common mappings shared by both simple and full mappings. # F: full case folding, mappings that cause strings to grow in length. Multiple characters are separated by spaces. # S: simple case folding, mappings to single characters where different from F. # T: special case for uppercase I and dotted uppercase I # - For non-Turkic languages, this mapping is normally not used. # - For Turkic languages (tr, az), this mapping can be used instead of the normal mapping for these characters. # Note that the Turkic mappings do not maintain canonical equivalence without additional processing. # See the discussions of case mapping in the Unicode Standard for more information. # # Usage: # A. To do a simple case folding, use the mappings with status C + S. # B. To do a full case folding, use the mappings with status C + F. # # The mappings with status T can be used or omitted depending on the desired case-folding # behavior. (The default option is to exclude them.) # # ================================================================= # Property: Case_Folding # All code points not explicitly listed for Case_Folding # have the value C for the status field, and the code point itself for the mapping field. 
# ================================================================= 0041; C; 0061; # LATIN CAPITAL LETTER A 0042; C; 0062; # LATIN CAPITAL LETTER B 0043; C; 0063; # LATIN CAPITAL LETTER C 0044; C; 0064; # LATIN CAPITAL LETTER D 0045; C; 0065; # LATIN CAPITAL LETTER E 0046; C; 0066; # LATIN CAPITAL LETTER F 0047; C; 0067; # LATIN CAPITAL LETTER G 0048; C; 0068; # LATIN CAPITAL LETTER H 0049; C; 0069; # LATIN CAPITAL LETTER I 0049; T; 0131; # LATIN CAPITAL LETTER I 004A; C; 006A; # LATIN CAPITAL LETTER J 004B; C; 006B; # LATIN CAPITAL LETTER K 004C; C; 006C; # LATIN CAPITAL LETTER L 004D; C; 006D; # LATIN CAPITAL LETTER M 004E; C; 006E; # LATIN CAPITAL LETTER N 004F; C; 006F; # LATIN CAPITAL LETTER O 0050; C; 0070; # LATIN CAPITAL LETTER P 0051; C; 0071; # LATIN CAPITAL LETTER Q 0052; C; 0072; # LATIN CAPITAL LETTER R 0053; C; 0073; # LATIN CAPITAL LETTER S 0054; C; 0074; # LATIN CAPITAL LETTER T 0055; C; 0075; # LATIN CAPITAL LETTER U 0056; C; 0076; # LATIN CAPITAL LETTER V 0057; C; 0077; # LATIN CAPITAL LETTER W 0058; C; 0078; # LATIN CAPITAL LETTER X 0059; C; 0079; # LATIN CAPITAL LETTER Y 005A; C; 007A; # LATIN CAPITAL LETTER Z 00B5; C; 03BC; # MICRO SIGN 00C0; C; 00E0; # LATIN CAPITAL LETTER A WITH GRAVE 00C1; C; 00E1; # LATIN CAPITAL LETTER A WITH ACUTE 00C2; C; 00E2; # LATIN CAPITAL LETTER A WITH CIRCUMFLEX 00C3; C; 00E3; # LATIN CAPITAL LETTER A WITH TILDE 00C4; C; 00E4; # LATIN CAPITAL LETTER A WITH DIAERESIS 00C5; C; 00E5; # LATIN CAPITAL LETTER A WITH RING ABOVE 00C6; C; 00E6; # LATIN CAPITAL LETTER AE 00C7; C; 00E7; # LATIN CAPITAL LETTER C WITH CEDILLA 00C8; C; 00E8; # LATIN CAPITAL LETTER E WITH GRAVE 00C9; C; 00E9; # LATIN CAPITAL LETTER E WITH ACUTE 00CA; C; 00EA; # LATIN CAPITAL LETTER E WITH CIRCUMFLEX 00CB; C; 00EB; # LATIN CAPITAL LETTER E WITH DIAERESIS 00CC; C; 00EC; # LATIN CAPITAL LETTER I WITH GRAVE 00CD; C; 00ED; # LATIN CAPITAL LETTER I WITH ACUTE 00CE; C; 00EE; # LATIN CAPITAL LETTER I WITH CIRCUMFLEX 00CF; C; 00EF; # LATIN CAPITAL LETTER 
I WITH DIAERESIS 00D0; C; 00F0; # LATIN CAPITAL LETTER ETH 00D1; C; 00F1; # LATIN CAPITAL LETTER N WITH TILDE 00D2; C; 00F2; # LATIN CAPITAL LETTER O WITH GRAVE 00D3; C; 00F3; # LATIN CAPITAL LETTER O WITH ACUTE 00D4; C; 00F4; # LATIN CAPITAL LETTER O WITH CIRCUMFLEX 00D5; C; 00F5; # LATIN CAPITAL LETTER O WITH TILDE 00D6; C; 00F6; # LATIN CAPITAL LETTER O WITH DIAERESIS 00D8; C; 00F8; # LATIN CAPITAL LETTER O WITH STROKE 00D9; C; 00F9; # LATIN CAPITAL LETTER U WITH GRAVE 00DA; C; 00FA; # LATIN CAPITAL LETTER U WITH ACUTE 00DB; C; 00FB; # LATIN CAPITAL LETTER U WITH CIRCUMFLEX 00DC; C; 00FC; # LATIN CAPITAL LETTER U WITH DIAERESIS 00DD; C; 00FD; # LATIN CAPITAL LETTER Y WITH ACUTE 00DE; C; 00FE; # LATIN CAPITAL LETTER THORN 00DF; F; 0073 0073; # LATIN SMALL LETTER SHARP S 0100; C; 0101; # LATIN CAPITAL LETTER A WITH MACRON 0102; C; 0103; # LATIN CAPITAL LETTER A WITH BREVE 0104; C; 0105; # LATIN CAPITAL LETTER A WITH OGONEK 0106; C; 0107; # LATIN CAPITAL LETTER C WITH ACUTE 0108; C; 0109; # LATIN CAPITAL LETTER C WITH CIRCUMFLEX 010A; C; 010B; # LATIN CAPITAL LETTER C WITH DOT ABOVE 010C; C; 010D; # LATIN CAPITAL LETTER C WITH CARON 010E; C; 010F; # LATIN CAPITAL LETTER D WITH CARON 0110; C; 0111; # LATIN CAPITAL LETTER D WITH STROKE 0112; C; 0113; # LATIN CAPITAL LETTER E WITH MACRON 0114; C; 0115; # LATIN CAPITAL LETTER E WITH BREVE 0116; C; 0117; # LATIN CAPITAL LETTER E WITH DOT ABOVE 0118; C; 0119; # LATIN CAPITAL LETTER E WITH OGONEK 011A; C; 011B; # LATIN CAPITAL LETTER E WITH CARON 011C; C; 011D; # LATIN CAPITAL LETTER G WITH CIRCUMFLEX 011E; C; 011F; # LATIN CAPITAL LETTER G WITH BREVE 0120; C; 0121; # LATIN CAPITAL LETTER G WITH DOT ABOVE 0122; C; 0123; # LATIN CAPITAL LETTER G WITH CEDILLA 0124; C; 0125; # LATIN CAPITAL LETTER H WITH CIRCUMFLEX 0126; C; 0127; # LATIN CAPITAL LETTER H WITH STROKE 0128; C; 0129; # LATIN CAPITAL LETTER I WITH TILDE 012A; C; 012B; # LATIN CAPITAL LETTER I WITH MACRON 012C; C; 012D; # LATIN CAPITAL LETTER I WITH BREVE 012E; 
C; 012F; # LATIN CAPITAL LETTER I WITH OGONEK 0130; F; 0069 0307; # LATIN CAPITAL LETTER I WITH DOT ABOVE 0130; T; 0069; # LATIN CAPITAL LETTER I WITH DOT ABOVE 0132; C; 0133; # LATIN CAPITAL LIGATURE IJ 0134; C; 0135; # LATIN CAPITAL LETTER J WITH CIRCUMFLEX 0136; C; 0137; # LATIN CAPITAL LETTER K WITH CEDILLA 0139; C; 013A; # LATIN CAPITAL LETTER L WITH ACUTE 013B; C; 013C; # LATIN CAPITAL LETTER L WITH CEDILLA 013D; C; 013E; # LATIN CAPITAL LETTER L WITH CARON 013F; C; 0140; # LATIN CAPITAL LETTER L WITH MIDDLE DOT 0141; C; 0142; # LATIN CAPITAL LETTER L WITH STROKE 0143; C; 0144; # LATIN CAPITAL LETTER N WITH ACUTE 0145; C; 0146; # LATIN CAPITAL LETTER N WITH CEDILLA 0147; C; 0148; # LATIN CAPITAL LETTER N WITH CARON 0149; F; 02BC 006E; # LATIN SMALL LETTER N PRECEDED BY APOSTROPHE 014A; C; 014B; # LATIN CAPITAL LETTER ENG 014C; C; 014D; # LATIN CAPITAL LETTER O WITH MACRON 014E; C; 014F; # LATIN CAPITAL LETTER O WITH BREVE 0150; C; 0151; # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE 0152; C; 0153; # LATIN CAPITAL LIGATURE OE 0154; C; 0155; # LATIN CAPITAL LETTER R WITH ACUTE 0156; C; 0157; # LATIN CAPITAL LETTER R WITH CEDILLA 0158; C; 0159; # LATIN CAPITAL LETTER R WITH CARON 015A; C; 015B; # LATIN CAPITAL LETTER S WITH ACUTE 015C; C; 015D; # LATIN CAPITAL LETTER S WITH CIRCUMFLEX 015E; C; 015F; # LATIN CAPITAL LETTER S WITH CEDILLA 0160; C; 0161; # LATIN CAPITAL LETTER S WITH CARON 0162; C; 0163; # LATIN CAPITAL LETTER T WITH CEDILLA 0164; C; 0165; # LATIN CAPITAL LETTER T WITH CARON 0166; C; 0167; # LATIN CAPITAL LETTER T WITH STROKE 0168; C; 0169; # LATIN CAPITAL LETTER U WITH TILDE 016A; C; 016B; # LATIN CAPITAL LETTER U WITH MACRON 016C; C; 016D; # LATIN CAPITAL LETTER U WITH BREVE 016E; C; 016F; # LATIN CAPITAL LETTER U WITH RING ABOVE 0170; C; 0171; # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE 0172; C; 0173; # LATIN CAPITAL LETTER U WITH OGONEK 0174; C; 0175; # LATIN CAPITAL LETTER W WITH CIRCUMFLEX 0176; C; 0177; # LATIN CAPITAL LETTER Y WITH CIRCUMFLEX 
0178; C; 00FF; # LATIN CAPITAL LETTER Y WITH DIAERESIS 0179; C; 017A; # LATIN CAPITAL LETTER Z WITH ACUTE 017B; C; 017C; # LATIN CAPITAL LETTER Z WITH DOT ABOVE 017D; C; 017E; # LATIN CAPITAL LETTER Z WITH CARON 017F; C; 0073; # LATIN SMALL LETTER LONG S 0181; C; 0253; # LATIN CAPITAL LETTER B WITH HOOK 0182; C; 0183; # LATIN CAPITAL LETTER B WITH TOPBAR 0184; C; 0185; # LATIN CAPITAL LETTER TONE SIX 0186; C; 0254; # LATIN CAPITAL LETTER OPEN O 0187; C; 0188; # LATIN CAPITAL LETTER C WITH HOOK 0189; C; 0256; # LATIN CAPITAL LETTER AFRICAN D 018A; C; 0257; # LATIN CAPITAL LETTER D WITH HOOK 018B; C; 018C; # LATIN CAPITAL LETTER D WITH TOPBAR 018E; C; 01DD; # LATIN CAPITAL LETTER REVERSED E 018F; C; 0259; # LATIN CAPITAL LETTER SCHWA 0190; C; 025B; # LATIN CAPITAL LETTER OPEN E 0191; C; 0192; # LATIN CAPITAL LETTER F WITH HOOK 0193; C; 0260; # LATIN CAPITAL LETTER G WITH HOOK 0194; C; 0263; # LATIN CAPITAL LETTER GAMMA 0196; C; 0269; # LATIN CAPITAL LETTER IOTA 0197; C; 0268; # LATIN CAPITAL LETTER I WITH STROKE 0198; C; 0199; # LATIN CAPITAL LETTER K WITH HOOK 019C; C; 026F; # LATIN CAPITAL LETTER TURNED M 019D; C; 0272; # LATIN CAPITAL LETTER N WITH LEFT HOOK 019F; C; 0275; # LATIN CAPITAL LETTER O WITH MIDDLE TILDE 01A0; C; 01A1; # LATIN CAPITAL LETTER O WITH HORN 01A2; C; 01A3; # LATIN CAPITAL LETTER OI 01A4; C; 01A5; # LATIN CAPITAL LETTER P WITH HOOK 01A6; C; 0280; # LATIN LETTER YR 01A7; C; 01A8; # LATIN CAPITAL LETTER TONE TWO 01A9; C; 0283; # LATIN CAPITAL LETTER ESH 01AC; C; 01AD; # LATIN CAPITAL LETTER T WITH HOOK 01AE; C; 0288; # LATIN CAPITAL LETTER T WITH RETROFLEX HOOK 01AF; C; 01B0; # LATIN CAPITAL LETTER U WITH HORN 01B1; C; 028A; # LATIN CAPITAL LETTER UPSILON 01B2; C; 028B; # LATIN CAPITAL LETTER V WITH HOOK 01B3; C; 01B4; # LATIN CAPITAL LETTER Y WITH HOOK 01B5; C; 01B6; # LATIN CAPITAL LETTER Z WITH STROKE 01B7; C; 0292; # LATIN CAPITAL LETTER EZH 01B8; C; 01B9; # LATIN CAPITAL LETTER EZH REVERSED 01BC; C; 01BD; # LATIN CAPITAL LETTER TONE FIVE 
01C4; C; 01C6; # LATIN CAPITAL LETTER DZ WITH CARON 01C5; C; 01C6; # LATIN CAPITAL LETTER D WITH SMALL LETTER Z WITH CARON 01C7; C; 01C9; # LATIN CAPITAL LETTER LJ 01C8; C; 01C9; # LATIN CAPITAL LETTER L WITH SMALL LETTER J 01CA; C; 01CC; # LATIN CAPITAL LETTER NJ 01CB; C; 01CC; # LATIN CAPITAL LETTER N WITH SMALL LETTER J 01CD; C; 01CE; # LATIN CAPITAL LETTER A WITH CARON 01CF; C; 01D0; # LATIN CAPITAL LETTER I WITH CARON 01D1; C; 01D2; # LATIN CAPITAL LETTER O WITH CARON 01D3; C; 01D4; # LATIN CAPITAL LETTER U WITH CARON 01D5; C; 01D6; # LATIN CAPITAL LETTER U WITH DIAERESIS AND MACRON 01D7; C; 01D8; # LATIN CAPITAL LETTER U WITH DIAERESIS AND ACUTE 01D9; C; 01DA; # LATIN CAPITAL LETTER U WITH DIAERESIS AND CARON 01DB; C; 01DC; # LATIN CAPITAL LETTER U WITH DIAERESIS AND GRAVE 01DE; C; 01DF; # LATIN CAPITAL LETTER A WITH DIAERESIS AND MACRON 01E0; C; 01E1; # LATIN CAPITAL LETTER A WITH DOT ABOVE AND MACRON 01E2; C; 01E3; # LATIN CAPITAL LETTER AE WITH MACRON 01E4; C; 01E5; # LATIN CAPITAL LETTER G WITH STROKE 01E6; C; 01E7; # LATIN CAPITAL LETTER G WITH CARON 01E8; C; 01E9; # LATIN CAPITAL LETTER K WITH CARON 01EA; C; 01EB; # LATIN CAPITAL LETTER O WITH OGONEK 01EC; C; 01ED; # LATIN CAPITAL LETTER O WITH OGONEK AND MACRON 01EE; C; 01EF; # LATIN CAPITAL LETTER EZH WITH CARON 01F0; F; 006A 030C; # LATIN SMALL LETTER J WITH CARON 01F1; C; 01F3; # LATIN CAPITAL LETTER DZ 01F2; C; 01F3; # LATIN CAPITAL LETTER D WITH SMALL LETTER Z 01F4; C; 01F5; # LATIN CAPITAL LETTER G WITH ACUTE 01F6; C; 0195; # LATIN CAPITAL LETTER HWAIR 01F7; C; 01BF; # LATIN CAPITAL LETTER WYNN 01F8; C; 01F9; # LATIN CAPITAL LETTER N WITH GRAVE 01FA; C; 01FB; # LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE 01FC; C; 01FD; # LATIN CAPITAL LETTER AE WITH ACUTE 01FE; C; 01FF; # LATIN CAPITAL LETTER O WITH STROKE AND ACUTE 0200; C; 0201; # LATIN CAPITAL LETTER A WITH DOUBLE GRAVE 0202; C; 0203; # LATIN CAPITAL LETTER A WITH INVERTED BREVE 0204; C; 0205; # LATIN CAPITAL LETTER E WITH DOUBLE GRAVE 
0206; C; 0207; # LATIN CAPITAL LETTER E WITH INVERTED BREVE 0208; C; 0209; # LATIN CAPITAL LETTER I WITH DOUBLE GRAVE 020A; C; 020B; # LATIN CAPITAL LETTER I WITH INVERTED BREVE 020C; C; 020D; # LATIN CAPITAL LETTER O WITH DOUBLE GRAVE 020E; C; 020F; # LATIN CAPITAL LETTER O WITH INVERTED BREVE 0210; C; 0211; # LATIN CAPITAL LETTER R WITH DOUBLE GRAVE 0212; C; 0213; # LATIN CAPITAL LETTER R WITH INVERTED BREVE 0214; C; 0215; # LATIN CAPITAL LETTER U WITH DOUBLE GRAVE 0216; C; 0217; # LATIN CAPITAL LETTER U WITH INVERTED BREVE 0218; C; 0219; # LATIN CAPITAL LETTER S WITH COMMA BELOW 021A; C; 021B; # LATIN CAPITAL LETTER T WITH COMMA BELOW 021C; C; 021D; # LATIN CAPITAL LETTER YOGH 021E; C; 021F; # LATIN CAPITAL LETTER H WITH CARON 0220; C; 019E; # LATIN CAPITAL LETTER N WITH LONG RIGHT LEG 0222; C; 0223; # LATIN CAPITAL LETTER OU 0224; C; 0225; # LATIN CAPITAL LETTER Z WITH HOOK 0226; C; 0227; # LATIN CAPITAL LETTER A WITH DOT ABOVE 0228; C; 0229; # LATIN CAPITAL LETTER E WITH CEDILLA 022A; C; 022B; # LATIN CAPITAL LETTER O WITH DIAERESIS AND MACRON 022C; C; 022D; # LATIN CAPITAL LETTER O WITH TILDE AND MACRON 022E; C; 022F; # LATIN CAPITAL LETTER O WITH DOT ABOVE 0230; C; 0231; # LATIN CAPITAL LETTER O WITH DOT ABOVE AND MACRON 0232; C; 0233; # LATIN CAPITAL LETTER Y WITH MACRON 023A; C; 2C65; # LATIN CAPITAL LETTER A WITH STROKE 023B; C; 023C; # LATIN CAPITAL LETTER C WITH STROKE 023D; C; 019A; # LATIN CAPITAL LETTER L WITH BAR 023E; C; 2C66; # LATIN CAPITAL LETTER T WITH DIAGONAL STROKE 0241; C; 0242; # LATIN CAPITAL LETTER GLOTTAL STOP 0243; C; 0180; # LATIN CAPITAL LETTER B WITH STROKE 0244; C; 0289; # LATIN CAPITAL LETTER U BAR 0245; C; 028C; # LATIN CAPITAL LETTER TURNED V 0246; C; 0247; # LATIN CAPITAL LETTER E WITH STROKE 0248; C; 0249; # LATIN CAPITAL LETTER J WITH STROKE 024A; C; 024B; # LATIN CAPITAL LETTER SMALL Q WITH HOOK TAIL 024C; C; 024D; # LATIN CAPITAL LETTER R WITH STROKE 024E; C; 024F; # LATIN CAPITAL LETTER Y WITH STROKE 0345; C; 03B9; # 
COMBINING GREEK YPOGEGRAMMENI 0370; C; 0371; # GREEK CAPITAL LETTER HETA 0372; C; 0373; # GREEK CAPITAL LETTER ARCHAIC SAMPI 0376; C; 0377; # GREEK CAPITAL LETTER PAMPHYLIAN DIGAMMA 037F; C; 03F3; # GREEK CAPITAL LETTER YOT 0386; C; 03AC; # GREEK CAPITAL LETTER ALPHA WITH TONOS 0388; C; 03AD; # GREEK CAPITAL LETTER EPSILON WITH TONOS 0389; C; 03AE; # GREEK CAPITAL LETTER ETA WITH TONOS 038A; C; 03AF; # GREEK CAPITAL LETTER IOTA WITH TONOS 038C; C; 03CC; # GREEK CAPITAL LETTER OMICRON WITH TONOS 038E; C; 03CD; # GREEK CAPITAL LETTER UPSILON WITH TONOS 038F; C; 03CE; # GREEK CAPITAL LETTER OMEGA WITH TONOS 0390; F; 03B9 0308 0301; # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS 0391; C; 03B1; # GREEK CAPITAL LETTER ALPHA 0392; C; 03B2; # GREEK CAPITAL LETTER BETA 0393; C; 03B3; # GREEK CAPITAL LETTER GAMMA 0394; C; 03B4; # GREEK CAPITAL LETTER DELTA 0395; C; 03B5; # GREEK CAPITAL LETTER EPSILON 0396; C; 03B6; # GREEK CAPITAL LETTER ZETA 0397; C; 03B7; # GREEK CAPITAL LETTER ETA 0398; C; 03B8; # GREEK CAPITAL LETTER THETA 0399; C; 03B9; # GREEK CAPITAL LETTER IOTA 039A; C; 03BA; # GREEK CAPITAL LETTER KAPPA 039B; C; 03BB; # GREEK CAPITAL LETTER LAMDA 039C; C; 03BC; # GREEK CAPITAL LETTER MU 039D; C; 03BD; # GREEK CAPITAL LETTER NU 039E; C; 03BE; # GREEK CAPITAL LETTER XI 039F; C; 03BF; # GREEK CAPITAL LETTER OMICRON 03A0; C; 03C0; # GREEK CAPITAL LETTER PI 03A1; C; 03C1; # GREEK CAPITAL LETTER RHO 03A3; C; 03C3; # GREEK CAPITAL LETTER SIGMA 03A4; C; 03C4; # GREEK CAPITAL LETTER TAU 03A5; C; 03C5; # GREEK CAPITAL LETTER UPSILON 03A6; C; 03C6; # GREEK CAPITAL LETTER PHI 03A7; C; 03C7; # GREEK CAPITAL LETTER CHI 03A8; C; 03C8; # GREEK CAPITAL LETTER PSI 03A9; C; 03C9; # GREEK CAPITAL LETTER OMEGA 03AA; C; 03CA; # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA 03AB; C; 03CB; # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA 03B0; F; 03C5 0308 0301; # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS 03C2; C; 03C3; # GREEK SMALL LETTER FINAL SIGMA 03CF; C; 03D7; # GREEK CAPITAL 
KAI SYMBOL 03D0; C; 03B2; # GREEK BETA SYMBOL 03D1; C; 03B8; # GREEK THETA SYMBOL 03D5; C; 03C6; # GREEK PHI SYMBOL 03D6; C; 03C0; # GREEK PI SYMBOL 03D8; C; 03D9; # GREEK LETTER ARCHAIC KOPPA 03DA; C; 03DB; # GREEK LETTER STIGMA 03DC; C; 03DD; # GREEK LETTER DIGAMMA 03DE; C; 03DF; # GREEK LETTER KOPPA 03E0; C; 03E1; # GREEK LETTER SAMPI 03E2; C; 03E3; # COPTIC CAPITAL LETTER SHEI 03E4; C; 03E5; # COPTIC CAPITAL LETTER FEI 03E6; C; 03E7; # COPTIC CAPITAL LETTER KHEI 03E8; C; 03E9; # COPTIC CAPITAL LETTER HORI 03EA; C; 03EB; # COPTIC CAPITAL LETTER GANGIA 03EC; C; 03ED; # COPTIC CAPITAL LETTER SHIMA 03EE; C; 03EF; # COPTIC CAPITAL LETTER DEI 03F0; C; 03BA; # GREEK KAPPA SYMBOL 03F1; C; 03C1; # GREEK RHO SYMBOL 03F4; C; 03B8; # GREEK CAPITAL THETA SYMBOL 03F5; C; 03B5; # GREEK LUNATE EPSILON SYMBOL 03F7; C; 03F8; # GREEK CAPITAL LETTER SHO 03F9; C; 03F2; # GREEK CAPITAL LUNATE SIGMA SYMBOL 03FA; C; 03FB; # GREEK CAPITAL LETTER SAN 03FD; C; 037B; # GREEK CAPITAL REVERSED LUNATE SIGMA SYMBOL 03FE; C; 037C; # GREEK CAPITAL DOTTED LUNATE SIGMA SYMBOL 03FF; C; 037D; # GREEK CAPITAL REVERSED DOTTED LUNATE SIGMA SYMBOL 0400; C; 0450; # CYRILLIC CAPITAL LETTER IE WITH GRAVE 0401; C; 0451; # CYRILLIC CAPITAL LETTER IO 0402; C; 0452; # CYRILLIC CAPITAL LETTER DJE 0403; C; 0453; # CYRILLIC CAPITAL LETTER GJE 0404; C; 0454; # CYRILLIC CAPITAL LETTER UKRAINIAN IE 0405; C; 0455; # CYRILLIC CAPITAL LETTER DZE 0406; C; 0456; # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I 0407; C; 0457; # CYRILLIC CAPITAL LETTER YI 0408; C; 0458; # CYRILLIC CAPITAL LETTER JE 0409; C; 0459; # CYRILLIC CAPITAL LETTER LJE 040A; C; 045A; # CYRILLIC CAPITAL LETTER NJE 040B; C; 045B; # CYRILLIC CAPITAL LETTER TSHE 040C; C; 045C; # CYRILLIC CAPITAL LETTER KJE 040D; C; 045D; # CYRILLIC CAPITAL LETTER I WITH GRAVE 040E; C; 045E; # CYRILLIC CAPITAL LETTER SHORT U 040F; C; 045F; # CYRILLIC CAPITAL LETTER DZHE 0410; C; 0430; # CYRILLIC CAPITAL LETTER A 0411; C; 0431; # CYRILLIC CAPITAL LETTER BE 0412; C; 
0432; # CYRILLIC CAPITAL LETTER VE 0413; C; 0433; # CYRILLIC CAPITAL LETTER GHE 0414; C; 0434; # CYRILLIC CAPITAL LETTER DE 0415; C; 0435; # CYRILLIC CAPITAL LETTER IE 0416; C; 0436; # CYRILLIC CAPITAL LETTER ZHE 0417; C; 0437; # CYRILLIC CAPITAL LETTER ZE 0418; C; 0438; # CYRILLIC CAPITAL LETTER I 0419; C; 0439; # CYRILLIC CAPITAL LETTER SHORT I 041A; C; 043A; # CYRILLIC CAPITAL LETTER KA 041B; C; 043B; # CYRILLIC CAPITAL LETTER EL 041C; C; 043C; # CYRILLIC CAPITAL LETTER EM 041D; C; 043D; # CYRILLIC CAPITAL LETTER EN 041E; C; 043E; # CYRILLIC CAPITAL LETTER O 041F; C; 043F; # CYRILLIC CAPITAL LETTER PE 0420; C; 0440; # CYRILLIC CAPITAL LETTER ER 0421; C; 0441; # CYRILLIC CAPITAL LETTER ES 0422; C; 0442; # CYRILLIC CAPITAL LETTER TE 0423; C; 0443; # CYRILLIC CAPITAL LETTER U 0424; C; 0444; # CYRILLIC CAPITAL LETTER EF 0425; C; 0445; # CYRILLIC CAPITAL LETTER HA 0426; C; 0446; # CYRILLIC CAPITAL LETTER TSE 0427; C; 0447; # CYRILLIC CAPITAL LETTER CHE 0428; C; 0448; # CYRILLIC CAPITAL LETTER SHA 0429; C; 0449; # CYRILLIC CAPITAL LETTER SHCHA 042A; C; 044A; # CYRILLIC CAPITAL LETTER HARD SIGN 042B; C; 044B; # CYRILLIC CAPITAL LETTER YERU 042C; C; 044C; # CYRILLIC CAPITAL LETTER SOFT SIGN 042D; C; 044D; # CYRILLIC CAPITAL LETTER E 042E; C; 044E; # CYRILLIC CAPITAL LETTER YU 042F; C; 044F; # CYRILLIC CAPITAL LETTER YA 0460; C; 0461; # CYRILLIC CAPITAL LETTER OMEGA 0462; C; 0463; # CYRILLIC CAPITAL LETTER YAT 0464; C; 0465; # CYRILLIC CAPITAL LETTER IOTIFIED E 0466; C; 0467; # CYRILLIC CAPITAL LETTER LITTLE YUS 0468; C; 0469; # CYRILLIC CAPITAL LETTER IOTIFIED LITTLE YUS 046A; C; 046B; # CYRILLIC CAPITAL LETTER BIG YUS 046C; C; 046D; # CYRILLIC CAPITAL LETTER IOTIFIED BIG YUS 046E; C; 046F; # CYRILLIC CAPITAL LETTER KSI 0470; C; 0471; # CYRILLIC CAPITAL LETTER PSI 0472; C; 0473; # CYRILLIC CAPITAL LETTER FITA 0474; C; 0475; # CYRILLIC CAPITAL LETTER IZHITSA 0476; C; 0477; # CYRILLIC CAPITAL LETTER IZHITSA WITH DOUBLE GRAVE ACCENT 0478; C; 0479; # CYRILLIC CAPITAL LETTER 
UK 047A; C; 047B; # CYRILLIC CAPITAL LETTER ROUND OMEGA 047C; C; 047D; # CYRILLIC CAPITAL LETTER OMEGA WITH TITLO 047E; C; 047F; # CYRILLIC CAPITAL LETTER OT 0480; C; 0481; # CYRILLIC CAPITAL LETTER KOPPA 048A; C; 048B; # CYRILLIC CAPITAL LETTER SHORT I WITH TAIL 048C; C; 048D; # CYRILLIC CAPITAL LETTER SEMISOFT SIGN 048E; C; 048F; # CYRILLIC CAPITAL LETTER ER WITH TICK 0490; C; 0491; # CYRILLIC CAPITAL LETTER GHE WITH UPTURN 0492; C; 0493; # CYRILLIC CAPITAL LETTER GHE WITH STROKE 0494; C; 0495; # CYRILLIC CAPITAL LETTER GHE WITH MIDDLE HOOK 0496; C; 0497; # CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER 0498; C; 0499; # CYRILLIC CAPITAL LETTER ZE WITH DESCENDER 049A; C; 049B; # CYRILLIC CAPITAL LETTER KA WITH DESCENDER 049C; C; 049D; # CYRILLIC CAPITAL LETTER KA WITH VERTICAL STROKE 049E; C; 049F; # CYRILLIC CAPITAL LETTER KA WITH STROKE 04A0; C; 04A1; # CYRILLIC CAPITAL LETTER BASHKIR KA 04A2; C; 04A3; # CYRILLIC CAPITAL LETTER EN WITH DESCENDER 04A4; C; 04A5; # CYRILLIC CAPITAL LIGATURE EN GHE 04A6; C; 04A7; # CYRILLIC CAPITAL LETTER PE WITH MIDDLE HOOK 04A8; C; 04A9; # CYRILLIC CAPITAL LETTER ABKHASIAN HA 04AA; C; 04AB; # CYRILLIC CAPITAL LETTER ES WITH DESCENDER 04AC; C; 04AD; # CYRILLIC CAPITAL LETTER TE WITH DESCENDER 04AE; C; 04AF; # CYRILLIC CAPITAL LETTER STRAIGHT U 04B0; C; 04B1; # CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE 04B2; C; 04B3; # CYRILLIC CAPITAL LETTER HA WITH DESCENDER 04B4; C; 04B5; # CYRILLIC CAPITAL LIGATURE TE TSE 04B6; C; 04B7; # CYRILLIC CAPITAL LETTER CHE WITH DESCENDER 04B8; C; 04B9; # CYRILLIC CAPITAL LETTER CHE WITH VERTICAL STROKE 04BA; C; 04BB; # CYRILLIC CAPITAL LETTER SHHA 04BC; C; 04BD; # CYRILLIC CAPITAL LETTER ABKHASIAN CHE 04BE; C; 04BF; # CYRILLIC CAPITAL LETTER ABKHASIAN CHE WITH DESCENDER 04C0; C; 04CF; # CYRILLIC LETTER PALOCHKA 04C1; C; 04C2; # CYRILLIC CAPITAL LETTER ZHE WITH BREVE 04C3; C; 04C4; # CYRILLIC CAPITAL LETTER KA WITH HOOK 04C5; C; 04C6; # CYRILLIC CAPITAL LETTER EL WITH TAIL 04C7; C; 04C8; # CYRILLIC 
CAPITAL LETTER EN WITH HOOK 04C9; C; 04CA; # CYRILLIC CAPITAL LETTER EN WITH TAIL 04CB; C; 04CC; # CYRILLIC CAPITAL LETTER KHAKASSIAN CHE 04CD; C; 04CE; # CYRILLIC CAPITAL LETTER EM WITH TAIL 04D0; C; 04D1; # CYRILLIC CAPITAL LETTER A WITH BREVE 04D2; C; 04D3; # CYRILLIC CAPITAL LETTER A WITH DIAERESIS 04D4; C; 04D5; # CYRILLIC CAPITAL LIGATURE A IE 04D6; C; 04D7; # CYRILLIC CAPITAL LETTER IE WITH BREVE 04D8; C; 04D9; # CYRILLIC CAPITAL LETTER SCHWA 04DA; C; 04DB; # CYRILLIC CAPITAL LETTER SCHWA WITH DIAERESIS 04DC; C; 04DD; # CYRILLIC CAPITAL LETTER ZHE WITH DIAERESIS 04DE; C; 04DF; # CYRILLIC CAPITAL LETTER ZE WITH DIAERESIS 04E0; C; 04E1; # CYRILLIC CAPITAL LETTER ABKHASIAN DZE 04E2; C; 04E3; # CYRILLIC CAPITAL LETTER I WITH MACRON 04E4; C; 04E5; # CYRILLIC CAPITAL LETTER I WITH DIAERESIS 04E6; C; 04E7; # CYRILLIC CAPITAL LETTER O WITH DIAERESIS 04E8; C; 04E9; # CYRILLIC CAPITAL LETTER BARRED O 04EA; C; 04EB; # CYRILLIC CAPITAL LETTER BARRED O WITH DIAERESIS 04EC; C; 04ED; # CYRILLIC CAPITAL LETTER E WITH DIAERESIS 04EE; C; 04EF; # CYRILLIC CAPITAL LETTER U WITH MACRON 04F0; C; 04F1; # CYRILLIC CAPITAL LETTER U WITH DIAERESIS 04F2; C; 04F3; # CYRILLIC CAPITAL LETTER U WITH DOUBLE ACUTE 04F4; C; 04F5; # CYRILLIC CAPITAL LETTER CHE WITH DIAERESIS 04F6; C; 04F7; # CYRILLIC CAPITAL LETTER GHE WITH DESCENDER 04F8; C; 04F9; # CYRILLIC CAPITAL LETTER YERU WITH DIAERESIS 04FA; C; 04FB; # CYRILLIC CAPITAL LETTER GHE WITH STROKE AND HOOK 04FC; C; 04FD; # CYRILLIC CAPITAL LETTER HA WITH HOOK 04FE; C; 04FF; # CYRILLIC CAPITAL LETTER HA WITH STROKE 0500; C; 0501; # CYRILLIC CAPITAL LETTER KOMI DE 0502; C; 0503; # CYRILLIC CAPITAL LETTER KOMI DJE 0504; C; 0505; # CYRILLIC CAPITAL LETTER KOMI ZJE 0506; C; 0507; # CYRILLIC CAPITAL LETTER KOMI DZJE 0508; C; 0509; # CYRILLIC CAPITAL LETTER KOMI LJE 050A; C; 050B; # CYRILLIC CAPITAL LETTER KOMI NJE 050C; C; 050D; # CYRILLIC CAPITAL LETTER KOMI SJE 050E; C; 050F; # CYRILLIC CAPITAL LETTER KOMI TJE 0510; C; 0511; # CYRILLIC CAPITAL 
LETTER REVERSED ZE 0512; C; 0513; # CYRILLIC CAPITAL LETTER EL WITH HOOK 0514; C; 0515; # CYRILLIC CAPITAL LETTER LHA 0516; C; 0517; # CYRILLIC CAPITAL LETTER RHA 0518; C; 0519; # CYRILLIC CAPITAL LETTER YAE 051A; C; 051B; # CYRILLIC CAPITAL LETTER QA 051C; C; 051D; # CYRILLIC CAPITAL LETTER WE 051E; C; 051F; # CYRILLIC CAPITAL LETTER ALEUT KA 0520; C; 0521; # CYRILLIC CAPITAL LETTER EL WITH MIDDLE HOOK 0522; C; 0523; # CYRILLIC CAPITAL LETTER EN WITH MIDDLE HOOK 0524; C; 0525; # CYRILLIC CAPITAL LETTER PE WITH DESCENDER 0526; C; 0527; # CYRILLIC CAPITAL LETTER SHHA WITH DESCENDER 0528; C; 0529; # CYRILLIC CAPITAL LETTER EN WITH LEFT HOOK 052A; C; 052B; # CYRILLIC CAPITAL LETTER DZZHE 052C; C; 052D; # CYRILLIC CAPITAL LETTER DCHE 052E; C; 052F; # CYRILLIC CAPITAL LETTER EL WITH DESCENDER 0531; C; 0561; # ARMENIAN CAPITAL LETTER AYB 0532; C; 0562; # ARMENIAN CAPITAL LETTER BEN 0533; C; 0563; # ARMENIAN CAPITAL LETTER GIM 0534; C; 0564; # ARMENIAN CAPITAL LETTER DA 0535; C; 0565; # ARMENIAN CAPITAL LETTER ECH 0536; C; 0566; # ARMENIAN CAPITAL LETTER ZA 0537; C; 0567; # ARMENIAN CAPITAL LETTER EH 0538; C; 0568; # ARMENIAN CAPITAL LETTER ET 0539; C; 0569; # ARMENIAN CAPITAL LETTER TO 053A; C; 056A; # ARMENIAN CAPITAL LETTER ZHE 053B; C; 056B; # ARMENIAN CAPITAL LETTER INI 053C; C; 056C; # ARMENIAN CAPITAL LETTER LIWN 053D; C; 056D; # ARMENIAN CAPITAL LETTER XEH 053E; C; 056E; # ARMENIAN CAPITAL LETTER CA 053F; C; 056F; # ARMENIAN CAPITAL LETTER KEN 0540; C; 0570; # ARMENIAN CAPITAL LETTER HO 0541; C; 0571; # ARMENIAN CAPITAL LETTER JA 0542; C; 0572; # ARMENIAN CAPITAL LETTER GHAD 0543; C; 0573; # ARMENIAN CAPITAL LETTER CHEH 0544; C; 0574; # ARMENIAN CAPITAL LETTER MEN 0545; C; 0575; # ARMENIAN CAPITAL LETTER YI 0546; C; 0576; # ARMENIAN CAPITAL LETTER NOW 0547; C; 0577; # ARMENIAN CAPITAL LETTER SHA 0548; C; 0578; # ARMENIAN CAPITAL LETTER VO 0549; C; 0579; # ARMENIAN CAPITAL LETTER CHA 054A; C; 057A; # ARMENIAN CAPITAL LETTER PEH 054B; C; 057B; # ARMENIAN CAPITAL 
LETTER JHEH 054C; C; 057C; # ARMENIAN CAPITAL LETTER RA 054D; C; 057D; # ARMENIAN CAPITAL LETTER SEH 054E; C; 057E; # ARMENIAN CAPITAL LETTER VEW 054F; C; 057F; # ARMENIAN CAPITAL LETTER TIWN 0550; C; 0580; # ARMENIAN CAPITAL LETTER REH 0551; C; 0581; # ARMENIAN CAPITAL LETTER CO 0552; C; 0582; # ARMENIAN CAPITAL LETTER YIWN 0553; C; 0583; # ARMENIAN CAPITAL LETTER PIWR 0554; C; 0584; # ARMENIAN CAPITAL LETTER KEH 0555; C; 0585; # ARMENIAN CAPITAL LETTER OH 0556; C; 0586; # ARMENIAN CAPITAL LETTER FEH 0587; F; 0565 0582; # ARMENIAN SMALL LIGATURE ECH YIWN 10A0; C; 2D00; # GEORGIAN CAPITAL LETTER AN 10A1; C; 2D01; # GEORGIAN CAPITAL LETTER BAN 10A2; C; 2D02; # GEORGIAN CAPITAL LETTER GAN 10A3; C; 2D03; # GEORGIAN CAPITAL LETTER DON 10A4; C; 2D04; # GEORGIAN CAPITAL LETTER EN 10A5; C; 2D05; # GEORGIAN CAPITAL LETTER VIN 10A6; C; 2D06; # GEORGIAN CAPITAL LETTER ZEN 10A7; C; 2D07; # GEORGIAN CAPITAL LETTER TAN 10A8; C; 2D08; # GEORGIAN CAPITAL LETTER IN 10A9; C; 2D09; # GEORGIAN CAPITAL LETTER KAN 10AA; C; 2D0A; # GEORGIAN CAPITAL LETTER LAS 10AB; C; 2D0B; # GEORGIAN CAPITAL LETTER MAN 10AC; C; 2D0C; # GEORGIAN CAPITAL LETTER NAR 10AD; C; 2D0D; # GEORGIAN CAPITAL LETTER ON 10AE; C; 2D0E; # GEORGIAN CAPITAL LETTER PAR 10AF; C; 2D0F; # GEORGIAN CAPITAL LETTER ZHAR 10B0; C; 2D10; # GEORGIAN CAPITAL LETTER RAE 10B1; C; 2D11; # GEORGIAN CAPITAL LETTER SAN 10B2; C; 2D12; # GEORGIAN CAPITAL LETTER TAR 10B3; C; 2D13; # GEORGIAN CAPITAL LETTER UN 10B4; C; 2D14; # GEORGIAN CAPITAL LETTER PHAR 10B5; C; 2D15; # GEORGIAN CAPITAL LETTER KHAR 10B6; C; 2D16; # GEORGIAN CAPITAL LETTER GHAN 10B7; C; 2D17; # GEORGIAN CAPITAL LETTER QAR 10B8; C; 2D18; # GEORGIAN CAPITAL LETTER SHIN 10B9; C; 2D19; # GEORGIAN CAPITAL LETTER CHIN 10BA; C; 2D1A; # GEORGIAN CAPITAL LETTER CAN 10BB; C; 2D1B; # GEORGIAN CAPITAL LETTER JIL 10BC; C; 2D1C; # GEORGIAN CAPITAL LETTER CIL 10BD; C; 2D1D; # GEORGIAN CAPITAL LETTER CHAR 10BE; C; 2D1E; # GEORGIAN CAPITAL LETTER XAN 10BF; C; 2D1F; # GEORGIAN CAPITAL LETTER 
JHAN 10C0; C; 2D20; # GEORGIAN CAPITAL LETTER HAE 10C1; C; 2D21; # GEORGIAN CAPITAL LETTER HE 10C2; C; 2D22; # GEORGIAN CAPITAL LETTER HIE 10C3; C; 2D23; # GEORGIAN CAPITAL LETTER WE 10C4; C; 2D24; # GEORGIAN CAPITAL LETTER HAR 10C5; C; 2D25; # GEORGIAN CAPITAL LETTER HOE 10C7; C; 2D27; # GEORGIAN CAPITAL LETTER YN 10CD; C; 2D2D; # GEORGIAN CAPITAL LETTER AEN 13F8; C; 13F0; # CHEROKEE SMALL LETTER YE 13F9; C; 13F1; # CHEROKEE SMALL LETTER YI 13FA; C; 13F2; # CHEROKEE SMALL LETTER YO 13FB; C; 13F3; # CHEROKEE SMALL LETTER YU 13FC; C; 13F4; # CHEROKEE SMALL LETTER YV 13FD; C; 13F5; # CHEROKEE SMALL LETTER MV 1C80; C; 0432; # CYRILLIC SMALL LETTER ROUNDED VE 1C81; C; 0434; # CYRILLIC SMALL LETTER LONG-LEGGED DE 1C82; C; 043E; # CYRILLIC SMALL LETTER NARROW O 1C83; C; 0441; # CYRILLIC SMALL LETTER WIDE ES 1C84; C; 0442; # CYRILLIC SMALL LETTER TALL TE 1C85; C; 0442; # CYRILLIC SMALL LETTER THREE-LEGGED TE 1C86; C; 044A; # CYRILLIC SMALL LETTER TALL HARD SIGN 1C87; C; 0463; # CYRILLIC SMALL LETTER TALL YAT 1C88; C; A64B; # CYRILLIC SMALL LETTER UNBLENDED UK 1C90; C; 10D0; # GEORGIAN MTAVRULI CAPITAL LETTER AN 1C91; C; 10D1; # GEORGIAN MTAVRULI CAPITAL LETTER BAN 1C92; C; 10D2; # GEORGIAN MTAVRULI CAPITAL LETTER GAN 1C93; C; 10D3; # GEORGIAN MTAVRULI CAPITAL LETTER DON 1C94; C; 10D4; # GEORGIAN MTAVRULI CAPITAL LETTER EN 1C95; C; 10D5; # GEORGIAN MTAVRULI CAPITAL LETTER VIN 1C96; C; 10D6; # GEORGIAN MTAVRULI CAPITAL LETTER ZEN 1C97; C; 10D7; # GEORGIAN MTAVRULI CAPITAL LETTER TAN 1C98; C; 10D8; # GEORGIAN MTAVRULI CAPITAL LETTER IN 1C99; C; 10D9; # GEORGIAN MTAVRULI CAPITAL LETTER KAN 1C9A; C; 10DA; # GEORGIAN MTAVRULI CAPITAL LETTER LAS 1C9B; C; 10DB; # GEORGIAN MTAVRULI CAPITAL LETTER MAN 1C9C; C; 10DC; # GEORGIAN MTAVRULI CAPITAL LETTER NAR 1C9D; C; 10DD; # GEORGIAN MTAVRULI CAPITAL LETTER ON 1C9E; C; 10DE; # GEORGIAN MTAVRULI CAPITAL LETTER PAR 1C9F; C; 10DF; # GEORGIAN MTAVRULI CAPITAL LETTER ZHAR 1CA0; C; 10E0; # GEORGIAN MTAVRULI CAPITAL LETTER RAE 1CA1; C; 10E1; 
# GEORGIAN MTAVRULI CAPITAL LETTER SAN 1CA2; C; 10E2; # GEORGIAN MTAVRULI CAPITAL LETTER TAR 1CA3; C; 10E3; # GEORGIAN MTAVRULI CAPITAL LETTER UN 1CA4; C; 10E4; # GEORGIAN MTAVRULI CAPITAL LETTER PHAR 1CA5; C; 10E5; # GEORGIAN MTAVRULI CAPITAL LETTER KHAR 1CA6; C; 10E6; # GEORGIAN MTAVRULI CAPITAL LETTER GHAN 1CA7; C; 10E7; # GEORGIAN MTAVRULI CAPITAL LETTER QAR 1CA8; C; 10E8; # GEORGIAN MTAVRULI CAPITAL LETTER SHIN 1CA9; C; 10E9; # GEORGIAN MTAVRULI CAPITAL LETTER CHIN 1CAA; C; 10EA; # GEORGIAN MTAVRULI CAPITAL LETTER CAN 1CAB; C; 10EB; # GEORGIAN MTAVRULI CAPITAL LETTER JIL 1CAC; C; 10EC; # GEORGIAN MTAVRULI CAPITAL LETTER CIL 1CAD; C; 10ED; # GEORGIAN MTAVRULI CAPITAL LETTER CHAR 1CAE; C; 10EE; # GEORGIAN MTAVRULI CAPITAL LETTER XAN 1CAF; C; 10EF; # GEORGIAN MTAVRULI CAPITAL LETTER JHAN 1CB0; C; 10F0; # GEORGIAN MTAVRULI CAPITAL LETTER HAE 1CB1; C; 10F1; # GEORGIAN MTAVRULI CAPITAL LETTER HE 1CB2; C; 10F2; # GEORGIAN MTAVRULI CAPITAL LETTER HIE 1CB3; C; 10F3; # GEORGIAN MTAVRULI CAPITAL LETTER WE 1CB4; C; 10F4; # GEORGIAN MTAVRULI CAPITAL LETTER HAR 1CB5; C; 10F5; # GEORGIAN MTAVRULI CAPITAL LETTER HOE 1CB6; C; 10F6; # GEORGIAN MTAVRULI CAPITAL LETTER FI 1CB7; C; 10F7; # GEORGIAN MTAVRULI CAPITAL LETTER YN
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
true
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/cache.py
youtube_dl/cache.py
# coding: utf-8 from __future__ import unicode_literals import errno import json import os import re import shutil import traceback from .compat import ( compat_getenv, compat_open as open, compat_os_makedirs, ) from .utils import ( error_to_compat_str, escape_rfc3986, expand_path, is_outdated_version, traverse_obj, write_json_file, ) from .version import __version__ class Cache(object): _YTDL_DIR = 'youtube-dl' _VERSION_KEY = _YTDL_DIR + '_version' _DEFAULT_VERSION = '2021.12.17' def __init__(self, ydl): self._ydl = ydl def _write_debug(self, *args, **kwargs): self._ydl.write_debug(*args, **kwargs) def _report_warning(self, *args, **kwargs): self._ydl.report_warning(*args, **kwargs) def _to_screen(self, *args, **kwargs): self._ydl.to_screen(*args, **kwargs) def _get_param(self, k, default=None): return self._ydl.params.get(k, default) def _get_root_dir(self): res = self._get_param('cachedir') if res is None: cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache') res = os.path.join(cache_root, self._YTDL_DIR) return expand_path(res) def _get_cache_fn(self, section, key, dtype): assert re.match(r'^[\w.-]+$', section), \ 'invalid section %r' % section key = escape_rfc3986(key, safe='').replace('%', ',') # encode non-ascii characters return os.path.join( self._get_root_dir(), section, '%s.%s' % (key, dtype)) @property def enabled(self): return self._get_param('cachedir') is not False def store(self, section, key, data, dtype='json'): assert dtype in ('json',) if not self.enabled: return fn = self._get_cache_fn(section, key, dtype) try: compat_os_makedirs(os.path.dirname(fn), exist_ok=True) self._write_debug('Saving {section}.{key} to cache'.format(section=section, key=key)) write_json_file({self._VERSION_KEY: __version__, 'data': data}, fn) except Exception: tb = traceback.format_exc() self._report_warning('Writing cache to {fn!r} failed: {tb}'.format(fn=fn, tb=tb)) def clear(self, section, key, dtype='json'): if not self.enabled: return fn = 
self._get_cache_fn(section, key, dtype) self._write_debug('Clearing {section}.{key} from cache'.format(section=section, key=key)) try: os.remove(fn) except Exception as e: if getattr(e, 'errno') == errno.ENOENT: # file not found return tb = traceback.format_exc() self._report_warning('Clearing cache from {fn!r} failed: {tb}'.format(fn=fn, tb=tb)) def _validate(self, data, min_ver): version = traverse_obj(data, self._VERSION_KEY) if not version: # Backward compatibility data, version = {'data': data}, self._DEFAULT_VERSION if not is_outdated_version(version, min_ver or '0', assume_new=False): return data['data'] self._write_debug('Discarding old cache from version {version} (needs {min_ver})'.format(version=version, min_ver=min_ver)) def load(self, section, key, dtype='json', default=None, **kw_min_ver): assert dtype in ('json',) min_ver = kw_min_ver.get('min_ver') if not self.enabled: return default cache_fn = self._get_cache_fn(section, key, dtype) try: with open(cache_fn, encoding='utf-8') as cachef: self._write_debug('Loading {section}.{key} from cache'.format(section=section, key=key), only_once=True) return self._validate(json.load(cachef), min_ver) except (ValueError, KeyError): try: file_size = 'size: %d' % os.path.getsize(cache_fn) except (OSError, IOError) as oe: file_size = error_to_compat_str(oe) self._report_warning('Cache retrieval from %s failed (%s)' % (cache_fn, file_size)) except Exception as e: if getattr(e, 'errno') == errno.ENOENT: # no cache available return self._report_warning('Cache retrieval from %s failed' % (cache_fn,)) return default def remove(self): if not self.enabled: self._to_screen('Cache is disabled (Did you combine --no-cache-dir and --rm-cache-dir?)') return cachedir = self._get_root_dir() if not any((term in cachedir) for term in ('cache', 'tmp')): raise Exception('Not removing directory %s - this does not look like a cache dir' % (cachedir,)) self._to_screen( 'Removing cache dir %s .' 
% (cachedir,), skip_eol=True, ), if os.path.exists(cachedir): self._to_screen('.', skip_eol=True) shutil.rmtree(cachedir) self._to_screen('.')
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/traversal.py
youtube_dl/traversal.py
# coding: utf-8 # TODO: move these utils.fns here and move import to utils # flake8: noqa from .utils import ( dict_get, get_first, require, subs_list_to_dict, T, traverse_obj, unpack, value, )
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/options.py
youtube_dl/options.py
from __future__ import unicode_literals import os.path import optparse import re import sys from .downloader.external import list_external_downloaders from .compat import ( compat_expanduser, compat_get_terminal_size, compat_getenv, compat_kwargs, compat_open as open, compat_shlex_split, ) from .utils import ( preferredencoding, write_string, ) from .version import __version__ def _hide_login_info(opts): PRIVATE_OPTS = set(['-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username']) eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$') def _scrub_eq(o): m = eqre.match(o) if m: return m.group('key') + '=PRIVATE' else: return o opts = list(map(_scrub_eq, opts)) for idx, opt in enumerate(opts): if opt in PRIVATE_OPTS and idx + 1 < len(opts): opts[idx + 1] = 'PRIVATE' return opts def parseOpts(overrideArguments=None): def _readOptions(filename_bytes, default=[]): try: optionf = open(filename_bytes, encoding=preferredencoding()) except IOError: return default # silently skip if file is not present try: contents = optionf.read() res = compat_shlex_split(contents, comments=True) finally: optionf.close() return res def _readUserConf(): xdg_config_home = compat_getenv('XDG_CONFIG_HOME') if xdg_config_home: userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config') if not os.path.isfile(userConfFile): userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf') else: userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl', 'config') if not os.path.isfile(userConfFile): userConfFile = os.path.join(compat_expanduser('~'), '.config', 'youtube-dl.conf') userConf = _readOptions(userConfFile, None) if userConf is None: appdata_dir = compat_getenv('appdata') if appdata_dir: userConf = _readOptions( os.path.join(appdata_dir, 'youtube-dl', 'config'), default=None) if userConf is None: userConf = _readOptions( os.path.join(appdata_dir, 'youtube-dl', 'config.txt'), default=None) if 
userConf is None: userConf = _readOptions( os.path.join(compat_expanduser('~'), 'youtube-dl.conf'), default=None) if userConf is None: userConf = _readOptions( os.path.join(compat_expanduser('~'), 'youtube-dl.conf.txt'), default=None) if userConf is None: userConf = [] return userConf def _format_option_string(option): ''' ('-o', '--option') -> -o, --format METAVAR''' opts = [] if option._short_opts: opts.append(option._short_opts[0]) if option._long_opts: opts.append(option._long_opts[0]) if len(opts) > 1: opts.insert(1, ', ') if option.takes_value(): opts.append(' %s' % option.metavar) return ''.join(opts) def _comma_separated_values_options_callback(option, opt_str, value, parser): setattr(parser.values, option.dest, value.split(',')) # No need to wrap help messages if we're on a wide console columns = compat_get_terminal_size().columns max_width = columns if columns else 80 max_help_position = 80 fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position) fmt.format_option_strings = _format_option_string kw = { 'version': __version__, 'formatter': fmt, 'usage': '%prog [OPTIONS] URL [URL...]', 'conflict_handler': 'resolve', } parser = optparse.OptionParser(**compat_kwargs(kw)) general = optparse.OptionGroup(parser, 'General Options') general.add_option( '-h', '--help', action='help', help='Print this help text and exit') general.add_option( '--version', action='version', help='Print program version and exit') general.add_option( '-U', '--update', action='store_true', dest='update_self', help='Update this program to latest version. 
Make sure that you have sufficient permissions (run with sudo if needed)') general.add_option( '-i', '--ignore-errors', action='store_true', dest='ignoreerrors', default=False, help='Continue on download errors, for example to skip unavailable videos in a playlist') general.add_option( '--abort-on-error', action='store_false', dest='ignoreerrors', help='Abort downloading of further videos (in the playlist or the command line) if an error occurs') general.add_option( '--dump-user-agent', action='store_true', dest='dump_user_agent', default=False, help='Display the current browser identification') general.add_option( '--list-extractors', action='store_true', dest='list_extractors', default=False, help='List all supported extractors') general.add_option( '--extractor-descriptions', action='store_true', dest='list_extractor_descriptions', default=False, help='Output descriptions of all supported extractors') general.add_option( '--force-generic-extractor', action='store_true', dest='force_generic_extractor', default=False, help='Force extraction to use the generic extractor') general.add_option( '--default-search', dest='default_search', metavar='PREFIX', help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.') general.add_option( '--ignore-config', action='store_true', help='Do not read configuration files. 
' 'When given in the global configuration file /etc/youtube-dl.conf: ' 'Do not read the user configuration in ~/.config/youtube-dl/config ' '(%APPDATA%/youtube-dl/config.txt on Windows)') general.add_option( '--config-location', dest='config_location', metavar='PATH', help='Location of the configuration file; either the path to the config or its containing directory.') general.add_option( '--flat-playlist', action='store_const', dest='extract_flat', const='in_playlist', default=False, help='Do not extract the videos of a playlist, only list them.') general.add_option( '--mark-watched', action='store_true', dest='mark_watched', default=False, help='Mark videos watched (YouTube only)') general.add_option( '--no-mark-watched', action='store_false', dest='mark_watched', default=False, help='Do not mark videos watched (YouTube only)') general.add_option( '--no-color', '--no-colors', action='store_true', dest='no_color', default=False, help='Do not emit color codes in output') network = optparse.OptionGroup(parser, 'Network Options') network.add_option( '--proxy', dest='proxy', default=None, metavar='URL', help='Use the specified HTTP/HTTPS/SOCKS proxy. To enable ' 'SOCKS proxy, specify a proper scheme. For example ' 'socks5://127.0.0.1:1080/. 
Pass in an empty string (--proxy "") ' 'for direct connection') network.add_option( '--socket-timeout', dest='socket_timeout', type=float, default=None, metavar='SECONDS', help='Time to wait before giving up, in seconds') network.add_option( '--source-address', metavar='IP', dest='source_address', default=None, help='Client-side IP address to bind to', ) network.add_option( '-4', '--force-ipv4', action='store_const', const='0.0.0.0', dest='source_address', help='Make all connections via IPv4', ) network.add_option( '-6', '--force-ipv6', action='store_const', const='::', dest='source_address', help='Make all connections via IPv6', ) geo = optparse.OptionGroup(parser, 'Geo Restriction') geo.add_option( '--geo-verification-proxy', dest='geo_verification_proxy', default=None, metavar='URL', help='Use this proxy to verify the IP address for some geo-restricted sites. ' 'The default proxy specified by --proxy (or none, if the option is not present) is used for the actual downloading.') geo.add_option( '--cn-verification-proxy', dest='cn_verification_proxy', default=None, metavar='URL', help=optparse.SUPPRESS_HELP) geo.add_option( '--geo-bypass', action='store_true', dest='geo_bypass', default=True, help='Bypass geographic restriction via faking X-Forwarded-For HTTP header') geo.add_option( '--no-geo-bypass', action='store_false', dest='geo_bypass', default=True, help='Do not bypass geographic restriction via faking X-Forwarded-For HTTP header') geo.add_option( '--geo-bypass-country', metavar='CODE', dest='geo_bypass_country', default=None, help='Force bypass geographic restriction with explicitly provided two-letter ISO 3166-2 country code') geo.add_option( '--geo-bypass-ip-block', metavar='IP_BLOCK', dest='geo_bypass_ip_block', default=None, help='Force bypass geographic restriction with explicitly provided IP block in CIDR notation') selection = optparse.OptionGroup(parser, 'Video Selection') selection.add_option( '--playlist-start', dest='playliststart', 
metavar='NUMBER', default=1, type=int, help='Playlist video to start at (default is %default)') selection.add_option( '--playlist-end', dest='playlistend', metavar='NUMBER', default=None, type=int, help='Playlist video to end at (default is last)') selection.add_option( '--playlist-items', dest='playlist_items', metavar='ITEM_SPEC', default=None, help='Playlist video items to download. Specify indices of the videos in the playlist separated by commas like: "--playlist-items 1,2,5,8" if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can specify range: "--playlist-items 1-3,7,10-13", it will download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.') selection.add_option( '--match-title', dest='matchtitle', metavar='REGEX', help='Download only matching titles (case-insensitive regex or alphanumeric sub-string)') selection.add_option( '--reject-title', dest='rejecttitle', metavar='REGEX', help='Skip download for matching titles (case-insensitive regex or alphanumeric sub-string)') selection.add_option( '--max-downloads', dest='max_downloads', metavar='NUMBER', type=int, default=None, help='Abort after downloading NUMBER files') selection.add_option( '--min-filesize', metavar='SIZE', dest='min_filesize', default=None, help='Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)') selection.add_option( '--max-filesize', metavar='SIZE', dest='max_filesize', default=None, help='Do not download any videos larger than SIZE (e.g. 50k or 44.6m)') selection.add_option( '--date', metavar='DATE', dest='date', default=None, help='Download only videos uploaded in this date') selection.add_option( '--datebefore', metavar='DATE', dest='datebefore', default=None, help='Download only videos uploaded on or before this date (i.e. inclusive)') selection.add_option( '--dateafter', metavar='DATE', dest='dateafter', default=None, help='Download only videos uploaded on or after this date (i.e. 
inclusive)') selection.add_option( '--min-views', metavar='COUNT', dest='min_views', default=None, type=int, help='Do not download any videos with less than COUNT views') selection.add_option( '--max-views', metavar='COUNT', dest='max_views', default=None, type=int, help='Do not download any videos with more than COUNT views') selection.add_option( '--match-filter', metavar='FILTER', dest='match_filter', default=None, help=( 'Generic video filter. ' 'Specify any key (see the "OUTPUT TEMPLATE" for a list of available keys) to ' 'match if the key is present, ' '!key to check if the key is not present, ' 'key > NUMBER (like "comment_count > 12", also works with ' '>=, <, <=, !=, =) to compare against a number, ' 'key = \'LITERAL\' (like "uploader = \'Mike Smith\'", also works with !=) ' 'to match against a string literal ' 'and & to require multiple matches. ' 'Values which are not known are excluded unless you ' 'put a question mark (?) after the operator. ' 'For example, to only match videos that have been liked more than ' '100 times and disliked less than 50 times (or the dislike ' 'functionality is not available at the given service), but who ' 'also have a description, use --match-filter ' '"like_count > 100 & dislike_count <? 50 & description" .' )) selection.add_option( '--no-playlist', action='store_true', dest='noplaylist', default=False, help='Download only the video, if the URL refers to a video and a playlist.') selection.add_option( '--yes-playlist', action='store_false', dest='noplaylist', default=False, help='Download the playlist, if the URL refers to a video and a playlist.') selection.add_option( '--age-limit', metavar='YEARS', dest='age_limit', default=None, type=int, help='Download only videos suitable for the given age') selection.add_option( '--download-archive', metavar='FILE', dest='download_archive', help='Download only videos not listed in the archive file. 
Record the IDs of all downloaded videos in it.') selection.add_option( '--include-ads', dest='include_ads', action='store_true', help='Download advertisements as well (experimental)') authentication = optparse.OptionGroup(parser, 'Authentication Options') authentication.add_option( '-u', '--username', dest='username', metavar='USERNAME', help='Login with this account ID') authentication.add_option( '-p', '--password', dest='password', metavar='PASSWORD', help='Account password. If this option is left out, youtube-dl will ask interactively.') authentication.add_option( '-2', '--twofactor', dest='twofactor', metavar='TWOFACTOR', help='Two-factor authentication code') authentication.add_option( '-n', '--netrc', action='store_true', dest='usenetrc', default=False, help='Use .netrc authentication data') authentication.add_option( '--video-password', dest='videopassword', metavar='PASSWORD', help='Video password (vimeo, youku)') adobe_pass = optparse.OptionGroup(parser, 'Adobe Pass Options') adobe_pass.add_option( '--ap-mso', dest='ap_mso', metavar='MSO', help='Adobe Pass multiple-system operator (TV provider) identifier, use --ap-list-mso for a list of available MSOs') adobe_pass.add_option( '--ap-username', dest='ap_username', metavar='USERNAME', help='Multiple-system operator account login') adobe_pass.add_option( '--ap-password', dest='ap_password', metavar='PASSWORD', help='Multiple-system operator account password. 
If this option is left out, youtube-dl will ask interactively.') adobe_pass.add_option( '--ap-list-mso', action='store_true', dest='ap_list_mso', default=False, help='List all supported multiple-system operators') video_format = optparse.OptionGroup(parser, 'Video Format Options') video_format.add_option( '-f', '--format', action='store', dest='format', metavar='FORMAT', default=None, help='Video format code, see the "FORMAT SELECTION" for all the info') video_format.add_option( '--all-formats', action='store_const', dest='format', const='all', help='Download all available video formats') video_format.add_option( '--prefer-free-formats', action='store_true', dest='prefer_free_formats', default=False, help='Prefer free video formats unless a specific one is requested') video_format.add_option( '-F', '--list-formats', action='store_true', dest='listformats', help='List all available formats of requested videos') video_format.add_option( '--no-list-formats', action='store_false', dest='listformats', help='Do not list available formats of requested videos (default)') video_format.add_option( '--youtube-include-dash-manifest', action='store_true', dest='youtube_include_dash_manifest', default=True, help=optparse.SUPPRESS_HELP) video_format.add_option( '--youtube-skip-dash-manifest', action='store_false', dest='youtube_include_dash_manifest', help='Do not download the DASH manifests and related data on YouTube videos') video_format.add_option( '--youtube-player-js-variant', action='store', dest='youtube_player_js_variant', help='For YouTube, the player javascript variant to use for n/sig deciphering; `actual` to follow the site; default `%default`.', choices=('actual', 'main', 'tcc', 'tce', 'es5', 'es6', 'tv', 'tv_es6', 'phone', 'tablet'), default='actual', metavar='VARIANT') video_format.add_option( '--youtube-player-js-version', action='store', dest='youtube_player_js_version', help='For YouTube, the player javascript version to use for n/sig deciphering, specified as 
`signature_timestamp@hash`, or `actual` to follow the site; default `%default`', default='actual', metavar='STS@HASH') video_format.add_option( '--merge-output-format', action='store', dest='merge_output_format', metavar='FORMAT', default=None, help=( 'If a merge is required (e.g. bestvideo+bestaudio), ' 'output to given container format. One of mkv, mp4, ogg, webm, flv. ' 'Ignored if no merge is required')) subtitles = optparse.OptionGroup(parser, 'Subtitle Options') subtitles.add_option( '--write-sub', '--write-srt', action='store_true', dest='writesubtitles', default=False, help='Write subtitle file') subtitles.add_option( '--write-auto-sub', '--write-automatic-sub', action='store_true', dest='writeautomaticsub', default=False, help='Write automatically generated subtitle file (YouTube only)') subtitles.add_option( '--all-subs', action='store_true', dest='allsubtitles', default=False, help='Download all the available subtitles of the video') subtitles.add_option( '--list-subs', action='store_true', dest='listsubtitles', default=False, help='List all available subtitles for the video') subtitles.add_option( '--sub-format', action='store', dest='subtitlesformat', metavar='FORMAT', default='best', help='Subtitle format, accepts formats preference, for example: "srt" or "ass/srt/best"') subtitles.add_option( '--sub-lang', '--sub-langs', '--srt-lang', action='callback', dest='subtitleslangs', metavar='LANGS', type='str', default=[], callback=_comma_separated_values_options_callback, help='Languages of the subtitles to download (optional) separated by commas, use --list-subs for available language tags') downloader = optparse.OptionGroup(parser, 'Download Options') downloader.add_option( '-r', '--limit-rate', '--rate-limit', dest='ratelimit', metavar='RATE', help='Maximum download rate in bytes per second (e.g. 
50K or 4.2M)') downloader.add_option( '-R', '--retries', dest='retries', metavar='RETRIES', default=10, help='Number of retries (default is %default), or "infinite".') downloader.add_option( '--fragment-retries', dest='fragment_retries', metavar='RETRIES', default=10, help='Number of retries for a fragment (default is %default), or "infinite" (DASH, hlsnative and ISM)') downloader.add_option( '--skip-unavailable-fragments', action='store_true', dest='skip_unavailable_fragments', default=True, help='Skip unavailable fragments (DASH, hlsnative and ISM)') downloader.add_option( '--abort-on-unavailable-fragment', action='store_false', dest='skip_unavailable_fragments', help='Abort downloading when some fragment is not available') downloader.add_option( '--keep-fragments', action='store_true', dest='keep_fragments', default=False, help='Keep downloaded fragments on disk after downloading is finished; fragments are erased by default') downloader.add_option( '--buffer-size', dest='buffersize', metavar='SIZE', default='1024', help='Size of download buffer (e.g. 1024 or 16K) (default is %default)') downloader.add_option( '--no-resize-buffer', action='store_true', dest='noresizebuffer', default=False, help='Do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.') downloader.add_option( '--http-chunk-size', dest='http_chunk_size', metavar='SIZE', default=None, help='Size of a chunk for chunk-based HTTP downloading (e.g. 10485760 or 10M) (default is disabled). 
' 'May be useful for bypassing bandwidth throttling imposed by a webserver (experimental)') downloader.add_option( '--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP) downloader.add_option( '--playlist-reverse', action='store_true', help='Download playlist videos in reverse order') downloader.add_option( '--playlist-random', action='store_true', help='Download playlist videos in random order') downloader.add_option( '--xattr-set-filesize', dest='xattr_set_filesize', action='store_true', help='Set file xattribute ytdl.filesize with expected file size') downloader.add_option( '--hls-prefer-native', dest='hls_prefer_native', action='store_true', default=None, help='Use the native HLS downloader instead of ffmpeg') downloader.add_option( '--hls-prefer-ffmpeg', dest='hls_prefer_native', action='store_false', default=None, help='Use ffmpeg instead of the native HLS downloader') downloader.add_option( '--hls-use-mpegts', dest='hls_use_mpegts', action='store_true', help='Use the mpegts container for HLS videos, allowing to play the ' 'video while downloading (some players may not be able to play it)') downloader.add_option( '--external-downloader', dest='external_downloader', metavar='COMMAND', help='Use the specified external downloader. 
' 'Currently supports %s' % ','.join(list_external_downloaders())) downloader.add_option( '--external-downloader-args', dest='external_downloader_args', metavar='ARGS', help='Give these arguments to the external downloader') workarounds = optparse.OptionGroup(parser, 'Workarounds') workarounds.add_option( '--encoding', dest='encoding', metavar='ENCODING', help='Force the specified encoding (experimental)') workarounds.add_option( '--no-check-certificate', action='store_true', dest='no_check_certificate', default=False, help='Suppress HTTPS certificate validation') workarounds.add_option( '--no-check-extensions', action='store_true', dest='no_check_extensions', default=False, help='Suppress file extension validation') workarounds.add_option( '--prefer-insecure', '--prefer-unsecure', action='store_true', dest='prefer_insecure', help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)') workarounds.add_option( '--user-agent', metavar='UA', dest='user_agent', help='Specify a custom user agent') workarounds.add_option( '--referer', metavar='URL', dest='referer', default=None, help='Specify a custom Referer: use if the video access is restricted to one domain', ) workarounds.add_option( '--add-header', metavar='FIELD:VALUE', dest='headers', action='append', help=('Specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times. ' 'NB Use --cookies rather than adding a Cookie header if its contents may be sensitive; ' 'data from a Cookie header will be sent to all domains, not just the one intended') ) workarounds.add_option( '--bidi-workaround', dest='bidi_workaround', action='store_true', help='Work around terminals that lack bidirectional text support. 
Requires bidiv or fribidi executable in PATH') workarounds.add_option( '--sleep-interval', '--min-sleep-interval', metavar='SECONDS', dest='sleep_interval', type=float, help=( 'Number of seconds to sleep before each download when used alone ' 'or a lower bound of a range for randomized sleep before each download ' '(minimum possible number of seconds to sleep) when used along with ' '--max-sleep-interval.')) workarounds.add_option( '--max-sleep-interval', metavar='SECONDS', dest='max_sleep_interval', type=float, help=( 'Upper bound of a range for randomized sleep before each download ' '(maximum possible number of seconds to sleep). Must only be used ' 'along with --min-sleep-interval.')) verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options') verbosity.add_option( '-q', '--quiet', action='store_true', dest='quiet', default=False, help='Activate quiet mode') verbosity.add_option( '--no-warnings', dest='no_warnings', action='store_true', default=False, help='Ignore warnings') verbosity.add_option( '-s', '--simulate', action='store_true', dest='simulate', default=False, help='Do not download the video and do not write anything to disk') verbosity.add_option( '--skip-download', action='store_true', dest='skip_download', default=False, help='Do not download the video') verbosity.add_option( '-g', '--get-url', action='store_true', dest='geturl', default=False, help='Simulate, quiet but print URL') verbosity.add_option( '-e', '--get-title', action='store_true', dest='gettitle', default=False, help='Simulate, quiet but print title') verbosity.add_option( '--get-id', action='store_true', dest='getid', default=False, help='Simulate, quiet but print id') verbosity.add_option( '--get-thumbnail', action='store_true', dest='getthumbnail', default=False, help='Simulate, quiet but print thumbnail URL') verbosity.add_option( '--get-description', action='store_true', dest='getdescription', default=False, help='Simulate, quiet but print video description') 
verbosity.add_option( '--get-duration', action='store_true', dest='getduration', default=False, help='Simulate, quiet but print video length') verbosity.add_option( '--get-filename', action='store_true', dest='getfilename', default=False, help='Simulate, quiet but print output filename') verbosity.add_option( '--get-format', action='store_true', dest='getformat', default=False, help='Simulate, quiet but print output format') verbosity.add_option( '-j', '--dump-json', action='store_true', dest='dumpjson', default=False, help='Simulate, quiet but print JSON information. See the "OUTPUT TEMPLATE" for a description of available keys.') verbosity.add_option( '-J', '--dump-single-json', action='store_true', dest='dump_single_json', default=False, help='Simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist information in a single line.') verbosity.add_option( '--print-json', action='store_true', dest='print_json', default=False, help='Be quiet and print the video information as JSON (video is still being downloaded).', ) verbosity.add_option( '--newline', action='store_true', dest='progress_with_newline', default=False, help='Output progress bar as new lines') verbosity.add_option( '--no-progress', action='store_true', dest='noprogress', default=False, help='Do not print progress bar') verbosity.add_option( '--console-title', action='store_true', dest='consoletitle', default=False, help='Display progress in console titlebar') verbosity.add_option( '-v', '--verbose', action='store_true', dest='verbose', default=False, help='Print various debugging information') verbosity.add_option( '--dump-pages', '--dump-intermediate-pages', action='store_true', dest='dump_intermediate_pages', default=False, help='Print downloaded pages encoded using base64 to debug problems (very verbose)') verbosity.add_option( '--write-pages', action='store_true', dest='write_pages', default=False, help='Write downloaded 
intermediary pages to files in the current directory to debug problems') verbosity.add_option( '--youtube-print-sig-code', action='store_true', dest='youtube_print_sig_code', default=False, help=optparse.SUPPRESS_HELP) verbosity.add_option( '--print-traffic', '--dump-headers', dest='debug_printtraffic', action='store_true', default=False, help='Display sent and read HTTP traffic') verbosity.add_option( '-C', '--call-home', dest='call_home', action='store_true', default=False, help='Contact the youtube-dl server for debugging') verbosity.add_option( '--no-call-home', dest='call_home', action='store_false', default=False, help='Do NOT contact the youtube-dl server for debugging') filesystem = optparse.OptionGroup(parser, 'Filesystem Options') filesystem.add_option( '-a', '--batch-file', dest='batchfile', metavar='FILE', help="File containing URLs to download ('-' for stdin), one URL per line. " "Lines starting with '#', ';' or ']' are considered as comments and ignored.") filesystem.add_option( '--id', default=False, action='store_true', dest='useid', help='Use only video ID in file name') filesystem.add_option( '-o', '--output', dest='outtmpl', metavar='TEMPLATE', help=('Output filename template, see the "OUTPUT TEMPLATE" for all the info')) filesystem.add_option( '--output-na-placeholder', dest='outtmpl_na_placeholder', metavar='PLACEHOLDER', default='NA', help=('Placeholder value for unavailable meta fields in output filename template (default is "%default")'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
true
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/odnoklassniki.py
youtube_dl/extractor/odnoklassniki.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_etree_fromstring, compat_parse_qs, compat_urllib_parse_unquote, compat_urllib_parse_urlparse, ) from ..utils import ( ExtractorError, unified_strdate, int_or_none, qualities, unescapeHTML, urlencode_postdata, ) class OdnoklassnikiIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?:(?:www|m|mobile)\.)? (?:odnoklassniki|ok)\.ru/ (?: video(?:embed)?/| web-api/video/moviePlayer/| live/| dk\?.*?st\.mvId= ) (?P<id>[\d-]+) ''' _TESTS = [{ # metadata in JSON 'url': 'http://ok.ru/video/20079905452', 'md5': '0b62089b479e06681abaaca9d204f152', 'info_dict': { 'id': '20079905452', 'ext': 'mp4', 'title': 'Культура меняет нас (прекрасный ролик!))', 'duration': 100, 'upload_date': '20141207', 'uploader_id': '330537914540', 'uploader': 'Виталий Добровольский', 'like_count': int, 'age_limit': 0, }, }, { # metadataUrl 'url': 'http://ok.ru/video/63567059965189-0?fromTime=5', 'md5': '6ff470ea2dd51d5d18c295a355b0b6bc', 'info_dict': { 'id': '63567059965189-0', 'ext': 'mp4', 'title': 'Девушка без комплексов ...', 'duration': 191, 'upload_date': '20150518', 'uploader_id': '534380003155', 'uploader': '☭ Андрей Мещанинов ☭', 'like_count': int, 'age_limit': 0, 'start_time': 5, }, }, { # YouTube embed (metadataUrl, provider == USER_YOUTUBE) 'url': 'http://ok.ru/video/64211978996595-1', 'md5': '2f206894ffb5dbfcce2c5a14b909eea5', 'info_dict': { 'id': 'V_VztHT5BzY', 'ext': 'mp4', 'title': 'Космическая среда от 26 августа 2015', 'description': 'md5:848eb8b85e5e3471a3a803dae1343ed0', 'duration': 440, 'upload_date': '20150826', 'uploader_id': 'tvroscosmos', 'uploader': 'Телестудия Роскосмоса', 'age_limit': 0, }, }, { # YouTube embed (metadata, provider == USER_YOUTUBE, no metadata.movie.title field) 'url': 'http://ok.ru/video/62036049272859-0', 'info_dict': { 'id': '62036049272859-0', 'ext': 'mp4', 'title': 'МУЗЫКА ДОЖДЯ .', 'description': 
'md5:6f1867132bd96e33bf53eda1091e8ed0', 'upload_date': '20120106', 'uploader_id': '473534735899', 'uploader': 'МARINA D', 'age_limit': 0, }, 'params': { 'skip_download': True, }, 'skip': 'Video has not been found', }, { 'url': 'http://ok.ru/web-api/video/moviePlayer/20079905452', 'only_matching': True, }, { 'url': 'http://www.ok.ru/video/20648036891', 'only_matching': True, }, { 'url': 'http://www.ok.ru/videoembed/20648036891', 'only_matching': True, }, { 'url': 'http://m.ok.ru/video/20079905452', 'only_matching': True, }, { 'url': 'http://mobile.ok.ru/video/20079905452', 'only_matching': True, }, { 'url': 'https://www.ok.ru/live/484531969818', 'only_matching': True, }, { 'url': 'https://m.ok.ru/dk?st.cmd=movieLayer&st.discId=863789452017&st.retLoc=friend&st.rtu=%2Fdk%3Fst.cmd%3DfriendMovies%26st.mode%3Down%26st.mrkId%3D%257B%2522uploadedMovieMarker%2522%253A%257B%2522marker%2522%253A%25221519410114503%2522%252C%2522hasMore%2522%253Atrue%257D%252C%2522sharedMovieMarker%2522%253A%257B%2522marker%2522%253Anull%252C%2522hasMore%2522%253Afalse%257D%257D%26st.friendId%3D561722190321%26st.frwd%3Don%26_prevCmd%3DfriendMovies%26tkn%3D7257&st.discType=MOVIE&st.mvId=863789452017&_prevCmd=friendMovies&tkn=3648#lst#', 'only_matching': True, }, { # Paid video 'url': 'https://ok.ru/video/954886983203', 'only_matching': True, }] @staticmethod def _extract_url(webpage): mobj = re.search( r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:odnoklassniki|ok)\.ru/videoembed/.+?)\1', webpage) if mobj: return mobj.group('url') def _real_extract(self, url): start_time = int_or_none(compat_parse_qs( compat_urllib_parse_urlparse(url).query).get('fromTime', [None])[0]) video_id = self._match_id(url) webpage = self._download_webpage( 'http://ok.ru/video/%s' % video_id, video_id) error = self._search_regex( r'[^>]+class="vp_video_stub_txt"[^>]*>([^<]+)<', webpage, 'error', default=None) if error: raise ExtractorError(error, expected=True) player = self._parse_json( 
unescapeHTML(self._search_regex( r'data-options=(?P<quote>["\'])(?P<player>{.+?%s.+?})(?P=quote)' % video_id, webpage, 'player', group='player')), video_id) flashvars = player['flashvars'] metadata = flashvars.get('metadata') if metadata: metadata = self._parse_json(metadata, video_id) else: data = {} st_location = flashvars.get('location') if st_location: data['st.location'] = st_location metadata = self._download_json( compat_urllib_parse_unquote(flashvars['metadataUrl']), video_id, 'Downloading metadata JSON', data=urlencode_postdata(data)) movie = metadata['movie'] # Some embedded videos may not contain title in movie dict (e.g. # http://ok.ru/video/62036049272859-0) thus we allow missing title # here and it's going to be extracted later by an extractor that # will process the actual embed. provider = metadata.get('provider') title = movie['title'] if provider == 'UPLOADED_ODKL' else movie.get('title') thumbnail = movie.get('poster') duration = int_or_none(movie.get('duration')) author = metadata.get('author', {}) uploader_id = author.get('id') uploader = author.get('name') upload_date = unified_strdate(self._html_search_meta( 'ya:ovs:upload_date', webpage, 'upload date', default=None)) age_limit = None adult = self._html_search_meta( 'ya:ovs:adult', webpage, 'age limit', default=None) if adult: age_limit = 18 if adult == 'true' else 0 like_count = int_or_none(metadata.get('likeCount')) info = { 'id': video_id, 'title': title, 'thumbnail': thumbnail, 'duration': duration, 'upload_date': upload_date, 'uploader': uploader, 'uploader_id': uploader_id, 'like_count': like_count, 'age_limit': age_limit, 'start_time': start_time, } if provider == 'USER_YOUTUBE': info.update({ '_type': 'url_transparent', 'url': movie['contentId'], }) return info assert title if provider == 'LIVE_TV_APP': info['title'] = self._live_title(title) quality = qualities(('4', '0', '1', '2', '3', '5')) formats = [{ 'url': f['url'], 'ext': 'mp4', 'format_id': f['name'], } for f in 
metadata['videos']] m3u8_url = metadata.get('hlsManifestUrl') if m3u8_url: formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) dash_manifest = metadata.get('metadataEmbedded') if dash_manifest: formats.extend(self._parse_mpd_formats( compat_etree_fromstring(dash_manifest), 'mpd')) for fmt in formats: fmt_type = self._search_regex( r'\btype[/=](\d)', fmt['url'], 'format type', default=None) if fmt_type: fmt['quality'] = quality(fmt_type) # Live formats m3u8_url = metadata.get('hlsMasterPlaylistUrl') if m3u8_url: formats.extend(self._extract_m3u8_formats( m3u8_url, video_id, 'mp4', entry_protocol='m3u8', m3u8_id='hls', fatal=False)) rtmp_url = metadata.get('rtmpUrl') if rtmp_url: formats.append({ 'url': rtmp_url, 'format_id': 'rtmp', 'ext': 'flv', }) if not formats: payment_info = metadata.get('paymentInfo') if payment_info: raise ExtractorError('This video is paid, subscribe to download it', expected=True) self._sort_formats(formats) info['formats'] = formats return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/thisvid.py
youtube_dl/extractor/thisvid.py
# coding: utf-8 from __future__ import unicode_literals import re import itertools from .common import InfoExtractor from ..compat import ( compat_urlparse, ) from ..utils import ( clean_html, get_element_by_class, int_or_none, merge_dicts, url_or_none, urljoin, ) class ThisVidIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?thisvid\.com/(?P<type>videos|embed)/(?P<id>[A-Za-z0-9-]+)' _TESTS = [{ 'url': 'https://thisvid.com/videos/sitting-on-ball-tight-jeans/', 'md5': '839becb572995687e11a69dc4358a386', 'info_dict': { 'id': '3533241', 'ext': 'mp4', 'title': 'Sitting on ball tight jeans', 'description': 'md5:372353bb995883d1b65fddf507489acd', 'thumbnail': r're:https?://\w+\.thisvid\.com/(?:[^/]+/)+3533241/preview\.jpg', 'uploader_id': '150629', 'uploader': 'jeanslevisjeans', 'age_limit': 18, } }, { 'url': 'https://thisvid.com/embed/3533241/', 'md5': '839becb572995687e11a69dc4358a386', 'info_dict': { 'id': '3533241', 'ext': 'mp4', 'title': 'Sitting on ball tight jeans', 'thumbnail': r're:https?://\w+\.thisvid\.com/(?:[^/]+/)+3533241/preview\.jpg', 'uploader_id': '150629', 'uploader': 'jeanslevisjeans', 'age_limit': 18, } }] def _real_extract(self, url): main_id, type_ = re.match(self._VALID_URL, url).group('id', 'type') webpage = self._download_webpage(url, main_id) title = self._html_search_regex( r'<title\b[^>]*?>(?:Video:\s+)?(.+?)(?:\s+-\s+ThisVid(?:\.com| tube))?</title>', webpage, 'title') if type_ == 'embed': # look for more metadata video_alt_url = url_or_none(self._search_regex( r'''video_alt_url\s*:\s+'(%s/)',''' % (self._VALID_URL, ), webpage, 'video_alt_url', default=None)) if video_alt_url and video_alt_url != url: webpage = self._download_webpage( video_alt_url, main_id, note='Redirecting embed to main page', fatal=False) or webpage video_holder = get_element_by_class('video-holder', webpage) or '' if '>This video is a private video' in video_holder: self.raise_login_required( (clean_html(video_holder) or 'Private video').split('\n', 1)[0]) uploader = 
self._html_search_regex( r'''(?s)<span\b[^>]*>Added by:\s*</span><a\b[^>]+\bclass\s*=\s*["']author\b[^>]+\bhref\s*=\s*["']https://thisvid\.com/members/([0-9]+/.{3,}?)\s*</a>''', webpage, 'uploader', default='') uploader = re.split(r'''/["'][^>]*>\s*''', uploader) if len(uploader) == 2: # id must be non-empty, uploader could be '' uploader_id, uploader = uploader uploader = uploader or None else: uploader_id = uploader = None return merge_dicts({ '_type': 'url_transparent', 'title': title, 'age_limit': 18, 'uploader': uploader, 'uploader_id': uploader_id, }, self.url_result(url, ie='Generic')) class ThisVidMemberIE(InfoExtractor): _VALID_URL = r'https?://thisvid\.com/members/(?P<id>\d+)' _TESTS = [{ 'url': 'https://thisvid.com/members/2140501/', 'info_dict': { 'id': '2140501', 'title': 'Rafflesia\'s Profile', }, 'playlist_mincount': 16, }, { 'url': 'https://thisvid.com/members/2140501/favourite_videos/', 'info_dict': { 'id': '2140501', 'title': 'Rafflesia\'s Favourite Videos', }, 'playlist_mincount': 15, }, { 'url': 'https://thisvid.com/members/636468/public_videos/', 'info_dict': { 'id': '636468', 'title': 'Happymouth\'s Public Videos', }, 'playlist_mincount': 196, }, ] def _urls(self, html): for m in re.finditer(r'''<a\b[^>]+\bhref\s*=\s*["'](?P<url>%s\b)[^>]+>''' % (ThisVidIE._VALID_URL, ), html): yield m.group('url') def _real_extract(self, url): pl_id = self._match_id(url) webpage = self._download_webpage(url, pl_id) title = re.split( r'(?i)\s*\|\s*ThisVid\.com\s*$', self._og_search_title(webpage, default=None) or self._html_search_regex(r'(?s)<title\b[^>]*>(.+?)</title', webpage, 'title', fatal=False) or '', 1)[0] or None def entries(page_url, html=None): for page in itertools.count(1): if not html: html = self._download_webpage( page_url, pl_id, note='Downloading page %d' % (page, ), fatal=False) or '' for u in self._urls(html): yield u next_page = get_element_by_class('pagination-next', html) or '' if next_page: # member list page next_page = urljoin(url, 
self._search_regex( r'''<a\b[^>]+\bhref\s*=\s*("|')(?P<url>(?!#)(?:(?!\1).)+)''', next_page, 'next page link', group='url', default=None)) # in case a member page should have pagination-next with empty link, not just `else:` if next_page is None: # playlist page parsed_url = compat_urlparse.urlparse(page_url) base_path, num = parsed_url.path.rsplit('/', 1) num = int_or_none(num) if num is None: base_path, num = parsed_url.path.rstrip('/'), 1 parsed_url = parsed_url._replace(path=base_path + ('/%d' % (num + 1, ))) next_page = compat_urlparse.urlunparse(parsed_url) if page_url == next_page: next_page = None if not next_page: break page_url, html = next_page, None return self.playlist_from_matches( entries(url, webpage), playlist_id=pl_id, playlist_title=title, ie='ThisVid') class ThisVidPlaylistIE(ThisVidMemberIE): _VALID_URL = r'https?://thisvid\.com/playlist/(?P<id>\d+)/video/(?P<video_id>[A-Za-z0-9-]+)' _TESTS = [{ 'url': 'https://thisvid.com/playlist/6615/video/big-italian-booty-28/', 'info_dict': { 'id': '6615', 'title': 'Underwear Stuff', }, 'playlist_mincount': 200, }, { 'url': 'https://thisvid.com/playlist/6615/video/big-italian-booty-28/', 'info_dict': { 'id': '1072387', 'ext': 'mp4', 'title': 'Big Italian Booty 28', 'description': 'md5:1bccf7b13765e18fb27bf764dba7ede2', 'uploader_id': '367912', 'uploader': 'Jcmusclefun', 'age_limit': 18, }, 'params': { 'noplaylist': True, }, }] def _get_video_url(self, pl_url): video_id = re.match(self._VALID_URL, pl_url).group('video_id') return urljoin(pl_url, '/videos/%s/' % (video_id, )) def _urls(self, html): for m in re.finditer(r'''<a\b[^>]+\bhref\s*=\s*["'](?P<url>%s\b)[^>]+>''' % (self._VALID_URL, ), html): yield self._get_video_url(m.group('url')) def _real_extract(self, url): pl_id = self._match_id(url) if self._downloader.params.get('noplaylist'): self.to_screen('Downloading just the featured video because of --no-playlist') return self.url_result(self._get_video_url(url), 'ThisVid') self.to_screen( 'Downloading 
playlist %s - add --no-playlist to download just the featured video' % (pl_id, )) result = super(ThisVidPlaylistIE, self)._real_extract(url) # rework title returned as `the title - the title` title = result['title'] t_len = len(title) if t_len > 5 and t_len % 2 != 0: t_len = t_len // 2 if title[t_len] == '-': title = [t.strip() for t in (title[:t_len], title[t_len + 1:])] if title[0] and title[0] == title[1]: result['title'] = title[0] return result
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/metacafe.py
youtube_dl/extractor/metacafe.py
from __future__ import unicode_literals import json import re from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_urllib_parse, compat_urllib_parse_unquote, ) from ..utils import ( determine_ext, ExtractorError, int_or_none, get_element_by_attribute, mimetype2ext, ) class MetacafeIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?metacafe\.com/watch/(?P<video_id>[^/]+)/(?P<display_id>[^/?#]+)' _DISCLAIMER = 'http://www.metacafe.com/family_filter/' _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user' IE_NAME = 'metacafe' _TESTS = [ # Youtube video { 'add_ie': ['Youtube'], 'url': 'http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/', 'info_dict': { 'id': '_aUehQsCQtM', 'ext': 'mp4', 'upload_date': '20090102', 'title': 'The Electric Company | "Short I" | PBS KIDS GO!', 'description': 'md5:2439a8ef6d5a70e380c22f5ad323e5a8', 'uploader': 'PBS', 'uploader_id': 'PBS' } }, # Normal metacafe video { 'url': 'http://www.metacafe.com/watch/11121940/news_stuff_you_wont_do_with_your_playstation_4/', 'md5': '6e0bca200eaad2552e6915ed6fd4d9ad', 'info_dict': { 'id': '11121940', 'ext': 'mp4', 'title': 'News: Stuff You Won\'t Do with Your PlayStation 4', 'uploader': 'ign', 'description': 'Sony released a massive FAQ on the PlayStation Blog detailing the PS4\'s capabilities and limitations.', }, 'skip': 'Page is temporarily unavailable.', }, # metacafe video with family filter { 'url': 'http://www.metacafe.com/watch/2155630/adult_art_by_david_hart_156/', 'md5': 'b06082c5079bbdcde677a6291fbdf376', 'info_dict': { 'id': '2155630', 'ext': 'mp4', 'title': 'Adult Art By David Hart 156', 'uploader': '63346', 'description': 'md5:9afac8fc885252201ad14563694040fc', }, 'params': { 'skip_download': True, }, }, # AnyClip video { 'url': 'http://www.metacafe.com/watch/an-dVVXnuY7Jh77J/the_andromeda_strain_1971_stop_the_bomb_part_3/', 'info_dict': { 'id': 'an-dVVXnuY7Jh77J', 'ext': 'mp4', 'title': 'The 
Andromeda Strain (1971): Stop the Bomb Part 3', 'uploader': 'AnyClip', 'description': 'md5:cbef0460d31e3807f6feb4e7a5952e5b', }, }, # age-restricted video { 'url': 'http://www.metacafe.com/watch/5186653/bbc_internal_christmas_tape_79_uncensored_outtakes_etc/', 'md5': '98dde7c1a35d02178e8ab7560fe8bd09', 'info_dict': { 'id': '5186653', 'ext': 'mp4', 'title': 'BBC INTERNAL Christmas Tape \'79 - UNCENSORED Outtakes, Etc.', 'uploader': 'Dwayne Pipe', 'description': 'md5:950bf4c581e2c059911fa3ffbe377e4b', 'age_limit': 18, }, }, # cbs video { 'url': 'http://www.metacafe.com/watch/cb-8VD4r_Zws8VP/open_this_is_face_the_nation_february_9/', 'info_dict': { 'id': '8VD4r_Zws8VP', 'ext': 'flv', 'title': 'Open: This is Face the Nation, February 9', 'description': 'md5:8a9ceec26d1f7ed6eab610834cc1a476', 'duration': 96, 'uploader': 'CBSI-NEW', 'upload_date': '20140209', 'timestamp': 1391959800, }, 'params': { # rtmp download 'skip_download': True, }, }, # Movieclips.com video { 'url': 'http://www.metacafe.com/watch/mv-Wy7ZU/my_week_with_marilyn_do_you_love_me/', 'info_dict': { 'id': 'mv-Wy7ZU', 'ext': 'mp4', 'title': 'My Week with Marilyn - Do You Love Me?', 'description': 'From the movie My Week with Marilyn - Colin (Eddie Redmayne) professes his love to Marilyn (Michelle Williams) and gets her to promise to return to set and finish the movie.', 'uploader': 'movie_trailers', 'duration': 176, }, 'params': { 'skip_download': 'requires rtmpdump', } } ] def report_disclaimer(self): self.to_screen('Retrieving disclaimer') def _real_extract(self, url): # Extract id and simplified title from URL video_id, display_id = re.match(self._VALID_URL, url).groups() # the video may come from an external site m_external = re.match(r'^(\w{2})-(.*)$', video_id) if m_external is not None: prefix, ext_id = m_external.groups() # Check if video comes from YouTube if prefix == 'yt': return self.url_result('http://www.youtube.com/watch?v=%s' % ext_id, 'Youtube') # CBS videos use theplatform.com if prefix 
== 'cb': return self.url_result('theplatform:%s' % ext_id, 'ThePlatform') headers = { # Disable family filter 'Cookie': 'user=%s; ' % compat_urllib_parse.quote(json.dumps({'ffilter': False})) } # AnyClip videos require the flashversion cookie so that we get the link # to the mp4 file if video_id.startswith('an-'): headers['Cookie'] += 'flashVersion=0; ' # Retrieve video webpage to extract further information webpage = self._download_webpage(url, video_id, headers=headers) error = get_element_by_attribute( 'class', 'notfound-page-title', webpage) if error: raise ExtractorError(error, expected=True) video_title = self._html_search_meta( ['og:title', 'twitter:title'], webpage, 'title', default=None) or self._search_regex(r'<h1>(.*?)</h1>', webpage, 'title') # Extract URL, uploader and title from webpage self.report_extraction(video_id) video_url = None mobj = re.search(r'(?m)&(?:media|video)URL=([^&]+)', webpage) if mobj is not None: mediaURL = compat_urllib_parse_unquote(mobj.group(1)) video_ext = determine_ext(mediaURL) # Extract gdaKey if available mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage) if mobj is None: video_url = mediaURL else: gdaKey = mobj.group(1) video_url = '%s?__gda__=%s' % (mediaURL, gdaKey) if video_url is None: mobj = re.search(r'<video src="([^"]+)"', webpage) if mobj: video_url = mobj.group(1) video_ext = 'mp4' if video_url is None: flashvars = self._search_regex( r' name="flashvars" value="(.*?)"', webpage, 'flashvars', default=None) if flashvars: vardict = compat_parse_qs(flashvars) if 'mediaData' not in vardict: raise ExtractorError('Unable to extract media URL') mobj = re.search( r'"mediaURL":"(?P<mediaURL>http.*?)",(.*?)"key":"(?P<key>.*?)"', vardict['mediaData'][0]) if mobj is None: raise ExtractorError('Unable to extract media URL') mediaURL = mobj.group('mediaURL').replace('\\/', '/') video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key')) video_ext = determine_ext(video_url) if video_url is None: player_url = self._search_regex( 
r"swfobject\.embedSWF\('([^']+)'", webpage, 'config URL', default=None) if player_url: config_url = self._search_regex( r'config=(.+)$', player_url, 'config URL') config_doc = self._download_xml( config_url, video_id, note='Downloading video config') smil_url = config_doc.find('.//properties').attrib['smil_file'] smil_doc = self._download_xml( smil_url, video_id, note='Downloading SMIL document') base_url = smil_doc.find('./head/meta').attrib['base'] video_url = [] for vn in smil_doc.findall('.//video'): br = int(vn.attrib['system-bitrate']) play_path = vn.attrib['src'] video_url.append({ 'format_id': 'smil-%d' % br, 'url': base_url, 'play_path': play_path, 'page_url': url, 'player_url': player_url, 'ext': play_path.partition(':')[0], }) if video_url is None: flashvars = self._parse_json(self._search_regex( r'flashvars\s*=\s*({.*});', webpage, 'flashvars', default=None), video_id, fatal=False) if flashvars: video_url = [] for source in flashvars.get('sources'): source_url = source.get('src') if not source_url: continue ext = mimetype2ext(source.get('type')) or determine_ext(source_url) if ext == 'm3u8': video_url.extend(self._extract_m3u8_formats( source_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)) else: video_url.append({ 'url': source_url, 'ext': ext, }) if video_url is None: raise ExtractorError('Unsupported video type') description = self._html_search_meta( ['og:description', 'twitter:description', 'description'], webpage, 'title', fatal=False) thumbnail = self._html_search_meta( ['og:image', 'twitter:image'], webpage, 'title', fatal=False) video_uploader = self._html_search_regex( r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);', webpage, 'uploader nickname', fatal=False) duration = int_or_none( self._html_search_meta('video:duration', webpage, default=None)) age_limit = ( 18 if re.search(r'(?:"contentRating":|"rating",)"restricted"', webpage) else 0) if isinstance(video_url, list): formats = 
video_url else: formats = [{ 'url': video_url, 'ext': video_ext, }] self._sort_formats(formats) return { 'id': video_id, 'display_id': display_id, 'description': description, 'uploader': video_uploader, 'title': video_title, 'thumbnail': thumbnail, 'age_limit': age_limit, 'formats': formats, 'duration': duration, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/ivideon.py
youtube_dl/extractor/ivideon.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_urllib_parse_urlencode, compat_urlparse, ) from ..utils import qualities class IvideonIE(InfoExtractor): IE_NAME = 'ivideon' IE_DESC = 'Ivideon TV' _VALID_URL = r'https?://(?:www\.)?ivideon\.com/tv/(?:[^/]+/)*camera/(?P<id>\d+-[\da-f]+)/(?P<camera_id>\d+)' _TESTS = [{ 'url': 'https://www.ivideon.com/tv/camera/100-916ca13b5c4ad9f564266424a026386d/0/', 'info_dict': { 'id': '100-916ca13b5c4ad9f564266424a026386d', 'ext': 'flv', 'title': 're:^Касса [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'description': 'Основное предназначение - запись действий кассиров. Плюс общий вид.', 'is_live': True, }, 'params': { 'skip_download': True, } }, { 'url': 'https://www.ivideon.com/tv/camera/100-c4ee4cb9ede885cf62dfbe93d7b53783/589824/?lang=ru', 'only_matching': True, }, { 'url': 'https://www.ivideon.com/tv/map/22.917923/-31.816406/16/camera/100-e7bc16c7d4b5bbd633fd5350b66dfa9a/0', 'only_matching': True, }] _QUALITIES = ('low', 'mid', 'hi') def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) server_id, camera_id = mobj.group('id'), mobj.group('camera_id') camera_name, description = None, None camera_url = compat_urlparse.urljoin( url, '/tv/camera/%s/%s/' % (server_id, camera_id)) webpage = self._download_webpage(camera_url, server_id, fatal=False) if webpage: config_string = self._search_regex( r'var\s+config\s*=\s*({.+?});', webpage, 'config', default=None) if config_string: config = self._parse_json(config_string, server_id, fatal=False) camera_info = config.get('ivTvAppOptions', {}).get('currentCameraInfo') if camera_info: camera_name = camera_info.get('camera_name') description = camera_info.get('misc', {}).get('description') if not camera_name: camera_name = self._html_search_meta( 'name', webpage, 'camera name', default=None) or self._search_regex( r'<h1[^>]+class="b-video-title"[^>]*>([^<]+)', webpage, 'camera name', 
default=None) quality = qualities(self._QUALITIES) formats = [{ 'url': 'https://streaming.ivideon.com/flv/live?%s' % compat_urllib_parse_urlencode({ 'server': server_id, 'camera': camera_id, 'sessionId': 'demo', 'q': quality(format_id), }), 'format_id': format_id, 'ext': 'flv', 'quality': quality(format_id), } for format_id in self._QUALITIES] self._sort_formats(formats) return { 'id': server_id, 'title': self._live_title(camera_name or server_id), 'description': description, 'is_live': True, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/litv.py
youtube_dl/extractor/litv.py
# coding: utf-8 from __future__ import unicode_literals import json from .common import InfoExtractor from ..utils import ( ExtractorError, int_or_none, smuggle_url, unsmuggle_url, ) class LiTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?litv\.tv/(?:vod|promo)/[^/]+/(?:content\.do)?\?.*?\b(?:content_)?id=(?P<id>[^&]+)' _URL_TEMPLATE = 'https://www.litv.tv/vod/%s/content.do?id=%s' _TESTS = [{ 'url': 'https://www.litv.tv/vod/drama/content.do?brc_id=root&id=VOD00041610&isUHEnabled=true&autoPlay=1', 'info_dict': { 'id': 'VOD00041606', 'title': '花千骨', }, 'playlist_count': 50, }, { 'url': 'https://www.litv.tv/vod/drama/content.do?brc_id=root&id=VOD00041610&isUHEnabled=true&autoPlay=1', 'md5': '969e343d9244778cb29acec608e53640', 'info_dict': { 'id': 'VOD00041610', 'ext': 'mp4', 'title': '花千骨第1集', 'thumbnail': r're:https?://.*\.jpg$', 'description': 'md5:c7017aa144c87467c4fb2909c4b05d6f', 'episode_number': 1, }, 'params': { 'noplaylist': True, }, 'skip': 'Georestricted to Taiwan', }, { 'url': 'https://www.litv.tv/promo/miyuezhuan/?content_id=VOD00044841&', 'md5': '88322ea132f848d6e3e18b32a832b918', 'info_dict': { 'id': 'VOD00044841', 'ext': 'mp4', 'title': '芈月傳第1集 霸星芈月降世楚國', 'description': '楚威王二年,太史令唐昧夜觀星象,發現霸星即將現世。王后得知霸星的預言後,想盡辦法不讓孩子順利出生,幸得莒姬相護化解危機。沒想到眾人期待下出生的霸星卻是位公主,楚威王對此失望至極。楚王后命人將女嬰丟棄河中,居然奇蹟似的被少司命像攔下,楚威王認為此女非同凡響,為她取名芈月。', }, 'skip': 'Georestricted to Taiwan', }] def _extract_playlist(self, season_list, video_id, program_info, prompt=True): episode_title = program_info['title'] content_id = season_list['contentId'] if prompt: self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (content_id, video_id)) all_episodes = [ self.url_result(smuggle_url( self._URL_TEMPLATE % (program_info['contentType'], episode['contentId']), {'force_noplaylist': True})) # To prevent infinite recursion for episode in season_list['episode']] return self.playlist_result(all_episodes, content_id, episode_title) def _real_extract(self, url): url, data = 
unsmuggle_url(url, {}) video_id = self._match_id(url) noplaylist = self._downloader.params.get('noplaylist') noplaylist_prompt = True if 'force_noplaylist' in data: noplaylist = data['force_noplaylist'] noplaylist_prompt = False webpage = self._download_webpage(url, video_id) program_info = self._parse_json(self._search_regex( r'var\s+programInfo\s*=\s*([^;]+)', webpage, 'VOD data', default='{}'), video_id) season_list = list(program_info.get('seasonList', {}).values()) if season_list: if not noplaylist: return self._extract_playlist( season_list[0], video_id, program_info, prompt=noplaylist_prompt) if noplaylist_prompt: self.to_screen('Downloading just video %s because of --no-playlist' % video_id) # In browsers `getMainUrl` request is always issued. Usually this # endpoint gives the same result as the data embedded in the webpage. # If georestricted, there are no embedded data, so an extra request is # necessary to get the error code if 'assetId' not in program_info: program_info = self._download_json( 'https://www.litv.tv/vod/ajax/getProgramInfo', video_id, query={'contentId': video_id}, headers={'Accept': 'application/json'}) video_data = self._parse_json(self._search_regex( r'uiHlsUrl\s*=\s*testBackendData\(([^;]+)\);', webpage, 'video data', default='{}'), video_id) if not video_data: payload = { 'assetId': program_info['assetId'], 'watchDevices': program_info['watchDevices'], 'contentType': program_info['contentType'], } video_data = self._download_json( 'https://www.litv.tv/vod/getMainUrl', video_id, data=json.dumps(payload).encode('utf-8'), headers={'Content-Type': 'application/json'}) if not video_data.get('fullpath'): error_msg = video_data.get('errorMessage') if error_msg == 'vod.error.outsideregionerror': self.raise_geo_restricted('This video is available in Taiwan only') if error_msg: raise ExtractorError('%s said: %s' % (self.IE_NAME, error_msg), expected=True) raise ExtractorError('Unexpected result from %s' % self.IE_NAME) formats = 
self._extract_m3u8_formats( video_data['fullpath'], video_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls') for a_format in formats: # LiTV HLS segments doesn't like compressions a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = True title = program_info['title'] + program_info.get('secondaryMark', '') description = program_info.get('description') thumbnail = program_info.get('imageFile') categories = [item['name'] for item in program_info.get('category', [])] episode = int_or_none(program_info.get('episode')) return { 'id': video_id, 'formats': formats, 'title': title, 'description': description, 'thumbnail': thumbnail, 'categories': categories, 'episode_number': episode, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/gdcvault.py
youtube_dl/extractor/gdcvault.py
from __future__ import unicode_literals import re from .common import InfoExtractor from .kaltura import KalturaIE from ..utils import ( HEADRequest, remove_start, sanitized_Request, smuggle_url, urlencode_postdata, ) class GDCVaultIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?gdcvault\.com/play/(?P<id>\d+)(?:/(?P<name>[\w-]+))?' _NETRC_MACHINE = 'gdcvault' _TESTS = [ { 'url': 'http://www.gdcvault.com/play/1019721/Doki-Doki-Universe-Sweet-Simple', 'md5': '7ce8388f544c88b7ac11c7ab1b593704', 'info_dict': { 'id': '201311826596_AWNY', 'display_id': 'Doki-Doki-Universe-Sweet-Simple', 'ext': 'mp4', 'title': 'Doki-Doki Universe: Sweet, Simple and Genuine (GDC Next 10)' } }, { 'url': 'http://www.gdcvault.com/play/1015683/Embracing-the-Dark-Art-of', 'info_dict': { 'id': '201203272_1330951438328RSXR', 'display_id': 'Embracing-the-Dark-Art-of', 'ext': 'flv', 'title': 'Embracing the Dark Art of Mathematical Modeling in AI' }, 'params': { 'skip_download': True, # Requires rtmpdump } }, { 'url': 'http://www.gdcvault.com/play/1015301/Thexder-Meets-Windows-95-or', 'md5': 'a5eb77996ef82118afbbe8e48731b98e', 'info_dict': { 'id': '1015301', 'display_id': 'Thexder-Meets-Windows-95-or', 'ext': 'flv', 'title': 'Thexder Meets Windows 95, or Writing Great Games in the Windows 95 Environment', }, 'skip': 'Requires login', }, { 'url': 'http://gdcvault.com/play/1020791/', 'only_matching': True, }, { # Hard-coded hostname 'url': 'http://gdcvault.com/play/1023460/Tenacious-Design-and-The-Interface', 'md5': 'a8efb6c31ed06ca8739294960b2dbabd', 'info_dict': { 'id': '840376_BQRC', 'ext': 'mp4', 'display_id': 'Tenacious-Design-and-The-Interface', 'title': 'Tenacious Design and The Interface of \'Destiny\'', }, }, { # Multiple audios 'url': 'http://www.gdcvault.com/play/1014631/Classic-Game-Postmortem-PAC', 'info_dict': { 'id': '12396_1299111843500GMPX', 'ext': 'mp4', 'title': 'How to Create a Good Game - From My Experience of Designing Pac-Man', }, # 'params': { # 'skip_download': True, # 
Requires rtmpdump # 'format': 'jp', # The japanese audio # } }, { # gdc-player.html 'url': 'http://www.gdcvault.com/play/1435/An-American-engine-in-Tokyo', 'info_dict': { 'id': '9350_1238021887562UHXB', 'display_id': 'An-American-engine-in-Tokyo', 'ext': 'mp4', 'title': 'An American Engine in Tokyo:/nThe collaboration of Epic Games and Square Enix/nFor THE LAST REMINANT', }, }, { # Kaltura Embed 'url': 'https://www.gdcvault.com/play/1026180/Mastering-the-Apex-of-Scaling', 'info_dict': { 'id': '0_h1fg8j3p', 'ext': 'mp4', 'title': 'Mastering the Apex of Scaling Game Servers (Presented by Multiplay)', 'timestamp': 1554401811, 'upload_date': '20190404', 'uploader_id': 'joe@blazestreaming.com', }, 'params': { 'format': 'mp4-408', }, }, { # Kaltura embed, whitespace between quote and embedded URL in iframe's src 'url': 'https://www.gdcvault.com/play/1025699', 'info_dict': { 'id': '0_zagynv0a', 'ext': 'mp4', 'title': 'Tech Toolbox', 'upload_date': '20190408', 'uploader_id': 'joe@blazestreaming.com', 'timestamp': 1554764629, }, 'params': { 'skip_download': True, }, }, { # HTML5 video 'url': 'http://www.gdcvault.com/play/1014846/Conference-Keynote-Shigeru', 'only_matching': True, }, ] def _login(self, webpage_url, display_id): username, password = self._get_login_info() if username is None or password is None: self.report_warning('It looks like ' + webpage_url + ' requires a login. 
Try specifying a username and password and try again.') return None mobj = re.match(r'(?P<root_url>https?://.*?/).*', webpage_url) login_url = mobj.group('root_url') + 'api/login.php' logout_url = mobj.group('root_url') + 'logout' login_form = { 'email': username, 'password': password, } request = sanitized_Request(login_url, urlencode_postdata(login_form)) request.add_header('Content-Type', 'application/x-www-form-urlencoded') self._download_webpage(request, display_id, 'Logging in') start_page = self._download_webpage(webpage_url, display_id, 'Getting authenticated video page') self._download_webpage(logout_url, display_id, 'Logging out') return start_page def _real_extract(self, url): video_id, name = re.match(self._VALID_URL, url).groups() display_id = name or video_id webpage_url = 'http://www.gdcvault.com/play/' + video_id start_page = self._download_webpage(webpage_url, display_id) direct_url = self._search_regex( r's1\.addVariable\("file",\s*encodeURIComponent\("(/[^"]+)"\)\);', start_page, 'url', default=None) if direct_url: title = self._html_search_regex( r'<td><strong>Session Name:?</strong></td>\s*<td>(.*?)</td>', start_page, 'title') video_url = 'http://www.gdcvault.com' + direct_url # resolve the url so that we can detect the correct extension video_url = self._request_webpage( HEADRequest(video_url), video_id).geturl() return { 'id': video_id, 'display_id': display_id, 'url': video_url, 'title': title, } embed_url = KalturaIE._extract_url(start_page) if embed_url: embed_url = smuggle_url(embed_url, {'source_url': url}) ie_key = 'Kaltura' else: PLAYER_REGEX = r'<iframe src="(?P<xml_root>.+?)/(?:gdc-)?player.*?\.html.*?".*?</iframe>' xml_root = self._html_search_regex( PLAYER_REGEX, start_page, 'xml root', default=None) if xml_root is None: # Probably need to authenticate login_res = self._login(webpage_url, display_id) if login_res is None: self.report_warning('Could not login.') else: start_page = login_res # Grab the url from the authenticated page 
xml_root = self._html_search_regex( PLAYER_REGEX, start_page, 'xml root') xml_name = self._html_search_regex( r'<iframe src=".*?\?xml(?:=|URL=xml/)(.+?\.xml).*?".*?</iframe>', start_page, 'xml filename', default=None) if not xml_name: info = self._parse_html5_media_entries(url, start_page, video_id)[0] info.update({ 'title': remove_start(self._search_regex( r'>Session Name:\s*<.*?>\s*<td>(.+?)</td>', start_page, 'title', default=None) or self._og_search_title( start_page, default=None), 'GDC Vault - '), 'id': video_id, 'display_id': display_id, }) return info embed_url = '%s/xml/%s' % (xml_root, xml_name) ie_key = 'DigitallySpeaking' return { '_type': 'url_transparent', 'id': video_id, 'display_id': display_id, 'url': embed_url, 'ie_key': ie_key, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/stretchinternet.py
youtube_dl/extractor/stretchinternet.py
from __future__ import unicode_literals from .common import InfoExtractor class StretchInternetIE(InfoExtractor): _VALID_URL = r'https?://portal\.stretchinternet\.com/[^/]+/(?:portal|full)\.htm\?.*?\beventId=(?P<id>\d+)' _TEST = { 'url': 'https://portal.stretchinternet.com/umary/portal.htm?eventId=573272&streamType=video', 'info_dict': { 'id': '573272', 'ext': 'mp4', 'title': 'UNIVERSITY OF MARY WRESTLING VS UPPER IOWA', # 'timestamp': 1575668361, # 'upload_date': '20191206', 'uploader_id': '99997', } } def _real_extract(self, url): video_id = self._match_id(url) media_url = self._download_json( 'https://core.stretchlive.com/trinity/event/tcg/' + video_id, video_id)[0]['media'][0]['url'] event = self._download_json( 'https://neo-client.stretchinternet.com/portal-ws/getEvent.json', video_id, query={'eventID': video_id, 'token': 'asdf'})['event'] return { 'id': video_id, 'title': event['title'], # TODO: parse US timezone abbreviations # 'timestamp': event.get('dateTimeString'), 'url': 'https://' + media_url, 'uploader_id': event.get('ownerID'), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/theplatform.py
youtube_dl/extractor/theplatform.py
# coding: utf-8 from __future__ import unicode_literals import re import time import hmac import binascii import hashlib from .once import OnceIE from .adobepass import AdobePassIE from ..compat import ( compat_parse_qs, compat_urllib_parse_urlparse, ) from ..utils import ( determine_ext, ExtractorError, float_or_none, int_or_none, sanitized_Request, unsmuggle_url, update_url_query, xpath_with_ns, mimetype2ext, find_xpath_attr, ) default_ns = 'http://www.w3.org/2005/SMIL21/Language' _x = lambda p: xpath_with_ns(p, {'smil': default_ns}) class ThePlatformBaseIE(OnceIE): _TP_TLD = 'com' def _extract_theplatform_smil(self, smil_url, video_id, note='Downloading SMIL data'): meta = self._download_xml( smil_url, video_id, note=note, query={'format': 'SMIL'}, headers=self.geo_verification_headers()) error_element = find_xpath_attr(meta, _x('.//smil:ref'), 'src') if error_element is not None: exception = find_xpath_attr( error_element, _x('.//smil:param'), 'name', 'exception') if exception is not None: if exception.get('value') == 'GeoLocationBlocked': self.raise_geo_restricted(error_element.attrib['abstract']) elif error_element.attrib['src'].startswith( 'http://link.theplatform.%s/s/errorFiles/Unavailable.' 
% self._TP_TLD): raise ExtractorError( error_element.attrib['abstract'], expected=True) smil_formats = self._parse_smil_formats( meta, smil_url, video_id, namespace=default_ns, # the parameters are from syfy.com, other sites may use others, # they also work for nbc.com f4m_params={'g': 'UXWGVKRWHFSP', 'hdcore': '3.0.3'}, transform_rtmp_url=lambda streamer, src: (streamer, 'mp4:' + src)) formats = [] for _format in smil_formats: if OnceIE.suitable(_format['url']): formats.extend(self._extract_once_formats(_format['url'])) else: media_url = _format['url'] if determine_ext(media_url) == 'm3u8': hdnea2 = self._get_cookies(media_url).get('hdnea2') if hdnea2: _format['url'] = update_url_query(media_url, {'hdnea3': hdnea2.value}) formats.append(_format) subtitles = self._parse_smil_subtitles(meta, default_ns) return formats, subtitles def _download_theplatform_metadata(self, path, video_id): info_url = 'http://link.theplatform.%s/s/%s?format=preview' % (self._TP_TLD, path) return self._download_json(info_url, video_id) def _parse_theplatform_metadata(self, info): subtitles = {} captions = info.get('captions') if isinstance(captions, list): for caption in captions: lang, src, mime = caption.get('lang', 'en'), caption.get('src'), caption.get('type') subtitles.setdefault(lang, []).append({ 'ext': mimetype2ext(mime), 'url': src, }) duration = info.get('duration') tp_chapters = info.get('chapters', []) chapters = [] if tp_chapters: def _add_chapter(start_time, end_time): start_time = float_or_none(start_time, 1000) end_time = float_or_none(end_time, 1000) if start_time is None or end_time is None: return chapters.append({ 'start_time': start_time, 'end_time': end_time, }) for chapter in tp_chapters[:-1]: _add_chapter(chapter.get('startTime'), chapter.get('endTime')) _add_chapter(tp_chapters[-1].get('startTime'), tp_chapters[-1].get('endTime') or duration) return { 'title': info['title'], 'subtitles': subtitles, 'description': info['description'], 'thumbnail': 
info['defaultThumbnailUrl'], 'duration': float_or_none(duration, 1000), 'timestamp': int_or_none(info.get('pubDate'), 1000) or None, 'uploader': info.get('billingCode'), 'chapters': chapters, } def _extract_theplatform_metadata(self, path, video_id): info = self._download_theplatform_metadata(path, video_id) return self._parse_theplatform_metadata(info) class ThePlatformIE(ThePlatformBaseIE, AdobePassIE): _VALID_URL = r'''(?x) (?:https?://(?:link|player)\.theplatform\.com/[sp]/(?P<provider_id>[^/]+)/ (?:(?:(?:[^/]+/)+select/)?(?P<media>media/(?:guid/\d+/)?)?|(?P<config>(?:[^/\?]+/(?:swf|config)|onsite)/select/))? |theplatform:)(?P<id>[^/\?&]+)''' _TESTS = [{ # from http://www.metacafe.com/watch/cb-e9I_cZgTgIPd/blackberrys_big_bold_z30/ 'url': 'http://link.theplatform.com/s/dJ5BDC/e9I_cZgTgIPd/meta.smil?format=smil&Tracking=true&mbr=true', 'info_dict': { 'id': 'e9I_cZgTgIPd', 'ext': 'flv', 'title': 'Blackberry\'s big, bold Z30', 'description': 'The Z30 is Blackberry\'s biggest, baddest mobile messaging device yet.', 'duration': 247, 'timestamp': 1383239700, 'upload_date': '20131031', 'uploader': 'CBSI-NEW', }, 'params': { # rtmp download 'skip_download': True, }, 'skip': '404 Not Found', }, { # from http://www.cnet.com/videos/tesla-model-s-a-second-step-towards-a-cleaner-motoring-future/ 'url': 'http://link.theplatform.com/s/kYEXFC/22d_qsQ6MIRT', 'info_dict': { 'id': '22d_qsQ6MIRT', 'ext': 'flv', 'description': 'md5:ac330c9258c04f9d7512cf26b9595409', 'title': 'Tesla Model S: A second step towards a cleaner motoring future', 'timestamp': 1426176191, 'upload_date': '20150312', 'uploader': 'CBSI-NEW', }, 'params': { # rtmp download 'skip_download': True, } }, { 'url': 'https://player.theplatform.com/p/D6x-PC/pulse_preview/embed/select/media/yMBg9E8KFxZD', 'info_dict': { 'id': 'yMBg9E8KFxZD', 'ext': 'mp4', 'description': 'md5:644ad9188d655b742f942bf2e06b002d', 'title': 'HIGHLIGHTS: USA bag first ever series Cup win', 'uploader': 'EGSM', } }, { 'url': 
'http://player.theplatform.com/p/NnzsPC/widget/select/media/4Y0TlYUr_ZT7', 'only_matching': True, }, { 'url': 'http://player.theplatform.com/p/2E2eJC/nbcNewsOffsite?guid=tdy_or_siri_150701', 'md5': 'fb96bb3d85118930a5b055783a3bd992', 'info_dict': { 'id': 'tdy_or_siri_150701', 'ext': 'mp4', 'title': 'iPhone Siri’s sassy response to a math question has people talking', 'description': 'md5:a565d1deadd5086f3331d57298ec6333', 'duration': 83.0, 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1435752600, 'upload_date': '20150701', 'uploader': 'NBCU-NEWS', }, }, { # From http://www.nbc.com/the-blacklist/video/sir-crispin-crandall/2928790?onid=137781#vc137781=1 # geo-restricted (US), HLS encrypted with AES-128 'url': 'http://player.theplatform.com/p/NnzsPC/onsite_universal/select/media/guid/2410887629/2928790?fwsitesection=nbc_the_blacklist_video_library&autoPlay=true&carouselID=137781', 'only_matching': True, }] @classmethod def _extract_urls(cls, webpage): m = re.search( r'''(?x) <meta\s+ property=(["'])(?:og:video(?::(?:secure_)?url)?|twitter:player)\1\s+ content=(["'])(?P<url>https?://player\.theplatform\.com/p/.+?)\2 ''', webpage) if m: return [m.group('url')] # Are whitespaces ignored in URLs? 
# https://github.com/ytdl-org/youtube-dl/issues/12044 matches = re.findall( r'(?s)<(?:iframe|script)[^>]+src=(["\'])((?:https?:)?//player\.theplatform\.com/p/.+?)\1', webpage) if matches: return [re.sub(r'\s', '', list(zip(*matches))[1][0])] @staticmethod def _sign_url(url, sig_key, sig_secret, life=600, include_qs=False): flags = '10' if include_qs else '00' expiration_date = '%x' % (int(time.time()) + life) def str_to_hex(str): return binascii.b2a_hex(str.encode('ascii')).decode('ascii') def hex_to_bytes(hex): return binascii.a2b_hex(hex.encode('ascii')) relative_path = re.match(r'https?://link\.theplatform\.com/s/([^?]+)', url).group(1) clear_text = hex_to_bytes(flags + expiration_date + str_to_hex(relative_path)) checksum = hmac.new(sig_key.encode('ascii'), clear_text, hashlib.sha1).hexdigest() sig = flags + expiration_date + checksum + str_to_hex(sig_secret) return '%s&sig=%s' % (url, sig) def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) self._initialize_geo_bypass({ 'countries': smuggled_data.get('geo_countries'), }) mobj = re.match(self._VALID_URL, url) provider_id = mobj.group('provider_id') video_id = mobj.group('id') if not provider_id: provider_id = 'dJ5BDC' path = provider_id + '/' if mobj.group('media'): path += mobj.group('media') path += video_id qs_dict = compat_parse_qs(compat_urllib_parse_urlparse(url).query) if 'guid' in qs_dict: webpage = self._download_webpage(url, video_id) scripts = re.findall(r'<script[^>]+src="([^"]+)"', webpage) feed_id = None # feed id usually locates in the last script. 
# Seems there's no pattern for the interested script filename, so # I try one by one for script in reversed(scripts): feed_script = self._download_webpage( self._proto_relative_url(script, 'http:'), video_id, 'Downloading feed script') feed_id = self._search_regex( r'defaultFeedId\s*:\s*"([^"]+)"', feed_script, 'default feed id', default=None) if feed_id is not None: break if feed_id is None: raise ExtractorError('Unable to find feed id') return self.url_result('http://feed.theplatform.com/f/%s/%s?byGuid=%s' % ( provider_id, feed_id, qs_dict['guid'][0])) if smuggled_data.get('force_smil_url', False): smil_url = url # Explicitly specified SMIL (see https://github.com/ytdl-org/youtube-dl/issues/7385) elif '/guid/' in url: headers = {} source_url = smuggled_data.get('source_url') if source_url: headers['Referer'] = source_url request = sanitized_Request(url, headers=headers) webpage = self._download_webpage(request, video_id) smil_url = self._search_regex( r'<link[^>]+href=(["\'])(?P<url>.+?)\1[^>]+type=["\']application/smil\+xml', webpage, 'smil url', group='url') path = self._search_regex( r'link\.theplatform\.com/s/((?:[^/?#&]+/)+[^/?#&]+)', smil_url, 'path') smil_url += '?' if '?' 
not in smil_url else '&' + 'formats=m3u,mpeg4' elif mobj.group('config'): config_url = url + '&form=json' config_url = config_url.replace('swf/', 'config/') config_url = config_url.replace('onsite/', 'onsite/config/') config = self._download_json(config_url, video_id, 'Downloading config') if 'releaseUrl' in config: release_url = config['releaseUrl'] else: release_url = 'http://link.theplatform.com/s/%s?mbr=true' % path smil_url = release_url + '&formats=MPEG4&manifest=f4m' else: smil_url = 'http://link.theplatform.com/s/%s?mbr=true' % path sig = smuggled_data.get('sig') if sig: smil_url = self._sign_url(smil_url, sig['key'], sig['secret']) formats, subtitles = self._extract_theplatform_smil(smil_url, video_id) self._sort_formats(formats) ret = self._extract_theplatform_metadata(path, video_id) combined_subtitles = self._merge_subtitles(ret.get('subtitles', {}), subtitles) ret.update({ 'id': video_id, 'formats': formats, 'subtitles': combined_subtitles, }) return ret class ThePlatformFeedIE(ThePlatformBaseIE): _URL_TEMPLATE = '%s//feed.theplatform.com/f/%s/%s?form=json&%s' _VALID_URL = r'https?://feed\.theplatform\.com/f/(?P<provider_id>[^/]+)/(?P<feed_id>[^?/]+)\?(?:[^&]+&)*(?P<filter>by(?:Gui|I)d=(?P<id>[^&]+))' _TESTS = [{ # From http://player.theplatform.com/p/7wvmTC/MSNBCEmbeddedOffSite?guid=n_hardball_5biden_140207 'url': 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207', 'md5': '6e32495b5073ab414471b615c5ded394', 'info_dict': { 'id': 'n_hardball_5biden_140207', 'ext': 'mp4', 'title': 'The Biden factor: will Joe run in 2016?', 'description': 'Could Vice President Joe Biden be preparing a 2016 campaign? 
Mark Halperin and Sam Stein weigh in.', 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20140208', 'timestamp': 1391824260, 'duration': 467.0, 'categories': ['MSNBC/Issues/Democrats', 'MSNBC/Issues/Elections/Election 2016'], 'uploader': 'NBCU-NEWS', }, }, { 'url': 'http://feed.theplatform.com/f/2E2eJC/nnd_NBCNews?byGuid=nn_netcast_180306.Copy.01', 'only_matching': True, }] def _extract_feed_info(self, provider_id, feed_id, filter_query, video_id, custom_fields=None, asset_types_query={}, account_id=None): real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, filter_query) entry = self._download_json(real_url, video_id)['entries'][0] main_smil_url = 'http://link.theplatform.com/s/%s/media/guid/%d/%s' % (provider_id, account_id, entry['guid']) if account_id else entry.get('plmedia$publicUrl') formats = [] subtitles = {} first_video_id = None duration = None asset_types = [] for item in entry['media$content']: smil_url = item['plfile$url'] cur_video_id = ThePlatformIE._match_id(smil_url) if first_video_id is None: first_video_id = cur_video_id duration = float_or_none(item.get('plfile$duration')) file_asset_types = item.get('plfile$assetTypes') or compat_parse_qs(compat_urllib_parse_urlparse(smil_url).query)['assetTypes'] for asset_type in file_asset_types: if asset_type in asset_types: continue asset_types.append(asset_type) query = { 'mbr': 'true', 'formats': item['plfile$format'], 'assetTypes': asset_type, } if asset_type in asset_types_query: query.update(asset_types_query[asset_type]) cur_formats, cur_subtitles = self._extract_theplatform_smil(update_url_query( main_smil_url or smil_url, query), video_id, 'Downloading SMIL data for %s' % asset_type) formats.extend(cur_formats) subtitles = self._merge_subtitles(subtitles, cur_subtitles) self._sort_formats(formats) thumbnails = [{ 'url': thumbnail['plfile$url'], 'width': int_or_none(thumbnail.get('plfile$width')), 'height': int_or_none(thumbnail.get('plfile$height')), } for thumbnail in 
entry.get('media$thumbnails', [])] timestamp = int_or_none(entry.get('media$availableDate'), scale=1000) categories = [item['media$name'] for item in entry.get('media$categories', [])] ret = self._extract_theplatform_metadata('%s/%s' % (provider_id, first_video_id), video_id) subtitles = self._merge_subtitles(subtitles, ret['subtitles']) ret.update({ 'id': video_id, 'formats': formats, 'subtitles': subtitles, 'thumbnails': thumbnails, 'duration': duration, 'timestamp': timestamp, 'categories': categories, }) if custom_fields: ret.update(custom_fields(entry)) return ret def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') provider_id = mobj.group('provider_id') feed_id = mobj.group('feed_id') filter_query = mobj.group('filter') return self._extract_feed_info(provider_id, feed_id, filter_query, video_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/formula1.py
youtube_dl/extractor/formula1.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor class Formula1IE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?formula1\.com/en/latest/video\.[^.]+\.(?P<id>\d+)\.html' _TEST = { 'url': 'https://www.formula1.com/en/latest/video.race-highlights-spain-2016.6060988138001.html', 'md5': 'be7d3a8c2f804eb2ab2aa5d941c359f8', 'info_dict': { 'id': '6060988138001', 'ext': 'mp4', 'title': 'Race highlights - Spain 2016', 'timestamp': 1463332814, 'upload_date': '20160515', 'uploader_id': '6057949432001', }, 'add_ie': ['BrightcoveNew'], } BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/6057949432001/S1WMrhjlh_default/index.html?videoId=%s' def _real_extract(self, url): bc_id = self._match_id(url) return self.url_result( self.BRIGHTCOVE_URL_TEMPLATE % bc_id, 'BrightcoveNew', bc_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/canalc2.py
youtube_dl/extractor/canalc2.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import parse_duration class Canalc2IE(InfoExtractor): IE_NAME = 'canalc2.tv' _VALID_URL = r'https?://(?:(?:www\.)?canalc2\.tv/video/|archives-canalc2\.u-strasbg\.fr/video\.asp\?.*\bidVideo=)(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.canalc2.tv/video/12163', 'md5': '060158428b650f896c542dfbb3d6487f', 'info_dict': { 'id': '12163', 'ext': 'mp4', 'title': 'Terrasses du Numérique', 'duration': 122, }, }, { 'url': 'http://archives-canalc2.u-strasbg.fr/video.asp?idVideo=11427&voir=oui', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'http://www.canalc2.tv/video/%s' % video_id, video_id) title = self._html_search_regex( r'(?s)class="[^"]*col_description[^"]*">.*?<h3>(.+?)</h3>', webpage, 'title') formats = [] for _, video_url in re.findall(r'file\s*=\s*(["\'])(.+?)\1', webpage): if video_url.startswith('rtmp://'): rtmp = re.search( r'^(?P<url>rtmp://[^/]+/(?P<app>.+/))(?P<play_path>mp4:.+)$', video_url) formats.append({ 'url': rtmp.group('url'), 'format_id': 'rtmp', 'ext': 'flv', 'app': rtmp.group('app'), 'play_path': rtmp.group('play_path'), 'page_url': url, }) else: formats.append({ 'url': video_url, 'format_id': 'http', }) if formats: info = { 'formats': formats, } else: info = self._parse_html5_media_entries(url, webpage, url)[0] self._sort_formats(info['formats']) info.update({ 'id': video_id, 'title': title, 'duration': parse_duration(self._search_regex( r'id=["\']video_duree["\'][^>]*>([^<]+)', webpage, 'duration', fatal=False)), }) return info
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/msn.py
youtube_dl/extractor/msn.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( determine_ext, ExtractorError, int_or_none, unescapeHTML, ) class MSNIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www|preview)\.)?msn\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/[a-z]{2}-(?P<id>[\da-zA-Z]+)' _TESTS = [{ 'url': 'https://www.msn.com/en-in/money/video/7-ways-to-get-rid-of-chest-congestion/vi-BBPxU6d', 'md5': '087548191d273c5c55d05028f8d2cbcd', 'info_dict': { 'id': 'BBPxU6d', 'display_id': '7-ways-to-get-rid-of-chest-congestion', 'ext': 'mp4', 'title': 'Seven ways to get rid of chest congestion', 'description': '7 Ways to Get Rid of Chest Congestion', 'duration': 88, 'uploader': 'Health', 'uploader_id': 'BBPrMqa', }, }, { # Article, multiple Dailymotion Embeds 'url': 'https://www.msn.com/en-in/money/sports/hottest-football-wags-greatest-footballers-turned-managers-and-more/ar-BBpc7Nl', 'info_dict': { 'id': 'BBpc7Nl', }, 'playlist_mincount': 4, }, { 'url': 'http://www.msn.com/en-ae/news/offbeat/meet-the-nine-year-old-self-made-millionaire/ar-BBt6ZKf', 'only_matching': True, }, { 'url': 'http://www.msn.com/en-ae/video/watch/obama-a-lot-of-people-will-be-disappointed/vi-AAhxUMH', 'only_matching': True, }, { # geo restricted 'url': 'http://www.msn.com/en-ae/foodanddrink/joinourtable/the-first-fart-makes-you-laugh-the-last-fart-makes-you-cry/vp-AAhzIBU', 'only_matching': True, }, { 'url': 'http://www.msn.com/en-ae/entertainment/bollywood/watch-how-salman-khan-reacted-when-asked-if-he-would-apologize-for-his-‘raped-woman’-comment/vi-AAhvzW6', 'only_matching': True, }, { # Vidible(AOL) Embed 'url': 'https://www.msn.com/en-us/money/other/jupiter-is-about-to-come-so-close-you-can-see-its-moons-with-binoculars/vi-AACqsHR', 'only_matching': True, }, { # Dailymotion Embed 'url': 'https://www.msn.com/es-ve/entretenimiento/watch/winston-salem-paire-refait-des-siennes-en-perdant-sa-raquette-au-service/vp-AAG704L', 
'only_matching': True, }, { # YouTube Embed 'url': 'https://www.msn.com/en-in/money/news/meet-vikram-%E2%80%94-chandrayaan-2s-lander/vi-AAGUr0v', 'only_matching': True, }, { # NBCSports Embed 'url': 'https://www.msn.com/en-us/money/football_nfl/week-13-preview-redskins-vs-panthers/vi-BBXsCDb', 'only_matching': True, }] def _real_extract(self, url): display_id, page_id = re.match(self._VALID_URL, url).groups() webpage = self._download_webpage(url, display_id) entries = [] for _, metadata in re.findall(r'data-metadata\s*=\s*(["\'])(?P<data>.+?)\1', webpage): video = self._parse_json(unescapeHTML(metadata), display_id) provider_id = video.get('providerId') player_name = video.get('playerName') if player_name and provider_id: entry = None if player_name == 'AOL': if provider_id.startswith('http'): provider_id = self._search_regex( r'https?://delivery\.vidible\.tv/video/redirect/([0-9a-f]{24})', provider_id, 'vidible id') entry = self.url_result( 'aol-video:' + provider_id, 'Aol', provider_id) elif player_name == 'Dailymotion': entry = self.url_result( 'https://www.dailymotion.com/video/' + provider_id, 'Dailymotion', provider_id) elif player_name == 'YouTube': entry = self.url_result( provider_id, 'Youtube', provider_id) elif player_name == 'NBCSports': entry = self.url_result( 'http://vplayer.nbcsports.com/p/BxmELC/nbcsports_embed/select/media/' + provider_id, 'NBCSportsVPlayer', provider_id) if entry: entries.append(entry) continue video_id = video['uuid'] title = video['title'] formats = [] for file_ in video.get('videoFiles', []): format_url = file_.get('url') if not format_url: continue if 'format=m3u8-aapl' in format_url: # m3u8_native should not be used here until # https://github.com/ytdl-org/youtube-dl/issues/9913 is fixed formats.extend(self._extract_m3u8_formats( format_url, display_id, 'mp4', m3u8_id='hls', fatal=False)) elif 'format=mpd-time-csf' in format_url: formats.extend(self._extract_mpd_formats( format_url, display_id, 'dash', fatal=False)) elif 
'.ism' in format_url: if format_url.endswith('.ism'): format_url += '/manifest' formats.extend(self._extract_ism_formats( format_url, display_id, 'mss', fatal=False)) else: format_id = file_.get('formatCode') formats.append({ 'url': format_url, 'ext': 'mp4', 'format_id': format_id, 'width': int_or_none(file_.get('width')), 'height': int_or_none(file_.get('height')), 'vbr': int_or_none(self._search_regex(r'_(\d+)\.mp4', format_url, 'vbr', default=None)), 'preference': 1 if format_id == '1001' else None, }) self._sort_formats(formats) subtitles = {} for file_ in video.get('files', []): format_url = file_.get('url') format_code = file_.get('formatCode') if not format_url or not format_code: continue if compat_str(format_code) == '3100': subtitles.setdefault(file_.get('culture', 'en'), []).append({ 'ext': determine_ext(format_url, 'ttml'), 'url': format_url, }) entries.append({ 'id': video_id, 'display_id': display_id, 'title': title, 'description': video.get('description'), 'thumbnail': video.get('headlineImage', {}).get('url'), 'duration': int_or_none(video.get('durationSecs')), 'uploader': video.get('sourceFriendly'), 'uploader_id': video.get('providerId'), 'creator': video.get('creator'), 'subtitles': subtitles, 'formats': formats, }) if not entries: error = unescapeHTML(self._search_regex( r'data-error=(["\'])(?P<error>.+?)\1', webpage, 'error', group='error')) raise ExtractorError('%s said: %s' % (self.IE_NAME, error), expected=True) return self.playlist_result(entries, page_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/tv2hu.py
youtube_dl/extractor/tv2hu.py
# encoding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import int_or_none class TV2HuIE(InfoExtractor): IE_NAME = 'tv2.hu' _VALID_URL = r'https?://(?:www\.)?tv2\.hu/(?:[^/]+/)+(?P<id>\d+)_[^/?#]+?\.html' _TESTS = [{ 'url': 'http://tv2.hu/ezek_megorultek/217679_ezek-megorultek---1.-adas-1.-resz.html', 'md5': '585e58e2e090f34603804bb2c48e98d8', 'info_dict': { 'id': '217679', 'ext': 'mp4', 'title': 'Ezek megőrültek! - 1. adás 1. rész', 'upload_date': '20160826', 'thumbnail': r're:^https?://.*\.jpg$' } }, { 'url': 'http://tv2.hu/ezek_megorultek/teljes_adasok/217677_ezek-megorultek---1.-adas-2.-resz.html', 'only_matching': True }, { 'url': 'http://tv2.hu/musoraink/aktiv/aktiv_teljes_adas/217963_aktiv-teljes-adas---2016.08.30..html', 'only_matching': True }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) json_url = self._search_regex( r'jsonUrl\s*=\s*"([^"]+)"', webpage, 'json url') json_data = self._download_json(json_url, video_id) formats = [] for b in ('bitrates', 'backupBitrates'): bitrates = json_data.get(b, {}) m3u8_url = bitrates.get('hls') if m3u8_url: formats.extend(self._extract_wowza_formats( m3u8_url, video_id, skip_protocols=['rtmp', 'rtsp'])) for mp4_url in bitrates.get('mp4', []): height = int_or_none(self._search_regex( r'\.(\d+)p\.mp4', mp4_url, 'height', default=None)) formats.append({ 'format_id': 'http' + ('-%d' % height if height else ''), 'url': mp4_url, 'height': height, 'width': int_or_none(height / 9.0 * 16.0 if height else None), }) self._sort_formats(formats) return { 'id': video_id, 'title': self._og_search_title(webpage).strip(), 'thumbnail': self._og_search_thumbnail(webpage), 'upload_date': self._search_regex( r'/vod/(\d{8})/', json_url, 'upload_date', default=None), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/bpb.py
youtube_dl/extractor/bpb.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( js_to_json, determine_ext, ) class BpbIE(InfoExtractor): IE_DESC = 'Bundeszentrale für politische Bildung' _VALID_URL = r'https?://(?:www\.)?bpb\.de/mediathek/(?P<id>[0-9]+)/' _TEST = { 'url': 'http://www.bpb.de/mediathek/297/joachim-gauck-zu-1989-und-die-erinnerung-an-die-ddr', # md5 fails in Python 2.6 due to buggy server response and wrong handling of urllib2 'md5': 'c4f84c8a8044ca9ff68bb8441d300b3f', 'info_dict': { 'id': '297', 'ext': 'mp4', 'title': 'Joachim Gauck zu 1989 und die Erinnerung an die DDR', 'description': 'Joachim Gauck, erster Beauftragter für die Stasi-Unterlagen, spricht auf dem Geschichtsforum über die friedliche Revolution 1989 und eine "gewisse Traurigkeit" im Umgang mit der DDR-Vergangenheit.' } } def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_regex( r'<h2 class="white">(.*?)</h2>', webpage, 'title') video_info_dicts = re.findall( r"({\s*src\s*:\s*'https?://film\.bpb\.de/[^}]+})", webpage) formats = [] for video_info in video_info_dicts: video_info = self._parse_json( video_info, video_id, transform_source=js_to_json, fatal=False) if not video_info: continue video_url = video_info.get('src') if not video_url: continue quality = 'high' if '_high' in video_url else 'low' formats.append({ 'url': video_url, 'preference': 10 if quality == 'high' else 0, 'format_note': quality, 'format_id': '%s-%s' % (quality, determine_ext(video_url)), }) self._sort_formats(formats) return { 'id': video_id, 'formats': formats, 'title': title, 'description': self._og_search_description(webpage), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/rtl2.py
youtube_dl/extractor/rtl2.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..aes import aes_cbc_decrypt from ..compat import ( compat_b64decode, compat_ord, compat_str, ) from ..utils import ( bytes_to_intlist, ExtractorError, intlist_to_bytes, int_or_none, strip_or_none, ) class RTL2IE(InfoExtractor): IE_NAME = 'rtl2' _VALID_URL = r'https?://(?:www\.)?rtl2\.de/sendung/[^/]+/(?:video/(?P<vico_id>\d+)[^/]+/(?P<vivi_id>\d+)-|folge/)(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'http://www.rtl2.de/sendung/grip-das-motormagazin/folge/folge-203-0', 'info_dict': { 'id': 'folge-203-0', 'ext': 'f4v', 'title': 'GRIP sucht den Sommerkönig', 'description': 'md5:e3adbb940fd3c6e76fa341b8748b562f' }, 'params': { # rtmp download 'skip_download': True, }, 'expected_warnings': ['Unable to download f4m manifest', 'Failed to download m3u8 information'], }, { 'url': 'http://www.rtl2.de/sendung/koeln-50667/video/5512-anna/21040-anna-erwischt-alex/', 'info_dict': { 'id': 'anna-erwischt-alex', 'ext': 'mp4', 'title': 'Anna erwischt Alex!', 'description': 'Anna nimmt ihrem Vater nicht ab, dass er nicht spielt. Und tatsächlich erwischt sie ihn auf frischer Tat.' 
}, 'params': { # rtmp download 'skip_download': True, }, 'expected_warnings': ['Unable to download f4m manifest', 'Failed to download m3u8 information'], }] def _real_extract(self, url): vico_id, vivi_id, display_id = re.match(self._VALID_URL, url).groups() if not vico_id: webpage = self._download_webpage(url, display_id) mobj = re.search( r'data-collection="(?P<vico_id>\d+)"[^>]+data-video="(?P<vivi_id>\d+)"', webpage) if mobj: vico_id = mobj.group('vico_id') vivi_id = mobj.group('vivi_id') else: vico_id = self._html_search_regex( r'vico_id\s*:\s*([0-9]+)', webpage, 'vico_id') vivi_id = self._html_search_regex( r'vivi_id\s*:\s*([0-9]+)', webpage, 'vivi_id') info = self._download_json( 'https://service.rtl2.de/api-player-vipo/video.php', display_id, query={ 'vico_id': vico_id, 'vivi_id': vivi_id, }) video_info = info['video'] title = video_info['titel'] formats = [] rtmp_url = video_info.get('streamurl') if rtmp_url: rtmp_url = rtmp_url.replace('\\', '') stream_url = 'mp4:' + self._html_search_regex(r'/ondemand/(.+)', rtmp_url, 'stream URL') rtmp_conn = ['S:connect', 'O:1', 'NS:pageUrl:' + url, 'NB:fpad:0', 'NN:videoFunction:1', 'O:0'] formats.append({ 'format_id': 'rtmp', 'url': rtmp_url, 'play_path': stream_url, 'player_url': 'https://www.rtl2.de/sites/default/modules/rtl2/jwplayer/jwplayer-7.6.0/jwplayer.flash.swf', 'page_url': url, 'flash_version': 'LNX 11,2,202,429', 'rtmp_conn': rtmp_conn, 'no_resume': True, 'preference': 1, }) m3u8_url = video_info.get('streamurl_hls') if m3u8_url: formats.extend(self._extract_akamai_formats(m3u8_url, display_id)) self._sort_formats(formats) return { 'id': display_id, 'title': title, 'thumbnail': video_info.get('image'), 'description': video_info.get('beschreibung'), 'duration': int_or_none(video_info.get('duration')), 'formats': formats, } class RTL2YouBaseIE(InfoExtractor): _BACKWERK_BASE_URL = 'https://p-you-backwerk.rtl2apps.de/' class RTL2YouIE(RTL2YouBaseIE): IE_NAME = 'rtl2:you' _VALID_URL = 
r'http?://you\.rtl2\.de/(?:video/\d+/|youplayer/index\.html\?.*?\bvid=)(?P<id>\d+)' _TESTS = [{ 'url': 'http://you.rtl2.de/video/3002/15740/MJUNIK%20%E2%80%93%20Home%20of%20YOU/307-hirn-wo-bist-du', 'info_dict': { 'id': '15740', 'ext': 'mp4', 'title': 'MJUNIK – Home of YOU - #307 Hirn, wo bist du?!', 'description': 'md5:ddaa95c61b372b12b66e115b2772fe01', 'age_limit': 12, }, }, { 'url': 'http://you.rtl2.de/youplayer/index.html?vid=15712', 'only_matching': True, }] _AES_KEY = b'\xe9W\xe4.<*\xb8\x1a\xd2\xb6\x92\xf3C\xd3\xefL\x1b\x03*\xbbbH\xc0\x03\xffo\xc2\xf2(\xaa\xaa!' _GEO_COUNTRIES = ['DE'] def _real_extract(self, url): video_id = self._match_id(url) stream_data = self._download_json( self._BACKWERK_BASE_URL + 'stream/video/' + video_id, video_id) data, iv = compat_b64decode(stream_data['streamUrl']).decode().split(':') stream_url = intlist_to_bytes(aes_cbc_decrypt( bytes_to_intlist(compat_b64decode(data)), bytes_to_intlist(self._AES_KEY), bytes_to_intlist(compat_b64decode(iv)) )) if b'rtl2_you_video_not_found' in stream_url: raise ExtractorError('video not found', expected=True) formats = self._extract_m3u8_formats( stream_url[:-compat_ord(stream_url[-1])].decode(), video_id, 'mp4', 'm3u8_native') self._sort_formats(formats) video_data = self._download_json( self._BACKWERK_BASE_URL + 'video/' + video_id, video_id) series = video_data.get('formatTitle') title = episode = video_data.get('title') or series if series and series != title: title = '%s - %s' % (series, title) return { 'id': video_id, 'title': title, 'formats': formats, 'description': strip_or_none(video_data.get('description')), 'thumbnail': video_data.get('image'), 'duration': int_or_none(stream_data.get('duration') or video_data.get('duration'), 1000), 'series': series, 'episode': episode, 'age_limit': int_or_none(video_data.get('minimumAge')), } class RTL2YouSeriesIE(RTL2YouBaseIE): IE_NAME = 'rtl2:you:series' _VALID_URL = r'http?://you\.rtl2\.de/videos/(?P<id>\d+)' _TEST = { 'url': 
'http://you.rtl2.de/videos/115/dragon-ball', 'info_dict': { 'id': '115', }, 'playlist_mincount': 5, } def _real_extract(self, url): series_id = self._match_id(url) stream_data = self._download_json( self._BACKWERK_BASE_URL + 'videos', series_id, query={ 'formatId': series_id, 'limit': 1000000000, }) entries = [] for video in stream_data.get('videos', []): video_id = compat_str(video['videoId']) if not video_id: continue entries.append(self.url_result( 'http://you.rtl2.de/video/%s/%s' % (series_id, video_id), 'RTL2You', video_id)) return self.playlist_result(entries, series_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/matchtv.py
youtube_dl/extractor/matchtv.py
# coding: utf-8 from __future__ import unicode_literals import random from .common import InfoExtractor from ..utils import xpath_text class MatchTVIE(InfoExtractor): _VALID_URL = r'https?://matchtv\.ru(?:/on-air|/?#live-player)' _TESTS = [{ 'url': 'http://matchtv.ru/#live-player', 'info_dict': { 'id': 'matchtv-live', 'ext': 'flv', 'title': r're:^Матч ТВ - Прямой эфир \d{4}-\d{2}-\d{2} \d{2}:\d{2}$', 'is_live': True, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://matchtv.ru/on-air/', 'only_matching': True, }] def _real_extract(self, url): video_id = 'matchtv-live' video_url = self._download_json( 'http://player.matchtv.ntvplus.tv/player/smil', video_id, query={ 'ts': '', 'quality': 'SD', 'contentId': '561d2c0df7159b37178b4567', 'sign': '', 'includeHighlights': '0', 'userId': '', 'sessionId': random.randint(1, 1000000000), 'contentType': 'channel', 'timeShift': '0', 'platform': 'portal', }, headers={ 'Referer': 'http://player.matchtv.ntvplus.tv/embed-player/NTVEmbedPlayer.swf', })['data']['videoUrl'] f4m_url = xpath_text(self._download_xml(video_url, video_id), './to') formats = self._extract_f4m_formats(f4m_url, video_id) self._sort_formats(formats) return { 'id': video_id, 'title': self._live_title('Матч ТВ - Прямой эфир'), 'is_live': True, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/blerp.py
youtube_dl/extractor/blerp.py
# coding: utf-8 from __future__ import unicode_literals import json from ..utils import ( strip_or_none, traverse_obj, ) from .common import InfoExtractor class BlerpIE(InfoExtractor): IE_NAME = 'blerp' _VALID_URL = r'https?://(?:www\.)?blerp\.com/soundbites/(?P<id>[0-9a-zA-Z]+)' _TESTS = [{ 'url': 'https://blerp.com/soundbites/6320fe8745636cb4dd677a5a', 'info_dict': { 'id': '6320fe8745636cb4dd677a5a', 'title': 'Samsung Galaxy S8 Over the Horizon Ringtone 2016', 'uploader': 'luminousaj', 'uploader_id': '5fb81e51aa66ae000c395478', 'ext': 'mp3', 'tags': ['samsung', 'galaxy', 's8', 'over the horizon', '2016', 'ringtone'], } }, { 'url': 'https://blerp.com/soundbites/5bc94ef4796001000498429f', 'info_dict': { 'id': '5bc94ef4796001000498429f', 'title': 'Yee', 'uploader': '179617322678353920', 'uploader_id': '5ba99cf71386730004552c42', 'ext': 'mp3', 'tags': ['YEE', 'YEET', 'wo ha haah catchy tune yee', 'yee'] } }] _GRAPHQL_OPERATIONNAME = "webBitePageGetBite" _GRAPHQL_QUERY = ( '''query webBitePageGetBite($_id: MongoID!) 
{ web { biteById(_id: $_id) { ...bitePageFrag __typename } __typename } } fragment bitePageFrag on Bite { _id title userKeywords keywords color visibility isPremium owned price extraReview isAudioExists image { filename original { url __typename } __typename } userReactions { _id reactions createdAt __typename } topReactions totalSaveCount saved blerpLibraryType license licenseMetaData playCount totalShareCount totalFavoriteCount totalAddedToBoardCount userCategory userAudioQuality audioCreationState transcription userTranscription description createdAt updatedAt author listingType ownerObject { _id username profileImage { filename original { url __typename } __typename } __typename } transcription favorited visibility isCurated sourceUrl audienceRating strictAudienceRating ownerId reportObject { reportedContentStatus __typename } giphy { mp4 gif __typename } audio { filename original { url __typename } mp3 { url __typename } __typename } __typename } ''') def _real_extract(self, url): audio_id = self._match_id(url) data = { 'operationName': self._GRAPHQL_OPERATIONNAME, 'query': self._GRAPHQL_QUERY, 'variables': { '_id': audio_id } } headers = { 'Content-Type': 'application/json' } json_result = self._download_json('https://api.blerp.com/graphql', audio_id, data=json.dumps(data).encode('utf-8'), headers=headers) bite_json = json_result['data']['web']['biteById'] info_dict = { 'id': bite_json['_id'], 'url': bite_json['audio']['mp3']['url'], 'title': bite_json['title'], 'uploader': traverse_obj(bite_json, ('ownerObject', 'username'), expected_type=strip_or_none), 'uploader_id': traverse_obj(bite_json, ('ownerObject', '_id'), expected_type=strip_or_none), 'ext': 'mp3', 'tags': list(filter(None, map(strip_or_none, (traverse_obj(bite_json, 'userKeywords', expected_type=list) or []))) or None) } return info_dict
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/sbs.py
youtube_dl/extractor/sbs.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( smuggle_url, ExtractorError, ) class SBSIE(InfoExtractor): IE_DESC = 'sbs.com.au' _VALID_URL = r'https?://(?:www\.)?sbs\.com\.au/(?:ondemand(?:/video/(?:single/)?|.*?\bplay=|/watch/)|news/(?:embeds/)?video/)(?P<id>[0-9]+)' _TESTS = [{ # Original URL is handled by the generic IE which finds the iframe: # http://www.sbs.com.au/thefeed/blog/2014/08/21/dingo-conservation 'url': 'http://www.sbs.com.au/ondemand/video/single/320403011771/?source=drupal&vertical=thefeed', 'md5': '3150cf278965eeabb5b4cea1c963fe0a', 'info_dict': { 'id': '_rFBPRPO4pMR', 'ext': 'mp4', 'title': 'Dingo Conservation (The Feed)', 'description': 'md5:f250a9856fca50d22dec0b5b8015f8a5', 'thumbnail': r're:http://.*\.jpg', 'duration': 308, 'timestamp': 1408613220, 'upload_date': '20140821', 'uploader': 'SBSC', }, }, { 'url': 'http://www.sbs.com.au/ondemand/video/320403011771/Dingo-Conservation-The-Feed', 'only_matching': True, }, { 'url': 'http://www.sbs.com.au/news/video/471395907773/The-Feed-July-9', 'only_matching': True, }, { 'url': 'https://www.sbs.com.au/ondemand/?play=1836638787723', 'only_matching': True, }, { 'url': 'https://www.sbs.com.au/ondemand/program/inside-windsor-castle?play=1283505731842', 'only_matching': True, }, { 'url': 'https://www.sbs.com.au/news/embeds/video/1840778819866', 'only_matching': True, }, { 'url': 'https://www.sbs.com.au/ondemand/watch/1698704451971', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) player_params = self._download_json( 'http://www.sbs.com.au/api/video_pdkvars/id/%s?form=json' % video_id, video_id) error = player_params.get('error') if error: error_message = 'Sorry, The video you are looking for does not exist.' video_data = error.get('results') or {} error_code = error.get('errorCode') if error_code == 'ComingSoon': error_message = '%s is not yet available.' 
% video_data.get('title', '') elif error_code in ('Forbidden', 'intranetAccessOnly'): error_message = 'Sorry, This video cannot be accessed via this website' elif error_code == 'Expired': error_message = 'Sorry, %s is no longer available.' % video_data.get('title', '') raise ExtractorError('%s said: %s' % (self.IE_NAME, error_message), expected=True) urls = player_params['releaseUrls'] theplatform_url = (urls.get('progressive') or urls.get('html') or urls.get('standard') or player_params['relatedItemsURL']) return { '_type': 'url_transparent', 'ie_key': 'ThePlatform', 'id': video_id, 'url': smuggle_url(self._proto_relative_url(theplatform_url), {'force_smil_url': True}), }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/pyvideo.py
youtube_dl/extractor/pyvideo.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import compat_str from ..utils import int_or_none class PyvideoIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?pyvideo\.org/(?P<category>[^/]+)/(?P<id>[^/?#&.]+)' _TESTS = [{ 'url': 'http://pyvideo.org/pycon-us-2013/become-a-logging-expert-in-30-minutes.html', 'info_dict': { 'id': 'become-a-logging-expert-in-30-minutes', }, 'playlist_count': 2, }, { 'url': 'http://pyvideo.org/pygotham-2012/gloriajw-spotifywitherikbernhardsson182m4v.html', 'md5': '5fe1c7e0a8aa5570330784c847ff6d12', 'info_dict': { 'id': '2542', 'ext': 'm4v', 'title': 'Gloriajw-SpotifyWithErikBernhardsson182.m4v', }, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) category = mobj.group('category') video_id = mobj.group('id') entries = [] data = self._download_json( 'https://raw.githubusercontent.com/pyvideo/data/master/%s/videos/%s.json' % (category, video_id), video_id, fatal=False) if data: for video in data['videos']: video_url = video.get('url') if video_url: if video.get('type') == 'youtube': entries.append(self.url_result(video_url, 'Youtube')) else: entries.append({ 'id': compat_str(data.get('id') or video_id), 'url': video_url, 'title': data['title'], 'description': data.get('description') or data.get('summary'), 'thumbnail': data.get('thumbnail_url'), 'duration': int_or_none(data.get('duration')), }) else: webpage = self._download_webpage(url, video_id) title = self._og_search_title(webpage) media_urls = self._search_regex( r'(?s)Media URL:(.+?)</li>', webpage, 'media urls') for m in re.finditer( r'<a[^>]+href=(["\'])(?P<url>http.+?)\1', media_urls): media_url = m.group('url') if re.match(r'https?://www\.youtube\.com/watch\?v=.*', media_url): entries.append(self.url_result(media_url, 'Youtube')) else: entries.append({ 'id': video_id, 'url': media_url, 'title': title, }) return self.playlist_result(entries, video_id)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/awaan.py
youtube_dl/extractor/awaan.py
# coding: utf-8 from __future__ import unicode_literals import re import base64 from .common import InfoExtractor from ..compat import ( compat_urllib_parse_urlencode, compat_str, ) from ..utils import ( int_or_none, parse_iso8601, smuggle_url, unsmuggle_url, urlencode_postdata, ) class AWAANIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?(?:awaan|dcndigital)\.ae/(?:#/)?show/(?P<show_id>\d+)/[^/]+(?:/(?P<video_id>\d+)/(?P<season_id>\d+))?' def _real_extract(self, url): show_id, video_id, season_id = re.match(self._VALID_URL, url).groups() if video_id and int(video_id) > 0: return self.url_result( 'http://awaan.ae/media/%s' % video_id, 'AWAANVideo') elif season_id and int(season_id) > 0: return self.url_result(smuggle_url( 'http://awaan.ae/program/season/%s' % season_id, {'show_id': show_id}), 'AWAANSeason') else: return self.url_result( 'http://awaan.ae/program/%s' % show_id, 'AWAANSeason') class AWAANBaseIE(InfoExtractor): def _parse_video_data(self, video_data, video_id, is_live): title = video_data.get('title_en') or video_data['title_ar'] img = video_data.get('img') return { 'id': video_id, 'title': self._live_title(title) if is_live else title, 'description': video_data.get('description_en') or video_data.get('description_ar'), 'thumbnail': 'http://admin.mangomolo.com/analytics/%s' % img if img else None, 'duration': int_or_none(video_data.get('duration')), 'timestamp': parse_iso8601(video_data.get('create_time'), ' '), 'is_live': is_live, 'uploader_id': video_data.get('user_id'), } class AWAANVideoIE(AWAANBaseIE): IE_NAME = 'awaan:video' _VALID_URL = r'https?://(?:www\.)?(?:awaan|dcndigital)\.ae/(?:#/)?(?:video(?:/[^/]+)?|media|catchup/[^/]+/[^/]+)/(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.dcndigital.ae/#/video/%D8%B1%D8%AD%D9%84%D8%A9-%D8%A7%D9%84%D8%B9%D9%85%D8%B1-%D8%A7%D9%84%D8%AD%D9%84%D9%82%D8%A9-1/17375', 'md5': '5f61c33bfc7794315c671a62d43116aa', 'info_dict': { 'id': '17375', 'ext': 'mp4', 'title': 'رحلة العمر : الحلقة 1', 'description': 
'md5:0156e935d870acb8ef0a66d24070c6d6', 'duration': 2041, 'timestamp': 1227504126, 'upload_date': '20081124', 'uploader_id': '71', }, }, { 'url': 'http://awaan.ae/video/26723981/%D8%AF%D8%A7%D8%B1-%D8%A7%D9%84%D8%B3%D9%84%D8%A7%D9%85:-%D8%AE%D9%8A%D8%B1-%D8%AF%D9%88%D8%B1-%D8%A7%D9%84%D8%A3%D9%86%D8%B5%D8%A7%D8%B1', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) video_data = self._download_json( 'http://admin.mangomolo.com/analytics/index.php/plus/video?id=%s' % video_id, video_id, headers={'Origin': 'http://awaan.ae'}) info = self._parse_video_data(video_data, video_id, False) embed_url = 'http://admin.mangomolo.com/analytics/index.php/customers/embed/video?' + compat_urllib_parse_urlencode({ 'id': video_data['id'], 'user_id': video_data['user_id'], 'signature': video_data['signature'], 'countries': 'Q0M=', 'filter': 'DENY', }) info.update({ '_type': 'url_transparent', 'url': embed_url, 'ie_key': 'MangomoloVideo', }) return info class AWAANLiveIE(AWAANBaseIE): IE_NAME = 'awaan:live' _VALID_URL = r'https?://(?:www\.)?(?:awaan|dcndigital)\.ae/(?:#/)?live/(?P<id>\d+)' _TEST = { 'url': 'http://awaan.ae/live/6/dubai-tv', 'info_dict': { 'id': '6', 'ext': 'mp4', 'title': 're:Dubai Al Oula [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', 'upload_date': '20150107', 'timestamp': 1420588800, 'uploader_id': '71', }, 'params': { # m3u8 download 'skip_download': True, }, } def _real_extract(self, url): channel_id = self._match_id(url) channel_data = self._download_json( 'http://admin.mangomolo.com/analytics/index.php/plus/getchanneldetails?channel_id=%s' % channel_id, channel_id, headers={'Origin': 'http://awaan.ae'}) info = self._parse_video_data(channel_data, channel_id, True) embed_url = 'http://admin.mangomolo.com/analytics/index.php/customers/embed/index?' 
+ compat_urllib_parse_urlencode({ 'id': base64.b64encode(channel_data['user_id'].encode()).decode(), 'channelid': base64.b64encode(channel_data['id'].encode()).decode(), 'signature': channel_data['signature'], 'countries': 'Q0M=', 'filter': 'DENY', }) info.update({ '_type': 'url_transparent', 'url': embed_url, 'ie_key': 'MangomoloLive', }) return info class AWAANSeasonIE(InfoExtractor): IE_NAME = 'awaan:season' _VALID_URL = r'https?://(?:www\.)?(?:awaan|dcndigital)\.ae/(?:#/)?program/(?:(?P<show_id>\d+)|season/(?P<season_id>\d+))' _TEST = { 'url': 'http://dcndigital.ae/#/program/205024/%D9%85%D8%AD%D8%A7%D8%B6%D8%B1%D8%A7%D8%AA-%D8%A7%D9%84%D8%B4%D9%8A%D8%AE-%D8%A7%D9%84%D8%B4%D8%B9%D8%B1%D8%A7%D9%88%D9%8A', 'info_dict': { 'id': '7910', 'title': 'محاضرات الشيخ الشعراوي', }, 'playlist_mincount': 27, } def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) show_id, season_id = re.match(self._VALID_URL, url).groups() data = {} if season_id: data['season'] = season_id show_id = smuggled_data.get('show_id') if show_id is None: season = self._download_json( 'http://admin.mangomolo.com/analytics/index.php/plus/season_info?id=%s' % season_id, season_id, headers={'Origin': 'http://awaan.ae'}) show_id = season['id'] data['show_id'] = show_id show = self._download_json( 'http://admin.mangomolo.com/analytics/index.php/plus/show', show_id, data=urlencode_postdata(data), headers={ 'Origin': 'http://awaan.ae', 'Content-Type': 'application/x-www-form-urlencoded' }) if not season_id: season_id = show['default_season'] for season in show['seasons']: if season['id'] == season_id: title = season.get('title_en') or season['title_ar'] entries = [] for video in show['videos']: video_id = compat_str(video['id']) entries.append(self.url_result( 'http://awaan.ae/media/%s' % video_id, 'AWAANVideo', video_id)) return self.playlist_result(entries, season_id, title)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/sina.py
youtube_dl/extractor/sina.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( HEADRequest, ExtractorError, int_or_none, update_url_query, qualities, get_element_by_attribute, clean_html, ) class SinaIE(InfoExtractor): _VALID_URL = r'''(?x)https?://(?:.*?\.)?video\.sina\.com\.cn/ (?: (?:view/|.*\#)(?P<video_id>\d+)| .+?/(?P<pseudo_id>[^/?#]+)(?:\.s?html)| # This is used by external sites like Weibo api/sinawebApi/outplay.php/(?P<token>.+?)\.swf ) ''' _TESTS = [ { 'url': 'http://video.sina.com.cn/news/spj/topvideoes20160504/?opsubject_id=top1#250576622', 'md5': 'd38433e2fc886007729735650ae4b3e9', 'info_dict': { 'id': '250576622', 'ext': 'mp4', 'title': '现场:克鲁兹宣布退选 特朗普将稳获提名', } }, { 'url': 'http://video.sina.com.cn/v/b/101314253-1290078633.html', 'info_dict': { 'id': '101314253', 'ext': 'flv', 'title': '军方提高对朝情报监视级别', }, 'skip': 'the page does not exist or has been deleted', }, { 'url': 'http://video.sina.com.cn/view/250587748.html', 'md5': '3d1807a25c775092aab3bc157fff49b4', 'info_dict': { 'id': '250587748', 'ext': 'mp4', 'title': '瞬间泪目:8年前汶川地震珍贵视频首曝光', }, }, ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('video_id') if not video_id: if mobj.group('token') is not None: # The video id is in the redirected url self.to_screen('Getting video id') request = HEADRequest(url) _, urlh = self._download_webpage_handle(request, 'NA', False) return self._real_extract(urlh.geturl()) else: pseudo_id = mobj.group('pseudo_id') webpage = self._download_webpage(url, pseudo_id) error = get_element_by_attribute('class', 'errtitle', webpage) if error: raise ExtractorError('%s said: %s' % ( self.IE_NAME, clean_html(error)), expected=True) video_id = self._search_regex( r"video_id\s*:\s*'(\d+)'", webpage, 'video id') video_data = self._download_json( 'http://s.video.sina.com.cn/video/h5play', video_id, query={'video_id': video_id}) if video_data['code'] != 1: raise ExtractorError('%s said: %s' % ( 
self.IE_NAME, video_data['message']), expected=True) else: video_data = video_data['data'] title = video_data['title'] description = video_data.get('description') if description: description = description.strip() preference = qualities(['cif', 'sd', 'hd', 'fhd', 'ffd']) formats = [] for quality_id, quality in video_data.get('videos', {}).get('mp4', {}).items(): file_api = quality.get('file_api') file_id = quality.get('file_id') if not file_api or not file_id: continue formats.append({ 'format_id': quality_id, 'url': update_url_query(file_api, {'vid': file_id}), 'preference': preference(quality_id), 'ext': 'mp4', }) self._sort_formats(formats) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': video_data.get('image'), 'duration': int_or_none(video_data.get('length')), 'timestamp': int_or_none(video_data.get('create_time')), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/caffeine.py
youtube_dl/extractor/caffeine.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, merge_dicts, parse_iso8601, T, traverse_obj, txt_or_none, urljoin, ) class CaffeineTVIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?caffeine\.tv/[^/]+/video/(?P<id>[0-9a-f-]+)' _TESTS = [{ 'url': 'https://www.caffeine.tv/TsuSurf/video/cffc0a00-e73f-11ec-8080-80017d29f26e', 'info_dict': { 'id': 'cffc0a00-e73f-11ec-8080-80017d29f26e', 'ext': 'mp4', 'title': 'GOOOOD MORNINNNNN #highlights', 'timestamp': 1654702180, 'upload_date': '20220608', 'uploader': 'TsuSurf', 'duration': 3145, 'age_limit': 17, }, 'params': { 'format': 'bestvideo', }, }] def _real_extract(self, url): video_id = self._match_id(url) json_data = self._download_json( 'https://api.caffeine.tv/social/public/activity/' + video_id, video_id) broadcast_info = traverse_obj(json_data, ('broadcast_info', T(dict))) or {} title = broadcast_info['broadcast_title'] video_url = broadcast_info['video_url'] ext = determine_ext(video_url) if ext == 'm3u8': formats = self._extract_m3u8_formats( video_url, video_id, 'mp4', entry_protocol='m3u8', fatal=False) else: formats = [{'url': video_url}] self._sort_formats(formats) return merge_dicts({ 'id': video_id, 'title': title, 'formats': formats, }, traverse_obj(json_data, { 'uploader': ((None, 'user'), 'username'), }, get_all=False), traverse_obj(json_data, { 'like_count': ('like_count', T(int_or_none)), 'view_count': ('view_count', T(int_or_none)), 'comment_count': ('comment_count', T(int_or_none)), 'tags': ('tags', Ellipsis, T(txt_or_none)), 'is_live': 'is_live', 'uploader': ('user', 'name'), }), traverse_obj(broadcast_info, { 'duration': ('content_duration', T(int_or_none)), 'timestamp': ('broadcast_start_time', T(parse_iso8601)), 'thumbnail': ('preview_image_path', T(lambda u: urljoin(url, u))), 'age_limit': ('content_rating', T(lambda r: r and { # assume Apple Store ratings [1] # 1. 
https://en.wikipedia.org/wiki/Mobile_software_content_rating_system 'FOUR_PLUS': 0, 'NINE_PLUS': 9, 'TWELVE_PLUS': 12, 'SEVENTEEN_PLUS': 17, }.get(r, 17))), }))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/spankbang.py
youtube_dl/extractor/spankbang.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, ExtractorError, merge_dicts, parse_duration, parse_resolution, str_to_int, url_or_none, urlencode_postdata, urljoin, ) class SpankBangIE(InfoExtractor): _VALID_URL = r'''(?x) https?:// (?:[^/]+\.)?spankbang\.com/ (?: (?P<id>[\da-z]+)/(?:video|play|embed)\b| [\da-z]+-(?P<id_2>[\da-z]+)/playlist/[^/?#&]+ ) ''' _TESTS = [{ 'url': 'http://spankbang.com/3vvn/video/fantasy+solo', 'md5': '1cc433e1d6aa14bc376535b8679302f7', 'info_dict': { 'id': '3vvn', 'ext': 'mp4', 'title': 'fantasy solo', 'description': 'dillion harper masturbates on a bed', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'silly2587', 'timestamp': 1422571989, 'upload_date': '20150129', 'age_limit': 18, } }, { # 480p only 'url': 'http://spankbang.com/1vt0/video/solvane+gangbang', 'only_matching': True, }, { # no uploader 'url': 'http://spankbang.com/lklg/video/sex+with+anyone+wedding+edition+2', 'only_matching': True, }, { # mobile page 'url': 'http://m.spankbang.com/1o2de/video/can+t+remember+her+name', 'only_matching': True, }, { # 4k 'url': 'https://spankbang.com/1vwqx/video/jade+kush+solo+4k', 'only_matching': True, }, { 'url': 'https://m.spankbang.com/3vvn/play/fantasy+solo/480p/', 'only_matching': True, }, { 'url': 'https://m.spankbang.com/3vvn/play', 'only_matching': True, }, { 'url': 'https://spankbang.com/2y3td/embed/', 'only_matching': True, }, { 'url': 'https://spankbang.com/2v7ik-7ecbgu/playlist/latina+booty', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') or mobj.group('id_2') webpage = self._download_webpage( url.replace('/%s/embed' % video_id, '/%s/video' % video_id), video_id, headers={'Cookie': 'country=US'}) if re.search(r'<[^>]+\b(?:id|class)=["\']video_removed', webpage): raise ExtractorError( 'Video %s is not available' % video_id, expected=True) formats = [] def extract_format(format_id, 
format_url): f_url = url_or_none(format_url) if not f_url: return f = parse_resolution(format_id) ext = determine_ext(f_url) if format_id.startswith('m3u8') or ext == 'm3u8': formats.extend(self._extract_m3u8_formats( f_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)) elif format_id.startswith('mpd') or ext == 'mpd': formats.extend(self._extract_mpd_formats( f_url, video_id, mpd_id='dash', fatal=False)) elif ext == 'mp4' or f.get('width') or f.get('height'): f.update({ 'url': f_url, 'format_id': format_id, }) formats.append(f) STREAM_URL_PREFIX = 'stream_url_' for mobj in re.finditer( r'%s(?P<id>[^\s=]+)\s*=\s*(["\'])(?P<url>(?:(?!\2).)+)\2' % STREAM_URL_PREFIX, webpage): extract_format(mobj.group('id', 'url')) if not formats: stream_key = self._search_regex( r'data-streamkey\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'stream key', group='value') stream = self._download_json( 'https://spankbang.com/api/videos/stream', video_id, 'Downloading stream JSON', data=urlencode_postdata({ 'id': stream_key, 'data': 0, }), headers={ 'Referer': url, 'X-Requested-With': 'XMLHttpRequest', }) for format_id, format_url in stream.items(): if format_url and isinstance(format_url, list): format_url = format_url[0] extract_format(format_id, format_url) self._sort_formats(formats, field_preference=('preference', 'height', 'width', 'fps', 'tbr', 'format_id')) info = self._search_json_ld(webpage, video_id, default={}) title = self._html_search_regex( r'(?s)<h1[^>]*>(.+?)</h1>', webpage, 'title', default=None) description = self._search_regex( r'<div[^>]+\bclass=["\']bottom[^>]+>\s*<p>[^<]*</p>\s*<p>([^<]+)', webpage, 'description', default=None) thumbnail = self._og_search_thumbnail(webpage, default=None) uploader = self._html_search_regex( (r'(?s)<li[^>]+class=["\']profile[^>]+>(.+?)</a>', r'class="user"[^>]*><img[^>]+>([^<]+)'), webpage, 'uploader', default=None) duration = parse_duration(self._search_regex( 
r'<div[^>]+\bclass=["\']right_side[^>]+>\s*<span>([^<]+)', webpage, 'duration', default=None)) view_count = str_to_int(self._search_regex( r'([\d,.]+)\s+plays', webpage, 'view count', default=None)) age_limit = self._rta_search(webpage) return merge_dicts({ 'id': video_id, 'title': title or video_id, 'description': description, 'thumbnail': thumbnail, 'uploader': uploader, 'duration': duration, 'view_count': view_count, 'formats': formats, 'age_limit': age_limit, }, info ) class SpankBangPlaylistIE(InfoExtractor): _VALID_URL = r'https?://(?:[^/]+\.)?spankbang\.com/(?P<id>[\da-z]+)/playlist/(?P<display_id>[^/]+)' _TEST = { 'url': 'https://spankbang.com/ug0k/playlist/big+ass+titties', 'info_dict': { 'id': 'ug0k', 'title': 'Big Ass Titties', }, 'playlist_mincount': 40, } def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) playlist_id = mobj.group('id') display_id = mobj.group('display_id') webpage = self._download_webpage( url, playlist_id, headers={'Cookie': 'country=US; mobile=on'}) entries = [self.url_result( urljoin(url, mobj.group('path')), ie=SpankBangIE.ie_key(), video_id=mobj.group('id')) for mobj in re.finditer( r'<a[^>]+\bhref=(["\'])(?P<path>/?[\da-z]+-(?P<id>[\da-z]+)/playlist/%s(?:(?!\1).)*)\1' % re.escape(display_id), webpage)] title = self._html_search_regex( r'<h1>([^<]+)\s+playlist\s*<', webpage, 'playlist title', fatal=False) return self.playlist_result(entries, playlist_id, title)
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/popcorntv.py
youtube_dl/extractor/popcorntv.py
from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( extract_attributes, int_or_none, unified_timestamp, ) class PopcornTVIE(InfoExtractor): _VALID_URL = r'https?://[^/]+\.popcorntv\.it/guarda/(?P<display_id>[^/]+)/(?P<id>\d+)' _TESTS = [{ 'url': 'https://animemanga.popcorntv.it/guarda/food-wars-battaglie-culinarie-episodio-01/9183', 'md5': '47d65a48d147caf692ab8562fe630b45', 'info_dict': { 'id': '9183', 'display_id': 'food-wars-battaglie-culinarie-episodio-01', 'ext': 'mp4', 'title': 'Food Wars, Battaglie Culinarie | Episodio 01', 'description': 'md5:b8bea378faae4651d3b34c6e112463d0', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1497610857, 'upload_date': '20170616', 'duration': 1440, 'view_count': int, }, }, { 'url': 'https://cinema.popcorntv.it/guarda/smash-cut/10433', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) display_id, video_id = mobj.group('display_id', 'id') webpage = self._download_webpage(url, display_id) m3u8_url = extract_attributes( self._search_regex( r'(<link[^>]+itemprop=["\'](?:content|embed)Url[^>]*>)', webpage, 'content' ))['href'] formats = self._extract_m3u8_formats( m3u8_url, display_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls') title = self._search_regex( r'<h1[^>]+itemprop=["\']name[^>]*>([^<]+)', webpage, 'title', default=None) or self._og_search_title(webpage) description = self._html_search_regex( r'(?s)<article[^>]+itemprop=["\']description[^>]*>(.+?)</article>', webpage, 'description', fatal=False) thumbnail = self._og_search_thumbnail(webpage) timestamp = unified_timestamp(self._html_search_meta( 'uploadDate', webpage, 'timestamp')) duration = int_or_none(self._html_search_meta( 'duration', webpage), invscale=60) view_count = int_or_none(self._html_search_meta( 'interactionCount', webpage, 'view count')) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': description, 'thumbnail': 
thumbnail, 'timestamp': timestamp, 'duration': duration, 'view_count': view_count, 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/acast.py
youtube_dl/extractor/acast.py
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( clean_html, clean_podcast_url, int_or_none, parse_iso8601, ) class ACastBaseIE(InfoExtractor): def _extract_episode(self, episode, show_info): title = episode['title'] info = { 'id': episode['id'], 'display_id': episode.get('episodeUrl'), 'url': clean_podcast_url(episode['url']), 'title': title, 'description': clean_html(episode.get('description') or episode.get('summary')), 'thumbnail': episode.get('image'), 'timestamp': parse_iso8601(episode.get('publishDate')), 'duration': int_or_none(episode.get('duration')), 'filesize': int_or_none(episode.get('contentLength')), 'season_number': int_or_none(episode.get('season')), 'episode': title, 'episode_number': int_or_none(episode.get('episode')), } info.update(show_info) return info def _extract_show_info(self, show): return { 'creator': show.get('author'), 'series': show.get('title'), } def _call_api(self, path, video_id, query=None): return self._download_json( 'https://feeder.acast.com/api/v1/shows/' + path, video_id, query=query) class ACastIE(ACastBaseIE): IE_NAME = 'acast' _VALID_URL = r'''(?x) https?:// (?: (?:(?:embed|www)\.)?acast\.com/| play\.acast\.com/s/ ) (?P<channel>[^/]+)/(?P<id>[^/#?]+) ''' _TESTS = [{ 'url': 'https://www.acast.com/sparpodcast/2.raggarmordet-rosterurdetforflutna', 'md5': 'f5598f3ad1e4776fed12ec1407153e4b', 'info_dict': { 'id': '2a92b283-1a75-4ad8-8396-499c641de0d9', 'ext': 'mp3', 'title': '2. Raggarmordet - Röster ur det förflutna', 'description': 'md5:a992ae67f4d98f1c0141598f7bebbf67', 'timestamp': 1477346700, 'upload_date': '20161024', 'duration': 2766, 'creator': 'Anton Berg & Martin Johnson', 'series': 'Spår', 'episode': '2. 
Raggarmordet - Röster ur det förflutna', } }, { 'url': 'http://embed.acast.com/adambuxton/ep.12-adam-joeschristmaspodcast2015', 'only_matching': True, }, { 'url': 'https://play.acast.com/s/rattegangspodden/s04e09styckmordetihelenelund-del2-2', 'only_matching': True, }, { 'url': 'https://play.acast.com/s/sparpodcast/2a92b283-1a75-4ad8-8396-499c641de0d9', 'only_matching': True, }] def _real_extract(self, url): channel, display_id = re.match(self._VALID_URL, url).groups() episode = self._call_api( '%s/episodes/%s' % (channel, display_id), display_id, {'showInfo': 'true'}) return self._extract_episode( episode, self._extract_show_info(episode.get('show') or {})) class ACastChannelIE(ACastBaseIE): IE_NAME = 'acast:channel' _VALID_URL = r'''(?x) https?:// (?: (?:www\.)?acast\.com/| play\.acast\.com/s/ ) (?P<id>[^/#?]+) ''' _TESTS = [{ 'url': 'https://www.acast.com/todayinfocus', 'info_dict': { 'id': '4efc5294-5385-4847-98bd-519799ce5786', 'title': 'Today in Focus', 'description': 'md5:c09ce28c91002ce4ffce71d6504abaae', }, 'playlist_mincount': 200, }, { 'url': 'http://play.acast.com/s/ft-banking-weekly', 'only_matching': True, }] @classmethod def suitable(cls, url): return False if ACastIE.suitable(url) else super(ACastChannelIE, cls).suitable(url) def _real_extract(self, url): show_slug = self._match_id(url) show = self._call_api(show_slug, show_slug) show_info = self._extract_show_info(show) entries = [] for episode in (show.get('episodes') or []): entries.append(self._extract_episode(episode, show_info)) return self.playlist_result( entries, show.get('id'), show.get('title'), show.get('description'))
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false
ytdl-org/youtube-dl
https://github.com/ytdl-org/youtube-dl/blob/956b8c585591b401a543e409accb163eeaaa1193/youtube_dl/extractor/izlesene.py
youtube_dl/extractor/izlesene.py
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_str, compat_urllib_parse_unquote, ) from ..utils import ( determine_ext, float_or_none, get_element_by_id, int_or_none, parse_iso8601, str_to_int, ) class IzleseneIE(InfoExtractor): _VALID_URL = r'''(?x) https?://(?:(?:www|m)\.)?izlesene\.com/ (?:video|embedplayer)/(?:[^/]+/)?(?P<id>[0-9]+) ''' _TESTS = [ { 'url': 'http://www.izlesene.com/video/sevincten-cildirtan-dogum-gunu-hediyesi/7599694', 'md5': '4384f9f0ea65086734b881085ee05ac2', 'info_dict': { 'id': '7599694', 'ext': 'mp4', 'title': 'Sevinçten Çıldırtan Doğum Günü Hediyesi', 'description': 'md5:253753e2655dde93f59f74b572454f6d', 'thumbnail': r're:^https?://.*\.jpg', 'uploader_id': 'pelikzzle', 'timestamp': int, 'upload_date': '20140702', 'duration': 95.395, 'age_limit': 0, } }, { 'url': 'http://www.izlesene.com/video/tarkan-dortmund-2006-konseri/17997', 'md5': '97f09b6872bffa284cb7fa4f6910cb72', 'info_dict': { 'id': '17997', 'ext': 'mp4', 'title': 'Tarkan Dortmund 2006 Konseri', 'thumbnail': r're:^https://.*\.jpg', 'uploader_id': 'parlayankiz', 'timestamp': int, 'upload_date': '20061112', 'duration': 253.666, 'age_limit': 0, } }, ] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage('http://www.izlesene.com/video/%s' % video_id, video_id) video = self._parse_json( self._search_regex( r'videoObj\s*=\s*({.+?})\s*;\s*\n', webpage, 'streams'), video_id) title = video.get('videoTitle') or self._og_search_title(webpage) formats = [] for stream in video['media']['level']: source_url = stream.get('source') if not source_url or not isinstance(source_url, compat_str): continue ext = determine_ext(url, 'mp4') quality = stream.get('value') height = int_or_none(quality) formats.append({ 'format_id': '%sp' % quality if quality else 'sd', 'url': compat_urllib_parse_unquote(source_url), 'ext': ext, 'height': height, }) self._sort_formats(formats) description = 
self._og_search_description(webpage, default=None) thumbnail = video.get('posterURL') or self._proto_relative_url( self._og_search_thumbnail(webpage), scheme='http:') uploader = self._html_search_regex( r"adduserUsername\s*=\s*'([^']+)';", webpage, 'uploader', fatal=False) timestamp = parse_iso8601(self._html_search_meta( 'uploadDate', webpage, 'upload date')) duration = float_or_none(video.get('duration') or self._html_search_regex( r'videoduration["\']?\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage, 'duration', fatal=False, group='value'), scale=1000) view_count = str_to_int(get_element_by_id('videoViewCount', webpage)) comment_count = self._html_search_regex( r'comment_count\s*=\s*\'([^\']+)\';', webpage, 'comment_count', fatal=False) return { 'id': video_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'uploader_id': uploader, 'timestamp': timestamp, 'duration': duration, 'view_count': int_or_none(view_count), 'comment_count': int_or_none(comment_count), 'age_limit': self._family_friendly_search(webpage), 'formats': formats, }
python
Unlicense
956b8c585591b401a543e409accb163eeaaa1193
2026-01-04T14:38:15.437342Z
false