sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
hiyouga/LlamaFactory:src/llamafactory/v1/core/base_sampler.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import AsyncGenerator
from ..config import ModelArguments, SampleArguments, SampleBackend
from ..utils.types import HFModel, Message, Sample, TorchDataset
from .utils.inference_engine import HuggingFaceEngine
from .utils.rendering import Renderer
class BaseSampler:
"""Base sampler.
Args:
args: Sample arguments.
model_args: Model arguments.
model: Model.
renderer: Renderer.
"""
def __init__(
self,
args: SampleArguments,
model_args: ModelArguments,
model: HFModel,
renderer: Renderer,
) -> None:
if args.sample_backend == SampleBackend.HF:
self.engine = HuggingFaceEngine(args, model_args, model, renderer)
else:
raise ValueError(f"Unknown sample backend: {args.sample_backend}")
async def generate(self, messages: list[Message], tools: str | None = None) -> AsyncGenerator[str, None]:
"""Generate tokens asynchronously.
Args:
messages: List of messages.
tools: Tools string.
Yields:
Generated tokens.
"""
async for token in self.engine.generate(messages, tools):
yield token
async def batch_infer(self, dataset: TorchDataset) -> list[Sample]:
"""Batch infer samples.
Args:
dataset: Torch dataset.
Returns:
List of samples.
"""
return await self.engine.batch_infer(dataset)
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/core/base_sampler.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:tests_v1/plugins/model_plugins/test_init_plugin.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from llamafactory.v1.accelerator.interface import DistributedInterface
from llamafactory.v1.config.arg_parser import get_args
from llamafactory.v1.core.model_engine import ModelEngine
def test_init_on_meta():
model_args, *_ = get_args(
dict(
model="llamafactory/tiny-random-qwen3",
init_config={"name": "init_on_meta"},
)
)
model_engine = ModelEngine(model_args=model_args)
assert model_engine.model.device.type == "meta"
def test_init_on_rank0():
model_args, *_ = get_args(
dict(
model="llamafactory/tiny-random-qwen3",
init_config={"name": "init_on_rank0"},
)
)
model_engine = ModelEngine(model_args=model_args)
if DistributedInterface().get_rank() == 0:
assert model_engine.model.device.type == "cpu"
else:
assert model_engine.model.device.type == "meta"
def test_init_on_default():
model_args, *_ = get_args(
dict(
model="llamafactory/tiny-random-qwen3",
init_config={"name": "init_on_default"},
)
)
model_engine = ModelEngine(model_args=model_args)
assert model_engine.model.device == DistributedInterface().current_device
if __name__ == "__main__":
"""
python tests_v1/plugins/model_plugins/test_init_plugin.py
"""
test_init_on_meta()
test_init_on_rank0()
test_init_on_default()
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "tests_v1/plugins/model_plugins/test_init_plugin.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
hiyouga/LlamaFactory:src/llamafactory/v1/plugins/model_plugins/kernels/base.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The definition of base kernel class.
Init Phase:
1. Define base kernel class.
2. Define abstract methods.
"""
from abc import ABC, abstractmethod
from typing import Any
from ....accelerator.helper import DeviceType, get_current_accelerator
from ....utils.types import HFModel
class BaseKernel(ABC):
r"""Base class for all kernel implementations.
Subclasses must implement the abstract methods and define the required class attributes.
"""
_kernel_id: Any = "" # kernel ID, any hashable value to identify a kernel implementation
_device: DeviceType = DeviceType.CPU # "cuda", "npu", "cpu", etc.
@classmethod
def get_kernel_id(cls) -> str:
"""Returns the unique identifier for the kernel."""
return cls._kernel_id
@classmethod
def get_device(cls) -> str:
"""Returns the device type associated with the kernel (e.g., "cuda", "npu", "cpu")."""
return cls._device
@classmethod
def check_deps(cls) -> bool:
"""Checks if the required dependencies for the kernel are available.
Returns:
bool: ``True`` if dependencies are met, ``False`` otherwise.
.. note::
In explicit mode, if a user specifies an implementation but this check fails,
it should raise an error instead of silently switching.
Kernels can override this method to implement custom dependency checks.
"""
if cls._device != get_current_accelerator().type:
return False
return True
@classmethod
@abstractmethod
def apply(cls, **kwargs) -> HFModel:
"""Applies the kernel optimization to the model.
Args:
**kwargs: Arbitrary keyword arguments, usually containing the model instance and the kernel configuration.
Returns:
HFModel: The model with the kernel applied.
Raises:
RuntimeError: If the kernel dependencies are not met.
NotImplementedError: If the method is not implemented by the subclass.
Example:
>>> from llamafactory.v1.plugins.model_plugins.kernels.interface import apply_kernel
>>> model = HFModel(config=config)
>>> model = apply_kernel(model=model, kernel_id="npu_fused_moe")
"""
if not cls.check_deps():
raise RuntimeError(f"{cls.__name__} is not available but {cls.__name__} kernel was called.")
raise NotImplementedError
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/plugins/model_plugins/kernels/base.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/plugins/model_plugins/kernels/interface.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The definition of kernel interface.
Init Phase:
1. Scan all kernels.
2. Register default kernels.
3. Define kernel plugin.
"""
import importlib
from pathlib import Path
from ....utils import logging
from ....utils.plugin import BasePlugin
from ....utils.types import HFModel
from .registry import Registry
logger = logging.get_logger(__name__)
def scan_all_kernels():
"""Scan all kernels in the ``ops`` directory.
Scans the ``ops`` directory for all ``.py`` files and attempts to import them.
Importing triggers the :func:`~registry.register_kernel` decorator, which automatically registers the kernels.
Returns:
dict[str, type[BaseKernel]]: A dictionary of registered kernels.
.. note::
This function assumes that the ``ops`` directory is located in the same directory as this file.
It recursively searches for ``.py`` files and constructs the module path for import.
"""
ops_path = Path(__file__).parent / "ops"
if not ops_path.exists():
return
base_package = __package__
for file_path in ops_path.rglob("*.py"):
if file_path.name == "__init__.py":
continue
# calculate the relative path:
# file_path = .../kernels_v2/ops/mlp/npu_swiglu.py
# rel_path = ops/mlp/npu_swiglu.py
rel_path = file_path.relative_to(Path(__file__).parent)
# build module path:
module_name = ".".join(rel_path.parts)[:-3]
full_module_name = f"{base_package}.{module_name}"
try:
importlib.import_module(full_module_name)
except Exception as e:
logger.warning(f"[Kernel Registry] Failed to import {full_module_name} when loading kernels: {e}")
return Registry.get_registered_kernels()
default_kernels = scan_all_kernels()
def get_default_kernels():
"""Get a list of default registered kernel IDs.
Returns:
list[str]: List of kernel IDs.
"""
return list(default_kernels.keys())
def apply_kernel(kernel_id: str, **kwargs):
"""Applies a specific kernel to the model.
Args:
kernel_id (str): The ID of the kernel to apply.
**kwargs: Keyword arguments passed to the kernel application function.
Typically includes the model instance.
Returns:
HFModel: The model with applied kernel.
"""
kernel = default_kernels.get(kernel_id)
if kernel is None:
raise ValueError(f"Kernel {kernel_id} not found")
kernel.apply(**kwargs)
class KernelPlugin(BasePlugin):
"""Plugin for managing kernel optimizations."""
pass
@KernelPlugin("auto").register()
def apply_default_kernels(model: HFModel, include_kernels: str = None) -> HFModel:
"""Applies all default registered kernels to the model.
Args:
model (HFModel): The model instance to apply kernels to.
include_kernels (str, optional): Comma-separated list of kernel IDs to apply.
If "auto" or True, applies all default kernels.
If None or False, no kernels are applied.
Defaults to None.
Returns:
HFModel: The model with applied kernels.
"""
if not include_kernels:
return model
elif include_kernels == "auto" or include_kernels is True:
use_kernels = default_kernels.keys()
else:
use_kernels = include_kernels.split(",") # "kernel_id1,kernel_id2,kernel_id3"
for kernel in use_kernels:
if kernel not in default_kernels:
raise ValueError(f"Kernel {kernel} not found")
apply_kernel(kernel, model=model)
return model
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/plugins/model_plugins/kernels/interface.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/plugins/model_plugins/kernels/ops/rope/npu_rope.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The definition of NPU fused RoPE kernels.
Init Phase:
1. Define RoPE forward functions.
2. Register NPU fused RoPE kernel.
"""
import sys
import torch
from ......accelerator.helper import DeviceType
from ......utils.logging import get_logger
from ......utils.types import HFModel
from ...base import BaseKernel
from ...registry import register_kernel
logger = get_logger(__name__)
try:
import torch_npu
except ImportError:
pass
def _apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors using NPU optimization.
Args:
q (Tensor): Query tensor.
k (Tensor): Key tensor.
cos (Tensor): Cosine part of embedding.
sin (Tensor): Sine part of embedding.
position_ids (Tensor, optional): Position IDs. Default: ``None``.
unsqueeze_dim (int): Dimension to unsqueeze cos and sin. Default: 1.
Returns:
tuple: (q_embed, k_embed) The embedded query and key tensors.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = torch_npu.npu_rotary_mul(q, cos, sin)
k_embed = torch_npu.npu_rotary_mul(k, cos, sin)
return q_embed, k_embed
def _apply_multimodal_rotary_pos_emb_qwen25_vl(q, k, cos, sin, mrope_section, unsqueeze_dim=1):
"""Applies Rotary Position Embedding with multimodal sections (Qwen2-VL) on NPU.
Args:
q (Tensor): Query tensor.
k (Tensor): Key tensor.
cos (Tensor): Cosine part of embedding.
sin (Tensor): Sine part of embedding.
mrope_section (Tensor): Multimodal RoPE section.
unsqueeze_dim (int): Dimension to unsqueeze cos and sin. Default: 1.
Returns:
tuple: (q_embed, k_embed) The embedded query and key tensors.
"""
mrope_section = mrope_section * 2
cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze(
unsqueeze_dim
)
sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze(
unsqueeze_dim
)
q_embed = torch_npu.npu_rotary_mul(q, cos, sin)
k_embed = torch_npu.npu_rotary_mul(k, cos, sin)
return q_embed, k_embed
@register_kernel
class NpuRoPEKernel(BaseKernel):
"""NPU Kernel for Rotary Position Embedding."""
_kernel_id = "npu_fused_rope"
_device = DeviceType.NPU
@classmethod
def apply(cls, **kwargs) -> "HFModel":
"""Apply RoPE acceleration by monkey-patching `apply_rotary_pos_emb`.
This function iterates through the model's modules to find attention layers,
identifies the module where they are defined, and replaces the original
`apply_rotary_pos_emb` function in that module's namespace with the
NPU-accelerated version from this file.
Args:
**kwargs: Keyword arguments containing the model.
Returns:
HFModel: The model with patched RoPE functions.
Raises:
RuntimeError: If dependencies are not met.
ValueError: If the model is not provided.
"""
if not cls.check_deps():
raise RuntimeError(f"torch_npu is not available but {cls.__name__} was called.")
model = kwargs.get("model", None)
if model is None:
raise ValueError(f"HFModel instance is required for {cls.__name__}.")
_modules = set()
for module in model.modules():
if "Attention" in module.__class__.__name__:
module_name = module.__class__.__module__
if module_name in _modules:
continue
try:
target_module = sys.modules[module_name]
if hasattr(target_module, "apply_rotary_pos_emb"):
if getattr(target_module, "apply_rotary_pos_emb") is not _apply_rotary_pos_emb:
setattr(target_module, "apply_rotary_pos_emb", _apply_rotary_pos_emb)
_modules.add(module_name)
if hasattr(target_module, "apply_multimodal_rotary_pos_emb"):
if (
getattr(target_module, "apply_multimodal_rotary_pos_emb")
is not _apply_multimodal_rotary_pos_emb_qwen25_vl
):
setattr(
target_module,
"apply_multimodal_rotary_pos_emb",
_apply_multimodal_rotary_pos_emb_qwen25_vl,
)
_modules.add(module_name)
except Exception as e:
logger.warning_rank0_once(f"Failed to apply RoPE kernel to module {module_name}: {e}")
return model
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/plugins/model_plugins/kernels/ops/rope/npu_rope.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:tests_v1/config/test_args_parser.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from pathlib import Path
from unittest.mock import patch
from llamafactory.v1.config.arg_parser import get_args
def test_get_args_from_yaml(tmp_path: Path):
config_yaml = """
### model
model: llamafactory/tiny-random-qwen3
trust_remote_code: true
model_class: llm
kernel_config:
name: auto
include_kernels: auto # choice: null/true/false/auto/kernel_id1,kernel_id2,kernel_id3, default is null
peft_config:
name: lora
lora_rank: 0.8
quant_config: null
### data
train_dataset: llamafactory/v1-sft-demo
### training
output_dir: outputs/test_run
micro_batch_size: 1
global_batch_size: 1
cutoff_len: 2048
learning_rate: 1.0e-4
bf16: false
dist_config: null
### sample
sample_backend: hf
max_new_tokens: 128
"""
config_file = tmp_path / "config.yaml"
config_file.write_text(config_yaml, encoding="utf-8")
test_argv = ["test_args_parser.py", str(config_file)]
with patch.object(sys, "argv", test_argv):
model_args, data_args, training_args, sample_args = get_args()
assert data_args.train_dataset == "llamafactory/v1-sft-demo"
assert model_args.model == "llamafactory/tiny-random-qwen3"
assert model_args.kernel_config.name == "auto"
assert model_args.kernel_config.get("include_kernels") == "auto"
assert model_args.peft_config.name == "lora"
assert model_args.peft_config.get("lora_rank") == 0.8
assert training_args.output_dir == "outputs/test_run"
assert training_args.micro_batch_size == 1
assert training_args.global_batch_size == 1
assert training_args.learning_rate == 1.0e-4
assert training_args.bf16 is False
assert training_args.dist_config is None
assert sample_args.sample_backend == "hf"
if __name__ == "__main__":
"""
python -m tests_v1.config.test_args_parser
"""
import tempfile
with tempfile.TemporaryDirectory() as tmp_dir:
test_get_args_from_yaml(tmp_path=Path(tmp_dir))
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "tests_v1/config/test_args_parser.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
hiyouga/LlamaFactory:src/llamafactory/v1/plugins/model_plugins/initialization.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ...accelerator.helper import DeviceType
from ...accelerator.interface import DistributedInterface
from ...utils.plugin import BasePlugin
class InitPlugin(BasePlugin):
def __call__(self) -> torch.device:
return super().__call__()
@InitPlugin("init_on_meta").register()
def init_on_meta() -> torch.device:
return torch.device(DeviceType.META.value)
@InitPlugin("init_on_rank0").register()
def init_on_rank0() -> torch.device:
if DistributedInterface().get_rank() == 0:
return torch.device(DeviceType.CPU.value)
else:
return torch.device(DeviceType.META.value)
@InitPlugin("init_on_default").register()
def init_on_default() -> torch.device:
return DistributedInterface().current_device
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/plugins/model_plugins/initialization.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/plugins/model_plugins/quantization.py | # Copyright 2025 HuggingFace Inc., the KVCache.AI team, Approaching AI, and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's transformers library.
# https://github.com/huggingface/transformers/blob/v4.40.0/examples/pytorch/language-modeling/run_clm.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any
import torch
from transformers import BitsAndBytesConfig
from ...accelerator.helper import get_current_device
from ...config.model_args import ModelArguments
from ...utils import logging
from ...utils.packages import check_version
from ...utils.plugin import BasePlugin
if TYPE_CHECKING:
from transformers import PretrainedConfig, PreTrainedTokenizer
logger = logging.get_logger(__name__)
class QuantizationPlugin(BasePlugin):
r"""Plugin for model quantization."""
def __call__(
self,
init_kwargs: dict[str, Any] = None,
config: "PretrainedConfig" = None,
tokenizer: "PreTrainedTokenizer" = None,
model_args: "ModelArguments" = None,
is_trainable: bool = False,
) -> dict[str, Any]:
return super().__call__(
init_kwargs, config=config, tokenizer=tokenizer, model_args=model_args, is_trainable=is_trainable
)
@QuantizationPlugin("auto").register()
def quantization_auto(
init_kwargs: dict[str, Any],
**kwargs,
) -> dict[str, Any]:
"""Automatic quantization selection, only support bnb currently.
Args:
init_kwargs (dict[str, Any]): The kwargs for model initialization.
**kwargs: Keyword arguments containing the model.
Returns:
dict[str, Any]: The updated kwargs for model initialization.
"""
model_args: ModelArguments = kwargs.get("model_args", None)
quant_config = model_args.quant_config
quantization_bit = quant_config.get("quantization_bit", None)
if quantization_bit is not None:
logger.info_rank0(f"Loading {quantization_bit}-bit quantized model.")
if quantization_bit in [8, 4]:
return quantization_with_bnb(init_kwargs, **kwargs)
else:
raise ValueError(f"Unsupported quantization bit: {quantization_bit} for auto quantization.")
logger.warning_rank0("No quantization method applied.")
return init_kwargs
@QuantizationPlugin("bnb").register()
def quantization_with_bnb(
init_kwargs: dict[str, Any],
model_args: "ModelArguments" = None,
**kwargs,
) -> dict[str, Any]:
r"""Quantization with BNB."""
logger.info_rank0("Using Bitsandbytes quantization.")
quantization_bit = model_args.quant_config.get("quantization_bit", None)
if quantization_bit is None:
logger.warning_rank0("quantization_bit is not specified, default to 8-bit quantization.")
quantization_bit = 4
assert quantization_bit in [8, 4], "Bitsandbytes only accepts 4-bit or 8-bit quantization."
if quantization_bit == 8:
check_version("bitsandbytes>=0.37.0", mandatory=True)
init_kwargs["quantization_config"] = BitsAndBytesConfig(load_in_8bit=True)
elif quantization_bit == 4:
check_version("bitsandbytes>=0.39.0", mandatory=True)
init_kwargs["quantization_config"] = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=model_args.quant_config.get("compute_dtype", torch.float16),
bnb_4bit_use_double_quant=model_args.quant_config.get("double_quantization", True),
bnb_4bit_quant_type=model_args.quant_config.get("quantization_type", "nf4"),
bnb_4bit_quant_storage=model_args.quant_config.get(
"compute_dtype", torch.float16
), # crucial for fsdp+qlora
)
else:
raise ValueError("Bitsandbytes only accepts 4-bit or 8-bit quantization.")
# TODO: improve deepspeed zero3 and fsdp detection.
if kwargs.get("is_trainable", False):
logger.info_rank0("Detected inference mode, setting device_map for bitsandbytes quantization.")
init_kwargs["device_map"] = {"": get_current_device()} # change auto device map for inference
else:
logger.info_rank0("Detected training mode, skip setting device_map for bitsandbytes quantization.")
if model_args.quant_config.get("quantization_bit") != 4:
raise ValueError("Only 4-bit quantized model can use fsdp+qlora or auto device map.")
check_version("bitsandbytes>=0.43.0", mandatory=True)
logger.info_rank0(f"Quantizing model to {model_args.quant_config.get('quantization_bit')} bit with bitsandbytes.")
return init_kwargs
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/plugins/model_plugins/quantization.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/utils/pytest.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from contextlib import contextmanager
@contextmanager
def dist_env(local_rank: int = 0, world_size: int = 1, master_port: int = 25595):
"""Set distributed environment variables."""
env_vars = {
"MASTER_ADDR": "127.0.0.1",
"MASTER_PORT": str(master_port),
"RANK": str(local_rank),
"LOCAL_RANK": str(local_rank),
"WORLD_SIZE": str(world_size),
"LOCAL_WORLD_SIZE": str(world_size),
}
os.environ.update(env_vars)
try:
yield
finally:
for key in env_vars.keys():
os.environ.pop(key, None)
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/utils/pytest.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/train/dpo/ktrainer.py | # Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's TRL library.
# https://github.com/huggingface/trl/blob/v0.8.0/trl/trainer/dpo_trainer.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ktransformers.sft.lora import KTrainer # type: ignore
from typing_extensions import override
from ..trainer_utils import get_batch_logps, nested_detach
from .trainer import CustomDPOTrainer
if TYPE_CHECKING:
from transformers import PreTrainedModel
class KDPOTrainer(KTrainer, CustomDPOTrainer):
@override
def concatenated_forward(
self, model: "PreTrainedModel", batch: dict[str, "torch.Tensor"], is_ref_model: bool = False
) -> tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor"]:
r"""Compute the sum log probabilities of the labels under given logits if loss_type is not IPO, ORPO or SimPO.
Otherwise the average log probabilities.
"""
if self.finetuning_args.use_ref_model:
batch = nested_detach(batch, clone=True) # avoid error
labels = batch.pop("labels") # dpo do not need compute loss in forward
all_logits: torch.Tensor = model(**batch, return_dict=True, use_cache=False).logits.to(torch.float32)
all_logits = all_logits.to("cpu")
labels = labels.to(all_logits.device)
all_logps, valid_length = get_batch_logps(
logits=all_logits, labels=labels, ld_alpha=(self.ld_alpha if not is_ref_model else None)
)
if self.loss_type in ["ipo", "orpo", "simpo"]:
all_logps = all_logps / valid_length
batch_size = batch["input_ids"].size(0) // 2
chosen_logps, rejected_logps = all_logps.split(batch_size, dim=0)
chosen_logits, rejected_logits = all_logits.split(batch_size, dim=0)
chosen_length, _ = valid_length.split(batch_size, dim=0)
if self.loss_type in ["ipo", "orpo", "simpo"]:
return chosen_logps, rejected_logps, chosen_logits, rejected_logits, chosen_logps
else:
return chosen_logps, rejected_logps, chosen_logits, rejected_logits, chosen_logps / chosen_length
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/train/dpo/ktrainer.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/utils/dtype.py | # Copyright 2025 Bytedance Ltd. and the LlamaFactory team.
#
# This code is inspired by the Bytedance's verl library.
# https://github.com/volcengine/verl/blob/v0.6.1/verl/utils/torch_dtypes.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
import torch
from transformers.utils import is_torch_bf16_available_on_device, is_torch_fp16_available_on_device
from ..accelerator.interface import DistributedInterface
class DtypeRegistry:
HALF_LIST = ["fp16", "float16", "half", torch.float16]
FLOAT_LIST = ["fp32", "float32", "float", torch.float32]
BFLOAT_LIST = ["bf16", "bfloat16", torch.bfloat16]
class DtypeInterface:
    """Type of precision used."""

    # Availability is probed once at import time against the current device.
    _is_fp16_available = is_torch_fp16_available_on_device(DistributedInterface().current_device)
    _is_bf16_available = is_torch_bf16_available_on_device(DistributedInterface().current_device)
    _is_fp32_available = True

    @staticmethod
    def is_available(precision: str | torch.dtype) -> bool:
        """Return whether the current device supports ``precision``."""
        if DtypeInterface.is_fp16(precision):
            return DtypeInterface._is_fp16_available

        if DtypeInterface.is_fp32(precision):
            return DtypeInterface._is_fp32_available

        if DtypeInterface.is_bf16(precision):
            return DtypeInterface._is_bf16_available

        raise RuntimeError(f"Unexpected precision: {precision}")

    @staticmethod
    def is_fp16(precision: str | torch.dtype) -> bool:
        """Return whether ``precision`` denotes float16."""
        return precision in DtypeRegistry.HALF_LIST

    @staticmethod
    def is_fp32(precision: str | torch.dtype) -> bool:
        """Return whether ``precision`` denotes float32."""
        return precision in DtypeRegistry.FLOAT_LIST

    @staticmethod
    def is_bf16(precision: str | torch.dtype) -> bool:
        """Return whether ``precision`` denotes bfloat16."""
        return precision in DtypeRegistry.BFLOAT_LIST

    @staticmethod
    def to_dtype(precision: str | torch.dtype) -> torch.dtype:
        """Map any accepted alias of a precision to its ``torch.dtype``."""
        for aliases, dtype in (
            (DtypeRegistry.HALF_LIST, torch.float16),
            (DtypeRegistry.FLOAT_LIST, torch.float32),
            (DtypeRegistry.BFLOAT_LIST, torch.bfloat16),
        ):
            if precision in aliases:
                return dtype

        raise RuntimeError(f"Unexpected precision: {precision}")

    @staticmethod
    def to_str(precision: torch.dtype) -> str:
        """Map a ``torch.dtype`` to its canonical string name."""
        names = {torch.float16: "float16", torch.float32: "float32", torch.bfloat16: "bfloat16"}
        if precision not in names:
            raise RuntimeError(f"Unexpected precision: {precision}")

        return names[precision]

    @contextmanager
    def set_dtype(self, precision: str | torch.dtype):
        """Temporarily make ``precision`` torch's default dtype inside the ``with`` block."""
        previous = torch.get_default_dtype()
        torch.set_default_dtype(self.to_dtype(precision))
        try:
            yield
        finally:
            # Always restore, even if the body raised.
            torch.set_default_dtype(previous)
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/utils/dtype.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:tests_v1/core/test_model_loader.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from llamafactory.v1.config.model_args import ModelArguments
from llamafactory.v1.core.model_engine import ModelEngine
def test_tiny_qwen():
    """Smoke-test loading a tiny random Qwen3 checkpoint through ``ModelEngine``.

    Checks that the resolved tokenizer/config/model classes belong to the Qwen3
    family and that the weights load in bfloat16.
    """
    model_args = ModelArguments(model="llamafactory/tiny-random-qwen3")
    model_engine = ModelEngine(model_args)
    assert "Qwen2Tokenizer" in model_engine.processor.__class__.__name__  # Qwen3 reuses the Qwen2 tokenizer class
    assert "Qwen3Config" in model_engine.model_config.__class__.__name__
    assert "Qwen3ForCausalLM" in model_engine.model.__class__.__name__
    assert model_engine.model.dtype == torch.bfloat16
def test_tiny_qwen_with_kernel_plugin():
    """Verify the kernel plugin swaps RMSNorm forward only on NPU hosts.

    With ``kernel_config`` set to auto, the NPU RMSNorm kernel must be patched
    in when ``torch.npu`` exists and left untouched otherwise.
    """
    from llamafactory.v1.plugins.model_plugins.kernels.ops.rms_norm.npu_rms_norm import npu_rms_norm_forward

    model_args = ModelArguments(
        model="llamafactory/tiny-random-qwen3", kernel_config={"name": "auto", "include_kernels": "auto"}
    )
    model_engine = ModelEngine(model_args)
    # test enable apply kernel plugin
    if hasattr(torch, "npu"):
        # Patched: the layer's forward shares the kernel function's code object.
        assert model_engine.model.model.layers[0].input_layernorm.forward.__code__ == npu_rms_norm_forward.__code__
    else:
        assert model_engine.model.model.layers[0].input_layernorm.forward.__code__ != npu_rms_norm_forward.__code__

    assert "Qwen3ForCausalLM" in model_engine.model.__class__.__name__
if __name__ == "__main__":
    """
    python -m tests_v1.core.test_model_loader
    """
    # Allow running this module directly, without pytest.
    test_tiny_qwen()
    test_tiny_qwen_with_kernel_plugin()
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "tests_v1/core/test_model_loader.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
hiyouga/LlamaFactory:src/llamafactory/v1/accelerator/interface.py | # Copyright 2025 Bytedance Ltd. and the LlamaFactory team.
#
# This code is inspired by the Bytedance's VeOmni library.
# https://github.com/ByteDance-Seed/VeOmni/blob/v0.1.4/veomni/distributed/parallel_state.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A unified interface for model parallelism and data parallelism.
Supports model parallelism types:
- mp_replicate: Replicate model across multiple devices.
- mp_shard: Shard model across multiple devices.
And data parallelism types:
- dp: Data parallelism.
- cp: Context parallelism.
"""
from dataclasses import dataclass
from datetime import timedelta
from enum import StrEnum
from typing import Any, Optional
from torch.distributed import barrier, destroy_process_group, init_process_group
from torch.distributed.device_mesh import DeviceMesh, init_device_mesh
from ..utils import logging
from ..utils.types import DistributedConfig, ProcessGroup, TensorLike
from . import helper
logger = logging.get_logger(__name__)
class Dim(StrEnum):
    """Dimension names used to address slices of the device meshes."""

    MP_REPLICATE = "mp_replicate"  # model parallel: replicate model across devices
    MP_SHARD = "mp_shard"  # model parallel: shard model across devices
    DP = "dp"  # data parallelism
    CP = "cp"  # context parallelism
@dataclass
class DistributedStrategy:
"""Distributed strategy."""
mp_replicate_size: int = 1
"""Model parallel replicate size, default to 1."""
mp_shard_size: int | None = None
"""Model parallel shard size, default to world_size // mp_replicate_size."""
dp_size: int | None = None
"""Data parallel size, default to world_size // cp_size."""
cp_size: int = 1
"""Context parallel size, default to 1."""
def __post_init__(self) -> None:
if not helper.is_distributed():
self.mp_shard_size = 1
elif self.mp_shard_size is None:
self.mp_shard_size = helper.get_world_size() // self.mp_replicate_size
elif self.mp_replicate_size * self.mp_shard_size != helper.get_world_size():
raise ValueError(
f"mp_replicate_size * mp_shard_size must equal to world_size, "
f"got {self.mp_replicate_size} * {self.mp_shard_size} != {helper.get_world_size()}."
)
if not helper.is_distributed():
self.dp_size = 1
elif self.dp_size is None:
self.dp_size = helper.get_world_size() // self.cp_size
elif self.dp_size * self.cp_size != helper.get_world_size():
raise ValueError(
f"dp_size * cp_size must equal to world_size, "
f"got {self.dp_size} * {self.cp_size} != {helper.get_world_size()}."
)
@property
def model_mesh_shape(self) -> tuple[int, int]:
"""Model parallel mesh shape."""
return (self.mp_replicate_size, self.mp_shard_size)
@property
def model_mesh_dim_names(self) -> tuple[str, str]:
"""Model parallel mesh dimension names."""
return (Dim.MP_REPLICATE.value, Dim.MP_SHARD.value)
@property
def data_mesh_shape(self) -> tuple[int, int]:
"""Data parallel mesh shape."""
return (self.dp_size, self.cp_size)
@property
def data_mesh_dim_names(self) -> tuple[str, str]:
"""Data parallel mesh dimension names."""
return (Dim.DP.value, Dim.CP.value)
class DistributedInterface:
    """Distributed interface.

    Process-wide singleton that owns the torch.distributed process group and
    two device meshes: a model mesh (mp_replicate x mp_shard) and a data mesh
    (dp x cp). Every collective helper degrades to a no-op (or identity) when
    the process is not running distributed.
    """

    _instance: Optional["DistributedInterface"] = None
    _initialized: bool = False

    def __new__(cls, *args: Any, **kwargs: Any) -> "DistributedInterface":
        """Singleton pattern."""
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, config: DistributedConfig | None = None) -> None:
        """Initialize process group and device meshes once.

        Args:
            config: Optional distributed configuration with keys
                ``mp_replicate_size``, ``mp_shard_size``, ``dp_size``,
                ``cp_size`` and ``timeout``. NOTE: because of the singleton,
                a ``config`` passed on any call after the first is silently
                ignored.
        """
        if self._initialized:
            return

        helper.set_device_index()
        self._is_distributed = helper.is_distributed()
        self._rank = helper.get_rank()
        self._world_size = helper.get_world_size()
        self._local_rank = helper.get_local_rank()
        self._local_world_size = helper.get_local_world_size()
        self.current_device = helper.get_current_device()
        self.device_count = helper.get_device_count()
        if config is None:
            self.strategy = DistributedStrategy()
            timeout = 18000  # seconds; generous default for slow checkpoint loads
        else:
            self.strategy = DistributedStrategy(
                mp_replicate_size=config.get("mp_replicate_size", 1),
                mp_shard_size=config.get("mp_shard_size", None),
                dp_size=config.get("dp_size", None),
                cp_size=config.get("cp_size", 1),
            )
            timeout = config.get("timeout", 18000)

        if self._is_distributed:
            init_process_group(timeout=timedelta(seconds=timeout), backend=helper.get_process_group_backend())
            # Two independent views over the same world: how parameters are
            # replicated/sharded, and how data batches are split.
            self.model_device_mesh = init_device_mesh(
                device_type=self.current_device.type,
                mesh_shape=self.strategy.model_mesh_shape,
                mesh_dim_names=self.strategy.model_mesh_dim_names,
            )
            self.data_device_mesh = init_device_mesh(
                device_type=self.current_device.type,
                mesh_shape=self.strategy.data_mesh_shape,
                mesh_dim_names=self.strategy.data_mesh_dim_names,
            )
        else:
            self.model_device_mesh = None
            self.data_device_mesh = None

        self._initialized = True
        logger.info_rank0(f"DistributedInterface initialized: {self}.")

    def __str__(self) -> str:
        return (
            f"DistributedInterface(strategy={self.strategy}), is_distributed={self._is_distributed}, "
            f"current_device={self.current_device}, rank={self._rank}, world_size={self._world_size}, "
            f"model_device_mesh={self.model_device_mesh}, data_device_mesh={self.data_device_mesh}"
        )

    def get_device_mesh(self, dim: Dim | None = None) -> DeviceMesh | None:
        """Get device mesh for specified dimension.

        Dims belonging to the data mesh (dp, cp) resolve against
        ``data_device_mesh``; all others against ``model_device_mesh``.
        """
        if dim is None:
            raise ValueError("dim must be specified.")
        elif not self._is_distributed:
            return None
        elif dim in self.strategy.data_mesh_dim_names:
            return self.data_device_mesh[dim.value]
        else:
            return self.model_device_mesh[dim.value]

    def get_group(self, dim: Dim | None = None) -> Optional[ProcessGroup]:
        """Get process group for specified dimension (None when not distributed)."""
        if not self._is_distributed or dim is None:
            return None
        else:
            return self.get_device_mesh(dim).get_group()

    def get_rank(self, dim: Dim | None = None) -> int:
        """Get parallel rank for specified dimension (global rank when dim is None)."""
        if not self._is_distributed:
            return 0
        elif dim is None:
            return self._rank
        else:
            return self.get_device_mesh(dim).get_local_rank()

    def get_world_size(self, dim: Dim | None = None) -> int:
        """Get parallel size for specified dimension (global size when dim is None)."""
        if not self._is_distributed:
            return 1
        elif dim is None:
            return self._world_size
        else:
            return self.get_device_mesh(dim).size()

    def get_local_rank(self) -> int:
        """Get parallel local rank."""
        return self._local_rank

    def get_local_world_size(self) -> int:
        """Get parallel local world size."""
        return self._local_world_size

    def all_gather(self, data: TensorLike, dim: Dim | None = Dim.DP) -> TensorLike:
        """Gather tensor across specified parallel group; identity when not distributed."""
        if self._is_distributed:
            return helper.operate_tensorlike(helper.all_gather, data, group=self.get_group(dim))
        else:
            return data

    def all_reduce(
        self, data: TensorLike, op: helper.ReduceOp = helper.ReduceOp.MEAN, dim: Dim | None = Dim.DP
    ) -> TensorLike:
        """Reduce tensor across specified parallel group; identity when not distributed."""
        if self._is_distributed:
            return helper.operate_tensorlike(helper.all_reduce, data, op=op, group=self.get_group(dim))
        else:
            return data

    def broadcast(self, data: TensorLike, src: int = 0, dim: Dim | None = Dim.DP) -> TensorLike:
        """Broadcast tensor across specified parallel group; identity when not distributed."""
        if self._is_distributed:
            return helper.operate_tensorlike(helper.broadcast, data, src=src, group=self.get_group(dim))
        else:
            return data

    def sync(self) -> None:
        """Synchronize all processes."""
        if self._is_distributed:
            helper.synchronize()

    def barrier(self) -> None:
        """Barrier all processes."""
        if self._is_distributed:
            barrier()

    def destroy(self) -> None:
        """Destroy all processes."""
        if self._is_distributed:
            destroy_process_group()
if __name__ == "__main__":
    """
    python -m llamafactory.v1.accelerator.interface
    """
    # Prints the resolved distributed topology for quick manual inspection.
    print(DistributedInterface())
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/accelerator/interface.py",
"license": "Apache License 2.0",
"lines": 215,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/config/arg_utils.py | # Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's transformers library.
# https://github.com/huggingface/transformers/blob/v5.0.0rc0/src/transformers/training_args.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from enum import StrEnum, unique
class PluginConfig(dict):
    """Dictionary that allows attribute access."""

    @property
    def name(self) -> str:
        """Plugin name."""
        try:
            return self["name"]
        except KeyError:
            raise ValueError("Plugin configuration must have a 'name' field.") from None
# Accepted user-facing forms of a plugin configuration argument.
PluginArgument = PluginConfig | dict | str | None
@unique
class ModelClass(StrEnum):
    """Auto class for model config."""

    LLM = "llm"  # causal language model
    CLS = "cls"  # sequence classification model
    OTHER = "other"  # anything else
@unique
class SampleBackend(StrEnum):
    """Backend used to run sampling/inference."""

    HF = "hf"  # HuggingFace generation
    VLLM = "vllm"  # vLLM engine
@unique
class BatchingStrategy(StrEnum):
    """Strategy used to batch training samples."""

    NORMAL = "normal"
    PADDING_FREE = "padding_free"
    DYNAMIC_BATCHING = "dynamic_batching"
    DYNAMIC_PADDING_FREE = "dynamic_padding_free"
def _convert_str_dict(data: dict) -> dict:
"""Parse string representation inside the dictionary.
Args:
data: The string or dictionary to convert.
Returns:
The converted dictionary.
"""
for key, value in data.items():
if isinstance(value, dict):
data[key] = _convert_str_dict(value)
elif isinstance(value, str):
if value.lower() in ("true", "false"):
data[key] = value.lower() == "true"
elif value.isdigit():
data[key] = int(value)
elif value.replace(".", "", 1).isdigit():
data[key] = float(value)
return data
def get_plugin_config(config: PluginArgument) -> PluginConfig | None:
    """Get the plugin configuration from the argument value.

    Args:
        config: ``None``, a ready-made dict, a JSON object string, or a bare
            plugin name string (shorthand for ``{"name": <value>}``).

    Returns:
        The plugin configuration, or ``None`` when no plugin is configured.

    Raises:
        ValueError: If the resulting configuration has no ``name`` field.
    """
    if config is None:
        return None

    if isinstance(config, str):
        if config.startswith("{"):
            # JSON object string (e.g. from the CLI): parse it and normalize
            # its stringly-typed values.
            config = _convert_str_dict(json.loads(config))
        else:
            # Bare plugin name. Previously a non-JSON string fell through to
            # the dict handling below and failed with a confusing error.
            config = {"name": config}

    if "name" not in config:
        raise ValueError("Plugin configuration must have a 'name' field.")

    return PluginConfig(config)
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/config/arg_utils.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/utils/constants.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Label id excluded from the loss; matches the default ``ignore_index`` of
# ``torch.nn.CrossEntropyLoss`` (-100).
IGNORE_INDEX = -100
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/utils/constants.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/utils/logging.py | # Copyright 2025 Optuna, HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's transformers library.
# https://github.com/huggingface/transformers/blob/v5.0.0rc0/src/transformers/utils/logging.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import threading
from functools import lru_cache
from typing import Optional
# Guards one-time handler configuration across threads.
_thread_lock = threading.RLock()
# The single stdout handler installed on the library root logger; also serves
# as the "already configured" flag.
_default_handler: Optional["logging.Handler"] = None
_default_log_level: "logging._Level" = logging.INFO
class _Logger(logging.Logger):
    """A logger that supports rank0 logging.

    NOTE(review): this class mainly serves as a typing facade —
    ``get_logger`` returns ``logging.getLogger(...)``, whose instances gain the
    rank-aware methods via the ``logging.Logger`` monkeypatch at the bottom of
    this module, so the bodies below (which do not check the rank) are not the
    ones executed at runtime.
    """

    def info_rank0(self, *args, **kwargs) -> None:
        self.info(*args, **kwargs)

    def warning_rank0(self, *args, **kwargs) -> None:
        self.warning(*args, **kwargs)

    def warning_rank0_once(self, *args, **kwargs) -> None:
        self.warning(*args, **kwargs)
def _get_default_logging_level() -> "logging._Level":
"""Return the default logging level."""
env_level_str = os.getenv("LLAMAFACTORY_VERBOSITY", None)
if env_level_str:
if env_level_str.upper() in logging._nameToLevel:
return logging._nameToLevel[env_level_str.upper()]
else:
raise ValueError(f"Unknown logging level: {env_level_str}.")
return _default_log_level
def _get_library_name() -> str:
return ".".join(__name__.split(".")[:2]) # llamafactory.v1
def _get_library_root_logger() -> "_Logger":
    """Return the root logger shared by every module of this library."""
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    """Configure root logger using a stdout stream handler with an explicit format."""
    global _default_handler

    with _thread_lock:
        if _default_handler:  # already configured
            return

        formatter = logging.Formatter(
            fmt="[%(levelname)s|%(asctime)s] %(name)s:%(lineno)s >> %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )
        _default_handler = logging.StreamHandler(sys.stdout)
        _default_handler.setFormatter(formatter)
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        # Stop records from bubbling up to the global root logger, which would
        # duplicate output if the application installed its own handlers.
        library_root_logger.propagate = False
def get_logger(name: str | None = None) -> "_Logger":
    """Return a logger with the specified name. It is not supposed to be accessed externally."""
    _configure_library_root_logger()
    logger_name = _get_library_name() if name is None else name
    return logging.getLogger(logger_name)
def add_handler(handler: "logging.Handler") -> None:
    """Add a handler to the root logger (configuring it first if needed)."""
    _configure_library_root_logger()
    _get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
    """Remove a handler from the root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().removeHandler(handler)
def info_rank0(self: "logging.Logger", *args, **kwargs) -> None:
    """Emit an info record only on local rank 0."""
    local_rank = int(os.getenv("LOCAL_RANK", "0"))
    if local_rank == 0:
        self.info(*args, **kwargs)
def warning_rank0(self: "logging.Logger", *args, **kwargs) -> None:
    """Emit a warning record only on local rank 0."""
    local_rank = int(os.getenv("LOCAL_RANK", "0"))
    if local_rank == 0:
        self.warning(*args, **kwargs)
@lru_cache(None)
def warning_rank0_once(self: "logging.Logger", *args, **kwargs) -> None:
    """Emit a warning on local rank 0 at most once per (logger, message) pair.

    ``lru_cache`` keys on the call arguments, so repeated identical warnings
    are suppressed after the first. NOTE(review): the cache is unbounded and
    keeps a reference to every logger/message pair for the process lifetime.
    """
    if int(os.getenv("LOCAL_RANK", "0")) == 0:
        self.warning(*args, **kwargs)
# Attach the rank-aware helpers to every ``logging.Logger`` instance so that
# any logger (not only this library's) gains ``info_rank0`` and friends.
logging.Logger.info_rank0 = info_rank0
logging.Logger.warning_rank0 = warning_rank0
logging.Logger.warning_rank0_once = warning_rank0_once
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/utils/logging.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/utils/plugin.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from collections.abc import Callable
from typing import Any
from . import logging
logger = logging.get_logger(__name__)
class BasePlugin:
"""Base class for plugins.
A plugin is a callable object that can be registered and called by name.
Example usage:
```python
class PrintPlugin(BasePlugin):
def again(self): # optional
self["again"]()
@PrintPlugin("hello").register()
def print_hello():
print("Hello world!")
@PrintPlugin("hello").register("again")
def print_hello_again():
print("Hello world! Again.")
PrintPlugin("hello")()
PrintPlugin("hello").again()
```
"""
_registry: dict[str, dict[str, Callable]] = defaultdict(dict)
def __init__(self, name: str | None = None) -> None:
"""Initialize the plugin with a name."""
self.name = name
def register(self, method_name: str = "__call__") -> Callable:
"""Decorator to register a function as a plugin."""
if self.name is None:
raise ValueError("Plugin name should be specified.")
if method_name in self._registry[self.name]:
logger.warning_rank0_once(f"Method {method_name} of plugin {self.name} is already registered.")
def decorator(func: Callable) -> Callable:
self._registry[self.name][method_name] = func
return func
return decorator
def __call__(self, *args, **kwargs) -> Any:
"""Call the registered function with the given arguments."""
return self["__call__"](*args, **kwargs)
def __getattr__(self, method_name: str) -> Callable:
"""Get the registered function with the given name."""
return self[method_name]
def __getitem__(self, method_name: str) -> Callable:
"""Get the registered function with the given name."""
if method_name not in self._registry[self.name]:
raise ValueError(f"Method {method_name} of plugin {self.name} is not registered.")
return self._registry[self.name][method_name]
if __name__ == "__main__":
    """
    python -m llamafactory.v1.utils.plugin
    """

    # Minimal self-test mirroring the example from the class docstring.
    class PrintPlugin(BasePlugin):
        def again(self):  # optional
            self["again"]()

    @PrintPlugin("hello").register()
    def print_hello():
        print("Hello world!")

    @PrintPlugin("hello").register("again")
    def print_hello_again():
        print("Hello world! Again.")

    PrintPlugin("hello")()
    PrintPlugin("hello").again()
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/utils/plugin.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/chat/kt_engine.py | # Copyright 2025 the KVCache.AI team, Approaching AI, and the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import platform
from collections.abc import AsyncGenerator
from threading import Thread
from typing import TYPE_CHECKING, Any, Optional
import torch
from typing_extensions import override
from ..data import get_template_and_fix_tokenizer
from ..extras import logging
from ..extras.constants import EngineName
from ..model import load_model, load_tokenizer
from .base_engine import BaseEngine, Response
if TYPE_CHECKING:
from transformers import PreTrainedTokenizer
from trl import PreTrainedModelWrapper
from ..data.mm_plugin import AudioInput, ImageInput, VideoInput
from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments
from ktransformers.operators.flashinfer_wrapper import flashinfer_enabled
from ktransformers.server.config.config import Config
from ktransformers.util.utils import (
get_compute_capability,
prefill_and_generate_capture,
)
from ktransformers.util.vendors import GPUVendor, device_manager
logger = logging.get_logger(__name__)
class KTransformersEngine(BaseEngine):
    """Chat/scoring engine backed by the KTransformers runtime.

    Supports streaming generation for SFT-stage models and reward-score
    computation for value-head models, following the ``BaseEngine`` contract.
    """

    def __init__(
        self,
        model_args: "ModelArguments",
        data_args: "DataArguments",
        finetuning_args: "FinetuningArguments",
        generating_args: "GeneratingArguments",
    ) -> None:
        self.name = EngineName.KT
        # Generation is available only for SFT-stage models; otherwise the
        # model carries a value head and is used for scoring.
        self.can_generate = finetuning_args.stage == "sft"
        tok_mod = load_tokenizer(model_args)
        self.tokenizer = tok_mod["tokenizer"]
        self.tokenizer.padding_side = "left" if self.can_generate else "right"
        self.template = get_template_and_fix_tokenizer(self.tokenizer, data_args)
        self.model = load_model(
            self.tokenizer, model_args, finetuning_args, is_trainable=False, add_valuehead=(not self.can_generate)
        )
        self.generating_args = generating_args.to_dict()
        # KTransformers-specific knobs from the model arguments.
        self.max_new_tokens = model_args.kt_maxlen
        self.use_cuda_graph = model_args.kt_use_cuda_graph
        self.mode = model_args.kt_mode
        self.force_think = model_args.kt_force_think
        self.chunk_size = model_args.chunk_size
        # Ensure an event loop exists so the async API works in sync contexts.
        try:
            asyncio.get_event_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

        # Caps the number of concurrent chat/score requests.
        self.semaphore = asyncio.Semaphore(int(os.getenv("MAX_CONCURRENT", "1")))

    @staticmethod
    @torch.inference_mode()
    def _get_scores(
        model: "PreTrainedModelWrapper",
        tokenizer: "PreTrainedTokenizer",
        batch_input: list[str],
        # NOTE(review): mutable default argument; harmless here because
        # ``pop`` on a missing key does not modify the dict, but ``None``
        # would be the safer convention.
        input_kwargs: Optional[dict[str, Any]] = {},
    ) -> list[float]:
        """Compute one scalar score per already-templated input string.

        The value-head output is read at the last non-padded position of each
        sequence. NOTE(review): despite the ``list[float]`` annotation this
        returns a tensor — confirm callers accept that.
        """
        max_length: Optional[int] = input_kwargs.pop("max_length", None)
        device = getattr(model.pretrained_model, "device", "cuda")
        inputs = tokenizer(
            batch_input,
            padding=True,
            truncation=True,
            max_length=max_length or getattr(model.config, "max_position_embeddings", 1024),
            return_tensors="pt",
            add_special_tokens=False,
        ).to(device)
        values: torch.Tensor = model(**inputs, return_dict=True, use_cache=False)[-1]
        # Index of the last real (non-padding) token per row.
        scores = values.gather(dim=-1, index=(inputs["attention_mask"].sum(dim=-1, keepdim=True) - 1))
        return scores

    async def _generate(
        self,
        messages: list[dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        **input_kwargs,
    ) -> AsyncGenerator[str, None]:
        """Stream generated text chunks produced by KTransformers.

        The KTransformers generator runs on a daemon thread and its output is
        bridged into this coroutine through an asyncio queue.
        """
        # Append an empty assistant turn so the template ends with the
        # generation prompt.
        paired = messages + [{"role": "assistant", "content": ""}]
        prompt_ids, _ = self.template.encode_oneturn(self.tokenizer, paired, system, tools)
        prompt_len = len(prompt_ids)

        # Token budget precedence (low to high): engine default ->
        # generating_args -> per-request max_length -> per-request max_new_tokens.
        max_length: Optional[int] = input_kwargs.pop("max_length", None)
        max_new_tokens: Optional[int] = input_kwargs.pop("max_new_tokens", None)
        if "max_new_tokens" in self.generating_args:
            max_tokens = int(self.generating_args["max_new_tokens"])
        elif "max_length" in self.generating_args:
            gl = int(self.generating_args["max_length"])
            max_tokens = gl - prompt_len if gl > prompt_len else 1
        else:
            max_tokens = self.max_new_tokens or 256

        if max_length is not None:
            max_tokens = max(max_length - prompt_len, 1)

        if max_new_tokens is not None:
            max_tokens = int(max_new_tokens)

        max_tokens = max(1, int(max_tokens))

        if self.mode == "long_context":
            # Long-context mode bounds the total sequence length via the
            # KTransformers config file.
            max_len_cfg = Config().long_context_config["max_seq_len"]
            need = prompt_len + max_tokens
            assert max_len_cfg > need, f"please set max_seq_len > {need} in ~/.ktransformers/config.yaml"

        device = next(self.model.parameters()).device
        input_tensor = torch.tensor([prompt_ids], dtype=torch.long, device=device)
        if self.force_think:
            # Nudge the model into reasoning mode by appending an opening tag.
            think = torch.tensor(
                [self.tokenizer.encode("<think>\n", add_special_tokens=False)], dtype=torch.long, device=device
            )
            input_tensor = torch.cat([input_tensor, think], dim=1)

        # FlashInfer MLA path: DeepSeek V2/V3 on NVIDIA (compute capability
        # >= 8), non-Windows, and only when flashinfer is enabled.
        use_flashinfer = (
            platform.system() != "Windows"
            and getattr(self.model.config, "architectures", [""])[0]
            in {"DeepseekV2ForCausalLM", "DeepseekV3ForCausalLM"}
            and flashinfer_enabled
            and get_compute_capability() >= 8
            and device_manager.gpu_vendor == GPUVendor.NVIDIA
        )

        def make_gen():
            # Build the KTransformers generator (may be sync, async, or a
            # single value — see producer below).
            if use_flashinfer:
                return prefill_and_generate_capture(
                    self.model,
                    self.tokenizer,
                    input_tensor,
                    max_tokens,
                    self.use_cuda_graph,
                    mode=self.mode,
                    force_think=self.force_think,
                    chunk_size=self.chunk_size,
                    use_flashinfer_mla=True,
                    num_heads=self.model.config.num_attention_heads,
                    head_dim_ckv=getattr(self.model.config, "kv_lora_rank", 0),
                    head_dim_kpe=getattr(self.model.config, "qk_rope_head_dim", 0),
                    q_head_dim=getattr(self.model.config, "qk_rope_head_dim", 0)
                    + getattr(self.model.config, "qk_nope_head_dim", 0),
                    echo_stream=False,
                )
            else:
                return prefill_and_generate_capture(
                    self.model,
                    self.tokenizer,
                    input_tensor,
                    max_tokens,
                    self.use_cuda_graph,
                    mode=self.mode,
                    force_think=self.force_think,
                    chunk_size=self.chunk_size,
                    echo_stream=False,
                )

        loop = asyncio.get_running_loop()
        q: asyncio.Queue[Optional[str]] = asyncio.Queue()

        def producer():
            # Runs on a daemon thread: drains the generator (async iterator,
            # sync iterable, or single value) into the asyncio queue.
            # ``None`` is the completion sentinel, enqueued even on error.
            try:
                gen = make_gen()
                if hasattr(gen, "__aiter__"):

                    async def drain_async():
                        async for t in gen:
                            loop.call_soon_threadsafe(q.put_nowait, t if isinstance(t, str) else str(t))

                    asyncio.run(drain_async())
                elif hasattr(gen, "__iter__"):
                    for t in gen:
                        loop.call_soon_threadsafe(q.put_nowait, t if isinstance(t, str) else str(t))
                else:
                    loop.call_soon_threadsafe(q.put_nowait, gen if isinstance(gen, str) else str(gen))
            finally:
                loop.call_soon_threadsafe(q.put_nowait, None)

        Thread(target=producer, daemon=True).start()
        while True:
            item = await q.get()
            if item is None:  # sentinel: generation finished
                break

            yield item

    @override
    async def chat(
        self,
        messages: list[dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        images: Optional[list["ImageInput"]] = None,
        videos: Optional[list["VideoInput"]] = None,
        audios: Optional[list["AudioInput"]] = None,
        **input_kwargs,
    ) -> list["Response"]:
        """Collect the full streamed generation into a single ``Response``.

        Multimodal arguments are accepted for interface compatibility but are
        not forwarded to the generator.
        """
        if not self.can_generate:
            raise ValueError("The current model does not support `chat`.")

        async with self.semaphore:
            produced = ""
            final_text = ""
            async for t in self._generate(messages, system, tools, **input_kwargs):
                delta = t
                produced = produced + delta
                if delta:
                    final_text += delta

            prompt_ids, _ = self.template.encode_oneturn(
                self.tokenizer, messages + [{"role": "assistant", "content": ""}], system, tools
            )
            return [
                Response(
                    response_text=final_text,
                    response_length=len(self.tokenizer.encode(final_text, add_special_tokens=False)),
                    prompt_length=len(prompt_ids),
                    finish_reason="stop",
                )
            ]

    @override
    async def stream_chat(
        self,
        messages: list[dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        images: Optional[list["ImageInput"]] = None,
        videos: Optional[list["VideoInput"]] = None,
        audios: Optional[list["AudioInput"]] = None,
        **input_kwargs,
    ) -> AsyncGenerator[str, None]:
        """Yield incremental text deltas for a chat request."""
        if not self.can_generate:
            raise ValueError("The current model does not support `stream_chat`.")

        async with self.semaphore:
            produced = ""
            async for t in self._generate(messages, system, tools, **input_kwargs):
                # The backend may yield cumulative text; emit only the unseen
                # suffix in that case, otherwise pass the chunk through.
                delta = t[len(produced) :] if t.startswith(produced) else t
                produced = t
                if delta:
                    yield delta

    @override
    async def get_scores(
        self,
        batch_input: list[str],
        **input_kwargs,
    ) -> list[float]:
        """Score a batch of inputs with the value-head model on a worker thread."""
        if self.can_generate:
            raise ValueError("Cannot get scores using an auto-regressive model.")

        args = (self.model, self.tokenizer, batch_input, input_kwargs)
        async with self.semaphore:
            return await asyncio.to_thread(self._get_scores, *args)
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/chat/kt_engine.py",
"license": "Apache License 2.0",
"lines": 249,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/model/model_utils/ktransformers.py | # Copyright 2025 the KVCache.AI team, Approaching AI, and the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.util as _u
from typing import TYPE_CHECKING, Any
import torch
from ...extras import logging
from ...extras.misc import get_current_device
if TYPE_CHECKING:
from ...hparams import FinetuningArguments, ModelArguments
from transformers import AutoConfig, AutoModelForCausalLM, PretrainedConfig, PreTrainedModel
KT_AVAILABLE = _u.find_spec("ktransformers") is not None
if KT_AVAILABLE:
from ktransformers.models.modeling_deepseek import DeepseekV2ForCausalLM
from ktransformers.models.modeling_deepseek_v3 import DeepseekV3ForCausalLM
from ktransformers.models.modeling_llama import LlamaForCausalLM
from ktransformers.models.modeling_mixtral import MixtralForCausalLM
from ktransformers.models.modeling_qwen2_moe import Qwen2MoeForCausalLM
from ktransformers.models.modeling_qwen3_moe import Qwen3MoeForCausalLM
from ktransformers.optimize.optimize import optimize_and_load_gguf
from ktransformers.server.config.config import Config
from ktransformers.sft.lora import inject_lora_layer
from ktransformers.util.custom_loader import GGUFLoader, SafeTensorLoader
from ktransformers.util.globals import GLOBAL_CONFIG
from ktransformers.util.utils import load_weights
logger = logging.get_logger(__name__)
def _get_kt_kwargs(
    config: "PretrainedConfig",
    model_name_or_path: str,
    model_args: "ModelArguments",
    finetuning_args: "FinetuningArguments",
) -> dict[str, Any]:
    """Assemble the keyword arguments for KTransformers model loading.

    Values are derived from the HF config and the LlamaFactory model /
    finetuning arguments; none of the argument objects are mutated.
    """
    kt_kwargs: dict[str, Any] = {"model_name": model_name_or_path}
    kt_kwargs["max_seq_length"] = model_args.model_max_length or 4096  # fallback when unset
    kt_kwargs["dtype"] = model_args.compute_dtype
    kt_kwargs["load_in_4bit"] = model_args.quantization_bit == 4
    kt_kwargs["token"] = model_args.hf_hub_token
    kt_kwargs["full_finetuning"] = finetuning_args.finetuning_type == "full"
    kt_kwargs["device_map"] = {"": get_current_device()}
    kt_kwargs["rope_scaling"] = getattr(config, "rope_scaling", None)  # absent on some configs
    kt_kwargs["fix_tokenizer"] = False
    kt_kwargs["trust_remote_code"] = model_args.trust_remote_code
    kt_kwargs["use_gradient_checkpointing"] = "ktransformers"
    return kt_kwargs
def load_kt_pretrained_model(config: "PretrainedConfig", model_args: "ModelArguments") -> "PreTrainedModel":
    r"""Optionally load pretrained model with KTransformers. Used in training.

    Builds the model skeleton on the meta device — preferring KTransformers'
    custom modeling classes when the architecture is known — then materializes
    weights from a GGUF checkpoint according to the optimize-rule YAML.

    Args:
        config: HF config. NOTE(review): this parameter is shadowed below by a
            fresh ``AutoConfig.from_pretrained`` call, so the passed object is
            effectively unused — confirm whether that is intentional.
        model_args: Model arguments (paths, dtype, KTransformers options).

    Returns:
        The KTransformers-optimized model with GGUF weights loaded.
    """
    custom_models = {
        "DeepseekV2ForCausalLM": DeepseekV2ForCausalLM,
        "DeepseekV3ForCausalLM": DeepseekV3ForCausalLM,
        "Qwen2MoeForCausalLM": Qwen2MoeForCausalLM,
        "Qwen3MoeForCausalLM": Qwen3MoeForCausalLM,
        "LlamaForCausalLM": LlamaForCausalLM,
        "MixtralForCausalLM": MixtralForCausalLM,
    }
    Config().cpu_infer = model_args.cpu_infer
    Config().chunk_size = model_args.chunk_size
    config = AutoConfig.from_pretrained(model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code)
    if model_args.mode == "long_context":
        assert config.architectures[0] == "LlamaForCausalLM", "only LlamaForCausalLM support long_context mode"
        torch.set_default_dtype(torch.float16)
    else:
        torch.set_default_dtype(config.torch_dtype)

    with torch.device("meta"):  # allocate structure only; weights come from the GGUF file below
        if config.architectures[0] in custom_models:
            logger.info("using custom modeling_xxx.py.")  # was print(); use the module logger for consistency
            if "Qwen2Moe" in config.architectures[0]:  # Qwen2Moe must use flash_attention_2 to avoid overflow.
                config._attn_implementation = "flash_attention_2"
            if "Llama" in config.architectures[0]:
                config._attn_implementation = "eager"
            if "Mixtral" in config.architectures[0]:
                config._attn_implementation = "flash_attention_2"
            model = custom_models[config.architectures[0]](config)
        else:
            attn_implementation = "flash_attention_2"
            model = AutoModelForCausalLM.from_config(
                config, trust_remote_code=True, attn_implementation=attn_implementation
            )

    optimize_config_path = model_args.kt_optimize_rule
    gguf_path = model_args.model_name_or_path
    assert optimize_config_path is not None, "optimize_config_path must be provided (path to YAML rules file)."
    assert gguf_path is not None, "gguf_path must be provided (path to a folder or .gguf file)."
    GLOBAL_CONFIG._config["mod"] = "infer"
    optimize_and_load_gguf(model, optimize_config_path, gguf_path, config)
    return model
def get_kt_peft_model(model: "PreTrainedModel", peft_kwargs: dict[str, Any]) -> "PreTrainedModel":
    r"""Get the peft model for the pretrained model with KTransformers. Used in training."""
    # Imported lazily so this module can be imported without ktransformers installed.
    from ktransformers.sft.peft_utils.mapping import get_peft_model

    peft_model = get_peft_model(model, peft_kwargs)
    return peft_model
def load_kt_peft_model(model_args: "ModelArguments", model: "PreTrainedModel") -> "PreTrainedModel":
    r"""Load peft model with KTransformers. Used in both training and inference.

    Supports two adapter formats:
    - ``.gguf`` file: loaded wholesale through ``GGUFLoader``.
    - safetensors directory: each tensor is copied into the matching injected
      LoRA parameter one by one.

    Args:
        model_args: Model arguments; only the first entry of
            ``adapter_name_or_path`` is used.
        model: The base model to inject LoRA layers into.

    Returns:
        The model with adapter weights loaded.
    """
    adapter_path = model_args.adapter_name_or_path[0]
    # Both formats require the LoRA layers to be injected first (was duplicated per branch).
    inject_lora_layer(model, adapter_path)
    if adapter_path.endswith(".gguf"):
        adapter_gguf_loader = GGUFLoader(adapter_path)
        load_weights(model, adapter_gguf_loader, adapter_gguf=True)
        model.train()
    else:
        adapter_loader = SafeTensorLoader(adapter_path)
        device = next(model.parameters()).device
        for key in adapter_loader.tensor_file_map.keys():
            # Map checkpoint names onto the injected LoRA parameter names.
            # Computed BEFORE the try block so the KeyError handler below can
            # never see an unbound/stale `model_key` (bug in the original:
            # load_tensor could raise before model_key was assigned).
            model_key = key.replace("base_model.model.", "")
            model_key = model_key.replace(".weight", ".default.weight")
            model_key = model_key.replace(".default.default.weight", ".default.weight")
            try:
                tensor = adapter_loader.load_tensor(key, device=device)
                param = model.get_parameter(model_key)
                param.data.copy_(tensor.data)
                logger.info(f"Loaded adapter weight: {key} -> {model_key}")
            except AttributeError:
                logger.warning(f"Skipping {key}: not a model parameter")
            except KeyError:
                logger.warning(f"Key not found in model: {model_key} (original: {key})")

    return model
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/model/model_utils/ktransformers.py",
"license": "Apache License 2.0",
"lines": 125,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:scripts/megatron_merge.py | # Copyright 2025 the ROLL team and the LlamaFactory team.
#
# This code is modified from the ROLL library.
# https://github.com/alibaba/ROLL/blob/main/mcore_adapter/tools/convert.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import fire
import torch
from mcore_adapter.models.converter.post_converter import convert_checkpoint_to_hf, convert_checkpoint_to_mca
from mcore_adapter.training_args import DistributingParallelArguments
from mcore_adapter.utils import get_logger
from transformers import AutoConfig
logger = get_logger(__name__)
def convert_mca_to_hf(
checkpoint_path: str,
output_path: str = "./output",
bf16: bool = False,
fp16: bool = False,
convert_model_max_length: int | None = None,
):
"""Convert megatron checkpoint to HuggingFace format.
Args:
checkpoint_path: Path to the checkpoint to convert
output_path: Path to save the converted checkpoint
bf16: Use bfloat16 precision
fp16: Use float16 precision
convert_model_max_length: Change the model_max_length in hf config.json
"""
if bf16 and fp16:
raise ValueError("bf16 and fp16 cannot be both True.")
torch_dtype = None
if bf16:
torch_dtype = torch.bfloat16
elif fp16:
torch_dtype = torch.float16
convert_checkpoint_to_hf(checkpoint_path, output_path, torch_dtype=torch_dtype)
if convert_model_max_length is not None:
config = AutoConfig.from_pretrained(output_path, trust_remote_code=True)
config.model_max_length = convert_model_max_length
config.save_pretrained(output_path)
def convert(
checkpoint_path: str,
output_path: str = "./output",
bf16: bool = False,
fp16: bool = False,
convert_model_max_length: int | None = None,
tensor_model_parallel_size: int = 1,
pipeline_model_parallel_size: int = 1,
expert_model_parallel_size: int = 1,
virtual_pipeline_model_parallel_size: int | None = None,
):
"""Convert checkpoint between MCA and HuggingFace formats.
Args:
checkpoint_path: Path to the checkpoint to convert
output_path: Path to save the converted checkpoint
bf16: Use bfloat16 precision
fp16: Use float16 precision
convert_model_max_length: Change the model_max_length in hf config.json
tensor_model_parallel_size: Tensor model parallel size
pipeline_model_parallel_size: Pipeline model parallel size
expert_model_parallel_size: Expert model parallel size
virtual_pipeline_model_parallel_size: Virtual pipeline model parallel size
"""
if bf16 and fp16:
raise ValueError("bf16 and fp16 cannot be both True.")
mca_config_path = os.path.join(checkpoint_path, "mca_config.json")
from_mca = os.path.exists(mca_config_path)
if not from_mca:
dist_args = DistributingParallelArguments(
tensor_model_parallel_size=tensor_model_parallel_size,
pipeline_model_parallel_size=pipeline_model_parallel_size,
expert_model_parallel_size=expert_model_parallel_size,
virtual_pipeline_model_parallel_size=virtual_pipeline_model_parallel_size,
)
convert_checkpoint_to_mca(
checkpoint_path,
output_path,
dist_args,
bf16=bf16,
fp16=fp16,
)
else:
convert_mca_to_hf(
checkpoint_path=checkpoint_path,
output_path=output_path,
bf16=bf16,
fp16=fp16,
convert_model_max_length=convert_model_max_length,
)
def main():
    """CLI entry point: expose ``convert`` through python-fire."""
    fire.Fire(convert)


if __name__ == "__main__":
    main()
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "scripts/megatron_merge.py",
"license": "Apache License 2.0",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/train/mca/trainer.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO override the original trainer
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/train/mca/trainer.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/train/mca/workflow.py | # Copyright 2025 the ROLL team and the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from collections.abc import Sequence
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Optional
from transformers import DataCollatorForSeq2Seq
from ...data import (
SFTDataCollatorWith4DAttentionMask,
get_dataset,
get_template_and_fix_tokenizer,
)
from ...data.collator import (
PairwiseDataCollatorWithPadding,
)
from ...extras.constants import IGNORE_INDEX, MCA_SUPPORTED_MODELS
from ...extras.logging import get_logger
from ...extras.misc import calculate_tps
from ...extras.packages import is_mcore_adapter_available
from ...extras.ploting import plot_loss
from ...model import load_tokenizer
from ..callbacks import SaveProcessorCallback
if not is_mcore_adapter_available():
raise ImportError("mcore_adapter is not installed. Please install it with `pip install mcore-adapter`.")
from mcore_adapter.models import AutoConfig, AutoModel
from mcore_adapter.trainer import DPOTrainer as McaDPOTrainer
from mcore_adapter.trainer import McaTrainer
from mcore_adapter.trainer.dpo_config import DPOConfig
if TYPE_CHECKING:
from mcore_adapter.training_args import Seq2SeqTrainingArguments as McaSeq2SeqTrainingArguments
from transformers import TrainerCallback
from ...hparams import DataArguments, FinetuningArguments, ModelArguments
logger = get_logger(__name__)
def _data_collator_wrapper(data_collator: Any):
@functools.wraps(data_collator)
def wrapper(features: Sequence[dict[str, Any]]):
labels_key = [k for k in features[0].keys() if k.endswith("labels")]
input_ids_key = [k for k in features[0].keys() if k.endswith("input_ids")]
for feature in features:
if len(labels_key) == 0: # pt
feature["labels"] = deepcopy(feature["input_ids"])[1:]
for k in labels_key:
feature[k] = feature[k][1:]
for k in input_ids_key:
feature[k] = feature[k][:-1]
for k in ["attention_mask", "position_ids"]:
if k in feature:
feature[k] = feature[k][:-1]
return data_collator(features)
return wrapper
def _check_model_support(model_args: "ModelArguments"):
    """Raise early if the target architecture is not supported by mcore_adapter.

    Reads the HF config (without loading weights) and checks its
    ``model_type`` against ``MCA_SUPPORTED_MODELS``.

    Raises:
        ValueError: If the model type is not supported.
    """
    from transformers import AutoConfig as HfAutoConfig

    config = HfAutoConfig.from_pretrained(
        model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
    )
    if config.model_type not in MCA_SUPPORTED_MODELS:
        raise ValueError(
            # Trailing space added: the original implicit concatenation
            # produced "...mcore_adapter.You can try..." with no separator.
            f"Model {config.model_type} is not supported by mcore_adapter. "
            "You can try to upgrade mcore_adapter to the latest version for more supported models."
        )
def _freeze_model_parameters(model: Any, finetuning_args: "FinetuningArguments"):
"""Freeze model parameters for qwen_vl series models based on finetuning arguments."""
if getattr(model.config, "hf_model_type", None) not in ["qwen2_vl", "qwen2_5_vl", "qwen3_vl", "qwen3_vl_moe"]:
return
params_to_freeze = []
if finetuning_args.freeze_vision_tower:
params_to_freeze.extend(["vision_model.blocks", "vision_model.patch_embed"])
if getattr(model.config, "hf_model_type", None) in ["qwen3_vl", "qwen3_vl_moe"]:
params_to_freeze.extend(["vision_model.pos_embed"])
if finetuning_args.freeze_multi_modal_projector:
params_to_freeze.extend(["multi_modal_projector"])
if finetuning_args.freeze_language_model:
params_to_freeze.extend(["embedding", "decoder", "output_layer"])
if params_to_freeze:
for name, p in model.named_parameters():
if any(name.startswith(k) for k in params_to_freeze):
p.requires_grad_(False)
def run_pt(
    model_args: "ModelArguments",
    data_args: "DataArguments",
    training_args: "McaSeq2SeqTrainingArguments",
    finetuning_args: "FinetuningArguments",
    callbacks: Optional[list["TrainerCallback"]] = None,
):
    """Run causal-LM pre-training with the mcore_adapter (Megatron) trainer.

    Loads tokenizer/template and the "pt" dataset, builds the MCA model,
    wraps the HF collator with the Megatron shift logic, then trains and
    optionally plots loss curves.
    """
    tokenizer_module = load_tokenizer(model_args)
    tokenizer = tokenizer_module["tokenizer"]
    template = get_template_and_fix_tokenizer(tokenizer, data_args)
    # dataset needs +1 then cut back due to MCA shift logic
    data_args.cutoff_len += 1
    dataset_module = get_dataset(template, model_args, data_args, training_args, stage="pt", **tokenizer_module)
    data_args.cutoff_len -= 1
    _check_model_support(model_args)
    model = AutoModel.from_pretrained(model_args.model_name_or_path, training_args)
    data_collator = DataCollatorForSeq2Seq(
        tokenizer=tokenizer,
        pad_to_multiple_of=8,
        label_pad_token_id=IGNORE_INDEX,
    )
    # Shift inputs/labels by one position before delegating to the HF collator.
    data_collator = _data_collator_wrapper(data_collator)
    trainer = McaTrainer(
        model=model,
        args=training_args,
        tokenizer=tokenizer,
        data_collator=data_collator,
        callbacks=callbacks,
        **dataset_module,
    )
    if "processor" in tokenizer_module and tokenizer_module["processor"] is not None:
        trainer.add_callback(SaveProcessorCallback(tokenizer_module["processor"]))
    if training_args.do_train:
        train_result = trainer.train(training_args.resume_from_checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
        if trainer.is_world_process_zero() and finetuning_args.plot_loss:
            keys = ["loss"]
            # eval_dataset may be a dict of named splits; plot each split's loss
            if isinstance(dataset_module.get("eval_dataset"), dict):
                keys += [f"eval_{key}_loss" for key in dataset_module["eval_dataset"].keys()]
            else:
                keys += ["eval_loss"]
            plot_loss(training_args.output_dir, keys=keys)
def run_sft(
    model_args: "ModelArguments",
    data_args: "DataArguments",
    training_args: "McaSeq2SeqTrainingArguments",
    finetuning_args: "FinetuningArguments",
    callbacks: Optional[list["TrainerCallback"]] = None,
):
    """Run supervised fine-tuning with the mcore_adapter (Megatron) trainer."""
    # align packing flags
    # TODO: FIX SequencePacking
    data_args.neat_packing = training_args.sequence_packing = data_args.neat_packing or training_args.sequence_packing
    data_args.packing = data_args.neat_packing or data_args.packing
    tokenizer_module = load_tokenizer(model_args)
    tokenizer = tokenizer_module["tokenizer"]
    template = get_template_and_fix_tokenizer(tokenizer, data_args)
    # dataset needs +1 then cut back due to MCA shift logic
    data_args.cutoff_len += 1
    dataset_module = get_dataset(template, model_args, data_args, training_args, stage="sft", **tokenizer_module)
    data_args.cutoff_len -= 1
    _check_model_support(model_args)
    model = AutoModel.from_pretrained(model_args.model_name_or_path, training_args)
    # optional freezing for qwen_vl series
    _freeze_model_parameters(model, finetuning_args)
    # Pad every batch to max length when expert parallelism is enabled
    # (presumably to keep shapes uniform across expert ranks — confirm).
    pad_to_max = training_args.expert_model_parallel_size is not None and training_args.expert_model_parallel_size > 1
    data_collator = SFTDataCollatorWith4DAttentionMask(
        template=template,
        padding="max_length" if pad_to_max else "longest",
        max_length=data_args.cutoff_len if pad_to_max else None,
        pad_to_multiple_of=64,
        label_pad_token_id=IGNORE_INDEX,
        **tokenizer_module,
    )
    # Shift inputs/labels by one position before delegating to the collator.
    data_collator = _data_collator_wrapper(data_collator)
    trainer = McaTrainer(
        model=model,
        args=training_args,
        tokenizer=tokenizer,
        data_collator=data_collator,
        callbacks=callbacks,
        **dataset_module,
    )
    if "processor" in tokenizer_module and tokenizer_module["processor"] is not None:
        trainer.add_callback(SaveProcessorCallback(tokenizer_module["processor"]))
    train_result = trainer.train(training_args.resume_from_checkpoint)
    trainer.save_model()
    trainer.log_metrics("train", train_result.metrics)
    trainer.save_metrics("train", train_result.metrics)
    trainer.save_state()
    if trainer.is_world_process_zero() and finetuning_args.plot_loss:
        keys = ["loss"]
        # eval_dataset may be a dict of named splits; plot each split's loss
        if isinstance(dataset_module.get("eval_dataset"), dict):
            keys += [f"eval_{key}_loss" for key in dataset_module["eval_dataset"].keys()]
        else:
            keys += ["eval_loss"]
        plot_loss(training_args.output_dir, keys=keys)
def run_dpo(
    model_args: "ModelArguments",
    data_args: "DataArguments",
    training_args: "McaSeq2SeqTrainingArguments",
    finetuning_args: "FinetuningArguments",
    callbacks: Optional[list["TrainerCallback"]] = None,
):
    """Run DPO preference training with the mcore_adapter (Megatron) trainer."""
    tokenizer_module = load_tokenizer(model_args)
    tokenizer = tokenizer_module["tokenizer"]
    template = get_template_and_fix_tokenizer(tokenizer, data_args)
    _check_model_support(model_args)
    model = AutoModel.from_pretrained(model_args.model_name_or_path, training_args)
    _freeze_model_parameters(model, finetuning_args)
    if finetuning_args.use_ref_model:
        # Reference model: a fresh copy initialized from the policy's weights.
        ref_config = AutoConfig.from_pretrained(model_args.model_name_or_path, training_args)
        ref_model = AutoModel.from_config(ref_config)
        ref_model.load_state_dict(model.state_dict())
    else:
        ref_model = None
    # dataset needs +1 then cut back due to MCA shift logic
    data_args.cutoff_len += 1
    dataset_module = get_dataset(template, model_args, data_args, training_args, stage="rm", **tokenizer_module)
    data_args.cutoff_len -= 1
    # Pad every batch to max length when expert parallelism is enabled
    # (presumably to keep shapes uniform across expert ranks — confirm).
    pad_to_max = training_args.expert_model_parallel_size is not None and training_args.expert_model_parallel_size > 1
    dpo_config = DPOConfig(
        beta=finetuning_args.pref_beta,
        pref_loss=finetuning_args.pref_loss,
        label_smoothing=finetuning_args.dpo_label_smoothing,
    )
    data_collator = PairwiseDataCollatorWithPadding(
        template=template,
        pad_to_multiple_of=64,
        padding="max_length" if pad_to_max else "longest",
        max_length=data_args.cutoff_len if pad_to_max else None,
        label_pad_token_id=IGNORE_INDEX,
        **tokenizer_module,
    )
    # Shift inputs/labels by one position before delegating to the collator.
    data_collator = _data_collator_wrapper(data_collator)
    trainer = McaDPOTrainer(
        model=model,
        ref_model=ref_model,
        args=training_args,
        train_config=dpo_config,
        tokenizer=tokenizer,
        data_collator=data_collator,
        callbacks=callbacks,
        **dataset_module,
    )
    if "processor" in tokenizer_module and tokenizer_module["processor"] is not None:
        trainer.add_callback(SaveProcessorCallback(tokenizer_module["processor"]))
    train_result = trainer.train(training_args.resume_from_checkpoint)
    trainer.save_model()
    if finetuning_args.include_effective_tokens_per_second:
        train_result.metrics["effective_tokens_per_sec"] = calculate_tps(
            dataset_module["train_dataset"], train_result.metrics, stage="rm"
        )
    trainer.log_metrics("train", train_result.metrics)
    trainer.save_metrics("train", train_result.metrics)
    trainer.save_state()
    if trainer.is_world_process_zero() and finetuning_args.plot_loss:
        keys = ["loss", "rewards/accuracies"]
        # eval_dataset may be a dict of named splits; plot each split's loss
        if isinstance(dataset_module.get("eval_dataset"), dict):
            keys += [f"eval_{key}_loss" for key in dataset_module["eval_dataset"].keys()]
        else:
            keys += ["eval_loss"]
        plot_loss(training_args.output_dir, keys=keys)
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/train/mca/workflow.py",
"license": "Apache License 2.0",
"lines": 253,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/plugins/model_plugins/kernels/registry.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The definition of kernel registry.
Init Phase:
1. Define kernel registry.
2. Register kernels.
"""
from ....accelerator.helper import get_current_accelerator
from .base import BaseKernel
__all__ = ["Registry", "register_kernel"]
class Registry:
    """Registry for managing kernel implementations.

    Storage structure: ``{ "kernel_id": Class }``
    """

    _kernels: dict[str, type[BaseKernel]] = {}

    @classmethod
    def register(cls, kernel_cls: type[BaseKernel]) -> type[BaseKernel] | None:
        """Decorator that records *kernel_cls* under its kernel ID.

        Registration is silently skipped (``None`` is returned) when the
        kernel targets a device type other than the current accelerator.

        Raises:
            TypeError: If *kernel_cls* does not inherit from :class:`BaseKernel`.
            ValueError: If the kernel ID is missing or already registered.
        """
        if not issubclass(kernel_cls, BaseKernel):
            raise TypeError(f"Class {kernel_cls} must inherit from BaseKernel")

        kernel_id = kernel_cls.get_kernel_id()
        device = kernel_cls.get_device()
        # NOTE(review): on a device mismatch the decorated name is bound to
        # None at the use site — callers appear to rely on this gating.
        if device != get_current_accelerator().type:
            return None

        if not kernel_id:
            raise ValueError(f"Kernel ID (_kernel_id) is needed for {kernel_cls} to register")
        if kernel_id in cls._kernels:
            raise ValueError(f"{kernel_id} already registered! The registered kernel is {cls._kernels[kernel_id]}")

        cls._kernels[kernel_id] = kernel_cls
        return kernel_cls

    @classmethod
    def get(cls, kernel_id: str) -> type[BaseKernel] | None:
        """Return the kernel class registered under *kernel_id*, or ``None`` if absent."""
        return cls._kernels.get(kernel_id)

    @classmethod
    def get_registered_kernels(cls) -> dict[str, type[BaseKernel]]:
        """Return the mapping of all registered kernel IDs to their classes."""
        return cls._kernels
# Module-level decorator alias so kernels can use @register_kernel directly.
register_kernel = Registry.register
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/plugins/model_plugins/kernels/registry.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:tests_v1/plugins/model_plugins/test_kernel_plugin.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from unittest.mock import MagicMock, patch
import torch.multiprocessing as mp
from transformers import AutoModelForCausalLM
def _run_kernel_case(include_kernels, expect_swiglu_patched: bool) -> None:
    """Shared body for the spawned kernel-patching checks (was duplicated).

    Mocks the current accelerator as an NPU, reloads the kernel plugin modules
    so registration respects the mock, applies the requested kernels to a tiny
    Qwen3 model, and asserts which forwards were patched.
    """
    with patch("torch.accelerator.current_accelerator") as mock_get_accelerator:
        mock_device = MagicMock()
        setattr(mock_device, "type", "npu")
        mock_get_accelerator.return_value = mock_device

        # reload kernel modules to respect mocked accelerator
        for k in list(sys.modules.keys()):
            if k.startswith("llamafactory.v1.plugins.model_plugins.kernels"):
                del sys.modules[k]

        from llamafactory.v1.plugins.model_plugins.kernels.interface import apply_default_kernels

        model = AutoModelForCausalLM.from_pretrained("llamafactory/tiny-random-qwen3")
        original_rmsnorm_forward = model.model.layers[0].input_layernorm.forward
        original_swiglu_forward = model.model.layers[0].mlp.forward
        model = apply_default_kernels(model=model, include_kernels=include_kernels)

        # RMSNorm is patched in every scenario exercised here.
        assert model.model.layers[0].input_layernorm.forward.__func__ is not original_rmsnorm_forward.__func__
        if expect_swiglu_patched:
            assert model.model.layers[0].mlp.forward.__func__ is not original_swiglu_forward.__func__
        else:
            assert model.model.layers[0].mlp.forward.__func__ is original_swiglu_forward.__func__


def _apply_kernel(rank) -> None:
    # Only the RMSNorm kernel is requested; the SwiGLU forward must stay untouched.
    _run_kernel_case("npu_fused_rmsnorm", expect_swiglu_patched=False)


def _apply_all_kernels(rank) -> None:
    # include_kernels=True applies every registered default kernel.
    _run_kernel_case(True, expect_swiglu_patched=True)
def test_apply_kernel():
    # Spawned as a subprocess — presumably to isolate module reloads and
    # accelerator mocking from the rest of the test session; confirm.
    mp.spawn(_apply_kernel)


def test_apply_all_kernels():
    mp.spawn(_apply_all_kernels)
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "tests_v1/plugins/model_plugins/test_kernel_plugin.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
hiyouga/LlamaFactory:tests_v1/plugins/data_plugins/test_converter.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import pytest
from datasets import load_dataset
from llamafactory.v1.config.data_args import DataArguments
from llamafactory.v1.core.data_engine import DataEngine
from llamafactory.v1.plugins.data_plugins.converter import DataConverterPlugin
@pytest.mark.parametrize("num_samples", [16])
def test_alpaca_converter(num_samples: int):
    """Alpaca-format rows must convert into the v1 message schema."""
    data_args = DataArguments(train_dataset="llamafactory/v1-dataset-info/tiny-supervised-dataset.yaml")
    data_engine = DataEngine(data_args.train_dataset)
    original_data = load_dataset("llamafactory/tiny-supervised-dataset", split="train")
    for index in random.choices(range(len(data_engine)), k=num_samples):
        print(data_engine[index])
        row = original_data[index]
        user_turn = {
            "role": "user",
            "content": [{"type": "text", "value": row["instruction"] + row["input"]}],
            "loss_weight": 0.0,
        }
        assistant_turn = {
            "role": "assistant",
            "content": [{"type": "text", "value": row["output"]}],
            "loss_weight": 1.0,
        }
        expected_data = {"messages": [user_turn, assistant_turn]}
        assert data_engine[index] == {"_dataset_name": "tiny_dataset", **expected_data}
def test_sharegpt_converter():
    """ShareGPT roles must map onto v1 roles, content types and loss weights."""
    example = {
        "conversations": [
            {"from": "system", "value": "System"},
            {"from": "human", "value": "User"},
            {"from": "function_call", "value": "1"},
            {"from": "observation", "value": "Observation"},
            {"from": "gpt", "value": "Assistant"},
        ]
    }

    def turn(role: str, kind: str, value: str, loss_weight: float) -> dict:
        # Helper to build one expected message in the v1 schema.
        return {"role": role, "content": [{"type": kind, "value": value}], "loss_weight": loss_weight}

    expected_data = {
        "messages": [
            turn("system", "text", "System", 0.0),
            turn("user", "text", "User", 0.0),
            turn("assistant", "tool_call", "1", 1.0),
            turn("tool", "text", "Observation", 0.0),
            turn("assistant", "text", "Assistant", 1.0),
        ]
    }
    assert DataConverterPlugin("sharegpt")(example) == expected_data
@pytest.mark.parametrize("num_samples", [16])
def test_pair_converter(num_samples: int):
    """Preference pairs must convert into chosen/rejected message lists."""
    data_args = DataArguments(train_dataset="llamafactory/v1-dataset-info/orca-dpo-pairs.yaml")
    data_engine = DataEngine(data_args.train_dataset)
    original_data = load_dataset("HuggingFaceH4/orca_dpo_pairs", split="train_prefs")

    def as_messages(turns) -> list:
        # Each side is a fixed system/user/assistant triple; only the
        # assistant turn contributes to the loss.
        return [
            {
                "role": role,
                "content": [{"type": "text", "value": turns[i]["content"]}],
                "loss_weight": 1.0 if role == "assistant" else 0.0,
            }
            for i, role in enumerate(["system", "user", "assistant"])
        ]

    for index in random.choices(range(len(data_engine)), k=num_samples):
        print(data_engine[index])
        print(original_data[index])
        expected_data = {
            "chosen_messages": as_messages(original_data[index]["chosen"]),
            "rejected_messages": as_messages(original_data[index]["rejected"]),
        }
        assert data_engine[index] == {"_dataset_name": "tiny_dataset", **expected_data}
if __name__ == "__main__":
    """
    python -m tests_v1.plugins.data_plugins.test_converter
    """
    # Smoke-run each converter check with a single sample when executed directly.
    test_alpaca_converter(1)
    test_sharegpt_converter()
    test_pair_converter(1)
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "tests_v1/plugins/data_plugins/test_converter.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
hiyouga/LlamaFactory:scripts/bench_qwen.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from dataclasses import dataclass
from typing import Any
import fire
import torch
from peft import PeftModel
from torch.utils.data import Dataset
from transformers import DataCollatorForSeq2Seq, Qwen2_5_VLProcessor
from llamafactory.extras.constants import IGNORE_INDEX
from llamafactory.hparams import get_train_args
from llamafactory.model import load_model, load_tokenizer
from llamafactory.train.callbacks import LogCallback
from llamafactory.train.sft.trainer import CustomSeq2SeqTrainer
class DummyDataset(Dataset):
    """Synthetic multimodal dataset for throughput benchmarking.

    Each item is a random token sequence whose prefix is overwritten with image
    placeholder tokens followed by video placeholder tokens, plus random pixel
    features sized to match the declared image/video grids. No real data is read.
    """

    def __init__(self, size: int = 1000, seq_length: int = 1024, processor: Qwen2_5_VLProcessor = None):
        self.size = size
        self.seq_length = seq_length
        self.vocab_size = 32768
        self.processor = processor
        # An 18x18 patch grid merged 2x2 -> 81 tokens per image/video frame.
        tokens_per_frame = 18 * 18 // (2 * 2)
        num_images = 2
        self.text_seqlen = seq_length // 4  # 25% text
        video_budget = self.seq_length - self.text_seqlen - num_images * tokens_per_frame
        video_frames = video_budget // tokens_per_frame
        self.image_size = [18 * 18 * num_images, 1176]
        self.image_grid_thw = torch.tensor([[1, 18, 18]] * num_images, dtype=torch.long)
        self.image_seqlen = num_images * tokens_per_frame
        self.video_size = [18 * 18 * video_frames, 1176]
        self.video_grid_thw = torch.tensor([[video_frames, 18, 18]], dtype=torch.long)
        self.video_seqlen = video_frames * tokens_per_frame

    def __len__(self):
        return self.size

    def __getitem__(self, index: int):
        # Random token ids; the head of the sequence becomes media placeholders.
        tokens = torch.randint(low=0, high=self.vocab_size, size=(self.seq_length,))
        media_len = self.image_seqlen + self.video_seqlen
        tokens[: self.image_seqlen] = self.processor.image_token_id
        tokens[self.image_seqlen : media_len] = self.processor.video_token_id
        mask = torch.ones((self.seq_length,), dtype=torch.long)
        targets = tokens.clone()
        targets[:media_len] = IGNORE_INDEX  # no loss on media placeholder positions
        image_feats = torch.rand(self.image_size, dtype=torch.float32)
        video_feats = torch.rand(self.video_size, dtype=torch.float32)
        return {
            "input_ids": tokens,
            "attention_mask": mask,
            "labels": targets,
            "pixel_values": image_feats,
            "pixel_values_videos": video_feats,
            "image_grid_thw": self.image_grid_thw,
            "video_grid_thw": self.video_grid_thw,
        }
@dataclass
class MultiModalDataCollatorForSeq2Seq(DataCollatorForSeq2Seq):
    """Seq2seq collator that additionally batches vision features and mrope position ids."""

    def __post_init__(self):
        # Unwrap PEFT wrappers so the attribute lookups below reach the base model.
        if isinstance(self.model, PeftModel):
            self.model = self.model.base_model.model
        if self.model is not None and hasattr(self.model, "get_rope_index"): # for qwen2vl mrope
            self.get_rope_func = self.model.get_rope_index # transformers < 4.52.0 or qwen2.5 omni
        elif self.model is not None and hasattr(self.model, "model") and hasattr(self.model.model, "get_rope_index"):
            self.get_rope_func = self.model.model.get_rope_index # transformers >= 4.52.0
        else:
            self.get_rope_func = None

    def __call__(self, features: list[dict[str, Any]]) -> dict[str, "torch.Tensor"]:
        """Collate a batch: pad token fields via the parent collator, then concatenate vision tensors."""
        # Pull the vision tensors out first so the parent collator only sees token fields.
        batch_pixel_values = [feature.pop("pixel_values") for feature in features]
        batch_pixel_values_videos = [feature.pop("pixel_values_videos") for feature in features]
        batch_image_grid_thw = [feature.pop("image_grid_thw") for feature in features]
        batch_video_grid_thw = [feature.pop("video_grid_thw") for feature in features]
        batch: dict[str, torch.Tensor] = super().__call__(features)
        # Vision features are concatenated along dim 0 (patch counts can differ per sample).
        batch["pixel_values"] = torch.cat(batch_pixel_values, dim=0)
        batch["pixel_values_videos"] = torch.cat(batch_pixel_values_videos, dim=0)
        batch["image_grid_thw"] = torch.cat(batch_image_grid_thw, dim=0)
        batch["video_grid_thw"] = torch.cat(batch_video_grid_thw, dim=0)
        if self.get_rope_func is not None:
            rope_index_kwargs = {
                "input_ids": batch["input_ids"],
                "image_grid_thw": batch["image_grid_thw"],
                "video_grid_thw": batch["video_grid_thw"],
                "attention_mask": (batch["attention_mask"] >= 1).float(),
            }
            batch["position_ids"], batch["rope_deltas"] = self.get_rope_func(**rope_index_kwargs)
        if "position_ids" not in batch or batch["position_ids"].dim() != 3:
            raise ValueError("Qwen2VL requires 3D position ids for mrope.")
        return batch
def bench_qwen(
    model_name_or_path: str = "Qwen/Qwen2-VL-7B-Instruct",
    batch_size: int = 1,
    seq_length: int = 2048,
    liger_kernel: bool = False,
    deepspeed_stage: int = 3,
):
    """Benchmark Qwen2-VL SFT training throughput on synthetic multimodal data.

    Args:
        model_name_or_path: Model path or Hugging Face hub id to benchmark.
        batch_size: Per-device train batch size.
        seq_length: Sequence length (``cutoff_len``) used for the dummy samples.
        liger_kernel: Whether to enable the liger kernel.
        deepspeed_stage: DeepSpeed ZeRO stage; only 2 and 3 attach a deepspeed config file.
    """
    os.environ["LLAMABOARD_ENABLED"] = "true"
    os.environ["LLAMABOARD_WORKDIR"] = "output/dummy_dir"
    # Training arguments passed through LlamaFactory's standard argument parser.
    args = {
        "model_name_or_path": model_name_or_path,
        "enable_liger_kernel": liger_kernel,
        "stage": "sft",
        "do_train": True,
        "finetuning_type": "full",
        "dataset": "alpaca_en_demo",
        "template": "qwen2_vl",
        "cutoff_len": seq_length,
        "output_dir": "output/dummy_dir",
        "logging_steps": 10,
        "save_strategy": "no",
        "save_only_model": True,
        "overwrite_output_dir": True,
        "per_device_train_batch_size": batch_size,
        "max_steps": 1000,
        "bf16": True,
        "include_num_input_tokens_seen": True,
        "report_to": "none",
    }
    if deepspeed_stage in [2, 3]:
        args["deepspeed"] = f"examples/deepspeed/ds_z{deepspeed_stage}_config.json"
    model_args, _, training_args, finetuning_args, _ = get_train_args(args)
    tokenizer_module = load_tokenizer(model_args)
    tokenizer = tokenizer_module["tokenizer"]
    # The declared dataset above is replaced with synthetic samples of fixed length.
    trainset = DummyDataset(size=100000, seq_length=seq_length, processor=tokenizer_module["processor"])
    model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train)
    data_collator = MultiModalDataCollatorForSeq2Seq(
        tokenizer=tokenizer, model=model, pad_to_multiple_of=8, label_pad_token_id=IGNORE_INDEX
    )
    trainer = CustomSeq2SeqTrainer(
        model=model,
        args=training_args,
        finetuning_args=finetuning_args,
        data_collator=data_collator,
        callbacks=[LogCallback()],
        train_dataset=trainset,
        **tokenizer_module,
    )
    trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
if __name__ == "__main__":
    # CLI entry point: expose bench_qwen's keyword arguments as command-line flags.
    fire.Fire(bench_qwen)
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "scripts/bench_qwen.py",
"license": "Apache License 2.0",
"lines": 146,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:tests_v1/core/test_data_engine.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import pytest
from datasets import load_dataset
from llamafactory.v1.config.data_args import DataArguments
from llamafactory.v1.core.data_engine import DataEngine
@pytest.mark.parametrize("num_samples", [16])
def test_map_dataset(num_samples: int):
    """Check that DataEngine samples match the raw hub dataset rows (plus the name tag)."""
    args = DataArguments(train_dataset="llamafactory/v1-sft-demo")
    engine = DataEngine(args.train_dataset)
    reference = load_dataset("llamafactory/v1-sft-demo", split="train")
    # Sample with replacement so any num_samples works regardless of dataset size.
    for idx in random.choices(range(len(engine)), k=num_samples):
        print(engine[idx])
        assert engine[idx] == {"_dataset_name": "default", **reference[idx]}
if __name__ == "__main__":
    """
    python -m tests_v1.core.test_data_engine
    """
    # Single-sample smoke check when executed directly (without pytest).
    test_map_dataset(1)
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "tests_v1/core/test_data_engine.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
hiyouga/LlamaFactory:src/llamafactory/v1/config/data_args.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
@dataclass
class DataArguments:
    """Arguments specifying the training and evaluation datasets."""

    train_dataset: str | None = field(
        default=None,
        metadata={"help": "Path to the training dataset."},
    )
    eval_dataset: str | None = field(
        default=None,
        metadata={"help": "Path to the evaluation dataset."},
    )
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/config/data_args.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/config/model_args.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from .arg_utils import ModelClass, PluginConfig, get_plugin_config
@dataclass
class ModelArguments:
    """Model-related arguments: checkpoint, chat template, and optional plugin configs."""

    model: str = field(
        default="Qwen/Qwen3-4B-Instruct-2507",
        metadata={"help": "Path to the model or model identifier from Hugging Face."},
    )
    template: str = field(
        default="qwen3_nothink",
        metadata={"help": "Template for the model."},
    )
    trust_remote_code: bool = field(
        default=False,
        metadata={"help": "Trust remote code from Hugging Face."},
    )
    model_class: ModelClass = field(
        default=ModelClass.LLM,
        metadata={"help": "Model class from Hugging Face."},
    )
    init_config: PluginConfig | None = field(
        default=None,
        metadata={"help": "Initialization configuration for the model."},
    )
    peft_config: PluginConfig | None = field(
        default=None,
        metadata={"help": "PEFT configuration for the model."},
    )
    kernel_config: PluginConfig | None = field(
        default=None,
        metadata={"help": "Kernel configuration for the model."},
    )
    quant_config: PluginConfig | None = field(
        default=None,
        metadata={"help": "Quantization configuration for the model."},
    )
    def __post_init__(self) -> None:
        # Normalize raw plugin config values into PluginConfig instances.
        self.init_config = get_plugin_config(self.init_config)
        self.peft_config = get_plugin_config(self.peft_config)
        self.kernel_config = get_plugin_config(self.kernel_config)
        self.quant_config = get_plugin_config(self.quant_config)
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/config/model_args.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/config/sample_args.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from .arg_utils import SampleBackend
@dataclass
class SampleArguments:
    """Arguments controlling token sampling / generation."""

    sample_backend: SampleBackend = field(
        default=SampleBackend.HF,
        metadata={"help": "Sampling backend, default to 'hf'."},
    )
    max_new_tokens: int = field(
        default=128,
        metadata={"help": "Maximum number of new tokens to generate."},
    )
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/config/sample_args.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/config/training_args.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from dataclasses import dataclass, field
from uuid import uuid4
from .arg_utils import BatchingStrategy, PluginConfig, get_plugin_config
@dataclass
class TrainingArguments:
    """Training-loop arguments: batching, optimization, distribution, and output settings."""

    output_dir: str = field(
        # NOTE(review): this default is evaluated once at class-definition time, so every
        # instance created in the same process shares the same uuid directory — confirm intended
        # (a per-instance default would need default_factory).
        default=os.path.join("outputs", str(uuid4().hex)),
        metadata={"help": "Path to the output directory."},
    )
    micro_batch_size: int = field(
        default=1,
        metadata={"help": "Micro batch size for training."},
    )
    global_batch_size: int | None = field(
        default=None,
        metadata={"help": "Global batch size for training, default to DP size * micro batch size."},
    )
    cutoff_len: int = field(
        default=2048,
        metadata={"help": "Maximum sequence length for training."},
    )
    learning_rate: float = field(
        default=1e-4,
        metadata={"help": "Learning rate for training."},
    )
    num_train_epochs: int = field(
        default=3,
        metadata={"help": "Number of training epochs."},
    )
    max_steps: int | None = field(
        default=None,
        metadata={"help": "Maximum number of training steps. If set, overrides num_train_epochs."},
    )
    max_grad_norm: float = field(
        default=1.0,
        metadata={"help": "Maximum gradient norm for training."},
    )
    bf16: bool = field(
        default=False,
        metadata={"help": "Use bf16 for training."},
    )
    batching_strategy: BatchingStrategy = field(
        default=BatchingStrategy.NORMAL,
        metadata={"help": "Batching strategy for training."},
    )
    batching_workers: int = field(
        default=16,
        metadata={"help": "Number of workers for batching."},
    )
    enable_activation_checkpointing: bool = field(
        default=False,
        metadata={"help": "Enable activation checkpointing for training."},
    )
    dist_config: PluginConfig | None = field(
        default=None,
        metadata={"help": "Distribution configuration for training."},
    )
    optim_config: PluginConfig | None = field(
        default=None,
        metadata={"help": "Optimizer configuration for training."},
    )
    lr_scheduler_config: PluginConfig | None = field(
        default=None,
        metadata={"help": "Learning rate scheduler configuration for training."},
    )
    seed: int = field(
        default=42,
        metadata={"help": "Random seed that will be set at the beginning of training."},
    )
    def __post_init__(self) -> None:
        # Normalize raw plugin config values into PluginConfig instances.
        self.dist_config = get_plugin_config(self.dist_config)
        self.optim_config = get_plugin_config(self.optim_config)
        self.lr_scheduler_config = get_plugin_config(self.lr_scheduler_config)
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/config/training_args.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/core/data_engine.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The definition of data engine.
How to use:
data_engine = DataEngine(data_args.train_dataset)
data_engine[i]: Get the sample via index.
Init workflow:
1. Parse dataset info from arguments.
2. Load datasets according to dataset info.
3. Build data index (and reweight samples if necessary).
Get data sample:
1. Get sample from data index.
2. Convert sample to standard format.
3. Return sample.
Note:
1. The data engine is equivalent to the torch dataset.
2. The data engine is agnostic to the model used.
"""
import os
from collections.abc import Iterable
from typing import Any
from huggingface_hub import hf_hub_download
from omegaconf import OmegaConf
from torch.utils.data import Dataset
from ..utils.types import DatasetInfo, HFDataset, Sample
class DataEngine(Dataset):
    """Data engine: a torch-style dataset that unifies one or more datasets behind a single index.

    Args:
        dataset_path: Local path, HF hub id, or a yaml file describing multiple datasets.
    """
    def __init__(self, dataset_path: str) -> None:
        self.path = dataset_path
        """Dataset path."""
        self.datasets: dict[str, HFDataset] = {}
        """Dict of (dataset_name, dataset)"""
        self.dataset_infos: dict[str, DatasetInfo] = {}
        """Dict of (dataset_name, dataset_info)"""
        self.data_index: list[tuple[str, int]] = []
        """List of (dataset_name, sample_index)"""
        self.streaming: bool = False
        """Whether dataset is streaming."""
        self._get_dataset_info()
        self._load_dataset()
        self._build_data_index()
    def _get_dataset_info(self) -> None:
        """Resolve `self.path` into per-dataset info dicts (yaml spec, local path, or hub id)."""
        if self.path.endswith(".yaml") and os.path.isfile(self.path): # local file
            self.dataset_infos = OmegaConf.load(self.path)
        elif self.path.endswith(".yaml"): # hf hub uri, e.g. llamafactory/v1-sft-demo/dataset_info.yaml
            repo_id, filename = os.path.split(self.path)
            filepath = hf_hub_download(repo_id=repo_id, filename=filename, repo_type="dataset")
            self.dataset_infos = OmegaConf.load(filepath)
        elif os.path.exists(self.path): # local file(s)
            self.dataset_infos = {"default": {"path": self.path, "source": "local"}}
        else: # hf hub dataset, e.g. llamafactory/v1-sft-demo
            self.dataset_infos = {"default": {"path": self.path}}
    def _load_dataset(self) -> None:
        """Load datasets according to dataset info."""
        is_streaming = [dataset_info.get("streaming", False) for dataset_info in self.dataset_infos.values()]
        self.streaming = any(is_streaming)
        # Mixing streaming and map-style datasets is unsupported: all() == any() only when uniform.
        if all(is_streaming) != any(is_streaming):
            raise ValueError("All datasets must be streaming or non-streaming.")
        for dataset_name, dataset_info in self.dataset_infos.items():
            split = dataset_info.get("split", "train")
            if dataset_info.get("source", "hf_hub") == "hf_hub":
                from datasets import load_dataset
                self.datasets[dataset_name] = load_dataset(dataset_info["path"], split=split, streaming=self.streaming)
            else: # data loader plugin
                from ..plugins.data_plugins.loader import DataLoaderPlugin
                self.datasets[dataset_name] = DataLoaderPlugin(dataset_info["source"]).load(dataset_info)
    def _build_data_index(self) -> None:
        """Build dataset index."""
        for dataset_name, dataset in self.datasets.items():
            if self.streaming:
                # Streaming datasets have no known length; use placeholder entries with sentinel index -1.
                data_index = [(dataset_name, -1) for _ in range(1000)]
            else:
                data_index = [(dataset_name, sample_index) for sample_index in range(len(dataset))]
            size = self.dataset_infos[dataset_name].get("size")
            weight = self.dataset_infos[dataset_name].get("weight")
            if size or weight:
                from ..plugins.data_plugins.loader import adjust_data_index
                data_index = adjust_data_index(data_index, size, weight)
            self.data_index.extend(data_index)
    def _convert_data_sample(self, raw_sample: dict[str, Any], dataset_name: str) -> Sample:
        """Convert dataset sample.

        Args:
            raw_sample (dict[str, Any]): Raw dataset sample.
            dataset_name (str): Dataset name.

        Returns:
            Sample: Dataset sample (always tagged with its source dataset name).
        """
        converter = self.dataset_infos[dataset_name].get("converter")
        if converter is not None:
            from ..plugins.data_plugins.converter import DataConverterPlugin
            return {"_dataset_name": dataset_name, **DataConverterPlugin(converter)(raw_sample)}
        else:
            return {"_dataset_name": dataset_name, **raw_sample}
    def __len__(self) -> int:
        """Get dataset length.

        Returns:
            int: Dataset length, or -1 when streaming (unknown length).
        """
        if self.streaming:
            return -1
        else:
            return len(self.data_index)
    def __getitem__(self, index: int | Any) -> Sample | list[Sample]:
        """Get dataset item.

        Args:
            index (int): Dataset index, or a selector understood by the data selector plugin.

        Returns:
            Sample: Dataset item.
        """
        if self.streaming:
            raise ValueError("Streaming dataset does not support index access.")
        if isinstance(index, int):
            dataset_name, sample_index = self.data_index[index]
            return self._convert_data_sample(self.datasets[dataset_name][sample_index], dataset_name)
        else: # data selector plugin
            from ..plugins.data_plugins.loader import select_data_sample
            selected_index = select_data_sample(self.data_index, index)
            if isinstance(selected_index, list):
                return [
                    self._convert_data_sample(self.datasets[dataset_name][sample_index], dataset_name)
                    for dataset_name, sample_index in selected_index
                ]
            else:
                dataset_name, sample_index = selected_index
                return self._convert_data_sample(self.datasets[dataset_name][sample_index], dataset_name)
    def __iter__(self) -> Iterable[Sample]:
        """Get dataset iterator.

        Returns:
            Iterable[Sample]: Dataset iterator.
        """
        # NOTE: hf iterable dataset uses worker ids while map dataset does not
        # NOTE: add worker id and shuffle to the map dataset
        # https://github.com/huggingface/datasets/blob/4.0.0/src/datasets/iterable_dataset.py#L2214
        raise NotImplementedError()
if __name__ == "__main__":
    """
    python -m llamafactory.v1.core.data_engine --train_dataset data/v1_sft_demo.yaml
    python -m llamafactory.v1.core.data_engine --train_dataset data/v1_dpo_demo.yaml
    """
    from ..config.arg_parser import get_args
    # Manual smoke test: load the dataset given on the CLI and print its first sample.
    _, data_args, *_ = get_args()
    data_engine = DataEngine(data_args.train_dataset)
    print(data_engine[0])
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/core/data_engine.py",
"license": "Apache License 2.0",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/core/base_trainer.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The definition of trainer.
Init Phase:
1. Init batch generator.
2. Init optimizer (deepspeed).
3. Shard model.
4. Init optimizer (fsdp).
5. Init lr scheduler.
Train Phase:
1. Train Loop
"""
from abc import abstractmethod
import torch
import torch.nn.functional as F
from ..accelerator.helper import ReduceOp
from ..accelerator.interface import Dim, DistributedInterface
from ..config import TrainingArguments
from ..utils import logging
from ..utils.helper import compute_valid_tokens
from ..utils.types import BatchInput, HFModel, ModelOutput, Tensor, TorchDataset
from .utils.batching import BatchGenerator
from .utils.rendering import Renderer
logger = logging.get_logger(__name__)
class BaseTrainer:
    """Base training loop: shards the model, builds optimizer/scheduler, and runs gradient steps.

    Subclasses implement `compute_loss` for their specific training objective.

    Args:
        args: Training arguments.
        model: HF model to train.
        renderer: Renderer (wraps the tokenizer/processor).
        train_dataset: Torch dataset providing training samples.
    """
    def __init__(
        self,
        args: TrainingArguments,
        model: HFModel,
        renderer: Renderer,
        train_dataset: TorchDataset,
    ) -> None:
        self.args = args
        self.model = model
        self.renderer = renderer
        self.train_dataset = train_dataset
        # info
        self.global_step = 0
        # cached variables
        self.device = DistributedInterface().current_device
        self.dp_size = DistributedInterface().get_world_size(Dim.DP)
        self.model_input_names = self.renderer.processor.model_input_names
        self._create_batch_generator()
        # Calculate num_training_steps: max_steps takes priority if set
        if self.args.max_steps is not None and self.args.max_steps > 0:
            self.num_training_steps = self.args.max_steps
        else:
            self.num_training_steps = self.args.num_train_epochs * len(self.train_batch_generator)
        if self.args.enable_activation_checkpointing:
            self.model.gradient_checkpointing_enable({"use_reentrant": False})
        self._deepspeed_engine = None
        dist_name = self.args.dist_config.name if self.args.dist_config is not None else None
        if dist_name == "deepspeed":
            from ..plugins.trainer_plugins.distributed.hub import DistributedPlugin
            # deepspeed: build the engine first, then prepare() wraps model/optimizer/scheduler together.
            self._deepspeed_engine = DistributedPlugin("deepspeed")(
                self.model,
                self.args.dist_config,
                num_micro_batch=self.train_batch_generator.num_micro_batch,
                micro_batch_size=self.args.micro_batch_size,
            )
            self._init_optimizer()
            self._init_lr_scheduler()
            self.model, self.optimizer, self.lr_scheduler = self._deepspeed_engine.prepare(
                self.model, self.optimizer, self.lr_scheduler
            )
        else:
            # fsdp2 / DDP / no dist
            self._shard_model()
            self._init_optimizer()
            self._init_lr_scheduler()
    def _create_batch_generator(self) -> None:
        """Create the train batch generator from the dataset and batching arguments."""
        self.train_batch_generator = BatchGenerator(
            dataset=self.train_dataset,
            renderer=self.renderer,
            micro_batch_size=self.args.micro_batch_size,
            global_batch_size=self.args.global_batch_size,
            cutoff_len=self.args.cutoff_len,
            batching_workers=self.args.batching_workers,
            batching_strategy=self.args.batching_strategy,
            seed=self.args.seed,
        )
    def _shard_model(self) -> None:
        """Shard or wrap the model for distributed training (non-deepspeed path)."""
        if self.args.dist_config is None:
            if DistributedInterface().get_world_size(Dim.DP) > 1:
                from torch.nn.parallel import DistributedDataParallel as DDP
                logger.warning_rank0(
                    "dist_config is None but distributed training is enabled; falling back to DistributedDataParallel."
                )
                device_ids = None if self.device.type == "cpu" else [self.device.index]
                self.model = DDP(self.model, device_ids=device_ids)
        else:
            from ..plugins.trainer_plugins.distributed.hub import DistributedPlugin
            self.model = DistributedPlugin(self.args.dist_config.name)(
                self.model,
                self.args.dist_config,
            )
    def _init_optimizer(self) -> None:
        """Init optimizer."""
        if self.args.optim_config is None:
            # Default: AdamW over trainable parameters only.
            _trainable_params = [p for p in self.model.parameters() if p.requires_grad]
            self.optimizer = torch.optim.AdamW(_trainable_params, lr=self.args.learning_rate)
        else:
            from ..plugins.trainer_plugins.optimizer import OptimizerPlugin
            self.optimizer = OptimizerPlugin(self.args.optim_config.name)(self.model, self.args.optim_config)
    def _init_lr_scheduler(self) -> None:
        """Init lr scheduler."""
        if self.args.lr_scheduler_config is None:
            # Default: constant learning rate.
            self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lambda x: 1.0)
        else:
            from ..plugins.trainer_plugins.lr_scheduler import LRSchedulerPlugin
            self.lr_scheduler = LRSchedulerPlugin(self.args.lr_scheduler_config.name)(
                self.optimizer, self.num_training_steps, self.args.lr_scheduler_config
            )
    def compute_log_probs(self, model: HFModel, batch: BatchInput) -> Tensor:
        """Compute per-token log probs.

        Returns:
            log_probs: Tensor of shape (batch_size, seq_len - 1)
        """
        batch_size, _ = batch["labels"].shape
        model_inputs = {
            k: v.to(self.device, non_blocking=True) for k, v in batch.items() if k in self.model_input_names
        }
        labels = batch["labels"].to(self.device, non_blocking=True)
        outputs: ModelOutput = model(**model_inputs)
        logits = outputs.logits.float()
        # Shift so that token t is predicted from logits at position t - 1.
        shift_labels = labels[..., 1:].contiguous().view(-1)
        shift_logits = logits[..., :-1, :].contiguous().view(shift_labels.size(0), -1)
        return -F.cross_entropy(shift_logits, shift_labels, reduction="none").view(batch_size, -1)
    @abstractmethod
    def compute_loss(self, batch: BatchInput) -> Tensor:
        """Compute the scalar loss."""
        ...
    def fit(self) -> None:
        """Train the model."""
        self.model.train()
        for epoch in range(self.args.num_train_epochs):
            self.train_batch_generator.set_epoch(epoch)
            for micro_batches in self.train_batch_generator:
                self.global_step += 1
                step_loss = 0
                # Token-weighted loss normalization: count valid tokens across all ranks.
                step_valid_tokens = compute_valid_tokens(micro_batches)
                step_valid_tokens = DistributedInterface().all_reduce(step_valid_tokens, op=ReduceOp.SUM)
                num_micro = len(micro_batches)
                for i, micro_batch in enumerate(micro_batches):
                    loss = self.compute_loss(micro_batch)
                    mini_step_valid_tokens = compute_valid_tokens([micro_batch])
                    # fsdp uses mean reduction so we need to scale the loss by dp_size
                    loss = loss * mini_step_valid_tokens * self.dp_size / (step_valid_tokens + 1e-6)
                    if self._deepspeed_engine is not None:
                        # deepspeed: set sync_gradients so engine.step() only fires on last micro-batch
                        self._deepspeed_engine.accelerator.sync_gradients = i == num_micro - 1
                        self._deepspeed_engine.backward(loss)
                    else:
                        loss.backward()
                    step_loss += loss.item()
                if self._deepspeed_engine is not None:
                    # deepspeed: engine.step() already ran inside backward at the sync boundary
                    grad_norm = self._deepspeed_engine.get_grad_norm()
                else:
                    grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.max_grad_norm).item()
                # isfinite(): argument 'input' (position 1) must be Tensor, not float
                if not torch.isfinite(torch.tensor(grad_norm)): # type: ignore # pyright: ignore [reportUnknownReturnType]
                    # Skip the optimizer step on a bad gradient; grads are still cleared below.
                    logger.warning_rank0(f"Gradient norm is not finite: {grad_norm}")
                else:
                    self.optimizer.step()
                self.lr_scheduler.step()
                self.optimizer.zero_grad()
                step_loss, grad_norm = DistributedInterface().all_reduce([step_loss, grad_norm])
                DistributedInterface().sync()
                if DistributedInterface().get_rank() == 0:
                    print(f"Epoch {epoch}, Step {self.global_step}, Loss: {step_loss:.4f}, Grad Norm: {grad_norm:.4f}")
                # Check if max_steps is reached
                if self.global_step >= self.num_training_steps:
                    logger.info_rank0(f"Reached max_steps ({self.num_training_steps}), stopping training.")
                    return
    def save_model(self) -> None:
        """Save the model."""
        if self.args.dist_config is not None and self.args.dist_config.name in ("deepspeed", "fsdp2"):
            from ..plugins.trainer_plugins.distributed.hub import DistributedPlugin
            # Sharded backends need their own gather/save logic.
            DistributedPlugin(self.args.dist_config.name).save_model(
                self.model, self.args.output_dir, self.renderer.processor
            )
        else:
            # Unwrap DDP (`.module`) before saving, if present.
            model_to_save = self.model.module if hasattr(self.model, "module") else self.model
            model_to_save.save_pretrained(self.args.output_dir, max_shard_size="4GB")
            self.renderer.processor.save_pretrained(self.args.output_dir, max_shard_size="4GB")
        logger.info_rank0(f"Model saved to {self.args.output_dir}")
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/core/base_trainer.py",
"license": "Apache License 2.0",
"lines": 197,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/core/model_engine.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The definition of model engine.
How to use:
model_engine = ModelEngine(model_args, is_train=True)
model_engine.processor: Get the tokenizer or multi-modal processor.
model_engine.renderer: Get the renderer.
model_engine.model_config: Get the model configuration.
model_engine.model: Get the HF model.
Init workflow:
1. Init processor.
2. Init render.
2. Init model config.
3. Init model.
4. Init adapter.
"""
import torch
from accelerate import init_empty_weights
from transformers import AutoConfig, AutoProcessor
from ..accelerator.helper import DeviceType
from ..accelerator.interface import DistributedInterface
from ..config.model_args import ModelArguments, ModelClass
from ..utils import logging
from ..utils.types import HFConfig, HFModel, Processor
from .utils.rendering import Renderer
logger = logging.get_logger(__name__)
class ModelEngine:
"""Model engine.
Args:
model_args: Model arguments.
is_train: Whether to train the model.
"""
def __init__(self, model_args: ModelArguments, is_train: bool = False) -> None:
self.args = model_args
"""Model arguments."""
self.is_train = is_train
"""Whether to train the model."""
self.processor = self._init_processor()
"""Tokenizer or multi-modal processor."""
self.renderer = Renderer(self.args.template, self.processor)
"""Renderer."""
self.model_config = self._init_model_config()
"""Model configuration."""
self.model = self._init_model()
"""HF model."""
def _init_processor(self) -> Processor:
"""Init processor.
NOTE: Transformers v5 always use fast tokenizer.
https://github.com/huggingface/transformers/blob/v5.0.0rc1/src/transformers/models/auto/tokenization_auto.py#L642
"""
return AutoProcessor.from_pretrained(
self.args.model,
trust_remote_code=self.args.trust_remote_code,
)
def _init_model_config(self) -> HFConfig:
"""Init model config."""
return AutoConfig.from_pretrained(
self.args.model,
trust_remote_code=self.args.trust_remote_code,
)
def _init_model(self) -> HFModel:
"""Init model.
Transformers can choose the proper model init context.
https://github.com/huggingface/transformers/blob/v5.0.0rc0/src/transformers/modeling_utils.py#L3538
"""
if self.args.init_config is not None:
from ..plugins.model_plugins.initialization import InitPlugin
init_device = InitPlugin(self.args.init_config.name)()
else:
init_device = DistributedInterface().current_device
init_kwargs = {"device_map": init_device}
if self.args.quant_config is not None:
from ..plugins.model_plugins.quantization import QuantizationPlugin
init_kwargs = QuantizationPlugin(self.args.quant_config.name)(
init_kwargs=init_kwargs,
config=self.model_config,
tokenizer=self.processor,
model_args=self.args,
is_trainable=self.is_train,
)
if self.args.model_class == ModelClass.LLM:
from transformers import AutoModelForCausalLM, AutoModelForImageTextToText
if type(self.model_config) in AutoModelForImageTextToText._model_mapping.keys():
AutoClass = AutoModelForImageTextToText
else:
AutoClass = AutoModelForCausalLM
elif self.args.model_class == ModelClass.CLS:
from transformers import AutoModelForTokenClassification
AutoClass = AutoModelForTokenClassification
else:
from transformers import AutoModel
AutoClass = AutoModel
if init_device.type == DeviceType.META:
assert self.args.quant_config is None, "Quantization is not supported with meta device."
with init_empty_weights():
model = AutoClass.from_config(self.model_config)
else:
model = AutoClass.from_pretrained(
self.args.model,
config=self.model_config,
dtype="auto",
trust_remote_code=self.args.trust_remote_code,
**init_kwargs,
)
if self.args.peft_config is None:
if self.is_train:
logger.info_rank0("Fine-tuning mode: full tuning")
model = model.to(torch.float32)
else:
logger.info_rank0("Inference the original model")
else:
from ..plugins.model_plugins.peft import PeftPlugin
model = PeftPlugin(self.args.peft_config.name)(model, self.args.peft_config, self.is_train)
if self.args.kernel_config is not None:
from ..plugins.model_plugins.kernels.interface import KernelPlugin
model = KernelPlugin(self.args.kernel_config.name)(
model, include_kernels=self.args.kernel_config.get("include_kernels")
)
return model
if __name__ == "__main__":
"""
python -m llamafactory.v1.core.model_engine --model llamafactory/tiny-random-qwen2.5
"""
from ..config.arg_parser import get_args
model_args, *_ = get_args()
model_engine = ModelEngine(model_args=model_args)
print(model_engine.processor)
print(model_engine.model_config)
print(model_engine.model)
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/core/model_engine.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/launcher.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
from copy import deepcopy
USAGE = (
"-" * 70
+ "\n"
+ "| Usage: |\n"
+ "| llamafactory-cli sft -h: train models |\n"
+ "| llamafactory-cli version: show version info |\n"
+ "| Hint: You can use `lmf` as a shortcut for `llamafactory-cli`. |\n"
+ "-" * 70
)
_DIST_TRAIN_COMMANDS = ("train", "sft", "dpo", "rm")
def launch():
from .accelerator.helper import get_device_count
from .utils.env import find_available_port, is_env_enabled, use_kt, use_ray
from .utils.logging import get_logger
logger = get_logger(__name__)
# NOTE:
# `llamafactory-cli <command> ...` enters here first.
# We may re-launch via `torchrun` for distributed training. In that case we must
# forward `<command>` as argv[1] to the re-executed script, otherwise the script
# will misinterpret the first user argument (e.g. yaml config) as the command.
command = sys.argv.pop(1) if len(sys.argv) > 1 else "help"
if command in _DIST_TRAIN_COMMANDS and (
is_env_enabled("FORCE_TORCHRUN") or (get_device_count() > 1 and not use_ray() and not use_kt())
):
nnodes = os.getenv("NNODES", "1")
node_rank = os.getenv("NODE_RANK", "0")
nproc_per_node = os.getenv("NPROC_PER_NODE", str(get_device_count()))
master_addr = os.getenv("MASTER_ADDR", "127.0.0.1")
master_port = os.getenv("MASTER_PORT", str(find_available_port()))
logger.info_rank0(f"Initializing {nproc_per_node} distributed tasks at: {master_addr}:{master_port}")
if int(nnodes) > 1:
logger.info_rank0(f"Multi-node training enabled: num nodes: {nnodes}, node rank: {node_rank}")
# elastic launch support
max_restarts = os.getenv("MAX_RESTARTS", "0")
rdzv_id = os.getenv("RDZV_ID")
min_nnodes = os.getenv("MIN_NNODES")
max_nnodes = os.getenv("MAX_NNODES")
env = deepcopy(os.environ)
if is_env_enabled("OPTIM_TORCH", "1"):
# optimize DDP, see https://zhuanlan.zhihu.com/p/671834539
env["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
env["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
torchrun_args = [
"torchrun",
"--nproc-per-node",
nproc_per_node,
]
if rdzv_id is not None:
# launch elastic job with fault tolerant support when possible
# see also https://docs.pytorch.org/docs/stable/elastic/train_script.html
rdzv_nnodes = nnodes
# elastic number of nodes if MIN_NNODES and MAX_NNODES are set
if min_nnodes is not None and max_nnodes is not None:
rdzv_nnodes = f"{min_nnodes}:{max_nnodes}"
torchrun_args.extend(
[
"--nnodes",
rdzv_nnodes,
"--rdzv-id",
rdzv_id,
"--rdzv-backend",
"c10d",
"--rdzv-endpoint",
f"{master_addr}:{master_port}",
"--max-restarts",
max_restarts,
]
)
else:
# NOTE: DO NOT USE shell=True to avoid security risk
torchrun_args.extend(
[
"--nnodes",
nnodes,
"--node_rank",
node_rank,
"--master_addr",
master_addr,
"--master_port",
master_port,
]
)
script_args = [__file__, command] + sys.argv[1:]
process = subprocess.run(
torchrun_args + script_args,
env=env,
check=True,
)
sys.exit(process.returncode)
elif command == "chat":
from .samplers.cli_sampler import run_chat
run_chat()
elif command == "merge":
from llamafactory.v1.plugins.model_plugins.peft import merge_and_export_model
merge_and_export_model()
elif command == "env":
raise NotImplementedError("Environment information is not implemented yet.")
elif command == "version":
raise NotImplementedError("Version information is not implemented yet.")
elif command == "help":
print(USAGE)
elif command in _DIST_TRAIN_COMMANDS:
# Single GPU training without torchrun
if command in ("train", "sft"):
from llamafactory.v1.trainers.sft_trainer import run_sft
run_sft()
elif command == "dpo":
raise NotImplementedError("DPO trainer is not implemented yet.")
elif command == "rm":
raise NotImplementedError("RM trainer is not implemented yet.")
else:
print(f"Unknown command: {command}.\n{USAGE}")
def main():
# sys.argv[1] contains the command (sft/dpo/rm/train), sys.argv[2:] contains the rest args
command = sys.argv[1] if len(sys.argv) > 1 else "sft"
# Routing needs the sub-command, but downstream trainers usually expect argv without it.
if command in _DIST_TRAIN_COMMANDS:
sys.argv.pop(1)
else:
# Backward-compat: if someone runs `torchrun launcher.py config.yaml`,
# treat it as sft by default.
if len(sys.argv) > 1 and sys.argv[1].endswith((".yaml", ".yml")):
command = "sft"
if command in ("train", "sft"):
from llamafactory.v1.trainers.sft_trainer import run_sft
run_sft()
elif command == "dpo":
# from llamafactory.v1.trainers.dpo_trainer import run_dpo
# run_dpo()
raise NotImplementedError("DPO trainer is not implemented yet.")
elif command == "rm":
# from llamafactory.v1.trainers.rm_trainer import run_rm
# run_rm()
raise NotImplementedError("RM trainer is not implemented yet.")
if __name__ == "__main__":
main()
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/launcher.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/plugins/data_plugins/converter.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import Any, Literal, NotRequired, TypedDict
from ...utils import logging
from ...utils.plugin import BasePlugin
from ...utils.types import DPOSample, Sample, SFTSample, ToolCall
logger = logging.get_logger(__name__)
class AlpacaSample(TypedDict, total=False):
system: NotRequired[str]
instruction: str
input: NotRequired[str]
output: str
SharegptMessage = TypedDict(
"SharegptMessage",
{"from": Literal["human", "gpt", "system", "function_call", "observation"], "value": str},
)
class SharegptSample(TypedDict, total=False):
conversations: list[SharegptMessage]
tools: NotRequired[str]
class OpenaiMessage(TypedDict, total=False):
role: Literal["user", "assistant", "tool"]
content: str
class OpenaiSample(TypedDict, total=False):
messages: list[OpenaiMessage]
class PairSample(TypedDict, total=False):
chosen: list[OpenaiMessage]
rejected: list[OpenaiMessage]
class DataConverterPlugin(BasePlugin):
"""Plugin for data converters."""
def __call__(self, raw_sample: dict[str, Any]) -> Sample:
return super().__call__(raw_sample)
@DataConverterPlugin("alpaca").register()
def alpaca_converter(raw_sample: AlpacaSample) -> SFTSample:
"""Convert Alpaca sample to SFT sample.
See raw example at: https://huggingface.co/datasets/llamafactory/alpaca_gpt4_en
Args:
raw_sample (AlpacaSample): Alpaca sample.
Returns:
SFTSample: SFT sample.
"""
messages = []
if "system" in raw_sample:
messages.append(
{"role": "system", "content": [{"type": "text", "value": raw_sample["system"]}], "loss_weight": 0.0}
)
if "instruction" in raw_sample or "input" in raw_sample:
messages.append(
{
"role": "user",
"content": [
{"type": "text", "value": raw_sample.get("instruction", "") + raw_sample.get("input", "")}
],
"loss_weight": 0.0,
}
)
if "output" in raw_sample:
messages.append(
{"role": "assistant", "content": [{"type": "text", "value": raw_sample["output"]}], "loss_weight": 1.0}
)
return {"messages": messages}
@DataConverterPlugin("sharegpt").register()
def sharegpt_converter(raw_sample: SharegptSample) -> SFTSample:
"""Convert ShareGPT sample to SFT sample.
See raw example at: https://huggingface.co/datasets/llamafactory/glaive_toolcall_en
Args:
raw_sample (SharegptSample): ShareGPT sample.
Returns:
SFTSample: SFT sample.
"""
tag_mapping = {
"system": "system",
"human": "user",
"gpt": "assistant",
"observation": "tool",
"function_call": "assistant",
}
sample = {}
messages = []
for message in raw_sample.get("conversations", []):
tag = message["from"]
if tag not in tag_mapping:
logger.warning_rank0(f"Unsupported role tag {tag} in message: {message}")
elif tag == "function_call":
try:
tool_calls: ToolCall | list[ToolCall] = json.loads(message["value"])
except json.JSONDecodeError:
logger.warning_rank0(f"Invalid tool call format: {str(message['value'])}")
continue
if not isinstance(tool_calls, list):
tool_calls = [tool_calls]
messages.append(
{
"role": "assistant",
"content": [{"type": "tool_call", "value": json.dumps(tool_call)} for tool_call in tool_calls],
"loss_weight": 1.0,
}
)
else:
messages.append(
{
"role": tag_mapping[tag],
"content": [{"type": "text", "value": message["value"]}],
"loss_weight": 1.0 if tag == "gpt" else 0.0,
}
)
sample["messages"] = messages
tools = raw_sample.get("tools")
if tools:
try:
tools: list[dict[str, Any]] = json.loads(tools)
sample["tools"] = json.dumps(tools)
except json.JSONDecodeError:
logger.warning_rank0(f"Invalid tools format: {str(tools)}")
return sample
@DataConverterPlugin("pair").register()
def pair_converter(raw_sample: PairSample) -> DPOSample:
"""Convert Pair sample to DPO sample.
See raw example at: https://huggingface.co/datasets/HuggingFaceH4/orca_dpo_pairs
Args:
raw_sample (PairSample): pair sample with chosen, rejected fields.
Returns:
DPOSample: DPO sample with chosen_messages and rejected_messages.
"""
def process_message(raw_messages: list[OpenaiMessage]):
messages = []
for message in raw_messages:
if message["role"] == "tool":
try:
tool_calls: ToolCall | list[ToolCall] = json.loads(message["content"])
except json.JSONDecodeError:
logger.warning_rank0(f"Invalid tool call format: {str(message['content'])}")
continue
if not isinstance(tool_calls, list):
tool_calls = [tool_calls]
messages.append(
{
"role": message["role"],
"content": [{"type": "tool_call", "value": json.dumps(tool_call)} for tool_call in tool_calls],
"loss_weight": 1.0 if message["role"] == "assistant" else 0.0,
}
)
else:
messages.append(
{
"role": message["role"],
"content": [{"type": "text", "value": message["content"]}],
"loss_weight": 1.0 if message["role"] == "assistant" else 0.0,
}
)
return messages
sample = {}
sample["chosen_messages"] = process_message(raw_sample.get("chosen", []))
sample["rejected_messages"] = process_message(raw_sample.get("rejected", []))
tools = raw_sample.get("tools")
if tools:
try:
tools: list[dict[str, Any]] = json.loads(tools)
sample["tools"] = json.dumps(tools)
except json.JSONDecodeError:
logger.warning_rank0(f"Invalid tools format: {str(tools)}")
return sample
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/plugins/data_plugins/converter.py",
"license": "Apache License 2.0",
"lines": 173,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/v1/trainers/sft_trainer.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..accelerator.interface import DistributedInterface
from ..config import InputArgument, get_args
from ..core.base_trainer import BaseTrainer
from ..core.data_engine import DataEngine
from ..core.model_engine import ModelEngine
from ..utils.types import BatchInput, Tensor
class SFTTrainer(BaseTrainer):
def compute_loss(self, batch: BatchInput) -> Tensor:
shift_loss_weights = batch["loss_weights"].to(self.device, non_blocking=True)[..., 1:]
log_probs = self.compute_log_probs(self.model, batch)
loss = (-log_probs * shift_loss_weights).sum() / (shift_loss_weights.sum() + 1e-6)
return loss
def run_sft(args: InputArgument = None):
model_args, data_args, training_args, _ = get_args(args)
DistributedInterface(training_args.dist_config)
train_dataset = DataEngine(data_args.train_dataset)
model_engine = ModelEngine(model_args, is_train=True)
trainer = SFTTrainer(
args=training_args,
model=model_engine.model,
renderer=model_engine.renderer,
train_dataset=train_dataset,
)
trainer.fit()
trainer.save_model()
DistributedInterface().destroy()
if __name__ == "__main__":
"""
python -m llamafactory.v1.trainers.sft_trainer --model Qwen/Qwen3-0.6B --train_dataset data/v1_sft_demo.yaml
"""
run_sft()
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/v1/trainers/sft_trainer.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/train/fp8_utils.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import types
from typing import TYPE_CHECKING, Any, Optional
from ..extras import logging
if TYPE_CHECKING:
from ..hparams import TrainingArguments
logger = logging.get_logger(__name__)
def create_fp8_kwargs(training_args: "TrainingArguments") -> list[Any]:
"""Create AORecipeKwargs for FP8 training with HuggingFace Accelerate.
Args:
training_args: Training arguments containing FP8 configuration
Returns:
List containing AORecipeKwargs if FP8 is enabled and supported, empty list otherwise
"""
if not training_args.fp8:
return []
backend = getattr(training_args, "fp8_backend", "auto")
logger.info_rank0(f"Creating FP8 configuration with backend: {backend}")
try:
# Use Transformer Engine backend (optimal for Hopper GPUs)
if backend == "te":
from accelerate.utils import FP8RecipeKwargs
logger.info_rank0("Using Transformer Engine FP8 backend")
return [FP8RecipeKwargs(backend="TE", fp8_format="HYBRID", amax_history_len=16, amax_compute_algo="max")]
# Use TorchAO backend (default)
from accelerate.utils import AORecipeKwargs
# Create Float8LinearConfig if torchao backend is used
config = None
if backend == "torchao" or backend == "auto":
from torchao.float8 import Float8LinearConfig
# Use rowwise scaling for better performance (as recommended by torchao)
# Configure alignment requirements for FP8 kernels
config = Float8LinearConfig.from_recipe_name("rowwise")
# Enable alignment for better kernel performance
if hasattr(config, "enable_amax_init"):
config.enable_amax_init = True
if hasattr(config, "enable_pre_and_post_forward"):
config.enable_pre_and_post_forward = True
# Create module filter function to skip problematic layers
# TorchAO FP8 requires dimensions divisible by 16 for optimal kernels
def module_filter_func(module, layer_name):
# Skip embedding and output layers for numerical stability
skip_layers = ["embed", "lm_head", "output", "classifier"]
if any(skip_name in layer_name.lower() for skip_name in skip_layers):
return False
# Only convert Linear layers
if not (hasattr(module, "weight") and len(module.weight.shape) == 2):
return False
# Check dimension alignment for FP8 kernels
weight = module.weight
in_features, out_features = weight.shape[1], weight.shape[0]
# Skip layers with dimensions not divisible by 16 to avoid kernel errors
if in_features % 16 != 0 or out_features % 16 != 0:
logger.debug(
f"Skipping layer {layer_name} with dimensions {out_features}x{in_features} (not divisible by 16)"
)
return False
return True
# Map FSDP all-gather setting if available (this affects the underlying implementation)
if (
hasattr(training_args, "fp8_enable_fsdp_float8_all_gather")
and training_args.fp8_enable_fsdp_float8_all_gather
):
logger.info_rank0("FSDP float8 all-gather optimization requested")
return [AORecipeKwargs(config=config, module_filter_func=module_filter_func)]
except Exception as e:
logger.info_rank0(f"Failed to create FP8 configuration: {e}")
return []
def get_fp8_mixed_precision(training_args: "TrainingArguments") -> Optional[str]:
"""Get the mixed precision setting for Accelerate when using FP8.
Args:
training_args: Training arguments containing FP8 configuration
Returns:
"fp8" if FP8 is enabled, None otherwise
"""
return "fp8" if training_args.fp8 else None
def configure_fp8_environment(training_args: "TrainingArguments") -> None:
"""Configure FP8 environment for HuggingFace Accelerate.
FP8 training is handled entirely through HuggingFace Accelerate, regardless of whether
DeepSpeed or FSDP is used for distributed training. This function sets up the environment
variables and validates the FP8 configuration.
Args:
training_args: Training arguments containing FP8 configuration
"""
if not training_args.fp8:
return
# Set mixed precision to fp8 for HuggingFace Accelerate
os.environ["ACCELERATE_MIXED_PRECISION"] = "fp8"
logger.info_rank0("Set ACCELERATE_MIXED_PRECISION=fp8")
# Configure FP8 backend and options
backend = getattr(training_args, "fp8_backend", "auto")
if backend != "auto":
os.environ["FP8_BACKEND"] = backend
logger.info_rank0(f"Set FP8_BACKEND={backend}")
# Create and validate FP8 recipe kwargs (for logging/debugging)
fp8_kwargs = create_fp8_kwargs(training_args)
logger.info_rank0(f"FP8 AORecipeKwargs created: {len(fp8_kwargs)} items")
# Enable FSDP float8 all-gather optimization if requested
if hasattr(training_args, "fp8_enable_fsdp_float8_all_gather") and training_args.fp8_enable_fsdp_float8_all_gather:
os.environ["FP8_ENABLE_FSDP_FLOAT8_ALL_GATHER"] = "true"
logger.info_rank0("Set FP8_ENABLE_FSDP_FLOAT8_ALL_GATHER=true")
logger.info_rank0("FP8 environment configured - all FP8 training handled by HuggingFace Accelerate")
def verify_fp8_status(accelerator, training_args: "TrainingArguments") -> None:
"""Verify that FP8 training is actually working after model preparation.
Args:
accelerator: The HuggingFace Accelerator instance
training_args: Training arguments containing FP8 configuration
"""
if not training_args.fp8:
return
# Check Accelerate's FP8 status
fp8_enabled = getattr(accelerator, "fp8_enabled", False)
fp8_backend_type = getattr(accelerator, "fp8_backend", "UNKNOWN")
backend = getattr(training_args, "fp8_backend", "auto")
if backend == "torchao" or backend == "auto":
logger.info_rank0(
"FP8 training enabled with TorchAO backend. For optimal performance, "
"ensure model layer dimensions are mostly divisible by 16. "
"If you encounter issues, try fp8_backend='te' with Transformer Engine."
)
else:
logger.info_rank0(f"FP8 training enabled with {backend} backend.")
logger.info_rank0(f"Accelerate FP8 status - enabled: {fp8_enabled}, backend: {fp8_backend_type}")
if not fp8_enabled:
logger.info_rank0("WARNING: FP8 was requested but Accelerate shows fp8_enabled=False. FP8 may not be working.")
def patch_accelerator_for_fp8() -> None:
"""Patch Accelerator to inject FP8 recipe kwargs.
This is needed because HuggingFace Trainer doesn't pass kwargs_handlers to Accelerator.
We monkey-patch Accelerator.__init__ to inject the FP8 recipe and force mixed_precision='fp8'.
"""
import transformer_engine.pytorch as te
from accelerate import Accelerator
# Guard against multiple patches
if getattr(Accelerator, "_te_fp8_patched", False):
return
# Stub for Accelerate 1.12+ compatibility (te.fp8.check_mxfp8_support doesn't exist yet)
if not hasattr(te, "fp8"):
te.fp8 = types.ModuleType("fp8")
te.fp8.check_mxfp8_support = lambda: (False, "MXFP8 not supported")
try:
from accelerate.utils import TERecipeKwargs as FP8Recipe
use_te_recipe = True
except ImportError:
from accelerate.utils import FP8RecipeKwargs as FP8Recipe
use_te_recipe = False
original_init = Accelerator.__init__
def patched_init(self, *args, **kwargs):
if "kwargs_handlers" not in kwargs or not kwargs["kwargs_handlers"]:
if use_te_recipe:
kwargs["kwargs_handlers"] = [
FP8Recipe(fp8_format="HYBRID", amax_history_len=16, amax_compute_algo="max")
]
else:
kwargs["kwargs_handlers"] = [
FP8Recipe(backend="TE", fp8_format="HYBRID", amax_history_len=16, amax_compute_algo="max")
]
# Only force mixed_precision when we inject handlers
kwargs["mixed_precision"] = "fp8"
return original_init(self, *args, **kwargs)
Accelerator.__init__ = patched_init
Accelerator._te_fp8_patched = True
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/train/fp8_utils.py",
"license": "Apache License 2.0",
"lines": 174,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
hiyouga/LlamaFactory:src/llamafactory/webui/components/footer.py | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...extras.misc import get_current_memory
from ...extras.packages import is_gradio_available
if is_gradio_available():
import gradio as gr
if TYPE_CHECKING:
from gradio.components import Component
def get_device_memory() -> "gr.Slider":
free, total = get_current_memory()
if total != -1:
used = round((total - free) / (1024**3), 2)
total = round(total / (1024**3), 2)
return gr.Slider(minimum=0, maximum=total, value=used, step=0.01, visible=True)
else:
return gr.Slider(visible=False)
def create_footer() -> dict[str, "Component"]:
with gr.Row():
device_memory = gr.Slider(visible=False, interactive=False)
timer = gr.Timer(value=5)
timer.tick(get_device_memory, outputs=[device_memory], queue=False)
return dict(device_memory=device_memory)
| {
"repo_id": "hiyouga/LlamaFactory",
"file_path": "src/llamafactory/webui/components/footer.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
home-assistant/core:homeassistant/components/matter/lock_helpers.py | """Lock-specific helpers for the Matter integration.
Provides DoorLock cluster endpoint resolution, feature detection, and
business logic for lock user/credential management.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, TypedDict
from chip.clusters import Objects as clusters
from homeassistant.exceptions import HomeAssistantError, ServiceValidationError
from .const import (
CLEAR_ALL_INDEX,
CRED_TYPE_FACE,
CRED_TYPE_FINGER_VEIN,
CRED_TYPE_FINGERPRINT,
CRED_TYPE_PIN,
CRED_TYPE_RFID,
CREDENTIAL_RULE_MAP,
CREDENTIAL_RULE_REVERSE_MAP,
CREDENTIAL_TYPE_MAP,
CREDENTIAL_TYPE_REVERSE_MAP,
LOCK_TIMED_REQUEST_TIMEOUT_MS,
USER_STATUS_MAP,
USER_STATUS_REVERSE_MAP,
USER_TYPE_MAP,
USER_TYPE_REVERSE_MAP,
)
# Error translation keys (used in ServiceValidationError/HomeAssistantError)
ERR_CREDENTIAL_TYPE_NOT_SUPPORTED = "credential_type_not_supported"
ERR_INVALID_CREDENTIAL_DATA = "invalid_credential_data"
# SetCredential response status mapping (Matter DlStatus)
_DlStatus = clusters.DoorLock.Enums.DlStatus
SET_CREDENTIAL_STATUS_MAP: dict[int, str] = {
_DlStatus.kSuccess: "success",
_DlStatus.kFailure: "failure",
_DlStatus.kDuplicate: "duplicate",
_DlStatus.kOccupied: "occupied",
}
if TYPE_CHECKING:
from matter_server.client import MatterClient
from matter_server.client.models.node import MatterEndpoint, MatterNode
# DoorLock Feature bitmap from Matter SDK
DoorLockFeature = clusters.DoorLock.Bitmaps.Feature
# --- TypedDicts for service action responses ---
class LockUserCredentialData(TypedDict):
"""Credential data within a user response."""
type: str
index: int | None
class LockUserData(TypedDict):
"""User data returned from lock queries."""
user_index: int | None
user_name: str | None
user_unique_id: int | None
user_status: str
user_type: str
credential_rule: str
credentials: list[LockUserCredentialData]
next_user_index: int | None
class SetLockUserResult(TypedDict):
"""Result of set_lock_user service action."""
user_index: int
class GetLockUsersResult(TypedDict):
"""Result of get_lock_users service action."""
max_users: int
users: list[LockUserData]
class GetLockInfoResult(TypedDict):
"""Result of get_lock_info service action."""
supports_user_management: bool
supported_credential_types: list[str]
max_users: int | None
max_pin_users: int | None
max_rfid_users: int | None
max_credentials_per_user: int | None
min_pin_length: int | None
max_pin_length: int | None
min_rfid_length: int | None
max_rfid_length: int | None
class SetLockCredentialResult(TypedDict):
"""Result of set_lock_credential service action."""
credential_index: int
user_index: int | None
next_credential_index: int | None
class GetLockCredentialStatusResult(TypedDict):
"""Result of get_lock_credential_status service action."""
credential_exists: bool
user_index: int | None
next_credential_index: int | None
def _get_lock_endpoint_from_node(node: MatterNode) -> MatterEndpoint | None:
"""Get the DoorLock endpoint from a node.
Returns the first endpoint that has the DoorLock cluster, or None if not found.
"""
for endpoint in node.endpoints.values():
if endpoint.has_cluster(clusters.DoorLock):
return endpoint
return None
def _get_feature_map(endpoint: MatterEndpoint) -> int | None:
"""Read the DoorLock FeatureMap attribute from an endpoint."""
value: int | None = endpoint.get_attribute_value(
None, clusters.DoorLock.Attributes.FeatureMap
)
return value
def _lock_supports_usr_feature(endpoint: MatterEndpoint) -> bool:
"""Check if lock endpoint supports USR (User) feature.
The USR feature indicates the lock supports user and credential management
commands like SetUser, GetUser, SetCredential, etc.
"""
feature_map = _get_feature_map(endpoint)
if feature_map is None:
return False
return bool(feature_map & DoorLockFeature.kUser)
# --- Pure utility functions ---
def _get_attr(obj: Any, attr: str) -> Any:
"""Get attribute from object or dict.
Matter SDK responses can be either dataclass objects or dicts depending on
the SDK version and serialization context.
"""
if isinstance(obj, dict):
return obj.get(attr)
return getattr(obj, attr, None)
def _get_supported_credential_types(feature_map: int) -> list[str]:
"""Get list of supported credential types from feature map."""
types = []
if feature_map & DoorLockFeature.kPinCredential:
types.append(CRED_TYPE_PIN)
if feature_map & DoorLockFeature.kRfidCredential:
types.append(CRED_TYPE_RFID)
if feature_map & DoorLockFeature.kFingerCredentials:
types.append(CRED_TYPE_FINGERPRINT)
if feature_map & DoorLockFeature.kFaceCredentials:
types.append(CRED_TYPE_FACE)
return types
def _format_user_response(user_data: Any) -> LockUserData | None:
    """Convert a GetUser command response into the service response format.

    Returns None for an empty slot (no response, or missing userStatus).
    """
    if user_data is None:
        return None
    status = _get_attr(user_data, "userStatus")
    if status is None:
        return None
    raw_credentials = _get_attr(user_data, "credentials") or []
    formatted_credentials: list[LockUserCredentialData] = [
        LockUserCredentialData(
            type=CREDENTIAL_TYPE_MAP.get(_get_attr(cred, "credentialType"), "unknown"),
            index=_get_attr(cred, "credentialIndex"),
        )
        for cred in raw_credentials
    ]
    return LockUserData(
        user_index=_get_attr(user_data, "userIndex"),
        user_name=_get_attr(user_data, "userName"),
        user_unique_id=_get_attr(user_data, "userUniqueID"),
        user_status=USER_STATUS_MAP.get(status, "unknown"),
        user_type=USER_TYPE_MAP.get(_get_attr(user_data, "userType"), "unknown"),
        credential_rule=CREDENTIAL_RULE_MAP.get(
            _get_attr(user_data, "credentialRule"), "unknown"
        ),
        credentials=formatted_credentials,
        next_user_index=_get_attr(user_data, "nextUserIndex"),
    )
# --- Credential management helpers ---
async def _clear_user_credentials(
    matter_client: MatterClient,
    node_id: int,
    endpoint_id: int,
    user_index: int,
) -> None:
    """Clear all credentials for a specific user.

    Fetches the user to get the credential list, then clears each credential
    with a separate timed ClearCredential command.
    """
    # Read the user first; its "credentials" field lists what must be removed.
    get_user_response = await matter_client.send_device_command(
        node_id=node_id,
        endpoint_id=endpoint_id,
        command=clusters.DoorLock.Commands.GetUser(userIndex=user_index),
    )
    creds = _get_attr(get_user_response, "credentials")
    if not creds:
        # Nothing to clear (empty slot or user without credentials).
        return
    for cred in creds:
        cred_type = _get_attr(cred, "credentialType")
        cred_index = _get_attr(cred, "credentialIndex")
        # One ClearCredential per credential; the command is timed per spec.
        await matter_client.send_device_command(
            node_id=node_id,
            endpoint_id=endpoint_id,
            command=clusters.DoorLock.Commands.ClearCredential(
                credential=clusters.DoorLock.Structs.CredentialStruct(
                    credentialType=cred_type,
                    credentialIndex=cred_index,
                ),
            ),
            timed_request_timeout_ms=LOCK_TIMED_REQUEST_TIMEOUT_MS,
        )
# --- Errors raised by the lock helper functions ---
# ServiceValidationError subclasses represent caller mistakes; plain
# HomeAssistantError subclasses represent device/communication failures.
class LockEndpointNotFoundError(HomeAssistantError):
    """Lock endpoint not found on node."""
class UsrFeatureNotSupportedError(ServiceValidationError):
    """Lock does not support USR (user management) feature."""
class UserSlotEmptyError(ServiceValidationError):
    """User slot is empty."""
class NoAvailableUserSlotsError(ServiceValidationError):
    """No available user slots on the lock."""
class CredentialTypeNotSupportedError(ServiceValidationError):
    """Lock does not support the requested credential type."""
class CredentialDataInvalidError(ServiceValidationError):
    """Credential data fails validation."""
class SetCredentialFailedError(HomeAssistantError):
    """SetCredential command returned a non-success status."""
def _get_lock_endpoint_or_raise(node: MatterNode) -> MatterEndpoint:
    """Return the node's DoorLock endpoint, raising when the node has none."""
    endpoint = _get_lock_endpoint_from_node(node)
    if endpoint is not None:
        return endpoint
    raise LockEndpointNotFoundError("No lock endpoint found on this device")
def _ensure_usr_support(lock_endpoint: MatterEndpoint) -> None:
    """Validate that the endpoint supports the USR (user management) feature.

    Raises UsrFeatureNotSupportedError otherwise.
    """
    if _lock_supports_usr_feature(lock_endpoint):
        return
    raise UsrFeatureNotSupportedError(
        "Lock does not support user/credential management"
    )
# --- High-level business logic functions ---
async def get_lock_info(
    matter_client: MatterClient,
    node: MatterNode,
) -> GetLockInfoResult:
    """Report the lock's capabilities and configuration.

    Returns a typed dict with lock capability information.
    Raises HomeAssistantError when the node has no DoorLock endpoint.
    """
    lock_endpoint = _get_lock_endpoint_or_raise(node)
    supports_usr = _lock_supports_usr_feature(lock_endpoint)
    # FeatureMap drives which credential types are advertised as supported.
    feature_map = (
        lock_endpoint.get_attribute_value(None, clusters.DoorLock.Attributes.FeatureMap)
        or 0
    )
    result = GetLockInfoResult(
        supports_user_management=supports_usr,
        supported_credential_types=_get_supported_credential_types(feature_map),
        max_users=None,
        max_pin_users=None,
        max_rfid_users=None,
        max_credentials_per_user=None,
        min_pin_length=None,
        max_pin_length=None,
        min_rfid_length=None,
        max_rfid_length=None,
    )
    # Capacity attributes are only meaningful when the USR feature exists.
    if supports_usr:
        attrs = clusters.DoorLock.Attributes

        def _read(attribute: Any) -> Any:
            """Read a single DoorLock attribute from the lock endpoint."""
            return lock_endpoint.get_attribute_value(None, attribute)

        result["max_users"] = _read(attrs.NumberOfTotalUsersSupported)
        result["max_pin_users"] = _read(attrs.NumberOfPINUsersSupported)
        result["max_rfid_users"] = _read(attrs.NumberOfRFIDUsersSupported)
        result["max_credentials_per_user"] = _read(
            attrs.NumberOfCredentialsSupportedPerUser
        )
        result["min_pin_length"] = _read(attrs.MinPINCodeLength)
        result["max_pin_length"] = _read(attrs.MaxPINCodeLength)
        result["min_rfid_length"] = _read(attrs.MinRFIDCodeLength)
        result["max_rfid_length"] = _read(attrs.MaxRFIDCodeLength)
    return result
async def set_lock_user(
    matter_client: MatterClient,
    node: MatterNode,
    *,
    user_index: int | None = None,
    user_name: str | None = None,
    user_unique_id: int | None = None,
    user_status: str | None = None,
    user_type: str | None = None,
    credential_rule: str | None = None,
) -> SetLockUserResult:
    """Add or update a user on the lock.

    When user_index is None a new user is created in the first free slot;
    otherwise the existing user at that index is modified.
    When user_status, user_type, or credential_rule is None, defaults are used
    for new users and existing values are preserved for modifications.

    Returns typed dict with user_index on success.
    Raises HomeAssistantError on failure.
    """
    lock_endpoint = _get_lock_endpoint_or_raise(node)
    _ensure_usr_support(lock_endpoint)
    if user_index is None:
        # Adding new user - find first available slot
        max_users = (
            lock_endpoint.get_attribute_value(
                None, clusters.DoorLock.Attributes.NumberOfTotalUsersSupported
            )
            or 0
        )
        # Probe slots 1..max_users; a missing userStatus marks an empty slot.
        for idx in range(1, max_users + 1):
            get_user_response = await matter_client.send_device_command(
                node_id=node.node_id,
                endpoint_id=lock_endpoint.endpoint_id,
                command=clusters.DoorLock.Commands.GetUser(userIndex=idx),
            )
            if _get_attr(get_user_response, "userStatus") is None:
                user_index = idx
                break
        if user_index is None:
            raise NoAvailableUserSlotsError("No available user slots on the lock")
        # Defaults for new users: enabled status, unrestricted type, single rule.
        user_status_enum = (
            USER_STATUS_REVERSE_MAP.get(
                user_status,
                clusters.DoorLock.Enums.UserStatusEnum.kOccupiedEnabled,
            )
            if user_status is not None
            else clusters.DoorLock.Enums.UserStatusEnum.kOccupiedEnabled
        )
        await matter_client.send_device_command(
            node_id=node.node_id,
            endpoint_id=lock_endpoint.endpoint_id,
            command=clusters.DoorLock.Commands.SetUser(
                operationType=clusters.DoorLock.Enums.DataOperationTypeEnum.kAdd,
                userIndex=user_index,
                userName=user_name,
                userUniqueID=user_unique_id,
                userStatus=user_status_enum,
                userType=USER_TYPE_REVERSE_MAP.get(
                    user_type,
                    clusters.DoorLock.Enums.UserTypeEnum.kUnrestrictedUser,
                )
                if user_type is not None
                else clusters.DoorLock.Enums.UserTypeEnum.kUnrestrictedUser,
                credentialRule=CREDENTIAL_RULE_REVERSE_MAP.get(
                    credential_rule,
                    clusters.DoorLock.Enums.CredentialRuleEnum.kSingle,
                )
                if credential_rule is not None
                else clusters.DoorLock.Enums.CredentialRuleEnum.kSingle,
            ),
            timed_request_timeout_ms=LOCK_TIMED_REQUEST_TIMEOUT_MS,
        )
    else:
        # Updating existing user - preserve existing values when not specified
        get_user_response = await matter_client.send_device_command(
            node_id=node.node_id,
            endpoint_id=lock_endpoint.endpoint_id,
            command=clusters.DoorLock.Commands.GetUser(userIndex=user_index),
        )
        if _get_attr(get_user_response, "userStatus") is None:
            raise UserSlotEmptyError(f"User slot {user_index} is empty")
        # Each field: caller-supplied value wins, else keep the current one.
        resolved_user_name = (
            user_name
            if user_name is not None
            else _get_attr(get_user_response, "userName")
        )
        resolved_unique_id = (
            user_unique_id
            if user_unique_id is not None
            else _get_attr(get_user_response, "userUniqueID")
        )
        resolved_status = (
            USER_STATUS_REVERSE_MAP[user_status]
            if user_status is not None
            else _get_attr(get_user_response, "userStatus")
        )
        resolved_type = (
            USER_TYPE_REVERSE_MAP[user_type]
            if user_type is not None
            else _get_attr(get_user_response, "userType")
        )
        resolved_rule = (
            CREDENTIAL_RULE_REVERSE_MAP[credential_rule]
            if credential_rule is not None
            else _get_attr(get_user_response, "credentialRule")
        )
        await matter_client.send_device_command(
            node_id=node.node_id,
            endpoint_id=lock_endpoint.endpoint_id,
            command=clusters.DoorLock.Commands.SetUser(
                operationType=clusters.DoorLock.Enums.DataOperationTypeEnum.kModify,
                userIndex=user_index,
                userName=resolved_user_name,
                userUniqueID=resolved_unique_id,
                userStatus=resolved_status,
                userType=resolved_type,
                credentialRule=resolved_rule,
            ),
            timed_request_timeout_ms=LOCK_TIMED_REQUEST_TIMEOUT_MS,
        )
    return SetLockUserResult(user_index=user_index)
async def get_lock_users(
    matter_client: MatterClient,
    node: MatterNode,
) -> GetLockUsersResult:
    """Get all users from the lock.

    Returns typed dict with users list and max_users capacity.
    Raises HomeAssistantError on failure.
    """
    lock_endpoint = _get_lock_endpoint_or_raise(node)
    _ensure_usr_support(lock_endpoint)
    max_users = (
        lock_endpoint.get_attribute_value(
            None, clusters.DoorLock.Attributes.NumberOfTotalUsersSupported
        )
        or 0
    )
    users: list[LockUserData] = []
    current_index = 1
    # Iterate through users using next_user_index for efficiency
    while current_index is not None and current_index <= max_users:
        get_user_response = await matter_client.send_device_command(
            node_id=node.node_id,
            endpoint_id=lock_endpoint.endpoint_id,
            command=clusters.DoorLock.Commands.GetUser(
                userIndex=current_index,
            ),
        )
        # Empty slots yield None and are skipped.
        user_data = _format_user_response(get_user_response)
        if user_data is not None:
            users.append(user_data)
        # Move to next user index
        next_index = _get_attr(get_user_response, "nextUserIndex")
        # Stop when the lock reports no further users or a non-advancing
        # index (guards against an infinite loop on misbehaving devices).
        if next_index is None or next_index <= current_index:
            break
        current_index = next_index
    return GetLockUsersResult(
        max_users=max_users,
        users=users,
    )
async def clear_lock_user(
    matter_client: MatterClient,
    node: MatterNode,
    user_index: int,
) -> None:
    """Clear a user from the lock, cleaning up credentials first.

    Use index 0xFFFE (CLEAR_ALL_INDEX) to clear all users.
    Raises HomeAssistantError on failure.
    """
    lock_endpoint = _get_lock_endpoint_or_raise(node)
    _ensure_usr_support(lock_endpoint)
    if user_index == CLEAR_ALL_INDEX:
        # Clear all: clear all credentials first, then all users
        # (credential=None requests clearing every credential).
        await matter_client.send_device_command(
            node_id=node.node_id,
            endpoint_id=lock_endpoint.endpoint_id,
            command=clusters.DoorLock.Commands.ClearCredential(
                credential=None,
            ),
            timed_request_timeout_ms=LOCK_TIMED_REQUEST_TIMEOUT_MS,
        )
    else:
        # Clear credentials for this specific user before deleting them
        await _clear_user_credentials(
            matter_client,
            node.node_id,
            lock_endpoint.endpoint_id,
            user_index,
        )
    # Finally remove the user record itself (single user or all, per index).
    await matter_client.send_device_command(
        node_id=node.node_id,
        endpoint_id=lock_endpoint.endpoint_id,
        command=clusters.DoorLock.Commands.ClearUser(
            userIndex=user_index,
        ),
        timed_request_timeout_ms=LOCK_TIMED_REQUEST_TIMEOUT_MS,
    )
# --- Credential validation helpers ---
# Map credential type strings to the feature bit that must be set
_CREDENTIAL_TYPE_FEATURE_MAP: dict[str, int] = {
    CRED_TYPE_PIN: DoorLockFeature.kPinCredential,
    CRED_TYPE_RFID: DoorLockFeature.kRfidCredential,
    CRED_TYPE_FINGERPRINT: DoorLockFeature.kFingerCredentials,
    # Finger-vein credentials share the fingerprint feature bit.
    CRED_TYPE_FINGER_VEIN: DoorLockFeature.kFingerCredentials,
    CRED_TYPE_FACE: DoorLockFeature.kFaceCredentials,
}
def _validate_credential_type_support(
    lock_endpoint: MatterEndpoint, credential_type: str
) -> None:
    """Validate the lock supports the requested credential type.

    Raises CredentialTypeNotSupportedError when the type is unknown or the
    lock's feature map does not advertise it.
    """
    required_bit = _CREDENTIAL_TYPE_FEATURE_MAP.get(credential_type)
    # Short-circuit: the feature map is only read for known credential types.
    supported = required_bit is not None and bool(
        (_get_feature_map(lock_endpoint) or 0) & required_bit
    )
    if supported:
        return
    raise CredentialTypeNotSupportedError(
        translation_domain="matter",
        translation_key=ERR_CREDENTIAL_TYPE_NOT_SUPPORTED,
        translation_placeholders={"credential_type": credential_type},
    )
def _validate_credential_data(
    lock_endpoint: MatterEndpoint, credential_type: str, credential_data: str
) -> None:
    """Validate credential data against the lock's advertised constraints.

    For PIN: checks digits-only and length against Min/MaxPINCodeLength.
    For RFID: checks valid hex and byte length against Min/MaxRFIDCodeLength.
    Other credential types are not length-validated here.

    Raises CredentialDataInvalidError on failure.
    """
    if credential_type == CRED_TYPE_PIN:
        if not credential_data.isdigit():
            raise CredentialDataInvalidError(
                translation_domain="matter",
                translation_key=ERR_INVALID_CREDENTIAL_DATA,
                translation_placeholders={"reason": "PIN must contain only digits"},
            )
        # Fall back to permissive bounds when the lock does not report them.
        min_len = (
            lock_endpoint.get_attribute_value(
                None, clusters.DoorLock.Attributes.MinPINCodeLength
            )
            or 0
        )
        max_len = (
            lock_endpoint.get_attribute_value(
                None, clusters.DoorLock.Attributes.MaxPINCodeLength
            )
            or 255
        )
        if not min_len <= len(credential_data) <= max_len:
            # Bug fix: report the upper bound actually read from the device
            # (previously a bogus hard-coded "71,942" was interpolated).
            raise CredentialDataInvalidError(
                translation_domain="matter",
                translation_key=ERR_INVALID_CREDENTIAL_DATA,
                translation_placeholders={
                    "reason": (f"PIN length must be between {min_len} and {max_len}")
                },
            )
    elif credential_type == CRED_TYPE_RFID:
        try:
            rfid_bytes = bytes.fromhex(credential_data)
        except ValueError as err:
            raise CredentialDataInvalidError(
                translation_domain="matter",
                translation_key=ERR_INVALID_CREDENTIAL_DATA,
                translation_placeholders={
                    "reason": "RFID data must be valid hexadecimal"
                },
            ) from err
        min_len = (
            lock_endpoint.get_attribute_value(
                None, clusters.DoorLock.Attributes.MinRFIDCodeLength
            )
            or 0
        )
        max_len = (
            lock_endpoint.get_attribute_value(
                None, clusters.DoorLock.Attributes.MaxRFIDCodeLength
            )
            or 255
        )
        if not min_len <= len(rfid_bytes) <= max_len:
            # Bug fix: use the device-reported maximum, not "71,942".
            raise CredentialDataInvalidError(
                translation_domain="matter",
                translation_key=ERR_INVALID_CREDENTIAL_DATA,
                translation_placeholders={
                    "reason": (
                        f"RFID data length must be between"
                        f" {min_len} and {max_len} bytes"
                    )
                },
            )
def _credential_data_to_bytes(credential_type: str, credential_data: str) -> bytes:
    """Encode the credential string for transmission in a Matter command."""
    # RFID data is supplied as hex; PIN and other types travel as UTF-8 text.
    if credential_type != CRED_TYPE_RFID:
        return credential_data.encode()
    return bytes.fromhex(credential_data)
# --- Credential business logic functions ---
async def set_lock_credential(
    matter_client: MatterClient,
    node: MatterNode,
    *,
    credential_type: str,
    credential_data: str,
    credential_index: int | None = None,
    user_index: int | None = None,
    user_status: str | None = None,
    user_type: str | None = None,
) -> SetLockCredentialResult:
    """Add or modify a credential on the lock.

    When credential_index is None the first unoccupied slot is used (Add);
    when a slot is given and already occupied, the operation becomes Modify.

    Returns typed dict with credential_index, user_index, and next_credential_index.
    Raises ServiceValidationError for validation failures.
    Raises HomeAssistantError for device communication failures.
    """
    lock_endpoint = _get_lock_endpoint_or_raise(node)
    _ensure_usr_support(lock_endpoint)
    _validate_credential_type_support(lock_endpoint, credential_type)
    _validate_credential_data(lock_endpoint, credential_type, credential_data)
    cred_type_int = CREDENTIAL_TYPE_REVERSE_MAP[credential_type]
    cred_data_bytes = _credential_data_to_bytes(credential_type, credential_data)
    # Determine operation type and credential index
    operation_type = clusters.DoorLock.Enums.DataOperationTypeEnum.kAdd
    if credential_index is None:
        # Auto-find first available credential slot
        max_creds = (
            lock_endpoint.get_attribute_value(
                None,
                clusters.DoorLock.Attributes.NumberOfCredentialsSupportedPerUser,
            )
            or 5
        )
        # Probe each slot with GetCredentialStatus until an empty one appears.
        for idx in range(1, max_creds + 1):
            status_response = await matter_client.send_device_command(
                node_id=node.node_id,
                endpoint_id=lock_endpoint.endpoint_id,
                command=clusters.DoorLock.Commands.GetCredentialStatus(
                    credential=clusters.DoorLock.Structs.CredentialStruct(
                        credentialType=cred_type_int,
                        credentialIndex=idx,
                    ),
                ),
            )
            if not _get_attr(status_response, "credentialExists"):
                credential_index = idx
                break
        if credential_index is None:
            raise NoAvailableUserSlotsError("No available credential slots on the lock")
    else:
        # Check if slot is occupied to determine Add vs Modify
        status_response = await matter_client.send_device_command(
            node_id=node.node_id,
            endpoint_id=lock_endpoint.endpoint_id,
            command=clusters.DoorLock.Commands.GetCredentialStatus(
                credential=clusters.DoorLock.Structs.CredentialStruct(
                    credentialType=cred_type_int,
                    credentialIndex=credential_index,
                ),
            ),
        )
        if _get_attr(status_response, "credentialExists"):
            operation_type = clusters.DoorLock.Enums.DataOperationTypeEnum.kModify
    # Resolve optional user_status and user_type enums
    resolved_user_status = (
        USER_STATUS_REVERSE_MAP.get(user_status) if user_status is not None else None
    )
    resolved_user_type = (
        USER_TYPE_REVERSE_MAP.get(user_type) if user_type is not None else None
    )
    set_cred_response = await matter_client.send_device_command(
        node_id=node.node_id,
        endpoint_id=lock_endpoint.endpoint_id,
        command=clusters.DoorLock.Commands.SetCredential(
            operationType=operation_type,
            credential=clusters.DoorLock.Structs.CredentialStruct(
                credentialType=cred_type_int,
                credentialIndex=credential_index,
            ),
            credentialData=cred_data_bytes,
            userIndex=user_index,
            userStatus=resolved_user_status,
            userType=resolved_user_type,
        ),
        timed_request_timeout_ms=LOCK_TIMED_REQUEST_TIMEOUT_MS,
    )
    # The command can succeed at the transport level but still report a
    # non-success status (e.g. duplicate PIN); surface that as an error.
    status_code = _get_attr(set_cred_response, "status")
    status_str = SET_CREDENTIAL_STATUS_MAP.get(status_code, f"unknown({status_code})")
    if status_str != "success":
        raise SetCredentialFailedError(
            translation_domain="matter",
            translation_key="set_credential_failed",
            translation_placeholders={"status": status_str},
        )
    return SetLockCredentialResult(
        credential_index=credential_index,
        user_index=_get_attr(set_cred_response, "userIndex"),
        next_credential_index=_get_attr(set_cred_response, "nextCredentialIndex"),
    )
async def clear_lock_credential(
    matter_client: MatterClient,
    node: MatterNode,
    *,
    credential_type: str,
    credential_index: int,
) -> None:
    """Remove a single credential from the lock.

    Raises HomeAssistantError on failure.
    """
    lock_endpoint = _get_lock_endpoint_or_raise(node)
    _ensure_usr_support(lock_endpoint)
    target = clusters.DoorLock.Structs.CredentialStruct(
        credentialType=CREDENTIAL_TYPE_REVERSE_MAP[credential_type],
        credentialIndex=credential_index,
    )
    await matter_client.send_device_command(
        node_id=node.node_id,
        endpoint_id=lock_endpoint.endpoint_id,
        command=clusters.DoorLock.Commands.ClearCredential(credential=target),
        timed_request_timeout_ms=LOCK_TIMED_REQUEST_TIMEOUT_MS,
    )
async def get_lock_credential_status(
    matter_client: MatterClient,
    node: MatterNode,
    *,
    credential_type: str,
    credential_index: int,
) -> GetLockCredentialStatusResult:
    """Query the occupancy status of a credential slot on the lock.

    Returns typed dict with credential_exists, user_index, next_credential_index.
    Raises HomeAssistantError on failure.
    """
    lock_endpoint = _get_lock_endpoint_or_raise(node)
    _ensure_usr_support(lock_endpoint)
    target = clusters.DoorLock.Structs.CredentialStruct(
        credentialType=CREDENTIAL_TYPE_REVERSE_MAP[credential_type],
        credentialIndex=credential_index,
    )
    response = await matter_client.send_device_command(
        node_id=node.node_id,
        endpoint_id=lock_endpoint.endpoint_id,
        command=clusters.DoorLock.Commands.GetCredentialStatus(credential=target),
    )
    return GetLockCredentialStatusResult(
        credential_exists=bool(_get_attr(response, "credentialExists")),
        user_index=_get_attr(response, "userIndex"),
        next_credential_index=_get_attr(response, "nextCredentialIndex"),
    )
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/matter/lock_helpers.py",
"license": "Apache License 2.0",
"lines": 725,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
home-assistant/core:homeassistant/components/aws_s3/diagnostics.py | """Diagnostics support for AWS S3."""
from __future__ import annotations
import dataclasses
from typing import Any
from homeassistant.components.backup import (
DATA_MANAGER as BACKUP_DATA_MANAGER,
BackupManager,
)
from homeassistant.components.diagnostics import async_redact_data
from homeassistant.core import HomeAssistant
from .const import (
CONF_ACCESS_KEY_ID,
CONF_BUCKET,
CONF_PREFIX,
CONF_SECRET_ACCESS_KEY,
DOMAIN,
)
from .coordinator import S3ConfigEntry
from .helpers import async_list_backups_from_s3
# Config keys whose values must be redacted from diagnostics output.
TO_REDACT = (CONF_ACCESS_KEY_ID, CONF_SECRET_ACCESS_KEY)
async def async_get_config_entry_diagnostics(
    hass: HomeAssistant,
    entry: S3ConfigEntry,
) -> dict[str, Any]:
    """Return diagnostics for a config entry."""
    coordinator = entry.runtime_data
    manager: BackupManager = hass.data[BACKUP_DATA_MANAGER]
    backups = await async_list_backups_from_s3(
        coordinator.client,
        bucket=entry.data[CONF_BUCKET],
        prefix=entry.data.get(CONF_PREFIX, ""),
    )
    # Only report backup agents belonging to this integration.
    agents = [
        {"name": agent.name}
        for agent in manager.backup_agents.values()
        if agent.domain == DOMAIN
    ]
    diagnostics = {
        "coordinator_data": dataclasses.asdict(coordinator.data),
        "config": {
            **entry.data,
            **entry.options,
        },
        "backup_agents": agents,
        "backup": [backup.as_dict() for backup in backups],
    }
    # Strip credentials before the payload leaves the instance.
    return async_redact_data(diagnostics, TO_REDACT)
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/aws_s3/diagnostics.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
home-assistant/core:tests/components/aws_s3/test_diagnostics.py | """Tests for AWS S3 diagnostics."""
from syrupy.assertion import SnapshotAssertion
from homeassistant.components.backup import DOMAIN as BACKUP_DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
from tests.components.diagnostics import get_diagnostics_for_config_entry
from tests.typing import ClientSessionGenerator
async def test_entry_diagnostics(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    mock_config_entry: MockConfigEntry,
    snapshot: SnapshotAssertion,
) -> None:
    """Test config entry diagnostics."""
    mock_config_entry.add_to_hass(hass)
    # The diagnostics payload references backup agents, so the backup
    # integration must be set up before loading the entry.
    assert await async_setup_component(hass, BACKUP_DOMAIN, {BACKUP_DOMAIN: {}})
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()
    # Compare the full diagnostics payload against the stored snapshot.
    assert (
        await get_diagnostics_for_config_entry(hass, hass_client, mock_config_entry)
        == snapshot
    )
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/aws_s3/test_diagnostics.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/orvibo/config_flow.py | """Config flow for the orvibo integration."""
import asyncio
import logging
from typing import Any
from orvibo.s20 import S20, S20Exception, discover
import voluptuous as vol
from homeassistant.config_entries import ConfigFlow, ConfigFlowResult
from homeassistant.const import CONF_HOST, CONF_MAC, CONF_NAME
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.device_registry import format_mac
from .const import CONF_SWITCH_LIST, DEFAULT_NAME, DOMAIN
_LOGGER = logging.getLogger(__name__)
# Form schema for the manual "edit" step: host is required, MAC is optional
# (it can be discovered from the device when omitted).
FULL_EDIT_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Optional(CONF_MAC): cv.string,
    }
)
class S20ConfigFlow(ConfigFlow, domain=DOMAIN):
    """Handle the config flow for Orvibo S20 switches."""
    VERSION = 1
    MINOR_VERSION = 1
    def __init__(self) -> None:
        """Initialize an instance of the S20 config flow."""
        # Background network discovery task, reused across progress polls.
        self.discovery_task: asyncio.Task | None = None
        # Discovered switches keyed by IP, excluding already-configured ones.
        self._discovered_switches: dict[str, dict[str, Any]] = {}
        # Host/MAC of the switch selected in the choose_switch step.
        self.chosen_switch: dict[str, Any] = {}
    async def _async_discover(self) -> None:
        """Discover S20 switches on the network and drop known devices."""
        def _filter_discovered_switches(
            switches: dict[str, dict[str, Any]],
        ) -> dict[str, dict[str, Any]]:
            """Keep only switches whose MAC has no existing config entry."""
            # Get existing unique_ids from config entries
            existing_ids = {entry.unique_id for entry in self._async_current_entries()}
            _LOGGER.debug("Existing unique IDs: %s", existing_ids)
            # Build a new filtered dict
            filtered = {}
            for ip, info in switches.items():
                mac_bytes = info.get("mac")
                if not mac_bytes:
                    continue  # skip if no MAC
                unique_id = format_mac(mac_bytes.hex()).lower()
                if unique_id not in existing_ids:
                    filtered[ip] = info
            _LOGGER.debug("New switches: %s", filtered)
            return filtered
        # Discover S20 devices.
        # discover() does blocking network I/O, so run it in the executor.
        _LOGGER.debug("Discovering S20 switches")
        _unfiltered_switches = await self.hass.async_add_executor_job(discover)
        _LOGGER.debug("All discovered switches: %s", _unfiltered_switches)
        self._discovered_switches = _filter_discovered_switches(_unfiltered_switches)
    async def async_step_user(
        self, user_input: dict[str, Any] | None = None
    ) -> ConfigFlowResult:
        """Handle a flow initialized by the user."""
        # Let the user pick between automatic discovery and manual entry.
        return self.async_show_menu(
            step_id="user", menu_options=["start_discovery", "edit"]
        )
    async def _validate_input(self, user_input: dict[str, Any]) -> str | None:
        """Validate user input and discover MAC if missing.

        Returns an error key for the form, or None on success. On success
        user_input[CONF_MAC] is guaranteed to hold a normalized MAC.
        """
        if user_input.get(CONF_MAC):
            # Normalize and sanity-check the user-supplied MAC format.
            user_input[CONF_MAC] = format_mac(user_input[CONF_MAC]).lower()
            if len(user_input[CONF_MAC]) != 17 or user_input[CONF_MAC].count(":") != 5:
                return "invalid_mac"
        try:
            # Connecting to the device is blocking; run in the executor.
            device = await self.hass.async_add_executor_job(
                S20,
                user_input[CONF_HOST],
                user_input.get(CONF_MAC),
            )
            if not user_input.get(CONF_MAC):
                # Using private attribute access here since S20 class doesn't have a public method to get the MAC without repeating discovery
                if not device._mac:  # noqa: SLF001
                    return "cannot_discover"
                user_input[CONF_MAC] = format_mac(device._mac.hex()).lower()  # noqa: SLF001
        except S20Exception:
            return "cannot_connect"
        return None
    async def async_step_edit(
        self, user_input: dict[str, Any] | None = None
    ) -> ConfigFlowResult:
        """Edit a discovered or manually configured server."""
        errors = {}
        if user_input:
            error = await self._validate_input(user_input)
            if not error:
                # The normalized MAC is the entry's unique id.
                await self.async_set_unique_id(user_input[CONF_MAC])
                self._abort_if_unique_id_configured()
                return self.async_create_entry(
                    title=f"{DEFAULT_NAME} ({user_input[CONF_HOST]})", data=user_input
                )
            errors["base"] = error
        return self.async_show_form(
            step_id="edit",
            data_schema=FULL_EDIT_SCHEMA,
            errors=errors,
        )
    async def async_step_start_discovery(
        self, user_input: dict[str, Any] | None = None
    ) -> ConfigFlowResult:
        """Handle a flow initialized by the user."""
        if not self.discovery_task:
            # First call: kick off discovery and show the progress screen.
            self.discovery_task = self.hass.async_create_task(self._async_discover())
            return self.async_show_progress(
                step_id="start_discovery",
                progress_action="start_discovery",
                progress_task=self.discovery_task,
            )
        if self.discovery_task.done():
            try:
                self.discovery_task.result()
            except (S20Exception, OSError) as err:
                # Discovery errors are non-fatal; the failure step lets the
                # user retry or fall back to manual entry.
                _LOGGER.debug("Discovery task failed: %s", err)
            self.discovery_task = None
            return self.async_show_progress_done(
                next_step_id=(
                    "choose_switch" if self._discovered_switches else "discovery_failed"
                )
            )
        # Still running: keep showing the progress screen.
        return self.async_show_progress(
            step_id="start_discovery",
            progress_action="start_discovery",
            progress_task=self.discovery_task,
        )
    async def async_step_choose_switch(
        self, user_input: dict[str, Any] | None = None
    ) -> ConfigFlowResult:
        """Choose manual or discover flow."""
        _chosen_host: str
        if user_input:
            _chosen_host = user_input[CONF_SWITCH_LIST]
            for host, data in self._discovered_switches.items():
                if _chosen_host == host:
                    self.chosen_switch[CONF_HOST] = host
                    self.chosen_switch[CONF_MAC] = format_mac(
                        data[CONF_MAC].hex()
                    ).lower()
                    await self.async_set_unique_id(self.chosen_switch[CONF_MAC])
                    self._abort_if_unique_id_configured()
                    return self.async_create_entry(
                        title=f"{DEFAULT_NAME} ({host})", data=self.chosen_switch
                    )
        _LOGGER.debug("discovered switches: %s", self._discovered_switches)
        # Present each discovered switch as "host (mac)".
        _options = {
            host: f"{host} ({format_mac(data[CONF_MAC].hex()).lower()})"
            for host, data in self._discovered_switches.items()
        }
        return self.async_show_form(
            step_id="choose_switch",
            data_schema=vol.Schema({vol.Required(CONF_SWITCH_LIST): vol.In(_options)}),
        )
    async def async_step_discovery_failed(
        self, user_input: dict[str, Any] | None = None
    ) -> ConfigFlowResult:
        """Handle a failed discovery."""
        # Offer a retry of discovery or a switch to manual entry.
        return self.async_show_menu(
            step_id="discovery_failed", menu_options=["start_discovery", "edit"]
        )
    async def async_step_import(self, user_input: dict[str, Any]) -> ConfigFlowResult:
        """Handle import from configuration.yaml."""
        _LOGGER.debug("Importing config: %s", user_input)
        error = await self._validate_input(user_input)
        if error:
            return self.async_abort(reason=error)
        await self.async_set_unique_id(user_input[CONF_MAC])
        self._abort_if_unique_id_configured()
        return self.async_create_entry(
            title=user_input.get(CONF_NAME, user_input[CONF_HOST]), data=user_input
        )
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/orvibo/config_flow.py",
"license": "Apache License 2.0",
"lines": 168,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
home-assistant/core:tests/components/orvibo/test_config_flow.py | """Tests for the Orvibo config flow in Home Assistant core."""
import asyncio
from typing import Any
from unittest.mock import patch
from orvibo.s20 import S20Exception
import pytest
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.orvibo.const import CONF_SWITCH_LIST, DEFAULT_NAME, DOMAIN
from homeassistant.const import CONF_HOST, CONF_MAC
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import FlowResultType
from tests.common import MockConfigEntry
async def test_user_menu_display(hass: HomeAssistant) -> None:
    """Initial step displays the user menu correctly."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # The first step must be a menu offering discovery and manual entry.
    assert result["type"] == FlowResultType.MENU
    assert result["step_id"] == "user"
    assert set(result["menu_options"]) == {"start_discovery", "edit"}
@pytest.mark.parametrize(
    ("user_input", "expected_mac", "mock_mac_bytes"),
    [
        # Case 1: the user supplies the MAC explicitly.
        (
            {CONF_HOST: "192.168.1.2", CONF_MAC: "ac:cf:23:12:34:56"},
            "ac:cf:23:12:34:56",
            None,
        ),
        # Case 2: no MAC given; the flow reads it from the device.
        ({CONF_HOST: "192.168.1.2"}, "aa:bb:cc:dd:ee:ff", b"\xaa\xbb\xcc\xdd\xee\xff"),
    ],
)
async def test_edit_flow_success(
    hass: HomeAssistant,
    mock_discover,
    mock_setup_entry,
    mock_s20,
    user_input: dict[str, Any],
    expected_mac: str,
    mock_mac_bytes: bytes | None,
) -> None:
    """Test manual flow succeeds with provided MAC or discovered MAC."""
    mock_s20.return_value._mac = mock_mac_bytes
    mock_discover.return_value = {"192.168.1.2": {"mac": b"\xaa\xbb\xcc\xdd\xee\xff"}}
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Navigate menu -> manual edit step, then submit the form.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"next_step_id": "edit"}
    )
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input
    )
    assert result["type"] == FlowResultType.CREATE_ENTRY
    assert result["title"] == f"{DEFAULT_NAME} (192.168.1.2)"
    assert result["data"][CONF_HOST] == "192.168.1.2"
    assert result["data"][CONF_MAC] == expected_mac
    # The normalized MAC doubles as the entry's unique id.
    assert result["result"].unique_id == expected_mac
@pytest.mark.parametrize(
    ("user_input", "expected_error", "mock_exception", "mock_mac_bytes"),
    [
        # Malformed MAC string is rejected before contacting the device.
        (
            {CONF_HOST: "192.168.1.2", CONF_MAC: "not_a_mac"},
            "invalid_mac",
            None,
            b"dummy",
        ),
        # No MAC supplied and the device reports none -> cannot_discover.
        ({CONF_HOST: "192.168.1.99"}, "cannot_discover", None, None),
        # Device connection raises -> cannot_connect.
        (
            {CONF_HOST: "192.168.1.3", CONF_MAC: "ac:cf:23:12:34:56"},
            "cannot_connect",
            S20Exception("Connection failed"),
            b"dummy",
        ),
    ],
)
async def test_edit_flow_errors(
    hass: HomeAssistant,
    mock_s20,
    mock_discover,
    mock_setup_entry,
    user_input: dict[str, Any],
    expected_error: str,
    mock_exception: Exception | None,
    mock_mac_bytes: bytes | None,
) -> None:
    """Test various errors in the manual (edit) step and recover."""
    mock_discover.return_value = {}
    mock_s20.side_effect = mock_exception
    mock_s20.return_value._mac = mock_mac_bytes
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"next_step_id": "edit"}
    )
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input
    )
    # The form is shown again with the expected error attached.
    assert result["type"] == FlowResultType.FORM
    assert result["errors"]["base"] == expected_error
    # Recovery: clear the failure mode and resubmit valid input.
    mock_s20.side_effect = None
    mock_s20.return_value._mac = b"\xac\xcf\x23\x12\x34\x56"
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {CONF_HOST: "192.168.1.2", CONF_MAC: "ac:cf:23:12:34:56"},
    )
    assert result["type"] == FlowResultType.CREATE_ENTRY
    assert result["title"] == f"{DEFAULT_NAME} (192.168.1.2)"
    assert result["data"][CONF_HOST] == "192.168.1.2"
    assert result["data"][CONF_MAC] == "ac:cf:23:12:34:56"
async def test_discovery_success(
    hass: HomeAssistant, mock_discover, mock_setup_entry
) -> None:
    """Verify discovery finds devices and completes config entry creation."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == FlowResultType.MENU
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"next_step_id": "start_discovery"}
    )
    # Discovery runs as a background task behind a progress screen.
    assert result["type"] == FlowResultType.SHOW_PROGRESS
    assert result["step_id"] == "start_discovery"
    assert result["progress_action"] == "start_discovery"
    await hass.async_block_till_done()
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    # Once discovery completes, the user picks a switch from the results.
    assert result["type"] == FlowResultType.FORM
    assert result["step_id"] == "choose_switch"
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_SWITCH_LIST: "192.168.1.100"}
    )
    assert result["type"] == FlowResultType.CREATE_ENTRY
    assert result["title"] == f"{DEFAULT_NAME} (192.168.1.100)"
    assert result["data"][CONF_HOST] == "192.168.1.100"
    assert result["data"][CONF_MAC] == "ac:cf:23:12:34:56"
    assert result["result"].unique_id == "ac:cf:23:12:34:56"


async def test_discovery_no_devices(
    hass: HomeAssistant, mock_discover, mock_s20, mock_setup_entry
) -> None:
    """Discovery with no found devices should go to discovery_failed and recover via edit."""
    mock_discover.return_value = {}
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"next_step_id": "start_discovery"}
    )
    await hass.async_block_till_done()
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    # Empty discovery results land on the discovery_failed menu.
    assert result["type"] == FlowResultType.MENU
    assert result["step_id"] == "discovery_failed"
    # The menu offers manual entry as a fallback path.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"next_step_id": "edit"}
    )
    assert result["type"] == FlowResultType.FORM
    assert result["step_id"] == "edit"
    mock_s20.return_value._mac = b"\xaa\xbb\xcc\xdd\xee\xff"
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        {CONF_HOST: "192.168.1.10", CONF_MAC: "aa:bb:cc:dd:ee:ff"},
    )
    assert result["type"] == FlowResultType.CREATE_ENTRY
    assert result["title"] == f"{DEFAULT_NAME} (192.168.1.10)"
    assert result["data"][CONF_HOST] == "192.168.1.10"
    assert result["data"][CONF_MAC] == "aa:bb:cc:dd:ee:ff"
@pytest.mark.parametrize(
    ("import_data", "expected_mac", "mock_mac_bytes"),
    [
        # MAC present in YAML: used as-is.
        (
            {CONF_HOST: "192.168.1.5", CONF_MAC: "ac:cf:23:12:34:56"},
            "ac:cf:23:12:34:56",
            None,
        ),
        # MAC missing from YAML: resolved via discovery.
        ({CONF_HOST: "192.168.1.5"}, "11:22:33:44:55:66", b"\x11\x22\x33\x44\x55\x66"),
    ],
)
async def test_import_flow_success(
    hass: HomeAssistant,
    mock_discover,
    mock_setup_entry,
    mock_s20,
    import_data: dict[str, Any],
    expected_mac: str,
    mock_mac_bytes: bytes | None,
) -> None:
    """Test importing configuration.yaml entry succeeds with provided or discovered MAC."""
    mock_s20.return_value._mac = mock_mac_bytes
    mock_discover.return_value = {"192.168.1.5": {"mac": b"\x11\x22\x33\x44\x55\x66"}}
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=import_data
    )
    assert result["type"] == FlowResultType.CREATE_ENTRY
    # Imported entries are titled with the bare host.
    assert result["title"] == "192.168.1.5"
    assert result["data"][CONF_MAC] == expected_mac


@pytest.mark.parametrize(
    ("import_data", "expected_reason", "mock_exception", "mock_mac_bytes"),
    [
        # No MAC in YAML and discovery finds nothing.
        ({CONF_HOST: "192.168.1.5"}, "cannot_discover", None, None),
        # Device connection fails during import.
        (
            {CONF_HOST: "192.168.1.5", CONF_MAC: "ac:cf:23:12:34:56"},
            "cannot_connect",
            S20Exception("Connection failed"),
            b"dummy",
        ),
    ],
)
async def test_import_flow_errors(
    hass: HomeAssistant,
    mock_s20,
    mock_discover,
    import_data: dict[str, Any],
    expected_reason: str,
    mock_exception: Exception | None,
    mock_mac_bytes: bytes | None,
) -> None:
    """Test various abort errors in the import flow."""
    mock_discover.return_value = {}
    mock_s20.side_effect = mock_exception
    mock_s20.return_value._mac = mock_mac_bytes
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=import_data
    )
    # Import flows abort on failure rather than re-showing a form.
    assert result["type"] == FlowResultType.ABORT
    assert result["reason"] == expected_reason
async def test_discover_skips_existing_and_invalid_mac(
    hass: HomeAssistant, mock_config_entry: MockConfigEntry, mock_discover
) -> None:
    """Test discovery ignores devices already configured and devices without MACs."""
    mock_config_entry.add_to_hass(hass)
    mock_discover.return_value = {
        # .10 is expected to be filtered out below (already configured),
        # .11 reports no MAC, .12 is the only selectable device.
        "192.168.1.10": {"mac": b"\xaa\xbb\xcc\xdd\xee\xff"},
        "192.168.1.11": {},
        "192.168.1.12": {"mac": b"\x11\x22\x33\x44\x55\x66"},
    }
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"next_step_id": "start_discovery"}
    )
    await hass.async_block_till_done()
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == FlowResultType.FORM
    assert result["step_id"] == "choose_switch"
    # Inspect the generated dropdown: only the new MAC-bearing device remains.
    schema = result["data_schema"].schema
    dropdown_options = schema[vol.Required(CONF_SWITCH_LIST)].container
    assert "192.168.1.12" in dropdown_options
    assert "192.168.1.10" not in dropdown_options
    assert "192.168.1.11" not in dropdown_options


async def test_start_discovery_shows_progress(hass: HomeAssistant) -> None:
    """Test polling the flow while discovery is still in progress."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    async def delayed_executor_job(*args, **kwargs) -> dict[str, Any]:
        # Keep the discovery task pending long enough to poll the flow.
        await asyncio.sleep(0.1)
        return {}

    with patch.object(hass, "async_add_executor_job", side_effect=delayed_executor_job):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], {"next_step_id": "start_discovery"}
        )
        assert result["type"] == FlowResultType.SHOW_PROGRESS
        # Polling again while the task is still running re-shows progress.
        result = await hass.config_entries.flow.async_configure(result["flow_id"])
        assert result["type"] == FlowResultType.SHOW_PROGRESS
        assert result["progress_action"] == "start_discovery"
        await hass.async_block_till_done()


async def test_discovery_flow_task_exception(
    hass: HomeAssistant, mock_discover
) -> None:
    """Test the discovery process when the background task raises an error."""
    mock_discover.side_effect = S20Exception("Network timeout")
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"next_step_id": "start_discovery"}
    )
    await hass.async_block_till_done()
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    # A failed discovery task routes to the discovery_failed menu.
    assert result["type"] == FlowResultType.MENU
    assert result["step_id"] == "discovery_failed"
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/orvibo/test_config_flow.py",
"license": "Apache License 2.0",
"lines": 282,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/teslemetry/calendar.py | """Calendar platform for Teslemetry integration."""
from __future__ import annotations
from datetime import datetime, timedelta
from typing import Any
from homeassistant.components.calendar import CalendarEntity, CalendarEvent
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from homeassistant.util import dt as dt_util
from . import TeslemetryConfigEntry
from .entity import TeslemetryEnergyInfoEntity
PARALLEL_UPDATES = 0
async def async_setup_entry(
    hass: HomeAssistant,
    entry: TeslemetryConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up the Teslemetry Calendar platform from a config entry.

    Creates one calendar per tariff feed (buy, then sell) for every energy
    site that actually reports season data for that feed.
    """
    calendars: list[CalendarEntity] = []
    for tariff_key in ("tariff_content_v2", "tariff_content_v2_sell_tariff"):
        for site in entry.runtime_data.energysites:
            # Skip feeds the site does not publish seasons for.
            if site.info_coordinator.data.get(f"{tariff_key}_seasons"):
                calendars.append(TeslemetryTariffSchedule(site, tariff_key))
    async_add_entities(calendars)
def _is_day_in_range(day_of_week: int, from_day: int, to_day: int) -> bool:
"""Check if a day of week falls within a range, handling week crossing."""
if from_day <= to_day:
return from_day <= day_of_week <= to_day
# Week crossing (e.g., Fri=4 to Mon=0)
return day_of_week >= from_day or day_of_week <= to_day
def _parse_period_times(
    period_def: dict[str, Any],
    base_day: datetime,
) -> tuple[datetime, datetime] | None:
    """Resolve a TOU period definition to concrete start/end datetimes.

    Returns None when *base_day*'s weekday falls outside the period's
    day-of-week range. Periods that cross midnight get an end time on the
    following day.
    """
    # Day-of-week bounds default to the full week (0=Monday .. 6=Sunday).
    if not _is_day_in_range(
        base_day.weekday(),
        period_def.get("fromDayOfWeek", 0),
        period_def.get("toDayOfWeek", 6),
    ):
        return None
    # Hours default to 0 (a 24-hour period is encoded 0-0); likewise minutes.
    begin = base_day.replace(
        hour=period_def.get("fromHour", 0),
        minute=period_def.get("fromMinute", 0),
        second=0,
        microsecond=0,
    )
    finish = base_day.replace(
        hour=period_def.get("toHour", 0),
        minute=period_def.get("toMinute", 0),
        second=0,
        microsecond=0,
    )
    # A non-positive span means the period wraps past midnight.
    if finish <= begin:
        finish += timedelta(days=1)
    return begin, finish
def _build_event(
    key_base: str,
    season_name: str,
    period_name: str,
    price: float | None,
    start_time: datetime,
    end_time: datetime,
) -> CalendarEvent:
    """Assemble a CalendarEvent describing one tariff period."""
    if price is None:
        price_str = "Unknown Price"
    else:
        price_str = f"{price:.2f}/kWh"
    pretty_period = period_name.capitalize().replace("_", " ")
    # The uid embeds the start time so repeated periods stay distinct.
    return CalendarEvent(
        start=start_time,
        end=end_time,
        summary=f"{pretty_period}: {price_str}",
        description=(
            f"Season: {season_name.capitalize()}\n"
            f"Period: {pretty_period}\n"
            f"Price: {price_str}"
        ),
        uid=f"{key_base}_{season_name}_{period_name}_{start_time.isoformat()}",
    )
class TeslemetryTariffSchedule(TeslemetryEnergyInfoEntity, CalendarEntity):
    """Energy Site Tariff Schedule Calendar.

    Exposes one tariff feed (buy or sell) of an energy site as a calendar
    whose events are the time-of-use (TOU) rate periods.
    """

    def __init__(
        self,
        data: Any,
        key_base: str,
    ) -> None:
        """Initialize the tariff schedule calendar.

        Args:
            data: Energy site data passed through to the base entity.
            key_base: Coordinator key prefix for this feed, e.g.
                "tariff_content_v2" (buy) or "tariff_content_v2_sell_tariff".
        """
        self.key_base: str = key_base
        # Season definitions and price tables; filled by _async_update_attrs.
        self.seasons: dict[str, dict[str, Any]] = {}
        self.charges: dict[str, dict[str, Any]] = {}
        super().__init__(data, key_base)

    @property
    def event(self) -> CalendarEvent | None:
        """Return the current active tariff event, or None if no period matches."""
        now = dt_util.now()
        current_season_name = self._get_current_season(now)
        if not current_season_name or not self.seasons.get(current_season_name):
            return None
        # Time of use (TOU) periods define the tariff schedule within a season
        tou_periods = self.seasons[current_season_name].get("tou_periods", {})
        for period_name, period_group in tou_periods.items():
            for period_def in period_group.get("periods", []):
                result = _parse_period_times(period_def, now)
                if result is None:
                    continue
                start_time, end_time = result
                # Check if now falls within this period
                if not (start_time <= now < end_time):
                    # For cross-midnight periods, check yesterday's instance
                    start_time -= timedelta(days=1)
                    end_time -= timedelta(days=1)
                    if not (start_time <= now < end_time):
                        continue
                price = self._get_price_for_period(current_season_name, period_name)
                return _build_event(
                    self.key_base,
                    current_season_name,
                    period_name,
                    price,
                    start_time,
                    end_time,
                )
        return None

    async def async_get_events(
        self,
        hass: HomeAssistant,
        start_date: datetime,
        end_date: datetime,
    ) -> list[CalendarEvent]:
        """Return calendar events (tariff periods) within a datetime range."""
        events: list[CalendarEvent] = []
        start_date = dt_util.as_local(start_date)
        end_date = dt_util.as_local(end_date)
        # Start one day earlier to catch TOU periods that cross midnight
        # from the previous evening into the query range.
        current_day = dt_util.start_of_local_day(start_date) - timedelta(days=1)
        while current_day < end_date:
            season_name = self._get_current_season(current_day)
            if not season_name or not self.seasons.get(season_name):
                current_day += timedelta(days=1)
                continue
            tou_periods = self.seasons[season_name].get("tou_periods", {})
            for period_name, period_group in tou_periods.items():
                for period_def in period_group.get("periods", []):
                    result = _parse_period_times(period_def, current_day)
                    if result is None:
                        continue
                    start_time, end_time = result
                    # Keep only periods that overlap the requested range.
                    if start_time < end_date and end_time > start_date:
                        price = self._get_price_for_period(season_name, period_name)
                        events.append(
                            _build_event(
                                self.key_base,
                                season_name,
                                period_name,
                                price,
                                start_time,
                                end_time,
                            )
                        )
            current_day += timedelta(days=1)
        events.sort(key=lambda x: x.start)
        return events

    def _get_current_season(self, date_to_check: datetime) -> str | None:
        """Determine the active season for a given date.

        Returns the first season whose [start, end) window contains the date,
        or None when no season matches or season data is malformed.
        """
        local_date = dt_util.as_local(date_to_check)
        year = local_date.year
        for season_name, season_data in self.seasons.items():
            if not season_data:
                continue
            try:
                from_month = season_data["fromMonth"]
                from_day = season_data["fromDay"]
                to_month = season_data["toMonth"]
                to_day = season_data["toDay"]
                # Handle seasons that cross year boundaries
                start_year = year
                end_year = year
                # Season crosses year boundary (e.g., Oct-Mar)
                if from_month > to_month or (
                    from_month == to_month and from_day > to_day
                ):
                    if local_date.month > from_month or (
                        local_date.month == from_month and local_date.day >= from_day
                    ):
                        end_year = year + 1
                    else:
                        start_year = year - 1
                season_start = local_date.replace(
                    year=start_year,
                    month=from_month,
                    day=from_day,
                    hour=0,
                    minute=0,
                    second=0,
                    microsecond=0,
                )
                # End is exclusive: the day after toDay at local midnight.
                season_end = local_date.replace(
                    year=end_year,
                    month=to_month,
                    day=to_day,
                    hour=0,
                    minute=0,
                    second=0,
                    microsecond=0,
                ) + timedelta(days=1)
                if season_start <= local_date < season_end:
                    return season_name
            # BUG FIX: `except KeyError, ValueError:` is Python 2 syntax and a
            # SyntaxError in Python 3; multiple exceptions need a tuple.
            # KeyError: missing season keys; ValueError: invalid month/day.
            except (KeyError, ValueError):
                continue
        return None

    def _get_price_for_period(self, season_name: str, period_name: str) -> float | None:
        """Get the price for a specific season and period name.

        Falls back to the "ALL" season / "ALL" rate when no specific entry
        exists; returns None when the price is missing or not numeric.
        """
        try:
            season_charges = self.charges.get(season_name, self.charges.get("ALL", {}))
            rates = season_charges.get("rates", {})
            price = rates.get(period_name, rates.get("ALL"))
            return float(price) if price is not None else None
        # BUG FIX: same Python 2 except syntax corrected to a tuple.
        # TypeError/ValueError: float() on a non-numeric rate value.
        except (KeyError, ValueError, TypeError):
            return None

    def _async_update_attrs(self) -> None:
        """Update the Calendar attributes from coordinator data."""
        self.seasons = self.coordinator.data.get(f"{self.key_base}_seasons", {})
        self.charges = self.coordinator.data.get(f"{self.key_base}_energy_charges", {})
        # Without both seasons and charges no meaningful events can be built.
        self._attr_available = bool(self.seasons and self.charges)
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/teslemetry/calendar.py",
"license": "Apache License 2.0",
"lines": 228,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
home-assistant/core:tests/components/teslemetry/test_calendar.py | """Test the Teslemetry calendar platform."""
from collections.abc import Generator
from copy import deepcopy
from datetime import datetime
from unittest.mock import AsyncMock, patch
from freezegun.api import FrozenDateTimeFactory
import pytest
from syrupy.assertion import SnapshotAssertion
from homeassistant.components.calendar import (
DOMAIN as CALENDAR_DOMAIN,
EVENT_END_DATETIME,
EVENT_START_DATETIME,
SERVICE_GET_EVENTS,
)
from homeassistant.const import ATTR_ENTITY_ID, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from homeassistant.util import dt as dt_util
from . import assert_entities, setup_platform
from .const import SITE_INFO, SITE_INFO_MULTI_SEASON, SITE_INFO_WEEK_CROSSING
# Entity ids of the buy and sell tariff calendars created by the integration.
ENTITY_BUY = "calendar.energy_site_buy_tariff"
ENTITY_SELL = "calendar.energy_site_sell_tariff"


@pytest.fixture
def mock_site_info_week_crossing(mock_site_info) -> Generator[AsyncMock]:
    """Mock Teslemetry Energy site_info with week-crossing tariff data."""
    # deepcopy on every call so tests cannot mutate the shared payload.
    with patch(
        "tesla_fleet_api.tesla.energysite.EnergySite.site_info",
        side_effect=lambda: deepcopy(SITE_INFO_WEEK_CROSSING),
    ) as mock:
        yield mock


@pytest.fixture
def mock_site_info_multi_season(mock_site_info) -> Generator[AsyncMock]:
    """Mock Teslemetry Energy site_info with multi-season tariff data."""
    with patch(
        "tesla_fleet_api.tesla.energysite.EnergySite.site_info",
        side_effect=lambda: deepcopy(SITE_INFO_MULTI_SEASON),
    ) as mock:
        yield mock


@pytest.fixture
def mock_site_info_no_tariff(mock_site_info) -> Generator[AsyncMock]:
    """Mock Teslemetry Energy site_info with no tariff data."""
    # Strip the seasons from both the buy and sell tariff feeds.
    site_info_no_tariff = deepcopy(SITE_INFO_WEEK_CROSSING)
    site_info_no_tariff["response"]["tariff_content_v2"]["seasons"] = {}
    site_info_no_tariff["response"]["tariff_content_v2"]["sell_tariff"]["seasons"] = {}
    with patch(
        "tesla_fleet_api.tesla.energysite.EnergySite.site_info",
        side_effect=lambda: deepcopy(site_info_no_tariff),
    ) as mock:
        yield mock


@pytest.fixture
def mock_site_info_invalid_season(mock_site_info) -> Generator[AsyncMock]:
    """Mock site_info with invalid/empty season data."""
    site_info = deepcopy(SITE_INFO)
    # Empty season first (hits _get_current_season empty check),
    # then season with missing keys (hits KeyError exception handler)
    site_info["response"]["tariff_content_v2"]["seasons"] = {
        "Empty": {},
        "Invalid": {"someKey": "value"},
    }
    site_info["response"]["tariff_content_v2"]["sell_tariff"]["seasons"] = {}
    with patch(
        "tesla_fleet_api.tesla.energysite.EnergySite.site_info",
        side_effect=lambda: deepcopy(site_info),
    ) as mock:
        yield mock


@pytest.fixture
def mock_site_info_invalid_price(mock_site_info) -> Generator[AsyncMock]:
    """Mock site_info with non-numeric price data."""
    site_info = deepcopy(SITE_INFO)
    # Non-numeric rate strings exercise the float() failure path.
    site_info["response"]["tariff_content_v2"]["energy_charges"]["Summer"]["rates"] = {
        "OFF_PEAK": "not_a_number",
        "ON_PEAK": "not_a_number",
    }
    site_info["response"]["tariff_content_v2"]["sell_tariff"]["seasons"] = {}
    with patch(
        "tesla_fleet_api.tesla.energysite.EnergySite.site_info",
        side_effect=lambda: deepcopy(site_info),
    ) as mock:
        yield mock
@pytest.mark.usefixtures("entity_registry_enabled_by_default")
async def test_calendar(
    hass: HomeAssistant,
    snapshot: SnapshotAssertion,
    entity_registry: er.EntityRegistry,
    freezer: FrozenDateTimeFactory,
    mock_legacy: AsyncMock,
) -> None:
    """Tests that the calendar entity is correct."""
    # Freeze time so the snapshotted event boundaries are deterministic.
    tz = dt_util.get_default_time_zone()
    freezer.move_to(datetime(2024, 1, 1, 10, 0, 0, tzinfo=tz))
    entry = await setup_platform(hass, [Platform.CALENDAR])
    assert_entities(hass, entry.entry_id, entity_registry, snapshot)


@pytest.mark.parametrize(
    "entity_id",
    [ENTITY_BUY, ENTITY_SELL],
)
@pytest.mark.parametrize(
    "time_tuple",
    [
        (2024, 1, 1, 10, 0, 0),  # OFF_PEAK period started yesterday
        (2024, 1, 1, 20, 0, 0),  # ON_PEAK period starts and ends today
        (2024, 1, 1, 22, 0, 0),  # OFF_PEAK period ends tomorrow
    ],
)
@pytest.mark.usefixtures("entity_registry_enabled_by_default")
async def test_calendar_events(
    hass: HomeAssistant,
    snapshot: SnapshotAssertion,
    entity_registry: er.EntityRegistry,
    freezer: FrozenDateTimeFactory,
    mock_legacy: AsyncMock,
    entity_id: str,
    time_tuple: tuple,
) -> None:
    """Tests that the energy tariff calendar entity events are correct."""
    tz = dt_util.get_default_time_zone()
    freezer.move_to(datetime(*time_tuple, tzinfo=tz))
    await setup_platform(hass, [Platform.CALENDAR])
    # Snapshot the currently-active event's attributes...
    state = hass.states.get(entity_id)
    assert state
    assert state.attributes == snapshot(name="event")
    # ...and the full week of events returned by calendar.get_events.
    result = await hass.services.async_call(
        CALENDAR_DOMAIN,
        SERVICE_GET_EVENTS,
        {
            ATTR_ENTITY_ID: [entity_id],
            EVENT_START_DATETIME: dt_util.parse_datetime("2024-01-01T00:00:00Z"),
            EVENT_END_DATETIME: dt_util.parse_datetime("2024-01-07T00:00:00Z"),
        },
        blocking=True,
        return_response=True,
    )
    assert result == snapshot(name="events")
@pytest.mark.parametrize(
    ("time_tuple", "expected_state", "expected_period"),
    [
        # Friday (day 4) - WEEKEND period active (Fri-Mon crossing)
        ((2024, 1, 5, 12, 0, 0), "on", "Weekend"),
        # Saturday (day 5) - WEEKEND period active
        ((2024, 1, 6, 12, 0, 0), "on", "Weekend"),
        # Sunday (day 6) - WEEKEND period active
        ((2024, 1, 7, 12, 0, 0), "on", "Weekend"),
        # Monday (day 0) - WEEKEND period active (end of Fri-Mon range)
        ((2024, 1, 8, 12, 0, 0), "on", "Weekend"),
    ],
)
@pytest.mark.usefixtures("entity_registry_enabled_by_default")
async def test_calendar_week_crossing(
    hass: HomeAssistant,
    freezer: FrozenDateTimeFactory,
    mock_legacy: AsyncMock,
    mock_site_info_week_crossing: AsyncMock,
    time_tuple: tuple,
    expected_state: str,
    expected_period: str,
) -> None:
    """Test calendar handles week-crossing day ranges correctly."""
    tz = dt_util.get_default_time_zone()
    time = datetime(*time_tuple, tzinfo=tz)
    freezer.move_to(time)
    await setup_platform(hass, [Platform.CALENDAR])
    # Each sampled day inside the Fri-Mon range must show the Weekend period.
    state = hass.states.get(ENTITY_BUY)
    assert state
    assert state.state == expected_state
    assert expected_period in state.attributes["message"]


@pytest.mark.usefixtures("entity_registry_enabled_by_default")
async def test_calendar_week_crossing_excluded_day(
    hass: HomeAssistant,
    freezer: FrozenDateTimeFactory,
    mock_legacy: AsyncMock,
    mock_site_info_week_crossing: AsyncMock,
) -> None:
    """Test calendar excludes days outside week-crossing range."""
    tz = dt_util.get_default_time_zone()
    # Wednesday (day 2) - No period active (outside Fri-Mon range)
    freezer.move_to(datetime(2024, 1, 3, 12, 0, 0, tzinfo=tz))
    await setup_platform(hass, [Platform.CALENDAR])
    state = hass.states.get(ENTITY_BUY)
    assert state
    assert state.state == "off"


@pytest.mark.parametrize(
    ("time_tuple", "expected_season", "expected_buy_price"),
    [
        # June 15 at noon - Summer OFF_PEAK (Apr-Sep)
        ((2024, 6, 15, 12, 0, 0), "Summer", "0.20"),
        # July 1 at 18:00 - Summer PEAK
        ((2024, 7, 1, 18, 0, 0), "Summer", "0.35"),
        # December 15 at noon - Winter OFF_PEAK (Oct-Mar, crosses year boundary)
        ((2024, 12, 15, 12, 0, 0), "Winter", "0.12"),
        # January 15 at noon - Winter OFF_PEAK (crosses year boundary)
        ((2024, 1, 15, 12, 0, 0), "Winter", "0.12"),
        # February 28 at 18:00 - Winter PEAK
        ((2024, 2, 28, 18, 0, 0), "Winter", "0.25"),
    ],
)
@pytest.mark.usefixtures("entity_registry_enabled_by_default")
async def test_calendar_multi_season(
    hass: HomeAssistant,
    freezer: FrozenDateTimeFactory,
    mock_legacy: AsyncMock,
    mock_site_info_multi_season: AsyncMock,
    time_tuple: tuple,
    expected_season: str,
    expected_buy_price: str,
) -> None:
    """Test calendar handles multiple seasons and year boundaries correctly."""
    tz = dt_util.get_default_time_zone()
    time = datetime(*time_tuple, tzinfo=tz)
    freezer.move_to(time)
    await setup_platform(hass, [Platform.CALENDAR])
    # The active event must name the right season and quote the right price.
    state = hass.states.get(ENTITY_BUY)
    assert state
    assert state.state == "on"
    assert expected_season in state.attributes["description"]
    assert expected_buy_price in state.attributes["message"]
@pytest.mark.usefixtures("entity_registry_enabled_by_default")
async def test_calendar_no_tariff_data(
    hass: HomeAssistant,
    freezer: FrozenDateTimeFactory,
    mock_legacy: AsyncMock,
    mock_site_info_no_tariff: AsyncMock,
) -> None:
    """Test calendar entity is not created when tariff data is missing."""
    tz = dt_util.get_default_time_zone()
    freezer.move_to(datetime(2024, 1, 1, 10, 0, 0, tzinfo=tz))
    await setup_platform(hass, [Platform.CALENDAR])
    # Neither the buy nor the sell calendar should exist without seasons.
    state = hass.states.get(ENTITY_BUY)
    assert state is None
    state = hass.states.get(ENTITY_SELL)
    assert state is None


@pytest.mark.usefixtures("entity_registry_enabled_by_default")
async def test_calendar_invalid_season_data(
    hass: HomeAssistant,
    freezer: FrozenDateTimeFactory,
    mock_legacy: AsyncMock,
    mock_site_info_invalid_season: AsyncMock,
) -> None:
    """Test calendar handles invalid/empty season data gracefully."""
    tz = dt_util.get_default_time_zone()
    freezer.move_to(datetime(2024, 6, 15, 12, 0, 0, tzinfo=tz))
    await setup_platform(hass, [Platform.CALENDAR])
    # No valid season found -> event returns None -> state is "off"
    state = hass.states.get(ENTITY_BUY)
    assert state
    assert state.state == "off"
    # async_get_events also returns empty when no valid seasons
    result = await hass.services.async_call(
        CALENDAR_DOMAIN,
        SERVICE_GET_EVENTS,
        {
            ATTR_ENTITY_ID: [ENTITY_BUY],
            EVENT_START_DATETIME: dt_util.parse_datetime("2024-06-15T00:00:00Z"),
            EVENT_END_DATETIME: dt_util.parse_datetime("2024-06-17T00:00:00Z"),
        },
        blocking=True,
        return_response=True,
    )
    assert result[ENTITY_BUY]["events"] == []


@pytest.mark.usefixtures("entity_registry_enabled_by_default")
async def test_calendar_week_crossing_get_events(
    hass: HomeAssistant,
    freezer: FrozenDateTimeFactory,
    mock_legacy: AsyncMock,
    mock_site_info_week_crossing: AsyncMock,
) -> None:
    """Test async_get_events filters by day of week with week-crossing periods."""
    tz = dt_util.get_default_time_zone()
    freezer.move_to(datetime(2024, 1, 1, 12, 0, 0, tzinfo=tz))
    await setup_platform(hass, [Platform.CALENDAR])
    # Request events for a full week - only Fri-Mon should have events
    result = await hass.services.async_call(
        CALENDAR_DOMAIN,
        SERVICE_GET_EVENTS,
        {
            ATTR_ENTITY_ID: [ENTITY_BUY],
            EVENT_START_DATETIME: dt_util.parse_datetime("2024-01-01T00:00:00Z"),
            EVENT_END_DATETIME: dt_util.parse_datetime("2024-01-08T00:00:00Z"),
        },
        blocking=True,
        return_response=True,
    )
    events = result[ENTITY_BUY]["events"]
    # 5 events: Sun Dec 31, Mon Jan 1, Fri Jan 5, Sat Jan 6, Sun Jan 7
    # (Dec 31 included due to UTC-to-local shift) - no Tue/Wed/Thu
    assert len(events) == 5
    for event in events:
        start = dt_util.parse_datetime(event["start"])
        assert start is not None
        # Weekdays 0/4/5/6 = Mon/Fri/Sat/Sun only.
        assert start.weekday() in (0, 4, 5, 6)
@pytest.mark.usefixtures("entity_registry_enabled_by_default")
async def test_calendar_midnight_crossing_local_start(
    hass: HomeAssistant,
    freezer: FrozenDateTimeFactory,
    mock_legacy: AsyncMock,
) -> None:
    """Test async_get_events includes overnight period when query starts at local midnight."""
    tz = dt_util.get_default_time_zone()
    freezer.move_to(datetime(2024, 1, 1, 10, 0, 0, tzinfo=tz))
    await setup_platform(hass, [Platform.CALENDAR])
    # Use local-timezone timestamps so UTC-to-local shift does not
    # accidentally push the start back to the previous day.
    start = datetime(2024, 1, 1, 0, 0, 0, tzinfo=tz)
    end = datetime(2024, 1, 2, 0, 0, 0, tzinfo=tz)
    result = await hass.services.async_call(
        CALENDAR_DOMAIN,
        SERVICE_GET_EVENTS,
        {
            ATTR_ENTITY_ID: [ENTITY_BUY],
            EVENT_START_DATETIME: start,
            EVENT_END_DATETIME: end,
        },
        blocking=True,
        return_response=True,
    )
    events = result[ENTITY_BUY]["events"]
    # Expect 2 events on Jan 1:
    # 1) OFF_PEAK that started Dec 31 21:00 and ends Jan 1 16:00
    # 2) ON_PEAK from Jan 1 16:00 to Jan 1 21:00
    # The OFF_PEAK starting Jan 1 21:00 (ending Jan 2 16:00) also overlaps,
    # so 3 events total.
    assert len(events) == 3
    starts = [dt_util.parse_datetime(e["start"]) for e in events]
    assert starts[0].day == 31  # Dec 31 21:00 (previous evening)
    assert starts[1].day == 1  # Jan 1 16:00
    assert starts[2].day == 1  # Jan 1 21:00


@pytest.mark.usefixtures("entity_registry_enabled_by_default")
async def test_calendar_invalid_price(
    hass: HomeAssistant,
    freezer: FrozenDateTimeFactory,
    mock_legacy: AsyncMock,
    mock_site_info_invalid_price: AsyncMock,
) -> None:
    """Test calendar handles non-numeric price data gracefully."""
    tz = dt_util.get_default_time_zone()
    freezer.move_to(datetime(2024, 1, 1, 10, 0, 0, tzinfo=tz))
    await setup_platform(hass, [Platform.CALENDAR])
    # Period matches but price is invalid -> shows "Unknown Price"
    state = hass.states.get(ENTITY_BUY)
    assert state
    assert state.state == "on"
    assert "Unknown Price" in state.attributes["message"]
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/teslemetry/test_calendar.py",
"license": "Apache License 2.0",
"lines": 341,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/kitchen_sink/fan.py | """Demo platform that offers a fake infrared fan entity."""
from __future__ import annotations
from typing import Any
import infrared_protocols
from homeassistant.components.fan import FanEntity, FanEntityFeature
from homeassistant.components.infrared import async_send_command
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_UNAVAILABLE
from homeassistant.core import Event, EventStateChangedData, HomeAssistant, callback
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from homeassistant.helpers.event import async_track_state_change_event
from .const import CONF_INFRARED_ENTITY_ID, DOMAIN
# Entities push their own state; no parallel polling updates are needed.
PARALLEL_UPDATES = 0

# NEC address of the simulated fan receiver (see _send_command).
DUMMY_FAN_ADDRESS = 0x1234
# NEC command codes understood by the simulated fan.
DUMMY_CMD_POWER_ON = 0x01
DUMMY_CMD_POWER_OFF = 0x02
DUMMY_CMD_SPEED_LOW = 0x03
DUMMY_CMD_SPEED_MEDIUM = 0x04
DUMMY_CMD_SPEED_HIGH = 0x05
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up the demo infrared fan platform.

    Every "infrared_fan" subentry becomes one fan entity, registered under
    its own subentry id so it can be added and removed individually.
    """
    for subentry_id, subentry in config_entry.subentries.items():
        if subentry.subentry_type == "infrared_fan":
            fan = DemoInfraredFan(
                subentry_id=subentry_id,
                device_name=subentry.title,
                infrared_entity_id=subentry.data[CONF_INFRARED_ENTITY_ID],
            )
            async_add_entities([fan], config_subentry_id=subentry_id)
class DemoInfraredFan(FanEntity):
    """Representation of a demo infrared fan entity."""

    _attr_has_entity_name = True
    _attr_name = None
    # State is pushed, never polled, and only assumed: an IR blaster gets no
    # feedback from the physical fan.
    _attr_should_poll = False
    _attr_assumed_state = True
    # Three discrete speeds (low/medium/high).
    _attr_speed_count = 3
    _attr_supported_features = (
        FanEntityFeature.SET_SPEED
        | FanEntityFeature.TURN_OFF
        | FanEntityFeature.TURN_ON
    )

    def __init__(
        self,
        subentry_id: str,
        device_name: str,
        infrared_entity_id: str,
    ) -> None:
        """Initialize the demo infrared fan entity.

        Args:
            subentry_id: Config subentry id; doubles as the unique id.
            device_name: Display name for the device registry entry.
            infrared_entity_id: Entity id of the IR blaster used to transmit.
        """
        self._infrared_entity_id = infrared_entity_id
        self._attr_unique_id = subentry_id
        self._attr_device_info = DeviceInfo(
            identifiers={(DOMAIN, subentry_id)},
            name=device_name,
        )
        # Start in the "off" state (0% speed).
        self._attr_percentage = 0

    async def async_added_to_hass(self) -> None:
        """Subscribe to infrared entity state changes."""
        await super().async_added_to_hass()

        @callback
        def _async_ir_state_changed(event: Event[EventStateChangedData]) -> None:
            """Handle infrared entity state changes."""
            # The fan is only usable while its IR blaster is available.
            new_state = event.data["new_state"]
            self._attr_available = (
                new_state is not None and new_state.state != STATE_UNAVAILABLE
            )
            self.async_write_ha_state()

        # Unsubscribe automatically when this entity is removed.
        self.async_on_remove(
            async_track_state_change_event(
                self.hass, [self._infrared_entity_id], _async_ir_state_changed
            )
        )
        # Set initial availability based on current infrared entity state
        ir_state = self.hass.states.get(self._infrared_entity_id)
        self._attr_available = (
            ir_state is not None and ir_state.state != STATE_UNAVAILABLE
        )

    async def _send_command(self, command_code: int) -> None:
        """Send an IR command using the NEC protocol."""
        command = infrared_protocols.NECCommand(
            address=DUMMY_FAN_ADDRESS,
            command=command_code,
            modulation=38000,  # carrier frequency in Hz
        )
        await async_send_command(
            self.hass, self._infrared_entity_id, command, context=self._context
        )

    async def async_turn_on(
        self,
        percentage: int | None = None,
        preset_mode: str | None = None,
        **kwargs: Any,
    ) -> None:
        """Turn on the fan."""
        # An explicit percentage implies a speed command, which also powers on.
        if percentage is not None:
            await self.async_set_percentage(percentage)
            return
        # Plain power-on defaults to the lowest speed (33% of 3 speeds).
        await self._send_command(DUMMY_CMD_POWER_ON)
        self._attr_percentage = 33
        self.async_write_ha_state()

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off the fan."""
        await self._send_command(DUMMY_CMD_POWER_OFF)
        self._attr_percentage = 0
        self.async_write_ha_state()

    async def async_set_percentage(self, percentage: int) -> None:
        """Set the speed percentage of the fan."""
        # 0% is equivalent to turning the fan off.
        if percentage == 0:
            await self.async_turn_off()
            return
        # Map the percentage onto the three discrete speed commands.
        if percentage <= 33:
            await self._send_command(DUMMY_CMD_SPEED_LOW)
        elif percentage <= 66:
            await self._send_command(DUMMY_CMD_SPEED_MEDIUM)
        else:
            await self._send_command(DUMMY_CMD_SPEED_HIGH)
        self._attr_percentage = percentage
        self.async_write_ha_state()
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/kitchen_sink/fan.py",
"license": "Apache License 2.0",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
home-assistant/core:homeassistant/components/kitchen_sink/infrared.py | """Demo platform that offers a fake infrared entity."""
from __future__ import annotations
import infrared_protocols
from homeassistant.components import persistent_notification
from homeassistant.components.infrared import InfraredEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from . import DOMAIN
# Updates are push-based; no limit on parallel update calls is needed.
PARALLEL_UPDATES = 0
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up the demo infrared platform."""
    # A single fixed demo transmitter is enough for the kitchen_sink demo.
    demo_transmitter = DemoInfrared(
        unique_id="ir_transmitter",
        device_name="IR Blaster",
        entity_name="Infrared Transmitter",
    )
    async_add_entities([demo_transmitter])
class DemoInfrared(InfraredEntity):
    """Fake IR transmitter that reports each sent command as a notification."""

    _attr_has_entity_name = True
    _attr_should_poll = False

    def __init__(
        self,
        unique_id: str,
        device_name: str,
        entity_name: str,
    ) -> None:
        """Initialize the demo infrared entity."""
        self._attr_unique_id = unique_id
        self._attr_device_info = DeviceInfo(
            identifiers={(DOMAIN, unique_id)},
            name=device_name,
        )
        self._attr_name = entity_name

    async def async_send_command(self, command: infrared_protocols.Command) -> None:
        """Send an IR command."""
        # Flatten mark/space pairs into a single list; spaces are encoded
        # as negative microsecond values.
        timings: list[int] = []
        for timing in command.get_raw_timings():
            timings.append(timing.high_us)
            timings.append(-timing.low_us)
        persistent_notification.async_create(
            self.hass, str(timings), title="Infrared Command"
        )
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/kitchen_sink/infrared.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
home-assistant/core:tests/components/infrared/test_init.py | """Tests for the Infrared integration setup."""
from unittest.mock import AsyncMock
from freezegun.api import FrozenDateTimeFactory
from infrared_protocols import NECCommand
import pytest
from homeassistant.components.infrared import (
DATA_COMPONENT,
DOMAIN,
async_get_emitters,
async_send_command,
)
from homeassistant.const import STATE_UNAVAILABLE, STATE_UNKNOWN
from homeassistant.core import HomeAssistant, State
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from .conftest import MockInfraredEntity
from tests.common import mock_restore_cache
async def test_get_entities_integration_setup(hass: HomeAssistant) -> None:
    """Test getting entities when the integration is not setup."""
    # With the integration never loaded there are no emitters to report.
    emitters = async_get_emitters(hass)
    assert emitters == []
@pytest.mark.usefixtures("init_integration")
async def test_get_entities_empty(hass: HomeAssistant) -> None:
    """Test getting entities when none are registered."""
    # Integration is loaded but no entity has been added yet.
    registered = async_get_emitters(hass)
    assert registered == []
@pytest.mark.usefixtures("init_integration")
async def test_infrared_entity_initial_state(
    hass: HomeAssistant, mock_infrared_entity: MockInfraredEntity
) -> None:
    """Test infrared entity has no state before any command is sent."""
    # Register the mock entity with the infrared entity component.
    component = hass.data[DATA_COMPONENT]
    await component.async_add_entities([mock_infrared_entity])
    # A freshly added transmitter has never sent, so its state is unknown.
    state = hass.states.get("infrared.test_ir_transmitter")
    assert state is not None
    assert state.state == STATE_UNKNOWN
@pytest.mark.usefixtures("init_integration")
async def test_async_send_command_success(
    hass: HomeAssistant,
    mock_infrared_entity: MockInfraredEntity,
    freezer: FrozenDateTimeFactory,
) -> None:
    """Test sending command via async_send_command helper."""
    # Add the mock entity to the component
    component = hass.data[DATA_COMPONENT]
    await component.async_add_entities([mock_infrared_entity])
    # Freeze time so we can verify the state update
    now = dt_util.utcnow()
    freezer.move_to(now)
    command = NECCommand(address=0x04FB, command=0x08F7, modulation=38000)
    await async_send_command(hass, mock_infrared_entity.entity_id, command)
    # The helper must forward the exact command object to the entity, once.
    assert len(mock_infrared_entity.send_command_calls) == 1
    assert mock_infrared_entity.send_command_calls[0] is command
    # A successful send stamps the entity state with the send timestamp.
    state = hass.states.get("infrared.test_ir_transmitter")
    assert state is not None
    assert state.state == now.isoformat(timespec="milliseconds")
@pytest.mark.usefixtures("init_integration")
async def test_async_send_command_error_does_not_update_state(
    hass: HomeAssistant,
    mock_infrared_entity: MockInfraredEntity,
) -> None:
    """Test that state is not updated when async_send_command raises an error."""
    component = hass.data[DATA_COMPONENT]
    await component.async_add_entities([mock_infrared_entity])
    # Baseline: state is unknown before any send attempt.
    state = hass.states.get("infrared.test_ir_transmitter")
    assert state is not None
    assert state.state == STATE_UNKNOWN
    command = NECCommand(address=0x04FB, command=0x08F7, modulation=38000)
    # Make the entity's send fail so the helper propagates the error.
    mock_infrared_entity.async_send_command = AsyncMock(
        side_effect=HomeAssistantError("Transmission failed")
    )
    with pytest.raises(HomeAssistantError, match="Transmission failed"):
        await async_send_command(hass, mock_infrared_entity.entity_id, command)
    # Verify state was not updated after the error
    state = hass.states.get("infrared.test_ir_transmitter")
    assert state is not None
    assert state.state == STATE_UNKNOWN
@pytest.mark.usefixtures("init_integration")
async def test_async_send_command_entity_not_found(hass: HomeAssistant) -> None:
    """Test async_send_command raises error when entity not found."""
    command = NECCommand(
        address=0x04FB, command=0x08F7, modulation=38000, repeat_count=1
    )
    # Sending to an unknown entity id must raise with a descriptive message.
    with pytest.raises(
        HomeAssistantError,
        match="Infrared entity `infrared.nonexistent_entity` not found",
    ):
        await async_send_command(hass, "infrared.nonexistent_entity", command)
async def test_async_send_command_component_not_loaded(hass: HomeAssistant) -> None:
    """Test async_send_command raises error when component not loaded."""
    command = NECCommand(
        address=0x04FB, command=0x08F7, modulation=38000, repeat_count=1
    )
    # No init_integration fixture here: the helper must fail fast when the
    # infrared component itself was never set up.
    with pytest.raises(HomeAssistantError, match="component_not_loaded"):
        await async_send_command(hass, "infrared.some_entity", command)
@pytest.mark.parametrize(
    ("restored_value", "expected_state"),
    [
        ("2026-01-01T12:00:00.000+00:00", "2026-01-01T12:00:00.000+00:00"),
        (STATE_UNAVAILABLE, STATE_UNKNOWN),
    ],
)
async def test_infrared_entity_state_restore(
    hass: HomeAssistant,
    mock_infrared_entity: MockInfraredEntity,
    restored_value: str,
    expected_state: str,
) -> None:
    """Test infrared entity state restore."""
    # Seed the restore cache before the integration is set up so the entity
    # picks the value up when it is added.
    mock_restore_cache(hass, [State("infrared.test_ir_transmitter", restored_value)])
    assert await async_setup_component(hass, DOMAIN, {})
    await hass.async_block_till_done()
    component = hass.data[DATA_COMPONENT]
    await component.async_add_entities([mock_infrared_entity])
    # Valid timestamps restore as-is; "unavailable" falls back to unknown.
    state = hass.states.get("infrared.test_ir_transmitter")
    assert state is not None
    assert state.state == expected_state
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/infrared/test_init.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:tests/components/kitchen_sink/test_infrared.py | """The tests for the kitchen_sink infrared platform."""
from unittest.mock import patch
from freezegun.api import FrozenDateTimeFactory
import infrared_protocols
import pytest
from homeassistant.components.infrared import async_send_command
from homeassistant.components.kitchen_sink import DOMAIN
from homeassistant.const import STATE_UNKNOWN, Platform
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
# Entity id created by the kitchen_sink infrared platform under test.
ENTITY_IR_TRANSMITTER = "infrared.ir_blaster_infrared_transmitter"
@pytest.fixture
async def infrared_only() -> None:
    """Enable only the infrared platform."""
    # Restrict kitchen_sink to the infrared platform so unrelated demo
    # entities are not created during these tests.
    with patch(
        "homeassistant.components.kitchen_sink.COMPONENTS_WITH_DEMO_PLATFORM",
        [Platform.INFRARED],
    ):
        yield
@pytest.fixture(autouse=True)
async def setup_comp(hass: HomeAssistant, infrared_only: None) -> None:
    """Set up demo component."""
    # Depends on infrared_only so only the infrared platform is loaded.
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
    await hass.async_block_till_done()
async def test_send_command(
    hass: HomeAssistant, freezer: FrozenDateTimeFactory
) -> None:
    """Test sending an infrared command."""
    # Before any send, the transmitter has no timestamp state.
    state = hass.states.get(ENTITY_IR_TRANSMITTER)
    assert state
    assert state.state == STATE_UNKNOWN
    # Freeze time so the expected state timestamp is deterministic.
    now = dt_util.parse_datetime("2021-01-09 12:00:00+00:00")
    assert now is not None
    freezer.move_to(now)
    command = infrared_protocols.NECCommand(
        address=0x04, command=0x08, modulation=38000
    )
    await async_send_command(hass, ENTITY_IR_TRANSMITTER, command)
    # A successful send stamps the entity state with the send time.
    state = hass.states.get(ENTITY_IR_TRANSMITTER)
    assert state
    assert state.state == now.isoformat(timespec="milliseconds")
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/kitchen_sink/test_infrared.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/compit/fan.py | """Fan platform for Compit integration."""
from typing import Any
from compit_inext_api import PARAM_VALUES
from compit_inext_api.consts import CompitParameter
from homeassistant.components.fan import (
FanEntity,
FanEntityDescription,
FanEntityFeature,
)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from homeassistant.util.percentage import (
ordered_list_item_to_percentage,
percentage_to_ordered_list_item,
)
from .const import DOMAIN, MANUFACTURER_NAME
from .coordinator import CompitConfigEntry, CompitDataUpdateCoordinator
PARALLEL_UPDATES = 0
# Mapping from Compit gear values to the HA-facing option strings.
COMPIT_GEAR_TO_HA = PARAM_VALUES[CompitParameter.VENTILATION_GEAR_TARGET]
# Inverse mapping, used when writing a selected gear back to the device.
HA_STATE_TO_COMPIT = {value: key for key, value in COMPIT_GEAR_TO_HA.items()}
# Device definition codes that expose a ventilation fan entity.
DEVICE_DEFINITIONS: dict[int, FanEntityDescription] = {
    223: FanEntityDescription(
        key="Nano Color 2",
        translation_key="ventilation",
    ),
    12: FanEntityDescription(
        key="Nano Color",
        translation_key="ventilation",
    ),
}
async def async_setup_entry(
    hass: HomeAssistant,
    entry: CompitConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up Compit fan entities from a config entry."""
    coordinator = entry.runtime_data
    entities: list[CompitFan] = []
    # Only devices whose definition code is known get a fan entity.
    for device_id, device in coordinator.connector.all_devices.items():
        description = DEVICE_DEFINITIONS.get(device.definition.code)
        if description is not None:
            entities.append(CompitFan(coordinator, device_id, description))
    async_add_entities(entities)
class CompitFan(CoordinatorEntity[CompitDataUpdateCoordinator], FanEntity):
    """Representation of a Compit fan entity."""
    # One HA speed step per gear option exposed by the device.
    _attr_speed_count = len(COMPIT_GEAR_TO_HA)
    _attr_has_entity_name = True
    _attr_name = None
    _attr_supported_features = (
        FanEntityFeature.TURN_ON
        | FanEntityFeature.TURN_OFF
        | FanEntityFeature.SET_SPEED
    )
    def __init__(
        self,
        coordinator: CompitDataUpdateCoordinator,
        device_id: int,
        entity_description: FanEntityDescription,
    ) -> None:
        """Initialize the fan entity."""
        super().__init__(coordinator)
        self.device_id = device_id
        self.entity_description = entity_description
        self._attr_unique_id = f"{device_id}_{entity_description.key}"
        self._attr_device_info = DeviceInfo(
            identifiers={(DOMAIN, str(device_id))},
            name=entity_description.key,
            manufacturer=MANUFACTURER_NAME,
            model=entity_description.key,
        )
    @property
    def available(self) -> bool:
        """Return if entity is available."""
        # Unavailable when the coordinator update failed or the device is
        # no longer known to the connector.
        return (
            super().available
            and self.coordinator.connector.get_device(self.device_id) is not None
        )
    @property
    def is_on(self) -> bool | None:
        """Return true if the fan is on."""
        value = self.coordinator.connector.get_current_option(
            self.device_id, CompitParameter.VENTILATION_ON_OFF
        )
        # Tri-state: True/False for recognized values, None when unreadable.
        return True if value == STATE_ON else False if value == STATE_OFF else None
    async def async_turn_on(
        self,
        percentage: int | None = None,
        preset_mode: str | None = None,
        **kwargs: Any,
    ) -> None:
        """Turn on the fan."""
        await self.coordinator.connector.select_device_option(
            self.device_id, CompitParameter.VENTILATION_ON_OFF, STATE_ON
        )
        if percentage is None:
            self.async_write_ha_state()
            return
        # async_set_percentage writes its own state update.
        await self.async_set_percentage(percentage)
    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off the fan."""
        await self.coordinator.connector.select_device_option(
            self.device_id, CompitParameter.VENTILATION_ON_OFF, STATE_OFF
        )
        self.async_write_ha_state()
    @property
    def percentage(self) -> int | None:
        """Return the current fan speed as a percentage."""
        # An explicitly-off fan reports 0%; an unknown on/off state falls
        # through to the gear lookup below.
        if self.is_on is False:
            return 0
        mode = self.coordinator.connector.get_current_option(
            self.device_id, CompitParameter.VENTILATION_GEAR_TARGET
        )
        if mode is None:
            return None
        gear = COMPIT_GEAR_TO_HA.get(mode)
        # Unmapped gear values yield None, i.e. state becomes unknown.
        return (
            None
            if gear is None
            else ordered_list_item_to_percentage(
                list(COMPIT_GEAR_TO_HA.values()),
                gear,
            )
        )
    async def async_set_percentage(self, percentage: int) -> None:
        """Set the fan speed."""
        if percentage == 0:
            await self.async_turn_off()
            return
        # Snap the requested percentage to the nearest gear step.
        gear = int(
            percentage_to_ordered_list_item(
                list(COMPIT_GEAR_TO_HA.values()),
                percentage,
            )
        )
        mode = HA_STATE_TO_COMPIT.get(gear)
        # NOTE(review): an unmapped gear is silently ignored here — confirm
        # this is intended rather than raising/logging an error.
        if mode is None:
            return
        await self.coordinator.connector.select_device_option(
            self.device_id, CompitParameter.VENTILATION_GEAR_TARGET, mode
        )
        self.async_write_ha_state()
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/compit/fan.py",
"license": "Apache License 2.0",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
home-assistant/core:tests/components/compit/test_fan.py | """Tests for the Compit fan platform."""
from typing import Any
from unittest.mock import MagicMock
from compit_inext_api.consts import CompitParameter
import pytest
from syrupy.assertion import SnapshotAssertion
from homeassistant.components.fan import (
ATTR_PERCENTAGE,
DOMAIN as FAN_DOMAIN,
SERVICE_SET_PERCENTAGE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
STATE_UNKNOWN,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from . import setup_integration, snapshot_compit_entities
from tests.common import MockConfigEntry
async def test_fan_entities_snapshot(
    hass: HomeAssistant,
    entity_registry: er.EntityRegistry,
    mock_config_entry: MockConfigEntry,
    mock_connector: MagicMock,
    snapshot: SnapshotAssertion,
) -> None:
    """Snapshot test for fan entities creation, unique IDs, and device info."""
    await setup_integration(hass, mock_config_entry)
    # Compare every created fan entity against the stored snapshot.
    snapshot_compit_entities(hass, entity_registry, snapshot, Platform.FAN)
async def test_fan_turn_on(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_connector: MagicMock,
) -> None:
    """Test turning on the fan."""
    await setup_integration(hass, mock_config_entry)
    # Start from a known "off" device state.
    await mock_connector.select_device_option(
        2, CompitParameter.VENTILATION_ON_OFF, STATE_OFF
    )
    await hass.services.async_call(
        FAN_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: "fan.nano_color_2"}, blocking=True
    )
    # The entity must now report the "on" state.
    state = hass.states.get("fan.nano_color_2")
    assert state is not None
    assert state.state == STATE_ON
async def test_fan_turn_on_with_percentage(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_connector: MagicMock,
) -> None:
    """Test turning on the fan with a percentage."""
    await setup_integration(hass, mock_config_entry)
    # Start from a known "off" device state.
    await mock_connector.select_device_option(
        2, CompitParameter.VENTILATION_ON_OFF, STATE_OFF
    )
    await hass.services.async_call(
        FAN_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: "fan.nano_color_2", ATTR_PERCENTAGE: 100},
        blocking=True,
    )
    # Turning on with a percentage sets both the state and the speed.
    state = hass.states.get("fan.nano_color_2")
    assert state is not None
    assert state.state == STATE_ON
    assert state.attributes.get("percentage") == 100
async def test_fan_turn_off(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_connector: MagicMock,
) -> None:
    """Test turning off the fan."""
    await setup_integration(hass, mock_config_entry)
    # Start from a known "on" device state.
    await mock_connector.select_device_option(
        2, CompitParameter.VENTILATION_ON_OFF, STATE_ON
    )
    await hass.services.async_call(
        FAN_DOMAIN,
        SERVICE_TURN_OFF,
        {ATTR_ENTITY_ID: "fan.nano_color_2"},
        blocking=True,
    )
    state = hass.states.get("fan.nano_color_2")
    assert state is not None
    assert state.state == STATE_OFF
async def test_fan_set_speed(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_connector: MagicMock,
) -> None:
    """Test setting the fan speed."""
    await setup_integration(hass, mock_config_entry)
    await mock_connector.select_device_option(
        2, CompitParameter.VENTILATION_ON_OFF, STATE_ON
    )  # Ensure fan is on before setting speed
    await hass.services.async_call(
        FAN_DOMAIN,
        SERVICE_SET_PERCENTAGE,
        {
            ATTR_ENTITY_ID: "fan.nano_color_2",
            ATTR_PERCENTAGE: 80,
        },
        blocking=True,
    )
    # 80% lies exactly on a gear step, so it is reported unchanged.
    state = hass.states.get("fan.nano_color_2")
    assert state is not None
    assert state.attributes.get("percentage") == 80
async def test_fan_set_speed_while_off(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_connector: MagicMock,
) -> None:
    """Test setting the fan speed while the fan is off."""
    await setup_integration(hass, mock_config_entry)
    # Fan is off; setting a speed must not switch it on.
    await mock_connector.select_device_option(
        2, CompitParameter.VENTILATION_ON_OFF, STATE_OFF
    )
    await hass.services.async_call(
        FAN_DOMAIN,
        SERVICE_SET_PERCENTAGE,
        {
            ATTR_ENTITY_ID: "fan.nano_color_2",
            ATTR_PERCENTAGE: 80,
        },
        blocking=True,
    )
    state = hass.states.get("fan.nano_color_2")
    assert state is not None
    assert state.state == STATE_OFF  # Fan should remain off until turned on
    assert state.attributes.get("percentage") == 0
async def test_fan_set_speed_to_not_in_step_percentage(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_connector: MagicMock,
) -> None:
    """Test setting the fan speed to a percentage that is not in the step of the fan."""
    await setup_integration(hass, mock_config_entry)
    await mock_connector.select_device_option(
        2, CompitParameter.VENTILATION_ON_OFF, STATE_ON
    )
    await hass.services.async_call(
        FAN_DOMAIN,
        SERVICE_SET_PERCENTAGE,
        {ATTR_ENTITY_ID: "fan.nano_color_2", ATTR_PERCENTAGE: 65},
        # blocking=True matches every other service call in this module and
        # guarantees the call completed before the state is asserted.
        blocking=True,
    )
    state = hass.states.get("fan.nano_color_2")
    assert state is not None
    assert state.state == STATE_ON
    # 65% is snapped to the nearest gear step, 80%.
    assert state.attributes.get("percentage") == 80
async def test_fan_set_speed_to_0(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_connector: MagicMock,
) -> None:
    """Test setting the fan speed to 0."""
    await setup_integration(hass, mock_config_entry)
    await mock_connector.select_device_option(
        2, CompitParameter.VENTILATION_ON_OFF, STATE_ON
    )  # Turn on fan first
    await hass.services.async_call(
        FAN_DOMAIN,
        SERVICE_SET_PERCENTAGE,
        {
            ATTR_ENTITY_ID: "fan.nano_color_2",
            ATTR_PERCENTAGE: 0,
        },
        blocking=True,
    )
    state = hass.states.get("fan.nano_color_2")
    assert state is not None
    assert state.state == STATE_OFF  # Fan is turned off by setting the percentage to 0
    assert state.attributes.get("percentage") == 0
@pytest.mark.parametrize(
    "mock_return_value",
    [
        None,
        "invalid",
    ],
)
async def test_fan_invalid_speed(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_connector: MagicMock,
    mock_return_value: Any,
) -> None:
    """Test setting an invalid speed."""
    # Force the connector to report an unreadable/unmapped option value.
    mock_connector.get_current_option.side_effect = lambda device_id, parameter_code: (
        mock_return_value
    )
    await setup_integration(hass, mock_config_entry)
    # Unreadable values leave the entity state unknown.
    state = hass.states.get("fan.nano_color_2")
    assert state is not None
    assert state.state == STATE_UNKNOWN
@pytest.mark.parametrize(
    ("gear", "expected_percentage"),
    [
        ("gear_0", 20),
        ("gear_1", 40),
        ("gear_2", 60),
        ("gear_3", 80),
        ("airing", 100),
    ],
)
async def test_fan_gear_to_percentage(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_connector: MagicMock,
    gear: str,
    expected_percentage: int,
) -> None:
    """Test the gear to percentage conversion."""
    # Report the parametrized gear for every option the entity reads.
    mock_connector.get_current_option.side_effect = lambda device_id, parameter_code: (
        gear
    )
    await setup_integration(hass, mock_config_entry)
    # Five gears map evenly onto 20/40/60/80/100 percent.
    state = hass.states.get("fan.nano_color_2")
    assert state is not None
    assert state.attributes.get("percentage") == expected_percentage
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/compit/test_fan.py",
"license": "Apache License 2.0",
"lines": 222,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/systemnexa2/diagnostics.py | """Diagnostics support for System Nexa 2."""
from __future__ import annotations
from dataclasses import asdict
from typing import Any
from homeassistant.components.diagnostics import async_redact_data
from homeassistant.const import CONF_DEVICE_ID, CONF_HOST
from homeassistant.core import HomeAssistant
from .coordinator import SystemNexa2ConfigEntry
# Keys removed from the diagnostics output to avoid leaking identifying
# data (host, device id, unique id, WiFi network name).
TO_REDACT = {
    CONF_HOST,
    CONF_DEVICE_ID,
    "unique_id",
    "wifi_ssid",
}
async def async_get_config_entry_diagnostics(
    hass: HomeAssistant, entry: SystemNexa2ConfigEntry
) -> dict[str, Any]:
    """Return diagnostics for a config entry."""
    coordinator = entry.runtime_data
    data = coordinator.data
    # Summarize each on/off setting as its name plus enabled flag.
    settings: dict[str, Any] = {}
    for name, setting in data.on_off_settings.items():
        settings[name] = {
            "name": setting.name,
            "enabled": setting.is_enabled(),
        }
    return {
        "config_entry": async_redact_data(dict(entry.data), TO_REDACT),
        "device_info": async_redact_data(asdict(data.info_data), TO_REDACT),
        "coordinator_available": coordinator.last_update_success,
        "state": data.state,
        "settings": settings,
    }
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/systemnexa2/diagnostics.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
home-assistant/core:tests/components/systemnexa2/test_diagnostics.py | """Test System Nexa 2 diagnostics."""
from syrupy.assertion import SnapshotAssertion
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry
from tests.components.diagnostics import get_diagnostics_for_config_entry
from tests.typing import ClientSessionGenerator
async def test_diagnostics(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    mock_config_entry: MockConfigEntry,
    mock_system_nexa_2_device,
    snapshot: SnapshotAssertion,
) -> None:
    """Test diagnostics for config entry."""
    mock_config_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()
    # The redacted diagnostics payload must match the stored snapshot.
    result = await get_diagnostics_for_config_entry(
        hass, hass_client, mock_config_entry
    )
    assert result == snapshot
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/systemnexa2/test_diagnostics.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/systemnexa2/sensor.py | """Sensor platform for SystemNexa2 integration."""
from collections.abc import Callable
from dataclasses import dataclass
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.const import SIGNAL_STRENGTH_DECIBELS_MILLIWATT, EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from .coordinator import SystemNexa2ConfigEntry, SystemNexa2DataUpdateCoordinator
from .entity import SystemNexa2Entity
# Data is pushed via the coordinator; no parallel-update limit needed.
PARALLEL_UPDATES = 0
@dataclass(frozen=True, kw_only=True)
class SystemNexa2SensorEntityDescription(SensorEntityDescription):
    """Describes SystemNexa2 sensor entity."""
    # Extracts the sensor's native value from the coordinator.
    value_fn: Callable[[SystemNexa2DataUpdateCoordinator], str | int | None]
# WiFi signal strength is the only sensor; it is a diagnostic entity and
# disabled by default in the entity registry.
SENSOR_DESCRIPTIONS: tuple[SystemNexa2SensorEntityDescription, ...] = (
    SystemNexa2SensorEntityDescription(
        key="wifi_dbm",
        native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
        device_class=SensorDeviceClass.SIGNAL_STRENGTH,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        value_fn=lambda coordinator: coordinator.data.info_data.wifi_dbm,
        entity_registry_enabled_default=False,
    ),
)
async def async_setup_entry(
    hass: HomeAssistant,
    entry: SystemNexa2ConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up sensors based on a config entry."""
    coordinator = entry.runtime_data
    # Skip sensors whose source value is absent on this device.
    entities = [
        SystemNexa2Sensor(coordinator, description)
        for description in SENSOR_DESCRIPTIONS
        if description.value_fn(coordinator) is not None
    ]
    async_add_entities(entities)
class SystemNexa2Sensor(SystemNexa2Entity, SensorEntity):
    """Sensor entity whose value is derived from coordinator data."""

    entity_description: SystemNexa2SensorEntityDescription

    def __init__(
        self,
        coordinator: SystemNexa2DataUpdateCoordinator,
        entity_description: SystemNexa2SensorEntityDescription,
    ) -> None:
        """Initialize the sensor."""
        # The description key doubles as the entity key for unique ids.
        super().__init__(coordinator=coordinator, key=entity_description.key)
        self.entity_description = entity_description

    @property
    def native_value(self) -> str | int | None:
        """Return the state of the sensor."""
        description = self.entity_description
        return description.value_fn(self.coordinator)
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/systemnexa2/sensor.py",
"license": "Apache License 2.0",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
home-assistant/core:tests/components/systemnexa2/test_sensor.py | """Test the System Nexa 2 sensor platform."""
from unittest.mock import patch
import pytest
from syrupy.assertion import SnapshotAssertion
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
import homeassistant.helpers.entity_registry as er
from tests.common import MockConfigEntry, snapshot_platform
@pytest.mark.usefixtures("mock_system_nexa_2_device")
@pytest.mark.usefixtures("entity_registry_enabled_by_default")
async def test_sensor_entities(
    hass: HomeAssistant,
    snapshot: SnapshotAssertion,
    entity_registry: er.EntityRegistry,
    mock_config_entry: MockConfigEntry,
) -> None:
    """Test the sensor entities."""
    mock_config_entry.add_to_hass(hass)
    # Only load the sensor platform for snapshot testing
    with patch(
        "homeassistant.components.systemnexa2.PLATFORMS",
        [Platform.SENSOR],
    ):
        await hass.config_entries.async_setup(mock_config_entry.entry_id)
        # Compare all registered sensor entities against the snapshot.
        await snapshot_platform(
            hass, entity_registry, snapshot, mock_config_entry.entry_id
        )
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/systemnexa2/test_sensor.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/homematicip_cloud/siren.py | """Support for HomematicIP Cloud sirens."""
from __future__ import annotations
import logging
from typing import Any
from homematicip.base.functionalChannels import NotificationMp3SoundChannel
from homematicip.device import CombinationSignallingDevice
from homeassistant.components.siren import (
ATTR_TONE,
ATTR_VOLUME_LEVEL,
SirenEntity,
SirenEntityFeature,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from .entity import HomematicipGenericEntity
from .hap import HomematicIPConfigEntry, HomematicipHAP
_logger = logging.getLogger(__name__)
# Map tone integers to HmIP sound file strings
# Tone 0 plays the device-internal sound; 1..252 map to SOUNDFILE_001..252.
_TONE_TO_SOUNDFILE: dict[int, str] = {0: "INTERNAL_SOUNDFILE"}
_TONE_TO_SOUNDFILE.update({i: f"SOUNDFILE_{i:03d}" for i in range(1, 253)})
# Available tones as dict[int, str] for HA UI
AVAILABLE_TONES: dict[int, str] = {0: "Internal"}
AVAILABLE_TONES.update({i: f"Sound {i}" for i in range(1, 253)})
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: HomematicIPConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up the HomematicIP Cloud sirens from a config entry."""
    hap = config_entry.runtime_data
    # Only combination signalling devices (e.g. HmIP-MP3P) expose a siren.
    sirens = [
        HomematicipMP3Siren(hap, device)
        for device in hap.home.devices
        if isinstance(device, CombinationSignallingDevice)
    ]
    async_add_entities(sirens)
class HomematicipMP3Siren(HomematicipGenericEntity, SirenEntity):
    """Representation of the HomematicIP MP3 siren (HmIP-MP3P)."""
    _attr_available_tones = AVAILABLE_TONES
    _attr_supported_features = (
        SirenEntityFeature.TURN_ON
        | SirenEntityFeature.TURN_OFF
        | SirenEntityFeature.TONES
        | SirenEntityFeature.VOLUME_SET
    )
    def __init__(
        self, hap: HomematicipHAP, device: CombinationSignallingDevice
    ) -> None:
        """Initialize the siren entity."""
        # Channel 1 carries the MP3 sound functionality on this device.
        super().__init__(hap, device, post="Siren", channel=1, is_multi_channel=False)
    @property
    def _func_channel(self) -> NotificationMp3SoundChannel:
        # Typed accessor for the device's sound channel.
        return self._device.functionalChannels[self._channel]
    @property
    def is_on(self) -> bool:
        """Return true if siren is playing."""
        return self._func_channel.playingFileActive
    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn the siren on."""
        # Defaults: internal sound at full volume.
        tone = kwargs.get(ATTR_TONE, 0)
        volume_level = kwargs.get(ATTR_VOLUME_LEVEL, 1.0)
        # NOTE(review): assumes the tone arrives as an int key; any non-int
        # or unknown tone silently falls back to the internal sound — confirm.
        sound_file = _TONE_TO_SOUNDFILE.get(tone, "INTERNAL_SOUNDFILE")
        await self._func_channel.set_sound_file_volume_level_async(
            sound_file=sound_file, volume_level=volume_level
        )
    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn the siren off."""
        await self._func_channel.stop_sound_async()
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/homematicip_cloud/siren.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
home-assistant/core:tests/components/homematicip_cloud/test_siren.py | """Tests for HomematicIP Cloud siren."""
from homeassistant.components.siren import (
ATTR_AVAILABLE_TONES,
ATTR_TONE,
ATTR_VOLUME_LEVEL,
SirenEntityFeature,
)
from homeassistant.const import ATTR_SUPPORTED_FEATURES, STATE_OFF
from homeassistant.core import HomeAssistant
from .helper import HomeFactory, async_manipulate_test_data, get_and_check_entity_basics
async def test_hmip_mp3_siren(
    hass: HomeAssistant, default_mock_hap_factory: HomeFactory
) -> None:
    """Exercise the HmIP-MP3P siren entity end to end."""
    entity_id = "siren.kombisignalmelder_siren"
    mock_hap = await default_mock_hap_factory.async_get_mock_hap(
        test_devices=["Kombisignalmelder"]
    )
    state, hmip_device = get_and_check_entity_basics(
        hass, mock_hap, entity_id, "Kombisignalmelder Siren", "HmIP-MP3P"
    )
    # The fixture reports playingFileActive=false, so the siren starts off.
    assert state.state == STATE_OFF
    expected_features = (
        SirenEntityFeature.TURN_ON
        | SirenEntityFeature.TURN_OFF
        | SirenEntityFeature.TONES
        | SirenEntityFeature.VOLUME_SET
    )
    assert state.attributes[ATTR_SUPPORTED_FEATURES] == expected_features
    assert len(state.attributes[ATTR_AVAILABLE_TONES]) == 253
    channel = hmip_device.functionalChannels[1]
    baseline_calls = len(channel.mock_calls)
    # turn_on with an explicit tone and volume maps tone 5 to SOUNDFILE_005.
    await hass.services.async_call(
        "siren",
        "turn_on",
        {
            "entity_id": entity_id,
            ATTR_TONE: 5,
            ATTR_VOLUME_LEVEL: 0.6,
        },
        blocking=True,
    )
    call_name, _, call_kwargs = channel.mock_calls[-1]
    assert call_name == "set_sound_file_volume_level_async"
    assert call_kwargs == {
        "sound_file": "SOUNDFILE_005",
        "volume_level": 0.6,
    }
    assert len(channel.mock_calls) == baseline_calls + 1
    # turn_on with tone 0 selects the internal sound at full volume.
    await hass.services.async_call(
        "siren",
        "turn_on",
        {"entity_id": entity_id, ATTR_TONE: 0},
        blocking=True,
    )
    assert channel.mock_calls[-1][2] == {
        "sound_file": "INTERNAL_SOUNDFILE",
        "volume_level": 1.0,
    }
    assert len(channel.mock_calls) == baseline_calls + 2
    # turn_off stops playback via stop_sound_async.
    await hass.services.async_call(
        "siren",
        "turn_off",
        {"entity_id": entity_id},
        blocking=True,
    )
    assert channel.mock_calls[-1][0] == "stop_sound_async"
    assert len(channel.mock_calls) == baseline_calls + 3
    # Flipping playingFileActive on the channel turns the entity on.
    await async_manipulate_test_data(
        hass, hmip_device, "playingFileActive", True, channel=1
    )
    state = hass.states.get(entity_id)
    assert state.state == "on"
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/homematicip_cloud/test_siren.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/brands/const.py | """Constants for the Brands integration."""
from __future__ import annotations
from datetime import timedelta
import re
from typing import Final
from aiohttp import ClientTimeout
DOMAIN: Final = "brands"

# CDN endpoint and request settings.
BRANDS_CDN_URL: Final = "https://brands.home-assistant.io"
CDN_TIMEOUT: Final = ClientTimeout(total=10)
PLACEHOLDER: Final = "_placeholder"

# Caching: cached images count as fresh for 30 days (value in seconds).
CACHE_TTL: Final = 60 * 60 * 24 * 30

# How often the rotating access token is replaced.
TOKEN_CHANGE_INTERVAL: Final = timedelta(minutes=30)

# Validation patterns for URL path segments.
CATEGORY_RE: Final = re.compile(r"^[a-z0-9_]+$")
HARDWARE_IMAGE_RE: Final = re.compile(r"^[a-z0-9_-]+\.png$")

# Fallback chains for image resolution, mirroring the brands CDN build logic.
# When a requested image is not found, each fallback is tried in order.
IMAGE_FALLBACKS: Final[dict[str, list[str]]] = {
    "logo.png": ["icon.png"],
    "icon@2x.png": ["icon.png"],
    "logo@2x.png": ["logo.png", "icon.png"],
    "dark_icon.png": ["icon.png"],
    "dark_logo.png": ["dark_icon.png", "logo.png", "icon.png"],
    "dark_icon@2x.png": ["icon@2x.png", "icon.png"],
    "dark_logo@2x.png": [
        "dark_icon@2x.png",
        "logo@2x.png",
        "logo.png",
        "icon.png",
    ],
}

# Every image name the views will serve: icon.png plus every image that has a
# fallback chain defined above (the same eight names as the CDN build emits).
ALLOWED_IMAGES: Final = frozenset(IMAGE_FALLBACKS) | {"icon.png"}
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/brands/const.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
home-assistant/core:tests/components/brands/test_init.py | """Tests for the Brands integration."""
from datetime import timedelta
from http import HTTPStatus
import os
from pathlib import Path
import time
from unittest.mock import patch
from aiohttp import ClientError
from freezegun.api import FrozenDateTimeFactory
import pytest
from homeassistant.components.brands.const import (
BRANDS_CDN_URL,
CACHE_TTL,
DOMAIN,
TOKEN_CHANGE_INTERVAL,
)
from homeassistant.core import HomeAssistant
from homeassistant.loader import Integration
from homeassistant.setup import async_setup_component
from tests.common import async_fire_time_changed
from tests.test_util.aiohttp import AiohttpClientMocker
from tests.typing import ClientSessionGenerator, WebSocketGenerator
# Minimal fake image payload: the 8-byte PNG signature followed by filler data.
FAKE_PNG = b"\x89PNG\r\n\x1a\nfakeimagedata"
@pytest.fixture(autouse=True)
async def setup_brands(hass: HomeAssistant) -> None:
    """Set up the brands integration (and its http dependency) for all tests."""
    for component, config in (("http", {"http": {}}), (DOMAIN, {})):
        assert await async_setup_component(hass, component, config)
def _create_custom_integration(
    hass: HomeAssistant,
    domain: str,
    *,
    has_branding: bool = False,
) -> Integration:
    """Build a mock custom integration, optionally with a top-level brand dir."""
    manifest = {
        "name": domain,
        "domain": domain,
        "config_flow": False,
        "dependencies": [],
        "requirements": [],
        "version": "1.0.0",
    }
    # "brand" in the top-level file set is what marks the integration as branded.
    file_names = {"__init__.py", "manifest.json"}
    if has_branding:
        file_names |= {"brand"}
    return Integration(
        hass,
        f"custom_components.{domain}",
        Path(hass.config.config_dir) / "custom_components" / domain,
        manifest,
        file_names,
    )
# ------------------------------------------------------------------
# Integration view: /api/brands/integration/{domain}/{image}
# ------------------------------------------------------------------
async def test_integration_view_serves_from_cdn(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test serving an integration brand image from the CDN."""
    aioclient_mock.get(f"{BRANDS_CDN_URL}/brands/hue/icon.png", content=FAKE_PNG)
    api_client = await hass_client()
    response = await api_client.get("/api/brands/integration/hue/icon.png")
    # Image is proxied through with the PNG content type.
    assert response.status == HTTPStatus.OK
    assert response.content_type == "image/png"
    assert await response.read() == FAKE_PNG
async def test_integration_view_default_placeholder_fallback(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that CDN 404 serves placeholder by default."""
    # Brand image is missing on the CDN, but the placeholder asset exists.
    aioclient_mock.get(
        f"{BRANDS_CDN_URL}/brands/nonexistent/icon.png", status=HTTPStatus.NOT_FOUND
    )
    aioclient_mock.get(
        f"{BRANDS_CDN_URL}/_/_placeholder/icon.png", content=FAKE_PNG
    )
    api_client = await hass_client()
    response = await api_client.get("/api/brands/integration/nonexistent/icon.png")
    # Without placeholder=no, the placeholder image is served instead of a 404.
    assert response.status == HTTPStatus.OK
    assert await response.read() == FAKE_PNG
async def test_integration_view_no_placeholder(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that CDN 404 returns 404 when placeholder=no is set."""
    aioclient_mock.get(
        f"{BRANDS_CDN_URL}/brands/nonexistent/icon.png", status=HTTPStatus.NOT_FOUND
    )
    api_client = await hass_client()
    # placeholder=no disables the placeholder fallback entirely.
    response = await api_client.get(
        "/api/brands/integration/nonexistent/icon.png?placeholder=no"
    )
    assert response.status == HTTPStatus.NOT_FOUND
async def test_integration_view_invalid_domain(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
) -> None:
    """Test that invalid domain names return 404."""
    api_client = await hass_client()
    # Each of these violates the domain naming rules in a different way.
    for bad_domain in (
        "INVALID",
        "../etc",
        "has spaces",
        "_leading",
        "trailing_",
        "double__under",
    ):
        response = await api_client.get(
            f"/api/brands/integration/{bad_domain}/icon.png"
        )
        assert response.status == HTTPStatus.NOT_FOUND, bad_domain
async def test_integration_view_invalid_image(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
) -> None:
    """Test that invalid image filenames return 404."""
    api_client = await hass_client()
    # Wrong extension, path traversal, and an unknown PNG name are all rejected.
    for bad_image in ("malicious.jpg", "../../etc/passwd", "notallowed.png"):
        response = await api_client.get(f"/api/brands/integration/hue/{bad_image}")
        assert response.status == HTTPStatus.NOT_FOUND, bad_image
async def test_integration_view_all_allowed_images(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that all allowed image filenames are accepted."""
    image_names = (
        "icon.png",
        "logo.png",
        "icon@2x.png",
        "logo@2x.png",
        "dark_icon.png",
        "dark_logo.png",
        "dark_icon@2x.png",
        "dark_logo@2x.png",
    )
    # Register a CDN response for every allowed name up front.
    for image_name in image_names:
        aioclient_mock.get(
            f"{BRANDS_CDN_URL}/brands/hue/{image_name}", content=FAKE_PNG
        )
    api_client = await hass_client()
    for image_name in image_names:
        response = await api_client.get(f"/api/brands/integration/hue/{image_name}")
        assert response.status == HTTPStatus.OK, f"Failed for {image_name}"
async def test_integration_view_cdn_error_returns_none(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that CDN connection errors result in 404 with placeholder=no."""
    # Simulate a network-level failure when contacting the CDN.
    aioclient_mock.get(f"{BRANDS_CDN_URL}/brands/broken/icon.png", exc=ClientError())
    api_client = await hass_client()
    response = await api_client.get(
        "/api/brands/integration/broken/icon.png?placeholder=no"
    )
    assert response.status == HTTPStatus.NOT_FOUND
async def test_integration_view_cdn_unexpected_status(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that unexpected CDN status codes result in 404 with placeholder=no."""
    # A 500 from the CDN is treated the same as a missing image.
    aioclient_mock.get(
        f"{BRANDS_CDN_URL}/brands/broken/icon.png",
        status=HTTPStatus.INTERNAL_SERVER_ERROR,
    )
    api_client = await hass_client()
    response = await api_client.get(
        "/api/brands/integration/broken/icon.png?placeholder=no"
    )
    assert response.status == HTTPStatus.NOT_FOUND
# ------------------------------------------------------------------
# Disk caching
# ------------------------------------------------------------------
async def test_disk_cache_hit(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that a second request is served from disk cache."""
    aioclient_mock.get(f"{BRANDS_CDN_URL}/brands/hue/icon.png", content=FAKE_PNG)
    api_client = await hass_client()
    # First request goes out to the CDN and populates the cache.
    response = await api_client.get("/api/brands/integration/hue/icon.png")
    assert response.status == HTTPStatus.OK
    assert aioclient_mock.call_count == 1
    # Second request must be answered from disk without a new CDN call.
    response = await api_client.get("/api/brands/integration/hue/icon.png")
    assert response.status == HTTPStatus.OK
    assert await response.read() == FAKE_PNG
    assert aioclient_mock.call_count == 1
async def test_disk_cache_404_marker(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that 404s are cached as empty files."""
    aioclient_mock.get(
        f"{BRANDS_CDN_URL}/brands/nothing/icon.png", status=HTTPStatus.NOT_FOUND
    )
    api_client = await hass_client()
    # First request hits the CDN, which 404s; that result is cached on disk.
    response = await api_client.get(
        "/api/brands/integration/nothing/icon.png?placeholder=no"
    )
    assert response.status == HTTPStatus.NOT_FOUND
    assert aioclient_mock.call_count == 1
    # Second request is answered from the cached 404 marker — no CDN call.
    response = await api_client.get(
        "/api/brands/integration/nothing/icon.png?placeholder=no"
    )
    assert response.status == HTTPStatus.NOT_FOUND
    assert aioclient_mock.call_count == 1
async def test_stale_cache_triggers_background_refresh(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that stale cache entries trigger background refresh."""
    aioclient_mock.get(
        f"{BRANDS_CDN_URL}/brands/hue/icon.png",
        content=FAKE_PNG,
    )
    client = await hass_client()
    # Prime the cache
    resp = await client.get("/api/brands/integration/hue/icon.png")
    assert resp.status == HTTPStatus.OK
    assert aioclient_mock.call_count == 1
    # Make the cache stale by backdating the file mtime
    # (staleness is judged from mtime vs CACHE_TTL, so utime is sufficient)
    cache_path = (
        Path(hass.config.cache_path(DOMAIN)) / "integrations" / "hue" / "icon.png"
    )
    assert cache_path.is_file()
    stale_time = time.time() - CACHE_TTL - 1
    os.utime(cache_path, (stale_time, stale_time))
    # Request with stale cache should still return cached data
    # but trigger a background refresh (serve-stale-while-revalidate)
    resp = await client.get("/api/brands/integration/hue/icon.png")
    assert resp.status == HTTPStatus.OK
    assert await resp.read() == FAKE_PNG
    # Wait for the background task to complete
    await hass.async_block_till_done()
    # Background refresh should have fetched from CDN again
    assert aioclient_mock.call_count == 2
async def test_stale_cache_404_marker_with_placeholder(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that stale cached 404 serves placeholder by default."""
    aioclient_mock.get(
        f"{BRANDS_CDN_URL}/brands/gone/icon.png",
        status=HTTPStatus.NOT_FOUND,
    )
    aioclient_mock.get(
        f"{BRANDS_CDN_URL}/_/_placeholder/icon.png",
        content=FAKE_PNG,
    )
    client = await hass_client()
    # First request caches the 404 (with placeholder=no)
    resp = await client.get("/api/brands/integration/gone/icon.png?placeholder=no")
    assert resp.status == HTTPStatus.NOT_FOUND
    assert aioclient_mock.call_count == 1
    # Make the cache stale
    # (backdate the 404 marker's mtime past CACHE_TTL)
    cache_path = (
        Path(hass.config.cache_path(DOMAIN)) / "integrations" / "gone" / "icon.png"
    )
    assert cache_path.is_file()
    stale_time = time.time() - CACHE_TTL - 1
    os.utime(cache_path, (stale_time, stale_time))
    # Stale 404 with default placeholder serves the placeholder
    resp = await client.get("/api/brands/integration/gone/icon.png")
    assert resp.status == HTTPStatus.OK
    assert await resp.read() == FAKE_PNG
async def test_stale_cache_404_marker_no_placeholder(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that stale cached 404 with placeholder=no returns 404."""
    aioclient_mock.get(
        f"{BRANDS_CDN_URL}/brands/gone/icon.png",
        status=HTTPStatus.NOT_FOUND,
    )
    client = await hass_client()
    # First request caches the 404
    resp = await client.get("/api/brands/integration/gone/icon.png?placeholder=no")
    assert resp.status == HTTPStatus.NOT_FOUND
    assert aioclient_mock.call_count == 1
    # Make the cache stale
    # (backdate the 404 marker's mtime past CACHE_TTL)
    cache_path = (
        Path(hass.config.cache_path(DOMAIN)) / "integrations" / "gone" / "icon.png"
    )
    assert cache_path.is_file()
    stale_time = time.time() - CACHE_TTL - 1
    os.utime(cache_path, (stale_time, stale_time))
    # Stale 404 with placeholder=no still returns 404
    resp = await client.get("/api/brands/integration/gone/icon.png?placeholder=no")
    assert resp.status == HTTPStatus.NOT_FOUND
    # Background refresh should have been triggered
    # (stale entries are refreshed even when the cached value was a 404)
    await hass.async_block_till_done()
    assert aioclient_mock.call_count == 2
# ------------------------------------------------------------------
# Custom integration brand files
# ------------------------------------------------------------------
async def test_custom_integration_brand_served(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that custom integration brand files are served."""
    mock_integration = _create_custom_integration(hass, "my_custom", has_branding=True)
    # Write the brand image into the integration's brand directory.
    brand_path = Path(mock_integration.file_path) / "brand"
    brand_path.mkdir(parents=True, exist_ok=True)
    (brand_path / "icon.png").write_bytes(FAKE_PNG)
    with patch(
        "homeassistant.components.brands.async_get_custom_components",
        return_value={"my_custom": mock_integration},
    ):
        api_client = await hass_client()
        response = await api_client.get("/api/brands/integration/my_custom/icon.png")
        assert response.status == HTTPStatus.OK
        assert await response.read() == FAKE_PNG
    # Everything was served locally; the CDN was never contacted.
    assert aioclient_mock.call_count == 0
async def test_custom_integration_no_brand_falls_through(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that custom integration without brand falls through to CDN."""
    mock_integration = _create_custom_integration(hass, "my_custom", has_branding=False)
    aioclient_mock.get(
        f"{BRANDS_CDN_URL}/brands/my_custom/icon.png", content=FAKE_PNG
    )
    with patch(
        "homeassistant.components.brands.async_get_custom_components",
        return_value={"my_custom": mock_integration},
    ):
        api_client = await hass_client()
        response = await api_client.get("/api/brands/integration/my_custom/icon.png")
        assert response.status == HTTPStatus.OK
    # No local brand dir, so the image came from the CDN.
    assert aioclient_mock.call_count == 1
async def test_custom_integration_brand_missing_file_falls_through(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that custom integration with brand dir but missing file falls through."""
    mock_integration = _create_custom_integration(hass, "my_custom", has_branding=True)
    # Brand directory exists but does not contain the requested image.
    brand_path = Path(mock_integration.file_path) / "brand"
    brand_path.mkdir(parents=True, exist_ok=True)
    aioclient_mock.get(
        f"{BRANDS_CDN_URL}/brands/my_custom/icon.png", content=FAKE_PNG
    )
    with patch(
        "homeassistant.components.brands.async_get_custom_components",
        return_value={"my_custom": mock_integration},
    ):
        api_client = await hass_client()
        response = await api_client.get("/api/brands/integration/my_custom/icon.png")
        assert response.status == HTTPStatus.OK
    # The missing local file means the CDN was consulted once.
    assert aioclient_mock.call_count == 1
async def test_custom_integration_takes_priority_over_cache(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that custom integration brand takes priority over disk cache."""
    custom_png = b"\x89PNGcustom"
    # Prime the CDN cache first
    # (the first request stores FAKE_PNG in the disk cache for my_custom)
    aioclient_mock.get(
        f"{BRANDS_CDN_URL}/brands/my_custom/icon.png",
        content=FAKE_PNG,
    )
    client = await hass_client()
    resp = await client.get("/api/brands/integration/my_custom/icon.png")
    assert resp.status == HTTPStatus.OK
    assert await resp.read() == FAKE_PNG
    # Now create a custom integration with brand
    # (its brand file holds custom_png, different from the cached bytes)
    custom = _create_custom_integration(hass, "my_custom", has_branding=True)
    brand_dir = Path(custom.file_path) / "brand"
    brand_dir.mkdir(parents=True, exist_ok=True)
    (brand_dir / "icon.png").write_bytes(custom_png)
    with patch(
        "homeassistant.components.brands.async_get_custom_components",
        return_value={"my_custom": custom},
    ):
        resp = await client.get("/api/brands/integration/my_custom/icon.png")
        # Custom integration brand takes priority
        assert resp.status == HTTPStatus.OK
        assert await resp.read() == custom_png
# ------------------------------------------------------------------
# Custom integration image fallback chains
# ------------------------------------------------------------------
async def test_custom_integration_logo_falls_back_to_icon(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that requesting logo.png falls back to icon.png for custom integrations."""
    mock_integration = _create_custom_integration(hass, "my_custom", has_branding=True)
    # Only icon.png is present locally.
    brand_path = Path(mock_integration.file_path) / "brand"
    brand_path.mkdir(parents=True, exist_ok=True)
    (brand_path / "icon.png").write_bytes(FAKE_PNG)
    with patch(
        "homeassistant.components.brands.async_get_custom_components",
        return_value={"my_custom": mock_integration},
    ):
        api_client = await hass_client()
        response = await api_client.get("/api/brands/integration/my_custom/logo.png")
        assert response.status == HTTPStatus.OK
        assert await response.read() == FAKE_PNG
    # Fallback was resolved locally — no CDN traffic.
    assert aioclient_mock.call_count == 0
async def test_custom_integration_dark_icon_falls_back_to_icon(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that dark_icon.png falls back to icon.png for custom integrations."""
    mock_integration = _create_custom_integration(hass, "my_custom", has_branding=True)
    # Only the plain icon exists; the dark variant must fall back to it.
    brand_path = Path(mock_integration.file_path) / "brand"
    brand_path.mkdir(parents=True, exist_ok=True)
    (brand_path / "icon.png").write_bytes(FAKE_PNG)
    with patch(
        "homeassistant.components.brands.async_get_custom_components",
        return_value={"my_custom": mock_integration},
    ):
        api_client = await hass_client()
        response = await api_client.get(
            "/api/brands/integration/my_custom/dark_icon.png"
        )
        assert response.status == HTTPStatus.OK
        assert await response.read() == FAKE_PNG
    assert aioclient_mock.call_count == 0
async def test_custom_integration_dark_logo_falls_back_through_chain(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that dark_logo.png walks the full fallback chain."""
    mock_integration = _create_custom_integration(hass, "my_custom", has_branding=True)
    brand_path = Path(mock_integration.file_path) / "brand"
    brand_path.mkdir(parents=True, exist_ok=True)
    # Only icon.png exists; the chain dark_logo → dark_icon → logo → icon
    # must walk all the way down to it.
    (brand_path / "icon.png").write_bytes(FAKE_PNG)
    with patch(
        "homeassistant.components.brands.async_get_custom_components",
        return_value={"my_custom": mock_integration},
    ):
        api_client = await hass_client()
        response = await api_client.get(
            "/api/brands/integration/my_custom/dark_logo.png"
        )
        assert response.status == HTTPStatus.OK
        assert await response.read() == FAKE_PNG
    assert aioclient_mock.call_count == 0
async def test_custom_integration_dark_logo_prefers_dark_icon(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that dark_logo.png prefers dark_icon.png over icon.png."""
    dark_icon_data = b"\x89PNGdarkicon"
    mock_integration = _create_custom_integration(hass, "my_custom", has_branding=True)
    brand_path = Path(mock_integration.file_path) / "brand"
    brand_path.mkdir(parents=True, exist_ok=True)
    # Both candidates exist; dark_icon.png is earlier in the fallback chain.
    (brand_path / "icon.png").write_bytes(FAKE_PNG)
    (brand_path / "dark_icon.png").write_bytes(dark_icon_data)
    with patch(
        "homeassistant.components.brands.async_get_custom_components",
        return_value={"my_custom": mock_integration},
    ):
        api_client = await hass_client()
        response = await api_client.get(
            "/api/brands/integration/my_custom/dark_logo.png"
        )
        assert response.status == HTTPStatus.OK
        assert await response.read() == dark_icon_data
async def test_custom_integration_icon2x_falls_back_to_icon(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that icon@2x.png falls back to icon.png."""
    mock_integration = _create_custom_integration(hass, "my_custom", has_branding=True)
    # Only the 1x icon exists; @2x requests must fall back to it.
    brand_path = Path(mock_integration.file_path) / "brand"
    brand_path.mkdir(parents=True, exist_ok=True)
    (brand_path / "icon.png").write_bytes(FAKE_PNG)
    with patch(
        "homeassistant.components.brands.async_get_custom_components",
        return_value={"my_custom": mock_integration},
    ):
        api_client = await hass_client()
        response = await api_client.get(
            "/api/brands/integration/my_custom/icon@2x.png"
        )
        assert response.status == HTTPStatus.OK
        assert await response.read() == FAKE_PNG
    assert aioclient_mock.call_count == 0
async def test_custom_integration_logo2x_falls_back_to_logo_then_icon(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that logo@2x.png falls back to logo.png then icon.png."""
    logo_data = b"\x89PNGlogodata"
    mock_integration = _create_custom_integration(hass, "my_custom", has_branding=True)
    brand_path = Path(mock_integration.file_path) / "brand"
    brand_path.mkdir(parents=True, exist_ok=True)
    # Both logo.png and icon.png exist; logo.png is first in the chain.
    (brand_path / "icon.png").write_bytes(FAKE_PNG)
    (brand_path / "logo.png").write_bytes(logo_data)
    with patch(
        "homeassistant.components.brands.async_get_custom_components",
        return_value={"my_custom": mock_integration},
    ):
        api_client = await hass_client()
        response = await api_client.get(
            "/api/brands/integration/my_custom/logo@2x.png"
        )
        assert response.status == HTTPStatus.OK
        assert await response.read() == logo_data
async def test_custom_integration_no_fallback_match_falls_through_to_cdn(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that if no fallback image exists locally, we fall through to CDN."""
    mock_integration = _create_custom_integration(hass, "my_custom", has_branding=True)
    # Brand directory exists but is completely empty — not even icon.png.
    brand_path = Path(mock_integration.file_path) / "brand"
    brand_path.mkdir(parents=True, exist_ok=True)
    aioclient_mock.get(
        f"{BRANDS_CDN_URL}/brands/my_custom/icon.png", content=FAKE_PNG
    )
    with patch(
        "homeassistant.components.brands.async_get_custom_components",
        return_value={"my_custom": mock_integration},
    ):
        api_client = await hass_client()
        response = await api_client.get("/api/brands/integration/my_custom/icon.png")
        assert response.status == HTTPStatus.OK
    # With nothing usable locally, the CDN was consulted once.
    assert aioclient_mock.call_count == 1
# ------------------------------------------------------------------
# Hardware view: /api/brands/hardware/{category}/{image:.+}
# ------------------------------------------------------------------
async def test_hardware_view_serves_from_cdn(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test serving a hardware brand image from CDN."""
    aioclient_mock.get(
        f"{BRANDS_CDN_URL}/hardware/boards/green.png", content=FAKE_PNG
    )
    api_client = await hass_client()
    response = await api_client.get("/api/brands/hardware/boards/green.png")
    assert response.status == HTTPStatus.OK
    assert await response.read() == FAKE_PNG
async def test_hardware_view_invalid_category(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
) -> None:
    """Test that invalid category names return 404."""
    api_client = await hass_client()
    # Uppercase violates the lowercase-only category pattern.
    response = await api_client.get("/api/brands/hardware/INVALID/board.png")
    assert response.status == HTTPStatus.NOT_FOUND
async def test_hardware_view_invalid_image_extension(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
) -> None:
    """Test that non-png image names return 404."""
    api_client = await hass_client()
    # Only .png files are allowed for hardware images.
    response = await api_client.get("/api/brands/hardware/boards/image.jpg")
    assert response.status == HTTPStatus.NOT_FOUND
async def test_hardware_view_invalid_image_characters(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
) -> None:
    """Test that image names with invalid characters return 404."""
    api_client = await hass_client()
    # Uppercase letters and path traversal are both rejected.
    for bad_image in ("Bad-Name.png", "../etc.png"):
        response = await api_client.get(f"/api/brands/hardware/boards/{bad_image}")
        assert response.status == HTTPStatus.NOT_FOUND, bad_image
# ------------------------------------------------------------------
# CDN timeout handling
# ------------------------------------------------------------------
async def test_cdn_timeout_returns_404(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that CDN timeout results in 404 with placeholder=no."""
    # The CDN request never completes within the timeout.
    aioclient_mock.get(f"{BRANDS_CDN_URL}/brands/slow/icon.png", exc=TimeoutError())
    api_client = await hass_client()
    response = await api_client.get(
        "/api/brands/integration/slow/icon.png?placeholder=no"
    )
    assert response.status == HTTPStatus.NOT_FOUND
# ------------------------------------------------------------------
# Authentication
# ------------------------------------------------------------------
async def test_authenticated_request(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that authenticated requests succeed."""
    aioclient_mock.get(f"{BRANDS_CDN_URL}/brands/hue/icon.png", content=FAKE_PNG)
    # hass_client attaches valid credentials automatically.
    api_client = await hass_client()
    response = await api_client.get("/api/brands/integration/hue/icon.png")
    assert response.status == HTTPStatus.OK
async def test_token_query_param_authentication(
    hass: HomeAssistant,
    hass_client_no_auth: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that a valid access token in query param authenticates."""
    aioclient_mock.get(f"{BRANDS_CDN_URL}/brands/hue/icon.png", content=FAKE_PNG)
    # The newest token sits at the right end of the deque in hass.data.
    access_token = hass.data[DOMAIN][-1]
    api_client = await hass_client_no_auth()
    response = await api_client.get(
        f"/api/brands/integration/hue/icon.png?token={access_token}"
    )
    assert response.status == HTTPStatus.OK
    assert await response.read() == FAKE_PNG
async def test_unauthenticated_request_forbidden(
    hass: HomeAssistant,
    hass_client_no_auth: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
) -> None:
    """Test that unauthenticated requests are forbidden."""
    api_client = await hass_client_no_auth()
    # Both the integration and the hardware views require authentication.
    for url in (
        "/api/brands/integration/hue/icon.png",
        "/api/brands/hardware/boards/green.png",
    ):
        response = await api_client.get(url)
        assert response.status == HTTPStatus.FORBIDDEN, url
async def test_invalid_token_forbidden(
    hass: HomeAssistant,
    hass_client_no_auth: ClientSessionGenerator,
) -> None:
    """Test that an invalid access token in query param is forbidden."""
    api_client = await hass_client_no_auth()
    response = await api_client.get(
        "/api/brands/integration/hue/icon.png?token=invalid_token"
    )
    assert response.status == HTTPStatus.FORBIDDEN
async def test_invalid_bearer_token_unauthorized(
    hass: HomeAssistant,
    hass_client_no_auth: ClientSessionGenerator,
) -> None:
    """Test that an invalid Bearer token returns unauthorized."""
    api_client = await hass_client_no_auth()
    # A malformed Bearer header yields 401, not the 403 used for missing auth.
    bad_headers = {"Authorization": "Bearer invalid_token"}
    response = await api_client.get(
        "/api/brands/integration/hue/icon.png", headers=bad_headers
    )
    assert response.status == HTTPStatus.UNAUTHORIZED
async def test_token_rotation(
    hass: HomeAssistant,
    hass_client_no_auth: ClientSessionGenerator,
    aioclient_mock: AiohttpClientMocker,
    freezer: FrozenDateTimeFactory,
) -> None:
    """Test that access tokens rotate over time."""
    aioclient_mock.get(
        f"{BRANDS_CDN_URL}/brands/hue/icon.png",
        content=FAKE_PNG,
    )
    # Newest token is the rightmost element of the deque in hass.data
    original_token = hass.data[DOMAIN][-1]
    client = await hass_client_no_auth()
    # Original token works
    resp = await client.get(
        f"/api/brands/integration/hue/icon.png?token={original_token}"
    )
    assert resp.status == HTTPStatus.OK
    # Trigger token rotation
    # (advance past TOKEN_CHANGE_INTERVAL and fire the time-changed event)
    freezer.tick(TOKEN_CHANGE_INTERVAL + timedelta(seconds=1))
    async_fire_time_changed(hass)
    await hass.async_block_till_done()
    # Deque now contains a different newest token
    new_token = hass.data[DOMAIN][-1]
    assert new_token != original_token
    # New token works
    resp = await client.get(f"/api/brands/integration/hue/icon.png?token={new_token}")
    assert resp.status == HTTPStatus.OK
# ------------------------------------------------------------------
# WebSocket API
# ------------------------------------------------------------------
async def test_ws_access_token(
    hass: HomeAssistant,
    hass_ws_client: WebSocketGenerator,
) -> None:
    """Test the brands/access_token WebSocket command."""
    ws_client = await hass_ws_client(hass)
    await ws_client.send_json({"id": 1, "type": "brands/access_token"})
    message = await ws_client.receive_json()
    assert message["success"]
    # The command returns the newest token from the rotating deque.
    assert message["result"]["token"] == hass.data[DOMAIN][-1]
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/brands/test_init.py",
"license": "Apache License 2.0",
"lines": 712,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/sonarr/helpers.py | """Helper functions for Sonarr."""
from typing import Any
from aiopyarr import (
Diskspace,
SonarrCalendar,
SonarrEpisode,
SonarrQueue,
SonarrSeries,
SonarrWantedMissing,
)
def format_queue_item(item: Any, base_url: str | None = None) -> dict[str, Any]:
    """Format a single queue item for a service response.

    Args:
        item: A queue record from Sonarr; optional fields are read with
            getattr so partially-populated records are tolerated.
        base_url: Sonarr base URL, used to build absolute image URLs when a
            record only carries an instance-local image path.

    Returns:
        Dictionary describing the download, its progress and metadata.
    """
    # Calculate progress; a zero-byte item counts as fully remaining to
    # avoid division by zero.
    remaining = 1 if item.size == 0 else item.sizeleft / item.size
    remaining_pct = 100 * (1 - remaining)
    result: dict[str, Any] = {
        "id": item.id,
        "series_id": getattr(item, "seriesId", None),
        "episode_id": getattr(item, "episodeId", None),
        "title": item.series.title,
        "download_title": item.title,
        "season_number": getattr(item, "seasonNumber", None),
        "progress": f"{remaining_pct:.2f}%",
        "size": item.size,
        "size_left": item.sizeleft,
        "status": item.status,
        "tracked_download_status": getattr(item, "trackedDownloadStatus", None),
        "tracked_download_state": getattr(item, "trackedDownloadState", None),
        "download_client": getattr(item, "downloadClient", None),
        "download_id": getattr(item, "downloadId", None),
        "indexer": getattr(item, "indexer", None),
        "protocol": str(getattr(item, "protocol", None)),
        "episode_has_file": getattr(item, "episodeHasFile", None),
        "estimated_completion_time": str(
            getattr(item, "estimatedCompletionTime", None)
        ),
        "time_left": str(getattr(item, "timeleft", None)),
    }
    # Add episode information from the episode object if available
    if episode := getattr(item, "episode", None):
        result["episode_number"] = getattr(episode, "episodeNumber", None)
        result["episode_title"] = getattr(episode, "title", None)
        # Add formatted identifier like the sensor uses (if we have both season and episode)
        if result["season_number"] is not None and result["episode_number"] is not None:
            result["episode_identifier"] = (
                f"S{result['season_number']:02d}E{result['episode_number']:02d}"
            )
    # Add quality information if available
    if quality := getattr(item, "quality", None):
        result["quality"] = quality.quality.name
    # Add language information if available
    if languages := getattr(item, "languages", None):
        result["languages"] = [lang["name"] for lang in languages]
    # Add custom format score if available. A score of 0 is a valid value,
    # so only skip the field when the attribute is missing entirely
    # (the previous truthiness check silently dropped a 0 score).
    custom_format_score = getattr(item, "customFormatScore", None)
    if custom_format_score is not None:
        result["custom_format_score"] = custom_format_score
    # Add series images if available
    if images := getattr(item.series, "images", None):
        result["images"] = {}
        for image in images:
            cover_type = image.coverType
            # Prefer remoteUrl (public TVDB URL) over local path
            if remote_url := getattr(image, "remoteUrl", None):
                result["images"][cover_type] = remote_url
            elif base_url and (url := getattr(image, "url", None)):
                result["images"][cover_type] = f"{base_url.rstrip('/')}{url}"
    return result
def format_queue(
    queue: SonarrQueue, base_url: str | None = None
) -> dict[str, dict[str, Any]]:
    """Format queue for service response."""
    # Bucket records sharing a download ID so season packs collapse into a
    # single entry; records without a download ID are skipped.
    grouped: dict[str, list[Any]] = {}
    for record in queue.records:
        if download_id := getattr(record, "downloadId", None):
            grouped.setdefault(download_id, []).append(record)

    shows = {}
    for bucket in grouped.values():
        # The first record in the bucket supplies the main download data.
        first = bucket[0]
        entry = format_queue_item(first, base_url)
        if len(bucket) > 1:
            # Season pack: summarize the episode span across the bucket.
            numbers = sorted(
                getattr(rec.episode, "episodeNumber", 0)
                for rec in bucket
                if hasattr(rec, "episode")
            )
            if numbers:
                entry["is_season_pack"] = True
                entry["episode_count"] = len(numbers)
                entry["episode_range"] = f"E{min(numbers):02d}-E{max(numbers):02d}"
                # Re-label the identifier so it reads as a pack, not one episode.
                if entry.get("season_number") is not None:
                    entry["episode_identifier"] = (
                        f"S{entry['season_number']:02d} "
                        f"({len(numbers)} episodes)"
                    )
        shows[first.title] = entry
    return shows
def format_series(
    series_list: list[SonarrSeries], base_url: str | None = None
) -> dict[str, dict[str, Any]]:
    """Format series list for service response."""
    shows: dict[str, dict[str, Any]] = {}
    for show in series_list:
        entry: dict[str, Any] = {
            "id": show.id,
            "year": show.year,
            "tvdb_id": getattr(show, "tvdbId", None),
            "imdb_id": getattr(show, "imdbId", None),
            "status": show.status,
            "monitored": show.monitored,
        }
        # Mirror the sensor's "x/y Episodes" summary when statistics exist.
        stats = getattr(show, "statistics", None)
        if stats:
            file_count = getattr(stats, "episodeFileCount", None)
            total_count = getattr(stats, "episodeCount", None)
            entry["episode_file_count"] = file_count
            entry["episode_count"] = total_count
            if file_count is None or total_count is None:
                entry["episodes_info"] = None
            else:
                entry["episodes_info"] = f"{file_count}/{total_count} Episodes"
        # Collect artwork, preferring the public TVDB URL over the
        # instance-local path prefixed with the configured base URL.
        artwork = getattr(show, "images", None)
        if artwork:
            collected: dict[str, str] = {}
            for img in artwork:
                if remote := getattr(img, "remoteUrl", None):
                    collected[img.coverType] = remote
                elif base_url and (local := getattr(img, "url", None)):
                    collected[img.coverType] = f"{base_url.rstrip('/')}{local}"
            entry["images"] = collected
        shows[show.title] = entry
    return shows
# Space unit conversion factors (divisors from bytes). Keys are lowercase;
# lookups normalize the requested unit with str.lower() so the user-facing
# spellings used by the service schema ("KB", "GiB", ...) resolve correctly.
SPACE_UNITS: dict[str, int] = {
    "bytes": 1,
    "kb": 1000,
    "kib": 1024,
    "mb": 1000**2,
    "mib": 1024**2,
    "gb": 1000**3,
    "gib": 1024**3,
    "tb": 1000**4,
    "tib": 1024**4,
    "pb": 1000**5,
    "pib": 1024**5,
}


def format_diskspace(
    disks: list[Diskspace], space_unit: str = "bytes"
) -> dict[str, dict[str, Any]]:
    """Format diskspace for service response.

    Args:
        disks: List of disk space objects from Sonarr.
        space_unit: Unit for space values (bytes, kb, kib, mb, mib, gb, gib,
            tb, tib, pb, pib). Matched case-insensitively; unknown units
            fall back to bytes.

    Returns:
        Dictionary of disk information keyed by path.
    """
    result = {}
    # Normalize case: service callers pass display spellings such as "GiB",
    # which previously missed the lowercase table and silently fell back to
    # bytes while still labeling the values with the requested unit.
    divisor = SPACE_UNITS.get(space_unit.lower(), 1)
    for disk in disks:
        path = disk.path
        free_space = disk.freeSpace / divisor
        total_space = disk.totalSpace / divisor
        result[path] = {
            "path": path,
            "label": getattr(disk, "label", None) or "",
            "free_space": free_space,
            "total_space": total_space,
            # Echo the caller's original spelling in the payload.
            "unit": space_unit,
        }
    return result
def _format_series_images(series: Any, base_url: str | None = None) -> dict[str, str]:
    """Build a cover-type -> URL mapping for a series' artwork."""
    collected: dict[str, str] = {}
    for image in getattr(series, "images", None) or []:
        # Public TVDB URL is preferred; otherwise fall back to the
        # instance-local path prefixed with the configured base URL.
        remote = getattr(image, "remoteUrl", None)
        if remote:
            collected[image.coverType] = remote
            continue
        local = getattr(image, "url", None)
        if base_url and local:
            collected[image.coverType] = f"{base_url.rstrip('/')}{local}"
    return collected
def format_upcoming_item(
    episode: SonarrCalendar, base_url: str | None = None
) -> dict[str, Any]:
    """Format a single upcoming episode item."""
    season = episode.seasonNumber
    number = episode.episodeNumber
    result: dict[str, Any] = {
        "id": episode.id,
        "series_id": episode.seriesId,
        "season_number": season,
        "episode_number": number,
        "episode_identifier": f"S{season:02d}E{number:02d}",
        "title": episode.title,
        "air_date": str(getattr(episode, "airDate", None)),
        "air_date_utc": str(getattr(episode, "airDateUtc", None)),
        "overview": getattr(episode, "overview", None),
        "has_file": getattr(episode, "hasFile", False),
        "monitored": getattr(episode, "monitored", True),
        "runtime": getattr(episode, "runtime", None),
        "finale_type": getattr(episode, "finaleType", None),
    }
    # Enrich with parent-series details when the calendar entry carries them.
    series = getattr(episode, "series", None)
    if series:
        result.update(
            {
                "series_title": series.title,
                "series_year": getattr(series, "year", None),
                "series_tvdb_id": getattr(series, "tvdbId", None),
                "series_imdb_id": getattr(series, "imdbId", None),
                "series_status": getattr(series, "status", None),
                "network": getattr(series, "network", None),
                "images": _format_series_images(series, base_url),
            }
        )
    return result
def format_upcoming(
    calendar: list[SonarrCalendar], base_url: str | None = None
) -> dict[str, dict[str, Any]]:
    """Format upcoming calendar for service response."""
    result: dict[str, dict[str, Any]] = {}
    for entry in calendar:
        # Key on "<series title> SxxExx" so entries stay unique per episode.
        title = entry.series.title if hasattr(entry, "series") else "Unknown"
        tag = f"S{entry.seasonNumber:02d}E{entry.episodeNumber:02d}"
        result[f"{title} {tag}"] = format_upcoming_item(entry, base_url)
    return result
def format_wanted_item(item: Any, base_url: str | None = None) -> dict[str, Any]:
    """Format a single wanted episode item."""
    season = item.seasonNumber
    number = item.episodeNumber
    result: dict[str, Any] = {
        "id": item.id,
        "series_id": item.seriesId,
        "season_number": season,
        "episode_number": number,
        "episode_identifier": f"S{season:02d}E{number:02d}",
        "title": item.title,
        "air_date": str(getattr(item, "airDate", None)),
        "air_date_utc": str(getattr(item, "airDateUtc", None)),
        "overview": getattr(item, "overview", None),
        "has_file": getattr(item, "hasFile", False),
        "monitored": getattr(item, "monitored", True),
        "runtime": getattr(item, "runtime", None),
        "tvdb_id": getattr(item, "tvdbId", None),
    }
    # Enrich with parent-series details when the record carries them.
    series = getattr(item, "series", None)
    if series:
        result.update(
            {
                "series_title": series.title,
                "series_year": getattr(series, "year", None),
                "series_tvdb_id": getattr(series, "tvdbId", None),
                "series_imdb_id": getattr(series, "imdbId", None),
                "series_status": getattr(series, "status", None),
                "network": getattr(series, "network", None),
                "images": _format_series_images(series, base_url),
            }
        )
    return result
def format_wanted(
    wanted: SonarrWantedMissing, base_url: str | None = None
) -> dict[str, dict[str, Any]]:
    """Format wanted missing episodes for service response."""
    formatted: dict[str, dict[str, Any]] = {}
    for record in wanted.records:
        # Key on "<series title> SxxExx" so entries stay unique per episode.
        if hasattr(record, "series") and record.series:
            show_title = record.series.title
        else:
            show_title = "Unknown"
        tag = f"S{record.seasonNumber:02d}E{record.episodeNumber:02d}"
        formatted[f"{show_title} {tag}"] = format_wanted_item(record, base_url)
    return formatted
def format_episode(episode: SonarrEpisode) -> dict[str, Any]:
    """Format a single episode from a series."""
    season = episode.seasonNumber
    number = episode.episodeNumber
    result: dict[str, Any] = {
        "id": episode.id,
        "series_id": episode.seriesId,
        "tvdb_id": getattr(episode, "tvdbId", None),
        "season_number": season,
        "episode_number": number,
        "episode_identifier": f"S{season:02d}E{number:02d}",
        "title": episode.title,
        "air_date": str(getattr(episode, "airDate", None)),
        "air_date_utc": str(getattr(episode, "airDateUtc", None)),
        "has_file": getattr(episode, "hasFile", False),
        "monitored": getattr(episode, "monitored", False),
        "runtime": getattr(episode, "runtime", None),
        "episode_file_id": getattr(episode, "episodeFileId", None),
    }
    # Optional fields are only added when Sonarr supplies a truthy value.
    for key, attr in (("overview", "overview"), ("finale_type", "finaleType")):
        if value := getattr(episode, attr, None):
            result[key] = value
    return result
def format_episodes(
    episodes: list[SonarrEpisode], season_number: int | None = None
) -> dict[str, dict[str, Any]]:
    """Format episodes list for service response.

    Args:
        episodes: List of episodes to format.
        season_number: Optional season number to filter by.

    Returns:
        Dictionary of episodes keyed by episode identifier (e.g., "S01E01").
    """
    # Build the identifier-keyed mapping, dropping episodes outside the
    # requested season when a filter is given.
    return {
        f"S{ep.seasonNumber:02d}E{ep.episodeNumber:02d}": format_episode(ep)
        for ep in episodes
        if season_number is None or ep.seasonNumber == season_number
    }
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/sonarr/helpers.py",
"license": "Apache License 2.0",
"lines": 324,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
home-assistant/core:homeassistant/components/sonarr/services.py | """Define services for the Sonarr integration."""
from collections.abc import Awaitable, Callable
from datetime import timedelta
from typing import Any, cast
from aiopyarr import exceptions
import voluptuous as vol
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import CONF_URL
from homeassistant.core import HomeAssistant, ServiceCall, SupportsResponse, callback
from homeassistant.exceptions import HomeAssistantError, ServiceValidationError
from homeassistant.helpers import selector
from homeassistant.util import dt as dt_util
from .const import (
ATTR_DISKS,
ATTR_ENTRY_ID,
ATTR_EPISODES,
ATTR_SHOWS,
DEFAULT_UPCOMING_DAYS,
DOMAIN,
SERVICE_GET_DISKSPACE,
SERVICE_GET_EPISODES,
SERVICE_GET_QUEUE,
SERVICE_GET_SERIES,
SERVICE_GET_UPCOMING,
SERVICE_GET_WANTED,
)
from .coordinator import SonarrConfigEntry
from .helpers import (
format_diskspace,
format_episodes,
format_queue,
format_series,
format_upcoming,
format_wanted,
)
# Service parameter constants
CONF_DAYS = "days"
CONF_MAX_ITEMS = "max_items"
CONF_SERIES_ID = "series_id"
CONF_SEASON_NUMBER = "season_number"
CONF_SPACE_UNIT = "space_unit"
# Valid space units
# NOTE(review): these display spellings are passed verbatim to
# helpers.format_diskspace, whose lookup table uses lowercase keys —
# confirm that lookup normalizes case, otherwise non-"bytes" units
# silently fall back to a divisor of 1 (bytes).
SPACE_UNITS = ["bytes", "KB", "KiB", "MB", "MiB", "GB", "GiB", "TB", "TiB", "PB", "PiB"]
DEFAULT_SPACE_UNIT = "bytes"
# Default values - 0 means no limit
DEFAULT_MAX_ITEMS = 0
# Base schema: every service call targets one Sonarr config entry.
SERVICE_BASE_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_ENTRY_ID): selector.ConfigEntrySelector(
            {"integration": DOMAIN}
        ),
    }
)
SERVICE_GET_SERIES_SCHEMA = SERVICE_BASE_SCHEMA
# get_episodes requires a series id and can optionally filter by season.
SERVICE_GET_EPISODES_SCHEMA = SERVICE_BASE_SCHEMA.extend(
    {
        vol.Required(CONF_SERIES_ID): vol.All(vol.Coerce(int), vol.Range(min=1)),
        vol.Optional(CONF_SEASON_NUMBER): vol.All(vol.Coerce(int), vol.Range(min=0)),
    }
)
# max_items is capped at 500; 0 means "return everything".
SERVICE_GET_QUEUE_SCHEMA = SERVICE_BASE_SCHEMA.extend(
    {
        vol.Optional(CONF_MAX_ITEMS, default=DEFAULT_MAX_ITEMS): vol.All(
            vol.Coerce(int), vol.Range(min=0, max=500)
        ),
    }
)
SERVICE_GET_DISKSPACE_SCHEMA = SERVICE_BASE_SCHEMA.extend(
    {
        vol.Optional(CONF_SPACE_UNIT, default=DEFAULT_SPACE_UNIT): vol.In(SPACE_UNITS),
    }
)
# Upcoming window: 1-30 days ahead.
SERVICE_GET_UPCOMING_SCHEMA = SERVICE_BASE_SCHEMA.extend(
    {
        vol.Optional(CONF_DAYS, default=DEFAULT_UPCOMING_DAYS): vol.All(
            vol.Coerce(int), vol.Range(min=1, max=30)
        ),
    }
)
SERVICE_GET_WANTED_SCHEMA = SERVICE_BASE_SCHEMA.extend(
    {
        vol.Optional(CONF_MAX_ITEMS, default=DEFAULT_MAX_ITEMS): vol.All(
            vol.Coerce(int), vol.Range(min=0, max=500)
        ),
    }
)
def _get_config_entry_from_service_data(call: ServiceCall) -> SonarrConfigEntry:
    """Return config entry for entry id."""
    entry_id: str = call.data[ATTR_ENTRY_ID]
    entry = call.hass.config_entries.async_get_entry(entry_id)
    if entry is None:
        # Unknown entry id: surface a translated validation error.
        raise ServiceValidationError(
            translation_domain=DOMAIN,
            translation_key="integration_not_found",
            translation_placeholders={"target": entry_id},
        )
    if entry.state is not ConfigEntryState.LOADED:
        # The entry exists but is not running, so no API client is available.
        raise ServiceValidationError(
            translation_domain=DOMAIN,
            translation_key="not_loaded",
            translation_placeholders={"target": entry.title},
        )
    return cast(SonarrConfigEntry, entry)
async def _handle_api_errors[_T](func: Callable[[], Awaitable[_T]]) -> _T:
"""Handle API errors and raise HomeAssistantError with user-friendly messages."""
try:
return await func()
except exceptions.ArrAuthenticationException as ex:
raise HomeAssistantError("Authentication failed for Sonarr") from ex
except exceptions.ArrConnectionException as ex:
raise HomeAssistantError("Failed to connect to Sonarr") from ex
except exceptions.ArrException as ex:
raise HomeAssistantError(f"Sonarr API error: {ex}") from ex
async def _async_get_series(service: ServiceCall) -> dict[str, Any]:
    """Get all Sonarr series."""
    entry = _get_config_entry_from_service_data(service)
    client = entry.runtime_data.status.api_client
    series_list = await _handle_api_errors(client.async_get_series)
    # Image URL fallbacks are built from the configured instance URL.
    shows = format_series(cast(list, series_list), entry.data[CONF_URL])
    return {ATTR_SHOWS: shows}
async def _async_get_episodes(service: ServiceCall) -> dict[str, Any]:
    """Get episodes for a specific series."""
    entry = _get_config_entry_from_service_data(service)
    series_id: int = service.data[CONF_SERIES_ID]
    season: int | None = service.data.get(CONF_SEASON_NUMBER)
    client = entry.runtime_data.status.api_client
    episodes = await _handle_api_errors(
        lambda: client.async_get_episodes(series_id, series=True)
    )
    # Optional season filter is applied while formatting.
    return {ATTR_EPISODES: format_episodes(cast(list, episodes), season)}
async def _async_get_queue(service: ServiceCall) -> dict[str, Any]:
    """Get Sonarr queue."""
    entry = _get_config_entry_from_service_data(service)
    max_items: int = service.data[CONF_MAX_ITEMS]
    client = entry.runtime_data.status.api_client
    # A max_items of 0 means "no limit" - request one oversized page instead.
    queue = await _handle_api_errors(
        lambda: client.async_get_queue(
            page_size=max_items or 10000, include_series=True, include_episode=True
        )
    )
    return {ATTR_SHOWS: format_queue(queue, entry.data[CONF_URL])}
async def _async_get_diskspace(service: ServiceCall) -> dict[str, Any]:
    """Get Sonarr diskspace information."""
    entry = _get_config_entry_from_service_data(service)
    unit: str = service.data[CONF_SPACE_UNIT]
    client = entry.runtime_data.status.api_client
    disks = await _handle_api_errors(client.async_get_diskspace)
    # Values are converted to the requested unit during formatting.
    return {ATTR_DISKS: format_diskspace(disks, unit)}
async def _async_get_upcoming(service: ServiceCall) -> dict[str, Any]:
    """Get Sonarr upcoming episodes."""
    entry = _get_config_entry_from_service_data(service)
    days: int = service.data[CONF_DAYS]
    client = entry.runtime_data.status.api_client
    # The window starts at local midnight (expressed in UTC) and spans `days`.
    start = dt_util.as_utc(dt_util.start_of_local_day().replace(microsecond=0))
    end = start + timedelta(days=days)
    calendar = await _handle_api_errors(
        lambda: client.async_get_calendar(
            start_date=start, end_date=end, include_series=True
        )
    )
    episodes = format_upcoming(cast(list, calendar), entry.data[CONF_URL])
    return {ATTR_EPISODES: episodes}
async def _async_get_wanted(service: ServiceCall) -> dict[str, Any]:
    """Get Sonarr wanted/missing episodes."""
    entry = _get_config_entry_from_service_data(service)
    max_items: int = service.data[CONF_MAX_ITEMS]
    client = entry.runtime_data.status.api_client
    # A max_items of 0 means "no limit" - request one oversized page instead.
    wanted = await _handle_api_errors(
        lambda: client.async_get_wanted(
            page_size=max_items or 10000, include_series=True
        )
    )
    return {ATTR_EPISODES: format_wanted(wanted, entry.data[CONF_URL])}
@callback
def async_setup_services(hass: HomeAssistant) -> None:
    """Register services for the Sonarr integration."""
    # (service name, handler, schema) - every service only returns response
    # data, hence SupportsResponse.ONLY for each registration.
    registrations = (
        (SERVICE_GET_SERIES, _async_get_series, SERVICE_GET_SERIES_SCHEMA),
        (SERVICE_GET_EPISODES, _async_get_episodes, SERVICE_GET_EPISODES_SCHEMA),
        (SERVICE_GET_QUEUE, _async_get_queue, SERVICE_GET_QUEUE_SCHEMA),
        (SERVICE_GET_DISKSPACE, _async_get_diskspace, SERVICE_GET_DISKSPACE_SCHEMA),
        (SERVICE_GET_UPCOMING, _async_get_upcoming, SERVICE_GET_UPCOMING_SCHEMA),
        (SERVICE_GET_WANTED, _async_get_wanted, SERVICE_GET_WANTED_SCHEMA),
    )
    for name, handler, schema in registrations:
        hass.services.async_register(
            DOMAIN,
            name,
            handler,
            schema=schema,
            supports_response=SupportsResponse.ONLY,
        )
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/sonarr/services.py",
"license": "Apache License 2.0",
"lines": 227,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
home-assistant/core:tests/components/sonarr/test_services.py | """Tests for Sonarr services."""
from unittest.mock import MagicMock
from aiopyarr import (
ArrAuthenticationException,
ArrConnectionException,
Diskspace,
SonarrQueue,
)
import pytest
from syrupy.assertion import SnapshotAssertion
from homeassistant.components.sonarr.const import (
ATTR_DISKS,
ATTR_ENTRY_ID,
ATTR_EPISODES,
ATTR_SHOWS,
DOMAIN,
SERVICE_GET_DISKSPACE,
SERVICE_GET_EPISODES,
SERVICE_GET_QUEUE,
SERVICE_GET_SERIES,
SERVICE_GET_UPCOMING,
SERVICE_GET_WANTED,
)
from homeassistant.config_entries import ConfigEntryState
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError, ServiceValidationError
from tests.common import MockConfigEntry
@pytest.mark.parametrize(
    "service",
    [
        SERVICE_GET_SERIES,
        SERVICE_GET_QUEUE,
        SERVICE_GET_DISKSPACE,
        SERVICE_GET_UPCOMING,
        SERVICE_GET_WANTED,
    ],
)
async def test_services_config_entry_not_loaded_state(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    service: str,
) -> None:
    """Test service call when config entry is in failed state."""
    # Create a second config entry that's not loaded
    unloaded_entry = MockConfigEntry(
        title="Sonarr",
        domain=DOMAIN,
        unique_id="unloaded",
    )
    unloaded_entry.add_to_hass(hass)
    assert unloaded_entry.state is ConfigEntryState.NOT_LOADED
    # Targeting the unloaded entry must raise the translated "not_loaded"
    # validation error instead of attempting an API call.
    with pytest.raises(ServiceValidationError) as exc_info:
        await hass.services.async_call(
            DOMAIN,
            service,
            {ATTR_ENTRY_ID: unloaded_entry.entry_id},
            blocking=True,
            return_response=True,
        )
    assert exc_info.value.translation_key == "not_loaded"
@pytest.mark.parametrize(
    "service",
    [
        SERVICE_GET_SERIES,
        SERVICE_GET_QUEUE,
        SERVICE_GET_DISKSPACE,
        SERVICE_GET_UPCOMING,
        SERVICE_GET_WANTED,
    ],
)
async def test_services_integration_not_found(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    service: str,
) -> None:
    """Test service call with non-existent config entry."""
    # An unknown entry id must raise the translated "integration_not_found"
    # validation error for every service.
    with pytest.raises(ServiceValidationError) as exc_info:
        await hass.services.async_call(
            DOMAIN,
            service,
            {ATTR_ENTRY_ID: "non_existent_entry_id"},
            blocking=True,
            return_response=True,
        )
    assert exc_info.value.translation_key == "integration_not_found"
async def test_service_get_series(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    snapshot: SnapshotAssertion,
) -> None:
    """Test get_series service returns the fixture's single show and matches snapshot."""
    response = await hass.services.async_call(
        DOMAIN,
        SERVICE_GET_SERIES,
        {ATTR_ENTRY_ID: init_integration.entry_id},
        blocking=True,
        return_response=True,
    )
    # Explicit assertion for specific behavior
    assert len(response[ATTR_SHOWS]) == 1
    # Snapshot for full structure validation
    assert response == snapshot
async def test_service_get_queue(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    snapshot: SnapshotAssertion,
) -> None:
    """Test get_queue service returns the fixture's single download and matches snapshot."""
    response = await hass.services.async_call(
        DOMAIN,
        SERVICE_GET_QUEUE,
        {ATTR_ENTRY_ID: init_integration.entry_id},
        blocking=True,
        return_response=True,
    )
    # Explicit assertion for specific behavior
    assert len(response[ATTR_SHOWS]) == 1
    # Snapshot for full structure validation
    assert response == snapshot
@pytest.mark.parametrize(
    "service",
    [
        SERVICE_GET_SERIES,
        SERVICE_GET_QUEUE,
        SERVICE_GET_DISKSPACE,
        SERVICE_GET_UPCOMING,
        SERVICE_GET_WANTED,
    ],
)
async def test_services_entry_not_loaded(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    service: str,
) -> None:
    """Test services with unloaded config entry.

    Unlike the "failed state" test, this unloads the previously loaded
    entry and then targets it directly.
    """
    # Unload the entry
    await hass.config_entries.async_unload(init_integration.entry_id)
    await hass.async_block_till_done()
    with pytest.raises(ServiceValidationError) as exc_info:
        await hass.services.async_call(
            DOMAIN,
            service,
            {ATTR_ENTRY_ID: init_integration.entry_id},
            blocking=True,
            return_response=True,
        )
    assert exc_info.value.translation_key == "not_loaded"
async def test_service_get_queue_empty(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    mock_sonarr: MagicMock,
) -> None:
    """Test get_queue service with empty queue."""
    # Mock empty queue response
    mock_sonarr.async_get_queue.return_value = SonarrQueue(
        {
            "page": 1,
            "pageSize": 10,
            "sortKey": "timeleft",
            "sortDirection": "ascending",
            "totalRecords": 0,
            "records": [],
        }
    )
    response = await hass.services.async_call(
        DOMAIN,
        SERVICE_GET_QUEUE,
        {ATTR_ENTRY_ID: init_integration.entry_id},
        blocking=True,
        return_response=True,
    )
    # An empty queue must still produce a well-formed (empty) shows mapping.
    assert response is not None
    assert ATTR_SHOWS in response
    shows = response[ATTR_SHOWS]
    assert isinstance(shows, dict)
    assert len(shows) == 0
async def test_service_get_diskspace(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    snapshot: SnapshotAssertion,
) -> None:
    """Test get_diskspace service returns the fixture's single disk and matches snapshot."""
    response = await hass.services.async_call(
        DOMAIN,
        SERVICE_GET_DISKSPACE,
        {ATTR_ENTRY_ID: init_integration.entry_id},
        blocking=True,
        return_response=True,
    )
    # Explicit assertion for specific behavior
    assert len(response[ATTR_DISKS]) == 1
    # Snapshot for full structure validation
    assert response == snapshot
async def test_service_get_diskspace_multiple_drives(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    mock_sonarr: MagicMock,
) -> None:
    """Test get_diskspace service with multiple drives.

    Covers Windows drive letters and a POSIX mount point, and checks that
    the default unit ("bytes") leaves values unconverted.
    """
    # Mock multiple disks response
    mock_sonarr.async_get_diskspace.return_value = [
        Diskspace(
            {
                "path": "C:\\",
                "label": "System",
                "freeSpace": 100000000000,
                "totalSpace": 500000000000,
            }
        ),
        Diskspace(
            {
                "path": "D:\\Media",
                "label": "Media Storage",
                "freeSpace": 2000000000000,
                "totalSpace": 4000000000000,
            }
        ),
        Diskspace(
            {
                "path": "/mnt/nas",
                "label": "NAS",
                "freeSpace": 10000000000000,
                "totalSpace": 20000000000000,
            }
        ),
    ]
    response = await hass.services.async_call(
        DOMAIN,
        SERVICE_GET_DISKSPACE,
        {ATTR_ENTRY_ID: init_integration.entry_id},
        blocking=True,
        return_response=True,
    )
    assert response is not None
    assert ATTR_DISKS in response
    disks = response[ATTR_DISKS]
    assert isinstance(disks, dict)
    assert len(disks) == 3
    # Check first disk (C:\)
    c_drive = disks["C:\\"]
    assert c_drive["path"] == "C:\\"
    assert c_drive["label"] == "System"
    assert c_drive["free_space"] == 100000000000
    assert c_drive["total_space"] == 500000000000
    assert c_drive["unit"] == "bytes"
    # Check second disk (D:\Media)
    d_drive = disks["D:\\Media"]
    assert d_drive["path"] == "D:\\Media"
    assert d_drive["label"] == "Media Storage"
    assert d_drive["free_space"] == 2000000000000
    assert d_drive["total_space"] == 4000000000000
    # Check third disk (/mnt/nas)
    nas = disks["/mnt/nas"]
    assert nas["path"] == "/mnt/nas"
    assert nas["label"] == "NAS"
    assert nas["free_space"] == 10000000000000
    assert nas["total_space"] == 20000000000000
async def test_service_get_upcoming(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    snapshot: SnapshotAssertion,
) -> None:
    """Test get_upcoming service returns the fixture's single episode and matches snapshot."""
    response = await hass.services.async_call(
        DOMAIN,
        SERVICE_GET_UPCOMING,
        {ATTR_ENTRY_ID: init_integration.entry_id},
        blocking=True,
        return_response=True,
    )
    # Explicit assertion for specific behavior
    assert len(response[ATTR_EPISODES]) == 1
    # Snapshot for full structure validation
    assert response == snapshot
async def test_service_get_wanted(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    snapshot: SnapshotAssertion,
) -> None:
    """Test get_wanted service returns the fixture's two episodes and matches snapshot."""
    response = await hass.services.async_call(
        DOMAIN,
        SERVICE_GET_WANTED,
        {ATTR_ENTRY_ID: init_integration.entry_id},
        blocking=True,
        return_response=True,
    )
    # Explicit assertion for specific behavior
    assert len(response[ATTR_EPISODES]) == 2
    # Snapshot for full structure validation
    assert response == snapshot
async def test_service_get_episodes(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    snapshot: SnapshotAssertion,
) -> None:
    """Test get_episodes service returns all three fixture episodes and matches snapshot."""
    response = await hass.services.async_call(
        DOMAIN,
        SERVICE_GET_EPISODES,
        {ATTR_ENTRY_ID: init_integration.entry_id, "series_id": 105},
        blocking=True,
        return_response=True,
    )
    # Explicit assertion for specific behavior
    assert len(response[ATTR_EPISODES]) == 3
    # Snapshot for full structure validation
    assert response == snapshot
async def test_service_get_episodes_with_season_filter(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
) -> None:
    """Test get_episodes service with season filter."""
    response = await hass.services.async_call(
        DOMAIN,
        SERVICE_GET_EPISODES,
        {
            ATTR_ENTRY_ID: init_integration.entry_id,
            "series_id": 105,
            "season_number": 1,
        },
        blocking=True,
        return_response=True,
    )
    assert response is not None
    assert ATTR_EPISODES in response
    episodes = response[ATTR_EPISODES]
    assert isinstance(episodes, dict)
    # Should only have season 1 episodes (2 of them)
    assert len(episodes) == 2
    assert "S01E01" in episodes
    assert "S01E02" in episodes
    # The fixture's season-2 episode must be filtered out.
    assert "S02E01" not in episodes
async def test_service_get_queue_image_fallback(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    mock_sonarr: MagicMock,
) -> None:
    """Test that get_queue uses url fallback when remoteUrl is not available."""
    # Mock queue response with images that only have 'url' (no 'remoteUrl'),
    # forcing the formatter's base_url fallback path.
    mock_sonarr.async_get_queue.return_value = SonarrQueue(
        {
            "page": 1,
            "pageSize": 10,
            "sortKey": "timeleft",
            "sortDirection": "ascending",
            "totalRecords": 1,
            "records": [
                {
                    "series": {
                        "title": "Test Series",
                        "sortTitle": "test series",
                        "seasonCount": 1,
                        "status": "continuing",
                        "overview": "A test series.",
                        "network": "Test Network",
                        "airTime": "20:00",
                        "images": [
                            {
                                "coverType": "fanart",
                                "url": "/MediaCover/1/fanart.jpg?lastWrite=123456",
                            },
                            {
                                "coverType": "poster",
                                "url": "/MediaCover/1/poster.jpg?lastWrite=123456",
                            },
                        ],
                        "seasons": [{"seasonNumber": 1, "monitored": True}],
                        "year": 2024,
                        "path": "/tv/Test Series",
                        "profileId": 1,
                        "seasonFolder": True,
                        "monitored": True,
                        "useSceneNumbering": False,
                        "runtime": 45,
                        "tvdbId": 12345,
                        "tvRageId": 0,
                        "tvMazeId": 0,
                        "firstAired": "2024-01-01T00:00:00Z",
                        "lastInfoSync": "2024-01-01T00:00:00Z",
                        "seriesType": "standard",
                        "cleanTitle": "testseries",
                        "imdbId": "tt1234567",
                        "titleSlug": "test-series",
                        "certification": "TV-14",
                        "genres": ["Drama"],
                        "tags": [],
                        "added": "2024-01-01T00:00:00Z",
                        "ratings": {"votes": 100, "value": 8.0},
                        "qualityProfileId": 1,
                        "id": 1,
                    },
                    "episode": {
                        "seriesId": 1,
                        "episodeFileId": 0,
                        "seasonNumber": 1,
                        "episodeNumber": 1,
                        "title": "Pilot",
                        "airDate": "2024-01-01",
                        "airDateUtc": "2024-01-01T00:00:00Z",
                        "overview": "The pilot episode.",
                        "hasFile": False,
                        "monitored": True,
                        "absoluteEpisodeNumber": 1,
                        "unverifiedSceneNumbering": False,
                        "id": 1,
                    },
                    "quality": {
                        "quality": {"id": 3, "name": "WEBDL-1080p"},
                        "revision": {"version": 1, "real": 0},
                    },
                    "size": 1000000000,
                    "title": "Test.Series.S01E01.1080p.WEB-DL",
                    "sizeleft": 500000000,
                    "timeleft": "00:10:00",
                    "estimatedCompletionTime": "2024-01-01T01:00:00Z",
                    "status": "Downloading",
                    "trackedDownloadStatus": "Ok",
                    "statusMessages": [],
                    "downloadId": "test123",
                    "protocol": "torrent",
                    "id": 1,
                }
            ],
        }
    )
    response = await hass.services.async_call(
        DOMAIN,
        SERVICE_GET_QUEUE,
        {ATTR_ENTRY_ID: init_integration.entry_id},
        blocking=True,
        return_response=True,
    )
    assert response is not None
    assert ATTR_SHOWS in response
    shows = response[ATTR_SHOWS]
    assert len(shows) == 1
    # Queue entries are keyed by the download (release) title.
    queue_item = shows["Test.Series.S01E01.1080p.WEB-DL"]
    assert "images" in queue_item
    # Since remoteUrl is not available, the fallback should use base_url + url
    # The base_url from mock_config_entry is http://192.168.1.189:8989
    assert "fanart" in queue_item["images"]
    assert "poster" in queue_item["images"]
    # Check that the fallback constructed the URL with base_url prefix
    assert queue_item["images"]["fanart"] == (
        "http://192.168.1.189:8989/MediaCover/1/fanart.jpg?lastWrite=123456"
    )
    assert queue_item["images"]["poster"] == (
        "http://192.168.1.189:8989/MediaCover/1/poster.jpg?lastWrite=123456"
    )
async def test_service_get_queue_season_pack(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_sonarr_season_pack: MagicMock,
) -> None:
    """Test get_queue service with a season pack download."""
    # Load the integration using the season-pack queue fixture.
    mock_config_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()

    response = await hass.services.async_call(
        DOMAIN,
        SERVICE_GET_QUEUE,
        {ATTR_ENTRY_ID: mock_config_entry.entry_id},
        blocking=True,
        return_response=True,
    )

    assert response is not None
    assert ATTR_SHOWS in response
    queue = response[ATTR_SHOWS]
    # One grouped entry for the whole season pack, not one per episode.
    assert len(queue) == 1

    item = queue["House.S02.1080p.BluRay.x264-SHORTBREHD"]
    assert item["title"] == "House"
    assert item["season_number"] == 2
    assert item["download_title"] == "House.S02.1080p.BluRay.x264-SHORTBREHD"
    # Season-pack specific fields.
    assert item["is_season_pack"] is True
    assert item["episode_count"] == 3  # Episodes 1, 2, and 24 in fixture
    assert item["episode_range"] == "E01-E24"
    assert item["episode_identifier"] == "S02 (3 episodes)"
    # Generic download info must still be exposed.
    assert item["size"] == 84429221268
    assert item["status"] == "paused"
    assert item["quality"] == "Bluray-1080p"
@pytest.mark.parametrize(
    ("service", "method"),
    [
        (SERVICE_GET_SERIES, "async_get_series"),
        (SERVICE_GET_QUEUE, "async_get_queue"),
        (SERVICE_GET_DISKSPACE, "async_get_diskspace"),
        (SERVICE_GET_UPCOMING, "async_get_calendar"),
        (SERVICE_GET_WANTED, "async_get_wanted"),
    ],
)
async def test_services_api_connection_error(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    mock_sonarr: MagicMock,
    service: str,
    method: str,
) -> None:
    """Test services with API connection error."""
    # Make the corresponding client method fail with a connection problem.
    api_method = getattr(mock_sonarr, method)
    api_method.side_effect = ArrConnectionException("Connection failed")

    with pytest.raises(HomeAssistantError, match="Failed to connect to Sonarr"):
        await hass.services.async_call(
            DOMAIN,
            service,
            {ATTR_ENTRY_ID: init_integration.entry_id},
            blocking=True,
            return_response=True,
        )
@pytest.mark.parametrize(
    ("service", "method"),
    [
        (SERVICE_GET_SERIES, "async_get_series"),
        (SERVICE_GET_QUEUE, "async_get_queue"),
        (SERVICE_GET_DISKSPACE, "async_get_diskspace"),
        (SERVICE_GET_UPCOMING, "async_get_calendar"),
        (SERVICE_GET_WANTED, "async_get_wanted"),
    ],
)
async def test_services_api_auth_error(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    mock_sonarr: MagicMock,
    service: str,
    method: str,
) -> None:
    """Test services with API authentication error."""
    # Make the corresponding client method fail with an auth problem.
    api_method = getattr(mock_sonarr, method)
    api_method.side_effect = ArrAuthenticationException("Authentication failed")

    with pytest.raises(HomeAssistantError, match="Authentication failed for Sonarr"):
        await hass.services.async_call(
            DOMAIN,
            service,
            {ATTR_ENTRY_ID: init_integration.entry_id},
            blocking=True,
            return_response=True,
        )
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/sonarr/test_services.py",
"license": "Apache License 2.0",
"lines": 546,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/zinvolt/binary_sensor.py | """Binary sensor platform for Zinvolt integration."""
from collections.abc import Callable
from dataclasses import dataclass
from zinvolt.models import BatteryState
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.const import EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from .coordinator import ZinvoltConfigEntry, ZinvoltDeviceCoordinator
from .entity import ZinvoltEntity
@dataclass(kw_only=True, frozen=True)
class ZinvoltBatteryStateDescription(BinarySensorEntityDescription):
    """Binary sensor description for Zinvolt battery state."""

    # Extracts the boolean state from the coordinator's BatteryState data.
    is_on_fn: Callable[[BatteryState], bool]


# Descriptions of all battery-state binary sensors provided by this platform.
SENSORS: tuple[ZinvoltBatteryStateDescription, ...] = (
    ZinvoltBatteryStateDescription(
        key="on_grid",
        translation_key="on_grid",
        entity_category=EntityCategory.DIAGNOSTIC,
        device_class=BinarySensorDeviceClass.CONNECTIVITY,
        is_on_fn=lambda state: state.current_power.on_grid,
    ),
)
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ZinvoltConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Initialize the entries."""
    # One entity per (description, device coordinator) pair.
    entities = [
        ZinvoltBatteryStateBinarySensor(coordinator, description)
        for description in SENSORS
        for coordinator in entry.runtime_data.values()
    ]
    async_add_entities(entities)
class ZinvoltBatteryStateBinarySensor(ZinvoltEntity, BinarySensorEntity):
    """Zinvolt battery state binary sensor."""

    entity_description: ZinvoltBatteryStateDescription

    def __init__(
        self,
        coordinator: ZinvoltDeviceCoordinator,
        description: ZinvoltBatteryStateDescription,
    ) -> None:
        """Initialize the binary sensor."""
        super().__init__(coordinator)
        self.entity_description = description
        # Unique id combines the device serial with the sensor key.
        serial = coordinator.data.serial_number
        self._attr_unique_id = f"{serial}.{description.key}"

    @property
    def is_on(self) -> bool:
        """Return the state of the binary sensor."""
        battery_state = self.coordinator.data
        return self.entity_description.is_on_fn(battery_state)
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/zinvolt/binary_sensor.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
home-assistant/core:tests/components/zinvolt/test_binary_sensor.py | """Tests for the Zinvolt binary sensor."""
from unittest.mock import AsyncMock, patch
from syrupy.assertion import SnapshotAssertion
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from . import setup_integration
from tests.common import MockConfigEntry, snapshot_platform
async def test_all_entities(
    hass: HomeAssistant,
    snapshot: SnapshotAssertion,
    mock_zinvolt_client: AsyncMock,
    mock_config_entry: MockConfigEntry,
    entity_registry: er.EntityRegistry,
) -> None:
    """Test all entities."""
    # Restrict setup to the binary sensor platform only.
    platforms_patch = patch(
        "homeassistant.components.zinvolt._PLATFORMS", [Platform.BINARY_SENSOR]
    )
    with platforms_patch:
        await setup_integration(hass, mock_config_entry)

    await snapshot_platform(
        hass, entity_registry, snapshot, mock_config_entry.entry_id
    )
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/zinvolt/test_binary_sensor.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/smarla/update.py | """Swing2Sleep Smarla Update platform."""
from dataclasses import dataclass
from datetime import timedelta
from typing import Any
from pysmarlaapi import Federwiege
from pysmarlaapi.federwiege.services.classes import Property
from pysmarlaapi.federwiege.services.types import UpdateStatus
from homeassistant.components.update import (
UpdateDeviceClass,
UpdateEntity,
UpdateEntityDescription,
UpdateEntityFeature,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from . import FederwiegeConfigEntry
from .entity import SmarlaBaseEntity, SmarlaEntityDescription
# Poll interval for checking firmware releases (see async_update below).
SCAN_INTERVAL = timedelta(seconds=300)
# Serialize requests for this platform.
PARALLEL_UPDATES = 1


@dataclass(frozen=True, kw_only=True)
class SmarlaUpdateEntityDescription(SmarlaEntityDescription, UpdateEntityDescription):
    """Class describing Swing2Sleep Smarla update entity."""


# Single firmware update entity backed by the "info" service's "version" property.
UPDATE_ENTITY_DESC = SmarlaUpdateEntityDescription(
    key="update",
    service="info",
    property="version",
    device_class=UpdateDeviceClass.FIRMWARE,
)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: FederwiegeConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up Smarla update entity based on a config entry."""
    entity = SmarlaUpdate(config_entry.runtime_data, UPDATE_ENTITY_DESC)
    # update_before_add=True: fetch release info before first state write.
    async_add_entities([entity], True)
class SmarlaUpdate(SmarlaBaseEntity, UpdateEntity):
    """Defines a Smarla update entity."""

    _attr_supported_features = (
        UpdateEntityFeature.INSTALL | UpdateEntityFeature.PROGRESS
    )
    # Polled so async_update can query for new firmware releases.
    _attr_should_poll = True
    entity_description: SmarlaUpdateEntityDescription
    # Holds the currently installed firmware version string.
    _property: Property[str]
    # Trigger property: setting it to 1 starts the firmware install.
    _update_property: Property[int]
    # Reports the device-side progress of a running firmware update.
    _update_status_property: Property[UpdateStatus]

    def __init__(
        self, federwiege: Federwiege, desc: SmarlaUpdateEntityDescription
    ) -> None:
        """Initialize the update entity."""
        super().__init__(federwiege, desc)
        self._update_property = federwiege.get_property("system", "firmware_update")
        self._update_status_property = federwiege.get_property(
            "system", "firmware_update_status"
        )

    async def async_update(self) -> None:
        """Check for firmware update and update attributes."""
        value = await self._federwiege.check_firmware_update()
        if value is None:
            # No release information available; clear the attributes.
            self._attr_latest_version = None
            self._attr_release_summary = None
            return
        target, notes = value
        self._attr_latest_version = target
        self._attr_release_summary = notes

    async def async_added_to_hass(self) -> None:
        """Run when this Entity has been added to HA."""
        await super().async_added_to_hass()
        # React to device-side status changes (download/install progress).
        await self._update_status_property.add_listener(self.on_change)

    async def async_will_remove_from_hass(self) -> None:
        """Entity being removed from hass."""
        await super().async_will_remove_from_hass()
        await self._update_status_property.remove_listener(self.on_change)

    @property
    def in_progress(self) -> bool | None:
        """Return if an update is in progress."""
        status = self._update_status_property.get()
        # Any status other than idle/failed (or unknown) counts as running.
        return status not in (None, UpdateStatus.IDLE, UpdateStatus.FAILED)

    @property
    def installed_version(self) -> str | None:
        """Return the current installed version."""
        return self._property.get()

    def install(self, version: str | None, backup: bool, **kwargs: Any) -> None:
        """Install latest update."""
        # Writing 1 to the firmware_update property triggers the install.
        self._update_property.set(1)
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/smarla/update.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
home-assistant/core:tests/components/smarla/test_update.py | """Test update platform for Swing2Sleep Smarla integration."""
from unittest.mock import MagicMock, patch
from pysmarlaapi.federwiege.services.types import UpdateStatus
import pytest
from syrupy.assertion import SnapshotAssertion
from homeassistant.components.update import (
ATTR_IN_PROGRESS,
ATTR_INSTALLED_VERSION,
ATTR_LATEST_VERSION,
DOMAIN as UPDATE_DOMAIN,
SERVICE_INSTALL,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
STATE_OFF,
STATE_ON,
STATE_UNKNOWN,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from homeassistant.helpers.entity_component import async_update_entity
from . import setup_integration, update_property_listeners
from tests.common import MockConfigEntry, snapshot_platform
UPDATE_ENTITY_ID = "update.smarla_firmware"
@pytest.mark.usefixtures("mock_federwiege")
async def test_update(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    entity_registry: er.EntityRegistry,
    snapshot: SnapshotAssertion,
) -> None:
    """Test the smarla update platform."""
    platform_patch = patch(
        "homeassistant.components.smarla.PLATFORMS", [Platform.UPDATE]
    )
    with platform_patch:
        assert await setup_integration(hass, mock_config_entry)
        await snapshot_platform(
            hass, entity_registry, snapshot, mock_config_entry.entry_id
        )
async def test_update_available(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_federwiege: MagicMock,
) -> None:
    """Test smarla update initial state and behavior when an update gets available."""
    assert await setup_integration(hass, mock_config_entry)

    initial = hass.states.get(UPDATE_ENTITY_ID)
    assert initial is not None
    assert initial.state == STATE_OFF
    assert initial.attributes[ATTR_INSTALLED_VERSION] == "1.0.0"
    assert initial.attributes[ATTR_LATEST_VERSION] == "1.0.0"

    # Simulate a newer firmware release and poll again.
    mock_federwiege.check_firmware_update.return_value = ("1.1.0", "")
    await async_update_entity(hass, UPDATE_ENTITY_ID)
    await hass.async_block_till_done()

    updated = hass.states.get(UPDATE_ENTITY_ID)
    assert updated is not None
    assert updated.state == STATE_ON
    assert updated.attributes[ATTR_LATEST_VERSION] == "1.1.0"
async def test_update_install(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_federwiege: MagicMock,
) -> None:
    """Test the smarla update install action."""
    mock_federwiege.check_firmware_update.return_value = ("1.1.0", "")
    assert await setup_integration(hass, mock_config_entry)

    trigger_property = mock_federwiege.get_property("system", "firmware_update")
    await hass.services.async_call(
        UPDATE_DOMAIN,
        SERVICE_INSTALL,
        {ATTR_ENTITY_ID: UPDATE_ENTITY_ID},
        blocking=True,
    )
    # Installing writes 1 to the firmware_update property exactly once.
    trigger_property.set.assert_called_once_with(1)
@pytest.mark.parametrize("status", [UpdateStatus.DOWNLOADING, UpdateStatus.INSTALLING])
async def test_update_in_progress(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_federwiege: MagicMock,
    status: UpdateStatus,
) -> None:
    """Test the smarla update progress."""
    assert await setup_integration(hass, mock_config_entry)

    status_property = mock_federwiege.get_property(
        "system", "firmware_update_status"
    )

    before = hass.states.get(UPDATE_ENTITY_ID)
    assert before is not None
    assert before.attributes[ATTR_IN_PROGRESS] is False

    # Flip the device-side status and notify the property listeners.
    status_property.get.return_value = status
    await update_property_listeners(status_property)
    await hass.async_block_till_done()

    after = hass.states.get(UPDATE_ENTITY_ID)
    assert after is not None
    assert after.attributes[ATTR_IN_PROGRESS] is True
async def test_update_unknown(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_federwiege: MagicMock,
) -> None:
    """Test smarla update unknown behavior."""
    assert await setup_integration(hass, mock_config_entry)

    before = hass.states.get(UPDATE_ENTITY_ID)
    assert before is not None
    assert before.state != STATE_UNKNOWN

    # Simulate the device returning no release information.
    mock_federwiege.check_firmware_update.return_value = None
    await async_update_entity(hass, UPDATE_ENTITY_ID)
    await hass.async_block_till_done()

    after = hass.states.get(UPDATE_ENTITY_ID)
    assert after is not None
    assert after.state == STATE_UNKNOWN
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/smarla/test_update.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/zinvolt/number.py | """Number platform for Zinvolt integration."""
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
from zinvolt import ZinvoltClient
from zinvolt.models import BatteryState
from homeassistant.components.number import (
NumberDeviceClass,
NumberEntity,
NumberEntityDescription,
)
from homeassistant.const import PERCENTAGE, EntityCategory, UnitOfPower, UnitOfTime
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from .coordinator import ZinvoltConfigEntry, ZinvoltDeviceCoordinator
from .entity import ZinvoltEntity
@dataclass(kw_only=True, frozen=True)
class ZinvoltBatteryStateDescription(NumberEntityDescription):
    """Number description for Zinvolt battery state."""

    # Optional dynamic maximum derived from the current battery state;
    # when None, the static native_max_value from the description applies.
    max_fn: Callable[[BatteryState], int] | None = None
    # Reads the current value from the coordinator's BatteryState data.
    value_fn: Callable[[BatteryState], int]
    # Coroutine writing a new value via the client for the given battery id.
    set_value_fn: Callable[[ZinvoltClient, str, int], Awaitable[None]]


# Descriptions of all configurable battery settings exposed as numbers.
NUMBERS: tuple[ZinvoltBatteryStateDescription, ...] = (
    ZinvoltBatteryStateDescription(
        key="max_output",
        translation_key="max_output",
        entity_category=EntityCategory.CONFIG,
        device_class=NumberDeviceClass.POWER,
        native_unit_of_measurement=UnitOfPower.WATT,
        value_fn=lambda state: state.global_settings.max_output,
        set_value_fn=lambda client, battery_id, value: client.set_max_output(
            battery_id, value
        ),
        native_min_value=0,
        # Upper bound is reported dynamically by the device.
        max_fn=lambda state: state.global_settings.max_output_limit,
    ),
    ZinvoltBatteryStateDescription(
        key="upper_threshold",
        translation_key="upper_threshold",
        entity_category=EntityCategory.CONFIG,
        native_unit_of_measurement=PERCENTAGE,
        value_fn=lambda state: state.global_settings.battery_upper_threshold,
        set_value_fn=lambda client, battery_id, value: client.set_upper_threshold(
            battery_id, value
        ),
        native_min_value=0,
        native_max_value=100,
    ),
    ZinvoltBatteryStateDescription(
        key="lower_threshold",
        translation_key="lower_threshold",
        entity_category=EntityCategory.CONFIG,
        native_unit_of_measurement=PERCENTAGE,
        value_fn=lambda state: state.global_settings.battery_lower_threshold,
        set_value_fn=lambda client, battery_id, value: client.set_lower_threshold(
            battery_id, value
        ),
        # NOTE(review): minimum of 9 differs from the 0 used for the upper
        # threshold — confirm this matches the device's actual lower limit.
        native_min_value=9,
        native_max_value=100,
    ),
    ZinvoltBatteryStateDescription(
        key="standby_time",
        translation_key="standby_time",
        entity_category=EntityCategory.CONFIG,
        native_unit_of_measurement=UnitOfTime.MINUTES,
        device_class=NumberDeviceClass.DURATION,
        value_fn=lambda state: state.global_settings.standby_time,
        set_value_fn=lambda client, battery_id, value: client.set_standby_time(
            battery_id, value
        ),
        native_min_value=5,
        native_max_value=60,
    ),
)
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ZinvoltConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Initialize the entries."""
    # One number entity per (description, device coordinator) pair.
    entities = [
        ZinvoltBatteryStateNumber(coordinator, description)
        for description in NUMBERS
        for coordinator in entry.runtime_data.values()
    ]
    async_add_entities(entities)
class ZinvoltBatteryStateNumber(ZinvoltEntity, NumberEntity):
    """Zinvolt number."""

    entity_description: ZinvoltBatteryStateDescription

    def __init__(
        self,
        coordinator: ZinvoltDeviceCoordinator,
        description: ZinvoltBatteryStateDescription,
    ) -> None:
        """Initialize the number."""
        super().__init__(coordinator)
        self.entity_description = description
        # Unique id combines the device serial with the number key.
        serial = coordinator.data.serial_number
        self._attr_unique_id = f"{serial}.{description.key}"

    @property
    def native_max_value(self) -> float:
        """Return the native maximum value."""
        max_fn = self.entity_description.max_fn
        if max_fn is not None:
            # Dynamic maximum derived from the current battery state.
            return max_fn(self.coordinator.data)
        return super().native_max_value

    @property
    def native_value(self) -> float:
        """Return the state of the sensor."""
        return self.entity_description.value_fn(self.coordinator.data)

    async def async_set_native_value(self, value: float) -> None:
        """Set the state of the sensor."""
        client = self.coordinator.client
        battery_id = self.coordinator.battery.identifier
        await self.entity_description.set_value_fn(client, battery_id, int(value))
        # Refresh so the entity reflects the device-confirmed value.
        await self.coordinator.async_request_refresh()
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/zinvolt/number.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
home-assistant/core:tests/components/zinvolt/test_number.py | """Tests for the Zinvolt number."""
from unittest.mock import AsyncMock, patch
from syrupy.assertion import SnapshotAssertion
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from . import setup_integration
from tests.common import MockConfigEntry, snapshot_platform
async def test_all_entities(
    hass: HomeAssistant,
    snapshot: SnapshotAssertion,
    mock_zinvolt_client: AsyncMock,
    mock_config_entry: MockConfigEntry,
    entity_registry: er.EntityRegistry,
) -> None:
    """Test all entities."""
    # Restrict setup to the number platform only.
    platforms_patch = patch(
        "homeassistant.components.zinvolt._PLATFORMS", [Platform.NUMBER]
    )
    with platforms_patch:
        await setup_integration(hass, mock_config_entry)

    await snapshot_platform(
        hass, entity_registry, snapshot, mock_config_entry.entry_id
    )
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/zinvolt/test_number.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/ntfy/update.py | """Update platform for the ntfy integration."""
from __future__ import annotations
from enum import StrEnum
from homeassistant.components.update import (
UpdateEntity,
UpdateEntityDescription,
UpdateEntityFeature,
)
from homeassistant.const import CONF_URL, EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityDescription
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from . import NTFY_KEY
from .const import DEFAULT_URL
from .coordinator import (
NtfyConfigEntry,
NtfyLatestReleaseUpdateCoordinator,
NtfyVersionDataUpdateCoordinator,
)
from .entity import NtfyCommonBaseEntity
# 0 disables Home Assistant's parallel-update throttling for this platform.
PARALLEL_UPDATES = 0


class NtfyUpdate(StrEnum):
    """Ntfy update."""

    # Entity key, reused as the translation key below.
    UPDATE = "update"


# Diagnostic entity reporting the ntfy server version.
DESCRIPTION = UpdateEntityDescription(
    key=NtfyUpdate.UPDATE,
    translation_key=NtfyUpdate.UPDATE,
    entity_category=EntityCategory.DIAGNOSTIC,
)
async def async_setup_entry(
    hass: HomeAssistant,
    entry: NtfyConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up update platform."""
    # Skip the hosted ntfy.sh service: no update entity for it.
    if entry.data[CONF_URL] == DEFAULT_URL:
        return
    # Also skip servers that did not report version data.
    version_coordinator = entry.runtime_data.version
    if version_coordinator.data is None:
        return
    update_coordinator = hass.data[NTFY_KEY]
    async_add_entities(
        [NtfyUpdateEntity(version_coordinator, update_coordinator, DESCRIPTION)]
    )
class NtfyUpdateEntity(NtfyCommonBaseEntity, UpdateEntity):
    """Representation of an update entity.

    Combines two coordinators: the per-entry version coordinator (installed
    server version) and a shared release coordinator (latest GitHub release).
    """

    _attr_supported_features = UpdateEntityFeature.RELEASE_NOTES
    # Per-entry coordinator providing the installed server version.
    coordinator: NtfyVersionDataUpdateCoordinator

    def __init__(
        self,
        coordinator: NtfyVersionDataUpdateCoordinator,
        update_checker: NtfyLatestReleaseUpdateCoordinator,
        description: EntityDescription,
    ) -> None:
        """Initialize the entity."""
        super().__init__(coordinator, description)
        # Shared coordinator providing the latest published release.
        self.update_checker = update_checker
        # Mirror the installed version into the device registry entry.
        if self._attr_device_info and self.installed_version:
            self._attr_device_info.update({"sw_version": self.installed_version})

    @property
    def installed_version(self) -> str | None:
        """Current version."""
        return self.coordinator.data.version if self.coordinator.data else None

    @property
    def title(self) -> str | None:
        """Title of the release."""
        return f"ntfy {self.update_checker.data.name}"

    @property
    def release_url(self) -> str | None:
        """URL to the full release notes."""
        return self.update_checker.data.html_url

    @property
    def latest_version(self) -> str | None:
        """Latest version."""
        # Release tags carry a "v" prefix (e.g. "v2.17.0"); strip it so the
        # value is comparable with installed_version.
        return self.update_checker.data.tag_name.removeprefix("v")

    async def async_release_notes(self) -> str | None:
        """Return the release notes."""
        return self.update_checker.data.body

    async def async_added_to_hass(self) -> None:
        """When entity is added to hass.

        Register extra update listener for the update checker coordinator.
        """
        await super().async_added_to_hass()
        self.async_on_remove(
            self.update_checker.async_add_listener(self._handle_coordinator_update)
        )

    @property
    def available(self) -> bool:
        """Return if entity is available."""
        # Unavailable when either the version coordinator (via the base
        # class) or the release checker failed its last refresh.
        return super().available and self.update_checker.last_update_success
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/ntfy/update.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
home-assistant/core:tests/components/ntfy/test_update.py | """Tests for the ntfy update platform."""
from collections.abc import Generator
from unittest.mock import AsyncMock, patch
from aiontfy.exceptions import (
NtfyNotFoundPageError,
NtfyUnauthorizedAuthenticationError,
)
from aiontfy.update import UpdateCheckerError
import pytest
from syrupy.assertion import SnapshotAssertion
from homeassistant.components.ntfy.const import DEFAULT_URL, DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import (
CONF_TOKEN,
CONF_URL,
CONF_USERNAME,
CONF_VERIFY_SSL,
STATE_UNAVAILABLE,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from tests.common import MockConfigEntry, snapshot_platform
from tests.typing import WebSocketGenerator
@pytest.fixture(autouse=True)
def update_only() -> Generator[None]:
    """Enable only the update platform."""
    platforms_patch = patch(
        "homeassistant.components.ntfy.PLATFORMS",
        [Platform.UPDATE],
    )
    with platforms_patch:
        yield
@pytest.mark.usefixtures("mock_aiontfy", "mock_update_checker")
async def test_setup(
    hass: HomeAssistant,
    snapshot: SnapshotAssertion,
    entity_registry: er.EntityRegistry,
    hass_ws_client: WebSocketGenerator,
) -> None:
    """Snapshot test states of update platform."""
    ws_client = await hass_ws_client(hass)
    # Self-hosted server URL (not ntfy.sh) so the update entity is created.
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        title="ntfy.example",
        data={
            CONF_URL: "https://ntfy.example/",
            CONF_USERNAME: None,
            CONF_TOKEN: "token",
            CONF_VERIFY_SSL: True,
        },
        entry_id="123456789",
    )
    config_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(config_entry.entry_id)
    await hass.async_block_till_done()
    assert config_entry.state is ConfigEntryState.LOADED
    await snapshot_platform(hass, entity_registry, snapshot, config_entry.entry_id)
    # Fetch the release notes through the websocket API and verify the
    # mocked release body is returned.
    await ws_client.send_json(
        {
            "id": 1,
            "type": "update/release_notes",
            "entity_id": "update.ntfy_example_ntfy_version",
        }
    )
    result = await ws_client.receive_json()
    assert result["result"] == "**RELEASE_NOTES**"
@pytest.mark.usefixtures("mock_aiontfy")
async def test_update_checker_error(
    hass: HomeAssistant,
    mock_update_checker: AsyncMock,
) -> None:
    """Test update entity update checker error."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        title="ntfy.example",
        data={
            CONF_URL: "https://ntfy.example/",
            CONF_USERNAME: None,
            CONF_TOKEN: "token",
            CONF_VERIFY_SSL: True,
        },
        entry_id="123456789",
    )
    # Fail the release lookup before setup runs.
    mock_update_checker.latest_release.side_effect = UpdateCheckerError
    entry.add_to_hass(hass)
    await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()
    assert entry.state is ConfigEntryState.LOADED

    entity_state = hass.states.get("update.ntfy_example_ntfy_version")
    assert entity_state is not None
    assert entity_state.state == STATE_UNAVAILABLE
@pytest.mark.parametrize(
    "exception",
    [
        NtfyUnauthorizedAuthenticationError(40101, 401, "unauthorized"),
        NtfyNotFoundPageError(40401, 404, "page not found"),
    ],
    ids=["not an admin", "version < 2.17.0"],
)
@pytest.mark.usefixtures("mock_update_checker")
async def test_version_errors(
    hass: HomeAssistant,
    mock_aiontfy: AsyncMock,
    exception: Exception,
) -> None:
    """Test update entity is not created when version endpoint is not available."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        title="ntfy.example",
        data={
            CONF_URL: "https://ntfy.example/",
            CONF_USERNAME: None,
            CONF_TOKEN: "token",
            CONF_VERIFY_SSL: True,
        },
        entry_id="123456789",
    )
    # The version endpoint fails, so no update entity should be created.
    mock_aiontfy.version.side_effect = exception
    entry.add_to_hass(hass)
    await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()
    assert entry.state is ConfigEntryState.LOADED

    assert hass.states.get("update.ntfy_example_ntfy_version") is None
@pytest.mark.usefixtures("mock_aiontfy", "mock_update_checker")
async def test_with_official_server(hass: HomeAssistant) -> None:
    """Test update entity is not created when using official ntfy server."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        title="ntfy.sh",
        data={
            CONF_URL: DEFAULT_URL,
            CONF_USERNAME: None,
            CONF_TOKEN: "token",
            CONF_VERIFY_SSL: True,
        },
        entry_id="123456789",
    )
    entry.add_to_hass(hass)
    await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()
    assert entry.state is ConfigEntryState.LOADED

    # The hosted ntfy.sh service gets no update entity.
    assert hass.states.get("update.ntfy_sh_ntfy_version") is None
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/ntfy/test_update.py",
"license": "Apache License 2.0",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/nrgkick/diagnostics.py | """Diagnostics support for NRGkick."""
from __future__ import annotations
from dataclasses import asdict
from typing import Any
from homeassistant.components.diagnostics import async_redact_data
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from .coordinator import NRGkickConfigEntry
# Config entry fields containing credentials that must never appear in
# diagnostics output.
TO_REDACT = {
    CONF_PASSWORD,
    CONF_USERNAME,
}
async def async_get_config_entry_diagnostics(
    hass: HomeAssistant, entry: NRGkickConfigEntry
) -> dict[str, Any]:
    """Return diagnostics for a config entry."""
    payload = {
        "entry_data": entry.data,
        "coordinator_data": asdict(entry.runtime_data.data),
    }
    # Strip credentials before handing the data to the diagnostics platform.
    return async_redact_data(payload, TO_REDACT)
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/nrgkick/diagnostics.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
home-assistant/core:tests/components/nrgkick/test_diagnostics.py | """Tests for the diagnostics data provided by the NRGkick integration."""
from __future__ import annotations
from unittest.mock import AsyncMock
from syrupy.assertion import SnapshotAssertion
from homeassistant.core import HomeAssistant
from . import setup_integration
from tests.common import MockConfigEntry
from tests.components.diagnostics import get_diagnostics_for_config_entry
from tests.typing import ClientSessionGenerator
async def test_diagnostics(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    mock_config_entry: MockConfigEntry,
    mock_nrgkick_api: AsyncMock,
    snapshot: SnapshotAssertion,
) -> None:
    """Test diagnostics."""
    await setup_integration(hass, mock_config_entry)
    diagnostics = await get_diagnostics_for_config_entry(
        hass, hass_client, mock_config_entry
    )
    assert diagnostics == snapshot
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/nrgkick/test_diagnostics.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/volvo/services.py | """Volvo services."""
import asyncio
import logging
from typing import Any
from urllib import parse
from httpx import AsyncClient, HTTPError, HTTPStatusError
import voluptuous as vol
from homeassistant.config_entries import ConfigEntryState
from homeassistant.core import HomeAssistant, ServiceCall, SupportsResponse
from homeassistant.exceptions import HomeAssistantError, ServiceValidationError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.httpx_client import get_async_client
from .const import DOMAIN
from .coordinator import VolvoConfigEntry
_LOGGER = logging.getLogger(__name__)

# Service call data keys.
CONF_CONFIG_ENTRY_ID = "entry"
CONF_IMAGE_TYPES = "images"

SERVICE_GET_IMAGE_URL = "get_image_url"
SERVICE_GET_IMAGE_URL_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_CONFIG_ENTRY_ID): str,
        vol.Optional(CONF_IMAGE_TYPES): vol.All(cv.ensure_list, [str]),
    }
)

# Headers sent when probing image URLs.
_HEADERS = {
    "Accept-Language": "en-GB",
    "Sec-Fetch-User": "?1",
}

# Maps service image-type names to the angle parameter used when building
# exterior image URLs (see _get_image_url).
_PARAM_IMAGE_ANGLE_MAP = {
    "exterior_back": "6",
    "exterior_back_left": "5",
    "exterior_back_right": "2",
    "exterior_front": "3",
    "exterior_front_left": "4",
    "exterior_front_right": "0",
    "exterior_side_left": "7",
    "exterior_side_right": "1",
}

# Presumably maps angle parameters back to descriptive angle names used by
# the image API. NOTE(review): angles "0" and "2" from the map above are
# missing here — confirm whether that is intentional.
_IMAGE_ANGLE_MAP = {
    "1": "right",
    "3": "front",
    "4": "threeQuartersFrontLeft",
    "5": "threeQuartersRearLeft",
    "6": "rear",
    "7": "left",
}
async def async_setup_services(hass: HomeAssistant) -> None:
    """Set up services."""
    # get_image_url only returns data (no actions), hence SupportsResponse.ONLY.
    handler = _get_image_url
    hass.services.async_register(
        DOMAIN,
        SERVICE_GET_IMAGE_URL,
        handler,
        schema=SERVICE_GET_IMAGE_URL_SCHEMA,
        supports_response=SupportsResponse.ONLY,
    )
async def _get_image_url(call: ServiceCall) -> dict[str, Any]:
    """Handle the get_image_url service call.

    Returns {"images": [{"type": ..., "url": ...}, ...]} containing only the
    image types whose URL could be resolved and whose image actually exists.
    Raises ServiceValidationError for bad entry ids or unknown image types.
    """
    entry_id = call.data.get(CONF_CONFIG_ENTRY_ID, "")
    requested_images = call.data.get(CONF_IMAGE_TYPES, [])

    entry = _async_get_config_entry(call.hass, entry_id)
    image_types = _get_requested_image_types(requested_images)
    client = get_async_client(call.hass)

    # Build (type, url) pairs for all requested image types up front
    candidates: list[tuple[str, str]] = []
    for image_type in image_types:
        if image_type == "interior":
            url = entry.runtime_data.context.vehicle.images.internal_image_url or ""
        else:
            url = _parse_exterior_image_url(
                entry.runtime_data.context.vehicle.images.exterior_image_url,
                _PARAM_IMAGE_ANGLE_MAP[image_type],
            )
        candidates.append((image_type, url))

    # Interior images exist if their URL is populated; exterior images require an HTTP check
    async def _check_exists(image_type: str, url: str) -> bool:
        if image_type == "interior":
            return bool(url)
        return await _async_image_exists(client, url)

    # Run checks in parallel
    exists_results = await asyncio.gather(
        *(_check_exists(image_type, url) for image_type, url in candidates)
    )

    # gather preserves input order, so zip(strict=True) pairs each candidate
    # with its own existence result.
    return {
        "images": [
            {"type": image_type, "url": url}
            for (image_type, url), exists in zip(
                candidates, exists_results, strict=True
            )
            if exists
        ]
    }
def _async_get_config_entry(hass: HomeAssistant, entry_id: str) -> VolvoConfigEntry:
    """Return the loaded Volvo config entry for *entry_id* or raise.

    Raises ServiceValidationError when the id is empty, unknown, belongs to
    another integration, or the entry is not loaded.
    """

    def _invalid(key: str, placeholder: str) -> ServiceValidationError:
        # All failure modes share the same error shape; only the translation
        # key and the entry id placeholder differ.
        return ServiceValidationError(
            translation_domain=DOMAIN,
            translation_key=key,
            translation_placeholders={"entry_id": placeholder},
        )

    if not entry_id:
        raise _invalid("invalid_entry_id", entry_id)

    entry = hass.config_entries.async_get_entry(entry_id)
    if entry is None:
        raise _invalid("entry_not_found", entry_id)
    if entry.domain != DOMAIN:
        raise _invalid("invalid_entry", entry.entry_id)
    if entry.state is not ConfigEntryState.LOADED:
        raise _invalid("entry_not_loaded", entry.entry_id)

    return entry
def _get_requested_image_types(requested_image_types: list[str]) -> list[str]:
    """Validate the requested image types and return them without duplicates.

    An empty request means "all supported image types". Unknown image types
    raise ServiceValidationError.
    """
    allowed_image_types = [*_PARAM_IMAGE_ANGLE_MAP, "interior"]
    if not requested_image_types:
        return allowed_image_types

    for image_type in requested_image_types:
        if image_type not in allowed_image_types:
            raise ServiceValidationError(
                translation_domain=DOMAIN,
                translation_key="invalid_image_type",
                translation_placeholders={"image_type": image_type},
            )

    # dict.fromkeys keeps first-occurrence order while dropping duplicates.
    return list(dict.fromkeys(requested_image_types))
def _parse_exterior_image_url(exterior_url: str, angle: str) -> str:
if not exterior_url:
return ""
url_parts = parse.urlparse(exterior_url)
if url_parts.netloc.startswith("wizz"):
if new_angle := _IMAGE_ANGLE_MAP.get(angle):
current_angle = url_parts.path.split("/")[-2]
return exterior_url.replace(current_angle, new_angle)
return ""
query = parse.parse_qs(url_parts.query, keep_blank_values=True)
query["angle"] = [angle]
return url_parts._replace(query=parse.urlencode(query, doseq=True)).geturl()
async def _async_image_exists(client: AsyncClient, url: str) -> bool:
    """Return True if *url* serves an image.

    A 404/410 status means the image does not exist; any other HTTP failure
    is raised as HomeAssistantError.
    """
    if not url:
        return False

    try:
        async with client.stream(
            "GET", url, headers=_HEADERS, timeout=10, follow_redirects=True
        ) as response:
            response.raise_for_status()
    except HTTPError as ex:
        # HTTPStatusError is a subclass of HTTPError; treat "gone"/"not found"
        # as a clean negative result instead of an error.
        if (
            isinstance(ex, HTTPStatusError)
            and ex.response is not None
            and ex.response.status_code in (404, 410)
        ):
            _LOGGER.debug("Image does not exist: %s", url)
            return False
        raise HomeAssistantError(
            translation_domain=DOMAIN,
            translation_key="image_error",
            translation_placeholders={"url": url},
        ) from ex

    return True
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/volvo/services.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
home-assistant/core:tests/components/volvo/test_services.py | """Test Volvo services."""
from collections.abc import Awaitable, Callable
from unittest.mock import AsyncMock, patch
from httpx import AsyncClient, HTTPError, HTTPStatusError, Request, Response
import pytest
from homeassistant.components.volvo.const import DOMAIN
from homeassistant.components.volvo.services import (
CONF_CONFIG_ENTRY_ID,
CONF_IMAGE_TYPES,
SERVICE_GET_IMAGE_URL,
_async_image_exists,
_parse_exterior_image_url,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError, ServiceValidationError
from tests.common import MockConfigEntry
@pytest.mark.usefixtures("mock_api")
async def test_setup_services(
    hass: HomeAssistant,
    setup_integration: Callable[[], Awaitable[bool]],
) -> None:
    """Test setup of services."""
    assert await setup_integration()

    # Setting up the integration must register the get_image_url service.
    services = hass.services.async_services_for_domain(DOMAIN)
    assert services
    assert SERVICE_GET_IMAGE_URL in services
@pytest.mark.usefixtures("mock_api")
async def test_get_image_url_all(
    hass: HomeAssistant,
    setup_integration: Callable[[], Awaitable[bool]],
    mock_config_entry: MockConfigEntry,
) -> None:
    """Test if get_image_url returns all image types."""
    assert await setup_integration()

    # Pretend every probed image URL exists so all types are returned.
    with patch(
        "homeassistant.components.volvo.services._async_image_exists",
        new=AsyncMock(return_value=True),
    ):
        images = await hass.services.async_call(
            DOMAIN,
            SERVICE_GET_IMAGE_URL,
            {
                CONF_CONFIG_ENTRY_ID: mock_config_entry.entry_id,
                CONF_IMAGE_TYPES: [],
            },
            blocking=True,
            return_response=True,
        )

    assert images
    assert images["images"]
    assert isinstance(images["images"], list)
    # 8 exterior angles + 1 interior image.
    assert len(images["images"]) == 9
@pytest.mark.usefixtures("mock_api")
@pytest.mark.parametrize(
    "image_type",
    [
        "exterior_back",
        "exterior_back_left",
        "exterior_back_right",
        "exterior_front",
        "exterior_front_left",
        "exterior_front_right",
        "exterior_side_left",
        "exterior_side_right",
        "interior",
    ],
)
async def test_get_image_url_selected(
    hass: HomeAssistant,
    setup_integration: Callable[[], Awaitable[bool]],
    mock_config_entry: MockConfigEntry,
    image_type: str,
) -> None:
    """Test if get_image_url returns selected image types."""
    assert await setup_integration()

    # Force the existence check to succeed so the requested type is returned.
    with patch(
        "homeassistant.components.volvo.services._async_image_exists",
        new=AsyncMock(return_value=True),
    ):
        images = await hass.services.async_call(
            DOMAIN,
            SERVICE_GET_IMAGE_URL,
            {
                CONF_CONFIG_ENTRY_ID: mock_config_entry.entry_id,
                CONF_IMAGE_TYPES: [image_type],
            },
            blocking=True,
            return_response=True,
        )

    assert images
    assert images["images"]
    assert isinstance(images["images"], list)
    # Exactly the one requested image type should be returned.
    assert len(images["images"]) == 1
@pytest.mark.usefixtures("mock_api")
@pytest.mark.parametrize(
    ("entry_id", "translation_key"),
    [
        ("", "invalid_entry_id"),
        ("fake_entry_id", "invalid_entry"),
        ("wrong_entry_id", "entry_not_found"),
    ],
)
async def test_invalid_config_entry(
    hass: HomeAssistant,
    setup_integration: Callable[[], Awaitable[bool]],
    entry_id: str,
    translation_key: str,
) -> None:
    """Test invalid config entry parameters."""
    assert await setup_integration()

    # Register an entry from a different domain so the "fake_entry_id" case
    # exercises the "invalid_entry" (wrong integration) branch.
    config_entry = MockConfigEntry(domain="fake_entry", entry_id="fake_entry_id")
    config_entry.add_to_hass(hass)

    with pytest.raises(ServiceValidationError) as exc_info:
        await hass.services.async_call(
            DOMAIN,
            SERVICE_GET_IMAGE_URL,
            {
                CONF_CONFIG_ENTRY_ID: entry_id,
                CONF_IMAGE_TYPES: [],
            },
            blocking=True,
            return_response=True,
        )

    assert exc_info.value.translation_domain == DOMAIN
    assert exc_info.value.translation_key == translation_key
@pytest.mark.usefixtures("mock_api")
async def test_invalid_image_type(
    hass: HomeAssistant,
    setup_integration: Callable[[], Awaitable[bool]],
    mock_config_entry: MockConfigEntry,
) -> None:
    """Test invalid image type parameters."""
    assert await setup_integration()

    # "top" is not a supported image type and must be rejected by validation.
    with pytest.raises(ServiceValidationError) as exc_info:
        await hass.services.async_call(
            DOMAIN,
            SERVICE_GET_IMAGE_URL,
            {
                CONF_CONFIG_ENTRY_ID: mock_config_entry.entry_id,
                CONF_IMAGE_TYPES: ["top"],
            },
            blocking=True,
            return_response=True,
        )

    assert exc_info.value.translation_domain == DOMAIN
    assert exc_info.value.translation_key == "invalid_image_type"
async def test_async_image_exists(hass: HomeAssistant) -> None:
    """Test _async_image_exists returns True on successful response.

    Bug fix: the original configured ``client.get.return_value``, but the
    implementation streams the URL via ``async with client.stream(...)`` —
    the prepared response mock was never exercised and the test only passed
    because MagicMock auto-creates the async context manager.
    """
    client = AsyncMock(spec=AsyncClient)
    response = AsyncMock()
    response.raise_for_status.return_value = None
    # Wire the response into the async context manager returned by stream().
    client.stream.return_value.__aenter__.return_value = response

    assert await _async_image_exists(client, "http://example.com/image.jpg")
    # The existence probe must actually stream the URL.
    client.stream.assert_called_once()
    response.raise_for_status.assert_called_once()
async def test_async_image_does_not_exist(hass: HomeAssistant) -> None:
    """Test _async_image_exists returns False when image does not exist."""
    client = AsyncMock(spec=AsyncClient)
    # A 404 from the image host means "no such image" rather than an error.
    client.stream.side_effect = HTTPStatusError(
        "Not found",
        request=Request("GET", "http://example.com"),
        response=Response(status_code=404),
    )
    assert not await _async_image_exists(client, "http://example.com/image.jpg")
async def test_async_image_non_404_status_error(hass: HomeAssistant) -> None:
    """Test _async_image_exists raises HomeAssistantError on non-404 HTTP status errors."""
    client = AsyncMock(spec=AsyncClient)
    # Server-side failures must surface as HomeAssistantError, not False.
    client.stream.side_effect = HTTPStatusError(
        "Internal server error",
        request=Request("GET", "http://example.com"),
        response=Response(status_code=500),
    )
    with pytest.raises(HomeAssistantError) as exc_info:
        await _async_image_exists(client, "http://example.com/image.jpg")

    assert exc_info.value.translation_domain == DOMAIN
    assert exc_info.value.translation_key == "image_error"
async def test_async_image_error(hass: HomeAssistant) -> None:
    """Test _async_image_exists raises."""
    client = AsyncMock(spec=AsyncClient)
    # Transport-level failures (no HTTP status) also map to HomeAssistantError.
    client.stream.side_effect = HTTPError("HTTP error")
    with pytest.raises(HomeAssistantError) as exc_info:
        await _async_image_exists(client, "http://example.com/image.jpg")

    assert exc_info.value.translation_domain == DOMAIN
    assert exc_info.value.translation_key == "image_error"
def test_parse_exterior_image_url_wizz_valid_angle() -> None:
    """Replace angle segment in wizz-hosted URL when angle is valid."""
    source = "https://wizz.images.volvocars.com/images/threeQuartersRearLeft/abc123.jpg"
    expected = "https://wizz.images.volvocars.com/images/rear/abc123.jpg"
    # Angle "6" maps to the "rear" path segment on wizz hosts.
    assert _parse_exterior_image_url(source, "6") == expected
def test_parse_exterior_image_url_wizz_invalid_angle() -> None:
    """Return empty string for wizz-hosted URL when angle is invalid."""
    # Angle "9" has no wizz path-segment mapping, so no URL can be built.
    source = "https://wizz.images.volvocars.com/images/front/xyz.jpg"
    assert _parse_exterior_image_url(source, "9") == ""
def test_parse_exterior_image_url_non_wizz_sets_angle() -> None:
    """Add angle query to non-wizz URL."""
    source = "https://images.volvocars.com/image?foo=bar&angle=1"
    updated = _parse_exterior_image_url(source, "3")
    # The existing angle query parameter is overwritten with the new value.
    assert "angle=3" in updated
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/volvo/test_services.py",
"license": "Apache License 2.0",
"lines": 196,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/zinvolt/entity.py | """Base entity for Zinvolt integration."""
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
from .coordinator import ZinvoltDeviceCoordinator
class ZinvoltEntity(CoordinatorEntity[ZinvoltDeviceCoordinator]):
    """Base entity for Zinvolt integration."""

    # Compose entity names from the device name.
    _attr_has_entity_name = True

    def __init__(self, coordinator: ZinvoltDeviceCoordinator) -> None:
        """Initialize the entity."""
        super().__init__(coordinator)
        serial = coordinator.data.serial_number
        # One device per battery, identified by its serial number.
        self._attr_device_info = DeviceInfo(
            identifiers={(DOMAIN, serial)},
            manufacturer="Zinvolt",
            name=coordinator.battery.name,
            serial_number=serial,
        )
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/zinvolt/entity.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
home-assistant/core:homeassistant/components/powerfox_local/diagnostics.py | """Support for Powerfox Local diagnostics."""
from __future__ import annotations
from typing import Any
from homeassistant.core import HomeAssistant
from .coordinator import PowerfoxLocalConfigEntry
async def async_get_config_entry_diagnostics(
    hass: HomeAssistant, entry: PowerfoxLocalConfigEntry
) -> dict[str, Any]:
    """Return diagnostics for Powerfox Local config entry."""
    device_data = entry.runtime_data.data
    # Expose the coordinator's current readings field by field.
    return {
        field: getattr(device_data, field)
        for field in (
            "power",
            "energy_usage",
            "energy_usage_high_tariff",
            "energy_usage_low_tariff",
            "energy_return",
        )
    }
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/powerfox_local/diagnostics.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
home-assistant/core:tests/components/powerfox_local/test_diagnostics.py | """Test for Powerfox Local diagnostics."""
from unittest.mock import AsyncMock
from syrupy.assertion import SnapshotAssertion
from homeassistant.core import HomeAssistant
from . import setup_integration
from tests.common import MockConfigEntry
from tests.components.diagnostics import get_diagnostics_for_config_entry
from tests.typing import ClientSessionGenerator
async def test_entry_diagnostics(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    mock_powerfox_local_client: AsyncMock,
    mock_config_entry: MockConfigEntry,
    snapshot: SnapshotAssertion,
) -> None:
    """Test the Powerfox Local entry diagnostics."""
    await setup_integration(hass, mock_config_entry)

    diagnostics = await get_diagnostics_for_config_entry(
        hass, hass_client, mock_config_entry
    )
    # The diagnostics payload is pinned by the stored snapshot.
    assert diagnostics == snapshot
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/powerfox_local/test_diagnostics.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:tests/components/zoneminder/test_config.py | """Tests for ZoneMinder YAML configuration validation."""
from __future__ import annotations
from homeassistant.components.zoneminder.const import DOMAIN
from homeassistant.const import CONF_HOST, CONF_SSL
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from .conftest import MOCK_HOST
async def test_invalid_config_missing_host(hass: HomeAssistant) -> None:
    """Test that config without host is rejected."""
    # CONF_HOST is required, so an empty server dict must fail validation.
    assert not await async_setup_component(hass, DOMAIN, {DOMAIN: [{}]})
async def test_invalid_config_bad_ssl_type(hass: HomeAssistant) -> None:
    """Test that non-boolean ssl value is rejected."""
    # CONF_SSL must be a boolean; a string should fail schema validation.
    bad_config = {DOMAIN: [{CONF_HOST: MOCK_HOST, CONF_SSL: "not_bool"}]}
    assert not await async_setup_component(hass, DOMAIN, bad_config)
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/zoneminder/test_config.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:tests/components/zoneminder/test_services.py | """Tests for ZoneMinder service calls."""
from __future__ import annotations
from unittest.mock import MagicMock, patch
import pytest
import voluptuous as vol
from homeassistant.components.zoneminder.const import DOMAIN
from homeassistant.const import ATTR_ID, ATTR_NAME
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from .conftest import MOCK_HOST, MOCK_HOST_2, create_mock_zm_client
async def test_set_run_state_service_registered(
    hass: HomeAssistant,
    mock_zoneminder_client: MagicMock,
    single_server_config: dict,
) -> None:
    """Test set_run_state service is registered after setup."""
    assert await async_setup_component(hass, DOMAIN, single_server_config)
    await hass.async_block_till_done()

    # The service is registered as part of component setup.
    assert hass.services.has_service(DOMAIN, "set_run_state")
async def test_set_run_state_valid_call(
    hass: HomeAssistant,
    mock_zoneminder_client: MagicMock,
    single_server_config: dict,
) -> None:
    """Test valid set_run_state call sets state on correct ZM client."""
    assert await async_setup_component(hass, DOMAIN, single_server_config)
    await hass.async_block_till_done()

    service_data = {ATTR_ID: MOCK_HOST, ATTR_NAME: "Away"}
    await hass.services.async_call(
        DOMAIN, "set_run_state", service_data, blocking=True
    )
    await hass.async_block_till_done()

    # The configured client must receive the requested run state.
    mock_zoneminder_client.set_active_state.assert_called_once_with("Away")
async def test_set_run_state_multi_server_targets_correct_server(
    hass: HomeAssistant, multi_server_config: dict
) -> None:
    """Test set_run_state targets specific server by id."""
    clients: dict[str, MagicMock] = {}

    # Factory keyed by hostname so each configured server gets its own mock.
    def make_client(*args, **kwargs):
        client = create_mock_zm_client()
        # Extract hostname from the server_origin (first positional arg)
        origin = args[0]
        hostname = origin.split("://")[1]
        clients[hostname] = client
        return client

    with patch(
        "homeassistant.components.zoneminder.ZoneMinder",
        side_effect=make_client,
    ):
        assert await async_setup_component(hass, DOMAIN, multi_server_config)
        await hass.async_block_till_done()

    await hass.services.async_call(
        DOMAIN,
        "set_run_state",
        {ATTR_ID: MOCK_HOST_2, ATTR_NAME: "Home"},
        blocking=True,
    )
    await hass.async_block_till_done()

    # Only the second server should have been called
    clients[MOCK_HOST_2].set_active_state.assert_called_once_with("Home")
    clients[MOCK_HOST].set_active_state.assert_not_called()
async def test_set_run_state_missing_fields_rejected(
    hass: HomeAssistant,
    mock_zoneminder_client: MagicMock,
    single_server_config: dict,
) -> None:
    """Test service call with missing required fields is rejected."""
    assert await async_setup_component(hass, DOMAIN, single_server_config)
    await hass.async_block_till_done()

    # ATTR_NAME is required by the service schema, so validation must fail.
    incomplete_data = {ATTR_ID: MOCK_HOST}
    with pytest.raises(vol.MultipleInvalid):
        await hass.services.async_call(
            DOMAIN, "set_run_state", incomplete_data, blocking=True
        )
async def test_set_run_state_invalid_host(
    hass: HomeAssistant,
    mock_zoneminder_client: MagicMock,
    single_server_config: dict,
    caplog: pytest.LogCaptureFixture,
) -> None:
    """Test service call with invalid host logs error.

    Regression: services.py logs error but doesn't return early,
    so it also raises KeyError when trying to access the invalid host.
    """
    assert await async_setup_component(hass, DOMAIN, single_server_config)
    await hass.async_block_till_done()

    # The handler logs first, then still indexes the unknown host (KeyError).
    with pytest.raises(KeyError):
        await hass.services.async_call(
            DOMAIN,
            "set_run_state",
            {ATTR_ID: "invalid.host", ATTR_NAME: "Away"},
            blocking=True,
        )

    assert "Invalid ZoneMinder host provided" in caplog.text
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/zoneminder/test_services.py",
"license": "Apache License 2.0",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:tests/components/smarla/test_entity.py | """Test Smarla entities."""
import logging
from unittest.mock import MagicMock
import pytest
from homeassistant.const import STATE_UNAVAILABLE
from homeassistant.core import HomeAssistant
from . import setup_integration, update_property_listeners
from tests.common import MockConfigEntry
# Entity created by the mocked federwiege device fixture.
TEST_ENTITY_ID = "switch.smarla"
async def test_entity_availability(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_federwiege: MagicMock,
) -> None:
    """Test entity state when device becomes unavailable/available."""
    assert await setup_integration(hass, mock_config_entry)

    # The entity starts out available.
    state = hass.states.get(TEST_ENTITY_ID)
    assert state is not None
    assert state.state != STATE_UNAVAILABLE

    # Toggle availability off and back on; the entity state must follow.
    for available in (False, True):
        mock_federwiege.available = available
        await update_property_listeners(mock_federwiege)
        await hass.async_block_till_done()

        state = hass.states.get(TEST_ENTITY_ID)
        assert state is not None
        if available:
            assert state.state != STATE_UNAVAILABLE
        else:
            assert state.state == STATE_UNAVAILABLE
async def test_entity_unavailable_logging(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    caplog: pytest.LogCaptureFixture,
    mock_federwiege: MagicMock,
) -> None:
    """Test logging when device becomes unavailable/available.

    Each availability transition should be logged exactly once, even when
    the property listeners fire again with an unchanged state.
    """
    assert await setup_integration(hass, mock_config_entry)

    caplog.set_level(logging.INFO)
    caplog.clear()

    # Verify that log exists when device becomes unavailable
    mock_federwiege.available = False
    await update_property_listeners(mock_federwiege)
    await hass.async_block_till_done()
    assert "is unavailable" in caplog.text

    # Verify that we only log once
    caplog.clear()
    await update_property_listeners(mock_federwiege)
    await hass.async_block_till_done()
    assert "is unavailable" not in caplog.text

    # Verify that log exists when device comes back online
    mock_federwiege.available = True
    await update_property_listeners(mock_federwiege)
    await hass.async_block_till_done()
    assert "back online" in caplog.text

    # Verify that we only log once
    caplog.clear()
    await update_property_listeners(mock_federwiege)
    await hass.async_block_till_done()
    assert "back online" not in caplog.text
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/smarla/test_entity.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:tests/components/lutron/test_binary_sensor.py | """Test Lutron binary sensor platform."""
from unittest.mock import MagicMock, patch
from pylutron import OccupancyGroup
from syrupy.assertion import SnapshotAssertion
from homeassistant.const import STATE_OFF, STATE_ON, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from tests.common import MockConfigEntry, snapshot_platform
async def test_binary_sensor_setup(
    hass: HomeAssistant,
    mock_lutron: MagicMock,
    mock_config_entry: MockConfigEntry,
    entity_registry: er.EntityRegistry,
    snapshot: SnapshotAssertion,
) -> None:
    """Test binary sensor setup."""
    mock_config_entry.add_to_hass(hass)

    # Start with the occupancy group vacant.
    occupancy = mock_lutron.areas[0].occupancy_group
    occupancy.state = OccupancyGroup.State.VACANT

    # Restrict setup to the binary sensor platform, then snapshot entities.
    with patch("homeassistant.components.lutron.PLATFORMS", [Platform.BINARY_SENSOR]):
        await hass.config_entries.async_setup(mock_config_entry.entry_id)
        await hass.async_block_till_done()

    await snapshot_platform(hass, entity_registry, snapshot, mock_config_entry.entry_id)
async def test_binary_sensor_update(
    hass: HomeAssistant, mock_lutron: MagicMock, mock_config_entry: MockConfigEntry
) -> None:
    """Test binary sensor update."""
    mock_config_entry.add_to_hass(hass)
    occ_group = mock_lutron.areas[0].occupancy_group
    occ_group.state = OccupancyGroup.State.VACANT
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()

    entity_id = "binary_sensor.test_occupancy_occupancy"
    assert hass.states.get(entity_id).state == STATE_OFF

    # Simulate update
    occ_group.state = OccupancyGroup.State.OCCUPIED
    # Invoke the subscriber callback the entity registered with pylutron.
    callback = occ_group.subscribe.call_args[0][0]
    callback(occ_group, None, None, None)
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == STATE_ON
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/lutron/test_binary_sensor.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:tests/components/lutron/test_cover.py | """Test Lutron cover platform."""
from unittest.mock import MagicMock, patch
from syrupy.assertion import SnapshotAssertion
from homeassistant.components.cover import DOMAIN as COVER_DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_CLOSE_COVER,
SERVICE_OPEN_COVER,
SERVICE_SET_COVER_POSITION,
STATE_CLOSED,
STATE_OPEN,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from tests.common import MockConfigEntry, snapshot_platform
async def test_cover_setup(
    hass: HomeAssistant,
    mock_lutron: MagicMock,
    mock_config_entry: MockConfigEntry,
    entity_registry: er.EntityRegistry,
    snapshot: SnapshotAssertion,
) -> None:
    """Test cover setup."""
    mock_config_entry.add_to_hass(hass)

    # Start with the shade fully closed.
    shade = mock_lutron.areas[0].outputs[2]
    shade.level = 0
    shade.last_level.return_value = 0

    # Restrict setup to the cover platform, then snapshot the entities.
    with patch("homeassistant.components.lutron.PLATFORMS", [Platform.COVER]):
        await hass.config_entries.async_setup(mock_config_entry.entry_id)
        await hass.async_block_till_done()

    await snapshot_platform(hass, entity_registry, snapshot, mock_config_entry.entry_id)
async def test_cover_services(
    hass: HomeAssistant, mock_lutron: MagicMock, mock_config_entry: MockConfigEntry
) -> None:
    """Test cover services."""
    mock_config_entry.add_to_hass(hass)
    cover = mock_lutron.areas[0].outputs[2]
    cover.level = 0
    cover.last_level.return_value = 0
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()

    entity_id = "cover.test_cover"

    # Open cover
    await hass.services.async_call(
        COVER_DOMAIN,
        SERVICE_OPEN_COVER,
        {ATTR_ENTITY_ID: entity_id},
        blocking=True,
    )
    # The Lutron output level mirrors the cover position (0-100).
    assert cover.level == 100

    # Close cover
    await hass.services.async_call(
        COVER_DOMAIN,
        SERVICE_CLOSE_COVER,
        {ATTR_ENTITY_ID: entity_id},
        blocking=True,
    )
    assert cover.level == 0

    # Set cover position
    await hass.services.async_call(
        COVER_DOMAIN,
        SERVICE_SET_COVER_POSITION,
        {ATTR_ENTITY_ID: entity_id, "position": 50},
        blocking=True,
    )
    assert cover.level == 50
async def test_cover_update(
    hass: HomeAssistant, mock_lutron: MagicMock, mock_config_entry: MockConfigEntry
) -> None:
    """Test cover state update."""
    mock_config_entry.add_to_hass(hass)
    cover = mock_lutron.areas[0].outputs[2]
    cover.level = 0
    cover.last_level.return_value = 0
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()

    entity_id = "cover.test_cover"
    assert hass.states.get(entity_id).state == STATE_CLOSED

    # Simulate update
    cover.last_level.return_value = 100
    # Fire the subscriber callback the entity registered with pylutron.
    callback = cover.subscribe.call_args[0][0]
    callback(cover, None, None, None)
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == STATE_OPEN
    assert hass.states.get(entity_id).attributes["current_position"] == 100
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/lutron/test_cover.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:tests/components/lutron/test_event.py | """Test Lutron event platform."""
from unittest.mock import MagicMock, patch
from pylutron import Button
from syrupy.assertion import SnapshotAssertion
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from tests.common import MockConfigEntry, async_capture_events, snapshot_platform
async def test_event_setup(
    hass: HomeAssistant,
    mock_lutron: MagicMock,
    mock_config_entry: MockConfigEntry,
    entity_registry: er.EntityRegistry,
    snapshot: SnapshotAssertion,
) -> None:
    """Test event setup."""
    mock_config_entry.add_to_hass(hass)

    # Restrict setup to the event platform, then snapshot the entities.
    with patch("homeassistant.components.lutron.PLATFORMS", [Platform.EVENT]):
        await hass.config_entries.async_setup(mock_config_entry.entry_id)
        await hass.async_block_till_done()

    await snapshot_platform(hass, entity_registry, snapshot, mock_config_entry.entry_id)
async def test_event_single_press(
    hass: HomeAssistant, mock_lutron: MagicMock, mock_config_entry: MockConfigEntry
) -> None:
    """Test single press event."""
    mock_config_entry.add_to_hass(hass)
    button = mock_lutron.areas[0].keypads[0].buttons[0]
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()

    # Subscribe to events
    events = async_capture_events(hass, "lutron_event")

    # Simulate button press
    # The button may have multiple subscribers registered; notify them all.
    for call in button.subscribe.call_args_list:
        callback = call[0][0]
        callback(button, None, Button.Event.PRESSED, None)
    await hass.async_block_till_done()

    # Check bus event
    assert len(events) == 1
    assert events[0].data["action"] == "single"
    assert events[0].data["uuid"] == "button_uuid"
async def test_event_press_release(
    hass: HomeAssistant, mock_lutron: MagicMock, mock_config_entry: MockConfigEntry
) -> None:
    """Test press and release events."""
    mock_config_entry.add_to_hass(hass)
    button = mock_lutron.areas[0].keypads[0].buttons[0]
    # Raise/lower buttons report distinct pressed/released actions.
    button.button_type = "MasterRaiseLower"
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()

    # Subscribe to events
    events = async_capture_events(hass, "lutron_event")

    # Simulate button press
    for call in button.subscribe.call_args_list:
        callback = call[0][0]
        callback(button, None, Button.Event.PRESSED, None)
    await hass.async_block_till_done()
    assert len(events) == 1
    assert events[0].data["action"] == "pressed"

    # Simulate button release
    for call in button.subscribe.call_args_list:
        callback = call[0][0]
        callback(button, None, Button.Event.RELEASED, None)
    await hass.async_block_till_done()
    assert len(events) == 2
    assert events[1].data["action"] == "released"
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/lutron/test_event.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:tests/components/lutron/test_fan.py | """Test Lutron fan platform."""
from unittest.mock import MagicMock, patch
from syrupy.assertion import SnapshotAssertion
from homeassistant.components.fan import (
ATTR_PERCENTAGE,
DOMAIN as FAN_DOMAIN,
SERVICE_SET_PERCENTAGE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF, STATE_ON, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from tests.common import MockConfigEntry, snapshot_platform
async def test_fan_setup(
    hass: HomeAssistant,
    mock_lutron: MagicMock,
    mock_config_entry: MockConfigEntry,
    entity_registry: er.EntityRegistry,
    snapshot: SnapshotAssertion,
) -> None:
    """Test fan setup."""
    mock_config_entry.add_to_hass(hass)

    # Start with the fan fully off.
    ceiling_fan = mock_lutron.areas[0].outputs[3]
    ceiling_fan.level = 0
    ceiling_fan.last_level.return_value = 0

    # Restrict setup to the fan platform, then snapshot the entities.
    with patch("homeassistant.components.lutron.PLATFORMS", [Platform.FAN]):
        await hass.config_entries.async_setup(mock_config_entry.entry_id)
        await hass.async_block_till_done()

    await snapshot_platform(hass, entity_registry, snapshot, mock_config_entry.entry_id)
async def test_fan_services(
    hass: HomeAssistant, mock_lutron: MagicMock, mock_config_entry: MockConfigEntry
) -> None:
    """Test fan services."""
    mock_config_entry.add_to_hass(hass)
    fan = mock_lutron.areas[0].outputs[3]
    fan.level = 0
    fan.last_level.return_value = 0
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()

    entity_id = "fan.test_fan"

    # Turn on (defaults to medium - 67%)
    await hass.services.async_call(
        FAN_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: entity_id},
        blocking=True,
    )
    # The Lutron output level mirrors the fan percentage directly.
    assert fan.level == 67

    # Turn off
    await hass.services.async_call(
        FAN_DOMAIN,
        SERVICE_TURN_OFF,
        {ATTR_ENTITY_ID: entity_id},
        blocking=True,
    )
    assert fan.level == 0

    # Set percentage
    await hass.services.async_call(
        FAN_DOMAIN,
        SERVICE_SET_PERCENTAGE,
        {ATTR_ENTITY_ID: entity_id, ATTR_PERCENTAGE: 33},
        blocking=True,
    )
    assert fan.level == 33
async def test_fan_update(
    hass: HomeAssistant, mock_lutron: MagicMock, mock_config_entry: MockConfigEntry
) -> None:
    """Verify level changes pushed by the library reach the fan entity."""
    mock_config_entry.add_to_hass(hass)
    fan_output = mock_lutron.areas[0].outputs[3]
    fan_output.level = 0
    fan_output.last_level.return_value = 0
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()
    fan_entity = "fan.test_fan"
    assert hass.states.get(fan_entity).state == STATE_OFF
    # Invoke the subscription callback exactly as the library would on change.
    fan_output.last_level.return_value = 100
    subscriber = fan_output.subscribe.call_args[0][0]
    subscriber(fan_output, None, None, None)
    await hass.async_block_till_done()
    state = hass.states.get(fan_entity)
    assert state.state == STATE_ON
    assert state.attributes[ATTR_PERCENTAGE] == 100
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/lutron/test_fan.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:tests/components/lutron/test_init.py | """Test Lutron integration setup."""
from unittest.mock import MagicMock
from homeassistant.components.lutron.const import DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
async def test_setup_entry(
    hass: HomeAssistant, mock_lutron: MagicMock, mock_config_entry: MockConfigEntry
) -> None:
    """Verify the entry sets up and produces the expected unique ID."""
    mock_config_entry.add_to_hass(hass)
    assert await async_setup_component(hass, "lutron", {})
    await hass.async_block_till_done()
    runtime = mock_config_entry.runtime_data
    assert runtime.client is mock_lutron
    assert len(runtime.lights) == 1
    # Guard against regressions in unique ID generation — a change here would
    # be breaking. The mock light has uuid="light_uuid" and guid="12345678901".
    registry = er.async_get(hass)
    registry_entry = registry.async_get("light.test_light")
    assert registry_entry.unique_id == "12345678901_light_uuid"
async def test_unload_entry(
    hass: HomeAssistant, mock_lutron: MagicMock, mock_config_entry: MockConfigEntry
) -> None:
    """Verify a set-up entry unloads cleanly."""
    mock_config_entry.add_to_hass(hass)
    # Set up first; an entry must be loaded before it can be unloaded.
    assert await async_setup_component(hass, "lutron", {})
    await hass.async_block_till_done()
    assert await hass.config_entries.async_unload(mock_config_entry.entry_id)
    await hass.async_block_till_done()
async def test_unique_id_migration(
    hass: HomeAssistant, mock_lutron: MagicMock, mock_config_entry: MockConfigEntry
) -> None:
    """Test migration of legacy unique IDs to the newer UUID-based format.
    In older versions of the integration, unique IDs were based on a legacy UUID format.
    The integration now prefers a newer UUID format when available. This test ensures
    that existing entities and devices are automatically migrated to the new format
    without losing their registry entries.
    """
    mock_config_entry.add_to_hass(hass)
    # Setup registries with an entry using the "legacy" unique ID format.
    # This simulates a user who had configured the integration in an older version.
    entity_registry = er.async_get(hass)
    device_registry = dr.async_get(hass)
    # Only the uuid suffix differs; the "12345678901" controller guid prefix
    # is kept across the migration.
    legacy_unique_id = "12345678901_light_legacy_uuid"
    new_unique_id = "12345678901_light_uuid"
    # Create a device in the registry using the legacy ID
    device = device_registry.async_get_or_create(
        config_entry_id=mock_config_entry.entry_id,
        identifiers={(DOMAIN, legacy_unique_id)},
        manufacturer="Lutron",
        name="Test Light",
    )
    # Create an entity in the registry using the legacy ID, linked to the
    # device created above.
    entity = entity_registry.async_get_or_create(
        domain="light",
        platform="lutron",
        unique_id=legacy_unique_id,
        config_entry=mock_config_entry,
        device_id=device.id,
    )
    # Verify our starting state: registry holds the legacy ID
    assert entity.unique_id == legacy_unique_id
    assert (DOMAIN, legacy_unique_id) in device.identifiers
    # Trigger the integration setup.
    # The async_setup_entry logic will detect the legacy IDs in the registry
    # and update them to the new UUIDs provided by the mock_lutron fixture.
    assert await async_setup_component(hass, "lutron", {})
    await hass.async_block_till_done()
    # Verify that the entity's unique ID has been updated to the new format.
    # Re-fetch from the registry: the objects captured above are stale.
    entity = entity_registry.async_get(entity.entity_id)
    assert entity.unique_id == new_unique_id
    # Verify that the device's identifiers have also been migrated.
    device = device_registry.async_get(device.id)
    assert (DOMAIN, new_unique_id) in device.identifiers
    assert (DOMAIN, legacy_unique_id) not in device.identifiers
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/lutron/test_init.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:tests/components/lutron/test_light.py | """Test Lutron light platform."""
from unittest.mock import MagicMock, patch
import pytest
from syrupy.assertion import SnapshotAssertion
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_FLASH,
ATTR_TRANSITION,
DOMAIN as LIGHT_DOMAIN,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from tests.common import MockConfigEntry, snapshot_platform
async def test_light_setup(
    hass: HomeAssistant,
    mock_lutron: MagicMock,
    mock_config_entry: MockConfigEntry,
    entity_registry: er.EntityRegistry,
    snapshot: SnapshotAssertion,
) -> None:
    """Verify the light platform creates entities matching the stored snapshot."""
    dimmer = mock_lutron.areas[0].outputs[0]
    dimmer.level = 0
    dimmer.last_level.return_value = 0
    mock_config_entry.add_to_hass(hass)
    # Restrict setup to the light platform so the snapshot covers only lights.
    with patch("homeassistant.components.lutron.PLATFORMS", [Platform.LIGHT]):
        await hass.config_entries.async_setup(mock_config_entry.entry_id)
        await hass.async_block_till_done()
    await snapshot_platform(hass, entity_registry, snapshot, mock_config_entry.entry_id)
async def test_light_turn_on_off(
    hass: HomeAssistant, mock_lutron: MagicMock, mock_config_entry: MockConfigEntry
) -> None:
    """Exercise turn_on with brightness and turn_off against the mock dimmer."""
    mock_config_entry.add_to_hass(hass)
    dimmer = mock_lutron.areas[0].outputs[0]
    dimmer.level = 0
    dimmer.last_level.return_value = 0
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()
    light_entity = "light.test_light"
    # HA brightness 128/255 maps to a Lutron level of roughly 50.2%.
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: light_entity, ATTR_BRIGHTNESS: 128},
        blocking=True,
    )
    dimmer.set_level.assert_called_with(new_level=pytest.approx(50.196, rel=1e-3))
    # Turning off drives the level to zero.
    await hass.services.async_call(
        LIGHT_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: light_entity}, blocking=True
    )
    dimmer.set_level.assert_called_with(new_level=0)
async def test_light_update(
    hass: HomeAssistant, mock_lutron: MagicMock, mock_config_entry: MockConfigEntry
) -> None:
    """Verify level changes pushed by the library reach the light entity."""
    mock_config_entry.add_to_hass(hass)
    dimmer = mock_lutron.areas[0].outputs[0]
    dimmer.level = 0
    dimmer.last_level.return_value = 0
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()
    light_entity = "light.test_light"
    assert hass.states.get(light_entity).state == STATE_OFF
    # Invoke the subscription callback exactly as the library would on change.
    dimmer.last_level.return_value = 100
    subscriber = dimmer.subscribe.call_args[0][0]
    subscriber(dimmer, None, None, None)
    await hass.async_block_till_done()
    state = hass.states.get(light_entity)
    assert state.state == STATE_ON
    assert state.attributes[ATTR_BRIGHTNESS] == 255
async def test_light_transition(
    hass: HomeAssistant, mock_lutron: MagicMock, mock_config_entry: MockConfigEntry
) -> None:
    """Verify transition times are forwarded as fade_time_seconds."""
    mock_config_entry.add_to_hass(hass)
    dimmer = mock_lutron.areas[0].outputs[0]
    dimmer.level = 0
    dimmer.last_level.return_value = 0
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()
    light_entity = "light.test_light"
    # Turn on with a 2.5 s fade; brightness falls back to the default dimmer
    # level of 50% when not specified.
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: light_entity, ATTR_TRANSITION: 2.5},
        blocking=True,
    )
    dimmer.set_level.assert_called_with(
        new_level=pytest.approx(50.0, abs=0.5), fade_time_seconds=2.5
    )
    # Turn off with a 3 s fade.
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_OFF,
        {ATTR_ENTITY_ID: light_entity, ATTR_TRANSITION: 3.0},
        blocking=True,
    )
    dimmer.set_level.assert_called_with(new_level=0, fade_time_seconds=3.0)
async def test_light_flash(
    hass: HomeAssistant, mock_lutron: MagicMock, mock_config_entry: MockConfigEntry
) -> None:
    """Verify short and long flash map to the expected flash periods."""
    mock_config_entry.add_to_hass(hass)
    dimmer = mock_lutron.areas[0].outputs[0]
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()
    light_entity = "light.test_light"
    # Short flash uses a 0.5 s period, long flash a 1.5 s period.
    for flash_mode, expected_period in (("short", 0.5), ("long", 1.5)):
        await hass.services.async_call(
            LIGHT_DOMAIN,
            SERVICE_TURN_ON,
            {ATTR_ENTITY_ID: light_entity, ATTR_FLASH: flash_mode},
            blocking=True,
        )
        dimmer.flash.assert_called_with(expected_period)
async def test_light_brightness_restore(
    hass: HomeAssistant, mock_lutron: MagicMock, mock_config_entry: MockConfigEntry
) -> None:
    """Test light brightness restore logic.

    Turning a light on without a brightness should restore the last brightness
    it had while on, falling back to the default level on first use.
    """
    mock_config_entry.add_to_hass(hass)
    light = mock_lutron.areas[0].outputs[0]
    light.level = 0
    light.last_level.return_value = 0
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()
    entity_id = "light.test_light"
    # Turn on first time - uses default (50%)
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: entity_id},
        blocking=True,
    )
    light.set_level.assert_called_with(new_level=pytest.approx(50.0, abs=0.5))
    # Simulate update to 50% (Lutron level 50 -> HA level 127)
    light.last_level.return_value = 50
    callback = light.subscribe.call_args[0][0]
    callback(light, None, None, None)
    await hass.async_block_till_done()
    # Turn off (via a pushed state update, not a service call)
    light.last_level.return_value = 0
    callback(light, None, None, None)
    await hass.async_block_till_done()
    # Turn on again - should restore ~50%
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: entity_id},
        blocking=True,
    )
    # HA level 127 -> Lutron level ~49.8
    light.set_level.assert_called_with(new_level=pytest.approx(50.0, abs=0.5))
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/lutron/test_light.py",
"license": "Apache License 2.0",
"lines": 176,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:tests/components/lutron/test_scene.py | """Test Lutron scene platform."""
from unittest.mock import MagicMock, patch
from syrupy.assertion import SnapshotAssertion
from homeassistant.components.scene import DOMAIN as SCENE_DOMAIN
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_ON, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from tests.common import MockConfigEntry, snapshot_platform
async def test_scene_setup(
    hass: HomeAssistant,
    mock_lutron: MagicMock,
    mock_config_entry: MockConfigEntry,
    entity_registry: er.EntityRegistry,
    snapshot: SnapshotAssertion,
) -> None:
    """Verify the scene platform creates entities matching the stored snapshot."""
    mock_config_entry.add_to_hass(hass)
    # Restrict setup to the scene platform so the snapshot covers only scenes.
    with patch("homeassistant.components.lutron.PLATFORMS", [Platform.SCENE]):
        await hass.config_entries.async_setup(mock_config_entry.entry_id)
        await hass.async_block_till_done()
    await snapshot_platform(hass, entity_registry, snapshot, mock_config_entry.entry_id)
async def test_scene_activate(
    hass: HomeAssistant, mock_lutron: MagicMock, mock_config_entry: MockConfigEntry
) -> None:
    """Verify activating a scene taps the corresponding keypad button."""
    mock_config_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()
    keypad_button = mock_lutron.areas[0].keypads[0].buttons[0]
    scene_entity = "scene.test_keypad_test_button"
    await hass.services.async_call(
        SCENE_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: scene_entity},
        blocking=True,
    )
    keypad_button.tap.assert_called_once()
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/lutron/test_scene.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:tests/components/lutron/test_switch.py | """Test Lutron switch platform."""
from unittest.mock import MagicMock, patch
from syrupy.assertion import SnapshotAssertion
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from tests.common import MockConfigEntry, snapshot_platform
async def test_switch_setup(
    hass: HomeAssistant,
    mock_lutron: MagicMock,
    mock_config_entry: MockConfigEntry,
    entity_registry: er.EntityRegistry,
    snapshot: SnapshotAssertion,
) -> None:
    """Verify the switch platform creates entities matching the stored snapshot."""
    # Both switch-backed entity sources start in the off state: the relay
    # output and the keypad LED.
    relay = mock_lutron.areas[0].outputs[1]
    relay.level = 0
    relay.last_level.return_value = 0
    keypad_led = mock_lutron.areas[0].keypads[0].leds[0]
    keypad_led.state = 0
    keypad_led.last_state = 0
    mock_config_entry.add_to_hass(hass)
    # Restrict setup to the switch platform so the snapshot covers only switches.
    with patch("homeassistant.components.lutron.PLATFORMS", [Platform.SWITCH]):
        await hass.config_entries.async_setup(mock_config_entry.entry_id)
        await hass.async_block_till_done()
    await snapshot_platform(hass, entity_registry, snapshot, mock_config_entry.entry_id)
async def test_switch_turn_on_off(
    hass: HomeAssistant, mock_lutron: MagicMock, mock_config_entry: MockConfigEntry
) -> None:
    """Exercise turn_on and turn_off against the mock relay output."""
    mock_config_entry.add_to_hass(hass)
    relay = mock_lutron.areas[0].outputs[1]
    relay.level = 0
    relay.last_level.return_value = 0
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()
    switch_entity = "switch.test_switch"
    # Turning on drives the output to full (100); turning off back to zero.
    for service, expected_level in ((SERVICE_TURN_ON, 100), (SERVICE_TURN_OFF, 0)):
        await hass.services.async_call(
            SWITCH_DOMAIN,
            service,
            {ATTR_ENTITY_ID: switch_entity},
            blocking=True,
        )
        assert relay.level == expected_level
async def test_led_turn_on_off(
    hass: HomeAssistant, mock_lutron: MagicMock, mock_config_entry: MockConfigEntry
) -> None:
    """Exercise turn_on and turn_off against the mock keypad LED."""
    mock_config_entry.add_to_hass(hass)
    keypad_led = mock_lutron.areas[0].keypads[0].leds[0]
    keypad_led.state = 0
    keypad_led.last_state = 0
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()
    led_entity = "switch.test_keypad_test_button"
    # LEDs are binary: on sets state 1, off sets state 0.
    for service, expected_state in ((SERVICE_TURN_ON, 1), (SERVICE_TURN_OFF, 0)):
        await hass.services.async_call(
            SWITCH_DOMAIN,
            service,
            {ATTR_ENTITY_ID: led_entity},
            blocking=True,
        )
        assert keypad_led.state == expected_state
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/lutron/test_switch.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/compit/sensor.py | """Sensor platform for Compit integration."""
from dataclasses import dataclass
from compit_inext_api.consts import CompitParameter
from homeassistant.components.sensor import (
EntityCategory,
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_MILLION,
PERCENTAGE,
UnitOfElectricCurrent,
UnitOfEnergy,
UnitOfPower,
UnitOfTemperature,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, MANUFACTURER_NAME
from .coordinator import CompitConfigEntry, CompitDataUpdateCoordinator
# Home Assistant convention: 0 removes the limit on concurrent entity updates
# for this platform.
PARALLEL_UPDATES = 0
# Enum option reported when a particulate-matter sensor is not installed
# (used in the PM10_LEVEL / PM25_LEVEL option lists below).
NO_SENSOR = "no_sensor"
# Master catalog of sensor descriptions, keyed by device parameter. Device
# models below (DEVICE_DEFINITIONS) pick the subset they actually expose.
# All entries are diagnostic; most are Celsius temperature measurements.
DESCRIPTIONS: dict[CompitParameter, SensorEntityDescription] = {
    CompitParameter.ACTUAL_BUFFER_TEMP: SensorEntityDescription(
        key=CompitParameter.ACTUAL_BUFFER_TEMP.value,
        translation_key="actual_buffer_temp",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.ACTUAL_DHW_TEMP: SensorEntityDescription(
        key=CompitParameter.ACTUAL_DHW_TEMP.value,
        translation_key="actual_dhw_temp",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    # HC1-HC4 share one translation key, differentiated by the zone placeholder.
    CompitParameter.ACTUAL_HC1_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.ACTUAL_HC1_TEMPERATURE.value,
        translation_key="actual_hc_temperature_zone",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
        translation_placeholders={"zone": "1"},
    ),
    CompitParameter.ACTUAL_HC2_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.ACTUAL_HC2_TEMPERATURE.value,
        translation_key="actual_hc_temperature_zone",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
        translation_placeholders={"zone": "2"},
    ),
    CompitParameter.ACTUAL_HC3_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.ACTUAL_HC3_TEMPERATURE.value,
        translation_key="actual_hc_temperature_zone",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
        translation_placeholders={"zone": "3"},
    ),
    CompitParameter.ACTUAL_HC4_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.ACTUAL_HC4_TEMPERATURE.value,
        translation_key="actual_hc_temperature_zone",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
        translation_placeholders={"zone": "4"},
    ),
    CompitParameter.ACTUAL_UPPER_SOURCE_TEMP: SensorEntityDescription(
        key=CompitParameter.ACTUAL_UPPER_SOURCE_TEMP.value,
        translation_key="actual_upper_source_temp",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.ALARM_CODE: SensorEntityDescription(
        key=CompitParameter.ALARM_CODE.value,
        translation_key="alarm_code",
        device_class=SensorDeviceClass.ENUM,
        entity_category=EntityCategory.DIAGNOSTIC,
        options=[
            "no_alarm",
            "damaged_outdoor_temp",
            "damaged_return_temp",
            "no_battery",
            "discharged_battery",
            "low_battery_level",
            "battery_fault",
            "no_pump",
            "pump_fault",
            "internal_af",
            "no_power",
        ],
    ),
    # No translation_key: the BATTERY device class supplies the entity name.
    CompitParameter.BATTERY_LEVEL: SensorEntityDescription(
        key=CompitParameter.BATTERY_LEVEL.value,
        device_class=SensorDeviceClass.BATTERY,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=PERCENTAGE,
    ),
    CompitParameter.BOILER_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.BOILER_TEMPERATURE.value,
        translation_key="boiler_temperature",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.BUFFER_RETURN_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.BUFFER_RETURN_TEMPERATURE.value,
        translation_key="buffer_return_temperature",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.BUFFER_SET_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.BUFFER_SET_TEMPERATURE.value,
        translation_key="buffer_set_temperature",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.CALCULATED_BUFFER_TEMP: SensorEntityDescription(
        key=CompitParameter.CALCULATED_BUFFER_TEMP.value,
        translation_key="calculated_buffer_temp",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.CALCULATED_DHW_TEMP: SensorEntityDescription(
        key=CompitParameter.CALCULATED_DHW_TEMP.value,
        translation_key="calculated_dhw_temp",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.CALCULATED_HEATING_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.CALCULATED_HEATING_TEMPERATURE.value,
        translation_key="calculated_heating_temperature",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.CALCULATED_TARGET_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.CALCULATED_TARGET_TEMPERATURE.value,
        translation_key="calculated_target_temperature",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.CALCULATED_UPPER_SOURCE_TEMP: SensorEntityDescription(
        key=CompitParameter.CALCULATED_UPPER_SOURCE_TEMP.value,
        translation_key="calculated_upper_source_temp",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    # NOTE(review): named "power" but reported as an electric current in mA —
    # presumably the device exposes charge current; confirm against the API.
    CompitParameter.CHARGING_POWER: SensorEntityDescription(
        key=CompitParameter.CHARGING_POWER.value,
        translation_key="charging_power",
        device_class=SensorDeviceClass.CURRENT,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfElectricCurrent.MILLIAMPERE,
    ),
    CompitParameter.CIRCUIT_TARGET_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.CIRCUIT_TARGET_TEMPERATURE.value,
        translation_key="circuit_target_temperature",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.CO2_LEVEL: SensorEntityDescription(
        key=CompitParameter.CO2_LEVEL.value,
        device_class=SensorDeviceClass.CO2,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=CONCENTRATION_PARTS_PER_MILLION,
    ),
    # Percentage reading with no matching device class, hence translation only.
    CompitParameter.CO2_PERCENT: SensorEntityDescription(
        key=CompitParameter.CO2_PERCENT.value,
        translation_key="co2_percent",
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=PERCENTAGE,
    ),
    CompitParameter.COLLECTOR_POWER: SensorEntityDescription(
        key=CompitParameter.COLLECTOR_POWER.value,
        translation_key="collector_power",
        device_class=SensorDeviceClass.POWER,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfPower.KILO_WATT,
    ),
    CompitParameter.COLLECTOR_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.COLLECTOR_TEMPERATURE.value,
        translation_key="collector_temperature",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.DHW_MEASURED_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.DHW_MEASURED_TEMPERATURE.value,
        translation_key="dhw_measured_temperature",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.DHW_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.DHW_TEMPERATURE.value,
        translation_key="dhw_temperature",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    # NOTE(review): named "energy_consumption" but declared as a POWER
    # measurement in MW — verify the parameter's actual unit/semantics.
    CompitParameter.ENERGY_CONSUMPTION: SensorEntityDescription(
        key=CompitParameter.ENERGY_CONSUMPTION.value,
        translation_key="energy_consumption",
        device_class=SensorDeviceClass.POWER,
        native_unit_of_measurement=UnitOfPower.MEGA_WATT,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
    ),
    # Daily/yesterday energy counters use TOTAL (resettable); the lifetime
    # counter below uses TOTAL_INCREASING.
    CompitParameter.ENERGY_SGREADY_YESTERDAY: SensorEntityDescription(
        key=CompitParameter.ENERGY_SGREADY_YESTERDAY.value,
        translation_key="energy_smart_grid_yesterday",
        device_class=SensorDeviceClass.ENERGY,
        state_class=SensorStateClass.TOTAL,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
    ),
    CompitParameter.ENERGY_TODAY: SensorEntityDescription(
        key=CompitParameter.ENERGY_TODAY.value,
        translation_key="energy_today",
        device_class=SensorDeviceClass.ENERGY,
        state_class=SensorStateClass.TOTAL,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
    ),
    CompitParameter.ENERGY_TOTAL: SensorEntityDescription(
        key=CompitParameter.ENERGY_TOTAL.value,
        translation_key="energy_total",
        device_class=SensorDeviceClass.ENERGY,
        state_class=SensorStateClass.TOTAL_INCREASING,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
    ),
    CompitParameter.ENERGY_YESTERDAY: SensorEntityDescription(
        key=CompitParameter.ENERGY_YESTERDAY.value,
        translation_key="energy_yesterday",
        device_class=SensorDeviceClass.ENERGY,
        state_class=SensorStateClass.TOTAL,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
    ),
    CompitParameter.FUEL_LEVEL: SensorEntityDescription(
        key=CompitParameter.FUEL_LEVEL.value,
        translation_key="fuel_level",
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=PERCENTAGE,
    ),
    CompitParameter.HEATING1_TARGET_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.HEATING1_TARGET_TEMPERATURE.value,
        translation_key="heating_target_temperature_zone",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
        translation_placeholders={"zone": "1"},
    ),
    CompitParameter.HEATING2_TARGET_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.HEATING2_TARGET_TEMPERATURE.value,
        translation_key="heating_target_temperature_zone",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
        translation_placeholders={"zone": "2"},
    ),
    CompitParameter.HEATING3_TARGET_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.HEATING3_TARGET_TEMPERATURE.value,
        translation_key="heating_target_temperature_zone",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
        translation_placeholders={"zone": "3"},
    ),
    CompitParameter.HEATING4_TARGET_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.HEATING4_TARGET_TEMPERATURE.value,
        translation_key="heating_target_temperature_zone",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
        translation_placeholders={"zone": "4"},
    ),
    CompitParameter.HUMIDITY: SensorEntityDescription(
        key=CompitParameter.HUMIDITY.value,
        device_class=SensorDeviceClass.HUMIDITY,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=PERCENTAGE,
    ),
    CompitParameter.LOWER_SOURCE_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.LOWER_SOURCE_TEMPERATURE.value,
        translation_key="lower_source_temperature",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.MIXER_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.MIXER_TEMPERATURE.value,
        translation_key="mixer_temperature",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.MIXER1_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.MIXER1_TEMPERATURE.value,
        translation_key="mixer_temperature_zone",
        translation_placeholders={"zone": "1"},
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.MIXER2_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.MIXER2_TEMPERATURE.value,
        translation_key="mixer_temperature_zone",
        translation_placeholders={"zone": "2"},
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.OUTDOOR_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.OUTDOOR_TEMPERATURE.value,
        translation_key="outdoor_temperature",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.PK1_FUNCTION: SensorEntityDescription(
        key=CompitParameter.PK1_FUNCTION.value,
        translation_key="pk1_function",
        device_class=SensorDeviceClass.ENUM,
        entity_category=EntityCategory.DIAGNOSTIC,
        options=[
            "off",
            "on",
            "nano_nr_1",
            "nano_nr_2",
            "nano_nr_3",
            "nano_nr_4",
            "nano_nr_5",
            "winter",
            "summer",
            "cooling",
            "holiday",
        ],
    ),
    CompitParameter.PM1_LEVEL_MEASURED: SensorEntityDescription(
        key=CompitParameter.PM1_LEVEL_MEASURED.value,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        device_class=SensorDeviceClass.PM1,
        native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
    ),
    CompitParameter.PM4_LEVEL_MEASURED: SensorEntityDescription(
        key=CompitParameter.PM4_LEVEL_MEASURED.value,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        device_class=SensorDeviceClass.PM4,
        native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
    ),
    # Qualitative PM10/PM25 levels ship disabled by default; the raw
    # *_MEASURED concentrations below remain enabled.
    CompitParameter.PM10_LEVEL: SensorEntityDescription(
        key=CompitParameter.PM10_LEVEL.value,
        translation_key="pm10_level",
        device_class=SensorDeviceClass.ENUM,
        entity_category=EntityCategory.DIAGNOSTIC,
        entity_registry_enabled_default=False,
        options=[NO_SENSOR, "normal", "warning", "exceeded"],
    ),
    CompitParameter.PM10_MEASURED: SensorEntityDescription(
        key=CompitParameter.PM10_MEASURED.value,
        device_class=SensorDeviceClass.PM10,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
    ),
    CompitParameter.PM25_LEVEL: SensorEntityDescription(
        key=CompitParameter.PM25_LEVEL.value,
        translation_key="pm25_level",
        device_class=SensorDeviceClass.ENUM,
        entity_category=EntityCategory.DIAGNOSTIC,
        entity_registry_enabled_default=False,
        options=[NO_SENSOR, "normal", "warning", "exceeded"],
    ),
    CompitParameter.PM25_MEASURED: SensorEntityDescription(
        key=CompitParameter.PM25_MEASURED.value,
        device_class=SensorDeviceClass.PM25,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
    ),
    CompitParameter.PROTECTION_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.PROTECTION_TEMPERATURE.value,
        translation_key="protection_temperature",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.RETURN_CIRCUIT_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.RETURN_CIRCUIT_TEMPERATURE.value,
        translation_key="return_circuit_temperature",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.TANK_BOTTOM_T2_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.TANK_BOTTOM_T2_TEMPERATURE.value,
        translation_key="tank_temperature_t2",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.TANK_T4_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.TANK_T4_TEMPERATURE.value,
        translation_key="tank_temperature_t4",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
        translation_placeholders={"sensor": "T4"},
    ),
    CompitParameter.TANK_TOP_T3_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.TANK_TOP_T3_TEMPERATURE.value,
        translation_key="tank_temperature_t3",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.TARGET_HEATING_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.TARGET_HEATING_TEMPERATURE.value,
        translation_key="target_heating_temperature",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.UPPER_SOURCE_TEMPERATURE: SensorEntityDescription(
        key=CompitParameter.UPPER_SOURCE_TEMPERATURE.value,
        translation_key="upper_source_temperature",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        entity_category=EntityCategory.DIAGNOSTIC,
        native_unit_of_measurement=UnitOfTemperature.CELSIUS,
    ),
    CompitParameter.VENTILATION_ALARM: SensorEntityDescription(
        key=CompitParameter.VENTILATION_ALARM.value,
        translation_key="ventilation_alarm",
        device_class=SensorDeviceClass.ENUM,
        entity_category=EntityCategory.DIAGNOSTIC,
        options=[
            "no_alarm",
            "damaged_supply_sensor",
            "damaged_exhaust_sensor",
            "damaged_supply_and_exhaust_sensors",
            "bot_alarm",
            "damaged_preheater_sensor",
            "ahu_alarm",
        ],
    ),
    # Unitless gear indicator; no device/state class applies.
    CompitParameter.VENTILATION_GEAR: SensorEntityDescription(
        key=CompitParameter.VENTILATION_GEAR.value,
        translation_key="ventilation_gear",
        entity_category=EntityCategory.DIAGNOSTIC,
    ),
}
@dataclass(frozen=True, kw_only=True)
class CompitDeviceDescription:
    """Describes one Compit device: its display name and supported parameters."""

    # Human-readable device name (also used as the device model string).
    name: str
    # Maps each parameter this device supports to the sensor description
    # used to expose it in Home Assistant.
    parameters: dict[CompitParameter, SensorEntityDescription]
def _parameter_map(
    *codes: CompitParameter,
) -> dict[CompitParameter, SensorEntityDescription]:
    """Build a parameter -> description mapping from the shared DESCRIPTIONS table."""
    return {code: DESCRIPTIONS[code] for code in codes}


# Maps the Compit device class code to the description of that device.
DEVICE_DEFINITIONS: dict[int, CompitDeviceDescription] = {
    3: CompitDeviceDescription(
        name="R 810",
        parameters=_parameter_map(
            CompitParameter.CALCULATED_HEATING_TEMPERATURE,
            CompitParameter.OUTDOOR_TEMPERATURE,
            CompitParameter.RETURN_CIRCUIT_TEMPERATURE,
            CompitParameter.TARGET_HEATING_TEMPERATURE,
        ),
    ),
    5: CompitDeviceDescription(
        name="R350 T3",
        parameters=_parameter_map(
            CompitParameter.CALCULATED_TARGET_TEMPERATURE,
            CompitParameter.CIRCUIT_TARGET_TEMPERATURE,
            CompitParameter.MIXER_TEMPERATURE,
            CompitParameter.OUTDOOR_TEMPERATURE,
        ),
    ),
    12: CompitDeviceDescription(
        name="Nano Color",
        parameters=_parameter_map(
            CompitParameter.OUTDOOR_TEMPERATURE,
            CompitParameter.PM10_LEVEL,
            CompitParameter.PM25_LEVEL,
            CompitParameter.VENTILATION_ALARM,
        ),
    ),
    14: CompitDeviceDescription(
        name="BWC310",
        parameters=_parameter_map(
            CompitParameter.CALCULATED_HEATING_TEMPERATURE,
            CompitParameter.TARGET_HEATING_TEMPERATURE,
        ),
    ),
    27: CompitDeviceDescription(
        name="CO2 SHC",
        parameters=_parameter_map(
            CompitParameter.HUMIDITY,
            CompitParameter.OUTDOOR_TEMPERATURE,
        ),
    ),
    34: CompitDeviceDescription(
        name="r470",
        parameters=_parameter_map(
            CompitParameter.OUTDOOR_TEMPERATURE,
        ),
    ),
    36: CompitDeviceDescription(
        name="BioMax742",
        parameters=_parameter_map(
            CompitParameter.BOILER_TEMPERATURE,
            CompitParameter.FUEL_LEVEL,
            CompitParameter.OUTDOOR_TEMPERATURE,
        ),
    ),
    44: CompitDeviceDescription(
        name="SolarComp 951",
        parameters=_parameter_map(
            CompitParameter.COLLECTOR_POWER,
            CompitParameter.COLLECTOR_TEMPERATURE,
            CompitParameter.TANK_BOTTOM_T2_TEMPERATURE,
            CompitParameter.TANK_T4_TEMPERATURE,
            CompitParameter.TANK_TOP_T3_TEMPERATURE,
        ),
    ),
    45: CompitDeviceDescription(
        name="SolarComp971",
        parameters=_parameter_map(
            CompitParameter.COLLECTOR_POWER,
            CompitParameter.COLLECTOR_TEMPERATURE,
            CompitParameter.ENERGY_TODAY,
            CompitParameter.TANK_BOTTOM_T2_TEMPERATURE,
            CompitParameter.TANK_TOP_T3_TEMPERATURE,
        ),
    ),
    53: CompitDeviceDescription(
        name="R350.CWU",
        parameters=_parameter_map(
            CompitParameter.CALCULATED_TARGET_TEMPERATURE,
            CompitParameter.DHW_MEASURED_TEMPERATURE,
            CompitParameter.ENERGY_SGREADY_YESTERDAY,
            CompitParameter.ENERGY_TOTAL,
            CompitParameter.ENERGY_YESTERDAY,
            CompitParameter.OUTDOOR_TEMPERATURE,
        ),
    ),
    58: CompitDeviceDescription(
        name="SolarComp 971SD1",
        parameters=_parameter_map(
            CompitParameter.ENERGY_CONSUMPTION,
        ),
    ),
    75: CompitDeviceDescription(
        name="BioMax772",
        parameters=_parameter_map(
            CompitParameter.BOILER_TEMPERATURE,
            CompitParameter.FUEL_LEVEL,
            CompitParameter.OUTDOOR_TEMPERATURE,
        ),
    ),
    78: CompitDeviceDescription(
        name="SPM - Nano Color 2",
        parameters=_parameter_map(
            CompitParameter.CO2_LEVEL,
            CompitParameter.CO2_PERCENT,
            CompitParameter.HUMIDITY,
            CompitParameter.OUTDOOR_TEMPERATURE,
            CompitParameter.PM1_LEVEL_MEASURED,
            CompitParameter.PM4_LEVEL_MEASURED,
            CompitParameter.PM10_MEASURED,
            CompitParameter.PM25_MEASURED,
        ),
    ),
    # NOTE(review): name below carries a trailing space — looks unintentional,
    # but stripping it would change entity/device names; confirm before fixing.
    91: CompitDeviceDescription(
        name="R770RS / R771RS ",
        parameters=_parameter_map(
            CompitParameter.BOILER_TEMPERATURE,
            CompitParameter.FUEL_LEVEL,
            CompitParameter.MIXER1_TEMPERATURE,
            CompitParameter.MIXER2_TEMPERATURE,
            CompitParameter.OUTDOOR_TEMPERATURE,
        ),
    ),
    92: CompitDeviceDescription(
        name="r490",
        parameters=_parameter_map(
            CompitParameter.LOWER_SOURCE_TEMPERATURE,
            CompitParameter.UPPER_SOURCE_TEMPERATURE,
        ),
    ),
    99: CompitDeviceDescription(
        name="SolarComp971C",
        parameters=_parameter_map(
            CompitParameter.COLLECTOR_POWER,
            CompitParameter.COLLECTOR_TEMPERATURE,
            CompitParameter.ENERGY_TODAY,
            CompitParameter.TANK_BOTTOM_T2_TEMPERATURE,
            CompitParameter.TANK_TOP_T3_TEMPERATURE,
        ),
    ),
    201: CompitDeviceDescription(
        name="BioMax775",
        parameters=_parameter_map(
            CompitParameter.BOILER_TEMPERATURE,
            CompitParameter.FUEL_LEVEL,
            CompitParameter.OUTDOOR_TEMPERATURE,
        ),
    ),
    210: CompitDeviceDescription(
        name="EL750",
        parameters=_parameter_map(
            CompitParameter.BOILER_TEMPERATURE,
            CompitParameter.BUFFER_RETURN_TEMPERATURE,
            CompitParameter.DHW_TEMPERATURE,
        ),
    ),
    212: CompitDeviceDescription(
        name="BioMax742",
        parameters=_parameter_map(
            CompitParameter.BOILER_TEMPERATURE,
            CompitParameter.FUEL_LEVEL,
            CompitParameter.OUTDOOR_TEMPERATURE,
        ),
    ),
    215: CompitDeviceDescription(
        name="R480",
        parameters=_parameter_map(
            CompitParameter.ACTUAL_BUFFER_TEMP,
            CompitParameter.ACTUAL_DHW_TEMP,
            CompitParameter.DHW_MEASURED_TEMPERATURE,
        ),
    ),
    221: CompitDeviceDescription(
        name="R350.M",
        parameters=_parameter_map(
            CompitParameter.MIXER_TEMPERATURE,
            CompitParameter.PROTECTION_TEMPERATURE,
            CompitParameter.OUTDOOR_TEMPERATURE,
        ),
    ),
    222: CompitDeviceDescription(
        name="R377B",
        parameters=_parameter_map(
            CompitParameter.BUFFER_SET_TEMPERATURE,
        ),
    ),
    223: CompitDeviceDescription(
        name="Nano Color 2",
        parameters=_parameter_map(
            CompitParameter.OUTDOOR_TEMPERATURE,
            CompitParameter.PM10_LEVEL,
            CompitParameter.PM25_LEVEL,
            CompitParameter.VENTILATION_ALARM,
            CompitParameter.VENTILATION_GEAR,
        ),
    ),
    224: CompitDeviceDescription(
        name="R 900",
        parameters=_parameter_map(
            CompitParameter.ACTUAL_BUFFER_TEMP,
            CompitParameter.ACTUAL_HC1_TEMPERATURE,
            CompitParameter.ACTUAL_HC2_TEMPERATURE,
            CompitParameter.ACTUAL_HC3_TEMPERATURE,
            CompitParameter.ACTUAL_HC4_TEMPERATURE,
            CompitParameter.ACTUAL_DHW_TEMP,
            CompitParameter.ACTUAL_UPPER_SOURCE_TEMP,
            CompitParameter.CALCULATED_BUFFER_TEMP,
            CompitParameter.CALCULATED_DHW_TEMP,
            CompitParameter.CALCULATED_UPPER_SOURCE_TEMP,
            CompitParameter.HEATING1_TARGET_TEMPERATURE,
            CompitParameter.HEATING2_TARGET_TEMPERATURE,
            CompitParameter.HEATING3_TARGET_TEMPERATURE,
            CompitParameter.HEATING4_TARGET_TEMPERATURE,
            CompitParameter.OUTDOOR_TEMPERATURE,
        ),
    ),
    225: CompitDeviceDescription(
        name="SPM - Nano Color",
        parameters=_parameter_map(
            CompitParameter.HUMIDITY,
            CompitParameter.OUTDOOR_TEMPERATURE,
            CompitParameter.PM10_MEASURED,
            CompitParameter.PM25_MEASURED,
        ),
    ),
    226: CompitDeviceDescription(
        name="AF-1",
        parameters=_parameter_map(
            CompitParameter.ALARM_CODE,
            CompitParameter.BATTERY_LEVEL,
            CompitParameter.CHARGING_POWER,
            CompitParameter.OUTDOOR_TEMPERATURE,
            CompitParameter.RETURN_CIRCUIT_TEMPERATURE,
        ),
    ),
    227: CompitDeviceDescription(
        name="Combo",
        parameters=_parameter_map(
            CompitParameter.PK1_FUNCTION,
        ),
    ),
}
async def async_setup_entry(
    hass: HomeAssistant,
    entry: CompitConfigEntry,
    async_add_devices: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up Compit sensor entities from a config entry."""
    coordinator = entry.runtime_data
    entities: list[CompitSensor] = []
    for device_id, device in coordinator.connector.all_devices.items():
        definition = DEVICE_DEFINITIONS.get(device.definition.code)
        if definition is None:
            # Device class without a known definition: expose no sensors.
            continue
        for parameter, description in definition.parameters.items():
            # Skip enum sensors whose current value is the NO_SENSOR marker:
            # the corresponding probe is not connected on this device.
            options = description.options
            if (
                options
                and NO_SENSOR in options
                and coordinator.connector.get_current_value(device_id, parameter)
                == NO_SENSOR
            ):
                continue
            entities.append(
                CompitSensor(
                    coordinator,
                    device_id,
                    definition.name,
                    parameter,
                    description,
                )
            )
    async_add_devices(entities)
class CompitSensor(CoordinatorEntity[CompitDataUpdateCoordinator], SensorEntity):
    """A single Compit device parameter exposed as a sensor."""

    _attr_has_entity_name = True

    def __init__(
        self,
        coordinator: CompitDataUpdateCoordinator,
        device_id: int,
        device_name: str,
        parameter_code: CompitParameter,
        entity_description: SensorEntityDescription,
    ) -> None:
        """Initialize the sensor entity."""
        super().__init__(coordinator)
        self.device_id = device_id
        self.parameter_code = parameter_code
        self.entity_description = entity_description
        self._attr_unique_id = f"{device_id}_{entity_description.key}"
        self._attr_device_info = DeviceInfo(
            identifiers={(DOMAIN, str(device_id))},
            name=device_name,
            manufacturer=MANUFACTURER_NAME,
            model=device_name,
        )

    @property
    def available(self) -> bool:
        """Available while the coordinator still knows this device."""
        if not super().available:
            return False
        return self.coordinator.connector.get_device(self.device_id) is not None

    @property
    def native_value(self) -> float | str | None:
        """Return the current parameter value, or None when it is invalid."""
        value = self.coordinator.connector.get_current_value(
            self.device_id, self.parameter_code
        )
        # Numeric readings are passed through as-is.
        if isinstance(value, (int, float)):
            return value
        # Enum readings are only valid when they are one of the declared options.
        options = self.entity_description.options
        if isinstance(value, str) and options and value in options:
            return value
        return None
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/compit/sensor.py",
"license": "Apache License 2.0",
"lines": 1002,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
home-assistant/core:tests/components/compit/test_sensor.py | """Tests for the Compit sensor platform."""
from typing import Any
from unittest.mock import MagicMock
import pytest
from syrupy.assertion import SnapshotAssertion
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from . import setup_integration, snapshot_compit_entities
from tests.common import MockConfigEntry
async def test_sensor_entities_snapshot(
    hass: HomeAssistant,
    entity_registry: er.EntityRegistry,
    mock_config_entry: MockConfigEntry,
    # NOTE(review): presumably requested for its API-patching side effect; confirm.
    mock_connector: MagicMock,
    snapshot: SnapshotAssertion,
) -> None:
    """Snapshot test for sensor entities creation, unique IDs, and device info."""
    await setup_integration(hass, mock_config_entry)
    # Compare the full set of created sensor entities against the stored snapshot.
    snapshot_compit_entities(hass, entity_registry, snapshot, Platform.SENSOR)
@pytest.mark.parametrize(
    ("mock_return_value", "test_description"),
    [
        (None, "parameter is None"),
        ("damaged_supply_sensor", "parameter value is enum"),
    ],
)
async def test_sensor_return_value_enum_sensor(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_connector: MagicMock,
    mock_return_value: Any | None,
    test_description: str,
) -> None:
    """Test the enum sensor state for a missing value and for a valid option."""
    # Make every parameter lookup return the parametrized value.
    mock_connector.get_current_value.side_effect = lambda device_id, parameter_code: (
        mock_return_value
    )
    await setup_integration(hass, mock_config_entry)
    state = hass.states.get("sensor.nano_color_2_ventilation_alarm")
    assert state is not None
    # None renders as the "unknown" state; a valid option is passed through.
    expected_state = mock_return_value or "unknown"
    assert state.state == expected_state
async def test_sensor_enum_value_cannot_return_number(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_connector: MagicMock,
) -> None:
    """Test that an enum sensor is not exposed when the connector returns a number."""
    mock_connector.get_current_value.side_effect = lambda device_id, parameter_code: (
        123  # Invalid enum value
    )
    await setup_integration(hass, mock_config_entry)
    state = hass.states.get("sensor.nano_color_2_ventilation_alarm")
    # NOTE(review): the expectation is that the entity is absent entirely
    # (state is None), not merely "unknown" — confirm this is the intended
    # behavior when an enum parameter reports a numeric value during setup.
    assert state is None
@pytest.mark.parametrize(
    ("mock_return_value", "test_description"),
    [
        (None, "parameter is None"),
        (21, "parameter value is number"),
    ],
)
async def test_sensor_return_value_number_sensor(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_connector: MagicMock,
    mock_return_value: Any | None,
    test_description: str,
) -> None:
    """Test that sensor entity shows correct number value."""
    # Make every parameter lookup return the parametrized value.
    mock_connector.get_current_value.side_effect = lambda device_id, parameter_code: (
        mock_return_value
    )
    await setup_integration(hass, mock_config_entry)
    state = hass.states.get("sensor.r_900_calculated_buffer_temperature")
    assert state is not None
    # A numeric reading is rendered as its string form; None becomes "unknown".
    expected_state = (
        str(mock_return_value) if mock_return_value is not None else "unknown"
    )
    assert state.state == expected_state
async def test_sensor_number_value_cannot_return_enum(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_connector: MagicMock,
) -> None:
    """Test that sensor entity shows unknown when get_current_value returns enum instead of number."""
    mock_connector.get_current_value.side_effect = lambda device_id, parameter_code: (
        "eco"  # Invalid number value
    )
    await setup_integration(hass, mock_config_entry)
    state = hass.states.get("sensor.r_900_calculated_buffer_temperature")
    # Separate assertions so a failure reports whether the entity is missing
    # or merely has the wrong state.
    assert state is not None
    assert state.state == "unknown"
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/compit/test_sensor.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/proxmoxve/sensor.py | """Sensor platform for Proxmox VE integration."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any
from homeassistant.components.sensor import (
EntityCategory,
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
StateType,
)
from homeassistant.const import PERCENTAGE, UnitOfInformation
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from .coordinator import ProxmoxConfigEntry, ProxmoxNodeData
from .entity import ProxmoxContainerEntity, ProxmoxNodeEntity, ProxmoxVMEntity
@dataclass(frozen=True, kw_only=True)
class ProxmoxNodeSensorEntityDescription(SensorEntityDescription):
    """Class to hold Proxmox node sensor description."""

    # Extracts this sensor's native value from the node's coordinator data.
    value_fn: Callable[[ProxmoxNodeData], StateType]
@dataclass(frozen=True, kw_only=True)
class ProxmoxVMSensorEntityDescription(SensorEntityDescription):
    """Class to hold Proxmox VM sensor description."""

    # Extracts this sensor's native value from the raw VM status dict.
    value_fn: Callable[[dict[str, Any]], StateType]
@dataclass(frozen=True, kw_only=True)
class ProxmoxContainerSensorEntityDescription(SensorEntityDescription):
    """Class to hold Proxmox container sensor description."""

    # Extracts this sensor's native value from the raw container status dict.
    value_fn: Callable[[dict[str, Any]], StateType]
# Keyword arguments shared by every byte-count (memory/disk) sensor.
_DATA_SIZE_SENSOR_KWARGS: dict[str, Any] = {
    "device_class": SensorDeviceClass.DATA_SIZE,
    "native_unit_of_measurement": UnitOfInformation.BYTES,
    "suggested_unit_of_measurement": UnitOfInformation.GIBIBYTES,
    "suggested_display_precision": 1,
    "entity_category": EntityCategory.DIAGNOSTIC,
    "state_class": SensorStateClass.MEASUREMENT,
}

# Keyword arguments shared by every CPU-usage (percentage) sensor.
_CPU_USAGE_SENSOR_KWARGS: dict[str, Any] = {
    "native_unit_of_measurement": PERCENTAGE,
    "entity_category": EntityCategory.DIAGNOSTIC,
    "suggested_display_precision": 2,
    "state_class": SensorStateClass.MEASUREMENT,
}

NODE_SENSORS: tuple[ProxmoxNodeSensorEntityDescription, ...] = (
    ProxmoxNodeSensorEntityDescription(
        key="node_cpu",
        translation_key="node_cpu",
        value_fn=lambda data: data.node["cpu"] * 100,
        **_CPU_USAGE_SENSOR_KWARGS,
    ),
    ProxmoxNodeSensorEntityDescription(
        key="node_max_cpu",
        translation_key="node_max_cpu",
        value_fn=lambda data: data.node["maxcpu"],
    ),
    ProxmoxNodeSensorEntityDescription(
        key="node_disk",
        translation_key="node_disk",
        value_fn=lambda data: data.node["disk"],
        **_DATA_SIZE_SENSOR_KWARGS,
    ),
    ProxmoxNodeSensorEntityDescription(
        key="node_max_disk",
        translation_key="node_max_disk",
        value_fn=lambda data: data.node["maxdisk"],
        **_DATA_SIZE_SENSOR_KWARGS,
    ),
    ProxmoxNodeSensorEntityDescription(
        key="node_memory",
        translation_key="node_memory",
        value_fn=lambda data: data.node["mem"],
        **_DATA_SIZE_SENSOR_KWARGS,
    ),
    ProxmoxNodeSensorEntityDescription(
        key="node_max_memory",
        translation_key="node_max_memory",
        value_fn=lambda data: data.node["maxmem"],
        **_DATA_SIZE_SENSOR_KWARGS,
    ),
    ProxmoxNodeSensorEntityDescription(
        key="node_status",
        translation_key="node_status",
        value_fn=lambda data: data.node["status"],
        device_class=SensorDeviceClass.ENUM,
        options=["online", "offline"],
    ),
)

VM_SENSORS: tuple[ProxmoxVMSensorEntityDescription, ...] = (
    ProxmoxVMSensorEntityDescription(
        key="vm_max_cpu",
        translation_key="vm_max_cpu",
        value_fn=lambda data: data["cpus"],
    ),
    ProxmoxVMSensorEntityDescription(
        key="vm_cpu",
        translation_key="vm_cpu",
        value_fn=lambda data: data["cpu"] * 100,
        **_CPU_USAGE_SENSOR_KWARGS,
    ),
    ProxmoxVMSensorEntityDescription(
        key="vm_memory",
        translation_key="vm_memory",
        value_fn=lambda data: data["mem"],
        **_DATA_SIZE_SENSOR_KWARGS,
    ),
    ProxmoxVMSensorEntityDescription(
        key="vm_max_memory",
        translation_key="vm_max_memory",
        value_fn=lambda data: data["maxmem"],
        **_DATA_SIZE_SENSOR_KWARGS,
    ),
    ProxmoxVMSensorEntityDescription(
        key="vm_disk",
        translation_key="vm_disk",
        value_fn=lambda data: data["disk"],
        **_DATA_SIZE_SENSOR_KWARGS,
    ),
    ProxmoxVMSensorEntityDescription(
        key="vm_max_disk",
        translation_key="vm_max_disk",
        value_fn=lambda data: data["maxdisk"],
        **_DATA_SIZE_SENSOR_KWARGS,
    ),
    ProxmoxVMSensorEntityDescription(
        key="vm_status",
        translation_key="vm_status",
        value_fn=lambda data: data["status"],
        device_class=SensorDeviceClass.ENUM,
        options=["running", "stopped", "suspended"],
    ),
)

CONTAINER_SENSORS: tuple[ProxmoxContainerSensorEntityDescription, ...] = (
    ProxmoxContainerSensorEntityDescription(
        key="container_max_cpu",
        translation_key="container_max_cpu",
        value_fn=lambda data: data["cpus"],
    ),
    ProxmoxContainerSensorEntityDescription(
        key="container_cpu",
        translation_key="container_cpu",
        value_fn=lambda data: data["cpu"] * 100,
        **_CPU_USAGE_SENSOR_KWARGS,
    ),
    ProxmoxContainerSensorEntityDescription(
        key="container_memory",
        translation_key="container_memory",
        value_fn=lambda data: data["mem"],
        **_DATA_SIZE_SENSOR_KWARGS,
    ),
    ProxmoxContainerSensorEntityDescription(
        key="container_max_memory",
        translation_key="container_max_memory",
        value_fn=lambda data: data["maxmem"],
        **_DATA_SIZE_SENSOR_KWARGS,
    ),
    ProxmoxContainerSensorEntityDescription(
        key="container_disk",
        translation_key="container_disk",
        value_fn=lambda data: data["disk"],
        **_DATA_SIZE_SENSOR_KWARGS,
    ),
    ProxmoxContainerSensorEntityDescription(
        key="container_max_disk",
        translation_key="container_max_disk",
        value_fn=lambda data: data["maxdisk"],
        **_DATA_SIZE_SENSOR_KWARGS,
    ),
    ProxmoxContainerSensorEntityDescription(
        key="container_status",
        translation_key="container_status",
        value_fn=lambda data: data["status"],
        device_class=SensorDeviceClass.ENUM,
        options=["running", "stopped", "suspended"],
    ),
)
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ProxmoxConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up Proxmox VE sensors."""
    coordinator = entry.runtime_data

    def _add_node_sensors(nodes: list[ProxmoxNodeData]) -> None:
        """Create the full set of sensors for each given node."""
        async_add_entities(
            ProxmoxNodeSensor(coordinator, description, node)
            for node in nodes
            for description in NODE_SENSORS
        )

    def _add_vm_sensors(vms: list[tuple[ProxmoxNodeData, dict[str, Any]]]) -> None:
        """Create the full set of sensors for each given VM."""
        async_add_entities(
            ProxmoxVMSensor(coordinator, description, vm, node_data)
            for node_data, vm in vms
            for description in VM_SENSORS
        )

    def _add_container_sensors(
        containers: list[tuple[ProxmoxNodeData, dict[str, Any]]],
    ) -> None:
        """Create the full set of sensors for each given container."""
        async_add_entities(
            ProxmoxContainerSensor(coordinator, description, container, node_data)
            for node_data, container in containers
            for description in CONTAINER_SENSORS
        )

    # Register for nodes/VMs/containers discovered after setup...
    coordinator.new_nodes_callbacks.append(_add_node_sensors)
    coordinator.new_vms_callbacks.append(_add_vm_sensors)
    coordinator.new_containers_callbacks.append(_add_container_sensors)

    # ...and create entities for everything the coordinator already knows.
    current_nodes = [
        node_data
        for node_data in coordinator.data.values()
        if node_data.node["node"] in coordinator.known_nodes
    ]
    _add_node_sensors(current_nodes)

    current_vms = [
        (node_data, vm_data)
        for node_data in coordinator.data.values()
        for vmid, vm_data in node_data.vms.items()
        if (node_data.node["node"], vmid) in coordinator.known_vms
    ]
    _add_vm_sensors(current_vms)

    current_containers = [
        (node_data, container_data)
        for node_data in coordinator.data.values()
        for vmid, container_data in node_data.containers.items()
        if (node_data.node["node"], vmid) in coordinator.known_containers
    ]
    _add_container_sensors(current_containers)
class ProxmoxNodeSensor(ProxmoxNodeEntity, SensorEntity):
    """Representation of a Proxmox VE node sensor."""

    entity_description: ProxmoxNodeSensorEntityDescription

    @property
    def native_value(self) -> StateType:
        """Return the native value of the sensor."""
        # Look up this node's latest coordinator data, then apply the extractor.
        node_data = self.coordinator.data[self.device_name]
        return self.entity_description.value_fn(node_data)
class ProxmoxVMSensor(ProxmoxVMEntity, SensorEntity):
    """Represents a Proxmox VE VM sensor."""

    entity_description: ProxmoxVMSensorEntityDescription

    @property
    def native_value(self) -> StateType:
        """Return the native value of the sensor."""
        extractor = self.entity_description.value_fn
        return extractor(self.vm_data)
class ProxmoxContainerSensor(ProxmoxContainerEntity, SensorEntity):
    """Represents a Proxmox VE container sensor."""

    entity_description: ProxmoxContainerSensorEntityDescription

    @property
    def native_value(self) -> StateType:
        """Return the native value of the sensor."""
        extractor = self.entity_description.value_fn
        return extractor(self.container_data)
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/proxmoxve/sensor.py",
"license": "Apache License 2.0",
"lines": 312,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
home-assistant/core:tests/components/proxmoxve/test_sensor.py | """Tests for the Proxmox VE sensor platform."""
from unittest.mock import MagicMock, patch
import pytest
from syrupy.assertion import SnapshotAssertion
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from . import setup_integration
from tests.common import MockConfigEntry, snapshot_platform
@pytest.fixture(autouse=True)
def enable_all_entities(entity_registry_enabled_by_default: None) -> None:
    """Make sure all entities are enabled."""
    # Autouse wrapper: requesting entity_registry_enabled_by_default enables
    # entities that would otherwise be registered as disabled-by-default.
async def test_all_entities(
    hass: HomeAssistant,
    snapshot: SnapshotAssertion,
    # NOTE(review): presumably requested for its API-patching side effect; confirm.
    mock_proxmox_client: MagicMock,
    mock_config_entry: MockConfigEntry,
    entity_registry: er.EntityRegistry,
) -> None:
    """Test all entities."""
    # Limit setup to the sensor platform so the snapshot covers only sensors.
    with patch(
        "homeassistant.components.proxmoxve.PLATFORMS",
        [Platform.SENSOR],
    ):
        await setup_integration(hass, mock_config_entry)
    await snapshot_platform(
        hass,
        entity_registry,
        snapshot,
        mock_config_entry.entry_id,
    )
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/proxmoxve/test_sensor.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/met/diagnostics.py | """Diagnostics support for Met.no integration."""
from typing import Any
from homeassistant.components.diagnostics import async_redact_data
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE
from homeassistant.core import HomeAssistant
from .coordinator import MetWeatherConfigEntry
# Config entry fields stripped from the diagnostics output so the user's
# location is not leaked in shared diagnostics dumps.
TO_REDACT = [
    CONF_LATITUDE,
    CONF_LONGITUDE,
]
async def async_get_config_entry_diagnostics(
    hass: HomeAssistant, entry: MetWeatherConfigEntry
) -> dict[str, Any]:
    """Return diagnostics for a config entry."""
    weather_data = entry.runtime_data.data
    # Current conditions and both forecast variants, as held by the coordinator.
    coordinator_payload = {
        "current_weather_data": weather_data.current_weather_data,
        "daily_forecast": weather_data.daily_forecast,
        "hourly_forecast": weather_data.hourly_forecast,
    }
    return {
        # Entry data with the location coordinates redacted.
        "entry_data": async_redact_data(entry.data, TO_REDACT),
        "data": coordinator_payload,
    }
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/met/diagnostics.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
home-assistant/core:tests/components/met/test_diagnostics.py | """Test Met.no diagnostics."""
from syrupy.assertion import SnapshotAssertion
from homeassistant.core import HomeAssistant
from . import init_integration
from tests.components.diagnostics import get_diagnostics_for_config_entry
from tests.typing import ClientSessionGenerator
async def test_entry_diagnostics(
    hass: HomeAssistant,
    hass_client: ClientSessionGenerator,
    snapshot: SnapshotAssertion,
) -> None:
    """Test config entry diagnostics."""
    mock_config_entry = await init_integration(hass)
    # The full diagnostics payload is compared against the stored snapshot.
    assert (
        await get_diagnostics_for_config_entry(hass, hass_client, mock_config_entry)
        == snapshot
    )
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/met/test_diagnostics.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/systemnexa2/light.py | """Light entity for the SystemNexa2 integration."""
from typing import Any
from homeassistant.components.light import ATTR_BRIGHTNESS, ColorMode, LightEntity
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from .coordinator import SystemNexa2ConfigEntry, SystemNexa2DataUpdateCoordinator
from .entity import SystemNexa2Entity
# 0 disables Home Assistant's parallel-update throttle for this platform.
PARALLEL_UPDATES = 0
async def async_setup_entry(
    hass: HomeAssistant,
    entry: SystemNexa2ConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up lights based on a config entry."""
    coordinator = entry.runtime_data
    if not coordinator.data.info_data.dimmable:
        # Non-dimmable devices get no light entity.
        return
    async_add_entities([SystemNexa2Light(coordinator)])
class SystemNexa2Light(SystemNexa2Entity, LightEntity):
    """Dimmable SystemNexa2 light.

    The device reports brightness as a float in [0.0, 1.0]; Home Assistant
    works on a 0-255 integer scale, so values are converted both ways here.
    """

    _attr_translation_key = "light"
    _attr_color_mode = ColorMode.BRIGHTNESS
    _attr_supported_color_modes = {ColorMode.BRIGHTNESS}

    def __init__(self, coordinator: SystemNexa2DataUpdateCoordinator) -> None:
        """Initialize the light."""
        super().__init__(coordinator=coordinator, key="light")

    @property
    def is_on(self) -> bool | None:
        """Return True if the device brightness is above zero, None if unknown."""
        device_level = self.coordinator.data.state
        if device_level is None:
            return None
        return device_level > 0

    @property
    def brightness(self) -> int | None:
        """Return the brightness on Home Assistant's 0-255 scale."""
        device_level = self.coordinator.data.state
        if device_level is None:
            return None
        # Scale the device's 0.0-1.0 value and clamp into the valid range.
        return max(0, min(255, round(device_level * 255)))

    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn on the light, optionally at a requested brightness."""
        if ATTR_BRIGHTNESS not in kwargs:
            await self.coordinator.async_turn_on()
            return
        # Convert HA brightness (0-255) to the device's 0.0-1.0 scale.
        await self.coordinator.async_set_brightness(kwargs[ATTR_BRIGHTNESS] / 255)

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off the light."""
        await self.coordinator.async_turn_off()
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/systemnexa2/light.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
home-assistant/core:tests/components/systemnexa2/test_light.py | """Test the System Nexa 2 light platform."""
from unittest.mock import MagicMock, patch
import pytest
from sn2 import ConnectionStatus, StateChange
from syrupy.assertion import SnapshotAssertion
from homeassistant.components.light import ATTR_BRIGHTNESS, DOMAIN as LIGHT_DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
Platform,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.entity_registry as er
from . import find_update_callback
from tests.common import MockConfigEntry, snapshot_platform
@pytest.mark.parametrize("dimmable", [True], indirect=True)
async def test_light_entities(
    hass: HomeAssistant,
    snapshot: SnapshotAssertion,
    entity_registry: er.EntityRegistry,
    mock_config_entry: MockConfigEntry,
    mock_system_nexa_2_device: MagicMock,
) -> None:
    """Test the light entities."""
    mock_config_entry.add_to_hass(hass)
    # Restrict setup to the light platform so the snapshot stays focused.
    light_only = patch(
        "homeassistant.components.systemnexa2.PLATFORMS", [Platform.LIGHT]
    )
    with light_only:
        await hass.config_entries.async_setup(mock_config_entry.entry_id)
        await hass.async_block_till_done()
        await snapshot_platform(
            hass, entity_registry, snapshot, mock_config_entry.entry_id
        )
async def test_light_only_for_dimmable_devices(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_system_nexa_2_device: MagicMock,
) -> None:
    """Ensure no light entity is created for a non-dimmable device."""
    # The default mock_system_nexa_2_device fixture reports dimmable=False.
    mock_config_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()
    assert hass.states.get("light.outdoor_smart_plug") is None
@pytest.mark.parametrize("dimmable", [True], indirect=True)
async def test_light_control_operations(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_system_nexa_2_device: MagicMock,
) -> None:
    """Test all light control operations (on/off/toggle/dim)."""
    device = mock_system_nexa_2_device.return_value
    mock_config_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()

    entity_id = "light.in_wall_dimmer_light"

    async def call_light(service: str, data: dict | None = None) -> None:
        """Invoke a light service against the entity under test."""
        payload = {ATTR_ENTITY_ID: entity_id, **(data or {})}
        await hass.services.async_call(LIGHT_DOMAIN, service, payload, blocking=True)

    # Fixture starts the dimmer at 50%, so the entity begins in the on state.
    state = hass.states.get(entity_id)
    assert state is not None
    assert state.state == STATE_ON

    # Plain turn_on (no brightness) must not go through set_brightness.
    await call_light(SERVICE_TURN_ON)
    device.turn_on.assert_called_once()
    device.set_brightness.assert_not_called()
    device.turn_on.reset_mock()

    # Each HA brightness (0-255) is forwarded on the device's 0.0-1.0 scale:
    # 128 (~50%), 255 (full), 1 (minimum non-zero).
    for ha_brightness in (128, 255, 1):
        await call_light(SERVICE_TURN_ON, {ATTR_BRIGHTNESS: ha_brightness})
        device.set_brightness.assert_called_once_with(ha_brightness / 255)
        device.set_brightness.reset_mock()
    # When brightness is supplied, turn_on must never be used.
    device.turn_on.assert_not_called()

    # Turn off maps straight onto the device call.
    await call_light(SERVICE_TURN_OFF)
    device.turn_off.assert_called_once()
    device.turn_off.reset_mock()
    # No reason to test toggle service as its an internal function using turn_on/off
@pytest.mark.parametrize("dimmable", [True], indirect=True)
async def test_light_brightness_property(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_system_nexa_2_device: MagicMock,
) -> None:
    """Test light brightness property conversion."""
    mock_config_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()

    # Grab the state-change callback the integration registered on the device.
    update_callback = find_update_callback(mock_system_nexa_2_device)

    # Device level (0.0-1.0) -> expected HA state and 0-255 brightness
    # (brightness is None when the light is off and therefore not checked).
    cases = [
        (0.5, STATE_ON, 128),
        (1.0, STATE_ON, 255),
        (0.0, STATE_OFF, None),
        (0.1, STATE_ON, 26),
    ]
    for device_level, expected_state, expected_brightness in cases:
        await update_callback(StateChange(state=device_level))
        await hass.async_block_till_done()
        state = hass.states.get("light.in_wall_dimmer_light")
        assert state is not None
        assert state.state == expected_state
        if expected_state == STATE_ON:
            assert state.attributes.get(ATTR_BRIGHTNESS) == expected_brightness
@pytest.mark.parametrize("dimmable", [True], indirect=True)
async def test_light_is_on_property(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_system_nexa_2_device: MagicMock,
) -> None:
    """Test light is_on property."""
    mock_config_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()

    # Grab the state-change callback the integration registered on the device.
    update_callback = find_update_callback(mock_system_nexa_2_device)

    # Any level above zero reports on; exactly zero reports off.
    for device_level, expected_state in ((0.5, STATE_ON), (0.0, STATE_OFF)):
        await update_callback(StateChange(state=device_level))
        await hass.async_block_till_done()
        state = hass.states.get("light.in_wall_dimmer_light")
        assert state is not None
        assert state.state == expected_state
@pytest.mark.parametrize("dimmable", [True], indirect=True)
async def test_coordinator_connection_status(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_system_nexa_2_device: MagicMock,
) -> None:
    """Test coordinator handles connection status updates for light."""
    mock_config_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()

    # Grab the state-change callback the integration registered on the device.
    update_callback = find_update_callback(mock_system_nexa_2_device)
    entity_id = "light.in_wall_dimmer_light"

    def current_state():
        """Fetch the entity's state, asserting it exists."""
        state = hass.states.get(entity_id)
        assert state is not None
        return state

    # Fixture starts the dimmer at 50%, so the entity begins available and on.
    assert current_state().state == STATE_ON

    # Losing the connection must mark the entity unavailable.
    await update_callback(ConnectionStatus(connected=False))
    await hass.async_block_till_done()
    assert current_state().state == STATE_UNAVAILABLE

    # Reconnecting plus a fresh state report restores it.
    await update_callback(ConnectionStatus(connected=True))
    await update_callback(StateChange(state=0.75))
    await hass.async_block_till_done()
    state = current_state()
    assert state.state == STATE_ON
    assert state.attributes.get(ATTR_BRIGHTNESS) == 191  # round(0.75 * 255)
@pytest.mark.parametrize("dimmable", [True], indirect=True)
async def test_coordinator_state_change(
    hass: HomeAssistant,
    mock_config_entry: MockConfigEntry,
    mock_system_nexa_2_device: MagicMock,
) -> None:
    """Test coordinator handles state change updates for light."""
    mock_config_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_config_entry.entry_id)
    await hass.async_block_till_done()

    # Grab the state-change callback the integration registered on the device.
    update_callback = find_update_callback(mock_system_nexa_2_device)

    # (device level, expected HA state, expected 0-255 brightness or None
    # when the light is off and brightness is not checked)
    for device_level, expected_state, expected_brightness in (
        (0.0, STATE_OFF, None),
        (0.25, STATE_ON, 64),
        (1.0, STATE_ON, 255),
    ):
        await update_callback(StateChange(state=device_level))
        await hass.async_block_till_done()
        state = hass.states.get("light.in_wall_dimmer_light")
        assert state is not None
        assert state.state == expected_state
        if expected_brightness is not None:
            assert state.attributes.get(ATTR_BRIGHTNESS) == expected_brightness
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/systemnexa2/test_light.py",
"license": "Apache License 2.0",
"lines": 241,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/indevolt/select.py | """Select platform for Indevolt integration."""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Final
from homeassistant.components.select import SelectEntity, SelectEntityDescription
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from . import IndevoltConfigEntry
from .coordinator import IndevoltCoordinator
from .entity import IndevoltEntity
# 0 disables Home Assistant's parallel-update throttle for this platform.
PARALLEL_UPDATES = 0
@dataclass(frozen=True, kw_only=True)
class IndevoltSelectEntityDescription(SelectEntityDescription):
    """Custom entity description class for Indevolt select entities."""

    # Datapoint key read from coordinator data for the current value.
    read_key: str
    # Datapoint key written when the user selects an option.
    write_key: str
    # Maps raw device values to select option strings.
    value_to_option: dict[int, str]
    # Raw values during which the entity is reported unavailable.
    unavailable_values: list[int] = field(default_factory=list)
    # Device generations this description applies to (both by default).
    generation: list[int] = field(default_factory=lambda: [1, 2])
# Descriptions of all select entities exposed by this platform.
SELECTS: Final = (
    IndevoltSelectEntityDescription(
        key="energy_mode",
        translation_key="energy_mode",
        read_key="7101",
        write_key="47005",
        value_to_option={
            1: "self_consumed_prioritized",
            4: "real_time_control",
            5: "charge_discharge_schedule",
        },
        # Raw value 0 corresponds to the outdoor/portable mode, in which
        # the energy mode cannot be changed, so the entity goes unavailable.
        unavailable_values=[0],
    ),
)
async def async_setup_entry(
    hass: HomeAssistant,
    entry: IndevoltConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up the select platform for Indevolt."""
    coordinator = entry.runtime_data
    # Only create entities whose description targets this device generation.
    entities = [
        IndevoltSelectEntity(coordinator=coordinator, description=description)
        for description in SELECTS
        if coordinator.generation in description.generation
    ]
    async_add_entities(entities)
class IndevoltSelectEntity(IndevoltEntity, SelectEntity):
    """Represents a select entity for Indevolt devices."""

    entity_description: IndevoltSelectEntityDescription

    def __init__(
        self,
        coordinator: IndevoltCoordinator,
        description: IndevoltSelectEntityDescription,
    ) -> None:
        """Initialize the Indevolt select entity."""
        super().__init__(coordinator)
        self.entity_description = description
        self._attr_unique_id = f"{self.serial_number}_{description.key}"
        self._attr_options = list(description.value_to_option.values())
        # Reverse mapping so a chosen option can be written back as raw value.
        self._option_to_value = {
            option: value for value, option in description.value_to_option.items()
        }

    @property
    def current_option(self) -> str | None:
        """Return the currently selected option, or None if unknown."""
        description = self.entity_description
        raw_value = self.coordinator.data.get(description.read_key)
        if raw_value is None:
            return None
        return description.value_to_option.get(raw_value)

    @property
    def available(self) -> bool:
        """Return False when the device is in a mode that cannot be selected."""
        if not super().available:
            return False
        raw_value = self.coordinator.data.get(self.entity_description.read_key)
        return raw_value not in self.entity_description.unavailable_values

    async def async_select_option(self, option: str) -> None:
        """Select a new option."""
        value = self._option_to_value[option]
        pushed = await self.coordinator.async_push_data(
            self.entity_description.write_key, value
        )
        if not pushed:
            raise HomeAssistantError(f"Failed to set option {option} for {self.name}")
        # Pull fresh data so the entity reflects the new mode promptly.
        await self.coordinator.async_request_refresh()
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/indevolt/select.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
home-assistant/core:tests/components/indevolt/test_select.py | """Tests for the Indevolt select platform."""
from datetime import timedelta
from unittest.mock import AsyncMock, patch
from freezegun.api import FrozenDateTimeFactory
import pytest
from syrupy.assertion import SnapshotAssertion
from homeassistant.components.indevolt.coordinator import SCAN_INTERVAL
from homeassistant.components.select import SERVICE_SELECT_OPTION
from homeassistant.const import STATE_UNAVAILABLE, Platform
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity_registry as er
from . import setup_integration
from tests.common import MockConfigEntry, async_fire_time_changed, snapshot_platform
# Datapoint key the integration polls for the current energy mode.
KEY_READ_ENERGY_MODE = "7101"
# Datapoint key the integration writes when changing the energy mode.
KEY_WRITE_ENERGY_MODE = "47005"
@pytest.mark.usefixtures("entity_registry_enabled_by_default")
@pytest.mark.parametrize("generation", [2, 1], indirect=True)
async def test_select(
    hass: HomeAssistant,
    entity_registry: er.EntityRegistry,
    mock_indevolt: AsyncMock,
    snapshot: SnapshotAssertion,
    mock_config_entry: MockConfigEntry,
) -> None:
    """Test select entity registration and states."""
    # Load only the select platform so the snapshot stays focused.
    select_only = patch("homeassistant.components.indevolt.PLATFORMS", [Platform.SELECT])
    with select_only:
        await setup_integration(hass, mock_config_entry)
    await snapshot_platform(hass, entity_registry, snapshot, mock_config_entry.entry_id)
@pytest.mark.parametrize("generation", [2], indirect=True)
@pytest.mark.parametrize(
    ("option", "expected_value"),
    [
        ("self_consumed_prioritized", 1),
        ("real_time_control", 4),
        ("charge_discharge_schedule", 5),
    ],
)
async def test_select_option(
    hass: HomeAssistant,
    mock_indevolt: AsyncMock,
    mock_config_entry: MockConfigEntry,
    option: str,
    expected_value: int,
) -> None:
    """Test selecting all valid energy mode options."""
    with patch("homeassistant.components.indevolt.PLATFORMS", [Platform.SELECT]):
        await setup_integration(hass, mock_config_entry)

    # Forget calls made during setup before exercising the service.
    mock_indevolt.set_data.reset_mock()
    # Let the post-write refresh report the newly written mode.
    mock_indevolt.fetch_data.return_value[KEY_READ_ENERGY_MODE] = expected_value

    await hass.services.async_call(
        Platform.SELECT,
        SERVICE_SELECT_OPTION,
        {"entity_id": "select.cms_sf2000_energy_mode", "option": option},
        blocking=True,
    )

    # The write must target the write key with the raw device value.
    mock_indevolt.set_data.assert_called_with(KEY_WRITE_ENERGY_MODE, expected_value)
    # And the entity state must reflect the refreshed mode.
    state = hass.states.get("select.cms_sf2000_energy_mode")
    assert state is not None
    assert state.state == option
@pytest.mark.parametrize("generation", [2], indirect=True)
async def test_select_set_option_error(
    hass: HomeAssistant,
    mock_indevolt: AsyncMock,
    mock_config_entry: MockConfigEntry,
) -> None:
    """Test error handling when selecting an option."""
    with patch("homeassistant.components.indevolt.PLATFORMS", [Platform.SELECT]):
        await setup_integration(hass, mock_config_entry)

    # Make the device write fail.
    mock_indevolt.set_data.side_effect = HomeAssistantError(
        "Device communication failed"
    )

    service_data = {
        "entity_id": "select.cms_sf2000_energy_mode",
        "option": "real_time_control",
    }
    with pytest.raises(HomeAssistantError):
        await hass.services.async_call(
            Platform.SELECT,
            SERVICE_SELECT_OPTION,
            service_data,
            blocking=True,
        )

    # The failure happened in, not before, the device write.
    mock_indevolt.set_data.assert_called_once()
@pytest.mark.parametrize("generation", [2], indirect=True)
async def test_select_unavailable_outdoor_portable(
    hass: HomeAssistant,
    mock_indevolt: AsyncMock,
    mock_config_entry: MockConfigEntry,
) -> None:
    """Test that entity is unavailable when device is in outdoor/portable mode (value 0)."""
    # Report the raw mode value 0 (outdoor/portable) before setup runs.
    mock_indevolt.fetch_data.return_value[KEY_READ_ENERGY_MODE] = 0

    with patch("homeassistant.components.indevolt.PLATFORMS", [Platform.SELECT]):
        await setup_integration(hass, mock_config_entry)

    state = hass.states.get("select.cms_sf2000_energy_mode")
    assert state is not None
    assert state.state == STATE_UNAVAILABLE
@pytest.mark.parametrize("generation", [2], indirect=True)
async def test_select_availability(
    hass: HomeAssistant,
    mock_indevolt: AsyncMock,
    mock_config_entry: MockConfigEntry,
    freezer: FrozenDateTimeFactory,
) -> None:
    """Test select entity availability when coordinator fails."""
    with patch("homeassistant.components.indevolt.PLATFORMS", [Platform.SELECT]):
        await setup_integration(hass, mock_config_entry)

    # Entity starts out available.
    state = hass.states.get("select.cms_sf2000_energy_mode")
    assert state is not None
    assert state.state != STATE_UNAVAILABLE

    # Break the next poll, then advance past the coordinator's scan interval.
    mock_indevolt.fetch_data.side_effect = ConnectionError
    freezer.tick(delta=timedelta(seconds=SCAN_INTERVAL))
    async_fire_time_changed(hass)
    await hass.async_block_till_done()

    state = hass.states.get("select.cms_sf2000_energy_mode")
    assert state is not None
    assert state.state == STATE_UNAVAILABLE
| {
"repo_id": "home-assistant/core",
"file_path": "tests/components/indevolt/test_select.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
home-assistant/core:homeassistant/components/velux/diagnostics.py | """Diagnostics support for Velux."""
from __future__ import annotations
from typing import Any
from homeassistant.components.diagnostics import async_redact_data
from homeassistant.const import CONF_MAC, CONF_PASSWORD
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr, entity_registry as er
from . import VeluxConfigEntry
# Entry data fields scrubbed from diagnostics (gateway MAC and password).
TO_REDACT = {CONF_MAC, CONF_PASSWORD}
async def async_get_config_entry_diagnostics(
    hass: HomeAssistant, entry: VeluxConfigEntry
) -> dict[str, Any]:
    """Return diagnostics for a config entry, includes nodes, devices, and entities.

    Collects the (redacted) entry data, pyvlx connection/gateway status,
    every node known to pyvlx, and every registered device with its
    entities and their current states.
    """
    pyvlx = entry.runtime_data
    # One summary dict per KLF200 node known to pyvlx.
    nodes: list[dict[str, Any]] = [
        {
            "node_id": node.node_id,
            "name": node.name,
            "serial_number": node.serial_number,
            "type": type(node).__name__,
            "device_updated_callbacks": node.device_updated_cbs,
        }
        for node in pyvlx.nodes
    ]
    device_registry = dr.async_get(hass)
    entity_registry = er.async_get(hass)
    devices: list[dict[str, Any]] = []
    for device in dr.async_entries_for_config_entry(device_registry, entry.entry_id):
        entities: list[dict[str, Any]] = []
        # Disabled entities are included so the report shows the full picture.
        for entity_entry in er.async_entries_for_device(
            entity_registry,
            device_id=device.id,
            include_disabled_entities=True,
        ):
            # Stays None for entities that currently have no state.
            state_dict = None
            if state := hass.states.get(entity_entry.entity_id):
                state_dict = dict(state.as_dict())
                # The state context is excluded from the diagnostics output.
                state_dict.pop("context", None)
            entities.append(
                {
                    "entity_id": entity_entry.entity_id,
                    "unique_id": entity_entry.unique_id,
                    "state": state_dict,
                }
            )
        devices.append(
            {
                "name": device.name,
                "entities": entities,
            }
        )
    return {
        # CONF_MAC / CONF_PASSWORD are redacted via TO_REDACT.
        "config_entry": async_redact_data(entry.data, TO_REDACT),
        "connection": {
            "connected": pyvlx.connection.connected,
            "connection_count": pyvlx.connection.connection_counter,
            "frame_received_cbs": pyvlx.connection.frame_received_cbs,
            "connection_opened_cbs": pyvlx.connection.connection_opened_cbs,
            "connection_closed_cbs": pyvlx.connection.connection_closed_cbs,
        },
        "gateway": {
            # Falsy gateway attributes are reported as None.
            "state": str(pyvlx.klf200.state) if pyvlx.klf200.state else None,
            "version": str(pyvlx.klf200.version) if pyvlx.klf200.version else None,
            "protocol_version": (
                str(pyvlx.klf200.protocol_version)
                if pyvlx.klf200.protocol_version
                else None
            ),
        },
        "nodes": nodes,
        "devices": devices,
    }
| {
"repo_id": "home-assistant/core",
"file_path": "homeassistant/components/velux/diagnostics.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.