repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/service-catalog/seed_code/mlops_app/infra/lambda/get_model_metadata/lambda.py | modules/service-catalog/seed_code/mlops_app/infra/lambda/get_model_metadata/lambda.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import os
import boto3
# Module-scope clients so warm Lambda invocations reuse connections.
# NOTE(review): sm_client appears unused in this handler — confirm before removing.
sm_client = boto3.client("sagemaker")
sns_client = boto3.client("sns")
# Topic ARN injected by the CDK stack; raises KeyError at import time if absent.
SNS_TOPIC_ARN = os.environ["SNS_TOPIC_ARN"]
def send_message(subject, msg):
    """Publish *msg* under *subject* to the configured SNS topic.

    Returns the raw ``sns_client.publish`` response dict.
    """
    return sns_client.publish(
        TopicArn=SNS_TOPIC_ARN,
        Subject=subject,
        Message=msg,
    )
def handler(event, context):
    """Lambda entry point for SageMaker Model Package state-change events.

    Publishes an SNS notification when a model package in the watched group
    is approved or newly registered (``PendingManualApproval``).

    Returns the SNS publish response for actionable statuses, or ``{}``
    for any other status.
    """
    # The event arrives as a plain dict from EventBridge; the previous
    # json.dumps/json.loads round-trip was a no-op and has been removed.
    print(event)
    status = event["detail"]["ModelApprovalStatus"]
    group = event["detail"]["ModelPackageGroupName"]

    # Notification wording per actionable approval status.
    subjects = {
        "Approved": f"[SageMaker] New Model Approved in {group}",
        "PendingManualApproval": f"[SageMaker] New Model Registered in {group}",
    }

    response = {}
    if status in subjects:
        print(f"Publishing information to topic {SNS_TOPIC_ARN}")
        msg = f"Details: \n {json.dumps(event, indent=2)}"
        response = send_message(subjects[status], msg)
    return response
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/service-catalog/seed_code/mlops_app/infra/notifications/notifications_stack.py | modules/service-catalog/seed_code/mlops_app/infra/notifications/notifications_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from typing import Any
from aws_cdk import Stack
from aws_cdk import aws_events as events
from aws_cdk import aws_events_targets as targets
from aws_cdk import aws_iam as iam
from aws_cdk import aws_lambda as lambda_
from aws_cdk import aws_sns as sns
from constructs import Construct
class NotificationsStack(Stack):
    """CDK stack that notifies on SageMaker Model Package state changes.

    Wires an EventBridge rule (scoped to one Model Package Group) to a
    Lambda function that publishes the event details to a dedicated SNS
    topic.
    """

    def __init__(
        self,
        scope: Construct,
        construct_id: str,
        sagemaker_project_name: str,
        sagemaker_project_id: str,
        model_package_group_name: str,
        project_short_name: str,
        env_name: str,
        **kwargs: Any,
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Construct-id prefix ties resources to the SageMaker project.
        prefix = f"{sagemaker_project_name}-{sagemaker_project_id}"

        # SNS topic that receives the "new model" notifications.
        topic_name = f"{project_short_name}-sns-{env_name}"
        new_model_topic = sns.Topic(self, topic_name, display_name=topic_name, topic_name=topic_name)

        # Lambda that formats the EventBridge payload and publishes it to the
        # topic. The asset path is relative to the CDK app's working directory.
        get_metadata_function = lambda_.Function(
            self,
            f"{prefix}-model-lambda",
            runtime=lambda_.Runtime.PYTHON_3_10,
            handler="lambda.handler",
            function_name=f"{project_short_name}-mpg-state-change-{env_name}",
            code=lambda_.Code.from_asset("lambda/get_model_metadata"),
            environment={
                "SNS_TOPIC_ARN": new_model_topic.topic_arn,
            },
        )
        new_model_topic.grant_publish(get_metadata_function)

        # Read-only SageMaker permissions for the function.
        # NOTE(review): resources="*" is broad — confirm whether Describe*/Get*
        # can be scoped to the model package group here.
        get_metadata_function.add_to_role_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "sagemaker:Describe*",
                    "sagemaker:Get*",
                ],
                resources=[
                    "*",
                ],
            ),
        )

        # Trigger only on state changes for the given model package group.
        rule = events.Rule(
            self,
            f"{prefix}-new-model-rule",
            rule_name=f"{project_short_name}-mpg-state-change-{env_name}",
            event_pattern=events.EventPattern(
                detail={
                    "ModelPackageGroupName": [model_package_group_name],
                },
                detail_type=["SageMaker Model Package State Change"],
                source=["aws.sagemaker"],
            ),
        )
        rule.add_target(targets.LambdaFunction(get_metadata_function))
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/service-catalog/seed_code/mlops_app/infra/notifications/__init__.py | modules/service-catalog/seed_code/mlops_app/infra/notifications/__init__.py | # __init__.py
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/post-processing/yolop-lane-detection/stack.py | modules/post-processing/yolop-lane-detection/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import Any, cast
import aws_cdk.aws_ecr as ecr
import aws_cdk.aws_iam as iam
import cdk_nag
from aws_cdk import Aspects, Duration, Stack, Tags
from cdk_nag import NagPackSuppression, NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class LaneDetection(Stack):
    """CDK stack for the YOLOP lane-detection SageMaker processing module.

    Exposes to the app: ``repository_name``, ``image_uri`` (the
    ``smprocessor`` tag of an existing ECR repository), and ``role`` (the
    SageMaker execution role).
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        project_name: str,
        deployment_name: str,
        module_name: str,
        s3_access_policy: str,
        ecr_repository_arn: str,
        stack_description: str,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            scope,
            id,
            description=stack_description,
            **kwargs,
        )

        # Tag every construct in this stack for attribution.
        Tags.of(scope=cast(IConstruct, self)).add(
            key="Deployment",
            value="aws",
        )
        dep_mod = f"{project_name}-{deployment_name}-{module_name}"

        # Reference an existing ECR repository (created elsewhere); the
        # processing image is its "smprocessor" tag.
        repo = ecr.Repository.from_repository_arn(self, "Repository", repository_arn=ecr_repository_arn)
        self.repository_name = repo.repository_name
        self.image_uri = f"{repo.repository_uri}:smprocessor"

        # Inline policy scoped to project-prefixed DynamoDB, ECR, and S3 resources.
        policy_statements = [
            iam.PolicyStatement(
                actions=["dynamodb:*"],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{self.partition}:dynamodb:{self.region}:{self.account}:table/{project_name}*"],
            ),
            iam.PolicyStatement(
                actions=["ecr:*"],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{self.partition}:ecr:{self.region}:{self.account}:repository/{dep_mod}*"],
            ),
            iam.PolicyStatement(
                actions=["s3:GetObject", "s3:GetObjectAcl", "s3:ListBucket"],
                effect=iam.Effect.ALLOW,
                resources=[
                    f"arn:{self.partition}:s3:::{project_name}-*",
                    f"arn:{self.partition}:s3:::{project_name}-*/*",
                ],
            ),
        ]
        dag_document = iam.PolicyDocument(statements=policy_statements)

        # Execution role assumable by SageMaker, combining the inline policy,
        # the caller-supplied S3 access policy, and AWS managed policies.
        self.role = iam.Role(
            self,
            f"{repo.repository_name}-sm-role",
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal("sagemaker.amazonaws.com"),
            ),
            inline_policies={"DagPolicyDocument": dag_document},
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonECSTaskExecutionRolePolicy"),
                iam.ManagedPolicy.from_managed_policy_arn(self, id="fullaccess", managed_policy_arn=s3_access_policy),
                iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSageMakerFullAccess"),
            ],
            max_session_duration=Duration.hours(12),
        )

        # Run cdk-nag checks, suppressing the managed-policy and wildcard
        # findings with the reasons recorded below.
        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())
        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM4",
                        "reason": "Managed Policies are for service account roles only",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM5",
                        "reason": "Resource access restriced to ADDF resources",
                    }
                ),
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/post-processing/yolop-lane-detection/app.py | modules/post-processing/yolop-lane-detection/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from aws_cdk import App, CfnOutput, Environment
from stack import LaneDetection
# SeedFarmer-provided deployment context (empty string when unset).
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
# Required module parameters; fail fast at synth time when missing.
ecr_repository_arn = os.getenv(_param("ECR_REPOSITORY_ARN"))
full_access_policy = os.getenv(_param("FULL_ACCESS_POLICY_ARN"))
if not ecr_repository_arn:
    raise ValueError("ECR Repository ARN is missing.")
if not full_access_policy:
    raise ValueError("S3 Full Access Policy ARN is missing.")
def generate_description() -> str:
    """Build the CloudFormation stack description.

    Embeds AWS Solution metadata (id/name, plus version when present) from
    the SEEDFARMER_PARAMETER_SOLUTION_* environment variables; otherwise
    falls back to the static ADDF description.
    """
    sid = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_ID", None)
    sname = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_NAME", None)
    sversion = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_VERSION", None)
    if sid and sname:
        suffix = f". Version {sversion}" if sversion else ""
        return f"({sid}) {sname}{suffix}"
    return "(SO9154) Autonomous Driving Data Framework (ADDF) - yolop-lane-det"
app = App()

stack = LaneDetection(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    project_name=project_name,
    deployment_name=deployment_name,
    module_name=module_name,
    env=Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
    ecr_repository_arn=ecr_repository_arn,
    s3_access_policy=full_access_policy,
    stack_description=generate_description(),
)

# Export the stack's key attributes as JSON metadata for downstream consumers.
CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string(
        {
            "ImageUri": stack.image_uri,
            "EcrRepoName": stack.repository_name,
            "ExecutionRole": stack.role.role_arn,
        }
    ),
)

app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/post-processing/yolop-lane-detection/src/sample_sm_processor.py | modules/post-processing/yolop-lane-detection/src/sample_sm_processor.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# The following is example code that walks thru the invocation of the model image
# in this module...it is an EXAMPLE that should be executed outside of this module
from sagemaker import get_execution_role
from sagemaker.processing import ProcessingInput, ProcessingOutput, Processor
### The following should ALL be replaced by addf env parameters
role = get_execution_role()
# Job configuration — account/bucket values here are placeholders.
IMAGE_URI = "123456789012.dkr.ecr.us-east-1.amazonaws.com/yolop:smprocessor"
INSTANCE_TYPE = "ml.m5.2xlarge"
BUCKET_INPUT = "bucket-sagemaker"
S3_INPUT_PATH = "images"
BUCKET_OUTPUT = "bucket-sagemaker"
S3_OUTPUT_PATH = "output_yolop/images"
S3_OUTPUT_PATH_JSON = "output_yolop/json_output"
S3_OUTPUT_PATH_CSV = "output_yolop/csv_output"

### The following SHOULD NOT BE CHANGED
# Container-local mount points; these match the CLI defaults in detect_lanes.py.
LOCAL_INPUT = "/opt/ml/processing/input/image"
LOCAL_OUTPUT = "/opt/ml/processing/output/image"
LOCAL_OUTPUT_JSON = "/opt/ml/processing/output/json"
LOCAL_OUTPUT_CSV = "/opt/ml/processing/output/csv"

# Generic SageMaker Processor wrapping the YOLOP container image.
processor = Processor(
    image_uri=IMAGE_URI,
    role=role,
    instance_count=1,
    instance_type=INSTANCE_TYPE,
    base_job_name="yolop-testing-processor",
)

# Run the processing job
processor.run(
    # Arguments are forwarded to the container's entry point (detect_lanes.py).
    arguments=[
        "--save_dir",
        LOCAL_OUTPUT,
        "--source",
        LOCAL_INPUT,
        "--json_path",
        LOCAL_OUTPUT_JSON,
        "--csv_path",
        LOCAL_OUTPUT_CSV,
        # "--img-size","640" #1280
    ],
    inputs=[
        ProcessingInput(
            input_name="images_input",
            source=f"s3://{BUCKET_INPUT}/{S3_INPUT_PATH}",
            destination=LOCAL_INPUT,
        )
    ],
    outputs=[
        ProcessingOutput(
            output_name="image_output",
            source=LOCAL_OUTPUT,
            destination=f"s3://{BUCKET_OUTPUT}/{S3_OUTPUT_PATH}",
        ),
        ProcessingOutput(
            output_name="json_output",
            source=LOCAL_OUTPUT_JSON,
            destination=f"s3://{BUCKET_OUTPUT}/{S3_OUTPUT_PATH_JSON}",
        ),
        ProcessingOutput(
            output_name="out_csv",
            source=LOCAL_OUTPUT_CSV,
            destination=f"s3://{BUCKET_OUTPUT}/{S3_OUTPUT_PATH_CSV}",
        ),
    ],
)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/post-processing/yolop-lane-detection/src/detect_lanes.py | modules/post-processing/yolop-lane-detection/src/detect_lanes.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import argparse
import json
import os
import sys
import time
from pathlib import Path
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
print(sys.path)
import cv2
import numpy as np
import pandas as pd
import torch
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from lib.config import cfg
from lib.core.function import AverageMeter
from lib.core.general import non_max_suppression, scale_coords
from lib.dataset import LoadImages, LoadStreams
from lib.models import get_net
from lib.utils import plot_one_box, show_seg_result
from lib.utils.utils import create_logger, select_device, time_synchronized
from numpy import random
from tqdm import tqdm
# Standard ImageNet mean/std normalization applied after ToTensor.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

transform = transforms.Compose(
    [
        transforms.ToTensor(),
        normalize,
    ]
)
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serializes numpy arrays as (nested) plain lists."""

    def default(self, obj):
        # Arrays become lists; anything else defers to the base encoder,
        # which raises TypeError for unsupported types.
        return obj.tolist() if isinstance(obj, np.ndarray) else super().default(obj)
# Module-level accumulator: one [image_name, lane-mask-JSON] row per processed frame.
name_lanes = []
def detect(cfg, opt):
    """Run YOLOP inference over images/streams from opt.source.

    Writes annotated images to opt.save_dir and a lanes.csv (per-image lane
    masks as JSON) to opt.csv_path, then prints timing stats.
    NOTE(review): opt.json_path is created but nothing is written into it here.
    """
    logger, _, _ = create_logger(cfg, cfg.LOG_DIR, "demo")

    device = select_device(logger, opt.device)

    half = device.type != "cpu"  # half precision only supported on CUDA

    # Load model
    model = get_net(cfg)
    checkpoint = torch.load(opt.weights, map_location=device)
    model.load_state_dict(checkpoint["state_dict"])
    model = model.to(device)
    if half:
        model.half()  # to FP16

    # Set Dataloader — a numeric source means a webcam/stream index.
    if opt.source.isnumeric():
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(opt.source, img_size=opt.img_size)
        _bs = len(dataset)  # batch_size
    else:
        dataset = LoadImages(opt.source, img_size=opt.img_size)
        _bs = 1  # batch_size

    # Get names and colors (one random BGR color per detection class).
    names = model.module.names if hasattr(model, "module") else model.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]

    # Run inference
    t0 = time.time()

    _vid_path, _vid_writer = None, None
    img = torch.zeros((1, 3, opt.img_size, opt.img_size), device=device)  # init img
    # Warm-up pass on GPU only.
    _ = model(img.half() if half else img) if device.type != "cpu" else None  # run once
    model.eval()

    inf_time = AverageMeter()
    nms_time = AverageMeter()

    # Ensure output directories exist (parents are assumed to exist already).
    if not os.path.exists(opt.csv_path):
        os.mkdir(opt.csv_path)
    if not os.path.exists(opt.json_path):
        os.mkdir(opt.json_path)

    for i, (path, img, img_det, vid_cap, shapes) in tqdm(enumerate(dataset), total=len(dataset)):
        img = transform(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Inference: the model returns detections plus two segmentation heads
        # (drivable area, lane lines).
        t1 = time_synchronized()
        det_out, da_seg_out, ll_seg_out = model(img)
        t2 = time_synchronized()

        inf_out, _ = det_out
        inf_time.update(t2 - t1, img.size(0))

        # Apply NMS
        t3 = time_synchronized()
        det_pred = non_max_suppression(
            inf_out,
            conf_thres=opt.conf_thres,
            iou_thres=opt.iou_thres,
            classes=None,
            agnostic=False,
        )
        t4 = time_synchronized()
        nms_time.update(t4 - t3, img.size(0))
        det = det_pred[0]

        save_path = (
            str(opt.save_dir + "/" + Path(path).name)
            if dataset.mode != "stream"
            else str(opt.save_dir + "/" + "web.mp4")
        )

        _, _, height, width = img.shape
        _h, _w, _ = img_det.shape
        # shapes carries the letterbox ratio and padding applied at load time;
        # strip padding before upsampling the segmentation maps back to source scale.
        pad_w, pad_h = shapes[1][1]
        pad_w = int(pad_w)
        pad_h = int(pad_h)
        ratio = shapes[1][0][1]

        # Drivable-area mask: crop padding, upsample, argmax over class channel.
        da_predict = da_seg_out[:, :, pad_h : (height - pad_h), pad_w : (width - pad_w)]
        da_seg_mask = torch.nn.functional.interpolate(da_predict, scale_factor=int(1 / ratio), mode="bilinear")
        _, da_seg_mask = torch.max(da_seg_mask, 1)
        da_seg_mask = da_seg_mask.int().squeeze().cpu().numpy()

        # Lane-line mask: same treatment as the drivable-area map.
        ll_predict = ll_seg_out[:, :, pad_h : (height - pad_h), pad_w : (width - pad_w)]
        ll_seg_mask = torch.nn.functional.interpolate(ll_predict, scale_factor=int(1 / ratio), mode="bilinear")
        _, ll_seg_mask = torch.max(ll_seg_mask, 1)
        ll_seg_mask = ll_seg_mask.int().squeeze().cpu().numpy()

        # Lane line post-processing
        # ll_seg_mask = connect_lane(ll_seg_mask)

        img_det = show_seg_result(img_det, (da_seg_mask, ll_seg_mask), _, _, is_demo=True)

        # Draw detection boxes, rescaled from model input to the original image.
        if len(det):
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img_det.shape).round()
            for *xyxy, conf, cls in reversed(det):
                label_det_pred = f"{names[int(cls)]} {conf:.2f}"
                plot_one_box(
                    xyxy,
                    img_det,
                    label=label_det_pred,
                    color=colors[int(cls)],
                    line_thickness=2,
                )

        # if dataset.mode == 'images':
        cv2.imwrite(save_path, img_det)

        # Record the serialized lane mask per source image for the CSV summary.
        name_lane = [Path(path).name, json.dumps(ll_seg_mask, cls=NumpyEncoder)]
        name_lanes.append(name_lane)

    df = pd.DataFrame(name_lanes, columns=["source_image", "lanes"])
    df.to_csv(path_or_buf=os.path.join(opt.csv_path, "lanes.csv"), index=False)

    print("Results saved to %s" % Path(opt.save_dir))
    print("Done. (%.3fs)" % (time.time() - t0))
    print("inf : (%.4fs/frame) nms : (%.4fs/frame)" % (inf_time.avg, nms_time.avg))
if __name__ == "__main__":
    # CLI defaults match the SageMaker Processing container mount points.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--weights",
        nargs="+",
        type=str,
        default="weights/End-to-end.pth",
        help="model.pth path(s)",
    )
    parser.add_argument(
        "--source", type=str, default="/opt/ml/processing/input/image", help="source"
    )  # file/folder ex:inference/images
    parser.add_argument("--img-size", type=int, default=640, help="inference size (pixels)")
    parser.add_argument("--conf-thres", type=float, default=0.25, help="object confidence threshold")
    parser.add_argument("--iou-thres", type=float, default=0.45, help="IOU threshold for NMS")
    parser.add_argument("--device", default="cpu", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
    parser.add_argument(
        "--save_dir",
        type=str,
        default="/opt/ml/processing/output/image",
        help="directory to save results",
    )
    parser.add_argument("--augment", action="store_true", help="augmented inference")
    parser.add_argument("--update", action="store_true", help="update all models")
    parser.add_argument(
        "--csv_path",
        type=str,
        default="/opt/ml/processing/output/csv",
        help="output path for csv",
    )
    parser.add_argument(
        "--json_path",
        type=str,
        default="/opt/ml/processing/output/json",
        help="output path for json",
    )
    opt = parser.parse_args()
    # Inference only: disable autograd for speed and memory.
    with torch.no_grad():
        detect(cfg, opt)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/post-processing/yolop-lane-detection/tests/__init__.py | modules/post-processing/yolop-lane-detection/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/post-processing/yolop-lane-detection/tests/infra/test_app.py | modules/post-processing/yolop-lane-detection/tests/infra/test_app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import pytest
@pytest.fixture(scope="function")
def stack_defaults():
    """Seed the SeedFarmer/CDK environment variables app.py reads at import time."""
    os.environ["SEEDFARMER_PROJECT_NAME"] = "test-proj"
    os.environ["SEEDFARMER_DEPLOYMENT_NAME"] = "test-dep"
    os.environ["SEEDFARMER_MODULE_NAME"] = "test-mod"
    os.environ["CDK_DEFAULT_ACCOUNT"] = "111111111111"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-1"
    os.environ["SEEDFARMER_PARAMETER_ECR_REPOSITORY_ARN"] = (
        "arn:aws:ecr:us-east-1:123456789012:repository/addf-docker-repository"
    )
    os.environ["SEEDFARMER_PARAMETER_FULL_ACCESS_POLICY_ARN"] = "arn:aws:policy:12345:XXX"
    # Unload the app import so that subsequent tests don't reuse
    if "app" in sys.modules:
        del sys.modules["app"]


def test_app(stack_defaults):
    # Import succeeds when all required parameters are present.
    import app  # noqa: F401


def test_full_access_policy(stack_defaults):
    # Missing policy ARN must abort the import with an exception.
    del os.environ["SEEDFARMER_PARAMETER_FULL_ACCESS_POLICY_ARN"]
    with pytest.raises(Exception):
        import app  # noqa: F401

        # NOTE(review): unreachable once the import raises — confirm intent.
        assert os.environ["SEEDFARMER_PARAMETER_FULL_ACCESS_POLICY_ARN"] == "arn:aws:policy:12345:XXX"


def test_missing_app_ecr_arn(stack_defaults):
    # Missing ECR ARN must raise ValueError on import.
    del os.environ["SEEDFARMER_PARAMETER_ECR_REPOSITORY_ARN"]
    with pytest.raises(ValueError):
        import app  # noqa: F401


def test_solution_description(stack_defaults):
    # All three solution variables set -> full description with version.
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_ID"] = "SO123456"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_NAME"] = "MY GREAT TEST"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_VERSION"] = "v1.0.0"
    import app

    ver = app.generate_description()
    assert ver == "(SO123456) MY GREAT TEST. Version v1.0.0"


def test_solution_description_no_version(stack_defaults):
    # Version absent -> description without the version suffix.
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_ID"] = "SO123456"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_NAME"] = "MY GREAT TEST"
    del os.environ["SEEDFARMER_PARAMETER_SOLUTION_VERSION"]
    import app

    ver = app.generate_description()
    assert ver == "(SO123456) MY GREAT TEST"
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/post-processing/yolop-lane-detection/tests/infra/test_stack.py | modules/post-processing/yolop-lane-detection/tests/infra/test_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import aws_cdk as cdk
import pytest
from aws_cdk.assertions import Template
@pytest.fixture(scope="function")
def stack_defaults():
    """Set the CDK environment and force a fresh `stack` import per test."""
    os.environ["CDK_DEFAULT_ACCOUNT"] = "111111111111"
    os.environ["CDK_DEFAULT_REGION"] = "us-east-1"
    # Unload the app import so that subsequent tests don't reuse
    if "stack" in sys.modules:
        del sys.modules["stack"]


def test_synthesize_stack(stack_defaults):
    """Stack synthesizes cleanly and contains exactly one IAM role."""
    import stack

    app = cdk.App()
    project_name = "test-project"
    dep_name = "test-deployment"
    mod_name = "test-module"

    lane_det_stack = stack.LaneDetection(
        scope=app,
        id=f"{project_name}-{dep_name}-{mod_name}",
        project_name=project_name,
        deployment_name=dep_name,
        module_name=mod_name,
        s3_access_policy="arn:aws:policy:12345:XXX",
        ecr_repository_arn="arn:aws:ecr:us-east-1:123456789012:repository/addf-docker-repository",
        stack_description="Testing",
        env=cdk.Environment(
            account=os.environ["CDK_DEFAULT_ACCOUNT"],
            region=os.environ["CDK_DEFAULT_REGION"],
        ),
    )
    template = Template.from_stack(lane_det_stack)
    template.resource_count_is("AWS::IAM::Role", 1)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/post-processing/yolop-lane-detection/tests/infra/__init__.py | modules/post-processing/yolop-lane-detection/tests/infra/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/post-processing/yolo-object-detection/stack.py | modules/post-processing/yolo-object-detection/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import Any, cast
import aws_cdk.aws_ecr as ecr
import aws_cdk.aws_iam as iam
import cdk_nag
from aws_cdk import Aspects, Duration, Stack, Tags
from cdk_nag import NagPackSuppression, NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class ObjectDetection(Stack):
    """CDK stack for the YOLO object-detection SageMaker processing module.

    Exposes to the app: ``repository_name``, ``image_uri`` (the ``latest``
    tag of an existing ECR repository), and ``role`` (the SageMaker
    execution role).
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        project_name: str,
        deployment_name: str,
        module_name: str,
        s3_access_policy: str,
        ecr_repository_arn: str,
        stack_description: str,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            scope,
            id,
            description=stack_description,
            **kwargs,
        )

        # Tag every construct in this stack for attribution.
        Tags.of(scope=cast(IConstruct, self)).add(
            key="Deployment",
            value="aws",
        )
        dep_mod = f"{project_name}-{deployment_name}-{module_name}"

        # Reference an existing ECR repository (created elsewhere); the
        # processing image is its "latest" tag.
        repo = ecr.Repository.from_repository_arn(self, "Repository", repository_arn=ecr_repository_arn)
        self.repository_name = repo.repository_name
        self.image_uri = f"{repo.repository_uri}:latest"

        # Inline policy scoped to project-prefixed DynamoDB, ECR, and S3 resources.
        policy_statements = [
            iam.PolicyStatement(
                actions=["dynamodb:*"],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{self.partition}:dynamodb:{self.region}:{self.account}:table/{project_name}*"],
            ),
            iam.PolicyStatement(
                actions=["ecr:*"],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{self.partition}:ecr:{self.region}:{self.account}:repository/{dep_mod}*"],
            ),
            iam.PolicyStatement(
                actions=["s3:GetObject", "s3:GetObjectAcl", "s3:ListBucket"],
                effect=iam.Effect.ALLOW,
                resources=[
                    f"arn:{self.partition}:s3:::{project_name}-*",
                    f"arn:{self.partition}:s3:::{project_name}-*/*",
                ],
            ),
        ]
        dag_document = iam.PolicyDocument(statements=policy_statements)

        # Execution role assumable by SageMaker, combining the inline policy,
        # the caller-supplied S3 access policy, and AWS managed policies.
        self.role = iam.Role(
            self,
            f"{repo.repository_name}-sm-role",
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal("sagemaker.amazonaws.com"),
            ),
            inline_policies={"DagPolicyDocument": dag_document},
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonECSTaskExecutionRolePolicy"),
                iam.ManagedPolicy.from_managed_policy_arn(self, id="fullaccess", managed_policy_arn=s3_access_policy),
                iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSageMakerFullAccess"),
            ],
            max_session_duration=Duration.hours(12),
        )

        # Run cdk-nag checks, suppressing the managed-policy and wildcard
        # findings with the reasons recorded below.
        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())
        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM4",
                        "reason": "Managed Policies are for service account roles only",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM5",
                        "reason": "Resource access restriced to ADDF resources",
                    }
                ),
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/post-processing/yolo-object-detection/app.py | modules/post-processing/yolo-object-detection/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from aws_cdk import App, CfnOutput, Environment
from stack import ObjectDetection
# SeedFarmer-provided deployment context (empty string when unset).
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
# Required module parameters; fail fast at synth time when missing.
ecr_repository_arn = os.getenv(_param("ECR_REPOSITORY_ARN"))
full_access_policy = os.getenv(_param("FULL_ACCESS_POLICY_ARN"))
if not ecr_repository_arn:
    raise ValueError("ECR Repository ARN is missing.")
if not full_access_policy:
    raise ValueError("S3 Full Access Policy ARN is missing.")
def generate_description() -> str:
    """Build the CloudFormation stack description.

    Embeds AWS Solution metadata (id/name, plus version when present) from
    the SEEDFARMER_PARAMETER_SOLUTION_* environment variables; otherwise
    falls back to a static ADDF description.

    NOTE(review): the fallback text says "yolop-lane-det" although this is
    the object-detection module — looks copy-pasted from the lane-detection
    module; confirm before changing the string.
    """
    sid = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_ID", None)
    sname = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_NAME", None)
    sversion = os.getenv("SEEDFARMER_PARAMETER_SOLUTION_VERSION", None)
    if sid and sname:
        return f"({sid}) {sname}. Version {sversion}" if sversion else f"({sid}) {sname}"
    return "(SO9154) Autonomous Driving Data Framework (ADDF) - yolop-lane-det"
app = App()

stack = ObjectDetection(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    project_name=project_name,
    deployment_name=deployment_name,
    module_name=module_name,
    env=Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
    s3_access_policy=full_access_policy,
    ecr_repository_arn=ecr_repository_arn,
    stack_description=generate_description(),
)

# Regional AWS Deep Learning Containers image used as the build base image.
base_image = (
    f"763104351884.dkr.ecr.{stack.region}.amazonaws.com/pytorch-inference:1.12.1-gpu-py38-cu113-ubuntu20.04-sagemaker"
)

# Export the stack's key attributes as JSON metadata for downstream consumers.
CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string(
        {
            "ImageUri": stack.image_uri,
            "EcrRepoName": stack.repository_name,
            "ExecutionRole": stack.role.role_arn,
            "BaseImage": base_image,
        }
    ),
)

app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/post-processing/yolo-object-detection/src/detect_objects.py | modules/post-processing/yolo-object-detection/src/detect_objects.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from argparse import ArgumentParser
from glob import glob as get_files
from json import dump as dump_json
import pandas as pd
from ultralytics import YOLO
def get_pandas(result):
    """Convert one ultralytics result's boxes into a DataFrame.

    Columns: xmin, ymin, xmax, ymax, confidence, class, name — one row
    per detected box, with the class name resolved via result.names.
    """
    columns = ["xmin", "ymin", "xmax", "ymax", "confidence", "class", "name"]
    rows = []
    for box in result.boxes.data.tolist():
        # Element 5 is the class id; append its human-readable name last.
        rows.append(box + [result.names[box[5]]])
    return pd.DataFrame(rows, columns=columns)
def get_yolo_prediction(model, image, input_data_path, _input_size=1280, confidence=0.25, iou=0.45, max_det=1000):
    """Run YOLO on a single image path.

    Returns a (dict, DataFrame) pair: the dict holds the image filename
    (relative to input_data_path), box coordinates, scores, class indices
    and class names; the DataFrame is get_pandas() over the same result.
    NOTE(review): _input_size is accepted but never used.
    """
    # NOTE(review): these attribute assignments are meant to configure NMS;
    # confirm the installed ultralytics version honors them (newer releases
    # take them as predict() keyword arguments instead).
    model.conf = confidence  # NMS confidence threshold
    model.iou = iou  # NMS IoU threshold
    model.agnostic = False  # NMS class-agnostic
    model.multi_label = True  # NMS multiple labels per box
    model.max_det = max_det  # maximum number of detections per image

    # inference with test time augmentation
    results = model.predict(image, augment=True)[0]

    # Pull tensors to numpy before extracting plain-Python lists.
    output = {}
    output["image_filename"] = image.replace(input_data_path, "")
    output["boxes"] = results.numpy().boxes.xyxy.tolist()
    output["score"] = results.numpy().boxes.conf.tolist()
    output["category_index"] = results.numpy().boxes.cls.tolist()
    output["category"] = [results.names[x] for x in output["category_index"]]
    return output, get_pandas(results)
if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--model", type=str, default="yolo11s")
    args, _ = parser.parse_known_args()

    # SageMaker Processing container mount points.
    input_data_path = "/opt/ml/processing/input/"
    output_data_path = "/opt/ml/processing/output/"

    model_name = args.model
    model = YOLO(model_name)

    # Only .png inputs are processed.
    images_list = [file for file in get_files(input_data_path + "*.png")]

    output = {}
    dfs = []
    for image in images_list:
        output_json, output_pandas = get_yolo_prediction(model, image, input_data_path)
        image_file_name = image.replace(input_data_path, "")
        image_json_name = image_file_name.replace(".png", ".json")
        # NOTE(review): image_csv_name is computed but never used.
        image_csv_name = image_file_name.replace(".png", ".csv")
        output[image_file_name] = output_json
        # One JSON file per image holding its detections.
        with open(f"{output_data_path}{image_json_name}", "w") as file:
            dump_json(output_json, file, indent=2, separators=(",", ": "), sort_keys=False)
        output_pandas["source_image"] = image_file_name
        dfs.append(output_pandas)
    # Single CSV aggregating all per-image predictions.
    pd.concat(dfs).to_csv(f"{output_data_path}all_predictions.csv", encoding="utf-8")
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/post-processing/yolo-object-detection/tests/__init__.py | modules/post-processing/yolo-object-detection/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/post-processing/yolo-object-detection/tests/infra/test_app.py | modules/post-processing/yolo-object-detection/tests/infra/test_app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import pytest
@pytest.fixture(scope="function")
def stack_defaults():
    """Prime the environment variables the CDK app expects and drop any cached import."""
    env = {
        "SEEDFARMER_PROJECT_NAME": "test-proj",
        "SEEDFARMER_DEPLOYMENT_NAME": "test-dep",
        "SEEDFARMER_MODULE_NAME": "test-mod",
        "CDK_DEFAULT_ACCOUNT": "111111111111",
        "CDK_DEFAULT_REGION": "us-east-1",
        "SEEDFARMER_PARAMETER_ECR_REPOSITORY_ARN": "arn:aws:ecr:us-east-1:123456789012:repository/addf-docker-repository",
        "SEEDFARMER_PARAMETER_FULL_ACCESS_POLICY_ARN": "arn:aws:policy:12345:XXX",
    }
    os.environ.update(env)

    # Unload the app import so that subsequent tests don't reuse it.
    sys.modules.pop("app", None)
def test_app(stack_defaults):
    # Smoke test: the CDK app must import (and therefore synthesize) cleanly
    # when every required environment variable is present.
    import app  # noqa: F401
def test_full_access_policy(stack_defaults):
    """The app import must fail when the full-access policy ARN is missing."""
    del os.environ["SEEDFARMER_PARAMETER_FULL_ACCESS_POLICY_ARN"]

    with pytest.raises(Exception):
        import app  # noqa: F401
    # NOTE(review): the original asserted the (already deleted) env var's value
    # after the failed import; that assertion could never hold and was removed.
def test_missing_app_ecr_arn(stack_defaults):
    # Removing the ECR repository ARN must make the app import raise ValueError.
    del os.environ["SEEDFARMER_PARAMETER_ECR_REPOSITORY_ARN"]
    with pytest.raises(ValueError):
        import app  # noqa: F401
def test_solution_description(stack_defaults):
    """generate_description includes the version suffix when one is configured."""
    os.environ.update(
        {
            "SEEDFARMER_PARAMETER_SOLUTION_ID": "SO123456",
            "SEEDFARMER_PARAMETER_SOLUTION_NAME": "MY GREAT TEST",
            "SEEDFARMER_PARAMETER_SOLUTION_VERSION": "v1.0.0",
        }
    )

    import app

    assert app.generate_description() == "(SO123456) MY GREAT TEST. Version v1.0.0"
def test_solution_description_no_version(stack_defaults):
    """generate_description omits the version suffix when none is configured."""
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_ID"] = "SO123456"
    os.environ["SEEDFARMER_PARAMETER_SOLUTION_NAME"] = "MY GREAT TEST"
    # pop() instead of del: the fixture never sets SOLUTION_VERSION, so a bare
    # del raises KeyError when this test runs in isolation (it only passed when
    # test_solution_description ran first and left the variable behind).
    os.environ.pop("SEEDFARMER_PARAMETER_SOLUTION_VERSION", None)

    import app

    ver = app.generate_description()
    assert ver == "(SO123456) MY GREAT TEST"
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/post-processing/yolo-object-detection/tests/infra/test_stack.py | modules/post-processing/yolo-object-detection/tests/infra/test_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import aws_cdk as cdk
import pytest
from aws_cdk.assertions import Template
@pytest.fixture(scope="function")
def stack_defaults():
    """Set the CDK account/region and drop any cached ``stack`` import."""
    os.environ.update(
        {
            "CDK_DEFAULT_ACCOUNT": "111111111111",
            "CDK_DEFAULT_REGION": "us-east-1",
        }
    )

    # Forget the previously imported module so each test starts fresh.
    sys.modules.pop("stack", None)
def test_synthesize_stack(stack_defaults):
    """The ObjectDetection stack synthesizes and contains exactly one IAM role."""
    import stack

    cdk_app = cdk.App()

    proj = "test-project"
    dep = "test-deployment"
    mod = "test-module"

    object_det_stack = stack.ObjectDetection(
        scope=cdk_app,
        id=f"{proj}-{dep}-{mod}",
        project_name=proj,
        deployment_name=dep,
        module_name=mod,
        ecr_repository_arn="arn:aws:ecr:us-east-1:123456789012:repository/addf-docker-repository",
        s3_access_policy="arn:aws:policy:12345:XXX",
        stack_description="Testing",
        env=cdk.Environment(
            account=os.environ["CDK_DEFAULT_ACCOUNT"],
            region=os.environ["CDK_DEFAULT_REGION"],
        ),
    )

    template = Template.from_stack(object_det_stack)
    template.resource_count_is("AWS::IAM::Role", 1)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/post-processing/yolo-object-detection/tests/infra/__init__.py | modules/post-processing/yolo-object-detection/tests/infra/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/ml-training/training-image/mnist/src/mnist.py | modules/ml-training/training-image/mnist/src/mnist.py | # Copyright 2022 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import logging
import os
import hypertune
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
# Number of distributed workers; 1 when not launched by a distributed runner.
WORLD_SIZE = int(os.environ.get("WORLD_SIZE", 1))
# FSx-backed dataset and artifact locations, overridable via environment.
INPUT_DATASET_PATH = os.environ.get("DATASET_PATH", "/data/fsx/import")
OUTPUT_ARTIFACTS_PATH = os.environ.get("OUTPUT_ARTIFACTS_PATH", "/data/fsx/export")
# Required: injected by the submitting state machine; fail fast if absent.
TRAINING_JOB_ID = os.environ["TRAINING_JOB_ID"]
JOB_OUTPUT_PATH = os.path.join(OUTPUT_ARTIFACTS_PATH, TRAINING_JOB_ID)

# Create the job-specific output path; exist_ok avoids the check-then-create
# race of the original `if not os.path.exists(...)` guard.
os.makedirs(JOB_OUTPUT_PATH, exist_ok=True)
class Net(nn.Module):
    """LeNet-style CNN for 28x28 single-channel images with 10 output classes."""

    def __init__(self):
        super(Net, self).__init__()
        # Attribute names are kept identical so saved state_dicts remain loadable.
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4 * 4 * 50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        """Return per-class log-probabilities for a (N, 1, 28, 28) batch."""
        out = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2, 2)
        out = out.view(-1, 4 * 4 * 50)
        out = self.fc2(F.relu(self.fc1(out)))
        return F.log_softmax(out, dim=1)
def check_dataset_exists(data_path, dataset_name):
    """Return True when *dataset_name* already exists under *data_path*."""
    dataset_path = os.path.join(data_path, dataset_name)
    exists = os.path.exists(dataset_path)
    if exists:
        print("MNIST dataset found at:", dataset_path)
    else:
        print("MNIST dataset not found")
    return exists
def train(args, model, device, train_loader, optimizer, epoch):
    """Run one training epoch, logging progress every ``args.log_interval`` batches."""
    model.train()
    total = len(train_loader.dataset)
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)

        optimizer.zero_grad()
        loss = F.nll_loss(model(data), target)
        loss.backward()
        optimizer.step()

        if batch_idx % args.log_interval == 0:
            logging.info(
                "Train Epoch: {} [{}/{} ({:.0f}%)]\tloss={:.4f}".format(
                    epoch,
                    batch_idx * len(data),
                    total,
                    100.0 * batch_idx / len(train_loader),
                    loss.item(),
                )
            )
def test(args, model, device, test_loader, epoch, hpt):
    """Evaluate on the test set; report metrics via logging and, optionally, hypertune."""
    model.eval()

    total_loss = 0.0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # Accumulate the summed (not averaged) batch loss.
            total_loss += F.nll_loss(output, target, reduction="sum").item()
            # The index of the max log-probability is the predicted class.
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()

    n_samples = len(test_loader.dataset)
    test_loss = total_loss / n_samples
    test_accuracy = float(correct) / n_samples

    logging.info(
        "{{metricName: accuracy, metricValue: {:.4f}}};{{metricName: loss, metricValue: {:.4f}}}\n".format(
            test_accuracy, test_loss
        )
    )

    # Only the hypertune logger reports metrics to the tuning service.
    if args.logger == "hypertune":
        hpt.report_hyperparameter_tuning_metric(
            hyperparameter_metric_tag="loss",
            metric_value=test_loss,
            global_step=epoch,
        )
        hpt.report_hyperparameter_tuning_metric(
            hyperparameter_metric_tag="accuracy",
            metric_value=test_accuracy,
            global_step=epoch,
        )
def should_distribute():
    """Return True when torch.distributed is usable and more than one worker is configured."""
    if not dist.is_available():
        return False
    return WORLD_SIZE > 1
def is_distributed():
    """Return True once a torch.distributed process group has been initialized."""
    if not dist.is_available():
        return False
    return dist.is_initialized()
def main():
    """Parse CLI arguments, build the FashionMNIST loaders, and run training.

    Supports optional CUDA and optional distributed execution (when launched
    with WORLD_SIZE > 1); metrics go to logging and, optionally, hypertune.
    """
    # Training settings
    parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
    parser.add_argument(
        "--batch-size",
        type=int,
        default=64,
        metavar="N",
        help="input batch size for training (default: 64)",
    )
    parser.add_argument(
        "--test-batch-size",
        type=int,
        default=1000,
        metavar="N",
        help="input batch size for testing (default: 1000)",
    )
    parser.add_argument(
        "--epochs",
        type=int,
        default=10,
        metavar="N",
        help="number of epochs to train (default: 10)",
    )
    parser.add_argument(
        "--lr",
        type=float,
        default=0.01,
        metavar="LR",
        help="learning rate (default: 0.01)",
    )
    parser.add_argument(
        "--momentum",
        type=float,
        default=0.5,
        metavar="M",
        help="SGD momentum (default: 0.5)",
    )
    parser.add_argument(
        "--no-cuda",
        action="store_true",
        default=False,
        help="disables CUDA training",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=1,
        metavar="S",
        help="random seed (default: 1)",
    )
    parser.add_argument(
        "--log-interval",
        type=int,
        default=10,
        metavar="N",
        help="how many batches to wait before logging training status",
    )
    parser.add_argument(
        "--log-path",
        type=str,
        default="",
        help="Path to save logs. Print to StdOut if log-path is not set",
    )
    parser.add_argument(
        "--save-model",
        action="store_true",
        default=False,
        help="For Saving the current Model",
    )
    parser.add_argument(
        "--logger",
        type=str,
        choices=["standard", "hypertune"],
        help="Logger",
        default="standard",
    )
    # --backend only makes sense when the distributed package is compiled in.
    if dist.is_available():
        parser.add_argument(
            "--backend",
            type=str,
            help="Distributed backend",
            choices=[dist.Backend.GLOO, dist.Backend.NCCL, dist.Backend.MPI],
            default=dist.Backend.GLOO,
        )
    args = parser.parse_args()

    # Use this format (%Y-%m-%dT%H:%M:%SZ) to record timestamp of the metrics.
    logging.basicConfig(
        format="%(asctime)s %(levelname)-8s %(message)s",
        datefmt="%Y-%m-%dT%H:%M:%SZ",
        level=logging.DEBUG,
        handlers=[
            logging.FileHandler(os.path.join(JOB_OUTPUT_PATH, "training.log")),
            logging.StreamHandler(),
        ],
    )

    if args.logger == "hypertune" and args.log_path != "":
        os.environ["CLOUD_ML_HP_METRIC_FILE"] = args.log_path

    # For JSON logging
    hpt = hypertune.HyperTune()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    print("torch.cuda.is_available()")
    print(torch.cuda.is_available())
    if use_cuda:
        print("Using CUDA")

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    if should_distribute():
        print("Using distributed PyTorch with {} backend".format(args.backend))
        dist.init_process_group(backend=args.backend)

    kwargs = {"num_workers": 1} if use_cuda else {}

    dataset_exists = check_dataset_exists(
        data_path=INPUT_DATASET_PATH, dataset_name="FashionMNIST"
    )

    # Download only when the dataset is not already on the shared filesystem
    # (`not dataset_exists` replaces the redundant `False if ... else True`).
    train_loader = torch.utils.data.DataLoader(
        datasets.FashionMNIST(
            INPUT_DATASET_PATH,
            train=True,
            download=not dataset_exists,
            transform=transforms.Compose([transforms.ToTensor()]),
        ),
        batch_size=args.batch_size,
        shuffle=True,
        **kwargs,
    )
    test_loader = torch.utils.data.DataLoader(
        datasets.FashionMNIST(
            INPUT_DATASET_PATH,
            train=False,
            download=not dataset_exists,
            transform=transforms.Compose([transforms.ToTensor()]),
        ),
        batch_size=args.test_batch_size,
        shuffle=False,
        **kwargs,
    )

    model = Net().to(device)

    print("is_distributed?")
    print(is_distributed())
    if is_distributed():
        # NOTE(review): DistributedDataParallelCPU was removed in modern
        # PyTorch (DistributedDataParallel handles CPU too) — confirm the
        # pinned torch version before running the CPU-distributed path.
        Distributor = (
            nn.parallel.DistributedDataParallel
            if use_cuda
            else nn.parallel.DistributedDataParallelCPU
        )
        model = Distributor(model)

    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)

    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader, epoch, hpt)

    if args.save_model:
        torch.save(model.state_dict(), os.path.join(JOB_OUTPUT_PATH, "model.pt"))
        print("Model Saved")
# Script entry point: run training only when executed directly.
if __name__ == "__main__":
    main()
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/ml-training/training-k8s-deployment/configure_asgs.py | modules/ml-training/training-k8s-deployment/configure_asgs.py | import os
from typing import Any, Dict
import boto3 # type: ignore
# AWS clients; region and credentials come from the execution environment.
eks = boto3.client("eks")
as_client = boto3.client("autoscaling")

# This script fixes missing tags for the cluster autoscaler that need to be added to the nodegroup autoscaling group
# docs:
# https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#auto-discovery-setup
# issue: https://github.com/aws/aws-cdk/issues/29280
# container-roadmap: https://github.com/aws/containers-roadmap/issues/1541

# Target cluster name is injected by seed-farmer; KeyError here is a
# deliberate fail-fast when the parameter is missing.
CLUSTER_NAME = os.environ["SEEDFARMER_PARAMETER_EKS_CLUSTER_NAME"]
# Tag prefix the cluster-autoscaler expects for node-template label tags.
LABEL_TAG_PREFIX = "k8s.io/cluster-autoscaler/node-template/label"

nodegroup_names = eks.list_nodegroups(clusterName=CLUSTER_NAME)["nodegroups"]
def get_asg_tag(asg_name: str, key: str, value: str) -> Dict[str, Any]:
    """Build an autoscaling CreateOrUpdateTags entry that propagates at launch."""
    return dict(
        ResourceId=asg_name,
        ResourceType="auto-scaling-group",
        Key=key,
        Value=value,
        PropagateAtLaunch=True,
    )
# For every nodegroup with labels, ensure the backing ASG carries the
# cluster-autoscaler node-template tags; add any that are missing.
for nodegroup_name in nodegroup_names:
    print(f"Node Group: {nodegroup_name}")
    nodegroup = eks.describe_nodegroup(
        clusterName=CLUSTER_NAME, nodegroupName=nodegroup_name
    )["nodegroup"]

    labels = nodegroup["labels"]
    if labels:
        print(
            f"""Found autoscaling group for NodeGroup ({nodegroup_name}) with eks_node_labels ({labels}).
            Checking if any cluster-autoscaler tags are missing..."""
        )
        # Managed nodegroups have exactly one backing ASG.
        asg_name = nodegroup["resources"]["autoScalingGroups"][0]["name"]
        asg = as_client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])[
            "AutoScalingGroups"
        ][0]

        tags = asg["Tags"]
        print("Tags:")
        for tag in tags:
            print(f"{tag['Key']}: {tag['Value']}")

        # Set membership is O(1) per lookup vs. the original per-label list scan.
        tag_keys = {tag["Key"] for tag in tags}

        target_tags = []
        for label, label_value in labels.items():
            cluster_autoscaler_label_tag = f"{LABEL_TAG_PREFIX}/{label}"
            if cluster_autoscaler_label_tag not in tag_keys:
                target_tags.append(
                    get_asg_tag(asg_name, cluster_autoscaler_label_tag, label_value)
                )

        if target_tags:
            print(f"Tags to update: {target_tags}")
            create_update_tags_response = as_client.create_or_update_tags(
                Tags=target_tags
            )
            if (
                create_update_tags_response
                and create_update_tags_response["ResponseMetadata"]["HTTPStatusCode"]
                == 200
            ):
                print("Tags updated")
            else:
                print(f"Error: {create_update_tags_response}")
        else:
            print(
                f"All cluster-autoscaler tags exist on Node Group ({nodegroup_name}) ASG({asg_name}). Doing nothing..."
            )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/ml-training/training-k8s-deployment/stack.py | modules/ml-training/training-k8s-deployment/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
import os
from typing import Any, cast
import cdk_nag
from aws_cdk import Aspects, Duration, Stack, Tags, aws_eks, aws_iam
from aws_cdk import aws_logs as logs
from aws_cdk import aws_stepfunctions as sfn
from aws_cdk.lambda_layer_kubectl_v29 import KubectlV29Layer
from cdk_nag import NagPackSuppression, NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class TrainingPipeline(Stack):
    """CDK stack that runs a containerized PyTorch training Job on EKS.

    Creates the training namespace, an IRSA service account with the RBAC the
    Job needs, and a Step Functions state machine that submits the Job through
    the ``eks:runJob.sync`` service integration.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        project_name: str,
        deployment_name: str,
        module_name: str,
        eks_cluster_name: str,
        eks_admin_role_arn: str,
        eks_handler_rolearn: str,
        eks_openid_connect_provider_arn: str,
        eks_cluster_endpoint: str,
        eks_cert_auth_data: str,
        training_namespace_name: str,
        training_image_uri: str,
        **kwargs: Any,
    ) -> None:
        super().__init__(
            scope,
            id,
            **kwargs,
        )

        dep_mod = f"{project_name}-{deployment_name}-{module_name}"
        dep_mod = dep_mod[0:19]
        # dep_mod is already capped at 19 characters, comfortably below the
        # 256-character tag-value limit, so the original re-truncation to 256
        # was dead code and has been removed.
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=dep_mod)

        # ECR access is scoped to this module's repositories only.
        policy_statements = [
            aws_iam.PolicyStatement(
                actions=["ecr:*"],
                effect=aws_iam.Effect.ALLOW,
                resources=[
                    f"arn:{self.partition}:ecr:{self.region}:{self.account}:repository/{project_name}-{deployment_name}-{module_name}*"
                ],
            ),
        ]

        handler_role = aws_iam.Role.from_role_arn(
            self, "HandlerRole", eks_handler_rolearn
        )

        provider = aws_eks.OpenIdConnectProvider.from_open_id_connect_provider_arn(
            self, "Provider", eks_openid_connect_provider_arn
        )
        cluster = aws_eks.Cluster.from_cluster_attributes(
            self,
            f"eks-{deployment_name}-{module_name}",
            cluster_name=eks_cluster_name,
            open_id_connect_provider=provider,
            kubectl_role_arn=eks_admin_role_arn,
            kubectl_lambda_role=handler_role,
            kubectl_layer=KubectlV29Layer(self, "Kubectlv29Layer"),
        )

        namespace = aws_eks.KubernetesManifest(
            self,
            "namespace",
            cluster=cluster,
            manifest=[
                {
                    "apiVersion": "v1",
                    "kind": "Namespace",
                    "metadata": {"name": training_namespace_name},
                }
            ],
            overwrite=True,  # Create if not exists
        )

        # IRSA service account that the training Job and state machine run as.
        service_account = cluster.add_service_account(
            "service-account", name=module_name, namespace=training_namespace_name
        )
        service_account.node.add_dependency(namespace)

        service_account_role: aws_iam.Role = cast(aws_iam.Role, service_account.role)
        if service_account_role.assume_role_policy:
            # Step Functions must also be able to assume this role.
            service_account_role.assume_role_policy.add_statements(
                aws_iam.PolicyStatement(
                    effect=aws_iam.Effect.ALLOW,
                    actions=["sts:AssumeRole"],
                    principals=[aws_iam.ServicePrincipal("states.amazonaws.com")],
                )
            )
        for statement in policy_statements:
            service_account_role.add_to_policy(statement=statement)

        # Full access within the training namespace.
        rbac_role = cluster.add_manifest(
            "rbac-role",
            {
                "apiVersion": "rbac.authorization.k8s.io/v1",
                "kind": "Role",
                "metadata": {
                    "name": "module-owner",
                    "namespace": training_namespace_name,
                },
                "rules": [{"apiGroups": ["*"], "resources": ["*"], "verbs": ["*"]}],
            },
        )
        rbac_role.node.add_dependency(namespace)

        rbac_role_binding = cluster.add_manifest(
            "rbac-role-binding",
            {
                "apiVersion": "rbac.authorization.k8s.io/v1",
                "kind": "RoleBinding",
                "metadata": {"name": module_name, "namespace": training_namespace_name},
                "roleRef": {
                    "apiGroup": "rbac.authorization.k8s.io",
                    "kind": "Role",
                    "name": "module-owner",
                },
                "subjects": [
                    {"kind": "User", "name": f"{project_name}-{module_name}"},
                    {
                        "kind": "ServiceAccount",
                        "name": module_name,
                        "namespace": training_namespace_name,
                    },
                ],
            },
        )
        rbac_role_binding.node.add_dependency(service_account)

        # Read-only access within the default namespace.  Distinct local names
        # avoid shadowing the training-namespace role/binding above.
        default_rbac_role = aws_eks.KubernetesManifest(
            self,
            "rbac-role-default",
            cluster=cluster,
            manifest=[
                {
                    "apiVersion": "rbac.authorization.k8s.io/v1",
                    "kind": "Role",
                    "metadata": {"name": "default-access", "namespace": "default"},
                    "rules": [
                        {
                            "apiGroups": ["*"],
                            "resources": ["*"],
                            "verbs": ["get", "list", "watch"],
                        }
                    ],
                }
            ],
            overwrite=True,
        )
        default_rbac_role.node.add_dependency(namespace)

        default_rbac_role_binding = cluster.add_manifest(
            "rbac-role-binding-default",
            {
                "apiVersion": "rbac.authorization.k8s.io/v1",
                "kind": "RoleBinding",
                "metadata": {"name": "default-access", "namespace": "default"},
                "roleRef": {
                    "apiGroup": "rbac.authorization.k8s.io",
                    "kind": "Role",
                    "name": "default-access",
                },
                "subjects": [
                    {"kind": "User", "name": f"{project_name}-{module_name}"},
                    {
                        "kind": "ServiceAccount",
                        "name": module_name,
                        "namespace": training_namespace_name,
                    },
                ],
            },
        )
        default_rbac_role_binding.node.add_dependency(service_account)

        rbac_cluster_role_binding = cluster.add_manifest(
            "rbac-cluster-role-binding",
            {
                "apiVersion": "rbac.authorization.k8s.io/v1",
                "kind": "ClusterRoleBinding",
                "metadata": {"name": f"system-access-{module_name}"},
                "roleRef": {
                    "apiGroup": "rbac.authorization.k8s.io",
                    "kind": "ClusterRole",
                    "name": "system-access",
                },
                "subjects": [
                    {"kind": "User", "name": f"{project_name}-{module_name}"},
                    {
                        "kind": "ServiceAccount",
                        "name": module_name,
                        "namespace": training_namespace_name,
                    },
                ],
            },
        )
        rbac_cluster_role_binding.node.add_dependency(service_account)

        self.eks_service_account_role = service_account.role

        _final_status = sfn.Pass(self, "final step")  # noqa: F841

        # States language JSON to put an item into DynamoDB
        # snippet generated from
        # https://docs.aws.amazon.com/step-functions/latest/dg/tutorial-code-snippet.html#tutorial-code-snippet-1
        body = {
            # Fixed misspelled key "apiVerson" — batch/v1 Jobs require an
            # "apiVersion" field.
            "apiVersion": "batch/v1",
            "kind": "Job",
            "metadata": {
                "namespace": training_namespace_name,
                "name.$": "States.Format('pytorch-training-{}', $$.Execution.Name)",
            },
            "spec": {
                "backoffLimit": 1,
                "template": {
                    "spec": {
                        "restartPolicy": "OnFailure",
                        "serviceAccountName": module_name,
                        "containers": [
                            {
                                "name": "pytorch",
                                "image": training_image_uri,
                                "imagePullPolicy": "Always",
                                "volumeMounts": [
                                    {
                                        "name": "persistent-storage",
                                        "mountPath": "/data",
                                    }
                                ],
                                "command": [
                                    "python3",
                                    "/aws/pytorch-mnist/mnist.py",
                                    "--epochs=1",
                                    "--save-model",
                                ],
                                "env": [
                                    {
                                        "name": "TRAINING_JOB_ID",
                                        "value.$": "States.Format('pytorch-training-{}', $$.Execution.Name)",
                                    }
                                ],
                            }
                        ],
                        "nodeSelector": {"usage": "gpu"},
                        "volumes": [
                            {
                                "name": "persistent-storage",
                                "persistentVolumeClaim": {
                                    # NOTE(review): reads the PVC name from the
                                    # environment at synth time; None if unset —
                                    # consider passing it as a constructor arg.
                                    "claimName": os.getenv(
                                        "SEEDFARMER_PARAMETER_PVC_NAME"
                                    )
                                },
                            }
                        ],
                    },
                },
            },
        }
        state_json = {
            "Type": "Task",
            "Resource": f"arn:{self.partition}:states:::eks:runJob.sync",
            "Parameters": {
                "ClusterName": eks_cluster_name,
                "Namespace": training_namespace_name,
                "CertificateAuthority": eks_cert_auth_data,
                "Endpoint": eks_cluster_endpoint,
                "LogOptions": {"RetrieveLogs": True},
                "Job": body,
            },
        }

        # Custom state wrapping the eks:runJob.sync service integration.
        custom = sfn.CustomState(self, "eks-training", state_json=state_json)

        log_group = logs.LogGroup(
            self, "TrainingOnEKSLogGroup", retention=logs.RetentionDays.TEN_YEARS
        )
        sm = sfn.StateMachine(  # noqa: F841
            self,
            "TrainingOnEKS",
            definition_body=sfn.DefinitionBody.from_chainable(sfn.Chain.start(custom)),
            timeout=Duration.minutes(15),
            logs=sfn.LogOptions(destination=log_group, level=sfn.LogLevel.ALL),
            role=service_account.role,
        )

        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())

        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM4",
                        "reason": "Managed Policies are for service account roles only",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-IAM5",
                        "reason": "Resource access restricted to project resources",
                    }
                ),
                NagPackSuppression(
                    **{
                        "id": "AwsSolutions-SF2",
                        "reason": "Xray disabled",
                    }
                ),
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/ml-training/training-k8s-deployment/app.py | modules/ml-training/training-k8s-deployment/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
from typing import cast
from aws_cdk import App, CfnOutput, Environment
from stack import TrainingPipeline
# Project specific
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.environ["SEEDFARMER_DEPLOYMENT_NAME"]
module_name = os.environ["SEEDFARMER_MODULE_NAME"]


def _param(name: str) -> str:
    # Seed-farmer exposes module parameters as SEEDFARMER_PARAMETER_* env vars.
    return f"SEEDFARMER_PARAMETER_{name}"


# Inputs produced by sibling modules (EKS cluster, training image, ...).
# NOTE(review): these are Optional[str]; the stack casts them to str without
# validation — confirm required parameters are enforced upstream.
eks_cluster_name = os.getenv(_param("EKS_CLUSTER_NAME"))
eks_admin_role_arn = os.getenv(_param("EKS_CLUSTER_ADMIN_ROLE_ARN"))
eks_handler_rolearn = os.getenv(_param("EKS_HANDLER_ROLEARN"))
eks_oidc_provider_arn = os.getenv(_param("EKS_OIDC_ARN"))
eks_cluster_endpoint = os.getenv(_param("EKS_CLUSTER_ENDPOINT"))
eks_cert_auth_data = os.getenv(_param("EKS_CERT_AUTH_DATA"))
training_namespace_name = os.getenv(_param("TRAINING_NAMESPACE_NAME"))
training_image_uri = os.getenv(_param("TRAINING_IMAGE_URI"))
app = App()

# Instantiate the training pipeline stack with the seed-farmer parameters.
# The cast(str, ...) calls satisfy typing only; missing values surface later
# during synthesis.
stack = TrainingPipeline(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    project_name=project_name,
    deployment_name=deployment_name,
    module_name=module_name,
    eks_cluster_name=cast(str, eks_cluster_name),
    eks_admin_role_arn=cast(str, eks_admin_role_arn),
    eks_handler_rolearn=cast(str, eks_handler_rolearn),
    eks_openid_connect_provider_arn=cast(str, eks_oidc_provider_arn),
    eks_cluster_endpoint=cast(str, eks_cluster_endpoint),
    eks_cert_auth_data=cast(str, eks_cert_auth_data),
    training_namespace_name=cast(str, training_namespace_name),
    training_image_uri=cast(str, training_image_uri),
    env=Environment(
        account=os.environ["CDK_DEFAULT_ACCOUNT"],
        region=os.environ["CDK_DEFAULT_REGION"],
    ),
)

# Module metadata consumed by seed-farmer / downstream modules.
CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string(
        {
            "EksServiceAccountRoleArn": stack.eks_service_account_role.role_arn,
            "TrainingNamespaceName": training_namespace_name,
        }
    ),
)

app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/ml-training/training-k8s-deployment/tests/test_app.py | modules/ml-training/training-k8s-deployment/tests/test_app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import pytest
@pytest.fixture(scope="function")
def stack_defaults():
    """Prime every env var the training app expects and drop any cached import."""
    env = {
        "SEEDFARMER_PROJECT_NAME": "test-project",
        "SEEDFARMER_DEPLOYMENT_NAME": "test-deployment",
        "SEEDFARMER_MODULE_NAME": "test-module",
        "CDK_DEFAULT_ACCOUNT": "111111111111",
        "CDK_DEFAULT_REGION": "us-east-1",
        "SEEDFARMER_PARAMETER_EKS_CLUSTER_NAME": "my-cluster",
        "SEEDFARMER_PARAMETER_EKS_CLUSTER_ADMIN_ROLE_ARN": "arn:aws:iam::123456789012:role/eks-testing-XXXXXX",
        "SEEDFARMER_PARAMETER_EKS_HANDLER_ROLEARN": "arn:aws:iam::123456789012:role/eks-testing-XXXXXX",
        "SEEDFARMER_PARAMETER_EKS_OIDC_ARN": "arn:aws:iam::123456789012:oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXX",
        "SEEDFARMER_PARAMETER_TRAINING_NAMESPACE_NAME": "namespace",
        "SEEDFARMER_PARAMETER_EKS_CLUSTER_ENDPOINT": "oidc.eks.us-west-2.amazonaws.com/id/XXXXXXXXXX",
        "SEEDFARMER_PARAMETER_EKS_CERT_AUTH_DATA": "BQTRJQkR3QXdnZ0VLCkFvSUJ",
        "SEEDFARMER_PARAMETER_TRAINING_IMAGE_URI": "mnist:latest",
    }
    os.environ.update(env)

    # Unload the app import so that subsequent tests don't reuse it.
    sys.modules.pop("app", None)
def test_app(stack_defaults):
    # Smoke test: the app must import (and synthesize) with all params present.
    import app  # noqa: F401
def test_cluster_name(stack_defaults):
    """App import must fail when the EKS cluster name parameter is absent."""
    del os.environ["SEEDFARMER_PARAMETER_EKS_CLUSTER_NAME"]

    with pytest.raises(Exception):
        import app  # noqa: F401
    # NOTE(review): the original asserted the deleted env var's value after the
    # failed import; that assertion could never pass and was removed.
def test_training_namespace_name(stack_defaults):
    """App import must fail when the training namespace parameter is absent."""
    del os.environ["SEEDFARMER_PARAMETER_TRAINING_NAMESPACE_NAME"]

    with pytest.raises(Exception):
        import app  # noqa: F401
    # NOTE(review): the original asserted the deleted env var's value after the
    # failed import; that assertion could never pass and was removed.
def test_cert_auth_data(stack_defaults):
    """App import must fail when the cluster CA data parameter is absent."""
    del os.environ["SEEDFARMER_PARAMETER_EKS_CERT_AUTH_DATA"]

    with pytest.raises(Exception):
        import app  # noqa: F401
    # NOTE(review): the original asserted the deleted env var's value after the
    # failed import; that assertion could never pass and was removed.
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/ml-training/training-k8s-deployment/tests/test_stack.py | modules/ml-training/training-k8s-deployment/tests/test_stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import aws_cdk as cdk
import pytest
@pytest.fixture(scope="function")
def stack_defaults():
    """Set the CDK account/region and drop any cached ``stack`` import."""
    os.environ.update(
        {
            "CDK_DEFAULT_ACCOUNT": "111111111111",
            "CDK_DEFAULT_REGION": "us-east-1",
        }
    )

    # Forget the previously imported module so each test starts fresh.
    sys.modules.pop("stack", None)
def test_synthesize_stack(stack_defaults):
    """The TrainingPipeline stack synthesizes with representative inputs."""
    import stack

    cdk_app = cdk.App()

    proj = "test-project"
    dep = "test-deployment"
    mod = "test-module"

    _step_function = stack.TrainingPipeline(
        scope=cdk_app,
        id=f"{proj}-{dep}-{mod}",
        project_name=proj,
        deployment_name=dep,
        module_name=mod,
        eks_cluster_name=mod,
        eks_admin_role_arn="arn:aws:iam::123456789012:role/addf-eks-testing-XXXXXX",
        eks_handler_rolearn="arn:aws:iam::123456789012:role/addf-eks-testing-XXXXXX",
        eks_openid_connect_provider_arn="arn:aws:iam::123456789012:oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXX",
        eks_cluster_endpoint="oidc.eks.us-west-2.amazonaws.com/id/XXXXXXXXXX",
        eks_cert_auth_data="BQTRJQkR3QXdnZ0VLCkFvSUJ",
        training_namespace_name="namespace",
        training_image_uri="mnist:latest",
        env=cdk.Environment(
            account=os.environ["CDK_DEFAULT_ACCOUNT"],
            region=os.environ["CDK_DEFAULT_REGION"],
        ),
    )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/ml-training/training-k8s-deployment/tests/__init__.py | modules/ml-training/training-k8s-deployment/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/demo-only/vscode-on-eks/stack.py | modules/demo-only/vscode-on-eks/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import Any, cast
import aws_cdk
import aws_cdk.aws_s3_assets as s3_assets
import cdk_nag
from aws_cdk import Aspects, Stack, Tags
from aws_cdk import aws_eks as eks
from aws_cdk import aws_iam as iam
from cdk_nag import NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class VSCodeOnEKS(Stack):
    """CDK stack that installs a code-server (VSCode-in-browser) deployment onto an
    existing EKS cluster via a bundled Helm chart, with an IAM-backed service account.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        deployment: str,  # ADDF deployment name (used for tagging/naming)
        module: str,  # ADDF module name
        eks_cluster_name: str,  # name of the pre-existing EKS cluster
        eks_admin_role_arn: str,  # role CDK assumes to run kubectl against the cluster
        eks_oidc_arn: str,  # OIDC provider ARN for IRSA service accounts
        vscode_password: str,  # password injected into the Helm chart values
        **kwargs: Any,
    ) -> None:
        super().__init__(
            scope,
            id,
            description="This stack deploys VSCode environment for ADDF",
            **kwargs,
        )
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=f"addf-{deployment}")
        dep_mod = f"addf-{deployment}-{module}"
        # CDK Env Vars (resolved at deploy time via CloudFormation pseudo-params)
        account: str = aws_cdk.Aws.ACCOUNT_ID
        region: str = aws_cdk.Aws.REGION
        partition: str = aws_cdk.Aws.PARTITION
        NAMESPACE = "code-server"
        # Import EKS Cluster (no new cluster is created here)
        provider = eks.OpenIdConnectProvider.from_open_id_connect_provider_arn(
            self, f"{dep_mod}-provider", eks_oidc_arn
        )
        eks_cluster = eks.Cluster.from_cluster_attributes(
            self,
            f"{dep_mod}-eks-cluster",
            cluster_name=eks_cluster_name,
            kubectl_role_arn=eks_admin_role_arn,
            open_id_connect_provider=provider,
        )
        # IAM permissions granted to pods running under the "vscode" service account.
        vscode_on_eks_policy_statements = [
            iam.PolicyStatement(
                actions=[
                    "es:ESGet*",
                ],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{partition}:es:{region}:{account}:domain/*"],
            ),
            # Broad List* permissions; these APIs do not support resource scoping.
            iam.PolicyStatement(
                actions=[
                    "s3:List*",
                    "states:List*",
                    "cloudformation:List*",
                    "lambda:List*",
                    "logs:List*",
                    "ssm:List*",
                ],
                effect=iam.Effect.ALLOW,
                resources=["*"],
            ),
            # Read/execute access restricted to ADDF-prefixed resources.
            iam.PolicyStatement(
                actions=[
                    "cloudformation:ListStackSetOperations",
                    "cloudformation:ListStackInstances",
                    "cloudformation:ListStackSets",
                    "cloudformation:DescribeStacks",
                    "cloudformation:ListStackSetOperationResults",
                    "cloudformation:ListChangeSets",
                    "cloudformation:ListStackResources",
                    "states:DescribeStateMachineForExecution",
                    "states:DescribeActivity",
                    "states:StopExecution",
                    "states:DescribeStateMachine",
                    "states:ListExecutions",
                    "states:DescribeExecution",
                    "states:GetExecutionHistory",
                    "states:StartExecution",
                    "states:ListTagsForResource",
                    "s3:GetObjectTagging",
                    "s3:ListBucket",
                    "s3:PutObject",
                    "s3:GetObject",
                    "s3:GetObjectVersion",
                ],
                effect=iam.Effect.ALLOW,
                resources=[
                    f"arn:{partition}:states:{region}:{account}:execution:*:*",
                    f"arn:{partition}:states:{region}:{account}:stateMachine:*",
                    f"arn:{partition}:states:{region}:{account}:activity:*",
                    f"arn:{partition}:cloudformation:{region}:{account}:stackset/addf*:*",
                    f"arn:{partition}:cloudformation:{region}:{account}:stack/addf*/*",
                    f"arn:{partition}:s3:::addf*",
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "logs:CreateLogStream",
                    "logs:CreateLogGroup",
                    "logs:PutLogEvents",
                    "logs:GetLogEvents",
                    "logs:GetLogRecord",
                    "logs:GetLogGroupFields",
                    "logs:GetQueryResults",
                    "logs:DescribeLogGroups",
                ],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{partition}:logs:{region}:{account}:log-group:*"],
            ),
            iam.PolicyStatement(
                actions=[
                    "lambda:List*",
                    "lambda:Get*",
                    "cloudformation:List*",
                    "cloudformation:Describe*",
                    "states:Describe*",
                    "states:List*",
                ],
                effect=iam.Effect.ALLOW,
                resources=[
                    f"arn:{partition}:cloudformation:{region}:{account}:stackset/addf*:*",
                    # NOTE(review): "stack/addf-/*" and "function:addf-" look like
                    # they were meant to be wildcards ("addf-*") — confirm intent.
                    f"arn:{partition}:cloudformation:{region}:{account}:stack/addf-/*",
                    f"arn:{partition}:lambda:{region}:{account}:function:addf-",
                    f"arn:{partition}:states:{region}:{account}:execution:*:*",
                    f"arn:{partition}:states:{region}:{account}:stateMachine:*",
                    f"arn:{partition}:states:{region}:{account}:activity:*",
                ],
            ),
            iam.PolicyStatement(
                actions=["sts:AssumeRole"],
                effect=iam.Effect.ALLOW,
                resources=[f"arn:{partition}:iam::{account}:role/addf-*"],
            ),
        ]
        vscode_policy = iam.Policy(
            self,
            "vscodepolicy",
            statements=vscode_on_eks_policy_statements,
        )
        # Create the code-server namespace (comment previously mislabeled this
        # as "jupyter-hub"; the manifest below clearly creates `code-server`).
        vscode_namespace = eks_cluster.add_manifest(
            "code-server-namespace",
            {
                "apiVersion": "v1",
                "kind": "Namespace",
                "metadata": {"name": NAMESPACE},
            },
        )
        # IRSA service account the chart pods will run under.
        vscode_service_account = eks_cluster.add_service_account("vscode", name="vscode", namespace=NAMESPACE)
        vscode_service_account.role.attach_inline_policy(vscode_policy)
        # The Helm chart is vendored next to this stack and uploaded as an asset.
        chart_asset = s3_assets.Asset(
            self,
            "VSCodeChartAsset",
            path="./helm-chart",
        )
        vscode_chart = eks_cluster.add_helm_chart(
            "vscode-chart",
            chart_asset=chart_asset,
            create_namespace=True,
            namespace=NAMESPACE,
            values={
                "namespace": NAMESPACE,
                "proxy": {"service": {"type": "NodePort"}},
                "ingress": {
                    "annotations": {
                        "kubernetes.io/ingress.class": "alb",
                        "alb.ingress.kubernetes.io/scheme": "internet-facing",
                        "alb.ingress.kubernetes.io/success-codes": "200,302",
                    },
                },
                "vscode": {
                    # NOTE(review): password flows into Helm values in plaintext.
                    "password": vscode_password,
                },
                "serviceAccountName": "vscode",
            },
        )
        # Order: namespace -> service account -> chart install.
        vscode_service_account.node.add_dependency(vscode_namespace)
        vscode_chart.node.add_dependency(vscode_service_account)
        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())
        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                { # type: ignore
                    "id": "AwsSolutions-IAM4",
                    "reason": "Managed Policies are for service account roles only",
                },
                { # type: ignore
                    "id": "AwsSolutions-IAM5",
                    "reason": "Resource access restriced to ADDF resources",
                },
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/demo-only/vscode-on-eks/app.py | modules/demo-only/vscode-on-eks/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import os
import aws_cdk
import boto3
from aws_cdk import App
from stack import VSCodeOnEKS
# CDK target account/region come from the standard CDK env vars; the partition
# defaults to the commercial "aws" partition. Deployment/module names identify
# the ADDF context for resource naming.
account = os.environ["CDK_DEFAULT_ACCOUNT"]
region = os.environ["CDK_DEFAULT_REGION"]
partition = os.getenv("AWS_PARTITION", "aws")
deployment_name = os.getenv("ADDF_DEPLOYMENT_NAME", "")
module_name = os.getenv("ADDF_MODULE_NAME", "")
def _param(name: str) -> str:
return f"ADDF_PARAMETER_{name}"
# Module parameters wired in by seed-farmer via ADDF_PARAMETER_* env vars.
eks_cluster_name = os.getenv(_param("EKS_CLUSTER_NAME"), "")
eks_admin_role_arn = os.getenv(_param("EKS_CLUSTER_ADMIN_ROLE_ARN"), "")
eks_oidc_arn = os.getenv(_param("EKS_OIDC_ARN"), "")
secrets_manager_name = os.getenv(_param("SECRETS_MANAGER_NAME"), "")
# NOTE(review): this makes a network call to Secrets Manager at import time;
# `cdk synth` therefore requires live AWS credentials.
client = boto3.client("secretsmanager")
secret_arn = f"arn:{partition}:secretsmanager:{region}:{account}:secret:{secrets_manager_name}"
secrets_json = json.loads(client.get_secret_value(SecretId=secret_arn).get("SecretString"))
vscode_password = secrets_json["password"]
# Synthesize the single VSCode-on-EKS stack for this deployment/module pair.
app = App()
stack = VSCodeOnEKS(
    scope=app,
    id=f"addf-{deployment_name}-{module_name}",
    env=aws_cdk.Environment(
        account=account,
        region=region,
    ),
    deployment=deployment_name,
    module=module_name,
    eks_admin_role_arn=eks_admin_role_arn,
    eks_oidc_arn=eks_oidc_arn,
    eks_cluster_name=eks_cluster_name,
    vscode_password=vscode_password,
)
app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/demo-only/vscode-on-eks/tests/test_stack.py | modules/demo-only/vscode-on-eks/tests/test_stack.py | def test_placeholder() -> None:
return None
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/demo-only/vscode-on-eks/tests/__init__.py | modules/demo-only/vscode-on-eks/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/demo-only/rosbag-webviz/get_url.py | modules/demo-only/rosbag-webviz/get_url.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/env python3
import json
import sys
from argparse import ArgumentParser
import boto3
def main():
    """CLI entry point: resolve the target bucket and Lambda function name from
    flags and/or a module-metadata JSON file, invoke the generateUrl Lambda
    synchronously, and print the resulting presigned Webviz URL (or the error body).
    """
    parser = ArgumentParser(
        description="Request a Presigned URL from the generateUrlLambda"
    )
    parser.add_argument(
        "--config-file",
        dest="config_file",
        required=False,
        help='Name of the JSON file with Module\'s Metadata, use "-" to read from STDIN',
    )
    parser.add_argument(
        "--bucket-name",
        dest="bucket_name",
        required=False,
        help="the name of the bucket containing rosbag file, required if no --config-file is provided",
    )
    parser.add_argument(
        "--function-name",
        dest="function_name",
        required=False,
        help="The generateUrlFunctionName, required if no --config-file is provided",
    )
    parser.add_argument(
        "--key", dest="object_key", required=False, help="the key of the object in s3"
    )
    parser.add_argument(
        "--record",
        dest="record_id",
        required=False,
        help="the partition key of the scene in the scenario db",
    )
    parser.add_argument(
        "--scene",
        dest="scene_id",
        required=False,
        help="the sort key of the scene in the scenario db",
    )
    args = parser.parse_args()
    # Metadata file is optional; "-" means read the JSON document from stdin.
    if args.config_file is not None:
        if args.config_file == "-":
            metadata = json.load(sys.stdin)
        else:
            with open(args.config_file) as metadata_file:
                metadata = json.load(metadata_file)
    else:
        metadata = {}
    # Explicit CLI flags win over values found in the metadata document.
    bucket_name = (
        args.bucket_name
        if args.bucket_name is not None
        else metadata.get("TargetBucketName", None)
    )
    if bucket_name is None:
        raise Exception(
            'One of JSON config file key "TargetBucketName" or --bucket-name must be provided'
        )
    function_name = (
        args.function_name
        if args.function_name is not None
        else metadata.get("GenerateUrlLambdaName", None)
    )
    if function_name is None:
        raise Exception(
            'One of JSON config file key "GenerateUrlLambdaName" or --function-name must be provided'
        )
    # The Lambda accepts either a direct S3 key or a (record, scene) DB lookup.
    if args.object_key is None and (args.record_id is None or args.scene_id is None):
        raise Exception("You need to either specify --key or --record and --scene")
    client = boto3.client("lambda")
    print(f"Invoking: {function_name}")
    payload = {
        "bucket": bucket_name,
        "key": args.object_key,
        "record_id": args.record_id,
        "scene_id": args.scene_id,
    }
    print("payload: " + json.dumps(payload))
    # Synchronous invocation; the Lambda returns {"statusCode": ..., "body": json-string}.
    response = client.invoke(
        FunctionName=str(function_name),
        InvocationType="RequestResponse",
        LogType="Tail",
        Payload=json.dumps(payload),
    )
    res = json.loads(response["Payload"].read())
    statusCode = int(res.get("statusCode"))
    body = json.loads(res.get("body"))
    print(str(statusCode))
    if statusCode == 200:
        url = body.get("url")
        print(url)
    else:
        print(json.dumps(body))
if __name__ == "__main__":
    main()
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/demo-only/rosbag-webviz/lambda/generate_url/main.py | modules/demo-only/rosbag-webviz/lambda/generate_url/main.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import json
import os
from urllib.parse import quote_plus
import boto3
from botocore.config import Config
from botocore.errorfactory import ClientError
# Runtime configuration, injected via Lambda environment variables.
webviz_elb_url = os.environ["WEBVIZ_ELB_URL"]  # base URL of the Webviz load balancer
dynamo_region = os.environ["SCENE_DB_REGION"]
dynamo_table_name = os.environ["SCENE_DB_TABLE"]
partition_key = os.environ["SCENE_DB_PARTITION_KEY"]
sort_key = os.environ["SCENE_DB_SORT_KEY"]
lambda_region = os.environ["AWS_REGION"]  # default region for the S3 bucket lookups
# Attribute names of the scene items stored in DynamoDB.
bucket_key_ddb_name = "bag_file_bucket"
object_key_ddb_name = "bag_file_prefix"
start_time_ddb_name = "start_time"
# Created once per Lambda container and reused across invocations.
dynamo_resource = boto3.resource("dynamodb", region_name=dynamo_region)
dynamo_table = dynamo_resource.Table(dynamo_table_name)
def query_scenes(record_id, scene_id):
    """Fetch one scene item from the scenario DynamoDB table by composite key.

    Raises KeyError when no item matches (no "Item" in the response).
    """
    lookup_key = {partition_key: record_id, sort_key: scene_id}
    return dynamo_table.get_item(Key=lookup_key)["Item"]
def exists(s3, bucket, key):
    """Return True if the object is reachable, probing with an S3 HEAD request."""
    try:
        s3.head_object(Bucket=bucket, Key=key)
    except ClientError:
        return False
    return True
def get_url(bucket, key, region, seek_to):
    """Build a Webviz URL embedding a presigned S3 link to the bag file.

    Raises when the object does not exist; ``seek_to`` (optional) is appended
    as the playback start position.
    """
    s3 = boto3.client("s3", config=Config(region_name=region))
    if not exists(s3, bucket, key):
        raise Exception(f"Could not find bag file s3://{bucket}/{key}")
    # Presigned link valid for 36 hours (129600 s); it is URL-encoded because
    # it becomes a query-string value of the Webviz URL.
    presigned = s3.generate_presigned_url(
        ClientMethod="get_object",
        Params={"Bucket": bucket, "Key": key},
        ExpiresIn=129600,
    )
    double_encoded = quote_plus(presigned)
    url = f"{webviz_elb_url}?remote-bag-url={double_encoded}"
    if seek_to is not None:
        url = f"{url}&seek-to={seek_to}"
    return url
def json_response(statusCode, content):
    """Wrap *content* as a Lambda-style response envelope with JSON body."""
    serialized = json.dumps(content)
    return {"statusCode": statusCode, "body": serialized}
def lambda_handler(event, context):
    """Entry point: resolve bucket/key (directly from the event or via a
    scene-DB lookup) and return a presigned Webviz URL as a JSON response.

    Any failure is converted into a 500 response with the error message.
    """
    try:
        bucket = event.get("bucket")
        key = event.get("key")
        seek_to = event.get("seek_to")
        region = event.get("region", lambda_region)
        record_id = event.get("record_id")
        scene_id = event.get("scene_id")
        # When both scene identifiers are present, the DynamoDB item overrides
        # bucket, key and seek_to from the event.
        if record_id is not None and scene_id is not None:
            scene = query_scenes(record_id, scene_id)
            try:
                bucket = scene[bucket_key_ddb_name]
                key = scene[object_key_ddb_name]
                seek_to = scene[start_time_ddb_name]
            except KeyError as ke:
                raise Exception(f"Could not find record field for {record_id}:{scene_id} in dynamo: {ke}")
        # Validate the resolved inputs before touching S3.
        if bucket is None:
            return json_response(400, {"error": "No bucket specified"})
        if key is None:
            return json_response(400, {"error": "No key specified"})
        return json_response(200, {"url": get_url(bucket, key, region, seek_to)})
    except Exception as e:
        return json_response(500, {"error": str(e)})
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/demo-only/rosbag-webviz/lambda/put_cors/main.py | modules/demo-only/rosbag-webviz/lambda/put_cors/main.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import boto3
# Static CORS rule template; "AllowedOrigins" is filled in per invocation in
# lambda_handler below before the rule is applied to the buckets.
cors_rule = {
    "AllowedHeaders": ["*"],
    "AllowedMethods": ["GET", "HEAD"],
    "ExposeHeaders": ["ETag", "Content-Type", "Accept-Ranges", "Content-Length"],
    "MaxAgeSeconds": 3000,
}
# Created once per Lambda container and reused across invocations.
s3 = boto3.client("s3")
def lambda_handler(event, context):
    """CloudFormation custom-resource handler: apply a CORS configuration
    (allowing the Webviz origin, plus its https variant) to both buckets."""
    print(event)
    props = event["ResourceProperties"]
    allowed_origin = props["allowed_origin"]
    origins = [allowed_origin]
    # When the origin was given as plain http, also allow the https variant.
    if "https" not in allowed_origin:
        origins.append(allowed_origin.replace("http", "https"))
    cors_rule["AllowedOrigins"] = origins
    cors_configuration = {"CORSRules": [cors_rule]}
    for target_bucket in (props["bucket_name"], props["raw_bucket_name"]):
        s3.put_bucket_cors(Bucket=target_bucket, CORSConfiguration=cors_configuration)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/demo-only/opensearch-proxy/stack.py | modules/demo-only/opensearch-proxy/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import Any, cast
import aws_cdk
import aws_cdk.aws_ec2 as ec2
import aws_cdk.aws_iam as iam
import cdk_nag
from aws_cdk import Aspects, Stack, Tags
from aws_cdk.aws_s3_assets import Asset
from cdk_nag import NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
class ProxyStack(Stack):
    """CDK stack that provisions a t2.micro EC2 instance running an NGINX
    reverse proxy (installed via a user-data script) in front of an existing
    OpenSearch domain, exposing its dashboards publicly behind basic auth.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        deployment: str,  # ADDF deployment name (tagging/naming)
        module: str,  # ADDF module name
        vpc_id: str,  # VPC hosting the proxy instance
        opensearch_sg_id: str,  # security group of the OpenSearch domain
        opensearch_domain_endpoint: str,  # endpoint the proxy forwards to
        install_script: str,  # local path to the NGINX install shell script
        username: str,  # basic-auth user passed to the install script
        password: str,  # basic-auth password passed to the install script
        **kwargs: Any,
    ) -> None:
        super().__init__(
            scope,
            id,
            description="This stack deploys Proxy environment to access Opensearch dashboard for ADDF",
            **kwargs,
        )
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=f"addf-{deployment}")
        dep_mod = f"addf-{deployment}-{module}"
        # CDK Env Vars (deploy-time CloudFormation pseudo-parameters)
        account: str = aws_cdk.Aws.ACCOUNT_ID
        region: str = aws_cdk.Aws.REGION
        partition: str = aws_cdk.Aws.PARTITION
        self.vpc_id = vpc_id
        self.vpc = ec2.Vpc.from_lookup(
            self,
            "VPC",
            vpc_id=vpc_id,
        )
        # Reuse the OpenSearch domain SG and open 443 to the world so the
        # proxy (and the domain behind it) is reachable over HTTPS.
        os_security_group = ec2.SecurityGroup.from_security_group_id(self, f"{dep_mod}-os-sg", opensearch_sg_id)
        os_security_group.connections.allow_from(
            ec2.Peer.any_ipv4(),
            ec2.Port.tcp(443),
            "allow HTTPS traffic from anywhere",
        )
        # AMI: latest Amazon Linux 2
        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE,
        )
        # Minimal instance permissions: CloudWatch Logs plus assuming ADDF roles.
        os_proxy_document = iam.PolicyDocument(
            statements=[
                iam.PolicyStatement(
                    actions=[
                        "logs:CreateLogStream",
                        "logs:CreateLogGroup",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "logs:GetLogRecord",
                        "logs:GetLogGroupFields",
                        "logs:GetQueryResults",
                        "logs:DescribeLogGroups",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=[f"arn:{partition}:logs:{region}:{account}:log-group:*"],
                ),
                iam.PolicyStatement(
                    actions=["sts:AssumeRole"],
                    effect=iam.Effect.ALLOW,
                    resources=[f"arn:{partition}:iam::{account}:role/addf-*"],
                ),
            ]
        )
        os_proxy_role = iam.Role(
            self,
            "os_proxy_role",
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal("ec2.amazonaws.com"),
            ),
            inline_policies={"CDKosproxyPolicyDocument": os_proxy_document},
        )
        # SSM access for shell-free administration of the instance.
        os_proxy_role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSSMManagedInstanceCore"))
        instance = ec2.Instance(
            self,
            "OSProxy",
            instance_type=ec2.InstanceType("t2.micro"),
            machine_image=amzn_linux,
            vpc=self.vpc,
            security_group=os_security_group,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            role=os_proxy_role,
        )
        # Upload the install script as an asset and run it at first boot.
        asset = Asset(self, "Asset", path=install_script)
        local_path = instance.user_data.add_s3_download_command(bucket=asset.bucket, bucket_key=asset.s3_object_key)
        # NOTE(review): credentials are passed as plaintext shell arguments and
        # will appear in the instance user-data — consider Secrets Manager.
        args = opensearch_domain_endpoint + " " + username + " " + password
        instance.user_data.add_execute_file_command(file_path=local_path, arguments=args)
        asset.grant_read(instance.role)
        # Outputs consumed by app.py for the stack's CfnOutput metadata.
        self.instance_public_ip = instance.instance_public_ip
        self.instance_dns = instance.instance_public_dns_name
        url = f"https://{self.instance_dns}/_dashboards/"
        self.dashboard_url = url
        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())
        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                { # type: ignore
                    "id": "AwsSolutions-IAM4",
                    "reason": "Managed Policies are for service account roles only",
                },
                { # type: ignore
                    "id": "AwsSolutions-IAM5",
                    "reason": "Resource access restriced to ADDF resources",
                },
                { # type: ignore
                    "id": "AwsSolutions-EC23",
                    "reason": "Access is using basic-auth challenge",
                },
                { # type: ignore
                    "id": "AwsSolutions-EC28",
                    "reason": "Detailed Monitoring not enabled as this is a simple proxy",
                },
                { # type: ignore
                    "id": "AwsSolutions-EC29",
                    "reason": "ASG not enabled as this is a simple proxy",
                },
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/demo-only/opensearch-proxy/app.py | modules/demo-only/opensearch-proxy/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import os
import aws_cdk
import boto3
from aws_cdk import App, CfnOutput
from stack import ProxyStack
# CDK target account/region from the standard CDK env vars; partition defaults
# to the commercial "aws" partition. Deployment/module identify the ADDF context.
account = os.environ["CDK_DEFAULT_ACCOUNT"]
region = os.environ["CDK_DEFAULT_REGION"]
partition = os.getenv("AWS_PARTITION", "aws")
deployment_name = os.getenv("ADDF_DEPLOYMENT_NAME", "")
module_name = os.getenv("ADDF_MODULE_NAME", "")
def _param(name: str) -> str:
return f"ADDF_PARAMETER_{name}"
# Module parameters wired in by seed-farmer via ADDF_PARAMETER_* env vars.
vpc_id = os.getenv(_param("VPC_ID"), "")
opensearch_sg_id = os.getenv(_param("OPENSEARCH_SG_ID"), "")
opensearch_domain_endpoint = os.getenv(
    _param("OPENSEARCH_DOMAIN_ENDPOINT"),
    "",
)
secrets_manager_name = os.getenv(_param("SECRETS_MANAGER_NAME"), "")
# NOTE(review): network call to Secrets Manager at import time; `cdk synth`
# therefore requires live AWS credentials.
client = boto3.client("secretsmanager")
secret_arn = f"arn:{partition}:secretsmanager:{region}:{account}:secret:{secrets_manager_name}"
secrets_json = json.loads(client.get_secret_value(SecretId=secret_arn).get("SecretString"))
osp_username = secrets_json["username"]
osp_password = secrets_json["password"]
# The NGINX install script lives next to this file and is shipped as an asset.
project_dir = os.path.dirname(os.path.abspath(__file__))
install_script = os.path.join(project_dir, "install_nginx.sh")
app = App()
stack = ProxyStack(
    scope=app,
    id=f"addf-{deployment_name}-{module_name}",
    env=aws_cdk.Environment(
        account=account,
        region=region,
    ),
    deployment=deployment_name,
    module=module_name,
    vpc_id=vpc_id,
    opensearch_sg_id=opensearch_sg_id,
    opensearch_domain_endpoint=opensearch_domain_endpoint,
    install_script=install_script,
    username=osp_username,
    password=osp_password,
)
# Expose the proxy coordinates as stack metadata for downstream modules.
CfnOutput(
    scope=stack,
    id="metadata",
    value=stack.to_json_string(
        {
            "OpenSearchProxyDNS": stack.instance_dns,
            "OpenSearchProxyIP": stack.instance_public_ip,
            "OpenSearchProxyUrl": stack.dashboard_url,
        }
    ),
)
app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/demo-only/opensearch-proxy/tests/test_stack.py | modules/demo-only/opensearch-proxy/tests/test_stack.py | def test_placeholder() -> None:
return None
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/demo-only/opensearch-proxy/tests/__init__.py | modules/demo-only/opensearch-proxy/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/demo-only/jupyter-hub/stack.py | modules/demo-only/jupyter-hub/stack.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import logging
import os
from typing import Any, cast
import cdk_nag
from aws_cdk import Aspects, Stack, Tags
from aws_cdk import aws_eks as eks
from aws_cdk import aws_iam as iam
from cdk_nag import NagSuppressions
from constructs import Construct, IConstruct
_logger: logging.Logger = logging.getLogger(__name__)
project_dir = os.path.dirname(os.path.abspath(__file__))
class JupyterHubStack(Stack):
    """CDK stack that installs a self-managed JupyterHub onto an existing EKS
    cluster via the official Helm chart, with an IRSA service account whose
    IAM policy is loaded from a bundled JSON document.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        *,
        project: str,  # project prefix for tagging/naming
        deployment: str,  # deployment name
        module: str,  # module name
        eks_cluster_name: str,  # name of the pre-existing EKS cluster
        eks_admin_role_arn: str,  # role CDK assumes to run kubectl
        eks_oidc_arn: str,  # OIDC provider ARN for IRSA
        jh_username: str,  # sole allowed JupyterHub user
        jh_password: str,  # DummyAuthenticator shared password
        jh_image_name: str,  # optional custom single-user image name
        jh_image_tag: str,  # optional custom single-user image tag
        **kwargs: Any,
    ) -> None:
        super().__init__(
            scope,
            id,
            description="This stack deploys a Self managed JupyterHub environment",
            **kwargs,
        )
        Tags.of(scope=cast(IConstruct, self)).add(key="Deployment", value=f"{project}-{deployment}")
        dep_mod = f"{project}-{deployment}-{module}"
        # Import EKS Cluster (no new cluster is created here)
        provider = eks.OpenIdConnectProvider.from_open_id_connect_provider_arn(
            self, f"{dep_mod}-provider", eks_oidc_arn
        )
        eks_cluster = eks.Cluster.from_cluster_attributes(
            self,
            f"{dep_mod}-eks-cluster",
            cluster_name=eks_cluster_name,
            kubectl_role_arn=eks_admin_role_arn,
            open_id_connect_provider=provider,
        )
        # Create jupyter-hub namespace
        jupyter_hub_namespace = eks_cluster.add_manifest(
            "jupyter-hub-namespace",
            {
                "apiVersion": "v1",
                "kind": "Namespace",
                "metadata": {"name": "jupyter-hub"},
            },
        )
        # IRSA service account the single-user pods will run under.
        jupyterhub_service_account = eks_cluster.add_service_account(
            "jupyterhub", name="jupyterhub", namespace="jupyter-hub"
        )
        # IAM policy for the service account is maintained as a JSON file.
        jupyterhub_policy_statement_json_path = os.path.join(project_dir, "addons-iam-policies", "jupyterhub-iam.json")
        with open(jupyterhub_policy_statement_json_path) as json_file:
            jupyterhub_policy_statement_json = json.load(json_file)
        # Attach the necessary permissions
        jupyterhub_policy = iam.Policy(
            self,
            "jupyterhubpolicy",
            document=iam.PolicyDocument.from_json(jupyterhub_policy_statement_json),
        )
        jupyterhub_service_account.role.attach_inline_policy(jupyterhub_policy)
        # Helm values: ALB ingress at /jupyter with dummy (password-only) auth.
        k8s_values = {
            "proxy": {"service": {"type": "NodePort"}},
            "scheduling": {
                "podPriority": {"enabled": True},
                "userPlaceholder": {"replicas": 3},
            },
            "singleuser": {
                "startTimeout": 300,
                "storage": {"capacity": "4Gi", "dynamic": {"storageClass": "gp2"}},
                "serviceAccountName": "jupyterhub",
            },
            "ingress": {
                "annotations": {
                    "ingress.kubernetes.io/proxy-body-size": "64m",
                    "kubernetes.io/ingress.class": "alb",
                    "alb.ingress.kubernetes.io/scheme": "internet-facing",
                    "alb.ingress.kubernetes.io/success-codes": "200,302",
                },
                "enabled": True,
                "pathType": "Prefix",
            },
            "hub": {
                "baseUrl": "/jupyter",
                "config": {
                    # NOTE(review): DummyAuthenticator with a shared password —
                    # demo-only; not suitable for production access control.
                    "DummyAuthenticator": {"password": jh_password},
                    "JupyterHub": {"authenticator_class": "dummy"},
                    "Authenticator": {"allowed_users": [jh_username]},
                },
            },
        }
        # Optional custom single-user image; both name and tag must be set.
        if jh_image_name and jh_image_tag:
            k8s_values["singleuser"]["image"] = { # type: ignore
                "name": jh_image_name,
                "tag": jh_image_tag,
                "pullPolicy": "Always",
            }
        jupyterhub_chart = eks_cluster.add_helm_chart(
            "jupyterhub-chart",
            chart="jupyterhub",
            release="jupyterhub",
            repository="https://jupyterhub.github.io/helm-chart",
            create_namespace=True,
            namespace="jupyter-hub",
            values=k8s_values,
        )
        # Order: namespace -> service account -> chart install.
        jupyterhub_service_account.node.add_dependency(jupyter_hub_namespace)
        jupyterhub_chart.node.add_dependency(jupyterhub_service_account)
        Aspects.of(self).add(cdk_nag.AwsSolutionsChecks())
        NagSuppressions.add_stack_suppressions(
            self,
            apply_to_nested_stacks=True,
            suppressions=[
                { # type: ignore
                    "id": "AwsSolutions-IAM4",
                    "reason": "Managed Policies are for service account roles only",
                },
                { # type: ignore
                    "id": "AwsSolutions-IAM5",
                    "reason": "Resource access restriced to explicit resources",
                },
            ],
        )
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/demo-only/jupyter-hub/app.py | modules/demo-only/jupyter-hub/app.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import os
import aws_cdk
import boto3
from aws_cdk import App
from stack import JupyterHubStack
# CDK target account/region from the standard CDK env vars; this module uses
# the SEEDFARMER_* naming convention (newer than the ADDF_* one elsewhere).
account = os.environ["CDK_DEFAULT_ACCOUNT"]
region = os.environ["CDK_DEFAULT_REGION"]
partition = os.getenv("AWS_PARTITION", "aws")
project_name = os.getenv("SEEDFARMER_PROJECT_NAME", "")
deployment_name = os.getenv("SEEDFARMER_DEPLOYMENT_NAME", "")
module_name = os.getenv("SEEDFARMER_MODULE_NAME", "")
def _param(name: str) -> str:
return f"SEEDFARMER_PARAMETER_{name}"
# Module parameters wired in by seed-farmer via SEEDFARMER_PARAMETER_* env vars.
eks_cluster_name = os.getenv(_param("EKS_CLUSTER_NAME"), "")
eks_admin_role_arn = os.getenv(_param("EKS_CLUSTER_ADMIN_ROLE_ARN"), "")
eks_oidc_arn = os.getenv(_param("EKS_OIDC_ARN"), "")
secrets_manager_name = os.getenv(_param("SECRETS_MANAGER_NAME"), "")
# NOTE(review): network call to Secrets Manager at import time; `cdk synth`
# therefore requires live AWS credentials.
client = boto3.client("secretsmanager")
secret_arn = f"arn:{partition}:secretsmanager:{region}:{account}:secret:{secrets_manager_name}"
secrets_json = json.loads(client.get_secret_value(SecretId=secret_arn).get("SecretString"))
jh_username = secrets_json["username"]
jh_password = secrets_json["password"]
# Optional custom single-user image; both must be set for the stack to use them.
jh_image_name = os.getenv(_param("JH_IMAGE_NAME"), None)
jh_image_tag = os.getenv(_param("JH_IMAGE_TAG"), None)
app = App()
stack = JupyterHubStack(
    scope=app,
    id=f"{project_name}-{deployment_name}-{module_name}",
    env=aws_cdk.Environment(
        account=account,
        region=region,
    ),
    project=project_name,
    deployment=deployment_name,
    module=module_name,
    eks_cluster_name=eks_cluster_name,
    eks_admin_role_arn=eks_admin_role_arn,
    eks_oidc_arn=eks_oidc_arn,
    jh_username=jh_username,
    jh_password=jh_password,
    jh_image_name=jh_image_name, # type: ignore
    jh_image_tag=jh_image_tag, # type: ignore
)
app.synth(force=True)
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/demo-only/jupyter-hub/tests/test_stack.py | modules/demo-only/jupyter-hub/tests/test_stack.py | def test_placeholder() -> None:
return None
| python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false |
awslabs/autonomous-driving-data-framework | https://github.com/awslabs/autonomous-driving-data-framework/blob/deadc6a9cc30df557f10c750d7d4a0a751d42f53/modules/demo-only/jupyter-hub/tests/__init__.py | modules/demo-only/jupyter-hub/tests/__init__.py | python | Apache-2.0 | deadc6a9cc30df557f10c750d7d4a0a751d42f53 | 2026-01-05T07:12:28.137953Z | false | |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_SelectUnboundRooms.py | 0-python-code/HowTo_SelectUnboundRooms.py | """
Selects unbound rooms in model
TESTED REVIT API: 2015, 2016, 2017
Author: Jared Friedman | github.com/jbf1212
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
# BUG FIX: BuiltInCategory and ElementId were used below but never imported,
# which raised a NameError when the script ran. They are now imported.
# Transaction/Element are kept even though unused here (pre-existing imports).
from Autodesk.Revit.DB import Transaction, Element, BuiltInCategory, ElementId
from Autodesk.Revit.DB import FilteredElementCollector
from Autodesk.Revit.UI import TaskDialog
from System.Collections.Generic import List

uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document

# GET ALL ROOMS IN MODEL
rooms = FilteredElementCollector(doc).OfCategory(BuiltInCategory.OST_Rooms)
# A room is unbound/unplaced when its computed Area is not positive.
ub_rooms = [r for r in rooms if not r.Area > 0]

# SELECT UNBOUND ROOMS in the active UI document.
collection = List[ElementId]([r.Id for r in ub_rooms])
selection = uidoc.Selection
selection.SetElementIds(collection)
TaskDialog.Show('Unbound Rooms', "{} unbound rooms selected".format(len(ub_rooms)))
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_ReloadAllLinkDocuments.py | 0-python-code/HowTo_ReloadAllLinkDocuments.py | """
Reload All Link Documents
TESTED REVIT API: 2017
Author: min.naung@https://twentytwo.space/contact | https://github.com/mgjean
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
from Autodesk.Revit.DB import FilteredElementCollector, RevitLinkInstance

uidoc = __revit__.ActiveUIDocument
# BUG FIX: `doc` was used below but never defined (only `uidoc` was bound),
# which raised a NameError before any link could be reloaded.
doc = __revit__.ActiveUIDocument.Document

# Collect every Revit link instance placed in the model.
linkInstances = FilteredElementCollector(doc).OfClass(RevitLinkInstance).ToElements()

load = []
for link in linkInstances:
    linkType = doc.GetElement(link.GetTypeId())
    filepath = linkType.GetExternalFileReference().GetAbsolutePath()
    # Reload each link from its recorded absolute path; links whose source
    # file is missing are reported instead of aborting the whole run.
    try:
        linkType.LoadFrom(filepath, None)
        load.append(link.Name.split(" : ")[0] + " <Loaded>")
    except:  # broad by design: Revit raises .NET exceptions for missing files
        load.append(link.Name.split(" : ")[0] + " <File Not Found>")

# Report per-link status (print() works in both IronPython 2 and Python 3).
for i in load:
    print(i)
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_GetPhaseByName.py | 0-python-code/HowTo_GetPhaseByName.py | """
Retrieves a phase by its Name
TESTED REVIT API: -
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
from Autodesk.Revit.DB import Phase, FilteredElementCollector


def get_phase_by_name(phase_name):
    """Return the first Phase element whose name equals *phase_name*.

    Returns None implicitly when no phase matches.
    """
    for candidate in FilteredElementCollector(doc).OfClass(Phase):
        if candidate.Name.Equals(phase_name):
            return candidate


phase = get_phase_by_name('01 - Existing')
print(phase.Name)
print(phase.Id)
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_HideUnhideLinksLevelsGrids.py | 0-python-code/HowTo_HideUnhideLinksLevelsGrids.py | """
HIDE / UNHIDE - LEVELS AND GRIDS FROM LINKS DOCUMENTS
TESTED REVIT API: 2017, 2018
Author: min.naung@https://twentytwo.space/contact | https://github.com/mgjean
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
import System
from System.Collections.Generic import List
from Autodesk.Revit.DB import Transaction
from Autodesk.Revit.DB import *

# Document and view this script operates on.
doc = __revit__.ActiveUIDocument.Document
active_view = doc.ActiveView

# filter name "can name anything"
ifilter = "GiveFilterAName"
# Suffix appended to grid/level type names; the filter rule below matches
# names NOT ending with this, so renamed link types stay visible/hidden.
endWiths = "Anything"

# filter check
found = False
unhide = False  # Edit here to hide/unhide
msg = "Unhide" if unhide else "Hide"

trans = Transaction(doc,"%s links levels grids" %(msg))
trans.Start()

# collect all filter elements
allFilters = FilteredElementCollector(doc).OfClass(FilterElement).ToElements()
# get filters from current view
viewFilters = active_view.GetFilters()
# collect filters' names
viewFiltersName = [doc.GetElement(i).Name.ToString() for i in viewFilters]

# loop each filter
for fter in allFilters:
    # filter already have in doc but not in current view
    if ifilter == fter.Name.ToString() and ifilter not in viewFiltersName:
        # add filter
        active_view.AddFilter(fter.Id)
        # set filter visibility
        active_view.SetFilterVisibility(fter.Id, unhide)
        found = True
    # filter already have in doc and current view
    if ifilter == fter.Name.ToString() and ifilter in viewFiltersName:
        # set filter visibility
        active_view.SetFilterVisibility(fter.Id, unhide)
        found = True

# if filter not found in doc: build it from scratch
if not found:
    # all grids in doc
    grids = FilteredElementCollector(doc).OfClass(Grid).ToElements()
    # all levels in doc
    levels = FilteredElementCollector(doc).OfClass(Level).ToElements()
    # collect category id from grid and level
    # NOTE(review): assumes the model has at least one grid AND one level;
    # grids[0]/levels[0] raise IndexError otherwise.
    CateIds = List[ElementId]([grids[0].Category.Id,levels[0].Category.Id])
    # type ids from grids
    gridTypeIds = set([i.GetTypeId() for i in grids])
    # type ids from levels
    levelTypeIds = set([i.GetTypeId() for i in levels])
    # get grid type element
    type_elems = [doc.GetElement(i) for i in gridTypeIds]
    # get level type element
    type_elems.extend([doc.GetElement(l) for l in levelTypeIds])
    # loop type elements
    for elem in type_elems:
        # if endwiths not include in type name
        if not endWiths in elem.LookupParameter("Type Name").AsString():
            # add endwiths in type name so the "not ends with" rule spares it
            elem.Name = elem.LookupParameter("Type Name").AsString() + endWiths
    # get type names
    type_names = [i.LookupParameter("Type Name").AsString() for i in type_elems]
    # type name parameter id
    paramId = type_elems[0].LookupParameter("Type Name").Id
    # create a "not ends with" filter rule
    notendswith = ParameterFilterRuleFactory.CreateNotEndsWithRule(paramId,endWiths,False)
    # create parameter filter element
    paramFilterElem = ParameterFilterElement.Create(doc, ifilter,CateIds,[notendswith])
    # set filter overrides (same with add filter to current)
    active_view.SetFilterOverrides(paramFilterElem.Id, OverrideGraphicSettings())
    # set filter visibility
    active_view.SetFilterVisibility(paramFilterElem.Id, unhide)

print "DONE!"
trans.Commit()
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/Tools_OpenScheduleInExcel.py | 0-python-code/Tools_OpenScheduleInExcel.py | """
Creates a temporary .txt file of a selected schedule, and open it in Excel
TESTED REVIT API: 2015, 2016, 2017, 2017.1
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
# Export each selected schedule to a .txt file in %temp% and open it in Excel.
from Autodesk.Revit.DB import ViewSchedule, ViewScheduleExportOptions
from Autodesk.Revit.DB import ExportColumnHeaders, ExportTextQualifier
from Autodesk.Revit.DB import BuiltInCategory, ViewSchedule
from Autodesk.Revit.UI import TaskDialog
import os
import subprocess

doc = __revit__.ActiveUIDocument.Document
uidoc = __revit__.ActiveUIDocument

# Despite the variable name, exports go to the user's %temp% directory.
desktop = os.path.expandvars('%temp%\\')
vseop = ViewScheduleExportOptions()

selected_ids = uidoc.Selection.GetElementIds()
if not selected_ids.Count:
    '''If nothing is selected, use Active View'''
    selected_ids=[ doc.ActiveView.Id ]

for element_id in selected_ids:
    element = doc.GetElement(element_id)
    if not isinstance(element, ViewSchedule):
        print('No schedule in Selection. Skipping...')
        continue
    # '*' is illegal in Windows file names; strip it from the view name.
    filename = "".join(x for x in element.ViewName if x not in ['*']) + '.txt'
    element.Export(desktop, filename, vseop)
    print('EXPORTED: {0}\n TO: {1}\n'.format(element.ViewName, filename))

EXCEL = r"C:\Program Files (x86)\Microsoft Office\root\Office16\EXCEL.EXE"
if os.path.exists(EXCEL):
    print('Excel Found. Trying to open...')
    # Fix: was ``print('Filename is: ', filename)`` which prints a tuple on
    # IronPython 2; format into a single string instead.
    print('Filename is: {}'.format(filename))
    try:
        full_filepath = os.path.join(desktop, filename)
        os.system('start excel \"{path}\"'.format(path=full_filepath))
    except:
        print('Sorry, something failed:')
        # Fix: was ``'Filepath: {}'.filename`` -- an AttributeError, not a
        # formatted string; call .format(filename).
        print('Filepath: {}'.format(filename))
        print('EXCEL Path: {}'.format(EXCEL))
else:
    print('Could not find excel. EXCEL: {}'.format(EXCEL))
print('Done')
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/Tools_GetParameterMap.py | 0-python-code/Tools_GetParameterMap.py | """
Prints a Parameter map of the first element in selection.
Where applicable it prints Instance and type parameter map.
Returns a parameter dict.
TESTED REVIT API: 2015, 2016
Author: Frederic Beaupere | hdm-dt-fb
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
import clr
clr.AddReference("RevitAPI")
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
selection = [doc.GetElement(elId) for elId in uidoc.Selection.GetElementIds()]
def print_param_mapping(param_dict):
    """Print each parameter name and its collected info, separated by rules.

    Args:
        param_dict: mapping of parameter name -> gathered info list.
    """
    separator = 35 * "-"
    for name in param_dict:
        print(separator)
        print(name)
        print(param_dict[name])
def get_parameter_map(element):
    """
    Retrieve an overview of parameters of the provided element.
    Prints out the gathered parameter information
    Args:
        element: Element that holds the parameters.
    Returns:
        Returns two dictionaries: Instance dict, Type dict.
        If no type is available second dict is None
    """
    print("\nINSTANCE PARAMETERS" + 50 * "_")
    inst_map = collect_params(element)
    print_param_mapping(inst_map)
    type_map = None
    # Family instances expose their type through .Symbol; only such
    # elements get a type-parameter dump.
    if "Symbol" in dir(element):
        print("\nTYPE PARAMETERS" + 50 * "_")
        elem_symbol = element.Symbol
        type_map = collect_params(elem_symbol)
        print_param_mapping(type_map)
    return inst_map, type_map
def collect_params(param_element):
    """
    Collect the parameters of the provided element.

    Args:
        param_element: Element (or ElementType) that holds the parameters.

    Returns:
        dict mapping parameter name -> [storage type name, HasValue flag,
        value rendered as a string ("None" when the parameter is empty)].
    """
    # Fix: defaultdict was used here but never imported anywhere in this
    # script, raising NameError on the first call.
    from collections import defaultdict

    parameters = param_element.Parameters
    param_dict = defaultdict(list)
    for param in parameters:
        # Hoist the repeated .NET ToString() calls out of the branch chain.
        storage = param.StorageType.ToString()
        name = param.Definition.Name
        param_dict[name].append(storage.split(".")[-1])
        param_dict[name].append(param.HasValue)
        param_value = None
        if param.HasValue:
            if storage == "ElementId":
                param_value = param.AsElementId().IntegerValue
            elif storage == "Integer":
                param_value = param.AsInteger()
            elif storage == "Double":
                param_value = param.AsDouble()
            elif storage == "String":
                param_value = param.AsString()
        param_dict[name].append(str(param_value))
    return param_dict
# Fix: ``selection`` is a plain Python list (built by a comprehension above)
# and has no .Count property -- that is the .NET collection API. Use len().
if len(selection) > 0:
    get_parameter_map(selection[0])
else:
    print("please select an element.")
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_GetParameterValueByName.py | 0-python-code/HowTo_GetParameterValueByName.py | """
Get Parameter Value by Name
Get value of one of element's parameters.
TESTED REVIT API: 2016,2017
Author: Francisco Possetto | github.com/franpossetto
Shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
#Imports.
from Autodesk.Revit.DB import Element

doc = __revit__.ActiveUIDocument.Document
uidoc = __revit__.ActiveUIDocument


def get_parameter_value_by_name(element, parameterName):
    """Return *element*'s parameter value as a display string.

    NOTE(review): LookupParameter returns None when the parameter does not
    exist, which would make this raise AttributeError -- confirm callers
    only pass known parameter names.
    """
    return element.LookupParameter(parameterName).AsValueString()


#Select elements from revit.
selection = [doc.GetElement(x) for x in uidoc.Selection.GetElementIds()]

#Example with Walls: print the Base Constraint of each selected wall.
for wall in selection:
    print get_parameter_value_by_name(wall, "Base Constraint")
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_ImportImage.py | 0-python-code/HowTo_ImportImage.py | """
Imports an Image
TESTED REVIT API: -
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
# See: http://www.revitapidocs.com/2015/05c3dbe2-fe7e-c293-761d-b11f356a011b.htm
from clr import StrongBox
# Fix: Transaction and Element are used below but were never imported.
from Autodesk.Revit.DB import (XYZ, ImageImportOptions, BoxPlacement,
                               BuiltInParameter, Transaction, Element)

# Fix: ``doc`` was referenced below without ever being defined.
doc = __revit__.ActiveUIDocument.Document

# Import Options
import_options = ImageImportOptions()
import_options.Placement = BoxPlacement.Center
import_options.RefPoint = XYZ(0,0,0)
import_options.Resolution = 72  # DPI

# Create New Image in Revit
t = Transaction(doc, 'Crop Image')
t.Start()
# StrongBox acts as the C# ``out`` parameter receiving the new element.
new_img_element = StrongBox[Element]()
new_img_path = 'C:\\path\\to\\image.jpg' # Remember to escape backslashes or use raw string
width_in_ft = 2
doc.Import(new_img_path, import_options , doc.ActiveView, new_img_element)
new_img_width = new_img_element.get_Parameter(BuiltInParameter.RASTER_SHEETWIDTH)
new_img_width.Set(width_in_ft)
t.Commit()
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/Misc_ConvertToPython.py | 0-python-code/Misc_ConvertToPython.py | """
Examples of C# code, followed by its Python Equivalent
TESTED REVIT API: -
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
# Blog Post about this same topic
# http://thebar.cc/converting-revit-api-c-code-to-python/
""""
Class XYZ
revitapidocs.com/2016/fcb91231-2665-54b9-11d6-7ebcb7f235e2.htm
public XYZ(double x, double y, double z)
Usage:
>>> XYZ point = new XYZ(0, 0, 0);
"""
from Autodesk.Revit.DB import XYZ
point = XYZ(10, 10, 10)
print(point.X)
# 10
""""
CSharp Class Constructors
revitapidocs.com/2016/fcb91231-2665-54b9-11d6-7ebcb7f235e2.htm
public XYZ(double x, double y, double z)
Usage:
>>> XYZ point = new XYZ(0, 0, 0);
"""
from Autodesk.Revit.DB import XYZ
point = XYZ(10, 10, 10)
print(point.X)
# 10
""""
FilteredElementCollector Class
http://www.revitapidocs.com/2018/263cf06b-98be-6f91-c4da-fb47d01688f3.htm
public FilteredElementCollector(
Document document
)
Usage:
>>> FilteredElementCollector collector = new FilteredElementCollector(doc);
>>> walls = collector.OfClass(Wall).ToElements()
"""
from Autodesk.Revit.DB import FilteredElementCollector, Wall

collector = FilteredElementCollector(doc)
# Fix: the usage example above promises ``collector.OfClass(Wall)`` and the
# variable is named ``walls``, but the class filter was missing, so every
# element in the model was returned instead of only walls.
walls = collector.OfClass(Wall).ToElements()
""""
Line Intersect Method (Users Out/Ref Values)
http://www.revitapidocs.com/2018/51961478-fb36-e00b-2d1b-7db27b0a09e6.htm
public SetComparisonResult Intersect(
Curve curve,
out IntersectionResultArray resultArray
)
Usage:
>>> IntersectionResultArray results;
>>> SetComparisonResult result = line1.Intersect( line2, out results );
>>> if( result != SetComparisonResult.Overlap ) {
... throw new InvalidOperationException("Input lines did not intersect." );
... };
>>> if( results == null || results.Size != 1 ) {
... throw new InvalidOperationException("Could not extract line intersection point." );
... }
>>> IntersectionResult iResult
= results.get_Item( 0 );
"""
import clr
# Fix: ``Line`` is used below but was never imported anywhere in this file
# (XYZ happens to be imported by an earlier snippet; import both here so
# the example is self-contained).
from Autodesk.Revit.DB import Line, XYZ, SetComparisonResult, IntersectionResultArray
from Autodesk.Revit.Exceptions import InvalidOperationException

line1 = Line.CreateBound(XYZ(0,0,0), XYZ(10,0,0))
line2 = Line.CreateBound(XYZ(5,-5,0), XYZ(5,5,0))

# clr.Reference boxes the C# ``out IntersectionResultArray`` parameter.
results = clr.Reference[IntersectionResultArray]()
result = line1.Intersect(line2, results)

if result != SetComparisonResult.Overlap:
    print('No Intesection')
if results is None or results.Size != 1:
    raise InvalidOperationException("Could not extract line intersection point." )
intersection = results.Item[0]
xyz_point = intersection.XYZPoint
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_GetAllWorksets.py | 0-python-code/HowTo_GetAllWorksets.py | """
GET ALL WORKSETS FROM THE CURRENT DOCUMENT
TESTED REVIT API: 2016,2017,2018
Author: min.naung@https://twentytwo.space/contact | https://github.com/mgjean
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
from Autodesk.Revit.DB import FilteredWorksetCollector, WorksetKind
# document instance
doc = __revit__.ActiveUIDocument.Document
# collect user created worksets
worksets = FilteredWorksetCollector(doc).OfKind(WorksetKind.UserWorkset).ToWorksets()
# loop worksets
for workset in worksets:
# print name, workset
print workset.Name,workset
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_GetSelectedElements.py | 0-python-code/HowTo_GetSelectedElements.py | """
Get's selected elements
TESTED REVIT API: -
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
uidoc = __revit__.ActiveUIDocument
# Fix: ``doc`` was used inside the function below without ever being
# defined in this script (NameError); derive it from the UIDocument.
doc = uidoc.Document


def get_selected_elements():
    """Return Selected Elements as a list[]. Returns empty list if no elements are selected.

    Usage:
        - Select 1 or more elements
        > selected_elements = get_selected_elements()
        > [<Autodesk.Revit.DB.FamilyInstance object at 0x0000000000000034 [Autodesk.Revit.DB.FamilyInstance]>]
    """
    selection = uidoc.Selection
    selection_ids = selection.GetElementIds()
    elements = []
    for element_id in selection_ids:
        elements.append(doc.GetElement(element_id))
    return elements
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_RegisterEventHandler.py | 0-python-code/HowTo_RegisterEventHandler.py | """
Register Event Handler
TESTED REVIT API: -
Author: Ehsan Iran Nejad | https://github.com/eirannejad/
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
# https://github.com/eirannejad/pyRevit/issues/201
# Relevant Links:
# http://www.revitapidocs.com/2017.1/fb46d2bd-92bf-1cc5-79ad-f253f3e875d8.htm
# http://www.revitapidocs.com/2017.1/b69e9d33-3c49-e895-3267-7daabab85fdf.htm
from System import EventHandler, Uri
from Autodesk.Revit.UI.Events import ViewActivatedEventArgs, ViewActivatingEventArgs


def event_handler_function(sender, args):
    """Callback Revit invokes on the subscribed event.

    ``args`` is a ViewActivatingEventArgs for the subscription below.
    """
    # do the even stuff here
    pass


# I'm using ViewActivating event here as example.
# The handler function will be executed every time a Revit view is activated:
# (+= subscribes a typed .NET EventHandler delegate wrapping the function)
__revit__.ViewActivating += EventHandler[ViewActivatingEventArgs](event_handler_function)
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/RPW_SetPhaseForAll.py | 0-python-code/RPW_SetPhaseForAll.py | """
Forcefully set the phase for all Selected Objects.
User will be prompted with a simple form to select the desired phase
Requires rpw library: github.com/gtalarico/revitpythonwrapper
TESTED REVIT API: -
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
import rpw
from rpw import doc

# Current selection (rpw wrapper); iterated element-by-element below.
selection = rpw.Selection()

# All phases in the document, keyed by display name for the picker form.
phases = [p for p in doc.Phases]
phase_options = {p.Name: p for p in phases}
form = rpw.forms.SelectFromList('Set Phase', phase_options.keys(),
                                description='Select a Phase')
form_ok = form.show()
phase = phase_options[form.selected]

# One sub-transaction per element so a single failure cannot roll back
# the phases already set on other elements.
with rpw.TransactionGroup('Set Phases'):
    for element in selection:
        element = rpw.Element(element)
        with rpw.Transaction('Set Element Phase'):
            try:
                element.parameters.builtins['PHASE_CREATED'].value = phase.Id
            except rpw.exceptions.RPW_Exception:
                # Best-effort: skip elements without a settable phase param.
                pass
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_GetAllElementsOfCategory.py | 0-python-code/HowTo_GetAllElementsOfCategory.py | """
All elements of Category
Get all elements of the specified category from Model.
TESTED REVIT API: 2016,2017
Author: Francisco Possetto | github.com/franpossetto
Shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
#Imports.
from Autodesk.Revit.DB import FilteredElementCollector, BuiltInCategory

doc = __revit__.ActiveUIDocument.Document


def all_elements_of_category(category):
    """Return every placed (non-type) element of *category* in the model."""
    collector = FilteredElementCollector(doc).OfCategory(category)
    return collector.WhereElementIsNotElementType().ToElements()


#All Elements Of Walls Category.
walls = all_elements_of_category(BuiltInCategory.OST_Walls)
#All Elements Of Doors Category.
doors = all_elements_of_category(BuiltInCategory.OST_Doors)
#All Elements Of Windows Category.
windows = all_elements_of_category(BuiltInCategory.OST_Windows)
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/Tools_SetPointCloudColorMode.py | 0-python-code/Tools_SetPointCloudColorMode.py | """
Sets Color Mode of all Point Cloud Instances in View to Normal.
TESTED REVIT API: 2017
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
import clr
clr.AddReference('RevitAPI')
clr.AddReference('RevitAPIUI')
from Autodesk.Revit import DB

uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document

# Every point-cloud instance placed in the model (types excluded).
pts = DB.FilteredElementCollector(doc).OfClass(DB.PointCloudInstance).WhereElementIsNotElementType().ToElements()

# One shared override: display cloud points colored by their normals.
pt_cloud_settings = DB.PointClouds.PointCloudOverrideSettings()
pt_cloud_settings.ColorMode = DB.PointCloudColorMode.Normals

for pt in pts:
    view = uidoc.ActiveView
    pt_overrides = view.GetPointCloudOverrides()
    # NOTE(review): a fresh transaction is opened and committed per
    # instance; a single transaction around the whole loop would be
    # cheaper -- confirm the per-item granularity is intended.
    t = DB.Transaction(doc)
    t.Start('Set Pt Cloud Color Mode')
    pt_overrides.SetPointCloudOverrideSettings(pt.Id, pt_cloud_settings)
    t.Commit()
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_InsertDwgLink.py | 0-python-code/HowTo_InsertDwgLink.py | """
Inserts a DWG Link into the Active View.
The same code can be used for other link types
TESTED REVIT API: 2017
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
# See: http://www.revitapidocs.com/2017/f3112a35-91c2-7783-f346-8f21d7cb99b5.htm
import clr
from Autodesk.Revit.DB import DWGImportOptions, ImportPlacement, ElementId, Transaction

doc = __revit__.ActiveUIDocument.Document
uidoc = __revit__.ActiveUIDocument

options = DWGImportOptions()
options.Placement = ImportPlacement.Origin # Insert Options

# clr.Reference boxes the C# ``out ElementId`` parameter of doc.Link.
link = clr.Reference[ElementId]()

t = Transaction(doc)
t.Start('Load Link')
doc.Link(r"C:\Some\Path\YourDrawing.dwg", options, uidoc.ActiveView, link)
t.Commit()
# NOTE(review): original comment read "link is not <ImportInstance>" --
# presumably a typo for "now". After the call, ``link`` holds the id of
# the created link element (boxed ElementId) -- confirm against the API.
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_GetLineIntersection.py | 0-python-code/HowTo_GetLineIntersection.py | """
Get Line Intersection
Get's intersection of 2 lines
TESTED REVIT API: 2017
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
import clr
from Autodesk.Revit.DB import Line, XYZ
from Autodesk.Revit.DB import SetComparisonResult, IntersectionResultArray
def get_intersection(line1, line2):
    """Return the XYZ point where two bound lines cross, or None.

    Args:
        line1, line2: Autodesk.Revit.DB.Line instances.

    Returns:
        The intersection XYZ, or None when the lines do not overlap.
    """
    # clr.Reference boxes the C# ``out IntersectionResultArray`` parameter.
    # See ironpython.net/documentation/dotnet for clr.Reference
    results = clr.Reference[IntersectionResultArray]()
    result = line1.Intersect(line2, results)
    # http://www.revitapidocs.com/2018/51961478-fb36-e00b-2d1b-7db27b0a09e6.htm
    if result != SetComparisonResult.Overlap:
        # Fix: previously fell through and indexed the (empty) result
        # array, raising an unrelated error; report and return None.
        print('No Intesection')
        return None
    intersection = results.Item[0]
    return intersection.XYZPoint
line1 = Line.CreateBound(XYZ(0,0,0), XYZ(10,0,0))
line2 = Line.CreateBound(XYZ(5,-5,0), XYZ(5,5,0))
point = get_intersection(line1, line2)
print(point)
# <Autodesk.Revit.DB.XYZ object at 0x00000000000001BA [(5.000000000, 0.000000000, 0.000000000)]>
"""
From this discussion:
https://forum.dynamobim.com/t/translating-to-python/13481
C# Equivalent
private XYZ GetIntersection(
Line line1,
Line line2 )
{
IntersectionResultArray results;
SetComparisonResult result
= line1.Intersect( line2, out results );
if( result != SetComparisonResult.Overlap )
throw new InvalidOperationException(
"Input lines did not intersect." );
if( results == null || results.Size != 1 )
throw new InvalidOperationException(
"Could not extract line intersection point." );
IntersectionResult iResult
= results.get_Item( 0 );
return iResult.XYZPoint;
}
"""
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/RPW_DWGPolylineRedrawAndDimension.py | 0-python-code/RPW_DWGPolylineRedrawAndDimension.py | """
Linked DWG PolyLine Redraw and Dimension Example
Redraws polylines in a given layer (LAYER_NAME) in a linked DWG instance
and adds Dimensions to these segments
TESTED REVIT API: 2017
Author: Frederic Beaupere | github.com/hdm-dt-fb
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
# Redraw polylines from layer LAYER_NAME of a selected linked DWG as detail
# curves, and dimension each redrawn segment.
from rpw import revit, db, ui, DB

LAYER_NAME = 'A-FLOR-OTLN' # "ENTER_DWG_LAYER_NAME_HERE"

selection = ui.Selection().get_elements(wrapped=False)
dwg_link_instances = [l for l in selection if isinstance(l, DB.ImportInstance)]
if not dwg_link_instances:
    ui.forms.Alert("please select a linked dwg", exit=True)

active_view = revit.doc.ActiveView
geo_opt = DB.Options()
geo_opt.ComputeReferences = True
geo_opt.IncludeNonVisibleObjects = True
geo_opt.View = active_view

geometry = dwg_link_instances[0].get_Geometry(geo_opt)

with db.Transaction("redraw dim_help layer dwg polylines"):
    for geo_inst in geometry:
        geo_elem = geo_inst.GetInstanceGeometry()
        for polyline in geo_elem:
            element = revit.doc.GetElement(polyline.GraphicsStyleId)
            if not element:
                continue
            is_target_layer = element.GraphicsStyleCategory.Name == LAYER_NAME
            is_polyline = polyline.GetType().Name == "PolyLine"
            if is_polyline and is_target_layer:
                # Walk consecutive coordinate pairs of the polyline.
                begin = None
                for pts in polyline.GetCoordinates():
                    if not begin:
                        begin = pts
                        continue
                    end = pts
                    line = DB.Line.CreateBound(begin, end)
                    # Fix: ``doc`` was never defined in this script
                    # (NameError); the rpw document is revit.doc.
                    det_line = revit.doc.Create.NewDetailCurve(active_view, line)
                    line_refs = DB.ReferenceArray()
                    geo_curve = det_line.GeometryCurve
                    line_refs.Append(geo_curve.GetEndPointReference(0))
                    line_refs.Append(geo_curve.GetEndPointReference(1))
                    dim = revit.doc.Create.NewDimension(active_view,
                                                        det_line.GeometryCurve,
                                                        line_refs)
                    begin = pts
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/Tools_MakeFloorFromRooms.py | 0-python-code/Tools_MakeFloorFromRooms.py | """
Makes Floor objects from the boundary of selected Rooms
TESTED REVIT API: -
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
import sys
import os
from functools import wraps
from collections import namedtuple
from Autodesk.Revit.UI import TaskDialog
from Autodesk.Revit.DB import Element, XYZ, CurveArray
from Autodesk.Revit.DB import Transaction
from Autodesk.Revit.DB import FilteredElementCollector, BuiltInCategory
from Autodesk.Revit.DB import SpatialElementBoundaryOptions, Options
from Autodesk.Revit.DB.Architecture import Room
doc = __revit__.ActiveUIDocument.Document
uidoc = __revit__.ActiveUIDocument
def get_selected_elements():
    """Return the current selection as a list of Elements.

    Shows a dialog and terminates the script when nothing is selected.
    """
    selection = uidoc.Selection
    selection_ids = selection.GetElementIds()
    selection_size = selection_ids.Count
    if not selection_ids:
        TaskDialog.Show('MakeFloors', 'No Elements Selected.')
        __window__.Close()
        sys.exit(0)
    return [doc.GetElement(eid) for eid in selection_ids]
def revit_transaction(transaction_name):
    """Decorator factory: run the wrapped function in a named transaction.

    If the transaction cannot be started, the error is printed and the
    function is still called without a transaction (preserves the
    original best-effort behavior).
    """
    def wrap(f):
        @wraps(f)
        def wrapped_f(*args, **kwargs):
            # Fix: InvalidOperationException was referenced below but never
            # imported in this script, so a failed Transaction.Start()
            # would surface as a NameError instead of being handled.
            from Autodesk.Revit.Exceptions import InvalidOperationException
            try:
                t = Transaction(doc, transaction_name)
                t.Start()
            except InvalidOperationException as errmsg:
                print('Transaciton Error: {}'.format(errmsg))
                return_value = f(*args, **kwargs)
            else:
                return_value = f(*args, **kwargs)
                t.Commit()
            return return_value
        return wrapped_f
    return wrap
def get_floor_types():
    """Map floor type name -> ElementId for every floor type in the model."""
    floor_types = FilteredElementCollector(doc).OfCategory(
        BuiltInCategory.OST_Floors
    ).WhereElementIsElementType()
    return {Element.Name.GetValue(ft): ft.Id for ft in floor_types}
@revit_transaction('Create Floor')
def make_floor(new_floor):
    """Create one Floor from a NewFloor tuple (type_id, boundary, level_id)."""
    floor_curves = CurveArray()
    # Each boundary segment of the room contributes one curve to the profile.
    for boundary_segment in new_floor.boundary:
        floor_curves.Append(boundary_segment.Curve)
    floorType = doc.GetElement(new_floor.type_id)
    level = doc.GetElement(new_floor.level_id)
    # Floors are sketched in plan; the profile plane normal is +Z.
    normal = XYZ.BasisZ
    doc.Create.NewFloor( floor_curves, floorType, level, False, normal )
# Gather inputs: current selection and the available floor types.
elements = get_selected_elements()
floor_types = get_floor_types()
chosen_type_name = 'Floor 1'
# If can't find a matching floor type name, use the first one found.
type_id = floor_types.get(chosen_type_name, floor_types.values()[0])

# This is not needed but helps package needed elements to make floor
NewFloor = namedtuple('NewFloor', ['type_id', 'boundary', 'level_id'])
new_floors = []
room_boundary_options = SpatialElementBoundaryOptions()

# Build one NewFloor per selected Room; other element kinds are ignored.
for element in elements:
    if isinstance(element, Room):
        room = element
        room_level_id = room.Level.Id
        # List of Boundary Segment comes in an array by itself.
        room_boundary = room.GetBoundarySegments(room_boundary_options)[0]
        new_floor = NewFloor(type_id=type_id, boundary=room_boundary,
                             level_id=room_level_id)
        new_floors.append(new_floor)

if not new_floors:
    TaskDialog.Show('MakeFloors', 'You need to select at least one room.')
    __window__.Close()
    sys.exit(0)

# One transaction per floor (via the make_floor decorator).
for new_floor in new_floors:
    view = make_floor(new_floor)
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_CreateDraftingView.py | 0-python-code/HowTo_CreateDraftingView.py | """
Creates a Drafting View
TESTED REVIT API: -
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
from Autodesk.Revit.DB import Transaction, Element
from Autodesk.Revit.DB import FilteredElementCollector
# Drafting Views
from Autodesk.Revit.DB import ViewFamilyType, ViewDrafting, Element
from Autodesk.Revit.DB import ViewFamily
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
t = Transaction(doc, 'Create Drafting View')
t.Start()

"""Create a Drafting View"""


def get_drafting_type_id():
    """Selects First available ViewType that Matches Drafting Type."""
    for view_type in FilteredElementCollector(doc).OfClass(ViewFamilyType):
        if view_type.ViewFamily == ViewFamily.Drafting:
            return view_type.Id


drafting_type_id = get_drafting_type_id()
drafting_view = ViewDrafting.Create(doc, drafting_type_id)
# drafting_view.Name = 'New View' - Optional View Name - May fail if already exists.
t.Commit()
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_CreateWorkset.py | 0-python-code/HowTo_CreateWorkset.py | """
Create a Workset
Creates a Workset - Revit 2017+
TESTED REVIT API: 2017
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
import clr
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import Workset, Transaction

doc = __revit__.ActiveUIDocument.Document

# Name of the workset to create (the model must be workshared).
workset_name = 'Point Clouds'

# Workset.Create must run inside an open transaction.
t = Transaction(doc)
t.Start('Create Workset')
Workset.Create(doc, workset_name)
t.Commit()
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/Misc_RevitTransactionDecorator.py | 0-python-code/Misc_RevitTransactionDecorator.py | """
Revit Transaction Decorator function.
This allows you to create functions that make changes to the revit document
without having to repeat the code to start/commit transactions.
Just add the revit_transaction decorator and a transaction will be started before
your function is called, and then commit after the call
TESTED REVIT API: 2015, 2016, 2017, 2017.1
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
from functools import wraps
from Autodesk.Revit.Exceptions import InvalidOperationException
def revit_transaction(transaction_name):
    """Decorator factory: run the wrapped callable inside a Revit Transaction.

    On success the transaction is committed after the call. If the transaction
    cannot be started (InvalidOperationException) the function is still invoked
    without a transaction, preserving the original best-effort behavior.

    NOTE(review): relies on `doc` and `Transaction` being in scope at call
    time -- they are not imported in this snippet; confirm at the paste site.
    """
    def wrap(f):
        @wraps(f)
        def wrapped_f(*args, **kwargs):
            # **kwargs added for parity with the other revit_transaction
            # variant in this repo; positional-only callers are unaffected.
            try:
                t = Transaction(doc, transaction_name)
                t.Start()
            except InvalidOperationException as errmsg:
                print('Transaction Error: {}'.format(errmsg))
                # Transaction failed to start: call through anyway (best effort).
                return_value = f(*args, **kwargs)
            else:
                return_value = f(*args, **kwargs)
                t.Commit()
            return return_value
        return wrapped_f
    return wrap
#Example
# Example usage of the decorator defined above.
@revit_transaction('Create Text')
def create_text(view, text, point, align):
    # Create a TextNote in *view* at *point*; align is 'left' or 'right'.
    # NOTE(review): XYZ, TextAlignFlags and doc are not imported in this
    # snippet -- they must exist in the enclosing scope; confirm.
    baseVec = XYZ.BasisX
    upVec = XYZ.BasisZ
    text_size = 10  # NOTE(review): unused local -- kept as-is.
    text_length = 0.5
    text = str(text)
    # NOTE(review): unlike the other create_text variant there is no
    # 'center' entry, so align='center' raises KeyError -- confirm intended.
    align_options = {'left': TextAlignFlags.TEF_ALIGN_LEFT |
                             TextAlignFlags.TEF_ALIGN_MIDDLE,
                     'right': TextAlignFlags.TEF_ALIGN_RIGHT |
                              TextAlignFlags.TEF_ALIGN_MIDDLE
                     }
    text_element = doc.Create.NewTextNote(view, point, baseVec, upVec, text_length,
                                          align_options[align], text)
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/Tools_MoveObjectToCenterOfRoom.py | 0-python-code/Tools_MoveObjectToCenterOfRoom.py | """
Centers an object in the Room in it's in based on the center of the
Room Bounding Box.
TESTED REVIT API: 2015, 2016, 2017, 2017.1
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
import clr
from functools import wraps
clr.AddReference('RevitAPI')
clr.AddReference('RevitAPIUI')
from Autodesk.Revit.DB import FilteredElementCollector
from Autodesk.Revit.DB import Transaction, XYZ
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
def get_bbox_center_pt(bbox):
    """Return the XY midpoint of a BoundingBox as an XYZ with Z = 0."""
    center_x = (bbox.Min.X + bbox.Max.X) / 2
    center_y = (bbox.Min.Y + bbox.Max.Y) / 2
    # Z is deliberately dropped: callers move elements in plan only.
    return XYZ(center_x, center_y, 0)
def revit_transaction(transaction_name):
    """ Revit Transaction Wrapper"""
    # Decorator factory: starts a Transaction, calls the wrapped function,
    # and commits on success. If starting fails, the function is still
    # invoked without a transaction (best effort).
    # NOTE(review): InvalidOperationException is not imported in this file,
    # so the except branch would raise NameError if ever reached -- confirm.
    def wrap(f):
        @wraps(f)
        def wrapped_f(*args, **kwargs):
            try:
                t = Transaction(doc, transaction_name)
                t.Start()
            except InvalidOperationException as errmsg:
                # NOTE(review): 'Transaciton' typo kept byte-identical.
                print('Transaciton Error: {}'.format(errmsg))
                return_value = f(*args, **kwargs)
            else:
                return_value = f(*args, **kwargs)
                t.Commit()
            return return_value
        return wrapped_f
    return wrap
@revit_transaction('Move Element')
def move_element(element, target_point):
    """ Move Element """
    # Translate the element so its location point lands on target_point.
    # Runs inside a transaction supplied by the decorator above.
    translation = target_point - element.Location.Point
    return element.Location.Move(translation)
# Get Latest Phase: exhaust the iterator so `phase` ends as the last phase.
for phase in doc.Phases:
    pass  # phase will equal the latest phase after the loop.
active_view = doc.ActiveView
selection = uidoc.Selection.GetElementIds()
# Moves all selected objects to the center of their room's bounding box.
# Multiple objects can be selected. Not fully tested.
if selection.Count > 0:
    for element_id in selection:
        element = doc.GetElement(element_id)
        try:
            room = element.Room[phase]
        except:
            pass  # Object does not have a Room property. Skip.
        else:
            if room:
                room_bbox = room.get_BoundingBox(active_view)
                room_center = get_bbox_center_pt(room_bbox)
                move_element(element, target_point=room_center)
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/Tools_MoveRoomTagToRoomCenter.py | 0-python-code/Tools_MoveRoomTagToRoomCenter.py | """
Moves all tags to the "Room Location Point" of their corresponding rooms
TESTED REVIT API: 2015, 2016, 2017, 2017.1
Author: Gui Talarico | github.com/gtalarico
min.naung | https://twentytwo.space/contact | https://github.com/mgjean
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
import clr
clr.AddReference('RevitAPI')
clr.AddReference('RevitAPIUI')
from Autodesk.Revit.DB import Transaction
from Autodesk.Revit.DB import FilteredElementCollector, SpatialElementTag
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
###########################################################################
# TAG COLLECTOR [IN VIEW BY: doc.ActiveView.Id]
room_tags = FilteredElementCollector(doc, doc.ActiveView.Id)\
.OfClass(SpatialElementTag).ToElements()
###########################################################################
# Translate every collected room tag onto its room's location point.
transaction = Transaction(doc, 'Move Room Tags on Room Points')
transaction.Start()
for room_tag in room_tags:
    room_tag_pt = room_tag.Location.Point
    # NOTE(review): assumes every tag still references a room; a deleted
    # room would make room.Location fail -- confirm acceptable.
    room = room_tag.Room
    room_pt = room.Location.Point
    translation = room_pt - room_tag_pt
    room_tag.Location.Move(translation)
transaction.Commit()
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/Misc_IFailuresPreprocessor.py | 0-python-code/Misc_IFailuresPreprocessor.py | """
IFailuresPreprocessor Example
Places an unenclosed room in a project and removes the warning
from transaction via the IFailuresPreprocessor
TESTED REVIT API: 2015
Author: Frederic Beaupere | github.com/hdm-dt-fb
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
import clr
clr.AddReference("RevitAPI")
import Autodesk
from Autodesk.Revit.DB import Transaction, IFailuresPreprocessor, BuiltInFailures, UV
from System.Collections.Generic import List
doc = __revit__.ActiveUIDocument.Document
active_view = doc.ActiveView
active_lvl = active_view.GenLevel
class RoomWarningSwallower(IFailuresPreprocessor):
    """Failure preprocessor that deletes 'room not enclosed' warnings."""

    def PreprocessFailures(self, failuresAccessor):
        # Called by Revit during transaction failure processing.
        # NOTE(review): FailureMessageAccessor and FailureProcessingResult
        # are not imported above -- this raises NameError as written; confirm
        # the full script adds those imports.
        fail_list = List[FailureMessageAccessor]()  # NOTE(review): unused.
        fail_acc_list = failuresAccessor.GetFailureMessages().GetEnumerator()
        for failure in fail_acc_list:
            failure_id = failure.GetFailureDefinitionId()
            failure_severity = failure.GetSeverity()
            failure_type = BuiltInFailures.RoomFailures.RoomNotEnclosed
            if failure_id == failure_type:
                print("{0} with id: {1} of type: RoomNotEnclosed removed!".format(failure_severity, failure_id.Guid))
                failuresAccessor.DeleteWarning(failure)
        # Continue lets Revit proceed with the transaction normally.
        return FailureProcessingResult.Continue
# "Start" the transaction
tx = Transaction(doc, "place unenclosed room")
tx.Start()
options = tx.GetFailureHandlingOptions()
options.SetFailuresPreprocessor(RoomWarningSwallower())
tx.SetFailureHandlingOptions(options)
room = doc.Create.NewRoom(active_lvl, UV(0,0))
# "End" the transaction
tx.Commit()
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/Tools_FamilyLoader.py | 0-python-code/Tools_FamilyLoader.py | """
Family Loader
- loads familiy into project with a path and file name.
- implements IFamilyLoadOptions to silence OverwriteParamaterValue dialogue box.
Requires rpw library: github.com/gtalarico/revitpythonwrapper
Author: Grant Foster | github.com/grantdfoster
"""
import os
import clr
import Autodesk.Revit.DB
from Autodesk.Revit.DB import IFamilyLoadOptions, FamilySource, Transaction
doc = __revit__.ActiveUIDocument.Document
class FamilyLoadOptions(IFamilyLoadOptions):
    'A Class implementation for loading families'
    # Returning True from both callbacks tells Revit to proceed with the load.

    def OnFamilyFound(self, familyInUse, overwriteParameterValues):
        'Defines behavior when a family is found in the model.'
        # NOTE(review): rebinding the parameter name has no effect on the
        # .NET by-ref out-argument in IronPython; Revit may still use its
        # default for overwriteParameterValues -- confirm desired behavior.
        overwriteParameterValues = True
        return True

    def OnSharedFamilyFound(self, sharedFamily, familyInUse, source, overwriteParameterValues):
        'Defines behavior when a shared family is found in the model.'
        # NOTE(review): same by-ref caveat as above for source and
        # overwriteParameterValues.
        source = FamilySource.Project
        # source = FamilySource.Family
        overwriteParameterValues = True
        return True
def load_family(folder_path='Insert Path Here', file_name='Insert File Name Here'):
    """Load a family file into the Revit project.

    folder_path: directory containing the .rfa file.
    file_name: family file name.

    Returns a list of activated FamilySymbols on success, or a status string
    when the path is missing or the family is already loaded (unchanged
    return contract from the original).
    """
    family_path = os.path.join(folder_path, file_name)
    if not os.path.exists(family_path):
        return 'Path does not exist.'
    # out-parameter for Document.LoadFamily (IronPython clr.Reference).
    family_loaded = clr.Reference[Autodesk.Revit.DB.Family]()
    t = Transaction(doc)
    t.Start('Load Family')
    loaded = doc.LoadFamily(family_path, FamilyLoadOptions(), family_loaded)
    if loaded:
        family = family_loaded.Value
        symbols = [doc.GetElement(symbol_id)
                   for symbol_id in family.GetFamilySymbolIds()]
        for s in symbols:
            try:
                # Symbols must be activated before instances can be placed.
                s.Activate()
            except Exception:
                # Best effort: skip symbols that refuse to activate.
                pass
        t.Commit()
        return symbols
    else:
        t.Commit()
        return 'Family already exists in project.'
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/Tools_CycleTypes.py | 0-python-code/Tools_CycleTypes.py | """
Cycles through available types in family manager. Must be in Family Document
TESTED REVIT API: 2015, 2016, 2017, 2017.1
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
from Autodesk.Revit.DB import Transaction
from Autodesk.Revit.UI import TaskDialog
doc = __revit__.ActiveUIDocument.Document
uidoc = __revit__.ActiveUIDocument
if not doc.IsFamilyDocument:
    TaskDialog.Show('Message', 'Must be in Family Document.')
else:
    # Sort type names alphabetically so cycling order is stable.
    family_types = [x for x in doc.FamilyManager.Types]
    sorted_type_names = sorted([x.Name for x in family_types])
    current_type = doc.FamilyManager.CurrentType
    # Iterate through sorted list of type names; pick the one after current.
    for n, type_name in enumerate(sorted_type_names):
        if type_name == current_type.Name:
            try:
                next_family_type_name = sorted_type_names[n + 1]
            except IndexError:
                # Wrap back to the first type when current is the last one.
                next_family_type_name = sorted_type_names[0]
    # Switch the family manager to the chosen type inside a transaction.
    for family_type in family_types:
        if family_type.Name == next_family_type_name:
            t = Transaction(doc, 'Cycle Type')
            t.Start()
            doc.FamilyManager.CurrentType = family_type
            t.Commit()
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_RemoveWallPaint.py | 0-python-code/HowTo_RemoveWallPaint.py | """
Remove Wall Paint
TESTED REVIT API: 2017
Author: min.naung @https://twentytwo.space/contact | https://github.com/mgjean
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
from Autodesk.Revit.DB import Transaction, Reference, FilteredElementCollector
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
# Current selection in the active UI document.
selection = uidoc.Selection
# Wall category id, derived from the first wall in the model.
# NOTE(review): `Wall` is not imported in this snippet -- NameError as
# written; confirm the full script imports it. Also assumes the model
# contains at least one wall.
catId = FilteredElementCollector(doc).OfClass(Wall).ToElements()[0].Category.Id
# Keep only selected elements that are walls.
walls = [doc.GetElement(id) for id in selection.GetElementIds() if doc.GetElement(id).Category.Id == catId]
# Human-readable count for the report printed below.
msg = "%s walls." %len(walls) if len(walls)>1 else "%s wall." %len(walls)
# One transaction for the whole batch.
t = Transaction(doc, 'walls paint remove')
t.Start()
for wall in walls:
    # Geometry of the wall (solid) via a reference to the element itself.
    geoelem = wall.GetGeometryObjectFromReference(Reference(wall))
    geoobj = geoelem.GetEnumerator()
    for obj in geoobj:
        # Strip paint from every face of every solid.
        for f in obj.Faces:
            doc.RemovePaint(wall.Id,f)
# NOTE(review): Python 2 / IronPython print statement kept byte-identical.
print "Paint removed from %s" %(msg)
t.Commit()
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_SetParameterByName.py | 0-python-code/HowTo_SetParameterByName.py | """
Set Parameter by Name
Set one of element's parameters.
TESTED REVIT API: 2016,2017
Author: Francisco Possetto | github.com/franpossetto
Shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
#Imports
from Autodesk.Revit.DB import Element, Transaction
doc = __revit__.ActiveUIDocument.Document
uidoc = __revit__.ActiveUIDocument
t = Transaction(doc, 'Set Parameter by Name')
#Select element from revit.
selection = [doc.GetElement(x) for x in uidoc.Selection.GetElementIds()]
def set_parameter_by_name(element, parameterName, value):
    """Look up *parameterName* on *element* and set it to *value*."""
    parameter = element.LookupParameter(parameterName)
    parameter.Set(value)
# Apply the comment to every selected element inside one transaction.
t.Start()
for s in selection:
    # Set a new value on the 'Comments' parameter of each element.
    set_parameter_by_name(s,"Comments", "Good Element")
# End Transaction
t.Commit()
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_SolidifyBoundingBox.py | 0-python-code/HowTo_SolidifyBoundingBox.py | """
Solidify Selected Element BoundingBox Example
Creates a Generic Model Direct Shape
TESTED REVIT API: 2017
Author: Frederic Beaupere | github.com/hdm-dt-fb
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
import clr
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import Curve, CurveLoop, DirectShape, ElementId, Line, XYZ
from Autodesk.Revit.DB import SolidOptions, GeometryCreationUtilities
from Autodesk.Revit.DB import BuiltInCategory as Bic
from System.Collections.Generic import List
from rpw import db, ui, doc, uidoc
# Resolve the current selection; only the first element is used.
selection = [doc.GetElement(elem_id) for elem_id in uidoc.Selection.GetElementIds()]
first_selected = selection[0]
# Default solid options (no material / graphics style overrides).
solid_opt = SolidOptions(ElementId.InvalidElementId, ElementId.InvalidElementId)
# Model-space bounding box (view argument None).
bbox = first_selected.get_BoundingBox(None)
# Nudge the bottom down slightly so the solid fully encloses the element.
bottom_z_offset = 0.1
bbox.Min = XYZ(bbox.Min.X, bbox.Min.Y, bbox.Min.Z - bottom_z_offset)
# Four corners of the box footprint at the (lowered) base elevation.
b1 = XYZ(bbox.Min.X, bbox.Min.Y, bbox.Min.Z)
b2 = XYZ(bbox.Max.X, bbox.Min.Y, bbox.Min.Z)
b3 = XYZ(bbox.Max.X, bbox.Max.Y, bbox.Min.Z)
b4 = XYZ(bbox.Min.X, bbox.Max.Y, bbox.Min.Z)
bbox_height = bbox.Max.Z - bbox.Min.Z
# Closed rectangular profile for the extrusion.
lines = List[Curve]()
lines.Add(Line.CreateBound(b1, b2))
lines.Add(Line.CreateBound(b2, b3))
lines.Add(Line.CreateBound(b3, b4))
lines.Add(Line.CreateBound(b4, b1))
rectangle = [CurveLoop.Create(lines)]
# Extrude the footprint straight up to the box height.
extrusion = GeometryCreationUtilities.CreateExtrusionGeometry(List[CurveLoop](rectangle),
                                                              XYZ.BasisZ,
                                                              bbox_height,
                                                              solid_opt)
category_id = ElementId(Bic.OST_GenericModel)
# rpw's Transaction context manager commits on clean exit.
with db.Transaction("solid_bbox_direct_shape") as tx:
    direct_shape = DirectShape.CreateElement(doc, category_id, "A", "B")
    direct_shape.SetShape([extrusion])
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_CreateTextFunc.py | 0-python-code/HowTo_CreateTextFunc.py | """
Create a text annotation element.
Does not include start/commit transaction.
TESTED REVIT API: -
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
def create_text(view, text, point, align):
    """Creates a Revit Text.

    create_text(view, text_string, point, align)
    view: view object to insert text
    text: text to be inserted (converted to str)
    point: insertion point - XYZ() instance
    align: 'left', 'right', or 'center'

    NOTE(review): depends on XYZ, TextAlignFlags and doc being available in
    the enclosing scope -- they are not imported in this snippet; confirm
    the imports where this is pasted.
    """
    baseVec = XYZ.BasisX
    upVec = XYZ.BasisZ
    # Removed unused local `text_size` from the original.
    text_length = 0.5
    text = str(text)
    align_options = {'left': TextAlignFlags.TEF_ALIGN_LEFT |
                             TextAlignFlags.TEF_ALIGN_MIDDLE,
                     'right': TextAlignFlags.TEF_ALIGN_RIGHT |
                              TextAlignFlags.TEF_ALIGN_MIDDLE,
                     'center': TextAlignFlags.TEF_ALIGN_CENTER |
                               TextAlignFlags.TEF_ALIGN_MIDDLE,
                     }
    text_element = doc.Create.NewTextNote(view, point, baseVec, upVec,
                                          text_length,
                                          align_options[align],
                                          text)
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/Tools_RemoveUnderlay.py | 0-python-code/Tools_RemoveUnderlay.py | """
Removes Underlay From Selected Views.
TESTED REVIT API: 2015, 2016
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
# Parts of this script were taken from:
# http://dp-stuff.org/revit-view-underlay-property-python-problem/
__doc__ = 'Removes Underlay parameter from selected views.'
__author__ = 'gtalarico'
__version__ = '0.2.0'
import clr
clr.AddReference('RevitAPI')
clr.AddReference('RevitAPIUI')
from Autodesk.Revit.UI import TaskDialog
from Autodesk.Revit.DB import Transaction
from Autodesk.Revit.DB import ElementId
from Autodesk.Revit.DB import BuiltInParameter, BuiltInCategory
doc = __revit__.ActiveUIDocument.Document
uidoc = __revit__.ActiveUIDocument
selection = uidoc.Selection
selection_ids = selection.GetElementIds()
if selection_ids.Count > 0:
    t = Transaction(doc, 'Batch Set Underlay to None')
    t.Start()
    for element_id in selection_ids:
        element = doc.GetElement(element_id)
        # Only printable view elements carry an underlay parameter.
        if element.Category.Id.IntegerValue == int(BuiltInCategory.OST_Views) \
                and (element.CanBePrinted):
            p = element.get_Parameter(BuiltInParameter.VIEW_UNDERLAY_ID)
            if p is not None:
                # InvalidElementId clears the underlay ("None").
                p.Set(ElementId.InvalidElementId)
    t.Commit()
else:
    TaskDialog.Show('Remove Underlay', 'Select Views to Remove Underlay')
# NOTE(review): __window__ is provided by the pyRevit host, not Revit API.
__window__.Close()
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/Tools_SetViewTemplateParameterToNotControlled.py | 0-python-code/Tools_SetViewTemplateParameterToNotControlled.py | """
Set view template parameter to not controlled by view template
Sets a single view template parameter 'keep_non_sheet_view'
to be not controlled by view template, keeping other view
template parameters settings.
TESTED REVIT API: 2017
Author: Frederic Beaupere | github.com/hdm-dt-fb
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
import clr
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import FilteredElementCollector as Fec
from Autodesk.Revit.DB import Transaction
from System.Collections.Generic import List
doc = __revit__.ActiveUIDocument.Document
# NOTE(review): `View` and `ElementId` are not imported above -- NameError
# as written; confirm the full script imports them from Autodesk.Revit.DB.
all_views = Fec(doc).OfClass(View).ToElements()
view_templates = [view for view in all_views if view.IsTemplate]
# Assumes the project has at least one view template; any template exposes
# the same parameter id set.
first_view_template = view_templates[0]
view_template_params = first_view_template.GetTemplateParameterIds()
switch_off_param_name = "keep_non_sheet_view"
# Get the id of the parameter we want to switch off.
# NOTE(review): if no parameter matches, switch_off_param_id stays unbound
# and the loop below raises NameError -- confirm acceptable.
for param_id in view_template_params:
    param = doc.GetElement(param_id)
    if "Name" in dir(param):
        print(param.Name)
        if param.Name == switch_off_param_name:
            switch_off_param_id = param_id
            break
# Set the switch-off parameter to be non-controlled while keeping each
# template's existing non-controlled parameter settings.
t = Transaction(doc, "adjust view_templates")
t.Start()
for view_template in view_templates:
    set_param_list = List[ElementId]()
    set_param_list.Add(switch_off_param_id)
    non_controlled_param_ids = view_template.GetNonControlledTemplateParameterIds()
    for param_id in non_controlled_param_ids:
        set_param_list.Add(param_id)
    view_template.SetNonControlledTemplateParameterIds(set_param_list)
t.Commit()
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_CreateTaskDialog.py | 0-python-code/HowTo_CreateTaskDialog.py | """
Creates a TaskDialog
TESTED REVIT API: -
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
# Results here: https://imgur.com/4DiPYfe
from Autodesk.Revit.UI import (TaskDialog, TaskDialogCommonButtons,
TaskDialogCommandLinkId, TaskDialogResult)
# Build and show a TaskDialog demonstrating the common options.
# Screenshot of the result: https://imgur.com/4DiPYfe
title = 'Task Dialog Title'
dialog = TaskDialog(title)
# Properties
dialog.MainInstruction = 'Text Header'
dialog.MainContent = 'Text Content'
dialog.FooterText = 'Footer Text'
dialog.VerificationText = 'Verification Text'
# dialog.ExpandedContent = expanded_content
# Settings
dialog.TitleAutoPrefix = False
dialog.AllowCancellation = True
# Add Buttons
dialog.CommonButtons = TaskDialogCommonButtons.Ok | TaskDialogCommonButtons.Yes
# Set Default Button.
# FIX: `TaskDialogResult.None` is a SyntaxError in Python 3 (None is a
# keyword and cannot appear as an attribute name); getattr accesses the
# same enum member portably.
dialog.DefaultButton = getattr(TaskDialogResult, 'None')
# Add Command Links
dialog.AddCommandLink(TaskDialogCommandLinkId.CommandLink1,
                      'Command Button Text',
                      'Command Button Sub Text')
dialog.AddCommandLink(TaskDialogCommandLinkId.CommandLink2,
                      'Command Button Text 2',
                      'Command Button Sub Text 2')
result = dialog.Show()
# Report which button/option the user chose.
if result == TaskDialogResult.Ok:
    print('Dialog was OK')
if result == TaskDialogResult.Yes:
    print('Dialog was Yes')
if result == TaskDialogResult.Cancel:
    print('Dialog was Cancelled')
if result == TaskDialogResult.CommandLink1:
    print('Button Was Pressed')
if result == TaskDialogResult.CommandLink2:
    print('Button 2 Was Pressed')
if dialog.WasVerificationChecked():
    print('Verification was Checked')
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_GetFilledRegionByName.py | 0-python-code/HowTo_GetFilledRegionByName.py | """
Retrieves a Filled Region by its Type Name
If none is found, the last one is returned
TESTED REVIT API: -
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
from Autodesk.Revit.DB import Element, FilteredElementCollector
from Autodesk.Revit.DB import FilledRegionType, FilledRegion
def fregion_id_by_name(name=None):
    """Get Id of Filled Region Type by Name.

    Loops through all types, tries to match name (case-insensitive).
    If name not supplied, the first type is used.
    If name does not match any existing type, the last type is used.
    Returns None when the document has no filled region types at all
    (the original raised NameError in that case).
    """
    f_region_types = FilteredElementCollector(doc).OfClass(FilledRegionType)
    fregion_type = None
    for fregion_type in f_region_types:
        fregion_name = Element.Name.GetValue(fregion_type)
        if not name or name.lower() == fregion_name.lower():
            return fregion_type.Id
    # No match (or no name given): fall back to the last type seen, if any.
    print('Color not specified or not found.')
    return fregion_type.Id if fregion_type is not None else None
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_SelectMirroredDoors.py | 0-python-code/HowTo_SelectMirroredDoors.py | """
Selects All Door Instances that have been Mirrored.
TESTED REVIT API: 2015, 2016, 2017, 2017.1
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
import clr
clr.AddReference('RevitAPI')
clr.AddReference('RevitAPIUI')
clr.AddReference("System")
from Autodesk.Revit.UI import TaskDialog
from Autodesk.Revit.DB import FilteredElementCollector
from Autodesk.Revit.DB import BuiltInCategory, ElementId
from System.Collections.Generic import List
# NOTE(review): `doc` and `uidoc` are not defined in this snippet (the
# usual __revit__ boilerplate is missing) -- confirm at the paste site.
collector = FilteredElementCollector(doc).OfCategory(BuiltInCategory.OST_Doors)
doors = collector.WhereElementIsNotElementType().ToElements()
mir_doors = []
for door in doors:
    try:
        if door.Mirrored:
            mir_doors.append(door)
    except AttributeError:
        pass  # for Symbols that don't have Mirrored attribute.
# Summary dialog, then replace the current selection with mirrored doors.
TaskDialog.Show("Mirrored Doors", "Mirrored: {} of {} Doors".format(
                len(mir_doors), len(doors)))
selection = uidoc.Selection
collection = List[ElementId]([door.Id for door in mir_doors])
selection.SetElementIds(collection)
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
gtalarico/revitapidocs.code | https://github.com/gtalarico/revitapidocs.code/blob/a8103ff10fcc0d55c7e484b0dc8d1fce954433d7/0-python-code/HowTo_ChangeWorksetSetting.py | 0-python-code/HowTo_ChangeWorksetSetting.py | """
Change Workset Settings
Set workset to be hidden by default in all views
TESTED REVIT API: 2017
Author: Gui Talarico | github.com/gtalarico
This file is shared on www.revitapidocs.com
For more information visit http://github.com/gtalarico/revitapidocs
License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md
"""
import clr
clr.AddReference("RevitAPI")
from Autodesk.Revit.DB import Transaction, WorksetKind, FilteredWorksetCollector
from Autodesk.Revit.DB import WorksetDefaultVisibilitySettings
doc = __revit__.ActiveUIDocument.Document
# Collect all user-created worksets in the model.
worksets = FilteredWorksetCollector(doc).OfKind(WorksetKind.UserWorkset)
for workset in worksets:
    # Find the workset to target by its exact name.
    if workset.Name == 'Replace this with Name of Workset to Target':
        t = Transaction(doc)
        t.Start('Hide Workset in all Views')
        # Toggle the workset's default visibility off for all views.
        defaultVisibility = WorksetDefaultVisibilitySettings.GetWorksetDefaultVisibilitySettings(doc)
        defaultVisibility.SetWorksetVisibility(workset.Id, False)
        t.Commit()
| python | MIT | a8103ff10fcc0d55c7e484b0dc8d1fce954433d7 | 2026-01-05T07:12:41.185581Z | false |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/image.py | custom_components/petkit/image.py | """Image platform for Petkit Smart Devices integration."""
from __future__ import annotations
from dataclasses import dataclass
import datetime
from pathlib import Path
from types import MappingProxyType
from typing import TYPE_CHECKING, Any
import aiofiles
from pypetkitapi import (
FEEDER_WITH_CAMERA,
LITTER_WITH_CAMERA,
Feeder,
Litter,
Pet,
WaterFountain,
)
from homeassistant.components.image import ImageEntity, ImageEntityDescription
from homeassistant.core import callback
from .const import CONF_MEDIA_DL_IMAGE, LOGGER, MEDIA_SECTION
from .entity import PetKitDescSensorBase, PetkitEntity
if TYPE_CHECKING:
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .coordinator import PetkitMediaUpdateCoordinator
from .data import PetkitConfigEntry, PetkitDevices
@dataclass(frozen=True, kw_only=True)
class PetKitImageDesc(PetKitDescSensorBase, ImageEntityDescription):
    """Describes a PetKit image entity (one snapshot category per entity)."""

    # Media event type this entity displays (matches MediaFile.event_type).
    event_key: str | None = None  # Event key to get the image from
# Image descriptions shared by every device type (currently none).
COMMON_ENTITIES = []

# Maps each device class to the image entities it exposes; restricted to
# camera-equipped models via only_for_types.
IMAGE_MAPPING: dict[type[PetkitDevices], list[PetKitImageDesc]] = {
    Feeder: [
        *COMMON_ENTITIES,
        PetKitImageDesc(
            key="Last visit event",
            event_key="pet",
            translation_key="last_visit_event",
            only_for_types=FEEDER_WITH_CAMERA,
        ),
        PetKitImageDesc(
            key="Last eat event",
            event_key="eat",
            translation_key="last_eat_event",
            only_for_types=FEEDER_WITH_CAMERA,
        ),
        PetKitImageDesc(
            key="Last feed event",
            event_key="feed",
            translation_key="last_feed_event",
            only_for_types=FEEDER_WITH_CAMERA,
        ),
        PetKitImageDesc(
            key="Dish before",
            event_key="dish_before",
            translation_key="dish_before",
            only_for_types=FEEDER_WITH_CAMERA,
        ),
        PetKitImageDesc(
            key="Dish after",
            event_key="dish_after",
            translation_key="dish_after",
            only_for_types=FEEDER_WITH_CAMERA,
        ),
    ],
    Litter: [
        *COMMON_ENTITIES,
        PetKitImageDesc(
            key="Last usage event",
            event_key="toileting",
            translation_key="last_toileting_event",
            only_for_types=LITTER_WITH_CAMERA,
        ),
        PetKitImageDesc(
            key="Last visit event",
            event_key="pet",
            translation_key="last_visit_event",
            only_for_types=LITTER_WITH_CAMERA,
        ),
        PetKitImageDesc(
            key="Waste check",
            event_key="waste_check",
            translation_key="waste_check",
            only_for_types=LITTER_WITH_CAMERA,
        ),
    ],
}
async def async_setup_entry(
    hass: HomeAssistant,
    entry: PetkitConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up PetKit image entities from a config entry."""
    devices = entry.runtime_data.client.petkit_entities.values()
    # One image entity per (device, matching description) pair, limited to
    # descriptions the device model actually supports.
    entities = [
        PetkitImage(
            coordinator=entry.runtime_data.coordinator_media,
            entity_description=entity_description,
            config_entry=entry.options,
            device=device,
        )
        for device in devices
        for device_type, entity_descriptions in IMAGE_MAPPING.items()
        if isinstance(device, device_type)
        for entity_description in entity_descriptions
        if entity_description.is_supported(device)
    ]
    async_add_entities(entities)
class PetkitImage(PetkitEntity, ImageEntity):
    """Image entity exposing the most recent downloaded PetKit snapshot."""

    entity_description: PetKitImageDesc

    def __init__(
        self,
        coordinator: PetkitMediaUpdateCoordinator,
        entity_description: PetKitImageDesc,
        config_entry: MappingProxyType[str, Any],
        device: Feeder | Litter | WaterFountain | Pet,
    ) -> None:
        """Initialize the image entity."""
        super().__init__(coordinator, device)
        # ImageEntity has its own initializer (access token, content type...).
        ImageEntity.__init__(self, coordinator.hass)
        self.coordinator = coordinator
        self.entity_description = entity_description
        self.config_entry = config_entry
        self.device = device
        # NOTE(review): media_list is never read in this class -- confirm
        # whether anything external depends on it.
        self.media_list = []
        self._attr_image_last_updated = None
        self._last_image_file: str | None = None
        # Seed the image state from media the coordinator already fetched.
        self.get_last_image()

    def _handle_coordinator_update(self) -> None:
        """Refresh the cached image path when the coordinator delivers data."""
        super()._handle_coordinator_update()
        self.get_last_image()
        self.async_write_ha_state()

    @property
    def available(self) -> bool:
        """Return if this image is available or not."""
        # Available only while image downloading is enabled in the options.
        if self.config_entry.get(MEDIA_SECTION, {}).get(CONF_MEDIA_DL_IMAGE, False):
            return True
        # NOTE(review): mutating state inside a property is unusual -- this
        # clears the cached image whenever availability is checked with the
        # option off; confirm intended.
        self._attr_image_last_updated = None
        self._last_image_file = None
        return False

    @callback
    def get_last_image(self):
        """Cache the newest media file path matching this entity's event key."""
        event_key = self.entity_description.event_key
        media_table = self.coordinator.media_table
        # Filter media files by device_id and event_key
        matching_media_files = [
            media_file
            for media_file in media_table.get(self.device.id, [])
            if media_file.event_type == event_key
        ]
        if not matching_media_files:
            LOGGER.info(
                f"No media files found for device id = {self.device.id} and event key = {event_key}"
            )
            self._attr_image_last_updated = None
            self._last_image_file = None
            return
        # Find the media file with the most recent timestamp
        latest_media_file = max(
            matching_media_files, key=lambda media_file: media_file.timestamp
        )
        image_path = latest_media_file.full_file_path
        # NOTE(review): fromtimestamp() yields a naive local datetime --
        # confirm Home Assistant expects an aware datetime here.
        self._attr_image_last_updated = datetime.datetime.fromtimestamp(
            latest_media_file.timestamp
        )
        self._last_image_file = image_path

    async def async_image(self) -> bytes | None:
        """Return bytes of image asynchronously."""
        # Placeholder shipped with the integration, used when nothing cached.
        no_img = Path(__file__).parent / "img" / "no-image.png"
        if not self._last_image_file:
            LOGGER.error("No media files found")
            self._attr_image_last_updated = None
            return await self._read_file(no_img)
        LOGGER.debug(
            f"Getting image for {self.device.device_nfo.device_type} Path is :{self._last_image_file}"
        )
        return await self._read_file(self._last_image_file)

    @staticmethod
    async def _read_file(image_path: str | Path) -> bytes | None:
        """Read *image_path* without blocking the event loop; None if missing."""
        try:
            async with aiofiles.open(image_path, "rb") as image_file:
                return await image_file.read()
        except FileNotFoundError:
            LOGGER.error("Unable to read image file")
            return None
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | false |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/diagnostics.py | custom_components/petkit/diagnostics.py | """Petkit integration diagnostics."""
from typing import Any

from homeassistant.components.diagnostics import async_redact_data
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceEntry
# Config-entry keys holding credentials; scrubbed from diagnostics output.
TO_REDACT = [CONF_PASSWORD, CONF_USERNAME]
async def async_get_device_diagnostics(
    hass: HomeAssistant, config_entry: ConfigEntry, device: DeviceEntry
) -> dict[str, Any]:
    """Return diagnostics for a device.

    The config-entry data is included with every key listed in TO_REDACT
    replaced, so credentials never leak into downloaded diagnostics.
    """
    # Bug fix: the annotation previously read `dict[str, any]`, referencing the
    # builtin any() function instead of typing.Any.
    return {
        "config_entry": async_redact_data(config_entry.data, TO_REDACT),
    }
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | false |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/switch.py | custom_components/petkit/switch.py | """Switch platform for Petkit Smart Devices integration."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from dataclasses import dataclass
from datetime import timedelta
from typing import TYPE_CHECKING, Any
from pypetkitapi import (
DEVICES_LITTER_BOX,
FEEDER_MINI,
LITTER_WITH_CAMERA,
T7,
DeviceAction,
DeviceCommand,
Feeder,
Litter,
Pet,
Purifier,
WaterFountain,
)
from homeassistant.components.switch import SwitchEntity, SwitchEntityDescription
from homeassistant.const import EntityCategory
from .const import LOGGER, MIN_SCAN_INTERVAL, POWER_ONLINE_STATE
from .entity import PetKitDescSensorBase, PetkitEntity
if TYPE_CHECKING:
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .coordinator import PetkitDataUpdateCoordinator
from .data import PetkitConfigEntry, PetkitDevices
@dataclass(frozen=True, kw_only=True)
class PetKitSwitchDesc(PetKitDescSensorBase, SwitchEntityDescription):
    """A class that describes Petkit switch entities."""

    # Async callables invoked as fn(api, device) to toggle the setting on/off.
    turn_on: Callable[[Any, Any], Any] | None = None
    turn_off: Callable[[Any, Any], Any] | None = None
    # Optional fn(api, device, value) setter; unused by the descriptions below.
    set_value: Callable[[Any, Any, Any], Any] | None = None
# Switch descriptions shared by every Petkit device type.  Each entry pairs a
# settings getter with the API payloads used to toggle that setting.  Entries
# that only differ by a "settings." key prefix exist because the Feeder Mini
# expects fully-qualified setting keys, unlike the other devices.
COMMON_ENTITIES = [
    PetKitSwitchDesc(
        key="Indicator light",
        translation_key="indicator_light",
        value=lambda device: device.settings.light_mode,
        entity_category=EntityCategory.CONFIG,
        turn_on=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"lightMode": 1}
        ),
        turn_off=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"lightMode": 0}
        ),
        ignore_types=[*DEVICES_LITTER_BOX, FEEDER_MINI],
    ),
    # Same underlying lightMode setting as above, surfaced as "Display" on
    # litter boxes.
    PetKitSwitchDesc(
        key="Display",
        translation_key="display",
        value=lambda device: device.settings.light_mode,
        entity_category=EntityCategory.CONFIG,
        turn_on=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"lightMode": 1}
        ),
        turn_off=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"lightMode": 0}
        ),
        only_for_types=DEVICES_LITTER_BOX,
    ),
    # Feeder Mini variant: note the "settings."-prefixed payload key.
    PetKitSwitchDesc(
        key="Indicator light",
        translation_key="indicator_light",
        value=lambda device: device.settings.light_mode,
        entity_category=EntityCategory.CONFIG,
        turn_on=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"settings.lightMode": 1}
        ),
        turn_off=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"settings.lightMode": 0}
        ),
        only_for_types=[FEEDER_MINI],
    ),
    PetKitSwitchDesc(
        key="Child lock",
        translation_key="child_lock",
        value=lambda device: device.settings.manual_lock,
        entity_category=EntityCategory.CONFIG,
        turn_on=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"manualLock": 1}
        ),
        turn_off=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"manualLock": 0}
        ),
        ignore_types=[FEEDER_MINI],
    ),
    # Feeder Mini variant: note the "settings."-prefixed payload key.
    PetKitSwitchDesc(
        key="Child lock",
        translation_key="child_lock",
        value=lambda device: device.settings.manual_lock,
        entity_category=EntityCategory.CONFIG,
        turn_on=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"settings.manualLock": 1}
        ),
        turn_off=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"settings.manualLock": 0}
        ),
        only_for_types=[FEEDER_MINI],
    ),
    PetKitSwitchDesc(
        key="Camera",
        translation_key="camera",
        value=lambda device: device.settings.camera,
        turn_on=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"camera": 1}
        ),
        turn_off=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"camera": 0}
        ),
    ),
    PetKitSwitchDesc(
        key="Do not disturb",
        translation_key="do_not_disturb",
        value=lambda device: device.settings.disturb_mode,
        entity_category=EntityCategory.CONFIG,
        turn_on=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"disturbMode": 1}
        ),
        turn_off=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"disturbMode": 0}
        ),
    ),
    PetKitSwitchDesc(
        key="Pet tracking",
        translation_key="pet_tracking",
        value=lambda device: device.settings.highlight,
        entity_category=EntityCategory.CONFIG,
        turn_on=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"highlight": 1}
        ),
        turn_off=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"highlight": 0}
        ),
        ignore_types=LITTER_WITH_CAMERA,
    ),
    PetKitSwitchDesc(
        key="Video timestamp",
        translation_key="video_timestamp",
        value=lambda device: device.settings.time_display,
        entity_category=EntityCategory.CONFIG,
        turn_on=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"timeDisplay": 1}
        ),
        turn_off=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"timeDisplay": 0}
        ),
    ),
    PetKitSwitchDesc(
        key="Microphone",
        translation_key="microphone",
        value=lambda device: device.settings.microphone,
        entity_category=EntityCategory.CONFIG,
        turn_on=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"microphone": 1}
        ),
        turn_off=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"microphone": 0}
        ),
    ),
    PetKitSwitchDesc(
        key="Night vision",
        translation_key="night_vision",
        value=lambda device: device.settings.night,
        entity_category=EntityCategory.CONFIG,
        turn_on=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"night": 1}
        ),
        turn_off=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"night": 0}
        ),
        ignore_types=[T7],
    ),
    PetKitSwitchDesc(
        key="Lack Liquid Notify",
        translation_key="lack_liquid_notify",
        value=lambda device: device.settings.lack_liquid_notify,
        entity_category=EntityCategory.CONFIG,
        turn_on=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"lackLiquidNotify": 1}
        ),
        turn_off=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"lackLiquidNotify": 0}
        ),
    ),
    PetKitSwitchDesc(
        key="System notification",
        translation_key="system_notification",
        value=lambda device: device.settings.system_sound_enable,
        entity_category=EntityCategory.CONFIG,
        turn_on=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"systemSoundEnable": 1}
        ),
        turn_off=lambda api, device: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"systemSoundEnable": 0}
        ),
    ),
]
SWITCH_MAPPING: dict[type[PetkitDevices], list[PetKitSwitchDesc]] = {
Feeder: [
*COMMON_ENTITIES,
PetKitSwitchDesc(
key="Shortage alarm",
translation_key="shortage_alarm",
value=lambda device: device.settings.food_warn,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"foodWarn": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"foodWarn": 0}
),
),
PetKitSwitchDesc(
key="Feed tone",
translation_key="feed_tone",
value=lambda device: device.settings.feed_tone,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"feedTone": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"feedTone": 0}
),
),
PetKitSwitchDesc(
key="Feed sound",
translation_key="feed_sound",
value=lambda device: device.settings.feed_sound,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"feedSound": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"feedSound": 0}
),
),
PetKitSwitchDesc(
key="Dispensing notif",
translation_key="dispensing_notif",
value=lambda device: device.settings.feed_notify,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"feedNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"feedNotify": 0}
),
ignore_types=FEEDER_MINI,
),
PetKitSwitchDesc(
key="Dispensing notif",
translation_key="dispensing_notif",
value=lambda device: device.settings.feed_notify,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"settings.feedNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"settings.feedNotify": 0}
),
only_for_types=FEEDER_MINI,
),
PetKitSwitchDesc(
key="Refill notif",
translation_key="refill_notif",
value=lambda device: device.settings.food_notify,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"foodNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"foodNotify": 0}
),
ignore_types=[FEEDER_MINI],
),
PetKitSwitchDesc(
key="Refill notif",
translation_key="refill_notif",
value=lambda device: device.settings.food_notify,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"settings.foodNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"settings.foodNotify": 0}
),
only_for_types=[FEEDER_MINI],
),
PetKitSwitchDesc(
key="Pet visit notif",
translation_key="pet_visit_notif",
value=lambda device: device.settings.pet_notify,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"petNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"petNotify": 0}
),
),
PetKitSwitchDesc(
key="Pet eat notif",
translation_key="pet_eat_notif",
value=lambda device: device.settings.eat_notify,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"eatNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"eatNotify": 0}
),
),
PetKitSwitchDesc(
key="Move notif",
translation_key="move_notif",
value=lambda device: device.settings.move_detection,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"moveNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"moveNotify": 0}
),
),
PetKitSwitchDesc(
key="Surplus control",
translation_key="surplus_control",
value=lambda device: device.settings.surplus_control,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"surplusControl": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"surplusControl": 0}
),
),
PetKitSwitchDesc(
key="Low battery notif",
translation_key="low_battery_notif",
value=lambda device: device.settings.low_battery_notify,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"lowBatteryNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"lowBatteryNotify": 0}
),
),
PetKitSwitchDesc(
key="Voice dispense",
translation_key="voice_dispense",
value=lambda device: device.settings.sound_enable,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"soundEnable": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"soundEnable": 0}
),
),
PetKitSwitchDesc(
key="Desiccant notif",
translation_key="desiccant_notif",
value=lambda device: device.settings.desiccant_notify,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"desiccantNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"desiccantNotify": 0}
),
ignore_types=[FEEDER_MINI],
),
PetKitSwitchDesc(
key="Desiccant notif",
translation_key="desiccant_notif",
value=lambda device: device.settings.desiccant_notify,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"settings.desiccantNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"settings.desiccantNotify": 0}
),
only_for_types=[FEEDER_MINI],
),
],
Litter: [
*COMMON_ENTITIES,
PetKitSwitchDesc(
# For T3/T4 only
key="Auto deodorizing",
translation_key="auto_deodor",
value=lambda device: device.settings.auto_refresh,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"autoRefresh": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"autoRefresh": 0}
),
),
PetKitSwitchDesc(
# For T5/T6 only
key="Auto deodorizing",
translation_key="auto_deodor",
value=lambda device: device.settings.auto_spray,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"autoSpray": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"autoSpray": 0}
),
),
PetKitSwitchDesc(
key="Auto clean",
translation_key="auto_clean",
value=lambda device: device.settings.auto_work,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"autoWork": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"autoWork": 0}
),
),
PetKitSwitchDesc(
key="Avoid repeat clean",
translation_key="avoid_repeat_clean",
value=lambda device: device.settings.avoid_repeat,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"avoidRepeat": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"avoidRepeat": 0}
),
),
PetKitSwitchDesc(
key="Periodic cleaning",
translation_key="periodic_cleaning",
value=lambda device: device.settings.fixed_time_clear,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"fixedTimeClear": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"fixedTimeClear": 0}
),
),
PetKitSwitchDesc(
# For T3/T4 only
key="Periodic deodorizing",
translation_key="periodic_deodorizing",
value=lambda device: device.settings.fixed_time_refresh,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"fixedTimeRefresh": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"fixedTimeRefresh": 0}
),
),
PetKitSwitchDesc(
# For T5/T6 only
key="Periodic deodorizing",
translation_key="periodic_deodorizing",
value=lambda device: device.settings.fixed_time_spray,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"fixedTimeSpray": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"fixedTimeSpray": 0}
),
),
PetKitSwitchDesc(
key="Kitten mode",
translation_key="kitten_mode",
value=lambda device: device.settings.kitten,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"kitten": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"kitten": 0}
),
),
PetKitSwitchDesc(
key="Light weight",
translation_key="light_weight",
value=lambda device: device.settings.underweight,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"underweight": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"underweight": 0}
),
ignore_types=LITTER_WITH_CAMERA,
),
PetKitSwitchDesc(
key="Power",
translation_key="power",
value=lambda device: device.state.power,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.CONTROL_DEVICE, {DeviceAction.POWER: 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.CONTROL_DEVICE, {DeviceAction.POWER: 0}
),
),
PetKitSwitchDesc(
key="Cont rotation",
translation_key="cont_rotation",
value=lambda device: device.settings.downpos,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"downpos": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"downpos": 0}
),
),
PetKitSwitchDesc(
key="Deep cleaning",
translation_key="deep_cleaning",
value=lambda device: device.settings.deep_clean,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"deepClean": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"deepClean": 0}
),
ignore_types=[T7],
),
PetKitSwitchDesc(
# For T3/T4 only
key="Deep deodorizing T3 T4",
translation_key="deep_deodor",
value=lambda device: device.settings.deep_refresh,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"deepRefresh": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"deepRefresh": 0}
),
),
PetKitSwitchDesc(
# For T5/T6 only
key="Deep deodorizing T5 T6",
translation_key="deep_deodor",
value=lambda device: device.settings.deep_spray,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"deepSpray": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"deepSpray": 0}
),
),
PetKitSwitchDesc(
key="Sand Saving",
translation_key="sand_saving",
value=lambda device: device.settings.sand_saving,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"sandSaving": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"sandSaving": 0}
),
),
PetKitSwitchDesc(
key="Pet visit notif",
translation_key="pet_visit_notif",
value=lambda device: device.settings.pet_notify,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"petNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"petNotify": 0}
),
),
PetKitSwitchDesc(
key="Waste covering",
translation_key="waste_covering",
value=lambda device: device.settings.bury,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"bury": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"bury": 0}
),
ignore_types=[T7],
),
PetKitSwitchDesc(
key="Litter full notify",
translation_key="litter_full_notify",
value=lambda device: device.settings.litter_full_notify,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"litterFullNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"litterFullNotify": 0}
),
),
PetKitSwitchDesc(
key="Pet in notify",
translation_key="pet_in_notify",
value=lambda device: device.settings.pet_in_notify,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"petInNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"petInNotify": 0}
),
),
PetKitSwitchDesc(
key="Work notify",
translation_key="work_notify",
value=lambda device: device.settings.work_notify,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"workNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"workNotify": 0}
),
),
PetKitSwitchDesc(
key="Deodorant N50 notify",
translation_key="deodorant_n50_notify",
value=lambda device: device.settings.deodorant_notify,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"deodorantNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"deodorantNotify": 0}
),
),
PetKitSwitchDesc(
key="Deodorant N60 notify",
translation_key="deodorant_n60_notify",
value=lambda device: device.settings.spray_notify,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"sprayNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"sprayNotify": 0}
),
),
PetKitSwitchDesc(
key="Lack sand notify",
translation_key="lack_sand_notify",
value=lambda device: device.settings.lack_sand_notify,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"lackSandNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"lackSandNotify": 0}
),
),
PetKitSwitchDesc(
key="Work log notify",
translation_key="work_log_notify",
value=lambda device: device.settings.log_notify,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"logNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"logNotify": 0}
),
),
PetKitSwitchDesc(
key="Light Assist",
translation_key="light_assist",
value=lambda device: device.settings.light_assist,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"lightAssist": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"lightAssist": 0}
),
),
PetKitSwitchDesc(
key="Camera Light",
translation_key="camera_light",
value=lambda device: device.settings.camera_light,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"cameraLight": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"cameraLight": 0}
),
),
PetKitSwitchDesc(
key="Notif pet toileting",
translation_key="pet_toileting_notif",
value=lambda device: device.settings.toilet_notify,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"toiletNotify": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"toiletNotify": 0}
),
),
PetKitSwitchDesc(
key="Toilet light",
translation_key="toilet_light",
value=lambda device: device.settings.toilet_light,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"toiletLight": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"toiletLight": 0}
),
),
PetKitSwitchDesc(
key="Privacy mode",
translation_key="privacy_mode",
value=lambda device: device.settings.home_mode,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"homeMode": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"homeMode": 0}
),
),
PetKitSwitchDesc(
key="Privacy Camera OFF",
translation_key="privacy_camera_off",
value=lambda device: device.settings.camera_off,
entity_category=EntityCategory.CONFIG,
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"cameraOff": 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.UPDATE_SETTING, {"cameraOff": 0}
),
),
PetKitSwitchDesc(
key="Privacy Camera inward",
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | true |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/number.py | custom_components/petkit/number.py | """Switch platform for Petkit Smart Devices integration."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any
from pypetkitapi import (
D3,
D4H,
D4S,
D4SH,
FEEDER,
T5,
T6,
T7,
DeviceCommand,
Feeder,
FeederCommand,
Litter,
Pet,
PetCommand,
Purifier,
WaterFountain,
)
from pypetkitapi.const import PET
from homeassistant.components.number import (
NumberDeviceClass,
NumberEntity,
NumberEntityDescription,
NumberMode,
)
from homeassistant.const import EntityCategory, UnitOfMass, UnitOfTime
from .const import LOGGER, POWER_ONLINE_STATE
from .entity import PetKitDescSensorBase, PetkitEntity
if TYPE_CHECKING:
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .coordinator import PetkitDataUpdateCoordinator
from .data import PetkitConfigEntry, PetkitDevices
@dataclass(frozen=True, kw_only=True)
class PetKitNumberDesc(PetKitDescSensorBase, NumberEntityDescription):
    """A class that describes number entities."""

    # Optional picture getter (used by pet entities to show the avatar).
    entity_picture: Callable[[PetkitDevices], str | None] | None = None
    # Reads the current numeric value from the device object.
    # (Annotation improved: the original declared a `None` return, but every
    # description below returns a number.)
    native_value: Callable[[PetkitDevices], float | None] | None = None
    # Invoked as action(api, device, value) to push a new value to the API.
    # NOTE(review): the value parameter is annotated `str` but the lambdas
    # below call int(value) / value * 60 on it — confirm and tighten.
    action: Callable[[PetkitConfigEntry, PetkitDevices, str], Any] | None
# Number descriptions shared across device types (merged into NUMBER_MAPPING).
COMMON_ENTITIES = [
    PetKitNumberDesc(
        key="Volume",
        translation_key="volume",
        entity_category=EntityCategory.CONFIG,
        native_min_value=1,
        native_max_value=9,
        native_step=1,
        mode=NumberMode.SLIDER,
        native_value=lambda device: device.settings.volume,
        action=lambda api, device, value: api.send_api_request(
            device.id, DeviceCommand.UPDATE_SETTING, {"volume": int(value)}
        ),
        only_for_types=[T5, T6, D3, D4H, D4SH],
    ),
]
# Maps each device class to the number entities it exposes.  Every device type
# gets COMMON_ENTITIES plus its own model-specific descriptions.
NUMBER_MAPPING: dict[type[PetkitDevices], list[PetKitNumberDesc]] = {
    Feeder: [
        *COMMON_ENTITIES,
        PetKitNumberDesc(
            key="Surplus",
            translation_key="surplus",
            native_min_value=20,
            native_max_value=100,
            native_step=10,
            mode=NumberMode.SLIDER,
            native_value=lambda device: device.settings.surplus,
            action=lambda api, device, value: api.send_api_request(
                device.id, DeviceCommand.UPDATE_SETTING, {"surplus": int(value)}
            ),
            only_for_types=[D3],
        ),
        PetKitNumberDesc(
            key="Min Eating Duration",
            translation_key="min_eating_duration",
            entity_category=EntityCategory.CONFIG,
            native_min_value=3,
            native_max_value=60,
            native_step=1,
            native_unit_of_measurement=UnitOfTime.SECONDS,
            mode=NumberMode.SLIDER,
            native_value=lambda device: device.settings.shortest,
            action=lambda api, device, value: api.send_api_request(
                device.id, DeviceCommand.UPDATE_SETTING, {"shortest": int(value)}
            ),
            only_for_types=[D4S],
        ),
        PetKitNumberDesc(
            key="Manual Feed",
            translation_key="manual_feed",
            entity_category=EntityCategory.CONFIG,
            native_min_value=0,
            native_max_value=400,
            native_step=20,
            device_class=NumberDeviceClass.WEIGHT,
            mode=NumberMode.SLIDER,
            # Always reads 0: this entity acts as a one-shot feed trigger.
            native_value=lambda device: 0,
            action=lambda api, device, value: api.send_api_request(
                device.id, FeederCommand.MANUAL_FEED, {"amount": int(value)}
            ),
            only_for_types=[FEEDER],
        ),
    ],
    Litter: [
        *COMMON_ENTITIES,
        PetKitNumberDesc(
            # Non-T7 models: stillTime is stored in seconds, shown in minutes.
            key="Cleaning Delay",
            translation_key="cleaning_delay",
            entity_category=EntityCategory.CONFIG,
            native_min_value=0,
            native_max_value=60,
            native_step=1,
            native_unit_of_measurement=UnitOfTime.MINUTES,
            mode=NumberMode.SLIDER,
            native_value=lambda device: device.settings.still_time / 60,
            action=lambda api, device, value: api.send_api_request(
                device.id, DeviceCommand.UPDATE_SETTING, {"stillTime": int(value * 60)}
            ),
            ignore_types=[T7],
        ),
        PetKitNumberDesc(
            # T7: stillTime is exposed raw (1200-3600 s, 60 s steps).
            key="Cleaning Delay",
            translation_key="cleaning_delay",
            entity_category=EntityCategory.CONFIG,
            native_min_value=1200,
            native_max_value=3600,
            native_step=60,
            # Bug fix: the value handled here is raw seconds (no /60 conversion,
            # unlike the entry above), so the unit must be SECONDS, not MINUTES.
            native_unit_of_measurement=UnitOfTime.SECONDS,
            mode=NumberMode.SLIDER,
            native_value=lambda device: device.settings.still_time,
            action=lambda api, device, value: api.send_api_request(
                device.id, DeviceCommand.UPDATE_SETTING, {"stillTime": int(value)}
            ),
            only_for_types=[T7],
        ),
    ],
    WaterFountain: [*COMMON_ENTITIES],
    Purifier: [*COMMON_ENTITIES],
    Pet: [
        *COMMON_ENTITIES,
        PetKitNumberDesc(
            key="Pet weight",
            translation_key="pet_weight",
            entity_picture=lambda pet: pet.avatar,
            native_min_value=1,
            native_max_value=100,
            native_step=0.1,
            native_unit_of_measurement=UnitOfMass.KILOGRAMS,
            device_class=NumberDeviceClass.WEIGHT,
            mode=NumberMode.BOX,
            native_value=lambda device: device.pet_details.weight,
            # NOTE(review): int(value) discards the 0.1 kg step resolution —
            # confirm whether the API expects whole kilograms.
            action=lambda api, device, value: api.send_api_request(
                device.id, PetCommand.PET_UPDATE_SETTING, {"weight": int(value)}
            ),
            only_for_types=[PET],
        ),
    ],
}
async def async_setup_entry(
    hass: HomeAssistant,
    entry: PetkitConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up number entities using config entry.

    For each Petkit device, instantiate one PetkitNumber per descriptor in
    NUMBER_MAPPING whose device type matches and that reports itself as
    supported for this specific device.
    """
    devices = entry.runtime_data.client.petkit_entities.values()
    entities = [
        PetkitNumber(
            coordinator=entry.runtime_data.coordinator,
            entity_description=entity_description,
            device=device,
        )
        for device in devices
        for device_type, entity_descriptions in NUMBER_MAPPING.items()
        if isinstance(device, device_type)
        for entity_description in entity_descriptions
        if entity_description.is_supported(device)  # Check if the entity is supported
    ]
    # Consistent with the other platforms (see binary_sensor): log how many
    # entities were created out of the total declared descriptors.
    LOGGER.debug(
        "NUMBER : Adding %s (on %s available)",
        len(entities),
        sum(len(descriptors) for descriptors in NUMBER_MAPPING.values()),
    )
    async_add_entities(entities)
class PetkitNumber(PetkitEntity, NumberEntity):
    """Petkit Smart Devices Number class.

    Bridges a PetKitNumberDesc descriptor to a Home Assistant number
    entity: limits, step and input mode come from the descriptor, the
    current value is read from coordinator data, and writes are delegated
    to the descriptor's ``action`` callback.
    """

    entity_description: PetKitNumberDesc

    def __init__(
        self,
        coordinator: PetkitDataUpdateCoordinator,
        entity_description: PetKitNumberDesc,
        device: Feeder | Litter | WaterFountain | Purifier | Pet,
    ) -> None:
        """Initialize the number entity."""
        super().__init__(coordinator, device)
        self.coordinator = coordinator
        self.entity_description = entity_description
        self.device = device

    @property
    def unique_id(self) -> str:
        """Return a unique ID for this number entity."""
        return f"{self.device.device_nfo.device_type}_{self.device.sn}_{self.entity_description.key}"

    @property
    def entity_picture(self) -> str | None:
        """Grab the associated pet picture, if the descriptor provides one."""
        if self.entity_description.entity_picture:
            return self.entity_description.entity_picture(self.device)
        return None

    @property
    def mode(self) -> NumberMode:
        """Return the input mode (slider or box) from the descriptor."""
        return self.entity_description.mode

    @property
    def native_min_value(self) -> float | None:
        """Return the minimum allowed value."""
        return self.entity_description.native_min_value

    @property
    def native_max_value(self) -> float | None:
        """Return the maximum allowed value."""
        return self.entity_description.native_max_value

    @property
    def native_step(self) -> float | None:
        """Return the step between two selectable values."""
        return self.entity_description.native_step

    @property
    def native_value(self) -> float | None:
        """Return the current value computed from fresh coordinator data."""
        device_data = self.coordinator.data.get(self.device.id)
        if device_data:
            return self.entity_description.native_value(device_data)
        return None

    @property
    def available(self) -> bool:
        """Return whether this number entity is available.

        Based on the device power/online state (``state.pim``); entities
        whose data exposes no such state fall back to available.
        """
        device_data = self.coordinator.data.get(self.device.id)
        if (
            device_data
            and hasattr(device_data, "state")
            and hasattr(device_data.state, "pim")
        ):
            return device_data.state.pim in POWER_ONLINE_STATE
        return True

    async def async_set_native_value(self, value: float) -> None:
        """Push a new value to the device via the descriptor's action.

        Note: Home Assistant's NumberEntity passes ``value`` as a float
        (the previous ``str`` annotation was incorrect).
        """
        LOGGER.debug(
            "Setting value for : %s with value : %s", self.entity_description.key, value
        )
        await self.entity_description.action(
            self.coordinator.config_entry.runtime_data.client, self.device, value
        )
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | false |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/media_source.py | custom_components/petkit/media_source.py | """Media Source Implementation."""
from __future__ import annotations
import asyncio
from datetime import datetime
from pathlib import Path
import re
from custom_components.petkit.const import (
CONF_MEDIA_PATH,
COORDINATOR,
DEFAULT_MEDIA_PATH,
DOMAIN,
LOGGER,
MEDIA_ROOT,
MEDIA_SECTION,
)
from homeassistant.components.media_player import (
MediaClass,
MediaType,
async_process_play_media_url,
)
from homeassistant.components.media_source import (
BrowseMediaSource,
MediaSource,
MediaSourceItem,
PlayMedia,
)
from homeassistant.core import HomeAssistant
# File extensions handled by this media source (videos and their
# snapshot thumbnails).
EXT_MP4 = ".mp4"
EXT_JPG = ".jpg"
async def async_get_media_source(hass: HomeAssistant) -> PetkitMediaSource:
    """Create and return the Petkit media source instance."""
    source = PetkitMediaSource(hass)
    return source
class PetkitMediaSource(MediaSource):
    """Provide Petkit media source recordings.

    Exposes recordings stored on disk under ``/media/<configured path>``
    to Home Assistant's media browser, and resolves them to playback
    URLs under the ``/media/local`` root.
    """

    # Display name of this media source in the media browser.
    name: str = "Petkit"

    def __init__(self, hass: HomeAssistant) -> None:
        """Initialize PetkitMediaSource."""
        super().__init__(DOMAIN)
        self.hass = hass
        # NOTE(review): get_coordinator() can return None; the attribute
        # access below would then raise — confirm setup order guarantees
        # the coordinator exists before the media source is created.
        self.coordinator = self.get_coordinator()
        # Normalize like the media coordinator: store under /media/<name>
        raw = Path(
            self.coordinator.config_entry.options.get(MEDIA_SECTION, {}).get(
                CONF_MEDIA_PATH, DEFAULT_MEDIA_PATH
            )
        )
        # Strip the root so an absolute configured path becomes relative,
        # then anchor it under DEFAULT_MEDIA_PATH.
        if raw.is_absolute():
            raw = raw.relative_to(raw.anchor)
        self.media_path = Path(DEFAULT_MEDIA_PATH) / raw

    def get_coordinator(self):
        """Retrieve the integration's coordinator.

        Returns None (after logging an error) if the integration has not
        stored a coordinator in ``hass.data`` yet.
        """
        if DOMAIN in self.hass.data and COORDINATOR in self.hass.data[DOMAIN]:
            return self.hass.data[DOMAIN][COORDINATOR]
        LOGGER.error("Petkit coordinator not found in hass.data.")
        return None

    async def async_resolve_media(self, item: MediaSourceItem) -> PlayMedia:
        """Resolve media to a URL/path.

        Raises ValueError when the identified file does not exist on disk.
        """
        # The identifier is a path relative to media_path (set by
        # _build_file_media_item during browsing).
        file_path = self.media_path / Path(item.identifier)
        if not file_path.exists():
            raise ValueError(f"File not found: {file_path}")
        LOGGER.debug(f"Media Source: Resolving media {file_path}")
        # Convert absolute FS path (/media/...) to URL under /media/local/<relative>
        rel = file_path.relative_to(Path(DEFAULT_MEDIA_PATH))
        url_path = (Path(MEDIA_ROOT) / rel).as_posix()
        url = async_process_play_media_url(
            self.hass,
            url_path,
            allow_relative_url=True,
            for_supervisor_network=True,
        )
        mime_type = self.get_mime_type(file_path.suffix)
        return PlayMedia(url, mime_type)

    async def async_browse_media(self, item: MediaSourceItem) -> BrowseMediaSource:
        """Browse the media source.

        Raises ValueError if the requested path is missing or not a
        directory.
        """
        # Avoid duplicating the base path when identifier is empty.
        identifier = item.identifier or ""
        current_path = self.media_path / Path(identifier)
        if not current_path.exists() or not current_path.is_dir():
            raise ValueError(f"Invalid path: {current_path}")
        LOGGER.debug(f"Media Source: Browsing {current_path}")
        # Directory listing does blocking I/O, so run it off the event loop.
        children = await asyncio.to_thread(self._get_children_from_path, current_path)
        # NOTE(review): the root identifier here is the *absolute* path
        # while children use paths relative to media_path — confirm the
        # browse round-trip handles both forms consistently.
        return BrowseMediaSource(
            domain=DOMAIN,
            identifier=str(current_path),
            title=DOMAIN.capitalize(),
            media_class=MediaClass.DIRECTORY,
            media_content_type=MediaType.PLAYLIST,
            can_expand=True,
            can_play=False,
        children=children,
        ) if False else BrowseMediaSource(
            domain=DOMAIN,
            identifier=str(current_path),
            title=DOMAIN.capitalize(),
            media_class=MediaClass.DIRECTORY,
            media_content_type=MediaType.PLAYLIST,
            can_expand=True,
            can_play=False,
            children=children,
        )

    def _get_children_from_path(self, path: Path):
        """Get children from a path.

        Directories become expandable nodes titled with the device name
        (or converted date); files become playable leaf nodes.
        """
        children = []
        for child in sorted(path.iterdir()):
            LOGGER.debug(f"Media Source: Processing child {child.name} in {path}")
            if child.is_dir():
                # Directory names are either 8-digit dates (converted to
                # DD/MM/YYYY), device ids (mapped to device names), or the
                # literal "snapshot"/"video" folders.
                title = self.get_device_name_from_data(
                    self.convert_date(child.name)
                ).capitalize()
                if title.lower() == "snapshot":
                    media_class = MediaClass.IMAGE
                elif title.lower() == "video":
                    media_class = MediaClass.VIDEO
                else:
                    media_class = MediaClass.DIRECTORY
                children.append(
                    BrowseMediaSource(
                        domain=DOMAIN,
                        identifier=str(child.relative_to(self.media_path)),
                        title=title,
                        media_class=media_class,
                        media_content_type=MediaType.VIDEO,
                        can_expand=True,
                        can_play=False,
                    )
                )
            elif child.is_file():
                children.append(self._build_file_media_item(child))
        return children

    def _build_file_media_item(self, child: Path) -> BrowseMediaSource:
        """Build a file media item.

        The thumbnail is looked up in the sibling "snapshot" folder with
        the same basename and a .jpg suffix.
        """
        # Build thumbnail URL under /media/local/<relative>
        rel_snapshot_parent = child.parent.relative_to(
            Path(DEFAULT_MEDIA_PATH)
        ).with_name("snapshot")
        thumbnail_path = (
            Path(MEDIA_ROOT)
            / rel_snapshot_parent
            / child.name.replace(EXT_MP4, EXT_JPG)
        ).as_posix()
        thumbnail_url = async_process_play_media_url(
            self.hass,
            thumbnail_path,
            allow_relative_url=True,
            for_supervisor_network=True,
        )
        media_class = self.get_media_class(child.suffix)
        media_type = self.get_media_type(child.suffix)
        return BrowseMediaSource(
            domain=DOMAIN,
            identifier=str(child.relative_to(self.media_path)),
            title=self.extract_timestamp_and_convert(child.name),
            media_class=media_class,
            media_content_type=media_type,
            thumbnail=thumbnail_url,
            can_expand=False,
            can_play=True,
        )

    def get_device_name_from_data(self, match_device: str) -> str:
        """Match a string with a key in the data dictionary and extract the device name.

        Falls back to returning the input unchanged when no device key
        contains it.
        """
        data = self.coordinator.data or {}
        for key, value in data.items():
            if match_device in str(key):
                return value.device_nfo.device_name.capitalize()
        return match_device

    @staticmethod
    def convert_date(input_string: str) -> str:
        """Convert a string in the format YYYYMMDD to DD/MM/YYYY.

        Non-matching or unparsable strings are returned unchanged.
        """
        match = re.fullmatch(r"\d{8}", input_string)
        if not match:
            return input_string
        try:
            date_obj = datetime.strptime(input_string, "%Y%m%d")
            return date_obj.strftime("%d/%m/%Y")
        except ValueError:
            return input_string

    @staticmethod
    def extract_timestamp_and_convert(filename: str) -> str:
        """Extract the timestamp from a filename and convert it to HH:MM:SS.

        Expects names shaped like ``<prefix>_<epoch>.<ext>``; the epoch is
        rendered in local time. Malformed names are returned unchanged.
        """
        try:
            timestamp_str = filename.split("_")[1].split(".")[0]
            timestamp = int(timestamp_str)
            time_obj = datetime.fromtimestamp(timestamp).time()
            return time_obj.strftime("%H:%M:%S")
        except (IndexError, ValueError):
            return filename

    @staticmethod
    def get_media_class(extension: str) -> str:
        """Return the media class based on the file extension."""
        if extension == EXT_MP4:
            return MediaClass.VIDEO
        if extension == EXT_JPG:
            return MediaClass.IMAGE
        return MediaClass.APP

    @staticmethod
    def get_media_type(extension: str) -> str:
        """Return the media type based on the file extension."""
        if extension == EXT_MP4:
            return MediaType.VIDEO
        if extension == EXT_JPG:
            return MediaType.IMAGE
        return MediaType.APP

    @staticmethod
    def get_mime_type(extension: str) -> str:
        """Get MIME type for a given file extension.

        Unknown extensions default to application/octet-stream.
        """
        mime_types = {
            EXT_MP4: "video/mp4",
            EXT_JPG: "image/jpeg",
        }
        return mime_types.get(extension, "application/octet-stream")
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | false |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/binary_sensor.py | custom_components/petkit/binary_sensor.py | """Binary sensor platform for Petkit Smart Devices integration."""
from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime
from typing import TYPE_CHECKING
from pypetkitapi import (
D4S,
D4SH,
T4,
T5,
T6,
T7,
Feeder,
Litter,
Pet,
Purifier,
WaterFountain,
)
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.const import EntityCategory
from . import LOGGER
from .entity import PetKitDescSensorBase, PetkitEntity
if TYPE_CHECKING:
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .coordinator import PetkitDataUpdateCoordinator
from .data import PetkitConfigEntry, PetkitDevices
@dataclass(frozen=True, kw_only=True)
class PetKitBinarySensorDesc(PetKitDescSensorBase, BinarySensorEntityDescription):
    """A class that describes sensor entities."""
    # When set, a truthy sensor value switches the coordinator to fast
    # (smart) polling so short-lived states refresh quickly — see
    # PetkitBinarySensor.is_on.
    enable_fast_poll: bool = False
# Descriptors shared by every device type; prepended to each per-type
# list in BINARY_SENSOR_MAPPING.
COMMON_ENTITIES = [
    PetKitBinarySensorDesc(
        key="Camera status",
        translation_key="camera_status",
        value=lambda device: device.state.camera_status,
    ),
    PetKitBinarySensorDesc(
        key="Care plus subscription",
        translation_key="care_plus_subscription",
        entity_category=EntityCategory.DIAGNOSTIC,
        # Subscription is "on" while its expiry timestamp lies in the future.
        value=lambda device: (
            isinstance(device.cloud_product.work_indate, (int, float))
            and datetime.fromtimestamp(device.cloud_product.work_indate)
            > datetime.now()
        ),
    ),
]
BINARY_SENSOR_MAPPING: dict[type[PetkitDevices], list[PetKitBinarySensorDesc]] = {
Feeder: [
*COMMON_ENTITIES,
PetKitBinarySensorDesc(
key="Feeding",
translation_key="feeding",
device_class=BinarySensorDeviceClass.RUNNING,
value=lambda device: device.state.feeding,
enable_fast_poll=True,
),
PetKitBinarySensorDesc(
key="Battery installed",
translation_key="battery_installed",
entity_category=EntityCategory.DIAGNOSTIC,
value=lambda device: device.state.battery_power,
),
PetKitBinarySensorDesc(
key="Eating",
translation_key="eating",
device_class=BinarySensorDeviceClass.OCCUPANCY,
value=lambda device: device.state.eating,
enable_fast_poll=True,
),
PetKitBinarySensorDesc(
key="Food level",
translation_key="food_level",
device_class=BinarySensorDeviceClass.PROBLEM,
value=lambda device: device.state.food == 0,
ignore_types=[D4S, D4SH],
),
PetKitBinarySensorDesc(
key="Food level 1",
translation_key="food_level_1",
device_class=BinarySensorDeviceClass.PROBLEM,
value=lambda device: device.state.food1 == 0,
only_for_types=[D4S, D4SH],
),
PetKitBinarySensorDesc(
key="Food level 2",
translation_key="food_level_2",
device_class=BinarySensorDeviceClass.PROBLEM,
value=lambda device: device.state.food2 == 0,
only_for_types=[D4S, D4SH],
),
],
Litter: [
*COMMON_ENTITIES,
PetKitBinarySensorDesc(
key="Sand lack",
translation_key="sand_lack",
device_class=BinarySensorDeviceClass.PROBLEM,
value=lambda device: device.state.sand_lack,
),
PetKitBinarySensorDesc(
key="Low power",
translation_key="low_power",
entity_category=EntityCategory.DIAGNOSTIC,
device_class=BinarySensorDeviceClass.PROBLEM,
value=lambda device: device.state.low_power,
),
PetKitBinarySensorDesc(
key="Waste bin",
translation_key="waste_bin",
device_class=BinarySensorDeviceClass.PROBLEM,
value=lambda device: device.state.box_full,
),
PetKitBinarySensorDesc(
key="Waste bin presence",
translation_key="waste_bin_presence",
device_class=BinarySensorDeviceClass.PROBLEM,
value=lambda device: not device.state.box_state,
only_for_types=[T4, T5],
),
PetKitBinarySensorDesc(
key="Waste bin presence",
translation_key="waste_bin_presence",
device_class=BinarySensorDeviceClass.PROBLEM,
value=lambda device: device.state.box_store_state,
only_for_types=[T6],
),
PetKitBinarySensorDesc(
key="Toilet occupied",
translation_key="toilet_occupied",
device_class=BinarySensorDeviceClass.OCCUPANCY,
value=lambda device: bool(device.state.pet_in_time),
enable_fast_poll=True,
),
PetKitBinarySensorDesc(
key="Frequent use",
translation_key="frequent_use",
device_class=BinarySensorDeviceClass.PROBLEM,
value=lambda device: bool(device.state.frequent_restroom),
),
PetKitBinarySensorDesc(
key="Deodorization",
translation_key="deodorization_running",
device_class=BinarySensorDeviceClass.RUNNING,
value=lambda device: device.state.refresh_state is not None,
force_add=[T5],
ignore_types=[T4, T6], # Not sure for T3 ?
),
PetKitBinarySensorDesc(
key="N60 deodorizer presence",
translation_key="n60_deodorize_presence",
device_class=BinarySensorDeviceClass.PROBLEM,
value=lambda device: (
None
if device.state.spray_state is None
else device.state.spray_state == 0
),
),
PetKitBinarySensorDesc(
key="Weight error",
translation_key="weight_error",
entity_category=EntityCategory.DIAGNOSTIC,
device_class=BinarySensorDeviceClass.PROBLEM,
value=lambda device: device.state.pet_error,
ignore_types=[T7],
),
PetKitBinarySensorDesc(
key="Pet error",
translation_key="pet_error",
entity_category=EntityCategory.DIAGNOSTIC,
device_class=BinarySensorDeviceClass.PROBLEM,
value=lambda device: device.state.pet_error,
only_for_types=[T7],
),
],
WaterFountain: [
*COMMON_ENTITIES,
PetKitBinarySensorDesc(
key="Lack warning",
translation_key="lack_warning",
device_class=BinarySensorDeviceClass.PROBLEM,
value=lambda device: device.lack_warning,
),
PetKitBinarySensorDesc(
key="Low battery",
translation_key="low_battery",
device_class=BinarySensorDeviceClass.PROBLEM,
entity_category=EntityCategory.DIAGNOSTIC,
value=lambda device: device.low_battery,
),
PetKitBinarySensorDesc(
key="Replace filter",
translation_key="replace_filter",
device_class=BinarySensorDeviceClass.PROBLEM,
value=lambda device: device.filter_warning,
),
PetKitBinarySensorDesc(
key="On ac power",
translation_key="on_ac_power",
device_class=BinarySensorDeviceClass.POWER,
entity_category=EntityCategory.DIAGNOSTIC,
value=lambda device: (
None
if device.status.electric_status is None
else device.status.electric_status > 0
),
),
PetKitBinarySensorDesc(
key="Do not disturb state",
translation_key="do_not_disturb_state",
value=lambda device: device.is_night_no_disturbing,
),
PetKitBinarySensorDesc(
key="Pet detected",
translation_key="pet_detected",
device_class=BinarySensorDeviceClass.OCCUPANCY,
value=lambda device: (
None
if device.status.detect_status is None
else device.status.detect_status > 0
),
),
PetKitBinarySensorDesc(
key="Pump running",
translation_key="pump_running",
device_class=BinarySensorDeviceClass.RUNNING,
value=lambda device: (
None
if device.status.run_status is None
else device.status.run_status > 0
),
),
],
Purifier: [
*COMMON_ENTITIES,
PetKitBinarySensorDesc(
key="Light",
translation_key="light",
device_class=BinarySensorDeviceClass.POWER,
value=lambda device: None if device.lighting == -1 else device.lighting,
),
PetKitBinarySensorDesc(
key="Spray",
translation_key="spray",
device_class=BinarySensorDeviceClass.RUNNING,
value=lambda device: None if device.refreshing == -1 else device.refreshing,
),
PetKitBinarySensorDesc(
key="Liquid lack",
translation_key="liquid_lack",
device_class=BinarySensorDeviceClass.PROBLEM,
value=lambda device: (
None if device.liquid_lack is None else device.liquid_lack == 0
),
),
],
Pet: [
*COMMON_ENTITIES,
PetKitBinarySensorDesc(
key="Yowling detected",
translation_key="yowling_detected",
entity_picture=lambda pet: pet.avatar,
device_class=BinarySensorDeviceClass.SOUND,
value=lambda pet: (
None if pet.yowling_detected is None else pet.yowling_detected == 1
),
),
PetKitBinarySensorDesc(
key="Abnormal urine Ph detected",
translation_key="abnormal_ph_detected",
entity_picture=lambda pet: pet.avatar,
device_class=BinarySensorDeviceClass.PROBLEM,
value=lambda pet: (
None
if pet.abnormal_ph_detected is None
else pet.abnormal_ph_detected == 1
),
),
PetKitBinarySensorDesc(
key="Soft stool detected",
translation_key="soft_stool_detected",
entity_picture=lambda pet: pet.avatar,
device_class=BinarySensorDeviceClass.PROBLEM,
value=lambda pet: (
None
if pet.soft_stool_detected is None
else pet.soft_stool_detected == 1
),
),
],
}
async def async_setup_entry(
    hass: HomeAssistant,
    entry: PetkitConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Create binary sensor entities for every supported device descriptor."""
    coordinator = entry.runtime_data.coordinator
    entities: list[PetkitBinarySensor] = []
    for device in entry.runtime_data.client.petkit_entities.values():
        for device_type, descriptors in BINARY_SENSOR_MAPPING.items():
            if not isinstance(device, device_type):
                continue
            # Keep only descriptors that apply to this concrete device.
            entities.extend(
                PetkitBinarySensor(
                    coordinator=coordinator,
                    entity_description=descriptor,
                    device=device,
                )
                for descriptor in descriptors
                if descriptor.is_supported(device)
            )
    LOGGER.debug(
        "BINARY_SENSOR : Adding %s (on %s available)",
        len(entities),
        sum(len(descriptors) for descriptors in BINARY_SENSOR_MAPPING.values()),
    )
    async_add_entities(entities)
class PetkitBinarySensor(PetkitEntity, BinarySensorEntity):
    """Binary sensor entity driven by a PetKitBinarySensorDesc descriptor."""

    entity_description: PetKitBinarySensorDesc

    def __init__(
        self,
        coordinator: PetkitDataUpdateCoordinator,
        entity_description: PetKitBinarySensorDesc,
        device: Feeder | Litter | WaterFountain,
    ) -> None:
        """Store coordinator, descriptor and device for this sensor."""
        super().__init__(coordinator, device)
        self.coordinator = coordinator
        self.entity_description = entity_description
        self.device = device

    @property
    def entity_picture(self) -> str | None:
        """Return the associated pet picture when the descriptor has one."""
        picture_fn = self.entity_description.entity_picture
        if picture_fn:
            return picture_fn(self.device)
        return None

    @property
    def is_on(self) -> bool | None:
        """Evaluate the descriptor's value lambda against fresh device data."""
        device_data = self.coordinator.data.get(self.device.id)
        if not device_data:
            return None
        state = self.entity_description.value(device_data)
        # A truthy value on a fast-poll descriptor kicks the coordinator
        # into smart polling (unless it is already active).
        wants_fast_poll = (
            self.entity_description.enable_fast_poll
            and state
            and self.coordinator.fast_poll_tic < 1
        )
        if wants_fast_poll:
            self.coordinator.enable_smart_polling(24)
        return state
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | false |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/const.py | custom_components/petkit/const.py | """Constants for Petkit Smart Devices integration."""
from logging import Logger, getLogger
LOGGER: Logger = getLogger(__package__)
DOMAIN = "petkit"
# Keys under hass.data[DOMAIN] where the update coordinators are stored.
COORDINATOR = "coordinator"
COORDINATOR_MEDIA = "coordinator_media"
COORDINATOR_BLUETOOTH = "coordinator_bluetooth"
# Configuration
# Option keys (and their options-flow section names) for the config entry.
CONF_SCAN_INTERVAL_MEDIA = "scan_interval_media"
CONF_SMART_POLLING = "smart_polling"
BT_SECTION = "bluetooth_options"
CONF_BLE_RELAY_ENABLED = "ble_relay_enabled"
CONF_SCAN_INTERVAL_BLUETOOTH = "scan_interval_bluetooth"
MEDIA_SECTION = "medias_options"
CONF_MEDIA_DL_VIDEO = "media_dl_video"
CONF_MEDIA_DL_IMAGE = "media_dl_image"
CONF_MEDIA_EV_TYPE = "media_ev_type"
CONF_DELETE_AFTER = "delete_media_after"
CONF_MEDIA_PATH = "media_path"
# Default configuration values
DEFAULT_SCAN_INTERVAL = 60  # presumably seconds — confirm against coordinator setup
DEFAULT_SCAN_INTERVAL_MEDIA = 15
DEFAULT_SCAN_INTERVAL_BLUETOOTH = 30
# Default event types fetched for media (see CONF_MEDIA_EV_TYPE).
DEFAULT_EVENTS = [
    "Pet",
    "Eat",
    "Feed",
    "Toileting",
    "Dish_before",
    "Dish_after",
    "Waste_check",
]
DEFAULT_DL_VIDEO = False
DEFAULT_DL_IMAGE = True
DEFAULT_SMART_POLLING = True
DEFAULT_BLUETOOTH_RELAY = True
DEFAULT_DELETE_AFTER = 3  # presumably days — confirm against media cleanup
# Filesystem root under which downloaded media is stored.
DEFAULT_MEDIA_PATH = "/media"
# URL root that maps onto DEFAULT_MEDIA_PATH for media-source playback URLs.
MEDIA_ROOT = "/media/local"
# Update interval
# Bounds enforced on the user-supplied scan interval.
MAX_SCAN_INTERVAL = 120
MIN_SCAN_INTERVAL = 5
# Petkit devices types to name translation
PETKIT_DEVICES_MAPPING = {
"0k2": "Air Magicube",
"0k3": "Air Smart Spray",
"0d3": "Fresh Element Infinity",
"0d4": "Fresh Element Solo",
"0d4s": "Fresh Element Gemini",
"0d4h": "YumShare Solo",
"0d4sh": "YumShare Dual Hopper",
"0feeder": "Fresh Element",
"0feedermini": "Fresh Element Mini Pro",
"0t3": "Pura X",
"0t4": "Puramax",
"1t4": "Puramax",
"2t4": "Puramax 2",
"0t5": "Purobot Max",
"1t5": "Purobot Max",
"0t6": "Purobot Ultra",
"1t6": "Purobot Ultra",
"0t7": "Purobot Crystal Duo",
"2w5": "Eversweet 5 Mini",
"4w5": "Eversweet 3 Pro",
"5w5": "Eversweet Solo 2",
"6w5": "Eversweet 3 Pro (UVC Version)",
"0ctw3": "Eversweet Max",
"0pet": "Pet",
}
# Messages constants
NO_ERROR = "No error"
# Status mapping
POWER_ONLINE_STATE = [1, 2]
DEVICE_STATUS_MAP = {
0: "Offline",
1: "Online",
2: "On battery",
}
BATTERY_LEVEL_MAP = {
"0": "Low",
"1": "Normal",
}
# Text input regex
INPUT_FEED_PATTERN = "^(0|[1-9][0-9]?|1[0-9]{2}|200)$"
# Select list mapping
SURPLUS_FOOD_LEVEL_OPT = {1: "Less", 2: "Moderate", 3: "Full"}
IA_DETECTION_SENSITIVITY_OPT = {1: "Low", 2: "Mid", 3: "High"}
LITTER_TYPE_OPT = {1: "Bentonite", 2: "Tofu", 3: "Mixed"}
CLEANING_INTERVAL_OPT = {
0: "Disabled",
300: "5min",
600: "10min",
900: "15min",
1800: "30min",
2700: "45min",
3600: "1h",
4500: "1h15min",
5400: "1h30min",
6300: "1h45min",
7200: "2h",
}
MANUAL_FEED_OPT = {
0: "",
5: "5g",
10: "10g",
15: "15g",
20: "20g",
25: "25g",
30: "30g",
35: "35g",
40: "40g",
45: "45g",
50: "50g",
}
FOUNTAIN_WORKING_MODE_CTW3 = {
1: "Standard",
2: "Intermittent",
3: "Battery",
}
FOUNTAIN_WORKING_MODE = {
1: "Normal",
2: "Smart",
}
LED_BRIGHTNESS = {
1: "Low",
2: "Normal",
3: "High",
}
# Fan mode
PURIFIER_MODE = {0: "Auto", 1: "Silent", 2: "Standard", 3: "Strong"}
# Litter Event
EVENT_MAPPING = {
5: {
0: {
0: "auto_cleaning_completed",
1: "periodic_cleaning_completed",
2: "manual_cleaning_completed",
3: "manual_cleaning_completed",
},
1: {
0: "auto_cleaning_terminated",
1: "periodic_cleaning_terminated",
2: "manual_cleaning_terminated",
3: "manual_cleaning_terminated",
},
2: {
0: {
"full": "auto_cleaning_failed_full",
"hallL": "auto_cleaning_failed_hall_l",
"hallT": "auto_cleaning_failed_hall_t",
},
1: {
"full": "scheduled_cleaning_failed_full",
"hallL": "scheduled_cleaning_failed_hall_l",
"hallT": "scheduled_cleaning_failed_hall_t",
},
2: {
"full": "manual_cleaning_failed_full",
"hallL": "manual_cleaning_failed_hall_l",
"hallT": "manual_cleaning_failed_hall_t",
},
3: {
"full": "manual_cleaning_failed_full",
"hallL": "manual_cleaning_failed_hall_l",
"hallT": "manual_cleaning_failed_hall_t",
},
},
3: {
0: "auto_cleaning_canceled",
1: "periodic_cleaning_canceled",
2: "manual_cleaning_canceled",
3: "manual_cleaning_canceled",
},
4: {
0: "auto_cleaning_canceled_kitten",
1: "periodic_cleaning_canceled_kitten",
},
},
6: {
0: "litter_empty_completed",
1: "litter_empty_terminated",
2: {
"full": "litter_empty_failed_full",
"hallL": "litter_empty_failed_hall_l",
"hallT": "litter_empty_failed_hall_t",
},
},
7: {
0: "reset_completed",
1: "reset_terminated",
2: {
"full": "reset_failed_full",
"hallL": "reset_failed_hall_l",
"hallT": "reset_failed_hall_t",
},
},
8: {
0: {
0: "deodorant_finished",
1: "periodic_odor_completed",
2: "manual_odor_completed",
3: "manual_odor_completed",
},
1: {
0: "deodorant_finished_liquid_lack",
1: "periodic_odor_completed_liquid_lack",
2: "manual_odor_completed_liquid_lack",
3: "manual_odor_completed_liquid_lack",
},
2: {
0: "auto_odor_failed",
1: "periodic_odor_failed",
2: "manual_odor_failed",
3: "manual_odor_failed",
},
},
}
# Country code mapping / Timezone
CODE_TO_COUNTRY_DICT = {
"AD": "Andorra",
"AE": "United Arab Emirates",
"AF": "Afghanistan",
"AG": "Antigua and Barbuda",
"AI": "Anguilla",
"AL": "Albania",
"AM": "Armenia",
"AO": "Angola",
"AQ": "Antarctica",
"AR": "Argentina",
"AS": "American Samoa",
"AT": "Austria",
"AU": "Australia",
"AW": "Aruba",
"AX": "Åland Islands",
"AZ": "Azerbaijan",
"BA": "Bosnia and Herzegovina",
"BB": "Barbados",
"BD": "Bangladesh",
"BE": "Belgium",
"BF": "Burkina Faso",
"BG": "Bulgaria",
"BH": "Bahrain",
"BI": "Burundi",
"BJ": "Benin",
"BL": "Saint Barthélemy",
"BM": "Bermuda",
"BN": "Brunei",
"BO": "Bolivia",
"BQ": "Caribbean Netherlands",
"BR": "Brazil",
"BS": "Bahamas",
"BT": "Bhutan",
"BV": "Bouvet Island",
"BW": "Botswana",
"BY": "Belarus",
"BZ": "Belize",
"CA": "Canada",
"CC": "Cocos Islands",
"CD": "DR Congo",
"CF": "Central African Republic",
"CG": "Republic of the Congo",
"CH": "Switzerland",
"CI": "Côte d'Ivoire",
"CK": "Cook Islands",
"CL": "Chile",
"CM": "Cameroon",
"CN": "China",
"CO": "Colombia",
"CR": "Costa Rica",
"CU": "Cuba",
"CV": "Cape Verde",
"CW": "Curaçao",
"CX": "Christmas Island",
"CY": "Cyprus",
"CZ": "Czech Republic",
"DE": "Germany",
"DJ": "Djibouti",
"DK": "Denmark",
"DM": "Dominica",
"DO": "Dominican Republic",
"DZ": "Algeria",
"EC": "Ecuador",
"EE": "Estonia",
"EG": "Egypt",
"EH": "Western Sahara",
"ER": "Eritrea",
"ES": "Spain",
"ET": "Ethiopia",
"FI": "Finland",
"FJ": "Fiji",
"FK": "Falkland Islands",
"FM": "Micronesia",
"FO": "Faroe Islands",
"FR": "France",
"GA": "Gabon",
"GB": "United Kingdom",
"GD": "Grenada",
"GE": "Georgia",
"GF": "French Guiana",
"GG": "Guernsey",
"GH": "Ghana",
"GI": "Gibraltar",
"GL": "Greenland",
"GM": "Gambia",
"GN": "Guinea",
"GP": "Guadeloupe",
"GQ": "Equatorial Guinea",
"GR": "Greece",
"GS": "South Georgia",
"GT": "Guatemala",
"GU": "Guam",
"GW": "Guinea-Bissau",
"GY": "Guyana",
"HK": "Hong Kong",
"HM": "Heard Island",
"HN": "Honduras",
"HR": "Croatia",
"HT": "Haiti",
"HU": "Hungary",
"ID": "Indonesia",
"IE": "Ireland",
"IL": "Israel",
"IM": "Isle of Man",
"IN": "India",
"IO": "British Indian Ocean Territory",
"IQ": "Iraq",
"IR": "Iran",
"IS": "Iceland",
"IT": "Italy",
"JE": "Jersey",
"JM": "Jamaica",
"JO": "Jordan",
"JP": "Japan",
"KE": "Kenya",
"KG": "Kyrgyzstan",
"KH": "Cambodia",
"KI": "Kiribati",
"KM": "Comoros",
"KN": "Saint Kitts and Nevis",
"KP": "North Korea",
"KR": "South Korea",
"KW": "Kuwait",
"KY": "Cayman Islands",
"KZ": "Kazakhstan",
"LA": "Laos",
"LB": "Lebanon",
"LC": "Saint Lucia",
"LI": "Liechtenstein",
"LK": "Sri Lanka",
"LR": "Liberia",
"LS": "Lesotho",
"LT": "Lithuania",
"LU": "Luxembourg",
"LV": "Latvia",
"LY": "Libya",
"MA": "Morocco",
"MC": "Monaco",
"MD": "Moldova",
"ME": "Montenegro",
"MF": "Saint Martin",
"MG": "Madagascar",
"MH": "Marshall Islands",
"MK": "North Macedonia",
"ML": "Mali",
"MM": "Myanmar",
"MN": "Mongolia",
"MO": "Macao",
"MP": "Northern Mariana Islands",
"MQ": "Martinique",
"MR": "Mauritania",
"MS": "Montserrat",
"MT": "Malta",
"MU": "Mauritius",
"MV": "Maldives",
"MW": "Malawi",
"MX": "Mexico",
"MY": "Malaysia",
"MZ": "Mozambique",
"NA": "Namibia",
"NC": "New Caledonia",
"NE": "Niger",
"NF": "Norfolk Island",
"NG": "Nigeria",
"NI": "Nicaragua",
"NL": "Netherlands",
"NO": "Norway",
"NP": "Nepal",
"NR": "Nauru",
"NU": "Niue",
"NZ": "New Zealand",
"OM": "Oman",
"PA": "Panama",
"PE": "Peru",
"PF": "French Polynesia",
"PG": "Papua New Guinea",
"PH": "Philippines",
"PK": "Pakistan",
"PL": "Poland",
"PM": "Saint Pierre and Miquelon",
"PN": "Pitcairn",
"PR": "Puerto Rico",
"PS": "Palestine",
"PT": "Portugal",
"PW": "Palau",
"PY": "Paraguay",
"QA": "Qatar",
"RE": "Réunion",
"RO": "Romania",
"RS": "Serbia",
"RU": "Russia",
"RW": "Rwanda",
"SA": "Saudi Arabia",
"SB": "Solomon Islands",
"SC": "Seychelles",
"SD": "Sudan",
"SE": "Sweden",
"SG": "Singapore",
"SH": "Saint Helena",
"SI": "Slovenia",
"SJ": "Svalbard and Jan Mayen",
"SK": "Slovakia",
"SL": "Sierra Leone",
"SM": "San Marino",
"SN": "Senegal",
"SO": "Somalia",
"SR": "Suriname",
"SS": "South Sudan",
"ST": "São Tomé and Príncipe",
"SV": "El Salvador",
"SX": "Sint Maarten",
"SY": "Syria",
"SZ": "Eswatini",
"TC": "Turks and Caicos Islands",
"TD": "Chad",
"TF": "French Southern Territories",
"TG": "Togo",
"TH": "Thailand",
"TJ": "Tajikistan",
"TK": "Tokelau",
"TL": "Timor-Leste",
"TM": "Turkmenistan",
"TN": "Tunisia",
"TO": "Tonga",
"TR": "Turkey",
"TT": "Trinidad and Tobago",
"TV": "Tuvalu",
"TW": "Taiwan",
"TZ": "Tanzania",
"UA": "Ukraine",
"UG": "Uganda",
"UM": "U.S. Minor Outlying Islands",
"US": "United States",
"UY": "Uruguay",
"UZ": "Uzbekistan",
"VA": "Vatican City",
"VC": "Saint Vincent and the Grenadines",
"VE": "Venezuela",
"VG": "British Virgin Islands",
"VI": "Virgin Islands",
"VN": "Vietnam",
"VU": "Vanuatu",
"WF": "Wallis and Futuna",
"WS": "Samoa",
"YE": "Yemen",
"YT": "Mayotte",
"ZA": "South Africa",
"ZM": "Zambia",
"ZW": "Zimbabwe",
}
COUNTRY_TO_CODE_DICT = {v: k for k, v in CODE_TO_COUNTRY_DICT.items()}
ALL_TIMEZONES_LST = [
"Africa/Abidjan",
"Africa/Accra",
"Africa/Addis_Ababa",
"Africa/Algiers",
"Africa/Asmara",
"Africa/Asmera",
"Africa/Bamako",
"Africa/Bangui",
"Africa/Banjul",
"Africa/Bissau",
"Africa/Blantyre",
"Africa/Brazzaville",
"Africa/Bujumbura",
"Africa/Cairo",
"Africa/Casablanca",
"Africa/Ceuta",
"Africa/Conakry",
"Africa/Dakar",
"Africa/Dar_es_Salaam",
"Africa/Djibouti",
"Africa/Douala",
"Africa/El_Aaiun",
"Africa/Freetown",
"Africa/Gaborone",
"Africa/Harare",
"Africa/Johannesburg",
"Africa/Juba",
"Africa/Kampala",
"Africa/Khartoum",
"Africa/Kigali",
"Africa/Kinshasa",
"Africa/Lagos",
"Africa/Libreville",
"Africa/Lome",
"Africa/Luanda",
"Africa/Lubumbashi",
"Africa/Lusaka",
"Africa/Malabo",
"Africa/Maputo",
"Africa/Maseru",
"Africa/Mbabane",
"Africa/Mogadishu",
"Africa/Monrovia",
"Africa/Nairobi",
"Africa/Ndjamena",
"Africa/Niamey",
"Africa/Nouakchott",
"Africa/Ouagadougou",
"Africa/Porto-Novo",
"Africa/Sao_Tome",
"Africa/Timbuktu",
"Africa/Tripoli",
"Africa/Tunis",
"Africa/Windhoek",
"America/Adak",
"America/Anchorage",
"America/Anguilla",
"America/Antigua",
"America/Araguaina",
"America/Argentina/Buenos_Aires",
"America/Argentina/Catamarca",
"America/Argentina/ComodRivadavia",
"America/Argentina/Cordoba",
"America/Argentina/Jujuy",
"America/Argentina/La_Rioja",
"America/Argentina/Mendoza",
"America/Argentina/Rio_Gallegos",
"America/Argentina/Salta",
"America/Argentina/San_Juan",
"America/Argentina/San_Luis",
"America/Argentina/Tucuman",
"America/Argentina/Ushuaia",
"America/Aruba",
"America/Asuncion",
"America/Atikokan",
"America/Atka",
"America/Bahia",
"America/Bahia_Banderas",
"America/Barbados",
"America/Belem",
"America/Belize",
"America/Blanc-Sablon",
"America/Boa_Vista",
"America/Bogota",
"America/Boise",
"America/Buenos_Aires",
"America/Cambridge_Bay",
"America/Campo_Grande",
"America/Cancun",
"America/Caracas",
"America/Catamarca",
"America/Cayenne",
"America/Cayman",
"America/Chicago",
"America/Chihuahua",
"America/Coral_Harbour",
"America/Cordoba",
"America/Costa_Rica",
"America/Creston",
"America/Cuiaba",
"America/Curacao",
"America/Danmarkshavn",
"America/Dawson",
"America/Dawson_Creek",
"America/Denver",
"America/Detroit",
"America/Dominica",
"America/Edmonton",
"America/Eirunepe",
"America/El_Salvador",
"America/Ensenada",
"America/Fort_Nelson",
"America/Fort_Wayne",
"America/Fortaleza",
"America/Glace_Bay",
"America/Godthab",
"America/Goose_Bay",
"America/Grand_Turk",
"America/Grenada",
"America/Guadeloupe",
"America/Guatemala",
"America/Guayaquil",
"America/Guyana",
"America/Halifax",
"America/Havana",
"America/Hermosillo",
"America/Indiana/Indianapolis",
"America/Indiana/Knox",
"America/Indiana/Marengo",
"America/Indiana/Petersburg",
"America/Indiana/Tell_City",
"America/Indiana/Vevay",
"America/Indiana/Vincennes",
"America/Indiana/Winamac",
"America/Indianapolis",
"America/Inuvik",
"America/Iqaluit",
"America/Jamaica",
"America/Jujuy",
"America/Juneau",
"America/Kentucky/Louisville",
"America/Kentucky/Monticello",
"America/Knox_IN",
"America/Kralendijk",
"America/La_Paz",
"America/Lima",
"America/Los_Angeles",
"America/Louisville",
"America/Lower_Princes",
"America/Maceio",
"America/Managua",
"America/Manaus",
"America/Marigot",
"America/Martinique",
"America/Matamoros",
"America/Mazatlan",
"America/Mendoza",
"America/Menominee",
"America/Merida",
"America/Metlakatla",
"America/Mexico_City",
"America/Miquelon",
"America/Moncton",
"America/Monterrey",
"America/Montevideo",
"America/Montreal",
"America/Montserrat",
"America/Nassau",
"America/New_York",
"America/Nipigon",
"America/Nome",
"America/Noronha",
"America/North_Dakota/Beulah",
"America/North_Dakota/Center",
"America/North_Dakota/New_Salem",
"America/Nuuk",
"America/Ojinaga",
"America/Panama",
"America/Pangnirtung",
"America/Paramaribo",
"America/Phoenix",
"America/Port-au-Prince",
"America/Port_of_Spain",
"America/Porto_Acre",
"America/Porto_Velho",
"America/Puerto_Rico",
"America/Punta_Arenas",
"America/Rainy_River",
"America/Rankin_Inlet",
"America/Recife",
"America/Regina",
"America/Resolute",
"America/Rio_Branco",
"America/Rosario",
"America/Santa_Isabel",
"America/Santarem",
"America/Santiago",
"America/Santo_Domingo",
"America/Sao_Paulo",
"America/Scoresbysund",
"America/Shiprock",
"America/Sitka",
"America/St_Barthelemy",
"America/St_Johns",
"America/St_Kitts",
"America/St_Lucia",
"America/St_Thomas",
"America/St_Vincent",
"America/Swift_Current",
"America/Tegucigalpa",
"America/Thule",
"America/Thunder_Bay",
"America/Tijuana",
"America/Toronto",
"America/Tortola",
"America/Vancouver",
"America/Virgin",
"America/Whitehorse",
"America/Winnipeg",
"America/Yakutat",
"America/Yellowknife",
"Antarctica/Casey",
"Antarctica/Davis",
"Antarctica/DumontDUrville",
"Antarctica/Macquarie",
"Antarctica/Mawson",
"Antarctica/McMurdo",
"Antarctica/Palmer",
"Antarctica/Rothera",
"Antarctica/South_Pole",
"Antarctica/Syowa",
"Antarctica/Troll",
"Antarctica/Vostok",
"Arctic/Longyearbyen",
"Asia/Aden",
"Asia/Almaty",
"Asia/Amman",
"Asia/Anadyr",
"Asia/Aqtau",
"Asia/Aqtobe",
"Asia/Ashgabat",
"Asia/Ashkhabad",
"Asia/Atyrau",
"Asia/Baghdad",
"Asia/Bahrain",
"Asia/Baku",
"Asia/Bangkok",
"Asia/Barnaul",
"Asia/Beirut",
"Asia/Bishkek",
"Asia/Brunei",
"Asia/Calcutta",
"Asia/Chita",
"Asia/Choibalsan",
"Asia/Chongqing",
"Asia/Chungking",
"Asia/Colombo",
"Asia/Dacca",
"Asia/Damascus",
"Asia/Dhaka",
"Asia/Dili",
"Asia/Dubai",
"Asia/Dushanbe",
"Asia/Famagusta",
"Asia/Gaza",
"Asia/Harbin",
"Asia/Hebron",
"Asia/Ho_Chi_Minh",
"Asia/Hong_Kong",
"Asia/Hovd",
"Asia/Irkutsk",
"Asia/Istanbul",
"Asia/Jakarta",
"Asia/Jayapura",
"Asia/Jerusalem",
"Asia/Kabul",
"Asia/Kamchatka",
"Asia/Karachi",
"Asia/Kashgar",
"Asia/Kathmandu",
"Asia/Katmandu",
"Asia/Khandyga",
"Asia/Kolkata",
"Asia/Krasnoyarsk",
"Asia/Kuala_Lumpur",
"Asia/Kuching",
"Asia/Kuwait",
"Asia/Macao",
"Asia/Macau",
"Asia/Magadan",
"Asia/Makassar",
"Asia/Manila",
"Asia/Muscat",
"Asia/Nicosia",
"Asia/Novokuznetsk",
"Asia/Novosibirsk",
"Asia/Omsk",
"Asia/Oral",
"Asia/Phnom_Penh",
"Asia/Pontianak",
"Asia/Pyongyang",
"Asia/Qatar",
"Asia/Qostanay",
"Asia/Qyzylorda",
"Asia/Rangoon",
"Asia/Riyadh",
"Asia/Saigon",
"Asia/Sakhalin",
"Asia/Samarkand",
"Asia/Seoul",
"Asia/Shanghai",
"Asia/Singapore",
"Asia/Srednekolymsk",
"Asia/Taipei",
"Asia/Tashkent",
"Asia/Tbilisi",
"Asia/Tehran",
"Asia/Tel_Aviv",
"Asia/Thimbu",
"Asia/Thimphu",
"Asia/Tokyo",
"Asia/Tomsk",
"Asia/Ujung_Pandang",
"Asia/Ulaanbaatar",
"Asia/Ulan_Bator",
"Asia/Urumqi",
"Asia/Ust-Nera",
"Asia/Vientiane",
"Asia/Vladivostok",
"Asia/Yakutsk",
"Asia/Yangon",
"Asia/Yekaterinburg",
"Asia/Yerevan",
"Atlantic/Azores",
"Atlantic/Bermuda",
"Atlantic/Canary",
"Atlantic/Cape_Verde",
"Atlantic/Faeroe",
"Atlantic/Faroe",
"Atlantic/Jan_Mayen",
"Atlantic/Madeira",
"Atlantic/Reykjavik",
"Atlantic/South_Georgia",
"Atlantic/St_Helena",
"Atlantic/Stanley",
"Australia/ACT",
"Australia/Adelaide",
"Australia/Brisbane",
"Australia/Broken_Hill",
"Australia/Canberra",
"Australia/Currie",
"Australia/Darwin",
"Australia/Eucla",
"Australia/Hobart",
"Australia/LHI",
"Australia/Lindeman",
"Australia/Lord_Howe",
"Australia/Melbourne",
"Australia/NSW",
"Australia/North",
"Australia/Perth",
"Australia/Queensland",
"Australia/South",
"Australia/Sydney",
"Australia/Tasmania",
"Australia/Victoria",
"Australia/West",
"Australia/Yancowinna",
"Brazil/Acre",
"Brazil/DeNoronha",
"Brazil/East",
"Brazil/West",
"CET",
"CST6CDT",
"Canada/Atlantic",
"Canada/Central",
"Canada/Eastern",
"Canada/Mountain",
"Canada/Newfoundland",
"Canada/Pacific",
"Canada/Saskatchewan",
"Canada/Yukon",
"Chile/Continental",
"Chile/EasterIsland",
"Cuba",
"EET",
"EST5EDT",
"Egypt",
"Eire",
"Etc/GMT",
"Etc/GMT+0",
"Etc/GMT+1",
"Etc/GMT+10",
"Etc/GMT+11",
"Etc/GMT+12",
"Etc/GMT+2",
"Etc/GMT+3",
"Etc/GMT+4",
"Etc/GMT+5",
"Etc/GMT+6",
"Etc/GMT+7",
"Etc/GMT+8",
"Etc/GMT+9",
"Etc/GMT-0",
"Etc/GMT-1",
"Etc/GMT-10",
"Etc/GMT-11",
"Etc/GMT-12",
"Etc/GMT-13",
"Etc/GMT-14",
"Etc/GMT-2",
"Etc/GMT-3",
"Etc/GMT-4",
"Etc/GMT-5",
"Etc/GMT-6",
"Etc/GMT-7",
"Etc/GMT-8",
"Etc/GMT-9",
"Etc/GMT0",
"Etc/Greenwich",
"Etc/UCT",
"Etc/UTC",
"Etc/Universal",
"Etc/Zulu",
"Europe/Amsterdam",
"Europe/Andorra",
"Europe/Astrakhan",
"Europe/Athens",
"Europe/Belfast",
"Europe/Belgrade",
"Europe/Berlin",
"Europe/Bratislava",
"Europe/Brussels",
"Europe/Bucharest",
"Europe/Budapest",
"Europe/Busingen",
"Europe/Chisinau",
"Europe/Copenhagen",
"Europe/Dublin",
"Europe/Gibraltar",
"Europe/Guernsey",
"Europe/Helsinki",
"Europe/Isle_of_Man",
"Europe/Istanbul",
"Europe/Jersey",
"Europe/Kaliningrad",
"Europe/Kiev",
"Europe/Kirov",
"Europe/Kyiv",
"Europe/Lisbon",
"Europe/Ljubljana",
"Europe/London",
"Europe/Luxembourg",
"Europe/Madrid",
"Europe/Malta",
"Europe/Mariehamn",
"Europe/Minsk",
"Europe/Monaco",
"Europe/Moscow",
"Europe/Nicosia",
"Europe/Oslo",
"Europe/Paris",
"Europe/Podgorica",
"Europe/Prague",
"Europe/Riga",
"Europe/Rome",
"Europe/Samara",
"Europe/San_Marino",
"Europe/Sarajevo",
"Europe/Saratov",
"Europe/Simferopol",
"Europe/Skopje",
"Europe/Sofia",
"Europe/Stockholm",
"Europe/Tallinn",
"Europe/Tirane",
"Europe/Tiraspol",
"Europe/Ulyanovsk",
"Europe/Uzhgorod",
"Europe/Vaduz",
"Europe/Vatican",
"Europe/Vienna",
"Europe/Vilnius",
"Europe/Volgograd",
"Europe/Warsaw",
"Europe/Zagreb",
"Europe/Zaporozhye",
"Europe/Zurich",
"GB",
"GB-Eire",
"GMT",
"GMT0",
"Greenwich",
"Hongkong",
"Iceland",
"Indian/Antananarivo",
"Indian/Chagos",
"Indian/Christmas",
"Indian/Cocos",
"Indian/Comoro",
"Indian/Kerguelen",
"Indian/Mahe",
"Indian/Maldives",
"Indian/Mauritius",
"Indian/Mayotte",
"Indian/Reunion",
"Iran",
"Israel",
"Jamaica",
"Japan",
"Kwajalein",
"Libya",
"MET",
"MST7MDT",
"Mexico/BajaNorte",
"Mexico/BajaSur",
"Mexico/General",
"NZ",
"NZ-CHAT",
"Navajo",
"PRC",
"PST8PDT",
"Pacific/Apia",
"Pacific/Auckland",
"Pacific/Bougainville",
"Pacific/Chatham",
"Pacific/Chuuk",
"Pacific/Easter",
"Pacific/Efate",
"Pacific/Enderbury",
"Pacific/Fakaofo",
"Pacific/Fiji",
"Pacific/Funafuti",
"Pacific/Galapagos",
"Pacific/Gambier",
"Pacific/Guadalcanal",
"Pacific/Guam",
"Pacific/Honolulu",
"Pacific/Johnston",
"Pacific/Kanton",
"Pacific/Kiritimati",
"Pacific/Kosrae",
"Pacific/Kwajalein",
"Pacific/Majuro",
"Pacific/Marquesas",
"Pacific/Midway",
"Pacific/Nauru",
"Pacific/Niue",
"Pacific/Norfolk",
"Pacific/Noumea",
"Pacific/Pago_Pago",
"Pacific/Palau",
"Pacific/Pitcairn",
"Pacific/Pohnpei",
"Pacific/Ponape",
"Pacific/Port_Moresby",
"Pacific/Rarotonga",
"Pacific/Saipan",
"Pacific/Samoa",
"Pacific/Tahiti",
"Pacific/Tarawa",
"Pacific/Tongatapu",
"Pacific/Truk",
"Pacific/Wake",
"Pacific/Wallis",
"Pacific/Yap",
"Poland",
"Portugal",
"ROK",
"Singapore",
"SystemV/AST4",
"SystemV/AST4ADT",
"SystemV/CST6",
"SystemV/CST6CDT",
"SystemV/EST5",
"SystemV/EST5EDT",
"SystemV/HST10",
"SystemV/MST7",
"SystemV/MST7MDT",
"SystemV/PST8",
"SystemV/PST8PDT",
"SystemV/YST9",
"SystemV/YST9YDT",
"Turkey",
"UCT",
"US/Alaska",
"US/Aleutian",
"US/Arizona",
"US/Central",
"US/East-Indiana",
"US/Eastern",
"US/Hawaii",
"US/Indiana-Starke",
"US/Michigan",
"US/Mountain",
"US/Pacific",
"US/Samoa",
"UTC",
"Universal",
"W-SU",
"WET",
"Zulu",
"EST",
"HST",
"MST",
"ACT",
"AET",
"AGT",
"ART",
"AST",
"BET",
"BST",
"CAT",
"CNT",
"CST",
"CTT",
"EAT",
"ECT",
"IET",
"IST",
"JST",
"MIT",
"NET",
"NST",
"PLT",
"PNT",
"PRT",
"PST",
"SST",
"VST",
]
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | false |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/coordinator.py | custom_components/petkit/coordinator.py | """DataUpdateCoordinator for Petkit Smart Devices."""
from __future__ import annotations
import asyncio
from copy import deepcopy
from datetime import datetime, timedelta, timezone
from pathlib import Path
import shutil
from typing import Any
import aiofiles
import aiofiles.os
from pypetkitapi import (
DownloadDecryptMedia,
Feeder,
Litter,
MediaFile,
MediaType,
Pet,
PetkitAuthenticationUnregisteredEmailError,
PetkitRegionalServerNotFoundError,
PetkitSessionError,
PetkitSessionExpiredError,
Purifier,
PypetkitError,
RecordType,
WaterFountain,
)
from homeassistant.exceptions import ConfigEntryAuthFailed
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
BT_SECTION,
CONF_BLE_RELAY_ENABLED,
CONF_DELETE_AFTER,
CONF_MEDIA_DL_IMAGE,
CONF_MEDIA_DL_VIDEO,
CONF_MEDIA_EV_TYPE,
CONF_MEDIA_PATH,
CONF_SMART_POLLING,
DEFAULT_BLUETOOTH_RELAY,
DEFAULT_DELETE_AFTER,
DEFAULT_DL_IMAGE,
DEFAULT_DL_VIDEO,
DEFAULT_EVENTS,
DEFAULT_MEDIA_PATH,
DEFAULT_SCAN_INTERVAL,
DEFAULT_SMART_POLLING,
DOMAIN,
LOGGER,
MEDIA_SECTION,
MIN_SCAN_INTERVAL,
)
class PetkitDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching data from the API."""
def __init__(self, hass, logger, name, update_interval, config_entry):
"""Initialize the data update coordinator."""
super().__init__(
hass,
logger,
name=name,
update_interval=update_interval,
config_entry=config_entry,
)
self.config_entry = config_entry
self.previous_devices = set()
self.curent_devices = set()
self.fast_poll_tic = 0
def enable_smart_polling(self, nb_tic: int) -> None:
"""Enable smart polling."""
if self.fast_poll_tic > 0:
LOGGER.debug(f"Fast poll tic already enabled for {self.fast_poll_tic} tics")
return
if not self.config_entry.options.get(CONF_SMART_POLLING, DEFAULT_SMART_POLLING):
LOGGER.debug("Smart polling is disabled by configuration")
return
self.update_interval = timedelta(seconds=MIN_SCAN_INTERVAL)
self.fast_poll_tic = nb_tic
LOGGER.debug(
f"Fast poll tic enabled for {nb_tic} tics (at {MIN_SCAN_INTERVAL}sec interval)"
)
async def update_smart_polling(self) -> None:
"""Update smart polling."""
if self.fast_poll_tic > 0:
self.fast_poll_tic -= 1
LOGGER.debug(f"Fast poll tic remaining = {self.fast_poll_tic}")
elif self.update_interval != timedelta(seconds=DEFAULT_SCAN_INTERVAL):
self.update_interval = timedelta(seconds=DEFAULT_SCAN_INTERVAL)
LOGGER.debug("Fast poll tic ended, reset to default scan interval")
async def _async_update_data(
self,
) -> dict[int, Feeder | Litter | WaterFountain | Purifier | Pet]:
"""Update data via library."""
await self.update_smart_polling()
try:
await self.config_entry.runtime_data.client.get_devices_data()
except (
PetkitSessionExpiredError,
PetkitSessionError,
PetkitAuthenticationUnregisteredEmailError,
PetkitRegionalServerNotFoundError,
) as exception:
raise ConfigEntryAuthFailed(exception) from exception
except PypetkitError as exception:
raise UpdateFailed(exception) from exception
else:
data = self.config_entry.runtime_data.client.petkit_entities
self.current_devices = set(data)
# Check if there are any stale devices
if stale_devices := self.previous_devices - self.current_devices:
device_registry = dr.async_get(self.hass)
for device_id in stale_devices:
device = device_registry.async_get(
identifiers={(DOMAIN, device_id)}
)
if device:
device_registry.async_update_device(
device_id=device.id,
remove_config_entry_id=self.config_entry.entry_id,
)
self.previous_devices = self.current_devices
return data
class PetkitMediaUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching data from the API."""
def __init__(
self, hass, logger, name, update_interval, config_entry, data_coordinator
):
"""Initialize the data update coordinator."""
super().__init__(
hass,
logger,
name=name,
update_interval=update_interval,
config_entry=config_entry,
)
self.config_entry = config_entry
self.data_coordinator = data_coordinator
self.media_type = []
self.event_type = []
self.previous_devices = set()
self.media_table = {}
self.delete_after = 0
self.media_path = Path()
# Load configuration
self._get_media_config(config_entry.options)
def _get_media_config(self, options) -> None:
"""Get media configuration."""
media_options = options.get(MEDIA_SECTION, {})
event_type_config = media_options.get(CONF_MEDIA_EV_TYPE, DEFAULT_EVENTS)
dl_image = media_options.get(CONF_MEDIA_DL_IMAGE, DEFAULT_DL_IMAGE)
dl_video = media_options.get(CONF_MEDIA_DL_VIDEO, DEFAULT_DL_VIDEO)
self.delete_after = media_options.get(CONF_DELETE_AFTER, DEFAULT_DELETE_AFTER)
self.event_type = [RecordType(element.lower()) for element in event_type_config]
raw = Path(media_options.get(CONF_MEDIA_PATH, DEFAULT_MEDIA_PATH))
if raw.is_absolute():
raw = raw.relative_to(raw.anchor)
self.media_path = Path("/media") / raw
LOGGER.debug(f"Media path = {self.media_path}")
if dl_image:
self.media_type.append(MediaType.IMAGE)
if dl_video:
self.media_type.append(MediaType.VIDEO)
async def _async_update_data(
self,
) -> dict[str, list[MediaFile]]:
"""Update data via library."""
self.hass.async_create_task(
self._async_update_media_files(self.data_coordinator.current_devices)
)
return self.media_table
async def _async_update_media_files(self, devices_lst: set) -> None:
"""Update media files."""
client = self.config_entry.runtime_data.client
for device in devices_lst:
if not hasattr(client.petkit_entities[device], "medias"):
LOGGER.debug(f"Device id = {device} does not support medias")
continue
media_lst = client.petkit_entities[device].medias
if not media_lst:
LOGGER.debug(f"No medias found for device id = {device}")
continue
LOGGER.debug(f"Gathering medias files onto disk for device id = {device}")
await client.media_manager.gather_all_media_from_disk(
self.media_path, device
)
to_dl = await client.media_manager.list_missing_files(
media_lst, self.media_type, self.event_type
)
dl_mgt = DownloadDecryptMedia(self.media_path, client)
for media in to_dl:
await dl_mgt.download_file(media, self.media_type)
LOGGER.debug(
f"Downloaded all medias for device id = {device} is OK (got {len(to_dl)} files to download)"
)
self.media_table[device] = deepcopy(
await client.media_manager.gather_all_media_from_disk(
self.media_path, device
)
)
LOGGER.debug("Update media files finished for all devices")
await self._async_delete_old_media()
async def _async_delete_old_media(self) -> None:
"""Delete old media files based on the retention policy."""
if self.delete_after == 0:
LOGGER.debug("Media deletion is disabled by configuration")
return
retention_date = datetime.now() - timedelta(days=self.delete_after)
for device_id in self.data_coordinator.current_devices:
device_media_path = self.media_path / str(device_id)
if not await aiofiles.os.path.exists(str(device_media_path)):
continue
def list_directory(path):
return list(path.iterdir())
try:
date_dirs = await asyncio.to_thread(list_directory, device_media_path)
except FileNotFoundError:
LOGGER.warning(f"Device media path not found: {device_media_path}")
continue
for date_dir in date_dirs:
if date_dir.is_dir():
try:
dir_date = datetime.strptime(date_dir.name, "%Y%m%d")
if dir_date < retention_date:
LOGGER.debug(f"Deleting old media files in {date_dir}")
await asyncio.to_thread(shutil.rmtree, date_dir)
except ValueError:
LOGGER.warning(
f"Invalid date format in directory name: {date_dir.name}"
)
class PetkitBluetoothUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage bluetooth connection for Petkit Smart Devices."""
def __init__(
self, hass, logger, name, update_interval, config_entry, data_coordinator
):
"""Initialize the data update coordinator."""
super().__init__(
hass,
logger,
name=name,
update_interval=update_interval,
config_entry=config_entry,
)
self.config = config_entry
self.data_coordinator = data_coordinator
self.last_update_timestamps = {}
async def _async_update_data(
self,
) -> dict[int, Any]:
"""Update data via connecting to bluetooth (over API)."""
updated_fountain = {}
if not self.config.options.get(BT_SECTION, {}).get(
CONF_BLE_RELAY_ENABLED, DEFAULT_BLUETOOTH_RELAY
):
LOGGER.debug("BLE relay is disabled by configuration")
return updated_fountain
LOGGER.debug("Update bluetooth connection for all fountains")
for device_id in self.data_coordinator.current_devices:
device = self.config.runtime_data.client.petkit_entities.get(device_id)
if isinstance(device, WaterFountain):
LOGGER.debug(
f"Updating bluetooth connection for device id = {device_id}"
)
self.hass.async_create_task(
self._async_update_bluetooth_connection(device_id)
)
return self.last_update_timestamps
async def _async_update_bluetooth_connection(self, device_id: str) -> bool:
"""Update bluetooth connection."""
if await self.config.runtime_data.client.bluetooth_manager.open_ble_connection(
device_id
):
await asyncio.sleep(5)
await self.config.runtime_data.client.bluetooth_manager.close_ble_connection(
device_id
)
LOGGER.debug(f"Bluetooth connection for device id = {device_id} is OK")
self.last_update_timestamps[device_id] = datetime.now(timezone.utc)
return True
LOGGER.debug(f"Bluetooth connection for device id = {device_id} failed")
return False
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | false |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/utils.py | custom_components/petkit/utils.py | """Util functions for the Petkit integration."""
from datetime import datetime
from pypetkitapi import LitterRecord, RecordsItems, WorkState
from .const import EVENT_MAPPING, LOGGER
def map_work_state(work_state: WorkState | None) -> str:
"""Get the state of the litter box.
Use the 'litter_state' translation table to map the state to a human-readable string.
"""
LOGGER.debug(f"Litter map work_state: {work_state}")
if work_state is None:
return "idle"
def get_safe_warn_status(safe_warn: int, pet_in_time: int) -> str:
"""Get the safe warn status."""
if safe_warn != 0:
return {
1: "pet_entered",
3: "cover",
}.get(safe_warn, "system_error")
return "pet_approach" if pet_in_time == 0 else "pet_using"
def handle_process_mapping(prefix: str) -> str:
"""Handle the process mapping."""
major, minor = divmod(work_state.work_process, 10)
if major == 1:
return f"{prefix}"
if major == 2:
if minor == 2:
return f"{prefix}_paused_{get_safe_warn_status(work_state.safe_warn, work_state.pet_in_time)}"
return f"{prefix}_paused"
if major == 3:
return "resetting_device"
if major == 4:
if minor == 2:
return f"paused_{get_safe_warn_status(work_state.safe_warn, work_state.pet_in_time)}"
return "paused"
return f"{prefix}"
# Map work_mode to their respective functions
_WORK_MODE_MAPPING = {
0: lambda: handle_process_mapping("cleaning"),
1: lambda: handle_process_mapping("dumping"),
2: lambda: "odor_removal",
3: lambda: "resetting",
4: lambda: "leveling",
5: lambda: "calibrating",
6: lambda: "reset_deodorant",
7: lambda: "light",
8: lambda: "reset_max_deodorant",
9: lambda: handle_process_mapping("maintenance"),
}
return _WORK_MODE_MAPPING.get(work_state.work_mode, lambda: "idle")()
def get_raw_feed_plan(feeder_records_data) -> str | None:
"""Get the raw feed plan from feeder data.
:param feeder_records_data: FeederRecordsData
:return: A string with the feed plan in the format "id_incremental,hours,minutes,amount,state"
where:
- id_incremental: The incremental ID of the feed item
- hours: Hours
- minutes: Minutes
- amount: The amount of food dispensed (for dual feeders, it will be the sum of amount1 and amount2)
- state: The state of the food depending on the device type
- 0: Food is pending (not dispensed yet)
- 1: Food was dispensed successfully (by schedule)
- 2: Food was dispensed successfully (by remote command on app)
- 3: Food was dispensed successfully (by local command on feeder)
- 6: Unknown state (probably disconnected)
- 7: Food was cancelled
- 8: Food was skipped due to SurplusControl (only for feeders with camera)
- 9: Food was not dispensed due to an error
"""
result = []
if not feeder_records_data:
LOGGER.debug("No feeder records data found")
return None
if feeder_records_data.feed is None:
LOGGER.debug("No feed data found")
return None
# Heure actuelle en secondes depuis minuit
now = datetime.now()
current_seconds = now.hour * 3600 + now.minute * 60 + now.second
for feed in feeder_records_data.feed:
items = feed.items
for idx, item in enumerate(items):
id_incremental = idx
time_in_seconds = item.time
hours = time_in_seconds // 3600
minutes = (time_in_seconds % 3600) // 60
# Calculate amount
amount = (
item.amount
if item.amount is not None
else (getattr(item, "amount1", 0) or 0)
+ (getattr(item, "amount2", 0) or 0)
)
state = 0 # Pending by default
if (
(not hasattr(item, "state") or item.state is None)
and item.status == 0
and time_in_seconds < current_seconds
):
state = 6
elif item.status == 1:
state = 7 # Food was cancelled
elif hasattr(item, "state") and item.state is not None:
if item.state.err_code == 0 and item.state.result == 0:
if item.src == 1:
state = 1
elif item.src == 3:
state = 2
elif item.src == 4:
state = 3
else:
state = 1
elif item.state.err_code == 10 and item.state.result == 8:
state = 8
else:
state = 9
result.append(f"{id_incremental},{hours},{minutes},{amount},{state}")
return ",".join(result) if result else None
def map_litter_event(litter_event: list[LitterRecord | None]) -> str | None:
"""Return a description of the last event.
Use the 'litter_last_event' translation table to map the state to a human-readable string.
"""
if not isinstance(litter_event, list) or not litter_event:
return None
litter_event = litter_event[-1]
error = litter_event.content.error
if litter_event.sub_content:
event_type = litter_event.sub_content[-1].event_type
result = litter_event.sub_content[-1].content.result
reason = litter_event.sub_content[-1].content.start_reason
else:
return litter_event.enum_event_type
if event_type not in [5, 6, 7, 8, 10]:
LOGGER.debug(f"Unknown event type code: {event_type}")
return "event_type_unknown"
if event_type == 10:
name = "Unknown" if litter_event.pet_name is None else litter_event.pet_name
return f"{name} used the litter box"
try:
if event_type == 5 and result == 2:
return EVENT_MAPPING[event_type][result][reason][error]
if event_type in [6, 7] and result == 2:
return EVENT_MAPPING[event_type][result][error]
if event_type in [8, 5]:
return EVENT_MAPPING[event_type][result][reason]
return EVENT_MAPPING[event_type][result]
except KeyError:
LOGGER.debug(f"Unknown event type result: {event_type}")
return f"event_type_{event_type}_unknown"
def get_dispense_status(
feed_record: RecordsItems,
) -> tuple[str, str, int, int, int, int]:
"""Get the dispense status.
:param feed_record: RecordsItems
:return: tuple (source, status, plan_amount1, plan_amount2, disp_amount1, disp_amount2)
"""
# Init
plan_amount1 = getattr(feed_record, "amount", 0)
plan_amount2 = 0
disp_amount1 = 0
disp_amount2 = 0
# Déterminer les montants planifiés si `amount1` et `amount2` existent
if hasattr(feed_record, "amount1") and hasattr(feed_record, "amount2"):
plan_amount1 = feed_record.amount1
plan_amount2 = feed_record.amount2
# Find the source
source_mapping = {
1: "feeding plan",
3: "manual (source : from application)",
4: "manual (source : locally from feeder)",
}
source = source_mapping.get(feed_record.src, "unknown")
# Find the status
if feed_record.status == 1:
status = "cancelled"
elif hasattr(feed_record, "state") and feed_record.state is not None:
state = feed_record.state
if state.err_code == 0 and state.result == 0:
status = "dispensed"
elif state.err_code == 10 and state.result == 8:
status = "skipped"
else:
status = "failed dispense"
# Determinate the dispensed amount
disp_amount1 = getattr(state, "real_amount", 0)
disp_amount1 = getattr(state, "real_amount1", disp_amount1)
disp_amount2 = getattr(state, "real_amount2", 0)
else:
status = "pending"
return source, status, plan_amount1, plan_amount2, disp_amount1, disp_amount2
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | false |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/fan.py | custom_components/petkit/fan.py | """Switch platform for Petkit Smart Devices integration."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any
from pypetkitapi import K2, DeviceAction, DeviceCommand, Purifier
from homeassistant.components.fan import (
FanEntity,
FanEntityDescription,
FanEntityFeature,
)
from .const import LOGGER, POWER_ONLINE_STATE, PURIFIER_MODE
from .entity import PetKitDescSensorBase, PetkitEntity
if TYPE_CHECKING:
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .coordinator import PetkitDataUpdateCoordinator
from .data import PetkitConfigEntry, PetkitDevices
@dataclass(frozen=True, kw_only=True)
class PetkitFanDesc(PetKitDescSensorBase, FanEntityDescription):
"""A class that describes sensor entities."""
preset_modes: Callable[[], list[str]] | None = None
turn_on: Callable[[Any, Any], Any] | None = None
turn_off: Callable[[Any, Any], Any] | None = None
set_mode: Callable[[Any, Any, Any], Any] | None = None
current_mode: Callable[[Any], str] | None = None
FAN_MAPPING: dict[type[PetkitDevices], list[PetkitFanDesc]] = {
Purifier: [
PetkitFanDesc(
key="Air Purifier Fan",
translation_key="air_purifier_fan",
preset_modes=lambda: list(PURIFIER_MODE.values()),
current_mode=lambda device: PURIFIER_MODE.get(device.state.mode, 0),
turn_on=lambda api, device: api.send_api_request(
device.id, DeviceCommand.CONTROL_DEVICE, {DeviceAction.POWER: 1}
),
turn_off=lambda api, device: api.send_api_request(
device.id, DeviceCommand.CONTROL_DEVICE, {DeviceAction.POWER: 0}
),
set_mode=lambda api, device, opt_value: api.send_api_request(
device.id,
DeviceCommand.CONTROL_DEVICE,
{
DeviceAction.MODE: next(
key
for key, value in PURIFIER_MODE.items()
if value == opt_value
)
},
),
only_for_types=[K2],
),
],
}
async def async_setup_entry(
hass: HomeAssistant,
entry: PetkitConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up binary_sensors using config entry."""
devices = entry.runtime_data.client.petkit_entities.values()
entities = [
PetkitFan(
coordinator=entry.runtime_data.coordinator,
entity_description=entity_description,
device=device,
)
for device in devices
for device_type, entity_descriptions in FAN_MAPPING.items()
if isinstance(device, device_type)
for entity_description in entity_descriptions
if entity_description.is_supported(device) # Check if the entity is supported
]
LOGGER.debug(
"FAN : Adding %s (on %s available)",
len(entities),
sum(len(descriptors) for descriptors in FAN_MAPPING.values()),
)
async_add_entities(entities)
class PetkitFan(PetkitEntity, FanEntity):
"""Petkit Smart Devices Switch class."""
entity_description: PetkitFanDesc
def __init__(
self,
coordinator: PetkitDataUpdateCoordinator,
entity_description: PetkitFanDesc,
device: PetkitDevices,
) -> None:
"""Initialize the switch class."""
super().__init__(coordinator, device)
self.coordinator = coordinator
self.entity_description = entity_description
self.device = device
@property
def available(self) -> bool:
"""Return if this button is available or not."""
device_data = self.coordinator.data.get(self.device.id)
if hasattr(device_data.state, "pim"):
return device_data.state.pim in POWER_ONLINE_STATE
return True
@property
def is_on(self) -> bool:
"""Determine if the purifier is On."""
device_data = self.coordinator.data.get(self.device.id)
if hasattr(device_data.state, "power"):
return device_data.state.power in POWER_ONLINE_STATE
return True
@property
def preset_modes(self) -> list:
"""Return the available preset modes."""
return self.entity_description.preset_modes()
@property
def preset_mode(self) -> str | None:
"""Return the current preset mode."""
device_data = self.coordinator.data.get(self.device.id)
if device_data:
return self.entity_description.current_mode(device_data)
return None
@property
def supported_features(self) -> int:
"""Return supported features."""
return (
FanEntityFeature.PRESET_MODE
| FanEntityFeature.TURN_ON
| FanEntityFeature.TURN_OFF
)
async def async_turn_on(
self,
speed: str | None = None,
percentage: int | None = None,
preset_mode: str | None = None,
**kwargs: Any,
) -> None:
"""Turn on the switch."""
LOGGER.debug("Turn Fan ON")
res = await self.entity_description.turn_on(
self.coordinator.config_entry.runtime_data.client, self.device
)
await self._update_coordinator_data(res)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn off the switch."""
LOGGER.debug("Turn Fan OFF")
res = await self.entity_description.turn_off(
self.coordinator.config_entry.runtime_data.client, self.device
)
await self._update_coordinator_data(res)
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set a preset mode for the purifier."""
LOGGER.debug(
"Setting value for : %s with value : %s",
self.entity_description.key,
preset_mode,
)
await self.entity_description.set_mode(
self.coordinator.config_entry.runtime_data.client, self.device, preset_mode
)
async def _update_coordinator_data(self, result: bool) -> None:
"""Update the coordinator data based on the result."""
await asyncio.sleep(1)
await self.coordinator.async_request_refresh()
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | false |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/sensor.py | custom_components/petkit/sensor.py | """Sensor platform for Petkit Smart Devices integration."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from datetime import datetime, timezone
from typing import TYPE_CHECKING, Any
from pypetkitapi import (
CTW3,
D4H,
D4S,
D4SH,
DEVICES_LITTER_BOX,
K2,
K3,
LITTER_WITH_CAMERA,
T3,
T4,
T5,
T6,
T7,
W5,
BluetoothState,
Feeder,
Litter,
Pet,
Purifier,
WaterFountain,
)
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.const import (
PERCENTAGE,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
EntityCategory,
UnitOfElectricPotential,
UnitOfEnergy,
UnitOfMass,
UnitOfTemperature,
UnitOfTime,
UnitOfVolume,
)
from .const import BATTERY_LEVEL_MAP, DEVICE_STATUS_MAP, LOGGER, NO_ERROR
from .entity import PetKitDescSensorBase, PetkitEntity
from .utils import get_raw_feed_plan, map_litter_event, map_work_state
if TYPE_CHECKING:
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .coordinator import (
PetkitBluetoothUpdateCoordinator,
PetkitDataUpdateCoordinator,
)
from .data import PetkitConfigEntry, PetkitDevices
@dataclass(frozen=True, kw_only=True)
class PetKitSensorDesc(PetKitDescSensorBase, SensorEntityDescription):
    """A class that describes sensor entities."""
    # Restore the last known value after a Home Assistant restart.
    restore_state: bool = False
    # Sensor is served by the Bluetooth coordinator instead of the data coordinator.
    bluetooth_coordinator: bool = False
    # Optional predicate; while it returns True the device warrants faster polling.
    smart_poll_trigger: Callable[[PetkitDevices], bool] | None = None
def get_liquid_value(device):
    """Get the liquid value for purifier devices.

    Looks for a ``liquid`` attribute first on ``device.state`` and then on
    the device itself, returning the first value that is a valid
    percentage (0-100); otherwise returns ``None``.
    """
    for holder in (device.state, device):
        level = getattr(holder, "liquid", None)
        if level is not None and 0 <= level <= 100:
            return level
    return None
def get_bt_state_text(state: BluetoothState) -> str | None:
    """Get the bluetooth state as display text.

    ``NO_STATE`` maps to ``None``; any unrecognized state maps to "Unknown".
    """
    if state == BluetoothState.NO_STATE:
        return None
    if state == BluetoothState.NOT_CONNECTED:
        return "Not connected"
    if state == BluetoothState.CONNECTING:
        return "Connecting…"
    if state == BluetoothState.CONNECTED:
        return "Connected"
    if state == BluetoothState.ERROR:
        return "Error"
    return "Unknown"
# Diagnostic sensors shared by every Petkit device type.
COMMON_ENTITIES = [
    # Online/offline status derived from the "pim" field.
    PetKitSensorDesc(
        key="Device status",
        translation_key="device_status",
        entity_category=EntityCategory.DIAGNOSTIC,
        value=lambda device: DEVICE_STATUS_MAP.get(device.state.pim, "Unknown Status"),
    ),
    # Wi-Fi signal strength reported by the device.
    PetKitSensorDesc(
        key="Rssi",
        translation_key="rssi",
        entity_category=EntityCategory.DIAGNOSTIC,
        state_class=SensorStateClass.MEASUREMENT,
        device_class=SensorDeviceClass.SIGNAL_STRENGTH,
        native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
        value=lambda device: device.state.wifi.rsq,
    ),
    # Last error reported by the device; falls back to the NO_ERROR sentinel.
    PetKitSensorDesc(
        key="Error message",
        translation_key="error_message",
        entity_category=EntityCategory.DIAGNOSTIC,
        value=lambda device: (
            device.state.error_msg
            if hasattr(device.state, "error_msg") and device.state.error_msg is not None
            else NO_ERROR
        ),
        force_add=[K2, K3, T7],
    ),
    # Days remaining on the Care+ subscription, clamped to zero once expired.
    # NOTE(review): assumes work_indate is an epoch timestamp in seconds — confirm.
    PetKitSensorDesc(
        key="End date care plus subscription",
        translation_key="end_date_care_plus_subscription",
        entity_category=EntityCategory.DIAGNOSTIC,
        state_class=SensorStateClass.MEASUREMENT,
        native_unit_of_measurement=UnitOfTime.DAYS,
        value=lambda device: max(
            0,
            (
                datetime.fromtimestamp(
                    device.cloud_product.work_indate, tz=timezone.utc
                )
                - datetime.now(timezone.utc)
            ).days,
        ),
    ),
]
# Maps each Petkit device class to the sensor descriptors that may apply to
# it. Per-descriptor filters (force_add / ignore_types / only_for_types and
# the is_supported() probe) narrow the list to concrete models at setup time.
SENSOR_MAPPING: dict[type[PetkitDevices], list[PetKitSensorDesc]] = {
    # --- Feeder sensors ---
    Feeder: [
        *COMMON_ENTITIES,
        PetKitSensorDesc(
            key="Desiccant left days",
            translation_key="desiccant_left_days",
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=UnitOfTime.DAYS,
            value=lambda device: device.state.desiccant_left_days,
        ),
        # Battery status is only meaningful when running on battery (pim == 2).
        PetKitSensorDesc(
            key="Battery level",
            translation_key="battery_level",
            entity_category=EntityCategory.DIAGNOSTIC,
            value=lambda device: (
                BATTERY_LEVEL_MAP.get(device.state.battery_status, "Unknown")
                if device.state.pim == 2
                else "Not in use"
            ),
        ),
        PetKitSensorDesc(
            key="Times dispensed",
            translation_key="times_dispensed",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.MEASUREMENT,
            value=lambda device: device.state.feed_state.times,
        ),
        PetKitSensorDesc(
            key="Total planned",
            translation_key="total_planned",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=UnitOfMass.GRAMS,
            value=lambda device: device.state.feed_state.plan_amount_total,
        ),
        PetKitSensorDesc(
            key="Planned dispensed",
            translation_key="planned_dispensed",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.TOTAL_INCREASING,
            native_unit_of_measurement=UnitOfMass.GRAMS,
            # NOTE(review): camelCase "amountTotal" differs from the snake_case
            # attributes used everywhere else (e.g. plan_real_amount_total1) —
            # confirm the attribute name against pypetkitapi.
            value=lambda device: device.state.feed_state.plan_real_amountTotal,
        ),
        PetKitSensorDesc(
            key="Total dispensed",
            translation_key="total_dispensed",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.TOTAL_INCREASING,
            native_unit_of_measurement=UnitOfMass.GRAMS,
            value=lambda device: device.state.feed_state.real_amount_total,
        ),
        PetKitSensorDesc(
            key="Manual dispensed",
            translation_key="manual_dispensed",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.TOTAL,
            native_unit_of_measurement=UnitOfMass.GRAMS,
            value=lambda device: device.state.feed_state.add_amount_total,
        ),
        PetKitSensorDesc(
            key="Amount eaten",
            translation_key="amount_eaten",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.TOTAL,
            native_unit_of_measurement=UnitOfMass.GRAMS,
            value=lambda device: device.state.feed_state.eat_amount_total,  # D3
        ),
        # "Times eaten" comes from eat_times on most feeders but from
        # eat_count on the D4S, hence the two mutually exclusive descriptors.
        PetKitSensorDesc(
            key="Times eaten",
            translation_key="times_eaten",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.TOTAL,
            value=lambda device: (
                len(device.state.feed_state.eat_times)
                if device.state.feed_state.eat_times is not None
                else None
            ),
            ignore_types=[D4S],
        ),
        PetKitSensorDesc(
            key="Times eaten",
            translation_key="times_eaten",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.TOTAL,
            value=lambda device: device.state.feed_state.eat_count,
            only_for_types=[D4S],
        ),
        PetKitSensorDesc(
            key="Food in bowl",
            translation_key="food_in_bowl",
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=UnitOfMass.GRAMS,
            value=lambda device: device.state.weight,
        ),
        PetKitSensorDesc(
            key="Avg eating time",
            translation_key="avg_eating_time",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=UnitOfTime.SECONDS,
            value=lambda device: device.state.feed_state.eat_avg,
        ),
        # Dual-hopper feeders expose per-hopper counters (suffix 1/2).
        PetKitSensorDesc(
            key="Manual dispensed hopper 1",
            translation_key="manual_dispensed_hopper_1",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.TOTAL,
            value=lambda device: device.state.feed_state.add_amount_total1,
        ),
        PetKitSensorDesc(
            key="Manual dispensed hopper 2",
            translation_key="manual_dispensed_hopper_2",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.TOTAL,
            value=lambda device: device.state.feed_state.add_amount_total2,
        ),
        PetKitSensorDesc(
            key="Total planned hopper 1",
            translation_key="total_planned_hopper_1",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.TOTAL,
            value=lambda device: device.state.feed_state.plan_amount_total1,
        ),
        PetKitSensorDesc(
            key="Total planned hopper 2",
            translation_key="total_planned_hopper_2",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.TOTAL,
            value=lambda device: device.state.feed_state.plan_amount_total2,
        ),
        PetKitSensorDesc(
            key="Planned dispensed hopper 1",
            translation_key="planned_dispensed_hopper_1",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.TOTAL,
            value=lambda device: device.state.feed_state.plan_real_amount_total1,
        ),
        PetKitSensorDesc(
            key="Planned dispensed hopper 2",
            translation_key="planned_dispensed_hopper_2",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.TOTAL,
            value=lambda device: device.state.feed_state.plan_real_amount_total2,
        ),
        PetKitSensorDesc(
            key="Total dispensed hopper 1",
            translation_key="total_dispensed_hopper_1",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.TOTAL,
            value=lambda device: device.state.feed_state.real_amount_total1,
        ),
        PetKitSensorDesc(
            key="Total dispensed hopper 2",
            translation_key="total_dispensed_hopper_2",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.TOTAL,
            value=lambda device: device.state.feed_state.real_amount_total2,
        ),
        # Bowl fill level, clamped into the 0-100 percentage range.
        PetKitSensorDesc(
            key="Food bowl percentage",
            translation_key="food_bowl_percentage",
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=PERCENTAGE,
            value=lambda device: (
                max(0, min(100, device.state.bowl))
                if device.state.bowl is not None
                else None
            ),
        ),
        PetKitSensorDesc(
            key="Food left",
            translation_key="food_left",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=PERCENTAGE,
            value=lambda device: device.state.percent,
        ),
        PetKitSensorDesc(
            key="RAW distribution data",
            translation_key="raw_distribution_data",
            entity_category=EntityCategory.DIAGNOSTIC,
            value=lambda device: get_raw_feed_plan(device.device_records),
            force_add=[D4H, D4SH],
        ),
    ],
    # --- Litter box sensors ---
    Litter: [
        *COMMON_ENTITIES,
        PetKitSensorDesc(
            key="Litter level",
            translation_key="litter_level",
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=PERCENTAGE,
            value=lambda device: device.state.sand_percent,
            ignore_types=LITTER_WITH_CAMERA,
        ),
        # Litter weight is reported in grams; converted here to kilograms.
        PetKitSensorDesc(
            key="Litter weight",
            translation_key="litter_weight",
            device_class=SensorDeviceClass.WEIGHT,
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=UnitOfMass.KILOGRAMS,
            value=lambda device: round((device.state.sand_weight / 1000), 1),
            ignore_types=[T7],
        ),
        # Current activity; also drives faster polling while not idle.
        PetKitSensorDesc(
            key="State",
            translation_key="litter_state",
            value=lambda device: map_work_state(device.state.work_state),
            smart_poll_trigger=lambda device: map_work_state(device.state.work_state)
            != "idle",
        ),
        PetKitSensorDesc(
            key="Litter last event",
            translation_key="litter_last_event",
            value=lambda device: map_litter_event(device.device_records),
            force_add=DEVICES_LITTER_BOX,
        ),
        PetKitSensorDesc(
            key="Odor eliminator N50 left days",
            translation_key="odor_eliminator_n50_left_days",
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=UnitOfTime.DAYS,
            value=lambda device: device.state.deodorant_left_days,
        ),
        PetKitSensorDesc(
            key="Odor eliminator N60 left days",
            translation_key="odor_eliminator_n60_left_days",
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=UnitOfTime.DAYS,
            value=lambda device: device.state.spray_left_days,
        ),
        # Usage counters live on device_stats for T3/T4 but directly on the
        # device for T5/T6, hence the paired descriptors below.
        PetKitSensorDesc(
            key="Times used T3 T4",
            translation_key="times_used",
            state_class=SensorStateClass.TOTAL,
            value=lambda device: device.device_stats.times,
            force_add=[T3, T4],
            ignore_types=[T5, T6],
        ),
        PetKitSensorDesc(
            key="Times used T5 T6",
            translation_key="times_used",
            state_class=SensorStateClass.TOTAL,
            value=lambda device: device.in_times,
            force_add=[T5, T6],
            ignore_types=[T3, T4],
        ),
        PetKitSensorDesc(
            key="Total time T3 T4",
            translation_key="total_time",
            state_class=SensorStateClass.TOTAL,
            native_unit_of_measurement=UnitOfTime.SECONDS,
            value=lambda device: device.device_stats.total_time,
            force_add=[T3, T4],
            ignore_types=[T5, T6],
        ),
        PetKitSensorDesc(
            key="Total time T5 T6",
            translation_key="total_time",
            state_class=SensorStateClass.TOTAL,
            native_unit_of_measurement=UnitOfTime.SECONDS,
            value=lambda device: device.total_time,
            force_add=[T5, T6],
            ignore_types=[T3, T4],
        ),
        PetKitSensorDesc(
            key="Average time",
            translation_key="average_time",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=UnitOfTime.SECONDS,
            value=lambda device: device.device_stats.avg_time,
        ),
        # "Last used by" source differs between camera and non-camera models.
        PetKitSensorDesc(
            key="Last used by",
            translation_key="last_used_by",
            value=lambda device: (
                device.device_stats.statistic_info[-1].pet_name
                if device.device_stats.statistic_info
                else None
            ),
            force_add=[T3, T4],
            restore_state=True,
        ),
        PetKitSensorDesc(
            key="Last used by",
            translation_key="last_used_by",
            value=lambda device: (
                device.device_pet_graph_out[-1].pet_name
                if device.device_pet_graph_out
                else None
            ),
            force_add=LITTER_WITH_CAMERA,
            restore_state=True,
        ),
        PetKitSensorDesc(
            key="Total package",
            translation_key="total_package",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.TOTAL,
            value=lambda device: device.package_total_count,
        ),
        PetKitSensorDesc(
            key="Package used",
            translation_key="package_used",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.MEASUREMENT,
            value=lambda device: device.package_used_count,
        ),
    ],
    # --- Water fountain sensors ---
    WaterFountain: [
        *COMMON_ENTITIES,
        # Pump energy estimate.
        # NOTE(review): 0.75 presumably the pump wattage and 3600000 converts
        # ms of runtime to kWh — confirm against the device specs.
        PetKitSensorDesc(
            key="Today pump run time",
            translation_key="today_pump_run_time",
            entity_category=EntityCategory.DIAGNOSTIC,
            device_class=SensorDeviceClass.ENERGY,
            native_unit_of_measurement=UnitOfEnergy.KILO_WATT_HOUR,
            value=lambda device: round(
                ((0.75 * int(device.today_pump_run_time)) / 3600000), 4
            ),
        ),
        PetKitSensorDesc(
            key="Last update",
            translation_key="last_update",
            entity_category=EntityCategory.DIAGNOSTIC,
            device_class=SensorDeviceClass.TIMESTAMP,
            value=lambda device: datetime.fromisoformat(
                device.update_at.replace(".000Z", "+00:00")
            ),
        ),
        PetKitSensorDesc(
            key="Filter percent",
            translation_key="filter_percent",
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=PERCENTAGE,
            value=lambda device: device.filter_percent,
        ),
        # Purified-water estimate; the divisor differs between the CTW3 and
        # other fountain models.
        PetKitSensorDesc(
            key="Purified water",
            translation_key="purified_water",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.MEASUREMENT,
            value=lambda device: int(
                ((1.5 * int(device.today_pump_run_time)) / 60) / 3.0
            ),
            only_for_types=[CTW3],
        ),
        PetKitSensorDesc(
            key="Purified water",
            translation_key="purified_water",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.MEASUREMENT,
            value=lambda device: int(
                ((1.5 * int(device.today_pump_run_time)) / 60) / 2.0
            ),
            ignore_types=[CTW3],
        ),
        PetKitSensorDesc(
            key="Drink times",
            translation_key="drink_times",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.TOTAL,
            value=lambda device: (
                len(device.device_records)
                if isinstance(device.device_records, list)
                else None
            ),
        ),
        PetKitSensorDesc(
            key="Battery",
            translation_key="battery",
            entity_category=EntityCategory.DIAGNOSTIC,
            device_class=SensorDeviceClass.BATTERY,
            native_unit_of_measurement=PERCENTAGE,
            value=lambda device: device.electricity.battery_percent,
        ),
        # Voltages are reported in millivolts; converted to volts, and
        # non-positive readings are treated as unavailable.
        PetKitSensorDesc(
            key="Battery voltage",
            translation_key="battery_voltage",
            entity_category=EntityCategory.DIAGNOSTIC,
            device_class=SensorDeviceClass.VOLTAGE,
            native_unit_of_measurement=UnitOfElectricPotential.VOLT,
            value=lambda device: (
                round(device.electricity.battery_voltage / 1000, 1)
                if isinstance(device.electricity.battery_voltage, (int, float))
                and device.electricity.battery_voltage > 0
                else None
            ),
        ),
        PetKitSensorDesc(
            key="Supply voltage",
            translation_key="supply_voltage",
            entity_category=EntityCategory.DIAGNOSTIC,
            device_class=SensorDeviceClass.VOLTAGE,
            native_unit_of_measurement=UnitOfElectricPotential.VOLT,
            value=lambda device: (
                round(device.electricity.supply_voltage / 1000, 1)
                if isinstance(device.electricity.supply_voltage, (int, float))
                and device.electricity.supply_voltage > 0
                else None
            ),
        ),
    ],
    # --- Air purifier sensors ---
    Purifier: [
        *COMMON_ENTITIES,
        # Humidity and temperature are reported in tenths of a unit.
        PetKitSensorDesc(
            key="Humidity",
            translation_key="humidity",
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=PERCENTAGE,
            device_class=SensorDeviceClass.HUMIDITY,
            value=lambda device: round(device.state.humidity / 10),
        ),
        PetKitSensorDesc(
            key="Temperature",
            translation_key="temperature",
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=UnitOfTemperature.CELSIUS,
            device_class=SensorDeviceClass.TEMPERATURE,
            value=lambda device: round(device.state.temp / 10),
        ),
        PetKitSensorDesc(
            key="Air purified",
            translation_key="air_purified",
            state_class=SensorStateClass.TOTAL,
            native_unit_of_measurement=UnitOfVolume.CUBIC_METERS,
            device_class=SensorDeviceClass.VOLUME,
            value=lambda device: round(device.state.refresh),
        ),
        PetKitSensorDesc(
            key="Purifier liquid",
            translation_key="purifier_liquid",
            entity_category=EntityCategory.DIAGNOSTIC,
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=PERCENTAGE,
            value=get_liquid_value,
        ),
        PetKitSensorDesc(
            key="Battery",
            translation_key="battery",
            entity_category=EntityCategory.DIAGNOSTIC,
            device_class=SensorDeviceClass.BATTERY,
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=PERCENTAGE,
            value=lambda device: device.battery,
        ),
        PetKitSensorDesc(
            key="Battery voltage",
            translation_key="battery_voltage",
            entity_category=EntityCategory.DIAGNOSTIC,
            device_class=SensorDeviceClass.VOLTAGE,
            native_unit_of_measurement=UnitOfElectricPotential.VOLT,
            value=lambda device: (
                round(device.voltage / 1000, 1)
                if isinstance(device.voltage, (int, float)) and device.voltage > 0
                else None
            ),
        ),
        # PetKitSensorDesc(
        #     key="Spray times",
        #     translation_key="spray_times",
        #     state_class=SensorStateClass.TOTAL,
        #     value=lambda device: device.spray_times,
        # ),
    ],
    # --- Per-pet sensors (restored across restarts) ---
    Pet: [
        # Weight is reported in grams; converted to kilograms.
        PetKitSensorDesc(
            key="Pet last weight measurement",
            translation_key="pet_last_weight_measurement",
            entity_picture=lambda pet: pet.avatar,
            device_class=SensorDeviceClass.WEIGHT,
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=UnitOfMass.KILOGRAMS,
            value=lambda pet: (
                round((pet.last_measured_weight / 1000), 2)
                if pet.last_measured_weight is not None
                else None
            ),
            restore_state=True,
        ),
        PetKitSensorDesc(
            key="Pet last use duration",
            translation_key="pet_last_use_duration",
            entity_picture=lambda pet: pet.avatar,
            device_class=SensorDeviceClass.DURATION,
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=UnitOfTime.SECONDS,
            value=lambda pet: pet.last_duration_usage,
            restore_state=True,
        ),
        PetKitSensorDesc(
            key="Pet last device used",
            translation_key="pet_last_device_used",
            entity_picture=lambda pet: pet.avatar,
            value=lambda pet: pet.last_device_used,
            restore_state=True,
        ),
        # Timestamps of 0 are treated as "Unknown" rather than the epoch.
        PetKitSensorDesc(
            key="Pet last use date",
            translation_key="pet_last_use_date",
            entity_picture=lambda pet: pet.avatar,
            value=lambda pet: (
                datetime.fromtimestamp(pet.last_litter_usage)
                if pet.last_litter_usage is not None and pet.last_litter_usage != 0
                else "Unknown"
            ),
            restore_state=True,
        ),
        PetKitSensorDesc(
            key="Urine measured ph",
            translation_key="urine_measured_ph",
            entity_picture=lambda pet: pet.avatar,
            state_class=SensorStateClass.MEASUREMENT,
            value=lambda pet: pet.measured_ph,
            restore_state=True,
        ),
        PetKitSensorDesc(
            key="Pet last urination date",
            translation_key="pet_last_urination_date",
            entity_picture=lambda pet: pet.avatar,
            value=lambda pet: (
                None
                if pet.last_urination is None
                else (
                    "Unknown"
                    if pet.last_urination == 0
                    else datetime.fromtimestamp(pet.last_urination)
                )
            ),
            restore_state=True,
        ),
        PetKitSensorDesc(
            key="Pet last defecation date",
            translation_key="pet_last_defecation_date",
            entity_picture=lambda pet: pet.avatar,
            value=lambda pet: (
                None
                if pet.last_defecation is None
                else (
                    "Unknown"
                    if pet.last_defecation == 0
                    else datetime.fromtimestamp(pet.last_defecation)
                )
            ),
            restore_state=True,
        ),
    ],
}
# Sensors served by the Bluetooth coordinator (fountains reachable over BLE).
SENSOR_BT_MAPPING: dict[type[PetkitDevices], list[PetKitSensorDesc]] = {
    WaterFountain: [
        # Timestamp of the last successful BLE update for this device.
        PetKitSensorDesc(
            key="Last connection",
            translation_key="last_connection",
            entity_category=EntityCategory.DIAGNOSTIC,
            device_class=SensorDeviceClass.TIMESTAMP,
            value=lambda device: (
                device.coordinator_bluetooth.last_update_timestamps.get(device.id)
                if hasattr(device, "coordinator_bluetooth")
                and device.coordinator_bluetooth.last_update_timestamps.get(device.id)
                else None
            ),
            bluetooth_coordinator=True,
            force_add=[CTW3, W5],
        ),
        # PetKitSensorDesc(
        #     key="Connection status",
        #     translation_key="connection_state",
        #     entity_category=EntityCategory.DIAGNOSTIC,
        #     value=lambda device: device.coordinator_bluetooth.ble_connection_state,
        #     bluetooth_coordinator=True,
        #     force_add=[CTW3, W5],
        # ),
    ]
}
async def async_setup_entry(
    hass: HomeAssistant,
    entry: PetkitConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up sensors using config entry.

    Builds one ``PetkitSensor`` per supported descriptor for every device,
    plus one ``PetkitSensorBt`` per supported Bluetooth descriptor, and
    registers them all at once.
    """
    devices = entry.runtime_data.client.petkit_entities.values()
    entities = [
        PetkitSensor(
            coordinator=entry.runtime_data.coordinator,
            entity_description=entity_description,
            device=device,
        )
        for device in devices
        for device_type, entity_descriptions in SENSOR_MAPPING.items()
        if isinstance(device, device_type)
        for entity_description in entity_descriptions
        if entity_description.is_supported(device)  # Check if the entity is supported
    ]
    LOGGER.debug(
        "SENSOR : Adding %s (on %s available)",
        len(entities),
        # Count descriptors, not device types, so the log is meaningful
        # (len(SENSOR_MAPPING.items()) only counted the mapping keys).
        sum(len(descriptors) for descriptors in SENSOR_MAPPING.values()),
    )
    entities_bt = [
        PetkitSensorBt(
            coordinator_bluetooth=entry.runtime_data.coordinator_bluetooth,
            entity_description=entity_description,
            device=device,
        )
        for device in devices
        for device_type, entity_descriptions in SENSOR_BT_MAPPING.items()
        if isinstance(device, device_type)
        for entity_description in entity_descriptions
        if entity_description.is_supported(device)  # Check if the entity is supported
    ]
    LOGGER.debug(
        "SENSOR BT : Adding %s (on %s available)",
        len(entities_bt),
        # Fixed: the BT count was previously summed over SENSOR_MAPPING
        # instead of the Bluetooth mapping.
        sum(len(descriptors) for descriptors in SENSOR_BT_MAPPING.values()),
    )
    async_add_entities(entities + entities_bt)
class PetkitSensor(PetkitEntity, SensorEntity):
    """Petkit Smart Devices Sensor class (polled via the data coordinator)."""
    entity_description: PetKitSensorDesc
    def __init__(
        self,
        coordinator: PetkitDataUpdateCoordinator,
        entity_description: PetKitSensorDesc,
        device: PetkitDevices,
    ) -> None:
        """Initialize the sensor class."""
        super().__init__(coordinator, device)
        self.coordinator = coordinator
        self.entity_description = entity_description
        self.device = device
    @property
    def native_value(self) -> Any:
        """Return the state of the sensor.

        Evaluates the descriptor's ``value`` lambda against the latest
        coordinator snapshot for this device; ``None`` when the device is
        missing from the coordinator data.
        """
        device_data = self.coordinator.data.get(self.device.id)
        if device_data:
            return self.entity_description.value(device_data)
        return None
    @property
    def entity_picture(self) -> str | None:
        """Grab associated pet picture."""
        if self.entity_description.entity_picture:
            return self.entity_description.entity_picture(self.device)
        return None
    @property
    def native_unit_of_measurement(self) -> str | None:
        """Return the unit of measurement."""
        return self.entity_description.native_unit_of_measurement
    def check_smart_poll_trigger(self) -> bool:
        """Check if fast poll trigger condition is met.

        Returns True when the descriptor defines a ``smart_poll_trigger``
        predicate and it fires for the current device state.
        """
        if self.entity_description.smart_poll_trigger:
            return self.entity_description.smart_poll_trigger(self.device)
        return False
class PetkitSensorBt(PetkitEntity, SensorEntity):
    """Petkit Smart Devices Bluetooth Sensor class."""
    entity_description: PetKitSensorDesc
    def __init__(
        self,
        coordinator_bluetooth: PetkitBluetoothUpdateCoordinator,
        entity_description: PetKitSensorDesc,
        device: PetkitDevices,
    ) -> None:
        """Initialize the Bluetooth sensor class."""
        super().__init__(coordinator_bluetooth, device)
        self.coordinator_bluetooth = coordinator_bluetooth
        self.entity_description = entity_description
        self.device = device
    @property
    def native_value(self) -> Any:
        """Return the state of the Bluetooth sensor.

        NOTE(review): unlike PetkitSensor, the raw coordinator payload is
        returned directly (no descriptor lambda), and the truthiness check
        means a falsy payload reads as None — confirm this is intended.
        """
        device_data = self.coordinator_bluetooth.data.get(self.device.id)
        if device_data:
            return device_data
        return None
    @property
    def unique_id(self) -> str:
        """Return a unique ID for the Bluetooth sensor."""
        return f"{self.device.device_nfo.device_type}_{self.device.sn}_{self.entity_description.key}"
    @property
    def native_unit_of_measurement(self) -> str | None:
        """Return the unit of measurement."""
        return self.entity_description.native_unit_of_measurement
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | false |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/__init__.py | custom_components/petkit/__init__.py | """Custom integration to integrate Petkit Smart Devices with Home Assistant."""
from __future__ import annotations
from datetime import timedelta
from typing import TYPE_CHECKING
from pypetkitapi import PetKitClient
from homeassistant.const import (
CONF_PASSWORD,
CONF_REGION,
CONF_SCAN_INTERVAL,
CONF_TIME_ZONE,
CONF_USERNAME,
Platform,
)
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.loader import async_get_loaded_integration
from .const import (
BT_SECTION,
CONF_SCAN_INTERVAL_BLUETOOTH,
CONF_SCAN_INTERVAL_MEDIA,
COORDINATOR,
COORDINATOR_BLUETOOTH,
COORDINATOR_MEDIA,
DOMAIN,
LOGGER,
MEDIA_SECTION,
)
from .coordinator import (
PetkitBluetoothUpdateCoordinator,
PetkitDataUpdateCoordinator,
PetkitMediaUpdateCoordinator,
)
from .data import PetkitData
if TYPE_CHECKING:
from homeassistant.core import HomeAssistant
from .data import PetkitConfigEntry
# Entity platforms forwarded to during setup; keep in sync with the
# platform modules shipped by this integration.
PLATFORMS: list[Platform] = [
    Platform.SENSOR,
    Platform.BINARY_SENSOR,
    Platform.SWITCH,
    Platform.LIGHT,
    Platform.TEXT,
    Platform.BUTTON,
    Platform.NUMBER,
    Platform.SELECT,
    Platform.IMAGE,
    Platform.FAN,
]
async def async_setup_entry(
    hass: HomeAssistant,
    entry: PetkitConfigEntry,
) -> bool:
    """Set up this integration using UI.

    Creates the data, media and Bluetooth coordinators, builds the API
    client, performs the first refresh of each coordinator, forwards the
    entry to all entity platforms and stores the coordinators in
    ``hass.data`` for later lookup.
    """
    # Fall back to Home Assistant's own region/timezone when the config
    # entry does not carry them.
    country_from_ha = hass.config.country
    tz_from_ha = hass.config.time_zone
    coordinator = PetkitDataUpdateCoordinator(
        hass=hass,
        logger=LOGGER,
        name=f"{DOMAIN}.devices",
        update_interval=timedelta(seconds=entry.options[CONF_SCAN_INTERVAL]),
        config_entry=entry,
    )
    coordinator_media = PetkitMediaUpdateCoordinator(
        hass=hass,
        logger=LOGGER,
        name=f"{DOMAIN}.medias",
        update_interval=timedelta(
            minutes=entry.options[MEDIA_SECTION][CONF_SCAN_INTERVAL_MEDIA]
        ),
        config_entry=entry,
        data_coordinator=coordinator,
    )
    coordinator_bluetooth = PetkitBluetoothUpdateCoordinator(
        hass=hass,
        logger=LOGGER,
        name=f"{DOMAIN}.bluetooth",
        update_interval=timedelta(
            minutes=entry.options[BT_SECTION][CONF_SCAN_INTERVAL_BLUETOOTH]
        ),
        config_entry=entry,
        data_coordinator=coordinator,
    )
    entry.runtime_data = PetkitData(
        client=PetKitClient(
            username=entry.data[CONF_USERNAME],
            password=entry.data[CONF_PASSWORD],
            region=entry.data.get(CONF_REGION, country_from_ha),
            timezone=entry.data.get(CONF_TIME_ZONE, tz_from_ha),
            session=async_get_clientsession(hass),
        ),
        integration=async_get_loaded_integration(hass, entry.domain),
        coordinator=coordinator,
        coordinator_media=coordinator_media,
        coordinator_bluetooth=coordinator_bluetooth,
    )
    await coordinator.async_config_entry_first_refresh()
    await coordinator_media.async_config_entry_first_refresh()
    await coordinator_bluetooth.async_config_entry_first_refresh()
    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
    entry.async_on_unload(entry.add_update_listener(async_reload_entry))
    # Fixed: the media and bluetooth slots previously stored the data
    # coordinator (copy-paste bug); each key now holds its own coordinator.
    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN][COORDINATOR] = coordinator
    hass.data[DOMAIN][COORDINATOR_MEDIA] = coordinator_media
    hass.data[DOMAIN][COORDINATOR_BLUETOOTH] = coordinator_bluetooth
    return True
async def async_unload_entry(
    hass: HomeAssistant,
    entry: PetkitConfigEntry,
) -> bool:
    """Handle removal of an entry.

    Returns True when every forwarded platform unloaded successfully.
    """
    return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
async def async_reload_entry(
    hass: HomeAssistant,
    entry: PetkitConfigEntry,
) -> None:
    """Reload config entry.

    Performs an explicit unload + setup cycle; registered as the entry's
    update listener in async_setup_entry.
    """
    await async_unload_entry(hass, entry)
    await async_setup_entry(hass, entry)
async def async_update_options(hass: HomeAssistant, entry: PetkitConfigEntry) -> None:
    """Update options by reloading the config entry."""
    await hass.config_entries.async_reload(entry.entry_id)
async def async_remove_config_entry_device(
    hass: HomeAssistant, config_entry: PetkitConfigEntry, device_entry: dr.DeviceEntry
) -> bool:
    """Remove a config entry from a device.

    Always allows manual device removal from the UI; no extra cleanup is
    performed here.
    """
    return True
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | false |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/entity.py | custom_components/petkit/entity.py | """Petkit Smart Devices Entity class."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any, Generic, TypeVar
from pypetkitapi import Feeder, Litter, Pet, Purifier, WaterFountain
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC, DeviceInfo
from homeassistant.helpers.entity import EntityDescription
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, LOGGER, PETKIT_DEVICES_MAPPING
from .coordinator import (
PetkitBluetoothUpdateCoordinator,
PetkitDataUpdateCoordinator,
PetkitMediaUpdateCoordinator,
)
from .data import PetkitDevices
# Type variable constrained to the Petkit device models handled by entities.
_DevicesT = TypeVar("_DevicesT", bound=Feeder | Litter | WaterFountain | Purifier | Pet)
@dataclass(frozen=True, kw_only=True)
class PetKitDescSensorBase(EntityDescription):
    """Base descriptor shared by all Petkit entity descriptions.

    Carries the value-extraction lambda plus the device-type filters used
    by ``is_supported`` to decide whether a descriptor applies to a
    concrete device.
    """
    # Lambda extracting this entity's value from a device object.
    value: Callable[[_DevicesT], Any] | None = None
    ignore_types: list[str] | None = None  # List of device types to ignore
    only_for_types: list[str] | None = None  # List of device types to support
    # Device types added unconditionally, bypassing the value probe.
    force_add: list[str] | None = None
    # Optional lambda returning an entity picture URL for the device/pet.
    entity_picture: Callable[[PetkitDevices], str | None] | None = None
    def is_supported(self, device: _DevicesT) -> bool:
        """Check if the entity is supported by trying to execute the value lambda.

        Precedence: force_add wins, then ignore_types, then only_for_types,
        and finally the value-lambda probe.
        """
        if not isinstance(device, Feeder | Litter | WaterFountain | Purifier | Pet):
            LOGGER.error(
                f"Device instance is not of expected type: {type(device)} can't check support"
            )
            return False
        device_type = getattr(device.device_nfo, "device_type", None)
        if not device_type:
            LOGGER.error(f"Entities {device.name} has no type, can't check support")
            return False
        # Filters are declared lowercase; normalize before comparing.
        device_type = device_type.lower()
        if self._is_force_added(device_type):
            return True
        if self._is_ignored(device_type):
            return False
        if self._is_not_in_supported_types(device_type):
            return False
        return self._check_value_support(device)
    def _is_force_added(self, device_type: str) -> bool:
        """Check if the device is in the force_add list."""
        if self.force_add and device_type in self.force_add:
            LOGGER.debug(f"{device_type} force add for '{self.key}'")
            return True
        return False
    def _is_ignored(self, device_type: str) -> bool:
        """Check if the device is in the ignore_types list."""
        if self.ignore_types and device_type in self.ignore_types:
            LOGGER.debug(f"{device_type} force ignore for '{self.key}'")
            return True
        return False
    def _is_not_in_supported_types(self, device_type: str) -> bool:
        """Check if the device is not in the only_for_types list."""
        if self.only_for_types and device_type not in self.only_for_types:
            LOGGER.debug(f"{device_type} is NOT COMPATIBLE with '{self.key}'")
            return True
        return False
    def _check_value_support(self, device: _DevicesT) -> bool:
        """Check if the device supports the value lambda.

        A descriptor is unsupported when evaluating its lambda raises
        AttributeError or yields None; descriptors without a lambda are
        considered supported.
        """
        if self.value is not None:
            try:
                result = self.value(device)
                if result is None:
                    LOGGER.debug(
                        f"{device.device_nfo.device_type} DOES NOT support '{self.key}' (value is None)"
                    )
                    return False
                LOGGER.debug(f"{device.device_nfo.device_type} supports '{self.key}'")
            except AttributeError:
                LOGGER.debug(
                    f"{device.device_nfo.device_type} DOES NOT support '{self.key}'"
                )
                return False
        return True
class PetkitEntity(
    CoordinatorEntity[
        PetkitDataUpdateCoordinator
        | PetkitMediaUpdateCoordinator
        | PetkitBluetoothUpdateCoordinator
    ],
    Generic[_DevicesT],
):
    """Petkit Entity class.

    Common base for all Petkit entities: wires the coordinator, stores the
    device object and builds the device registry information.
    """
    _attr_has_entity_name = True
    def __init__(
        self,
        coordinator: (
            PetkitDataUpdateCoordinator
            | PetkitMediaUpdateCoordinator
            | PetkitBluetoothUpdateCoordinator
        ),
        device: _DevicesT,
    ) -> None:
        """Initialize."""
        super().__init__(coordinator)
        self.device = device
        # NOTE(review): both _attr_unique_id and _attr_device_info are
        # shadowed by the unique_id / device_info properties below — confirm
        # whether these assignments are still needed.
        self._attr_unique_id = coordinator.config_entry.entry_id
        self._attr_device_info = DeviceInfo(
            identifiers={
                (
                    coordinator.config_entry.domain,
                    coordinator.config_entry.entry_id,
                ),
            },
        )
    @property
    def unique_id(self) -> str:
        """Return a unique ID composed of device type, serial and entity key."""
        return f"{self.device.device_nfo.device_type}_{self.device.sn}_{self.entity_description.key}"
    @property
    def device_info(self) -> DeviceInfo:
        """Return the device information.

        Resolves the marketing model name from PETKIT_DEVICES_MAPPING; pets
        have no network/firmware attributes, so those fields are only set
        for real hardware devices.
        """
        if self.device.device_nfo.device_type:
            device_type = self.device.device_nfo.device_type
            device_model = PETKIT_DEVICES_MAPPING.get(
                str(self.device.device_nfo.type_code) + str(device_type.lower()),
                "Unknown Device",
            )
        else:
            device_type = "Unknown"
            device_model = "Unknown Device"
        device_info = DeviceInfo(
            identifiers={(DOMAIN, self.device.sn)},
            manufacturer="Petkit",
            model=device_model,
            model_id=device_type.upper(),
            name=self.device.name,
        )
        if not isinstance(self.device, Pet):
            if self.device.mac is not None:
                device_info["connections"] = {(CONNECTION_NETWORK_MAC, self.device.mac)}
            if self.device.firmware is not None:
                device_info["sw_version"] = str(self.device.firmware)
            if self.device.hardware is not None:
                device_info["hw_version"] = str(self.device.hardware)
            if self.device.sn is not None:
                device_info["serial_number"] = str(self.device.sn)
        return device_info
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | false |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/button.py | custom_components/petkit/button.py | """Switch platform for Petkit Smart Devices integration."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from dataclasses import dataclass
from typing import TYPE_CHECKING
from pypetkitapi import (
D3,
D4H,
D4S,
D4SH,
DEVICES_FEEDER,
DEVICES_LITTER_BOX,
DEVICES_WATER_FOUNTAIN,
LITTER_WITH_CAMERA,
T4,
T5,
T7,
DeviceAction,
DeviceCommand,
Feeder,
FeederCommand,
LBCommand,
Litter,
LitterCommand,
Pet,
Purifier,
WaterFountain,
)
from pypetkitapi.command import FountainAction
from homeassistant.components.button import ButtonEntity, ButtonEntityDescription
from .const import LOGGER, POWER_ONLINE_STATE
from .entity import PetKitDescSensorBase, PetkitEntity
if TYPE_CHECKING:
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .coordinator import PetkitDataUpdateCoordinator
from .data import PetkitConfigEntry, PetkitDevices
@dataclass(frozen=True, kw_only=True)
class PetKitButtonDesc(PetKitDescSensorBase, ButtonEntityDescription):
    """A class that describes Petkit button entities.

    Fixes the `action` annotation: the original `Callable[PetkitConfigEntry]`
    is not a valid typing form (Callable needs an argument list and a return
    type, or `...`).
    """

    # Coroutine factory invoked on press with (client, device).
    action: Callable[..., Any]
    # Optional extra availability predicate evaluated against fresh device data.
    is_available: Callable[[PetkitDevices], bool] | None = None
# Button descriptions shared by every device type (currently none).
COMMON_ENTITIES = []
# Maps each pypetkitapi device class to the button descriptions it exposes.
# `only_for_types` / `ignore_types` / `force_add` filtering is applied later
# by `is_supported` in async_setup_entry.
BUTTON_MAPPING: dict[type[PetkitDevices], list[PetKitButtonDesc]] = {
    Feeder: [
        *COMMON_ENTITIES,
        PetKitButtonDesc(
            key="Reset desiccant",
            translation_key="reset_desiccant",
            action=lambda api, device: api.send_api_request(
                device.id, FeederCommand.RESET_DESICCANT
            ),
            only_for_types=DEVICES_FEEDER,
        ),
        PetKitButtonDesc(
            key="Cancel manual feed",
            translation_key="cancel_manual_feed",
            action=lambda api, device: api.send_api_request(
                device.id, FeederCommand.CANCEL_MANUAL_FEED
            ),
            only_for_types=DEVICES_FEEDER,
        ),
        PetKitButtonDesc(
            key="Call pet",
            translation_key="call_pet",
            action=lambda api, device: api.send_api_request(
                device.id, FeederCommand.CALL_PET
            ),
            only_for_types=[D3],
        ),
        PetKitButtonDesc(
            key="Food replenished",
            translation_key="food_replenished",
            action=lambda api, device: api.send_api_request(
                device.id, FeederCommand.FOOD_REPLENISHED
            ),
            only_for_types=[D4S, D4H, D4SH],
        ),
    ],
    Litter: [
        *COMMON_ENTITIES,
        PetKitButtonDesc(
            key="Scoop",
            translation_key="start_scoop",
            action=lambda api, device: api.send_api_request(
                device.id,
                DeviceCommand.CONTROL_DEVICE,
                {DeviceAction.START: LBCommand.CLEANING},
            ),
            only_for_types=DEVICES_LITTER_BOX,
            # Only offered while no job is running.
            is_available=lambda device: device.state.work_state is None,
        ),
        PetKitButtonDesc(
            key="Maintenance mode",
            translation_key="start_maintenance",
            action=lambda api, device: api.send_api_request(
                device.id,
                DeviceCommand.CONTROL_DEVICE,
                {DeviceAction.START: LBCommand.MAINTENANCE},
            ),
            only_for_types=[T4, T5],
            is_available=lambda device: device.state.work_state is None,
        ),
        PetKitButtonDesc(
            key="Exit maintenance mode",
            translation_key="exit_maintenance",
            action=lambda api, device: api.send_api_request(
                device.id,
                DeviceCommand.CONTROL_DEVICE,
                {DeviceAction.END: LBCommand.MAINTENANCE},
            ),
            only_for_types=[T4, T5],
            # Only while the device reports work_mode 9 — presumably the
            # maintenance state; confirm against the pypetkitapi docs.
            is_available=lambda device: device.state.work_state is not None
            and device.state.work_state.work_mode == 9,
        ),
        PetKitButtonDesc(
            key="Dump litter",
            translation_key="dump_litter",
            action=lambda api, device: api.send_api_request(
                device.id,
                DeviceCommand.CONTROL_DEVICE,
                {DeviceAction.START: LBCommand.DUMPING},
            ),
            only_for_types=DEVICES_LITTER_BOX,
            ignore_types=[T7],  # T7 does not support Dumping
            is_available=lambda device: device.state.work_state is None,
        ),
        # Pause/Continue/Reset act on whatever work_mode is currently running,
        # so they are only available while a job is in progress.
        PetKitButtonDesc(
            key="Pause",
            translation_key="action_pause",
            action=lambda api, device: api.send_api_request(
                device.id,
                DeviceCommand.CONTROL_DEVICE,
                {
                    DeviceAction.STOP: api.petkit_entities[
                        device.id
                    ].state.work_state.work_mode
                },
            ),
            only_for_types=DEVICES_LITTER_BOX,
            is_available=lambda device: device.state.work_state is not None,
        ),
        PetKitButtonDesc(
            key="Continue",
            translation_key="action_continue",
            action=lambda api, device: api.send_api_request(
                device.id,
                DeviceCommand.CONTROL_DEVICE,
                {
                    DeviceAction.CONTINUE: api.petkit_entities[
                        device.id
                    ].state.work_state.work_mode
                },
            ),
            only_for_types=DEVICES_LITTER_BOX,
            is_available=lambda device: device.state.work_state is not None,
        ),
        PetKitButtonDesc(
            key="Reset",
            translation_key="action_reset",
            action=lambda api, device: api.send_api_request(
                device.id,
                DeviceCommand.CONTROL_DEVICE,
                {
                    DeviceAction.END: api.petkit_entities[
                        device.id
                    ].state.work_state.work_mode
                },
            ),
            only_for_types=DEVICES_LITTER_BOX,
            is_available=lambda device: device.state.work_state is not None,
        ),
        PetKitButtonDesc(
            # For T3/T4 only
            key="Deodorize T3 T4",
            translation_key="deodorize",
            action=lambda api, device: api.send_api_request(
                device.id,
                DeviceCommand.CONTROL_DEVICE,
                {DeviceAction.START: LBCommand.ODOR_REMOVAL},
            ),
            only_for_types=[T4],
            # Only added when a K3 deodorizer is bound to the litter box.
            value=lambda device: device.k3_device,
        ),
        PetKitButtonDesc(
            # For T5 / T7 only using the N60 deodorizer
            key="Deodorize T5 T7",
            translation_key="deodorize",
            action=lambda api, device: api.send_api_request(
                device.id,
                DeviceCommand.CONTROL_DEVICE,
                {DeviceAction.START: LBCommand.ODOR_REMOVAL},
            ),
            only_for_types=[T5, T7],
            force_add=[T5, T7],
            is_available=lambda device: device.state.refresh_state is None,
        ),
        PetKitButtonDesc(
            key="Reset N50 odor eliminator",
            translation_key="reset_n50_odor_eliminator",
            action=lambda api, device: api.send_api_request(
                device.id, LitterCommand.RESET_N50_DEODORIZER
            ),
            only_for_types=DEVICES_LITTER_BOX,
            ignore_types=[T7],
        ),
        PetKitButtonDesc(
            key="Reset N60 odor eliminator",
            translation_key="reset_n60_odor_eliminator",
            action=lambda api, device: api.send_api_request(
                device.id,
                DeviceCommand.CONTROL_DEVICE,
                {DeviceAction.START: LBCommand.RESET_N60_DEODOR},
            ),
            only_for_types=LITTER_WITH_CAMERA,
        ),
        PetKitButtonDesc(
            # NOTE(review): unlike its siblings this entry has no
            # only_for_types, so it is offered to every litter box — confirm
            # that is intended.
            key="Level litter",
            translation_key="level_litter",
            action=lambda api, device: api.send_api_request(
                device.id,
                DeviceCommand.CONTROL_DEVICE,
                {DeviceAction.START: LBCommand.LEVELING},
            ),
            is_available=lambda device: device.state.work_state is None,
        ),
    ],
    WaterFountain: [
        *COMMON_ENTITIES,
        PetKitButtonDesc(
            key="Reset filter",
            translation_key="reset_filter",
            # Fountains are driven over the BLE relay, not the HTTP API.
            action=lambda api, device: api.bluetooth_manager.send_ble_command(
                device.id, FountainAction.RESET_FILTER
            ),
            only_for_types=DEVICES_WATER_FOUNTAIN,
        ),
    ],
    Purifier: [*COMMON_ENTITIES],
    Pet: [*COMMON_ENTITIES],
}
async def async_setup_entry(
    hass: HomeAssistant,
    entry: PetkitConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up button entities from a config entry."""
    coordinator = entry.runtime_data.coordinator
    entities: list[PetkitButton] = []
    for device in entry.runtime_data.client.petkit_entities.values():
        for device_type, descriptions in BUTTON_MAPPING.items():
            if not isinstance(device, device_type):
                continue
            # Only instantiate descriptions supported by this exact device.
            entities.extend(
                PetkitButton(
                    coordinator=coordinator,
                    entity_description=description,
                    device=device,
                )
                for description in descriptions
                if description.is_supported(device)
            )
    LOGGER.debug(
        "BUTTON : Adding %s (on %s available)",
        len(entities),
        sum(len(descriptors) for descriptors in BUTTON_MAPPING.values()),
    )
    async_add_entities(entities)
class PetkitButton(PetkitEntity, ButtonEntity):
    """Petkit Smart Devices Button class."""

    entity_description: PetKitButtonDesc

    def __init__(
        self,
        coordinator: PetkitDataUpdateCoordinator,
        entity_description: PetKitButtonDesc,
        device: Feeder | Litter | WaterFountain,
    ) -> None:
        """Initialize the button entity."""
        super().__init__(coordinator, device)
        self.coordinator = coordinator
        self.entity_description = entity_description
        self.device = device

    @property
    def available(self) -> bool:
        """Only make available if device is online."""
        device_data = self.coordinator.data.get(self.device.id)
        try:
            online = device_data.state.pim in POWER_ONLINE_STATE
        except AttributeError:
            # No power state available — keep the pre-existing behavior of
            # treating the device as online.
            online = True
        if not online:
            return False
        predicate = self.entity_description.is_available
        if predicate:
            result = predicate(device_data)
            LOGGER.debug(
                "Button %s availability result is: %s",
                self.entity_description.key,
                result,
            )
            return result
        return True

    async def async_press(self) -> None:
        """Handle the button press."""
        LOGGER.debug("Button pressed: %s", self.entity_description.key)
        # Poll faster for a short window so the effect shows up quickly.
        self.coordinator.enable_smart_polling(12)
        await self.entity_description.action(
            self.coordinator.config_entry.runtime_data.client, self.device
        )
        await asyncio.sleep(1.5)
        await self.coordinator.async_request_refresh()
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | false |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/text.py | custom_components/petkit/text.py | """Switch platform for Petkit Smart Devices integration."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from datetime import timedelta
from typing import TYPE_CHECKING, Any
from pypetkitapi import (
D3,
D4,
D4H,
D4S,
D4SH,
FEEDER,
FEEDER_MINI,
Feeder,
FeederCommand,
Litter,
Pet,
Purifier,
WaterFountain,
)
from homeassistant.components.text import TextEntity, TextEntityDescription
from .const import INPUT_FEED_PATTERN, LOGGER, MIN_SCAN_INTERVAL, POWER_ONLINE_STATE
from .entity import PetKitDescSensorBase, PetkitEntity
if TYPE_CHECKING:
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .coordinator import PetkitDataUpdateCoordinator
from .data import PetkitConfigEntry, PetkitDevices
@dataclass(frozen=True, kw_only=True)
class PetkitTextDesc(PetKitDescSensorBase, TextEntityDescription):
    """A class that describes Petkit text entities."""
    # Default value the input is reset to after every command.
    native_value: str | None = None
    # Coroutine factory invoked with (client, device, entered value).
    action: Callable[[PetkitConfigEntry, PetkitDevices, str], Any] | None = None
# Text descriptions shared by every device type (currently none).
COMMON_ENTITIES = []
# Maps each pypetkitapi device class to the text inputs it exposes.
# NOTE(review): every entry reads `device.settings.light_mode` as its `value`,
# which looks like a copy/paste from another platform — confirm intent.
# NOTE(review): native_max=2 caps input at two characters, but the D3 feeder
# accepts amounts up to 200 (three digits) in async_set_value — confirm.
TEXT_MAPPING: dict[type[PetkitDevices], list[PetkitTextDesc]] = {
    Feeder: [
        *COMMON_ENTITIES,
        PetkitTextDesc(
            key="Manual feed single",
            translation_key="manual_feed_single",
            value=lambda device: device.settings.light_mode,
            native_min=1,
            native_max=2,
            pattern=INPUT_FEED_PATTERN,
            native_value="0",
            action=lambda api, device, amount_value: api.send_api_request(
                device.id, FeederCommand.MANUAL_FEED, {"amount": int(amount_value)}
            ),
            only_for_types=[FEEDER, FEEDER_MINI, D3, D4, D4H],
        ),
        # Dual-hopper feeders take separate amounts per hopper.
        PetkitTextDesc(
            key="Manual feed dual h1",
            translation_key="manual_feed_dual_h1",
            value=lambda device: device.settings.light_mode,
            native_min=1,
            native_max=2,
            pattern=INPUT_FEED_PATTERN,
            native_value="0",
            action=lambda api, device, amount_value: api.send_api_request(
                device.id,
                FeederCommand.MANUAL_FEED,
                {"amount1": int(amount_value), "amount2": 0},
            ),
            only_for_types=[D4S, D4SH],
        ),
        PetkitTextDesc(
            key="Manual feed dual h2",
            translation_key="manual_feed_dual_h2",
            value=lambda device: device.settings.light_mode,
            native_min=1,
            native_max=2,
            pattern=INPUT_FEED_PATTERN,
            native_value="0",
            action=lambda api, device, amount_value: api.send_api_request(
                device.id,
                FeederCommand.MANUAL_FEED,
                {"amount1": 0, "amount2": int(amount_value)},
            ),
            only_for_types=[D4S, D4SH],
        ),
    ],
    Litter: [*COMMON_ENTITIES],
    WaterFountain: [*COMMON_ENTITIES],
    Purifier: [*COMMON_ENTITIES],
    Pet: [*COMMON_ENTITIES],
}
async def async_setup_entry(
    hass: HomeAssistant,
    entry: PetkitConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up text entities from a config entry."""
    coordinator = entry.runtime_data.coordinator
    entities: list[PetkitText] = []
    for device in entry.runtime_data.client.petkit_entities.values():
        for device_type, descriptions in TEXT_MAPPING.items():
            if not isinstance(device, device_type):
                continue
            # Only instantiate descriptions supported by this exact device.
            entities.extend(
                PetkitText(
                    coordinator=coordinator,
                    entity_description=description,
                    device=device,
                )
                for description in descriptions
                if description.is_supported(device)
            )
    LOGGER.debug(
        "TEXT : Adding %s (on %s available)",
        len(entities),
        sum(len(descriptors) for descriptors in TEXT_MAPPING.values()),
    )
    async_add_entities(entities)
class PetkitText(PetkitEntity, TextEntity):
    """Text inputs used to trigger manual feeding on Petkit feeders."""

    entity_description: PetkitTextDesc

    def __init__(
        self,
        coordinator: PetkitDataUpdateCoordinator,
        entity_description: PetkitTextDesc,
        device: PetkitDevices,
    ) -> None:
        """Initialize the text entity."""
        super().__init__(coordinator, device)
        self.coordinator = coordinator
        self.entity_description = entity_description
        self.device = device

    @property
    def native_max(self) -> int:
        """Maximum number of characters accepted."""
        return self.entity_description.native_max

    @property
    def native_min(self) -> int:
        """Minimum number of characters accepted."""
        return self.entity_description.native_min

    @property
    def pattern(self) -> str | None:
        """Regex pattern used to validate the input."""
        return self.entity_description.pattern

    @property
    def native_value(self) -> str:
        """Always reset the displayed value to the description default."""
        return self.entity_description.native_value

    @property
    def available(self) -> bool:
        """Available only while the device reports an online power state."""
        data = self.coordinator.data.get(self.device.id)
        if not hasattr(data.state, "pim"):
            return True
        return data.state.pim in POWER_ONLINE_STATE

    async def async_set_value(self, value: str) -> None:
        """Validate the requested amount then send the manual-feed command."""
        device_type = self.device.device_nfo.device_type
        if device_type in (D4, D4H):
            # D4/D4H: 10..50 in steps of 10.
            valid_values = [10, 20, 30, 40, 50]
        elif device_type == FEEDER_MINI:
            # Feeder Mini: 0..50 in steps of 5.
            valid_values = list(range(0, 51, 5))
        elif device_type == D3:
            # D3: any amount from 5 to 200.
            valid_values = list(range(5, 201))
        else:
            # Remaining feeders (e.g. D4S/D4SH): 1..10.
            valid_values = list(range(1, 11))
        if int(value) not in valid_values:
            raise ValueError(
                f"Feeding value '{value}' is not valid for this feeder. Valid values are: {valid_values}"
            )
        # Poll faster for a short window so the feed result shows up quickly.
        self.coordinator.update_interval = timedelta(seconds=MIN_SCAN_INTERVAL)
        self.coordinator.fast_poll_tic = 12
        LOGGER.debug(
            "Setting value for : %s with value : %s", self.entity_description.key, value
        )
        await self.entity_description.action(
            self.coordinator.config_entry.runtime_data.client, self.device, value
        )
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | false |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/select.py | custom_components/petkit/select.py | """Switch platform for Petkit Smart Devices integration."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any
from pypetkitapi import (
D4H,
D4SH,
T7,
DeviceCommand,
Feeder,
Litter,
Pet,
Purifier,
WaterFountain,
)
from homeassistant.components.select import SelectEntity, SelectEntityDescription
from homeassistant.const import EntityCategory
from .const import (
CLEANING_INTERVAL_OPT,
IA_DETECTION_SENSITIVITY_OPT,
LITTER_TYPE_OPT,
LOGGER,
POWER_ONLINE_STATE,
SURPLUS_FOOD_LEVEL_OPT,
)
from .entity import PetKitDescSensorBase, PetkitEntity
if TYPE_CHECKING:
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .coordinator import PetkitDataUpdateCoordinator
from .data import PetkitConfigEntry, PetkitDevices
@dataclass(frozen=True, kw_only=True)
class PetKitSelectDesc(PetKitDescSensorBase, SelectEntityDescription):
    """A class that describes Petkit select entities.

    Fixes the `action` annotation: the original `Callable[PetkitConfigEntry]`
    is not a valid typing form (Callable needs an argument list and a return
    type, or `...`).
    """

    # Maps fresh device data to the currently selected option label.
    current_option: Callable[[PetkitDevices], str] | None = None
    # Produces the full list of selectable option labels.
    options: Callable[[], list[str]] | None = None
    # Coroutine factory invoked with (client, device, selected option label).
    action: Callable[..., Any]
# Select descriptions shared by every device type (currently none).
COMMON_ENTITIES = []
# Maps each pypetkitapi device class to the select entities it exposes.
# Each entry translates between the API's integer codes and human-readable
# labels via the *_OPT dicts; the `next(...)` in every action is the reverse
# lookup (label -> code).
SELECT_MAPPING: dict[type[PetkitDevices], list[PetKitSelectDesc]] = {
    Feeder: [
        *COMMON_ENTITIES,
        PetKitSelectDesc(
            key="Surplus level",
            translation_key="surplus_level",
            current_option=lambda device: SURPLUS_FOOD_LEVEL_OPT[
                device.settings.surplus_standard
            ],
            options=lambda: list(SURPLUS_FOOD_LEVEL_OPT.values()),
            action=lambda api, device, opt_value: api.send_api_request(
                device.id,
                DeviceCommand.UPDATE_SETTING,
                {
                    "surplusStandard": next(
                        key
                        for key, value in SURPLUS_FOOD_LEVEL_OPT.items()
                        if value == opt_value
                    )
                },
            ),
            only_for_types=[D4H, D4SH],
        ),
        PetKitSelectDesc(
            key="Eat detection sensitivity",
            translation_key="eat_detection_sensitivity",
            current_option=lambda device: IA_DETECTION_SENSITIVITY_OPT[
                device.settings.eat_sensitivity
            ],
            options=lambda: list(IA_DETECTION_SENSITIVITY_OPT.values()),
            action=lambda api, device, opt_value: api.send_api_request(
                device.id,
                DeviceCommand.UPDATE_SETTING,
                {
                    "eatSensitivity": next(
                        key
                        for key, value in IA_DETECTION_SENSITIVITY_OPT.items()
                        if value == opt_value
                    )
                },
            ),
            entity_category=EntityCategory.CONFIG,
            only_for_types=[D4H, D4SH],
        ),
        PetKitSelectDesc(
            key="Pet detection sensitivity",
            translation_key="pet_detection_sensitivity",
            current_option=lambda device: IA_DETECTION_SENSITIVITY_OPT[
                device.settings.pet_sensitivity
            ],
            options=lambda: list(IA_DETECTION_SENSITIVITY_OPT.values()),
            action=lambda api, device, opt_value: api.send_api_request(
                device.id,
                DeviceCommand.UPDATE_SETTING,
                {
                    "petSensitivity": next(
                        key
                        for key, value in IA_DETECTION_SENSITIVITY_OPT.items()
                        if value == opt_value
                    )
                },
            ),
            entity_category=EntityCategory.CONFIG,
            only_for_types=[D4H, D4SH],
        ),
        PetKitSelectDesc(
            key="Move detection sensitivity",
            translation_key="move_detection_sensitivity",
            current_option=lambda device: IA_DETECTION_SENSITIVITY_OPT[
                device.settings.move_sensitivity
            ],
            options=lambda: list(IA_DETECTION_SENSITIVITY_OPT.values()),
            action=lambda api, device, opt_value: api.send_api_request(
                device.id,
                DeviceCommand.UPDATE_SETTING,
                {
                    "moveSensitivity": next(
                        key
                        for key, value in IA_DETECTION_SENSITIVITY_OPT.items()
                        if value == opt_value
                    )
                },
            ),
            entity_category=EntityCategory.CONFIG,
            only_for_types=[D4H, D4SH],
        ),
    ],
    Litter: [
        *COMMON_ENTITIES,
        PetKitSelectDesc(
            key="Litter type",
            translation_key="litter_type",
            current_option=lambda device: LITTER_TYPE_OPT[device.settings.sand_type],
            options=lambda: list(LITTER_TYPE_OPT.values()),
            action=lambda api, device, opt_value: api.send_api_request(
                device.id,
                DeviceCommand.UPDATE_SETTING,
                {
                    "sandType": next(
                        key
                        for key, value in LITTER_TYPE_OPT.items()
                        if value == opt_value
                    )
                },
            ),
            entity_category=EntityCategory.CONFIG,
            ignore_types=[T7],
        ),
        PetKitSelectDesc(
            key="Avoid repeat cleaning interval",
            translation_key="avoid_repeat_cleaning_interval",
            current_option=lambda device: CLEANING_INTERVAL_OPT[
                device.settings.auto_interval_min
            ],
            options=lambda: list(CLEANING_INTERVAL_OPT.values()),
            action=lambda api, device, opt_value: api.send_api_request(
                device.id,
                DeviceCommand.UPDATE_SETTING,
                {
                    "autoIntervalMin": next(
                        key
                        for key, value in CLEANING_INTERVAL_OPT.items()
                        if value == opt_value
                    )
                },
            ),
            entity_category=EntityCategory.CONFIG,
        ),
    ],
    WaterFountain: [*COMMON_ENTITIES],
    Purifier: [*COMMON_ENTITIES],
    Pet: [*COMMON_ENTITIES],
}
async def async_setup_entry(
    hass: HomeAssistant,
    entry: PetkitConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up select entities from a config entry."""
    coordinator = entry.runtime_data.coordinator
    entities: list[PetkitSelect] = []
    for device in entry.runtime_data.client.petkit_entities.values():
        for device_type, descriptions in SELECT_MAPPING.items():
            if not isinstance(device, device_type):
                continue
            # Only instantiate descriptions supported by this exact device.
            entities.extend(
                PetkitSelect(
                    coordinator=coordinator,
                    entity_description=description,
                    device=device,
                )
                for description in descriptions
                if description.is_supported(device)
            )
    LOGGER.debug(
        "SELECT : Adding %s (on %s available)",
        len(entities),
        sum(len(descriptors) for descriptors in SELECT_MAPPING.values()),
    )
    async_add_entities(entities)
class PetkitSelect(PetkitEntity, SelectEntity):
    """Petkit Smart Devices Select class."""

    entity_description: PetKitSelectDesc

    def __init__(
        self,
        coordinator: PetkitDataUpdateCoordinator,
        entity_description: PetKitSelectDesc,
        device: Feeder | Litter | WaterFountain,
    ) -> None:
        """Initialize the select entity."""
        super().__init__(coordinator, device)
        self.coordinator = coordinator
        self.entity_description = entity_description
        self.device = device

    @property
    def current_option(self) -> str | None:
        """Return the currently selected option, or None without fresh data."""
        data = self.coordinator.data.get(self.device.id)
        return self.entity_description.current_option(data) if data else None

    @property
    def options(self) -> list[str]:
        """Return every selectable option label."""
        return self.entity_description.options()

    @property
    def available(self) -> bool:
        """Available only while the device reports an online power state."""
        data = self.coordinator.data.get(self.device.id)
        if not hasattr(data.state, "pim"):
            return True
        return data.state.pim in POWER_ONLINE_STATE

    async def async_select_option(self, value: str) -> None:
        """Forward the selected option to the Petkit API."""
        LOGGER.debug(
            "Setting value for : %s with value : %s", self.entity_description.key, value
        )
        await self.entity_description.action(
            self.coordinator.config_entry.runtime_data.client, self.device, value
        )
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | false |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/light.py | custom_components/petkit/light.py | """Light platform for Petkit Smart Devices integration."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from dataclasses import dataclass
from datetime import timedelta
from typing import TYPE_CHECKING, Any
from pypetkitapi import LITTER_WITH_CAMERA, T4, DeviceAction, DeviceCommand, LBCommand
from homeassistant.components.light import (
ColorMode,
LightEntity,
LightEntityDescription,
)
from .const import LOGGER, MIN_SCAN_INTERVAL, POWER_ONLINE_STATE
from .entity import PetKitDescSensorBase, PetkitEntity
if TYPE_CHECKING:
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .data import PetkitConfigEntry
@dataclass(frozen=True, kw_only=True)
class PetKitLightDesc(PetKitDescSensorBase, LightEntityDescription):
    """A class for describing Petkit light entities."""
    # Coroutine factories invoked with (client, device) to switch the light.
    turn_on: Callable[[Any, Any], Any] | None = None
    turn_off: Callable[[Any, Any], Any] | None = None
def get_k3_light_value(device):
    """Return the light state for a litter box's paired K3 deodorizer.

    Returns None when no K3 device is bound (entity is skipped), the
    reported light state when present, and 0 when the K3 exists but no
    light state is reported.
    """
    if device.k3_device is None:
        return None
    state = device.state.light_state
    return 0 if state is None else state
# Light descriptions for litter-box lights; both variants send the same
# START/END LIGHT control command, they differ only in how state is read.
LIGHT_ENTITIES = [
    PetKitLightDesc(
        # For K3 or K3 (binded to T4)
        key="Light K3",
        translation_key="light",
        value=get_k3_light_value,
        turn_on=lambda api, device: api.send_api_request(
            device.id,
            DeviceCommand.CONTROL_DEVICE,
            {DeviceAction.START: LBCommand.LIGHT},
        ),
        turn_off=lambda api, device: api.send_api_request(
            device.id,
            DeviceCommand.CONTROL_DEVICE,
            {DeviceAction.END: LBCommand.LIGHT},
        ),
        only_for_types=[T4],
    ),
    PetKitLightDesc(
        # For T5 / T6
        key="Light camera",
        translation_key="light",
        value=lambda device: (
            device.state.light_state.work_process
            if device.state.light_state is not None
            else 0
        ),
        turn_on=lambda api, device: api.send_api_request(
            device.id,
            DeviceCommand.CONTROL_DEVICE,
            {DeviceAction.START: LBCommand.LIGHT},
        ),
        turn_off=lambda api, device: api.send_api_request(
            device.id,
            DeviceCommand.CONTROL_DEVICE,
            {DeviceAction.END: LBCommand.LIGHT},
        ),
        only_for_types=LITTER_WITH_CAMERA,
    ),
]
async def async_setup_entry(
    hass: HomeAssistant,
    entry: PetkitConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up light entities using config entry."""
    coordinator = entry.runtime_data.coordinator
    entities: list[PetkitLight] = []
    for device in entry.runtime_data.client.petkit_entities.values():
        # Only instantiate descriptions supported by this exact device.
        entities.extend(
            PetkitLight(
                coordinator=coordinator,
                entity_description=description,
                device=device,
            )
            for description in LIGHT_ENTITIES
            if description.is_supported(device)
        )
    LOGGER.debug(
        "LIGHT: Adding %s light entities.",
        len(entities),
    )
    async_add_entities(entities)
class PetkitLight(PetkitEntity, LightEntity):
    """Petkit Smart Devices Light class."""

    # These lights only support plain on/off, no brightness or color.
    _attr_supported_color_modes = {ColorMode.ONOFF}
    _attr_color_mode = ColorMode.ONOFF
    entity_description: PetKitLightDesc

    def __init__(
        self,
        coordinator,
        entity_description: PetKitLightDesc,
        device: Any,
    ) -> None:
        """Initialize the light entity."""
        super().__init__(coordinator, device)
        self.coordinator = coordinator
        self.entity_description = entity_description
        self.device = device

    @property
    def available(self) -> bool:
        """Available only while the device reports an online power state."""
        data = self.coordinator.data.get(self.device.id)
        if not hasattr(data.state, "pim"):
            return True
        return data.state.pim in POWER_ONLINE_STATE

    @property
    def is_on(self) -> bool | None:
        """Return True while the light is reported on, None without data."""
        data = self.coordinator.data.get(self.device.id)
        if not data or not self.entity_description.value:
            return None
        return bool(self.entity_description.value(data))

    async def async_turn_on(self, **_: Any) -> None:
        """Turn on the light."""
        LOGGER.debug("Turn ON Light")
        result = await self.entity_description.turn_on(
            self.coordinator.config_entry.runtime_data.client, self.device
        )
        await self._update_coordinator_data(result)

    async def async_turn_off(self, **_: Any) -> None:
        """Turn off the light."""
        LOGGER.debug("Turn OFF Light")
        result = await self.entity_description.turn_off(
            self.coordinator.config_entry.runtime_data.client, self.device
        )
        await self._update_coordinator_data(result)

    async def _update_coordinator_data(self, result: bool) -> None:
        """Briefly speed up polling, then ask the coordinator to refresh.

        NOTE(review): `result` is currently unused — confirm whether the API
        outcome should gate the fast-poll window.
        """
        self.coordinator.update_interval = timedelta(seconds=MIN_SCAN_INTERVAL)
        self.coordinator.fast_poll_tic = 3
        await asyncio.sleep(1)
        await self.coordinator.async_request_refresh()
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | false |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/data.py | custom_components/petkit/data.py | """Custom types for Petkit Smart Devices integration."""
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
from pypetkitapi import Feeder, Litter, Pet, Purifier, WaterFountain
if TYPE_CHECKING:
from pypetkitapi.client import PetKitClient
from homeassistant.config_entries import ConfigEntry
from homeassistant.loader import Integration
from .coordinator import (
PetkitBluetoothUpdateCoordinator,
PetkitDataUpdateCoordinator,
PetkitMediaUpdateCoordinator,
)
# Config entry whose runtime_data holds a PetkitData instance.
type PetkitConfigEntry = ConfigEntry[PetkitData]
# Custom types for Petkit Smart Devices integration
type PetkitDevices = Feeder | Litter | WaterFountain | Purifier | Pet
@dataclass
class PetkitData:
    """Data for the Petkit integration, stored on the config entry."""
    # Authenticated pypetkitapi client shared by all coordinators.
    client: PetKitClient
    # Main device-state polling coordinator.
    coordinator: PetkitDataUpdateCoordinator
    # Coordinator for the media update cycle.
    coordinator_media: PetkitMediaUpdateCoordinator
    # Coordinator for the Bluetooth relay update cycle.
    coordinator_bluetooth: PetkitBluetoothUpdateCoordinator
    # Loaded integration metadata.
    integration: Integration
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | false |
Jezza34000/homeassistant_petkit | https://github.com/Jezza34000/homeassistant_petkit/blob/362911ceac878aec5f56d82efa9b0515e2028355/custom_components/petkit/config_flow.py | custom_components/petkit/config_flow.py | """Adds config flow for Petkit Smart Devices."""
from __future__ import annotations
from typing import Any
from pypetkitapi import (
PetkitAuthenticationUnregisteredEmailError,
PetKitClient,
PetkitRegionalServerNotFoundError,
PetkitSessionError,
PetkitSessionExpiredError,
PetkitTimeoutError,
PypetkitError,
)
import voluptuous as vol
from homeassistant import data_entry_flow
from homeassistant.config_entries import (
ConfigEntry,
ConfigFlow,
ConfigFlowResult,
OptionsFlow,
)
from homeassistant.const import (
CONF_PASSWORD,
CONF_REGION,
CONF_SCAN_INTERVAL,
CONF_TIME_ZONE,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import section
from homeassistant.helpers import selector
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.selector import BooleanSelector, BooleanSelectorConfig
from .const import (
ALL_TIMEZONES_LST,
BT_SECTION,
CODE_TO_COUNTRY_DICT,
CONF_BLE_RELAY_ENABLED,
CONF_DELETE_AFTER,
CONF_MEDIA_DL_IMAGE,
CONF_MEDIA_DL_VIDEO,
CONF_MEDIA_EV_TYPE,
CONF_MEDIA_PATH,
CONF_SCAN_INTERVAL_BLUETOOTH,
CONF_SCAN_INTERVAL_MEDIA,
CONF_SMART_POLLING,
COUNTRY_TO_CODE_DICT,
DEFAULT_BLUETOOTH_RELAY,
DEFAULT_DELETE_AFTER,
DEFAULT_DL_IMAGE,
DEFAULT_DL_VIDEO,
DEFAULT_EVENTS,
DEFAULT_MEDIA_PATH,
DEFAULT_SCAN_INTERVAL,
DEFAULT_SCAN_INTERVAL_BLUETOOTH,
DEFAULT_SCAN_INTERVAL_MEDIA,
DEFAULT_SMART_POLLING,
DOMAIN,
LOGGER,
MEDIA_SECTION,
)
class PetkitOptionsFlowHandler(OptionsFlow):
    """Handle Petkit options."""
    async def async_step_init(
        self, user_input: dict[str, Any] | None = None
    ) -> ConfigFlowResult:
        """Manage the options.

        Shows a single form with the polling options plus two collapsible
        sections (media download, Bluetooth relay); on submit the raw user
        input becomes the new options dict. Defaults come from the currently
        stored options, falling back to the integration defaults.
        """
        if user_input is not None:
            return self.async_create_entry(data=user_input)
        return self.async_show_form(
            step_id="init",
            data_schema=vol.Schema(
                {
                    # Main polling interval, in seconds.
                    vol.Required(
                        CONF_SCAN_INTERVAL,
                        default=self.config_entry.options.get(
                            CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
                        ),
                    ): vol.All(int, vol.Range(min=15, max=3600)),
                    vol.Required(
                        CONF_SMART_POLLING,
                        default=self.config_entry.options.get(
                            CONF_SMART_POLLING, DEFAULT_SMART_POLLING
                        ),
                    ): BooleanSelector(BooleanSelectorConfig()),
                    # Media download settings, nested under their own section;
                    # stored options are a nested dict keyed by MEDIA_SECTION.
                    vol.Required(MEDIA_SECTION): section(
                        vol.Schema(
                            {
                                vol.Required(
                                    CONF_MEDIA_PATH,
                                    default=self.config_entry.options.get(
                                        MEDIA_SECTION, {}
                                    ).get(CONF_MEDIA_PATH, DEFAULT_MEDIA_PATH),
                                ): vol.All(str),
                                vol.Required(
                                    CONF_SCAN_INTERVAL_MEDIA,
                                    default=self.config_entry.options.get(
                                        MEDIA_SECTION, {}
                                    ).get(
                                        CONF_SCAN_INTERVAL_MEDIA,
                                        DEFAULT_SCAN_INTERVAL_MEDIA,
                                    ),
                                ): vol.All(int, vol.Range(min=5, max=120)),
                                vol.Required(
                                    CONF_MEDIA_DL_IMAGE,
                                    default=self.config_entry.options.get(
                                        MEDIA_SECTION, {}
                                    ).get(CONF_MEDIA_DL_IMAGE, DEFAULT_DL_IMAGE),
                                ): BooleanSelector(BooleanSelectorConfig()),
                                vol.Required(
                                    CONF_MEDIA_DL_VIDEO,
                                    default=self.config_entry.options.get(
                                        MEDIA_SECTION, {}
                                    ).get(CONF_MEDIA_DL_VIDEO, DEFAULT_DL_VIDEO),
                                ): BooleanSelector(BooleanSelectorConfig()),
                                # Event types whose media should be downloaded.
                                vol.Optional(
                                    CONF_MEDIA_EV_TYPE,
                                    default=self.config_entry.options.get(
                                        MEDIA_SECTION, {}
                                    ).get(CONF_MEDIA_EV_TYPE, DEFAULT_EVENTS),
                                ): selector.SelectSelector(
                                    selector.SelectSelectorConfig(
                                        multiple=True,
                                        sort=False,
                                        options=[
                                            "Pet",
                                            "Eat",
                                            "Feed",
                                            "Toileting",
                                            "Move",
                                            "Dish_before",
                                            "Dish_after",
                                            "Waste_check",
                                        ],
                                    )
                                ),
                                # Retention in days; 0 presumably disables the
                                # cleanup — confirm against the media coordinator.
                                vol.Required(
                                    CONF_DELETE_AFTER,
                                    default=self.config_entry.options.get(
                                        MEDIA_SECTION, {}
                                    ).get(CONF_DELETE_AFTER, DEFAULT_DELETE_AFTER),
                                ): vol.All(int, vol.Range(min=0, max=30)),
                            }
                        ),
                        {"collapsed": False},
                    ),
                    # Bluetooth relay settings section.
                    vol.Required(BT_SECTION): section(
                        vol.Schema(
                            {
                                vol.Required(
                                    CONF_BLE_RELAY_ENABLED,
                                    default=self.config_entry.options.get(
                                        BT_SECTION, {}
                                    ).get(
                                        CONF_BLE_RELAY_ENABLED, DEFAULT_BLUETOOTH_RELAY
                                    ),
                                ): BooleanSelector(BooleanSelectorConfig()),
                                vol.Required(
                                    CONF_SCAN_INTERVAL_BLUETOOTH,
                                    default=self.config_entry.options.get(
                                        BT_SECTION, {}
                                    ).get(
                                        CONF_SCAN_INTERVAL_BLUETOOTH,
                                        DEFAULT_SCAN_INTERVAL_BLUETOOTH,
                                    ),
                                ): vol.All(int, vol.Range(min=5, max=120)),
                            }
                        ),
                        {"collapsed": False},
                    ),
                }
            ),
        )
class PetkitFlowHandler(ConfigFlow, domain=DOMAIN):
    """Config flow for Petkit Smart Devices."""
    # Config entry schema version; HA uses this to trigger entry migration
    # when it changes — confirm a matching async_migrate_entry exists.
    VERSION = 7
    @staticmethod
    @callback
    def async_get_options_flow(
        config_entry: ConfigEntry,
    ) -> PetkitOptionsFlowHandler:
        """Return the options flow handler for this entry."""
        return PetkitOptionsFlowHandler()
async def async_step_user(
self,
user_input: dict | None = None,
) -> data_entry_flow.FlowResult:
"""Handle a flow initialized by the user."""
_errors = {}
country_from_ha = self.hass.config.country
tz_from_ha = self.hass.config.time_zone
LOGGER.debug(
f"Country code from HA : {self.hass.config.country} Default timezone: {tz_from_ha}"
)
if user_input is not None:
user_region = (
COUNTRY_TO_CODE_DICT.get(user_input.get(CONF_REGION, None))
or country_from_ha
)
# Check if the account already exists
existing_entries = self._async_current_entries()
for entry in existing_entries:
if entry.data.get(CONF_USERNAME) == user_input[CONF_USERNAME]:
_errors["base"] = "account_exists"
break
else:
try:
await self._test_credentials(
username=user_input[CONF_USERNAME],
password=user_input[CONF_PASSWORD],
region=user_region,
timezone=user_input.get(CONF_TIME_ZONE, tz_from_ha),
)
except (
PetkitTimeoutError,
PetkitSessionError,
PetkitSessionExpiredError,
PetkitAuthenticationUnregisteredEmailError,
PetkitRegionalServerNotFoundError,
) as exception:
LOGGER.error(exception)
_errors["base"] = str(exception)
except PypetkitError as exception:
LOGGER.error(exception)
_errors["base"] = "error"
else:
return self.async_create_entry(
title=user_input[CONF_USERNAME],
data=user_input,
options={
CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL,
CONF_SMART_POLLING: DEFAULT_SMART_POLLING,
MEDIA_SECTION: {
CONF_MEDIA_PATH: DEFAULT_MEDIA_PATH,
CONF_SCAN_INTERVAL_MEDIA: DEFAULT_SCAN_INTERVAL_MEDIA,
CONF_MEDIA_DL_IMAGE: DEFAULT_DL_IMAGE,
CONF_MEDIA_DL_VIDEO: DEFAULT_DL_VIDEO,
CONF_MEDIA_EV_TYPE: DEFAULT_EVENTS,
CONF_DELETE_AFTER: DEFAULT_DELETE_AFTER,
},
BT_SECTION: {
CONF_BLE_RELAY_ENABLED: DEFAULT_BLUETOOTH_RELAY,
CONF_SCAN_INTERVAL_BLUETOOTH: DEFAULT_SCAN_INTERVAL_BLUETOOTH,
},
},
)
data_schema = {
vol.Required(
CONF_USERNAME,
default=(user_input or {}).get(CONF_USERNAME, vol.UNDEFINED),
): selector.TextSelector(
selector.TextSelectorConfig(
type=selector.TextSelectorType.TEXT,
),
),
vol.Required(CONF_PASSWORD): selector.TextSelector(
selector.TextSelectorConfig(
type=selector.TextSelectorType.PASSWORD,
),
),
}
if _errors:
data_schema.update(
{
vol.Required(
CONF_REGION,
default=CODE_TO_COUNTRY_DICT.get(
country_from_ha, country_from_ha
),
): selector.SelectSelector(
selector.SelectSelectorConfig(
options=sorted(CODE_TO_COUNTRY_DICT.values())
),
),
vol.Required(
CONF_TIME_ZONE, default=tz_from_ha
): selector.SelectSelector(
selector.SelectSelectorConfig(options=ALL_TIMEZONES_LST),
),
}
)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(data_schema),
errors=_errors,
)
async def _test_credentials(
self, username: str, password: str, region: str, timezone: str
) -> None:
"""Validate credentials."""
client = PetKitClient(
username=username,
password=password,
region=region,
timezone=timezone,
session=async_get_clientsession(self.hass),
)
LOGGER.debug(f"Testing credentials for {username}")
await client.login()
| python | MIT | 362911ceac878aec5f56d82efa9b0515e2028355 | 2026-01-05T07:12:41.716988Z | false |
pimoroni/displayotron | https://github.com/pimoroni/displayotron/blob/1161818428c393906fdf437c8fa51f51b329af1e/library/setup.py | library/setup.py | #!/usr/bin/env python
"""
Copyright (c) 2014 Pimoroni
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils for very old environments without setuptools
    from distutils.core import setup

# PyPI trove classifiers describing supported platforms and audiences
classifiers = ['Development Status :: 5 - Production/Stable',
               'Operating System :: POSIX :: Linux',
               'License :: OSI Approved :: MIT License',
               'Intended Audience :: Developers',
               'Programming Language :: Python :: 2.6',
               'Programming Language :: Python :: 2.7',
               'Programming Language :: Python :: 3',
               'Topic :: Software Development',
               'Topic :: System :: Hardware']

# Package metadata; long_description is assembled from README + changelog
# at build time, so both files must exist next to this script.
setup(
    name = 'dot3k',
    version = '2.0.3',
    author = 'Philip Howard',
    author_email = 'phil@pimoroni.com',
    description = 'Display-o-Tron 3000/HAT Driver',
    long_description= open('README.md').read() + '\n' + open('CHANGELOG.txt').read(),
    long_description_content_type = 'text/markdown',
    license = 'MIT',
    keywords = 'Raspberry Pi ST7036 SN3218 LCD',
    url = 'http://www.pimoroni.com',
    classifiers = classifiers,
    packages = ['dot3k','dothat'],
    install_requires= ['st7036','sn3218','cap1xxx>=0.1.4']
)
| python | MIT | 1161818428c393906fdf437c8fa51f51b329af1e | 2026-01-05T07:12:43.667300Z | false |
pimoroni/displayotron | https://github.com/pimoroni/displayotron/blob/1161818428c393906fdf437c8fa51f51b329af1e/library/dot3k/lcd.py | library/dot3k/lcd.py | from sys import exit
try:
    import st7036
except ImportError:
    exit("This library requires the st7036 module\nInstall with: sudo pip install st7036")

# Character dimensions of the Display-o-Tron 3000 LCD
ROWS = 3
COLS = 16

# Single module-level driver instance shared by all functions below;
# register-select is wired to BCM pin 25 on this board.
lcd = st7036.st7036(register_select_pin=25)
lcd.clear()
def write(value: str) -> None:
    """Write a string to the current cursor position.

    Delegates to the shared st7036 driver instance.

    :param value: The string to write
    """
    lcd.write(value)
def clear() -> None:
    """Clear the display and reset the cursor."""
    lcd.clear()
def set_contrast(contrast: int) -> None:
    """Set the display contrast.

    Raises TypeError if contrast is not an int
    Raises ValueError if contrast is not in the range 0..0x3F

    :param contrast: contrast value
    """
    lcd.set_contrast(contrast)
def set_display_mode(enable: bool = True, cursor: bool = False, blink: bool = False) -> None:
    """Set the display mode.

    :param enable: enable display output: True/False
    :param cursor: show cursor: True/False
    :param blink: blink cursor (if shown): True/False
    """
    lcd.set_display_mode(enable, cursor, blink)
def set_cursor_offset(offset: int) -> None:
    """Set the cursor position in DRAM.

    :param offset: DRAM offset to place cursor
    """
    lcd.set_cursor_offset(offset)
def set_cursor_position(column: int, row: int) -> None:
    """Set the cursor position in DRAM.

    Calculates the cursor offset based on a row and column offset.
    Raises ValueError if row and column are not within defined screen size

    :param column: column to move the cursor to
    :param row: row to move the cursor to
    """
    lcd.set_cursor_position(column, row)
def create_animation(anim_pos: int, anim_map, frame_rate) -> None:
    """Create an animation in a given custom character slot.

    Each definition should be a list of 8 bytes describing the custom
    character for that frame.

    :param anim_pos: Character slot from 0 to 7 to store animation
    :param anim_map: A list of custom character definitions
    :param frame_rate: Speed of animation in frames-per-second
    """
    lcd.create_animation(anim_pos, anim_map, frame_rate)
def update_animations() -> None:
    """Update animations onto the LCD.

    Uses wall time to figure out which frame is current for
    each animation, and then updates the animations character
    slot to the contents of that frame.

    Only one frame, the current one, is ever stored on the LCD.
    """
    lcd.update_animations()
def create_char(char_pos: int, char_map) -> None:
    """Create a character in the LCD memory.

    The st7036 has 8 slots for custom characters.
    A char is defined as a list of 8 integers with the
    upper 5 bits setting the state of each row of pixels.

    Note: These slots are also used for animations!

    :param char_pos: Char slot to use (0-7)
    :param char_map: List of 8 integers containing bitmap
    """
    lcd.create_char(char_pos, char_map)
| python | MIT | 1161818428c393906fdf437c8fa51f51b329af1e | 2026-01-05T07:12:43.667300Z | false |
pimoroni/displayotron | https://github.com/pimoroni/displayotron/blob/1161818428c393906fdf437c8fa51f51b329af1e/library/dot3k/joystick.py | library/dot3k/joystick.py | import time
from sys import exit
try:
    import RPi.GPIO as GPIO
except ImportError:
    exit("This library requires the RPi.GPIO module\nInstall with: sudo pip install RPi.GPIO")

# Use Broadcom (BCM) pin numbering and silence channel-reuse warnings
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)

# BCM pin assignments for the five-way joystick
LEFT = 17
RIGHT = 22
if GPIO.RPI_REVISION == 1:
    # UP moved from BCM 21 (rev 1 boards) to BCM 27 (rev 2+)
    UP = 21
else:
    UP = 27
DOWN = 9
BUTTON = 4

# Default debounce interval, in milliseconds
BOUNCE = 300

# Per-input flags guarding against concurrent repeat() loops
repeat_status = {
    UP: False,
    DOWN: False,
    LEFT: False,
    RIGHT: False,
    BUTTON: False
}
def on(buttons, bounce=BOUNCE):
    """Handle a joystick direction or button push.

    Decorator. Use with @joystick.on(joystick.UP)

    :param buttons: List, or single instance of joystick button constant
    :param bounce: Debounce delay in milliseconds: default 300
    """
    buttons = buttons if isinstance(buttons, list) else [buttons]

    def register(handler):
        for button in buttons:
            # Replace any previously registered callback for this input
            GPIO.remove_event_detect(button)
            GPIO.add_event_detect(button, GPIO.FALLING, callback=handler, bouncetime=bounce)
        # Fix: return the handler so the decorated name keeps referring to the
        # original function instead of being rebound to None.
        return handler

    return register
def millis():
    """Return the current wall-clock time in whole milliseconds."""
    seconds_now = time.time()
    return int(round(seconds_now * 1000))
def repeat(button, handler, delay=0.1, ramp=1.0):
    """Detect a held direction and repeat.

    If you want to hold a direction and have it auto-repeat,
    call this within a joystick direction handler.

    :param button: Joystick button constant to watch
    :param handler: Function to call every repeat
    :param delay: Delay, in seconds, before repeat starts and between each repeat
    :param ramp: Multiplier applied to delay after each repeat, 1.0=linear speed up
    """
    # Bail out if a repeat loop is already running for this input
    if repeat_status[button]:
        return False
    repeat_status[button] = True

    last_fired = millis()
    # Spin for as long as the input reads low (i.e. is held down)
    while GPIO.input(button) == 0:
        now = millis()
        if now - last_fired >= (delay * 1000):
            handler()
            delay *= ramp
            last_fired = now
    repeat_status[button] = False
# Configure every joystick input with an internal pull-up (pressed == low).
# NOTE(review): GPIO.setup() has no documented return value, so these names
# only capture None -- confirm nothing relies on them before removing.
up = GPIO.setup(UP, GPIO.IN, pull_up_down=GPIO.PUD_UP)
down = GPIO.setup(DOWN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
left = GPIO.setup(LEFT, GPIO.IN, pull_up_down=GPIO.PUD_UP)
right = GPIO.setup(RIGHT, GPIO.IN, pull_up_down=GPIO.PUD_UP)
button = GPIO.setup(BUTTON, GPIO.IN, pull_up_down=GPIO.PUD_UP)
| python | MIT | 1161818428c393906fdf437c8fa51f51b329af1e | 2026-01-05T07:12:43.667300Z | false |
pimoroni/displayotron | https://github.com/pimoroni/displayotron/blob/1161818428c393906fdf437c8fa51f51b329af1e/library/dot3k/menu.py | library/dot3k/menu.py | import atexit
import os
import threading
import time
from collections import OrderedDict
from sys import version_info

# ConfigParser was renamed to configparser in Python 3; alias for both
if version_info[0] >= 3:
    import configparser as ConfigParser
else:
    import ConfigParser

# Interaction modes for the Menu state machine
_MODE_NAV = 'navigate'  # moving between menu entries
_MODE_ADJ = 'adjust'    # a MenuOption endpoint has focus
_MODE_TXT = 'entry'     # the text-entry input handler has focus
class MenuIcon:
    # 5x8 custom-character bitmaps for the LCD's CGRAM slots.
    # Each list holds 8 row values; the low 5 bits of each select the pixels.
    arrow_left = [0, 0, 8, 24, 8, 0, 0, 0]
    arrow_right = [0, 0, 2, 3, 2, 0, 0, 0]
    arrow_up = [0, 4, 14, 0, 0, 0, 0, 0]
    arrow_down = [0, 0, 0, 0, 0, 14, 4, 0]
    arrow_left_right = [0, 0, 10, 27, 10, 0, 0, 0]
    arrow_up_down = [0, 4, 14, 0, 0, 14, 4, 0]
    play = [0, 24, 30, 31, 30, 24, 0, 0]
    pause = [0, 27, 27, 27, 27, 27, 0, 0]
    back = [0, 8, 30, 9, 1, 1, 14, 0]
    bar_left = [0, 3, 2, 2, 2, 2, 3, 0]
    bar_right = [0, 24, 8, 8, 8, 8, 24, 0]
    bar_full = [0, 31, 0, 31, 31, 0, 31, 0]
    # NOTE(review): 32 (0b100000) is outside the 5-bit row width, so these two
    # rows likely render blank -- verify the intended pattern (17 = 0b10001?).
    bar_empty = [0, 32, 0, 0, 0, 0, 32, 0]
class StoppableThread(threading.Thread):
    """A daemon thread that can be cooperatively stopped via an Event.

    Subclasses should poll ``self.stop_event`` in their run loop and exit
    when it is set.
    """

    def __init__(self):
        threading.Thread.__init__(self)
        self.stop_event = threading.Event()
        self.daemon = True

    def start(self):
        """Start the thread if it is not already running."""
        # Fix: isAlive() was removed in Python 3.9; is_alive() is the
        # long-standing replacement and works on Python 2.6+ as well.
        if not self.is_alive():
            self.stop_event.clear()
            threading.Thread.start(self)

    def stop(self):
        """Signal the thread to terminate and wait for it to finish."""
        if self.is_alive():
            # set event to signal thread to terminate
            self.stop_event.set()
            # block calling thread until thread really has terminated
            self.join()
class AsyncWorker(StoppableThread):
    """Background worker that repeatedly invokes a callable.

    The loop ends when the callable returns a literal False or when the
    thread is asked to stop via ``stop_event``.
    """

    def __init__(self, todo):
        StoppableThread.__init__(self)
        self.todo = todo

    def run(self):
        while True:
            if self.stop_event.is_set():
                break
            result = self.todo()
            if result is False:
                self.stop_event.set()
                break
class Menu:
    """
    This class accepts a nested structure of menu items.
    Each key corresponds to a text item displayed on the menu
    Each value can either be:

    * A nested dictionary, for a sub-menu
    * A function, which is called immediately on select
    * A class derived from MenuOption, providing interactive functionality
    """

    def __init__(self, *args, **kwargs):
        """
        structure, lcd, idle_handler = None, idle_time = 60
        """
        self.menu_options = OrderedDict()
        self.lcd = None
        self.idle_handler = None
        self.input_handler = None
        self.idle_time = 60 * 1000
        self.config_file = 'dot3k.cfg'
        # Track displayed text for auto-scroll
        self.last_text = ['', '', '']
        self.last_change = [0, 0, 0]
        # Positional arguments mirror the legacy call signature
        if len(args) > 0 and args[0] is not None:
            self.menu_options = args[0]
        if len(args) > 1:
            self.lcd = args[1]
        if len(args) > 2:
            self.idle_handler = args[2]
        if len(args) > 3:
            self.idle_time = args[3] * 1000
        # Keyword arguments override the positional equivalents
        if 'structure' in kwargs.keys() and kwargs['structure'] is not None:
            self.menu_options = kwargs['structure']
        if 'lcd' in kwargs.keys():
            self.lcd = kwargs['lcd']
        if 'idle_handler' in kwargs.keys():
            self.idle_handler = kwargs['idle_handler']
        if 'idle_time' in kwargs.keys():
            self.idle_time = kwargs['idle_time'] * 1000
        if 'input_handler' in kwargs.keys():
            self.input_handler = kwargs['input_handler']
        if 'config_file' in kwargs.keys():
            self.config_file = kwargs['config_file']
        self.list_location = []
        self.current_position = 0
        self.idle = False
        self.mode = _MODE_NAV
        # Load persisted settings from the working dir and the user's home
        self.config = ConfigParser.ConfigParser()
        self.config.read([self.config_file, os.path.expanduser('~/.' + self.config_file)])
        if type(self.menu_options) is dict or type(self.menu_options) is OrderedDict:
            self._setup_menu(self.menu_options)
        self.last_action = self.millis()
        self._thread = AsyncWorker(self._update)
        atexit.register(self.save)

    def run(self):
        """Start the background redraw thread."""
        self._thread.start()
        atexit.register(self.stop)

    def stop(self):
        """Stop the background redraw thread."""
        self._thread.stop()

    def _update(self):
        # Background loop body: redraw, then cap the refresh rate at ~20fps
        self.redraw()
        time.sleep(0.05)

    def millis(self):
        """Current wall-clock time in milliseconds."""
        return int(round(time.time() * 1000))

    def add_item(self, path, handler):
        """Insert *handler* at *path* ('a/b/c' or list), creating sub-menus."""
        if not type(path) is list:
            path = path.split('/')
        loc = self.menu_options
        last = path.pop()
        while len(path) > 0:
            key = path.pop(0)
            if key not in loc:
                loc[key] = OrderedDict()
            loc = loc[key]
        loc[last] = handler
        if isinstance(loc[last], MenuOption):
            loc[last].setup(self.config)

    def save(self):
        """Persist the ConfigParser state to the configured file."""
        # Fix: honour self.config_file instead of the hard-coded 'dot3k.cfg'
        if version_info[0] >= 3:
            with open(self.config_file, 'wt', encoding='utf-8') as configfile:
                self.config.write(configfile)
        else:
            with open(self.config_file, 'wb') as configfile:
                self.config.write(configfile)
        print('Config saved to ' + self.config_file)

    def _setup_menu(self, menu):
        # Recursively hand the shared config to every MenuOption endpoint
        for key in menu:
            value = menu[key]
            if type(value) is dict or type(value) is OrderedDict:
                self._setup_menu(value)
            elif isinstance(value, MenuOption):
                value.setup(self.config)

    def current_submenu(self):
        """
        Traverse the list of indexes in list_location
        and find the relevant nested dictionary
        """
        menu = self.menu_options
        for location in self.list_location:
            menu = menu[list(menu.keys())[location]]
        return menu

    def current_value(self):
        """Value (sub-dict, callable or MenuOption) of the selected entry."""
        return self.current_submenu()[self.current_key()]

    def current_key(self):
        """
        Convert the integer current_position into
        a valid key for the currently selected dictionary
        """
        return list(self.current_submenu().keys())[self.current_position]

    def next_position(self):
        # Wraps around to the top of the menu
        position = self.current_position + 1
        position %= len(self.current_submenu())
        return position

    def previous_position(self):
        # Wraps around to the bottom of the menu
        position = self.current_position - 1
        position %= len(self.current_submenu())
        return position

    def select_option(self):
        """
        Navigate into, or handle selected menu option accordingly
        """
        if type(self.current_value()) is dict or type(self.current_value()) is OrderedDict:
            self.list_location.append(self.current_position)
            self.current_position = 0
        elif isinstance(self.current_value(), MenuOption):
            self.mode = _MODE_ADJ
            self.current_value().begin()
        elif callable(self.current_submenu()[self.current_key()]):
            self.current_submenu()[self.current_key()]()

    def prev_option(self):
        """
        Decrement the option pointer,
        select previous menu item
        """
        self.current_position = self.previous_position()

    def next_option(self):
        """
        Increment the option pointer,
        select next menu item
        """
        self.current_position = self.next_position()

    def exit_option(self):
        """
        Exit current submenu and restore position
        in previous menu
        """
        if len(self.list_location) > 0:
            self.current_position = self.list_location.pop()

    def start_input(self):
        """Hand control over to the text-entry input handler, if any."""
        if self.input_handler is None:
            return False
        self.current_value().text_entry = False
        self.input_handler.begin()
        self.input_handler.set_value(self.current_value().initial_value())
        self.input_handler.set_prompt(self.current_value().input_prompt())
        self.mode = _MODE_TXT

    def finish_input(self):
        """Deliver (or cancel) the entered text and return to adjust mode."""
        # NOTE(review): cancel_input is expected to be a boolean attribute set
        # by the input handler, shadowing MenuOption.cancel_input -- confirm.
        if self.input_handler.cancel_input:
            self.current_value().cancel_input()
            self.input_handler.cancel_input = False
            self.input_handler.cleanup()
            self.mode = _MODE_ADJ
        else:
            self.current_value().receive_input(self.input_handler.get_value())
            self.input_handler.cleanup()
            self.mode = _MODE_ADJ

    def _wake_from_idle(self):
        """Record user activity; dismiss the idle screen if it was showing.

        Returns True when the event was consumed by waking up, in which case
        the caller should not process it any further.
        """
        self.last_action = self.millis()
        if self.idle:
            self.idle = False
            self.idle_handler.cleanup()
            self.idle_handler.idling = False
            return True
        return False

    def select(self):
        """
        Handle "select" action
        """
        if self._wake_from_idle():
            return True
        if self.mode == _MODE_NAV:
            self.select_option()
        elif self.mode == _MODE_ADJ:
            # The "select" call must return true to exit the adjust
            if self.current_value().select() is True:
                self.mode = _MODE_NAV
        elif self.mode == _MODE_TXT:
            if self.input_handler.select():
                self.finish_input()

    def cancel(self):
        """Handle "cancel": leave a submenu or abandon an adjustment."""
        if self._wake_from_idle():
            return True
        if self.mode == _MODE_NAV:
            self.exit_option()
        # Deliberately not elif: preserves the original fall-through ordering
        if self.mode == _MODE_ADJ:
            self.current_value().cleanup()
            self.mode = _MODE_NAV

    def up(self):
        """Handle "up": previous entry, or delegate to the active handler."""
        if self._wake_from_idle():
            return True
        if self.mode == _MODE_NAV:
            self.prev_option()
        elif self.mode == _MODE_ADJ:
            self.current_value().up()
        elif self.mode == _MODE_TXT:
            self.input_handler.up()

    def down(self):
        """Handle "down": next entry, or delegate to the active handler."""
        if self._wake_from_idle():
            return True
        if self.mode == _MODE_NAV:
            self.next_option()
        elif self.mode == _MODE_ADJ:
            self.current_value().down()
        elif self.mode == _MODE_TXT:
            self.input_handler.down()

    def left(self):
        """Handle "left": exit a submenu, or delegate to the active handler."""
        if self._wake_from_idle():
            return True
        if self.mode == _MODE_NAV:
            self.exit_option()
        elif self.mode == _MODE_ADJ:
            # A falsy return from left() exits adjust mode
            if not self.current_value().left():
                self.current_value().cleanup()
                self.mode = _MODE_NAV
        elif self.mode == _MODE_TXT:
            self.input_handler.left()

    def right(self):
        """Handle "right": enter an option, or delegate to the active handler."""
        if self._wake_from_idle():
            return True
        if self.mode == _MODE_NAV:
            self.select_option()
        elif self.mode == _MODE_ADJ:
            self.current_value().right()
        elif self.mode == _MODE_TXT:
            self.input_handler.right()

    def clear_row(self, row):
        """Blank a single display row."""
        self.lcd.set_cursor_position(0, row)
        self.lcd.write(' ' * self.lcd.COLS)

    def write_row(self, row, text):
        """Write *text* to *row*, padded/truncated to the display width."""
        self.lcd.set_cursor_position(0, row)
        while len(text) < self.lcd.COLS:
            text += ' '
        self.lcd.write(text[0:self.lcd.COLS])

    def write_option(self, *args, **kwargs):
        """Render one menu row, with optional icon, margin and auto-scroll."""
        row = 0
        text = ''
        icon = ''
        margin = 0
        scroll = False
        scroll_padding = ' '
        scroll_delay = 2000
        scroll_repeat = 10000
        scroll_speed = 200
        if len(args) > 0:
            row = args[0]
        if len(args) > 1:
            text = args[1]
        if len(args) > 2:
            icon = args[2]
        if len(args) > 3:
            margin = args[3]
        if 'row' in kwargs.keys():
            row = kwargs['row']
        if 'text' in kwargs.keys():
            text = kwargs['text']
        if 'icon' in kwargs.keys():
            icon = kwargs['icon']
        if 'margin' in kwargs.keys():
            margin = kwargs['margin']
        if 'scroll' in kwargs.keys() and kwargs['scroll'] == True:
            scroll = True
        if 'scroll_speed' in kwargs.keys():
            scroll_speed = kwargs['scroll_speed']
        if 'scroll_repeat' in kwargs.keys():
            scroll_repeat = kwargs['scroll_repeat']
        if 'scroll_delay' in kwargs.keys():
            scroll_delay = kwargs['scroll_delay']
        if 'scroll_padding' in kwargs.keys():
            scroll_padding = kwargs['scroll_padding']
        # Idiom fix: identity comparison against None
        if icon is None:
            icon = ''
        if margin is None:
            margin = 0
        current_row = ''
        # Restart the scroll timer whenever the row's text changes
        if self.last_text[row] != text:
            self.last_text[row] = text
            self.last_change[row] = self.millis()
        if scroll:
            text += scroll_padding
        if scroll and self.millis() - self.last_change[row] > scroll_delay:
            # Rotate the text by one character per scroll_speed ms
            pos = int(((self.millis() - self.last_change[row] - scroll_delay) / scroll_speed) % len(text))
            text = text[pos:] + text[:pos]
            if pos == len(text) - 1:
                # Full revolution complete: pause before scrolling again
                self.last_change[row] = self.millis() + scroll_repeat
        current_row += icon
        while len(current_row) < margin:
            current_row += ' '
        current_row += text
        self.write_row(row, current_row)

    def get_menu_item(self, index):
        """Display text for the entry at *index* in the current submenu."""
        return list(self.current_submenu().keys())[index]

    def redraw(self):
        """Draw the idle screen, the navigation view or the active endpoint."""
        if self.can_idle() and isinstance(self.idle_handler, MenuOption):
            if not self.idle:
                self.idle_handler.idling = True
                self.idle_handler.begin()
                self.idle = True
            self.idle_handler.redraw(self)
            return False
        if self.mode == _MODE_NAV:
            # Middle row carries the selection marker (custom char 252)
            self.write_option(
                row=1,
                text=self.get_menu_item(self.current_position),
                icon=chr(252),
                margin=1
            )
            if len(self.current_submenu()) > 2:
                self.write_option(
                    row=0,
                    text=self.get_menu_item(self.previous_position()),
                    margin=1
                )
            else:
                self.clear_row(0)
            if len(self.current_submenu()) > 1:
                self.write_option(
                    row=2,
                    text=self.get_menu_item(self.next_position()),
                    margin=1
                )
            else:
                self.clear_row(2)
        # Call the redraw function of the endpoint Class
        elif self.mode == _MODE_ADJ:
            self.current_value().redraw(self)
            if self.current_value().text_entry:
                self.start_input()
        elif self.mode == _MODE_TXT:
            self.input_handler.redraw(self)

    def can_idle(self):
        """True when no input arrived for idle_time and idling is allowed."""
        if self.millis() - self.last_action >= self.idle_time:
            if self.mode == _MODE_NAV:
                return True
            if self.mode == _MODE_ADJ and self.current_value().can_idle:
                return True
        return False
class MenuOption:
    """Base class for interactive menu endpoints.

    Subclasses override the navigation hooks (up/down/left/right/select),
    the display hooks (begin/redraw/cleanup) and, optionally, the
    text-entry helpers below.
    """

    def __init__(self):
        self.idling = False      # True while shown as the menu's idle screen
        self.can_idle = False    # allow the menu to idle while adjusting
        self.config = None       # shared ConfigParser, injected via setup()
        self.text_entry = False  # set by request_input() to open text entry

    # -- Helpers for starting the parent menu's input_handler and
    # -- receiving the text that the user inputs.

    def initial_value(self):
        """Value the text-entry session starts with."""
        return ''

    def receive_input(self, value):
        """Accept the entered text; return False to reject input."""
        return True

    def request_input(self):
        """Ask the parent menu to open its text-entry handler."""
        self.text_entry = True

    def cancel_input(self):
        """Called if input is cancelled by the handler."""
        pass

    def input_prompt(self):
        """Prompt string shown by the input handler."""
        return 'Input text:'

    # -- Hooks implemented by input-handler plugins.

    def set_value(self, value):
        pass

    def get_value(self):
        return ''

    def set_prompt(self, value):
        pass

    def millis(self):
        """Current wall-clock time in milliseconds."""
        return int(round(time.time() * 1000))

    def up(self):
        pass

    def down(self):
        pass

    def left(self):
        pass

    def right(self):
        pass

    def select(self):
        """Handle select; must return True to allow exit from adjust mode."""
        return True

    def begin(self):
        pass

    def redraw(self, menu):
        pass

    def setup(self, config):
        """Receive the shared ConfigParser used for persistent options."""
        self.config = config

    def cleanup(self):
        # Undo any backlight or other changes
        pass

    def set_option(self, section, option, value):
        """Store a value, creating the section on demand; no-op before setup()."""
        if self.config is not None:
            if section not in self.config.sections():
                self.config.add_section(section)
            self.config.set(section, option, value)

    def get_option(self, section, option, default=None):
        """Read an option, seeding it with *default* when absent.

        Returns the stored string, or False when no value and no default
        exist. Note: stored values always come back as strings.
        """
        # Fix: mirror set_option's guard so a call before setup() returns the
        # default instead of raising AttributeError on a None config.
        if self.config is None:
            return False if default is None else default
        if section not in self.config.sections():
            self.config.add_section(section)
        if option in self.config.options(section):
            return self.config.get(section, option)
        elif default is None:
            return False
        else:
            self.config.set(section, option, str(default))
            return default
| python | MIT | 1161818428c393906fdf437c8fa51f51b329af1e | 2026-01-05T07:12:43.667300Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.