| id (int64, 0–300k) | label (string, 1–74 chars) | text (string, 4k–8k chars) |
|---|---|---|
6,400 | resource guid | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetDdosCustomPolicyResult',
'AwaitableGetDdosCustomPolicyResult',
'get_ddos_custom_policy',
'get_ddos_custom_policy_output',
]
@pulumi.output_type
class GetDdosCustomPolicyResult:
    """
    A DDoS custom policy in a resource group.
    """
    # NOTE(review): 'METHOD_NAME' appears to be a tool-inserted placeholder for
    # 'resource_guid' (the value is stored and read under the key
    # "resource_guid") — confirm against the code generator before renaming.
    def __init__(__self__, etag=None, id=None, location=None, name=None, protocol_custom_settings=None, provisioning_state=None, public_ip_addresses=None, METHOD_NAME=None, tags=None, type=None):
        """Validate each field's type and stash it via pulumi.set() so the
        @pulumi.getter properties below can retrieve it with pulumi.get()."""
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if protocol_custom_settings and not isinstance(protocol_custom_settings, list):
            raise TypeError("Expected argument 'protocol_custom_settings' to be a list")
        pulumi.set(__self__, "protocol_custom_settings", protocol_custom_settings)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if public_ip_addresses and not isinstance(public_ip_addresses, list):
            raise TypeError("Expected argument 'public_ip_addresses' to be a list")
        pulumi.set(__self__, "public_ip_addresses", public_ip_addresses)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'resource_guid' to be a str")
        pulumi.set(__self__, "resource_guid", METHOD_NAME)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="protocolCustomSettings")
    def protocol_custom_settings(self) -> Optional[Sequence['outputs.ProtocolCustomSettingsFormatResponse']]:
        """
        The protocol-specific DDoS policy customization parameters.
        """
        return pulumi.get(self, "protocol_custom_settings")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the DDoS custom policy resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="publicIPAddresses")
    def public_ip_addresses(self) -> Sequence['outputs.SubResourceResponse']:
        """
        The list of public IPs associated with the DDoS custom policy resource. This list is read-only.
        """
        return pulumi.get(self, "public_ip_addresses")

    @property
    @pulumi.getter(name="resourceGuid")
    def METHOD_NAME(self) -> str:
        """
        The resource GUID property of the DDoS custom policy resource. It uniquely identifies the resource, even if the user changes its name or migrate the resource across subscriptions or resource groups.
        """
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetDdosCustomPolicyResult(GetDdosCustomPolicyResult):
    """Awaitable variant of GetDdosCustomPolicyResult, so callers may use
    ``await`` on the invoke result in async code."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable 'yield' marks __await__ as a generator function,
        # which is what makes the object awaitable; the result is returned
        # immediately without actually suspending.
        if False:
            yield self
        return GetDdosCustomPolicyResult(
            etag=self.etag,
            id=self.id,
            location=self.location,
            name=self.name,
            protocol_custom_settings=self.protocol_custom_settings,
            provisioning_state=self.provisioning_state,
            public_ip_addresses=self.public_ip_addresses,
            METHOD_NAME=self.METHOD_NAME,
            tags=self.tags,
            type=self.type)
def get_ddos_custom_policy(ddos_custom_policy_name: Optional[str] = None,
                           resource_group_name: Optional[str] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDdosCustomPolicyResult:
    """
    Gets information about the specified DDoS custom policy.

    :param str ddos_custom_policy_name: The name of the DDoS custom policy.
    :param str resource_group_name: The name of the resource group.
    """
    __args__ = {
        'ddosCustomPolicyName': ddos_custom_policy_name,
        'resourceGroupName': resource_group_name,
    }
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke(
        'azure-native:network/v20220101:getDdosCustomPolicy',
        __args__, opts=opts, typ=GetDdosCustomPolicyResult).value

    # Constructor keyword -> attribute name on the raw invoke result.
    field_map = (
        ('etag', 'etag'),
        ('id', 'id'),
        ('location', 'location'),
        ('name', 'name'),
        ('protocol_custom_settings', 'protocol_custom_settings'),
        ('provisioning_state', 'provisioning_state'),
        ('public_ip_addresses', 'public_ip_addresses'),
        ('METHOD_NAME', 'resource_guid'),
        ('tags', 'tags'),
        ('type', 'type'),
    )
    return AwaitableGetDdosCustomPolicyResult(
        **{kw: pulumi.get(__ret__, attr) for kw, attr in field_map})
@_utilities.lift_output_func(get_ddos_custom_policy)
def get_ddos_custom_policy_output(ddos_custom_policy_name: Optional[pulumi.Input[str]] = None,
                                  resource_group_name: Optional[pulumi.Input[str]] = None,
                                  opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDdosCustomPolicyResult]:
    """
    Gets information about the specified DDoS custom policy.

    Output-lifted variant of get_ddos_custom_policy: the body is intentionally
    just ``...`` because lift_output_func supplies the implementation.

    :param str ddos_custom_policy_name: The name of the DDoS custom policy.
    :param str resource_group_name: The name of the resource group.
    """
    ...
6,401 | assembly parrot rule1 | # Leo colorizer control file for assembly_parrot mode.
# This file is in the public domain.
# Properties for assembly_parrot mode.
properties = {
    # Parrot assembly uses '#' for end-of-line comments.
    "lineComment": "#",
}
# Attributes dict for assembly_parrot_main ruleset.
assembly_parrot_main_attributes_dict = {
    "default": "null",
    # Numbers are hex (0x...) or plain decimal digit runs.
    "digit_re": "(0x[[:xdigit:]]+|[[:digit:]]+)",
    "escape": "",
    "highlight_digits": "true",
    "ignore_case": "false",
    "no_word_sep": "",
}
# Dictionary of attributes dictionaries for assembly_parrot mode.
attributesDictDict = {
    # Single ruleset: the main-mode attributes defined above.
    "assembly_parrot_main": assembly_parrot_main_attributes_dict,
}
# Keywords dict for assembly_parrot_main ruleset.
# Parrot assembler opcodes.  Every opcode is colorized as "keyword1", so the
# names are kept once in a flat tuple and expanded into the kind-mapping the
# colorizer expects.
_ASSEMBLY_PARROT_OPCODES = (
    "abs", "acos", "add", "and", "asec", "asin", "atan", "bounds", "branch",
    "bsr", "chopm", "cleari", "clearn", "clearp", "clears", "clone", "close",
    "cmod", "concat", "cos", "cosh", "debug", "dec", "div", "end",
    "entrytype", "eq", "err", "exp", "find_global", "find_type", "ge",
    "getfile", "getline", "getpackage", "gt", "if", "inc", "index", "jsr",
    "jump", "le", "length", "ln", "log10", "log2", "lt", "mod", "mul", "ne",
    "new", "newinterp", "noop", "not", "open", "or", "ord", "pack", "pop",
    "popi", "popn", "popp", "pops", "pow", "print", "profile", "push",
    "pushi", "pushn", "pushp", "pushs", "read", "readline", "repeat",
    "restore", "ret", "rotate_up", "runinterp", "save", "sec", "sech",
    "set", "set_keyed", "setfile", "setline", "setpackage", "shl", "shr",
    "sin", "sinh", "sleep", "sub", "substr", "tan", "tanh", "time", "trace",
    "typeof", "unless", "warningsoff", "warningson", "write", "xor",
)
assembly_parrot_main_keywords_dict = {
    opcode: "keyword1" for opcode in _ASSEMBLY_PARROT_OPCODES
}
# Dictionary of keywords dictionaries for assembly_parrot mode.
keywordsDictDict = {
    # Single ruleset: the opcode->kind table defined above.
    "assembly_parrot_main": assembly_parrot_main_keywords_dict,
}
# Rules for assembly_parrot_main ruleset.
def assembly_parrot_rule0(colorer, s, i):
    """Colorize a double-quoted string literal confined to a single line."""
    return colorer.match_span(
        s, i, kind="literal1", begin="\"", end="\"", no_line_break=True)
def METHOD_NAME(colorer, s, i):
    """Colorize a '#' comment that runs to the end of the line."""
    return colorer.match_eol_span(s, i, kind="comment1", seq="#")
def assembly_parrot_rule2(colorer, s, i):
    # Label definition: mark the text *before* a ':' at line start as a label,
    # excluding the ':' itself from the colored span.
    return colorer.match_mark_previous(s, i, kind="label", pattern=":",
                                       at_line_start=True,
                                       exclude_match=True)
def assembly_parrot_rule3(colorer, s, i):
    """Colorize a literal comma as an operator token."""
    return colorer.match_plain_seq(s, i, kind="operator", seq=",")
def assembly_parrot_rule4(colorer, s, i):
    # Integer register: "I" followed by a one- or two-digit index.
    return colorer.match_seq_regexp(s, i, kind="literal2", regexp="I\\d{1,2}")
def assembly_parrot_rule5(colorer, s, i):
    # String register: "S" followed by a one- or two-digit index.
    return colorer.match_seq_regexp(s, i, kind="literal2", regexp="S\\d{1,2}")
def assembly_parrot_rule6(colorer, s, i):
    # Numeric register: "N" followed by a one- or two-digit index.
    return colorer.match_seq_regexp(s, i, kind="literal2", regexp="N\\d{1,2}")
def assembly_parrot_rule7(colorer, s, i):
    # PMC register: "P" followed by a one- or two-digit index.
    return colorer.match_seq_regexp(s, i, kind="literal2", regexp="P\\d{1,2}")
def assembly_parrot_rule8(colorer, s, i):
    """Delegate to the keyword table (Parrot opcodes colored as keyword1)."""
    return colorer.match_keywords(s, i)
# Rules dict for assembly_parrot_main ruleset.
# First-character dispatch table for the main ruleset, built programmatically.
# Keyword matching (rule 8) fires on any identifier-ish start character; the
# register lookalikes I/N/P/S get their regexp rule tried first.  Insertion
# order matches the original literal exactly.
rulesDict1 = {
    "\"": [assembly_parrot_rule0,],
    "#": [METHOD_NAME,],
    ",": [assembly_parrot_rule3,],
}
for _ch in "0123456789":
    rulesDict1[_ch] = [assembly_parrot_rule8,]
rulesDict1[":"] = [assembly_parrot_rule2,]
_register_rules = {
    "I": assembly_parrot_rule4,
    "N": assembly_parrot_rule6,
    "P": assembly_parrot_rule7,
    "S": assembly_parrot_rule5,
}
for _ch in "@ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz":
    if _ch in _register_rules:
        rulesDict1[_ch] = [_register_rules[_ch], assembly_parrot_rule8,]
    else:
        rulesDict1[_ch] = [assembly_parrot_rule8,]
del _register_rules, _ch
# x.rulesDictDict for assembly_parrot mode.
rulesDictDict = {
    # Single ruleset: the per-character dispatch table defined above.
    "assembly_parrot_main": rulesDict1,
}
# Import dict for assembly_parrot mode.
importDict = {} |
6,402 | fix checklog | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by Django 3.1.2 on 2021-04-27 08:53
import django.db.models.deletion
from django.db import migrations, models
def fix_tasklog(apps, schema_editor):
    """Point TaskLog.action at the Action row named by the legacy integer id.

    Tasks with a falsy legacy id are left untouched; tasks referencing a
    deleted Action are skipped silently (the new FK stays NULL).
    """
    task_model = apps.get_model("cm", "TaskLog")
    action_model = apps.get_model("cm", "Action")
    for entry in task_model.objects.all():
        legacy_id = entry.old_action_id
        if not legacy_id:
            continue
        try:
            entry.action = action_model.objects.get(id=legacy_id)
            # Older rows may have a NULL attr; normalize to an empty dict.
            if entry.attr is None:
                entry.attr = {}
            entry.save()
        except action_model.DoesNotExist:
            pass
def fix_joblog(apps, schema_editor):
    """Resolve JobLog's legacy integer ids into real FK objects.

    Each of action/sub_action/task is resolved independently; missing targets
    are skipped.  Every job row is saved unconditionally, matching the
    original migration's behavior.
    """
    job_model = apps.get_model("cm", "JobLog")
    task_model = apps.get_model("cm", "TaskLog")
    action_model = apps.get_model("cm", "Action")
    sub_action_model = apps.get_model("cm", "SubAction")
    for job in job_model.objects.all():
        if job.old_action_id:
            try:
                job.action = action_model.objects.get(id=job.old_action_id)
            except action_model.DoesNotExist:
                pass
        if job.old_sub_action_id:
            try:
                job.sub_action = sub_action_model.objects.get(id=job.old_sub_action_id)
            except sub_action_model.DoesNotExist:
                pass
        # The task id is resolved even when falsy, as in the original code.
        try:
            job.task = task_model.objects.get(id=job.old_task_id)
        except task_model.DoesNotExist:
            pass
        job.save()
def METHOD_NAME(apps, schema_editor):
    """Resolve CheckLog.old_job_id into a real JobLog foreign key.

    Rows with a falsy legacy id, or whose JobLog no longer exists, are left
    unsaved (their new FK stays NULL).
    """
    job_model = apps.get_model("cm", "JobLog")
    check_model = apps.get_model("cm", "CheckLog")
    for entry in check_model.objects.all():
        if not entry.old_job_id:
            continue
        try:
            entry.job = job_model.objects.get(id=entry.old_job_id)
            entry.save()
        except job_model.DoesNotExist:
            pass
def fix_grouplog(apps, schema_editor):
    """Resolve GroupCheckLog.old_job_id into a real JobLog foreign key.

    Rows with a falsy legacy id, or whose JobLog has been deleted, are left
    unsaved.
    """
    job_model = apps.get_model("cm", "JobLog")
    group_model = apps.get_model("cm", "GroupCheckLog")
    for entry in group_model.objects.all():
        if not entry.old_job_id:
            continue
        try:
            entry.job = job_model.objects.get(id=entry.old_job_id)
            entry.save()
        except job_model.DoesNotExist:
            pass
class Migration(migrations.Migration):
    """Convert the integer *_id columns on job/task/check log models into
    real foreign keys.

    Strategy: rename the legacy integer columns to old_*, add nullable FK
    fields alongside them, copy the data across with the fix_* helpers,
    then drop the legacy columns.

    NOTE(review): the RunPython operations have no reverse_code, so this
    migration is irreversible — confirm that is intended.
    """

    dependencies = [
        ("cm", "0065_auto_20210220_0902"),
    ]

    operations = [
        # -- Phase 1: move the legacy integer id columns out of the way -----
        migrations.RenameField(
            model_name="joblog",
            old_name="action_id",
            new_name="old_action_id",
        ),
        migrations.AlterField(
            model_name="joblog",
            name="old_action_id",
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.RenameField(
            model_name="joblog",
            old_name="sub_action_id",
            new_name="old_sub_action_id",
        ),
        migrations.RenameField(
            model_name="joblog",
            old_name="task_id",
            new_name="old_task_id",
        ),
        migrations.RenameField(
            model_name="tasklog",
            old_name="action_id",
            new_name="old_action_id",
        ),
        migrations.AlterField(
            model_name="tasklog",
            name="old_action_id",
            field=models.PositiveIntegerField(default=0),
        ),
        # -- Phase 2: add the new nullable FK columns ------------------------
        migrations.AddField(
            model_name="joblog",
            name="action",
            field=models.ForeignKey(
                default=None,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="cm.action",
            ),
        ),
        migrations.AddField(
            model_name="joblog",
            name="sub_action",
            field=models.ForeignKey(
                default=None,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="cm.subaction",
            ),
        ),
        migrations.AddField(
            model_name="joblog",
            name="task",
            field=models.ForeignKey(
                default=None,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="cm.tasklog",
            ),
        ),
        migrations.AddField(
            model_name="tasklog",
            name="action",
            field=models.ForeignKey(
                default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to="cm.action"
            ),
        ),
        # The unique constraint must be rebuilt on the new FK column.
        migrations.RemoveConstraint(
            model_name="groupchecklog",
            name="unique_group_job",
        ),
        migrations.RenameField(
            model_name="checklog",
            old_name="job_id",
            new_name="old_job_id",
        ),
        migrations.RenameField(
            model_name="groupchecklog",
            old_name="job_id",
            new_name="old_job_id",
        ),
        migrations.AddField(
            model_name="checklog",
            name="job",
            field=models.ForeignKey(
                default=None,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="cm.joblog",
            ),
        ),
        migrations.AddField(
            model_name="groupchecklog",
            name="job",
            field=models.ForeignKey(
                default=None,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="cm.joblog",
            ),
        ),
        migrations.AddConstraint(
            model_name="groupchecklog",
            constraint=models.UniqueConstraint(fields=("job", "title"), name="unique_group_job"),
        ),
        # -- Phase 3: copy data from the legacy ids into the FKs -------------
        migrations.RunPython(fix_tasklog),
        migrations.RunPython(fix_joblog),
        migrations.RunPython(METHOD_NAME),
        migrations.RunPython(fix_grouplog),
        # -- Phase 4: drop the legacy integer columns -------------------------
        migrations.RemoveField(
            model_name="checklog",
            name="old_job_id",
        ),
        migrations.RemoveField(
            model_name="groupchecklog",
            name="old_job_id",
        ),
        migrations.RemoveField(
            model_name="joblog",
            name="old_action_id",
        ),
        migrations.RemoveField(
            model_name="joblog",
            name="old_sub_action_id",
        ),
        migrations.RemoveField(
            model_name="joblog",
            name="old_task_id",
        ),
        migrations.RemoveField(
            model_name="tasklog",
            name="old_action_id",
        ),
    ]
6,403 | get camera function image info | # -*- coding: utf-8 -*-
import datetime
import json
import logging
import os
from mycodo.config import PATH_CAMERAS
from mycodo.databases.models import CustomController
from mycodo.mycodo_flask.utils.utils_general import bytes2human
from mycodo.utils.database import db_retrieve_table_daemon
logger = logging.getLogger("mycodo.camera_functions")
def get_camera_function_paths(unique_id):
    """Return (still_path, video_path, timelapse_path) for a camera function.

    Each path defaults to a subdirectory of PATH_CAMERAS/<unique_id> unless a
    truthy custom path is stored in the function's custom_options JSON.

    :param unique_id: unique ID of the camera CustomController entry.
    :return: tuple of (still, video, timelapse) directory paths.
    """
    camera_path = os.path.join(PATH_CAMERAS, unique_id)
    function = db_retrieve_table_daemon(
        CustomController, unique_id=unique_id)
    try:
        custom_options = json.loads(function.custom_options)
    except (AttributeError, TypeError, ValueError):
        # AttributeError: no matching controller row (function is None);
        # TypeError: custom_options is None; ValueError: malformed JSON.
        # Was a bare 'except:', which also swallowed KeyboardInterrupt.
        custom_options = {}

    still_path = (custom_options.get('custom_path_still') or
                  os.path.join(camera_path, 'still'))
    video_path = (custom_options.get('custom_path_video') or
                  os.path.join(camera_path, 'video'))
    tl_path = (custom_options.get('custom_path_timelapse') or
               os.path.join(camera_path, 'timelapse'))
    return still_path, video_path, tl_path
def _latest_capture_info(custom_options, prefix, dir_path):
    """Return (timestamp_str, human_size, filename) for the newest capture.

    Reads '<prefix>_last_file' and '<prefix>_last_ts' from custom_options.
    Returns (None, None, None) when nothing was recorded, and
    (timestamp, None, None) when the recorded file no longer exists on disk.
    """
    filename = custom_options.get('{}_last_file'.format(prefix))
    timestamp = custom_options.get('{}_last_ts'.format(prefix))
    if not filename or not timestamp:
        return None, None, None
    ts_str = datetime.datetime.fromtimestamp(
        timestamp).strftime("%Y-%m-%d %H:%M:%S")
    file_path = os.path.join(dir_path, filename)
    if not os.path.exists(file_path):
        return ts_str, None, None
    return ts_str, bytes2human(os.path.getsize(file_path)), filename


def METHOD_NAME(unique_id):
    """Retrieve information about the latest still/video/timelapse captures.

    :param unique_id: unique ID of the camera CustomController entry.
    :return: 10-tuple of (still_ts, still_size, still_file,
             video_ts, video_size, video_file,
             tl_ts, tl_size, tl_file, time_lapse_imgs)
    """
    still_path, video_path, tl_path = get_camera_function_paths(unique_id)
    function = db_retrieve_table_daemon(
        CustomController, unique_id=unique_id)
    try:
        custom_options = json.loads(function.custom_options)
    except (AttributeError, TypeError, ValueError):
        # Same narrow handling as get_camera_function_paths (was bare except).
        custom_options = {}

    (latest_img_still_ts, latest_img_still_size,
     latest_img_still) = _latest_capture_info(custom_options, 'still', still_path)
    (latest_img_video_ts, latest_img_video_size,
     latest_img_video) = _latest_capture_info(custom_options, 'video', video_path)
    (latest_img_tl_ts, latest_img_tl_size,
     latest_img_tl) = _latest_capture_info(custom_options, 'tl', tl_path)

    time_lapse_imgs = None
    try:
        # Get list of timelapse filename sets for generating a video from
        # images: collect the distinct prefixes (filename minus the trailing
        # 10 characters — presumably a timestamp/extension suffix; confirm).
        time_lapse_imgs = []
        for i in os.listdir(tl_path):
            if (os.path.isfile(os.path.join(tl_path, i)) and
                    i[:-10] not in time_lapse_imgs):
                time_lapse_imgs.append(i[:-10])
        time_lapse_imgs.sort()
    except Exception:
        pass

    return (latest_img_still_ts, latest_img_still_size, latest_img_still,
            latest_img_video_ts, latest_img_video_size, latest_img_video,
            latest_img_tl_ts, latest_img_tl_size, latest_img_tl, time_lapse_imgs)
6,404 | logger | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from datetime import datetime
from typing import Dict
from unittest.mock import AsyncMock, MagicMock, call, patch
from airbyte_cdk.models import (
AirbyteMessage,
AirbyteRecordMessage,
AirbyteStream,
ConfiguredAirbyteCatalog,
ConfiguredAirbyteStream,
DestinationSyncMode,
SyncMode,
Type,
)
from destination_databend.destination import DatabendClient, DestinationDatabend
from pytest import fixture
@fixture
def METHOD_NAME() -> MagicMock:
    """Pytest fixture: stand-in logger object (a bare MagicMock)."""
    return MagicMock()
@fixture
def config() -> Dict[str, str]:
    """Connection settings for a local Databend instance used by these tests."""
    return {
        "database": "default",
        "username": "root",
        "password": "root",
        "host": "localhost",
        "port": 8081,
        "table": "default",
    }
@fixture(name="mock_connection")
def async_connection_cursor_mock():
    """Return a (connection, cursor) pair; the cursor is an AsyncMock."""
    connection = MagicMock()
    cursor = AsyncMock()
    connection.cursor.return_value = cursor
    return connection, cursor
@fixture
def configured_stream1() -> ConfiguredAirbyteStream:
    """Incremental/append stream fixture writing to "table1"."""
    return ConfiguredAirbyteStream(
        stream=AirbyteStream(
            name="table1",
            json_schema={
                "type": "object",
                "properties": {"col1": {"type": "string"}, "col2": {"type": "integer"}},
            },
            supported_sync_modes=[SyncMode.incremental],
        ),
        sync_mode=SyncMode.incremental,
        destination_sync_mode=DestinationSyncMode.append,
    )
@fixture
def configured_stream2() -> ConfiguredAirbyteStream:
    """Incremental/append stream fixture writing to "table2"."""
    return ConfiguredAirbyteStream(
        stream=AirbyteStream(
            name="table2",
            json_schema={
                "type": "object",
                "properties": {"col1": {"type": "string"}, "col2": {"type": "integer"}},
            },
            supported_sync_modes=[SyncMode.incremental],
        ),
        sync_mode=SyncMode.incremental,
        destination_sync_mode=DestinationSyncMode.append,
    )
@fixture
def airbyte_message1() -> AirbyteMessage:
    """Single RECORD message destined for "table1"."""
    return AirbyteMessage(
        type=Type.RECORD,
        record=AirbyteRecordMessage(
            stream="table1",
            data={"key1": "value1", "key2": 2},
            emitted_at=int(datetime.now().timestamp()) * 1000,
        ),
    )
@fixture
def airbyte_message2() -> AirbyteMessage:
    """Single RECORD message destined for "table2"."""
    return AirbyteMessage(
        type=Type.RECORD,
        record=AirbyteRecordMessage(
            stream="table2",
            data={"key1": "value2", "key2": 3},
            emitted_at=int(datetime.now().timestamp()) * 1000,
        ),
    )
@fixture
def airbyte_state_message() -> AirbyteMessage:
    """Bare STATE message used to checkpoint between records."""
    return AirbyteMessage(type=Type.STATE)
@patch("destination_databend.client.DatabendClient", MagicMock())
def test_connection(config: Dict[str, str], METHOD_NAME: MagicMock) -> None:
    """Smoke test: constructing DatabendClient from config must not raise."""
    # Check no log object
    DatabendClient(**config)
@patch("destination_databend.writer.DatabendSQLWriter")
@patch("destination_databend.client.DatabendClient")
def test_sql_write_append(
    mock_connection: MagicMock,
    mock_writer: MagicMock,
    config: Dict[str, str],
    configured_stream1: ConfiguredAirbyteStream,
    configured_stream2: ConfiguredAirbyteStream,
    airbyte_message1: AirbyteMessage,
    airbyte_message2: AirbyteMessage,
    airbyte_state_message: AirbyteMessage,
) -> None:
    """Append sync: records are queued, nothing deleted, one final flush."""
    catalog = ConfiguredAirbyteCatalog(streams=[configured_stream1, configured_stream2])
    destination = DestinationDatabend()
    result = destination.write(config, catalog, [airbyte_message1, airbyte_state_message, airbyte_message2])
    # Only the state message is echoed back to the caller.
    assert list(result) == [airbyte_state_message]
    mock_writer.return_value.delete_table.assert_not_called()
    # NOTE(review): this line *assigns* to mock_calls instead of asserting on
    # it, so it verifies nothing — probably meant assert_has_calls([...]).
    mock_writer.return_value.create_raw_table.mock_calls = [call(mock_connection, "table1"), call(mock_connection, "table2")]
    assert len(mock_writer.return_value.queue_write_data.mock_calls) == 2
    mock_writer.return_value.flush.assert_called_once()
@patch("destination_databend.writer.DatabendSQLWriter")
@patch("destination_databend.client.DatabendClient")
def test_sql_write_overwrite(
    mock_connection: MagicMock,
    mock_writer: MagicMock,
    config: Dict[str, str],
    configured_stream1: ConfiguredAirbyteStream,
    configured_stream2: ConfiguredAirbyteStream,
    airbyte_message1: AirbyteMessage,
    airbyte_message2: AirbyteMessage,
    airbyte_state_message: AirbyteMessage,
):
    """Overwrite sync: the overwritten stream's table is deleted first."""
    # Overwrite triggers a delete
    configured_stream1.destination_sync_mode = DestinationSyncMode.overwrite
    catalog = ConfiguredAirbyteCatalog(streams=[configured_stream1, configured_stream2])
    destination = DestinationDatabend()
    result = destination.write(config, catalog, [airbyte_message1, airbyte_state_message, airbyte_message2])
    assert list(result) == [airbyte_state_message]
    mock_writer.return_value.delete_table.assert_called_once_with("table1")
    # NOTE(review): this line *assigns* to mock_calls instead of asserting on
    # it, so it verifies nothing — probably meant assert_has_calls([...]).
    mock_writer.return_value.create_raw_table.mock_calls = [call(mock_connection, "table1"), call(mock_connection, "table2")]
6,405 | update filter from operator | from __future__ import absolute_import
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
# Python
import os
import logging
logger = logging.getLogger(__name__)
# PyQt
from PyQt5 import uic
from PyQt5.QtWidgets import QMainWindow, QMessageBox
# ilastik
from ilastik.utility import bind, log_exception
from ilastik.utility.gui import ThreadRouter, threadRouted
from .preprocessingViewerGui import PreprocessingViewerGui
class PreprocessingGui(QMainWindow):
def __init__(self, parentApplet, topLevelOperatorView):
super(PreprocessingGui, self).__init__()
self.parentApplet = parentApplet
self.drawer = None
self.threadRouter = ThreadRouter(self)
self.guiMode = 1
self.topLevelOperatorView = topLevelOperatorView
self.initAppletDrawerUic()
self.centralGui = PreprocessingViewerGui(parentApplet, self.topLevelOperatorView)
def initAppletDrawerUic(self):
"""
Load the ui file for the applet drawer, which we own.
"""
# Load the ui file (find it in our own directory)
localDir = os.path.split(__file__)[0] + "/"
# (We don't pass self here because we keep the drawer ui in a separate object.)
self.drawer = uic.loadUi(localDir + "/preprocessingDrawer.ui")
# Set up radiobox layout
self.filterbuttons = [
self.drawer.filter1,
self.drawer.filter2,
self.drawer.filter3,
self.drawer.filter4,
self.drawer.filter5,
]
self.correspondingSigmaMins = [0.9, 0.9, 0.6, 0.1, 0.1]
# Set up our handlers
for f in self.filterbuttons:
f.clicked.connect(self.handleFilterChanged)
# Initialize widget values
self.updateDrawerFromOperator()
# Event handlers: everything is handled once the run button is clicked, not live
self.drawer.runButton.clicked.connect(self.handleRunButtonClicked)
self.drawer.writeprotectBox.stateChanged.connect(self.handleWriterprotectStateChanged)
self.parentApplet.appletStateUpdateRequested.subscribe(self.processingFinished)
# Slot change handlers (in case the operator is somehow changed *outside* the gui, such as by the workflow.
self.topLevelOperatorView.Filter.notifyDirty(self.METHOD_NAME)
self.topLevelOperatorView.Sigma.notifyDirty(self.updateSigmaFromOperator)
def METHOD_NAME(self, *args):
self.filterbuttons[self.topLevelOperatorView.Filter.value].setChecked(True)
self.filterChoice = [f.isChecked() for f in self.filterbuttons].index(True)
def updateSigmaFromOperator(self, *args):
self.drawer.sigmaSpin.setValue(self.topLevelOperatorView.Sigma.value)
def updateDrawerFromOperator(self, *args):
self.METHOD_NAME()
self.updateSigmaFromOperator()
def handleFilterChanged(self):
choice = [f.isChecked() for f in self.filterbuttons].index(True)
self.filterChoice = choice
# update lower bound for sigma
self.drawer.sigmaSpin.setMinimum(self.correspondingSigmaMins[choice])
def processingFinished(self):
"""Method makes sure finished processing is communicated visually
After processing is finished it is checked whether one of the result
layers is visible. If not, finished processing is communicated by
showing the watershed layer.
"""
layerStack = self.centralGui.editor.layerStack
watershedIndex = layerStack.findMatchingIndex(lambda x: x.name == "Watershed")
filteredIndex = layerStack.findMatchingIndex(lambda x: x.name == "Filtered Data")
# Only do something if none of the result layers is visible
if not layerStack[watershedIndex].visible:
if not layerStack[filteredIndex].visible:
layerStack[watershedIndex].visible = True
@threadRouted
def onFailed(self, exception, exc_info):
log_exception(logger, exc_info=exc_info)
QMessageBox.critical(self, "error", str(exception))
def handleRunButtonClicked(self):
cached_result = self.topLevelOperatorView.cachedResult[0]
n_saved = len(cached_result.object_names) if cached_result is not None else 0
if n_saved:
response = QMessageBox.warning(
self,
"Confirm Deleting Saved Objects",
(
f"<p>This project already contains {n_saved} saved segmented objects. The existing segmentations "
"become invalid if you change preprocessing settings, and will be deleted.</p>"
"<p>Please consider creating a copy of the project file for the different preprocessing instead.</p>"
"<p>Run preprocessing and delete all saved objects?</p>"
),
buttons=QMessageBox.Yes | QMessageBox.Cancel,
defaultButton=QMessageBox.Cancel,
)
if response == QMessageBox.Cancel:
return
self.setWriteprotect()
self.topLevelOperatorView.Filter.setValue(self.filterChoice)
self.topLevelOperatorView.SizeRegularizer.setValue(self.drawer.sizeRegularizerSpin.value())
self.topLevelOperatorView.Sigma.setValue(self.drawer.sigmaSpin.value())
self.topLevelOperatorView.ReduceTo.setValue(self.drawer.reduceToSpin.value())
self.topLevelOperatorView.DoAgglo.setValue(self.drawer.doAggloCheckBox.isChecked())
r = self.topLevelOperatorView.PreprocessedData[:]
r.notify_failed(self.onFailed)
r.notify_finished(bind(self.parentApplet.appletStateUpdateRequested))
r.submit()
def handleWriterprotectStateChanged(self):
iswriteprotect = self.drawer.writeprotectBox.checkState()
for f in self.filterbuttons:
f.setEnabled(not iswriteprotect)
self.drawer.sigmaSpin.setEnabled(not iswriteprotect)
self.drawer.runButton.setEnabled(not iswriteprotect)
self.drawer.sizeRegularizerSpin.setEnabled(not iswriteprotect)
self.drawer.reduceToSpin.setEnabled(not iswriteprotect)
self.drawer.doAggloCheckBox.setEnabled(not iswriteprotect)
    def enableWriteprotect(self, ew):
        """Enable/disable the write-protect checkbox itself (ew: bool)."""
        self.drawer.writeprotectBox.setEnabled(ew)
    def setWriteprotect(self):
        """Turn write-protection on (checking the box also disables the controls)."""
        self.drawer.writeprotectBox.setChecked(True)
    def setFilter(self, s, propagate=False):
        """Check filter button at index *s* and apply the change.

        NOTE(review): the ``propagate`` argument is currently unused — confirm
        whether it was meant to be forwarded to handleFilterChanged.
        """
        self.filterbuttons[s].setChecked(True)
        self.handleFilterChanged()
    def setSigma(self, sigma):
        """Set the sigma spin box to *sigma* (triggers the widget's change signal)."""
        self.drawer.sigmaSpin.setValue(sigma)
    def centralWidget(self):
        """Return the central GUI widget (applet GUI interface)."""
        return self.centralGui
    def appletDrawer(self):
        """Return the applet drawer widget (applet GUI interface)."""
        return self.drawer
    def menus(self):
        """Return the menus this applet contributes; none for this GUI."""
        return []
    def viewerControlWidget(self):
        """Delegate the viewer control widget to the central GUI."""
        return self.centralGui.viewerControlWidget()
    def setImageIndex(self, imageIndex):
        """No-op: this applet does not react to image index changes."""
        pass
    def imageLaneAdded(self, imageIndex):
        """No-op: lane additions need no handling here."""
        pass
    def imageLaneRemoved(self, laneIndex, finalLength):
        """No-op: lane removals need no handling here."""
        pass
    def stopAndCleanUp(self):
        """Forward shutdown/cleanup to the central GUI."""
        self.centralGui.stopAndCleanUp()
6,406 | nndct info print |
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
import logging
import io
from nndct_shared.base import NNDCT_KEYS, GLOBAL_MAP, NNDCT_DEBUG_LVL
def obj_to_str(obj):
    """Render *obj* as a newline-joined string.

    Lists render one element per line, dicts as ``key : value`` per line,
    and plain strings are returned unchanged.

    Raises:
        TypeError: if *obj* is not a list, dict, or str.  (The original raised
        a bare Exception whose message wrongly claimed str was unsupported.)
    """
    if isinstance(obj, list):
        return '\n'.join("{}".format(n) for n in obj)
    if isinstance(obj, dict):
        return '\n'.join("{} : {}".format(k, v) for k, v in obj.items())
    if isinstance(obj, str):
        return obj
    raise TypeError(
        "obj_to_str only supports list, dict and str, got {}".format(type(obj).__name__))
def METHOD_NAME(string):
    """Emit an informational message via the registered logger, or stdout as a fallback."""
    message = "[NNDCT_INFO] {}".format(string)
    logger = GLOBAL_MAP.get_ele(NNDCT_KEYS.LOGGER)
    if logger:
        logger.info(message)
    else:
        print(message)
def nndct_warn_print(string):
    """Emit a warning when the global WARN flag is enabled (logger if present, else stdout)."""
    # Flag is compared to True (not truthiness) to match the stored sentinel.
    if GLOBAL_MAP.get_ele(NNDCT_KEYS.WARN_FLAG) == True:  # noqa: E712
        message = "[NNDCT_WARN] {}".format(string)
        logger = GLOBAL_MAP.get_ele(NNDCT_KEYS.LOGGER)
        if logger:
            logger.warning(message)
        else:
            print(message)
def nndct_debug_print(string, title='', level=1):
    """Emit a debug message when debugging is on and *level* is within the verbosity limit.

    'Start'/'End' titles wrap the message in a banner line ('End' adds a
    trailing newline).
    """
    debug_enabled = GLOBAL_MAP.get_ele(NNDCT_KEYS.DEBUG_FLAG) == True  # noqa: E712
    if not (debug_enabled and level <= GLOBAL_MAP.get_ele(NNDCT_KEYS.VERBOSE_LEVEL)):
        return
    logger = GLOBAL_MAP.get_ele(NNDCT_KEYS.LOGGER)
    if title in ('Start', 'End'):
        string = "\n********************* <{} : {}> *********************".format(title, string)
        if title == 'End':
            string += "\n"
    message = "[NNDCT_DEBUG_Lv_{}] {}".format(level, string)
    if logger:
        logger.debug(message)
    else:
        print(message)
def nndct_error_print(string):
    """Print/log an error message and terminate the process when the ERROR flag is set."""
    if True == GLOBAL_MAP.get_ele(NNDCT_KEYS.ERROR_FLAG):
        logger = GLOBAL_MAP.get_ele(NNDCT_KEYS.LOGGER)
        if logger:
            logger.error("[NNDCT_ERROR] {}".format(string))
        else:
            print("[NNDCT_ERROR] {}".format(string))
        # NOTE(review): as reconstructed, the process exits only when the
        # ERROR flag is enabled and errors are silently ignored otherwise —
        # confirm this placement of sys.exit is intended.
        sys.exit(1)
def nndct_details_debug(obj, title, level=NNDCT_DEBUG_LVL.DETAILS):
    """Dump *obj* (rendered via obj_to_str) between Start/End debug banners."""
    header = "\n********************* <Start : {}> *********************\n{}".format(
        title, obj_to_str(obj))
    nndct_debug_print(header, level=level)
    nndct_debug_print(title, title='End', level=level)
#some wrappers
def nndct_info(func):
    """Decorator: prefix *func*'s output with an [NNDCT_INFO] tag when the INFO flag is on."""
    def wrapper(*args, **kwargs):
        if GLOBAL_MAP.get_ele(NNDCT_KEYS.INFO_FLAG) == True:  # noqa: E712
            print("[NNDCT_INFO]", end='')
        return func(*args, **kwargs)
    return wrapper
def nndct_warn(func):
    """Decorator: prefix *func*'s output with an [NNDCT_WARN] tag when the WARN flag is on."""
    def wrapper(*args, **kwargs):
        if GLOBAL_MAP.get_ele(NNDCT_KEYS.WARN_FLAG) == True:  # noqa: E712
            print("[NNDCT_WARN]", end='')
        return func(*args, **kwargs)
    return wrapper
def nndct_debug(func):
    """Decorator: prefix *func*'s output with an [NNDCT_DEBUG] tag when the DEBUG flag is on."""
    def wrapper(*args, **kwargs):
        if GLOBAL_MAP.get_ele(NNDCT_KEYS.DEBUG_FLAG) == True:  # noqa: E712
            print("[NNDCT_DEBUG]", end='')
        return func(*args, **kwargs)
    return wrapper
def nndct_error(func):
    """Decorator: prefix *func*'s output with an [NNDCT_ERROR] tag when the ERROR flag is on.

    Bug fixed: the original re-checked the flag and called ``exit(1)`` *after*
    ``return func(...)`` — that code was unreachable and has been removed.
    The wrapper now simply tags the output and returns the wrapped function's
    result, matching the other nndct_* decorators.
    """
    def wrapper(*args, **kwargs):
        if GLOBAL_MAP.get_ele(NNDCT_KEYS.ERROR_FLAG) == True:  # noqa: E712
            print("[NNDCT_ERROR]", end='')
        return func(*args, **kwargs)
    return wrapper
def get_nndct_logger(filename='NndctGen_log'):
    """Create (or fetch) a logger writing to both stderr and *filename*.

    The logger is keyed by the file name with '/' replaced by 'SPL' to form a
    valid logger name; parent directories of *filename* are created on demand.

    Bug fixed: repeated calls used to attach duplicate stream/file handlers
    (duplicating every record and leaking file descriptors).  An
    already-configured logger is now returned unchanged.

    Returns:
        logging.Logger with one StreamHandler and one FileHandler at INFO level.
    """
    log_dir = os.path.dirname(filename)
    if log_dir and not os.path.exists(log_dir):
        os.makedirs(log_dir)
    logger = logging.getLogger(filename.replace("/", 'SPL'))
    logger.setLevel(logging.INFO)
    if logger.handlers:
        # Already configured by a previous call: avoid duplicate handlers.
        return logger
    formatter = logging.Formatter(
        '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
    sh = logging.StreamHandler()
    fh = logging.FileHandler(filename, mode='w', encoding=None, delay=False)
    for h in [sh, fh]:
        h.setLevel(logging.INFO)
        h.setFormatter(formatter)
        logger.addHandler(h)
    return logger
def log_or_print(str, logger=None):  # noqa: A002 - param name shadows builtin, kept for API compat
    """Send *str* to *logger* at INFO level if one is given, otherwise print it."""
    if not logger:
        print(str)
    else:
        logger.info(str)
def get_config_str(obj,
                   title,
                   ignore_prefix=(),
                   ignore_suffix=(),
                   ignore_keys=()):
    """Build a human-readable '>> key : value' configuration string for *obj*.

    Iterates ``obj.default_kwargs`` keys and renders each corresponding
    attribute value, skipping keys matching *ignore_prefix*/*ignore_suffix*
    or listed in *ignore_keys*.  Dict values are expanded one entry per line.

    Fix: the mutable-list default arguments ([]) were replaced with tuples;
    callers passing lists are unaffected.

    NOTE: attributes whose value is falsy (None, 0, '', ...) are skipped,
    matching the original behavior.
    """
    assert hasattr(
        obj, 'default_kwargs'
    ), 'object {} has no default_kwargs, failed to generate configuration string'.format(
        obj)
    config_str = '\n' + ">> <{}>".format(title) + '\n>> '
    for key in obj.default_kwargs:
        value = getattr(obj, key, None)
        if not value:
            continue
        if any(key.endswith(s) for s in ignore_suffix):
            continue
        if any(key.startswith(p) for p in ignore_prefix):
            continue
        if key in ignore_keys:
            continue
        if isinstance(value, dict):
            config_str += '\n>> {}: \n>> {}'.format(
                key, '\n>> '.join(
                    ['{} : {}'.format(k, v) for k, v in value.items()]))
        else:
            config_str += '\n>> {} : {}'.format(key, value)
    return config_str
class NndctDebugger:
    """Mixin giving subclasses leveled debug printing tagged with their class name."""

    def __init__(self):
        # Default debug level; name-mangled to _NndctDebugger__DebugLv.
        self.__DebugLv = 0

    def __host_info(self):
        # Tag messages with the *concrete* subclass name.
        return "<{}> ".format(self.__class__.__name__)

    def set_debug_lv(self, level):
        """Set the instance's default debug level."""
        self.__DebugLv = level

    def debug(self, string, title='', level=None):
        """Print a tagged debug message; falls back to the instance level.

        NOTE(review): an explicit level of 0 falls back to the default
        because of the ``or`` — confirm that is intended.
        """
        nndct_debug_print(
            self.__host_info() + string, title=title, level=level or self.__DebugLv)

    def debug_details(self, obj, title, level=None):
        """Dump *obj* with Start/End banners at DETAILS level by default."""
        nndct_details_debug(
            obj,
            title=self.__host_info() + title,
            level=level or NNDCT_DEBUG_LVL.DETAILS)
6,407 | get voltage | #############################################################################
# Accton
#
# Module contains an implementation of SONiC Platform Base API and
# provides the PSUs status which are available in the platform
#
#############################################################################
try:
from sonic_platform_base.psu_base import PsuBase
#from sonic_platform.fan import Fan
from .helper import APIHelper
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
# sysfs path template: {0} = I2C bus number, {1} = device address low byte (hex).
I2C_PATH ="/sys/bus/i2c/devices/{0}-00{1}/"

PSU_NAME_LIST = ["PSU-1", "PSU-2"]
# Number of fans per PSU, indexed by PSU number.
PSU_NUM_FAN = [1, 1]

# PSU index -> hwmon (PMBus) sensor device location on the I2C bus.
PSU_HWMON_I2C_MAPPING = {
    0: {
        "num": 9,
        "addr": "58"
    },
    1: {
        "num": 9,
        "addr": "59"
    },
}

# PSU index -> CPLD (presence / power-good) device location on the I2C bus.
PSU_CPLD_I2C_MAPPING = {
    0: {
        "num": 9,
        "addr": "50"
    },
    1: {
        "num": 9,
        "addr": "51"
    },
}
class Psu(PsuBase):
    """Platform-specific Psu class.

    Sensor values come from the hwmon (PMBus) sysfs device; presence and
    power-good bits come from the CPLD sysfs device.  Repeated
    read-and-scale boilerplate is factored into private helpers, and the
    stale commented-out fan-initialization block was removed.
    """

    def __init__(self, psu_index=0):
        PsuBase.__init__(self)
        self.index = psu_index
        self._api_helper = APIHelper()
        # hwmon (PMBus) device exposing electrical/thermal sensors.
        self.i2c_num = PSU_HWMON_I2C_MAPPING[self.index]["num"]
        self.i2c_addr = PSU_HWMON_I2C_MAPPING[self.index]["addr"]
        self.hwmon_path = I2C_PATH.format(self.i2c_num, self.i2c_addr)
        # CPLD device exposing presence / power-good bits.  NOTE: i2c_num and
        # i2c_addr are deliberately reused and keep the CPLD values afterwards
        # (pre-existing behavior, preserved for external readers).
        self.i2c_num = PSU_CPLD_I2C_MAPPING[self.index]["num"]
        self.i2c_addr = PSU_CPLD_I2C_MAPPING[self.index]["addr"]
        self.cpld_path = I2C_PATH.format(self.i2c_num, self.i2c_addr)
        self.__initialize_fan()

    def __initialize_fan(self):
        # Imported locally to avoid a circular import between psu and fan modules.
        from sonic_platform.fan import Fan
        for fan_index in range(0, PSU_NUM_FAN[self.index]):
            fan = Fan(fan_index, 0, is_psu_fan=True, psu_index=self.index)
            self._fan_list.append(fan)

    def __read_milli(self, attr):
        """Read hwmon attribute *attr* (milli-units); return value/1000, or 0 on read failure."""
        val = self._api_helper.read_txt_file("{}{}".format(self.hwmon_path, attr))
        if val is not None:
            return float(val) / 1000
        return 0

    def __read_cpld_flag(self, attr):
        """Read CPLD attribute *attr*; True when it reads '1', False otherwise."""
        val = self._api_helper.read_txt_file("{}{}".format(self.cpld_path, attr))
        if val is not None:
            return int(val, 10) == 1
        # Fix: the original returned int 0 here; False keeps the documented
        # bool contract (and is == 0, so callers are unaffected).
        return False

    def METHOD_NAME(self):
        """
        Retrieves current PSU voltage output
        Returns:
            A float number, the output voltage in volts,
            e.g. 12.1
        """
        return self.__read_milli('psu_v_out')

    def get_current(self):
        """
        Retrieves present electric current supplied by PSU
        Returns:
            A float number, the electric current in amperes, e.g 15.4
        """
        return self.__read_milli('psu_i_out')

    def get_power(self):
        """
        Retrieves current energy supplied by PSU
        Returns:
            A float number, the power in watts, e.g. 302.6
        """
        return self.__read_milli('psu_p_out')

    def get_powergood_status(self):
        """
        Retrieves the powergood status of PSU
        Returns:
            A boolean, True if PSU has stablized its output voltages and passed all
            its internal self-tests, False if not.
        """
        return self.get_status()

    def set_status_led(self, color):
        """
        Sets the state of the PSU status LED
        Args:
            color: A string representing the color with which to set the PSU status LED
                   Note: Only support green and off
        Returns:
            bool: True if status LED state is set successfully, False if not
        """
        return False  # Controlled by HW

    def get_status_led(self):
        """
        Gets the state of the PSU status LED
        Returns:
            A string, one of the predefined STATUS_LED_COLOR_* strings above
        """
        # NOTE(review): the API expects a STATUS_LED_COLOR_* string; returning
        # False is kept for backward compatibility but violates that contract.
        return False  # Controlled by HW

    def get_temperature(self):
        """
        Retrieves current temperature reading from PSU
        Returns:
            A float number of current temperature in Celsius up to nearest thousandth
            of one degree Celsius, e.g. 30.125
        """
        return self.__read_milli('psu_temp1_input')

    def get_temperature_high_threshold(self):
        """
        Retrieves the high threshold temperature of PSU
        Returns:
            A float number, the high threshold temperature of PSU in Celsius
            up to nearest thousandth of one degree Celsius, e.g. 30.125
        """
        return False  # Not supported

    def get_voltage_high_threshold(self):
        """
        Retrieves the high threshold PSU voltage output
        Returns:
            A float number, the high threshold output voltage in volts,
            e.g. 12.1
        """
        return self.__read_milli('psu_mfr_vout_max')

    def get_voltage_low_threshold(self):
        """
        Retrieves the low threshold PSU voltage output
        Returns:
            A float number, the low threshold output voltage in volts,
            e.g. 12.1
        """
        return self.__read_milli('psu_mfr_vout_min')

    def get_name(self):
        """
        Retrieves the name of the device
        Returns:
            string: The name of the device
        """
        return PSU_NAME_LIST[self.index]

    def get_presence(self):
        """
        Retrieves the presence of the PSU
        Returns:
            bool: True if PSU is present, False if not
        """
        return self.__read_cpld_flag('psu_present')

    def get_status(self):
        """
        Retrieves the operational status of the device
        Returns:
            A boolean value, True if device is operating properly, False if not
        """
        return self.__read_cpld_flag('psu_power_good')
6,408 | id | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetTIDataConnectorResult',
'AwaitableGetTIDataConnectorResult',
'get_ti_data_connector',
'get_ti_data_connector_output',
]
@pulumi.output_type
class GetTIDataConnectorResult:
    """
    Represents threat intelligence data connector.
    """
    # NOTE(review): this file is generated by Pulumi ("do not edit by hand");
    # 'METHOD_NAME' is a placeholder left by the dataset extraction for the
    # `id` accessor — confirm against the generator output before editing.
    def __init__(__self__, data_types=None, etag=None, METHOD_NAME=None, kind=None, name=None, system_data=None, tenant_id=None, tip_lookback_period=None, type=None):
        # Each argument is type-checked and stored via pulumi.set so the
        # @pulumi.output_type machinery can expose it through the getters below.
        if data_types and not isinstance(data_types, dict):
            raise TypeError("Expected argument 'data_types' to be a dict")
        pulumi.set(__self__, "data_types", data_types)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", METHOD_NAME)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tenant_id and not isinstance(tenant_id, str):
            raise TypeError("Expected argument 'tenant_id' to be a str")
        pulumi.set(__self__, "tenant_id", tenant_id)
        if tip_lookback_period and not isinstance(tip_lookback_period, str):
            raise TypeError("Expected argument 'tip_lookback_period' to be a str")
        pulumi.set(__self__, "tip_lookback_period", tip_lookback_period)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="dataTypes")
    def data_types(self) -> Optional['outputs.TIDataConnectorDataTypesResponse']:
        """
        The available data types for the connector.
        """
        return pulumi.get(self, "data_types")

    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        Etag of the azure resource
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def kind(self) -> str:
        """
        The kind of the data connector
        Expected value is 'ThreatIntelligence'.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Azure Resource Manager metadata containing createdBy and modifiedBy information.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> Optional[str]:
        """
        The tenant id to connect to, and get the data from.
        """
        return pulumi.get(self, "tenant_id")

    @property
    @pulumi.getter(name="tipLookbackPeriod")
    def tip_lookback_period(self) -> Optional[str]:
        """
        The lookback period for the feed to be imported.
        """
        return pulumi.get(self, "tip_lookback_period")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetTIDataConnectorResult(GetTIDataConnectorResult):
    # Generated shim: the dead `if False: yield` marks __await__ as a
    # generator so the result can be awaited; it immediately returns a plain
    # GetTIDataConnectorResult copy.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetTIDataConnectorResult(
            data_types=self.data_types,
            etag=self.etag,
            METHOD_NAME=self.METHOD_NAME,
            kind=self.kind,
            name=self.name,
            system_data=self.system_data,
            tenant_id=self.tenant_id,
            tip_lookback_period=self.tip_lookback_period,
            type=self.type)
def get_ti_data_connector(data_connector_id: Optional[str] = None,
                          resource_group_name: Optional[str] = None,
                          workspace_name: Optional[str] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTIDataConnectorResult:
    """
    Gets a data connector.
    Azure REST API version: 2023-02-01.


    :param str data_connector_id: Connector ID
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: The name of the workspace.
    """
    __args__ = dict()
    __args__['dataConnectorId'] = data_connector_id
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous provider invoke; the typed result is unpacked field by field.
    __ret__ = pulumi.runtime.invoke('azure-native:securityinsights:getTIDataConnector', __args__, opts=opts, typ=GetTIDataConnectorResult).value
    return AwaitableGetTIDataConnectorResult(
        data_types=pulumi.get(__ret__, 'data_types'),
        etag=pulumi.get(__ret__, 'etag'),
        METHOD_NAME=pulumi.get(__ret__, 'id'),
        kind=pulumi.get(__ret__, 'kind'),
        name=pulumi.get(__ret__, 'name'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tenant_id=pulumi.get(__ret__, 'tenant_id'),
        tip_lookback_period=pulumi.get(__ret__, 'tip_lookback_period'),
        type=pulumi.get(__ret__, 'type'))
# lift_output_func generates the Output-returning variant from the plain
# invoke above; the `...` body is intentional (generated code).
@_utilities.lift_output_func(get_ti_data_connector)
def get_ti_data_connector_output(data_connector_id: Optional[pulumi.Input[str]] = None,
                                 resource_group_name: Optional[pulumi.Input[str]] = None,
                                 workspace_name: Optional[pulumi.Input[str]] = None,
                                 opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTIDataConnectorResult]:
    """
    Gets a data connector.
    Azure REST API version: 2023-02-01.


    :param str data_connector_id: Connector ID
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str workspace_name: The name of the workspace.
    """
    ...
6,409 | collate data | """
ASE scraper is meant to be disconnected from others, and therefore a collection of functions for now.
This is by design since most use cases of ASE desire more flexibility; simply import the functions.
"""
import numpy as np
from fitsnap3lib.tools.group_tools import assign_validation
def ase_scraper(data, random_test: bool=False) -> list:
    """
    Function to organize groups and allocate shared arrays used in Calculator. For now when using
    ASE frames, we don't have groups.

    Args:
        data: List of ASE frames or dictionary group table containing frames.
        random_test: Select test data randomly if True, else take last percentage of configs in a
            group for reproducibility.

    Returns a list of data dictionaries suitable for fitsnap descriptor calculator.
    If running in parallel, this list will be distributed over procs, so that each proc will have a
    portion of the list.

    Raises:
        TypeError: if *data* is neither a list nor a dict.
    """
    # isinstance (rather than exact type comparison) also accepts list/dict
    # subclasses such as OrderedDict.
    if isinstance(data, list):
        # Simply collate data from Atoms objects.
        return [METHOD_NAME(atoms) for atoms in data]
    if isinstance(data, dict):
        # Dictionary input: treat top-level keys as groups.
        assign_validation(data, random_test=random_test)
        ret = []
        for name, group in data.items():
            frames = group["frames"]
            # Extend the fitsnap data list with this group.
            ret.extend(METHOD_NAME(atoms, name, group, f) for f, atoms in enumerate(frames))
        return ret
    # TypeError (a subclass of Exception) better describes the failure.
    raise TypeError("Argument must be list or dictionary for ASE scraper.")
def get_apre(cell):
    """
    Calculate transformed ASE cell for LAMMPS calculations. Thank you Jan Janssen!

    Args:
        cell: ASE atoms cell (3x3 array, rows are the lattice vectors).

    Returns the lower-triangular cell as an np.array suitable for LAMMPS.
    """
    vec_a, vec_b, vec_c = cell
    len_a, len_b, len_c = (np.linalg.norm(v) for v in cell)

    # Cell angles between the lattice vectors.
    alpha = np.arccos(np.dot(vec_b, vec_c) / (len_b * len_c))
    beta = np.arccos(np.dot(vec_a, vec_c) / (len_a * len_c))
    gamma = np.arccos(np.dot(vec_a, vec_b) / (len_a * len_b))

    # Lower-triangular (LAMMPS "restricted triclinic") representation.
    xhi = len_a
    xyp = np.cos(gamma) * len_b
    yhi = np.sin(gamma) * len_b
    xzp = np.cos(beta) * len_c
    yzp = (len_b * len_c * np.cos(alpha) - xyp * xzp) / yhi
    zhi = np.sqrt(len_c**2 - xzp**2 - yzp**2)
    return np.array(((xhi, 0, 0), (xyp, yhi, 0), (xzp, yzp, zhi)))
def METHOD_NAME(atoms, name: str=None, group_dict: dict=None, f: int=0) -> dict:
    """
    Function to organize fitting data for FitSNAP from ASE atoms objects.

    Args:
        atoms: ASE atoms object for a single configuration of atoms.
        name: Optional name of this configuration.
        group_dict: Optional dictionary containing group information.
        f: Optional index associated with configuration in a group.

    Returns a data dictionary for a single configuration.

    Bug fixed: the function ended with ``return dat`` (undefined name,
    a NameError at runtime); it now returns the assembled ``data`` dict.
    """
    # Transform ASE cell to be appropriate for LAMMPS (lower-triangular).
    apre = get_apre(cell=atoms.cell)
    R = np.dot(np.linalg.inv(atoms.cell), apre)
    positions = np.matmul(atoms.get_positions(), R)
    cell = apre.T

    # Make a data dictionary for this config.
    data = {}
    data['Group'] = name  # TODO: Make this customizable for ASE groups.
    data['File'] = f"{name}_{f}"
    data['Positions'] = positions
    data['AtomTypes'] = atoms.get_chemical_symbols()
    if (atoms.calc is None):
        # Just calculating descriptors; assign 0.
        data['Energy'] = 0.0
        data['Forces'] = np.zeros((len(atoms), 3))
        data['Stress'] = np.zeros(6)
    else:
        data['Energy'] = atoms.get_total_energy()
        data['Forces'] = atoms.get_forces()
        data['Stress'] = atoms.get_stress(voigt=False)
    data['NumAtoms'] = len(atoms)
    data['QMLattice'] = cell
    data['Lattice'] = cell
    data['Rotation'] = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    data['Translation'] = np.zeros((len(atoms), 3))
    # Inject the weights and other group quantities (defaults when ungrouped).
    if group_dict is not None:
        data['eweight'] = group_dict["eweight"] if "eweight" in group_dict else 1.0
        data['fweight'] = group_dict["fweight"] if "fweight" in group_dict else 1.0
        data['vweight'] = group_dict["vweight"] if "vweight" in group_dict else 1.0
        data['test_bool'] = group_dict['test_bools'][f]
    else:
        data['eweight'] = 1.0
        data['fweight'] = 1.0
        data['vweight'] = 1.0
        data['test_bool'] = 0
    return data
6,410 | package info | import os
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.apple import is_apple_os
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.env import VirtualBuildEnv
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir, collect_libs
required_conan_version = ">=1.53.0"
class QuickfixConan(ConanFile):
    """Conan recipe packaging the QuickFIX C++ FIX-protocol engine."""
    name = "quickfix"
    description = "QuickFIX is a free and open source implementation of the FIX protocol"
    license = "The QuickFIX Software License, Version 1.0"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "http://www.quickfixengine.org"
    topics = ("FIX", "Financial Information Exchange", "libraries", "cpp")
    package_type = "library"
    settings = "os", "arch", "compiler", "build_type"
    # Optional back-ends: SSL transport, PostgreSQL / MySQL message stores.
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "with_ssl": [True, False],
        "with_postgres": [True, False],
        "with_mysql": [None, "libmysqlclient"],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "with_ssl": False,
        "with_postgres": False,
        "with_mysql": None,
    }

    def export_sources(self):
        export_conandata_patches(self)

    def config_options(self):
        # fPIC is meaningless on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        # Shared builds are always position-independent; drop the option.
        if self.options.shared:
            self.options.rm_safe("fPIC")

    def layout(self):
        cmake_layout(self, src_folder="src")

    def requirements(self):
        if self.options.with_ssl:
            self.requires("openssl/[>=1.1 <4]")
        if self.options.with_postgres:
            self.requires("libpq/15.3")
        if self.options.with_mysql == "libmysqlclient":
            self.requires("libmysqlclient/8.0.31")

    def validate(self):
        if self.settings.os == "Windows" and self.options.shared:
            raise ConanInvalidConfiguration("QuickFIX cannot be built as shared lib on Windows")
        if is_apple_os(self) and self.settings.arch == "armv8":
            # See issue: https://github.com/quickfix/quickfix/issues/206
            raise ConanInvalidConfiguration("QuickFIX doesn't support ARM compilation")

    def build_requirements(self):
        self.tool_requires("cmake/[>=3.16 <4]")

    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)

    def generate(self):
        env = VirtualBuildEnv(self)
        env.generate()
        # Map recipe options onto the upstream CMake cache variables.
        tc = CMakeToolchain(self)
        tc.variables["HAVE_SSL"] = self.options.with_ssl
        tc.variables["HAVE_POSTGRESQL"] = self.options.with_postgres
        tc.variables["HAVE_MYSQL"] = bool(self.options.with_mysql)
        tc.generate()
        tc = CMakeDeps(self)
        tc.generate()

    def build(self):
        apply_conandata_patches(self)
        cmake = CMake(self)
        cmake.configure()
        cmake.build(target="quickfix")

    def package(self):
        cmake = CMake(self)
        cmake.install()
        # config.h is generated in the build tree; Except.h is not installed
        # by upstream's install target, so copy both manually.
        copy(self, "config.h",
             dst=os.path.join(self.package_folder, "include", "quickfix"),
             src=self.build_folder)
        copy(self, "Except.h",
             dst=os.path.join(self.package_folder, "include"),
             src=os.path.join(self.source_folder, "src", "C++"))
        copy(self, "LICENSE",
             dst=os.path.join(self.package_folder, "licenses"),
             src=self.source_folder)
        rmdir(self, os.path.join(self.package_folder, "share"))

    def METHOD_NAME(self):
        self.cpp_info.libs = collect_libs(self)
        # Consumers need the same feature macros the library was built with.
        if self.options.with_ssl:
            self.cpp_info.defines.append("HAVE_SSL=1")
        if self.options.with_postgres:
            self.cpp_info.defines.append("HAVE_POSTGRESQL=1")
        if self.options.with_mysql:
            self.cpp_info.defines.append("HAVE_MYSQL=1")
        if self.settings.os == "Windows":
            self.cpp_info.system_libs.extend(["ws2_32"])
        elif self.settings.os in ["Linux", "FreeBSD"]:
            self.cpp_info.system_libs.extend(["pthread", "m"])
6,411 | test address query as anonymous user | import graphene
from ....tests.utils import (
assert_no_permission,
get_graphql_content,
get_graphql_content_from_response,
)
ADDRESS_QUERY = """
query address($id: ID!) {
address(id: $id) {
postalCode
lastName
firstName
city
country {
code
}
}
}
"""
def test_address_query_as_owner(user_api_client, customer_user):
    """The owner of an address can read it through the `address` query."""
    address = customer_user.addresses.first()
    variables = {"id": graphene.Node.to_global_id("Address", address.pk)}
    response = user_api_client.post_graphql(ADDRESS_QUERY, variables)
    content = get_graphql_content(response)
    data = content["data"]["address"]
    assert data["country"]["code"] == address.country.code
def test_address_query_as_not_owner(
    user_api_client, customer_user, address_other_country
):
    """A user querying someone else's address gets an empty result."""
    variables = {"id": graphene.Node.to_global_id("Address", address_other_country.pk)}
    response = user_api_client.post_graphql(ADDRESS_QUERY, variables)
    content = get_graphql_content(response)
    data = content["data"]["address"]
    assert not data
def test_address_query_as_app_with_permission(
    app_api_client,
    address_other_country,
    permission_manage_users,
):
    """An app holding MANAGE_USERS can read any address."""
    variables = {"id": graphene.Node.to_global_id("Address", address_other_country.pk)}
    response = app_api_client.post_graphql(
        ADDRESS_QUERY, variables, permissions=[permission_manage_users]
    )
    content = get_graphql_content(response)
    data = content["data"]["address"]
    assert data["country"]["code"] == address_other_country.country.code
def test_address_query_as_app_without_permission(
    app_api_client, app, address_other_country
):
    """An app without MANAGE_USERS is denied access to the address query."""
    variables = {"id": graphene.Node.to_global_id("Address", address_other_country.pk)}
    response = app_api_client.post_graphql(ADDRESS_QUERY, variables)
    assert_no_permission(response)
def METHOD_NAME(api_client, address_other_country):
    """An anonymous (unauthenticated) client is denied access to the address query."""
    variables = {"id": graphene.Node.to_global_id("Address", address_other_country.pk)}
    response = api_client.post_graphql(ADDRESS_QUERY, variables)
    assert_no_permission(response)
def test_address_query_invalid_id(
    staff_api_client,
    address_other_country,
):
    """A malformed global ID yields a GraphQL error and a null address."""
    id = "..afs"
    variables = {"id": id}
    response = staff_api_client.post_graphql(ADDRESS_QUERY, variables)
    content = get_graphql_content_from_response(response)
    assert len(content["errors"]) == 1
    assert content["errors"][0]["message"] == f"Couldn't resolve id: {id}."
    assert content["data"]["address"] is None
def test_address_query_with_invalid_object_type(
    staff_api_client,
    address_other_country,
):
    """A global ID of the wrong type (Order, not Address) resolves to null."""
    variables = {"id": graphene.Node.to_global_id("Order", address_other_country.pk)}
    response = staff_api_client.post_graphql(ADDRESS_QUERY, variables)
    content = get_graphql_content(response)
    assert content["data"]["address"] is None
ADDRESS_FEDERATION_QUERY = """
query GetUserInFederation($representations: [_Any]) {
_entities(representations: $representations) {
__typename
... on Address {
id
city
}
}
}
"""
def test_customer_query_address_federation(user_api_client, customer_user, address):
    """A customer can resolve their own address through the federation entity query."""
    customer_user.addresses.add(address)
    address_id = graphene.Node.to_global_id("Address", address.pk)
    variables = {
        "representations": [
            {
                "__typename": "Address",
                "id": address_id,
            },
        ],
    }
    response = user_api_client.post_graphql(ADDRESS_FEDERATION_QUERY, variables)
    content = get_graphql_content(response)
    assert content["data"]["_entities"] == [
        {
            "__typename": "Address",
            "id": address_id,
            "city": address.city,
        }
    ]
def test_customer_query_other_user_address_federation(
    user_api_client, staff_user, customer_user, address
):
    """A customer cannot resolve another user's address via federation (None)."""
    staff_user.addresses.add(address)
    address_id = graphene.Node.to_global_id("Address", address.pk)
    variables = {
        "representations": [
            {
                "__typename": "Address",
                "id": address_id,
            },
        ],
    }
    response = user_api_client.post_graphql(ADDRESS_FEDERATION_QUERY, variables)
    content = get_graphql_content(response)
    assert content["data"]["_entities"] == [None]
def test_staff_query_other_user_address_federation(
    staff_api_client, customer_user, address
):
    """A staff user without MANAGE_USERS cannot resolve another user's address."""
    customer_user.addresses.add(address)
    address_id = graphene.Node.to_global_id("Address", address.pk)
    variables = {
        "representations": [
            {
                "__typename": "Address",
                "id": address_id,
            },
        ],
    }
    response = staff_api_client.post_graphql(ADDRESS_FEDERATION_QUERY, variables)
    content = get_graphql_content(response)
    assert content["data"]["_entities"] == [None]
def test_staff_query_other_user_address_with_permission_federation(
    staff_api_client, customer_user, address, permission_manage_users
):
    """Even with MANAGE_USERS, staff cannot resolve a user address via federation."""
    customer_user.addresses.add(address)
    address_id = graphene.Node.to_global_id("Address", address.pk)
    variables = {
        "representations": [
            {
                "__typename": "Address",
                "id": address_id,
            },
        ],
    }
    response = staff_api_client.post_graphql(
        ADDRESS_FEDERATION_QUERY,
        variables,
        permissions=[permission_manage_users],
        check_no_permissions=False,
    )
    content = get_graphql_content(response)
    assert content["data"]["_entities"] == [None]
def test_app_query_address_federation(app_api_client, address, permission_manage_users):
    """An app with MANAGE_USERS can resolve any address via federation."""
    address_id = graphene.Node.to_global_id("Address", address.pk)
    variables = {
        "representations": [
            {
                "__typename": "Address",
                "id": address_id,
            },
        ],
    }
    response = app_api_client.post_graphql(
        ADDRESS_FEDERATION_QUERY,
        variables,
        permissions=[permission_manage_users],
        check_no_permissions=False,
    )
    content = get_graphql_content(response)
    assert content["data"]["_entities"] == [
        {
            "__typename": "Address",
            "id": address_id,
            "city": address.city,
        }
    ]
def test_app_no_permission_query_address_federation(app_api_client, address):
    """An app without MANAGE_USERS gets None for the federated address entity."""
    address_id = graphene.Node.to_global_id("Address", address.pk)
    variables = {
        "representations": [
            {
                "__typename": "Address",
                "id": address_id,
            },
        ],
    }
    response = app_api_client.post_graphql(ADDRESS_FEDERATION_QUERY, variables)
    content = get_graphql_content(response)
    assert content["data"]["_entities"] == [None]
def test_unauthenticated_query_address_federation(api_client, address):
    """An anonymous client gets None for the federated address entity."""
    address_id = graphene.Node.to_global_id("Address", address.pk)
    variables = {
        "representations": [
            {
                "__typename": "Address",
                "id": address_id,
            },
        ],
    }
    response = api_client.post_graphql(ADDRESS_FEDERATION_QUERY, variables)
    content = get_graphql_content(response)
    assert content["data"]["_entities"] == [None]
6,412 | convert combed | from os.path import join, isdir, dirname, abspath
from os import getcwd
import os
from sys import getfilesystemencoding
from collections import OrderedDict
import pandas as pd
from nilm_metadata import convert_yaml_to_hdf5
from nilmtk.datastore import Key
from nilmtk.measurement import LEVEL_NAMES
from nilmtk.utils import check_directory_exists, get_datastore, get_module_directory
#{"load_type": {"floor/wing":meter_number_in_nilmtk}
acad_block_meter_mapping = {'Building Total Mains': {'0': 1},
'Lifts': {'0': 2},
'Floor Total': {'1': 3, '2': 4, '3': 5, '4': 6, '5': 7},
'AHU': {'0': 8, '1': 9, '2': 10, '5': 11},
'Light': {'3': 12},
'Power Sockets': {'3': 13},
'UPS Sockets': {'3': 14}}
lecture_block_meter_mapping = {'Building Total Mains': {'0': 1},
'Floor Total': {'0': 2, '1': 3, '2': 4},
'AHU': {'1': 5, '2': 6, '3': 7}}
overall_dataset_mapping = OrderedDict({'Academic Block': acad_block_meter_mapping,
'Lecture Block': lecture_block_meter_mapping})
building_number_mapping = {'Academic Block': 1, 'Lecture Block': 2}
column_mapping = OrderedDict({
'Power': ('power', 'active'),
'Energy': ('energy', 'active'),
'Current': ('current', '')})
def METHOD_NAME(combed_path, output_filename, format='HDF'):
    """Convert the COMBED dataset into a NILMTK datastore.

    Parameters
    ----------
    combed_path : str
        The root path of the combed dataset.
    output_filename : str
        The destination HDF5 filename (including path and suffix).
    format : str
        Datastore format understood by ``get_datastore`` (default 'HDF').

    Raises
    ------
    RuntimeError
        If no input CSV file could be found under `combed_path`.
    """
    check_directory_exists(combed_path)

    # Open store (truncates any pre-existing output file).
    store = get_datastore(output_filename, format, mode='w')
    any_file_converted = False

    for building_name, building_mapping in overall_dataset_mapping.items():
        for load_name, load_mapping in building_mapping.items():
            for load_mapping_path, meter_number in load_mapping.items():
                building_number = building_number_mapping[building_name]
                key = Key(building=building_number, meter=meter_number)
                dfs = []
                for attribute in column_mapping:
                    filename_attribute = join(combed_path, building_name, load_name,
                                              load_mapping_path, "%s.csv" % attribute)
                    if not os.path.isfile(filename_attribute):
                        # File not found directly in the combed_path provided.
                        # Try the alternative layout with an extra 'iiitd' level.
                        filename_attribute = join(combed_path, 'iiitd', building_name,
                                                  load_name, load_mapping_path,
                                                  "%s.csv" % attribute)
                    if os.path.isfile(filename_attribute):
                        print(filename_attribute)
                        df = pd.read_csv(filename_attribute,
                                         names=["timestamp", attribute])
                        df.index = pd.to_datetime(df["timestamp"], unit='ms')
                        # BUGFIX: `df.drop("timestamp", 1)` used the positional
                        # `axis` argument, which was removed in pandas 2.0.
                        df = df.drop(columns="timestamp")
                        dfs.append(df)
                # BUGFIX: the old per-attribute `exists` flag only reflected the
                # *last* attribute checked ('Current'), silently discarding
                # meters whose final CSV was missing (and raising NameError if
                # the loop body never ran). Store whatever data was found.
                if dfs:
                    total = pd.concat(dfs, axis=1)
                    total = total.tz_localize('UTC').tz_convert('Asia/Kolkata')
                    total.columns = pd.MultiIndex.from_tuples(
                        [column_mapping[x] for x in total.columns])
                    total.columns.set_names(LEVEL_NAMES, inplace=True)
                    assert total.index.is_unique
                    store.put(str(key), total)
                    any_file_converted = True

    if not any_file_converted:
        raise RuntimeError('No files converted, did you specify the correct path?')

    # Close the datastore before the metadata converter re-opens the HDF5 file.
    store.close()

    convert_yaml_to_hdf5(
        join(get_module_directory(), 'dataset_converters', 'combed', 'metadata'),
        output_filename
    )

    print("Done converting COMBED to HDF5!")
6,413 | test flush | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `tensorboard.summary._writer` module."""
import time
from unittest import mock
import numpy as np
from tensorboard.compat.proto import summary_pb2
from tensorboard.summary import _output as output_lib
from tensorboard.summary import _writer as writer_lib
from tensorboard.summary import _test_util
from tensorboard.util import tensor_util
from tensorboard import test as tb_test
class WriterTest(tb_test.TestCase):
    """Lifecycle tests for `writer_lib.Writer`: creation, flush, and close."""
    def test_real_directory(self):
        # Creating and closing a writer on a real logdir emits exactly one
        # "file version" bookkeeping event.
        logdir = self.get_temp_dir()
        w = writer_lib.Writer(logdir)
        w.close()
        events = _test_util.read_tfevents(logdir)
        self.assertLen(events, 1)
        self.assertEqual(events[0].file_version, "brain.Event:2")
    def METHOD_NAME(self):
        # flush() must delegate to the underlying Output exactly once.
        output = mock.create_autospec(output_lib.Output)
        w = writer_lib.Writer(output)
        w.flush()
        output.flush.assert_called_once()
    def test_flush_after_close(self):
        # Flushing a closed writer must raise rather than silently no-op.
        output = mock.create_autospec(output_lib.Output)
        w = writer_lib.Writer(output)
        w.close()
        with self.assertRaisesRegex(RuntimeError, "already closed"):
            w.flush()
    def test_close(self):
        # close() must delegate to the underlying Output exactly once.
        output = mock.create_autospec(output_lib.Output)
        w = writer_lib.Writer(output)
        w.close()
        output.close.assert_called_once()
    def test_close_after_close(self):
        # Double-close is an error, not idempotent.
        output = mock.create_autospec(output_lib.Output)
        w = writer_lib.Writer(output)
        w.close()
        with self.assertRaisesRegex(RuntimeError, "already closed"):
            w.close()
class WriterAddScalarTest(tb_test.TestCase):
    """Tests for `Writer.add_scalar`: emission, coercion, validation, defaults."""
    def test_real_directory(self):
        # End-to-end: the scalar lands in the event file with correct step,
        # wall time, tensor value, and plugin metadata.
        logdir = self.get_temp_dir()
        w = writer_lib.Writer(logdir)
        w.add_scalar("foo", 42.0, 12, wall_time=123.456, description="fooful")
        w.close()
        events = _test_util.read_tfevents(logdir)
        self.assertLen(events, 2)
        self.assertEqual(events[0].file_version, "brain.Event:2")
        event = events[1]
        self.assertEqual(event.step, 12)
        self.assertEqual(event.wall_time, 123.456)
        summary = event.summary.value[0]
        self.assertEqual(summary.tag, "foo")
        self.assertEqual(
            tensor_util.make_ndarray(summary.tensor), np.array(42.0)
        )
        self.assertEqual(
            summary.metadata.data_class, summary_pb2.DATA_CLASS_SCALAR
        )
        self.assertEqual(summary.metadata.plugin_data.plugin_name, "scalars")
        self.assertEqual(summary.metadata.summary_description, "fooful")
    def test_basic(self):
        # numpy inputs pass through to Output.emit_scalar unchanged.
        output = mock.create_autospec(output_lib.Output)
        w = writer_lib.Writer(output)
        w.add_scalar("foo", np.float32(42.0), np.int64(12))
        output.emit_scalar.assert_called_once_with(
            plugin_name="scalars",
            tag="foo",
            data=np.float32(42.0),
            step=np.int64(12),
            wall_time=mock.ANY,
            description=None,
        )
        # The exact numpy types must be preserved, not just equal values.
        _, kwargs = output.emit_scalar.call_args
        self.assertEqual(np.float32, type(kwargs["data"]))
        self.assertEqual(np.int64, type(kwargs["step"]))
    def test_accepts_python_types(self):
        # Plain Python float/int are coerced to np.float32/np.int64.
        output = mock.create_autospec(output_lib.Output)
        w = writer_lib.Writer(output)
        w.add_scalar("foo", 42.0, 12)
        output.emit_scalar.assert_called_once_with(
            plugin_name="scalars",
            tag="foo",
            data=np.float32(42.0),
            step=np.int64(12),
            wall_time=mock.ANY,
            description=None,
        )
        _, kwargs = output.emit_scalar.call_args
        self.assertEqual(np.float32, type(kwargs["data"]))
        self.assertEqual(np.int64, type(kwargs["step"]))
    def test_validates_data_shape(self):
        # Non-rank-0 data is rejected.
        output = mock.create_autospec(output_lib.Output)
        w = writer_lib.Writer(output)
        with self.assertRaisesRegex(ValueError, "scalar.*data"):
            w.add_scalar("foo", np.float32([1.0]), 12)
    def test_validates_step_shape(self):
        # Non-rank-0 step is rejected.
        output = mock.create_autospec(output_lib.Output)
        w = writer_lib.Writer(output)
        with self.assertRaisesRegex(ValueError, "scalar.*step"):
            w.add_scalar("foo", 42.0, np.int64([12]))
    def test_default_wall_time(self):
        # When wall_time is omitted, the current time.time() is used.
        output = mock.create_autospec(output_lib.Output)
        w = writer_lib.Writer(output)
        with mock.patch.object(time, "time") as mock_time:
            mock_time.return_value = 12345.678
            w.add_scalar("foo", 42.0, 12)
        output.emit_scalar.assert_called_once()
        _, kwargs = output.emit_scalar.call_args
        self.assertEqual(12345.678, kwargs["wall_time"])
    def test_explicit_wall_time(self):
        # An explicit wall_time overrides the clock.
        output = mock.create_autospec(output_lib.Output)
        w = writer_lib.Writer(output)
        w.add_scalar("foo", 42.0, 12, wall_time=999.999)
        output.emit_scalar.assert_called_once()
        _, kwargs = output.emit_scalar.call_args
        self.assertEqual(999.999, kwargs["wall_time"])
    def test_explicit_description(self):
        # The description is forwarded verbatim.
        output = mock.create_autospec(output_lib.Output)
        w = writer_lib.Writer(output)
        w.add_scalar("foo", 42.0, 12, description="fooful")
        output.emit_scalar.assert_called_once()
        _, kwargs = output.emit_scalar.call_args
        self.assertEqual("fooful", kwargs["description"])
    def test_after_close(self):
        # Writing to a closed writer must raise.
        output = mock.create_autospec(output_lib.Output)
        w = writer_lib.Writer(output)
        w.close()
        with self.assertRaisesRegex(RuntimeError, "already closed"):
            w.add_scalar("unused", 0.0, 0)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    tb_test.main()
6,414 | init | # -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2020 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Santiago Dueñas <sduenas@bitergia.com>
#
import configparser
import os.path
import click
from ..utils import display
CONFIG_OPTIONS = {
'endpoint': {
'user': '',
'password': '',
'host': 'localhost',
'port': '9314',
'path': '/',
'ssl': 'true',
}
}
SORTINGHAT_CFG_DIR_NAME = '~/.sortinghat'
SORTINGHAT_CFG_FILE_NAME = 'sortinghat.cfg'
@click.group()
def config():
    """Configure SortingHat client.
    This command gets or sets parameters from/to the client
    configuration file.
    On Unix systems, configuration will be stored by default
    under the file '~/.sortinghat/sortinghat.cfg'.
    Configuration parameters selected to get/set must follow
    the schema "<section>.<option>" (e.g 'endpoint.host').
    """
    # Group container only; the `init`, `get` and `set` subcommands do the work.
    pass
@config.command()
@click.option('--filepath',
              help="Path to the configuration file.")
@click.option('--overwrite', is_flag=True,
              help="Force to replace an existing configuration file.")
def METHOD_NAME(filepath, overwrite):
    """Create a configuration file with default parameters.
    This command will create a configuration file under <filepath>
    using default configuration parameters. When <filepath> is not
    given the default location for the configuration file will be
    used instead.
    The configuration file must not exist. Otherwise the command
    will return with an error. Use the option '--overwrite' to force
    to replace the existing file.
    """
    # Resolve the target file; create the default config dir when needed.
    if filepath:
        config_file = filepath
    else:
        cfg_dir = os.path.expanduser(SORTINGHAT_CFG_DIR_NAME)
        os.makedirs(cfg_dir, exist_ok=True)
        config_file = os.path.join(cfg_dir, SORTINGHAT_CFG_FILE_NAME)

    # Refuse to clobber an existing file unless --overwrite was given.
    if not overwrite and os.path.isfile(config_file):
        raise click.ClickException(
            ("Configuration file {} already exists. "
             "Use '--overwrite' to replace it.").format(config_file)
        )

    parser = configparser.ConfigParser()
    parser.read_dict(CONFIG_OPTIONS)

    try:
        with open(config_file, 'w') as fh:
            parser.write(fh)
    except IOError as exc:
        raise click.FileError(config_file, hint=str(exc))
@config.command()
@click.argument('key')
@click.option('--filepath',
              help="Path to the configuration file.")
def get(key, filepath):
    """Get configuration parameters.
    This command reads <key> configuration parameter from the
    configuration file given in <filepath>. When <filepath>
    is not given, the command will use the default configuration
    file.
    Configuration parameter in <key> must follow the pattern
    "<section>.<option>" (e.g 'endpoint.host').
    KEY: configuration parameter
    """
    # Fall back to the default config location when no path is given.
    config_file = filepath or os.path.join(
        os.path.expanduser(SORTINGHAT_CFG_DIR_NAME), SORTINGHAT_CFG_FILE_NAME)

    if not _check_config_key(key):
        raise click.ClickException(
            "{} config parameter is not supported".format(key))

    if not os.path.isfile(config_file):
        raise click.FileError(config_file, hint="file does not exist")

    section, option = key.split('.')

    parser = configparser.ConfigParser()
    parser.read(config_file)

    try:
        # Missing section/option prints nothing (best-effort lookup).
        display('config.tmpl', key=key, option=parser.get(section, option))
    except (configparser.NoSectionError, configparser.NoOptionError):
        pass
@config.command()
@click.argument('key')
@click.argument('value')
@click.option('--filepath',
              help="Path to the configuration file.")
def set(key, value, filepath):
    """Set configuration parameter.
    This command writes <value> on <key> parameter in the
    configuration file given in <filepath>. When <filepath>
    is not given, the command will use the default configuration
    file.
    Configuration parameter in <key> must follow the pattern
    "<section>.<option>" (e.g 'endpoint.host').
    KEY: configuration parameter
    VALUE: value for the configuration parameter
    """
    # Resolve the target file; create the default config dir when needed.
    if filepath:
        config_file = filepath
    else:
        cfg_dir = os.path.expanduser(SORTINGHAT_CFG_DIR_NAME)
        os.makedirs(cfg_dir, exist_ok=True)
        config_file = os.path.join(cfg_dir, SORTINGHAT_CFG_FILE_NAME)

    if not _check_config_key(key):
        raise click.ClickException(
            "{} config parameter is not supported".format(key))

    # Load the current contents, if any, so other options are preserved.
    parser = configparser.ConfigParser()
    if os.path.isfile(config_file):
        parser.read(config_file)

    section, option = key.split('.')
    if section not in parser.sections():
        parser.add_section(section)

    try:
        parser.set(section, option, value)
    except TypeError as exc:
        raise click.ClickException(str(exc))

    try:
        with open(config_file, 'w') as fh:
            parser.write(fh)
    except IOError as exc:
        raise click.FileError(config_file, hint=str(exc))
def _check_config_key(key):
    """Check whether the key is valid.

    A valid key has the schema <section>.<option>. Keys supported
    are listed in the CONFIG_OPTIONS dict.

    :param key: <section>.<option> key
    :returns: True when the key names a supported section/option pair
    """
    try:
        section, option = key.split('.')
    except (AttributeError, ValueError):
        # Not a string, or not exactly one '.' separator.
        return False
    # Both halves must be non-empty and declared in CONFIG_OPTIONS.
    return bool(section) and bool(option) \
        and section in CONFIG_OPTIONS \
        and option in CONFIG_OPTIONS[section]
6,415 | get setting | #!/usr/bin/env python
# --!-- coding: utf8 --!--
import os
import shutil
import subprocess
from PyQt5.QtCore import QSettings
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QCheckBox, QHBoxLayout, \
QLabel, QSpinBox, QComboBox, QLineEdit
from manuskript.ui.collapsibleGroupBox2 import collapsibleGroupBox2
from manuskript.ui import style
class abstractImporter:
    """
    abstractImporter is used to import documents into manuskript.
    The startImport function must be subclassed. It takes a filePath (str to
    the document to import), and must return `outlineItem`s.
    """
    # Display name and description shown to the user for this importer.
    name = ""
    description = ""
    fileFormat = "" # File format accepted. For example: "OPML Files (*.opml)"
                    # For folder, use "<<folder>>"
    # Theme icon name and the backend used ("Internal" = no external tool).
    icon = ""
    engine = "Internal"
    def __init__(self):
        self.settingsList = [] # Keep the name of the settings in order
        self.settings = {}  # name -> `setting` instance (see nested class below)
    def startImport(self, filePath, parentItem, settingsWidget):
        """
        Takes a str path to the file/folder to import, and the settingsWidget
        returned by `self.settingsWidget()` containing the user set settings,
        and return `outlineItem`s.
        """
        pass
    @classmethod
    def isValid(cls):
        # Subclasses return True when their backend/engine is available.
        return False
    def settingsWidget(self, widget):
        """
        Takes a QWidget that can be modified and must be returned.
        """
        return widget
    def addPage(self, widget, title):
        """
        Convenience function to add a page to the settingsWidget `widget`, at
        the end.
        Returns the page widget.
        """
        w = QWidget(widget)
        w.setLayout(QVBoxLayout())
        # Insert at the end of the toolBox so pages keep declaration order.
        widget.toolBox.insertItem(widget.toolBox.count(), w, title)
        widget.toolBox.layout().setSpacing(0)
        return w
    def addGroup(self, parent, title):
        """
        Adds a collapsible group to the given widget.
        """
        g = collapsibleGroupBox2(title=title)
        parent.layout().addWidget(g)
        g.setLayout(QVBoxLayout())
        return g
    def addSetting(self, name, type, label, widget=None, default=None,
                   tooltip=None, min=None, max=None, vals=None, suffix=""):
        # Registers a setting under `name`. NOTE: `type`, `min` and `max`
        # shadow builtins; kept for API compatibility with subclasses.
        self.settingsList.append(name)
        self.settings[name] = self.setting(name, type, label, widget, default,
                                           tooltip, min, max, vals, suffix)
    def widget(self, name):
        # Returns the Qt widget for a registered setting (None if unknown).
        if name in self.settings:
            return self.settings[name].widget()
    def METHOD_NAME(self, name):
        # Returns the `setting` object registered under `name` (None if unknown).
        if name in self.settings:
            return self.settings[name]
    def addSettingsTo(self, widget):
        """
        Adds all the settings to the given widget. Assume that the settings
        have not been called yet, so calling `.widget()` will create their
        widgets.
        """
        for name in self.settingsList:
            self.settings[name].widget(widget)
    class setting:
        """
        A class used to store setting, and display a widget for the user to
        modify it.
        """
        def __init__(self, name, type, label, widget=None, default=None,
                     tooltip=None, min=None, max=None, vals=None, suffix=""):
            self.name = name
            # One of: "checkbox", "number", "combo", "text", "label"
            # (matched by substring in `widget()` / `value()` below).
            self.type = type
            self.label = label
            self._widget = widget
            self.default = default
            self.min = min
            self.max = max
            # Combo choices are passed as a single "a|b|c" string.
            self.vals = vals.split("|") if vals else []
            self.suffix = suffix
            self.tooltip = tooltip
        def widget(self, parent=None):
            """
            Returns the widget used, or creates it if not done yet. If parent
            is given, widget is inserted in parent's layout.
            """
            if self._widget:
                return self._widget
            else:
                if "checkbox" in self.type:
                    self._widget = QCheckBox(self.label)
                    if self.default:
                        self._widget.setChecked(True)
                    if parent:
                        parent.layout().addWidget(self._widget)
                elif "number" in self.type:
                    # Label + spinbox laid out side by side (8:2 stretch).
                    l = QHBoxLayout()
                    label = QLabel(self.label, parent)
                    label.setWordWrap(True)
                    l.addWidget(label, 8)
                    self._widget = QSpinBox()
                    self._widget.setValue(self.default if self.default else 0)
                    if self.min:
                        self._widget.setMinimum(self.min)
                    if self.max:
                        self._widget.setMaximum(self.max)
                    if self.suffix:
                        self._widget.setSuffix(self.suffix)
                    l.addWidget(self._widget, 2)
                    if parent:
                        parent.layout().addLayout(l)
                elif "combo" in self.type:
                    l = QHBoxLayout()
                    label = QLabel(self.label, parent)
                    label.setWordWrap(True)
                    l.addWidget(label, 6)
                    self._widget = QComboBox()
                    self._widget.addItems(self.vals)
                    if self.default:
                        self._widget.setCurrentText(self.default)
                    l.addWidget(self._widget, 2)
                    if parent:
                        parent.layout().addLayout(l)
                elif "text" in self.type:
                    l = QHBoxLayout()
                    label = QLabel(self.label, parent)
                    label.setWordWrap(True)
                    l.addWidget(label, 5)
                    self._widget = QLineEdit()
                    self._widget.setStyleSheet(style.lineEditSS())
                    if self.default:
                        self._widget.setText(self.default)
                    l.addWidget(self._widget, 3)
                    if parent:
                        parent.layout().addLayout(l)
                elif "label" in self.type:
                    self._widget = QLabel(self.label, parent)
                    self._widget.setWordWrap(True)
                    if parent:
                        parent.layout().addWidget(self._widget)
                if self.tooltip:
                    self._widget.setToolTip(self.tooltip)
                return self._widget
        def value(self):
            """
            Return the value contained in the widget.
            """
            # Before the widget exists, report the configured default.
            if not self._widget:
                return self.default
            else:
                if "checkbox" in self.type:
                    return self._widget.isChecked()
                elif "number" in self.type:
                    return self._widget.value()
                elif "combo" in self.type:
                    return self._widget.currentText()
                elif "text" in self.type:
                    return self._widget.text()
                # NOTE: "label" type settings fall through and return None.

6,416 | test ruleset invalid | import os
from unittest import mock
import unittest
from ScoutSuite.core.console import set_logger_configuration, print_debug
from ScoutSuite.core.rule import Rule
from ScoutSuite.core.ruleset import Ruleset
class TestScoutRulesRuleset(unittest.TestCase):
    """Regression tests for `Ruleset` loading, path resolution, and error paths."""
    def setUp(self):
        set_logger_configuration(is_debug=True)
        # Fixture rulesets live next to this test module.
        self.test_dir = os.path.dirname(os.path.realpath(__file__))
        self.test_ruleset_001 = os.path.join(self.test_dir, 'data/test-ruleset.json')
        self.test_ruleset_002 = os.path.join(self.test_dir, 'data/test-ruleset-absolute-path.json')
    @mock.patch("ScoutSuite.core.ruleset.print_error")
    def test_ruleset_class(self, printError):
        # A valid ruleset loads rules and rule definitions without errors.
        test001 = Ruleset(cloud_provider='aws', filename=self.test_ruleset_001)
        assert (os.path.isdir(test001.rules_data_path))
        assert (os.path.isfile(test001.filename))
        assert (test001.name == "test-ruleset")
        assert (test001.about == "regression test")
        test_file_key = 'iam-password-policy-no-expiration.json'
        assert (test_file_key in test001.rules)
        assert (type(test001.rules[test_file_key]) == list)
        assert (type(test001.rules[test_file_key][0] == Rule))
        assert (hasattr(test001.rules[test_file_key][0], 'path'))
        for rule in test001.rules:
            print_debug(test001.rules[rule][0].to_string())
        assert (test_file_key in test001.rule_definitions)
        assert (test001.rule_definitions[test_file_key].description == "Password Expiration Disabled")
        for rule_def in test001.rule_definitions:
            print_debug(str(test001.rule_definitions[rule_def]))
        assert (printError.call_count == 0)
        # A ruleset referencing absolute rule paths triggers one error.
        test002 = Ruleset(cloud_provider='aws', filename=self.test_ruleset_002)
        for rule in test002.rules:
            print_debug(test002.rules[rule][0].to_string())
        assert (printError.call_count == 1)  # is this expected ??
        assert ("test-ruleset-absolute-path.json does not exist." in printError.call_args_list[0][0][0])
        # NOTE(review): test005 is constructed only to exercise the
        # ruleset_generator code path; the instance itself is unused.
        test005 = Ruleset(cloud_provider='aws', filename=self.test_ruleset_001, ruleset_generator=True)
    @mock.patch("ScoutSuite.core.ruleset.print_error")
    def test_ruleset_file_not_exist(self, printError):
        # A missing ruleset file yields empty rules plus one printed error.
        test003 = Ruleset(cloud_provider='aws', filename='tests/data/no-such-file.json')
        assert (test003.rules == [])
        assert (printError.call_count == 1)
        assert ("no-such-file.json does not exist" in printError.call_args_list[0][0][0])
    @mock.patch("ScoutSuite.core.ruleset.print_exception")
    def METHOD_NAME(self, printException):
        # Malformed JSON yields empty rules plus one printed exception.
        test004 = Ruleset(cloud_provider='aws', filename='tests/data/invalid-file.json')
        assert (test004.rules == [])
        assert (printException.call_count == 1)
        assert ("invalid-file.json contains malformed JSON" in printException.call_args_list[0][0][0])
    def test_path_for_cloud_providers(self):
        # rules_data_path must point at the per-provider rules directory.
        target = Ruleset(cloud_provider='aws', filename=self.test_ruleset_001)
        assert (os.path.samefile(target.rules_data_path, './ScoutSuite/providers/aws/rules'))
        target = Ruleset(cloud_provider='azure', filename=self.test_ruleset_001)
        assert (os.path.samefile(target.rules_data_path, './ScoutSuite/providers/azure/rules'))
        target = Ruleset(cloud_provider='gcp', filename=self.test_ruleset_001)
        assert (os.path.samefile(target.rules_data_path, './ScoutSuite/providers/gcp/rules'))
    def test_path_for_ruletypes(self):
        # Bare names resolve to bundled rulesets, with or without '.json'.
        rpath = "./ScoutSuite/providers/aws/rules/"
        target = Ruleset(cloud_provider='aws', filename='default.json')
        assert (os.path.samefile(target.filename, rpath + 'rulesets/default.json'))
        target = Ruleset(cloud_provider='aws', filename='default')
        assert (os.path.samefile(target.filename, rpath + 'rulesets/default.json'))
        target = Ruleset(cloud_provider='aws', filename='filters.json')
        assert (os.path.samefile(target.filename, rpath + 'rulesets/filters.json'))
        target = Ruleset(cloud_provider='aws', filename='filters')
        assert (os.path.samefile(target.filename, rpath + 'rulesets/filters.json'))
    @mock.patch("ScoutSuite.core.ruleset.prompt_yes_no")
    def test_file_search(self, prompt_yes_no):
        # With no filename the default ruleset is used without prompting,
        # even for an unknown environment name.
        prompt_yes_no.return_value = False
        target = Ruleset(cloud_provider='aws', filename=None)
        assert (prompt_yes_no.call_count == 0)
        assert (os.path.samefile(target.filename, os.path.join(target.rules_data_path, './rulesets/default.json')))
        target = Ruleset(cloud_provider='aws', environment_name="notexist", filename=None)
        assert (prompt_yes_no.call_count == 0)
        assert (os.path.samefile(target.filename, os.path.join(target.rules_data_path, './rulesets/default.json')))
        prompt_yes_no.reset_mock()
        prompt_yes_no.return_value = True
    def test_find_file(self):
        # Smoke tests: find_file accepts both full paths and bare names.
        test101 = Ruleset(cloud_provider='aws').find_file(self.test_ruleset_001)
        test102 = Ruleset(cloud_provider='aws').find_file('default')
    def test_search_ruleset(self):
        # Smoke test: non-interactive ruleset search must not raise.
        test201 = Ruleset(cloud_provider='aws').search_ruleset('test', no_prompt=True)
6,417 | check uploads exists | from chemaboxwriters.app import write_abox
import pytest
import os
from chemaboxwriters.common.pipeline import Pipeline
import chemaboxwriters.common.assemble_pipeline as asp
from chemaboxwriters.ontocompchem.pipeline import OC_PIPELINE
from chemaboxwriters.ontospecies.pipeline import OS_PIPELINE
from chemaboxwriters.ontomops.pipeline import OMOPS_PIPELINE
from chemaboxwriters.ontopesscan.pipeline import OPS_PIPELINE
from pytest_mock import MockerFixture
from typing import Callable, Dict, Optional, List
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
ABOX_CONFIG_FILE = os.path.join(THIS_DIR, "test_config_files", "abox_config.yml")
REF_DIR = os.path.join(THIS_DIR, "..", "refData")
OCOMPCHEM_REF_DIR = os.path.join(REF_DIR, "ontocompchem")
OSPECIES_REF_DIR = os.path.join(REF_DIR, "ontospecies")
OPSSCAN_REF_DIR = os.path.join(REF_DIR, "ontopesscan")
OMOPS_REF_DIR = os.path.join(REF_DIR, "ontomops")
class DummyPubchemComp:
    """Minimal stand-in for a pubchempy Compound: only `cid` and `synonyms`."""

    def __init__(self, cid: int, synonyms: List[str]):
        # Store both fields verbatim; tests read them back directly.
        self.cid, self.synonyms = cid, synonyms
def check_uploads(
    pipeline: Pipeline, inp_file_type: str, fs_num_uploads: int, ts_num_uploads: int
) -> None:
    """Assert the pipeline recorded the expected number of uploads and that
    each handler's configured uploads are present (file server + triple store)."""
    fs_uploads = pipeline._file_server_uploads
    ts_uploads = pipeline._triple_store_uploads

    assert len(fs_uploads) == fs_num_uploads
    assert len(ts_uploads) == ts_num_uploads

    # Walk the handler chain in processing order, starting at the input stage.
    stage = inp_file_type
    for handler in pipeline._handlers.values():
        if handler._in_stage == stage:
            METHOD_NAME(fs_uploads, handler.get_file_server_upload_configs())
            METHOD_NAME(ts_uploads, handler.get_triple_store_upload_configs())
            stage = handler._out_stage
def METHOD_NAME(uploads: Dict, upload_configs: Optional[Dict]) -> None:
    """Assert every configured upload file type appears in `uploads`
    at the URL derived from `upload_configs` (no-op when configs are None)."""
    if upload_configs is None:
        return

    upload_file_types = upload_configs["upload_file_types"]
    url = _construct_full_url(upload_configs)

    found_types = []
    for file_type in upload_file_types:
        for upload_entry in uploads.values():
            if upload_entry["input_type"] == file_type:
                found_types.append(file_type)
                # Strip the file name, keep the directory part with a
                # trailing slash, and compare against the expected URL.
                location = upload_entry["location"]
                upload_url = f"{'/'.join(location.split('/')[:-1])}/"
                assert upload_url == url

    assert set(found_types) == set(upload_file_types)
def _construct_full_url(upload_configs: Dict) -> str:
    """Join the configured base ``url`` and optional ``subdirs`` into a single
    URL that always ends with a trailing slash."""

    def _slash_terminated(part: str) -> str:
        # Ensure exactly one trailing slash without duplicating it.
        return part if part.endswith("/") else f"{part}/"

    full_url = _slash_terminated(upload_configs["url"])
    subdirs = upload_configs["subdirs"]
    if subdirs is not None:
        full_url += _slash_terminated(subdirs)
    return full_url
@pytest.mark.parametrize(
    "inp_file_or_dir, inp_file_type, fs_num_uploads, ts_num_uploads",
    [
        (
            os.path.join("OC_qc_log_single_log_scan_test", "ethane_scan_rigid.g09"),
            "qc_log",
            5,
            4,
        ),
    ],
)
def test_ocompchem_abox_uploads(
    inp_file_or_dir: str,
    inp_file_type: str,
    fs_num_uploads: int,
    ts_num_uploads: int,
    clean_tests: bool,
    cleanup_test_data: Callable,
):
    """Run the OntoCompChem pipeline on a Gaussian log and verify its uploads."""
    print("========================================================")
    print("TEST INPUT FILE: ", inp_file_or_dir)
    print("TEST INPUT FILE TYPE: ", inp_file_type)
    print()
    print()
    inp_file_or_dir = os.path.join(OCOMPCHEM_REF_DIR, inp_file_or_dir)
    pipeline = asp.assemble_pipeline(
        pipeline_type=OC_PIPELINE, config_file=ABOX_CONFIG_FILE
    )
    write_abox(
        pipeline=pipeline,
        file_or_dir=inp_file_or_dir,
        input_file_type=inp_file_type,
        dry_run=False,
    )
    # Expected upload counts come from the parametrize tuple above.
    check_uploads(pipeline, inp_file_type, fs_num_uploads, ts_num_uploads)
    if clean_tests:
        cleanup_test_data(pipeline.written_files)
    print("========================================================")
    print()
    print()
@pytest.mark.parametrize(
    "inp_file_or_dir, inp_file_type, fs_num_uploads, ts_num_uploads",
    [
        # BUGFIX: the path was hard-coded with a Windows backslash
        # ("OS_qc_log_test\\h2o_opt_n_g09.log"), which on POSIX systems became
        # a single file name containing a literal backslash. Build it portably.
        (os.path.join("OS_qc_log_test", "h2o_opt_n_g09.log"), "qc_log", 0, 1),
    ],
)
def test_ospecies_abox_uploads(
    inp_file_or_dir: str,
    inp_file_type: str,
    fs_num_uploads: int,
    ts_num_uploads: int,
    mocker: MockerFixture,
    clean_tests: bool,
    cleanup_test_data: Callable,
):
    """Run the OntoSpecies pipeline on a Gaussian log and verify its uploads."""
    print("========================================================")
    print("TEST INPUT FILE: ", inp_file_or_dir)
    print("TEST INPUT FILE TYPE: ", inp_file_type)
    print()
    print()
    inp_file_or_dir = os.path.join(OSPECIES_REF_DIR, inp_file_or_dir)
    # Avoid a live PubChem lookup: return a canned compound instead.
    mocker.patch(
        "chemaboxwriters.ontospecies.handlers.qc_json_handler.pcp.get_compounds",
        return_value=[DummyPubchemComp(cid=1111, synonyms=["1111-11-1"])],
    )
    pipeline = asp.assemble_pipeline(
        pipeline_type=OS_PIPELINE, config_file=ABOX_CONFIG_FILE
    )
    write_abox(
        pipeline=pipeline,
        file_or_dir=inp_file_or_dir,
        input_file_type=inp_file_type,
        dry_run=False,
    )
    # Expected upload counts come from the parametrize tuple above.
    check_uploads(pipeline, inp_file_type, fs_num_uploads, ts_num_uploads)
    if clean_tests:
        cleanup_test_data(pipeline.written_files)
    print("========================================================")
    print()
    print()
@pytest.mark.parametrize(
    "inp_file_or_dir, inp_file_type, fs_num_uploads, ts_num_uploads",
    [
        ("OPS_oc_json_angle_test", "oc_json", 0, 1),
    ],
)
def test_opsscan_abox_uploads(
    inp_file_or_dir: str,
    inp_file_type: str,
    fs_num_uploads: int,
    ts_num_uploads: int,
    clean_tests: bool,
    cleanup_test_data: Callable,
):
    """Run the OntoPESScan pipeline on a directory of inputs and verify its uploads."""
    print("========================================================")
    print("TEST INPUT DIR: ", inp_file_or_dir)
    print("TEST INPUT FILE TYPE: ", inp_file_type)
    print()
    print()
    inp_file_or_dir = os.path.join(OPSSCAN_REF_DIR, inp_file_or_dir)
    pipeline = asp.assemble_pipeline(
        pipeline_type=OPS_PIPELINE, config_file=ABOX_CONFIG_FILE
    )
    write_abox(
        pipeline=pipeline,
        file_or_dir=inp_file_or_dir,
        input_file_type=inp_file_type,
        dry_run=False,
    )
    # Expected upload counts come from the parametrize tuple above.
    check_uploads(pipeline, inp_file_type, fs_num_uploads, ts_num_uploads)
    if clean_tests:
        cleanup_test_data(pipeline.written_files)
    print("========================================================")
    print()
    print()
@pytest.mark.parametrize(
    "inp_file_or_dir, inp_file_type, fs_num_uploads, ts_num_uploads",
    [
        # BUGFIX: the path was hard-coded with a Windows backslash
        # ("OM_om_json_test\\example.ominp_json"), which on POSIX systems became
        # a single file name containing a literal backslash. Build it portably.
        (os.path.join("OM_om_json_test", "example.ominp_json"), "ominp_json", 1, 1),
    ],
)
def test_omops_abox_uploads(
    inp_file_or_dir: str,
    inp_file_type: str,
    fs_num_uploads: int,
    ts_num_uploads: int,
    clean_tests: bool,
    cleanup_test_data: Callable,
):
    """Run the OntoMOPs pipeline on an example input and verify its uploads."""
    print("========================================================")
    print("TEST INPUT FILE: ", inp_file_or_dir)
    print("TEST INPUT FILE TYPE: ", inp_file_type)
    print()
    print()
    inp_file_or_dir = os.path.join(OMOPS_REF_DIR, inp_file_or_dir)
    pipeline = asp.assemble_pipeline(
        pipeline_type=OMOPS_PIPELINE, config_file=ABOX_CONFIG_FILE
    )
    write_abox(
        pipeline=pipeline,
        file_or_dir=inp_file_or_dir,
        input_file_type=inp_file_type,
        dry_run=False,
    )
    # Expected upload counts come from the parametrize tuple above.
    check_uploads(pipeline, inp_file_type, fs_num_uploads, ts_num_uploads)
    if clean_tests:
        cleanup_test_data(pipeline.written_files)
    print("========================================================")
    print()
    print()
6,418 | test assign empty | #
# Copyright (c) 2020, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mock import (
MagicMock,
call,
patch,
)
from neptune.attributes.series.float_series import (
FloatSeries,
FloatSeriesVal,
)
from neptune.attributes.series.string_series import (
StringSeries,
StringSeriesVal,
)
from neptune.internal.operation import (
ClearFloatLog,
ClearStringLog,
ConfigFloatSeries,
LogFloats,
)
from tests.unit.neptune.new.attributes.test_attribute_base import TestAttributeBase
@patch("time.time", new=TestAttributeBase._now)
class TestSeries(TestAttributeBase):
    @patch("neptune.metadata_containers.metadata_container.get_operation_processor")
    def test_assign(self, get_operation_processor):
        # Assigning a FloatSeriesVal must enqueue, in order: series config,
        # a clear of the existing log, then the logged values.
        value = FloatSeriesVal([17, 3.6], min=0, max=100, unit="%")
        expected = [
            LogFloats.ValueType(17, None, self._now()),
            LogFloats.ValueType(3.6, None, self._now()),
        ]
        processor = MagicMock()
        get_operation_processor.return_value = processor
        path, wait = (
            self._random_path(),
            self._random_wait(),
        )
        with self._exp() as exp:
            var = FloatSeries(exp, path)
            var.assign(value, wait=wait)
            # Only the final LogFloats honours the caller's `wait` flag.
            processor.enqueue_operation.assert_has_calls(
                [
                    call(ConfigFloatSeries(path, min=0, max=100, unit="%"), wait=False),
                    call(ClearFloatLog(path), wait=False),
                    call(LogFloats(path, expected), wait=wait),
                ]
            )
    @patch("neptune.metadata_containers.metadata_container.get_operation_processor")
    def METHOD_NAME(self, get_operation_processor):
        # Assigning an empty series only clears the log; nothing is appended.
        processor = MagicMock()
        get_operation_processor.return_value = processor
        with self._exp() as exp:
            path, wait = (
                self._random_path(),
                self._random_wait(),
            )
            var = StringSeries(exp, path)
            var.assign(StringSeriesVal([]), wait=wait)
            processor.enqueue_operation.assert_called_with(ClearStringLog(path), wait=wait)
    @patch("neptune.metadata_containers.metadata_container.get_operation_processor")
    def test_log(self, get_operation_processor):
        # log() accepts a scalar or any collection (list/tuple/set) and
        # enqueues one LogFloats value per element, with no explicit step.
        value_and_expected = [
            (13, [LogFloats.ValueType(13, None, self._now())]),
            (15.3, [LogFloats.ValueType(15.3, None, self._now())]),
            (
                [1, 9, 7],
                [
                    LogFloats.ValueType(1, None, self._now()),
                    LogFloats.ValueType(9, None, self._now()),
                    LogFloats.ValueType(7, None, self._now()),
                ],
            ),
            (
                (1, 9, 7),
                [
                    LogFloats.ValueType(1, None, self._now()),
                    LogFloats.ValueType(9, None, self._now()),
                    LogFloats.ValueType(7, None, self._now()),
                ],
            ),
            (
                {1, 9, 7},
                [
                    LogFloats.ValueType(1, None, self._now()),
                    LogFloats.ValueType(9, None, self._now()),
                    LogFloats.ValueType(7, None, self._now()),
                ],
            ),
        ]
        for value, expected in value_and_expected:
            processor = MagicMock()
            get_operation_processor.return_value = processor
            with self._exp() as exp:
                path, wait = (
                    self._random_path(),
                    self._random_wait(),
                )
                var = FloatSeries(exp, path)
                var.log(value, wait=wait)
                processor.enqueue_operation.assert_called_with(LogFloats(path, expected), wait=wait)
@patch("neptune.metadata_containers.metadata_container.get_operation_processor")
def test_log_with_step(self, get_operation_processor):
value_step_and_expected = [
(13, 5.3, LogFloats.ValueType(13, 5.3, self._now())),
(15.3, 10, LogFloats.ValueType(15.3, 10, self._now())),
([13], 5.3, LogFloats.ValueType(13, 5.3, self._now())),
((13,), 5.3, LogFloats.ValueType(13, 5.3, self._now())),
({13}, 5.3, LogFloats.ValueType(13, 5.3, self._now())),
]
for value, step, expected in value_step_and_expected:
processor = MagicMock()
get_operation_processor.return_value = processor
with self._exp() as exp:
path, wait = (
self._random_path(),
self._random_wait(),
)
var = FloatSeries(exp, path)
var.log(value, step=step, wait=wait)
processor.enqueue_operation.assert_called_with(LogFloats(path, [expected]), wait=wait)
@patch("neptune.metadata_containers.metadata_container.get_operation_processor")
def test_log_with_timestamp(self, get_operation_processor):
value_step_and_expected = [
(13, 5.3, LogFloats.ValueType(13, None, 5.3)),
(15.3, 10, LogFloats.ValueType(15.3, None, 10)),
]
for value, ts, expected in value_step_and_expected:
processor = MagicMock()
get_operation_processor.return_value = processor
with self._exp() as exp:
path, wait = (
self._random_path(),
self._random_wait(),
)
var = FloatSeries(exp, path)
var.log(value, timestamp=ts, wait=wait)
processor.enqueue_operation.assert_called_with(LogFloats(path, [expected]), wait=wait)
@patch("neptune.metadata_containers.metadata_container.get_operation_processor")
def test_log_value_errors(self, get_operation_processor):
processor = MagicMock()
get_operation_processor.return_value = processor
with self._exp() as exp:
attr = FloatSeries(exp, self._random_path())
with self.assertRaises(ValueError):
attr.log(["str", 5])
with self.assertRaises(ValueError):
attr.log([5, 10], step=10)
with self.assertRaises(TypeError):
attr.log(5, step="str")
with self.assertRaises(TypeError):
attr.log(5, timestamp="str")
@patch("neptune.metadata_containers.metadata_container.get_operation_processor")
def test_clear(self, get_operation_processor):
processor = MagicMock()
get_operation_processor.return_value = processor
with self._exp() as exp:
path, wait = (
self._random_path(),
self._random_wait(),
)
var = FloatSeries(exp, path)
var.clear(wait=wait)
processor.enqueue_operation.assert_called_with(ClearFloatLog(path), wait=wait) |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
# Generic response-callback typing used by the generated operations below:
# an optional callable receiving (pipeline response, deserialized body, headers).
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# AutoRest-generated operations class -- regenerated code will overwrite edits.
class VpnSiteLinkConnectionsOperations:
    """VpnSiteLinkConnectionsOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_05_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    # NOTE(review): METHOD_NAME is a templating placeholder; in the upstream
    # generated client this operation is named `get`.
    async def METHOD_NAME(
        self,
        resource_group_name: str,
        gateway_name: str,
        connection_name: str,
        link_connection_name: str,
        **kwargs
    ) -> "_models.VpnSiteLinkConnection":
        """Retrieves the details of a vpn site link connection.
        :param resource_group_name: The resource group name of the VpnGateway.
        :type resource_group_name: str
        :param gateway_name: The name of the gateway.
        :type gateway_name: str
        :param connection_name: The name of the vpn connection.
        :type connection_name: str
        :param link_connection_name: The name of the vpn connection.
        :type link_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VpnSiteLinkConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_05_01.models.VpnSiteLinkConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnSiteLinkConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        accept = "application/json"
        # Construct URL from the operation's metadata template.
        url = self.METHOD_NAME.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
            'linkConnectionName': self._serialize.url("link_connection_name", link_connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.METHOD_NAME(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 is a documented success; everything else maps to an ARM error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('VpnSiteLinkConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    METHOD_NAME.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}/vpnLinkConnections/{linkConnectionName}'}  # type: ignore
from unittest import mock
from rest_framework import status
from lego.apps.users.models import AbakusGroup, User
from lego.utils.test_utils import BaseAPITestCase
class ContactViewSetTestCase(BaseAPITestCase):
    """API tests for the contact-form endpoint (/api/v1/contact-form/).

    ``send_message`` is mocked so no mail is sent, and ``verify_captcha`` is
    mocked to control captcha validation without hitting the external service.
    """
    fixtures = [
        "initial_files.yaml",
        "initial_abakus_groups.yaml",
        "development_users.yaml",
        "development_memberships.yaml",
    ]
    # NOTE(review): METHOD_NAME is a templating placeholder for setUp().
    def METHOD_NAME(self):
        self.url = "/api/v1/contact-form/"
        self.user = User.objects.first()
    @mock.patch("lego.apps.contact.views.send_message")
    @mock.patch("lego.apps.contact.serializers.verify_captcha", return_value=True)
    def test_without_auth(self, mock_verify_captcha, mock_send_message):
        """Anonymous submission without authentication is accepted."""
        response = self.client.post(
            self.url,
            {
                "title": "title",
                "message": "message",
                "anonymous": True,
                "captcha_response": "test",
                "recipient_group": None,
            },
        )
        self.assertEqual(status.HTTP_202_ACCEPTED, response.status_code)
        mock_verify_captcha.assert_called_once()
    @mock.patch("lego.apps.contact.views.send_message")
    @mock.patch("lego.apps.contact.serializers.verify_captcha", return_value=True)
    def test_without_auth_not_anonymous(self, mock_verify_captcha, mock_send_message):
        """Non-anonymous submission requires an authenticated user -> 400."""
        response = self.client.post(
            self.url,
            {
                "title": "title",
                "message": "message",
                "anonymous": False,
                "captcha_response": "test",
                "recipient_group": None,
            },
        )
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
    @mock.patch("lego.apps.contact.views.send_message")
    @mock.patch("lego.apps.contact.serializers.verify_captcha", return_value=True)
    def test_with_auth(self, mock_verify_captcha, mock_send_message):
        """Authenticated submission is accepted and forwarded to send_message."""
        self.client.force_authenticate(self.user)
        response = self.client.post(
            self.url,
            {
                "title": "title",
                "message": "message",
                "anonymous": True,
                "captcha_response": "test",
                "recipient_group": None,
            },
        )
        self.assertEqual(status.HTTP_202_ACCEPTED, response.status_code)
        mock_verify_captcha.assert_called_once()
        mock_send_message.assert_called_once_with(
            "title", "message", self.user, True, None
        )
    @mock.patch("lego.apps.contact.views.send_message")
    @mock.patch("lego.apps.contact.serializers.verify_captcha", return_value=False)
    def test_with_auth_invalid_captcha(self, mock_verify_captcha, mock_send_message):
        """A failing captcha verification rejects the submission with 400."""
        self.client.force_authenticate(self.user)
        response = self.client.post(
            self.url,
            {
                "title": "title",
                "message": "message",
                "anonymous": True,
                "captcha_response": "test",
                "recipient_group": None,
            },
        )
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        mock_verify_captcha.assert_called_once()
    @mock.patch("lego.apps.contact.views.send_message")
    @mock.patch("lego.apps.contact.serializers.verify_captcha", return_value=True)
    def test_committee_as_recipient(self, mock_verify_captcha, mock_send_message):
        """A concrete recipient group is resolved and passed to send_message."""
        webkom = AbakusGroup.objects.get(name="Webkom")
        webkom_id = webkom.id
        self.client.force_authenticate(self.user)
        response = self.client.post(
            self.url,
            {
                "title": "title",
                "message": "message",
                "anonymous": True,
                "captcha_response": "test",
                "recipient_group": webkom_id,
            },
        )
        self.assertEqual(status.HTTP_202_ACCEPTED, response.status_code)
        mock_verify_captcha.assert_called_once()
        mock_send_message.assert_called_once_with(
            "title", "message", self.user, True, webkom
        )
# Copyright 2023 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pooling modules."""
from jax import lax
import jax.numpy as jnp
import numpy as np
def METHOD_NAME(inputs, init, reduce_fn, window_shape, strides, padding):
  """Reduce ``inputs`` over sliding windows with ``reduce_fn``.

  A thin convenience wrapper around the ``lax.reduce_window`` XLA op. Note
  that pooling built this way is not generally differentiable, even when
  ``reduce_fn`` itself is.

  Args:
    inputs: input data with dimensions (batch, window dims..., features).
    init: the initial value of the reduction.
    reduce_fn: a binary reduction function ``(T, T) -> T``.
    window_shape: a shape tuple defining the window to reduce over.
    strides: inter-window strides, one per window dimension (defaults to 1s).
    padding: ``'SAME'``, ``'VALID'``, or a sequence of per-dimension
      ``(low, high)`` integer pairs applied before/after each spatial dim.

  Returns:
    The reduction of each window slice.
  """
  num_batch_dims = inputs.ndim - (len(window_shape) + 1)
  strides = strides or (1,) * len(window_shape)
  assert len(window_shape) == len(
      strides
  ), f"len({window_shape}) must equal len({strides})"
  batch_ones = (1,) * num_batch_dims
  strides = batch_ones + strides + (1,)
  dims = batch_ones + window_shape + (1,)
  is_single_input = num_batch_dims == 0
  if is_single_input:
    # lax.reduce_window always wants a batch dimension; add a singleton one.
    inputs = inputs[None]
    strides = (1,) + strides
    dims = (1,) + dims
  assert inputs.ndim == len(dims), f"len({inputs.shape}) != len({dims})"
  if not isinstance(padding, str):
    padding = tuple(map(tuple, padding))
    assert len(padding) == len(window_shape), (
        f"padding {padding} must specify pads for same number of dims as "
        f"window_shape {window_shape}"
    )
    assert all(
        [len(x) == 2 for x in padding]
    ), f"each entry in padding {padding} must be length 2"
    # Batch and feature dimensions are never padded.
    padding = ((0, 0),) + padding + ((0, 0),)
  out = lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)
  return jnp.squeeze(out, axis=0) if is_single_input else out
def avg_pool(
    inputs, window_shape, strides=None, padding="VALID", count_include_pad=True
):
  """Pools the input by taking the average over a window.
  Args:
    inputs: input data with dimensions (batch, window dims..., features).
    window_shape: a shape tuple defining the window to reduce over.
    strides: a sequence of `n` integers, representing the inter-window
      strides (default: `(1, ..., 1)`).
    padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
      of `n` `(low, high)` integer pairs that give the padding to apply before
      and after each spatial dimension (default: `'VALID'`).
    count_include_pad: a boolean whether to include padded tokens
      in the average calculation (default: `True`).
  Returns:
    The average for each window slice.
  """
  # Sum each window first; normalize to an average below.
  y = METHOD_NAME(inputs, 0.0, lax.add, window_shape, strides, padding)
  if count_include_pad:
    # Treat every window as full-size: divide by the window volume.
    y = y / np.prod(window_shape)
  else:
    # Divide by the number of valid (non-padded) elements per window, computed
    # by sum-pooling a ones tensor with the same window/stride/padding.
    div_shape = inputs.shape[:-1] + (1,)
    if len(div_shape) - 2 == len(window_shape):
      # Input carries a leading batch dimension: collapse it to 1 so the
      # divisor broadcasts across the batch.
      div_shape = (1,) + div_shape[1:]
    y = y / METHOD_NAME(
        jnp.ones(div_shape), 0.0, lax.add, window_shape, strides, padding
    )
  return y
def max_pool(inputs, window_shape, strides=None, padding="VALID"):
  """Pool the input by taking the maximum over each window slice.

  Args:
    inputs: input data with dimensions (batch, window dims..., features).
    window_shape: a shape tuple defining the window to reduce over.
    strides: inter-window strides, one per window dimension (defaults to 1s).
    padding: ``'SAME'``, ``'VALID'``, or a sequence of per-dimension
      ``(low, high)`` integer pairs applied before/after each spatial dim.

  Returns:
    The maximum of each window slice.
  """
  # -inf is the identity for max, so padded positions never win.
  return METHOD_NAME(inputs, -jnp.inf, lax.max, window_shape, strides, padding)
def min_pool(inputs, window_shape, strides=None, padding="VALID"):
  """Pool the input by taking the minimum over each window slice.

  Args:
    inputs: input data with dimensions (batch, window dims..., features).
    window_shape: a shape tuple defining the window to reduce over.
    strides: inter-window strides, one per window dimension (defaults to 1s).
    padding: ``'SAME'``, ``'VALID'``, or a sequence of per-dimension
      ``(low, high)`` integer pairs applied before/after each spatial dim.

  Returns:
    The minimum of each window slice.
  """
  # +inf is the identity for min, so padded positions never win.
  result = METHOD_NAME(inputs, jnp.inf, lax.min, window_shape, strides, padding)
  return result
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
# Optional callback type: receives the pipeline response, the deserialized body
# and the response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Module-level serializer shared by all request builders; client-side
# validation is disabled because the service validates parameters itself.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_location_request(location: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the GET request for listing storage usages in a location.

    AutoRest-generated helper: formats the URL template, adds the api-version
    query parameter and the Accept header. Header/param/api_version overrides
    may be supplied through ``kwargs``.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages"
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "location": _SERIALIZER.url("location", location, "str"),
    }
    _url: str = _url.format(**path_format_arguments)  # type: ignore
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
# NOTE(review): METHOD_NAME below is a templating placeholder; in the upstream
# generated client this operation is named `list_by_location`.
class UsagesOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.storage.v2021_06_01.StorageManagementClient`'s
        :attr:`usages` attribute.
    """
    models = _models
    def __init__(self, *args, **kwargs):
        # Generated clients pass dependencies either positionally or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
        self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
    @distributed_trace
    def METHOD_NAME(self, location: str, **kwargs: Any) -> Iterable["_models.Usage"]:
        """Gets the current usage count and the limit for the resources of the location under the
        subscription.
        :param location: The location of the Azure Storage resource. Required.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Usage or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2021_06_01.models.Usage]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2021-06-01"))
        cls: ClsType[_models.UsageListResult] = kwargs.pop("cls", None)
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # First page: build the canonical request. Subsequent pages: follow
            # the service-provided next_link, re-applying the api-version.
            if not next_link:
                request = build_list_by_location_request(
                    location=location,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.METHOD_NAME.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page; this API exposes no continuation token,
            # hence the None next-link.
            deserialized = self._deserialize("UsageListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return None, iter(list_of_elem)
        def get_next(next_link=None):
            # Execute one page request and map non-200 responses to ARM errors.
            request = prepare_request(next_link)
            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(get_next, extract_data)
    METHOD_NAME.metadata = {
        "url": "/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages"
    }
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''
Created on May 17, 2011
'''
try:
import json
except ImportError:
import simplejson as json
from OvmObjectModule import *
import types
import logging
import popen2
import subprocess
from OvmFaultConstants import toErrCode, dispatchErrCode, NoVmFoundException, ShellExceutedFailedException
from xmlrpclib import Fault as XmlRpcFault
from OVSCommons import *
from OvmLoggerModule import OvmLogger
from OVSXXenStore import xen_get_vm_path
from OVSSiteRMServer import get_master_ip
# Heartbeat-file markup and well-known host configuration paths used by the
# OVM agent helpers below.
HEARTBEAT_TIMESTAMP_FORMAT='<timestamp>%s</timestamp>'
HEARTBEAT_TIMESTAMP_PATTERN='(\<timestamp\>\d+.\d+<\/timestamp\>)'
HEARTBEAT_DIR='heart_beat'
ETC_HOSTS='/etc/hosts'
HOSTNAME_FILE='/etc/sysconfig/network'
OWNER_FILE_PREFIX='host_'
OCFS2_CONF='/etc/ocfs2/cluster.conf'
# Module-wide logger for the OvmCommon helpers.
logger = OvmLogger('OvmCommon')
def setAttrFromDict(obj, name, refDict, convertFunc=None):
    """Copy refDict[name] onto obj.<name>, optionally through convertFunc."""
    value = refDict[name]
    if convertFunc:
        value = convertFunc(value)
    setattr(obj, name, value)
def safeSetAttr(obj, name, value):
    """Set obj.<name> to value, refusing to create attributes that do not exist."""
    if hasattr(obj, name):
        setattr(obj, name, value)
    else:
        raise Exception("%s doesn't have attribute %s" % (obj.__class__.__name__, name))
def toAscii(jstr):
    """Coerce jstr to its ASCII-only encoding, silently dropping other chars.

    NOTE(review): on Python 3 ``str.encode`` returns ``bytes``; this module
    targets Python 2 (popen2/xmlrpclib), where it returns ``str``.
    """
    text = str(jstr)
    return text.encode("ascii", "ignore")
def toAsciiHook(dct):
    """json object_hook that ASCII-fies every key and unicode value of dct.

    Fix: the original iterated over ``dct`` while deleting and inserting keys.
    That raises ``RuntimeError: dictionary changed size during iteration`` on
    Python 3, and on Python 2 it only avoided the error by coincidentally
    restoring the dict's size each pass. Iterate over a snapshot instead.
    """
    for k, v in list(dct.items()):
        # types.UnicodeType exists only on Python 2; this module targets py2.
        if type(v) is types.UnicodeType:
            v = toAscii(v)
        del dct[k]
        dct[toAscii(k)] = v
    return dct
def asciiLoads(jStr):
    """Parse a JSON-ish string (single quotes, Python booleans) into ASCII data."""
    # Normalize Python-repr style input into strict JSON before parsing.
    normalized = str(jStr).replace("'", '"').replace('False', 'false').replace('True', 'true')
    return json.loads(normalized, object_hook=toAsciiHook)
def exceptionIfNoSuccess(str, errMsg=None):
    """Raise when the agent reply does not contain "success".

    Note: the parameter is (unfortunately) named ``str``, shadowing the
    builtin; the name is kept for interface compatibility.
    """
    if "success" in str:
        return
    msg = errMsg if errMsg else str
    raise Exception("%s (%s)" % (msg, str))
def METHOD_NAME(str, sep=';'):
    """Parse a "success:k1=v1;k2=v2" agent reply into a dict.

    Raises when the reply does not start with "success". Values may contain
    '=' themselves; only the first '=' of each pair is a separator.
    """
    if not str.startswith("success"):
        raise Exception(str)
    payload = str[len('success:'):]
    result = {}
    for token in payload.split(sep):
        (k, v) = token.split('=', 1)
        result[k] = v
    return result
def jsonSuccessToMap(str):
    """Parse a JSON agent reply; return its 'value' unless status != 'SUCC'."""
    reply = json.loads(str)
    if reply['status'] != 'SUCC':
        raise Exception(str)
    return reply['value']
def safeDictSet(obj, dct, name):
    """Copy attribute <name> from obj into dct, refusing unknown attributes."""
    if not hasattr(obj, name):
        raise Exception("%s has no attribute %s for encoding" % (obj.__class__.__name__, name))
    dct[name] = getattr(obj, name)
def normalizeToGson(str):
    """Strip escaping/quoting artifacts so nested JSON strings become inline JSON."""
    unescaped = str.replace('\\', '').strip('"')
    return unescaped.replace('"{', '{').replace('}"', '}')
def toGson(obj):
    """Serialize obj to a Gson-friendly JSON string."""
    serialized = json.dumps(obj)
    return normalizeToGson(serialized)
def MtoBytes(M):
    """Convert a size in megabytes to bytes."""
    return M * (1024 ** 2)
def BytesToM(bytes):
    """Convert a size in bytes to megabytes (floor division on Python 2)."""
    return bytes / (1024 ** 2)
def BytesToG(bytes):
    """Convert a size in bytes to gigabytes (floor division on Python 2)."""
    return bytes / (1024 ** 3)
def runCmd(cmds):
    """Run a shell command, returning its stdout.

    Raises ShellExceutedFailedException (stderr, returncode) on nonzero exit.
    """
    proc = subprocess.Popen(cmds, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (out, err) = proc.communicate()
    if proc.returncode == 0:
        return out
    raise ShellExceutedFailedException(err, proc.returncode)
def doCmd(lst):
    """Stringify and join lst into one shell command, run it, log in/out."""
    cmdStr = ' '.join([str(i) for i in lst])
    logger.debug(doCmd, cmdStr)
    res = runCmd(cmdStr)
    logger.debug(doCmd, 'result:' + res)
    return res
def execute(cmd):
    """Run cmd via popen2 (Python 2 only), returning stdout; raise on failure."""
    proc = popen2.Popen3(cmd, True)
    if proc.wait() != 0:
        raise Exception("Failed to execute command. Command: " + cmd + ", Error: " + proc.childerr.read())
    return proc.fromchild.read()
def getDomId(vm_name):
    """Look up the Xen domain id of vm_name by grepping `xm list` output."""
    output = execute("xm list | grep " + vm_name + " | awk '{print $2}'")
    return output.strip()
def raiseExceptionIfFail(res):
    """Raise unless res contains a success marker ("success" or "SUCC")."""
    if "success" in res or "SUCC" in res:
        return
    raise Exception(res)
def ipToHeartBeatFileName(ip):
    """Map a dotted IP to its heartbeat file name, e.g. 1.2.3.4 -> 1_2_3_4_HEARTBEAT."""
    return "%s_HEARTBEAT" % ip.replace('.', '_')
def getVmNameFromConfigureFile(cfgPath):
    """Extract the vm name from a Xen guest configuration file.

    Scans for the first line starting with 'name' and returns its value with
    surrounding whitespace and single quotes stripped. Raises when no such
    line exists.

    Fix: the original opened the file with a bare open()/close() pair, leaking
    the handle if reading or parsing raised; a with-block now guarantees the
    handle is closed on every path.
    """
    with open(cfgPath) as fd:
        for line in fd:
            line = line.strip()
            if line.startswith('name'):
                # Split on the first '=' only; the value may contain '='.
                value = line.split("=", 1)[1]
                return value.strip().strip("'")
    raise Exception('Cannot find vm name in %s'%cfgPath)
def makeOwnerFileName():
    """Build this host's owner-file name from the pool master's IP address."""
    hostIp = METHOD_NAME(get_master_ip())['ip']
    return OWNER_FILE_PREFIX + hostIp.replace('.', '_')
import libvoikko
from django.db import connection
from django.db.models import Case, When
from services.models import (
OrganizationServiceNodeUnitCount,
ServiceNode,
ServiceNodeUnitCount,
Unit,
)
from services.search.constants import (
DEFAULT_TRIGRAM_THRESHOLD,
SEARCHABLE_MODEL_TYPE_NAMES,
)
# Finnish morphological analyzer shared by the helpers below.
# NOTE(review): setNoUglyHyphenation(True) presumably restricts hyphenation
# points to natural word boundaries -- confirm against libvoikko docs.
voikko = libvoikko.Voikko("fi")
voikko.setNoUglyHyphenation(True)
def is_compound_word(word):
    """Return True when voikko analyzes word as a compound of 2+ word bases."""
    analyses = voikko.analyze(word)
    if not analyses:
        return False
    # Each word base in the analysis is prefixed with '+'.
    return analyses[0]["WORDBASES"].count("+") > 1
def hyphenate(word):
    """Split a compound word into its component words; otherwise return [word]."""
    word = word.strip()
    if not is_compound_word(word):
        return [word]
    # Setting the minimum hyphenated word length to len(word) makes voikko
    # split only at compound-word boundaries rather than at every syllable.
    voikko.setMinHyphenatedWordLength(len(word))
    return voikko.hyphenate(word).split("-")
def set_service_node_unit_count(ids, representation):
    """
    As representation is a dict(mutable) passed by the serializer
    set the unit_counts for the service_node.

    Two strategies: a single service node uses the precomputed
    (Organization)ServiceNodeUnitCount tables; a grouped set of nodes counts
    distinct units directly.
    """
    unit_counts = {}
    org_unit_counts = {}
    if len(ids) == 1:
        # Fast path: aggregate the precomputed per-division counts.
        service_node_count_qs = ServiceNodeUnitCount.objects.filter(
            service_node_id=ids[0]
        )
        for service_node_count in service_node_count_qs:
            # Skip rows whose division has no name.
            if hasattr(service_node_count.division, "name"):
                division = service_node_count.division.name_fi.lower()
            else:
                continue
            count = service_node_count.count
            if division in unit_counts:
                unit_counts[division] += count
            else:
                unit_counts[division] = count
        org_service_node_count_qs = OrganizationServiceNodeUnitCount.objects.filter(
            service_node_id=ids[0]
        )
        for org_service_node_count in org_service_node_count_qs:
            org_name = org_service_node_count.organization.name.lower()
            count = org_service_node_count.count
            if org_name in org_unit_counts:
                org_unit_counts[org_name] += count
            else:
                org_unit_counts[org_name] = count
    else:
        # Handle grouped service_nodes: count their distinct units directly.
        # NOTE(review): this branch keys municipalities by municipality_id
        # while the single-node branch keys them by division name_fi --
        # confirm the two key styles are interchangeable for consumers.
        units_qs = Unit.objects.none()
        for id in ids:
            service_node = ServiceNode.objects.get(id=id)
            units_qs = units_qs | service_node.get_units_qs()
        units_qs = units_qs.distinct()
        for unit in units_qs:
            division = unit.municipality_id
            if not division:
                continue
            if division in unit_counts:
                unit_counts[division] += 1
            else:
                unit_counts[division] = 1
        for unit in units_qs:
            org_name = unit.root_department.name.lower()
            if not org_name:
                continue
            if org_name in org_unit_counts:
                org_unit_counts[org_name] += 1
            else:
                org_unit_counts[org_name] = 1
    representation["unit_count"] = {
        "municipality": unit_counts,
        "organization": org_unit_counts,
    }
    # Total is derived from the municipality counts only.
    representation["unit_count"]["total"] = sum(unit_counts.values())
def set_service_unit_count(obj, representation):
    """
    Populate the mutable ``representation`` dict with per-municipality and
    per-organization unit counts for the given service, plus their total.
    """
    municipality_counts = {}
    for row in obj.unit_counts.all():
        key = row.division.name_fi.lower() if row.division else "_unknown"
        municipality_counts[key] = row.count
    organization_counts = {}
    for row in obj.unit_count_organizations.all():
        key = row.organization.name.lower() if row.organization else "_unknown"
        organization_counts[key] = row.count
    representation["unit_count"] = {
        "municipality": municipality_counts,
        "organization": organization_counts,
        # Total is derived from the municipality counts only.
        "total": sum(municipality_counts.values()),
    }
def set_address_fields(obj, representation):
    """
    Populate the mutable ``representation`` dict with the address-related
    fields of ``obj``: number, letter, timestamps and the localized
    municipality and street names. Missing attributes default to "".
    """
    for field in ("number", "number_end", "letter", "modified_at"):
        representation[field] = getattr(obj, field, "")
    representation["municipality"] = {
        "id": getattr(obj.street, "municipality_id", ""),
        "name": {
            "fi": getattr(obj.street.municipality, "name_fi", ""),
            "sv": getattr(obj.street.municipality, "name_sv", ""),
        },
    }
    representation["street"] = {
        "name": {
            "fi": getattr(obj.street, "name_fi", ""),
            "sv": getattr(obj.street, "name_sv", ""),
        },
    }
def get_service_node_results(all_results):
    """
    Aggregate ServiceNode rows into {first_id: [all grouped ids]}.
    The returned dict is also passed as serializer context so the grouped id
    list can be output. Row ids have the format "<type>_<id>[_<id>...]".
    """
    grouped = {}
    for row in all_results:
        if row[1] != "ServiceNode":
            continue
        id_parts = row[0].split("_")[1:]
        grouped[id_parts[0]] = id_parts
    return grouped
def get_ids_from_sql_results(all_results, type="Unit"):
    """
    Return the ids of all rows of the given model type.
    Row ids have the format "<prefix>_<id>"; the model type is in column 1.
    """
    return [row[0].split("_")[1] for row in all_results if row[1] == type]
def get_all_ids_from_sql_results(all_results):
    """
    Return {model_type_name: [object ids]} covering every searchable model
    type, including types with no rows in ``all_results``.
    """
    ids = {type_name: [] for type_name in SEARCHABLE_MODEL_TYPE_NAMES}
    for row in all_results:
        ids[row[1]].append(row[0].split("_")[1])
    return ids
def METHOD_NAME(ids):
    """
    Return a Case expression usable in order_by() so the queryset is
    ordered exactly as the ids appear in the ``ids`` list.
    """
    if not ids:
        return Case()
    whens = [When(id=pk, then=rank) for rank, pk in enumerate(ids)]
    return Case(*whens)
# def get_trigram_results(model, field, q_val, threshold=0.1):
# trigm = (
# model.objects.annotate(
# similarity=TrigramSimilarity(field, q_val),
# )
# .filter(similarity__gt=threshold)
# .order_by("-similarity")
# )
# ids = trigm.values_list("id", flat=True)
# if ids:
# preserved = get_preserved_order(ids)
# return model.objects.filter(id__in=ids).order_by(preserved)
# else:
# return model.objects.none()
def get_trigram_results(
    model, model_name, field, q_val, threshold=DEFAULT_TRIGRAM_THRESHOLD
):
    """
    Return a queryset of ``model`` rows whose ``field`` is trigram-similar
    to ``q_val`` (similarity >= ``threshold``).

    ``model_name`` and ``field`` are trusted identifiers coming from our
    own code and must be interpolated into the SQL text (identifiers
    cannot be bound). The user-supplied ``q_val`` and ``threshold`` are
    passed as bound query parameters to prevent SQL injection (the
    previous version interpolated them directly via an f-string).
    """
    sql = f"""SELECT id, similarity({field}, %s) AS sml
        FROM {model_name}
        WHERE similarity({field}, %s) >= %s
        ORDER BY sml DESC;
    """
    # Context-manage the cursor so it is always closed.
    with connection.cursor() as cursor:
        cursor.execute(sql, [q_val, q_val, threshold])
        all_results = cursor.fetchall()
    ids = [row[0] for row in all_results]
    return model.objects.filter(id__in=ids)
6,425 | fully qualified name | """
Python representations of the JSON Schema Test Suite tests.
"""
from functools import partial
from pathlib import Path
import json
import os
import re
import subprocess
import sys
import unittest
import attr
from asdf._jsonschema.validators import _VALIDATORS
import asdf._jsonschema
def _find_suite():
    """
    Locate the JSON-Schema-Test-Suite directory.

    The JSON_SCHEMA_TEST_SUITE environment variable wins; otherwise the
    copy bundled next to the asdf._jsonschema package is used.
    """
    env_root = os.environ.get("JSON_SCHEMA_TEST_SUITE")
    if env_root is not None:
        return Path(env_root)
    bundled = Path(asdf._jsonschema.__file__).parent / "json"
    if bundled.is_dir():
        return bundled
    raise ValueError(  # pragma: no cover
        (
            "Can't find the JSON-Schema-Test-Suite directory. "
            "Set the 'JSON_SCHEMA_TEST_SUITE' environment "
            "variable or run the tests from alongside a checkout "
            "of the suite."
        ),
    )
@attr.s(hash=True)
class Suite:
    """A checkout of the JSON-Schema-Test-Suite repository."""
    # Root directory of the suite; defaults to the auto-detected checkout.
    _root = attr.ib(default=attr.Factory(_find_suite))
    def _remotes(self):
        # Ask the suite's own helper script for the remote schema store.
        jsonschema_suite = self._root.joinpath("bin", "jsonschema_suite")
        remotes = subprocess.check_output(
            [sys.executable, str(jsonschema_suite), "remotes"],
        )
        return json.loads(remotes.decode("utf-8"))
    def benchmark(self, runner):  # pragma: no cover
        """Benchmark every registered validator against its draft's tests."""
        for name, Validator in _VALIDATORS.items():
            self.version(name=name).benchmark(
                runner=runner,
                Validator=Validator,
            )
    def version(self, name):
        """Return the :class:`Version` for the named draft (e.g. "draft7")."""
        return Version(
            name=name,
            path=self._root.joinpath("tests", name),
            remotes=self._remotes(),
        )
@attr.s(hash=True)
class Version:
    """One draft ("version") of the test suite, e.g. draft2020-12."""
    _path = attr.ib()     # directory holding this draft's test JSON files
    _remotes = attr.ib()  # remote schema store shared by all tests
    name = attr.ib()      # draft name, e.g. "draft7"
    def benchmark(self, runner, **kwargs):  # pragma: no cover
        """Benchmark validation of every test in this draft."""
        for suite in self.tests():
            for test in suite:
                runner.bench_func(
                    test.METHOD_NAME,
                    partial(test.validate_ignoring_errors, **kwargs),
                )
    def tests(self):
        """Generate the main (non-optional) test cases of this draft."""
        return (
            test
            for child in self._path.glob("*.json")
            for test in self._tests_in(
                subject=child.name[:-5],  # strip the ".json" suffix
                path=child,
            )
        )
    def format_tests(self):
        """Generate the optional "format" test cases of this draft."""
        path = self._path.joinpath("optional", "format")
        return (
            test
            for child in path.glob("*.json")
            for test in self._tests_in(
                subject=child.name[:-5],  # strip the ".json" suffix
                path=child,
            )
        )
    def optional_tests_of(self, name):
        """Generate the optional test cases from optional/<name>.json."""
        return self._tests_in(
            subject=name,
            path=self._path.joinpath("optional", name + ".json"),
        )
    def to_unittest_testcase(self, *suites, **kwargs):
        """Build a unittest.TestCase class with one method per test."""
        name = kwargs.pop("name", "Test" + self.name.title().replace("-", ""))
        methods = {
            test.method_name: test.to_unittest_method(**kwargs)
            for suite in suites
            for tests in suite
            for test in tests
        }
        cls = type(name, (unittest.TestCase,), methods)
        try:
            # Pretend the class was defined in the caller's module so test
            # runners print re-runnable fully qualified names.
            cls.__module__ = _someone_save_us_the_module_of_the_caller()
        except Exception:  # pragma: no cover
            # We're doing crazy things, so if they go wrong, like a function
            # behaving differently on some other interpreter, just make them
            # not happen.
            pass
        return cls
    def _tests_in(self, subject, path):
        # Yields one generator of _Test objects per test case in the file.
        for each in json.loads(path.read_text(encoding="utf-8")):
            yield (
                _Test(
                    version=self,
                    subject=subject,
                    case_description=each["description"],
                    schema=each["schema"],
                    remotes=self._remotes,
                    **test,
                ) for test in each["tests"]
            )
@attr.s(hash=True, repr=False)
class _Test:
    """A single suite test: one instance validated against one schema."""
    version = attr.ib()           # the Version this test belongs to
    subject = attr.ib()           # source file name without ".json"
    case_description = attr.ib()  # description of the enclosing test case
    description = attr.ib()       # description of this individual test
    data = attr.ib()              # the instance to validate
    schema = attr.ib(repr=False)
    valid = attr.ib()             # expected outcome of validation
    _remotes = attr.ib()
    comment = attr.ib(default=None)
    def __repr__(self):  # pragma: no cover
        return "<Test {}>".format(self.METHOD_NAME)
    @property
    def METHOD_NAME(self):  # pragma: no cover
        """Human readable fully qualified name of this test."""
        return " > ".join(
            [
                self.version.name,
                self.subject,
                self.case_description,
                self.description,
            ],
        )
    @property
    def method_name(self):
        """Valid Python identifier used as the unittest method name."""
        delimiters = r"[\W\- ]+"
        return "test_{}_{}_{}".format(
            re.sub(delimiters, "_", self.subject),
            re.sub(delimiters, "_", self.case_description),
            re.sub(delimiters, "_", self.description),
        )
    def to_unittest_method(self, skip=lambda test: None, **kwargs):
        """
        Wrap this test in a unittest-compatible test function.

        ``skip`` may return a reason string; depending on environment
        variables the test is then skipped or marked expected-failure.
        """
        if self.valid:
            def fn(this):
                self.validate(**kwargs)
        else:
            def fn(this):
                with this.assertRaises(asdf._jsonschema.ValidationError):
                    self.validate(**kwargs)
        fn.__name__ = self.method_name
        reason = skip(self)
        if reason is None or os.environ.get("JSON_SCHEMA_DEBUG", "0") != "0":
            return fn
        elif os.environ.get("JSON_SCHEMA_EXPECTED_FAILURES", "0") != "0":
            return unittest.expectedFailure(fn)
        else:
            return unittest.skip(reason)(fn)
    def validate(self, Validator, **kwargs):
        """Validate this test's data, with network access disabled."""
        Validator.check_schema(self.schema)
        resolver = asdf._jsonschema.RefResolver.from_schema(
            schema=self.schema,
            store=self._remotes,
            id_of=Validator.ID_OF,
        )
        # XXX: #693 asks to improve the public API for this, since yeah, it's
        # bad. Figures that since it's hard for end-users, we experience
        # the pain internally here too.
        def prevent_network_access(uri):
            raise RuntimeError(f"Tried to access the network: {uri}")
        resolver.resolve_remote = prevent_network_access
        validator = Validator(schema=self.schema, resolver=resolver, **kwargs)
        if os.environ.get("JSON_SCHEMA_DEBUG", "0") != "0":
            breakpoint()
        validator.validate(instance=self.data)
    def validate_ignoring_errors(self, Validator):  # pragma: no cover
        """Run validation but swallow failures (used by benchmarks)."""
        try:
            self.validate(Validator=Validator)
        except asdf._jsonschema.ValidationError:
            pass
def _someone_save_us_the_module_of_the_caller():
"""
The FQON of the module 2nd stack frames up from here.
This is intended to allow us to dynamically return test case classes that
are indistinguishable from being defined in the module that wants them.
Otherwise, trial will mis-print the FQON, and copy pasting it won't re-run
the class that really is running.
Save us all, this is all so so so so so terrible.
"""
return sys._getframe(2).f_globals["__name__"] |
6,426 | joystick on off | #!/usr/bin/env python
"""
RS232 interface to a Zaber XY stage.
Hazen 04/17
Jeff 04/21
"""
import traceback
import storm_control.sc_hardware.serial.RS232 as RS232
import storm_control.sc_library.hdebug as hdebug
class ZaberXYRS232(RS232.RS232):
    """
    ZaberXY stage RS232 interface class.

    Speaks the Zaber ASCII protocol ("/<device> <axis> <command> ...") and
    converts between micrometers and the stage's native step units.
    """
    def __init__(self, **kwds):
        """
        Connect to the Zaber stage at the specified port.

        Keywords consumed here (everything else goes to the RS232 base):
          stage_id - Zaber device number prefixed to every command.
          unit_to_um - conversion factor, stage units -> micrometers.
          limits_dict - soft travel limits {x_min, x_max, y_min, y_max} in um.
        """
        self.live = True
        self.unit_to_um = kwds["unit_to_um"]
        self.um_to_unit = 1.0 / self.unit_to_um
        self.x = 0.0
        self.y = 0.0
        self.stage_id = kwds["stage_id"]
        self.limits = kwds["limits_dict"]
        # Remove the keywords the RS232 super class does not accept.
        del kwds["stage_id"]
        del kwds["unit_to_um"]
        del kwds["limits_dict"]
        # RS232 stuff.
        try:
            super().__init__(**kwds)
            test = self.commWithResp("/")
            if not test:
                self.live = False
        except (AttributeError, AssertionError):
            print(traceback.format_exc())
            self.live = False
            print("Zaber XY Stage is not connected? Stage is not on?")
            print("Failed to connect to the Zaber XY stage at port", kwds["port"])

    def goAbsolute(self, x, y):
        """Move to the absolute position (x, y) in um, clamped to the soft limits."""
        # Coerce values to stage limits.
        coerced_value = False
        if x < self.limits["x_min"]:
            x = self.limits["x_min"]
            coerced_value = True
        if y < self.limits["y_min"]:
            y = self.limits["y_min"]
            coerced_value = True
        if x > self.limits["x_max"]:
            x = self.limits["x_max"]
            coerced_value = True
        if y > self.limits["y_max"]:
            y = self.limits["y_max"]
            coerced_value = True
        if coerced_value:
            print("Stage warning: Requested a move outside of programmed limits")
        # Convert um units to the stage step units and round to an integer.
        x = int(round(x * self.um_to_unit))
        y = int(round(y * self.um_to_unit))
        # Send a command for each axis (Zaber axes are 1-based).
        for axis, pos in enumerate([x, y]):
            self.writeline("/" + str(self.stage_id) + " " + str(axis + 1) + " move abs " + str(pos))

    def goRelative(self, x, y):
        """Move by (x, y) um relative to the current position (not clamped)."""
        # Convert um units to the stage step units and round to an integer.
        x = int(round(x * self.um_to_unit))
        y = int(round(y * self.um_to_unit))
        # Send a command for each axis.
        for axis, pos in enumerate([x, y]):
            self.writeline("/" + str(self.stage_id) + " " + str(axis + 1) + " move rel " + str(pos))

    def jog(self, x_speed, y_speed):
        """Move continuously at (x_speed, y_speed) um/s until commanded otherwise."""
        # 1.6384 converts steps/s into the Zaber velocity register scale
        # -- TODO confirm against the device's microstep resolution.
        vx = int(round(x_speed * self.um_to_unit * 1.6384))
        vy = int(round(y_speed * self.um_to_unit * 1.6384))
        # Send a command for each axis.
        for axis, vel in enumerate([vx, vy]):
            self.writeline("/" + str(self.stage_id) + " " + str(axis + 1) + " move vel " + str(vel))

    def METHOD_NAME(self, on):
        """Joystick control cannot be toggled on this stage; kept for API parity."""
        print("Joystick cannot be inactivated")
        #if on:
        #    self.writeline("!joy 2")
        #else:
        #    self.writeline("!joy 0")

    def position(self):
        ### UNUSED?!?!
        pass
        #self.writeline("?pos")

    def getPosition(self):
        """Return the current [x, y] position in um, or [None, None] on a parse error."""
        response = self.commWithResp("/" + str(self.stage_id) + " get pos")
        #print("Position response: " + response)
        response = response.strip()
        response_parts = response.split(" ")
        try:
            # Positions are the two fields after the 5-field reply header;
            # ValueError also covers an unexpected field count.
            [sx, sy] = map(float, response_parts[5:])
        except ValueError:
            return [None, None]
        return [sx * self.unit_to_um, sy * self.unit_to_um]

    def isStageMoving(self):
        """Return "IDLE", "MOVING" or "ERROR" based on the device status reply."""
        response = self.commWithResp("/" + str(self.stage_id))
        #print("isMoving response: " + response)
        # Parse the response.
        response_parts = response.split(" ")
        # Handle an error or short/empty response. BUG FIX: the length must
        # be checked BEFORE indexing (the old code indexed [2] first and only
        # tested len < 2 although fields [2] and [3] are both required), so a
        # truncated reply raised IndexError instead of returning "ERROR".
        if len(response_parts) < 4 or response_parts[2] != "OK":
            print("STAGE ERROR: " + response)
            return "ERROR"
        # Parse IDLE/BUSY.
        if response_parts[3] == "IDLE":
            return "IDLE"
        else:  # BUSY case.
            return "MOVING"

    def serialNumber(self):
        """
        Return the stages serial number. (Not implemented.)
        """
        #return self.writeline("?readsn")
        pass

    def setVelocity(self, x_vel, y_vel):
        """Set the maximum speed in um/s.

        NOTE: the device has only one maximum velocity, so only x_vel is
        used; y_vel is ignored.
        """
        # Convert um units to the stage step units and round to an integer.
        vx = int(round(x_vel * self.um_to_unit * 1.6384))
        # Write the command.
        self.writeline("/" + str(self.stage_id) + " " + "set maxspeed " + str(vx))

    def setAcceleration(self, x_accel, y_accel):
        """Set the acceleration.

        NOTE: only x_accel is used; y_accel is ignored.
        """
        # Convert to stage units.
        ax = int(round(x_accel * self.um_to_unit * 1.6384 / 10000))
        if ax > 2147483647:
            # The value must fit in a signed 32 bit device register.
            print("ERROR: Invalid acceleration requested: " + str(ax))
            return
        # Write the command.
        self.writeline("/" + str(self.stage_id) + " " + "set accel " + str(ax))

    def zero(self):
        """Zeroing is not supported by this stage; kept for API parity."""
        print("The Zaber stage cannot be zeroed!")
        #self.writeline("!pos 0 0")
#
# Testing
#
if (__name__ == "__main__"):
    import time
    # NOTE(review): 1156200 looks like a typo for the standard 115200 baud
    # rate -- confirm before relying on this manual test block.
    stage = ZaberXYRS232(port = "COM1", baudrate = 1156200)
    def comm(cmd, timeout):
        # Issue a command, wait, then read back one response line.
        cmd()
        time.sleep(timeout)
        return stage.readline()
    if stage.getStatus():
        # Test communication.
        if False:
            print("SN:", comm(stage.serialNumber, 0.1))
            print("zero:", comm(stage.zero, 0.1))
            print("position:", comm(stage.position, 0.1))
            print("goAbsolute:", comm(lambda: stage.goAbsolute(100,100), 0.5))
            print("position:", comm(stage.position, 0.1))
            print("goRelative:", len(comm(lambda: stage.goRelative(100,100), 0.5)))
            print("position:", comm(stage.position, 0.1))
        # Test whether we can jam up stage communication.
        if True:
            reps = 20
            for i in range(reps):
                print(i)
                stage.position()
                stage.goAbsolute(i*10,0)
                stage.position()
                time.sleep(0.1)
            # Drain the responses queued up by the loop above.
            for i in range(3*reps + 4):
                responses = stage.readline()
                for resp in responses.split("\r"):
                    print(i, resp, len(resp))
    stage.shutDown()
#
# The MIT License
#
# Copyright (c) 2021 Moffitt Lab, Boston Children's Hospital
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# |
6,427 | output at dungeon tiles | """NOTE: THIS IS CURRENTLY OUTDATAED FROM EARLY EXPERIMENTATION!"""
# Copyright 2020-2023 Capypara and the SkyTemple Contributors
#
# This file is part of SkyTemple.
#
# SkyTemple is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SkyTemple is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SkyTemple. If not, see <https://www.gnu.org/licenses/>.
# mypy: ignore-errors
from __future__ import annotations
import os
from itertools import islice
from ndspy.rom import NintendoDSRom
from PIL import Image
from skytemple_files.common.tiled_image import TilemapEntry, to_pil
from skytemple_files.common.util import get_ppmdu_config_for_rom, iter_bytes
from skytemple_files.compression_container.common_at.model import CommonAt
from skytemple_files.container.dungeon_bin.handler import DungeonBinHandler
from skytemple_files.container.sir0.handler import Sir0Handler
from skytemple_files.graphics.dpl._model import Dpl
from skytemple_files.graphics.dpla._model import Dpla
# Debug images / raw dumps are written below output_dir; the ROM is
# expected five directories above this file.
output_dir = os.path.join(os.path.dirname(__file__), "dbg_output")
base_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "..")
os.makedirs(os.path.join(output_dir, "raw"), exist_ok=True)
os.makedirs(os.path.join(output_dir, "test"), exist_ok=True)
# Load the ROM and deserialize the dungeon graphics container.
rom = NintendoDSRom.fromFile(os.path.join(base_dir, "skyworkcopy_us.nds"))
dungeon_bin_bin = rom.getFileByName("DUNGEON/dungeon.bin")
static_data = get_ppmdu_config_for_rom(rom)
dungeon_bin = DungeonBinHandler.deserialize(dungeon_bin_bin, static_data)
def chunk(it, size):
    """Split ``it`` into consecutive tuples of at most ``size`` elements."""
    source = iter(it)

    def next_piece():
        return tuple(islice(source, size))

    # iter() with a sentinel stops as soon as the source is exhausted
    # (an empty tuple means there is nothing left to take).
    return iter(next_piece, ())
def output_dpla(fn: str, file: Dpla):
    """Dump a DPLA (animated palette) to <output_dir>/<fn>.png.

    Each image row (y) is one palette slot; each column (x) is one
    animation frame of that slot (3 bytes per colour in the source).
    """
    print("Outputting weird palette as image.")
    # Widest animation among all slots, in colours; at least 1 pixel wide.
    max_pal_len = max(max(int(len(p) / 3) for p in file.colors), 1)
    # each entry on the y axis is a color, x axis shows animation
    out_img = Image.new("RGBA", (max_pal_len, max(len(file.colors), 1)), (0, 0, 0, 0))
    pix = out_img.load()
    for palidx, pal in enumerate(file.colors):
        for i, (r, g, b) in enumerate(chunk(pal, 3)):
            pix[(i, palidx)] = (r, g, b, 255)
    out_img.save(os.path.join(output_dir, fn + ".png"))
def output_raw_palette(fn: str, file: Dpl):
    """Dump a DPL (static palette list) to <output_dir>/<fn>.png.

    Each image row (y) is one palette; each column (x) is one colour of
    that palette (3 bytes per colour in the source).
    """
    print("Outputting raw palette as image.")
    # Longest palette, in colours; at least 1 pixel wide.
    max_pal_len = max(max(int(len(p) / 3) for p in file.palettes), 1)
    # each entry on the y axis is a palette, x axis shows palette colors
    out_img = Image.new("RGBA", (max_pal_len, max(len(file.palettes), 1)), (0, 0, 0, 0))
    pix = out_img.load()
    for palidx, pal in enumerate(file.palettes):
        for i, (r, g, b) in enumerate(chunk(pal, 3)):
            pix[(i, palidx)] = (r, g, b, 255)
    out_img.save(os.path.join(output_dir, fn + ".png"))
def output_at_water_tiles(fn: str, common_at: CommonAt, pal: Dpla):
    """Decompress a 4bpp AT container and save its tiles as one image.

    Also writes the raw decompressed bytes to <output_dir>/raw_img/<fn>.
    """
    print("Outputting water AT as image.")
    img_bin = common_at.decompress()
    # 4 bits per pixel -> an 8x8 tile occupies 32 bytes.
    tiles = list(iter_bytes(img_bin, int(8 * 8 / 2)))
    # create a dummy tile map containing all the tiles
    tilemap = []
    for i in range(0, len(tiles)):
        tilemap.append(TilemapEntry(i, False, False, 0, True))
    # Render all tiles in one strip using frame 0 of the animated palette.
    out_img = to_pil(
        tilemap,
        tiles,
        [pal.get_palette_for_frame(0, 0)],
        8,
        int(len(tiles) * 8 / 3),
        4 * 8,
        tiling_width=1,
        tiling_height=1,
    )
    os.makedirs(os.path.join(output_dir, "raw_img"), exist_ok=True)
    with open(os.path.join(output_dir, "raw_img", fn), "wb") as f:
        f.write(img_bin)
    out_img.save(os.path.join(output_dir, fn + ".png"))
def METHOD_NAME(fn: str, common_at: CommonAt, pal: Dpla):
    """Decompress an 8bpp AT container and save its content as one image.

    NOTE(review): two rendering strategies from experimentation are kept;
    the ``to_pil`` result is immediately overwritten by the raw
    ``Image.frombuffer`` variant below, so only the latter is saved.
    Also writes the raw decompressed bytes to <output_dir>/raw_img/<fn>.
    """
    print("Outputting dungeon AT as image.")
    img_bin = common_at.decompress()
    # 8 bits per pixel -> an 8x8 tile occupies 64 bytes.
    tiles = list(iter_bytes(img_bin, int(8 * 8)))
    # create a dummy tile map containing all the tiles
    tilemap = []
    for i in range(0, len(tiles)):
        tilemap.append(TilemapEntry(i, False, False, 0, True))
    out_img = to_pil(
        tilemap,
        tiles,
        [pal.get_palette_for_frame(0, 0)],
        8,
        int(len(tiles) * 8 / 3),
        4 * 8,
        tiling_width=1,
        tiling_height=1,
        bpp=8,
    )
    # Alternate stategy: interpret the raw bytes directly as image rows.
    img_8bpp = img_bin # bytes(x for x in iter_bytes_4bit_le(img_bin))
    mod = 16 * 4
    channels = 1
    mode = "RGB" if channels == 3 else "P"
    out_img = Image.frombuffer(
        mode,
        (int(len(img_8bpp) / mod / channels), mod),
        bytes(img_8bpp),
        "raw",
        mode,
        0,
        1,
    )
    if mode == "P":
        # Palettized output: attach frame 0 of the animated palette.
        out_img.putpalette(pal.get_palette_for_frame(0, 0))
    os.makedirs(os.path.join(output_dir, "raw_img"), exist_ok=True)
    with open(os.path.join(output_dir, "raw_img", fn), "wb") as f:
        f.write(img_bin)
    out_img.save(os.path.join(output_dir, fn + ".png"))
# Output high level representations for all models, if possible.
for i, file in enumerate(dungeon_bin):
    fn = dungeon_bin.get_filename(i)
    fdef = static_data.dungeon_data.dungeon_bin_files.get(i)
    print(i, type(file), fn)
    if isinstance(file, Dpla):
        output_dpla(fn, file)
    elif isinstance(file, CommonAt):
        # As the palette, we use one of the first 170 files, matching the modulo index.
        # TODO: This is currently using the animated palette actually...
        pal = dungeon_bin[i % 170]
        assert isinstance(pal, Dpla)
        if fdef.name == "dungeon%i.bpci":
            output_at_water_tiles(fn, file, pal)
        else:
            METHOD_NAME(fn, file, pal)
    elif isinstance(file, Dpl):
        output_raw_palette(fn, file)
    elif isinstance(file, bytes):
        # The entry did not deserialize into a model.
        print("No model, skipped.")
    else:
        print("Unknown type, skipped.")
# Also output the raw files
for i, file in enumerate(dungeon_bin.get_files_bytes()):
    fn = dungeon_bin.get_filename(i)
    if i == 1028:
        # File 1028 is Sir0-wrapped; unwrap it to dump the payload.
        sir0 = Sir0Handler.deserialize(file)
        file = sir0.content
    with open(os.path.join(output_dir, "raw", fn), "wb") as f:
        f.write(file)
# Additionally dump the decompressed content of all .at.sir0 files.
for i, file in enumerate(dungeon_bin):
    fn = dungeon_bin.get_filename(i)
    if fn.endswith(".at.sir0"):
        with open(os.path.join(output_dir, "test", fn), "wb") as f:
f.write(file.decompress()) |
6,428 | stan variable | """Container for the results of running autodiff variational inference"""
from collections import OrderedDict
from typing import Dict, Optional, Tuple, Union
import numpy as np
import pandas as pd
from cmdstanpy.cmdstan_args import Method
from cmdstanpy.utils import scan_variational_csv
from .metadata import InferenceMetadata
from .runset import RunSet
class CmdStanVB:
    """
    Container for outputs from CmdStan variational run.
    Created by :meth:`CmdStanModel.variational`.
    """
    def __init__(self, runset: RunSet) -> None:
        """Initialize object."""
        if not runset.method == Method.VARIATIONAL:
            raise ValueError(
                'Wrong runset method, expecting variational inference, '
                'found method {}'.format(runset.method)
            )
        self.runset = runset
        # A variational run produces exactly one output CSV file.
        self._set_variational_attrs(runset.csv_files[0])
    def __repr__(self) -> str:
        repr = 'CmdStanVB: model={}{}'.format(
            self.runset.model, self.runset._args.method_args.compose(0, cmd=[])
        )
        repr = '{}\n csv_file:\n\t{}\n output_file:\n\t{}'.format(
            repr,
            '\n\t'.join(self.runset.csv_files),
            '\n\t'.join(self.runset.stdout_files),
        )
        # TODO - diagnostic, profiling files
        return repr
    def __getattr__(self, attr: str) -> Union[np.ndarray, float]:
        """Synonymous with ``fit.stan_variable(attr)"""
        # Only invoked for attributes not found the normal way; private
        # names are rejected so internal lookups fail fast.
        if attr.startswith("_"):
            raise AttributeError(f"Unknown variable name {attr}")
        try:
            return self.METHOD_NAME(attr)
        except ValueError as e:
            # pylint: disable=raise-missing-from
            raise AttributeError(*e.args)
    def _set_variational_attrs(self, sample_csv_0: str) -> None:
        # Parse the CSV into run metadata plus the variational point
        # estimate (mean) and the sample drawn from the approximation.
        meta = scan_variational_csv(sample_csv_0)
        self._metadata = InferenceMetadata(meta)
        # these three assignments don't grant type information
        self._column_names: Tuple[str, ...] = meta['column_names']
        self._eta: float = meta['eta']
        self._variational_mean: np.ndarray = meta['variational_mean']
        self._variational_sample: np.ndarray = meta['variational_sample']
    @property
    def columns(self) -> int:
        """
        Total number of information items returned by sampler.
        Includes approximation information and names of model parameters
        and computed quantities.
        """
        return len(self._column_names)
    @property
    def column_names(self) -> Tuple[str, ...]:
        """
        Names of information items returned by sampler for each draw.
        Includes approximation information and names of model parameters
        and computed quantities.
        """
        return self._column_names
    @property
    def eta(self) -> float:
        """
        Step size scaling parameter 'eta'
        """
        return self._eta
    @property
    def variational_params_np(self) -> np.ndarray:
        """
        Returns inferred parameter means as numpy array.
        """
        return self._variational_mean
    @property
    def variational_params_pd(self) -> pd.DataFrame:
        """
        Returns inferred parameter means as pandas DataFrame.
        """
        return pd.DataFrame([self._variational_mean], columns=self.column_names)
    @property
    def variational_params_dict(self) -> Dict[str, np.ndarray]:
        """Returns inferred parameter means as Dict."""
        return OrderedDict(zip(self.column_names, self._variational_mean))
    @property
    def metadata(self) -> InferenceMetadata:
        """
        Returns object which contains CmdStan configuration as well as
        information about the names and structure of the inference method
        and model output variables.
        """
        return self._metadata
    def METHOD_NAME(self, var: str) -> np.ndarray:
        """
        Return a numpy.ndarray which contains the estimates for the
        for the named Stan program variable where the dimensions of the
        numpy.ndarray match the shape of the Stan program variable, with
        a leading axis added for the number of draws from the variational
        approximation.
        * If the variable is a scalar variable, the return array has shape
          ( draws, ).
        * If the variable is a vector, the return array has shape
          ( draws, len(vector))
        * If the variable is a matrix, the return array has shape
          ( draws, size(dim 1), size(dim 2) )
        * If the variable is an array with N dimensions, the return array
          has shape ( draws, size(dim 1), ..., size(dim N))
        This functionaltiy is also available via a shortcut using ``.`` -
        writing ``fit.a`` is a synonym for ``fit.stan_variable("a")``
        :param var: variable name
        See Also
        --------
        CmdStanVB.stan_variables
        CmdStanMCMC.stan_variable
        CmdStanMLE.stan_variable
        CmdStanGQ.stan_variable
        """
        try:
            out: np.ndarray = self._metadata.stan_vars[var].extract_reshape(
                self._variational_sample
            )
            return out
        except KeyError:
            # pylint: disable=raise-missing-from
            raise ValueError(
                f'Unknown variable name: {var}\n'
                'Available variables are '
                + ", ".join(self._metadata.stan_vars.keys())
            )
    def stan_variables(self) -> Dict[str, np.ndarray]:
        """
        Return a dictionary mapping Stan program variables names
        to the corresponding numpy.ndarray containing the inferred values.
        See Also
        --------
        CmdStanVB.stan_variable
        CmdStanMCMC.stan_variables
        CmdStanMLE.stan_variables
        CmdStanGQ.stan_variables
        """
        result = {}
        for name in self._metadata.stan_vars:
            result[name] = self.METHOD_NAME(name)
        return result
    @property
    def variational_sample(self) -> np.ndarray:
        """Returns the set of approximate posterior output draws."""
        return self._variational_sample
    @property
    def variational_sample_pd(self) -> pd.DataFrame:
        """
        Returns the set of approximate posterior output draws as
        a pandas DataFrame.
        """
        return pd.DataFrame(self._variational_sample, columns=self.column_names)
    def save_csvfiles(self, dir: Optional[str] = None) -> None:
        """
        Move output CSV files to specified directory. If files were
        written to the temporary session directory, clean filename.
        E.g., save 'bernoulli-201912081451-1-5nm6as7u.csv' as
        'bernoulli-201912081451-1.csv'.
        :param dir: directory path
        See Also
        --------
        stanfit.RunSet.save_csvfiles
        cmdstanpy.from_csv
        """
        self.runset.save_csvfiles(dir)
6,429 | update | #--------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# Este programa e software livre; voce pode redistribui-lo e/ou
# modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
# publicada pela Free Software Foundation; de acordo com a versao 2
# da Licenca.
#
# Este programa eh distribuido na expectativa de ser util, mas SEM
# QUALQUER GARANTIA; sem mesmo a garantia implicita de
# COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
# PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
# detalhes.
#--------------------------------------------------------------------------
from vtkmodules.vtkFiltersSources import vtkPlaneSource
from vtkmodules.vtkInteractionWidgets import vtkImagePlaneWidget
from vtkmodules.vtkRenderingCore import vtkActor, vtkCellPicker, vtkPolyDataMapper
AXIAL, SAGITAL, CORONAL = 0, 1, 2
# Per-orientation data: [keyboard activation axis, outline RGB colour].
PLANE_DATA = {AXIAL: ["z", (0, 0, 1)],
              SAGITAL: ["x", (1, 0, 0)],
              CORONAL: ["y", (0, 1, 0)]}
class Plane():
    """
    How to use:
    import ivVolumeWidgets as vw
    imagedata = v16.GetOutput()
    axial_plane = vw.Plane()
    axial_plane.SetRender(ren)
    axial_plane.SetInteractor(pane)
    axial_plane.SetOrientation(vw.CORONAL)
    axial_plane.SetInput(imagedata)
    axial_plane.Show()
    axial_plane.Update()
    """
    def __init__(self):
        self.orientation = AXIAL
        self.render = None
        self.iren = None
        self.index = 0
        self.source = None
        self.widget = None
        self.actor = None

    def SetOrientation(self, orientation=AXIAL):
        """Select which anatomical plane (AXIAL/SAGITAL/CORONAL) to show."""
        self.orientation = orientation

    def SetRender(self, render=None):
        """Set the vtk renderer the outline actor will be added to."""
        self.render = render

    def SetInteractor(self, iren=None):
        """Set the vtk interactor the plane widget attaches to."""
        self.iren = iren

    def SetSliceIndex(self, index):
        """Set the slice to display and sync the widget, if one exists.

        BUG FIX: the original assigned ``self.index = 0`` instead of the
        requested index, so the stored index never tracked the widget.
        """
        self.index = index
        try:
            self.widget.SetSliceIndex(int(index))
        except AttributeError:
            # No widget yet (SetInput not called); the index is kept and
            # applied when the widget is created.
            pass
        else:
            self.METHOD_NAME()
            if self.widget.GetEnabled():
                print("send signal - update slice info in panel and in 2d")

    def SetInput(self, imagedata):
        """Create the image plane widget and its coloured outline actor."""
        axes = PLANE_DATA[self.orientation][0]  # "x", "y" or "z"
        colour = PLANE_DATA[self.orientation][1]
        # Picker for enabling plane motion: allows selection of a cell by
        # shooting a ray into the graphics window.
        picker = vtkCellPicker()
        picker.SetTolerance(0.005)
        picker.PickFromListOn()
        # 3D widget for reslicing image data. It defines a plane that can
        # be interactively placed in an image volume.
        widget = vtkImagePlaneWidget()
        widget.SetInput(imagedata)
        widget.SetSliceIndex(self.index)
        widget.SetPicker(picker)
        widget.SetKeyPressActivationValue(axes)
        widget.SetInteractor(self.iren)
        widget.TextureVisibilityOff()
        widget.DisplayTextOff()
        widget.RestrictPlaneToVolumeOff()
        # Dispatch to SetPlaneOrientationTo{X,Y,Z}Axes; getattr replaces
        # the previous exec() call with a plain dynamic method lookup.
        getattr(widget, "SetPlaneOrientationTo" + axes.upper() + "Axes")()
        widget.AddObserver("InteractionEvent", self.METHOD_NAME)
        self.widget = widget
        prop = widget.GetPlaneProperty()
        prop.SetColor(colour)
        # Syncronize coloured outline with texture appropriately.
        source = vtkPlaneSource()
        source.SetOrigin(widget.GetOrigin())
        source.SetPoint1(widget.GetPoint1())
        source.SetPoint2(widget.GetPoint2())
        source.SetNormal(widget.GetNormal())
        self.source = source
        mapper = vtkPolyDataMapper()
        mapper.SetInput(source.GetOutput())
        actor = vtkActor()
        actor.SetMapper(mapper)
        actor.SetTexture(widget.GetTexture())
        actor.VisibilityOff()
        self.actor = actor
        self.render.AddActor(actor)

    def METHOD_NAME(self, x=None, y=None):
        """Copy the widget's current plane geometry onto the outline source."""
        source = self.source
        widget = self.widget
        source.SetOrigin(widget.GetOrigin())
        source.SetPoint1(widget.GetPoint1())
        source.SetPoint2(widget.GetPoint2())
        source.SetNormal(widget.GetNormal())

    def Show(self, show=1):
        """Toggle the visibility of both the widget and its outline actor."""
        actor = self.actor
        widget = self.widget
        if show:
            actor.VisibilityOn()
            widget.On()
        else:
            actor.VisibilityOff()
            widget.Off()
6,430 | run | # -*- coding: utf-8 -*-
from urllib.parse import urlencode, urlparse
from kubernetes import client
from kubernetes.client import api_client
from kubernetes.client.api import core_v1_api
from kubernetes.client.exceptions import ApiException
from sshtunnel import SSHTunnelForwarder, BaseSSHTunnelForwarderError
from common.utils import get_logger
from ..const import CloudTypes, Category
logger = get_logger(__file__)
class KubernetesClient:
    """
    Thin wrapper around the Kubernetes CoreV1 API for one asset.

    If the asset sits behind a domain gateway, an SSH tunnel is opened
    first and all API traffic goes through the locally forwarded port.
    """
    def __init__(self, asset, token):
        self.url = asset.address
        self.token = token or ''
        # None when the asset needs no gateway.
        self.server = self.get_gateway_server(asset)

    @property
    def api(self):
        """Build a CoreV1Api client, pointing at the tunnel when one exists."""
        configuration = client.Configuration()
        scheme = urlparse(self.url).scheme
        if not self.server:
            host = self.url
        else:
            # Talk to the local end of the SSH tunnel instead of the asset.
            host = f'{scheme}://127.0.0.1:{self.server.local_bind_port}'
        configuration.host = host
        configuration.verify_ssl = False
        configuration.api_key = {"authorization": "Bearer " + self.token}
        c = api_client.ApiClient(configuration=configuration)
        api = core_v1_api.CoreV1Api(c)
        return api

    def get_namespaces(self):
        """Return the names of all namespaces."""
        return [ns.metadata.name for ns in self.api.list_namespace().items]

    def get_pods(self, namespace):
        """Return the names of all pods in ``namespace``."""
        resp = self.api.list_namespaced_pod(namespace)
        return [pd.metadata.name for pd in resp.items]

    def get_containers(self, namespace, pod_name):
        """Return the names of all containers of ``pod_name`` in ``namespace``."""
        resp = self.api.read_namespaced_pod(pod_name, namespace)
        return [container.name for container in resp.spec.containers]

    @staticmethod
    def get_gateway_server(asset):
        """
        Open and return an SSH tunnel to the asset through its domain
        gateway, or None when the asset does not need one.
        """
        gateway = None
        if not asset.is_gateway and asset.domain:
            gateway = asset.domain.select_gateway()
        if not gateway:
            return
        # Parse the asset address once; default to the HTTPS port 443.
        address = urlparse(asset.address)
        remote_bind_address = (address.hostname, address.port or 443)
        server = SSHTunnelForwarder(
            (gateway.address, gateway.port),
            ssh_username=gateway.username,
            ssh_password=gateway.password,
            ssh_pkey=gateway.private_key_path,
            remote_bind_address=remote_bind_address
        )
        try:
            server.start()
        except BaseSSHTunnelForwarderError:
            # BUG FIX: ``asset`` is a model object accessed via attributes
            # everywhere else; the original ``asset.get('name', '')`` would
            # raise AttributeError here and mask the tunnel error.
            err_msg = 'Gateway is not active: %s' % getattr(asset, 'name', '')
            print('\033[31m %s \033[0m\n' % err_msg)
        return server

    def METHOD_NAME(self, tp, *args):
        """
        Dispatch to ``get_<tp>s`` (namespace/pod/container). Returns [] when
        the method is missing or the API errors; always stops the tunnel.
        """
        func_name = f'get_{tp}s'
        data = []
        if hasattr(self, func_name):
            try:
                data = getattr(self, func_name)(*args)
            except ApiException as e:
                logger.error(e.reason)
        if self.server:
            self.server.stop()
        return data
class KubernetesTree:
    """
    Builds the ztree-style node structure (asset > namespace > pod >
    container) used to browse a Kubernetes asset.
    """
    def __init__(self, asset, secret):
        self.asset = asset
        self.secret = secret
    def as_asset_tree_node(self):
        """Root node representing the Kubernetes asset itself."""
        i = str(self.asset.id)
        name = str(self.asset)
        node = self.create_tree_node(
            i, i, name, 'asset', icon='k8s', is_open=True,
        )
        return node
    def as_namespace_node(self, name, tp):
        """Node for a namespace; the node id encodes the namespace name."""
        i = urlencode({'namespace': name})
        pid = str(self.asset.id)
        node = self.create_tree_node(i, pid, name, tp, icon='cloud')
        return node
    def as_pod_tree_node(self, namespace, name, tp):
        """Node for a pod; its parent id is the namespace node's id."""
        pid = urlencode({'namespace': namespace})
        i = urlencode({'namespace': namespace, 'pod': name})
        node = self.create_tree_node(i, pid, name, tp, icon='cloud')
        return node
    def as_container_tree_node(self, namespace, pod, name, tp):
        """Leaf node for a container inside a pod."""
        pid = urlencode({'namespace': namespace, 'pod': pod})
        i = urlencode({'namespace': namespace, 'pod': pod, 'container': name})
        node = self.create_tree_node(
            i, pid, name, tp, icon='cloud', is_container=True
        )
        return node
    @staticmethod
    def create_tree_node(id_, pid, name, identity, icon='', is_container=False, is_open=False):
        """Assemble the dict consumed by the frontend tree component."""
        node = {
            'id': id_,
            'name': name,
            'title': name,
            'pId': pid,
            'isParent': not is_container,
            'open': is_open,
            'iconSkin': icon,
            'meta': {
                'type': 'k8s',
                'data': {
                    'category': Category.CLOUD,
                    'type': CloudTypes.K8S,
                    'identity': identity
                }
            }
        }
        return node
    def async_tree_node(self, namespace, pod):
        """
        Lazily expand one tree level: containers when ``pod`` is given,
        pods when only ``namespace`` is given, namespaces otherwise.
        """
        tree = []
        k8s_client = KubernetesClient(self.asset, self.secret)
        if pod:
            tp = 'container'
            containers = k8s_client.METHOD_NAME(
                tp, namespace, pod
            )
            for container in containers:
                container_node = self.as_container_tree_node(
                    namespace, pod, container, tp
                )
                tree.append(container_node)
        elif namespace:
            tp = 'pod'
            pods = k8s_client.METHOD_NAME(tp, namespace)
            for pod in pods:
                pod_node = self.as_pod_tree_node(namespace, pod, tp)
                tree.append(pod_node)
        else:
            tp = 'namespace'
            namespaces = k8s_client.METHOD_NAME(tp)
            for namespace in namespaces:
                namespace_node = self.as_namespace_node(namespace, tp)
                tree.append(namespace_node)
        return tree
# Copyright 2017,2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nbla_test_utils import list_context, recomputation_test
import platform
ctxs_rand = list_context('Rand')
ctxs_randint = list_context('Randint')
ctxs_randn = list_context('Randn')
ctxs_rand_binomial = list_context('RandBinomial')
ctxs_rand_beta = list_context('RandBeta')
ctxs_rand_gamma = list_context('RandGamma')
@pytest.mark.parametrize("ctx, func_name", ctxs_rand)
@pytest.mark.parametrize("low, high", [(0, 1), (-2.5, 100), (0.1, 0.11)])
@pytest.mark.parametrize("shape", [[], [5], [100, 100]])
@pytest.mark.parametrize("seed", [-1, 313])
def test_rand_forward(seed, ctx, func_name, low, high, shape):
    """Uniform samples must fall inside [low, high] and recompute deterministically."""
    with nn.context_scope(ctx):
        o = F.rand(low, high, shape, seed=seed)
    assert o.shape == tuple(shape)
    assert o.parent.name == func_name
    o.forward()
    # NOTE: The upper check should be < high, but std::uniform_random has a
    # known bug that can produce the bound itself, so <= is used.
    assert np.all((low <= o.d) & (o.d <= high))

    # Checking recomputation
    recomputation_test(rng=None, func=F.rand, vinputs=[],
                       func_args=[low, high, shape, seed],
                       func_kwargs={}, ctx=ctx)
@pytest.mark.parametrize("ctx, func_name", ctxs_randint)
@pytest.mark.parametrize("low, high", [(100, 50000), (-5, 100), (101, 102)])
@pytest.mark.parametrize("shape", [[], [5], [100, 100]])
@pytest.mark.parametrize("seed", [-1, 313])
def test_randint_forward(seed, ctx, func_name, low, high, shape):
    """Integer samples must fall inside [low, high] and recompute deterministically."""
    with nn.context_scope(ctx):
        o = F.randint(low, high, shape, seed=seed)
    assert o.shape == tuple(shape)
    assert o.parent.name == func_name
    o.forward()
    # NOTE: The upper check should be < high, but std::uniform_random has a
    # known bug that can produce the bound itself, so <= is used.
    assert np.all((low <= o.d) & (o.d <= high))

    # Checking recomputation
    recomputation_test(rng=None, func=F.randint, vinputs=[],
                       func_args=[low, high, shape, seed],
                       func_kwargs={}, ctx=ctx)
@pytest.mark.parametrize("ctx, func_name", ctxs_randn)
@pytest.mark.parametrize("mu, sigma", [(0, 1), (-10, 10), (10000.5, 0.5)])
@pytest.mark.parametrize("shape", [[], [5], [100, 100]])
@pytest.mark.parametrize("seed", [-1, 313])
def test_randn_forward_backward(seed, ctx, func_name, mu, sigma, shape):
    """Gaussian samples must roughly match (mu, sigma) and recompute deterministically."""
    with nn.context_scope(ctx):
        o = F.randn(mu, sigma, shape, seed=seed)
    assert o.shape == tuple(shape)
    assert o.parent.name == func_name
    o.forward()
    if o.size >= 10000:
        est_mu = o.d.mean()
        est_sigma = o.d.std()
    else:
        # Too few samples in one draw; pool many draws for stable statistics.
        data = []
        for i in range(10000):
            o.forward()
            data += [o.d.copy()]
        est_mu = np.mean(np.array(data))
        est_sigma = np.std(np.array(data))
    # BUG FIX: the np.isclose return values were previously discarded, so
    # these statistical checks could never fail; assert them explicitly.
    assert np.isclose(est_mu, mu, atol=sigma)
    assert np.isclose(est_sigma, sigma, atol=sigma)

    # Checking recomputation
    func_args = [mu, sigma, shape, seed]
    recomputation_test(rng=None, func=F.randn, vinputs=[],
                       func_args=func_args, func_kwargs={}, ctx=ctx)
@pytest.mark.parametrize("ctx, func_name", ctxs_rand_beta)
@pytest.mark.parametrize("alpha, beta", [(0.5, 0.5), (5, 1), (1, 3), (2, 5), (2, 2)])
@pytest.mark.parametrize("shape", [[50], [100, 100], [32, 4, 16, 16]])
@pytest.mark.parametrize("seed", [-1, 313])
def test_rand_beta_forward(seed, ctx, func_name, alpha, beta, shape):
    """Beta(alpha, beta) samples must roughly match the theoretical moments."""
    with nn.context_scope(ctx):
        o = F.rand_beta(alpha, beta, shape, seed=seed)
    assert o.shape == tuple(shape)
    assert o.parent.name == func_name
    o.forward()
    if o.size >= 10000:
        samples = o.d
    else:
        # Pool many draws so the moment estimates are stable.
        draws = []
        for _ in range(10000):
            o.forward()
            draws.append(o.d.copy())
        samples = np.array(draws)
    est_mu = np.mean(samples)
    est_sigma = np.std(samples)
    # Theoretical moments of Beta(alpha, beta).
    mu = alpha / (alpha + beta)
    var = alpha * beta / ((alpha + beta) ** 2 * (alpha + beta + 1))
    sigma = np.sqrt(var)
    assert np.isclose(est_mu, mu, atol=5e-2)
    assert np.isclose(est_sigma, sigma, atol=5e-2)

    # Checking recomputation
    recomputation_test(rng=None, func=F.rand_beta, vinputs=[],
                       func_args=[alpha, beta, shape, seed],
                       func_kwargs={}, ctx=ctx)
@pytest.mark.parametrize("ctx, func_name", ctxs_rand_binomial)
@pytest.mark.parametrize("n, p", [(1, 0.5), (1, 0.9), (5, 0.5), (5, 0.15), (10, 0.45)])
@pytest.mark.parametrize("shape", [[50], [100, 100], [32, 4, 16, 16]])
@pytest.mark.parametrize("seed", [-1, 313])
def test_rand_binomial_forward(seed, ctx, func_name, n, p, shape):
    """Binomial(n, p) samples must roughly match the theoretical moments."""
    with nn.context_scope(ctx):
        o = F.rand_binomial(n, p, shape, seed=seed)
    assert o.shape == tuple(shape)
    assert o.parent.name == func_name
    o.forward()
    if o.size >= 10000:
        samples = o.d
    else:
        # Pool many draws so the moment estimates are stable.
        draws = []
        for _ in range(10000):
            o.forward()
            draws.append(o.d.copy())
        samples = np.array(draws)
    est_mu = np.mean(samples)
    est_sigma = np.std(samples)
    # Theoretical moments of Binomial(n, p).
    assert np.isclose(est_mu, n * p, atol=5e-2)
    assert np.isclose(est_sigma, np.sqrt(n * p * (1 - p)), atol=5e-2)

    # Checking recomputation
    recomputation_test(rng=None, func=F.rand_binomial, vinputs=[],
                       func_args=[n, p, shape, seed],
                       func_kwargs={}, ctx=ctx)
@pytest.mark.parametrize("ctx, func_name", ctxs_rand_gamma)
@pytest.mark.parametrize("k, theta", [(1, 2), (9, 0.5), (3, 2), (7.5, 1), (0.5, 1)])
@pytest.mark.parametrize("shape", [[50], [100, 100], [1000, 1000]])
@pytest.mark.parametrize("seed", [-1, 313])
@pytest.mark.skipif(platform.system() == "Darwin", reason='skipped on mac')
@pytest.mark.skipif(platform.system() == "Windows", reason='skipped on win')
def METHOD_NAME(seed, ctx, func_name, k, theta, shape):
    """Gamma(k, theta) samples must roughly match the theoretical moments."""
    with nn.context_scope(ctx):
        o = F.rand_gamma(k, theta, shape, seed=seed)
    assert o.shape == tuple(shape)
    assert o.parent.name == func_name
    o.forward()
    # Pool ~1e6 samples in total: a single draw if it is already larger than
    # 10000 elements, otherwise 1000000 // o.size repeated draws. The strict
    # `>` (unlike the sibling tests' `>=`) makes the 100x100 case still pool
    # 100 draws -- intentional, not an off-by-one.
    if o.size > 10000:
        est_mu = o.d.mean()
        est_sigma = o.d.std()
    else:
        data = []
        for i in range(1000000//o.size):
            o.forward()
            data += [o.d.copy()]
        est_mu = np.mean(np.array(data))
        est_sigma = np.std(np.array(data))
    mu = k * theta  # theoretical mean
    var = k * theta * theta
    sigma = np.sqrt(var)  # theoretical std
    assert np.isclose(est_mu, mu, atol=5e-2)
    assert np.isclose(est_sigma, sigma, atol=5e-2)

    # Checking recomputation
    func_args = [k, theta, shape, seed]
    recomputation_test(rng=None, func=F.rand_gamma, vinputs=[],
                       func_args=func_args, func_kwargs={}, ctx=ctx)
#SPDX-License-Identifier: MIT
"""
Metrics that provide data about releases
"""
import datetime
import sqlalchemy as s
import pandas as pd
from augur.api.util import register_metric
from ..server import engine
@register_metric()
def releases(repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None):
    """ Returns a timeseries of new releases created

    :param repo_group_id: The repository's repo_group_id
    :param repo_id: The repository's repo_id, defaults to None
    :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day'
    :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00'
    :param end_date: Specifies the end date, defaults to datetime.now()
    :return: DataFrame of new releases/period
    """
    # Default to the full history when no window is supplied.
    if not begin_date:
        begin_date = '1970-1-1'
    if not end_date:
        end_date = datetime.datetime.now().strftime('%Y-%m-%d')

    # NOTE(review): period/begin_date/end_date are passed as bind parameters
    # below but neither SQL statement references them -- confirm whether the
    # time window / bucketing was meant to be applied in SQL.
    if not repo_id:
        # Group-level query: every repo in the repo group.
        releases_SQL = s.sql.text("""
            SELECT
                repo.repo_name,
                releases.release_id,
                releases.release_name,
                releases.release_description,
                releases.release_author,
                releases.release_created_at,
                releases.release_published_at,
                releases.release_updated_at,
                releases.release_is_draft,
                releases.release_is_prerelease,
                releases.release_tag_name,
                releases.release_url,
                COUNT(releases)
            FROM
                releases LEFT JOIN repo ON releases.repo_id = repo.repo_id
            WHERE releases.tag_only = False
            AND repo.repo_id IN (SELECT repo_id FROM repo WHERE  repo_group_id=:repo_group_id )
            GROUP BY repo.repo_id, releases.release_id
            ORDER BY releases.release_published_at DESC
        """)

        results = pd.read_sql(releases_SQL, engine,
                              params={'period': period, 'repo_group_id': repo_group_id,
                                      'begin_date': begin_date, 'end_date': end_date })
        return results

    else:
        # Single-repo query.
        releases_SQL = s.sql.text("""
            SELECT
                repo.repo_name,
                releases.release_id,
                releases.release_name,
                releases.release_description,
                releases.release_author,
                releases.release_created_at,
                releases.release_published_at,
                releases.release_updated_at,
                releases.release_is_draft,
                releases.release_is_prerelease,
                releases.release_tag_name,
                releases.release_url,
                COUNT(releases)
            FROM
                releases LEFT JOIN repo ON releases.repo_id = repo.repo_id
            WHERE releases.tag_only = False
            AND repo.repo_id = :repo_id
            GROUP BY repo.repo_id, releases.release_id
            ORDER BY releases.release_published_at DESC
        """)

        results = pd.read_sql(releases_SQL, engine,
                              params={'period': period, 'repo_id': repo_id,
                                      'begin_date': begin_date, 'end_date': end_date})
        return results
@register_metric()
def tag_only_releases(repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None):
    """ Returns a timeseries of new tags that are considered releases
        without an official release being published

    :param repo_group_id: The repository's repo_group_id
    :param repo_id: The repository's repo_id, defaults to None
    :param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day'
    :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00'
    :param end_date: Specifies the end date, defaults to datetime.now()
    :return: DataFrame of new releases/period
    """
    # Default to the full history when no window is supplied.
    if not begin_date:
        begin_date = '1970-1-1'
    if not end_date:
        end_date = datetime.datetime.now().strftime('%Y-%m-%d')

    if not repo_id:
        # BUG FIX: the previous group-level query was syntactically broken
        # (missing comma after `releases.*`) and its GROUP BY / ORDER BY
        # referenced `releases.` outside the subquery's scope. Rewritten as a
        # flat join to mirror the working `releases()` query above.
        releases_SQL = s.sql.text("""
            SELECT
                repo.repo_name,
                releases.release_id,
                releases.release_name,
                releases.release_author,
                releases.release_created_at,
                releases.release_tag_name,
                COUNT(releases)
            FROM
                releases LEFT JOIN repo ON releases.repo_id = repo.repo_id
            WHERE releases.tag_only = True
            AND repo.repo_id IN (SELECT repo_id FROM repo WHERE repo_group_id=:repo_group_id )
            GROUP BY repo.repo_id, releases.release_id
            ORDER BY releases.release_published_at DESC
        """)

        results = pd.read_sql(releases_SQL, engine,
                              params={'period': period, 'repo_group_id': repo_group_id,
                                      'begin_date': begin_date, 'end_date': end_date })
        return results

    else:
        releases_SQL = s.sql.text("""
            SELECT
                repo.repo_name,
                releases.release_id,
                releases.release_name,
                releases.release_author,
                releases.release_created_at,
                releases.release_tag_name,
                COUNT(releases)
            FROM
                releases LEFT JOIN repo ON releases.repo_id = repo.repo_id
            WHERE releases.tag_only = True
            AND repo.repo_id = :repo_id
            GROUP BY repo.repo_id, releases.release_id
            ORDER BY releases.release_published_at DESC
        """)
        # BUG FIX: the repo_id bind parameter was passed but never used, so
        # the single-repo variant returned tag-only releases for ALL repos;
        # the `AND repo.repo_id = :repo_id` filter above restores the intent.
        results = pd.read_sql(releases_SQL, engine,
                              params={'period': period, 'repo_id': repo_id,
                                      'begin_date': begin_date, 'end_date': end_date})
        return results
def METHOD_NAME(metrics):
    """Register every metric defined in this module on the given `metrics` object.

    NOTE(review): `add_metrics` is not among this module's visible imports --
    confirm it is in scope at call time.
    """
    add_metrics(metrics, __name__)
from module.logger import logger
from module.map.map_base import CampaignMap
from module.map.map_grids import RoadGrids, SelectedGrids
from .campaign_14_base import CampaignBase
from .campaign_14_base import Config as ConfigBase
MAP = CampaignMap('14-4')
MAP.shape = 'K9'
MAP.camera_data = ['D2', 'D5', 'D7', 'H2', 'H5', 'H7']
MAP.camera_data_spawn_point = ['H2']
MAP.map_covered = ['A4']
MAP.map_data = """
ME -- ++ ++ -- ME ME ME ++ ++ ++
-- ME ME ME ME ME ME -- SP SP --
MB -- __ -- -- -- -- -- ME -- --
MB ME -- Me Me -- Me ++ ++ -- ME
MM -- Me ME -- Me -- MA ++ -- ME
++ ME ME -- ++ -- ME -- ME -- --
++ -- ME Me Me ME -- -- -- -- ++
-- -- -- -- -- __ -- ME ME -- ME
-- -- ++ MB MB ++ ++ MM ME ME ME
"""
MAP.weight_data = """
40 40 40 40 50 50 50 50 50 50 50
40 40 40 40 50 50 50 50 50 50 50
40 40 40 40 50 50 50 50 50 50 50
40 40 40 40 50 50 50 50 50 50 50
40 40 40 40 50 50 50 50 50 50 50
40 40 40 40 50 50 50 50 50 50 50
40 40 40 40 50 50 50 50 50 50 50
50 10 50 50 50 50 50 40 40 50 50
50 50 50 50 50 50 50 50 50 40 50
"""
MAP.spawn_data = [
{'battle': 0, 'enemy': 3},
{'battle': 1, 'enemy': 3},
{'battle': 2, 'enemy': 2},
{'battle': 3, 'enemy': 2},
{'battle': 4, 'enemy': 1},
{'battle': 5, 'enemy': 1},
{'battle': 6},
{'battle': 7, 'boss': 1},
]
MAP.spawn_data_loop = [
{'battle': 0, 'enemy': 2},
{'battle': 1, 'enemy': 3},
{'battle': 2, 'enemy': 2},
{'battle': 3, 'enemy': 2},
{'battle': 4, 'enemy': 1},
{'battle': 5, 'enemy': 1},
{'battle': 6},
{'battle': 7, 'boss': 1},
]
A1, B1, C1, D1, E1, F1, G1, H1, I1, J1, K1, \
A2, B2, C2, D2, E2, F2, G2, H2, I2, J2, K2, \
A3, B3, C3, D3, E3, F3, G3, H3, I3, J3, K3, \
A4, B4, C4, D4, E4, F4, G4, H4, I4, J4, K4, \
A5, B5, C5, D5, E5, F5, G5, H5, I5, J5, K5, \
A6, B6, C6, D6, E6, F6, G6, H6, I6, J6, K6, \
A7, B7, C7, D7, E7, F7, G7, H7, I7, J7, K7, \
A8, B8, C8, D8, E8, F8, G8, H8, I8, J8, K8, \
A9, B9, C9, D9, E9, F9, G9, H9, I9, J9, K9, \
= MAP.flatten()
# 14-4 has special enemy spawn mechanism
# After entering map or each battle, enemies spawn on these nodes:
# ['C2', 'D3', 'D4', 'H8', 'I7'], and 'B8' must spawns an enemy
# ['A1', 'B2', 'B6', 'C7']
# ['F5', 'G4', 'G6', 'I8', 'J9']
# ['F2', 'G1', 'H2', 'K4', 'K5']
# ['C5', 'C6', 'D5']
# ['E8', 'G8']
OVERRIDE = CampaignMap('14-4')
OVERRIDE.map_data = """
ME -- -- -- -- -- ME -- -- -- --
-- ME ME -- -- ME -- ME -- -- --
-- -- -- ME -- -- -- -- -- -- --
-- -- -- ME -- -- ME -- -- -- ME
-- -- ME ME -- ME -- -- -- -- ME
-- ME ME -- -- -- ME -- -- -- --
-- -- ME -- -- -- -- -- ME -- --
-- ME -- -- ME -- ME ME ME -- --
-- -- -- -- -- -- -- -- -- ME --
"""
road_A8 = RoadGrids([B8])
road_H9 = RoadGrids([[H8, I8, J9], ])
class Config(ConfigBase):
    """Map-specific configuration overrides for campaign 14-4."""
    # ===== Start of generated config =====
    # MAP_SIREN_TEMPLATE = ['0']
    # MOVABLE_ENEMY_TURN = (2,)
    # MAP_HAS_SIREN = True
    # MAP_HAS_MOVABLE_ENEMY = True
    MAP_HAS_MAP_STORY = False
    MAP_HAS_FLEET_STEP = False
    MAP_HAS_AMBUSH = True
    # MAP_HAS_MYSTERY = True
    # ===== End of generated config =====
    MAP_WALK_USE_CURRENT_FLEET = True
class Campaign(CampaignBase):
    MAP = MAP

    def map_data_init(self, map_):
        """Apply the OVERRIDE special-spawn layout on top of the base map data."""
        super().map_data_init(map_)
        for override_grid in OVERRIDE:
            # Set may_enemy, but keep may_ambush
            self.map[override_grid.location].may_enemy = override_grid.may_enemy

    def _clear_stage(self, preserve, pick_up=False):
        """Shared mob-clearing logic for battles 0, 3 and 6 (deduplicated).

        Args:
            preserve (int): Number of enemies to leave on the map for later battles.
            pick_up (bool): Also collect ammo and drop a flare at H9 (battles 3/6).

        Returns:
            bool: Whether a battle target was selected.
        """
        self.pick_up_light_house(A9)
        if pick_up:
            self.pick_up_ammo()
            self.pick_up_flare(H9)
        if self.clear_roadblocks([road_A8, road_H9], weakest=False):
            return True
        if self.clear_filter_enemy(self.ENEMY_FILTER, preserve=preserve):
            return True
        return self.battle_default()

    def METHOD_NAME(self):
        return self._clear_stage(preserve=1)

    def battle_3(self):
        return self._clear_stage(preserve=1, pick_up=True)

    def battle_6(self):
        # Last mob battle: no enemies need to be preserved.
        return self._clear_stage(preserve=0, pick_up=True)

    def battle_7(self):
        """Boss battle: light the boss node and send the boss fleet in."""
        self.fleet_boss.pick_up_flare(A5)
        return self.fleet_boss.clear_boss()
"""pytorch backend implementation"""
from packaging.version import Version
import torch
# DeepXDE's PyTorch backend relies on APIs introduced in torch 1.9.
if Version(torch.__version__) < Version("1.9.0"):
    raise RuntimeError("DeepXDE requires PyTorch>=1.9.0.")

# To write device-agnostic (CPU or GPU) code, a common pattern is to first determine
# torch.device and then use it for all the tensors.
# https://pytorch.org/docs/stable/notes/cuda.html
# >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# >>> tensor.to(device=device)
# But, taking care of all tensors requires a lot of work.
# An alternative way is to use GPU by default if GPU is available, which is similar to
# TensorFlow.
if torch.cuda.is_available():
    torch.set_default_tensor_type(torch.cuda.FloatTensor)

# Expose the underlying framework module under the backend-neutral name.
lib = torch
def data_type_dict():
    """Map backend-neutral dtype names to torch dtypes."""
    return {
        "float16": torch.float16,
        "float32": torch.float32,
        "float64": torch.float64,
        "uint8": torch.uint8,
        "int8": torch.int8,
        "int16": torch.int16,
        "int32": torch.int32,
        "int64": torch.int64,
        "bool": torch.bool,
    }


def is_gpu_available():
    """Return True if a CUDA device is available."""
    return torch.cuda.is_available()


def is_tensor(obj):
    return torch.is_tensor(obj)


def shape(input_tensor):
    """Return the tensor shape as a plain Python list."""
    return list(input_tensor.shape)


def size(tensor):
    """Return the total number of elements in the tensor."""
    return torch.numel(tensor)


def ndim(input_tensor):
    return input_tensor.dim()


def transpose(tensor, axes=None):
    """Permute tensor axes; defaults to reversing all axes."""
    if axes is None:
        axes = tuple(range(tensor.dim())[::-1])
    return torch.permute(tensor, axes)


def reshape(tensor, shape):
    return torch.reshape(tensor, shape)


def Variable(initial_value, dtype=None):
    """Return a trainable tensor initialized with initial_value."""
    return torch.tensor(initial_value, dtype=dtype, requires_grad=True)


def as_tensor(data, dtype=None):
    # Reuse an existing tensor without copying when the dtype already matches.
    if isinstance(data, torch.Tensor):
        if dtype is None or data.dtype == dtype:
            return data
        return data.type(dtype=dtype)
    return torch.as_tensor(data, dtype=dtype)
def sparse_tensor(indices, values, shape):
    """Build a trainable sparse COO tensor from (row, col) index pairs."""
    return torch.sparse_coo_tensor(list(zip(*indices)), values, shape, requires_grad=True)


def from_numpy(np_array):
    # Both torch.from_numpy and torch.as_tensor work without memory copy.
    # https://discuss.pytorch.org/t/from-numpy-vs-as-tensor/79932
    # https://stackoverflow.com/questions/48482787/pytorch-memory-model-torch-from-numpy-vs-torch-tensor
    # But torch.from_numpy cannot handle device.
    return torch.as_tensor(np_array)


def to_numpy(input_tensor):
    # Detach from the autograd graph and move to host memory before converting.
    return input_tensor.detach().cpu().numpy()


def concat(values, axis):
    return torch.cat(values, axis)


def stack(values, axis):
    return torch.stack(values, axis)


def expand_dims(tensor, axis):
    return torch.unsqueeze(tensor, axis)


def reverse(tensor, axis):
    """Reverse the order of elements along the given axes (list/tuple)."""
    return torch.flip(tensor, axis)


def roll(tensor, shift, axis):
    return torch.roll(tensor, shift, axis)


def lgamma(x):
    return torch.lgamma(x)


def elu(x):
    return torch.nn.functional.elu(x)


def relu(x):
    return torch.nn.functional.relu(x)


def gelu(x):
    return torch.nn.functional.gelu(x)


def selu(x):
    return torch.nn.functional.selu(x)


def sigmoid(x):
    return torch.nn.functional.sigmoid(x)


def silu(x):
    return torch.nn.functional.silu(x)


def sin(x):
    return torch.sin(x)


def cos(x):
    return torch.cos(x)


def exp(x):
    return torch.exp(x)


def square(x):
    return torch.square(x)


# pylint: disable=redefined-builtin
def abs(x):
    return torch.abs(x)


def minimum(x, y):
    return torch.minimum(x, y)


def tanh(x):
    return torch.tanh(x)
def METHOD_NAME(x, y):
    """Return element-wise x ** y.

    BUG FIX: previously delegated to the non-existent `torch.METHOD_NAME`,
    which raised AttributeError on first call; the intended op is torch.pow.
    """
    return torch.pow(x, y)
def mean(input_tensor, dim, keepdims=False):
    return torch.mean(input_tensor, dim, keepdim=keepdims)


def reduce_mean(input_tensor):
    """Mean over all elements (full reduction)."""
    return torch.mean(input_tensor)


def sum(input_tensor, dim, keepdims=False):
    return torch.sum(input_tensor, dim, keepdim=keepdims)


def reduce_sum(input_tensor):
    return torch.sum(input_tensor)


def prod(input_tensor, dim, keepdims=False):
    return torch.prod(input_tensor, dim, keepdim=keepdims)


def reduce_prod(input_tensor):
    return torch.prod(input_tensor)


# pylint: disable=redefined-builtin
def min(input_tensor, dim, keepdims=False):
    # torch.amin reduces over `dim` and returns only values (no indices).
    return torch.amin(input_tensor, dim, keepdim=keepdims)


def reduce_min(input_tensor):
    return torch.min(input_tensor)


# pylint: disable=redefined-builtin
def max(input_tensor, dim, keepdims=False):
    # torch.amax reduces over `dim` and returns only values (no indices).
    return torch.amax(input_tensor, dim, keepdim=keepdims)


def reduce_max(input_tensor):
    return torch.max(input_tensor)


def norm(tensor, ord=None, axis=None, keepdims=False):
    return torch.linalg.norm(tensor, ord=ord, dim=axis, keepdim=keepdims)


def zeros(shape, dtype):
    return torch.zeros(shape, dtype=dtype)


def zeros_like(input_tensor):
    return torch.zeros_like(input_tensor)


def matmul(x, y):
    """Dense 2-D matrix multiplication."""
    return torch.mm(x, y)


def sparse_dense_matmul(x, y):
    """Multiply a sparse matrix x by a dense matrix y."""
    return torch.sparse.mm(x, y)
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetQueueIamPolicyResult',
'AwaitableGetQueueIamPolicyResult',
'get_queue_iam_policy',
'get_queue_iam_policy_output',
]
# Machine-generated result wrapper (see the file header): one attribute per
# output field, validated in __init__.
@pulumi.output_type
class GetQueueIamPolicyResult:
    """
    A collection of values returned by getQueueIamPolicy.
    """
    def __init__(__self__, etag=None, id=None, location=None, name=None, policy_data=None, project=None):
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if policy_data and not isinstance(policy_data, str):
            raise TypeError("Expected argument 'policy_data' to be a str")
        pulumi.set(__self__, "policy_data", policy_data)
        if project and not isinstance(project, str):
            raise TypeError("Expected argument 'project' to be a str")
        pulumi.set(__self__, "project", project)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        (Computed) The etag of the IAM policy.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> str:
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="policyData")
    def policy_data(self) -> str:
        """
        (Required only by `cloudtasks.QueueIamPolicy`) The policy data generated by
        a `organizations_get_iam_policy` data source.
        """
        return pulumi.get(self, "policy_data")

    @property
    @pulumi.getter
    def project(self) -> str:
        return pulumi.get(self, "project")
class AwaitableGetQueueIamPolicyResult(GetQueueIamPolicyResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns __await__ into a generator so the
        # already-resolved result can be awaited without suspending.
        if False:
            yield self
        return GetQueueIamPolicyResult(
            etag=self.etag,
            id=self.id,
            location=self.location,
            name=self.name,
            policy_data=self.policy_data,
            project=self.project)
def METHOD_NAME(location: Optional[str] = None,
                name: Optional[str] = None,
                project: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetQueueIamPolicyResult:
    """
    Retrieves the current IAM policy data for queue

    ## Example Usage

    ```python
    import pulumi
    import pulumi_gcp as gcp

    policy = gcp.cloudtasks.get_queue_iam_policy(project=google_cloud_tasks_queue["default"]["project"],
        location=google_cloud_tasks_queue["default"]["location"],
        name=google_cloud_tasks_queue["default"]["name"])
    ```

    :param str location: The location of the queue Used to find the parent resource to bind the IAM policy to
    :param str name: Used to find the parent resource to bind the IAM policy to
    :param str project: The ID of the project in which the resource belongs.
           If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
    """
    __args__ = dict()
    __args__['location'] = location
    __args__['name'] = name
    __args__['project'] = project
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous invoke of the provider function; the result fields are
    # unpacked into the awaitable wrapper below.
    __ret__ = pulumi.runtime.invoke('gcp:cloudtasks/getQueueIamPolicy:getQueueIamPolicy', __args__, opts=opts, typ=GetQueueIamPolicyResult).value

    return AwaitableGetQueueIamPolicyResult(
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        policy_data=pulumi.get(__ret__, 'policy_data'),
        project=pulumi.get(__ret__, 'project'))
# Output-lifted variant: accepts pulumi Outputs as inputs and returns an
# Output; the implementation is generated from METHOD_NAME by the decorator.
@_utilities.lift_output_func(METHOD_NAME)
def get_queue_iam_policy_output(location: Optional[pulumi.Input[Optional[str]]] = None,
                                name: Optional[pulumi.Input[str]] = None,
                                project: Optional[pulumi.Input[Optional[str]]] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetQueueIamPolicyResult]:
    """
    Retrieves the current IAM policy data for queue

    ## Example Usage

    ```python
    import pulumi
    import pulumi_gcp as gcp

    policy = gcp.cloudtasks.get_queue_iam_policy(project=google_cloud_tasks_queue["default"]["project"],
        location=google_cloud_tasks_queue["default"]["location"],
        name=google_cloud_tasks_queue["default"]["name"])
    ```

    :param str location: The location of the queue Used to find the parent resource to bind the IAM policy to
    :param str name: Used to find the parent resource to bind the IAM policy to
    :param str project: The ID of the project in which the resource belongs.
           If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
    """
    ...
# -*- coding: utf-8 -*-
"""
pygments.formatters
~~~~~~~~~~~~~~~~~~~
Pygments formatters.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import types
import fnmatch
from os.path import basename
from pygments.formatters._mapping import FORMATTERS
from pygments.plugin import find_plugin_formatters
from pygments.util import ClassNotFound, itervalues
__all__ = ['get_formatter_by_name', 'get_formatter_for_filename',
'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS)
_formatter_cache = {} # classes by name
_pattern_cache = {}
def _fn_matches(fn, glob):
    """Return whether the supplied file name fn matches pattern filename."""
    pattern = _pattern_cache.get(glob)
    if pattern is None:
        # Translate the glob once and memoize the compiled regex.
        pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
    return pattern.match(fn)
def _load_formatters(module_name):
    """Load a formatter (and all others in the module too)."""
    mod = __import__(module_name, None, None, ['__all__'])
    for formatter_name in mod.__all__:
        cls = getattr(mod, formatter_name)
        # Cache by the formatter's human-readable name (cls.name), which is
        # what the FORMATTERS mapping keys on.
        _formatter_cache[cls.name] = cls
def get_all_formatters():
    """Return a generator for all formatter classes."""
    # NB: this returns formatter classes, not info like get_all_lexers().
    # Each info tuple is (module_name, name, aliases, filenames, doc);
    # modules are imported lazily on first use.
    for info in itervalues(FORMATTERS):
        if info[1] not in _formatter_cache:
            _load_formatters(info[0])
        yield _formatter_cache[info[1]]
    # Plugin formatters are yielded after the built-in ones.
    for _, formatter in find_plugin_formatters():
        yield formatter
def find_formatter_class(alias):
    """Lookup a formatter by alias.

    Returns None if not found.
    """
    # Built-in formatters first; load the owning module lazily on a hit.
    for module_name, name, aliases, _, _ in itervalues(FORMATTERS):
        if alias not in aliases:
            continue
        if name not in _formatter_cache:
            _load_formatters(module_name)
        return _formatter_cache[name]
    # Fall back to formatters registered by plugins.
    for _, plugin_cls in find_plugin_formatters():
        if alias in plugin_cls.aliases:
            return plugin_cls
def METHOD_NAME(_alias, **options):
    """Lookup and instantiate a formatter by alias.

    Raises ClassNotFound if not found.
    """
    formatter_cls = find_formatter_class(_alias)
    if formatter_cls is None:
        raise ClassNotFound("no formatter found for name %r" % _alias)
    return formatter_cls(**options)
def load_formatter_from_file(filename, formattername="CustomFormatter",
                             **options):
    """Load a formatter from a file.

    This method expects a file located relative to the current working
    directory, which contains a class named CustomFormatter. By default,
    it expects the Formatter to be named CustomFormatter; you can specify
    your own class name as the second argument to this function.

    Users should be very careful with the input, because this method
    is equivalent to running eval on the input file.

    Raises ClassNotFound if there are any problems importing the Formatter.

    .. versionadded:: 2.2
    """
    try:
        # Execute the file; its top-level names land in this namespace.
        namespace = {}
        with open(filename, 'rb') as f:
            exec(f.read(), namespace)
        if formattername not in namespace:
            raise ClassNotFound('no valid %s class found in %s' %
                                (formattername, filename))
        # Instantiate the requested class with the caller's options.
        return namespace[formattername](**options)
    except IOError as err:
        raise ClassNotFound('cannot read %s: %s' % (filename, err))
    except ClassNotFound:
        # Re-raise our own errors (including the missing-class case above).
        raise
    except Exception as err:
        raise ClassNotFound('error when loading custom formatter: %s' % err)
def get_formatter_for_filename(fn, **options):
    """Lookup and instantiate a formatter by filename pattern.

    Raises ClassNotFound if not found.
    """
    fn = basename(fn)
    for modname, name, _, filenames, _ in itervalues(FORMATTERS):
        for filename in filenames:
            if _fn_matches(fn, filename):
                if name not in _formatter_cache:
                    _load_formatters(modname)
                return _formatter_cache[name](**options)
    # BUG FIX: find_plugin_formatters() yields (name, class) pairs -- see
    # get_all_formatters and find_formatter_class above. The previous code
    # iterated the bare pair and accessed `.filenames` on a tuple, raising
    # AttributeError whenever a plugin formatter was installed.
    for _name, cls in find_plugin_formatters():
        for filename in cls.filenames:
            if _fn_matches(fn, filename):
                return cls(**options)
    raise ClassNotFound("no formatter found for file name %r" % fn)
class _automodule(types.ModuleType):
    """Automatically import formatters."""

    def __getattr__(self, name):
        # Called only for attributes not yet on the module: import the
        # formatter's module on first access, then cache the class on the
        # module object so subsequent lookups bypass __getattr__.
        info = FORMATTERS.get(name)
        if info:
            _load_formatters(info[0])
            cls = _formatter_cache[info[1]]
            setattr(self, name, cls)
            return cls
        raise AttributeError(name)
# Replace this module in sys.modules with an _automodule instance so that
# formatter classes listed in FORMATTERS are imported lazily on first
# attribute access; then drop the bootstrap names from the new module.
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import collections
from libmozdata import utils as lmdutils
from bugbot import logger, people, utils
from bugbot.bzcleaner import BzCleaner
from bugbot.constants import HIGH_PRIORITY, HIGH_SEVERITY
from bugbot.user_activity import UserActivity
class AssigneeNoLogin(BzCleaner):
def __init__(self):
    super(AssigneeNoLogin, self).__init__()
    # How long a bug may stay with an inactive assignee before unassignment.
    self.unassign_weeks = utils.get_config(self.name(), "unassign_weeks", 2)
    # Caps on needinfo requests and total autofix actions per run.
    self.max_ni = utils.get_config(self.name(), "max_ni")
    self.max_actions = utils.get_config(self.name(), "max_actions")
    # product -> component -> default assignee.
    self.default_assignees = utils.get_default_assignees()
    self.people = people.People.get_instance()
    self.unassign_count = collections.defaultdict(int)
    # Per-bug extra data rendered into the needinfo/report templates.
    self.extra_ni = {}
def description(self):
    """One-line description shown in reports and emails."""
    return "Open and assigned bugs where the assignee is inactive"

def has_product_component(self):
    return True

def has_assignee(self):
    return True

def get_extra_for_needinfo_template(self):
    # The needinfo template reuses the same per-bug extra data as the report.
    return self.get_extra_for_template()

def get_extra_for_template(self):
    return self.extra_ni

def columns(self):
    """Columns rendered in the report table, in display order."""
    return [
        "triage_owner_name",
        "component",
        "id",
        "summary",
        "assignee",
        "assignee_status",
    ]

def METHOD_NAME(self):
    # Maximum number of needinfo requests per run (from tool config).
    return self.max_ni

def get_max_actions(self):
    # Maximum number of autofix actions per run (from tool config).
    return self.max_actions
def get_bugs(self, *args, **kwargs):
bugs = super().get_bugs(*args, **kwargs)
bugs = self.handle_inactive_assignees(bugs)
# Resolving https://github.com/mozilla/bugbot/issues/1300 should clean this
# including improve the wording in the template (i.e., "See the search query on Bugzilla").
self.query_url = utils.get_bz_search_url({"bug_id": ",".join(bugs.keys())})
return bugs
def handle_inactive_assignees(self, bugs):
user_activity = UserActivity()
assignees = {bug["assigned_to"] for bug in bugs.values()}
triage_owners = {bug["triage_owner"] for bug in bugs.values()}
inactive_users = user_activity.check_users(assignees | triage_owners)
res = {}
for bugid, bug in bugs.items():
if (
bug["assigned_to"] not in inactive_users
# If we don't have an active triage owner, we need to wait until
# we have one before doing anything.
or bug["triage_owner"] in inactive_users
):
continue
bug["assignee_status"] = user_activity.get_string_status(
inactive_users[bug["assigned_to"]]["status"]
)
self.add_action(bug)
res[bugid] = bug
return res
def add_action(self, bug):
prod = bug["product"]
comp = bug["component"]
default_assignee = self.default_assignees[prod][comp]
autofix = {"assigned_to": default_assignee}
# Avoid to ni if the bug has low priority and low severity.
# It's not paramount for triage owners to make an explicit decision here, it's enough for them
# to receive the notification about the unassignment from Bugzilla via email.
if (
bug["priority"] not in HIGH_PRIORITY
and bug["severity"] not in HIGH_SEVERITY
) or "stalled" in bug["keywords"]:
needinfo = None
autofix["comment"] = {
"body": "The bug assignee is inactive on Bugzilla, so the assignee is being reset."
}
else:
reason = []
if bug["priority"] in HIGH_PRIORITY:
reason.append("priority '{}'".format(bug["priority"]))
if bug["severity"] in HIGH_SEVERITY:
reason.append("severity '{}'".format(bug["severity"]))
needinfo = {
"mail": bug["triage_owner"],
"nickname": bug["triage_owner_nick"],
"extra": {"reason": "/".join(reason)},
}
self.add_prioritized_action(bug, bug["triage_owner"], needinfo, autofix)
def handle_bug(self, bug, data):
bugid = str(bug["id"])
if "triage_owner_detail" not in bug:
logger.warning("Skip bug %s: no triage owner", bugid)
return None
data[bugid] = {
"assigned_to": bug["assigned_to"],
"triage_owner": bug["triage_owner"],
"triage_owner_name": utils.get_name_from_user_detail(
bug["triage_owner_detail"]
),
"triage_owner_nick": bug["triage_owner_detail"]["nick"],
"priority": bug["priority"],
"severity": bug["severity"],
"keywords": bug["keywords"],
}
return bug
def get_bug_sort_key(self, *args, **kwargs):
return utils.get_sort_by_bug_importance_key(*args, **kwargs)
def get_bz_params(self, date):
date = lmdutils.get_date_ymd(date)
fields = [
"assigned_to",
"triage_owner",
"flags",
"priority",
"severity",
"keywords",
]
params = {
"include_fields": fields,
"resolution": "---",
"n3": "1",
"f3": "assigned_to",
"o3": "changedafter",
"v3": f"-{self.unassign_weeks}w",
}
utils.get_empty_assignees(params, negation=True)
return params
if __name__ == "__main__":
    AssigneeNoLogin().run()
#
# Copyright (c) 2010--2016 Red Hat, Inc.
# Copyright (c) 2022 SUSE, LLC
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Converts headers to the intermediate format
#
from . import headerSource
import time
from contextlib import suppress
from spacewalk.server.importlib.importLib import Channel
from spacewalk.server.importlib.backendLib import gmtime, localtime
class debBinaryPackage(headerSource.rpmBinaryPackage):
    """Converts a Debian binary package header into the intermediate import format.

    Mirrors headerSource.rpmBinaryPackage, with Debian-specific handling for
    dependency strings, payload defaults, and the absence of RPM file and
    changelog data.
    """

    def __init__(self, header, size, checksum_type, checksum, path=None, org_id=None,
                 channels=None):
        headerSource.rpmBinaryPackage.__init__(self)
        # Avoid a shared mutable default argument (previously channels=[]).
        if channels is None:
            channels = []

        self.tagMap = headerSource.rpmBinaryPackage.tagMap.copy()

        # Remove tags that are handled explicitly below rather than via tagMap.
        self._already_mapped = [
            'rpm_version', 'payload_size', 'payload_format',
            'package_group', 'build_time', 'build_host'
        ]
        for tag in self._already_mapped:
            with suppress(KeyError):
                del self.tagMap[tag]

        # XXX it seems to me that this is the place that 'source_rpm' is getting
        # set
        for key in self.keys():
            field = self.tagMap.get(key, key)
            if not field:  # unsupported
                continue
            value = header[field]
            if key == "build_time" and isinstance(value, int):
                value = gmtime(value)  # unix timestamp
            elif value == []:
                value = None
            elif value:
                value = str(value)
            self[key] = value

        self['package_size'] = size
        self['checksum_type'] = checksum_type
        self['checksum'] = checksum
        self['path'] = path
        self['org_id'] = org_id
        self['header_start'] = None
        self['header_end'] = None
        self['last_modified'] = localtime(time.time())
        # Migrate the md5 signature into the generic sigchecksum fields.
        if self['sigmd5']:
            self['sigchecksum_type'] = 'md5'
            self['sigchecksum'] = self['sigmd5']
            del self['sigmd5']

        # Fix some of the information up
        vendor = self['vendor']
        if vendor is None:
            self['vendor'] = 'Debian'
        payloadFormat = self['payload_format']
        if payloadFormat is None:
            self['payload_format'] = 'ar'
        if self['payload_size'] is None:
            self['payload_size'] = 0

        # Populate file information
        self._populateFiles(header)
        # Populate dependency information
        self._populateDependencyInformation(header)
        # Populate changelogs
        self._populateChangeLog(header)
        # Channels
        self._populateChannels(channels)
        # populate extraTags from headers not in already mapped fields
        self._populateExtraTags(header)

        self['source_rpm'] = None
        group = self.get('package_group', '')
        if group == '' or group is None:
            self['package_group'] = 'NoGroup'

    def _populateFiles(self, header):
        """Deb packages carry no per-file data here; keep the list empty.

        The commented block shows the rpm-style population deliberately left out.
        """
        files = []
        # for f in header.get('files', []):
        #    fc = headerSource.rpmFile()
        #    fc.populate(f)
        #    files.append(fc)
        self['files'] = files

    def _populateDependencyInformation(self, header):
        """Parse Debian relationship fields into rpm-style dependency objects.

        Each field is a comma-separated list of clauses, optionally qualified
        with "(op version)". The operator is encoded into RPM flag bits:
        2 = less-than, 4 = greater-than, 8 = equal (OR'd together for <=, >=).
        Names get a positional "_<i>" suffix to keep them unique.
        """
        mapping = {
            'provides': headerSource.rpmProvides,
            'requires': headerSource.rpmRequires,
            'conflicts': headerSource.rpmConflicts,
            'obsoletes': headerSource.rpmObsoletes,
            'suggests': headerSource.rpmSuggests,
            'recommends': headerSource.rpmRecommends,
            'breaks': headerSource.rpmBreaks,
            'predepends': headerSource.rpmPredepends,
        }
        for k, dclass in mapping.items():
            deps = []
            values = header[k]
            if values is not None:
                clauses = [elem.strip() for elem in values.split(',')]  # split packages
                i = 0
                for v in clauses:
                    relation = 0
                    version = ''
                    if '|' in v:
                        # TODO: store alternative-package-names semantically someday
                        name = v + '_' + str(i)
                    else:
                        nv = v.split('(')
                        name = nv[0] + '_' + str(i)
                        if len(nv) > 1:
                            version = nv[1].rstrip(')')
                        if version:
                            # Consume leading <, >, = characters into flag bits.
                            while version.startswith(("<", ">", "=")):
                                if version.startswith("<"):
                                    relation |= 2
                                if version.startswith(">"):
                                    relation |= 4
                                if version.startswith("="):
                                    relation |= 8
                                version = version[1:]
                    dep = {'name': name, 'version': version, 'flags': relation}
                    finst = dclass()
                    finst.populate(dep)
                    deps.append(finst)
                    i += 1
            self[k] = deps

    def _populateChangeLog(self, header):
        """Deb packages carry no changelog data here; keep the list empty."""
        changelog = []
        # for cinfo in header.get('changelog', []):
        #    cinst = headerSource.rpmChangeLog()
        #    cinst.populate(cinfo)
        #    changelog.append(cinst)
        self['changelog'] = changelog

    def _populateChannels(self, channels):
        """Convert channel labels into Channel objects on self['channels']."""
        channel_objects = []
        for channel in channels:
            obj = Channel()
            obj.populate({'label': channel})
            channel_objects.append(obj)
        self['channels'] = channel_objects

    def _populateExtraTags(self, header):
        """Collect all non-empty header fields not explicitly processed above."""
        already_processed = ['arch',
                             'name',
                             'summary',
                             'epoch',
                             'version',
                             'release',
                             'payload_size',
                             'vendor',
                             'package_group',
                             'requires',
                             'obsoletes',
                             'predepends',
                             'package',
                             'architecture',
                             'description',
                             'maintainer',
                             'section',
                             'version',
                             'depends',
                             'provides',
                             'conflicts',
                             'replaces',
                             'recommends',
                             'suggests',
                             'breaks',
                             'pre-depends',
                             'installed-size',
                             ]
        extra = []
        for k, v in header.items():
            if k.lower() not in already_processed and v:
                extra.append({'name': k, 'value': v})
        self['extra_tags'] = extra
from typing import Iterator, Optional, Tuple
from sortedcontainers import SortedDict
class TaggedIntervalMap:
    """
    Catalogs features of intervals.
    """

    def __init__(self, nbits: int = 0):
        """
        :param nbits: Number of binning bits. Higher values reduce detail. 0 for no binning.
        """
        self._nbits: int = nbits
        self._map: SortedDict = SortedDict()  # SortedDict[int, int]

    @property
    def nbits(self) -> int:
        return self._nbits

    def add(self, addr: int, size: int, tags: int) -> None:
        """
        Add interval starting at `addr` of `size` bytes.

        When binning, intervals endpoints are aligned to 2^nbits. Gaps between added intervals are populated with
        implicit intervals having tag value of 0. Overlapping intervals will have tag bits OR'd together.

        Adjacent intervals in the interval map have unique tags. When intervals are added to the map, any adjacent stops
        with identical tags will be eliminated so the map retains this property.

        For example: if an interval(addr=0, size=100, tags=1) is added, followed by (100, 100, 1), the resulting
        interval in the map will be (0, 200, 1).
        """
        assert addr >= 0
        assert size >= 0
        assert tags != 0
        if size == 0:
            return
        max_bin_offset = (1 << self._nbits) - 1
        mask = ~max_bin_offset
        start_addr = addr & mask  # Round down to bin alignment
        end_addr = (addr + size + max_bin_offset) & mask  # Round up to bin alignment
        if self._is_already_covered(start_addr, end_addr, tags):
            return
        self._insert_stop(start_addr)
        self._insert_stop(end_addr)
        # OR the new tags into every stop covered by [start_addr, end_addr).
        for affected_addr in self._map.irange(start_addr, end_addr, inclusive=(True, False)):
            self._map[affected_addr] |= tags
        self._eliminate_extraneous_stops(start_addr, end_addr)

    def _insert_stop(self, addr: int) -> None:
        """
        Insert a new interval stop point at `addr`, if one is not already present in the map. Tags are copied from
        nearest stop before `addr`.
        """
        if addr not in self._map:
            idx = self._map.bisect(addr) - 1
            self._map[addr] = self._map.peekitem(idx)[1] if idx >= 0 else 0

    def _is_already_covered(self, min_addr: int, end_addr: int, tags: int) -> bool:
        """
        Determine if interval [min_addr, end_addr) is covered by an existing range with identical tags.
        """
        idx = self._map.bisect(min_addr) - 1
        if idx >= 0 and len(self._map) > idx + 1:
            e_addr, e_tags = self._map.peekitem(idx)
            e_addr_next, _ = self._map.peekitem(idx + 1)
            return (e_addr <= min_addr) and (end_addr <= e_addr_next) and (e_tags == tags)
        return False

    def _eliminate_extraneous_stops(self, min_addr: int, max_addr: int) -> None:
        """
        Canonicalize the map by eliminating adjacent stops with identical tags both inside and directly outside of
        [min_addr, max_addr].
        """
        keys_to_drop = []
        prev_tags = None
        for addr, _, tags in self.irange(min_addr, max_addr):
            if tags == prev_tags:
                keys_to_drop.append(addr)
            else:
                prev_tags = tags
        for addr in keys_to_drop:
            del self._map[addr]

    def irange(self, min_addr: Optional[int] = None, max_addr: Optional[int] = None) -> Iterator[Tuple[int, int, int]]:
        """
        Iterate over intervals intersecting [min_addr, max_addr], yielding interval (addr, size, tags) tuples. Implicit
        gap intervals (with tags=0) are also returned.

        :param min_addr: Minimum address (inclusive) to begin iterating from. If None, iterate from start of map.
        :param max_addr: Maximum address (inclusive) to iterate to. If None, iterate to end of map.
        """
        if not self._map:
            return
        start_idx = 0 if min_addr is None else max(0, self._map.bisect_left(min_addr) - 1)
        stop_idx = None if max_addr is None else (self._map.bisect(max_addr) + 1)
        start_addr, tags = self._map.peekitem(start_idx)
        for addr in self._map.islice(start_idx + 1, stop_idx):
            yield (start_addr, addr - start_addr, tags)
            tags = self._map[addr]
            start_addr = addr
'''
Implementation of a Decision Tree using ID3 Algorithm.
Author - Anushka Nambiar
Requirements:
- Numpy
- Pandas
Python:
- 3.10
Algorithm:
1. Calculate the entropy of the whole dataset.
2. Calculate the feature_entropy (entropy of each feature).
3. Calculate the Information Gain of each attribute using it's corresponding feature_entropy.
4. The attribute with the highest information gain will be the parent node.
5. Continue 2, 3 and 4 till the decision tree is completed.
'''
import numpy as np
import pandas as pd
# Training data: the classic PlayTennis dataset (categorical features + a "play" target column).
df = pd.read_csv('Algorithms/Python/Machine Learning/Decision Tree/PlayTennis.csv')
# Calculate the entropy for the whole dataset.
# Entropy(S) = -P(Yes)log2(P(Yes)) - P(No)log2(P(No))
def total_entropy(df, label, class_name):
    """Entropy of the whole dataset with respect to the target column `label`.

    Entropy(S) = -sum_c P(c) * log2(P(c)) over the classes in `class_name`.
    """
    rows = df.shape[0]
    entropy = 0
    for i in class_name:
        class_count = df[df[label] == i].shape[0]
        # Skip absent classes: lim p->0 of -p*log2(p) is 0 (avoids log2(0) -> nan).
        if class_count != 0:
            entropy += -(class_count / rows) * np.log2(class_count / rows)
    return entropy


def feature_entropy(df, label, class_name):
    """Entropy of the target column within a subset of rows (one feature value)."""
    rows = df.shape[0]
    entropy = 0
    for i in class_name:
        label_count = df[df[label] == i].shape[0]
        entropy_class = 0
        if label_count != 0:  # guard against log2(0)
            probability = label_count / rows
            entropy_class = -probability * np.log2(probability)
        entropy += entropy_class
    return entropy


def info_gain(feature_name, df, label, class_list):
    """Information gain of `feature_name`:

    Entropy(S) - sum over feature values of P(value) * Entropy(subset).
    """
    feature_value_list = df[feature_name].unique()
    row = df.shape[0]
    feature_info = 0.0
    for feature_value in feature_value_list:
        feature_value_data = df[df[feature_name] == feature_value]
        feature_value_count = feature_value_data.shape[0]
        feature_value_entropy = feature_entropy(feature_value_data, label, class_list)
        feature_info += (feature_value_count / row) * feature_value_entropy
    return total_entropy(df, label, class_list) - feature_info


def max_infogain(data, label, class_list):
    """Return the feature (non-label column) with the highest information gain."""
    feature_list = data.columns.drop(label)
    max_info_gain = -1
    max_info = None
    for feature in feature_list:
        feature_info_gain = info_gain(feature, data, label, class_list)
        if max_info_gain < feature_info_gain:
            max_info_gain = feature_info_gain
            max_info = feature
    return max_info


def sub_tree(feature_name, data, label, class_list):
    """Build the subtree for `feature_name`.

    Returns (tree, remaining_data). Feature values whose rows all share a single
    class become leaves, and their rows are removed from `data`; undecided
    values are marked "?" for later recursive expansion.
    """
    feature_value_count_dict = data[feature_name].value_counts(sort=False)
    tree = {}
    # Series.iteritems() was removed in pandas 2.0; items() is the equivalent.
    for feature_value, count in feature_value_count_dict.items():
        feature_value_data = data[data[feature_name] == feature_value]
        assigned_to_node = False
        for c in class_list:
            class_count = feature_value_data[feature_value_data[label] == c].shape[0]
            if class_count == count:  # pure node: every row has class c
                tree[feature_value] = c
                data = data[data[feature_name] != feature_value]
                assigned_to_node = True
        if not assigned_to_node:
            tree[feature_value] = "?"  # impure: expand recursively later
    return tree, data


def print_tree(root, prev_feature_value, data, label, class_list):
    """Recursively grow the decision tree in place under `root`."""
    if data.shape[0] != 0:
        max_info = max_infogain(data, label, class_list)
        tree, data = sub_tree(max_info, data, label, class_list)
        next_root = None
        if prev_feature_value is not None:
            root[prev_feature_value] = dict()
            root[prev_feature_value][max_info] = tree
            next_root = root[prev_feature_value][max_info]
        else:
            root[max_info] = tree
            next_root = root[max_info]
        for node, branch in list(next_root.items()):
            if branch == "?":  # impure branch: recurse on its row subset
                feature_value_data = data[data[max_info] == node]
                print_tree(next_root, node, feature_value_data, label, class_list)


def id3(df, label):
    """Run ID3 on `df` with target column `label`; print and return the tree."""
    data = df.copy()
    tree = {}
    class_list = data[label].unique()
    print_tree(tree, None, data, label, class_list)
    print(tree)
    return tree
# Build the decision tree for the PlayTennis data using "play" as the target.
tree = id3(df, 'play')
import json
from sqlalchemy import Column, Integer, String, Text, Boolean
from sqlalchemy.dialects.postgresql import JSONB
from superagi.models.base_model import DBBaseModel
class IterationWorkflowStep(DBBaseModel):
    """
    Step of an iteration workflow

    Attributes:
        id (int): The unique identifier of the agent workflow step.
        iteration_workflow_id (int): The ID of the agent workflow to which this step belongs.
        unique_id (str): The unique identifier of the step.
        prompt (str): The prompt for the step.
        variables (str): The variables associated with the step.
        output_type (str): The output type of the step.
        step_type (str): The type of the step (TRIGGER, NORMAL).
        next_step_id (int): The ID of the next step in the workflow.
        history_enabled (bool): Indicates whether history is enabled for the step.
        completion_prompt (str): The completion prompt for the step.
    """

    __tablename__ = 'iteration_workflow_steps'

    id = Column(Integer, primary_key=True)
    iteration_workflow_id = Column(Integer)
    unique_id = Column(String)
    prompt = Column(Text)
    variables = Column(Text)
    output_type = Column(String)
    step_type = Column(String)  # TRIGGER, NORMAL
    next_step_id = Column(Integer)
    history_enabled = Column(Boolean)
    completion_prompt = Column(Text)

    def __repr__(self):
        """
        Returns a string representation of the IterationWorkflowStep object.

        Returns:
            str: String representation of the IterationWorkflowStep.
        """
        # NOTE(review): previously labelled itself "AgentWorkflowStep" and was
        # missing the closing parenthesis (copy/paste from AgentWorkflowStep).
        return f"IterationWorkflowStep(id={self.id}, next_step_id={self.next_step_id}, " \
               f"prompt='{self.prompt}')"

    def to_dict(self):
        """
        Converts the IterationWorkflowStep object to a dictionary.

        NOTE(review): this previously read a non-existent ``agent_id`` attribute
        (copied from AgentWorkflowStep) and raised AttributeError at runtime;
        it now serializes the model's actual ``iteration_workflow_id`` column.

        Returns:
            dict: Dictionary representation of the IterationWorkflowStep.
        """
        return {
            'id': self.id,
            'next_step_id': self.next_step_id,
            'iteration_workflow_id': self.iteration_workflow_id,
            'prompt': self.prompt
        }

    def to_json(self):
        """
        Converts the IterationWorkflowStep object to a JSON string.

        Returns:
            str: JSON string representation of the IterationWorkflowStep.
        """
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_data):
        """
        Creates an IterationWorkflowStep object from a JSON string.

        Args:
            json_data (str): JSON string representing the IterationWorkflowStep.

        Returns:
            IterationWorkflowStep: Object created from the JSON string.
        """
        data = json.loads(json_data)
        # NOTE(review): previously passed an unknown ``agent_id`` keyword to the
        # model constructor; it now round-trips the fields emitted by to_dict().
        return cls(
            id=data['id'],
            prompt=data['prompt'],
            iteration_workflow_id=data.get('iteration_workflow_id'),
            next_step_id=data['next_step_id']
        )

    @classmethod
    def find_by_id(cls, session, step_id: int):
        """Fetch a step by primary key, or None if it does not exist."""
        return session.query(IterationWorkflowStep).filter(IterationWorkflowStep.id == step_id).first()

    @classmethod
    def find_or_create_step(cls, session, iteration_workflow_id: int, unique_id: str,
                            prompt: str, variables: str, step_type: str, output_type: str,
                            completion_prompt: str = "", history_enabled: bool = False):
        """Upsert a step keyed by ``unique_id`` and refresh its fields."""
        workflow_step = session.query(IterationWorkflowStep).filter(IterationWorkflowStep.unique_id == unique_id).first()
        if workflow_step is None:
            workflow_step = IterationWorkflowStep(unique_id=unique_id)
            session.add(workflow_step)
            session.commit()
        workflow_step.prompt = prompt
        workflow_step.variables = variables
        workflow_step.step_type = step_type
        workflow_step.output_type = output_type
        workflow_step.iteration_workflow_id = iteration_workflow_id
        # -1 marks "no next step assigned yet".
        workflow_step.next_step_id = -1
        workflow_step.history_enabled = history_enabled
        if completion_prompt:
            workflow_step.completion_prompt = completion_prompt
        session.commit()
        return workflow_step
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Tests for BaseDistribution API points."""
__author__ = ["fkiraly"]
import numpy as np
import pandas as pd
import pytest
from sktime.datatypes import check_is_mtype
from sktime.tests.test_all_estimators import BaseFixtureGenerator, QuickTester
class DistributionFixtureGenerator(BaseFixtureGenerator):
    """Fixture generator for probability distributions.
    Fixtures parameterized
    ----------------------
    estimator_class: estimator inheriting from BaseObject
        ranges over estimator classes not excluded by EXCLUDE_ESTIMATORS, EXCLUDED_TESTS
    estimator_instance: instance of estimator inheriting from BaseObject
        ranges over estimator classes not excluded by EXCLUDE_ESTIMATORS, EXCLUDED_TESTS
        instances are generated by create_test_instance class method
    """
    # Restrict fixture generation to estimators registered as distributions.
    estimator_type_filter = "distribution"
def _has_capability(distr, method):
"""Check whether distr has capability of method.
Parameters
----------
distr : BaseDistribution object
method : str
method name to check
Returns
-------
whether distr has capability method, according to tags
capabilities:approx and capabilities:exact
"""
approx_methods = distr.get_tag("capabilities:approx")
exact_methods = distr.get_tag("capabilities:exact")
return method in approx_methods or method in exact_methods
# Method groups used to parameterize the distribution tests below.
METHODS_SCALAR = ["mean", "var", "energy"]  # no-argument, frame-valued methods
METHODS_SCALAR_POS = ["var", "energy"] # result always non-negative?
METHODS_X = ["energy", "pdf", "log_pdf", "cdf"]  # methods taking a sample-like argument
METHODS_X_POS = ["energy", "pdf", "cdf"] # result always non-negative?
METHODS_P = ["ppf"]  # methods taking a probability-like argument
METHODS_ROWWISE = ["energy"] # results in one column
class TestAllDistributions(DistributionFixtureGenerator, QuickTester):
    """Module level tests for all sktime parameter fitters."""

    def test_sample(self, estimator_instance):
        """Test sample expected return."""
        d = estimator_instance
        res = d.sample()
        # A single sample mirrors the distribution's shape and axes.
        assert d.shape == res.shape
        assert (res.index == d.index).all()
        assert (res.columns == d.columns).all()

        # sample(n) returns a panel: n stacked copies of the frame layout.
        res_panel = d.sample(3)
        dummy_panel = pd.concat([res, res, res], keys=range(3))
        assert dummy_panel.shape == res_panel.shape
        assert (res_panel.index == dummy_panel.index).all()
        assert (res_panel.columns == dummy_panel.columns).all()

    @pytest.mark.parametrize("method", METHODS_SCALAR, ids=METHODS_SCALAR)
    def test_methods_scalar(self, estimator_instance, method):
        """Test expected return of scalar methods."""
        if not _has_capability(estimator_instance, method):
            return None
        d = estimator_instance
        res = getattr(estimator_instance, method)()
        _check_output_format(res, d, method)

    @pytest.mark.parametrize("method", METHODS_X, ids=METHODS_X)
    def test_methods_x(self, estimator_instance, method):
        """Test expected return of methods that take sample-like argument."""
        if not _has_capability(estimator_instance, method):
            return None
        d = estimator_instance
        x = d.sample()
        res = getattr(estimator_instance, method)(x)
        _check_output_format(res, d, method)

    @pytest.mark.parametrize("method", METHODS_P, ids=METHODS_P)
    def test_methods_p(self, estimator_instance, method):
        """Test expected return of methods that take percentage-like argument."""
        if not _has_capability(estimator_instance, method):
            return None
        d = estimator_instance
        # Random probabilities in [0, 1) with the distribution's own layout.
        np_unif = np.random.uniform(size=d.shape)
        p = pd.DataFrame(np_unif, index=d.index, columns=d.columns)
        res = getattr(estimator_instance, method)(p)
        _check_output_format(res, d, method)

    @pytest.mark.parametrize("q", [0.7, [0.1, 0.3, 0.9]])
    def test_quantile(self, estimator_instance, q):
        """Test expected return of quantile method."""
        if not _has_capability(estimator_instance, "ppf"):
            return None
        d = estimator_instance

        def check_quantile_output(obj, q):
            # Output must be a pred_quantiles-typed frame with the distribution's
            # index and a (variable, quantile) column MultiIndex.
            assert check_is_mtype(obj, "pred_quantiles", "Proba")
            assert (obj.index == d.index).all()
            if not isinstance(q, list):
                q = [q]
            expected_columns = pd.MultiIndex.from_product([d.columns, q])
            assert (obj.columns == expected_columns).all()

        res = d.quantile(q)
        check_quantile_output(res, q)
def _check_output_format(res, dist, method):
    """Check output format expectations for BaseDistribution tests.

    Parameters
    ----------
    res : return value of calling `method` on `dist`
    dist : the distribution instance the method was called on
    method : str, name of the method that produced `res`
    """
    # Row-wise methods (e.g. energy) collapse columns to a single one.
    if method in METHODS_ROWWISE:
        exp_shape = (dist.shape[0], 1)
    else:
        exp_shape = dist.shape
    assert res.shape == exp_shape
    assert (res.index == dist.index).all()
    if method not in METHODS_ROWWISE:
        assert (res.columns == dist.columns).all()
    # Methods documented as non-negative must return non-negative values only.
    if method in METHODS_SCALAR_POS or method in METHODS_X_POS:
        assert (res >= 0).all().all()
#!/usr/bin/env python3
from collections import defaultdict
import os
import re
import unittest
from openpilot.common.basedir import BASEDIR
from openpilot.selfdrive.car.car_helpers import interfaces, get_interface_attr
from openpilot.selfdrive.car.docs import CARS_MD_OUT, CARS_MD_TEMPLATE, generate_cars_md, get_all_car_info
from openpilot.selfdrive.car.docs_definitions import Cable, Column, PartType, Star
from openpilot.selfdrive.car.honda.values import CAR as HONDA
from openpilot.selfdrive.debug.dump_car_info import dump_car_info
from openpilot.selfdrive.debug.print_docs_diff import print_car_info_diff
class TestCarDocs(unittest.TestCase):
    """Checks the generated car compatibility documentation and its conventions."""

    @classmethod
    def setUpClass(cls):
        cls.all_cars = get_all_car_info()

    def test_generator(self):
        """The checked-in CARS.md must match what the generator produces."""
        generated_cars_md = generate_cars_md(self.all_cars, CARS_MD_TEMPLATE)
        with open(CARS_MD_OUT, "r") as f:
            current_cars_md = f.read()
        self.assertEqual(generated_cars_md, current_cars_md,
                         "Run selfdrive/car/docs.py to update the compatibility documentation")

    def test_docs_diff(self):
        """Dumping and diffing the car info against itself must not raise."""
        dump_path = os.path.join(BASEDIR, "selfdrive", "car", "tests", "cars_dump")
        dump_car_info(dump_path)
        print_car_info_diff(dump_path)
        os.remove(dump_path)

    def test_duplicate_years(self):
        """No (make, model) pair may list the same model year twice."""
        make_model_years = defaultdict(list)
        for car in self.all_cars:
            with self.subTest(car_info_name=car.name):
                make_model = (car.make, car.model)
                for year in car.year_list:
                    self.assertNotIn(year, make_model_years[make_model], f"{car.name}: Duplicate model year")
                    make_model_years[make_model].append(year)

    def test_missing_car_info(self):
        """Every supported platform must have a corresponding CarInfo entry."""
        all_car_info_platforms = get_interface_attr("CAR_INFO", combine_brands=True).keys()
        for platform in sorted(interfaces.keys()):
            with self.subTest(platform=platform):
                self.assertTrue(platform in all_car_info_platforms, f"Platform: {platform} doesn't exist in CarInfo")

    def test_naming_conventions(self):
        # Asserts market-standard car naming conventions by brand
        for car in self.all_cars:
            with self.subTest(car=car):
                tokens = car.model.lower().split(" ")
                if car.car_name == "hyundai":
                    self.assertNotIn("phev", tokens, "Use `Plug-in Hybrid`")
                    self.assertNotIn("hev", tokens, "Use `Hybrid`")
                    if "plug-in hybrid" in car.model.lower():
                        self.assertIn("Plug-in Hybrid", car.model, "Use correct capitalization")
                    if car.make != "Kia":
                        self.assertNotIn("ev", tokens, "Use `Electric`")
                elif car.car_name == "toyota":
                    if "rav4" in tokens:
                        self.assertIn("RAV4", car.model, "Use correct capitalization")

    def test_torque_star(self):
        # Asserts brand-specific assumptions around steering torque star
        for car in self.all_cars:
            with self.subTest(car=car):
                # honda sanity check, it's the definition of a no torque star
                if car.car_fingerprint in (HONDA.ACCORD, HONDA.CIVIC, HONDA.CRV, HONDA.ODYSSEY, HONDA.PILOT):
                    self.assertEqual(car.row[Column.STEERING_TORQUE], Star.EMPTY, f"{car.name} has full torque star")
                elif car.car_name in ("toyota", "hyundai"):
                    self.assertNotEqual(car.row[Column.STEERING_TORQUE], Star.EMPTY, f"{car.name} has no torque star")

    def test_year_format(self):
        """Year ranges must use the short form, not YYYY-YYYY."""
        for car in self.all_cars:
            with self.subTest(car=car):
                self.assertIsNone(re.search(r"\d{4}-\d{4}", car.name), f"Format years correctly: {car.name}")

    def test_harnesses(self):
        """Each car must declare exactly one connector, one mount and an OBD-C cable."""
        for car in self.all_cars:
            with self.subTest(car=car):
                if car.name == "comma body":
                    raise unittest.SkipTest

                car_part_type = [p.part_type for p in car.car_parts.all_parts()]
                car_parts = list(car.car_parts.all_parts())
                self.assertTrue(len(car_parts) > 0, f"Need to specify car parts: {car.name}")
                self.assertTrue(car_part_type.count(PartType.connector) == 1, f"Need to specify one harness connector: {car.name}")
                self.assertTrue(car_part_type.count(PartType.mount) == 1, f"Need to specify one mount: {car.name}")
                self.assertTrue(Cable.right_angle_obd_c_cable_1_5ft in car_parts, f"Need to specify a right angle OBD-C cable (1.5ft): {car.name}")
if __name__ == "__main__":
    unittest.main()
#!/usr/bin/env python
"""
_JobTestBase
Base class for tests of WMBS job class.
"""
from __future__ import absolute_import, division, print_function
import threading
import unittest
from WMCore.DAOFactory import DAOFactory
from WMCore.DataStructs.Run import Run
from WMCore.Services.UUIDLib import makeUUID
from WMCore.WMBS.File import File
from WMCore.WMBS.Fileset import Fileset as Fileset
from WMCore.WMBS.Job import Job
from WMCore.WMBS.JobGroup import JobGroup
from WMCore.WMBS.Subscription import Subscription
from WMCore.WMBS.Workflow import Workflow
from WMQuality.TestInit import TestInit
class JobTestBase(unittest.TestCase):
    """
    Base class for tests of WMBS job class (Job_t and JobWorkUnit_t)
    """

    def __init__(self, *args, **kwargs):
        """
        Define some class instance variables we'll fill later
        """
        super(JobTestBase, self).__init__(*args, **kwargs)
        self.testFileA = None
        self.testFileB = None
        self.testWorkflow = None
        self.testJob = None
        return

    def setUp(self):
        """
        _setUp_

        Setup the database and logging connection. Try to create all of the
        WMBS tables.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        # threading.currentThread() is a deprecated alias of current_thread().
        myThread = threading.current_thread()
        self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)

        locationNew = self.daoFactory(classname="Locations.New")
        locationNew.execute(siteName="test.site.ch", pnn="T2_CH_CERN")
        locationNew.execute(siteName="test2.site.ch", pnn="T2_CH_CERN")
        return

    def tearDown(self):
        """
        _tearDown_

        Drop all the WMBS tables.
        """
        self.testInit.clearDatabase()

    @staticmethod
    def createTestJob(subscriptionType="Merge"):
        """
        _createTestJob_

        Create a test job with two files as input. This will also create the
        appropriate workflow, jobgroup and subscription.
        """
        testWorkflow = Workflow(spec=makeUUID(), owner="Simon", name=makeUUID(), task="Test")
        testWorkflow.create()

        testWMBSFileset = Fileset(name="TestFileset")
        testWMBSFileset.create()

        testSubscription = Subscription(fileset=testWMBSFileset, workflow=testWorkflow, type=subscriptionType)
        testSubscription.create()

        testJobGroup = JobGroup(subscription=testSubscription)
        testJobGroup.create()

        testFileA = File(lfn="/this/is/a/lfnA", size=1024, events=10)
        testFileA.addRun(Run(1, *[45]))
        testFileB = File(lfn="/this/is/a/lfnB", size=1024, events=10)
        testFileB.addRun(Run(1, *[46]))
        testFileA.create()
        testFileB.create()

        testJob = Job(name=makeUUID(), files=[testFileA, testFileB])
        testJob["couch_record"] = "somecouchrecord"
        testJob["location"] = "test.site.ch"
        testJob.create(group=testJobGroup)
        testJob.associateFiles()
        return testJob

    def createSingleJobWorkflow(self):
        """
        Create a workflow with one jobs and two files and store the results in instance variables
        """
        self.testWorkflow = Workflow(spec="spec.xml", owner="Simon", name="wf001", task="Test")
        self.testWorkflow.create()

        testWMBSFileset = Fileset(name="TestFileset")
        testWMBSFileset.create()

        testSubscription = Subscription(fileset=testWMBSFileset, workflow=self.testWorkflow)
        testSubscription.create()

        testJobGroup = JobGroup(subscription=testSubscription)
        testJobGroup.create()

        self.testFileA = File(lfn="/this/is/a/lfnA", size=1024, events=10)
        self.testFileA.addRun(Run(1, *[45]))
        self.testFileB = File(lfn="/this/is/a/lfnB", size=1024, events=10)
        self.testFileB.addRun(Run(1, *[46]))
        self.testFileA.create()
        self.testFileB.create()

        self.testJob = Job(name="TestJob", files=[self.testFileA, self.testFileB])
        self.testJob.create(group=testJobGroup)
        self.testJob.associateFiles()
import pytest
import drjit as dr
import mitsuba as mi
def create_camera(o, d, fov=34, fov_axis="x", s_open=1.5, s_close=5, near_clip=1.0):
    """Build a mitsuba perspective camera at origin `o` looking along `d`.

    Uses a 512x256 hdrfilm and a look_at transform with +Y up; the remaining
    parameters map directly onto the perspective sensor plugin.
    """
    target = [o[0] + d[0], o[1] + d[1], o[2] + d[2]]
    to_world = mi.ScalarTransform4f.look_at(
        origin=o,
        target=target,
        up=[0, 1, 0]
    )
    return mi.load_dict({
        "type": "perspective",
        "near_clip": near_clip,
        "far_clip": 35.0,
        "focus_distance": 15.0,
        "fov": fov,
        "fov_axis": fov_axis,
        "shutter_open": s_open,
        "shutter_close": s_close,
        "to_world": to_world,
        "film": {
            "type": "hdrfilm",
            "width": 512,
            "height": 256,
        },
    })
# Camera positions and view directions exercised by the parameterized tests below.
origins = [[1.0, 0.0, 1.5], [1.0, 4.0, 1.5]]
directions = [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]]
@pytest.mark.parametrize("origin", origins)
@pytest.mark.parametrize("direction", directions)
@pytest.mark.parametrize("s_open", [0.0, 1.5])
@pytest.mark.parametrize("s_time", [0.0, 3.0])
def test01_create(variant_scalar_rgb, origin, direction, s_open, s_time):
    """Constructing a camera stores clip planes, shutter interval and pose as given."""
    sensor = create_camera(origin, direction, s_open=s_open, s_close=s_open + s_time)

    # Clip/focus values are the ones hard-coded in create_camera.
    assert dr.allclose(sensor.near_clip(), 1)
    assert dr.allclose(sensor.far_clip(), 35)
    assert dr.allclose(sensor.focus_distance(), 15)

    # The shutter round-trips as (open time, duration).
    assert dr.allclose(sensor.shutter_open(), s_open)
    assert dr.allclose(sensor.shutter_open_time(), s_time)

    # A pinhole camera needs no aperture sample and is a point in space.
    assert not sensor.needs_aperture_sample()
    assert sensor.bbox() == mi.BoundingBox3f(origin, origin)

    # The world transform matches an equivalent look_at construction.
    expected = mi.Transform4f.look_at(origin, mi.Vector3f(origin) + direction, [0, 1, 0])
    assert dr.allclose(sensor.world_transform().matrix, expected.matrix)
@pytest.mark.parametrize("origin", origins)
@pytest.mark.parametrize("direction", directions)
def test02_sample_ray(variants_vec_spectral, origin, direction):
    """Check the correctness of the sample_ray() method"""
    near_clip = 1.0
    camera = create_camera(origin, direction, near_clip=near_clip)

    time = 0.5
    wav_sample = [0.5, 0.33, 0.1]
    pos_sample = [[0.2, 0.1, 0.2], [0.6, 0.9, 0.2]]
    aperture_sample = 0  # Not being used

    ray, spec_weight = camera.sample_ray(time, wav_sample, pos_sample, aperture_sample)

    # Importance sample wavelength and weight
    wav, spec = mi.sample_rgb_spectrum(mi.sample_shifted(wav_sample))

    assert dr.allclose(ray.wavelengths, wav)
    assert dr.allclose(spec_weight, spec)
    assert dr.allclose(ray.time, time)

    # The ray origin must lie on the near-clip plane along the ray direction.
    inv_z = dr.rcp((camera.world_transform().inverse() @ ray.d).z)
    o = mi.Point3f(origin) + near_clip * inv_z * mi.Vector3f(ray.d)
    assert dr.allclose(ray.o, o, atol=1e-4)

    # Check that a [0.5, 0.5] position_sample generates a ray
    # that points in the camera direction
    ray, _ = camera.sample_ray(0, 0, [0.5, 0.5], 0)
    assert dr.allclose(ray.d, direction, atol=1e-7)
@pytest.mark.parametrize("origin", origins)
@pytest.mark.parametrize("direction", directions)
def test03_sample_ray_differential(variants_vec_spectral, origin, direction):
    """Check the correctness of the sample_ray_differential() method"""
    near_clip = 1.0
    camera = create_camera(origin, direction, near_clip=near_clip)

    time = 0.5
    wav_sample = [0.5, 0.33, 0.1]
    pos_sample = [[0.2, 0.1, 0.2], [0.6, 0.9, 0.2]]

    ray, spec_weight = camera.sample_ray_differential(time, wav_sample, pos_sample, 0)

    # Importance sample wavelength and weight
    wav, spec = mi.sample_rgb_spectrum(mi.sample_shifted(wav_sample))

    assert dr.allclose(ray.wavelengths, wav)
    assert dr.allclose(spec_weight, spec)
    assert dr.allclose(ray.time, time)

    # The ray origin must lie on the near-clip plane along the ray direction.
    inv_z = dr.rcp((camera.world_transform().inverse() @ ray.d).z)
    o = mi.Point3f(origin) + near_clip * inv_z * mi.Vector3f(ray.d)
    assert dr.allclose(ray.o, o, atol=1e-4)

    # Check that the derivatives are orthogonal
    assert dr.allclose(dr.dot(ray.d_x - ray.d, ray.d_y - ray.d), 0, atol=1e-7)

    # Check that a [0.5, 0.5] position_sample generates a ray
    # that points in the camera direction
    ray_center, _ = camera.sample_ray_differential(0, 0, [0.5, 0.5], 0)
    assert dr.allclose(ray_center.d, direction, atol=1e-7)

    # Check correctness of the ray derivatives

    # Deltas in screen space
    dx = 1.0 / camera.film().crop_size().x
    dy = 1.0 / camera.film().crop_size().y

    # Sample the rays by offsetting the position_sample with the deltas
    ray_dx, _ = camera.sample_ray_differential(0, 0, [0.5 + dx, 0.5], 0)
    ray_dy, _ = camera.sample_ray_differential(0, 0, [0.5, 0.5 + dy], 0)

    assert dr.allclose(ray_dx.d, ray_center.d_x)
    assert dr.allclose(ray_dy.d, ray_center.d_y)
@pytest.mark.parametrize("origin", [[1.0, 0.0, 1.5]])
@pytest.mark.parametrize("direction", [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])
@pytest.mark.parametrize("fov", [34, 80])
def METHOD_NAME(variants_vec_spectral, origin, direction, fov):
    """
    Check that sampling position_sample at the extremities of the unit square
    along the fov_axis should generate a ray direction that make angle of fov/2
    with the camera direction.
    """
    def check_fov(camera, sample):
        # Angle between the sampled ray and the optical axis must equal fov/2.
        ray, _ = camera.sample_ray(0, 0, sample, 0)
        assert dr.allclose(dr.acos(dr.dot(ray.d, direction)) * 180 / dr.pi, fov / 2)

    # In the configuration, aspect==1.5, so 'larger' should give the 'x'-axis
    for fov_axis in ['x', 'larger']:
        camera = create_camera(origin, direction, fov=fov, fov_axis=fov_axis)
        for sample in [[0.0, 0.5], [1.0, 0.5]]:
            check_fov(camera, sample)

    # In the configuration, aspect==1.5, so 'smaller' should give the 'y'-axis
    for fov_axis in ['y', 'smaller']:
        camera = create_camera(origin, direction, fov=fov, fov_axis=fov_axis)
        for sample in [[0.5, 0.0], [0.5, 1.0]]:
            check_fov(camera, sample)

    # Check the 4 corners for the `diagonal` case
    camera = create_camera(origin, direction, fov=fov, fov_axis='diagonal')
    for sample in [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]:
        check_fov(camera, sample)
def test05_spectrum_sampling(variants_vec_spectral):
    """Wavelength sampling: default RGB range, a custom sensor SRF range, and
    the error raised when an SRF is defined on both the sensor and the film."""
    # Check RGB wavelength sampling
    camera = mi.load_dict({
        'type': 'perspective',
    })
    wavelengths, _ = camera.sample_wavelengths(dr.zeros(mi.SurfaceInteraction3f), mi.Float([0.1, 0.4, 0.9]))
    assert (dr.all_nested((wavelengths >= mi.MI_CIE_MIN) & (wavelengths <= mi.MI_CIE_MAX)))

    # Check custom SRF wavelength sampling
    camera = mi.load_dict({
        'type': 'perspective',
        'srf': {
            'type': 'spectrum',
            'value': [(1200,1.0), (1400,1.0)]
        }
    })
    wavelengths, _ = camera.sample_wavelengths(dr.zeros(mi.SurfaceInteraction3f), mi.Float([0.1, 0.4, 0.9]))
    assert (dr.all_nested((wavelengths >= 1200) & (wavelengths <= 1400)))

    # Check error if double SRF is defined
    with pytest.raises(RuntimeError, match=r'Sensor\(\)'):
        camera = mi.load_dict({
            'type': 'perspective',
            'srf': {
                'type': 'spectrum',
                'value': [(1200,1.0), (1400,1.0)]
            },
            'film': {
                'type': 'specfilm',
                'srf_test': {
                    'type': 'spectrum',
                    'value': [(34,1.0),(79,1.0),(120,1.0)]
                }
            }
        })
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import glob
import numpy as np
from multiprocessing import Pool
from functools import partial
from shapely.geometry import Polygon
import argparse
# DOTA v1.0 category names (15 classes).
wordname_15 = [
    'plane', 'baseball-diamond', 'bridge', 'ground-track-field',
    'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
    'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout',
    'harbor', 'swimming-pool', 'helicopter'
]

# DOTA v1.5 adds 'container-crane'; v2.0 additionally adds airport/helipad.
wordname_16 = wordname_15 + ['container-crane']
wordname_18 = wordname_16 + ['airport', 'helipad']

# Maps the --data_type CLI value to the corresponding class list.
DATA_CLASSES = {
    'dota10': wordname_15,
    'dota15': wordname_16,
    'dota20': wordname_18
}
def rbox_iou(g, p):
    """
    IoU of two rotated boxes.

    Args:
        g, p: sequences whose first 8 values are the polygon corner
            coordinates (x1, y1, ..., x4, y4); extra values are ignored.

    Returns:
        float: intersection-over-union, or 0 for invalid/degenerate geometry.
    """
    g = np.array(g)
    p = np.array(p)
    g = Polygon(g[:8].reshape((4, 2)))
    p = Polygon(p[:8].reshape((4, 2)))
    # buffer(0) repairs most self-intersecting polygons.
    g = g.buffer(0)
    p = p.buffer(0)
    if not g.is_valid or not p.is_valid:
        return 0
    # FIX: operate on g/p directly instead of re-wrapping the already-built
    # Polygon objects in Polygon(...) a second time.
    inter = g.intersection(p).area
    union = g.area + p.area - inter
    if union == 0:
        return 0
    return inter / union
def py_cpu_nms_poly_fast(dets, thresh):
    """
    NMS for rotated boxes: a cheap axis-aligned (HBB) overlap prefilter,
    refined with the exact polygon IoU where the HBBs actually overlap.

    Args:
        dets: (N, 9) array — 8 polygon coordinates followed by the score.
        thresh: IoU threshold; boxes with IoU > thresh are suppressed.

    Returns: index of keep
    """
    obbs = dets[:, 0:-1]
    # Axis-aligned bounding box of each polygon (prefilter geometry).
    x1 = np.min(obbs[:, 0::2], axis=1)
    y1 = np.min(obbs[:, 1::2], axis=1)
    x2 = np.max(obbs[:, 0::2], axis=1)
    y2 = np.max(obbs[:, 1::2], axis=1)
    scores = dets[:, 8]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)

    polys = []
    for i in range(len(dets)):
        tm_polygon = [
            dets[i][0], dets[i][1], dets[i][2], dets[i][3], dets[i][4],
            dets[i][5], dets[i][6], dets[i][7]
        ]
        polys.append(tm_polygon)
    polys = np.array(polys)

    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # HBB overlap of the highest-scoring box with all remaining boxes.
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1)
        h = np.maximum(0.0, yy2 - yy1)
        hbb_inter = w * h
        hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter)
        # Only candidates whose HBBs overlap need the exact (slow) rbox IoU.
        h_inds = np.where(hbb_ovr > 0)[0]
        tmp_order = order[h_inds + 1]
        for j in range(tmp_order.size):
            hbb_ovr[h_inds[j]] = rbox_iou(polys[i], polys[tmp_order[j]])
        # BUG FIX: removed dead debug code (`math.isnan(ovr[0])` + pdb inside
        # a bare try/except) — `math`/`pdb` were never imported and `ovr` was
        # always empty, so the block only ever raised and swallowed errors.
        inds = np.where(hbb_ovr <= thresh)[0]
        order = order[inds + 1]
    return keep
def poly2origpoly(poly, x, y, rate):
    """Map a polygon from patch coordinates back to original-image
    coordinates: add the patch offset (x, y), then undo the scale `rate`."""
    origpoly = []
    for px, py in zip(poly[0::2], poly[1::2]):
        origpoly.append(float(px + x) / float(rate))
        origpoly.append(float(py + y) / float(rate))
    return origpoly
def nmsbynamedict(nameboxdict, nms, thresh):
    """
    Run `nms` on the detections of every image independently.

    Args:
        nameboxdict: mapping image name -> list of detections
        nms: callable invoked as nms(dets_array, thresh) returning keep indices
        thresh: nms threshold

    Returns: nms result as dict (same keys, filtered detection lists)
    """
    nameboxnmsdict = {}
    for imgname, dets in nameboxdict.items():
        keep = nms(np.array(dets), thresh)
        nameboxnmsdict[imgname] = [dets[index] for index in keep]
    return nameboxnmsdict
def METHOD_NAME(output_dir, nms, nms_thresh, pred_class_lst):
    """
    Merge per-patch detections of one class back into original-image
    coordinates, apply per-image NMS, and write '<output_dir>/<class>.txt'.

    Args:
        output_dir: output_dir
        nms: nms
        nms_thresh: IoU threshold forwarded to `nms`
        pred_class_lst: tuple (class_name, list of prediction lines); each
            line is '<patch_name> <confidence> <8 poly coords>'
    """
    class_name, pred_bbox_list = pred_class_lst
    nameboxdict = {}
    for line in pred_bbox_list:
        splitline = line.split(' ')
        subname = splitline[0]
        splitname = subname.split('__')
        oriname = splitname[0]
        # Patch names are assumed to encode scale and offsets, e.g.
        # '<img>__<rate>__<x>___<y>' -- TODO confirm against the slicing tool.
        pattern1 = re.compile(r'__\d+___\d+')
        x_y = re.findall(pattern1, subname)
        x_y_2 = re.findall(r'\d+', x_y[0])
        x, y = int(x_y_2[0]), int(x_y_2[1])

        pattern2 = re.compile(r'__([\d+\.]+)__\d+___')
        rate = re.findall(pattern2, subname)[0]

        confidence = splitline[1]
        poly = list(map(float, splitline[2:]))
        # Undo the patch offset and scaling to get original-image coords.
        origpoly = poly2origpoly(poly, x, y, rate)
        det = origpoly
        det.append(confidence)
        det = list(map(float, det))
        if (oriname not in nameboxdict):
            nameboxdict[oriname] = []
        nameboxdict[oriname].append(det)

    nameboxnmsdict = nmsbynamedict(nameboxdict, nms, nms_thresh)

    # write result
    dstname = os.path.join(output_dir, class_name + '.txt')
    with open(dstname, 'w') as f_out:
        for imgname in nameboxnmsdict:
            for det in nameboxnmsdict[imgname]:
                confidence = det[-1]
                bbox = det[0:-1]
                outline = imgname + ' ' + str(confidence) + ' ' + ' '.join(
                    map(str, bbox))
                f_out.write(outline + '\n')
def generate_result(pred_txt_dir,
                    output_dir='output',
                    class_names=wordname_15,
                    nms_thresh=0.1):
    """
    Merge per-patch txt predictions into one result file per class.

    Args:
        pred_txt_dir: dir of pred txt
        output_dir: dir of output
        class_names: class names of data
        nms_thresh: IoU threshold used when merging overlapping patches
    """
    pred_txt_list = glob.glob("{}/*.txt".format(pred_txt_dir))

    # step1: summary pred bbox
    pred_classes = {}
    for class_name in class_names:
        pred_classes[class_name] = []
    for current_txt in pred_txt_list:
        img_id = os.path.split(current_txt)[1]
        img_id = img_id.split('.txt')[0]
        with open(current_txt) as f:
            res = f.readlines()
        # Re-key each line by image id so patches of one image group together.
        for item in res:
            item = item.split(' ')
            pred_class = item[0]
            item[0] = img_id
            pred_bbox = ' '.join(item)
            pred_classes[pred_class].append(pred_bbox)

    pred_classes_lst = []
    for class_name in pred_classes.keys():
        print('class_name: {}, count: {}'.format(class_name,
                                                 len(pred_classes[class_name])))
        pred_classes_lst.append((class_name, pred_classes[class_name]))

    # step2: merge, one worker per class
    nms = py_cpu_nms_poly_fast
    mergesingle_fn = partial(METHOD_NAME, output_dir, nms, nms_thresh)
    # BUG FIX: the pool was never closed/joined, leaking worker processes;
    # the context manager terminates the pool once map() has completed.
    with Pool(len(class_names)) as pool:
        pool.map(mergesingle_fn, pred_classes_lst)
def parse_args():
    """Build and parse the CLI arguments for merging DOTA test results."""
    parser = argparse.ArgumentParser(description='generate test results')
    parser.add_argument('--pred_txt_dir', type=str, help='path of pred txt dir')
    parser.add_argument(
        '--output_dir', type=str, default='output', help='path of output dir')
    parser.add_argument(
        '--data_type', type=str, default='dota10', help='data type')
    parser.add_argument(
        '--nms_thresh',
        type=float,
        default=0.1,
        help='nms threshold while merging results')
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    output_dir = args.output_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    class_names = DATA_CLASSES[args.data_type]
    # BUG FIX: forward the CLI nms threshold; previously --nms_thresh was
    # parsed but never passed, so the default 0.1 was always used.
    generate_result(args.pred_txt_dir, output_dir, class_names,
                    args.nms_thresh)
    print('done!')
import logging
from typing import TypedDict
from localstack.services.cloudformation.deployment_utils import check_not_found_exception
# Module-level logger for this deployment module.
LOG = logging.getLogger(__name__)

# dict key used to store the deployment state of a resource
KEY_RESOURCE_STATE = "_state_"
class DependencyNotYetSatisfied(Exception):
    """Exception indicating that a resource dependency is not (yet) deployed/available."""

    def __init__(self, resource_ids, message=None):
        """
        Args:
            resource_ids: a single resource ID or a list of them; normalized
                to a list on `self.resource_ids`.
            message: optional custom message; defaults to listing the IDs.
        """
        message = message or "Unresolved dependencies: %s" % resource_ids
        # FIX: zero-arg super() — the file already targets Python 3.10+
        # (`str | None` syntax), so the legacy 2-arg form was inconsistent.
        super().__init__(message)
        self.resource_ids = resource_ids if isinstance(resource_ids, list) else [resource_ids]
class ResourceJson(TypedDict):
    """Shape of a CloudFormation resource entry as found in a template."""

    # CloudFormation resource type, e.g. "AWS::S3::Bucket"
    Type: str
    # Resource properties as declared in the template
    Properties: dict
class GenericBaseModel:
    """Abstract base class representing a resource model class in LocalStack.

    This class keeps references to a combination of (1) the CF resource
    properties (as defined in the template), and (2) the current deployment
    state of a resource.

    Concrete subclasses will implement convenience methods to manage resources,
    e.g., fetching the latest deployment state, getting the resource name, etc.
    """

    def __init__(self, account_id: str, region_name: str, resource_json: dict, **params):
        # self.stack_name = stack_name  # TODO: add stack name to params
        self.account_id = account_id
        self.region_name = region_name
        self.resource_json = resource_json
        self.resource_type = resource_json["Type"]
        # Properties, as defined in the resource template
        # (also written back into resource_json so the dict always has the key)
        self.properties = resource_json["Properties"] = resource_json.get("Properties") or {}
        # State, as determined from the deployed resource; use a special dict key here to keep
        # track of state changes within resource_json (this way we encapsulate all state details
        # in `resource_json` and the changes will survive creation of multiple instances of this class)
        self.state = resource_json[KEY_RESOURCE_STATE] = resource_json.get(KEY_RESOURCE_STATE) or {}

    # ----------------------
    # ABSTRACT BASE METHODS
    # ----------------------

    def fetch_state(self, stack_name, resources):
        """Fetch the latest deployment state of this resource, or return None if not currently deployed (NOTE: THIS IS NOT ALWAYS TRUE)."""
        return None

    def METHOD_NAME(self, new_resource, stack_name, resources):
        """Update the deployment of this resource, using the updated properties (implemented by subclasses)."""
        raise NotImplementedError

    def is_updatable(self) -> bool:
        """Return True if a subclass overrides the update method above."""
        return type(self).METHOD_NAME != GenericBaseModel.METHOD_NAME

    @classmethod
    def cloudformation_type(cls):
        """Return the CloudFormation resource type name, e.g., "AWS::S3::Bucket" (implemented by subclasses)."""
        pass

    @staticmethod
    def get_deploy_templates():
        """Return template configurations used to create the final API requests (implemented by subclasses)."""
        pass

    # TODO: rework to normal instance method when resources aren't mutated in different place anymore
    @staticmethod
    def add_defaults(resource, stack_name: str):
        """Set any defaults required, including auto-generating names. Must be called before deploying the resource"""
        pass

    # ---------------------
    # GENERIC UTIL METHODS
    # ---------------------

    # TODO: remove
    def fetch_and_update_state(self, *args, **kwargs):
        """Fetch the current deployment state and merge it into self.state;
        swallows (but logs) errors other than resource-not-found."""
        if self.physical_resource_id is None:
            return None
        try:
            state = self.fetch_state(*args, **kwargs)
            self.update_state(state)
            return state
        except Exception as e:
            # not-found is expected for not-yet-deployed resources; only log others
            if not check_not_found_exception(e, self.resource_type, self.properties):
                LOG.warning(
                    "Unable to fetch state for resource %s: %s",
                    self,
                    e,
                    exc_info=LOG.isEnabledFor(logging.DEBUG),
                )

    # TODO: remove
    def update_state(self, details):
        """Update the deployment state of this resource (existing attributes will be overwritten)."""
        details = details or {}
        self.state.update(details)

    @property
    def physical_resource_id(self) -> str | None:
        """Return the (cached) physical resource ID."""
        return self.resource_json.get("PhysicalResourceId")

    @property
    def logical_resource_id(self) -> str:
        """Return the logical resource ID."""
        return self.resource_json["LogicalResourceId"]

    # TODO: rename? make it clearer what props are in comparison with state, properties and resource_json
    @property
    def props(self) -> dict:
        """Return a copy of (1) the resource properties (from the template), combined with
        (2) the current deployment state properties of the resource."""
        result = dict(self.properties)
        result.update(self.state or {})
        # last deployed state (if any) takes precedence over both
        last_state = self.resource_json.get("_last_deployed_state", {})
        result.update(last_state)
        return result
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""This module contains code to test SageMaker ``Actions``"""
from __future__ import absolute_import
import datetime
import logging
import time
import pytest
from sagemaker.lineage import action
from sagemaker.lineage.query import LineageQueryDirectionEnum
def test_create_delete(action_obj):
    """Smoke test: the action fixture creates (and later deletes) an action."""
    # fixture does create and then delete, this test ensures it happens at least once
    assert action_obj.action_arn
def test_create_delete_with_association(action_obj_with_association):
    """Smoke test: create/delete works for an action that has an association."""
    # fixture does create and then delete, this test ensures it happens at least once
    assert action_obj_with_association.action_arn
def test_save(action_obj, sagemaker_session):
    """save() must persist description/status/property changes and removals."""
    action_obj.description = "updated integration test description"
    action_obj.status = "Completed"
    action_obj.properties = {"k3": "v3"}
    action_obj.properties_to_remove = ["k1"]
    action_obj.save()

    # Reload from the service and verify the round-trip (removal of "k1"
    # is implied by the exact properties equality below).
    loaded = action.Action.load(
        action_name=action_obj.action_name, sagemaker_session=sagemaker_session
    )
    assert "updated integration test description" == loaded.description
    assert "Completed" == loaded.status
    assert {"k3": "v3"} == loaded.properties
def test_load(action_obj, sagemaker_session):
    """load() by name must return the same action (matching ARN)."""
    assert action_obj.action_name
    logging.info(f"loading {action_obj.action_name}")
    loaded = action.Action.load(
        action_name=action_obj.action_name, sagemaker_session=sagemaker_session
    )
    assert action_obj.action_arn == loaded.action_arn
def test_list(action_objs, sagemaker_session):
    """list() must return the fixture's actions in creation order, for both
    sort orders."""
    slack = datetime.timedelta(minutes=1)
    now = datetime.datetime.now(datetime.timezone.utc)
    action_names = [actn.action_name for actn in action_objs]

    for sort_order in ["Ascending", "Descending"]:
        # Keep only the actions created by this test's fixture.
        action_names_listed = [
            action_listed.action_name
            for action_listed in action.Action.list(
                created_after=now - slack,
                created_before=now + slack,
                sort_by="CreationTime",
                sort_order=sort_order,
                sagemaker_session=sagemaker_session,
            )
            if action_listed.action_name in action_names
        ]
        if sort_order == "Descending":
            action_names_listed = action_names_listed[::-1]
        assert action_names == action_names_listed
    # sanity check
    assert action_names
@pytest.mark.timeout(30)
def METHOD_NAME(action_obj, sagemaker_session):
    """set_tag() must make the tag visible via list_tags (eventually consistent;
    the poll loop is bounded by the 30s timeout mark)."""
    tag = {"Key": "foo", "Value": "bar"}
    action_obj.set_tag(tag)

    while True:
        actual_tags = sagemaker_session.sagemaker_client.list_tags(
            ResourceArn=action_obj.action_arn
        )["Tags"]
        if actual_tags:
            break
        time.sleep(5)
    # When sagemaker-client-config endpoint-url is passed as argument to hit some endpoints,
    # length of actual tags will be greater than 1
    assert len(actual_tags) > 0
    assert actual_tags[0] == tag
@pytest.mark.timeout(30)
def test_tags(action_obj, sagemaker_session):
    """set_tags() (plural) must make the tags visible via list_tags; poll is
    bounded by the 30s timeout mark."""
    tags = [{"Key": "foo1", "Value": "bar1"}]
    action_obj.set_tags(tags)

    while True:
        actual_tags = sagemaker_session.sagemaker_client.list_tags(
            ResourceArn=action_obj.action_arn
        )["Tags"]
        if actual_tags:
            break
        time.sleep(5)
    # When sagemaker-client-config endpoint-url is passed as argument to hit some endpoints,
    # length of actual tags will be greater than 1
    assert len(actual_tags) > 0
    assert [actual_tags[-1]] == tags
@pytest.mark.skip("data inconsistency P61661075")
def test_upstream_artifacts(static_model_deployment_action):
    """Lineage query in the ascendant direction must yield artifact ARNs."""
    artifacts_from_query = static_model_deployment_action.artifacts(
        direction=LineageQueryDirectionEnum.ASCENDANTS
    )
    assert len(artifacts_from_query) > 0
    for artifact in artifacts_from_query:
        assert "artifact" in artifact.artifact_arn
@pytest.mark.skip("data inconsistency P61661075")
def test_downstream_artifacts(static_approval_action):
    """Lineage query in the descendant direction must yield artifact ARNs."""
    artifacts_from_query = static_approval_action.artifacts(
        direction=LineageQueryDirectionEnum.DESCENDANTS
    )
    assert len(artifacts_from_query) > 0
    for artifact in artifacts_from_query:
        assert "artifact" in artifact.artifact_arn
@pytest.mark.skip("data inconsistency P61661075")
def test_datasets(static_approval_action, static_dataset_artifact, sagemaker_session):
    """datasets() must return DataSet artifacts once the dataset->action
    association exists; association setup/teardown is best-effort."""
    try:
        sagemaker_session.sagemaker_client.add_association(
            SourceArn=static_dataset_artifact.artifact_arn,
            DestinationArn=static_approval_action.action_arn,
            AssociationType="ContributedTo",
        )
    except Exception:
        # association may already exist from a previous run — that's fine
        print("Source and Destination association already exists.")
    # brief pause for the association to propagate
    time.sleep(3)

    artifacts_from_query = static_approval_action.datasets()

    assert len(artifacts_from_query) > 0
    for artifact in artifacts_from_query:
        assert "artifact" in artifact.artifact_arn
        assert artifact.artifact_type == "DataSet"

    try:
        sagemaker_session.sagemaker_client.delete_association(
            SourceArn=static_dataset_artifact.artifact_arn,
            DestinationArn=static_approval_action.action_arn,
        )
    except Exception:
        # best-effort cleanup; ignore failures
        pass
@pytest.mark.skip("data inconsistency P61661075")
def test_endpoints(static_approval_action):
    """endpoints() must return only Endpoint contexts."""
    endpoint_contexts_from_query = static_approval_action.endpoints()
    assert len(endpoint_contexts_from_query) > 0
    for endpoint in endpoint_contexts_from_query:
        assert endpoint.context_type == "Endpoint"
        assert "endpoint" in endpoint.context_arn
import os
import platform
import tempfile
import numpy as np
class WriteHandle:
    """
    Context manager that writes tiles into a memory-mapped temporary file and
    atomically renames it to the final path on success.

    Parameters:
    -----------
    path : str
        Full path to the file to be written (should not exist, but the directory should)

    tmp_base_path : str
        Path to a directory where temporary files should be written
        (must be on the same file system as `path`, so that the final
        os.rename() below is an atomic move rather than a copy)

    part_slice : Slice
        Slice of the object (i.e. Partition) we are writing, to convert
        from global tile coordinates to local slices.

    dtype : numpy dtype
        For which dtype should the file be opened
    """

    def __init__(self, path, tmp_base_path, part_slice, dtype):
        # TODO: support for direct I/O
        # (useful if we have a very high write rate, otherwise not so much; very high being
        # multiple GiB/s)
        self._path = path
        self._tmp_base_path = tmp_base_path
        self._slice = part_slice
        self._dtype = dtype
        self._dest = None       # np.memmap over the temp file while open
        self._tmp_file = None   # NamedTemporaryFile handle while open
        self._aborted = False

    def write_tile(self, tile):
        """
        Write a single `DataTile`
        """
        assert self._tmp_file is not None
        # shift from global tile coordinates into this partition's local frame
        dest_slice = tile.tile_slice.shift(self._slice)
        self._dest[dest_slice.get()] = tile.data

    def write_tiles(self, tiles):
        """
        Write all `tiles`, while yielding each tile for further processing
        """
        for tile in tiles:
            self.write_tile(tile)
            yield tile

    def abort(self):
        """Discard everything written so far; __exit__ will not finalize."""
        self._cleanup()
        self._aborted = True

    def __enter__(self):
        # Open the temp file and map it as a writable array of the partition shape.
        self._open_tmp()
        shape = tuple(self._slice.shape)
        self._dest = np.memmap(self._tmp_file.name, dtype=self._dtype, mode='write', shape=shape)
        return self

    def __exit__(self, exc_type, exc_value, tb):
        if exc_type is not None or self._aborted:
            # we have an exception, or abort() was called
            self._cleanup()
            return
        self._finalize()

    def _open_tmp(self):
        """
        Open a temporary file for writing. We pass delete=False, because, on success, we move the
        file to its new name, and don't want it to be deleted. Only in case of errors should
        it be removed (done in _cleanup).

        Filename can be accessed as `self._tmp_file.name`
        """
        assert self._tmp_file is None
        prefix = os.path.basename(".tmp-%s" % self._path)
        self._tmp_file = tempfile.NamedTemporaryFile(
            prefix=prefix,
            dir=self._tmp_base_path,
            delete=False
        )

    def _cleanup(self):
        """
        In case of errors, remove the temporary file.
        """
        self._dest = None
        if self._tmp_file is not None:
            self._tmp_file.close()
            os.unlink(self._tmp_file.name)
            self._tmp_file = None

    def _finalize(self):
        """
        Called in case of success.

        What we need to do here:

        + call msync(2) w/ MS_SYNC to flush changes to filesystem
          (done by numpy when caling `flush` on the memmap object)
        + atomically move partition to final filename
        + call fsync on the destination directory
        """
        self._dest.flush()  # msync
        # to make Windows™ happy:
        self._tmp_file.close()
        # FIXME temporary workaround, see if fixed upstream:
        # https://github.com/numpy/numpy/issues/13510
        mm = self._dest._mmap
        if mm is not None:
            mm.close()
        os.rename(self._tmp_file.name, self._path)
        dest_dir = os.path.dirname(self._path)
        # fsync the directory so the rename itself is durable
        self.METHOD_NAME(dest_dir)
        self._tmp_file = None
        self._dest = None

    def METHOD_NAME(self, path):
        """fsync a directory so a rename within it survives a crash."""
        # noop on windows, as:
        # "On Windows… err, there is no clear answer. You can not call FlushFileBuffers()
        # on a directory handle as far as I can see."
        # (from http://blog.httrack.com/blog/2013/11/15/everything-you-always-wanted-to-know-about-fsync/)  # NOQA
        if platform.system() == "Windows":
            return
        fd = os.open(path, os.O_RDONLY | os.O_DIRECTORY)
        try:
            os.fsync(fd)
        finally:
            os.close(fd)
from __future__ import annotations
import contextlib
import os
import random
import re
import shlex
from typing import Any
from typing import ContextManager
from typing import Generator
from typing import NoReturn
from typing import Protocol
from typing import Sequence
import pre_commit.constants as C
from pre_commit import parse_shebang
from pre_commit import xargs
from pre_commit.prefix import Prefix
from pre_commit.util import cmd_output_b
# Seed used by _shuffled() so file partitioning is deterministic across runs.
FIXED_RANDOM_SEED = 1542676187

# Matches version-manager shim directories (e.g. pyenv/rbenv) in an exe path.
SHIMS_RE = re.compile(r'[/\\]shims[/\\]')
class Language(Protocol):
    """Structural interface every pre-commit language module implements."""

    # Use `None` for no installation / environment
    @property
    def ENVIRONMENT_DIR(self) -> str | None: ...

    # return a value to replace `'default` for `language_version`
    def get_default_version(self) -> str: ...

    # return whether the environment is healthy (or should be rebuilt)
    def health_check(self, prefix: Prefix, version: str) -> str | None: ...

    # install a repository for the given language and language_version
    def install_environment(
            self,
            prefix: Prefix,
            version: str,
            additional_dependencies: Sequence[str],
    ) -> None:
        ...

    # modify the environment for hook execution
    def in_env(self, prefix: Prefix, version: str) -> ContextManager[None]: ...

    # execute a hook and return the exit code and output
    def run_hook(
            self,
            prefix: Prefix,
            entry: str,
            args: Sequence[str],
            file_args: Sequence[str],
            *,
            is_local: bool,
            require_serial: bool,
            color: bool,
    ) -> tuple[int, bytes]:
        ...
def exe_exists(exe: str) -> bool:
    """Return True if *exe* resolves to a real system executable — i.e. it is
    on PATH, is not a version-manager shim, and is not user-installed inside
    the home directory (unless the home directory is the filesystem root)."""
    found = parse_shebang.find_executable(exe)
    if found is None:  # not found on PATH at all
        return False

    homedir = os.path.expanduser('~')
    try:
        common: str | None = os.path.commonpath((found, homedir))
    except ValueError:  # on windows, different drives raises ValueError
        common = None

    return (
        # it is not in a /shims/ directory
        not SHIMS_RE.search(found) and
        (
            # the homedir is / (docker, service user, etc.)
            os.path.dirname(homedir) == homedir or
            # the exe is not contained in the home directory
            common != homedir
        )
    )
def setup_cmd(prefix: Prefix, cmd: tuple[str, ...], **kwargs: Any) -> None:
    """Run *cmd* with the repository prefix directory as the working dir."""
    cmd_output_b(*cmd, cwd=prefix.prefix_dir, **kwargs)
def environment_dir(prefix: Prefix, d: str, language_version: str) -> str:
    """Return the per-version environment directory path inside *prefix*."""
    suffixed = '-'.join((d, language_version))
    return prefix.path(suffixed)
def assert_version_default(binary: str, version: str) -> None:
    """Raise AssertionError if a non-default language_version was requested
    for a language that only supports the system-installed binary."""
    if version != C.DEFAULT:
        raise AssertionError(
            f'for now, pre-commit requires system-installed {binary} -- '
            f'you selected `language_version: {version}`',
        )
def assert_no_additional_deps(
        lang: str,
        additional_deps: Sequence[str],
) -> None:
    """Raise AssertionError if additional_dependencies were configured for a
    language that does not support installing them."""
    if not additional_deps:
        return
    raise AssertionError(
        f'for now, pre-commit does not support '
        f'additional_dependencies for {lang} -- '
        f'you selected `additional_dependencies: {additional_deps}`',
    )
def basic_get_default_version() -> str:
    """Default implementation: languages without versioning use C.DEFAULT."""
    return C.DEFAULT
def basic_health_check(prefix: Prefix, language_version: str) -> str | None:
    """Default implementation: always healthy (None means no problem found)."""
    return None
def no_install(
        prefix: Prefix,
        version: str,
        additional_dependencies: Sequence[str],
) -> NoReturn:
    """install_environment stub for languages with no environment; always raises."""
    raise AssertionError('This language is not installable')
@contextlib.contextmanager
def no_env(prefix: Prefix, version: str) -> Generator[None, None, None]:
    """in_env stub for languages that need no environment modification."""
    yield
def target_concurrency() -> int:
    """Number of concurrent xargs partitions to use.

    Returns 1 when concurrency is explicitly disabled, 2 on Travis CI,
    otherwise the detected CPU count.
    """
    if 'PRE_COMMIT_NO_CONCURRENCY' in os.environ:
        return 1
    # Travis appears to have a bunch of CPUs, but we can't use them all.
    # (idiom fix: flattened the nested `else: if` into an elif chain)
    elif 'TRAVIS' in os.environ:
        return 2
    else:
        return xargs.cpu_count()
def _shuffled(seq: Sequence[str]) -> list[str]:
    """Deterministically shuffle"""
    rng = random.Random()
    rng.seed(FIXED_RANDOM_SEED, version=1)

    shuffled = list(seq)
    rng.shuffle(shuffled)
    return shuffled
def run_xargs(
        cmd: tuple[str, ...],
        file_args: Sequence[str],
        *,
        require_serial: bool,
        color: bool,
) -> tuple[int, bytes]:
    """Run *cmd* over *file_args* via xargs, serially or concurrently.

    Returns the (exit_code, output) pair from xargs.
    """
    if require_serial:
        jobs = 1
    else:
        # Shuffle the files so that they more evenly fill out the xargs
        # partitions, but do it deterministically in case a hook cares about
        # ordering.
        file_args = _shuffled(file_args)
        jobs = target_concurrency()
    return xargs.xargs(cmd, file_args, target_concurrency=jobs, color=color)
def METHOD_NAME(entry: str, args: Sequence[str]) -> tuple[str, ...]:
    """Split *entry* shell-style and append *args* to form the command tuple."""
    entry_parts = shlex.split(entry)
    return tuple(entry_parts) + tuple(args)
def basic_run_hook(
        prefix: Prefix,
        entry: str,
        args: Sequence[str],
        file_args: Sequence[str],
        *,
        is_local: bool,
        require_serial: bool,
        color: bool,
) -> tuple[int, bytes]:
    """Default run_hook: build the command from entry+args and run it over
    the files via xargs. Returns (exit_code, output)."""
    return run_xargs(
        METHOD_NAME(entry, args),
        file_args,
        require_serial=require_serial,
        color=color,
    )
# ------------------------------------------------------------------------------------
# BaSSL
# Copyright (c) 2021 KakaoBrain. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# Github: https://github.com/kakaobrain/bassl
# ------------------------------------------------------------------------------------
import torch
import torch.nn as nn
from transformers.models.bert.modeling_bert import BertEncoder
class ShotEmbedding(nn.Module):
    """Embeds a window of shot features for a transformer encoder: linear
    projection + learned position embeddings + LayerNorm/dropout, with an
    optional learned [MASK] vector and a prepended [CLS] slot."""

    def __init__(self, cfg):
        super().__init__()
        nn_size = cfg.neighbor_size + 2  # +1 for center shot, +1 for cls
        self.shot_embedding = nn.Linear(cfg.input_dim, cfg.hidden_size)
        self.position_embedding = nn.Embedding(nn_size, cfg.hidden_size)
        # Row 1 is the learned [MASK] vector; row 0 (padding_idx) stays zero
        # so unmasked positions receive no offset.
        self.mask_embedding = nn.Embedding(2, cfg.input_dim, padding_idx=0)

        # tf naming convention for layer norm
        self.LayerNorm = nn.LayerNorm(cfg.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(cfg.hidden_dropout_prob)
        self.register_buffer('pos_ids',
                             torch.arange(nn_size, dtype=torch.long))

    def forward(
        self,
        shot_emb: torch.Tensor,
        mask: torch.Tensor = None,
        pos_ids: torch.Tensor = None,
    ) -> torch.Tensor:
        """
        Args:
            shot_emb: (batch, n_shot, input_dim) shot features (rank-3, per assert).
            mask: optional (batch, n_shot) 0/1 tensor; 1 marks masked shots.
            pos_ids: optional position indices; defaults to the registered buffer.

        Returns:
            (batch, n_shot + 1, hidden_size) embeddings with [CLS] prepended.
        """
        assert len(shot_emb.size()) == 3
        if pos_ids is None:
            pos_ids = self.pos_ids

        # this for mask embedding (un-masked ones remain unchanged)
        if mask is not None:
            # re-zero row 0 in case it drifted during training
            self.mask_embedding.weight.data[0, :].fill_(0)
            mask_emb = self.mask_embedding(mask.long())
            # zero out masked shots, then add the learned [MASK] vector
            shot_emb = (shot_emb * (1 - mask).float()[:, :, None]) + mask_emb

        # we set [CLS] token to averaged feature
        cls_emb = shot_emb.mean(dim=1)

        # embedding shots
        shot_emb = torch.cat([cls_emb[:, None, :], shot_emb], dim=1)
        shot_emb = self.shot_embedding(shot_emb)
        pos_emb = self.position_embedding(pos_ids)
        embeddings = shot_emb + pos_emb[None, :]
        embeddings = self.dropout(self.LayerNorm(embeddings))
        return embeddings
class TransformerCRN(nn.Module):
    """Contextual relation network: ShotEmbedding followed by a BERT encoder,
    with configurable pooling of the encoded sequence."""

    def __init__(self, cfg):
        super().__init__()
        self.pooling_method = cfg.pooling_method
        self.shot_embedding = ShotEmbedding(cfg)
        self.encoder = BertEncoder(cfg)

        nn_size = cfg.neighbor_size + 2  # +1 for center shot, +1 for cls
        # Pre-computed all-ones attention mask for the common sequence length.
        self.register_buffer(
            'attention_mask',
            self.METHOD_NAME(
                torch.ones((1, nn_size)).float()),
        )

    def forward(
        self,
        shot: torch.Tensor,
        mask: torch.Tensor = None,
        pos_ids: torch.Tensor = None,
        pooling_method: str = None,
    ):
        """Encode shots; returns (per-token embeddings, pooled embedding)."""
        # Rebuild the mask only when the sequence length differs from the
        # pre-registered one (+1 accounts for the CLS token).
        if self.attention_mask.shape[1] != (shot.shape[1] + 1):
            n_shot = shot.shape[1] + 1  # +1 for CLS token
            attention_mask = self.METHOD_NAME(
                torch.ones((1, n_shot), dtype=torch.float, device=shot.device))
        else:
            attention_mask = self.attention_mask

        shot_emb = self.shot_embedding(shot, mask=mask, pos_ids=pos_ids)
        encoded_emb = self.encoder(
            shot_emb, attention_mask=attention_mask).last_hidden_state
        return encoded_emb, self.pooler(
            encoded_emb, pooling_method=pooling_method)

    def pooler(self, sequence_output, pooling_method=None):
        """Reduce (batch, seq, hidden) to (batch, hidden) using the chosen
        strategy; position 0 is the CLS token, 1: are the shots."""
        if pooling_method is None:
            pooling_method = self.pooling_method

        if pooling_method == 'cls':
            return sequence_output[:, 0, :]
        elif pooling_method == 'avg':
            return sequence_output[:, 1:].mean(dim=1)
        elif pooling_method == 'max':
            return sequence_output[:, 1:].max(dim=1)[0]
        elif pooling_method == 'center':
            # center shot of the window (CLS included in the index math)
            cidx = sequence_output.shape[1] // 2
            return sequence_output[:, cidx, :]
        else:
            raise ValueError

    def METHOD_NAME(self, attention_mask):
        """Broadcast a 2D/3D attention mask to (batch, 1, [1,] seq) additive form."""
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                f'Wrong shape for attention_mask (shape {attention_mask.shape})'
            )

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask
"""
Manage modjk workers
====================
Send commands to a :strong:`modjk` load balancer via the peer system.
This module can be used with the :ref:`prereq <requisites-prereq>`
requisite to remove/add the worker from the load balancer before
deploying/restarting service.
Mandatory Settings:
- The minion needs to have permission to publish the :strong:`modjk.*`
functions (see :ref:`here <peer>` for information on configuring
peer publishing permissions)
- The modjk load balancer must be configured as stated in the :strong:`modjk`
execution module :mod:`documentation <salt.modules.modjk>`
"""
def __virtual__():
"""
Check if we have peer access ?
"""
return True
def _send_command(cmd, worker, lbn, target, profile="default", tgt_type="glob"):
    """
    Send a command to the modjk load balancer via peer publishing.

    The minion needs to be able to publish the commands to the load balancer.

    cmd:
        worker_stop - won't get any traffic from the lbn
        worker_activate - activate the worker
        worker_disable - will get traffic only for current sessions

    Returns a dict:
        code    - True only if every answering minion succeeded
        msg     - human-readable status
        minions - minions that answered (on success) or failed (on error)
    """
    ret = {
        "code": False,
        "msg": "OK",
        "minions": [],
    }

    # Publish the command to the targeted minions
    func = "modjk.{}".format(cmd)
    args = [worker, lbn, profile]
    response = __salt__["publish.publish"](target, func, args, tgt_type)

    # Collect the affected minions and those that returned a falsy result
    minions = list(response)
    errors = [minion for minion in response if not response[minion]]

    # Parse the response
    if not response:
        ret["msg"] = "no servers answered the published command {}".format(cmd)
        return ret
    elif errors:
        ret["msg"] = "the following minions return False"
        ret["minions"] = errors
        return ret
    else:
        ret["code"] = True
        # typo fix: was "the commad was published successfully"
        ret["msg"] = "the command was published successfully"
        ret["minions"] = minions
        return ret
def METHOD_NAME(target, worker, activation, profile="default", tgt_type="glob"):
    """
    Check if the worker is in `activation` state in the targeted load balancers

    The function will return the following dictionary:
    result - False if no server returned from the published command
    errors - list of servers that couldn't find the worker
    wrong_state - list of servers that the worker was in the wrong state
    (not activation)
    """
    ret = {
        "result": True,
        "errors": [],
        "wrong_state": [],
    }
    # Ask every targeted minion for the worker's status via peer publishing
    args = [worker, profile]
    status = __salt__["publish.publish"](target, "modjk.worker_status", args, tgt_type)
    # Did we get any response from anyone?
    if not status:
        ret["result"] = False
        return ret
    # Search for errors & status
    for balancer in status:
        if not status[balancer]:
            # falsy return: this balancer could not find the worker
            ret["errors"].append(balancer)
        elif status[balancer]["activation"] != activation:
            ret["wrong_state"].append(balancer)
    return ret
def _talk2modjk(name, lbn, target, action, profile="default", tgt_type="glob"):
    """
    Wrapper function for the stop/disable/activate functions.

    Checks the worker's current state first and only publishes ``action``
    when at least one balancer reports the worker in a different state.
    Returns a standard Salt state dict (name/result/changes/comment).
    """
    ret = {"name": name, "result": True, "changes": {}, "comment": ""}
    # Maps the modjk execution-module action to the activation code that
    # worker_status reports (STP = stopped, DIS = disabled, ACT = active).
    action_map = {
        "worker_stop": "STP",
        "worker_disable": "DIS",
        "worker_activate": "ACT",
    }
    # Check what needs to be done
    status = METHOD_NAME(target, name, action_map[action], profile, tgt_type)
    if not status["result"]:
        ret["result"] = False
        ret["comment"] = "no servers answered the published command modjk.worker_status"
        return ret
    if status["errors"]:
        ret["result"] = False
        ret[
            "comment"
        ] = "the following balancers could not find the worker {}: {}".format(
            name, status["errors"]
        )
        return ret
    if not status["wrong_state"]:
        # Nothing to do: every balancer already reports the desired state.
        ret[
            "comment"
        ] = "the worker is in the desired activation state on all the balancers"
        return ret
    else:
        ret["comment"] = "the action {} will be sent to the balancers {}".format(
            action, status["wrong_state"]
        )
        ret["changes"] = {action: status["wrong_state"]}
    # In test mode report what would change without publishing anything.
    if __opts__["test"]:
        ret["result"] = None
        return ret
    # Send the action command to target
    response = _send_command(action, name, lbn, target, profile, tgt_type)
    ret["comment"] = response["msg"]
    ret["result"] = response["code"]
    return ret
def stop(name, lbn, target, profile="default", tgt_type="glob"):
    """
    Stop the named worker on the ``lbn`` load balancers of the targeted
    minions; the worker will receive no traffic at all.

    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Example:

    .. code-block:: yaml

        disable-before-deploy:
          modjk_worker.stop:
            - name: {{ grains['id'] }}
            - lbn: application
            - target: 'roles:balancer'
            - tgt_type: grain
    """
    action = "worker_stop"
    return _talk2modjk(name, lbn, target, action, profile, tgt_type)
def activate(name, lbn, target, profile="default", tgt_type="glob"):
    """
    Activate the named worker on the ``lbn`` load balancers of the targeted
    minions so it receives traffic again.

    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Example:

    .. code-block:: yaml

        disable-before-deploy:
          modjk_worker.activate:
            - name: {{ grains['id'] }}
            - lbn: application
            - target: 'roles:balancer'
            - tgt_type: grain
    """
    action = "worker_activate"
    return _talk2modjk(name, lbn, target, action, profile, tgt_type)
def disable(name, lbn, target, profile="default", tgt_type="glob"):
    """
    Disable the named worker on the ``lbn`` load balancers of the targeted
    minions. The worker keeps serving current sessions but accepts no new
    ones.

    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Example:

    .. code-block:: yaml

        disable-before-deploy:
          modjk_worker.disable:
            - name: {{ grains['id'] }}
            - lbn: application
            - target: 'roles:balancer'
            - tgt_type: grain
    """
    action = "worker_disable"
    return _talk2modjk(name, lbn, target, action, profile, tgt_type)
from __future__ import annotations
from sqlalchemy import (
BigInteger,
Boolean,
Column,
Date,
Float,
ForeignKey,
Integer,
LargeBinary,
PrimaryKeyConstraint,
Table,
delete,
event,
)
from sqlalchemy.dialects.postgresql import ARRAY, CHAR, JSONB, VARCHAR
from sqlalchemy.ext.mutable import MutableDict, MutableList
from sqlalchemy.orm import Session, backref, declarative_base, mapped_column, relationship
Base = declarative_base()
UID = VARCHAR(78)
# primary_key=True implies `unique=True` and `nullable=False`
class AnalysisEntry(Base):
    """One analysis result: the output of a single plugin run on one file."""

    __tablename__ = 'analysis'

    # ondelete CASCADE: analyses disappear with their file object.
    uid = mapped_column(UID, ForeignKey('file_object.uid', ondelete='CASCADE'), index=True)
    plugin = mapped_column(VARCHAR(64), nullable=False)
    plugin_version = mapped_column(VARCHAR(16), nullable=False)
    system_version = mapped_column(VARCHAR)
    analysis_date = mapped_column(Float, nullable=False)
    summary = mapped_column(ARRAY(VARCHAR, dimensions=1), nullable=True)
    tags = mapped_column(MutableDict.as_mutable(JSONB), nullable=True)
    result = mapped_column(MutableDict.as_mutable(JSONB), nullable=True)

    file_object = relationship('FileObjectEntry', back_populates='analyses')

    # Composite PK: at most one result per (file, plugin) pair.
    __table_args__ = (PrimaryKeyConstraint('uid', 'plugin', name='_analysis_primary_key'),)

    def __repr__(self) -> str:
        return f'AnalysisEntry({self.uid}, {self.plugin}, {self.plugin_version})'
# n:n association: direct parent/child containment between file objects
# (used by FileObjectEntry.parent_files / included_files).
included_files_table = Table(
    'included_files',
    Base.metadata,
    Column('parent_uid', UID, ForeignKey('file_object.uid', ondelete='CASCADE'), primary_key=True, index=True),
    Column('child_uid', UID, ForeignKey('file_object.uid', ondelete='CASCADE'), primary_key=True, index=True),
)

# n:n association: maps contained files to their firmware root object(s)
# (used by FileObjectEntry.root_firmware / all_included_files).
fw_files_table = Table(
    'fw_files',
    Base.metadata,
    Column('root_uid', UID, ForeignKey('file_object.uid', ondelete='CASCADE'), primary_key=True, index=True),
    Column('file_uid', UID, ForeignKey('file_object.uid', ondelete='CASCADE'), primary_key=True, index=True),
)

# n:n association: which file objects took part in which comparison.
comparisons_table = Table(
    'compared_files',
    Base.metadata,
    Column('comparison_id', VARCHAR, ForeignKey('comparison.comparison_id', ondelete='CASCADE'), primary_key=True),
    Column('file_uid', UID, ForeignKey('file_object.uid', ondelete='CASCADE'), primary_key=True),
)
class FileObjectEntry(Base):
    """A single (extracted or uploaded) file, possibly a firmware root."""

    __tablename__ = 'file_object'

    uid = mapped_column(UID, primary_key=True)
    sha256 = mapped_column(CHAR(64), nullable=False)
    file_name = mapped_column(VARCHAR, nullable=False)
    # nesting depth inside the firmware (0 = root)
    # NOTE(review): depth semantics inferred from name — confirm against writer.
    depth = mapped_column(Integer, nullable=False)
    size = mapped_column(BigInteger, nullable=False)
    comments = mapped_column(MutableList.as_mutable(JSONB))
    is_firmware = mapped_column(Boolean, nullable=False)

    # 1:1 — set only when this object is a firmware root.
    firmware = relationship(
        'FirmwareEntry',
        back_populates='root_object',
        uselist=False,
        cascade='all, delete',
    )
    # n:n — direct containers of this file.
    parent_files = relationship(
        'FileObjectEntry',
        secondary=included_files_table,
        primaryjoin=uid == included_files_table.c.child_uid,
        secondaryjoin=uid == included_files_table.c.parent_uid,
        back_populates='included_files',
    )
    # n:n — files directly contained in this one.
    included_files = relationship(
        'FileObjectEntry',
        secondary=included_files_table,
        primaryjoin=uid == included_files_table.c.parent_uid,
        secondaryjoin=uid == included_files_table.c.child_uid,
        back_populates='parent_files',
    )
    # n:n — firmware roots this file (transitively) belongs to; the reverse
    # accessor `all_included_files` lists every file under a root.
    root_firmware = relationship(
        'FileObjectEntry',
        secondary=fw_files_table,
        primaryjoin=uid == fw_files_table.c.file_uid,
        secondaryjoin=uid == fw_files_table.c.root_uid,
        backref=backref('all_included_files'),
    )
    analyses = relationship(  # 1:n
        'AnalysisEntry',
        back_populates='file_object',
        cascade='all, delete-orphan',  # the analysis should be deleted when the file object is deleted
    )
    comparisons = relationship(  # n:n
        'ComparisonEntry',
        secondary=comparisons_table,
        cascade='all, delete',  # comparisons should also be deleted when the file object is deleted
        backref=backref('file_objects'),
    )

    def get_included_uids(self) -> set[str]:
        """Return the UIDs of all directly contained files."""
        return {child.uid for child in self.included_files}

    def METHOD_NAME(self) -> set[str]:
        """Return the UIDs of all direct parent files."""
        return {parent.uid for parent in self.parent_files}

    def get_parent_fw_uids(self) -> set[str]:
        """Return the UIDs of all firmware roots containing this file."""
        return {fw.uid for fw in self.root_firmware}

    def __repr__(self) -> str:
        return f'FileObject({self.uid}, {self.file_name}, {self.is_firmware})'
class FirmwareEntry(Base):
    """Firmware metadata; 1:1 with the root FileObjectEntry (same uid)."""

    __tablename__ = 'firmware'

    uid = mapped_column(UID, ForeignKey('file_object.uid', ondelete='CASCADE'), primary_key=True)
    submission_date = mapped_column(Float, nullable=False)
    release_date = mapped_column(Date, nullable=False)
    version = mapped_column(VARCHAR, nullable=False)
    vendor = mapped_column(VARCHAR, nullable=False)
    device_name = mapped_column(VARCHAR, nullable=False)
    device_class = mapped_column(VARCHAR, nullable=False)
    device_part = mapped_column(VARCHAR, nullable=False)
    firmware_tags = mapped_column(ARRAY(VARCHAR, dimensions=1))  # list of strings

    root_object = relationship('FileObjectEntry', back_populates='firmware')
class ComparisonEntry(Base):
    """Result of comparing a set of files (members via comparisons_table)."""

    __tablename__ = 'comparison'

    comparison_id = mapped_column(VARCHAR, primary_key=True)
    submission_date = mapped_column(Float, nullable=False)
    data = mapped_column(MutableDict.as_mutable(JSONB))  # comparison result payload
class StatsEntry(Base):
    """Named, precomputed statistics stored as a JSON blob."""

    __tablename__ = 'stats'

    name = mapped_column(VARCHAR, primary_key=True)
    data = mapped_column(MutableDict.as_mutable(JSONB), nullable=False)
class SearchCacheEntry(Base):
    """Cached search derived from a YARA rule match."""

    __tablename__ = 'search_cache'

    uid = mapped_column(UID, primary_key=True)
    query = mapped_column(VARCHAR, nullable=False)  # the query that searches for the files that the YARA rule matched
    yara_rule = mapped_column(VARCHAR, nullable=False)
class WebInterfaceTemplateEntry(Base):
    """Per-plugin web template stored as raw bytes."""

    __tablename__ = 'templates'

    plugin = mapped_column(VARCHAR, primary_key=True)
    template = mapped_column(LargeBinary, nullable=False)
class VirtualFilePath(Base):
    """Represents a file path `file_path` of file `file_object` extracted from `_parent_object`"""

    __tablename__ = 'virtual_file_path'

    parent_uid = mapped_column(UID, ForeignKey('file_object.uid', ondelete='CASCADE'), nullable=False, index=True)
    file_uid = mapped_column(UID, ForeignKey('file_object.uid', ondelete='CASCADE'), nullable=False, index=True)
    file_path = mapped_column(VARCHAR, nullable=False)

    _file_object = relationship('FileObjectEntry', uselist=False, foreign_keys=[file_uid])
    # for cascade deletion:
    _parent_object = relationship('FileObjectEntry', uselist=False, foreign_keys=[parent_uid])

    # unique constraint: each combination of parent + child + path should be unique
    __table_args__ = (PrimaryKeyConstraint('parent_uid', 'file_uid', 'file_path', name='_vfp_primary_key'),)
@event.listens_for(Session, 'persistent_to_deleted')
def delete_file_orphans(session, deleted_object):
    """
    If a firmware is deleted, delete all "orphaned" files: files that do not belong to any firmware anymore (and also
    are not a firmware themselves).

    :param session: the Session the deletion happened in.
    :param deleted_object: the ORM instance that transitioned to "deleted";
        only FirmwareEntry deletions trigger the cleanup below.
    """
    if isinstance(deleted_object, FirmwareEntry):
        # synchronize_session='fetch' keeps in-memory objects consistent with
        # the bulk DELETE issued here.
        session.execute(
            delete(FileObjectEntry)
            .where(~FileObjectEntry.is_firmware, ~FileObjectEntry.root_firmware.any())
            .execution_options(synchronize_session='fetch')
        )
def raise_break(signal_number, stack_frame):
    """SIGINT handler: kill the whole child-process tree, then SIGTERM self.

    The original code had three byte-identical branches for Windows, Linux
    and Darwin/macOS; they are unified here. On any other platform the
    handler remains a deliberate no-op (matching the previous behavior).
    The signature follows the ``signal.signal`` handler convention; both
    arguments are unused.
    """
    import os
    import platform
    import psutil

    pl = platform.platform()
    if pl.startswith(("Windows", "Linux", "Darwin", "macOS")):
        pid = os.getpid()
        # Kill children first so no orphan workers survive the shutdown.
        for child in psutil.Process(pid).children(recursive=True):
            try:
                child.kill()
            except psutil.NoSuchProcess:
                pass
        # `signal` is imported at module level right after this definition.
        os.kill(pid, signal.SIGTERM)
import signal

# Install the custom SIGINT handler at import time so Ctrl-C tears down the
# whole worker process tree (see raise_break above).
signal.signal(signal.SIGINT, raise_break)
from .base_converter import BaseConverter
from .base_annotator import BaseAnnotator
from .base_mapper import BaseMapper
from .base_postaggregator import BasePostAggregator
from .base_commonmodule import BaseCommonModule
from .cravat_report import CravatReport, run_reporter
from .exceptions import *
from . import util
from . import admin_util
from .config_loader import ConfigLoader
from . import constants
from .cravat_filter import CravatFilter
from .cravat_class import Cravat
from .cravat_class import run_cravat_job as run
from .util import get_ucsc_bins, reverse_complement, translate_codon, switch_strand
from .constants import crx_def
wgs = None
def get_live_annotator(module_name):
    """Instantiate an annotator module for live (single-variant) annotation.

    Loads the module class via get_module, configures its directories, opens
    its data DB connection and runs setup(). Returns the ready instance, or
    None if any step fails (the error is printed).
    """
    try:
        import os

        ModuleClass = get_module(module_name)
        module = ModuleClass(input_file="__dummy__", live=True)
        module.annotator_name = module_name
        module.annotator_dir = os.path.dirname(module.script_path)
        module.data_dir = os.path.join(module.module_dir, "data")
        module._open_db_connection()
        module.setup()
    except Exception:
        # Bug fix: use module_name here. The old handler printed
        # module.module_name, which raised a NameError whenever `module`
        # itself failed to be constructed (e.g. get_module returned None).
        print(" module loading error: {}".format(module_name))
        import traceback

        traceback.print_exc()
        return None
    return module
def get_live_mapper(module_name):
    """Instantiate a mapper module for live annotation.

    Returns the mapper instance with base_setup() done, or None on failure
    (the error is printed).
    """
    try:
        import os

        ModuleClass = get_module(module_name)
        module = ModuleClass(
            {
                "script_path": os.path.abspath(ModuleClass.script_path),
                "input_file": "__dummy__",
                "live": True,
            }
        )
        module.base_setup()
    except Exception:
        # (was `except Exception as e` with `e` unused)
        print(" module loading error: {}".format(module_name))
        import traceback

        traceback.print_exc()
        return None
    return module
def get_module(module_name):
    """Load a module class by name from the local module registry.

    Attaches script_path, module_name, module_dir and the loaded module
    configuration onto the class before returning it. Returns None on
    failure (the error is printed).
    """
    try:
        import os

        config_loader = ConfigLoader()
        module_info = admin_util.get_local_module_info(module_name)
        script_path = module_info.script_path
        ModuleClass = util.load_class(script_path)
        ModuleClass.script_path = script_path
        ModuleClass.module_name = module_name
        ModuleClass.module_dir = os.path.dirname(script_path)
        ModuleClass.conf = config_loader.get_module_conf(module_name)
        return ModuleClass
    except Exception:
        # (was `except Exception as e` with `e` unused)
        print(" module loading error: {}".format(module_name))
        import traceback

        traceback.print_exc()
        return None
def get_wgs_reader(assembly="hg38"):
    """Build a whole-genome-sequence reader for the given assembly.

    Loads the ``<assembly>wgs`` module, instantiates and sets it up.
    Returns None when the module cannot be loaded. Note: the module-level
    ``wgs`` global is intentionally not used as a cache here; a fresh reader
    is created on every call.
    """
    reader_cls = get_module(assembly + "wgs")
    if reader_cls is None:
        return None
    reader = reader_cls()
    reader.setup()
    return reader
class LiveAnnotator:
    """Annotates single variants in memory ("live" mode), without a job run."""

    def __init__(self, mapper="hg38", annotators=[]):
        # NOTE(review): mutable default argument (`annotators=[]`); it is only
        # read here, but consider `annotators=None` to be safe.
        self.live_annotators = {}
        self.load_live_modules(mapper, annotators)
        self.variant_uid = 1

    def load_live_modules(self, mapper, annotator_names):
        """Load the mapper and every requested annotator into live mode.

        Annotators requiring secondary inputs cannot run live and are
        skipped, as are annotators that fail to load.
        """
        self.live_mapper = get_live_mapper(mapper)
        for module_name in admin_util.mic.local.keys():
            if module_name in annotator_names:
                module = admin_util.mic.local[module_name]
                if "secondary_inputs" in module.conf:
                    continue
                annotator = get_live_annotator(module.name)
                if annotator is None:
                    continue
                self.live_annotators[module.name] = annotator

    def METHOD_NAME(self, d):
        """Recursively replace empty values ('' / {}) with None in dict `d`.

        Returns None instead of the dict when every value ended up None.
        Mutates `d` in place and also returns it (or None).
        """
        keys = d.keys()
        for key in keys:
            value = d[key]
            if value == "" or value == {}:
                d[key] = None
            elif type(value) is dict:
                d[key] = self.METHOD_NAME(value)
        if type(d) is dict:
            # Collapse to None when the cleaned dict carries no information.
            all_none = True
            for key in keys:
                if d[key] is not None:
                    all_none = False
                    break
            if all_none:
                d = None
        return d

    def annotate(self, crv):
        """Map one variant dict (`crv`) and run every live annotator on it.

        Returns {annotator_name: cleaned_result_or_None, ..., 'base': crx}.
        Assigns a fresh uid when `crv` has none.
        """
        from .inout import AllMappingsParser
        from cravat.constants import all_mappings_col_name
        if "uid" not in crv:
            crv["uid"] = self.variant_uid
            self.variant_uid += 1
        response = {}
        crx_data = self.live_mapper.map(crv)
        crx_data = self.live_mapper.live_report_substitute(crx_data)
        # Temporary helper some annotators read; removed again before return.
        crx_data["tmp_mapper"] = AllMappingsParser(crx_data[all_mappings_col_name])
        for k, v in self.live_annotators.items():
            try:
                annot_data = v.annotate(input_data=crx_data)
                annot_data = v.live_report_substitute(annot_data)
                if annot_data == "" or annot_data == {}:
                    annot_data = None
                elif type(annot_data) is dict:
                    annot_data = self.METHOD_NAME(annot_data)
                response[k] = annot_data
            except Exception as e:
                # A failing annotator must not break the others; report None.
                import traceback
                traceback.print_exc()
                response[k] = None
        del crx_data["tmp_mapper"]
        response["base"] = crx_data
        return response
## Copyright (c) 2018 Intel Corporation
## SPDX-License-Identifier: Apache-2.0
import struct
import numpy as np
# Tensor Archive (TZA) file format
VERSION = (2, 0)
_MAGIC = 0x41D7
# Writes tensors to a TZA file
class Writer(object):
# Creates a new file
def __init__(self, filename):
self._table = []
self._file = open(filename, 'wb')
self._write_header()
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
# Encodes data type
def _encode_dtype(self, dtype):
if dtype == np.float32:
return 'f'
elif dtype == np.float16:
return 'h'
elif dtype == np.int8:
return 'b'
elif dtype == np.uint8:
return 'B'
else:
raise ValueError('unsupported tensor data type')
def _write_uint8(self, x):
self._file.write(struct.pack('B', x))
def _write_uint16(self, x):
self._file.write(struct.pack('H', x))
def _write_uint32(self, x):
self._file.write(struct.pack('I', x))
def _write_uint64(self, x):
self._file.write(struct.pack('Q', x))
# Writes a raw byte string (without length) to the file
def _write_raw_str(self, str):
self._file.write(str.encode(encoding='ascii'))
# Writes an UTF-8 string to the file
def _write_str(self, str):
data = str.encode()
self._write_uint16(len(data))
self._file.write(data)
# Writes padding to the file
def _write_pad(self, alignment=64):
offset = self._file.tell()
pad = (offset + alignment - 1) // alignment * alignment - offset
for _ in range(pad):
self._write_uint8(0)
# Writes the header to the file
def _write_header(self):
self._write_uint16(_MAGIC)
self._write_uint8(VERSION[0])
self._write_uint8(VERSION[1])
self._write_uint64(0) # placeholder for the table offset
# Writes the table to the file
def _write_table(self):
self._write_pad()
table_offset = self._file.tell()
self._write_uint32(len(self._table))
for name, shape, layout, dtype, offset in self._table:
self._write_str(name)
ndims = len(shape)
self._write_uint8(ndims)
for dim in shape:
self._write_uint32(dim)
self._write_raw_str(layout)
self._write_raw_str(dtype)
self._write_uint64(offset)
self._file.seek(4) # skip magic and version
self._write_uint64(table_offset)
# Writes a tensor to the file
def write(self, name, tensor, layout):
shape = tensor.shape
ndims = len(shape)
if len(layout) != ndims:
raise ValueError('invalid tensor layout')
dtype = self._encode_dtype(tensor.dtype)
self._write_pad()
offset = self._file.tell()
self._table.append((name, shape, layout, dtype, offset))
tensor.tofile(self._file)
# Closes the file
def close(self):
self._write_table()
self._file.close()
# Reads tensors from a TZA file
class Reader(object):
    """Reads named tensors from a TZA archive.

    The table of contents is parsed eagerly in __init__ and the file handle
    is closed again, keeping the object picklable; tensor data is served
    lazily through a read-only numpy memmap created on first access.
    """

    # Opens a file
    def __init__(self, filename):
        self.filename = filename
        self._file = open(filename, 'rb')
        # Read the header and the table of contents
        self._read_header()
        self._read_table()
        # Close the file for now
        # We want to keep the object serializable (for multiprocessing)
        self._file.close()
        del self._file
        # We will lazily map the file into memory
        self._buffer = None

    # Returns the number of stored tensors
    def __len__(self):
        return len(self._table)

    # Returns a (tensor, layout) tuple given the name of a tensor
    def __getitem__(self, name):
        # Lazily map the entire file into memory
        if self._buffer is None:
            self._buffer = np.memmap(self.filename,
                                     dtype=np.uint8,
                                     mode='r')
        # Look up the requested tensor in the table
        shape, layout, dtype, offset = self._table[name]
        # Get the tensor from the memory mapped buffer
        tensor = np.ndarray(shape,
                            dtype=dtype,
                            buffer=self._buffer,
                            offset=offset)
        # Return the tensor and its layout
        return tensor, layout

    # Decodes data type
    def _decode_dtype(self, dtype):
        # Inverse of Writer._encode_dtype.
        if dtype == 'f':
            return np.float32
        elif dtype == 'h':
            return np.float16
        elif dtype == 'b':
            return np.int8
        elif dtype == 'B':
            return np.uint8
        else:
            raise ValueError('unsupported tensor data type')

    def _read_uint8(self):
        return struct.unpack('B', self._file.read(1))[0]

    def METHOD_NAME(self):
        # Reads a uint16 (native byte order, as written by Writer).
        return struct.unpack('H', self._file.read(2))[0]

    def _read_uint32(self):
        return struct.unpack('I', self._file.read(4))[0]

    def _read_uint64(self):
        return struct.unpack('Q', self._file.read(8))[0]

    # Reads a raw byte string (without length) from the file
    def _read_raw_str(self, size):
        return self._file.read(size).decode(encoding='ascii')

    # Reads an UTF-8 string from the file
    def _read_str(self):
        # Length-prefixed (uint16) UTF-8, mirroring Writer._write_str.
        n = self.METHOD_NAME()
        data = self._file.read(n)
        return data.decode()

    # Reads the header from the file
    def _read_header(self):
        magic = self.METHOD_NAME()
        if magic != _MAGIC:
            raise ValueError('invalid tensor format')
        self._version = (self._read_uint8(), self._read_uint8())
        # Only the major version must match; minor bumps stay compatible.
        if self._version[0] != VERSION[0]:
            raise ValueError('unsupported tensor format version')
        self._table_offset = self._read_uint64()

    # Reads the table from the file
    def _read_table(self):
        self._table = {}
        self._file.seek(self._table_offset)
        num_tensors = self._read_uint32()
        for _ in range(num_tensors):
            name = self._read_str()
            ndims = self._read_uint8()
            shape = tuple(self._read_uint32() for _ in range(ndims))
            layout = self._read_raw_str(ndims)
            dtype = self._decode_dtype(self._read_raw_str(1))
            offset = self._read_uint64()
            self._table[name] = (shape, layout, dtype, offset)
import contextlib
import io
import os
import sys
import tempfile
# fcntl is POSIX-only; on Windows it is unavailable and fsync stays os.fsync.
try:
    import fcntl
except ImportError:
    fcntl = None

# `fspath` was added in Python 3.6
try:
    from os import fspath
except ImportError:
    fspath = None

__version__ = '1.4.1'

PY2 = sys.version_info[0] == 2
text_type = unicode if PY2 else str  # noqa


def _path_to_unicode(x):
    """Decode a byte path to text with the filesystem encoding (text passes through)."""
    if not isinstance(x, text_type):
        return x.decode(sys.getfilesystemencoding())
    return x


DEFAULT_MODE = "wb" if PY2 else "w"


_proper_fsync = os.fsync


if sys.platform != 'win32':
    if hasattr(fcntl, 'F_FULLFSYNC'):
        def _proper_fsync(fd):
            # On macOS plain fsync does not force the write to the physical
            # medium; F_FULLFSYNC does. See:
            # https://lists.apple.com/archives/darwin-dev/2005/Feb/msg00072.html
            # https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/fsync.2.html
            # https://github.com/untitaker/python-atomicwrites/issues/6
            fcntl.fcntl(fd, fcntl.F_FULLFSYNC)

    def _sync_directory(directory):
        # Ensure that filenames are written to disk
        fd = os.open(directory, 0)
        try:
            _proper_fsync(fd)
        finally:
            os.close(fd)

    def _replace_atomic(src, dst):
        # POSIX rename() replaces dst atomically if it exists.
        os.rename(src, dst)
        _sync_directory(os.path.normpath(os.path.dirname(dst)))

    def _move_atomic(src, dst):
        # link()+unlink() instead of rename(): link fails if dst exists,
        # which gives the required no-overwrite semantics.
        os.link(src, dst)
        os.unlink(src)

        src_dir = os.path.normpath(os.path.dirname(src))
        dst_dir = os.path.normpath(os.path.dirname(dst))
        _sync_directory(dst_dir)
        if src_dir != dst_dir:
            _sync_directory(src_dir)
else:
    from ctypes import windll, WinError

    _MOVEFILE_REPLACE_EXISTING = 0x1
    _MOVEFILE_WRITE_THROUGH = 0x8
    # WRITE_THROUGH: MoveFileExW returns only after the move is on disk.
    _windows_default_flags = _MOVEFILE_WRITE_THROUGH

    def _handle_errors(rv):
        # MoveFileExW returns 0 on failure; surface the Win32 error.
        if not rv:
            raise WinError()

    def _replace_atomic(src, dst):
        _handle_errors(windll.kernel32.MoveFileExW(
            _path_to_unicode(src), _path_to_unicode(dst),
            _windows_default_flags | _MOVEFILE_REPLACE_EXISTING
        ))

    def _move_atomic(src, dst):
        # Without REPLACE_EXISTING the call fails when dst already exists.
        _handle_errors(windll.kernel32.MoveFileExW(
            _path_to_unicode(src), _path_to_unicode(dst),
            _windows_default_flags
        ))
def METHOD_NAME(src, dst):
    '''
    Move ``src`` to ``dst``. If ``dst`` exists, it will be silently
    overwritten.

    Both paths must reside on the same filesystem for the operation to be
    atomic. (POSIX: rename(2) + directory fsync; Windows: MoveFileExW with
    MOVEFILE_REPLACE_EXISTING.)
    '''
    return _replace_atomic(src, dst)
def move_atomic(src, dst):
    '''
    Move ``src`` to ``dst``. There might a timewindow where both filesystem
    entries exist. If ``dst`` already exists, :py:exc:`FileExistsError` will be
    raised.

    Both paths must reside on the same filesystem for the operation to be
    atomic. (POSIX: link(2) + unlink(2); Windows: MoveFileExW without
    MOVEFILE_REPLACE_EXISTING.)
    '''
    return _move_atomic(src, dst)
class AtomicWriter(object):
    '''
    A helper class for performing atomic writes. Usage::

        with AtomicWriter(path).open() as f:
            f.write(...)

    :param path: The destination filepath. May or may not exist.
    :param mode: The filemode for the temporary file. This defaults to `wb` in
        Python 2 and `w` in Python 3.
    :param overwrite: If set to false, an error is raised if ``path`` exists.
        Errors are only raised after the file has been written to. Either way,
        the operation is atomic.
    :param open_kwargs: Keyword-arguments to pass to the underlying
        :py:func:`open` call. This can be used to set the encoding when opening
        files in text-mode.

    If you need further control over the exact behavior, you are encouraged to
    subclass.
    '''

    def __init__(self, path, mode=DEFAULT_MODE, overwrite=False,
                 **open_kwargs):
        if 'a' in mode:
            raise ValueError(
                'Appending to an existing file is not supported, because that '
                'would involve an expensive `copy`-operation to a temporary '
                'file. Open the file in normal `w`-mode and copy explicitly '
                'if that\'s what you\'re after.'
            )
        if 'x' in mode:
            raise ValueError('Use the `overwrite`-parameter instead.')
        if 'w' not in mode:
            raise ValueError('AtomicWriters can only be written to.')

        # Attempt to convert `path` to `str` or `bytes`
        if fspath is not None:
            path = fspath(path)
        self._path = path
        self._mode = mode
        self._overwrite = overwrite
        self._open_kwargs = open_kwargs

    def open(self):
        '''
        Open the temporary file.
        '''
        return self._open(self.get_fileobject)

    @contextlib.contextmanager
    def _open(self, get_fileobject):
        # Write to the temp file; on success sync + commit (atomic rename),
        # otherwise roll back (remove the temp file), swallowing rollback
        # errors so the original exception propagates.
        f = None  # make sure f exists even if get_fileobject() fails
        try:
            success = False
            with get_fileobject(**self._open_kwargs) as f:
                yield f
                self.sync(f)
            self.commit(f)
            success = True
        finally:
            if not success:
                try:
                    self.rollback(f)
                except Exception:
                    pass

    def get_fileobject(self, suffix="", prefix=tempfile.gettempprefix(),
                       dir=None, **kwargs):
        '''Return the temporary file to use.'''
        # Created in the destination directory by default so commit() can
        # rename within one filesystem (required for atomicity).
        if dir is None:
            dir = os.path.normpath(os.path.dirname(self._path))
        descriptor, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                            dir=dir)
        # io.open() will take either the descriptor or the name, but we need
        # the name later for commit()/replace_atomic() and couldn't find a way
        # to get the filename from the descriptor.
        os.close(descriptor)
        kwargs['mode'] = self._mode
        kwargs['file'] = name
        return io.open(**kwargs)

    def sync(self, f):
        '''responsible for clearing as many file caches as possible before
        commit'''
        f.flush()
        _proper_fsync(f.fileno())

    def commit(self, f):
        '''Move the temporary file to the target location.'''
        if self._overwrite:
            METHOD_NAME(f.name, self._path)
        else:
            move_atomic(f.name, self._path)

    def rollback(self, f):
        '''Clean up all temporary resources.'''
        os.unlink(f.name)
def atomic_write(path, writer_cls=AtomicWriter, **cls_kwargs):
    '''
    Simple atomic writes. This wraps :py:class:`AtomicWriter`::

        with atomic_write(path) as f:
            f.write(...)

    :param path: The target path to write to.
    :param writer_cls: The writer class to use. This parameter is useful if you
        subclassed :py:class:`AtomicWriter` to change some behavior and want to
        use that new subclass.

    Additional keyword arguments are passed to the writer class. See
    :py:class:`AtomicWriter`.
    '''
    return writer_cls(path, **cls_kwargs).open()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
from METHOD_NAME import WideDeepLayer
class StaticModel():
    """Wide&Deep CTR model defined as a PaddlePaddle static graph."""

    def __init__(self, config):
        self.cost = None
        self.config = config
        self._init_hyper_parameters()
        self.sync_mode = config.get("runner.sync_mode")

    def _init_hyper_parameters(self):
        """Read all hyper-parameters from the config dict onto self."""
        self.is_distributed = False
        self.distributed_embedding = False

        if self.config.get("hyper_parameters.distributed_embedding", 0) == 1:
            self.distributed_embedding = True

        self.sparse_feature_number = self.config.get(
            "hyper_parameters.sparse_feature_number")
        self.sparse_feature_dim = self.config.get(
            "hyper_parameters.sparse_feature_dim")
        self.sparse_inputs_slots = self.config.get(
            "hyper_parameters.sparse_inputs_slots")
        self.dense_input_dim = self.config.get(
            "hyper_parameters.dense_input_dim")
        self.learning_rate = self.config.get(
            "hyper_parameters.optimizer.learning_rate")
        self.fc_sizes = self.config.get("hyper_parameters.fc_sizes")

    def create_feeds(self, is_infer=False):
        """Declare the static-graph input tensors.

        Returns [label, sparse_1 .. sparse_{slots-1}, dense_input]; the
        sparse feeds are named "1".."{slots-1}".
        """
        dense_input = paddle.static.data(
            name="dense_input",
            shape=[None, self.dense_input_dim],
            dtype="float32")

        sparse_input_ids = [
            paddle.static.data(
                name=str(i), shape=[None, 1], dtype="int64")
            for i in range(1, self.sparse_inputs_slots)
        ]

        label = paddle.static.data(
            name="label", shape=[None, 1], dtype="int64")

        self._sparse_data_var = [label] + sparse_input_ids
        self._dense_data_var = [dense_input]

        feeds_list = [label] + sparse_input_ids + [dense_input]
        return feeds_list

    def METHOD_NAME(self, input, is_infer=False):
        """Build the forward graph; returns the fetch dict (cost/auc).

        `input` is the list produced by create_feeds (label first, dense
        input last).
        """
        self.label_input = input[0]
        self.sparse_inputs = input[1:self.sparse_inputs_slots]
        self.dense_input = input[-1]
        sparse_number = self.sparse_inputs_slots - 1

        wide_deep_model = WideDeepLayer(
            self.sparse_feature_number,
            self.sparse_feature_dim,
            self.dense_input_dim,
            sparse_number,
            self.fc_sizes,
            sync_mode=self.sync_mode)

        self.cast_label = paddle.cast(self.label_input, dtype='float32')
        ones = paddle.full_like(
            self.label_input, fill_value=1, dtype="float32")
        # show/click statistics per sample: column 0 = shows (always 1),
        # column 1 = clicks (the label); not back-propagated through.
        show_click = paddle.cast(
            paddle.concat(
                [ones, self.cast_label], axis=1), dtype='float32')
        show_click.stop_gradient = True

        pred = wide_deep_model.forward(self.sparse_inputs, self.dense_input,
                                       show_click)

        # Two-column [P(neg), P(pos)] as required by paddle.static.auc.
        predict_2d = paddle.concat(x=[1 - pred, pred], axis=1)
        self.predict = predict_2d

        auc, batch_auc, [
            self.batch_stat_pos, self.batch_stat_neg, self.stat_pos,
            self.stat_neg
        ] = paddle.static.auc(input=self.predict,
                              label=self.label_input,
                              num_thresholds=2**12,
                              slide_steps=20)
        auc = paddle.cast(auc, "float32")
        self.inference_target_var = auc
        if is_infer:
            fetch_dict = {'auc': auc}
            return fetch_dict

        cost = paddle.nn.functional.log_loss(
            input=pred, label=paddle.cast(
                self.label_input, dtype="float32"))
        avg_cost = paddle.mean(x=cost)
        self._cost = avg_cost

        fetch_dict = {'cost': avg_cost, 'auc': auc}
        return fetch_dict

    def create_optimizer(self, strategy=None):
        """Create the Adam optimizer and minimize the training cost.

        When a fleet DistributedStrategy is given, wraps the optimizer for
        distributed training.
        """
        optimizer = paddle.optimizer.Adam(
            learning_rate=self.learning_rate, lazy_mode=True)
        if strategy != None:
            import paddle.distributed.fleet as fleet
            optimizer = fleet.distributed_optimizer(optimizer, strategy)
        optimizer.minimize(self._cost)

    def infer_net(self, input):
        """Build the graph in inference mode (fetches only auc)."""
        return self.METHOD_NAME(input, is_infer=True)
# Copyright (c) 2022 Samsung Electronics Co., Ltd. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Test visqlib.QErrorComputer module'''
import unittest
import tempfile
import numpy as np
import os
import json
from visqlib.QErrorComputer import MPEIRComputer
from visqlib.QErrorComputer import MSEComputer
from visqlib.QErrorComputer import TAEComputer
from visqlib.QErrorComputer import SRMSEComputer
class VisqQErrorComputerTest(unittest.TestCase):
    """Tests for the visqlib.QErrorComputer quantization-error computers.

    Each test writes a small fp32 / fake-quantized dump layout
    (``tensors.json``, ``scales.txt`` and per-record ``<record>/<tensor_id>.npy``
    files) into temporary directories, then checks the qerror map and the
    min/max values produced by each computer.
    """

    def setUp(self):
        "Called before running each test"
        # Fresh dump directories for the fp32 and fake-quantized data.
        self.fp32_dir = tempfile.TemporaryDirectory()
        self.fq_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        "Called after running each test"
        self.fp32_dir.cleanup()
        self.fq_dir.cleanup()

    def _setUpSingleTensorData(self):
        # One tensor ('test', id 0) with a single record; both dumps are
        # all-zero, so every error metric is expected to be exactly 0.
        tensor_id = {}
        tensor_id['test'] = 0
        with open(self.fp32_dir.name + '/tensors.json', 'w') as f:
            json.dump(tensor_id, f)
        with open(self.fq_dir.name + '/tensors.json', 'w') as f:
            json.dump(tensor_id, f)
        scales = {}
        scales['test'] = 2.0
        with open(self.fq_dir.name + '/scales.txt', 'w') as f:
            json.dump(scales, f)
        os.mkdir(self.fp32_dir.name + '/0')
        os.mkdir(self.fq_dir.name + '/0')
        test_data = np.zeros(16)
        np.save(self.fp32_dir.name + '/0/0.npy', test_data)
        np.save(self.fq_dir.name + '/0/0.npy', test_data)

    def _setUpTwoTensorData(self):
        # One tensor with two records: fp32 = (ones, zeros), fq = (zeros, zeros),
        # so record errors are (1, 0) for MSE-style metrics.
        tensor_id = {}
        tensor_id['test'] = 0
        with open(self.fp32_dir.name + '/tensors.json', 'w') as f:
            json.dump(tensor_id, f)
        with open(self.fq_dir.name + '/tensors.json', 'w') as f:
            json.dump(tensor_id, f)
        scales = {}
        scales['test'] = 2.0
        with open(self.fq_dir.name + '/scales.txt', 'w') as f:
            json.dump(scales, f)
        os.mkdir(self.fp32_dir.name + '/0')
        os.mkdir(self.fp32_dir.name + '/1')
        os.mkdir(self.fq_dir.name + '/0')
        os.mkdir(self.fq_dir.name + '/1')
        test_data_one = np.ones(16)
        test_data_zero = np.zeros(16)
        np.save(self.fp32_dir.name + '/0/0.npy', test_data_one)
        np.save(self.fp32_dir.name + '/1/0.npy', test_data_zero)
        np.save(self.fq_dir.name + '/0/0.npy', test_data_zero)
        np.save(self.fq_dir.name + '/1/0.npy', test_data_zero)
        # Golden: (1 + 0) / 2 = 0.5 for MSE

    def _setUpDifferentTensorData(self):
        # Two fp32 data (test, test2)
        # One fq data (test)
        # NOTE When does this happen?
        # This case can happen because visq ignores nodes that do not affect qerrors.
        # For example, RESHAPE Op does not affect qerrors, so its fq data is not dumped,
        # although it is listed in 'tensors.json'.
        tensor_id = {}
        tensor_id['test'] = 0
        tensor_id['test2'] = 1
        with open(self.fp32_dir.name + '/tensors.json', 'w') as f:
            json.dump(tensor_id, f)
        with open(self.fq_dir.name + '/tensors.json', 'w') as f:
            json.dump(tensor_id, f)
        scales = {}
        scales['test'] = 2.0
        scales['test2'] = 1.0
        with open(self.fq_dir.name + '/scales.txt', 'w') as f:
            json.dump(scales, f)
        os.mkdir(self.fp32_dir.name + '/0')
        os.mkdir(self.fq_dir.name + '/0')
        test_data = np.zeros(16)
        np.save(self.fp32_dir.name + '/0/0.npy', test_data)
        np.save(self.fp32_dir.name + '/0/1.npy', test_data)
        np.save(self.fq_dir.name + '/0/0.npy', test_data)

    def test_MPEIR(self):
        """Identical dumps yield zero MPEIR."""
        self._setUpSingleTensorData()
        computer = MPEIRComputer(self.fp32_dir.name, self.fq_dir.name)
        qmap, _, _ = computer.run()
        self.assertAlmostEqual(0.0, qmap['test'])

    def test_MPEIR_different_tensors(self):
        """Tensors without fq data are skipped without error."""
        self._setUpDifferentTensorData()
        computer = MPEIRComputer(self.fp32_dir.name, self.fq_dir.name)
        qmap, _, _ = computer.run()
        self.assertAlmostEqual(0.0, qmap['test'])

    def test_MSE(self):
        """Identical dumps yield zero MSE and zero min/max."""
        self._setUpSingleTensorData()
        computer = MSEComputer(self.fp32_dir.name, self.fq_dir.name)
        qmap, qmin, qmax = computer.run()
        self.assertAlmostEqual(0.0, qmap['test'])
        self.assertAlmostEqual(0.0, qmin)
        self.assertAlmostEqual(0.0, qmax)

    def test_MSE_two(self):
        """MSE averages per-record errors: (1 + 0) / 2 = 0.5."""
        self._setUpTwoTensorData()
        computer = MSEComputer(self.fp32_dir.name, self.fq_dir.name)
        qmap, qmin, qmax = computer.run()
        self.assertAlmostEqual(0.5, qmap['test'])
        self.assertAlmostEqual(0.0, qmin)
        self.assertAlmostEqual(1.0, qmax)

    def test_MSE_different_tensors(self):
        """Tensors without fq data are skipped without error."""
        self._setUpDifferentTensorData()
        computer = MSEComputer(self.fp32_dir.name, self.fq_dir.name)
        qmap, qmin, qmax = computer.run()
        self.assertAlmostEqual(0.0, qmap['test'])
        self.assertAlmostEqual(0.0, qmin)
        self.assertAlmostEqual(0.0, qmax)

    def test_TAE(self):
        """Identical dumps yield zero total absolute error."""
        self._setUpSingleTensorData()
        computer = TAEComputer(self.fp32_dir.name, self.fq_dir.name)
        qmap, qmin, qmax = computer.run()
        self.assertAlmostEqual(0.0, qmap['test'])

    def test_TAE_different_options(self):
        """Tensors without fq data are skipped without error."""
        self._setUpDifferentTensorData()
        computer = TAEComputer(self.fp32_dir.name, self.fq_dir.name)
        qmap, qmin, qmax = computer.run()
        self.assertAlmostEqual(0.0, qmap['test'])
        self.assertAlmostEqual(0.0, qmin)
        self.assertAlmostEqual(0.0, qmax)

    def test_TAE_two(self):
        """TAE averages per-record sums: (16 + 0) / 2 = 8; max record is 16."""
        self._setUpTwoTensorData()
        computer = TAEComputer(self.fp32_dir.name, self.fq_dir.name)
        qmap, qmin, qmax = computer.run()
        self.assertAlmostEqual(0.0, qmin)
        self.assertAlmostEqual(8.0, qmap['test'])
        self.assertAlmostEqual(16.0, qmax)

    def METHOD_NAME(self):
        """Identical dumps yield zero scaled RMSE."""
        self._setUpSingleTensorData()
        computer = SRMSEComputer(self.fp32_dir.name, self.fq_dir.name)
        qmap, qmin, qmax = computer.run()
        self.assertAlmostEqual(0.0, qmap['test'])
        self.assertAlmostEqual(0.0, qmin)
        self.assertAlmostEqual(0.0, qmax)

    def test_SRMSE_different_options(self):
        """Tensors without fq data are skipped without error."""
        self._setUpDifferentTensorData()
        computer = SRMSEComputer(self.fp32_dir.name, self.fq_dir.name)
        qmap, qmin, qmax = computer.run()
        self.assertAlmostEqual(0.0, qmap['test'])
        self.assertAlmostEqual(0.0, qmin)
        self.assertAlmostEqual(0.0, qmax)

    def test_SRMSE_two(self):
        self._setUpTwoTensorData()
        computer = SRMSEComputer(self.fp32_dir.name, self.fq_dir.name)
        qmap, qmin, qmax = computer.run()
        # Golden: sqrt(Golden of MSE) / scale = sqrt(0.5) / 2
        self.assertAlmostEqual(np.sqrt(0.5) / 2, qmap['test'])
        self.assertAlmostEqual(0.0, qmin)
        self.assertAlmostEqual(np.sqrt(0.5) / 2, qmax)
# Stray trailing "|" artifact removed: it made the module unparsable.
if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python3
import os
import sys
import time
sys.path.insert(1, os.path.dirname(sys.path[0]))
from test_framework.test_framework import ConfluxTestFramework
from test_framework.mininode import DefaultNode, network_thread_start
from test_framework.util import connect_nodes, get_peer_addr, wait_until
from conflux.rpc import RpcClient
class NodeReputationTests(ConfluxTestFramework):
    """Integration tests for the trusted/untrusted node tables and the
    failure / demote / remove disconnect operations of node 0."""

    def set_test_params(self):
        self.num_nodes = 4
        # try to create more outgoing connections timely
        self.test_house_keeping_ms = 300
        self.conf_parameters = {
            "discovery_housekeeping_timeout_ms": str(self.test_house_keeping_ms),
            # Enable ip_limit to make node sampling robuster.
            "subnet_quota": "4",
        }

    def setup_network(self):
        # Start the nodes without connecting them; each sub-test sets up
        # its own connections.
        self.setup_nodes()

    def METHOD_NAME(self):
        # Run the three disconnect scenarios against distinct peers (1, 2, 3)
        # so their reputation states do not interfere.
        client0 = RpcClient(self.nodes[0])
        self.test_disconnect_with_failure(client0)
        self.test_disconnect_with_demote(client0)
        self.test_disconnect_with_remove(client0)

    def connect_nodes(self, client0: RpcClient, to_index: int) -> dict:
        """Connect node 0 to node ``to_index`` and return node 0's view
        (a ``(table, info)`` pair) of the freshly connected trusted peer."""
        connect_nodes(self.nodes, 0, to_index)
        node = client0.get_node(self.nodes[to_index].key)
        assert node is not None
        assert node[0] == "trusted"
        assert node[1]["lastConnected"].get("success")
        assert node[1]["lastContact"].get("success")
        assert node[1]["streamToken"] is not None
        return node

    def compare_node_time(self, t1: dict, t2: dict):
        """Three-way compare of (secs, nanos) timestamp dicts: 1/-1/0."""
        if t1["secs_since_epoch"] > t2["secs_since_epoch"]:
            return 1
        if t1["secs_since_epoch"] < t2["secs_since_epoch"]:
            return -1
        if t1["nanos_since_epoch"] > t2["nanos_since_epoch"]:
            return 1
        if t1["nanos_since_epoch"] < t2["nanos_since_epoch"]:
            return -1
        return 0

    def test_disconnect_with_failure(self, client0: RpcClient):
        """Failure-disconnect keeps the peer trusted and reconnectable."""
        n = self.connect_nodes(client0, 1)
        assert client0.disconnect_peer(self.nodes[1].key, client0.UPDATE_NODE_OP_FAILURE)
        # Node 1 is still in trusted node table, only marked as failure.
        # But it may be auto connected again (by timer).
        node = client0.get_node(self.nodes[1].key)
        assert node[0] == "trusted"
        if node[1]["lastContact"].get("failure"):
            # Node 1 marked as failure
            assert node[1]["lastConnected"].get("failure")
            assert node[1]["streamToken"] == n[1]["streamToken"]
        else:
            # Node 1 auto connected by timer, so timestamp changed
            assert self.compare_node_time(node[1]["lastConnected"]["success"], n[1]["lastConnected"]["success"]) == 1
            assert self.compare_node_time(node[1]["lastContact"]["success"], n[1]["lastContact"]["success"]) == 1
        # Node 0 still create outgoing connection to Node 1
        time.sleep((self.test_house_keeping_ms + 100) / 1000)
        assert client0.get_peer(self.nodes[1].key) is not None

    def test_disconnect_with_demote(self, client0: RpcClient):
        """Demote-disconnect moves the peer to the untrusted table."""
        n = self.connect_nodes(client0, 2)
        assert client0.disconnect_peer(self.nodes[2].key, client0.UPDATE_NODE_OP_DEMOTE)
        # demote to untrusted node table
        node = client0.get_node(self.nodes[2].key)
        assert node[0] == "untrusted"
        assert node[1]["lastContact"].get("demoted")
        assert node[1]["streamToken"] == n[1]["streamToken"]
        last_connected = node[1]["lastConnected"]
        # log to debug flaky error, suspect lastConnected not updated timely, so assert it at last
        if not last_connected.get("failure"):
            self.log.info("Last connected: {}".format(last_connected))
        assert last_connected.get("failure") or last_connected.get("demoted")
        # Node 0 will not create outgoing connection to Node 2
        time.sleep((self.test_house_keeping_ms + 100) / 1000)
        assert client0.get_peer(self.nodes[2].key) is None

    def test_disconnect_with_remove(self, client0: RpcClient):
        """Remove-disconnect blacklists the peer on node 0."""
        n = self.connect_nodes(client0, 3)
        assert client0.disconnect_peer(self.nodes[3].key, client0.UPDATE_NODE_OP_REMOVE)
        # On node 0: node 3 is blacklisted, and cannot immediately add it again
        assert client0.get_node(self.nodes[3].key) is None
        self.nodes[0].addnode(self.nodes[3].key, get_peer_addr(self.nodes[3]))
        assert client0.get_node(self.nodes[3].key) is None
        # On node 3: add node 0 as trusted node, so that try to create
        # outgoing connection to node 0.
        client3 = RpcClient(self.nodes[3])
        self.nodes[3].addnode(self.nodes[0].key, get_peer_addr(self.nodes[0]))
        node0 = client3.get_node(self.nodes[0].key)
        assert node0[0] == "trusted"
        # Node 3 create more outgoing connection, but it's blacklisted in node 0.
        time.sleep((self.test_house_keeping_ms + 100) / 1000)
        peer0 = client3.get_peer(self.nodes[0].key)
        # refused during handshake or not handshaked yet
        assert peer0 is None or len(peer0["protocols"]) == 0
# Stray trailing "|" artifact removed: it made the module unparsable.
if __name__ == "__main__":
    NodeReputationTests().main()
# Copyright (c) 2012-2022 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
import os
import time
import numpy as np
import galsim
# Locate the SED (share/) and bandpass (examples/data/) files relative to
# this script's own directory.
path, filename = os.path.split(__file__)
sedpath = os.path.abspath(os.path.join(path, "../share/"))
bppath = os.path.abspath(os.path.join(path, "../examples/data/"))
def dDCR_moments(SED1, SED2, bandpass):
    """Analytic differences in the DCR first and second moments of two SEDs.

    Computes the refraction shift relative to 500nm at a 45-degree zenith
    angle, flux-averaged through ``bandpass`` for each SED.

    Returns:
        (dRbar, dV, n2): first-moment difference (arcsec), second-moment
        difference (arcsec^2), and the size of SED2's integration grid.
    """
    zenith_angle = np.pi / 4.0 * galsim.radians
    base_refraction = galsim.dcr.get_refraction(500, zenith_angle) * galsim.radians

    def shift(waves):
        # Refraction shift in arcsec, relative to the 500nm refraction.
        refraction = galsim.dcr.get_refraction(waves, zenith_angle) * galsim.radians
        return (refraction - base_refraction) / galsim.arcsec

    def grid(sed):
        # Union of bandpass and SED wavelength samples, clipped to the band.
        waves = np.union1d(bandpass.wave_list, sed.wave_list)
        keep = (waves >= bandpass.blue_limit) & (waves <= bandpass.red_limit)
        return waves[keep]

    waves1 = grid(SED1)
    waves2 = grid(SED2)
    flux1 = SED1.calculateFlux(bandpass)
    flux2 = SED2.calculateFlux(bandpass)

    # Analytic first moments (flux-weighted mean refraction shift).
    Rbar1 = np.trapz(shift(waves1) * bandpass(waves1) * SED1(waves1), waves1) / flux1
    Rbar2 = np.trapz(shift(waves2) * bandpass(waves2) * SED2(waves2), waves2) / flux2

    # Analytic second moments (flux-weighted variance of the shift).
    V1 = np.trapz((shift(waves1) - Rbar1)**2 * bandpass(waves1) * SED1(waves1), waves1) / flux1
    V2 = np.trapz((shift(waves2) - Rbar2)**2 * bandpass(waves2) * SED2(waves2), waves2) / flux2

    return Rbar1 - Rbar2, V1 - V2, len(waves2)
def dseeing_moments(SED1, SED2, bandpass):
    """Analytic fractional difference in the chromatic-seeing second moment.

    Uses the Kolmogorov wavelength scaling (w/500)^(2 * -0.2), flux-averaged
    through ``bandpass`` for each SED.

    Returns:
        (r2_1 - r2_2) / r2_1 where r2_i is SED_i's flux-weighted moment.
    """
    index = -0.2

    def weighted_moment(sed):
        # Union of bandpass and SED wavelength samples, clipped to the band.
        waves = np.union1d(bandpass.wave_list, sed.wave_list)
        waves = waves[(waves <= bandpass.red_limit) & (waves >= bandpass.blue_limit)]
        integrand = (waves / 500)**(2 * index) * bandpass(waves) * sed(waves)
        return np.trapz(integrand, waves) / sed.calculateFlux(bandpass)

    r2_1 = weighted_moment(SED1)
    r2_2 = weighted_moment(SED2)
    return (r2_1 - r2_2) / r2_1
def METHOD_NAME():
    # compare the differences in chromatic moment shifts between two SEDs as a function of
    # Bandpass thinning. Goals should be to keep the error below:
    # sigma(dRbar) < 0.01 arcsec
    # sigma(dV) < 0.0001 arcsec^2
    # sigma(dseeing) < 0.0001
    """Print thinning-accuracy and timing tables for every SED/band pair.

    For each SED (relative to the CWW_E_ext reference), redshift and
    bandpass, prints the DCR/seeing moment shifts for the full bandpass and
    for bandpasses thinned at several relative error tolerances, followed by
    a timing table (20 evaluations per configuration).
    """
    import glob
    SED_files = glob.glob(os.path.join(sedpath, '*.sed'))
    bp_files = glob.glob(os.path.join(bppath, '*.dat'))
    # Reference SED against which all others are compared.
    SED1 = galsim.SED(os.path.join(sedpath, 'CWW_E_ext.sed'),
                      wave_type='nm', flux_type='flambda').withFluxDensity(0.01, 500.0)
    SEDs = dict([(os.path.basename(SED_file),
                  galsim.SED(SED_file, wave_type='nm', flux_type='flambda')) for SED_file in SED_files])
    # The reference SED itself is not compared against itself.
    del SEDs['CWW_E_ext.sed']
    bands = dict([(os.path.basename(bp_file),
                   galsim.Bandpass(bp_file, wave_type='nm')) for bp_file in bp_files])
    redshifts = [0.0, 0.5, 1.0]
    rel_errs = [1.e-4, 1.e-3]
    for SED_name, SED0 in SEDs.items():
        for redshift in redshifts:
            SED = SED0.atRedshift(redshift).withFluxDensity(0.01, 500.0)
            for bandname, band in bands.items():
                print('{0} SED at z={1} through {2} filter'.format(
                    SED_name, redshift, bandname))
                # Full (unthinned) bandpass baseline.
                dDCR = dDCR_moments(SED1, SED, band)
                dseeing = dseeing_moments(SED1, SED, band)
                flux = SED.calculateFlux(band)
                hdr = '{0:8s} {1:>8s} {2:>8s} {3:>8s} {4:>8s} {5:>8s} {6:>8s} {7:>8s} {8:>8s}'
                print(hdr.format(
                    'rel_err', 'dRbar', 'dV', 'dseeing', 'flux',
                    'd(dRbar)', 'd(dV)', 'd(dseeing)', 'd(flux)/flux'))
                out = '{0:8} {1:8.5f} {2:8.5f} {3:8.5f} {4:8.5f}'
                print(out.format('full', dDCR[0], dDCR[1], dseeing, flux))
                # Deviations introduced by each thinning tolerance.
                for rel_err in rel_errs:
                    band1 = band.thin(rel_err=rel_err)
                    dDCR_thinned = dDCR_moments(SED1, SED, band1)
                    dseeing_thinned = dseeing_moments(SED1, SED, band1)
                    flux_thinned = SED.calculateFlux(band1)
                    out = ('{0:8s} {1:8.5f} {2:8.5f} {3:8.5f} {4:8.5f}'
                           + ' {5:8.5f} {6:8.5f} {7:8.5f} {8:8.5f}')
                    print(out.format(
                        str(rel_err), dDCR_thinned[0], dDCR_thinned[1],
                        dseeing_thinned, flux_thinned,
                        dDCR_thinned[0] - dDCR[0], dDCR_thinned[1] - dDCR[1],
                        dseeing_thinned - dseeing, (flux_thinned - flux)/flux))
                print()
                # Timing table: 20 evaluations per configuration.
                print('{0:8s} {1:>8s} {2:>8s}'.format('rel_err', 'time', 'Neval'))
                t0 = time.time()
                for i in range(20):
                    dDCR_thinned = dDCR_moments(SED1, SED, band)
                    dseeing_thinned = dseeing_moments(SED1, SED, band)
                    flux_thinned = SED.calculateFlux(band)
                t1 = time.time()
                print('{0:8s} {1:8.5f} {2:8d}'.format('full', t1-t0, dDCR_thinned[2]))
                for rel_err in rel_errs:
                    band1 = band.thin(rel_err=rel_err)
                    t0 = time.time()
                    for i in range(20):
                        dDCR_thinned = dDCR_moments(SED1, SED, band1)
                        dseeing_thinned = dseeing_moments(SED1, SED, band1)
                        flux_thinned = SED.calculateFlux(band1)
                    t1 = time.time()
                    print('{0:8s} {1:8.5f} {2:8d}'.format(str(rel_err), t1-t0, dDCR_thinned[2]))
# Stray trailing "|" artifact removed: it made the module unparsable.
if __name__ == '__main__':
    METHOD_NAME()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Meta schedule tuning utilities for Hexagon."""
import os
import tempfile
from typing import Callable, Dict, List, Optional
import tvm
from tvm.ir.module import IRModule
from tvm.runtime import Module, NDArray
from tvm.target import Target
from tvm.driver import build as tvm_build
from tvm.tir.transform import RemoveWeightLayoutRewriteBlock
from tvm.contrib.popen_pool import PopenPoolExecutor
from tvm.meta_schedule.utils import cpu_count, derived_object
from tvm.meta_schedule.builder import LocalBuilder
from tvm.meta_schedule.runner import (
EvaluatorConfig,
RunnerInput,
RunnerFuture,
PyRunner,
)
from tvm.meta_schedule.runner.rpc_runner import (
default_alloc_argument,
default_run_evaluator,
RPCRunnerFuture,
)
from .build import HexagonLauncherRPC
from .tools import export_module
@derived_object
class HexagonRPCRunner(PyRunner):
    """RPCRunner for Hexagon. See the documentation of RPCRunner for more details."""

    def __init__(
        self,
        hexagon_launcher: HexagonLauncherRPC,
        evaluator_config: Optional[EvaluatorConfig] = None,
        cooldown_sec: float = 0.0,
        alloc_repeat: int = 1,
        max_workers: Optional[int] = None,
        initializer: Optional[Callable[[], None]] = None,
    ):
        """
        Parameters
        ----------
        hexagon_launcher : HexagonLauncherRPC
            RPC launcher for Hexagon; used by the worker function to create
            a hexagon.Session per measurement.
        evaluator_config: EvaluatorConfig
            The evaluator configuration.
        cooldown_sec: float
            The cooldown in seconds.
        alloc_repeat: int
            The number of times to random fill the allocation.
        max_workers: Optional[int] = None
            The maximum number of connections. Defaults to number of logical CPU cores.
        initializer: Optional[Callable[[], None]]
            The initializer function.
        """
        super().__init__()
        self.hexagon_launcher = hexagon_launcher
        self.evaluator_config = EvaluatorConfig._normalized(evaluator_config)
        self.cooldown_sec = cooldown_sec
        self.alloc_repeat = alloc_repeat
        self.pool = PopenPoolExecutor(
            max_workers=cpu_count(logical=True) if max_workers is None else max_workers,
            timeout=100,
            initializer=initializer,
        )

    def run(self, runner_inputs: List[RunnerInput]) -> List[RunnerFuture]:
        """Submit one measurement job per input and return the futures."""

        def submit(runner_input: RunnerInput) -> RunnerFuture:
            return RPCRunnerFuture(
                future=self.pool.submit(
                    METHOD_NAME,
                    self.hexagon_launcher,
                    self.evaluator_config,
                    self.alloc_repeat,
                    str(runner_input.artifact_path),
                    tuple(arg_info.as_json() for arg_info in runner_input.args_info),
                ),
                timeout_sec=100,
            )

        return [submit(runner_input) for runner_input in runner_inputs]
def METHOD_NAME(hexagon_launcher, evaluator_config, alloc_repeat, artifact_path, args_info):
    """Measure one built candidate on the Hexagon device.

    Executed inside a ``PopenPoolExecutor`` worker process: opens a fresh RPC
    session, uploads and loads the built artifact, allocates the arguments
    on-device and runs the evaluator.

    Parameters
    ----------
    hexagon_launcher : HexagonLauncherRPC
        Launcher used to create the device session.
    evaluator_config : EvaluatorConfig
        Timing/evaluator configuration.
    alloc_repeat : int
        Number of times to random-fill the argument allocation.
    artifact_path : str
        Local path of the built module to upload.
    args_info : tuple
        JSON-converted argument descriptors (from ``ArgInfo.as_json``).

    Returns
    -------
    The run costs produced by ``default_run_evaluator``.
    """
    with hexagon_launcher.create_session() as session:
        device = session.device
        # Upload under the bare file name; the remote directory is managed
        # by the session.
        _, remote_path = os.path.split(artifact_path)
        uploaded = session.upload(artifact_path, remote_path)
        rt_mod = session.load_module(uploaded)
        repeated_args = default_alloc_argument(
            session,
            device,
            args_info,
            alloc_repeat,
        )
        costs = default_run_evaluator(
            session,
            rt_mod,
            device,
            evaluator_config,
            repeated_args,
        )
        return costs
def get_hexagon_local_builder(
    pass_context: tvm.transform.PassContext = None,
    max_workers: Optional[int] = None,
    timeout_sec: float = 30.0,
):
    """Return Hexagon-compatible Builder for meta schedule.

    When ``pass_context`` is given, builds run under that pass context
    (with weight-layout-rewrite blocks removed first); otherwise the
    LocalBuilder default build function is used.
    """

    def export_func(mod):
        # Export the built module into a fresh temporary directory.
        return str(export_module(mod, tempfile.mkdtemp()))

    def default_build_with_context(
        mod: IRModule, target: Target, _params: Optional[Dict[str, NDArray]]
    ) -> Module:
        with pass_context:
            transformed = RemoveWeightLayoutRewriteBlock(skip_ndarray_rewrite=True)(mod)
            return tvm_build(transformed, target=target)

    builder_kwargs = {
        "f_export": export_func,
        "max_workers": max_workers,
        "timeout_sec": timeout_sec,
    }
    if pass_context is not None:
        builder_kwargs["f_build"] = default_build_with_context
    return LocalBuilder(**builder_kwargs)
def get_hexagon_rpc_runner(
    hexagon_launcher: HexagonLauncherRPC,
    number=3,
    repeat=1,
    min_repeat_ms=100,
    max_workers: Optional[int] = None,
):
    """Return Hexagon-compatible RPC Runner for meta schedule.

    Parameters
    ----------
    hexagon_launcher : HexagonLauncherRPC
        The RPC launcher for Hexagon.
    number: int
        The number of times to run this function for taking average.
        We call these runs as one `repeat` of measurement.
    repeat: int
        The number of times to repeat the measurement.
        In total, the function will be invoked (1 + number x repeat) times,
        where the first one is warm up and will be discarded.
        The returned result contains `repeat` costs,
        each of which is an average of `number` costs.
    min_repeat_ms: int
        Minimum repeat time in ms. if the execution latency is too short,
        increase the number of runs to the given time (in ms) to reduce the measurement error.
    max_workers: Optional[int]
        The maximum number of pool workers, forwarded to HexagonRPCRunner.
        Defaults to the number of logical CPU cores.
    """
    evaluator_config = EvaluatorConfig(
        number=number,
        repeat=repeat,
        min_repeat_ms=min_repeat_ms,
        enable_cpu_cache_flush=False,
    )
    # Stray trailing "|" artifact removed from the return line.
    return HexagonRPCRunner(hexagon_launcher, evaluator_config, max_workers=max_workers)
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, cast
from litestar._asgi.routing_trie.types import (
ASGIHandlerTuple,
PathParameterSentinel,
create_node,
)
from litestar._asgi.utils import wrap_in_exception_handler
from litestar.types.internal_types import PathParameterDefinition
__all__ = ("add_mount_route", "add_route_to_trie", "build_route_middleware_stack", "configure_node")
if TYPE_CHECKING:
from litestar._asgi.routing_trie.types import RouteTrieNode
from litestar.app import Litestar
from litestar.routes import ASGIRoute, HTTPRoute, WebSocketRoute
from litestar.types import ASGIApp, RouteHandlerType
def add_mount_route(
    current_node: RouteTrieNode,
    mount_routes: dict[str, RouteTrieNode],
    root_node: RouteTrieNode,
    route: ASGIRoute,
) -> RouteTrieNode:
    """Add a node for a mount route.

    Args:
        current_node: The current trie node that is being mapped.
        mount_routes: A dictionary mapping static routes to trie nodes.
        root_node: The root trie node.
        route: The route that is being added.

    Returns:
        A trie node.
    """
    # we need to ensure that we can traverse the map both through the full path key, e.g. "/my-route/sub-path" and
    # via the components keys ["my-route, "sub-path"]
    if route.path not in current_node.children:
        # remember the node the component chain starts from, so the full-path
        # alias below can be attached to it
        root_node = current_node
        for component in route.path_components:
            if component not in current_node.children:
                current_node.children[component] = create_node()  # type: ignore[index]
            current_node = current_node.children[component]  # type: ignore[index]
    current_node.is_mount = True
    current_node.is_static = route.route_handler.is_static
    if route.path != "/":
        # register the same node under both the full path key and the chain
        mount_routes[route.path] = root_node.children[route.path] = current_node
    else:
        mount_routes[route.path] = current_node
    return current_node
def add_route_to_trie(
    app: Litestar,
    mount_routes: dict[str, RouteTrieNode],
    plain_routes: set[str],
    root_node: RouteTrieNode,
    route: HTTPRoute | WebSocketRoute | ASGIRoute,
) -> RouteTrieNode:
    """Add a new route path (e.g. '/foo/bar/{param:int}') into the route_map tree.

    Inserts non-parameter paths ('plain routes') off the tree's root
    node. For paths containing parameters, splits the path on '/' and
    nests each path segment under the previous segment's node (see
    prefix tree / trie).

    Args:
        app: The Litestar app instance.
        mount_routes: A dictionary mapping static routes to trie nodes.
        plain_routes: A set of routes that do not have path parameters.
        root_node: The root trie node.
        route: The route that is being added.

    Returns:
        A RouteTrieNode instance.
    """
    current_node = root_node
    has_path_parameters = bool(route.path_parameters)
    if (route_handler := getattr(route, "route_handler", None)) and getattr(route_handler, "is_mount", False):
        # mounted ASGI apps get their own sub-tree plus a full-path alias
        current_node = add_mount_route(
            current_node=current_node,
            mount_routes=mount_routes,
            root_node=root_node,
            route=cast("ASGIRoute", route),
        )
    elif not has_path_parameters:
        # parameterless routes are matched by full path directly off the root
        plain_routes.add(route.path)
        if route.path not in root_node.children:
            current_node.children[route.path] = create_node()
        current_node = root_node.children[route.path]
    else:
        # parameterized routes are matched component by component;
        # parameter segments all share the PathParameterSentinel key
        for component in route.path_components:
            if isinstance(component, PathParameterDefinition):
                current_node.is_path_param_node = True
                next_node_key: type[PathParameterSentinel] | str = PathParameterSentinel
            else:
                next_node_key = component
            if next_node_key not in current_node.children:
                current_node.children[next_node_key] = create_node()
            current_node.child_keys = set(current_node.children.keys())
            current_node = current_node.children[next_node_key]
            if isinstance(component, PathParameterDefinition) and component.type is Path:
                # a Path-typed parameter consumes the rest of the URL
                current_node.is_path_type = True
    configure_node(route=route, app=app, node=current_node)
    return current_node
def configure_node(
    app: Litestar,
    route: HTTPRoute | WebSocketRoute | ASGIRoute,
    node: RouteTrieNode,
) -> None:
    """Set required attributes and route handlers on route_map tree node.

    Args:
        app: The Litestar app instance.
        route: The route that is being added.
        node: The trie node being configured.

    Returns:
        None
    """
    # imported here (not at module level) to avoid a circular import
    from litestar.routes import HTTPRoute, WebSocketRoute

    if not node.path_parameters:
        node.path_parameters = {}
    if isinstance(route, HTTPRoute):
        # one ASGI handler per HTTP method, each with its own middleware stack
        for method, handler_mapping in route.route_handler_map.items():
            handler, _ = handler_mapping
            node.asgi_handlers[method] = ASGIHandlerTuple(
                asgi_app=METHOD_NAME(app=app, route=route, route_handler=handler),
                handler=handler,
            )
            node.path_parameters[method] = route.path_parameters
    elif isinstance(route, WebSocketRoute):
        node.asgi_handlers["websocket"] = ASGIHandlerTuple(
            asgi_app=METHOD_NAME(app=app, route=route, route_handler=route.route_handler),
            handler=route.route_handler,
        )
        node.path_parameters["websocket"] = route.path_parameters
    else:
        # plain mounted ASGI route
        node.asgi_handlers["asgi"] = ASGIHandlerTuple(
            asgi_app=METHOD_NAME(app=app, route=route, route_handler=route.route_handler),
            handler=route.route_handler,
        )
        node.path_parameters["asgi"] = route.path_parameters
        node.is_asgi = True
def METHOD_NAME(
    app: Litestar,
    route: HTTPRoute | WebSocketRoute | ASGIRoute,
    route_handler: RouteHandlerType,
) -> ASGIApp:
    """Construct a middleware stack that serves as the point of entry for each route.

    Args:
        app: The Litestar app instance.
        route: The route that is being added.
        route_handler: The route handler that is being wrapped.

    Returns:
        An ASGIApp that is composed of a "stack" of middlewares.
    """
    # imported here (not at module level) to avoid circular imports
    from litestar.middleware.allowed_hosts import AllowedHostsMiddleware
    from litestar.middleware.compression import CompressionMiddleware
    from litestar.middleware.csrf import CSRFMiddleware

    # we wrap the route.handle method in the ExceptionHandlerMiddleware
    asgi_handler = wrap_in_exception_handler(
        app=route.handle, exception_handlers=route_handler.resolve_exception_handlers()  # type: ignore[arg-type]
    )
    if app.csrf_config:
        asgi_handler = CSRFMiddleware(app=asgi_handler, config=app.csrf_config)
    if app.compression_config:
        asgi_handler = CompressionMiddleware(app=asgi_handler, config=app.compression_config)
    if app.allowed_hosts:
        asgi_handler = AllowedHostsMiddleware(app=asgi_handler, config=app.allowed_hosts)
    # handler-level middleware; an iterable entry is (middleware, kwargs)
    for middleware in route_handler.resolve_middleware():
        if hasattr(middleware, "__iter__"):
            handler, kwargs = cast("tuple[Any, dict[str, Any]]", middleware)
            asgi_handler = handler(app=asgi_handler, **kwargs)
        else:
            asgi_handler = middleware(app=asgi_handler)  # type: ignore
    # we wrap the entire stack again in ExceptionHandlerMiddleware
    # (stray trailing "|" artifact removed from the final line)
    return wrap_in_exception_handler(
        app=cast("ASGIApp", asgi_handler),
        exception_handlers=route_handler.resolve_exception_handlers(),
    )  # pyright: ignore
import datetime
import os
import zoneinfo
from pathlib import Path
from unittest import mock
from django.test import TestCase
from django.test import override_settings
from httpx import Request
from httpx import Response
from rest_framework import status
from documents.parsers import ParseError
from paperless_tika.parsers import TikaDocumentParser
from paperless_tika.tests.utils import HttpxMockMixin
class TestTikaParser(HttpxMockMixin, TestCase):
    """Tests for TikaDocumentParser.

    All Tika/Gotenberg HTTP traffic is mocked (httpx mock or mock.patch),
    so no external services are required.
    """

    def setUp(self) -> None:
        self.parser = TikaDocumentParser(logging_group=None)

    def tearDown(self) -> None:
        # Remove the parser's temporary working directory.
        self.parser.cleanup()

    @override_settings(TIME_ZONE="America/Chicago")
    def test_parse(self):
        """Parsing stores the text, the converted PDF and a localized date."""
        # Pretend parse response
        self.httpx_mock.add_response(
            json={
                "Content-Type": "application/vnd.oasis.opendocument.text",
                "X-TIKA:Parsed-By": [],
                "X-TIKA:content": "the content",
                "dcterms:created": "2020-11-21T00:00:00",
            },
        )
        # Pretend convert to PDF response
        self.httpx_mock.add_response(content=b"PDF document")
        file = Path(os.path.join(self.parser.tempdir, "input.odt"))
        file.touch()
        self.parser.parse(file, "application/vnd.oasis.opendocument.text")
        self.assertEqual(self.parser.text, "the content")
        self.assertIsNotNone(self.parser.archive_path)
        with open(self.parser.archive_path, "rb") as f:
            self.assertEqual(f.read(), b"PDF document")
        # The naive dcterms:created timestamp is localized to TIME_ZONE.
        self.assertEqual(
            self.parser.date,
            datetime.datetime(
                2020,
                11,
                21,
                tzinfo=zoneinfo.ZoneInfo("America/Chicago"),
            ),
        )

    def METHOD_NAME(self):
        """Extracted metadata contains every key returned by Tika."""
        self.httpx_mock.add_response(
            json={
                "Content-Type": "application/vnd.oasis.opendocument.text",
                "X-TIKA:Parsed-By": [],
                "Some-key": "value",
                "dcterms:created": "2020-11-21T00:00:00",
            },
        )
        file = Path(os.path.join(self.parser.tempdir, "input.odt"))
        file.touch()
        metadata = self.parser.extract_metadata(
            file,
            "application/vnd.oasis.opendocument.text",
        )
        self.assertTrue("dcterms:created" in [m["key"] for m in metadata])
        self.assertTrue("Some-key" in [m["key"] for m in metadata])

    def test_convert_failure(self):
        """
        GIVEN:
            - Document needs to be converted to PDF
        WHEN:
            - Gotenberg server returns an error
        THEN:
            - Parse error is raised
        """
        # Pretend convert to PDF response
        self.httpx_mock.add_response(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)
        file = Path(os.path.join(self.parser.tempdir, "input.odt"))
        file.touch()
        with self.assertRaises(ParseError):
            self.parser.convert_to_pdf(file, None)

    @mock.patch("paperless_tika.parsers.httpx.post")
    def test_request_pdf_a_format(self, post: mock.Mock):
        """
        GIVEN:
            - Document needs to be converted to PDF
        WHEN:
            - Specific PDF/A format requested
        THEN:
            - Request to Gotenberg contains the expected PDF/A format string
        """
        file = Path(os.path.join(self.parser.tempdir, "input.odt"))
        file.touch()
        response = Response(status_code=status.HTTP_200_OK)
        response.request = Request("POST", "/somewhere/")
        post.return_value = response
        for setting, expected_key in [
            ("pdfa", "PDF/A-2b"),
            ("pdfa-2", "PDF/A-2b"),
            ("pdfa-1", "PDF/A-1a"),
            ("pdfa-3", "PDF/A-3b"),
        ]:
            with override_settings(OCR_OUTPUT_TYPE=setting):
                self.parser.convert_to_pdf(file, None)
            post.assert_called_once()
            _, kwargs = post.call_args
            self.assertEqual(kwargs["data"]["pdfFormat"], expected_key)
            # Stray trailing "|" artifact removed from the line below.
            post.reset_mock()
6,464 | retry mysql creation fn | import logging
import re
import time
from contextlib import contextmanager
from typing import Callable, Iterator, Optional, Tuple, TypeVar, Union, cast
from urllib.parse import (
quote_plus as urlquote,
urlparse,
)
import mysql.connector as mysql
import mysql.connector.errorcode as mysql_errorcode
import sqlalchemy as db
import sqlalchemy.exc as db_exc
from alembic.config import Config
from dagster import _check as check
from dagster._core.storage.config import MySqlStorageConfig
from dagster._core.storage.sql import get_alembic_config
from mysql.connector.pooling import PooledMySQLConnection
from sqlalchemy.engine import Connection
from typing_extensions import TypeAlias
T = TypeVar("T")

# Represents the output of mysql connection function
MySQLConnectionUnion: TypeAlias = Union[
    db.engine.Connection, mysql.MySQLConnection, PooledMySQLConnection
]


class DagsterMySQLException(Exception):
    """Raised when MySQL connection/creation retries are exhausted."""

    pass
def get_conn(conn_string: str) -> MySQLConnectionUnion:
    """Open a raw MySQL connection from a mysql:// style connection string."""
    url = urlparse(conn_string)
    # The URL parser retains the leading "/" on the path; strip it to get
    # the database name.
    database_name = url.path[1:]
    raw_conn = mysql.connect(
        user=url.username,
        passwd=url.password,
        host=url.hostname,
        database=database_name,
        port=url.port,
    )
    # https://github.com/dagster-io/dagster/issues/3735
    return cast(MySQLConnectionUnion, raw_conn)
def mysql_url_from_config(config_value: MySqlStorageConfig) -> str:
    """Resolve the connection URL: an explicit ``mysql_url`` wins over parts."""
    explicit_url = config_value.get("mysql_url")
    if explicit_url:
        return explicit_url
    return get_conn_string(**config_value["mysql_db"])
def get_conn_string(
    username: str, password: str, hostname: str, db_name: str, port: Union[int, str] = "3306"
) -> str:
    """Build a SQLAlchemy ``mysql+mysqlconnector`` connection URL.

    Only the password is URL-quoted, since it is the field most likely to
    contain reserved characters such as '@' or '/'.

    Args:
        username: MySQL user name.
        password: Password; quoted with ``quote_plus`` before embedding.
        hostname: Server host.
        db_name: Database (schema) name.
        port: Server port, int or string; defaults to "3306".

    Returns:
        The connection URL string.
    """
    # f-string instead of str.format for readability; behavior unchanged.
    return f"mysql+mysqlconnector://{username}:{urlquote(password)}@{hostname}:{port}/{db_name}"
def parse_mysql_version(version: str) -> Tuple[int, ...]:
    """Parse MySQL version into a tuple of ints.

    Non-numeric separators and suffixes (e.g. "5.7.29-log") are ignored.

    Args:
        version (str): MySQL version string.

    Returns:
        tuple: Tuple of ints representing the MySQL version.
    """
    # Splitting on runs of non-digits leaves only digit strings (and possibly
    # empty edge pieces), so int() cannot fail; the original try/except
    # ValueError was dead code. Just skip the empty pieces.
    return tuple(int(part) for part in re.split(r"\D+", version) if part)
def METHOD_NAME(
    fn: Callable[[], T], retry_limit: int = 5, retry_wait: float = 0.2
) -> T:
    """Run *fn*, retrying on programming/integrity errors raised when two
    processes create DB tables concurrently.

    Args:
        fn: Zero-argument callable performing the creation.
        retry_limit: Number of retries before giving up.
        retry_wait: Seconds to sleep between attempts.

    Returns:
        Whatever *fn* returns on success.

    Raises:
        DagsterMySQLException: once retries are exhausted.
    """
    # Retry logic to recover from the case where two processes are creating
    # tables at the same time using sqlalchemy
    check.callable_param(fn, "fn")
    check.int_param(retry_limit, "retry_limit")
    check.numeric_param(retry_wait, "retry_wait")
    while True:
        try:
            return fn()
        except (
            mysql.ProgrammingError,
            mysql.IntegrityError,
            db_exc.ProgrammingError,
            db_exc.IntegrityError,
        ) as exc:
            # "Table already exists" is re-raised immediately rather than
            # retried -- presumably retrying could never succeed for it.
            # NOTE(review): confirm callers expect the raw driver error here.
            if (
                isinstance(exc, db_exc.ProgrammingError)
                and exc.orig
                and exc.orig.errno == mysql_errorcode.ER_TABLE_EXISTS_ERROR
            ) or (
                isinstance(exc, mysql.ProgrammingError)
                and exc.errno == mysql_errorcode.ER_TABLE_EXISTS_ERROR
            ):
                raise
            logging.warning("Retrying failed database creation")
            if retry_limit == 0:
                raise DagsterMySQLException("too many retries for DB creation") from exc
            time.sleep(retry_wait)
            retry_limit -= 1
def retry_mysql_connection_fn(
    fn: Callable[[], T],
    retry_limit: int = 5,
    retry_wait: float = 0.2,
) -> T:
    """Reusable retry logic for any MySQL connection functions that may fail.

    Intended to be used anywhere we connect to MySQL, to gracefully handle
    transient connection issues.
    """
    check.callable_param(fn, "fn")
    check.int_param(retry_limit, "retry_limit")
    check.numeric_param(retry_wait, "retry_wait")
    retries_remaining = retry_limit
    while True:
        try:
            return fn()
        except (
            mysql.DatabaseError,
            mysql.OperationalError,
            db_exc.DatabaseError,
            db_exc.OperationalError,
            mysql.errors.InterfaceError,
        ) as error:
            logging.warning("Retrying failed database connection")
            if retries_remaining == 0:
                raise DagsterMySQLException("too many retries for DB connection") from error
            time.sleep(retry_wait)
            retries_remaining -= 1
def wait_for_connection(conn_string: str, retry_limit: int = 5, retry_wait: float = 0.2) -> bool:
    """Block until MySQL at *conn_string* accepts a connection; True on success."""
    parsed = urlparse(conn_string)

    def _connect() -> Union[mysql.MySQLConnection, PooledMySQLConnection]:
        # URL parser retains the leading "/" on the path; strip it for the
        # database name.
        return cast(
            Union[mysql.MySQLConnection, PooledMySQLConnection],
            mysql.connect(
                user=parsed.username,
                passwd=parsed.password,
                host=parsed.hostname,
                database=parsed.path[1:],
                port=parsed.port,
            ),
        )

    retry_mysql_connection_fn(_connect, retry_limit=retry_limit, retry_wait=retry_wait)
    return True
def mysql_alembic_config(dunder_file: str) -> Config:
    # Resolve the alembic.ini that ships alongside the calling module's
    # package; *dunder_file* is the caller's __file__.
    return get_alembic_config(dunder_file, config_path="../alembic/alembic.ini")
def mysql_isolation_level() -> str:
    """Pick the SQLAlchemy isolation level for the installed library version.

    Starting with 1.4, the ability to emulate autocommit was deprecated, so we
    need to explicitly call commit on the connection for MySQL where the
    AUTOCOMMIT isolation level is not supported. We should then set the
    isolation level to the MySQL default.
    """
    # str.startswith accepts a tuple of prefixes -- covers 1.4.x and 2.x in
    # one call instead of two chained checks.
    if db.__version__.startswith(("2.", "1.4")):
        return "REPEATABLE READ"
    return "AUTOCOMMIT"
@contextmanager
def create_mysql_connection(
    engine: db.engine.Engine, dunder_file: str, storage_type_desc: Optional[str] = None
) -> Iterator[Connection]:
    """Context manager yielding a MySQL connection (with retry) inside a
    transaction.

    NOTE(review): ``storage_type_desc`` is normalized below but never used
    afterwards, and ``dunder_file`` is only validated -- confirm whether a
    log/error message that consumed them was dropped.
    """
    check.inst_param(engine, "engine", db.engine.Engine)
    check.str_param(dunder_file, "dunder_file")
    check.opt_str_param(storage_type_desc, "storage_type_desc", "")
    if storage_type_desc:
        storage_type_desc += " "
    else:
        storage_type_desc = ""
    # Connecting may transiently fail; reuse the standard retry wrapper.
    conn_cm = retry_mysql_connection_fn(engine.connect)
    with conn_cm as conn:
        # Explicit transaction scope; committed/rolled back on exit.
        with conn.begin():
            yield conn
6,465 | test get deck labware fixtures ot2 standard | """Test deck data provider."""
import pytest
from pytest_lazyfixture import lazy_fixture # type: ignore[import]
from decoy import Decoy
from opentrons_shared_data.deck.dev_types import DeckDefinitionV3
from opentrons.protocols.models import LabwareDefinition
from opentrons.types import DeckSlotName
from opentrons.protocol_engine.types import DeckSlotLocation, DeckType
from opentrons.protocol_engine.resources import (
LabwareDataProvider,
DeckDataProvider,
DeckFixedLabware,
)
@pytest.fixture
def mock_labware_data_provider(decoy: Decoy) -> LabwareDataProvider:
    """Get a mock in the shape of the LabwareDataProvider."""
    # decoy builds a stub whose methods are programmed via decoy.when(...).
    return decoy.mock(cls=LabwareDataProvider)
@pytest.mark.parametrize(
    ("deck_type", "expected_definition"),
    [
        # lazy_fixture defers resolution so each param row pulls the matching
        # deck-definition fixture at test time.
        (DeckType.OT2_STANDARD, lazy_fixture("ot2_standard_deck_def")),
        (DeckType.OT2_SHORT_TRASH, lazy_fixture("ot2_short_trash_deck_def")),
        (DeckType.OT3_STANDARD, lazy_fixture("ot3_standard_deck_def")),
    ],
)
async def test_get_deck_definition(
    deck_type: DeckType,
    expected_definition: DeckDefinitionV3,
    mock_labware_data_provider: LabwareDataProvider,
) -> None:
    """It should be able to load the correct deck definition."""
    subject = DeckDataProvider(
        deck_type=deck_type, labware_data=mock_labware_data_provider
    )
    result = await subject.get_deck_definition()
    assert result == expected_definition
# NOTE(review): the next three tests are structurally identical (deck type,
# trash load name, expected slot); they could be collapsed into one
# parametrized test like test_get_deck_definition above.
async def METHOD_NAME(
    decoy: Decoy,
    ot2_standard_deck_def: DeckDefinitionV3,
    ot2_fixed_trash_def: LabwareDefinition,
    mock_labware_data_provider: LabwareDataProvider,
) -> None:
    """It should be able to get a list of prepopulated labware on the deck."""
    subject = DeckDataProvider(
        deck_type=DeckType.OT2_STANDARD, labware_data=mock_labware_data_provider
    )
    # Stub the labware-definition fetch for the OT-2 standard fixed trash.
    decoy.when(
        await mock_labware_data_provider.get_labware_definition(
            load_name="opentrons_1_trash_1100ml_fixed",
            namespace="opentrons",
            version=1,
        )
    ).then_return(ot2_fixed_trash_def)
    result = await subject.get_deck_fixed_labware(ot2_standard_deck_def)
    assert result == [
        DeckFixedLabware(
            labware_id="fixedTrash",
            location=DeckSlotLocation(slotName=DeckSlotName.FIXED_TRASH),
            definition=ot2_fixed_trash_def,
        )
    ]


async def test_get_deck_labware_fixtures_ot2_short_trash(
    decoy: Decoy,
    ot2_short_trash_deck_def: DeckDefinitionV3,
    ot2_short_fixed_trash_def: LabwareDefinition,
    mock_labware_data_provider: LabwareDataProvider,
) -> None:
    """It should be able to get a list of prepopulated labware on the deck."""
    subject = DeckDataProvider(
        deck_type=DeckType.OT2_SHORT_TRASH, labware_data=mock_labware_data_provider
    )
    # Stub the labware-definition fetch for the OT-2 short fixed trash.
    decoy.when(
        await mock_labware_data_provider.get_labware_definition(
            load_name="opentrons_1_trash_850ml_fixed",
            namespace="opentrons",
            version=1,
        )
    ).then_return(ot2_short_fixed_trash_def)
    result = await subject.get_deck_fixed_labware(ot2_short_trash_deck_def)
    assert result == [
        DeckFixedLabware(
            labware_id="fixedTrash",
            location=DeckSlotLocation(slotName=DeckSlotName.FIXED_TRASH),
            definition=ot2_short_fixed_trash_def,
        )
    ]


async def test_get_deck_labware_fixtures_ot3_standard(
    decoy: Decoy,
    ot3_standard_deck_def: DeckDefinitionV3,
    ot3_fixed_trash_def: LabwareDefinition,
    mock_labware_data_provider: LabwareDataProvider,
) -> None:
    """It should be able to get a list of prepopulated labware on the deck."""
    subject = DeckDataProvider(
        deck_type=DeckType.OT3_STANDARD, labware_data=mock_labware_data_provider
    )
    # Stub the labware-definition fetch for the OT-3 fixed trash.
    decoy.when(
        await mock_labware_data_provider.get_labware_definition(
            load_name="opentrons_1_trash_3200ml_fixed",
            namespace="opentrons",
            version=1,
        )
    ).then_return(ot3_fixed_trash_def)
    result = await subject.get_deck_fixed_labware(ot3_standard_deck_def)
    assert result == [
        DeckFixedLabware(
            labware_id="fixedTrash",
            # OT-3 places its fixed trash in slot A3 rather than a dedicated
            # FIXED_TRASH slot.
            location=DeckSlotLocation(slotName=DeckSlotName.SLOT_A3),
            definition=ot3_fixed_trash_def,
        )
    ]
6,466 | test init by in | # Copyright (c) 2017 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy
import pyNN.spiNNaker as sim
from spynnaker.pyNN.config_setup import unittest_setup
from spynnaker.pyNN.models.neuron import (
AbstractPopulationVertex, AbstractPyNNNeuronModelStandard)
from spynnaker.pyNN.models.neuron.synapse_types import AbstractSynapseType
from spynnaker.pyNN.models.defaults import default_initial_values, defaults
from spynnaker.pyNN.models.neuron.implementations import (
AbstractStandardNeuronComponent)
class EmptyNeuronComponent(AbstractStandardNeuronComponent):
    """Neuron component with no parameters, state variables or units."""

    def __init__(self):
        super().__init__([], [])

    def add_parameters(self, parameters):
        pass

    def add_state_variables(self, state_variables):
        pass

    def get_values(self, parameters, state_variables, vertex_slice, ts):
        # Bug fix: numpy.zeros requires a shape argument; the original
        # numpy.zeros(dtype="uint32") raises TypeError if ever invoked.
        # An empty uint32 array matches this component's "no data" contract.
        return numpy.zeros(0, dtype="uint32")

    def update_values(self, values, parameters, state_variables):
        pass

    def has_variable(self, variable):
        # No state variables exist, so every lookup misses.
        return False

    def get_units(self, variable):
        return None
class EmptySynapseType(AbstractSynapseType, EmptyNeuronComponent):
    """Synapse type exposing zero synapse types; empty behaviour inherited."""

    def get_n_synapse_types(self):
        return 0

    def get_synapse_targets(self):
        return []

    def get_synapse_id_by_target(self, target):
        # No targets exist, so every lookup misses.
        return None
class _MyNeuronModel(AbstractStandardNeuronComponent):
    """Test neuron model exposing two state variables, ``foo`` and ``bar``."""

    def __init__(self, foo, bar):
        super().__init__([], [])
        self._foo = foo
        self._bar = bar

    def add_parameters(self, parameters):
        pass

    def add_state_variables(self, state_variables):
        # Seed the initial values that the tests read back and mutate.
        state_variables["foo"] = self._foo
        state_variables["bar"] = self._bar

    def get_values(self, parameters, state_variables, vertex_slice, ts):
        # Bug fix: numpy.zeros requires a shape argument; the original
        # numpy.zeros(dtype="uint32") raises TypeError if ever invoked.
        return numpy.zeros(0, dtype="uint32")

    def update_values(self, values, parameters, state_variables):
        pass

    def has_variable(self, variable):
        return False

    def get_units(self, variable):
        return None
@defaults
class FooBar(AbstractPyNNNeuronModelStandard):
    """Minimal standard neuron model wrapping _MyNeuronModel(foo, bar)."""

    @default_initial_values({"foo", "bar"})
    def __init__(self, foo=1, bar=11):
        super().__init__(
            "FooBar", "foobar.aplx", _MyNeuronModel(foo, bar),
            EmptyNeuronComponent(), EmptySynapseType(), EmptyNeuronComponent())

    @property
    def model(self):
        # _model is presumably set by AbstractPyNNNeuronModelStandard's
        # __init__ -- TODO confirm against the superclass.
        return self._model
class MockNeuron(AbstractPopulationVertex):
    """Population vertex of 5 FooBar neurons used as a test double."""

    def __init__(self):
        foo_bar = FooBar()
        # All optional sizing/behaviour parameters are left at None so the
        # superclass applies its defaults.
        super().__init__(
            n_neurons=5, label="Mock",
            max_atoms_per_core=None, spikes_per_second=None,
            ring_buffer_sigma=None, incoming_spike_buffer_size=None,
            neuron_impl=foo_bar.model, pynn_model=foo_bar,
            drop_late_spikes=True, splitter=None, seed=None,
            n_colour_bits=None)
def test_initializable():
    """Initial state values default to the model's declared defaults."""
    unittest_setup()
    sim.setup(1.0)
    neuron = MockNeuron()
    assert [1, 1, 1, 1, 1] == neuron.get_initial_state_values("foo")
    neuron.set_initial_state_values("foo", 2)
    # NOTE(review): the effect of setting "foo" to 2 is never asserted here;
    # only "bar" is checked afterwards -- confirm whether an assertion on
    # "foo" == [2, ...] was intended.
    assert [11, 11, 11, 11, 11] == neuron.get_initial_state_values("bar")


def METHOD_NAME():
    """Per-neuron selectors update only the selected indices."""
    unittest_setup()
    sim.setup(1.0)
    neuron = MockNeuron()
    assert [1, 1, 1, 1, 1] == neuron.get_initial_state_values("foo")
    neuron.set_initial_state_values("foo", 11, selector=1)
    assert [1, 11, 1, 1, 1] == neuron.get_initial_state_values("foo")
    neuron.set_initial_state_values("foo", 12, selector=2)
    assert [1, 11, 12, 1, 1] == neuron.get_initial_state_values("foo")
    # Reading with a selector returns the scalar for that index.
    assert 11 == neuron.get_initial_state_values("bar", selector=1)
    assert 12 == neuron.get_initial_state_values("foo", selector=2)


def test_init_bad():
    """Unknown variable names raise KeyError for both get and set."""
    unittest_setup()
    neuron = MockNeuron()
    with pytest.raises(KeyError):
        neuron.get_initial_state_values("badvariable")
    with pytest.raises(KeyError):
        assert 1 == neuron.set_initial_state_values("anotherbad", "junk")


def test_initial_values():
    """Bulk retrieval returns all declared state variables, scalar per selector."""
    unittest_setup()
    sim.setup(1.0)
    neuron = MockNeuron()
    initial_values = neuron.get_initial_state_values(
        neuron.get_state_variables())
    assert "foo" in initial_values
    assert "bar" in initial_values
    initial_values = neuron.get_initial_state_values(
        neuron.get_state_variables(), selector=3)
    assert {"foo": 1, "bar": 11} == initial_values
6,467 | load | #!/usr/bin/env python3
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
'''
Merge training configs into a single inference config.
The single inference config is for CLI, which only takes a single config to do inferencing.
The trainig configs includes: model config, preprocess config, decode config, vocab file and cmvn file.
Process:
# step 1: prepare dir
mkdir -p release_dir
cp -r exp conf data release_dir
cd release_dir
# step 2: get "model.yaml" which conatains all configuration info.
# if does not contain preprocess.yaml file. e.g ds2:
python generate_infer_yaml.py --cfg_pth conf/deepspeech2_online.yaml --dcd_pth conf/tuning/chunk_decode.yaml --vb_pth data/lang_char/vocab.txt --cmvn_pth data/mean_std.json --save_pth model.yaml --pre_pth null
# if contains preprocess.yaml file. e.g u2:
python generate_infer_yaml.py --cfg_pth conf/chunk_conformer.yaml --dcd_pth conf/tuning/chunk_decode.yaml --vb_pth data/lang_char/vocab.txt --cmvn_pth data/mean_std.json --save_pth model.yaml --pre_pth conf/preprocess.yaml
# step 3: remove redundant things
rm xxx
# step 4: tar file
# ds2
tar czvf asr0_deepspeech2_online_aishell_ckpt_0.2.0.model.tar.gz model.yaml conf data/ exp/
# u2
tar czvf asr1_chunk_conformer_aishell_ckpt_0.2.0.model.tar.gz model.yaml conf data/ exp/
'''
import argparse
import json
import math
import os
from contextlib import redirect_stdout
from yacs.config import CfgNode
from paddlespeech.s2t.frontend.utility import load_dict
def save(save_path, config):
    """Write *config* (a CfgNode-like object) to *save_path* as YAML text."""
    # config.dump() renders YAML; print-through-redirect keeps the trailing
    # newline behaviour of the original implementation.
    with open(save_path, 'w') as out_file, redirect_stdout(out_file):
        print(config.dump())
def METHOD_NAME(save_path):
    """Read the YAML file at *save_path* into a fresh CfgNode and return it."""
    cfg = CfgNode(new_allowed=True)
    cfg.merge_from_file(save_path)
    return cfg
def load_json(json_path):
    """Load and return the JSON content of the file at *json_path*."""
    with open(json_path) as f:
        # Bug fix: this must call the stdlib json.load -- the original
        # referenced a non-existent attribute on the json module (the local
        # YAML loader's name), which raises AttributeError at runtime.
        return json.load(f)
def remove_config_part(config, key_list):
    """Delete the entry addressed by *key_list* from the nested mapping.

    A no-op for an empty key list; mutates *config* in place.
    """
    if not key_list:
        return
    node = config
    # Walk down to the parent of the final key, then drop that key.
    for key in key_list[:-1]:
        node = node[key]
    node.pop(key_list[-1])
def load_cmvn_from_json(cmvn_stats):
    """Convert raw CMVN accumulators into per-dimension mean and 1/stddev.

    Note: the "mean_stat" and "var_stat" lists of *cmvn_stats* are mutated
    in place; the returned dict shares those lists.
    """
    means = cmvn_stats['mean_stat']
    variance = cmvn_stats['var_stat']
    count = cmvn_stats['frame_num']
    for dim in range(len(means)):
        mean = means[dim] / count
        means[dim] = mean
        var = variance[dim] / count - mean * mean
        # Floor tiny variances to avoid blow-up, then store inverse stddev.
        if var < 1.0e-20:
            var = 1.0e-20
        variance[dim] = 1.0 / math.sqrt(var)
    return {"mean": means, "istd": variance}
def merge_configs(
        conf_path="conf/conformer.yaml",
        preprocess_path="conf/preprocess.yaml",
        decode_path="conf/tuning/decode.yaml",
        vocab_path="data/vocab.txt",
        cmvn_path="data/mean_std.json",
        save_path="conf/conformer_infer.yaml", ):
    """Merge model/decode/vocab/cmvn (and optional preprocess) configs into a
    single inference config written to *save_path*.

    JSON cmvn stats are embedded directly into the config; any other cmvn
    file (e.g. kaldi .ark) is referenced by path only. Training-only keys
    are stripped before saving.
    """
    # Load the configs
    config = METHOD_NAME(conf_path)
    decode_config = METHOD_NAME(decode_path)
    vocab_list = load_dict(vocab_path)
    # If use the kaldi feature, do not load the cmvn file
    if cmvn_path.split(".")[-1] == 'json':
        cmvn_stats = load_json(cmvn_path)
        if os.path.exists(preprocess_path):
            # Embed the raw stats into the cmvn_json preprocessing step.
            preprocess_config = METHOD_NAME(preprocess_path)
            for idx, process in enumerate(preprocess_config["process"]):
                if process['type'] == "cmvn_json":
                    preprocess_config["process"][idx]["cmvn_path"] = cmvn_stats
                    break
            config.preprocess_config = preprocess_config
        else:
            # No preprocess pipeline (e.g. ds2): convert stats to mean/istd.
            cmvn_stats = load_cmvn_from_json(cmvn_stats)
            config.mean_std_filepath = [{"cmvn_stats": cmvn_stats}]
            config.augmentation_config = ''
    # the cmvn file is end with .ark
    else:
        config.cmvn_path = cmvn_path
    # Update the config
    config.vocab_filepath = vocab_list
    config.input_dim = config.feat_dim
    config.output_dim = len(config.vocab_filepath)
    config.decode = decode_config
    # Remove some parts of the config (training-only keys)
    if os.path.exists(preprocess_path):
        remove_train_list = [
            "train_manifest",
            "dev_manifest",
            "test_manifest",
            "n_epoch",
            "accum_grad",
            "global_grad_clip",
            "optim",
            "optim_conf",
            "scheduler",
            "scheduler_conf",
            "log_interval",
            "checkpoint",
            "shuffle_method",
            "weight_decay",
            "ctc_grad_norm_type",
            "minibatches",
            "subsampling_factor",
            "batch_bins",
            "batch_count",
            "batch_frames_in",
            "batch_frames_inout",
            "batch_frames_out",
            "sortagrad",
            "feat_dim",
            "stride_ms",
            "window_ms",
            "batch_size",
            "maxlen_in",
            "maxlen_out",
        ]
    else:
        remove_train_list = [
            "train_manifest",
            "dev_manifest",
            "test_manifest",
            "n_epoch",
            "accum_grad",
            "global_grad_clip",
            "log_interval",
            "checkpoint",
            "lr",
            "lr_decay",
            "batch_size",
            "shuffle_method",
            "weight_decay",
            "sortagrad",
            "num_workers",
        ]
    for item in remove_train_list:
        try:
            remove_config_part(config, [item])
        except Exception as e:
            # Best-effort removal: missing keys are reported, not fatal.
            print(item + " " + "can not be removed")
    # Save the config
    save(save_path, config)
if __name__ == "__main__":
    # CLI entry point: collect the training-time config paths and merge them
    # into a single inference config at --save_pth.
    parser = argparse.ArgumentParser(prog='Config merge', add_help=True)
    parser.add_argument(
        '--cfg_pth',
        type=str,
        default='conf/transformer.yaml',
        help='origin config file')
    parser.add_argument(
        '--pre_pth', type=str, default="conf/preprocess.yaml", help='')
    # NOTE(review): "tuninig" below looks like a typo for "tuning" -- confirm
    # against the repository layout before changing the default path.
    parser.add_argument(
        '--dcd_pth', type=str, default="conf/tuninig/decode.yaml", help='')
    parser.add_argument(
        '--vb_pth', type=str, default="data/lang_char/vocab.txt", help='')
    parser.add_argument(
        '--cmvn_pth', type=str, default="data/mean_std.json", help='')
    parser.add_argument(
        '--save_pth', type=str, default="conf/transformer_infer.yaml", help='')
    parser_args = parser.parse_args()
    merge_configs(
        conf_path=parser_args.cfg_pth,
        decode_path=parser_args.dcd_pth,
        preprocess_path=parser_args.pre_pth,
        vocab_path=parser_args.vb_pth,
        cmvn_path=parser_args.cmvn_pth,
        save_path=parser_args.save_pth, )
6,468 | pause | """
Skeleton example of a Ginga local plugin called 'MyLocalPlugin'
To enable it, copy it to your $HOME/.ginga/plugins folder (create it first
if it does not already exist), then run ginga with the command:
$ ginga --plugins=MyLocalPlugin
From the "Operations" menu you should be able to select
Custom->MyLocalPlugin; it should become active under the "Dialogs" tab.
"""
from ginga import GingaPlugin
from ginga.gw import Widgets
# import any other modules you want here--it's a python world!
class MyLocalPlugin(GingaPlugin.LocalPlugin):
    """Skeleton Ginga local plugin demonstrating the plugin lifecycle.

    Lifecycle (as driven by the plugin manager): build_gui -> start ->
    pause/resume (focus changes) -> stop; redo fires on new images while
    active.
    """

    def __init__(self, fv, fitsimage):
        """
        This method is called when the plugin is loaded for the first
        time. ``fv`` is a reference to the Ginga (reference viewer) shell
        and ``fitsimage`` is a reference to the specific CanvasView
        object associated with the channel on which the plugin is being
        invoked.
        You need to call the superclass initializer and then do any local
        initialization.
        """
        super(MyLocalPlugin, self).__init__(fv, fitsimage)
        # your local state and initialization code goes here

    def build_gui(self, container):
        """
        This method is called when the plugin is invoked.  It builds the
        GUI used by the plugin into the widget layout passed as
        ``container``.
        This method may be called many times as the plugin is opened and
        closed for modal operations.  The method may be omitted if there
        is no GUI for the plugin.
        This specific example uses the GUI widget set agnostic wrappers
        to build the GUI, but you can also just as easily use explicit
        toolkit calls here if you only want to support one widget set.
        """
        top = Widgets.VBox()
        top.set_border_width(4)
        # this is a little trick for making plugins that work either in
        # a vertical or horizontal orientation.  It returns a box container,
        # a scroll widget and an orientation ('vertical', 'horizontal')
        vbox, sw, orientation = Widgets.get_oriented_box(container)
        vbox.set_border_width(4)
        vbox.set_spacing(2)
        # Add a spacer to stretch the rest of the way to the end of the
        # plugin space
        spacer = Widgets.Label('')
        vbox.add_widget(spacer, stretch=1)
        # scroll bars will allow lots of content to be accessed
        top.add_widget(sw, stretch=1)
        # A button box that is always visible at the bottom
        btns = Widgets.HBox()
        btns.set_spacing(3)
        # Add a close button for the convenience of the user
        btn = Widgets.Button("Close")
        btn.add_callback('activated', lambda w: self.close())
        btns.add_widget(btn, stretch=0)
        btn = Widgets.Button("Help")
        # help() method is built into our parent class--it works as long
        # as we have a module docstring defined
        btn.add_callback('activated', lambda w: self.help())
        btns.add_widget(btn, stretch=0)
        btns.add_widget(Widgets.Label(''), stretch=1)
        top.add_widget(btns, stretch=0)
        # Add our GUI to the container
        container.add_widget(top, stretch=1)
        # NOTE: if you are building a GUI using a specific widget toolkit
        # (e.g. Qt) GUI calls, you need to extract the widget or layout
        # from the non-toolkit specific container wrapper and call on that
        # to pack your widget, e.g.:
        #cw = container.get_widget()
        #cw.addWidget(widget, stretch=1)

    def close(self):
        """
        Example close method.  You can use this method and attach it as a
        callback to a button that you place in your GUI to close the plugin
        as a convenience to the user.
        """
        self.fv.stop_local_plugin(self.chname, str(self))
        return True

    def start(self):
        """
        This method is called just after ``build_gui()`` when the plugin
        is invoked.  This method may be called many times as the plugin is
        opened and closed for modal operations.  This method may be omitted
        in many cases.
        """
        self.resume()

    def METHOD_NAME(self):
        """
        This method is called when the plugin loses focus.
        It should take any actions necessary to stop handling user
        interaction events that were initiated in ``start()`` or
        ``resume()``.
        This method may be called many times as the plugin is focused
        or defocused.  It may be omitted if there is no user event handling
        to disable.
        """
        pass

    def resume(self):
        """
        This method is called when the plugin gets focus.
        It should take any actions necessary to start handling user
        interaction events for the operations that it does.
        This method may be called many times as the plugin is focused or
        defocused.  The method may be omitted if there is no user event
        handling to enable.
        """
        pass

    def stop(self):
        """
        This method is called when the plugin is stopped.
        It should perform any special clean up necessary to terminate
        the operation.  The GUI will be destroyed by the plugin manager
        so there is no need for the stop method to do that.
        This method may be called many times as the plugin is opened and
        closed for modal operations, and may be omitted if there is no
        special cleanup required when stopping.
        """
        pass

    def redo(self):
        """
        This method is called when the plugin is active and a new
        image is loaded into the associated channel.  It can optionally
        redo the current operation on the new image.  This method may be
        called many times as new images are loaded while the plugin is
        active.  This method may be omitted.
        """
        pass

    def __str__(self):
        """
        This method should be provided and should return the lower case
        name of the plugin.
        """
        return 'mylocalplugin'
6,469 | test automatic background subtraction | # -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher (clemens.prescher@gmail.com)
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019-2020 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import numpy as np
import pytest
from pytest import approx
from ...model.util.Pattern import BkgNotInRangeError
from ...model.util import Pattern
from ...model.util.PeakShapes import gaussian
unittest_path = os.path.dirname(__file__)
data_path = os.path.join(unittest_path, '../data')
def test_loading_chi_file():
    """Loading a .chi file replaces the pattern's default data."""
    spec = Pattern()
    x, y = spec.data
    spec.load(os.path.join(data_path, 'pattern_001.chi'))
    new_x, new_y = spec.data
    # The loaded arrays differ in length from the default data.
    assert len(x) != len(new_x)
    assert len(y) != len(new_y)


def test_loading_invalid_file():
    """load() signals an unparsable file by returning -1 (no exception)."""
    pattern = Pattern()
    assert -1 == pattern.load(os.path.join(data_path, 'wrong_file_format.txt'))


def test_saving_a_file(tmp_path):
    """Data written by save() round-trips through load()."""
    x = np.linspace(-5, 5, 100)
    y = x ** 2
    pattern = Pattern(x, y)
    filename = os.path.join(tmp_path, "test.dat")
    pattern.save(filename)
    pattern2 = Pattern()
    pattern2.load(filename)
    pattern2_x, pattern2_y = pattern2.data
    assert pattern2_x == pytest.approx(x)
    assert pattern2_y == pytest.approx(y)
def test_plus_and_minus_operators():
    """+/- produce new patterns and leave the operands' original data intact."""
    x = np.linspace(0, 10, 100)
    pattern1 = Pattern(x, np.sin(x))
    pattern2 = Pattern(x, np.sin(x))
    pattern3 = pattern1 + pattern2
    assert np.array_equal(pattern3.y, np.sin(x) * 2)
    assert np.array_equal(pattern2.original_y, np.sin(x) * 1)
    assert np.array_equal(pattern1.original_y, np.sin(x) * 1)
    pattern3 = pattern1 + pattern1
    assert np.array_equal(pattern3.y, np.sin(x) * 2)
    # NOTE(review): the next two assertions are identical (both check
    # pattern1) -- possibly a copy-paste slip; elsewhere the pair checks
    # pattern2 then pattern1.
    assert np.array_equal(pattern1.original_y, np.sin(x) * 1)
    assert np.array_equal(pattern1.original_y, np.sin(x) * 1)
    pattern3 = pattern2 - pattern1
    assert np.array_equal(pattern3.y, np.sin(x) * 0)
    assert np.array_equal(pattern2.original_y, np.sin(x) * 1)
    assert np.array_equal(pattern1.original_y, np.sin(x) * 1)
    pattern3 = pattern1 - pattern1
    assert np.array_equal(pattern3.y, np.sin(x) * 0)
    # NOTE(review): duplicated assertion pair again (see above).
    assert np.array_equal(pattern1.original_y, np.sin(x) * 1)
    assert np.array_equal(pattern1.original_y, np.sin(x) * 1)


def test_plus_and_minus_operators_with_different_shapes():
    """Operands with different grids are resampled onto the left operand's x."""
    x = np.linspace(0, 10, 1000)
    x2 = np.linspace(0, 12, 1300)
    pattern1 = Pattern(x, np.sin(x))
    pattern2 = Pattern(x2, np.sin(x2))
    pattern3 = pattern1 + pattern2
    assert pattern3.x == approx(pattern1._original_x)
    # Interpolation onto the common grid introduces small errors; compare
    # with an absolute tolerance.
    assert pattern3.y == approx(pattern1._original_y * 2, abs=1e-4)
    pattern3 = pattern1 + pattern1
    assert pattern3.y == approx(np.sin(x) * 2, abs=1e-4)
    pattern3 = pattern1 - pattern2
    assert pattern3.y == approx(np.sin(x) * 0, abs=1e-4)
    pattern3 = pattern1 - pattern1
    assert pattern3.y == approx(np.sin(x) * 0, abs=1e-4)


def test_multiply_with_scalar_operator():
    """Left-multiplying by a scalar scales the y data."""
    x = np.linspace(0, 10, 100)
    pattern = 2 * Pattern(x, np.sin(x))
    assert np.array_equal(pattern.y, np.sin(x) * 2)
def test_using_background_pattern():
    """Assigning background_pattern subtracts it from the visible data."""
    x = np.linspace(-5, 5, 100)
    pattern_y = x ** 2
    bkg_y = x
    spec = Pattern(x, pattern_y)
    background_pattern = Pattern(x, bkg_y)
    spec.background_pattern = background_pattern
    new_x, new_y = spec.data
    assert np.array_equal(new_x, x)
    assert np.array_equal(new_y, pattern_y - bkg_y)


def test_using_background_pattern_with_different_spacing():
    """A background on a different grid is interpolated before subtraction."""
    x = np.linspace(-5, 5, 100)
    pattern_y = x ** 2
    x_bkg = np.linspace(-5, 5, 99)
    bkg_y = x_bkg
    spec = Pattern(x, pattern_y)
    background_pattern = Pattern(x_bkg, bkg_y)
    spec.background_pattern = background_pattern
    new_x, new_y = spec.data
    assert np.array_equal(new_x, x)
    # bkg_y == x_bkg, so the interpolated background equals x exactly.
    assert np.array_equal(new_y, pattern_y - x)


def test_background_out_of_range_throws_error():
    """A background that does not overlap the pattern's x-range is rejected."""
    x1 = np.linspace(0, 10)
    x2 = np.linspace(-10, -1)
    spec = Pattern(x1, x1)
    background_pattern = Pattern(x2, x2)
    with pytest.raises(BkgNotInRangeError):
        spec.background_pattern = background_pattern
def METHOD_NAME():
    """Automatic background subtraction recovers the peaks from a sloped bkg."""
    x = np.linspace(0, 24, 2500)
    y = np.zeros(x.shape)
    # Synthetic signal: three Gaussian peaks (intensity, center, sigma).
    peaks = [
        [10, 3, 0.1],
        [12, 4, 0.1],
        [12, 6, 0.1],
    ]
    for peak in peaks:
        y += gaussian(x, peak[0], peak[1], peak[2])
    # Linear background added on top of the peaks.
    y_bkg = x * 0.4 + 5.0
    y_measurement = y + y_bkg
    pattern = Pattern(x, y_measurement)
    # Parameters are presumably (smooth_width, iterations, poly_order) --
    # TODO confirm against set_auto_background_subtraction's signature.
    auto_background_subtraction_parameters = [2, 50, 50]
    pattern.set_auto_background_subtraction(auto_background_subtraction_parameters)
    x_spec, y_spec = pattern.data
    assert y_spec == approx(y, abs=1e-4)


def test_automatic_background_subtraction_with_roi():
    """With a ROI, subtraction restricts the output data to that x-range."""
    x = np.linspace(0, 24, 2500)
    y = np.zeros(x.shape)
    peaks = [
        [10, 3, 0.1],
        [12, 4, 0.1],
        [12, 6, 0.1],
    ]
    for peak in peaks:
        y += gaussian(x, peak[0], peak[1], peak[2])
    y_bkg = x * 0.4 + 5.0
    y_measurement = y + y_bkg
    roi = [1, 23]
    pattern = Pattern(x, y_measurement)
    auto_background_subtraction_parameters = [2, 50, 50]
    pattern.set_auto_background_subtraction(auto_background_subtraction_parameters, roi)
    x_spec, y_spec = pattern.data
    # Output is clipped strictly inside the requested region of interest.
    assert x_spec[0] > roi[0]
    assert x_spec[-1] < roi[1]
def test_setting_new_data():
    """Assigning to .data replaces both x and y arrays."""
    spec = Pattern()
    x = np.linspace(0, 10)
    y = np.sin(x)
    spec.data = x, y
    new_x, new_y = spec.data
    assert np.array_equal(new_x, x)
    assert np.array_equal(new_y, y)


def test_using_len():
    """len() of a pattern reports the number of data points."""
    x = np.linspace(0, 10, 234)
    y = x ** 2
    spec = Pattern(x, y)
    assert len(spec) == 234
6,470 | test invalid port mapping fail | """
Tests that runs validity checks on arguments passed in from shell
"""
import os
import subprocess
import pytest
here = os.path.dirname(os.path.abspath(__file__))
test_dir = os.path.dirname(here)
docker_simple = os.path.join(test_dir, "dockerfile", "simple")
# default to building in the cwd (a temporary directory)
builddir = "."
@pytest.fixture
def temp_cwd(tmpdir):
    # Run each test from a fresh temporary working directory so builds in
    # "." never touch the repository checkout.
    tmpdir.chdir()


# Template for the validation error repo2docker prints for bad image names;
# filled in with the offending name via %r.
invalid_image_name_template = (
    "%r is not a valid docker image name. Image name "
    "must start with a lowercase or numeric character and "
    "can then use _ . or - in addition to lowercase and numeric."
)
def validate_arguments(builddir, args_list=".", expected=None, disable_dockerd=False):
    """Run ``repo2docker <args_list> <builddir>``; return True on success.

    On a non-zero exit: if *expected* is given, assert it appears in the
    combined stdout/stderr and return False; otherwise print the output and
    re-raise the CalledProcessError.

    NOTE: the default ``args_list="."`` only works because iterating the
    one-character string yields ["."]; pass a list for multiple arguments.
    """
    # Build the command and environment outside the try block -- only the
    # subprocess call can raise CalledProcessError.
    cmd = ["repo2docker", *args_list, builddir]
    env = os.environ.copy()
    if disable_dockerd:
        # Point the docker client at a bogus daemon to force a failure path.
        env["DOCKER_HOST"] = "INCORRECT"
    try:
        subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT)
        return True
    except subprocess.CalledProcessError as e:
        output = e.output.decode()
        if expected is not None:
            assert expected in output
            return False
        print(output)
        raise
def test_image_name_fail(temp_cwd):
    """
    An --image-name with uppercase letters and misplaced underscores must
    be rejected with the image-name validation error.
    """
    image_name = "Test/Invalid_name:1.0.0"
    assert not validate_arguments(
        builddir,
        ["--no-run", "--no-build", "--image-name", image_name],
        invalid_image_name_template % image_name,
    )
def test_image_name_underscore_fail(temp_cwd):
    """An --image-name that starts with an underscore is rejected."""
    bad_name = "_test/invalid_name:1.0.0"
    args = ["--no-run", "--no-build", "--image-name", bad_name]
    assert not validate_arguments(builddir, args, invalid_image_name_template % bad_name)
def test_image_name_double_dot_fail(temp_cwd):
    """An --image-name containing consecutive dots is rejected."""
    bad_name = "test..com/invalid_name:1.0.0"
    expected_error = invalid_image_name_template % bad_name
    assert not validate_arguments(
        builddir, ["--no-run", "--no-build", "--image-name", bad_name], expected_error
    )
def test_image_name_valid_restricted_registry_domain_name_fail(temp_cwd):
    """
    Test to check if repo2docker throws image_name validation error on --image-name
    argument being invalid. Based on the regex definitions, the first part of the
    registry domain cannot contain uppercase characters.

    (Renamed from ...restircted... to fix the typo; pytest discovers tests
    by the test_ prefix, so no caller is affected.)
    """
    image_name = "Test.com/valid_name:1.0.0"
    args_list = ["--no-run", "--no-build", "--image-name", image_name]
    expected = invalid_image_name_template % image_name
    assert not validate_arguments(builddir, args_list, expected)
def test_image_name_valid_registry_domain_name_success(temp_cwd):
    """Uppercase is allowed outside the first registry-domain component."""
    repo = docker_simple
    image_name = "test.COM/valid_name:1.0.0"
    assert validate_arguments(
        repo, ["--no-run", "--no-build", "--image-name", image_name], None
    )
def test_image_name_valid_name_success(temp_cwd):
    """A fully lowercase registry/name:tag image name is accepted."""
    repo = docker_simple
    image_name = "test.com/valid_name:1.0.0"
    assert validate_arguments(
        repo, ["--no-run", "--no-build", "--image-name", image_name], None
    )
def test_volume_no_build_fail(temp_cwd):
    """Volume mounts combined with --no-build must be refused."""
    assert not validate_arguments(
        builddir,
        ["--no-build", "-v", "/data:/data"],
        "Cannot mount volumes if container is not run",
    )
def test_volume_no_run_fail(temp_cwd):
    """Volume mounts combined with --no-run must be refused."""
    assert not validate_arguments(
        builddir,
        ["--no-run", "-v", "/data:/data"],
        "Cannot mount volumes if container is not run",
    )
def test_env_no_run_fail(temp_cwd):
    """Environment variables cannot be passed when the container is not run."""
    args = ["--no-run", "-e", "FOO=bar", "--"]
    expected = "To specify environment variables, you also need to run the container"
    assert not validate_arguments(builddir, args, expected)
def test_port_mapping_no_run_fail(temp_cwd):
    """--publish together with --no-run must be refused."""
    expected = "To publish user-defined port mappings, the container must also be run"
    assert not validate_arguments(
        builddir, ["--no-run", "--publish", "8000:8000"], expected
    )
def test_all_ports_mapping_no_run_fail(temp_cwd):
    """-P (publish all ports) together with --no-run must be refused."""
    expected = "To publish user-defined port mappings, the container must also be run"
    assert not validate_arguments(builddir, ["--no-run", "-P"], expected)
def test_invalid_port_mapping_fail(temp_cwd):
    """
    Test to check if r2d fails when an invalid port is specified in the
    port mapping (host port 75000 is outside the valid 1-65535 range).

    (Name restored from the METHOD_NAME placeholder.)
    """
    # Specifying builddir here itself to simulate passing in a run command
    # builddir passed in the function will be an argument for the run command
    args_list = ["-p", "75000:80", builddir, "ls"]
    assert not validate_arguments(builddir, args_list, "Port specification")
def test_invalid_protocol_port_mapping_fail(temp_cwd):
    """A bogus protocol ("tpc") in the host-port spec must be rejected."""
    # builddir doubles as the positional run-command argument here
    assert not validate_arguments(
        builddir, ["-p", "80/tpc:8000", builddir, "ls"], "Port specification"
    )
def test_invalid_container_port_protocol_mapping_fail(temp_cwd):
    """A bogus protocol ("upd") on the container port must be rejected."""
    # builddir doubles as the positional run-command argument here
    assert not validate_arguments(
        builddir, ["-p", "80:8000/upd", builddir, "ls"], "Port specification"
    )
def test_docker_handle_fail(temp_cwd):
    """Without a reachable docker daemon, r2d fails with a short hint."""
    assert not validate_arguments(
        builddir,
        [],
        "Check if docker is running on the host.",
        disable_dockerd=True,
    )
def test_docker_handle_debug_fail(temp_cwd):
    """With --debug and no reachable docker daemon, the hint still appears."""
    assert not validate_arguments(
        builddir,
        ["--debug"],
        "Check if docker is running on the host.",
        disable_dockerd=True,
    )
def test_docker_no_build_success(temp_cwd):
    """--no-build --no-run succeeds even when docker is unreachable."""
    assert validate_arguments(
        builddir, ["--no-build", "--no-run"], disable_dockerd=True
    )
import os
import core.exceptions as ex
import utilities.ping
from .. import \
BaseContainer, \
KW_START_TIMEOUT, \
KW_STOP_TIMEOUT, \
KW_NO_PREEMPT_ABORT, \
KW_NAME, \
KW_HOSTNAME, \
KW_OSVC_ROOT_PATH, \
KW_GUESTOS, \
KW_PROMOTE_RW, \
KW_SCSIRESERV
from env import Env
from core.resource import Resource
from core.objects.svcdict import KEYS
from utilities.proc import qcall
# opensvc driver identity: resources of type "container.ldom"
DRIVER_GROUP = "container"
DRIVER_BASENAME = "ldom"
# configuration keywords accepted by this driver's resource sections
KEYWORDS = [
    KW_START_TIMEOUT,
    KW_STOP_TIMEOUT,
    KW_NO_PREEMPT_ABORT,
    KW_NAME,
    KW_HOSTNAME,
    KW_OSVC_ROOT_PATH,
    KW_GUESTOS,
    KW_PROMOTE_RW,
    KW_SCSIRESERV,
]
# register the driver and its keywords with the service configuration schema
KEYS.register_driver(
    DRIVER_GROUP,
    DRIVER_BASENAME,
    name=__name__,
    keywords=KEYWORDS,
)
def driver_capabilities(node=None):
    """Return the capability tags this node supports for the ldom driver.

    The "container.ldom" capability is advertised when the ldm command
    exists on the node.  (Name restored from the METHOD_NAME placeholder.)
    """
    from utilities.proc import which
    data = []
    if which("/usr/sbin/ldm"):
        data.append("container.ldom")
    return data
class ContainerLdom(BaseContainer):
    """Container resource driver for Solaris Logical Domains (LDoms).

    Wraps /usr/sbin/ldm to bind, start, stop and unbind a logical domain
    and to query its state.
    """

    def __init__(self, guestos="SunOS", **kwargs):
        super(ContainerLdom, self).__init__(type="container.ldom", guestos=guestos, **kwargs)
        # ssh binary used for remote commands into the guest
        self.sshbin = '/usr/local/bin/ssh'

    def __str__(self):
        return "%s name=%s" % (Resource.__str__(self), self.name)

    def check_capabilities(self):
        """Return True if the ldm command is usable on this node."""
        cmd = ['/usr/sbin/ldm', 'list' ]
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            return False
        return True

    def state(self):
        """Return the domain state string (inactive/bound/active) or None.

        ldm list -p domainname outputs:
        VERSION
        DOMAIN|[varname=varvalue]*
        """
        cmd = ['/usr/sbin/ldm', 'list', '-p', self.name]
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            return None
        for word in out.split("|"):
            a = word.split('=')
            if len(a) == 2:
                if a[0] == 'state':
                    return a[1]
        return None

    def ping(self):
        """Return True if the guest answers ping at its address."""
        return utilities.ping.check_ping(self.addr)

    def container_action(self, action):
        """Run an ldm subcommand (bind/start/stop/unbind) on the domain."""
        cmd = ['/usr/sbin/ldm', action, self.name]
        (ret, buff, err) = self.vcall(cmd)
        if ret != 0:
            raise ex.Error
        return None

    def container_start(self):
        """ldm bind domain
            ldm start domain
        """
        state = self.state()
        # bugfix: state() returns the None object on failure, not the
        # string 'None' the original compared against
        if state is None:
            raise ex.Error
        if state == 'inactive':
            self.container_action('bind')
            self.container_action('start')
        if state == 'bound':
            self.container_action('start')

    def container_forcestop(self):
        """ldm stop domain (best effort)
            ldm unbind domain
        """
        # bugfix: the original tested `self.state == 'active'`, comparing
        # the bound method to a string (always False), so the stop step
        # was silently skipped
        if self.state() == 'active':
            try:
                self.container_action('stop')
            except ex.Error:
                pass
        self.container_action('unbind')

    def container_stop(self):
        """ launch init 5 into container
            wait_for_shutdown
            ldm stop domain
            ldm unbind domain
        """
        state = self.state()
        # bugfix: compare with the None object, not the string 'None'
        if state is None:
            raise ex.Error
        if state == 'inactive':
            return None
        if state == 'bound':
            self.container_action('unbind')
        if state == 'active':
            # ask the guest OS to shut down cleanly before forcing
            cmd = Env.rsh.split() + [ self.name, '/usr/sbin/init', '5' ]
            (ret, buff, err) = self.vcall(cmd)
            if ret == 0:
                try:
                    self.log.info("wait for container shutdown")
                    self.wait_for_fn(self.is_shutdown, self.stop_timeout, 2)
                except ex.Error:
                    pass
            self.container_forcestop()

    def check_manual_boot(self):
        """Return True when the domain is not set to boot automatically."""
        cmd = ['/usr/sbin/ldm', 'list-variable', 'auto-boot?', self.name]
        (ret, out, err) = self.call(cmd)
        if ret != 0:
            return False
        if out != 'auto-boot?=False' :
            return True
        self.log.info("Auto boot should be turned off")
        return False

    def is_shutdown(self):
        state = self.state()
        if state == 'inactive' or state == 'bound':
            return True
        return False

    def is_down(self):
        if self.state() == 'inactive':
            return True
        return False

    def is_up(self):
        if self.state() == 'active':
            return True
        return False
|
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
str_or_none,
js_to_json,
parse_filesize,
traverse_obj,
urlencode_postdata,
urljoin,
)
class ZoomIE(InfoExtractor):
    """Extractor for Zoom cloud recordings (rec/play and rec/share URLs)."""
    IE_NAME = 'zoom'
    # bugfix: escape the hostname dot; the original bare "zoom.us" also
    # matched any character in that position
    _VALID_URL = r'(?P<base_url>https?://(?:[^.]+\.)?zoom\.us/)rec(?:ording)?/(?P<type>play|share)/(?P<id>[A-Za-z0-9_.-]+)'
    _TESTS = [{
        'url': 'https://economist.zoom.us/rec/play/dUk_CNBETmZ5VA2BwEl-jjakPpJ3M1pcfVYAPRsoIbEByGsLjUZtaa4yCATQuOL3der8BlTwxQePl_j0.EImBkXzTIaPvdZO5',
        'md5': 'ab445e8c911fddc4f9adc842c2c5d434',
        'info_dict': {
            'id': 'dUk_CNBETmZ5VA2BwEl-jjakPpJ3M1pcfVYAPRsoIbEByGsLjUZtaa4yCATQuOL3der8BlTwxQePl_j0.EImBkXzTIaPvdZO5',
            'ext': 'mp4',
            'title': 'China\'s "two sessions" and the new five-year plan',
        },
        'skip': 'Recording requires email authentication to access',
    }, {
        # play URL
        'url': 'https://ffgolf.zoom.us/rec/play/qhEhXbrxq1Zoucx8CMtHzq1Z_2YZRPVCqWK_K-2FkEGRsSLDeOX8Tu4P6jtjZcRry8QhIbvKZdtr4UNo.QcPn2debFskI9whJ',
        'md5': '2c4b1c4e5213ebf9db293e88d9385bee',
        'info_dict': {
            'id': 'qhEhXbrxq1Zoucx8CMtHzq1Z_2YZRPVCqWK_K-2FkEGRsSLDeOX8Tu4P6jtjZcRry8QhIbvKZdtr4UNo.QcPn2debFskI9whJ',
            'ext': 'mp4',
            'title': 'Prépa AF2023 - Séance 5 du 11 avril - R20/VM/GO',
        },
    }, {
        # share URL
        'url': 'https://us02web.zoom.us/rec/share/hkUk5Zxcga0nkyNGhVCRfzkA2gX_mzgS3LpTxEEWJz9Y_QpIQ4mZFOUx7KZRZDQA.9LGQBdqmDAYgiZ_8',
        'md5': '90fdc7cfcaee5d52d1c817fc03c43c9b',
        'info_dict': {
            'id': 'hkUk5Zxcga0nkyNGhVCRfzkA2gX_mzgS3LpTxEEWJz9Y_QpIQ4mZFOUx7KZRZDQA.9LGQBdqmDAYgiZ_8',
            'ext': 'mp4',
            'title': 'Timea Andrea Lelik\'s Personal Meeting Room',
        },
    }]

    def _get_page_data(self, webpage, video_id):
        """Return the window.__data__ JSON object embedded in the page.

        (Name restored from the METHOD_NAME placeholder.)
        """
        return self._search_json(
            r'window\.__data__\s*=', webpage, 'data', video_id, transform_source=js_to_json)

    def _get_real_webpage(self, url, base_url, video_id, url_type):
        """Download the page, transparently validating a passcode form if present."""
        webpage = self._download_webpage(url, video_id, note=f'Downloading {url_type} webpage')
        try:
            form = self._form_hidden_inputs('password_form', webpage)
        except ExtractorError:
            # no passcode form: the page is already usable
            return webpage

        password = self.get_param('videopassword')
        if not password:
            raise ExtractorError(
                'This video is protected by a passcode, use the --video-password option', expected=True)
        is_meeting = form.get('useWhichPasswd') == 'meeting'
        validation = self._download_json(
            base_url + 'rec/validate%s_passwd' % ('_meet' if is_meeting else ''),
            video_id, 'Validating passcode', 'Wrong passcode', data=urlencode_postdata({
                'id': form[('meet' if is_meeting else 'file') + 'Id'],
                'passwd': password,
                'action': form.get('action'),
            }))
        if not validation.get('status'):
            raise ExtractorError(validation['errorMessage'], expected=True)
        return self._download_webpage(url, video_id, note=f'Re-downloading {url_type} webpage')

    def _real_extract(self, url):
        base_url, url_type, video_id = self._match_valid_url(url).group('base_url', 'type', 'id')

        if url_type == 'share':
            # resolve a share URL to its underlying play URL
            webpage = self._get_real_webpage(url, base_url, video_id, 'share')
            meeting_id = self._get_page_data(webpage, video_id)['meetingId']
            redirect_path = self._download_json(
                f'{base_url}nws/recording/1.0/play/share-info/{meeting_id}',
                video_id, note='Downloading share info JSON')['result']['redirectUrl']
            url = urljoin(base_url, redirect_path)

        webpage = self._get_real_webpage(url, base_url, video_id, 'play')
        file_id = self._get_page_data(webpage, video_id)['fileId']
        if not file_id:
            # When things go wrong, file_id can be empty string
            raise ExtractorError('Unable to extract file ID')

        data = self._download_json(
            f'{base_url}nws/recording/1.0/play/info/{file_id}', video_id,
            note='Downloading play info JSON')['result']

        subtitles = {}
        for _type in ('transcript', 'cc', 'chapter'):
            if data.get('%sUrl' % _type):
                subtitles[_type] = [{
                    'url': urljoin(base_url, data['%sUrl' % _type]),
                    'ext': 'vtt',
                }]

        formats = []

        # NOTE: "viewResolvtions"/"shareResolvtions" are the literal
        # (misspelled) keys in Zoom's API response — do not "fix" them
        if data.get('viewMp4Url'):
            formats.append({
                'format_note': 'Camera stream',
                'url': str_or_none(data.get('viewMp4Url')),
                'width': int_or_none(traverse_obj(data, ('viewResolvtions', 0))),
                'height': int_or_none(traverse_obj(data, ('viewResolvtions', 1))),
                'format_id': str_or_none(traverse_obj(data, ('recording', 'id'))),
                'ext': 'mp4',
                'filesize_approx': parse_filesize(str_or_none(traverse_obj(data, ('recording', 'fileSizeInMB')))),
                'preference': 0
            })

        if data.get('shareMp4Url'):
            formats.append({
                'format_note': 'Screen share stream',
                'url': str_or_none(data.get('shareMp4Url')),
                'width': int_or_none(traverse_obj(data, ('shareResolvtions', 0))),
                'height': int_or_none(traverse_obj(data, ('shareResolvtions', 1))),
                'format_id': str_or_none(traverse_obj(data, ('shareVideo', 'id'))),
                'ext': 'mp4',
                'preference': -1
            })

        return {
            'id': video_id,
            'title': str_or_none(traverse_obj(data, ('meet', 'topic'))),
            'subtitles': subtitles,
            'formats': formats,
            'http_headers': {
                'Referer': base_url,
            },
        }
"""Module provider for CloudXNS"""
import hashlib
import json
import logging
import time
from argparse import ArgumentParser
from typing import List
from urllib.parse import urlencode
import requests
from lexicon.exceptions import AuthenticationError
from lexicon.interfaces import Provider as BaseProvider
LOGGER = logging.getLogger(__name__)
class Provider(BaseProvider):
    """Provider class for CloudXNS"""

    @staticmethod
    def get_nameservers() -> List[str]:
        return ["cloudxns.net"]

    @staticmethod
    def configure_parser(parser: ArgumentParser) -> None:
        parser.add_argument(
            "--auth-username", help="specify API-KEY for authentication"
        )
        parser.add_argument(
            "--auth-token", help="specify SECRET-KEY for authentication"
        )

    def __init__(self, config):
        super(Provider, self).__init__(config)
        self.domain_id = None
        self.api_endpoint = "https://www.cloudxns.net/api2"

    def authenticate(self):
        """Resolve self.domain to its CloudXNS domain id, or raise."""
        payload = self._get("/domain")
        for record in payload["data"]:
            # the API reports domains with a trailing dot
            if record["domain"] == self.domain + ".":
                self.domain_id = record["id"]
                break
        else:
            raise AuthenticationError("No domain found")

    def cleanup(self) -> None:
        pass

    # Create record. If record already exists with the same content, do nothing'
    def create_record(self, rtype, name, content):
        record = {
            "domain_id": self.domain_id,
            "host": self._relative_name(name),
            "value": content,
            "type": rtype,
            "line_id": 1,
        }
        if self._get_lexicon_option("ttl"):
            record["ttl"] = self._get_lexicon_option("ttl")

        try:
            self._post("/record", record)
        except requests.exceptions.HTTPError as err:
            # API error code 34 means the record already exists: not a failure
            already_exists = err.response.json()["code"] == 34
            if not already_exists:
                raise
        # CloudXNS will return bad HTTP Status when error, will throw at
        # r.raise_for_status() in _request()
        LOGGER.debug("create_record: %s", True)
        return True

    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def list_records(self, rtype=None, name=None, content=None):
        # (Name restored from the METHOD_NAME placeholder; list_records is
        # the method lexicon's BaseProvider interface expects.)
        payload = self._get(
            "/record/" + self.domain_id, {"host_id": 0, "offset": 0, "row_num": 2000}
        )
        records = []
        for record in payload["data"]:
            processed_record = {
                "type": record["type"],
                "name": self._full_name(record["host"]),
                "ttl": record["ttl"],
                "content": record["value"],
                # this id is useless unless your doing record linking. Lets return the
                # original record identifier.
                "id": record["record_id"],
            }
            if processed_record["type"] == "TXT":
                processed_record["content"] = processed_record["content"].replace(
                    '"', ""
                )
                # CloudXNS will add quotes automaticly for TXT records,
                # https://www.cloudxns.net/Support/detail/id/114.html
            records.append(processed_record)

        if rtype:
            records = [record for record in records if record["type"] == rtype]
        if name:
            records = [
                record for record in records if record["name"] == self._full_name(name)
            ]
        if content:
            records = [record for record in records if record["content"] == content]

        LOGGER.debug("list_records: %s", records)
        return records

    # Create or update a record.
    def update_record(self, identifier, rtype=None, name=None, content=None):
        if not identifier:
            # look the record up by name when no identifier is given
            records = self.list_records(name=name)
            if len(records) == 1:
                identifier = records[0]["id"]
            else:
                raise Exception("Record identifier could not be found.")
        data = {
            "domain_id": self.domain_id,
            "host": self._relative_name(name),
            "value": content,
            "type": rtype,
        }
        if self._get_lexicon_option("ttl"):
            data["ttl"] = self._get_lexicon_option("ttl")

        self._put("/record/" + identifier, data)
        LOGGER.debug("update_record: %s", True)
        return True

    # Delete an existing record.
    # If record does not exist, do nothing.
    def delete_record(self, identifier=None, rtype=None, name=None, content=None):
        delete_record_id = []
        if not identifier:
            records = self.list_records(rtype, name, content)
            delete_record_id = [record["id"] for record in records]
        else:
            delete_record_id.append(identifier)

        LOGGER.debug("delete_records: %s", delete_record_id)

        for record_id in delete_record_id:
            self._delete("/record/" + record_id + "/" + self.domain_id)

        # is always True at this point, if a non 200 response is returned an error is raised.
        LOGGER.debug("delete_record: %s", True)
        return True

    # Helpers

    def _request(self, action="GET", url="/", data=None, query_params=None):
        """Perform a signed CloudXNS API request and return the JSON body."""
        if data is None:
            data = {}
            data["login_token"] = (
                self._get_provider_option("auth_username")
                + ","
                + self._get_provider_option("auth_token")
            )
            data["format"] = "json"
        if query_params:
            query_string = "?" + urlencode(query_params)
        else:
            query_string = ""
            query_params = {}
        if data:
            data = json.dumps(data)
        else:
            data = ""
        date = time.strftime("%a %b %d %H:%M:%S %Y", time.localtime())
        # API-HMAC is the md5 of key + full url + body + date + secret,
        # per the CloudXNS API2 signing scheme
        default_headers = {
            "API-KEY": self._get_provider_option("auth_username"),
            "API-REQUEST-DATE": date,
            "API-HMAC": hashlib.md5(
                f"{self._get_provider_option('auth_username')}{self.api_endpoint}{url}{query_string}{data}{date}{self._get_provider_option('auth_token')}".encode(
                    "utf-8"
                )
            ).hexdigest(),
            "API-FORMAT": "json",
        }
        default_auth = None
        response = requests.request(
            action,
            self.api_endpoint + url,
            params=query_params,
            data=data,
            headers=default_headers,
            auth=default_auth,
        )
        # if the request fails for any reason, throw an error.
        response.raise_for_status()
        return response.json()
"""
Parse a C source file.
To use, subclass CParser and override its handle_* methods. Then instantiate
the class with a string to parse.
"""
__docformat__ = "restructuredtext"
import os.path
import sys
from ctypesgen.parser import cgrammar, preprocessor, yacc
# --------------------------------------------------------------------------
# Lexer
# --------------------------------------------------------------------------
class CLexer(object):
    """Adapter feeding preprocessor tokens to the yacc-generated C parser.

    Holds the flat token list produced by the preprocessor and rewrites
    IDENTIFIER tokens into keyword / TYPE_NAME tokens on the fly.
    """
    def __init__(self, cparser):
        # owning CParser instance
        self.cparser = cparser
        # typedef names seen so far; identifiers in this set lex as TYPE_NAME
        self.type_names = set()
        # True while between PP_DEFINE and PP_END_DEFINE markers
        self.in_define = False
        self.lineno = -1
        self.lexpos = -1
    def input(self, tokens):
        """Load a new token list and reset the read position."""
        self.tokens = tokens
        self.pos = 0
    def token(self):
        """Return the next (possibly rewritten) token, or None when done."""
        while self.pos < len(self.tokens):
            t = self.tokens[self.pos]
            self.pos += 1
            # a falsy entry ends the stream early
            if not t:
                break
            if t.type == "PP_DEFINE":
                self.in_define = True
            elif t.type == "PP_END_DEFINE":
                self.in_define = False
            # Transform PP tokens into C tokens
            elif t.type == "IDENTIFIER" and t.value in cgrammar.keywords:
                t.type = cgrammar.keyword_map[t.value]
            elif t.type == "IDENTIFIER" and t.value in self.type_names:
                # only treat the identifier as a type name when it is not
                # directly preceded by another type specifier
                if self.pos < 2 or self.tokens[self.pos - 2].type not in (
                    "VOID",
                    "_BOOL",
                    "CHAR",
                    "SHORT",
                    "INT",
                    "LONG",
                    "FLOAT",
                    "DOUBLE",
                    "SIGNED",
                    "UNSIGNED",
                    "ENUM",
                    "STRUCT",
                    "UNION",
                    "TYPE_NAME",
                ):
                    t.type = "TYPE_NAME"
            t.lexer = self
            t.clexpos = self.pos - 1
            return t
        return None
# --------------------------------------------------------------------------
# Parser
# --------------------------------------------------------------------------
class CParser(object):
    """Parse a C source file.
    Subclass and override the handle_* methods. Call `parse` with a string
    to parse.
    """

    def __init__(self, options):
        super(CParser, self).__init__()
        self.preprocessor_parser = preprocessor.PreprocessorParser(options, self)
        self.parser = yacc.yacc(
            method="LALR",
            debug=False,
            module=cgrammar,
            write_tables=True,
            outputdir=os.path.dirname(__file__),
            optimize=True,
        )
        self.parser.errorfunc = cgrammar.p_error
        self.parser.cparser = self
        self.lexer = CLexer(self)
        # Pre-seed well-known typedef names so declarations that use them
        # parse correctly without processing the defining headers.
        if not options.no_stddef_types:
            self.lexer.type_names.add("wchar_t")
            self.lexer.type_names.add("ptrdiff_t")
            self.lexer.type_names.add("size_t")
        if not options.no_gnu_types:
            self.lexer.type_names.add("__builtin_va_list")
        if sys.platform == "win32" and not options.no_python_types:
            self.lexer.type_names.add("__int64")

    def parse(self, filename, debug=False):
        """Parse a file.

        If `debug` is True, parsing state is dumped to stdout.
        """
        self.handle_status("Preprocessing %s" % filename)
        self.preprocessor_parser.parse(filename)
        self.lexer.input(self.preprocessor_parser.output)
        self.handle_status("Parsing %s" % filename)
        self.parser.parse(lexer=self.lexer, debug=debug, tracking=True)

    # ----------------------------------------------------------------------
    # Parser interface. Override these methods in your subclass.
    # ----------------------------------------------------------------------

    def handle_error(self, message, filename, lineno):
        """A parse error occurred.

        The default implementation prints `lineno` and `message` to stderr.
        The parser will try to recover from errors by synchronising at the
        next semicolon.
        """
        sys.stderr.write("%s:%s %s\n" % (filename, lineno, message))

    def handle_pp_error(self, message):
        """The C preprocessor emitted an error.

        The default implementation prints the error to stderr. If processing
        can continue, it will.
        """
        sys.stderr.write("Preprocessor: {}\n".format(message))

    def handle_status(self, message):
        """Progress information.

        The default implementation prints message to stderr.
        """
        sys.stderr.write("{}\n".format(message))

    def handle_define(self, name, params, value, filename, lineno):
        """#define `name` `value`
        or #define `name`(`params`) `value`

        name is a string
        params is None or a list of strings
        value is a ...?
        """

    def handle_define_constant(self, name, value, filename, lineno):
        """#define `name` `value`

        name is a string
        value is an ExpressionNode or None
        """

    def handle_define_macro(self, name, params, value, filename, lineno):
        """#define `name`(`params`) `value`

        name is a string
        params is a list of strings
        value is an ExpressionNode or None
        """

    def handle_undefine(self, name, filename, lineno):
        """#undef `name`

        name is a string

        (Name restored from the METHOD_NAME placeholder.)
        """

    def impl_handle_declaration(self, declaration, filename, lineno):
        """Internal method that calls `handle_declaration`. This method
        also adds any new type definitions to the lexer's list of valid type
        names, which affects the parsing of subsequent declarations.
        """
        if declaration.storage == "typedef":
            declarator = declaration.declarator
            if not declarator:
                # XXX TEMPORARY while struct etc not filled
                return
            while declarator.pointer:
                declarator = declarator.pointer
            self.lexer.type_names.add(declarator.identifier)
        self.handle_declaration(declaration, filename, lineno)

    def handle_declaration(self, declaration, filename, lineno):
        """A declaration was encountered.

        `declaration` is an instance of Declaration. Where a declaration has
        multiple initialisers, each is returned as a separate declaration.
        """
        pass
class DebugCParser(CParser):
    """A convenience class that prints each invocation of a handle_* method to
    stdout.
    """

    def handle_define(self, name, params, value, filename, lineno):
        # bugfix: the override now matches CParser.handle_define's
        # signature (name, params, value, filename, lineno); the original
        # omitted `params` and would raise TypeError when invoked
        print("#define name=%r, value=%r" % (name, value))

    def handle_define_constant(self, name, value, filename, lineno):
        print("#define constant name=%r, value=%r" % (name, value))

    def handle_declaration(self, declaration, filename, lineno):
        print(declaration)

    def get_ctypes_type(self, typ, declarator):
        return typ

    def handle_define_unparseable(self, name, params, value, filename, lineno):
        if params:
            original_string = "#define %s(%s) %s" % (name, ",".join(params), " ".join(value))
        else:
            original_string = "#define %s %s" % (name, " ".join(value))
        print(original_string)
if __name__ == "__main__":
    # NOTE(review): DebugCParser inherits CParser.__init__(options), so this
    # zero-argument construction looks like it would raise TypeError —
    # confirm before relying on this entry point.
    DebugCParser().parse(sys.argv[1], debug=True)
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2023 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
from pandapower.auxiliary import read_from_net, write_to_net
from pandapower.control.controller.trafo_control import TrafoController
class DiscreteTapControl(TrafoController):
    """
    Trafo Controller with local tap changer voltage control.
    INPUT:
        **net** (attrdict) - Pandapower struct
        **tid** (int) - ID of the trafo that is controlled
        **vm_lower_pu** (float) - Lower voltage limit in pu
        **vm_upper_pu** (float) - Upper voltage limit in pu
    OPTIONAL:
        **side** (string, "lv") - Side of the transformer where the voltage is controlled (hv or lv)
        **trafotype** (float, "2W") - Trafo type ("2W" or "3W")
        **tol** (float, 0.001) - Voltage tolerance band at bus in Percent (default: 1% = 0.01pu)
        **in_service** (bool, True) - Indicates if the controller is currently in_service
        **drop_same_existing_ctrl** (bool, False) - Indicates if already existing controllers of the same type and with the same matching parameters (e.g. at same element) should be dropped
    """
    def __init__(self, net, tid, vm_lower_pu, vm_upper_pu, side="lv", trafotype="2W",
                 tol=1e-3, in_service=True, level=0, order=0, drop_same_existing_ctrl=False,
                 matching_params=None, **kwargs):
        if matching_params is None:
            matching_params = {"tid": tid, 'trafotype': trafotype}
        super().__init__(net, tid, side, tol=tol, in_service=in_service, level=level, order=order, trafotype=trafotype,
                         drop_same_existing_ctrl=drop_same_existing_ctrl, matching_params=matching_params,
                         **kwargs)
        self.vm_lower_pu = vm_lower_pu
        self.vm_upper_pu = vm_upper_pu
        # half a tap step (in pu) plus the tolerance; used by the set-point mode
        self.vm_delta_pu = self.tap_step_percent / 100. * .5 + self.tol
        # assigning vm_set_pu (may be None) goes through the property setter,
        # which overwrites vm_lower_pu / vm_upper_pu when a set point is given
        self.vm_set_pu = kwargs.get("vm_set_pu")

    @classmethod
    def from_tap_step_percent(cls, net, tid, vm_set_pu, side="lv", trafotype="2W", tol=1e-3, in_service=True, order=0,
                              drop_same_existing_ctrl=False, matching_params=None, **kwargs):
        """
        Alternative mode of the controller, which uses a set point for voltage and the value of net.trafo.tap_step_percent to calculate
        vm_upper_pu and vm_lower_pu. To this end, the parameter vm_set_pu should be provided, instead of vm_lower_pu and vm_upper_pu.
        To use this mode of the controller, the controller can be initialized as following:
        >>> c = DiscreteTapControl.from_tap_step_percent(net, tid, vm_set_pu)
        INPUT:
            **net** (attrdict) - Pandapower struct
            **tid** (int) - ID of the trafo that is controlled
            **vm_set_pu** (float) - Voltage setpoint in pu
        """
        self = cls(net, tid=tid, vm_lower_pu=None, vm_upper_pu=None, side=side, trafotype=trafotype, tol=tol,
                   in_service=in_service, order=order, drop_same_existing_ctrl=drop_same_existing_ctrl,
                   matching_params=matching_params, vm_set_pu=vm_set_pu, **kwargs)
        return self

    @property
    def vm_set_pu(self):
        # voltage set point (pu); None when operating in band mode
        return self._vm_set_pu

    @vm_set_pu.setter
    def vm_set_pu(self, value):
        self._vm_set_pu = value
        if value is None:
            return
        # derive the control band from the set point and half a tap step
        self.vm_lower_pu = value - self.vm_delta_pu
        self.vm_upper_pu = value + self.vm_delta_pu

    def initialize_control(self, net):
        # (Name restored from the METHOD_NAME placeholder; this is the
        # pandapower controller initialization hook.)
        super().initialize_control(net)
        if hasattr(self, 'vm_set_pu') and self.vm_set_pu is not None:
            # refresh the band in case tap_step_percent changed since __init__
            self.vm_delta_pu = self.tap_step_percent / 100. * .5 + self.tol

    def control_step(self, net):
        """
        Implements one step of the Discrete controller, always stepping only one tap position up or down
        """
        if self.nothing_to_do(net):
            return

        vm_pu = read_from_net(net, "res_bus", self.controlled_bus, "vm_pu", self._read_write_flag)
        self.tap_pos = read_from_net(net, self.trafotable, self.controlled_tid, "tap_pos", self._read_write_flag)
        # the step direction depends on which trafo side carries the tap changer
        increment = np.where(self.tap_side_coeff * self.tap_sign == 1,
                             np.where(np.logical_and(vm_pu < self.vm_lower_pu, self.tap_pos > self.tap_min), -1,
                                      np.where(np.logical_and(vm_pu > self.vm_upper_pu, self.tap_pos < self.tap_max), 1, 0)),
                             np.where(np.logical_and(vm_pu < self.vm_lower_pu, self.tap_pos < self.tap_max), 1,
                                      np.where(np.logical_and(vm_pu > self.vm_upper_pu, self.tap_pos > self.tap_min), -1, 0)))
        self.tap_pos += increment

        # WRITE TO NET
        write_to_net(net, self.trafotable, self.controlled_tid, 'tap_pos', self.tap_pos, self._read_write_flag)

    def is_converged(self, net):
        """
        Checks if the voltage is within the desired voltage band, then returns True
        """
        if self.nothing_to_do(net):
            return True

        vm_pu = read_from_net(net, "res_bus", self.controlled_bus, "vm_pu", self._read_write_flag)
        self.tap_pos = read_from_net(net, self.trafotable, self.controlled_tid, "tap_pos", self._read_write_flag)
        # a controller at its tap limit counts as converged even outside the band
        reached_limit = np.where(self.tap_side_coeff * self.tap_sign == 1,
                                 (vm_pu < self.vm_lower_pu) & (self.tap_pos == self.tap_min) |
                                 (vm_pu > self.vm_upper_pu) & (self.tap_pos == self.tap_max),
                                 (vm_pu < self.vm_lower_pu) & (self.tap_pos == self.tap_max) |
                                 (vm_pu > self.vm_upper_pu) & (self.tap_pos == self.tap_min))
        converged = np.logical_or(reached_limit, np.logical_and(self.vm_lower_pu < vm_pu, vm_pu < self.vm_upper_pu))
        return np.all(converged)
|
import os
from virttest import utils_net
from virttest import virsh
from virttest.libvirt_xml import vm_xml
from virttest.libvirt_xml.devices.controller import Controller # pylint: disable=W0611
from virttest.libvirt_xml.devices.disk import Disk # pylint: disable=W0611
from virttest.libvirt_xml.devices.filesystem import Filesystem # pylint: disable=W0611
from virttest.libvirt_xml.devices.interface import Interface # pylint: disable=W0611
from virttest.libvirt_xml.devices.input import Input # pylint: disable=W0611
from virttest.libvirt_xml.devices.memballoon import Memballoon # pylint: disable=W0611
from virttest.libvirt_xml.devices.rng import Rng # pylint: disable=W0611
from virttest.libvirt_xml.devices.video import Video # pylint: disable=W0611
def run(test, params, env):
    """
    Start guest with virtio page_per_vq attribute - various virtio devices

    1) Prepare a guest with virtio page_per_vq attribute in different virtio devices.
    2) Start the guest.
    3) Login the vm and check network.
    """
    def METHOD_NAME(vmxml):
        """
        Prepare the guest with different virtio devices to test

        :params vmxml: the vm xml
        :returns: (device_xml, vmxml) ready for run_test()
        """
        # Remove any pre-existing device of the tested type so the one we
        # build below is the instance that gets checked.
        vmxml.remove_all_device_by_type(device_type)
        vmxml.sync()
        # Need to use shared memory for filesystem device
        if device_type == "filesystem":
            vm_xml.VMXML.set_memoryBacking_tag(vmxml.vm_name, access_mode="shared",
                                               hpgs=False)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Prepare device xml by using device function, for example, Disk().
        # NOTE(review): eval() on the capitalized device_type resolves one of
        # the device classes imported at the top of the file; device_type
        # comes from the test configuration, not untrusted input, but a dict
        # of classes would be a safer dispatch.
        device_obj = device_type.capitalize()
        if device_type == "input":
            device_xml = eval(device_obj)(input_type)
        else:
            device_xml = eval(device_obj)()
        device_xml.setup_attrs(**device_dict)
        return device_xml, vmxml

    def run_test(device_xml, vmxml):
        """
        Start a guest and check the network.

        :params device_xml: the device xml prepared in prepare_test
        :params vmxml: the vm xml after prepare_test()
        """
        if not hotplug:
            # Coldplug: add the device to the persistent XML before boot.
            vmxml.add_device(device_xml)
            vmxml.sync()
            test.log.info("TEST_STEP1: start guest")
            start_guest()
        else:
            test.log.info("TEST_STEP1: hotplug %s device", device_type)
            start_guest()
            virsh.attach_device(vm_name, device_xml.xml, ignore_status=False, debug=True)
        vm.wait_for_login()
        test.log.info("TEST_STEP2: check the attribute in %s xml", device_type)
        check_attribute()
        if hotplug:
            virsh.detach_device(vm_name, device_xml.xml, ignore_status=False, debug=True)
        test.log.info("TEST_STEP3: check the network by ping")
        utils_net.ping(dest=ping_outside, count='3', timeout=10, session=vm.session, force_ipv4=True)

    def teardown_test():
        """
        Clean up the test environment: restore the backup XML and remove
        any disk image created for the hotplug case.
        """
        bkxml.sync()
        if hotplug and os.path.exists(disk_image):
            os.remove(disk_image)

    def check_attribute():
        """
        Check the page_per_vq attribute after starting the guest
        """
        af_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # NOTE(review): virsh.dumpxml() normally takes a domain name; here it
        # receives a VMXML object — confirm this logs what was intended.
        test.log.info("The current dumpxml is %s", virsh.dumpxml(af_vmxml))
        # Keyboard and mouse input will be default in guest. So identify input device.
        if device_type == "input" and hotplug:
            dev_xml = af_vmxml.get_devices(device_type)[2]
        # Guest has many controllers, so also need to identify it.
        elif device_type == "controller":
            dev_xml = af_vmxml.get_devices(device_type)
        else:
            dev_xml = af_vmxml.get_devices(device_type)[0]
        # Select the virtio-scsi/virtio-serial controller from all controllers
        if device_type == "controller":
            # NOTE(review): if no controller matches controller_type,
            # cur_dict is never bound and the comparison below raises
            # NameError — presumably the test matrix always provides a match.
            for controller in dev_xml:
                if controller.type == controller_type:
                    cur_dict = controller.fetch_attrs()["driver"]
        else:
            cur_dict = dev_xml.fetch_attrs()["driver"]
        pre_dict = driver_dict["driver"]
        for key, value in pre_dict.items():
            if cur_dict.get(key) != value:
                test.fail("Driver XML compare fails. It should be '%s', but "
                          "got '%s'" % (pre_dict, cur_dict))
            else:
                # NOTE(review): a single tuple is passed for two '%s'
                # placeholders; the logging call will report a formatting
                # error — should be two separate arguments.
                test.log.debug("Driver XML compare successfully. The '%s' matches"
                               " the '%s'", (pre_dict, cur_dict))

    def start_guest():
        """
        Start or reboot the guest
        """
        test.log.info("Start the guest")
        if not vm.is_alive():
            virsh.start(vm_name, ignore_status=False, debug=True)

    # Test parameters from the cartesian config.
    vm_name = params.get("main_vm")
    device_type = params.get("device_type")
    driver_dict = eval(params.get("driver_dict", "{}"))
    ping_outside = params.get("ping_outside")
    hotplug = params.get("hotplug", "no") == "yes"
    device_dict = eval(params.get("device_dict", "{}"))
    disk_image = params.get("disk_image", "")
    input_type = params.get("input_type")
    controller_type = params.get("controller_type")

    vm = env.get_vm(vm_name)
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    # Keep a pristine copy so teardown can restore the original guest.
    bkxml = vmxml.copy()

    try:
        device_xml, vmxml = METHOD_NAME(vmxml)
        run_test(device_xml, vmxml)
    finally:
        teardown_test()
6,477 | extract toc items | """
TOC directive
~~~~~~~~~~~~~
The TOC directive syntax looks like::
.. toc:: Title
:depth: 3
"Title" and "depth" option can be empty. "depth" is an integer less
than 6, which defines the max heading level writers want to include
in TOC.
"""
from .base import Directive
class DirectiveToc(Directive):
    """Implementation of the ``.. toc::`` directive.

    Headings are recorded while the block lexer runs; a before-render hook
    then expands every "toc" token into a list of (id, text, level) entries.
    """

    def __init__(self, depth=3):
        # Default maximum heading level when the directive gives no
        # ``:depth:`` option of its own.
        self.depth = depth

    def parse(self, block, m, state):
        """Turn the matched directive into a "toc" token.

        Returns a ``block_error`` token when ``:depth:`` is not an integer.
        """
        title = m.group('value')
        depth = None
        options = self.parse_options(m)
        if options:
            depth = dict(options).get('depth')
        if depth:
            try:
                depth = int(depth)
            except (ValueError, TypeError):
                return {
                    'type': 'block_error',
                    'raw': 'TOC depth MUST be integer',
                }
        # 'raw' is filled in later by md_toc_hook once headings are known.
        return {'type': 'toc', 'raw': None, 'params': (title, depth)}

    def reset_toc_state(self, md, s, state):
        # Start every parse with a clean heading record and default depth.
        state['toc_depth'] = self.depth
        state['toc_headings'] = []
        return s, state

    def register_plugin(self, md):
        # Replace the default heading tokenizer so anchor ids get recorded.
        md.block.tokenize_heading = record_toc_heading
        md.before_parse_hooks.append(self.reset_toc_state)
        md.before_render_hooks.append(md_toc_hook)
        if md.renderer.NAME == 'html':
            md.renderer.register('theading', render_html_theading)
        elif md.renderer.NAME == 'ast':
            md.renderer.register('theading', render_ast_theading)

    def __call__(self, md):
        self.register_directive(md, 'toc')
        self.register_plugin(md)
        if md.renderer.NAME == 'html':
            md.renderer.register('toc', render_html_toc)
        elif md.renderer.NAME == 'ast':
            md.renderer.register('toc', render_ast_toc)
def record_toc_heading(text, level, state):
    """Heading tokenizer replacement that records TOC entries.

    Assigns the heading a sequential anchor id ("toc_1", "toc_2", ...),
    appends ``(id, text, level)`` to ``state['toc_headings']`` and returns
    the "theading" token consumed by the renderer.
    """
    headings = state['toc_headings']
    heading_id = 'toc_{}'.format(len(headings) + 1)
    headings.append((heading_id, text, level))
    return {'type': 'theading', 'text': text, 'params': (level, heading_id)}
def md_toc_hook(md, tokens, state):
    """Before-render hook: fill the ``raw`` payload of every "toc" token.

    Each "toc" token receives the recorded headings filtered to its own
    depth (or the state's default depth when the directive gave none).
    """
    headings = state.get('toc_headings')
    if not headings:
        return tokens
    fallback_depth = state.get('toc_depth', 3)
    cleaned = list(_cleanup_headings_text(md.inline, headings, state))
    for token in tokens:
        if token['type'] != 'toc':
            continue
        _title, depth_opt = token['params']
        depth = depth_opt or fallback_depth
        token['raw'] = [entry for entry in cleaned if entry[2] <= depth]
    return tokens
def render_ast_toc(items, title, depth):
    """Build the AST node describing a rendered table of contents."""
    node = {'type': 'toc', 'title': title, 'depth': depth}
    node['items'] = [list(entry) for entry in items]
    return node
def render_ast_theading(children, level, tid):
    """Build the AST node for a heading carrying its TOC anchor id."""
    return dict(type='heading', children=children, level=level, id=tid)
def render_html_toc(items, title, depth):
    """Render a TOC as a ``<section class="toc">`` HTML element."""
    parts = ['<section class="toc">\n']
    if title:
        parts.append('<h1>' + title + '</h1>\n')
    parts.append(render_toc_ul(items))
    parts.append('</section>\n')
    return ''.join(parts)
def render_html_theading(text, level, tid):
    """Render a heading tag (h1..h6) with its TOC anchor id."""
    return '<h{lvl} id="{tid}">{text}</h{lvl}>\n'.format(lvl=level, tid=tid, text=text)
def METHOD_NAME(md, s):
    """Extract TOC headings into list structure of::

        [
          ('toc_1', 'Introduction', 1),
          ('toc_2', 'Install', 2),
          ('toc_3', 'Upgrade', 2),
          ('toc_4', 'License', 1),
        ]

    :param md: Markdown Instance with TOC plugin.
    :param s: text string.
    """
    text, state = md.before_parse(s, {})
    md.block.parse(text, state)
    recorded = state.get('toc_headings')
    if not recorded:
        return []
    return list(_cleanup_headings_text(md.inline, recorded, state))
def render_toc_ul(toc):
    """Render a <ul> table of content HTML. The param "toc" should
    be formatted into this structure::

        [
          (toc_id, text, level),
        ]

    For example::

        [
          ('toc-intro', 'Introduction', 1),
          ('toc-install', 'Install', 2),
          ('toc-upgrade', 'Upgrade', 2),
          ('toc-license', 'License', 1),
        ]
    """
    if not toc:
        return ''
    s = '<ul>\n'
    # ``levels`` is a stack of heading levels for the currently-open <ul>
    # nesting; the top of the stack is the level of the currently-open <li>.
    levels = []
    for k, text, level in toc:
        item = '<a href="#{}">{}</a>'.format(k, text)
        if not levels:
            # First item: open the first <li>.
            s += '<li>' + item
            levels.append(level)
        elif level == levels[-1]:
            # Sibling heading: close the previous <li>, open a new one.
            s += '</li>\n<li>' + item
        elif level > levels[-1]:
            # Deeper heading: open a nested <ul>.
            s += '\n<ul>\n<li>' + item
            levels.append(level)
        else:
            # Shallower heading: unwind the stack, closing lists, until a
            # matching (or enclosing) level is found.
            last_level = levels.pop()
            while levels:
                last_level = levels.pop()
                if level == last_level:
                    s += '</li>\n</ul>\n</li>\n<li>' + item
                    levels.append(level)
                    break
                elif level > last_level:
                    s += '</li>\n<li>' + item
                    levels.append(last_level)
                    levels.append(level)
                    break
                else:
                    s += '</li>\n</ul>\n'
            else:
                # Stack exhausted: heading is shallower than anything seen
                # so far; treat it as a new sibling at this level.
                levels.append(level)
                s += '</li>\n<li>' + item
    # Close whatever nesting is still open.
    while len(levels) > 1:
        s += '</li>\n</ul>\n'
        levels.pop()
    return s + '</li>\n</ul>\n'
def _cleanup_headings_text(inline, items, state):
    """Strip inline markup from recorded headings.

    For each ``(toc_id, raw_text, level)`` in *items*, runs the raw text
    through the inline lexer and keeps only the textual payload of the
    resulting tokens, yielding ``(toc_id, plain_text, level)``.
    """
    for item in items:
        text = item[1]
        tokens = inline._scan(text, state, inline.rules)
        text = ''.join(_inline_token_text(tok) for tok in tokens)
        yield item[0], text, item[2]
def _inline_token_text(token):
tok_type = token[0]
if tok_type == 'inline_html':
return ''
if len(token) == 2:
return token[1]
if tok_type in {'image', 'link'}:
return token[2]
return '' |
6,478 | refresh access token | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import logging
from typing import Any, List, Mapping, MutableMapping, Optional, Tuple
import backoff
import pendulum
import requests
from deprecated import deprecated
from ..exceptions import DefaultBackoffException
from .core import HttpAuthenticator
logger = logging.getLogger("airbyte")
@deprecated(version="0.1.20", reason="Use airbyte_cdk.sources.streams.http.requests_native_auth.Oauth2Authenticator instead")
class Oauth2Authenticator(HttpAuthenticator):
    """
    Generates OAuth2.0 access tokens from an OAuth2.0 refresh token and client credentials.
    The generated access token is attached to each request via the Authorization header.
    """

    def __init__(
        self,
        token_refresh_endpoint: str,
        client_id: str,
        client_secret: str,
        refresh_token: str,
        scopes: List[str] = None,
        refresh_access_token_headers: Optional[Mapping[str, Any]] = None,
        refresh_access_token_authenticator: Optional[HttpAuthenticator] = None,
    ):
        self.token_refresh_endpoint = token_refresh_endpoint
        self.client_secret = client_secret
        self.client_id = client_id
        self.refresh_token = refresh_token
        self.scopes = scopes
        self.refresh_access_token_headers = refresh_access_token_headers
        self.refresh_access_token_authenticator = refresh_access_token_authenticator
        # Force a refresh on first use: pretend the token expired yesterday.
        self._token_expiry_date = pendulum.now().subtract(days=1)
        self._access_token = None

    def get_auth_header(self) -> Mapping[str, Any]:
        """Return the Authorization header carrying a valid bearer token."""
        return {"Authorization": f"Bearer {self.get_access_token()}"}

    def get_access_token(self):
        """Return the cached access token, refreshing it first if expired."""
        if self.token_has_expired():
            t0 = pendulum.now()
            token, expires_in = self.METHOD_NAME()
            self._access_token = token
            # Expiry is measured from just before the refresh request so a
            # slow request cannot extend the token's perceived lifetime.
            self._token_expiry_date = t0.add(seconds=expires_in)
        return self._access_token

    def token_has_expired(self) -> bool:
        """True when the cached token is past its expiry date."""
        return pendulum.now() > self._token_expiry_date

    def get_refresh_request_body(self) -> Mapping[str, Any]:
        """Override to define additional parameters"""
        payload: MutableMapping[str, Any] = {
            "grant_type": "refresh_token",
            "client_id": self.client_id,
            "client_secret": self.client_secret,
            "refresh_token": self.refresh_token,
        }

        if self.scopes:
            payload["scopes"] = self.scopes

        return payload

    @backoff.on_exception(
        backoff.expo,
        DefaultBackoffException,
        on_backoff=lambda details: logger.info(
            f"Caught retryable error after {details['tries']} tries. Waiting {details['wait']} seconds then retrying..."
        ),
        max_time=300,
    )
    def METHOD_NAME(self) -> Tuple[str, int]:
        """
        returns a tuple of (access_token, token_lifespan_in_seconds)
        """
        try:
            response = requests.request(
                method="POST",
                url=self.token_refresh_endpoint,
                data=self.get_refresh_request_body(),
                headers=self.get_refresh_access_token_headers(),
            )
            response.raise_for_status()
            response_json = response.json()
            return response_json["access_token"], int(response_json["expires_in"])
        except requests.exceptions.RequestException as e:
            # e.response is None for errors that never received an HTTP
            # response (DNS failures, connection resets, timeouts). Guard
            # before reading status_code, otherwise the original error is
            # masked by an AttributeError.
            if e.response is not None and (e.response.status_code == 429 or e.response.status_code >= 500):
                raise DefaultBackoffException(request=e.response.request, response=e.response)
            raise
        except Exception as e:
            raise Exception(f"Error while refreshing access token: {e}") from e

    def get_refresh_access_token_headers(self):
        """Merge the static refresh headers with the optional refresh
        authenticator's Authorization header."""
        # Copy the configured mapping so merging the authenticator's header
        # below never mutates the caller-supplied dict in place.
        headers = {}
        if self.refresh_access_token_headers:
            headers = dict(self.refresh_access_token_headers)
        if self.refresh_access_token_authenticator:
            refresh_auth_headers = self.refresh_access_token_authenticator.get_auth_header()
            headers.update(refresh_auth_headers)
        return headers
6,479 | table | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import subprocess
import time
import argparse
import pandas
import matplotlib.pyplot as plt
import multiprocessing
import statistics
import collections
import mooseutils
def get_args():
    """Parse the command-line options for the STM paper results utility."""
    parser = argparse.ArgumentParser(
        description='Utility for producing results, plots, and tables for STM paper')
    parser.add_argument('--run', action='store_true', help="Perform simulations.")
    # All remaining options are plain integers with defaults; declare them
    # data-driven to keep flag/default/help triples side by side.
    integer_options = (
        ('--replicates', 10, "Number of replicates to perform."),
        ('--base', 128, "The base number of samples to perform."),
        ('--memory-levels', 6, "Number of levels to perform for memory/timing runs, n in [base*2^0, ..., base*2^n-1]."),
        ('--memory-cores', 32, "Number of processors to use for memory/timing runs."),
        ('--weak-levels', 7, "Number of processor levels to perform for weak scaling, n in [2^0,...,2^n-1]."),
        ('--write', 1, "Toggle writing to results directory when --run is used."),
    )
    for flag, default, help_text in integer_options:
        parser.add_argument(flag, default=default, type=int, help=help_text)
    return parser.parse_args()
def execute(infile, outfile, mode, samples, mpi=None, replicates=1, write=True):
    """Run the sampler study for one MultiApp *mode* and collect results.

    For each (cores, samples) pair the MOOSE input is executed under
    ``mpiexec`` and the memory/timing columns are harvested from the CSV
    the run writes (``{mode}.csv``).

    :param infile: MOOSE input file name.
    :param outfile: prefix for the results CSV written to ``results/``.
    :param mode: MultiApp execution mode (normal/batch-reset/batch-restore).
    :param samples: list of sample counts to run.
    :param mpi: list of processor counts parallel to *samples* (default: all 1).
    :param replicates: number of timesteps (replicates) per run.
    :param write: when True, write ``results/{outfile}_{mode}.csv``.
    """
    data = collections.defaultdict(list)
    if mpi is None: mpi = [1]*len(samples)
    exe = mooseutils.find_moose_executable_recursive()
    for n_cores, n_samples in zip(mpi, samples):
        cmd = ['mpiexec', '-n', str(n_cores), exe, '-i', infile, 'Samplers/mc/num_rows={}'.format(int(n_samples)),
               'Executioner/num_steps={}'.format(replicates),
               'MultiApps/runner/mode={}'.format(mode),
               'Outputs/file_base={}'.format(mode)]
        print(' '.join(cmd))
        out = subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        # Row 0 of the output CSV is skipped; memory is taken from row 1 and
        # timing statistics from row 1 onward.
        local = pandas.read_csv('{}.csv'.format(mode))
        data['n_ranks'].append(n_cores)
        data['n_samples'].append(n_samples)
        data['mem_total'].append(local['total'].iloc[1])
        data['mem_per_proc'].append(local['per_proc'].iloc[1])
        data['mem_max_proc'].append(local['max_proc'].iloc[1])
        data['run_time'].append(statistics.mean(local['run_time'].iloc[1:]))
        data['run_time_min'].append(min(local['run_time'].iloc[1:]))
        data['run_time_max'].append(max(local['run_time'].iloc[1:]))

    df = pandas.DataFrame(data, columns=data.keys())
    if write:
        df.to_csv('results/{}_{}.csv'.format(outfile, mode))
def plot(prefix, suffix, xname, yname, xlabel=None, ylabel=None, yerr=None, results=True):
    """Plot one results column against another and save a PDF.

    Reads ``results/{prefix}_{mode}.csv`` and writes ``{prefix}_{suffix}.pdf``.

    :param prefix: results file prefix.
    :param suffix: output file suffix (e.g. 'time', 'memory').
    :param xname: CSV column plotted on the x axis.
    :param yname: CSV column plotted on the y axis.
    :param xlabel: optional x-axis label.
    :param ylabel: optional y-axis label.
    :param yerr: optional (min_column, max_column) pair for error bars.
    :param results: unused in this body.
    """
    fig = plt.figure(figsize=[4,4], dpi=300, tight_layout=True)
    ax = fig.subplots()
    # Only 'normal' mode is currently plotted; the other two modes are
    # commented out of the iteration below.
    for i, mode in enumerate(['normal']):#, 'batch-restore', 'batch-reset']):
        data = pandas.read_csv('results/{}_{}.csv'.format(prefix, mode))
        kwargs = dict()
        kwargs['label'] = mode
        kwargs['linewidth'] = 0.5
        kwargs['color'] = 'k'
        kwargs['markersize'] = 2
        kwargs['marker'] = 'osd'[i]
        if yerr is not None:
            kwargs['elinewidth'] = 0.5
            kwargs['capsize'] = 2
            # NOTE(review): matplotlib interprets a 2-row yerr as
            # (lower, upper) offsets; the first row here is max - y and the
            # second y - min, i.e. apparently (upper, lower) — confirm the
            # intended orientation of the error bars.
            kwargs['yerr'] = [(-data[yname] + data[yerr[1]]).tolist(),
                              (-data[yerr[0]] + data[yname]).tolist() ]
        ax.errorbar(data[xname], data[yname], **kwargs)

    if xlabel is not None:
        ax.set_xlabel(xlabel, fontsize=10)
    if ylabel is not None:
        ax.set_ylabel(ylabel, fontsize=10)
    ax.grid(True, color=[0.7]*3)
    ax.grid(True, which='minor', color=[0.8]*3)
    ax.legend()
    outfile = '{}_{}.pdf'.format(prefix, suffix)
    fig.savefig(outfile)
def METHOD_NAME(prefix, write=True):
    """Build the LaTeX weak-scaling timing table from results CSVs.

    Reads ``results/{prefix}_{mode}.csv`` for the three MultiApp modes and
    emits a tabular of mean (min, max) run times per processor/sample
    count. The output path is fixed to ``results/weak.tex``.

    :param prefix: results file prefix, e.g. 'full_solve_weak_scale'.
    :param write: when True, write the table to ``results/weak.tex``.
    """
    out = list()
    out.append(r'\begin{tabular}{ccccc}')
    out.append(r'\toprule')
    # Raw string required: '\multicolumn' in a plain literal contains the
    # invalid escape sequence '\m' (a SyntaxWarning on modern Python).
    out.append(r'& & {} \\'.format(r'\multicolumn{3}{c}{time (sec.)}'))
    out.append(r'\cmidrule{3-5}')
    out.append(r'{} & {} & {} & {} & {} \\'.format('Processors', 'Simulations', 'normal', 'batch-reset', 'batch-restore'))
    out.append(r'\midrule')

    # times[(n_samples, n_ranks)] accumulates one (mean, min, max) triple
    # per mode, appended in the order normal, batch-reset, batch-restore,
    # which the value[0..2] indexing below relies on.
    times = collections.defaultdict(list)
    for i, mode in enumerate(['normal', 'batch-reset', 'batch-restore']):
        data = pandas.read_csv('results/{}_{}.csv'.format(prefix, mode))
        for idx, row in data.iterrows():
            key = (int(row['n_samples']), int(row['n_ranks']))
            times[key].append((row['run_time'], row['run_time_min'], row['run_time_max']))

    for key, value in times.items():
        n_samples = key[0]
        n_ranks = key[1]
        normal = '{:.1f} ({:.1f}, {:.1f})'.format(*value[0])
        reset = '{:.1f} ({:.1f}, {:.1f})'.format(*value[1])
        restore = '{:.1f} ({:.1f}, {:.1f})'.format(*value[2])
        out.append(r'{} & {} & {} & {} & {} \\'.format(n_ranks, n_samples, normal, reset, restore))

    out.append(r'\bottomrule')
    out.append(r'\end{tabular}')

    if write:
        with open('results/weak.tex', 'w') as fid:
            fid.write('\n'.join(out))
if __name__ == '__main__':
    input_file = 'full_solve.i'
    args = get_args()

    # Memory Parallel: fixed core count, sample counts doubling each level.
    if args.run:
        prefix = 'full_solve_memory_parallel'
        samples = [args.base*2**n for n in range(args.memory_levels)]
        mpi = [args.memory_cores]*len(samples)
        execute(input_file, prefix, 'normal', samples, mpi, args.replicates, args.write)
        execute(input_file, prefix, 'batch-reset', samples, mpi, args.replicates, args.write)
        execute(input_file, prefix, 'batch-restore', samples, mpi, args.replicates, args.write)

    # Weak scale: cores double each level and samples grow proportionally,
    # keeping samples-per-core constant.
    if args.run:
        prefix = 'full_solve_weak_scale'
        mpi = [2**n for n in range(args.weak_levels)]
        samples = [args.base*m for m in mpi]
        execute(input_file, prefix, 'normal', samples, mpi, args.replicates, args.write)
        execute(input_file, prefix, 'batch-reset', samples, mpi, args.replicates, args.write)
        execute(input_file, prefix, 'batch-restore', samples, mpi, args.replicates, args.write)

    # Parallel time and memory plots
    plot('full_solve_memory_parallel', 'time',
         xname='n_samples', xlabel='Number of Simulations',
         yname='run_time', ylabel='Time (sec.)', yerr=('run_time_min', 'run_time_max'))
    plot('full_solve_memory_parallel', 'memory',
         xname='n_samples', xlabel='Number of Simulations',
         yname='mem_per_proc', ylabel='Memory (MiB)')

    # Weak scaling table
    METHOD_NAME('full_solve_weak_scale', args.write)
6,480 | test grid attributes | import numpy as np
import yt # NOQA
from yt.frontends.amrvac.api import AMRVACDataset, AMRVACGrid
from yt.testing import requires_file
from yt.units import YTArray
from yt.utilities.answer_testing.framework import (
data_dir_load,
requires_ds,
small_patch_amr,
)
blastwave_spherical_2D = "amrvac/bw_2d0000.dat"
khi_cartesian_2D = "amrvac/kh_2d0000.dat"
khi_cartesian_3D = "amrvac/kh_3D0000.dat"
jet_cylindrical_25D = "amrvac/Jet0003.dat"
riemann_cartesian_175D = "amrvac/R_1d0005.dat"
blastwave_cartesian_3D = "amrvac/bw_3d0000.dat"
blastwave_polar_2D = "amrvac/bw_polar_2D0000.dat"
blastwave_cylindrical_3D = "amrvac/bw_cylindrical_3D0000.dat"
rmi_cartesian_dust_2D = "amrvac/Richtmyer_Meshkov_dust_2D/RM2D_dust_Kwok0000.dat"
def _get_fields_to_check(ds):
fields = ["density", "velocity_magnitude"]
raw_fields_labels = [fname for ftype, fname in ds.field_list]
if "b1" in raw_fields_labels:
fields.append("magnetic_energy_density")
if "e" in raw_fields_labels:
fields.append("energy_density")
if "rhod1" in raw_fields_labels:
fields.append("total_dust_density")
# note : not hitting dust velocity fields
return fields
@requires_file(khi_cartesian_2D)
def test_AMRVACDataset():
    """The KHI sample file must be recognized as an AMRVAC dataset."""
    assert isinstance(data_dir_load(khi_cartesian_2D), AMRVACDataset)
@requires_ds(blastwave_cartesian_3D)
def test_domain_size():
    # "Check for correct box size, see bw_3d.par"
    ds = data_dir_load(blastwave_cartesian_3D)
    # The parameter file defines a [0, 2]^3 domain, so every edge/width
    # component truncates to the expected integer.
    for lb in ds.domain_left_edge:
        assert int(lb) == 0
    for rb in ds.domain_right_edge:
        assert int(rb) == 2
    for w in ds.domain_width:
        assert int(w) == 2
@requires_file(blastwave_cartesian_3D)
def METHOD_NAME():
    # "Check various grid attributes"
    ds = data_dir_load(blastwave_cartesian_3D)
    grids = ds.index.grids
    # The sample dataset is expected to be refined to level 2.
    assert ds.index.max_level == 2
    # Every grid must expose the standard patch-AMR attributes with the
    # expected types.
    for g in grids:
        assert isinstance(g, AMRVACGrid)
        assert isinstance(g.LeftEdge, YTArray)
        assert isinstance(g.RightEdge, YTArray)
        assert isinstance(g.ActiveDimensions, np.ndarray)
        assert isinstance(g.Level, (np.int32, np.int64, int))
# The answer tests below all follow the same pattern: load the sample
# dataset, then yield the standard small-patch AMR checks for every field
# relevant to that dataset (see _get_fields_to_check). Each yielded test's
# __name__ is patched so the answer-testing framework reports it under the
# parent function's name.
@requires_ds(blastwave_polar_2D)
def test_bw_polar_2d():
    ds = data_dir_load(blastwave_polar_2D)
    for test in small_patch_amr(ds, _get_fields_to_check(ds)):
        test_bw_polar_2d.__name__ = test.description
        yield test


@requires_ds(blastwave_cartesian_3D)
def test_blastwave_cartesian_3D():
    ds = data_dir_load(blastwave_cartesian_3D)
    for test in small_patch_amr(ds, _get_fields_to_check(ds)):
        test_blastwave_cartesian_3D.__name__ = test.description
        yield test


@requires_ds(blastwave_spherical_2D)
def test_blastwave_spherical_2D():
    ds = data_dir_load(blastwave_spherical_2D)
    for test in small_patch_amr(ds, _get_fields_to_check(ds)):
        test_blastwave_spherical_2D.__name__ = test.description
        yield test


@requires_ds(blastwave_cylindrical_3D)
def test_blastwave_cylindrical_3D():
    ds = data_dir_load(blastwave_cylindrical_3D)
    for test in small_patch_amr(ds, _get_fields_to_check(ds)):
        test_blastwave_cylindrical_3D.__name__ = test.description
        yield test


@requires_ds(khi_cartesian_2D)
def test_khi_cartesian_2D():
    ds = data_dir_load(khi_cartesian_2D)
    for test in small_patch_amr(ds, _get_fields_to_check(ds)):
        test_khi_cartesian_2D.__name__ = test.description
        yield test


@requires_ds(khi_cartesian_3D)
def test_khi_cartesian_3D():
    ds = data_dir_load(khi_cartesian_3D)
    for test in small_patch_amr(ds, _get_fields_to_check(ds)):
        test_khi_cartesian_3D.__name__ = test.description
        yield test


@requires_ds(jet_cylindrical_25D)
def test_jet_cylindrical_25D():
    ds = data_dir_load(jet_cylindrical_25D)
    for test in small_patch_amr(ds, _get_fields_to_check(ds)):
        test_jet_cylindrical_25D.__name__ = test.description
        yield test


@requires_ds(riemann_cartesian_175D)
def test_riemann_cartesian_175D():
    ds = data_dir_load(riemann_cartesian_175D)
    for test in small_patch_amr(ds, _get_fields_to_check(ds)):
        test_riemann_cartesian_175D.__name__ = test.description
        yield test


@requires_ds(rmi_cartesian_dust_2D)
def test_rmi_cartesian_dust_2D():
    # dataset with dust fields
    ds = data_dir_load(rmi_cartesian_dust_2D)
    for test in small_patch_amr(ds, _get_fields_to_check(ds)):
        test_rmi_cartesian_dust_2D.__name__ = test.description
        yield test
6,481 | test gaussian distribution | import pytest
from mlagents.torch_utils import torch
from mlagents.trainers.torch_entities.distributions import (
GaussianDistribution,
MultiCategoricalDistribution,
GaussianDistInstance,
TanhGaussianDistInstance,
CategoricalDistInstance,
)
@pytest.mark.parametrize("tanh_squash", [True, False])
@pytest.mark.parametrize("conditional_sigma", [True, False])
def METHOD_NAME(conditional_sigma, tanh_squash):
    """Train a GaussianDistribution head toward a target log-probability.

    Verifies that the head produces the correct distribution instance type
    (tanh-squashed or plain) and that gradients flow: after 50 Adam steps
    the log-prob of the zero action approaches the -2 target.
    """
    torch.manual_seed(0)  # fixed seed keeps the short optimization deterministic
    hidden_size = 16
    act_size = 4
    sample_embedding = torch.ones((1, 16))
    gauss_dist = GaussianDistribution(
        hidden_size,
        act_size,
        conditional_sigma=conditional_sigma,
        tanh_squash=tanh_squash,
    )

    # Make sure backprop works
    force_action = torch.zeros((1, act_size))
    optimizer = torch.optim.Adam(gauss_dist.parameters(), lr=3e-3)

    for _ in range(50):
        dist_inst = gauss_dist(sample_embedding)
        if tanh_squash:
            assert isinstance(dist_inst, TanhGaussianDistInstance)
        else:
            assert isinstance(dist_inst, GaussianDistInstance)
        log_prob = dist_inst.log_prob(force_action)
        loss = torch.nn.functional.mse_loss(log_prob, -2 * torch.ones(log_prob.shape))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # After training, every component's log-prob should be near the target.
    for prob in log_prob.flatten().tolist():
        assert prob == pytest.approx(-2, abs=0.1)
def test_multi_categorical_distribution():
    """Train a MultiCategoricalDistribution toward fixed target probabilities
    and verify branch masking.

    After 100 Adam steps each branch's log-probabilities should match a
    target distribution that puts nearly all mass on the first action.
    Finally, a mask allowing only the last action of each branch must give
    that action log-probability ~0 (probability ~1).
    """
    torch.manual_seed(0)  # fixed seed keeps the short optimization deterministic
    hidden_size = 16
    act_size = [3, 3, 4]
    sample_embedding = torch.ones((1, 16))
    gauss_dist = MultiCategoricalDistribution(hidden_size, act_size)

    # Make sure backprop works
    optimizer = torch.optim.Adam(gauss_dist.parameters(), lr=3e-3)

    def create_test_prob(size: int) -> torch.Tensor:
        # Target log-probabilities for one branch of *size* actions.
        test_prob = torch.tensor(
            [[1.0 - 0.01 * (size - 1)] + [0.01] * (size - 1)]
        )  # High prob for first action
        return test_prob.log()

    for _ in range(100):
        dist_insts = gauss_dist(sample_embedding, masks=torch.ones((1, sum(act_size))))
        loss = 0
        for i, dist_inst in enumerate(dist_insts):
            assert isinstance(dist_inst, CategoricalDistInstance)
            log_prob = dist_inst.all_log_prob()
            test_log_prob = create_test_prob(act_size[i])
            # Force log_probs to match the high probability for the first action generated by
            # create_test_prob
            loss += torch.nn.functional.mse_loss(log_prob, test_log_prob)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    for dist_inst, size in zip(dist_insts, act_size):
        # Check that the log probs are close to the fake ones that we generated.
        test_log_probs = create_test_prob(size)
        for _prob, _test_prob in zip(
            dist_inst.all_log_prob().flatten().tolist(),
            test_log_probs.flatten().tolist(),
        ):
            assert _prob == pytest.approx(_test_prob, abs=0.1)

    # Test masks
    masks = []
    # Build a mask that permits only the last action of every branch.
    for branch in act_size:
        masks += [0] * (branch - 1) + [1]
    masks = torch.tensor([masks])
    dist_insts = gauss_dist(sample_embedding, masks=masks)
    for dist_inst in dist_insts:
        log_prob = dist_inst.all_log_prob()
        assert log_prob.flatten()[-1].tolist() == pytest.approx(0, abs=0.001)
def test_gaussian_dist_instance():
    """Sample shape, log-prob and entropy of a standard-normal instance."""
    torch.manual_seed(0)
    act_size = 4
    dist_instance = GaussianDistInstance(
        torch.zeros(1, act_size), torch.ones(1, act_size)
    )
    action = dist_instance.sample()
    assert action.shape == (1, act_size)

    for log_prob in (
        dist_instance.log_prob(torch.zeros((1, act_size))).flatten().tolist()
    ):
        # Log prob of standard normal at 0
        assert log_prob == pytest.approx(-0.919, abs=0.01)

    for ent in dist_instance.entropy().flatten().tolist():
        # entropy of standard normal at 0, based on 1/2 + ln(sqrt(2pi)sigma)
        assert ent == pytest.approx(1.42, abs=0.01)
def test_tanh_gaussian_dist_instance():
    """Samples from a tanh-squashed gaussian must stay inside (-1, 1)."""
    torch.manual_seed(0)
    act_size = 4
    dist_instance = TanhGaussianDistInstance(
        torch.zeros(1, act_size), torch.ones(1, act_size)
    )
    for _ in range(10):
        action = dist_instance.sample()
        assert action.shape == (1, act_size)
        assert torch.max(action) < 1.0 and torch.min(action) > -1.0
def test_categorical_dist_instance():
    """Sampling bounds and log-prob ordering of a categorical instance."""
    torch.manual_seed(0)
    act_size = 4
    test_prob = torch.tensor(
        [[1.0 - 0.1 * (act_size - 1)] + [0.1] * (act_size - 1)]
    )  # High prob for first action
    dist_instance = CategoricalDistInstance(test_prob)

    for _ in range(10):
        action = dist_instance.sample()
        assert action.shape == (1, 1)
        # Sampled indices must be valid action ids.
        assert action < act_size

    # Make sure the first action as higher probability than the others.
    prob_first_action = dist_instance.log_prob(torch.tensor([0]))

    for i in range(1, act_size):
        assert dist_instance.log_prob(torch.tensor([i])) < prob_first_action
6,482 | build arguments schema | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
# NOTE: this command class is generated by aaz-dev-tools; prefer
# regenerating over hand-editing its structure.
@register_command(
    "network vpn-connection shared-key reset",
)
class Reset(AAZCommand):
    """Reset a VPN connection shared key.

    :example: Reset the shared key on a connection.
        az network vpn-connection shared-key reset -g MyResourceGroup --connection-name MyConnection --key-length 128

    :example: Reset a VPN connection shared key.
        az network vpn-connection shared-key reset --connection-name MyConnection --key-length 128 --resource-group MyResourceGroup --subscription MySubscription
    """

    # Resource path / API version mapping consumed by the aaz runtime.
    _aaz_info = {
        "version": "2017-10-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/connections/{}/sharedkey/reset", "2017-10-01"],
        ]
    }

    AZ_SUPPORT_NO_WAIT = True

    def _handler(self, command_args):
        super()._handler(command_args)
        # Long-running operation: wrap execution in an LRO poller.
        return self.build_lro_poller(self._execute_operations, self._output)

    _args_schema = None

    @classmethod
    def METHOD_NAME(cls, *args, **kwargs):
        # Build (once per class) the command's argument schema.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super().METHOD_NAME(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.connection_name = AAZStrArg(
            options=["--connection-name"],
            help="Connection name.",
            required=True,
            id_part="name",
        )
        _args_schema.key_length = AAZIntArg(
            options=["--key-length"],
            help="The virtual network connection reset shared key length, should between 1 and 128.",
            required=True,
            fmt=AAZIntArgFormat(
                maximum=128,
                minimum=1,
            ),
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        yield self.VirtualNetworkGatewayConnectionsResetSharedKey(ctx=self.ctx)()
        self.post_operations()

    # Extension points for subclasses / custom commands.
    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    # POST .../sharedkey/reset — the long-running reset HTTP operation.
    class VirtualNetworkGatewayConnectionsResetSharedKey(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # Both 200 and 202 responses are polled to completion via the
            # "location" header ("final-state-via": "location").
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [200]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey/reset",
                **self.url_parameters
            )

        @property
        def method(self):
            return "POST"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "virtualNetworkGatewayConnectionName", self.ctx.args.connection_name,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2017-10-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Content-Type", "application/json",
                ),
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        @property
        def content(self):
            # Request body: {"keyLength": <key_length>}.
            _content_value, _builder = self.new_content_builder(
                self.ctx.args,
                typ=AAZObjectType,
                typ_kwargs={"flags": {"required": True, "client_flatten": True}}
            )
            _builder.set_prop("keyLength", AAZIntType, ".key_length", typ_kwargs={"flags": {"required": True}})

            return self.serialize_content(_content_value)

        def on_200(self, session):
            # Deserialize the final 200 payload into ctx.vars.instance for
            # _output() to flatten and return.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.key_length = AAZIntType(
                serialized_name="keyLength",
                flags={"required": True},
            )

            return cls._schema_on_200


class _ResetHelper:
    """Helper class for Reset"""


__all__ = ["Reset"]
6,483 | test available modes | from homeassistant.components.humidifier import HumidifierEntityFeature
from homeassistant.components.humidifier.const import (
MODE_AUTO,
MODE_BOOST,
MODE_NORMAL,
MODE_SLEEP,
)
from homeassistant.components.sensor import SensorDeviceClass
from homeassistant.const import PERCENTAGE
from ..const import WETAIR_WAWH1210_HUMIDIFIER_PAYLOAD
from ..helpers import assert_device_properties_set
from ..mixins.light import BasicLightTests
from ..mixins.lock import BasicLockTests
from ..mixins.sensor import MultiSensorTests
from ..mixins.switch import MultiSwitchTests, SwitchableTests
from .base_device_tests import TuyaDeviceTestCase
# Tuya datapoint (DP) ids reported by this humidifier model, as referenced
# by the test class below.
SWITCH_DPS = "1"         # power on/off (humidifier entity + icon state)
LIGHT_DPS = "5"          # display light
SOUND_DPS = "8"          # beeper / sound switch
HUMIDITY_DPS = "13"      # target humidity (%)
CURRENTHUMID_DPS = "14"  # measured humidity sensor (%)
UNKNOWN22_DPS = "22"     # purpose unknown; surfaced as extra attribute "unknown_22"
PRESET_DPS = "24"        # preset mode (AUTO/MIDDLE/HIGH/SLEEP)
IONIZER_DPS = "25"       # ionizer switch
LOCK_DPS = "29"          # child lock
LEVEL_DPS = "101"        # water level sensor (%)
class TestWetairWAWH1210LWHumidifier(
    BasicLightTests,
    BasicLockTests,
    MultiSensorTests,
    MultiSwitchTests,
    SwitchableTests,
    TuyaDeviceTestCase,
):
    """Tests for the WetAir WAWH1210LW humidifier config.

    The mixin base classes provide the generic checks for the light, lock,
    sensor and switch sub-entities wired up in setUp(); the methods below
    cover the humidifier entity itself.
    """
    __test__ = True

    def setUp(self):
        self.setUpForConfig(
            "wetair_wawh1210lw_humidifier.yaml", WETAIR_WAWH1210_HUMIDIFIER_PAYLOAD
        )
        self.subject = self.entities.get("humidifier_humidifier")
        self.setUpSwitchable(SWITCH_DPS, self.subject)
        self.setUpBasicLight(LIGHT_DPS, self.entities.get("light_display"))
        self.setUpBasicLock(LOCK_DPS, self.entities.get("lock_child_lock"))
        self.setUpMultiSensors(
            [
                {
                    "dps": CURRENTHUMID_DPS,
                    "name": "sensor_current_humidity",
                    "device_class": SensorDeviceClass.HUMIDITY,
                    "state_class": "measurement",
                    "unit": PERCENTAGE,
                },
                {
                    "dps": LEVEL_DPS,
                    "name": "sensor_water_level",
                    "unit": PERCENTAGE,
                },
            ]
        )
        self.setUpMultiSwitch(
            [
                {
                    "dps": SOUND_DPS,
                    "name": "switch_sound",
                },
                {
                    "dps": IONIZER_DPS,
                    "name": "switch_ionizer",
                },
            ]
        )
        # Entities that should be marked secondary to the main humidifier.
        self.mark_secondary(
            [
                "light_display",
                "lock_child_lock",
                "sensor_current_humidity",
                "sensor_water_level",
                "switch_sound",
            ]
        )

    def test_supported_features(self):
        self.assertEqual(self.subject.supported_features, HumidifierEntityFeature.MODES)

    def test_icons(self):
        # Icon follows the power state.
        self.dps[SWITCH_DPS] = True
        self.assertEqual(self.subject.icon, "mdi:air-humidifier")
        self.dps[SWITCH_DPS] = False
        self.assertEqual(self.subject.icon, "mdi:air-humidifier-off")

    def test_min_target_humidity(self):
        self.assertEqual(self.subject.min_humidity, 30)

    def test_max_target_humidity(self):
        self.assertEqual(self.subject.max_humidity, 80)

    def test_target_humidity(self):
        self.dps[HUMIDITY_DPS] = 55
        self.assertEqual(self.subject.target_humidity, 55)

    def METHOD_NAME(self):
        self.assertCountEqual(
            self.subject.available_modes,
            [MODE_AUTO, MODE_BOOST, MODE_NORMAL, MODE_SLEEP],
        )

    def test_mode(self):
        # Device presets map onto the standard HA humidifier modes.
        self.dps[PRESET_DPS] = "AUTO"
        self.assertEqual(self.subject.mode, MODE_AUTO)
        self.dps[PRESET_DPS] = "MIDDLE"
        self.assertEqual(self.subject.mode, MODE_NORMAL)
        self.dps[PRESET_DPS] = "HIGH"
        self.assertEqual(self.subject.mode, MODE_BOOST)
        self.dps[PRESET_DPS] = "SLEEP"
        self.assertEqual(self.subject.mode, MODE_SLEEP)

    async def test_set_mode_to_auto(self):
        async with assert_device_properties_set(
            self.subject._device, {PRESET_DPS: "AUTO"}
        ):
            await self.subject.async_set_mode(MODE_AUTO)

    async def test_set_mode_to_normal(self):
        async with assert_device_properties_set(
            self.subject._device, {PRESET_DPS: "MIDDLE"}
        ):
            await self.subject.async_set_mode(MODE_NORMAL)

    async def test_set_mode_to_boost(self):
        async with assert_device_properties_set(
            self.subject._device, {PRESET_DPS: "HIGH"}
        ):
            await self.subject.async_set_mode(MODE_BOOST)

    async def test_set_mode_to_sleep(self):
        async with assert_device_properties_set(
            self.subject._device, {PRESET_DPS: "SLEEP"}
        ):
            await self.subject.async_set_mode(MODE_SLEEP)

    def test_extra_state_attributes(self):
        self.dps[UNKNOWN22_DPS] = 22
        self.assertDictEqual(
            self.subject.extra_state_attributes,
            {"unknown_22": 22},
        )
6,484 | get label control name | import wx
import wx.lib.rcsizer
from cellprofiler_core.setting import DataTypes
from ._module_view import ModuleView
from ..utilities.module_view import edit_control_name
class DataTypeController:
    """The DataTypeController manages a DataType setting.

    It builds (or reuses) a wx panel showing one row per feature key, each
    with a label and a wx.Choice of the four supported data types, and keeps
    the panel in sync with the DataTypes setting value.
    """

    # UI labels for the choice controls.
    DTC_NONE = "None"
    DTC_TEXT = "Text"
    DTC_INTEGER = "Integer"
    DTC_FLOAT = "Float"
    # UI label -> DataTypes constant; None falls back to text.
    DTC_TO_DT = {
        DTC_NONE: DataTypes.DT_NONE,
        DTC_TEXT: DataTypes.DT_TEXT,
        DTC_INTEGER: DataTypes.DT_INTEGER,
        DTC_FLOAT: DataTypes.DT_FLOAT,
        None: DataTypes.DT_TEXT,
    }
    # DataTypes constant -> UI label (inverse mapping).
    DT_TO_DTC = {
        DataTypes.DT_NONE: DTC_NONE,
        DataTypes.DT_TEXT: DTC_TEXT,
        DataTypes.DT_INTEGER: DTC_INTEGER,
        DataTypes.DT_FLOAT: DTC_FLOAT,
    }

    def __init__(self, module_view, v):
        """Attach to (or create) the edit panel for the DataTypes setting *v*."""
        assert isinstance(v, DataTypes)
        self.module_view = module_view
        self.v = v
        self.panel = module_view.module_panel.FindWindowByName(edit_control_name(v))
        if self.panel is None:

            class DoesntInheritBackgroundColor(wx.Panel):
                # Keep our own background so the custom on_paint drawing shows.
                def InheritsBackgroundColour(self):
                    return False

            self.panel = DoesntInheritBackgroundColor(
                module_view.module_panel, -1, name=edit_control_name(v)
            )
            self.panel.Sizer = wx.lib.rcsizer.RowColSizer()
            self.panel.Bind(wx.EVT_PAINT, self.on_paint)
            self.panel.controller = self
        self.n_items = 0
        self.update()

    def on_paint(self, event):
        """Paint separator lines between the grid's columns and rows."""
        dc = wx.BufferedPaintDC(self.panel)
        dc.SetBackground(wx.Brush(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW)))
        dc.Clear()
        dc.SetPen(wx.Pen(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT)))
        sizer = self.panel.Sizer
        # BUG FIX: GetClientSize() yields (width, height); the previous
        # "_, panel_width = ..." unpacked the *height* into panel_width,
        # truncating the horizontal separator lines below.
        panel_width, _ = self.panel.GetClientSize()
        assert isinstance(sizer, wx.lib.rcsizer.RowColSizer)
        # Vertical separator along the left edge of the choice column.
        bottom_choice_name = self.get_choice_control_name(self.n_items)
        bottom_choice = self.panel.FindWindowByName(bottom_choice_name)
        if bottom_choice is not None:
            r = bottom_choice.GetRect()
            dc.DrawLine(r.Left - 2, 1, r.Left - 2, r.Bottom)
        # Horizontal separator above each data row.
        for i in range(1, self.n_items + 1):
            choice_name = self.get_choice_control_name(i)
            choice = self.panel.FindWindowByName(choice_name)
            if choice is not None:
                r = choice.GetRect()
                dc.DrawLine(1, r.Top - 2, panel_width - 1, r.Top - 2)
        event.Skip()

    def get_label_control_name(self, rowidx):
        """The name of the label control that holds the feature name"""
        return "label_control_%d_%s" % (rowidx, str(self.v.key()))

    def get_choice_control_name(self, rowidx):
        """The name of the choice control holding the data type choices"""
        return "choice_control_%d_%s" % (rowidx, str(self.v.key()))

    def update(self):
        """Change the UI state to match that of the DataTypes setting"""
        d = self.v.get_data_types()
        needs_bind = []
        sizer = self.panel.Sizer
        assert isinstance(sizer, wx.lib.rcsizer.RowColSizer)
        # Hide everything, then re-show/create the rows that still exist.
        for child in self.panel.GetChildren():
            sizer.Hide(child)
        label_header_name = self.get_label_control_name(0)
        choice_header_name = self.get_choice_control_name(0)
        for header_name, column, text in (
            (label_header_name, 0, "Key"),
            (choice_header_name, 1, "Data type"),
        ):
            ctrl = self.panel.FindWindowByName(header_name)
            if ctrl is None:
                ctrl = wx.StaticText(self.panel, label=text, name=header_name)
                ctrl.Font = wx.Font(
                    ctrl.Font.GetPointSize(),
                    ctrl.Font.GetFamily(),
                    ctrl.Font.GetStyle(),
                    wx.FONTWEIGHT_BOLD,
                    False,
                    ctrl.Font.GetFaceName(),
                )
                sizer.Add(
                    ctrl,
                    flag=wx.ALIGN_CENTER_HORIZONTAL
                    | wx.ALIGN_CENTER_VERTICAL
                    | wx.BOTTOM,
                    border=1,
                    row=0,
                    col=column,
                )
            else:
                sizer.Show(ctrl)
        for i, feature in enumerate(sorted(d.keys())):
            label_name = self.get_label_control_name(i + 1)
            choice_name = self.get_choice_control_name(i + 1)
            label = self.panel.FindWindowByName(label_name)
            if label is None:
                label = wx.StaticText(self.panel, label=feature, name=label_name)
                sizer.Add(
                    label,
                    flag=wx.ALIGN_LEFT | wx.BOTTOM | wx.LEFT | wx.RIGHT,
                    border=3,
                    row=i + 1,
                    col=0,
                )
            else:
                sizer.Show(label)
                if label.Label != feature:
                    label.Label = feature
            choice = self.panel.FindWindowByName(choice_name)
            if choice is None:
                choice = wx.Choice(
                    self.panel,
                    choices=[
                        self.DTC_TEXT,
                        self.DTC_INTEGER,
                        self.DTC_FLOAT,
                        self.DTC_NONE,
                    ],
                    name=choice_name,
                )
                sizer.Add(
                    choice,
                    flag=wx.EXPAND | wx.BOTTOM | wx.RIGHT,
                    border=3,
                    row=i + 1,
                    col=1,
                )
                needs_bind.append(choice)
            else:
                sizer.Show(choice)
            value = self.DT_TO_DTC.get(d[feature], self.DTC_TEXT)
            if choice.GetStringSelection() != value:
                choice.SetStringSelection(value)
        self.n_items = len(list(d.keys()))
        # Bind only newly-created controls, so handlers are not duplicated.
        for choice in needs_bind:
            choice.Bind(wx.EVT_CHOICE, self.on_choice_changed)

    def on_choice_changed(self, event):
        """Re-encode all rows into the setting value and report the change."""
        result = {}
        for i in range(1, self.n_items + 1):
            label = self.panel.FindWindowByName(self.get_label_control_name(i))
            choice = self.panel.FindWindowByName(self.get_choice_control_name(i))
            result[label.Label] = self.DTC_TO_DT[choice.GetStringSelection()]
        result = DataTypes.encode_data_types(result)
        if self.v.value != result:
            self.module_view.on_value_change(self.v, self.panel, result, event)

    @classmethod
    def update_control(cls, module_view, v):
        """Update the DataTypes setting's control

        returns the control
        """
        assert isinstance(module_view, ModuleView)
        control = module_view.module_panel.FindWindowByName(edit_control_name(v))
        if control is None:
            controller = DataTypeController(module_view, v)
            return controller.panel
        else:
            control.controller.update()
            return control
6,485 | define decision state | # Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
from antlr4.IntervalSet import IntervalSet
from antlr4.RuleContext import RuleContext
from antlr4.Token import Token
from antlr4.atn.ATNType import ATNType
from antlr4.atn.ATNState import ATNState, DecisionState
class ATN(object):
    """Augmented transition network: the runtime state graph of a grammar."""

    __slots__ = (
        'grammarType', 'maxTokenType', 'states', 'decisionToState',
        'ruleToStartState', 'ruleToStopState', 'modeNameToStartState',
        'ruleToTokenType', 'lexerActions', 'modeToStartState'
    )

    INVALID_ALT_NUMBER = 0

    # Used for runtime deserialization of ATNs from strings#/
    def __init__(self, grammarType:ATNType , maxTokenType:int ):
        # The type of the ATN.
        self.grammarType = grammarType
        # The maximum value for any symbol recognized by a transition in the ATN.
        self.maxTokenType = maxTokenType
        self.states = []
        # Each subrule/rule is a decision point and we must track them so we
        # can go back later and build DFA predictors for them.  This includes
        # all the rules, subrules, optional blocks, ()+, ()* etc...
        self.decisionToState = []
        # Maps from rule index to starting state number.
        self.ruleToStartState = []
        # Maps from rule index to stop state number.
        self.ruleToStopState = None
        self.modeNameToStartState = dict()
        # For lexer ATNs, this maps the rule index to the resulting token type.
        # For parser ATNs, this maps the rule index to the generated bypass token
        # type if the
        # {@link ATNDeserializationOptions#isGenerateRuleBypassTransitions}
        # deserialization option was specified; otherwise, this is {@code null}.
        self.ruleToTokenType = None
        # For lexer ATNs, this is an array of {@link LexerAction} objects which may
        # be referenced by action transitions in the ATN.
        self.lexerActions = None
        self.modeToStartState = []

    # Compute the set of valid tokens that can occur starting in state {@code s}.
    # If {@code ctx} is null, the set of tokens will not include what can follow
    # the rule surrounding {@code s}. In other words, the set will be
    # restricted to tokens reachable staying within {@code s}'s rule.
    def nextTokensInContext(self, s:ATNState, ctx:RuleContext):
        from antlr4.LL1Analyzer import LL1Analyzer
        anal = LL1Analyzer(self)
        return anal.LOOK(s, ctx=ctx)

    # Compute the set of valid tokens that can occur starting in {@code s} and
    # staying in same rule. {@link Token#EPSILON} is in set if we reach end of
    # rule.  The result is cached on the state.
    def nextTokensNoContext(self, s:ATNState):
        if s.nextTokenWithinRule is not None:
            return s.nextTokenWithinRule
        s.nextTokenWithinRule = self.nextTokensInContext(s, None)
        s.nextTokenWithinRule.readonly = True
        return s.nextTokenWithinRule

    def nextTokens(self, s:ATNState, ctx:RuleContext = None):
        """Dispatch to the context-free or contextual token-set computation."""
        # Idiom fix: compare to None with "is", not "==".
        if ctx is None:
            return self.nextTokensNoContext(s)
        else:
            return self.nextTokensInContext(s, ctx)

    def addState(self, state:ATNState):
        """Append *state*, assigning its state number; None is ignored."""
        if state is not None:
            state.atn = self
            state.stateNumber = len(self.states)
            self.states.append(state)

    def removeState(self, state:ATNState):
        self.states[state.stateNumber] = None # just free mem, don't shift states in list

    def METHOD_NAME(self, s:DecisionState):
        """Register decision state *s* and return its assigned decision index."""
        self.decisionToState.append(s)
        s.decision = len(self.decisionToState)-1
        return s.decision

    def getDecisionState(self, decision:int):
        """Return the decision state for *decision*, or None if none exist."""
        if not self.decisionToState:
            return None
        else:
            return self.decisionToState[decision]

    # Computes the set of input symbols which could follow ATN state number
    # {@code stateNumber} in the specified full {@code context}. This method
    # considers the complete parser context, but does not evaluate semantic
    # predicates (i.e. all predicates encountered during the calculation are
    # assumed true). If a path in the ATN exists from the starting state to the
    # {@link RuleStopState} of the outermost context without matching any
    # symbols, {@link Token#EOF} is added to the returned set.
    #
    # <p>If {@code context} is {@code null}, it is treated as
    # {@link ParserRuleContext#EMPTY}.</p>
    #
    # @param stateNumber the ATN state number
    # @param context the full parse context
    # @return The set of potentially valid input symbols which could follow the
    # specified state in the specified context.
    # @throws IllegalArgumentException if the ATN does not contain a state with
    # number {@code stateNumber}
    #/
    def getExpectedTokens(self, stateNumber:int, ctx:RuleContext ):
        if stateNumber < 0 or stateNumber >= len(self.states):
            raise Exception("Invalid state number.")
        s = self.states[stateNumber]
        following = self.nextTokens(s)
        if Token.EPSILON not in following:
            return following
        expected = IntervalSet()
        expected.addSet(following)
        expected.removeOne(Token.EPSILON)
        # Walk up the invocation chain while epsilon (end of rule) is reachable.
        while ctx is not None and ctx.invokingState >= 0 and Token.EPSILON in following:
            invokingState = self.states[ctx.invokingState]
            rt = invokingState.transitions[0]
            following = self.nextTokens(rt.followState)
            expected.addSet(following)
            expected.removeOne(Token.EPSILON)
            ctx = ctx.parentCtx
        if Token.EPSILON in following:
            expected.addOne(Token.EOF)
        return expected
6,486 | inspect versions | # Introspect Postgres instance, temBoard agent and system
#
# Discover is a stable set of properties identifying a running system. temBoard
# computes an ETag from discover data to ease change detection.
#
# Discover data is cached in a discover.json file. temBoard refreshes this file
# in two cases : start of agent, connection lost.
#
import hashlib
import json
import logging
import os
import socket
import sys
from platform import machine, python_version
from multiprocessing import cpu_count
from .core import workers
from .queries import QUERIES
from .toolkit.errors import UserError
from .toolkit.versions import (
format_pq_version,
read_distinfo,
read_libpq_version,
)
from .tools import noop_manager
from .version import __version__
logger = logging.getLogger(__name__)
class Discover:
    """Cache of discover data, persisted in <temboard home>/discover.json.

    Tracks two ETags: `etag` for the in-memory JSON payload and `file_etag`
    for what is on disk, so writes are skipped when nothing changed.
    """

    def __init__(self, app):
        self.app = app
        self.path = self.app.config.temboard.home + '/discover.json'
        # Discover payload, always with these three top-level sections.
        self.data = dict(postgres={}, system={}, temboard={})
        self.json = None  # bytes
        self.etag = None
        self.file_etag = None
        # mtime of discover.json at last read/write, used to detect changes.
        self.mtime = None
        # Set True while refresh() runs, to avoid re-queueing a refresh from
        # the connection_lost callback.
        self.inhibit_observer = False

    def connection_lost(self):
        """Schedule a background refresh when the Postgres connection drops."""
        if self.inhibit_observer:
            return
        # Callback for postgres.ConnectionPool connection lost event.
        logger.info("Queueing discover refresh.")
        discover.defer(self.app)

    def ensure_latest(self):
        """Re-read discover.json if it changed on disk; return current data."""
        if self.mtime != os.stat(self.path).st_mtime:
            logger.debug("Discover file changed.")
            return self.read()
        return self.data

    def read(self):
        """Load discover.json into memory; raise UserError on malformed JSON."""
        logger.debug("Reading discover data from %s.", self.path)
        try:
            fo = open(self.path, 'rb')
        except IOError as e:
            # Missing/unreadable file is not fatal: keep current data.
            logger.debug("Failed to read manifest: %s.", e)
            return self.data
        with fo:
            self.json = fo.read()
        self.etag = self.file_etag = compute_etag(self.json)
        try:
            data = json.loads(self.json.decode('utf-8'))
        except json.JSONDecodeError as e:
            raise UserError("Malformed manifest: %s" % e)
        if not isinstance(data, dict):
            raise UserError("Malformed manifest: not a mapping")
        self.data.update(data)
        self.mtime = os.stat(self.path).st_mtime
        return self.data

    def write(self, fo=None):
        """Write the JSON payload to *fo*, or to discover.json if fo is None."""
        if fo is None:
            if self.etag == self.file_etag:
                logger.debug("Discover file up to date.")
                return
            # Mark that we are writing to the real file (see below).
            self.mtime = None
        with (fo or open(self.path, 'w')) as fo:
            fo.write(self.json.decode('utf-8'))
        if self.mtime is None: # if not sys.stdout.
            logger.debug("Wrote discover.json with ETag %s.", self.etag)
            self.mtime = os.stat(self.path).st_mtime
            self.file_etag = self.etag

    def refresh(self, conn=None):
        """Rebuild discover data from the system and Postgres; update ETag."""
        logger.debug("Inspecting temBoard and system.")
        d = self.data
        # Keep old Postgres section to reuse if the instance is unreachable.
        old_postgres = self.data.get('postgres', {})
        d.clear()
        d['postgres'] = {}
        d['system'] = {}
        d['temboard'] = {}
        d['temboard']['bin'] = sys.argv[0]
        d['temboard']['configfile'] = self.app.config.temboard.configfile
        d['temboard']['plugins'] = self.app.config.temboard.plugins
        d['system']['fqdn'] = self.app.config.temboard.hostname
        collect_versions(d)
        collect_cpu(d)
        collect_memory(d)
        collect_system(d)
        try:
            mgr = noop_manager(conn) if conn else self.app.postgres.connect()
        except Exception as e:
            logger.error("Failed to collect Postgres data: %s", e)
            d['postgres'] = old_postgres
        else:
            with mgr as conn:
                logger.debug("Inspecting Postgres instance.")
                collect_postgres(d, conn)
        # Build JSON to compute ETag.
        json_text = json.dumps(
            self.data,
            indent="  ",
            sort_keys=True,
        ) + "\n"
        self.json = json_text.encode('utf-8')
        self.etag = compute_etag(self.json)
        if self.etag != self.file_etag:
            logger.info("Instance discover updated.")
        else:
            logger.debug("Instance discover has not changed.")
        return self.data
def collect_cpu(data):
    """Fill data['system'] with CPU count and model read from /proc/cpuinfo.

    Linux-specific: raises if /proc/cpuinfo is absent.
    """
    s = data['system']
    s['cpu_count'] = cpu_count()
    with open('/proc/cpuinfo') as fo:
        for line in fo:
            if not line.startswith('model name\t'):
                continue
            _, _, model = line.partition("\t: ")
            # Keeps the last "model name" seen.
            s['cpu_model'] = model.rstrip()
def collect_memory(data):
    """Fill data['system'] with memory, swap and hugepage sizes in bytes.

    Parses /proc/meminfo; only lines with a kB unit are considered.
    """
    meminfo = {}
    with open('/proc/meminfo', 'r') as fo:
        for line in fo:
            if 'kB' not in line:
                continue
            # Lines look like "MemTotal:     16384 kB"; convert to bytes.
            field, value, kb = line.split()
            meminfo[field[:-1]] = int(value) * 1024
    s = data['system']
    s['memory'] = meminfo['MemTotal']
    s['swap'] = meminfo['SwapTotal']
    s['hugepage'] = meminfo['Hugepagesize']
def collect_postgres(data, conn):
    """Populate data['postgres'] from the discover queries on *conn*.

    Settings are coerced from their text form according to vartype, and
    8kB-unit values are normalized to bytes.
    """
    data['postgres'].update(conn.queryone(QUERIES['discover']))
    for row in conn.query(QUERIES['discover-settings']):
        value = row['setting']
        vartype = row['vartype']
        if vartype == 'integer':
            value = int(value)
        elif vartype == 'bool':
            value = value == 'on'
        unit = row['unit']
        if unit is None or unit == 'B':
            pass  # already in bytes (or unitless)
        elif unit == '8kB':
            value = value * 8 * 1024
        else:
            raise ValueError("Unsupported unit %s" % unit)
        data['postgres'][row['name']] = value
def collect_system(data):
    """Fill data['system'] with kernel name/release, arch and hostname."""
    info = data['system']
    kernel = os.uname()
    info['os'] = kernel.sysname
    info['os_version'] = kernel.release
    info['arch'] = machine()
    info['hostname'] = socket.gethostname()
def collect_versions(data):
    """Copy component version facts into the discover payload."""
    versions = METHOD_NAME()
    temboard = data['temboard']
    temboard['agent_version'] = versions['temboard']
    for component in ('bottle', 'cryptography', 'libpq', 'psycopg2', 'python'):
        temboard['%s_version' % component] = versions[component]
    temboard['pythonbin'] = versions['pythonbin']
    data['system']['distribution'] = '%s %s' % (
        versions['distname'], versions['distversion'])
def METHOD_NAME():
    """Gather version strings of temBoard and its runtime dependencies."""
    from bottle import __version__ as bottle_version
    from psycopg2 import __version__ as psycopg2_version
    from cryptography import __version__ as cryptography_version

    distinfos = read_distinfo()
    return {
        'temboard': __version__,
        'temboardbin': sys.argv[0],
        'psycopg2': psycopg2_version,
        'python': python_version(),
        'pythonbin': sys.executable,
        'bottle': bottle_version,
        'distname': distinfos['NAME'],
        'distversion': distinfos.get('VERSION', 'n/a'),
        'libpq': format_pq_version(read_libpq_version()),
        'cryptography': cryptography_version,
    }
def compute_etag(data):
    """Return the hex SHA-256 digest of *data* (bytes), used as an ETag."""
    # hashlib.sha256() is the idiomatic, direct constructor; behavior is
    # identical to hashlib.new('sha256') + update().
    return hashlib.sha256(data).hexdigest()
@workers.register(pool_size=1)
def discover(app):
    """ Refresh discover data. """
    # Sync with the on-disk file first so refresh diffs against latest state.
    app.discover.ensure_latest()
    # Inhibit the connection-lost observer during refresh so a reconnection
    # happening here does not queue yet another refresh (see connection_lost).
    app.discover.inhibit_observer = True
    app.discover.refresh()
    app.discover.inhibit_observer = False
    # Persist only if the ETag changed.
    app.discover.write()
6,487 | test pool args | import numpy as np
import pytest
import dynesty
import multiprocessing as mp
import dynesty.pool as dypool
from utils import get_rstate, get_printing
"""
Run a series of basic tests to check whether anything huge is broken.
"""
nlive = 1000  # number of live points used by the samplers in these tests
printing = get_printing()  # progress-printing flag from the test utils
ndim = 2
gau_s = 0.01


def loglike_gau(x):
    """Log-density of an isotropic Gaussian centred at 0.5 with sigma gau_s."""
    norm = -0.5 * np.log(2 * np.pi) * ndim - np.log(gau_s) * ndim
    return norm - 0.5 * np.sum((x - 0.5) ** 2) / gau_s ** 2


def prior_transform_gau(x):
    """Identity prior transform on the unit cube."""
    return x


# EGGBOX
# see 1306.2144
def loglike_egg(x):
    """Eggbox log-likelihood."""
    return (2 + np.cos(x[0] / 2) * np.cos(x[1] / 2)) ** 5


def prior_transform_egg(x):
    """Stretch the unit cube to [0, 10*pi] per dimension."""
    return x * 10 * np.pi


LOGZ_TRUTH_GAU = 0
LOGZ_TRUTH_EGG = 235.856
def terminator(pool):
    """Explicitly close and join *pool*.

    Because of https://github.com/nedbat/coveragepy/issues/1310 we cannot
    fully rely on context managers that send SIGTERM; an explicit
    close()+join() keeps coverage collection intact.
    """
    pool.close()
    pool.join()
def test_pool():
    """Nested sampling on the eggbox problem with a dynesty Pool."""
    # test pool on egg problem
    rstate = get_rstate()
    # i specify large queue_size here, otherwise it is too slow
    with dypool.Pool(2, loglike_egg, prior_transform_egg) as pool:
        sampler = dynesty.NestedSampler(pool.loglike,
                                        pool.prior_transform,
                                        ndim,
                                        nlive=nlive,
                                        pool=pool,
                                        queue_size=100,
                                        rstate=rstate)
        sampler.run_nested(dlogz=0.1, print_progress=printing)
        # logz must be within 5 sigma of the analytic truth.
        assert (abs(LOGZ_TRUTH_EGG - sampler.results['logz'][-1])
                < 5. * sampler.results['logzerr'][-1])
        terminator(pool)
def test_pool_x():
    """Same as test_pool but with a short run and few live points."""
    # check without specifying queue_size
    rstate = get_rstate()
    with dypool.Pool(2, loglike_egg, prior_transform_egg) as pool:
        sampler = dynesty.NestedSampler(pool.loglike,
                                        pool.prior_transform,
                                        ndim,
                                        nlive=50,
                                        pool=pool,
                                        queue_size=100,
                                        rstate=rstate)
        sampler.run_nested(print_progress=printing, maxiter=100)
        assert (abs(LOGZ_TRUTH_EGG - sampler.results['logz'][-1])
                < 5. * sampler.results['logzerr'][-1])
        terminator(pool)
def test_pool_dynamic():
    """Dynamic nested sampling on the Gaussian problem with a dynesty Pool."""
    # test pool on gau problem
    # i specify large queue_size here, otherwise it is too slow
    rstate = get_rstate()
    with dypool.Pool(2, loglike_gau, prior_transform_gau) as pool:
        sampler = dynesty.DynamicNestedSampler(pool.loglike,
                                               pool.prior_transform,
                                               ndim,
                                               nlive=nlive,
                                               pool=pool,
                                               queue_size=100,
                                               rstate=rstate)
        sampler.run_nested(dlogz_init=1, print_progress=printing)
        assert (abs(LOGZ_TRUTH_GAU - sampler.results['logz'][-1])
                < 5. * sampler.results['logzerr'][-1])
        terminator(pool)
def loglike_gau_args(x, y, z=None):
    """Gaussian log-likelihood shifted by the extra *y* and *z* offsets."""
    base = -0.5 * np.log(2 * np.pi) * ndim - np.log(gau_s) * ndim
    quad = 0.5 * np.sum((x - 0.5) ** 2) / gau_s ** 2
    return base - quad + y + z


def prior_transform_gau_args(x, y, z=None):
    """Identity prior transform shifted by the extra *y* and *z* offsets."""
    return x + y + z
# Renamed from the METHOD_NAME placeholder: without the test_ prefix pytest
# never collects this function.
def test_pool_args():
    """Check that logl/ptform args and kwargs are forwarded by dypool.Pool."""
    # test pool on gau problem
    # i specify large queue_size here, otherwise it is too slow
    rstate = get_rstate()
    with dypool.Pool(2,
                     loglike_gau_args,
                     prior_transform_gau_args,
                     logl_args=(0, ),
                     ptform_args=(0, ),
                     logl_kwargs=dict(z=0),
                     ptform_kwargs=dict(z=0)) as pool:
        sampler = dynesty.DynamicNestedSampler(pool.loglike,
                                               pool.prior_transform,
                                               ndim,
                                               nlive=nlive,
                                               pool=pool,
                                               queue_size=100,
                                               rstate=rstate)
        sampler.run_nested(maxiter=300, print_progress=printing)
        assert (abs(LOGZ_TRUTH_GAU - sampler.results['logz'][-1])
                < 5. * sampler.results['logzerr'][-1])
        # to ensure we get coverage
        terminator(pool)
@pytest.mark.parametrize('sample', ['slice', 'rwalk', 'rslice'])
def test_pool_samplers(sample):
    """Each sampling method must work with a plain mp.Pool and queue_size>1."""
    # this is to test how the samplers are dealing with queue_size>1
    rstate = get_rstate()
    with mp.Pool(2) as pool:
        sampler = dynesty.NestedSampler(loglike_gau,
                                        prior_transform_gau,
                                        ndim,
                                        nlive=nlive,
                                        sample=sample,
                                        pool=pool,
                                        queue_size=100,
                                        rstate=rstate)
        sampler.run_nested(print_progress=printing)
        assert (abs(LOGZ_TRUTH_GAU - sampler.results['logz'][-1])
                < 5. * sampler.results['logzerr'][-1])
        terminator(pool)
# The four tasks dynesty can delegate to a pool via the use_pool mapping.
POOL_KW = ['prior_transform', 'loglikelihood', 'propose_point', 'update_bound']
@pytest.mark.parametrize('func', POOL_KW)
def test_usepool(func):
    """Toggle each use_pool option individually and run a short sampling."""
    # test all the use_pool options, toggle them one by one
    rstate = get_rstate()
    use_pool = {}
    for k in POOL_KW:
        use_pool[k] = False
    use_pool[func] = True
    with mp.Pool(2) as pool:
        sampler = dynesty.DynamicNestedSampler(loglike_gau,
                                               prior_transform_gau,
                                               ndim,
                                               nlive=nlive,
                                               rstate=rstate,
                                               use_pool=use_pool,
                                               pool=pool,
                                               queue_size=100)
        sampler.run_nested(maxiter=10000, print_progress=printing)
        terminator(pool)
6,488 | test find executable | """Tests for distutils.spawn."""
import os
import stat
import sys
import unittest.mock
from test.support import run_unittest, unix_shell
from test.support import os_helper
from distutils.spawn import find_executable
from distutils.spawn import spawn
from distutils.errors import DistutilsExecError
from distutils.tests import support
class SpawnTestCase(support.TempdirManager,
                    support.LoggingSilencer,
                    unittest.TestCase):
    """Tests for distutils.spawn.spawn() and find_executable()."""

    @unittest.skipUnless(os.name in ('nt', 'posix'),
                         'Runs only under posix or nt')
    def test_spawn(self):
        tmpdir = self.mkdtemp()

        # creating something executable
        # through the shell that returns 1
        if sys.platform != 'win32':
            exe = os.path.join(tmpdir, 'foo.sh')
            self.write_file(exe, '#!%s\nexit 1' % unix_shell)
        else:
            exe = os.path.join(tmpdir, 'foo.bat')
            self.write_file(exe, 'exit 1')

        os.chmod(exe, 0o777)
        self.assertRaises(DistutilsExecError, spawn, [exe])

        # now something that works
        if sys.platform != 'win32':
            exe = os.path.join(tmpdir, 'foo.sh')
            self.write_file(exe, '#!%s\nexit 0' % unix_shell)
        else:
            exe = os.path.join(tmpdir, 'foo.bat')
            self.write_file(exe, 'exit 0')

        os.chmod(exe, 0o777)
        spawn([exe])  # should work without any error

    # Renamed from the METHOD_NAME placeholder: without the test_ prefix the
    # method is never run by unittest.
    def test_find_executable(self):
        with os_helper.temp_dir() as tmp_dir:
            # use TESTFN to get a pseudo-unique filename
            program_noeext = os_helper.TESTFN
            # Give the temporary program an ".exe" suffix for all.
            # It's needed on Windows and not harmful on other platforms.
            program = program_noeext + ".exe"

            filename = os.path.join(tmp_dir, program)
            with open(filename, "wb"):
                pass
            os.chmod(filename, stat.S_IXUSR)

            # test path parameter
            rv = find_executable(program, path=tmp_dir)
            self.assertEqual(rv, filename)

            if sys.platform == 'win32':
                # test without ".exe" extension
                rv = find_executable(program_noeext, path=tmp_dir)
                self.assertEqual(rv, filename)

            # test find in the current directory
            with os_helper.change_cwd(tmp_dir):
                rv = find_executable(program)
                self.assertEqual(rv, program)

            # test non-existent program
            dont_exist_program = "dontexist_" + program
            rv = find_executable(dont_exist_program, path=tmp_dir)
            self.assertIsNone(rv)

            # PATH='': no match, except in the current directory
            with os_helper.EnvironmentVarGuard() as env:
                env['PATH'] = ''
                with unittest.mock.patch('distutils.spawn.os.confstr',
                                         return_value=tmp_dir, create=True), \
                     unittest.mock.patch('distutils.spawn.os.defpath',
                                         tmp_dir):
                    rv = find_executable(program)
                    self.assertIsNone(rv)

                # look in current directory
                with os_helper.change_cwd(tmp_dir):
                    rv = find_executable(program)
                    self.assertEqual(rv, program)

            # PATH=':': explicitly looks in the current directory
            with os_helper.EnvironmentVarGuard() as env:
                env['PATH'] = os.pathsep
                with unittest.mock.patch('distutils.spawn.os.confstr',
                                         return_value='', create=True), \
                     unittest.mock.patch('distutils.spawn.os.defpath', ''):
                    rv = find_executable(program)
                    self.assertIsNone(rv)

                # look in current directory
                with os_helper.change_cwd(tmp_dir):
                    rv = find_executable(program)
                    self.assertEqual(rv, program)

            # missing PATH: test os.confstr("CS_PATH") and os.defpath
            with os_helper.EnvironmentVarGuard() as env:
                env.pop('PATH', None)

                # without confstr
                with unittest.mock.patch('distutils.spawn.os.confstr',
                                         side_effect=ValueError,
                                         create=True), \
                     unittest.mock.patch('distutils.spawn.os.defpath',
                                         tmp_dir):
                    rv = find_executable(program)
                    self.assertEqual(rv, filename)

                # with confstr
                with unittest.mock.patch('distutils.spawn.os.confstr',
                                         return_value=tmp_dir, create=True), \
                     unittest.mock.patch('distutils.spawn.os.defpath', ''):
                    rv = find_executable(program)
                    self.assertEqual(rv, filename)

    def test_spawn_missing_exe(self):
        with self.assertRaises(DistutilsExecError) as ctx:
            spawn(['does-not-exist'])
        self.assertIn("command 'does-not-exist' failed", str(ctx.exception))
def test_suite():
    # NOTE(review): unittest.makeSuite is deprecated since Python 3.11;
    # kept because this distutils test harness still uses the suite protocol.
    return unittest.makeSuite(SpawnTestCase)
if __name__ == "__main__":
    run_unittest(test_suite())
6,489 | convert identity | # -*- coding: utf-8 -*-
"""Machine type converters for Series scitype.
Exports conversion and mtype dictionary for Series scitype:
convert_dict: dict indexed by triples of str
1st element = convert from - str
2nd element = convert to - str
3rd element = considered as this scitype - str
elements are conversion functions of machine type (1st) -> 2nd
Function signature of all elements
convert_dict[(from_type, to_type, as_scitype)]
Parameters
----------
obj : from_type - object to convert
store : dictionary - reference of storage for lossy conversions, default=None (no store)
Returns
-------
converted_obj : to_type - object obj converted to to_type
Raises
------
ValueError and TypeError, if requested conversion is not possible
(depending on conversion logic)
"""
__author__ = ["fkiraly"]
__all__ = ["convert_dict"]
import numpy as np
import pandas as pd
from aeon.datatypes._proba._registry import MTYPE_LIST_PROBA
##############################################################
# methods to convert one machine type to another machine type
##############################################################
# Registry of converters, keyed by (from_mtype, to_mtype, scitype).
convert_dict = dict()


def convert_identity(obj, store=None):
    """Return *obj* unchanged: identity conversion from an mtype to itself.

    The *store* argument is accepted for signature compatibility with the
    other converters and is ignored.
    """
    return obj


# assign identity function to type conversion to self
for tp in MTYPE_LIST_PROBA:
    convert_dict[(tp, tp, "Proba")] = convert_identity
def convert_pred_interval_to_quantiles(y_pred, inplace=False):
    """Convert interval predictions to quantile predictions.

    Parameters
    ----------
    y_pred : pd.DataFrame
        Columns are a 3-level MultiIndex: variable name, nominal coverage
        fraction, and "lower"/"upper" for the interval end. Rows are fh.
    inplace : bool, optional, default=False
        whether to copy the input data frame (False), or modify (True)

    Returns
    -------
    y_pred : pd.DataFrame
        Columns are a 2-level MultiIndex: variable name and quantile
        probability. A lower/upper end at coverage c becomes the quantile
        at alpha = 0.5 - c/2 / 0.5 + c/2 respectively; values are unchanged.
    """
    if not inplace:
        y_pred = y_pred.copy()

    # Only the column index changes; entries stay as they are.
    columns = y_pred.columns
    variables = columns.get_level_values(0)
    # Translate the univariate default name used by interval predictions.
    variables = ["Quantiles" if name == "Coverage" else name for name in variables]

    # Map each coverage fraction to its quantile probability.
    quantile_points = np.array(columns.get_level_values(1).copy())
    bound = columns.get_level_values(2)
    is_lower = bound == "lower"
    is_upper = bound == "upper"
    quantile_points[is_lower] = 0.5 - 0.5 * quantile_points[is_lower]
    quantile_points[is_upper] = 0.5 + 0.5 * quantile_points[is_upper]

    # 2-level MultiIndex with variable names and alpha, as returned by
    # _predict_quantiles.
    y_pred.columns = pd.MultiIndex.from_arrays([variables, quantile_points])
    return y_pred
def convert_interval_to_quantiles(obj: pd.DataFrame, store=None) -> pd.DataFrame:
    """Adapter with the convert_dict signature; *store* is unused (lossless)."""
    return convert_pred_interval_to_quantiles(y_pred=obj)
# register mtype conversion pred_interval -> pred_quantiles
convert_dict[
    ("pred_interval", "pred_quantiles", "Proba")
] = convert_interval_to_quantiles
def convert_pred_quantiles_to_interval(y_pred, inplace=False):
    """Convert quantile predictions to interval predictions.

    Parameters
    ----------
    y_pred : pd.DataFrame
        Columns are a 2-level MultiIndex: variable name and quantile
        probability alpha. Rows are fh.
    inplace : bool, optional, default=False
        whether to copy the input data frame (False), or modify (True)

    Returns
    -------
    y_pred : pd.DataFrame
        Columns are a 3-level MultiIndex: variable name, nominal coverage
        fraction, and "lower"/"upper". A quantile at alpha becomes the
        lower (alpha <= 0.5) or upper (alpha > 0.5) end of the interval
        with coverage 2*|0.5 - alpha|; values are unchanged.
    """
    if not inplace:
        y_pred = y_pred.copy()

    # Only the column index changes; entries stay as they are.
    columns = y_pred.columns
    variables = columns.get_level_values(0)
    # Translate the univariate default name used by quantile predictions.
    variables = ["Coverage" if name == "Quantiles" else name for name in variables]

    # Map each quantile probability to its coverage and interval end.
    alphas = np.array(columns.get_level_values(1).copy())
    coverages = 2 * np.abs(0.5 - alphas)
    bound = ["lower" if a <= 0.5 else "upper" for a in alphas]

    # 3-level MultiIndex with variable names, coverages, lower/upper.
    y_pred.columns = pd.MultiIndex.from_arrays([variables, coverages, bound])
    return y_pred
def convert_quantiles_to_interval(obj: pd.DataFrame, store=None) -> pd.DataFrame:
    """Adapter with the convert_dict signature; *store* is unused (lossless)."""
    return convert_pred_quantiles_to_interval(y_pred=obj)
# register mtype conversion pred_quantiles -> pred_interval
convert_dict[
    ("pred_quantiles", "pred_interval", "Proba")
] = convert_quantiles_to_interval
6,490 | test terraform plan framework | from __future__ import annotations
import logging
from pathlib import Path
from typing import TYPE_CHECKING
import pytest
from checkov.common.bridgecrew.check_type import CheckType
from checkov.common.runners.runner_registry import RunnerRegistry
from checkov.main import DEFAULT_RUNNERS
from checkov.runner_filter import RunnerFilter
if TYPE_CHECKING:
from _pytest.logging import LogCaptureFixture
TESTS_DIR = Path(__file__).parent.parent / "tests"
def test_all_frameworks_are_tested() -> None:
    """Guard test: every runner exposed via CheckType needs a test case in this file."""
    # given: every public runner name declared on CheckType
    all_runners = {value for attr, value in CheckType.__dict__.items() if not attr.startswith("__")}

    # frameworks that deliberately have no dedicated framework test here
    not_applicable = {
        CheckType.BITBUCKET_CONFIGURATION,
        CheckType.GITHUB_CONFIGURATION,
        CheckType.GITLAB_CONFIGURATION,
        CheckType.JSON,
        CheckType.SCA_IMAGE,
        CheckType.SCA_PACKAGE,
        CheckType.YAML,
    }
    all_runners -= not_applicable

    expected = {
        CheckType.ANSIBLE,
        CheckType.ARGO_WORKFLOWS,
        CheckType.ARM,
        CheckType.AZURE_PIPELINES,
        CheckType.BICEP,
        CheckType.BITBUCKET_PIPELINES,
        CheckType.CIRCLECI_PIPELINES,
        CheckType.CLOUDFORMATION,
        CheckType.DOCKERFILE,
        CheckType.GITHUB_ACTIONS,
        CheckType.GITLAB_CI,
        CheckType.HELM,
        CheckType.KUBERNETES,
        CheckType.KUSTOMIZE,
        CheckType.OPENAPI,
        CheckType.SECRETS,
        CheckType.SERVERLESS,
        CheckType.TERRAFORM,
        CheckType.TERRAFORM_JSON,
        CheckType.TERRAFORM_PLAN,
        CheckType.POLICY_3D,
    }
    assert all_runners == expected, "Don't forget to add a test case for the new runner here"
# Each test below is a smoke test: it runs one framework's runner over the whole
# tests directory and asserts the scan finds failed checks without any errors
# (see run_framework_test). `excluded_paths` lists fixtures that are invalid on purpose.
def test_ansible_framework(caplog: LogCaptureFixture) -> None:
    run_framework_test(caplog=caplog, framework=CheckType.ANSIBLE)


def test_argo_workflows_framework(caplog: LogCaptureFixture) -> None:
    run_framework_test(caplog=caplog, framework=CheckType.ARGO_WORKFLOWS)


def test_arm_framework(caplog: LogCaptureFixture) -> None:
    # JSON file with comments is deliberately unparsable; keep it out of the scan.
    excluded_paths = ["arm/parser/examples/json/with_comments.json$"]
    run_framework_test(caplog=caplog, framework=CheckType.ARM, excluded_paths=excluded_paths)


def test_azure_pipelines_framework(caplog: LogCaptureFixture) -> None:
    run_framework_test(caplog=caplog, framework=CheckType.AZURE_PIPELINES)


def test_bicep_framework(caplog: LogCaptureFixture) -> None:
    # Malformed fixture exists to exercise the parser's error path elsewhere.
    excluded_paths = ["bicep/examples/malformed.bicep$"]
    run_framework_test(caplog=caplog, framework=CheckType.BICEP, excluded_paths=excluded_paths)


def test_bitbucket_pipelines_framework(caplog: LogCaptureFixture) -> None:
    run_framework_test(caplog=caplog, framework=CheckType.BITBUCKET_PIPELINES)


def test_circleci_pipelines_framework(caplog: LogCaptureFixture) -> None:
    run_framework_test(caplog=caplog, framework=CheckType.CIRCLECI_PIPELINES)
def test_cloudformation_framework(caplog: LogCaptureFixture) -> None:
    # These fixtures are intentionally broken (bad names, bad refs, invalid syntax)
    # and would otherwise surface as parsing errors in the clean-scan assertion.
    excluded_paths = [
        "cloudformation/parser/cfn_bad_name.yaml$",
        "cloudformation/parser/cfn_with_ref_bad.yaml$",
        "cloudformation/parser/success_triple_quotes_string.json$",
        "cloudformation/runner/resources/invalid.json$",
        "cloudformation/runner/resources/invalid.yaml$",
        "cloudformation/runner/resources/invalid_properties.json$",
        "cloudformation/runner/resources/invalid_properties.yaml$",
    ]
    run_framework_test(caplog=caplog, framework=CheckType.CLOUDFORMATION, excluded_paths=excluded_paths)
def test_dockerfile_framework(caplog: LogCaptureFixture) -> None:
    run_framework_test(caplog=caplog, framework=CheckType.DOCKERFILE)


def test_github_actions_framework(caplog: LogCaptureFixture) -> None:
    run_framework_test(caplog=caplog, framework=CheckType.GITHUB_ACTIONS)


def test_gitlab_ci_framework(caplog: LogCaptureFixture) -> None:
    run_framework_test(caplog=caplog, framework=CheckType.GITLAB_CI)


def test_helm_framework(caplog: LogCaptureFixture) -> None:
    run_framework_test(caplog=caplog, framework=CheckType.HELM)


def test_kubernetes_framework(caplog: LogCaptureFixture) -> None:
    run_framework_test(caplog=caplog, framework=CheckType.KUBERNETES)


@pytest.mark.skip(reason="kustomize needs a context to do a proper scan, which is hard to set here")
def test_kustomize_framework(caplog: LogCaptureFixture) -> None:
    run_framework_test(caplog=caplog, framework=CheckType.KUSTOMIZE)


def test_openapi_framework(caplog: LogCaptureFixture) -> None:
    run_framework_test(caplog=caplog, framework=CheckType.OPENAPI)


def test_secrets_framework(caplog: LogCaptureFixture) -> None:
    run_framework_test(caplog=caplog, framework=CheckType.SECRETS)


def test_serverless_framework(caplog: LogCaptureFixture) -> None:
    run_framework_test(caplog=caplog, framework=CheckType.SERVERLESS)
def test_terraform_framework(caplog: LogCaptureFixture) -> None:
    # Fixtures with deliberately invalid HCL syntax are excluded from the clean scan.
    excluded_paths = [
        "terraform/runner/resources/example/invalid.tf$",
        "terraform/runner/resources/invalid_terraform_syntax/bad_tf_1.tf$",
        "terraform/runner/resources/invalid_terraform_syntax/bad_tf_2.tf$",
        "terraform/runner/resources/unbalanced_eval_brackets/main.tf$",
    ]
    run_framework_test(caplog=caplog, framework=CheckType.TERRAFORM, excluded_paths=excluded_paths)


def test_terraform_json_framework(caplog: LogCaptureFixture) -> None:
    run_framework_test(caplog=caplog, framework=CheckType.TERRAFORM_JSON)
def METHOD_NAME(caplog: LogCaptureFixture) -> None:
    # NOTE(review): ``METHOD_NAME`` looks like a placeholder — presumably
    # ``test_terraform_plan_framework``; pytest only collects ``test_*`` names, confirm.
    # JSON fixtures that are intentionally invalid (or not terraform-plan JSON at all).
    excluded_paths = [
        "arm/parser/examples/json/with_comments.json$",
        "cloudformation/parser/fail.json$",
        "cloudformation/parser/success_triple_quotes_string.json$",
        "cloudformation/runner/resources/invalid.json$",
    ]
    run_framework_test(caplog=caplog, framework=CheckType.TERRAFORM_PLAN, excluded_paths=excluded_paths)
def run_framework_test(caplog: LogCaptureFixture, framework: str, excluded_paths: list[str] | None = None) -> None:
    """Run a single framework's runner over the tests directory and assert a clean scan.

    Args:
        caplog: pytest log-capture fixture; any captured ERROR-level text fails the test.
        framework: runner name to enable (a ``CheckType`` value).
        excluded_paths: regex patterns for fixture files that are intentionally invalid.
    """
    # given
    caplog.set_level(logging.ERROR)
    runner_registry = RunnerRegistry(
        "", RunnerFilter(framework=[framework], excluded_paths=excluded_paths), *DEFAULT_RUNNERS
    )

    # when
    scan_reports = runner_registry.run(root_folder=str(TESTS_DIR))

    # then: every report must have findings, and nothing may have failed to parse or logged errors
    for report in scan_reports:
        assert report.failed_checks
        assert not report.parsing_errors, f"Found parsing errors for framework '{report.check_type}'"

    assert not caplog.text, caplog.text
6,491 | get info | # Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
from datetime import datetime
from shlex import quote
from devlib.utils.android import ApkInfo as _ApkInfo
from wa.framework.configuration import settings
from wa.utils.serializer import read_pod, write_pod, Podable
from wa.utils.types import enum
from wa.utils.misc import atomic_write_path
# Android logcat severity levels; `start=2` aligns numeric values with logcat's own.
LogcatLogLevel = enum(['verbose', 'debug', 'info', 'warn', 'error', 'assert'], start=2)

# Single-letter codes as they appear in logcat output ("VDIWEA"), index-aligned
# with LogcatLogLevel.levels.
log_level_map = ''.join(n[0].upper() for n in LogcatLogLevel.names)

logcat_logger = logging.getLogger('logcat')
apk_info_cache_logger = logging.getLogger('apk_info_cache')

# Module-level singleton; instantiated at the bottom of this module.
apk_info_cache = None
class LogcatEvent(object):
    """A single parsed entry from an Android logcat capture."""

    __slots__ = ['timestamp', 'pid', 'tid', 'level', 'tag', 'message']

    def __init__(self, timestamp, pid, tid, level, tag, message):
        # Assign in slot order; values arrive in the same order as __slots__.
        for attr, value in zip(self.__slots__,
                               (timestamp, pid, tid, level, tag, message)):
            setattr(self, attr, value)

    def __repr__(self):
        header = '{} {} {} {} {}'.format(
            self.timestamp, self.pid, self.tid,
            self.level.name.upper(), self.tag,
        )
        return '{}: {}'.format(header, self.message)

    __str__ = __repr__
class LogcatParser(object):
    """Parses a logcat capture file into LogcatEvent objects."""

    def parse(self, filepath):
        """Yield a LogcatEvent for every parseable line in *filepath*."""
        # errors='replace': logcat output may contain undecodable binary noise.
        with open(filepath, errors='replace') as fh:
            for line in fh:
                event = self.parse_line(line)
                if event:
                    yield event

    def parse_line(self, line):  # pylint: disable=no-self-use
        """Parse one line; return a LogcatEvent, or None for blank/separator/invalid lines."""
        line = line.strip()
        # Lines starting with '-' are logcat section separators ("--------- beginning of ...").
        if not line or line.startswith('-') or ': ' not in line:
            return None

        metadata, message = line.split(': ', 1)
        parts = metadata.split(None, 5)
        try:
            # The timestamp spans the first two whitespace-separated fields. Logcat
            # omits the year, so the current year is assumed
            # (NOTE(review): off by one across a New Year boundary).
            ts = ' '.join([parts.pop(0), parts.pop(0)])
            timestamp = datetime.strptime(ts, '%m-%d %H:%M:%S.%f').replace(year=datetime.now().year)
            pid = int(parts.pop(0))
            tid = int(parts.pop(0))
            # Level appears as a single letter; map it back through log_level_map.
            level = LogcatLogLevel.levels[log_level_map.index(parts.pop(0))]
            tag = (parts.pop(0) if parts else '').strip()
        except Exception as e:  # pylint: disable=broad-except
            message = 'Invalid metadata for line:\n\t{}\n\tgot: "{}"'
            logcat_logger.warning(message.format(line, e))
            return None

        return LogcatEvent(timestamp, pid, tid, level, tag, message)
# pylint: disable=protected-access,attribute-defined-outside-init
class ApkInfo(_ApkInfo, Podable):
    '''Implement ApkInfo as a Podable class.'''

    _pod_serialization_version = 1

    @staticmethod
    def from_pod(pod):
        """Rebuild an ApkInfo from its plain-old-data (pod) representation."""
        instance = ApkInfo()
        instance.path = pod['path']
        instance.package = pod['package']
        instance.activity = pod['activity']
        instance.label = pod['label']
        instance.version_name = pod['version_name']
        instance.version_code = pod['version_code']
        instance.native_code = pod['native_code']
        instance.permissions = pod['permissions']
        instance._apk_path = pod['_apk_path']
        instance._activities = pod['_activities']
        instance._methods = pod['_methods']
        return instance

    def __init__(self, path=None):
        super().__init__(path)
        self._pod_version = self._pod_serialization_version

    def to_pod(self):
        """Serialize to a pod dict, forcing extraction of lazily-computed fields."""
        pod = super().to_pod()
        pod['path'] = self.path
        pod['package'] = self.package
        pod['activity'] = self.activity
        pod['label'] = self.label
        pod['version_name'] = self.version_name
        pod['version_code'] = self.version_code
        pod['native_code'] = self.native_code
        pod['permissions'] = self.permissions
        pod['_apk_path'] = self._apk_path
        pod['_activities'] = self.activities  # Force extraction
        pod['_methods'] = self.methods  # Force extraction
        return pod

    @staticmethod
    def _pod_upgrade_v1(pod):
        # Pods written before versioning lack '_pod_version'; default it to 1.
        pod['_pod_version'] = pod.get('_pod_version', 1)
        return pod
class ApkInfoCache:
    """On-disk cache of ApkInfo pods, keyed by caller-supplied apk ids."""

    @staticmethod
    def _check_env():
        # Ensure the cache directory exists before any read/write.
        if not os.path.exists(settings.cache_directory):
            os.makedirs(settings.cache_directory)

    def __init__(self, path=settings.apk_info_cache_file):
        self._check_env()
        self.path = path
        # Full os.stat() result of the cache file at last sync; used to detect
        # modifications made by other processes.
        self.last_modified = None
        self.cache = {}
        self._update_cache()

    def store(self, apk_info, apk_id, overwrite=True):
        """Persist *apk_info* under *apk_id*, rewriting the cache file atomically.

        Raises ValueError if the id is already cached and overwrite is False.
        """
        self._update_cache()
        if apk_id in self.cache and not overwrite:
            raise ValueError('ApkInfo for {} is already in cache.'.format(apk_info.path))
        self.cache[apk_id] = apk_info.to_pod()
        with atomic_write_path(self.path) as at_path:
            write_pod(self.cache, at_path)
        self.last_modified = os.stat(self.path)

    def METHOD_NAME(self, key):
        # NOTE(review): placeholder identifier — presumably ``get_info``; returns the
        # cached ApkInfo for *key*, or None when absent.
        self._update_cache()
        pod = self.cache.get(key)
        info = ApkInfo.from_pod(pod) if pod else None
        return info

    def _update_cache(self):
        # Re-read the cache file if it changed on disk since we last touched it.
        if not os.path.exists(self.path):
            return
        if self.last_modified != os.stat(self.path):
            apk_info_cache_logger.debug('Updating cache {}'.format(self.path))
            self.cache = read_pod(self.path)
            self.last_modified = os.stat(self.path)
def get_cacheable_apk_info(path):
    """Return ApkInfo for the APK at *path*, served from the module-level cache.

    The cache key includes the file's mtime, so a rebuilt APK at the same path is
    re-parsed. Returns None for a falsy *path*.
    """
    # pylint: disable=global-statement
    global apk_info_cache
    if not path:
        return
    stat = os.stat(path)
    modified = stat.st_mtime
    apk_id = '{}-{}'.format(path, modified)

    info = apk_info_cache.METHOD_NAME(apk_id)
    if info:
        msg = 'Using ApkInfo ({}) from cache'.format(info.package)
    else:
        # Cache miss: parse the APK and store the result for next time.
        info = ApkInfo(path)
        apk_info_cache.store(info, apk_id, overwrite=True)
        msg = 'Storing ApkInfo ({}) in cache'.format(info.package)

    apk_info_cache_logger.debug(msg)
    return info
# Instantiate the module-level singleton used by get_cacheable_apk_info().
apk_info_cache = ApkInfoCache()
def build_apk_launch_command(package, activity=None, apk_args=None):
    """Build an ``am start`` shell command for launching *package*.

    *apk_args* values are encoded as intent extras by type: str -> --es (shell
    quoted), float -> --ef, bool -> --ez, int -> --ei. Any other type raises
    ValueError. When *activity* is given, the explicit component form (-n) is used.
    """
    extras = ''
    for key, value in (apk_args or {}).items():
        # NOTE: bool must be tested before int, since bool is a subclass of int.
        if isinstance(value, str):
            flag = '--es'
            value = quote(value)
        elif isinstance(value, float):
            flag = '--ef'
        elif isinstance(value, bool):
            flag = '--ez'
        elif isinstance(value, int):
            flag = '--ei'
        else:
            raise ValueError('Unable to encode {} {}'.format(value, type(value)))
        extras = '{} {} {} {}'.format(extras, flag, key, value)

    if activity:
        return 'am start -W -n {}/{} {}'.format(package, activity, extras)
    return 'am start -W {} {}'.format(package, extras)
6,492 | name | """
Built-in extensions for PyScaffold.
"""
import argparse
import sys
import textwrap
from typing import Callable, Iterable, List, Optional, Type
from ..actions import Action, register, unregister
from ..exceptions import ErrorLoadingExtension
from ..identification import dasherize, deterministic_sort, underscore
if sys.version_info[:2] >= (3, 8):
# TODO: Import directly (no need for conditional) when `python_requires = >= 3.8`
from importlib.metadata import EntryPoint, entry_points # pragma: no cover
else:
from importlib_metadata import EntryPoint, entry_points # pragma: no cover
# Setuptools entry-point group under which PyScaffold extensions register themselves.
ENTRYPOINT_GROUP = "pyscaffold.cli"

NO_LONGER_NEEDED = {"pyproject", "tox"}
"""Extensions that are no longer needed and are now part of PyScaffold itself"""

# TODO: NO_LONGER_SUPPORTED = {"no_pyproject"}
class Extension:
    """Base class for PyScaffold's extensions

    Args:
        name (str): How the extension should be named. Default: name of class
            By default, this value is used to create the activation flag in
            PyScaffold cli.

    See our docs on how to create extensions in:
    https://pyscaffold.org/en/latest/extensions.html

    Also check :obj:`~pyscaffold.actions`, :obj:`~pyscaffold.structure.Structure` and
    :obj:`~pyscaffold.operations.ScaffoldOpts` for more details.

    Note:
        Please name your class using a CamelCase version of the name you use in the
        setuptools entrypoint (alternatively you will need to overwrite the ``name``
        property to match the entrypoint name).
    """

    #: When ``True`` PyScaffold will store the extension in the PyScaffold's section of
    #: ``setup.cfg``. Useful for updates. Set to ``False`` if the extension should not
    #: be re-invoked on updates.
    persist = True

    def __init__(self, METHOD_NAME: Optional[str] = None):
        # NOTE(review): ``METHOD_NAME`` appears to be a placeholder for ``name`` — confirm.
        # Falls back to the snake_case form of the class name.
        self._name = METHOD_NAME or underscore(self.__class__.__name__)

    @property
    def METHOD_NAME(self):
        # The extension identifier; used to derive the CLI activation flag.
        return self._name

    @property
    def flag(self) -> str:
        # CLI flag form, e.g. name "my_ext" -> "--my-ext".
        return f"--{dasherize(self.METHOD_NAME)}"

    @property
    def help_text(self) -> str:
        # The class docstring doubles as the CLI help text (lower-cased first letter).
        if self.__doc__ is None:
            raise NotImplementedError("Please provide a help text for your extension")

        doc = textwrap.dedent(self.__doc__)
        return doc[0].lower() + doc[1:]

    def augment_cli(self, parser: argparse.ArgumentParser):
        """Augments the command-line interface parser.

        A command line argument ``--FLAG`` where FLAG=``self.name`` is added
        which appends ``self.activate`` to the list of extensions. As help
        text the docstring of the extension class is used.
        In most cases this method does not need to be overwritten.

        Args:
            parser: current parser object
        """
        parser.add_argument(
            self.flag,
            dest="extensions",
            action="append_const",
            const=self,
            help=self.help_text,
        )
        return self

    def activate(self, actions: List[Action]) -> List[Action]:
        """Activates the extension by registering its functionality

        Args:
            actions (List[Action]): list of action to perform

        Returns:
            List[Action]: updated list of actions
        """
        raise NotImplementedError(f"Extension {self.METHOD_NAME} has no actions registered")

    #: Shortcut for :obj:`pyscaffold.actions.register`
    register = staticmethod(register)

    #: Shortcut for :obj:`pyscaffold.actions.unregister`
    unregister = staticmethod(unregister)

    def __call__(self, actions: List[Action]) -> List[Action]:
        """Just delegating to :obj:`self.activate`"""
        return self.activate(actions)
def include(*extensions: Extension) -> Type[argparse.Action]:
    """Create a custom :obj:`argparse.Action` that saves multiple extensions for
    activation.

    Args:
        *extensions: extension objects to be saved
    """

    class IncludeExtensions(argparse.Action):
        """Appends the given extensions to the extensions list."""

        def __call__(self, parser, namespace, values, option_string=None):
            # Preserve anything previously selected and append ours at the end.
            already_selected = list(getattr(namespace, "extensions", []))
            namespace.extensions = [*already_selected, *extensions]

    return IncludeExtensions
def store_with(*extensions: Extension) -> Type[argparse.Action]:
    """Create a custom :obj:`argparse.Action` that stores the value of the given option
    in addition to saving the extension for activation.

    Args:
        *extensions: extension objects to be saved for activation
    """

    class AddExtensionAndStore(include(*extensions)):  # type: ignore
        """\
        Consumes the values provided, but also appends the given extension
        to the extensions list.
        """

        def __call__(self, parser, namespace, values, option_string=None):
            # Parent appends the extensions; then store the raw option value.
            super().__call__(parser, namespace, values, option_string)
            setattr(namespace, self.dest, values)

    return AddExtensionAndStore
def iterate_entry_points(group=ENTRYPOINT_GROUP) -> Iterable[EntryPoint]:
    """Produces a generator yielding an EntryPoint object for each extension registered
    via `setuptools`_ entry point mechanism.

    This method can be used in conjunction with :obj:`load_from_entry_point` to filter
    the extensions before actually loading them.

    Args:
        group: entry-point group name to query (defaults to PyScaffold's CLI group).

    .. _setuptools: https://setuptools.pypa.io/en/latest/userguide/entry_point.html
    """  # noqa
    entries = entry_points()
    if hasattr(entries, "select"):
        # The select method was introduced in importlib_metadata 3.9 (and Python 3.10)
        # and the previous dict interface was declared deprecated
        return entries.select(group=group)  # type: ignore
    else:
        # TODO: Once Python 3.10 becomes the oldest version supported, this fallback and
        # conditional statement can be removed.
        return (extension for extension in entries.get(group, []))  # type: ignore
def load_from_entry_point(entry_point: EntryPoint) -> Extension:
    """Carefully load the extension, raising a meaningful message in case of errors"""
    try:
        # NOTE(review): ``entry_point.METHOD_NAME`` looks like a placeholder for
        # ``entry_point.name`` — the loaded class is instantiated with it.
        return entry_point.load()(entry_point.METHOD_NAME)
    except Exception as ex:
        raise ErrorLoadingExtension(entry_point=entry_point) from ex
def list_from_entry_points(
    group: str = ENTRYPOINT_GROUP,
    filtering: Callable[[EntryPoint], bool] = lambda _: True,
) -> List[Extension]:
    """Produces a list of extension objects for each extension registered
    via `setuptools`_ entry point mechanism.

    Args:
        group: name of the setuptools' entry_point group where extensions is being
            registered
        filtering: function returning a boolean deciding if the entry point should be
            loaded and included (or not) in the final list. A ``True`` return means the
            extension should be included.

    .. _setuptools: https://setuptools.pypa.io/en/latest/userguide/entry_point.html
    """  # noqa
    # Sorted deterministically so extension activation order is reproducible.
    return deterministic_sort(
        load_from_entry_point(e) for e in iterate_entry_points(group) if filtering(e)
    )
6,493 | forward | """Torch Module for Graph Isomorphism Network layer"""
# pylint: disable= no-member, arguments-differ, invalid-name
import torch as th
from torch import nn
from .... import function as fn
from ....utils import expand_as_pair
class GINConv(nn.Module):
    r"""Graph Isomorphism Network layer from `How Powerful are Graph
    Neural Networks? <https://arxiv.org/pdf/1810.00826.pdf>`__

    .. math::
        h_i^{(l+1)} = f_\Theta \left((1 + \epsilon) h_i^{l} +
        \mathrm{aggregate}\left(\left\{h_j^{l}, j\in\mathcal{N}(i)
        \right\}\right)\right)

    If a weight tensor on each edge is provided, the weighted graph convolution is defined as:

    .. math::
        h_i^{(l+1)} = f_\Theta \left((1 + \epsilon) h_i^{l} +
        \mathrm{aggregate}\left(\left\{e_{ji} h_j^{l}, j\in\mathcal{N}(i)
        \right\}\right)\right)

    where :math:`e_{ji}` is the weight on the edge from node :math:`j` to node :math:`i`.
    Please make sure that `e_{ji}` is broadcastable with `h_j^{l}`.

    Parameters
    ----------
    apply_func : callable activation function/layer or None
        If not None, apply this function to the updated node feature,
        the :math:`f_\Theta` in the formula, default: None.
    aggregator_type : str
        Aggregator type to use (``sum``, ``max`` or ``mean``), default: 'sum'.
    init_eps : float, optional
        Initial :math:`\epsilon` value, default: ``0``.
    learn_eps : bool, optional
        If True, :math:`\epsilon` will be a learnable parameter. Default: ``False``.
    activation : callable activation function/layer or None, optional
        If not None, applies an activation function to the updated node features.
        Default: ``None``.

    Examples
    --------
    >>> import dgl
    >>> import numpy as np
    >>> import torch as th
    >>> from dgl.nn import GINConv
    >>>
    >>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))
    >>> feat = th.ones(6, 10)
    >>> lin = th.nn.Linear(10, 10)
    >>> conv = GINConv(lin, 'max')
    >>> res = conv(g, feat)
    >>> res
    tensor([[-0.4821,  0.0207, -0.7665,  0.5721, -0.4682, -0.2134, -0.5236,  1.2855,
              0.8843, -0.8764],
            [-0.4821,  0.0207, -0.7665,  0.5721, -0.4682, -0.2134, -0.5236,  1.2855,
              0.8843, -0.8764],
            [-0.4821,  0.0207, -0.7665,  0.5721, -0.4682, -0.2134, -0.5236,  1.2855,
              0.8843, -0.8764],
            [-0.4821,  0.0207, -0.7665,  0.5721, -0.4682, -0.2134, -0.5236,  1.2855,
              0.8843, -0.8764],
            [-0.4821,  0.0207, -0.7665,  0.5721, -0.4682, -0.2134, -0.5236,  1.2855,
              0.8843, -0.8764],
            [-0.1804,  0.0758, -0.5159,  0.3569, -0.1408, -0.1395, -0.2387,  0.7773,
              0.5266, -0.4465]], grad_fn=<AddmmBackward>)

    >>> # With activation
    >>> from torch.nn.functional import relu
    >>> conv = GINConv(lin, 'max', activation=relu)
    >>> res = conv(g, feat)
    >>> res
    tensor([[5.0118, 0.0000, 0.0000, 3.9091, 1.3371, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000],
            [5.0118, 0.0000, 0.0000, 3.9091, 1.3371, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000],
            [5.0118, 0.0000, 0.0000, 3.9091, 1.3371, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000],
            [5.0118, 0.0000, 0.0000, 3.9091, 1.3371, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000],
            [5.0118, 0.0000, 0.0000, 3.9091, 1.3371, 0.0000, 0.0000, 0.0000, 0.0000,
             0.0000],
            [2.5011, 0.0000, 0.0089, 2.0541, 0.8262, 0.0000, 0.0000, 0.1371, 0.0000,
             0.0000]], grad_fn=<ReluBackward0>)
    """

    def __init__(
        self,
        apply_func=None,
        aggregator_type="sum",
        init_eps=0,
        learn_eps=False,
        activation=None,
    ):
        super(GINConv, self).__init__()
        self.apply_func = apply_func
        self._aggregator_type = aggregator_type
        self.activation = activation
        if aggregator_type not in ("sum", "max", "mean"):
            raise KeyError(
                "Aggregator type {} not recognized.".format(aggregator_type)
            )
        # to specify whether eps is trainable or not.
        if learn_eps:
            self.eps = th.nn.Parameter(th.FloatTensor([init_eps]))
        else:
            # Non-learnable eps is registered as a buffer so it moves with the module.
            self.register_buffer("eps", th.FloatTensor([init_eps]))

    def METHOD_NAME(self, graph, feat, edge_weight=None):
        r"""
        Description
        -----------
        Compute Graph Isomorphism Network layer.

        NOTE(review): ``METHOD_NAME`` appears to be a placeholder — presumably
        ``forward`` (nn.Module's call hook).

        Parameters
        ----------
        graph : DGLGraph
            The graph.
        feat : torch.Tensor or pair of torch.Tensor
            If a torch.Tensor is given, the input feature of shape :math:`(N, D_{in})` where
            :math:`D_{in}` is size of input feature, :math:`N` is the number of nodes.
            If a pair of torch.Tensor is given, the pair must contain two tensors of shape
            :math:`(N_{in}, D_{in})` and :math:`(N_{out}, D_{in})`.
            If ``apply_func`` is not None, :math:`D_{in}` should
            fit the input dimensionality requirement of ``apply_func``.
        edge_weight : torch.Tensor, optional
            Optional tensor on the edge. If given, the convolution will weight
            with regard to the message.

        Returns
        -------
        torch.Tensor
            The output feature of shape :math:`(N, D_{out})` where
            :math:`D_{out}` is the output dimensionality of ``apply_func``.
            If ``apply_func`` is None, :math:`D_{out}` should be the same
            as input dimensionality.
        """
        _reducer = getattr(fn, self._aggregator_type)
        # local_scope: temporary node/edge data below is discarded on exit.
        with graph.local_scope():
            aggregate_fn = fn.copy_u("h", "m")
            if edge_weight is not None:
                assert edge_weight.shape[0] == graph.num_edges()
                graph.edata["_edge_weight"] = edge_weight
                aggregate_fn = fn.u_mul_e("h", "_edge_weight", "m")

            feat_src, feat_dst = expand_as_pair(feat, graph)
            graph.srcdata["h"] = feat_src
            # Message passing: aggregate neighbour features into 'neigh'.
            graph.update_all(aggregate_fn, _reducer("m", "neigh"))
            # (1 + eps) * h_i + aggregate(neighbours) — the GIN update rule.
            rst = (1 + self.eps) * feat_dst + graph.dstdata["neigh"]
            if self.apply_func is not None:
                rst = self.apply_func(rst)
            # activation
            if self.activation is not None:
                rst = self.activation(rst)
            return rst
6,494 | adjust cores for memory request | import logging
import math
from typing import Dict, List, Optional, Tuple
from ..globals import RESERVED_STORAGE_GB_PER_CORE
from .azure.resource_utils import (
azure_is_valid_storage_request,
azure_local_ssd_size,
azure_machine_type_to_worker_type_and_cores,
azure_memory_to_worker_type,
azure_requested_to_actual_storage_bytes,
azure_valid_cores_from_worker_type,
azure_valid_machine_types,
azure_worker_memory_per_core_mib,
)
from .gcp.resource_utils import (
gcp_cost_from_msec_mcpu,
gcp_is_valid_storage_request,
gcp_local_ssd_size,
gcp_machine_type_to_worker_type_and_cores,
gcp_memory_to_worker_type,
gcp_requested_to_actual_storage_bytes,
gcp_valid_cores_from_worker_type,
gcp_valid_machine_types,
gcp_worker_memory_per_core_mib,
)
log = logging.getLogger('resource_utils')
def round_up_division(numerator: int, denominator: int) -> int:
    """Integer division rounding up instead of down (assumes a positive denominator)."""
    adjusted = numerator + denominator - 1
    return adjusted // denominator
def possible_cores_from_worker_type(cloud: str, worker_type: str) -> List[int]:
    """Return the valid core counts for *worker_type* on the given cloud."""
    if cloud == 'azure':
        return azure_valid_cores_from_worker_type[worker_type]
    # Only 'azure' and 'gcp' are supported clouds.
    assert cloud == 'gcp'
    return gcp_valid_cores_from_worker_type[worker_type]


def valid_machine_types(cloud: str) -> List[str]:
    """Return the list of supported machine type names for the given cloud."""
    if cloud == 'azure':
        return azure_valid_machine_types
    assert cloud == 'gcp'
    return gcp_valid_machine_types


def memory_to_worker_type(cloud: str) -> Dict[str, str]:
    """Return the memory-class -> worker-type mapping for the given cloud."""
    if cloud == 'azure':
        return azure_memory_to_worker_type
    assert cloud == 'gcp'
    return gcp_memory_to_worker_type


def machine_type_to_worker_type_cores(cloud: str, machine_type: str) -> Tuple[str, int]:
    """Decompose a machine type name into (worker_type, cores) for the given cloud."""
    if cloud == 'azure':
        return azure_machine_type_to_worker_type_and_cores(machine_type)
    assert cloud == 'gcp'
    return gcp_machine_type_to_worker_type_and_cores(machine_type)
def cost_from_msec_mcpu(msec_mcpu: int) -> Optional[float]:
    """Convert legacy msec*mcpu usage into a cost; None passes through."""
    if msec_mcpu is None:
        return None
    # msec_mcpu is deprecated and only applicable to GCP
    return gcp_cost_from_msec_mcpu(msec_mcpu)


def worker_memory_per_core_mib(cloud: str, worker_type: str) -> int:
    """Memory per core in MiB for the given cloud/worker type."""
    if cloud == 'azure':
        return azure_worker_memory_per_core_mib(worker_type)
    assert cloud == 'gcp'
    return gcp_worker_memory_per_core_mib(worker_type)


def worker_memory_per_core_bytes(cloud: str, worker_type: str) -> int:
    """Memory per core in bytes for the given cloud/worker type."""
    m = worker_memory_per_core_mib(cloud, worker_type)
    return int(m * 1024**2)


def memory_bytes_to_cores_mcpu(cloud: str, memory_in_bytes: int, worker_type: str) -> int:
    """Minimum mcpu needed so the per-core memory covers *memory_in_bytes* (rounded up)."""
    return math.ceil((memory_in_bytes / worker_memory_per_core_bytes(cloud, worker_type)) * 1000)


def cores_mcpu_to_memory_bytes(cloud: str, cores_in_mcpu: int, worker_type: str) -> int:
    """Memory in bytes that *cores_in_mcpu* entitles a job to on this worker type."""
    return int((cores_in_mcpu / 1000) * worker_memory_per_core_bytes(cloud, worker_type))


def METHOD_NAME(cloud: str, cores_in_mcpu: int, memory_in_bytes: int, worker_type: str) -> int:
    # NOTE(review): placeholder identifier — presumably ``adjust_cores_for_memory_request``:
    # bump the core request up so the implied memory covers the memory request.
    min_cores_mcpu = memory_bytes_to_cores_mcpu(cloud, memory_in_bytes, worker_type)
    return max(cores_in_mcpu, min_cores_mcpu)
def unreserved_worker_data_disk_size_gib(data_disk_size_gib: int, cores: int) -> int:
    """Disk space (GiB) left for jobs after image and per-core container reservations."""
    # 30 GiB is set aside for the worker image itself.
    reserved_image_size = 30
    reserved_container_size = RESERVED_STORAGE_GB_PER_CORE * cores
    return data_disk_size_gib - reserved_image_size - reserved_container_size
def requested_storage_bytes_to_actual_storage_gib(
    cloud: str, storage_bytes: int, allow_zero_storage: bool
) -> Optional[int]:
    """Translate a requested storage size into the actual provisioned size in GiB.

    Returns None when the cloud-specific translation rejects the request.
    """
    if cloud == 'azure':
        actual_storage_bytes = azure_requested_to_actual_storage_bytes(storage_bytes, allow_zero_storage)
    else:
        assert cloud == 'gcp'
        actual_storage_bytes = gcp_requested_to_actual_storage_bytes(storage_bytes, allow_zero_storage)

    if actual_storage_bytes is None:
        return None

    return round_storage_bytes_to_gib(actual_storage_bytes)
def adjust_cores_for_packability(cores_in_mcpu: int) -> int:
    """Round an mcpu request up to the nearest packable size: 1000 * 2**k, k >= -2.

    The minimum schedulable size is a quarter core (250 mcpu).
    """
    clamped = max(1, cores_in_mcpu)
    exponent = math.ceil(math.log2(clamped / 1000))
    exponent = max(-2, exponent)
    return int(2**exponent * 1000)
def round_storage_bytes_to_gib(storage_bytes: int) -> int:
    """Convert bytes to GiB, rounding up."""
    return math.ceil(storage_bytes / 1024**3)
def storage_gib_to_bytes(storage_gib: int) -> int:
    """Convert GiB to bytes (rounding up defensively in case a float sneaks in)."""
    gib_in_bytes = 1024**3
    return math.ceil(storage_gib * gib_in_bytes)
def is_valid_cores_mcpu(cores_mcpu: int) -> bool:
    """A request is valid iff it is a positive power-of-two number of quarter cores
    (250, 500, 1000, 2000, ... mcpu)."""
    if cores_mcpu <= 0:
        return False
    scaled = cores_mcpu * 4
    if scaled % 1000:
        # Not a whole number of quarter cores.
        return False
    quarter_cores = scaled // 1000
    # Power-of-two check via the classic bit trick.
    return quarter_cores & (quarter_cores - 1) == 0
def is_valid_storage_request(cloud: str, storage_in_gib: int) -> bool:
    """Whether the requested storage size is acceptable on the given cloud."""
    if cloud == 'azure':
        return azure_is_valid_storage_request(storage_in_gib)
    assert cloud == 'gcp'
    return gcp_is_valid_storage_request(storage_in_gib)


def local_ssd_size(cloud: str, worker_type: str, cores: int) -> int:
    """Local SSD size for a worker of the given type/core count on the given cloud."""
    if cloud == 'azure':
        return azure_local_ssd_size(worker_type, cores)
    assert cloud == 'gcp', cloud
    # GCP local SSD size does not depend on worker type or cores.
    return gcp_local_ssd_size()
6,495 | watch cursor line | from __future__ import annotations
import typing as t
from abc import abstractmethod
from dataclasses import dataclass
from rich import console
from rich.text import Text
from rich.style import Style
from rich.table import box, Table
from textual.app import Reactive
from textual.widget import Widget
from starwhale.utils import (
Order,
get_field,
pretty_bytes,
sort_obj_list,
snake_to_camel,
)
from starwhale.consts import CREATED_AT_KEY, DEFAULT_PROJECT, STANDALONE_INSTANCE
from starwhale.core.job.view import JobTermView
from starwhale.core.model.view import ModelTermView
from starwhale.core.dataset.view import DatasetTermView
from starwhale.core.runtime.view import RuntimeTermView
if t.TYPE_CHECKING:
from textual import events
from textual.widget import RenderableType
default_project = f"{STANDALONE_INSTANCE}/{DEFAULT_PROJECT}"
@dataclass
class Column:
    # Dotted key used to pull the cell value out of a row item (via get_field).
    key: str
    # Display name; empty string means derive it from ``key`` with snake_to_camel.
    name: t.Optional[str] = ""
    # Optional custom renderer: (row_index, row_item) -> renderable.
    render: t.Optional[t.Callable[[int, t.Any], t.Any]] = None
class OrderBy:
    """Tracks the current sort field/direction, toggled by single-key shortcuts."""

    def __init__(self) -> None:
        # Keyboard shortcut -> field name to sort by.
        self.orderby_keys: t.Dict[str, str] = {
            "C": CREATED_AT_KEY,
            "N": "name",
            "S": "size",
        }
        # Empty field means "no sorting".
        self.current_order = Order("")

    def record_key(self, key: str) -> bool:
        """Handle a key press; returns True if it changed the sort order."""
        if key not in self.orderby_keys:
            return False
        field = self.orderby_keys[key]
        if self.current_order.field == field:
            # Same key again: flip the direction.
            self.current_order.reverse = not self.current_order.reverse
        else:
            self.current_order.field = field
            self.current_order.reverse = False
        return True

    def sort(self, data: t.Sequence) -> t.Sequence:
        """Return *data* sorted by the current order (unchanged if no order set)."""
        if not self.current_order.field:
            return data
        return sort_obj_list(data, [self.current_order])

    def get_order_icon(self) -> t.Tuple[str, RenderableType]:
        """Return (sorted field, arrow icon) for column headers; empty when unsorted."""
        if not self.current_order.field:
            return "", ""
        field = self.current_order.field
        if self.current_order.reverse:
            return field, Text(" ↓", style="green")
        else:
            return field, Text(" ↑", style="red")
class TableWidget(Widget):
"""TableWidget makes an interactive rich.Table"""
def __init__(self, **kwargs: t.Any) -> None:
super().__init__(**kwargs)
self.table = Table(expand=True, box=box.SIMPLE)
self.data: t.Sequence = []
self.render_fn: t.List[Column] = []
self._info: t.Any = None
self._orderby = OrderBy()
show_info: Reactive[bool] = Reactive(False)
cursor_line: Reactive[int] = Reactive(0, repaint=False)
def watch_show_info(self, show: bool) -> None:
self._info = show and self.info(self.cursor_line) or None
self.refresh(layout=True)
def METHOD_NAME(self, value: int) -> None:
self.highlight_row(value)
self.refresh()
@abstractmethod
def reloadImpl(self) -> None:
raise NotImplementedError
def info(self, idx: int) -> RenderableType:
return console.Pretty(self.data[idx], indent_guides=True)
def watch_data(self) -> None:
pass
def render(self) -> Table:
self.app.sub_title = self.__class__.__name__
return self._info or self.table
def reload(self) -> None:
self.table.columns = []
for i in self.render_fn:
name = Text(i.name and i.name or snake_to_camel(i.key))
f, icon = self._orderby.get_order_icon()
if i.key == f:
name += icon
self.table.add_column(name)
self.reloadImpl()
self.table.rows = []
data = self._orderby.sort(self.data)
for idx, item in enumerate(data):
def try_render(col: Column) -> t.Any:
if col.render:
return col.render(idx, item)
return get_field(item, col.key)
self.table.add_row(*[try_render(i) for i in self.render_fn])
self.highlight_row(self.cursor_line)
self.refresh()
def highlight_row(self, row: int) -> None:
self.table.row_styles = [
Style(bgcolor="magenta") if i == row else ""
for i in range(self.table.row_count)
]
async def on_key(self, event: events.Key) -> None:
if event.key == "r":
self.reload()
if self._orderby.record_key(event.key):
self.reload()
await self.dispatch_key(event)
async def key_down(self) -> None:
self.cursor_down()
async def key_j(self) -> None:
self.cursor_down()
async def key_up(self) -> None:
self.cursor_up()
async def key_k(self) -> None:
self.cursor_up()
def cursor_down(self) -> None:
if self.cursor_line < self.table.row_count - 1:
self.cursor_line += 1
def cursor_up(self) -> None:
if self.cursor_line > 0:
self.cursor_line -= 1
async def key_i(self) -> None:
self.show_info = True
async def key_escape(self) -> None:
    """Dismiss the detail/info panel."""
    self.show_info = False
async def key_h(self) -> None:
    """Alias for escape: dismiss the detail/info panel."""
    self.show_info = False
class Models(TableWidget):
    """Table view listing starwhale models under *uri*."""

    # TODO use constance
    def __init__(self, uri: str = default_project, **kwargs: t.Any) -> None:
        super().__init__(**kwargs)
        # Column spec: key, optional title, optional per-cell renderer.
        self.render_fn = [
            Column("name"),
            Column("version"),
            Column("tags", render=lambda _, x: ",".join(x["tags"])),
            Column("size", render=lambda _, x: pretty_bytes(x["size"])),
            Column(CREATED_AT_KEY, "Created At"),
        ]
        self.uri = uri
        self.reload()

    def reloadImpl(self) -> None:
        """Fetch the model list for ``self.uri`` into ``self.data``."""
        self.data, _ = ModelTermView.list(self.uri)
class Datasets(TableWidget):
    """Table view listing starwhale datasets under *uri*."""
    # (Docstring corrected: it previously said "model view", a copy-paste slip.)

    def __init__(self, uri: str = default_project, **kwargs: t.Any) -> None:
        super().__init__(**kwargs)
        # Column spec mirrors Models: key, optional title, optional renderer.
        self.render_fn = [
            Column("name"),
            Column("version"),
            Column("tags", render=lambda _, x: ",".join(x["tags"])),
            Column("size", render=lambda _, x: pretty_bytes(x["size"])),
            Column(CREATED_AT_KEY, "Created At"),
        ]
        self.uri = uri
        self.reload()

    def reloadImpl(self) -> None:
        """Fetch the dataset list for ``self.uri`` into ``self.data``."""
        self.data, _ = DatasetTermView.list(self.uri)
class Runtimes(TableWidget):
    """Table view listing starwhale runtimes under *uri*."""
    # (Docstring corrected: it previously said "model view", a copy-paste slip.)

    def __init__(self, uri: str = default_project, **kwargs: t.Any) -> None:
        super().__init__(**kwargs)
        # Column spec mirrors Models: key, optional title, optional renderer.
        self.render_fn = [
            Column("name"),
            Column("version"),
            Column("tags", render=lambda _, x: ",".join(x["tags"])),
            Column("size", render=lambda _, x: pretty_bytes(x["size"])),
            Column(CREATED_AT_KEY, "Created At"),
        ]
        self.uri = uri
        self.reload()

    def reloadImpl(self) -> None:
        """Fetch the runtime list for ``self.uri`` into ``self.data``."""
        self.data, _ = RuntimeTermView.list(self.uri)
class Jobs(TableWidget):
    """Table view listing starwhale evaluation jobs under *uri*.

    Fixes: stray trailing "|" garbage on the last line (syntax-breaking
    extraction artifact) removed; docstring previously said "model view".
    """

    def __init__(self, uri: str = default_project, **kwargs: t.Any) -> None:
        super().__init__(**kwargs)
        # Job fields live in the job manifest, hence the "manifest." prefixes.
        self.render_fn = [
            Column("manifest.version", "Name"),
            Column("manifest.model", "Model"),
            Column(
                "manifest.datasets",
                "Datasets",
                render=lambda _, x: console.Pretty(get_field(x, "manifest.datasets")),
            ),
            Column(f"manifest.{CREATED_AT_KEY}", "Created At"),
            Column("manifest.finished_at", "Finished At"),
        ]
        self.uri = uri
        self.reload()

    def reloadImpl(self) -> None:
        """Fetch the job list for ``self.uri`` into ``self.data``."""
        self.data, _ = JobTermView.list(self.uri)
"""
Ethereum Forks
^^^^^^^^^^^^^^
Detects Python packages that specify Ethereum hardforks.
"""
import importlib
import pkgutil
from enum import Enum, auto
from pkgutil import ModuleInfo
from types import ModuleType
from typing import Any, Dict, Iterator, List, Optional, Type, TypeVar
import ethereum
from ethereum.fork_criteria import ByBlockNumber, ByTimestamp, ForkCriteria
class ConsensusType(Enum):
    """
    How a fork chooses its canonical chain.
    """

    PROOF_OF_WORK = auto()
    PROOF_OF_STAKE = auto()

    def is_pow(self) -> bool:
        """
        Returns True if self == PROOF_OF_WORK.
        """
        # Enum members are singletons, so identity comparison is equivalent.
        return self is ConsensusType.PROOF_OF_WORK

    def is_pos(self) -> bool:
        """
        Returns True if self == PROOF_OF_STAKE.
        """
        return self is ConsensusType.PROOF_OF_STAKE
# Bound TypeVar so classmethod constructors (discover/load/load_from_json)
# return the invoking subclass type rather than plain Hardfork.
H = TypeVar("H", bound="Hardfork")
class Hardfork:
    """
    Metadata associated with an Ethereum hardfork.

    Fixes: the mangled METHOD_NAME identifier is restored to ``path``
    (local variable in ``discover`` and the property below); the stray
    trailing "|" extraction artifact on the final line is removed; the
    ``timestamp`` property's copy-pasted docstring is corrected.
    """

    mod: ModuleType

    @classmethod
    def discover(cls: Type[H]) -> List[H]:
        """
        Find packages which contain Ethereum hardfork specifications.
        """
        path = getattr(ethereum, "__path__", None)
        if path is None:
            raise ValueError("module `ethereum` has no path information")

        modules = pkgutil.iter_modules(path, ethereum.__name__ + ".")
        modules = (module for module in modules if module.ispkg)
        forks: List[H] = []
        for pkg in modules:
            mod = importlib.import_module(pkg.name)
            if hasattr(mod, "FORK_CRITERIA"):
                forks.append(cls(mod))

        # Timestamps are bigger than block numbers, so this always works.
        forks.sort(key=lambda fork: fork.criteria)

        return forks

    @classmethod
    def load(cls: Type[H], config_dict: Dict[ForkCriteria, str]) -> List[H]:
        """
        Load the forks from a config dict specifying fork blocks and
        timestamps.
        """
        config = sorted(config_dict.items(), key=lambda x: x[0])

        forks = []
        for (criteria, name) in config:
            mod = importlib.import_module("ethereum." + name)
            mod.FORK_CRITERIA = criteria  # type: ignore
            forks.append(cls(mod))

        return forks

    @classmethod
    def load_from_json(cls: Type[H], json: Any) -> List[H]:
        """
        Load fork config from the json format used by Geth.

        Does not support some forks that only exist on Mainnet. Use
        `discover()` for Mainnet.
        """
        c = json["config"]
        config = {
            ByBlockNumber(0): "frontier",
            ByBlockNumber(c["homesteadBlock"]): "homestead",
            ByBlockNumber(c["eip150Block"]): "tangerine_whistle",
            ByBlockNumber(c["eip155Block"]): "spurious_dragon",
            ByBlockNumber(c["byzantiumBlock"]): "byzantium",
            ByBlockNumber(c["constantinopleBlock"]): "constantinople",
            ByBlockNumber(c["istanbulBlock"]): "istanbul",
            ByBlockNumber(c["berlinBlock"]): "berlin",
            ByBlockNumber(c["londonBlock"]): "london",
            ByBlockNumber(c["mergeForkBlock"]): "paris",
            ByTimestamp(c["shanghaiTime"]): "shanghai",
        }

        # The DAO fork only exists on Mainnet and has no spec package here.
        if "daoForkBlock" in c:
            raise Exception(
                "Hardfork.load_from_json() does not support Mainnet"
            )

        return cls.load(config)

    def __init__(self, mod: ModuleType) -> None:
        self.mod = mod

    @property
    def consensus(self) -> ConsensusType:
        """
        How this fork chooses its canonical chain.
        """
        # Forks that still define PoW validation are treated as proof-of-work.
        if hasattr(self.module("fork"), "validate_proof_of_work"):
            return ConsensusType.PROOF_OF_WORK
        else:
            return ConsensusType.PROOF_OF_STAKE

    @property
    def criteria(self) -> ForkCriteria:
        """
        Criteria to trigger this hardfork.
        """
        criteria = self.mod.FORK_CRITERIA  # type: ignore[attr-defined]
        assert isinstance(criteria, ForkCriteria)
        return criteria

    @property
    def block(self) -> int:
        """
        Block number of the first block in this hard fork.
        """
        if isinstance(self.criteria, ByBlockNumber):
            return self.criteria.block_number
        else:
            raise AttributeError

    @property
    def timestamp(self) -> int:
        """
        Timestamp of the first block in this hard fork.
        """
        if isinstance(self.criteria, ByTimestamp):
            return self.criteria.timestamp
        else:
            raise AttributeError

    def has_activated(self, block_number: int, timestamp: int) -> bool:
        """
        Check whether this fork has activated.
        """
        return self.criteria.check(block_number, timestamp)

    @property
    def path(self) -> Optional[str]:
        """
        Path to the module containing this hard fork.
        """
        return getattr(self.mod, "__path__", None)

    @property
    def short_name(self) -> str:
        """
        Short name (without the `ethereum.` prefix) of the hard fork.
        """
        return self.mod.__name__.split(".")[-1]

    @property
    def name(self) -> str:
        """
        Name of the hard fork.
        """
        return self.mod.__name__

    @property
    def title_case_name(self) -> str:
        """
        Title-cased, space-separated name of the hard fork.
        """
        return self.short_name.replace("_", " ").title()

    def __repr__(self) -> str:
        """
        Return repr(self).
        """
        return (
            self.__class__.__name__
            + "("
            + f"name={self.name!r}, "
            + f"criteria={self.criteria}, "
            + "..."
            + ")"
        )

    def import_module(self) -> ModuleType:
        """
        Return the module containing this specification.
        """
        return self.mod

    def module(self, name: str) -> Any:
        """
        Import if necessary, and return the given module belonging to this hard
        fork.
        """
        return importlib.import_module(self.mod.__name__ + "." + name)

    def optimized_module(self, name: str) -> Any:
        """
        Import if necessary, and return the given module belonging to this hard
        fork's optimized implementation.
        """
        assert self.mod.__name__.startswith("ethereum.")
        module = "ethereum_optimized" + self.mod.__name__[8:] + "." + name
        return importlib.import_module(module)

    def iter_modules(self) -> Iterator[ModuleInfo]:
        """
        Iterate through the (sub-)modules describing this hardfork.
        """
        if self.path is None:
            raise ValueError(f"cannot walk {self.name}, path is None")

        return pkgutil.iter_modules(self.path, self.name + ".")

    def walk_packages(self) -> Iterator[ModuleInfo]:
        """
        Iterate recursively through the (sub-)modules describing this hardfork.
        """
        if self.path is None:
            raise ValueError(f"cannot walk {self.name}, path is None")

        return pkgutil.walk_packages(self.path, self.name + ".")
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for numerical correctness."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
class Bias(keras.layers.Layer):
  """Layer that add a bias to its inputs."""

  def build(self, input_shape):
    # Single trainable scalar, broadcast-added to every input element.
    # NOTE(review): add_variable is the older alias of add_weight; kept for
    # compatibility with this TF version.
    self.bias = self.add_variable('bias', (1,), initializer='zeros')

  def call(self, inputs):
    return inputs + self.bias
class MultiInputSubclassed(keras.Model):
  """Subclassed Model that adds its inputs and then adds a bias."""

  def __init__(self):
    super(MultiInputSubclassed, self).__init__()
    self.add = keras.layers.Add()
    self.bias = Bias()

  def call(self, inputs):
    # `inputs` is a list of tensors; Add sums them element-wise.
    added = self.add(inputs)
    return self.bias(added)
def multi_input_functional():
  """Functional Model that adds its inputs and then adds a bias."""
  inputs = [keras.Input(shape=(1,)) for _ in range(3)]
  added = keras.layers.Add()(inputs)
  output = Bias()(added)
  return keras.Model(inputs, output)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class SimpleBiasTest(keras_parameterized.TestCase):
  """Numerical correctness tests for a single-weight bias model."""

  def _get_simple_bias_model(self):
    # Model is identity + one trainable bias; SGD(0.1) with MAE loss moves
    # the bias by exactly 0.1 per epoch, making losses exactly predictable.
    model = testing_utils.get_model_from_layers([Bias()], input_shape=(1,))
    model.compile(
        keras.optimizer_v2.gradient_descent.SGD(0.1),
        'mae',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    return model

  def test_simple_bias_fit(self):
    x = np.array([[0.], [1.], [2.]])
    y = np.array([[0.5], [2.], [3.5]])
    model = self._get_simple_bias_model()

    history = model.fit(x, y, batch_size=3, epochs=5)
    # MAE starts at 1.0 and drops by the SGD step (0.1) each epoch.
    self.assertAllClose(history.history['loss'], [1., 0.9, 0.8, 0.7, 0.6])

  def test_simple_bias_evaluate(self):
    x = np.array([[0.], [1.], [2.]])
    y = np.array([[1.], [3.], [5.]])
    model = self._get_simple_bias_model()

    loss = model.evaluate(x, y, batch_size=1)
    self.assertAlmostEqual(loss, 2.)

  def test_simple_bias_predict(self):
    x = np.array([[0.], [1.], [2.]])
    model = self._get_simple_bias_model()

    # Bias initializes to zero, so prediction equals the input.
    pred = model.predict(x, batch_size=1)
    self.assertAllClose(x, pred)
@keras_parameterized.run_all_keras_modes
class MultipleInputTest(keras_parameterized.TestCase):
  """Numerical correctness tests for multi-input adder-plus-bias models.

  Fix: the fit test's name was mangled to METHOD_NAME in the dump; restored
  to ``test_multiple_input_fit`` to match its evaluate/predict siblings.
  """

  def _get_multiple_input_model(self, subclassed=True):
    """Build and compile the 3-input adder model (subclassed or functional)."""
    if subclassed:
      model = MultiInputSubclassed()
    else:
      model = multi_input_functional()
    model.compile(
        keras.optimizer_v2.gradient_descent.SGD(0.1),
        'mae',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    return model

  @parameterized.named_parameters(('subclassed', True), ('functional', False))
  def test_multiple_input_fit(self, subclassed):
    x = [
        np.array([[1.], [2.], [3.]]),
        np.array([[4.], [5.], [6.]]),
        np.array([[7.], [8.], [9.]])
    ]
    y = np.array([[12.5], [16.], [19.5]])

    model = self._get_multiple_input_model(subclassed)
    history = model.fit(x, y, batch_size=3, epochs=5)
    # MAE starts at 1.0 and drops by the SGD step (0.1) each epoch.
    self.assertAllClose(history.history['loss'], [1., 0.9, 0.8, 0.7, 0.6])

  @parameterized.named_parameters(('subclassed', True), ('functional', False))
  def test_multiple_input_evaluate(self, subclassed):
    x = [
        np.array([[1.], [2.], [3.]]),
        np.array([[4.], [5.], [6.]]),
        np.array([[7.], [8.], [9.]])
    ]
    y = np.array([[13.], [17.], [21.]])

    model = self._get_multiple_input_model(subclassed)
    loss = model.evaluate(x, y, batch_size=3)
    self.assertAlmostEqual(loss, 2.)

  @parameterized.named_parameters(('subclassed', True), ('functional', False))
  def test_multiple_input_predict(self, subclassed):
    x = [
        np.array([[1.], [2.], [3.]]),
        np.array([[4.], [5.], [6.]]),
        np.array([[7.], [8.], [9.]])
    ]

    model = self._get_multiple_input_model(subclassed)
    # Bias starts at zero, so prediction is just the element-wise sum.
    pred = model.predict(x, batch_size=1)
    self.assertAllClose(pred, [[12.], [15.], [18.]])
if __name__ == '__main__':
  # Stray trailing "|" extraction artifact removed from the main() call.
  test.main()
"""
Test for salt.modules.vmctl
"""
import pytest
import salt.modules.vmctl as vmctl
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
    """Map the vmctl module to an empty set of loader dunder overrides."""
    return {vmctl: {}}
def test_create_disk():
    """
    Tests creating a new disk image.
    """
    mock_cmd = MagicMock(
        return_value={
            "stdout": "vmctl: imagefile created",
            "stderr": "",
            "retcode": 0,
        }
    )
    with patch.dict(vmctl.__salt__, {"cmd.run_all": mock_cmd}):
        assert vmctl.create_disk("/path/to/disk.img", "1G")
def test_load():
    """
    Tests loading a configuration file.
    """
    mock_cmd = MagicMock(return_value={"retcode": 0})
    with patch.dict(vmctl.__salt__, {"cmd.run_all": mock_cmd}):
        assert vmctl.load("/etc/vm.switches.conf")
def test_reload():
    """
    Tests reloading the configuration.
    """
    mock_cmd = MagicMock(return_value={"retcode": 0})
    with patch.dict(vmctl.__salt__, {"cmd.run_all": mock_cmd}):
        assert vmctl.reload()
def test_reset():
    """
    Tests resetting VMM.
    """
    mock_cmd = MagicMock(return_value={"retcode": 0})
    with patch.dict(vmctl.__salt__, {"cmd.run_all": mock_cmd}):
        result = vmctl.reset()
    # The mock retains its call record after the patch context exits.
    mock_cmd.assert_called_once_with(
        ["vmctl", "reset"], output_loglevel="trace", python_shell=False
    )
    assert result
def test_reset_vms():
    """
    Tests resetting VMs.
    """
    mock_cmd = MagicMock(return_value={"retcode": 0})
    with patch.dict(vmctl.__salt__, {"cmd.run_all": mock_cmd}):
        result = vmctl.reset(vms=True)
    mock_cmd.assert_called_once_with(
        ["vmctl", "reset", "vms"], output_loglevel="trace", python_shell=False
    )
    assert result
def test_reset_switches():
    """
    Tests resetting switches.
    """
    mock_cmd = MagicMock(return_value={"retcode": 0})
    with patch.dict(vmctl.__salt__, {"cmd.run_all": mock_cmd}):
        result = vmctl.reset(switches=True)
    mock_cmd.assert_called_once_with(
        ["vmctl", "reset", "switches"],
        output_loglevel="trace",
        python_shell=False,
    )
    assert result
def test_reset_all():
    """
    Tests resetting all.
    """
    mock_cmd = MagicMock(return_value={"retcode": 0})
    with patch.dict(vmctl.__salt__, {"cmd.run_all": mock_cmd}):
        result = vmctl.reset(all=True)
    mock_cmd.assert_called_once_with(
        ["vmctl", "reset", "all"], output_loglevel="trace", python_shell=False
    )
    assert result
def test_start_existing_vm():
    """
    Tests starting a VM that is already defined.
    """
    mock_cmd = MagicMock(
        return_value={
            "stderr": "vmctl: started vm 4 successfully, tty /dev/ttyp4",
            "retcode": 0,
        }
    )
    with patch.dict(vmctl.__salt__, {"cmd.run_all": mock_cmd}):
        assert vmctl.start("4") == {"changes": True, "console": "/dev/ttyp4"}
def test_start_new_vm():
    """
    Tests starting a new VM.
    """
    mock_cmd = MagicMock(
        return_value={
            "stderr": "vmctl: started vm 4 successfully, tty /dev/ttyp4",
            "retcode": 0,
        }
    )
    # status() is patched to report no existing VMs, forcing the "new VM"
    # code path that builds the full argument list.
    mock_status = MagicMock(return_value={})
    expected = {"changes": True, "console": "/dev/ttyp4"}
    with patch.dict(vmctl.__salt__, {"cmd.run_all": mock_cmd}):
        with patch("salt.modules.vmctl.status", mock_status):
            result = vmctl.start("web1", bootpath="/bsd.rd", nics=2, disk="/disk.img")
            mock_cmd.assert_called_once_with(
                [
                    "vmctl",
                    "start",
                    "web1",
                    "-i 2",
                    "-b",
                    "/bsd.rd",
                    "-d",
                    "/disk.img",
                ],
                output_loglevel="trace",
                python_shell=False,
            )
            assert result == expected
def test_status():
    """
    Tests getting status for all VMs.
    """
    ret = {}
    # Three VMs in distinct states: an explicit "- stopping" suffix, a
    # running VM (has a PID, no suffix), and a stopped VM (PID column "-").
    ret["stdout"] = (
        " ID PID VCPUS MAXMEM CURMEM TTY OWNER NAME\n"
        " 1 123 1 2.9G 150M ttyp5 john web1 - stopping\n"
        " 2 456 1 512M 301M ttyp4 paul web2\n"
        " 3 - 1 512M - - george web3\n"
    )
    ret["retcode"] = 0
    mock_cmd = MagicMock(return_value=ret)
    expected = {
        "web1": {
            "curmem": "150M",
            "id": "1",
            "maxmem": "2.9G",
            "owner": "john",
            "pid": "123",
            "state": "stopping",
            "tty": "ttyp5",
            "vcpus": "1",
        },
        "web2": {
            "curmem": "301M",
            "id": "2",
            "maxmem": "512M",
            "owner": "paul",
            "pid": "456",
            "state": "running",
            "tty": "ttyp4",
            "vcpus": "1",
        },
        "web3": {
            "curmem": "-",
            "id": "3",
            "maxmem": "512M",
            "owner": "george",
            "pid": "-",
            "state": "stopped",
            "tty": "-",
            "vcpus": "1",
        },
    }
    with patch.dict(vmctl.__salt__, {"cmd.run_all": mock_cmd}):
        assert vmctl.status() == expected
def test_status_single():
    """
    Tests getting status for a single VM.
    """
    ret = {}
    # Two VMs in the raw output; only "web4" should appear in the result
    # because status() is called with that name as a filter.
    ret["stdout"] = (
        " ID PID VCPUS MAXMEM CURMEM TTY OWNER NAME\n"
        " 1 123 1 2.9G 150M ttyp5 ringo web4\n"
        " 2 - 1 512M - - george web3\n"
    )
    ret["retcode"] = 0
    mock_cmd = MagicMock(return_value=ret)
    expected = {
        "web4": {
            "curmem": "150M",
            "id": "1",
            "maxmem": "2.9G",
            "owner": "ringo",
            "pid": "123",
            "state": "running",
            "tty": "ttyp5",
            "vcpus": "1",
        },
    }
    with patch.dict(vmctl.__salt__, {"cmd.run_all": mock_cmd}):
        assert vmctl.status("web4") == expected
def test_stop_when_running():
    """
    Tests stopping a VM that is running.
    """
    mock_cmd = MagicMock(
        return_value={
            "stdout": "",
            "stderr": "vmctl: sent request to terminate vm 14",
            "retcode": 0,
        }
    )
    with patch.dict(vmctl.__salt__, {"cmd.run_all": mock_cmd}):
        result = vmctl.stop("web1")
    mock_cmd.assert_called_once_with(
        ["vmctl", "stop", "web1"], output_loglevel="trace", python_shell=False
    )
    assert result["changes"]
def test_stop_when_stopped():
    """
    Tests stopping a VM that is already stopped/stopping.

    Fixes: function name restored from the mangled METHOD_NAME placeholder
    (matching the sibling test_stop_when_running); trailing "|" extraction
    artifact removed from the final assertion line.
    """
    ret = {}
    ret["stdout"] = ""
    ret["stderr"] = "vmctl: terminate vm command failed: Invalid argument"
    ret["retcode"] = 0
    mock_cmd = MagicMock(return_value=ret)
    with patch.dict(vmctl.__salt__, {"cmd.run_all": mock_cmd}):
        res = vmctl.stop("web1")
        mock_cmd.assert_called_once_with(
            ["vmctl", "stop", "web1"], output_loglevel="trace", python_shell=False
        )
        # vmctl reports a failure for a non-running VM, so no change recorded.
        assert not res["changes"]
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the readline-based CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import argparse
import tempfile
from tensorflow.python.debug.cli import cli_config
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import readline_ui
from tensorflow.python.debug.cli import ui_factory
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
class MockReadlineUI(readline_ui.ReadlineUI):
  """Test subclass of ReadlineUI that bypasses terminal manipulations."""

  def __init__(self, on_ui_exit=None, command_sequence=None):
    readline_ui.ReadlineUI.__init__(
        self, on_ui_exit=on_ui_exit,
        config=cli_config.CLIConfig(config_file_path=tempfile.mktemp()))

    # Scripted commands returned one-by-one in place of real user input.
    self._command_sequence = command_sequence
    self._command_counter = 0

    # Captured screen outputs, inspected by the tests.
    self.observers = {"screen_outputs": []}

  def _get_user_command(self):
    # Replaces the readline prompt: pop the next scripted command.
    command = self._command_sequence[self._command_counter]
    self._command_counter += 1
    return command

  def _display_output(self, screen_output):
    # Record output instead of writing to the terminal.
    self.observers["screen_outputs"].append(screen_output)
class CursesTest(test_util.TensorFlowTestCase):
  """Tests for the readline-based tfdbg CLI.

  Fixes: the mangled METHOD_NAME test restored to
  ``testRunUIWithValidUsersCommands`` (camelCase, matching its siblings);
  several copy-pasted "initial command" docstrings corrected.

  NOTE(review): the class name says Curses but it exercises the readline
  UI; kept as-is since renaming would change the public test identifier.
  """

  def setUp(self):
    self._tmp_dir = tempfile.mkdtemp()
    self._tmp_config_path = os.path.join(self._tmp_dir, ".tfdbg_config")
    self.assertFalse(gfile.Exists(self._tmp_config_path))
    super(CursesTest, self).setUp()

  def tearDown(self):
    shutil.rmtree(self._tmp_dir)
    super(CursesTest, self).tearDown()

  def _babble(self, args, screen_info=None):
    """Test command handler: emit the line "bar" num_times times."""
    ap = argparse.ArgumentParser(
        description="Do babble.", usage=argparse.SUPPRESS)
    ap.add_argument(
        "-n",
        "--num_times",
        dest="num_times",
        type=int,
        default=60,
        help="How many times to babble")

    parsed = ap.parse_args(args)

    lines = ["bar"] * parsed.num_times
    return debugger_cli_common.RichTextLines(lines)

  def testUIFactoryCreatesReadlineUI(self):
    ui = ui_factory.get_ui(
        "readline",
        config=cli_config.CLIConfig(config_file_path=self._tmp_config_path))
    self.assertIsInstance(ui, readline_ui.ReadlineUI)

  def testUIFactoryRaisesExceptionOnInvalidUIType(self):
    with self.assertRaisesRegexp(ValueError, "Invalid ui_type: 'foobar'"):
      ui_factory.get_ui(
          "foobar",
          config=cli_config.CLIConfig(config_file_path=self._tmp_config_path))

  def testUIFactoryRaisesExceptionOnInvalidUITypeGivenAvailable(self):
    with self.assertRaisesRegexp(ValueError, "Invalid ui_type: 'readline'"):
      ui_factory.get_ui(
          "readline",
          available_ui_types=["curses"],
          config=cli_config.CLIConfig(config_file_path=self._tmp_config_path))

  def testRunUIExitImmediately(self):
    """Make sure that the UI can exit properly after launch."""
    ui = MockReadlineUI(command_sequence=["exit"])
    ui.run_ui()

    # No screen output should have happened.
    self.assertEqual(0, len(ui.observers["screen_outputs"]))

  def testRunUIEmptyCommand(self):
    """Issue an empty command then exit."""
    ui = MockReadlineUI(command_sequence=["", "exit"])
    ui.run_ui()
    self.assertEqual(1, len(ui.observers["screen_outputs"]))

  def testRunUIWithInitCmd(self):
    """Run UI with an initial command specified."""
    ui = MockReadlineUI(command_sequence=["exit"])

    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui(init_command="babble")

    screen_outputs = ui.observers["screen_outputs"]
    self.assertEqual(1, len(screen_outputs))
    self.assertEqual(["bar"] * 60, screen_outputs[0].lines)

  def testRunUIWithValidUsersCommands(self):
    """Run UI with a sequence of valid user commands."""
    ui = MockReadlineUI(command_sequence=["babble -n 3", "babble -n 6", "exit"])

    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()

    screen_outputs = ui.observers["screen_outputs"]
    self.assertEqual(2, len(screen_outputs))
    self.assertEqual(["bar"] * 3, screen_outputs[0].lines)
    self.assertEqual(["bar"] * 6, screen_outputs[1].lines)

  def testRunUIWithInvalidUsersCommands(self):
    """Run UI with an invalid command following a valid one."""
    ui = MockReadlineUI(command_sequence=["babble -n 3", "wobble", "exit"])

    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()

    screen_outputs = ui.observers["screen_outputs"]
    self.assertEqual(2, len(screen_outputs))
    self.assertEqual(["bar"] * 3, screen_outputs[0].lines)
    self.assertEqual(["ERROR: Invalid command prefix \"wobble\""],
                     screen_outputs[1].lines)

  def testRunUIWithOnUIExitCallback(self):
    observer = {"callback_invoked": False}

    def callback_for_test():
      observer["callback_invoked"] = True

    ui = MockReadlineUI(on_ui_exit=callback_for_test, command_sequence=["exit"])

    self.assertFalse(observer["callback_invoked"])
    ui.run_ui()

    self.assertEqual(0, len(ui.observers["screen_outputs"]))
    self.assertTrue(observer["callback_invoked"])

  def testIncompleteRedirectWorks(self):
    output_path = tempfile.mktemp()

    ui = MockReadlineUI(
        command_sequence=["babble -n 2 > %s" % output_path, "exit"])

    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()

    screen_outputs = ui.observers["screen_outputs"]
    self.assertEqual(1, len(screen_outputs))
    self.assertEqual(["bar"] * 2, screen_outputs[0].lines)

    with gfile.Open(output_path, "r") as f:
      self.assertEqual("bar\nbar\n", f.read())

  def testConfigSetAndShow(self):
    """Set a config value then verify it via `config show`."""
    ui = MockReadlineUI(command_sequence=[
        "config set graph_recursion_depth 5", "config show", "exit"])
    ui.run_ui()

    outputs = ui.observers["screen_outputs"]
    self.assertEqual(
        ["Command-line configuration:",
         "",
         "  graph_recursion_depth: 5"], outputs[1].lines[:3])
if __name__ == "__main__":
  # Stray trailing "|" extraction artifact removed from the main() call.
  googletest.main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.