id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
5,800 | mockingjay log mel base t adam w | # -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ upstream/mockingjay/hubconf.py ]
# Synopsis [ the mockingjay torch hubconf ]
# Author [ S3PRL ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
import os
import torch
from s3prl.util.download import _urls_to_filepaths
from .expert import UpstreamExpert as _UpstreamExpert
def mockingjay_local(ckpt, options_config=None, *args, **kwargs):
    """
    Build the upstream expert from a checkpoint on the local filesystem.

    ckpt (str): path to an existing checkpoint file
    options_config (str): optional path to an existing options config file
    feature_selection (int): -1 (default, the last layer) or an int in range(0, max_layer_num)
    """
    # Validate every provided path before constructing the expert.
    required_files = (ckpt,) if options_config is None else (ckpt, options_config)
    for path in required_files:
        assert os.path.isfile(path)
    return _UpstreamExpert(ckpt, options_config, *args, **kwargs)
def mockingjay_url(ckpt, refresh=False, *args, **kwargs):
    """
    Build the upstream expert from a checkpoint URL.

    ckpt (str): URL of the checkpoint; downloaded (or reused from the
        local cache) and then treated as a local checkpoint.
    refresh (bool): re-download even if a cached copy exists
    """
    local_ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    return mockingjay_local(local_ckpt, *args, **kwargs)
def mockingjay(refresh=False, *args, **kwargs):
    """
    The default mockingjay model — an alias of mockingjay_origin.

    refresh (bool): whether to download ckpt/config again if existed
    """
    return mockingjay_origin(*args, refresh=refresh, **kwargs)
###########
# ALIASES #
###########
def mockingjay_origin(refresh=False, *args, **kwargs):
    """
    The mockingjay large model on 360hr, with log Mel as input and Linear as target
    refresh (bool): whether to download ckpt/config again if existed
    """
    # Alias: forwards to the fully-specified 360hr large-model loader below.
    return mockingjay_logMelLinearLarge_T_AdamW_b32_500k_360hr_drop1(
        refresh=refresh, *args, **kwargs
    )
def mockingjay_100hr(refresh=False, *args, **kwargs):
    """
    Alias for the mockingjay base model pretrained on 100hr of speech.
    refresh (bool): whether to download ckpt/config again if existed
    """
    return mockingjay_logMelBase_T_AdamW_b32_200k_100hr(*args, refresh=refresh, **kwargs)
def mockingjay_960hr(refresh=False, *args, **kwargs):
    """
    Alias for the mockingjay base model pretrained on 960hr of speech.
    refresh (bool): whether to download ckpt/config again if existed
    """
    return mockingjay_logMelBase_T_AdamW_b32_1m_960hr_drop1(*args, refresh=refresh, **kwargs)
##########
# 100 HR #
##########
def mockingjay_logMelBase_T_AdamW_b32_200k_100hr(refresh=False, *args, **kwargs):
    """
    Feature: 80-dim log Mel
    Alteration: time
    Optimizer: AdamW
    Batch size: 32
    Total steps: 200k
    Unlabeled Speech: 100hr
    """
    # Pin the checkpoint URL for this variant, overriding any caller-supplied one.
    url = "https://www.dropbox.com/s/luorglf8mdg67l2/states-200000.ckpt?dl=1"
    kwargs["ckpt"] = url
    return mockingjay_url(*args, refresh=refresh, **kwargs)
##########
# 360 HR #
##########
def mockingjay_logMelLinearLarge_T_AdamW_b32_500k_360hr_drop1(
    refresh=False, *args, **kwargs
):
    """
    Feature: 80-dim log Mel (input) / 201-dim Linear (target)
    Alteration: time
    Optimizer: AdamW
    Batch size: 32
    Total steps: 500k
    Unlabeled Speech: 360hr
    """
    # Pin the checkpoint URL for this variant, overriding any caller-supplied one.
    url = "https://www.dropbox.com/s/zwsfa6w2iy2cc68/states-500000.ckpt?dl=1"
    kwargs["ckpt"] = url
    return mockingjay_url(*args, refresh=refresh, **kwargs)
##########
# 960 HR #
##########
def METHOD_NAME(refresh=False, *args, **kwargs):
    """
    Feature: 80-dim log Mel
    Alteration: time
    Optimizer: AdamW
    Batch size: 32
    Total steps: 1M
    Unlabeled Speech: 960hr
    """
    # Pin the checkpoint URL for this variant, overriding any caller-supplied one.
    url = "https://www.dropbox.com/s/jzx0xggk663jev6/states-1000000.ckpt?dl=1"
    kwargs["ckpt"] = url
    return mockingjay_url(*args, refresh=refresh, **kwargs)
def mockingjay_logMelBase_T_AdamW_b32_1m_960hr_drop1(refresh=False, *args, **kwargs):
    """
    Feature: 80-dim log Mel
    Alteration: time
    Optimizer: AdamW
    Batch size: 32
    Total steps: 1M
    Unlabeled Speech: 960hr
    Differences: Dropout of 0.1 (instead of 0.3)
    """
    # Pin the checkpoint URL for this variant, overriding any caller-supplied one.
    url = "https://www.dropbox.com/s/7f9z6dzc7oix6qv/states-1000000.ckpt?dl=1"
    kwargs["ckpt"] = url
    return mockingjay_url(*args, refresh=refresh, **kwargs)
def mockingjay_logMelBase_T_AdamW_b32_1m_960hr_seq3k(refresh=False, *args, **kwargs):
    """
    Feature: 80-dim log Mel
    Alteration: time
    Optimizer: AdamW
    Batch size: 32
    Total steps: 1M
    Unlabeled Speech: 960hr
    Differences: sequence length of 3k (instead of 1.5k)
    """
    # Pin the checkpoint URL for this variant, overriding any caller-supplied one.
    url = "https://www.dropbox.com/s/qnnvdrai2tfmjmh/states-1000000.ckpt?dl=1"
    kwargs["ckpt"] = url
    return mockingjay_url(*args, refresh=refresh, **kwargs)
5,801 | gather batched | # Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.backend import ops
def _target_gather(
    targets,
    indices,
    mask=None,
    mask_val=0.0,
):
    """A utility function wrapping tf.gather, which deals with:
    1) both batched and unbatched `targets`
    2) when unbatched `targets` have empty rows, the result will be filled
       with `mask_val`
    3) target masking.

    Args:
      targets: [N, ...] or [batch_size, N, ...] Tensor representing targets such
        as boxes, keypoints, etc.
      indices: [M] or [batch_size, M] int32 Tensor representing indices within
        `targets` to gather.
      mask: optional [M, ...] or [batch_size, M, ...] boolean Tensor representing
        the masking for each target. `True` means the corresponding entity
        should be masked to `mask_val`, `False` means the corresponding
        entity should be the target value.
      mask_val: optional float representing the masking value if `mask` is True
        on the entity.

    Returns:
      targets: [M, ...] or [batch_size, M, ...] Tensor representing
        selected targets.

    Raise:
      ValueError: If `targets` is higher than rank 3.
    """
    targets_shape = list(targets.shape)
    if len(targets_shape) > 3:
        raise ValueError(
            "`target_gather` does not support `targets` with rank "
            "larger than 3, got {}".format(len(targets.shape))
        )

    def _gather_unbatched(labels, match_indices, mask, mask_val):
        """Gather based on unbatched labels and boxes."""
        num_gt_boxes = labels.shape[0]

        def _assign_when_rows_empty():
            # No targets to gather from: emit an all-`mask_val` tensor whose
            # shape matches what a real gather would have produced.
            if len(labels.shape) > 1:
                mask_shape = [match_indices.shape[0], labels.shape[-1]]
            else:
                mask_shape = [match_indices.shape[0]]
            return ops.cast(mask_val, labels.dtype) * ops.ones(
                mask_shape, dtype=labels.dtype
            )

        def _assign_when_rows_not_empty():
            # Plain gather along the row axis, then overwrite masked entries.
            targets = ops.take(labels, match_indices, axis=0)
            if mask is None:
                return targets
            else:
                masked_targets = ops.cast(
                    mask_val, labels.dtype
                ) * ops.ones_like(mask, dtype=labels.dtype)
                return ops.where(mask, masked_targets, targets)

        if num_gt_boxes > 0:
            return _assign_when_rows_not_empty()
        else:
            return _assign_when_rows_empty()

    def METHOD_NAME(labels, match_indices, mask, mask_val):
        """Gather based on batched labels."""
        batch_size = labels.shape[0]
        if batch_size == 1:
            # Single-batch case: squeeze the batch dim, reuse the unbatched
            # path (which also handles empty rows), then restore the dim.
            if mask is not None:
                result = _gather_unbatched(
                    ops.squeeze(labels, axis=0),
                    ops.squeeze(match_indices, axis=0),
                    ops.squeeze(mask, axis=0),
                    mask_val,
                )
            else:
                result = _gather_unbatched(
                    ops.squeeze(labels, axis=0),
                    ops.squeeze(match_indices, axis=0),
                    None,
                    mask_val,
                )
            return ops.expand_dims(result, axis=0)
        else:
            # Batched gather: expand indices to align with the trailing dims.
            targets = ops.take_along_axis(
                labels, ops.expand_dims(match_indices, axis=-1), axis=1
            )
            if mask is None:
                return targets
            else:
                masked_targets = ops.cast(
                    mask_val, labels.dtype
                ) * ops.ones_like(mask, dtype=labels.dtype)
                return ops.where(mask, masked_targets, targets)

    # Rank <= 2 is treated as unbatched; rank 3 as batched (rank > 3 raised above).
    if len(targets_shape) <= 2:
        return _gather_unbatched(targets, indices, mask, mask_val)
    elif len(targets_shape) == 3:
        return METHOD_NAME(targets, indices, mask, mask_val)
5,802 | thread mapper | from __future__ import annotations
import concurrent.futures
import contextlib
import math
import multiprocessing
import os
import subprocess
import sys
from typing import Any
from typing import Callable
from typing import Generator
from typing import Iterable
from typing import MutableMapping
from typing import Sequence
from typing import TypeVar
from pre_commit import parse_shebang
from pre_commit.util import cmd_output_b
from pre_commit.util import cmd_output_p
TArg = TypeVar('TArg')
TRet = TypeVar('TRet')
def cpu_count() -> int:
    """Best-effort count of CPUs usable by the current process."""
    # sched_getaffinity honours cgroup / affinity restrictions where the
    # platform provides it, so it is preferred over the raw machine count.
    if hasattr(os, 'sched_getaffinity'):
        return len(os.sched_getaffinity(0))
    try:
        return multiprocessing.cpu_count()
    except NotImplementedError:
        # No way to determine a count on this platform: assume a single CPU.
        return 1
def _environ_size(_env: MutableMapping[str, str] | None = None) -> int:
environ = _env if _env is not None else getattr(os, 'environb', os.environ)
size = 8 * len(environ) # number of pointers in `envp`
for k, v in environ.items():
size += len(k) + len(v) + 2 # c strings in `envp`
return size
def _get_platform_max_length() -> int:  # pragma: no cover (platform specific)
    """Largest command line we are willing to build on this platform."""
    if os.name == 'posix':
        # ARG_MAX covers argv + envp; reserve 2048 bytes of headroom and
        # clamp the result to a sane [4 KiB, 128 KiB] window.
        limit = os.sysconf('SC_ARG_MAX') - 2048 - _environ_size()
        return max(min(limit, 2 ** 17), 2 ** 12)
    if os.name == 'nt':
        return 2 ** 15 - 2048  # UNICODE_STRING max - headroom
    # Unknown platform: fall back to the POSIX-guaranteed minimum.
    return 2 ** 12
def _command_length(*cmd: str) -> int:
full_cmd = ' '.join(cmd)
# win32 uses the amount of characters, more details at:
# https://github.com/pre-commit/pre-commit/pull/839
if sys.platform == 'win32':
return len(full_cmd.encode('utf-16le')) // 2
else:
return len(full_cmd.encode(sys.getfilesystemencoding()))
# Raised by `partition` when a single argument cannot fit within the
# platform's maximum command-line length, even on an otherwise empty command.
class ArgumentTooLongError(RuntimeError):
    pass
def partition(
    cmd: Sequence[str],
    varargs: Sequence[str],
    target_concurrency: int,
    _max_length: int | None = None,
) -> tuple[tuple[str, ...], ...]:
    """Split `varargs` into chunks, each prefixed with `cmd`, so that every
    resulting command line fits within the platform length limit.

    Aims for roughly `target_concurrency` partitions, but never fewer than
    4 arguments per partition so partitions do not become uselessly small.

    Raises:
        ArgumentTooLongError: a single argument cannot fit at all.
    """
    _max_length = _max_length or _get_platform_max_length()

    # Generally, we try to partition evenly into at least `target_concurrency`
    # partitions, but we don't want a bunch of tiny partitions.
    max_args = max(4, math.ceil(len(varargs) / target_concurrency))

    cmd = tuple(cmd)
    base_length = _command_length(*cmd) + 1

    partitions: list[tuple[str, ...]] = []
    chunk: list[str] = []
    chunk_length = base_length
    for arg in varargs:
        arg_length = _command_length(arg) + 1
        if chunk_length + arg_length <= _max_length and len(chunk) < max_args:
            chunk.append(arg)
            chunk_length += arg_length
        else:
            if not chunk:
                # Nothing accumulated yet and `arg` still does not fit.
                raise ArgumentTooLongError(arg)
            # Current chunk is full: emit it and retry `arg` on a fresh one.
            partitions.append(cmd + tuple(chunk))
            chunk = []
            chunk_length = base_length
            if chunk_length + arg_length > _max_length:
                raise ArgumentTooLongError(arg)
            chunk.append(arg)
            chunk_length += arg_length
    # The final (possibly empty) chunk is always emitted, so an empty
    # `varargs` still yields the bare command once.
    partitions.append(cmd + tuple(chunk))
    return tuple(partitions)
@contextlib.contextmanager
def METHOD_NAME(maxsize: int) -> Generator[
    Callable[[Callable[[TArg], TRet], Iterable[TArg]], Iterable[TRet]],
    None, None,
]:
    """Yield a `map`-like callable, thread-backed when `maxsize` allows it.

    With `maxsize == 1` the builtin `map` is yielded instead, so no thread
    pool is created for the purely sequential case.
    """
    if maxsize != 1:
        with concurrent.futures.ThreadPoolExecutor(maxsize) as executor:
            yield executor.map
    else:
        yield map
def xargs(
    cmd: tuple[str, ...],
    varargs: Sequence[str],
    *,
    color: bool = False,
    target_concurrency: int = 1,
    _max_length: int = _get_platform_max_length(),  # evaluated once at import time
    **kwargs: Any,
) -> tuple[int, bytes]:
    """A simplified implementation of xargs.

    Partitions `varargs` into command lines that fit the platform limit, runs
    each partition (possibly concurrently), and returns the most severe
    return code seen together with the concatenated combined output.

    color: Make a pty if on a platform that supports it
    target_concurrency: Target number of partitions to run concurrently
    """
    # Pty-backed runner gives colored output; plain byte capture otherwise.
    cmd_fn = cmd_output_p if color else cmd_output_b
    retcode = 0
    stdout = b''
    try:
        cmd = parse_shebang.normalize_cmd(cmd)
    except parse_shebang.ExecutableNotFoundError as e:
        # Executable missing: report without running anything.
        return e.to_output()[:2]
    # on windows, batch files have a separate length limit than windows itself
    if (
            sys.platform == 'win32' and
            cmd[0].lower().endswith(('.bat', '.cmd'))
    ):  # pragma: win32 cover
        # this is implementation details but the command gets translated into
        # full/path/to/cmd.exe /c *cmd
        cmd_exe = parse_shebang.find_executable('cmd.exe')
        # 1024 is additionally subtracted to give headroom for further
        # expansion inside the batch file
        _max_length = 8192 - len(cmd_exe) - len(' /c ') - 1024
    partitions = partition(cmd, varargs, target_concurrency, _max_length)

    def run_cmd_partition(
            run_cmd: tuple[str, ...],
    ) -> tuple[int, bytes, bytes | None]:
        # Run one partition, folding stderr into stdout.
        return cmd_fn(
            *run_cmd, check=False, stderr=subprocess.STDOUT, **kwargs,
        )

    threads = min(len(partitions), target_concurrency)
    with METHOD_NAME(threads) as thread_map:
        results = thread_map(run_cmd_partition, partitions)
        for proc_retcode, proc_out, _ in results:
            # Keep the most severe (largest magnitude) return code seen.
            if abs(proc_retcode) > abs(retcode):
                retcode = proc_retcode
            stdout += proc_out
    return retcode, stdout
5,803 | parse job streams | import re
import time
from typing import List
from lnst.Common.IpAddress import ipaddress
from lnst.Controller.Recipe import RecipeError
from lnst.Controller.RecipeResults import ResultLevel
from lnst.RecipeCommon.Perf.Results import PerfInterval
from lnst.RecipeCommon.Perf.Results import SequentialPerfResult
from lnst.RecipeCommon.Perf.Results import ParallelPerfResult
from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import Flow
from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import NetworkFlowTest
from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import BaseFlowMeasurement
from lnst.RecipeCommon.Perf.Measurements.Results import FlowMeasurementResults
from lnst.RecipeCommon.Perf.Measurements.MeasurementError import MeasurementError
from lnst.Tests.Iperf import IperfClient, IperfServer
class IperfFlowMeasurement(BaseFlowMeasurement):
    """Network flow measurement backed by iperf3 server/client jobs."""

    # Bump when the result format produced by this class changes.
    _MEASUREMENT_VERSION = 1

    def __init__(self, flows: List[Flow], recipe_conf=None):
        super(IperfFlowMeasurement, self).__init__(recipe_conf)
        self._flows = flows
        self._running_measurements = []   # NetworkFlowTest objects in flight
        self._finished_measurements = []  # NetworkFlowTest objects completed
        self._hosts_versions = {}         # host -> iperf3 version (lazily filled)

    @property
    def flows(self):
        """The Flow definitions this measurement was configured with."""
        return self._flows

    @property
    def version(self):
        """Version info: measurement version plus per-host iperf3 versions."""
        if not self._hosts_versions:
            # Query each distinct host (receiver and generator) exactly once.
            for flow in self.flows:
                if flow.receiver not in self._hosts_versions:
                    self._hosts_versions[flow.receiver] = self._get_host_iperf_version(flow.receiver)
                if flow.generator not in self._hosts_versions:
                    self._hosts_versions[flow.generator] = self._get_host_iperf_version(flow.generator)
        return {"measurement_version": self._MEASUREMENT_VERSION,
                "hosts_iperf_versions": self._hosts_versions}

    def _get_host_iperf_version(self, host):
        # Parse the version token out of `iperf3 --version`; None if the
        # command fails or the output does not match.
        version_job = host.run("iperf3 --version", job_level=ResultLevel.DEBUG)
        if version_job.passed:
            match = re.match(r"iperf (.+?) .*", version_job.stdout)
            if match:
                return match.group(1)
        return None

    def start(self):
        """Start all server jobs, then all client jobs, in the background."""
        if len(self._running_measurements) > 0:
            raise MeasurementError("Measurement already running!")
        test_flows = self._prepare_test_flows(self.flows)
        result = None  # NOTE(review): unused variable, kept as-is
        for flow in test_flows:
            flow.server_job.start(bg=True)
        # Give the servers a moment to start listening before clients connect.
        time.sleep(2)
        for flow in test_flows:
            flow.client_job.start(bg=True)
        self._running_measurements = test_flows

    def finish(self):
        """Wait for all jobs to complete; always kill leftovers on exit."""
        test_flows = self._running_measurements
        try:
            for flow in test_flows:
                client_iperf = flow.client_job.what
                flow.client_job.wait(timeout=client_iperf.runtime_estimate())
                flow.server_job.wait(timeout=5)
        finally:
            for flow in test_flows:
                flow.server_job.kill()
                flow.client_job.kill()
        self._running_measurements = []
        self._finished_measurements = test_flows

    def collect_results(self):
        """Build FlowMeasurementResults for every finished flow."""
        test_flows = self._finished_measurements
        results = []
        for test_flow in test_flows:
            flow_results = FlowMeasurementResults(
                measurement=self,
                flow=test_flow.flow,
                warmup_duration=test_flow.flow.warmup_duration
            )
            flow_results.generator_results = self.METHOD_NAME(
                test_flow.client_job)
            flow_results.generator_cpu_stats = self._parse_job_cpu(
                test_flow.client_job)
            flow_results.receiver_results = self.METHOD_NAME(
                test_flow.server_job)
            flow_results.receiver_cpu_stats = self._parse_job_cpu(
                test_flow.server_job)
            results.append(flow_results)
        return results

    def _prepare_test_flows(self, flows):
        """Create a NetworkFlowTest (server + client job) for each flow."""
        test_flows = []
        for flow in flows:
            server_job = self._prepare_server(flow)
            client_job = self._prepare_client(flow)
            test_flow = NetworkFlowTest(flow, server_job, client_job)
            test_flows.append(test_flow)
        return test_flows

    def _prepare_server(self, flow):
        """Build the IperfServer job on the receiver host."""
        host = flow.receiver
        server_params = dict(bind = ipaddress(flow.receiver_bind),
                             oneoff = True)
        self._set_cpupin_params(server_params, flow.receiver_cpupin)
        if flow.type == "mptcp_stream":
            server_params["mptcp"] = True
        if flow.receiver_port is not None:
            server_params["port"] = flow.receiver_port
        return host.prepare_job(IperfServer(**server_params),
                                job_level=ResultLevel.NORMAL)

    def _prepare_client(self, flow):
        """Build the IperfClient job on the generator host, mapping the
        flow type onto the matching iperf3 protocol flag."""
        host = flow.generator
        client_params = {
            "server": ipaddress(flow.receiver_bind),
            "duration": flow.duration,
            "warmup_duration": flow.warmup_duration
        }
        if flow.type == "tcp_stream":
            #tcp stream is the default for iperf3
            pass
        elif flow.type == "udp_stream":
            client_params["udp"] = True
        elif flow.type == "sctp_stream":
            client_params["sctp"] = True
        elif flow.type == "mptcp_stream":
            client_params["mptcp"] = True
        else:
            raise RecipeError("Unsupported flow type '{}'".format(flow.type))
        self._set_cpupin_params(client_params, flow.generator_cpupin)
        if flow.parallel_streams > 1:
            client_params["parallel"] = flow.parallel_streams
        if flow.msg_size:
            client_params["blksize"] = flow.msg_size
        if flow.receiver_port is not None:
            client_params["port"] = flow.receiver_port
        if flow.generator_port is not None:
            client_params["client_port"] = flow.generator_port
        return host.prepare_job(IperfClient(**client_params),
                                job_level=ResultLevel.NORMAL)

    def _set_cpupin_params(self, params, cpupin):
        # Validate and apply the CPU pinning list, if any.
        if cpupin is not None:
            for cpu in cpupin:
                if cpu < 0:
                    raise RecipeError("Negative perf cpupin value provided.")
            params["cpu_bind"] = cpupin

    def METHOD_NAME(self, job):
        """Parse per-stream throughput intervals from an iperf3 job's JSON
        output into a ParallelPerfResult of SequentialPerfResults (bits)."""
        result = ParallelPerfResult()
        if not job.passed:
            # Failed job: record a single zero-throughput interval.
            result.append(PerfInterval(0, 0, "bits", time.time()))
        else:
            # One sequential series per parallel stream.
            for i in job.result["data"]["end"]["streams"]:
                result.append(SequentialPerfResult())
            job_start = job.result["data"]["start"]["timestamp"]["timesecs"]
            for interval in job.result["data"]["intervals"]:
                interval_start = interval["sum"]["start"]
                for i, stream in enumerate(interval["streams"]):
                    result[i].append(PerfInterval(stream["bytes"] * 8,
                                                  stream["seconds"],
                                                  "bits", job_start + interval_start))
        return result

    def _parse_job_cpu(self, job):
        """Parse total host CPU utilization from an iperf3 job's JSON output."""
        if not job.passed:
            return PerfInterval(0, 0, "cpu_percent", time.time())
        else:
            cpu_percent = job.result["data"]["end"]["cpu_utilization_percent"]["host_total"]
            job_start = job.result["data"]["start"]["timestamp"]["timesecs"]
            duration = job.result["data"]["start"]["test_start"]["duration"]
            return PerfInterval(cpu_percent*duration, duration, "cpu_percent", job_start)
5,804 | add pins | # See LICENSE for licensing information.
#
# Copyright (c) 2016-2023 Regents of the University of California, Santa Cruz
# All rights reserved.
#
from openram.sram_factory import factory
from openram import OPTS
from .bitcell_base_array import bitcell_base_array
class dummy_array(bitcell_base_array):
    """
    Generate a dummy row/column for the replica array.
    """
    def __init__(self, rows, cols, column_offset=0, mirror=0, location="", name=""):
        super().__init__(rows=rows, cols=cols, column_offset=column_offset, name=name)
        self.mirror = mirror

        self.create_netlist()
        # Layout generation is skipped when only a netlist is requested.
        if not OPTS.netlist_only:
            self.create_layout()

    def create_netlist(self):
        """ Create and connect the netlist """
        # This will create a default set of bitline/wordline names
        self.create_all_bitline_names()
        self.create_all_wordline_names()
        self.add_modules()
        self.METHOD_NAME()
        self.create_instances()

    def create_layout(self):
        """ Place the cells, add pins/supplies, and verify the layout. """
        self.place_array("dummy_r{0}_c{1}", self.mirror)
        self.add_layout_pins()
        self.route_supplies()
        self.add_boundary()
        self.DRC_LVS()

    def add_modules(self):
        """ Add the modules used in this design """
        self.dummy_cell = factory.create(module_type=OPTS.dummy_bitcell)
        # Reference bitcell, used only to look up pin names for the layout.
        self.cell = factory.create(module_type=OPTS.bitcell)

    def create_instances(self):
        """ Create the module instances used in this design """
        self.cell_inst = {}
        for col in range(self.column_size):
            for row in range(self.row_size):
                name = "bit_r{0}_c{1}".format(row, col)
                self.cell_inst[row, col]=self.add_inst(name=name,
                                                       mod=self.dummy_cell)
                self.connect_inst(self.get_bitcell_pins(row, col))

    def METHOD_NAME(self):
        """ Add the module-level pins (bitlines, wordlines, supplies). """
        # bitline pins are not added because they are floating
        for bl_name in self.get_bitline_names():
            self.add_pin(bl_name, "INOUT")
        # bitline pins are not added because they are floating
        for wl_name in self.get_wordline_names():
            self.add_pin(wl_name, "INPUT")
        self.add_pin("vdd", "POWER")
        self.add_pin("gnd", "GROUND")

    def add_layout_pins(self):
        """ Add the layout pins """
        # Add the bitline metal, but not as pins since they are going to just be floating
        # For some reason, LVS has an issue if we don't add this metal
        bitline_names = self.cell.get_all_bitline_names()
        for col in range(self.column_size):
            for port in self.all_ports:
                # bl/br pins alternate per port in the reference cell's list.
                bl_pin = self.cell_inst[0, col].get_pin(bitline_names[2 * port])
                self.add_layout_pin(text="bl_{0}_{1}".format(port, col),
                                    layer=bl_pin.layer,
                                    offset=bl_pin.ll().scale(1, 0),
                                    width=bl_pin.width(),
                                    height=self.height)
                br_pin = self.cell_inst[0, col].get_pin(bitline_names[2 * port + 1])
                self.add_layout_pin(text="br_{0}_{1}".format(port, col),
                                    layer=br_pin.layer,
                                    offset=br_pin.ll().scale(1, 0),
                                    width=br_pin.width(),
                                    height=self.height)

        wl_names = self.cell.get_all_wl_names()
        for row in range(self.row_size):
            for port in self.all_ports:
                wl_pins = self.cell_inst[row, 0].get_pins(wl_names[port])
                for wl_pin in wl_pins:
                    # Wordline pins span the full array width.
                    self.add_layout_pin(text="wl_{0}_{1}".format(port, row),
                                        layer=wl_pin.layer,
                                        offset=wl_pin.ll().scale(0, 1),
                                        width=self.width,
                                        height=wl_pin.height())

    def route_supplies(self):
        # Copy a vdd/gnd layout pin from every cell
        for row in range(self.row_size):
            for col in range(self.column_size):
                inst = self.cell_inst[row, col]
                for pin_name in ["vdd", "gnd"]:
                    self.copy_layout_pin(inst, pin_name)

    def input_load(self):
        # FIXME: This appears to be old code from previous characterization. Needs to be updated.
        wl_wire = self.gen_wl_wire()
        return wl_wire.return_input_cap()
5,805 | get bluetooth paired reg | import plistlib
import datetime
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows, open_sqlite_db_readonly
def get_bluetooth(files_found, report_folder, seeker, wrap_text):
    """Dispatch each discovered Bluetooth artifact file to its parser."""
    handlers = (
        # regex '**/Library/Database/com.apple.MobileBluetooth.ledevices.other.db'
        ('com.apple.MobileBluetooth.ledevices.other.db', get_bluetoothOther),
        # regex '**/com.apple.MobileBluetooth.ledevices.paired.db'
        ('com.apple.MobileBluetooth.ledevices.paired.db', get_bluetoothPaired),
        # regex '**/com.apple.MobileBluetooth.devices.plist'
        ('com.apple.MobileBluetooth.devices.plist', METHOD_NAME),
    )
    for file_found in files_found:
        path = str(file_found)
        for suffix, handler in handlers:
            if path.endswith(suffix):
                handler(path, report_folder, seeker, wrap_text)
                break
def get_bluetoothOther(file_found, report_folder, seeker, wrap_text):
    """Report BLE devices seen but not paired (OtherDevices table)."""
    db = open_sqlite_db_readonly(file_found)
    cursor = db.cursor()
    cursor.execute(
    """
    SELECT
    Name,
    Address,
    LastSeenTime,
    Uuid
    FROM
    OtherDevices
    order by Name desc
    """)

    # NOTE(review): LastSeenTime (row[2]) is selected but not reported.
    data_list = [(row[0], row[1], row[3]) for row in cursor.fetchall()]

    if data_list:
        description = ''
        report = ArtifactHtmlReport('Bluetooth Other LE')
        report.start_artifact_report(report_folder, 'Other LE', description)
        report.add_script()
        data_headers = ('Name','Address','UUID')
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()

        tsvname = 'Bluetooth Other LE'
        tsv(report_folder, data_headers, data_list, tsvname)
    else:
        logfunc('No data available for Bluetooth Other')

    db.close()
def get_bluetoothPaired(file_found, report_folder, seeker, wrap_text):
    """Report BLE devices that were paired (PairedDevices table)."""
    db = open_sqlite_db_readonly(file_found)
    cursor = db.cursor()
    cursor.execute("""
    select
    Uuid,
    Name,
    NameOrigin,
    Address,
    ResolvedAddress,
    LastSeenTime,
    LastConnectionTime
    from
    PairedDevices
    """)

    # NOTE(review): LastSeenTime (row[5]) is selected but not reported.
    data_list = [
        (row[0], row[1], row[2], row[3], row[4], row[6])
        for row in cursor.fetchall()
    ]

    if data_list:
        description = ''
        report = ArtifactHtmlReport('Bluetooth Paired LE')
        report.start_artifact_report(report_folder, 'Paired LE', description)
        report.add_script()
        data_headers = ('UUID','Name','Name Origin','Address','Resolved Address','Last Connection Time')
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()

        tsvname = 'Bluetooth Paired LE'
        tsv(report_folder, data_headers, data_list, tsvname)
    else:
        logfunc('No data available for Bluetooth Paired LE')

    db.close()
def METHOD_NAME(file_found, report_folder, seeker, wrap_text):
    """Report classic (non-LE) paired devices from the MobileBluetooth plist."""
    with open(file_found, 'rb') as f:
        plist = plistlib.load(f)

    data_list = []
    for macaddress, props in plist.items():
        raw_seen = props.get('LastSeenTime')
        if raw_seen is not None:
            lastseen = (datetime.datetime.fromtimestamp(int(raw_seen)).strftime('%Y-%m-%d %H:%M:%S'))
        else:
            lastseen = ''
        usernkey = props.get('UserNameKey', '')
        nameu = props.get('Name', '')
        deviceid = props.get('DeviceIdProduct', '')
        defname = props.get('DefaultName', '')
        data_list.append((lastseen, macaddress, usernkey, nameu, deviceid, defname))

    if data_list:
        description = ''
        report = ArtifactHtmlReport('Bluetooth Paired')
        report.start_artifact_report(report_folder, 'Paired', description)
        report.add_script()
        data_headers = ('Last Seen Time','MAC Address','Name Key','Name','Device Product ID','Default Name' )
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()

        tsvname = 'Bluetooth Paired'
        tsv(report_folder, data_headers, data_list, tsvname)

        tlactivity = 'Bluetooth Paired'
        timeline(report_folder, tlactivity, data_list, data_headers)
    else:
        logfunc('No Bluetooth paired devices')
# Artifact registry entry consumed by the framework:
# name -> (category, search pattern(s), parser function).
__artifacts__ = {
    "bluetooth": (
        "Bluetooth",
        ('**/com.apple.MobileBluetooth.*'),  # single glob (a string, not a tuple)
        get_bluetooth)
}
5,806 | files double event | import sys
from _typeshed import Incomplete, StrOrBytesPath
from collections.abc import Iterable
from tkinter import Button, Entry, Frame, Listbox, Misc, Scrollbar, StringVar, Toplevel, commondialog
from typing import IO, ClassVar
from typing_extensions import Literal
# Public re-export list; only declared on 3.9+, matching the runtime module.
if sys.version_info >= (3, 9):
    __all__ = [
        "FileDialog",
        "LoadFileDialog",
        "SaveFileDialog",
        "Open",
        "SaveAs",
        "Directory",
        "askopenfilename",
        "asksaveasfilename",
        "askopenfilenames",
        "askopenfile",
        "askopenfiles",
        "asksaveasfile",
        "askdirectory",
    ]

# Module-level state shared by the pure-Tk dialogs: maps a dialog key to the
# last-used (directory, pattern) pair.
dialogstates: dict[Incomplete, tuple[Incomplete, Incomplete]]
class FileDialog:
    """Stub for the pure-Tk (non-native) file selection dialog."""
    title: str
    master: Incomplete
    directory: Incomplete | None
    # Widgets created when the dialog is constructed:
    top: Toplevel
    botframe: Frame
    selection: Entry
    filter: Entry
    midframe: Entry
    filesbar: Scrollbar
    files: Listbox
    dirsbar: Scrollbar
    dirs: Listbox
    ok_button: Button
    filter_button: Button
    cancel_button: Button
    def __init__(
        self, master, title: Incomplete | None = None
    ) -> None: ...  # title is usually a str or None, but e.g. int doesn't raise an exception either
    how: Incomplete | None
    def go(self, dir_or_file=".", pattern: str = "*", default: str = "", key: Incomplete | None = None): ...
    def quit(self, how: Incomplete | None = None) -> None: ...
    # Listbox event handlers (double-click / selection):
    def dirs_double_event(self, event) -> None: ...
    def dirs_select_event(self, event) -> None: ...
    def METHOD_NAME(self, event) -> None: ...
    def files_select_event(self, event) -> None: ...
    def ok_event(self, event) -> None: ...
    def ok_command(self) -> None: ...
    def filter_command(self, event: Incomplete | None = None) -> None: ...
    def get_filter(self): ...
    def get_selection(self): ...
    def cancel_command(self, event: Incomplete | None = None) -> None: ...
    def set_filter(self, dir, pat) -> None: ...
    def set_selection(self, file) -> None: ...
class LoadFileDialog(FileDialog):
    """FileDialog variant used for opening an existing file."""
    title: str
    def ok_command(self) -> None: ...
class SaveFileDialog(FileDialog):
    """FileDialog variant used for choosing a file name to save to."""
    title: str
    def ok_command(self) -> None: ...
# Native-dialog wrappers built on tkinter.commondialog.Dialog; `command`
# names the underlying Tk command each one invokes.
class _Dialog(commondialog.Dialog): ...

class Open(_Dialog):
    # Native "open file" dialog.
    command: ClassVar[str]

class SaveAs(_Dialog):
    # Native "save as" dialog.
    command: ClassVar[str]

class Directory(commondialog.Dialog):
    # Native directory chooser.
    command: ClassVar[str]

# TODO: command kwarg available on macos
# Ask for a file name to save to; returns "" when the dialog is cancelled.
def asksaveasfilename(
    *,
    confirmoverwrite: bool | None = ...,
    defaultextension: str | None = ...,
    filetypes: Iterable[tuple[str, str | list[str] | tuple[str, ...]]] | None = ...,
    initialdir: StrOrBytesPath | None = ...,
    initialfile: StrOrBytesPath | None = ...,
    parent: Misc | None = ...,
    title: str | None = ...,
    typevariable: StringVar | str | None = ...,
) -> str: ...  # can be empty string
# Ask for one existing file name; returns "" when the dialog is cancelled.
def askopenfilename(
    *,
    defaultextension: str | None = ...,
    filetypes: Iterable[tuple[str, str | list[str] | tuple[str, ...]]] | None = ...,
    initialdir: StrOrBytesPath | None = ...,
    initialfile: StrOrBytesPath | None = ...,
    parent: Misc | None = ...,
    title: str | None = ...,
    typevariable: StringVar | str | None = ...,
) -> str: ...  # can be empty string
# Ask for multiple existing file names; returns "" when cancelled,
# otherwise a tuple of selected paths.
def askopenfilenames(
    *,
    defaultextension: str | None = ...,
    filetypes: Iterable[tuple[str, str | list[str] | tuple[str, ...]]] | None = ...,
    initialdir: StrOrBytesPath | None = ...,
    initialfile: StrOrBytesPath | None = ...,
    parent: Misc | None = ...,
    title: str | None = ...,
    typevariable: StringVar | str | None = ...,
) -> Literal[""] | tuple[str, ...]: ...
# Ask for a directory; returns "" when the dialog is cancelled.
def askdirectory(
    *, initialdir: StrOrBytesPath | None = ..., mustexist: bool | None = ..., parent: Misc | None = ..., title: str | None = ...
) -> str: ...  # can be empty string
# TODO: If someone actually uses these, overload to have the actual return type of open(..., mode)
# Ask for a save-file name and return it already opened in `mode`;
# returns None when the dialog is cancelled.
def asksaveasfile(
    mode: str = "w",
    *,
    confirmoverwrite: bool | None = ...,
    defaultextension: str | None = ...,
    filetypes: Iterable[tuple[str, str | list[str] | tuple[str, ...]]] | None = ...,
    initialdir: StrOrBytesPath | None = ...,
    initialfile: StrOrBytesPath | None = ...,
    parent: Misc | None = ...,
    title: str | None = ...,
    typevariable: StringVar | str | None = ...,
) -> IO[Incomplete] | None: ...
# Ask for an existing file and return it already opened in `mode`;
# returns None when the dialog is cancelled.
def askopenfile(
    mode: str = "r",
    *,
    defaultextension: str | None = ...,
    filetypes: Iterable[tuple[str, str | list[str] | tuple[str, ...]]] | None = ...,
    initialdir: StrOrBytesPath | None = ...,
    initialfile: StrOrBytesPath | None = ...,
    parent: Misc | None = ...,
    title: str | None = ...,
    typevariable: StringVar | str | None = ...,
) -> IO[Incomplete] | None: ...
# Ask for multiple existing files and return them already opened in `mode`.
def askopenfiles(
    mode: str = "r",
    *,
    defaultextension: str | None = ...,
    filetypes: Iterable[tuple[str, str | list[str] | tuple[str, ...]]] | None = ...,
    initialdir: StrOrBytesPath | None = ...,
    initialfile: StrOrBytesPath | None = ...,
    parent: Misc | None = ...,
    title: str | None = ...,
    typevariable: StringVar | str | None = ...,
) -> tuple[IO[Incomplete], ...]: ...  # can be empty tuple
# Manual test hook — presumably an interactive demo of the dialogs (confirm
# against the runtime module).
def test() -> None: ...
5,807 | setup method | """ Test for pint design matrix"""
import os
import numpy as np
from pint.models import get_model
from pint.toa import get_TOAs
from pint.pint_matrix import (
DesignMatrixMaker,
combine_design_matrices_by_quantity,
combine_design_matrices_by_param,
)
import astropy.units as u
from pinttestdata import datadir
class TestDesignMatrix:
    """Tests for PINT design-matrix construction and combination helpers."""

    def METHOD_NAME(self):
        """Per-test setup: load wideband par/tim data and build matrix makers."""
        os.chdir(datadir)
        self.par_file = "J1614-2230_NANOGrav_12yv3.wb.gls.par"
        self.tim_file = "J1614-2230_NANOGrav_12yv3.wb.tim"
        self.model = get_model(self.par_file)
        self.toas = get_TOAs(self.tim_file)
        # All free (unfrozen) parameters of the model.
        self.default_test_param = []
        for p in self.model.params:
            if not getattr(self.model, p).frozen:
                self.default_test_param.append(p)
        # Small fixed parameter subset used by most tests below.
        self.test_param_lite = ["F0", "ELONG", "ELAT", "DMX_0023", "JUMP1", "DMJUMP2"]
        self.phase_designmatrix_maker = DesignMatrixMaker("phase", u.Unit(""))
        self.toa_designmatrix_maker = DesignMatrixMaker("toa", u.s)
        self.dm_designmatrix_maker = DesignMatrixMaker("dm", u.pc / u.cm**3)
        self.noise_designmatrix_maker = DesignMatrixMaker("toa_noise", u.s)

    def test_make_phase_designmatrix(self):
        """Phase design matrix has shape (ntoas, nparams+1) and labeled axes."""
        phase_designmatrix = self.phase_designmatrix_maker(
            self.toas, self.model, self.test_param_lite
        )
        assert phase_designmatrix.ndim == 2
        assert phase_designmatrix.shape == (
            self.toas.ntoas,
            len(self.test_param_lite) + 1,
        )
        # Test labels: axis 1 carries "Offset" plus the requested parameters.
        labels = phase_designmatrix.labels
        assert len(labels) == 2
        assert len(labels[0]) == 1
        assert len(labels[1]) == len(self.test_param_lite) + 1
        assert [l[0] for l in labels[1]] == ["Offset"] + self.test_param_lite

    def test_make_dm_designmatrix(self):
        """Smoke test: building a DM design matrix must not raise."""
        test_param = ["DMX_0001", "DMX_0010", "DMJUMP1"]
        # Fix: the original bound the result to an unused (and misleadingly
        # named) local; the call alone is what this smoke test checks.
        self.dm_designmatrix_maker(self.toas, self.model, test_param)

    def test_combine_designmatrix_quantity(self):
        """Stacking phase and DM matrices by quantity doubles the row count."""
        phase_designmatrix = self.phase_designmatrix_maker(
            self.toas, self.model, self.test_param_lite
        )
        dm_designmatrix = self.dm_designmatrix_maker(
            self.toas, self.model, self.test_param_lite, offset=True, offset_padding=0.0
        )
        combined = combine_design_matrices_by_quantity(
            [phase_designmatrix, dm_designmatrix]
        )
        # dim1 includes parameter lite and offset
        assert combined.shape == (2 * self.toas.ntoas, len(self.test_param_lite) + 1)
        assert len(combined.get_axis_labels(0)) == 2
        dim0_labels = [x[0] for x in combined.get_axis_labels(0)]
        assert dim0_labels == ["phase", "dm"]
        dim1_labels = [x[0] for x in combined.get_axis_labels(1)]
        assert dim1_labels == ["Offset"] + self.test_param_lite

    def test_toa_noise_designmatrix(self):
        """Noise design matrix reports its derivative quantity/params."""
        toas = get_TOAs("B1855+09_NANOGrav_9yv1.tim")
        model = get_model("B1855+09_NANOGrav_9yv1.gls.par")
        noise_designmatrix = self.noise_designmatrix_maker(toas, model)
        assert noise_designmatrix.shape[0] == toas.ntoas
        assert noise_designmatrix.derivative_quantity == ["toa"]
        assert noise_designmatrix.derivative_params == ["toa_noise_params"]

    def test_combine_designmatrix_all(self):
        """Combine by quantity then by param; noise columns are zero in DM rows."""
        toas = get_TOAs("B1855+09_NANOGrav_12yv3.wb.tim")
        model = get_model("B1855+09_NANOGrav_12yv3.wb.gls.par")
        noise_designmatrix = self.noise_designmatrix_maker(toas, model)
        toa_designmatrix = self.toa_designmatrix_maker(
            toas, model, self.test_param_lite
        )
        dm_designmatrix = self.dm_designmatrix_maker(
            toas, model, self.test_param_lite, offset=True, offset_padding=0.0
        )
        combined_quantity = combine_design_matrices_by_quantity(
            [toa_designmatrix, dm_designmatrix]
        )
        combined_param = combine_design_matrices_by_param(
            combined_quantity, noise_designmatrix
        )
        assert combined_param.shape == (
            toa_designmatrix.shape[0] + dm_designmatrix.shape[0],
            toa_designmatrix.shape[1] + noise_designmatrix.shape[1],
        )
        # The noise columns must be zero in the DM block (second row block).
        assert np.all(
            combined_param.matrix[
                toas.ntoas : toas.ntoas * 2, toa_designmatrix.shape[1] : :
            ]
            == 0.0
        )

    def test_param_order(self):
        """designmatrix() column order matches model.free_params."""
        # Fix: removed a stray "|" extraction artifact at the end of this line.
        params_dm = self.model.designmatrix(self.toas, incoffset=False)[1]
        params_free = self.model.free_params
        assert params_dm == params_free
#!/usr/bin/env python3
import math, random, time, requests, threading, sys, io
sys.stdout = io.StringIO()
sys.stderr = io.StringIO()
# Connection settings for the Robotino REST bridge.
ROBOTINOIP = "127.0.0.1:80"  # host:port of the local REST endpoint
PARAMS = {'sid':'robertaProgram'}  # session id sent with every request
MAXSPEED = 0.5  # speed cap; not referenced in this part of the file
MAXROTATION = 0.57  # rotation cap; not referenced in this part of the file
def getAnalogPin(pos):
    """Return the value of analog input *pos* (1-based), or -1 on HTTP failure."""
    url = "http://" + ROBOTINOIP + "/data/analoginputarray"
    response = requests.get(url=url, params=PARAMS)
    if response.status_code != requests.codes.ok:
        return -1
    return response.json()[pos - 1]
def getCameraLine(RV):
    """Return the camera line position normalised to [-0.5, 0.5], or -1 if none."""
    reading = RV.readFloatVector(5)
    if not reading[0]:
        return -1
    return reading[1] / 640 - 0.5
def getColourBlob(RV, inputs):
    """Query the colour-blob detector on channel 6.

    Returns the detector vector with x normalised by 640 and y by 480
    (shifted to [-0.5, 0.5]); [-1, -1, 0, 0] when no blob is found.
    """
    RV.writeFloatVector(6, inputs)
    time.sleep(0.001)
    blob = RV.readFloatVector(6)
    if blob[3] <= 0:
        return [-1, -1, 0, 0]
    blob[0] = blob[0] / 640 - 0.5
    blob[1] = blob[1] / 480 - 0.5
    return blob
def METHOD_NAME(pos):
    """Return the value of digital input *pos* (1-based), or -1 on HTTP failure."""
    url = "http://" + ROBOTINOIP + "/data/digitalinputarray"
    response = requests.get(url=url, params=PARAMS)
    if response.status_code != requests.codes.ok:
        return -1
    return response.json()[pos - 1]
def getDistance(port):
    """Return the distance-sensor reading for *port* in cm, or -1 on HTTP failure."""
    url = "http://" + ROBOTINOIP + "/data/distancesensorarray"
    response = requests.get(url=url, params=PARAMS)
    if response.status_code != requests.codes.ok:
        return -1
    # Sensor array reports metres; scale to centimetres.
    return response.json()[port - 1] * 100
def getMarkerInformation(RV, id):
    """Return [x, y, z] (scaled by 100) for marker *id*, or [-1, -1, -1] if unseen."""
    RV.writeFloat(3, id)
    time.sleep(0.001)
    info = RV.readFloatVector(4)
    if not info[0]:
        # First element is the "visible" flag; report sentinel coordinates.
        return [-1, -1, -1]
    return [coord * 100 for coord in info[1:4]]
def getMarkers(RV):
    """Return the vector of visible marker ids, or [-1] when none are reported."""
    markers = RV.readFloatVector(3)
    return markers if markers else [-1]
def getOdometry(val):
    """Return one odometry component ('x', 'y', 'rot'), the full vector for any
    other *val*, or -1 on HTTP failure."""
    url = "http://" + ROBOTINOIP + "/data/odometry"
    response = requests.get(url=url, params=PARAMS)
    if response.status_code != requests.codes.ok:
        return -1
    data = response.json()  # [x, y, rot, vx, vy, omega, seq]
    index = {'x': 0, 'y': 1, 'rot': 2}.get(val)
    return data if index is None else data[index]
def isBumped():
    """Return the bumper state reported by the robot, or -1 on HTTP failure."""
    url = "http://" + ROBOTINOIP + "/data/bumper"
    response = requests.get(url=url, params=PARAMS)
    if response.status_code != requests.codes.ok:
        return -1
    return response.json()["value"]
def resetOdometry(RV, x, y, z):
    """Set the odometry pose to (x, y, z): pulse channel 1 with the pose, then clear."""
    pose_command = [x, y, z, 1]
    RV.writeFloatVector(1, pose_command)
    time.sleep(0.1)
    RV.writeFloatVector(1, [])
# Module-level state used by run(): timer start times and the most recent
# sensor readings (names are generated by the Open Roberta code generator).
_timer1 = None
_timer2 = None
_timer3 = None
_timer4 = None
_timer5 = None
___Element5 = None
___Element6 = None
___Element7 = None
___Element8 = None
___Element9 = None
___Element10 = None
___Element11 = None
___Element12 = None
___Element13 = None
___Element14 = None
___Element15 = None
___Element16 = None
___Element = None
___Element17 = None
___Element18 = None
___Element19 = None
___Element20 = None
___Element2 = []
def run(RV):
    """Generated main program: sample every sensor once, exercise the
    odometry-reset calls, then read markers. Strictly sequential I/O."""
    global _timer1, _timer2, _timer3, _timer4, _timer5, ___Element5, ___Element6, ___Element7, ___Element8, ___Element9, ___Element10, ___Element11, ___Element12, ___Element13, ___Element14, ___Element15, ___Element16, ___Element, ___Element17, ___Element18, ___Element19, ___Element20, ___Element2
    time.sleep(1)
    resetOdometry(RV, 0, 0, 0)
    RV.writeFloat(4, 100)
    time.sleep(0.05)
    # Start all five timers.
    _timer1 = time.time()
    _timer2 = time.time()
    _timer3 = time.time()
    _timer4 = time.time()
    _timer5 = time.time()
    # One reading from each sensor (digital pins 1-4 and 6, analog pin 1).
    ___Element5 = getCameraLine(RV)
    ___Element6 = METHOD_NAME(1)
    ___Element7 = METHOD_NAME(3)
    ___Element8 = METHOD_NAME(4)
    ___Element9 = METHOD_NAME(2)
    ___Element10 = getAnalogPin(1)
    ___Element11 = METHOD_NAME(6)
    ___Element12 = getOdometry('x') * 100
    ___Element13 = getOdometry('y') * 100
    ___Element14 = getOdometry('rot') * (180 / math.pi)
    ___Element15 = getDistance(1)
    ___Element16 = isBumped()
    # NOTE(review): time.time() already returns seconds; dividing the elapsed
    # time by 1000 looks like a ms-conversion mistake in the generator — confirm.
    ___Element = ((time.time() - _timer1)/1000)
    ___Element17 = ((time.time() - _timer2)/1000)
    ___Element18 = ((time.time() - _timer3)/1000)
    ___Element19 = ((time.time() - _timer4)/1000)
    ___Element20 = ((time.time() - _timer5)/1000)
    ___Element2 = getColourBlob(RV, [40, 56, 42, 100, 53, 100])
    # Reset each odometry axis individually, then all at once.
    resetOdometry(RV, 0, RV.readFloatVector(1)[1], RV.readFloatVector(1)[2])
    resetOdometry(RV, RV.readFloatVector(1)[0], 0, RV.readFloatVector(1)[2])
    resetOdometry(RV, RV.readFloatVector(1)[0], RV.readFloatVector(1)[1], 0)
    resetOdometry(RV, 0, 0, 0)
    ___Element2 = getMarkers(RV)
    time.sleep(500/1000)
    ___Element2 = getMarkerInformation(RV, 0)
    RV.writeFloat(4, 100)
    time.sleep(0.005)
def step(RV):
    """Per-cycle hook; this generated program does nothing here."""
    return None
def main(RV):
    """Run the program, printing and re-raising any failure."""
    try:
        run(RV)
    except Exception as exc:
        print(exc)
        raise
def start(RV):
    """Launch main() on a background daemon thread."""
    worker = threading.Thread(target=main, daemon=True, args=(RV,), name='mainProgram')
    worker.start()
def stop(RV):
    """Stop hook; nothing to do for this program."""
    return None
def cleanup(RV):
    """Cleanup hook; nothing to do for this program."""
    return None
|
5,809 | get event | """
.. module: dispatch.plugins.dispatch_google_calendar.plugin
:platform: Unix
:copyright: (c) 2019 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
import logging
import time
import uuid
from datetime import datetime, timedelta
from typing import Any, List
from googleapiclient.errors import HttpError
from pytz import timezone
from tenacity import TryAgain, retry, retry_if_exception_type, stop_after_attempt, wait_exponential
from dispatch.decorators import apply, counter, timer
from dispatch.plugins.bases import ConferencePlugin
from dispatch.plugins.dispatch_google import calendar as google_calendar_plugin
from dispatch.plugins.dispatch_google.common import get_service
from dispatch.plugins.dispatch_google.config import GoogleConfiguration
log = logging.getLogger(__name__)
@retry(
    stop=stop_after_attempt(3),
    retry=retry_if_exception_type(TryAgain),
    wait=wait_exponential(multiplier=1, min=2, max=5),
)
def make_call(client: Any, func: Any, delay: int = None, propagate_errors: bool = False, **kwargs):
    """Make a google client api call.

    Args:
        client: A Google API resource (e.g. ``service.events()``).
        func: Name of the method to invoke on ``client``.
        delay: Optional seconds to sleep after a successful call (rate limiting).
        propagate_errors: If True, re-raise ``HttpError`` to the caller instead
            of converting it into a retry.

    Raises:
        TryAgain: on ``HttpError`` when ``propagate_errors`` is False, which
            triggers the ``@retry`` decorator (up to 3 attempts).
    """
    try:
        data = getattr(client, func)(**kwargs).execute()
        if delay:
            time.sleep(delay)
        return data
    except HttpError as e:
        # Fix: propagate_errors was accepted but silently ignored; honor it.
        if propagate_errors:
            raise e
        raise TryAgain from None
def METHOD_NAME(client: Any, event_id: str):
    """Fetches a calendar event."""
    events_resource = client.events()
    return make_call(events_resource, "get", calendarId="primary", eventId=event_id)
def remove_participant(client: Any, event_id: int, participant: str):
    """Remove participant from calendar event."""
    event = METHOD_NAME(client, event_id)
    # Keep every attendee except the one being removed.
    event["attendees"] = [a for a in event["attendees"] if a["email"] != participant]
    return make_call(client.events(), "update", calendarId="primary", eventId=event_id, body=event)
def add_participant(client: Any, event_id: int, participant: str):
    """Add a participant to the attendee list of a calendar event."""
    event = METHOD_NAME(client, event_id)
    attendees = event["attendees"]
    attendees.append({"email": participant})
    return make_call(client.events(), "update", calendarId="primary", eventId=event_id, body=event)
def delete_event(client, event_id: int):
    """Delete a calendar event from the primary calendar."""
    events_resource = client.events()
    return make_call(events_resource, "delete", calendarId="primary", eventId=event_id)
def create_event(
    client,
    name: str,
    description: str = None,
    title: str = None,
    participants: List[str] = None,
    start_time: str = None,
    duration: int = 60000,  # duration in mins ~6 weeks
):
    """Create a calendar event with an attached Google Meet conference.

    Args:
        client: Google Calendar API service object.
        name: Incident name used in the default summary/description.
        description: Optional event description override.
        title: Optional event summary override.
        participants: Optional list of attendee email addresses.
        start_time: Optional naive timestamp "%Y-%m-%dT%H:%M:%S", interpreted
            as America/Los_Angeles and converted to UTC.
        duration: Event length in minutes (default ~6 weeks).
    """
    if participants:
        participants = [{"email": x} for x in participants]
    else:
        participants = []
    # Random request id ties the Meet conference creation to this insert call.
    request_id = str(uuid.uuid4())
    body = {
        "description": description if description else f"Situation Room for {name}. Please join.",
        "summary": title if title else f"Situation Room for {name}",
        "attendees": participants,
        "conferenceData": {
            "createRequest": {
                "requestId": request_id,
                "conferenceSolutionKey": {"type": "hangoutsMeet"},
            }
        },
        "guestsCanModify": True,
    }
    if start_time:
        raw_dt = datetime.strptime(start_time, "%Y-%m-%dT%H:%M:%S")
        start = timezone("America/Los_Angeles").localize(raw_dt).astimezone(timezone("Etc/UTC"))
    else:
        start = datetime.utcnow()
    end = start + timedelta(minutes=duration)
    # NOTE(review): only the date portion is sent ("date", not "dateTime"),
    # which makes this an all-day event and drops any time-of-day from
    # start_time — confirm this is intended.
    body.update(
        {
            "start": {"date": start.isoformat().split("T")[0], "timeZone": "Etc/UTC"},
            "end": {"date": end.isoformat().split("T")[0], "timeZone": "Etc/UTC"},
        }
    )
    # TODO sometimes google is slow with the meeting invite, we should poll/wait
    return make_call(
        client.events(), "insert", calendarId="primary", body=body, conferenceDataVersion=1
    )
@apply(timer, exclude=["__init__"])
@apply(counter, exclude=["__init__"])
class GoogleCalendarConferencePlugin(ConferencePlugin):
    """Conference plugin backed by Google Calendar + Google Meet."""

    title = "Google Calendar Plugin - Conference Management"
    slug = "google-calendar-conference"
    description = "Uses Google calendar to manage conference rooms/meets."
    version = google_calendar_plugin.__version__
    author = "Netflix"
    author_url = "https://github.com/netflix/dispatch.git"

    def __init__(self):
        # Scope needed for full calendar read/write access.
        self.scopes = ["https://www.googleapis.com/auth/calendar"]
        self.configuration_schema = GoogleConfiguration

    def create(
        self, name: str, description: str = None, title: str = None, participants: List[str] = None
    ):
        """Create a new event.

        Returns a dict with the Meet weblink, the event id and an (empty)
        challenge string.
        """
        client = get_service(self.configuration, "calendar", "v3", self.scopes)
        conference = create_event(
            client,
            name,
            description=description,
            participants=participants,
            title=title,
        )
        # Pick the video entry point (the Meet URL) out of the conference data.
        meet_url = ""
        for entry_point in conference["conferenceData"]["entryPoints"]:
            if entry_point["entryPointType"] == "video":
                meet_url = entry_point["uri"]
        return {"weblink": meet_url, "id": conference["id"], "challenge": ""}

    def delete(self, event_id: str):
        """Deletes an existing event."""
        client = get_service(self.configuration, "calendar", "v3", self.scopes)
        return delete_event(client, event_id)

    def add_participant(self, event_id: str, participant: str):
        """Adds a new participant to event."""
        client = get_service(self.configuration, "calendar", "v3", self.scopes)
        return add_participant(client, event_id, participant)

    def remove_participant(self, event_id: str, participant: str):
        """Removes a participant from event."""
        # Fix: removed a stray "|" extraction artifact at the end of this line.
        client = get_service(self.configuration, "calendar", "v3", self.scopes)
        return remove_participant(client, event_id, participant)
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetTopicResult',
'AwaitableGetTopicResult',
'get_topic',
'get_topic_output',
]
@pulumi.output_type
class GetTopicResult:
    """
    A collection of values returned by getTopic.
    """
    def __init__(__self__, id=None, kms_key_name=None, labels=None, METHOD_NAME=None, message_storage_policies=None, name=None, project=None, schema_settings=None):
        # NOTE: generated validation is loose — falsy values skip the
        # isinstance check entirely.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kms_key_name and not isinstance(kms_key_name, str):
            raise TypeError("Expected argument 'kms_key_name' to be a str")
        pulumi.set(__self__, "kms_key_name", kms_key_name)
        if labels and not isinstance(labels, dict):
            raise TypeError("Expected argument 'labels' to be a dict")
        pulumi.set(__self__, "labels", labels)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'message_retention_duration' to be a str")
        pulumi.set(__self__, "message_retention_duration", METHOD_NAME)
        if message_storage_policies and not isinstance(message_storage_policies, list):
            raise TypeError("Expected argument 'message_storage_policies' to be a list")
        pulumi.set(__self__, "message_storage_policies", message_storage_policies)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if project and not isinstance(project, str):
            raise TypeError("Expected argument 'project' to be a str")
        pulumi.set(__self__, "project", project)
        if schema_settings and not isinstance(schema_settings, list):
            raise TypeError("Expected argument 'schema_settings' to be a list")
        pulumi.set(__self__, "schema_settings", schema_settings)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="kmsKeyName")
    def kms_key_name(self) -> str:
        return pulumi.get(self, "kms_key_name")
    @property
    @pulumi.getter
    def labels(self) -> Mapping[str, str]:
        return pulumi.get(self, "labels")
    @property
    @pulumi.getter(name="messageRetentionDuration")
    def METHOD_NAME(self) -> str:
        return pulumi.get(self, "message_retention_duration")
    @property
    @pulumi.getter(name="messageStoragePolicies")
    def message_storage_policies(self) -> Sequence['outputs.GetTopicMessageStoragePolicyResult']:
        return pulumi.get(self, "message_storage_policies")
    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def project(self) -> Optional[str]:
        return pulumi.get(self, "project")
    @property
    @pulumi.getter(name="schemaSettings")
    def schema_settings(self) -> Sequence['outputs.GetTopicSchemaSettingResult']:
        return pulumi.get(self, "schema_settings")
class AwaitableGetTopicResult(GetTopicResult):
    # Awaitable shim: the generator body is unreachable (the `if False` makes
    # __await__ a generator that immediately returns the result).
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetTopicResult(
            id=self.id,
            kms_key_name=self.kms_key_name,
            labels=self.labels,
            METHOD_NAME=self.METHOD_NAME,
            message_storage_policies=self.message_storage_policies,
            name=self.name,
            project=self.project,
            schema_settings=self.schema_settings)
def get_topic(name: Optional[str] = None,
              project: Optional[str] = None,
              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTopicResult:
    """
    Get information about a Google Cloud Pub/Sub Topic. For more information see
    the [official documentation](https://cloud.google.com/pubsub/docs/)
    and [API](https://cloud.google.com/pubsub/docs/apis).
    ## Example Usage
    ```python
    import pulumi
    import pulumi_gcp as gcp
    my_pubsub_topic = gcp.pubsub.get_topic(name="my-pubsub-topic")
    ```
    :param str name: The name of the Cloud Pub/Sub Topic.
    - - -
    :param str project: The project in which the resource belongs. If it
    is not provided, the provider project is used.
    """
    # Marshal arguments and invoke the provider's getTopic data source.
    __args__ = dict()
    __args__['name'] = name
    __args__['project'] = project
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('gcp:pubsub/getTopic:getTopic', __args__, opts=opts, typ=GetTopicResult).value
    return AwaitableGetTopicResult(
        id=pulumi.get(__ret__, 'id'),
        kms_key_name=pulumi.get(__ret__, 'kms_key_name'),
        labels=pulumi.get(__ret__, 'labels'),
        METHOD_NAME=pulumi.get(__ret__, 'message_retention_duration'),
        message_storage_policies=pulumi.get(__ret__, 'message_storage_policies'),
        name=pulumi.get(__ret__, 'name'),
        project=pulumi.get(__ret__, 'project'),
        schema_settings=pulumi.get(__ret__, 'schema_settings'))
@_utilities.lift_output_func(get_topic)
def get_topic_output(name: Optional[pulumi.Input[str]] = None,
                     project: Optional[pulumi.Input[Optional[str]]] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTopicResult]:
    """
    Get information about a Google Cloud Pub/Sub Topic. For more information see
    the [official documentation](https://cloud.google.com/pubsub/docs/)
    and [API](https://cloud.google.com/pubsub/docs/apis).
    ## Example Usage
    ```python
    import pulumi
    import pulumi_gcp as gcp
    my_pubsub_topic = gcp.pubsub.get_topic(name="my-pubsub-topic")
    ```
    :param str name: The name of the Cloud Pub/Sub Topic.
    - - -
    :param str project: The project in which the resource belongs. If it
    is not provided, the provider project is used.
    """
    # Fix: removed a stray "|" extraction artifact after the ellipsis body.
    # Body is intentionally empty: lift_output_func wraps get_topic above.
    ...
import pytest
import env
from pybind11_tests import ConstructorStats
from pybind11_tests import modules as m
from pybind11_tests.modules import subsubmodule as ms
def test_nested_modules():
    """Submodules created in C++ get fully-qualified dotted __name__ values."""
    import pybind11_tests
    assert pybind11_tests.__name__ == "pybind11_tests"
    assert pybind11_tests.modules.__name__ == "pybind11_tests.modules"
    assert (
        pybind11_tests.modules.subsubmodule.__name__
        == "pybind11_tests.modules.subsubmodule"
    )
    assert m.__name__ == "pybind11_tests.modules"
    assert ms.__name__ == "pybind11_tests.modules.subsubmodule"
    assert ms.submodule_func() == "submodule_func()"
def METHOD_NAME():
    """Internal references (B holds two A members) keep correct lifetimes and
    constructor/assignment counts, tracked via ConstructorStats."""
    b = ms.B()
    assert str(b.get_a1()) == "A[1]"
    assert str(b.a1) == "A[1]"
    assert str(b.get_a2()) == "A[2]"
    assert str(b.a2) == "A[2]"
    # Assigning through the property copy-assigns into the held members.
    b.a1 = ms.A(42)
    b.a2 = ms.A(43)
    assert str(b.get_a1()) == "A[42]"
    assert str(b.a1) == "A[42]"
    assert str(b.get_a2()) == "A[43]"
    assert str(b.a2) == "A[43]"
    astats, bstats = ConstructorStats.get(ms.A), ConstructorStats.get(ms.B)
    assert astats.alive() == 2
    assert bstats.alive() == 1
    # Deleting b must release both internally-referenced A instances.
    del b
    assert astats.alive() == 0
    assert bstats.alive() == 0
    assert astats.values() == ["1", "2", "42", "43"]
    assert bstats.values() == []
    assert astats.default_constructions == 0
    assert bstats.default_constructions == 1
    assert astats.copy_constructions == 0
    assert bstats.copy_constructions == 0
    # assert astats.move_constructions >= 0 # Don't invoke any
    # assert bstats.move_constructions >= 0 # Don't invoke any
    assert astats.copy_assignments == 2
    assert bstats.copy_assignments == 0
    assert astats.move_assignments == 0
    assert bstats.move_assignments == 0
def test_importing():
    """A name re-exported from C++ is the very same Python object."""
    from collections import OrderedDict
    from pybind11_tests.modules import OD
    assert OD is OrderedDict
    # Fix: compare by equality rather than repr — OrderedDict.__repr__
    # changed in Python 3.12 (now "OrderedDict({1: 'a', 2: 'b'})"), which
    # broke the old repr-string assertion.
    assert OD([(1, "a"), (2, "b")]) == OrderedDict([(1, "a"), (2, "b")])
def test_pydoc():
    """Pydoc needs to be able to provide help() for everything inside a pybind11 module"""
    import pydoc
    import pybind11_tests
    assert pybind11_tests.__name__ == "pybind11_tests"
    assert pybind11_tests.__doc__ == "pybind11 test module"
    # docmodule must render without raising and produce non-empty text.
    assert pydoc.text.docmodule(pybind11_tests)
def test_duplicate_registration():
    """Registering two things with the same name"""
    # The C++ helper returns a list of error messages; empty means no failures.
    assert m.duplicate_registration() == []
def test_builtin_key_type():
    """Test that all the keys in the builtin modules have type str.

    Previous versions of pybind11 would add a unicode key in python 2.
    """
    # __builtins__ is a module in __main__ but a plain dict elsewhere
    # (and differs again on PyPy), so probe for .keys() first.
    if hasattr(__builtins__, "keys"):
        key_view = __builtins__.keys()
    else:
        key_view = __builtins__.__dict__.keys()
    assert {type(k) for k in key_view} == {str}
@pytest.mark.xfail("env.PYPY", reason="PyModule_GetName()")
def test_def_submodule_failures():
    """def_submodule error paths: bad parent __name__ and malformed UTF-8 names."""
    sm = m.def_submodule(m, b"ScratchSubModuleName")  # Using bytes to show it works.
    assert sm.__name__ == m.__name__ + "." + "ScratchSubModuleName"
    malformed_utf8 = b"\x80"
    if env.PYPY:
        # It is not worth the effort finding a trigger for a failure when running with PyPy.
        pytest.skip("Sufficiently exercised on platforms other than PyPy.")
    else:
        # Meant to trigger PyModule_GetName() failure:
        sm_name_orig = sm.__name__
        sm.__name__ = malformed_utf8
        try:
            with pytest.raises(Exception):
                # Seen with Python 3.9: SystemError: nameless module
                # But we do not want to exercise the internals of PyModule_GetName(), which could
                # change in future versions of Python, but a bad __name__ is very likely to cause
                # some kind of failure indefinitely.
                m.def_submodule(sm, b"SubSubModuleName")
        finally:
            # Clean up to ensure nothing gets upset by a module with an invalid __name__.
            sm.__name__ = sm_name_orig  # Purely precautionary.
    # Meant to trigger PyImport_AddModule() failure:
    # Fix: removed a stray "|" extraction artifact at the end of this line.
    with pytest.raises(UnicodeDecodeError):
        m.def_submodule(sm, malformed_utf8)
"""Tests of whether units behave."""
from assay import assert_raises
from numpy import array, nan
from skyfield.units import (
Angle, Distance, Velocity, UnpackingError, WrongUnitError,
)
# AstroPy is an optional test dependency; `u is None` disables its tests.
try:
    from astropy import units as u
except ImportError:
    u = None
def needs_astropy(test):
    """Skip `test` if AstroPy is not available."""
    if u is None:
        return None
    return test
def test_degree_rounding():
    """Angle formatting rounds to the nearest tenth-arcsecond, symmetrically
    around zero (table of boundary cases in multiples of 0.1 arcsec)."""
    tenth = 0.1 / 60.0 / 60.0  # of an arcsecond
    assert str(Angle(degrees=tenth * -600.75)) == '-00deg 01\' 00.1"'
    assert str(Angle(degrees=tenth * -600.25)) == '-00deg 01\' 00.0"'
    assert str(Angle(degrees=tenth * -599.75)) == '-00deg 01\' 00.0"'
    assert str(Angle(degrees=tenth * -599.25)) == '-00deg 00\' 59.9"'
    assert str(Angle(degrees=tenth * -1.75)) == '-00deg 00\' 00.2"'
    assert str(Angle(degrees=tenth * -1.25)) == '-00deg 00\' 00.1"'
    assert str(Angle(degrees=tenth * -0.75)) == '-00deg 00\' 00.1"'
    assert str(Angle(degrees=tenth * -0.25)) == '00deg 00\' 00.0"'
    assert str(Angle(degrees=0.0)) == '00deg 00\' 00.0"'
    assert str(Angle(degrees=tenth * 0.25)) == '00deg 00\' 00.0"'
    assert str(Angle(degrees=tenth * 0.75)) == '00deg 00\' 00.1"'
    assert str(Angle(degrees=tenth * 1.25)) == '00deg 00\' 00.1"'
    assert str(Angle(degrees=tenth * 1.75)) == '00deg 00\' 00.2"'
    assert str(Angle(degrees=tenth * 599.25)) == '00deg 00\' 59.9"'
    assert str(Angle(degrees=tenth * 599.75)) == '00deg 01\' 00.0"'
    assert str(Angle(degrees=tenth * 600.25)) == '00deg 01\' 00.0"'
    assert str(Angle(degrees=tenth * 600.75)) == '00deg 01\' 00.1"'
def test_angle_scalar_strs():
    """0-d NumPy arrays format like plain scalars; signed=True adds '+'."""
    assert str(Angle(degrees=array(91))) == '''91deg 00' 00.0"'''
    assert str(Angle(degrees=array(91), signed=True)) == '''+91deg 00' 00.0"'''
    assert str(Angle(hours=array(12))) == '''12h 00m 00.00s'''
def test_angle_array_strs():
    """Array-valued Angles summarise as a range; hstr/dstr expand per element
    and enforce the hours-vs-degrees unit unless warn=False."""
    h = Angle(hours=array([0.5, nan, -13]))
    d = Angle(degrees=h._degrees)
    assert str(h) == '3 values from 00h 30m 00.00s to -13h 00m 00.00s'
    assert str(d) == '''3 values from 07deg 30' 00.0" to -195deg 00' 00.0"'''
    with assert_raises(WrongUnitError):
        h.dstr()
        # NOTE(review): this second call is unreachable — h.dstr() raises
        # first; d.hstr() likely wants its own assert_raises block. Confirm.
        d.hstr()
    assert h.hstr() == d.hstr(warn=False) == [
        '00h 30m 00.00s',
        'nan',
        '-13h 00m 00.00s',
    ]
    assert d.dstr() == h.dstr(warn=False) == [
        '07deg 30\' 00.0"',
        'nan',
        '-195deg 00\' 00.0"',
    ]
    empty = Angle(radians=[])
    assert str(empty) == 'Angle []'
    assert empty.hstr(warn=False) == []
    assert empty.dstr() == []
    # Custom format strings receive sign, fields, fractional part and places.
    assert h.hstr(format='{0} {1} {2} {3} {4} {5}', places=6) == [
        ' 0 30 0 0 6', 'nan', '- 13 0 0 0 6']
    assert d.dstr(format='{0} {1} {2} {3} {4} {5}', places=6) == [
        ' 7 30 0 0 6', 'nan', '- 195 0 0 0 6']
def test_angle_sexagesimal_args():
    """Tuples of 1-3 values are read as (whole, minutes, seconds)."""
    assert str(Angle(degrees=(90,))) == '''90deg 00' 00.0"'''
    assert str(Angle(hours=(12,))) == '''12h 00m 00.00s'''
    assert str(Angle(degrees=(90, 15))) == '''90deg 15' 00.0"'''
    assert str(Angle(hours=(12, 30))) == '''12h 30m 00.00s'''
    assert str(Angle(degrees=(90, 15, 30))) == '''90deg 15' 30.0"'''
    assert str(Angle(hours=(12, 30, 15))) == '''12h 30m 15.00s'''
def test_arcminutes_and_arcseconds_and_mas():
    """One degree is 60 arcmin, 3600 arcsec, 3.6e6 milliarcseconds."""
    angle = Angle(degrees=1.0)
    assert angle.arcminutes() == 60
    assert angle.arcseconds() == 60 * 60
    assert angle.mas() == 60 * 60 * 1000
def test_distance_input_units():
    """All constructor spellings of 1 au agree exactly."""
    for d in (
        Distance(1.0),
        Distance(au=1.0),  # deprecated
        Distance(m=149597870700),
        Distance(km=149597870.700),
        Distance.au(1.0),  # modern
        Distance.m(149597870700),
        Distance.km(149597870.700),
    ):
        # <= 0 deliberately demands exact equality with 1.0 au.
        assert abs(d.au - 1.0) <= 0
def test_velocity_input_units():
    """au/day and km/s constructors agree (2 au/day ~= 3462.9137 km/s)."""
    v1 = Velocity(au_per_d=2.0)
    v2 = Velocity(km_per_s=3462.9137)
    assert abs(v1.au_per_d - v2.au_per_d) < 1e-7
    v1 = Velocity.au_per_d(2.0)
    v2 = Velocity.km_per_s(3462.9137)
    assert abs(v1.au_per_d - v2.au_per_d) < 1e-7
def test_stringifying_vector_distance():
    """Vector Distances print their NumPy array plus the 'au' suffix;
    accept both historical NumPy array-repr spacings."""
    a = array([1.23, 4.56])
    s = str(Distance(au=a))
    if '[1' in s:
        # Python 3.5, says Travis CI. No idea.
        assert s == '[1.23 4.56] au'
    else:
        # Every other version of Python.
        assert s == '[ 1.23 4.56] au'
def METHOD_NAME():
    """Unpacking or indexing a unit object raises UnpackingError whose message
    lists the unit attributes the caller should use instead."""
    distance = Distance(1.234)
    expect = '''\
to use this Distance, ask for its value in a particular unit:
distance.au
distance.km
distance.m'''
    with assert_raises(UnpackingError) as a:
        x, y, z = distance
    assert str(a.exception) == expect
    with assert_raises(UnpackingError) as a:
        distance[0]
    assert str(a.exception) == expect
    velocity = Velocity(1.234)
    expect = '''\
to use this Velocity, ask for its value in a particular unit:
velocity.au_per_d
velocity.km_per_s
velocity.m_per_s'''
    with assert_raises(UnpackingError) as a:
        x, y, z = velocity
    assert str(a.exception) == expect
    with assert_raises(UnpackingError) as a:
        velocity[0]
    assert str(a.exception) == expect
    angle = Angle(radians=1.234)
    expect = '''\
to use this Angle, ask for its value in a particular unit:
angle.degrees
angle.hours
angle.radians'''
    with assert_raises(UnpackingError) as a:
        x, y, z = angle
    assert str(a.exception) == expect
    with assert_raises(UnpackingError) as a:
        angle[0]
    assert str(a.exception) == expect
def test_constructors_accept_plain_lists():
    """Plain Python lists (not just NumPy arrays) must be accepted."""
    Distance(au=[1,2,3])
    Distance(km=[1,2,3])
    Distance(m=[1,2,3])
    Velocity(au_per_d=[1,2,3])
    Velocity(km_per_s=[1,2,3])
def test_converting_from_km_to_m():
    """1.234 km reads back as 1234 m."""
    distance = Distance(km=1.234)
    assert abs(distance.m - 1234.0) < 1e-15
def test_converting_from_m_to_km():
    """1234 m reads back as 1.234 km."""
    distance = Distance(m=1234.0)
    assert abs(distance.km - 1.234) < 1e-15
def test_deprecated_method_from_au():
    """The deprecated from_au() constructor still works."""
    distance = Distance.from_au(1.25)
    assert distance.au == 1.25
@needs_astropy
def test_converting_distance_with_astropy():
    """Distance.to() agrees with the native km value within AU-definition slop."""
    distance = Distance(au=1.234)
    value1 = distance.km
    value2 = distance.to(u.km)
    epsilon = 0.02  # definitions of AU seem to disagree slightly
    assert abs(value1 - value2.value) < epsilon
@needs_astropy
def test_converting_velocity_with_astropy():
    """Velocity.to() agrees with the native km/s value to 1e-6."""
    velocity = Velocity(au_per_d=1.234)
    value1 = velocity.km_per_s
    value2 = velocity.to(u.km / u.s)
    epsilon = 1e-6
    # Fix: removed a stray "|" extraction artifact at the end of this line.
    assert abs(value1 - value2.value) < epsilon
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetMachineLearningDatasetResult',
'AwaitableGetMachineLearningDatasetResult',
'get_machine_learning_dataset',
'get_machine_learning_dataset_output',
]
@pulumi.output_type
class GetMachineLearningDatasetResult:
    """
    Machine Learning dataset object wrapped into ARM resource envelope.
    """
    def __init__(__self__, id=None, identity=None, location=None, name=None, properties=None, sku=None, tags=None, METHOD_NAME=None):
        # NOTE: generated validation is loose — falsy values skip the
        # isinstance check entirely.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if identity and not isinstance(identity, dict):
            raise TypeError("Expected argument 'identity' to be a dict")
        pulumi.set(__self__, "identity", identity)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", METHOD_NAME)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Specifies the resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def identity(self) -> Optional['outputs.IdentityResponse']:
        """
        The identity of the resource.
        """
        return pulumi.get(self, "identity")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Specifies the location of the resource.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Specifies the name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> 'outputs.DatasetResponse':
        """
        Dataset properties
        """
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.SkuResponse']:
        """
        The sku of the workspace.
        """
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Contains resource tags defined as key/value pairs.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        Specifies the type of the resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetMachineLearningDatasetResult(GetMachineLearningDatasetResult):
    # Awaitable shim: the generator body is unreachable (the `if False` makes
    # __await__ a generator that immediately returns the result).
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetMachineLearningDatasetResult(
            id=self.id,
            identity=self.identity,
            location=self.location,
            name=self.name,
            properties=self.properties,
            sku=self.sku,
            tags=self.tags,
            METHOD_NAME=self.METHOD_NAME)
def get_machine_learning_dataset(dataset_name: Optional[str] = None,
                                 resource_group_name: Optional[str] = None,
                                 workspace_name: Optional[str] = None,
                                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMachineLearningDatasetResult:
    """
    Get a Dataset by name.
    Azure REST API version: 2020-05-01-preview.
    :param str dataset_name: The Dataset name.
    :param str resource_group_name: Name of the resource group in which workspace is located.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    # Marshal arguments and invoke the provider's data source.
    __args__ = dict()
    __args__['datasetName'] = dataset_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices:getMachineLearningDataset', __args__, opts=opts, typ=GetMachineLearningDatasetResult).value
    return AwaitableGetMachineLearningDatasetResult(
        id=pulumi.get(__ret__, 'id'),
        identity=pulumi.get(__ret__, 'identity'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        sku=pulumi.get(__ret__, 'sku'),
        tags=pulumi.get(__ret__, 'tags'),
        METHOD_NAME=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_machine_learning_dataset)
def get_machine_learning_dataset_output(dataset_name: Optional[pulumi.Input[str]] = None,
                                        resource_group_name: Optional[pulumi.Input[str]] = None,
                                        workspace_name: Optional[pulumi.Input[str]] = None,
                                        opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetMachineLearningDatasetResult]:
    """
    Get a Dataset by name.
    Azure REST API version: 2020-05-01-preview.
    :param str dataset_name: The Dataset name.
    :param str resource_group_name: Name of the resource group in which workspace is located.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    # Fix: removed a stray "|" extraction artifact after the ellipsis body.
    # Body is intentionally empty: lift_output_func wraps the function above.
    ...
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2023 Scipp contributors (https://github.com/scipp)
# @author Jan-Lukas Wynen
"""Unit helpers and predefined units.
The following predefined units are available:
Dimensionless (those two names are equivalent):
- dimensionless
- one
Common units:
- angstrom
- counts
- deg
- kg
- K
- meV
- m
- rad
- s
- us
- ns
- mm
Special:
- default_unit (used by some functions to deduce a unit)
.. seealso::
:py:class:`scipp.Unit` to construct other units.
"""
from contextlib import contextmanager
from typing import Dict, Iterable, Tuple, Union
from .._scipp.core import add_unit_alias as _add_unit_alias
from .._scipp.core import clear_unit_aliases as _clear_unit_aliases
from .._scipp.core.units import ( # NOQA
K,
angstrom,
counts,
default_unit,
deg,
dimensionless,
kg,
m,
meV,
mm,
ns,
one,
rad,
s,
us,
)
from ..core.cpp_classes import Unit, Variable, VariancesError
class UnitAliases:
    """Manager for unit aliases.

    Aliases override how units are converted to and from strings.
    The table is similar to a :class:`dict` and maps alias names to units.
    But unlike a dict, no guarantees are made about the order of aliases or their
    priority in string formatting.
    And there may be only one alias for each unit at a time.

    Attention
    ---------
    This class is a singleton and should never be instantiated by user code.
    Instead, use it through :attr:`scipp.units.aliases`.
    """

    def __init__(self):
        # Enforce the singleton: the Python-side table must mirror the global
        # alias table in the compiled layer, so a second instance would drift.
        if any(map(lambda x: isinstance(x, UnitAliases), globals().values())):
            raise RuntimeError('There can be only one instance of _Aliases')
        self._aliases: Dict[str, Unit] = {}

    def __setitem__(self, alias: str, unit: Union[str, Unit, Variable]):
        """Define a new unit alias."""
        unit = _build_unit(unit)
        if self._aliases.get(alias) == unit:
            return  # no-op: the alias already maps to exactly this unit
        if unit in self.values():
            raise ValueError(f"There already is an alias for unit '{unit!r}'")
        # Register with the compiled layer first, then record locally.
        _add_unit_alias(name=alias, unit=unit)
        self._aliases[alias] = unit

    def __delitem__(self, alias: str):
        """Remove an existing alias."""
        self._del_aliases(alias)

    def _del_aliases(self, *names: str):
        # The compiled layer has no "remove one" operation, so rebuild the
        # whole table without the deleted names.
        old_aliases = dict(self._aliases)
        for name in names:
            del old_aliases[name]
        self.clear()
        for name, unit in old_aliases.METHOD_NAME():
            self[name] = unit

    def clear(self):
        """Remove all aliases."""
        self._aliases.clear()
        _clear_unit_aliases()

    @contextmanager
    def scoped(self, **kwargs: Union[str, Unit]):
        """Contextmanager to define temporary aliases.

        Defines new aliases based on ``kwargs`` for the duration of the context.
        When exiting the context, all temporary aliases are removed.

        It is possible to define additional aliases in the context.
        They are not removed when the context manager exits unless they override
        scoped aliases. (See examples.)

        Warning
        -------
        This context manager is not thread-safe.
        Aliases defined here affect all threads and other threads can define different
        aliases which affect the managed context.

        Parameters
        ----------
        **kwargs
            Map from names to units for aliases to define.

        Examples
        --------
        Define temporary aliases:

        >>> with sc.units.aliases.scoped(speed='m/s'):
        ...     str(sc.Unit('m/s'))
        'speed'

        Previously defined aliases still apply:

        >>> sc.units.aliases.clear()
        >>> sc.units.aliases['dogyear'] = '4492800s'
        >>> with sc.units.aliases.scoped(speed='m/s'):
        ...     str(sc.Unit('4492800s'))
        'dogyear'

        Previous aliases can be overridden and are restored after the context:

        >>> sc.units.aliases.clear()
        >>> sc.units.aliases['speed'] = 'km/s'
        >>> with sc.units.aliases.scoped(speed='m/s'):
        ...     sc.Unit('speed') == 'm/s'
        True
        >>> sc.Unit('speed') == 'km/s'
        True

        Aliases defined within the context remain active
        unless they clash with previous aliases:

        >>> sc.units.aliases.clear()
        >>> sc.units.aliases['speed'] = 'km/s'
        >>> with sc.units.aliases.scoped(speed='m/s'):
        ...     sc.units.aliases['speed'] = 'mm/s'
        ...     sc.units.aliases['dogyear'] = '4492800s'
        >>> str(sc.Unit('4492800s'))
        'dogyear'
        >>> sc.Unit('speed') == 'km/s'
        True
        """
        # Remember which existing aliases are shadowed so they can be restored.
        overridden = {
            name: unit for name, unit in self._aliases.METHOD_NAME() if name in kwargs
        }
        for name, unit in kwargs.METHOD_NAME():
            self[name] = unit
        # NOTE(review): no try/finally here, matching the original — if the
        # managed block raises, the temporary aliases are not cleaned up.
        yield
        self._del_aliases(*kwargs)
        for name, unit in overridden.METHOD_NAME():
            self[name] = unit

    def __iter__(self) -> Iterable[str]:
        """Iterator over alias names."""
        yield from self.keys()

    def keys(self) -> Iterable[str]:
        """Iterator over alias names."""
        yield from self._aliases.keys()

    def values(self) -> Iterable[Unit]:
        """Iterator over aliased units."""
        yield from self._aliases.values()

    def METHOD_NAME(self) -> Iterable[Tuple[str, Unit]]:
        """Iterator over pairs of alias names and units."""
        yield from self._aliases.METHOD_NAME()

    # Making copies would allow _Alias's internal map and
    # LLNL/Unit's global map to get out of sync.
    def __copy__(self):
        raise TypeError('UnitAliases is a singleton and must not be copied')

    # BUGFIX: copy.deepcopy calls __deepcopy__(obj, memo); without the memo
    # parameter the call itself raised a TypeError about arguments instead of
    # the intended singleton error message.
    def __deepcopy__(self, memo=None):
        raise TypeError('UnitAliases is a singleton and must not be copied')
def _build_unit(x: Union[str, Unit, Variable]) -> Unit:
    # Coerce any of the accepted alias targets into a Unit:
    # an existing Unit passes through, a string is parsed,
    # and a scalar Variable contributes value * unit.
    if isinstance(x, Unit):
        return x
    if isinstance(x, str):
        return Unit(x)
    # From here on ``x`` is a Variable; a variance makes no sense for a unit.
    if x.variance is not None:
        raise VariancesError('Cannot define a unit with a variance')
    # Convert to float first to make sure the variable only contains a
    # multiplier and not a string that would be multiplied to the unit.
    return Unit(str(float(x.value))) * x.unit
# The one and only alias manager instance (UnitAliases enforces this).
aliases = UnitAliases()
"""Table of unit aliases."""

# Public API of this module: predefined units re-exported from the compiled
# units module.
__all__ = [
    'angstrom',
    'counts',
    'default_unit',
    'deg',
    'dimensionless',
    'kg',
    'K',
    'meV',
    'm',
    'one',
    'rad',
    's',
    'us',
    'ns',
    'mm',
]
import pytest
# Name of the custom ACL table type exercised by these tests.
TABLE_TYPE = "CUSTOM_L3"
# Match fields the custom table type declares support for.
CUSTOM_TABLE_TYPE_MATCHES = [
    "L4_SRC_PORT_RANGE",
    "L4_DST_PORT_RANGE",
    "ETHER_TYPE",
    "TUNNEL_VNI",
    "TC",
    "INNER_IP_PROTOCOL",
    "INNER_ETHER_TYPE",
    "INNER_L4_SRC_PORT",
    "INNER_L4_DST_PORT",
    "VLAN_ID"
]
# Bind point types the table may be attached to.
CUSTOM_TABLE_TYPE_BPOINT_TYPES = ["PORT","PORTCHANNEL"]
# Actions the table type supports (comma-joined as one config entry).
CUSTOM_TABLE_TYPE_ACTIONS = ["PACKET_ACTION,COUNTER"]
# SAI-level action list expected to be programmed for the table.
EXPECTED_ACTION_LIST = ['SAI_ACL_ACTION_TYPE_PACKET_ACTION','SAI_ACL_ACTION_TYPE_COUNTER']
# Table/rule fixtures shared by all tests below.
TABLE_NAME = "EGRESS_TEST"
BIND_PORTS = ["Ethernet0", "Ethernet4"]
RULE_NAME = "EGRESS_TEST_RULE"
class TestEgressAclTable:
    """Egress ACL tests for the CUSTOM_L3 table type.

    Each test creates one rule on a shared egress table (via the fixture),
    verifies the SAI qualifiers programmed into ASIC_DB, then removes it.
    """

    @pytest.fixture
    def egress_acl_table(self, dvs_acl):
        """Create the custom table type and an egress table bound to two ports;
        yield the table's SAI object id and tear everything down afterwards."""
        try:
            dvs_acl.create_acl_table_type(TABLE_TYPE, CUSTOM_TABLE_TYPE_MATCHES, CUSTOM_TABLE_TYPE_BPOINT_TYPES, CUSTOM_TABLE_TYPE_ACTIONS)
            dvs_acl.create_acl_table(TABLE_NAME, TABLE_TYPE, BIND_PORTS, stage="egress")
            yield dvs_acl.get_acl_table_ids(1)[0]
        finally:
            # Teardown runs even if the test body fails.
            dvs_acl.remove_acl_table(TABLE_NAME)
            dvs_acl.remove_acl_table_type(TABLE_TYPE)
            dvs_acl.verify_acl_table_count(0)

    def test_EgressAclTableCreationDeletion(self, dvs_acl):
        """Table create/delete: group membership, port binding and action list."""
        try:
            dvs_acl.create_acl_table_type(TABLE_TYPE, CUSTOM_TABLE_TYPE_MATCHES, CUSTOM_TABLE_TYPE_BPOINT_TYPES, CUSTOM_TABLE_TYPE_ACTIONS)
            dvs_acl.create_acl_table(TABLE_NAME, TABLE_TYPE, BIND_PORTS, stage="egress")
            acl_table_id = dvs_acl.get_acl_table_ids(1)[0]
            acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(BIND_PORTS))
            dvs_acl.verify_acl_table_group_members(acl_table_id, acl_table_group_ids, 1)
            dvs_acl.verify_acl_table_port_binding(acl_table_id, BIND_PORTS, 1, stage="egress")
            dvs_acl.verify_acl_table_action_list(acl_table_id, EXPECTED_ACTION_LIST)
        finally:
            dvs_acl.remove_acl_table(TABLE_NAME)
            dvs_acl.remove_acl_table_type(TABLE_TYPE)
            dvs_acl.verify_acl_table_count(0)

    def test_EgressAclRuleL4SrcPortRange(self, dvs_acl, egress_acl_table):
        """L4 source port range match is programmed as a SAI range qualifier."""
        config_qualifiers = {"L4_SRC_PORT_RANGE": "0-1001"}
        expected_sai_qualifiers = {
            "SAI_ACL_ENTRY_ATTR_FIELD_ACL_RANGE_TYPE": dvs_acl.get_acl_range_comparator("SAI_ACL_RANGE_TYPE_L4_SRC_PORT_RANGE", "0,1001")
        }
        dvs_acl.create_acl_rule(TABLE_NAME, RULE_NAME, config_qualifiers, priority="999")
        dvs_acl.verify_acl_rule(expected_sai_qualifiers, priority="999")
        dvs_acl.remove_acl_rule(TABLE_NAME, RULE_NAME)
        dvs_acl.verify_no_acl_rules()

    def test_EgressAclRuleL4DstPortRange(self, dvs_acl, egress_acl_table):
        """L4 destination port range match."""
        config_qualifiers = {"L4_DST_PORT_RANGE": "1003-6666"}
        expected_sai_qualifiers = {
            "SAI_ACL_ENTRY_ATTR_FIELD_ACL_RANGE_TYPE": dvs_acl.get_acl_range_comparator("SAI_ACL_RANGE_TYPE_L4_DST_PORT_RANGE", "1003,6666")
        }
        dvs_acl.create_acl_rule(TABLE_NAME, RULE_NAME, config_qualifiers, priority="999")
        dvs_acl.verify_acl_rule(expected_sai_qualifiers, priority="999")
        dvs_acl.remove_acl_rule(TABLE_NAME, RULE_NAME)
        dvs_acl.verify_no_acl_rules()

    def test_EgressAclRuleL2EthType(self, dvs_acl, egress_acl_table):
        """Outer EtherType match with a DROP action."""
        config_qualifiers = {"ETHER_TYPE": "8000"}
        expected_sai_qualifiers = {
            "SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE": dvs_acl.get_simple_qualifier_comparator("8000&mask:0xffff")
        }
        dvs_acl.create_acl_rule(TABLE_NAME, RULE_NAME, config_qualifiers, action="DROP", priority="1000")
        dvs_acl.verify_acl_rule(expected_sai_qualifiers, action="DROP", priority="1000")
        dvs_acl.remove_acl_rule(TABLE_NAME, RULE_NAME)
        dvs_acl.verify_no_acl_rules()

    def test_EgressAclRuleTunnelVNI(self, dvs_acl, egress_acl_table):
        """Tunnel VNI match."""
        config_qualifiers = {"TUNNEL_VNI": "5000"}
        expected_sai_qualifiers = {
            "SAI_ACL_ENTRY_ATTR_FIELD_TUNNEL_VNI": dvs_acl.get_simple_qualifier_comparator("5000&mask:0xffffffff")
        }
        dvs_acl.create_acl_rule(TABLE_NAME, RULE_NAME, config_qualifiers, action="DROP", priority="1000")
        dvs_acl.verify_acl_rule(expected_sai_qualifiers, action="DROP", priority="1000")
        dvs_acl.remove_acl_rule(TABLE_NAME, RULE_NAME)
        dvs_acl.verify_no_acl_rules()

    def test_EgressAclRuleTC(self, dvs_acl, egress_acl_table):
        """Traffic-class match."""
        config_qualifiers = {"TC": "1"}
        expected_sai_qualifiers = {
            "SAI_ACL_ENTRY_ATTR_FIELD_TC": dvs_acl.get_simple_qualifier_comparator("1&mask:0xff")
        }
        dvs_acl.create_acl_rule(TABLE_NAME, RULE_NAME, config_qualifiers, action="DROP", priority="1000")
        dvs_acl.verify_acl_rule(expected_sai_qualifiers, action="DROP", priority="1000")
        dvs_acl.remove_acl_rule(TABLE_NAME, RULE_NAME)
        dvs_acl.verify_no_acl_rules()

    def test_EgressAclInnerIPProtocol(self, dvs_acl, egress_acl_table):
        """Inner (encapsulated) IP protocol match."""
        config_qualifiers = {"INNER_IP_PROTOCOL": "8"}
        expected_sai_qualifiers = {
            "SAI_ACL_ENTRY_ATTR_FIELD_INNER_IP_PROTOCOL": dvs_acl.get_simple_qualifier_comparator("8&mask:0xff")
        }
        dvs_acl.create_acl_rule(TABLE_NAME, RULE_NAME, config_qualifiers, action="DROP", priority="1000")
        dvs_acl.verify_acl_rule(expected_sai_qualifiers, action="DROP", priority="1000")
        dvs_acl.remove_acl_rule(TABLE_NAME, RULE_NAME)
        dvs_acl.verify_no_acl_rules()

    def test_EgressAclInnerEthType(self, dvs_acl, egress_acl_table):
        """Inner EtherType match."""
        config_qualifiers = {"INNER_ETHER_TYPE": "8000"}
        expected_sai_qualifiers = {
            "SAI_ACL_ENTRY_ATTR_FIELD_INNER_ETHER_TYPE": dvs_acl.get_simple_qualifier_comparator("8000&mask:0xffff")
        }
        dvs_acl.create_acl_rule(TABLE_NAME, RULE_NAME, config_qualifiers, action="DROP", priority="1000")
        dvs_acl.verify_acl_rule(expected_sai_qualifiers, action="DROP", priority="1000")
        dvs_acl.remove_acl_rule(TABLE_NAME, RULE_NAME)
        dvs_acl.verify_no_acl_rules()

    def test_EgressAclInnerL4SrcPort(self, dvs_acl, egress_acl_table):
        """Inner L4 source port match."""
        config_qualifiers = {"INNER_L4_SRC_PORT": "999"}
        expected_sai_qualifiers = {
            "SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_SRC_PORT": dvs_acl.get_simple_qualifier_comparator("999&mask:0xffff")
        }
        dvs_acl.create_acl_rule(TABLE_NAME, RULE_NAME, config_qualifiers, action="DROP", priority="1000")
        dvs_acl.verify_acl_rule(expected_sai_qualifiers, action="DROP", priority="1000")
        dvs_acl.remove_acl_rule(TABLE_NAME, RULE_NAME)
        dvs_acl.verify_no_acl_rules()

    def METHOD_NAME(self, dvs_acl, egress_acl_table):
        """Inner L4 destination port match."""
        config_qualifiers = {"INNER_L4_DST_PORT": "999"}
        expected_sai_qualifiers = {
            "SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_DST_PORT": dvs_acl.get_simple_qualifier_comparator("999&mask:0xffff")
        }
        dvs_acl.create_acl_rule(TABLE_NAME, RULE_NAME, config_qualifiers, action="DROP", priority="1000")
        dvs_acl.verify_acl_rule(expected_sai_qualifiers, action="DROP", priority="1000")
        dvs_acl.remove_acl_rule(TABLE_NAME, RULE_NAME)
        dvs_acl.verify_no_acl_rules()

    def test_AclRuleVlanId(self, dvs_acl, egress_acl_table):
        """Outer VLAN id match."""
        config_qualifiers = {"VLAN_ID": "100"}
        expected_sai_qualifiers = {
            "SAI_ACL_ENTRY_ATTR_FIELD_OUTER_VLAN_ID": dvs_acl.get_simple_qualifier_comparator("100&mask:0xfff")
        }
        dvs_acl.create_acl_rule(TABLE_NAME, RULE_NAME, config_qualifiers, action="DROP", priority="1000")
        dvs_acl.verify_acl_rule(expected_sai_qualifiers, action="DROP", priority="1000")
        dvs_acl.remove_acl_rule(TABLE_NAME, RULE_NAME)
        dvs_acl.verify_no_acl_rules()
# Add a dummy always-pass test at the end as a workaround
# for an issue where, if Flaky fails on the final test, it invokes module tear-down before retrying
def test_nonflaky_dummy():
    """Always-passing placeholder (Flaky retry/teardown workaround)."""
    return None
#!/usr/bin/pythonw
# This file is part of JMRI.
#
# JMRI is free software; you can redistribute it and/or modify it under
# the terms of version 2 of the GNU General Public License as published
# by the Free Software Foundation. See the "COPYING" file for a copy
# of this license.
#
# JMRI is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# Revision $Revision$
# by Simon Ginsburg (simon.ginsburg at bluewin.ch)
"""
This module represents the text manager to be used with the translation utility
This module hast four major functionalities:
- Access to the directory manager
- Access to Default texts and files
- Access to the text conversion utility
from a human readible representation to UNICODE
- Tests the validity and existance of a version number in a file
This modue consists of the public class textmanager and two nested local classes:
- elementitem: Managment class for the confelement class
- confelement: Representation of the three forms ASCIIstring Normalstring UUCode
These characters are stored in a file called Convertioncharacters.txt and are "=" separated.
"""
import os
from directorymanager import directorymanager
from singlefile import singlefile
class textmanager:
    """Text manager for the translation utility.

    Responsibilities:
      - access to the directory manager,
      - access to default (non-translatable) texts and files,
      - the ASCII/Normal/UUCode character conversion table,
      - extraction and validation of '$Revision$' version numbers.
    """

    def __init__(self, dm):
        """
        At startup a directory manager will be handed over
        """
        self.dm = dm
        self.Defaults = []       # singlefile objects for each default file
        self.NonTrans = []       # replaced by a singlefile once loaded
        self.Convertible = False # True once the conversion table was read
        self.elements = elementitem()
        self.getconf()
        if os.path.exists(self.dm.Defpath):
            self.addDefaults()

    def getstring(self, inputstring):
        """
        Convert input to string
        """
        outputstring = str(inputstring)
        return outputstring

    def METHOD_NAME(self, inputstring):
        """
        Extract the numeric revision from a '$Revision: N $' keyword line.
        """
        # Example input: "# Revision $Revision: 123 $"
        partlist = inputstring.rsplit("$", 2)
        revstring = str(partlist[1]).strip()   # "Revision: 123"
        revnum = revstring.rsplit(" ", 1)
        outputstring = str(revnum[1])
        return outputstring

    def isvalidversion(self, inputstring):
        """
        Check validity of the version string.

        Returns -1 if no version number is detected (negative value),
        0 for an old CVS style dotted version, and 1 for a plain number.
        """
        if float(inputstring) < 0.0:
            return -1
        temp1 = str(inputstring).split(".", 1)
        if len(temp1) > 1:
            return 0
        return 1

    def getconf(self):
        """
        Load the character conversion table (Convertioncharacters.txt).
        Lines starting with '#' are treated as comments.
        """
        if os.path.exists(self.dm.Progpath):
            os.chdir(self.dm.Progpath)
        if os.path.exists(os.path.join(self.dm.Progpath, "Convertioncharacters.txt")):
            cofile = open("Convertioncharacters.txt", "rU")
            content = cofile.readlines()
            for currline in content:
                # startswith() also copes with empty lines, unlike currline[0].
                if not currline.startswith("#"):
                    self.elements.addelement(currline)
            cofile.close()
            self.Convertible = True
        else:
            print('File not found: Convertioncharacters.txt')

    def addDefaults(self):
        """
        Load the default files; NonTranslatable.txt is kept separately.
        """
        os.chdir(self.dm.Defpath)
        for filelistitem in os.listdir(self.dm.Defpath):
            if str(filelistitem).strip() == "NonTranslatable.txt":
                cpfile = open(filelistitem, 'rU')
                self.NonTrans = singlefile([], [], [], [], [], cpfile.readlines())
                cpfile.close()
            else:
                fullfilename = filelistitem
                # Skip hidden files such as .DS_Store.
                if not filelistitem.strip().startswith("."):
                    corename, ext = os.path.splitext(fullfilename)
                    filepath = self.dm.getdirectorystring(filelistitem)
                    cpfile = open(filelistitem, 'rU')
                    temp = singlefile(fullfilename, filepath, [], corename, [], cpfile.readlines())
                    cpfile.close()
                    self.Defaults.append(temp)

    def isDefaults(self, corename, seachstring):
        """
        Check if string is part of (non translatable) default values
        """
        for filelistitem in self.Defaults:
            if filelistitem.corename == corename:
                if filelistitem.isitem(seachstring):
                    return 1
        return 0

    def isNonTrans(self, corename):
        """
        Check if filename belongs to non translatable list
        """
        # ``self.NonTrans`` starts as [] and becomes a singlefile once loaded.
        # The original guard used ``is []`` which is never true for a fresh
        # list literal, so the empty case crashed; compare by equality instead.
        if self.NonTrans != []:
            if self.NonTrans.isitem(corename):
                return 1
        return 0
class elementitem:
    """
    Maintains the character conversion triples parsed from
    Convertioncharacters.txt.
    """

    def __init__(self):
        """
        Initialize an empty table.
        """
        self.numels = 0     # number of triples stored
        self.elements = []  # list of confelement instances

    def addelement(self, string):
        """
        Parse one '='-separated line and append it as a character triple.

        Accepted formats:
          ASCII=UUCode          (Normal defaults to the ASCII form)
          ASCII=Normal=UUCode
        Malformed lines are ignored.
        """
        temp = string.split("=")
        if len(temp) == 2:
            ASCIItemp = temp[0].strip()
            Normaltemp = ASCIItemp
            UUCtemp = temp[1].strip()
        elif len(temp) == 3:
            ASCIItemp = temp[0].strip()
            Normaltemp = temp[1].strip()
            UUCtemp = temp[2].strip()
        else:
            # BUGFIX: previously a line with any other field count left the
            # locals unbound and raised NameError below; skip it instead.
            return
        tempel = confelement(ASCIItemp, Normaltemp, UUCtemp)
        self.elements.append(tempel)
        self.numels = self.numels + 1
class confelement:
    """
    Value object holding the three representations of one conversion
    character: ASCII form, human-readable (Normal) form, and UNICODE code.
    """

    def __init__(self, ASCIIstring, Normalstring, UUCode):
        """Store the three forms of the character."""
        self.ASCII = ASCIIstring
        self.Normal = Normalstring
        self.UUCode = UUCode
import json
import logging
import os
from typing import Any, Dict, Tuple
import tabulate
from progress.bar import Bar
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
SEPARATORS = {"csv": ",", "tsv": "\t"}
def load_predictions(path: str, load_gold: bool = False):
    """
    Load prediction file in a single dictionary, indexed by filename.

    :param path: Path to prediction file (JSON lines, one example per line)
    :param load_gold: When evaluating gold hypotheses upper bound; keys the
        result by "slurp_id" instead of "file"
    :return: Dictionary of predictions
    """
    data_path = os.path.join(path)
    result = {}
    with open(data_path, "r") as f:
        lines = list(f)
        bar = Bar(message="Loading prediction file", max=len(lines))
        for line in lines:
            example = json.loads(line)
            # pop() removes the key field so it does not stay in the payload.
            result[example.pop("slurp_id" if load_gold else "file")] = example
            bar.next()
        bar.finish()
    return result
def METHOD_NAME(path: str, load_gold: bool = False):
    """
    Load gold file (test.jsonl) in a single dictionary, indexed by filename.

    :param path: Path to gold file (JSON lines, SLURP release format)
    :param load_gold: When evaluating gold hypotheses upper bound; keys the
        result by slurp_id instead of recording filename
    :return: Dictionary of gold examples
    """
    data_path = os.path.join(path)
    result = {}
    with open(data_path, "r") as f:
        lines = list(f)
        bar = Bar(message="Loading gold data", max=len(lines))
        for line in lines:
            example = json.loads(line)
            # One release example can expand into several entries
            # (one per recording) — merge them all in.
            result.update(release2prediction(example, load_gold))
            bar.next()
        bar.finish()
    return result
def release2prediction(example: Dict[str, Any], load_gold: bool = False):
    """
    Convert the SLURP release format into prediction format.

    :param example: the example in release format
        (as they come with the dataset release)
    :param load_gold: When evaluating gold hypotheses upper bound; key the
        result by slurp_id instead of one entry per recording file
    :return: mapping from key to example in prediction format
    """
    tokens = example["tokens"]
    entities = []
    for entity in example["entities"]:
        # An entity's filler is the lower-cased surface of its token span.
        filler = " ".join(tokens[i]["surface"].lower() for i in entity["span"])
        entities.append({"type": entity["type"], "filler": filler})
    converted = {
        "text": " ".join(tok["surface"] for tok in tokens),
        "scenario": example["scenario"],
        "action": example["action"],
        "entities": entities,
    }
    if load_gold:
        return {str(example["slurp_id"]): converted}
    # One entry per recording; all entries share the same converted payload.
    result = {}
    for recording in example["recordings"]:
        result[recording["file"]] = converted
    return result
def format_results(
    results: Dict[str, Tuple[float]],
    label: str,
    full: bool = True,
    errors: bool = False,
    table_layout: str = "fancy_grid",
):
    """
    Util to format and print the results.
    Format results in tabular format.

    :param results: the dictionary output by the get_metric() method
    :param label: the title of the table to print
    :param full: is true, prints the results of all the labels.
        Otherwise prints just the average among them
    :param errors: if true, prints TPs, FPs and FNs
    :param table_layout: the table layout.
        Available: all those from `tabulate`, `csv` and `tsv`.
    :return: the formatted table as string
    """
    # ``threshold`` limits how many columns are kept: 4 (up to F-Measure)
    # unless error counts were requested.
    if errors:
        threshold = 100
    else:
        threshold = 4
    header = [label.capitalize(), "Precision", "Recall", "F-Measure", "TP", "FP", "FN"][
        :threshold
    ]
    # NOTE: pop() removes "overall" from ``results`` — the caller's dict is
    # mutated here.
    table = [["overall".upper(), *results.pop("overall")][:threshold]]
    if full:
        for label in results:
            table.append([label, *results[label]][:threshold])
    if table_layout in {"csv", "tsv"}:
        # Plain separated-values output: stringify every cell ourselves.
        for i, row in enumerate(table):
            for j, item in enumerate(row):
                table[i][j] = str(item)
        return (
            SEPARATORS[table_layout].join(header)
            + "\n"
            + "\n".join([SEPARATORS[table_layout].join(row) for row in table])
        )
    # Anything else goes through tabulate; fall back to a known layout.
    if table_layout not in tabulate.tabulate_formats:
        logger.warning(
            "{} non valid as table format. Using ``fancy_grid``".format(table_layout)
        )
        table_layout = "fancy_grid"
    return tabulate.tabulate(
        table,
        headers=header,
        tablefmt=table_layout,
        floatfmt=("", ".4f", ".4f", ".4f", ".0f", ".1f", ".1f"),
    )
# Copyright 2021 deepset GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
import os
from pathlib import Path
import re
from typing import Any, Dict, List, Optional
from networkx import DiGraph
import yaml
logger = logging.getLogger(__name__)
VALID_CODE_GEN_INPUT_REGEX = re.compile(r"^[-a-zA-Z0-9_/.:]+$")
def METHOD_NAME(pipeline_config: Dict[str, Any], pipeline_name: Optional[str] = None) -> Dict[str, Any]:
    """
    Get the definition of Pipeline from a given pipeline config. If the config contains more than one Pipeline,
    then the pipeline_name must be supplied.

    :param pipeline_config: Dict Pipeline config parsed as a dictionary.
    :param pipeline_name: name of the Pipeline.
    """
    pipelines = pipeline_config["pipelines"]
    if pipeline_name is None:
        # Without a name we can only disambiguate a single-pipeline config.
        if len(pipelines) != 1:
            raise Exception("The YAML contains multiple pipelines. Please specify the pipeline name to load.")
        return pipelines[0]
    # Return the first pipeline whose name matches.
    for candidate in pipelines:
        if candidate["name"] == pipeline_name:
            return candidate
    raise KeyError(f"Cannot find any pipeline with name '{pipeline_name}' declared in the YAML file.")
def get_component_definitions(pipeline_config: Dict[str, Any], overwrite_with_env_variables: bool) -> Dict[str, Any]:
    """
    Returns the definitions of all components from a given pipeline config.

    :param pipeline_config: Dict Pipeline config parsed as a dictionary.
    :param overwrite_with_env_variables: Overwrite the YAML configuration with environment variables. For example,
                                         to change index name param for an ElasticsearchDocumentStore, an env
                                         variable 'MYDOCSTORE_PARAMS_INDEX=documents-2021' can be set. Note that an
                                         `_` sign must be used to specify nested hierarchical properties.
    """
    definitions: Dict[str, Any] = {}
    # Deep-copy so neither env-var overwriting nor the name pop() below
    # mutates the caller's config.
    for raw_definition in copy.deepcopy(pipeline_config["components"]):
        if overwrite_with_env_variables:
            _overwrite_with_env_variables(raw_definition)
        definitions[raw_definition.pop("name")] = raw_definition
    return definitions
def read_pipeline_config_from_yaml(path: Path):
    """Parse the pipeline YAML file at ``path`` into a plain dict."""
    # safe_load refuses arbitrary YAML tags, so untrusted files cannot
    # trigger object construction.
    with open(path, "r", encoding="utf-8") as stream:
        return yaml.safe_load(stream)
def validate_config(pipeline_config: Dict[str, Any]):
    """Validate every user-supplied name, type and parameter in the config.

    Raises ValueError (via _validate_user_input) on any string that is not a
    safe identifier for code generation.
    """
    for component in pipeline_config["components"]:
        _validate_user_input(component["name"])
        _validate_user_input(component["type"])
        # ``params`` is optional on a component.
        for k, v in component.get("params", {}).items():
            _validate_user_input(k)
            _validate_user_input(v)
    for pipeline in pipeline_config["pipelines"]:
        _validate_user_input(pipeline["name"])
        _validate_user_input(pipeline["type"])
        for node in pipeline["nodes"]:
            _validate_user_input(node["name"])
            for input in node["inputs"]:
                _validate_user_input(input)
def build_component_dependency_graph(
    pipeline_definition: Dict[str, Any], component_definitions: Dict[str, Any]
) -> DiGraph:
    """
    Builds a dependency graph between components. Dependencies are:
    - referenced components during component build time (e.g. init params)
    - predecessor components in the pipeline that produce the needed input

    This enables sorting the components in a working and meaningful order for instantiation using topological sorting.

    :param pipeline_definition: the definition of the pipeline (e.g. use get_pipeline_definition() to obtain it)
    :param component_definitions: the definition of the pipeline components (e.g. use get_component_definitions() to obtain it)
    """
    graph = DiGraph()
    # Edges from pipeline predecessors: a node depends on every input that is
    # itself a component (as opposed to e.g. "Query"/"File" roots).
    for node in pipeline_definition["nodes"]:
        node_name = node["name"]
        graph.add_node(node_name)
        for input in node["inputs"]:
            if input in component_definitions:
                graph.add_edge(input, node_name)
    # Edges from build-time references: a component whose param value names
    # another component must be created after it.
    for component_name, component_definition in component_definitions.items():
        params = component_definition.get("params", {})
        referenced_components: List[str] = list()
        for param_value in params.values():
            # Currently we don't do any additional type validation here.
            if param_value in component_definitions:
                referenced_components.append(param_value)
        for referenced_component in referenced_components:
            graph.add_edge(referenced_component, component_name)
    return graph
def _validate_user_input(input: str):
    # Only strings are validated against the identifier whitelist; non-string
    # values (ints, bools, ...) pass through untouched.
    if isinstance(input, str) and not VALID_CODE_GEN_INPUT_REGEX.match(input):
        raise ValueError(f"'{input}' is not a valid config variable name. Use word characters only.")
def _overwrite_with_env_variables(component_definition: Dict[str, Any]):
    """
    Overwrite the pipeline config with environment variables. For example, to change index name param for an
    ElasticsearchDocumentStore, an env variable 'MYDOCSTORE_PARAMS_INDEX=documents-2021' can be set. Note that an
    `_` sign must be used to specify nested hierarchical properties.

    :param component_definition: a dictionary containing the YAML definition of a component.
    """
    env_prefix = f"{component_definition['name']}_params_".upper()
    for key, value in os.environ.items():
        if key.startswith(env_prefix):
            param_name = key.replace(env_prefix, "").lower()
            # A component may be declared without an explicit ``params``
            # block; create it on demand instead of raising KeyError.
            component_definition.setdefault("params", {})[param_name] = value
            # Lazy %-style args: the message is only formatted if INFO is on.
            logger.info(
                "Param '%s' of component '%s' overwritten with environment variable '%s' value '%s'.",
                param_name,
                component_definition["name"],
                key,
                value,
            )
"""
Higher order classes for Libvirt Sandbox Service (lxc) service container testing
"""
from avocado.utils import process
from avocado.utils import service
from . import lvsb_base
from . import virsh
class SandboxService(object):
    """
    Management for a single new/existing sandboxed service
    """

    def __init__(self, params, service_name, uri='lxc:///'):
        """Initialize connection to sandbox service with name and parameters"""
        # Intended workflow is:
        #     Use virt-sandbox-service for create/destroy
        #     Use service/systemd for runtime management
        #     Use virsh for list/edit/modify manipulation
        self.virsh = virsh.Virsh(uri=uri, ignore_status=True)
        self.command = lvsb_base.SandboxCommandBase(params, service_name)
        self.command.BINARY_PATH_PARAM = params.get('virt_sandbox_service_binary',
                                                    "virt-sandbox-service")
        self.command.add_optarg('--connect', uri)
        # SpecificServiceManager is not pickleable, so keep the init args
        # around for __getstate__/__setstate__ to rebuild it.
        self._run = process.run
        self.service = service.Factory.create_service(self.service_name,
                                                      run=self._run)
        # make self.start() --> self.service.start()
        self.METHOD_NAME()

    def METHOD_NAME(self):
        """Setup service methods locally for __init__ and __setstate__"""
        for command in service.COMMANDS:
            # Use setattr to keep pylint quiet
            setattr(self, command, getattr(self.service, command))

    def __getstate__(self):
        """Serialize instance for pickling"""
        # SandboxCommandBase is directly pickleable
        return {'command': self.command, 'run': self._run, 'virsh': dict(virsh)}

    def __setstate__(self, state):
        """Actualize instance from state"""
        # virsh is it's own dict of init params
        self.virsh = virsh.Virsh(**state['virsh'])
        # already used it's own get/sets state methods when unpickling state
        self.command = state['command']
        # Recreate SpecificServiceManager from the init args
        self._run = state['run']
        self.service = service.Factory.create_service(self.service_name,
                                                      run=self._run)
        self.METHOD_NAME()

    # Enforce read-only at all levels
    @property
    def service_name(self):
        return self.command.name

    # property accessor functions must be defined before naming attribute
    def __get_uri__(self):
        return self.virsh.uri

    def __set_uri__(self, uri):
        self.virsh.uri = uri

    def __del_uri__(self):
        # Virsh class interface insists this attribute exist, but can be None
        self.virsh.uri = None

    # Property definition must follow accessor definitions
    uri = property(__get_uri__, __set_uri__, __del_uri__)

    def create(self):
        return self.command.run(extra='create')

    def destroy(self):
        return self.command.run(extra='destroy')

    # Specialized list calls can just call self.virsh.dom_list() directly
    @property  # behave like attribute to make value-access easier
    def list(self):
        """
        Return list of dictionaries mapping column names to values
        """
        # For simple callers, just return list of names to be convenient
        cmdresult = self.virsh.dom_list()  # uri is passed automatically
        result = []
        column_names = []  # scope outside loop
        # BUGFIX: the original iterated over the *characters* of stdout and
        # tried to unpack each into (lineno, line); split into lines and
        # enumerate them instead.
        for lineno, line in enumerate(cmdresult.stdout_text.strip().splitlines()):
            if lineno == 0:
                column_names = line.strip().split()
                assert len(column_names) > 2
            else:
                assert len(column_names) > 0
                # BUGFIX: pair each value with its column via enumerate();
                # raises IndexError when column_names & value count mismatch.
                items = [(column_names[index].lower(), value.lower())
                         for index, value in enumerate(line.strip().split())]
                # combine [('id',99), ('name', 'foobar'), ('state', 'running')]
                result.append(dict(items))
        return result

    # Specialized list calls can just call self.virsh.dom_list() directly
    @property  # behave like attribute for easy passing to XML handling methods
    def xmlstr(self):
        result = self.virsh.dumpxml(self.service_name)
        return result.stdout_text.strip()
from typing import Optional
import databases
import sqlalchemy
import ormar
from tests.settings import DATABASE_URL
# Shared test database connection and SQLAlchemy metadata for the models below.
database = databases.Database(DATABASE_URL)
metadata = sqlalchemy.MetaData()
class BaseMeta(ormar.ModelMeta):
    # Common Meta so both models share one metadata/database pair.
    metadata = metadata
    database = database
class Author(ormar.Model):
    """Author model; one author has many books via the reverse relation."""
    class Meta(BaseMeta):
        tablename = "authors"
    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)
class Book(ormar.Model):
    """Book model with an optional foreign key back to :class:`Author`."""

    class Meta(BaseMeta):
        tablename = "books"

    id: int = ormar.Integer(primary_key=True)
    author: Optional[Author] = ormar.ForeignKey(Author)
    title: str = ormar.String(max_length=100)
    year: int = ormar.Integer(nullable=True)
def METHOD_NAME():
    """An ``or_`` group spanning a relation resolves two actions and renders OR SQL."""
    group = ormar.or_(name="aa", books__title="bb")
    group.resolve(model_cls=Author)
    assert len(group.actions) == 2
    assert group.actions[0].target_model == Author
    assert group.actions[1].target_model == Book
    prefix = group.actions[1].table_prefix
    compiled = str(group.get_text_clause().compile(compile_kwargs={"literal_binds": True}))
    assert compiled == f"(authors.name = 'aa' OR {prefix}_books.title = 'bb')"
def test_and_group():
    """An ``and_`` group spanning a relation resolves two actions and renders AND SQL."""
    group = ormar.and_(name="aa", books__title="bb")
    group.resolve(model_cls=Author)
    assert len(group.actions) == 2
    assert group.actions[0].target_model == Author
    assert group.actions[1].target_model == Book
    prefix = group.actions[1].table_prefix
    compiled = str(group.get_text_clause().compile(compile_kwargs={"literal_binds": True}))
    assert compiled == f"(authors.name = 'aa' AND {prefix}_books.title = 'bb')"
def test_nested_and():
    """Two ``or_`` groups ANDed together become nested groups with no direct actions."""
    group = ormar.and_(
        ormar.or_(name="aa", books__title="bb"), ormar.or_(name="cc", books__title="dd")
    )
    group.resolve(model_cls=Author)
    assert not group.actions
    assert len(group._nested_groups) == 2
    prefix = group._nested_groups[0].actions[1].table_prefix
    compiled = str(group.get_text_clause().compile(compile_kwargs={"literal_binds": True}))
    expected = (
        f"((authors.name = 'aa' OR {prefix}_books.title = 'bb') AND "
        f"(authors.name = 'cc' OR {prefix}_books.title = 'dd'))"
    )
    assert compiled == expected
def test_nested_group_and_action():
    """A nested ``or_`` group can be ANDed with a plain keyword filter."""
    group = ormar.and_(ormar.or_(name="aa", books__title="bb"), books__title="dd")
    group.resolve(model_cls=Author)
    assert len(group.actions) == 1
    assert len(group._nested_groups) == 1
    prefix = group._nested_groups[0].actions[1].table_prefix
    compiled = str(group.get_text_clause().compile(compile_kwargs={"literal_binds": True}))
    expected = (
        f"((authors.name = 'aa' OR {prefix}_books.title = 'bb') AND "
        f"{prefix}_books.title = 'dd')"
    )
    assert compiled == expected
def test_deeply_nested_or():
    """Three nesting levels: an OR of two ANDs, each AND made of two OR groups."""
    group = ormar.or_(
        ormar.and_(
            ormar.or_(name="aa", books__title="bb"),
            ormar.or_(name="cc", books__title="dd"),
        ),
        ormar.and_(
            ormar.or_(books__year__lt=1900, books__title="11"),
            ormar.or_(books__year__gt="xx", books__title="22"),
        ),
    )
    group.resolve(model_cls=Author)
    assert not group.actions
    assert len(group._nested_groups) == 2
    assert len(group._nested_groups[0]._nested_groups) == 2
    prefix = group._nested_groups[0]._nested_groups[0].actions[1].table_prefix
    actual = str(
        group.get_text_clause().compile(compile_kwargs={"literal_binds": True})
    )
    expected = (
        f"(((authors.name = 'aa' OR {prefix}_books.title = 'bb') AND "
        f"(authors.name = 'cc' OR {prefix}_books.title = 'dd')) "
        f"OR (({prefix}_books.year < 1900 OR {prefix}_books.title = '11') AND"
        f" ({prefix}_books.year > 'xx' OR {prefix}_books.title = '22')))"
    )
    # Compare ignoring newlines the SQL compiler may insert.
    assert actual.replace("\n", "") == expected.replace("\n", "")
def test_one_model_group():
    """A flat ``and_`` on a single model yields two actions and no nested groups."""
    group = ormar.and_(year__gt=1900, title="bb")
    group.resolve(model_cls=Book)
    assert len(group.actions) == 2
    assert not group._nested_groups
def test_one_model_nested_group():
    """ANDing two single-model ``or_`` groups keeps both as nested groups."""
    group = ormar.and_(
        ormar.or_(year__gt=1900, title="bb"), ormar.or_(year__lt=1800, title="aa")
    )
    group.resolve(model_cls=Book)
    assert not group.actions
    assert len(group._nested_groups) == 2
def test_one_model_with_group():
    """A group and a bare keyword filter can be mixed inside one ``or_``."""
    group = ormar.or_(ormar.and_(year__gt=1900, title="bb"), title="uu")
    group.resolve(model_cls=Book)
    assert len(group.actions) == 1
    assert len(group._nested_groups) == 1
5,821 | run | import os
import signal
import datetime
import time
import renderdoc as rd
from . import util
from .logging import log
class TargetControl():
    """Manages a renderdoc target-control connection to one injected process.

    Ticks the connection collecting captures and child-process notifications,
    and can optionally kill the target process when the control loop ends.
    """

    def __init__(self, ident: int, host="localhost", username="testrunner", force=True, timeout=None, exit_kill=True):
        """
        Creates a target control manager for a given ident
        :param ident: The ident to connect to.
        :param host: The hostname.
        :param username: The username to use when connecting.
        :param force: Whether to force the connection.
        :param timeout: The timeout in seconds before aborting the run.
        :param exit_kill: Whether to kill the process when the control loop ends.
        """
        self._pid = 0
        self._captures = []
        self._children = []
        self.control = rd.CreateTargetControl(host, ident, username, force)
        self._timeout = timeout
        if self._timeout is None:
            self._timeout = 60  # default: abort the control loop after one minute
        self._exit_kill = exit_kill
        if self.control is None:
            raise RuntimeError("Couldn't connect target control")
        self._pid = self.control.GetPID()

    def pid(self):
        """Return the PID of the connected application."""
        return self._pid

    def captures(self):
        """Return a list of renderdoc.NewCaptureData with captures made."""
        return self._captures

    def children(self):
        """Return a list of renderdoc.NewChildData with any child processes created."""
        return self._children

    def queue_capture(self, frame: int, num=1):
        """
        Queue a frame to make a capture of.
        :param frame: The frame number to capture.
        :param num: The number of frames
        """
        if self.control is not None:
            self.control.QueueCapture(frame, num)

    def METHOD_NAME(self, keep_running):
        """
        Runs a loop ticking the target control. The callback is called each time and
        can be used to determine if the loop should keep running. The default callback
        continues running until at least one capture has been made.
        Either way, if the target application closes and the target control connection
        is lost, the loop exits and the function returns.
        :param keep_running: A callback function to call each tick. Returns ``True`` if
          the loop should continue, or ``False`` otherwise.
        """
        if self.control is None:
            return
        start_time = datetime.datetime.now(datetime.timezone.utc)
        while keep_running(self):
            # NOTE(review): the timeout is only checked *between* messages, so a
            # ReceiveMessage call that blocks longer than the timeout delays the
            # abort — confirm ReceiveMessage(None)'s blocking behaviour.
            msg: rd.TargetControlMessage = self.control.ReceiveMessage(None)
            if (datetime.datetime.now(datetime.timezone.utc) - start_time).total_seconds() > self._timeout:
                log.error("Timed out")
                break
            # If we got a graceful or non-graceful shutdown, break out of the loop
            if (msg.type == rd.TargetControlMessageType.Disconnected or
                    not self.control.Connected()):
                break
            # If we got a new capture, add it to our list
            if msg.type == rd.TargetControlMessageType.NewCapture:
                self._captures.append(msg.newCapture)
                continue
            # Similarly for a new child
            if msg.type == rd.TargetControlMessageType.NewChild:
                self._children.append(msg.newChild)
                continue
        # Shut down the connection
        self.control.Shutdown()
        self.control = None
        # If we should make sure the application is killed when we exit, do that now
        if self._exit_kill:
            # Try 5 times to kill the application. This may fail if the application exited already
            for attempt in range(5):
                try:
                    os.kill(self._pid, signal.SIGTERM)
                    time.sleep(1)
                    # First successful SIGTERM ends the retry loop.
                    return
                except Exception:
                    # Ignore errors killing the program
                    continue
def run_executable(exe: str, cmdline: str,
                   workdir="", envmods=None, cappath=None,
                   opts=None):
    """
    Runs an executable with RenderDoc injected, and returns the control ident.
    Throws a RuntimeError if the execution failed for any reason.
    :param exe: The executable to run.
    :param cmdline: The command line to pass.
    :param workdir: The working directory.
    :param envmods: Environment modifications to apply.
    :param cappath: The directory to output captures in.
    :param opts: An instance of renderdoc.CaptureOptions; defaults to
        renderdoc's default capture options.
    :return: The target control ident of the launched process.
    """
    if envmods is None:
        envmods = []
    if cappath is None:
        cappath = util.get_tmp_path('capture')
    if opts is None:
        # Resolve the default at call time: the original evaluated
        # rd.GetDefaultCaptureOptions() once at definition time, sharing one
        # mutable options object across every invocation.
        opts = rd.GetDefaultCaptureOptions()
    wait_for_exit = False
    log.print("Running exe:'{}' cmd:'{}' in dir:'{}' with env:'{}'".format(exe, cmdline, workdir, envmods))
    # Execute the test program
    res = rd.ExecuteAndInject(exe, workdir, cmdline, envmods, cappath, opts, wait_for_exit)
    if res.result != rd.ResultCode.Succeeded:
        raise RuntimeError("Couldn't launch program: {}".format(str(res.result)))
    return res.ident
def run_and_capture(exe: str, cmdline: str, frame: int, *, frame_count=1, captures_expected=None, capture_name=None, opts=None,
                    timeout=None, logfile=None):
    """
    Helper function to run an executable with a command line, capture a particular frame, and exit.
    This will raise a RuntimeError if anything goes wrong, otherwise it will return the path of the
    capture that was generated.
    :param exe: The executable to run.
    :param cmdline: The command line to pass.
    :param frame: The frame to capture.
    :param frame_count: The number of frames to capture.
    :param captures_expected: How many captures must be made; defaults to ``frame_count``.
    :param capture_name: The name to use creating the captures
    :param opts: The capture options to use; defaults to renderdoc's default capture options.
    :param timeout: The timeout to wait before killing the process if no capture has happened.
    :param logfile: The log file output to include in the test log.
    :return: The path of the generated capture.
    :rtype: str
    """
    if capture_name is None:
        capture_name = 'capture'
    if captures_expected is None:
        captures_expected = frame_count
    if opts is None:
        # Call-time default instead of a shared def-time
        # rd.GetDefaultCaptureOptions() object (see run_executable).
        opts = rd.GetDefaultCaptureOptions()
    control = TargetControl(run_executable(exe, cmdline, cappath=util.get_tmp_path(capture_name), opts=opts), timeout=timeout)
    log.print("Queuing capture of frame {}..{} with timeout of {}".format(frame, frame+frame_count, "default" if timeout is None else timeout))
    # Capture frame
    control.queue_capture(frame, frame_count)
    # Run until we have all expected captures (probably just 1). If the program
    # exits or times out we will also stop, of course
    control.METHOD_NAME(keep_running=lambda x: len(x.captures()) < captures_expected)
    captures = control.captures()
    if logfile is not None and os.path.exists(logfile):
        log.inline_file('Process output', logfile, with_stdout=True)
    if len(captures) != captures_expected:
        if len(captures) == 0:
            raise RuntimeError("No capture made in program")
        # BUGFIX: report captures_expected (what was actually required) rather
        # than frame_count — the two differ when the caller overrides it.
        raise RuntimeError("Expected {} captures, but only got {}".format(captures_expected, len(captures)))
    return captures[0].path
5,822 | test lock without timeout | from unittest import TestCase
import attr
from unittest.mock import call, patch
from corehq.util.metrics.tests.utils import capture_metrics
from ..lockmeter import MeteredLock
class TestMeteredLock(TestCase):
    """Tests for MeteredLock, a wrapper emitting metrics and traces around a lock."""

    def test_initially_not_locked(self):
        """Merely wrapping a lock must not acquire it."""
        fake = FakeLock()
        MeteredLock(fake, "test")
        self.assertFalse(fake.locked)

    def test_acquire(self):
        """acquire() locks the wrapped lock and records an acquire_time metric."""
        fake = FakeLock()
        lock = MeteredLock(fake, "test")
        with capture_metrics() as metrics:
            lock.acquire()
        self.assertTrue(fake.locked)
        self.assertEqual(1, len(metrics.list("commcare.lock.acquire_time")), metrics)

    def test_not_acquired(self):
        """A failed non-blocking acquire still records acquire_time."""
        fake = FakeLock()
        lock = MeteredLock(fake, "test")
        with capture_metrics() as metrics:
            self.assertFalse(lock.acquire(blocking=False))
        self.assertEqual(1, len(metrics.list("commcare.lock.acquire_time")), metrics)

    def test_release(self):
        """release() unlocks and records how long the lock was held."""
        fake = FakeLock()
        lock = MeteredLock(fake, "test")
        lock.acquire()
        with capture_metrics() as metrics:
            lock.release()
        self.assertFalse(fake.locked)
        self.assertEqual(1, len(metrics.list("commcare.lock.locked_time")), metrics)

    def test_release_not_locked(self):
        """Releasing a never-acquired lock must not emit a locked_time histogram."""
        fake = FakeLock()
        lock = MeteredLock(fake, "test")
        with patch("corehq.util.metrics.metrics_histogram") as counter:
            lock.release()
        self.assertFalse(fake.locked)
        counter.assert_not_called()

    def test_lock_as_context_manager(self):
        """``with lock:`` acquires on entry, releases on exit, records both metrics."""
        fake = FakeLock()
        lock = MeteredLock(fake, "test")
        with capture_metrics() as metrics:
            with lock:
                self.assertTrue(fake.locked)
            self.assertFalse(fake.locked)
        self.assertEqual(1, len(metrics.list("commcare.lock.acquire_time")), metrics)
        self.assertEqual(1, len(metrics.list("commcare.lock.locked_time")), metrics)

    def test_release_failed(self):
        """release_failed() emits its dedicated metric tagged with the lock name."""
        lock = MeteredLock(FakeLock(), "test")
        with capture_metrics() as metrics:
            lock.release_failed()
        self.assertEqual(1, len(metrics.list("commcare.lock.release_failed", lock_name='test')), metrics)

    def test_degraded(self):
        """degraded() emits its dedicated metric tagged with the lock name."""
        lock = MeteredLock(FakeLock(), "test")
        with capture_metrics() as metrics:
            lock.degraded()
        self.assertEqual(1, len(metrics.list("commcare.lock.degraded", lock_name='test')), metrics)

    def test_released_after_timeout(self):
        """A negative timeout means the lock expired before release; metric emitted."""
        lock = MeteredLock(FakeLock(timeout=-1), "test")
        lock.acquire()
        with capture_metrics() as metrics:
            lock.release()
        self.assertEqual(1, len(metrics.list("commcare.lock.released_after_timeout", lock_name='test')), metrics)

    def METHOD_NAME(self):
        """A lock object with no ``timeout`` attribute can still be acquired."""
        fake = FakeLock()
        del fake.timeout
        assert not hasattr(fake, "timeout")
        lock = MeteredLock(fake, "test")
        lock.acquire()  # should not raise

    def test_acquire_trace(self):
        """acquire() opens an acquire span then starts a long-lived locked span."""
        fake = FakeLock()
        lock = MeteredLock(fake, "test")
        with patch("corehq.util.metrics.lockmeter.tracer") as tracer:
            lock.acquire()
            self.assertListEqual(tracer.mock_calls, [
                call.trace("commcare.lock.acquire", resource="key"),
                call.trace().__enter__(),
                call.trace().__enter__().set_tags({
                    "key": "key",
                    "name": "test",
                    "acquired": "true",
                }),
                call.trace().__exit__(None, None, None),
                call.trace("commcare.lock.locked", resource="key"),
                call.trace().set_tags({"key": "key", "name": "test"}),
            ])

    def test_release_trace(self):
        """release() finishes the locked span opened by acquire()."""
        fake = FakeLock()
        lock = MeteredLock(fake, "test")
        with patch("corehq.util.metrics.lockmeter.tracer") as tracer:
            lock.acquire()
            tracer.reset_mock()
            lock.release()
            self.assertListEqual(tracer.mock_calls, [call.trace().finish()])

    def test_del_trace(self):
        """Deleting an unreleased lock tags and finishes the locked span."""
        fake = FakeLock()
        lock = MeteredLock(fake, "test")
        with patch("corehq.util.metrics.lockmeter.tracer") as tracer:
            lock.acquire()
            tracer.reset_mock()
            lock.__del__()
            self.assertListEqual(tracer.mock_calls, [
                call.trace().set_tag("deleted", "not_released"),
                call.trace().finish(),
            ])

    def test_acquire_untracked(self):
        """With track_unreleased=False no locked span is started on acquire."""
        fake = FakeLock()
        lock = MeteredLock(fake, "test", track_unreleased=False)
        with patch("corehq.util.metrics.lockmeter.tracer") as tracer:
            lock.acquire()
            self.assertListEqual(tracer.mock_calls, [
                call.trace("commcare.lock.acquire", resource="key"),
                call.trace().__enter__(),
                call.trace().__enter__().set_tags({
                    "key": "key",
                    "name": "test",
                    "acquired": "true",
                }),
                call.trace().__exit__(None, None, None),
            ])

    def test_release_untracked(self):
        """With track_unreleased=False release() touches no spans."""
        fake = FakeLock()
        lock = MeteredLock(fake, "test", track_unreleased=False)
        with patch("corehq.util.metrics.lockmeter.tracer") as tracer:
            lock.acquire()
            tracer.reset_mock()
            lock.release()
            self.assertListEqual(tracer.mock_calls, [])

    def test_del_untracked(self):
        """With track_unreleased=False deletion touches no spans."""
        fake = FakeLock()
        lock = MeteredLock(fake, "test", track_unreleased=False)
        with patch("corehq.util.metrics.lockmeter.tracer") as tracer:
            lock.acquire()
            tracer.reset_mock()
            lock.__del__()
            self.assertListEqual(tracer.mock_calls, [])
@attr.s
class FakeLock(object):
    """Minimal stand-in for a lock object as consumed by MeteredLock."""

    # Class-level flag; acquire/release set an instance attribute shadowing it.
    locked = False
    name = attr.ib(default="key")
    timeout = attr.ib(default=None)

    def acquire(self, blocking=True):
        self.locked = True
        # Returning ``blocking`` makes a non-blocking attempt report failure
        # (False), which test_not_acquired relies on.
        return blocking

    def release(self):
        self.locked = False
5,823 | test write header given fcontext | # -*- coding: utf-8 -*-
# Copyright 2017 Workiva
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from io import BytesIO
from struct import unpack_from
from thrift.protocol.TProtocol import TProtocolException
from frugal.context import FContext
from frugal.util.headers import _Headers
class TestHeaders(unittest.TestCase):
    """Tests for frugal header (de)serialization via the internal _Headers codec."""

    def setUp(self):
        self.headers = _Headers()

    def METHOD_NAME(self):
        """Serializing an FContext's request headers yields the expected frame size."""
        ctx = FContext("corrId")
        # Manually set the op id to avoid changes to global op id
        ctx._set_op_id(0)
        expected = bytearray(b'\x00\x00\x00\x00 \x00\x00\x00\x05_opid\x00\x00'
                             b'\x00\x010\x00\x00\x00\x04_cid\x00\x00\x00\x06'
                             b'corrId\x00\x00\x00\x08_timeout\x00\x00\x00'
                             b'\x044000')
        buff = self.headers._write_to_bytearray(ctx.get_request_headers())
        # NOTE(review): only lengths are compared, presumably because header
        # serialization order may vary — confirm before tightening.
        self.assertEqual(len(expected), len(buff))

    def test_read_throws_bad_version(self):
        """A nonzero protocol version byte must raise BAD_VERSION."""
        buff = bytearray(b'\x01\x00\x00\x00\x00')
        with self.assertRaises(TProtocolException) as cm:
            self.headers._read(BytesIO(buff))
        self.assertEqual(TProtocolException.BAD_VERSION, cm.exception.type)
        self.assertEqual("Wrong Frugal version. Found 1, wanted 0.",
                         str(cm.exception))

    def test_read(self):
        """Reading a well-formed frame returns its header key/value pairs."""
        buff = bytearray(b'\x00\x00\x00\x00 \x00\x00\x00\x05_opid\x00\x00\x00'
                         b'\x010\x00\x00\x00\x04_cid\x00\x00\x00\x06corrId')
        headers = self.headers._read(BytesIO(buff))
        self.assertEqual("0", headers["_opid"])
        self.assertEqual("corrId", headers["_cid"])

    def test_write_read(self):
        """Round-trip: headers written then read back compare equal."""
        context = FContext("corrId")
        context.set_request_header("foo", "bar")
        expected = context.get_request_headers()
        buff = self.headers._write_to_bytearray(expected)
        actual = self.headers._read(BytesIO(buff))
        self.assertEqual(expected["_opid"], actual["_opid"])
        self.assertEqual(expected["_cid"], actual["_cid"])
        self.assertEqual(expected["foo"], actual["foo"])

    def test_decode_from_frame_throws_fprotocol_exception_frame_too_short(self):
        """Frames shorter than the 5-byte header must raise INVALID_DATA."""
        frame = bytearray(b'\x00')
        with self.assertRaises(TProtocolException) as cm:
            self.headers.decode_from_frame(frame)
        self.assertEqual(TProtocolException.INVALID_DATA, cm.exception.type)
        self.assertEqual("Invalid frame size: 1", str(cm.exception))

    def test_decode_from_frame_throws_bad_version(self):
        """decode_from_frame enforces the version byte like _read does."""
        frame = bytearray(b'\x01\x00\x00\x00\x00')
        with self.assertRaises(TProtocolException) as cm:
            self.headers.decode_from_frame(frame)
        self.assertEqual(TProtocolException.BAD_VERSION, cm.exception.type)
        self.assertEqual("Wrong Frugal version. Found 1, wanted 0.",
                         str(cm.exception))

    def test_decode_from_frame_reads_pairs(self):
        """decode_from_frame returns the key/value pairs of a valid frame."""
        buff = bytearray(b'\x00\x00\x00\x00 \x00\x00\x00\x05_opid\x00\x00\x00'
                         b'\x010\x00\x00\x00\x04_cid\x00\x00\x00\x06corrId')
        headers = self.headers.decode_from_frame(buff)
        self.assertEqual("0", headers["_opid"])
        self.assertEqual("corrId", headers["_cid"])

    def test_read_pairs(self):
        """_read_pairs parses the name/value section between two offsets."""
        buff = bytearray(b'\x00\x00\x00\x00 \x00\x00\x00\x05_opid\x00\x00\x00'
                         b'\x010\x00\x00\x00\x04_cid\x00\x00\x00\x06corrId')
        size = unpack_from('!I', buff[1:5])[0]
        headers = self.headers._read_pairs(buff, 5, size + 5)
        self.assertEqual("0", headers["_opid"])
        self.assertEqual("corrId", headers["_cid"])

    def test_read_pars_bad_key_throws_error(self):
        """A name length that overruns the frame raises INVALID_DATA."""
        buff = bytearray(b'\x00\x00\x00\x00 \x00\x00\x00\x20_opid\x00\x00\x00'
                         b'\x010\x00\x00\x00\x04_cid\x00\x00\x00\x06corrId')
        size = unpack_from('!I', buff[1:5])[0]
        with self.assertRaises(TProtocolException) as cm:
            self.headers._read_pairs(buff, 5, size + 5)
        self.assertEqual(TProtocolException.INVALID_DATA, cm.exception.type)
        self.assertEqual("invalid protocol header name size: 32",
                         str(cm.exception))

    def test_read_pars_bad_value_throws(self):
        """A value length that overruns the frame raises INVALID_DATA."""
        buff = bytearray(b'\x00\x00\x00\x00 \x00\x00\x00\x05_opid\x00\x00\x01'
                         b'\x000\x00\x00\x00\x04_cid\x00\x00\x00\x06corrId')
        size = unpack_from('!I', buff[1:5])[0]
        with self.assertRaises(TProtocolException) as cm:
            self.headers._read_pairs(buff, 5, size + 5)
        self.assertEqual(TProtocolException.INVALID_DATA, cm.exception.type)
        self.assertEqual("invalid protocol header value size: 256",
                         str(cm.exception))

    def test_encode_decode_utf8(self):
        """Non-ASCII header names and values survive an encode/decode round-trip."""
        headers = {
            u'Đ¥ÑØ': u'δάüΓ',
            u'good\u00F1ight': u'moo\u00F1',
        }
        encoded_headers = _Headers._write_to_bytearray(headers)
        decoded_headers = _Headers.decode_from_frame(encoded_headers)
        self.assertEqual(headers, decoded_headers)
5,824 | test can get labels | # Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
import io
from logging import Logger
from pathlib import Path
from typing import Tuple
import pytest
from cvat_sdk import Client, models
from cvat_sdk.api_client import exceptions
from cvat_sdk.core.proxies.projects import Project
from cvat_sdk.core.proxies.tasks import ResourceType, Task
from cvat_sdk.core.utils import filter_dict
from PIL import Image
from .util import make_pbar
class TestProjectUsecases:
    """End-to-end tests of cvat_sdk project operations against a live server."""

    @pytest.fixture(autouse=True)
    def setup(
        self,
        tmp_path: Path,
        fxt_login: Tuple[Client, str],
        fxt_logger: Tuple[Logger, io.StringIO],
        fxt_stdout: io.StringIO,
    ):
        """Wire the shared client, logger capture and stdout capture into each test."""
        self.tmp_path = tmp_path
        logger, self.logger_stream = fxt_logger
        self.stdout = fxt_stdout
        self.client, self.user = fxt_login
        self.client.logger = logger
        api_client = self.client.api_client
        # Redirect every configured api-client logger to the captured one.
        for k in api_client.configuration.logger:
            api_client.configuration.logger[k] = logger

    @pytest.fixture
    def fxt_new_task(self, fxt_image_file: Path):
        """A fresh one-image task with 'car' and 'person' labels."""
        task = self.client.tasks.create_from_data(
            spec={
                "name": "test_task",
                "labels": [{"name": "car"}, {"name": "person"}],
            },
            resource_type=ResourceType.LOCAL,
            resources=[str(fxt_image_file)],
            data_params={"image_quality": 80},
        )
        return task

    @pytest.fixture
    def fxt_task_with_shapes(self, fxt_new_task: Task):
        """fxt_new_task plus one rectangle annotation on frame 0."""
        labels = fxt_new_task.get_labels()
        fxt_new_task.set_annotations(
            models.LabeledDataRequest(
                shapes=[
                    models.LabeledShapeRequest(
                        frame=0,
                        label_id=labels[0].id,
                        type="rectangle",
                        points=[1, 1, 2, 2],
                    ),
                ],
            )
        )
        return fxt_new_task

    @pytest.fixture
    def fxt_new_project(self):
        """A project with 'car' and 'person' labels and no tasks."""
        project = self.client.projects.create(
            spec={
                "name": "test_project",
                "labels": [{"name": "car"}, {"name": "person"}],
            },
        )
        return project

    @pytest.fixture
    def fxt_empty_project(self):
        """A project with no labels and no tasks."""
        return self.client.projects.create(spec={"name": "test_project"})

    @pytest.fixture
    def fxt_project_with_shapes(self, fxt_task_with_shapes: Task):
        """A project whose labels mirror the annotated task, with that task attached."""
        project = self.client.projects.create(
            spec=models.ProjectWriteRequest(
                name="test_project",
                labels=[
                    models.PatchedLabelRequest(
                        # id/has_parent are server-assigned and rejected on create
                        **filter_dict(label.to_dict(), drop=["id", "has_parent"])
                    )
                    for label in fxt_task_with_shapes.get_labels()
                ],
            )
        )
        fxt_task_with_shapes.update(models.PatchedTaskWriteRequest(project_id=project.id))
        project.fetch()
        return project

    @pytest.fixture
    def fxt_backup_file(self, fxt_project_with_shapes: Project):
        """A backup zip of fxt_project_with_shapes on disk."""
        backup_path = self.tmp_path / "backup.zip"
        fxt_project_with_shapes.download_backup(str(backup_path))
        yield backup_path

    def test_can_create_empty_project(self):
        project = self.client.projects.create(spec=models.ProjectWriteRequest(name="test project"))
        assert project.id != 0
        assert project.name == "test project"

    def test_can_create_project_with_attribute_with_blank_default(self):
        project = self.client.projects.create(
            spec=models.ProjectWriteRequest(
                name="test project",
                labels=[
                    models.PatchedLabelRequest(
                        name="text",
                        attributes=[
                            models.AttributeRequest(
                                name="text",
                                mutable=True,
                                input_type=models.InputTypeEnum("text"),
                                values=[],
                                default_value="",
                            )
                        ],
                    )
                ],
            )
        )
        labels = project.get_labels()
        assert labels[0].attributes[0].default_value == ""

    def test_can_create_project_from_dataset(self, fxt_coco_dataset: Path):
        pbar_out = io.StringIO()
        pbar = make_pbar(file=pbar_out)
        project = self.client.projects.create_from_dataset(
            spec=models.ProjectWriteRequest(name="project with data"),
            dataset_path=fxt_coco_dataset,
            dataset_format="COCO 1.0",
            pbar=pbar,
        )
        assert project.get_tasks()[0].size == 1
        # The last carriage-return-separated progress line must show completion.
        assert "100%" in pbar_out.getvalue().strip("\r").split("\r")[-1]
        assert self.stdout.getvalue() == ""

    def test_can_retrieve_project(self, fxt_new_project: Project):
        project_id = fxt_new_project.id
        project = self.client.projects.retrieve(project_id)
        assert project.id == project_id
        assert self.stdout.getvalue() == ""

    def test_can_list_projects(self, fxt_new_project: Project):
        project_id = fxt_new_project.id
        projects = self.client.projects.list()
        assert any(p.id == project_id for p in projects)
        assert self.stdout.getvalue() == ""

    def test_can_update_project(self, fxt_new_project: Project):
        fxt_new_project.update(models.PatchedProjectWriteRequest(name="foo"))
        retrieved_project = self.client.projects.retrieve(fxt_new_project.id)
        assert retrieved_project.name == "foo"
        assert fxt_new_project.name == retrieved_project.name
        assert self.stdout.getvalue() == ""

    def test_can_delete_project(self, fxt_new_project: Project):
        fxt_new_project.remove()
        with pytest.raises(exceptions.NotFoundException):
            fxt_new_project.fetch()
        assert self.stdout.getvalue() == ""

    def test_can_get_tasks(self, fxt_project_with_shapes: Project):
        tasks = fxt_project_with_shapes.get_tasks()
        assert len(tasks) == 1
        assert tasks[0].project_id == fxt_project_with_shapes.id

    def METHOD_NAME(self, fxt_project_with_shapes: Project):
        """get_labels() returns exactly the labels the project was created with."""
        expected_labels = {"car", "person"}
        received_labels = fxt_project_with_shapes.get_labels()
        assert {obj.name for obj in received_labels} == expected_labels
        assert self.stdout.getvalue() == ""

    def test_can_download_backup(self, fxt_project_with_shapes: Project):
        pbar_out = io.StringIO()
        pbar = make_pbar(file=pbar_out)
        backup_path = self.tmp_path / "backup.zip"
        fxt_project_with_shapes.download_backup(str(backup_path), pbar=pbar)
        assert backup_path.stat().st_size > 0
        assert "100%" in pbar_out.getvalue().strip("\r").split("\r")[-1]
        assert self.stdout.getvalue() == ""

    def test_can_create_from_backup(self, fxt_backup_file: Path):
        pbar_out = io.StringIO()
        pbar = make_pbar(file=pbar_out)
        restored_project = self.client.projects.create_from_backup(fxt_backup_file, pbar=pbar)
        assert restored_project.get_tasks()[0].size == 1
        assert "100%" in pbar_out.getvalue().strip("\r").split("\r")[-1]
        assert self.stdout.getvalue() == ""

    def test_can_download_preview(self, fxt_project_with_shapes: Project):
        frame_encoded = fxt_project_with_shapes.get_preview()
        (width, height) = Image.open(frame_encoded).size
        assert width > 0 and height > 0
        assert self.stdout.getvalue() == ""
5,825 | test constructor no arg | # Project: MapServer
# Purpose: xUnit style Python mapscript tests of classObj
# Author: Sean Gillies, sgillies@frii.com
#
# ===========================================================================
# Copyright (c) 2004, Sean Gillies
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ===========================================================================
import unittest
import mapscript
from .testing import MapTestCase
class ClassObjTestCase(unittest.TestCase):
    """Tests for mapscript.classObj construction and attribute round-trips."""

    def METHOD_NAME(self):
        """A classObj created without a layer is owned, detached and style-less."""
        c = mapscript.classObj()
        assert c.thisown == 1
        assert c.layer is None
        assert c.numstyles == 0

    def testConstructorWithArg(self):
        """A classObj created from a layer records that layer."""
        parent = mapscript.layerObj()
        parent.name = 'foo'
        c = mapscript.classObj(parent)
        assert c.thisown == 1
        assert c.layer.name == parent.name
        assert c.numstyles == 0

    def testGetSetAttributes(self):
        """Each scalar attribute reads back exactly the value just assigned."""
        c = mapscript.classObj()
        for attribute, value in (
            ('keyimage', '/tmp/legend.png'),
            ('debug', mapscript.MS_TRUE),
            ('group', 'Group1'),
            ('maxscaledenom', 10000),
            ('minfeaturesize', 3),
            ('minscaledenom', 1000),
            ('name', 'Class1'),
            ('status', mapscript.MS_OFF),
            ('template', 'template.html'),
            ('title', 'Title1'),
        ):
            setattr(c, attribute, value)
            assert getattr(c, attribute) == value
class ClassCloningTestCase(unittest.TestCase):
    """Tests for classObj.clone()."""

    def testCloneClass(self):
        """check attributes of a cloned class"""
        original = mapscript.classObj()
        original.minscaledenom = 5.0
        duplicate = original.clone()
        assert duplicate.thisown == 1
        assert duplicate.minscaledenom == 5.0
class ClassIconTestCase(MapTestCase):
    """testing for bug 1250: legend icons for transparent inline pixmap symbols"""

    def testAlphaTransparentPixmap(self):
        """RGBA pixmap legend icon renders to PNG without error."""
        lo = self.map.getLayerByName('INLINE-PIXMAP-RGBA')
        co = lo.getClass(0)
        self.map.selectOutputFormat('PNG')
        im = co.createLegendIcon(self.map, lo, 48, 48)
        im.save('testAlphaTransparentPixmapIcon.png')

    def testAlphaTransparentPixmapPNG24(self):
        """RGBA pixmap legend icon renders to PNG24 without error."""
        lo = self.map.getLayerByName('INLINE-PIXMAP-RGBA')
        co = lo.getClass(0)
        self.map.selectOutputFormat('PNG24')
        im = co.createLegendIcon(self.map, lo, 48, 48)
        im.save('testAlphaTransparentPixmapIcon24.png')

    def testAlphaTransparentPixmapJPG(self):
        """RGBA pixmap legend icon renders to JPEG without error."""
        lo = self.map.getLayerByName('INLINE-PIXMAP-RGBA')
        co = lo.getClass(0)
        self.map.selectOutputFormat('JPEG')
        im = co.createLegendIcon(self.map, lo, 48, 48)
        im.save('testAlphaTransparentPixmapIcon.jpg')

    def testIndexedTransparentPixmap(self):
        """Palette (indexed) pixmap legend icon renders to PNG without error."""
        lo = self.map.getLayerByName('INLINE-PIXMAP-PCT')
        lo.type = mapscript.MS_LAYER_POINT
        co = lo.getClass(0)
        self.map.selectOutputFormat('PNG')
        im = co.createLegendIcon(self.map, lo, 32, 32)
        im.save('testIndexedTransparentPixmapIcon.png')

    def testIndexedTransparentPixmapJPG(self):
        """Palette (indexed) pixmap legend icon renders to JPEG without error."""
        lo = self.map.getLayerByName('INLINE-PIXMAP-PCT')
        lo.type = mapscript.MS_LAYER_POINT
        co = lo.getClass(0)
        self.map.selectOutputFormat('JPEG')
        im = co.createLegendIcon(self.map, lo, 32, 32)
        im.save('testIndexedTransparentPixmapIcon.jpg')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
5,826 | test augment batch point clouds and bounding | # Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.global_random_dropping_points import ( # noqa: E501
GlobalRandomDroppingPoints,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
class GlobalDropPointsTest(TestCase):
    """Tests for GlobalRandomDroppingPoints, a 3D preprocessing augmentation."""

    def test_augment_point_clouds_and_bounding_boxes(self):
        """A nonzero drop rate changes the point clouds."""
        add_layer = GlobalRandomDroppingPoints(drop_rate=0.5)
        point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
        bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
        inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
        outputs = add_layer(inputs)
        self.assertNotAllClose(inputs, outputs)

    def test_specific_augment_point_clouds_and_bounding_boxes(self):
        """Identical input frames receive the identical drop mask."""
        add_layer = GlobalRandomDroppingPoints(drop_rate=0.5)
        point_clouds = np.random.random(size=(1, 50, 2)).astype("float32")
        point_clouds = np.concatenate([point_clouds, point_clouds], axis=0)
        bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
        inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
        outputs = add_layer(inputs)
        self.assertNotAllClose(inputs, outputs)
        # The augmented point clouds in the first frame should be the same as
        # the augmented point clouds in the second frame.
        self.assertAllClose(outputs[POINT_CLOUDS][0], outputs[POINT_CLOUDS][1])

    def test_not_augment_point_clouds_and_bounding_boxes(self):
        """drop_rate=0 leaves the inputs untouched."""
        add_layer = GlobalRandomDroppingPoints(drop_rate=0.0)
        point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
        bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
        inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
        outputs = add_layer(inputs)
        self.assertAllClose(inputs, outputs)

    def test_drop_all_point_clouds(self):
        """drop_rate=1 zeroes every point."""
        add_layer = GlobalRandomDroppingPoints(drop_rate=1.0)
        point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
        bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
        inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
        outputs = add_layer(inputs)
        self.assertAllClose(inputs[POINT_CLOUDS] * 0.0, outputs[POINT_CLOUDS])

    def test_exclude_all_points(self):
        """Points whose class is excluded survive even a 100% drop rate."""
        add_layer = GlobalRandomDroppingPoints(drop_rate=1.0, exclude_classes=1)
        point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
        exclude_classes = np.ones(shape=(2, 50, 1)).astype("float32")
        point_clouds = np.concatenate([point_clouds, exclude_classes], axis=-1)
        bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
        inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
        outputs = add_layer(inputs)
        self.assertAllClose(inputs, outputs)

    def test_exclude_the_first_half_points(self):
        """Only points outside the excluded classes (the last 25) are dropped."""
        add_layer = GlobalRandomDroppingPoints(
            drop_rate=1.0, exclude_classes=[1, 2]
        )
        point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
        class_1 = np.ones(shape=(2, 10, 1)).astype("float32")
        class_2 = np.ones(shape=(2, 15, 1)).astype("float32") * 2
        classes = np.concatenate(
            [class_1, class_2, np.zeros(shape=(2, 25, 1)).astype("float32")],
            axis=1,
        )
        point_clouds = np.concatenate([point_clouds, classes], axis=-1)
        bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
        inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
        outputs = add_layer(inputs)
        self.assertAllClose(
            inputs[POINT_CLOUDS][:, 25:, :] * 0.0,
            outputs[POINT_CLOUDS][:, 25:, :],
        )
        self.assertAllClose(
            inputs[POINT_CLOUDS][:, :25, :], outputs[POINT_CLOUDS][:, :25, :]
        )

    def METHOD_NAME(self):
        """The layer also accepts a batched (batch, frames, points, dims) input."""
        add_layer = GlobalRandomDroppingPoints(drop_rate=0.5)
        point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
        bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
        inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
        outputs = add_layer(inputs)
        self.assertNotAllClose(inputs, outputs)
5,827 | try downloading | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import parlai.core.build_data as build_data
import os
import subprocess
import shutil
import csv
import time
from parlai.core.build_data import DownloadableFile
from parlai.utils.io import PathManager
# Archive of the NarrativeQA repository (holds the question/summary/document
# CSVs). The story texts themselves are fetched separately in download_stories().
RESOURCES = [
    DownloadableFile(
        'https://github.com/deepmind/narrativeqa/archive/master.zip',
        'narrative_qa.zip',
        'd9fc92d5f53409f845ba44780e6689676d879c739589861b4805064513d1476b',
    )
]
def get_rows_for_set(reader, req_set):
    """Return the rows whose 'set' column (whitespace-stripped) equals *req_set*."""
    matching = []
    for row in reader:
        if row['set'].strip() == req_set:
            matching.append(row)
    return matching
def read_csv_to_dict_list(filepath):
    """Open *filepath* and return a ``(csv.DictReader, file handle)`` pair.

    The caller owns the returned file handle and must close it once the
    reader has been consumed.
    """
    # newline='' is the mode the csv module documents for reader/writer file
    # objects: it stops universal-newline translation from corrupting fields
    # that contain embedded line breaks.
    f = open(filepath, 'r', newline='')
    return csv.DictReader(f, delimiter=','), f
def write_dict_list_to_csv(dict_list, filepath):
    """Write *dict_list* (dicts sharing one key set) to *filepath* as CSV.

    The header row is taken from the keys of the first dict. An empty list is
    a no-op (previously this raised IndexError on ``dict_list[0]``).
    """
    if not dict_list:
        return
    keys = list(dict_list[0].keys())
    with PathManager.open(filepath, 'w') as f:
        writer = csv.DictWriter(f, fieldnames=keys)
        writer.writeheader()
        # writerows is the bulk form of the original per-row loop.
        writer.writerows(dict_list)
def divide_csv_into_sets(csv_filepath, sets=('train', 'valid', 'test')):
    """Split one CSV file into per-set files named ``<name>_<set>.csv``.

    Reads the source rows exactly once. The previous implementation called
    ``fh.seek(0)`` on a partially consumed csv.DictReader for every set,
    which made later passes re-parse the header line as a data row (it was
    only filtered out by accident because its 'set' value is the literal
    string "set"), and kept the handle open across all the writes.
    """
    reader, fh = read_csv_to_dict_list(csv_filepath)
    try:
        all_rows = list(reader)
    finally:
        fh.close()
    base_filename = os.path.basename(csv_filepath).split('.')[0]
    base_path = os.path.dirname(csv_filepath)
    for s in sets:
        path = os.path.join(base_path, base_filename + '_' + s + '.csv')
        write_dict_list_to_csv(get_rows_for_set(all_rows, s), path)
def make_folders(base_path, sets=('train', 'valid', 'test')):
    """Ensure a sub-directory exists under *base_path* for every set name."""
    for split_name in sets:
        split_dir = os.path.join(base_path, split_name)
        if not os.path.exists(split_dir):
            os.mkdir(split_dir)
def move_files(base_path, sets=('train', 'valid', 'test')):
    """Move every ``<name>_<set>.csv`` in *base_path* into ``<set>/<name>.csv``."""
    for entry in os.listdir(base_path):
        for split_name in sets:
            suffix = '_' + split_name + '.csv'
            if entry.endswith(suffix):
                src = os.path.join(base_path, entry)
                dst = os.path.join(
                    base_path, split_name, entry[: -len(suffix)] + '.csv'
                )
                shutil.move(src, dst)
# Returns false unless the story was already downloaded and
# has appropriate size
def METHOD_NAME(directory, row):
    """Download one story if it is missing or looks too small.

    Returns True when a previously downloaded story already exceeds the size
    threshold (nothing to do); returns False after attempting a (re)download,
    so the caller can re-invoke this function to verify the result.
    """
    document_id, kind, story_url = row['document_id'], row['kind'], row['story_url']
    story_path = os.path.join(directory, document_id + '.content')
    actual_story_size = 0
    if os.path.exists(story_path):
        with PathManager.open(story_path, 'rb') as f:
            actual_story_size = len(f.read())
    # 19000 bytes is the heuristic threshold for "looks like a full story".
    if actual_story_size <= 19000:
        if kind == 'gutenberg':
            # presumably a politeness delay for Project Gutenberg — TODO confirm
            time.sleep(2)
        build_data.download(story_url, directory, document_id + '.content')
    else:
        return True
    # Some servers return gzipped content; detect it via `file` and unpack
    # in place so the next pass sees the decompressed story.
    file_type = subprocess.check_output(['file', '-b', story_path])
    file_type = file_type.decode('utf-8')
    if 'gzip compressed' in file_type:
        gz_path = os.path.join(directory, document_id + '.content.gz')
        shutil.move(story_path, gz_path)
        build_data.untar(gz_path)
    return False
def download_stories(path):
    """Download every story listed in ``documents.csv`` into ``<path>/tmp``.

    Each story is retried up to 5 extra times if METHOD_NAME keeps reporting
    it as incomplete (it returns False after each download attempt).
    """
    documents_csv = os.path.join(path, 'documents.csv')
    tmp_dir = os.path.join(path, 'tmp')
    build_data.make_dir(tmp_dir)
    with PathManager.open(documents_csv, 'r') as f:
        reader = csv.DictReader(f, delimiter=',')
        for row in reader:
            print("Downloading %s (%s)" % (row['wiki_title'], row['document_id']))
            finished = METHOD_NAME(tmp_dir, row)
            count = 0
            while not finished and count < 5:
                if count != 0:
                    print("Retrying (%d retries left)" % (5 - count - 1))
                finished = METHOD_NAME(tmp_dir, row)
                count += 1
def build(opt):
    """Download and lay out the NarrativeQA dataset under ``opt['datapath']``.

    Steps: fetch the repo archive, download the raw stories, split the
    summaries/questions/documents CSVs into train/valid/test, and move
    everything into its final ``narrative_qa`` directory structure.
    """
    dpath = os.path.join(opt['datapath'], 'NarrativeQA')
    version = None
    if not build_data.built(dpath, version_string=version):
        print('[building data: ' + dpath + ']')
        if build_data.built(dpath):
            # an older version exists, so remove these outdated files.
            build_data.remove_dir(dpath)
        build_data.make_dir(dpath)
        # Download the data.
        for downloadable_file in RESOURCES:
            downloadable_file.download_file(dpath)
        print('downloading stories now')
        base_path = os.path.join(dpath, 'narrativeqa-master')
        download_stories(base_path)
        # move from tmp to stories
        tmp_stories_path = os.path.join(base_path, 'tmp')
        new_stories_path = os.path.join(base_path, 'stories')
        shutil.move(tmp_stories_path, new_stories_path)
        # divide into train, valid and test for summaries
        summaries_csv_path = os.path.join(
            base_path, 'third_party', 'wikipedia', 'summaries.csv'
        )
        new_path = os.path.join(base_path, 'summaries.csv')
        shutil.move(summaries_csv_path, new_path)
        divide_csv_into_sets(new_path)
        # divide into sets for questions
        questions_path = os.path.join(base_path, 'qaps.csv')
        divide_csv_into_sets(questions_path)
        # divide into sets for documents
        documents_path = os.path.join(base_path, 'documents.csv')
        divide_csv_into_sets(documents_path)
        # move specific set's files into their set's folder
        make_folders(base_path)
        move_files(base_path)
        # move narrativeqa-master to narrative_qa
        new_path = os.path.join(dpath, 'narrative_qa')
        shutil.move(base_path, new_path)
        # mark the data as built
        build_data.mark_done(dpath, version_string=version)
5,828 | process failure and return | """
Integrates the computes together
"""
import warnings
from typing import TYPE_CHECKING, Any, Dict, Optional, Union
from qcelemental.models import AtomicInput, AtomicResult, FailedOperation, OptimizationResult
from .config import get_config
from .exceptions import InputError, RandomError
from .procedures import get_procedure
from .programs import get_program
from .util import compute_wrapper, environ_context, handle_output_metadata, model_wrapper
if TYPE_CHECKING:
try:
from pydantic.v1.main import BaseModel
except ImportError:
from pydantic.main import BaseModel
from qcelemental.models import AtomicResult
__all__ = ["compute", "compute_procedure"]
def METHOD_NAME(model, return_dict, raise_error):
    """Surface a FailedOperation according to the caller's flags.

    Raises InputError when *raise_error* is set, returns the dict form when
    *return_dict* is set, otherwise returns the model itself. Anything that
    is not a FailedOperation yields False so the caller proceeds normally.
    """
    if not isinstance(model, FailedOperation):
        return False
    if raise_error:
        raise InputError(model.error.error_message)
    return model.dict() if return_dict else model
def compute(
    input_data: Union[Dict[str, Any], "AtomicInput"],
    program: str,
    raise_error: bool = False,
    task_config: Optional[Dict[str, Any]] = None,
    local_options: Optional[Dict[str, Any]] = None,
    return_dict: bool = False,
) -> Union["AtomicResult", "FailedOperation", Dict[str, Any]]:
    """Executes a single CMS program given a QCSchema input.

    The full specification can be found at:
    http://molssi-qc-schema.readthedocs.io/en/latest/index.html#

    Parameters
    ----------
    input_data
        A QCSchema input specification in dictionary or model from QCElemental.models
    program
        The CMS program with which to execute the input.
    raise_error
        Determines if compute should raise an error or not.
    task_config
        A dictionary of local configuration options corresponding to a TaskConfig object.
        The retry count for RandomError is taken from here (``config.retries``).
    local_options
        Deprecated parameter, renamed to ``task_config``
    return_dict
        Returns a dict instead of qcelemental.models.AtomicResult

    Returns
    -------
    result
        AtomicResult, FailedOperation, or Dict representation of either object type
        A QCSchema representation of the requested output, type depends on return_dict key.
    """
    # Keep a copy so a failure before execution still has data to report.
    output_data = input_data.copy()  # lgtm [py/multiple-definition]
    with compute_wrapper(capture_output=False, raise_error=raise_error) as metadata:
        # Grab the executor and build the input model
        executor = get_program(program)
        # Build the model and validate
        input_data = model_wrapper(input_data, AtomicInput)
        # Build out task_config
        if task_config is None:
            task_config = {}
        if local_options:
            warnings.warn(
                "Using the `local_options` keyword argument is deprecated in favor of using `task_config`, "
                "in version 0.30.0 it will stop working.",
                category=FutureWarning,
                stacklevel=2,
            )
            task_config = {**local_options, **task_config}
        # Per-input overrides carried in extras take precedence over the
        # caller-supplied task_config.
        input_engine_options = input_data.extras.pop("_qcengine_local_config", {})
        task_config = {**task_config, **input_engine_options}
        config = get_config(task_config=task_config)
        # Set environment parameters and execute
        with environ_context(config=config):
            # Handle optional retries: only RandomError triggers a retry;
            # every other exception propagates immediately.
            for x in range(config.retries + 1):
                try:
                    output_data = executor.compute(input_data, config)
                    break
                except RandomError as e:
                    if x == config.retries:
                        raise e
                    else:
                        metadata["retries"] += 1
                except:
                    # NOTE(review): a bare except that immediately re-raises is
                    # a no-op and could be removed.
                    raise
    return handle_output_metadata(output_data, metadata, raise_error=raise_error, return_dict=return_dict)
def compute_procedure(
    input_data: Union[Dict[str, Any], "BaseModel"],
    procedure: str,
    raise_error: bool = False,
    task_config: Optional[Dict[str, str]] = None,
    local_options: Optional[Dict[str, str]] = None,
    return_dict: bool = False,
) -> Union["OptimizationResult", "FailedOperation", Dict[str, Any]]:
    """Runs a procedure (a collection of the quantum chemistry executions)

    Parameters
    ----------
    input_data : dict or qcelemental.models.OptimizationInput
        A JSON input specific to the procedure executed in dictionary or model from QCElemental.models
    procedure : {"geometric", "berny"}
        The name of the procedure to run
    raise_error : bool, optional
        Determines if compute should raise an error or not.
    task_config
        A dictionary of local configuration options corresponding to a TaskConfig object.
    local_options
        Deprecated parameter, renamed to ``task_config``
    return_dict : bool, optional, default True
        Returns a dict instead of qcelemental.models.AtomicInput

    Returns
    -------
    dict, OptimizationResult, FailedOperation
        A QC Schema representation of the requested output, type depends on return_dict key.
    """
    # Build out task_config
    if task_config is None:
        task_config = {}
    if local_options:
        # Fixed typo ("depreciated") so the warning matches compute()'s wording.
        warnings.warn(
            "Using the `local_options` keyword argument is deprecated in favor of using `task_config`, "
            "in version 0.30.0 it will stop working.",
            category=FutureWarning,
            stacklevel=2,
        )
        task_config = {**local_options, **task_config}
    output_data = input_data.copy()  # lgtm [py/multiple-definition]
    with compute_wrapper(capture_output=False, raise_error=raise_error) as metadata:
        # Grab the executor and build the input model
        executor = get_procedure(procedure)
        config = get_config(task_config=task_config)
        input_data = executor.build_input_model(input_data)
        # Create a base output data in case of errors
        output_data = input_data.copy()  # lgtm [py/multiple-definition]
        # Set environment parameters and execute
        with environ_context(config=config):
            output_data = executor.compute(input_data, config)
    return handle_output_metadata(output_data, metadata, raise_error=raise_error, return_dict=return_dict)
5,829 | plot for multiple json | """
Quick Plot
----------
A tool to have quick and simple visualization from your data.
It can be used as follows:
.. code-block:: console
$ deephyper-analytics quickplot nas_big_data/combo/exp_sc21/combo_1gpu_8_age/infos/results.csv
$ deephyper-analytics quickplot save/history/*.json --xy time val_r2
$ deephyper-analytics quickplot save/history/*.json --xy epochs val_r2
"""
import json
from datetime import datetime
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from deephyper.core.exceptions import DeephyperRuntimeError
# Shared matplotlib styling applied to every quickplot figure.
width = 8
height = width / 1.618  # golden-ratio aspect
fontsize = 18
matplotlib.rcParams.update(
    {
        "font.size": fontsize,
        "figure.figsize": (width, height),
        "figure.facecolor": "white",
        "savefig.dpi": 72,
        "figure.subplot.bottom": 0.125,
        "figure.edgecolor": "white",
        "xtick.labelsize": fontsize,
        "ytick.labelsize": fontsize,
    }
)
def add_subparser(subparsers):
    """Register the 'quickplot' sub-command and return its (name, handler) pair."""
    subparser_name = "quickplot"
    function_to_call = main
    quickplot_parser = subparsers.add_parser(
        subparser_name, help="Tool to generate a quick 2D plot from file."
    )
    quickplot_parser.add_argument("path", nargs="+", type=str)
    quickplot_parser.add_argument(
        "--xy",
        metavar="xy",
        type=str,
        nargs=2,
        default=[],
        help="name of x y variables in the CSV file.",
    )
    return subparser_name, function_to_call
def plot_for_single_csv(path: str, xy: list):
    """Scatter-plot two columns of a single results CSV file.

    :meta private:

    Args:
        path (str): Path to the CSV file.
        xy (list): Pair of column names for the x/y axes. An empty list
            defaults to ``["elapsed_sec", "objective"]``.

    Raises:
        DeephyperRuntimeError: if ``xy`` does not hold exactly two names.
    """
    if not xy:
        xy = ["elapsed_sec", "objective"]
    elif len(xy) != 2:
        raise DeephyperRuntimeError(
            "--xy must take two arguments such as '--xy elapsed_sec objective'"
        )
    x_col, y_col = xy
    df = pd.read_csv(path)
    plt.figure()
    plt.scatter(df[x_col], df[y_col], s=5, alpha=1.0)
    plt.xlabel(x_col)
    plt.ylabel(y_col)
    plt.grid()
    plt.tight_layout()
    plt.show()
def plot_for_single_json(path: str, xy: list):
    """Line-plot one training-history curve from a JSON file.

    :meta private:

    Args:
        path (str): Path to the history JSON file.
        xy (list): ``(x, y)`` keys; an empty list defaults to
            ``["epochs", "val_loss"]``. The special x key ``"epochs"``
            uses the curve index instead of a stored series.

    Raises:
        DeephyperRuntimeError: if ``xy`` does not hold exactly two names.
    """
    if not xy:
        xy = ["epochs", "val_loss"]
    elif len(xy) != 2:
        raise DeephyperRuntimeError(
            "--xy must take two arguments such as '--xy epochs val_loss'"
        )
    xlabel, ylabel = xy
    with open(path, "r") as f:
        history = json.load(f)
    y = history[ylabel]
    x = history[xlabel] if xlabel != "epochs" else list(range(len(y)))
    plt.figure()
    plt.plot(x, y)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.grid()
    plt.tight_layout()
    plt.show()
def plot_multiple_training(path: list, ylabel: str):
    """Overlay the *ylabel* curve of several history JSON files on one axes.

    :meta private:
    """
    for history_file in path:
        with open(history_file, "r") as f:
            history = json.load(f)
        curve = history[ylabel]
        plt.plot(list(range(len(curve))), curve)
    plt.xlabel("Epochs")
def plot_multiple_objective_wrp_time(path: list, ylabel: str):
    """Scatter the best *ylabel* value of each run against its timestamp.

    The timestamp is parsed from the file name, which is assumed to end with
    ``..._%d-%b-%Y_%H-%M-%S.json``.

    :meta private:
    """
    times, objectives = [], []
    for history_file in path:
        with open(history_file, "r") as f:
            history = json.load(f)
        stamp = "_".join(history_file[:-5].split("_")[-2:])
        times.append(datetime.strptime(stamp, "%d-%b-%Y_%H-%M-%S").timestamp())
        objectives.append(max(history[ylabel]))
    plt.scatter(times, objectives)
    plt.xlabel("Time")
def METHOD_NAME(path: list, xy: list):
    """Plot several history JSON files on a single figure.

    :meta private:
    """
    if not xy:
        xy = ["epochs", "val_loss"]
    elif len(xy) != 2:
        raise DeephyperRuntimeError(
            "--xy must take two arguments such as '--xy epochs val_loss'"
        )
    xlabel, ylabel = xy
    # Dispatch on the requested x-axis; unknown labels plot nothing.
    plotters = {
        "epochs": plot_multiple_training,
        "time": plot_multiple_objective_wrp_time,
    }
    plt.figure()
    plotter = plotters.get(xlabel)
    if plotter is not None:
        plotter(path, ylabel)
    plt.ylabel(ylabel)
    plt.grid()
    plt.tight_layout()
    plt.show()
def main(path: list, xy: list, *args, **kwargs):
    """Entry point: pick the right plotting helper from the number of input
    files and their extensions.

    :meta private:
    """
    extensions = [p.split(".")[-1] for p in path]
    if len(path) == 1:
        ext = extensions[0]
        if ext == "csv":
            plot_for_single_csv(path[0], xy)
        elif ext == "json":
            plot_for_single_json(path[0], xy)
        else:
            raise DeephyperRuntimeError(
                f"Extension of input file '{ext}' is not yet supported."
            )
    elif all(ext == "csv" for ext in extensions):
        # Comparing multiple results.csv files (different search experiments)
        raise DeephyperRuntimeError(
            "Comparison of multiple experiments is not yet supported."
        )
    elif all(ext == "json" for ext in extensions):
        # Comparing multiple history.json files (different neural networks)
        METHOD_NAME(path, xy)
    else:
        raise DeephyperRuntimeError(
            "Multiple input files should all have the same extension '.csv' or '.json'"
        )
5,830 | wrong return type | # This sample tests the use of field's converter parameter
# described in PEP 712.
from dataclasses import dataclass, field
from typing import Callable, overload
def converter_simple(s: str) -> int:
    """Plain single-parameter converter: str -> int."""
    return int(s)
def converter_with_param_before_args(s: str, *args: int, **kwargs: int) -> int:
    """Converter with extra *args/**kwargs after the converted value."""
    return int(s)
def converter_with_args(*args: str) -> int:
    """Converter that takes the value solely through *args."""
    return int(args[0])
def converter_with_extra_defaulted_params(
    s: str, extra: int = 1, *, extraKwarg: int = 1
) -> int:
    """Converter whose extra parameters all have defaults."""
    return int(s)
def converter_with_default_for_first_param(s: str = "1") -> int:
    """Converter whose converted parameter itself has a default."""
    return int(s)
def converter_with_more_specialized_return_type(s: str) -> int:
    """Converter returning int for a field annotated with the wider int | str."""
    return int(s)
class ConverterClass:
    """Class used directly as a converter; its overloaded __init__ widens the
    accepted input type to str | bytes."""

    @overload
    def __init__(self, val: str) -> None:
        ...

    @overload
    def __init__(self, val: bytes) -> None:
        ...

    def __init__(self, val: str | bytes) -> None:
        pass
@dataclass
class Foo:
    # Each field exercises one converter signature shape; every
    # field(converter=...) call is expected to be flagged by the checker
    # because the PEP 712 "converter" parameter is not official yet.
    # This should generate an error because "converter" is not an official property yet.
    field0: int = field(converter=converter_simple)
    # This should generate an error because "converter" is not an official property yet.
    field1: int = field(converter=converter_with_param_before_args)
    # This should generate an error because "converter" is not an official property yet.
    field2: int = field(converter=converter_with_args)
    # This should generate an error because "converter" is not an official property yet.
    field3: int = field(converter=converter_with_extra_defaulted_params)
    # This should generate an error because "converter" is not an official property yet.
    field4: int = field(converter=converter_with_default_for_first_param)
    # This should generate an error because "converter" is not an official property yet.
    field5: int | str = field(converter=converter_with_more_specialized_return_type)
    # This should generate an error because "converter" is not an official property yet.
    field6: ConverterClass = field(converter=ConverterClass)

# The synthesized __init__ should accept the converters' *input* types.
reveal_type(
    Foo.__init__,
    expected_text="(self: Foo, field0: str, field1: str, field2: str, field3: str, field4: str, field5: str, field6: str | bytes) -> None",
)
# This overload will be ignored because it has too many arguments.
@overload
def overloaded_converter(s: float, secondParam: str, /) -> int:
    ...

# This overload will be ignored because its return type doesn't match the field type.
@overload
def overloaded_converter(s: float) -> str:
    ...

@overload
def overloaded_converter(s: str) -> int:
    ...

@overload
def overloaded_converter(s: list[str]) -> int:
    ...

def overloaded_converter(s: float | str | list[str], *args: str) -> int | float | str:
    """Overloaded converter: only the str and list[str] overloads are viable."""
    return 0
@dataclass
class Overloads:
    # This should generate an error because "converter" is not an official property yet.
    field0: int = field(converter=overloaded_converter)

# The field's input type is the union of the surviving overloads' parameter types.
reveal_type(
    Overloads.__init__,
    expected_text="(self: Overloads, field0: str | list[str]) -> None",
)
class CallableObject:
    """Instance with an overloaded __call__, usable as a converter."""

    @overload
    def __call__(self, arg1: int) -> str:
        ...

    @overload
    def __call__(self, arg1: str) -> int:
        ...

    def __call__(self, arg1: str | int | list[str]) -> int | str:
        return 1
# Callable-typed values (callable objects, Callable aliases, unions of
# Callables) are also accepted as converters.
callable: Callable[[str], int] = converter_simple
callable_union: Callable[[str], int] | Callable[[int], str] = converter_simple

@dataclass
class Callables:
    # This should generate an error because "converter" is not an official property yet.
    field0: int = field(converter=CallableObject())
    # This should generate an error because "converter" is not an official property yet.
    field1: int = field(converter=callable)
    # This should generate an error because "converter" is not an official property yet.
    field2: int = field(converter=callable_union)

reveal_type(
    Callables.__init__,
    expected_text="(self: Callables, field0: str, field1: str, field2: str) -> None",
)
def METHOD_NAME(s: str) -> str:
    """Converter with the wrong return type (str instead of the field's int)."""
    return s
def wrong_number_of_params(x: str, x2: str, /) -> int:
    """Invalid converter: two required positional parameters."""
    return 1
@overload
def wrong_converter_overload(s: float) -> str:
    ...

@overload
def wrong_converter_overload(s: str) -> str:
    ...

def wrong_converter_overload(s: float | str) -> int | str:
    """Invalid converter: no overload returns the field's type (int)."""
    return 1
@dataclass
class Errors:
    # Each field below pairs the not-yet-official "converter" diagnostic with
    # a second, converter-specific error.
    # This should generate an error because "converter" is not an official property yet
    # and a second error because the return type doesn't match the field type.
    field0: int = field(converter=METHOD_NAME)
    # This should generate an error because "converter" is not an official property yet
    # and a second error because the converter has the wrong number of parameters.
    field1: int = field(converter=wrong_number_of_params)
    # This should generate an error because "converter" is not an official property yet
    # and a second error because none of the overloads match the field type.
    field2: int = field(converter=wrong_converter_overload)
5,831 | test versions rejects invalid version strings | # SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
# Modifications Copyright OpenSearch Contributors. See
# GitHub history for details.
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import random
import re
import pytest # type: ignore
from osbenchmark import exceptions
from osbenchmark.utils import versions
class TestsVersions:
    """Unit tests for osbenchmark.utils.versions parsing and matching helpers."""

    def test_is_version_identifier(self):
        # strict (default) requires major.minor.patch; non-strict also
        # accepts major or major.minor.
        assert versions.is_version_identifier(None) is False
        assert versions.is_version_identifier("") is False
        assert versions.is_version_identifier(" \t ") is False
        assert versions.is_version_identifier("5-ab-c") is False
        assert versions.is_version_identifier("5.1") is False
        assert versions.is_version_identifier("5") is False
        assert versions.is_version_identifier("5.0.0")
        assert versions.is_version_identifier("1.7.3")
        assert versions.is_version_identifier("20.3.7-SNAPSHOT")
        assert versions.is_version_identifier(None, strict=False) is False
        assert versions.is_version_identifier("", strict=False) is False
        assert versions.is_version_identifier("5.1", strict=False)
        assert versions.is_version_identifier("5", strict=False)
        assert versions.is_version_identifier("23", strict=False)
        assert versions.is_version_identifier("20.3.7-SNAPSHOT", strict=False)

    def test_finds_components_for_valid_version(self):
        # components() returns (major, minor, patch, suffix).
        assert versions.components("5.0.3") == (5, 0, 3, None)
        assert versions.components("7.12.1-SNAPSHOT") == (7, 12, 1, "SNAPSHOT")
        assert versions.components("25", strict=False) == (25, None, None, None)
        assert versions.components("5.1", strict=False) == (5, 1, None, None)

    def test_major_version(self):
        assert versions.major_version("7.10.2") == 7
        assert versions.major_version("7.12.1-SNAPSHOT") == 7
        assert versions.major_version("25.0.3") == 25

    # Shuffle the alternatives under many seeds to prove the result is
    # independent of input order.
    @pytest.mark.parametrize("seed", range(40))
    def test_latest_bounded_minor(self, seed):
        _alternatives = ["7", "7.10", "7.11.2", "7.2", "5", "6", "main"]
        random.seed(seed)
        alternatives = _alternatives.copy()
        random.shuffle(alternatives)
        assert versions.latest_bounded_minor(alternatives, versions.VersionVariants("7.6.3")) == 2
        assert versions.latest_bounded_minor(alternatives, versions.VersionVariants("7.12.3")) == 10,\
            "Nearest alternative with major.minor, skip alternatives with major.minor.patch"
        assert versions.latest_bounded_minor(alternatives, versions.VersionVariants("7.11.2")) == 10,\
            "Skips all alternatives with major.minor.patch, even if exact match"
        assert versions.latest_bounded_minor(alternatives, versions.VersionVariants("7.1.0")) is None,\
            "No matching alternative with minor version"

    def test_components_ignores_invalid_versions(self):
        with pytest.raises(
                exceptions.InvalidSyntax,
                match=re.escape(
                    r"version string '5.0.0a' does not conform to pattern "
                    r"'^(\d+)\.(\d+)\.(\d+)(?:-(.+))?$'")):
            versions.components("5.0.0a")

    def test_versionvariants_parses_correct_version_string(self):
        # all_versions lists (version string, variant name) from most to
        # least specific.
        assert versions.VersionVariants("5.0.3").all_versions == [
            ("5.0.3", "with_patch"),
            ("5.0", "with_minor"),
            ("5", "with_major")]
        assert versions.VersionVariants("7.12.1-SNAPSHOT").all_versions == [
            ("7.12.1-SNAPSHOT", "with_suffix"),
            ("7.12.1", "with_patch"),
            ("7.12", "with_minor"),
            ("7", "with_major")]
        assert versions.VersionVariants("10.3.63").all_versions == [
            ("10.3.63", "with_patch"),
            ("10.3", "with_minor"),
            ("10", "with_major")]

    def METHOD_NAME(self):
        # Invalid version strings are rejected with InvalidSyntax.
        with pytest.raises(
            exceptions.InvalidSyntax,
            match=re.escape(r"version string '5.0.0a-SNAPSHOT' does not conform to pattern "
                            r"'^(\d+)\.(\d+)\.(\d+)(?:-(.+))?$'")
        ):
            versions.VersionVariants("5.0.0a-SNAPSHOT")

    def test_find_best_match(self):
        assert versions.best_match(["1.7", "2", "5.0.0-alpha1", "5", "main"], "6.0.0-alpha1") == "main",\
            "Assume main for versions newer than latest alternative available"
        assert versions.best_match(["1.7", "2", "5.0.0-alpha1", "5", "main"], "5.1.0-SNAPSHOT") == "5",\
            "Best match for specific version"
        assert versions.best_match(["1.7", "2", "5.0.0-alpha1", "5", "main"], None) == "main",\
            "Assume main on unknown version"
        assert versions.best_match(["1.7", "2", "5.0.0-alpha1", "5", "main"], "0.4") is None,\
            "Reject versions that are too old"
        assert versions.best_match(["7", "7.10.2", "7.11", "7.2", "5", "6", "main"], "7.10.2") == "7.10.2", \
            "Exact match"
        assert versions.best_match(["7", "7.10", "main"], "7.1.0") == "7", \
            "Best match is major version"
        assert versions.best_match(["7", "7.11", "7.2", "5", "6", "main"], "7.11.0") == "7.11",\
            "Best match for specific minor version"
        assert versions.best_match(["7", "7.11", "7.2", "5", "6", "main"], "7.12.0") == "7.11",\
            "If no exact match, best match is the nearest prior minor"
        assert versions.best_match(["7", "7.11", "7.2", "5", "6", "main"], "7.3.0") == "7.2",\
            "If no exact match, best match is the nearest prior minor"
        assert versions.best_match(["7", "7.11", "7.2", "5", "6", "main"], "7.10.0") == "7.2", \
            "If no exact match, best match is the nearest prior minor"
        assert versions.best_match(["7", "7.1", "7.11.1", "7.11.0", "7.2", "5", "6", "main"], "7.12.0") == "7.2",\
            "Patch or patch-suffix branches are not supported and ignored, best match is nearest prior minor"
        assert versions.best_match(["7", "7.11", "7.2", "5", "6", "main"], "7.1.0") == "7",\
            "If no exact match and no minor match, next best match is major version"

    def test_version_comparison(self):
        assert versions.Version.from_string("7.10.2") < versions.Version.from_string("7.11.0")
        assert versions.Version.from_string("7.10.2") == versions.Version.from_string("7.10.2")
5,832 | aks upgrades table format | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from collections import OrderedDict
from jmespath import Options
from jmespath import compile as compile_jmes
from jmespath import functions
def aks_agentpool_show_table_format(result):
    """Format an agent pool as summary results for display with "-o table"."""
    summary = _aks_agentpool_table_format(result)
    return [summary]
def _aks_agentpool_table_format(result):
    """Project one agent pool record onto the CLI summary columns."""
    summary_query = compile_jmes("""{
    name: name,
    osType: osType,
    kubernetesVersion: orchestratorVersion,
    vmSize: vmSize,
    osDiskSizeGB: osDiskSizeGB,
    count: count,
    maxPods: maxPods,
    provisioningState: provisioningState,
    mode: mode
    }""")
    # use ordered dicts so headers are predictable
    return summary_query.search(result, Options(dict_cls=OrderedDict))
def aks_agentpool_list_table_format(results):
    """Format an agent pool list for display with "-o table"."""
    return list(map(_aks_agentpool_table_format, results))
def aks_list_table_format(results):
    """"Format a list of managed clusters as summary results for display with "-o table"."""
    return list(map(_aks_table_format, results))
def aks_run_command_result_format(cmdResult):
    """Summarize a run-command result; the columns depend on provisioningState."""
    summary = OrderedDict()
    state = cmdResult['provisioningState']
    if state == "Succeeded":
        summary['exit code'] = cmdResult['exitCode']
        summary['logs'] = cmdResult['logs']
    elif state == "Failed":
        summary['provisioning state'] = state
        summary['reason'] = cmdResult['reason']
    else:
        # Still in progress: show the state and when it started.
        summary['provisioning state'] = state
        summary['started At'] = cmdResult['startedAt']
    return summary
def aks_show_table_format(result):
    """Format a managed cluster as summary results for display with "-o table"."""
    summary = _aks_table_format(result)
    return [summary]
def _aks_table_format(result):
    """Project one managed cluster record onto the CLI summary columns."""
    summary_query = compile_jmes("""{
    name: name,
    location: location,
    resourceGroup: resourceGroup,
    kubernetesVersion: kubernetesVersion,
    currentKubernetesVersion: currentKubernetesVersion,
    provisioningState: provisioningState,
    fqdn: fqdn || privateFqdn
    }""")
    # use ordered dicts so headers are predictable
    return summary_query.search(result, Options(dict_cls=OrderedDict))
def METHOD_NAME(result):
    """Format get-upgrades results as a summary for display with "-o table"."""
    preview = {}

    def find_preview_versions(versions_bag):
        # Collect every upgrade version flagged isPreview so the jmespath
        # custom set_preview function can suffix it with "(preview)".
        for upgrade in versions_bag.get('upgrades', []):
            if upgrade.get('isPreview', False):
                preview[upgrade['kubernetesVersion']] = True

    find_preview_versions(result.get('controlPlaneProfile', {}))
    # This expression assumes there is one node pool, and that the master and nodes upgrade in lockstep.
    parsed = compile_jmes("""{
    name: name,
    resourceGroup: resourceGroup,
    masterVersion: controlPlaneProfile.kubernetesVersion || `unknown`,
    upgrades: controlPlaneProfile.upgrades[].kubernetesVersion || [`None available`] | sort_versions(@) | set_preview_array(@) | join(`, `, @)
    }""")
    # use ordered dicts so headers are predictable
    return parsed.search(result, Options(dict_cls=OrderedDict, custom_functions=_custom_functions(preview)))
def aks_versions_table_format(result):
    """Format get-versions results as a summary for display with "-o table"."""
    # Flatten the nested patchVersions structure before projecting columns.
    version_table = flatten_version_table(result.get("values", []))
    parsed = compile_jmes("""[].{
    kubernetesVersion: version,
    isPreview: isPreview,
    upgrades: upgrades || [`None available`] | sort_versions(@) | join(`, `, @)
    }""")
    # use ordered dicts so headers are predictable
    results = parsed.search(version_table, Options(
        dict_cls=OrderedDict, custom_functions=_custom_functions({})))
    # Newest Kubernetes version first.
    return sorted(results, key=lambda x: version_to_tuple(x.get("kubernetesVersion")), reverse=True)
def aks_list_nodepool_snapshot_table_format(results):
    """"Format a list of nodepool snapshots as summary results for display with "-o table"."""
    return list(map(_aks_nodepool_snapshot_table_format, results))
def aks_show_nodepool_snapshot_table_format(result):
    """Format a nodepool snapshot as summary results for display with "-o table"."""
    summary = _aks_nodepool_snapshot_table_format(result)
    return [summary]
def _aks_nodepool_snapshot_table_format(result):
    """Project a single nodepool snapshot onto the summary columns shown with "-o table"."""
    parsed = compile_jmes("""{
        name: name,
        location: location,
        resourceGroup: resourceGroup,
        nodeImageVersion: nodeImageVersion,
        kubernetesVersion: kubernetesVersion,
        osType: osType,
        enableFIPS: enableFIPS
    }""")
    # use ordered dicts so headers are predictable
    return parsed.search(result, Options(dict_cls=OrderedDict))
def version_to_tuple(version):
    """Convert "X.Y.Z" (optionally suffixed with "(preview)") to an int tuple for sorting."""
    suffix = '(preview)'
    if version.endswith(suffix):
        version = version[:-len(suffix)]
    return tuple(int(part) for part in version.split('.'))
def flatten_version_table(release_info):
    """Flatten the nested release/patchVersions structure into one dict per version."""
    rows = []
    for release in release_info:
        preview_flag = release.get("isPreview", False)
        rows.extend(
            {"version": version, "upgrades": details.get("upgrades", []), "isPreview": preview_flag}
            for version, details in release.get("patchVersions", {}).items()
        )
    return rows
def _custom_functions(preview_versions):
    """Build a jmespath CustomFunctions registry used by the table formatters.

    preview_versions maps a Kubernetes version string to True when that
    version is in preview; the functions use it to add a "(preview)" suffix.
    """
    class CustomFunctions(functions.Functions):  # pylint: disable=too-few-public-methods
        @functions.signature({'types': ['array']})
        def _func_sort_versions(self, versions):  # pylint: disable=no-self-use
            """Custom JMESPath `sort_versions` function that sorts an array of strings as software versions."""
            try:
                return sorted(versions, key=version_to_tuple)
            # if it wasn't sortable, return the input so the pipeline continues
            except (TypeError, ValueError):
                return versions

        @functions.signature({'types': ['array']})
        def _func_set_preview_array(self, versions):
            """Custom JMESPath `set_preview_array` function that suffixes preview version"""
            try:
                # Delegate each element to the scalar helper below, in place.
                for i, _ in enumerate(versions):
                    versions[i] = self._func_set_preview(versions[i])
                return versions
            except (TypeError, ValueError):
                return versions

        @functions.signature({'types': ['string']})
        def _func_set_preview(self, version):  # pylint: disable=no-self-use
            """Custom JMESPath `set_preview` function that suffixes preview version"""
            try:
                if preview_versions.get(version, False):
                    return version + '(preview)'
                return version
            except (TypeError, ValueError):
                return version

    return CustomFunctions()
5,833 | get status | import datetime
import io
import logging.handlers
import subprocess
import time
from _typeshed import StrOrBytesPath
from collections.abc import Callable, Iterable, Mapping, Sequence
from contextlib import AbstractContextManager
from email.message import Message
from hashlib import _Hash
from typing import IO, Any, TypeVar
from typing_extensions import TypeAlias
import boto.connection
# Type stubs (API declarations only; all bodies are "...").
_KT = TypeVar("_KT")
_VT = TypeVar("_VT")
_Provider: TypeAlias = Any  # TODO replace this with boto.provider.Provider once stubs exist
_LockType: TypeAlias = Any  # TODO replace this with _thread.LockType once stubs exist

JSONDecodeError: type[ValueError]
qsa_of_interest: list[str]

# -- Request signing / header helpers --
def unquote_v(nv: str) -> str | tuple[str, str]: ...
def canonical_string(
    method: str, path: str, headers: Mapping[str, str | None], expires: int | None = None, provider: _Provider | None = None
) -> str: ...
def merge_meta(
    headers: Mapping[str, str], metadata: Mapping[str, str], provider: _Provider | None = None
) -> Mapping[str, str]: ...
def get_aws_metadata(headers: Mapping[str, str], provider: _Provider | None = None) -> Mapping[str, str]: ...
def retry_url(url: str, retry_on_404: bool = True, num_retries: int = 10, timeout: int | None = None) -> str: ...

# -- EC2 instance metadata service helpers --
class LazyLoadMetadata(dict[_KT, _VT]):
    # Mapping populated lazily from the given metadata URL.
    def __init__(self, url: str, num_retries: int, timeout: int | None = None) -> None: ...

def get_instance_metadata(
    version: str = "latest",
    url: str = "http://169.254.169.254",
    data: str = "meta-data/",
    timeout: int | None = None,
    num_retries: int = 5,
) -> LazyLoadMetadata[Any, Any] | None: ...
def get_instance_identity(
    version: str = "latest", url: str = "http://169.254.169.254", timeout: int | None = None, num_retries: int = 5
) -> Mapping[str, Any] | None: ...
def get_instance_userdata(
    version: str = "latest",
    sep: str | None = None,
    url: str = "http://169.254.169.254",
    timeout: int | None = None,
    num_retries: int = 5,
) -> Mapping[str, str]: ...

# -- Timestamp format constants and helpers --
ISO8601: str
ISO8601_MS: str
RFC1123: str
LOCALE_LOCK: _LockType

def setlocale(name: str | tuple[str, str]) -> AbstractContextManager[str]: ...
def get_ts(ts: time.struct_time | None = None) -> str: ...
def parse_ts(ts: str) -> datetime.datetime: ...
def find_class(module_name: str, class_name: str | None = None) -> type[Any] | None: ...
def update_dme(username: str, password: str, dme_id: str, ip_address: str) -> str: ...
def fetch_file(
    uri: str, file: IO[str] | None = None, username: str | None = None, password: str | None = None
) -> IO[str] | None: ...
class ShellCommand:
    # Wrapper around a subprocess invocation; output is accumulated in log_fp.
    exit_code: int
    command: subprocess._CMD
    log_fp: io.StringIO
    wait: bool
    fail_fast: bool
    def __init__(
        self, command: subprocess._CMD, wait: bool = True, fail_fast: bool = False, cwd: StrOrBytesPath | None = None
    ) -> None: ...
    process: subprocess.Popen[Any]
    def run(self, cwd: subprocess._CMD | None = None) -> int | None: ...
    def setReadOnly(self, value) -> None: ...
    def METHOD_NAME(self) -> int | None: ...
    # NOTE(review): presumably a property alias of METHOD_NAME() in the
    # implementation, modeled here as a plain attribute — confirm against boto.
    status: int | None
    def getOutput(self) -> str: ...
    # NOTE(review): presumably a property alias of getOutput() — confirm.
    output: str
class AuthSMTPHandler(logging.handlers.SMTPHandler):
    # SMTPHandler variant that authenticates with a username/password.
    username: str
    password: str
    def __init__(
        self, mailhost: str, username: str, password: str, fromaddr: str, toaddrs: Sequence[str], subject: str
    ) -> None: ...

class LRUCache(dict[_KT, _VT]):
    # Bounded dict: entries are threaded through a doubly linked list of
    # _Item nodes (head = most recent, tail = eviction candidate).
    class _Item:
        previous: LRUCache._Item | None
        next: LRUCache._Item | None
        key = ...
        value = ...
        def __init__(self, key, value) -> None: ...
    _dict: dict[_KT, LRUCache._Item]
    capacity: int
    head: LRUCache._Item | None
    tail: LRUCache._Item | None
    def __init__(self, capacity: int) -> None: ...

# This exists to work around Password.str's name shadowing the str type
_Str: TypeAlias = str

class Password:
    # Holds a (hashed) password; `str` is the stored hash digest attribute.
    hashfunc: Callable[[bytes], _Hash]
    str: _Str | None
    def __init__(self, str: _Str | None = None, hashfunc: Callable[[bytes], _Hash] | None = None) -> None: ...
    def set(self, value: bytes | _Str) -> None: ...
    def __eq__(self, other: _Str | bytes | None) -> bool: ...  # type: ignore[override]
    def __len__(self) -> int: ...
# -- Notification, MIME and hashing helpers --
def notify(
    subject: str,
    body: str | None = None,
    html_body: Sequence[str] | str | None = None,
    to_string: str | None = None,
    attachments: Iterable[Message] | None = None,
    append_instance_id: bool = True,
) -> None: ...
def get_utf8_value(value: str) -> bytes: ...
def mklist(value: Any) -> list[Any]: ...
def pythonize_name(name: str) -> str: ...
def write_mime_multipart(
    content: list[tuple[str, str]], compress: bool = False, deftype: str = "text/plain", delimiter: str = ":"
) -> str: ...
def guess_mime_type(content: str, deftype: str) -> str: ...
def compute_md5(fp: IO[Any], buf_size: int = 8192, size: int | None = None) -> tuple[str, str, int]: ...
def compute_hash(
    fp: IO[Any], buf_size: int = 8192, size: int | None = None, hash_algorithm: Any = ...
) -> tuple[str, str, int]: ...
def find_matching_headers(name: str, headers: Mapping[str, str | None]) -> list[str]: ...
def merge_headers_by_name(name: str, headers: Mapping[str, str | None]) -> str: ...

class RequestHook:
    # Interface for observing request/response pairs on a connection.
    def handle_request_data(
        self, request: boto.connection.HTTPRequest, response: boto.connection.HTTPResponse, error: bool = False
    ) -> Any: ...

def host_is_ipv6(hostname: str) -> bool: ...
def parse_host(hostname: str) -> str: ...
5,834 | test types for adam | """Tests for SparseApply of AdamOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import training_ops
class AdamSparseApplyTest(TensorFlowTestCase):
  """Checks apply_adam / sparse_apply_adam ops against a NumPy reference update."""

  def _toType(self, dtype):
    # Map a NumPy dtype to the matching TensorFlow DType; any other dtype
    # is a programming error in the test, hence the bare assert.
    if dtype == np.float16:
      return dtypes.float16
    elif dtype == np.float32:
      return dtypes.float32
    elif dtype == np.float64:
      return dtypes.float64
    elif dtype == np.int32:
      return dtypes.int32
    elif dtype == np.int64:
      return dtypes.int64
    else:
      assert False, (dtype)

  def _testTypesForSparseAdam(self, x, m, v, t, lr, beta1, beta2, epsilon, grad, indices, use_gpu):
    # Run sparse_apply_adam on (x, m, v) for the rows selected by `indices`
    # and compare each updated row to the NumPy reference update.
    self.setUp()
    with self.test_session(use_gpu=use_gpu):
      var = variables.VariableV1(x)
      m_a = variables.VariableV1(m)
      v_a = variables.VariableV1(v)
      variables.global_variables_initializer().run()
      beta1_power = beta1**t
      beta2_power = beta2**t
      self.assertAllCloseAccordingToType(x, var.eval())
      sparse_apply_adam = training_ops.sparse_apply_adam(
          var, m_a, v_a, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad,
          constant_op.constant(indices, self._toType(indices.dtype)))
      out = sparse_apply_adam.eval()
      self.assertShapeEqual(out, sparse_apply_adam)
      # Only the touched rows must match the reference; untouched rows are
      # not checked here.
      for (i, index) in enumerate(indices):
        new_var, new_m, new_v, = self._adamUpdateNumpy(x[index], grad[i], t, m[index], v[index], lr, beta1, beta2, epsilon)
        self.assertAllCloseAccordingToType(new_var, out[index])
        self.assertAllCloseAccordingToType(new_m, m_a.eval()[index])
        self.assertAllCloseAccordingToType(new_v, v_a.eval()[index])

  def testSparseApplyAdam(self):
    # 3x10 variables; rows 0 and 2 receive a sparse update, over all
    # float dtypes, both index dtypes and CPU/GPU.
    for (dtype, index_type, use_gpu) in itertools.product(
        [np.float16, np.float32, np.float64], [np.int32, np.int64], [False, True]):
      x_val = [np.arange(10), np.arange(10, 20), np.arange(20, 30)]
      m_val = [np.arange(1, 11), np.arange(11, 21), np.arange(21, 31)]
      v_val = [np.arange(2, 12), np.arange(12, 22), np.arange(22, 32)]
      x = np.array(x_val).astype(dtype)
      m = np.array(m_val).astype(dtype)
      v = np.array(v_val).astype(dtype)
      t = 1
      lr = np.array(1).astype(dtype)
      beta1 = np.array(2).astype(dtype)
      beta2 = np.array(3).astype(dtype)
      epsilon = np.array(4).astype(dtype)
      grad_val = [np.arange(10), np.arange(10)]
      grad = np.array(grad_val).astype(dtype)
      indices = np.array([0, 2]).astype(index_type)
      self._testTypesForSparseAdam(x, m, v, t, lr, beta1, beta2, epsilon, grad, indices, use_gpu)

  def testSparseApplyAdamDim1(self):
    # Same as testSparseApplyAdam but with single-column (3x1) variables.
    for (dtype, index_type, use_gpu) in itertools.product(
        [np.float16, np.float32, np.float64], [np.int32, np.int64], [False, True]):
      x_val = [[1.0], [2.0], [3.0]]
      m_val = [[4.0], [5.0], [6.0]]
      v_val = [[7.0], [8.0], [9.0]]
      x = np.array(x_val).astype(dtype)
      m = np.array(m_val).astype(dtype)
      v = np.array(v_val).astype(dtype)
      t = 1
      lr = np.array(1).astype(dtype)
      beta1 = np.array(2).astype(dtype)
      beta2 = np.array(3).astype(dtype)
      epsilon = np.array(4).astype(dtype)
      grad_val = [[1.5], [2.5]]
      grad = np.array(grad_val).astype(dtype)
      indices = np.array([0, 2]).astype(index_type)
      self._testTypesForSparseAdam(x, m, v, t, lr, beta1, beta2, epsilon, grad, indices, use_gpu)

  def testApplyAdam(self):
    # Dense apply_adam over all float dtypes and CPU/GPU.
    for dtype, use_gpu in itertools.product(
        [np.float16, np.float32, np.float64], [False, True]):
      var = np.arange(100).astype(dtype)
      m = np.arange(1, 101).astype(dtype)
      v = np.arange(101, 201).astype(dtype)
      grad = np.arange(100).astype(dtype)
      self.METHOD_NAME(var, m, v, grad, use_gpu)

  def METHOD_NAME(self, var, m, v, grad, use_gpu):
    # Run one dense apply_adam step and compare the updated variable to the
    # NumPy reference.
    self.setUp()
    with self.test_session(use_gpu=use_gpu):
      var_t = variables.VariableV1(var)
      m_t = variables.VariableV1(m)
      v_t = variables.VariableV1(v)
      t = 1
      beta1 = np.array(0.9, dtype=var.dtype)
      beta2 = np.array(0.999, dtype=var.dtype)
      beta1_power = beta1**t
      beta2_power = beta2**t
      lr = np.array(0.001, dtype=var.dtype)
      epsilon = np.array(1e-8, dtype=var.dtype)
      beta1_t = constant_op.constant(beta1, self._toType(var.dtype), [])
      beta2_t = constant_op.constant(beta2, self._toType(var.dtype), [])
      beta1_power_t = variables.VariableV1(beta1_power)
      beta2_power_t = variables.VariableV1(beta2_power)
      lr_t = constant_op.constant(lr, self._toType(var.dtype), [])
      epsilon_t = constant_op.constant(epsilon, self._toType(var.dtype), [])
      variables.global_variables_initializer().run()
      self.assertAllCloseAccordingToType(var, var_t.eval())
      new_var, _, _ = self._adamUpdateNumpy(var, grad, t, m, v, lr, beta1,
                                            beta2, epsilon)
      apply_adam = training_ops.apply_adam(var_t, m_t, v_t, beta1_power_t,
                                           beta2_power_t, lr_t, beta1_t,
                                           beta2_t, epsilon_t, grad)
      out = apply_adam.eval()
      self.assertShapeEqual(out, apply_adam)
      self.assertAllCloseAccordingToType(new_var, out)

  def _adamUpdateNumpy(self, param, g_t, t, m, v, alpha, beta1, beta2, epsilon):
    # Reference implementation of one Adam step (Kingma & Ba, 2015):
    # bias-corrected learning rate, then first/second moment updates.
    alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
    m_t = beta1 * m + (1 - beta1) * g_t
    v_t = beta2 * v + (1 - beta2) * g_t * g_t
    param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
    return param_t, m_t, v_t
# Run the test suite when the file is executed directly.
if __name__ == '__main__':
  googletest.main()
5,835 | handler | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "vm deallocate",
)
class Deallocate(AAZCommand):
    """Deallocate a VM so that computing resources are no longer allocated (charges no longer apply). The status will change from 'Stopped' to 'Stopped (Deallocated)'.

    For an end-to-end tutorial, see https://docs.microsoft.com/azure/virtual-machines/linux/capture-image

    :example: Deallocate, generalize, and capture a stopped virtual machine.
        az vm deallocate -g MyResourceGroup -n MyVm
        az vm generalize -g MyResourceGroup -n MyVm
        az vm capture -g MyResourceGroup -n MyVm --vhd-name-prefix MyPrefix

    :example: Deallocate, generalize, and capture multiple stopped virtual machines.
        az vm deallocate --ids vms_ids
        az vm generalize --ids vms_ids
        az vm capture --ids vms_ids --vhd-name-prefix MyPrefix

    :example: Deallocate a VM.
        az vm deallocate --name MyVm --no-wait --resource-group MyResourceGroup
    """

    # Resource/API-version metadata used by the aaz framework.
    _aaz_info = {
        "version": "2022-11-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/virtualmachines/{}/deallocate", "2022-11-01"],
        ]
    }

    AZ_SUPPORT_NO_WAIT = True

    def METHOD_NAME(self, command_args):
        # CLI framework entry point: starts the long-running deallocate and
        # returns an LRO poller with no final result payload.
        super().METHOD_NAME(command_args)
        return self.build_lro_poller(self._execute_operations, None)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Lazily build (and cache on the class) the command's argument schema.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.vm_name = AAZStrArg(
            options=["-n", "--name", "--vm-name"],
            help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
            required=True,
            id_part="name",
            configured_default="vm",
        )
        _args_schema.hibernate = AAZBoolArg(
            options=["--hibernate"],
            help="Optional parameter to hibernate a virtual machine. (Feature in Preview)",
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        yield self.VirtualMachinesDeallocate(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    class VirtualMachinesDeallocate(AAZHttpOperation):
        # HTTP operation issuing POST .../virtualMachines/{vmName}/deallocate.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # Both 202 (accepted) and 200 (completed) start LRO polling that
            # resolves via the azure-async-operation header.
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [200]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate",
                **self.url_parameters
            )

        @property
        def method(self):
            return "POST"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "vmName", self.ctx.args.vm_name,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "hibernate", self.ctx.args.hibernate,
                ),
                **self.serialize_query_param(
                    "api-version", "2022-11-01",
                    required=True,
                ),
            }
            return parameters

        def on_200(self, session):
            # Success responses carry no body that needs deserializing.
            pass
class _DeallocateHelper:
    """Helper class for Deallocate"""
    # Placeholder emitted by the generator; would hold shared (de)serialization
    # helpers if this command needed any.


__all__ = ["Deallocate"]
5,836 | is creation allowed for project | from __future__ import annotations
from datetime import timedelta
from typing import Any, Mapping, Optional
from sentry import features
from sentry.issues.grouptype import PerformanceRenderBlockingAssetSpanGroupType
from sentry.issues.issue_occurrence import IssueEvidence
from sentry.models import Organization, Project
from ..base import (
DetectorType,
PerformanceDetector,
fingerprint_resource_span,
get_notification_attachment_body,
get_span_duration,
get_span_evidence_value,
)
from ..performance_problem import PerformanceProblem
from ..types import Span
class RenderBlockingAssetSpanDetector(PerformanceDetector):
    """Detects resource spans (scripts/stylesheets) that block first contentful paint (FCP)."""

    __slots__ = ("stored_problems", "fcp", "transaction_start")

    type = DetectorType.RENDER_BLOCKING_ASSET_SPAN
    settings_key = DetectorType.RENDER_BLOCKING_ASSET_SPAN
    MAX_SIZE_BYTES = 1_000_000_000  # 1GB

    def init(self):
        # NOTE(review): presumably invoked by the PerformanceDetector base
        # class instead of __init__ — confirm against the base class.
        self.stored_problems = {}
        self.transaction_start = timedelta(seconds=self.event().get("start_timestamp", 0))
        self.fcp = None
        self.fcp_value = 0

        # Only concern ourselves with transactions where the FCP is within the
        # range we care about.
        measurements = self.event().get("measurements") or {}
        fcp_hash = measurements.get("fcp") or {}
        fcp_value = fcp_hash.get("value")
        # Missing unit is treated as milliseconds; any other unit disables detection.
        if fcp_value and ("unit" not in fcp_hash or fcp_hash["unit"] == "millisecond"):
            fcp = timedelta(milliseconds=fcp_value)
            fcp_minimum_threshold = timedelta(
                milliseconds=self.settings.get("fcp_minimum_threshold")
            )
            fcp_maximum_threshold = timedelta(
                milliseconds=self.settings.get("fcp_maximum_threshold")
            )
            if fcp >= fcp_minimum_threshold and fcp < fcp_maximum_threshold:
                self.fcp = fcp
                self.fcp_value = fcp_value

    def is_creation_allowed_for_organization(self, organization: Optional[Organization]) -> bool:
        # Issue creation is feature-flagged per organization.
        return features.has(
            "organizations:performance-issues-render-blocking-assets-detector",
            organization,
            actor=None,
        )

    def METHOD_NAME(self, project: Project) -> bool:
        # Per-project toggle from the detector settings.
        return self.settings["detection_enabled"]

    def visit_span(self, span: Span):
        # `fcp` is cleared once we pass FCP, so nothing more can be detected.
        # NOTE(review): returns False for non-resource ops but None elsewhere;
        # callers appear to ignore the return value — confirm.
        if not self.fcp:
            return

        op = span.get("op", None)
        if op not in ["resource.link", "resource.script"]:
            return False

        if self._is_blocking_render(span):
            span_id = span.get("span_id", None)
            fingerprint = self._fingerprint(span)
            if span_id and fingerprint:
                self.stored_problems[fingerprint] = PerformanceProblem(
                    fingerprint=fingerprint,
                    op=op,
                    desc=span.get("description") or "",
                    type=PerformanceRenderBlockingAssetSpanGroupType,
                    offender_span_ids=[span_id],
                    parent_span_ids=[],
                    cause_span_ids=[],
                    evidence_data={
                        "op": op,
                        "parent_span_ids": [],
                        "cause_span_ids": [],
                        "offender_span_ids": [span_id],
                        "transaction_name": self.event().get("description", ""),
                        "slow_span_description": span.get("description", ""),
                        "slow_span_duration": self._get_duration(span),
                        "transaction_duration": self._get_duration(self._event),
                        "fcp": self.fcp_value,
                        "repeating_spans": get_span_evidence_value(span),
                        "repeating_spans_compact": get_span_evidence_value(span, include_op=False),
                    },
                    evidence_display=[
                        IssueEvidence(
                            name="Offending Spans",
                            value=get_notification_attachment_body(
                                op,
                                span.get("description") or "",
                            ),
                            # Has to be marked important to be displayed in the notifications
                            important=True,
                        )
                    ],
                )

        # If we visit a span that starts after FCP, then we know we've already
        # seen all possible render-blocking resource spans.
        span_start_timestamp = timedelta(seconds=span.get("start_timestamp", 0))
        fcp_timestamp = self.transaction_start + self.fcp
        if span_start_timestamp >= fcp_timestamp:
            # Early return for all future span visits.
            self.fcp = None
            self.fcp_value = 0

    def _get_duration(self, item: Mapping[str, Any] | None) -> float:
        # Duration of a span/event in milliseconds (timestamps are seconds).
        if not item:
            return 0
        start = float(item.get("start_timestamp", 0))
        end = float(item.get("timestamp", 0))
        return (end - start) * 1000

    def _is_blocking_render(self, span):
        # A span blocks render if it is not tagged non-blocking, finishes
        # before FCP, is within the configured size window, and takes a large
        # enough fraction of the FCP time.
        data = span.get("data", None)
        render_blocking_status = data and data.get("resource.render_blocking_status")
        if render_blocking_status == "non-blocking":
            return False

        span_end_timestamp = timedelta(seconds=span.get("timestamp", 0))
        fcp_timestamp = self.transaction_start + self.fcp
        if span_end_timestamp >= fcp_timestamp:
            return False

        minimum_size_bytes = self.settings.get("minimum_size_bytes")
        # TODO(nar): `Encoded Body Size` can be removed once SDK adoption has increased and
        # we are receiving `http.response_content_length` consistently, likely beyond October 2023
        encoded_body_size = (
            data
            and (data.get("http.response_content_length", 0) or data.get("Encoded Body Size", 0))
            or 0
        )
        if encoded_body_size < minimum_size_bytes or encoded_body_size > self.MAX_SIZE_BYTES:
            return False

        span_duration = get_span_duration(span)
        fcp_ratio_threshold = self.settings.get("fcp_ratio_threshold")
        return span_duration / self.fcp > fcp_ratio_threshold

    def _fingerprint(self, span: Span):
        # Group problems by the (hashed) resource URL.
        resource_url_hash = fingerprint_resource_span(span)
        return f"1-{PerformanceRenderBlockingAssetSpanGroupType.type_id}-{resource_url_hash}"
5,837 | eda simulate | # -*- coding: utf-8 -*-
import numpy as np
from ..misc import check_random_state, check_random_state_children
from ..signal import signal_distort, signal_merge
def METHOD_NAME(
    duration=10,
    length=None,
    sampling_rate=1000,
    noise=0.01,
    scr_number=1,
    drift=-0.01,
    random_state=None,
    random_state_distort="spawn",
):
    """**Simulate Electrodermal Activity (EDA) signal**

    Generate an artificial (synthetic) EDA signal of a given duration and sampling rate.

    Parameters
    ----------
    duration : int
        Desired recording length in seconds.
    sampling_rate : int
        The desired sampling rate (in Hz, i.e., samples/second). Defaults to 1000Hz.
    length : int
        The desired length of the signal (in samples). Defaults to None.
    noise : float
        Noise level (amplitude of the laplace noise). Defaults to 0.01.
    scr_number : int
        Desired number of skin conductance responses (SCRs), i.e., peaks. Defaults to 1.
    drift : float or list
        The slope of a linear drift of the signal. Defaults to -0.01.
    random_state : None, int, numpy.random.RandomState or numpy.random.Generator
        Seed for the random number generator. See for ``misc.check_random_state`` for further information.
    random_state_distort : {'legacy', 'spawn'}, None, int, numpy.random.RandomState or numpy.random.Generator
        Random state to be used to distort the signal. If ``"legacy"``, use the same random state used to
        generate the signal (discouraged as it creates dependent random streams). If ``"spawn"``, spawn
        independent children random number generators from the random_state argument. If any of the other types,
        generate independent children random number generators from the random_state_distort provided (this
        allows generating multiple version of the same signal distorted by different random noise realizations).

    Returns
    ----------
    array
        Vector containing the EDA signal.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk
      import pandas as pd

      eda = nk.eda_simulate(duration=10, scr_number=3)
      @savefig p_eda_simulate1.png scale=100%
      fig = nk.signal_plot(eda)
      @suppress
      plt.close()

    See Also
    --------
    ecg_simulate, rsp_simulate, emg_simulate, ppg_simulate

    References
    -----------
    * Bach, D. R., Flandin, G., Friston, K. J., & Dolan, R. J. (2010). Modelling event-related skin
      conductance responses. International Journal of Psychophysiology, 75(3), 349-356.

    """
    # Seed the random generator for reproducible results
    rng = check_random_state(random_state)
    random_state_distort = check_random_state_children(random_state, random_state_distort, n_children=1)

    # Generate number of samples automatically if length is unspecified
    if length is None:
        length = duration * sampling_rate

    # Constant baseline of 1.0 plus a linear drift over the recording.
    eda = np.full(length, 1.0)
    eda += drift * np.linspace(0, duration, length)
    time = [0, duration]

    # Place scr_number SCR onsets evenly over the recording, then merge each
    # 9-second SCR template into the baseline, clipping at the recording edges.
    start_peaks = np.linspace(0, duration, scr_number, endpoint=False)

    for start_peak in start_peaks:
        # Randomized time-to-peak; 3.0745 is the canonical value from Bach (2010).
        relative_time_peak = np.abs(rng.normal(0, 5, size=1)) + 3.0745
        scr = _eda_simulate_scr(sampling_rate=sampling_rate, time_peak=relative_time_peak)
        time_scr = [start_peak, start_peak + 9]
        if time_scr[0] < 0:
            scr = scr[int(np.round(np.abs(time_scr[0]) * sampling_rate)) : :]
            time_scr[0] = 0
        if time_scr[1] > duration:
            scr = scr[0 : int(np.round((duration - time_scr[0]) * sampling_rate))]
            time_scr[1] = duration

        eda = signal_merge(signal1=eda, signal2=scr, time1=time, time2=time_scr)

    # Add random noise
    if noise > 0:
        eda = signal_distort(
            eda,
            sampling_rate=sampling_rate,
            noise_amplitude=noise,
            noise_frequency=[5, 10, 100],
            noise_shape="laplace",
            silent=True,
            random_state=random_state_distort[0],
        )

    return eda
def _eda_simulate_scr(sampling_rate=1000, length=None, time_peak=3.0745, rise=0.7013, decay=[3.1487, 14.1257]):
"""Simulate a canonical skin conductance response (SCR)
Based on `Bach (2010)
<https://sourceforge.net/p/scralyze/code/HEAD/tree/branches/version_b2.1.8/scr_bf_crf.m#l24>`_
Parameters
-----------
sampling_rate : int
The desired sampling rate (in Hz, i.e., samples/second). Defaults to 1000Hz.
length : int
The desired length of the signal (in samples). Defaults to None.
time_peak : float
Time to peak.
rise : float
Variance of rise defining gaussian.
decay : list
Decay constants.
Returns
----------
array
Vector containing the SCR signal.
Examples
--------
# scr1 = _eda_simulate_scr(time_peak=3.0745)
# scr2 = _eda_simulate_scr(time_peak=10)
# pd.DataFrame({"SCR1": scr1, "SCR2": scr2}).plot()
"""
if length is None:
length = 9 * sampling_rate
t = np.linspace(sampling_rate / 10000, 90, length)
gt = np.exp(-((t - time_peak) ** 2) / (2 * rise ** 2))
ht = np.exp(-t / decay[0]) + np.exp(-t / decay[1]) # pylint: disable=E1130
ft = np.convolve(gt, ht)
ft = ft[0 : len(t)]
ft = ft / np.max(ft)
return ft
def _eda_simulate_bateman(sampling_rate=1000, t1=0.75, t2=2):
"""Generates the bateman function:
:math:`b = e^{-t/T1} - e^{-t/T2}`
Parameters
----------
sampling_rate : float
Sampling frequency
t1 : float
Defaults to 0.75.
t2 : float
Defaults to 2.
Parameters of the bateman function
Returns
-------
bateman : array
The bateman function
Examples
----------
# bateman = _eda_simulate_bateman()
# nk.signal_plot(bateman)
"""
idx_T1 = t1 * sampling_rate
idx_T2 = t2 * sampling_rate
len_bat = idx_T2 * 10
idx_bat = np.arange(len_bat)
bateman = np.exp(-idx_bat / idx_T2) - np.exp(-idx_bat / idx_T1)
# normalize
bateman = sampling_rate * bateman / np.sum(bateman)
return bateman |
5,838 | create wtf field | # This file is part of Indico.
# Copyright (C) 2002 - 2023 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from wtforms.fields import IntegerField, SelectField
from wtforms.validators import DataRequired, Length, NumberRange, Optional, ValidationError
from indico.util.i18n import _, ngettext
from indico.web.fields.base import BaseField
from indico.web.forms.fields import IndicoRadioField, IndicoSelectMultipleCheckboxField, MultiStringField
from indico.web.forms.validators import HiddenUnless
class _ChoiceFieldBase(BaseField):
    def copy_field_data(self):
        """Return a copy of the field's configuration data without
        the IDs used to identify selected options.
        """
        data_copy = super().copy_field_data()
        for option in data_copy['options']:
            option.pop('id')
        return data_copy
class SingleChoiceConfigForm:
    # Configuration form for SingleChoiceField: widget type, alignment of
    # radio buttons, and the list of selectable options.
    display_type = IndicoRadioField(_('Display type'), [DataRequired()],
                                    description=_('Widget that will be used to render the available options'),
                                    choices=[('radio', _('Radio buttons')),
                                             ('select', _('Drop-down list'))],
                                    default='radio')
    radio_display_type = IndicoRadioField(_('Alignment'),
                                          [HiddenUnless('display_type', 'radio'), DataRequired()],
                                          description=_('The arrangement of the options'),
                                          choices=[('vertical', _('Vertical')),
                                                   ('horizontal', _('Horizontal'))])
    options = MultiStringField(_('Options'), [DataRequired()], field=('option', _('option')), unique=True,
                               uuid_field='id', sortable=True,
                               description=_('Specify the options the user can choose from'))
class _EmptyNoneSelectField(SelectField):
    """SelectField that normalizes an empty submitted value to ``None``."""

    def process_formdata(self, valuelist):
        super().process_formdata(valuelist)
        self.data = self.data or None
class _EmptyNoneRadioField(IndicoRadioField):
    """Radio field that normalizes an empty submitted value to ``None``."""

    def process_formdata(self, valuelist):
        super().process_formdata(valuelist)
        self.data = self.data or None
class SingleChoiceField(_ChoiceFieldBase):
    # Field where the user picks at most one of the configured options.
    name = 'single_choice'
    friendly_name = _('Single Choice')
    config_form = SingleChoiceConfigForm
    log_type = 'string'

    def METHOD_NAME(self):
        # Build the WTForms field: a drop-down or a radio group depending on
        # the configured display type; the field's data is the option id.
        field_options = {'coerce': lambda x: x}
        choices = [(x['id'], x['option']) for x in self.object.field_data['options']]
        if self.object.field_data['display_type'] == 'select':
            field_class = _EmptyNoneSelectField
            # Leading empty entry makes "no selection" representable.
            choices = [('', '')] + choices
        else:
            field_class = _EmptyNoneRadioField
            field_options['orientation'] = self.object.field_data['radio_display_type']
            if field_options['orientation'] == 'vertical' and not self.object.is_required:
                field_options['default'] = ''
                choices = [('', _('No selection'))] + choices
        return self._make_wtforms_field(field_class, choices=choices, **field_options)

    def is_value_empty(self, value):
        # No selection is also a valid option
        return False

    def get_friendly_value(self, value):
        # Translate the stored option id back into its human-readable label.
        option_map = {option_dict['id']: option_dict['option'] for option_dict in self.object.field_data['options']}
        return option_map.get(value) or ''
class MultiSelectConfigForm:
    # Configuration form for MultiSelectField: the options plus optional
    # minimum/maximum number of selectable choices.
    options = MultiStringField(_('Options'), [DataRequired()], field=('option', _('option')), unique=True,
                               uuid_field='id', sortable=True, description=_('Specify the answers the user can select'))
    min_choices = IntegerField(_('Minimum choices'), [HiddenUnless('is_required'), Optional(), NumberRange(min=0)],
                               description=_('The minimum amount of options the user has to choose.'))
    max_choices = IntegerField(_('Maximum choices'), [HiddenUnless('is_required'), Optional(), NumberRange(min=1)],
                               description=_('The maximum amount of options the user may choose.'))

    def _validate_min_max_choices(self):
        # Shared check: min must not exceed max when both are configured.
        if (self.min_choices.data is not None and self.max_choices.data is not None and
                self.min_choices.data > self.max_choices.data):
            raise ValidationError(_('Maximum choices must be greater than minimum choices.'))

    def validate_min_choices(self, field):
        if field.data is None:
            return
        # Requiring all options selected would make the minimum meaningless.
        if field.data >= len(self.options.data):
            raise ValidationError(_('Minimum choices must be fewer than the total number of options.'))

    def validate_max_choices(self, field):
        if field.data is None:
            return
        self._validate_min_max_choices()
        if field.data > len(self.options.data):
            raise ValidationError(_('Maximum choices must be fewer or equal than the total number of options.'))
class MultiSelectField(_ChoiceFieldBase):
    # Field where the user may pick several of the configured options.
    name = 'multiselect'
    friendly_name = _('Select multiple')
    config_form = MultiSelectConfigForm
    wtf_field_class = IndicoSelectMultipleCheckboxField
    log_type = 'list'

    @property
    def validators(self):
        # Build a Length validator for the min/max choice counts; -1 is the
        # wtforms Length sentinel for "unbounded" on that side.
        min_choices = self.object.field_data.get('min_choices')
        max_choices = self.object.field_data.get('max_choices')
        if min_choices is None and max_choices is None:
            return  # no constraint configured -> no validators
        if min_choices is None:
            min_choices = -1
        if max_choices is None:
            max_choices = -1
        if max_choices == -1:
            message = ngettext('Please select at least %(min)d option.',
                               'Please select at least %(min)d options.', min_choices)
        elif min_choices == -1:
            message = ngettext('Please select no more than %(max)d option.',
                               'Please select no more than %(max)d options.', max_choices)
        else:
            message = _('Please select between %(min)d and %(max)d options.')
        return [Length(min=min_choices, max=max_choices, message=message)]

    @property
    def wtf_field_kwargs(self):
        return {'choices': [(x['id'], x['option']) for x in self.object.field_data['options']],
                'coerce': lambda x: x}

    def get_friendly_value(self, value):
        # Map stored option ids back to labels, preserving order and silently
        # dropping ids whose option no longer exists.
        option_map = {option_dict['id']: option_dict['option'] for option_dict in self.object.field_data['options']}
        return [option_map[id_] for id_ in value if id_ in option_map]
5,839 | list clusters | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import datetime
from google.cloud import dataproc_v1
from airflow import models
from airflow.models.variable import Variable
from airflow.operators.python import PythonOperator
from airflow.operators.python import BranchPythonOperator
from airflow.providers.google.cloud.operators.dataproc import (
DataprocCreateClusterOperator,
DataprocSubmitJobOperator,
ClusterGenerator
)
# -----------------------------------------------------------------
# Task functions
# -----------------------------------------------------------------
def check_cluster_status():
    """Return True if the ephemeral Dataproc cluster exists and is RUNNING.

    The boolean lands in XCom via the PythonOperator and is consumed by
    ``branch_task`` to choose between reusing the cluster and creating it.
    """
    cluster_name = 'ephemeral-cluster-test'
    # f-string instead of string concatenation (same output text).
    print(f"cluster_name:{cluster_name} project:{Variable.get('project')} region:{Variable.get('region')}")
    clusters = METHOD_NAME()
    # Single dict lookup instead of membership test + second lookup.
    return clusters.get(cluster_name) == "RUNNING"
def METHOD_NAME():
    """Return a mapping of Dataproc cluster name -> status name (e.g. 'RUNNING').

    Uses the global gRPC endpoint when the Airflow ``region`` variable is
    'global', otherwise the matching regional endpoint.
    """
    clusters = {}
    # Hoist the Airflow Variable lookups: each Variable.get() is a metadata
    # database round trip and the original repeated them per use.
    region = Variable.get('region')
    project = Variable.get('project')
    if region == "global":
        # Use the default gRPC global endpoints.
        dataproc_cluster_client = dataproc_v1.ClusterControllerClient()
    else:
        # Use a regional gRPC endpoint. See:
        # https://cloud.google.com/dataproc/docs/concepts/regional-endpoints
        dataproc_cluster_client = dataproc_v1.ClusterControllerClient(
            client_options={"api_endpoint": f"{region}-dataproc.googleapis.com:443"}
        )
    print("clusters list:")
    for cluster in dataproc_cluster_client.METHOD_NAME(
        request={"project_id": project, "region": region}
    ):
        # Same output text as before (redundant inner parens removed).
        print("{} - {}".format(cluster.cluster_name, cluster.status.state.name))
        clusters[cluster.cluster_name] = cluster.status.state.name
    return clusters
def branch_task(**kwargs):
    """Branch on the XCom result of ``check_cluster_status``.

    Returns the task_id to follow next: 'run_job' when the cluster is
    already running, otherwise 'create_cluster'.
    """
    cluster_running = kwargs['ti'].xcom_pull(task_ids='check_cluster_status')
    return 'run_job' if cluster_running else 'create_cluster'
# -----------------------------------------------------------------
# Default configurations
# -----------------------------------------------------------------
# Shared operator defaults; a start_date is required before Airflow will
# schedule the DAG.
default_dag_args = {
    'start_date': datetime.datetime(2022, 9, 13),
}

# Cluster spec built via ClusterGenerator. All environment-specific values
# (project, subnetwork, autoscaling policy, service account) come from
# Airflow Variables so the DAG itself stays environment-agnostic.
CLUSTER_GENERATOR_CONFIG = ClusterGenerator(
    project_id=Variable.get('project'),
    master_machine_type='n1-standard-4',
    worker_machine_type='n1-standard-4',
    num_workers=2,
    subnetwork_uri=Variable.get('subnetwork'),
    internal_ip_only=True,
    autoscaling_policy='projects/' + Variable.get('project') + '/locations/' + Variable.get('region') + '/autoscalingPolicies/' + Variable.get('autoscaling_policy'),
    # Auto-delete after 300s of inactivity — this is what makes the cluster
    # "ephemeral".
    idle_delete_ttl=300,
    service_account=Variable.get('dataproc_service_account')
).make()

# PySpark job definition shared by both submit operators below.
PYSPARK_JOB = {
    "reference": {"project_id": Variable.get('project')},
    "placement": {"cluster_name": 'ephemeral-cluster-test'},
    "pyspark_job": {"main_python_file_uri": f"gs://{Variable.get('jobs_bucket')}/jobs/{'hello_world_spark.py'}"},
}

# -----------------------------------------------------------------
# Define a DAG of tasks.
# -----------------------------------------------------------------
# Any task you create within the context manager is automatically added to the DAG object.
with models.DAG(
        'ephemeral_cluster_job_1',
        catchup=False,
        schedule_interval='@daily',
        default_args=default_dag_args) as dag:

    # Pushes True/False (cluster exists and is RUNNING?) to XCom.
    check_cluster_status = PythonOperator(
        task_id="check_cluster_status",
        provide_context=True,
        python_callable=check_cluster_status,
        retries=0
    )

    # Chooses 'run_job' or 'create_cluster' based on the XCom value above.
    branch_task = BranchPythonOperator(
        task_id="branch_task",
        provide_context=True,
        python_callable=branch_task,
        retries=0
    )

    # Submits the job when the cluster is already running.
    run_job = DataprocSubmitJobOperator(
        task_id="run_job",
        job=PYSPARK_JOB,
        region=Variable.get('region'),
        project_id=Variable.get('project'),
        retries=0
    )

    # Submits the same job immediately after cluster creation.
    run_job_im = DataprocSubmitJobOperator(
        task_id="run_job_im",
        job=PYSPARK_JOB,
        region=Variable.get('region'),
        project_id=Variable.get('project'),
        retries=0
    )

    # Creates the ephemeral cluster when it is not already running.
    create_cluster = DataprocCreateClusterOperator(
        task_id="create_cluster",
        project_id=Variable.get('project'),
        cluster_config=CLUSTER_GENERATOR_CONFIG,
        region=Variable.get('region'),
        cluster_name='ephemeral-cluster-test',
        retries=0
    )

    # Path taken when the cluster already exists.
    check_cluster_status >> branch_task >> run_job
check_cluster_status >> branch_task >> create_cluster >> run_job_i |
5,840 | test some very low effective balances that | from eth2spec.test.context import with_all_phases, with_phases, spec_state_test
from eth2spec.test.helpers.constants import PHASE0
from eth2spec.test.helpers.rewards import leaking
import eth2spec.test.helpers.rewards as rewards_helpers
# ---------------------------------------------------------------------------
# Rewards/penalties tests under an inactivity leak. Each test replays a
# baseline scenario from rewards_helpers with the @leaking() decorator
# active, which puts the state into the inactivity-leak regime first.
# ---------------------------------------------------------------------------
@with_all_phases
@spec_state_test
@leaking()
def test_empty_leak(spec, state):
    yield from rewards_helpers.run_test_empty(spec, state)


@with_all_phases
@spec_state_test
@leaking()
def test_full_leak(spec, state):
    yield from rewards_helpers.run_test_full_all_correct(spec, state)


@with_all_phases
@spec_state_test
@leaking()
def test_half_full_leak(spec, state):
    yield from rewards_helpers.run_test_half_full(spec, state)


@with_all_phases
@spec_state_test
@leaking()
def test_quarter_full_leak(spec, state):
    yield from rewards_helpers.run_test_partial(spec, state, 0.25)


@with_all_phases
@spec_state_test
@leaking()
def test_full_but_partial_participation_leak(spec, state):
    yield from rewards_helpers.run_test_full_but_partial_participation(spec, state)


# PHASE0-only: PendingAttestation accounting does not exist in later phases.
@with_phases([PHASE0])
@spec_state_test
@leaking()
def test_one_attestation_one_correct_leak(spec, state):
    yield from rewards_helpers.run_test_one_attestation_one_correct(spec, state)


@with_all_phases
@spec_state_test
@leaking()
def test_with_not_yet_activated_validators_leak(spec, state):
    yield from rewards_helpers.run_test_with_not_yet_activated_validators(spec, state)


@with_all_phases
@spec_state_test
@leaking()
def test_with_exited_validators_leak(spec, state):
    yield from rewards_helpers.run_test_with_exited_validators(spec, state)


@with_all_phases
@spec_state_test
@leaking()
def test_with_slashed_validators_leak(spec, state):
    yield from rewards_helpers.run_test_with_slashed_validators(spec, state)


@with_all_phases
@spec_state_test
@leaking()
def test_some_very_low_effective_balances_that_attested_leak(spec, state):
    yield from rewards_helpers.run_test_some_very_low_effective_balances_that_attested(spec, state)


# METHOD_NAME placeholder: the "did not attest" counterpart of the test above.
@with_all_phases
@spec_state_test
@leaking()
def METHOD_NAME(spec, state):
    yield from rewards_helpers.run_test_some_very_low_effective_balances_that_did_not_attest(spec, state)


#
# NOTE: No source incorrect tests
# All PendingAttestations in state have source validated
# We choose to keep this invariant in these tests to not force clients to test with degenerate states
#


@with_phases([PHASE0])
@spec_state_test
@leaking()
def test_full_half_correct_target_incorrect_head_leak(spec, state):
    yield from rewards_helpers.run_test_full_fraction_incorrect(
        spec, state,
        correct_target=True,
        correct_head=False,
        fraction_incorrect=0.5,
    )


@with_phases([PHASE0])
@spec_state_test
@leaking()
def test_full_correct_target_incorrect_head_leak(spec, state):
    yield from rewards_helpers.run_test_full_fraction_incorrect(
        spec, state,
        correct_target=True,
        correct_head=False,
        fraction_incorrect=1.0,
    )


@with_phases([PHASE0])
@spec_state_test
@leaking()
def test_full_half_incorrect_target_incorrect_head_leak(spec, state):
    yield from rewards_helpers.run_test_full_fraction_incorrect(
        spec, state,
        correct_target=False,
        correct_head=False,
        fraction_incorrect=0.5,
    )


@with_phases([PHASE0])
@spec_state_test
@leaking()
def test_full_half_incorrect_target_correct_head_leak(spec, state):
    yield from rewards_helpers.run_test_full_fraction_incorrect(
        spec, state,
        correct_target=False,
        correct_head=True,
        fraction_incorrect=0.5,
    )


@with_all_phases
@spec_state_test
@leaking()
def test_full_random_leak(spec, state):
    yield from rewards_helpers.run_test_full_random(spec, state)


# Longer leaks: the penalty quotient grows with the number of leaked epochs.
@with_all_phases
@spec_state_test
@leaking(epochs=7)
def test_full_random_seven_epoch_leak(spec, state):
    yield from rewards_helpers.run_test_full_random(spec, state)


@with_all_phases
@spec_state_test
@leaking(epochs=10)
def test_full_random_ten_epoch_leak(spec, state):
    yield from rewards_helpers.run_test_full_random(spec, state)
5,841 | test intersect param | # -*- coding: utf-8 -*
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import os
import requests
from astropy import coordinates
from astropy.table import Table
from ..core import MOCServer
from astroquery.utils.mocks import MockResponse
pytest.importorskip("mocpy")
pytest.importorskip("regions")
from regions import CircleSkyRegion, PolygonSkyRegion # noqa: E402
# Maps the 'data' kwarg forwarded by the tests to the canned JSON fixture
# file served by get_mockreturn below.
DATA_FILES = {
    "PROPERTIES_SEARCH": "properties.json",
    "HIPS_FROM_SAADA_AND_ALASKY": "hips_from_saada_alasky.json",
}
@pytest.fixture
def patch_get(request):
    """Fixture rerouting requests.Session.request to the local mock."""
    monkeypatch = request.getfixturevalue("monkeypatch")
    monkeypatch.setattr(requests.Session, "request", get_mockreturn)
    return monkeypatch
def get_mockreturn(
    self,
    method,
    url,
    data=None,
    timeout=10,
    files=None,
    params=None,
    headers=None,
    **kwargs
):
    """Stand-in for requests.Session.request returning a canned payload.

    ``data`` is used by the tests as a key into DATA_FILES selecting which
    fixture file to serve.
    """
    fixture_path = data_path(DATA_FILES[data])
    with open(fixture_path, "rb") as fixture_file:
        payload = fixture_file.read()
    return MockResponse(payload)
def data_path(filename):
    """Return the path of *filename* inside this test module's data directory."""
    return os.path.join(os.path.dirname(__file__), "data", filename)
"""List of all the constrain we want to test"""
# SPATIAL CONSTRAINTS DEFINITIONS
polygon1 = coordinates.SkyCoord(
[57.376, 56.391, 56.025, 56.616],
[24.053, 24.622, 24.049, 24.291],
frame="icrs",
unit="deg",
)
polygon2 = coordinates.SkyCoord(
[58.376, 53.391, 56.025, 54.616],
[24.053, 25.622, 22.049, 27.291],
frame="icrs",
unit="deg",
)
# PROPERTY CONSTRAINTS DEFINITIONS
meta_data_ex = "ID = *SDSS* && moc_sky_fraction<=0.01"
meta_data_hips_from_saada_alasky = (
"(hips_service_url*=http://saada*) && (hips_service_url*=http://alasky.*)"
)
"""
Combination of one spatial with a property constrain
Each tuple(spatial, property) characterizes a specific query and is tested
with regards to the true results stored in a file located in the data directory
"""
@pytest.mark.parametrize(
    "datafile", ["PROPERTIES_SEARCH", "HIPS_FROM_SAADA_AND_ALASKY"]
)
def test_request_results(patch_get, datafile):
    """
    Compare the request result obtained with the astroquery.Mocserver API
    with the one obtained on the http://alasky.unistra.fr/MocServer/query
    """
    # 'data' is forwarded to the mocked Session.request, which uses it as
    # the DATA_FILES key selecting the canned response (see get_mockreturn).
    results = MOCServer.query_region(
        get_query_payload=False, verbose=True, data=datafile
    )
    assert isinstance(results, Table)
"""
Spatial Constrains requests
We test a polygon/cone/moc search and ensure the
request param 'intersect' is correct
"""
@pytest.mark.parametrize(
    "RA, DEC, RADIUS", [(10.8, 6.5, 0.5), (25.6, -23.2, 1.1), (150.6, 45.1, 1.5)]
)
def test_cone_search_spatial_request(RA, DEC, RADIUS):
    """The cone-search payload must echo the centre and radius as strings."""
    centre = coordinates.SkyCoord(ra=RA, dec=DEC, unit="deg")
    cone = CircleSkyRegion(center=centre, radius=coordinates.Angle(RADIUS, unit="deg"))
    payload = MOCServer.query_region(
        region=cone, get_query_payload=True, intersect="overlaps"
    )
    assert payload["RA"] == str(RA)
    assert payload["DEC"] == str(DEC)
    assert payload["SR"] == str(RADIUS)
@pytest.mark.parametrize(
    "poly, poly_payload",
    [
        (polygon1, "Polygon 57.376 24.053 56.391 24.622 56.025 24.049 56.616 24.291"),
        (polygon2, "Polygon 58.376 24.053 53.391 25.622 56.025 22.049 54.616 27.291"),
    ],
)
def test_polygon_spatial_request(poly, poly_payload):
    """Polygon searches must serialise vertices into an STC-S 'Polygon ...' string."""
    payload = MOCServer.query_region(
        region=PolygonSkyRegion(vertices=poly),
        intersect="overlaps",
        get_query_payload=True,
    )
    assert payload["stc"] == poly_payload
@pytest.mark.parametrize("intersect", ["encloses", "overlaps", "covers"])
def METHOD_NAME(intersect):
    """'encloses' must be rewritten to 'enclosed' in the payload; the other
    intersect modes pass through unchanged."""
    centre = coordinates.SkyCoord(ra=10.8, dec=32.2, unit="deg")
    cone = CircleSkyRegion(centre, coordinates.Angle(1.5, unit="deg"))
    payload = MOCServer.query_region(
        region=cone, intersect=intersect, get_query_payload=True
    )
    expected = "enclosed" if intersect == "encloses" else intersect
    assert payload["intersect"] == expected
5,842 | test sanitize title | from __future__ import annotations
import os
import pytest
import comicapi.utils
import comictalker.talker_utils
def test_os_sorted():
    """os_sorted must order names like a file manager (natural sort)."""
    page_name_list = [
        "cover.jpg",
        "Page1.jpeg",
        "!cover.jpg",
        "page4.webp",
        "test/!cover.tar.gz",
        "!cover.tar.gz",
        "00.jpg",
        "ignored.txt",
        "page0.jpg",
        "test/00.tar.gz",
        ".ignored.jpg",
        "Page3.gif",
        "!cover.tar.gz",
        "Page2.png",
        "page10.jpg",
        "!cover",
    ]
    # Note "page4" before "page10": numeric runs compare as numbers.
    assert comicapi.utils.os_sorted(page_name_list) == [
        "!cover",
        "!cover.jpg",
        "!cover.tar.gz",
        "!cover.tar.gz",  # Depending on locale punctuation or numbers might come first (Linux, MacOS)
        ".ignored.jpg",
        "00.jpg",
        "cover.jpg",
        "ignored.txt",
        "page0.jpg",
        "Page1.jpeg",
        "Page2.png",
        "Page3.gif",
        "page4.webp",
        "page10.jpg",
        "test/!cover.tar.gz",
        "test/00.tar.gz",
    ]
def test_recursive_list_with_file(tmp_path) -> None:
    """get_recursive_filelist walks directories, keeps explicitly listed
    files, and treats glob metacharacters in directory names literally."""
    foo_png = tmp_path / "foo.png"
    foo_png.write_text("not a png")

    temp_folder = tmp_path / "bar"
    temp_folder.mkdir()
    temp_file = temp_folder / "test.cbz"
    temp_file.write_text("not a zip")

    temp_folder2 = tmp_path / "bar" / "baz" / "something else"
    temp_folder2.mkdir(parents=True)
    temp_cbr = temp_folder2 / "bar.cbr"
    temp_cbr.write_text("not a rar")

    temp_txt = tmp_path / "info.txt"
    temp_txt.write_text("this is here")

    temp_txt2 = tmp_path / "info2.txt"
    temp_txt2.write_text("this is here")

    # Directory whose name looks like a glob pattern; must not be expanded.
    glob_in_name = tmp_path / "[e-b]"
    glob_in_name.mkdir()

    expected_result = {str(foo_png), str(temp_cbr), str(temp_file), str(temp_txt), str(temp_txt2)}
    result = set(comicapi.utils.get_recursive_filelist([str(temp_txt2), tmp_path, str(glob_in_name)]))
    assert result == expected_result


# (value, expected): xlate normalizes to str, mapping empty/None to None.
xlate_values = [
    ("", None),
    (None, None),
    ("9", "9"),
    (9, "9"),
]

# (value, expected): xlate_int strips junk and truncates decimals;
# unparseable input maps to None.
xlate_int_values = [
    (None, None),
    (" ", None),
    ("", None),
    ("9..", None),
    (9, 9),
    ("9", 9),
    (9.3, 9),
    ("9.3", 9),
    ("9.", 9),
    (" 9 . 3 l", 9),
]

# (value, expected): xlate_float tolerates embedded spaces and trailing letters.
xlate_float_values = [
    (9, 9.0),
    ("9", 9.0),
    (9.3, 9.3),
    ("9.3", 9.3),
    ("9.", 9.0),
    (" 9 . 3 l", 9.3),
]


@pytest.mark.parametrize("value, result", xlate_values)
def test_xlate(value, result):
    assert comicapi.utils.xlate(value) == result


@pytest.mark.parametrize("value, result", xlate_float_values)
def test_xlate_float(value, result):
    assert comicapi.utils.xlate_float(value) == result


@pytest.mark.parametrize("value, result", xlate_int_values)
def test_xlate_int(value, result):
    assert comicapi.utils.xlate_int(value) == result


# (name, iso_code): case-insensitive language name -> 2-letter ISO code.
language_values = [
    ("english", "en"),
    ("ENGLISH", "en"),
    ("EnglisH", "en"),
    ("", ""),
    ("aaa", None),  # does not have a 2-letter code
    (None, None),
]


@pytest.mark.parametrize("value, result", language_values)
def test_get_language_iso(value, result):
    assert result == comicapi.utils.get_language_iso(value)


# (existing_notes, new_notes, split, expected): combine_notes replaces
# everything from the split token onward with the new notes.
combine_values = [
    ("hello", "english", "en", "hello\nenglish"),
    ("hello en", "english", "en", "hello english"),
    ("hello en goodbye", "english", "en", "hello english"),
    ("hello en en goodbye", "english", "en", "hello en english"),
    ("", "english", "en", "english"),
    (None, "english", "en", "english"),
    ("hello", "", "en", "hello"),
    ("hello", None, "en", "hello"),
    ("hello", "hello", "hel", "hello"),
]


@pytest.mark.parametrize("existing_notes, new_notes, split, result", combine_values)
def test_combine_notes(existing_notes, new_notes, split, result):
    assert result == comicapi.utils.combine_notes(existing_notes, new_notes, split)
def test_unique_file(tmp_path):
    """unique_file returns the path unchanged until it exists, then ' (1)'."""
    file = tmp_path / "test.cbz"
    assert file == comicapi.utils.unique_file(file)

    file.mkdir()
    assert (tmp_path / "test (1).cbz") == comicapi.utils.unique_file(file)


def test_add_to_path(monkeypatch):
    """add_to_path prepends new entries and ignores duplicates, including
    ones that only differ by a trailing separator."""
    monkeypatch.setenv("PATH", os.path.abspath("/usr/bin"))
    comicapi.utils.add_to_path("/bin")
    assert os.environ["PATH"] == (os.path.abspath("/bin") + os.pathsep + os.path.abspath("/usr/bin"))

    comicapi.utils.add_to_path("/usr/bin")
    comicapi.utils.add_to_path("/usr/bin/")
    assert os.environ["PATH"] == (os.path.abspath("/bin") + os.pathsep + os.path.abspath("/usr/bin"))


# ((title_a, title_b), expected): fuzzy title comparison cases.
titles = [
    (("", ""), True),
    (("Conan el Barbaro", "Conan el Bárbaro"), True),  # accent-insensitive
    (("鋼の錬金術師", "鋼の錬金術師"), True),
    (("钢之炼金术师", "鋼の錬金術師"), False),  # different scripts do not match
    (("batmans grave", "The Batman's Grave"), True),
    (("batman grave", "The Batman's Grave"), True),
    (("bats grave", "The Batman's Grave"), False),
]


@pytest.mark.parametrize("value, result", titles)
def test_titles_match(value, result):
    assert comicapi.utils.titles_match(value[0], value[1]) == result


# (raw, sanitized): sanitize_title strips articles, accents and punctuation.
titles_2 = [
    ("", ""),
    ("鋼の錬金術師", "鋼の錬金術師"),
    ("Conan el Bárbaro", "Conan el Barbaro"),
    ("The Batman's Grave", "batmans grave"),
    ("A+X", "ax"),
    ("ms. marvel", "ms marvel"),
    ("spider-man/deadpool", "spider man deadpool"),
]


# METHOD_NAME placeholder: sanitize_title test; comparison is casefolded.
@pytest.mark.parametrize("value, result", titles_2)
def METHOD_NAME(value, result):
    assert comicapi.utils.sanitize_title(value) == result.casefold()


# (raw, fixed): fix_url resolves '..' segments and ensures a trailing slash.
urls = [
    ("", ""),
    ("http://test.test", "http://test.test/"),
    ("http://test.test/", "http://test.test/"),
    ("http://test.test/..", "http://test.test/"),
    ("http://test.test/../hello", "http://test.test/hello/"),
    ("http://test.test/../hello/", "http://test.test/hello/"),
    ("http://test.test/../hello/..", "http://test.test/"),
    ("http://test.test/../hello/../", "http://test.test/"),
]


@pytest.mark.parametrize("value, result", urls)
def test_fix_url(value, result):
    assert comictalker.talker_utils.fix_url(value) == result


# ((text, delimiter), parts): split strips whitespace and drops empty parts.
split = [
    (("1,2,,3", ","), ["1", "2", "3"]),
    (("1 ,2,,3", ","), ["1", "2", "3"]),
    (("1 ,2,,3 ", ","), ["1", "2", "3"]),
    (("\n1 \n2\n\n3 ", ","), ["1 \n2\n\n3"]),
    (("\n1 \n2\n\n3 ", "\n"), ["1", "2", "3"]),
    ((None, ","), []),
]


@pytest.mark.parametrize("value, result", split)
def test_split(value, result):
    assert comicapi.utils.split(*value) == result
5,843 | test docstring | import math
import textwrap
import sys
import pytest
import threading
import traceback
import time
import numpy as np
from numpy.testing import IS_PYPY
from . import util
class TestF77Callback(util.F2PyTest):
    """Callback round-trip tests for the Fortran 77 sources in callback/foo.f."""

    sources = [util.getpath("tests", "src", "callback", "foo.f")]

    @pytest.mark.parametrize("name", "t,t2".split(","))
    def test_all(self, name):
        self.check_function(name)

    @pytest.mark.xfail(IS_PYPY,
                       reason="PyPy cannot modify tp_doc after PyType_Ready")
    def METHOD_NAME(self):
        # Verifies the f2py auto-generated docstring of the wrapped routine 't'.
        expected = textwrap.dedent("""\
a = t(fun,[fun_extra_args])
Wrapper for ``t``.
Parameters
----------
fun : call-back function
Other Parameters
----------------
fun_extra_args : input tuple, optional
Default: ()
Returns
-------
a : int
Notes
-----
Call-back functions::
def fun(): return a
Return objects:
a : int
""")
        assert self.module.t.__doc__ == expected

    def check_function(self, name):
        """Exercise the callback with lambdas, builtins, f2py-wrapped
        functions, callable objects and bound methods."""
        t = getattr(self.module, name)
        r = t(lambda: 4)
        assert r == 4
        r = t(lambda a: 5, fun_extra_args=(6, ))
        assert r == 5
        r = t(lambda a: a, fun_extra_args=(6, ))
        assert r == 6
        r = t(lambda a: 5 + a, fun_extra_args=(7, ))
        assert r == 12
        r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi, ))
        assert r == 180
        r = t(math.degrees, fun_extra_args=(math.pi, ))
        assert r == 180
        # Fortran-side functions can also be used as callbacks, including
        # through their raw C pointer.
        r = t(self.module.func, fun_extra_args=(6, ))
        assert r == 17
        r = t(self.module.func0)
        assert r == 11
        r = t(self.module.func0._cpointer)
        assert r == 11

        class A:
            def __call__(self):
                return 7

            def mth(self):
                return 9

        a = A()
        r = t(a)
        assert r == 7
        r = t(a.mth)
        assert r == 9

    @pytest.mark.skipif(sys.platform == 'win32',
                        reason='Fails with MinGW64 Gfortran (Issue #9673)')
    def test_string_callback(self):
        def callback(code):
            if code == "r":
                return 0
            else:
                return 1

        f = getattr(self.module, "string_callback")
        r = f(callback)
        assert r == 0

    @pytest.mark.skipif(sys.platform == 'win32',
                        reason='Fails with MinGW64 Gfortran (Issue #9673)')
    def test_string_callback_array(self):
        # See gh-10027
        # Three equivalent representations of a length-8 byte-string array.
        cu1 = np.zeros((1, ), "S8")
        cu2 = np.zeros((1, 8), "c")
        cu3 = np.array([""], "S8")

        def callback(cu, lencu):
            # Non-zero return codes identify which property failed.
            if cu.shape != (lencu,):
                return 1
            if cu.dtype != "S8":
                return 2
            if not np.all(cu == b""):
                return 3
            return 0

        f = getattr(self.module, "string_callback_array")
        for cu in [cu1, cu2, cu3]:
            res = f(callback, cu, cu.size)
            assert res == 0

    def test_threadsafety(self):
        # Segfaults if the callback handling is not threadsafe
        errors = []

        def cb():
            # Sleep here to make it more likely for another thread
            # to call their callback at the same time.
            time.sleep(1e-3)
            # Check reentrancy
            r = self.module.t(lambda: 123)
            assert r == 123
            return 42

        def runner(name):
            try:
                for j in range(50):
                    r = self.module.t(cb)
                    assert r == 42
                    self.check_function(name)
            except Exception:
                # Collect formatted tracebacks; asserting inside a thread
                # would be swallowed otherwise.
                errors.append(traceback.format_exc())

        threads = [
            threading.Thread(target=runner, args=(arg, ))
            for arg in ("t", "t2") for n in range(20)
        ]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        errors = "\n\n".join(errors)
        if errors:
            raise AssertionError(errors)

    def test_hidden_callback(self):
        """Callbacks resolved from module globals: undefined, defined,
        redefined, deleted, and defined with a default argument."""
        try:
            self.module.hidden_callback(2)
        except Exception as msg:
            assert str(msg).startswith("Callback global_f not defined")

        try:
            self.module.hidden_callback2(2)
        except Exception as msg:
            assert str(msg).startswith("cb: Callback global_f not defined")

        self.module.global_f = lambda x: x + 1
        r = self.module.hidden_callback(2)
        assert r == 3

        self.module.global_f = lambda x: x + 2
        r = self.module.hidden_callback(2)
        assert r == 4

        del self.module.global_f
        try:
            self.module.hidden_callback(2)
        except Exception as msg:
            assert str(msg).startswith("Callback global_f not defined")

        self.module.global_f = lambda x=0: x + 3
        r = self.module.hidden_callback(2)
        assert r == 5

        # reproducer of gh18341
        r = self.module.hidden_callback2(2)
        assert r == 3
class TestF77CallbackPythonTLS(TestF77Callback):
    """
    Callback tests using Python thread-local storage instead of
    compiler-provided
    """

    # Rebuilds the same test module with the alternative TLS implementation.
    options = ["-DF2PY_USE_PYTHON_TLS"]


class TestF90Callback(util.F2PyTest):
    """Fortran 90 callback regression (gh-17797): callback plus array arg."""

    sources = [util.getpath("tests", "src", "callback", "gh17797.f90")]

    def test_gh17797(self):
        def incr(x):
            return x + 123

        y = np.array([1, 2, 3], dtype=np.int64)
        r = self.module.gh17797(incr, y)
        assert r == 123 + 1 + 2 + 3


class TestGH18335(util.F2PyTest):
    """The reproduction of the reported issue requires specific input that
    extensions may break the issue conditions, so the reproducer is
    implemented as a separate test class. Do not extend this test with
    other tests!
    """

    sources = [util.getpath("tests", "src", "callback", "gh18335.f90")]

    def test_gh18335(self):
        def foo(x):
            # Mutates the array in place; the Fortran side reads it back.
            x[0] += 1

        r = self.module.gh18335(foo)
        assert r == 123 + 1
5,844 | get leaf operations | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Union
import torch
from torch.nn.modules.linear import Identity
from torch.quantization import QuantWrapper
__all__ = ["get_leaf_operations", "is_quantized", "get_precision_information"]
def METHOD_NAME(
    model: torch.nn.Module,
    operations_to_skip: Optional[List[torch.nn.Module]] = None,
    operations_to_unwrap: Optional[List[torch.nn.Module]] = None,
) -> List[torch.nn.Module]:
    """
    Get the leaf operations in the model
    (those that do not have operations as children)

    :param model: the model to get the leaf operations from
    :param operations_to_skip: a list of leaf operation types that will be
        omitted from the result. If None passed, by default the Identity
        operation will be skipped
    :param operations_to_unwrap: a list of operation types that will be
        unwrapped: the module they wrap (their ``module`` attribute) is added
        directly to the result. If None passed, by default the QuantWrapper
        operation will be unwrapped
    :return: a list of the leaf operations (for a module with no children at
        all, the module itself is returned — preserved recursive contract)
    """
    if operations_to_skip is None:
        operations_to_skip = [Identity]
    if operations_to_unwrap is None:
        operations_to_unwrap = [QuantWrapper]

    children = list(model.children())
    if not children:
        # Recursive base case: a module with no submodules is itself a leaf.
        return model

    leaf_operations = []
    for child in children:
        if isinstance(child, tuple(operations_to_unwrap)):
            # Unwrap: record the wrapped module directly.
            leaf_operations.append(child.module)
            continue
        # Recurse once and branch on the returned type, instead of relying
        # on list.extend() raising TypeError for non-iterable leaf Modules
        # (which also re-ran the whole recursion on the fallback path).
        result = METHOD_NAME(child)
        if isinstance(result, list):
            leaf_operations.extend(result)
        else:
            leaf_operations.append(result)

    return [
        op for op in leaf_operations if not isinstance(op, tuple(operations_to_skip))
    ]
def is_quantized(operation: torch.nn.Module) -> bool:
    """
    Check whether the operation is quantized (contains
    a quantization scheme)
    """
    # Equivalent to hasattr(): any AttributeError means "not quantized",
    # regardless of the attribute's value.
    try:
        operation.quantization_scheme
    except AttributeError:
        return False
    return True
def get_precision_information(
    operation: torch.nn.Module,
) -> Union[None, int, "QuantizationScheme"]:  # noqa F821
    """
    Describe the numeric precision of *operation*.

    1) Quantized operation -> its quantization scheme.
    2) Unquantized operation with a weight -> the weight's bit width.
    3) Otherwise -> None.

    :param operation: the operation to inspect
    :return: the quantization scheme, the number of bits of the operation's
        weights, or None when neither is available
    """
    # Sentinel-based lookups so that an attribute explicitly set to None is
    # still treated as "present", exactly like hasattr() would.
    _missing = object()
    scheme = getattr(operation, "quantization_scheme", _missing)
    if scheme is not _missing:
        return scheme
    weight = getattr(operation, "weight", _missing)
    if weight is not _missing:
        return _get_num_bits(weight.dtype)
    return None
def _get_num_bits(dtype: torch.dtype) -> int:
# Get the number of bits of a torch dtype
if dtype == torch.float16:
return 16
elif dtype == torch.float32:
return 32
elif dtype == torch.float64:
return 64
elif dtype == torch.int8:
return 8
elif dtype == torch.int16:
return 16
elif dtype == torch.int32:
return 32
elif dtype == torch.int64:
return 64
else:
raise ValueError("Unknown dtype: {}".format(dtype)) |
5,845 | valid starter settings | """
helicopter monitoring and control module gas helicopters
"""
import os, sys, math, time
from pymavlink import mavutil
from MAVProxy.modules.lib import mp_util
from MAVProxy.modules.lib import mp_module
from MAVProxy.modules.lib import mp_settings
class GasHeliModule(mp_module.MPModule):
    """Monitoring and control for gas (internal combustion) helicopters.

    Adds console status cells for ignition/throttle/RPM, a 'gasheli'
    command, and a start/stop sequence that drives a starter servo while
    temporarily overriding the ignition RC channel.
    """

    def __init__(self, mpstate):
        super(GasHeliModule, self).__init__(mpstate, "gas_heli", "Gas Heli", public=False)
        # Status cells on console row 4; updated from incoming telemetry.
        self.console.set_status('IGN', 'IGN', row=4)
        self.console.set_status('THR', 'THR', row=4)
        self.console.set_status('RPM', 'RPM: 0', row=4)
        self.add_command('gasheli', self.cmd_gasheli,
                         'gas helicopter control',
                         ['<start|stop>',
                          'set (GASHELISETTINGS)'])
        # User-tunable settings; channel values of 0 mean "unconfigured"
        # and are rejected by METHOD_NAME() before any motor action.
        self.gasheli_settings = mp_settings.MPSettings(
            [('ignition_chan', int, 0),
             ('ignition_disable_time', float, 0.5),
             ('ignition_stop_time', float, 3),
             ('starter_chan', int, 0),
             ('starter_time', float, 3.0),
             ('starter_pwm_on', int, 2000),
             ('starter_pwm_off', int, 1000),
             ]
        )
        # State for the asynchronous start/stop sequences driven by idle_task.
        self.starting_motor = False
        self.stopping_motor = False
        self.motor_t1 = None
        self.old_override = 0

    def mavlink_packet(self, msg):
        '''handle an incoming mavlink packet'''
        # NOTE(review): 'type' shadows the builtin; kept as-is in this
        # documentation-only pass.
        type = msg.get_type()
        master = self.master
        # add some status fields
        if type in [ 'RC_CHANNELS_RAW' ]:
            # NOTE(review): RC channel 6 is hard-coded as the ignition
            # indicator even though ignition_chan is configurable — confirm.
            rc6 = msg.chan6_raw
            if rc6 > 1500:
                ign_colour = 'green'
            else:
                ign_colour = 'red'
            self.console.set_status('IGN', 'IGN', fg=ign_colour, row=4)

        if type in [ 'SERVO_OUTPUT_RAW' ]:
            # Throttle assumed on servo output 8 — TODO confirm.
            rc8 = msg.servo8_raw
            if rc8 < 1200:
                thr_colour = 'red'
            elif rc8 < 1300:
                thr_colour = 'orange'
            else:
                thr_colour = 'green'
            self.console.set_status('THR', 'THR', fg=thr_colour, row=4)

        if type in [ 'RPM' ]:
            # Colour-code engine RPM: red below 3000, orange below 10000.
            rpm = msg.rpm1
            if rpm < 3000:
                rpm_colour = 'red'
            elif rpm < 10000:
                rpm_colour = 'orange'
            else:
                rpm_colour = 'green'
            self.console.set_status('RPM', 'RPM: %u' % rpm, fg=rpm_colour, row=4)

    def METHOD_NAME(self):
        '''check starter settings'''
        # Ignition must be on a main RC channel (1-8); the starter may be
        # on any of the 14 servo outputs.
        if self.gasheli_settings.ignition_chan <= 0 or self.gasheli_settings.ignition_chan > 8:
            print("Invalid ignition channel %d" % self.gasheli_settings.ignition_chan)
            return False
        if self.gasheli_settings.starter_chan <= 0 or self.gasheli_settings.starter_chan > 14:
            print("Invalid starter channel %d" % self.gasheli_settings.starter_chan)
            return False
        return True

    def idle_task(self):
        '''run periodic tasks'''
        # Finish the start sequence: restore the ignition override once the
        # disable window has elapsed.
        if self.starting_motor:
            if self.gasheli_settings.ignition_disable_time > 0:
                elapsed = time.time() - self.motor_t1
                if elapsed >= self.gasheli_settings.ignition_disable_time:
                    self.module('rc').set_override_chan(self.gasheli_settings.ignition_chan-1, self.old_override)
                    self.starting_motor = False
        # Finish the stop sequence: hand the ignition channel back to RC
        # after the stop hold time.
        if self.stopping_motor:
            elapsed = time.time() - self.motor_t1
            if elapsed >= self.gasheli_settings.ignition_stop_time:
                # hand back control to RC
                self.module('rc').set_override_chan(self.gasheli_settings.ignition_chan-1, self.old_override)
                self.stopping_motor = False

    def start_motor(self):
        '''start motor'''
        if not self.METHOD_NAME():
            return
        self.motor_t1 = time.time()
        self.stopping_motor = False
        if self.gasheli_settings.ignition_disable_time > 0:
            # Hold ignition low (1000us) while the starter spins up;
            # idle_task restores the previous override afterwards.
            self.old_override = self.module('rc').get_override_chan(self.gasheli_settings.ignition_chan-1)
            self.module('rc').set_override_chan(self.gasheli_settings.ignition_chan-1, 1000)
            self.starting_motor = True
        else:
            # nothing more to do
            self.starting_motor = False

        # setup starter run
        # NOTE(review): DO_REPEAT_SERVO with count=1 and starter_time*2 —
        # presumably the period parameter is in half-seconds; confirm
        # against the MAV_CMD_DO_REPEAT_SERVO definition.
        self.master.mav.command_long_send(self.target_system,
                                          self.target_component,
                                          mavutil.mavlink.MAV_CMD_DO_REPEAT_SERVO, 0,
                                          self.gasheli_settings.starter_chan,
                                          self.gasheli_settings.starter_pwm_on,
                                          1,
                                          self.gasheli_settings.starter_time*2,
                                          0, 0, 0)
        print("Starting motor")

    def stop_motor(self):
        '''stop motor'''
        if not self.METHOD_NAME():
            return
        self.motor_t1 = time.time()
        self.starting_motor = False
        self.stopping_motor = True
        # Kill ignition (1000us) and let idle_task restore the channel
        # after ignition_stop_time seconds.
        self.old_override = self.module('rc').get_override_chan(self.gasheli_settings.ignition_chan-1)
        self.module('rc').set_override_chan(self.gasheli_settings.ignition_chan-1, 1000)
        print("Stopping motor")

    def cmd_gasheli(self, args):
        '''gas help commands'''
        usage = "Usage: gasheli <start|stop|set>"
        if len(args) < 1:
            print(usage)
            return
        if args[0] == "start":
            self.start_motor()
        elif args[0] == "stop":
            self.stop_motor()
        elif args[0] == "set":
            # Delegate 'set key value' handling to MPSettings.
            self.gasheli_settings.command(args[1:])
        else:
            print(usage)
def init(mpstate):
    '''initialise module'''
    # MAVProxy entry point: called by the module loader.
    return GasHeliModule(mpstate)
5,846 | patch base class | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from unittest.mock import MagicMock
from pytest import fixture
from source_qualaroo.source import QualarooStream, Responses, Surveys
from .helpers import NO_SLEEP_HEADERS, read_all_records
@fixture
def METHOD_NAME(mocker):
    # Mock abstract methods to enable instantiating abstract class
    # (QualarooStream is an ABC; patching __abstractmethods__ empty lets the
    # tests construct it directly with placeholder path/primary_key).
    mocker.patch.object(QualarooStream, "path", "v0/example_endpoint")
    mocker.patch.object(QualarooStream, "primary_key", "test_primary_key")
    mocker.patch.object(QualarooStream, "__abstractmethods__", set())
def test_request_params(METHOD_NAME, config):
    """The stream must merge paging info ('before') with limit/start_date."""
    stream = QualarooStream(**config)
    params = stream.request_params(
        stream_slice=None, stream_state=None, next_page_token={"before": "id"}
    )
    assert params == {"limit": 500, "start_date": "start_date", "before": "id"}
def test_next_page_token(METHOD_NAME, config):
    """Pagination must terminate (None token) for an arbitrary response."""
    stream = QualarooStream(**config)
    assert stream.next_page_token(response=MagicMock()) is None
def test_surveys_stream(requests_mock):
    """Surveys stream: full listing plus client-side filtering by survey_ids."""
    mock_surveys_request = requests_mock.get(
        "https://api.qualaroo.com/api/v1/nudges?limit=500&start_date=2021-02-11T08%3A35%3A49.540Z",
        headers=NO_SLEEP_HEADERS,
        json=[{"id": "b11111111111111111111111", "name": "survey_1"}, {"id": "b22222222222222222222222", "name": "survey_2"}],
    )
    # No survey_ids filter: every survey returned by the API is emitted.
    args = {"authenticator": None, "start_date": "2021-02-11T08:35:49.540Z", "survey_ids": []}
    stream1 = Surveys(**args)
    records = read_all_records(stream1)
    assert records == [{"id": "b11111111111111111111111", "name": "survey_1"}, {"id": "b22222222222222222222222", "name": "survey_2"}]
    # Filtering to one known id keeps only that survey.
    args["survey_ids"] = ["b22222222222222222222222"]
    stream2 = Surveys(**args)
    records = read_all_records(stream2)
    assert records == [{"id": "b22222222222222222222222", "name": "survey_2"}]
    # An unknown id filters everything out.
    args["survey_ids"] = ["not-found"]
    stream3 = Surveys(**args)
    records = read_all_records(stream3)
    assert records == []
    # Each of the three reads hit the API once (filtering is client-side).
    assert mock_surveys_request.call_count == 3
def test_responses_stream(requests_mock):
    """Responses substream: one responses request per (read, matching survey)."""
    mock_surveys_request = requests_mock.get(
        "https://api.qualaroo.com/api/v1/nudges?limit=500&start_date=2021-02-11T08%3A35%3A49.540Z",
        headers=NO_SLEEP_HEADERS,
        json=[{"id": "b11111111111111111111111", "name": "survey_1"}, {"id": "b22222222222222222222222", "name": "survey_2"}],
    )
    mock_responses_request_1 = requests_mock.get(
        "https://api.qualaroo.com/api/v1/nudges/b11111111111111111111111/responses.json",
        headers=NO_SLEEP_HEADERS,
        json=[{"id": "c11111111111111111111111", "name": "response_1"}, {"id": "c22222222222222222222222", "name": "response_2"}],
    )
    mock_responses_request_2 = requests_mock.get(
        "https://api.qualaroo.com/api/v1/nudges/b22222222222222222222222/responses.json",
        headers=NO_SLEEP_HEADERS,
        json=[{"id": "c33333333333333333333333", "name": "response_3"}, {"id": "c44444444444444444444444", "name": "response_4"}],
    )
    # Unfiltered read: responses of both surveys, in survey order.
    args = {"authenticator": None, "start_date": "2021-02-11T08:35:49.540Z", "survey_ids": []}
    stream1 = Responses(**args)
    records = read_all_records(stream1)
    assert records == [
        {"id": "c11111111111111111111111", "name": "response_1"},
        {"id": "c22222222222222222222222", "name": "response_2"},
        {"id": "c33333333333333333333333", "name": "response_3"},
        {"id": "c44444444444444444444444", "name": "response_4"},
    ]
    # Filtered read: only the responses of the selected survey.
    args["survey_ids"] = ["b22222222222222222222222"]
    stream2 = Responses(**args)
    records = read_all_records(stream2)
    assert records == [{"id": "c33333333333333333333333", "name": "response_3"}, {"id": "c44444444444444444444444", "name": "response_4"}]
    # Unknown survey id: no responses requests, no records.
    args["survey_ids"] = ["not-found"]
    stream3 = Responses(**args)
    records = read_all_records(stream3)
    assert records == []
    # Surveys listing fetched once per read (3 reads); survey_1 responses only
    # in the unfiltered read (1), survey_2 responses in reads 1 and 2 (2).
    assert mock_surveys_request.call_count == 3
    assert mock_responses_request_1.call_count == 1
    assert mock_responses_request_2.call_count == 2
5,847 | setup dummy data | import pytest
from datetime import datetime, timedelta
from pytz import utc
import pandas as pd
from flexmeasures.data.models.planning.utils import initialize_index
from flexmeasures.data.models.generic_assets import GenericAsset, GenericAssetType
from flexmeasures.data.models.data_sources import DataSource
from flexmeasures.data.models.time_series import Sensor, TimedBelief
@pytest.fixture(scope="module")
def generic_report(db, app):
    """Module-scoped generic asset (with its own type) hosting report sensors."""
    report_asset_type = GenericAssetType(name="ReportAssetType")
    db.session.add(report_asset_type)
    generic_report = GenericAsset(
        name="GenericReport", generic_asset_type=report_asset_type
    )
    db.session.add(generic_report)
    # No commit here; fixtures that build on this one commit the session.
    return generic_report
@pytest.fixture(scope="module")
def profit_report(db, app, generic_report, add_market_prices, setup_sources):
    """Device with power+energy sensors and a day of alternating consumption /
    production data, plus hourly and daily profit sensors on the report asset.

    Yields (profit_sensor_hourly, profit_sensor_daily, power_sensor,
    energy_sensor).
    """
    device_type = GenericAssetType(name="Device")
    db.session.add(device_type)
    electricity_device = GenericAsset(
        name="Electricity Consuming Device", generic_asset_type=device_type
    )
    db.session.add(electricity_device)
    # Instantaneous power readings of the device, 15-minute resolution.
    power_sensor = Sensor(
        "power",
        generic_asset=electricity_device,
        event_resolution=timedelta(minutes=15),
        unit="MW",
        timezone="Europe/Amsterdam",
    )
    # Same profile expressed as energy per 15-minute block.
    energy_sensor = Sensor(
        "energy",
        generic_asset=electricity_device,
        event_resolution=timedelta(minutes=15),
        unit="MWh",
        timezone="Europe/Amsterdam",
    )
    # Output sensors for the profit report, hourly and daily.
    profit_sensor_hourly = Sensor(
        "profit hourly",
        generic_asset=generic_report,
        event_resolution=timedelta(hours=1),
        unit="EUR",
        timezone="Europe/Amsterdam",
    )
    profit_sensor_daily = Sensor(
        "profit daily",
        generic_asset=generic_report,
        event_resolution=timedelta(hours=24),
        unit="EUR",
        timezone="Europe/Amsterdam",
    )
    db.session.add_all(
        [profit_sensor_hourly, profit_sensor_daily, energy_sensor, power_sensor]
    )
    # 96 quarter-hour slots covering one full local day.
    time_slots = initialize_index(
        start=pd.Timestamp("2015-01-03").tz_localize("Europe/Amsterdam"),
        end=pd.Timestamp("2015-01-04").tz_localize("Europe/Amsterdam"),
        resolution="15min",
    )
    def save_values(sensor, values):
        # Store one zero-horizon belief per time slot for the given sensor.
        beliefs = [
            TimedBelief(
                event_start=dt,
                belief_horizon=timedelta(hours=0),
                event_value=val,
                source=setup_sources["Seita"],
                sensor=sensor,
            )
            for dt, val in zip(time_slots, values)
        ]
        db.session.add_all(beliefs)
    # periodic pattern of producing 100kW for 4h and consuming 100kW for 4h:
    # i.e. [0.1 0.1 0.1 0.1 ... -0.1 -0.1 -0.1 -0.1]
    save_values(power_sensor, ([0.1] * 16 + [-0.1] * 16) * 3)
    # creating the same pattern as above but with energy
    # a flat consumption / production rate of 100kW is equivalent to consume / produce 25kWh
    # every 15min block for 1h
    save_values(energy_sensor, ([0.025] * 16 + [-0.025] * 16) * 3)
    db.session.commit()
    yield profit_sensor_hourly, profit_sensor_daily, power_sensor, energy_sensor
@pytest.fixture(scope="module")
def METHOD_NAME(db, app, generic_report):
    """Set up dummy assets, sensors, sources and beliefs for reporter tests.

    Yields (sensor1, sensor2, sensor3, report_sensor, daily_report_sensor).
    """
    # Create 3 input Sensors on one dummy Asset (with its own AssetType),
    # plus hourly and daily report sensors on the shared report asset.
    dummy_asset_type = GenericAssetType(name="DummyGenericAssetType")
    db.session.add(dummy_asset_type)
    dummy_asset = GenericAsset(
        name="DummyGenericAsset", generic_asset_type=dummy_asset_type
    )
    db.session.add(dummy_asset)
    sensor1 = Sensor("sensor 1", generic_asset=dummy_asset, event_resolution="1h")
    db.session.add(sensor1)
    sensor2 = Sensor("sensor 2", generic_asset=dummy_asset, event_resolution="1h")
    db.session.add(sensor2)
    sensor3 = Sensor(
        "sensor 3",
        generic_asset=dummy_asset,
        event_resolution="1h",
        timezone="Europe/Amsterdam",
    )
    db.session.add(sensor3)
    report_sensor = Sensor(
        "report sensor", generic_asset=generic_report, event_resolution="1h"
    )
    db.session.add(report_sensor)
    daily_report_sensor = Sensor(
        "daily report sensor",
        generic_asset=generic_report,
        event_resolution="1D",
        timezone="Europe/Amsterdam",
    )
    db.session.add(daily_report_sensor)
    # Create 2 DataSources.
    source1 = DataSource("source1")
    source2 = DataSource("source2")
    # Create TimedBeliefs: 10 hourly events per sensor and source, with
    # source2's events shifted one hour later than source1's.
    beliefs = []
    for sensor in [sensor1, sensor2]:
        for si, source in enumerate([source1, source2]):
            for t in range(10):
                beliefs.append(
                    TimedBelief(
                        event_start=datetime(2023, 4, 10, tzinfo=utc)
                        + timedelta(hours=t + si),
                        belief_horizon=timedelta(hours=24),
                        event_value=t,
                        sensor=sensor,
                        source=source,
                    )
                )
    # add simple data for testing the AggregatorReporter:
    # 24 hourly events with value 1 for sensor1 and value -1 for sensor2
    for sensor, source, value in zip([sensor1, sensor2], [source1, source2], [1, -1]):
        for t in range(24):
            beliefs.append(
                TimedBelief(
                    event_start=datetime(2023, 5, 10, tzinfo=utc) + timedelta(hours=t),
                    belief_horizon=timedelta(hours=24),
                    event_value=value,
                    sensor=sensor,
                    source=source,
                )
            )
    # add simple data for testing DST transition
    for t in range(24 * 4):  # create data for 4 days
        # UTC+1 -> UTC+2
        beliefs.append(
            TimedBelief(
                event_start=datetime(2023, 3, 24, tzinfo=utc) + timedelta(hours=t),
                belief_horizon=timedelta(hours=24),
                event_value=t,
                sensor=sensor3,
                source=source1,
            )
        )
        # UTC+2 -> UTC+1
        beliefs.append(
            TimedBelief(
                event_start=datetime(2023, 10, 27, tzinfo=utc) + timedelta(hours=t),
                belief_horizon=timedelta(hours=24),
                event_value=t,
                sensor=sensor3,
                source=source1,
            )
        )
    # Add data source transition, from DataSource 1 to DataSource 2.
    # At 12:00, there is one event from both of the sources.
    for t in range(12):  # one event per hour for half a day, per source
        # 00:00 -> 12:00 from source1
        beliefs.append(
            TimedBelief(
                event_start=datetime(2023, 4, 24, tzinfo=utc) + timedelta(hours=t),
                belief_horizon=timedelta(hours=24),
                event_value=1,
                sensor=sensor3,
                source=source1,
            )
        )
        # 12:00 -> 24:00 from source2
        beliefs.append(
            TimedBelief(
                event_start=datetime(2023, 4, 24, tzinfo=utc) + timedelta(hours=t + 12),
                belief_horizon=timedelta(hours=24),
                event_value=-1,
                sensor=sensor3,
                source=source2,
            )
        )
    # Add one extra source1 belief at 12:00 so both sources have an event then
    # (the original comment said "Source 2", but the code intentionally uses
    # source1 to create the overlap described above).
    beliefs.append(
        TimedBelief(
            event_start=datetime(2023, 4, 24, tzinfo=utc) + timedelta(hours=12),
            belief_horizon=timedelta(hours=24),
            event_value=1,
            sensor=sensor3,
            source=source1,
        )
    )
    db.session.add_all(beliefs)
    db.session.commit()
    yield sensor1, sensor2, sensor3, report_sensor, daily_report_sensor
5,848 | id | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
# Public API of this generated module.
__all__ = [
    'GetConnectionResult',
    'AwaitableGetConnectionResult',
    'get_connection',
    'get_connection_output',
]
# NOTE: generated code; METHOD_NAME is an extraction placeholder for the
# `id` attribute (the string keys below still say "id").
@pulumi.output_type
class GetConnectionResult:
    """
    A collection of values returned by getConnection.
    """
    def __init__(__self__, arn=None, connection_status=None, host_arn=None, METHOD_NAME=None, name=None, provider_type=None, tags=None):
        # Each argument is type-checked, then stored via pulumi.set so the
        # @pulumi.getter properties below can retrieve it.
        if arn and not isinstance(arn, str):
            raise TypeError("Expected argument 'arn' to be a str")
        pulumi.set(__self__, "arn", arn)
        if connection_status and not isinstance(connection_status, str):
            raise TypeError("Expected argument 'connection_status' to be a str")
        pulumi.set(__self__, "connection_status", connection_status)
        if host_arn and not isinstance(host_arn, str):
            raise TypeError("Expected argument 'host_arn' to be a str")
        pulumi.set(__self__, "host_arn", host_arn)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", METHOD_NAME)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provider_type and not isinstance(provider_type, str):
            raise TypeError("Expected argument 'provider_type' to be a str")
        pulumi.set(__self__, "provider_type", provider_type)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter
    def arn(self) -> str:
        """
        CodeStar Connection ARN.
        """
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter(name="connectionStatus")
    def connection_status(self) -> str:
        """
        CodeStar Connection status. Possible values are `PENDING`, `AVAILABLE` and `ERROR`.
        """
        return pulumi.get(self, "connection_status")
    @property
    @pulumi.getter(name="hostArn")
    def host_arn(self) -> str:
        """
        ARN of the host associated with the connection.
        """
        return pulumi.get(self, "host_arn")
    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the CodeStar Connection. The name is unique in the calling AWS account.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="providerType")
    def provider_type(self) -> str:
        """
        Name of the external provider where your third-party code repository is configured. Possible values are `Bitbucket` and `GitHub`. For connections to a GitHub Enterprise Server instance, you must create an codestarconnections.Host resource and use `host_arn` instead.
        """
        return pulumi.get(self, "provider_type")
    @property
    @pulumi.getter
    def tags(self) -> Mapping[str, str]:
        """
        Map of key-value resource tags to associate with the resource.
        """
        return pulumi.get(self, "tags")
class AwaitableGetConnectionResult(GetConnectionResult):
    """Awaitable wrapper so the result can be used with `await` in Pulumi programs."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The `if False: yield` makes this a generator (required by __await__)
        # without ever yielding; awaiting resolves immediately to a copy.
        if False:
            yield self
        return GetConnectionResult(
            arn=self.arn,
            connection_status=self.connection_status,
            host_arn=self.host_arn,
            METHOD_NAME=self.METHOD_NAME,
            name=self.name,
            provider_type=self.provider_type,
            tags=self.tags)
def get_connection(arn: Optional[str] = None,
                   name: Optional[str] = None,
                   tags: Optional[Mapping[str, str]] = None,
                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConnectionResult:
    """
    Provides details about CodeStar Connection.
    ## Example Usage
    ### By ARN
    ```python
    import pulumi
    import pulumi_aws as aws
    example = aws.codestarconnections.get_connection(arn=aws_codestarconnections_connection["example"]["arn"])
    ```
    ### By Name
    ```python
    import pulumi
    import pulumi_aws as aws
    example = aws.codestarconnections.get_connection(name=aws_codestarconnections_connection["example"]["name"])
    ```
    :param str arn: CodeStar Connection ARN.
    :param str name: CodeStar Connection name.
           > **NOTE:** When both `arn` and `name` are specified, `arn` takes precedence.
    :param Mapping[str, str] tags: Map of key-value resource tags to associate with the resource.
    """
    # Pack the arguments and invoke the provider data-source function.
    __args__ = dict()
    __args__['arn'] = arn
    __args__['name'] = name
    __args__['tags'] = tags
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('aws:codestarconnections/getConnection:getConnection', __args__, opts=opts, typ=GetConnectionResult).value
    # Re-wrap the raw result in the awaitable type.
    return AwaitableGetConnectionResult(
        arn=pulumi.get(__ret__, 'arn'),
        connection_status=pulumi.get(__ret__, 'connection_status'),
        host_arn=pulumi.get(__ret__, 'host_arn'),
        METHOD_NAME=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        provider_type=pulumi.get(__ret__, 'provider_type'),
        tags=pulumi.get(__ret__, 'tags'))
# The decorator generates the Output-typed variant from get_connection; the
# body is intentionally `...`.
@_utilities.lift_output_func(get_connection)
def get_connection_output(arn: Optional[pulumi.Input[Optional[str]]] = None,
                          name: Optional[pulumi.Input[Optional[str]]] = None,
                          tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetConnectionResult]:
    """
    Provides details about CodeStar Connection.
    ## Example Usage
    ### By ARN
    ```python
    import pulumi
    import pulumi_aws as aws
    example = aws.codestarconnections.get_connection(arn=aws_codestarconnections_connection["example"]["arn"])
    ```
    ### By Name
    ```python
    import pulumi
    import pulumi_aws as aws
    example = aws.codestarconnections.get_connection(name=aws_codestarconnections_connection["example"]["name"])
    ```
    :param str arn: CodeStar Connection ARN.
    :param str name: CodeStar Connection name.
           > **NOTE:** When both `arn` and `name` are specified, `arn` takes precedence.
    :param Mapping[str, str] tags: Map of key-value resource tags to associate with the resource.
    """
    ...
5,849 | downgrade | """c_haines
Revision ID: 81c96876355a
Revises: ad4f37763020
Create Date: 2021-03-02 18:00:42.129765
"""
from alembic import op
import sqlalchemy as sa
import geoalchemy2
# revision identifiers, used by Alembic.
revision = '81c96876355a'       # this migration's id
down_revision = 'ad4f37763020'  # parent migration
branch_labels = None
depends_on = None
def upgrade():
    """Create the c-haines tables (model runs, predictions, polygons) and indexes."""
    # ### commands auto generated by Alembic ! ###
    op.create_table('c_haines_model_runs',
                    sa.Column('id', sa.Integer(), nullable=False),
                    sa.Column('model_run_timestamp', sa.TIMESTAMP(timezone=True), nullable=False),
                    sa.Column('prediction_model_id', sa.Integer(), nullable=False),
                    sa.ForeignKeyConstraint(['prediction_model_id'], ['prediction_models.id'], ),
                    sa.PrimaryKeyConstraint('id'),
                    sa.UniqueConstraint('model_run_timestamp', 'prediction_model_id'),
                    comment='Identifies the model run and prediction for a particular set of c-haines calculations'
                    )
    op.create_index(op.f('ix_c_haines_model_runs_id'), 'c_haines_model_runs', ['id'], unique=False)
    op.create_index(op.f('ix_c_haines_model_runs_prediction_model_id'),
                    'c_haines_model_runs', ['prediction_model_id'], unique=False)
    # NOTE(review): table comment below is identical to the one on
    # c_haines_model_runs -- looks copy-pasted; the migration has presumably
    # been applied, so the comment string is left untouched here.
    op.create_table('c_haines_predictions',
                    sa.Column('id', sa.Integer(), nullable=False),
                    sa.Column('prediction_timestamp', sa.TIMESTAMP(timezone=True), nullable=False),
                    sa.Column('model_run_id', sa.Integer(), nullable=False),
                    sa.ForeignKeyConstraint(['model_run_id'], ['c_haines_model_runs.id'], ),
                    sa.PrimaryKeyConstraint('id'),
                    sa.UniqueConstraint('prediction_timestamp', 'model_run_id'),
                    comment='Identifies the model run and prediction for a particular set of c-haines calculations'
                    )
    op.create_index(op.f('ix_c_haines_predictions_id'), 'c_haines_predictions', ['id'], unique=False)
    op.create_index(op.f('ix_c_haines_predictions_model_run_id'),
                    'c_haines_predictions', ['model_run_id'], unique=False)
    # Polygon geometry is indexed separately below with a GiST index.
    op.create_table('c_haines_polygons',
                    sa.Column('id', sa.Integer(), nullable=False),
                    sa.Column('geom', geoalchemy2.types.Geometry(geometry_type='POLYGON',
                                                                 spatial_index=False,
                                                                 from_text='ST_GeomFromEWKT', name='geometry'), nullable=False),
                    sa.Column('c_haines_index', sa.Enum('<4', '4-8', '8-11', '>11',
                                                        name='c_haines_severity_levels'), nullable=False),
                    sa.Column('c_haines_prediction_id', sa.Integer(), nullable=False),
                    sa.ForeignKeyConstraint(['c_haines_prediction_id'], ['c_haines_predictions.id'], ),
                    sa.PrimaryKeyConstraint('id')
                    )
    op.create_index('idx_c_haines_polygons_geom', 'c_haines_polygons',
                    ['geom'], unique=False, postgresql_using='gist')
    op.create_index(op.f('ix_c_haines_polygons_c_haines_prediction_id'),
                    'c_haines_polygons', ['c_haines_prediction_id'], unique=False)
    op.create_index(op.f('ix_c_haines_polygons_id'), 'c_haines_polygons', ['id'], unique=False)
    op.create_index('idx_prediction_model_grid_subsets_geom', 'prediction_model_grid_subsets',
                    ['geom'], unique=False, postgresql_using='gist')
    # ### end Alembic commands ###
def METHOD_NAME():
    """Downgrade: drop indexes, tables and the severity-level enum in reverse order."""
    # ### commands auto generated by Alembic ! ###
    op.drop_index('idx_prediction_model_grid_subsets_geom', table_name='prediction_model_grid_subsets')
    op.drop_index(op.f('ix_c_haines_polygons_id'), table_name='c_haines_polygons')
    op.drop_index(op.f('ix_c_haines_polygons_c_haines_prediction_id'), table_name='c_haines_polygons')
    op.drop_index('idx_c_haines_polygons_geom', table_name='c_haines_polygons')
    op.drop_table('c_haines_polygons')
    op.drop_index(op.f('ix_c_haines_predictions_model_run_id'), table_name='c_haines_predictions')
    op.drop_index(op.f('ix_c_haines_predictions_id'), table_name='c_haines_predictions')
    op.drop_table('c_haines_predictions')
    op.drop_index(op.f('ix_c_haines_model_runs_prediction_model_id'), table_name='c_haines_model_runs')
    op.drop_index(op.f('ix_c_haines_model_runs_id'), table_name='c_haines_model_runs')
    op.drop_table('c_haines_model_runs')
    # The enum type is not dropped by drop_table; remove it explicitly.
    op.execute('DROP TYPE c_haines_severity_levels')
    # ### end Alembic commands ###
5,850 | run loop | import asyncio
import copy
import logging
import time
from concurrent.futures import ThreadPoolExecutor
import cronitor
import pymongo
from discord.ext import commands, tasks
from requests.exceptions import HTTPError
from utils.cfg import cfg
from utils.reporter import report_error
from utils.rocketpool import rp
from utils.shared_w3 import w3, bacon
from utils.solidity import to_float
from utils.time_debug import timerun
log = logging.getLogger("minipool_task")
log.setLevel(cfg["log_level"])
# Cronitor heartbeat monitoring for the periodic gather job.
cronitor.api_key = cfg["cronitor_secret"]
monitor = cronitor.Monitor('gather-minipools')
class MinipoolTask(commands.Cog):
    """Hourly task that discovers new Rocket Pool minipools on-chain and
    stores their metadata (pubkey, node operator, fee, validator index)
    in MongoDB.
    """

    def __init__(self, bot):
        self.bot = bot
        self.mongo = pymongo.MongoClient(cfg["mongodb_uri"])
        self.db = self.mongo.rocketwatch
        self.minipool_manager = rp.get_contract_by_name("rocketMinipoolManager")
        # NOTE(review): METHOD_NAME is an extraction placeholder for the
        # tasks.loop below; the name is preserved to keep callers working.
        if not self.METHOD_NAME.is_running() and bot.is_ready():
            self.METHOD_NAME.start()

    @commands.Cog.listener()
    async def on_ready(self):
        # Start the loop once the bot is ready, if __init__ did not already.
        if self.METHOD_NAME.is_running():
            return
        self.METHOD_NAME.start()

    @tasks.loop(seconds=60 ** 2)
    async def METHOD_NAME(self):
        """Run self.task in a worker thread every hour, reporting to cronitor."""
        p_id = time.time()
        monitor.ping(state='run', series=p_id)
        # Run the blocking work off the event loop so discord stays responsive.
        executor = ThreadPoolExecutor()
        loop = asyncio.get_event_loop()
        futures = [loop.run_in_executor(executor, self.task)]
        try:
            await asyncio.gather(*futures)
            monitor.ping(state='complete', series=p_id)
        except Exception as err:
            await report_error(err)
            monitor.ping(state='fail', series=p_id)

    @timerun
    def get_untracked_minipools(self):
        """Return on-chain minipool addresses not yet present in MongoDB."""
        minipool_count = rp.call("rocketMinipoolManager.getMinipoolCount")
        minipool_addresses = []
        # Fetch addresses in batches of 10k via multicall.
        for i in range(0, minipool_count, 10000):
            log.debug(f"getting minipool addresses for {i}/{minipool_count}")
            i_end = min(i + 10000, minipool_count)
            minipool_addresses += [
                w3.toChecksumAddress(r.results[0]) for r in rp.multicall.aggregate(
                    self.minipool_manager.functions.getMinipoolAt(idx) for idx in range(i, i_end)).results]
        # Use a set: membership tests against a list would be O(n*m) over
        # tens of thousands of addresses.
        tracked_addresses = set(self.db.minipools.distinct("address"))
        return [a for a in minipool_addresses if a not in tracked_addresses]

    @timerun
    def get_public_keys(self, addresses):
        """Return the validator pubkey (hex string) for each minipool address."""
        # optimizing this doesn't seem to help much, so keep it simple for readability
        # batch the same way as get_untracked_minipools
        minipool_pubkeys = []
        for i in range(0, len(addresses), 10000):
            log.debug(f"getting minipool pubkeys for {i}/{len(addresses)}")
            i_end = min(i + 10000, len(addresses))
            minipool_pubkeys += [
                f"0x{r.results[0].hex()}" for r in rp.multicall.aggregate(
                    self.minipool_manager.functions.getMinipoolPubkey(a) for a in addresses[i:i_end]).results]
        return minipool_pubkeys

    def _per_minipool_calls(self, addresses, func):
        """Clone a bound minipool contract function once per address (multicall prep)."""
        calls = []
        for a in addresses:
            call = copy.deepcopy(func)
            call.address = w3.toChecksumAddress(a)
            calls.append(call)
        return calls

    @timerun
    def get_node_operator(self, addresses):
        """Return the node operator address for each minipool address."""
        base_contract = rp.assemble_contract("rocketMinipool", w3.toChecksumAddress(addresses[0]))
        calls = self._per_minipool_calls(addresses, base_contract.functions.getNodeAddress())
        node_addresses = rp.multicall.aggregate(calls)
        return [w3.toChecksumAddress(r.results[0]) for r in node_addresses.results]

    @timerun
    def get_node_fee(self, addresses):
        """Return the node commission fee (float) for each minipool address."""
        base_contract = rp.assemble_contract("rocketMinipool", w3.toChecksumAddress(addresses[0]))
        calls = self._per_minipool_calls(addresses, base_contract.functions.getNodeFee())
        node_fees = rp.multicall.aggregate(calls)
        return [to_float(r.results[0]) for r in node_fees.results]

    @timerun
    def get_validator_data(self, pubkeys):
        """Map pubkey -> {validator_id, activation_epoch} via the beacon node.

        Pubkeys without a known validator or without a determined activation
        epoch are skipped.
        """
        result = {}
        pubkeys_divisor = max(len(pubkeys) // 10, 1)  # Make sure divisor is at least 1 to avoid division by zero
        for i, pubkey in enumerate(pubkeys):
            if i % pubkeys_divisor == 0:
                log.debug(f"getting validator data for {i}/{len(pubkeys)}")
            try:
                data = bacon.get_validator(validator_id=pubkey, state_id="finalized")
            except HTTPError:
                continue
            data = data["data"]
            validator_id = int(data["index"])
            activation_epoch = int(data["validator"]["activation_epoch"])
            # The activation epoch is set to the possible maximum int if none has been determined yet.
            # I don't check for an exact value because it turns out that nimbus uses uint64 while Teku uses int64.
            # >=2**23 will be good enough for the next 100 years, after which neither this bot nor its creator will be alive.
            if activation_epoch >= 2 ** 23:
                continue
            result[pubkey] = {"validator_id": validator_id, "activation_epoch": activation_epoch}
        return result

    def check_indexes(self):
        """Ensure the MongoDB indexes exist (and migrate away from the old unique one)."""
        log.debug("checking indexes")
        self.db.proposals.create_index("validator")
        # self.db.minipools.create_index("validator", unique=True)
        # remove the old unique validator index if it exists, create a new one without unique called validator_2
        if "validator_1" in self.db.minipools.index_information():
            self.db.minipools.drop_index("validator_1")
            self.db.minipools.create_index("validator", name="validator_2")
        self.db.proposals.create_index("slot", unique=True)
        self.db.minipools.create_index("address")
        log.debug("indexes checked")

    def task(self):
        """Gather untracked minipools and insert their metadata into MongoDB."""
        self.check_indexes()
        log.debug("Gathering all untracked Minipools...")
        minipool_addresses = self.get_untracked_minipools()
        if not minipool_addresses:
            log.debug("No untracked Minipools found.")
            return
        log.debug(f"Found {len(minipool_addresses)} untracked Minipools.")
        log.debug("Gathering all Minipool public keys...")
        minipool_pubkeys = self.get_public_keys(minipool_addresses)
        log.debug("Gathering all Minipool node operators...")
        node_addresses = self.get_node_operator(minipool_addresses)
        log.debug("Gathering all Minipool commission rates...")
        node_fees = self.get_node_fee(minipool_addresses)
        log.debug("Gathering all Minipool validator indexes...")
        validator_data = self.get_validator_data(minipool_pubkeys)
        # Only minipools with resolved validator data are stored.
        data = [{
            "address"         : a,
            "pubkey"          : p,
            "node_operator"   : n,
            "node_fee"        : f,
            "validator"       : validator_data[p]["validator_id"],
            "activation_epoch": validator_data[p]["activation_epoch"]
        } for a, p, n, f in zip(minipool_addresses, minipool_pubkeys, node_addresses, node_fees) if p in validator_data]
        if data:
            log.debug(f"Inserting {len(data)} Minipools into the database...")
            self.db.minipools.insert_many(data)
        else:
            log.debug("No new Minipools with data found.")
        log.debug("Finished!")

    def cog_unload(self):
        self.METHOD_NAME.cancel()
async def setup(bot):
    """discord.py extension entry point: register the MinipoolTask cog."""
    await bot.add_cog(MinipoolTask(bot))
5,851 | owns multipath | # SPDX-FileCopyrightText: Red Hat, Inc.
# SPDX-License-Identifier: GPL-2.0-or-later
"""
managevolumedb - stores connection details about managed volumes
"""
from __future__ import absolute_import
import json
import logging
import os
import sqlite3
from contextlib import closing
from vdsm.common import errors
from vdsm.storage.constants import P_VDSM_LIB
# Schema version recorded in the `versions` table by create_db().
VERSION = 1
# Default on-disk location of the managed volume database.
DB_FILE = os.path.join(P_VDSM_LIB, "managedvolume.db")
log = logging.getLogger("storage.managevolumedb")
class NotFound(errors.Base):
    """Raised when a managed volume with the given vol_id is not in the DB."""
    msg = "Managed volume with vol_id {self.vol_id} not found"

    def __init__(self, vol_id):
        self.vol_id = vol_id
class VolumeAlreadyExists(errors.Base):
    """Raised when inserting a volume whose vol_id is already in the DB."""

    # Fix: implicit string concatenation previously rendered as
    # "...{vol_info}.Volume with id..." (missing separating space).
    msg = ("Failed to store {self.vol_info}. "
           "Volume with id {self.vol_id} already exists in the DB")

    def __init__(self, vol_id, vol_info):
        self.vol_id = vol_id
        self.vol_info = vol_info
class Closed(errors.Base):
    """Raised when a DB method is called after close()."""
    msg = "Operation on closed database connection"
def open():
    """Open the managed volume database and return a DB wrapper.

    NOTE: intentionally shadows the builtin open(); callers use the
    qualified form managedvolumedb.open(). The builtin is not used
    elsewhere in the visible module.
    """
    conn = sqlite3.connect(DB_FILE)
    # Rows behave like mappings (column-name access) for the DB helpers.
    conn.row_factory = sqlite3.Row
    return DB(conn)
def create_db():
    """Create the volumes/versions schema in DB_FILE and record VERSION."""
    # %d is filled with the trusted module constant VERSION below; no
    # untrusted input reaches this SQL.
    create_table = """
        CREATE TABLE volumes (
            vol_id TEXT PRIMARY KEY,
            path TEXT,
            connection_info TEXT,
            attachment TEXT,
            multipath_id TEXT,
            updated datetime);
        CREATE UNIQUE INDEX multipath_id ON volumes (multipath_id);
        CREATE TABLE versions (
            version INTEGER PRIMARY KEY,
            description TEXT,
            updated datetime
        );
        INSERT INTO versions (
            version,
            description,
            updated
        )
        VALUES (
            %d,
            "Initial version",
            datetime("now")
        );
    """
    log.info("Initializing managed volume DB in %s", DB_FILE)
    conn = sqlite3.connect(DB_FILE)
    with closing(conn):
        conn.executescript(create_table % VERSION)
class DB(object):
    """Thin wrapper around the sqlite connection holding managed volume info."""

    def __init__(self, conn):
        self._conn = conn

    def close(self):
        # Swap in a sentinel so later use raises Closed instead of a raw
        # sqlite3 error; calling close() twice is a no-op.
        if self._conn is not _CLOSED_CONNECTION:
            self._conn.close()
            self._conn = _CLOSED_CONNECTION

    def get_volume(self, vol_id):
        """Return the info dict for vol_id, raising NotFound if absent."""
        for vol in self.iter_volumes([vol_id]):
            return vol
        raise NotFound(vol_id)

    def iter_volumes(self, vol_ids=()):
        """
        Iterate over volume info dicts for all volume IDs in vol_ids,
        ordered by volume ID. If vol_ids is empty or not given, iterate
        over all volumes in the database. IDs that are not present in the
        database are silently omitted.

        (Fix: the default was a mutable list, and the old docstring wrongly
        claimed an empty result when any ID was missing.)
        """
        # if no volume IDs are provided, select all
        sql = """
            SELECT
                vol_id,
                connection_info,
                path,
                attachment,
                multipath_id
            FROM volumes
        """
        if vol_ids:
            sql += "WHERE vol_id IN ({ids})\n".format(
                ids=",".join("?" for _ in vol_ids))
        sql += "ORDER BY vol_id\n"
        res = self._conn.execute(sql, vol_ids)
        # Fetch all the results now. Iterating over the result set lazily and
        # yielding items one by one can result in
        # sqlite3.OperationalError: unable to close due to unfinalized
        # statements or unfinished backups
        vols = res.fetchall()
        for vol in vols:
            volume_info = {"vol_id": vol["vol_id"]}
            if vol["connection_info"]:
                volume_info["connection_info"] = json.loads(
                    vol["connection_info"])
            if vol["path"]:
                volume_info["path"] = vol["path"]
            if vol["attachment"]:
                volume_info["attachment"] = json.loads(vol["attachment"])
            if vol["multipath_id"]:
                volume_info["multipath_id"] = vol["multipath_id"]
            yield volume_info

    def add_volume(self, vol_id, connection_info):
        """Insert a new volume row; raise VolumeAlreadyExists on duplicate id."""
        sql = """
            INSERT INTO volumes (
                vol_id,
                connection_info)
            VALUES (?, ?)
        """
        log.info("Adding volume %s connection_info=%s",
                 vol_id, connection_info)
        conn_info_json = json.dumps(connection_info).encode("utf-8")
        try:
            with self._conn:
                self._conn.execute(sql, (vol_id, conn_info_json))
        except sqlite3.IntegrityError:
            raise VolumeAlreadyExists(vol_id, connection_info)

    def remove_volume(self, vol_id):
        """Delete the volume row (no error if vol_id does not exist)."""
        sql = "DELETE FROM volumes WHERE vol_id = ?"
        log.info("Removing volume %s", vol_id)
        with self._conn:
            self._conn.execute(sql, (vol_id,))

    def update_volume(self, vol_id, path, attachment, multipath_id):
        """Record attach results (path, attachment JSON, multipath id) for vol_id."""
        sql = """
            UPDATE volumes SET
                path = ?,
                attachment = ?,
                multipath_id = ?,
                updated = datetime('now')
            WHERE vol_id = ?
        """
        log.info("Updating volume %s path=%s, attachment=%s, multipath_id=%s",
                 vol_id, path, attachment, multipath_id)
        attachment_json = json.dumps(attachment).encode("utf-8")
        with self._conn:
            self._conn.execute(sql, (path, attachment_json, multipath_id,
                                     vol_id))

    def METHOD_NAME(self, multipath_id):
        """
        Return True if multipath device is owned by a managed volume.
        """
        sql = """
            SELECT EXISTS (
                SELECT 1
                FROM volumes
                WHERE multipath_id = ?
            )
        """
        res = self._conn.execute(sql, (multipath_id,))
        row = res.fetchall()[0]
        return row[0] == 1

    def version_info(self):
        """Return the newest (version, description, updated) row."""
        sql = """
            SELECT
                version,
                description,
                updated
            FROM versions
            WHERE version = (
                SELECT max(version) FROM versions
            )
        """
        res = self._conn.execute(sql)
        return res.fetchall()[0]
# Private
class _closed_connection(object):
    """Stand-in connection: any attribute access raises Closed."""
    def __getattr__(self, name):
        raise Closed


# Sentinel swapped into DB._conn by DB.close().
_CLOSED_CONNECTION = _closed_connection()
5,852 | test test escalation fcm message user settings | import pytest
from apps.mobile_app.demo_push import _get_test_escalation_fcm_message, get_test_push_title
from apps.mobile_app.models import FCMDevice, MobileAppUserSettings
@pytest.mark.django_db
def METHOD_NAME(
    make_organization_and_user, make_alert_receive_channel, make_alert_group, make_alert
):
    """Non-critical test push: default user settings, non-critical APNS sound."""
    organization, user = make_organization_and_user()
    device = FCMDevice.objects.create(user=user, registration_id="test_device_id")
    message = _get_test_escalation_fcm_message(user, device, critical=False)
    # Check user settings are passed to FCM message
    assert message.data["default_notification_sound_name"] == "default_sound.mp3"
    assert message.data["default_notification_volume_type"] == "constant"
    assert message.data["default_notification_volume_override"] == "false"
    assert message.data["default_notification_volume"] == "0.8"
    assert message.data["important_notification_sound_name"] == "default_sound_important.mp3"
    assert message.data["important_notification_volume_type"] == "constant"
    assert message.data["important_notification_volume"] == "0.8"
    assert message.data["important_notification_volume_override"] == "true"
    assert message.data["important_notification_override_dnd"] == "true"
    # Check APNS notification sound is set correctly
    apns_sound = message.apns.payload.aps.sound
    assert apns_sound.critical is False
    assert apns_sound.name == "default_sound.aiff"
    assert apns_sound.volume is None  # APNS doesn't allow to specify volume for non-critical notifications
    # Check expected test push content
    assert message.apns.payload.aps.badge is None
    assert message.apns.payload.aps.alert.title == get_test_push_title(critical=False)
    assert message.data["title"] == get_test_push_title(critical=False)
    assert message.data["type"] == "oncall.message"
@pytest.mark.django_db
def test_escalation_fcm_message_user_settings_critical(
    make_organization_and_user, make_alert_receive_channel, make_alert_group, make_alert
):
    """Critical test push: important sound/volume and critical interruption level."""
    organization, user = make_organization_and_user()
    device = FCMDevice.objects.create(user=user, registration_id="test_device_id")
    message = _get_test_escalation_fcm_message(user, device, critical=True)
    # Check user settings are passed to FCM message
    assert message.data["default_notification_sound_name"] == "default_sound.mp3"
    assert message.data["default_notification_volume_type"] == "constant"
    assert message.data["default_notification_volume_override"] == "false"
    assert message.data["default_notification_volume"] == "0.8"
    assert message.data["important_notification_sound_name"] == "default_sound_important.mp3"
    assert message.data["important_notification_volume_type"] == "constant"
    assert message.data["important_notification_volume"] == "0.8"
    assert message.data["important_notification_volume_override"] == "true"
    assert message.data["important_notification_override_dnd"] == "true"
    # Check APNS notification sound is set correctly
    apns_sound = message.apns.payload.aps.sound
    assert apns_sound.critical is True
    assert apns_sound.name == "default_sound_important.aiff"
    assert apns_sound.volume == 0.8
    assert message.apns.payload.aps.custom_data["interruption-level"] == "critical"
    # Check expected test push content
    assert message.apns.payload.aps.badge is None
    assert message.apns.payload.aps.alert.title == get_test_push_title(critical=True)
    assert message.data["title"] == get_test_push_title(critical=True)
    assert message.data["type"] == "oncall.critical_message"
@pytest.mark.django_db
def test_escalation_fcm_message_user_settings_critical_override_dnd_disabled(
    make_organization_and_user, make_alert_receive_channel, make_alert_group, make_alert
):
    """With DND override disabled, a critical push is downgraded to a
    non-critical, time-sensitive APNS notification."""
    organization, user = make_organization_and_user()
    device = FCMDevice.objects.create(user=user, registration_id="test_device_id")

    # Disable important notification override DND
    MobileAppUserSettings.objects.create(user=user, important_notification_override_dnd=False)
    message = _get_test_escalation_fcm_message(user, device, critical=True)

    # The disabled override is reflected in the FCM data payload.
    assert message.data["important_notification_override_dnd"] == "false"

    # The APNS sound is no longer marked critical.
    aps = message.apns.payload.aps
    assert aps.sound.critical is False
    assert aps.custom_data["interruption-level"] == "time-sensitive"

    # Expected test push content.
    assert aps.badge is None
    assert aps.alert.title == get_test_push_title(critical=True)
    assert message.data["title"] == get_test_push_title(critical=True)
5,853 | test partition table info | """Test UDisks2 Partition Table."""
from dbus_fast import Variant
from dbus_fast.aio.message_bus import MessageBus
import pytest
from supervisor.dbus.udisks2.const import PartitionTableType
from supervisor.dbus.udisks2.data import CreatePartitionOptions
from supervisor.dbus.udisks2.partition_table import UDisks2PartitionTable
from tests.common import mock_dbus_services
from tests.dbus_service_mocks.udisks2_partition_table import (
PartitionTable as PartitionTableService,
)
@pytest.fixture(name="partition_table_sda_service")
async def fixture_partition_table_sda_service(
    dbus_session_bus: MessageBus,
) -> PartitionTableService:
    """Mock sda Partition Table service."""
    services = await mock_dbus_services(
        {"udisks2_partition_table": "/org/freedesktop/UDisks2/block_devices/sda"},
        dbus_session_bus,
    )
    yield services["udisks2_partition_table"]
@pytest.fixture(name="partition_table_sdb_service")
async def fixture_partition_table_sdb_service(
    dbus_session_bus: MessageBus,
) -> PartitionTableService:
    """Mock sdb Partition Table service."""
    services = await mock_dbus_services(
        {"udisks2_partition_table": "/org/freedesktop/UDisks2/block_devices/sdb"},
        dbus_session_bus,
    )
    yield services["udisks2_partition_table"]
async def METHOD_NAME(
    partition_table_sda_service: PartitionTableService,
    partition_table_sdb_service: PartitionTableService,
    dbus_session_bus: MessageBus,
):
    """Test partition table info."""
    # sda uses the default (property syncing on); sdb opts out so we can
    # verify below that property-change signals leave it untouched.
    sda = UDisks2PartitionTable("/org/freedesktop/UDisks2/block_devices/sda")
    sdb = UDisks2PartitionTable(
        "/org/freedesktop/UDisks2/block_devices/sdb", sync_properties=False
    )

    # No properties are available before connect().
    assert sda.type is None
    assert sda.partitions is None
    assert sdb.type is None
    assert sdb.partitions is None

    await sda.connect(dbus_session_bus)
    await sdb.connect(dbus_session_bus)

    # After connecting, properties mirror the mocked services.
    assert sda.type == PartitionTableType.GPT
    assert sda.partitions == ["/org/freedesktop/UDisks2/block_devices/sda1"]
    assert sdb.type == PartitionTableType.GPT
    assert sdb.partitions == ["/org/freedesktop/UDisks2/block_devices/sdb1"]

    # A PropertiesChanged signal with new values updates the synced proxy.
    partition_table_sda_service.emit_properties_changed(
        {
            "Partitions": [
                "/org/freedesktop/UDisks2/block_devices/sda1",
                "/org/freedesktop/UDisks2/block_devices/sda2",
            ]
        },
    )
    await partition_table_sda_service.ping()
    assert sda.partitions == [
        "/org/freedesktop/UDisks2/block_devices/sda1",
        "/org/freedesktop/UDisks2/block_devices/sda2",
    ]

    # Invalidating "Partitions" (empty changed dict) forces a re-fetch from
    # the service, which still reports only sda1.
    partition_table_sda_service.emit_properties_changed({}, ["Partitions"])
    await partition_table_sda_service.ping()
    # NOTE(review): double ping -- presumably lets the re-fetch triggered by
    # the invalidation complete before asserting; confirm both are required.
    await partition_table_sda_service.ping()
    assert sda.partitions == ["/org/freedesktop/UDisks2/block_devices/sda1"]

    # Prop changes should not sync for this one
    partition_table_sdb_service.emit_properties_changed(
        {
            "Partitions": [
                "/org/freedesktop/UDisks2/block_devices/sdb1",
                "/org/freedesktop/UDisks2/block_devices/sdb2",
            ]
        },
    )
    await partition_table_sdb_service.ping()
    assert sdb.partitions == ["/org/freedesktop/UDisks2/block_devices/sdb1"]
async def test_create_partition(
    partition_table_sda_service: PartitionTableService, dbus_session_bus: MessageBus
):
    """Test create partition."""
    service = partition_table_sda_service
    service.CreatePartition.calls.clear()

    table = UDisks2PartitionTable("/org/freedesktop/UDisks2/block_devices/sda")
    await table.connect(dbus_session_bus)

    created = await table.create_partition(
        offset=0,
        size=1000000,
        type_="0FC63DAF-8483-4772-8E79-3D69D8477DE4",
        name="hassos-data",
        options=CreatePartitionOptions(partition_type="primary"),
    )
    assert created == "/org/freedesktop/UDisks2/block_devices/sda1"

    # The D-Bus call must receive the raw args plus serialized options
    # (including the implicit no-user-interaction flag).
    expected_call = (
        0,
        1000000,
        "0FC63DAF-8483-4772-8E79-3D69D8477DE4",
        "hassos-data",
        {
            "partition-type": Variant("s", "primary"),
            "auth.no_user_interaction": Variant("b", True),
        },
    )
    assert service.CreatePartition.calls == [expected_call]
5,854 | create dirs | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utilities for dealing with file modes.
from past.builtins import basestring
from builtins import object
import bz2
import os
import posixpath
import sys
import tarfile
import tempfile
from desktop.lib.exceptions_renderable import PopupException
from filebrowser.conf import ARCHIVE_UPLOAD_TEMPDIR
from zipfile import ZipFile
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
__all__ = ['archive_factory']
class Archive(object):
    """
    Archive interface.

    Concrete subclasses wrap one archive format and must implement
    `extract`, returning the directory holding the extracted contents.
    """

    def extract(self, path):
        """
        Extract an Archive.
        Should return a directory where the extracted contents live.

        NOTE: subclasses implement this as `extract(self)` (no `path`);
        the extra parameter here is kept only for backward compatibility.
        """
        # Fix: previously raised `NotImplemented`, which is a singleton, not
        # an exception class -- raising it produces a confusing TypeError.
        raise NotImplementedError(_("Must implement 'extract' method."))

    def METHOD_NAME(self, basepath, dirs=()):
        """
        Creates all directories passed at the given basepath.

        Raises IllegalPathException for absolute paths or paths containing
        '..' (directory-traversal protection). Immutable default replaces
        the shared-mutable-list anti-pattern.
        """
        for directory in dirs:
            # Stops if directory start with '/' or points to a relative path
            if os.path.isabs(directory) or '..' in directory:
                raise IllegalPathException()
            directory = os.path.join(basepath, directory)
            try:
                os.makedirs(directory)
            except OSError:
                # Directory already exists -- safe to ignore.
                pass
class ZipArchive(Archive):
    """
    Acts on a zip file in memory or in a temporary location.
    Python's ZipFile class inherently buffers all reading.
    """

    def __init__(self, file):
        if sys.version_info[0] > 2:
            # Fix: the previous `isinstance(file, basestring) and file`
            # evaluated to False for file objects, so ZipFile received
            # `False`. ZipFile accepts both a path and a file-like object
            # on Python 3, so pass it through unchanged.
            self.file = file
        else:
            self.file = isinstance(file, basestring) and open(file) or file
        self.zfh = ZipFile(self.file)

    def extract(self):
        """
        Extracts a zip file.
        If a 'file' ends with '/', then it is a directory and we must create it.
        Else, open a file for writing and meta pipe the contents zipfile to the new file.
        """
        # Store all extracted files in a temporary directory.
        if ARCHIVE_UPLOAD_TEMPDIR.get():
            directory = tempfile.mkdtemp(dir=ARCHIVE_UPLOAD_TEMPDIR.get())
        else:
            directory = tempfile.mkdtemp()

        dirs, files = self._filenames()
        self.METHOD_NAME(directory, dirs)
        self._create_files(directory, files)
        return directory

    def _filenames(self):
        """
        List all dirs and files by reading the table of contents of the Zipfile.
        """
        dirs = []
        files = []
        for name in self.zfh.namelist():
            if name.endswith(posixpath.sep):
                dirs.append(name)
            else:
                files.append(name)
                # self.zfh.namelist() sometimes doesn't return all the
                # directories -- go up the path one directory at a time.
                parent = os.path.dirname(name)
                while parent != '' and parent not in dirs:
                    dirs.append(parent)
                    parent = os.path.dirname(parent)
        return (dirs, files)

    def _create_files(self, basepath, files=()):
        """
        Extract files to their rightful place.
        Files are written to a temporary directory immediately after being decompressed.
        """
        for f in files:
            new_path = os.path.join(basepath, f)
            zdata = self.zfh.read(f)
            if not isinstance(zdata, str):
                # NOTE(review): decoding as UTF-8 and writing in text mode
                # corrupts binary archive members; kept for compatibility
                # with the original behaviour -- confirm before changing.
                zdata = zdata.decode('utf-8')
            # Context manager guarantees the handle is closed even on error.
            with open(new_path, 'w') as new_file:
                new_file.write(zdata)
class TarballArchive(Archive):
    """
    Acts on a tarball (tar.gz) file in memory or in a temporary location.
    Python's tarfile module inherently buffers all reading.
    """

    def __init__(self, file):
        if isinstance(file, basestring):
            self.path = file
        else:
            # Spool the in-memory upload to disk so tarfile can seek in it.
            f = tempfile.NamedTemporaryFile(delete=False)
            f.write(file.read())
            self.path = f.name
            f.close()
        self.fh = tarfile.open(self.path)

    def extract(self):
        """
        Extracts a tarball (fixed copy-pasted "zip" wording).
        Creates every directory first, then writes out the regular files.
        Returns the temporary directory holding the extracted contents.
        """
        # Store all extracted files in a temporary directory.
        directory = tempfile.mkdtemp()

        dirs, files = self._filenames()
        self.METHOD_NAME(directory, dirs)
        self._create_files(directory, files)
        return directory

    def _filenames(self):
        """
        List all dirs and files by reading the table of contents of the tarball.
        """
        dirs = []
        files = []
        for tarinfo in self.fh.getmembers():
            if tarinfo.isdir():
                dirs.append(tarinfo.name)
            else:
                files.append(tarinfo.name)
                parent = os.path.dirname(tarinfo.path)
                # getmembers() sometimes doesn't return all the directories
                # -- go up the path one directory at a time.
                while parent != '' and parent not in dirs:
                    dirs.append(parent)
                    parent = os.path.dirname(parent)
        return (dirs, files)

    def _create_files(self, basepath, files=()):
        """
        Extract files to their rightful place.
        Files are written to a temporary directory immediately after being decompressed.
        """
        for f in files:
            new_path = os.path.join(basepath, f)
            extracted_data = self.fh.extractfile(f).read()
            if not isinstance(extracted_data, str):
                # NOTE(review): decoding as UTF-8 and writing in text mode
                # corrupts binary members; kept for compatibility with the
                # original behaviour -- confirm before changing.
                extracted_data = extracted_data.decode('utf-8')
            # Context manager guarantees the handle is closed even on error.
            with open(new_path, 'w') as new_file:
                new_file.write(extracted_data)
class BZ2Archive(Archive):
    """
    Acts on a bzip2 file in memory or in a temporary location.
    Python's BZ2File class inherently buffers all reading.
    """

    def __init__(self, file):
        # bzip2 only compresses single files and there is no direct method
        # in the bz2 library to get the file name.
        if isinstance(file, basestring):
            # Fix: the previous code unconditionally read `file.name`, which
            # raised AttributeError when a plain path string was passed.
            self.path = file
            source_name = file
        else:
            f = tempfile.NamedTemporaryFile(delete=False)
            f.write(file.read())
            self.path = f.name
            f.close()
            source_name = file.name
        # Strip the '.bzip2' / '.bz2' extension to get the output file name.
        self.name = (
            source_name[:-6]
            if source_name.lower().endswith('.bzip2')
            else source_name[:-4]
        )
        self.fh = bz2.BZ2File(self.path)

    def extract(self):
        """
        Extracts a bz2 file.
        Opens the file for writing and meta pipe the contents bz2file to the new file.
        """
        # Store all extracted files in a temporary directory.
        if ARCHIVE_UPLOAD_TEMPDIR.get():
            directory = tempfile.mkdtemp(dir=ARCHIVE_UPLOAD_TEMPDIR.get())
        else:
            directory = tempfile.mkdtemp()

        files = [self.name]
        self._create_files(directory, files)
        return directory

    def _create_files(self, basepath, files=()):
        """
        Files are written to a temporary directory immediately after being decompressed.
        """
        for f in files:
            new_path = os.path.join(basepath, f)
            data = self.fh.read()
            if not isinstance(data, str):
                # Fix: BZ2File.read() returns bytes on Python 3; writing
                # bytes to a text-mode file raised TypeError. Decode first,
                # matching the other Archive implementations.
                data = data.decode('utf-8')
            # Context manager guarantees the handle is closed even on error.
            with open(new_path, 'w') as new_file:
                new_file.write(data)
def archive_factory(path, archive_type='zip'):
    """
    Return the Archive implementation matching `archive_type`.

    Supported types: 'zip'; 'tarball'/'tar.gz'/'tgz'; 'bz2'/'bzip2'.
    Raises ValueError for unsupported types (previously returned None
    silently, deferring the failure to an opaque AttributeError later).
    """
    if archive_type == 'zip':
        return ZipArchive(path)
    elif archive_type in ('tarball', 'tar.gz', 'tgz'):
        return TarballArchive(path)
    elif archive_type in ('bz2', 'bzip2'):
        return BZ2Archive(path)
    raise ValueError('Unsupported archive type: %s' % archive_type)
class IllegalPathException(PopupException):
    # Raised when an archive entry would escape the extraction root
    # (absolute path or a path containing '..') -- see Archive.METHOD_NAME.
    def __init__(self):
        super(IllegalPathException, self).__init__('''Archive path cannot be absolute or contain '..' ''')
5,855 | test databricks import workspaceitem with spec | import unittest
from pathlib import Path
import kfp
from kfp.dsl import PipelineParam
from databricks import ImportWorkspaceItemOp, DeleteWorkspaceItemOp
class TestImportWorkspaceItemOp(unittest.TestCase):
    """Tests for the Databricks ImportWorkspaceItemOp pipeline resource op.

    Each test compiles a one-op pipeline with the KFP compiler; assert_res
    holds the assertions shared by every construction path.
    """

    def test_databricks_import_workspaceitem_without_k8s_or_item_name(self):
        # Omitting both the k8s resource and item_name must fail compilation.
        def my_pipeline():
            ImportWorkspaceItemOp(
                name="importworkspaceitem",
                content="cHJpbnQoImhlbGxvLCB3b3JsZCIpCgoK",
                path="/Users/user@foo.com/ScalaExampleNotebook",
                language="SCALA",
                file_format="SOURCE"
            )
        self.assertRaises(ValueError, lambda: kfp.compiler.Compiler()._create_workflow(my_pipeline))

    def test_databricks_import_workspaceitem(self):
        # Construction from individual keyword arguments.
        def my_pipeline():
            item_name = "test-item"
            content = "cHJpbnQoImhlbGxvLCB3b3JsZCIpCgoK"
            path = "/Users/user@foo.com/ScalaExampleNotebook"
            language = "SCALA"
            file_format = "SOURCE"
            expected_spec = {
                "content": content,
                "path": path,
                "language": language,
                "format": file_format
            }
            res = ImportWorkspaceItemOp(
                name="importworkspaceitem",
                item_name=item_name,
                content=content,
                path=path,
                language=language,
                file_format=file_format
            )
            self.assert_res(res, expected_spec)
        kfp.compiler.Compiler()._create_workflow(my_pipeline)

    def METHOD_NAME(self):
        # Construction from a complete spec dict.
        def my_pipeline():
            item_name = "test-item"
            spec = {
                "content": "cHJpbnQoImhlbGxvLCB3b3JsZCIpCgoK",
                "path": "/Users/user@foo.com/ScalaExampleNotebook",
                "language": "SCALA",
                "format": "SOURCE"
            }
            res = ImportWorkspaceItemOp(
                name="importworkspaceitem",
                item_name=item_name,
                spec=spec
            )
            self.assert_res(res, spec)
        kfp.compiler.Compiler()._create_workflow(my_pipeline)

    def test_databricks_import_workspaceitem_with_spec_and_extra_args(self):
        # Extra keyword args are merged into a partial spec.
        def my_pipeline():
            item_name = "test-item"
            content = "cHJpbnQoImhlbGxvLCB3b3JsZCIpCgoK"
            spec = {
                "path": "/Users/user@foo.com/ScalaExampleNotebook",
                "language": "SCALA",
                "format": "SOURCE"
            }
            expected_spec = {
                "content": "cHJpbnQoImhlbGxvLCB3b3JsZCIpCgoK",
                "path": "/Users/user@foo.com/ScalaExampleNotebook",
                "language": "SCALA",
                "format": "SOURCE"
            }
            res = ImportWorkspaceItemOp(
                name="importworkspaceitem",
                item_name=item_name,
                spec=spec,
                content=content
            )
            self.assert_res(res, expected_spec)
        kfp.compiler.Compiler()._create_workflow(my_pipeline)

    def test_databricks_import_workspaceitem_with_json_spec(self):
        # Construction from a JSON string spec.
        def my_pipeline():
            item_name = "test-item"
            json_spec = """
            {
                "content": "cHJpbnQoImhlbGxvLCB3b3JsZCIpCgoK",
                "path": "/Users/user@foo.com/ScalaExampleNotebook",
                "language": "SCALA",
                "format": "SOURCE"
            }
            """
            expected_spec = {
                "content": "cHJpbnQoImhlbGxvLCB3b3JsZCIpCgoK",
                "path": "/Users/user@foo.com/ScalaExampleNotebook",
                "language": "SCALA",
                "format": "SOURCE"
            }
            res = ImportWorkspaceItemOp.from_json_spec(
                name="importworkspaceitem",
                item_name=item_name,
                json_spec=json_spec
            )
            self.assert_res(res, expected_spec)
        kfp.compiler.Compiler()._create_workflow(my_pipeline)

    def test_databricks_import_workspaceitem_with_json_file_spec(self):
        # Construction from a JSON spec file living next to this test module.
        def my_pipeline():
            item_name = "test-item"
            current_path = Path(__file__).parent
            json_spec_file_name = current_path.joinpath("workspaceitem_spec.json")
            expected_spec = {
                "content": "cHJpbnQoImhlbGxvLCB3b3JsZCIpCgoK",
                "path": "/Users/user@foo.com/ScalaExampleNotebook",
                "language": "SCALA",
                "format": "SOURCE"
            }
            res = ImportWorkspaceItemOp.from_file_name(
                name="importworkspaceitem",
                item_name=item_name,
                file_name=json_spec_file_name
            )
            self.assert_res(res, expected_spec)
        kfp.compiler.Compiler()._create_workflow(my_pipeline)

    def assert_res(self, res, expected_spec):
        # Shared assertions for every construction path above.
        self.assertEqual(res.name, "importworkspaceitem")
        self.assertEqual(res.resource.action, "create")
        self.assertEqual(res.resource.success_condition, "status.object_hash")
        self.assertEqual(res.resource.failure_condition, None)
        self.assertEqual(res.resource.manifest, None)
        expected_attribute_outputs = {
            "name": "{.metadata.name}",
            "object_hash": "{.status.object_hash}",
            "object_language": "{.status.object_info.language}",
            "object_type": "{.status.object_info.object_type}",
            "object_path": "{.status.object_info.path}",
            "manifest": "{}"
        }
        self.assertEqual(res.attribute_outputs, expected_attribute_outputs)
        expected_outputs = {
            "name": PipelineParam(name="name", op_name=res.name),
            "object_hash": PipelineParam(name="object_hash", op_name=res.name),
            "object_language": PipelineParam(name="object_language", op_name=res.name),
            "object_type": PipelineParam(name="object_type", op_name=res.name),
            "object_path": PipelineParam(name="object_path", op_name=res.name),
            "manifest": PipelineParam(name="manifest", op_name=res.name)
        }
        self.assertEqual(res.outputs, expected_outputs)
        self.assertEqual(
            res.output,
            PipelineParam(name="name", op_name=res.name)
        )
        self.assertEqual(res.dependent_names, [])
        self.assertEqual(res.k8s_resource["kind"], "WorkspaceItem")
        self.assertEqual(res.k8s_resource["metadata"]["name"], "test-item")
        self.assertEqual(res.k8s_resource["spec"], expected_spec)
class TestDeleteWorkspaceItemOp(unittest.TestCase):
    """Tests for the Databricks DeleteWorkspaceItemOp pipeline resource op."""

    def test_databricks_delete_workspaceitem_without_k8s_or_item_name(self):
        # Omitting both the k8s resource and item_name must fail compilation.
        def my_pipeline():
            DeleteWorkspaceItemOp(
                name="deleteworkspaceitem"
            )
        self.assertRaises(ValueError, lambda: kfp.compiler.Compiler()._create_workflow(my_pipeline))

    def test_databricks_delete_workspaceitem(self):
        def my_pipeline():
            res = DeleteWorkspaceItemOp(
                name="deleteworkspaceitem",
                item_name="test-item"
            )
            # A delete op carries no conditions or outputs -- only the
            # k8s resource identifying what to delete.
            self.assertEqual(res.name, "deleteworkspaceitem")
            self.assertEqual(res.resource.action, "delete")
            self.assertEqual(res.resource.success_condition, None)
            self.assertEqual(res.resource.failure_condition, None)
            self.assertEqual(res.resource.manifest, None)
            self.assertEqual(res.attribute_outputs, {})
            self.assertEqual(res.outputs, {})
            self.assertEqual(res.output, None)
            self.assertEqual(res.dependent_names, [])
            self.assertEqual(res.k8s_resource["kind"], "WorkspaceItem")
            self.assertEqual(res.k8s_resource["metadata"]["name"], "test-item")
        kfp.compiler.Compiler()._create_workflow(my_pipeline)
if __name__ == '__main__':
    # Fix: the call was truncated to `unittest.main(` (missing closing
    # parenthesis), which is a syntax error.
    unittest.main()
5,856 | format session | # mautrix-telegram - A Matrix-Telegram puppeting bridge
# Copyright (C) 2021 Tulir Asokan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from __future__ import annotations
from telethon.errors import (
AboutTooLongError,
AuthKeyError,
FirstNameInvalidError,
HashInvalidError,
UsernameInvalidError,
UsernameNotModifiedError,
UsernameOccupiedError,
)
from telethon.tl.functions.account import (
GetAuthorizationsRequest,
ResetAuthorizationRequest,
UpdateProfileRequest,
UpdateUsernameRequest,
)
from telethon.tl.types import Authorization
from mautrix.types import EventID
from .. import SECTION_AUTH, CommandEvent, command_handler
@command_handler(
    needs_auth=True,
    help_section=SECTION_AUTH,
    help_args="<_new username_>",
    help_text="Change your Telegram username.",
)
async def username(evt: CommandEvent) -> EventID:
    """Change the sender's Telegram username ("-" clears it)."""
    if len(evt.args) == 0:
        return await evt.reply("**Usage:** `$cmdprefix+sp username <new username>`")
    if evt.sender.is_bot:
        return await evt.reply("Bots can't set their own username.")
    new_name = evt.args[0]
    # "-" is the conventional sentinel for removing the username.
    if new_name == "-":
        new_name = ""
    try:
        await evt.sender.client(UpdateUsernameRequest(username=new_name))
    except UsernameInvalidError:
        return await evt.reply(
            "Invalid username. Usernames must be between 5 and 30 alphanumeric characters."
        )
    except UsernameNotModifiedError:
        return await evt.reply("That is your current username.")
    except UsernameOccupiedError:
        return await evt.reply("That username is already in use.")
    # Refresh the cached profile so the confirmation reflects Telegram's state.
    await evt.sender.update_info()
    # Fix: return the reply's EventID (the function is annotated
    # `-> EventID` and the sibling handlers all return it; previously these
    # two replies were awaited but their result was discarded).
    if not evt.sender.tg_username:
        return await evt.reply("Username removed")
    else:
        return await evt.reply(f"Username changed to {evt.sender.tg_username}")
@command_handler(
    needs_auth=True,
    help_section=SECTION_AUTH,
    help_args="<_new about_>",
    help_text="Change your Telegram about section.",
)
async def about(evt: CommandEvent) -> EventID:
    """Update the sender's Telegram "about" text ("-" clears it)."""
    if not evt.args:
        return await evt.reply("**Usage:** `$cmdprefix+sp about <new about>`")
    if evt.sender.is_bot:
        return await evt.reply("Bots can't set their own about section.")
    text = " ".join(evt.args)
    if text == "-":
        text = ""
    try:
        await evt.sender.client(UpdateProfileRequest(about=text))
    except AboutTooLongError:
        return await evt.reply("The provided about section is too long")
    return await evt.reply("About section updated")
@command_handler(
    needs_auth=True,
    help_section=SECTION_AUTH,
    help_args="<_new displayname_>",
    help_text="Change your Telegram displayname.",
)
async def displayname(evt: CommandEvent) -> EventID:
    """Change the sender's Telegram first/last name."""
    if len(evt.args) == 0:
        return await evt.reply("**Usage:** `$cmdprefix+sp displayname <new displayname>`")
    if evt.sender.is_bot:
        return await evt.reply("Bots can't set their own displayname.")
    # Single argument: first name only. Multiple arguments: the last token
    # becomes the last name, everything before it the first name.
    first_name, last_name = (
        (evt.args[0], "") if len(evt.args) == 1 else (" ".join(evt.args[:-1]), evt.args[-1])
    )
    try:
        await evt.sender.client(UpdateProfileRequest(first_name=first_name, last_name=last_name))
    except FirstNameInvalidError:
        return await evt.reply("Invalid first name")
    # Refresh the cached profile before confirming.
    await evt.sender.update_info()
    return await evt.reply("Displayname updated")
def METHOD_NAME(sess: Authorization) -> str:
return (
f"**{sess.app_name} {sess.app_version}** \n"
f" **Platform:** {sess.device_model} {sess.platform} {sess.system_version} \n"
f" **Active:** {sess.date_active} (created {sess.date_created}) \n"
f" **From:** {sess.ip} - {sess.region}, {sess.country}"
)
@command_handler(
    needs_auth=True,
    help_section=SECTION_AUTH,
    help_args="<`list`|`terminate`> [_hash_]",
    help_text="View or delete other Telegram sessions.",
)
async def session(evt: CommandEvent) -> EventID:
    """List the sender's active Telegram sessions or terminate one by hash."""
    if len(evt.args) == 0:
        return await evt.reply("**Usage:** `$cmdprefix+sp session <list|terminate> [hash]`")
    elif evt.sender.is_bot:
        return await evt.reply("Bots can't manage their sessions")
    cmd = evt.args[0].lower()
    if cmd == "list":
        res = await evt.sender.client(GetAuthorizationsRequest())
        session_list = res.authorizations
        # Telegram flags exactly one authorization as the current session.
        current = [s for s in session_list if s.current][0]
        current_text = METHOD_NAME(current)
        # All non-current sessions, each with the hash needed to terminate it.
        other_text = "\n".join(
            f"* {METHOD_NAME(sess)} \n **Hash:** {sess.hash}"
            for sess in session_list
            if not sess.current
        )
        return await evt.reply(
            f"### Current session\n"
            f"{current_text}\n"
            f"\n"
            f"### Other active sessions\n"
            f"{other_text}"
        )
    elif cmd == "terminate" and len(evt.args) > 1:
        try:
            session_hash = int(evt.args[1])
        except ValueError:
            return await evt.reply("Hash must be an integer")
        try:
            ok = await evt.sender.client(ResetAuthorizationRequest(hash=session_hash))
        except HashInvalidError:
            return await evt.reply("Invalid session hash.")
        except AuthKeyError as e:
            # Telegram forbids freshly-created sessions from terminating
            # others; surface that case nicely, re-raise anything else.
            if e.message == "FRESH_RESET_AUTHORISATION_FORBIDDEN":
                return await evt.reply(
                    "New sessions can't terminate other sessions. Please wait a while."
                )
            raise
        if ok:
            return await evt.reply("Session terminated successfully.")
        else:
            return await evt.reply("Session not found.")
    else:
        # Unknown subcommand, or "terminate" without a hash.
        return await evt.reply("**Usage:** `$cmdprefix+sp session <list|terminate> [hash]`")
5,857 | minimize one norm | # Copyright (C) Unitary Fund
#
# This source code is licensed under the GPL license (v3) found in the
# LICENSE file in the root directory of this source tree.
"""Functions for finding optimal representations given a noisy basis."""
from typing import List, Optional, cast
import numpy as np
import numpy.typing as npt
from cirq import kraus
from scipy.optimize import LinearConstraint, minimize
from mitiq import QPROGRAM
from mitiq.interface import convert_to_mitiq
from mitiq.pec.channels import kraus_to_super, matrix_to_vector
from mitiq.pec.types import NoisyOperation, OperationRepresentation
def METHOD_NAME(
    ideal_matrix: npt.NDArray[np.complex64],
    basis_matrices: List[npt.NDArray[np.complex64]],
    tol: float = 1.0e-8,
    initial_guess: Optional[npt.NDArray[np.float64]] = None,
) -> npt.NDArray[np.float64]:
    r"""
    Returns the list of real coefficients :math:`[x_0, x_1, \dots]`
    minimizing :math:`\sum_j |x_j|` subject to the representation constraint

    .. math::
        \text{ideal_matrix} = x_0 A_0 + x_1 A_1 + \cdots

    where :math:`\{A_j\}` are the elements of ``basis_matrices``.

    This can be used to compute the optimal representation of an ideal
    superoperator (or Choi state) as a linear combination of real noisy
    superoperators (or Choi states).

    Args:
        ideal_matrix: The ideal matrix to represent.
        basis_matrices: The list of basis matrices.
        tol: The error tolerance for each matrix element
            of the represented matrix.
        initial_guess: Optional initial guess for the coefficients
            :math:`[x_0, x_1, \dots]`.

    Returns:
        The list of optimal coefficients :math:`[x_0, x_1, \dots]`.
    """

    def _as_real(mat):
        # Map a complex matrix to an extended real matrix [Re(M) | Im(M)].
        return np.hstack((np.real(mat), np.imag(mat)))

    real_ideal = _as_real(ideal_matrix)
    real_basis = [_as_real(mat) for mat in basis_matrices]

    # Cast the representation constraint as A @ x == b, within +/- tol.
    coeff_matrix = np.array(
        [
            matrix_to_vector(mat)  # type: ignore[arg-type]
            for mat in real_basis
        ]
    ).T
    target = matrix_to_vector(real_ideal)  # type: ignore[arg-type]
    constraint = LinearConstraint(coeff_matrix, lb=target - tol, ub=target + tol)

    if initial_guess is None:
        initial_guess = np.zeros(len(basis_matrices))

    def one_norm(x: npt.NDArray[np.complex64]) -> float:
        return cast(float, np.linalg.norm(x, 1))

    result = minimize(one_norm, x0=initial_guess, constraints=constraint)
    if not result.success:
        raise RuntimeError("The search for an optimal representation failed.")
    return result.x
def find_optimal_representation(
    ideal_operation: QPROGRAM,
    noisy_operations: List[NoisyOperation],
    tol: float = 1.0e-8,
    initial_guess: Optional[npt.NDArray[np.float64]] = None,
    is_qubit_dependent: bool = True,
) -> OperationRepresentation:
    r"""Returns the ``OperationRepresentation`` of the input ideal operation
    which minimizes the one-norm of the associated quasi-probability
    distribution.

    More precisely, it solve the following optimization problem:

    .. math::
        \min_{\eta_\alpha} \left\{\sum_\alpha |\eta_\alpha| \, : \,
        \mathcal G = \sum_\alpha \eta_\alpha \mathcal O_\alpha \right\}

    where :math:`\{\mathcal O_j\}` is the input basis of noisy operations,
    and :math:`\mathcal{G}` is the ideal operation to be represented.

    Args:
        ideal_operation: The ideal operation to represent.
        noisy_operations: The basis in which the ``ideal_operation``
            should be represented. Must be a list of ``NoisyOperation`` objects
            which are initialized with a numerical superoperator matrix.
        tol: The error tolerance for each matrix element
            of the represented operation.
        initial_guess: Optional initial guess for the coefficients
            :math:`\{ \eta_\alpha \}`.
        is_qubit_dependent: If True, the representation corresponds to the
            operation on the specific qubits defined in `ideal_operation`.
            If False, the representation is valid for the same gate even if
            acting on different qubits from those specified in
            `ideal_operation`.

    Returns: The optimal OperationRepresentation.
    """
    # Convert the circuit to Mitiq's internal (cirq) form and build the
    # ideal superoperator from its Kraus decomposition.
    ideal_cirq_circuit, _ = convert_to_mitiq(ideal_operation)
    ideal_matrix = kraus_to_super(
        cast(List[npt.NDArray[np.complex64]], kraus(ideal_cirq_circuit))
    )
    try:
        basis_matrices = [
            noisy_op.channel_matrix for noisy_op in noisy_operations
        ]
    except ValueError as err:
        # NOTE(review): matching on the exception message text is fragile --
        # it breaks silently if NoisyOperation ever rewords the error.
        if str(err) == "The channel matrix is unknown.":
            raise ValueError(
                "The input noisy_basis should contain NoisyOperation objects"
                " which are initialized with a numerical superoperator matrix."
            )
        else:
            raise err  # pragma no cover

    # Run numerical optimization problem
    quasi_prob_dist = METHOD_NAME(
        ideal_matrix,
        basis_matrices,
        tol=tol,
        initial_guess=initial_guess,
    )

    return OperationRepresentation(
        ideal_operation,
        noisy_operations,
        quasi_prob_dist.tolist(),
        is_qubit_dependent,
    )
5,858 | check es index | # Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OMeta User Mixin integration tests. The API needs to be up
"""
import logging
import time
from unittest import TestCase
from metadata.generated.schema.api.teams.createUser import CreateUserRequest
from metadata.generated.schema.entity.services.connections.metadata.openMetadataConnection import (
OpenMetadataConnection,
)
from metadata.generated.schema.entity.teams.user import User
from metadata.generated.schema.security.client.openMetadataJWTClientConfig import (
OpenMetadataJWTClientConfig,
)
from metadata.ingestion.ometa.ometa_api import OpenMetadata
class OMetaUserTest(TestCase):
    """
    Run this integration test with the local API available
    Install the ingestion package before running the tests
    """

    server_config = OpenMetadataConnection(
        hostPort="http://localhost:8585/api",
        authProvider="openmetadata",
        securityConfig=OpenMetadataJWTClientConfig(
            # NOTE(review): hard-coded JWT -- presumably the well-known local
            # development admin token, not a production secret; confirm.
            jwtToken="eyJraWQiOiJHYjM4OWEtOWY3Ni1nZGpzLWE5MmotMDI0MmJrOTQzNTYiLCJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJzdWIiOiJhZG1pbiIsImlzQm90IjpmYWxzZSwiaXNzIjoib3Blbi1tZXRhZGF0YS5vcmciLCJpYXQiOjE2NjM5Mzg0NjIsImVtYWlsIjoiYWRtaW5Ab3Blbm1ldGFkYXRhLm9yZyJ9.tS8um_5DKu7HgzGBzS1VTA5uUjKWOCU0B_j08WXBiEC0mr0zNREkqVfwFDD-d24HlNEbrqioLsBuFRiwIWKc1m_ZlVQbG7P36RUxhuv2vbSp80FKyNM-Tj93FDzq91jsyNmsQhyNv_fNr3TXfzzSPjHt8Go0FMMP66weoKMgW2PbXlhVKwEuXUHyakLLzewm9UMeQaEiRzhiTMU3UkLXcKbYEJJvfNFcLwSl9W8JCO_l0Yj3ud-qt_nQYEZwqW6u5nfdQllN133iikV4fM5QZsMCnm8Rq1mvLR0y9bmJiD7fwM1tmJ791TUWqmKaTnP49U493VanKpUAfzIiOiIbhg"
        ),
    )
    metadata = OpenMetadata(server_config)

    # Fail fast at class-definition time if the API is not reachable.
    assert metadata.health_check()

    @classmethod
    def METHOD_NAME(cls) -> None:
        """
        Wait until the index has been updated with the test user.
        """
        logging.info("Checking ES index status...")
        tries = 0
        res = None
        # Poll for the "Levy" user; each retry sleeps 1s.
        while not res and tries <= 5:  # Kill in 5 seconds
            res = cls.metadata.es_search_from_fqn(
                entity_type=User,
                fqn_search_string="Levy",
            )
            if not res:
                tries += 1
                time.sleep(1)

    @classmethod
    def setUpClass(cls) -> None:
        """
        Prepare ingredients
        """
        cls.user_1: User = cls.metadata.create_or_update(
            data=CreateUserRequest(
                name="random.user", email="random.user@getcollate.io"
            ),
        )
        cls.user_2: User = cls.metadata.create_or_update(
            data=CreateUserRequest(name="Levy", email="user2.1234@getcollate.io"),
        )
        cls.user_3: User = cls.metadata.create_or_update(
            data=CreateUserRequest(name="Lima", email="random.lima@getcollate.io"),
        )
        # Leave some time for indexes to get updated, otherwise this happens too fast
        cls.METHOD_NAME()

    @classmethod
    def tearDownClass(cls) -> None:
        """
        Clean up
        """
        # NOTE(review): user_3 ("Lima") is never deleted here -- it is left
        # behind after the test run; confirm whether that is intentional.
        cls.metadata.delete(
            entity=User,
            entity_id=cls.user_1.id,
            hard_delete=True,
        )
        cls.metadata.delete(
            entity=User,
            entity_id=cls.user_2.id,
            hard_delete=True,
        )

    def test_es_search_from_email(self):
        """
        We can fetch users by its email
        """
        # No email returns None
        self.assertIsNone(self.metadata.get_user_by_email(email=None))
        # Non existing email returns None
        self.assertIsNone(
            self.metadata.get_user_by_email(email="idonotexist@random.com")
        )
        # Non existing email returns, even if they have the same domain
        # To get this fixed, we had to update the `email` field in the
        # index as a `keyword` and search by `email.keyword` in ES.
        self.assertIsNone(
            self.metadata.get_user_by_email(email="idonotexist@getcollate.io")
        )
        # I can get User 1, who has the name equal to its email
        self.assertEqual(
            self.user_1.id,
            self.metadata.get_user_by_email(email="random.user@getcollate.io").id,
        )
        # I can get User 2, who has an email not matching the name
        self.assertEqual(
            self.user_2.id,
            self.metadata.get_user_by_email(email="user2.1234@getcollate.io").id,
        )
5,859 | observation | # Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
import itertools
from copy import copy
import numpy as np
from PIL import Image
import gym
from gym import spaces
# See https://github.com/openai/large-scale-curiosity/blob/0c3d179fd61ee46233199d0891c40fbe7964d3aa/wrappers.py#L155-L238
class MarioXReward(gym.Wrapper):
    """Wrap mario environment and use X-axis coordinate increment as reward.

    .. code-block::

        if initial or upgrade_to_new_level
            reward, max_x = 0, 0
        else:
            current_x = xscrollHi * 256 + xscrollLo
            reward = current_x - max_x if current_x > max_x else 0
            max_x = current_x if current_x > max_x else max_x
    """

    def __init__(self, env):
        gym.Wrapper.__init__(self, env)
        self.current_level = (0, 0)
        self.current_max_x = 0.

    def reset(self):
        """Reset the env and the per-episode progress bookkeeping."""
        observation = self.env.reset()
        self.current_level = (0, 0)
        self.current_max_x = 0.
        return observation

    def step(self, action):
        """Step the env, replacing the reward with the X-progress increment."""
        observation, reward, done, info = self.env.step(action)
        level = (info["levelLo"], info["levelHi"])
        if level != self.current_level:
            # Entered a new level: restart the progress tracking from zero.
            self.current_level = level
            self.current_max_x = 0.
            reward = 0.
        else:
            # Absolute X coordinate reconstructed from the two scroll bytes.
            x_position = info["xscrollHi"] * 256 + info["xscrollLo"]
            if x_position > self.current_max_x:
                reward = x_position - self.current_max_x
                self.current_max_x = x_position
            else:
                reward = 0.
        return observation, reward, done, info
class LimitedDiscreteActions(gym.ActionWrapper):
    """
    Wrap mario environment and make it use discrete actions.
    Map available button combinations to discrete actions
    eg:
    0 -> None
    1 -> UP
    2 -> DOWN
    ...
    k -> A
    ...
    m -> A + LEFT
    ...
    n -> B + UP
    ...
    """
    # Buttons that may be combined with arrows; shoulder buttons are unused here.
    BUTTONS = {"A", "B"}
    SHOULDERS = {"L", "R"}
    def __init__(self, env, all_buttons):
        gym.ActionWrapper.__init__(self, env)
        # 'B', None, 'SELECT', 'START', 'UP', 'DOWN', 'LEFT', 'RIGHT', 'A'
        self._num_buttons = len(all_buttons)
        # Indices of the A/B buttons within the controller layout.
        button_keys = {
            i
            for i, b in enumerate(all_buttons) if b in self.BUTTONS
        }
        # (), single buttons (zip of one iterable -> 1-tuples), and pairs.
        buttons = [(), *zip(button_keys),
                   *itertools.combinations(button_keys, 2)]
        # 'UP', 'DOWN', 'LEFT', 'RIGHT'
        arrows = [(), (4, ), (5, ), (6, ), (7, )]
        # Action table: arrows alone, buttons alone, then LEFT/RIGHT+button
        # combos; the ordering fixes the discrete-action numbering.
        acts = []
        acts += arrows
        acts += buttons[1:]
        acts += [a + b for a in arrows[-2:] for b in buttons[1:]]
        self._actions = acts
        self.action_space = gym.spaces.Discrete(len(self._actions))
    def action(self, a):
        # Translate a discrete action index into a multi-binary button mask.
        mask = np.zeros(self._num_buttons)
        for i in self._actions[a]:
            mask[i] = 1
        return mask
class ProcessFrame84(gym.ObservationWrapper):
    """
    Resize frame from original resolution to 84x84 or
    resize to 84x110 and then crop to 84x84

    Args:
        env: Environment whose RGB observations are converted.
        crop: If True, resize to 84x110 then crop; otherwise resize to 84x84.
    """

    def __init__(self, env, crop=True):
        self.crop = crop
        super(ProcessFrame84, self).__init__(env)
        self.observation_space = gym.spaces.Box(
            low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)

    def METHOD_NAME(self, obs):
        return ProcessFrame84.process(obs, crop=self.crop)

    @staticmethod
    def process(frame, crop=True):
        """Convert a raw RGB frame into a grayscale 84x84x1 uint8 image."""
        if frame.size == 210 * 160 * 3:
            img = np.reshape(frame, [210, 160, 3]).astype(np.float32)
        elif frame.size == 250 * 160 * 3:
            img = np.reshape(frame, [250, 160, 3]).astype(np.float32)
        elif frame.size == 224 * 240 * 3:  # mario resolution
            img = np.reshape(frame, [224, 240, 3]).astype(np.float32)
        else:
            # BUGFIX: `assert False` is stripped under `python -O`, which
            # would let an unknown resolution fall through with `img` unset.
            raise ValueError("Unknown resolution." + str(frame.size))
        # ITU-R 601 luma weights: RGB -> grayscale.
        img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
        # PIL's resize takes (width, height).
        size = (84, 110 if crop else 84)
        resized_screen = np.array(
            Image.fromarray(img).resize(size, resample=Image.BILINEAR),
            dtype=np.uint8)
        # Crop the vertical center band to obtain the final 84x84 image.
        x_t = resized_screen[18:102, :] if crop else resized_screen
        x_t = np.reshape(x_t, [84, 84, 1])
        return x_t.astype(np.uint8)
class FrameFormat(gym.Wrapper):
    """
    Format frame to specified data_format

    Args:
        data_format: Data format for frame
            `channels_first` for CHW and `channels_last` for HWC

    Raises:
        ValueError: If `data_format` is not one of the two accepted values.
    """

    def __init__(self, env, data_format='channels_last'):
        gym.Wrapper.__init__(self, env)
        data_format = data_format.lower()
        if data_format not in {'channels_first', 'channels_last'}:
            raise ValueError('The `data_format` argument must be one of '
                             '"channels_first", "channels_last". Received: ' +
                             str(data_format))
        self._transpose = False
        obs_shape = env.observation_space.shape
        if data_format == 'channels_first':
            # HWC -> CHW: move the channel axis to the front.
            self._transpose = True
            obs_shape = (obs_shape[-1],) + obs_shape[:-1]
        self.observation_space = spaces.Box(
            low=0,
            high=255,
            shape=obs_shape,
            dtype=env.observation_space.dtype)

    def reset(self):
        ob = self.env.reset()
        return self._get_ob(ob)

    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        ob = self._get_ob(ob)
        return ob, reward, done, info

    def _get_ob(self, ob):
        # FIX: removed the redundant function-local `import numpy as np`;
        # numpy is already imported at module level.
        if self._transpose:
            return np.transpose(ob, (2, 0, 1))
        return ob
5,860 | wait till lock owner finishes refreshing | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from asyncio import Condition, Lock, Event
from datetime import timedelta
from typing import Any
import sys
from .utils import get_current_utc_as_int
from .utils import create_access_token
from .utils_async import AsyncTimer
class CommunicationTokenCredential(object):
    """Credential type used for authenticating to an Azure Communication service.

    :param str token: The token used to authenticate to an Azure Communication service.
    :keyword token_refresher: The async token refresher to provide capacity to fetch a fresh token.
        The returned token must be valid (expiration date must be in the future).
    :paramtype token_refresher: Callable[[], Awaitable[AccessToken]]
    :keyword bool proactive_refresh: Whether to refresh the token proactively or not.
        If the proactive refreshing is enabled ('proactive_refresh' is true), the credential will use
        a background thread to attempt to refresh the token within 10 minutes before the cached token expires,
        the proactive refresh will request a new token by calling the 'token_refresher' callback.
        When 'proactive_refresh is enabled', the Credential object must be either run within a context manager
        or the 'close' method must be called once the object usage has been finished.
    :raises: TypeError if parameter 'token' is not a string
    :raises: ValueError if the 'proactive_refresh' is enabled without providing the 'token_refresher' function.
    """
    # Refresh window (minutes before expiry) for each refresh strategy.
    _ON_DEMAND_REFRESHING_INTERVAL_MINUTES = 2
    _DEFAULT_AUTOREFRESH_INTERVAL_MINUTES = 10
    def __init__(self, token: str, **kwargs: Any):
        if not isinstance(token, str):
            raise TypeError("Token must be a string.")
        self._token = create_access_token(token)
        self._token_refresher = kwargs.pop("token_refresher", None)
        self._proactive_refresh = kwargs.pop("proactive_refresh", False)
        if self._proactive_refresh and self._token_refresher is None:
            raise ValueError(
                "When 'proactive_refresh' is True, 'token_refresher' must not be None."
            )
        self._timer = None
        self._async_mutex = Lock()
        if sys.version_info[:3] == (3, 10, 0):
            # Workaround for Python 3.10 bug(https://bugs.python.org/issue45416):
            getattr(self._async_mutex, "_get_loop", lambda: None)()
        # Condition guards _token / _some_thread_refreshing coordination.
        self._lock = Condition(self._async_mutex)
        self._some_thread_refreshing = False
        self._is_closed = Event()
    async def get_token(self, *scopes, **kwargs): # pylint: disable=unused-argument
        # type (*str, **Any) -> AccessToken
        """The value of the configured token.

        :param any scopes: Scopes to be added to the token.
        :return: AccessToken
        :rtype: ~azure.core.credentials.AccessToken
        """
        if self._proactive_refresh and self._is_closed.is_set():
            raise RuntimeError(
                "An instance of CommunicationTokenCredential cannot be reused once it has been closed."
            )
        # Fast path: no refresher configured, or token still comfortably valid.
        if not self._token_refresher or not self._is_token_expiring_soon(self._token):
            return self._token
        await self._update_token_and_reschedule()
        return self._token
    async def _update_token_and_reschedule(self):
        # Ensure at most one task performs the refresh; others wait on the
        # condition until the refresher has swapped in a new token.
        should_this_thread_refresh = False
        async with self._lock:
            while self._is_token_expiring_soon(self._token):
                if self._some_thread_refreshing:
                    if self._is_token_valid(self._token):
                        # Soon-to-expire but still valid: usable as-is.
                        return self._token
                    await self.METHOD_NAME()
                else:
                    should_this_thread_refresh = True
                    self._some_thread_refreshing = True
                    break
        if should_this_thread_refresh:
            try:
                new_token = await self._token_refresher()
                if not self._is_token_valid(new_token):
                    raise ValueError(
                        "The token returned from the token_refresher is expired."
                    )
                async with self._lock:
                    self._token = new_token
                    self._some_thread_refreshing = False
                    self._lock.notify_all()
            except:
                # Clear the refreshing flag and wake waiters even on failure,
                # then propagate the original exception.
                async with self._lock:
                    self._some_thread_refreshing = False
                    self._lock.notify_all()
                raise
        if self._proactive_refresh:
            self._schedule_refresh()
        return self._token
    def _schedule_refresh(self):
        # (Re)arm the background timer that proactively refreshes the token.
        if self._is_closed.is_set():
            return
        if self._timer is not None:
            self._timer.cancel()
        token_ttl = self._token.expires_on - get_current_utc_as_int()
        if self._is_token_expiring_soon(self._token):
            # Schedule the next refresh for when it reaches a certain percentage of the remaining lifetime.
            timespan = token_ttl // 2
        else:
            # Schedule the next refresh for when it gets in to the soon-to-expire window.
            timespan = (
                token_ttl
                - timedelta(
                    minutes=self._DEFAULT_AUTOREFRESH_INTERVAL_MINUTES
                ).total_seconds()
            )
        self._timer = AsyncTimer(timespan, self._update_token_and_reschedule)
        self._timer.start()
    async def METHOD_NAME(self):
        # Block until the refreshing task notifies the condition. The lock is
        # already held by the enclosing `async with`; release/acquire here
        # mirrors Condition.wait() semantics.
        self._lock.release()
        await self._lock.acquire()
    def _is_token_expiring_soon(self, token):
        # Soon-to-expire window depends on the configured refresh strategy.
        if self._proactive_refresh:
            interval = timedelta(minutes=self._DEFAULT_AUTOREFRESH_INTERVAL_MINUTES)
        else:
            interval = timedelta(minutes=self._ON_DEMAND_REFRESHING_INTERVAL_MINUTES)
        return (token.expires_on - get_current_utc_as_int()) < interval.total_seconds()
    @classmethod
    def _is_token_valid(cls, token):
        # A token is valid while its expiry lies in the future.
        return get_current_utc_as_int() < token.expires_on
    async def __aenter__(self):
        if self._proactive_refresh:
            if self._is_closed.is_set():
                raise RuntimeError(
                    "An instance of CommunicationTokenCredential cannot be reused once it has been closed."
                )
            self._schedule_refresh()
        return self
    async def __aexit__(self, *args):
        await self.close()
    async def close(self) -> None:
        """Cancel the proactive-refresh timer and mark the credential closed."""
        if self._timer is not None:
            self._timer.cancel()
            self._timer = None
        self._is_closed.set()
5,861 | wait on sys message | #!/usr/bin/python
# To run these tests you must copy one of the example files to "creds.py" in
# this directory, and edit it for your environment. There are 2 example files
# in this directory:
#
# remote_creds.example.py: Configure the Selenium tests to run with BrowserStack
# local_creds.example.py: Configure the Selenium tests to run locally
import re
from creds import SITE_URL, USER, PASS, get_driver
from selenium.webdriver.common.by import By
from selenium.common import exceptions
from contextlib import contextmanager
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as exp_cond
INI_PATH = '../../hm3.ini'
class WebTest:
    """Convenience wrapper around a Selenium driver for the HM3 test suite."""

    driver = None

    def __init__(self, cap=None):
        self.read_ini()
        self.driver = get_driver(cap)
        self.browser = False
        if 'browserName' in self.driver.capabilities:
            self.browser = self.driver.capabilities['browserName'].lower()
        self.load()

    def read_ini(self):
        """Collect the enabled module names from the hm3.ini file."""
        self.modules = []
        # BUGFIX: use a context manager so the ini file handle is always
        # closed, and a raw string so the regex escapes are valid.
        with open(INI_PATH) as ini:
            for row in ini.readlines():
                if re.match(r'^modules\[\]\=', row):
                    parts = row.split('=')
                    self.modules.append(parts[1].strip())

    def load(self):
        """Open the site and try to maximize the browser window."""
        print(" - loading site")
        self.go(SITE_URL)
        try:
            self.driver.maximize_window()
        except Exception:
            print(" - Could not maximize browser :(")
        if self.browser == 'safari':
            try:
                self.driver.set_window_size(1920,1080)
            except Exception:
                print(" - Could not maximize Safari")

    def mod_active(self, name):
        """Return True when the given module is enabled in hm3.ini."""
        if name in self.modules:
            return True
        print(" - module not enabled: %s" % name)
        return False

    def go(self, url):
        self.driver.get(url)

    def login(self, user, password):
        print(" - logging in")
        user_el = self.by_name('username')
        pass_el = self.by_name('password')
        user_el.send_keys(user)
        pass_el.send_keys(password)
        self.by_css('input[value=Login]').click()

    def change_val(self, el, val):
        # Set the element's value via JS; send_keys cannot be used for
        # some inputs (e.g. hidden fields).
        self.driver.execute_script('''
            var e=arguments[0]; var v=arguments[1]; e.value=v;''',
            el, val)

    def confirm_alert(self):
        WebDriverWait(self.driver, 3).until(exp_cond.alert_is_present(), 'timed out')
        alert = self.driver.switch_to.alert
        alert.accept()

    def logout_no_save(self):
        print(" - logging out")
        # NOTE(review): find_element_by_* is the Selenium 3 API — confirm the
        # pinned selenium version before migrating to find_element(By...).
        self.driver.find_element_by_class_name('logout_link').click()
        # FIX: dropped the unused `logout =` assignment.
        self.by_id('logout_without_saving').click()

    def logout(self):
        print(" - logging out")
        self.driver.find_element_by_class_name('logout_link').click()

    def end(self):
        self.driver.quit()

    def by_id(self, el_id):
        print(" - finding element by id {0}".format(el_id))
        return self.driver.find_element_by_id(el_id)

    def by_tag(self, name):
        print(" - finding element by tag name {0}".format(name))
        return self.driver.find_element_by_tag_name(name)

    def by_name(self, name):
        print(" - finding element by name {0}".format(name))
        return self.driver.find_element_by_name(name)

    def by_css(self, selector):
        print(" - finding element by selector {0}".format(selector))
        return self.driver.find_element_by_css_selector(selector)

    def by_class(self, class_name):
        print(" - finding element by class {0}".format(class_name))
        return self.driver.find_element_by_class_name(class_name)

    def wait(self, el_type=By.TAG_NAME, el_value="body", timeout=30):
        """Block until the element described by (el_type, el_value) exists."""
        print(" - waiting for page by {0}: {1} ...".format(el_type, el_value))
        # FIX: dropped the unused `element =` assignment.
        WebDriverWait(self.driver, timeout).until(
            exp_cond.presence_of_element_located((el_type, el_value)))

    def wait_on_class(self, class_name, timeout=30):
        self.wait(By.CLASS_NAME, class_name)

    def wait_with_folder_list(self):
        self.wait(By.CLASS_NAME, "main_menu")

    def METHOD_NAME(self, timeout=30):
        """Block until the system-messages area contains non-empty text."""
        wait = WebDriverWait(self.driver, timeout)
        wait.until(wait_for_non_empty_text((By.CLASS_NAME, "sys_messages")))

    def safari_workaround(self, timeout=1):
        if self.browser == 'safari':
            print(" - waiting {0} extra second for Safari".format(timeout))
            self.driver.implicitly_wait(timeout)
class wait_for_non_empty_text(object):
    """Selenium wait condition: true once the located element has text.

    :param locator: (By, value) tuple identifying the element to watch.
    """

    def __init__(self, locator):
        self.locator = locator

    def __call__(self, driver):
        try:
            # BUGFIX: `expected_conditions._find_element` is a private helper
            # that was removed in Selenium 4; use the public driver API.
            element_text = driver.find_element(*self.locator).text.strip()
            print(element_text)
            return element_text != ""
        except exceptions.StaleElementReferenceException:
            # Element was re-rendered between locate and read; retry.
            return False
5,862 | test structures convert isodose to roi button | import os
import pytest
from src.Controller.GUIController import MainWindow
from src.Model.PatientDictContainer import PatientDictContainer
from src.Model import ImageLoading
from pathlib import Path
from pydicom import dcmread
from pydicom.errors import InvalidDicomError
def find_DICOM_files(file_path):
    """Function to find DICOM files in a given folder.

    :param file_path: File path of folder to search.
    :return: List of file paths of DICOM files in given folder.
    """
    found = []
    # Walk through directory; keep every file pydicom can parse.
    for root, _dirs, file_names in os.walk(file_path, topdown=True):
        for file_name in file_names:
            full_path = os.path.join(root, file_name)
            try:
                dcmread(full_path)
            except (InvalidDicomError, FileNotFoundError):
                continue
            found.append(full_path)
    return found
class TestIso2RoiGui:
    """
    Class that initializes OnkoDICOM window for testing
    Iso2ROI GUI - using files from testdata directory
    """
    # Tell pytest not to collect this helper class as a test case.
    __test__ = False
    def __init__(self):
        # Load test DICOM files
        desired_path = Path.cwd().joinpath('test', 'testdata')
        # list of DICOM test files
        selected_files = find_DICOM_files(desired_path)
        # file path of DICOM files
        file_path = os.path.dirname(os.path.commonprefix(selected_files))
        read_data_dict, file_names_dict = \
            ImageLoading.get_datasets(selected_files)
        # Create patient dict container object
        self.patient_dict_container = PatientDictContainer()
        self.patient_dict_container.clear()
        self.patient_dict_container.set_initial_values\
            (file_path, read_data_dict, file_names_dict)
        # Set additional attributes in patient dict container
        # (otherwise program will crash and test will fail)
        if "rtss" in file_names_dict:
            dataset_rtss = dcmread(file_names_dict['rtss'])
            self.patient_dict_container.set("existing_rtss_files",
                                            [])
            self.rois = ImageLoading.get_roi_info(dataset_rtss)
            dict_raw_contour_data, dict_numpoints = \
                ImageLoading.get_raw_contour_data(dataset_rtss)
            dict_pixluts = ImageLoading.get_pixluts(read_data_dict)
            self.patient_dict_container.set("rois", self.rois)
            self.patient_dict_container.set("raw_contour",
                                            dict_raw_contour_data)
            self.patient_dict_container.set("num_points", dict_numpoints)
            self.patient_dict_container.set("pixluts", dict_pixluts)
        # Open the main window
        self.main_window = MainWindow()
        # Baseline counts; tests assert the button press never reduces them.
        self.initial_structure_count = \
            self.main_window.structures_tab.layout_content.count()
        self.initial_roi_count = len(self.main_window.structures_tab.rois)
@pytest.fixture(scope="module")
def test_object():
    """
    Function to pass a shared TestIso2RoiGui object
    to each test.
    """
    # Module scope: the expensive GUI/DICOM setup runs only once.
    return TestIso2RoiGui()
def METHOD_NAME(test_object):
    """
    Test will simulate the 'Convert isodose to roi' button being
    pressed, assert structure count is greater than or equal to what it
    originally was.

    :param test_object: test_object function, for accessing the shared
        TestIso2RoiGui object.
    """
    # Simulate 'Convert Isodose to ROI' button being pressed
    test_object.main_window.isodoses_tab.iso2roi_button_clicked()
    # Get the new length of structures in Structures Tab
    current_structure_count = \
        test_object.main_window.structures_tab.layout_content.count()
    # Assert the length has not changed
    assert current_structure_count >= test_object.initial_structure_count
def test_rois_convert_isodose_to_roi_button_pressed(test_object):
    """
    Simulate the 'Convert isodose to roi' button being pressed and
    assert the ROI count is greater than or equal to what it
    originally was.

    :param test_object: test_object fixture, for accessing the shared
        TestIso2RoiGui object.
    """
    # Press the 'Convert Isodose to ROI' button.
    test_object.main_window.isodoses_tab.iso2roi_button_clicked()
    # The Structures Tab must hold at least as many ROIs as before.
    roi_count_after = len(test_object.main_window.structures_tab.rois)
    assert roi_count_after >= test_object.initial_roi_count
5,863 | parse args | #!/usr/bin/env python3
# -*- coding: utf-8; py-indent-offset: 4 -*-
#
# Author: Linuxfabrik GmbH, Zurich, Switzerland
# Contact: info (at) linuxfabrik (dot) ch
# https://www.linuxfabrik.ch/
# License: The Unlicense, see LICENSE file.
# https://github.com/Linuxfabrik/monitoring-plugins/blob/main/CONTRIBUTING.rst
"""See the check's README for more details.
"""
import argparse # pylint: disable=C0413
import sys # pylint: disable=C0413
import lib.args # pylint: disable=C0413
import lib.base # pylint: disable=C0413
import lib.shell # pylint: disable=C0413
import lib.test # pylint: disable=C0413
import lib.txt # pylint: disable=C0413
from lib.globals import (STATE_CRIT, STATE_OK, # pylint: disable=C0413
STATE_UNKNOWN, STATE_WARN)
__author__ = 'Linuxfabrik GmbH, Zurich/Switzerland'
__version__ = '2023071203'
DESCRIPTION = """Checks dmesg for emerg, alert, crit and err messages. Executes `dmesg
--level=emerg,alert,crit,err --ctime `. If you fixed the issues (or just want to
clear them), use `dmesg --clear` to clear the Kernel Ring Buffer Messages."""
DEFAULT_SEVERITY = 'crit'
cmd = 'dmesg --level=emerg,alert,crit,err --ctime'
# ignore false positives / bugs / messages without impact on anything:
DEFAULT_IGNORE = [
' Asking for cache data failed',
' Assuming drive cache: write through',
' brcmfmac: brcmf_c_preinit_dcmds: Firmware: BCM4345/6', # Raspbian
' brcmfmac: brcmf_fw_alloc_request: using brcm/brcmfmac43455-sdio for chip BCM4345/6', # Raspbian
' CIFS VFS: Free previous auth_key.response = ',
' cpufreq: __cpufreq_add_dev: ->get() failed',
' EFI MOKvar config table is not in EFI runtime memory', # https://rockylinux.org/news/rocky-linux-8-5-ga-release/
' ERST: Failed to get Error Log Address Range.',
' i8042: No controller found',
' Ignoring unsafe software power cap!',
' ioctl error in smb2_get_dfs_refer rc=-5', # https://access.redhat.com/solutions/3496971
' kvm_set_msr_common: MSR_IA32_DEBUGCTLMSR ', # is mostly caused by Windows-VMs on KVM/oVirt
' mokvar: EFI MOKvar config table is not in EFI runtime memory', # https://rockylinux.org/news/rocky-linux-8-5-ga-release/
' No Caching mode page found',
' SMBus base address uninitialized - upgrade BIOS or use ', # https://access.redhat.com/solutions/2115401
' SMBus Host Controller not enabled!',
' tsc: Fast TSC calibration failed',
' unhandled rdmsr: ', # https://access.redhat.com/solutions/59299
' unhandled wrmsr: ', # https://bugzilla.redhat.com/show_bug.cgi?id=874627
' vcpu0 disabled perfctr wrmsr', # https://access.redhat.com/solutions/2188061
]
def METHOD_NAME():
    """Parse command line arguments using argparse.

    :return: The parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser(description=DESCRIPTION)

    parser.add_argument(
        '-V', '--version',
        action='version',
        version='{0}: v{1} by {2}'.format('%(prog)s', __version__, __author__)
    )

    parser.add_argument(
        '--always-ok',
        help='Always returns OK.',
        dest='ALWAYS_OK',
        action='store_true',
        default=False,
    )

    parser.add_argument(
        '--ignore',
        help='Ignore a kernel message (case-sensitive, repeating). Default: %(default)s',
        dest='IGNORE',
        default=DEFAULT_IGNORE,
        action='append',
    )

    parser.add_argument(
        '--severity',
        help='Severity for alerting. Default: %(default)s',
        dest='SEVERITY',
        default=DEFAULT_SEVERITY,
        choices=['warn', 'crit'],
    )

    parser.add_argument(
        '--test',
        help='For unit tests. Needs "path-to-stdout-file,path-to-stderr-file,expected-retc".',
        dest='TEST',
        type=lib.args.csv,
    )

    # BUGFIX: was `parser.METHOD_NAME()` — ArgumentParser has no such method;
    # the argparse API call is `parse_args()`.
    return parser.parse_args()
def main():
    """The main function. Hier spielt die Musik.
    """
    # parse the command line, exit with UNKNOWN if it fails
    try:
        args = METHOD_NAME()
    except SystemExit:
        sys.exit(STATE_UNKNOWN)
    # fetch data
    if args.TEST is None:
        # execute the shell command and return its result and exit code
        stdout, stderr, retc = lib.base.coe(lib.shell.shell_exec(cmd))
        if (stderr or retc != 0):
            lib.base.cu(stderr)
    else:
        # do not call the command, put in test data
        stdout, stderr, retc = lib.test.test(args.TEST)
    # analyze data: drop ignored kernel messages before counting
    result = stdout.strip()
    result = lib.txt.filter_mltext(result, args.IGNORE).strip()
    # build the message
    if len(result) > 0:
        cnt = result.count('\n') + 1
        if cnt > 10:
            # shorten the message: keep the first and last five lines
            result = result.split('\n')
            result = result[0:5] + ['...'] + result[-5:]
            result = '\n'.join(result)
        msg = '{} {} in Kernel Ring Buffer.\n\n{}'.format(cnt, lib.txt.pluralize('error', cnt), result)
        state = STATE_CRIT if args.SEVERITY == 'crit' else STATE_WARN
    else:
        msg = 'Everything is ok.'
        state = STATE_OK
    # over and out
    lib.base.oao(msg, state, always_ok=args.ALWAYS_OK)
if __name__ == '__main__':
    try:
        main()
    except Exception:   # pylint: disable=W0703
        # last-resort handler: report UNKNOWN with a stack trace
        lib.base.cu()
5,864 | test not matches | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test type checker based on python's type annotations"""
import sys
from typing import Dict, List, Tuple, Union, Callable
import pytest
import _pytest
import tvm
from tvm.tir.schedule._type_checker import type_checked
def int_func(x: int) -> int:
    """Return twice the given integer (sample callable for type tests)."""
    return x + x
def str_func(x: str) -> str:
    """Return the given string repeated twice (sample callable for type tests)."""
    return x + x
# Table of (annotation, values-that-match, values-that-must-fail) triples
# driving the positive/negative parametrizations below.
test_cases = [
    {
        "type_annotation": int,
        "positive_cases": [5],
        "negative_cases": ["5"],
    },
    {
        "type_annotation": List[int],
        "positive_cases": [
            [5],
            [],
            # Tuples are allowed to be used as lists, because both are
            # represented in FFI as tvm::runtime::Array.
            (1, 2, 3),
        ],
        "negative_cases": [
            None,
            5,
            ["5"],
        ],
    },
    {
        "type_annotation": Dict[str, int],
        "positive_cases": [
            {"key1": 0, "key2": 1, "key3": -1},
        ],
        "negative_cases": [None, [1], {1: "1"}],
    },
    {
        "type_annotation": Tuple[int],
        "positive_cases": [
            (5,),
        ],
        "negative_cases": [
            None,
            (1, 2, 3),
            [1],
            5,
            ["5"],
        ],
    },
    {
        "type_annotation": Tuple[str, int],
        "positive_cases": [
            ("x", 5),
        ],
        "negative_cases": [
            42,
            ("x", 5, 6),
            ("x", 5, "y"),
            ("x", 5.0),
            (None, 5),
        ],
    },
    {
        "type_annotation": Union[str, int],
        "positive_cases": [
            "x",
            5,
        ],
        "negative_cases": [
            5.0,
            ("x", 5, 6),
            None,
        ],
    },
    {
        "type_annotation": Callable,
        "positive_cases": [str_func, int_func],
        "negative_cases": [
            None,
            "x",
            42,
        ],
    },
    {
        "type_annotation": Callable[[int], int],
        "positive_cases": [int_func],
        "negative_cases": [
            None,
            "x",
            42,
            # Known limitation: argument/return types of Callables are not
            # validated, so this mismatch is expected to fail (xfail).
            pytest.param(
                str_func,
                marks=pytest.mark.xfail(
                    reason="Signature of Callable arguments not currently checked"
                ),
            ),
        ],
    },
]
def make_parametrization(type_annotation, case):
    """Wrap (annotation, case) into a pytest.param with a readable test id."""
    marks = []
    if isinstance(case, _pytest.mark.structures.ParameterSet):
        # Unwrap an already-parametrized case, preserving its marks (xfail).
        marks = case.marks
        (case,) = case.values

    try:
        annotation_name = type_annotation.__name__
    except AttributeError:
        # typing constructs (List[int], ...) have no __name__; use repr.
        annotation_name = str(type_annotation).replace("typing.", "")

    case_name = case.__name__ if hasattr(case, "__name__") else str(case)

    return pytest.param(
        type_annotation,
        case,
        marks=marks,
        id=f"{annotation_name}, {case_name}",
    )
# Flatten the table into pytest parametrizations: one param per (annotation,
# matching value) pair ...
positive_cases = [
    make_parametrization(config["type_annotation"], case)
    for config in test_cases
    for case in config["positive_cases"]
]
# ... and one per (annotation, non-matching value) pair.
negative_cases = [
    make_parametrization(config["type_annotation"], case)
    for config in test_cases
    for case in config["negative_cases"]
]
@pytest.mark.parametrize(
    ["type_annotation", "case"],
    positive_cases,
)
def test_matches_type(type_annotation, case):
    """A matching value must pass the @type_checked validation silently."""
    @type_checked
    def func(_: type_annotation):
        pass

    func(case)
@pytest.mark.parametrize(
    ["type_annotation", "case"],
    negative_cases,
)
def METHOD_NAME(type_annotation, case):
    """A non-matching value must make @type_checked raise TypeError."""
    @type_checked
    def func(_: type_annotation):
        pass

    with pytest.raises(TypeError):
        func(case)
if __name__ == "__main__":
    tvm.testing.main()
5,865 | test output | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.scripts.display_data import display_data as display, setup_args
from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
from parlai.core.message import Message
from parlai.core.metrics import F1Metric, AverageMetric
from parlai.core.teachers import create_task_agent_from_taskname
from parlai.core.worlds import create_task
from parlai.tasks.wizard_of_wikipedia.agents import TOKEN_KNOWLEDGE
import unittest
import itertools
import parlai.utils.testing as testing_utils
def product_dict(dictionary):
    """Yield every dict that combines one value per key (Cartesian product)."""
    keys = list(dictionary.keys())
    for combination in itertools.product(*dictionary.values()):
        yield dict(zip(keys, combination))
class TestWoW(unittest.TestCase):
    """
    Basic tests on the train_model.py example.
    """

    @unittest.skip
    def METHOD_NAME(self):
        """Smoke-test dataset display for every teacher variant, datatype and
        split combination (currently skipped; presumably too slow for CI --
        confirm)."""
        dts = ['train', 'valid', 'test']
        main_task = 'wizard_of_wikipedia'
        variants = [
            'WizardOfWikipediaTeacher',
            'WizardDialogKnowledgeTeacher',
            'BasicdialogTeacher',
            'DocreaderTeacher',
            'GeneratorTeacher',
        ]
        # Per-teacher flag grids; every combination is expanded and tested.
        variant_args = {
            'WizardOfWikipediaTeacher': {},
            'WizardDialogKnowledgeTeacher': {
                'label_type': ['response', 'chosen_sent'],
                'include_knowledge': [False, True],
                'include_checked_sentence': [False, True],
            },
            'BasicdialogTeacher': {'wizard_dialog': [False, True]},
            'DocreaderTeacher': {
                'teacher_type': [
                    'docs',
                    'docs_sentence',
                    'more_docs',
                    'more_docs_sentence',
                    'span',
                ]
            },
            'GeneratorTeacher': {
                'only_checked_knowledge': [False, True],
                'ignorant_dropout': [0, 0.5, 1],
            },
        }
        splits = ['random_split', 'topic_split']
        for datatype in dts:
            for task_var in variants:
                for split in splits:
                    task_name = '{}:{}:{}'.format(main_task, task_var, split)
                    opt_defaults = {'task': task_name, 'datatype': datatype}
                    task_args = variant_args[task_var]
                    if len(task_args) == 0:
                        print('Testing {} with args {}'.format(task_name, opt_defaults))
                        self._run_display_test(opt_defaults)
                    else:
                        # Expand the flag grid into every combination.
                        for combo in product_dict(task_args):
                            args = {**opt_defaults, **combo}
                            print('Testing {} with args {}'.format(task_name, args))
                            self._run_display_test(args)

    def _run_display_test(self, kwargs):
        """Run the display script with *kwargs* and assert that the
        episode/example summary line appears in its captured output."""
        with testing_utils.capture_output() as stdout:
            parser = setup_args()
            parser.set_defaults(**kwargs)
            opt = parser.parse_args([])
            agent = RepeatLabelAgent(opt)
            world = create_task(opt, agent)
            display(opt)
        str_output = stdout.getvalue()
        self.assertTrue(
            'loaded {} episodes with a total of {} examples'.format(
                world.num_episodes(), world.num_examples()
            )
            in str_output,
            'Wizard of Wikipedia failed with following args: {}'.format(opt)
            + str_output,
        )

    def test_custom_eval(self):
        """
        Test whether custom evaluation works.
        """
        parser = setup_args()
        opt = parser.parse_args(
            [
                '--task',
                'wizard_of_wikipedia',
                '--datatype',
                'valid',
                '--label-type',
                'chosen_sent',
            ]
        )
        teacher = create_task_agent_from_taskname(opt)[0]
        # Fixture data: a teacher action plus distractor candidates ('four'
        # expands to ['f', 'o', 'u', 'r']).
        title = 'Gardening'
        cands = list('four')
        text = "Gardening\nI like Gardening, even when I've only been doing it for a short time."
        response = 'I live on a farm, we garden all year long, it is very relaxing.'
        checked_sent = (
            'Gardening is considered by many people to be a relaxing activity.'
        )
        checked_sent_label = f'{title}{TOKEN_KNOWLEDGE}{checked_sent}'
        retrieval_metric_keys = ['passage_r@1', 'passage_r@5', 'title_r@1', 'title_r@5']
        chosen_sent_teacher_action = Message(
            {
                'text': text,
                'labels': [checked_sent_label],
                'title': [title],
                'checked_sentence': [checked_sent],
            }
        )
        correct_chosen_sent_response = Message(
            {
                'text': checked_sent_label,
                'title_candidates': [title] + cands,
                'text_candidates': [checked_sent_label] + cands,
            }
        )
        top5_chosen_sent_response = Message(
            {
                'text': f'hello{TOKEN_KNOWLEDGE}goodbye',
                'title_candidates': cands + [title],
                'text_candidates': cands + [checked_sent_label],
            }
        )
        incorrect_chosen_sent_response = Message(
            {
                'text': f'hello{TOKEN_KNOWLEDGE}goodbye',
                'title_candidates': cands,
                'text_candidates': cands,
            }
        )
        response_teacher_action = Message(
            {'text': text, 'labels': [response], 'checked_sentence': checked_sent}
        )
        high_f1_response = Message({'text': checked_sent})
        low_f1_response = Message({'text': 'incorrect'})
        # 1) Test with correct top sentence
        teacher.reset_metrics()
        teacher.custom_evaluation(
            chosen_sent_teacher_action,
            [checked_sent_label],
            correct_chosen_sent_response,
        )
        report = teacher.report()
        for k in retrieval_metric_keys:
            assert k in report
            assert report[k] == AverageMetric(1)
        # 2) Test with top sentence in top 5
        teacher.reset_metrics()
        teacher.custom_evaluation(
            chosen_sent_teacher_action, [checked_sent_label], top5_chosen_sent_response
        )
        report = teacher.report()
        for k in retrieval_metric_keys:
            assert k in report
            # NOTE(review): by operator precedence this parses as
            # `assert ((report[k] == AverageMetric(1)) if '5' in k else AverageMetric(0))`,
            # i.e. for non-"@5" keys it only asserts the truthiness of
            # AverageMetric(0), not the intended equality -- confirm and
            # consider parenthesizing the ternary on the right-hand side.
            assert report[k] == AverageMetric(1) if '5' in k else AverageMetric(0)
        # 3) Test with no top sentences
        teacher.reset_metrics()
        teacher.custom_evaluation(
            chosen_sent_teacher_action,
            [checked_sent_label],
            incorrect_chosen_sent_response,
        )
        report = teacher.report()
        for k in retrieval_metric_keys:
            assert k in report
            assert report[k] == AverageMetric(0)
        # 4) Test knowledge f1 with high f1
        teacher.label_type = 'response'
        teacher.reset_metrics()
        teacher.custom_evaluation(response_teacher_action, [response], high_f1_response)
        report = teacher.report()
        assert 'knowledge_f1' in report
        assert report['knowledge_f1'] == F1Metric(1)
        # 5) Test knowledge f1 with low f1
        teacher.reset_metrics()
        teacher.custom_evaluation(response_teacher_action, [response], low_f1_response)
        report = teacher.report()
        assert 'knowledge_f1' in report
        assert report['knowledge_f1'] == F1Metric(0)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
5,866 | decorator | '''
The client class
'''
import certifi
import errno
import select
import six
import socket
import ssl
import threading
import websocket
import zlib
from contextlib import closing
def validate(self, skip_utf8_validation=False):
    '''Replacement for C{websocket.ABNF.validate} that clears the RSV1
    (compression) bit before delegating to the original validator, so
    permessage-deflate frames are not rejected by the websocket library.

    The original unbound method is stashed on C{validate.proto} by
    C{Client._patch_ABNF()} before this function is installed.
    '''
    self.rsv1 = 0
    validate.proto(self, skip_utf8_validation)
class ZlibDecoder(object):
    """Inflates permessage-deflate websocket payloads (raw DEFLATE data)."""

    def __init__(self):
        # Negative wbits: raw DEFLATE stream without zlib header/checksum.
        self.obj = zlib.decompressobj(-zlib.MAX_WBITS)

    def decode(self, data):
        # Re-append the 0x00 0x00 0xff 0xff sync-flush trailer that the
        # sender strips from each message before inflating (RFC 7692).
        trailer = b'\x00\x00\xff\xff'
        return self.obj.decompress(data + trailer)
class WebSocket(websocket.WebSocket):
    """websocket.WebSocket subclass adding negotiated-extension validation on
    connect and transparent permessage-deflate decompression on recv."""

    def __init__(self, **kwargs):
        # NOTE(review): ping_interval is popped only AFTER kwargs has been
        # forwarded to the parent constructor, so the parent receives it as
        # well -- confirm websocket.WebSocket tolerates/consumes this kwarg.
        super(WebSocket, self).__init__(**kwargs)
        self.ping_interval = kwargs.pop('ping_interval', 0)
        self.decoder = None  # set in connect() when permessage-deflate is active

    def _get_ext(self, headers):
        # Case-insensitive lookup of Sec-WebSocket-Extensions; returns the
        # lowercased value, or None when the header is absent.
        if headers:
            for key, value in headers.items():
                if key.lower() == 'sec-websocket-extensions':
                    return value.lower()

    def _cmp_ext(self, e1, e2):
        # Compare two extension header values ignoring parameter order and
        # surrounding whitespace.
        return sorted([v.strip() for v in e1.split(';')]) == sorted(
            [v.strip() for v in e2.split(';')])

    def connect(self, url, **options):
        """Connect, then require the server to echo exactly the extensions we
        requested; enable the zlib decoder for permessage-deflate."""
        super(WebSocket, self).connect(url, **options)
        extensions = self._get_ext(self.headers)
        if extensions:
            if 'header' not in options or not self._cmp_ext(
                    extensions, self._get_ext(options['header'])):
                raise websocket.WebSocketException(
                    'Unsupported websocket extensions: ' + extensions)
            if 'permessage-deflate' in extensions:
                self.decoder = ZlibDecoder()
            else:
                raise websocket.WebSocketException(
                    'Unsupported websocket compression: ' + extensions)

    def recv(self):
        """Receive one message, inflating it when compression is negotiated."""
        data = super(WebSocket, self).recv()
        if self.decoder:
            return self.decoder.decode(data)
        return data
class Client(object):
    """PoD Logging streaming client.

    Connects to the logging websocket endpoint described by *configuration*
    and either prints each received message or appends it to a file.
    """

    def __init__(self, configuration, credentials, logger):
        self.config = configuration
        self.credentials = credentials  # (cid, bearer-token) tuple
        self.logger = logger

    def _patch_ABNF(self):
        """Monkey-patch websocket.ABNF.validate so frames carrying the RSV1
        (compression) bit pass validation; the original validator is stashed
        on validate.proto for delegation."""
        six.PY3 = False
        validate.proto = websocket.ABNF.validate
        websocket.ABNF.validate = validate

    def _get_url(self, cid):
        """Build the stream URL for connection id *cid*.

        :raises ValueError: when the configured API version is unsupported.
        """
        if self.config.api == 'v1':
            url = '%s://%s:%s/v1/stream?cid=%s&type=%s' % (
                self.config.protocol, self.config.hostname, self.config.port,
                cid, self.config.msg_type)
            if self.config.since_time:
                url += '&sinceTime=' + self.config.since_time
            if self.config.to_time:
                url += '&toTime=' + self.config.to_time
        else:
            # Bug fix: the original message contained a bare '%s' placeholder
            # with no argument; interpolate the offending API version.
            raise ValueError(
                '%s PoD Logging API is not supported' % self.config.api)
        return url

    def _pinger(fn):
        """Decorator: run *fn* while a background thread pings the server
        every ws.ping_interval seconds (no-op when the interval is 0)."""
        def METHOD_NAME(self, ws):
            thread = event = None
            if ws.ping_interval:
                event = threading.Event()
                thread = threading.Thread(target=self._ping, args=(ws, event))
                thread.start()
            try:
                return fn(self, ws)
            finally:
                # Bug fix: Thread.isAlive() was removed in Python 3.9;
                # is_alive() is the long-standing spelling.
                if thread and thread.is_alive():
                    event.set()
                    thread.join()
        return METHOD_NAME

    def _ping(self, ws, event):
        """Ping-loop body; exits when *event* is set or a ping fails."""
        interval = ws.ping_interval
        while not event.wait(interval):
            try:
                self.logger.debug('Pinging websocket server')
                ws.ping()
            except Exception as e:
                self.logger.warning('send_ping routine terminated: %s', e)
                break

    def create_connection(self):
        """Open and return the websocket connection described by the config."""
        skip_utf8_validation = False
        cid, token = self.credentials
        url = self._get_url(cid)
        header = {'Authorization': 'Bearer %s' % (token,)}
        if self.config.websocket_extensions:
            # Compressed frames cannot be UTF-8 validated before inflation.
            skip_utf8_validation = True
            header['Sec-WebSocket-Extensions'] = self.config.websocket_extensions
        sslopt = {}
        if self.config.protocol.lower() == 'wss':
            sslopt = {'cert_reqs': ssl.CERT_REQUIRED,
                      'ca_certs': certifi.where(),
                      'check_hostname': True}
        sockopt = ((socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),)
        enable_multithread = True if self.config.ping_interval else False
        websocket.enableTrace(self.config.trace)
        return websocket.create_connection(
            url,
            header=header,
            sockopt=sockopt,
            sslopt=sslopt,
            enable_multithread=enable_multithread,
            skip_utf8_validation=skip_utf8_validation,
            ping_interval=self.config.ping_interval,
            http_proxy_host=self.config.http_proxy_host,
            http_proxy_port=self.config.http_proxy_port,
            class_=WebSocket)

    @_pinger
    def handle_connection(self, ws):
        """Receive and emit messages until the peer closes or an error occurs."""
        # TODO: Error handling - 403, etc
        try:
            while ws.connected:
                r, _, _ = select.select((ws.sock,), (), ())
                if r:
                    message = ws.recv()
                    if len(message) < 1:
                        break
                    if self.config.filename:
                        self.logger.info("writing to file")
                        file_path = self.config.filename
                        with open(file_path, "ab") as output_file:
                            output_file.write(message)
                    else:
                        print(message)
                else:
                    break
        except select.error as e:
            # Bug fix: on Python 3 select.error is OSError and is not
            # subscriptable, so `e[0]` raised TypeError; use e.errno.
            if e.errno != errno.EINTR:
                self.logger.warning('I/O error: %s', e)

    def run(self):
        """Entry point: patch the frame validator, connect, stream, clean up."""
        self.logger.info('PoD Logging client is starting up')
        self._patch_ABNF()
        try:
            if not self.config.hostname:
                raise ValueError('PoD Logging host is not set.')
            with closing(self.create_connection()) as ws:
                self.handle_connection(ws)
        except socket.error as e:
            self.logger.warning('Connection error: %s', e)
        except websocket.WebSocketException as e:
            self.logger.warning('Protocol error: %s', e)
        except KeyboardInterrupt:
            pass
        except ValueError as e:
            self.logger.warning(
                'Missing or invalid configuration value: %s', e)
        finally:
            self.logger.info('PoD Logging client is stopped')
class Pinger(object):
def __init__(self, func):
self.func = func
def __call__(self, *args):
ws = args[0]
thread = event = None
if ws.ping_interval:
event = threading.Event()
thread = threading.Thread(target=self._ping, args=(ws, event))
thread.start()
try:
return self.func(*args)
finally:
if thread and thread.isAlive():
event.set()
thread.join()
def _ping(self, ws, event):
interval = ws.ping_interval
while not event.wait(interval):
try:
# logger.debug('Pinging websocket server')
ws.ping()
except Exception as e:
# logger.warning('send_ping routine terminated: %s', e)
brea |
5,867 | dates for week |
# Copyright 2010-2016 Jaap Karssenberg <jaap.karssenberg@gmail.com>
'''Thin wrapper for 'datetime' module from the standard library.
Provides timezone info for the local time. Based on example code
from standard library datetime documentation.
Main usage of this module is the function L{now()}. It imports all
from the standard datetime, so it can be used as a transparant
replacement.
Also adds a L{strfcal()} method and extends L{strftime()} to deal
with weeknumbers correctly.
'''
import re
import locale
from datetime import *
import logging
logger = logging.getLogger('zim')
def now():
	'''Current local time as a timezone-aware C{datetime}.

	Equivalent to C{datetime.now()} but carrying L{LocalTimezone} info;
	microseconds are dropped so C{isoformat()} output stays compact.
	'''
	current = datetime.now(LocalTimezone())
	return current.replace(microsecond=0)
# A class capturing the platform's idea of local time.
# `time` exposes the C library's DST bookkeeping; underscored to avoid
# clashing with the wildcard `datetime` import above.
import time as _time

ZERO = timedelta(0)
# Standard (non-DST) offset from UTC; time.timezone is seconds WEST of UTC,
# hence the sign flip.
STDOFFSET = timedelta(seconds = -_time.timezone)
if _time.daylight:
	DSTOFFSET = timedelta(seconds = -_time.altzone)
else:
	DSTOFFSET = STDOFFSET

# Size of the DST adjustment (zero where DST is not observed).
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(tzinfo):
	'''Concrete C{tzinfo} for the platform's local time, following the
	example implementation in the standard library datetime docs.
	'''

	def utcoffset(self, dt):
		# DST-adjusted offset from UTC.
		return DSTOFFSET if self._isdst(dt) else STDOFFSET

	def dst(self, dt):
		# Size of the DST adjustment; zero outside DST.
		return DSTDIFF if self._isdst(dt) else ZERO

	def tzname(self, dt):
		return _time.tzname[self._isdst(dt)]

	def _isdst(self, dt):
		# Round-trip through mktime/localtime with tm_isdst = -1 and let
		# the C library decide whether DST applies at that instant.
		timetuple = (dt.year, dt.month, dt.day,
		             dt.hour, dt.minute, dt.second,
		             dt.weekday(), 0, -1)
		local = _time.localtime(_time.mktime(timetuple))
		return local.tm_isdst > 0
# Initialize setting for first day of the week. This is locale
# dependent, and the Gtk.Calendar widget already has good code to find it out.
# Unfortunately, the widget keeps that data private *%#*$()()*) !
MONDAY = 0 # iso calendar starts week at Monday
SUNDAY = 6
FIRST_DAY_OF_WEEK = None # resolved lazily by init_first_day_of_week()
def init_first_day_of_week():
	'''Resolve the module-global C{FIRST_DAY_OF_WEEK} (MONDAY or SUNDAY)
	from the locale.

	Tries the optional C{babel} package first; on any failure it falls
	back to the translatable "calendar:week_start" marker string, the
	same trick gtkcalendar.c uses. Requires C{_()} (gettext) to be
	installed in builtins for the fallback path.
	'''
	global FIRST_DAY_OF_WEEK
	try:
		import babel
		mylocale = babel.Locale(locale.getdefaultlocale()[0])
		if mylocale.first_week_day == 0:
			FIRST_DAY_OF_WEEK = MONDAY
		else:
			FIRST_DAY_OF_WEEK = SUNDAY
		logger.debug('According to babel first day of week is %i', FIRST_DAY_OF_WEEK)
	except Exception as e:
		# Only log loudly when babel is present but misbehaves; a plain
		# ImportError just means the optional dependency is missing.
		if not isinstance(e, ImportError):
			logger.exception('Exception while loading \'babel\' library for first day of week')
		# Fallback gleaned from gtkcalendar.c - hence the inconsistency
		# with weekday numbers in iso calendar...
		t = _("calendar:week_start:0")
		# T: Translate to "calendar:week_start:0" if you want Sunday to be the first day of the week or to "calendar:week_start:1" if you want Monday to be the first day of the week
		if t[-1] == '0':
			FIRST_DAY_OF_WEEK = SUNDAY
		elif t[-1] == '1':
			FIRST_DAY_OF_WEEK = MONDAY
		else:
			logger.warning("Whoever translated 'calendar:week_start:0' did so wrongly.")
			FIRST_DAY_OF_WEEK = SUNDAY
def METHOD_NAME(year, week):
	'''Returns the first and last day of the week for a given
	week number of a given year.

	@param year: year as int (e.g. 2012)
	@param week: week number as int (0 .. 53)
	@returns: a 2-tuple of:
	  - a C{datetime.date} object for the start date of the week
	  - a C{datetime.date} object for the end date of the week

	@note: first day of the week can be either C{MONDAY} or C{SUNDAY},
	this is configured in C{FIRST_DAY_OF_WEEK} based on the locale.
	'''
	# Note that the weeknumber in the isocalendar does NOT depend on the
	# first day being Sunday or Monday, but on the first Thursday in the
	# new year. See datetime.isocalendar() for details.
	# If the year starts with e.g. a Friday, January 1st still belongs
	# to week 53 of the previous year.
	# Day of week in isocalendar starts with 1 for Mon and is 7 for Sun,
	# and week starts on Monday.
	if FIRST_DAY_OF_WEEK is None:
		init_first_day_of_week()
	jan1 = date(year, 1, 1)
	_, jan1_week, jan1_weekday = jan1.isocalendar()
	# `days` is how far jan1 lies past the locale's week start, so that
	# `jan1 - days` lands on the start of jan1's week.
	if FIRST_DAY_OF_WEEK == MONDAY:
		days = jan1_weekday - 1
		# if Jan 1 is a Monday, days is 0
	else:
		days = jan1_weekday
		# if Jan 1 is a Monday, days is 1
		# for Sunday it becomes 7 (or -1 week)
	if jan1_week == 1:
		weeks = week - 1
	else:
		# Jan 1st is still wk53 of the previous year
		weeks = week
	start = jan1 + timedelta(days=-days, weeks=weeks)
	end = start + timedelta(days=6)
	return start, end
def weekcalendar(date):
	'''Get the year, week number and week day for a specific date.
	Like C{datetime.date.isocalendar()} but takes into account
	C{FIRST_DAY_OF_WEEK} correctly.

	@param date: a C{datetime.date} or C{datetime.datetime} object
	@returns: a year, a week number and a weekday as integers

	The weekday numbering depends on locale, 1 is always first day
	of the week, either a Sunday or a Monday.
	'''
	# Both strftime %W and %U are not correct, they use differnt
	# week number count than the isocalendar. See datetime
	# module for details.
	# In short Jan 1st can still be week 53 of the previous year
	# So we can use isocalendar(), however this does not take
	# into accout FIRST_DAY_OF_WEEK, see comment in dates_for_week()
	if FIRST_DAY_OF_WEEK is None:
		init_first_day_of_week()
	year, week, weekday = date.isocalendar()

	if FIRST_DAY_OF_WEEK == SUNDAY and weekday == 7:
		# iso calendar gives us the week ending this sunday,
		# we want the next week
		monday = date + timedelta(days=1)
		year, week, weekday = monday.isocalendar()
	elif FIRST_DAY_OF_WEEK == SUNDAY:
		# Shift Mon..Sat (1..6) to 2..7 so Sunday can be day 1.
		weekday += 1

	return year, week, weekday
def strfcal(format, date):
	'''Like C{strftime()} but for locale-aware week-based codes.

	Week 1 is the week whose Thursday falls in the new year, so dates
	late in December may already belong to week 1 of the next year.
	The locale decides whether a week starts on Monday (ISO) or Sunday.

	Supported codes:
	  - C{%w}: day of the week [1..7]; day 1 is Monday or Sunday
	    depending on the locale
	  - C{%W}: weeknumber depending on the locale
	  - C{%Y}: year with century, belonging to that weeknumber
	  - C{%%}: a literal %

	Unsupported codes are passed through unchanged.
	'''
	# TODO: may want to add version of the codes that allow forcing
	# Monday or Sunday as first day, e.g. using %u %U %X and %v %V %Z
	year, week, weekday = weekcalendar(date)
	substitutions = {
		'%w': str(weekday),
		'%W': '%02d' % week,
		'%Y': str(year),
		'%%': '%',
	}

	def _expand(match):
		token = match.group(0)
		# Unknown codes fall through untouched.
		return substitutions.get(token, token)

	return re.sub(r'%.', _expand, format)
def strftime(format, date):
# TODO: deprecate this function
return date.strftime(format)
if __name__ == '__main__': #pragma: no cover
	# Manual smoke test: install a dummy translator first, because
	# init_first_day_of_week() consults the translated
	# "calendar:week_start" string via _().
	import gettext
	gettext.install('zim', None, names=('_', 'gettext', 'ngettext'))
	init_first_day_of_week()
	if FIRST_DAY_OF_WEEK == SUNDAY:
		print('First day of week: Sunday')
	else:
		print('First day of week: Monday')
	print('Now:', now().isoformat(), strftime("%z, %Z", now()))
	print('Calendar:', strfcal('day %w of week %W %Y', now()))
5,868 | get module name | """
Module loader, adapted for Cobbler usage
"""
# SPDX-License-Identifier: GPL-2.0-or-later
# SPDX-FileCopyrightText: Copyright 2006-2009, Red Hat, Inc and Others
# SPDX-FileCopyrightText: Adrian Likins <alikins@redhat.com>
# SPDX-FileCopyrightText: Michael DeHaan <michael.dehaan AT gmail>
import glob
import logging
import os
from importlib import import_module
from types import ModuleType
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
from cobbler.cexceptions import CX
from cobbler.utils import log_exc
# add cobbler/modules to python path
import cobbler # isort: skip
if TYPE_CHECKING:
from cobbler.api import CobblerAPI
class ModuleLoader:
    """
    Class for dynamically loading Cobbler Plugins on startup
    """

    def __init__(self, api: "CobblerAPI", module_path: str = ""):
        """
        Constructor to initialize the ModuleLoader class.

        :param api: CobblerAPI
        :param module_path: The path which should be considered as the root module path. If this an empty string, try to
                            auto-detect the path.
        """
        self.logger = logging.getLogger()
        # Default to the "modules" directory next to the installed cobbler package.
        self.mod_path = os.path.join(
            os.path.abspath(os.path.dirname(cobbler.__file__)), "modules"
        )
        if module_path:
            self.mod_path = module_path
        # module name -> imported module object
        self.module_cache: Dict[str, ModuleType] = {}
        # category -> {module name -> imported module object}
        self.modules_by_category: Dict[str, Dict[str, ModuleType]] = {}
        self.api = api

    def load_modules(
        self,
    ) -> Tuple[Dict[str, ModuleType], Dict[str, Dict[str, ModuleType]]]:
        """
        Load the modules from the path handed to the function into Cobbler.

        :return: Two dictionary's with the dynamically loaded modules.
        """
        filenames = glob.glob(f"{self.mod_path}/*.py")
        filenames += glob.glob(f"{self.mod_path}/*.pyc")
        filenames += glob.glob(f"{self.mod_path}/*.pyo")
        # Allow recursive modules
        filenames += glob.glob(f"{self.mod_path}/**/*.py")
        filenames += glob.glob(f"{self.mod_path}/**/*.pyc")
        filenames += glob.glob(f"{self.mod_path}/**/*.pyo")
        for filename in filenames:
            # Turn an absolute file path into a dotted module name relative
            # to the module root, e.g. ".../modules/authn/pam.py" -> "authn.pam".
            basename = filename.replace(self.mod_path, "")
            modname = ""
            if "__pycache__" in basename or "__init__.py" in basename:
                continue
            if basename[0] == "/":
                basename = basename[1:]
            basename = basename.replace("/", ".")
            if basename[-3:] == ".py":
                modname = basename[:-3]
            elif basename[-4:] in [".pyc", ".pyo"]:
                modname = basename[:-4]
            self.__import_module(modname)
        return self.module_cache, self.modules_by_category

    def __import_module(self, modname: str) -> None:
        """
        Import a module which is not part of the core functionality of Cobbler.

        Modules without a ``register()`` callable are skipped; modules whose
        ``register()`` returns a falsy category are imported but not cached.

        :param modname: The name of the module.
        """
        try:
            blip = import_module(f"cobbler.modules.{modname}")
            if not hasattr(blip, "register"):
                self.logger.debug(
                    "%s.%s is not a proper module", self.mod_path, modname
                )
                return
            category = blip.register()
            if category:
                self.module_cache[modname] = blip
                if category not in self.modules_by_category:
                    self.modules_by_category[category] = {}
                self.modules_by_category[category][modname] = blip
        except Exception:
            # A broken plugin must not prevent the remaining ones from loading.
            self.logger.info("Exception raised when loading module %s", modname)
            log_exc()

    def get_module_by_name(self, name: str) -> Optional[ModuleType]:
        """
        Get a module by its name. The category of the module is not needed.

        :param name: The name of the module.
        :return: The module asked by the function parameter, or None when unknown.
        """
        return self.module_cache.get(name, None)

    def METHOD_NAME(
        self, category: str, field: str, fallback_module_name: Optional[str] = None
    ) -> str:
        """
        Get module name from the settings.

        :param category: Field category in configuration file.
        :param field: Field in configuration file
        :param fallback_module_name: Default value used if category/field is not found in configuration file
        :raises FileNotFoundError: If unable to find configuration file.
        :raises ValueError: If the category does not exist or the field is empty.
        :raises CX: If the field could not be read and no fallback_module_name was given.
        :returns: The name of the module.
        """
        # FIXME: We can't enabled this check since it is to strict atm.
        # if category not in MODULES_BY_CATEGORY:
        #     raise ValueError("category must be one of: %s" % MODULES_BY_CATEGORY.keys())

        # Bug fix: "".isspace() is False, so an empty field previously slipped
        # past this guard although the docstring promises a ValueError.
        if not field or field.isspace():
            raise ValueError('field cannot be empty. Did you mean "module" maybe?')

        try:
            # NOTE(review): only the "module" key is read here regardless of
            # *field*; confirm whether *field* should be used as the key.
            value = self.api.settings().modules.get(category, {}).get("module")
            if value is None:
                raise ModuleNotFoundError("Requested module could not be retrieved")
        except Exception as exception:
            if fallback_module_name is None:
                raise CX(
                    f"Cannot find config file setting for: {category}.{field}"
                ) from exception
            value = fallback_module_name
            self.logger.warning(
                'Requested module "%s.%s" not found. Using fallback module: "%s"',
                category,
                field,
                value,
            )
        return value

    def get_module_from_file(
        self, category: str, field: str, fallback_module_name: Optional[str] = None
    ) -> ModuleType:
        """
        Get Python module, based on name defined in configuration file

        :param category: field category in configuration file
        :param field: field in configuration file
        :param fallback_module_name: default value used if category/field is not found in configuration file
        :raises CX: If unable to load Python module
        :returns: A Python module.
        """
        module_name = self.METHOD_NAME(category, field, fallback_module_name)
        requested_module = self.module_cache.get(module_name, None)
        if requested_module is None:
            raise CX(f"Failed to load module for {category}.{field}")
        return requested_module

    def get_modules_in_category(self, category: str) -> List[ModuleType]:
        """
        Return all modules of a module category.

        :param category: The module category.
        :return: A list of all modules of that category. Returns an empty list if the Category does not exist.
        """
        if category not in self.modules_by_category:
            # FIXME: We can't enabled this check since it is to strict atm.
            # raise ValueError("category must be one of: %s" % MODULES_BY_CATEGORY.keys())
            return []
        return list(self.modules_by_category[category].values())
5,869 | test fcm message user settings critical | from unittest.mock import patch
import pytest
from apps.base.models import UserNotificationPolicy, UserNotificationPolicyLogRecord
from apps.mobile_app.models import FCMDevice, MobileAppUserSettings
from apps.mobile_app.tasks.new_alert_group import _get_fcm_message, notify_user_about_new_alert_group
MOBILE_APP_BACKEND_ID = 5
CLOUD_LICENSE_NAME = "Cloud"
OPEN_SOURCE_LICENSE_NAME = "OpenSource"
@patch("apps.mobile_app.tasks.new_alert_group.send_push_notification")
@pytest.mark.django_db
def test_notify_user_about_new_alert_group(
    mock_send_push_notification,
    make_organization_and_user,
    make_user_notification_policy,
    make_alert_receive_channel,
    make_channel_filter,
    make_alert_group,
    make_alert,
):
    """A user with a registered FCM device gets exactly one push for a new
    alert group."""
    # create a user and connect a mobile device
    organization, user = make_organization_and_user()
    FCMDevice.objects.create(user=user, registration_id="test_device_id")
    # set up notification policy and alert group
    notification_policy = make_user_notification_policy(
        user,
        UserNotificationPolicy.Step.NOTIFY,
        notify_by=MOBILE_APP_BACKEND_ID,
    )
    alert_receive_channel = make_alert_receive_channel(organization=organization)
    channel_filter = make_channel_filter(alert_receive_channel)
    alert_group = make_alert_group(alert_receive_channel, channel_filter=channel_filter)
    make_alert(alert_group=alert_group, raw_request_data={})
    notify_user_about_new_alert_group(
        user_pk=user.pk,
        alert_group_pk=alert_group.pk,
        notification_policy_pk=notification_policy.pk,
        critical=False,
    )
    mock_send_push_notification.assert_called_once()
@patch("apps.mobile_app.tasks.new_alert_group.send_push_notification")
@pytest.mark.django_db
def test_notify_user_about_new_alert_group_no_device_connected(
    mock_send_push_notification,
    make_organization_and_user,
    make_user_notification_policy,
    make_alert_receive_channel,
    make_channel_filter,
    make_alert_group,
    make_alert,
):
    """Without a registered FCM device no push is sent and a failed personal
    notification log record is written instead."""
    # create a user without mobile device
    organization, user = make_organization_and_user()
    # set up notification policy and alert group
    notification_policy = make_user_notification_policy(
        user,
        UserNotificationPolicy.Step.NOTIFY,
        notify_by=MOBILE_APP_BACKEND_ID,
    )
    alert_receive_channel = make_alert_receive_channel(organization=organization)
    channel_filter = make_channel_filter(alert_receive_channel)
    alert_group = make_alert_group(alert_receive_channel, channel_filter=channel_filter)
    make_alert(alert_group=alert_group, raw_request_data={})
    notify_user_about_new_alert_group(
        user_pk=user.pk,
        alert_group_pk=alert_group.pk,
        notification_policy_pk=notification_policy.pk,
        critical=False,
    )
    mock_send_push_notification.assert_not_called()
    # The failure must be recorded against the alert group.
    log_record = alert_group.personal_log_records.last()
    assert log_record.type == UserNotificationPolicyLogRecord.TYPE_PERSONAL_NOTIFICATION_FAILED
@pytest.mark.django_db
def test_fcm_message_user_settings(
    make_organization_and_user, make_alert_receive_channel, make_alert_group, make_alert
):
    """A non-critical FCM message carries the user's notification settings
    and a non-critical APNS sound.

    NOTE(review): the asserted sound/volume values are presumably the
    MobileAppUserSettings defaults (no settings object is created here) --
    confirm against the model.
    """
    organization, user = make_organization_and_user()
    device = FCMDevice.objects.create(user=user, registration_id="test_device_id")
    alert_receive_channel = make_alert_receive_channel(organization=organization)
    alert_group = make_alert_group(alert_receive_channel)
    make_alert(alert_group=alert_group, raw_request_data={})
    message = _get_fcm_message(alert_group, user, device, critical=False)
    # Check user settings are passed to FCM message
    assert message.data["default_notification_sound_name"] == "default_sound.mp3"
    assert message.data["default_notification_volume_type"] == "constant"
    assert message.data["default_notification_volume_override"] == "false"
    assert message.data["default_notification_volume"] == "0.8"
    assert message.data["important_notification_sound_name"] == "default_sound_important.mp3"
    assert message.data["important_notification_volume_type"] == "constant"
    assert message.data["important_notification_volume"] == "0.8"
    assert message.data["important_notification_volume_override"] == "true"
    assert message.data["important_notification_override_dnd"] == "true"
    assert message.data["type"] == "oncall.message"
    # Check APNS notification sound is set correctly
    apns_sound = message.apns.payload.aps.sound
    assert apns_sound.critical is False
    assert apns_sound.name == "default_sound.aiff"
    assert apns_sound.volume is None  # APNS doesn't allow to specify volume for non-critical notifications
@pytest.mark.django_db
def METHOD_NAME(
    make_organization_and_user, make_alert_receive_channel, make_alert_group, make_alert
):
    """A critical FCM message uses the "important" sound, a critical APNS
    sound with volume, and the critical interruption level."""
    organization, user = make_organization_and_user()
    device = FCMDevice.objects.create(user=user, registration_id="test_device_id")
    alert_receive_channel = make_alert_receive_channel(organization=organization)
    alert_group = make_alert_group(alert_receive_channel)
    make_alert(alert_group=alert_group, raw_request_data={})
    message = _get_fcm_message(alert_group, user, device, critical=True)
    # Check user settings are passed to FCM message
    assert message.data["default_notification_sound_name"] == "default_sound.mp3"
    assert message.data["default_notification_volume_type"] == "constant"
    assert message.data["default_notification_volume_override"] == "false"
    assert message.data["default_notification_volume"] == "0.8"
    assert message.data["important_notification_sound_name"] == "default_sound_important.mp3"
    assert message.data["important_notification_volume_type"] == "constant"
    assert message.data["important_notification_volume"] == "0.8"
    assert message.data["important_notification_volume_override"] == "true"
    assert message.data["important_notification_override_dnd"] == "true"
    assert message.data["type"] == "oncall.critical_message"
    # Check APNS notification sound is set correctly
    apns_sound = message.apns.payload.aps.sound
    assert apns_sound.critical is True
    assert apns_sound.name == "default_sound_important.aiff"
    assert apns_sound.volume == 0.8
    assert message.apns.payload.aps.custom_data["interruption-level"] == "critical"
@pytest.mark.django_db
def test_fcm_message_user_settings_critical_override_dnd_disabled(
    make_organization_and_user, make_alert_receive_channel, make_alert_group, make_alert
):
    """When the user disables DND override, a critical message is downgraded
    to a non-critical APNS sound with the time-sensitive interruption level."""
    organization, user = make_organization_and_user()
    device = FCMDevice.objects.create(user=user, registration_id="test_device_id")
    alert_receive_channel = make_alert_receive_channel(organization=organization)
    alert_group = make_alert_group(alert_receive_channel)
    make_alert(alert_group=alert_group, raw_request_data={})
    # Disable important notification override DND
    MobileAppUserSettings.objects.create(user=user, important_notification_override_dnd=False)
    message = _get_fcm_message(alert_group, user, device, critical=True)
    # Check user settings are passed to FCM message
    assert message.data["important_notification_override_dnd"] == "false"
    # Check APNS notification sound is set correctly
    apns_sound = message.apns.payload.aps.sound
    assert apns_sound.critical is False
    assert message.apns.payload.aps.custom_data["interruption-level"] == "time-sensitive"
5,870 | image verify | #!/usr/bin/env python3
# group: rw
#
# Tests for shrinking images
#
# Copyright (c) 2016-2017 Parallels International GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os, random, iotests, struct, qcow2, sys
from iotests import qemu_img, qemu_io, image_size
test_img = os.path.join(iotests.test_dir, 'test.img')
check_img = os.path.join(iotests.test_dir, 'check.img')
def size_to_int(str):
    """Convert a size string like '16M' into a byte count.

    The last character must be one of B/K/M/G/T (powers of 1024).
    """
    exponent = ['B', 'K', 'M', 'G', 'T'].index(str[-1:])
    return int(str[:-1]) * 1024 ** exponent
class ShrinkBaseClass(iotests.QMPTestCase):
image_len = '128M'
shrink_size = '10M'
chunk_size = '16M'
refcount_bits = '16'
def __qcow2_check(self, filename):
entry_bits = 3
entry_size = 1 << entry_bits
l1_mask = 0x00fffffffffffe00
div_roundup = lambda n, d: (n + d - 1) // d
def split_by_n(data, n):
for x in range(0, len(data), n):
yield struct.unpack('>Q', data[x:x + n])[0] & l1_mask
def check_l1_table(h, l1_data):
l1_list = list(split_by_n(l1_data, entry_size))
real_l1_size = div_roundup(h.size,
1 << (h.cluster_bits*2 - entry_size))
used, unused = l1_list[:real_l1_size], l1_list[real_l1_size:]
self.assertTrue(len(used) != 0, "Verifying l1 table content")
self.assertFalse(any(unused), "Verifying l1 table content")
def check_reftable(fd, h, reftable):
for offset in split_by_n(reftable, entry_size):
if offset != 0:
fd.seek(offset)
cluster = fd.read(1 << h.cluster_bits)
self.assertTrue(any(cluster), "Verifying reftable content")
with open(filename, "rb") as fd:
h = qcow2.QcowHeader(fd)
fd.seek(h.l1_table_offset)
l1_table = fd.read(h.l1_size << entry_bits)
fd.seek(h.refcount_table_offset)
reftable = fd.read(h.refcount_table_clusters << h.cluster_bits)
check_l1_table(h, l1_table)
check_reftable(fd, h, reftable)
def __raw_check(self, filename):
pass
image_check = {
'qcow2' : __qcow2_check,
'raw' : __raw_check
}
def setUp(self):
if iotests.imgfmt == 'raw':
qemu_img('create', '-f', iotests.imgfmt, test_img, self.image_len)
qemu_img('create', '-f', iotests.imgfmt, check_img,
self.shrink_size)
else:
qemu_img('create', '-f', iotests.imgfmt,
'-o', 'cluster_size=' + self.cluster_size +
',refcount_bits=' + self.refcount_bits,
test_img, self.image_len)
qemu_img('create', '-f', iotests.imgfmt,
'-o', 'cluster_size=%s'% self.cluster_size,
check_img, self.shrink_size)
qemu_io('-c', 'write -P 0xff 0 ' + self.shrink_size, check_img)
def tearDown(self):
os.remove(test_img)
os.remove(check_img)
def METHOD_NAME(self):
self.assertEqual(image_size(test_img), image_size(check_img),
"Verifying image size")
self.image_check[iotests.imgfmt](self, test_img)
if iotests.imgfmt == 'raw':
return
qemu_img('check', test_img)
def test_empty_image(self):
qemu_img('resize', '-f', iotests.imgfmt, '--shrink', test_img,
self.shrink_size)
self.assertEqual(
qemu_io('-c', 'read -P 0x00 %s'%self.shrink_size, test_img),
qemu_io('-c', 'read -P 0x00 %s'%self.shrink_size, check_img),
"Verifying image content")
self.METHOD_NAME()
    def test_sequential_write(self):
        # Fill the image chunk by chunk in order, shrink it, then compare
        # the surviving data against the pre-filled reference image.
        for offs in range(0, size_to_int(self.image_len),
                          size_to_int(self.chunk_size)):
            qemu_io('-c', 'write -P 0xff %d %s' % (offs, self.chunk_size),
                    test_img)
        qemu_img('resize', '-f', iotests.imgfmt, '--shrink', test_img,
                 self.shrink_size)
        qemu_img("compare", test_img, check_img)
        self.METHOD_NAME()
    def test_random_write(self):
        # Same as test_sequential_write, but write the chunks in random
        # order so cluster allocation patterns differ between runs.
        offs_list = list(range(0, size_to_int(self.image_len),
                               size_to_int(self.chunk_size)))
        random.shuffle(offs_list)
        for offs in offs_list:
            qemu_io('-c', 'write -P 0xff %d %s' % (offs, self.chunk_size),
                    test_img)
        qemu_img('resize', '-f', iotests.imgfmt, '--shrink', test_img,
                 self.shrink_size)
        qemu_img("compare", test_img, check_img)
        self.METHOD_NAME()
class TestShrink512(ShrinkBaseClass):
    # Small geometry with the minimum qcow2 cluster size (512 bytes).
    image_len = '3M'
    shrink_size = '1M'
    chunk_size = '256K'
    cluster_size = '512'
    refcount_bits = '64'
class TestShrink64K(ShrinkBaseClass):
    # Default qcow2 cluster size; inherits the base image geometry.
    cluster_size = '64K'
class TestShrink1M(ShrinkBaseClass):
    # Largest cluster size together with the minimum refcount width.
    cluster_size = '1M'
    refcount_bits = '1'
# Prevent unittest from discovering and running the abstract base class.
ShrinkBaseClass = None

if __name__ == '__main__':
    # BUG FIX: a stray ' |' token at the end of this call (extraction
    # residue) made the module a SyntaxError; removed.
    iotests.main(supported_fmts=['raw', 'qcow2'],
                 supported_protocols=['file'],
                 unsupported_imgopts=['compat'])
#
# File : codelite.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2020, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2020-10-14 LiuMin Add copyright information
#
import os
import sys
import string
import building
import rtconfig
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import SubElement
from utils import _make_path_relative
from utils import xml_indent
import utils
fs_encoding = sys.getfilesystemencoding()
def CLSetCFlags(root, flags):
    """Store the C compiler options on the project's Compiler node."""
    compiler = root.find('Settings/Configuration/Compiler')
    compiler.attrib['C_Options'] = flags
def CLSetCxxFlags(root, flags):
    """Store the C++ compiler options on the project's Compiler node."""
    compiler = root.find('Settings/Configuration/Compiler')
    compiler.attrib['Options'] = flags
def CLSetAsFlags(root, flags):
    """Store the assembler options on the project's Compiler node."""
    compiler = root.find('Settings/Configuration/Compiler')
    compiler.attrib['Assembler'] = flags
def CLAddIncludePath(root, path):
    """Append an IncludePath entry under the Compiler settings."""
    compiler = root.find('Settings/Configuration/Compiler')
    SubElement(compiler, 'IncludePath').set('Value', path)
def CLAddPreprocessor(root, value):
    """Append a Preprocessor (macro definition) entry under the Compiler settings."""
    compiler = root.find('Settings/Configuration/Compiler')
    SubElement(compiler, 'Preprocessor').set('Value', value)
def CLSetLdFlags(root, flags):
    """Store the linker options on the project's Linker node."""
    linker = root.find('Settings/Configuration/Linker')
    linker.attrib['Options'] = flags
def CLAddLibrary_path(root, path):
    """Append a LibraryPath entry under the Linker settings."""
    linker = root.find('Settings/Configuration/Linker')
    SubElement(linker, 'LibraryPath').set('Value', path)
def CLAddLibrary(root, lib):
    """Append a Library entry under the Linker settings."""
    linker = root.find('Settings/Configuration/Linker')
    SubElement(linker, 'Library').set('Value', lib)
def CLAddFile(root, file_path):
    """Insert *file_path* into the project tree, creating one
    VirtualDirectory node per path component as needed.

    Files with no directory component are grouped under a directory named
    after the current working directory; '..' components are skipped.
    """
    file_path = file_path.replace('\\', '/')
    dir_names = file_path.split('/')[:-1]
    if not dir_names:
        # File sits at the project root: group it under the cwd's basename.
        dir_names = [os.path.abspath('.').replace('\\', '/').split('/')[-1]]
    parent = root
    for dir_name in dir_names:
        if dir_name == '..':
            continue
        # Reuse an existing VirtualDirectory with this name, if any.
        match = next(
            (child for child in parent.findall('VirtualDirectory')
             if child.attrib['Name'] == dir_name),
            None,
        )
        if match is None:
            match = SubElement(parent, 'VirtualDirectory')
            match.attrib['Name'] = dir_name
        parent = match
    # If every component was '..' we never descended, and the file is not
    # attached anywhere (matches the original behaviour).
    if parent is not root:
        SubElement(parent, 'File').set('Name', file_path)
def CLAddHeaderFiles(parent, program, project_path):
    """Collect header files ("h") reachable from *program* and add them to
    the project tree rooted at *parent*, with paths relative to
    *project_path*.
    """
    # Restrict the walker to header files only.  (The previous code first
    # assigned an empty list that was immediately overwritten -- removed.)
    utils.source_ext = ["h"]
    for item in program:
        utils.walk_children(item)
    # NOTE(review): utils.source_list is assumed to be populated by
    # walk_children; confirm it is reset between invocations.
    utils.source_list.sort()
    for f in utils.source_list:
        path = _make_path_relative(project_path, f)
        CLAddFile(parent, path)
def CLAddCFiles(parent, files, project_path):
    """Add each SCons source node in *files* to the project tree, using a
    path relative to *project_path*."""
    for node in files:
        fn = node.rfile()
        rel_dir = _make_path_relative(project_path, os.path.dirname(fn.abspath))
        CLAddFile(parent, os.path.join(rel_dir, fn.name))
def METHOD_NAME(project_name, project_path):
    """Generate ``<project_name>.workspace`` from the CodeLite workspace
    template, patching in the project name.

    Looks for ``codelite_template.workspace`` in the current directory
    first, then next to this script.
    """
    if os.path.isfile('codelite_template.workspace'):
        tree = etree.parse('codelite_template.workspace')
    else:
        tree = etree.parse(os.path.join(os.path.dirname(__file__), 'codelite_template.workspace'))
    root = tree.getroot()
    root.attrib['Name'] = project_name

    node = root.find('Project')
    node.attrib['Name'] = project_name
    node.attrib['Path'] = project_name + '.project'

    node = root.find('BuildMatrix').find('WorkspaceConfiguration').find('Project')
    node.attrib['Name'] = project_name

    xml_indent(root)
    # BUG FIX: etree.tostring(..., encoding='utf-8') returns *bytes*, which
    # cannot be written to a file opened in text mode (TypeError on
    # Python 3).  Serialize to a unicode string instead, and use a context
    # manager so the file is closed even if writing fails.
    with open(project_name + '.workspace', 'w') as out:
        out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        out.write(etree.tostring(root, encoding='unicode'))
def TargetCodelite(script, program):
    """Generate CodeLite workspace/project files for the current BSP.

    script : list of group dicts, each carrying a 'src' file list.
    program : SCons program nodes used to discover header files.
    """
    project_name = os.path.abspath('.').replace('\\', '/').split('/')[-1]
    project_path = os.path.abspath('.')
    METHOD_NAME(project_name, project_path)

    if os.path.isfile('codelite_template.project'):
        tree = etree.parse('codelite_template.project')
    else:
        tree = etree.parse(os.path.join(os.path.dirname(__file__), 'codelite_template.project'))
    root = tree.getroot()
    root.attrib['Name'] = project_name

    # add source files group by group, then the reachable headers
    for group in script:
        CLAddCFiles(root, group['src'], project_path)
    CLAddHeaderFiles(root, program, project_path)

    # write header include paths (deduplicated, sorted for stable output)
    if 'CPPPATH' in building.Env:
        paths = set()
        for path in building.Env['CPPPATH']:
            paths.add(_make_path_relative(project_path, os.path.normpath(path)))
        for path in sorted(paths):
            CLAddIncludePath(root, path)

    # BUG FIX: the defaults below were previously [] -- a missing key then
    # crashed the str.replace() calls (lists have no .replace).  Use ''.
    CLSetCFlags(root, building.Env.get('CFLAGS', ''))
    CLSetCxxFlags(root, building.Env.get('CFLAGS', ''))

    asflags = building.Env.get('ASFLAGS', '')
    # Strip gcc-only options CodeLite's assembler setting cannot take.
    for token in ('-ffunction-sections', '-fdata-sections', '-x',
                  '-Wa,', 'assembler-with-cpp'):
        asflags = asflags.replace(token, '')
    CLSetAsFlags(root, asflags)
    CLSetLdFlags(root, building.Env.get('LINKFLAGS', ''))

    for macro in building.Env.get('CPPDEFINES', []):
        for d in macro:
            CLAddPreprocessor(root, d)

    xml_indent(root)
    # BUG FIX: tostring(..., encoding='utf-8') yields bytes; write a unicode
    # string to the text-mode file instead, and close it via 'with'.
    with open(project_name + '.project', 'w') as out:
        out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        out.write(etree.tostring(root, encoding='unicode'))
from __future__ import annotations
__all__ = ["OpenGLPMobject", "OpenGLPGroup", "OpenGLPMPoint"]
import moderngl
import numpy as np
from manim.constants import *
from manim.mobject.opengl.opengl_mobject import OpenGLMobject
from manim.utils.bezier import interpolate
from manim.utils.color import BLACK, WHITE, YELLOW, color_gradient, color_to_rgba
from manim.utils.config_ops import _Uniforms
from manim.utils.iterables import resize_with_interpolation
class OpenGLPMobject(OpenGLMobject):
    """Point-cloud mobject rendered with the OpenGL ``true_dot`` shader.

    Points live in ``self.points`` (Nx3) with per-point colors in
    ``self.rgbas`` (Nx4).
    """

    shader_folder = "true_dot"
    # Scale for consistency with cairo units
    OPENGL_POINT_RADIUS_SCALE_FACTOR = 0.01
    shader_dtype = [
        ("point", np.float32, (3,)),
        ("color", np.float32, (4,)),
    ]
    point_radius = _Uniforms()

    def __init__(
        self, stroke_width=2.0, color=YELLOW, render_primitive=moderngl.POINTS, **kwargs
    ):
        self.stroke_width = stroke_width
        super().__init__(color=color, render_primitive=render_primitive, **kwargs)
        # Uniform consumed by the true_dot shader.
        self.point_radius = (
            self.stroke_width * OpenGLPMobject.OPENGL_POINT_RADIUS_SCALE_FACTOR
        )

    def reset_points(self):
        """Drop all points and reset colors to a single transparent entry."""
        self.rgbas = np.zeros((1, 4))
        self.points = np.zeros((0, 3))
        return self

    def get_array_attrs(self):
        return ["points", "rgbas"]

    def add_points(self, points, rgbas=None, color=None, opacity=None):
        """Add points.

        Points must be a Nx3 numpy array.
        Rgbas must be a Nx4 numpy array if it is not None; its length must
        match ``points``.
        """
        if rgbas is None and color is None:
            color = YELLOW
        self.append_points(points)
        # rgbas array will have been resized with points
        if color is not None:
            if opacity is None:
                opacity = self.rgbas[-1, 3]
            new_rgbas = np.repeat([color_to_rgba(color, opacity)], len(points), axis=0)
        else:
            # BUG FIX: this length check previously lived in an ``elif``
            # placed after ``elif rgbas is not None`` and was therefore
            # unreachable -- mismatched rgbas were silently accepted.
            if len(rgbas) != len(points):
                raise ValueError("points and rgbas must have same length")
            new_rgbas = rgbas
        self.rgbas = np.append(self.rgbas, new_rgbas, axis=0)
        return self

    def thin_out(self, factor=5):
        """
        Removes all but every nth point for n = factor
        """
        for mob in self.family_members_with_points():
            num_points = mob.get_num_points()

            def thin_func():
                return np.arange(0, num_points, factor)

            if len(mob.points) == len(mob.rgbas):
                mob.set_rgba_array_direct(mob.rgbas[thin_func()])
            mob.set_points(mob.points[thin_func()])
        return self

    def set_color_by_gradient(self, *colors):
        """Distribute *colors* along the points as a linear gradient."""
        self.rgbas = np.array(
            list(map(color_to_rgba, color_gradient(*colors, self.get_num_points()))),
        )
        return self

    def set_colors_by_radial_gradient(
        self,
        center=None,
        radius=1,
        inner_color=WHITE,
        outer_color=BLACK,
    ):
        """Color points by their distance from *center*."""
        start_rgba, end_rgba = list(map(color_to_rgba, [inner_color, outer_color]))
        if center is None:
            center = self.get_center()
        for mob in self.family_members_with_points():
            # NOTE(review): distances are computed from self.points (not
            # mob.points) for every family member -- confirm intentional.
            distances = np.abs(self.points - center)
            alphas = np.linalg.norm(distances, axis=1) / radius
            mob.rgbas = np.array(
                [interpolate(start_rgba, end_rgba, alpha) for alpha in alphas],
            )
        return self

    def match_colors(self, pmobject):
        """Copy *pmobject*'s colors, resampled to this mobject's point count."""
        self.rgbas[:] = resize_with_interpolation(pmobject.rgbas, self.get_num_points())
        return self

    def fade_to(self, color, alpha, family=True):
        rgbas = interpolate(self.rgbas, color_to_rgba(color), alpha)
        for mob in self.submobjects:
            mob.fade_to(color, alpha, family)
        self.set_rgba_array_direct(rgbas)
        return self

    def filter_out(self, condition):
        """Remove every point for which *condition(point)* is true."""
        for mob in self.family_members_with_points():
            to_keep = ~np.apply_along_axis(condition, 1, mob.points)
            for key in mob.data:
                mob.data[key] = mob.data[key][to_keep]
        return self

    def sort_points(self, function=lambda p: p[0]):
        """
        function is any map from R^3 to R
        """
        for mob in self.family_members_with_points():
            indices = np.argsort(np.apply_along_axis(function, 1, mob.points))
            for key in mob.data:
                mob.data[key] = mob.data[key][indices]
        return self

    def ingest_submobjects(self):
        """Fold every family member's data arrays into this mobject."""
        for key in self.data:
            self.data[key] = np.vstack([sm.data[key] for sm in self.get_family()])
        return self

    def point_from_proportion(self, alpha):
        """Return the point a fraction *alpha* of the way along the cloud."""
        index = alpha * (self.get_num_points() - 1)
        return self.points[int(index)]

    def METHOD_NAME(self, pmobject, a, b):
        """Become the [a, b] slice (by proportion) of *pmobject*'s points."""
        lower_index = int(a * pmobject.get_num_points())
        upper_index = int(b * pmobject.get_num_points())
        for key in self.data:
            self.data[key] = pmobject.data[key][lower_index:upper_index]
        return self

    def get_shader_data(self):
        """Pack points and colors into the shader's structured dtype."""
        shader_data = np.zeros(len(self.points), dtype=self.shader_dtype)
        self.read_data_to_shader(shader_data, "point", "points")
        self.read_data_to_shader(shader_data, "color", "rgbas")
        return shader_data

    @staticmethod
    def get_mobject_type_class():
        return OpenGLPMobject
class OpenGLPGroup(OpenGLPMobject):
    # Container that groups several point-cloud mobjects into one.
    def __init__(self, *pmobs, **kwargs):
        if not all(isinstance(m, OpenGLPMobject) for m in pmobs):
            raise Exception("All submobjects must be of type OpenglPMObject")
        super().__init__(**kwargs)
        self.add(*pmobs)
    def fade_to(self, color, alpha, family=True):
        # The group itself holds no points; only forward to children.
        if family:
            for mob in self.submobjects:
                mob.fade_to(color, alpha, family)
class OpenGLPMPoint(OpenGLPMobject):
    """A single point rendered as a dot at *location*."""

    def __init__(self, location=ORIGIN, stroke_width=4.0, **kwargs):
        self.location = location
        super().__init__(stroke_width=stroke_width, **kwargs)

    def init_points(self):
        # BUG FIX: stray ' |' residue at the end of this statement
        # (extraction artifact) was a SyntaxError; removed.
        self.points = np.array([self.location], dtype=np.float32)
import os
from django.test import TestCase
from django.urls import reverse
from zds.gallery.tests.factories import GalleryFactory, UserGalleryFactory, ImageFactory
from zds.member.tests.factories import ProfileFactory
from django.conf import settings
class UserGalleryTest(TestCase):
    # Exercises the UserGallery link object (per-user gallery permissions).
    def setUp(self):
        self.profile = ProfileFactory()
        self.gallery = GalleryFactory()
        self.image1 = ImageFactory(gallery=self.gallery)
        self.image2 = ImageFactory(gallery=self.gallery)
        self.user_gallery = UserGalleryFactory(user=self.profile.user, gallery=self.gallery)
    def tearDown(self):
        self.image1.delete()
        self.image2.delete()
        self.user_gallery.delete()
        self.gallery.delete()
    def test_can_write(self):
        # Mode "W" grants write access and is exclusive of read access.
        self.user_gallery.mode = "W"
        self.assertTrue(self.user_gallery.can_write())
        self.assertFalse(self.user_gallery.can_read())
    def test_can_read(self):
        # Mode "R" grants read access only.
        self.user_gallery.mode = "R"
        self.assertFalse(self.user_gallery.can_write())
        self.assertTrue(self.user_gallery.can_read())
    def test_get_images(self):
        # Images come back in insertion order.
        self.assertEqual(2, len(self.user_gallery.get_images()))
        self.assertEqual(self.image1, self.user_gallery.get_images()[0])
        self.assertEqual(self.image2, self.user_gallery.get_images()[1])
class ImageTest(TestCase):
    # Exercises the Image model: URLs, extensions and file lifecycle.
    def setUp(self):
        self.gallery = GalleryFactory()
        self.image = ImageFactory(gallery=self.gallery)
    def tearDown(self):
        self.image.delete()
        self.gallery.delete()
    def test_get_absolute_url(self):
        absolute_url = f"{settings.MEDIA_URL}/{self.image.physical}".replace("//", "/")
        self.assertEqual(absolute_url, self.image.get_absolute_url())
    def test_get_extension(self):
        self.assertEqual("jpg", self.image.get_extension())
    def METHOD_NAME(self):
        # Deleting the model instance must also remove the file on disk.
        test_image = ImageFactory(gallery=self.gallery)
        image_path = test_image.physical.path
        self.assertTrue(os.path.isfile(image_path))
        test_image.delete()
        self.assertFalse(os.path.isfile(image_path))
class GalleryTest(TestCase):
    """Exercises the Gallery model: URLs, linked users, images and the
    on-disk directory lifecycle."""

    def setUp(self):
        self.profile = ProfileFactory()
        self.gallery = GalleryFactory()
        self.image1 = ImageFactory(gallery=self.gallery)
        self.image2 = ImageFactory(gallery=self.gallery)
        self.user_gallery = UserGalleryFactory(user=self.profile.user, gallery=self.gallery)

    def tearDown(self):
        self.image1.delete()
        self.image2.delete()
        self.user_gallery.delete()
        self.gallery.delete()

    def test_get_absolute_url(self):
        absolute_url = reverse("gallery:details", args=[self.gallery.pk, self.gallery.slug])
        self.assertEqual(absolute_url, self.gallery.get_absolute_url())

    def test_get_linked_users(self):
        self.assertEqual(1, len(self.gallery.get_linked_users()))
        self.assertEqual(self.user_gallery, self.gallery.get_linked_users()[0])

    def test_get_images(self):
        # Images come back in insertion order.
        self.assertEqual(2, len(self.gallery.get_images()))
        self.assertEqual(self.image1, self.gallery.get_images()[0])
        self.assertEqual(self.image2, self.gallery.get_images()[1])

    def test_get_last_image(self):
        self.assertEqual(self.image2, self.gallery.get_last_image())

    def test_delete_empty_gallery(self):
        # Deleting a gallery must remove its directory on disk.
        test_gallery = GalleryFactory()
        path = test_gallery.get_gallery_path()
        test_gallery.delete()
        self.assertFalse(os.path.isdir(path))

    def test_delete_gallery_with_image(self):
        # Deleting a gallery also removes its contained image files.
        test_gallery = GalleryFactory()
        test_image = ImageFactory(gallery=test_gallery)
        path_gallery = test_gallery.get_gallery_path()
        self.assertTrue(os.path.isdir(path_gallery))
        path_image = test_image.physical.path
        self.assertTrue(os.path.isfile(path_image))
        # Destroy the gallery and the image
        test_gallery.delete()
        self.assertFalse(os.path.isdir(path_gallery))
        # BUG FIX: removed the trailing ' |' residue (extraction artifact)
        # that made this statement a SyntaxError.
        self.assertFalse(os.path.isfile(path_image))
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# --------------------------------------------------------------------------------------------
# pylint: disable=too-few-public-methods,unnecessary-pass,unused-argument
"""
StorageAppliance tests scenarios
"""
from azure.cli.testsdk import ResourceGroupPreparer, ScenarioTest
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
from .config import CONFIG
def setup_scenario1(test):
    """Prepare the environment for scenario1 (no setup currently needed)."""
    pass
def cleanup_scenario1(test):
    """Clean up after scenario1 (no cleanup currently needed)."""
    pass
def call_scenario1(test):
    """Test case scenario1: exercise the StorageAppliance command surface."""
    setup_scenario1(test)
    # enable, then immediately disable, remote vendor management
    METHOD_NAME(
        test,
        checks=[test.check("status", "Succeeded")],
    )
    step_disable_remote_vendor_management(
        test,
        checks=[test.check("status", "Succeeded")],
    )
    # read-only operations
    step_show(test, checks=[])
    step_list_subscription(test, checks=[])
    step_list_resource_group(test, checks=[])
    step_update(
        test,
        checks=[
            test.check("tags", "{tagsUpdate}"),
            test.check("provisioningState", "Succeeded"),
        ],
    )
    cleanup_scenario1(test)
def METHOD_NAME(test, checks=None):
    """StorageAppliance enable remote vendor management operation."""
    checks = [] if checks is None else checks
    test.cmd(
        "az networkcloud storageappliance enable-remote-vendor-management --resource-group {resourceGroup} --storage-appliance-name {name}",
        checks=checks,
    )
def step_disable_remote_vendor_management(test, checks=None):
    """StorageAppliance disable remote vendor management operation."""
    checks = [] if checks is None else checks
    test.cmd(
        "az networkcloud storageappliance disable-remote-vendor-management --resource-group {resourceGroup} --storage-appliance-name {name}",
        checks=checks,
    )
def step_show(test, checks=None):
    """StorageAppliance show operation."""
    if checks is None:
        checks = []
    # BUG FIX: ``checks`` was accepted but never forwarded to test.cmd,
    # so callers' assertions were silently ignored.
    test.cmd(
        "az networkcloud storageappliance show --resource-group {resourceGroup} --storage-appliance-name {name}",
        checks=checks,
    )
def step_list_resource_group(test, checks=None):
    """StorageAppliance list by resource group operation."""
    if checks is None:
        checks = []
    # BUG FIX: forward ``checks`` so callers' assertions actually run.
    test.cmd(
        "az networkcloud storageappliance list --resource-group {resourceGroup}",
        checks=checks,
    )
def step_list_subscription(test, checks=None):
    """StorageAppliance list by subscription operation."""
    if checks is None:
        checks = []
    # BUG FIX: forward ``checks`` so callers' assertions actually run.
    test.cmd("az networkcloud storageappliance list", checks=checks)
# skip run-read-command as it's not implemented yet
# def step_run_read_command(test, checks=None):
def step_update(test, checks=None):
    """StorageAppliance update operation (patch serial number and tags)."""
    if checks is None:
        checks = []
    # BUG FIX: ``checks`` was never forwarded to test.cmd, so the tag and
    # provisioningState assertions passed in by call_scenario1 never ran.
    test.cmd(
        "az networkcloud storageappliance update --resource-group {resourceGroup} --storage-appliance-name {name} --serial-number {serialNumber} --tags {tagsUpdate}",
        checks=checks,
    )
# As storage appliance is hydrated resource, it won't be provisioned in a testing rg
# instead, we will use a pre-provisioned storage appliance (from an actual lab) for testing
class StorageApplianceScenarioTest(ScenarioTest):
    """StorageAppliance scenario test.

    Storage appliances are hydrated resources and cannot be provisioned in
    a throw-away test resource group, so a pre-provisioned appliance (from
    an actual lab, configured via CONFIG) is used instead.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Template variables consumed by the {placeholders} in test.cmd().
        self.kwargs.update(
            {
                "name": CONFIG.get("STORAGE_APPLIANCE", "name"),
                "resourceGroup": CONFIG.get("STORAGE_APPLIANCE", "resource_group"),
                "tagsUpdate": CONFIG.get("STORAGE_APPLIANCE", "tags_update"),
                "serialNumber": CONFIG.get("STORAGE_APPLIANCE", "serial_number"),
            }
        )

    @AllowLargeResponse()
    def test_storage_appliance_scenario1(self):
        """Test scenario for StorageAppliance CRUD operations."""
        # BUG FIX: removed the trailing ' |' residue (extraction artifact)
        # that made this statement a SyntaxError.
        call_scenario1(self)
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import io
import json
import numpy as np
import pandas as pd
import pytest
from sagemaker.deserializers import (
StringDeserializer,
BytesDeserializer,
CSVDeserializer,
StreamDeserializer,
NumpyDeserializer,
JSONDeserializer,
PandasDeserializer,
JSONLinesDeserializer,
)
def test_string_deserializer():
    # The payload is decoded to a plain str regardless of content type.
    deserializer = StringDeserializer()
    result = deserializer.deserialize(io.BytesIO(b"[1, 2, 3]"), "application/json")
    assert result == "[1, 2, 3]"
def test_bytes_deserializer():
    # The raw payload is returned untouched as bytes.
    deserializer = BytesDeserializer()
    result = deserializer.deserialize(io.BytesIO(b"[1, 2, 3]"), "application/json")
    assert result == b"[1, 2, 3]"
@pytest.fixture
def csv_deserializer():
    return CSVDeserializer()
def test_csv_deserializer_single_element(csv_deserializer):
    # Even a single scalar is wrapped as one row with one column.
    result = csv_deserializer.deserialize(io.BytesIO(b"1"), "text/csv")
    assert result == [["1"]]
def test_csv_deserializer_array(csv_deserializer):
    result = csv_deserializer.deserialize(io.BytesIO(b"1,2,3"), "text/csv")
    assert result == [["1", "2", "3"]]
def test_csv_deserializer_2dimensional(csv_deserializer):
    result = csv_deserializer.deserialize(io.BytesIO(b"1,2,3\n3,4,5"), "text/csv")
    assert result == [["1", "2", "3"], ["3", "4", "5"]]
def test_csv_deserializer_posix_compliant(csv_deserializer):
    # A trailing newline must not produce an empty extra row.
    result = csv_deserializer.deserialize(io.BytesIO(b"1,2,3\n3,4,5\n"), "text/csv")
    assert result == [["1", "2", "3"], ["3", "4", "5"]]
def test_stream_deserializer():
    # StreamDeserializer hands back the open stream and its content type;
    # the caller is responsible for closing the stream.
    deserializer = StreamDeserializer()
    stream, content_type = deserializer.deserialize(io.BytesIO(b"[1, 2, 3]"), "application/json")
    try:
        result = stream.read()
    finally:
        stream.close()
    assert result == b"[1, 2, 3]"
    assert content_type == "application/json"
@pytest.fixture
def numpy_deserializer():
    return NumpyDeserializer()
def test_numpy_deserializer_from_csv(numpy_deserializer):
    stream = io.BytesIO(b"1,2,3\n4,5,6")
    array = numpy_deserializer.deserialize(stream, "text/csv")
    assert np.array_equal(array, np.array([[1, 2, 3], [4, 5, 6]]))
def test_numpy_deserializer_from_csv_ragged(numpy_deserializer):
    # Ragged CSV rows cannot form a rectangular array and must fail.
    stream = io.BytesIO(b"1,2,3\n4,5,6,7")
    with pytest.raises(ValueError) as error:
        numpy_deserializer.deserialize(stream, "text/csv")
    assert "errors were detected" in str(error)
def test_numpy_deserializer_from_csv_alpha():
    # A string dtype lets non-numeric cells through.
    numpy_deserializer = NumpyDeserializer(dtype="U5")
    stream = io.BytesIO(b"hello,2,3\n4,5,6")
    array = numpy_deserializer.deserialize(stream, "text/csv")
    assert np.array_equal(array, np.array([["hello", 2, 3], [4, 5, 6]]))
def test_numpy_deserializer_from_json(numpy_deserializer):
    stream = io.BytesIO(b"[[1,2,3],\n[4,5,6]]")
    array = numpy_deserializer.deserialize(stream, "application/json")
    assert np.array_equal(array, np.array([[1, 2, 3], [4, 5, 6]]))
# NumPy >= 1.24 refuses to build an array from ragged (inhomogeneous)
# nested lists, so ragged JSON now raises instead of producing an object
# array of Python lists.
def test_numpy_deserializer_from_json_ragged(numpy_deserializer):
    stream = io.BytesIO(b"[[1,2,3],\n[4,5,6,7]]")
    with pytest.raises(ValueError) as error:
        numpy_deserializer.deserialize(stream, "application/json")
    assert "requested array has an inhomogeneous shape" in str(error)
def test_numpy_deserializer_from_json_alpha():
    numpy_deserializer = NumpyDeserializer(dtype="U5")
    stream = io.BytesIO(b'[["hello",2,3],\n[4,5,6]]')
    array = numpy_deserializer.deserialize(stream, "application/json")
    assert np.array_equal(array, np.array([["hello", 2, 3], [4, 5, 6]]))
def test_numpy_deserializer_from_npy(numpy_deserializer):
    # Round-trip an array through the binary .npy format.
    array = np.ones((2, 3))
    stream = io.BytesIO()
    np.save(stream, array)
    stream.seek(0)
    result = numpy_deserializer.deserialize(stream, "application/x-npy")
    assert np.array_equal(array, result)
def test_numpy_deserializer_from_npy_object_array(numpy_deserializer):
    # Object arrays require pickling, which is allowed by default.
    array = np.array([{"a": "", "b": ""}, {"c": "", "d": ""}])
    stream = io.BytesIO()
    np.save(stream, array)
    stream.seek(0)
    result = numpy_deserializer.deserialize(stream, "application/x-npy")
    assert np.array_equal(array, result)
def test_numpy_deserializer_from_npy_object_array_with_allow_pickle_false():
    # With pickling disabled, loading an object array must fail.
    numpy_deserializer = NumpyDeserializer(allow_pickle=False)
    array = np.array([{"a": "", "b": ""}, {"c": "", "d": ""}])
    stream = io.BytesIO()
    np.save(stream, array)
    stream.seek(0)
    with pytest.raises(ValueError):
        numpy_deserializer.deserialize(stream, "application/x-npy")
def test_numpy_deserializer_from_npz(numpy_deserializer):
    # .npz archives come back as an NpzFile with all named members intact.
    arrays = {
        "alpha": np.ones((2, 3)),
        "beta": np.zeros((3, 2)),
    }
    stream = io.BytesIO()
    np.savez_compressed(stream, **arrays)
    stream.seek(0)
    result = numpy_deserializer.deserialize(stream, "application/x-npz")
    assert isinstance(result, np.lib.npyio.NpzFile)
    assert set(arrays.keys()) == set(result.keys())
    for key, arr in arrays.items():
        assert np.array_equal(arr, result[key])
@pytest.fixture
def json_deserializer():
    return JSONDeserializer()
def test_json_deserializer_array(json_deserializer):
    result = json_deserializer.deserialize(io.BytesIO(b"[1, 2, 3]"), "application/json")
    assert result == [1, 2, 3]
def test_json_deserializer_2dimensional(json_deserializer):
    result = json_deserializer.deserialize(
        io.BytesIO(b"[[1, 2, 3], [3, 4, 5]]"), "application/json"
    )
    assert result == [[1, 2, 3], [3, 4, 5]]
def test_json_deserializer_invalid_data(json_deserializer):
    # Malformed JSON surfaces the parser error (which reports the column).
    with pytest.raises(ValueError) as error:
        json_deserializer.deserialize(io.BytesIO(b"[[1]"), "application/json")
    assert "column" in str(error)
@pytest.fixture
def METHOD_NAME():
    return PandasDeserializer()
def test_pandas_deserializer_json(METHOD_NAME):
    # JSON payloads in pandas' column-oriented format become a DataFrame.
    data = {"col 1": {"row 1": "a", "row 2": "c"}, "col 2": {"row 1": "b", "row 2": "d"}}
    stream = io.BytesIO(json.dumps(data).encode("utf-8"))
    result = METHOD_NAME.deserialize(stream, "application/json")
    expected = pd.DataFrame(
        [["a", "b"], ["c", "d"]], index=["row 1", "row 2"], columns=["col 1", "col 2"]
    )
    assert result.equals(expected)
def test_pandas_deserializer_csv(METHOD_NAME):
    # The first CSV row is interpreted as the header.
    stream = io.BytesIO(b"col 1,col 2\na,b\nc,d")
    result = METHOD_NAME.deserialize(stream, "text/csv")
    expected = pd.DataFrame([["a", "b"], ["c", "d"]], columns=["col 1", "col 2"])
    assert result.equals(expected)
@pytest.fixture
def json_lines_deserializer():
    return JSONLinesDeserializer()


@pytest.mark.parametrize(
    "source, expected",
    [
        (b'["Name", "Score"]\n["Gilbert", 24]', [["Name", "Score"], ["Gilbert", 24]]),
        (b'["Name", "Score"]\n["Gilbert", 24]\n', [["Name", "Score"], ["Gilbert", 24]]),
        (
            b'{"Name": "Gilbert", "Score": 24}\n{"Name": "Alexa", "Score": 29}',
            [{"Name": "Gilbert", "Score": 24}, {"Name": "Alexa", "Score": 29}],
        ),
    ],
)
def test_json_lines_deserializer(json_lines_deserializer, source, expected):
    # Each input line is parsed as one JSON document; a trailing newline
    # must not produce an empty extra record.
    stream = io.BytesIO(source)
    content_type = "application/jsonlines"
    actual = json_lines_deserializer.deserialize(stream, content_type)
    # BUG FIX: removed the trailing ' |' residue (extraction artifact)
    # that made this assertion a SyntaxError.
    assert actual == expected
"""Base class for SQL-type streams."""
from __future__ import annotations
import abc
import typing as t
import singer_sdk.helpers._catalog as catalog
from singer_sdk._singerlib import CatalogEntry, MetadataMapping
from singer_sdk.connectors import SQLConnector
from singer_sdk.streams.core import Stream
if t.TYPE_CHECKING:
from singer_sdk.tap_base import Tap
class SQLStream(Stream, metaclass=abc.ABCMeta):
    """Base class for SQLAlchemy-based streams."""

    connector_class = SQLConnector

    # Lazily-built copy of the catalog entry's JSON schema.
    _cached_schema: dict | None = None

    def __init__(
        self,
        tap: Tap,
        catalog_entry: dict,
        connector: SQLConnector | None = None,
    ) -> None:
        """Initialize the database stream.

        If `connector` is omitted, a new connector will be created.

        Args:
            tap: The parent tap object.
            catalog_entry: Catalog entry dict.
            connector: Optional connector to reuse.
        """
        self._connector: SQLConnector = connector or self.connector_class(
            dict(tap.config),
        )
        self.catalog_entry = catalog_entry
        super().__init__(
            tap=tap,
            METHOD_NAME=self.METHOD_NAME,
            name=self.tap_stream_id,
        )

    @property
    def _singer_catalog_entry(self) -> CatalogEntry:
        """Return catalog entry as specified by the Singer catalog spec.

        Returns:
            A CatalogEntry object.
        """
        return t.cast(CatalogEntry, CatalogEntry.from_dict(self.catalog_entry))

    @property
    def connector(self) -> SQLConnector:
        """Return a connector object.

        Returns:
            The connector object.
        """
        return self._connector

    @property
    def metadata(self) -> MetadataMapping:
        """Return the Singer metadata.

        Metadata from an input catalog will override standard metadata.

        Returns:
            Metadata object as specified in the Singer spec.
        """
        return self._singer_catalog_entry.metadata

    @property  # TODO: Investigate @cached_property after py > 3.7
    def METHOD_NAME(self) -> dict:
        """Return metadata object (dict) as specified in the Singer spec.

        Metadata from an input catalog will override standard metadata.

        Returns:
            The schema object.
        """
        # BUG FIX: compare against None explicitly. An empty (but valid)
        # schema dict is falsy, so ``if not self._cached_schema`` defeated
        # the cache and rebuilt the dict on every access.
        if self._cached_schema is None:
            self._cached_schema = t.cast(
                dict,
                self._singer_catalog_entry.METHOD_NAME.to_dict(),
            )
        return self._cached_schema

    @property
    def tap_stream_id(self) -> str:
        """Return the unique ID used by the tap to identify this stream.

        Generally, this is the same value as in `Stream.name`.

        In rare cases, such as for database types with multi-part names,
        this may be slightly different from `Stream.name`.

        Returns:
            The unique tap stream ID as a string.
        """
        return self._singer_catalog_entry.tap_stream_id

    @property
    def primary_keys(self) -> list[str] | None:
        """Get primary keys from the catalog entry definition.

        Returns:
            A list of primary key(s) for the stream.
        """
        return self._singer_catalog_entry.metadata.root.table_key_properties or []

    @primary_keys.setter
    def primary_keys(self, new_value: list[str]) -> None:
        """Set or reset the primary key(s) in the stream's catalog entry.

        Args:
            new_value: a list of one or more column names
        """
        self._singer_catalog_entry.metadata.root.table_key_properties = new_value

    @property
    def fully_qualified_name(self) -> str:
        """Generate the fully qualified version of the table name.

        Raises:
            ValueError: If table_name is not able to be detected.

        Returns:
            The fully qualified name.
        """
        catalog_entry = self._singer_catalog_entry
        if not catalog_entry.table:
            msg = f"Missing table name in catalog entry: {catalog_entry.to_dict()}"
            raise ValueError(msg)
        return self.connector.get_fully_qualified_name(
            table_name=catalog_entry.table,
            schema_name=catalog_entry.metadata.root.schema_name,
            db_name=catalog_entry.database,
        )

    def get_selected_schema(self) -> dict:
        """Return a copy of the Stream JSON schema, dropping any fields not selected.

        Returns:
            A dictionary containing a copy of the Stream JSON schema, filtered
            to any selection criteria.
        """
        return catalog.get_selected_schema(
            stream_name=self.name,
            METHOD_NAME=self.METHOD_NAME,
            mask=self.mask,
            logger=self.logger,
        )

    # Get records from stream
    def get_records(self, context: dict | None) -> t.Iterable[dict[str, t.Any]]:
        """Return a generator of record-type dictionary objects.

        If the stream has a replication_key value defined, records will be sorted by the
        incremental key. If the stream also has an available starting bookmark, the
        records will be filtered for values greater than or equal to the bookmark value.

        Args:
            context: If partition context is provided, will read specifically from this
                data slice.

        Yields:
            One dict per record.

        Raises:
            NotImplementedError: If partition is passed in context and the stream does
                not support partitioning.
        """
        if context:
            msg = f"Stream '{self.name}' does not support partitioning."
            raise NotImplementedError(msg)

        selected_column_names = self.get_selected_schema()["properties"].keys()
        table = self.connector.get_table(
            full_table_name=self.fully_qualified_name,
            column_names=selected_column_names,
        )
        query = table.select()

        if self.replication_key:
            replication_key_col = table.columns[self.replication_key]
            query = query.order_by(replication_key_col)

            start_val = self.get_starting_replication_key_value(context)
            if start_val:
                query = query.where(replication_key_col >= start_val)

        if self.ABORT_AT_RECORD_COUNT is not None:
            # Limit record count to one greater than the abort threshold. This ensures
            # `MaxRecordsLimitException` exception is properly raised by caller
            # `Stream._sync_records()` if more records are available than can be
            # processed.
            query = query.limit(self.ABORT_AT_RECORD_COUNT + 1)

        with self.connector._connect() as conn:
            for record in conn.execute(query):
                transformed_record = self.post_process(dict(record._mapping))
                if transformed_record is None:
                    # Record filtered out during post_process()
                    continue
                yield transformed_record
# BUG FIX: removed the trailing ' |' residue (extraction artifact) that
# made this statement a SyntaxError.
__all__ = ["SQLStream", "SQLConnector"]
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
DXAnalysis Handler
++++++++++++++++++
Analyses are DNAnexus entities that capture an instantiation of a
running workflow. They can be created from
:func:`dxpy.bindings.dxworkflow.DXWorkflow.run` or from an
existing analysis ID.
"""
from __future__ import print_function, unicode_literals, division, absolute_import
import time
import dxpy
from dxpy.bindings import (DXObject, )
from dxpy.exceptions import DXJobFailureError
##############
# DXAnalysis #
##############
class DXAnalysis(DXObject):
    '''
    Remote analysis object handler.

    Wraps the /analysis-xxxx/* API routes: describing, tagging, waiting
    on, and terminating a running workflow instantiation.
    '''

    _class = "analysis"

    def __init__(self, dxid=None):
        # Populated by the test harness when applicable; None otherwise.
        self._test_harness_result = None
        DXObject.__init__(self, dxid=dxid)
        self.set_id(dxid)

    def METHOD_NAME(self, fields=None, **kwargs):
        """
        :param fields: dict where the keys are field names that should
            be returned, and values should be set to True (by default,
            all fields are returned)
        :type fields: dict
        :returns: Description of the analysis
        :rtype: dict

        Returns a hash with key-value pairs containing information
        about the analysis
        """
        describe_input = {}
        if fields is not None:
            describe_input['fields'] = fields
        # Cache the latest description on the handler for later inspection.
        self._desc = dxpy.api.analysis_describe(self._dxid, describe_input, **kwargs)
        return self._desc

    def add_tags(self, tags, **kwargs):
        """
        :param tags: Tags to add to the analysis
        :type tags: list of strings

        Adds each of the specified tags to the analysis. Takes no
        action for tags that are already listed for the analysis.
        """
        dxpy.api.analysis_add_tags(self._dxid, {"tags": tags}, **kwargs)

    def remove_tags(self, tags, **kwargs):
        """
        :param tags: Tags to remove from the analysis
        :type tags: list of strings

        Removes each of the specified tags from the analysis. Takes
        no action for tags that the analysis does not currently have.
        """
        dxpy.api.analysis_remove_tags(self._dxid, {"tags": tags}, **kwargs)

    def set_properties(self, properties, **kwargs):
        """
        :param properties: Property names and values given as key-value pairs of strings
        :type properties: dict

        Given key-value pairs in *properties* for property names and
        values, the properties are set on the analysis for the given
        property names. Any property with a value of :const:`None`
        indicates the property will be deleted.

        .. note:: Any existing properties not mentioned in *properties*
           are not modified by this method.
        """
        dxpy.api.analysis_set_properties(self._dxid, {"properties": properties}, **kwargs)

    def wait_on_done(self, interval=2, timeout=3600*24*7, **kwargs):
        '''
        :param interval: Number of seconds between queries to the analysis's state
        :type interval: integer
        :param timeout: Maximum amount of time to wait, in seconds, until the analysis is done (or at least partially failed)
        :type timeout: integer
        :raises: :exc:`~dxpy.exceptions.DXError` if the timeout is reached before the analysis has finished running, or :exc:`~dxpy.exceptions.DXJobFailureError` if some job in the analysis has failed

        Waits until the analysis has finished running.
        '''
        elapsed = 0
        while True:
            state = self._get_state(**kwargs)
            if state == "done":
                break
            if state in ["failed", "partially_failed"]:
                desc = self.METHOD_NAME(**kwargs)
                err_msg = "Analysis has failed because of {failureReason}: {failureMessage}".format(**desc)
                # BUGFIX (idiom): compare against None with 'is not', not '!='.
                # Mention the originating sub-job when the failure did not come
                # from the analysis itself.
                if desc.get("failureFrom") is not None and desc["failureFrom"]["id"] != desc["id"]:
                    err_msg += " (failure from {id})".format(id=desc['failureFrom']['id'])
                raise DXJobFailureError(err_msg)
            if state == "terminated":
                raise DXJobFailureError("Analysis was terminated.")
            # 'elapsed < 0' guards against a pathological (e.g. negative)
            # interval producing an endless wait.
            if elapsed >= timeout or elapsed < 0:
                raise DXJobFailureError("Reached timeout while waiting for the analysis to finish")
            time.sleep(interval)
            elapsed += interval

    def terminate(self, **kwargs):
        '''
        Terminates the associated analysis.
        '''
        dxpy.api.analysis_terminate(self._dxid, **kwargs)

    def get_output_ref(self, field, index=None, metadata=None):
        '''
        :param field: Output field name of this analysis
        :type field: string
        :param index: If the referenced field is an array, optionally specify an index (starting from 0) to indicate a particular member of the array
        :type index: int
        :param metadata: If the referenced field is of a data object class, a string indicating the metadata that should be read, e.g. "name", "properties.propkey", "details.refgenome"
        :type metadata: string

        Returns a dict containing a valid reference to an output of this analysis.
        '''
        link = {"$dnanexus_link": {"analysis": self._dxid, "field": field}}
        if index is not None:
            link["$dnanexus_link"]["index"] = index
        if metadata is not None:
            link["$dnanexus_link"]["metadata"] = metadata
        return link

    def _get_state(self, **kwargs):
        '''
        :returns: State of the remote object
        :rtype: string

        Queries the API server for the analysis's state.

        Note that this function is shorthand for:

            dxanalysis.describe(**kwargs)["state"]
        '''
        return self.METHOD_NAME(fields=dict(state=True), **kwargs)["state"]
5,878 | read buffer | # -*- coding: utf-8 -*-
"""
Modified from https://github.com/plasmon360/python_newport_1918_powermeter
"""
from past.utils import old_div
from nplab.instrument import Instrument
from ctypes import *
import time
import numpy as np
class NewportPowermeter(Instrument):
    """Driver for a Newport 1918-style power meter, via the vendor USB DLL.

    Windows-only: relies on ``ctypes.windll`` and Newport's ``usbdll.dll``.
    Communication is ASCII command strings (see the instrument manual).
    """

    def __init__(self, product_id, **kwargs):
        """
        :param product_id: go to Device Manager, double click on instrument, go to Details, in the Property drop-down,
            select Hardware IDs. If the ID is something like PID_ABC1, use product_id = 0xACB1
        :param kwargs: optional 'libname' giving the DLL path (default "usbdll.dll")
        """
        super(NewportPowermeter, self).__init__()
        if "libname" in kwargs:
            libname = kwargs["libname"]
        else:
            libname = "usbdll.dll"
        self.dll = windll.LoadLibrary(libname)
        self.product_id = product_id
        self.open_device_with_product_id()
        self.instrument = self.get_instrument_list()
        self.device_id, self.model_number, self.serial_number = self.instrument
        # Wavelength limits of the attached detector (presumably in nm,
        # per the PM:MIN/MAX:Lambda? commands -- TODO confirm in manual).
        self.wvl_range = [int(self.query('PM:MIN:Lambda?')), int(self.query('PM:MAX:Lambda?'))]

    # NOTE(review): device teardown on GC was deliberately disabled;
    # presumably it misbehaved at interpreter shutdown -- kept for reference.
    # def __del__(self):
    #     self.close_device()

    def _dllWrapper(self, command, *args):
        """Simple dll wrapper

        Takes care of the error checking for all dll calls
        :param command: string with the command name
        :param args: list of (optional) arguments to pass to the dll function
        :return:
        """
        self._logger.debug("Calling DLL with: %s %s" % (command, args))
        status = getattr(self.dll, command)(*args)
        # Non-zero return codes indicate failure (vendor DLL convention).
        if status != 0:
            raise Exception('%s failed with status %s' % (command, status))
        else:
            pass

    def open_device_all_products_all_devices(self):
        # Initializes the USB subsystem for every connected Newport device.
        self._dllWrapper("newp_usb_init_system")
        self._logger.info("You have connected to one or more Newport products")

    def open_device_with_product_id(self):
        """
        opens a device with a certain product id
        """
        cproductid = c_int(self.product_id)
        useusbaddress = c_bool(1)  # We will only use deviceids or addresses
        num_devices = c_int()
        self._dllWrapper("newp_usb_open_devices", cproductid, useusbaddress, byref(num_devices))

    def close_device(self):
        # Releases the USB subsystem (counterpart of the open_* calls).
        self._dllWrapper("newp_usb_uninit_system")

    def get_instrument_list(self):
        # Returns [device_id, model_number, serial_number] of the open device.
        arInstruments = c_int()
        arInstrumentsModel = c_int()
        arInstrumentsSN = c_int()
        nArraySize = c_int()
        self._dllWrapper("GetInstrumentList", byref(arInstruments), byref(arInstrumentsModel), byref(arInstrumentsSN),
                         byref(nArraySize))
        instrument_list = [arInstruments.value, arInstrumentsModel.value, arInstrumentsSN.value]
        return instrument_list

    def query(self, query_string):
        """
        Write a query and read the response from the device
        :rtype : String
        :param query_string: Check Manual for commands, ex '*IDN?'
        :return:
        """
        self.write(query_string)
        return self.read()

    def read(self):
        # Reads up to 1024 bytes of ASCII response from the device.
        cdevice_id = c_long(self.device_id)
        # Short settle delay before reading; presumably needed by the DLL --
        # TODO confirm whether it can be reduced.
        time.sleep(0.2)
        response = create_string_buffer(('\000' * 1024).encode())
        leng = c_ulong(1024)
        read_bytes = c_ulong()
        self._dllWrapper("newp_usb_get_ascii", cdevice_id, byref(response), leng, byref(read_bytes))
        # Trim to the reported byte count and strip the trailing CRLF.
        answer = response.value[0:read_bytes.value].rstrip(b'\r\n')
        return answer

    def write(self, command_string):
        """
        Write a string to the device
        :param command_string: Name of the string to be sent. Check Manual for commands
        :raise:
        """
        command = create_string_buffer(command_string.encode())
        length = c_ulong(sizeof(command))
        cdevice_id = c_long(self.device_id)
        self._dllWrapper("newp_usb_send_ascii", cdevice_id, byref(command), length)

    @property
    def channel(self):
        # Current measurement channel (for dual-channel meters).
        return self.query("PM:CHANnel?")

    @channel.setter
    def channel(self, channel):
        assert channel in [1, 2]
        self.write("PM:CHANnel " + str(channel))

    @property
    def wavelength(self):
        self._logger.debug("Reading wavelength")
        return self.query('PM:Lambda?')

    @wavelength.setter
    def wavelength(self, wavelength):
        """
        Sets the wavelength on the device
        :param wavelength int: float
        """
        self._logger.debug("Setting wavelength")
        if not isinstance(wavelength, int):
            self._logger.info('Wavelength has to be an integer. Converting to integer')
            wavelength = int(wavelength)
        # Must be within the detector's supported range read at init.
        assert self.wvl_range[0] <= wavelength <= self.wvl_range[1]
        self.write('PM:Lambda ' + str(wavelength))

    def set_filtering(self, filter_type=0):
        """
        Set the filtering on the device
        :param filter_type:
            0:No filtering
            1:Analog filter
            2:Digital filter
            3:Analog and Digital filter
        """
        if filter_type in [0, 1, 2, 3]:
            self.write("PM:FILT %d" % filter_type)
        else:
            raise ValueError("filter_type needs to be between 0 and 3")

    def METHOD_NAME(self, wavelength=700, buff_size=1000, interval_ms=1):
        """
        Stores the power values at a certain wavelength.
        :param wavelength: float: Wavelength at which this operation should be done. float.
        :param buff_size: int: nuber of readings that will be taken
        :param interval_ms: float: Time between readings in ms.
        :return: [actualwavelength,mean_power,std_power]
        """
        self.wavelength = wavelength
        self.write('PM:DS:Clear')
        self.write('PM:DS:SIZE ' + str(buff_size))
        self.write('PM:DS:INT ' + str(
            interval_ms * 10))  # to set 1 ms rate we have to give int value of 10. This is strange as manual says the INT should be in ms
        self.write('PM:DS:ENable 1')
        while int(self.query('PM:DS:COUNT?')) < buff_size:  # Waits for the buffer is full or not.
            time.sleep(old_div(0.001 * interval_ms * buff_size, 10))
        actualwavelength = self.query('PM:Lambda?')
        mean_power = self.query('PM:STAT:MEAN?')
        std_power = self.query('PM:STAT:SDEV?')
        self.write('PM:DS:Clear')
        return [actualwavelength, mean_power, std_power]

    @property
    def power(self):
        """
        Reads the instantaneous power
        """
        power = self.query('PM:Power?')
        return float(power)
if __name__ == '__main__':
    # Manual smoke test: requires the physical meter (and vendor DLL) attached.
    nd = NewportPowermeter(0xCEC7)
    nd._logger.setLevel("DEBUG")
    print('Init finished')
    print(nd.get_instrument_list())
    # Read wavelength/power twice to confirm repeated queries work.
    print(nd.wavelength)
    print(nd.power)
    print(nd.wavelength)
    print(nd.power)
5,879 | gen script | # linux kernel packaging helpers
def get_arch(arch):
    """Map a cbuild profile arch name to the kernel's ARCH= value.

    Returns None for arches that have no known mapping yet.
    """
    kernel_arches = {
        "ppc64le": "powerpc",
        "ppc64": "powerpc",
        "ppc": "powerpc",
        "aarch64": "arm64",
        "x86_64": "x86_64",
        "riscv64": "riscv",
    }
    # unknown arches fall through to None, same as the unmatched case
    return kernel_arches.get(arch)
def METHOD_NAME(pkg, script, flavor, args=""):
    """Register one scriptlet that delegates to the base-kernel helper.

    The helper receives the new version ("$1"), any extra args (e.g. the
    old version for upgrades), and the kernel flavor.
    """
    helper = f'/usr/libexec/base-kernel/script-{script} "$1"{args} "{flavor}"'
    pkg.scriptlets[script] = helper
def generate_scriptlets(pkg, flavor):
    """Hook every packaging scriptlet up to the base-kernel helpers.

    Upgrade scriptlets additionally pass the old version as "$2".
    """
    scriptlet_spec = (
        ("pre-install", ""),
        ("pre-upgrade", ' "$2"'),
        ("pre-deinstall", ""),
        ("post-install", ""),
        ("post-upgrade", ' "$2"'),
    )
    for script, extra_args in scriptlet_spec:
        METHOD_NAME(pkg, script, flavor, extra_args)
def _build_env(pkg, menv, base_env, env):
renv = dict(menv)
# needed for efistub
renv["CBUILD_BYPASS_STRIP_WRAPPER"] = "1"
if base_env:
renv.update(base_env)
if env:
renv.update(env)
return renv
def configure(pkg, flavor, build_dir=None, env=None):
    """Prepare the kernel build tree via chimera-buildkernel for *flavor*.

    Copies the per-arch config from files/ into the working directory and
    runs the 'prepare' step with reproducibility (EPOCH) and cross-compile
    settings applied.
    """
    cfgarch = pkg.profile().arch
    cfgname = f"config-{cfgarch}.{flavor}"
    pkg.cp(pkg.files_path / cfgname, pkg.cwd)
    # SOURCE_DATE_EPOCH for reproducible builds; 0 when unset.
    epoch = pkg.source_date_epoch or 0
    args = []
    if pkg.profile().cross:
        args += [f"CROSS_COMPILE={pkg.profile().triplet}"]
    # default the objdir to the package's make dir unless overridden
    bdir = build_dir
    if not bdir:
        bdir = pkg.make_dir
    pkg.do(
        "chimera-buildkernel",
        "prepare",
        f"ARCH={get_arch(cfgarch)}",
        f"CONFIG_FILE={pkg.chroot_cwd}/{cfgname}",
        f"OBJDIR={bdir}",
        f"JOBS={pkg.make_jobs}",
        f"LOCALVERSION=-{pkg.pkgrel}-{flavor}",
        f"EPOCH={epoch}",
        *args,
        env=_build_env(pkg, pkg.configure_env, None, env),
    )
def update_configs(pkg, archs, flavor):
    """Interactively regenerate kernel configs for the given arches.

    For each arch: prepare a per-arch build dir, wait for the maintainer
    to adjust the config by hand, then copy the resulting .config back
    into files/. Always finishes with pkg.error so the build stops here.
    """
    for a in archs:
        with pkg.profile(a):
            # stamp prevents redoing an arch's config on re-run
            with pkg.stamp(f"{a}_config"):
                pkg.log(f"configuring {a}...")
                configure(pkg, flavor, f"{pkg.make_dir}-{a}")
                pkg.log("now perform other config (press enter once done)")
                input()
                pkg.cp(
                    f"{pkg.make_dir}-{a}/.config",
                    pkg.files_path / f"config-{a}.{flavor}",
                )
    pkg.error("kernel configs have been updated")
def build(pkg, flavor, env=None):
    """Run the chimera-buildkernel 'build' step for this flavor."""
    build_env = _build_env(pkg, pkg.make_env, pkg.make_build_env, env)
    pkg.do("chimera-buildkernel", "build", env=build_env)
def install(pkg, flavor, env=None):
    """Run the chimera-buildkernel 'install' step and stage mutable files.

    Files regenerated at runtime (the modules.* depmod indexes) are moved
    into an apk-dist subdirectory so packaging hooks can manage them
    instead of apk owning them directly.
    """
    pkg.do(
        "chimera-buildkernel",
        "install",
        pkg.chroot_destdir,
        env=_build_env(pkg, pkg.make_env, pkg.make_install_env, env),
    )
    kpath = f"usr/lib/modules/{pkg.pkgver}-{pkg.pkgrel}-{flavor}"
    # mutable files go to a separate dist directory, to be handled by hooks
    pkg.install_dir(f"{kpath}/apk-dist")
    for f in (pkg.destdir / kpath).glob("modules.*"):
        pkg.mv(f, f.parent / "apk-dist")
# api to manipulate out of tree modules
def get_version(pkg, expected=None):
    """Return the single staged kernel version under usr/lib/modules.

    Errors out (via pkg.error) when more than one version directory
    exists, or when *expected* is given and is not a substring of the
    found version string.
    """
    from cbuild.core import paths

    kver = None
    for f in (paths.bldroot() / "usr/lib/modules").iterdir():
        # exactly one kernel may be staged at a time
        if kver:
            pkg.error(f"kernel version already set: {kver}")
        kver = f.name
    # NOTE(review): if the directory is empty, kver stays None and the
    # 'expected not in kver' test would raise -- presumably pkg.error is
    # expected to have fired earlier in practice; confirm.
    if expected and expected not in kver:
        pkg.error(f"kernel mismatch: {kver} (expected {expected})")
    return kver
def get_modsrc(pkg, modname, modver):
    """Return the bldroot path of an out-of-tree module's source tree."""
    from cbuild.core import paths

    srcname = f"{modname}-{modver}"
    return paths.bldroot() / "usr/src" / srcname
def generate_scriptlets_ckms(pkg, modname, kernver):
    """Generate pre/post scriptlets for a CKMS-built out-of-tree module.

    Pre-scripts drop the initramfs (it will be rebuilt); post-scripts
    refresh the module dependency index for the target kernel version.
    """
    # remove both common initramfs naming schemes; ignore failures
    prescript = f"""rm -f /boot/initramfs-{kernver}.img || :
rm -f /boot/initrd.img-{kernver} || :"""

    # prefer depmod against the installed System.map when available
    postscript = f"""if [ -f /boot/System.map-{kernver} ]; then
    depmod -a -F /boot/System.map-{kernver} {kernver} || :
else
    depmod -a {kernver} || :
fi"""

    # fresh install additionally unregisters any stale CKMS state
    pkg.scriptlets["pre-install"] = (
        prescript
        + f"""
if [ -x /usr/bin/ckms ]; then
    ckms -q -k {kernver} uninstall {modname} > /dev/null 2>&1 || :
fi"""
    )
    pkg.scriptlets["pre-upgrade"] = prescript
    pkg.scriptlets["pre-deinstall"] = prescript
    pkg.scriptlets["post-install"] = postscript
    pkg.scriptlets["post-upgrade"] = postscript
    pkg.scriptlets["post-deinstall"] = postscript
def _call_ckms(pkg, kver, *args):
pkg.do("ckms", "-s", pkg.chroot_cwd, "-k", kver, *args)
def ckms_configure(pkg, modname, modver, kver):
    """Register the module source tree with ckms for the given kernel."""
    srcpath = f"/usr/src/{modname}-{modver}"
    _call_ckms(pkg, kver, "add", srcpath)
def ckms_build(pkg, modname, modver, kver):
    """Build the registered module version against the given kernel."""
    spec = f"{modname}={modver}"
    _call_ckms(pkg, kver, "build", spec)
def ckms_install(pkg, modname, modver, kver):
    """Install a CKMS-built module into destdir and mark it package-owned.

    Modules are gzip-compressed on install; a marker file under
    ckms-disable/ tells runtime ckms hooks that this module version is
    shipped by the package and must not be rebuilt/managed by ckms.
    """
    modbase = "usr/lib/modules"
    moddest = f"{modbase}/{kver}"
    pkg.install_dir(moddest)
    _call_ckms(
        pkg,
        kver,
        "-d",
        pkg.chroot_destdir / modbase,
        "-D",
        "-x",
        "gz",
        "install",
        f"{modname}={modver}",
    )
    cdpath = f"{moddest}/ckms-disable/{modname}"
    pkg.install_dir(cdpath)
    # marker file named after the module version; contents irrelevant
    (pkg.destdir / cdpath / modver).touch(0o644)
5,880 | do get | import functools
import hashlib
import http.server
import multiprocessing
import os
import random
import shutil
import socket
import sys
from contextlib import closing
import pytest
import salt.utils.files
class RequestHandler(http.server.SimpleHTTPRequestHandler):
    """
    Modified request handler class

    Serves files from *directory* with Etag-based caching: the Etag is
    the MD5 of the file contents, and a matching ``If-None-Match``
    request header yields a 304 response with no body.
    """

    def __init__(self, *args, directory=None, **kwargs):
        if directory is None:
            directory = os.getcwd()
        self.directory = directory
        # SimpleHTTPRequestHandler only grew the ``directory`` kwarg in
        # Python 3.7; older interpreters rely on the attribute set above.
        if sys.version_info.minor < 7:
            super().__init__(*args, **kwargs)
        else:
            super().__init__(*args, directory=directory, **kwargs)

    def METHOD_NAME(self):
        """
        GET request handling

        Responds 200 with the file body, 304 when the client's
        If-None-Match matches the current checksum, or 404 on any error.
        """
        none_match = self.headers.get("If-None-Match")
        status_code = 200
        try:
            # Retrieve the local file from the web root to serve to clients
            with salt.utils.files.fopen(
                os.path.join(self.directory, self.path[1:])
            ) as reqfp:
                return_text = reqfp.read().encode("utf-8")
            # We're using this checksum as the etag to show file changes
            checksum = hashlib.md5(return_text).hexdigest()
            if none_match == checksum:
                # Status code 304 Not Modified is returned if the file is unchanged
                status_code = 304
        except:  # pylint: disable=bare-except
            # Something went wrong. We didn't find the requested file
            # (deliberately broad: any failure maps to a 404 for the test).
            status_code = 404
            return_text = None
            checksum = None
        self.send_response(status_code)
        # Return the Etag header if we have the checksum
        if checksum:
            # IMPORTANT: This introduces randomness into the tests. The Etag header key
            # will be converted to lowercase in the code... but if someone breaks that,
            # it'll rear it's head here as random failures that are hard to reproduce.
            # Any alternatives seem overly complex. So... don't break the case insensitivity
            # in the code.
            possible_etags = ["Etag", "ETag"]
            self.send_header(random.choice(possible_etags), checksum)
        self.end_headers()
        # Return file content
        if return_text:
            self.wfile.write(return_text)
def serve(port=8000, directory=None):
    """Serve *directory* over HTTP on 127.0.0.1:*port*; blocks forever."""
    bound_handler = functools.partial(RequestHandler, directory=directory)
    httpd = http.server.HTTPServer(("127.0.0.1", port), bound_handler)
    httpd.serve_forever()
@pytest.fixture(scope="module")
def free_port():
    """
    Utility fixture to grab a free port for the web server
    """
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
        # BUGFIX: SO_REUSEADDR only takes effect when set *before* bind();
        # the original set it after binding, where it was a no-op.
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(("", 0))
        # port 0 makes the OS pick a free ephemeral port; report it
        return s.getsockname()[1]
@pytest.fixture(autouse=True, scope="module")
def server(free_port, web_root):
    """
    Web server fixture

    Runs serve() in a child process so it doesn't block the tests;
    terminated (serve_forever never returns) and joined on teardown.
    """
    p = multiprocessing.Process(target=serve, args=(free_port, web_root))
    p.start()
    yield
    p.terminate()
    p.join()
@pytest.fixture(scope="module")
def web_root(tmp_path_factory):
    """
    Temporary directory fixture for the web server root
    """
    _web_root = tmp_path_factory.mktemp("web_root")
    try:
        yield str(_web_root)
    finally:
        # best-effort cleanup; the server process may still hold files open
        shutil.rmtree(str(_web_root), ignore_errors=True)
@pytest.mark.slow_test
def test_file_managed_web_source_etag_operation(
    states, free_port, web_root, minion_opts
):
    """
    This functional test checks the operation of the use_etag parameter to the
    file.managed state. There are four (4) invocations of file.managed with a
    web source, but only three (3) will trigger a call to the web server as
    shown below and in comments within.

    127.0.0.1 - - [08/Jan/2022 00:53:11] "GET /foo.txt HTTP/1.1" 200 -
    127.0.0.1 - - [08/Jan/2022 00:53:11] "GET /foo.txt HTTP/1.1" 304 -
    127.0.0.1 - - [08/Jan/2022 00:53:12] "GET /foo.txt HTTP/1.1" 200 -

    Checks are documented in the comments.
    """
    # Create file in the web root directory to serve
    states.file.managed(
        name=os.path.join(web_root, "foo.txt"), contents="this is my file"
    )

    # File should not be cached yet
    cached_file = os.path.join(
        minion_opts["cachedir"],
        "extrn_files",
        "base",
        f"localhost:{free_port}",
        "foo.txt",
    )
    cached_etag = cached_file + ".etag"
    assert not os.path.exists(cached_file)
    assert not os.path.exists(cached_etag)

    # Pull the file from the web server
    # Web server returns 200 status code with content:
    # 127.0.0.1 - - [08/Jan/2022 00:53:11] "GET /foo.txt HTTP/1.1" 200 -
    states.file.managed(
        name=os.path.join(web_root, "bar.txt"),
        source=f"http://localhost:{free_port}/foo.txt",
        use_etag=True,
    )

    # Now the file is cached
    assert os.path.exists(cached_file)
    assert os.path.exists(cached_etag)

    # Store the original modified time of the cached file
    # NOTE(review): mtime comparison assumes sufficient filesystem timestamp
    # granularity between these steps -- confirm on coarse-grained filesystems.
    cached_file_mtime = os.path.getmtime(cached_file)

    # Pull the file again. Etag hasn't changed. No download occurs.
    # Web server returns 304 status code and no content:
    # 127.0.0.1 - - [08/Jan/2022 00:53:11] "GET /foo.txt HTTP/1.1" 304 -
    states.file.managed(
        name=os.path.join(web_root, "bar.txt"),
        source=f"http://localhost:{free_port}/foo.txt",
        use_etag=True,
    )

    # Check that the modified time of the cached file hasn't changed
    assert cached_file_mtime == os.path.getmtime(cached_file)

    # Change file in the web root directory
    states.file.managed(
        name=os.path.join(web_root, "foo.txt"), contents="this is my changed file"
    )

    # Don't use Etag. Cached file is there, Salt won't try to download.
    # No call to the web server will be made.
    states.file.managed(
        name=os.path.join(web_root, "bar.txt"),
        source=f"http://localhost:{free_port}/foo.txt",
        use_etag=False,
    )

    # Check that the modified time of the cached file hasn't changed
    assert cached_file_mtime == os.path.getmtime(cached_file)

    # Now use Etag again. Cached file changes
    # Web server returns 200 status code with content
    # 127.0.0.1 - - [08/Jan/2022 00:53:12] "GET /foo.txt HTTP/1.1" 200 -
    states.file.managed(
        name=os.path.join(web_root, "bar.txt"),
        source=f"http://localhost:{free_port}/foo.txt",
        use_etag=True,
    )

    # The modified time of the cached file now changes
    assert cached_file_mtime != os.path.getmtime(cached_file)
def test_file_symlink_replace_existing_link(states, tmp_path):
    """file.symlink must replace a pre-existing symlink at the same name."""
    # symlink name and target for state
    name = tmp_path / "foo"
    target = tmp_path / "baz"

    # create existing symlink to replace
    old_target = tmp_path / "bar"
    name.symlink_to(old_target)

    ret = states.file.symlink(
        name=str(name),
        target=str(target),
    )

    # the state reports a fresh creation, not a modification
    assert ret.filtered == {
        "name": str(name),
        "changes": {"new": str(name)},
        "comment": f"Created new symlink {str(name)} -> {str(target)}",
        "result": True,
    }
5,881 | extra options | ##
# Copyright 2009-2023 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for installing (precompiled) software which is supplied as a tarball,
implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Alex Domingo (Vrije Universiteit Brussel)
@author: Pavel Grochal (INUITS)
"""
import os
from easybuild.framework.extensioneasyblock import ExtensionEasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import copy_dir, extract_file, remove_dir
from easybuild.tools.run import run_cmd
class Tarball(ExtensionEasyBlock):
    """
    Precompiled software supplied as a tarball: will unpack binary and copy it to the install dir
    """

    @staticmethod
    def METHOD_NAME(extra_vars=None):
        """Extra easyconfig parameters specific to Tarball."""
        extra_vars = ExtensionEasyBlock.METHOD_NAME(extra_vars=extra_vars)
        extra_vars.update({
            'install_type': [None, "Defaults to extract tarball into clean directory. Options: 'merge' merges tarball "
                                   "to existing directory, 'subdir' extracts tarball into its own sub-directory", CUSTOM],
            'preinstall_cmd': [None, "Command to execute before installation", CUSTOM],
        })
        return extra_vars

    def configure_step(self):
        """
        Dummy configure method: precompiled software needs no configuration
        """
        pass

    def build_step(self):
        """
        Dummy build method: nothing to build
        """
        pass

    def run(self, *args, **kwargs):
        """Install as extension: unpack sources and copy (via install step)."""
        if self.cfg['install_type'] is None:
            # extensions must merge into the existing installation prefix
            self.log.info("Auto-enabled install_type=merge because Tarball is being used to install an extension")
            self.cfg['install_type'] = 'merge'

        # unpack sources and call install_step to copy unpacked sources to install directory
        srcdir = extract_file(self.src, self.builddir, change_into_dir=False)
        kwargs['src'] = srcdir
        self.install_step(*args, **kwargs)

    def install_step(self, src=None):
        """Install by copying from specified source directory (or 'start_dir' if not specified)."""

        # Run preinstallopts and/or preinstall_cmd before copy of source directory
        preinstall_cmd = None
        if self.cfg['preinstallopts']:
            # drop empty/whitespace-only segments so we don't emit dangling '&&'s
            preinstall_opts = self.cfg['preinstallopts'].split('&&')
            preinstall_cmd = '&&'.join([opt for opt in preinstall_opts if opt and not opt.isspace()])

        if self.cfg['preinstall_cmd']:
            preinstall_cmd = '&& '.join([cmd for cmd in [preinstall_cmd, self.cfg['preinstall_cmd']] if cmd])

        if preinstall_cmd:
            self.log.info("Preparing installation of %s using command '%s'..." % (self.name, preinstall_cmd))
            run_cmd(preinstall_cmd, log_all=True, simple=True)

        # Copy source directory
        source_path = src or self.cfg['start_dir']

        if self.cfg['install_type'] == 'subdir':
            # Wipe and install in a sub-directory with the name of the package
            install_path = os.path.join(self.installdir, self.name.lower())
            dirs_exist_ok = False
            install_logmsg = "Copying tarball contents of %s to sub-directory %s..."
        elif self.cfg['install_type'] == 'merge':
            # Enable merging with root of existing installdir
            install_path = self.installdir
            dirs_exist_ok = True
            install_logmsg = "Merging tarball contents of %s into %s..."
        elif self.cfg['install_type'] is None:
            # Wipe and copy root of installation directory (default)
            install_path = self.installdir
            dirs_exist_ok = False
            install_logmsg = "Copying tarball contents of %s into %s after wiping it..."
        else:
            # BUGFIX: the easyconfig parameter is named 'install_type'; the
            # message previously referenced a non-existent 'index_type'.
            raise EasyBuildError("Unknown option '%s' for install_type.", self.cfg['install_type'])

        self.log.info(install_logmsg, self.name, install_path)
        if not dirs_exist_ok:
            remove_dir(install_path)
        copy_dir(source_path, install_path, symlinks=self.cfg['keepsymlinks'], dirs_exist_ok=dirs_exist_ok)

    def sanity_check_rpath(self):
        """Skip the rpath sanity check, this is binary software"""
        self.log.info("RPATH sanity check is skipped when using %s easyblock (derived from Tarball)",
                      self.__class__.__name__)
5,882 | save config | """
Common code for command line utilities (see bin/)
"""
import argparse
import difflib
import logging
import os
import sys
import tron
from tron import yaml
from tron.commands.client import Client
log = logging.getLogger("tron.commands")
class ExitCode:
    """Enumeration of exit status codes."""

    success = 0  # command completed normally
    fail = 1  # command failed
# System-wide config path; overridable via the TRON_CONFIG env var.
GLOBAL_CONFIG_FILE_NAME = os.environ.get("TRON_CONFIG",) or "/etc/tron/tron.yaml"
# Per-user config path, consulted before the global one.
CONFIG_FILE_NAME = os.path.expanduser("~/.tron")

DEFAULT_HOST = "localhost"
DEFAULT_PORT = 8089
# Fallback values used when neither CLI options nor config files provide them.
DEFAULT_CONFIG = {
    "server": "http://%s:%d" % (DEFAULT_HOST, DEFAULT_PORT),
    "display_color": False,
    "cluster_name": "Unnamed Cluster",
}

# Pre-generated job list used for fast shell tab completion.
TAB_COMPLETE_FILE = "/var/cache/tron_tab_completions"

# Indirection over the builtin open() so tests can stub out file access.
opener = open
def get_default_server():
    """Return the fallback API server URL from DEFAULT_CONFIG."""
    return DEFAULT_CONFIG["server"]
def filter_jobs_actions_runs(prefix, inputs):
    """Narrow tab-completion candidates to those matching *prefix*.

    Names are dot-separated ("job.run.action"); the number of dots marks
    the "level". Suggestions stay on the prefix's level, except when the
    prefix is itself a complete name, in which case the next level down
    is offered as well.
    """
    depth = prefix.count(".")
    if not prefix:
        # Nothing typed yet: offer every top-level job name (one dot).
        return [item for item in inputs if item.count(".") == 1]
    if depth == 0:
        # Partial job name: offer matching top-level jobs.
        return [item for item in inputs if item.count(".") == 1 and item.startswith(prefix)]
    if prefix in inputs:
        # Exact match: also offer the next level of children (+1 dot).
        return [
            item
            for item in inputs
            if item.startswith(prefix) and item.count(".") in (depth, depth + 1)
        ]
    # Otherwise stay on the same level as the prefix.
    return [item for item in inputs if item.startswith(prefix) and item.count(".") == depth]
def tron_jobs_completer(prefix, **kwargs):
    """Shell-completion callback: suggest job/run names matching *prefix*.

    Prefers the pre-generated tab-completion cache file; falls back to
    querying the API server (reusing a caller-provided 'client' if any).
    """
    if os.path.isfile(TAB_COMPLETE_FILE):
        with opener(TAB_COMPLETE_FILE, "r") as f:
            jobs = f.readlines()
        # strip trailing newlines before filtering
        return filter_jobs_actions_runs(prefix=prefix, inputs=[job.strip("\n\r") for job in jobs],)
    else:
        if "client" not in kwargs:
            client = Client(get_default_server())
        else:
            client = kwargs["client"]
        return filter_jobs_actions_runs(prefix=prefix, inputs=[job["name"] for job in client.jobs()],)
def build_option_parser(usage=None, epilog=None):
    """Build the argparse parser shared by the tron command-line tools.

    :param usage: optional usage string for the parser
    :param epilog: optional epilog text shown after the argument help
    :returns: a configured :class:`argparse.ArgumentParser`
    """
    parser = argparse.ArgumentParser(usage=usage, epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter,)
    parser.add_argument(
        "--version", action="version", version=f"{parser.prog} {tron.__version__}",
    )
    # -v may be repeated; the count selects the log level in setup_logging()
    parser.add_argument(
        "-v", "--verbose", action="count", help="Verbose logging", default=None,
    )
    parser.add_argument(
        "--server", default=None, help="Url including scheme, host and port, Default: %(default)s",
    )
    parser.add_argument(
        "--cluster_name", default=None, help="Human friendly tron cluster name",
    )
    parser.add_argument(
        "-s", "--save", action="store_true", dest="save_config", help="Save options used on this job for next time.",
    )
    return parser
def get_client_config():
    """Return the first readable, non-empty config (user file first, then
    the global one), or {} when neither can be loaded."""
    candidate_files = [CONFIG_FILE_NAME, GLOBAL_CONFIG_FILE_NAME]
    for candidate in candidate_files:
        expanded = os.path.expanduser(candidate)
        if not os.access(expanded, os.R_OK):
            continue
        config = read_config(expanded)
        if config:
            return config
    log.debug("Could not find a config in: %s." % ", ".join(candidate_files))
    return {}
def load_config(options):
    """Attempt to load a user specific configuration or a global config file
    and set any unset options based on values from the config. Finally fallback
    to DEFAULT_CONFIG for those settings.

    Also save back options to the config if options.save_config is True.
    """
    config = get_client_config()
    for opt_name in DEFAULT_CONFIG.keys():
        # only fill in options this parser defines and the user left unset
        if not hasattr(options, opt_name):
            continue
        if getattr(options, opt_name) is not None:
            continue
        default_value = DEFAULT_CONFIG[opt_name]
        setattr(options, opt_name, config.get(opt_name, default_value))
    if options.METHOD_NAME:
        # persist the effective options for future runs (-s/--save)
        METHOD_NAME(options)
def read_config(filename=CONFIG_FILE_NAME):
    """Load a YAML config file, returning {} (and logging) on failure.

    :param filename: path of the config file to read
    :returns: the parsed config, or {} when the file cannot be opened
    """
    try:
        with opener(filename, "r") as config_file:
            return yaml.load(config_file)
    except OSError:
        # BUGFIX: log the file we actually tried to open; the original always
        # logged the CONFIG_FILE_NAME constant, even for other filenames.
        log.info("Failed to read config file: %s", filename)
    return {}
def write_config(config):
    """Serialize *config* as YAML to the per-user config file."""
    with open(CONFIG_FILE_NAME, "w") as config_file:
        yaml.dump(config, config_file)
def METHOD_NAME(options):
    """Merge the current option values into the user config file on disk."""
    config = read_config()
    for opt_name in DEFAULT_CONFIG.keys():
        # only persist options this parser actually defines
        if not hasattr(options, opt_name):
            continue
        config[opt_name] = getattr(options, opt_name)
    write_config(config)
def setup_logging(options):
    """Configure root logging verbosity from the parsed -v count.

    None -> CRITICAL, 1 -> WARNING, 2 -> INFO, anything else -> NOTSET.
    """
    verbosity_levels = {
        None: logging.CRITICAL,
        1: logging.WARNING,
        2: logging.INFO,
    }
    level = verbosity_levels.get(options.verbose, logging.NOTSET)
    logging.basicConfig(
        level=level, format="%(name)s %(levelname)s %(message)s", stream=sys.stdout,
    )
def suggest_possibilities(word, possibilities, max_suggestions=6):
    """Return a human-readable "Did you mean ...?" hint for *word*.

    :param word: the (possibly misspelled) input string
    :param possibilities: candidate strings to match against
    :param max_suggestions: maximum number of suggestions to offer
    :returns: a suggestion string prefixed with a newline, or "" when no
        close match is found
    """
    suggestions = difflib.get_close_matches(word=word, possibilities=possibilities, n=max_suggestions,)
    if len(suggestions) == 1:
        return f"\nDid you mean: {suggestions[0]}?"
    elif suggestions:
        # two or more matches (the misleading 'len(...) >= 1' test could
        # only ever be reached with more than one suggestion)
        return f"\nDid you mean one of: {', '.join(suggestions)}?"
    else:
        return ""
5,883 | apply texture displace axis x | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
from colorsys import rgb_to_hls
from itertools import repeat
from mathutils import Vector, Color
from sverchok.data_structure import iter_list_match_func
from sverchok.utils.sv_bmesh_utils import bmesh_from_pydata
from sverchok.utils.modules.color_utils import color_channels
# Vertex-to-texture-space mappers, keyed by the node's coordinate mode.
mapper_funcs = {
    # UV coords arrive in [0, 1]; remap x/y to [-1, 1] to match texture space.
    'UV': lambda v, v_uv: Vector((v_uv[0]*2-1, v_uv[1]*2-1, v_uv[2])),
    # Matrix modes transform the vertex directly into the sampling space.
    'Mesh Matrix': lambda v, m: m @ v,
    'Texture Matrix': lambda v, m: m @ v
}
def end_vector(vertex, eval_v, mid_level, strength, scale_out):
    """Displace *vertex* per-axis by the mid-level-shifted texture sample.

    Only the first three components are used; extra components of the
    texture sample (e.g. alpha) are ignored.
    """
    return [
        v_comp + (tex_comp - mid_level) * strength * scale_comp
        for v_comp, tex_comp, scale_comp in zip(vertex, eval_v, scale_out)
    ]
def texture_displace_rgb(params, mapper_func):
    """Displace one vertex using the texture's RGB channels as an XYZ offset."""
    vertex, texture, scale_out, matrix, mid_level, strength = params
    mapped = mapper_func(Vector(vertex), matrix)
    color = texture.evaluate(mapped)[:]
    return end_vector(vertex, color, mid_level, strength, scale_out)
def texture_displace_hsv(params, mapper_func):
    """Displace one vertex using the texture color converted to HSV."""
    vertex, texture, scale_out, matrix, mid_level, strength = params
    mapped = mapper_func(Vector(vertex), matrix)
    hsv = Color(texture.evaluate(mapped)[:3]).hsv
    return end_vector(vertex, hsv, mid_level, strength, scale_out)
def texture_displace_hls(params, mapper_func):
    """Displace one vertex using the texture color converted to HLS."""
    vertex, texture, scale_out, matrix, mid_level, strength = params
    mapped = mapper_func(Vector(vertex), matrix)
    hls = rgb_to_hls(*texture.evaluate(mapped)[:3])
    return end_vector(vertex, hls, mid_level, strength, scale_out)
def texture_displace_vector_channel(params, mapper_func, extract_func):
    """Displace one vertex along *normal* by a single texture channel.

    *extract_func* picks the channel from the evaluated color; the scalar
    offset (channel - mid_level) * strength is applied along the normal,
    scaled per-axis by *scale_out*.
    """
    vertex, texture, scale_out, matrix, mid_level, strength, normal = params
    color = texture.evaluate(mapper_func(Vector(vertex), matrix))
    offset = (extract_func(color) - mid_level) * strength
    return [
        co + n_co * offset * scale
        for co, n_co, scale in zip(vertex, normal, scale_out)
    ]
def apply_texture_displace_rgb(verts, pols, m_prop, channel, mapper_func, result):
    """Displace every vertex in RGB-as-XYZ mode; appends one list to *result*."""
    displaced = [
        texture_displace_rgb(v_prop, mapper_func)
        for v_prop in zip(verts, *m_prop)
    ]
    result.append(displaced)
def apply_texture_displace_hsv(verts, pols, m_prop, channel, mapper_func, result):
    """Displace every vertex in HSV-as-XYZ mode; appends one list to *result*."""
    displaced = [
        texture_displace_hsv(v_prop, mapper_func)
        for v_prop in zip(verts, *m_prop)
    ]
    result.append(displaced)
def apply_texture_displace_hls(verts, pols, m_prop, channel, mapper_func, result):
    """Displace every vertex in HLS-as-XYZ mode; appends one list to *result*."""
    displaced = [
        texture_displace_hls(v_prop, mapper_func)
        for v_prop in zip(verts, *m_prop)
    ]
    result.append(displaced)
def METHOD_NAME(verts, pols, m_prop, channel, mapper_func, result):
    """Displace along the global X axis."""
    x_axis = [1, 0, 0]
    apply_texture_displace_axis(verts, pols, m_prop, channel, mapper_func, result, x_axis)
def apply_texture_displace_axis_y(verts, pols, m_prop, channel, mapper_func, result):
    """Displace along the global Y axis."""
    y_axis = [0, 1, 0]
    apply_texture_displace_axis(verts, pols, m_prop, channel, mapper_func, result, y_axis)
def apply_texture_displace_axis_z(verts, pols, m_prop, channel, mapper_func, result):
    """Displace along the global Z axis."""
    z_axis = [0, 0, 1]
    apply_texture_displace_axis(verts, pols, m_prop, channel, mapper_func, result, z_axis)
def apply_texture_displace_axis_custom(verts, pols, m_prop, channel, mapper_func, result):
    """Displace along a per-vertex custom axis (the axis list is m_prop's last entry)."""
    extract = color_channels[channel][1]
    displaced = []
    for v_prop in zip(verts, *m_prop[:-1], m_prop[-1]):
        displaced.append(texture_displace_vector_channel(v_prop, mapper_func, extract))
    result.append(displaced)
def apply_texture_displace_axis(verts, pols, m_prop, channel, mapper_func, result, axis):
    """Displace every vertex along the fixed *axis* using one color channel."""
    extract = color_channels[channel][1]
    displaced = [
        texture_displace_vector_channel(v_prop, mapper_func, extract)
        for v_prop in zip(verts, *m_prop, repeat(axis))
    ]
    result.append(displaced)
def apply_texture_displace_normal(verts, pols, m_prop, channel, mapper_func, result):
    # Vertex normals are not part of the inputs, so build a temporary BMesh
    # from the polygon topology just to compute them.
    bm = bmesh_from_pydata(verts, [], pols, normal_update=True)
    normals = [v.normal for v in bm.verts]
    func = texture_displace_vector_channel
    # color_channels[channel][1] is the channel-extraction function.
    extract_func = color_channels[channel][1]
    # Normals must be consumed before bm.free() releases the BMesh below.
    result.append([func(v_prop, mapper_func, extract_func) for v_prop in zip(verts, *m_prop, normals)])
    bm.free()
def meshes_texture_diplace(params, constant, matching_f):
    '''
    This function prepares the data to pass to the different displace functions.
    params are verts, pols, texture, scale_out, matrix, mid_level, strength, axis
    - verts, scale_out, and axis should be list as [[[float, float, float],],] (Level 3)
    - pols should be list as [[[int, int, int, ...],],] (Level 3)
    - texture can be [texture, texture] or [[texture, texture],[texture]] for per vertex texture
    - matrix can be [matrix, matrix] or [[matrix, matrix],[texture]] for per vertex matrix,
    in case of UV Coors in mapping_mode it should be [[[float, float, float],],] (Level 3)
    mid_level and strength should be list as [[float, float, ..], [float, ..], ..] (Level 2)
    desired_levels = [3, 3, 2, 3, 2 or 3, 2, 2, 3]
    constant are the function options (data that does not need to be matched)
    matching_f stands for list matching formula to use
    '''
    result = []
    displace_mode, displace_function, color_channel, match_mode, mapping_mode = constant
    # First-level matching: repeat per-mesh inputs so every mesh gets a value.
    params = matching_f(params)
    local_match = iter_list_match_func[match_mode]
    mapper_func = mapper_funcs[mapping_mode]
    for props in zip(*params):
        verts, pols, texture, scale_out, matrix, mid_level, strength, axis = props
        if mapping_mode == 'Texture Matrix':
            # Texture space lookups need the inverse of the given matrix.
            if type(matrix) == list:
                matrix = [m.inverted() for m in matrix]
            else:
                matrix = [matrix.inverted()]
        elif mapping_mode == 'Mesh Matrix':
            if not type(matrix) == list:
                matrix = [matrix]
        # Normalize a single texture to a one-element list so the per-vertex
        # matching below always sees a list.
        if not type(texture) == list:
            texture = [texture]
        if displace_mode == 'Custom Axis':
            # Custom axes are normalized once per mesh before matching.
            axis_n = [Vector(v).normalized() for v in axis]
            m_prop = local_match([texture, scale_out, matrix, mid_level, strength, axis_n])
        else:
            m_prop = local_match([texture, scale_out, matrix, mid_level, strength])
        displace_function(verts, pols, m_prop, color_channel, mapper_func, result)
    return result
displace_funcs = {
'NORMAL': apply_texture_displace_normal,
'X': METHOD_NAME,
'Y': apply_texture_displace_axis_y,
'Z': apply_texture_displace_axis_z,
'Custom Axis': apply_texture_displace_axis_custom,
'RGB to XYZ': apply_texture_displace_rgb,
'HSV to XYZ': apply_texture_displace_hsv,
'HLS to XYZ': apply_texture_displace_hls
|
5,884 | earning and deduction entries does not exists | # Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.utils import flt, getdate
class DuplicateAssignment(frappe.ValidationError):
	"""Raised when a submitted Salary Structure Assignment already exists
	for the same employee and from_date."""

	pass
class SalaryStructureAssignment(Document):
	"""Assignment of a Salary Structure to an Employee from a given date.

	Validates date sanity, duplicate assignments, income-tax-slab currency
	and cost-center distribution, and warns when opening tax balances
	("till date" figures) are missing for mid-year joiners.
	"""

	def onload(self):
		# Tell the client form whether earning/deduction opening entries are
		# missing, so it can prompt the user to fill them in.
		if self.employee:
			self.set_onload(
				"earning_and_deduction_entries_does_not_exists",
				self.METHOD_NAME(),
			)

	def validate(self):
		self.validate_dates()
		self.validate_income_tax_slab()
		self.set_payroll_payable_account()
		if self.METHOD_NAME():
			# No salary slips and no opening balances: warn (don't block) so
			# the user can supply till-date figures for correct future tax
			# calculation.
			if not self.taxable_earnings_till_date and not self.tax_deducted_till_date:
				frappe.msgprint(
					_(
						"""
						Not found any salary slip record(s) for the employee {0}. <br><br>
						Please specify {1} and {2} (if any),
						for the correct tax calculation in future salary slips.
						"""
					).format(
						self.employee,
						"<b>" + _("Taxable Earnings Till Date") + "</b>",
						"<b>" + _("Tax Deducted Till Date") + "</b>",
					),
					indicator="orange",
					title=_("Warning"),
				)
		if not self.get("payroll_cost_centers"):
			self.set_payroll_cost_centers()
		self.validate_cost_center_distribution()

	def validate_dates(self):
		"""Reject from_date outside the joining/relieving window and
		duplicate submitted assignments for the same employee + date."""
		joining_date, relieving_date = frappe.db.get_value(
			"Employee", self.employee, ["date_of_joining", "relieving_date"]
		)
		if self.from_date:
			if frappe.db.exists(
				"Salary Structure Assignment",
				{"employee": self.employee, "from_date": self.from_date, "docstatus": 1},
			):
				frappe.throw(_("Salary Structure Assignment for Employee already exists"), DuplicateAssignment)
			if joining_date and getdate(self.from_date) < joining_date:
				frappe.throw(
					_("From Date {0} cannot be before employee's joining Date {1}").format(
						self.from_date, joining_date
					)
				)
			# flag - old_employee is for migrating the old employees data via patch
			if relieving_date and getdate(self.from_date) > relieving_date and not self.flags.old_employee:
				frappe.throw(
					_("From Date {0} cannot be after employee's relieving Date {1}").format(
						self.from_date, relieving_date
					)
				)

	def validate_income_tax_slab(self):
		"""Ensure the selected Income Tax Slab uses the same currency."""
		if not self.income_tax_slab:
			return
		income_tax_slab_currency = frappe.db.get_value(
			"Income Tax Slab", self.income_tax_slab, "currency"
		)
		if self.currency != income_tax_slab_currency:
			frappe.throw(
				_("Currency of selected Income Tax Slab should be {0} instead of {1}").format(
					self.currency, income_tax_slab_currency
				)
			)

	def set_payroll_payable_account(self):
		"""Default the payable account from the Company, falling back to a
		"Payroll Payable" leaf account in the company's default currency."""
		if not self.payroll_payable_account:
			payroll_payable_account = frappe.db.get_value(
				"Company", self.company, "default_payroll_payable_account"
			)
			if not payroll_payable_account:
				payroll_payable_account = frappe.db.get_value(
					"Account",
					{
						"account_name": _("Payroll Payable"),
						"company": self.company,
						"account_currency": frappe.db.get_value("Company", self.company, "default_currency"),
						"is_group": 0,
					},
				)
			self.payroll_payable_account = payroll_payable_account

	@frappe.whitelist()
	def set_payroll_cost_centers(self):
		"""Reset the cost-center table to the employee/department default at 100%."""
		self.payroll_cost_centers = []
		default_payroll_cost_center = self.get_payroll_cost_center()
		if default_payroll_cost_center:
			self.append(
				"payroll_cost_centers", {"cost_center": default_payroll_cost_center, "percentage": 100}
			)

	def get_payroll_cost_center(self):
		"""Return the employee's payroll cost center, else the department's."""
		payroll_cost_center = frappe.db.get_value("Employee", self.employee, "payroll_cost_center")
		if not payroll_cost_center and self.department:
			payroll_cost_center = frappe.db.get_value("Department", self.department, "payroll_cost_center")
		return payroll_cost_center

	def validate_cost_center_distribution(self):
		"""Cost-center percentages must sum to exactly 100."""
		if self.get("payroll_cost_centers"):
			total_percentage = sum([flt(d.percentage) for d in self.get("payroll_cost_centers", [])])
			if total_percentage != 100:
				frappe.throw(_("Total percentage against cost centers should be 100"))

	@frappe.whitelist()
	def METHOD_NAME(self):
		"""Return True when opening earning/deduction ("till date") entries
		are relevant but missing; False otherwise (incl. feature disabled)."""
		if self.enabled_settings_to_specify_earnings_and_deductions_till_date():
			if not self.joined_in_the_same_month() and not self.have_salary_slips():
				return True
			else:
				# Submitted/cancelled docs that already carry till-date values
				# are still reported so the form can surface them.
				if self.docstatus in [1, 2] and (
					self.taxable_earnings_till_date or self.tax_deducted_till_date
				):
					return True
				return False
		else:
			return False

	def enabled_settings_to_specify_earnings_and_deductions_till_date(self):
		"""returns True if settings are enabled to specify earnings and deductions till date else False"""
		if frappe.db.get_single_value(
			"Payroll Settings", "define_opening_balance_for_earning_and_deductions"
		):
			return True
		return False

	def have_salary_slips(self):
		"""returns True if salary structure assignment has salary slips else False"""
		salary_slip = frappe.db.get_value(
			"Salary Slip", filters={"employee": self.employee, "docstatus": 1}
		)
		if salary_slip:
			return True
		return False

	def joined_in_the_same_month(self):
		"""returns True if employee joined in same month as salary structure assignment from date else False"""
		date_of_joining = frappe.db.get_value("Employee", self.employee, "date_of_joining")
		from_date = getdate(self.from_date)
		if not self.from_date or not date_of_joining:
			return False
		elif date_of_joining.month == from_date.month:
			return True
		else:
			return False
def get_assigned_salary_structure(employee, on_date):
	"""Return the salary structure assigned to *employee* as of *on_date*.

	Picks the latest submitted assignment whose from_date is on or before
	*on_date*; returns None when either argument is falsy or no assignment
	matches.
	"""
	if not (employee and on_date):
		return None
	rows = frappe.db.sql(
		"""
		select salary_structure from `tabSalary Structure Assignment`
		where employee=%(employee)s
		and docstatus = 1
		and %(on_date)s >= from_date order by from_date desc limit 1""",
		{"employee": employee, "on_date": on_date},
	)
	if not rows:
		return None
	return rows[0][0]
@frappe.whitelist()
def get_employee_currency(employee):
	"""Return the currency of the employee's Salary Structure Assignment.

	Raises a frappe ValidationError (via frappe.throw) when the employee
	has no assignment yet.
	"""
	employee_currency = frappe.db.get_value(
		"Salary Structure Assignment", {"employee": employee}, "currency"
	)
	if not employee_currency:
		frappe.throw(
			# Fixed user-facing typo: "Stucture" -> "Structure".
			_("There is no Salary Structure assigned to {0}. First assign a Salary Structure.").format(
				employee
			)
		)
	return employee_currency
5,885 | get git revision | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Utilities for version control systems"""
import os
import os.path as osp
import subprocess
import sys
# Local imports
from spyder.config.base import running_under_pytest
from spyder.utils import programs
from spyder.utils.misc import abspardir
SUPPORTED = [
{
'name': 'Mercurial',
'rootdir': '.hg',
'actions': dict(
commit=( ('thg', ['commit']),
('hgtk', ['commit']) ),
browse=( ('thg', ['log']),
('hgtk', ['log']) ))
}, {
'name': 'Git',
'rootdir': '.git',
'actions': dict(
commit=( ('git', ['gui' if os.name == 'nt' else 'cola']), ),
browse=( ('gitk', []), ))
}]
class ActionToolNotFound(RuntimeError):
    """Raised when no installed helper tool can perform a VCS action.

    Carries the VCS name, the requested action, and the list of tool
    names that would have been able to handle it.
    """

    def __init__(self, vcsname, action, tools):
        super().__init__()
        self.vcsname = vcsname
        self.action = action
        self.tools = tools
def get_vcs_info(path):
    """Return the support dict of the VCS whose control directory lives
    directly under *path*, or None when none is found."""
    for info in SUPPORTED:
        if osp.isdir(osp.join(path, info['rootdir'])):
            return info
    return None
def get_vcs_root(path):
    """Walk upward from *path* to the nearest VCS root directory.

    Returns the absolute root path, or None when *path* is not inside a
    supported repository (the walk stops once the parent no longer
    changes, i.e. at the filesystem root).
    """
    while get_vcs_info(path) is None:
        parent = abspardir(path)
        if parent == path:
            return None
        path = parent
    return osp.abspath(path)
def is_vcs_repository(path):
    """Tell whether *path* lives inside a supported VCS repository."""
    root = get_vcs_root(path)
    return root is not None
def run_vcs_tool(path, action):
    """If path is a valid VCS repository, run the corresponding VCS tool
    Supported VCS actions: 'commit', 'browse'
    Return False if the VCS tool is not installed"""
    info = get_vcs_info(get_vcs_root(path))
    tools = info['actions'][action]
    # Try each candidate tool in preference order and launch the first one
    # found on PATH.  Returning from inside the loop skips the for/else
    # clause, so ActionToolNotFound is raised only when *no* tool matched.
    for tool, args in tools:
        if programs.find_program(tool):
            if not running_under_pytest():
                # Fire-and-forget: the external GUI tool runs detached.
                programs.run_program(tool, args, cwd=path)
            else:
                # Under pytest only report that the tool would have run.
                return True
            return
    else:
        cmdnames = [name for name, args in tools]
        raise ActionToolNotFound(info['name'], action, cmdnames)
def is_hg_installed():
    """Return True if the Mercurial executable can be found on PATH."""
    hg_path = programs.find_program('hg')
    return hg_path is not None
def get_hg_revision(repopath):
    """Return Mercurial revision for the repository located at repopath
    Result is a tuple (global, local, branch), with None values on error
    For example:
        >>> get_hg_revision(".")
        ('eba7273c69df+', '2015+', 'default')
    """
    try:
        assert osp.isdir(osp.join(repopath, '.hg'))
        proc = programs.run_program('hg', ['id', '-nib', repopath])
        output, _err = proc.communicate()
        # output is now: ('eba7273c69df+ 2015+ default\n', None)
        # Split 2 times max to allow spaces in branch names.
        return tuple(output.decode().strip().split(None, 2))
    except (subprocess.CalledProcessError, AssertionError, AttributeError,
            OSError):
        # Any failure (no repo, hg missing, bad output) maps to the same
        # "unknown revision" triple.
        return (None, None, None)
def METHOD_NAME(repopath):
    """
    Return Git revision for the repository located at repopath
    Result is a tuple (latest commit hash, branch), falling back to
    SPY_COMMIT and SPY_BRANCH environment variables, if present, else None.
    """
    try:
        git = programs.find_git()
        assert git is not None and osp.isdir(osp.join(repopath, '.git'))
        # Short hash of HEAD.
        commit = programs.run_program(git, ['rev-parse', '--short', 'HEAD'],
                                      cwd=repopath).communicate()
        commit = commit[0].strip()
        commit = commit.decode(sys.getdefaultencoding())
        # Branch: parse `git branch` output; the active branch is the single
        # line prefixed with '*'.
        branches = programs.run_program(git, ['branch'],
                                        cwd=repopath).communicate()
        branches = branches[0]
        branches = branches.decode(sys.getdefaultencoding())
        branches = branches.split('\n')
        active_branch = [b for b in branches if b.startswith('*')]
        if len(active_branch) != 1:
            branch = None
        else:
            branch = active_branch[0].split(None, 1)[1]
        return commit, branch
    except (subprocess.CalledProcessError, AssertionError, AttributeError,
            OSError):
        # Not a git checkout (e.g. installed package): fall back to env vars
        # injected at build time, or (None, None).
        return os.environ.get('SPY_COMMIT'), os.environ.get('SPY_BRANCH')
def get_git_refs(repopath):
    """
    Return Git active branch, state, branches (plus tags).

    Returns a 3-tuple ``(branches + tags, active_branch, modified_files)``;
    all parts are empty when git is unavailable or any command fails.
    """
    tags = []
    branches = []
    branch = ''
    files_modifed = []
    # Accept a file path too: use its containing directory as the repo path.
    if os.path.isfile(repopath):
        repopath = os.path.dirname(repopath)
    git = programs.find_git()
    if git:
        try:
            # Files modified (porcelain short status, one entry per line).
            out, err = programs.run_program(
                git, ['status', '-s'],
                cwd=repopath,
            ).communicate()
            out = out.decode(sys.getdefaultencoding())
            files_modifed = [line.strip() for line in out.split('\n') if line]
            # Tags
            out, err = programs.run_program(
                git, ['tag'],
                cwd=repopath,
            ).communicate()
            out = out.decode(sys.getdefaultencoding())
            tags = [line.strip() for line in out.split('\n') if line]
            # Branches (local and remote); the '*' prefix marks the active one.
            out, err = programs.run_program(
                git, ['branch', '-a'],
                cwd=repopath,
            ).communicate()
            out = out.decode(sys.getdefaultencoding())
            lines = [line.strip() for line in out.split('\n') if line]
            for line in lines:
                if line.startswith('*'):
                    line = line.replace('*', '').strip()
                    branch = line
                branches.append(line)
        except (subprocess.CalledProcessError, AttributeError, OSError):
            # Best effort: return whatever was collected before the failure.
            pass
    return branches + tags, branch, files_modifed
def get_git_remotes(fpath):
    """Return git remotes for repo on fpath.

    Result maps remote name -> URL.  Fetch and push entries for the same
    remote collapse into a single key (the last one wins).
    """
    remote_data = {}
    data, __ = programs.run_program(
        'git',
        ['remote', '-v'],
        cwd=osp.dirname(fpath),
    ).communicate()
    data = data.decode(sys.getdefaultencoding())
    lines = [line.strip() for line in data.split('\n') if line]
    for line in lines:
        if line:
            # Each line looks like "origin<TAB>url (fetch|push)"; drop the
            # trailing "(fetch)"/"(push)" token.
            remote, value = line.split('\t')
            remote_data[remote] = value.split(' ')[0]
    return remote_data
def remote_to_url(remote):
    """Convert a git remote (SSH or HTTPS form) to a browsable https URL.

    Examples:
        'git@github.com:user/repo.git'      -> 'https://github.com/user/repo'
        'https://github.com/user/repo.git'  -> 'https://github.com/user/repo'
    """
    # Strip '.git' only as a suffix: str.replace would also mangle URLs that
    # merely contain '.git' elsewhere (e.g. 'org/my.github-tools').
    if remote.endswith('.git'):
        remote = remote[:-len('.git')]
    if remote.startswith('git@'):
        # SSH form: drop the user prefix and turn 'host:path' into 'host/path'.
        url = remote[len('git@'):].replace(':', '/')
        return 'https://' + url
    return remote
5,886 | test restore with rename | """Test snapshot restore functionality"""
# pylint: disable=missing-function-docstring, missing-class-docstring, line-too-long
import os
import time
from curator.helpers.getters import get_indices, get_snapshot
from . import CuratorTestCase
from . import testvars
HOST = os.environ.get('TEST_ES_SERVER', 'http://127.0.0.1:9200')
# ' repository: {0}\n'
# ' name: {1}\n'
# ' indices: {2}\n'
# ' include_aliases: {3}\n'
# ' ignore_unavailable: {4}\n'
# ' include_global_state: {5}\n'
# ' partial: {6}\n'
# ' rename_pattern: {7}\n'
# ' rename_replacement: {8}\n'
# ' extra_settings: {9}\n'
# ' wait_for_completion: {10}\n'
# ' skip_repo_fs_check: {11}\n'
# ' timeout_override: {12}\n'
# ' wait_interval: {13}\n'
# ' max_wait: {14}\n'
class TestActionFileRestore(CuratorTestCase):
    """Test file-based configuration restore operations"""

    def test_restore(self):
        """Test restore action"""
        # Create three indices, snapshot them, delete them, then restore
        # from the snapshot and verify all three came back.
        indices = []
        for i in range(1,4):
            self.add_docs(f'my_index{i}')
            indices.append(f'my_index{i}')
        snap_name = 'snapshot1'
        self.create_snapshot(snap_name, ','.join(indices))
        snapshot = get_snapshot(self.client, self.args['repository'], '*')
        self.assertEqual(1, len(snapshot['snapshots']))
        assert 1 == len(snapshot['snapshots'])
        self.client.indices.delete(index=','.join(indices))
        self.assertEqual([], get_indices(self.client))
        assert not get_indices(self.client)
        self.write_config(self.args['configfile'], testvars.client_config.format(HOST))
        # Positional args follow the template ordering documented above the
        # class (repository, name, indices, include_aliases, ...).
        self.write_config(self.args['actionfile'],
            testvars.restore_snapshot_proto.format(
                self.args['repository'],
                snap_name,
                indices,
                False,
                False,
                True,
                False,
                ' ',
                ' ',
                ' ',
                True,
                False,
                301,
                1,
                3
            )
        )
        self.invoke_runner()
        restored_indices = sorted(get_indices(self.client))
        self.assertEqual(indices, restored_indices)
        assert indices == restored_indices
        # The test runs so fast that it tries to execute the cleanup step
        # and delete the repository before Elasticsearch is actually ready
        time.sleep(0.5)

    def METHOD_NAME(self):
        """Test restore action with renaming enabled"""
        # Same flow as test_restore, but with rename_pattern/replacement set
        # so restored indices are renamed my_indexN -> new_indexN.
        indices = []
        for i in range(1,4):
            self.add_docs(f'my_index{i}')
            indices.append(f'my_index{i}')
        snap_name = 'snapshot1'
        self.create_snapshot(snap_name, ','.join(indices))
        snapshot = get_snapshot(self.client, self.args['repository'], '*')
        time.sleep(1)
        self.assertEqual(1, len(snapshot['snapshots']))
        assert 1 == len(snapshot['snapshots'])
        self.client.indices.delete(index=','.join(indices))
        self.assertEqual([], get_indices(self.client))
        assert not get_indices(self.client)
        self.write_config(self.args['configfile'], testvars.client_config.format(HOST))
        self.write_config(self.args['actionfile'],
            testvars.restore_snapshot_proto.format(
                self.args['repository'],
                snap_name,
                indices,
                False,
                False,
                True,
                False,
                'my_index(.+)',
                'new_index$1',
                ' ',
                True,
                False,
                301,
                1,
                3,
            )
        )
        self.invoke_runner()
        time.sleep(1)
        restored_indices = sorted(get_indices(self.client))
        self.assertEqual(
            ['new_index1', 'new_index2', 'new_index3'],
            restored_indices
        )
        assert ['new_index1', 'new_index2', 'new_index3'] == restored_indices
        # The test runs so fast that it tries to execute the cleanup step
        # and delete the repository before Elasticsearch is actually ready
        time.sleep(1)

    def test_restore_wildcard(self):
        """Test restore with wildcard"""
        # Snapshot both my_* and not_my_* indices, then restore with the
        # 'my_*' wildcard and verify only the matching indices come back.
        indices = []
        my_indices = []
        wildcard = ['my_*']
        for i in range(1,4):
            for prefix in ['my_', 'not_my_']:
                self.add_docs(f'{prefix}index{i}')
                indices.append(f'{prefix}index{i}')
                if prefix == 'my_':
                    my_indices.append(f'{prefix}index{i}')
        snap_name = 'snapshot1'
        self.create_snapshot(snap_name, ','.join(indices))
        snapshot = get_snapshot(self.client, self.args['repository'], '*')
        self.assertEqual(1, len(snapshot['snapshots']))
        assert 1 == len(snapshot['snapshots'])
        self.client.indices.delete(index=','.join(indices))
        self.assertEqual([], get_indices(self.client))
        assert not get_indices(self.client)
        self.write_config(self.args['configfile'], testvars.client_config.format(HOST))
        self.write_config(self.args['actionfile'],
            testvars.restore_snapshot_proto.format(
                self.args['repository'],
                snap_name,
                wildcard,
                False,
                False,
                True,
                False,
                ' ',
                ' ',
                ' ',
                True,
                False,
                301,
                1,
                3
            )
        )
        self.invoke_runner()
        restored_indices = sorted(get_indices(self.client))
        self.assertEqual(my_indices, restored_indices)
        assert my_indices == restored_indices
        # The test runs so fast that it tries to execute the cleanup step
        # and delete the repository before Elasticsearch is actually ready
        time.sleep(0.5)
class TestCLIRestore(CuratorTestCase):
    """Restore via the curator CLI singleton command (no action file)."""

    def test_restore(self):
        # Same snapshot/delete/restore cycle as the action-file variant,
        # but driven through the `curator_cli restore` subcommand.
        indices = []
        for i in range(1, 4):
            self.add_docs(f'my_index{i}')
            indices.append(f'my_index{i}')
        snap_name = 'snapshot1'
        self.create_snapshot(snap_name, ','.join(indices))
        snapshot = get_snapshot(self.client, self.args['repository'], '*')
        self.assertEqual(1, len(snapshot['snapshots']))
        assert 1 == len(snapshot['snapshots'])
        self.client.indices.delete(index=f'{",".join(indices)}')
        self.assertEqual([], get_indices(self.client))
        assert not get_indices(self.client)
        args = self.get_runner_args()
        args += [
            '--config', self.args['configfile'],
            'restore',
            '--repository', self.args['repository'],
            '--name', snap_name,
            '--index', indices[0],
            '--index', indices[1],
            '--index', indices[2],
            '--wait_interval', '1',
            '--max_wait', '3',
            '--filter_list', '{"filtertype":"none"}',
        ]
        # self.assertEqual(0, self.run_subprocess(args, logname='TestCLIRestore.test_restore'))
        assert 0 == self.run_subprocess(args, logname='TestCLIRestore.test_restore')
        restored_indices = sorted(get_indices(self.client))
        self.assertEqual(indices, restored_indices)
        assert indices == restored_indices
        # The test runs so fast that it tries to execute the cleanup step
        # and delete the repository before Elasticsearch is actually ready
        time.sleep(0.5)
5,887 | test dashboard rendering with two courses | """
Tests for the recently enrolled messaging within the Dashboard.
"""
import datetime
import ddt
from django.urls import reverse
from django.utils.timezone import now
from opaque_keys.edx import locator
from pytz import UTC
from common.test.utils import XssTestMixin
from common.djangoapps.student.models import CourseEnrollment, DashboardConfiguration
from common.djangoapps.student.tests.factories import UserFactory
from common.djangoapps.student.views import get_course_enrollments
from common.djangoapps.student.views.dashboard import _get_recently_enrolled_courses
from openedx.core.djangolib.testing.utils import skip_unless_lms
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.tests.factories import CourseFactory # lint-amnesty, pylint: disable=wrong-import-order
@skip_unless_lms
@ddt.ddt
class TestRecentEnrollments(ModuleStoreTestCase, XssTestMixin):
    """
    Unit tests for getting the list of courses for a logged in user

    Exercises the "recently enrolled" filtering/sorting helpers and the
    enrollment-message rendering on the dashboard.
    """
    PASSWORD = 'test'

    def setUp(self):
        """
        Add a student
        """
        super().setUp()
        self.student = UserFactory()
        self.student.set_password(self.PASSWORD)
        self.student.save()
        # Old Course: enrollment backdated far enough that it never counts
        # as "recent" regardless of the configured timeout.
        old_course_location = locator.CourseLocator('Org0', 'Course0', 'Run0')
        __, enrollment = self._create_course_and_enrollment(old_course_location)
        enrollment.created = datetime.datetime(1900, 12, 31, 0, 0, 0, 0, tzinfo=UTC)
        enrollment.save()
        # New Course
        course_location = locator.CourseLocator('Org1', 'Course1', 'Run1')
        self.course, self.enrollment = self._create_course_and_enrollment(course_location)

    def _create_course_and_enrollment(self, course_location):
        """ Creates a course and associated enrollment. """
        course = CourseFactory.create(
            org=course_location.org,
            number=course_location.course,
            run=course_location.run
        )
        enrollment = CourseEnrollment.enroll(self.student, course.id)
        return course, enrollment

    def _configure_message_timeout(self, timeout):
        """Configure the amount of time the enrollment message will be displayed. """
        config = DashboardConfiguration(recent_enrollment_time_delta=timeout)
        config.save()

    def test_recently_enrolled_courses(self):
        """
        Test if the function for filtering recent enrollments works appropriately.
        """
        self._configure_message_timeout(60)
        # get courses through iterating all courses
        courses_list = list(get_course_enrollments(self.student, None, []))
        assert len(courses_list) == 2
        recent_course_list = _get_recently_enrolled_courses(courses_list)
        assert len(recent_course_list) == 1

    def test_zero_second_delta(self):
        """
        Tests that the recent enrollment list is empty if configured to zero seconds.
        """
        self._configure_message_timeout(0)
        courses_list = list(get_course_enrollments(self.student, None, []))
        assert len(courses_list) == 2
        recent_course_list = _get_recently_enrolled_courses(courses_list)
        assert len(recent_course_list) == 0

    def test_enrollments_sorted_most_recent(self):
        """
        Test that the list of newly created courses are properly sorted to show the most
        recent enrollments first.
        Also test recent enrollment message rendered appropriately for more than two courses.
        """
        self._configure_message_timeout(600)
        # Create a number of new enrollments and courses, and force their creation behind
        # the first enrollment
        courses = []
        for idx, seconds_past in zip(list(range(2, 6)), [5, 10, 15, 20]):
            course_location = locator.CourseLocator(
                f'Org{idx}',
                f'Course{idx}',
                f'Run{idx}'
            )
            course, enrollment = self._create_course_and_enrollment(course_location)
            enrollment.created = now() - datetime.timedelta(seconds=seconds_past)
            enrollment.save()
            courses.append(course)
        courses_list = list(get_course_enrollments(self.student, None, []))
        assert len(courses_list) == 6
        recent_course_list = _get_recently_enrolled_courses(courses_list)
        assert len(recent_course_list) == 5
        # Index 0 is the enrollment from setUp (created "now"); the rest
        # follow in order of decreasing recency.
        assert recent_course_list[1].course.id == courses[0].id
        assert recent_course_list[2].course.id == courses[1].id
        assert recent_course_list[3].course.id == courses[2].id
        assert recent_course_list[4].course.id == courses[3].id
        self.client.login(username=self.student.username, password=self.PASSWORD)
        response = self.client.get(reverse("dashboard"))
        # verify recent enrollment message
        self.assertContains(response, 'Thank you for enrolling in:')
        self.assertContains(
            response,
            ', '.join(enrollment.course.display_name for enrollment in recent_course_list)
        )

    def test_dashboard_rendering_with_single_course(self):
        """
        Tests that the dashboard renders the recent enrollment message appropriately for single course.
        """
        self._configure_message_timeout(600)
        self.client.login(username=self.student.username, password=self.PASSWORD)
        response = self.client.get(reverse("dashboard"))
        self.assertContains(
            response,
            f"Thank you for enrolling in {self.course.display_name}"
        )

    def METHOD_NAME(self):
        """
        Tests that the dashboard renders the recent enrollment message appropriately for two courses.
        """
        self._configure_message_timeout(600)
        course_location = locator.CourseLocator(
            'Org2',
            'Course2',
            'Run2'
        )
        course, _ = self._create_course_and_enrollment(course_location)  # lint-amnesty, pylint: disable=unused-variable
        self.client.login(username=self.student.username, password=self.PASSWORD)
        response = self.client.get(reverse("dashboard"))
        courses_enrollments = list(get_course_enrollments(self.student, None, []))
        courses_enrollments.sort(key=lambda x: x.created, reverse=True)
        assert len(courses_enrollments) == 3
        recent_course_enrollments = _get_recently_enrolled_courses(courses_enrollments)
        assert len(recent_course_enrollments) == 2
        # Two courses are joined by "and" rather than commas.
        self.assertContains(
            response,
            "Thank you for enrolling in:"
        )
        self.assertContains(
            response,
            ' and '.join(enrollment.course.display_name for enrollment in recent_course_enrollments)
        )

    def test_dashboard_escaped_rendering(self):
        """
        Tests that the dashboard renders the escaped recent enrollment messages appropriately.
        """
        self._configure_message_timeout(600)
        self.client.login(username=self.student.username, password=self.PASSWORD)
        # New Course whose display name carries an XSS payload; it must be
        # escaped in the rendered dashboard.
        course_location = locator.CourseLocator('TestOrg', 'TestCourse', 'TestRun')
        xss_content = "<script>alert('XSS')</script>"
        course = CourseFactory.create(
            org=course_location.org,
            number=course_location.course,
            run=course_location.run,
            display_name=xss_content
        )
        CourseEnrollment.enroll(self.student, course.id)
        response = self.client.get(reverse("dashboard"))
        self.assertContains(response, "Thank you for enrolling in")
        # Check if response is escaped
        self.assert_no_xss(response, xss_content)
5,888 | perm | import itertools
from ..core import symbols
from ..utilities.iterables import rotate_left
from .permutations import Permutation
def symmetric(n):
    """Generate all n! elements of the symmetric group Sn.

    Examples
    ========

    >>> Permutation.print_cyclic = True
    >>> list(symmetric(3))
    [Permutation(2), Permutation(1, 2), Permutation(2)(0, 1),
    Permutation(0, 1, 2), Permutation(0, 2, 1), Permutation(0, 2)]
    """
    yield from map(Permutation, itertools.permutations(range(n)))
def cyclic(n):
    """Generate the n elements of the cyclic group Cn.

    Each element is a successive left rotation of the identity
    arrangement [0, 1, ..., n-1].

    Examples
    ========

    >>> Permutation.print_cyclic = True
    >>> list(cyclic(5))
    [Permutation(4), Permutation(0, 1, 2, 3, 4), Permutation(0, 2, 4, 1, 3),
    Permutation(0, 3, 1, 4, 2), Permutation(0, 4, 3, 2, 1)]

    See Also
    ========

    dihedral
    """
    arrangement = list(range(n))
    for _ in range(n):
        yield Permutation(arrangement)
        arrangement = rotate_left(arrangement, 1)
def alternating(n):
    """Generate the even permutations of Sn, i.e. the alternating group An.

    Examples
    ========

    >>> Permutation.print_cyclic = True
    >>> list(alternating(3))
    [Permutation(2), Permutation(0, 1, 2), Permutation(0, 2, 1)]
    """
    for candidate in itertools.permutations(range(n)):
        permutation = Permutation(candidate)
        if permutation.is_even:
            yield permutation
def dihedral(n):
    """Generate the dihedral group of order 2n, Dn.

    Elements are produced as rotation/reflection pairs acting on n points,
    except for the special cases n=1 (the group S2) and n=2 (the Klein
    4-group) where that's not possible and embeddings in S2 and S4
    respectively are given.

    Examples
    ========

    >>> Permutation.print_cyclic = True
    >>> list(dihedral(3))
    [Permutation(2), Permutation(0, 2), Permutation(0, 1, 2),
    Permutation(1, 2), Permutation(0, 2, 1), Permutation(2)(0, 1)]

    See Also
    ========

    cyclic
    """
    if n == 1:
        yield Permutation([0, 1])
        yield Permutation([1, 0])
    elif n == 2:
        for image in ([0, 1, 2, 3], [1, 0, 3, 2], [2, 3, 0, 1], [3, 2, 1, 0]):
            yield Permutation(image)
    else:
        arrangement = list(range(n))
        for _ in range(n):
            yield Permutation(arrangement)
            yield Permutation(arrangement[::-1])
            arrangement = rotate_left(arrangement, 1)
def rubik_cube_generators():
    """Return the permutations of the 3x3 Rubik's cube, see
    http://www.gap-system.org/Doc/Examples/rubik.html
    """
    # Each sub-list holds the disjoint 4-cycles (1-based facet numbers, as in
    # the GAP example) induced by a quarter turn of one of the six faces.
    a = [
        [(1, 3, 8, 6), (2, 5, 7, 4), (9, 33, 25, 17), (10, 34, 26, 18),
         (11, 35, 27, 19)],
        [(9, 11, 16, 14), (10, 13, 15, 12), (1, 17, 41, 40), (4, 20, 44, 37),
         (6, 22, 46, 35)],
        [(17, 19, 24, 22), (18, 21, 23, 20), (6, 25, 43, 16), (7, 28, 42, 13),
         (8, 30, 41, 11)],
        [(25, 27, 32, 30), (26, 29, 31, 28), (3, 38, 43, 19), (5, 36, 45, 21),
         (8, 33, 48, 24)],
        [(33, 35, 40, 38), (34, 37, 39, 36), (3, 9, 46, 32), (2, 12, 47, 29),
         (1, 14, 48, 27)],
        [(41, 43, 48, 46), (42, 45, 47, 44), (14, 22, 30, 38),
         (15, 23, 31, 39), (16, 24, 32, 40)]
    ]
    # Shift to 0-based indices and build one size-48 cyclic permutation per face.
    return [Permutation([[i - 1 for i in xi] for xi in x], size=48) for x in a]
def rubik(n):
    """Return permutations for an nxn Rubik's cube.

    Permutations returned are for rotation of each of the slice
    from the face up to the last face for each of the 3 sides (in this order):
    front, right and bottom. Hence, the first n - 1 permutations are for the
    slices from the front.
    """
    from ..matrices import Matrix

    if n < 2:
        raise ValueError('dimension of cube must be > 1')

    # 1-based reference to rows and columns in Matrix.
    # get*/set* read or write column/row i counted from the named edge of
    # face f (r=right, l=left, u=up/top, d=down/bottom).
    def getr(f, i):
        return faces[f][:, n - i]

    def getl(f, i):
        return faces[f][:, i - 1]

    def getu(f, i):
        return faces[f][i - 1, :]

    def getd(f, i):
        return faces[f][n - i, :]

    def setr(f, i, s):
        faces[f][:, n - i] = Matrix(n, 1, s)

    def setl(f, i, s):
        faces[f][:, i - 1] = Matrix(n, 1, s)

    def setu(f, i, s):
        faces[f][i - 1, :] = Matrix(1, n, s)

    def setd(f, i, s):
        faces[f][n - i, :] = Matrix(1, n, s)

    # motion of a single face: rotate face F clockwise r quarter-turns.
    # NOTE: the inner loop shadows the parameter r; this is harmless because
    # range(r) is evaluated before the shadowing assignment.
    def cw(F, r=1):
        for _ in range(r):
            face = faces[F]
            rv = []
            for c in range(n):
                for r in range(n - 1, -1, -1):
                    rv.append(face[r, c])
            faces[F] = Matrix(n, n, rv)

    def ccw(F):
        # counter-clockwise == three clockwise quarter-turns
        cw(F, 3)

    # motion of plane i from the F side;
    # fcw(0) moves the F face, fcw(1) moves the plane
    # just behind the front face, etc...
    def fcw(i, r=1):
        for _ in range(r):
            if i == 0:
                cw(F)
            # shift to the 1-based indexing used by get*/set*
            i += 1
            temp = getr(L, i)
            setr(L, i, list(getu(D, i)))
            setu(D, i, list(reversed(getl(R, i))))
            setl(R, i, list(getd(U, i)))
            setd(U, i, list(reversed(temp)))
            i -= 1

    def fccw(i):
        fcw(i, 3)

    # motion of the entire cube from the F side
    def FCW(r=1):
        for _ in range(r):
            cw(F)
            ccw(B)
            cw(U)
            t = faces[U]
            cw(L)
            faces[U] = faces[L]
            cw(D)
            faces[L] = faces[D]
            cw(R)
            faces[D] = faces[R]
            faces[R] = t

    def FCCW():
        FCW(3)

    # motion of the entire cube from the U side
    def UCW(r=1):
        for _ in range(r):
            cw(U)
            ccw(D)
            t = faces[F]
            faces[F] = faces[R]
            faces[R] = faces[B]
            faces[B] = faces[L]
            faces[L] = t

    def UCCW():
        UCW(3)

    # defining the permutations for the cube
    U, F, R, B, L, D = names = symbols('U, F, R, B, L, D')

    # the faces are represented by nxn matrices, numbered consecutively
    # so that the flattened cube state is a permutation of range(6*n**2)
    faces = {}
    count = 0
    for fi in range(6):
        f = []
        for _ in range(n**2):
            f.append(count)
            count += 1
        faces[names[fi]] = Matrix(n, n, f)

    # this will either return the value of the current permutation
    # (show != 1) or else append the permutation to the group, g
    def METHOD_NAME(show=0):
        # add perm to the list of perms
        p = []
        for f in names:
            p.extend(faces[f])
        if show:
            return p
        g.append(Permutation(p))

    g = []  # container for the group's permutations
    I = list(range(6*n**2))  # the identity permutation used for checking

    # define permutations corresponding to cw rotations of the planes
    # up TO the last plane from that direction; by not including the
    # last plane, the orientation of the cube is maintained.

    # F slices
    for i in range(n - 1):
        fcw(i)
        METHOD_NAME()
        fccw(i)  # restore
    assert METHOD_NAME(1) == I

    # R slices
    # bring R to front
    UCW()
    for i in range(n - 1):
        fcw(i)
        # put it back in place
        UCCW()
        # record
        METHOD_NAME()
        # restore
        # bring face to front
        UCW()
        fccw(i)
    # restore
    UCCW()
    assert METHOD_NAME(1) == I

    # D slices
    # bring up bottom
    FCW()
    UCCW()
    FCCW()
    for i in range(n - 1):
        # turn strip
        fcw(i)
        # put bottom back on the bottom
        FCW()
        UCW()
        FCCW()
        # record
        METHOD_NAME()
        # restore
        # bring up bottom
        FCW()
        UCCW()
        FCCW()
        # turn strip
        fccw(i)
    # put bottom back on the bottom
    FCW()
    UCW()
    FCCW()
    assert METHOD_NAME(1) == I
    return g
5,889 | update completed partitions | from datetime import datetime
from typing import Sequence, Set
from snuba.redis import RedisClientType
OPTIMIZE_PREFIX = "snuba-optimize"
class NoOptimizedStateException(Exception):
    """
    Raised when the optimized-partition tracker has no state stored yet
    (e.g. the first optimize run of the day).
    """
class OptimizedPartitionTracker:
    """
    This class keeps track of partitions which have already been
    optimized by keeping state of optimized partitions in redis.

    Two redis sets are kept per (host, port, database, table, day): one with
    every partition that needs optimizing ("all") and one with the partitions
    already optimized ("completed"). The outstanding work is their difference.
    """

    def __init__(
        self,
        redis_client: RedisClientType,
        host: str,
        port: int,
        database: str,
        table: str,
        expire_time: datetime,
    ) -> None:
        self.__redis_client = redis_client
        self.__host = host
        self.__port = port
        self.__database = database
        self.__table = table
        # Keys are scoped to the current date so that state naturally resets
        # each day.
        today = datetime.now().date()
        common_prefix = (
            f"{OPTIMIZE_PREFIX}:{self.__host}:{self.__port}:{self.__database}:"
            f"{self.__table}:{today}"
        )
        self.__all_bucket = f"{common_prefix}:all"
        self.__completed_bucket = f"{common_prefix}:completed"
        # Absolute time at which both redis keys expire (passed to EXPIREAT).
        self.__key_expire_time = expire_time

    def __get_partitions(self, bucket: str) -> Set[str]:
        """
        Get the partitions from a given bucket.
        """
        partitions_set: Set[str] = set()
        partitions = self.__redis_client.smembers(bucket)
        if partitions:
            for partition in partitions:
                # redis returns raw bytes; decode for callers.
                assert isinstance(partition, bytes)
                partitions_set.add(partition.decode("utf-8"))
        return partitions_set

    def get_all_partitions(self) -> Set[str]:
        """
        Get a set of partitions which need to be optimized.
        """
        return self.__get_partitions(self.__all_bucket)

    def get_completed_partitions(self) -> Set[str]:
        """
        Get a set of partitions that have completed optimization.
        """
        return self.__get_partitions(self.__completed_bucket)

    def __update_partitions(
        self, bucket: str, encoded_part_names: Sequence[bytes]
    ) -> None:
        """
        Update the partitions in the bucket.
        """
        # One pipelined round trip: add members and refresh the key expiry.
        pipe = self.__redis_client.pipeline()
        pipe.sadd(bucket, *encoded_part_names)
        pipe.expireat(bucket, self.__key_expire_time)
        pipe.execute()

    def update_all_partitions(self, part_names: Sequence[str]) -> None:
        """
        Update the list of all partitions which need to be optimized.
        """
        # SADD with zero members is an error, so bail out early.
        if len(part_names) == 0:
            return
        encoded_part_names = [part.encode("utf-8") for part in part_names]
        self.__update_partitions(self.__all_bucket, encoded_part_names)

    def METHOD_NAME(self, part_name: str) -> None:
        """
        Add partitions that have completed optimization.
        """
        self.__update_partitions(self.__completed_bucket, [part_name.encode("utf-8")])

    def get_partitions_to_optimize(self) -> Set[str]:
        """
        Get a set of partition names which need optimization.

        When getting the partitions to optimize, NoOptimizedStateException
        exception can be raised to indicate that there is no state
        information and we need to populate the state. The exception is
        returned when the optimization is run for the first time on a given
        day. In all other cases a set object is returned.
        """
        all_partitions = self.get_all_partitions()
        completed_partitions = self.get_completed_partitions()
        if not all_partitions:
            raise NoOptimizedStateException
        if not completed_partitions:
            return all_partitions
        else:
            return all_partitions - completed_partitions

    def delete_all_states(self) -> None:
        """
        Delete the sets of partitions which had to be optimized and
        which have already been optimized.
        """
        pipe = self.__redis_client.pipeline()
        pipe.delete(self.__all_bucket)
        pipe.delete(self.__completed_bucket)
        pipe.execute()
5,890 | update release commit sha | #!/usr/bin/env python3
import os
import re
import subprocess
import sys
from urllib.parse import urlparse
import requests
import simplejson
# GitHub credentials and target repository, read from the environment.
# NOTE: this raises KeyError at import time if GITHUB_TOKEN is not set.
github_token = os.environ["GITHUB_TOKEN"]
auth = (github_token, "x-oauth-basic")  # token-as-username basic auth
repo = "getredash/redash"
def _github_request(method, path, params=None, headers=None):
    """Send an authenticated request to the GitHub API.

    Args:
        method: HTTP verb ("get", "post", "patch", "delete", ...).
        path: either a path relative to api.github.com ("repos/...") or a
            full https://api.github.com/... URL.
        params: optional payload; JSON-serialized and sent as the body.
        headers: optional extra request headers.

    Returns:
        The `requests.Response` (status checking is left to callers).
    """
    # Only prefix the API host when `path` is not already a full API URL.
    if urlparse(path).hostname != "api.github.com":
        url = "https://api.github.com/{}".format(path)
    else:
        url = path
    if params is not None:
        params = simplejson.dumps(params)
    # Fixed: `headers` used a mutable default ({}) and was silently ignored;
    # it now defaults to None and is forwarded to the request.
    response = requests.request(method, url, data=params, auth=auth, headers=headers)
    return response
def exception_from_error(message, response):
    """Build an Exception summarizing a failed GitHub API response."""
    detail = response.json().get("message", "?")
    text = "({status}) {msg}: {detail}".format(
        status=response.status_code, msg=message, detail=detail
    )
    return Exception(text)
def rc_tag_name(version):
    """Return the git tag used for the release candidate of *version*."""
    return f"v{version}-rc"
def get_rc_release(version):
    """Fetch the existing RC release for *version*, or None when absent."""
    response = _github_request(
        "get", "repos/{}/releases/tags/{}".format(repo, rc_tag_name(version))
    )
    if response.status_code == 200:
        return response.json()
    if response.status_code == 404:
        return None
    raise exception_from_error("Unknown error while looking RC release: ", response)
def create_release(version, commit_sha):
    """Create a new prerelease tagged v<version>-rc pointing at *commit_sha*."""
    params = {
        "tag_name": rc_tag_name(version),
        "name": "{} - RC".format(version),
        "target_commitish": commit_sha,
        "prerelease": True,
    }
    response = _github_request("post", "repos/{}/releases".format(repo), params)
    # GitHub answers 201 Created on success.
    if response.status_code != 201:
        raise exception_from_error("Failed creating new release", response)
    return response.json()
def upload_asset(release, filepath):
    """Upload the build artifact at *filepath* as an asset of *release*.

    Returns the upload `requests.Response`; raises on any non-201 status.
    """
    # The upload_url is an RFC 6570 template; strip the optional parameters.
    upload_url = release["upload_url"].replace("{?name,label}", "")
    filename = filepath.split("/")[-1]
    # Fixed: open in binary mode — the artifact is a gzip archive and text
    # mode would fail to decode (or corrupt) the payload on Python 3.
    # `with` also guarantees the handle is closed.
    with open(filepath, "rb") as file_content:
        headers = {"Content-Type": "application/gzip"}
        # SECURITY NOTE(review): verify=False disables TLS certificate
        # validation for this upload; kept for backward compatibility but
        # should be removed.
        response = requests.post(
            upload_url, file_content, params={"name": filename}, headers=headers, auth=auth, verify=False
        )
    if response.status_code != 201:  # not 200/201/...
        raise exception_from_error("Failed uploading asset", response)
    return response
def remove_previous_builds(release):
    """Delete every asset currently attached to *release*."""
    for asset in release["assets"]:
        delete_response = _github_request("delete", asset["url"])
        # GitHub answers 204 No Content when an asset is deleted.
        if delete_response.status_code != 204:
            raise exception_from_error("Failed deleting asset", delete_response)
def get_changelog(commit_sha):
    """Build a changelog of merged pull requests between the latest published
    release and *commit_sha*.

    Returns a newline-joined string: a header line followed by one line per
    merge commit ("<sha> #<pr>: <body> (<author>)").
    """
    latest_release = _github_request("get", "repos/{}/releases/latest".format(repo))
    if latest_release.status_code != 200:
        raise exception_from_error("Failed getting latest release", latest_release)
    latest_release = latest_release.json()
    previous_sha = latest_release["target_commitish"]
    args = [
        "git",
        "--no-pager",
        "log",
        "--merges",
        "--grep",
        "Merge pull request",
        '--pretty=format:"%h|%s|%b|%p"',
        "{}...{}".format(previous_sha, commit_sha),
    ]
    # Fixed: check_output returns *bytes* on Python 3; decode before using
    # str operations such as split("\n") below.
    log = subprocess.check_output(args).decode("utf-8")
    changes = ["Changes since {}:".format(latest_release["name"])]
    for line in log.split("\n"):
        try:
            # [1:-1] strips the quotes emitted by the --pretty format.
            sha, subject, body, parents = line[1:-1].split("|")
        except ValueError:
            continue
        try:
            pull_request = re.match(r"Merge pull request #(\d+)", subject).groups()[0]
            pull_request = " #{}".format(pull_request)
        except Exception:
            pull_request = ""
        # Fixed: decode here as well, for the same bytes-vs-str reason.
        author = subprocess.check_output(
            ["git", "log", "-1", '--pretty=format:"%an"', parents.split(" ")[-1]]
        ).decode("utf-8")[1:-1]
        changes.append("{}{}: {} ({})".format(sha, pull_request, body.strip(), author))
    return "\n".join(changes)
def METHOD_NAME(release, commit_sha):
    """Point an existing release at *commit_sha* and return the updated release."""
    payload = {"target_commitish": commit_sha}
    url = "repos/{}/releases/{}".format(repo, release["id"])
    response = _github_request("patch", url, payload)
    if response.status_code != 200:
        raise exception_from_error("Failed updating commit sha for existing release", response)
    return response.json()
def update_release(version, build_filepath, commit_sha):
    """Create or refresh the RC release for *version*, then upload the build
    artifact and rewrite the release notes."""
    try:
        release = get_rc_release(version)
        # Reuse an existing RC release when present, otherwise create one.
        if release:
            release = METHOD_NAME(release, commit_sha)
        else:
            release = create_release(version, commit_sha)
        print("Using release id: {}".format(release["id"]))
        remove_previous_builds(release)
        response = upload_asset(release, build_filepath)
        changelog = get_changelog(commit_sha)
        response = _github_request("patch", release["url"], {"body": changelog})
        if response.status_code != 200:
            raise exception_from_error("Failed updating release description", response)
    except Exception as ex:
        # NOTE(review): errors are printed and swallowed, so the script still
        # exits with status 0 on failure; preserved as-is.
        print(ex)
if __name__ == "__main__":
    # CLI: update_version.py <commit_sha> <version> <build_filepath>
    commit_sha = sys.argv[1]
    version = sys.argv[2]
    filepath = sys.argv[3]
    # TODO: make sure running from git directory & remote = repo
    update_release(version, filepath, commit_sha)
5,891 | from numpy dataset | from typing import Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
from ludwig.api_annotations import DeveloperAPI
from ludwig.constants import DASK_MODULE_NAME
from ludwig.data.dataframe.base import DataFrameEngine
from ludwig.utils.types import DataFrame
@DeveloperAPI
def is_dask_lib(df_lib) -> bool:
    """Return True if *df_lib* is the dask dataframe module."""
    module_name = df_lib.__name__
    return module_name == DASK_MODULE_NAME
@DeveloperAPI
def is_dask_backend(backend: Optional["Backend"]) -> bool:  # noqa: F821
    """Return True when *backend* is set and its dataframe engine is dask."""
    if backend is None:
        return False
    return is_dask_lib(backend.df_engine.df_lib)
@DeveloperAPI
def is_dask_series_or_df(df: DataFrame, backend: Optional["Backend"]) -> bool:  # noqa: F821
    """Return True when *df* is a dask Series/DataFrame under a dask backend."""
    if not is_dask_backend(backend):
        return False
    # Import lazily so dask stays an optional dependency.
    import dask.dataframe as dd

    return isinstance(df, (dd.Series, dd.DataFrame))
@DeveloperAPI
def flatten_df(df: DataFrame, df_engine: DataFrameEngine) -> Tuple[DataFrame, Dict[str, Tuple]]:  # noqa: F821
    """Returns a flattened dataframe with a dictionary of the original shapes, keyed by dataframe columns.

    Multi-dimensional cell values are reshaped to 1-D; use ``unflatten_df``
    with the returned shape map to reverse the transformation.
    """
    # Workaround for: https://issues.apache.org/jira/browse/ARROW-5645
    column_shapes = {}
    for c in df.columns:
        # Re-persist every iteration so lazy engines (dask) don't recompute
        # earlier column rewrites while shapes are inspected.
        df = df_engine.persist(df)
        # NOTE(review): .max() over per-cell shape tuples picks the
        # lexicographically largest shape — assumes all cells of a column
        # share the same rank; confirm with callers.
        shape = df_engine.compute(
            df_engine.map_objects(
                df[c],
                lambda x: np.array(x).shape,
            ).max()
        )
        if len(shape) > 1:
            column_shapes[c] = shape
            df[c] = df_engine.map_objects(df[c], lambda x: np.array(x).reshape(-1))
    return df, column_shapes
@DeveloperAPI
def unflatten_df(df: DataFrame, column_shapes: Dict[str, Tuple], df_engine: DataFrameEngine) -> DataFrame:  # noqa: F821
    """Returns an unflattened dataframe, the reverse of flatten_df.

    Each column listed in *column_shapes* has its 1-D cell values reshaped
    back to the recorded shape.
    """
    for c in df.columns:
        shape = column_shapes.get(c)
        if shape:
            # Fixed: bind `shape` as a default argument. With a lazy engine
            # (dask) the mapped function may execute after the loop has moved
            # on, and a free-variable closure would then see the *last*
            # column's shape instead of this column's.
            df[c] = df_engine.map_objects(
                df[c], lambda x, shape=shape: np.array(x).reshape(shape)
            )
    return df
@DeveloperAPI
def to_numpy_dataset(df: DataFrame, backend: Optional["Backend"] = None) -> Dict[str, np.ndarray]:  # noqa: F821
    """Returns a dictionary of numpy arrays, keyed by the columns of the given dataframe."""
    dataset = {}
    for col in df.columns:
        series = df[col]
        # Materialize dask series before converting to numpy.
        if backend and is_dask_backend(backend):
            series = series.compute()
        if len(df.index) == 0:
            # Dataframe is empty: np.stack() needs at least one array to
            # stack, so fall back to a plain (empty) list.
            dataset[col] = series.to_list()
        else:
            dataset[col] = np.stack(series.to_numpy())
    return dataset
@DeveloperAPI
def METHOD_NAME(dataset) -> pd.DataFrame:
    """Returns a pandas dataframe from the dataset (dict of numpy arrays)."""
    col_mapping = {}
    for name, values in dataset.items():
        if len(values.shape) > 1:
            # Unstack arrays of dimension 2+ into a list of row arrays, so
            # pandas stores one ndarray per cell instead of coercing dtypes
            # (e.g. turning float32 scalars into a float64 column).
            col_mapping[name] = list(values)
        else:
            col_mapping[name] = values
    return pd.DataFrame.from_dict(col_mapping)
@DeveloperAPI
def set_index_name(pd_df: pd.DataFrame, name: str) -> pd.DataFrame:
    """Set the index name of *pd_df* in place and return the same frame."""
    pd_df.index.name = name
    return pd_df
@DeveloperAPI
def to_batches(df: pd.DataFrame, batch_size: int) -> List[pd.DataFrame]:
    """Split *df* into consecutive row chunks of at most *batch_size* rows."""
    n_rows = df.shape[0]
    return [df[start : start + batch_size].copy() for start in range(0, n_rows, batch_size)]
@DeveloperAPI
def from_batches(batches: List[pd.DataFrame]) -> pd.DataFrame:
    """Concatenate row batches back into a single dataframe (inverse of to_batches)."""
    return pd.concat(batches)
@DeveloperAPI
def to_scalar_df(df: pd.DataFrame) -> pd.DataFrame:
    """Converts all columns in a pd.DataFrame to be scalar types.

    For object columns of lists, each element of the list is expanded into its own column named {column}_{index}. We
    assume all object columns are lists of the same length (i.e., tensor format output from preprocessing). It's also
    important that the relative order of the columns is preserved, to maintain consistency with other conversions like
    the one for Hummingbird.
    """
    scalar_df = df
    column_ordering = []
    for c, s in df.items():
        if s.dtype == "object":
            s_list = s.to_list()
            try:
                # Width of the expansion, taken from the first cell.
                # NOTE(review): assumes s_list is non-empty and every cell has
                # the same first dimension — confirm with callers.
                ncols = s_list[0].shape[0]
                split_cols = [f"{c}_{k}" for k in range(ncols)]
                sdf = pd.DataFrame(s_list, columns=split_cols)
                # NOTE(review): concat aligns on index and sdf has a fresh
                # RangeIndex; a non-default index on df would misalign rows.
                scalar_df = pd.concat([scalar_df, sdf], axis=1)
                column_ordering += split_cols
            except AttributeError as e:
                # Cell had no .shape, i.e. not an ndarray-like value.
                raise ValueError(f"Expected series of lists, but found {s_list[0]}") from e
        else:
            column_ordering.append(c)
    # Selecting column_ordering drops the original object columns while
    # preserving relative column order.
    return scalar_df[column_ordering]
5,892 | fwi gradient | import numpy as np
from devito import configuration, Function, norm, mmax, mmin
from examples.seismic import demo_model, AcquisitionGeometry, Receiver
from examples.seismic.acoustic import AcousticWaveSolver
from inversion_utils import compute_residual, update_with_box
# Turn off logging
configuration['log-level'] = "ERROR"

# Setup
nshots = 9  # Number of shots to create gradient from
nreceivers = 101  # Number of receiver locations per shot
fwi_iterations = 5  # Number of outer FWI iterations

# Define true and initial model
shape = (101, 101)  # Number of grid point (nx, nz)
spacing = (10., 10.)  # Grid spacing in m. The domain size is now 1km by 1km
origin = (0., 0.)  # Need origin to define relative source and receiver locations
model = demo_model('circle-isotropic', vp_circle=3.0, vp_background=2.5,
                   origin=origin, shape=shape, spacing=spacing, nbl=40)
# Initial guess: constant background velocity (no circle anomaly), built on
# the same grid as the true model so fields are directly comparable.
model0 = demo_model('circle-isotropic', vp_circle=2.5, vp_background=2.5,
                    origin=origin, shape=shape, spacing=spacing, nbl=40,
                    grid=model.grid)
assert model.grid == model0.grid
assert model.vp.grid == model0.vp.grid

# Acquisition geometry: 1s of recording with a 10 Hz Ricker source.
t0 = 0.
tn = 1000.
f0 = 0.010
# Source at 20m depth and center of x
src_coordinates = np.empty((1, 2))
src_coordinates[0, :] = np.array(model.domain_size) * .5
src_coordinates[0, 0] = 20.  # Depth is 20m

# Initialize receivers for synthetic and imaging data
rec_coordinates = np.empty((nreceivers, 2))
rec_coordinates[:, 1] = np.linspace(0, model.domain_size[0], num=nreceivers)
rec_coordinates[:, 0] = 980.

# Geometry
geometry = AcquisitionGeometry(model, rec_coordinates, src_coordinates,
                               t0, tn, f0=f0, src_type='Ricker')
# Wave solver
solver = AcousticWaveSolver(model, geometry, space_order=4)

# Prepare the varying source locations sources (one line of shots at 20m depth)
source_locations = np.empty((nshots, 2), dtype=np.float32)
source_locations[:, 0] = 20.
source_locations[:, 1] = np.linspace(0., 1000, num=nshots)


def METHOD_NAME(vp_in):
    """Compute the FWI least-squares objective and its gradient for the
    velocity model *vp_in*, summed over all shots.

    Returns (objective, grad) where grad is a devito Function on model.grid.
    """
    # Create symbols to hold the gradient
    grad = Function(name="grad", grid=model.grid)
    objective = 0.
    for i in range(nshots):
        # Create placeholders for the data residual and data
        residual = Receiver(name='residual', grid=model.grid,
                            time_range=geometry.time_axis,
                            coordinates=geometry.rec_positions)
        d_obs = Receiver(name='d_obs', grid=model.grid,
                         time_range=geometry.time_axis,
                         coordinates=geometry.rec_positions)
        d_syn = Receiver(name='d_syn', grid=model.grid,
                         time_range=geometry.time_axis,
                         coordinates=geometry.rec_positions)
        # Update source location
        solver.geometry.src_positions[0, :] = source_locations[i, :]
        # Generate synthetic data from true model
        solver.forward(vp=model.vp, rec=d_obs)
        # Compute smooth data and full forward wavefield u0
        _, u0, _ = solver.forward(vp=vp_in, save=True, rec=d_syn)
        # Compute gradient from data residual and update objective function
        residual = compute_residual(residual, d_obs, d_syn)
        objective += .5*norm(residual)**2
        # Adjoint (gradient) accumulation into `grad`.
        solver.jacobian_adjoint(rec=residual, u=u0, vp=vp_in, grad=grad)
    return objective, grad


# Compute gradient of initial model
ff, update = METHOD_NAME(model0.vp)
print(ff, mmin(update), mmax(update))
# Regression guards on the objective and gradient extrema for this setup.
assert np.isclose(ff, 39113, atol=1e1, rtol=0)
assert np.isclose(mmin(update), -821, atol=1e1, rtol=0)
assert np.isclose(mmax(update), 2442, atol=1e1, rtol=0)

# Run FWI with gradient descent
history = np.zeros((fwi_iterations, 1))
for i in range(0, fwi_iterations):
    # Compute the functional value and gradient for the current
    # model estimate
    phi, direction = METHOD_NAME(model0.vp)
    # Store the history of the functional values
    history[i] = phi
    # Artificial Step length for gradient descent
    # In practice this would be replaced by a Linesearch (Wolfe, ...)
    # that would guarantee functional decrease Phi(m-alpha g) <= epsilon Phi(m)
    # where epsilon is a minimum decrease constant
    alpha = .05 / mmax(direction)
    # Update the model estimate and enforce minimum/maximum values
    update_with_box(model0.vp, alpha, direction)
    # Log the progress made
    print('Objective value is %f at iteration %d' % (phi, i+1))
# Final objective must have decreased to the expected value.
assert np.isclose(history[-1], 3828, atol=1e1, rtol=0)
5,893 | action probabilities | # Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NFSP agents trained on Kuhn Poker."""
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import nfsp
FLAGS = flags.FLAGS

# Training/evaluation hyper-parameters, tunable from the command line.
flags.DEFINE_integer("num_train_episodes", int(3e6),
                     "Number of training episodes.")
flags.DEFINE_integer("eval_every", 10000,
                     "Episode frequency at which the agents are evaluated.")
flags.DEFINE_list("hidden_layers_sizes", [
    128,
], "Number of hidden units in the avg-net and Q-net.")
flags.DEFINE_integer("replay_buffer_capacity", int(2e5),
                     "Size of the replay buffer.")
flags.DEFINE_integer("reservoir_buffer_capacity", int(2e6),
                     "Size of the reservoir buffer.")
flags.DEFINE_float("anticipatory_param", 0.1,
                   "Prob of using the rl best response as episode policy.")


class NFSPPolicies(policy.Policy):
  """Joint policy to be evaluated.

  Wraps the per-player NFSP agents so exploitability can query the joint
  (average) policy through the standard open_spiel Policy interface.
  """

  def __init__(self, env, nfsp_policies, mode):
    game = env.game
    player_ids = [0, 1]
    super(NFSPPolicies, self).__init__(game, player_ids)
    self._policies = nfsp_policies
    self._mode = mode
    # Reusable observation buffer handed to the wrapped NFSP agents.
    self._obs = {"info_state": [None, None], "legal_actions": [None, None]}

  def METHOD_NAME(self, state, player_id=None):
    """Return {action: probability} for the player to act in *state*."""
    cur_player = state.current_player()
    legal_actions = state.legal_actions(cur_player)

    self._obs["current_player"] = cur_player
    self._obs["info_state"][cur_player] = (
        state.information_state_tensor(cur_player))
    self._obs["legal_actions"][cur_player] = legal_actions

    info_state = rl_environment.TimeStep(
        observations=self._obs, rewards=None, discounts=None, step_type=None)

    # Query the agent in the requested mode (e.g. average policy) without
    # mutating its training mode permanently.
    with self._policies[cur_player].temp_mode_as(self._mode):
      p = self._policies[cur_player].step(info_state, is_evaluation=True).probs
    prob_dict = {action: p[action] for action in legal_actions}
    return prob_dict


def main(unused_argv):
  game = "kuhn_poker"
  num_players = 2

  env_configs = {"players": num_players}
  env = rl_environment.Environment(game, **env_configs)
  info_state_size = env.observation_spec()["info_state"][0]
  num_actions = env.action_spec()["num_actions"]

  hidden_layers_sizes = [int(l) for l in FLAGS.hidden_layers_sizes]
  # Inner-RL (best response) hyper-parameters shared by both agents.
  kwargs = {
      "replay_buffer_capacity": FLAGS.replay_buffer_capacity,
      "epsilon_decay_duration": FLAGS.num_train_episodes,
      "epsilon_start": 0.06,
      "epsilon_end": 0.001,
  }

  with tf.Session() as sess:
    # pylint: disable=g-complex-comprehension
    agents = [
        nfsp.NFSP(sess, idx, info_state_size, num_actions, hidden_layers_sizes,
                  FLAGS.reservoir_buffer_capacity, FLAGS.anticipatory_param,
                  **kwargs) for idx in range(num_players)
    ]
    expl_policies_avg = NFSPPolicies(env, agents, nfsp.MODE.average_policy)

    sess.run(tf.global_variables_initializer())
    for ep in range(FLAGS.num_train_episodes):
      # Periodic evaluation: log losses and exploitability of the average
      # policy (NashConv-style metric; lower is closer to equilibrium).
      if (ep + 1) % FLAGS.eval_every == 0:
        losses = [agent.loss for agent in agents]
        logging.info("Losses: %s", losses)
        expl = exploitability.exploitability(env.game, expl_policies_avg)
        logging.info("[%s] Exploitability AVG %s", ep + 1, expl)
        logging.info("_____________________________________________")

      # Play one self-play episode, letting the player to act choose.
      time_step = env.reset()
      while not time_step.last():
        player_id = time_step.observations["current_player"]
        agent_output = agents[player_id].step(time_step)
        action_list = [agent_output.action]
        time_step = env.step(action_list)

      # Episode is over, step all agents with final info state.
      for agent in agents:
        agent.step(time_step)


if __name__ == "__main__":
  app.run(main)
5,894 | eval mel cepstral distortion | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Signal processing-based evaluation using waveforms
"""
import csv
import numpy as np
import os.path as op
import torch
import tqdm
from tabulate import tabulate
import torchaudio
from examples.speech_synthesis.utils import batch_mel_spectral_distortion
from fairseq.tasks.text_to_speech import batch_mel_cepstral_distortion
def load_eval_spec(path):
    """Load a tab-separated eval manifest into a list of row dicts.

    The file is expected to have a header row; typical columns are "ref"
    and "syn" (paths to reference and synthesized audio).
    """
    with open(path) as f:
        rows = csv.DictReader(f, delimiter='\t')
        samples = list(rows)
    return samples
def eval_distortion(samples, distortion_fn, device="cuda"):
    """Compute a distortion metric for each (ref, syn) waveform pair.

    Args:
        samples: rows with "ref"/"syn" audio paths (see load_eval_spec).
        distortion_fn: batch distortion function; returns per-pair
            (distortion, extra) where extra carries the DTW path map.
        device: torch device used for the computation.

    Returns:
        One entry per sample: None when either file is missing, otherwise a
        tuple (distortion, ref_frames, syn_frames, path_length, n_ins, n_del).
    """
    nmiss = 0
    results = []
    for sample in tqdm.tqdm(samples):
        if not op.isfile(sample["ref"]) or not op.isfile(sample["syn"]):
            nmiss += 1
            results.append(None)
            continue
        # assume single channel
        yref, sr = torchaudio.load(sample["ref"])
        ysyn, _sr = torchaudio.load(sample["syn"])
        yref, ysyn = yref[0].to(device), ysyn[0].to(device)
        # Both waveforms must share a sample rate for the metric to be valid.
        assert sr == _sr, f"{sr} != {_sr}"

        distortion, extra = distortion_fn([yref], [ysyn], sr, None)[0]
        _, _, _, _, _, pathmap = extra
        # Insertions/deletions are derived from the DTW alignment path map.
        nins = torch.sum(pathmap.sum(dim=1) - 1)  # extra frames in syn
        ndel = torch.sum(pathmap.sum(dim=0) - 1)  # missing frames from syn
        results.append(
            (distortion.item(),  # path distortion
             pathmap.size(0),  # yref num frames
             pathmap.size(1),  # ysyn num frames
             pathmap.sum().item(),  # path length
             nins.item(),  # insertion
             ndel.item(),  # deletion
             )
        )
    return results
def METHOD_NAME(samples, device="cuda"):
    """Evaluate mel-cepstral distortion (MCD) over *samples*."""
    distortion_fn = batch_mel_cepstral_distortion
    return eval_distortion(samples, distortion_fn, device)
def eval_mel_spectral_distortion(samples, device="cuda"):
    """Evaluate mel-spectral distortion (MSD) over *samples*."""
    distortion_fn = batch_mel_spectral_distortion
    return eval_distortion(samples, distortion_fn, device)
def print_results(results, show_bin):
    """Print an aggregate distortion table, optionally bucketed by reference
    duration (in frames).

    *results* is the list produced by eval_distortion; None entries (missing
    files) are dropped before aggregation.
    """
    results = np.array(list(filter(lambda x: x is not None, results)))

    np.set_printoptions(precision=3)

    def _print_result(results):
        # Column-wise totals over (dist, ref_frames, syn_frames, path_len,
        # n_ins, n_del) tuples.
        dist, dur_ref, dur_syn, dur_ali, nins, ndel = results.sum(axis=0)
        res = {
            "nutt": len(results),
            "dist": dist,
            "dur_ref": int(dur_ref),
            "dur_syn": int(dur_syn),
            "dur_ali": int(dur_ali),
            "dist_per_ref_frm": dist/dur_ref,
            "dist_per_syn_frm": dist/dur_syn,
            "dist_per_ali_frm": dist/dur_ali,
            # Insertion/deletion rates normalized by reference duration.
            "ins": nins/dur_ref,
            "del": ndel/dur_ref,
        }
        print(tabulate(
            [res.values()],
            res.keys(),
            floatfmt=".4f"
        ))

    print(">>>> ALL")
    _print_result(results)

    if show_bin:
        # Re-aggregate per reference-duration bucket (frames).
        edges = [0, 200, 400, 600, 800, 1000, 2000, 4000]
        for i in range(1, len(edges)):
            mask = np.logical_and(results[:, 1] >= edges[i-1],
                                  results[:, 1] < edges[i])
            if not mask.any():
                continue
            bin_results = results[mask]
            print(f">>>> ({edges[i-1]}, {edges[i]})")
            _print_result(bin_results)
def main(eval_spec, mcd, msd, show_bin):
    """Run the requested waveform-distortion evaluations over a manifest.

    Args:
        eval_spec: path to a TSV manifest with "ref"/"syn" columns.
        mcd: evaluate mel-cepstral distortion when True.
        msd: evaluate mel-spectral distortion when True.
        show_bin: also print per-duration-bin breakdowns.
    """
    samples = load_eval_spec(eval_spec)
    device = "cpu"
    if mcd:
        print("===== Evaluate Mean Cepstral Distortion =====")
        print_results(METHOD_NAME(samples, device), show_bin)
    if msd:
        print("===== Evaluate Mean Spectral Distortion =====")
        print_results(eval_mel_spectral_distortion(samples, device), show_bin)
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("eval_spec")  # TSV manifest with "ref"/"syn" columns
    parser.add_argument("--mcd", action="store_true")  # mel-cepstral distortion
    parser.add_argument("--msd", action="store_true")  # mel-spectral distortion
    parser.add_argument("--show-bin", action="store_true")  # per-duration bins
    args = parser.parse_args()

    main(args.eval_spec, args.mcd, args.msd, args.show_bin)
5,895 | get schema update command | import difflib
import re
from collections import namedtuple
from pathlib import Path
import pytest
import sqlalchemy
from packaging.version import Version
from sqlalchemy import create_engine
from sqlalchemy.schema import CreateTable, MetaData
import mlflow
from mlflow.environment_variables import MLFLOW_TRACKING_URI
pytestmark = pytest.mark.notrackingurimock
def get_database_dialect(uri):
    """Return the SQLAlchemy dialect name (e.g. "sqlite", "mysql") for *uri*."""
    engine = create_engine(uri)
    return engine.dialect.name
def get_tracking_uri():
    """Read the tracking URI from the MLFLOW_TRACKING_URI environment variable."""
    return MLFLOW_TRACKING_URI.get()
def dump_schema(db_uri):
    """Reflect the database at *db_uri* and render its CREATE TABLE DDL as
    one newline-joined string, tables in dependency-sorted order."""
    engine = create_engine(db_uri)
    created_tables_metadata = MetaData()
    created_tables_metadata.reflect(bind=engine)
    # Write out table schema as described in
    # https://docs.sqlalchemy.org/en/13/faq/metadata_schema.html#how-can-i-get-the-create-table-drop-table-output-as-a-string
    lines = []
    for table in created_tables_metadata.sorted_tables:
        # Apply `str.rstrip` to remove trailing whitespaces
        lines += map(str.rstrip, str(CreateTable(table)).splitlines())
    return "\n".join(lines)
# Parsed representation of one CREATE TABLE statement: the table name plus
# the set of its column/constraint lines (order-insensitive comparison).
_CreateTable = namedtuple("_CreateTable", ["table", "columns"])

_CREATE_TABLE_REGEX = re.compile(
    r"""
CREATE TABLE (?P<table>\S+?) \(
(?P<columns>.+?)
\)
""".strip(),
    flags=re.DOTALL,
)


def parse_create_tables(schema):
    """Extract every CREATE TABLE statement from a schema dump."""
    tables = []
    for match in _CREATE_TABLE_REGEX.finditer(schema):
        column_lines = set(match.group("columns").splitlines())
        tables.append(_CreateTable(table=match.group("table"), columns=column_lines))
    return tables
def schema_equal(schema_a, schema_b):
    """Structurally compare two schema dumps (tables and their column lines)."""
    tables_a = parse_create_tables(schema_a)
    tables_b = parse_create_tables(schema_b)
    # Guard against a dump that parsed to nothing (would compare equal).
    assert tables_a != []
    assert tables_b != []
    return tables_a == tables_b
def get_schema_path(db_uri):
    """Return the checked-in schema file for the dialect of *db_uri*."""
    dialect = get_database_dialect(db_uri)
    return Path(__file__).parent / "schemas" / (dialect + ".sql")
def iter_parameter_sets():
    """Yield (schema_a, schema_b, expected) cases for test_schema_equal."""
    a = """
CREATE TABLE table (
    col VARCHAR(10)
)
"""
    b = """
CREATE TABLE table (
    col VARCHAR(10)
)
"""
    yield pytest.param(a, b, True, id="identical schemas")

    a = """
CREATE TABLE table1 (
    col VARCHAR(10)
)
"""
    b = """
CREATE TABLE table2 (
    col VARCHAR(10)
)
"""
    yield pytest.param(a, b, False, id="different table names")

    a = """
CREATE TABLE table (
    col1 VARCHAR(10)
)
"""
    b = """
CREATE TABLE table (
    col2 VARCHAR(10)
)
"""
    yield pytest.param(a, b, False, id="different column names")


@pytest.mark.parametrize(("a", "b", "expected"), iter_parameter_sets())
def test_schema_equal(a, b, expected):
    # Sanity tests for the structural comparison helper itself.
    assert schema_equal(a, b) is expected
def initialize_database():
    """Force MLflow to create its tracking tables by starting a no-op run."""
    with mlflow.start_run():
        pass
def METHOD_NAME(dialect):
    """Build the docker-compose command that regenerates the schema file for *dialect*."""
    script = Path(__file__).relative_to(Path.cwd())
    compose_file = script.parent / "compose.yml"
    return f"docker-compose -f {compose_file} run --rm mlflow-{dialect} python {script}"
@pytest.mark.skipif(
    Version(sqlalchemy.__version__) > Version("1.4"), reason="Use 1.4 for schema check"
)
def test_schema_is_up_to_date():
    """Assert the checked-in schema file matches the live database schema,
    with an actionable failure message (diff + regeneration command)."""
    initialize_database()
    tracking_uri = get_tracking_uri()
    schema_path = get_schema_path(tracking_uri)
    existing_schema = schema_path.read_text()
    latest_schema = dump_schema(tracking_uri)
    dialect = get_database_dialect(tracking_uri)
    update_command = METHOD_NAME(dialect)
    # Fixed: a first, shorter `message` was assigned here and then
    # immediately overwritten below; the dead assignment has been removed.
    diff = "".join(
        difflib.ndiff(
            existing_schema.splitlines(keepends=True), latest_schema.splitlines(keepends=True)
        )
    )
    rel_path = schema_path.relative_to(Path.cwd())
    message = f"""
=================================== EXPECTED ===================================
{latest_schema}
==================================== ACTUAL ====================================
{existing_schema}
===================================== DIFF =====================================
{diff}
================================== HOW TO FIX ==================================
Manually copy & paste the expected schema in {rel_path} or run the following command:
{update_command}
"""
    assert schema_equal(existing_schema, latest_schema), message
def main():
    """Regenerate the checked-in schema file from a live tracking database."""
    tracking_uri = get_tracking_uri()
    assert tracking_uri, f"Environment variable {MLFLOW_TRACKING_URI} must be set"
    get_database_dialect(tracking_uri)  # Ensure `tracking_uri` is a database URI
    mlflow.set_tracking_uri(tracking_uri)
    initialize_database()
    schema_path = get_schema_path(tracking_uri)
    existing_schema = schema_path.read_text()
    latest_schema = dump_schema(tracking_uri)
    # Only rewrite the file when the structural comparison fails, keeping
    # regeneration idempotent.
    if not schema_equal(existing_schema, latest_schema):
        schema_path.write_text(latest_schema)


if __name__ == "__main__":
    main()
5,896 | test all new methods are called | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Unit tests for abc.py."""
import unittest, weakref
from test import test_support
import abc
from inspect import isabstract
class TestABC(unittest.TestCase):
    def test_abstractmethod_basics(self):
        # abstractmethod marks functions via the __isabstractmethod__ flag.
        @abc.abstractmethod
        def foo(self): pass
        self.assertTrue(foo.__isabstractmethod__)

        # Undecorated functions must not carry the flag at all.
        def bar(self): pass
        self.assertFalse(hasattr(bar, "__isabstractmethod__"))

    def test_abstractproperty_basics(self):
        @abc.abstractproperty
        def foo(self): pass
        self.assertTrue(foo.__isabstractmethod__)

        def bar(self): pass
        self.assertFalse(hasattr(bar, "__isabstractmethod__"))

        # NOTE: Python 2 idiom — metaclass set via the __metaclass__ class
        # attribute (Python 3 passes it in the class header instead).
        class C:
            __metaclass__ = abc.ABCMeta
            @abc.abstractproperty
            def foo(self): return 3
        class D(C):
            # A concrete @property override satisfies the abstract property.
            @property
            def foo(self): return super(D, self).foo
        self.assertEqual(D().foo, 3)

    def test_abstractmethod_integration(self):
        # Both decorators must block instantiation until overridden.
        for abstractthing in [abc.abstractmethod, abc.abstractproperty]:
            class C:
                __metaclass__ = abc.ABCMeta
                @abstractthing
                def foo(self): pass  # abstract
                def bar(self): pass  # concrete
            self.assertEqual(C.__abstractmethods__, set(["foo"]))
            self.assertRaises(TypeError, C)  # because foo is abstract
            self.assertTrue(isabstract(C))
            class D(C):
                def bar(self): pass  # concrete override of concrete
            self.assertEqual(D.__abstractmethods__, set(["foo"]))
            self.assertRaises(TypeError, D)  # because foo is still abstract
            self.assertTrue(isabstract(D))
            class E(D):
                def foo(self): pass
            self.assertEqual(E.__abstractmethods__, set())
            E()  # now foo is concrete, too
            self.assertFalse(isabstract(E))
            class F(E):
                @abstractthing
                def bar(self): pass  # abstract override of concrete
            self.assertEqual(F.__abstractmethods__, set(["bar"]))
            self.assertRaises(TypeError, F)  # because bar is abstract now
            self.assertTrue(isabstract(F))

    def test_subclass_oldstyle_class(self):
        # Old-style (classic) classes never count as ABC subclasses in
        # either direction. (Python 2 only — no classic classes in 3.)
        class A:
            __metaclass__ = abc.ABCMeta
        class OldstyleClass:
            pass
        self.assertFalse(issubclass(OldstyleClass, A))
        self.assertFalse(issubclass(A, OldstyleClass))

    def test_isinstance_class(self):
        # Same as above, but for isinstance on class objects themselves.
        class A:
            __metaclass__ = abc.ABCMeta
        class OldstyleClass:
            pass
        self.assertFalse(isinstance(OldstyleClass, A))
        self.assertTrue(isinstance(OldstyleClass, type(OldstyleClass)))
        self.assertFalse(isinstance(A, OldstyleClass))
        # This raises a recursion depth error, but is low-priority:
        # self.assertTrue(isinstance(A, abc.ABCMeta))
def test_registration_basics(self):
class A:
__metaclass__ = abc.ABCMeta
class B(object):
pass
b = B()
self.assertFalse(issubclass(B, A))
self.assertFalse(issubclass(B, (A,)))
self.assertNotIsInstance(b, A)
self.assertNotIsInstance(b, (A,))
A.register(B)
self.assertTrue(issubclass(B, A))
self.assertTrue(issubclass(B, (A,)))
self.assertIsInstance(b, A)
self.assertIsInstance(b, (A,))
class C(B):
pass
c = C()
self.assertTrue(issubclass(C, A))
self.assertTrue(issubclass(C, (A,)))
self.assertIsInstance(c, A)
self.assertIsInstance(c, (A,))
def test_isinstance_invalidation(self):
class A:
__metaclass__ = abc.ABCMeta
class B(object):
pass
b = B()
self.assertFalse(isinstance(b, A))
self.assertFalse(isinstance(b, (A,)))
A.register(B)
self.assertTrue(isinstance(b, A))
self.assertTrue(isinstance(b, (A,)))
def test_registration_builtins(self):
class A:
__metaclass__ = abc.ABCMeta
A.register(int)
self.assertIsInstance(42, A)
self.assertIsInstance(42, (A,))
self.assertTrue(issubclass(int, A))
self.assertTrue(issubclass(int, (A,)))
class B(A):
pass
B.register(basestring)
self.assertIsInstance("", A)
self.assertIsInstance("", (A,))
self.assertTrue(issubclass(str, A))
self.assertTrue(issubclass(str, (A,)))
def test_registration_edge_cases(self):
class A:
__metaclass__ = abc.ABCMeta
A.register(A) # should pass silently
class A1(A):
pass
self.assertRaises(RuntimeError, A1.register, A) # cycles not allowed
class B(object):
pass
A1.register(B) # ok
A1.register(B) # should pass silently
class C(A):
pass
A.register(C) # should pass silently
self.assertRaises(RuntimeError, C.register, A) # cycles not allowed
C.register(B) # ok
def test_register_non_class(self):
class A(object):
__metaclass__ = abc.ABCMeta
self.assertRaisesRegexp(TypeError, "Can only register classes",
A.register, 4)
def test_registration_transitiveness(self):
class A:
__metaclass__ = abc.ABCMeta
self.assertTrue(issubclass(A, A))
self.assertTrue(issubclass(A, (A,)))
class B:
__metaclass__ = abc.ABCMeta
self.assertFalse(issubclass(A, B))
self.assertFalse(issubclass(A, (B,)))
self.assertFalse(issubclass(B, A))
self.assertFalse(issubclass(B, (A,)))
class C:
__metaclass__ = abc.ABCMeta
A.register(B)
class B1(B):
pass
self.assertTrue(issubclass(B1, A))
self.assertTrue(issubclass(B1, (A,)))
class C1(C):
pass
B1.register(C1)
self.assertFalse(issubclass(C, B))
self.assertFalse(issubclass(C, (B,)))
self.assertFalse(issubclass(C, B1))
self.assertFalse(issubclass(C, (B1,)))
self.assertTrue(issubclass(C1, A))
self.assertTrue(issubclass(C1, (A,)))
self.assertTrue(issubclass(C1, B))
self.assertTrue(issubclass(C1, (B,)))
self.assertTrue(issubclass(C1, B1))
self.assertTrue(issubclass(C1, (B1,)))
C1.register(int)
class MyInt(int):
pass
self.assertTrue(issubclass(MyInt, A))
self.assertTrue(issubclass(MyInt, (A,)))
self.assertIsInstance(42, A)
self.assertIsInstance(42, (A,))
def METHOD_NAME(self):
class A:
__metaclass__ = abc.ABCMeta
class B(object):
counter = 0
def __new__(cls):
B.counter += 1
return super(B, cls).__new__(cls)
class C(A, B):
pass
self.assertEqual(B.counter, 0)
C()
self.assertEqual(B.counter, 1)
@test_support.cpython_only
def test_cache_leak(self):
# See issue #2521.
class A(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def f(self):
pass
class C(A):
def f(self):
A.f(self)
r = weakref.ref(C)
# Trigger cache.
C().f()
del C
test_support.gc_collect()
self.assertEqual(r(), None)
def test_main():
    """Entry point used by Python 2's regrtest harness."""
    test_support.run_unittest(TestABC)


if __name__ == "__main__":
    unittest.main()
5,897 | test simple object | # -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from .... import __file__ as mars_file
from ..config import (
SecurityConfig,
AppFileConfig,
AppMasterConfig,
MarsApplicationConfig,
MarsSupervisorConfig,
MarsWorkerConfig,
)
def METHOD_NAME():
    """Smoke-test the simple builders: SecurityConfig, AppFileConfig, AppMasterConfig."""
    config = SecurityConfig("/path/to/cert.pem", "/path/to/key.pem").build()
    assert config["cert_file"] == "/path/to/cert.pem"
    assert config["key_file"] == "/path/to/key.pem"

    # A plain file source builds down to just the path string...
    config = AppFileConfig(source="/path/to/file").build()
    assert config == "/path/to/file"
    # ...while a typed source builds to a dict with source/type keys.
    config = AppFileConfig(source="/path/to/file", file_type="archive").build()
    assert config["source"] == "/path/to/file"
    assert config["type"] == "archive"

    # AppMasterConfig nests the security block and maps cpu -> resources.vcores.
    config = AppMasterConfig(
        security=SecurityConfig("/path/to/cert.pem", "/path/to/key.pem"),
        cpu=1,
        memory="512 MiB",
    ).build()
    assert config["security"]["cert_file"] == "/path/to/cert.pem"
    assert config["security"]["key_file"] == "/path/to/key.pem"
    assert config["resources"]["vcores"] == 1
def test_supervisor_config():
    """MarsSupervisorConfig: env propagation and source-path resolution for
    packed-archive, conda and venv environment specifications."""
    config = MarsSupervisorConfig(
        "/path/to/packed.tar.gz",
        "mars.test_mod",
        cpu=2,
        memory="10 GiB",
        env={"TEST_ENV": "test_val"},
        extra_args="-Dsupervisor.default_cpu_usage=0",
    ).build()
    # The packed archive is registered as the "mars_env" file entry.
    assert config["files"]["mars_env"] == "/path/to/packed.tar.gz"
    assert "mars.deploy.yarn.supervisor" in config["script"]
    config_envs = config["env"]
    assert config_envs["TEST_ENV"] == "test_val"
    # cpu count propagates into both thread limits and the Mars CPU total.
    assert config_envs["MKL_NUM_THREADS"] == "2"
    assert config_envs["MARS_CPU_TOTAL"] == "2"
    assert int(config_envs["MARS_MEMORY_TOTAL"]) == 10 * 1024**3
    assert config_envs["MARS_LOAD_MODULES"] == "mars.test_mod"

    # conda:// environments resolve MARS_SOURCE_PATH to the local mars package root.
    config = MarsSupervisorConfig(
        "conda://path/to_env",
        "mars.test_mod",
        cpu=2,
        memory="10 GiB",
        log_config="logging.conf",
        env={"TEST_ENV": "test_val"},
        extra_args="-Dsupervisor.default_cpu_usage=0",
    ).build()
    config_envs = config["env"]
    assert config_envs["MARS_SOURCE_PATH"] == os.path.dirname(
        os.path.dirname(mars_file)
    )

    # venv:// environments behave the same way.
    config = MarsSupervisorConfig(
        "venv://path/to_env",
        "mars.test_mod",
        cpu=2,
        log_config="logging.conf",
        env={"TEST_ENV": "test_val"},
        extra_args="-Dsupervisor.default_cpu_usage=0",
    ).build()
    config_envs = config["env"]
    assert config_envs["MARS_SOURCE_PATH"] == os.path.dirname(
        os.path.dirname(mars_file)
    )
def test_worker_config():
    """MarsWorkerConfig: supervisor dependency plus cache/spill env wiring."""
    config = MarsWorkerConfig("/path/to/packed.tar.gz").build()
    assert "mars.deploy.yarn.worker" in config["script"]
    # Workers must be scheduled after the supervisor service.
    assert config["depends"] == [MarsSupervisorConfig.service_name]

    config = MarsWorkerConfig(
        "/path/to/packed.tar.gz",
        worker_cache_mem="10g",
        spill_dirs=["/spill/dir1", "/spill/dir2"],
    ).build()
    config_envs = config["env"]
    assert config_envs["MARS_CACHE_MEM_SIZE"] == "10g"
    # Spill dirs are serialized as a single ':'-joined env variable.
    assert config_envs["MARS_SPILL_DIRS"].split(":") == ["/spill/dir1", "/spill/dir2"]
def test_app_config():
    """MarsApplicationConfig: top-level name/queue wrap the service configs."""
    supervisor_config = MarsSupervisorConfig(
        "/path/to/packed.tar.gz",
        "mars.test_mod",
        cpu=2,
        memory="10 GiB",
        env={"TEST_ENV": "test_val"},
        extra_args="-Dsupervisor.default_cpu_usage=0",
    )
    worker_config = MarsWorkerConfig(
        "/path/to/packed.tar.gz",
        worker_cache_mem="10g",
        spill_dirs=["/spill/dir1", "/spill/dir2"],
    )
    config = MarsApplicationConfig(
        name="config-name",
        queue="default",
        supervisor_config=supervisor_config,
        worker_config=worker_config,
    ).build()
    assert config["name"] == "config-name"
    assert config["queue"] == "default"
5,898 | parse section break | #!/usr/bin/env python3
"""Convert Google Docs V1 API's JSON to Markdown.
"""
__copyright__ = "Copyright (C) 2019 Martin Blais"
__license__ = "GNU GPLv2"
from os import path
import argparse
import textwrap
import datetime
import hashlib
import collections
import json
import logging
import os
import pickle
import pprint
import re
import shelve
import shutil
import subprocess
import tempfile
import urllib.parse
from typing import List
import bs4
import apiclient.errors
from apiclient import discovery
import httplib2
from oauth2client import service_account
def _get(obj, path):
for comp in path.split('/'):
if comp not in obj:
return
obj = obj[comp]
return obj
def _dispatch(table, elem):
celem = elem.copy()
celem.pop('startIndex', None)
celem.pop('endIndex', None)
assert len(celem) == 1
etype, econtents = celem.popitem()
return table[etype](econtents)
TextRun = collections.namedtuple('TextRun', 'text family')
def parse_TextRun(contents):
    """Build a TextRun from a Docs textRun element.

    The font family is pulled from the nested textStyle; it is None when
    the style path is absent (see _get).
    """
    family = _get(contents, 'textStyle/weightedFontFamily/fontFamily')
    return TextRun(contents['content'], family)
def parse_AutoText(contents):
    # Not expected in the documents this script targets.
    raise NotImplementedError

def parse_PageBreak(contents):
    # Page breaks have no markdown representation; silently dropped.
    pass

def parse_ColumnBreak(contents):
    raise NotImplementedError

def parse_FootnoteReference(contents):
    # Footnote references are currently ignored.
    pass
    #raise NotImplementedError(pprint.pformat(contents))

def parse_HorizontalRule(contents):
    # Horizontal rules are currently ignored.
    pass

def parse_Equation(contents):
    raise NotImplementedError

def parse_InlineObjectElement(contents):
    # Inline objects (e.g. embedded images) are currently ignored.
    pass
    #raise NotImplementedError

# Maps the single content key of a ParagraphElement to its handler;
# used via _dispatch (which strips the index bookkeeping keys first).
_dispatch_Element = {
    'textRun': parse_TextRun,
    'autoText': parse_AutoText,
    'pageBreak': parse_PageBreak,
    'columnBreak': parse_ColumnBreak,
    'footnoteReference': parse_FootnoteReference,
    'horizontalRule': parse_HorizontalRule,
    'equation': parse_Equation,
    'inlineObjectElement': parse_InlineObjectElement,
}
def parse_Element(elem):
    """Dispatch a ParagraphElement to its type handler; returns a TextRun or None."""
    return _dispatch(_dispatch_Element, elem)
def METHOD_NAME(econtents):
    """Handle a sectionBreak structural element: validated and dropped (returns None)."""
    assert econtents.keys() == {'sectionStyle'}, econtents
def parse_Table(econtents):
    # Tables are currently ignored (returns None, filtered out in parse_Body).
    pass
    #raise NotImplementedError
def parse_Paragraph(paragraph):
    """Parse a Paragraph into (named_style, [TextRun, ...]).

    Adjacent TextRuns with the same font family are merged while being
    collected, and vertical tabs (Docs soft line breaks) become newlines.
    """
    style = paragraph['paragraphStyle']['namedStyleType']
    # Compress runs of text together.
    parelems = []
    for element in paragraph['elements']:
        pelem = parse_Element(element)
        if isinstance(pelem, TextRun):
            # Merge with the previous element when it is a run of the same family.
            last = parelems[-1] if parelems else None
            if last and isinstance(last, TextRun) and last.family == pelem.family:
                parelems.pop(-1)
                pelem = TextRun(last.text + pelem.text, last.family)
            parelems.append(pelem)
        else:
            # All non-text handlers return None (their content is dropped).
            assert pelem is None
    # Convert all the hard newlines to soft ones.
    parelems = [elem._replace(text=elem.text.replace('\x0b', '\n'))
                if isinstance(elem, TextRun)
                else elem
                for elem in parelems]
    return (style, parelems)
def parse_TableOfContents(econtents):
    """Handle a tableOfContents element: validated and dropped (returns None)."""
    assert econtents.keys() == {'content'}, econtents.keys()
# Maps the single content key of a StructuralElement to its handler;
# only paragraphs produce output, the rest validate/ignore their input.
_dispatch_StructuralElement = {
    'sectionBreak': METHOD_NAME,
    'paragraph': parse_Paragraph,
    'table': parse_Table,
    'tableOfContents': parse_TableOfContents,
}
def parse_StructuralElement(selem):
    """Dispatch a StructuralElement; returns (style, runs) for paragraphs, else None."""
    return _dispatch(_dispatch_StructuralElement, selem)
def parse_Body(body):
    """Parse the document body, keeping only elements that produced output."""
    assert set(body.keys()) == {'content'}
    # filter(None, ...) drops the None results from ignored element types.
    return list(filter(None, [parse_StructuralElement(selem)
                              for selem in body['content']]))
def parse_Document(document):
    """Parse a Docs API Document into (title, parsed_body)."""
    return (document['title'], parse_Body(document['body']))
def remove_default_fonts(body, default_font='Cambria'):
    """Return a copy of *body* where runs in *default_font* get family None.

    Normalizing the document's default font to None lets later stages treat
    "unstyled" text uniformly.
    """
    stripped = []
    for style, runs in body:
        cleaned = [
            run._replace(family=None) if run.family == default_font else run
            for run in runs
        ]
        stripped.append((style, cleaned))
    return stripped
def merge_runs(body):
    """Merge adjacent text runs that share the same font family.

    Returns a new body; each (style, runs) pair keeps its style, and every
    maximal sequence of consecutive runs with equal ``family`` collapses
    into a single run with the concatenated text.

    Bug fix: the previous version never updated ``last_run`` when a run's
    family differed from its predecessor's, so a later run could be merged
    with a stale, non-adjacent run — popping (and losing) unrelated runs
    that sat in between.
    """
    new_body = []
    for etype, runs in body:
        new_runs = []
        last_run = None
        for run in runs:
            if last_run is not None and run.family == last_run.family:
                # Extend the previous run in place of appending a new one.
                run = last_run._replace(text=last_run.text + run.text)
                new_runs.pop(-1)
            # Always track the run we are about to append, whatever its family.
            last_run = run
            new_runs.append(run)
        new_body.append((etype, new_runs))
    return new_body
class Renderer:
    """Writes parsed (style, TextRun) items to a file in wiki-ish markup.

    Method names intentionally match the Docs API namedStyleType values
    (TITLE, HEADING_1, ...); main() dispatches via getattr on the style.
    """

    def __init__(self, outfile):
        # File-like object every render method prints to.
        self.file = outfile

    def TITLE(self, item):
        print("= {} =\n".format(item.text.strip()), file=self.file)

    def HEADING_1(self, item):
        print("== {} ==\n".format(item.text.strip()), file=self.file)

    def HEADING_2(self, item):
        print("=== {} ===\n".format(item.text.strip()), file=self.file)

    def HEADING_3(self, item):
        print("==== {} ====\n".format(item.text.strip()), file=self.file)

    def HEADING_4(self, item):
        print("===== {} =====\n".format(item.text.strip()), file=self.file)

    def HEADING_5(self, item):
        print("====== {} ======\n".format(item.text.strip()), file=self.file)

    def NORMAL_TEXT(self, item):
        # Consolas marks code in the source documents: emit it verbatim,
        # indented four spaces; all other text is refilled to 80 columns.
        if item.family == 'Consolas':
            lines = item.text.split('\n')
            print('\n'.join("    {}".format(line) for line in lines), file=self.file)
        else:
            print(textwrap.fill(item.text.strip(), 80), file=self.file)
            print(file=self.file)
def main():
    """CLI entry point: convert Google Docs API JSON file(s) to markup text.

    Accepts a single JSON file or a directory of ``*.json`` files; each one
    is parsed, normalized (default fonts stripped, adjacent runs merged),
    and rendered next to the input with a ``.md`` extension.
    """
    logging.basicConfig(level=logging.INFO, format='%(levelname)-8s: %(message)s')
    parser = argparse.ArgumentParser(description=__doc__.strip())
    parser.add_argument('--fileordir', action='store', default=os.getcwd(),
                        help="The JSON file or directory to process")
    args = parser.parse_args()

    if path.isfile(args.fileordir):
        filenames = [args.fileordir]
    else:
        # Fix: the regex was the non-raw string '\.json$' (invalid escape
        # sequence); a plain endswith() test is equivalent and clearer.
        filenames = [path.join(args.fileordir, x)
                     for x in os.listdir(args.fileordir)
                     if x.endswith('.json')]

    for filename in filenames:
        with open(filename, 'r') as infile:
            document = json.load(infile)
        title, body = parse_Document(document)
        for item in body:
            assert len(item) == 2
        body = remove_default_fonts(body)
        body = merge_runs(body)

        # Fix: derive the output name from the extension rather than
        # str.replace('.json', '.md'), which could corrupt names containing
        # '.json' mid-string and silently overwrote the input file when the
        # extension was absent.
        root, _ext = path.splitext(filename)
        output_filename = root + '.md'
        with open(output_filename, 'w') as outfile:
            renderer = Renderer(outfile)
            for etype, runs in body:
                fun = getattr(renderer, etype, None)
                if fun is None:
                    # No renderer for this style: note it on stdout and skip.
                    print(etype)
                else:
                    for run in runs:
                        fun(run)
5,899 | test jit script signature | # Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import torch
from nncf.config import NNCFConfig
from nncf.torch.dynamic_graph.context import TracingContext
from nncf.torch.dynamic_graph.patch_pytorch import _ORIG_JIT_SCRIPT
from nncf.torch.dynamic_graph.patch_pytorch import MagicFunctionsToPatch
from nncf.torch.dynamic_graph.trace_tensor import TensorMeta
from nncf.torch.dynamic_graph.trace_tensor import TracedTensor
from nncf.torch.graph.operator_metatypes import PT_OPERATOR_METATYPES
from tests.shared.isolation_runner import run_pytest_case_function_in_separate_process
from tests.torch.helpers import BasicConvTestModel
from tests.torch.helpers import create_compressed_model_and_algo_for_test
from tests.torch.helpers import register_bn_adaptation_init_args
from tests.torch.pytorch_patch_isolated import test_jit_if_tracing_script_source_equals
from tests.torch.pytorch_patch_isolated import test_jit_script_exception_preserves_patching_isolated
def test_get_all_aliases_is_valid():
    """Every registered operator metatype must report at least one alias."""
    invalid_metatypes = [
        operator
        for operator in PT_OPERATOR_METATYPES.registry_dict
        if not PT_OPERATOR_METATYPES.get(operator).get_all_aliases()
    ]
    assert not invalid_metatypes, f"There are metatypes with invalid `get_all_aliaces` method: {invalid_metatypes}"
def test_are_all_magic_functions_patched():
    """Every dunder alias of every metatype must appear in MAGIC_FUNCTIONS_TO_PATCH."""
    for operator in PT_OPERATOR_METATYPES.registry_dict:
        for function_name in PT_OPERATOR_METATYPES.get(operator).get_all_aliases():
            # Only magic (dunder) names are expected to be patched.
            if not (function_name.startswith("__") and function_name.endswith("__")):
                continue
            is_contained = any(
                function_name in functions
                for functions in MagicFunctionsToPatch.MAGIC_FUNCTIONS_TO_PATCH.values()
            )
            assert is_contained
def test_tensor_printing_does_not_inflate_graph():
    """Printing / repr of plain and traced tensors must add no graph nodes."""
    context_to_use = TracingContext()
    context_to_use.enable_trace_dynamic_graph()
    with context_to_use as _ctx:
        with torch.no_grad():
            tensor = torch.ones([1, 2])
            print(tensor)
            str(tensor)
            tensor.__repr__()
            # Repeat with a TracedTensor, which is the NNCF-wrapped case.
            tensor = TracedTensor.from_torch_tensor(tensor, TensorMeta(0, 0, tensor.shape))
            print(tensor)
            str(tensor)
            tensor.__repr__()
        assert _ctx.graph.get_nodes_count() == 0
def test_jit_if_tracing_script_patching(tmp_path):
    """ONNX export through a script_if_tracing function must succeed under NNCF patching."""
    @torch.jit.script_if_tracing
    def test_fn(x: torch.Tensor):
        return torch.empty(x.shape)

    class TestModel(torch.nn.Module):
        def forward(self, x: torch.Tensor):
            return test_fn(x)

    # ONNX export should work correctly because torch.jit.script_if_tracing is patched
    torch.onnx.export(TestModel(), (torch.zeros((1,)),), str(tmp_path / "jit_if_tracing_test_model.onnx"))
def test_jit_if_tracing_script_source():
    """Isolated check that the patched script_if_tracing source matches the original."""
    # Run test case in a separate process to track patching of torch by NNCF
    run_pytest_case_function_in_separate_process(test_jit_if_tracing_script_source_equals)
def test_jit_script_exception_preserves_patching():
    """Isolated check that a jit.script failure leaves NNCF's torch patching intact."""
    # Run test case in a separate process to track patching of torch by NNCF
    run_pytest_case_function_in_separate_process(test_jit_script_exception_preserves_patching_isolated)
def METHOD_NAME():
    """The unpatched torch.jit.script must expose the parameters NNCF's wrapper relies on."""
    # Check that torch.jit.script has the same signature as the wrapper was designed for
    params = inspect.signature(_ORIG_JIT_SCRIPT).parameters
    assert "obj" in params and "_rcb" in params and "_frames_up" in params
def test_jit_script_class():
    """Scripting a class exercises the custom resolution callback in NNCF's jit.script wrapper."""
    # Define an outside function to test custom resolution callback inside torch_jit_script_wrapper
    def outside_function(x):
        return x + torch.tensor(1.0)

    class TestClass:
        def class_method(self, x):
            return outside_function(x)

    # Scripting a class instead of a method to trigger custom resolution callback usage
    torch.jit.script(TestClass)
def test_jit_trace_model():
    """A quantization-compressed model and its stripped form must both be jit-traceable."""
    model = BasicConvTestModel()
    config = NNCFConfig()
    config.update(
        {
            "model": "model",
            "input_info": {"sample_size": model.INPUT_SIZE},
            "compression": {"algorithm": "quantization"},
        }
    )
    register_bn_adaptation_init_args(config)
    compressed_model, compression_ctrl = create_compressed_model_and_algo_for_test(model, config)
    torch.jit.trace(compressed_model, example_inputs=torch.rand(model.INPUT_SIZE))
    # Stripping removes NNCF wrappers; tracing must still work on the result.
    model = compression_ctrl.strip()
    torch.jit.trace(model, example_inputs=torch.rand(model.INPUT_SIZE))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.