id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
7,300 | prop descriptions | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Unselected(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterpolargl"
_path_str = "scatterpolargl.unselected"
_valid_props = {"marker", "textfont"}
# marker
# ------
# marker accessor
@property
def marker(self):
    """
    The 'marker' property is an instance of Marker
    that may be specified as:
      - An instance of :class:`plotly.graph_objs.scatterpolargl.unselected.Marker`
      - A dict of string/value properties that will be passed
        to the Marker constructor

        Supported dict properties:

            color
                Sets the marker color of unselected points,
                applied only when a selection exists.
            opacity
                Sets the marker opacity of unselected points,
                applied only when a selection exists.
            size
                Sets the marker size of unselected points,
                applied only when a selection exists.

    Returns
    -------
    plotly.graph_objs.scatterpolargl.unselected.Marker
    """
    return self["marker"]

@marker.setter
def marker(self, val):
    # Validation/coercion of `val` (dict or Marker) is delegated to
    # BaseTraceHierarchyType.__setitem__.
    self["marker"] = val
# textfont
# --------
# textfont accessor
@property
def textfont(self):
    """
    The 'textfont' property is an instance of Textfont
    that may be specified as:
      - An instance of :class:`plotly.graph_objs.scatterpolargl.unselected.Textfont`
      - A dict of string/value properties that will be passed
        to the Textfont constructor

        Supported dict properties:

            color
                Sets the text font color of unselected points,
                applied only when a selection exists.

    Returns
    -------
    plotly.graph_objs.scatterpolargl.unselected.Textfont
    """
    return self["textfont"]

@textfont.setter
def textfont(self, val):
    # Validation/coercion of `val` (dict or Textfont) is delegated to
    # BaseTraceHierarchyType.__setitem__.
    self["textfont"] = val
# Self properties description
# ---------------------------
@property
def METHOD_NAME(self):
    # Plain-text summary of this object's properties; plotly's generated
    # constructors interpolate this text into their docstrings.
    return """\
    marker
        :class:`plotly.graph_objects.scatterpolargl.unselected.
        Marker` instance or dict with compatible properties
    textfont
        :class:`plotly.graph_objects.scatterpolargl.unselected.
        Textfont` instance or dict with compatible properties
    """
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
    """
    Construct a new Unselected object

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or
        an instance of
        :class:`plotly.graph_objs.scatterpolargl.Unselected`
    marker
        :class:`plotly.graph_objects.scatterpolargl.unselected.
        Marker` instance or dict with compatible properties
    textfont
        :class:`plotly.graph_objects.scatterpolargl.unselected.
        Textfont` instance or dict with compatible properties

    Returns
    -------
    Unselected
    """
    super(Unselected, self).__init__("unselected")
    # Internal fast path used when re-parenting an existing node: adopt the
    # parent and skip the validation/population logic below entirely.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Validate arg
    # ------------
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        # Shallow-copy so the caller's dict is not mutated by pop() below.
        arg = _copy.copy(arg)
    else:
        raise ValueError(
            """\
The first argument to the plotly.graph_objs.scatterpolargl.Unselected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolargl.Unselected`"""
        )

    # Handle skip_invalid
    # -------------------
    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Populate data dict with properties
    # ----------------------------------
    # Explicit keyword arguments take precedence over entries in `arg`.
    _v = arg.pop("marker", None)
    _v = marker if marker is not None else _v
    if _v is not None:
        self["marker"] = _v
    _v = arg.pop("textfont", None)
    _v = textfont if textfont is not None else _v
    if _v is not None:
        self["textfont"] = _v

    # Process unknown kwargs
    # ----------------------
    self._process_kwargs(**dict(arg, **kwargs))

    # Reset skip_invalid
    # ------------------
    self._skip_invalid = False
7,301 | query package | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Kim Nørgaard
# Written by Kim Nørgaard <jasen@jasen.dk>
# Based on pkgng module written by bleader <bleader@ratonland.org>
# that was based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
# that was based on pacman module written by Afterburn <https://github.com/afterburn>
# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: slackpkg
short_description: Package manager for Slackware >= 12.2
description:
- Manage binary packages for Slackware using 'slackpkg' which
is available in versions after 12.2.
extends_documentation_fragment:
- community.general.attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
options:
name:
description:
- name of package to install/remove
required: true
type: list
elements: str
aliases: [pkg]
state:
description:
- State of the package, you can use V(installed) as an alias for V(present) and V(removed) as one for V(absent).
choices: [ 'present', 'absent', 'latest', 'installed', 'removed' ]
required: false
default: present
type: str
update_cache:
description:
- update the package database first
required: false
default: false
type: bool
author: Kim Nørgaard (@KimNorgaard)
requirements: [ "Slackware >= 12.2" ]
'''
EXAMPLES = '''
- name: Install package foo
community.general.slackpkg:
name: foo
state: present
- name: Remove packages foo and bar
community.general.slackpkg:
name: foo,bar
state: absent
- name: Make sure that it is the most updated package
community.general.slackpkg:
name: foo
state: latest
'''
from ansible.module_utils.basic import AnsibleModule
def METHOD_NAME(module, slackpkg_path, name):
    """
    Return True if Slackware package `name` is currently installed.

    Slackware records one metadata file per installed package in
    /var/log/packages, named <name>-<version>-<arch>-<build>; presence of a
    matching file for this machine's architecture means "installed".
    """
    import platform
    import os
    import re

    machine = platform.machine()
    # Exception for kernel-headers package on x86_64
    if name == 'kernel-headers' and machine == 'x86_64':
        machine = 'x86'
    pattern = re.compile(r'^%s-[^-]+-(%s|noarch|fw)-[^-]+$'
                         % (re.escape(name), re.escape(machine)))
    # any() short-circuits on the first match instead of materializing a
    # full list just to test its length.
    return any(pattern.match(f) for f in os.listdir('/var/log/packages'))
def remove_packages(module, slackpkg_path, packages):
    """Remove the given packages; ends the module run via exit_json/fail_json."""
    remove_c = 0
    # Using a for loop in case of error, we can report the package that failed
    for package in packages:
        # Query the package first, to see if we even need to remove
        if not METHOD_NAME(module, slackpkg_path, package):
            continue
        if not module.check_mode:
            rc, out, err = module.run_command("%s -default_answer=y -batch=on \
                                              remove %s" % (slackpkg_path,
                                                            package))
        # Re-query to verify removal actually happened (the return code alone
        # is not trusted); skipped in check mode where nothing was changed.
        if not module.check_mode and METHOD_NAME(module, slackpkg_path,
                                                 package):
            module.fail_json(msg="failed to remove %s: %s" % (package, out))
        remove_c += 1
    if remove_c > 0:
        # exit_json terminates the module run here.
        module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
    module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, slackpkg_path, packages):
    """Install any of the given packages not already present; exits the module."""
    install_c = 0
    for package in packages:
        # Already installed: nothing to do for this package.
        if METHOD_NAME(module, slackpkg_path, package):
            continue
        if not module.check_mode:
            rc, out, err = module.run_command("%s -default_answer=y -batch=on \
                                              install %s" % (slackpkg_path,
                                                             package))
        # Re-query to confirm the install succeeded; skipped in check mode.
        if not module.check_mode and not METHOD_NAME(module, slackpkg_path,
                                                     package):
            module.fail_json(msg="failed to install %s: %s" % (package, out),
                             stderr=err)
        install_c += 1
    if install_c > 0:
        # exit_json terminates the module run here.
        module.exit_json(changed=True, msg="present %s package(s)"
                         % (install_c))
    module.exit_json(changed=False, msg="package(s) already present")
def upgrade_packages(module, slackpkg_path, packages):
    """Run `slackpkg upgrade` on each package; exits the module when done."""
    install_c = 0
    for package in packages:
        # Unlike install/remove there is no pre-check here: upgrade is always
        # attempted and slackpkg decides whether anything needs doing.
        if not module.check_mode:
            rc, out, err = module.run_command("%s -default_answer=y -batch=on \
                                              upgrade %s" % (slackpkg_path,
                                                             package))
        # Verify the package exists afterwards; skipped in check mode.
        # NOTE(review): the failure message says "install" although this is
        # the upgrade path -- looks like a copy-paste from install_packages;
        # confirm before changing the user-visible string.
        if not module.check_mode and not METHOD_NAME(module, slackpkg_path,
                                                     package):
            module.fail_json(msg="failed to install %s: %s" % (package, out),
                             stderr=err)
        install_c += 1
    if install_c > 0:
        module.exit_json(changed=True, msg="present %s package(s)"
                         % (install_c))
    module.exit_json(changed=False, msg="package(s) already present")
def update_cache(module, slackpkg_path):
    """Refresh the slackpkg package database; fail the module run on error."""
    rc, out, err = module.run_command("%s -batch=on update" % (slackpkg_path))
    if rc != 0:
        module.fail_json(msg="Could not update package cache")
def main():
    """Ansible entry point: parse parameters and dispatch to a state handler."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default="present", choices=['installed', 'removed', 'absent', 'present', 'latest']),
            name=dict(aliases=["pkg"], required=True, type='list', elements='str'),
            update_cache=dict(default=False, type='bool'),
        ),
        supports_check_mode=True)

    # get_bin_path(..., True) fails the module if slackpkg is not found.
    slackpkg_path = module.get_bin_path('slackpkg', True)

    p = module.params
    pkgs = p['name']

    if p["update_cache"]:
        update_cache(module, slackpkg_path)

    # Each handler terminates the run itself via module.exit_json/fail_json,
    # so at most one branch completes.
    if p['state'] == 'latest':
        upgrade_packages(module, slackpkg_path, pkgs)
    elif p['state'] in ['present', 'installed']:
        install_packages(module, slackpkg_path, pkgs)
    elif p["state"] in ['removed', 'absent']:
        remove_packages(module, slackpkg_path, pkgs)
if __name__ == '__main__':
main() |
7,302 | main | # -*- coding: utf-8 -*-
import os
import aredis
import pytest
from ddtrace import Pin
from ddtrace.contrib.aredis.patch import patch
from ddtrace.contrib.aredis.patch import unpatch
from ddtrace.internal.schema.span_attribute_schema import _DEFAULT_SPAN_SERVICE_NAMES
from ddtrace.vendor.wrapt import ObjectProxy
from tests.opentracer.utils import init_tracer
from tests.utils import override_config
from ..config import REDIS_CONFIG
@pytest.fixture(autouse=True)
async def traced_aredis():
    """Autouse fixture: flush redis, patch aredis for tracing, and on
    teardown unpatch and flush again so tests start from a clean slate."""
    r = aredis.StrictRedis(port=REDIS_CONFIG["port"])
    await r.flushall()

    patch()
    try:
        yield
    finally:
        unpatch()
        # A fresh, unpatched client is created for the final cleanup flush.
        r = aredis.StrictRedis(port=REDIS_CONFIG["port"])
        await r.flushall()
def test_patching():
    """
    When patching aredis library
        We wrap the correct methods
    When unpatching aredis library
        We unwrap the correct methods
    """
    # patch() has already been applied by the autouse traced_aredis fixture,
    # so the wrapped state is asserted first.
    assert isinstance(aredis.client.StrictRedis.execute_command, ObjectProxy)
    assert isinstance(aredis.client.StrictRedis.pipeline, ObjectProxy)
    assert isinstance(aredis.pipeline.StrictPipeline.execute, ObjectProxy)
    assert isinstance(aredis.pipeline.StrictPipeline.immediate_execute_command, ObjectProxy)

    unpatch()

    assert not isinstance(aredis.client.StrictRedis.execute_command, ObjectProxy)
    assert not isinstance(aredis.client.StrictRedis.pipeline, ObjectProxy)
    assert not isinstance(aredis.pipeline.StrictPipeline.execute, ObjectProxy)
    assert not isinstance(aredis.pipeline.StrictPipeline.immediate_execute_command, ObjectProxy)
@pytest.mark.asyncio
async def test_long_command(snapshot_context):
with snapshot_context():
r = aredis.StrictRedis(port=REDIS_CONFIG["port"])
await r.mget(*range(1000))
@pytest.mark.skip(reason="No traces sent to the test agent")
@pytest.mark.subprocess(env=dict(DD_AREDIS_CMD_MAX_LENGTH="10"), ddtrace_run=True)
@pytest.mark.snapshot
def test_cmd_max_length_env():
import asyncio
import aredis
from tests.contrib.config import REDIS_CONFIG
async def METHOD_NAME():
r = aredis.StrictRedis(port=REDIS_CONFIG["port"])
await r.get("here-is-a-long-key")
asyncio.run(METHOD_NAME())
@pytest.mark.asyncio
async def test_cmd_max_length(snapshot_context):
with override_config("aredis", dict(cmd_max_length=7)):
with snapshot_context():
r = aredis.StrictRedis(port=REDIS_CONFIG["port"])
await r.get("here-is-a-long-key")
@pytest.mark.asyncio
async def test_basics(snapshot_context):
with snapshot_context():
r = aredis.StrictRedis(port=REDIS_CONFIG["port"])
await r.get("cheese")
@pytest.mark.asyncio
async def test_unicode(snapshot_context):
with snapshot_context():
r = aredis.StrictRedis(port=REDIS_CONFIG["port"])
await r.get(u"😐")
@pytest.mark.asyncio
async def test_analytics_without_rate(snapshot_context):
with override_config("aredis", dict(analytics_enabled=True)):
with snapshot_context():
r = aredis.StrictRedis(port=REDIS_CONFIG["port"])
await r.get("cheese")
@pytest.mark.asyncio
async def test_analytics_with_rate(snapshot_context):
with override_config("aredis", dict(analytics_enabled=True, analytics_sample_rate=0.5)):
with snapshot_context():
r = aredis.StrictRedis(port=REDIS_CONFIG["port"])
await r.get("cheese")
@pytest.mark.asyncio
async def test_pipeline_traced(snapshot_context):
with snapshot_context():
r = aredis.StrictRedis(port=REDIS_CONFIG["port"])
p = await r.pipeline(transaction=False)
await p.set("blah", 32)
await p.rpush("foo", u"éé")
await p.hgetall("xxx")
await p.execute()
@pytest.mark.asyncio
async def test_pipeline_immediate(snapshot_context):
with snapshot_context():
r = aredis.StrictRedis(port=REDIS_CONFIG["port"])
p = await r.pipeline()
await p.set("a", 1)
await p.immediate_execute_command("SET", "a", 1)
await p.execute()
@pytest.mark.asyncio
async def test_meta_override(tracer, test_spans):
    """A Pin cloned with extra tags attaches those tags to emitted spans."""
    r = aredis.StrictRedis(port=REDIS_CONFIG["port"])
    pin = Pin.get_from(r)
    assert pin is not None
    pin.clone(tags={"cheese": "camembert"}, tracer=tracer).onto(r)
    await r.get("cheese")

    test_spans.assert_trace_count(1)
    test_spans.assert_span_count(1)
    # Default integration tags must still be present alongside the custom one.
    assert test_spans.spans[0].service == "redis"
    assert test_spans.spans[0].get_tag("component") == "aredis"
    assert test_spans.spans[0].get_tag("span.kind") == "client"
    assert test_spans.spans[0].get_tag("db.system") == "redis"
    assert "cheese" in test_spans.spans[0].get_tags() and test_spans.spans[0].get_tag("cheese") == "camembert"
@pytest.mark.parametrize(
    "schema_tuplets",
    [
        # (DD_SERVICE, schema version, expected span service, expected span name)
        (None, None, "redis", "redis.command"),
        (None, "v0", "redis", "redis.command"),
        (None, "v1", _DEFAULT_SPAN_SERVICE_NAMES["v1"], "redis.command"),
        ("mysvc", None, "redis", "redis.command"),
        ("mysvc", "v0", "redis", "redis.command"),
        ("mysvc", "v1", "mysvc", "redis.command"),
    ],
)
def test_schematization_of_service_and_operation(ddtrace_run_python_code_in_subprocess, schema_tuplets):
    """Span service/name follow the configured attribute schema version.

    The test body runs in a subprocess because DD_SERVICE and
    DD_TRACE_SPAN_ATTRIBUTE_SCHEMA are read at ddtrace startup.
    """
    service, schema, expected_service, expected_operation = schema_tuplets
    # Literal braces inside the template are doubled ({{ }}) because the
    # string goes through str.format() below.
    code = """
import asyncio
import pytest
import sys
from tests.conftest import *
from ddtrace.pin import Pin
import aredis
from tests.contrib.config import REDIS_CONFIG
from tests.contrib.aredis.test_aredis import traced_aredis


@pytest.mark.asyncio
async def test(tracer, test_spans):
    r = aredis.StrictRedis(port=REDIS_CONFIG["port"])
    pin = Pin.get_from(r)
    assert pin is not None
    pin.clone(tags={{"cheese": "camembert"}}, tracer=tracer).onto(r)
    await r.get("cheese")

    test_spans.assert_trace_count(1)
    test_spans.assert_span_count(1)
    assert test_spans.spans[0].service == "{}"
    assert test_spans.spans[0].name == "{}"

if __name__ == "__main__":
    sys.exit(pytest.main(["-x", __file__]))
""".format(
        expected_service, expected_operation
    )
    env = os.environ.copy()
    if service:
        env["DD_SERVICE"] = service
    if schema:
        env["DD_TRACE_SPAN_ATTRIBUTE_SCHEMA"] = schema
    out, err, status, _ = ddtrace_run_python_code_in_subprocess(code, env=env)
    assert status == 0, (err.decode(), out.decode())
    assert err == b"", err.decode()
@pytest.mark.asyncio
async def test_opentracing(tracer, snapshot_context):
"""Ensure OpenTracing works with redis."""
with snapshot_context():
r = aredis.StrictRedis(port=REDIS_CONFIG["port"])
pin = Pin.get_from(r)
ot_tracer = init_tracer("redis_svc", pin.tracer)
with ot_tracer.start_active_span("redis_get"):
await r.get("cheese") |
7,303 | publish pose | #!/usr/bin/env python3
#
# Software License Agreement (BSD License)
# Copyright © 2011, 2022-2023 belongs to Shadow Robot Company Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of Shadow Robot Company Ltd nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# This software is provided by Shadow Robot Company Ltd "as is" and any express
# or implied warranties, including, but not limited to, the implied warranties of
# merchantability and fitness for a particular purpose are disclaimed. In no event
# shall the copyright holder be liable for any direct, indirect, incidental, special,
# exemplary, or consequential damages (including, but not limited to, procurement of
# substitute goods or services; loss of use, data, or profits; or business interruption)
# however caused and on any theory of liability, whether in contract, strict liability,
# or tort (including negligence or otherwise) arising in any way out of the use of this
# software, even if advised of the possibility of such damage.
"""
This program is a simple script that runs the hand through different given poses.
It is a good example on how to use the latching feature of the
ros publisher to make sure a data is received even if we don't stream
the data.
"""
import time
import rospy
from std_msgs.msg import Float64
class LatchingExample:
    """Runs a Shadow Hand through a sequence of poses using latched publishers."""

    # type of controller that is running
    controller_type = "_position_controller"

    def __init__(self):
        # Prefix of hand, can be lh for left or rh for right
        self.prefix = 'rh'

        # Joint names, in the order matched positionally by the pose lists
        # built in run().
        self.keys = ['FFJ0', 'FFJ3', 'FFJ4',
                     'MFJ0', 'MFJ3', 'MFJ4',
                     'RFJ0', 'RFJ3', 'RFJ4',
                     'LFJ0', 'LFJ3', 'LFJ4', 'LFJ5',
                     'THJ1', 'THJ2', 'THJ3', 'THJ4', 'THJ5',
                     'WRJ1', 'WRJ2']

        self.keys_prefixed = [f"{self.prefix}_{joint}" for joint in self.keys]
        self.hand_publishers = self.create_hand_publishers(self.keys_prefixed)

        # Seconds to pause between poses.
        self.sleep_time = 3.0

    def run(self):
        """
        Runs the hand through different predefined position in a given order.
        """
        # NOTE(review): there are 20 joints in self.keys but `start` lists 24
        # values (the extra 4 are silently dropped by zip()), while `fist`
        # and `victory` list only 19 values, so the last joint (rh_WRJ2) is
        # never commanded by those poses -- confirm the intended values.
        start = [0.00, 0.00, 0.00, 0.00,
                 0.00, 0.00, 0.00, 0.00,
                 0.00, 0.00, 0.00, 0.00,
                 0.00, 0.00, 0.00, 0.00, 0.00,
                 0.00, 0.00, 0.00, 0.00, 0.00,
                 0.00, 0.00]

        fist = [3.14, 1.57, 0.00,
                3.14, 1.57, 0.00,
                3.14, 1.57, 0.00,
                3.4, 1.57, 0.00, 0.00,
                1.33, 0.00, 1.15, 0.26,
                0.00, 0.00]

        victory = [0.00, 0.00, -0.35,
                   0.00, 0.00, 0.35,
                   3.14, 1.57, -0.17,
                   3.14, 1.57, -0.17, 0.00,
                   1.05, 0.00, 0.87, 0.61,
                   0.00, 0.00]

        # Wave poses only command the last joint (keys_prefixed[-1:]).
        wave_1 = [-0.35]
        wave_2 = [0.09]

        start_pose = dict(zip(self.keys_prefixed, start))
        fist_pose = dict(zip(self.keys_prefixed, fist))
        victory_pose = dict(zip(self.keys_prefixed, victory))
        wave_1_pose = dict(zip(self.keys_prefixed[-1:], wave_1))
        wave_2_pose = dict(zip(self.keys_prefixed[-1:], wave_2))

        # publish each pose and sleep for time 'sleep_time' between each command
        self.METHOD_NAME(start_pose)
        time.sleep(self.sleep_time)

        self.METHOD_NAME(fist_pose)
        time.sleep(self.sleep_time)

        self.METHOD_NAME(start_pose)
        time.sleep(self.sleep_time)

        self.METHOD_NAME(victory_pose)
        time.sleep(self.sleep_time)

        self.METHOD_NAME(start_pose)
        time.sleep(self.sleep_time)

        # Wave back and forth four times, at double speed.
        for _ in range(4):
            self.METHOD_NAME(wave_1_pose)
            time.sleep(self.sleep_time / 2)
            self.METHOD_NAME(wave_2_pose)
            time.sleep(self.sleep_time / 2)

    def METHOD_NAME(self, pose):
        """
        Publish a given pose.

        `pose` maps a prefixed joint name to a target position (rad); each
        value is sent on that joint's latched command topic.
        """
        for joint, pos in pose.items():
            self.hand_publishers[joint].publish(pos)

    def create_hand_publishers(self, keys_prefixed):
        """
        Creates a dictionary of publishers to send the targets to the controllers
        on /sh_??_??j?_mixed_position_velocity_controller/command
        or /sh_??_??j?_position_controller/command
        """
        hand_pub = {}

        for joint in keys_prefixed:
            # Here we initialize the publisher with the latch set to True.
            # this will ensure that the hand gets the message, even though we're
            # using the messages more as a service (we don't stream the data, we
            # just ask the hand to take a given position)
            hand_pub[joint] = rospy.Publisher(
                'sh_' + joint.lower() + self.controller_type + '/command', Float64,
                latch=True, queue_size=1)

        return hand_pub
def main():
    """Initialise the ROS node and run the pose sequence once."""
    rospy.init_node('sr_latching_example', anonymous=True)
    example = LatchingExample()
    example.run()
if __name__ == '__main__':
main() |
7,304 | set fixed parameters | #
# HES1 Michaelis-Menten model of regulatory dynamics.
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import numpy as np
import pints
import scipy
from . import ToyODEModel
class Hes1Model(ToyODEModel, pints.ForwardModelS1):
"""
HES1 Michaelis-Menten model of regulatory dynamics [1]_.
This model describes the expression level of the transcription factor
Hes1.
.. math::
\\frac{dm}{dt} &= -k_{deg}m + \\frac{1}{1 + (p_2/P_0)^h} \\\\
\\frac{dp_1}{dt} &= -k_{deg} p_1 + \\nu m - k_1 p_1 \\\\
\\frac{dp_2}{dt} &= -k_{deg} p_2 + k_1 p_1
The system is determined by 3 state variables :math:`m`, :math:`p_1`, and
:math:`p_2`. It is assumed that only :math:`m` can be observed, that is
only :math:`m` is an observable. The initial condition of the other two
state variables and :math:`k_{deg}` are treated as implicit parameters of
the system. The input order of parameters of interest is
:math:`\\{ P_0, \\nu, k_1, h \\}`.
Extends :class:`pints.ForwardModel`, :class:`pints.toy.ToyModel`.
Parameters
----------
m0 : float
The initial condition of the observable ``m``. Requires ``m0 >= 0``.
fixed_parameters
The fixed parameters of the model which are not inferred, given as a
vector ``[p1_0, p2_0, k_deg]`` with ``p1_0, p2_0, k_deg >= 0``.
References
----------
.. [1] Silk, D., el al. 2011. Designing attractive models via automated
identification of chaotic and oscillatory dynamical regimes. Nature
communications, 2, p.489.
https://doi.org/10.1038/ncomms1496
"""
def __init__(self, m0=None, fixed_parameters=None):
    # Fixed parameters must be set before m0: set_m0() reads self._p0 when
    # assembling the initial-condition vector.
    if fixed_parameters is None:
        # Defaults: [p1_0, p2_0, k_deg] = [5, 3, 0.03]
        self.METHOD_NAME([5., 3., 0.03])
    else:
        self.METHOD_NAME(fixed_parameters)
    if m0 is None:
        self.set_m0(2)
    else:
        self.set_m0(m0)
def _dfdp(self, state, time, parameters):
    """ See :meth:`pints.ToyModel.jacobian()`. """
    # Sensitivities of the RHS w.r.t. the inferred parameters (P0, v, k1, h);
    # rows index the states (m, p1, p2), columns the parameters.
    m, p1, p2 = state
    P0, v, k1, h = parameters
    p2_over_p0 = p2 / P0
    p2_over_p0_h = p2_over_p0**h
    one_plus_p2_expression_sq = (1 + p2_over_p0_h)**2
    ret = np.empty((self.n_states(), self.n_parameters()))
    # d(dm/dt)/dP0 of the Hill term 1 / (1 + (p2/P0)^h)
    ret[0, 0] = h * p2 * p2_over_p0**(h - 1) / (
        P0**2 * one_plus_p2_expression_sq)
    ret[0, 1] = 0
    ret[0, 2] = 0
    # d(dm/dt)/dh = -(p2/P0)^h * log(p2/P0) / (1 + (p2/P0)^h)^2
    ret[0, 3] = - (p2_over_p0_h * np.log(p2_over_p0)) / (
        one_plus_p2_expression_sq
    )
    ret[1, 0] = 0
    # d(dp1/dt)/dv = m;  d(dp1/dt)/dk1 = -p1
    ret[1, 1] = m
    ret[1, 2] = -p1
    ret[1, 3] = 0
    ret[2, 0] = 0
    ret[2, 1] = 0
    # d(dp2/dt)/dk1 = p1
    ret[2, 2] = p1
    ret[2, 3] = 0
    return ret
def m0(self):
"""
Returns the initial conditions of the ``m`` variable.
"""
return self._y0[0]
def fixed_parameters(self):
"""
Returns the fixed parameters of the model which are not inferred, given
as a vector ``[p1_0, p2_0, k_deg]``.
"""
return [self._p0[0], self._p0[1], self._kdeg]
def jacobian(self, state, time, parameters):
    """ See :meth:`pints.ToyModel.jacobian()`. """
    # Jacobian of the RHS w.r.t. the states: entry [i, j] = d(state_i')/d(state_j)
    # for states ordered (m, p1, p2).
    m, p1, p2 = state
    P0, v, k1, h = parameters
    k_deg = self._kdeg
    p2_over_p0 = p2 / P0
    p2_over_p0_h = p2_over_p0**h
    one_plus_p2_expression_sq = (1 + p2_over_p0_h)**2
    ret = np.zeros((self.n_states(), self.n_states()))
    ret[0, 0] = -k_deg
    ret[0, 1] = 0
    # d(dm/dt)/dp2 of the Hill term 1 / (1 + (p2/P0)^h)
    ret[0, 2] = -h * p2_over_p0**(h - 1) / (P0 * one_plus_p2_expression_sq)
    ret[1, 0] = v
    ret[1, 1] = -k1 - k_deg
    ret[1, 2] = 0
    ret[2, 0] = 0
    ret[2, 1] = k1
    ret[2, 2] = -k_deg
    return ret
def n_states(self):
""" See :meth:`pints.ToyODEModel.n_states()`. """
return 3
def n_outputs(self):
""" See :meth:`pints.ForwardModel.n_outputs()`. """
return 1
def n_parameters(self):
""" See :meth:`pints.ForwardModel.n_parameters()`. """
return 4
def _rhs(self, state, time, parameters):
    """
    Right-hand side equation of the ode to solve.

    `state` is (m, p1, p2); `parameters` is (P0, v, k1, h). The degradation
    rate k_deg is a fixed (implicit) parameter stored on the instance.
    """
    m, p1, p2 = state
    P0, v, k1, h = parameters
    output = np.array([
        # dm/dt: degradation plus Hill-repressed production
        - self._kdeg * m + 1. / (1. + (p2 / P0)**h),
        # dp1/dt: degradation, translation from m, conversion to p2
        - self._kdeg * p1 + v * m - k1 * p1,
        # dp2/dt: degradation plus conversion from p1
        - self._kdeg * p2 + k1 * p1])
    return output
def set_m0(self, m0):
"""
Sets the initial conditions of the ``m`` variable.
"""
if m0 < 0:
raise ValueError('Initial condition cannot be negative.')
y0 = [m0, self._p0[0], self._p0[1]]
super(Hes1Model, self).set_initial_conditions(y0)
def METHOD_NAME(self, k):
    """
    Changes the implicit parameters for this model, given as a vector
    ``[p1_0, p2_0, k_deg]``; all entries must be non-negative.

    NOTE(review): this updates ``self._p0`` and ``self._kdeg`` but does not
    rebuild the initial-condition vector that :meth:`set_m0` assembles from
    ``self._p0``; callers changing these after construction should call
    ``set_m0`` again -- confirm intended.
    """
    a, b, c = k
    if a < 0 or b < 0 or c < 0:
        raise ValueError('Implicit parameters cannot be negative.')
    self._p0 = [a, b]
    self._kdeg = c
def simulate_all_states(self, parameters, times):
"""
Returns all state variables that ``simulate()`` does not return.
"""
solved_states = scipy.integrate.odeint(
self._rhs, self._y0, times, args=(parameters,))
# Return all states
return solved_states
def suggested_parameters(self):
""" See :meth:`pints.toy.ToyModel.suggested_parameters()`. """
return np.array([2.4, 0.025, 0.11, 6.9])
def suggested_times(self):
""" See :meth:`pints.toy.ToyModel.suggested_times()`. """
return np.arange(0, 270, 30)
def suggested_values(self):
"""
Returns a suggested set of values that matches
:meth:`suggested_times()`.
"""
return np.array([2, 1.20, 5.90, 4.58, 2.64, 5.38, 6.42, 5.60, 4.48]) |
7,305 | measure | from common import (
identity,
identity_script,
heavy,
heavy_script,
identity_cuda,
identity_script_cuda,
heavy_cuda,
heavy_script_cuda,
stamp_time,
compute_delay,
NUM_RPC,
)
from torch.distributed import rpc
from functools import partial
from statistics import stdev
import torch
import time
import os
import threading
GPU_ID = 5
def METHOD_NAME(*, name=None, func=None, args=None, cuda=False, out_file=None):
    """
    Issue NUM_RPC async RPCs of `func(args)` to "server" and report latency.

    Returns (mean, stdev, total): mean/stdev of per-RPC delays computed by
    compute_delay(), and total wall-clock time for the whole batch. Results
    are printed and optionally appended as CSV to `out_file`.
    """
    # warmup: run the same workload once so connections/CUDA context are hot
    futs = []
    for _ in range(NUM_RPC):
        futs.append(rpc.rpc_async("server", func, args=args))
    torch.futures.wait_all(futs)
    torch.cuda.current_stream(GPU_ID).synchronize()
    # warmup done

    # Per-RPC "tik"/"tok" stamps; the shared `states` dict lets the
    # completion callbacks signal when every RPC has finished.
    timestamps = {}
    states = {
        "lock": threading.Lock(),
        "future": torch.futures.Future(),
        "pending": NUM_RPC,
    }

    def mark_complete(index, cuda, fut):
        # Completion callback: stamp end time and resolve states["future"]
        # once the last outstanding RPC finishes.
        timestamps[index]["tok"] = stamp_time(cuda)
        with states["lock"]:
            states["pending"] -= 1
            if states["pending"] == 0:
                states["future"].set_result(0)

    start = time.time()
    futs = []
    for index in range(NUM_RPC):
        timestamps[index] = {}
        timestamps[index]["tik"] = stamp_time(cuda)
        fut = rpc.rpc_async("server", func, args=args)
        futs.append(fut)
        fut.add_done_callback(partial(mark_complete, index, cuda))
    torch.futures.wait_all(futs)
    # wait_all() only covers the RPC futures; wait for the callbacks too so
    # every "tok" stamp has been written before delays are computed.
    states["future"].wait()
    # torch.cuda.current_stream(GPU_ID).synchronize()

    delays = []
    for index in range(len(timestamps)):
        delays.append(compute_delay(timestamps[index], cuda))

    end = time.time()
    mean = sum(delays) / len(delays)
    stdv = stdev(delays)
    total = end - start
    print(
        f"{name}_{'cuda' if cuda else 'cpu'}: mean = {mean}, stdev = {stdv}, total = {end - start}",
        flush=True,
    )
    if out_file:
        out_file.write(f"{name}, {mean}, {stdv}, {total}\n")
    return mean, stdv, total
def run(addr="localhost", port="29500"):
    """
    Client entry point: join the 2-rank RPC world as "client" and benchmark
    the identity/heavy workloads (plain and TorchScript, CPU and CUDA) for
    several tensor sizes, logging results to logs/single_pt_rpc_<size>.log.
    """
    # NOTE(review): CUDA_VISIBLE_DEVICES="5" renumbers the visible device to
    # index 0, yet GPU_ID == 5 is still used for streams and the device map
    # below -- confirm this combination is intended.
    os.environ["CUDA_VISIBLE_DEVICES"] = "5"
    assert torch.cuda.device_count() == 1
    os.environ["MASTER_ADDR"] = addr
    os.environ["MASTER_PORT"] = port
    options = rpc.TensorPipeRpcBackendOptions(
        num_worker_threads=256, device_maps={"server": {GPU_ID: GPU_ID}}
    )
    rpc.init_rpc("client", rank=1, world_size=2, rpc_backend_options=options)

    for size in [100, 1000, 10000]:
        # for size in [100, 1000]:
        print(f"======= size = {size} =====")
        f = open(f"logs/single_pt_rpc_{size}.log", "w")
        tensor = torch.ones(size, size)

        # identity
        # NOTE(review): the identity benchmark runs twice back-to-back --
        # possibly an extra warm run, possibly a copy-paste duplicate; confirm.
        METHOD_NAME(
            name="identity",
            func=identity,
            args=(tensor,),
            cuda=False,
            out_file=f,
        )
        # identity
        METHOD_NAME(
            name="identity",
            func=identity,
            args=(tensor,),
            cuda=False,
            out_file=f,
        )
        # identity script
        METHOD_NAME(
            name="identity_script",
            func=identity_script,
            args=(tensor,),
            cuda=False,
            out_file=f,
        )
        # heavy
        METHOD_NAME(
            name="heavy",
            func=heavy,
            args=(tensor,),
            cuda=False,
            out_file=f,
        )
        # heavy script
        METHOD_NAME(
            name="heavy_script",
            func=heavy_script,
            args=(tensor,),
            cuda=False,
            out_file=f,
        )

        # Move the payload to the GPU for the CUDA variants and make sure the
        # copy has finished before timing starts.
        tensor = tensor.to(GPU_ID)
        torch.cuda.current_stream(GPU_ID).synchronize()

        # identity cuda
        METHOD_NAME(
            name="identity",
            func=identity_cuda,
            args=(tensor,),
            cuda=True,
            out_file=f,
        )
        # identity script cuda
        METHOD_NAME(
            name="identity_script",
            func=identity_script_cuda,
            args=(tensor,),
            cuda=True,
            out_file=f,
        )
        # heavy cuda
        METHOD_NAME(
            name="heavy",
            func=heavy_cuda,
            args=(tensor,),
            cuda=True,
            out_file=f,
        )
        # heavy script cuda
        METHOD_NAME(
            name="heavy_script",
            func=heavy_script_cuda,
            args=(tensor,),
            cuda=True,
            out_file=f,
        )
        f.close()
rpc.shutdown() |
7,306 | support | import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import (
broadcast_all,
lazy_property,
logits_to_probs,
probs_to_logits,
)
__all__ = ["Binomial"]
def _clamp_by_zero(x):
# works like clamp(x, min=0) but has grad at 0 is 0.5
return (x.clamp(min=0) + x - x.clamp(max=0)) / 2
class Binomial(Distribution):
r"""
Creates a Binomial distribution parameterized by :attr:`total_count` and
either :attr:`probs` or :attr:`logits` (but not both). :attr:`total_count` must be
broadcastable with :attr:`probs`/:attr:`logits`.
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Binomial(100, torch.tensor([0 , .2, .8, 1]))
>>> x = m.sample()
tensor([ 0., 22., 71., 100.])
>>> m = Binomial(torch.tensor([[5.], [10.]]), torch.tensor([0.5, 0.8]))
>>> x = m.sample()
tensor([[ 4., 5.],
[ 7., 6.]])
Args:
total_count (int or Tensor): number of Bernoulli trials
probs (Tensor): Event probabilities
logits (Tensor): Event log-odds
"""
arg_constraints = {
"total_count": constraints.nonnegative_integer,
"probs": constraints.unit_interval,
"logits": constraints.real,
}
has_enumerate_support = True
def __init__(self, total_count=1, probs=None, logits=None, validate_args=None):
if (probs is None) == (logits is None):
raise ValueError(
"Either `probs` or `logits` must be specified, but not both."
)
if probs is not None:
(
self.total_count,
self.probs,
) = broadcast_all(total_count, probs)
self.total_count = self.total_count.type_as(self.probs)
else:
(
self.total_count,
self.logits,
) = broadcast_all(total_count, logits)
self.total_count = self.total_count.type_as(self.logits)
self._param = self.probs if probs is not None else self.logits
batch_shape = self._param.size()
super().__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Binomial, _instance)
batch_shape = torch.Size(batch_shape)
new.total_count = self.total_count.expand(batch_shape)
if "probs" in self.__dict__:
new.probs = self.probs.expand(batch_shape)
new._param = new.probs
if "logits" in self.__dict__:
new.logits = self.logits.expand(batch_shape)
new._param = new.logits
super(Binomial, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def _new(self, *args, **kwargs):
return self._param.new(*args, **kwargs)
@constraints.dependent_property(is_discrete=True, event_dim=0)
def METHOD_NAME(self):
return constraints.integer_interval(0, self.total_count)
@property
def mean(self):
return self.total_count * self.probs
@property
def mode(self):
return ((self.total_count + 1) * self.probs).floor().clamp(max=self.total_count)
@property
def variance(self):
return self.total_count * self.probs * (1 - self.probs)
@lazy_property
def logits(self):
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self):
return logits_to_probs(self.logits, is_binary=True)
@property
def param_shape(self):
return self._param.size()
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
with torch.no_grad():
return torch.binomial(
self.total_count.expand(shape), self.probs.expand(shape)
)
def log_prob(self, value):
    """Return log P(X = value) computed in logit space for numerical stability."""
    if self._validate_args:
        self._validate_sample(value)
    # log C(n, k) = lgamma(n+1) - lgamma(k+1) - lgamma(n-k+1)
    log_factorial_n = torch.lgamma(self.total_count + 1)
    log_factorial_k = torch.lgamma(value + 1)
    log_factorial_nmk = torch.lgamma(self.total_count - value + 1)
    # k * log(p) + (n - k) * log(1 - p) = k * (log(p) - log(1 - p)) + n * log(1 - p)
    #     (case logit < 0)              = k * logit - n * log1p(e^logit)
    #     (case logit > 0)              = k * logit - n * (log(p) - log(1 - p)) + n * log(p)
    #                                   = k * logit - n * logit - n * log1p(e^-logit)
    #     (merge two cases)             = k * logit - n * max(logit, 0) - n * log1p(e^-|logit|)
    # _clamp_by_zero keeps max(logit, 0) differentiable at logit == 0.
    normalize_term = (
        self.total_count * _clamp_by_zero(self.logits)
        + self.total_count * torch.log1p(torch.exp(-torch.abs(self.logits)))
        - log_factorial_n
    )
    return (
        value * self.logits - log_factorial_k - log_factorial_nmk - normalize_term
    )
def entropy(self):
    """Return the entropy H = -sum_k p(k) * log p(k) over the full support.

    Only a homogeneous ``total_count`` is supported, because the support
    is enumerated once with a single common count.

    Raises
    ------
    NotImplementedError
        If ``total_count`` differs across the batch.
    """
    total_count = int(self.total_count.max())
    # Idiomatic inequality test (was `not ... == ...`).
    if self.total_count.min() != total_count:
        raise NotImplementedError(
            "Inhomogeneous total count not supported by `entropy`."
        )
    # Evaluate log p(k) for every k in 0..total_count (dim 0 is the support).
    log_prob = self.log_prob(self.enumerate_support(False))
    return -(torch.exp(log_prob) * log_prob).sum(0)
def enumerate_support(self, expand=True):
    """Return all support values 0..total_count stacked on a new dim 0.

    Requires a homogeneous ``total_count`` across the batch.  When
    ``expand`` is True, the values are broadcast over the batch shape.
    """
    total_count = int(self.total_count.max())
    if not self.total_count.min() == total_count:
        raise NotImplementedError(
            "Inhomogeneous total count not supported by `enumerate_support`."
        )
    values = torch.arange(
        1 + total_count, dtype=self._param.dtype, device=self._param.device
    )
    # Shape (support_size, 1, ..., 1) so it broadcasts against batch dims.
    values = values.view((-1,) + (1,) * len(self._batch_shape))
    if expand:
        values = values.expand((-1,) + self._batch_shape)
    return values
"""Class for recording and reporting deprecations"""
from __future__ import annotations
import functools
import re
import typing as ty
import warnings
if ty.TYPE_CHECKING: # pragma: no cover
T = ty.TypeVar('T')
P = ty.ParamSpec('P')
_LEADING_WHITE = re.compile(r'^(\s*)')
TESTSETUP = """
.. testsetup::
>>> import pytest
>>> import warnings
>>> _suppress_warnings = pytest.deprecated_call()
>>> _ = _suppress_warnings.__enter__()
"""
TESTCLEANUP = """
.. testcleanup::
>>> warnings.warn("Avoid error if no doctests to run...", DeprecationWarning)
>>> _ = _suppress_warnings.__exit__(None, None, None)
"""
class ExpiredDeprecationError(RuntimeError):
    """Error for expired deprecation

    Error raised when a called function or method has passed out of its
    deprecation period.
    """

    pass
def _ensure_cr(text: str) -> str:
"""Remove trailing whitespace and add carriage return
Ensures that `text` always ends with a carriage return
"""
return text.rstrip() + '\n'
def _add_dep_doc(
    old_doc: str,
    dep_doc: str,
    setup: str = '',
    cleanup: str = '',
) -> str:
    """Add deprecation message `dep_doc` to docstring in `old_doc`

    Parameters
    ----------
    old_doc : str
        Docstring from some object.
    dep_doc : str
        Deprecation warning to add to top of docstring, after initial line.
    setup : str, optional
        Doctest setup text
    cleanup : str, optional
        Doctest teardown text

    Returns
    -------
    new_doc : str
        `old_doc` with `dep_doc` inserted after any first lines of docstring.
    """
    dep_doc = _ensure_cr(dep_doc)
    if not old_doc:
        return dep_doc
    old_doc = _ensure_cr(old_doc)
    # Collect the summary paragraph: leading non-blank lines of the old doc.
    old_lines = old_doc.splitlines()
    new_lines = []
    for line_no, line in enumerate(old_lines):
        if line.strip():
            new_lines.append(line)
        else:
            break
    next_line = line_no + 1
    if next_line >= len(old_lines):
        # nothing following first paragraph, just append message
        return old_doc + '\n' + dep_doc
    # Match the indentation of the docstring body for the inserted blocks.
    leading_white = _LEADING_WHITE.match(old_lines[next_line])
    assert leading_white is not None  # Type narrowing, since this always matches
    indent = leading_white.group()
    setup_lines = [indent + L for L in setup.splitlines()]
    dep_lines = [indent + L for L in [''] + dep_doc.splitlines() + ['']]
    cleanup_lines = [indent + L for L in cleanup.splitlines()]
    return '\n'.join(
        new_lines + dep_lines + setup_lines + old_lines[next_line:] + cleanup_lines + ['']
    )
class Deprecator:
    """Class to make decorator marking function or method as deprecated

    The decorated function / method will:

    * Raise the given `warning_class` warning when the function / method gets
      called, up to (and including) version `until` (if specified);
    * Raise the given `error_class` error when the function / method gets
      called, when the package version is greater than version `until` (if
      specified).

    Parameters
    ----------
    version_comparator : callable
        Callable accepting string as argument, and return 1 if string
        represents a higher version than encoded in the `version_comparator`, 0
        if the version is equal, and -1 if the version is lower. For example,
        the `version_comparator` may compare the input version string to the
        current package version string.
    warn_class : class, optional
        Class of warning to generate for deprecation.
    error_class : class, optional
        Class of error to generate when `version_comparator` returns 1 for a
        given argument of ``until`` in the ``__call__`` method (see below).
    """

    def __init__(
        self,
        version_comparator: ty.Callable[[str], int],
        warn_class: type[Warning] = DeprecationWarning,
        error_class: type[Exception] = ExpiredDeprecationError,
    ) -> None:
        self.version_comparator = version_comparator
        self.warn_class = warn_class
        self.error_class = error_class

    def is_bad_version(self, version_str: str) -> bool:
        """Return True if `version_str` is too high

        Tests `version_str` with ``self.version_comparator``

        Parameters
        ----------
        version_str : str
            String giving version to test

        Returns
        -------
        is_bad : bool
            True if `version_str` is for version below that expected by
            ``self.version_comparator``, False otherwise.
        """
        return self.version_comparator(version_str) == -1

    def __call__(
        self,
        message: str,
        since: str = '',
        until: str = '',
        warn_class: type[Warning] | None = None,
        error_class: type[Exception] | None = None,
    ) -> ty.Callable[[ty.Callable[P, T]], ty.Callable[P, T]]:
        """Return decorator function function for deprecation warning / error

        Parameters
        ----------
        message : str
            Message explaining deprecation, giving possible alternatives.
        since : str, optional
            Released version at which object was first deprecated.
        until : str, optional
            Last released version at which this function will still raise a
            deprecation warning. Versions higher than this will raise an
            error.
        warn_class : None or class, optional
            Class of warning to generate for deprecation (overrides instance
            default).
        error_class : None or class, optional
            Class of error to generate when `version_comparator` returns 1 for a
            given argument of ``until`` (overrides class default).

        Returns
        -------
        deprecator : func
            Function returning a decorator.
        """
        # Per-call overrides fall back to the instance-level defaults.
        exception = error_class if error_class is not None else self.error_class
        warning = warn_class if warn_class is not None else self.warn_class
        messages = [message]
        if (since, until) != ('', ''):
            messages.append('')
        if since:
            messages.append('* deprecated from version: ' + since)
        if until:
            messages.append(
                f"* {'Raises' if self.is_bad_version(until) else 'Will raise'} "
                f'{exception} as of version: {until}'
            )
        message = '\n'.join(messages)

        def deprecator(func: ty.Callable[P, T]) -> ty.Callable[P, T]:
            @functools.wraps(func)
            def METHOD_NAME(*args: P.args, **kwargs: P.kwargs) -> T:
                # Expired deprecations become hard errors; live ones warn.
                if until and self.is_bad_version(until):
                    raise exception(message)
                warnings.warn(message, warning, stacklevel=2)
                return func(*args, **kwargs)

            keep_doc = METHOD_NAME.__doc__
            if keep_doc is None:
                keep_doc = ''
            setup = TESTSETUP
            cleanup = TESTCLEANUP
            # After expiration, remove all but the first paragraph.
            # The details are no longer relevant, but any code will likely
            # raise exceptions we don't need.
            if keep_doc and until and self.is_bad_version(until):
                lines = '\n'.join(line.rstrip() for line in keep_doc.splitlines())
                keep_doc = lines.split('\n\n', 1)[0]
                setup = ''
                cleanup = ''
            METHOD_NAME.__doc__ = _add_dep_doc(keep_doc, message, setup, cleanup)
            return METHOD_NAME
        return deprecator
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import annotations
__all__ = ("EmptyForm",)
from collections.abc import Callable
from inspect import signature
import awkward as ak
from awkward._errors import deprecate
from awkward._nplikes.numpylike import NumpyMetadata
from awkward._nplikes.shape import ShapeItem
from awkward._typing import Iterator, JSONSerializable, Self, final
from awkward._util import UNSET
from awkward.forms.form import Form, JSONMapping
np = NumpyMetadata.instance()
@final
class EmptyForm(Form):
    """Form for data whose type is entirely unknown (zero-length, untyped).

    An EmptyForm carries no type information, so it can never hold
    parameters; every constructor path enforces that.
    """

    is_numpy = True
    is_unknown = True

    def __init__(self, *, parameters: JSONMapping | None = None, form_key=None):
        if not (parameters is None or len(parameters) == 0):
            raise TypeError(f"{type(self).__name__} cannot contain parameters")
        self._init(parameters=parameters, form_key=form_key)

    def copy(
        self, *, parameters: JSONMapping | None = UNSET, form_key=UNSET
    ) -> EmptyForm:
        # Only `form_key` can meaningfully change; parameters are forbidden.
        if not (parameters is UNSET or parameters is None or len(parameters) == 0):
            raise TypeError(f"{type(self).__name__} cannot contain parameters")
        return EmptyForm(
            form_key=self._form_key if form_key is UNSET else form_key,
        )

    @classmethod
    def simplified(cls, *, parameters=None, form_key=None) -> Form:
        if not (parameters is None or len(parameters) == 0):
            raise TypeError(f"{cls.__name__} cannot contain parameters")
        return cls(parameters=parameters, form_key=form_key)

    def __repr__(self):
        args = self._repr_args()
        return "{}({})".format(type(self).__name__, ", ".join(args))

    def _to_dict_part(self, verbose, toplevel):
        return self._to_dict_extra({"class": "EmptyArray"}, verbose)

    @property
    def type(self):
        return ak.types.UnknownType()

    def __eq__(self, other) -> bool:
        # No parameters exist, so only the form key distinguishes instances.
        return isinstance(other, EmptyForm) and self._form_key == other._form_key

    def METHOD_NAME(self, *args, **kwargs):
        """Convert to a NumpyForm.

        Accepts either the new keyword-only `primitive` argument or the
        deprecated `dtype` argument (positional or keyword).
        """
        def legacy_impl(dtype):
            deprecate(
                f"the `dtype` parameter in {type(self).__name__}.to_NumpyForm is deprecated, "
                f"in favour of a new `primitive` argument. Pass `primitive` by keyword to opt-in to the new behavior.",
                version="2.4.0",
            )
            return ak.forms.numpyform.from_dtype(dtype)

        def new_impl(*, primitive):
            return ak.forms.numpyform.NumpyForm(primitive)

        # Try the new signature first; fall back to the deprecated one.
        dispatch_table = [
            new_impl,
            legacy_impl,
        ]
        for func in dispatch_table:
            sig = signature(func)
            try:
                bound_arguments = sig.bind(*args, **kwargs)
            except TypeError:
                continue
            else:
                return func(*bound_arguments.args, **bound_arguments.kwargs)
        raise AssertionError(
            f"{type(self).__name__}.to_NumpyForm accepts either the new `primitive` argument as a keyword-only "
            f"argument, or the legacy `dtype` argument as positional or keyword"
        )

    def purelist_parameters(self, *keys: str) -> JSONSerializable:
        return None

    @property
    def purelist_isregular(self) -> bool:
        return True

    @property
    def purelist_depth(self) -> int:
        return 1

    @property
    def is_identity_like(self) -> bool:
        return True

    @property
    def minmax_depth(self) -> tuple[int, int]:
        return (1, 1)

    @property
    def branch_depth(self) -> tuple[bool, int]:
        return (False, 1)

    @property
    def fields(self) -> list[str]:
        return []

    @property
    def is_tuple(self) -> bool:
        return False

    @property
    def dimension_optiontype(self) -> bool:
        return False

    def _columns(self, path, output, list_indicator):
        output.append(".".join(path))

    def _select_columns(self, match_specifier):
        return self

    def _prune_columns(self, is_inside_record_or_union: bool) -> Self:
        return self

    def _column_types(self) -> tuple[str, ...]:
        return ("empty",)

    def _length_one_buffer_lengths(self) -> Iterator[ShapeItem]:
        yield 0

    def __setstate__(self, state):
        if isinstance(state, dict):
            # read data pickled in Awkward 2.x
            self.__dict__.update(state)
        else:
            # read data pickled in Awkward 1.x
            # https://github.com/scikit-hep/awkward/blob/main-v1/src/python/forms.cpp#L240-L244
            has_identities, parameters, form_key = state
            if form_key is not None:
                form_key = "part0-" + form_key  # only the first partition
            self.__init__(form_key=form_key)

    def _expected_from_buffers(
        self, getkey: Callable[[Form, str], str]
    ) -> Iterator[tuple[str, np.dtype]]:
        yield from ()
# ---
import plotly.graph_objs as go
import cea.plots.demand
from cea.plots.variable_naming import LOGO, COLOR, NAMING
class EnergySupplyIntensityPlot(cea.plots.demand.DemandPlotBase):
    """Stacked-bar plot of final energy use, absolute and per m2 of GFA."""

    name = "Energy Final Use Intensity"

    def __init__(self, project, parameters, cache):
        super(EnergySupplyIntensityPlot, self).__init__(project, parameters, cache)
        # Demand result columns (MWh/yr) that contribute to final energy use.
        self.analysis_fields = ["DH_hs_MWhyr", "DH_ww_MWhyr", 'SOLAR_ww_MWhyr', 'SOLAR_hs_MWhyr', "DC_cs_MWhyr",
                                'DC_cdata_MWhyr', 'DC_cre_MWhyr', 'PV_MWhyr', 'NG_hs_MWhyr',
                                'COAL_hs_MWhyr', 'OIL_hs_MWhyr', 'WOOD_hs_MWhyr', 'NG_ww_MWhyr', 'COAL_ww_MWhyr',
                                'OIL_ww_MWhyr', 'WOOD_ww_MWhyr',
                                'GRID_a_MWhyr',
                                'GRID_l_MWhyr',
                                'GRID_v_MWhyr',
                                'GRID_ve_MWhyr',
                                'GRID_cs_MWhyr',
                                'GRID_aux_MWhyr',
                                'GRID_data_MWhyr',
                                'GRID_pro_MWhyr',
                                'GRID_ww_MWhyr',
                                'GRID_hs_MWhyr',
                                'GRID_cdata_MWhyr',
                                'GRID_cre_MWhyr'
                                ]

    @property
    def layout(self):
        return go.Layout(barmode='stack',
                         yaxis=dict(title='Energy Use Intensity [kWh/m2.yr]'), showlegend=True)

    def METHOD_NAME(self):
        """Build bar traces: absolute+relative for a single building,
        one relative bar per building otherwise."""
        analysis_fields = self.remove_unused_fields(self.data, self.analysis_fields)
        if len(self.buildings) == 1:
            assert len(self.data) == 1, 'Expected DataFrame with only one row'
            building_data = self.data.iloc[0]
            traces = []
            area = building_data["GFA_m2"]
            x = ["Absolute [MWh/yr]", "Relative [kWh/m2.yr]"]
            for field in analysis_fields:
                name = NAMING[field]
                # Second bar converts MWh/yr to kWh/m2.yr via * 1000 / area.
                y = [building_data[field], building_data[field] / area * 1000]
                trace = go.Bar(x=x, y=y, name=name, marker=dict(color=COLOR[field]))
                traces.append(trace)
            return traces
        else:
            traces = []
            dataframe = self.data
            # NOTE(review): this mutates self.data in place (unit conversion,
            # 'total' column, sort) — presumably safe because the data is
            # rebuilt per plot; confirm against DemandPlotBase caching.
            for field in analysis_fields:
                dataframe[field] = dataframe[field] * 1000 / dataframe["GFA_m2"]  # in kWh/m2y
            dataframe['total'] = dataframe[analysis_fields].sum(axis=1)
            dataframe.sort_values(by='total', ascending=False, inplace=True)
            dataframe.reset_index(inplace=True, drop=True)
            for field in analysis_fields:
                y = dataframe[field]
                name = NAMING[field]
                total_percent = (y / dataframe['total'] * 100).round(2).values
                total_percent_txt = ["(%.2f %%)" % x for x in total_percent]
                trace = go.Bar(x=dataframe["Name"], y=y, name=name, text=total_percent_txt, marker=dict(color=COLOR[field]))
                traces.append(trace)
            return traces
def main():
    """Render the plot for a few building selections (developer entry point)."""
    import cea.config
    import cea.inputlocator
    import cea.plots.cache  # BUG FIX: used below but was never imported

    config = cea.config.Configuration()
    locator = cea.inputlocator.InputLocator(config.scenario)
    # cache = cea.plots.cache.PlotCache(config.project)
    cache = cea.plots.cache.NullPlotCache()
    # Exercise both branches of the graph calculation: all buildings,
    # two buildings, and a single building.
    EnergySupplyIntensityPlot(config.project, {'buildings': None,
                                               'scenario-name': config.scenario_name},
                              cache).plot(auto_open=True)
    EnergySupplyIntensityPlot(config.project, {'buildings': locator.get_zone_building_names()[0:2],
                                               'scenario-name': config.scenario_name},
                              cache).plot(auto_open=True)
    EnergySupplyIntensityPlot(config.project, {'buildings': [locator.get_zone_building_names()[0]],
                                               'scenario-name': config.scenario_name},
                              cache).plot(auto_open=True)


if __name__ == '__main__':
    main()
# Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test Sample behavior
"""
import time
from unittest import TestCase
import pytest
from sqlalchemy import TEXT, Column, Integer, String, create_engine, func
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import declarative_base
from metadata.ingestion.connections.session import create_and_bind_session
from metadata.profiler.api.models import ProfileSampleConfig
from metadata.profiler.processor.runner import QueryRunner
from metadata.profiler.processor.sampler.sqlalchemy.sampler import SQASampler
from metadata.utils.timeout import cls_timeout
Base = declarative_base()
class User(Base):
    """SQLAlchemy model backing the `users` fixture table for these tests."""

    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String(256))
    fullname = Column(String(256))
    nickname = Column(String(256))
    comments = Column(TEXT)
    age = Column(Integer)
class Timer:
    """
    Helper to test timeouts
    """

    def slow(self):
        # Sleeps long enough to trip the 1-second cls_timeout wrapper below.
        time.sleep(10)
        return 1

    def METHOD_NAME(self):
        # Returns immediately, so it must complete within the timeout.
        return 1
class RunnerTest(TestCase):
    """
    Run checks on different metrics
    """

    # Shared fixtures: an in-memory SQLite DB and a 50% random sample of the
    # users table, so sample-based counts must be strictly below 30.
    engine = create_engine("sqlite+pysqlite:///:memory:", echo=False, future=True)
    session = create_and_bind_session(engine)
    sampler = SQASampler(
        client=session,
        table=User,
        profile_sample_config=ProfileSampleConfig(profile_sample=50.0),
    )
    sample = sampler.random_sample()
    raw_runner = QueryRunner(session=session, table=User, sample=sample)
    timeout_runner: Timer = cls_timeout(1)(Timer())

    @classmethod
    def setUpClass(cls) -> None:
        """
        Prepare Ingredients
        """
        User.__table__.create(bind=cls.engine)
        # Insert 30 rows
        for i in range(10):
            data = [
                User(
                    name="John",
                    fullname="John Doe",
                    nickname="johnny b goode",
                    comments="no comments",
                    age=30,
                ),
                User(
                    name="Jane",
                    fullname="Jone Doe",
                    nickname=None,
                    comments="maybe some comments",
                    age=31,
                ),
                User(
                    name="John",
                    fullname="John Doe",
                    nickname=None,
                    comments=None,
                    age=None,
                ),
            ]
            cls.session.add_all(data)
            cls.session.commit()

    def test_select_from_table(self):
        """
        We can run queries against the table
        """
        res = self.raw_runner.select_first_from_table(func.count())
        assert res[0] == 30
        res = self.raw_runner.select_all_from_table(Column(User.name.name))
        assert len(res) == 30

    def test_select_from_sample(self):
        """
        We can run queries against the sample
        """
        res = self.raw_runner.select_first_from_sample(func.count())
        assert res[0] < 30
        # Note how we need to pass the column by name, not from the table
        # object, or it will run a cartesian product.
        res = self.raw_runner.select_all_from_sample(Column(User.name.name))
        assert len(res) < 30

    def test_select_from_query(self):
        """
        We can pick up results from a given query
        """
        query = self.session.query(func.count()).select_from(User)
        res = self.raw_runner.select_first_from_query(query)
        assert res[0] == 30
        query = self.session.query(func.count()).select_from(self.sample)
        res = self.raw_runner.select_first_from_query(query)
        assert res[0] < 30
        query = self.session.query(Column(User.name.name)).select_from(User)
        res = self.raw_runner.select_all_from_query(query)
        assert len(res) == 30
        query = self.session.query(func.count()).select_from(self.sample)
        res = self.raw_runner.select_all_from_query(query)
        assert len(res) < 30

    def test_timeout_runner(self):
        """
        Check that timeout alarms get executed
        """
        assert self.timeout_runner.METHOD_NAME() == 1
        with pytest.raises(TimeoutError):
            self.timeout_runner.slow()

    def test_select_from_statement(self):
        """
        Test querying using `from_statement` returns expected values
        """
        stmt = "SELECT name FROM users"
        self.raw_runner._profile_sample_query = stmt
        res = self.raw_runner.select_all_from_table(Column(User.name.name))
        assert len(res) == 30
        res = self.raw_runner.select_first_from_table(Column(User.name.name))
        assert len(res) == 1
        # Querying `name` through a statement that only selects `id` must fail.
        stmt = "SELECT id FROM users"
        self.raw_runner._profile_sample_query = stmt
        with pytest.raises(OperationalError):
            self.raw_runner.select_first_from_table(Column(User.name.name))
        self.raw_runner._profile_sample_query = None
import unittest
from ctypes import *
from ctypes.test import need_symbol
import _ctypes_test
# Load the compiled helper extension shipped with the ctypes test suite.
dll = CDLL(_ctypes_test.__file__)
try:
    CALLBACK_FUNCTYPE = WINFUNCTYPE
except NameError:
    # fake to enable this test on Linux
    CALLBACK_FUNCTYPE = CFUNCTYPE
class POINT(Structure):
    """2-D point structure mirroring the C test library's `point` struct."""

    _fields_ = [("x", c_int), ("y", c_int)]
class BasicWrapTestCase(unittest.TestCase):
    """Exercise ctypes argument conversion; subclasses override `wrap` to
    route every parameter through an object exposing `_as_parameter_`."""

    def wrap(self, param):
        # Identity here; subclasses wrap `param` in an _as_parameter_ carrier.
        return param

    @need_symbol('c_wchar')
    def test_wchar_parm(self):
        f = dll._testfunc_i_bhilfd
        f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double]
        result = f(self.wrap(1), self.wrap("x"), self.wrap(3), self.wrap(4), self.wrap(5.0), self.wrap(6.0))
        self.assertEqual(result, 139)
        # BUG FIX: `self.assertTrue(type(result), int)` always passed because
        # the second argument is only the failure *message*; assert the
        # intended type check instead.
        self.assertIsInstance(result, int)

    def test_pointers(self):
        f = dll._testfunc_p_p
        f.restype = POINTER(c_int)
        f.argtypes = [POINTER(c_int)]
        # This only works if the value c_int(42) passed to the
        # function is still alive while the pointer (the result) is
        # used.
        v = c_int(42)
        self.assertEqual(pointer(v).contents.value, 42)
        result = f(self.wrap(pointer(v)))
        self.assertEqual(type(result), POINTER(c_int))
        self.assertEqual(result.contents.value, 42)
        # This one works...
        result = f(self.wrap(pointer(v)))
        self.assertEqual(result.contents.value, v.value)
        p = pointer(c_int(99))
        result = f(self.wrap(p))
        self.assertEqual(result.contents.value, 99)

    def test_shorts(self):
        f = dll._testfunc_callback_i_if
        args = []
        expected = [262144, 131072, 65536, 32768, 16384, 8192, 4096, 2048,
                    1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]

        def callback(v):
            args.append(v)
            return v

        CallBack = CFUNCTYPE(c_int, c_int)
        cb = CallBack(callback)
        f(self.wrap(2**18), self.wrap(cb))
        self.assertEqual(args, expected)

    ################################################################
    def test_callbacks(self):
        f = dll._testfunc_callback_i_if
        f.restype = c_int
        f.argtypes = None
        MyCallback = CFUNCTYPE(c_int, c_int)

        def callback(value):
            # print "called back with", value
            return value

        cb = MyCallback(callback)
        result = f(self.wrap(-10), self.wrap(cb))
        self.assertEqual(result, -18)
        # test with prototype
        f.argtypes = [c_int, MyCallback]
        cb = MyCallback(callback)
        result = f(self.wrap(-10), self.wrap(cb))
        self.assertEqual(result, -18)
        result = f(self.wrap(-10), self.wrap(cb))
        self.assertEqual(result, -18)
        AnotherCallback = CALLBACK_FUNCTYPE(c_int, c_int, c_int, c_int, c_int)
        # check that the prototype works: we call f with wrong
        # argument types
        cb = AnotherCallback(callback)
        self.assertRaises(ArgumentError, f, self.wrap(-10), self.wrap(cb))

    def METHOD_NAME(self):
        # Can also use simple datatypes as argument type specifiers
        # for the callback function.
        # In this case the call receives an instance of that type
        f = dll._testfunc_callback_i_if
        f.restype = c_int
        MyCallback = CFUNCTYPE(c_int, c_int)
        f.argtypes = [c_int, MyCallback]

        def callback(value):
            # print "called back with", value
            self.assertEqual(type(value), int)
            return value

        cb = MyCallback(callback)
        result = f(self.wrap(-10), self.wrap(cb))
        self.assertEqual(result, -18)

    def test_longlong_callbacks(self):
        f = dll._testfunc_callback_q_qf
        f.restype = c_longlong
        MyCallback = CFUNCTYPE(c_longlong, c_longlong)
        f.argtypes = [c_longlong, MyCallback]

        def callback(value):
            self.assertIsInstance(value, int)
            return value & 0x7FFFFFFF

        cb = MyCallback(callback)
        self.assertEqual(13577625587, int(f(self.wrap(1000000000000), self.wrap(cb))))

    def test_byval(self):
        # without prototype
        ptin = POINT(1, 2)
        ptout = POINT()
        # EXPORT int _testfunc_byval(point in, point *pout)
        result = dll._testfunc_byval(ptin, byref(ptout))
        got = result, ptout.x, ptout.y
        expected = 3, 1, 2
        self.assertEqual(got, expected)
        # with prototype
        ptin = POINT(101, 102)
        ptout = POINT()
        dll._testfunc_byval.argtypes = (POINT, POINTER(POINT))
        dll._testfunc_byval.restype = c_int
        result = dll._testfunc_byval(self.wrap(ptin), byref(ptout))
        got = result, ptout.x, ptout.y
        expected = 203, 101, 102
        self.assertEqual(got, expected)

    def test_struct_return_2H(self):
        class S2H(Structure):
            _fields_ = [("x", c_short),
                        ("y", c_short)]

        dll.ret_2h_func.restype = S2H
        dll.ret_2h_func.argtypes = [S2H]
        inp = S2H(99, 88)
        s2h = dll.ret_2h_func(self.wrap(inp))
        self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))

    def test_struct_return_8H(self):
        class S8I(Structure):
            _fields_ = [("a", c_int),
                        ("b", c_int),
                        ("c", c_int),
                        ("d", c_int),
                        ("e", c_int),
                        ("f", c_int),
                        ("g", c_int),
                        ("h", c_int)]

        dll.ret_8i_func.restype = S8I
        dll.ret_8i_func.argtypes = [S8I]
        inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
        s8i = dll.ret_8i_func(self.wrap(inp))
        self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
                         (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))

    def test_recursive_as_param(self):
        from ctypes import c_int

        class A(object):
            pass

        # A self-referential _as_parameter_ must be detected, not recursed.
        a = A()
        a._as_parameter_ = a
        with self.assertRaises(RuntimeError):
            c_int.from_param(a)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class AsParamWrapper(object):
    """Wrap a value for ctypes via the `_as_parameter_` attribute."""

    def __init__(self, param):
        # ctypes consults `_as_parameter_` when converting call arguments.
        self._as_parameter_ = param
class AsParamWrapperTestCase(BasicWrapTestCase):
    """Re-run the whole suite with arguments wrapped in AsParamWrapper."""

    wrap = AsParamWrapper
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class AsParamPropertyWrapper(object):
    """Expose the wrapped value through an `_as_parameter_` *property*."""

    def __init__(self, param):
        self._param = param

    def getParameter(self):
        # Accessor used as the property getter below.
        return self._param

    _as_parameter_ = property(getParameter)
class AsParamPropertyWrapperTestCase(BasicWrapTestCase):
    """Re-run the suite with `_as_parameter_` supplied via a property."""

    wrap = AsParamPropertyWrapper
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if __name__ == '__main__':
    unittest.main()
# SPDX-License-Identifier: LGPL-2.1-or-later
#
# Copyright (C) 2023 Collabora Limited
# Author: Guillaume Tucker <guillaume.tucker@collabora.com>
"""KernelCI API bindings for the latest version"""
import enum
from typing import Optional, Sequence
from cloudevents.http import from_json
import requests
from . import API
class NodeStates(enum.Enum):
    """Node states names"""

    # Lifecycle states a node can be in, as reported by the API.
    RUNNING = 'running'
    AVAILABLE = 'available'
    CLOSING = 'closing'
    DONE = 'done'
class LatestAPI(API):  # pylint: disable=too-many-public-methods
    """Latest API version

    The 'latest' version is used to refer to the current development version,
    so it's not pinned down. It's a moving target and shouldn't be used in
    production environments.
    """

    @property
    def version(self) -> str:
        return self.config.version

    @property
    def node_states(self):
        return NodeStates

    @property
    def security_scopes(self) -> Sequence[str]:
        return ['users', 'admin']

    def hello(self) -> dict:
        return self._get('/').json()

    def whoami(self) -> dict:
        return self._get('/whoami').json()

    def password_hash(self, password: str) -> dict:
        return self._post('/hash', {'password': password}).json()

    def change_password(self, username: str, current: str, new: str) -> dict:
        return self._post(
            '/password',
            {
                'current_password': {'password': current},
                'new_password': {'password': new},
            },
            {
                'username': username,
            },
        ).json()

    def create_token(self, username: str, password: str,
                     scopes: Optional[Sequence[str]] = None) -> str:
        data = {
            'username': username,
            'password': password,
        }
        # The form field name is scope (in singular), but it is actually a long
        # string with "scopes" separated by spaces.
        # https://fastapi.tiangolo.com/tutorial/security/simple-oauth2/#scope
        if scopes:
            data['scope'] = ' '.join(scopes)
        url = self._make_url('/token')
        resp = requests.post(url, data, timeout=self._timeout)
        resp.raise_for_status()
        return resp.json()

    def subscribe(self, channel: str) -> int:
        resp = self._post(f'subscribe/{channel}')
        return resp.json()['id']

    def unsubscribe(self, sub_id: int):
        self._post(f'unsubscribe/{sub_id}')

    def send_event(self, channel: str, data):
        self._post('/'.join(['publish', channel]), data)

    def receive_event(self, sub_id: int):
        """Block until a real event arrives; skips keep-alive 'BEEP's."""
        path = '/'.join(['listen', str(sub_id)])
        while True:
            resp = self._get(path)
            data = resp.json().get('data')
            if not data:
                continue
            event = from_json(data)
            if event.data == 'BEEP':
                continue
            return event

    def _get_api_objs(self, params: dict, path: str,
                      limit: Optional[int] = None,
                      offset: Optional[int] = None) -> list:
        """Helper function for getting objects from API with pagination
        parameters"""
        objs = []
        if any((offset, limit)):
            # Explicit pagination: a single request with caller's window.
            params.update({
                'offset': offset or None,
                'limit': limit or None,
            })
            resp = self._get(path, params=params)
            objs = resp.json()['items']
        else:
            # No window given: walk all pages 100 items at a time.
            offset = 0
            limit = 100
            params['limit'] = limit
            while True:
                params['offset'] = offset
                resp = self._get(path, params=params)
                items = resp.json()['items']
                objs.extend(items)
                if len(items) < limit:
                    break
                offset += limit
        return objs

    def METHOD_NAME(self, node_id: str) -> dict:
        return self._get(f'node/{node_id}').json()

    def get_nodes(
        self, attributes: dict,
        offset: Optional[int] = None, limit: Optional[int] = None
    ) -> Sequence[dict]:
        params = attributes.copy() if attributes else {}
        return self._get_api_objs(params=params, path='nodes',
                                  limit=limit, offset=offset)

    def count_nodes(self, attributes: dict) -> int:
        return self._get('count', params=attributes).json()

    def create_node(self, node: dict) -> dict:
        return self._post('node', node).json()

    def update_node(self, node: dict) -> dict:
        return self._put('/'.join(['node', node['id']]), node).json()

    def get_group(self, group_id: str) -> dict:
        return self._get(f'group/{group_id}').json()

    def get_groups(
        self, attributes: dict,
        offset: Optional[int] = None, limit: Optional[int] = None
    ) -> Sequence[dict]:
        params = attributes.copy() if attributes else {}
        return self._get_api_objs(params=params, path='groups',
                                  limit=limit, offset=offset)

    def get_user_profiles(
        self, attributes: dict,
        offset: Optional[int] = None, limit: Optional[int] = None
    ) -> Sequence[dict]:
        params = attributes.copy() if attributes else {}
        return self._get_api_objs(params=params, path='users/profile',
                                  limit=limit, offset=offset)

    def create_user(self, username: str, password: str, profile: dict) -> dict:
        data = {
            'password': password,
        }
        params = {
            'email': profile['email'],
        }
        # BUG FIX: decode the response body like every other endpoint so the
        # declared `-> dict` return type holds (previously returned the raw
        # requests.Response object).
        return self._post(f'user/{username}', data, params).json()
def get_api(config, token):
    """Get an API object for the latest version"""
    # Restored return statement (the line was corrupted by a stray trailing
    # token in the source).
    return LatestAPI(config, token)
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import hashlib
from twisted.internet import defer
from twisted.trial import unittest
from buildbot import util
from buildbot.config import ConfigErrors
from buildbot.interfaces import LatentWorkerFailedToSubstantiate
from buildbot.test.fake import fakemaster
from buildbot.test.fake import httpclientservice as fakehttpclientservice
from buildbot.test.fake.fakebuild import FakeBuildForRendering as FakeBuild
from buildbot.test.fake.fakeprotocol import FakeTrivialConnection as FakeBot
from buildbot.test.reactor import TestReactorMixin
from buildbot.worker import upcloud
# Please see https://developers.upcloud.com/ for details
# Canned Upcloud API responses replayed by the fake HTTP client in the tests.
upcloudStorageTemplatePayload = {
    'storages': {
        'storage': [
            {
                'access': 'public',
                'title': 'rendered:test-image',
                'uuid': '8b47d21b-b4c3-445d-b75c-5a723ff39681'
            }
        ]
    }
}
# Server as reported right after creation (still in 'maintenance').
upcloudServerCreatePayload = {
    'server': {
        'hostname': 'worker',
        'password': 'supersecret',
        'state': 'maintenance',
        'uuid': '438b5b08-4147-4193-bf64-a5318f51d3bd',
        'title': 'buildbot-worker-87de7e',
        'plan': '1xCPU-1GB'
    }
}
# Same server once booted.
upcloudServerStartedPayload = {
    'server': {
        'hostname': 'worker',
        'password': 'supersecret',
        'state': 'started',
        'uuid': '438b5b08-4147-4193-bf64-a5318f51d3bd',
        'title': 'buildbot-worker-87de7e',
        'plan': '1xCPU-1GB'
    }
}
# Same server after shutdown.
upcloudServerStoppedPayload = {
    'server': {
        'hostname': 'worker',
        'password': 'supersecret',
        'state': 'stopped',
        'uuid': '438b5b08-4147-4193-bf64-a5318f51d3bd',
        'title': 'buildbot-worker-87de7e',
        'plan': '1xCPU-1GB'
    }
}
class TestUpcloudWorker(TestReactorMixin, unittest.TestCase):
    """Tests for the Upcloud latent worker against a fake HTTP service."""

    worker = None

    def setUp(self):
        self.setup_test_reactor()

    @defer.inlineCallbacks
    def METHOD_NAME(self, *args, **kwargs):
        # Build a worker wired to a fake master and a fake HTTP client that
        # replays the canned payloads queued with `self._http.expect(...)`.
        worker = upcloud.UpcloudLatentWorker(
            *args, api_username='test-api-user', api_password='test-api-password', **kwargs)
        master = fakemaster.make_master(self, wantData=True)
        self._http = worker.client = yield fakehttpclientservice.HTTPClientService.getService(
            master, self, upcloud.DEFAULT_BASE_URL, auth=('test-api-user', 'test-api-password'),
            debug=False)
        worker.setServiceParent(master)
        yield master.startService()
        self.masterhash = hashlib.sha1(util.unicode2bytes(master.name)).hexdigest()[:6]
        self.addCleanup(master.stopService)
        self.worker = worker
        return worker

    def test_instantiate(self):
        worker = upcloud.UpcloudLatentWorker('test-worker', image='test-image',
                                             api_username='test-api-user',
                                             api_password='test-api-password')
        self.failUnlessIsInstance(worker, upcloud.UpcloudLatentWorker)

    def test_missing_config(self):
        # Image and both API credentials are all mandatory.
        worker = None
        with self.assertRaises(ConfigErrors):
            worker = upcloud.UpcloudLatentWorker('test-worker')
        with self.assertRaises(ConfigErrors):
            worker = upcloud.UpcloudLatentWorker('test-worker', image='test-image')
        with self.assertRaises(ConfigErrors):
            worker = upcloud.UpcloudLatentWorker('test-worker', image='test-image',
                                                 api_username='test-api-user')
        self.assertTrue(worker is None)

    @defer.inlineCallbacks
    def test_missing_image(self):
        worker = yield self.METHOD_NAME('worker', image='no-such-image')
        self._http.expect(method='get', ep='/storage/template',
                          content_json=upcloudStorageTemplatePayload)
        with self.assertRaises(LatentWorkerFailedToSubstantiate):
            yield worker.substantiate(None, FakeBuild())

    @defer.inlineCallbacks
    def test_start_worker(self):
        worker = yield self.METHOD_NAME('worker', image='test-image')
        # resolve image to storage uuid
        self._http.expect(method='get', ep='/storage/template',
                          content_json=upcloudStorageTemplatePayload)
        # actually start server
        self._http.expect(
            method='post', ep='/server', params=None, data=None, json={'server':
                {'zone': 'de-fra1', 'title': 'buildbot-worker-87de7e', 'hostname': 'worker',
                 'user_data': '',
                 'login_user': {'username': 'root', 'ssh_keys': {'ssh_key': []}},
                 'password_delivery': 'none',
                 'storage_devices': {'storage_device': [
                     {'action': 'clone', 'storage': '8b47d21b-b4c3-445d-b75c-5a723ff39681', 'title':
                      f'buildbot-worker-{self.masterhash}', 'size': 10, 'tier': 'maxiops'}]},
                 'plan': '1xCPU-1GB'}},
            content_json=upcloudServerCreatePayload, code=202)
        # determine it's up & running
        self._http.expect(method='get', ep='/server/438b5b08-4147-4193-bf64-a5318f51d3bd',
                          content_json=upcloudServerStartedPayload)
        # get root password
        self._http.expect(method='get', ep='/server/438b5b08-4147-4193-bf64-a5318f51d3bd',
                          content_json=upcloudServerStartedPayload)
        # stop server
        self._http.expect(method='post', ep='/server/438b5b08-4147-4193-bf64-a5318f51d3bd/stop',
                          json={'stop_server': {'stop_type': 'hard', 'timeout': '1'}},
                          content_json=upcloudServerStartedPayload)
        # now it's stopped
        self._http.expect(method='get', ep='/server/438b5b08-4147-4193-bf64-a5318f51d3bd',
                          content_json=upcloudServerStoppedPayload)
        # then delete it
        self._http.expect(method='delete',
                          ep='/server/438b5b08-4147-4193-bf64-a5318f51d3bd?storages=1', code=204)
        d = worker.substantiate(None, FakeBuild())
        yield worker.attached(FakeBot())
yield d |
7,314 | test when not logged in redirects to | # -*- coding: utf-8 -*-
# pylint: disable=too-many-ancestors
import os
import unittest
from http import HTTPStatus
from django import test
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.management import call_command
from django.urls import include, path, reverse
from django.utils.translation import gettext_lazy as _
from tcms import urls
from tcms.tests import LoggedInTestCase
from tcms.tests.factories import (
TestExecutionFactory,
TestPlanFactory,
TestRunFactory,
UserFactory,
)
class TestDashboard(LoggedInTestCase):
    """Tests for the main dashboard page (``core-views-index``)."""
    @classmethod
    def setUpTestData(cls):
        """Create shared fixtures and the expected 'Base URL' warning text."""
        super().setUpTestData()
        # used to reproduce Sentry #KIWI-TCMS-38 where rendering fails
        # with that particular value
        cls.chinese_tp = TestPlanFactory(name="缺货反馈测试需求", author=cls.tester)
        doc_url = (
            "https://kiwitcms.readthedocs.io/en/latest/installing_docker.html"
            "#configuration-of-kiwi-tcms-domain"
        )
        # exact warning string the dashboard renders when no Site domain is set
        cls.base_url_error_message = _(
            "Base URL is not configured! "
            'See <a href="%(doc_url)s">documentation</a> and '
            '<a href="%(admin_url)s">change it</a>'
        ) % {
            "doc_url": doc_url,
            "admin_url": reverse("admin:sites_site_change", args=[settings.SITE_ID]),
        }
    def METHOD_NAME(self):
        """Anonymous access to the dashboard redirects to the login page."""
        self.client.logout()
        response = self.client.get(reverse("core-views-index"))
        self.assertRedirects(
            response,
            reverse("tcms-login") + "?next=/",
            target_status_code=HTTPStatus.OK,
        )
    def test_when_logged_in_renders_dashboard(self):
        """Logged-in users see the dashboard widgets."""
        response = self.client.get(reverse("core-views-index"))
        self.assertContains(response, _("Test executions"))
        self.assertContains(response, _("Dashboard"))
        self.assertContains(response, _("Your Test plans"))
    def test_dashboard_shows_testruns_for_manager(self):
        """Test runs managed by the current user are listed."""
        test_run = TestRunFactory(manager=self.tester)
        response = self.client.get(reverse("core-views-index"))
        self.assertContains(response, test_run.summary)
    def test_dashboard_shows_testruns_for_default_tester(self):
        """Test runs where the current user is default tester are listed."""
        test_run = TestRunFactory(default_tester=self.tester)
        response = self.client.get(reverse("core-views-index"))
        self.assertContains(response, test_run.summary)
    def test_dashboard_shows_testruns_for_execution_assignee(self):
        """Test runs with executions assigned to the current user are listed."""
        execution = TestExecutionFactory(assignee=self.tester)
        response = self.client.get(reverse("core-views-index"))
        self.assertContains(response, execution.run.summary)
    def test_check_base_url_not_configured(self):
        """Without a configured Site domain the Base URL warning is shown."""
        response = self.client.get("/", follow=True)
        self.assertContains(response, self.base_url_error_message)
    def test_check_base_url_configured(self):
        """With a configured Site domain the Base URL warning disappears."""
        site = Site.objects.create(domain="example.com", name="example")
        with test.override_settings(SITE_ID=site.pk):
            response = self.client.get("/", follow=True)
            self.assertNotContains(response, self.base_url_error_message)
    def test_check_connection_not_using_ssl(self):
        """Plain HTTP requests trigger the 'not secure' SSL warning."""
        response = self.client.get("/", follow=True)
        doc_url = (
            "https://kiwitcms.readthedocs.io/en/latest/installing_docker.html"
            "#ssl-configuration"
        )
        ssl_error_message = _(
            "You are not using a secure connection. "
            'See <a href="%(doc_url)s">documentation</a> and enable SSL.'
        ) % {"doc_url": doc_url}
        self.assertContains(response, ssl_error_message)
@unittest.skipUnless(
    os.getenv("TEST_DASHBOARD_CHECK_UNAPPLIED_MIGRATIONS"),
    "Check for missing migrations testing is not enabled",
)
class TestDashboardCheckMigrations(test.TransactionTestCase):
    """Dashboard warns when database migrations have not been applied.

    Opt-in via TEST_DASHBOARD_CHECK_UNAPPLIED_MIGRATIONS because the test
    rolls back real migrations (TransactionTestCase, destructive).
    """
    unapplied_migration_message = _(
        "unapplied migration(s). See "
        '<a href="https://kiwitcms.readthedocs.io/en/latest/'
        "installing_docker.html#initial-configuration-of-running-"
        'container">documentation</a>'
    )
    def test_check_unapplied_migrations(self):
        """Un-migrating an app makes the dashboard show the warning."""
        # roll the 'bugs' app back to zero to create an unapplied migration
        call_command("migrate", "bugs", "zero", verbosity=2, interactive=False)
        tester = UserFactory()
        tester.set_password("password")
        tester.save()
        self.client.login(  # nosec:B106:hardcoded_password_funcarg
            username=tester.username,
            password="password",
        )
        response = self.client.get("/", follow=True)
        self.assertContains(response, self.unapplied_migration_message)
def exception_view(request):
    """View that always fails; used to exercise the custom 500 handler."""
    raise RuntimeError()
# Test-only URLConf: the regular application URLs plus one view that always
# raises, so the custom 500 handler can be exercised end-to-end.
urlpatterns = [
    path("will-trigger-500/", exception_view),
    path("", include(urls)),
]
# dotted path to the custom server-error view under test
handler500 = "tcms.core.views.server_error"
@test.override_settings(ROOT_URLCONF=__name__)
class TestServerError(test.TestCase):
    """Verify the custom 500 handler renders the 500.html template."""

    def test_custom_server_error_view(self):
        # raise_request_exception=False makes the client return the 500
        # response instead of re-raising RuntimeError from the view
        client = test.Client(raise_request_exception=False)
        response = client.get("/will-trigger-500/")
        self.assertEqual(response.status_code, 500)
        self.assertTemplateUsed(response, "500.html")
7,315 | test vector int | import pytest
# apply the parameters_handler_yarp marker to every test in this module
pytestmark = pytest.mark.parameters_handler_yarp
import bipedal_locomotion_framework.bindings.parameters_handler as blf
import numpy as np
def test_bool():
    """A stored bool round-trips; every other scalar getter rejects the key."""
    handler = blf.YarpParametersHandler()
    handler.set_parameter_bool(name="my_bool", value=True)
    assert handler.get_parameter_bool(name="my_bool") is True
    for wrong_getter in (handler.get_parameter_int,
                         handler.get_parameter_float,
                         handler.get_parameter_string):
        with pytest.raises(ValueError):
            wrong_getter(name="my_bool")
def test_int():
    """A stored int round-trips; float and string getters reject the key."""
    handler = blf.YarpParametersHandler()
    handler.set_parameter_int(name="my_int", value=42)
    assert handler.get_parameter_int(name="my_int") == 42
    for wrong_getter in (handler.get_parameter_float,
                         handler.get_parameter_string):
        with pytest.raises(ValueError):
            wrong_getter(name="my_int")
def test_float():
    """A stored float round-trips (approx); other scalar getters reject it."""
    handler = blf.YarpParametersHandler()
    handler.set_parameter_float(name="my_float", value=3.1415)
    assert handler.get_parameter_float(name="my_float") == pytest.approx(3.1415)
    for wrong_getter in (handler.get_parameter_bool,
                         handler.get_parameter_int,
                         handler.get_parameter_string):
        with pytest.raises(ValueError):
            wrong_getter(name="my_float")
def test_string():
    """A stored string round-trips; other scalar getters reject the key."""
    handler = blf.YarpParametersHandler()
    handler.set_parameter_string(name="my_string", value="foo")
    assert handler.get_parameter_string(name="my_string") == "foo"
    for wrong_getter in (handler.get_parameter_bool,
                         handler.get_parameter_int,
                         handler.get_parameter_float):
        with pytest.raises(ValueError):
            wrong_getter(name="my_string")
def test_vector_bool():
    """A stored bool vector round-trips; other vector getters reject it."""
    handler = blf.YarpParametersHandler()
    handler.set_parameter_vector_bool(name="my_vector_bool", value=[True, False, True])
    assert handler.get_parameter_vector_bool(name="my_vector_bool") == [True, False, True]
    for wrong_getter in (handler.get_parameter_vector_int,
                         handler.get_parameter_vector_float,
                         handler.get_parameter_vector_string):
        with pytest.raises(ValueError):
            wrong_getter(name="my_vector_bool")
def METHOD_NAME():
    """A stored int vector round-trips; other vector getters reject it."""
    handler = blf.YarpParametersHandler()
    handler.set_parameter_vector_int(name="my_vector_int", value=[-1, 2, 10])
    assert handler.get_parameter_vector_int(name="my_vector_int") == [-1, 2, 10]
    for wrong_getter in (handler.get_parameter_vector_bool,
                         handler.get_parameter_vector_float,
                         handler.get_parameter_vector_string):
        with pytest.raises(ValueError):
            wrong_getter(name="my_vector_int")
def test_vector_float():
    """A stored float vector round-trips (approx); other getters reject it."""
    handler = blf.YarpParametersHandler()
    stored = [-3.14, 2.7182, 42.0]
    handler.set_parameter_vector_float(name="my_vector_float", value=stored)
    assert handler.get_parameter_vector_float(name="my_vector_float") == \
        pytest.approx(stored)
    for wrong_getter in (handler.get_parameter_vector_bool,
                         handler.get_parameter_vector_int,
                         handler.get_parameter_vector_string):
        with pytest.raises(ValueError):
            wrong_getter(name="my_vector_float")
def test_vector_string():
    """A stored string vector round-trips; other vector getters reject it."""
    handler = blf.YarpParametersHandler()
    stored = ["foo", "bar", "bipedal", "locomotion"]
    handler.set_parameter_vector_string(name="my_vector_string", value=stored)
    assert handler.get_parameter_vector_string(name="my_vector_string") == stored
    for wrong_getter in (handler.get_parameter_vector_bool,
                         handler.get_parameter_vector_int,
                         handler.get_parameter_vector_float):
        with pytest.raises(ValueError):
            wrong_getter(name="my_vector_string")
def test_vector_mixed():
    """Mixed-type vectors widen through the float setter but must not narrow to int."""
    handler = blf.YarpParametersHandler()
    # 1. Mixed int/float/bool values are accepted by the float setter and
    #    read back widened to floats.
    handler.set_parameter_vector_float(name="to_float", value=[42.0, 1, -3.14, False])
    assert handler.get_parameter_vector_float(name="to_float") == \
        pytest.approx([42.0, 1.0, -3.14, 0.0])
    # 2. int/bool-only values also go through the float setter (NOTE: despite
    #    the name "to_int", this intentionally uses the *float* setter/getter).
    handler.set_parameter_vector_float(name="to_int", value=[42, 1, -3, False])
    assert handler.get_parameter_vector_float(name="to_int") == \
        pytest.approx([42, 1, -3, 0])
    # 3. The int setter must reject float values that would lose precision.
    with pytest.raises(TypeError):
        handler.set_parameter_vector_int(name="to_int_fail",
                                         value=[42.0, 1, -3.14, False])
def test_clear():
    """clear() removes every stored parameter, regardless of its type."""
    handler = blf.YarpParametersHandler()
    handler.set_parameter_bool(name="my_bool1", value=False)
    handler.set_parameter_bool(name="my_bool2", value=True)
    handler.set_parameter_float(name="my_float", value=-42.42)
    handler.set_parameter_vector_string(name="my_vector_string", value=["bar", "foo"])
    handler.clear()
    # every previously stored name must now raise
    with pytest.raises(ValueError):
        _ = handler.get_parameter_bool(name="my_bool1")
    with pytest.raises(ValueError):
        _ = handler.get_parameter_bool(name="my_bool2")
    with pytest.raises(ValueError):
        _ = handler.get_parameter_float(name="my_float")
    with pytest.raises(ValueError):
        # fixed: previously queried name="my_float" again, so the vector
        # entry was never actually checked after clear()
        _ = handler.get_parameter_vector_string(name="my_vector_string")
def test_load_from_file():
    """Parameters and nested groups can be loaded from an INI file."""
    handler = blf.YarpParametersHandler()
    # set_from_file_path reports failure via its return value, not by raising
    assert handler.set_from_file_path('config.ini')
    assert handler.get_parameter_int("answer_to_the_ultimate_question_of_life") == 42
    assert handler.get_group("CARTOONS").get_parameter_string("John") == "Doe"
7,316 | do filter | # SPDX-FileCopyrightText: 2015 National CyberSecurity Center
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# -*- coding: utf-8 -*-
import re
from datetime import datetime, timedelta, timezone
from dateutil import parser
from intelmq.lib.bot import ExpertBot
from intelmq.lib.utils import parse_relative, TIMESPANS
class FilterExpertBot(ExpertBot):
    """Filter events, supports named paths for splitting the message flow"""

    _message_processed_verb = 'Forwarded'

    # time window: events outside [not_before, not_after] are dropped
    not_after = None
    not_before = None
    # key/value filter: all three must be set for it to be active
    filter_action: str = None
    filter_key: str = None
    filter_regex: str = None  # TODO: could be re
    filter_value: str = None

    def parse_timeattr(self, time_attr):
        """
        Parses relative or absolute time specification, decides how to parse by
        checking if the string contains any timespan identifier.
        See also https://github.com/certtools/intelmq/issues/1523
        dateutil.parser.parse detects strings like `10 hours` as absolute time.
        """
        if any(timespan in time_attr for timespan in TIMESPANS):
            relative = timedelta(minutes=parse_relative(time_attr))
            self.logger.info("Filtering out events to (relative time) %r.", relative)
            return relative
        else:
            absolute = parser.parse(time_attr)
            self.logger.info("Filtering out events to (absolute time) %r.", absolute)
            return absolute

    def init(self):
        """Validate the parameters and precompile the optional regex filter."""
        if self.not_after:
            self.not_after = self.parse_timeattr(self.not_after)
        if self.not_before:
            self.not_before = self.parse_timeattr(self.not_before)

        # key/value filtering is only enabled when key, value and a known
        # action are all configured
        self.filter = True
        if self.filter_key is None:
            self.logger.info("No filter_key parameter found.")
            self.filter = False
        elif self.filter_value is None:
            self.logger.info("No filter_value parameter found.")
            self.filter = False
        elif self.filter_action is None:
            self.logger.info("No filter_action parameter found.")
            self.filter = False
        elif self.filter_action not in ("drop", "keep"):
            # filter_action is not None here (previous elif), so only the
            # membership test is needed
            self.logger.info("Filter_action parameter definition unknown.")
            self.filter = False

        # NOTE: filter_regex only toggles regex mode; the pattern itself is
        # taken from filter_value
        self.regex = False
        if self.filter_regex is not None:
            self.regex = re.compile(self.filter_value)

        self.time_filter = self.not_after is not None or self.not_before is not None

        if not (self.filter or self.time_filter):
            raise ValueError("No relevant filter configuration found.")

    def process(self):
        """Apply time and key/value filters, routing to named output paths."""
        event = self.receive_message()

        # time based filtering; events without time.source pass through
        if self.time_filter and 'time.source' in event:
            try:
                # NOTE(review): assumes time.source is UTC — naive values are
                # tagged with tzinfo=utc rather than converted
                event_time = parser.parse(str(event.get('time.source'))).replace(tzinfo=timezone.utc)
            except ValueError:
                self.logger.error("Could not parse time.source %s.", event.get('time.source'))
            else:
                if type(self.not_after) is datetime and event_time > self.not_after:
                    self.acknowledge_message()
                    self.logger.debug("Filtered out event with time.source %s.", event.get('time.source'))
                    return
                if type(self.not_before) is datetime and event_time < self.not_before:
                    self.acknowledge_message()
                    self.logger.debug("Filtered out event with time.source %r.", event.get('time.source'))
                    return
                now = datetime.now(tz=timezone.utc)
                if type(self.not_after) is timedelta and event_time > (now - self.not_after):
                    self.acknowledge_message()
                    self.logger.debug("Filtered out event with time.source %r.", event.get('time.source'))
                    return
                if type(self.not_before) is timedelta and event_time < (now - self.not_before):
                    self.acknowledge_message()
                    self.logger.debug("Filtered out event with time.source %r.", event.get('time.source'))
                    return

        # key/value based filtering; matches/non-matches are additionally
        # copied to the permissive named paths
        if self.filter and self.filter_action == "drop":
            if self.METHOD_NAME(event, self.filter_key, self.filter_value):
                # action == drop, filter matches
                self.send_message(event, path='action_other',
                                  path_permissive=True)
                self.send_message(event, path='filter_match',
                                  path_permissive=True)
                self.acknowledge_message()
                return
            else:
                # action == drop, filter not matches
                self.send_message(event, path='filter_no_match',
                                  path_permissive=True)
                self.send_message(event)
                self.acknowledge_message()
                return

        if self.filter and self.filter_action == "keep":
            if self.METHOD_NAME(event, self.filter_key, self.filter_value):
                # action == keep, filter matches
                self.send_message(event, path='filter_match',
                                  path_permissive=True)
                self.send_message(event)
                self.acknowledge_message()
                return
            else:
                # action == keep, filter not matches
                self.send_message(event, path='action_other',
                                  path_permissive=True)
                self.send_message(event, path='filter_no_match',
                                  path_permissive=True)
                self.acknowledge_message()
                return

        self.send_message(event)
        self.acknowledge_message()

    def METHOD_NAME(self, event, key, condition):
        """Return a truthy value when the event matches the configured filter."""
        if self.regex:
            return self.regexSearchFilter(event, key)
        else:
            return self.equalsFilter(event, key, condition)

    def equalsFilter(self, event, key, value):
        """Exact equality match on the given key."""
        return (key in event and
                event.get(key) == value)

    def regexSearchFilter(self, event, key):
        """Regex search against the stringified value of the given key."""
        if key in event:
            return self.regex.search(str(event.get(key)))
        else:
            return False
BOT = FilterExpertBot  # entry point expected by the intelmq bot runner
7,317 | dpf vector double free | import ctypes
from ansys.dpf.gate import utils
from ansys.dpf.gate import errors
from ansys.dpf.gate.generated import capi
from ansys.dpf.gate.generated import dpf_vector_abstract_api
from ansys.dpf.gate.generated.data_processing_capi import DataProcessingCAPI
#-------------------------------------------------------------------------------
# DpfVector
#-------------------------------------------------------------------------------
class DpfVectorCAPI(dpf_vector_abstract_api.DpfVectorAbstractAPI):
    """ctypes bindings for the DPF vector C API.

    Every wrapper follows the same pattern: call the DLL entry point with two
    extra output parameters (error size and error message) and raise
    ``errors.DPFServerException`` whenever the reported error size is non-zero.
    NOTE: this module lives in the ``generated`` package; prefer fixing the
    generator over editing this file by hand.
    """
    @staticmethod
    def init_dpf_vector_environment(object):
        """Initialize the core data-processing environment for ``object``."""
        # get core api
        DataProcessingCAPI.init_data_processing_environment(object)
        object._deleter_func = (DataProcessingCAPI.data_processing_delete_shared_object, lambda obj: obj)
    @staticmethod
    def dpf_vector_new():
        """Create a new DPF vector and return its handle."""
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.DpfVector_new(ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res
    @staticmethod
    def METHOD_NAME(dpf_vector, data, size, modified):
        """Free a double array view of ``dpf_vector`` (``modified`` is forwarded as-is)."""
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.DpfVector_double_free(dpf_vector._internal_obj, utils.to_double_ptr_ptr(data), utils.to_int32_ptr(size), modified, ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res
    @staticmethod
    def dpf_vector_char_free(dpf_vector, data, size, modified):
        """Free a char array view of ``dpf_vector`` (``modified`` is forwarded as-is)."""
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.DpfVector_char_free(dpf_vector._internal_obj, utils.to_char_ptr_ptr(data), utils.to_int32_ptr(size), modified, ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res
    @staticmethod
    def dpf_vector_int_free(dpf_vector, data, size, modified):
        """Free an int array view of ``dpf_vector`` (``modified`` is forwarded as-is)."""
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.DpfVector_int_free(dpf_vector._internal_obj, utils.to_int32_ptr_ptr(data), utils.to_int32_ptr(size), modified, ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res
    @staticmethod
    def dpf_vector_char_ptr_free(dpf_vector, data, size, modified):
        """Free a string (char*) array view of ``dpf_vector``."""
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.DpfVector_char_ptr_free(dpf_vector._internal_obj, utils.to_char_ptr_ptr_ptr(data), utils.to_int32_ptr(size), modified, ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res
    @staticmethod
    def dpf_vector_double_commit(dpf_vector, data, size, modified):
        """Commit ``size`` doubles from ``data`` to ``dpf_vector`` via the C API."""
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.DpfVector_double_commit(dpf_vector._internal_obj, utils.to_double_ptr(data), utils.to_int32(size), modified, ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res
    @staticmethod
    def dpf_vector_int_commit(dpf_vector, data, size, modified):
        """Commit ``size`` ints from ``data`` to ``dpf_vector`` via the C API."""
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.DpfVector_int_commit(dpf_vector._internal_obj, utils.to_int32_ptr(data), utils.to_int32(size), modified, ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res
    @staticmethod
    def dpf_vector_char_commit(dpf_vector, data, size, modified):
        """Commit ``size`` chars from ``data`` to ``dpf_vector`` via the C API."""
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.DpfVector_char_commit(dpf_vector._internal_obj, utils.to_char_ptr(data), utils.to_int32(size), modified, ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res
    @staticmethod
    def dpf_vector_char_ptr_commit(dpf_vector, data, size, modified):
        """Commit ``size`` strings from ``data`` to ``dpf_vector`` via the C API."""
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.DpfVector_char_ptr_commit(dpf_vector._internal_obj, utils.to_char_ptr_ptr(data), utils.to_int32(size), modified, ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res
    @staticmethod
    def dpf_vector_delete(dpf_vector):
        """Delete ``dpf_vector`` via the C API."""
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.DpfVector_delete(dpf_vector._internal_obj, ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res
    @staticmethod
    def dpf_string_free(dpf_vector, data, size):
        """Free a string buffer (note: ``dpf_vector`` is passed as-is, not ``._internal_obj``)."""
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.DpfString_free(dpf_vector, utils.to_char_ptr(data), utils.to_int32(size), ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res
    @staticmethod
    def dpf_vector_duplicate_dpf_vector(dpf_vector):
        """Return a duplicate of ``dpf_vector``."""
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.DpfVector_duplicate_dpf_vector(dpf_vector._internal_obj, ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res
    @staticmethod
    def dpf_vector_new_for_object(api_to_use):
        """Create a DPF vector associated with ``api_to_use`` (may be None)."""
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.DpfVector_new_for_object(api_to_use._internal_obj if api_to_use is not None else None, ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res
    @staticmethod
    def dpf_string_free_for_object(api_to_use, dpf_vector, data, size):
        """Free a string buffer through the object bound to ``api_to_use``."""
        errorSize = ctypes.c_int(0)
        sError = ctypes.c_wchar_p()
        res = capi.dll.DpfString_free_for_object(api_to_use._internal_obj if api_to_use is not None else None, dpf_vector, utils.to_char_ptr(data), utils.to_int32(size), ctypes.byref(utils.to_int32(errorSize)), ctypes.byref(sError))
        if errorSize.value != 0:
            raise errors.DPFServerException(sError.value)
        return res
|
7,318 | classical | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2018-2022 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import operator
from openquake.baselib import sap, parallel, general
from openquake.hazardlib import contexts, calc
from openquake.commonlib import datastore
TWO20 = 2**20
MAX_MB = 500
'''Using shared memory
import logging
from openquake.hazardlib import probability_map
def classical(srcs, srcfilter, cmaker, pmap, monitor):
ctxs_mb = 0
allctxs = []
with pmap.shared as array:
slc = slice(cmaker.start, cmaker.start + len(cmaker.gsims))
pmap.array = array[:, :, slc]
for src in srcs:
sites = srcfilter.get_close_sites(src)
if sites is None:
continue
for ctx in cmaker.get_ctx_iter(src, sites):
allctxs.append(ctx)
ctxs_mb += ctx.nbytes / TWO20 # TWO20=1MB
if ctxs_mb > MAX_MB:
yield cmaker.update(pmap, contexts.concat(allctxs))
allctxs.clear()
ctxs_mb = 0
if ctxs_mb:
yield cmaker.update(pmap, contexts.concat(allctxs))
def classical_indep(calc_id: int):
"""
Classical calculator as postprocessor; works only for independent
sources and ruptures.
"""
parent = None if calc_id is None else datastore.read(calc_id)
dstore, log = datastore.build_dstore_log(parent=parent)
with dstore, log:
oq = dstore.parent['oqparam']
sitecol = dstore.parent['sitecol']
srcfilter = calc.filters.SourceFilter(sitecol, oq.maximum_distance)
cmakers = contexts.read_cmakers(dstore.parent)
N, L, G = len(sitecol), oq.imtls.size, sum(
len(cm.gsims) for cm in cmakers)
logging.info(f'{N=}, {L=}, {G=}, {general.humansize(N*L*G*8)=}')
pmap = probability_map.ProbabilityMap(sitecol.sids, L, G)
dstore.swmr_on()
smap = parallel.Starmap(classical, h5=dstore)
pmap.shared = smap.create_shared((N, L, G), value=1.)
csm = dstore['_csm']
maxw = csm.get_max_weight(oq)
for grp_id, sg in enumerate(csm.src_groups):
logging.info('Sending group #%d', grp_id)
for block in general.block_splitter(
sg, maxw, operator.attrgetter('weight'), sort=True):
smap.submit((block, srcfilter, cmakers[grp_id], pmap))
smap.reduce()
classical_indep.calc_id = 'parent calculation'
'''
def METHOD_NAME(srcs, srcfilter, cmaker, monitor):
    """
    Yield dicts with keys 'ctxs_mb' (size in megabytes of the accumulated
    contexts) and 'pmap' (the probability map computed from them), flushing
    whenever more than MAX_MB megabytes have been collected.
    `monitor` is unused but required by the Starmap task signature.
    """
    ctxs_mb = 0
    allctxs = []
    for src in srcs:
        sites = srcfilter.get_close_sites(src)
        if sites is None:  # source too far away from every site
            continue
        for ctx in cmaker.get_ctx_iter(src, sites):
            allctxs.append(ctx)
            ctxs_mb += ctx.nbytes / TWO20  # TWO20=1MB
            if ctxs_mb > MAX_MB:
                # flush to keep the memory footprint bounded
                yield {'ctxs_mb': ctxs_mb,
                       'pmap': cmaker.get_pmap(contexts.concat(allctxs))}
                allctxs.clear()
                ctxs_mb = 0
    if ctxs_mb:
        # flush the leftovers
        yield {'ctxs_mb': ctxs_mb,
               'pmap': cmaker.get_pmap(contexts.concat(allctxs))}
def classical_indep(calc_id: int):
    """
    Classical calculator as postprocessor; works only for independent
    sources and ruptures.
    """
    parent = None if calc_id is None else datastore.read(calc_id)
    dstore, log = datastore.build_dstore_log(parent=parent)
    with dstore, log:
        oq = dstore.parent['oqparam']
        sitecol = dstore.parent['sitecol']
        srcfilter = calc.filters.SourceFilter(sitecol, oq.maximum_distance)
        cmakers = contexts.read_cmakers(dstore.parent)
        dstore.swmr_on()  # enable single-writer/multiple-reader HDF5 access
        smap = parallel.Starmap(METHOD_NAME, h5=dstore)
        csm = dstore['_csm']
        maxw = csm.get_max_weight(oq)
        # submit one task per block of sources of comparable total weight
        for grp_id, sg in enumerate(csm.src_groups):
            for block in general.block_splitter(
                    sg, maxw, operator.attrgetter('weight'), sort=True):
                smap.submit((block, srcfilter, cmakers[grp_id]))
        # accumulate the total size of the generated contexts
        ctxs_mb = 0
        for res in smap:
            ctxs_mb += res['ctxs_mb']
        print(f'{ctxs_mb=}')
# help text shown by `sap` for the calc_id CLI argument
classical_indep.calc_id = 'parent calculation'

if __name__ == '__main__':
    sap.run(classical_indep)
7,319 | configure | from conan import ConanFile
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir
from conan.tools.scm import Version
import os
required_conan_version = ">=1.53.0"
class BrotliConan(ConanFile):
    """Conan recipe for Google's Brotli compression library."""
    name = "brotli"
    description = "Brotli compression format"
    license = "MIT"  # fixed: trailing comma previously made this a 1-tuple
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/google/brotli"
    topics = ("brotli", "compression")
    package_type = "library"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "target_bits": [64, 32, None],
        "endianness": ["big", "little", "neutral", None],
        "enable_portable": [True, False],
        "enable_rbit": [True, False],
        "enable_debug": [True, False],
        "enable_log": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "target_bits": None,
        "endianness": None,
        "enable_portable": False,
        "enable_rbit": True,
        "enable_debug": False,
        "enable_log": False,
    }

    def export_sources(self):
        export_conandata_patches(self)

    def config_options(self):
        # fPIC is meaningless on Windows
        if self.settings.os == "Windows":
            del self.options.fPIC

    def METHOD_NAME(self):
        # C library: drop C++-only settings; shared builds imply PIC
        if self.options.shared:
            self.options.rm_safe("fPIC")
        self.settings.rm_safe("compiler.cppstd")
        self.settings.rm_safe("compiler.libcxx")

    def layout(self):
        cmake_layout(self, src_folder="src")

    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)

    def generate(self):
        """Map recipe options onto Brotli's CMake/preprocessor switches."""
        tc = CMakeToolchain(self)
        tc.variables["BROTLI_BUNDLED_MODE"] = False
        tc.variables["BROTLI_DISABLE_TESTS"] = True
        if self.options.get_safe("target_bits") == 32:
            tc.preprocessor_definitions["BROTLI_BUILD_32_BIT"] = 1
        elif self.options.get_safe("target_bits") == 64:
            tc.preprocessor_definitions["BROTLI_BUILD_64_BIT"] = 1
        if self.options.get_safe("endianness") == "big":
            tc.preprocessor_definitions["BROTLI_BUILD_BIG_ENDIAN"] = 1
        elif self.options.get_safe("endianness") == "neutral":
            tc.preprocessor_definitions["BROTLI_BUILD_ENDIAN_NEUTRAL"] = 1
        elif self.options.get_safe("endianness") == "little":
            tc.preprocessor_definitions["BROTLI_BUILD_LITTLE_ENDIAN"] = 1
        if self.options.enable_portable:
            tc.preprocessor_definitions["BROTLI_BUILD_PORTABLE"] = 1
        if not self.options.enable_rbit:
            tc.preprocessor_definitions["BROTLI_BUILD_NO_RBIT"] = 1
        if self.options.enable_debug:
            tc.preprocessor_definitions["BROTLI_DEBUG"] = 1
        if self.options.enable_log:
            tc.preprocessor_definitions["BROTLI_ENABLE_LOG"] = 1
        # To install relocatable shared libs on Macos
        tc.cache_variables["CMAKE_POLICY_DEFAULT_CMP0042"] = "NEW"
        tc.generate()

    def build(self):
        apply_conandata_patches(self)
        cmake = CMake(self)
        cmake.METHOD_NAME()
        cmake.build()

    def package(self):
        copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
        cmake = CMake(self)
        cmake.install()
        rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))

    def package_info(self):
        """Declare the three Brotli components and their interdependencies."""
        includedir = os.path.join("include", "brotli")
        # brotlicommon
        self.cpp_info.components["brotlicommon"].set_property("pkg_config_name", "libbrotlicommon")
        self.cpp_info.components["brotlicommon"].includedirs.append(includedir)
        self.cpp_info.components["brotlicommon"].libs = [self._get_decorated_lib("brotlicommon")]
        if self.settings.os == "Windows" and self.options.shared:
            self.cpp_info.components["brotlicommon"].defines.append("BROTLI_SHARED_COMPILATION")
        # brotlidec
        self.cpp_info.components["brotlidec"].set_property("pkg_config_name", "libbrotlidec")
        self.cpp_info.components["brotlidec"].includedirs.append(includedir)
        self.cpp_info.components["brotlidec"].libs = [self._get_decorated_lib("brotlidec")]
        self.cpp_info.components["brotlidec"].requires = ["brotlicommon"]
        # brotlienc
        self.cpp_info.components["brotlienc"].set_property("pkg_config_name", "libbrotlienc")
        self.cpp_info.components["brotlienc"].includedirs.append(includedir)
        self.cpp_info.components["brotlienc"].libs = [self._get_decorated_lib("brotlienc")]
        self.cpp_info.components["brotlienc"].requires = ["brotlicommon"]
        if self.settings.os in ["Linux", "FreeBSD"]:
            self.cpp_info.components["brotlienc"].system_libs = ["m"]

        # TODO: to remove in conan v2 once cmake_find_package* & pkg_config generators removed.
        # do not set this target in CMakeDeps, it was a mistake, there is no official brotil config file, nor Find module file
        self.cpp_info.names["cmake_find_package"] = "Brotli"
        self.cpp_info.names["cmake_find_package_multi"] = "Brotli"
        self.cpp_info.components["brotlicommon"].names["pkg_config"] = "libbrotlicommon"
        self.cpp_info.components["brotlidec"].names["pkg_config"] = "libbrotlidec"
        self.cpp_info.components["brotlienc"].names["pkg_config"] = "libbrotlienc"

    def _get_decorated_lib(self, name):
        """Static libs were suffixed with '-static' before Brotli 1.1.0."""
        libname = name
        if Version(self.version) < "1.1.0" and not self.options.shared:
            libname += "-static"
        return libname
7,320 | test sanitize yaml and save datasource raises | import pytest
from great_expectations.cli.datasource import sanitize_yaml_and_save_datasource
from great_expectations.util import is_library_loadable
# mark every test in this module as a CLI test
pytestmark = pytest.mark.cli
def test_sanitize_yaml_and_save_datasource_raises_error_on_empty_yaml(
    empty_data_context,
):
    """An empty YAML string must be rejected with a ValueError."""
    empty_yaml = ""
    with pytest.raises(ValueError):
        sanitize_yaml_and_save_datasource(empty_data_context, empty_yaml)
def test_sanitize_yaml_and_save_datasource_raises_error_on_non_string(
    empty_data_context,
):
    """Non-string payloads (int, set, ...) must be rejected with a TypeError.

    The previous version nested the loop *inside* ``pytest.raises``, so the
    first raising input ended the ``with`` block and the remaining inputs
    were never exercised. Each input now gets its own ``pytest.raises``
    context.
    """
    for bad_input in [3, {"a", "b"}]:
        with pytest.raises(TypeError):
            sanitize_yaml_and_save_datasource(empty_data_context, bad_input)
def METHOD_NAME(
    empty_data_context,
):
    """A datasource config without a top-level ``name`` key raises ValueError."""
    nameless_yaml = """
class_name: SimpleSqlalchemyDatasource
introspection:
  whole_table:
    data_asset_name_suffix: __whole_table
connection_string: sqlite://"""
    with pytest.raises(ValueError):
        sanitize_yaml_and_save_datasource(empty_data_context, nameless_yaml)
def test_sanitize_yaml_and_save_datasource_works_without_credentials(
    sa,
    empty_data_context,
):
    """A credential-free datasource config is saved verbatim.

    Nothing should end up in ``config_variables`` because there is no
    secret material to extract.
    """
    context = empty_data_context
    yaml_snippet = """
name: my_datasource
class_name: SimpleSqlalchemyDatasource
introspection:
  whole_table:
    data_asset_name_suffix: __whole_table
connection_string: sqlite://"""
    # Sanity check: the fixture context starts with no datasources.
    assert len(context.list_datasources()) == 0
    sanitize_yaml_and_save_datasource(context, yaml_snippet)
    assert len(context.list_datasources()) == 1
    assert context.list_datasources() == [
        {
            "class_name": "SimpleSqlalchemyDatasource",
            "connection_string": "sqlite://",
            "introspection": {
                "whole_table": {"data_asset_name_suffix": "__whole_table"}
            },
            "module_name": "great_expectations.datasource",
            "name": "my_datasource",
        }
    ]
    obs = context.config_variables
    # remove the instance guid
    obs.pop("instance_id")
    # No credentials were supplied, so nothing else should be stored.
    assert obs == {}
@pytest.mark.skipif(
    not is_library_loadable(library_name="psycopg2"),
    reason="psycopg2 is not installed",
)
def test_sanitize_yaml_and_save_datasource_works_with_credentials(
    sa,
    empty_data_context,
):
    """Credentials are moved out of the stored config and masked in listings.

    The plaintext password must appear only in ``config_variables``; the
    datasource listing shows it as ``***``.
    """
    context = empty_data_context
    yaml_snippet = """
name: foo_datasource
class_name: SimpleSqlalchemyDatasource
credentials:
  host: localhost
  port: '5432'
  username: user
  password: pass
  database: postgres
  drivername: postgresql"""
    # Sanity check: the fixture context starts with no datasources.
    assert len(context.list_datasources()) == 0
    sanitize_yaml_and_save_datasource(context, yaml_snippet)
    assert len(context.list_datasources()) == 1
    # The listed config masks the password.
    assert context.list_datasources() == [
        {
            "class_name": "SimpleSqlalchemyDatasource",
            "credentials": {
                "database": "postgres",
                "drivername": "postgresql",
                "host": "localhost",
                "password": "***",
                "port": "5432",
                "username": "user",
            },
            "module_name": "great_expectations.datasource",
            "name": "foo_datasource",
        }
    ]
    obs = context.config_variables
    # remove the instance guid
    obs.pop("instance_id")
    # The plaintext password is stored (only) in the config variables.
    assert obs == {
        "foo_datasource": {
            "database": "postgres",
            "drivername": "postgresql",
            "host": "localhost",
            "password": "pass",
            "port": "5432",
            "username": "user",
        }
    }
def test_sanitize_yaml_and_save_datasource_does_not_overwrite_duplicate_when_overwrite_existing_is_false(
    capsys,
    empty_data_context,
):
    """Saving a duplicate name with overwrite_existing=False warns and is a no-op."""
    context = empty_data_context
    expected_warning = (
        '**WARNING** A Datasource named "my_datasource" already exists in this '
        "Data Context. The Datasource has *not* been saved. Please use a "
        "different name or set overwrite_existing=True if you want to overwrite!"
    )
    pandas_yaml_snippet = """
name: my_datasource
class_name: Datasource
execution_engine:
  class_name: PandasExecutionEngine
"""
    assert len(context.list_datasources()) == 0

    # First save succeeds and registers exactly one datasource.
    sanitize_yaml_and_save_datasource(
        context, pandas_yaml_snippet, overwrite_existing=False
    )
    assert len(context.list_datasources()) == 1
    datasource_from_context = [
        {
            "class_name": "Datasource",
            "module_name": "great_expectations.datasource",
            "name": "my_datasource",
            "execution_engine": {
                "class_name": "PandasExecutionEngine",
                "module_name": "great_expectations.execution_engine",
            },
        }
    ]
    assert context.list_datasources() == datasource_from_context

    # Saving the same snippet again warns and leaves the context unchanged.
    sanitize_yaml_and_save_datasource(
        context, pandas_yaml_snippet, overwrite_existing=False
    )
    assert len(context.list_datasources()) == 1
    assert capsys.readouterr().out.strip() == expected_warning

    # A *different* datasource type under the same name is refused as well.
    sql_yaml_snippet = """
name: my_datasource
class_name: SimpleSqlalchemyDatasource
introspection:
  whole_table:
    data_asset_name_suffix: __whole_table
connection_string: sqlite://"""
    sanitize_yaml_and_save_datasource(
        context, sql_yaml_snippet, overwrite_existing=False
    )
    assert len(context.list_datasources()) == 1
    assert capsys.readouterr().out.strip() == expected_warning
def test_sanitize_yaml_and_save_datasource_does_overwrite_duplicate_when_overwrite_existing_is_true(
    sa,
    capsys,
    empty_data_context,
):
    """Saving a duplicate name with overwrite_existing=True silently replaces it.

    (Also removes a stray ``|`` artifact that had been appended to the last
    assertion line.)
    """
    context = empty_data_context
    pandas_yaml_snippet = """
name: my_datasource
class_name: Datasource
execution_engine:
  class_name: PandasExecutionEngine
"""
    assert len(context.list_datasources()) == 0
    sanitize_yaml_and_save_datasource(
        context, pandas_yaml_snippet, overwrite_existing=True
    )
    assert len(context.list_datasources()) == 1
    pandas_datasource_from_context = [
        {
            "class_name": "Datasource",
            "module_name": "great_expectations.datasource",
            "name": "my_datasource",
            "execution_engine": {
                "class_name": "PandasExecutionEngine",
                "module_name": "great_expectations.execution_engine",
            },
        }
    ]
    assert context.list_datasources() == pandas_datasource_from_context
    # Overwriting with the identical config is allowed and emits no warning.
    sanitize_yaml_and_save_datasource(
        context, pandas_yaml_snippet, overwrite_existing=True
    )
    assert len(context.list_datasources()) == 1
    assert context.list_datasources() == pandas_datasource_from_context
    stdout = capsys.readouterr().out.strip()
    assert stdout == ""
    # retest with a different type of datasource with the same name: it
    # silently replaces the pandas datasource.
    sql_yaml_snippet = """
name: my_datasource
class_name: SimpleSqlalchemyDatasource
introspection:
  whole_table:
    data_asset_name_suffix: __whole_table
connection_string: sqlite://"""
    sanitize_yaml_and_save_datasource(
        context, sql_yaml_snippet, overwrite_existing=True
    )
    assert len(context.list_datasources()) == 1
    stdout = capsys.readouterr().out
    assert stdout == ""
    assert context.list_datasources() != pandas_datasource_from_context
    sql_datasource_from_context = [
        {
            "class_name": "SimpleSqlalchemyDatasource",
            "connection_string": "sqlite://",
            "introspection": {
                "whole_table": {"data_asset_name_suffix": "__whole_table"}
            },
            "module_name": "great_expectations.datasource",
            "name": "my_datasource",
        }
    ]
    assert context.list_datasources() == sql_datasource_from_context
"""MidasNet: Network for monocular depth estimation trained by mixing several datasets.
This file contains code that is adapted from
https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
"""
import torch
import torch.nn as nn
from .base_model import BaseModel
from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder
class MidasNet_small(BaseModel):
    """Network for monocular depth estimation (small MiDaS variant)."""

    def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True,
                 exportable=True, channels_last=False, align_corners=True, blocks=None):
        """Init.

        Args:
            path (str, optional): Path to saved model weights. Defaults to None.
            features (int, optional): Base number of decoder features. Defaults to 64.
            backbone (str, optional): Backbone network for the encoder.
                Defaults to "efficientnet_lite3".
            non_negative (bool, optional): Clamp the output depth to >= 0.
            exportable (bool, optional): Build an export-friendly graph.
            channels_last (bool, optional): Use channels-last memory format in forward().
            align_corners (bool, optional): Passed to the fusion blocks' interpolation.
            blocks (dict, optional): Block options; ``{'expand': True}`` widens the
                decoder features per level. Defaults to ``{'expand': True}``.
                (A ``None`` default is used to avoid a shared mutable default dict.)
        """
        print("Loading weights: ", path)
        super(MidasNet_small, self).__init__()
        if blocks is None:
            blocks = {'expand': True}
        use_pretrained = False if path else True
        self.channels_last = channels_last
        self.blocks = blocks
        self.backbone = backbone
        self.groups = 1
        # Per-level decoder widths: equal by default, doubled per level when
        # the 'expand' option is set.
        features1 = features
        features2 = features
        features3 = features
        features4 = features
        self.expand = False
        if "expand" in self.blocks and self.blocks['expand'] == True:
            self.expand = True
            features1 = features
            features2 = features * 2
            features3 = features * 4
            features4 = features * 8
        self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable)
        self.scratch.activation = nn.ReLU(False)
        self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
        self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
        self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
        self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)
        # Output head: conv -> 2x upsample -> conv -> act -> 1x1 conv -> optional clamp.
        self.scratch.output_conv = nn.Sequential(
            nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1, groups=self.groups),
            Interpolate(scale_factor=2, mode="bilinear"),
            nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
            self.scratch.activation,
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
            nn.Identity(),
        )
        if path:
            self.load(path)

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input data (image), NCHW.

        Returns:
            tensor: depth map with the channel dimension squeezed out.
        """
        if self.channels_last == True:
            print("self.channels_last = ", self.channels_last)
            # contiguous() returns a new tensor; the previous code discarded
            # the result, making the conversion a no-op.
            x = x.contiguous(memory_format=torch.channels_last)
        layer_1 = self.pretrained.layer1(x)
        layer_2 = self.pretrained.layer2(layer_1)
        layer_3 = self.pretrained.layer3(layer_2)
        layer_4 = self.pretrained.layer4(layer_3)
        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)
        path_4 = self.scratch.refinenet4(layer_4_rn)
        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
        out = self.scratch.output_conv(path_1)
        return torch.squeeze(out, dim=1)
def METHOD_NAME(m):
    """Fuse eligible Conv2d+BatchNorm2d(+ReLU) runs in-place for quantization.

    Walks ``m.named_modules()`` in registration order keeping a two-module
    lookbehind window and calls ``torch.quantization.fuse_modules`` on every
    Conv->BN->ReLU triple (or Conv->BN pair) it encounters. The model should
    be in eval mode for fusion.

    Fixes a truncated identifier on the last line (``nam`` -> ``name``),
    which raised NameError on the first iteration, and drops a stray ``|``
    artifact.
    """
    prev_previous_type = nn.Identity()
    prev_previous_name = ''
    previous_type = nn.Identity()
    previous_name = ''
    for name, module in m.named_modules():
        if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU:
            torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True)
        elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d:
            torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True)
        # Slide the lookbehind window. NOTE: a Conv->BN pair at the very end
        # of the module list is intentionally left unfused, matching the
        # original behavior.
        prev_previous_type = previous_type
        prev_previous_name = previous_name
        previous_type = type(module)
        previous_name = name
"""
The Blake2 Implementation
^^^^^^^^^^^^^^^^^^^^^^^^^^
"""
import struct
from dataclasses import dataclass
from typing import List, Tuple
from ethereum.base_types import Uint
def spit_le_to_uint(data: bytes, start: int, num_words: int) -> List[Uint]:
    """
    Extracts 8 byte little-endian words from a given data.

    Parameters
    ----------
    data :
        The data in bytes from which the words need to be extracted
    start :
        Position to start the extraction
    num_words:
        The number of words to be extracted
    """
    return [
        Uint.from_le_bytes(data[start + 8 * k : start + 8 * k + 8])
        for k in range(num_words)
    ]
@dataclass
class Blake2:
    """
    Implementation of the BLAKE2 cryptographic hashing algorithm.

    Please refer the following document for details:
    https://datatracker.ietf.org/doc/html/rfc7693
    """

    # Word size in bits (64 for the BLAKE2b flavor below).
    w: int
    # All-ones mask for a single word, i.e. 2**w - 1.
    mask_bits: int
    # struct format character used to pack one word ("Q" = uint64).
    word_format: str
    # The four rotation distances used by the G mixing function.
    R1: int
    R2: int
    R3: int
    R4: int

    @property
    def METHOD_NAME(self) -> int:
        """
        Largest value for a given Blake2 flavor (modulus for word arithmetic).
        """
        return 2**self.w

    @property
    def w_R1(self) -> int:
        """
        (w - R1) value for a given Blake2 flavor.
        Used in the function G
        """
        return self.w - self.R1

    @property
    def w_R2(self) -> int:
        """
        (w - R2) value for a given Blake2 flavor.
        Used in the function G
        """
        return self.w - self.R2

    @property
    def w_R3(self) -> int:
        """
        (w - R3) value for a given Blake2 flavor.
        Used in the function G
        """
        return self.w - self.R3

    @property
    def w_R4(self) -> int:
        """
        (w - R4) value for a given Blake2 flavor.
        Used in the function G
        """
        return self.w - self.R4

    # Message-word permutation schedule, one row per round; rounds beyond
    # the table length wrap around (see compress()).
    sigma: Tuple = (
        (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15),
        (14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3),
        (11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4),
        (7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8),
        (9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13),
        (2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9),
        (12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11),
        (13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10),
        (6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5),
        (10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0),
    )

    # Initialization vector constants (8 words).
    IV: Tuple = (
        0x6A09E667F3BCC908,
        0xBB67AE8584CAA73B,
        0x3C6EF372FE94F82B,
        0xA54FF53A5F1D36F1,
        0x510E527FADE682D1,
        0x9B05688C2B3E6C1F,
        0x1F83D9ABFB41BD6B,
        0x5BE0CD19137E2179,
    )

    @property
    def sigma_len(self) -> int:
        """
        Length of the sigma parameter (number of distinct round schedules).
        """
        return len(self.sigma)

    def get_blake2_parameters(self, data: bytes) -> Tuple:
        """
        Extract the parameters required in the Blake2 compression function
        from the provided bytes data.

        Input layout (by the offsets below): 4-byte big-endian round count,
        8 little-endian state words starting at byte 4, 16 little-endian
        message words starting at byte 68, two little-endian offset counters
        at byte 196, and a big-endian final-block flag from byte 212 on.

        Parameters
        ----------
        data :
            The bytes data that has been passed in the message.
        """
        rounds = Uint.from_be_bytes(data[:4])
        h = spit_le_to_uint(data, 4, 8)
        m = spit_le_to_uint(data, 68, 16)
        t_0, t_1 = spit_le_to_uint(data, 196, 2)
        f = Uint.from_be_bytes(data[212:])
        return (rounds, h, m, t_0, t_1, f)

    def G(
        self, v: List, a: int, b: int, c: int, d: int, x: int, y: int
    ) -> List:
        """
        The mixing function used in Blake2
        https://datatracker.ietf.org/doc/html/rfc7693#section-3.1

        Each ``(... >> Rn) ^ (... << w_Rn) % max_word`` pair implements a
        right-rotation by Rn bits within a w-bit word; additions are taken
        modulo 2**w.

        Parameters
        ----------
        v :
            The working vector to be mixed.
        a, b, c, d :
            Indexes within v of the words to be mixed.
        x, y :
            The two input words for the mixing.
        """
        v[a] = (v[a] + v[b] + x) % self.METHOD_NAME
        v[d] = ((v[d] ^ v[a]) >> self.R1) ^ (
            (v[d] ^ v[a]) << self.w_R1
        ) % self.METHOD_NAME
        v[c] = (v[c] + v[d]) % self.METHOD_NAME
        v[b] = ((v[b] ^ v[c]) >> self.R2) ^ (
            (v[b] ^ v[c]) << self.w_R2
        ) % self.METHOD_NAME
        v[a] = (v[a] + v[b] + y) % self.METHOD_NAME
        v[d] = ((v[d] ^ v[a]) >> self.R3) ^ (
            (v[d] ^ v[a]) << self.w_R3
        ) % self.METHOD_NAME
        v[c] = (v[c] + v[d]) % self.METHOD_NAME
        v[b] = ((v[b] ^ v[c]) >> self.R4) ^ (
            (v[b] ^ v[c]) << self.w_R4
        ) % self.METHOD_NAME
        return v

    def compress(
        self,
        num_rounds: Uint,
        h: List[Uint],
        m: List[Uint],
        t_0: Uint,
        t_1: Uint,
        f: bool,
    ) -> bytes:
        """
        'F Compression' from section 3.2 of RFC 7693:
        https://tools.ietf.org/html/rfc7693#section-3.2

        Parameters
        ----------
        num_rounds :
            The number of rounds. A 32-bit unsigned big-endian word
        h :
            The state vector. 8 unsigned 64-bit little-endian words
        m :
            The message block vector. 16 unsigned 64-bit little-endian words
        t_0, t_1 :
            Offset counters. 2 unsigned 64-bit little-endian words
        f:
            The final block indicator flag. An 8-bit word
            (NOTE(review): annotated ``bool`` but ``get_blake2_parameters``
            supplies a ``Uint``; only its truthiness is used — confirm.)
        """
        # Initialize local work vector v[0..15]
        v = [0] * 16
        v[0:8] = h  # First half from state
        # NOTE(review): self.IV has 8 entries but the slice 8:15 covers only
        # 7 slots, so this assignment grows v to 17 elements. v[15] still
        # receives IV[7] and the trailing element is never read, so results
        # are unaffected — but v[8:16] looks intended; confirm upstream.
        v[8:15] = self.IV  # Second half from IV
        v[12] = t_0 ^ self.IV[4]  # Low word of the offset
        v[13] = t_1 ^ self.IV[5]  # High word of the offset
        if f:
            v[14] = v[14] ^ self.mask_bits  # Invert all bits for last block
        # Mixing
        for r in range(num_rounds):
            # for more than sigma_len rounds, the schedule
            # wraps around to the beginning
            s = self.sigma[r % self.sigma_len]
            # Column step followed by diagonal step, as in RFC 7693 3.2.
            v = self.G(v, 0, 4, 8, 12, m[s[0]], m[s[1]])
            v = self.G(v, 1, 5, 9, 13, m[s[2]], m[s[3]])
            v = self.G(v, 2, 6, 10, 14, m[s[4]], m[s[5]])
            v = self.G(v, 3, 7, 11, 15, m[s[6]], m[s[7]])
            v = self.G(v, 0, 5, 10, 15, m[s[8]], m[s[9]])
            v = self.G(v, 1, 6, 11, 12, m[s[10]], m[s[11]])
            v = self.G(v, 2, 7, 8, 13, m[s[12]], m[s[13]])
            v = self.G(v, 3, 4, 9, 14, m[s[14]], m[s[15]])
        # XOR the two halves of the work vector into the state and pack it
        # as little-endian words.
        result_message_words = (h[i] ^ v[i] ^ v[i + 8] for i in range(8))
        return struct.pack("<8%s" % self.word_format, *result_message_words)
# Parameters specific to the Blake2b implementation
@dataclass
class Blake2b(Blake2):
    """
    The Blake2b flavor (64-bits) of Blake2.
    This version is used in the pre-compiled contract.
    """

    # 64-bit words with the matching all-ones mask and struct code "Q";
    # rotation constants 32/24/16/63 per RFC 7693.
    w: int = 64
    mask_bits: int = 0xFFFFFFFFFFFFFFFF
    word_format: str = "Q"
    R1: int = 32
    R2: int = 24
    R3: int = 16
    R4: int = 63
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Apprise - Push Notification Library.
# Copyright (c) 2023, Chris Caron <lead2gold@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
import os
from .ConfigBase import ConfigBase
from ..common import ConfigFormat
from ..common import ContentIncludeMode
from ..AppriseLocale import gettext_lazy as _
class ConfigFile(ConfigBase):
    """
    A wrapper for File based configuration sources
    """

    # The default descriptive name associated with the service
    service_name = _('Local File')

    # The default protocol
    protocol = 'file'

    # Configuration file inclusion can only be of the same type
    allow_cross_includes = ContentIncludeMode.STRICT

    def __init__(self, path, **kwargs):
        """
        Initialize File Object

        headers can be a dictionary of key/value pairs that you want to
        additionally include as part of the server headers to post with
        """
        super().__init__(**kwargs)

        # Store our file path as it was set (absolute, with ~ expanded)
        self.path = os.path.abspath(os.path.expanduser(path))

        # Update the config path to be relative to our file we just loaded
        self.config_path = os.path.dirname(self.path)

        return

    def METHOD_NAME(self, privacy=False, *args, **kwargs):
        """
        Returns the URL built dynamically based on specified arguments.
        """
        # Prepare our cache value; booleans serialize to yes/no, numeric
        # cache lifetimes are passed through as integers.
        if isinstance(self.cache, bool) or not self.cache:
            cache = 'yes' if self.cache else 'no'
        else:
            cache = int(self.cache)

        # Define any URL parameters
        params = {
            'encoding': self.encoding,
            'cache': cache,
        }

        if self.config_format:
            # A format was enforced; make sure it's passed back with the url
            params['format'] = self.config_format

        return 'file://{path}{params}'.format(
            path=self.quote(self.path),
            params='?{}'.format(self.urlencode(params)) if params else '',
        )

    def read(self, **kwargs):
        """
        Perform retrieval of the configuration based on the specified request
        """
        response = None

        try:
            if self.max_buffer_size > 0 and \
                    os.path.getsize(self.path) > self.max_buffer_size:
                # Content exceeds maximum buffer size
                self.logger.error(
                    'File size exceeds maximum allowable buffer length'
                    ' ({}KB).'.format(int(self.max_buffer_size / 1024)))
                return None

        except OSError:
            # getsize() can throw this exception if the file is missing
            # and or simply isn't accessible
            self.logger.error(
                'File is not accessible: {}'.format(self.path))
            return None

        # Always call throttle before any server i/o is made
        self.throttle()

        try:
            with open(self.path, "rt", encoding=self.encoding) as f:
                # Store our content for parsing
                response = f.read()

        except (ValueError, UnicodeDecodeError):
            # A result of our strict encoding check; if we receive this
            # then the file we're opening is not something we can
            # understand the encoding of..
            self.logger.error(
                'File not using expected encoding ({}) : {}'.format(
                    self.encoding, self.path))
            return None

        except (IOError, OSError):
            # IOError is present for backwards compatibility with Python
            # versions older than 3.3. >= 3.3 throw OSError now.

            # Could not open and/or read the file; this is not a problem since
            # we scan a lot of default paths.
            self.logger.error(
                'File can not be opened for read: {}'.format(self.path))
            return None

        # Detect config format based on file extension if it isn't already
        # enforced
        if self.config_format is None and \
                re.match(r'^.*\.ya?ml\s*$', self.path, re.I) is not None:
            # YAML Filename Detected
            self.default_config_format = ConfigFormat.YAML

        # Return our response object
        return response

    @staticmethod
    def parse_url(METHOD_NAME):
        """
        Parses the URL so that we can handle all different file paths
        and return it as our path object
        """
        results = ConfigBase.parse_url(METHOD_NAME, verify_host=False)
        if not results:
            # We're done early; it's not a good URL
            return results

        match = re.match(r'[a-z0-9]+://(?P<path>[^?]+)(\?.*)?', METHOD_NAME, re.I)
        if not match:
            return None

        results['path'] = ConfigFile.unquote(match.group('path'))
        return results
# Copyright 2020 Edoardo Zoni
#
# This file is part of WarpX
#
# License: BSD-3-Clause-LBNL
#-------------------------------------------------------------------------------
# Compute interpolation points and weights for coarsening and refinement in IO
# and MR applications in 1D (extensions to 2D and 3D are trivial). Weights are
# computed in order to guarantee total charge conservation for both cell-centered
# data (equal weights) and nodal data (weights depend on distance between points
# on fine and coarse grids).
#
# Notation:
# - index i refers to points on coarse grid
# - index ii refers to points on fine grid
# - sc denotes the staggering of the coarse data
# - sf denotes the staggering of the fine data
# - cr denotes the coarsening ratio (must be cr=1,2,4 only)
#
# For MR applications only the cases sc=sf=0 and sc=sf=1 are considered. Terms
# multiplied by (1-sf)*(1-sc) are ON for cell-centered data and OFF for nodal data,
# while terms multiplied by sf*sc are ON for nodal data and OFF for cell-centered
# data. C++ implementation in Source/ablastr/coarsen/average.(H/.cpp) and
# Source/ablastr/coarsen/sample.(H/.cpp)
#-------------------------------------------------------------------------------
import sys
import numpy as np
# Fine grid limits (without ghost cells)
def fine_grid_limits( sf ):
    """Return [iimin, iimax] for the fine grid given its staggering.

    sf=0 selects cell-centered data, sf=1 selects nodal data (one extra
    point at the upper end).
    """
    if sf == 0:  # cell-centered
        lo, hi = 0, 7
    elif sf == 1:  # nodal
        lo, hi = 0, 8
    return [lo, hi]
# Coarse grid limits (without ghost cells)
def coarse_grid_limits( sc, sf, iimin, iimax ):
    # Returns [imin, imax] on the coarse grid for the given staggerings and
    # fine-grid limits.
    # NOTE(review): relies on the module-level global `cr` (coarsening ratio
    # read from stdin in the main script) instead of taking it as a
    # parameter — confirm before reusing outside this script.
    imin = int( iimin/cr )
    imax = int( iimax/cr )-(1-sc)*sf+(1-sf)*sc
    return [ imin, imax ]
# Coarsening for MR: interpolation points and weights
def coarsening_points_and_weights( i, sc, sf, cr ):
    """Return [numpts, idxmin, weights] for coarse-grid point i.

    idxmin and numpts select the fine-grid stencil; the weights conserve
    total charge for both cell-centered data (sc=sf=0, equal weights) and
    nodal data (sc=sf=1, distance-based weights).
    """
    if cr == 1:
        numpts = 1
        idxmin = i
    elif cr >= 2:
        numpts = cr*(1-sf)*(1-sc) + (2*(cr-1)+1)*sf*sc
        idxmin = i*cr*(1-sf)*(1-sc) + (i*cr-cr+1)*sf*sc
    cell_centered_part = (1/cr)*(1-sf)*(1-sc)
    weights = np.array([
        cell_centered_part + (abs(cr-abs(idxmin+k-i*cr))/(cr*cr))*sf*sc
        for k in range(numpts)
    ])
    return [ numpts, idxmin, weights ]
# Refinement for MR: interpolation points and weights
def METHOD_NAME( ii, sc, sf, cr ):
    # Returns [numpts, idxmin, weights]: the coarse-grid stencil (numpts
    # points starting at idxmin) and weights used to interpolate the value
    # at fine-grid index ii.
    # NOTE(review): reads the module-level globals `iimin` and `iimax`
    # (set by the main script) to special-case the domain boundaries —
    # confirm before reusing this function outside this script.
    if ( cr==1 ):
        numpts = 1
        idxmin = ii
    elif ( cr>=2 ):
        # Nodal points aligned with a coarse point need one stencil point;
        # in-between nodal points need two.
        if ( ii%cr==0 ):
            numpts = (1-sf)*(1-sc)+sf*sc
        elif ( ii%cr!=0 ):
            numpts = (1-sf)*(1-sc)+2*sf*sc
        idxmin = (ii//cr)*(1-sf)*(1-sc)+(ii//cr)*sf*sc
    weights = np.zeros( numpts )
    for ir in range( numpts ):
        i = idxmin+ir
        if ( ii==iimin or ii==iimax ):
            # Boundary points carry an extra (cr/2 - 0.5) so that the sums
            # checked in the main script still come out to cr.
            weights[ir] = (1-sf)*(1-sc)+((abs(cr-abs(ii-i*cr)))/(cr)+(cr/2-0.5))*sf*sc
        else:
            weights[ir] = (1-sf)*(1-sc)+((abs(cr-abs(ii-i*cr)))/(cr))*sf*sc
    return [ numpts, idxmin, weights ]
## TODO Coarsening for IO: interpolation points and weights
#def coarsening_points_and_weights_for_IO( i, sf, sc, cr ):
# if ( cr==1 ):
# numpts = 1+abs(sf-sc)
# idxmin = i-sc*(1-sf)
# elif ( cr>=2 ):
# numpts = 2-sf
# idxmin = i*cr+cr//2*(1-sc)-(1-sf)
# weights = np.zeros( numpts )
# for ir in range( numpts ):
# weights[ir] = (1/numpts)*(1-sf)*(1-sc)+(1/numpts)*sf*sc
# return [ numpts, idxmin, weights ]
#-------------------------------------------------------------------------------
# Main
#-------------------------------------------------------------------------------
# Input coarsening ratio (interactive; only cr=1,2,4 are supported).
# A stray trailing "|" artifact on the final print line has been removed.
cr = int( input( "\n Select coarsening ratio (cr=1,2,4): cr=" ) )
if ( cr!=1 and cr!=2 and cr!=4 ):
    print()
    sys.exit( 'coarsening ratio cr={} is not valid'.format( cr ) )

# Loop over possible staggering of coarse and fine grid (cell-centered or nodal)
for sc in [0,1]:
    for sf in [0,1]:
        print( '\n **************************************************' )
        print( ' * Staggering of coarse grid: sc={}'.format( sc ), end='' )
        if ( sc == 0 ):
            print( ' cell-centered *' )
        elif ( sc == 1 ):
            print( ' nodal *' )
        print( ' * Staggering of fine grid: sf={}'.format( sf ), end='' )
        if ( sf == 0 ):
            print( ' cell-centered *' )
        elif ( sf == 1 ):
            print( ' nodal *' )
        print( ' **************************************************' )
        iimin,iimax = fine_grid_limits( sf )
        imin ,imax = coarse_grid_limits( sc, sf, iimin, iimax )
        print( '\n Min and max index on coarse grid: imin={} imax={}'.format( imin, imax ) )
        print( ' Min and max index on fine grid: iimin={} iimax={}'.format( iimin, iimax ) )
        # Number of grid points
        nc = imax-imin+1
        nf = iimax-iimin+1
        print( '\n Number of points on coarse grid: nc={}'.format( nc ) )
        print( ' Number of points on fine grid: nf={}'.format( nf ) )
        if ( sf!=sc ):
            print( '\n WARNING: sc={} not equal to sf={}, not implemented for MR, continue ...'.format( sc, sf ) )
            continue
        print( '\n Coarsening for MR: check interpolation points and weights' )
        print( ' ---------------------------------------------------------' )
        # Coarsening for MR: interpolation points and weights
        for i in range ( nc ): # index on coarse grid
            numpts,idxmin,weights = coarsening_points_and_weights( i, sc, sf, cr )
            print( '\n Find value at i={} by interpolating over the following points and weights:'.format( i ) )
            for ir in range( numpts ): # interpolation points and weights
                ii = idxmin+ir
                print( ' ({},{})'.format( ii, weights[ir] ), end='' )
                if not ( ir == numpts-1 ):
                    print( ' ', end='' )
            print()
        # Coarsening for MR: check conservation properties.
        # NOTE: exact float comparison is intentional here — for cr=1,2,4 the
        # weights are exact binary fractions, so the sums are exact.
        for ii in range( nf ): # index on fine grid
            ws = 0.0
            for i in range( nc ): # index on coarse grid
                numpts,idxmin,weights = coarsening_points_and_weights( i, sc, sf, cr )
                for ir in range( numpts ): # interpolation points and weights
                    jj = idxmin+ir
                    if ( jj==ii ): # interpolation point matches point on fine grid
                        ws += weights[ir]
            if ( ws!=1.0/cr ):
                print( '\n ERROR: sum of weights ws={} should be 1/cr'.format( ws ) )
        print( '\n Refinement for MR: check interpolation points and weights' )
        print( ' ---------------------------------------------------------' )
        # Refinement for MR: interpolation points and weights
        for ii in range ( nf ): # index on fine grid
            numpts,idxmin,weights = METHOD_NAME( ii, sc, sf, cr )
            print( '\n Find value at ii={} by interpolating over the following points and weights:'.format( ii ) )
            for ir in range( numpts ): # interpolation points and weights
                i = idxmin+ir
                print( ' ({},{})'.format( i, weights[ir] ), end='' )
                if not ( ir == numpts-1 ):
                    print( ' ', end='' )
            print()
        # Refinement for MR: check conservation properties
        for i in range( nc ): # index on coarse grid
            ws = 0.0
            for ii in range( nf ): # index on fine grid
                numpts,idxmin,weights = METHOD_NAME( ii, sc, sf, cr )
                for ir in range( numpts ): # interpolation points and weights
                    jj = idxmin+ir
                    if ( jj==i ): # interpolation point matches point on coarse grid
                        ws += weights[ir]
            if ( ws!=cr ):
                print( '\n ERROR: sum of weights ws={} should be cr'.format( ws ) )
from stix_shifter_utils.stix_transmission.utils.RestApiClientAsync import RestApiClientAsync
from stix_shifter_utils.utils import logger
class APIClient():
    """Async HTTP client wrapper around the QRadar Ariel REST API.

    Each method encodes its data/query parameters and delegates the actual
    request to RestApiClientAsync.call_api().
    """

    def __init__(self, connection, configuration):
        # The 'version' header below pins the Ariel API version requested on
        # every call. (The old comment claimed version 6.0 while the header
        # sent 8.0; the header value is authoritative.)
        self.logger = logger.set_logger(__name__)
        self.endpoint_start = 'api/ariel/'
        headers = dict()
        host_port = connection.get('host') + ':' + \
            str(connection.get('port', ''))

        headers['version'] = '8.0'
        headers['accept'] = 'application/json'

        auth = configuration.get('auth')
        # Identity comparison against None (PEP 8) instead of `!= None`.
        if auth is not None and auth.get('sec') is not None:
            headers['sec'] = auth.get('sec')

        url_modifier_function = None
        proxy = connection.get('proxy')
        if proxy is not None:
            proxy_url = proxy.get('url')
            proxy_auth = proxy.get('auth')
            if (proxy_url is not None and proxy_auth is not None):
                headers['proxy'] = proxy_url
                headers['proxy-authorization'] = 'Basic ' + proxy_auth
            if proxy.get('x_forward_proxy', None) is not None:
                headers['x-forward-url'] = 'https://' + \
                    host_port + '/'  # + endpoint, is set by 'add_endpoint_to_url_header'
                host_port = proxy.get('x_forward_proxy')
                if proxy.get('x_forward_proxy_auth', None) is not None:
                    headers['x-forward-auth'] = proxy.get(
                        'x_forward_proxy_auth')
                headers['user-agent'] = 'UDS'
                url_modifier_function = self.add_endpoint_to_url_header

        self.timeout = connection['options'].get('timeout')
        self.client = RestApiClientAsync(host_port,
                                         None,
                                         headers,
                                         url_modifier_function,
                                         cert_verify=connection.get('selfSignedCert', True)
                                         )

    def add_endpoint_to_url_header(self, url, endpoint, headers):
        # this function is called from 'call_api' with proxy forwarding,
        # it concatenates the endpoint to the header containing the url.
        headers['x-forward-url'] += endpoint
        # url is returned since it points to the proxy for initial call
        return url

    async def ping_box(self):
        """GET https://<server_ip>/api/help/resources — connectivity check."""
        endpoint = 'api/help/resources'  # no 'ariel' in the path
        return await self.client.call_api(endpoint, 'GET', timeout=self.timeout)

    async def get_databases(self):
        """GET https://<server_ip>/api/ariel/databases — list Ariel databases."""
        endpoint = self.endpoint_start + 'databases'
        return await self.client.call_api(endpoint, 'GET', timeout=self.timeout)

    async def get_database(self, database_name):
        """GET https://<server_ip>/api/ariel/databases/<database_name>."""
        endpoint = self.endpoint_start + 'databases' + '/' + database_name
        return await self.client.call_api(endpoint, 'GET', timeout=self.timeout)

    async def get_searches(self):
        """GET https://<server_ip>/api/ariel/searches — list existing searches."""
        endpoint = self.endpoint_start + "searches"
        return await self.client.call_api(endpoint, 'GET', timeout=self.timeout)

    async def create_search(self, query_expression):
        """POST https://<server_ip>/api/ariel/searches — start an AQL search."""
        endpoint = self.endpoint_start + "searches"
        data = {'query_expression': query_expression}
        return await self.client.call_api(endpoint, 'POST', data=data, timeout=self.timeout)

    async def get_search(self, search_id):
        """GET https://<server_ip>/api/ariel/searches/<search_id> — search status."""
        endpoint = self.endpoint_start + "searches/" + search_id
        return await self.client.call_api(endpoint, 'GET', timeout=self.timeout)

    async def get_search_results(self, search_id, response_type, range_start=None, range_end=None):
        """GET https://<server_ip>/api/ariel/searches/<search_id>/results.

        An optional Range header limits the returned items to the inclusive
        [range_start, range_end] window.
        """
        headers = dict()
        headers['Accept'] = response_type
        if ((range_start is not None) and (range_end is not None)):
            headers['Range'] = ('items=' +
                                str(range_start) + '-' + str(range_end))
        endpoint = self.endpoint_start + "searches/" + search_id + '/results'
        return await self.client.call_api(endpoint, 'GET', headers, timeout=self.timeout)

    async def update_search(self, search_id, save_results=None, status=None):
        """POST https://<server_ip>/api/ariel/searches/<search_id>.

        Updates an existing search, e.g. to persist results or change its
        status (such as cancelling it).
        """
        endpoint = self.endpoint_start + "searches/" + search_id
        data = {}
        if save_results:
            data['save_results'] = save_results
        if status:
            data['status'] = status
        return await self.client.call_api(endpoint, 'POST', data=data, timeout=self.timeout)

    async def METHOD_NAME(self, search_id):
        """DELETE https://<server_ip>/api/ariel/searches/<search_id>.

        Deletes a search created earlier.
        """
        endpoint = self.endpoint_start + "searches" + '/' + search_id
        return await self.client.call_api(endpoint, 'DELETE', timeout=self.timeout)
7,326 | test helpers | # -*- coding: utf-8 -*-
# File generated from our OpenAPI spec
from __future__ import absolute_import, division, print_function
from stripe import util
from stripe.api_resources.abstract import APIResourceTestHelpers
from stripe.api_resources.abstract import CreateableAPIResource
from stripe.api_resources.abstract import ListableAPIResource
from stripe.stripe_object import StripeObject
from typing import Any
from typing import Dict
from typing import Optional
from typing_extensions import Literal
from typing_extensions import Type
class OutboundTransfer(
    CreateableAPIResource["OutboundTransfer"],
    ListableAPIResource["OutboundTransfer"],
):
    """
    Use OutboundTransfers to transfer funds from a [FinancialAccount](https://stripe.com/docs/api#financial_accounts) to a PaymentMethod belonging to the same entity. To send funds to a different party, use [OutboundPayments](https://stripe.com/docs/api#outbound_payments) instead. You can send funds over ACH rails or through a domestic wire transfer to a user's own external bank account.
    Simulate OutboundTransfer state changes with the `/v1/test_helpers/treasury/outbound_transfers` endpoints. These methods can only be called on test mode objects.
    """
    OBJECT_NAME = "treasury.outbound_transfer"
    # API response fields (annotations only; values are populated by the
    # Stripe object layer from the JSON response).
    amount: int
    cancelable: bool
    # NOTE(review): 'created' and 'expected_arrival_date' are annotated str,
    # although Stripe normally returns Unix timestamps - generated code,
    # confirm against the OpenAPI spec before relying on the type.
    created: str
    currency: str
    description: Optional[str]
    destination_payment_method: Optional[str]
    destination_payment_method_details: StripeObject
    expected_arrival_date: str
    financial_account: str
    hosted_regulatory_receipt_url: Optional[str]
    id: str
    livemode: bool
    metadata: Dict[str, str]
    object: Literal["treasury.outbound_transfer"]
    returned_details: Optional[StripeObject]
    statement_descriptor: str
    status: str
    status_transitions: StripeObject
    transaction: Any
    @classmethod
    def _cls_cancel(
        cls,
        outbound_transfer,
        api_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        # Class-method form: POST /v1/treasury/outbound_transfers/{id}/cancel.
        # `cancel` below is the instance-method variant of the same request.
        return cls._static_request(
            "post",
            "/v1/treasury/outbound_transfers/{outbound_transfer}/cancel".format(
                outbound_transfer=util.sanitize_id(outbound_transfer)
            ),
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )
    @util.class_method_variant("_cls_cancel")
    def cancel(self, idempotency_key=None, **params):
        # Instance form; dispatches to _cls_cancel when called on the class.
        return self._request(
            "post",
            "/v1/treasury/outbound_transfers/{outbound_transfer}/cancel".format(
                outbound_transfer=util.sanitize_id(self.get("id"))
            ),
            idempotency_key=idempotency_key,
            params=params,
        )
    class TestHelpers(APIResourceTestHelpers["OutboundTransfer"]):
        # Test-mode-only helpers that simulate OutboundTransfer state
        # transitions (fail / post / return) via /v1/test_helpers/... routes.
        _resource_cls: Type["OutboundTransfer"]
        @classmethod
        def _cls_fail(
            cls,
            outbound_transfer,
            api_key=None,
            stripe_version=None,
            stripe_account=None,
            **params
        ):
            # Simulate a transition to the 'failed' state.
            return cls._static_request(
                "post",
                "/v1/test_helpers/treasury/outbound_transfers/{outbound_transfer}/fail".format(
                    outbound_transfer=util.sanitize_id(outbound_transfer)
                ),
                api_key=api_key,
                stripe_version=stripe_version,
                stripe_account=stripe_account,
                params=params,
            )
        @util.class_method_variant("_cls_fail")
        def fail(self, idempotency_key=None, **params):
            return self.resource._request(
                "post",
                "/v1/test_helpers/treasury/outbound_transfers/{outbound_transfer}/fail".format(
                    outbound_transfer=util.sanitize_id(self.resource.get("id"))
                ),
                idempotency_key=idempotency_key,
                params=params,
            )
        @classmethod
        def _cls_post(
            cls,
            outbound_transfer,
            api_key=None,
            stripe_version=None,
            stripe_account=None,
            **params
        ):
            # Simulate a transition to the 'posted' state.
            return cls._static_request(
                "post",
                "/v1/test_helpers/treasury/outbound_transfers/{outbound_transfer}/post".format(
                    outbound_transfer=util.sanitize_id(outbound_transfer)
                ),
                api_key=api_key,
                stripe_version=stripe_version,
                stripe_account=stripe_account,
                params=params,
            )
        @util.class_method_variant("_cls_post")
        def post(self, idempotency_key=None, **params):
            return self.resource._request(
                "post",
                "/v1/test_helpers/treasury/outbound_transfers/{outbound_transfer}/post".format(
                    outbound_transfer=util.sanitize_id(self.resource.get("id"))
                ),
                idempotency_key=idempotency_key,
                params=params,
            )
        @classmethod
        def _cls_return_outbound_transfer(
            cls,
            outbound_transfer,
            api_key=None,
            stripe_version=None,
            stripe_account=None,
            **params
        ):
            # Simulate a transition to the 'returned' state ('return' is a
            # reserved word, hence the longer method name).
            return cls._static_request(
                "post",
                "/v1/test_helpers/treasury/outbound_transfers/{outbound_transfer}/return".format(
                    outbound_transfer=util.sanitize_id(outbound_transfer)
                ),
                api_key=api_key,
                stripe_version=stripe_version,
                stripe_account=stripe_account,
                params=params,
            )
        @util.class_method_variant("_cls_return_outbound_transfer")
        def return_outbound_transfer(self, idempotency_key=None, **params):
            return self.resource._request(
                "post",
                "/v1/test_helpers/treasury/outbound_transfers/{outbound_transfer}/return".format(
                    outbound_transfer=util.sanitize_id(self.resource.get("id"))
                ),
                idempotency_key=idempotency_key,
                params=params,
            )
    @property
    def METHOD_NAME(self):
        # Per-instance accessor for the test-mode helpers above.
        return self.TestHelpers(self)
# Bind the helper class back to its resource so APIResourceTestHelpers can
# resolve the concrete type.
OutboundTransfer.TestHelpers._resource_cls = OutboundTransfer
7,327 | test with aliased aliases | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from os.path import expanduser
from unittest import TestCase, mock
from preggy import expect
from thumbor.config import Config, format_value, generate_config
class ConfigTestCase(TestCase):
    # Unit tests for the module-level helpers of thumbor.config.
    @mock.patch("derpconf.config.generate_config")
    def test_can_generate_config(self, config_mock):
        # generate_config() should simply delegate to derpconf's generator.
        generate_config()
        expect(config_mock.called).to_be_true()
    def test_can_format_value(self):
        # Strings are quoted, lists rendered as a commented block, and other
        # values are passed through unchanged.
        expect(format_value("qwe")).to_equal("'qwe'")
        # NOTE(review): this expected text looks whitespace-mangled
        # ("# qwe# rty#") - confirm against derpconf's actual list rendering.
        expect(format_value(["qwe", "rty"])).to_equal("[\n# qwe# rty#]")
        expect(format_value(230)).to_equal(230)
class ConfigValuesTestCase(TestCase):
    # Checks thumbor's documented default configuration values and the
    # Config alias mechanism (aliases resolve transitively in both
    # directions and share the canonical key's default).
    def get_default_storage(self):
        # Single source of truth for the expected default STORAGE value.
        return "thumbor.storages.file_storage"
    def get_config(self):
        # (key, expected default) pairs verified by test_default_values.
        return (
            ("MAX_WIDTH", 0),
            ("MAX_HEIGHT", 0),
            ("ALLOWED_SOURCES", []),
            ("QUALITY", 80),
            ("LOADER", "thumbor.loaders.http_loader"),
            ("STORAGE", self.get_default_storage()),
            ("ENGINE", "thumbor.engines.pil"),
            ("GIF_ENGINE", "thumbor.engines.gif"),
            ("URL_SIGNER", "libthumbor.url_signers.base64_hmac_sha1"),
            ("ALLOW_UNSAFE_URL", True),
            ("FILE_LOADER_ROOT_PATH", expanduser("~")),
            ("STORAGE_EXPIRATION_SECONDS", 60 * 60 * 24 * 30),
            ("STORES_CRYPTO_KEY_FOR_EACH_IMAGE", False),
            ("MIXED_STORAGE_FILE_STORAGE", "thumbor.storages.no_storage"),
            ("MIXED_STORAGE_CRYPTO_STORAGE", "thumbor.storages.no_storage"),
            ("MIXED_STORAGE_DETECTOR_STORAGE", "thumbor.storages.no_storage"),
            ("DETECTORS", []),
            ("FACE_DETECTOR_CASCADE_FILE", "haarcascade_frontalface_alt.xml"),
            (
                "FILTERS",
                [
                    "thumbor.filters.brightness",
                    "thumbor.filters.colorize",
                    "thumbor.filters.contrast",
                    "thumbor.filters.rgb",
                    "thumbor.filters.round_corner",
                    "thumbor.filters.quality",
                    "thumbor.filters.noise",
                    "thumbor.filters.watermark",
                    "thumbor.filters.equalize",
                    "thumbor.filters.fill",
                    "thumbor.filters.sharpen",
                    "thumbor.filters.strip_exif",
                    "thumbor.filters.strip_icc",
                    "thumbor.filters.frame",
                    "thumbor.filters.grayscale",
                    "thumbor.filters.rotate",
                    "thumbor.filters.format",
                    "thumbor.filters.max_bytes",
                    "thumbor.filters.convolution",
                    "thumbor.filters.blur",
                    "thumbor.filters.extract_focal",
                    "thumbor.filters.focal",
                    "thumbor.filters.no_upscale",
                    "thumbor.filters.saturation",
                    "thumbor.filters.max_age",
                    "thumbor.filters.curve",
                    "thumbor.filters.background_color",
                    "thumbor.filters.upscale",
                    "thumbor.filters.proportion",
                    "thumbor.filters.stretch",
                ],
            ),
        )
    def test_default_values(self):
        # Every documented key must exist and carry its documented default.
        cfg = Config()
        for key, default_value in self.get_config():
            config_value = getattr(cfg, key)
            expect(config_value).not_to_be_null()
            expect(config_value).to_equal(default_value)
    def test_config_is_an_alias(self):
        # A value set through an alias is visible under both names.
        Config.alias("OTHER_ENGINE", "ENGINE")
        cfg = Config(OTHER_ENGINE="x")
        expect(cfg.ENGINE).to_equal("x")
        expect(cfg.OTHER_ENGINE).to_equal("x")
    def test_config_is_an_aliased_key(self):
        # A value set on the canonical key is visible through the alias too.
        Config.alias("LOADER_ALIAS", "LOADER")
        cfg = Config(LOADER="y")
        expect(cfg.LOADER).to_equal("y")
        expect(cfg.LOADER_ALIAS).to_equal("y")
    def METHOD_NAME(self):
        # Aliases of aliases resolve transitively down to the canonical key.
        Config.alias("STORAGE_ALIAS", "STORAGE")
        Config.alias("STORAGE_ALIAS_ALIAS", "STORAGE_ALIAS")
        cfg = Config(STORAGE_ALIAS_ALIAS="z")
        expect(cfg.STORAGE).to_equal("z")
        expect(cfg.STORAGE_ALIAS).to_equal("z")
        expect(cfg.STORAGE_ALIAS_ALIAS).to_equal("z")
    def test_with_aliased_aliases_with_default_values(self):
        # Unset aliased keys fall back to the canonical key's default value.
        Config.alias("STORAGE_ALIAS", "STORAGE")
        Config.alias("STORAGE_ALIAS_ALIAS", "STORAGE_ALIAS")
        cfg = Config()
        expect(cfg.STORAGE).to_equal(self.get_default_storage())
        expect(cfg.STORAGE_ALIAS).to_equal(self.get_default_storage())
        expect(cfg.STORAGE_ALIAS_ALIAS).to_equal(self.get_default_storage())
        expect(cfg.__class__.__module__).to_equal("derpconf.config")
7,328 | check if user follows | from organization.models.organization import Organization
from organization.models.followers import OrganizationFollower, ProjectFollower
from organization.models.project import Project
from organization.utility.notification import (
create_organization_follower_notification,
create_project_follower_notification,
)
from rest_framework import status
from rest_framework.exceptions import NotFound
from rest_framework.response import Response
from rest_framework.exceptions import ValidationError
from django.utils.translation import gettext as _
# IsUserFollowing View functions
def check_if_user_follows_project(user, url_slug):
    """Return a Response saying whether *user* follows the project at *url_slug*."""
    return METHOD_NAME(
        user=user,
        url_slug=url_slug,
        entity_model_being_checked=Project,
        follower_model=ProjectFollower,
        look_up_field_name="project",
        error_msg="Project not found",
    )
def check_if_user_follows_organization(user, url_slug):
    """Return a Response saying whether *user* follows the organization at *url_slug*."""
    return METHOD_NAME(
        user=user,
        url_slug=url_slug,
        entity_model_being_checked=Organization,
        follower_model=OrganizationFollower,
        look_up_field_name="organization",
        error_msg="Organization not found",
    )
def METHOD_NAME(
    user,
    url_slug,
    entity_model_being_checked,  # model being checked for followership (Organization/Project)
    follower_model,  # matching follower model (OrganizationFollower/ProjectFollower)
    look_up_field_name,  # name of the FK field on the follower model
    error_msg,
):
    """Shared implementation of the 'is the user following X?' views.

    Args:
        user: the requesting user.
        url_slug: slug identifying the entity.
        entity_model_being_checked: entity model class to look up.
        follower_model: follower model class to query.
        look_up_field_name: FK field name used in the follower filter.
        error_msg: message prefix for the 404 detail.

    Returns:
        200 Response with {"is_following": bool}.

    Raises:
        NotFound: when no entity matches *url_slug*.
    """
    try:
        entity_model = entity_model_being_checked.objects.get(url_slug=url_slug)
    except entity_model_being_checked.DoesNotExist:
        # Bug fix: the message and slug were concatenated directly, producing
        # e.g. "Project not foundmy-slug"; add a separator.
        raise NotFound(
            detail=error_msg + ": " + url_slug,
            code=status.HTTP_404_NOT_FOUND,
        )
    field_look_up_input = {
        look_up_field_name: entity_model
    }  # field_name=value syntax for the dynamic .filter lookup below
    is_following = follower_model.objects.filter(
        user=user, **field_look_up_input
    ).exists()
    return Response({"is_following": is_following}, status=status.HTTP_200_OK)
# SetFollow View functions
def set_user_following_project(request, url_slug):
    """Follow/unfollow the project at *url_slug* for the requesting user."""
    return set_user_following(
        request_data=request.data,
        user=request.user,
        entity_model_to_follow=Project,
        url_slug=url_slug,
        follower_model=ProjectFollower,
        lookup_up_field_name="project",
    )
def set_user_following_organization(request, url_slug):
    """Follow/unfollow the organization at *url_slug* for the requesting user."""
    return set_user_following(
        request_data=request.data,
        user=request.user,
        entity_model_to_follow=Organization,
        url_slug=url_slug,
        follower_model=OrganizationFollower,
        lookup_up_field_name="organization",
    )
def set_user_following(
    request_data,
    user,
    entity_model_to_follow,  # type of model that user is trying to follow
    url_slug,
    follower_model,  # type of follower model (organization/project)
    lookup_up_field_name,  # the name of the field being looked up
):
    """Follow or unfollow an entity on behalf of *user*.

    request_data['following'] must be exactly True (follow) or False
    (unfollow); any other value yields a 400. Following an entity creates a
    follower row and a notification; unfollowing deletes the follower row.

    Returns a 200 Response with the new state, or a 400 Response for a
    missing/invalid 'following' value.

    Raises:
        NotFound: entity missing, or unfollowing something not followed.
        ValidationError: following something already followed.
    """
    # what is a good way to make these different messages generic for this shared function?
    # messages are either for project or organization and differ from each other
    if "following" not in request_data:
        return Response(
            {"message": _("Missing required parameters")},
            status=status.HTTP_400_BAD_REQUEST,
        )
    try:
        entity_model = entity_model_to_follow.objects.get(url_slug=url_slug)
    except entity_model_to_follow.DoesNotExist:
        message = (
            _("Organization not found.")
            if lookup_up_field_name == "organization"
            else _("Project not found.")
        )
        raise NotFound(detail=message, code=status.HTTP_404_NOT_FOUND)
    field_look_up_input = {
        lookup_up_field_name: entity_model
    }  # this syntax is used for field_name=value in the .filter(s) lookup below
    if request_data["following"] is True:
        if follower_model.objects.filter(user=user, **field_look_up_input).exists():
            message = (
                _("You're already following this organization.")
                if lookup_up_field_name == "organization"
                else _("You're already following this project.")
            )
            raise ValidationError(message)
        else:
            entity_model_follower = follower_model.objects.create(
                user=user, **field_look_up_input
            )
            # Notify the entity's team about its new follower.
            if lookup_up_field_name == "project":
                create_project_follower_notification(entity_model_follower)
            elif lookup_up_field_name == "organization":
                create_organization_follower_notification(entity_model_follower)
            message = (
                _(
                    "You are now following this organization. You will be notified when they post an update!"
                )
                if lookup_up_field_name == "organization"
                else _(
                    "You are now following this project. You will be notified when they post an update!"
                )
            )
            return Response(
                {
                    "message": message,
                    "following": True,
                },
                status=status.HTTP_200_OK,
            )
    if request_data["following"] is False:
        try:
            follower_object = follower_model.objects.get(
                user=user, **field_look_up_input
            )
        except follower_model.DoesNotExist:
            message = (
                _("You weren't following this organization.")
                if lookup_up_field_name == "organization"
                else _("You weren't following this project.")
            )
            raise NotFound(
                detail=message,
                code=status.HTTP_404_NOT_FOUND,
            )
        follower_object.delete()
        message = (
            _("You are not following this organization anymore.")
            if lookup_up_field_name == "organization"
            else _("You are not following this project anymore.")
        )
        return Response(
            {
                "message": message,
                "following": False,
            },
            status=status.HTTP_200_OK,
        )
    else:
        # 'following' present but neither True nor False (e.g. a string).
        return Response(
            {"message": _('Invalid value for variable "following"')},
            status=status.HTTP_400_BAD_REQUEST,
        )
# ListFollowerView functions
def get_list_of_organization_followers(self):
    """Queryset of followers for the organization in self.kwargs['url_slug']."""
    return get_list_of_followers(
        list_of_followers_for_entity_model=Organization,
        follower_model=OrganizationFollower,
        look_up_field_name="organization",
        self=self,
    )
def get_list_of_project_followers(self):
    """Queryset of followers for the project in self.kwargs['url_slug']."""
    return get_list_of_followers(
        list_of_followers_for_entity_model=Project,
        follower_model=ProjectFollower,
        look_up_field_name="project",
        self=self,
    )
def get_list_of_followers(
    list_of_followers_for_entity_model,  # which model should this list be for?
    follower_model,
    look_up_field_name,
    self,
):
    """Return the follower queryset for the entity matching the view's
    url_slug kwarg, or None when no such entity exists."""
    matches = list_of_followers_for_entity_model.objects.filter(
        url_slug=self.kwargs["url_slug"]
    )
    if not matches.exists():
        return None
    # field_name=value syntax for the dynamic .filter lookup below
    lookup = {look_up_field_name: matches[0]}
    return follower_model.objects.filter(**lookup)
7,329 | set power | #!/usr/bin/python
##################
# piezo_e816.py
#
# Copyright David Baddeley, 2009
# d.baddeley@auckland.ac.nz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################
import serial
import time
import threading
try:
import Queue
except ImportError:
import queue as Queue
from PYME.Acquire.Hardware.lasers import Laser
class PhoxxLaser(Laser):
    """Driver for an Omicron PhoxX laser attached via a serial port.

    All serial traffic is owned by a background polling thread (`_poll`):
    commands are queued on `commandQueue`, normal replies come back through
    `replyQueue`, and unsolicited ('$'-prefixed, ad-hoc) messages are cached
    in `adHocVals`. A latched interlock failure (read via 'GLF') is cleared
    by resetting the controller from the poll loop.
    """
    def __init__(self, name, turnOn=False, portname='COM3', maxpower=0.14, **kwargs):
        # NOTE(review): 500000 baud and the short read timeout match the
        # polling design below - confirm against the controller settings.
        self.ser_port = serial.Serial(portname, 500000, timeout=.1, writeTimeout=2)
        self.powerControlable = True
        self.doPoll = True
        self.qLock = threading.Lock()
        self.maxpower = maxpower  # maximum optical output power (W)
        self.commandQueue = Queue.Queue()
        self.replyQueue = Queue.Queue()
        self.ilbit = '0'  # latched interlock bit: '1' => failure latched
        self.adHocVals = {}
        self.threadPoll = threading.Thread(target=self._poll)
        self.threadPoll.start()
        time.sleep(1)  # let the poll thread come up before the first query
        try:
            self.power = self._getOutputPower()
        except RuntimeError:
            self.power = 0
        Laser.__init__(self, name, turnOn)
    def IsOn(self):
        """Return True if laser emission is currently enabled."""
        return self.isOn
    def TurnOn(self):
        """Enable laser emission ('LOn')."""
        self._check_interlock()
        ret, = self._query('LOn')
        if not ret == '>':
            raise RuntimeError('Error turning laser on')
        self.isOn = True
    def TurnOff(self):
        """Disable laser emission ('LOf')."""
        self._check_interlock()
        ret, = self._query('LOf')
        if not ret == '>':
            # Bug fix: the message previously said "turning laser on".
            raise RuntimeError('Error turning laser off')
        self.isOn = False
    def _check_interlock(self):
        """Raise RuntimeError if the interlock failure bit is latched.

        Queries 'GLF' (Get Latched Failure), converts the hex response to a
        16-bit binary string and extracts the interlock bit. `_poll` resets
        the controller when this bit is set, so the failure clears itself.
        """
        if self.ilbit == '1':
            raise RuntimeError('Interlock failure - this should reset automatically')
        il, = self._query('GLF')  # Get Latched Failure.
        # '#018b' => '0b' + 16 binary digits; index -10 is the interlock bit.
        self.ilbit = format(int(il, 16), '#018b')[-10]
        if self.ilbit == '1':
            raise RuntimeError('Interlock failure - this should reset automatically')
    def METHOD_NAME(self, power):
        """Set the output power set-point ('SLP') as a fraction of full scale.

        Args:
            power: requested power in [0, 1]; transmitted as a 3-digit hex
                fraction of 0xFFF.

        Raises:
            RuntimeError: out-of-range power, interlock failure, or a
                rejected command.
        """
        self._check_interlock()
        if power < 0 or power > 1:
            raise RuntimeError('Error setting laser power: Power must be between 0 and 1')
        self.power = power
        ps = '%03X' % (0xFFF * power)
        ret, = self._query('SLP', ps)
        if not ret == '>':
            raise RuntimeError('Error setting laser power')
    def _getOutputPower(self):
        """Read back the power set-point ('GLP') as a fraction of full scale."""
        self._check_interlock()
        ret, = self._query('GLP')
        return int(ret, 16) / float(0xFFF)
    def _query(self, cmd, arg=None):
        """Send *cmd* (optionally with *arg* appended) and return its values.

        Raises RuntimeError (after flushing both queues) when the reply
        echoes a different command than the one sent.
        """
        s = cmd
        if arg is not None:
            s = s + arg
        with self.qLock:
            self.commandQueue.put('?%s\r' % s)
            cmr, vals = self._decodeResponse(self.replyQueue.get(timeout=3))
        # Must run outside qLock: _flush_queues re-acquires the same
        # (non-reentrant) lock.
        if not cmd == cmr:
            self._flush_queues()
            raise RuntimeError('Queried with %s but got response to %s' % (cmd, cmr))
        return vals
    def _flush_queues(self):
        """Discard any queued commands and replies after a protocol error."""
        with self.qLock:
            try:
                while True:
                    self.commandQueue.get(False)
            except Queue.Empty:  # bug fix: bare 'Empty' was an undefined name
                pass
            try:
                while True:
                    self.replyQueue.get(False)
            except Queue.Empty:
                pass
    def _readline(self):
        """Read characters up to (excluding) a carriage return or timeout."""
        chars = []
        c = self.ser_port.read()
        # NOTE(review): str comparison assumes a py2/text-mode serial port;
        # on py3 pyserial returns bytes - confirm in deployment.
        while not c in ['', '\r']:
            chars.append(c)
            c = self.ser_port.read()
        return ''.join(chars)
    def _poll(self):
        """Background loop owning the serial port.

        Sends queued commands, reads replies, dispatches ad-hoc ('$')
        messages and resets the controller when an interlock failure has
        been latched.
        """
        while self.doPoll:
            if self.ilbit == '1':
                print('Resetting 647 interlock')
                self.ser_port.write('?RsC\r')  # controller reset clears the latched interlock
                time.sleep(30)  # wait until the reset has finished
                self.ser_port.flush()
                self.ilbit = '0'
                print('647 Interlock reset')
            try:
                cmd = self.commandQueue.get(False)
                self.ser_port.write(cmd)
                self.ser_port.flush()
            except Queue.Empty:
                pass
            time.sleep(.05)  # wait a little for a reply
            ret = self._readline()
            if (not ret == '') and (not self.ilbit == '1'):  # ignore replies during reset
                if not ret.startswith('$'):
                    self.replyQueue.put(ret)  # normal response
                else:
                    self._procAdHoc(ret)  # unsolicited (ad-hoc) message
    def _decodeResponse(self, resp):
        """Split a raw reply into (3-letter command echo, list of values)."""
        cmd = resp[1:4]
        vals = resp[4:].split('\xA7')  # 0xA7 (section sign) separates values
        return cmd, vals
    def _procAdHoc(self, ret):
        """Cache the latest value of an unsolicited message, keyed by command."""
        cmd, vals = self._decodeResponse(ret)
        self.adHocVals[cmd] = vals
    def Close(self):
        """Turn the laser off (best effort), stop polling and close the port."""
        try:
            self.TurnOff()
        finally:
            self.doPoll = False
            self.ser_port.close()
    def __del__(self):
        self.Close()
    def GetOutputmW(self):
        """Measured output power in mW (from cached ad-hoc 'MDP' messages)."""
        return float(self.adHocVals['MDP'][0])
    def GetStatusText(self):
        """Human-readable status line; never raises."""
        try:
            pow = self.GetOutputmW()
            return '%s laser power: %3.3f mW' % (self.name, pow)
        except Exception:  # narrowed from a bare 'except:'
            return '%s laser power: ERR' % self.name
    def GetPower(self):
        """Return the last power fraction set (cached, not re-read)."""
        return self.power
7,330 | get clipboard | import functools
import socket
import struct
import time
import module.device.method.scrcpy.const as const
def inject(control_type: int):
    """Decorator factory for scrcpy control messages.

    The wrapped method returns a payload; the wrapper prefixes it with the
    one-byte *control_type*, sends the resulting packet over the control
    socket (if one is attached, under its lock) and returns the packet, so
    the construction logic stays unit-testable without a device.

    Args:
        control_type: event to send, TYPE_*
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapped(self, *args, **kwargs):
            payload = func(self, *args, **kwargs)
            package = struct.pack(">B", control_type) + payload
            if self.control_socket is not None:
                with self.control_socket_lock:
                    self.control_socket.send(package)
            return package
        return wrapped
    return decorator
class ControlSender:
    # Encodes scrcpy control messages. @inject-decorated methods build only
    # the payload; the decorator prepends the message-type byte, writes the
    # packet to the parent's control socket and returns it.
    def __init__(self, parent):
        self.parent = parent
    @property
    def control_socket(self):
        # Control socket shared with the parent scrcpy connection.
        return self.parent._scrcpy_control_socket
    @property
    def control_socket_lock(self):
        return self.parent._scrcpy_control_socket_lock
    @property
    def resolution(self):
        # (width, height) of the mirrored screen; sent with pointer events.
        return self.parent._scrcpy_resolution
    @inject(const.TYPE_INJECT_KEYCODE)
    def keycode(
        self, keycode: int, action: int = const.ACTION_DOWN, repeat: int = 0
    ) -> bytes:
        """
        Send keycode to device
        Args:
            keycode: const.KEYCODE_*
            action: ACTION_DOWN | ACTION_UP
            repeat: repeat count
        """
        # action(u8), keycode(i32), repeat(i32), meta state(i32, always 0)
        return struct.pack(">Biii", action, keycode, repeat, 0)
    @inject(const.TYPE_INJECT_TEXT)
    def text(self, text: str) -> bytes:
        """
        Send text to device
        Args:
            text: text to send
        """
        # length-prefixed (i32) UTF-8 payload
        buffer = text.encode("utf-8")
        return struct.pack(">i", len(buffer)) + buffer
    @inject(const.TYPE_INJECT_TOUCH_EVENT)
    def touch(
        self, x: int, y: int, action: int = const.ACTION_DOWN, touch_id: int = -1
    ) -> bytes:
        """
        Touch screen
        Args:
            x: horizontal position
            y: vertical position
            action: ACTION_DOWN | ACTION_UP | ACTION_MOVE
            touch_id: Default using virtual id -1, you can specify it to emulate multi finger touch
        """
        x, y = max(x, 0), max(y, 0)
        # action(u8), pointer id(i64), x(i32), y(i32), screen w(u16),
        # screen h(u16), pressure(u16, full), buttons(i32)
        return struct.pack(
            ">BqiiHHHi",
            action,
            touch_id,
            int(x),
            int(y),
            int(self.resolution[0]),
            int(self.resolution[1]),
            0xFFFF,
            1,
        )
    @inject(const.TYPE_INJECT_SCROLL_EVENT)
    def scroll(self, x: int, y: int, h: int, v: int) -> bytes:
        """
        Scroll screen
        Args:
            x: horizontal position
            y: vertical position
            h: horizontal movement
            v: vertical movement
        """
        x, y = max(x, 0), max(y, 0)
        # x(i32), y(i32), screen w(u16), screen h(u16), hscroll(i32), vscroll(i32)
        return struct.pack(
            ">iiHHii",
            int(x),
            int(y),
            int(self.resolution[0]),
            int(self.resolution[1]),
            int(h),
            int(v),
        )
    @inject(const.TYPE_BACK_OR_SCREEN_ON)
    def back_or_turn_screen_on(self, action: int = const.ACTION_DOWN) -> bytes:
        """
        If the screen is off, it is turned on only on ACTION_DOWN
        Args:
            action: ACTION_DOWN | ACTION_UP
        """
        return struct.pack(">B", action)
    @inject(const.TYPE_EXPAND_NOTIFICATION_PANEL)
    def expand_notification_panel(self) -> bytes:
        """
        Expand notification panel
        """
        return b""
    @inject(const.TYPE_EXPAND_SETTINGS_PANEL)
    def expand_settings_panel(self) -> bytes:
        """
        Expand settings panel
        """
        return b""
    @inject(const.TYPE_COLLAPSE_PANELS)
    def collapse_panels(self) -> bytes:
        """
        Collapse all panels
        """
        return b""
    def METHOD_NAME(self) -> str:
        """
        Get clipboard
        """
        # Since this function need socket response, we can't auto inject it any more
        s: socket.socket = self.control_socket
        with self.control_socket_lock:
            # Flush socket
            s.setblocking(False)
            while True:
                try:
                    s.recv(1024)
                except BlockingIOError:
                    break
            s.setblocking(True)
            # Read package
            package = struct.pack(">B", const.TYPE_GET_CLIPBOARD)
            s.send(package)
            # Reply: status(u8, must be 0), length(i32), UTF-8 text.
            (code,) = struct.unpack(">B", s.recv(1))
            assert code == 0
            (length,) = struct.unpack(">i", s.recv(4))
            # NOTE(review): a single recv(length) may return fewer bytes than
            # requested for large clipboards - confirm against usage.
            return s.recv(length).decode("utf-8")
    @inject(const.TYPE_SET_CLIPBOARD)
    def set_clipboard(self, text: str, paste: bool = False) -> bytes:
        """
        Set clipboard
        Args:
            text: the string you want to set
            paste: paste now
        """
        # paste flag(bool), length(i32), UTF-8 payload
        buffer = text.encode("utf-8")
        return struct.pack(">?i", paste, len(buffer)) + buffer
    @inject(const.TYPE_SET_SCREEN_POWER_MODE)
    def set_screen_power_mode(self, mode: int = const.POWER_MODE_NORMAL) -> bytes:
        """
        Set screen power mode
        Args:
            mode: POWER_MODE_OFF | POWER_MODE_NORMAL
        """
        return struct.pack(">b", mode)
    @inject(const.TYPE_ROTATE_DEVICE)
    def rotate_device(self) -> bytes:
        """
        Rotate device
        """
        return b""
    def swipe(
        self,
        start_x: int,
        start_y: int,
        end_x: int,
        end_y: int,
        move_step_length: int = 5,
        move_steps_delay: float = 0.005,
    ) -> None:
        """
        Swipe on screen
        Args:
            start_x: start horizontal position
            start_y: start vertical position
            end_x: start horizontal position
            end_y: end vertical position
            move_step_length: length per step
            move_steps_delay: sleep seconds after each step
        :return:
        """
        # Emulates a finger drag: one DOWN event, then MOVE events stepping
        # each axis towards the (clamped) end point, ending with UP.
        self.touch(start_x, start_y, const.ACTION_DOWN)
        next_x = start_x
        next_y = start_y
        if end_x > self.resolution[0]:
            end_x = self.resolution[0]
        if end_y > self.resolution[1]:
            end_y = self.resolution[1]
        decrease_x = True if start_x > end_x else False
        decrease_y = True if start_y > end_y else False
        while True:
            if decrease_x:
                next_x -= move_step_length
                if next_x < end_x:
                    next_x = end_x
            else:
                next_x += move_step_length
                if next_x > end_x:
                    next_x = end_x
            if decrease_y:
                next_y -= move_step_length
                if next_y < end_y:
                    next_y = end_y
            else:
                next_y += move_step_length
                if next_y > end_y:
                    next_y = end_y
            self.touch(next_x, next_y, const.ACTION_MOVE)
            if next_x == end_x and next_y == end_y:
                self.touch(next_x, next_y, const.ACTION_UP)
                break
            time.sleep(move_steps_delay)
7,331 | test missing fields | from timApp.auth.accesstype import AccessType
from timApp.tests.server.timroutetest import TimRouteTest
from timApp.timdb.sqa import db
class FeedbackReportTest(TimRouteTest):
    # Server tests for the /feedback/report/<doc path> CSV export route.
    def test_empty_report(self):
        # A document with no feedback answers yields only the CSV header row.
        self.login_test1()
        d = self.create_doc()
        self.get(
            f"/feedback/report/{d.path}",
            expect_content="Full Name,"
            "Username,"
            "Result,"
            "Item,"
            "Selected option,"
            "Feedback,"
            "Time spent on item(sec),"
            "Time spent on feedback(sec)\r\n",
        )
    def test_data_report(self):
        # Posted feedback answers appear as CSV data rows in the report.
        self.login_test1()
        d = self.create_doc(
            initial_par="""
``` {#fb1 plugin="feedback"}
nextTask: ""
questionItems:
- pluginNames: [dropdown1]
  words: []
  choices:
    - match: ["is cooking"]
      correct: true
      levels: &rightmatch
        - "**Correct!** You answered: *|answer|*"
    - match: [] # Empty brackets for default feedback.
      levels: &defaultmatch
        - "*Level 1 default feedback* in italics with *"
```
        """
        )
        e = self.get(d.url, as_tree=True).cssselect("feedback-runner")
        self.assertTrue(e)
        empty = self.post_answer(
            plugin_type="feedback",
            task_id=f"{d.id}.fb1",
            user_input={
                "correct": False,
                "user_answer": "",
                "correct_answer": "",
                "feedback": "",
            },
        )
        self.assertEqual({"result": "saved"}, empty["web"])
        answer = self.post_answer(
            plugin_type="feedback",
            task_id=f"{d.id}.fb1",
            user_input={
                "correct": True,
                "user_answer": "aaaaaa",
                "correct_answer": "aaaaaa",
                "feedback": "correct!",
            },
        )
        self.assertEqual({"result": "saved"}, answer["web"])
        # The feedback-time column is timing dependent to a tenth of a
        # second, so any of the ten possible 0.<d> values is accepted.
        exp_results = [
            f"""Full Name,Username,Result,Item,Selected option,Feedback,Time spent on item(sec),Time spent on feedback(sec)
Test user 1,testuser1,right,aaaaaa,aaaaaa,correct!,0.0,0.{d}
""".replace(
                "\n", "\r\n"
            )
            for d in range(10)
        ]
        r = self.get(f"/feedback/report/{d.path}")
        self.assertIn(r, exp_results)
    def test_no_permissions(self):
        # A user without access to another user's document gets a 403.
        self.login_test3()
        d = self.create_doc()
        self.login_test1()
        self.get(f"/feedback/report/{d.path}", expect_status=403)
    def test_grant_permission(self):
        # Teacher access is sufficient to fetch the report.
        self.login_test3()
        d = self.create_doc()
        self.test_user_1.grant_access(d, AccessType.teacher)
        db.session.commit()
        d_path = d.path
        self.login_test1()
        self.get(f"/feedback/report/{d_path}")
    def METHOD_NAME(self):
        # Answers missing a required field are rejected with a field-specific
        # validation error; a fully populated answer (even with empty string
        # values) saves successfully.
        self.login_test1()
        d = self.create_doc(
            initial_par="""
``` {#fb1 plugin="feedback"}
nextTask: ""
questionItems:
- pluginNames: [dropdown1]
  words: []
  choices:
    - match: ["is cooking"]
      correct: true
      levels: &rightmatch
        - "**Correct!** You answered: *|answer|*"
    - match: [] # Empty brackets for default feedback.
      levels: &defaultmatch
        - "*Level 1 default feedback* in italics with *"
```
        """
        )
        e = self.get(d.url, as_tree=True).cssselect("feedback-runner")
        self.assertTrue(e)
        missing_user_answer = self.post_answer(
            plugin_type="feedback",
            task_id=f"{d.id}.fb1",
            user_input={"correct": False, "correct_answer": "", "feedback": ""},
        )
        self.assertEqual(
            {
                "error": '<div class="pluginError">\n'
                "The following fields have invalid values:\n"
                "<ul><li>user_answer: Missing data for required field.</li></ul>\n"
                "</div>"
            },
            missing_user_answer["web"],
        )
        missing_correct = self.post_answer(
            plugin_type="feedback",
            task_id=f"{d.id}.fb1",
            user_input={"user_answer": "", "correct_answer": "", "feedback": ""},
        )
        self.assertEqual(
            {
                "error": '<div class="pluginError">\n'
                "The following fields have invalid values:\n"
                "<ul><li>correct: Missing data for required field.</li></ul>\n"
                "</div>"
            },
            missing_correct["web"],
        )
        missing_correct_answer = self.post_answer(
            plugin_type="feedback",
            task_id=f"{d.id}.fb1",
            user_input={"correct": False, "user_answer": "", "feedback": ""},
        )
        self.assertEqual(
            {
                "error": '<div class="pluginError">\n'
                "The following fields have invalid values:\n"
                "<ul><li>correct_answer: Missing data for required field.</li></ul>\n"
                "</div>"
            },
            missing_correct_answer["web"],
        )
        missing_feedback = self.post_answer(
            plugin_type="feedback",
            task_id=f"{d.id}.fb1",
            user_input={"correct": False, "user_answer": "", "correct_answer": ""},
        )
        self.assertEqual(
            {
                "error": '<div class="pluginError">\n'
                "The following fields have invalid values:\n"
                "<ul><li>feedback: Missing data for required field.</li></ul>\n"
                "</div>"
            },
            missing_feedback["web"],
        )
        empty = self.post_answer(
            plugin_type="feedback",
            task_id=f"{d.id}.fb1",
            user_input={
                "correct": False,
                "user_answer": "",
                "correct_answer": "",
                "feedback": "",
            },
        )
        self.assertEqual({"result": "saved"}, empty["web"])
import salt.exceptions
import salt.modules.vagrant as vagrant
import salt.utils.platform
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
# Path of the throw-away sqlite database backing the sdb profile used by
# the tests below.
TEMP_DATABASE_FILE = "/tmp/salt-tests-tmpdir/test_vagrant.sqlite"
class VagrantTestCase(TestCase, LoaderModuleMockMixin):
    """
    Unit TestCase for the salt.modules.vagrant module.
    """
    # Minimal loader opts: point the vagrant sdb driver at a throwaway
    # sqlite file so no real salt state is read or written.
    LOCAL_OPTS = {
        "extension_modules": "",
        "vagrant_sdb_data": {
            "driver": "sqlite3",
            "database": TEMP_DATABASE_FILE,
            "table": "sdb",
            "create_table": True,
        },
    }
    def setup_loader_modules(self):
        # Inject LOCAL_OPTS as the vagrant module's __opts__ dunder.
        vagrant_globals = {
            "__opts__": self.LOCAL_OPTS,
        }
        return {vagrant: vagrant_globals}
    def test_vagrant_get_vm_info_not_found(self):
        # An sdb lookup returning None means the VM is unknown -> must raise.
        mock_sdb = MagicMock(return_value=None)
        with patch.dict(vagrant.__utils__, {"sdb.sdb_get": mock_sdb}):
            with self.assertRaises(salt.exceptions.SaltInvocationError):
                vagrant.get_vm_info("thisNameDoesNotExist")
    def test_vagrant_init_positional(self):
        # init() called with all-positional args must persist both the
        # machine->name reverse mapping and the full VM definition in sdb.
        path_nowhere = os.path.join(os.sep, "tmp", "nowhere")
        if salt.utils.platform.is_windows():
            path_nowhere = "c:{}".format(path_nowhere)
        mock_sdb = MagicMock(return_value=None)
        with patch.dict(vagrant.__utils__, {"sdb.sdb_set": mock_sdb}):
            resp = vagrant.init(
                "test1",
                path_nowhere,
                "onetest",
                "nobody",
                False,
                "french",
                {"different": "very"},
            )
            self.assertTrue(resp.startswith("Name test1 defined"))
            expected = dict(
                name="test1",
                cwd=path_nowhere,
                machine="onetest",
                runas="nobody",
                vagrant_provider="french",
                different="very",
            )
            # Last call stores the machine?cwd -> name reverse index.
            mock_sdb.assert_called_with(
                "sdb://vagrant_sdb_data/onetest?{}".format(path_nowhere),
                "test1",
                self.LOCAL_OPTS,
            )
            # Some earlier call stored the full VM definition under its name.
            mock_sdb.assert_any_call(
                "sdb://vagrant_sdb_data/test1", expected, self.LOCAL_OPTS
            )
    def METHOD_NAME(self):
        # get_vm_info simply returns whatever dict sdb hands back.
        testdict = {"testone": "one", "machine": "two"}
        mock_sdb = MagicMock(return_value=testdict)
        with patch.dict(vagrant.__utils__, {"sdb.sdb_get": mock_sdb}):
            resp = vagrant.get_vm_info("test1")
        self.assertEqual(resp, testdict)
    def test_vagrant_init_dict(self):
        # Passing the definition via vm= stores it augmented with the name.
        testdict = dict(
            cwd="/tmp/anywhere",
            machine="twotest",
            runas="somebody",
            vagrant_provider="english",
        )
        expected = testdict.copy()
        expected["name"] = "test2"
        mock_sdb = MagicMock(return_value=None)
        with patch.dict(vagrant.__utils__, {"sdb.sdb_set": mock_sdb}):
            vagrant.init("test2", vm=testdict)
        mock_sdb.assert_any_call(
            "sdb://vagrant_sdb_data/test2", expected, self.LOCAL_OPTS
        )
    def test_vagrant_init_arg_override(self):
        # Explicit keyword arguments win over values in the vm= dict.
        testdict = dict(
            cwd="/tmp/there",
            machine="treetest",
            runas="anybody",
            vagrant_provider="spansh",
        )
        mock_sdb = MagicMock(return_value=None)
        with patch.dict(vagrant.__utils__, {"sdb.sdb_set": mock_sdb}):
            vagrant.init(
                "test3",
                cwd="/tmp",
                machine="threetest",
                runas="him",
                vagrant_provider="polish",
                vm=testdict,
            )
        expected = dict(
            name="test3",
            cwd="/tmp",
            machine="threetest",
            runas="him",
            vagrant_provider="polish",
        )
        mock_sdb.assert_any_call(
            "sdb://vagrant_sdb_data/test3", expected, self.LOCAL_OPTS
        )
    def test_vagrant_get_ssh_config_fails(self):
        # A VM that was defined but never started has no stored ssh config.
        mock_sdb = MagicMock(return_value=None)
        with patch.dict(vagrant.__utils__, {"sdb.sdb_set": mock_sdb}):
            mock_sdb = MagicMock(return_value={})
            with patch.dict(vagrant.__utils__, {"sdb.sdb_get": mock_sdb}):
                vagrant.init("test3", cwd="/tmp")
                with self.assertRaises(salt.exceptions.SaltInvocationError):
                    vagrant.get_ssh_config("test3")  # has not been started
    def test_vagrant_destroy(self):
        # destroy() must run `vagrant destroy -f` and delete both sdb records
        # (the reverse index and the VM definition).
        path_mydir = os.path.join(os.sep, "my", "dir")
        if salt.utils.platform.is_windows():
            path_mydir = "c:{}".format(path_mydir)
        mock_cmd = MagicMock(return_value={"retcode": 0})
        with patch.dict(vagrant.__salt__, {"cmd.run_all": mock_cmd}):
            mock_sdb = MagicMock(return_value=None)
            with patch.dict(vagrant.__utils__, {"sdb.sdb_delete": mock_sdb}):
                mock_sdb_get = MagicMock(
                    return_value={"machine": "macfour", "cwd": path_mydir}
                )
                with patch.dict(vagrant.__utils__, {"sdb.sdb_get": mock_sdb_get}):
                    self.assertTrue(vagrant.destroy("test4"))
                    mock_sdb.assert_any_call(
                        "sdb://vagrant_sdb_data/macfour?{}".format(path_mydir),
                        self.LOCAL_OPTS,
                    )
                    mock_sdb.assert_any_call(
                        "sdb://vagrant_sdb_data/test4", self.LOCAL_OPTS
                    )
                    cmd = "vagrant destroy -f macfour"
                    mock_cmd.assert_called_with(
                        cmd, runas=None, cwd=path_mydir, output_loglevel="info"
                    )
    def test_vagrant_start(self):
        # start() builds `vagrant up <machine> --provider=<provider>` and
        # runs it as the stored user in the stored directory.
        mock_cmd = MagicMock(return_value={"retcode": 0})
        with patch.dict(vagrant.__salt__, {"cmd.run_all": mock_cmd}):
            mock_sdb_get = MagicMock(
                return_value={
                    "machine": "five",
                    "cwd": "/the/dir",
                    "runas": "me",
                    "vagrant_provider": "him",
                }
            )
            with patch.dict(vagrant.__utils__, {"sdb.sdb_get": mock_sdb_get}):
                self.assertTrue(vagrant.start("test5"))
                cmd = "vagrant up five --provider=him"
                mock_cmd.assert_called_with(
                    cmd, runas="me", cwd="/the/dir", output_loglevel="info"
                )
7,333 | test vizier trial deletion | import pytest
from google.api_core import exceptions
from google.cloud import aiplatform
from google.cloud.aiplatform.vizier import Study
from google.cloud.aiplatform.vizier import Trial
from tests.system.aiplatform import e2e_base
from google.cloud.aiplatform.vizier import pyvizier
_TEST_STUDY_ID = 123
@pytest.mark.usefixtures("tear_down_resources")
class TestVizier(e2e_base.TestEndToEnd):
    """End-to-end tests for the Vertex AI Vizier SDK wrappers (Study/Trial)."""

    _temp_prefix = "temp_vertex_sdk_e2e_vizier_test"

    @staticmethod
    def _init_aiplatform():
        """Point the SDK at the shared e2e test project/location."""
        aiplatform.init(
            project=e2e_base._PROJECT,
            location=e2e_base._LOCATION,
        )

    @staticmethod
    def _make_study_config():
        """Build the StudyConfig shared by all tests: random search that
        maximizes "pr-auc" over a linear float learning_rate and a
        categorical optimizer, with decay-curve automated stopping.

        Extracted because the identical block was repeated verbatim in
        every test method.
        """
        sc = pyvizier.StudyConfig()
        sc.algorithm = pyvizier.Algorithm.RANDOM_SEARCH
        sc.metric_information.append(
            pyvizier.MetricInformation(
                name="pr-auc", goal=pyvizier.ObjectiveMetricGoal.MAXIMIZE
            )
        )
        root = sc.search_space.select_root()
        root.add_float_param(
            "learning_rate", 0.00001, 1.0, scale_type=pyvizier.ScaleType.LINEAR
        )
        root.add_categorical_param("optimizer", ["adagrad", "adam", "experimental"])
        sc.automated_stopping_config = (
            pyvizier.AutomatedStoppingConfig.decay_curve_stopping_config(use_steps=True)
        )
        return sc

    def test_vizier_lifecycle(self, shared_state):
        """Suggest trials, report a measurement for each, complete them, and
        verify the study reports them (and the optimum) as COMPLETED."""
        self._init_aiplatform()
        sc = self._make_study_config()
        study = Study.create_or_load(display_name=self._temp_prefix, problem=sc)
        # Register the study for the tear_down_resources fixture to delete.
        shared_state["resources"] = [study]
        trials = study.suggest(count=3, worker="halio_test_worker")
        for trial in trials:
            if not trial.should_stop():
                measurement = pyvizier.Measurement()
                measurement.metrics["pr-auc"] = 0.4
                trial.add_measurement(measurement=measurement)
                trial.complete(measurement=measurement)
        optimal_trials = study.optimal_trials()
        for trial in study.trials():
            assert trial.status == pyvizier.TrialStatus.COMPLETED
        assert optimal_trials[0].status == pyvizier.TrialStatus.COMPLETED

    def test_vizier_study_deletion(self, shared_state):
        """A deleted study can no longer be loaded by its resource name."""
        self._init_aiplatform()
        sc = self._make_study_config()
        study = Study.create_or_load(display_name=self._temp_prefix, problem=sc)
        study.delete()
        with pytest.raises(exceptions.NotFound):
            study = Study(study_id=study.name)

    def METHOD_NAME(self, shared_state):
        """A deleted trial can no longer be loaded from its study."""
        self._init_aiplatform()
        sc = self._make_study_config()
        study = Study.create_or_load(display_name=self._temp_prefix, problem=sc)
        trials = study.suggest(count=1, worker="halio_test_worker")
        trials[0].delete()
        with pytest.raises(exceptions.NotFound):
            study = Trial(study_id=study.name, trial_name=trials[0].name)
7,334 | get oauth redirect url | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
微信账号体系相关的Account
"""
import time
import random
import urllib.request
import urllib.parse
import urllib.error
import logging
from django.http import HttpResponseRedirect, HttpResponse
from django.middleware.csrf import rotate_token
from django.utils.translation import ugettext_lazy as _
from . import settings as weixin_settings
from .api import WeiXinApi, QyWeiXinApi
from .models import BkWeixinUser
logger = logging.getLogger('root')
class WeixinAccountSingleton(object):
    """
    Singleton base class: instantiating a subclass always returns the same
    shared instance (each concrete subclass caches its own ``_instance``).
    """
    _instance = None

    def __new__(cls, *args, **kwargs):
        if not isinstance(cls._instance, cls):
            # Bug fix: ``object.__new__`` must not be forwarded extra
            # arguments in Python 3 — because ``__new__`` is overridden here,
            # ``object.__new__(cls, *args, **kwargs)`` raises TypeError as
            # soon as a subclass defines an ``__init__`` taking arguments.
            # Constructor arguments are still delivered via ``__init__``.
            cls._instance = object.__new__(cls)
        return cls._instance
class WeixinAccount(WeixinAccountSingleton):
    """
    Base Account class for the WeChat account system.
    Provides the common account features: detecting WeChat visits, the
    OAuth redirect/login flow, state (CSRF) verification and session setup.
    """
    # WeChat OAuth2 authorize endpoint the browser is redirected to.
    WEIXIN_OAUTH_URL = 'https://open.weixin.qq.com/connect/oauth2/authorize'
    def __init__(self):
        # Enterprise WeChat (QY) and personal WeChat use different backends.
        if weixin_settings.IS_QY_WEIXIN:
            self.weixin_api = QyWeiXinApi()
        else:
            self.weixin_api = WeiXinApi()
    @staticmethod
    def is_weixin_visit(request):
        """
        Whether this request is a visit coming through the WeChat entrance
        (feature enabled, WeChat URL prefix, and the WeChat-facing host).
        """
        if weixin_settings.USE_WEIXIN and request.path.startswith(weixin_settings.WEIXIN_SITE_URL) and \
                request.get_host() == weixin_settings.WEIXIN_APP_EXTERNAL_HOST:
            return True
        return False
    def set_weixin_oauth_state(self, request, length=32):
        """
        Generate a random OAuth ``state`` token and store it, together with
        a creation timestamp, in the session for later verification.
        """
        allowed_chars = 'abcdefghijkmnpqrstuvwxyzABCDEFGHIJKLMNPQRSTUVWXYZ0123456789'
        state = ''.join(random.choice(allowed_chars) for _ in range(length))
        request.session['WEIXIN_OAUTH_STATE'] = state
        request.session['WEIXIN_OAUTH_STATE_TIMESTAMP'] = time.time()
        return state
    def METHOD_NAME(self, callback_url, state='authenticated'):
        """
        Build the WeChat OAuth authorize URL that redirects back to
        ``callback_url`` after the user approves.
        """
        params = {
            'appid': weixin_settings.WEIXIN_APP_ID,
            'redirect_uri': callback_url,
            'response_type': 'code',
            'scope': weixin_settings.WEIXIN_SCOPE,
            'state': state
        }
        params = urllib.parse.urlencode(params)
        # The '#wechat_redirect' fragment is required by the WeChat endpoint.
        redirect_uri = '%s?%s#wechat_redirect' % (self.WEIXIN_OAUTH_URL, params)
        return redirect_uri
    def redirect_weixin_login(self, request):
        """
        Redirect the browser to the WeChat login (OAuth authorize) page,
        preserving the originally requested URL in the callback's query.
        """
        url = urllib.parse.urlparse(request.build_absolute_uri())
        path = weixin_settings.WEIXIN_LOGIN_URL
        query = urllib.parse.urlencode({'c_url': request.get_full_path()})
        callback_url = urllib.parse.urlunsplit((url.scheme, url.netloc, path, query, url.fragment))
        state = self.set_weixin_oauth_state(request)
        redirect_uri = self.METHOD_NAME(callback_url, state)
        return HttpResponseRedirect(redirect_uri)
    def verify_weixin_oauth_state(self, request, expires_in=60):
        """
        Verify the returned ``state`` against the session copy (CSRF
        protection). The state also expires after ``expires_in`` seconds.
        """
        try:
            state = request.GET.get('state')
            raw_state = request.session.get('WEIXIN_OAUTH_STATE')
            raw_timestamp = request.session.get('WEIXIN_OAUTH_STATE_TIMESTAMP')
            # Check the state value itself.
            if not raw_state or raw_state != state:
                return False
            # Check the timestamp has not expired.
            if not raw_timestamp or time.time() - raw_timestamp > expires_in:
                return False
            # Clear the session copies after successful verification
            # (single-use token).
            request.session['WEIXIN_OAUTH_STATE'] = None
            request.session['WEIXIN_OAUTH_STATE_TIMESTAMP'] = None
            return True
        except Exception as e:
            logger.exception(_("验证请求weixin code的 state参数出错: %s") % e)
            return False
    def verfiy_weixin_oauth_code(self, request):
        """
        Validate the OAuth ``code`` with the WeChat API.
        NOTE(review): 'verfiy' is a typo kept for interface compatibility.
        """
        code = request.GET.get('code')
        is_ok, data = self.weixin_api.check_login_code(code)
        return is_ok, data
    def get_user_info(self, base_data):
        """
        Fetch the user profile from the WeChat API using the access_token
        and userid obtained during code verification.
        """
        userid = base_data.get('userid')
        access_token = base_data.get('access_token')
        userinfo = self.weixin_api.get_user_info_for_account(access_token, userid)
        return userinfo
    def get_callback_url(self, request):
        """
        The URL the user originally tried to visit (falls back to the
        WeChat site root).
        """
        callback_url = request.GET.get('c_url') or weixin_settings.WEIXIN_SITE_URL
        return callback_url
    def login(self, request):
        """
        OAuth callback handler: verify state and code, create/update the
        local user, bind it to the session, and redirect to the original URL.
        """
        if not self.is_weixin_visit(request):
            # TODO 改造为友好页面
            return HttpResponse(_("非微信访问,或应用未启动微信访问"))
        # Verify the anti-CSRF state returned by WeChat.
        if not self.verify_weixin_oauth_state(request):
            # TODO 改造为友好页面
            return HttpResponse(_("State验证失败"))
        # Verify the one-time code is valid.
        is_code_valid, base_data = self.verfiy_weixin_oauth_code(request)
        if not is_code_valid:
            # TODO 改造为友好页面
            return HttpResponse(_("登录失败"))
        # Fetch the profile and upsert the local user record.
        userinfo = self.get_user_info(base_data)
        logger.info('get_user_info kwargs: %s, result: %s' % (base_data, userinfo))
        userid = userinfo.pop('userid')
        user = BkWeixinUser.objects.get_update_or_create_user(userid, **userinfo)
        # Bind the user to the session and the current request.
        request.session['weixin_user_id'] = user.id
        setattr(request, 'weixin_user', user)
        # need csrftoken
        rotate_token(request)
        # Redirect to the URL the user originally requested.
        callback_url = self.get_callback_url(request)
        return HttpResponseRedirect(callback_url)
7,335 | test turnout percentage max 100 | from candidates.tests.auth import TestUserMixin
from candidates.tests.factories import MembershipFactory
from candidates.tests.uk_examples import UK2015ExamplesMixin
from django.test import TestCase
from people.tests.factories import PersonFactory
from uk_results.models import CandidateResult, ResultSet
class TestUKResults(TestUserMixin, UK2015ExamplesMixin, TestCase):
    """Tests for ResultSet serialization, versioning and turnout maths."""
    def setUp(self):
        super().setUp()
        ballot = self.local_post.ballot_set.get()
        self.result_set = ResultSet.objects.create(
            ballot=ballot,
            num_turnout_reported=10000,
            num_spoilt_ballots=30,
            user=self.user,
            ip_address="127.0.0.1",
            source="Example ResultSet for testing",
            total_electorate=50000,
        )
        # Create three people:
        self.persons = [
            PersonFactory.create(id=13, name="Alice"),
            PersonFactory.create(id=14, name="Bob"),
            PersonFactory.create(id=15, name="Carol"),
        ]
        parties = [self.labour_party, self.conservative_party, self.ld_party]
        # Create their candidacies:
        candidacies = [
            MembershipFactory.create(
                ballot=ballot, person=person, post=self.local_post, party=party
            )
            for person, party in zip(self.persons, parties)
        ]
        # Create their CandidateResult objects:
        votes = [2000, 5000, 3000]
        elected = [False, True, False]
        self.candidate_results = []
        for c, v, e in zip(candidacies, votes, elected):
            c.elected = e
            c.save()
            result = CandidateResult.objects.create(
                result_set=self.result_set, membership=c, num_ballots=v
            )
            self.candidate_results.append(result)
        # Expected as_dict() output: candidates ordered by votes descending.
        self.expected = {
            "ballot_paper_id": "local.maidstone.DIW:E05005004.2016-05-05",
            "created": self.result_set.modified.isoformat(),
            "candidate_results": [
                {
                    "elected": True,
                    "num_ballots": 5000,
                    "person_id": 14,
                    "person_name": "Bob",
                },
                {
                    "elected": False,
                    "num_ballots": 3000,
                    "person_id": 15,
                    "person_name": "Carol",
                },
                {
                    "elected": False,
                    "num_ballots": 2000,
                    "person_id": 13,
                    "person_name": "Alice",
                },
            ],
            "source": "Example ResultSet for testing",
            "spoilt_ballots": 30,
            "turnout": 10000,
            "total_electorate": 50000,
            "user": "john",
        }
    def test_as_dict(self):
        self.maxDiff = None
        self.assertEqual(self.result_set.as_dict(), self.expected)
    def test_record_version(self):
        self.maxDiff = None
        self.assertEqual(self.result_set.versions, [])
        self.result_set.record_version()
        self.assertEqual(self.result_set.versions, [self.expected])
        # Make sure we don't create duplicate versions
        self.result_set.record_version()
        self.result_set.record_version()
        self.assertEqual(self.result_set.versions, [self.expected])
        # Make sure we can force a duplicate though
        self.result_set.record_version(force=True)
        self.assertEqual(len(self.result_set.versions), 2)
        self.assertTrue(
            self.result_set.versions_equal(
                self.result_set.versions[0], self.result_set.versions[1]
            )
        )
        # Changing the data should allow a genuinely new version to record.
        self.result_set.num_turnout_reported = 300
        self.result_set.save()
        self.result_set.record_version()
    def test_turnout_percentage(self):
        # 10000 / 50000 -> 20%.
        self.result_set.calculate_turnout_percentage()
        self.assertEqual(self.result_set.turnout_percentage, 20.0)
        self.result_set.num_turnout_reported = 3333
        self.result_set.total_electorate = 10000
        self.result_set.calculate_turnout_percentage()
        # check rounded to 2 places max
        self.assertEqual(self.result_set.turnout_percentage, 33.33)
    def test_turnout_percentage_is_none(self):
        # Missing either operand leaves the percentage undefined.
        results = [
            ResultSet(num_turnout_reported=None, total_electorate=None),
            ResultSet(num_turnout_reported=5000, total_electorate=None),
            ResultSet(num_turnout_reported=None, total_electorate=5000),
        ]
        for result in results:
            with self.subTest(msg=result):
                result.calculate_turnout_percentage()
                self.assertIsNone(result.turnout_percentage)
    def METHOD_NAME(self):
        # Reported turnout above the electorate is clamped to 100%.
        result = ResultSet(num_turnout_reported=100, total_electorate=50)
        result.calculate_turnout_percentage()
        self.assertEqual(result.turnout_percentage, 100)
7,336 | test gracefully handles missing context | from contextlib import ExitStack
from unittest import mock
import pytest
from prefect import flow, task
from prefect.client.orchestration import get_client
from prefect.context import FlowRunContext
from prefect.events.related import (
MAX_CACHE_SIZE,
_get_and_cache_related_object,
related_resources_from_run_context,
)
from prefect.events.schemas import RelatedResource
from prefect.states import Running
@pytest.fixture
async def spy_client(test_database_connection_url):
    """Yield an async-context wrapper around a client whose read_* methods
    are wrapped with mocks so tests can count calls without changing
    behavior."""
    async with get_client() as client:
        exit_stack = ExitStack()
        for method in [
            "read_flow",
            "read_flow_run",
            "read_deployment",
            "read_work_queue",
            "read_work_pool",
        ]:
            # wraps= keeps the real behavior while recording the calls.
            exit_stack.enter_context(
                mock.patch.object(client, method, wraps=getattr(client, method)),
            )
        class NoOpClientWrapper:
            # Mimics get_client()'s async context manager but does NOT close
            # the underlying (already-open) client on exit.
            def __init__(self, client):
                self.client = client
            async def __aenter__(self):
                return self.client
            async def __aexit__(self, *args):
                pass
        yield NoOpClientWrapper(client)
        exit_stack.close()
async def METHOD_NAME():
    """With no flow- or task-run context active, no related resources are
    produced."""
    resources = await related_resources_from_run_context()
    assert resources == []
async def test_gets_related_from_run_context(
    prefect_client, work_queue_1, worker_deployment_wq1
):
    """A flow run created from a deployment yields the full related-resource
    chain: flow-run, flow, deployment, work-queue, work-pool, then tags."""
    flow_run = await prefect_client.create_flow_run_from_deployment(
        worker_deployment_wq1.id,
        state=Running(),
        tags=["flow-run-one"],
    )
    # construct() builds a context without validation/side effects.
    with FlowRunContext.construct(flow_run=flow_run):
        related = await related_resources_from_run_context()
    work_pool = work_queue_1.work_pool
    db_flow = await prefect_client.read_flow(flow_run.flow_id)
    # Order matters: run-scoped resources first, tags (sorted) last.
    assert related == [
        RelatedResource(
            __root__={
                "prefect.resource.id": f"prefect.flow-run.{flow_run.id}",
                "prefect.resource.role": "flow-run",
                "prefect.resource.name": flow_run.name,
            }
        ),
        RelatedResource(
            __root__={
                "prefect.resource.id": f"prefect.flow.{db_flow.id}",
                "prefect.resource.role": "flow",
                "prefect.resource.name": db_flow.name,
            }
        ),
        RelatedResource(
            __root__={
                "prefect.resource.id": f"prefect.deployment.{worker_deployment_wq1.id}",
                "prefect.resource.role": "deployment",
                "prefect.resource.name": worker_deployment_wq1.name,
            }
        ),
        RelatedResource(
            __root__={
                "prefect.resource.id": f"prefect.work-queue.{work_queue_1.id}",
                "prefect.resource.role": "work-queue",
                "prefect.resource.name": work_queue_1.name,
            }
        ),
        RelatedResource(
            __root__={
                "prefect.resource.id": f"prefect.work-pool.{work_pool.id}",
                "prefect.resource.role": "work-pool",
                "prefect.resource.name": work_pool.name,
            }
        ),
        RelatedResource(
            __root__={
                "prefect.resource.id": "prefect.tag.flow-run-one",
                "prefect.resource.role": "tag",
            }
        ),
        RelatedResource(
            __root__={
                "prefect.resource.id": "prefect.tag.test",
                "prefect.resource.role": "tag",
            }
        ),
    ]
async def test_can_exclude_by_resource_id(prefect_client):
    """The exclude= set filters resources out of the related list by id."""
    @flow
    async def test_flow():
        flow_run_context = FlowRunContext.get()
        assert flow_run_context is not None
        # Exclude this very flow run by its own resource id.
        exclude = {f"prefect.flow-run.{flow_run_context.flow_run.id}"}
        return await related_resources_from_run_context(exclude=exclude)
    state = await test_flow._run()
    flow_run = await prefect_client.read_flow_run(state.state_details.flow_run_id)
    related = await state.result()
    assert f"prefect.flow-run.{flow_run.id}" not in related
async def test_gets_related_from_task_run_context(prefect_client):
    """When only a task-run context is available (no in-process flow-run
    context), the flow run and flow are resolved via the API instead."""
    @task
    async def test_task():
        # Clear the FlowRunContext to simulated a task run in a remote worker.
        FlowRunContext.__var__.set(None)
        return await related_resources_from_run_context()
    @flow
    async def test_flow():
        return await test_task._run()
    state = await test_flow._run()
    task_state = await state.result()
    flow_run = await prefect_client.read_flow_run(state.state_details.flow_run_id)
    db_flow = await prefect_client.read_flow(flow_run.flow_id)
    task_run = await prefect_client.read_task_run(task_state.state_details.task_run_id)
    related = await task_state.result()
    assert related == [
        RelatedResource(
            __root__={
                "prefect.resource.id": f"prefect.flow-run.{flow_run.id}",
                "prefect.resource.role": "flow-run",
                "prefect.resource.name": flow_run.name,
            }
        ),
        RelatedResource(
            __root__={
                "prefect.resource.id": f"prefect.task-run.{task_run.id}",
                "prefect.resource.role": "task-run",
                "prefect.resource.name": task_run.name,
            }
        ),
        RelatedResource(
            __root__={
                "prefect.resource.id": f"prefect.flow.{db_flow.id}",
                "prefect.resource.role": "flow",
                "prefect.resource.name": db_flow.name,
            }
        ),
    ]
async def test_caches_related_objects(spy_client):
    """Repeated calls within one run reuse cached objects: the flow is read
    from the API only once even though related resources are built twice."""
    @flow
    async def test_flow():
        flow_run_context = FlowRunContext.get()
        assert flow_run_context is not None
        with mock.patch("prefect.client.orchestration.get_client", lambda: spy_client):
            await related_resources_from_run_context()
            await related_resources_from_run_context()
    await test_flow()
    spy_client.client.read_flow.assert_called_once()
async def test_lru_cache_evicts_oldest():
    """Filling the cache past MAX_CACHE_SIZE evicts the oldest entry."""
    cache = {}
    async def fetch(obj_id):
        return obj_id
    await _get_and_cache_related_object("flow-run", "flow-run", fetch, "👴", cache)
    assert "flow-run.👴" in cache
    await _get_and_cache_related_object("flow-run", "flow-run", fetch, "👩", cache)
    assert "flow-run.👴" in cache
    # Adding MAX_CACHE_SIZE fresh entries must push the oldest one out.
    for i in range(MAX_CACHE_SIZE):
        await _get_and_cache_related_object(
            "flow-run", "flow-run", fetch, f"👶 {i}", cache
        )
    assert "flow-run.👴" not in cache
async def test_lru_cache_timestamp_updated():
    """A cache hit refreshes the entry's timestamp (true LRU behavior)."""
    cache = {}
    async def fetch(obj_id):
        return obj_id
    await _get_and_cache_related_object("flow-run", "flow-run", fetch, "👴", cache)
    _, timestamp = cache["flow-run.👴"]
    await _get_and_cache_related_object("flow-run", "flow-run", fetch, "👴", cache)
    _, next_timestamp = cache["flow-run.👴"]
    assert next_timestamp > timestamp
7,337 | test conv2 d transpose 9 10 11 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from onnxbase import APIOnnx
from onnxbase import randtool
class Net(paddle.nn.Layer):
    """
    simple Net: a single Conv2DTranspose layer with a fixed 3x3 kernel;
    every other layer option is forwarded from the constructor so tests
    can exercise different padding/stride/group configurations.
    """
    def __init__(self,
                 in_channels=1,
                 out_channels=2,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 dilation=1,
                 groups=1,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCHW"):
        super(Net, self).__init__()
        self._conv2d_t = paddle.nn.Conv2DTranspose(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=stride,
            padding=padding,
            output_padding=output_padding,
            dilation=dilation,
            groups=groups,
            weight_attr=weight_attr,
            bias_attr=bias_attr,
            data_format=data_format)
    def forward(self, inputs):
        """
        forward: apply the transposed convolution to ``inputs``.
        """
        x = self._conv2d_t(inputs)
        return x
def test_Conv2DTranspose_9_10_11_12():
    """
    api: paddle.Conv2DTranspose
    op version: 9, 10, 11, 12
    Default layer configuration; checks ONNX export parity on random input.
    """
    op = Net()
    op.eval()
    # net, name, ver_list, delta=1e-6, rtol=1e-5
    obj = APIOnnx(op, 'nn_Conv2DTranspose', [9, 10, 11, 12])
    obj.set_input_data(
        "input_data",
        paddle.to_tensor(
            randtool("float", -1, 1, [3, 1, 10, 10]).astype('float32')))
    obj.run()
def test_Conv2DTranspose_9_output_padding():
    """
    api: paddle.Conv2DTranspose
    op version: 9
    output_padding with per-axis strides.
    """
    op = Net(output_padding=1, stride=[3, 2])
    op.eval()
    # net, name, ver_list, delta=1e-6, rtol=1e-5
    obj = APIOnnx(op, 'nn_Conv2DTranspose', [9])
    obj.set_input_data(
        "input_data",
        paddle.to_tensor(
            randtool("float", -1, 1, [3, 1, 10, 10]).astype('float32')))
    obj.run()
def test_Conv2DTranspose_9_output_padding_1():
    """
    api: paddle.Conv2DTranspose
    op version: 9
    output_padding with 2-element (per-axis symmetric) padding.
    """
    op = Net(output_padding=1, stride=[3, 2], padding=[1, 2])
    op.eval()
    # net, name, ver_list, delta=1e-6, rtol=1e-5
    obj = APIOnnx(op, 'nn_Conv2DTranspose', [9])
    obj.set_input_data(
        "input_data",
        paddle.to_tensor(
            randtool("float", -1, 1, [3, 1, 10, 10]).astype('float32')))
    obj.run()
def test_Conv2DTranspose_9_output_padding_2():
    """
    api: paddle.Conv2DTranspose
    op version: 9
    output_padding with 4-element (explicit per-side) padding.
    """
    op = Net(output_padding=1, stride=[3, 2], padding=[1, 2, 3, 4])
    op.eval()
    # net, name, ver_list, delta=1e-6, rtol=1e-5
    obj = APIOnnx(op, 'nn_Conv2DTranspose', [9])
    obj.set_input_data(
        "input_data",
        paddle.to_tensor(
            randtool("float", -1, 1, [3, 1, 10, 10]).astype('float32')))
    obj.run()
def test_Conv2DTranspose_9_output_padding_3():
    """
    api: paddle.Conv2DTranspose
    op version: 9
    output_padding with NCHW-style nested per-dimension padding.
    """
    op = Net(output_padding=1,
             stride=[3, 2],
             padding=[[0, 0], [0, 0], [1, 2], [2, 3]])
    op.eval()
    # net, name, ver_list, delta=1e-6, rtol=1e-5
    obj = APIOnnx(op, 'nn_Conv2DTranspose', [9])
    obj.set_input_data(
        "input_data",
        paddle.to_tensor(
            randtool("float", -1, 1, [3, 1, 10, 10]).astype('float32')))
    obj.run()
def METHOD_NAME():
    """
    api: paddle.Conv2DTranspose
    op version: 9, 10, 11, 12
    Depthwise case: groups == in_channels == out_channels, nested padding.
    (Docstring fixed: the test actually exports opsets 9-12, not only 9.)
    """
    op = Net(in_channels=16,
             out_channels=16,
             groups=16,
             padding=[[0, 0], [0, 0], [1, 2], [2, 3]])
    op.eval()
    # net, name, ver_list, delta=1e-6, rtol=1e-5
    obj = APIOnnx(op, 'nn_Conv2DTranspose', [9, 10, 11, 12])
    obj.set_input_data(
        "input_data",
        paddle.to_tensor(
            randtool("float", -1, 1, [16, 16, 10, 10]).astype('float32')))
    obj.run()
# if __name__ == '__main__':
# test_Conv2DTranspose_9_10_11_12_group()
# test_Conv2DTranspose_9_output_padding_3()
# test_Conv2DTranspose_9_output_padding_2()
# test_Conv2DTranspose_9_output_padding_1()
# test_Conv2DTranspose_9_output_padding()
# test_Conv2DTranspose_9_10_11_12()
7,338 | cache test | import concurrent.futures
import threading
import time
import cachetools
import pytest
import large_image.cache_util.cache
from large_image import config
from large_image.cache_util import (LruCacheMetaclass, MemCache, cachesClear,
cachesInfo, getTileCache, methodcache,
strhash)
class Fib:
    """Naive doubly-recursive Fibonacci, used as a deliberately expensive
    function to exercise caches.  The recursion is essential: once ``num``
    is wrapped by a cache, each recursive ``self.num`` call goes through
    the wrapper and populates the cache for every k."""

    def num(self, k):
        # fib(1) == fib(2) == 1 (and k <= 0 also yields 1, as before).
        if k <= 2:
            return 1
        return self.num(k - 1) + self.num(k - 2)
def METHOD_NAME(specific_cache, maxNum=100):
    """Drive *specific_cache* by memoizing Fib.num through it up to maxNum,
    then spot-check two memoized values."""
    fib = Fib()
    wrapped = cachetools.cached(cache=specific_cache, key=strhash)(fib.num)
    fib.num = wrapped
    fib.num(maxNum)
    if maxNum >= 3:
        assert fib.num(3) == 2
    if maxNum >= 100:
        assert fib.num(100) == 354224848179261915075
def testLRUCacheTools():
    # The plain in-memory cachetools cache must handle the fib workload.
    METHOD_NAME(cachetools.Cache(1000))
def testCacheMemcached():
    # Same workload backed by the memcached cache implementation.
    METHOD_NAME(MemCache())
def testCheckCacheMemcached():
    """After the fib workload, intermediate results are retrievable directly
    from the cache under their strhash keys like "(2,)"."""
    cache = MemCache()
    METHOD_NAME(cache)
    val = cache['(2,)']
    assert val == 1
    val = cache['(100,)']
    assert val == 354224848179261915075
def testBadMemcachedUrl():
    # go though and check if all 100 fib numbers are in cache
    # it is stored in cache as ('fib', #)
    # 192.0.2.x is TEST-NET (unreachable), so the cache degrades to a
    # no-op: computation still succeeds but nothing is retrievable.
    cache = MemCache(url=['192.0.2.254', '192.0.2.253'])
    METHOD_NAME(cache, 3)
    with pytest.raises(KeyError):
        cache['(2,)']
@pytest.mark.singular()
def testGetTileCachePython():
    # Reset module-level cache state so the configured backend is re-read.
    large_image.cache_util.cache._tileCache = None
    large_image.cache_util.cache._tileLock = None
    config.setConfig('cache_backend', 'python')
    tileCache, tileLock = getTileCache()
    assert isinstance(tileCache, cachetools.LRUCache)
@pytest.mark.singular()
def testGetTileCacheMemcached():
    # Reset module-level cache state so the configured backend is re-read.
    large_image.cache_util.cache._tileCache = None
    large_image.cache_util.cache._tileLock = None
    config.setConfig('cache_backend', 'memcached')
    tileCache, tileLock = getTileCache()
    assert isinstance(tileCache, MemCache)
class TestClass:
    """Cache behavior tests that need instance state (self.cache etc.)."""
    def testLRUThreadSafety(self):
        # The cachetools LRU cache is not thread safe, and if two threads ask
        # to evict an old value concurrently, the cache will raise a KeyError
        # and then be in a broken state. Test that we fall back gracefully in
        # this case. Better is to use a threading lock when setting the
        # cache, which should never have the problem.
        self.cache = cachetools.LRUCache(10)
        self.cache_lock = None
        loopSize = 10000
        sumDelta = 2
        def keyFunc(x):
            return x
        @methodcache(keyFunc)
        def add(self, x):
            return x + sumDelta
        def loop():
            sum = 0
            for x in range(loopSize):
                sum += add(self, x)
            sums.append(sum)
        # Without a thread lock
        sums = []
        threadList = [threading.Thread(target=loop) for t in range(5)]
        for t in threadList:
            t.start()
        for t in threadList:
            t.join()
        for sum in sums:
            assert sum == loopSize * (loopSize - 1) / 2 + loopSize * sumDelta
        # With a thread lock
        self.cache = cachetools.LRUCache(10)
        self.cache_lock = threading.Lock()
        sums = []
        threadList = [threading.Thread(target=loop) for t in range(5)]
        for t in threadList:
            t.start()
        for t in threadList:
            t.join()
        for sum in sums:
            assert sum == loopSize * (loopSize - 1) / 2 + loopSize * sumDelta
    class ExampleWithMetaclass(metaclass=LruCacheMetaclass):
        # Instances are cached by constructor arguments via the metaclass;
        # sleeping in __init__ makes construction measurably slow so the
        # key-lock test below can observe deduplication.
        cacheName = 'test'
        cacheMaxSize = 4
        def __init__(self, arg):
            if isinstance(arg, (int, float)):
                time.sleep(arg)
    @pytest.mark.singular()
    def testCachesInfo(self):
        cachesClear()
        large_image.cache_util.cache._tileCache = None
        large_image.cache_util.cache._tileLock = None
        assert cachesInfo()['test']['used'] == 0
        assert 'tileCache' not in cachesInfo()
        self.ExampleWithMetaclass('test')
        assert cachesInfo()['test']['used'] == 1
        config.setConfig('cache_backend', 'python')
        getTileCache()
        assert 'tileCache' in cachesInfo()
        large_image.cache_util.cache._tileCache = None
        large_image.cache_util.cache._tileLock = None
        config.setConfig('cache_backend', 'memcached')
        getTileCache()
        # memcached shows an items record as well
        assert 'items' in cachesInfo()['tileCache']
    @pytest.mark.singular()
    def testCachesKeyLock(self):
        cachesClear()
        assert cachesInfo()['test']['used'] == 0
        starttime = time.time()
        # Two threads construct with arg 3 (deduplicated by the key lock),
        # one with arg 2, so wall time should be near max(3, 2), not 3+3+2.
        with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
            executor.map(self.ExampleWithMetaclass, [3, 3, 2])
        endtime = time.time()
        # This really should be close to 3
        assert endtime - starttime < 6
        assert cachesInfo()['test']['used'] == 2
    @pytest.mark.singular()
    def testCachesClear(self):
        cachesClear()
        large_image.cache_util.cache._tileCache = None
        large_image.cache_util.cache._tileLock = None
        config.setConfig('cache_backend', 'python')
        self.ExampleWithMetaclass('test')
        getTileCache()
        assert cachesInfo()['test']['used'] == 1
        cachesClear()
        assert cachesInfo()['test']['used'] == 0
7,339 | test substitution with square bracket | """
Pygments regex lexer tests
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import time
import pytest
from pygments.token import Keyword, Name, String, Text
from pygments.lexers.perl import PerlLexer
@pytest.fixture(scope='module')
def lexer():
    # One shared PerlLexer instance for the whole module.
    yield PerlLexer()
# Test runaway regexes.
# A previous version of the Perl lexer would spend a great deal of
# time backtracking when given particular strings. These tests show that
# the runaway backtracking doesn't happen any more (at least for the given
# cases).
# Test helpers.
def assert_single_token(lexer, s, token):
    """Show that a given string generates only one token."""
    produced = list(lexer.get_tokens_unprocessed(s))
    assert len(produced) == 1
    _, tok_type, tok_text = produced[0]
    assert tok_text == s
    assert tok_type == token
def assert_tokens(lexer, strings, expected_tokens):
    """Show that a given string generates the expected tokens."""
    produced = list(lexer.get_tokens_unprocessed(''.join(strings)))
    assert [text for _, _, text in produced] == strings
    assert [tok_type for _, tok_type, _ in produced] == expected_tokens
def assert_fast_tokenization(lexer, s):
    """Show that a given string is tokenized quickly; return the tokens."""
    # Use a monotonic clock for the duration: time.time() can jump when the
    # system clock is adjusted, which could fake (or mask) a slow run.
    start = time.perf_counter()
    tokens = list(lexer.get_tokens_unprocessed(s))
    elapsed = time.perf_counter() - start
    # Isn't 10 seconds kind of a long time? Yes, but we don't want false
    # positives when the tests are starved for CPU time.
    if elapsed > 10:
        pytest.fail('tokenization took too long')
    return tokens
# Strings.
def test_single_quote_strings(lexer):
    # One String token; a long backslash run must not backtrack forever.
    assert_single_token(lexer, r"'foo\tbar\\\'baz'", String)
    assert_fast_tokenization(lexer, "'" + '\\'*999)
def test_double_quote_strings(lexer):
    # One String token; a long backslash run must not backtrack forever.
    assert_single_token(lexer, r'"foo\tbar\\\"baz"', String)
    assert_fast_tokenization(lexer, '"' + '\\'*999)
def test_backtick_strings(lexer):
    # Backtick strings use the String.Backtick token; no runaway backtracking.
    assert_single_token(lexer, r'`foo\tbar\\\`baz`', String.Backtick)
    assert_fast_tokenization(lexer, '`' + '\\'*999)
# Regex matches with various delimiters.
def test_match(lexer):
    # Bare //-delimited match is a single Regex token.
    assert_single_token(lexer, r'/aa\tbb/', String.Regex)
    assert_fast_tokenization(lexer, '/' + '\\'*999)
def test_match_with_slash(lexer):
    # m// split into the 'm' and the pattern, both Regex tokens.
    assert_tokens(lexer, ['m', '/\n\\t\\\\/'], [String.Regex, String.Regex])
    assert_fast_tokenization(lexer, 'm/xxx\n' + '\\'*999)
def test_match_with_bang(lexer):
    # m!...! with escaped delimiter inside.
    assert_tokens(lexer, ['m', r'!aa\t\!bb!'], [String.Regex, String.Regex])
    assert_fast_tokenization(lexer, 'm!' + '\\'*999)
def test_match_with_brace(lexer):
    # m{...} with escaped closing brace inside.
    assert_tokens(lexer, ['m', r'{aa\t\}bb}'], [String.Regex, String.Regex])
    assert_fast_tokenization(lexer, 'm{' + '\\'*999)
def test_match_with_angle_brackets(lexer):
    # m<...> with escaped closing bracket inside.
    assert_tokens(lexer, ['m', r'<aa\t\>bb>'], [String.Regex, String.Regex])
    assert_fast_tokenization(lexer, 'm<' + '\\'*999)
def test_match_with_parenthesis(lexer):
    # m(...) with escaped closing parenthesis inside.
    assert_tokens(lexer, ['m', r'(aa\t\)bb)'], [String.Regex, String.Regex])
    assert_fast_tokenization(lexer, 'm(' + '\\'*999)
def test_match_with_at_sign(lexer):
assert_tokens(lexer, ['m', r'@aa\t\@bb@'], [String.Regex, String.Regex])
assert_fast_tokenization(lexer, 'm@' + '\\'*999)
def test_match_with_percent_sign(lexer):
assert_tokens(lexer, ['m', r'%aa\t\%bb%'], [String.Regex, String.Regex])
assert_fast_tokenization(lexer, 'm%' + '\\'*999)
def test_match_with_dollar_sign(lexer):
assert_tokens(lexer, ['m', r'$aa\t\$bb$'], [String.Regex, String.Regex])
assert_fast_tokenization(lexer, 'm$' + '\\'*999)
# Regex substitutions with various delimiters.

def test_substitution_with_slash(lexer):
    # Full s/pat/repl/flags form is one String.Regex token.
    assert_single_token(lexer, 's/aaa/bbb/g', String.Regex)
    assert_fast_tokenization(lexer, 's/foo/' + '\\'*999)

def test_substitution_with_at_sign(lexer):
    assert_single_token(lexer, r's@aaa@bbb@g', String.Regex)
    assert_fast_tokenization(lexer, 's@foo@' + '\\'*999)

def test_substitution_with_percent_sign(lexer):
    assert_single_token(lexer, r's%aaa%bbb%g', String.Regex)
    assert_fast_tokenization(lexer, 's%foo%' + '\\'*999)

def test_substitution_with_brace(lexer):
    # Bracketing delimiters: only the first pattern part is asserted here.
    assert_single_token(lexer, r's{aaa}', String.Regex)
    assert_fast_tokenization(lexer, 's{' + '\\'*999)

def test_substitution_with_angle_bracket(lexer):
    assert_single_token(lexer, r's<aaa>', String.Regex)
    assert_fast_tokenization(lexer, 's<' + '\\'*999)

def METHOD_NAME(lexer):
    assert_single_token(lexer, r's[aaa]', String.Regex)
    assert_fast_tokenization(lexer, 's[' + '\\'*999)

def test_substitution_with_parenthesis(lexer):
    assert_single_token(lexer, r's(aaa)', String.Regex)
    assert_fast_tokenization(lexer, 's(' + '\\'*999)
# Namespaces/modules

def test_package_statement(lexer):
    # The name after 'package' lexes as a namespace, including :: paths.
    assert_tokens(lexer, ['package', ' ', 'Foo'],
                  [Keyword, Text.Whitespace, Name.Namespace])
    assert_tokens(lexer, ['package', ' ', 'Foo::Bar'],
                  [Keyword, Text.Whitespace, Name.Namespace])

def test_use_statement(lexer):
    assert_tokens(lexer, ['use', ' ', 'Foo'],
                  [Keyword, Text.Whitespace, Name.Namespace])
    assert_tokens(lexer, ['use', ' ', 'Foo::Bar'],
                  [Keyword, Text.Whitespace, Name.Namespace])

def test_no_statement(lexer):
    assert_tokens(lexer, ['no', ' ', 'Foo'],
                  [Keyword, Text.Whitespace, Name.Namespace])
    assert_tokens(lexer, ['no', ' ', 'Foo::Bar'],
                  [Keyword, Text.Whitespace, Name.Namespace])

def test_require_statement(lexer):
    assert_tokens(lexer, ['require', ' ', 'Foo'],
                  [Keyword, Text.Whitespace, Name.Namespace])
    assert_tokens(lexer, ['require', ' ', 'Foo::Bar'],
                  [Keyword, Text.Whitespace, Name.Namespace])
    # require also accepts a quoted file path, lexed as a plain string.
    assert_tokens(lexer, ['require', ' ', '"Foo/Bar.pm"'],
                  [Keyword, Text.Whitespace, String])
import requests
import frappe
# Roles shipped by the framework / core apps. These must never be toggled
# when enabling or disabling app-specific roles (see get_erpnext_roles).
STANDARD_ROLES = [
    # standard roles
    "Administrator",
    "All",
    "Guest",
    # accounts
    "Accounts Manager",
    "Accounts User",
    # projects
    "Projects User",
    "Projects Manager",
    # framework
    "Blogger",
    "Dashboard Manager",
    "Inbox User",
    "Newsletter Manager",
    "Prepared Report User",
    "Report Manager",
    "Script Manager",
    "System Manager",
    "Website Manager",
    "Workspace Manager",
]
@frappe.whitelist(allow_guest=True)
def get_add_on_details(plan: str) -> dict[str, int]:
    """Return the employee head-count billed as an add-on for a SAAS plan.

    Each plan includes a fixed employee allowance; active employees beyond
    that allowance are billed as add-ons. A plan without a known allowance
    reports zero add-on employees.
    """
    # Employees included in each paid plan before add-on billing kicks in.
    plan_allowances = {"Basic": 25, "Essential": 50, "Professional": 100}
    allowance = plan_allowances.get(plan)

    extra_employees = 0
    if allowance:
        # Only bill the overflow; never a negative count.
        extra_employees = max(get_active_employees() - allowance, 0)

    return {"employees": extra_employees}
def get_active_employees() -> int:
    # Billable head count: Employee records currently marked Active.
    return frappe.db.count("Employee", {"status": "Active"})
@frappe.whitelist(allow_guest=True)
def subscription_updated(app: str, plan: str):
    # Webhook-style hook: re-evaluate ERPNext access whenever the hrms or
    # erpnext subscription plan changes.
    if app in ["hrms", "erpnext"] and plan:
        update_erpnext_access()
def update_erpnext_access():
    """Toggle ERPNext workspaces and roles to match the site's subscriptions.

    Does nothing outside managed Frappe HR SAAS sites or when the site has
    no HRMS subscription. Otherwise ERPNext workspaces/roles are enabled
    when ERPNext is also subscribed and disabled when it is not.
    """
    # Only managed Frappe HR SAAS sites should be touched.
    if not frappe.utils.get_url().endswith(".frappehr.com"):
        return

    # BUG FIX: the previous implementation unconditionally disabled ERPNext
    # access, contradicting its own docstring. Decide based on the actual
    # subscription state instead.
    if not has_subscription(frappe.conf.sk_hrms):
        # No HRMS subscription: leave everything untouched.
        return

    disable = not has_subscription(frappe.conf.sk_erpnext_smb or frappe.conf.sk_erpnext)
    update_erpnext_workspaces(disable)
    update_erpnext_roles(disable)
def update_erpnext_workspaces(disable: bool = True):
    """Hide (or re-publish) the built-in ERPNext workspaces.

    Best-effort: each workspace is saved independently and any failure
    (e.g. a workspace missing on this site) is silently skipped.
    """
    erpnext_workspaces = [
        "Home",
        "Assets",
        "Accounting",
        "Buying",
        "CRM",
        "ERPNext Integrations",
        "ERPNext Settings",
        "Manufacturing",
        "Quality",
        "Selling",
        "Stock",
        "Support",
    ]

    for workspace in erpnext_workspaces:
        try:
            workspace_doc = frappe.get_doc("Workspace", workspace)
            # Skip link/validation checks so a partially configured site
            # cannot block the toggle.
            workspace_doc.flags.ignore_links = True
            workspace_doc.flags.ignore_validate = True
            # public=0 hides the workspace from all users.
            workspace_doc.public = 0 if disable else 1
            workspace_doc.save()
        except Exception:
            # Deliberate best-effort: one bad workspace must not abort the rest.
            pass
def update_erpnext_roles(disable: bool = True):
    """Disable (or re-enable) roles that belong exclusively to ERPNext.

    Best-effort: a role that cannot be saved is silently skipped.
    """
    roles = get_erpnext_roles()
    for role in roles:
        try:
            role_doc = frappe.get_doc("Role", role)
            role_doc.disabled = disable
            # Skip link validation so dangling references don't block the toggle.
            role_doc.flags.ignore_links = True
            role_doc.save()
        except Exception:
            # Deliberate best-effort: one bad role must not abort the rest.
            pass
def get_erpnext_roles() -> set:
    # Roles belonging to ERPNext only: exclude roles HRMS also defines and
    # the framework's standard roles so those are never disabled.
    erpnext_roles = get_roles_for_app("erpnext")
    hrms_roles = get_roles_for_app("hrms")
    return erpnext_roles - hrms_roles - set(STANDARD_ROLES)
def get_roles_for_app(app_name: str) -> set:
    """Collect every role referenced by the doctypes belonging to *app_name*."""
    modules = get_modules_by_app(app_name)
    doctypes = get_doctypes_by_modules(modules)
    return METHOD_NAME(doctypes)
def get_modules_by_app(app_name: str) -> list:
    # Module Def names registered by the given app.
    return frappe.db.get_all("Module Def", filters={"app_name": app_name}, pluck="name")


def get_doctypes_by_modules(modules: list) -> list:
    # DocType names belonging to any of the given modules.
    return frappe.db.get_all("DocType", filters={"module": ("in", modules)}, pluck="name")
def METHOD_NAME(doctypes: list) -> set:
    """Return the set of roles appearing in the permission rules of *doctypes*."""
    # BUG FIX: the previous implementation reused ``d`` for both the outer
    # (doctype) and inner (permission) loop, shadowing the doctype name.
    # It happened to work only because the doctype was read before the inner
    # loop ran; distinct names remove the trap entirely.
    roles = set()
    for doctype in doctypes:
        for perm in frappe.get_meta(doctype).permissions:
            roles.add(perm.role)
    return roles
def hide_erpnext() -> bool:
    """Return True when ERPNext should be hidden.

    That is the case exactly when the site pays for HRMS but not for
    ERPNext; sites without an HRMS subscription are left alone (False).
    """
    has_hr = has_subscription(frappe.conf.sk_hrms)
    has_erpnext = has_subscription(frappe.conf.sk_erpnext_smb or frappe.conf.sk_erpnext)
    return bool(has_hr and not has_erpnext)
def has_subscription(secret_key) -> bool:
    """Return True when the marketplace subscription for *secret_key* is active.

    Any status other than "Active" (or a missing "message" key) counts as
    inactive. Network/HTTP errors propagate to the caller.
    """
    # NOTE(review): the secret key travels in the query string, where it may
    # end up in server logs; consider moving it to the request body.
    url = f"https://frappecloud.com/api/method/press.api.developer.marketplace.get_subscription_status?secret_key={secret_key}"
    # requests.post is the idiomatic spelling of requests.request(method="POST").
    response = requests.post(url, timeout=5)
    return response.json().get("message") == "Active"
#################################################################################
# WaterTAP Copyright (c) 2020-2023, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National Laboratory,
# National Renewable Energy Laboratory, and National Energy Technology
# Laboratory (subject to receipt of any required approvals from the U.S. Dept.
# of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#################################################################################
# Import Pyomo libraries
from pyomo.environ import (
Var,
NonNegativeReals,
value,
)
from idaes.core import declare_process_block_class
from idaes.core.util import scaling as iscale
from idaes.core.util.misc import add_object_reference
from watertap.core import ( # noqa # pylint: disable=unused-import
ConcentrationPolarizationType,
MembraneChannel0DBlock,
MassTransferCoefficient,
PressureChangeType,
)
from watertap.core.membrane_channel0d import CONFIG_Template
from watertap.unit_models.reverse_osmosis_base import (
ReverseOsmosisBaseData,
_add_has_full_reporting,
)
# Module authors, kept for attribution.
__author__ = "Tim Bartholomew, Adam Atia"
@declare_process_block_class("ReverseOsmosis0D")
class ReverseOsmosisData(ReverseOsmosisBaseData):
    """
    Standard RO Unit Model Class:
    - zero dimensional model
    - steady state only
    - single liquid phase only
    """

    CONFIG = CONFIG_Template()
    _add_has_full_reporting(CONFIG)

    def _add_feed_side_membrane_channel_and_geometry(self):
        # Build membrane channel control volume
        self.feed_side = MembraneChannel0DBlock(
            dynamic=False,
            has_holdup=False,
            property_package=self.config.property_package,
            property_package_args=self.config.property_package_args,
        )
        # Length/width variables (and the area constraint) are only needed
        # when pressure drop or the mass-transfer coefficient must be
        # computed from the channel geometry.
        if (self.config.pressure_change_type != PressureChangeType.fixed_per_stage) or (
            self.config.mass_transfer_coefficient == MassTransferCoefficient.calculated
        ):
            self._add_length_and_width()
            self.feed_side.add_geometry(length_var=self.length, width_var=self.width)
            self._add_area(include_constraint=True)
        else:
            self.feed_side.add_geometry(length_var=None, width_var=None)
            self._add_area(include_constraint=False)

    def _add_deltaP(self):
        # Expose the channel's pressure drop as a unit-level attribute.
        add_object_reference(self, "deltaP", self.feed_side.deltaP)

    def _add_mass_transfer(self):
        units_meta = self.config.property_package.get_metadata().get_derived_units

        # not in 1DRO
        @self.Constraint(
            self.flowsheet().config.time, self.length_domain, doc="Permeate flowrate"
        )
        def METHOD_NAME(b, t, x):
            return (
                b.permeate_side[t, x].flow_vol_phase["Liq"]
                == b.mixed_permeate[t].flow_vol_phase["Liq"]
            )

        @self.Expression(self.flowsheet().config.time, doc="Over pressure ratio")
        def over_pressure_ratio(b, t):
            return (
                b.feed_side.properties_out[t].pressure_osm_phase["Liq"]
                - b.permeate_side[t, 1.0].pressure_osm_phase["Liq"]
            ) / b.feed_side.properties_out[t].pressure

        # mass transfer
        def mass_transfer_phase_comp_initialize(b, t, p, j):
            return value(
                self.feed_side.properties_in[t].get_material_flow_terms("Liq", j)
                * self.recovery_mass_phase_comp[t, "Liq", j]
            )

        self.mass_transfer_phase_comp = Var(
            self.flowsheet().config.time,
            self.config.property_package.phase_list,
            self.config.property_package.component_list,
            initialize=mass_transfer_phase_comp_initialize,
            bounds=(0.0, 1e6),
            domain=NonNegativeReals,
            units=units_meta("mass") * units_meta("time") ** -1,
            doc="Mass transfer to permeate",
        )

        # NOTE(review): the first parameter is named ``self`` here while the
        # sibling rules use ``b``; it is the block Pyomo passes to the rule,
        # shadowing the method's ``self``. Behavior is identical, but rename
        # to ``b`` for consistency.
        @self.Constraint(
            self.flowsheet().config.time,
            self.config.property_package.phase_list,
            self.config.property_package.component_list,
            doc="Mass transfer term",
        )
        def eq_mass_transfer_term(self, t, p, j):
            return (
                self.mass_transfer_phase_comp[t, p, j]
                == -self.feed_side.mass_transfer_term[t, p, j]
            )

        # Feed and permeate-side connection
        @self.Constraint(
            self.flowsheet().config.time,
            self.config.property_package.phase_list,
            self.config.property_package.component_list,
            doc="Mass transfer from feed to permeate",
        )
        def eq_connect_mass_transfer(b, t, p, j):
            return (
                b.mixed_permeate[t].get_material_flow_terms(p, j)
                == -b.feed_side.mass_transfer_term[t, p, j]
            )

        # Different expression in 1DRO
        @self.Constraint(
            self.flowsheet().config.time,
            self.config.property_package.phase_list,
            self.config.property_package.component_list,
            doc="Permeate production",
        )
        def eq_permeate_production(b, t, p, j):
            return (
                b.mixed_permeate[t].get_material_flow_terms(p, j)
                == b.area * b.flux_mass_phase_comp_avg[t, p, j]
            )

        # Not in 1DRO
        @self.Constraint(
            self.flowsheet().config.time,
            self.length_domain,
            self.config.property_package.solute_set,
            doc="Permeate mass fraction",
        )
        def eq_mass_frac_permeate(b, t, x, j):
            return (
                b.permeate_side[t, x].mass_frac_phase_comp["Liq", j]
                * sum(
                    self.flux_mass_phase_comp[t, x, "Liq", jj]
                    for jj in self.config.property_package.component_list
                )
                == self.flux_mass_phase_comp[t, x, "Liq", j]
            )

    def calculate_scaling_factors(self):
        # Seed the solvent-density scale from the inlet stream before the
        # base class propagates scaling factors.
        if iscale.get_scaling_factor(self.dens_solvent) is None:
            sf = iscale.get_scaling_factor(
                self.feed_side.properties_in[0].dens_mass_phase["Liq"]
            )
            iscale.set_scaling_factor(self.dens_solvent, sf)

        super().calculate_scaling_factors()

        # Scale the transfer terms consistently with the inlet flow terms.
        for (t, p, j), v in self.mass_transfer_phase_comp.items():
            sf = iscale.get_scaling_factor(
                self.feed_side.properties_in[t].get_material_flow_terms(p, j)
            )
            if iscale.get_scaling_factor(v) is None:
                iscale.set_scaling_factor(v, sf)
            v = self.feed_side.mass_transfer_term[t, p, j]
            if iscale.get_scaling_factor(v) is None:
                iscale.set_scaling_factor(v, sf)

        if hasattr(self, "length"):
            if iscale.get_scaling_factor(self.length) is None:
                iscale.set_scaling_factor(self.length, 1)

        if hasattr(self, "width"):
            if iscale.get_scaling_factor(self.width) is None:
                iscale.set_scaling_factor(self.width, 1)
import time
from .base import AugerBaseApi
from .cluster import AugerClusterApi
from ..exceptions import AugerException
class AugerProjectApi(AugerBaseApi):
    """Auger Project API.

    Wraps REST calls for creating, deploying (starting), reconfiguring and
    undeploying (stopping) an Auger project.
    """

    def __init__(self, ctx, org_api,
                 project_name=None, project_id=None):
        super(AugerProjectApi, self).__init__(
            ctx, org_api, project_name, project_id)
        assert org_api is not None, 'Organization must be set for Project'
        self._set_api_request_path('AugerProjectApi')

    def is_running(self):
        # A project counts as running only in the exact 'running' state.
        return self.properties().get('status') == 'running'

    def create(self):
        # Create the project under the parent organization.
        return self._call_create({
            'name': self.object_name, 'organization_id': self.parent_api.oid})

    def start(self):
        project_properties = self.properties()
        if project_properties.get('status') != 'running':
            self.ctx.log('Starting Project...')
            self._do_start(project_properties)
            self.ctx.log('Started Project %s' % self.object_name)
        # else:
        #     self.ctx.log('Project is already running...')
        # Always reconcile the deployed cluster with the local settings,
        # even when the project was already running.
        self._update_cluster_settings()

    def METHOD_NAME(self, local_config = True):
        # Cluster configuration source: the local settings file (default) or
        # the properties of the currently deployed project.
        if local_config:
            return AugerClusterApi.get_cluster_settings(self.ctx)
        else:
            project_properties = self.properties()
            result = {
                'worker_type_id': project_properties.get('worker_type_id'),
                'workers_count': project_properties.get('workers_count')
            }
            if 'kubernetes_stack' in project_properties:
                result['kubernetes_stack'] = project_properties.get('kubernetes_stack')
            return result

    def update_cluster_config(self, params):
        """Push cluster settings from *params*, returning True if an update ran."""
        remote_cluster = self.METHOD_NAME(local_config=False)
        update_properties = {}
        props_to_update = ['worker_type_id', 'workers_count', 'kubernetes_stack']
        # Only send properties that differ from the deployed cluster; a value
        # missing remotely counts as "already equal" (defaulted to params').
        for prop in props_to_update:
            if remote_cluster.get(prop, params.get(prop)) != params.get(prop):
                update_properties[prop] = params.get(prop)

        if update_properties:
            self.ctx.log('Update project cluster: %s' % update_properties)
            update_properties['id'] = self.object_id
            self._call_update(update_properties, progress=['undeployed', 'deployed', 'scaling', 'zero_scaled', 'deploying'])
            return True

    def _update_cluster_settings(self):
        # Reconcile the deployed cluster with the locally configured settings.
        local_cluster = self.METHOD_NAME(local_config=True)
        self.update_cluster_config(local_cluster)

    def _do_start(self, project_properties):
        self._ensure_object_id()
        status = project_properties.get('status')
        project_status = ['deployed', 'deploying']
        if status in project_status:
            # Deployment already in flight: just wait for it to settle.
            return self.wait_for_status(project_status)

        cluster_id = project_properties.get('cluster_id')
        cluster_api = AugerClusterApi(self.ctx, self, cluster_id)
        cluster_settings = cluster_api.get_cluster_settings(self.ctx)
        # self.rest_api.call('update_project', {
        #     'id': self.object_id,
        #     'cluster_autoterminate_minutes':
        #         cluster_settings.get('autoterminate_minutes')})
        try:
            self.rest_api.call('deploy_project', {
                'id': self.object_id,
                'worker_type_id': cluster_settings.get('worker_type_id'),
                'workers_count' : cluster_settings.get('workers_count'),
                'kubernetes_stack': cluster_settings.get('kubernetes_stack')})
        except:
            # NOTE(review): bare except — deploy errors are swallowed when the
            # project still reaches a deploy-ish state; presumably this
            # tolerates "already deployed" races. Confirm before narrowing.
            project_properties = self.properties()
            status = project_properties.get('status')
            project_status = ['deployed', 'deploying', 'running']
            if not status in project_status:
                raise

        result = self.wait_for_status(
            ['undeployed', 'deployed', 'scaling', 'zero_scaled', 'deploying'])
        # NOTE(review): fixed 20s pause after deployment — presumably gives
        # the cluster time to settle before first use; confirm necessity.
        time.sleep(20)
        return result

    def stop(self):
        # Undeploy unless already undeployed, then wait for the transition.
        if self.status() != 'undeployed':
            self.rest_api.call(
                'undeploy_project', {'id': self.object_id})
        return self.wait_for_status(['running', 'undeploying'])
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from botocore.exceptions import ClientError
from pcluster.aws.aws_resources import StackInfo
from pcluster.aws.common import AWSClientError, AWSExceptionHandler, Boto3Client, StackNotFoundError
from pcluster.constants import PCLUSTER_IMAGE_ID_TAG, PCLUSTER_VERSION_TAG
from pcluster.utils import remove_none_values
# Module-level logger for this CloudFormation client.
LOGGER = logging.getLogger(__name__)
class CfnClient(Boto3Client):
    """Implement CFN Boto3 client.

    Thin wrapper over boto3 'cloudformation' with pcluster error handling:
    client errors are translated by AWSExceptionHandler decorators and
    throttling is retried where marked.
    """

    def __init__(self):
        super().__init__("cloudformation")

    @AWSExceptionHandler.handle_client_exception
    def create_stack(
        self, stack_name: str, disable_rollback: bool, tags: list, template_body: str, parameters: list = None
    ):
        """Create CFN stack by using the given template."""
        # Drop None-valued optional kwargs so boto3 doesn't reject them.
        optional_args = {
            "Tags": tags,
            "Parameters": parameters,
        }
        optional_args_with_value = remove_none_values(optional_args)
        return self._client.create_stack(
            StackName=stack_name,
            TemplateBody=template_body,
            Capabilities=["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"],
            DisableRollback=disable_rollback,
            **optional_args_with_value,
        )

    @AWSExceptionHandler.handle_client_exception
    def create_stack_from_url(
        self,
        stack_name: str,
        disable_rollback: bool,
        tags: list,
        template_url: str,
        capabilities: str = "CAPABILITY_IAM",
        parameters: list = None,
    ):
        """Create CFN stack by using the given template url."""
        optional_args = {
            "Tags": tags,
            "Parameters": parameters,
        }
        optional_args_with_value = remove_none_values(optional_args)
        return self._client.create_stack(
            StackName=stack_name,
            TemplateURL=template_url,
            Capabilities=[capabilities, "CAPABILITY_NAMED_IAM"],
            DisableRollback=disable_rollback,
            **optional_args_with_value,
        )

    @AWSExceptionHandler.handle_client_exception
    def delete_stack(self, stack_name: str):
        """Delete CFN stack."""
        return self._client.delete_stack(StackName=stack_name)

    @AWSExceptionHandler.handle_client_exception
    def update_stack(self, stack_name: str, updated_template: str, params: list):
        """Update CFN stack."""
        return self._client.update_stack(
            StackName=stack_name,
            TemplateBody=json.dumps(updated_template, indent=2),  # Indent so it looks nice in the console
            Parameters=params,
            Capabilities=["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"],
        )

    @AWSExceptionHandler.handle_client_exception
    def update_stack_from_url(self, stack_name: str, template_url: str, tags: list = None, parameters: list = None):
        """Update CFN stack by using the given template url."""
        optional_args = {
            "Tags": tags,
            "Parameters": parameters,
        }
        optional_args_with_value = remove_none_values(optional_args)
        return self._client.update_stack(
            StackName=stack_name,
            TemplateURL=template_url,
            Capabilities=["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"],
            **optional_args_with_value,
        )

    @AWSExceptionHandler.handle_client_exception
    @AWSExceptionHandler.retry_on_boto3_throttling
    def describe_stack(self, stack_name: str):
        """Get information for the given stack.

        Raises StackNotFoundError when CFN reports a validation error
        (the API's way of signalling a missing stack).
        """
        try:
            return self._client.describe_stacks(StackName=stack_name).get("Stacks")[0]
        except ClientError as e:
            if e.response["Error"]["Code"] == AWSClientError.ErrorCode.VALIDATION_ERROR.value:
                LOGGER.info("Could not describe CloudFormation stack %s: %s", stack_name, e)
                raise StackNotFoundError(function_name="describe_stack", stack_name=stack_name)
            raise

    @AWSExceptionHandler.handle_client_exception
    @AWSExceptionHandler.retry_on_boto3_throttling
    def get_stack_events(self, stack_name, next_token=None):
        """Return the events of a stack, start from next_token if provided."""
        if next_token:
            return self._client.describe_stack_events(StackName=stack_name, NextToken=next_token)
        else:
            return self._client.describe_stack_events(StackName=stack_name)

    @staticmethod
    def format_event(event):
        """Format CFN Stack event."""
        return "{} {} {} {} {}".format(
            event.get("Timestamp").isoformat(timespec="seconds"),
            event.get("ResourceStatus"),
            event.get("ResourceType"),
            event.get("LogicalResourceId"),
            event.get("ResourceStatusReason", ""),
        )

    def stack_exists(self, stack_name: str):
        """Return a boolean describing whether or not a stack by the given name exists."""
        try:
            self.describe_stack(stack_name)
            return True
        except StackNotFoundError:
            return False

    @AWSExceptionHandler.handle_client_exception
    def get_stack_template(self, stack_name: str):
        """Get stack template."""
        return self._client.get_template(StackName=stack_name).get("TemplateBody")

    @AWSExceptionHandler.handle_client_exception
    def list_pcluster_stacks(self, next_token=None):
        """List existing pcluster cluster stacks."""
        stacks, result_token = self.METHOD_NAME(PCLUSTER_VERSION_TAG, next_token)
        # Only return stacks without image-id tag, which means they are cluster stacks.
        return [stack for stack in stacks if StackInfo(stack).get_tag(PCLUSTER_IMAGE_ID_TAG) is None], result_token

    def describe_stack_resource(self, stack_name: str, logic_resource_id: str):
        """Get stack resource information."""
        try:
            return self._client.describe_stack_resource(StackName=stack_name, LogicalResourceId=logic_resource_id)
        except Exception:
            # NOTE(review): broad catch converts every failure (throttling,
            # permissions, ...) into "not found"; consider narrowing to
            # ClientError and chaining the cause.
            raise AWSClientError(
                function_name="describe_stack_resource", message=f"No resource {logic_resource_id} found."
            )

    @AWSExceptionHandler.handle_client_exception
    def describe_stack_resources(self, stack_name: str):
        """Get stack resources information."""
        response = self._client.describe_stack_resources(StackName=stack_name).get("StackResources")
        return {resource["LogicalResourceId"]: resource for resource in response}  # Build dictionary for better query.

    @AWSExceptionHandler.handle_client_exception
    def get_imagebuilder_stacks(self, next_token=None):
        """List existing imagebuilder stacks."""
        return self.METHOD_NAME(PCLUSTER_IMAGE_ID_TAG, next_token)

    def METHOD_NAME(self, tag, next_token=None):
        """Return (stacks, next_token): top-level stacks (no ParentId) carrying *tag*."""
        describe_stacks_kwargs = {}
        if next_token:
            describe_stacks_kwargs["NextToken"] = next_token
        result = self._client.describe_stacks(**describe_stacks_kwargs)
        stack_list = []
        for stack in result.get("Stacks", []):
            # Nested stacks have a ParentId; skip them.
            if stack.get("ParentId") is None and StackInfo(stack).get_tag(tag):
                stack_list.append(stack)
        return stack_list, result.get("NextToken")
import unittest
from enum import Enum
from robot.running.arguments.argumentspec import ArgumentSpec, ArgInfo
from robot.utils.asserts import assert_equal
class TestStringRepr(unittest.TestCase):
    # Each test builds an ArgumentSpec and checks its string form; truthiness
    # of the spec must match non-emptiness of that string (see _verify).

    def test_empty(self):
        self._verify('')

    def test_normal(self):
        self._verify('a, b', ['a', 'b'])

    def test_non_ascii_names(self):
        self._verify('nön, äscii', ['nön', 'äscii'])

    def test_default(self):
        self._verify('a, b=c', ['a', 'b'], defaults={'b': 'c'})
        self._verify('nön=äscii', ['nön'], defaults={'nön': 'äscii'})
        self._verify('i=42', ['i'], defaults={'i': 42})

    def test_default_as_bytes(self):
        # Bytes defaults render via their string form, escapes included.
        self._verify('b=ytes', ['b'], defaults={'b': b'ytes'})
        self._verify('ä=\\xe4', ['ä'], defaults={'ä': b'\xe4'})

    def test_type_as_class(self):
        self._verify('a: int, b: bool', ['a', 'b'], types={'a': int, 'b': bool})

    def test_type_as_string(self):
        self._verify('a: Integer, b: Boolean', ['a', 'b'],
                     types={'a': 'Integer', 'b': 'Boolean'})

    def test_type_and_default(self):
        # With both a type and a default, spaces surround the '='.
        self._verify('arg: int = 1', ['arg'], types=[int], defaults={'arg': 1})

    def METHOD_NAME(self):
        # Positional-only args are followed by the '/' marker.
        self._verify('a, /', positional_only=['a'])
        self._verify('a, /, b', positional_only=['a'], positional_or_named=['b'])

    def test_positional_only_with_default(self):
        self._verify('a, b=2, /', positional_only=['a', 'b'], defaults={'b': 2})

    def test_positional_only_with_type(self):
        self._verify('a: int, b, /', positional_only=['a', 'b'], types=[int])
        self._verify('a: int, b: float, /, c: bool, d',
                     positional_only=['a', 'b'],
                     positional_or_named=['c', 'd'],
                     types=[int, float, bool])

    def test_positional_only_with_type_and_default(self):
        self._verify('a: int = 1, b=2, /',
                     positional_only=['a', 'b'],
                     types={'a': int},
                     defaults={'a': 1, 'b': 2})

    def test_varargs(self):
        self._verify('*varargs',
                     var_positional='varargs')
        self._verify('a, *b',
                     positional_or_named=['a'],
                     var_positional='b')

    def test_varargs_with_type(self):
        self._verify('*varargs: float',
                     var_positional='varargs',
                     types={'varargs': float})
        self._verify('a: int, *b: list[int]',
                     positional_or_named=['a'],
                     var_positional='b',
                     types=[int, 'list[int]'])

    def test_named_only_without_varargs(self):
        # Without varargs a bare '*' marker introduces named-only args.
        self._verify('*, kwo',
                     named_only=['kwo'])

    def test_named_only_with_varargs(self):
        self._verify('*varargs, k1, k2',
                     var_positional='varargs',
                     named_only=['k1', 'k2'])

    def test_named_only_with_default(self):
        self._verify('*, k=1, w, o=3',
                     named_only=['k', 'w', 'o'],
                     defaults={'k': 1, 'o': 3})

    def test_named_only_with_types(self):
        self._verify('*, k: int, w: float, o',
                     named_only=['k', 'w', 'o'],
                     types=[int, float])
        self._verify('x: int, *y: float, z: bool',
                     positional_or_named=['x'],
                     var_positional='y',
                     named_only=['z'],
                     types=[int, float, bool])

    def test_named_only_with_types_and_defaults(self):
        self._verify('x: int = 1, *, y: float, z: bool = 3',
                     positional_or_named=['x'],
                     named_only=['y', 'z'],
                     types=[int, float, bool],
                     defaults={'x': 1, 'z': 3})

    def test_kwargs(self):
        self._verify('**kws',
                     var_named='kws')
        self._verify('a, b=c, *d, e=f, g, **h',
                     positional_or_named=['a', 'b'],
                     var_positional='d',
                     named_only=['e', 'g'],
                     var_named='h',
                     defaults={'b': 'c', 'e': 'f'})

    def test_kwargs_with_types(self):
        self._verify('**kws: dict[str, int]',
                     var_named='kws',
                     types={'kws': 'dict[str, int]'})
        self._verify('a: int, /, b: float, *c: list[int], d: bool, **e: dict[int, str]',
                     positional_only=['a'],
                     positional_or_named=['b'],
                     var_positional='c',
                     named_only=['d'],
                     var_named='e',
                     types=[int, float, 'list[int]', bool, 'dict[int, str]'])

    def test_enum_with_few_members(self):
        class Small(Enum):
            ONLY_FEW_MEMBERS = 1
            SO_THEY_CAN = 2
            BE_PRETTY_LONG = 3
        self._verify('e: Small',
                     ['e'], types=[Small])

    def test_enum_with_many_short_members(self):
        class ManyShort(Enum):
            ONE = 1
            TWO = 2
            THREE = 3
            FOUR = 4
            FIVE = 5
            SIX = 6
        self._verify('e: ManyShort',
                     ['e'], types=[ManyShort])

    def test_enum_with_many_long_members(self):
        class Big(Enum):
            MANY_MEMBERS = 1
            THAT_ARE_LONGISH = 2
            MEANS_THEY_ALL_DO_NOT_FIT = 3
            AND_SOME_ARE_OMITTED = 4
            FROM_THE_END = 5
        self._verify('e: Big',
                     ['e'], types=[Big])

    def _verify(self, expected, positional_or_named=None, **config):
        # Helper: str(spec) must equal *expected*, and the spec is truthy
        # exactly when that string is non-empty.
        spec = ArgumentSpec(positional_or_named=positional_or_named, **config)
        assert_equal(str(spec), expected)
        assert_equal(bool(spec), bool(expected))
class TestArgInfo(unittest.TestCase):

    def test_required_without_default(self):
        # A positional/named argument is required unless it has a default.
        for kind in (ArgInfo.POSITIONAL_ONLY,
                     ArgInfo.POSITIONAL_OR_NAMED,
                     ArgInfo.NAMED_ONLY):
            assert_equal(ArgInfo(kind).required, True)
            assert_equal(ArgInfo(kind, default=None).required, False)

    def test_never_required(self):
        # Varargs/kwargs and the '/'/'*' marker pseudo-args cannot be required.
        for kind in (ArgInfo.VAR_POSITIONAL,
                     ArgInfo.VAR_NAMED,
                     ArgInfo.POSITIONAL_ONLY_MARKER,
                     ArgInfo.NAMED_ONLY_MARKER):
            assert_equal(ArgInfo(kind).required, False)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
# Public API of this generated module.
__all__ = [
    'GetRouteFilterResult',
    'AwaitableGetRouteFilterResult',
    'get_route_filter',
    'get_route_filter_output',
]
# Auto-generated by the Pulumi code generator; limit hand edits to comments.
@pulumi.output_type
class GetRouteFilterResult:
    """
    Route Filter Resource.
    """
    def __init__(__self__, etag=None, METHOD_NAME=None, ipv6_peerings=None, location=None, name=None, peerings=None, provisioning_state=None, rules=None, tags=None, type=None):
        # Each argument is type-checked and stored through pulumi.set so the
        # @pulumi.getter properties below can retrieve it.
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", METHOD_NAME)
        if ipv6_peerings and not isinstance(ipv6_peerings, list):
            raise TypeError("Expected argument 'ipv6_peerings' to be a list")
        pulumi.set(__self__, "ipv6_peerings", ipv6_peerings)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if peerings and not isinstance(peerings, list):
            raise TypeError("Expected argument 'peerings' to be a list")
        pulumi.set(__self__, "peerings", peerings)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if rules and not isinstance(rules, list):
            raise TypeError("Expected argument 'rules' to be a list")
        pulumi.set(__self__, "rules", rules)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="ipv6Peerings")
    def ipv6_peerings(self) -> Sequence['outputs.ExpressRouteCircuitPeeringResponse']:
        """
        A collection of references to express route circuit ipv6 peerings.
        """
        return pulumi.get(self, "ipv6_peerings")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def peerings(self) -> Sequence['outputs.ExpressRouteCircuitPeeringResponse']:
        """
        A collection of references to express route circuit peerings.
        """
        return pulumi.get(self, "peerings")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the route filter resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def rules(self) -> Optional[Sequence['outputs.RouteFilterRuleResponse']]:
        """
        Collection of RouteFilterRules contained within a route filter.
        """
        return pulumi.get(self, "rules")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetRouteFilterResult(GetRouteFilterResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Pulumi's invoke results are already resolved; the dead 'yield'
        # merely makes this a generator so the object can be awaited.
        if False:
            yield self
        return GetRouteFilterResult(
            etag=self.etag,
            METHOD_NAME=self.METHOD_NAME,
            ipv6_peerings=self.ipv6_peerings,
            location=self.location,
            name=self.name,
            peerings=self.peerings,
            provisioning_state=self.provisioning_state,
            rules=self.rules,
            tags=self.tags,
            type=self.type)
def get_route_filter(expand: Optional[str] = None,
                     resource_group_name: Optional[str] = None,
                     route_filter_name: Optional[str] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRouteFilterResult:
    """
    Gets the specified route filter.
    Azure REST API version: 2023-02-01.


    :param str expand: Expands referenced express route bgp peering resources.
    :param str resource_group_name: The name of the resource group.
    :param str route_filter_name: The name of the route filter.
    """
    # Map Python arguments onto the provider's camelCase invoke arguments.
    __args__ = dict()
    __args__['expand'] = expand
    __args__['resourceGroupName'] = resource_group_name
    __args__['routeFilterName'] = route_filter_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:network:getRouteFilter', __args__, opts=opts, typ=GetRouteFilterResult).value

    return AwaitableGetRouteFilterResult(
        etag=pulumi.get(__ret__, 'etag'),
        METHOD_NAME=pulumi.get(__ret__, 'id'),
        ipv6_peerings=pulumi.get(__ret__, 'ipv6_peerings'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        peerings=pulumi.get(__ret__, 'peerings'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        rules=pulumi.get(__ret__, 'rules'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_route_filter)
def get_route_filter_output(expand: Optional[pulumi.Input[Optional[str]]] = None,
                            resource_group_name: Optional[pulumi.Input[str]] = None,
                            route_filter_name: Optional[pulumi.Input[str]] = None,
                            opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRouteFilterResult]:
    """
    Gets the specified route filter.
    Azure REST API version: 2023-02-01.

    The body is intentionally empty (``...``): the ``lift_output_func``
    decorator forwards all arguments to :func:`get_route_filter` and lifts
    the result into a ``pulumi.Output``.

    :param str expand: Expands referenced express route bgp peering resources.
    :param str resource_group_name: The name of the resource group.
    :param str route_filter_name: The name of the route filter.
    """
... |
7,346 | get config item | import json
import logging
import os
log = logging.getLogger(__name__)

# Sentinel object distinguishing "not resolved yet" from a legitimate None
# value cached by the Configs properties below.
NULL = object()


# pylint: disable=too-many-instance-attributes
# reviewed and disabled (jdh)
class Configs:
    """Configuration class to handle config file, args, env tasks.

    Order of precedence (ordoprec): cli > envvars > config > defaults.
    Resolved values are cached on the instance; the NULL sentinel marks
    "not resolved yet" so a cached None is not re-resolved.
    """

    def __init__(self, args, fqpath=None, config=None):
        """
        :param args: mapping of CLI/REST arguments (must support ``.get``)
        :param fqpath: fully qualified path of a JSON config file
        :param config: pre-parsed config dict; when given, no file is read
        """
        self._args = args
        log.debug(self._args)
        self._fqpath = fqpath
        # define args using sentinel (not None)
        self._service_url = NULL
        self._payload_dir = NULL
        self._merge_launches = NULL
        self._simple_xml = NULL
        self._debug = NULL
        self._config = {}
        log.debug("Configs.init(): reading %s", self._fqpath)
        if config is not None:
            self._config = config
        else:
            self._config = self._read_fqpath(self.fqpath)
        log.info("Configs.init(): CONFIG: %s", self._config)

    # PROPERTIES
    # using these to handle order of precedence of config, args, etc.
    @property
    def args(self):
        """args - the args object from CLI or REST API"""
        return self._args

    @property
    def fqpath(self):
        """fqpath - config file fully qualified path"""
        if self._fqpath is None:
            self._fqpath = self.args.get("config_file", None)
        return self._fqpath

    @property
    def config(self):
        """Return the config object"""
        return self._config

    @config.setter
    def config(self, config):
        self._config = config

    @property
    def service_config(self):
        """The RP PreProc section of the config file"""
        return self._config.get("rp_preproc", None)

    @property
    def rp_config(self):
        """The ReportPortal section of the config file"""
        if self._config is not None:
            return self._config.get("reportportal", None)
        return None

    @property
    def payload_dir(self):
        """Directory containing the payload files"""
        if self._payload_dir is NULL:
            self._payload_dir = self.METHOD_NAME(
                "payload_dir", config=self.service_config
            )
        return self._payload_dir

    @payload_dir.setter
    def payload_dir(self, payload_dir):
        self._payload_dir = payload_dir

    @property
    def service_url(self):
        """service_url - send to the REST service or use the local client"""
        if self._service_url is NULL:
            self._service_url = self.METHOD_NAME(
                "service_url", config=self.service_config
            )
        return self._service_url

    @property
    def simple_xml(self):
        """Use simple xml import into ReportPortal instead of processing"""
        if self._simple_xml is NULL:
            self._simple_xml = self.METHOD_NAME("simple_xml", config=self.rp_config)
        return self._simple_xml

    @property
    def merge_launches(self):
        """Config merge launches after import"""
        if self._merge_launches is NULL:
            self._merge_launches = self.METHOD_NAME(
                "merge_launches", config=self.rp_config
            )
        return self._merge_launches

    # PRIVATE METHODS
    def _read_fqpath(self, fqpath=None):
        """Read a json formatted file into a dictionary.

        Fix: the file is now opened with a context manager so the handle
        is always closed (the previous version leaked the descriptor and
        its truthiness check was dead code -- open() never returns a
        falsy object, it raises instead).
        """
        log.debug("Configs._read_fqpath(): CONFIG FQPATH: %s", fqpath)
        if fqpath is not None:
            self._fqpath = fqpath
        with open(self._fqpath, "r") as configfd:
            self._config = json.load(configfd)
        return self._config

    def METHOD_NAME(self, config_item, config=None, ordoprec=None, default=None):
        """Order of precedence helper for configs.

        Later entries in ``ordoprec`` win; the default order
        ["config", "env", "cli"] therefore gives CLI the highest priority.

        :param config_item: name of the setting to resolve
        :param config: config section to look the item up in (defaults to
            the whole config dict)
        :param ordoprec: resolution order, lowest priority first
        :param default: value returned when no source provides the item
        """
        # cli > envvars > config > defaults
        if config is None:
            config = self.config
        if ordoprec is None:
            ordoprec = ["config", "env", "cli"]
        log.debug("Configs.get_config_item()...from config: %s", config)
        ordoprec_dict = {
            "cli": self.args.get(config_item, None),
            "config": config.get(config_item, None),
        }
        # env: RP_<ITEM>, with one special-cased hyphenated name
        if config_item == "auto-dashboard":
            env_name = "RP_AUTO_DASHBOARD"
        else:
            env_name = "RP_{}".format(config_item.upper())
        ordoprec_dict["env"] = os.getenv(env_name, None)
        config_value = default
        for source in ordoprec:
            log.debug(
                "Configs.get_config_item()... " "Config item (%s): %s = %s",
                config_item,
                source,
                ordoprec_dict[source],
            )
            if ordoprec_dict[source] is not None:
                config_value = ordoprec_dict[source]
        log.debug("Configs.get_config_item()... config_value: %s", config_value)
        return config_value
7,347 | demo | # Natural Language Toolkit: Cooper storage for Quantifier Ambiguity
#
# Copyright (C) 2001-2023 NLTK Project
# Author: Ewan Klein <ewan@inf.ed.ac.uk>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
from nltk.parse import load_parser
from nltk.parse.featurechart import InstantiateVarsChart
from nltk.sem.logic import ApplicationExpression, LambdaExpression, Variable
class CooperStore:
    """
    A container for handling quantifier ambiguity via Cooper storage.
    """

    def __init__(self, featstruct):
        """
        :param featstruct: The value of the ``sem`` node in a tree from
            ``parse_with_bindops()``
        :type featstruct: FeatStruct (with features ``CORE`` and ``STORE``)
        """
        self.featstruct = featstruct
        self.readings = []
        try:
            self.core = featstruct["CORE"]
            self.store = featstruct["STORE"]
        except KeyError:
            print("%s is not a Cooper storage structure" % featstruct)

    def _permute(self, lst):
        """
        :return: An iterator over the permutations of the input list
        :type lst: list
        :rtype: iter
        """
        if not lst:
            yield ()
            return
        for index, head in enumerate(lst):
            # All orderings of the remaining elements, with `head` in front.
            tail = lst[:index] + lst[index + 1:]
            for sub_perm in self._permute(tail):
                yield (head,) + sub_perm

    def s_retrieve(self, trace=False):
        r"""
        Carry out S-Retrieval of binding operators in store.

        Each permutation of the store (i.e. list of binding operators) is
        taken to be a possible scoping of quantifiers. For every permutation
        the binding operators are successively applied to the core semantic
        representation, working from the inside out, and the simplified
        result is appended to ``self.readings``.

        Binding operators are of the form::

            bo(\P.all x.(man(x) -> P(x)),z1)
        """
        for perm, store_perm in enumerate(self._permute(self.store)):
            if trace:
                print("Permutation %s" % (perm + 1))
            term = self.core
            for bindop in store_perm:
                # we just want the arguments that are wrapped by the 'bo'
                # predicate; abstract over the current term with the
                # operator's variable, then apply the quantifier to it
                quant, varex = tuple(bindop.args)
                term = ApplicationExpression(
                    quant, LambdaExpression(varex.variable, term)
                )
                if trace:
                    print("  ", term)
            self.readings.append(term.simplify())
def parse_with_bindops(sentence, grammar=None, trace=0):
    """
    Use a grammar with Binding Operators to parse a sentence.

    Falls back to the book grammar ``storage.fcfg`` when no grammar
    is supplied.
    """
    grammar = grammar or "grammars/book_grammars/storage.fcfg"
    parser = load_parser(grammar, trace=trace, chart_class=InstantiateVarsChart)
    # Tokenize on whitespace and return all parses as a list.
    return list(parser.parse(sentence.split()))
def METHOD_NAME():
    """Demo: parse an ambiguous sentence and print, for each parse, its
    binding operators, its core representation and every quantifier
    scoping produced by S-retrieval."""
    from nltk.sem import cooper_storage as cs

    sentence = "every girl chases a dog"
    # sentence = "a man gives a bone to every dog"
    print()
    print("Analysis of sentence '%s'" % sentence)
    print("=" * 50)
    trees = cs.parse_with_bindops(sentence, trace=0)
    for tree in trees:
        semrep = cs.CooperStore(tree.label()["SEM"])
        print()
        print("Binding operators:")
        print("-" * 15)
        for s in semrep.store:
            print(s)
        print()
        print("Core:")
        print("-" * 15)
        print(semrep.core)
        print()
        print("S-Retrieval:")
        print("-" * 15)
        # trace=True makes s_retrieve print each permutation as it goes.
        semrep.s_retrieve(trace=True)
        print("Readings:")
        print("-" * 15)
        for i, reading in enumerate(semrep.readings):
            print(f"{i + 1}: {reading}")


if __name__ == "__main__":
    METHOD_NAME()
7,348 | save osm data async | # -*- coding: utf-8 -*-
"""
OSM utility module.
"""
from __future__ import unicode_literals
import logging
from django.contrib.gis.geos import GeometryCollection, LineString, Point, Polygon
from django.contrib.gis.geos.error import GEOSException
from django.db import IntegrityError, models, transaction
from defusedxml.lxml import _etree, fromstring, tostring
from six import iteritems
from onadata.apps.logger.models.attachment import Attachment
from onadata.apps.logger.models.instance import Instance
from onadata.apps.logger.models.osmdata import OsmData
from onadata.apps.restservice.signals import trigger_webhook
from onadata.celeryapp import app
def _get_xml_obj(xml):
    """Parse OSM XML (str or bytes) into an lxml element tree root.

    A duplicated ``action="modify"`` attribute is stripped and the parse
    retried; any other syntax error yields ``None``.
    """
    if not isinstance(xml, bytes):
        xml = xml.strip().encode()
    try:
        return fromstring(xml)
    except _etree.XMLSyntaxError as exc:  # pylint: disable=c-extension-no-member
        if "Attribute action redefined" not in str(exc):
            return None
    return _get_xml_obj(xml.replace(b'action="modify" ', b""))
def _get_node(ref, root):
    """Return the Point of the ``<node>`` with id ``ref``, or None."""
    matches = root.xpath(f'//node[@id="{ref}"]')
    if not matches:
        return None
    node = matches[0]
    return Point(float(node.get("lon")), float(node.get("lat")))
def get_combined_osm(osm_list):
    """
    Combine osm xml from a list of OsmData objects into one document.

    Returns the serialized combined XML as bytes; a dict with a 'detail'
    key is turned into an ``<error>`` element, anything else yields b"".
    """
    xml = ""
    is_sequence = (osm_list and isinstance(osm_list, list)) or isinstance(
        osm_list, models.QuerySet
    )
    if is_sequence:
        combined = None
        for osm_data in osm_list:
            parsed = _get_xml_obj(osm_data.xml)
            if parsed is None:
                continue
            if combined is None:
                # First parseable document becomes the merge target.
                combined = parsed
            else:
                for child in parsed.getchildren():
                    combined.append(child)
        if combined is not None:
            # pylint: disable=no-member
            return tostring(combined, encoding="utf-8", xml_declaration=True)
    if isinstance(osm_list, dict) and "detail" in osm_list:
        xml = f"<error>{osm_list['detail']}</error>"
    return xml.encode("utf-8")
def parse_osm_ways(osm_xml, include_osm_id=False):
    """Convert the <way> elements of an OSM XML document to geometry dicts."""
    root = _get_xml_obj(osm_xml)
    items = []
    for way in root.findall("way"):
        points = [_get_node(nd.get("ref"), root) for nd in way.findall("nd")]
        # A closed ring builds a Polygon; otherwise fall back to a LineString.
        try:
            geom = Polygon(points)
        except GEOSException:
            geom = LineString(points)
        items.append({
            "osm_id": way.get("id"),
            "geom": geom,
            "tags": parse_osm_tags(way, include_osm_id),
            "osm_type": "way",
        })
    return items
def parse_osm_nodes(osm_xml, include_osm_id=False):
    """Convert the <node> elements of an OSM XML document to geometry dicts."""
    root = _get_xml_obj(osm_xml)
    return [
        {
            "osm_id": node.get("id"),
            "geom": Point(float(node.get("lon")), float(node.get("lat"))),
            "tags": parse_osm_tags(node, include_osm_id),
            "osm_type": "node",
        }
        for node in root.findall("node")
    ]
def parse_osm_tags(node, include_osm_id=False):
    """Return the k/v pairs of an element's <tag> children as a dict.

    Empty values and the placeholder value "FIXME" (case-insensitive) are
    skipped; with ``include_osm_id`` a "<tag-name>:id" entry is added.
    """
    tags = {node.tag + ":id": node.get("id")} if include_osm_id else {}
    for element in node.findall("tag"):
        value = element.attrib["v"]
        if value == "" or value.upper() == "FIXME":
            continue
        tags[element.attrib["k"]] = value
    return tags
def parse_osm(osm_xml, include_osm_id=False):
    """
    Parse OSM XML and return a list of ways, falling back to nodes when
    the document contains no ways.
    """
    return (parse_osm_ways(osm_xml, include_osm_id)
            or parse_osm_nodes(osm_xml, include_osm_id))
@app.task()
def METHOD_NAME(instance_id):
    """
    Async task for saving OSM data for the specified submission.

    Thin Celery wrapper around :func:`save_osm_data`.
    """
    save_osm_data(instance_id)
def save_osm_data(instance_id):
    """
    Parse and persist the OSM attachments of the specified submission.

    For every ``.osm`` attachment on the instance, the XML is parsed into
    way/node records and stored as :class:`OsmData` rows; when a row for
    the same instance/field already exists, it is updated in place.
    Finally the instance is re-saved and its webhooks triggered.
    """
    instance = Instance.objects.filter(pk=instance_id).first()
    osm_attachments = (
        instance.attachments.filter(extension=Attachment.OSM) if instance else None
    )
    if instance and osm_attachments:
        # XPaths of all 'osm' survey questions in the form.
        fields = [
            f.get_abbreviated_xpath()
            for f in instance.xform.get_survey_elements_of_type("osm")
        ]
        # Map each osm field to the attachment filename recorded in the data.
        osm_filenames = {
            field: instance.json[field] for field in fields if field in instance.json
        }
        for osm in osm_attachments:
            try:
                osm_xml = osm.media_file.read()
                if isinstance(osm_xml, bytes):
                    osm_xml = osm_xml.decode("utf-8")
            except IOError as e:
                # Best-effort: log and move on to the next attachment.
                logging.exception("IOError saving osm data: %s", str(e))
                continue
            else:
                # Match the attachment back to the form field it answers by
                # comparing the file's basename (without ".osm").
                filename = None
                field_name = None
                for k, v in osm_filenames.items():
                    if osm.filename.startswith(v.replace(".osm", "")):
                        filename = v
                        field_name = k
                        break
                if field_name is None:
                    continue
                # NOTE(review): filename is always set whenever field_name is,
                # so this fallback appears to be dead code kept for safety.
                filename = osm.filename if filename is None else filename
                osm_list = parse_osm(osm_xml, include_osm_id=True)
                for osmd in osm_list:
                    try:
                        with transaction.atomic():
                            osm_data = OsmData(
                                instance=instance,
                                xml=osm_xml,
                                osm_id=osmd["osm_id"],
                                osm_type=osmd["osm_type"],
                                tags=osmd["tags"],
                                geom=GeometryCollection(osmd["geom"]),
                                filename=filename,
                                field_name=field_name,
                            )
                            osm_data.save()
                    except IntegrityError:
                        # A row already exists for this instance/field:
                        # update it with the freshly parsed values instead.
                        with transaction.atomic():
                            osm_data = (
                                OsmData.objects.exclude(xml=osm_xml)
                                .filter(instance=instance, field_name=field_name)
                                .first()
                            )
                            if osm_data:
                                osm_data.xml = osm_xml
                                osm_data.osm_id = osmd["osm_id"]
                                osm_data.osm_type = osmd["osm_type"]
                                osm_data.tags = osmd["tags"]
                                osm_data.geom = GeometryCollection(osmd["geom"])
                                osm_data.filename = filename
                                osm_data.save()
        instance.save()
        # Notify any registered rest services about the updated submission.
        trigger_webhook.send(sender=instance.__class__, instance=instance)
def osm_flat_dict(instance_id):
    """
    Flat dict of OSM tags for the specified submission.
    Each key starts with 'osm_*'.
    """
    osm_data = OsmData.objects.filter(instance=instance_id)
    tags = {}
    for osm in osm_data:
        # NOTE(review): this iterates ``osm.tags`` and then calls iteritems()
        # on each element, i.e. it assumes tags is a list of dicts -- while
        # save_osm_data() above stores a single dict per row. Confirm the
        # stored shape before relying on this helper.
        for tag in osm.tags:
            for (k, v) in iteritems(tag):
                tags.update({f"osm_{k}": v})
    return tags
7,349 | test tracking line numbers | """Tests to ensure that the lxml tree builder generates good trees."""
import pickle
import pytest
import re
import warnings
from . import LXML_PRESENT, LXML_VERSION
if LXML_PRESENT:
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
from bs4 import (
BeautifulSoup,
BeautifulStoneSoup,
)
from bs4.element import Comment, Doctype, SoupStrainer
from . import (
HTMLTreeBuilderSmokeTest,
XMLTreeBuilderSmokeTest,
SOUP_SIEVE_PRESENT,
SoupTest,
)
@pytest.mark.skipif(
    not LXML_PRESENT,
    reason="lxml seems not to be present, not testing its tree builder."
)
class TestLXMLTreeBuilder(SoupTest, HTMLTreeBuilderSmokeTest):
    """See ``HTMLTreeBuilderSmokeTest``."""

    @property
    def default_builder(self):
        # Builder *class* (not an instance); SoupTest instantiates it.
        return LXMLTreeBuilder

    def test_out_of_range_entity(self):
        """Out-of-range numeric character references must be dropped."""
        # NOTE(review): the entity literals below contain U+FFFD replacement
        # characters -- they look mojibake-damaged. Per the test name they
        # originally held out-of-range numeric character references; restore
        # from upstream history before relying on this test.
        self.assert_soup(
            "<p>foo�bar</p>", "<p>foobar</p>")
        self.assert_soup(
            "<p>foo�bar</p>", "<p>foobar</p>")
        self.assert_soup(
            "<p>foo�bar</p>", "<p>foobar</p>")

    def test_entities_in_foreign_document_encoding(self):
        # We can't implement this case correctly because by the time we
        # hear about markup like "&#147;", it's been (incorrectly) converted into
        # a string like u'\x93'
        pass

    # In lxml < 2.3.5, an empty doctype causes a segfault. Skip this
    # test if an old version of lxml is installed.
    @pytest.mark.skipif(
        not LXML_PRESENT or LXML_VERSION < (2,3,5,0),
        reason="Skipping doctype test for old version of lxml to avoid segfault."
    )
    def test_empty_doctype(self):
        """An empty doctype parses to an empty Doctype node."""
        soup = self.soup("<!DOCTYPE>")
        doctype = soup.contents[0]
        assert "" == doctype.strip()

    def test_beautifulstonesoup_is_xml_parser(self):
        # Make sure that the deprecated BSS class uses an xml builder
        # if one is installed.
        with warnings.catch_warnings(record=True) as w:
            soup = BeautifulStoneSoup("<b />")
        assert "<b/>" == str(soup.b)
        [warning] = w
        assert warning.filename == __file__
        assert "BeautifulStoneSoup class is deprecated" in str(warning.message)

    def METHOD_NAME(self):
        # The lxml TreeBuilder cannot keep track of line numbers from
        # the original markup. Even if you ask for line numbers, we
        # don't have 'em.
        #
        # This means that if you have a tag like <sourceline> or
        # <sourcepos>, attribute access will find it rather than
        # giving you a numeric answer.
        soup = self.soup(
            "\n <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>",
            store_line_numbers=True
        )
        assert "sourceline" == soup.p.sourceline.name
        assert "sourcepos" == soup.p.sourcepos.name
@pytest.mark.skipif(
    not LXML_PRESENT,
    reason="lxml seems not to be present, not testing its XML tree builder."
)
class TestLXMLXMLTreeBuilder(SoupTest, XMLTreeBuilderSmokeTest):
    """See ``HTMLTreeBuilderSmokeTest``."""

    @property
    def default_builder(self):
        # Builder *class* used by SoupTest.soup() for XML parsing.
        return LXMLTreeBuilderForXML

    def test_namespace_indexing(self):
        """Prefix maps on the soup and on individual tags follow scoping."""
        soup = self.soup(
            '<?xml version="1.1"?>\n'
            '<root>'
            '<tag xmlns="http://unprefixed-namespace.com">content</tag>'
            '<prefix:tag2 xmlns:prefix="http://prefixed-namespace.com">content</prefix:tag2>'
            '<prefix2:tag3 xmlns:prefix2="http://another-namespace.com">'
            '<subtag xmlns="http://another-unprefixed-namespace.com">'
            '<subsubtag xmlns="http://yet-another-unprefixed-namespace.com">'
            '</prefix2:tag3>'
            '</root>'
        )
        # The BeautifulSoup object includes every namespace prefix
        # defined in the entire document. This is the default set of
        # namespaces used by soupsieve.
        #
        # Un-prefixed namespaces are not included, and if a given
        # prefix is defined twice, only the first prefix encountered
        # in the document shows up here.
        assert soup._namespaces == {
            'xml': 'http://www.w3.org/XML/1998/namespace',
            'prefix': 'http://prefixed-namespace.com',
            'prefix2': 'http://another-namespace.com'
        }
        # A Tag object includes only the namespace prefixes
        # that were in scope when it was parsed.
        # We do not track un-prefixed namespaces as we can only hold
        # one (the first one), and it will be recognized as the
        # default namespace by soupsieve, even when operating from a
        # tag with a different un-prefixed namespace.
        assert soup.tag._namespaces == {
            'xml': 'http://www.w3.org/XML/1998/namespace',
        }
        assert soup.tag2._namespaces == {
            'prefix': 'http://prefixed-namespace.com',
            'xml': 'http://www.w3.org/XML/1998/namespace',
        }
        assert soup.subtag._namespaces == {
            'prefix2': 'http://another-namespace.com',
            'xml': 'http://www.w3.org/XML/1998/namespace',
        }
        assert soup.subsubtag._namespaces == {
            'prefix2': 'http://another-namespace.com',
            'xml': 'http://www.w3.org/XML/1998/namespace',
        }

    @pytest.mark.skipif(
        not SOUP_SIEVE_PRESENT, reason="Soup Sieve not installed"
    )
    def test_namespace_interaction_with_select_and_find(self):
        # Demonstrate how namespaces interact with select* and
        # find* methods.
        soup = self.soup(
            '<?xml version="1.1"?>\n'
            '<root>'
            '<tag xmlns="http://unprefixed-namespace.com">content</tag>'
            '<prefix:tag2 xmlns:prefix="http://prefixed-namespace.com">content</tag>'
            '<subtag xmlns:prefix="http://another-namespace-same-prefix.com">'
            '<prefix:tag3>'
            '</subtag>'
            '</root>'
        )
        # soupselect uses namespace URIs.
        assert soup.select_one('tag').name == 'tag'
        assert soup.select_one('prefix|tag2').name == 'tag2'
        # If a prefix is declared more than once, only the first usage
        # is registered with the BeautifulSoup object.
        assert soup.select_one('prefix|tag3') is None
        # But you can always explicitly specify a namespace dictionary.
        assert soup.select_one(
            'prefix|tag3', namespaces=soup.subtag._namespaces
        ).name == 'tag3'
        # And a Tag (as opposed to the BeautifulSoup object) will
        # have a set of default namespaces scoped to that Tag.
        assert soup.subtag.select_one('prefix|tag3').name == 'tag3'
        # the find() methods aren't fully namespace-aware; they just
        # look at prefixes.
        assert soup.find('tag').name == 'tag'
        assert soup.find('prefix:tag2').name == 'tag2'
        assert soup.find('prefix:tag3').name == 'tag3'
        assert soup.subtag.find('prefix:tag3').name == 'tag3'

    def test_pickle_restores_builder(self):
        # The lxml TreeBuilder is not picklable, so when unpickling
        # a document created with it, a new TreeBuilder of the
        # appropriate class is created.
        soup = self.soup("<a>some markup</a>")
        assert isinstance(soup.builder, self.default_builder)
        pickled = pickle.dumps(soup)
        unpickled = pickle.loads(pickled)

        assert "some markup" == unpickled.a.string
        assert unpickled.builder != soup.builder
        assert isinstance(unpickled.builder, self.default_builder)
7,350 | test | #-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frédéric Rodrigo 2012 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from modules.OsmoseTranslation import T_
from plugins.Plugin import Plugin
class Structural_Multipolygon(Plugin):
    """Osmose check for structurally invalid multipolygon relations."""

    def init(self, logger):
        """Register the multipolygon issue classes with Osmose."""
        Plugin.init(self, logger)
        self.errors[11701] = self.def_class(
            item=1170, level=2, tags=['relation', 'multipolygon', 'fix:chair'],
            title=T_('Inadequate role for multipolygon'),
            detail=T_(
'''Possible roles are `outer`, `inner` or nothing (not recommended).'''))
        self.errors[11702] = self.def_class(
            item=1170, level=2, tags=['relation', 'multipolygon', 'fix:chair'],
            title=T_('Inadequate member for multipolygon'),
            detail=T_(
'''Members must be ways.'''))
        self.errors[11703] = self.def_class(
            item=1170, level=1, tags=['relation', 'multipolygon', 'fix:imagery'],
            title=T_('Missing outer role for multipolygon'),
            detail=T_(
'''At least one outer ring must be present.'''),
            fix=T_(
'''Find the way outside, it may be deleted, check the history.'''))
        self.errors[11704] = self.def_class(
            item=1170, level=2, tags=['relation', 'multipolygon', 'fix:chair'],
            title=T_('This multipolygon is a simple polygon'),
            detail=T_(
'''Multipolygon relation actually defines a simple polygon.'''))

    def relation(self, data, tags, members):
        """Return a list of issues for a multipolygon relation (None for
        relations of any other type)."""
        if tags.get('type') != 'multipolygon':
            return

        outer_count = 0
        inner_count = 0
        bad_roles = []
        bad_members = []
        for member in members:
            if member['type'] != 'way':
                # Non-way members are not allowed at all.
                bad_members.append(f"{member['type']} - {member['role']}")
                continue
            role = member['role']
            if role not in ('', 'outer', 'inner'):
                bad_roles.append(role)
            # An empty role is treated as an implicit 'outer'.
            if role in ('', 'outer'):
                outer_count += 1
            elif role == 'inner':
                inner_count += 1

        issues = []
        if bad_roles:
            issues.append({"class": 11701, "subclass": 1, "text": {"en": ', '.join(bad_roles)}})
        if bad_members:
            issues.append({"class": 11702, "subclass": 1, "text": {"en": ', '.join(bad_members)}})
        if outer_count == 0:
            issues.append({"class": 11703, "subclass": 1})
        elif outer_count == 1 and inner_count == 0 and not bad_roles:
            # Exactly one clean outer ring and nothing else: plain polygon.
            issues.append({"class": 11704, "subclass": 1})
        return issues
###########################################################################
from plugins.Plugin import TestPluginCommon
class Test(TestPluginCommon):
    def METHOD_NAME(self):
        """Exercise Structural_Multipolygon.relation on bad and good members."""
        a = Structural_Multipolygon(None)
        a.init(None)
        # Each of these member lists must raise at least one issue -- and
        # only for relations tagged type=multipolygon.
        for m in [[{"type": "way", "role": "xxx"}],
                  [{"type": "node", "role": u"éù"}],
                  [{"type": "way", "role": "inner"}],
                  [{"type": "way", "role": "outer"}],
                  [{"type": "node", "role": "outer"}],
                  [{"type": "relation", "role": "outer"}],
                  [{"type": "way", "role": "outer"}, {"type": "node", "role": "outer"}],
                  ]:
            self.check_err(a.relation(None, {"type": "multipolygon"}, m), m)
            assert not a.relation(None, {"t": "multipolygon"}, m), m
            assert not a.relation(None, {"type": "arf"}, m), m
        # These well-formed multipolygons must not raise any issue.
        for m in [[{"type": "way", "role": "outer"}] * 2,
                  [{"type": "way", "role": "outer"}] * 20,
                  [{"type": "way", "role": "outer"}] * 2 + [{"type": "way", "role": "inner"}],
                  [{"type": "way", "role": ""}] * 2 + [{"type": "way", "role": "inner"}],
                  ]:
            assert not a.relation(None, {"type": "multipolygon"}, m), m
7,351 | can create from shape | from . import *
from pya import *
class Bragg_encoder(pya.PCellDeclarationHelper):
def __init__(self):
#Important: initialize the super class
super(Bragg_encoder,self).__init__()
TECHNOLOGY = get_technology_by_name('EBeam')
#declare the paramters
#parameters: binary code, start period, end period, corrugation width, length
self.param("N", self.TypeDouble, "Number of bits (N)", default = 8)
self.param("binary", self.TypeString, "identity (binary size N)", default = "10000000")
self.param("start_period", self.TypeDouble, "start period (microns)", default = 0.314)
self.param("stop_period", self.TypeDouble, "stop period (microns)", default = 0.342)
self.param("corrugation_widths",self.TypeList,"Corrugations widths (microns)", default = [0.08, 0.06, 0.04, 0.02, 0.02, 0.04, 0.06, 0.08])
self.param("wg_width", self.TypeDouble, "Waveguide width", default = 0.50)
self.param("length", self.TypeInt, "length (microns)", default = 200)
self.param("sum_format", self.TypeDouble, "sum format (1, 2, or 3)", default = 3)
self.param("layer", self.TypeLayer, "Layer", default = TECHNOLOGY['Si'])
self.param("pinrec", self.TypeLayer, "PinRec Layer", default = TECHNOLOGY['PinRec'])
self.param("devrec", self.TypeLayer, "DevRec Layer", default = TECHNOLOGY['DevRec'])
def display_text_impl(self):
#Provide a descriptive text for the cell
return "Bragg_encoder%s-%.3f-%.3f-%.3f" % \
(int(self.binary),self.start_period,self.stop_period,self.length)
def coerce_parameters_impl(self):
pass
def METHOD_NAME(self,layout,shape,layer):
return False
#function to create the device
def produce_impl(self):
#fetch the parameters
dbu = self.layout.dbu
ly = self.layout
shapes = self.cell.shapes
LayerSi = self.layer
LayerSiN = ly.layer(LayerSi)
LayerPinRecN = ly.layer(self.pinrec)
LayerDevRecN = ly.layer(self.devrec)
from SiEPIC.extend import to_itype
from math import pi, cos, sin, acos
N = int(self.N)
#draw the encoded bragg grating:
dx = 0.01
corrugations = self.corrugation_widths
if N != len(self.binary):
pya.MessageBox.warning("Array length mismatch!", "Number of bits (N) does NOT equal the bits array length!", pya.MessageBox.Ok)
return
elif N != len(self.corrugation_widths):
pya.MessageBox.warning("Array length mismatch!", "Number of bits (N) does NOT equal the corrugation widths array length!", pya.MessageBox.Ok)
return
dlambda = (self.stop_period - self.start_period)/(N-1)
npoints = int(self.length/dx)
w = to_itype(self.wg_width,dbu)
l = to_itype(self.length,dbu)
f = self.sum_format
E = []
wavelengths = []
for i in range(0,N):
E.append(int(self.binary[i]))
wavelengths.append(self.start_period + i*dlambda)
round(wavelengths[i],3)
x = 0
y1 = 0
y2 = 0
pts1 = [Point(x,0)]
pts3 = [Point(x,0)]
for i in range(0, npoints + 1):
x1 = i*dx
#summation method
if f == 1:
for j in range(0, N):
corrugation = float(self.corrugation_widths[j])/10
y1 = y1 + corrugation*E[j]*sin((x1*2*pi)/wavelengths[j])
y2 = y1
elif f == 2:
#half half method (11110000 style):
for j in range(0, int(N/2)):
corrugation1 = float(self.corrugation_widths[j])/10
corrugation2 = float(self.corrugation_widths[j+int(N/2)])/10
y1 = y1 + corrugation1*E[j]*sin((x1*2*pi)/wavelengths[j])
y2 = y2 + corrugation2*E[j+int(N/2)]*sin((x1*2*pi)/wavelengths[j+int(N/2)])
elif f == 3:
#half half method (10101010 style):
for j in range(0, int(N/2)):
idx1 = int(j*2)
idx2 = int((j*2+1))
corrugation1 = float(self.corrugation_widths[idx1])/10
corrugation2 = float(self.corrugation_widths[idx2])/10
y1 = y1 + corrugation1*E[idx1]*sin((x1*2*pi)/wavelengths[idx1])
y2 = y2 + corrugation2*E[idx2]*sin((x1*2*pi)/wavelengths[idx2])
if self.N == 2:
dw1 = float(self.corrugation_widths[0])
dw2 = float(self.corrugation_widths[1])
else:
dw1 = 0
dw2 = 0
pts1.append( Point((x + x1)/dbu, ((self.wg_width-dw1)/2 + y1)/dbu))
pts3.append( Point((x + x1)/dbu, ((-self.wg_width+dw2)/2 - y2)/dbu))
pts1.append( Point((x + x1 + 20*dx)/dbu, self.wg_width/2/dbu))
pts3.append( Point((x + x1 + 20*dx)/dbu, -self.wg_width/2/dbu))
pts1.append( Point((x + x1 + 20*dx)/dbu, 0))
pts3.append( Point((x + x1 + 20*dx)/dbu, 0))
shapes(LayerSiN).insert(Polygon(pts1))
shapes(LayerSiN).insert(Polygon(pts3))
#create the pins on the waveguides, as short paths:
from SiEPIC._globals import PIN_LENGTH as pin_length
t = Trans(Trans.R0,0,0)
pin = Path([Point(pin_length/2,0), Point(-pin_length/2,0)],w)
pin_t = pin.transformed(t)
shapes(LayerPinRecN).insert(pin_t)
text = Text("pin1", t)
shape = shapes(LayerPinRecN).insert(text)
shape.text_size = 0.4/dbu
t = Trans(Trans.R0,(self.length+dx*20)/dbu,0)
pin = Path([Point(-pin_length/2,0),Point(pin_length/2,0)],w)
pin_t = pin.transformed(t)
shapes(LayerPinRecN).insert(pin_t)
text = Text("pin2",t)
shape = shapes(LayerPinRecN).insert(text)
shape.text_size = 0.4/dbu
|
7,352 | test solve gd p iterate over discrete | # ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright (c) 2008-2022
# National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
import pyomo.common.unittest as unittest
from pyomo.contrib.gdpopt.enumerate import GDP_Enumeration_Solver
from pyomo.environ import (
SolverFactory,
Objective,
maximize,
TerminationCondition,
value,
Var,
Integers,
Constraint,
ConcreteModel,
)
from pyomo.gdp import Disjunction
import pyomo.gdp.tests.models as models
@unittest.skipUnless(SolverFactory('gurobi').available(), 'Gurobi not available')
class TestGDPoptEnumerate(unittest.TestCase):
def test_solve_two_term_disjunction(self):
    """Enumeration visits both disjuncts and finds the optimum in d[0]."""
    m = models.makeTwoTermDisj()
    m.obj = Objective(expr=m.x, sense=maximize)

    results = SolverFactory('gdpopt.enumerate').solve(m)

    # One subproblem per disjunct.
    self.assertEqual(results.solver.iterations, 2)
    self.assertEqual(
        results.solver.termination_condition, TerminationCondition.optimal
    )
    self.assertEqual(results.problem.lower_bound, 9)
    self.assertEqual(results.problem.upper_bound, 9)
    self.assertEqual(value(m.x), 9)
    self.assertTrue(value(m.d[0].indicator_var))
    self.assertFalse(value(m.d[1].indicator_var))
def modify_two_term_disjunction(self, m):
    """Helper: make both disjuncts feasible and add an integer variable so
    enumeration over discrete solutions becomes non-trivial."""
    # Make first disjunct feasible
    m.a.setlb(0)
    # Discrete variable
    m.y = Var(domain=Integers, bounds=(2, 4))
    m.d[1].c3 = Constraint(expr=m.x <= 6)
    m.d[0].c2 = Constraint(expr=m.y + m.a - 5 <= 2)
    m.obj = Objective(expr=-m.x - m.y)
def METHOD_NAME(self):
    """With force_subproblem_nlp=True, the solver enumerates each integer
    realization: 2 disjuncts x 3 values of y gives 6 iterations."""
    m = models.makeTwoTermDisj()
    self.modify_two_term_disjunction(m)

    results = SolverFactory('gdpopt.enumerate').solve(m, force_subproblem_nlp=True)

    self.assertEqual(results.solver.iterations, 6)
    self.assertEqual(
        results.solver.termination_condition, TerminationCondition.optimal
    )
    self.assertEqual(results.problem.lower_bound, -11)
    self.assertEqual(results.problem.upper_bound, -11)
    self.assertEqual(value(m.x), 9)
    self.assertEqual(value(m.y), 2)
    self.assertTrue(value(m.d[0].indicator_var))
    self.assertFalse(value(m.d[1].indicator_var))
def test_solve_GDP_do_not_iterate_over_discrete_variables(self):
    """By default the integer variable is left to the subproblem solver,
    so only the 2 disjuncts are enumerated."""
    m = models.makeTwoTermDisj()
    self.modify_two_term_disjunction(m)

    results = SolverFactory('gdpopt.enumerate').solve(m)

    self.assertEqual(results.solver.iterations, 2)
    self.assertEqual(
        results.solver.termination_condition, TerminationCondition.optimal
    )
    self.assertEqual(results.problem.lower_bound, -11)
    self.assertEqual(results.problem.upper_bound, -11)
    self.assertEqual(value(m.x), 9)
    self.assertEqual(value(m.y), 2)
    self.assertTrue(value(m.d[0].indicator_var))
    self.assertFalse(value(m.d[1].indicator_var))
def test_solve_GDP_iterate_over_Boolean_variables(self):
    """Forcing NLP subproblems makes the solver enumerate every Boolean
    realization (16 iterations on this model)."""
    m = models.makeLogicalConstraintsOnDisjuncts()

    results = SolverFactory('gdpopt.enumerate').solve(m, force_subproblem_nlp=True)

    self.assertEqual(results.solver.iterations, 16)
    self.assertEqual(
        results.solver.termination_condition, TerminationCondition.optimal
    )
    self.assertEqual(results.problem.lower_bound, 8)
    self.assertEqual(results.problem.upper_bound, 8)
    self.assertTrue(value(m.d[2].indicator_var))
    self.assertTrue(value(m.d[3].indicator_var))
    self.assertFalse(value(m.d[1].indicator_var))
    self.assertFalse(value(m.d[4].indicator_var))
    self.assertEqual(value(m.x), 8)
    # We don't know what values they take, but they have to be different
    self.assertNotEqual(value(m.Y[1]), value(m.Y[2]))
def test_solve_GDP_do_not_iterate_over_Boolean_variables(self):
    """Without forcing NLP subproblems, only 4 iterations are needed for the same optimum."""
    m = models.makeLogicalConstraintsOnDisjuncts()
    results = SolverFactory('gdpopt.enumerate').solve(m)
    # Fewer iterations than the forced-NLP variant (4 vs 16).
    self.assertEqual(results.solver.iterations, 4)
    self.assertEqual(
        results.solver.termination_condition, TerminationCondition.optimal
    )
    self.assertEqual(results.problem.lower_bound, 8)
    self.assertEqual(results.problem.upper_bound, 8)
    self.assertTrue(value(m.d[2].indicator_var))
    self.assertTrue(value(m.d[3].indicator_var))
    self.assertFalse(value(m.d[1].indicator_var))
    self.assertFalse(value(m.d[4].indicator_var))
    self.assertEqual(value(m.x), 8)
    # We don't know what values they take, but they have to be different
    self.assertNotEqual(value(m.Y[1]), value(m.Y[2]))
def test_stop_at_iteration_limit(self):
    """An iterlim below the full enumeration count stops with maxIterations."""
    model = models.makeLogicalConstraintsOnDisjuncts()
    res = SolverFactory('gdpopt.enumerate').solve(
        model, iterlim=4, force_subproblem_nlp=True
    )
    self.assertEqual(res.solver.iterations, 4)
    self.assertEqual(
        res.solver.termination_condition, TerminationCondition.maxIterations
    )
@unittest.skipUnless(SolverFactory('ipopt').available(), 'Ipopt not available')
def test_infeasible_GDP(self):
    """An infeasible GDP terminates with the 'infeasible' condition."""
    m = models.make_infeasible_gdp_model()
    results = SolverFactory('gdpopt.enumerate').solve(m)
    self.assertEqual(results.solver.iterations, 2)
    self.assertEqual(
        results.solver.termination_condition, TerminationCondition.infeasible
    )
    # No feasible realization was found, so the reported lower bound is +inf.
    self.assertEqual(results.problem.lower_bound, float('inf'))
def test_unbounded_GDP(self):
    """Unboundedness is detected on the very first subproblem."""
    m = ConcreteModel()
    m.x = Var(bounds=(-1, 10))
    m.y = Var(bounds=(2, 3))
    m.z = Var()  # unbounded variable drives the objective to -inf
    m.d = Disjunction(expr=[[m.x + m.y >= 5], [m.x - m.y <= 3]])
    m.o = Objective(expr=m.z)
    res = SolverFactory('gdpopt.enumerate').solve(m)
    self.assertEqual(res.solver.iterations, 1)
    self.assertEqual(
        res.solver.termination_condition, TerminationCondition.unbounded
    )
    neg_inf = -float('inf')
    self.assertEqual(res.problem.lower_bound, neg_inf)
    self.assertEqual(res.problem.upper_bound, neg_inf)
@unittest.skipUnless(SolverFactory('ipopt').available(), 'Ipopt not available')
def test_algorithm_specified_to_solve(self):
    """The 'enumerate' algorithm can be selected via the generic 'gdpopt' solver.

    Removes a stray ' |' artifact that trailed the final assertion and broke
    the syntax of this method.
    """
    m = models.twoDisj_twoCircles_easy()
    results = SolverFactory('gdpopt').solve(m, algorithm='enumerate', tee=True)
    self.assertEqual(results.solver.iterations, 2)
    self.assertEqual(
        results.solver.termination_condition, TerminationCondition.optimal
    )
    # Optimal objective value is 9 at (x, y) = (2, 7) in the upper circle.
    self.assertAlmostEqual(results.problem.lower_bound, 9)
    self.assertAlmostEqual(results.problem.upper_bound, 9)
    self.assertAlmostEqual(value(m.x), 2)
    self.assertAlmostEqual(value(m.y), 7)
    self.assertTrue(value(m.upper_circle.indicator_var))
    self.assertFalse(value(m.lower_circle.indicator_var))
from enum import IntFlag
from typing import Dict, List
from pyroute2 import MPTCP
from socket import AF_INET, AF_INET6
from lnst.Common.IpAddress import ipaddress, BaseIpAddress
class MPTCPFlags(IntFlag):
    """MPTCP path-manager endpoint flags.

    Bit values mirror the kernel UAPI header:
    https://github.com/torvalds/linux/blob/9d31d2338950293ec19d9b095fbaa9030899dcb4/include/uapi/linux/mptcp.h#L73
    """

    MPTCP_PM_ADDR_FLAG_SIGNAL = 1
    MPTCP_PM_ADDR_FLAG_SUBFLOW = 2
    MPTCP_PM_ADDR_FLAG_BACKUP = 4
class MPTCPEndpoint:
    """A single MPTCP endpoint parsed from a pyroute2 netlink message.

    Wraps the ``MPTCP_PM_ADDR_ATTR_*`` attribute mapping; the IP address and
    the flag set are decoded lazily on first access.
    """

    def __init__(self, attr: Dict):
        self._attr = attr
        self._ip = None      # lazily decoded IP address
        self._flags = None   # lazily decoded MPTCPFlags

    @classmethod
    def from_netlink(cls, nl_mptcp_ep_msg):
        """Build an endpoint from an ``mptcp.endpoint('show')`` netlink message.

        The message carries an ``MPTCP_PM_ATTR_ADDR`` attribute whose
        ``attrs`` list holds ``(name, value)`` pairs such as
        ``('MPTCP_PM_ADDR_ATTR_FAMILY', 2)`` and
        ``('MPTCP_PM_ADDR_ATTR_ADDR4', '192.168.202.1')``.

        :param nl_mptcp_ep_msg: the netlink message from mptcp.endpoint('show')
        :return: a new MPTCPEndpoint
        """
        addr_msg = nl_mptcp_ep_msg.get_attr("MPTCP_PM_ATTR_ADDR")
        return cls(dict(addr_msg['attrs']))

    @property
    def id(self):
        """Kernel-assigned endpoint id."""
        return self._attr['MPTCP_PM_ADDR_ATTR_ID']

    @property
    def ip_family(self):
        """Address family (AF_INET or AF_INET6)."""
        return self._attr['MPTCP_PM_ADDR_ATTR_FAMILY']

    @property
    def ip_address(self):
        """The endpoint's IP address, decoded once and cached."""
        if self._ip is None:
            key = (
                'MPTCP_PM_ADDR_ATTR_ADDR4'
                if self.ip_family == AF_INET
                else 'MPTCP_PM_ADDR_ATTR_ADDR6'
            )
            self._ip = ipaddress(self._attr[key])
        return self._ip

    @property
    def flags(self):
        """Endpoint flags as an MPTCPFlags value, decoded once and cached."""
        if self._flags is None:
            self._flags = MPTCPFlags(self._attr['MPTCP_PM_ADDR_ATTR_FLAGS'])
        return self._flags

    @property
    def is_signal(self):
        return bool(self.flags & MPTCPFlags.MPTCP_PM_ADDR_FLAG_SIGNAL)

    @property
    def is_subflow(self):
        return bool(self.flags & MPTCPFlags.MPTCP_PM_ADDR_FLAG_SUBFLOW)

    @property
    def is_backup(self):
        return bool(self.flags & MPTCPFlags.MPTCP_PM_ADDR_FLAG_BACKUP)
class MPTCPManager:
    """Thin wrapper over pyroute2's MPTCP generic-netlink API for managing
    endpoints and path-manager limits on the local host."""

    def __init__(self):
        # Netlink socket used for all endpoint/limit operations.
        self._mptcp = MPTCP()
        # Cache of id -> MPTCPEndpoint; rebuilt on every METHOD_NAME access.
        self._endpoints = {}

    @property
    def METHOD_NAME(self):
        """Query the kernel and return current endpoints as {id: MPTCPEndpoint}.

        Note: this re-queries the kernel on every access (the cache is reset
        first), so the returned mapping always reflects live state.
        """
        self._endpoints = {}
        nl_eps = self._mptcp.endpoint('show')
        for nl_ep in nl_eps:
            ep = MPTCPEndpoint.from_netlink(nl_ep)
            self._endpoints[ep.id] = ep
        return self._endpoints

    @property
    def subflows(self):
        """Kernel-wide MPTCP subflow limit (MPTCP_PM_ATTR_SUBFLOWS)."""
        nl_msg = self._mptcp.limits("show")[0]
        return nl_msg.get_attr("MPTCP_PM_ATTR_SUBFLOWS")

    @subflows.setter
    def subflows(self, n):
        # Sets the maximum number of additional subflows per connection.
        self._mptcp.limits("set", subflows=n)

    @property
    def add_addr_accepted(self):
        """Limit of accepted ADD_ADDR advertisements (MPTCP_PM_ATTR_RCV_ADD_ADDRS)."""
        nl_msg = self._mptcp.limits("show")[0]
        return nl_msg.get_attr("MPTCP_PM_ATTR_RCV_ADD_ADDRS")

    @add_addr_accepted.setter
    def add_addr_accepted(self, n):
        self._mptcp.limits("set", add_addr_accepted=n)

    def add_endpoints(self, endpoint_ips: List[BaseIpAddress], flags: MPTCPFlags):
        """Register each IP address as an MPTCP endpoint with the given flags.

        Addresses with a family other than AF_INET/AF_INET6 are silently
        skipped.
        """
        for ip in endpoint_ips:
            if ip.family == AF_INET:
                self._mptcp.endpoint("add", addr4=str(ip), flags=flags)
            elif ip.family == AF_INET6:
                self._mptcp.endpoint("add", addr6=str(ip), flags=flags)

    def delete_all(self):
        """Flush all configured MPTCP endpoints; returns the netlink reply."""
        r = self._mptcp.endpoint("flush")
        return r
"""Base provider module for all Lexicon providers"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
from re import Pattern
from typing import Any, Dict, List, Optional, Union
from lexicon.config import ConfigResolver, legacy_config_resolver
class Provider(ABC):
    """
    This is the abstract class for all lexicon Providers.
    It provides common functionality and ensures that all implemented
    Providers follow a standard ducktype.

    All standardized options will be provided here as defaults, but can be
    overwritten by environmental variables and cli arguments.

    Common options are:
        action, domain, type, name, content, ttl, priority, identifier

    The provider_env_cli_options will also contain any Provider specific
    options (auth_username, auth_token, auth_password, ...).

    :param config: is a ConfigResolver object that contains all the options
                   for this provider, merged from CLI and Env variables.
    """

    def __init__(self, config: Union[ConfigResolver, Dict]):
        if not isinstance(config, ConfigResolver):
            # Legacy path: a plain dict was passed instead of a ConfigResolver.
            # Wrap it so the Provider API always sees one type.
            # The `provider`/`provider_name` key is normally injected by the
            # Client; when a Provider is used directly it may be missing, so
            # fall back to this module's name.
            if not config.get("provider_name") and not config.get("provider"):
                config["provider_name"] = __name__
            self.config = legacy_config_resolver(config)
        else:
            self.config = config

        # Default ttl
        self.config.with_dict({"ttl": 3600})

        self.provider_name = self.config.resolve(
            "lexicon:provider_name"
        ) or self.config.resolve("lexicon:provider")
        self.domain = str(self.config.resolve("lexicon:domain"))
        # Resolved lazily by authenticate() in concrete providers.
        self.domain_id = None

    # Provider API: instance methods
    @abstractmethod
    def METHOD_NAME(self) -> None:
        """
        Authenticate against provider,
        Make any requests required to get the domain's id for this provider,
        so it can be used in subsequent calls.
        Should throw AuthenticationError or requests.HTTPError if
        authentication fails for any reason, or if the domain does not exist.
        """

    def cleanup(self) -> None:
        """
        Clean any relevant resource before this provider instance is closed.
        """

    @abstractmethod
    def create_record(self, rtype: str, name: str, content: str) -> bool:
        """
        Create record. If record already exists with the same content, do nothing.
        """

    @abstractmethod
    def list_records(
        self,
        rtype: Optional[str] = None,
        name: Optional[str] = None,
        content: Optional[str] = None,
    ) -> List[Dict]:
        """
        List all records. Return an empty list if no records found
        type, name and content are used to filter records.
        If possible filter during the query, otherwise filter after response is received.
        """

    @abstractmethod
    def update_record(
        self,
        identifier: Optional[str] = None,
        rtype: Optional[str] = None,
        name: Optional[str] = None,
        content: Optional[str] = None,
    ) -> bool:
        """
        Update a record. Identifier must be specified.
        """

    @abstractmethod
    def delete_record(
        self,
        identifier: Optional[str] = None,
        rtype: Optional[str] = None,
        name: Optional[str] = None,
        content: Optional[str] = None,
    ) -> bool:
        """
        Delete an existing record.
        If record does not exist, do nothing.
        If an identifier is specified, use it, otherwise do a lookup using type, name and content.
        """

    # Provider API: static methods
    @staticmethod
    @abstractmethod
    def get_nameservers() -> Union[List[str], List[Pattern]]:
        """
        Return the list of nameservers for this DNS provider
        """

    @staticmethod
    @abstractmethod
    def configure_parser(parser: ArgumentParser) -> None:
        """
        Configure the given parser for the provider needs
        (e.g. specific CLI flags for auth)
        """

    # Helpers
    def _request(
        self,
        action: str = "GET",
        url: str = "/",
        data: Optional[Dict] = None,
        query_params: Optional[Dict] = None,
    ) -> Any:
        """Execute an HTTP request against the DNS provider API"""
        raise NotImplementedError(
            "You must implement _request() to use _get()/_post()/_put()/_patch()/_delete() methods."
        )

    def _get(self, url: str = "/", query_params: Optional[Dict] = None) -> Any:
        return self._request("GET", url, query_params=query_params)

    def _post(
        self,
        url: str = "/",
        data: Optional[Dict] = None,
        query_params: Optional[Dict] = None,
    ) -> Any:
        return self._request("POST", url, data=data, query_params=query_params)

    def _put(
        self,
        url: str = "/",
        data: Optional[Dict] = None,
        query_params: Optional[Dict] = None,
    ) -> Any:
        return self._request("PUT", url, data=data, query_params=query_params)

    def _patch(
        self,
        url: str = "/",
        data: Optional[Dict] = None,
        query_params: Optional[Dict] = None,
    ) -> Any:
        return self._request("PATCH", url, data=data, query_params=query_params)

    def _delete(self, url: str = "/", query_params: Optional[Dict] = None) -> Any:
        return self._request("DELETE", url, query_params=query_params)

    def _fqdn_name(self, record_name: str) -> str:
        """Return *record_name* qualified with the domain and a trailing dot."""
        # strip trailing period from fqdn if present
        record_name = record_name.rstrip(".")
        # Qualify the name unless it already is the domain or ends with
        # ".<domain>". Matching on the label boundary avoids wrongly treating
        # e.g. "notexample.com" as already being part of "example.com".
        if record_name != self.domain and not record_name.endswith(
            f".{self.domain}"
        ):
            record_name = f"{record_name}.{self.domain}"
        return f"{record_name}."  # return the fqdn name

    def _full_name(self, record_name: str) -> str:
        """Return *record_name* qualified with the domain (no trailing dot)."""
        # strip trailing period from fqdn if present
        record_name = record_name.rstrip(".")
        # Same label-boundary check as _fqdn_name.
        if record_name != self.domain and not record_name.endswith(
            f".{self.domain}"
        ):
            record_name = f"{record_name}.{self.domain}"
        return record_name

    def _relative_name(self, record_name: str) -> str:
        """Return *record_name* relative to the domain ("" for the apex)."""
        # strip trailing period from fqdn if present
        record_name = record_name.rstrip(".")
        if record_name == self.domain:
            return ""
        # Only strip the suffix on a label boundary; an unrelated name that
        # merely ends with the domain's characters is returned unchanged.
        if record_name.endswith(f".{self.domain}"):
            record_name = record_name[: -len(self.domain) - 1].rstrip(".")
        return record_name

    def _clean_TXT_record(self, record: Dict) -> Dict:
        """Strip the surrounding quotes some providers put on TXT content."""
        if record["type"] == "TXT":
            # Some providers have quotes around the TXT records,
            # so we're going to remove those extra quotes
            record["content"] = record["content"][1:-1]
        return record

    def _get_lexicon_option(self, option: str) -> Any:
        return self.config.resolve(f"lexicon:{option}")

    def _get_provider_option(self, option: str) -> Any:
        return self.config.resolve(f"lexicon:{self.provider_name}:{option}")
"""A class for abstracting and managing point model results in the form of
observation point data."""
import os
import pickle
import re
from dataclasses import dataclass, field
from typing import Union
import pandas as pd
from dataretrieval import nwis
from ILAMB import ilamblib as il
def is_binary_file(filename: str) -> bool:
    """Heuristically decide whether *filename* contains binary (non-text) data.

    Reads the first 1 KiB and reports True when any byte falls outside a
    whitelist of typical text bytes (printable latin-1 plus common control
    characters such as tab, newline, and escape).
    """
    text_bytes = bytearray(
        {7, 8, 9, 10, 12, 13, 27} | set(range(0x20, 0x100)) - {0x7F}
    )
    with open(filename, "rb") as handle:
        head = handle.read(1024)
    # translate() deletes all whitelisted bytes; anything left is non-text.
    return bool(head.translate(None, text_bytes))
@dataclass
class ModelPointResult:
    """A class for abstracting and managing point model results.

    Fixes in this revision:
    * ``if site:`` evaluated ``bool(pd.NA)``, which raises ``TypeError`` as
      soon as a file has no mapped sitecode; replaced with an identity check.
    * Removed a stray trailing artifact token.
    """

    # Display name for this model run.
    name: str = "none"
    # Plot color as an RGB tuple.
    color: tuple[float] = (0, 0, 0)
    # Mapping of external variable name -> list of model variable synonyms.
    synonyms: dict = field(init=False, repr=False, default_factory=dict)
    # Table of (variable, unit, column, sitecode) rows filled by find_files().
    variables: pd.DataFrame = field(
        init=False, repr=False, default_factory=lambda: None
    )
    # Site metadata table indexed by sitecode, filled by find_files().
    sites: pd.DataFrame = field(init=False, repr=True, default_factory=lambda: None)
    # Time origin used to decode day-offset time columns.
    origin: pd.Timestamp = field(
        init=False,
        repr=False,
        default_factory=lambda: pd.Timestamp("1980-01-01 00:00:00"),
    )

    def find_files(self, path: Union[str, list[str]], file_to_site: dict[str] = None):
        """Walk *path* (or list of paths) and register all model CSV files.

        Binary files are skipped. When *file_to_site* maps a filename to a
        USGS sitecode, the site's metadata is fetched from NWIS and recorded.
        Returns self for chaining.
        """
        if isinstance(path, str):
            path = [path]
        model_data = []
        site_data = []
        for file_path in path:
            for root, _, files in os.walk(file_path, followlinks=True):
                for filename in files:
                    filepath = os.path.join(root, filename)
                    if is_binary_file(filepath):
                        continue
                    site = pd.NA
                    if file_to_site:
                        site = (
                            file_to_site[filename]
                            if filename in file_to_site
                            else pd.NA
                        )
                    # bool(pd.NA) raises TypeError, so test identity explicitly.
                    if site is not pd.NA:
                        rec = nwis.get_record(site=site, service="site")
                        assert len(rec) == 1
                        rec = rec.iloc[0]
                        site_data.append(
                            {
                                "sitecode": site,
                                "name": rec["station_nm"],
                                "lat": rec["dec_lat_va"],
                                "lon": rec["dec_long_va"],
                                "huc8": f"{rec['huc_cd']:08d}",
                                "path": filepath,
                            }
                        )
                    # Read only the header row to discover the columns.
                    dfv = pd.read_csv(filepath, comment="#", nrows=0)
                    for key in dfv.columns:
                        # Columns are named like "variable [unit]".
                        match = re.search(r"(.*)\s\[(.*)\]", key)
                        if not match:
                            continue
                        model_data.append(
                            {
                                "variable": match.group(1),
                                "unit": match.group(2),
                                "column": key,
                                "sitecode": site,
                            }
                        )
        self.variables = pd.DataFrame(model_data)
        # NOTE(review): if site_data is empty this raises KeyError on
        # set_index("sitecode") — confirm whether callers guarantee at least
        # one mapped site.
        self.sites = pd.DataFrame(site_data).set_index("sitecode")
        return self

    def METHOD_NAME(
        self,
        vname: str,
        sitecode: str,
        synonyms: Union[str, list[str]] = None,
        frequency: str = "ununsed",
    ):
        """Search the model database for the specified variable.

        At the moment, the entire csv file is read and only the requested
        column is returned, resampled to daily means. We may want to cache
        read-in dataframes to trade memory for time.

        :raises ValueError: if the sitecode or the variable is unknown.
        """
        dfv = self.variables[(self.variables["sitecode"] == sitecode)]
        if len(dfv) == 0:
            raise ValueError("The given sitecode is not part of this model result.")
        # Synonym handling, possibly move to a separate function:
        # collect the requested name, any caller-supplied synonyms, and any
        # registered synonyms for either.
        possible = [vname]
        if isinstance(synonyms, str):
            possible.append(synonyms)
        elif isinstance(synonyms, list):
            possible += synonyms
        possible_syms = [p for p in possible if p in self.synonyms]
        possible += [var for syms in possible_syms for var in self.synonyms[syms]]
        found = [p for p in possible if p in dfv["variable"].unique()]
        if len(found) == 0:
            raise ValueError(f"Variable '{vname}' not found in model '{self.name}'")
        found = found[0]
        dfv = dfv[dfv["variable"] == found].iloc[0]
        # Process csv file: locate the time column and decode it either as a
        # day offset from self.origin or as a parseable datetime string.
        dfo = pd.read_csv(self.sites.loc[sitecode, "path"], comment="#")
        time_col = [c for c in dfo.columns if ("time" in c or "date" in c)]
        time_col = time_col[0]
        new_time_col = "time"
        try:
            dfo[new_time_col] = pd.to_datetime(
                dfo[time_col], unit="D", origin=self.origin
            )
        except (TypeError, ValueError):
            dfo[new_time_col] = pd.to_datetime(dfo[time_col])
        dfo = dfo.rename(columns={dfv["column"]: vname})
        dfo = dfo.set_index(new_time_col)
        dfo = dfo.tz_localize(None)
        # Collapse to daily means; unit metadata rides along in attrs.
        dfo = dfo.groupby(pd.Grouper(freq="D")).mean(numeric_only=True)
        dfo.attrs["unit"] = dfv["unit"]
        dfo = dfo[vname]
        return dfo

    def add_synonym(self, ats_variable: str, other_variable: str):
        """Add synonyms, preference given to earlier definitions."""
        assert ats_variable in self.variables["variable"].unique()
        if other_variable not in self.synonyms:
            self.synonyms[other_variable] = []
        self.synonyms[other_variable].append(ats_variable)

    def to_pickle(self, filename: str):
        """Serialize this object's state to *filename*."""
        with open(filename, mode="wb") as pkl:
            pickle.dump(self.__dict__, pkl)

    def read_pickle(self, filename: str):
        """Return a new instance restored from a to_pickle() file."""
        with open(filename, mode="rb") as pkl:
            # pylint: disable=no-value-for-parameter
            obj = self.__new__(self.__class__)
            obj.__dict__.update(pickle.load(pkl))
        return obj

    def extractTimeSeries(self, *args, **kwargs):
        """Gridded extraction is not supported for point results."""
        raise il.VarNotInModel(
            "The point model object does not yet handle gridded output."
        )
import unittest
import requests
from unittest import mock
import tap_hubspot
class TestRequestTimeoutValue(unittest.TestCase):
    """get_request_timeout() must honor configured values and fall back to 300s."""

    def _assert_timeout(self, configured, expected):
        # Seed CONFIG with the given request_timeout and check the resolution.
        tap_hubspot.CONFIG.update({"request_timeout": configured})
        self.assertEqual(tap_hubspot.get_request_timeout(), expected)

    def test_integer_request_timeout_in_config(self):
        """An integer request_timeout from config is used (as a float)."""
        self._assert_timeout(100, 100.0)

    def test_float_request_timeout_in_config(self):
        """A float request_timeout from config is used as-is."""
        self._assert_timeout(100.5, 100.5)

    def test_string_request_timeout_in_config(self):
        """A numeric string request_timeout from config is converted and used."""
        self._assert_timeout("100", 100.0)

    def test_empty_string_request_timeout_in_config(self):
        """An empty-string request_timeout falls back to the 300s default."""
        self._assert_timeout("", 300)

    def test_zero_request_timeout_in_config(self):
        """A zero request_timeout falls back to the 300s default."""
        self._assert_timeout(0, 300)

    def test_zero_string_request_timeout_in_config(self):
        """A zero-as-string request_timeout falls back to the 300s default."""
        self._assert_timeout('0', 300)

    def test_no_request_timeout_in_config(self):
        """With no request_timeout key at all, the 300s default applies."""
        tap_hubspot.CONFIG = {}
        self.assertEqual(tap_hubspot.get_request_timeout(), 300)
@mock.patch("time.sleep")
class TestRequestTimeoutBackoff(unittest.TestCase):
    """Each HTTP helper must retry exactly 5 times on requests.exceptions.Timeout.

    (Removes a stray ' |' artifact that trailed the final assertion.)
    """

    @mock.patch('requests.Session.send', side_effect = requests.exceptions.Timeout)
    @mock.patch("requests.Request.prepare")
    @mock.patch('tap_hubspot.get_params_and_headers', return_value = ({}, {}))
    def test_request_timeout_backoff(self, mocked_get, mocked_prepare, mocked_send, mocked_sleep):
        """
        Verify request function is backoff for only 5 times on Timeout exception.
        """
        try:
            tap_hubspot.request('dummy_url', {})
        except Exception:
            # The final Timeout propagates after retries are exhausted.
            pass
        # Verify that Session.send is called 5 times
        self.assertEqual(mocked_send.call_count, 5)

    @mock.patch('tap_hubspot.get_params_and_headers', return_value = ({}, {}))
    @mock.patch('requests.post', side_effect = requests.exceptions.Timeout)
    def test_request_timeout_backoff_for_post_search_endpoint(self, mocked_post, mocked_get, mocked_sleep):
        """
        Verify post_search_endpoint function is backoff for only 5 times on Timeout exception.
        """
        try:
            tap_hubspot.post_search_endpoint('dummy_url', {})
        except Exception:
            pass
        # Verify that requests.post is called 5 times
        self.assertEqual(mocked_post.call_count, 5)

    @mock.patch('requests.post', side_effect = requests.exceptions.Timeout)
    def METHOD_NAME(self, mocked_post, mocked_sleep):
        """
        Verify request function is backoff for only 5 times instead of 25 times
        on Timeout exception thrown from `acquire_access_token_from_refresh_token`.
        Here get_params_and_headers is called from the request method and
        acquire_access_token_from_refresh_token from get_params_and_headers,
        so the backoff decorators must not multiply.
        """
        try:
            tap_hubspot.post_search_endpoint('dummy_url', {})
        except Exception:
            pass
        # Verify that requests.post is called 5 times
        self.assertEqual(mocked_post.call_count, 5)
# encoding: utf-8
import mock
import pytest
import sqlalchemy.exc
import ckan.lib.jobs as jobs
import ckan.plugins as p
import ckan.tests.factories as factories
import ckan.tests.helpers as helpers
import ckanext.datastore.backend as backend
import ckanext.datastore.backend.postgres as db
class TestCreateIndexes(object):
    """Unit tests for create_indexes(): the full-text-search index method
    (gist/gin) and language must follow config defaults and per-call
    overrides."""

    @pytest.mark.ckan_config("ckan.datastore.default_fts_index_method", None)
    def test_creates_fts_index_using_gist_by_default(self):
        """With no configured method, the _full_text index uses gist."""
        connection = mock.MagicMock()
        context = {"connection": connection}
        resource_id = "resource_id"
        data_dict = {"resource_id": resource_id}

        db.create_indexes(context, data_dict)

        self._assert_created_index_on(
            "_full_text", connection, resource_id, method="gist"
        )

    @pytest.mark.ckan_config("ckan.datastore.default_fts_index_method", "gin")
    def test_default_fts_index_method_can_be_overwritten_by_config_var(self):
        """The configured default_fts_index_method (gin here) wins over gist."""
        connection = mock.MagicMock()
        context = {"connection": connection}
        resource_id = "resource_id"
        data_dict = {"resource_id": resource_id}

        db.create_indexes(context, data_dict)

        self._assert_created_index_on(
            "_full_text", connection, resource_id, method="gin"
        )

    @pytest.mark.ckan_config("ckan.datastore.default_fts_lang", None)
    @mock.patch("ckanext.datastore.backend.postgres._get_fields")
    def test_creates_fts_index_on_all_fields_except_dates_nested_and_arrays_with_english_as_default(
        self, _get_fields
    ):
        """Text/number columns get a to_tsvector index in english; date,
        nested, array and timestamp columns are skipped."""
        _get_fields.return_value = [
            {"id": "text", "type": "text"},
            {"id": "number", "type": "number"},
            {"id": "nested", "type": "nested"},
            {"id": "date", "type": "date"},
            {"id": "text array", "type": "text[]"},
            {"id": "timestamp", "type": "timestamp"},
        ]
        connection = mock.MagicMock()
        context = {"connection": connection}
        resource_id = "resource_id"
        data_dict = {"resource_id": resource_id}

        db.create_indexes(context, data_dict)

        self._assert_created_index_on(
            "text", connection, resource_id, "english"
        )
        # Non-text columns are cast to text before being indexed.
        self._assert_created_index_on(
            "number", connection, resource_id, "english", cast=True
        )

    @pytest.mark.ckan_config("ckan.datastore.default_fts_lang", "simple")
    @mock.patch("ckanext.datastore.backend.postgres._get_fields")
    def test_creates_fts_index_on_textual_fields_can_overwrite_lang_with_config_var(
        self, _get_fields
    ):
        """The configured default_fts_lang overrides the english default."""
        _get_fields.return_value = [{"id": "foo", "type": "text"}]
        connection = mock.MagicMock()
        context = {"connection": connection}
        resource_id = "resource_id"
        data_dict = {"resource_id": resource_id}

        db.create_indexes(context, data_dict)

        self._assert_created_index_on("foo", connection, resource_id, "simple")

    @pytest.mark.ckan_config("ckan.datastore.default_fts_lang", "simple")
    @mock.patch("ckanext.datastore.backend.postgres._get_fields")
    def test_creates_fts_index_on_textual_fields_can_overwrite_lang_using_lang_param(
        self, _get_fields
    ):
        """A per-call "language" entry overrides both config and the default."""
        _get_fields.return_value = [{"id": "foo", "type": "text"}]
        connection = mock.MagicMock()
        context = {"connection": connection}
        resource_id = "resource_id"
        data_dict = {"resource_id": resource_id, "language": "french"}

        db.create_indexes(context, data_dict)

        self._assert_created_index_on("foo", connection, resource_id, "french")

    def _assert_created_index_on(
        self,
        field,
        connection,
        resource_id,
        lang=None,
        cast=False,
        method="gist",
    ):
        """Assert that some connection.execute() call contained the CREATE
        INDEX SQL fragment expected for *field* (optionally cast to text and
        wrapped in to_tsvector when *lang* is given)."""
        field = u'"{0}"'.format(field)
        if cast:
            field = u"cast({0} AS text)".format(field)
        if lang is not None:
            sql_str = (
                u'ON "resource_id" '
                u"USING {method}(to_tsvector('{lang}', {field}))"
            )
            sql_str = sql_str.format(method=method, lang=lang, field=field)
        else:
            sql_str = u"USING {method}({field})".format(
                method=method, field=field
            )
        calls = connection.execute.call_args_list
        # Keep any call whose first positional argument contains the fragment.
        was_called = [call for call in calls if call[0][0].find(sql_str) != -1]

        assert was_called, (
            "Expected 'connection.execute' to have been "
            "called with a string containing '%s'" % sql_str
        )
@mock.patch("ckanext.datastore.backend.postgres._get_fields")
def test_upsert_with_insert_method_and_invalid_data(mock_get_fields_function):
    """upsert_data() should raise InvalidDataError if given invalid data.

    If the type of a field is numeric and upsert_data() is given a whitespace
    value like " ", it should raise DataError.

    In this case we're testing with "method": "insert" in the data_dict.
    """
    # Simulate the DB driver rejecting the bad numeric value.
    mock_connection = mock.Mock()
    mock_connection.execute.side_effect = sqlalchemy.exc.DataError(
        "statement", "params", "orig", connection_invalidated=False
    )

    context = {"connection": mock_connection}
    data_dict = {
        "fields": [{"id": "value", "type": "numeric"}],
        "records": [
            {"value": 0},
            {"value": 1},
            {"value": 2},
            {"value": 3},
            {"value": " "},  # Invalid numeric value.
            {"value": 5},
            {"value": 6},
            {"value": 7},
        ],
        "method": "insert",
        "resource_id": "fake-resource-id",
    }
    mock_get_fields_function.return_value = data_dict["fields"]

    # The backend must translate the driver-level DataError into its own
    # InvalidDataError.
    with pytest.raises(backend.InvalidDataError):
        db.upsert_data(context, data_dict)
class TestGetAllResourcesIdsInDatastore(object):
    """get_all_resources_ids_in_datastore() lists only DataStore-backed resources."""

    @pytest.mark.ckan_config(u"ckan.plugins", u"datastore")
    @pytest.mark.usefixtures(u"with_plugins", u"clean_db")
    def test_get_all_resources_ids_in_datastore(self):
        resource_with_data = factories.Resource()
        resource_without_data = factories.Resource()
        # Only push the first resource into the DataStore.
        helpers.call_action(
            "datastore_create", resource_id=resource_with_data["id"], force=True
        )

        resource_ids = backend.get_all_resources_ids_in_datastore()

        assert resource_with_data["id"] in resource_ids
        assert resource_without_data["id"] not in resource_ids
def datastore_job(res_id, value):
    """
    A background job that uses the Datastore.

    Inserts a single row {"value": value} into resource *res_id*; used to
    verify the DataStore is usable from inside a background worker process.
    """
    app = helpers._get_test_app()
    # The worker process may not have the plugin loaded yet.
    if not p.plugin_loaded(u"datastore"):
        p.load("datastore")
    data = {
        "resource_id": res_id,
        "method": "insert",
        "records": [{"value": value}],
    }

    # call_action needs an active request context in this test setup.
    with app.flask_app.test_request_context():
        helpers.call_action("datastore_upsert", **data)
class TestBackgroundJobs(helpers.RQTestBase):
    """
    Test correct interaction with the background jobs system.

    (Removes a stray ' |' artifact that trailed the final assertion.)
    """

    @pytest.mark.ckan_config(u"ckan.plugins", u"datastore")
    @pytest.mark.usefixtures(u"with_plugins", u"clean_db", u"with_request_context")
    def METHOD_NAME(self, app):
        """
        Test DataStore access from within a worker.
        """
        pkg = factories.Dataset()
        data = {
            "resource": {"package_id": pkg["id"]},
            "fields": [{"id": "value", "type": "int"}],
        }
        table = helpers.call_action("datastore_create", **data)
        res_id = table["resource_id"]

        # Enqueue three insert jobs, then drain the queue synchronously.
        for i in range(3):
            self.enqueue(datastore_job, args=[res_id, i])
        jobs.Worker().work(burst=True)

        # Aside from ensuring that the job succeeded, this also checks
        # that accessing the Datastore still works in the main process.
        result = helpers.call_action("datastore_search", resource_id=res_id)
        assert [0, 1, 2] == [r["value"] for r in result["records"]]
#!/usr/bin/env python3
#
# Copyright 2021 Kontain Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
""" test_release_remote
Test the release install process on a remote azure VM.
"""
import subprocess
import json
import logging
import argparse
import time
import os
# Resource group is unique per run (monotonic timestamp) so concurrent runs
# don't collide and cleanup can delete everything with one group delete.
RESOURCE_GROUP = f"kontain-release-testing-{time.monotonic_ns()}"
RESOURCE_GROUP_LOCATION = "westus"
TESTING_VM_NAME = "kontain-release-testing-vm"
#TESTING_VM_IMAGE = "Canonical:UbuntuServer:18.04-LTS:latest"
TESTING_VM_IMAGE = "Canonical:0001-com-ubuntu-server-focal:20_04-lts:latest"
TESTING_VM_SIZE = "Standard_D2s_v3"
TESTING_VM_ADMIN = "kontain"
# Version used when the run is triggered from a branch instead of a tag.
TESTING_DEFAULT_VERSION = "v0.1-test"
def validate_version(version):
    """Normalize and validate a release version string.

    Accepted shapes:
    * ``v*`` — returned unchanged.
    * ``refs/tags/v*`` (azure pipeline) — the ``refs/tags/`` prefix is stripped.
    * ``refs/heads/*`` (azure pipeline testing branch) — replaced by the
      default testing version.

    Anything else is returned as-is with a warning; ``None``/empty input is
    passed through so the caller falls back to the latest installed version.
    """
    logger = logging.getLogger("validate_version")
    if version is None or version == "":
        logger.warning(
            "No version is set. Will use default latest version from install")
        return version

    tag_prefix = "refs/tags/"
    if version.startswith(tag_prefix + "v"):
        clean = version[len(tag_prefix):]
    elif version.startswith("refs/heads/"):
        logger.warning(
            "Release is triggered via branch %s. Using default version %s", version, TESTING_DEFAULT_VERSION)
        clean = TESTING_DEFAULT_VERSION
    else:
        clean = version

    if not clean.startswith("v"):
        logger.warning(
            "Version %s is not conforming to v* pattern.", clean)
    return clean
def setup():
    """ setup
    * create a new resource group for testing. this is easier for cleanup.
    * create the vm needed for testing.

    Returns the public IP address of the freshly created VM (from the JSON
    document printed by `az vm create`).
    """
    logger = logging.getLogger("setup")
    logger.info("creating a new resource group...")
    subprocess.run([
        "az", "group", "create",
        "--location", RESOURCE_GROUP_LOCATION,
        "--name", RESOURCE_GROUP,
    ], check=True)
    logger.info("successfully created a new resource group")

    logger.info("creating a new vm for testing...")
    # Capture stdout: `az vm create` emits the VM description as JSON.
    ret = subprocess.run([
        "az", "vm", "create",
        "--resource-group", RESOURCE_GROUP,
        "--name", TESTING_VM_NAME,
        "--image", TESTING_VM_IMAGE,
        "--size", TESTING_VM_SIZE,
        "--admin-username", TESTING_VM_ADMIN,
    ], stdout=subprocess.PIPE, check=True)
    print(ret.stdout)
    logger.info("successfully created a new vm")
    output = json.loads(ret.stdout)
    return output["publicIpAddress"]
def clean_up():
    """ clean up deletes everything

    Deletes the whole testing resource group asynchronously (--no-wait);
    failures are tolerated (check=False) so cleanup never masks the test
    result.
    """
    logger = logging.getLogger("clean_up")
    logger.info("Starts to clean up")
    delete_cmd = [
        "az", "group", "delete",
        "-y", "--no-wait", "--debug",
        "--name", RESOURCE_GROUP,
    ]
    subprocess.run(delete_cmd, check=False)
    logger.info("Clean up successful")
def METHOD_NAME(remote_ip, cmd, logger):
    """ ssh_execute execute the cmd through ssh

    Runs *cmd* on the testing VM as the admin user; host-key checking is
    disabled because the VM is freshly created. Raises CalledProcessError on
    a non-zero exit status.
    """
    full_cmd = [
        "ssh",
        "-o", "StrictHostKeyChecking=no",
        "-o", "UserKnownHostsFile=/dev/null",
        f"{TESTING_VM_ADMIN}@{remote_ip}",
        cmd,
    ]
    logger.info("ssh execute: %s", full_cmd)
    subprocess.run(full_cmd, check=True)
def test(remote_ip, version, token):
    """ test
    Copy the local tests to the remote VM and execute.
    """
    logger = logging.getLogger("test")
    logger.info("start testing in %s", remote_ip)

    # Sometimes, the VM is not completely ready when IP address is returned. In
    # this case we need to retry for the first ssh command.
    max_retry = 3
    run = 0
    while run < max_retry:
        try:
            METHOD_NAME(remote_ip, "python3 --version", logger)
        except subprocess.CalledProcessError:
            if run + 1 == max_retry:
                raise
            logger.warning(
                "Failed ssh execute... Retry %d out of %d", run + 1, max_retry)
            time.sleep(30)
            continue
        else:
            # First ssh succeeded; the VM is reachable.
            break
        finally:
            # Runs on every path (including `continue`), counting the attempt.
            run += 1

    # Ship the local test directory to the VM.
    subprocess.run([
        "scp",
        "-o", "StrictHostKeyChecking=no",
        "-o", "UserKnownHostsFile=/dev/null",
        "-r",
        "test_release_local",
        f"{TESTING_VM_ADMIN}@{remote_ip}:~/"
    ], check=True)

    # Prepare the VM: install prerequisites and open up /dev/kvm and docker.
    METHOD_NAME(
        remote_ip, "sudo mkdir -p /opt/kontain ; sudo chown kontain /opt/kontain", logger)
    METHOD_NAME(remote_ip, "/usr/bin/cloud-init status --wait", logger)
    METHOD_NAME(remote_ip, "sudo apt-get update", logger)
    METHOD_NAME(remote_ip, "sudo apt-get install -y gcc docker.io libyajl2 libseccomp2 libcap2", logger)
    METHOD_NAME(remote_ip, "sudo chmod 666 /dev/kvm", logger)
    METHOD_NAME(remote_ip, f"sudo usermod -G docker {TESTING_VM_ADMIN}", logger)

    if version is None or version == "":
        version_flag = ""
    else:
        version_flag = f"--version {version}"

    # NOTE(review): f"...--token=${token}" renders as "--token=$<token value>",
    # and the remote shell will then try to expand "$<token value>" as a shell
    # variable — confirm whether "--token={token}" was intended.
    METHOD_NAME(
        remote_ip, f"cd test_release_local; python3 test_release_local.py {version_flag} --token=${token}", logger)
    logger.info("successfully tested")
def main():
    """Entry point: validate inputs, provision a VM, run tests, always clean up."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="version of km to be tested")
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)
    # GITHUB_RELEASE_TOKEN is required to get access to private repos. The
    # token is the Github Personal Access Token (PAT)
    token = os.environ.get("GITHUB_RELEASE_TOKEN")
    if token is None:
        raise ValueError("GITHUB_RELEASE_TOKEN is not set, cannot access private KM repo")
    try:
        version = validate_version(args.version)
        remote_ip = setup()
        test(remote_ip, version, token)
    finally:
        # Always delete the Azure resource group, even on failure, so broken
        # runs do not leak cloud resources.
        clean_up()


# IDIOM FIX: guard the entry point so importing this module does not
# immediately provision a VM and run the test suite.
if __name__ == "__main__":
    main()
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on November 6, 2017
@author: alfoa
comment: The ModelPlugIn Module is an Handler.
It inquires all the modules contained in the ./raven/contrib/plugins
and load the ones that refer to a model, constructing a '__interFaceDict' on the fly
"""
#for future compatibility with Python 3--------------------------------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
#End compatibility block for Python 3----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
import os
from glob import glob
import inspect
from collections import defaultdict
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from ..utils import utils
from .. import PluginManager
#Internal Modules End--------------------------------------------------------------------------------
# load the plugin_directory.xml and use that to establish the paths to the plugins
# for each of the plugin paths, load up the Entities that have models in them
## -> should we do this through some kind of a PluginLoader that registers the objects
## in other factories?
# Map from RAVEN entity type to the plugin base-class name that a plugin
# must provide in order to register as that entity type.
__basePluginClasses = {'ExternalModel':'ExternalModelPluginBase',
                       'Code': 'CodePluginBase',
                       'PostProcessor': 'PostProcessorPluginBase',
                       'SupervisedLearning': 'SupervisedLearningPlugin',
                      }
# {entity type -> {subtype name -> model class}}; filled during registration.
__interfaceDict = defaultdict(dict)
__knownTypes = [] # populate through registration
def METHOD_NAME(typeName, typeDict):
  """
    Registers new subtypes.
    @ In, typeName, str, name of entity subtype
    @ In, typeDict, dict, dictionary mapping names to models
  """
  # Bulk-register: merge the new subtypes into the interface map and record
  # each name as known (insertion order matches the incoming dict).
  __interfaceDict[typeName].update(typeDict)
  __knownTypes.extend(typeDict.keys())
def _registerAllPlugins():
  """
    Method to register all plugins
    @ In, None
    @ Out, None
  """
  # Only the entity-type keys matter here; the base-class names are not used
  # for registration itself.
  for entityType in __basePluginClasses:
    METHOD_NAME(entityType, PluginManager.getEntities(entityType))
_registerAllPlugins()
def knownTypes():
  """
    Method to return the list of known model plugins
    @ In, None
    @ Out, __knownTypes, list, the list of known types
  """
  # NOTE: returns the live module-level list (not a copy); callers must not
  # mutate it.
  return __knownTypes
def loadPlugin(Type, subType):
  """
    Tries to load the subType to make it available
    @ In, Type, string, the type of plugin main class (e.g. ExternalModel)
    @ In, subType, string, the subType of the plugin specialized class (e.g. CashFlow)
    @ Out, None
  """
  # The plugin name is whatever precedes the first dot of the subtype
  # (e.g. "CashFlow" from "CashFlow.Something").
  pluginName, _, _ = subType.partition(".")
  PluginManager.finishLoadPlugin(pluginName)
  _registerAllPlugins()
def returnPlugin(Type, subType, caller):
  """
    this allows the code(model) class to interact with a specific
    code for which the interface is present in the CodeInterfaces module
    @ In, Type, string, the type of plugin main class (e.g. ExternalModel)
    @ In, subType, string, the subType of the plugin specialized class (e.g. CashFlow)
    @ In, caller, instance, instance of the caller
    @ Out, returnPlugin, instance, a new instance of the requested plugin class
  """
  if subType not in knownTypes():
    # BUG FIX: the old message concatenated the undefined name "__base"
    # (raising NameError instead of the intended error) and printed Type
    # where subType belonged.
    caller.raiseAnError(NameError, 'not known type ' + Type + ' subType ' + subType)
  return __interfaceDict[Type][subType]()
import unittest
import numpy as np
import pandas as pd
import pytest
from autogluon.core.constants import BINARY, MULTICLASS, MULTICLASS_UPPER_LIMIT, REGRESSION
from autogluon.core.utils import infer_problem_type
from autogluon.core.utils.utils import generate_train_test_split
class TestInferProblemType(unittest.TestCase):
    """Unit tests for ``infer_problem_type``: invalid input (empty/NaN/inf),
    binary vs. multiclass vs. regression inference, and behavior around the
    MULTICLASS_UPPER_LIMIT cardinality threshold."""

    def test_infer_problem_type_empty(self):
        # An empty series carries no signal, so inference must fail loudly.
        with self.assertRaises(ValueError):
            infer_problem_type(pd.Series([], dtype="float"))

    def test_infer_problem_type_nan(self):
        with self.assertRaises(ValueError):
            infer_problem_type(pd.Series([np.nan]))

    def test_infer_problem_type_inf(self):
        with self.assertRaises(ValueError):
            infer_problem_type(pd.Series([np.inf]))

    def test_infer_problem_type_ninf(self):
        # BUG FIX: np.NINF was removed in NumPy 2.0; -np.inf is the
        # equivalent, version-stable spelling.
        with self.assertRaises(ValueError):
            infer_problem_type(pd.Series([-np.inf]))

    def test_infer_problem_type_binary(self):
        inferred_problem_type = infer_problem_type(pd.Series([-1, -1, 99, -1, -1, 99]))
        assert inferred_problem_type == BINARY

    def test_infer_problem_type_binary_with_nan(self):
        inferred_problem_type = infer_problem_type(pd.Series([-1, -1, 99, -1, -1, 99, np.nan]))
        assert inferred_problem_type == BINARY

    def test_infer_problem_type_str(self):
        inferred_problem_type = infer_problem_type(pd.Series(["a", "b", "c"], dtype=str))
        assert inferred_problem_type == MULTICLASS

    def test_infer_problem_type_category(self):
        inferred_problem_type = infer_problem_type(pd.Series(["a", "b", "c"], dtype="category"))
        assert inferred_problem_type == MULTICLASS

    def test_infer_problem_type_object(self):
        inferred_problem_type = infer_problem_type(pd.Series(["a", "b", "c"], dtype="object"))
        assert inferred_problem_type == MULTICLASS

    def test_infer_problem_type_multiclass_with_nan(self):
        inferred_problem_type = infer_problem_type(pd.Series(["a", "b", "c", np.nan]))
        assert inferred_problem_type == MULTICLASS

    def METHOD_NAME(self):
        # Many distinct floats (each repeated) exceed the multiclass limit,
        # so the column must be treated as regression.
        big_float_regression_series = pd.Series(np.repeat(np.linspace(0.0, 1.0, MULTICLASS_UPPER_LIMIT + 1), 2))
        inferred_problem_type = infer_problem_type(big_float_regression_series)
        assert inferred_problem_type == REGRESSION

    def test_infer_problem_type_small_float_data_multiclass(self):
        big_float_multiclass_series = pd.Series(np.repeat([1.0, 2.0, 3.0], MULTICLASS_UPPER_LIMIT - 1))
        inferred_problem_type = infer_problem_type(big_float_multiclass_series)
        assert inferred_problem_type == MULTICLASS

    def test_infer_problem_type_small_float_data_regression(self):
        small_float_regression_series = pd.Series(np.linspace(0.0, 1.0, MULTICLASS_UPPER_LIMIT - 1))
        inferred_problem_type = infer_problem_type(small_float_regression_series)
        assert inferred_problem_type == REGRESSION

    def test_infer_problem_type_big_integer_data_regression(self):
        big_integer_regression_series = pd.Series(np.repeat(np.arange(MULTICLASS_UPPER_LIMIT + 1), 2), dtype=np.int64)
        inferred_problem_type = infer_problem_type(big_integer_regression_series)
        assert inferred_problem_type == REGRESSION

    def test_infer_problem_type_small_integer_data_multiclass(self):
        small_integer_multiclass_series = pd.Series(np.repeat(np.arange(3), MULTICLASS_UPPER_LIMIT - 1), dtype=np.int64)
        inferred_problem_type = infer_problem_type(small_integer_multiclass_series)
        assert inferred_problem_type == MULTICLASS

    def test_infer_problem_type_small_integer_data_regression(self):
        small_integer_regression_series = pd.Series(np.arange(MULTICLASS_UPPER_LIMIT - 1), dtype=np.int64)
        inferred_problem_type = infer_problem_type(small_integer_regression_series)
        assert inferred_problem_type == REGRESSION
def test_generate_train_test_split_edgecase():
    """
    Test rare edge-cases when data has many classes or very few samples when doing train test splits.
    """
    data = pd.DataFrame(index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
    data["label"] = [0, 1, 1, 2, 3, 4, 5, 5, 5, 5, 5, 5]

    def _split_and_check(problem_type, test_size, exact_test_size):
        # Shared invariants: X/y stay aligned and sizes add up.
        X_train, X_test, y_train, y_test = generate_train_test_split(
            X=data, y=data["label"], problem_type=problem_type, test_size=test_size)
        assert len(X_train) == len(y_train)
        assert list(X_train.index) == list(y_train.index)
        assert len(X_test) == len(y_test)
        assert list(X_test.index) == list(y_test.index)
        if exact_test_size:
            assert len(X_test) == test_size
            assert len(X_train) == len(data) - test_size
        else:
            assert len(X_test) <= test_size
            assert len(X_train) == len(data) - len(X_test)

    # Normal case: regression should always work and hit the requested size.
    for test_size in range(1, 12):
        _split_and_check("regression", test_size, exact_test_size=True)

    # Edge-case: fewer test rows than classes. This only works because of
    # special try/except logic in `generate_train_test_split`.
    for test_size in range(1, 6):
        _split_and_check("multiclass", test_size, exact_test_size=False)

    # Normal multiclass case.
    for test_size in range(6, 7):
        _split_and_check("multiclass", test_size, exact_test_size=False)

    # Edge-case: fewer train rows than classes -- not enough training data to
    # keep at least one instance of every class in train, so this raises.
    # Note: Ideally this shouldn't raise, but the logic to avoid it is tricky
    # and the scenario should never occur in practice.
    for test_size in range(7, 12):
        with pytest.raises(ValueError):
            generate_train_test_split(X=data, y=data["label"], problem_type="multiclass", test_size=test_size)
import os
import shutil
import paddle
from ...analysis import TableLatencyPredictor
from .prune_model import get_sparse_model, get_prune_model
from .fake_ptq import post_quant_fake
from ...common.load_model import load_inference_model
import platform
def with_variable_shape(model_dir, model_filename=None, params_filename=None):
    """
    Whether the shape of model's input is variable.
    Args:
        model_dir(str): Directory containing the saved inference model.
        model_filename(str): specify model_filename if you don't want to use default name. Default : None.
        params_filename(str): specify params_filename if you don't want to use default name. Default : None.
    Returns:
        bool: Whether the shape of model's input is variable.
    """
    paddle.enable_static()
    exe = paddle.static.Executor(paddle.CPUPlace())
    inference_program, feed_target_names, fetch_targets = load_inference_model(
        model_dir,
        exe,
        model_filename=model_filename,
        params_filename=params_filename)
    for var_ in inference_program.list_vars():
        if var_.name in feed_target_names:
            # More than one -1 in the shape means a non-batch dimension is
            # also dynamic, i.e. the input shape is genuinely variable.
            if var_.shape.count(-1) > 1:
                return True
    # BUG FIX: previously fell off the end and returned None; make the
    # documented bool contract explicit. (The docstring also named a
    # nonexistent "path_prefix" parameter -- corrected to model_dir.)
    return False
def METHOD_NAME(executor,
                places,
                model_dir,
                model_filename,
                params_filename,
                hardware='SD710'):
    """
    Evaluating the latency of the model under various compression strategies.
    Args:
        executor: Paddle executor used to run the fake-quantization passes.
        places: Device places used by the prune/sparse passes.
        model_dir(str): The path of inference model that will be compressed, and
            the model and params that saved by ``paddle.static.io.save_inference_model``
            are under the path.
        model_filename(str, optional): The name of model file. If parameters
            are saved in separate files, set it as 'None'. Default: 'None'.
        params_filename(str, optional): The name of params file.
            When all parameters are saved in a single file, set it
            as filename. If parameters are saved in separate files,
            set it as 'None'. Default : 'None'.
        hardware(str): Target device.
    Returns:
        latency_dict(dict): The latency of the model under various compression strategies.
    """
    local_rank = paddle.distributed.get_rank()
    # Per-rank temp directories so concurrent ranks do not clobber each other.
    quant_model_path = f'quant_model_rank_{local_rank}_tmp'
    prune_model_path = f'prune_model_rank_{local_rank}_tmp'
    sparse_model_path = f'sparse_model_rank_{local_rank}_tmp'
    latency_dict = {}
    model_file = os.path.join(model_dir, model_filename)
    param_file = os.path.join(model_dir, params_filename)
    try:
        predictor = TableLatencyPredictor(hardware)
    except NotImplementedError:
        raise NotImplementedError(
            "Latency predictor cannot used on the platform: {}. That means you can not use latency predictor to select compress strategy automatically, you can set deploy_hardware to None or set compress strategy in the yaml".
            format(platform.system()))
    try:
        # Baseline fp32 latency of the uncompressed model.
        latency = predictor.predict(
            model_file=model_file, param_file=param_file, data_type='fp32')
        latency_dict.update({'origin_fp32': latency})
        paddle.enable_static()
        # Baseline int8 latency via fake post-training quantization.
        post_quant_fake(
            executor,
            model_dir=model_dir,
            model_filename=model_filename,
            params_filename=params_filename,
            save_model_path=quant_model_path,
            quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"],
            is_full_quantize=False,
            activation_bits=8,
            weight_bits=8)
        quant_model_file = os.path.join(quant_model_path, model_filename)
        quant_param_file = os.path.join(quant_model_path, params_filename)
        latency = predictor.predict(
            model_file=quant_model_file,
            param_file=quant_param_file,
            data_type='int8')
        latency_dict.update({'origin_int8': latency})
        # Structured pruning at several ratios, fp32 and quantized int8.
        for prune_ratio in [0.3, 0.4, 0.5, 0.6]:
            get_prune_model(
                executor,
                places,
                model_file=model_file,
                param_file=param_file,
                ratio=prune_ratio,
                save_path=prune_model_path)
            prune_model_file = os.path.join(prune_model_path, model_filename)
            prune_param_file = os.path.join(prune_model_path, params_filename)
            latency = predictor.predict(
                model_file=prune_model_file,
                param_file=prune_param_file,
                data_type='fp32')
            latency_dict.update({f'prune_{prune_ratio}_fp32': latency})
            post_quant_fake(
                executor,
                model_dir=prune_model_path,
                model_filename=model_filename,
                params_filename=params_filename,
                save_model_path=quant_model_path,
                quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"],
                is_full_quantize=False,
                activation_bits=8,
                weight_bits=8)
            quant_model_file = os.path.join(quant_model_path, model_filename)
            quant_param_file = os.path.join(quant_model_path, params_filename)
            latency = predictor.predict(
                model_file=quant_model_file,
                param_file=quant_param_file,
                data_type='int8')
            latency_dict.update({f'prune_{prune_ratio}_int8': latency})
        # Unstructured sparsity at several ratios, fp32 and quantized int8.
        for sparse_ratio in [0.70, 0.75, 0.80, 0.85, 0.90, 0.95]:
            get_sparse_model(
                executor,
                places,
                model_file=model_file,
                param_file=param_file,
                ratio=sparse_ratio,
                save_path=sparse_model_path)
            sparse_model_file = os.path.join(sparse_model_path, model_filename)
            sparse_param_file = os.path.join(sparse_model_path, params_filename)
            latency = predictor.predict(
                model_file=sparse_model_file,
                param_file=sparse_param_file,
                data_type='fp32')
            latency_dict.update({f'sparse_{sparse_ratio}_fp32': latency})
            post_quant_fake(
                executor,
                model_dir=sparse_model_path,
                model_filename=model_filename,
                params_filename=params_filename,
                save_model_path=quant_model_path,
                quantizable_op_type=["conv2d", "depthwise_conv2d", "mul"],
                is_full_quantize=False,
                activation_bits=8,
                weight_bits=8)
            quant_model_file = os.path.join(quant_model_path, model_filename)
            quant_param_file = os.path.join(quant_model_path, params_filename)
            latency = predictor.predict(
                model_file=quant_model_file,
                param_file=quant_param_file,
                data_type='int8')
            latency_dict.update({f'sparse_{sparse_ratio}_int8': latency})
        return latency_dict
    finally:
        # BUG FIX: cleanup used to run only on the success path, leaking the
        # per-rank temp directories whenever a predict/compress step raised.
        for tmp_path in (quant_model_path, prune_model_path, sparse_model_path):
            if os.path.exists(tmp_path):
                shutil.rmtree(tmp_path, ignore_errors=True)
#!/usr/bin/env python3
import os
import comment_util
import sys
from typing import Optional, Dict, Iterable
import requests
github_username = os.environ['GITHUB_USERNAME']
github_token = os.environ['GITHUB_TOKEN']
owner = os.environ['CIRCLE_PROJECT_USERNAME']
repo = os.environ['CIRCLE_PROJECT_REPONAME']
pr_number = os.environ.get('CIRCLE_PR_NUMBER')
commit = os.environ.get('CIRCLE_SHA1')
github = requests.Session()
github.auth = (github_username, github_token)
def find_pr() -> str:
    """
    Find the PR for this commit and return the API url
    """
    # Fast path: CircleCI told us the PR number directly.
    if pr_number:
        response = github.get(f'https://api.github.com/repos/{owner}/{repo}/pulls/{pr_number}')
        response.raise_for_status()
        return response.json()['url']

    def METHOD_NAME() -> Iterable[Dict]:
        # Yield every PR in the repo (any state), following GitHub's
        # Link-header pagination until there is no 'next' page.
        url = f'https://api.github.com/repos/{owner}/{repo}/pulls'
        while True:
            response = github.get(url, params={'state': 'all'})
            response.raise_for_status()
            for pr in response.json():
                yield pr
            if 'next' in response.links:
                url = response.links['next']['url']
            else:
                return

    # Slow path: scan all PRs for one whose merge commit is the commit being
    # built (i.e. this build is on the target branch after a merge).
    for pr in METHOD_NAME():
        if pr['merge_commit_sha'] == commit:
            return pr['url']
    raise Exception(f'No PR found in {owner}/{repo} for commit {commit} (was it pushed directly to the target branch?)')
class TerraformComment:
    """
    The GitHub comment for this specific terraform plan
    """
    def __init__(self, pr_url: str):
        # pr_url: GitHub API url of the pull request this plan belongs to.
        self._plan = None
        self._status = None
        # Resolve the PR's issue-comments endpoint from the PR API object.
        response = github.get(pr_url)
        response.raise_for_status()
        self._issue_url = response.json()['_links']['issue']['href'] + '/comments'
        response = github.get(self._issue_url)
        response.raise_for_status()
        self._comment_url = None
        # Adopt an existing comment by this bot user that matches our
        # identifier, recovering its recorded plan and status.
        # NOTE(review): only the first page of comments is scanned here --
        # presumably sufficient in practice, but confirm for busy PRs.
        for comment in response.json():
            if comment['user']['login'] == github_username:
                match = comment_util.re_comment_match(self._comment_identifier,
                                                      comment['body'])
                if match:
                    self._comment_url = comment['url']
                    self._plan = match.group(1).strip()
                    self._status = match.group(2).strip()
                    return

    @property
    def _comment_identifier(self):
        # Heading text that uniquely identifies this plan's comment: either
        # an explicit label, or a description built from path/workspace/args.
        if self.label:
            return f'Terraform plan for __{self.label}__'
        label = f'Terraform plan in __{self.path}__'
        if self.workspace != 'default':
            label += f' in the __{self.workspace}__ workspace'
        if self.init_args:
            label += f'\nUsing init args: `{self.init_args}`'
        if self.plan_args:
            label += f'\nUsing plan args: `{self.plan_args}`'
        return label

    # The following properties surface job configuration from the environment
    # (CircleCI built-ins plus variables set by the calling workflow).
    @property
    def path(self) -> str:
        return os.environ.get('path')

    @property
    def build_url(self) -> str:
        return os.environ['CIRCLE_BUILD_URL']

    @property
    def build_num(self) -> str:
        return os.environ['CIRCLE_BUILD_NUM']

    @property
    def job_name(self) -> str:
        return os.environ['CIRCLE_JOB']

    @property
    def workspace(self) -> str:
        return os.environ.get('workspace')

    @property
    def label(self) -> str:
        return os.environ.get('label')

    @property
    def init_args(self) -> str:
        return os.environ.get('INIT_ARGS')

    @property
    def plan_args(self) -> str:
        return os.environ.get('PLAN_ARGS')

    @property
    def plan(self) -> Optional[str]:
        return self._plan

    @plan.setter
    def plan(self, plan: str) -> None:
        # Setting the plan (or status) immediately pushes the comment to GitHub.
        self._plan = plan.strip()
        self._update_comment()

    @property
    def status(self) -> Optional[str]:
        return self._status

    @status.setter
    def status(self, status: str) -> None:
        self._status = status.strip()
        self._update_comment()

    def _update_comment(self):
        # Render the comment body, then create it or patch the existing one.
        comment = comment_util.comment_for_pr(self._comment_identifier,
                                              self.plan)
        if self.status:
            comment += '\n' + self.status
        else:
            comment += (f'\nPlan generated in CircleCI Job '
                        f'[{self.job_name} {self.build_num}]'
                        f'({self.build_url})')
        if self._comment_url is None:
            # Create a new comment
            response = github.post(self._issue_url, json={'body': comment})
        else:
            # Update existing comment
            response = github.patch(self._comment_url, json={'body': comment})
        response.raise_for_status()
        self._comment_url = response.json()['url']
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print(f'''Usage:
    {sys.argv[0]} plan <plan.txt
    {sys.argv[0]} status <status.txt
    {sys.argv[0]} get >plan.txt''')
        # BUG FIX: previously fell through after printing usage and crashed
        # with IndexError on sys.argv[1]; exit with an error code instead.
        sys.exit(1)
    comment = TerraformComment(find_pr())
    if sys.argv[1] == 'plan':
        comment.plan = sys.stdin.read().strip()
    elif sys.argv[1] == 'status':
        if comment.plan:
            comment.status = sys.stdin.read().strip()
            print(comment.status)
    elif sys.argv[1] == 'get':
        print(comment.plan)
# Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import hashlib
import logging
import threading
import os
import sys
import botocore
import botocore.exceptions
from s3transfer.manager import TransferManager
from s3transfer.subscribers import BaseSubscriber
from awscli.compat import collections_abc
LOG = logging.getLogger(__name__)
class NoSuchBucketError(Exception):
    """Raised when the target S3 bucket for an upload does not exist."""

    # Message template, formatted with the keyword arguments passed to the
    # constructor (expects at least ``bucket_name``).
    fmt = ("S3 Bucket does not exist. "
           "Execute the command to create a new bucket"
           "\n"
           "aws s3 mb s3://{bucket_name}")

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        super().__init__(self.fmt.format(**kwargs))
class S3Uploader(object):
    """
    Class to upload objects to S3 bucket that use versioning. If bucket
    does not already use versioning, this class will turn on versioning.
    """
    @property
    def artifact_metadata(self):
        """
        Metadata to attach to the object(s) uploaded by the uploader.
        """
        return self._artifact_metadata

    @artifact_metadata.setter
    def artifact_metadata(self, val):
        # Must be a mapping (or None) so it can be passed straight to S3.
        if val is not None and not isinstance(val, collections_abc.Mapping):
            raise TypeError("Artifact metadata should be in dict type")
        self._artifact_metadata = val

    def __init__(self, s3_client,
                 bucket_name,
                 prefix=None,
                 kms_key_id=None,
                 force_upload=False,
                 transfer_manager=None):
        # force_upload: skip the "already exists" check and always re-upload.
        self.bucket_name = bucket_name
        self.prefix = prefix
        self.kms_key_id = kms_key_id or None
        self.force_upload = force_upload
        self.s3 = s3_client
        self.transfer_manager = transfer_manager
        if not transfer_manager:
            self.transfer_manager = TransferManager(self.s3)
        self._artifact_metadata = None

    def upload(self, file_name, remote_path):
        """
        Uploads given file to S3
        :param file_name: Path to the file that will be uploaded
        :param remote_path:  be uploaded
        :return: VersionId of the latest upload
        """
        if self.prefix and len(self.prefix) > 0:
            remote_path = "{0}/{1}".format(self.prefix, remote_path)
        # Check if a file with same data exists
        if not self.force_upload and self.file_exists(remote_path):
            LOG.debug("File with same data already exists at {0}. "
                      "Skipping upload".format(remote_path))
            return self.METHOD_NAME(remote_path)
        try:
            # Default to regular server-side encryption unless customer has
            # specified their own KMS keys
            additional_args = {
                "ServerSideEncryption": "AES256"
            }
            if self.kms_key_id:
                additional_args["ServerSideEncryption"] = "aws:kms"
                additional_args["SSEKMSKeyId"] = self.kms_key_id
            if self.artifact_metadata:
                additional_args["Metadata"] = self.artifact_metadata
            print_progress_callback = \
                ProgressPercentage(file_name, remote_path)
            future = self.transfer_manager.upload(file_name,
                                                  self.bucket_name,
                                                  remote_path,
                                                  additional_args,
                                                  [print_progress_callback])
            # Block until the (possibly multipart) transfer finishes.
            future.result()
            return self.METHOD_NAME(remote_path)
        except botocore.exceptions.ClientError as ex:
            error_code = ex.response["Error"]["Code"]
            if error_code == "NoSuchBucket":
                raise NoSuchBucketError(bucket_name=self.bucket_name)
            raise ex

    def upload_with_dedup(self, file_name, extension=None):
        """
        Makes and returns name of the S3 object based on the file's MD5 sum
        :param file_name: file to upload
        :param extension: String of file extension to append to the object
        :return: S3 URL of the uploaded object
        """
        # This construction of remote_path is critical to preventing duplicate
        # uploads of same object. Uploader will check if the file exists in S3
        # and re-upload only if necessary. So the template points to same file
        # in multiple places, this will upload only once
        filemd5 = self.file_checksum(file_name)
        remote_path = filemd5
        if extension:
            remote_path = remote_path + "." + extension
        return self.upload(file_name, remote_path)

    def file_exists(self, remote_path):
        """
        Check if the file we are trying to upload already exists in S3
        :param remote_path:
        :return: True, if file exists. False, otherwise
        """
        try:
            # Find the object that matches this ETag
            self.s3.head_object(
                Bucket=self.bucket_name, Key=remote_path)
            return True
        except botocore.exceptions.ClientError:
            # Either File does not exist or we are unable to get
            # this information.
            return False

    def METHOD_NAME(self, obj_path):
        # Build the canonical "s3://bucket/key" URL for an uploaded object.
        return "s3://{0}/{1}".format(
            self.bucket_name, obj_path)

    def file_checksum(self, file_name):
        # MD5 is used purely as a content fingerprint for deduplication,
        # not for any security purpose.
        with open(file_name, "rb") as file_handle:
            md5 = hashlib.md5()
            # Read file in chunks of 4096 bytes
            block_size = 4096
            # Save current cursor position and reset cursor to start of file
            curpos = file_handle.tell()
            file_handle.seek(0)
            buf = file_handle.read(block_size)
            while len(buf) > 0:
                md5.update(buf)
                buf = file_handle.read(block_size)
            # Restore file cursor's position
            file_handle.seek(curpos)
            return md5.hexdigest()

    def to_path_style_s3_url(self, key, version=None):
        """
            This link describes the format of Path Style URLs
            http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
        """
        base = self.s3.meta.endpoint_url
        result = "{0}/{1}/{2}".format(base, self.bucket_name, key)
        if version:
            result = "{0}?versionId={1}".format(result, version)
        return result
class ProgressPercentage(BaseSubscriber):
    """Transfer subscriber that writes upload progress to stderr."""
    # This class was copied directly from S3Transfer docs

    def __init__(self, filename, remote_path):
        self._filename = filename
        self._remote_path = remote_path
        self._size = float(os.path.getsize(filename))
        self._seen_so_far = 0
        # Progress callbacks may arrive from multiple transfer threads.
        self._lock = threading.Lock()

    def on_progress(self, future, bytes_transferred, **kwargs):
        # To simplify we'll assume this is hooked up
        # to a single filename.
        with self._lock:
            self._seen_so_far += bytes_transferred
            # BUG FIX: guard against zero-byte files, which previously raised
            # ZeroDivisionError on the first progress callback.
            if self._size:
                percentage = (self._seen_so_far / self._size) * 100
            else:
                percentage = 100.0
            sys.stderr.write(
                "\rUploading to %s %s / %s (%.2f%%)" %
                (self._remote_path, self._seen_so_far,
                 self._size, percentage))
            sys.stderr.flush()
import os
from pathlib import Path
from wazuh_testing.qa_ctl.provisioning.wazuh_deployment.wazuh_package import WazuhPackage
from wazuh_testing.qa_ctl.provisioning.ansible.ansible_task import AnsibleTask
from wazuh_testing.qa_ctl import QACTL_LOGGER
from wazuh_testing.tools.logging import Logging
from wazuh_testing.tools.exceptions import QAValueError
from wazuh_testing.tools.s3_package import get_s3_package_url
from wazuh_testing.tools.file import join_path
class WazuhS3Package(WazuhPackage):
    """Install Wazuh from a S3 URL package
    Args:
        wazuh_target (string): Type of the Wazuh instance desired (agent or manager).
        s3_package_url (string): URL of the S3 Wazuh package.
        installation_files_path (string): Path where is located the Wazuh instalation files.
        qa_ctl_configuration (QACTLConfiguration): QACTL configuration.
        version (string): The version of Wazuh. Parameter set by default to 'None'.
        system (string): System of the Wazuh installation files. Parameter set by default to 'None'.
        revision (string): Revision of the wazuh package. Parameter set by default to 'None'.
        repository (string): Repository of the wazuh package. Parameter set by default to 'None'.
    Attributes:
        wazuh_target (string): Type of the Wazuh instance desired (agent or manager).
        s3_package_url (string): URL of the S3 Wazuh package.
        installation_files_path (string): Path where is located the Wazuh instalation files.
        qa_ctl_configuration (QACTLConfiguration): QACTL configuration.
        version (string): The version of Wazuh. Parameter set by default to 'None'.
        system (string): System of the Wazuh installation files. Parameter set by default to 'None'.
        revision (string): Revision of the wazuh package. Parameter set by default to 'None'.
        repository (string): Repository of the wazuh package. Parameter set by default to 'None'.
    """
    LOGGER = Logging.get_logger(QACTL_LOGGER)

    def __init__(self, wazuh_target, installation_files_path, qa_ctl_configuration,
                 s3_package_url=None, system=None, version=None, revision=None, repository=None):
        self.system = system
        self.revision = revision
        self.repository = repository
        super().__init__(wazuh_target=wazuh_target, installation_files_path=installation_files_path,
                         system=system, version=version, qa_ctl_configuration=qa_ctl_configuration)
        # BUG FIX: __get_package_url reads self.version and self.wazuh_target,
        # which are not assigned in this class before the parent constructor
        # runs (presumably WazuhPackage.__init__ sets them -- confirm). The URL
        # was previously computed *before* super().__init__(), failing with
        # AttributeError whenever s3_package_url was omitted.
        self.s3_package_url = s3_package_url if s3_package_url is not None else self.__get_package_url()

    def __get_package_url(self):
        """ Get S3 package URL from repository, version, revision and system parameters.
        Returns:
            str: S3 package URL.
        """
        if self.version is not None and self.repository is not None and self.system is not None and \
                self.revision is not None:
            architecture = WazuhS3Package.METHOD_NAME(self.system)
            return get_s3_package_url(self.repository, self.wazuh_target, self.version,
                                      self.revision, self.system, architecture)
        else:
            raise QAValueError('Could not get Wazuh Package S3 URL. s3_package_url or '
                               '(version, repository, system, revision) has None value', WazuhS3Package.LOGGER.error,
                               QACTL_LOGGER)

    @staticmethod
    def METHOD_NAME(system):
        """Get the needed architecture for the wazuh package
        Args:
            system (string): String with the system value given
        Returns:
            str: String with the default architecture for the system
        """
        default_architectures = {
            'deb': 'amd64',
            'rpm': 'x86_64',
            'rpm5': 'x86_64',
            'windows': 'i386',
            'macos': 'amd64',
            'solaris10': 'i386',
            'solaris11': 'i386',
            'wpk-linux': 'x86_64',
            'wpk-windows': 'i386',
        }
        return default_architectures[system]

    def download_installation_files(self, inventory_file_path, hosts='all'):
        """Download the installation files of Wazuh in the given inventory file path
        Args:
            inventory_file_path (string): path where the instalation files are going to be stored.
            hosts (string): Parameter set to `all` by default.
        Returns:
            str: String with the complete path of the downloaded installation package
        """
        package_name = Path(self.s3_package_url).name
        WazuhS3Package.LOGGER.debug(f"Downloading Wazuh S3 package from {self.s3_package_url} in {hosts} hosts")
        download_unix_s3_package = AnsibleTask({
            'name': 'Download S3 package (Unix)',
            'get_url': {'url': self.s3_package_url, 'dest': self.installation_files_path},
            'register': 'download_state', 'retries': 6, 'delay': 10,
            'until': 'download_state is success',
            'when': 'ansible_system != "Win32NT"'
        })
        download_windows_s3_package = AnsibleTask({
            'name': 'Download S3 package (Windows)',
            'win_get_url': {'url': self.s3_package_url, 'dest': self.installation_files_path},
            'register': 'download_state', 'retries': 6, 'delay': 10,
            'until': 'download_state is success',
            'when': 'ansible_system == "Win32NT"'
        })
        super().download_installation_files(inventory_file_path, [download_unix_s3_package,
                                                                  download_windows_s3_package], hosts)
        # BUG FIX: the success message used to be logged *before* the download
        # ran; log it only after the Ansible tasks have completed.
        WazuhS3Package.LOGGER.debug(f"Wazuh S3 package was successfully downloaded in {hosts} hosts")
        package_system = 'windows' if '.msi' in package_name else 'generic'
        path_list = self.installation_files_path.split('\\') if package_system == 'windows' else \
            self.installation_files_path.split('/')
        path_list.append(package_name)
        return join_path(path_list, package_system)
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Parse the "ASCCONV" meta data format found in a variety of Siemens MR files.
"""
import ast
import re
from collections import OrderedDict
# Matches one whole ASCCONV section.  Group 1 captures any "key=value"
# attributes present on the BEGIN line; group 2 captures the assignment
# lines between BEGIN and END (non-greedy, DOTALL so it may span lines).
ASCCONV_RE = re.compile(
    r'### ASCCONV BEGIN((?:\s*[^=\s]+=[^=\s]+)*) ###\n(.*?)\n### ASCCONV END ###',
    flags=re.M | re.S,
)
class AscconvParseError(Exception):
    """Error raised when a line of an ASCCONV section cannot be parsed."""
class Atom:
    """Object to hold operation, object type and object identifier

    An atom represents an element in an expression.  For example::

        a.b[0].c

    has four elements.  We call these elements "atoms".

    We represent objects (like ``a``) as dicts for convenience.

    The last element (``.c``) is an ``op = ast.Attribute`` operation where the
    object type (`obj_type`) of ``c`` is not constrained (we can't tell from
    the operation what type it is). The `obj_id` is the name of the object --
    "c".

    The second to last element ``[0]``, is ``op = ast.Subscript``, with object
    type dict (we know from the subsequent operation ``.c`` that this must be
    an object, we represent the object by a dict). The `obj_id` is the index 0.

    Parameters
    ----------
    op : ast node
        The AST node this atom was built from -- ``ast.Name``,
        ``ast.Attribute`` or ``ast.Subscript`` (``assign2atoms`` stores the
        target node here, and ``obj_from_atoms`` dispatches on its type).
    obj_type : {list, dict, other}
        Object type being assigned to.
    obj_id : str or int
        Key (``obj_type is dict``) or index (``obj_type is list``)
    """

    def __init__(self, op, obj_type, obj_id):
        self.op = op
        self.obj_type = obj_type
        self.obj_id = obj_id
class NoValue:
    """Sentinel class signaling that no value is present (never instantiated)."""
def assign2atoms(assign_ast, default_class=int):
    """Parse single assignment ast from ascconv line into atoms

    Parameters
    ----------
    assign_ast : assignment statement ast
        ast derived from single line of ascconv file.
    default_class : class, optional
        Class that will create an object where we cannot yet know the object
        type in the assignment.

    Returns
    -------
    atoms : iterator
        Root-first sequence of :class:`Atom` describing the left-hand side
        of the assignment in `assign_ast`.
    """
    targets = assign_ast.targets
    if len(targets) != 1:
        raise AscconvParseError('Too many targets in assign')
    node = targets[0]
    chain = []
    # Type of the object the *current* atom refers to; unknown for the
    # innermost element, so it defaults to a scalar placeholder.
    pending_type = default_class
    # Walk the LHS from the outermost element inward until we reach the root
    # name, collecting one atom per element.
    while not isinstance(node, ast.Name):
        if isinstance(node, ast.Attribute):
            chain.append(Atom(node, pending_type, node.attr))
            node, pending_type = node.value, OrderedDict
        elif isinstance(node, ast.Subscript):
            sub = node.slice
            # PY39+: slice is the expression itself; PY38: wrapped in ast.Index.
            index = sub.n if isinstance(sub, ast.Constant) else sub.value.n
            chain.append(Atom(node, pending_type, index))
            node, pending_type = node.value, list
        else:
            raise AscconvParseError(f'Unexpected LHS element {node}')
    chain.append(Atom(node, pending_type, node.id))
    # Atoms were collected outermost-first; callers want root-first.
    return reversed(chain)
def METHOD_NAME(atom, root):
    """Find / create object named by `atom` in dict-like `root`

    Returns the value already stored under key ``atom.obj_id`` when `root`
    has one.  Otherwise instantiate ``atom.obj_type()``, store it under that
    key, and return it -- so `root` may be modified in place.
    """
    key = atom.obj_id
    existing = root.get(key, NoValue)
    if existing is NoValue:
        # Nothing stored yet -- create, insert and hand back the new object.
        existing = root[key] = atom.obj_type()
    return existing
def _create_subscript_in(atom, root):
"""Find / create and insert object defined by `atom` from list `root`
The `atom` has an index, defined in ``atom.obj_id``. If `root` is long
enough to contain this index, return the object at that index. Otherwise,
extend `root` with None elements to contain index ``atom.obj_id``, then
create a new object via ``atom.obj_type()``, insert at the end of the list,
and return this object.
Can therefore modify `root` in place.
"""
curr_n = len(root)
index = atom.obj_id
if curr_n > index:
return root[index]
obj = atom.obj_type()
root += [None] * (index - curr_n) + [obj]
return obj
def obj_from_atoms(atoms, namespace):
    """Return container and key for the object described by `atoms`

    Walks `atoms` root-first, creating any intermediate dict-like or list
    containers on the way.

    Parameters
    ----------
    atoms : iterable
        Root-first sequence of :class:`Atom` instances.
    namespace : dict-like
        Namespace in which object will be defined.

    Returns
    -------
    obj_root : object
        Container such that the target value can be stored with
        ``obj_root[obj_key] = value``.
    obj_key : str or int
        Index into list or key into dictionary for `obj_root`.
    """
    current = namespace
    for atom in atoms:
        parent = current
        # Name/Attribute atoms live in dict-like containers; everything else
        # (Subscript) indexes into a list.
        if isinstance(atom.op, (ast.Attribute, ast.Name)):
            current = METHOD_NAME(atom, parent)
        else:
            current = _create_subscript_in(atom, parent)
        if not isinstance(current, atom.obj_type):
            raise AscconvParseError(f'Unexpected type for {atom.obj_id} in {parent}')
    return parent, atom.obj_id
def _get_value(assign):
value = assign.value
if isinstance(value, ast.Num):
return value.n
if isinstance(value, ast.Str):
return value.s
if isinstance(value, ast.UnaryOp) and isinstance(value.op, ast.USub):
return -value.operand.n
raise AscconvParseError(f'Unexpected RHS of assignment: {value}')
def parse_ascconv(ascconv_str, str_delim='"'):
    """Parse the 'ASCCONV' format from `ascconv_str`.

    Parameters
    ----------
    ascconv_str : str
        The string we are parsing
    str_delim : str, optional
        String delimiter.  Typically '"' or '""'

    Returns
    -------
    prot_dict : OrderedDict
        Meta data pulled from the ASCCONV section.
    attrs : OrderedDict
        Any attributes stored in the 'ASCCONV BEGIN' line

    Raises
    ------
    AscconvParseError
        A line of the ASCCONV section could not be parsed.
    """
    attrs, content = ASCCONV_RE.match(ascconv_str).groups()
    attrs = OrderedDict(tuple(x.split('=')) for x in attrs.split())
    # Normalize string start / end markers to something Python understands.
    # Delimiters are swapped for triple quotes *before* escaping backslashes,
    # so that e.g. backslashes inside string values survive ast.parse below.
    content = content.replace(str_delim, '"""').replace('\\', '\\\\')
    # Use Python's own parser to parse modified ASCCONV assignments
    tree = ast.parse(content)

    prot_dict = OrderedDict()
    # Each top-level statement is one "lhs = value" line; obj_from_atoms
    # creates any intermediate dicts / lists needed along the way.
    for assign in tree.body:
        atoms = assign2atoms(assign)
        obj_to_index, key = obj_from_atoms(atoms, prot_dict)
        obj_to_index[key] = _get_value(assign)

    return prot_dict, attrs
7,366 | test spectrogram yscale deprecation | """
Test deprecations. For each deprecation two things must be tested:
1. Is a proper warning raised. This is done using
with pytest.warns(PendingDeprecationWarning, match="some text"):
call_of_function()
2. Was the function properly deprecated. This is done using:
if version.parse(pf.__version__) >= version.parse('0.5.0'):
with pytest.raises(AttributeError):
# remove get_nearest_k() from pyfar 0.5.0!
coords.get_nearest_k(1, 0, 0)
"""
import numpy as np
from packaging import version
import pathlib
import pytest
from unittest.mock import patch
import pyfar as pf
import pyfar.dsp.filter as pfilt
# This defines the plot size and the backend
from pyfar.testing.plot_utils import create_figure
# deprecate in 0.5.0 ----------------------------------------------------------
def test_get_nearest_deprecations():
    """Coordinates get_nearest_* and get_slice were removed in pyfar 0.5.0.

    Fixes copy-pasted comments (every call was annotated as get_nearest_k)
    and collapses four identical version checks into one guard.
    """
    coords = pf.Coordinates(np.arange(6), 0, 0)
    if version.parse(pf.__version__) < version.parse('0.5.0'):
        return
    # All of the following were removed from pyfar 0.5.0, so merely calling
    # them must raise AttributeError.
    with pytest.raises(AttributeError):
        # remove get_nearest_k() from pyfar 0.5.0!
        coords.get_nearest_k(1, 0, 0)
    with pytest.raises(AttributeError):
        # remove get_nearest_cart() from pyfar 0.5.0!
        coords.get_nearest_cart(2.5, 0, 0, 1.5)
    with pytest.raises(AttributeError):
        # remove get_nearest_sph() from pyfar 0.5.0!
        coords.get_nearest_sph(0, 0, 1, 1)
    with pytest.raises(AttributeError):
        # remove get_slice() from pyfar 0.5.0!
        coords.get_slice('x', 'met', 0, 1)
def test_filter_deprecations():
    """Filter functions with non-verbose names were removed in pyfar 0.5.0.

    Collapses five identical version checks into one guard and fixes the
    "elipp" section-comment typo.
    """
    if version.parse(pf.__version__) < version.parse('0.5.0'):
        return
    # butter
    with pytest.raises(AttributeError):
        # remove butter() from pyfar 0.5.0!
        pfilt.butter(None, 2, 1000, 'lowpass', 44100)
    # cheby1
    with pytest.raises(AttributeError):
        # remove cheby1() from pyfar 0.5.0!
        pfilt.cheby1(None, 2, 1, 1000, 'lowpass', 44100)
    # cheby2
    with pytest.raises(AttributeError):
        # remove cheby2() from pyfar 0.5.0!
        pfilt.cheby2(None, 2, 40, 1000, 'lowpass', 44100)
    # ellip
    with pytest.raises(AttributeError):
        # remove ellip() from pyfar 0.5.0!
        pfilt.ellip(None, 2, 1, 40, 1000, 'lowpass', 44100)
    # bell
    with pytest.raises(AttributeError):
        # remove peq() from pyfar 0.5.0!
        pfilt.peq(None, 1000, 10, 2, sampling_rate=44100)
@patch('soundfile.read', return_value=(np.array([1., 2., 3.]), 1000))
def test_read_wav_deprecation(read_mock):
    """read_wav was removed from pyfar 0.5.0; calling it must fail.

    The single parameter receives the mock injected by ``@patch`` (pytest
    excludes trailing mock arguments from fixture resolution), so it is
    named ``read_mock`` rather than the misleading ``tmpdir`` -- the tmpdir
    fixture was never actually injected or used here.
    """
    filename = 'test.wav'
    if version.parse(pf.__version__) >= version.parse('0.5.0'):
        with pytest.raises(AttributeError):
            # remove read_wav from pyfar 0.5.0!
            pf.io.read_wav(filename)
@patch('soundfile.write')
def test_write_wav_deprecation(write_mock, noise, tmpdir):
    """write_wav was removed from pyfar 0.5.0; calling it must fail."""
    if version.parse(pf.__version__) < version.parse('0.5.0'):
        return
    target = pathlib.Path(tmpdir, 'test_wav')
    with pytest.raises(AttributeError):
        # remove write_wav from pyfar 0.5.0!
        pf.io.write_wav(noise, target)
def test_linear_sweep_deprecation():
    """linear_sweep was removed from pyfar 0.5.0; calling it must fail."""
    if version.parse(pf.__version__) < version.parse('0.5.0'):
        return
    with pytest.raises(AttributeError):
        # remove linear_sweep() from pyfar 0.5.0!
        pf.signals.linear_sweep(2**10, [1e3, 20e3])
def test_exponential_sweep_deprecation():
    """exponential_sweep was removed from pyfar 0.5.0; calling it must fail."""
    if version.parse(pf.__version__) < version.parse('0.5.0'):
        return
    with pytest.raises(AttributeError):
        # remove exponential_sweep() from pyfar 0.5.0!
        pf.signals.exponential_sweep(2**10, [1e3, 20e3])
# deprecate in 0.6.0 ----------------------------------------------------------
@pytest.mark.parametrize('function', [
    (pf.plot.freq), (pf.plot.phase), (pf.plot.group_delay),
    (pf.plot.time_freq), (pf.plot.freq_phase), (pf.plot.freq_group_delay)])
def test_xscale_deprecation(function, handsome_signal):
    """Deprecate xscale parameter in plot functions"""
    # Passing xscale must emit the pending-deprecation warning.
    with pytest.warns(PendingDeprecationWarning,
                      match="The xscale parameter will be removed"):
        create_figure()
        function(handsome_signal, xscale='linear')
    if version.parse(pf.__version__) >= version.parse('0.6.0'):
        with pytest.raises(AttributeError):
            # remove xscale from pyfar 0.6.0!
            # NOTE(review): this branch calls the function *without* xscale
            # yet expects AttributeError -- confirm that is intended once the
            # parameter is actually removed.
            create_figure()
            function(handsome_signal)
def METHOD_NAME(sine):
    """Deprecate yscale parameter in plot functions"""
    with pytest.warns(PendingDeprecationWarning,
                      match="The yscale parameter will be removed"):
        create_figure()
        pf.plot.spectrogram(sine, yscale='linear')
    if version.parse(pf.__version__) >= version.parse('0.6.0'):
        with pytest.raises(AttributeError):
            # remove yscale from pyfar 0.6.0!
            create_figure()
            pf.plot.spectrogram(sine)
def test__check_time_unit():
    """Deprecate unit=None in plots showing the time or group delay"""
    with pytest.warns(PendingDeprecationWarning,
                      match="unit=None will be deprecated"):
        create_figure()
        pf.plot._utils._check_time_unit(None)
    if version.parse(pf.__version__) >= version.parse('0.6.0'):
        with pytest.raises(ValueError):
            # remove unit=None from pyfar 0.6.0!
            create_figure()
            pf.plot._utils._check_time_unit(None)
7,367 | from monoid | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Tuple
import numpy as np
import lale.docstrings
import lale.operators
from lale.expressions import it
from lale.lib.dataframe import count, get_columns
from lale.lib.rasl import Map
from lale.lib.sklearn import select_k_best
from .monoid import Monoid, MonoidableOperator
from .scores import FClassif
class _SelectKBestMonoid(Monoid):
    """Associative state accumulated while fitting SelectKBest batch-wise."""

    def __init__(self, *, n_samples_seen_, feature_names_in_, lifted_score_):
        # Number of rows folded into this monoid value so far.
        self.n_samples_seen_ = n_samples_seen_
        # Column names of the fitted data; order must match across batches.
        self.feature_names_in_ = feature_names_in_
        # Monoid value of the scoring function (e.g. FClassif).
        self.lifted_score_ = lifted_score_

    def combine(self, other: "_SelectKBestMonoid"):
        """Merge two partial fits; both must have seen identical columns."""
        n_samples_seen_ = self.n_samples_seen_ + other.n_samples_seen_
        assert list(self.feature_names_in_) == list(other.feature_names_in_)
        feature_names_in_ = self.feature_names_in_
        lifted_score_ = self.lifted_score_.combine(other.lifted_score_)
        return _SelectKBestMonoid(
            n_samples_seen_=n_samples_seen_,
            feature_names_in_=feature_names_in_,
            lifted_score_=lifted_score_,
        )
class _SelectKBestImpl(MonoidableOperator[_SelectKBestMonoid]):
    """Monoid-based (incrementally fittable) implementation of SelectKBest."""

    def __init__(self, monoidable_score_func=FClassif, score_func=None, *, k=10):
        # `score_func` is accepted for signature compatibility with the
        # sklearn operator; only the monoidable variant is actually used.
        self._hyperparams = {
            "score_func": monoidable_score_func(),
            "k": k,
        }

    def transform(self, X):
        # NOTE(review): assumes METHOD_NAME() has run first -- it is what
        # initializes self._transformer; calling transform on an unfitted
        # instance raises AttributeError.  Confirm this is intended.
        if self._transformer is None:
            self._transformer = self._build_transformer()
        return self._transformer.transform(X)

    @property
    def n_samples_seen_(self):
        # 0 until the first batch has been folded in.
        return getattr(self._monoid, "n_samples_seen_", 0)

    @property
    def feature_names_in_(self):
        return getattr(self._monoid, "feature_names_in_", None)

    def METHOD_NAME(self, monoid: _SelectKBestMonoid):
        """Install a fitted monoid and derive scores / p-values from it."""
        self._monoid = monoid
        score_func = self._hyperparams["score_func"]
        lifted_score_ = self._monoid.lifted_score_
        self.scores_, self.pvalues_ = score_func.METHOD_NAME(lifted_score_)
        self.n_features_in_ = len(self._monoid.feature_names_in_)
        # Invalidate any transformer built from a previous monoid; it is
        # rebuilt lazily by transform().
        self._transformer = None

    def _build_transformer(self):
        """Build a Map projecting onto the k best-scoring columns."""
        assert self._monoid is not None
        k = self._hyperparams["k"]
        scores = self.scores_.copy()
        # NaN scores must never win: replace with the smallest finite value.
        scores[np.isnan(scores)] = np.finfo(scores.dtype).min
        ind = np.sort(np.argpartition(scores, -min(k, len(scores)))[-k:])
        kbest = self._monoid.feature_names_in_[ind]
        result = Map(columns={col: it[col] for col in kbest})
        return result

    def to_monoid(self, batch: Tuple[Any, Any]):
        """Fold one (X, y) batch into a monoid value."""
        X, y = batch
        score_func = self._hyperparams["score_func"]
        n_samples_seen_ = count(X)
        feature_names_in_ = get_columns(X)
        lifted_score_ = score_func.to_monoid((X, y))
        return _SelectKBestMonoid(
            n_samples_seen_=n_samples_seen_,
            feature_names_in_=feature_names_in_,
            lifted_score_=lifted_score_,
        )
# Combined JSON schemas for the operator.  Hyperparameters and I/O contracts
# are identical to scikit-learn's SelectKBest, so those schemas are reused.
_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Relational algebra implementation of SelectKBest.",
    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.rasl.select_k_best.html",
    "type": "object",
    "tags": {
        "pre": ["~categoricals"],
        "op": ["transformer", "interpretable"],
        "post": [],
    },
    "properties": {
        "hyperparams": select_k_best._hyperparams_schema,
        "input_fit": select_k_best._input_fit_schema,
        "input_transform": select_k_best._input_transform_schema,
        "output_transform": select_k_best._output_transform_schema,
    },
}

SelectKBest: lale.operators.PlannedIndividualOp
SelectKBest = lale.operators.make_operator(_SelectKBestImpl, _combined_schemas)

lale.docstrings.set_docstrings(SelectKBest)
7,368 | post operations | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "notification-hub namespace authorization-rule regenerate-keys",
    is_experimental=True,
)
class RegenerateKeys(AAZCommand):
    """Regenerate the primary/secondary keys to the namespace authorization rule.

    :example: Regenerate keys of the namespace authorization rule
        az notification-hub namespace authorization-rule regenerate-keys --resource-group MyResourceGroup --namespace-name my-namespace --name "RootManageSharedAccessKey" --policy-key "Secondary Key"
    """

    # Maps the command to its ARM resource path and API version.
    _aaz_info = {
        "version": "2017-04-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.notificationhubs/namespaces/{}/authorizationrules/{}/regeneratekeys", "2017-04-01"],
        ]
    }

    def _handler(self, command_args):
        """Entry point: parse args, run the operation pipeline, return output."""
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        """Declare command arguments; the schema is built once and cached."""
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="The connection string of the namespace for the specified authorizationRule.",
            required=True,
            id_part="child_name_1",
        )
        _args_schema.namespace_name = AAZStrArg(
            options=["--namespace-name"],
            help="The namespace name.",
            required=True,
            id_part="name",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )

        # define Arg Group "Parameters"
        _args_schema = cls._args_schema
        _args_schema.policy_key = AAZStrArg(
            options=["--policy-key"],
            arg_group="Parameters",
            help="Name of the key that has to be regenerated for the Namespace/Notification Hub Authorization Rule. The value can be Primary Key/Secondary Key.",
        )
        return cls._args_schema

    def _execute_operations(self):
        # pre/post callbacks bracket the single HTTP operation.
        self.pre_operations()
        self.NamespacesRegenerateKeys(ctx=self.ctx)()
        self.METHOD_NAME()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def METHOD_NAME(self):
        pass

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class NamespacesRegenerateKeys(AAZHttpOperation):
        """POST .../regenerateKeys and record the returned keys in the context."""

        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                return self.on_200(session)

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NotificationHubs/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}/regenerateKeys",
                **self.url_parameters
            )

        @property
        def method(self):
            return "POST"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "authorizationRuleName", self.ctx.args.name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "namespaceName", self.ctx.args.namespace_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2017-04-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Content-Type", "application/json",
                ),
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        @property
        def content(self):
            # Request body: only the name of the key to regenerate.
            _content_value, _builder = self.new_content_builder(
                self.ctx.args,
                typ=AAZObjectType,
                typ_kwargs={"flags": {"required": True, "client_flatten": True}}
            )
            _builder.set_prop("policyKey", AAZStrType, ".policy_key")

            return self.serialize_content(_content_value)

        def on_200(self, session):
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # Response schema (built once, cached): regenerated keys plus
            # the corresponding connection strings.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.key_name = AAZStrType(
                serialized_name="keyName",
            )
            _schema_on_200.primary_connection_string = AAZStrType(
                serialized_name="primaryConnectionString",
            )
            _schema_on_200.primary_key = AAZStrType(
                serialized_name="primaryKey",
            )
            _schema_on_200.secondary_connection_string = AAZStrType(
                serialized_name="secondaryConnectionString",
            )
            _schema_on_200.secondary_key = AAZStrType(
                serialized_name="secondaryKey",
            )

            return cls._schema_on_200
class _RegenerateKeysHelper:
    """Helper class for RegenerateKeys."""
__all__ = ["RegenerateKeys"] |
7,369 | lab vms startup | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'ListVirtualMachineApplicableSchedulesResult',
'AwaitableListVirtualMachineApplicableSchedulesResult',
'list_virtual_machine_applicable_schedules',
'list_virtual_machine_applicable_schedules_output',
]
@pulumi.output_type
class ListVirtualMachineApplicableSchedulesResult:
    """
    Schedules applicable to a virtual machine. The schedules may have been defined on a VM or on lab level.
    """
    def __init__(__self__, id=None, lab_vms_shutdown=None, METHOD_NAME=None, location=None, name=None, tags=None, type=None):
        # Each argument is type-checked, then stored via pulumi.set so the
        # @pulumi.output_type machinery can resolve it through the getters.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if lab_vms_shutdown and not isinstance(lab_vms_shutdown, dict):
            raise TypeError("Expected argument 'lab_vms_shutdown' to be a dict")
        pulumi.set(__self__, "lab_vms_shutdown", lab_vms_shutdown)
        if METHOD_NAME and not isinstance(METHOD_NAME, dict):
            raise TypeError("Expected argument 'lab_vms_startup' to be a dict")
        pulumi.set(__self__, "lab_vms_startup", METHOD_NAME)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The identifier of the resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="labVmsShutdown")
    def lab_vms_shutdown(self) -> Optional['outputs.ScheduleResponse']:
        """
        The auto-shutdown schedule, if one has been set at the lab or lab resource level.
        """
        return pulumi.get(self, "lab_vms_shutdown")

    @property
    @pulumi.getter(name="labVmsStartup")
    def METHOD_NAME(self) -> Optional['outputs.ScheduleResponse']:
        """
        The auto-startup schedule, if one has been set at the lab or lab resource level.
        """
        return pulumi.get(self, "lab_vms_startup")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        The location of the resource.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        The tags of the resource.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource.
        """
        return pulumi.get(self, "type")
class AwaitableListVirtualMachineApplicableSchedulesResult(ListVirtualMachineApplicableSchedulesResult):
    """Awaitable wrapper so the invoke result can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `yield` makes this a generator function, which is what
        # __await__ must return; the result itself is already resolved.
        if False:
            yield self
        return ListVirtualMachineApplicableSchedulesResult(
            id=self.id,
            lab_vms_shutdown=self.lab_vms_shutdown,
            METHOD_NAME=self.METHOD_NAME,
            location=self.location,
            name=self.name,
            tags=self.tags,
            type=self.type)
def list_virtual_machine_applicable_schedules(lab_name: Optional[str] = None,
                                              name: Optional[str] = None,
                                              resource_group_name: Optional[str] = None,
                                              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListVirtualMachineApplicableSchedulesResult:
    """
    Lists the applicable start/stop schedules, if any.


    :param str lab_name: The name of the lab.
    :param str name: The name of the virtual machine.
    :param str resource_group_name: The name of the resource group.
    """
    __args__ = dict()
    __args__['labName'] = lab_name
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Invoke the provider function through the pulumi engine and unwrap the
    # typed result.
    __ret__ = pulumi.runtime.invoke('azure-native:devtestlab/v20180915:listVirtualMachineApplicableSchedules', __args__, opts=opts, typ=ListVirtualMachineApplicableSchedulesResult).value

    return AwaitableListVirtualMachineApplicableSchedulesResult(
        id=pulumi.get(__ret__, 'id'),
        lab_vms_shutdown=pulumi.get(__ret__, 'lab_vms_shutdown'),
        METHOD_NAME=pulumi.get(__ret__, 'lab_vms_startup'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(list_virtual_machine_applicable_schedules)
def list_virtual_machine_applicable_schedules_output(lab_name: Optional[pulumi.Input[str]] = None,
                                                     name: Optional[pulumi.Input[str]] = None,
                                                     resource_group_name: Optional[pulumi.Input[str]] = None,
                                                     opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListVirtualMachineApplicableSchedulesResult]:
    """
    Lists the applicable start/stop schedules, if any.


    :param str lab_name: The name of the lab.
    :param str name: The name of the virtual machine.
    :param str resource_group_name: The name of the resource group.
    """
    # Body intentionally empty: lift_output_func forwards to the plain
    # invoke above and wraps the result in an Output.
    ...
7,370 | test get block size | import sys
import pytest
from PIL import Image
from .helper import is_pypy
def test_get_stats():
    """All expected allocator counters are present in the stats dict."""
    # Create at least one image so the allocator has been exercised.
    Image.new("RGB", (10, 10))

    stats = Image.core.get_stats()
    for counter in ("new_count", "reused_blocks", "freed_blocks",
                    "allocated_blocks", "reallocated_blocks", "blocks_cached"):
        assert counter in stats
def test_reset_stats():
    """After a reset every allocator counter reads zero."""
    Image.core.reset_stats()

    stats = Image.core.get_stats()
    for counter in ("new_count", "reused_blocks", "freed_blocks",
                    "allocated_blocks", "reallocated_blocks", "blocks_cached"):
        assert stats[counter] == 0
class TestCoreMemory:
    """Exercise the C-level allocator configuration API (alignment, block
    size, block cache) and its statistics counters."""

    def teardown_method(self):
        # Restore default values so one test cannot leak allocator settings
        # into another.
        Image.core.set_alignment(1)
        Image.core.set_block_size(1024 * 1024)
        Image.core.set_blocks_max(0)
        Image.core.clear_cache()

    def test_get_alignment(self):
        alignment = Image.core.get_alignment()

        assert alignment > 0

    def test_set_alignment(self):
        # Valid alignments are powers of two; 0, negatives and non-powers
        # must be rejected.
        for i in [1, 2, 4, 8, 16, 32]:
            Image.core.set_alignment(i)
            alignment = Image.core.get_alignment()
            assert alignment == i

            # Try to construct new image
            Image.new("RGB", (10, 10))

        with pytest.raises(ValueError):
            Image.core.set_alignment(0)
        with pytest.raises(ValueError):
            Image.core.set_alignment(-1)
        with pytest.raises(ValueError):
            Image.core.set_alignment(3)

    def METHOD_NAME(self):
        block_size = Image.core.get_block_size()

        assert block_size >= 4096

    def test_set_block_size(self):
        for i in [4096, 2 * 4096, 3 * 4096]:
            Image.core.set_block_size(i)
            block_size = Image.core.get_block_size()
            assert block_size == i

            # Try to construct new image
            Image.new("RGB", (10, 10))

        with pytest.raises(ValueError):
            Image.core.set_block_size(0)
        with pytest.raises(ValueError):
            Image.core.set_block_size(-1)
        with pytest.raises(ValueError):
            # Below the 4096-byte minimum.
            Image.core.set_block_size(4000)

    def test_set_block_size_stats(self):
        Image.core.reset_stats()
        Image.core.set_blocks_max(0)
        Image.core.set_block_size(4096)
        # A 256x256 RGB image (256 KiB) needs at least 64 blocks of 4 KiB.
        Image.new("RGB", (256, 256))

        stats = Image.core.get_stats()
        assert stats["new_count"] >= 1
        assert stats["allocated_blocks"] >= 64
        if not is_pypy():
            # PyPy's GC may not have collected the image yet.
            assert stats["freed_blocks"] >= 64

    def test_get_blocks_max(self):
        blocks_max = Image.core.get_blocks_max()

        assert blocks_max >= 0

    def test_set_blocks_max(self):
        for i in [0, 1, 10]:
            Image.core.set_blocks_max(i)
            blocks_max = Image.core.get_blocks_max()
            assert blocks_max == i

            # Try to construct new image
            Image.new("RGB", (10, 10))

        with pytest.raises(ValueError):
            Image.core.set_blocks_max(-1)
        if sys.maxsize < 2**32:
            # A cache of 2**29 blocks must be rejected on 32-bit builds.
            with pytest.raises(ValueError):
                Image.core.set_blocks_max(2**29)

    @pytest.mark.skipif(is_pypy(), reason="Images not collected")
    def test_set_blocks_max_stats(self):
        Image.core.reset_stats()
        Image.core.set_blocks_max(128)
        Image.core.set_block_size(4096)
        # The second image should be served from the first image's cached
        # blocks (reused_blocks), not fresh allocations.
        Image.new("RGB", (256, 256))
        Image.new("RGB", (256, 256))

        stats = Image.core.get_stats()
        assert stats["new_count"] >= 2
        assert stats["allocated_blocks"] >= 64
        assert stats["reused_blocks"] >= 64
        assert stats["freed_blocks"] == 0
        assert stats["blocks_cached"] == 64

    @pytest.mark.skipif(is_pypy(), reason="Images not collected")
    def test_clear_cache_stats(self):
        Image.core.reset_stats()
        Image.core.clear_cache()
        Image.core.set_blocks_max(128)
        Image.core.set_block_size(4096)
        Image.new("RGB", (256, 256))
        Image.new("RGB", (256, 256))
        # Keep 16 blocks in cache
        Image.core.clear_cache(16)

        stats = Image.core.get_stats()
        assert stats["new_count"] >= 2
        assert stats["allocated_blocks"] >= 64
        assert stats["reused_blocks"] >= 64
        assert stats["freed_blocks"] >= 48
        assert stats["blocks_cached"] == 16

    def test_large_images(self):
        Image.core.reset_stats()
        Image.core.set_blocks_max(0)
        Image.core.set_block_size(4096)
        # One row of a 2048-wide RGB image (8 KiB) exceeds the 4 KiB block
        # size, so none of these blocks end up in the cache.
        Image.new("RGB", (2048, 16))
        Image.core.clear_cache()

        stats = Image.core.get_stats()
        assert stats["new_count"] >= 1
        assert stats["allocated_blocks"] >= 16
        assert stats["reused_blocks"] >= 0
        assert stats["blocks_cached"] == 0
        if not is_pypy():
            assert stats["freed_blocks"] >= 16
class TestEnvVars:
    """Check that PILLOW_* environment variables configure the allocator."""

    def teardown_method(self):
        # Restore default values
        Image.core.set_alignment(1)
        Image.core.set_block_size(1024 * 1024)
        Image.core.set_blocks_max(0)
        Image.core.clear_cache()

    def test_units(self):
        # "K"/"m" suffixes are parsed case-insensitively as KiB / MiB.
        Image._apply_env_variables({"PILLOW_BLOCKS_MAX": "2K"})
        assert Image.core.get_blocks_max() == 2 * 1024
        Image._apply_env_variables({"PILLOW_BLOCK_SIZE": "2m"})
        assert Image.core.get_block_size() == 2 * 1024 * 1024

    @pytest.mark.parametrize(
        "var",
        (
            {"PILLOW_ALIGNMENT": "15"},
            {"PILLOW_BLOCK_SIZE": "1024"},
            {"PILLOW_BLOCKS_MAX": "wat"},
        ),
    )
    def test_warnings(self, var):
        # Invalid values must produce a warning, not an exception.
        with pytest.warns(UserWarning):
            Image._apply_env_variables(var)
7,371 | extract data | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
# Literal lives in typing from 3.8 on; older interpreters fall back to
# typing_extensions.
if sys.version_info >= (3, 8):
    from typing import Literal  # pylint: disable=no-name-in-module, ungrouped-imports
else:
    from typing_extensions import Literal  # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
# Signature of the optional custom-deserialization callback (passed by the
# SDK as the ``cls`` keyword) -- presumably confirm against the client docs.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Module-level serializer shared by the request builders below.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(subscription_id: str, *, filter: Optional[str] = None, **kwargs: Any) -> HttpRequest:
    """Build the HTTP GET request for listing available DataBoxEdge SKUs.

    :param subscription_id: Azure subscription id, substituted into the URL path.
    :keyword filter: Optional OData ``$filter`` expression. Default is None.
    :return: An :class:`~azure.core.rest.HttpRequest` ready to run through the pipeline.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # api-version can be overridden via kwargs/params; defaults to this module's version.
    api_version: Literal["2019-08-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-08-01"))
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.DataBoxEdge/skus")
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
    }
    _url: str = _format_url_section(_url, **path_format_arguments)  # type: ignore
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if filter is not None:
        _params["$filter"] = _SERIALIZER.query("filter", filter, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class SkusOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.databoxedge.v2019_08_01.DataBoxEdgeManagementClient`'s
        :attr:`skus` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # Pipeline plumbing is injected positionally by the generated client,
        # or by keyword when this class is constructed manually.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(self, filter: Optional[str] = None, **kwargs: Any) -> Iterable["_models.ResourceTypeSku"]:
        """List all the available Skus in the region and information related to them.

        :param filter: Specify $filter='location eq :code:`<location>`' to filter on location. Default
         value is None.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ResourceTypeSku or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.databoxedge.v2019_08_01.models.ResourceTypeSku]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Fix: removed a stray trailing "|" token after list.metadata below
        # (extraction artifact); all request/paging logic is unchanged.
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2019-08-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-08-01"))
        cls: ClsType[_models.SkuInformationList] = kwargs.pop("cls", None)
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build from operation metadata; later pages: re-use
            # the server-provided next_link, re-quoting its query parameters.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def METHOD_NAME(pipeline_response):
            # Deserialize one page; returns (continuation token, iterator of items).
            deserialized = self._deserialize("SkuInformationList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            # Run one page request through the pipeline; only 200 is expected.
            request = prepare_request(next_link)
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(get_next, METHOD_NAME)

    list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.DataBoxEdge/skus"}
7,372 | create time | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetRouteCalculatorResult',
'AwaitableGetRouteCalculatorResult',
'get_route_calculator',
'get_route_calculator_output',
]
@pulumi.output_type
class GetRouteCalculatorResult:
    """
    A collection of values returned by getRouteCalculator.
    """
    def __init__(__self__, calculator_arn=None, calculator_name=None, METHOD_NAME=None, data_source=None, description=None, id=None, tags=None, update_time=None):
        # Generated constructor: type-check each field (only when truthy) and
        # stash it via pulumi.set so the @pulumi.getter properties can read it.
        if calculator_arn and not isinstance(calculator_arn, str):
            raise TypeError("Expected argument 'calculator_arn' to be a str")
        pulumi.set(__self__, "calculator_arn", calculator_arn)
        if calculator_name and not isinstance(calculator_name, str):
            raise TypeError("Expected argument 'calculator_name' to be a str")
        pulumi.set(__self__, "calculator_name", calculator_name)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'create_time' to be a str")
        pulumi.set(__self__, "create_time", METHOD_NAME)
        if data_source and not isinstance(data_source, str):
            raise TypeError("Expected argument 'data_source' to be a str")
        pulumi.set(__self__, "data_source", data_source)
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if update_time and not isinstance(update_time, str):
            raise TypeError("Expected argument 'update_time' to be a str")
        pulumi.set(__self__, "update_time", update_time)
    @property
    @pulumi.getter(name="calculatorArn")
    def calculator_arn(self) -> str:
        """
        ARN for the Route calculator resource. Use the ARN when you specify a resource across AWS.
        """
        return pulumi.get(self, "calculator_arn")
    @property
    @pulumi.getter(name="calculatorName")
    def calculator_name(self) -> str:
        return pulumi.get(self, "calculator_name")
    @property
    @pulumi.getter(name="createTime")
    def METHOD_NAME(self) -> str:
        """
        Timestamp for when the route calculator resource was created in ISO 8601 format.
        """
        return pulumi.get(self, "create_time")
    @property
    @pulumi.getter(name="dataSource")
    def data_source(self) -> str:
        """
        Data provider of traffic and road network data.
        """
        return pulumi.get(self, "data_source")
    @property
    @pulumi.getter
    def description(self) -> str:
        """
        Optional description of the route calculator resource.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def tags(self) -> Mapping[str, str]:
        """
        Key-value map of resource tags for the route calculator.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="updateTime")
    def update_time(self) -> str:
        """
        Timestamp for when the route calculator resource was last updated in ISO 8601 format.
        """
        return pulumi.get(self, "update_time")
class AwaitableGetRouteCalculatorResult(GetRouteCalculatorResult):
    """Awaitable wrapper: awaiting it yields a plain GetRouteCalculatorResult."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes __await__ a generator function, which
        # the await protocol requires; the result is returned immediately.
        if False:
            yield self
        return GetRouteCalculatorResult(
            calculator_arn=self.calculator_arn,
            calculator_name=self.calculator_name,
            METHOD_NAME=self.METHOD_NAME,
            data_source=self.data_source,
            description=self.description,
            id=self.id,
            tags=self.tags,
            update_time=self.update_time)
def get_route_calculator(calculator_name: Optional[str] = None,
                         tags: Optional[Mapping[str, str]] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRouteCalculatorResult:
    """
    Retrieve information about a Location Service Route Calculator.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_aws as aws
    example = aws.location.get_route_calculator(calculator_name="example")
    ```
    :param str calculator_name: Name of the route calculator resource.
    :param Mapping[str, str] tags: Key-value map of resource tags for the route calculator.
    """
    __args__ = dict()
    __args__['calculatorName'] = calculator_name
    __args__['tags'] = tags
    # Merge provider-level invoke defaults before making the engine RPC call.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('aws:location/getRouteCalculator:getRouteCalculator', __args__, opts=opts, typ=GetRouteCalculatorResult).value
    return AwaitableGetRouteCalculatorResult(
        calculator_arn=pulumi.get(__ret__, 'calculator_arn'),
        calculator_name=pulumi.get(__ret__, 'calculator_name'),
        METHOD_NAME=pulumi.get(__ret__, 'create_time'),
        data_source=pulumi.get(__ret__, 'data_source'),
        description=pulumi.get(__ret__, 'description'),
        id=pulumi.get(__ret__, 'id'),
        tags=pulumi.get(__ret__, 'tags'),
        update_time=pulumi.get(__ret__, 'update_time'))
@_utilities.lift_output_func(get_route_calculator)
def get_route_calculator_output(calculator_name: Optional[pulumi.Input[str]] = None,
                                tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRouteCalculatorResult]:
    """
    Retrieve information about a Location Service Route Calculator.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_aws as aws
    example = aws.location.get_route_calculator(calculator_name="example")
    ```
    :param str calculator_name: Name of the route calculator resource.
    :param Mapping[str, str] tags: Key-value map of resource tags for the route calculator.
    """
    # Body is intentionally just `...`: lift_output_func wraps
    # get_route_calculator to produce the Output-returning variant.
    # Fix: removed a stray trailing "|" token after `...` (extraction artifact).
    ...
7,373 | test host | from django.test import SimpleTestCase
from eulxml.xpath import parse as parse_xpath
from testil import eq, assert_raises
from corehq.apps.case_search.xpath_functions.ancestor_functions import is_ancestor_comparison, \
_is_ancestor_path_expression
from corehq.apps.case_search.filter_dsl import CaseFilterError
from corehq.apps.case_search.tests.utils import get_case_search_query
from corehq.apps.es.tests.test_case_search_es import BaseCaseSearchTest
from corehq.apps.es.tests.utils import es_test
from corehq.util.test_utils import generate_cases
class TestIsAncestorPath(SimpleTestCase):
    """Unit tests for the ancestor-path predicates in ancestor_functions."""
    @generate_cases([
        ("parent/name", False),
        ("parent/host/name", False),
        ("parent/host/@case_id", False),
        ("parent", False),
        ("parent = 'bob'", False),
        ("parent/name = 'bob'", True),
        ("parent/host/name = 'bob'", True),
    ])
    def test_is_ancestor_query(self, expression, expected):
        # True only for a comparison whose left side walks ancestor indexes.
        node = parse_xpath(expression)
        eq(is_ancestor_comparison(node), expected)
    @generate_cases([
        ("parent/name", True),
        ("parent/host/name", True),
        ("parent/host/parent/@case_id", True),
        ("parent", False),
        ("parent = 'bob'", False),
        ("parent/name = 'bob'", False),
        ("parent/host/name = 'bob'", False),
    ])
    def test_is_ancestor_path_expression(self, expression, expected):
        # True only for a bare multi-step path, without any comparison.
        node = parse_xpath(expression)
        eq(_is_ancestor_path_expression(node), expected)
@es_test
class TestAncestorQueries(BaseCaseSearchTest):
    """Elasticsearch integration tests for ancestor-exists / ancestor-path queries.

    Fix: removed a stray trailing "|" token on the last line (extraction
    artifact); all test logic is unchanged.
    """
    def setUp(self):
        super(TestAncestorQueries, self).setUp()
        self._create_case_search_config()
        # Note that cases must be defined before other cases can reference them
        # Legend: '>' parent index, ':>' extension (host) index.
        # a1>p1(LA)>g1(CA)
        # a1>p1(LA):>h3(USA)
        # a2:>h1>(BOS):>h2(MA)
        # a2:>h1>(BOS)>g2(USA)
        # a3>c1(SF)
        cases = [
            {'_id': 'h3', 'country': 'USA', 'case_type': 'h'},
            {'_id': 'g2', 'country': 'USA', 'case_type': 'g'},
            {'_id': 'g1', 'state': 'CA', 'case_type': 'g'},
            {'_id': 'p1', 'city': 'LA', 'case_type': 'p', 'index': {
                'parent': ('g', 'g1'),
                'host': ('h', 'h3', 'extension'),
            }},
            {'_id': 'h2', 'state': 'MA', 'case_type': 'g'},
            {'_id': 'h1', 'city': 'BOS', 'case_type': 'h', 'index': {
                'host': ('h', 'h2', 'extension'),
                'parent': ('g', 'g2'),
            }},
            {'_id': 'c1', 'city': 'SF', 'case_type': 'c'},
            {'_id': 'a1', 'case_type': 'a', 'index': {
                'parent': ('p', 'p1'),
            }},
            {'_id': 'a2', 'case_type': 'a', 'index': {
                'host': ('h', 'h1', 'extension'),
            }},
            {'_id': 'a3', 'city': 'SF', 'case_type': 'a', 'index': {
                'parent': ('c', 'c1'),
            }},
        ]
        self._bootstrap_cases_in_es_for_domain(self.domain, cases)

    def test_single_term_ancestor_query(self):
        query1 = get_case_search_query(
            self.domain,
            ['h'],
            {'parent/country': 'USA'},
        )
        self.assertItemsEqual(query1.get_ids(), ['h1'])

    def test_multi_term_ancestor_query(self):
        query1 = get_case_search_query(
            self.domain,
            ['a'],
            {'parent/city': ['LA', 'SF']},
        )
        self.assertItemsEqual(query1.get_ids(), ['a1', 'a3'])

    def test_case_id_shortcut(self):
        # The plain-key form and the explicit ancestor-exists form must agree.
        query1 = get_case_search_query(
            self.domain,
            ['h'],
            {'parent/@case_id': 'g2'},
        )
        self.assertItemsEqual(query1.get_ids(), ['h1'])
        query2 = get_case_search_query(
            self.domain,
            ['h'],
            {'_xpath_query': "ancestor-exists(parent,@case_id='g2')"},
        )
        self.assertItemsEqual(query2.get_ids(), ['h1'])

    def test_parent(self):
        query1 = get_case_search_query(
            self.domain,
            ['a'],
            {'_xpath_query': "ancestor-exists(parent, city='LA')"},
        )
        self.assertItemsEqual(query1.get_ids(), ['a1'])

    def test_parentparent(self):
        query1 = get_case_search_query(
            self.domain,
            ['a'],
            {'_xpath_query': "ancestor-exists(parent/parent, state='CA')"},
        )
        self.assertItemsEqual(query1.get_ids(), ['a1'])

    def METHOD_NAME(self):
        query1 = get_case_search_query(
            self.domain,
            ['a'],
            {'_xpath_query': "ancestor-exists(host, city='BOS')"},
        )
        self.assertItemsEqual(query1.get_ids(), ['a2'])

    def test_hosthost(self):
        query1 = get_case_search_query(
            self.domain,
            ['a'],
            {'_xpath_query': "ancestor-exists(host/host, state='MA')"},
        )
        self.assertItemsEqual(query1.get_ids(), ['a2'])

    def test_hostparent(self):
        query1 = get_case_search_query(
            self.domain,
            ['a'],
            {'_xpath_query': "ancestor-exists(host/parent, country='USA')"},
        )
        self.assertItemsEqual(query1.get_ids(), ['a2'])

    def test_parenthost(self):
        query1 = get_case_search_query(
            self.domain,
            ['a'],
            {'_xpath_query': "ancestor-exists(parent/host, country='USA')"},
        )
        self.assertItemsEqual(query1.get_ids(), ['a1'])

    def test_nested_ancestor_exists(self):
        xpath = "ancestor-exists(parent, city='LA' and ancestor-exists(parent,state='CA'))"
        query1 = get_case_search_query(
            self.domain,
            ['a'],
            {'_xpath_query': xpath},
        )
        self.assertItemsEqual(query1.get_ids(), ['a1'])

    @generate_cases([
        ("ancestor-exists(status='active' and subcase-exists('parent', city = 'LA'))",),
        ("ancestor-exists(status='active' and subcase-count('parent', city = 'LA')) > 3",),
    ])
    def test_search_criteria_validate(self, xpath):
        # subcase-* functions are not allowed inside ancestor-exists.
        with assert_raises(CaseFilterError):
            get_case_search_query(
                self.domain,
                ['a'],
                {'_xpath_query': xpath},
            )
7,374 | parse optimizer | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import collections.abc
import copy
from types import SimpleNamespace

from pipeline.param import consts
from pipeline.param.base_param import BaseParam
from pipeline.param.callback_param import CallbackParam
from pipeline.param.encrypt_param import EncryptParam
from pipeline.param.encrypted_mode_calculation_param import EncryptedModeCalculatorParam
from pipeline.param.intersect_param import IntersectParam
from pipeline.param.predict_param import PredictParam
class FTLParam(BaseParam):
    """Parameter holder for the hetero federated transfer learning (FTL) component."""

    def __init__(self, alpha=1, tol=0.000001,
                 n_iter_no_change=False, validation_freqs=None, optimizer={'optimizer': 'Adam', 'learning_rate': 0.01},
                 nn_define={}, epochs=1, intersect_param=IntersectParam(consts.RSA), config_type='keras', batch_size=-1,
                 encrypte_param=EncryptParam(),
                 encrypted_mode_calculator_param=EncryptedModeCalculatorParam(mode="confusion_opt"),
                 predict_param=PredictParam(), mode='plain', communication_efficient=False,
                 local_round=5, callback_param=CallbackParam()):
        """
        Args:
            alpha: float, a loss coefficient defined in paper, it defines the importance of alignment loss
            tol: float, loss tolerance
            n_iter_no_change: bool, check loss convergence or not
            validation_freqs: None or positive integer or container object in python. Do validation in training process or Not.
                              if equals None, will not do validation in train process;
                              if equals positive integer, will validate data every validation_freqs epochs passes;
                              if container object in python, will validate data if epochs belong to this container.
                              e.g. validation_freqs = [10, 15], will validate data when epoch equals to 10 and 15.
                              Default: None
                              The default value is None, 1 is suggested. You can set it to a number larger than 1 in order to
                              speed up training by skipping validation rounds. When it is larger than 1, a number which is
                              divisible by "epochs" is recommended, otherwise, you will miss the validation scores
                              of last training epoch.
            optimizer: optimizer method, accept following types:
                       1. a string, one of "Adadelta", "Adagrad", "Adam", "Adamax", "Nadam", "RMSprop", "SGD"
                       2. a dict, with a required key-value pair keyed by "optimizer",
                          with optional key-value pairs such as learning rate.
                       defaults to "SGD"
            nn_define: dict, a dict represents the structure of neural network, it can be output by tf-keras
            epochs: int, epochs num
            intersect_param: define the intersect method
            config_type: now only 'tf-keras' is supported
            batch_size: batch size when computing transformed feature embedding, -1 use full data.
            encrypte_param: encrypted param
            encrypted_mode_calculator_param:
            predict_param: predict param
            mode:
                plain: will not use any encrypt algorithms, data exchanged in plaintext
                encrypted: use paillier to encrypt gradients
            communication_efficient:
                bool, will use communication efficient or not. when communication efficient is enabled, FTL model will
                update gradients by several local rounds using intermediate data
            local_round: local update round when using communication efficient
        """
        # NOTE(review): the dict/object defaults above are mutable and shared
        # across calls; they are never mutated here, but kept as-is because
        # they are part of the published signature.
        super(FTLParam, self).__init__()
        self.alpha = alpha
        self.tol = tol
        self.n_iter_no_change = n_iter_no_change
        self.validation_freqs = validation_freqs
        self.optimizer = optimizer
        self.nn_define = nn_define
        self.epochs = epochs
        # deepcopy the sub-param objects so shared defaults are never aliased
        self.intersect_param = copy.deepcopy(intersect_param)
        self.config_type = config_type
        self.batch_size = batch_size
        self.encrypted_mode_calculator_param = copy.deepcopy(encrypted_mode_calculator_param)
        self.encrypt_param = copy.deepcopy(encrypte_param)
        self.predict_param = copy.deepcopy(predict_param)
        self.mode = mode
        self.communication_efficient = communication_efficient
        self.local_round = local_round
        self.callback_param = copy.deepcopy(callback_param)

    def check(self):
        """Validate all parameters; raises ValueError/AssertionError on bad config.

        Also normalizes self.optimizer into a SimpleNamespace.
        """
        self.intersect_param.check()
        self.encrypt_param.check()
        self.encrypted_mode_calculator_param.check()
        self.optimizer = self.METHOD_NAME(self.optimizer)
        supported_config_type = ["keras"]
        if self.config_type not in supported_config_type:
            raise ValueError(f"config_type should be one of {supported_config_type}")
        if not isinstance(self.tol, (int, float)):
            raise ValueError("tol should be numeric")
        if not isinstance(self.epochs, int) or self.epochs <= 0:
            raise ValueError("epochs should be a positive integer")
        if self.nn_define and not isinstance(self.nn_define, dict):
            raise ValueError("bottom_nn_define should be a dict defining the structure of neural network")
        if self.batch_size != -1:
            if not isinstance(self.batch_size, int) \
                    or self.batch_size < consts.MIN_BATCH_SIZE:
                raise ValueError(
                    " {} not supported, should be larger than 10 or -1 represent for all data".format(self.batch_size))
        if self.validation_freqs is None:
            pass
        elif isinstance(self.validation_freqs, int):
            if self.validation_freqs < 1:
                raise ValueError("validation_freqs should be larger than 0 when it's integer")
        elif not isinstance(self.validation_freqs, collections.abc.Container):
            # Fix: collections.Container was removed in Python 3.10; the ABC
            # lives in collections.abc.
            raise ValueError("validation_freqs should be None or positive integer or container")
        assert isinstance(self.communication_efficient, bool), 'communication efficient must be a boolean'
        assert self.mode in [
            'encrypted', 'plain'], 'mode options: encrypted or plain, but {} is offered'.format(
            self.mode)
        self.check_positive_integer(self.epochs, 'epochs')
        self.check_positive_number(self.alpha, 'alpha')
        self.check_positive_integer(self.local_round, 'local round')

    @staticmethod
    def METHOD_NAME(opt):
        """Normalize an optimizer spec into SimpleNamespace(optimizer=..., kwargs=...).

        Examples:
            1. "optimize": "SGD"
            2. "optimize": {
                "optimizer": "SGD",
                "learning_rate": 0.05
            }
        """
        kwargs = {}
        if isinstance(opt, str):
            return SimpleNamespace(optimizer=opt, kwargs=kwargs)
        elif isinstance(opt, dict):
            # Fix: the default used to be `kwargs` ({}), which is falsy and
            # therefore behaved like None; make the intent explicit.
            optimizer = opt.get("optimizer")
            if not optimizer:
                raise ValueError(f"optimizer config: {opt} invalid")
            kwargs = {k: v for k, v in opt.items() if k != "optimizer"}
            return SimpleNamespace(optimizer=optimizer, kwargs=kwargs)
        else:
            raise ValueError(f"invalid type for optimize: {type(opt)}")
7,375 | in directory | # ********************************************************************************
#
# Inviwo - Interactive Visualization Workshop
#
# Copyright (c) 2013-2023 Inviwo Foundation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ********************************************************************************
import os
import sys
import itertools
import datetime
import math
import subprocess
import time
import re
from . import colorprint as cp
from . import util
def subDirs(path):
    """Return the names of the immediate subdirectories of *path*.

    A missing path or a non-directory yields an empty list.
    """
    if not os.path.isdir(path):
        return []
    return next(os.walk(path))[1]
def toPath(*parts):
    """Join the given path components with forward slashes.

    Fix: the varargs used to be named `list`, shadowing the builtin; renamed
    to `parts` (callers are unaffected — *args cannot be passed by keyword).
    """
    return "/".join(parts)
def useForwardSlash(path):
    """Return *path* with every platform separator replaced by '/'."""
    return path.replace(os.sep, "/")
def addPostfix(file, postfix):
    """Insert *postfix* into *file* just before its first extension separator."""
    stem, *extensions = file.split(os.path.extsep)
    return os.path.extsep.join([stem + postfix] + extensions)
def METHOD_NAME(file, directory):
    """Return True when *file* lies inside *directory* (symlinks resolved)."""
    # Normalize both to absolute paths; the trailing '' gives the directory a
    # closing separator so "/a/bc" is not mistaken for being inside "/a/b".
    root = os.path.join(os.path.realpath(directory), '')
    resolved = os.path.realpath(file)
    # e.g. /a/b/c/d.rst against /a/b: the common string prefix is /a/b/
    return os.path.commonprefix([resolved, root]) == root
def getScriptFolder():
    """Get the directory of the script that is calling this function."""
    # Local import keeps `inspect` out of the module's public namespace.
    import inspect
    # f_back is the *caller's* frame, so this depends on the call depth:
    # it returns the folder of the file that invoked getScriptFolder().
    return os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe().f_back)))
def mkdir(*path):
    """Join *path* components with '/' and create the directory if absent.

    Returns the joined path. Only the leaf directory is created; parents
    must already exist (plain os.mkdir semantics).
    """
    target = "/".join(path)
    if not os.path.isdir(target):
        os.mkdir(target)
    return target
def partition(l, n):
    """Yield successive n-sized chunks from the sequence l."""
    yield from (l[offset:offset + n] for offset in range(0, len(l), n))
def pad_infinite(iterable, padding=None):
    """Yield every item of *iterable*, then repeat *padding* forever."""
    yield from iterable
    while True:
        yield padding
def pad(iterable, size, padding=None):
    """Yield exactly *size* items: the iterable's items, filled out with *padding*."""
    return itertools.islice(itertools.chain(iterable, itertools.repeat(padding)), size)
def addMidSteps(func, iterable, transform=lambda x: x):
    """s -> s1, func(s1,s2), s2, func(s2,s3), s3

    Emits func(prev, cur) between every pair of consecutive items; if that
    result is itself iterable its elements are yielded one by one, otherwise
    it is yielded as a single value.  Each original item passes through
    *transform* before being yielded.  *iterable* must be an iterator.
    """
    previous = next(iterable)
    yield transform(previous)
    for current in iterable:
        mid = func(previous, current)
        try:
            yield from iter(mid)
        except TypeError:
            # non-iterable result: emit it as-is
            yield mid
        previous = current
        yield transform(current)
def makeSlice(string):
    """Turn a ':'-separated string such as '1:10:2' into a slice object.

    Non-numeric (including empty) fields become None; missing fields are
    padded with None and extras beyond three are ignored.
    """
    def to_int(text):
        try:
            return int(text)
        except ValueError:
            return None
    fields = [to_int(part) for part in string.split(":")]
    # pad with None (or truncate) to exactly the three slice fields
    fields = (fields + [None] * 3)[:3]
    return slice(*fields)
def dateToString(date):
    """Format *date* as 'YYYY-MM-DDTHH:MM:SS.ffffff' (microsecond precision)."""
    return format(date, "%Y-%m-%dT%H:%M:%S.%f")
def stringToDate(string):
    """Inverse of dateToString: parse 'YYYY-MM-DDTHH:MM:SS.ffffff'."""
    timestamp_format = "%Y-%m-%dT%H:%M:%S.%f"
    return datetime.datetime.strptime(string, timestamp_format)
def safeget(dct, *keys, failure=None):
    """Follow *keys* through nested dicts; return *failure* on any miss."""
    current = dct
    for key in keys:
        if not isinstance(current, dict) or key not in current:
            return failure
        current = current[key]
    return current
def find_pyconfig(path):
    """Search *path* and its ancestors for a 'pyconfig.ini' file.

    Returns the path to the first pyconfig.ini found while walking up the
    directory tree, or None when no ancestor contains one.

    Fix: os.path.split('/') returns ('/', ''), so on an absolute path the
    old loop never reached the '' sentinel and spun forever at the root;
    we now stop when splitting no longer makes progress.
    """
    while path != "":
        candidate = "/".join((path, "pyconfig.ini"))
        if os.path.exists(candidate):
            return candidate
        parent = os.path.split(path)[0]
        if parent == path:  # reached the filesystem root
            return None
        path = parent
    return None
def stats(l):
    """Return (mean, population standard deviation) of the values in *l*."""
    mean = sum(l) / len(l)
    variance = sum((value - mean) ** 2 for value in l) / len(l)
    return mean, math.sqrt(variance)
def openWithDefaultApp(file):
    """Open *file* with the platform's default application (side effect only).

    Prints the path, then dispatches per platform; no return value, and on
    unrecognized platforms nothing is launched.
    """
    print(file)
    if sys.platform.startswith('linux'):
        subprocess.call(["xdg-open", file])
    elif sys.platform == "darwin":
        subprocess.call(["open", file])
    elif sys.platform == "win32":
        os.startfile(file)
def writeTemplateFile(newfilename, templatefilename, comment, name, define, api, incfile, author,
                      force, verbose):
    """Instantiate a source-file template.

    Reads *templatefilename*, substitutes the <name>/<dname>/<lname>/<uname>/
    <api>/<define>/<incfile>/<author>/<datetime> placeholders, and writes the
    result to *newfilename*.  Existing files are only overwritten when *force*
    is set; when *verbose* is set every generated line is echoed to stdout.
    """
    (path, filename) = os.path.split(newfilename)
    util.mkdir(path)
    if os.path.exists(newfilename) and not force:
        # Fix: this used the undefined name `file` (NameError at runtime);
        # the intended value is the target path.
        cp.print_error("... File exists: " + newfilename + ", use --force or overwrite")
        return
    elif os.path.exists(newfilename) and force:
        cp.print_warn("... Overwriting existing file")
    # Create the template in memory
    datetimestr = time.strftime("%A, %B %d, %Y - %H:%M:%S")
    lines = []
    with open(templatefilename, 'r') as f:
        for line in f:
            line = line.replace("<name>", name)
            # <dname>: display name with spaces at camelCase boundaries
            # (raw strings avoid the invalid-escape warning for \g<...>)
            line = line.replace("<dname>", re.sub(
                r"([a-z])([A-Z])", r"\g<1> \g<2>", name.replace("Kx", "")))
            line = line.replace("<lname>", name.lower())
            line = line.replace("<uname>", name.upper())
            line = line.replace("<api>", api)
            line = line.replace("<define>", define)
            line = line.replace("<incfile>", incfile)
            line = line.replace("<author>", author)
            line = line.replace("<datetime>", datetimestr)
            lines.append(line)
            if verbose:
                print(line, end='')
    if verbose:
        print("")
    finaltext = "".join(lines)
    with open(newfilename, "w") as f:
        print(comment + f.name)
        f.write(finaltext)
7,376 | rbda eq 4 12 | from __future__ import absolute_import, division, print_function
try: import scitbx
except ImportError: import scitbx_matrix as matrix
else: from scitbx import matrix
import math
class zero_dof_alignment(object):
  """Alignment for a joint with no degrees of freedom: identity transforms."""
  def __init__(O):
    # Both body-to-alignment and alignment-to-body transforms are identity.
    O.cb_0b = matrix.rt(((1,0,0,0,1,0,0,0,1), (0,0,0)))
    O.cb_b0 = O.cb_0b
class zero_dof(object):
  """Joint model with zero degrees of freedom (a rigidly fixed body).

  All velocity/position updates are no-ops and the generalized coordinate
  vector q is empty.
  """
  degrees_of_freedom = 0
  qd_zero = matrix.zeros(n=degrees_of_freedom)
  qdd_zero = qd_zero
  def __init__(O):
    O.q_size = 0
    # identity spatial transforms between parent and successor frames
    O.cb_ps = matrix.rt(((1,0,0,0,1,0,0,0,1), (0,0,0)))
    O.cb_sp = O.cb_ps
    # 6x0 motion subspace: the joint permits no motion
    O.motion_subspace = matrix.rec(elems=(), n=(6,0))
  def get_linear_velocity(O, qd):
    return None
  def new_linear_velocity(O, qd, value):
    return None
  def time_step_position(O, qd, delta_t):
    # position never changes; return a fresh zero-dof joint state
    return zero_dof()
  def time_step_velocity(O, qd, qdd, delta_t):
    return zero_dof.qd_zero
  def tau_as_d_e_pot_d_q(O, tau):
    return zero_dof.qd_zero
  def get_q(O):
    return ()
  def new_q(O, q):
    return zero_dof()
class six_dof_alignment(object):
  """Alignment for a free joint: pure translation moving the center of mass to the origin."""
  def __init__(O, center_of_mass):
    O.cb_0b = matrix.rt(((1,0,0,0,1,0,0,0,1), -center_of_mass))
    O.cb_b0 = matrix.rt(((1,0,0,0,1,0,0,0,1), center_of_mass))
class six_dof(object):
  """Free (6-dof) joint: quaternion orientation qe plus translation qr.

  qd packs angular velocity (first 3) and linear velocity (last 3), both in
  the body frame. Follows the quaternion treatment of RBDA chapter 4.
  """
  degrees_of_freedom = 6
  qd_zero = matrix.zeros(n=degrees_of_freedom)
  qdd_zero = qd_zero
  def __init__(O, qe, qr):
    O.qe = qe
    O.qr = qr
    # q is (4 quaternion components) + (3 translation components)
    O.q_size = 7
    O.unit_quaternion = qe.normalize() # RBDA, bottom of p. 86
    # rotation matrix from the (unit) quaternion, RBDA Eq. 4.12
    O.e = METHOD_NAME(q=O.unit_quaternion)
    O.r = qr
    O.cb_ps = matrix.rt((O.e, -O.e * O.r)) # RBDA Eq. 2.28
    O.cb_sp = matrix.rt((O.e.transpose(), O.r))
    # None signals the identity 6x6 motion subspace for a free joint
    O.motion_subspace = None
  def get_linear_velocity(O, qd):
    # last three components of qd
    return matrix.col(qd.elems[3:])
  def new_linear_velocity(O, qd, value):
    # keep angular part, replace linear part
    return matrix.col((matrix.col(qd.elems[:3]), value)).resolve_partitions()
  def time_step_position(O, qd, delta_t):
    w_body_frame, v_body_frame = matrix.col_list([qd.elems[:3], qd.elems[3:]])
    # quaternion rate from body angular velocity (RBDA Eq. 4.13)
    qed = rbda_eq_4_13(q=O.unit_quaternion) * w_body_frame
    new_qe = (O.qe + qed * delta_t).normalize()
    # translation rate: body linear velocity rotated into the parent frame
    qrd = O.e.transpose() * v_body_frame
    new_qr = O.qr + qrd * delta_t
    return six_dof(qe=new_qe, qr=new_qr)
  def time_step_velocity(O, qd, qdd, delta_t):
    return qd + qdd * delta_t
  def tau_as_d_e_pot_d_q(O, tau):
    # chain rule through the quaternion normalization for the rotational part
    d = d_unit_quaternion_d_qe_matrix(q=O.qe)
    c = d * 4 * rbda_eq_4_13(q=O.unit_quaternion)
    n, f = matrix.col_list([tau.elems[:3], tau.elems[3:]])
    return matrix.col((c * n, O.e.transpose() * f)).resolve_partitions()
  def get_q(O):
    return O.qe.elems + O.qr.elems
  def new_q(O, q):
    new_qe, new_qr = matrix.col_list((q[:4], q[4:]))
    return six_dof(qe=new_qe, qr=new_qr)
class spherical_alignment(object):
  """Alignment for a ball joint: pure translation moving the pivot to the origin."""
  def __init__(O, pivot):
    O.cb_0b = matrix.rt(((1,0,0,0,1,0,0,0,1), -pivot))
    O.cb_b0 = matrix.rt(((1,0,0,0,1,0,0,0,1), pivot))
class spherical(object):
  """Ball (spherical) joint: 3 rotational dof parameterized by a quaternion qe.

  qd is the angular velocity in the body frame.
  """
  degrees_of_freedom = 3
  qd_zero = matrix.zeros(n=degrees_of_freedom)
  qdd_zero = qd_zero
  def __init__(O, qe):
    O.qe = qe
    # q holds the 4 quaternion components
    O.q_size = 4
    O.unit_quaternion = qe.normalize() # RBDA, bottom of p. 86
    # rotation matrix from the (unit) quaternion, RBDA Eq. 4.12
    e = METHOD_NAME(q=O.unit_quaternion)
    O.cb_ps = matrix.rt((e, (0,0,0)))
    O.cb_sp = matrix.rt((e.transpose(), (0,0,0)))
    # motion subspace selects the three rotational axes
    O.motion_subspace = matrix.rec((
      1,0,0,
      0,1,0,
      0,0,1,
      0,0,0,
      0,0,0,
      0,0,0), n=(6,3))
  def get_linear_velocity(O, qd):
    return None
  def new_linear_velocity(O, qd, value):
    return None
  def time_step_position(O, qd, delta_t):
    w_body_frame = qd
    # quaternion rate from angular velocity (RBDA Eq. 4.13)
    qed = rbda_eq_4_13(q=O.unit_quaternion) * w_body_frame
    new_qe = (O.qe + qed * delta_t).normalize()
    return spherical(qe=new_qe)
  def time_step_velocity(O, qd, qdd, delta_t):
    return qd + qdd * delta_t
  def tau_as_d_e_pot_d_q(O, tau):
    # chain rule through the quaternion normalization
    d = d_unit_quaternion_d_qe_matrix(q=O.qe)
    c = d * 4 * rbda_eq_4_13(q=O.unit_quaternion)
    n = tau
    return c * n
  def get_q(O):
    return O.qe.elems
  def new_q(O, q):
    return spherical(qe=matrix.col(q))
class revolute_alignment(object):
  """Alignment for a hinge joint: rotates *normal* onto the z-axis, pivot to origin."""
  def __init__(O, pivot, normal):
    r = normal.vector_to_001_rotation()
    O.cb_0b = matrix.rt((r, -r * pivot))
    O.cb_b0 = matrix.rt((r.transpose(), pivot))
class revolute(object):
  """Hinge (revolute) joint: one rotational dof about the joint-frame z-axis.

  qe holds the single joint angle; qd its time derivative.
  """
  degrees_of_freedom = 1
  qd_zero = matrix.zeros(n=degrees_of_freedom)
  qdd_zero = qd_zero
  def __init__(O, qe):
    O.qe = qe
    O.q_size = len(qe)
    # rotation about z by the joint angle
    c, s = math.cos(qe[0]), math.sin(qe[0])
    e = matrix.sqr((c, s, 0, -s, c, 0, 0, 0, 1)) # RBDA Tab. 2.2
    # spatial transforms between parent and successor frames
    O.cb_ps = matrix.rt((e, (0,0,0)))
    O.cb_sp = matrix.rt((e.transpose(), (0,0,0)))
    # motion subspace: rotation about the z-axis only
    O.motion_subspace = matrix.col((0,0,1,0,0,0))
  def get_linear_velocity(O, qd):
    return None
  def new_linear_velocity(O, qd, value):
    return None
  def time_step_position(O, qd, delta_t):
    new_qe = O.qe + qd * delta_t
    return revolute(qe=new_qe)
  def time_step_velocity(O, qd, qdd, delta_t):
    return qd + qdd * delta_t
  def tau_as_d_e_pot_d_q(O, tau):
    # joint coordinate is the angle itself, so tau maps through unchanged
    return tau
  def get_q(O):
    return O.qe.elems
  def new_q(O, q):
    return revolute(qe=matrix.col(q))
# Same alignment as a free joint: translate the center of mass to the origin.
class translational_alignment(six_dof_alignment): pass
class translational(object):
  """Translation-only joint: 3 linear dof, no rotation.

  qr is the translation vector; qd its time derivative.
  """
  degrees_of_freedom = 3
  qd_zero = matrix.zeros(n=degrees_of_freedom)
  qdd_zero = qd_zero
  def __init__(O, qr):
    O.qr = qr
    O.q_size = 3
    # identity rotation with +/- qr translation between frames
    O.cb_ps = matrix.rt(((1,0,0,0,1,0,0,0,1), -qr))
    O.cb_sp = matrix.rt(((1,0,0,0,1,0,0,0,1), qr))
    # motion subspace selects the three translational axes
    O.motion_subspace = matrix.rec((
      0,0,0,
      0,0,0,
      0,0,0,
      1,0,0,
      0,1,0,
      0,0,1), n=(6,3))
  def get_linear_velocity(O, qd):
    return qd
  def new_linear_velocity(O, qd, value):
    return value
  def time_step_position(O, qd, delta_t):
    new_qr = O.qr + qd * delta_t
    return translational(qr=new_qr)
  def time_step_velocity(O, qd, qdd, delta_t):
    return qd + qdd * delta_t
  def tau_as_d_e_pot_d_q(O, tau):
    return tau
  def get_q(O):
    return O.qr.elems
  def new_q(O, q):
    return translational(qr=matrix.col(q))
def METHOD_NAME(q):
  """Rotation matrix from the unit quaternion ``q`` = (p0, p1, p2, p3).

  ``q`` must be normalized; the expression assumes p0**2+p1**2+p2**2+p3**2 == 1.
  """
  p0, p1, p2, p3 = q
  return matrix.sqr((
    p0**2+p1**2-0.5, p1*p2+p0*p3, p1*p3-p0*p2,
    p1*p2-p0*p3, p0**2+p2**2-0.5, p2*p3+p0*p1,
    p1*p3+p0*p2, p2*p3-p0*p1, p0**2+p3**2-0.5)) * 2
def rbda_eq_4_13(q):
  """RBDA Eq. 4.13: 4x3 matrix mapping body-frame angular velocity to
  quaternion rates for the unit quaternion ``q`` = (p0, p1, p2, p3).
  """
  p0, p1, p2, p3 = q
  return matrix.rec((
    -p1, -p2, -p3,
    p0, -p3, p2,
    p3, p0, -p1,
    -p2, p1, p0), n=(4,3)) * 0.5
def d_unit_quaternion_d_qe_matrix(q):
  """
  Coefficient matrix for converting gradients w.r.t. normalized Euler
  parameters to gradients w.r.t. non-normalized parameters, as produced
  e.g. by a minimizer in the line search.
  Mathematica code:
    nsq = p0^2+p1^2+p2^2+p3^2
    p0p = p0 / Sqrt[nsq]
    p1p = p1 / Sqrt[nsq]
    p2p = p2 / Sqrt[nsq]
    p3p = p3 / Sqrt[nsq]
    n3 = (p0^2+p1^2+p2^2+p3^2)^(3/2)
    FortranForm[FullSimplify[D[p0p,p0]*n3]]
    FortranForm[FullSimplify[D[p1p,p0]*n3]]
    FortranForm[FullSimplify[D[p2p,p0]*n3]]
    FortranForm[FullSimplify[D[p3p,p0]*n3]]
    FortranForm[FullSimplify[D[p0p,p1]*n3]]
    FortranForm[FullSimplify[D[p1p,p1]*n3]]
    FortranForm[FullSimplify[D[p2p,p1]*n3]]
    FortranForm[FullSimplify[D[p3p,p1]*n3]]
    FortranForm[FullSimplify[D[p0p,p2]*n3]]
    FortranForm[FullSimplify[D[p1p,p2]*n3]]
    FortranForm[FullSimplify[D[p2p,p2]*n3]]
    FortranForm[FullSimplify[D[p3p,p2]*n3]]
    FortranForm[FullSimplify[D[p0p,p3]*n3]]
    FortranForm[FullSimplify[D[p1p,p3]*n3]]
    FortranForm[FullSimplify[D[p2p,p3]*n3]]
    FortranForm[FullSimplify[D[p3p,p3]*n3]]
  """
  p0,p1,p2,p3 = q
  p0s,p1s,p2s,p3s = p0**2, p1**2, p2**2, p3**2
  # 3/2. keeps the exponent a float (true division of the fraction).
  n3 = (p0s+p1s+p2s+p3s)**(3/2.)
  # Diagonal terms: sum of the squares of the other three parameters.
  c00 = p1s+p2s+p3s
  c11 = p0s+p2s+p3s
  c22 = p0s+p1s+p3s
  c33 = p0s+p1s+p2s
  # Off-diagonal terms; the matrix below is symmetric.
  c01 = -p0*p1
  c02 = -p0*p2
  c03 = -p0*p3
  c12 = -p1*p2
  c13 = -p1*p3
  c23 = -p2*p3
  return matrix.sqr((
    c00, c01, c02, c03,
    c01, c11, c12, c13,
    c02, c12, c22, c23,
    c03, c13, c23, c33)) / n3
7,377 | does read | """
Adaptor for reading COCO keypoint detection datasets.
See http://cocodataset.org/#format-data for details about this format.
"""
import os
import numpy as np
from sleap import Labels, Video, Skeleton
from sleap.gui.dialogs.missingfiles import MissingFilesDialog
from sleap.instance import Instance, LabeledFrame, Point, Track
from .adaptor import Adaptor, SleapObjectType
from .filehandle import FileHandle
class LabelsCocoAdaptor(Adaptor):
    """Adaptor that reads COCO keypoint-detection JSON files into `Labels`.

    See http://cocodataset.org/#format-data for the format details.
    Writing COCO files is not supported.
    """

    @property
    def handles(self):
        # This adaptor produces Labels objects.
        return SleapObjectType.labels

    @property
    def default_ext(self):
        return "json"

    @property
    def all_exts(self):
        return ["json"]

    @property
    def name(self):
        return "COCO Dataset JSON"

    def can_read_file(self, file: FileHandle):
        """Cheap structural check that `file` looks like COCO keypoint JSON."""
        if not self.does_match_ext(file.filename):
            return False
        if not file.is_json:
            return False
        if "annotations" not in file.json:
            return False
        if "categories" not in file.json:
            return False
        return True

    def can_write_filename(self, filename: str):
        # Writing is not implemented for this format.
        return False

    def METHOD_NAME(self) -> bool:
        return True

    def does_write(self) -> bool:
        return False

    @classmethod
    def read(
        cls,
        file: FileHandle,
        img_dir: str,
        use_missing_gui: bool = False,
        *args,
        **kwargs,
    ) -> Labels:
        """Parse a COCO keypoint JSON file into a `Labels` object.

        Args:
            file: Handle on the JSON file to read.
            img_dir: Directory prepended to each image filename.
            use_missing_gui: If True, open a dialog to resolve missing image
                files instead of raising.

        Returns:
            The parsed `Labels`, or None if the user cancelled the
            missing-files dialog.

        Raises:
            FileNotFoundError: If images are missing and no GUI was requested.
        """
        dicts = file.json

        # Make skeletons from "categories"
        skeleton_map = dict()
        for category in dicts["categories"]:
            skeleton = Skeleton(name=category["name"])
            skeleton_id = category["id"]
            node_names = category["keypoints"]
            skeleton.add_nodes(node_names)

            try:
                for src_idx, dst_idx in category["skeleton"]:
                    skeleton.add_edge(node_names[src_idx], node_names[dst_idx])
            except IndexError as e:
                # According to the COCO data format specification[1], the
                # edges are supposed to be 1-indexed, but in some of their own
                # datasets the edges are 0-indexed. So we try 0-indexed first
                # and fall back to 1-indexed when an index is out of range.
                # [1]: http://cocodataset.org/#format-data

                # Clear any edges we already created using 0-indexing
                skeleton.clear_edges()

                # Add edges
                for src_idx, dst_idx in category["skeleton"]:
                    skeleton.add_edge(node_names[src_idx - 1], node_names[dst_idx - 1])

            skeleton_map[skeleton_id] = skeleton

        # Make videos from "images"

        # Remove images that aren't referenced in the annotations
        img_refs = [annotation["image_id"] for annotation in dicts["annotations"]]
        dicts["images"] = list(filter(lambda im: im["id"] in img_refs, dicts["images"]))

        # Key in JSON file should be "file_name", but sometimes it's "filename",
        # so we have to check both.
        img_filename_key = "file_name"
        if img_filename_key not in dicts["images"][0].keys():
            img_filename_key = "filename"

        # First add the img_dir to each image filename
        img_paths = [
            os.path.join(img_dir, image[img_filename_key]) for image in dicts["images"]
        ]

        # See if there are any missing files
        img_missing = [not os.path.exists(path) for path in img_paths]

        if sum(img_missing):
            if use_missing_gui:
                okay = MissingFilesDialog(img_paths, img_missing).exec_()

                if not okay:
                    return None
            else:
                raise FileNotFoundError(
                    f"Images for COCO dataset could not be found in {img_dir}."
                )

        # Update the image paths (with img_dir or user selected path)
        for image, path in zip(dicts["images"], img_paths):
            image[img_filename_key] = path

        # Create the video objects for the image files
        image_video_map = dict()

        vid_id_video_map = dict()
        for image in dicts["images"]:
            image_id = image["id"]
            image_filename = image[img_filename_key]

            # Sometimes images have a vid_id which links multiple images
            # together as one video. If so, we'll use that as the video key.
            # But if there isn't a vid_id, we'll treat each image as a
            # distinct video and use the image id as the video id.
            vid_id = image.get("vid_id", image_id)

            if vid_id not in vid_id_video_map:
                kwargs = dict(filenames=[image_filename])
                for key in ("width", "height"):
                    if key in image:
                        kwargs[key] = image[key]

                video = Video.from_image_filenames(**kwargs)
                vid_id_video_map[vid_id] = video
                frame_idx = 0
            else:
                # Append this image as the next frame of the existing video.
                video = vid_id_video_map[vid_id]
                frame_idx = video.num_frames
                video.backend.filenames.append(image_filename)

            image_video_map[image_id] = (video, frame_idx)

        # Make instances from "annotations"
        lf_map = dict()
        track_map = dict()
        for annotation in dicts["annotations"]:
            skeleton = skeleton_map[annotation["category_id"]]
            image_id = annotation["image_id"]
            video, frame_idx = image_video_map[image_id]
            # COCO keypoints are flat (x, y, visibility-flag) triples.
            keypoints = np.array(annotation["keypoints"], dtype="int").reshape(-1, 3)

            track = None
            if "track_id" in annotation:
                track_id = annotation["track_id"]
                if track_id not in track_map:
                    track_map[track_id] = Track(frame_idx, str(track_id))
                track = track_map[track_id]

            points = dict()
            any_visible = False
            for i in range(len(keypoints)):
                node = skeleton.nodes[i]
                x, y, flag = keypoints[i]

                if flag == 0:
                    # node not labeled for this instance
                    continue

                # Flag 2 means labeled and visible; 1 means labeled, occluded.
                is_visible = flag == 2
                any_visible = any_visible or is_visible
                points[node] = Point(x, y, is_visible)

            if points:
                # If none of the points had 2 as the "visible" flag, we'll
                # assume this is incorrect and just mark all as visible.
                if not any_visible:
                    for point in points.values():
                        point.visible = True

                inst = Instance(skeleton=skeleton, points=points, track=track)

                if image_id not in lf_map:
                    lf_map[image_id] = LabeledFrame(video, frame_idx)

                lf_map[image_id].insert(0, inst)

        return Labels(labeled_frames=list(lf_map.values()))
7,378 | get log analytic export request rate by | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'GetLogAnalyticExportRequestRateByIntervalResult',
'AwaitableGetLogAnalyticExportRequestRateByIntervalResult',
'get_log_analytic_export_request_rate_by_interval',
'get_log_analytic_export_request_rate_by_interval_output',
]
@pulumi.output_type
class GetLogAnalyticExportRequestRateByIntervalResult:
    """
    LogAnalytics operation status response
    """
    def __init__(__self__, properties=None):
        # Generated code: validate the raw invoke result before storing it.
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.LogAnalyticsOutputResponse':
        """
        LogAnalyticsOutput
        """
        return pulumi.get(self, "properties")
class AwaitableGetLogAnalyticExportRequestRateByIntervalResult(GetLogAnalyticExportRequestRateByIntervalResult):
    # Awaitable wrapper so callers may ``await`` the (already resolved) result;
    # __await__ yields nothing and returns the plain result object.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetLogAnalyticExportRequestRateByIntervalResult(
            properties=self.properties)
def METHOD_NAME(blob_container_sas_uri: Optional[str] = None,
                from_time: Optional[str] = None,
                group_by_client_application_id: Optional[bool] = None,
                group_by_operation_name: Optional[bool] = None,
                group_by_resource_name: Optional[bool] = None,
                group_by_throttle_policy: Optional[bool] = None,
                group_by_user_agent: Optional[bool] = None,
                interval_length: Optional['IntervalInMins'] = None,
                location: Optional[str] = None,
                to_time: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLogAnalyticExportRequestRateByIntervalResult:
    """
    Export logs that show Api requests made by this subscription in the given time window to show throttling activities.


    :param str blob_container_sas_uri: SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to.
    :param str from_time: From time of the query
    :param bool group_by_client_application_id: Group query result by Client Application ID.
    :param bool group_by_operation_name: Group query result by Operation Name.
    :param bool group_by_resource_name: Group query result by Resource Name.
    :param bool group_by_throttle_policy: Group query result by Throttle Policy applied.
    :param bool group_by_user_agent: Group query result by User Agent.
    :param 'IntervalInMins' interval_length: Interval value in minutes used to create LogAnalytics call rate logs.
    :param str location: The location upon which virtual-machine-sizes is queried.
    :param str to_time: To time of the query
    """
    # Generated code: marshal the snake_case arguments into the camelCase
    # wire format expected by the provider.
    __args__ = dict()
    __args__['blobContainerSasUri'] = blob_container_sas_uri
    __args__['fromTime'] = from_time
    __args__['groupByClientApplicationId'] = group_by_client_application_id
    __args__['groupByOperationName'] = group_by_operation_name
    __args__['groupByResourceName'] = group_by_resource_name
    __args__['groupByThrottlePolicy'] = group_by_throttle_policy
    __args__['groupByUserAgent'] = group_by_user_agent
    __args__['intervalLength'] = interval_length
    __args__['location'] = location
    __args__['toTime'] = to_time
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous invoke against the pinned provider API version.
    __ret__ = pulumi.runtime.invoke('azure-native:compute/v20230701:getLogAnalyticExportRequestRateByInterval', __args__, opts=opts, typ=GetLogAnalyticExportRequestRateByIntervalResult).value

    return AwaitableGetLogAnalyticExportRequestRateByIntervalResult(
        properties=pulumi.get(__ret__, 'properties'))
# Output-typed variant: the decorator lifts the plain invoke function so all
# arguments may be pulumi Outputs; the body is intentionally empty.
@_utilities.lift_output_func(METHOD_NAME)
def get_log_analytic_export_request_rate_by_interval_output(blob_container_sas_uri: Optional[pulumi.Input[str]] = None,
                                                            from_time: Optional[pulumi.Input[str]] = None,
                                                            group_by_client_application_id: Optional[pulumi.Input[Optional[bool]]] = None,
                                                            group_by_operation_name: Optional[pulumi.Input[Optional[bool]]] = None,
                                                            group_by_resource_name: Optional[pulumi.Input[Optional[bool]]] = None,
                                                            group_by_throttle_policy: Optional[pulumi.Input[Optional[bool]]] = None,
                                                            group_by_user_agent: Optional[pulumi.Input[Optional[bool]]] = None,
                                                            interval_length: Optional[pulumi.Input['IntervalInMins']] = None,
                                                            location: Optional[pulumi.Input[str]] = None,
                                                            to_time: Optional[pulumi.Input[str]] = None,
                                                            opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetLogAnalyticExportRequestRateByIntervalResult]:
    """
    Export logs that show Api requests made by this subscription in the given time window to show throttling activities.


    :param str blob_container_sas_uri: SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to.
    :param str from_time: From time of the query
    :param bool group_by_client_application_id: Group query result by Client Application ID.
    :param bool group_by_operation_name: Group query result by Operation Name.
    :param bool group_by_resource_name: Group query result by Resource Name.
    :param bool group_by_throttle_policy: Group query result by Throttle Policy applied.
    :param bool group_by_user_agent: Group query result by User Agent.
    :param 'IntervalInMins' interval_length: Interval value in minutes used to create LogAnalytics call rate logs.
    :param str location: The location upon which virtual-machine-sizes is queried.
    :param str to_time: To time of the query
    """
    ...
7,379 | plans | """
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""
import builtins
import caffe2.proto.caffe2_pb2
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.message
import typing
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor = ...
class ModelInfo(google.protobuf.message.Message):
    # Generated stub: identifying metadata for a serialized model
    # (all fields are optional text).
    DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
    PROJECT_FIELD_NUMBER: int
    MODELCLASS_FIELD_NUMBER: int
    VERSION_FIELD_NUMBER: int
    PREDICTORTYPE_FIELD_NUMBER: int
    MODELID_FIELD_NUMBER: int
    project: typing.Text = ...
    modelClass: typing.Text = ...
    version: typing.Text = ...
    predictorType: typing.Text = ...
    modelId: typing.Text = ...
    def __init__(self,
        *,
        project : typing.Optional[typing.Text] = ...,
        modelClass : typing.Optional[typing.Text] = ...,
        version : typing.Optional[typing.Text] = ...,
        predictorType : typing.Optional[typing.Text] = ...,
        modelId : typing.Optional[typing.Text] = ...,
        ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal[u"modelClass",b"modelClass",u"modelId",b"modelId",u"predictorType",b"predictorType",u"project",b"project",u"version",b"version"]) -> bool: ...
    def ClearField(self, field_name: typing_extensions.Literal[u"modelClass",b"modelClass",u"modelId",b"modelId",u"predictorType",b"predictorType",u"project",b"project",u"version",b"version"]) -> None: ...
global___ModelInfo = ModelInfo
class BlobsMap(google.protobuf.message.Message):
    # Generated stub: map entry pairing a key with a repeated list of blob names.
    DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
    KEY_FIELD_NUMBER: int
    VALUE_FIELD_NUMBER: int
    key: typing.Text = ...
    value: google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text] = ...
    def __init__(self,
        *,
        key : typing.Optional[typing.Text] = ...,
        value : typing.Optional[typing.Iterable[typing.Text]] = ...,
        ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal[u"key",b"key"]) -> bool: ...
    def ClearField(self, field_name: typing_extensions.Literal[u"key",b"key",u"value",b"value"]) -> None: ...
global___BlobsMap = BlobsMap
class NetsMap(google.protobuf.message.Message):
    # Generated stub: map entry pairing a key with a caffe2 NetDef message.
    DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
    KEY_FIELD_NUMBER: int
    VALUE_FIELD_NUMBER: int
    key: typing.Text = ...
    @property
    def value(self) -> caffe2.proto.caffe2_pb2.NetDef: ...
    def __init__(self,
        *,
        key : typing.Optional[typing.Text] = ...,
        value : typing.Optional[caffe2.proto.caffe2_pb2.NetDef] = ...,
        ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal[u"key",b"key",u"value",b"value"]) -> bool: ...
    def ClearField(self, field_name: typing_extensions.Literal[u"key",b"key",u"value",b"value"]) -> None: ...
global___NetsMap = NetsMap
class PlansMap(google.protobuf.message.Message):
    # Generated stub: map entry pairing a key with a caffe2 PlanDef message.
    DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
    KEY_FIELD_NUMBER: int
    VALUE_FIELD_NUMBER: int
    key: typing.Text = ...
    @property
    def value(self) -> caffe2.proto.caffe2_pb2.PlanDef: ...
    def __init__(self,
        *,
        key : typing.Optional[typing.Text] = ...,
        value : typing.Optional[caffe2.proto.caffe2_pb2.PlanDef] = ...,
        ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal[u"key",b"key",u"value",b"value"]) -> bool: ...
    def ClearField(self, field_name: typing_extensions.Literal[u"key",b"key",u"value",b"value"]) -> None: ...
global___PlansMap = PlansMap
class StringMap(google.protobuf.message.Message):
    # Generated stub: map entry pairing a string key with a string value.
    DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
    KEY_FIELD_NUMBER: int
    VALUE_FIELD_NUMBER: int
    key: typing.Text = ...
    value: typing.Text = ...
    def __init__(self,
        *,
        key : typing.Optional[typing.Text] = ...,
        value : typing.Optional[typing.Text] = ...,
        ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal[u"key",b"key",u"value",b"value"]) -> bool: ...
    def ClearField(self, field_name: typing_extensions.Literal[u"key",b"key",u"value",b"value"]) -> None: ...
global___StringMap = StringMap
class MetaNetDef(google.protobuf.message.Message):
    # Generated stub: top-level container bundling blobs, nets, plans and
    # model metadata for a serialized caffe2 predictor.
    DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
    BLOBS_FIELD_NUMBER: int
    NETS_FIELD_NUMBER: int
    MODELINFO_FIELD_NUMBER: int
    PLANS_FIELD_NUMBER: int
    APPLICATIONSPECIFICINFO_FIELD_NUMBER: int
    BLOBSORDER_FIELD_NUMBER: int
    PRELOADBLOBS_FIELD_NUMBER: int
    TENSORBOUNDSHAPES_FIELD_NUMBER: int
    REQUESTONLYEMBEDDINGS_FIELD_NUMBER: int
    AOTCONFIG_FIELD_NUMBER: int
    blobsOrder: google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text] = ...
    preLoadBlobs: google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text] = ...
    requestOnlyEmbeddings: google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text] = ...
    @property
    def blobs(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BlobsMap]: ...
    @property
    def nets(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___NetsMap]: ...
    @property
    def modelInfo(self) -> global___ModelInfo: ...
    @property
    def METHOD_NAME(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___PlansMap]: ...
    @property
    def applicationSpecificInfo(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___StringMap]: ...
    @property
    def tensorBoundShapes(self) -> caffe2.proto.caffe2_pb2.TensorBoundShapes: ...
    @property
    def aotConfig(self) -> caffe2.proto.caffe2_pb2.AOTConfig: ...
    def __init__(self,
        *,
        blobs : typing.Optional[typing.Iterable[global___BlobsMap]] = ...,
        nets : typing.Optional[typing.Iterable[global___NetsMap]] = ...,
        modelInfo : typing.Optional[global___ModelInfo] = ...,
        METHOD_NAME : typing.Optional[typing.Iterable[global___PlansMap]] = ...,
        applicationSpecificInfo : typing.Optional[typing.Iterable[global___StringMap]] = ...,
        blobsOrder : typing.Optional[typing.Iterable[typing.Text]] = ...,
        preLoadBlobs : typing.Optional[typing.Iterable[typing.Text]] = ...,
        tensorBoundShapes : typing.Optional[caffe2.proto.caffe2_pb2.TensorBoundShapes] = ...,
        requestOnlyEmbeddings : typing.Optional[typing.Iterable[typing.Text]] = ...,
        aotConfig : typing.Optional[caffe2.proto.caffe2_pb2.AOTConfig] = ...,
        ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal[u"aotConfig",b"aotConfig",u"modelInfo",b"modelInfo",u"tensorBoundShapes",b"tensorBoundShapes"]) -> bool: ...
    def ClearField(self, field_name: typing_extensions.Literal[u"aotConfig",b"aotConfig",u"applicationSpecificInfo",b"applicationSpecificInfo",u"blobs",b"blobs",u"blobsOrder",b"blobsOrder",u"modelInfo",b"modelInfo",u"nets",b"nets",u"plans",b"plans",u"preLoadBlobs",b"preLoadBlobs",u"requestOnlyEmbeddings",b"requestOnlyEmbeddings",u"tensorBoundShapes",b"tensorBoundShapes"]) -> None: ...
global___MetaNetDef = MetaNetDef
7,380 | parse re flags | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from .robottypes import is_integer
from .unic import safe_str
def printable_name(string, code_style=False):
    """Generate and return a printable name from the given string.

    Examples:
    'simple'             -> 'Simple'
    'name with spaces'   -> 'Name With Spaces'
    'more   spaces'      -> 'More Spaces'
    'Cases AND spaces'   -> 'Cases AND Spaces'
    ''                   -> ''

    If ``code_style`` is True:
    'mixedCAPSCamel'     -> 'Mixed CAPS Camel'
    'camelCaseName'      -> 'Camel Case Name'
    'under_score_name'   -> 'Under Score Name'
    'under_and space'    -> 'Under And Space'
    'miXed_CAPS_nAMe'    -> 'MiXed CAPS NAMe'
    ''                   -> ''
    """
    if code_style and '_' in string:
        string = string.replace('_', ' ')
    words = string.split()
    single_code_token = (code_style and len(words) == 1
                         and not (string.isalpha() and string.islower()))
    if single_code_token:
        words = _split_camel_case(words[0])
    capitalized = [word[0].upper() + word[1:] for word in words]
    return ' '.join(capitalized)
def _split_camel_case(string):
tokens = []
token = []
for prev, char, next in zip(' ' + string, string, string[1:] + ' '):
if _is_camel_case_boundary(prev, char, next):
if token:
tokens.append(''.join(token))
token = [char]
else:
token.append(char)
if token:
tokens.append(''.join(token))
return tokens
def _is_camel_case_boundary(prev, char, next):
if prev.isdigit():
return not char.isdigit()
if char.isupper():
return next.islower() or prev.isalpha() and not prev.isupper()
return char.isdigit()
def plural_or_not(item):
    """Return 's' when ``item`` (a count or a sized object) is plural."""
    if is_integer(item):
        count = item
    else:
        count = len(item)
    return '' if count in (1, -1) else 's'
def seq2str(sequence, quote="'", sep=', ', lastsep=' and '):
    """Returns sequence in format `'item 1', 'item 2' and 'item 3'`."""
    quoted = [f'{quote}{safe_str(item)}{quote}' for item in sequence]
    if not quoted:
        return ''
    if len(quoted) == 1:
        return quoted[0]
    # Join the final pair with ``lastsep``; everything before it with ``sep``.
    *head, second_last, last = quoted
    return sep.join(head + [second_last + lastsep + last])
def seq2str2(sequence):
    """Returns sequence in format `[ item 1 | item 2 | ... ]`."""
    if not sequence:
        return '[ ]'
    joined = ' | '.join(safe_str(item) for item in sequence)
    return f'[ {joined} ]'
def test_or_task(text: str, rpa: bool):
"""Replace 'test' with 'task' in the given `text` depending on `rpa`.
If given text is `test`, `test` or `task` is returned directly. Otherwise,
pattern `{test}` is searched from the text and occurrences replaced with
`test` or `task`.
In both cases matching the word `test` is case-insensitive and the returned
`test` or `task` has exactly same case as the original.
"""
def replace(test):
if not rpa:
return test
upper = [c.isupper() for c in test]
return ''.join(c.upper() if up else c for c, up in zip('task', upper))
if text.upper() == 'TEST':
return replace(text)
return re.sub('{(test)}', lambda m: replace(m.group(1)), text, flags=re.IGNORECASE)
def isatty(stream):
    """Safely tell whether ``stream`` is connected to a terminal."""
    # A detached buffer means the stream can no longer answer.
    if hasattr(stream, 'buffer'):
        if stream.buffer is None:
            return False
    if not hasattr(stream, 'isatty'):
        return False
    try:
        return stream.isatty()
    except ValueError:
        # Raised when the underlying file has been closed.
        return False
def METHOD_NAME(flags=None):
    """Convert a string like 'IGNORECASE | DOTALL' into combined `re` flags.

    Names are case-insensitive and surrounding whitespace is ignored.
    Returns 0 when ``flags`` is None or empty; raises ValueError for any
    name that is not a valid regexp flag.
    """
    combined = 0
    if not flags:
        return combined
    for flag in flags.split('|'):
        candidate = getattr(re, flag.upper().strip(), None)
        if not isinstance(candidate, re.RegexFlag):
            raise ValueError(f'Unknown regexp flag: {flag}')
        combined |= candidate
    return combined
class classproperty(property):
    """Property that works with classes in addition to instances.

    Only supports getters. Setters and deleters cannot work with classes due
    to how the descriptor protocol works, and they are thus explicitly disabled.
    Metaclasses must be used if they are needed.
    """

    def __init__(self, fget, fset=None, fdel=None, doc=None):
        if fset:
            self.setter(fset)
        if fdel:
            # Bug fix: the original passed ``fset`` here, so a deleter-only
            # call with fdel set would raise on the wrong argument.
            self.deleter(fdel)
        super().__init__(fget)
        if doc:
            self.__doc__ = doc

    def __get__(self, instance, owner):
        # ``owner`` is the class regardless of whether access happened via an
        # instance or via the class itself.
        return self.fget(owner)

    def setter(self, fset):
        raise TypeError('Setters are not supported.')

    def deleter(self, fset):
        raise TypeError('Deleters are not supported.')
7,381 | pre evaluate | # Copyright 2019-2022 ETH Zurich and the DaCe authors. All rights reserved.
import os
import tempfile
import math
import dace
import json
from typing import Dict, Generator, Any, List, Tuple
from dace.optimization import auto_tuner
from dace.optimization import utils as optim_utils
from dace.sdfg.sdfg import SDFG
from dace.sdfg.state import SDFGState
try:
from tqdm import tqdm
except (ImportError, ModuleNotFoundError):
tqdm = lambda x, **kwargs: x
class CutoutTuner(auto_tuner.AutoTuner):
    """
    An auto-tuner that cuts out subgraphs of the original SDFG to tune separately.

    In order to tune an SDFG, a "dry run" must first be called to collect data from intermediate
    access nodes (in order to ensure correctness of the tuned subgraph). Subsequently, sub-classes of
    this cutout tuning interface will select subgraphs to test transformations on.

    For example::

        tuner = DataLayoutTuner(sdfg)
        # Create instrumented data report
        tuner.dry_run(sdfg, arg1, arg2, arg3=4)
        results = tuner.optimize()
        # results will now contain the fastest data layout configurations for each array
    """

    def __init__(self, task: str, sdfg: SDFG) -> None:
        """
        Creates a cutout tuner.

        :param task: Name of tuning task (for filename labeling).
        :param sdfg: The SDFG to tune.
        """
        super().__init__(sdfg=sdfg)
        self._task = task

    @property
    def task(self) -> str:
        # Task name; used to label the per-cutout cache files.
        return self._task

    def file_name(self, label: str) -> str:
        # Cache file storing the tuning results of one cutout.
        return f"{self._task}.{label}.tuning"

    def try_load(self, file_name) -> Dict:
        """Return cached tuning results from ``file_name``, or None if absent."""
        results = None
        if os.path.exists(file_name):
            print(f'Using cached {file_name}')
            with open(file_name, 'r') as fp:
                results = json.load(fp)

        return results

    def cutouts(self) -> Generator[Tuple[SDFGState, str], None, None]:
        # Subclass hook: yield (cutout SDFG, label) pairs to tune.
        raise NotImplementedError

    def space(self, **kwargs) -> Generator[Any, None, None]:
        # Subclass hook: yield candidate configurations for a cutout.
        raise NotImplementedError

    # NOTE(review): this abstract ``search`` is shadowed by the concrete
    # ``search`` defined further below in this class; one of the two
    # definitions is redundant and should probably be removed.
    def search(self, cutout: SDFG, measurements: int, **kwargs) -> Dict:
        raise NotImplementedError

    def METHOD_NAME(self, **kwargs) -> Dict:
        # Subclass hook: prepare the kwargs consumed by ``evaluate`` (must
        # provide "key" and "space_kwargs" entries; see ``search`` below).
        raise NotImplementedError

    def evaluate(self, **kwargs) -> float:
        # Subclass hook: measure a single configuration; returns its runtime.
        raise NotImplementedError

    def config_from_key(self, key: str, cutout: dace.SDFG, **kwargs) -> Any:
        # Subclass hook: reconstruct a configuration object from a result key.
        raise NotImplementedError

    def apply(self, config, cutout, **kwargs) -> None:
        # Subclass hook: apply a configuration to the original SDFG.
        raise NotImplementedError

    def measure(self, cutout, dreport, repetitions: int = 30, timeout: float = 300.0) -> float:
        """Measure ``cutout``'s runtime (in a subprocess) using recorded inputs."""
        # Collect recorded data only for the cutout's non-transient arrays.
        dreport_ = {}
        for cstate in cutout.nodes():
            for dnode in cstate.data_nodes():
                array = cutout.arrays[dnode.data]
                if array.transient:
                    continue

                try:
                    data = dreport.get_first_version(dnode.data)
                    dreport_[dnode.data] = data
                except:
                    # No recorded data for this array; measure without it.
                    continue

        runtime = optim_utils.subprocess_measure(cutout=cutout, dreport=dreport_, repetitions=repetitions, timeout=timeout)
        return runtime

    def optimize(self, measurements: int = 30, apply: bool = False, **kwargs) -> Dict[Any, Any]:
        """Tune every cutout; optionally apply the best configuration found.

        Results are cached on disk per cutout and reused on later runs.
        """
        tuning_report = {}
        for cutout, label in tqdm(list(self.cutouts())):
            fn = self.file_name(label)
            results = self.try_load(fn)

            if results is None:
                results = self.search(cutout, measurements, **kwargs)
                if results is None:
                    # Search failed for this cutout; record that and move on.
                    tuning_report[label] = None
                    continue

                with open(fn, 'w') as fp:
                    json.dump(results, fp)

            # Lowest measured runtime wins.
            best_config = min(results, key=results.get)
            if apply:
                config = self.config_from_key(best_config, cutout=cutout)
                self.apply(config, label=label)

            tuning_report[label] = results

        return tuning_report

    def search(self, cutout: SDFG, measurements: int,
               **kwargs) -> Dict[str, float]:
        """Exhaustively evaluate the configuration space of ``cutout``."""
        kwargs = self.METHOD_NAME(cutout=cutout, measurements=measurements, **kwargs)

        results = {}
        key = kwargs["key"]
        for config in tqdm(list(self.space(**(kwargs["space_kwargs"])))):
            kwargs["config"] = config
            runtime = self.evaluate(**kwargs)
            results[key(config)] = runtime

        return results

    @staticmethod
    def top_k_configs(tuning_report, k: int) -> List[Tuple[str, float]]:
        """Return up to ``k`` fastest finite-runtime configurations per cutout."""
        all_configs = []
        for cutout_label in tuning_report:
            configs = tuning_report[cutout_label]
            best_k_configs = [(key, value) for key, value in sorted(configs.items(), key=lambda item: item[1])][:min(len(configs), k)]
            # Drop configurations that failed to run (recorded as inf).
            best_k_configs = filter(lambda c: c[1] != math.inf, best_k_configs)
            best_k_configs = list(map(lambda c: (cutout_label, c[0]), best_k_configs))

            all_configs.extend(best_k_configs)

        return all_configs

    @staticmethod
    def dry_run(sdfg: SDFG, *args, **kwargs) -> Any:
        """Run ``sdfg`` once with data instrumentation to record its inputs.

        Returns the SDFG's return value when a run was performed, or None
        when a valid instrumented data report already exists.
        """
        # Check existing instrumented data for shape mismatch
        kwargs.update({aname: a for aname, a in zip(sdfg.arg_names, args)})
        dreport = sdfg.get_instrumented_data()
        if dreport is not None:
            for data in dreport.keys():
                rep_arr = dreport.get_first_version(data)
                sdfg_arr = sdfg.arrays[data]
                # Potential shape mismatch
                if rep_arr.shape != sdfg_arr.shape:
                    # Check given data first
                    if hasattr(kwargs[data], 'shape') and rep_arr.shape != kwargs[data].shape:
                        # Stale report: discard it and force a fresh dry run.
                        sdfg.clear_data_reports()
                        dreport = None
                        break

        # If there is no valid instrumented data available yet, run in data instrumentation mode
        if dreport is None:
            for state in sdfg.nodes():
                for node in state.nodes():
                    if isinstance(node, dace.nodes.AccessNode) and not node.desc(sdfg).transient:
                        node.instrument = dace.DataInstrumentationType.Save

            result = sdfg(**kwargs)

            # Disable data instrumentation from now on
            for state in sdfg.nodes():
                for node in state.nodes():
                    if isinstance(node, dace.nodes.AccessNode):
                        node.instrument = dace.DataInstrumentationType.No_Instrumentation
        else:
            return None

        return result
7,382 | check html | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import os
import os.path
import re
import sys
import unittest
from ExtensionClass import Base
from Products.PageTemplates.engine import Program
from zope.component import provideUtility
from zope.pagetemplate.interfaces import IPageTemplateEngine
from zope.pagetemplate.pagetemplate import PageTemplateEngine
# Dummy TestCase instance so its assert* helpers can be used from plain
# helper functions, outside the actual test methods.
TEST_CASE = unittest.TestCase('__init__')
class Bruce(Base):
    """Test fixture: an object that answers most protocols with itself."""

    __allow_access_to_unprotected_subobjects__ = 1

    isDocTemp = 0

    def __str__(self):
        return 'bruce'

    def __int__(self):
        return 42

    def __float__(self):
        return 42.0

    def keys(self):
        return ['bruce'] * 7

    def values(self):
        return [self] * 7

    def items(self):
        return [('bruce', self)] * 7

    def __len__(self):
        return 7

    def __getitem__(self, index):
        # Integer indices outside 0..6 fail; everything else yields self.
        if isinstance(index, int) and not 0 <= index <= 6:
            raise IndexError(index)
        return self

    def __getattr__(self, name):
        # Underscore-prefixed attributes are treated as missing.
        if name.startswith('_'):
            raise AttributeError(name)
        return self


bruce = Bruce()
class arg(Base):
    """Test fixture wrapping a single positional command-line argument."""

    __allow_access_to_unprotected_subobjects__ = 1

    def __init__(self, nn, aa):
        self.num = nn
        self.arg = aa

    def __str__(self):
        return str(self.arg)
class argv(Base):
    """Test fixture exposing command-line arguments as numbered `arg` objects."""

    __allow_access_to_unprotected_subobjects__ = 1

    # NOTE: the default is bound once at import time, matching the original.
    def __init__(self, argv=sys.argv[1:]):
        self.args = []
        for value in argv:
            self.args.append(arg(len(self.args) + 1, value))

    def items(self):
        # Keys are 'spam1', 'spam2', ... matching each argument's number.
        return [('spam%d' % a.num, a) for a in self.args]

    def values(self):
        return self.args

    def getPhysicalRoot(self):
        return self
def METHOD_NAME(s1, s2):
    """Assert that two HTML strings are equal after normalization."""
    if isinstance(s1, bytes) and not isinstance(s2, bytes):
        # Bring both operands to the same type; our files are UTF-8.
        s1 = s1.decode("utf-8")
    TEST_CASE.assertEqual(normalize_html(s1), normalize_html(s2))
def check_xml(s1, s2):
    """Assert that two XML strings are equal after normalization."""
    TEST_CASE.assertEqual(normalize_xml(s1), normalize_xml(s2),
                          "XML Output Changed")
def normalize_html(s):
    """Collapse space/tab runs and strip XHTML-style '/>' tag closers."""
    collapsed = re.sub(r"[ \t]+", " ", s)
    return collapsed.replace("/>", ">")
def normalize_xml(s):
    """Collapse all whitespace and strip it around tag boundaries."""
    for pattern, replacement in (
        (r"\s+", " "),
        (r"(?s)\s+<", "<"),
        (r"(?s)>\s+", ">"),
    ):
        s = re.sub(pattern, replacement, s)
    return s
HERE = os.path.dirname(__file__)
input_dir = os.path.join(HERE, 'input')
output_dir = os.path.join(HERE, 'output')
def _open(filename, mode):
    """Open *filename* with a pinned UTF-8 encoding.

    Windows defaults to a locale-dependent codec, so the encoding is made
    explicit to keep the test fixtures portable across platforms.
    """
    # Define explicit encoding for windows platform
    return open(filename, mode, encoding='utf-8')
def read_input(filename):
    """Return the full text of *filename* from the test ``input`` directory."""
    with _open(os.path.join(input_dir, filename), 'r') as fd:
        return fd.read()
def read_output(filename):
    """Return the full text of *filename* from the test ``output`` directory."""
    with _open(os.path.join(output_dir, filename), 'r') as fd:
        return fd.read()
def exists_output(filename):
    """Report whether *filename* exists in the test ``output`` directory."""
    return os.path.exists(os.path.join(output_dir, filename))
def useChameleonEngine():
    """Register the chameleon-based engine utility for template rendering."""
    # Force the use of the new chameleon rendering engine (the new default).
    # Its use depends on a utility registration that is queried in
    # zope.pagetemplate.pagetemplate.PageTemplate's _cook method. Unfortunately
    # the fallback is the old Zope engine if there is no registration, so we
    # force one here for use by unit tests.
    provideUtility(Program, IPageTemplateEngine)
def useOldZopeEngine():
    """Register the legacy Zope page template engine utility."""
    # BBB Force the use of the old Zope page template engine, which is needed
    # for some tests that test features only supported by it.
    provideUtility(PageTemplateEngine, IPageTemplateEngine)
7,383 | build base hue | # Copyright (c) Princeton University.
# This source code is licensed under the BSD 3-Clause license found in the LICENSE file in the root directory of this source tree.
# Authors: Lingjie Mei
import colorsys
import bpy
import numpy as np
from numpy.random import uniform
import util.blender as butil
from .base import BaseMolluskFactory
from .shell import ShellBaseFactory, ScallopBaseFactory, ClamBaseFactory, MusselBaseFactory
from .snail import SnailBaseFactory, ConchBaseFactory, AugerBaseFactory, VoluteBaseFactory, NautilusBaseFactory
from assets.utils.misc import build_color_ramp, log_uniform
from ..utils.decorate import assign_material, subsurface2face_size
from nodes.node_wrangler import NodeWrangler, Nodes
from surfaces import surface
from placement.factory import AssetFactory
from util.math import FixedSeed
from assets.utils.tag import tag_object, tag_nodegroup
class MolluskFactory(AssetFactory):
    """Procedurally generates mollusk assets with a shared shell shader.
    A concrete base geometry factory (snail- or shell-like) is picked under a
    fixed seed at construction time, and a single material is built once and
    reused for every asset this factory instance creates.
    """
    def __init__(self, factory_seed, coarse=False, factory_method=None):
        super(MolluskFactory, self).__init__(factory_seed, coarse)
        with FixedSeed(factory_seed):
            # Candidate base geometries, chosen uniformly unless the caller
            # pins a specific factory_method (as the subclasses below do).
            self.factory_methods = [SnailBaseFactory, ShellBaseFactory]
            weights = np.array([1] * len(self.factory_methods))
            self.weights = weights / weights.sum()
            if factory_method is None:
                factory_method = np.random.choice(self.factory_methods, p=self.weights)
            self.factory: BaseMolluskFactory = factory_method(factory_seed, coarse)
            # Base hue feeding the shell shader (see METHOD_NAME below).
            base_hue = self.METHOD_NAME()
            self.material = surface.shaderfunc_to_material(self.shader_mollusk, base_hue, self.factory.ratio,
                                                           self.factory.x_scale, self.factory.z_scale,
                                                           self.factory.distortion)
    def create_asset(self, face_size=0.01, **params):
        # Delegate geometry creation, then decorate every mesh in the
        # resulting object hierarchy.
        obj = self.factory.create_asset(**params)
        for o in [obj] + obj.children_recursive:
            if o.type == 'MESH':
                self.decorate_mollusk(face_size, o)
        return obj
    def decorate_mollusk(self, face_size, obj):
        """Remesh, thicken, displace and shade one mesh object in place."""
        subsurface2face_size(obj, face_size)
        butil.modify_mesh(obj, 'SOLIDIFY', True, thickness=.005)
        # A random procedural texture drives a small surface displacement.
        t = np.random.choice(['STUCCI', 'MARBLE'])
        texture = bpy.data.textures.new(name='mollusk', type=t)
        texture.noise_scale = log_uniform(.1, .2)
        butil.modify_mesh(obj, 'DISPLACE', strength=self.factory.noise_strength, mid_level=0, texture=texture)
        assign_material(obj, self.material)
        tag_object(obj, 'mollusk')
        return obj
    @staticmethod
    def METHOD_NAME():
        """Sample a base hue in [0, 1) HSV space for the shell material.
        (Name masked upstream; presumably ``build_base_hue`` -- TODO confirm.)
        """
        # 40% of the time: anywhere in the wider 0..0.2 band; otherwise a
        # narrower warm band around orange.
        if uniform(0, 1) < .4:
            return uniform(0, .2)
        else:
            return uniform(.05, .12)
    @staticmethod
    def shader_mollusk(nw: NodeWrangler, base_hue, ratio=0, x_scale=2, z_scale=1, distortion=5):
        """Build a node-graph shell shader mixing two saw-wave color ramps."""
        roughness = uniform(.2, .8)
        specular = .3
        value_scale = log_uniform(1, 20)
        saturation_scale = log_uniform(.4, 1)
        def dark_color():
            # Low-value (dark) variant of the base hue, as RGBA.
            return *colorsys.hsv_to_rgb(base_hue + uniform(-.06, .06), uniform(.6, 1.) * saturation_scale,
                                        .005 * value_scale ** 1.5), 1
        def light_color():
            # Brighter variant of the base hue, as RGBA.
            return *colorsys.hsv_to_rgb(base_hue + uniform(-.06, .06), uniform(.6, 1.) * saturation_scale,
                                        .05 * value_scale), 1
        def color_fn(dark_prob):
            return dark_color() if uniform(0, 1) < dark_prob else light_color()
        vector = nw.new_node(Nodes.Attribute, attrs={'attribute_name': 'vector'}).outputs['Vector']
        n = np.random.randint(3, 5)
        # Two banded saw-wave textures, one along X and one along Z, each
        # mapped through a random color ramp; they are blended by `ratio`.
        texture_0 = nw.new_node(Nodes.WaveTexture,
                                input_kwargs={'Vector': vector, 'Distortion': distortion, 'Scale': x_scale},
                                attrs={'wave_profile': 'SAW', 'bands_direction': 'X'})
        cr_0 = build_color_ramp(nw, texture_0, np.sort(uniform(0, 1, n)), [color_fn(.4) for _ in range(n)])
        texture_1 = nw.new_node(Nodes.WaveTexture,
                                input_kwargs={'Vector': vector, 'Distortion': distortion, 'Scale': z_scale},
                                attrs={'wave_profile': 'SAW', 'bands_direction': 'Z'})
        cr_1 = build_color_ramp(nw, texture_1, np.sort(uniform(0, 1, n)), [color_fn(.4) for _ in range(n)])
        principled_bsdf = nw.new_node(Nodes.PrincipledBSDF, input_kwargs={
            'Base Color': nw.new_node(Nodes.MixRGB, [ratio, cr_0, cr_1]),
            'Specular': specular,
            'Roughness': roughness
        })
        return principled_bsdf
class ScallopFactory(MolluskFactory):
    """Mollusk factory pinned to scallop-shell base geometry."""

    def __init__(self, factory_seed, coarse=False):
        super().__init__(factory_seed, coarse, ScallopBaseFactory)
class ClamFactory(MolluskFactory):
    """Mollusk factory pinned to clam-shell base geometry."""

    def __init__(self, factory_seed, coarse=False):
        super().__init__(factory_seed, coarse, ClamBaseFactory)
class MusselFactory(MolluskFactory):
    """Mollusk factory pinned to mussel-shell base geometry."""

    def __init__(self, factory_seed, coarse=False):
        super().__init__(factory_seed, coarse, MusselBaseFactory)
class ConchFactory(MolluskFactory):
    """Mollusk factory pinned to conch base geometry."""

    def __init__(self, factory_seed, coarse=False):
        super().__init__(factory_seed, coarse, ConchBaseFactory)
class AugerFactory(MolluskFactory):
    """Mollusk factory pinned to auger base geometry."""

    def __init__(self, factory_seed, coarse=False):
        super().__init__(factory_seed, coarse, AugerBaseFactory)
class VoluteFactory(MolluskFactory):
    """Mollusk factory pinned to volute base geometry."""

    def __init__(self, factory_seed, coarse=False):
        super().__init__(factory_seed, coarse, VoluteBaseFactory)
class NautilusFactory(MolluskFactory):
    """Mollusk factory pinned to nautilus base geometry."""

    def __init__(self, factory_seed, coarse=False):
        super().__init__(factory_seed, coarse, NautilusBaseFactory)
7,384 | test builtins | import unittest
import idlelib.CallTips as ct
CTi = ct.CallTips() # needed for get_entity test in 2.7
import textwrap
import types
import warnings
default_tip = ''
# Test Class TC is used in multiple get_argspec test methods
class TC(object):
    'doc'
    # Each method carries a `.tip` attribute holding the call tip string the
    # tests expect CallTips to produce for it; the statement order (def, then
    # attribute assignment) matters, so keep it as-is. Python 2 file.
    tip = "(ai=None, *args)"
    def __init__(self, ai=None, *b): 'doc'
    __init__.tip = "(self, ai=None, *args)"
    def t1(self): 'doc'
    t1.tip = "(self)"
    def t2(self, ai, b=None): 'doc'
    t2.tip = "(self, ai, b=None)"
    def t3(self, ai, *args): 'doc'
    t3.tip = "(self, ai, *args)"
    def t4(self, *args): 'doc'
    t4.tip = "(self, *args)"
    def t5(self, ai, b=None, *args, **kw): 'doc'
    t5.tip = "(self, ai, b=None, *args, **kwargs)"
    def t6(no, self): 'doc'
    t6.tip = "(no, self)"
    def __call__(self, ci): 'doc'
    __call__.tip = "(self, ci)"
    # attaching .tip to wrapped methods does not work
    @classmethod
    def cm(cls, a): 'doc'
    @staticmethod
    def sm(b): 'doc'
# Shared instance for the bound-method tests below.
tc = TC()
signature = ct.get_arg_text # 2.7 and 3.x use different functions
class Get_signatureTest(unittest.TestCase):
    # The signature function must return a string, even if blank.
    # Test a variety of objects to be sure that none cause it to raise
    # (quite aside from getting as correct an answer as possible).
    # The tests of builtins may break if the docstrings change,
    # but a red buildbot is better than a user crash (as has happened).
    # For a simple mismatch, change the expected output to the actual.
    def METHOD_NAME(self):
        """Call tips for builtin types/methods (name masked upstream;
        presumably something like test_builtins -- TODO confirm)."""
        # 2.7 puts '()\n' where 3.x does not, other minor differences
        # Python class that inherits builtin methods
        class List(list): "List() doc"
        # Simulate builtin with no docstring for default argspec test
        class SB: __call__ = None
        def gtest(obj, out):
            self.assertEqual(signature(obj), out)
        if List.__doc__ is not None:
            gtest(List, '()\n' + List.__doc__)
        gtest(list.__new__,
              'T.__new__(S, ...) -> a new object with type S, a subtype of T')
        gtest(list.__init__,
              'x.__init__(...) initializes x; see help(type(x)) for signature')
        append_doc = "L.append(object) -- append object to end"
        gtest(list.append, append_doc)
        gtest([].append, append_doc)
        gtest(List.append, append_doc)
        gtest(types.MethodType, '()\ninstancemethod(function, instance, class)')
        gtest(SB(), default_tip)
    def test_signature_wrap(self):
        """Long argspecs are wrapped across lines."""
        # This is also a test of an old-style class
        if textwrap.TextWrapper.__doc__ is not None:
            self.assertEqual(signature(textwrap.TextWrapper), '''\
(width=70, initial_indent='', subsequent_indent='', expand_tabs=True,
replace_whitespace=True, fix_sentence_endings=False, break_long_words=True,
drop_whitespace=True, break_on_hyphens=True)''')
    def test_docline_truncation(self):
        """Overlong docstring first lines are truncated with an ellipsis."""
        def f(): pass
        f.__doc__ = 'a'*300
        self.assertEqual(signature(f), '()\n' + 'a' * (ct._MAX_COLS-3) + '...')
    def test_multiline_docstring(self):
        """Multiple docstring lines are kept up to the line/column limits."""
        # Test fewer lines than max.
        self.assertEqual(signature(list),
                         "()\nlist() -> new empty list\n"
                         "list(iterable) -> new list initialized from iterable's items")
        # Test max lines and line (currently) too long.
        def f():
            pass
        s = 'a\nb\nc\nd\n'
        f.__doc__ = s + 300 * 'e' + 'f'
        self.assertEqual(signature(f),
                         '()\n' + s + (ct._MAX_COLS - 3) * 'e' + '...')
    def test_functions(self):
        """Plain functions show their full argspec."""
        def t1(): 'doc'
        t1.tip = "()"
        def t2(a, b=None): 'doc'
        t2.tip = "(a, b=None)"
        def t3(a, *args): 'doc'
        t3.tip = "(a, *args)"
        def t4(*args): 'doc'
        t4.tip = "(*args)"
        def t5(a, b=None, *args, **kwds): 'doc'
        t5.tip = "(a, b=None, *args, **kwargs)"
        doc = '\ndoc' if t1.__doc__ is not None else ''
        for func in (t1, t2, t3, t4, t5, TC):
            self.assertEqual(signature(func), func.tip + doc)
    def test_methods(self):
        """Unbound methods keep the 'self' parameter in the tip."""
        doc = '\ndoc' if TC.__doc__ is not None else ''
        for meth in (TC.t1, TC.t2, TC.t3, TC.t4, TC.t5, TC.t6, TC.__call__):
            self.assertEqual(signature(meth), meth.tip + doc)
        self.assertEqual(signature(TC.cm), "(a)" + doc)
        self.assertEqual(signature(TC.sm), "(b)" + doc)
    def test_bound_methods(self):
        # test that first parameter is correctly removed from argspec
        doc = '\ndoc' if TC.__doc__ is not None else ''
        for meth, mtip in ((tc.t1, "()"), (tc.t4, "(*args)"), (tc.t6, "(self)"),
                           (tc.__call__, '(ci)'), (tc, '(ci)'), (TC.cm, "(a)"),):
            self.assertEqual(signature(meth), mtip + doc)
    def test_starred_parameter(self):
        # test that starred first parameter is *not* removed from argspec
        class C:
            def m1(*args): pass
            def m2(**kwds): pass
        def f1(args, kwargs, *a, **k): pass
        def f2(args, kwargs, args1, kwargs1, *a, **k): pass
        c = C()
        self.assertEqual(signature(C.m1), '(*args)')
        self.assertEqual(signature(c.m1), '(*args)')
        self.assertEqual(signature(C.m2), '(**kwargs)')
        self.assertEqual(signature(c.m2), '(**kwargs)')
        self.assertEqual(signature(f1), '(args, kwargs, *args1, **kwargs1)')
        self.assertEqual(signature(f2),
                         '(args, kwargs, args1, kwargs1, *args2, **kwargs2)')
    def test_no_docstring(self):
        """Without a docstring only the argspec line is produced."""
        def nd(s): pass
        TC.nd = nd
        self.assertEqual(signature(nd), "(s)")
        self.assertEqual(signature(TC.nd), "(s)")
        self.assertEqual(signature(tc.nd), "()")
    def test_attribute_exception(self):
        """Objects whose attribute access raises must not crash signature()."""
        class NoCall(object):
            def __getattr__(self, name):
                raise BaseException
        class Call(NoCall):
            def __call__(self, ci):
                pass
        for meth, mtip in ((NoCall, '()'), (Call, '()'),
                           (NoCall(), ''), (Call(), '(ci)')):
            self.assertEqual(signature(meth), mtip)
    def test_non_callables(self):
        """Non-callable objects yield an empty tip."""
        for obj in (0, 0.0, '0', b'0', [], {}):
            self.assertEqual(signature(obj), '')
class Get_entityTest(unittest.TestCase):
    # In 3.x, get_entity changed from 'instance method' to module function
    # since 'self' not used. Use dummy instance until change 2.7 also.
    def test_bad_entity(self):
        # An expression that raises during eval returns None, not an error.
        self.assertIsNone(CTi.get_entity('1//0'))
    def test_good_entity(self):
        # A resolvable name returns the actual object.
        self.assertIs(CTi.get_entity('int'), int)
class Py2Test(unittest.TestCase):
    # Python 2 only: uses the py2 `exec` statement and tuple parameter
    # unpacking, both removed in Python 3.
    def test_paramtuple_float(self):
        # 18539: (a,b) becomes '.0' in code object; change that but not 0.0
        with warnings.catch_warnings():
            # Suppress message of py3 deprecation of parameter unpacking
            warnings.simplefilter("ignore")
            exec "def f((a,b), c=0.0): pass"
            self.assertEqual(signature(f), '(<tuple>, c=0.0)')
# Allow running this test module directly (2.7 idiom: keep process alive).
if __name__ == '__main__':
    unittest.main(verbosity=2, exit=False)
7,385 | dumps | # This file is originally part of Fed-BioMed
# SPDX-License-Identifier: Apache-2.0
"""MsgPack serialization utils, wrapped into a namespace class."""
from math import ceil
from typing import Any
import msgpack
import numpy as np
import torch
from declearn.model.api import Vector
from fedbiomed.common.exceptions import FedbiomedTypeError
from fedbiomed.common.logger import logger
__all__ = [
"Serializer",
]
class Serializer:
    """MsgPack-based (de)serialization utils, wrapped into a namespace class.
    This class has no value being instantiated: it merely acts as a namespace
    to pack together encoding and decoding utils to convert data to and from
    MsgPack dump bytes or binary files.
    The MsgPack encoding and decoding capabilities are enhanced to add support
    for the following non-standard object types:
    - numpy arrays and scalars
    - torch tensors (that are always loaded on CPU)
    - tuples (which would otherwise be converted to lists)
    """
    @classmethod
    def METHOD_NAME(cls, obj: Any) -> bytes:
        """Serialize data into MsgPack-encoded bytes.
        Args:
            obj: Data that needs encoding.
        Returns:
            MsgPack-encoded bytes that contains the input data.
        """
        # strict_types=True keeps tuple/list distinct so _default sees tuples.
        return msgpack.packb(obj, default=cls._default, strict_types=True)
    @classmethod
    def dump(cls, obj: Any, path: str) -> None:
        """Serialize data into a MsgPack binary dump file.
        Args:
            obj: Data that needs encoding.
            path: Path to the created dump file.
        """
        with open(path, "wb") as file:
            msgpack.pack(obj, file, default=cls._default, strict_types=True)
    @classmethod
    def loads(cls, data: bytes) -> Any:
        """Load serialized data from a MsgPack-encoded string.
        Args:
            data: MsgPack-encoded bytes that needs decoding.
        Returns:
            Data loaded and decoded from the input bytes.
        """
        return msgpack.unpackb(
            data, object_hook=cls._object_hook, strict_map_key=False
        )
    @classmethod
    def load(cls, path: str) -> Any:
        """Load serialized data from a MsgPack dump file.
        Args:
            path: Path to a MsgPack file, the contents of which to decode.
        Returns:
            Data loaded and decoded from the target file.
        """
        with open(path, "rb") as file:
            return msgpack.unpack(
                file, object_hook=cls._object_hook, strict_map_key=False
            )
    @staticmethod
    def _default(obj: Any) -> Any:
        """Encode non-default object types into MsgPack-serializable data.
        The counterpart static method `unpack` may be used to recover
        the input objects from their encoded data.
        """
        # Big integer: msgpack only calls this hook for ints it cannot encode
        # natively, i.e. ones outside the 64-bit range.
        # NOTE(review): `to_bytes` without signed=True raises OverflowError
        # for negative values -- confirm big ints here are always >= 0.
        if isinstance(obj, int):
            return {"__type__": "int", "value": obj.to_bytes(
                length=ceil(obj.bit_length()/8),
                byteorder="big")}
        if isinstance(obj, tuple):
            return {"__type__": "tuple", "value": list(obj)}
        if isinstance(obj, np.ndarray):
            # Raw buffer + dtype name + shape is enough to rebuild the array.
            spec = [obj.tobytes(), obj.dtype.name, list(obj.shape)]
            return {"__type__": "np.ndarray", "value": spec}
        if isinstance(obj, np.generic):
            spec = [obj.tobytes(), obj.dtype.name]
            return {"__type__": "np.generic", "value": spec}
        if isinstance(obj, torch.Tensor):
            # Tensors are moved to CPU and stored via their numpy buffer.
            obj = obj.cpu().numpy()
            spec = [obj.tobytes(), obj.dtype.name, list(obj.shape)]
            return {"__type__": "torch.Tensor", "value": spec}
        if isinstance(obj, Vector):
            return {"__type__": "Vector", "value": obj.coefs}
        # Raise on unsupported types.
        raise FedbiomedTypeError(
            f"Cannot serialize object of type '{type(obj)}'."
        )
    @staticmethod
    def _object_hook(obj: Any) -> Any:
        """De-serialize non-default object types encoded with `_default`."""
        # Anything without the sentinel key is a plain msgpack map.
        if not (isinstance(obj, dict) and "__type__" in obj):
            return obj
        objtype = obj["__type__"]
        if objtype == "tuple":
            return tuple(obj["value"])
        if objtype == "int":
            return int.from_bytes(obj["value"], byteorder="big")
        if objtype == "np.ndarray":
            data, dtype, shape = obj["value"]
            # .copy() detaches from the read-only frombuffer view.
            return np.frombuffer(data, dtype=dtype).reshape(shape).copy()
        if objtype == "np.generic":
            data, dtype = obj["value"]
            return np.frombuffer(data, dtype=dtype)[0]
        if objtype == "torch.Tensor":
            data, dtype, shape = obj["value"]
            array = np.frombuffer(data, dtype=dtype).reshape(shape).copy()
            return torch.from_numpy(array)
        if objtype == "Vector":
            return Vector.build(obj["value"])
        # Unknown tagged payload: warn and return it unchanged.
        logger.warning(
            "Encountered an object that cannot be properly deserialized."
        )
        return obj
7,386 | prepare request | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import RecoveryServicesBackupClientMixinABC, _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
    """Build the GET request listing available Recovery Services operations.
    NOTE: this module is generated by AutoRest; hand edits are lost on
    regeneration, so only documentation is added here.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # Default API version used when the caller supplies none.
    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-02-01"))
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/providers/Microsoft.RecoveryServices/operations")
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.recoveryservicesbackup.activestamp.RecoveryServicesBackupClient`'s
        :attr:`operations` attribute.
    """
    models = _models
    def __init__(self, *args, **kwargs):
        # AutoRest-generated plumbing: the client, config and (de)serializers
        # are injected either positionally or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.ClientDiscoveryValueForSingleApi"]:
        """Returns the list of available operations.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ClientDiscoveryValueForSingleApi or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.recoveryservicesbackup.activestamp.models.ClientDiscoveryValueForSingleApi]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
        cls: ClsType[_models.ClientDiscoveryResponse] = kwargs.pop("cls", None)
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def METHOD_NAME(next_link=None):
            # Build either the first-page request or a follow-up request for
            # the server-provided continuation link (name masked upstream;
            # presumably ``prepare_request`` -- see usage in get_next below).
            if not next_link:
                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and hand back (continuation token, items).
            deserialized = self._deserialize("ClientDiscoveryResponse", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page, mapping HTTP error codes via error_map.
            request = METHOD_NAME(next_link)
            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(get_next, extract_data)
    list.metadata = {"url": "/providers/Microsoft.RecoveryServices/operations"}
7,387 | equalize channel | # Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.Equalization")
class Equalization(BaseImageAugmentationLayer):
    """Equalization performs histogram equalization on a channel-wise basis.
    Args:
        value_range: a tuple or a list of two elements. The first value
            represents the lower bound for values in passed images, the second
            represents the upper bound. Images passed to the layer should have
            values within `value_range`.
        bins: Integer indicating the number of bins to use in histogram
            equalization. Should be in the range [0, 256].
    Usage:
    ```python
    equalize = Equalization()
    (images, labels), _ = keras.datasets.cifar10.load_data()
    # Note that images are an int8 Tensor with values in the range [0, 255]
    images = equalize(images)
    ```
    Call arguments:
        images: Tensor of pixels in range [0, 255], in RGB format. Can be
            of type float or int. Should be in NHWC format.
    """
    def __init__(self, value_range, bins=256, **kwargs):
        super().__init__(**kwargs)
        self.bins = bins
        self.value_range = value_range
    def METHOD_NAME(self, image, channel_index):
        """equalize_channel performs histogram equalization on a single channel.
        (Method name masked upstream; presumably ``equalize_channel`` per its
        own docstring.)
        Args:
            image: int Tensor with pixels in range [0, 255], RGB format,
                with channels last
            channel_index: channel to equalize
        """
        image = image[..., channel_index]
        # Compute the histogram of the image channel.
        histogram = tf.histogram_fixed_width(image, [0, 255], nbins=self.bins)
        # For the purposes of computing the step, filter out the non-zeros.
        # Zeroes are replaced by a big number while calculating min to keep
        # shape constant across input sizes for compatibility with
        # vectorized_map
        big_number = 1410065408
        histogram_without_zeroes = tf.where(
            tf.equal(histogram, 0),
            big_number,
            histogram,
        )
        step = (
            tf.reduce_sum(histogram) - tf.reduce_min(histogram_without_zeroes)
        ) // (self.bins - 1)
        def build_mapping(histogram, step):
            # Compute the cumulative sum, shifting by step // 2
            # and then normalization by step.
            lookup_table = (tf.cumsum(histogram) + (step // 2)) // step
            # Shift lookup_table, prepending with 0.
            lookup_table = tf.concat([[0], lookup_table[:-1]], 0)
            # Clip the counts to be in range. This is done
            # in the C code for image.point.
            return tf.clip_by_value(lookup_table, 0, 255)
        # If step is zero, return the original image. Otherwise, build
        # lookup table from the full histogram and step and then index from it.
        result = tf.cond(
            tf.equal(step, 0),
            lambda: image,
            lambda: tf.gather(build_mapping(histogram, step), image),
        )
        return result
    def augment_image(self, image, **kwargs):
        # Work in [0, 255] integer space regardless of the declared
        # value_range, then map back afterwards.
        # NOTE(review): assumes `image` is a single channels-last (H, W, C)
        # image (per-image augmentation) -- TODO confirm against base layer.
        image = preprocessing.transform_value_range(
            image, self.value_range, (0, 255), dtype=self.compute_dtype
        )
        image = tf.cast(image, tf.int32)
        # Equalize each channel independently; map_fn yields (C, H, W),
        # hence the transpose back to channels-last below.
        image = tf.map_fn(
            lambda channel: self.METHOD_NAME(image, channel),
            tf.range(tf.shape(image)[-1]),
        )
        image = tf.transpose(image, [1, 2, 0])
        image = tf.cast(image, self.compute_dtype)
        image = preprocessing.transform_value_range(
            image, (0, 255), self.value_range, dtype=self.compute_dtype
        )
        return image
    def augment_bounding_boxes(self, bounding_boxes, **kwargs):
        # Pixel-value-only transform: geometry is unchanged.
        return bounding_boxes
    def augment_label(self, label, transformation=None, **kwargs):
        return label
    def augment_segmentation_mask(
        self, segmentation_mask, transformation, **kwargs
    ):
        return segmentation_mask
    def get_config(self):
        config = super().get_config()
        config.update({"bins": self.bins, "value_range": self.value_range})
        return config
7,388 | test split | # Copyright 2022 The MediaPipe Authors.s
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
from typing import NamedTuple
from unittest import mock as unittest_mock
from absl.testing import parameterized
import tensorflow as tf
from mediapipe.model_maker.python.vision.gesture_recognizer import dataset
from mediapipe.tasks.python.test import test_utils
from mediapipe.tasks.python.vision import hand_landmarker
_TEST_DATA_DIRNAME = 'raw_data'
class DatasetTest(tf.test.TestCase, parameterized.TestCase):
    def setUp(self):
        super().setUp()
        # Mock tempfile.gettempdir() to be unique for each test to avoid race
        # condition when downloading model since these tests may run in parallel.
        mock_gettempdir = unittest_mock.patch.object(
            tempfile,
            'gettempdir',
            return_value=self.create_tempdir(),
            autospec=True,
        )
        self.mock_gettempdir = mock_gettempdir.start()
        self.addCleanup(mock_gettempdir.stop)
    def METHOD_NAME(self):
        """Splitting 50/50 yields two equal halves with identical label sets.
        (Name masked upstream; presumably ``test_split`` -- TODO confirm.)
        """
        input_data_dir = test_utils.get_test_data_path(_TEST_DATA_DIRNAME)
        data = dataset.Dataset.from_folder(
            dirname=input_data_dir, hparams=dataset.HandDataPreprocessingParams())
        train_data, test_data = data.split(0.5)
        self.assertLen(train_data, 16)
        for _, elem in enumerate(train_data.gen_tf_dataset(is_training=True)):
            # 128-dim embedding, 4-way one-hot label, batch of 1.
            self.assertEqual(elem[0].shape, (1, 128))
            self.assertEqual(elem[1].shape, ([1, 4]))
        self.assertEqual(train_data.num_classes, 4)
        self.assertEqual(train_data.label_names, ['none', 'call', 'four', 'rock'])
        self.assertLen(test_data, 16)
        for _, elem in enumerate(test_data.gen_tf_dataset(is_training=True)):
            self.assertEqual(elem[0].shape, (1, 128))
            self.assertEqual(elem[1].shape, ([1, 4]))
        self.assertEqual(test_data.num_classes, 4)
        self.assertEqual(test_data.label_names, ['none', 'call', 'four', 'rock'])
    def test_from_folder(self):
        """Loading the raw folder yields 32 examples over 4 classes."""
        input_data_dir = test_utils.get_test_data_path(_TEST_DATA_DIRNAME)
        data = dataset.Dataset.from_folder(
            dirname=input_data_dir, hparams=dataset.HandDataPreprocessingParams())
        for _, elem in enumerate(data.gen_tf_dataset(is_training=True)):
            self.assertEqual(elem[0].shape, (1, 128))
            self.assertEqual(elem[1].shape, ([1, 4]))
        self.assertLen(data, 32)
        self.assertEqual(data.num_classes, 4)
        self.assertEqual(data.label_names, ['none', 'call', 'four', 'rock'])
    def test_create_dataset_from_empty_folder_raise_value_error(self):
        with self.assertRaisesRegex(ValueError, 'Image dataset directory is empty'):
            dataset.Dataset.from_folder(
                dirname=self.get_temp_dir(),
                hparams=dataset.HandDataPreprocessingParams())
    def test_create_dataset_from_folder_without_none_raise_value_error(self):
        input_data_dir = test_utils.get_test_data_path(_TEST_DATA_DIRNAME)
        tmp_dir = self.create_tempdir()
        # Copy input dataset to a temporary directory and skip 'None' directory.
        for name in os.listdir(input_data_dir):
            if name == 'none':
                continue
            src_dir = os.path.join(input_data_dir, name)
            dst_dir = os.path.join(tmp_dir, name)
            shutil.copytree(src_dir, dst_dir)
        with self.assertRaisesRegex(ValueError,
                                    'Label set does not contain label "None"'):
            dataset.Dataset.from_folder(
                dirname=tmp_dir, hparams=dataset.HandDataPreprocessingParams())
    def test_create_dataset_from_folder_with_capital_letter_in_folder_name(self):
        input_data_dir = test_utils.get_test_data_path(_TEST_DATA_DIRNAME)
        tmp_dir = self.create_tempdir()
        # Copy input dataset to a temporary directory and change the base folder
        # name to upper case letter, e.g. 'none' -> 'NONE'
        for name in os.listdir(input_data_dir):
            src_dir = os.path.join(input_data_dir, name)
            dst_dir = os.path.join(tmp_dir, name.upper())
            shutil.copytree(src_dir, dst_dir)
        upper_base_folder_name = list(os.listdir(tmp_dir))
        self.assertCountEqual(upper_base_folder_name,
                              ['CALL', 'FOUR', 'NONE', 'ROCK'])
        data = dataset.Dataset.from_folder(
            dirname=tmp_dir, hparams=dataset.HandDataPreprocessingParams())
        for _, elem in enumerate(data.gen_tf_dataset(is_training=True)):
            self.assertEqual(elem[0].shape, (1, 128))
            self.assertEqual(elem[1].shape, ([1, 4]))
        self.assertLen(data, 32)
        self.assertEqual(data.num_classes, 4)
        self.assertEqual(data.label_names, ['NONE', 'CALL', 'FOUR', 'ROCK'])
    # Each case stubs the hand landmarker to return a degenerate detection
    # result; dataset creation must reject all of them.
    @parameterized.named_parameters(
        dict(
            testcase_name='none_handedness',
            hand=hand_landmarker.HandLandmarkerResult(
                handedness=None, hand_landmarks=[[2]],
                hand_world_landmarks=[[3]])),
        dict(
            testcase_name='none_hand_landmarks',
            hand=hand_landmarker.HandLandmarkerResult(
                handedness=[[1]], hand_landmarks=None,
                hand_world_landmarks=[[3]])),
        dict(
            testcase_name='none_hand_world_landmarks',
            hand=hand_landmarker.HandLandmarkerResult(
                handedness=[[1]], hand_landmarks=[[2]],
                hand_world_landmarks=None)),
        dict(
            testcase_name='empty_handedness',
            hand=hand_landmarker.HandLandmarkerResult(
                handedness=[], hand_landmarks=[[2]], hand_world_landmarks=[[3]])),
        dict(
            testcase_name='empty_hand_landmarks',
            hand=hand_landmarker.HandLandmarkerResult(
                handedness=[[1]], hand_landmarks=[], hand_world_landmarks=[[3]])),
        dict(
            testcase_name='empty_hand_world_landmarks',
            hand=hand_landmarker.HandLandmarkerResult(
                handedness=[[1]], hand_landmarks=[[2]], hand_world_landmarks=[])),
    )
    def test_create_dataset_from_invalid_hand_data(self, hand: NamedTuple):
        with unittest_mock.patch.object(
            hand_landmarker.HandLandmarker, 'detect', return_value=hand
        ):
            input_data_dir = test_utils.get_test_data_path(_TEST_DATA_DIRNAME)
            with self.assertRaisesRegex(ValueError, 'No valid hand is detected'):
                dataset.Dataset.from_folder(
                    dirname=input_data_dir,
                    hparams=dataset.HandDataPreprocessingParams())
if __name__ == '__main__':
    # Delegate test discovery and execution to the TensorFlow test runner.
    tf.test.main()
7,389 | opentag | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2019 Matt Post <post@cs.jhu.edu>
# Copyright 2019 David Wei Chiang <dchiang@nd.edu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Used to merge in missing information to many papers in commit 24ab9efd2ec05b9dcc80337695e5b33219aab679.
(See issue #173).
Author: David Chiang
"""
import logging
import difflib
import lxml.etree as etree
import copy
# Attributes that identify an element; only these appear in the match key
# used to align elements between the two trees.
unique_attributes = {"id"}
# Element types whose children are compared and merged recursively.
recurse_elements = {"volume", "paper"}
exclude_elements = {
    "paper"
}  # papers are usually removed for a reason; don't put them back
def METHOD_NAME(node):
    """Build a canonical one-line signature for *node*.

    The signature is the element's opening tag serialized with only its
    identifying attributes (those in ``unique_attributes``), sorted by
    attribute name, so two elements compare equal iff they represent the
    same logical node.  Used as a sort/diff key by ``merge``.
    """
    # The local was previously named 'copy', shadowing the imported 'copy'
    # module; renamed to avoid confusion.
    stub = etree.Element(node.tag)
    stub.text = "\n"
    for a, v in sorted(node.attrib.items()):
        if a in unique_attributes:
            stub.attrib[a] = v
    s = etree.tostring(stub, encoding=str)
    return s.splitlines()[0]
def merge(atree, btree):
    """Merge children present in btree but missing from atree, in place."""
    def visit(anode, bnode):
        # Only container elements are compared child-by-child.
        if anode.tag not in recurse_elements and bnode.tag not in recurse_elements:
            return
        achildren = sorted(anode, key=METHOD_NAME)
        bchildren = sorted(bnode, key=METHOD_NAME)
        # Capture the surrounding whitespace so inserted elements keep the
        # document's indentation style.
        indent = anode.text or ""
        after = anode[-1].tail or ""
        assert indent.strip() == after.strip() == ""
        # Diff the two sorted child lists by their canonical signatures.
        for op, ai, aj, bi, bj in difflib.SequenceMatcher(
            a=list(map(METHOD_NAME, achildren)), b=list(map(METHOD_NAME, bchildren))
        ).get_opcodes():
            if op in ["insert", "replace"]:
                for bchild in bchildren[bi:bj]:
                    if bchild.tag in exclude_elements:
                        logging.info(
                            "don't insert {}:<{}>".format(bchild.sourceline, bchild.tag)
                        )
                        continue
                    # hacky exception: papers shouldn't have editors
                    if bchild.tag == "editor" and bchild.getparent().find(
                        "bibtype"
                    ).text in ["inproceedings", "incollection", "article"]:
                        logging.info(
                            "don't insert {}:<{}>".format(bchild.sourceline, bchild.tag)
                        )
                        continue
                    logging.info(
                        "insert {}:{}".format(
                            bchild.sourceline,
                            etree.tostring(bchild, encoding=str, with_tail=False),
                        )
                    )
                    # Append a deep copy, fixing up tails to keep indentation.
                    anode[-1].tail = indent
                    anode.append(copy.deepcopy(bchild))
                    anode[-1].tail = after
            elif op == "equal":
                # Recurse into matched pairs so nested containers merge too.
                for achild, bchild in zip(achildren[ai:aj], bchildren[bi:bj]):
                    visit(achild, bchild)

    aroot = atree.getroot()
    broot = btree.getroot()
    visit(aroot, broot)
if __name__ == "__main__":
    import sys
    import argparse

    ap = argparse.ArgumentParser(description="Merge two XML files.")
    ap.add_argument("afile", help="first XML file (fields in this file take priority")
    ap.add_argument("bfile", help="second XML file")
    ap.add_argument("-o", "--outfile", help="XML file to write (default stdout)")
    args = ap.parse_args()

    logging.basicConfig(level=logging.INFO)

    if args.outfile:
        outfile = open(args.outfile, "w")
    else:
        outfile = sys.stdout

    # Parse both inputs, merge b into a, then serialize the merged tree.
    atree = etree.parse(args.afile)
    btree = etree.parse(args.bfile)
    merge(atree, btree)
    outfile.write(
        etree.tostring(
            atree, encoding="UTF-8", xml_declaration=True, with_tail=True
        ).decode("utf8")
    )
7,390 | set up class | # -*- coding: utf-8 -*-
import unittest
from unittest.mock import MagicMock
import pickle
from blivetgui.communication.server import BlivetUtilsServer, BlivetProxyObject
from blivetgui.communication.proxy_utils import ProxyID, ProxyDataContainer
from blivet.size import Size
class BlivetUtilsServerTest(unittest.TestCase):
    """Tests for BlivetUtilsServer answer pickling and argument conversion."""

    def test_pickle_answer(self):
        # string
        msg = "abcdef"
        pickled_msg = BlivetUtilsServer._pickle_answer(MagicMock(), msg)
        self.assertEqual(msg, pickle.loads(pickled_msg))

        # None
        msg = None
        pickled_msg = BlivetUtilsServer._pickle_answer(MagicMock(), msg)
        self.assertEqual(msg, pickle.loads(pickled_msg))

        # blivet.size.Size
        msg = Size("8 GiB")
        pickled_msg = BlivetUtilsServer._pickle_answer(MagicMock(), msg)
        self.assertEqual(msg, pickle.loads(pickled_msg))

        # list of multiple types
        msg = ["abcdef", 1, 1.01, True]
        pickled_msg = BlivetUtilsServer._pickle_answer(MagicMock(), msg)
        self.assertEqual(msg, pickle.loads(pickled_msg))

        # BlivetProxyObject
        msg = BlivetProxyObject(MagicMock(), ProxyID())
        pickled_msg = BlivetUtilsServer._pickle_answer(MagicMock(), msg)
        # BlivetProxyObject is not pickled, instead of it we pickle its id (ProxyID object)
        # we compare the id (int) of this id (ProxyID) with id of unpickled object
        self.assertEqual(msg.id.id, pickle.loads(pickled_msg).id)

        # unpicklable object
        test_dict = {}
        msg = MagicMock()  # MagicMock is definitely not in picklable_types
        pickled_msg = BlivetUtilsServer._pickle_answer(MagicMock(object_dict=test_dict), msg)
        unpickled_msg = pickle.loads(pickled_msg)
        # unpicklable objects are not pickled, instead a BlivetProxyObject is created
        # and its ProxyID is pickled; test we really have a ProxyID object and test
        # that original object was placed in the dict with proxied-object
        self.assertTrue(isinstance(unpickled_msg, ProxyID))
        self.assertEqual(test_dict[unpickled_msg.id].blivet_object, msg)

        # unpicklable objects in list
        test_dict = {}
        msg = [MagicMock(), "abcdef"]
        pickled_msg = BlivetUtilsServer._pickle_answer(MagicMock(object_dict=test_dict), msg)
        unpickled_msg = pickle.loads(pickled_msg)
        self.assertTrue(isinstance(unpickled_msg, list))
        self.assertTrue(isinstance(unpickled_msg[0], ProxyID))
        self.assertEqual(test_dict[unpickled_msg[0].id].blivet_object, msg[0])
        self.assertEqual(unpickled_msg[1], msg[1])

    def test_convert_args(self):
        # 'normal' arguments -- passed through unchanged
        args = ["abcdef", 1, 1.01, True, None]
        converted_args = BlivetUtilsServer._args_convertTo_objects(MagicMock(), args)
        self.assertEqual(converted_args, args)

        # ProxyID arguments -- resolved to the proxied blivet objects
        test_dict = {}
        arg1 = ProxyID()
        arg1_obj = MagicMock(blivet_object=MagicMock())
        test_dict[arg1.id] = arg1_obj
        arg2 = ProxyID()
        arg2_obj = MagicMock(blivet_object=MagicMock())
        test_dict[arg2.id] = arg2_obj
        converted_args = BlivetUtilsServer._args_convertTo_objects(MagicMock(object_dict=test_dict), [arg1, arg2])
        self.assertEqual(converted_args, [arg1_obj.blivet_object, arg2_obj.blivet_object])

        # ProxyDataContainer as an argument -- converted recursively
        test_dict = {}
        arg3 = ProxyID()
        arg3_obj = MagicMock(blivet_object=MagicMock())
        test_dict[arg3.id] = arg3_obj
        args = [ProxyDataContainer(data1="abcdef", data2=1, data3=arg3)]
        converted_args = BlivetUtilsServer._args_convertTo_objects(MagicMock(object_dict=test_dict), args)
        self.assertEqual(converted_args[0]["data1"], "abcdef")
        self.assertEqual(converted_args[0]["data2"], 1)
        self.assertEqual(converted_args[0]["data3"], arg3_obj.blivet_object)

    def test_convert_kwargs(self):
        # Keyword values go through the same ProxyID resolution as args.
        test_dict = {}
        arg1 = ProxyID()
        arg1_obj = MagicMock(blivet_object=MagicMock())
        test_dict[arg1.id] = arg1_obj
        arg2 = ProxyID()
        arg2_obj = MagicMock(blivet_object=MagicMock())
        test_dict[arg2.id] = arg2_obj
        kwargs = {"a": 1, "b": arg1, "c": arg2}
        server_mock = MagicMock(object_dict=test_dict,
                                _args_convertTo_objects=lambda args: BlivetUtilsServer._args_convertTo_objects(MagicMock(object_dict=test_dict), args))
        converted_kwargs = BlivetUtilsServer._kwargs_convertTo_objects(server_mock, kwargs)
        self.assertEqual(converted_kwargs, {"a": 1, "b": arg1_obj.blivet_object, "c": arg2_obj.blivet_object})
class BlivetProxyObjectTest(unittest.TestCase):
    """Tests for attribute/item/str/len forwarding on BlivetProxyObject."""

    @classmethod
    def METHOD_NAME(cls):
        # One shared proxy around a mocked blivet object for all tests.
        cls.blivet_object = MagicMock(set_test=None)
        del cls.blivet_object.non_existing  # mock non-existing attribute
        cls.obj_id = ProxyID()
        cls.proxy_object = BlivetProxyObject(cls.blivet_object, cls.obj_id)

    def test_getattr(self):
        # Attribute reads are forwarded to the wrapped object.
        self.assertEqual(self.proxy_object.existing, self.blivet_object.existing)
        with self.assertRaises(AttributeError):
            self.proxy_object.non_existing  # pylint: disable=W0104

    def test_setattr(self):
        # Attribute writes are forwarded to the wrapped object.
        self.proxy_object.set_test = "test"
        self.assertEqual(self.blivet_object.set_test, "test")

    def test_getitem(self):
        self.assertEqual(self.proxy_object["key"], self.blivet_object["key"])  # pylint: disable=unsubscriptable-object

    def test_str(self):
        self.assertEqual(str(self.proxy_object), str(self.blivet_object))

    def test_len(self):
        self.assertEqual(len(self.proxy_object), len(self.blivet_object))


if __name__ == "__main__":
    unittest.main()
7,391 | reset titles | # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""SelectionContainer class.
Represents a multipage container that can be used to group other widgets into
pages.
"""
from .widget_box import Box
from .widget import register
from .widget_core import CoreWidget
from traitlets import Unicode, Dict, CInt, TraitError, validate, observe
from .trait_types import TypedTuple
from itertools import chain, repeat, islice
# Inspired by an itertools recipe: https://docs.python.org/3/library/itertools.html#itertools-recipes
def pad(iterable, padding=None, length=None):
    """Yield the elements of *iterable*, then *padding* values, up to *length*
    items in total (or indefinitely when *length* is None)."""
    padded = chain(iterable, repeat(padding))
    return islice(padded, length)
class _SelectionContainer(Box, CoreWidget):
    """Base class used to display multiple child widgets."""
    # One title per child; padding/truncation is enforced by _validate_titles.
    titles = TypedTuple(trait=Unicode(), help="Titles of the pages").tag(sync=True)

    selected_index = CInt(
        help="""The index of the selected page. This is either an integer selecting a particular sub-widget, or None to have no widgets selected.""",
        allow_none=True,
        default_value=None
    ).tag(sync=True)

    @validate('selected_index')
    def _validated_index(self, proposal):
        """Accept only None or an index of an existing child."""
        if proposal.value is None or 0 <= proposal.value < len(self.children):
            return proposal.value
        else:
            raise TraitError('Invalid selection: index out of bounds')

    @validate('titles')
    def _validate_titles(self, proposal):
        """Pad (or truncate) the titles so there is exactly one per child."""
        return tuple(pad(proposal.value, '', len(self.children)))

    @observe('children')
    def _observe_children(self, change):
        # Keep the selection and the titles consistent with the new children.
        self._reset_selected_index()
        self.METHOD_NAME()

    def _reset_selected_index(self):
        """Clear the selection when it no longer refers to an existing child.

        Valid indices are 0..len(children)-1, so an index *equal* to
        len(children) is also out of bounds (previously '<', an off-by-one
        that left such a stale index in place).
        """
        if self.selected_index is not None and len(self.children) <= self.selected_index:
            self.selected_index = None

    def METHOD_NAME(self):
        """Re-run title validation when the number of children has changed."""
        if len(self.titles) != len(self.children):
            # Run validation function
            self.titles = tuple(self.titles)

    def set_title(self, index, title):
        """Sets the title of a container page.

        Parameters
        ----------
        index : int
            Index of the container page
        title : unicode
            New title
        """
        titles = list(self.titles)
        # for backwards compatibility with ipywidgets 7.x
        if title is None:
            title = ''
        titles[index] = title
        self.titles = tuple(titles)

    def get_title(self, index):
        """Gets the title of a container page.

        Parameters
        ----------
        index : int
            Index of the container page
        """
        return self.titles[index]
@register
class Accordion(_SelectionContainer):
    """Displays children each on a separate accordion page."""
    # Names of the corresponding front-end view/model classes.
    _view_name = Unicode('AccordionView').tag(sync=True)
    _model_name = Unicode('AccordionModel').tag(sync=True)
@register
class Tab(_SelectionContainer):
    """Displays children each on a separate tab."""
    # Names of the corresponding front-end view/model classes.
    _view_name = Unicode('TabView').tag(sync=True)
    _model_name = Unicode('TabModel').tag(sync=True)

    def __init__(self, children=(), **kwargs):
        # Select the first tab by default when there are children.
        if len(children) > 0 and 'selected_index' not in kwargs:
            kwargs['selected_index'] = 0
        super().__init__(children=children, **kwargs)

    def _reset_selected_index(self):
        """Keep the selection valid when the set of children changes."""
        num_children = len(self.children)
        # if there are no tabs, then none should be selected
        if num_children == 0:
            self.selected_index = None
        # if there are tabs, but none is selected, select the first one
        elif self.selected_index is None:
            self.selected_index = 0
        # if there are tabs and a selection, but the selection is no longer
        # valid, select the last tab. Valid indices are 0..num_children-1,
        # so equality is out of bounds too (previously '<', an off-by-one).
        elif num_children <= self.selected_index:
            self.selected_index = num_children - 1
@register
class Stack(_SelectionContainer):
    """Displays only the selected child."""
    # Names of the corresponding front-end view/model classes.
    _view_name = Unicode('StackView').tag(sync=True)
    _model_name = Unicode('StackModel').tag(sync=True)
7,392 | accepts password | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""OpenSSL/M2Crypto RSA implementation."""
from .cryptomath import *
from .rsakey import *
from .python_rsakey import Python_RSAKey
from .compat import compatAscii2Bytes, compat_b2a
#copied from M2Crypto.util.py, so when we load the local copy of m2
#we can still use it
def password_callback(v, prompt1='Enter private key passphrase:',
                      prompt2='Verify passphrase:'):
    """Prompt for a passphrase on the terminal.

    When *v* is true the passphrase is asked twice and re-prompted until
    both entries match. Returns the passphrase, or None if the user
    aborts with Ctrl-C.
    """
    from getpass import getpass
    while True:
        try:
            phrase = getpass(prompt1)
            if not v:
                return phrase
            if getpass(prompt2) == phrase:
                return phrase
        except KeyboardInterrupt:
            return None
if m2cryptoLoaded:
    import M2Crypto

    class OpenSSL_RSAKey(RSAKey):
        """RSA key implementation backed by M2Crypto/OpenSSL."""

        def __init__(self, n=0, e=0, key_type="rsa"):
            # Either both n and e are given (public key) or neither.
            self.rsa = None
            self._hasPrivateKey = False
            if (n and not e) or (e and not n):
                raise AssertionError()
            if n and e:
                self.rsa = m2.rsa_new()
                m2.rsa_set_n(self.rsa, numberToMPI(n))
                m2.rsa_set_e(self.rsa, numberToMPI(e))
            self.key_type = key_type

        def __del__(self):
            # Free the underlying OpenSSL RSA structure.
            if self.rsa:
                m2.rsa_free(self.rsa)

        def __getattr__(self, name):
            # Expose 'n' and 'e' lazily from the OpenSSL key (0 when unset).
            if name == 'e':
                if not self.rsa:
                    return 0
                return mpiToNumber(m2.rsa_get_e(self.rsa))
            elif name == 'n':
                if not self.rsa:
                    return 0
                return mpiToNumber(m2.rsa_get_n(self.rsa))
            else:
                raise AttributeError

        def hasPrivateKey(self):
            return self._hasPrivateKey

        def _rawPrivateKeyOp(self, message):
            # Raw (no-padding) private-key operation on an integer message.
            data = numberToByteArray(message, numBytes(self.n))
            string = m2.rsa_private_encrypt(self.rsa, bytes(data),
                                            m2.no_padding)
            ciphertext = bytesToNumber(bytearray(string))
            return ciphertext

        def _raw_private_key_op_bytes(self, message):
            return self._call_m2crypto(
                m2.rsa_private_encrypt, message,
                "Bad parameters to private key operation")

        def _rawPublicKeyOp(self, ciphertext):
            # Raw (no-padding) public-key operation on an integer ciphertext.
            data = numberToByteArray(ciphertext, numBytes(self.n))
            string = m2.rsa_public_decrypt(self.rsa, bytes(data),
                                           m2.no_padding)
            message = bytesToNumber(bytearray(string))
            return message

        def _call_m2crypto(self, method, param, err_msg):
            # Translate M2Crypto errors into ValueError with a stable message.
            try:
                return bytearray(method(self.rsa, bytes(param), m2.no_padding))
            except M2Crypto.RSA.RSAError:
                raise ValueError(err_msg)

        def _raw_public_key_op_bytes(self, ciphertext):
            return self._call_m2crypto(
                m2.rsa_public_decrypt, ciphertext,
                "Bad parameters to public key operation")

        # This backend supports password-protected keys.
        def METHOD_NAME(self): return True

        def write(self, password=None):
            # Serialize the key as PEM; private keys are 3DES-encrypted when
            # a password is supplied. Public keys cannot take a password.
            bio = m2.bio_new(m2.bio_s_mem())
            if self._hasPrivateKey:
                if password:
                    def f(v): return password
                    m2.rsa_write_key(self.rsa, bio, m2.des_ede_cbc(), f)
                else:
                    def f(): pass
                    m2.rsa_write_key_no_cipher(self.rsa, bio, f)
            else:
                if password:
                    raise AssertionError()
                m2.rsa_write_pub_key(self.rsa, bio)
            s = m2.bio_read(bio, m2.bio_ctrl_pending(bio))
            m2.bio_free(bio)
            return s

        @staticmethod
        def generate(bits, key_type="rsa"):
            # Generate a fresh key pair; 'd' is recovered via the pure-Python
            # parser because m2 does not expose the private exponent directly.
            key = OpenSSL_RSAKey()
            def f():pass
            # pylint: disable=no-member
            key.rsa = m2.rsa_generate_key(bits, 65537, f)
            # pylint: enable=no-member
            key._hasPrivateKey = True
            key.key_type = key_type
            b64_key = compat_b2a(key.write())
            py_key = Python_RSAKey.parsePEM(b64_key)
            key.d = py_key.d
            return key

        @staticmethod
        def parse(s, passwordCallback=None):
            # Skip forward to the first PEM header
            start = s.find("-----BEGIN ")
            if start == -1:
                raise SyntaxError()
            s = s[start:]

            if s.startswith("-----BEGIN "):
                # Wrap the user callback into the (v, prompt1, prompt2)
                # signature that m2 expects.
                if passwordCallback==None:
                    callback = password_callback
                else:
                    def f(v, prompt1=None, prompt2=None):
                        return passwordCallback()
                    callback = f
                bio = m2.bio_new(m2.bio_s_mem())
                try:
                    m2.bio_write(bio, compatAscii2Bytes(s))
                    key = OpenSSL_RSAKey()
                    # parse SSLay format PEM file
                    if s.startswith("-----BEGIN RSA PRIVATE KEY-----"):
                        def f():pass
                        key.rsa = m2.rsa_read_key(bio, callback)
                        if key.rsa == None:
                            raise SyntaxError()
                        key._hasPrivateKey = True
                    # parse a standard PKCS#8 PEM file
                    elif s.startswith("-----BEGIN PRIVATE KEY-----"):
                        def f():pass
                        key.rsa = m2.pkey_read_pem(bio, callback)
                        # the below code assumes RSA key while PKCS#8 files
                        # (and by extension the EVP_PKEY structure) can be
                        # also DSA or EC, thus the double check against None
                        # (first if the file was properly loaded and second
                        # if the file actually has a RSA key in it)
                        # tlslite doesn't support DSA or EC so it's useless
                        # to handle them in a different way
                        if key.rsa == None:
                            raise SyntaxError()
                        key.rsa = m2.pkey_get1_rsa(key.rsa)
                        if key.rsa == None:
                            raise SyntaxError()
                        key._hasPrivateKey = True
                    elif s.startswith("-----BEGIN PUBLIC KEY-----"):
                        key.rsa = m2.rsa_read_pub_key(bio)
                        if key.rsa == None:
                            raise SyntaxError()
                        key._hasPrivateKey = False
                    else:
                        raise SyntaxError()
                    # Recover 'd' for private keys via the pure-Python parser.
                    if key._hasPrivateKey:
                        b64_key = compat_b2a(key.write())
                        py_key = Python_RSAKey.parsePEM(b64_key)
                        key.d = py_key.d
                    return key
                finally:
                    m2.bio_free(bio)
            else:
                raise SyntaxError()
7,393 | test strip px unit | from django.conf import settings
from django.core import mail
from django.core.exceptions import SuspiciousOperation
from django.template.loader import get_template
from django.test import TestCase
from pyquery import PyQuery
from openforms.config.models import GlobalConfiguration
from openforms.emails.context import _get_design_token_values
from openforms.emails.utils import send_mail_html
class HTMLEmailWrapperTest(TestCase):
    """Tests for send_mail_html: multipart structure, URL stripping, size limit."""

    def test_send_mail_html(self):
        """The HTML body is sent as an alternative next to a stripped text part."""
        body = "<p>My Message</p>"
        attachments = [("file.bin", b"content", "application/foo")]
        send_mail_html(
            "My Subject",
            body,
            "foo@sender.com",
            ["foo@bar.baz"],
            attachment_tuples=attachments,
        )

        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertEqual(message.subject, "My Subject")
        self.assertEqual(message.recipients(), ["foo@bar.baz"])
        self.assertEqual(message.from_email, "foo@sender.com")

        # text part: HTML tags are stripped
        # (assertEquals is a deprecated alias, removed in Python 3.12)
        self.assertEqual(message.body, "My Message\n")
        self.assertNotIn("<p>", message.body)

        # html alternative
        self.assertEqual(len(message.alternatives), 1)
        content, mime_type = message.alternatives[0]
        self.assertEqual(mime_type, "text/html")
        self.assertIn("<p>My Message</p>", content)
        self.assertIn("<table", content)
        # TODO test html validity?

        self.assertEqual(len(message.attachments), 1)
        file = message.attachments[0]
        self.assertEqual(file[0], "file.bin")
        self.assertEqual(file[1], b"content")  # still bytes
        self.assertEqual(file[2], "application/foo")

    def test_strip_non_allowed_urls(self):
        """URLs outside the allowlist are removed; allowed and base URLs stay."""
        config = GlobalConfiguration.get_solo()
        config.email_template_netloc_allowlist = ["allowed.com"]
        config.save()

        body = "<p>test https://google.com https://www.google.com https://allowed.com test</p>"
        body += settings.BASE_URL

        send_mail_html(
            "My Subject",
            body,
            "foo@sender.com",
            ["foo@bar.baz"],
        )

        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        message_html = message.alternatives[0][0]
        self.assertNotIn("google.com", message_html)
        self.assertIn("https://allowed.com", message_html)
        self.assertIn(settings.BASE_URL, message_html)

    # renamed from ..._execpt_base_url to fix the typo
    def test_strip_non_allowed_urls_without_config_strips_all_urls_except_base_url(
        self,
    ):
        """With an empty allowlist only the base URL survives."""
        config = GlobalConfiguration.get_solo()
        config.email_template_netloc_allowlist = []
        config.save()

        body = "<p>test https://google.com https://www.google.com https://allowed.com test</p>"
        body += settings.BASE_URL

        send_mail_html(
            "My Subject",
            body,
            "foo@sender.com",
            ["foo@bar.baz"],
        )

        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        message_html = message.alternatives[0][0]
        self.assertNotIn("google.com", message_html)
        self.assertNotIn("allowed.com", message_html)
        self.assertIn(settings.BASE_URL, message_html)

    # renamed from test_oversize_content_raise_suspiciosu_operation (typo)
    def test_oversize_content_raises_suspicious_operation(self):
        """Bodies over the safety limit must be rejected, not sent."""
        body = "<p>My Message</p>" + ("123" * 1024 * 1024)
        with self.assertRaisesMessage(
            SuspiciousOperation, "email content-length exceeded safety limit"
        ):
            send_mail_html(
                "My Subject",
                body,
                "foo@sender.com",
                ["foo@bar.baz"],
            )
class DesignTokenFilterTest(TestCase):
    """unit tests for ``emails.context._get_design_token_values``"""

    def METHOD_NAME(self):
        # 'px' values are mirrored into unit-less HTML width/height attributes.
        self.design_tokens = {
            "of": {
                "header-logo": {
                    "height": {"value": "100px"},
                    "width": {"value": "100px"},
                }
            }
        }
        result = _get_design_token_values(self.design_tokens)
        self.assertEqual(result["logo"]["height"], "100px")
        self.assertEqual(result["logo"]["width"], "100px")
        self.assertEqual(result["logo"]["height_attr"], "100")
        self.assertEqual(result["logo"]["width_attr"], "100")

    def test_return_empty_string_for_non_px_unit(self):
        # Non-px units cannot be expressed as HTML attributes, so the
        # *_attr entries stay empty while the CSS values pass through.
        self.design_tokens = {
            "of": {
                "header-logo": {
                    "height": {"value": "100em"},
                    "width": {"value": "100em"},
                }
            }
        }
        result = _get_design_token_values(self.design_tokens)
        self.assertEqual(result["logo"]["height"], "100em")
        self.assertEqual(result["logo"]["width"], "100em")
        self.assertEqual(result["logo"]["height_attr"], "")
        self.assertEqual(result["logo"]["width_attr"], "")

    def test_return_empty_string_for_auto_size(self):
        """default for width: 'auto'"""
        self.design_tokens = {"header-logo": {}}
        result = _get_design_token_values(self.design_tokens)
        self.assertEqual(result["logo"]["width"], "auto")
        self.assertEqual(result["logo"]["width_attr"], "")
class TemplateRenderingTest(TestCase):
    """
    Integration test for the display of design token values.

    Assumes that ``emails.context.get_wrapper_context`` works correctly,
    hence the context variable is provided manually.
    """

    def test_design_token_values(self):
        template = get_template("emails/wrapper.html")
        logo_style = {
            "height": "150px",
            "width": "auto",
            "height_attr": "150",
            "width_attr": "",
        }
        rendered = template.render(
            {
                "content": "<p>Hello there!</p>",
                "main_website_url": "https://logoresizingdoneright.com",
                "style": {"logo": logo_style},
                "logo_url": "https://logo.png",
            }
        )

        image = PyQuery(rendered)("img")
        # Only the height becomes an HTML attribute; width stays CSS-only.
        self.assertEqual(image.attr("height"), "150")
        self.assertIsNone(image.attr("width"))
        inline_styles = image.attr("style").replace(" ", "").split(";")
        self.assertIn("width:auto", inline_styles)
        self.assertIn("height:150px", inline_styles)
7,394 | log timing | #!/usr/bin/env python
"""
This parses a log file series (i.e. log, log.1, log.2, etc..) and
outputs timing and call frequency information for HAL messages.
Hazen 5/18
"""
from datetime import datetime
import os
# Timestamp format used by the HAL log files.
pattern = '%Y-%m-%d %H:%M:%S,%f'


class Message(object):
    """
    Storage for the timing of a single message.

    Records when the message was created (relative to the first timestamp
    in the log), how long it was queued, how long it took to process, which
    modules handled it and how many workers it employed.
    """
    def __init__(self, m_type = None, source = None, time = None, zero_time = None, **kwds):
        """
        m_type - the message type string.
        source - the module that sent the message.
        time - timestamp string of the 'queued' log entry.
        zero_time - timestamp string of the first entry in the log.
        """
        super().__init__(**kwds)
        self.created_time = None     # Seconds after zero_time, set below.
        self.handled_by = {}         # Maps module name to handling count.
        self.m_type = m_type
        self.n_workers = 0
        self.processing_time = None  # Seconds; None until processed().
        self.queued_time = None      # Seconds; None until sent().
        self.source = source
        # 'temp' tracks the timestamp of the most recent event; it is
        # advanced by sent() and consumed by processed().
        self.temp = self.parseTime(time)
        self.created(zero_time)

    def created(self, time):
        """Record the creation time relative to the timestamp 'time'."""
        t_time = self.parseTime(time)
        self.created_time = (self.temp - t_time).total_seconds()

    def handledBy(self, module_name):
        """Count one handling of this message by 'module_name'."""
        # dict.get() replaces the previous manual membership test.
        self.handled_by[module_name] = self.handled_by.get(module_name, 0) + 1

    def getCreatedTime(self):
        """
        Returns the time when the message was created relative to first
        time in the log file in seconds.
        """
        return self.created_time

    def getHandledBy(self):
        """
        Get dictionary of modules that handled this message.
        """
        return self.handled_by

    def getNWorkers(self):
        """
        Return the number of workers (QRunnables) that were employed
        to process this message.
        """
        return self.n_workers

    def getProcessingTime(self):
        """
        Return time to process in seconds.
        """
        return self.processing_time

    def getQueuedTime(self):
        """
        Return time queued in seconds.
        """
        return self.queued_time

    def getSource(self):
        """
        Returns the source of a message.
        """
        return self.source

    def getType(self):
        """
        Return the message type.
        """
        return self.m_type

    def incNWorkers(self):
        """Count one more worker used to process this message."""
        self.n_workers += 1

    def isComplete(self):
        """
        Returns true if we have all the timing data for this message.
        """
        # 'is not None' instead of '!= None' (identity check idiom).
        return (self.processing_time is not None)

    def parseTime(self, time):
        """Parse a log timestamp string into a datetime object."""
        return datetime.strptime(time, pattern)

    def processed(self, time):
        """Record the processing-finished timestamp; closes the timing."""
        t_time = self.parseTime(time)
        self.processing_time = (t_time - self.temp).total_seconds()

    def sent(self, time):
        """Record the dequeue timestamp; advances 'temp' for processed()."""
        t_time = self.parseTime(time)
        self.queued_time = (t_time - self.temp).total_seconds()
        self.temp = t_time
def getIterable(dict_or_list):
    """
    Returns an iterable given a dictionary of a list.
    """
    if isinstance(dict_or_list, dict):
        return list(dict_or_list.values())
    if isinstance(dict_or_list, list):
        return dict_or_list
    raise Exception("Unknown type '" + str(type(dict_or_list)) + "'")
def groupByMsgType(messages):
    """
    Returns a dictionary keyed by message type, with a list of one or
    more message objects per message type.
    """
    return groupByX(lambda msg: msg.getType(), messages)
def groupBySource(messages):
    """
    Returns a dictionary keyed by message source, with a list of one or
    more message objects per message source.
    """
    return groupByX(lambda msg: msg.getSource(), messages)
def groupByX(grp_fn, messages):
    """
    Returns a dictionary keyed by the requested group.

    grp_fn - callable mapping a message object to its group key.
    messages - a dictionary or list of message objects.
    """
    m_grp = {}
    for msg in getIterable(messages):
        # setdefault replaces the manual membership test / list creation.
        m_grp.setdefault(grp_fn(msg), []).append(msg)
    return m_grp
def METHOD_NAME(basename, ignore_incomplete = True):
    """
    Returns a dictionary of Message objects keyed by their ID number.

    Reads '<basename>.out.5' .. '<basename>.out.1' then '<basename>.out',
    oldest first, so timing entries accumulate in chronological order.
    """
    zero_time = None
    messages = {}
    for ext in [".5", ".4", ".3", ".2", ".1", ""]:
        fname = basename + ".out" + ext
        if not os.path.exists(fname):
            print(fname, "not found.")
            continue
        with open(fname) as fp:
            for line in fp:
                # Split off the logger prefix; skip lines that are not
                # hal4000 INFO records.
                try:
                    [time, command] = map(lambda x: x.strip(), line.split(":hal4000:INFO:"))
                except ValueError:
                    continue

                # The first timestamp seen becomes the zero point used by
                # Message.getCreatedTime().
                if zero_time is None:
                    zero_time = time

                # Message handled by.
                if (command.startswith("handled by,")):
                    [m_id, module_name, m_type] = command.split(",")[1:]
                    if m_id in messages:
                        messages[m_id].handledBy(module_name)

                # Message queued.
                elif (command.startswith("queued,")):
                    [m_id, source, m_type] = command.split(",")[1:]
                    messages[m_id] = Message(m_type = m_type,
                                             source = source,
                                             time = time,
                                             zero_time = zero_time)

                # Message sent.
                elif (command.startswith("sent,")):
                    m_id = command.split(",")[1]
                    if m_id in messages:
                        messages[m_id].sent(time)

                # Message processed.
                elif (command.startswith("processed,")):
                    m_id = command.split(",")[1]
                    if m_id in messages:
                        messages[m_id].processed(time)

                elif (command.startswith("worker done,")):
                    m_id = command.split(",")[1]
                    if m_id in messages:
                        messages[m_id].incNWorkers()

    # Ignore messages that we don't have all the timing for.
    if ignore_incomplete:
        temp = {}
        for m_id in messages:
            msg = messages[m_id]
            if msg.isComplete():
                temp[m_id] = msg
        return temp
    else:
        return messages
def processingTime(messages):
    """
    Returns the total processing time for a collection of messages.
    """
    flattened = []
    for item in getIterable(messages):
        if isinstance(item, list):
            flattened.extend(item)
        else:
            flattened.append(item)
    return sum(msg.getProcessingTime() for msg in flattened)
def queuedTime(messages):
    """
    Returns the total queued time for a a collection of messages.
    """
    flattened = []
    for item in getIterable(messages):
        if isinstance(item, list):
            flattened.extend(item)
        else:
            flattened.append(item)
    return sum(msg.getQueuedTime() for msg in flattened)
if (__name__ == "__main__"):
    import sys

    if (len(sys.argv) != 2):
        print("usage: <log file>")
        exit()

    # Parse the log series, then report per-message-type call counts and
    # processing/queued totals for all messages.
    messages = METHOD_NAME(sys.argv[1])

    groups = groupByMsgType(messages)
    print()
    print("All messages:")
    for key in sorted(groups):
        grp = groups[key]
        print(key + ", {0:0d} counts, {1:.3f} seconds".format(len(grp), processingTime(grp)))
    print("Total queued time {0:.3f} seconds".format(queuedTime(groups)))
    print("Total processing time {0:.3f} seconds".format(processingTime(groups)))

    # The same report restricted to messages that originated from 'film'.
    print()
    print("Film messages:")
    groups = groupByMsgType(groupBySource(messages)["film"])
    for key in sorted(groups):
        grp = groups[key]
        print(key + ", {0:0d} counts, {1:.3f} seconds".format(len(grp), processingTime(grp)))
    print("Total processing time {0:.3f} seconds".format(processingTime(groups)))
7,395 | test meta inheritance with override | import pytest
from iommi.declarative.dispatch import dispatch
from iommi.declarative.namespace import Namespace
from iommi.declarative.with_meta import with_meta
def test_empty():
    # @with_meta on a class without a Meta is a no-op.
    @with_meta
    class Test:
        def __init__(self, foo):
            assert foo == 'bar'

    Test('bar')


def test_constructor():
    # Values declared in Meta are passed to __init__ automatically.
    @with_meta
    class Test:
        class Meta:
            foo = 'bar'

        def __init__(self, foo):
            assert foo == 'bar'

    # noinspection PyArgumentList
    Test()


def test_override():
    # Explicit constructor arguments take precedence over Meta values.
    @with_meta
    class Test:
        class Meta:
            foo = 'bar'

        def __init__(self, foo):
            assert foo == 'baz'

    Test(foo='baz')


def test_inheritance():
    # Meta values are inherited by @with_meta subclasses.
    @with_meta
    class Test:
        class Meta:
            foo = 'bar'

    @with_meta
    class TestSubclass(Test):
        def __init__(self, foo):
            assert foo == 'bar'

    # noinspection PyArgumentList
    TestSubclass()


def test_inheritance_base():
    # A Meta declared only on the subclass still feeds the base __init__.
    @with_meta
    class Test:
        def __init__(self, foo):
            assert 'bar' == foo

    class TestSubclass(Test):
        class Meta:
            foo = 'bar'

    # noinspection PyArgumentList
    TestSubclass()


def test_inheritance_with_override():
    # Subclass Meta values override the base class Meta values.
    @with_meta
    class Test:
        class Meta:
            foo = 'bar'

    @with_meta
    class TestSubclass(Test):
        class Meta:
            foo = 'baz'

        def __init__(self, foo):
            assert foo == 'baz'

    # noinspection PyArgumentList
    TestSubclass()


def test_pos_arg_override():
    # Meta values coexist with positional and keyword arguments.
    @with_meta
    class Test:
        class Meta:
            foo = 'foo'
            bar = 'bar'

        def __init__(self, apa, foo, gapa, **kwargs):
            assert apa == 'apa'
            assert foo == 'foo'
            assert gapa == 'gapa'
            assert 'bar' in kwargs

    # noinspection PyArgumentList
    Test('apa', gapa='gapa')
def test_args_get_by_pos():
    # A Meta value fills a purely positional parameter.
    @with_meta
    class Test:
        class Meta:
            foo = 'foo'

        def __init__(self, foo):
            assert foo == 'foo'

    # noinspection PyArgumentList
    Test()


def test_args_get_by_name():
    # A Meta value also fills a parameter with a default.
    @with_meta
    class Test:
        class Meta:
            foo = 'foo'

        def __init__(self, foo=None):
            assert foo == 'foo'

    Test()


def test_args_override_by_pos():
    # A positional argument overrides the Meta value.
    @with_meta
    class Test:
        class Meta:
            foo = 'foo'

        def __init__(self, foo):
            assert foo == 'bar'

    Test('bar')


def test_args_override_by_name():
    # A keyword argument overrides the Meta value.
    @with_meta
    class Test:
        class Meta:
            foo = 'foo'

        def __init__(self, foo):
            self.foo = foo

    t = Test(foo='bar')
    assert t.foo == 'bar'


def test_too_many_args_check():
    # Surplus positional arguments raise a helpful TypeError.
    @with_meta
    class Test:
        class Meta:
            foo = 'foo'

        # noinspection PyUnusedLocal
        def __init__(self, foo):
            pass

    with pytest.raises(TypeError) as e:
        # noinspection PyArgumentList
        Test('foo', 'bar')

    assert 'Too many positional arguments' == str(e.value)


# noinspection PyArgumentEqualDefault
def test_add_init_kwargs():
    # add_init_kwargs=True (the default) feeds public Meta values to
    # __init__; underscore-prefixed Meta values are not passed.
    @with_meta(add_init_kwargs=True)
    class Test:
        class Meta:
            foo = 'bar'
            _bar = 'baz'

        def __init__(self, foo):
            assert 'bar' == foo

    # noinspection PyArgumentList
    Test()


def test_not_add_init_kwargs():
    # With add_init_kwargs=False, Meta is only reachable via get_meta().
    @with_meta(add_init_kwargs=False)
    class Test:
        class Meta:
            foo = 'bar'

        def __init__(self):
            assert self.get_meta().foo == 'bar'

    Test()
def test_namespaciness():
    # Dict-valued Meta attributes merge namespace-style across inheritance.
    @with_meta(add_init_kwargs=False)
    class Foo:
        class Meta:
            foo = {'bar': 17}

    class Bar(Foo):
        class Meta:
            foo = {'baz': 42}

    assert Bar().get_meta() == Namespace(
        foo__bar=17,
        foo__baz=42,
    )


def test_namespaciness_override():
    # Constructor dunder-kwargs merge with the Meta-provided namespace.
    @with_meta()
    class Foo:
        class Meta:
            foo = {'bar': 17}

        @dispatch
        def __init__(self, **kwargs):
            self.kwargs = kwargs

    assert Foo(foo__baz=42).kwargs == Namespace(
        foo__bar=17,
        foo__baz=42,
    )


def test_semantics_after_none_from_meta():
    # Meta 'actions = None' does not block later dunder-path kwargs.
    @with_meta
    class MyForm:
        class Meta:
            actions = None

        @dispatch
        def __init__(self, **kwargs):
            self.kwargs = kwargs

    form = MyForm(actions__magic__display_name="A magic button")
    assert form.kwargs == Namespace(actions__magic__display_name="A magic button")


def test_none_semantics_over_meta():
    # An explicit 'actions=None' argument wipes the Meta dunder values.
    @with_meta
    class MyForm:
        class Meta:
            actions__magic__display_name = "A magic button"

        @dispatch
        def __init__(self, **kwargs):
            self.kwargs = kwargs

    form = MyForm(actions=None)
    assert form.kwargs == Namespace(actions=None)
def test_dispatch_semantics_after_none_from_meta():
@with_meta
class MyForm:
class Meta:
actions = None
@dispatch(
actions__magic__display_name="A magic button",
)
def __init__(self, **kwargs):
self.kwargs = kwargs
form = MyForm()
assert form.kwargs == Namespace(actions=None)
def test_dispatch_none_semantics_after_meta():
    """A namespaced Meta value takes precedence over a @dispatch default of None."""
    @with_meta
    class MyForm:
        class Meta:
            actions__magic__display_name = "A magic button"
        @dispatch(
            actions=None,
        )
        def __init__(self, **kwargs):
            self.kwargs = kwargs
    form = MyForm()
    assert form.kwargs == Namespace(actions__magic__display_name="A magic button")
def test_dispatch_none_semantics_after_superclass_meta():
    """A subclass @dispatch default of None overrides the superclass Meta value."""
    @with_meta
    class MyForm:
        class Meta:
            actions__magic__display_name = "A magic button"
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
    class SubForm(MyForm):
        @dispatch(
            actions=None,
        )
        def __init__(self, **kwargs):
            self.kwargs = kwargs
    form = SubForm()
    assert form.kwargs == Namespace(actions=None)
def test_dispatch_semantics_after_none_superclass_meta():
    """A subclass namespaced @dispatch default overrides superclass Meta's None."""
    @with_meta
    class MyForm:
        class Meta:
            actions = None
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
    class SubForm(MyForm):
        @dispatch(
            actions__magic__display_name="A magic button",
        )
        def __init__(self, **kwargs):
            self.kwargs = kwargs
    form = SubForm()
    assert form.kwargs == Namespace(actions__magic__display_name="A magic button")
def test_meta_staticmethod():
    """A staticmethod defined on Meta survives onto get_meta() and stays callable."""
    @with_meta
    class Foo:
        class Meta:
            @staticmethod
            def foo(bar):
                return bar
        def __init__(self, **_):
            pass
    assert Foo().get_meta().foo(17) == 17
def test_meta_inheritance():
    """Meta may inherit its attributes from a plain (non-Meta) mixin class."""
    class TestMetaMixin:
        foo = 'bar'
    @with_meta
    class Test:
        class Meta(TestMetaMixin):
            pass
        def __init__(self, foo):
            assert foo == 'bar'
    # noinspection PyArgumentList
    Test()
def METHOD_NAME():
    """An attribute set directly on Meta overrides the inherited mixin value.

    NOTE(review): METHOD_NAME is a dataset placeholder; the original name is
    presumably test_meta_inheritance_override.
    """
    class TestMetaMixin:
        foo = 'bar'
    @with_meta
    class Test:
        class Meta(TestMetaMixin):
            foo = 'baz'
        def __init__(self, foo):
            assert foo == 'baz'
    # noinspection PyArgumentList
    Test()
# --- dataset record 7,396 | test url makes sense ok ---
from http import HTTPStatus
import pytest
import requests
import reconcile.checkpoint as sut
@pytest.fixture
def valid_app():
    """How a valid application looks like."""
    # All document URLs present plus a well-formed owner entry.
    # NOTE(review): not referenced by any test visible in this chunk.
    return {
        "sopsUrl": "https://www.redhat.com/sops",
        "architectureDocument": "https://www.redhat.com/arch",
        "grafanaUrl": "https://www.redhat.com/graf",
        "serviceOwners": [{"name": "A Name", "email": "aname@adomain.com"}],
    }
@pytest.fixture
def valid_owner():
    """How a valid owner looks like."""
    # A name plus a parseable user@domain email address.
    return {"name": "A Name", "email": "a.name@redhat.com"}
def invalid_owners():
    """List the ways in which an owner can be invalid."""
    # Three variants share the same name but carry a broken email; the
    # fourth has a fine email but no name.
    broken_emails = [None, "domainless", "@name.less"]
    owners = [{"name": "A Name", "email": email} for email in broken_emails]
    owners.append({"name": None, "email": "a-name@redhat.com"})
    return owners
def test_valid_owner(valid_owner) -> None:
    """Confirm that the valid owner is recognized as such."""
    # valid_owners takes a list; a single well-formed entry must pass.
    assert sut.valid_owners([valid_owner])
@pytest.mark.parametrize("invalid_owner", invalid_owners())
def test_invalid_owners(invalid_owner):
    """Confirm that the invalid owners are flagged."""
    # Each malformed owner must fail validation on its own.
    assert not sut.valid_owners([invalid_owner])
@pytest.mark.parametrize("invalid_owner", invalid_owners())
def test_invalid_owners_remain_invalid(valid_owner, invalid_owner):
    """Confirm rejection of invalid owners even mixed with good ones."""
    # One bad entry must poison the whole list despite the valid one.
    assert not sut.valid_owners([valid_owner, invalid_owner])
def METHOD_NAME(mocker):
    """Good URLs are accepted."""
    # NOTE(review): METHOD_NAME is a dataset placeholder; presumably
    # test_url_makes_sense_ok. requests.get is patched so no network
    # traffic occurs; the canned response carries HTTP 200.
    get = mocker.patch.object(requests, "get", autospec=True)
    r = requests.Response()
    r.status_code = HTTPStatus.OK
    get.return_value = r
    assert sut.url_makes_sense("https://www.redhat.com/existing")
def test_url_makes_sense_unknown(mocker):
    """Ensure rejection of URLs pointing to missing documents."""
    # Well-formed URL, but the (mocked) server answers 404.
    get = mocker.patch.object(requests, "get", autospec=True)
    r = requests.Response()
    r.status_code = HTTPStatus.NOT_FOUND
    get.return_value = r
    assert not sut.url_makes_sense("https://www.redhat.com/nonexisting")
def test_url_makes_sense_error():
    """Ensure rejection of URLs returning ConnectionError."""
    # NOTE(review): unlike the tests above, requests.get is NOT mocked here,
    # so this does a real DNS lookup for the bogus host "TODO" and relies on
    # it failing — flaky behind captive portals/catch-all DNS; consider
    # patching requests.get to raise ConnectionError instead.
    assert not sut.url_makes_sense("https://TODO")
def test_url_makes_sense_empty():
    """Ensure rejection of empty URLs."""
    # Degenerate input: no request should be needed to reject "".
    assert not sut.url_makes_sense("")
def test_render_template():
    """Confirm rendering of all placeholders in the ticket template."""
    # Each argument must surface verbatim somewhere in the rendered text.
    txt = sut.render_template(
        sut.MISSING_DATA_TEMPLATE, "aname", "apath", "afield", "avalue"
    )
    assert "aname" in txt
    assert "apath" in txt
    assert "afield" in txt
    assert "avalue" in txt
def app_metadata():
    """List some metadata for some fake apps.
    Returns the app structure and whether we expect it to have a
    ticket associated with it
    """
    complete_app = {
        "name": "appname",
        "sopsUrl": "https://www.somewhe.re",
        "architectureDocument": "https://www.hereand.now",
        "grafanaUrls": [],
    }
    # Missing field - should cut a ticket
    missing_field_app = {
        "name": "appname",
        "sopsUrl": "https://www.somewhe.re",
        "grafanaUrls": [],
    }
    # Bad field - should cut a ticket
    bad_field_app = {
        "name": "appname",
        "architectureDocument": "",
        "grafanaUrls": [],
        "sopsUrl": "http://www.herea.nd",
    }
    return [
        (complete_app, False),
        (missing_field_app, True),
        (bad_field_app, True),
    ]
@pytest.mark.parametrize("app,needs_ticket", app_metadata())
def test_report_invalid_metadata(mocker, app, needs_ticket):
    """Test that valid apps don't get tickets and that invalid apps do."""
    jira = mocker.patch.object(sut, "JiraClient", autospec=True)
    filer = mocker.patch.object(sut, "file_ticket", autospec=True)
    # Patch VALIDATORS through mocker so the real validators are restored
    # even when an assertion below fails; the hand-rolled save/restore this
    # replaces leaked the stub validators into subsequent tests on failure.
    mocker.patch.object(
        sut,
        "VALIDATORS",
        {
            "sopsUrl": bool,
            "architectureDocument": bool,
            "grafanaUrls": lambda _: True,
        },
    )
    sut.report_invalid_metadata(app, "/a/path", "jiraboard", {}, "TICKET-123")
    if needs_ticket:
        filer.assert_called_once_with(
            jira=jira.return_value,
            app_name=app["name"],
            labels=sut.DEFAULT_CHECKPOINT_LABELS,
            parent="TICKET-123",
            field="architectureDocument",
            bad_value=str(app.get("architectureDocument")),
            app_path="/a/path",
        )
    else:
        filer.assert_not_called()
@pytest.mark.parametrize("app,needs_ticket", app_metadata())
def test_report_invalid_metadata_dry_run(mocker, app, needs_ticket):
    """Test the dry-run mode: render the ticket text but file nothing."""
    renderer = mocker.patch.object(sut, "render_template", autospec=True)
    # Patch VALIDATORS through mocker so restoration happens even when an
    # assertion fails (the manual save/restore it replaces leaked on failure).
    mocker.patch.object(
        sut,
        "VALIDATORS",
        {
            "sopsUrl": bool,
            "architectureDocument": bool,
            "grafanaUrls": lambda _: True,
        },
    )
    sut.report_invalid_metadata(app, "/a/path", "jiraboard", {}, "TICKET-123", True)
    if needs_ticket:
        renderer.assert_called_once()
    else:
        renderer.assert_not_called()
# --- dataset record 7,397 | infer net ---
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
from net import DSIN_layer
class StaticModel():
    """Static-graph PaddleRec model wrapping the DSIN network.

    Reads hyper parameters from the yaml config, declares the static input
    tensors, builds the DSIN forward pass with a BCE loss and AUC metric,
    and exposes an Adam optimizer over that loss.
    """

    def __init__(self, config):
        # The training loss is produced by net() and consumed by
        # create_optimizer(); initialise the attribute that is actually read
        # (the original only set `self.cost`, which nothing else uses —
        # kept below for backward compatibility with external readers).
        self._cost = None
        self.cost = None
        self.config = config
        self._init_hyper_parameters()

    def _init_hyper_parameters(self):
        """Copy vocabulary sizes and embedding/optimizer settings from config."""
        self.user_size = self.config.get("hyper_parameters.user_size")
        self.cms_segid_size = self.config.get(
            "hyper_parameters.cms_segid_size")
        self.cms_group_size = self.config.get(
            "hyper_parameters.cms_group_size")
        self.final_gender_size = self.config.get(
            "hyper_parameters.final_gender_size")
        self.age_level_size = self.config.get(
            "hyper_parameters.age_level_size")
        self.pvalue_level_size = self.config.get(
            "hyper_parameters.pvalue_level_size")
        self.shopping_level_size = self.config.get(
            "hyper_parameters.shopping_level_size")
        self.occupation_size = self.config.get(
            "hyper_parameters.occupation_size")
        self.new_user_class_level_size = self.config.get(
            "hyper_parameters.new_user_class_level_size")
        self.adgroup_size = self.config.get("hyper_parameters.adgroup_size")
        self.cate_size = self.config.get("hyper_parameters.cate_size")
        self.campaign_size = self.config.get("hyper_parameters.campaign_size")
        self.customer_size = self.config.get("hyper_parameters.customer_size")
        self.brand_size = self.config.get("hyper_parameters.brand_size")
        self.pid_size = self.config.get("hyper_parameters.pid_size")
        self.feat_embed_size = self.config.get(
            "hyper_parameters.feat_embed_size")
        self.learning_rate = self.config.get(
            "hyper_parameters.optimizer.learning_rate", 0.008)

    def create_loss(self, pred, label):
        """Binary cross-entropy between sigmoid predictions and 0/1 labels."""
        return paddle.nn.BCELoss()(pred, label)

    def create_feeds(self, is_infer=False):
        """Declare the static-graph input tensors.

        Args:
            is_infer: unused; kept for interface parity with training callers.

        Returns:
            List of paddle.static.data placeholders:
            [sparse, dense, session, session_length, label].
        """
        sparse_input = paddle.static.data(
            name="sparse_tensor", shape=[None, 15], dtype="int64")
        dense_input = paddle.static.data(
            name="dense_tensor", shape=[None, ], dtype="float32")
        sess_input = paddle.static.data(
            name="sess_tensor", shape=[None, 10, 10], dtype="int64")
        sess_length_input = paddle.static.data(
            name="sess_length_tensor", shape=[None, ], dtype="int64")
        label = paddle.static.data(name="label", shape=[None, ], dtype="int64")
        return [
            sparse_input, dense_input, sess_input, sess_length_input, label
        ]

    def net(self, input, is_infer=False):
        """Build the DSIN forward graph.

        Args:
            input: feed list as produced by create_feeds(); the last entry
                is the label.
            is_infer: when True, only the AUC metric is returned and the
                training loss is not stored.

        Returns:
            Fetch dict with 'auc' (and 'cost' when training).
        """
        inputs, label = (input[0], input[1], input[2], input[3]), input[-1]
        label = label.reshape([-1, 1])
        DSIN_model = DSIN_layer(
            self.user_size,
            self.adgroup_size,
            self.pid_size,
            self.cms_segid_size,
            self.cms_group_size,
            self.final_gender_size,
            self.age_level_size,
            self.pvalue_level_size,
            self.shopping_level_size,
            self.occupation_size,
            self.new_user_class_level_size,
            self.campaign_size,
            self.customer_size,
            self.cate_size,
            self.brand_size,
            sparse_embed_size=self.feat_embed_size,
            l2_reg_embedding=1e-6)
        pred = DSIN_model.forward(inputs)
        loss = self.create_loss(pred, paddle.cast(label, "float32"))
        # Two-column [1 - p, p] layout as required by paddle.static.auc.
        predict_2d = paddle.concat(x=[1 - pred, pred], axis=1)
        auc, batch_auc, _ = paddle.static.auc(input=predict_2d,
                                              label=label,
                                              num_thresholds=2**12,
                                              slide_steps=20)
        auc = paddle.cast(auc, "float32")
        if is_infer:
            return {"auc": auc}
        self._cost = loss
        return {'auc': auc, 'cost': loss}

    def create_optimizer(self, strategy=None):
        """Minimize the training loss with Adam; wrap with fleet if distributed.

        Must be called after net() has been built in training mode, which
        populates self._cost.
        """
        optimizer = paddle.optimizer.Adam(
            learning_rate=self.learning_rate, lazy_mode=False)
        if strategy is not None:
            import paddle.distributed.fleet as fleet
            optimizer = fleet.distributed_optimizer(optimizer, strategy)
        optimizer.minimize(self._cost)

    def METHOD_NAME(self, input):
        """Inference entry point: forward pass without loss bookkeeping.

        NOTE(review): METHOD_NAME is a dataset placeholder; presumably
        infer_net in the original source.
        """
        return self.net(input, is_infer=True)
# --- dataset record 7,398 | import gridprop from init ---
from typing import List, Union
import ecl_data_io as eclio
from typing_extensions import Literal
from ._find_gridprop_in_eclrun import (
find_gridprop_from_init_file,
find_gridprops_from_restart_file,
)
def decorate_name(name, dual_porosity, fracture, date=None):
    """Decorate a property name with date and matrix/fracture.
    >>> decorate_name('PORO', True, False, 19991231)
    'POROM_19991231'
    """
    # Dual-porosity properties get an "F" (fracture) or "M" (matrix) suffix;
    # a date, when given, is appended after an underscore.
    parts = [name]
    if dual_porosity:
        parts.append("F" if fracture else "M")
    if date is not None:
        parts.append("_" + str(date))
    return "".join(parts)
def METHOD_NAME(pfile, name, grid, fracture=False):
    """Import one parameter with the given name from an init file.
    NOTE(review): METHOD_NAME is a dataset placeholder; presumably
    import_gridprop_from_init in the original source.
    Args:
        pfile: The init file.
        name: The name of the parameter
        grid: The grid used by the simulator to produce the init file.
        fracture: If a dual porosity module, indicates that the fracture
            (as opposed to the matrix) grid property should be imported.
    Raises:
        ValueError: If the parameter does not exist in the file, or
            matches more than one property.
    Returns:
        GridProperty parameter dictionary.
    """
    init_props = find_gridprop_from_init_file(pfile.file, [name], grid, fracture)
    # Distinguish "absent" from "ambiguous", mirroring
    # import_gridprop_from_restart (the original raised "Could not find"
    # for both cases).
    if len(init_props) == 0:
        raise ValueError(f"Could not find property {name} in {pfile}")
    if len(init_props) > 1:
        raise ValueError(f"Ambiguous property {name} in {pfile}")
    init_props[0]["name"] = decorate_name(
        init_props[0]["name"], grid.dualporo, fracture
    )
    return init_props[0]
def sanitize_date(
    date: Union[int, str, Literal["first", "last"]]
) -> Union[List[int], Literal["first", "last"]]:
    """
    Converts dateformats of the form 'YYYY-MM-DD', 'YYYYMMDD' or YYYYMMDD to
    list of integers of the form [YYYYMMDD] (ie. suitible for find_gridprops
    functions), but lets the special literals 'first' and 'last' remain
    unchanged.
    >>> sanitize_date('first')
    'first'
    >>> sanitize_date('last')
    'last'
    >>> sanitize_date('2020-01-01')
    [20200101]
    >>> sanitize_date('20200101')
    [20200101]
    >>> sanitize_date(20200101)
    [20200101]
    """
    # Guard-clause style: handle the cheap cases first.
    if isinstance(date, int):
        return [date]
    if date in ("first", "last"):
        return date
    if isinstance(date, str):
        # Strip the dashes of an ISO-style date before parsing.
        if len(date) == 10 and date[4] == "-" and date[7] == "-":
            date = date.replace("-", "")
        try:
            return [int(date)]
        except ValueError as err:
            raise ValueError(
                "valid dates are either of the "
                "form 'YYYY-MM-DD', 'YYYYMMDD' or 'first'/'last' "
                f"got {date}"
            ) from err
    # Any other type is passed through untouched, as before.
    return date
def sanitize_fformat(fformat: Literal["unrst", "funrst"]) -> eclio.Format:
    """Converts 'unrst' and 'funrst' to the corresponding eclio.Format.
    >>> sanitize_fformat('unrst')
    <Format.UNFORMATTED: 2>
    >>> sanitize_fformat('funrst')
    <Format.FORMATTED: 1>
    """
    # Table-driven lookup instead of an if-chain.
    known_formats = {
        "unrst": eclio.Format.UNFORMATTED,
        "funrst": eclio.Format.FORMATTED,
    }
    if fformat not in known_formats:
        raise ValueError(f"fformat must be either 'unrst' or 'funrst' got {fformat}")
    return known_formats[fformat]
def import_gridprop_from_restart(
    pfile,
    name: str,
    grid,
    date: Union[int, str, Literal["first", "last"]],
    fracture: bool = False,
    fformat: Literal["unrst", "funrst"] = "unrst",
):
    """Import one parameter for the given name and date in a restart file.
    Args:
        pfile: The restart file.
        name: The name of the parameter
        date: xtgeo style date (e.g. int(19990101) or "YYYYMMDD"), also
            accepts "YYYY-MM-DD". "last" and "first" can be given for
            last or first date in the file
        grid: The grid used by the simulator to produce the restart file.
        fracture: If a dual porosity module, indicates that the fracture
            (as opposed to the matrix) grid property should be imported.
        fformat: Either "unrst" (binary, default) or "funrst" (formatted).
    Raises:
        ValueError: If the parameter does not exist in the file or is
            ambiguous for the given date.
    Returns:
        GridProperty parameter dictionary.
    """
    restart_props = find_gridprops_from_restart_file(
        pfile.file,
        [name],
        sanitize_date(date),
        grid,
        fracture,
        sanitize_fformat(fformat),
    )
    if len(restart_props) == 0:
        raise ValueError(f"Could not find property {name} for {date} in {pfile.file}")
    if len(restart_props) > 1:
        raise ValueError(f"Ambiguous property {name} for {date} in {pfile.file}")
    # Decorated name carries the matrix/fracture marker and the actual date.
    restart_props[0]["name"] = decorate_name(
        restart_props[0]["name"], grid.dualporo, fracture, restart_props[0]["date"]
    )
    return restart_props[0]
# --- dataset record 7,399 | from stats json ---
from typing import TYPE_CHECKING, List, Optional, Sequence
import numpy as np
from rastervision.core.data.raster_transformer import RasterTransformer
from rastervision.core.raster_stats import RasterStats
from rastervision.pipeline.utils import repr_with_args
if TYPE_CHECKING:
from rastervision.core.data import RasterSource
class StatsTransformer(RasterTransformer):
    """Transforms non-uint8 to uint8 values using channel statistics.
    This works as follows:
    - Convert pixel values to z-scores using channel means and standard
    deviations.
    - Clip z-scores to the specified number of standard deviations (default 3)
    on each side.
    - Scale values to 0-255 and cast to uint8.
    This transformation is not applied to NODATA pixels (assumed to be pixels
    with all values equal to zero).
    """

    def __init__(self,
                 means: Sequence[float],
                 stds: Sequence[float],
                 max_stds: float = 3.):
        """Construct a new StatsTransformer.
        Args:
            means (Sequence[float]): Channel means.
            stds (Sequence[float]): Channel standard deviations.
            max_stds (float): Number of standard deviations to clip the
                distribution to on both sides. Defaults to 3.
        """
        # 1-D float arrays of length num_channels.
        self.means = np.array(means, dtype=float)
        self.stds = np.array(stds, dtype=float)
        self.max_stds = max_stds

    def transform(self,
                  chip: np.ndarray,
                  channel_order: Optional[Sequence[int]] = None) -> np.ndarray:
        """Transform a chip.
        Transforms non-uint8 to uint8 values using raster_stats.
        Args:
            chip: ndarray of shape [height, width, channels] This is assumed to already
                have the channel_order applied to it if channel_order is set. In other
                words, channels should be equal to len(channel_order).
            channel_order: list of indices of channels that were extracted from the
                raw imagery.
        Returns:
            [height, width, channels] uint8 numpy array
        """
        # Already uint8: nothing to normalize.
        if chip.dtype == np.uint8:
            return chip
        means = self.means
        stds = self.stds
        max_stds = self.max_stds
        if channel_order is not None:
            means = means[channel_order]
            stds = stds[channel_order]
        # Don't transform NODATA zero values; mask computed before scaling.
        nodata_mask = chip == 0
        # Subtract mean and divide by std to get zscores.
        chip = chip.astype(float)
        chip -= means
        chip /= stds
        # Make zscores that fall between -max_stds and max_stds span 0 to 255.
        # range: (-max_stds, max_stds)
        chip = np.clip(chip, -max_stds, max_stds, out=chip)
        # range: [0, 2 * max_stds]
        chip += max_stds
        # range: [0, 1]
        chip /= (2 * max_stds)
        # range: [0, 255]
        chip *= 255
        chip = chip.astype(np.uint8)
        chip[nodata_mask] = 0
        return chip

    @classmethod
    def from_raster_sources(cls,
                            raster_sources: List['RasterSource'],
                            sample_prob: Optional[float] = 0.1,
                            max_stds: float = 3.) -> 'StatsTransformer':
        """Build with stats from the given raster sources.
        Args:
            raster_sources (List[RasterSource]): List of raster sources to
                compute stats from.
            sample_prob (float, optional): Fraction of each raster to sample
                for computing stats. For details see docs for
                RasterStats.compute(). Defaults to 0.1.
            max_stds (float, optional): Number of standard deviations to clip
                the distribution to on both sides. Defaults to 3.
        Returns:
            StatsTransformer: A StatsTransformer.
        """
        stats = RasterStats()
        stats.compute(raster_sources=raster_sources, sample_prob=sample_prob)
        stats_transformer = StatsTransformer.from_raster_stats(
            stats, max_stds=max_stds)
        return stats_transformer

    @classmethod
    def METHOD_NAME(cls, uri: str, **kwargs) -> 'StatsTransformer':
        """Build with stats from a JSON file.
        The file is expected to be in the same format as written by
        :meth:`.RasterStats.save`.
        NOTE(review): METHOD_NAME is a dataset placeholder; presumably
        from_stats_json in the original source.
        Args:
            uri (str): URI of the JSON file.
            **kwargs: Extra args for :meth:`.__init__`.
        Returns:
            StatsTransformer: A StatsTransformer.
        """
        stats = RasterStats.load(uri)
        stats_transformer = StatsTransformer.from_raster_stats(stats, **kwargs)
        return stats_transformer

    @classmethod
    def from_raster_stats(cls, stats: RasterStats,
                          **kwargs) -> 'StatsTransformer':
        """Build with stats from a :class:`.RasterStats` instance.
        Args:
            stats (RasterStats): A :class:`.RasterStats` instance with
                non-None stats.
            **kwargs: Extra args for :meth:`.__init__`.
        Returns:
            StatsTransformer: A StatsTransformer.
        """
        stats_transformer = StatsTransformer(stats.means, stats.stds, **kwargs)
        return stats_transformer

    @property
    def stats(self) -> RasterStats:
        """Current statistics as a :class:`.RasterStats` instance."""
        return RasterStats(self.means, self.stds)

    def __repr__(self) -> str:
        # Keyword names match the constructor's parameters (the original
        # passed std=, which did not round-trip against __init__'s stds).
        return repr_with_args(
            self, means=self.means, stds=self.stds, max_stds=self.max_stds)
# (Non-code residue from the dataset viewer page, preserved as comments:)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.