id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
7,100 | test nominal and do not remember me | from django.conf import settings
from django.urls import reverse
from django.test import TestCase
from django.utils.html import escape
from zds.member.forms import LoginForm
from zds.member.models import Profile
from zds.member.tests.factories import ProfileFactory, NonAsciiProfileFactory
class LoginTests(TestCase):
    """Integration tests for the login view: nominal flows, form errors,
    account-state errors, and post-login redirection handling."""

    def setUp(self):
        self.profile = ProfileFactory()  # associated user is activated by default
        self.correct_username = self.profile.user.username
        self.wrong_username = "I_do_not_exist"
        self.correct_password = "hostel77"
        self.wrong_password = "XXXXX"
        self.login_url = reverse("member-login")
        self.test_ip = "192.168.0.110"  # must be different from the one set by the factory to test actual change
        # NOTE(review): this reads the factory *class* attribute (the declared
        # default), not the created profile's value — presumably equivalent here;
        # confirm `self.profile.last_ip_address` was not intended.
        self.assertNotEqual(self.test_ip, ProfileFactory.last_ip_address)
        # NOTE(review): mutating settings in place leaks into other test cases;
        # django.test.override_settings would be safer.
        settings.SESSION_COOKIE_AGE = 1337

    def test_form_action_redirect(self):
        """The form shall have the 'next' parameter in the action url of the form."""
        next_fragment = "?next=" + reverse("member-detail", args=[self.correct_username])
        full_url = self.login_url + next_fragment
        result = self.client.get(full_url, follow=False)
        self.assertContains(result, f'action="{full_url}"')

    def test_nominal_and_remember_me(self):
        """
        Nominal case: existing username, correct password, activated user, 'remember me' checked.
        Expected: successful login, redirect to homepage, session expiration age set.
        """
        result = self.client.post(
            self.login_url,
            {
                "username": self.correct_username,
                "password": self.correct_password,
                "remember": "remember",
            },
            follow=False,
            REMOTE_ADDR=self.test_ip,
        )
        self.assertRedirects(result, reverse("homepage"))
        # Check cookie setting
        self.assertFalse(self.client.session.get_expire_at_browser_close())
        self.assertEqual(self.client.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
        # Check IP recording
        profile = Profile.objects.get(user=self.profile.user)
        self.assertEqual(profile.last_ip_address, self.test_ip)

    def test_nominal_and_do_not_remember_me(self):
        """
        Nominal case: existing username, correct password, activated user, 'remember me' not checked.
        Expected: successful login, redirect to homepage, session expiration at browser closing.
        """
        result = self.client.post(
            self.login_url,
            {
                "username": self.correct_username,
                "password": self.correct_password,
            },
            follow=False,
        )
        self.assertRedirects(result, reverse("homepage"))
        self.assertTrue(self.client.session.get_expire_at_browser_close())

    def test_nonascii(self):
        """
        Edge case: similar to nominal, but with non-ascii username and redirect to profile.
        Expected: successful login and redirect to profile.
        """
        user = NonAsciiProfileFactory()
        # NOTE(review): no credentials are posted here, so this only checks the
        # page renders (200) for a non-ascii 'next' target; the docstring's
        # "successful login" claim looks stale — confirm intended coverage.
        result = self.client.post(
            self.login_url + "?next=" + reverse("member-detail", args=[user.user.username]),
            follow=False,
        )
        self.assertEqual(result.status_code, 200)

    def test_empty_username_or_password(self):
        """
        Error case: empty username and password.
        Expected: cannot log in, errors associated to empty username and password.
        """
        result = self.client.post(
            self.login_url,
            {
                "username": "",
                "password": "",
                "remember": "remember",
            },
            follow=False,
        )
        self.assertContains(result, escape("Ce champ est obligatoire"), count=2)

    def test_bad_username(self):
        """
        Error case: bad username, password not relevant.
        Expected: cannot log in, error associated to bad username.
        """
        result = self.client.post(
            self.login_url,
            {
                "username": self.wrong_username,
                "password": self.wrong_password,
                "remember": "remember",
            },
            follow=False,
        )
        self.assertContains(result, escape(LoginForm.error_messages["invalid_login"]))

    def test_inactive_account(self):
        """
        Error case: correct username, but inactive account.
        Expected: cannot log in, error associated to inactive account.
        """
        self.profile.user.is_active = False
        self.profile.user.save()
        result = self.client.post(
            self.login_url,
            {
                "username": self.correct_username,
                "password": self.correct_password,
                "remember": "remember",
            },
            follow=False,
        )
        # Only a prefix of the message is checked, since the full text may be
        # split across markup in the rendered page.
        self.assertContains(result, escape(LoginForm.error_messages["inactive"][:20]))

    def test_correct_username_bad_password(self):
        """
        Error case: existing username, activated account, but wrong password.
        Expected: cannot log in, error associated to wrong password.
        """
        result = self.client.post(
            self.login_url,
            {
                "username": self.correct_username,
                "password": self.wrong_password,
                "remember": "remember",
            },
            follow=False,
        )
        self.assertContains(result, escape(LoginForm.error_messages["invalid_login"]))

    def test_banned_user(self):
        """
        Error case: correct username, activated user, correct password, but banned user.
        Expected: cannot log in, error associated with the ban.
        """
        # Equivalent to a permanently banned user
        self.profile.can_read = False
        self.profile.save()
        result = self.client.post(
            self.login_url,
            {
                "username": self.correct_username,
                "password": self.correct_password,
                "remember": "remember",
            },
            follow=False,
        )
        self.assertContains(result, escape(LoginForm.error_messages["banned"]))

    def test_redirection_good_target(self):
        """Nominal case: redirection to an existing page with the parameter 'next'."""
        result = self.client.post(
            self.login_url + "?next=" + reverse("gallery:list"),
            {
                "username": self.correct_username,
                "password": self.correct_password,
                "remember": "remember",
            },
            follow=False,
        )
        self.assertRedirects(result, reverse("gallery:list"))

    def test_redirection_bad_target(self):
        """Case failing gracefully: redirection to homepage when 'next' points to a non-existing page."""
        result = self.client.post(
            self.login_url + "?next=/this_does_not_exist",
            {
                "username": self.correct_username,
                "password": self.correct_password,
                "remember": "remember",
            },
            follow=False,
        )
        self.assertRedirects(result, reverse("homepage"))

    def test_redirection_loop_avoidance(self):
        """
        Case failing gracefully: redirection to homepage when 'next' risks creating a redirection loop.
        """
        result = self.client.post(
            self.login_url + "?next=" + self.login_url,
            {
                "username": self.correct_username,
                "password": self.correct_password,
                "remember": "remember",
            },
            follow=False,
        )
        self.assertRedirects(result, reverse("homepage"))
7,101 | run | import json
import cv2
import base64
import threading
import time
from datetime import datetime
from websocket_server import WebsocketServer
import logging
import os
from interfaces.pose3d import ListenerPose3d
from interfaces.laser import ListenerLaser
from lap import Lap
from map import Map
# Graphical User Interface Class
# Graphical User Interface Class
class GUI:
    """Websocket bridge between the robot code and a browser GUI.

    Streams the camera image, lap status and map state to a single connected
    client, and tracks per-frame acknowledgements from the browser.
    """

    # Class-level default; shadowed by the instance attribute set in __init__.
    map = None

    # Initialization function
    # The actual initialization
    def __init__(self, host, hal):
        # Server thread is created first and started below; callbacks only
        # fire once a client connects.
        t = threading.Thread(target=self.run_server)
        self.payload = {'image': '', 'lap': '', 'map': ''}
        self.server = None
        self.client = None
        self.host = host
        # Image variables
        self.image_to_be_shown = None
        self.image_to_be_shown_updated = False
        self.image_show_lock = threading.Lock()
        self.acknowledge = False
        self.acknowledge_lock = threading.Lock()
        # Take the console object to set the same websocket and client
        self.hal = hal
        t.start()
        # Create the map object
        # NOTE(review): the server thread starts before self.map/self.lap are
        # assigned; update_gui() called before this point would hit None —
        # presumably callers wait for a client first; confirm.
        laser_object = ListenerLaser("/F1ROS/laser/scan")
        pose3d_object = ListenerPose3d("/F1ROS/odom")
        self.map = Map(laser_object, pose3d_object)
        # Create the lap object
        self.lap = Lap(self.map)

    # Explicit initialization function
    # Class method, so user can call it without instantiation
    @classmethod
    def initGUI(self):
        # self.payload = {'image': '', 'shape': []}
        pass

    # Function to prepare image payload
    # Encodes the image as a JSON string and sends through the WS
    def payloadImage(self):
        # Snapshot the shared image state under the lock, then work on locals.
        self.image_show_lock.acquire()
        image_to_be_shown_updated = self.image_to_be_shown_updated
        image_to_be_shown = self.image_to_be_shown
        self.image_show_lock.release()
        image = image_to_be_shown
        payload = {'image': '', 'shape': ''}
        # Nothing new to show: return an empty payload.
        if(image_to_be_shown_updated == False):
            return payload
        shape = image.shape
        # JPEG-compress then base64-encode for transport inside JSON.
        frame = cv2.imencode('.JPEG', image)[1]
        encoded_image = base64.b64encode(frame)
        payload['image'] = encoded_image.decode('utf-8')
        payload['shape'] = shape
        # Mark the image as consumed.
        self.image_show_lock.acquire()
        self.image_to_be_shown_updated = False
        self.image_show_lock.release()
        return payload

    # Function for student to call
    def showForces(self, vec1, vec2, vec3):
        # vec1/vec2/vec3: (x, y) pairs for car, obstacle and average forces.
        self.map.setCar(vec1[0], vec1[1])
        self.map.setObs(vec2[0], vec2[1])
        self.map.setAvg(vec3[0], vec3[1])

    # Function for student to call
    def showLocalTarget(self, newVec):
        self.map.setTargetPos(newVec[0], newVec[1])

    # Function for student to call
    def showImage(self, image):
        # Publish a new image for the next update_gui() cycle.
        self.image_show_lock.acquire()
        self.image_to_be_shown = image
        self.image_to_be_shown_updated = True
        self.image_show_lock.release()

    # Function to get the client
    # Called when a new client is received
    def get_client(self, client, server):
        self.client = client

    # Function to get value of Acknowledge
    def get_acknowledge(self):
        self.acknowledge_lock.acquire()
        acknowledge = self.acknowledge
        self.acknowledge_lock.release()
        return acknowledge

    # Function to set value of Acknowledge
    def set_acknowledge(self, value):
        self.acknowledge_lock.acquire()
        self.acknowledge = value
        self.acknowledge_lock.release()

    # Update the gui
    def update_gui(self):
        # Payload Image Message
        payload = self.payloadImage()
        self.payload['image'] = json.dumps(payload)
        # Payload Lap Message
        lapped = self.lap.check_threshold()
        lap_message = ""
        if(lapped != None):
            self.payload["lap"] = str(lapped)
        # Payload Map Message
        map_message = self.map.get_json_data()
        self.payload["map"] = map_message
        # "#gui" prefix tags the message type for the browser side.
        message = "#gui" + json.dumps(self.payload)
        self.server.send_message(self.client, message)

    # Function to read the message from websocket
    # Gets called when there is an incoming message from the client
    def get_message(self, client, server, message):
        # Acknowledge Message for GUI Thread
        if(message[:4] == "#ack"):
            self.set_acknowledge(True)

    # Activate the server
    def run_server(self):
        self.server = WebsocketServer(port=2303, host=self.host)
        self.server.set_fn_new_client(self.get_client)
        self.server.set_fn_message_received(self.get_message)
        # Signal readiness through a log file; retry while the path is busy.
        home_dir = os.path.expanduser('~')
        logged = False
        while not logged:
            try:
                f = open(f"{home_dir}/ws_gui.log", "w")
                f.write("websocket_gui=ready")
                f.close()
                logged = True
            except:
                # NOTE(review): bare except silently retries on any error,
                # including KeyboardInterrupt — consider narrowing to OSError.
                time.sleep(0.1)
        self.server.run_forever()

    # Function to reset
    def reset_gui(self):
        self.lap.reset()
        self.map.reset()
# This class decouples the user thread
# and the GUI update thread
# This class decouples the user thread
# and the GUI update thread
class ThreadGUI:
    """Runs the GUI update loop and its frequency measurement on background threads."""

    def __init__(self, gui):
        self.gui = gui
        # Time variables (milliseconds)
        self.ideal_cycle = 80
        self.measured_cycle = 80
        self.iteration_counter = 0

    # Function to start the execution of threads
    def start(self):
        """Spawn the frequency-measurement thread and the GUI update thread."""
        # NOTE: this rebinds the attribute name over the bound method of the
        # same name; the Thread target is captured first, so it still runs,
        # but measure_thread() is unreachable via the instance afterwards.
        self.measure_thread = threading.Thread(target=self.measure_thread)
        self.thread = threading.Thread(target=self.run)
        self.measure_thread.start()
        self.thread.start()
        print("GUI Thread Started!")

    # The measuring thread to measure frequency
    def measure_thread(self):
        """Estimate the real per-iteration GUI cycle time, sampled every 2 seconds."""
        # Wait for a websocket client without spinning a full core.
        while self.gui.client is None:
            time.sleep(0.1)
        previous_time = datetime.now()
        while True:
            # Sleep for 2 seconds
            time.sleep(2)
            # Measure the current time and subtract from previous time to get real time interval
            current_time = datetime.now()
            dt = current_time - previous_time
            ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
            previous_time = current_time
            # Average period per iteration; no iterations in the window -> 0
            try:
                self.measured_cycle = ms / self.iteration_counter
            except ZeroDivisionError:
                self.measured_cycle = 0
            # Reset the counter
            self.iteration_counter = 0

    # The main thread of execution
    def run(self):
        """Push GUI updates, wait for the client acknowledge, and pace the loop."""
        # Wait for a websocket client without spinning a full core.
        while self.gui.client is None:
            time.sleep(0.1)
        while True:
            start_time = datetime.now()
            self.gui.update_gui()
            # Busy-wait until the browser acknowledges the previous frame.
            acknowledge_message = self.gui.get_acknowledge()
            while acknowledge_message is False:
                acknowledge_message = self.gui.get_acknowledge()
            self.gui.set_acknowledge(False)
            finish_time = datetime.now()
            self.iteration_counter = self.iteration_counter + 1
            # Sleep off the remainder of the ideal cycle time
            dt = finish_time - start_time
            ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
            if ms < self.ideal_cycle:
                time.sleep((self.ideal_cycle - ms) / 1000.0)
7,102 | error page | import logging
import sys
from datetime import timedelta
from functools import update_wrapper
from json import dumps
import user_agents
from flask import Response, current_app, make_response, render_template, request
from dallinger.config import get_config
logger = logging.getLogger(__name__)
def crossdomain(
    origin=None,
    methods=None,
    headers=None,
    max_age=21600,
    attach_to_all=True,
    automatic_options=True,
):
    """Decorator factory that adds CORS headers to a Flask view.

    origin: allowed origin(s) — a string or an iterable of strings.
    methods: iterable of HTTP methods to advertise (default: Flask's own).
    headers: allowed request headers — a string or an iterable of strings.
    max_age: preflight cache lifetime in seconds, or a timedelta.
    attach_to_all: add the headers to every response, not only OPTIONS.
    automatic_options: answer OPTIONS preflights with Flask's default response.
    """
    if methods is not None:
        methods = ", ".join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, str):
        headers = ", ".join(x.upper() for x in headers)
    # NOTE(review): if origin is left as None (the default), this join raises
    # TypeError — callers apparently always pass origin; confirm.
    if not isinstance(origin, str):
        origin = ", ".join(origin)
    if isinstance(max_age, timedelta):
        max_age = max_age.total_seconds()

    def get_methods():
        # Explicitly configured methods, else those Flask would allow.
        if methods is not None:
            return methods
        options_resp = current_app.make_default_options_response()
        return options_resp.headers["allow"]

    def decorator(f):
        def wrapped_function(*args, **kwargs):
            if automatic_options and request.method == "OPTIONS":
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != "OPTIONS":
                return resp
            h = resp.headers
            h["Access-Control-Allow-Origin"] = origin
            h["Access-Control-Allow-Methods"] = get_methods()
            h["Access-Control-Max-Age"] = str(max_age)
            if headers is not None:
                h["Access-Control-Allow-Headers"] = headers
            return resp

        # Let our wrapper (not Flask) handle OPTIONS requests.
        f.provide_automatic_options = False
        return update_wrapper(wrapped_function, f)

    return decorator
def date_handler(obj):
    """JSON ``default`` hook: serialize date/datetime values via ISO 8601.

    Non-date values are returned unchanged (json.dumps will then raise a
    clear error for genuinely unserializable objects).
    """
    # Bug fix: the original returned the builtin `object` class for non-date
    # values, instead of the value itself.
    return obj.isoformat() if hasattr(obj, "isoformat") else obj
def nocache(func):
    """Decorator that disables client-side caching for the wrapped view."""

    def wrapper(*args, **kwargs):
        """Build the response, then mark it as non-cacheable."""
        response = make_response(func(*args, **kwargs))
        response.cache_control.no_cache = True
        return response

    return update_wrapper(wrapper, func)
class ExperimentError(Exception):
    """
    Error class for experimental errors, such as subject not being found in
    the database.
    """

    # Mapping from symbolic error name to numeric error code.
    experiment_errors = {
        "status_incorrectly_set": 1000,
        "hit_assign_worker_id_not_set_in_mturk": 1001,
        "hit_assign_worker_id_not_set_in_consent": 1002,
        "hit_assign_worker_id_not_set_in_exp": 1003,
        "hit_assign_appears_in_database_more_than_once": 1004,
        "hit_assign_worker_id_not_set_by_recruiter": 1006,
        "already_started_exp": 1008,
        "already_started_exp_mturk": 1009,
        "already_did_exp_hit": 1010,
        "tried_to_quit": 1011,
        "intermediate_save": 1012,
        "improper_inputs": 1013,
        "browser_type_not_allowed": 1014,
        "api_server_not_reachable": 1015,
        "ad_not_found": 1016,
        "error_setting_worker_complete": 1017,
        "hit_not_registered_with_ad_server": 1018,
        "template_unsafe": 1019,
        "insert_mode_failed": 1020,
        "page_not_found": 404,
        "in_debug": 2005,
        "unknown_error": 9999,
    }

    def __init__(self, value):
        # An unknown error name raises KeyError, as before.
        self.value = value
        self.errornum = self.experiment_errors[value]
        self.template = "error.html"

    def __str__(self):
        return repr(self.value)
class ValidatesBrowser(object):
    """Checks if participant's browser has been excluded via the Configuration."""

    def __init__(self, config):
        self.config = config

    @property
    def exclusions(self):
        """Return list of browser exclusion rules defined in the Configuration."""
        raw_rules = self.config.get("browser_exclude_rule", "")
        return [rule.strip() for rule in raw_rules.split(",") if rule.strip()]

    def is_supported(self, user_agent_string):
        """Check user agent against configured exclusions."""
        agent = user_agents.parse(user_agent_string)
        # Device-class rules map to boolean properties of the parsed agent;
        # any other rule is treated as a raw substring of the UA string.
        device_checks = {
            "mobile": agent.is_mobile,
            "tablet": agent.is_tablet,
            "touchcapable": agent.is_touch_capable,
            "pc": agent.is_pc,
            "bot": agent.is_bot,
        }
        for rule in self.exclusions:
            if rule in device_checks:
                if device_checks[rule]:
                    return False
            elif rule in user_agent_string:
                return False
        return True
"""Define some canned response types."""
def success_response(**data):
    """Return a generic success response."""
    # Keyword data may deliberately override the default "success" status.
    payload = {"status": "success", **data}
    body = dumps(payload, default=date_handler)
    return Response(body, status=200, mimetype="application/json")
def error_response(
    error_type="Internal server error",
    error_text="",
    status=400,
    participant=None,
    simple=False,
    request_data="",
):
    """Return a generic server error response.

    When called from inside an exception handler, the active traceback is
    logged together with the failing request's query arguments. ``simple``
    selects a plain-text message over a fully rendered HTML error page.
    """
    last_exception = sys.exc_info()
    if last_exception[0]:
        logger.error(
            "Failure for request: {!r}".format(dict(request.args)),
            exc_info=last_exception,
        )
    data = {"status": "error"}
    if simple:
        # Plain message only (for API clients).
        data["message"] = error_text
    else:
        # Full rendered HTML error page, embedded in the JSON payload.
        data["html"] = (
            METHOD_NAME(
                error_text=error_text,
                error_type=error_type,
                participant=participant,
                request_data=request_data,
            )
            .get_data()
            .decode("utf-8")
        )
    return Response(dumps(data), status=status, mimetype="application/json")
def error_page(
    participant=None,
    error_text=None,
    compensate=True,
    error_type="default",
    request_data="",
):
    """Render HTML for error page.

    Participant details are taken from the participant object when given,
    otherwise from the posted form fields. Always returns a 500 response.
    """
    config = _config()
    if error_text is None:
        error_text = "There has been an error and so you are unable to continue, sorry!"
    if participant is not None:
        hit_id = participant.hit_id
        assignment_id = participant.assignment_id
        worker_id = participant.worker_id
        participant_id = participant.id
    else:
        hit_id = request.form.get("hit_id", "")
        assignment_id = request.form.get("assignment_id", "")
        worker_id = request.form.get("worker_id", "")
        participant_id = request.form.get("participant_id", None)
        if participant_id:
            # Form values arrive as strings; fall back to None when malformed.
            try:
                participant_id = int(participant_id)
            except (ValueError, TypeError):
                participant_id = None
    return make_response(
        render_template(
            "error.html",
            error_text=error_text,
            compensate=compensate,
            contact_address=config.get("contact_email_on_error"),
            error_type=error_type,
            hit_id=hit_id,
            assignment_id=assignment_id,
            worker_id=worker_id,
            request_data=request_data,
            participant_id=participant_id,
        ),
        500,
    )


# Backward-compatible alias: existing call sites still use the placeholder name.
METHOD_NAME = error_page
def _config():
    """Return the Dallinger configuration, loading it on first access."""
    configuration = get_config()
    if not configuration.ready:
        configuration.load()
    return configuration
7,103 | create finding from project | # SPDX-FileCopyrightText: the secureCodeBox authors
#
# SPDX-License-Identifier: Apache-2.0
import argparse
import logging
from datetime import datetime
from typing import List, Optional
import gitlab
from gitlab.v4.objects import Project, ProjectManager
from git_repo_scanner.abstract_scanner import AbstractScanner, FINDING
logger = logging.getLogger("git_repo_scanner")
class GitLabScanner(AbstractScanner):
    """Scanner that lists GitLab projects and turns each into a finding."""

    LOGGER = logging.getLogger("git_repo_scanner")

    def __init__(
        self,
        url: str,
        access_token: str,
        group: Optional[int],
        ignored_groups: List[int],
        ignore_repos: List[int],
        obey_rate_limit: bool = True,
        annotate_latest_commit_id: bool = False,
    ) -> None:
        super().__init__()
        if not url:
            raise argparse.ArgumentError(None, "URL required for GitLab connection.")
        if not access_token:
            raise argparse.ArgumentError(
                None, "Access token required for GitLab authentication."
            )
        self._url = url
        self._access_token = access_token
        self._group = group
        self._ignored_groups = ignored_groups
        self._ignore_repos = ignore_repos
        self._obey_rate_limit = obey_rate_limit
        self._annotate_latest_commit_id = annotate_latest_commit_id
        self._gl: Optional[gitlab.Gitlab] = None

    @property
    def git_type(self) -> str:
        return "GitLab"

    def process(
        self, start_time: Optional[datetime] = None, end_time: Optional[datetime] = None
    ) -> List[FINDING]:
        """Authenticate, list matching projects and convert them to findings."""
        self._authenticate()
        projects: List[Project] = self._get_projects(start_time, end_time)
        return self._process_projects(projects)

    def _group_project_to_project(self, group_project):
        # The GitLab API library gives us a GroupProject object, which has limited functionality.
        # This function turns the GroupProject into a "real" project, which allows us to get the
        # list of commits and include the SHA1 of the latest commit in the output later
        return self._gl.projects.get(group_project.id, lazy=True)

    def _get_projects(
        self, start_time: Optional[datetime], end_time: Optional[datetime]
    ):
        """List projects (optionally restricted to a group and an activity window)."""
        logger.info(
            f"Get GitLab repositories with last activity between {start_time} and {end_time}."
        )
        project_manager: ProjectManager = self._gl.projects
        options = dict(
            all=True,
            order_by="last_activity_at",
            sort="desc",
            obey_rate_limit=self._obey_rate_limit,
            max_retries=12,
        )
        if start_time is not None:
            options["last_activity_after"] = start_time
        if end_time is not None:
            options["last_activity_before"] = end_time
        if self._group:
            options["include_subgroups"] = True
            project_manager = self._gl.groups.get(self._group).projects
        return project_manager.list(**options)

    def _process_projects(self, projects: List[Project]) -> List[FINDING]:
        """Turn every non-ignored project into a finding."""
        project_count = len(projects)
        return [
            self._create_finding_from_project(project, i, project_count)
            for i, project in enumerate(projects)
            if self._is_not_ignored(project)
        ]

    def _authenticate(self):
        """Authenticate with a private token, falling back to OAuth on failure."""
        logger.info("Start GitLab authentication")
        try:
            self._gl = gitlab.Gitlab(self._url, private_token=self._access_token)
            self._gl.auth()
        except gitlab.exceptions.GitlabAuthenticationError:
            self._gl = gitlab.Gitlab(self._url, oauth_token=self._access_token)
            self._gl.auth()
        logger.info("GitLab authentication succeeded")

    def _is_not_ignored(self, project: Project) -> bool:
        """Return False for explicitly ignored repositories and group namespaces."""
        id_project = project.id
        kind = project.namespace["kind"]
        id_namespace = project.namespace["id"]
        if id_project in self._ignore_repos:
            return False
        if kind == "group" and id_namespace in self._ignored_groups:
            return False
        return True

    def _create_finding_from_project(
        self, project: Project, index: int, total: int
    ) -> FINDING:
        """Build a FINDING for one project, optionally annotated with its HEAD commit."""
        logger.info(
            f"({index + 1}/{total}) Add finding for repo {project.name} with last activity at "
            f"{datetime.fromisoformat(project.last_activity_at)}"
        )
        # Retrieve the latest commit ID
        latest_commit_id: Optional[str] = None
        if self._annotate_latest_commit_id:
            try:
                latest_commit_id = (
                    self._group_project_to_project(project).commits.list()[0].id
                )
            except Exception as e:
                # logging.warn is deprecated; log the cause for diagnosis.
                logger.warning(
                    "Could not identify the latest commit ID - repository without commits? (%s)",
                    e,
                )
                latest_commit_id = ""
        return super()._create_finding(
            project.id,
            project.web_url,
            project.path_with_namespace,
            project.namespace["kind"],
            project.namespace["id"],
            project.namespace["name"],
            project.created_at,
            project.last_activity_at,
            project.visibility,
            project.archived,
            project.topics,
            latest_commit_id,
        )
7,104 | transform | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from onnx import ModelProto, helper, numpy_helper
from sparseml.exporters.transforms.onnx_transform import OnnxTransform
from sparseml.exporters.transforms.utils import (
INITIALIZER_MATCH,
MatchResult,
any_of,
get_quantization_params,
get_structural_matches,
optional_node,
quantize_array,
)
from sparseml.onnx.utils import ONNXGraph, get_node_attributes, get_node_output_nodes
__all__ = ["GemmToQLinearMatMul"]
class GemmToQLinearMatMul(OnnxTransform):
    """
    Transforms Gemm nodes to QLinearMatMul.

    NOTE: Does not match if the structure is
    1. `Gemm -> QuantizeLinear -> DequantizeLinear -> Gemm`
    2. `Gemm -> QuantizeLinear -> DequantizeLinear -> Softmax`

    Transforms
    ```
    | weight (initializer)
    |   |
    | input     Q
    |   |       |
    | Q/Dq      Dq  optional bias (initializer)
    |   |       |   |
    |        Gemm
    |         |
    |   optional Q/Dq
    ```
    (where `Q` is QuantizeLinear, and `Dq` is DequantizeLinear)
    into
    ```
    input
      |
    QLinearMatMul
      |
      Dq  bias (initializer)
      |   |
       Add
    ```
    """

    def transform(self, model: ModelProto) -> ModelProto:
        """Entry point: find all matching Gemm patterns and rewrite each one."""
        graph = ONNXGraph(model)
        matches = get_structural_matches(
            graph,
            op_type="Gemm",
            parent_ops=[
                [any_of("QuantizeLinear", "DequantizeLinear")],
                [INITIALIZER_MATCH, "QuantizeLinear", "DequantizeLinear"],
            ],
            children_ops=[
                [
                    any_of("QuantizeLinear", "DequantizeLinear"),
                    optional_node("DequantizeLinear"),
                ]
            ],
        )
        for match in matches:
            gemm_attributes = get_node_attributes(match.node)
            if any(float(attribute) != 1.0 for attribute in gemm_attributes.values()):
                # can only handle Gemm operations without alpha/beta/transB set
                continue
            output_dequant = match.children[0][1]
            if output_dequant is not None:
                output_dequant_child = graph.get_node_single_child(output_dequant)
                if output_dequant_child and output_dequant_child.op_type in {
                    "Gemm",
                    "Softmax",
                }:
                    # output quant is not a QDQ block for the current Gemm Node but,
                    # the input QDQ block for a new Gemm block this Gemm should be
                    # skipped and processed by _convert_quantizable_gemm_no_activations
                    continue
            self.log_match(match)
            self._transform_match(model, match)
        return model

    def _transform_match(self, model: ModelProto, match: MatchResult):
        """Rewrite one matched Gemm into QLinearMatMul (+ optional bias Add)."""
        gemm_node = match.node
        (input_quant,) = match.parents[0]
        _, weight_quant, weight_dequant = match.parents[1]
        (output_quant, opt_output_dequant) = match.children[0]
        # can fold the input/output quant ops if they are trivial
        fold_input_quant = input_quant.op_type == "DequantizeLinear"
        fold_output_quant = output_quant.op_type == "QuantizeLinear"
        weight_quantize_params = get_quantization_params(
            model, weight_quant, include_target=True
        )
        # sanity check - matching will handle this
        assert weight_quantize_params.target is not None
        # quantize weight
        quantized_weight = quantize_array(
            weight_quantize_params.target,
            weight_quantize_params.scale,
            weight_quantize_params.zero_point,
            weight_quantize_params.zero_point.dtype,
        )
        quantized_weight = quantized_weight.transpose()  # Gemm has implicit transpose
        quantized_weight_name = "{}.weight_quantized".format(gemm_node.name)
        quantized_weight_initializer = numpy_helper.from_array(
            quantized_weight, name=quantized_weight_name
        )
        model.graph.initializer.append(quantized_weight_initializer)
        # get qmatmul inputs and outputs
        qmatmul_input = input_quant.input[0] if fold_input_quant else gemm_node.input[0]
        qmatmul_inputs = [
            qmatmul_input,  # x
            input_quant.input[1],  # x_scale
            input_quant.input[2],  # x_zero_point
            quantized_weight_name,  # w
            weight_quant.input[1],  # w_scale
            weight_quant.input[2],  # w_zero_point
            output_quant.input[1],  # y_scale
            output_quant.input[2],  # y_zero_point
        ]
        # create qmatmul node and add it to graph
        qmatmul_name = f"{gemm_node.name}_quant"
        qmatmul_output = (
            output_quant.output[0] if fold_output_quant else gemm_node.output[0]
        )
        qmatmul_node = helper.make_node(
            "QLinearMatMul",
            qmatmul_inputs,
            [qmatmul_output],
            name=qmatmul_name,
        )
        self.add_node_deferred(qmatmul_node)
        # add bias term following FC in the graph
        if len(gemm_node.input) > 2:
            mm_child = opt_output_dequant if fold_output_quant else output_quant
            qmatmul_output_name = f"{qmatmul_output}_pre_dq"
            dequant_output_name = f"{qmatmul_output}_post_dq"
            if mm_child is not None and mm_child.op_type == "DequantizeLinear":
                # create hidden output layer for bias add
                add_output_name = mm_child.output[0]
                mm_child.output[0] = dequant_output_name
            else:
                # inject dequantize op for matmul
                qmatmul_node.output[0] = qmatmul_output_name
                mm_child = helper.make_node(
                    "DequantizeLinear",
                    [
                        qmatmul_output_name,  # input
                        output_quant.input[1],  # scale
                        output_quant.input[2],  # zero point
                    ],
                    [dequant_output_name],
                    name=f"{qmatmul_name}_injected_dq",
                )
                self.add_node_deferred(mm_child)
                add_output_name = qmatmul_output  # original qmatmul output name
            # inject bias op for dequantized matmul output
            self.add_node_deferred(
                helper.make_node(
                    "Add",
                    # [add_input, gemm bias]
                    [dequant_output_name, gemm_node.input[2]],
                    [add_output_name],
                    f"{gemm_node.name}_injected_bias_add",
                )
            )
        # Clean up
        self.delete_node_deferred(weight_dequant)
        self.delete_node_deferred(weight_quant)
        if fold_input_quant and len(get_node_output_nodes(model, input_quant)) <= 1:
            self.delete_node_deferred(input_quant)
        if fold_output_quant:
            self.delete_node_deferred(output_quant)
        self.delete_node_deferred(gemm_node)
7,105 | handle input | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on August 20 2016
@author: mandd
"""
#External Modules------------------------------------------------------------------------------------
import numpy as np
import copy
import scipy.spatial.distance as spatialDistance
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from .MetricInterface import MetricInterface
from ...utils import InputData, InputTypes
#Internal Modules End--------------------------------------------------------------------------------
class DTW(MetricInterface):
"""
Dynamic Time Warping Metric
Class for measuring similarity between two variables X and Y, i.e. two temporal sequences
"""
    @classmethod
    def getInputSpecification(cls):
        """
        Method to get a reference to a class that specifies the input data for
        class cls.
        @ In, cls, the class for which we are retrieving the specification
        @ Out, inputSpecification, InputData.ParameterInput, class to use for
        specifying input of cls.
        """
        inputSpecification = super().getInputSpecification()
        # "0" -> classical DTW, "1" -> derivative DTW (see self.order in __init__)
        orderInputType = InputTypes.makeEnumType("order","orderType",["0","1"])
        inputSpecification.addSub(InputData.parameterInputFactory("order",contentType=orderInputType),quantity=InputData.Quantity.one)
        # Name of a scipy cdist metric (e.g. cityblock, cosine, euclidean)
        inputSpecification.addSub(InputData.parameterInputFactory("localDistance",contentType=InputTypes.StringType),quantity=InputData.Quantity.one)
        return inputSpecification
    def __init__(self):
        """
        Constructor
        @ In, None
        @ Out, None
        """
        super().__init__()
        # order of DTW calculation, 0 specifies a classical DTW, and 1 specifies derivative DTW
        self.order = None
        # the ID of distance function to be employed to determine the local distance evaluation of two time series
        # Available options are provided by scipy pairwise distances, i.e. cityblock, cosine, euclidean, manhattan.
        self.localDistance = None
        # True indicates the metric needs to be able to handle dynamic data
        self._dynamicHandling = True
        # True indicates the metric needs to be able to handle pairwise data
        self._pairwiseHandling = True
def METHOD_NAME(self, paramInput):
"""
Method that reads the portion of the xml input that belongs to this specialized class
and initialize internal parameters
@ In, paramInput, InputData.parameterInput, input spec
@ Out, None
"""
for child in paramInput.subparts:
if child.getName() == "order":
self.order = int(child.value)
elif child.getName() == "localDistance":
self.localDistance = child.value
def run(self, x, y, weights=None, axis=0, **kwargs):
"""
This method computes DTW distance between two inputs x and y based on given metric
@ In, x, numpy.ndarray, array containing data of x, if 1D array is provided,
the array will be reshaped via x.reshape(-1,1), shape (n_samples, ), if 2D
array is provided, shape (n_samples, n_time_steps)
@ In, y, numpy.ndarray, array containing data of y, if 1D array is provided,
the array will be reshaped via y.reshape(-1,1), shape (n_samples, ), if 2D
array is provided, shape (n_samples, n_time_steps)
@ In, weights, array_like (numpy.array or list), optional, weights associated
with input, shape (n_samples) if axis = 0, otherwise shape (n_time_steps)
@ In, axis, integer, optional, axis along which a metric is performed, default is 0,
i.e. the metric will performed along the first dimension (the "rows").
If metric postprocessor is used, the first dimension is the RAVEN_sample_ID,
and the second dimension is the pivotParameter if HistorySet is provided.
@ In, kwargs, dict, dictionary of parameters characteristic of each metric
@ Out, value, float, metric result
"""
assert (isinstance(x, np.ndarray))
assert (isinstance(x, np.ndarray))
tempX = copy.copy(x)
tempY = copy.copy(y)
if axis == 0:
assert (len(x) == len(y))
elif axis == 1:
assert(x.shape[1] == y.shape[1]), self.raiseAnError(IOError, "The second dimension of first input is not \
the same as the second dimension of second input!")
tempX = tempX.T
tempY = tempY.T
else:
self.raiseAnError(IOError, "Valid axis value should be '0' or '1' for the evaluate method of metric", self.name)
if len(tempX.shape) == 1:
tempX = tempX.reshape(1,-1)
if len(tempY.shape) == 1:
tempY = tempY.reshape(1,-1)
X = np.empty(tempX.shape)
Y = np.empty(tempY.shape)
for index in range(len(tempX)):
if self.order == 1:
X[index] = np.gradient(tempX[index])
Y[index] = np.gradient(tempY[index])
else:
X[index] = tempX[index]
Y[index] = tempY[index]
value = self.dtwDistance(X, Y)
return value
def dtwDistance(self, x, y):
"""
This method actually calculates the distance between two histories x and y
@ In, x, numpy.ndarray, data matrix for x
@ In, y, numpy.ndarray, data matrix for y
@ Out, value, float, distance between x and y
"""
r, c = len(x[0,:]), len(y[0,:])
D0 = np.zeros((r + 1, c + 1))
D0[0, 1:] = np.inf
D0[1:, 0] = np.inf
D1 = D0[1:, 1:]
D1 = spatialDistance.cdist(x.T,y.T, metric=self.localDistance)
C = D1.copy()
for i in range(r):
for j in range(c):
D1[i, j] += min(D0[i, j], D0[i, j+1], D0[i+1, j])
if len(x)==1:
path = np.zeros(len(y)), range(len(y))
elif len(y) == 1:
path = range(len(x)), np.zeros(len(x))
else:
path = self.tracePath(D0)
return D1[-1, -1]
def tracePath(self, D):
"""
This method calculate the time warping path given a local distance matrix D
@ In, D, numpy.ndarray (2D), local distance matrix D
@ Out, p, numpy.ndarray (1D), path along horizontal direction
@ Out, q, numpy.ndarray (1D), path along vertical direction
"""
i,j = np.array(D.shape) - 2
p,q = [i], [j]
while ((i > 0) or (j > 0)):
tb = np.argmin((D[i, j], D[i, j+1], D[i+1, j]))
if (tb == 0):
i -= 1
j -= 1
elif (tb == 1):
i -= 1
else:
j -= 1
p.insert(0, i)
q.insert(0, j)
return np.array(p), np.array(q) |
7,106 | prefetch generic related | """
Common model classes
"""
import copy
from typing import Iterable
from django.conf import settings
from django.db import models
from django.core.exceptions import ValidationError
from django.db.models import (
DateTimeField,
ForeignKey,
Manager,
Model,
PROTECT,
prefetch_related_objects,
)
from django.db.models.query import QuerySet
from django.db import transaction
from mitxpro.utils import now_in_utc
class TimestampedModelQuerySet(QuerySet):
    """
    QuerySet for TimestampedModel that keeps ``updated_on`` fresh on bulk updates
    """

    def update(self, **kwargs):
        """
        Bulk update that stamps ``updated_on`` unless the caller supplied one.

        ``QuerySet.update()`` runs at the database level and bypasses
        ``Model.save()``, so ``auto_now`` never fires; the timestamp is set here
        instead.
        """
        updates = dict(kwargs)
        if "updated_on" not in updates:
            updates["updated_on"] = now_in_utc()
        return super().update(**updates)
class TimestampedModelManager(Manager):
    """
    Manager for TimestampedModel exposing the timestamp-aware queryset
    """

    def get_queryset(self):
        """
        Returns the custom TimestampedModelQuerySet bound to this model and db
        """
        return TimestampedModelQuerySet(self.model, using=self._db)

    def update(self, **kwargs):
        """
        Proxies to TimestampedModelQuerySet.update so manager-level .update()
        also refreshes ``updated_on``
        """
        return self.get_queryset().update(**kwargs)
class TimestampedModel(Model):
    """
    Base model for create/update timestamps
    """
    # custom manager so bulk .update() also refreshes updated_on
    objects = TimestampedModelManager()
    created_on = DateTimeField(auto_now_add=True)  # UTC, set once at insert
    updated_on = DateTimeField(auto_now=True)  # UTC, refreshed on every save()
    class Meta:
        abstract = True
class AuditModel(TimestampedModel):
    """An abstract base class for audit models"""
    # user who made the change; nullable since changes may be system-initiated
    acting_user = ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=PROTECT)
    # serialized snapshots of the audited object before and after the change
    data_before = models.JSONField(blank=True, null=True)
    data_after = models.JSONField(blank=True, null=True)
    class Meta:
        abstract = True
    @classmethod
    def get_related_field_name(cls):
        """
        Returns:
            str: A field name which links the Auditable model to this model
        """
        raise NotImplementedError
class AuditableModel(Model):
    """An abstract base class for auditable models"""

    class Meta:
        abstract = True

    def to_dict(self):
        """
        Returns:
            dict:
                A serialized representation of the model object
        """
        raise NotImplementedError

    @classmethod
    def objects_for_audit(cls):
        """
        Returns the correct model manager for the auditable model. This defaults to `objects`, but if
        a different manager is needed for any reason (for example, if `objects` is changed to a manager
        that applies some default filters), it can be overridden.
        Returns:
            django.db.models.manager.Manager: The correct model manager for the auditable model
        """
        return cls.objects

    @classmethod
    def get_audit_class(cls):
        """
        Returns:
            class of Model:
                A class of a Django model used as the audit table
        """
        raise NotImplementedError

    @transaction.atomic
    def save_and_log(self, acting_user, *args, **kwargs):
        """
        Saves the object and creates an audit object.
        Args:
            acting_user (User):
                The user who made the change to the model. May be None if inapplicable.
        """
        # snapshot the pre-save state (None when this is a brand-new object)
        previous = self.objects_for_audit().filter(id=self.id).first()
        self.save(*args, **kwargs)
        self.refresh_from_db()
        audit_class = self.get_audit_class()
        audit_kwargs = {
            "acting_user": acting_user,
            "data_before": previous.to_dict() if previous is not None else None,
            "data_after": self.to_dict(),
            # link the audit row back to this object via the model-specific field
            audit_class.get_related_field_name(): self,
        }
        audit_class.objects.create(**audit_kwargs)
class SingletonModel(Model):
    """Model class for models representing tables that should only have a single record"""

    class Meta:
        abstract = True

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        """
        Save the record, refusing a forced insert when a row already exists.
        """
        already_populated = self._meta.model.objects.count() > 0
        if force_insert and already_populated:
            raise ValidationError(
                "Only one {} object should exist. Update the existing object instead "
                "of creating a new one.".format(self.__class__.__name__)
            )
        return super().save(
            force_insert=force_insert,
            force_update=force_update,
            using=using,
            update_fields=update_fields,
        )
def _items_for_class(content_type_field, items, model_cls):
"""Returns a list of items that matches a class by content_type"""
return [
item
for item in items
if getattr(item, content_type_field).model_class() == model_cls
]
class PrefetchGenericQuerySet(QuerySet):
    """QuerySet supporting for prefetching over generic relationships"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # mapping of (content_type_field, model_cls) -> list of lookup strings
        self._prefetch_generic_related_lookups = {}
        self._prefetch_generic_done = False

    def METHOD_NAME(self, content_type_field, model_lookups):
        """
        Configure prefetch_related over generic relations
        Args:
            content_type_field(str): the field name for the ContentType
            model_lookups(dict of (list of class or class, list of str)):
                a mapping of model classes to lookups
        Returns:
            QuerySet: the new queryset with prefetching configured
        """
        queryset = self._chain()
        for model_classes, lookups in model_lookups.items():
            # pylint: disable=isinstance-second-argument-not-valid-type
            if not isinstance(model_classes, Iterable):
                model_classes = [model_classes]
            for model_cls in model_classes:
                key = (content_type_field, model_cls)
                # pylint: disable=protected-access
                existing = queryset._prefetch_generic_related_lookups.get(key, [])
                queryset._prefetch_generic_related_lookups[key] = (
                    list(existing) + list(lookups)
                )
        return queryset

    def _prefetch_generic_related_objects(self):
        """Prefetch related objects on a per-model basis"""
        for key, lookups in self._prefetch_generic_related_lookups.items():
            content_type_field, model_cls = key
            items = _items_for_class(content_type_field, self._result_cache, model_cls)
            prefetch_related_objects(items, *lookups)
        self._prefetch_generic_done = True

    def _fetch_all(self):
        """Called when a query is evaluated"""
        # first fetch non-generic data, this avoids N+1 issues on the generic items themselves
        super()._fetch_all()
        if self._prefetch_generic_related_lookups and not self._prefetch_generic_done:
            self._prefetch_generic_related_objects()

    def _clone(self):
        """Clone the queryset, carrying the generic-prefetch configuration along"""
        # pylint: disable=protected-access
        cloned = super()._clone()
        cloned._prefetch_generic_related_lookups = copy.deepcopy(
            self._prefetch_generic_related_lookups
        )
        return cloned
7,107 | set build state string | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.internet import defer
from buildbot.test.fakedb.base import FakeDBComponent
from buildbot.test.fakedb.row import Row
from buildbot.test.util import validation
from buildbot.util import epoch2datetime
class Build(Row):
    # Fake row for the ``builds`` table; defaults describe a started,
    # not-yet-complete build (complete_at and results are None).
    table = "builds"
    id_column = 'id'
    foreignKeys = ('buildrequestid', 'masterid', 'workerid', 'builderid')
    required_columns = ('buildrequestid', 'masterid', 'workerid')
    def __init__(self, id=None, number=29, buildrequestid=None, builderid=None,
                 workerid=-1, masterid=None,
                 started_at=1304262222, complete_at=None, state_string="test", results=None):
        # started_at/complete_at are epoch seconds, matching the real table
        super().__init__(id=id, number=number, buildrequestid=buildrequestid, builderid=builderid,
                         workerid=workerid, masterid=masterid, started_at=started_at,
                         complete_at=complete_at, state_string=state_string, results=results)
class BuildProperty(Row):
    # Fake row for the ``build_properties`` table; one (name, value, source)
    # property attached to a build.
    table = "build_properties"
    foreignKeys = ('buildid',)
    required_columns = ('buildid',)
    def __init__(self, buildid=None, name='prop', value=42, source='fakedb'):
        super().__init__(buildid=buildid, name=name, value=value, source=source)
class FakeBuildsComponent(FakeDBComponent):
    # In-memory stand-in for the real builds DB component.  State lives in
    # ``self.builds``: {buildid: row-dict with an extra 'properties' mapping}.
    # All public methods mirror the real component's API and return Deferreds.
    def setUp(self):
        self.builds = {}
    def insert_test_data(self, rows):
        # First pass registers Build rows; second pass attaches properties and
        # asserts the referenced build was inserted in the same batch.
        for row in rows:
            if isinstance(row, Build):
                build = self.builds[row.id] = row.values.copy()
                build['properties'] = {}
        for row in rows:
            if isinstance(row, BuildProperty):
                assert row.buildid in self.builds
                self.builds[row.buildid]['properties'][
                    row.name] = (row.value, row.source)
    # component methods
    def _newId(self):
        # Allocate the smallest unused build id, starting from 100.
        id = 100
        while id in self.builds:
            id += 1
        return id
    def _row2dict(self, row):
        # Convert a stored row into the public dict shape, turning the stored
        # epoch seconds into datetime objects like the real component does.
        return {
            "id": row['id'],
            "number": row['number'],
            "buildrequestid": row['buildrequestid'],
            "builderid": row['builderid'],
            "masterid": row['masterid'],
            "workerid": row['workerid'],
            "started_at": epoch2datetime(row['started_at']),
            "complete_at": epoch2datetime(row['complete_at']),
            "state_string": row['state_string'],
            "results": row['results']
        }
    def getBuild(self, buildid):
        # Return the build dict, or None when the id is unknown.
        row = self.builds.get(buildid)
        if not row:
            return defer.succeed(None)
        return defer.succeed(self._row2dict(row))
    def getBuildByNumber(self, builderid, number):
        # Linear scan for the (builderid, number) pair; None when absent.
        for row in self.builds.values():
            if row['builderid'] == builderid and row['number'] == number:
                return defer.succeed(self._row2dict(row))
        return defer.succeed(None)
    def getBuilds(self, builderid=None, buildrequestid=None, workerid=None, complete=None,
                  resultSpec=None):
        # Filter builds by any combination of the optional criteria;
        # ``complete`` matches on whether complete_at is set.
        ret = []
        for row in self.builds.values():
            if builderid is not None and row['builderid'] != builderid:
                continue
            if buildrequestid is not None and row['buildrequestid'] != buildrequestid:
                continue
            if workerid is not None and row['workerid'] != workerid:
                continue
            if complete is not None and complete != (row['complete_at'] is not None):
                continue
            ret.append(self._row2dict(row))
        if resultSpec is not None:
            ret = self.applyResultSpec(ret, resultSpec)
        return defer.succeed(ret)
    def addBuild(self, builderid, buildrequestid, workerid, masterid,
                 state_string):
        # Create a new build with the next per-builder build number and the
        # (fake) current time as started_at; returns (id, number).
        validation.verifyType(self.t, 'state_string', state_string,
                              validation.StringValidator())
        id = self._newId()
        number = max([0] + [r['number'] for r in self.builds.values()
                            if r['builderid'] == builderid]) + 1
        self.builds[id] = {
            "id": id, "number": number,
            "buildrequestid": buildrequestid,
            "builderid": builderid,
            "workerid": workerid,
            "masterid": masterid,
            "state_string": state_string,
            "started_at": self.reactor.seconds(),
            "complete_at": None,
            "results": None
        }
        return defer.succeed((id, number))
    def METHOD_NAME(self, buildid, state_string):
        # Update the state string of an existing build; unknown ids are a no-op.
        validation.verifyType(self.t, 'state_string', state_string,
                              validation.StringValidator())
        b = self.builds.get(buildid)
        if b:
            b['state_string'] = state_string
        return defer.succeed(None)
    def finishBuild(self, buildid, results):
        # Mark the build complete at the (fake) current time and store results.
        now = self.reactor.seconds()
        b = self.builds.get(buildid)
        if b:
            b['complete_at'] = now
            b['results'] = results
        return defer.succeed(None)
    def getBuildProperties(self, bid, resultSpec=None):
        # NOTE(review): when ``bid`` is unknown this falls through and returns
        # a plain None instead of a fired Deferred -- confirm callers never
        # query properties for an absent build.
        if bid in self.builds:
            ret = [{"name": k, "source": v[1], "value": v[0]}
                   for k, v in self.builds[bid]['properties'].items()]
            if resultSpec is not None:
                ret = self.applyResultSpec(ret, resultSpec)
            ret = {v['name']: (v['value'], v['source']) for v in ret}
            return defer.succeed(ret)
    def setBuildProperty(self, bid, name, value, source):
        assert bid in self.builds
        self.builds[bid]['properties'][name] = (value, source)
        return defer.succeed(None)
    @defer.inlineCallbacks
    def getBuildsForChange(self, changeid):
        # Join change -> buildsets -> buildrequests -> builds, mimicking the
        # real query; yields a list of build dicts for the given change.
        change = yield self.db.changes.getChange(changeid)
        bsets = yield self.db.buildsets.getBuildsets()
        breqs = yield self.db.buildrequests.getBuildRequests()
        builds = yield self.db.builds.getBuilds()
        results = []
        for bset in bsets:
            for ssid in bset['sourcestamps']:
                if change['sourcestampid'] == ssid:
                    bset['changeid'] = changeid
                    results.append({'buildsetid': bset['bsid']})
        for breq in breqs:
            for result in results:
                if result['buildsetid'] == breq['buildsetid']:
                    result['buildrequestid'] = breq['buildrequestid']
        for build in builds:
            for result in results:
                if result['buildrequestid'] == build['buildrequestid']:
                    result['id'] = build['id']
                    result['number'] = build['number']
                    result['builderid'] = build['builderid']
                    result['workerid'] = build['workerid']
                    result['masterid'] = build['masterid']
                    # NOTE(review): started_at is hard-coded to the fixture
                    # epoch rather than taken from build['started_at']
                    result['started_at'] = epoch2datetime(1304262222)
                    result['complete_at'] = build['complete_at']
                    result['state_string'] = build['state_string']
                    result['results'] = build['results']
        for result in results:
            del result['buildsetid']
        return results
7,108 | test files delete | import unittest
import sys
import os, json
from pactman import Consumer, Provider
# from .example import node_json, _build_node
api_key = os.getenv("OPENSCIENCEFRAMEWORK_API_KEY", default=None)
node_json = """
{
"data": {
"relationships": {
"files": {
"links": {
"related": {
"href": "https://api.osf.io/v2/nodes/f3szh/files/",
"meta": {}
}
}
},
"view_only_links": {
"links": {
"related": {
"href": "https://api.osf.io/v2/nodes/f3szh/view_only_links/",
"meta": {}
}
}
},
"citation": {
"links": {
"related": {
"href": "https://api.osf.io/v2/nodes/f3szh/citation/",
"meta": {}
}
}
},
"license": {
"links": {
"related": {
"href": "https://api.osf.io/v2/licenses/563c1ffbda3e240129e72c03/",
"meta": {}
}
}
},
"contributors": {
"links": {
"related": {
"href": "https://api.osf.io/v2/nodes/f3szh/contributors/",
"meta": {}
}
}
},
"forks": {
"links": {
"related": {
"href": "https://api.osf.io/v2/nodes/f3szh/forks/",
"meta": {}
}
}
},
"root": {
"links": {
"related": {
"href": "https://api.osf.io/v2/nodes/f3szh/",
"meta": {}
}
}
},
"identifiers": {
"links": {
"related": {
"href": "https://api.osf.io/v2/nodes/f3szh/identifiers/",
"meta": {}
}
}
},
"comments": {
"links": {
"related": {
"href": "https://api.osf.io/v2/nodes/f3szh/comments/?filter%5Btarget%5D=f3szh",
"meta": {}
}
}
},
"registrations": {
"links": {
"related": {
"href": "https://api.osf.io/v2/nodes/f3szh/registrations/",
"meta": {}
}
}
},
"logs": {
"links": {
"related": {
"href": "https://api.osf.io/v2/nodes/f3szh/logs/",
"meta": {}
}
}
},
"node_links": {
"links": {
"related": {
"href": "https://api.osf.io/v2/nodes/f3szh/node_links/",
"meta": {}
}
}
},
"linked_nodes": {
"links": {
"self": {
"href": "https://api.osf.io/v2/nodes/f3szh/relationships/linked_nodes/",
"meta": {}
},
"related": {
"href": "https://api.osf.io/v2/nodes/f3szh/linked_nodes/",
"meta": {}
}
}
},
"wikis": {
"links": {
"related": {
"href": "https://api.osf.io/v2/nodes/f3szh/wikis/",
"meta": {}
}
}
},
"affiliated_institutions": {
"links": {
"self": {
"href": "https://api.osf.io/v2/nodes/f3szh/relationships/institutions/",
"meta": {}
},
"related": {
"href": "https://api.osf.io/v2/nodes/f3szh/institutions/",
"meta": {}
}
}
},
"children": {
"links": {
"related": {
"href": "https://api.osf.io/v2/nodes/f3szh/children/",
"meta": {}
}
}
},
"preprints": {
"links": {
"related": {
"href": "https://api.osf.io/v2/nodes/f3szh/preprints/",
"meta": {}
}
}
},
"draft_registrations": {
"links": {
"related": {
"href": "https://api.osf.io/v2/nodes/f3szh/draft_registrations/",
"meta": {}
}
}
}
},
"links": {
"self": "https://api.osf.io/v2/nodes/f3szh/",
"html": "https://osf.io/f3szh/"
},
"attributes": {
"category": "project",
"fork": false,
"preprint": true,
"description": "this is a test for preprint citations",
"current_user_permissions": [
"read"
],
"date_modified": "2017-03-17T16:11:35.721000",
"title": "Preprint Citations Test",
"collection": false,
"registration": false,
"date_created": "2017-03-17T16:09:14.864000",
"current_user_can_comment": false,
"node_license": {
"copyright_holders": [],
"year": "2017"
},
"public": true,
"tags": [
"qatest"
]
},
"type": "{type}",
"id": "f3szh"
}
}
"""
def _build_node(type_):
    """Return a fresh copy of the sample node payload with ``data.type`` set to *type_*."""
    payload = json.loads(node_json)
    payload["data"]["type"] = type_
    return payload


# canonical "nodes"-typed fixture used by the contract tests below
project_node = _build_node("nodes")
def create_app():
    """Build the application configured for testing against the local mock provider."""
    from src import bootstrap

    application = bootstrap(use_default_error=True, address="http://localhost:3000").app
    # propagate the exceptions to the test client
    application.config.update({"TESTING": True})
    return application
# consumer-driven contract between this service (consumer) and OSF (provider),
# served by the pactman mock on port 3000
pact = Consumer("PortOSF").has_pact_with(Provider("OSF"), port=3000)
# always show full diffs on assertion failures
unittest.TestCase.maxDiff = None
class TestPortOSF(unittest.TestCase):
    """Contract tests for the OSF port; most endpoints are still stubs."""

    def setUp(self):
        self.app = create_app()
        self.client = self.app.test_client()

    def test_metric(self):
        # smoke test: the suite itself is wired up
        self.assertTrue(True)

    @unittest.skip("not implemented")
    def test_project_index(self):
        pass

    @unittest.skip("not implemented")
    def test_project_get(self):
        pass

    @unittest.skip("not implemented")
    def test_project_post(self):
        pass

    @unittest.skip("not implemented")
    def test_project_put(self):
        pass

    @unittest.skip("not implemented")
    def test_project_patch(self):
        pass

    @unittest.skip("not implemented")
    def test_project_delete(self):
        pass

    @unittest.skip("not implemented")
    def test_files_index(self):
        pass

    @unittest.skip("not implemented")
    def test_files_get(self):
        pass

    @unittest.skip("not implemented")
    def test_files_post(self):
        pass

    @unittest.skip("not implemented")
    def METHOD_NAME(self):
        pass

    # Previously disabled with a bare ``return`` at the top of the body, which
    # made the test report as PASSED while executing nothing (and left an
    # unreachable, redundant ``import json``).  A skip marker makes the
    # disabled state visible in the test output, consistent with the stubs
    # above.
    @unittest.skip("disabled pending provider verification")
    def test_metadata_update_jsonld_complete(self):
        metadata = {
            "https://schema.org/creator": [
                {
                    "https://schema.org/affiliation": "Zenodo",
                    "https://schema.org/name": "Doe, John",
                }
            ],
            "https://schema.org/description": "This is my first upload",
            "https://schema.org/identifier": 1234,
            "https://schema.org/publicAccess": True,
            "https://schema.org/name": "My first upload",
            "https://www.research-data-services.org/jsonld/zenodocategory": "poster",
            "https://www.research-data-services.org/jsonld/doi": "10.5072/zenodo.1234",
        }
        projectId = 5
        expected_body = project_node
        pact.given("access token is valid").upon_receiving(
            "the corresponding user has an updated deposit with"
        ).with_request(
            "PUT", f"/api/deposit/depositions/{projectId}"
        ).will_respond_with(
            200, body=expected_body
        )
        expected_body["metadata"] = metadata
        with pact:
            data = {"metadata": metadata, "userId": "zenodo://user:ASD123GANZSICHA"}
            result = self.client.patch(f"/metadata/project/{projectId}", json=data)
            self.assertEqual(result.status_code, 200)
            self.assertEqual(result.json, expected_body["metadata"])
7,109 | test file | # Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def assert_raises(err, fn, *args, **kwargs):
    """Assert that calling ``fn(*args, **kwargs)`` raises an exception of type ``err``."""
    caught = False
    try:
        fn(*args, **kwargs)
    except err:
        caught = True
    assert caught
def unlink(file_name):
from test.support.os_helper import unlink
try:
unlink(file_name)
except OSError:
pass
def test_read_write_file():
    """Round-trip several payloads through _pyio and read them back line by line."""
    import _pyio as pyio  # Python implementation.
    file_name = "dump.txt"

    def check_roundtrip(payload):
        unlink(file_name)
        f = pyio.open(file_name, "wb")
        try:
            # write once with \n and once without
            f.write(payload)
            f.write(b"\n")
            f.write(payload)
            f.close()
            f = pyio.open(file_name, "rb")
            assert f.readline() == payload + b"\n"
            assert f.readline() == payload
            assert not f.readline()  # Must be at EOF
            f.close()
        finally:
            unlink(file_name)

    for payload in (b"1234567890", b"hello_world_12345", b'\0' * 1000):
        check_roundtrip(payload)
def METHOD_NAME():
    """Exercise _pyio file objects: weak references, attributes, and writelines."""
    import _pyio as pyio # Python implementation.
    file_name = "dump.txt"
    unlink(file_name)
    try:
        # verify weak references
        # NOTE: ``array`` is imported but unused in this function
        from array import array
        from weakref import proxy
        from collections import UserList
        f = pyio.open(file_name, "wb")
        p = proxy(f)
        # writes through the proxy must affect the underlying file object
        p.write(b'teststring')
        assert f.tell() == p.tell()
        f.close()
        f = None
        # TODO: since weakref is not yet properly implemented this will not work
        # assert_raises(ReferenceError, getattr, p, 'tell')
        # verify expected attributes exist
        f = pyio.open(file_name, "wb")
        f.name # merely shouldn't blow up
        f.mode # ditto
        f.closed # ditto
        f.close()
        # verify writelines with instance sequence
        f = pyio.open(file_name, "wb")
        l = UserList([b'1', b'2'])
        f.writelines(l)
        f.close()
        f = pyio.open(file_name, 'rb')
        buf = f.read()
        assert buf == b'12'
        f.close()
        # verify writelines with integers
        f = pyio.open(file_name, "wb")
        assert_raises(TypeError, f.writelines, [1, 2, 3])
        f.close()
        # verify writelines with integers in UserList
        f = pyio.open(file_name, "wb")
        l = UserList([1,2,3])
        assert_raises(TypeError, f.writelines, l)
        f.close()
        # verify writelines with non-string object
        class NonString:
            pass
        f = pyio.open(file_name, "wb")
        assert_raises(TypeError, f.writelines, [NonString(), NonString()])
        f.close()
        # verify basic attributes and the closed flag transitions
        f = pyio.open(file_name, "wb")
        assert f.name == file_name
        assert not f.isatty()
        assert not f.closed
        f.close()
        assert f.closed
    finally:
        unlink(file_name)
def test_builtin_open():
    """Code written through _pyio must be readable and exec-able via builtin open().

    Fixes the original's leaked file handles: ``open(file_name).read()`` left
    two files open with no close; they are now managed by ``with`` blocks.
    """
    import _pyio as pyio  # Python implementation.
    file_name = "mymodule.py"
    with pyio.open(file_name, "w") as f:
        f.write('print(42)\n')
    success = True
    try:
        with open(file_name) as f:
            exec(f.read())
        # 'print(42)' is a single expression, so "eval" mode compiles it too
        with open(file_name, "r") as f:
            exec(compile(f.read(), file_name, "eval"))
    except Exception as e:
        print(e)
        success = False
    finally:
        unlink(file_name)
    assert success
7,110 | test xy | # This code is part of a Qiskit project.
#
# (C) Copyright IBM 2022, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test HeisenbergModel."""
from test import QiskitNatureTestCase
from rustworkx import PyGraph, is_isomorphic # type: ignore[attr-defined]
from qiskit_nature.second_q.hamiltonians.lattices import Lattice, LineLattice
from qiskit_nature.second_q.hamiltonians import HeisenbergModel, IsingModel
class TestHeisenbergModel(QiskitNatureTestCase):
    """TestHeisenbergModel"""
    def test_init(self):
        """Test init."""
        line = LineLattice(num_nodes=2)
        heisenberg_model = HeisenbergModel(lattice=line)
        with self.subTest("Check the graph."):
            self.assertTrue(
                is_isomorphic(
                    heisenberg_model.lattice.graph, line.graph, edge_matcher=lambda x, y: x == y
                )
            )
        with self.subTest("Check the second q op representation."):
            # default model on a 2-site line: isotropic XX + YY + ZZ coupling
            terms = [("X_0 X_1", 1.0), ("Y_0 Y_1", 1.0), ("Z_0 Z_1", 1.0)]
            hamiltonian = terms
            self.assertSetEqual(set(hamiltonian), set(heisenberg_model.second_q_op().items()))
    def test_triangular(self):
        """Test triangular lattice."""
        triangle_graph = PyGraph(multigraph=False)
        triangle_graph.add_nodes_from(range(3))
        # edges between all three site pairs plus (i, i) self-loops
        triangle_weighted_edge_list = [
            (0, 1, 1.0),
            (0, 2, 1.0),
            (1, 2, 1.0),
            (0, 0, 1.0),
            (1, 1, 1.0),
            (2, 2, 1.0),
        ]
        triangle_graph.add_edges_from(triangle_weighted_edge_list)
        triangle_lattice = Lattice(triangle_graph)
        # external field along y only
        ext_magnetic_field_y = (0.0, 1.0, 0.0)
        triangle_y_heisenberg_model = HeisenbergModel(
            triangle_lattice, ext_magnetic_field=ext_magnetic_field_y
        )
        with self.subTest("Check the graph of triangular model."):
            self.assertTrue(
                is_isomorphic(
                    triangle_y_heisenberg_model.lattice.graph,
                    triangle_lattice.graph,
                    edge_matcher=lambda x, y: x == y,
                )
            )
        with self.subTest("Check the second q ops in the triangular lattice with param in y axis."):
            # pairwise XYZ couplings for each edge plus one Y term per site
            terms = [
                ("X_0 X_1", 1.0),
                ("Y_0 Y_1", 1.0),
                ("Z_0 Z_1", 1.0),
                ("X_0 X_2", 1.0),
                ("Y_0 Y_2", 1.0),
                ("Z_0 Z_2", 1.0),
                ("X_1 X_2", 1.0),
                ("Y_1 Y_2", 1.0),
                ("Z_1 Z_2", 1.0),
                ("Y_0", 1.0),
                ("Y_1", 1.0),
                ("Y_2", 1.0),
            ]
            hamiltonian = terms
            self.assertSetEqual(
                set(hamiltonian), set(triangle_y_heisenberg_model.second_q_op().items())
            )
    def test_ising(self):
        """Test Ising."""
        line = LineLattice(num_nodes=2, onsite_parameter=1)
        ism = IsingModel(lattice=line)
        # ZZ coupling with an x-axis field reduces Heisenberg to Ising
        coupling_constants = (0.0, 0.0, 1.0)
        ext_magnetic_field = (1.0, 0.0, 0.0)
        hm_to_ism = HeisenbergModel(
            lattice=line,
            coupling_constants=coupling_constants,
            ext_magnetic_field=ext_magnetic_field,
        )
        with self.subTest("Check if the HeisenbergModel reproduce IsingModel in a special case."):
            self.assertSetEqual(
                set(ism.second_q_op().items()),
                set(hm_to_ism.second_q_op().items()),
            )
    def METHOD_NAME(self):
        """Test x and y directions."""
        line = LineLattice(num_nodes=2)
        # couplings and field confined to the x-y plane (z components zero)
        xy_coupling = (0.5, 0.5, 0.0)
        xy_ext_magnetic_field = (-0.75, 0.25, 0.0)
        xy_test_hm = HeisenbergModel(
            lattice=line, coupling_constants=xy_coupling, ext_magnetic_field=xy_ext_magnetic_field
        )
        with self.subTest("Check if if x and y params are being applied."):
            terms = [
                ("X_0 X_1", 0.5),
                ("Y_0 Y_1", 0.5),
                ("X_0", -0.75),
                ("Y_0", 0.25),
                ("X_1", -0.75),
                ("Y_1", 0.25),
            ]
            hamiltonian = terms
            self.assertSetEqual(set(hamiltonian), set(xy_test_hm.second_q_op().items()))
    def test_xyz_ext_field(self):
        """Test external field."""
        line = LineLattice(num_nodes=2)
        # uniform field along all three axes adds one X, Y, Z term per site
        xyz_ext_magnetic_field = (1.0, 1.0, 1.0)
        xyz_test_hm = HeisenbergModel(lattice=line, ext_magnetic_field=xyz_ext_magnetic_field)
        with self.subTest("Check if if x, y and z params are being applied."):
            terms = [
                ("X_0 X_1", 1.0),
                ("Y_0 Y_1", 1.0),
                ("Z_0 Z_1", 1.0),
                ("X_0", 1.0),
                ("X_1", 1.0),
                ("Y_0", 1.0),
                ("Y_1", 1.0),
                ("Z_0", 1.0),
                ("Z_1", 1.0),
            ]
            hamiltonian = terms
            self.assertSetEqual(set(hamiltonian), set(xyz_test_hm.second_q_op().items()))
7,111 | using | from _typeshed import Incomplete, StrOrBytesPath, SupportsItems
from typing import Any
from typing_extensions import Self
class CryptPolicy:
    # Legacy policy object describing hashing schemes and per-scheme options;
    # the surrounding CryptContext class exposes the newer configuration API.
    # alternate constructors from ini-style files / strings / other sources
    @classmethod
    def from_path(cls, path, section: str = "passlib", encoding: str = "utf-8"): ...
    @classmethod
    def from_string(cls, source, section: str = "passlib", encoding: str = "utf-8"): ...
    @classmethod
    def from_source(cls, source, _warn: bool = True): ...
    @classmethod
    def from_sources(cls, sources, _warn: bool = True): ...
    def replace(self, *args, **kwds): ...
    def __init__(self, *args, **kwds) -> None: ...
    # scheme/handler introspection
    def has_schemes(self): ...
    def iter_handlers(self): ...
    def schemes(self, resolve: bool = False): ...
    def get_handler(self, name: Incomplete | None = None, category: Incomplete | None = None, required: bool = False): ...
    def get_min_verify_time(self, category: Incomplete | None = None): ...
    def get_options(self, name, category: Incomplete | None = None): ...
    def handler_is_deprecated(self, name, category: Incomplete | None = None): ...
    # serialization back to ini/dict/string form
    def iter_config(self, ini: bool = False, resolve: bool = False): ...
    def to_dict(self, resolve: bool = False): ...
    def to_file(self, stream, section: str = "passlib") -> None: ...
    def to_string(self, section: str = "passlib", encoding: Incomplete | None = None): ...
class CryptContext:
    # Central object for hashing and verifying passwords across a configured
    # set of schemes.
    # alternate constructors and copies
    @classmethod
    def from_string(cls, source: str | bytes, section: str = "passlib", encoding: str = "utf-8") -> Self: ...
    @classmethod
    def from_path(cls, path: StrOrBytesPath, section: str = "passlib", encoding: str = "utf-8") -> Self: ...
    def copy(self, **kwds: Any) -> CryptContext: ...
    def METHOD_NAME(self, **kwds: Any) -> CryptContext: ...
    def replace(self, **kwds): ...
    def __init__(self, schemes: Incomplete | None = None, policy=..., _autoload: bool = True, **kwds) -> None: ...
    policy: CryptPolicy
    # configuration loading/updating
    def load_path(
        self, path: StrOrBytesPath, update: bool = False, section: str = "passlib", encoding: str = "utf-8"
    ) -> None: ...
    def load(
        self,
        source: str | bytes | SupportsItems[str, Any] | CryptContext,
        update: bool = False,
        section: str = "passlib",
        encoding: str = "utf-8",
    ) -> None: ...
    def update(self, *args: Any, **kwds: Any) -> None: ...
    # scheme/handler introspection
    def schemes(self, resolve: bool = False, category: Incomplete | None = None, unconfigured: bool = False): ...
    def default_scheme(self, category: Incomplete | None = None, resolve: bool = False, unconfigured: bool = False): ...
    def handler(self, scheme: Incomplete | None = None, category: Incomplete | None = None, unconfigured: bool = False): ...
    @property
    def context_kwds(self): ...
    # serialization
    def to_dict(self, resolve: bool = False) -> dict[str, Any]: ...
    def to_string(self, section: str = "passlib") -> str: ...
    # verify-timing tuning knobs
    mvt_estimate_max_samples: int
    mvt_estimate_min_samples: int
    mvt_estimate_max_time: int
    mvt_estimate_resolution: float
    harden_verify: Any
    min_verify_time: int
    def reset_min_verify_time(self) -> None: ...
    # hash lifecycle: update checks, generation, identification
    def needs_update(
        self, hash: str | bytes, scheme: str | None = None, category: str | None = None, secret: str | bytes | None = None
    ) -> bool: ...
    def hash_needs_update(self, hash, scheme: Incomplete | None = None, category: Incomplete | None = None): ...
    def genconfig(self, scheme: Incomplete | None = None, category: Incomplete | None = None, **settings): ...
    def genhash(self, secret, config, scheme: Incomplete | None = None, category: Incomplete | None = None, **kwds): ...
    def identify(
        self, hash, category: Incomplete | None = None, resolve: bool = False, required: bool = False, unconfigured: bool = False
    ): ...
    # primary hashing/verification API
    def hash(self, secret: str | bytes, scheme: str | None = None, category: str | None = None, **kwds: Any) -> str: ...
    def encrypt(self, *args, **kwds): ...
    def verify(
        self, secret: str | bytes, hash: str | bytes | None, scheme: str | None = None, category: str | None = None, **kwds: Any
    ) -> bool: ...
    def verify_and_update(
        self, secret: str | bytes, hash: str | bytes | None, scheme: str | None = None, category: str | None = None, **kwds: Any
    ) -> tuple[bool, str | None]: ...
    def dummy_verify(self, elapsed: int = 0): ...
    # enable/disable markers on stored hashes
    def is_enabled(self, hash: str | bytes) -> bool: ...
    def disable(self, hash: str | bytes | None = None) -> str: ...
    def enable(self, hash: str | bytes) -> str: ...
class LazyCryptContext(CryptContext):
    # NOTE(review): judging by the __getattribute__ override below, this
    # subclass appears to defer CryptContext configuration until first
    # attribute access — confirm against passlib's documentation.
    def __init__(self, schemes: Incomplete | None = None, **kwds) -> None: ...
    def __getattribute__(self, attr: str) -> Any: ...
# -*- coding: utf-8 -*-
# vim:expandtab:autoindent:tabstop=4:shiftwidth=4:filetype=python:textwidth=0:
import errno
import os
import os.path
import shutil
import stat
import subprocess
import time
from . import exception
from .trace_decorator import getLog, traceLog
@traceLog()
def mkdirIfAbsent(*args):
    """Create every directory given as an argument.

    Directories that already exist are silently accepted; any other
    creation failure is logged and re-raised as exception.Error.
    """
    for directory in args:
        getLog().debug("ensuring that dir exists: %s", directory)
        try:
            os.makedirs(directory)
        except OSError as err:
            # EEXIST just means somebody beat us to it; anything else is fatal
            if err.errno != errno.EEXIST:
                getLog().exception("Could not create dir %s. Error: %s", directory, err)
                raise exception.Error("Could not create dir %s. Error: %s" % (directory, err))
        else:
            getLog().debug("created dir: %s", directory)
@traceLog()
def touch(fileName):
    """Ensure fileName exists, creating an empty file when missing.

    Opening in append mode leaves existing content untouched.
    """
    getLog().debug("touching file: %s", fileName)
    with open(fileName, 'a'):
        pass
@traceLog()
def rmtree(path, selinux=False, exclude=()):
    """Version of shutil.rmtree that ignores no-such-file-or-directory errors,
    tries harder if it finds immutable files and supports excluding paths"""
    if os.path.islink(path):
        raise OSError("Cannot call rmtree on a symbolic link: %s" % path)
    # Retry bookkeeping for the outer loop:
    #   try_again        - set when a pass failed in a way worth retrying
    #   retries          - hard cap on retries for ENOTEMPTY/EBUSY failures
    #   failed_to_handle - a nested rmtree already retried and failed; don't loop again
    #   failed_filename  - remembers the file that chattr was tried on, so the
    #                      same file failing twice aborts instead of spinning
    try_again = True
    retries = 10
    failed_to_handle = False
    failed_filename = None
    if path in exclude:
        return
    while try_again:
        try_again = False
        try:
            names = os.listdir(path)
            for name in names:
                fullname = os.path.join(path, name)
                if fullname not in exclude:
                    try:
                        # lstat so symlinks are removed as links, never followed
                        mode = os.lstat(fullname).st_mode
                    except OSError:
                        mode = 0
                    if stat.S_ISDIR(mode):
                        try:
                            rmtree(fullname, selinux=selinux, exclude=exclude)
                        except OSError as e:
                            if e.errno in (errno.EPERM, errno.EACCES, errno.EBUSY):
                                # we already tried handling this on lower level and failed,
                                # there's no point in trying again now
                                failed_to_handle = True
                            raise
                    else:
                        os.remove(fullname)
            os.rmdir(path)
        except OSError as e:
            if failed_to_handle:
                raise
            if e.errno == errno.ENOENT:  # no such file or directory
                pass
            elif e.errno == errno.ENOTEMPTY:  # there's something left
                if exclude:  # but it is excluded
                    pass
                else:  # likely during Ctrl+C something additional data
                    try_again = True
                    retries -= 1
                    if retries <= 0:
                        raise
                    time.sleep(2)
            elif selinux and (e.errno == errno.EPERM or e.errno == errno.EACCES):
                try_again = True
                if failed_filename == e.filename:
                    raise
                failed_filename = e.filename
                # HACK: shell out to clear immutable attributes; note the path is
                # interpolated into the shell command without escaping, so this is
                # only safe for paths this program controls.
                os.system("chattr -R -i %s" % path)
            elif e.errno == errno.EBUSY:
                retries -= 1
                if retries <= 0:
                    raise
                try_again = True
                getLog().debug("retrying failed tree remove after sleeping a bit")
                time.sleep(2)
            else:
                raise
def is_in_dir(path, directory):
    """Tests whether `path` is inside `directory`.

    Both arguments are canonicalized with realpath first, so symlinks are
    resolved before the containment check.  `directory` itself counts as
    being inside `directory`.
    """
    # use realpath to expand symlinks
    path = os.path.realpath(path)
    directory = os.path.realpath(directory)
    # BUGFIX: os.path.commonprefix() compares strings character by character,
    # so "/opt/foobar" used to be reported as inside "/opt/foo".
    # os.path.commonpath() compares whole path components instead.
    try:
        return os.path.commonpath([path, directory]) == directory
    except ValueError:
        # different drives / mixed absolute-relative paths cannot be nested
        return False
def get_fs_type(path):
cmd = ['/bin/stat', '-f', '-L', '-c', '%T', path]
p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,
universal_newlines=True)
p.wait()
with p.stdout as f:
return f.readline().strip()
def find_non_nfs_dir():
    """Return the first well-known directory that does not live on NFS.

    Raises exception.Error if every candidate is NFS-mounted.
    """
    dirs = ('/dev/shm', '/run', '/tmp', '/usr/tmp', '/')
    for d in dirs:
        if not get_fs_type(d).startswith('nfs'):
            return d
    # BUGFIX: "'...%s' % dirs" raised TypeError ("not all arguments converted")
    # because a multi-element tuple is treated as an argument list by %-format;
    # wrapping it in a one-tuple formats it as a single value.
    raise exception.Error('Cannot find non-NFS directory in: %s' % (dirs,))
def unlink_if_exists(path):
    """Delete *path*, silently ignoring the case where it does not exist.

    Any other OSError (permissions, is-a-directory, ...) propagates.
    """
    try:
        os.remove(path)  # os.remove is an alias of os.unlink
    except FileNotFoundError:
        pass
def _best_effort_removal(path, use_rmtree=True):
try:
os.unlink(path)
except OSError:
pass
if not use_rmtree:
return
try:
shutil.rmtree(path)
except OSError:
pass
def METHOD_NAME(dest, src):
    """Recursively copy the contents of SRC into DEST.

    DEST and any needed sub-directories are created on demand.  Files in
    DEST are created or overwritten with shutil.copy2; when a file is about
    to replace a directory (or vice versa) the old entry is removed first
    without asking.  Files present in DEST but absent from SRC are left
    untouched.
    """
    getLog().debug("Updating files in %s with files from %s", dest, src)
    mkdirIfAbsent(dest)
    for src_dir, subdirs, files in os.walk(src):
        rel = os.path.normpath(os.path.relpath(src_dir, src))
        target_dir = os.path.join(dest, rel)
        for name in files:
            target = os.path.join(target_dir, name)
            # drop whatever occupies the target slot (file or whole tree)
            _best_effort_removal(target)
            shutil.copy2(os.path.join(src_dir, name), target)
        for name in subdirs:
            target = os.path.join(target_dir, name)
            # a plain file standing where a directory belongs must go
            _best_effort_removal(target, use_rmtree=False)
            mkdirIfAbsent(target)
7,113 | sv init |
import numpy as np
import bpy
from bpy.props import FloatProperty, EnumProperty, BoolProperty, IntProperty
from mathutils import Vector
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, zip_long_repeat, repeat_last_for_length, ensure_nesting_level
from sverchok.utils.curve.fillet import FILLET_ARC, FILLET_BEZIER, fillet_polyline_from_vertices
class SvFilletPolylineNode(SverchCustomTreeNode, bpy.types.Node):
    """
    Triggers: Arc Fillet Polyline
    Tooltip: Generate a polyline with arc fillets
    """
    bl_idname = 'SvExFilletPolylineNode'
    bl_label = 'Fillet Polyline'
    bl_icon = 'OUTLINER_OB_EMPTY'
    sv_icon = 'SV_FILLET_POLYLINE'

    # fillet arc radius applied at each polyline corner (per-vertex via socket)
    radius : FloatProperty(
        name = "Radius",
        description = "Fillet arc radius",
        min = 0.0,
        default = 0.2,
        update = updateNode)

    clamp : BoolProperty(
        name = "Clamp",
        description = "If checked, fillet will be limited to the maximum radius",
        default = False,
        update = updateNode)

    concat : BoolProperty(
        name = "Concatenate",
        description = "If checked, then all straight and arc segments will be concatenated into a single curve. Otherwise, each segment will be output as a separate curve object",
        default = True,
        update = updateNode)

    cyclic : BoolProperty(
        name = "Cyclic",
        description = "If checked, the node will generate a cyclic (closed) curve",
        default = False,
        update = updateNode)

    scale_to_unit : BoolProperty(
        name = "Even domains",
        description = "Give each segment and each arc equal T parameter domain of [0; 1]",
        default = False,
        update = updateNode)

    make_nurbs : BoolProperty(
        name = "NURBS output",
        description = "Generate a NURBS curve",
        default = False,
        update = updateNode)

    # enum items: (identifier, UI name, tooltip, number)
    arc_modes = [
        (FILLET_ARC, "Circular arc", "Circular arc", 0),
        (FILLET_BEZIER, "Quadratic Bezier arc", "Quadratic Bezier curve segment", 1)
    ]

    arc_mode : EnumProperty(
        name = "Fillet mode",
        description = "Type of curve to generate for fillets",
        items = arc_modes,
        default = FILLET_ARC,
        update = updateNode)

    def draw_buttons(self, context, layout):
        """Draw the node UI in the node editor sidebar."""
        layout.label(text='Fillet mode:')
        layout.prop(self, 'arc_mode', text='')
        layout.prop(self, "concat")
        # "even domains" only makes sense for one concatenated curve
        if self.concat:
            layout.prop(self, "scale_to_unit")
        layout.prop(self, "cyclic")
        layout.prop(self,'clamp')

    def draw_buttons_ext(self, context, layout):
        """Extended (N-panel) UI: everything above plus the NURBS toggle."""
        self.draw_buttons(context, layout)
        layout.prop(self, 'make_nurbs')

    def METHOD_NAME(self, context):
        # Sverchok node-initialization hook: declare input/output sockets.
        self.inputs.new('SvVerticesSocket', "Vertices")
        self.inputs.new('SvStringsSocket', "Radius").prop_name = 'radius'
        self.outputs.new('SvCurveSocket', "Curve")
        self.outputs.new('SvMatrixSocket', "Centers")

    def process(self):
        """Compute fillet curves (and fillet-center matrices) per input object."""
        # nothing downstream consumes the result -> skip the work entirely
        if not any(socket.is_linked for socket in self.outputs):
            return

        verts_s = self.inputs['Vertices'].sv_get()
        radius_s = self.inputs['Radius'].sv_get()

        # normalize nesting: verts as list-of-objects-of-vertices,
        # radii as list-of-lists matched one radius per vertex
        verts_s = ensure_nesting_level(verts_s, 3)
        radius_s = ensure_nesting_level(radius_s, 2)

        curves_out = []
        centers_out = []
        for vertices, radiuses in zip_long_repeat(verts_s, radius_s):
            if len(vertices) < 3:
                raise Exception("At least three vertices are required to make a fillet")
            radiuses = repeat_last_for_length(radiuses, len(vertices))
            curve, centers, _ = fillet_polyline_from_vertices(vertices, radiuses,
                        cyclic = self.cyclic,
                        concat = self.concat,
                        clamp = self.clamp,
                        arc_mode = self.arc_mode,
                        scale_to_unit = self.scale_to_unit,
                        make_nurbs = self.make_nurbs)
            curves_out.append(curve)
            centers_out.append(centers)

        self.outputs['Curve'].sv_set(curves_out)
        self.outputs['Centers'].sv_set(centers_out)
def register():
    """Blender add-on hook: register the node class."""
    bpy.utils.register_class(SvFilletPolylineNode)
def unregister():
    """Blender add-on hook: unregister the node class."""
    bpy.utils.unregister_class(SvFilletPolylineNode)
|
7,114 | test format list | """Test the codeclimate JSON formatter."""
from __future__ import annotations
import json
import pathlib
import subprocess
import sys
import pytest
from ansiblelint.errors import MatchError
from ansiblelint.file_utils import Lintable
from ansiblelint.formatters import CodeclimateJSONFormatter
from ansiblelint.rules import AnsibleLintRule
class TestCodeclimateJSONFormatter:
    """Unit test for CodeclimateJSONFormatter."""

    # class-level defaults; setup_class replaces them with real fixtures
    rule = AnsibleLintRule()
    matches: list[MatchError] = []
    formatter: CodeclimateJSONFormatter | None = None

    # NOTE(review): pytest invokes setup_class with the class object, so this
    # parameter is conventionally named `cls`, not `self` — behavior is the
    # same, but the naming is misleading.
    def setup_class(self) -> None:
        """Set up few MatchError objects."""
        self.rule = AnsibleLintRule()
        self.rule.id = "TCF0001"
        self.rule.severity = "VERY_HIGH"
        self.matches = []
        self.matches.append(
            MatchError(
                message="message",
                lineno=1,
                details="hello",
                lintable=Lintable("filename.yml", content=""),
                rule=self.rule,
            ),
        )
        # second match is ignored=True so severity mapping can be asserted below
        self.matches.append(
            MatchError(
                message="message",
                lineno=2,
                details="hello",
                lintable=Lintable("filename.yml", content=""),
                rule=self.rule,
                ignored=True,
            ),
        )
        self.formatter = CodeclimateJSONFormatter(
            pathlib.Path.cwd(),
            display_relative_path=True,
        )

    def METHOD_NAME(self) -> None:
        """Test if the return value is a string."""
        assert isinstance(self.formatter, CodeclimateJSONFormatter)
        assert isinstance(self.formatter.format_result(self.matches), str)

    def test_result_is_json(self) -> None:
        """Test if returned string value is a JSON."""
        assert isinstance(self.formatter, CodeclimateJSONFormatter)
        output = self.formatter.format_result(self.matches)
        json.loads(output)
        # https://github.com/ansible/ansible-navigator/issues/1490
        assert "\n" not in output

    def test_single_match(self) -> None:
        """Test negative case. Only lists are allowed. Otherwise a RuntimeError will be raised."""
        assert isinstance(self.formatter, CodeclimateJSONFormatter)
        with pytest.raises(RuntimeError):
            self.formatter.format_result(self.matches[0])  # type: ignore[arg-type]

    def test_result_is_list(self) -> None:
        """Test if the return JSON contains a list with a length of 2."""
        assert isinstance(self.formatter, CodeclimateJSONFormatter)
        result = json.loads(self.formatter.format_result(self.matches))
        assert len(result) == 2

    def test_validate_codeclimate_schema(self) -> None:
        """Test if the returned JSON is a valid codeclimate report."""
        assert isinstance(self.formatter, CodeclimateJSONFormatter)
        result = json.loads(self.formatter.format_result(self.matches))
        single_match = result[0]
        assert "type" in single_match
        assert single_match["type"] == "issue"
        assert "check_name" in single_match
        assert "categories" in single_match
        assert isinstance(single_match["categories"], list)
        assert "severity" in single_match
        assert single_match["severity"] == "major"
        assert "description" in single_match
        assert "fingerprint" in single_match
        assert "location" in single_match
        assert "path" in single_match["location"]
        assert single_match["location"]["path"] == self.matches[0].filename
        assert "lines" in single_match["location"]
        assert single_match["location"]["lines"]["begin"] == self.matches[0].lineno
        assert "positions" not in single_match["location"]
        # check that the 2nd match is marked as 'minor' because it was created with ignored=True
        assert result[1]["severity"] == "minor"

    def test_validate_codeclimate_schema_with_positions(self) -> None:
        """Test if the returned JSON is a valid codeclimate report (containing 'positions' instead of 'lines')."""
        assert isinstance(self.formatter, CodeclimateJSONFormatter)
        result = json.loads(
            self.formatter.format_result(
                [
                    MatchError(
                        message="message",
                        lineno=1,
                        column=42,
                        details="hello",
                        lintable=Lintable("filename.yml", content=""),
                        rule=self.rule,
                    ),
                ],
            ),
        )
        # a column is present, so the formatter must emit positions, not lines
        assert result[0]["location"]["positions"]["begin"]["line"] == 1
        assert result[0]["location"]["positions"]["begin"]["column"] == 42
        assert "lines" not in result[0]["location"]
def test_code_climate_parsable_ignored() -> None:
    """Test that -p option does not alter codeclimate format."""
    cmd = [
        sys.executable,
        "-m",
        "ansiblelint",
        "-v",
        "-p",
    ]
    file = "examples/playbooks/empty_playbook.yml"
    # BUGFIX: stdout was never captured, so the `result.stdout ==
    # result2.stdout` assertion below compared None with None and could
    # never fail; capture both streams so the comparison is meaningful.
    result = subprocess.run([*cmd, file], check=False, capture_output=True)
    result2 = subprocess.run([*cmd, "-p", file], check=False, capture_output=True)
    assert result.returncode == result2.returncode
    assert result.stdout == result2.stdout
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(location_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    # Auto-generated request builder: assembles the GET request for listing
    # performance tiers at a given Azure location. Caller-supplied headers
    # and params are folded in case-insensitively.
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # api-version may be overridden via kwargs or query params; otherwise
    # the generated default for this service version is used
    api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2017-12-01"))
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/providers/Microsoft.DBforPostgreSQL/locations/{locationName}/performanceTiers",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "locationName": _SERIALIZER.url("location_name", location_name, "str"),
    }

    _url: str = _format_url_section(_url, **path_format_arguments)  # type: ignore

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class LocationBasedPerformanceTierOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.rdbms.postgresql.PostgreSQLManagementClient`'s
        :attr:`location_based_performance_tier` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # Generated plumbing: client/config/serializer/deserializer may be
        # passed positionally (in that order) or as keyword arguments.
        input_args = METHOD_NAME(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def METHOD_NAME(self, location_name: str, **kwargs: Any) -> Iterable["_models.PerformanceTierProperties"]:
        """List all the performance tiers at specified location in a given subscription.

        :param location_name: The name of the location. Required.
        :type location_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PerformanceTierProperties or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.rdbms.postgresql.models.PerformanceTierProperties]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2017-12-01"))
        cls: ClsType[_models.PerformanceTierListResult] = kwargs.pop("cls", None)

        # default mapping from HTTP status to azure-core exception type;
        # callers may extend/override it via the error_map kwarg
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # first page uses the templated URL; follow-up pages GET the
            # server-provided next_link verbatim
            if not next_link:
                request = build_list_request(
                    location_name=location_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.METHOD_NAME.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = HttpRequest("GET", next_link)
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # deserialize one page; first tuple element is the next-link
            # (this API is single-page, hence always None)
            deserialized = self._deserialize("PerformanceTierListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)
METHOD_NAME.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.DBforPostgreSQL/locations/{locationName}/performanceTiers"
    }
# Copyright (c) 2020-2023 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel, and University of Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
import os
import pandapipes
import pytest
from pandas.testing import assert_frame_equal
from pandapipes.test.multinet.test_control_multinet import get_gas_example, get_power_example_simple
from pandapipes.multinet.create_multinet import create_empty_multinet, add_nets_to_multinet
from pandapipes.multinet import MultiNet
from pandapower import nets_equal as nets_equal_pandapower
from pandapipes.toolbox import nets_equal
# @pytest.fixture()
def load_net():
    """Build a small lgas test network (6 junctions, 5 pipes, a valve,
    external grid, sink and source) used as the round-trip fixture for the
    file-IO tests below."""
    # create test network
    net = pandapipes.create_empty_network("test_net", fluid="lgas")
    j1 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15,
                                    name="Connection to External Grid", geodata=(0, 0))
    j2 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 2",
                                    geodata=(2, 0))
    j3 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 3",
                                    geodata=(7, 4))
    j4 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 4",
                                    geodata=(7, -4))
    j5 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 5",
                                    geodata=(5, 3))
    j6 = pandapipes.create_junction(net, pn_bar=1.05, tfluid_k=293.15, name="Junction 6",
                                    geodata=(5, -3))
    pandapipes.create_ext_grid(net, junction=j1, p_bar=1.1, t_k=293.15, name="Grid Connection")
    pandapipes.create_pipe_from_parameters(net, from_junction=j1, to_junction=j2, length_km=10,
                                           diameter_m=0.05, name="Pipe 1", geodata=[(0, 0), (2, 0)])
    pandapipes.create_pipe_from_parameters(net, from_junction=j2, to_junction=j3, length_km=2,
                                           diameter_m=0.05, name="Pipe 2",
                                           geodata=[(2, 0), (2, 4), (7, 4)])
    pandapipes.create_pipe_from_parameters(net, from_junction=j2, to_junction=j4, length_km=2.5,
                                           diameter_m=0.05, name="Pipe 3",
                                           geodata=[(2, 0), (2, -4), (7, -4)])
    pandapipes.create_pipe_from_parameters(net, from_junction=j3, to_junction=j5, length_km=1,
                                           diameter_m=0.05, name="Pipe 4",
                                           geodata=[(7, 4), (7, 3), (5, 3)])
    pandapipes.create_pipe_from_parameters(net, from_junction=j4, to_junction=j6, length_km=1,
                                           diameter_m=0.05, name="Pipe 5",
                                           geodata=[(7, -4), (7, -3), (5, -3)])
    pandapipes.create_valve(net, from_junction=j5, to_junction=j6, diameter_m=0.05,
                            opened=True)
    pandapipes.create_sink(net, junction=j4, mdot_kg_per_s=5.45e-5, name="Sink 1")
    pandapipes.create_source(net, junction=j3, mdot_kg_per_s=3.45e-5)
    return net
def METHOD_NAME(tmp_path):
    """
    Checks if a network saved and reloaded as a pickle file is identical.
    :return:
    :rtype:
    """
    net = load_net()
    # BUGFIX: plain string concatenation produced a SIBLING of tmp_path named
    # "<tmp_path>test_net_1.p"; os.path.join keeps the file inside tmp_path.
    filename = os.path.join(os.path.abspath(str(tmp_path)), "test_net_1.p")
    # save test network
    pandapipes.to_pickle(net, filename)
    # load test network
    net2 = pandapipes.from_pickle(filename)
    # check if saved and loaded versions are identical
    assert pandapipes.nets_equal(net, net2), "Error in comparison after saving to Pickle."
def test_json(tmp_path):
    """
    Checks if a network saved and reloaded as a json file is identical.
    :return:
    :rtype:
    """
    net = load_net()
    # BUGFIX: plain string concatenation produced a SIBLING of tmp_path named
    # "<tmp_path>test_net_1.json"; os.path.join keeps the file inside tmp_path.
    filename = os.path.join(os.path.abspath(str(tmp_path)), "test_net_1.json")
    # save test network
    pandapipes.to_json(net, filename)
    # load test network
    net2 = pandapipes.from_json(filename)
    # check if saved and loaded versions are identical
    # (geodata frames are compared separately, then excluded from nets_equal)
    assert_frame_equal(net.pipe_geodata, net2.pipe_geodata)
    del net.pipe_geodata
    del net2.pipe_geodata
    assert pandapipes.nets_equal(net, net2), "Error in comparison after saving to JSON."
def test_json_multinet(tmp_path, get_gas_example, get_power_example_simple):
    """
    Checks if a multinet saved and reloaded as a json file is identical.
    :return:
    :rtype:
    """
    net_gas = get_gas_example
    net_power = get_power_example_simple

    # set up multinet
    mn = create_empty_multinet("test_p2g")
    add_nets_to_multinet(mn, power=net_power, gas=net_gas)

    # BUGFIX: plain string concatenation produced a SIBLING of tmp_path named
    # "<tmp_path>test_net_1.json"; os.path.join keeps the file inside tmp_path.
    filename = os.path.join(os.path.abspath(str(tmp_path)), "test_net_1.json")
    # save test network
    pandapipes.to_json(mn, filename)
    mn = pandapipes.from_json(filename)
    assert isinstance(mn, MultiNet)
    assert nets_equal_pandapower(mn['nets']['power'], net_power)
    assert nets_equal(mn['nets']['gas'], net_gas)
def test_json_string():
    """
    Checks if a network saved and reloaded as a json string is identical.
    :return:
    :rtype:
    """
    net = load_net()
    # save test network (no filename -> serialize to a string)
    json_string = pandapipes.to_json(net)
    # load test network
    net2 = pandapipes.from_json_string(json_string)
    # check if saved and loaded versions are identical
    # (geodata frames are compared separately, then excluded from nets_equal)
    assert_frame_equal(net.pipe_geodata, net2.pipe_geodata)
    del net.pipe_geodata
    del net2.pipe_geodata
    assert pandapipes.nets_equal(net, net2), \
        "Error in comparison after saving to JSON string."
def test_json_string_multinet(tmp_path, get_gas_example, get_power_example_simple):
    """
    Checks if a multinet saved and reloaded as a json string is identical.
    :return:
    :rtype:
    """
    # NOTE(review): the tmp_path fixture is requested but never used here —
    # the round trip goes through a string, not a file.
    net_gas = get_gas_example
    net_power = get_power_example_simple

    # set up multinet
    mn = create_empty_multinet("test_p2g")
    add_nets_to_multinet(mn, power=net_power, gas=net_gas)

    # save test network
    mn_str = pandapipes.to_json(mn)
    mn = pandapipes.from_json_string(mn_str)
    assert isinstance(mn, MultiNet)
    assert nets_equal_pandapower(mn['nets']['power'], net_power)
    assert nets_equal(mn['nets']['gas'], net_gas)
if __name__ == '__main__':
    pytest.main(["test_file_io.py"])
from FreeTAKServer.model.FTSModel.fts_protocol_object import FTSProtocolObject
#######################################################
#
# Link.py
# Python implementation of the Class Link
# Generated by Enterprise Architect
# Created on(FTSProtocolObject): 11-Feb-2020 11(FTSProtocolObject):08(FTSProtocolObject):08 AM
# Original author: Corvo
#
#######################################################
import datetime as dt
from FreeTAKServer.model.FTSModelVariables.LinkVariables import LinkVariables as vars
class Link(FTSProtocolObject):
__modified = False
def __init__(self):
self.uid = None
self.relation = None
self.production_time = None
self.type = None
self.parent_callsign = None
@staticmethod
def VideoStream(UID=vars.VideoStream().UID, PRODUCTIONTIME=vars.VideoStream().PRODUCTIONTIME,
RELATIONSHIP=vars.VideoStream().RELATIONSHIP, PARENTCALLSIGN=vars.VideoStream().PARENTCALLSIGN):
link = Link()
link.setuid(UID)
link.setproduction_time(PRODUCTIONTIME)
link.setrelationship(RELATIONSHIP)
link.METHOD_NAME(PARENTCALLSIGN)
return link
@staticmethod
def drop_point(UID = vars.drop_point().UID, RELATION = vars.drop_point().RELATION,
PRODUCTIONTIME = vars.drop_point().PRODUCTIONTIME,
TYPE = vars.drop_point().TYPE, PARENTCALLSIGN = vars.drop_point().PARENTCALLSIGN):
link = Link()
link.setuid(UID)
link.setrelation(RELATION)
link.setproduction_time(PRODUCTIONTIME)
link.settype(TYPE)
link.METHOD_NAME(PARENTCALLSIGN)
return link
@staticmethod
def geochat(UID=vars.geochat().UID, RELATION=vars.geochat().RELATION,
PRODUCTIONTIME=vars.geochat().PRODUCTIONTIME, TYPE=vars.geochat().TYPE,
PARENTCALLSIGN=vars.geochat().PARENTCALLSIGN):
link = Link()
link.setuid(UID)
link.setrelation(RELATION)
link.settype(TYPE)
link.METHOD_NAME(PARENTCALLSIGN)
return link
@staticmethod
def emergency_on(UID=vars.emergency_on().UID, RELATION=vars.emergency_on().RELATION,
PRODUCTIONTIME=vars.emergency_on().PRODUCTIONTIME, TYPE=vars.emergency_on().TYPE,
PARENTCALLSIGN=vars.emergency_on().PARENTCALLSIGN):
link = Link()
link.setuid(UID)
link.setrelation(RELATION)
link.setproduction_time(PRODUCTIONTIME)
link.settype(TYPE)
link.METHOD_NAME(PARENTCALLSIGN)
return link
@staticmethod
def disconnect(UID=vars.disconnect().UID, TYPE=vars.disconnect().TYPE, RELATION=vars.disconnect().RELATION):
link = Link()
link.setuid(UID)
link.settype(TYPE)
link.setrelation(RELATION)
return link
@staticmethod
def DeleteVideo(UID=vars.DeleteVideo().UID, TYPE=vars.DeleteVideo().TYPE, RELATION=vars.DeleteVideo().RELATION):
link = Link()
link.setuid(UID)
link.settype(TYPE)
link.setrelation(RELATION)
return link
@staticmethod
def Route(UID=vars.Route().UID, TYPE=vars.Route().TYPE, RELATION=vars.Route().RELATION,
POINT=vars.Route().POINT, CALLSIGN=vars.Route().CALLSIGN, REMARKS=vars.Route().REMARKS,
):
link = Link()
link.setuid(UID)
link.settype(TYPE)
link.setrelation(RELATION)
link.setpoint(POINT)
link.setcallsign(CALLSIGN)
link.setremarks(REMARKS)
return link
@staticmethod
def SPISensor(UID=vars.SPISensor().UID, TYPE=vars.SPISensor().TYPE, RELATION=vars.SPISensor().RELATION):
link = Link()
link.setuid(UID)
link.settype(TYPE)
link.setrelation(RELATION)
return link
@staticmethod
def BitsImageryVideo(UID=vars.BitsImageryVideo().UID, PRODUCTIONTIME=vars.BitsImageryVideo().PRODUCTIONTIME):
link = Link()
link.setuid(UID)
link.setproduction_time(PRODUCTIONTIME)
return link
def getremarks(self):
return self.remarks
def setremarks(self, remarks):
self.__modified = True
self.remarks = remarks
def getcallsign(self):
return self.callsign
def setcallsign(self, callsign):
self.__modified = True
self.callsign = callsign
def getpoint(self):
return self.point
def setpoint(self, point):
self.__modified = True
self.point = point
# uid getter
def getuid(self):
import uuid
if self.uid:
return self.uid
else:
self.uid = uuid.uuid1()
return self.uid
# uid setter
def setuid(self, uid=0):
self.__modified = True
self.uid=uid
# production_time getter
def getproduction_time(self):
return self.production_time
# production_time setter
def setproduction_time(self, production_time=0):
self.__modified = True
DATETIME_FMT = "%Y-%m-%dT%H:%M:%SZ"
if production_time == None:
timer = dt.datetime
now = timer.utcnow()
zulu = now.strftime(DATETIME_FMT)
add = dt.timedelta(minutes=1)
production_time_part = dt.datetime.strptime(zulu, DATETIME_FMT) + add
self.production_time = production_time_part.strftime(DATETIME_FMT)
else:
self.production_time = production_time
# relation getter
def getrelation(self):
return self.relation
# relation setter
def setrelation(self, relation=0):
self.__modified = True
self.relation=relation
# type getter
def gettype(self):
return self.type
# type setter
def settype(self, type=0):
self.__modified = True
self.type=type
# parent_callsign getter
def getparent_callsign(self):
return self.parent_callsign
# parent_callsign setter
def METHOD_NAME(self, parent_callsign=0):
self.__modified = True
self.parent_callsign=parent_callsign
def setrelationship(self, relationship):
self.relationship=relationship
def getrelationship(self):
        return self.relationship
from datetime import datetime, timedelta
import jinja2
import pytest
from markupsafe import Markup
from uber.custom_tags import jsonize, linebreaksbr, datetime_local_filter, datetime_filter, full_datetime_local, \
hour_day_local, time_day_local, timedelta_filter, timestamp, url_to_link, basename, form_link, humanize_timedelta
from uber.jinja import JinjaEnv
from uber.models import WatchList
class TestDatetimeFilters(object):
    """Tests for the date/time-related Jinja filters in uber.custom_tags."""

    # every date/time filter must tolerate "empty" inputs and return ''
    @pytest.mark.parametrize('filter_function', [
        datetime_local_filter,
        datetime_filter,
        full_datetime_local,
        hour_day_local,
        time_day_local,
        timestamp,
        basename
    ])
    @pytest.mark.parametrize('test_input,expected', [
        (None, ''),
        ('', ''),
        ([], ''),
        ({}, ''),
        (jinja2.runtime.Undefined(), '')
    ])
    def test_filters_allow_empty_arg(self, filter_function, test_input, expected):
        """Empty-ish inputs must yield an empty string, never raise."""
        assert expected == filter_function(test_input)

    @pytest.mark.parametrize('timedelta_args,timedelta_kwargs', [
        ([], {}),
        ([], dict(days=1, seconds=30, microseconds=100, milliseconds=100, minutes=20, hours=12, weeks=2)),
        ([1, 30, 100, 100, 20, 12, 2], {}),
    ])
    def test_timedelta_filter(self, timedelta_args, timedelta_kwargs):
        """timedelta_filter must match plain datetime + timedelta arithmetic."""
        dt = datetime.utcnow()
        td = timedelta(*timedelta_args, **timedelta_kwargs)
        expected = dt + td
        assert expected == timedelta_filter(dt, *timedelta_args, **timedelta_kwargs)

    def test_timedelta_filter_with_empty_date(self):
        """An empty date input yields None regardless of offsets."""
        assert timedelta_filter(dt=None, days=1, seconds=3600) is None
        assert timedelta_filter(dt='', days=1, seconds=3600) is None
        assert timedelta_filter(None, 1, 3600) is None
        assert timedelta_filter('', 1, 3600) is None

    def test_timedelta_filter_in_template(self):
        """The filter chain works inside a rendered Jinja template."""
        dt = datetime.utcnow()
        env = JinjaEnv.env()
        template = env.from_string('{{ dt|timedelta(days=-5)|datetime("%A, %B %-e") }}')
        expected = (dt + timedelta(days=-5)).strftime("%A, %B %-e")
        assert expected == template.render(dt=dt)

    def test_timedelta_filter_in_template_with_empty_date(self):
        """Rendering with an empty date produces an empty string."""
        env = JinjaEnv.env()
        template = env.from_string('{{ dt|timedelta(days=-5)|datetime("%A, %B %-e") }}')
        expected = ''
        assert expected == template.render(dt=None)
@pytest.mark.parametrize('first_names,last_name,expected', [
    ('', '', 'Unknown'),
    ('', 'Last', 'Last'),
    ('First', '', 'First'),
    ('First', 'Last', 'First Last'),
    ('First, Second', 'Last', 'First, Second Last'),
    ('First, Second, Third', 'Last', 'First, Second, Third Last'),
])
def test_watch_list(first_names, last_name, expected):
    """form_link renders a WatchList entry as "first names + last name",
    falling back to 'Unknown' when both parts are empty."""
    assert form_link(WatchList(first_names=first_names, last_name=last_name)) == expected
class TestHumanizeTimedelta(object):
    """Tests for humanize_timedelta: empty inputs, singular/plural units,
    multi-unit joining, and the granularity cutoff."""

    @pytest.mark.parametrize('test_args,test_kwargs,expected', [
        ([], {}, 'right now'),
        ([None], {}, 'right now'),
        ([0], {}, 'right now'),
        ([''], {}, 'right now'),
        ([jinja2.runtime.Undefined()], {}, 'right now'),
        ([timedelta()], {}, 'right now'),
        ([], {'years': 0}, 'right now'),
        ([], {'months': 0}, 'right now'),
        ([], {'days': 0}, 'right now'),
        ([], {'hours': 0}, 'right now'),
        ([], {'minutes': 0}, 'right now'),
        ([], {'seconds': 0}, 'right now'),
        ([], {'years': 1}, '1 year'),
        ([], {'months': 1}, '1 month'),
        ([], {'days': 1}, '1 day'),
        ([], {'hours': 1}, '1 hour'),
        ([], {'minutes': 1}, '1 minute'),
        ([], {'seconds': 1}, '1 second'),
        ([], {'years': 2}, '2 years'),
        ([], {'months': 2}, '2 months'),
        ([], {'days': 2}, '2 days'),
        ([], {'hours': 2}, '2 hours'),
        ([], {'minutes': 2}, '2 minutes'),
        ([], {'seconds': 2}, '2 seconds'),
        ([], {'months': 23}, '1 year and 11 months'),
        ([], {'hours': 28}, '1 day and 4 hours'),
        ([], {'minutes': 69}, '1 hour and 9 minutes'),
        ([], {'seconds': 4163}, '1 hour, 9 minutes, and 23 seconds'),
        ([], {'seconds': 4163, 'granularity': 'minutes'}, '1 hour and 9 minutes'),
    ])
    def test_humanize_timedelta(self, test_args, test_kwargs, expected):
        """Each (args, kwargs) pair must humanize to the exact expected text."""
        assert expected == humanize_timedelta(*test_args, **test_kwargs)
class TestJsonize(object):
    """jsonize must serialize plain values and map empty/undefined to '{}'."""

    @pytest.mark.parametrize('test_input,expected', [
        (None, '{}'),
        ('', '""'),
        ('asdf', '"asdf"'),
        ({}, '{}'),
        ([], '[]'),
        (True, 'true'),
        (False, 'false'),
        (jinja2.runtime.Undefined(), '{}'),
    ])
    def METHOD_NAME(self, test_input, expected):
        """The serialized form of *test_input* must equal *expected*."""
        assert jsonize(test_input) == expected
class TestLinebreaksbr(object):
    """linebreaksbr must turn every newline variant (LF, CRLF, CR) into a
    ``<br />`` tag while leaving pre-existing ``<br />`` tags untouched."""

    @pytest.mark.parametrize('test_input,expected', [
        ('', Markup('')),
        (Markup(''), Markup('')),
        ('asdf', Markup('asdf')),
        (Markup('asdf'), Markup('asdf')),
        ('asdf\nasdf', Markup('asdf<br />asdf')),
        (Markup('asdf\nasdf'), Markup('asdf<br />asdf')),
        ('asdf\r\nasdf', Markup('asdf<br />asdf')),
        ('asdf\rasdf', Markup('asdf<br />asdf')),
        ('asdf<br />asdf', Markup('asdf<br />asdf')),
        ('asdf<br />asdf\nasdf', Markup('asdf<br />asdf<br />asdf')),
        (Markup('asdf<br />asdf'), Markup('asdf<br />asdf')),
        (Markup('asdf<br />asdf\nasdf'), Markup('asdf<br />asdf<br />asdf'))
    ])
    def test_linebreaksbr(self, test_input, expected):
        """Both plain str and Markup inputs must produce the same Markup."""
        assert linebreaksbr(test_input) == expected
class TestUrlToLink(object):
    """url_to_link builds an ``<a>`` tag from a url, optional link text and an
    optional target, HTML-escaping each piece."""

    # NOTE(review): the expected values of the escaping cases were corrupted in
    # this copy of the file (the HTML entities had been unescaped, leaving
    # unterminated string literals).  They are reconstructed here from
    # markupsafe's escaping rules (& < > " ' -> &amp; &lt; &gt; &#34; &#39;);
    # confirm against the original test suite.
    @pytest.mark.parametrize('url_args, url_kwargs, expected', [
        ([''], {}, ''),
        (['/regular/url'], {}, '<a href="/regular/url">/regular/url</a>'),
        (['/regular/url', 'normaltext'], {}, '<a href="/regular/url">normaltext</a>'),
        (['/regular/url', 'normaltext', '_blank'], {}, '<a href="/regular/url" target="_blank">normaltext</a>'),
        (['&<>"\'', 'normaltext'], {}, '<a href="&amp;&lt;&gt;&#34;&#39;">normaltext</a>'),
        (['/regular/url', '&<>"\''], {}, '<a href="/regular/url">&amp;&lt;&gt;&#34;&#39;</a>'),
        (
            ['/regular/url', 'normaltext', '&<>"\''],
            {},
            '<a href="/regular/url" target="&amp;&lt;&gt;&#34;&#39;">normaltext</a>'
        ),
    ])
    def test_urltolink(self, url_args, url_kwargs, expected):
        """Each argument combination must render to the expected anchor tag."""
        assert expected == url_to_link(*url_args, **url_kwargs)
import sys
import textwrap
import pytest
from gaphor import UML
from gaphor.ui.filemanager import FileManager
try:
import pygit2
except ImportError:
pass
else:
from gaphor.storage.tests.fixtures import create_merge_conflict
@pytest.fixture
def file_manager(event_manager, element_factory, modeling_language):
    """Provide a FileManager wired to the test services, without a main window."""
    return FileManager(event_manager, element_factory, modeling_language, None)
def test_save(element_factory, file_manager: FileManager, tmp_path):
    """Saving a model containing one element must create the target file."""
    element_factory.create(UML.Class)
    target = tmp_path / "out.gaphor"
    file_manager.save(filename=target)
    assert target.exists()
def test_model_is_saved_with_utf8_encoding(
    element_factory, file_manager: FileManager, tmp_path
):
    """A model with non-ASCII element names must be written as valid UTF-8."""
    klass = element_factory.create(UML.Class)
    klass.name = "üëïèàòù"
    pkg = element_factory.create(UML.Package)
    pkg.name = "안녕하세요 세계"
    model_file = tmp_path / "model.gaphor"
    file_manager.save(model_file)
    with open(model_file, encoding="utf-8") as f:
        f.read()  # raises UnicodeDecodeError if the file is not valid UTF-8
def test_model_is_loaded_with_utf8_encoding(
    element_factory, file_manager: FileManager, tmp_path
):
    """Non-ASCII names must survive a save / flush / load round trip."""
    class_name = "üëïèàòù"
    package_name = "안녕하세요 세계"
    element_factory.create(UML.Class).name = class_name
    element_factory.create(UML.Package).name = package_name
    model_file = tmp_path / "model.gaphor"
    file_manager.save(model_file)
    element_factory.flush()
    file_manager.load(model_file)
    assert next(element_factory.select(UML.Class)).name == class_name
    assert next(element_factory.select(UML.Package)).name == package_name
@pytest.mark.skipif(
    sys.platform != "win32", reason="Standard encoding on Windows is not UTF-8"
)
def test_old_model_is_loaded_without_utf8_encoding(
    file_manager: FileManager, test_models
):
    """A legacy model saved in a non-UTF-8 encoding must still load (Windows only)."""
    model_file = test_models / "wrong-encoding.gaphor"
    file_manager.load(model_file)
@pytest.mark.skipif("pygit2" not in globals(), reason="No pygit2 installed")
@pytest.mark.parametrize("resolution", ["current", "incoming"])
def test_load_model_with_merge_conflict(
    file_manager: FileManager, element_factory, merge_conflict, monkeypatch, resolution
):
    """Choosing either side of the conflict must load a non-empty model."""
    METHOD_NAME(monkeypatch, resolution)
    file_manager.resolve_merge_conflict(merge_conflict)
    assert element_factory.size() > 0
@pytest.mark.skipif("pygit2" not in globals(), reason="No pygit2 installed")
def test_load_model_merge_conflict_and_manual_resolution(
    file_manager: FileManager, element_factory, merge_conflict, monkeypatch
):
    """Manual resolution must leave PendingChange elements in the model."""
    METHOD_NAME(monkeypatch, "manual")
    file_manager.resolve_merge_conflict(merge_conflict)
    from gaphor.core.modeling import PendingChange
    assert element_factory.lselect(PendingChange)
@pytest.mark.skipif("pygit2" not in globals(), reason="No pygit2 installed")
def test_load_model_with_merge_conflict_and_unknown_resolution(
    file_manager: FileManager, merge_conflict, monkeypatch
):
    """An unrecognized resolution keyword must raise ValueError."""
    METHOD_NAME(monkeypatch, "nonsense")
    with pytest.raises(ValueError):
        file_manager.resolve_merge_conflict(merge_conflict)
def METHOD_NAME(monkeypatch, resolution):
    """Patch the merge-conflict dialog so it immediately answers *resolution*.

    The replacement ignores the window and filename and simply invokes the
    handler callback with the canned resolution string.
    """
    def fake_dialog(_window, _filename, handler):
        handler(resolution)

    monkeypatch.setattr(
        "gaphor.ui.filemanager.resolve_merge_conflict_dialog",
        fake_dialog,
    )
@pytest.fixture
def merge_conflict(tmp_path):
    """Create a git repository whose ``model.gaphor`` is left in a merge
    conflict between a "current" and an "incoming" branch, and return the
    path to the conflicted model file.

    Requires pygit2; callers guard with a skipif marker.
    """
    # Common ancestor revision.
    initial_model = textwrap.dedent(
        """\
        <?xml version="1.0" encoding="utf-8"?>
        <gaphor xmlns="http://gaphor.sourceforge.net/model" version="3.0" gaphor-version="2.12.1">
        <StyleSheet id="58d6989a-66f8-11ec-b4c8-0456e5e540ed"/>
        <Package id="58d6c2e8-66f8-11ec-b4c8-0456e5e540ed">
        <name>
        <val>current</val>
        </name>
        </Package>
        </gaphor>
        """
    )
    # "Ours": same package, plus an owned diagram.
    current_model = textwrap.dedent(
        """\
        <?xml version="1.0" encoding="utf-8"?>
        <gaphor xmlns="http://gaphor.sourceforge.net/model" version="3.0" gaphor-version="2.12.1">
        <StyleSheet id="58d6989a-66f8-11ec-b4c8-0456e5e540ed"/>
        <Package id="58d6c2e8-66f8-11ec-b4c8-0456e5e540ed">
        <name>
        <val>current</val>
        </name>
        <ownedDiagram>
        <reflist>
        <ref refid="58d6c536-66f8-11ec-b4c8-0456e5e540ed"/>
        </reflist>
        </ownedDiagram>
        </Package>
        <Diagram id="58d6c536-66f8-11ec-b4c8-0456e5e540ed">
        <element>
        <ref refid="58d6c2e8-66f8-11ec-b4c8-0456e5e540ed"/>
        </element>
        <name>
        <val>diagram</val>
        </name>
        </Diagram>
        </gaphor>"""
    )
    # "Theirs": the package renamed, conflicting with the current branch.
    incoming_model = textwrap.dedent(
        """\
        <?xml version="1.0" encoding="utf-8"?>
        <gaphor xmlns="http://gaphor.sourceforge.net/model" version="3.0" gaphor-version="2.12.1">
        <StyleSheet id="58d6989a-66f8-11ec-b4c8-0456e5e540ed"/>
        <Package id="58d6c2e8-66f8-11ec-b4c8-0456e5e540ed">
        <name>
        <val>incoming</val>
        </name>
        </Package>
        </gaphor>"""
    )
    model = tmp_path / "model.gaphor"
    repo = pygit2.init_repository(tmp_path)
    # Commits the three versions on diverging branches and merges, leaving
    # the working tree conflicted.
    create_merge_conflict(repo, model, initial_model, current_model, incoming_model)
    return model
from collections import namedtuple
import re
import sys
from ast import literal_eval
from functools import total_ordering
from parso._compatibility import unicode
# The following is a list in Python that are line breaks in str.splitlines, but
# not in Python. In Python only \r (Carriage Return, 0xD) and \n (Line Feed,
# 0xA) are allowed to split lines.
_NON_LINE_BREAKS = (
    u'\v',  # Vertical Tabulation 0xB
    u'\f',  # Form Feed 0xC
    u'\x1C',  # File Separator
    u'\x1D',  # Group Separator
    u'\x1E',  # Record Separator
    u'\x85',  # Next Line (NEL - Equivalent to CR+LF.
              # Used to mark end-of-line on some IBM mainframes.)
    u'\u2028',  # Line Separator
    u'\u2029',  # Paragraph Separator
)

Version = namedtuple('Version', 'major, minor, micro')


def split_lines(string, keepends=False):
    r"""
    Intended for Python code. In contrast to Python's :py:meth:`str.splitlines`,
    looks at form feeds and other special characters as normal text. Just
    splits ``\n`` and ``\r\n``.
    Also different: Returns ``[""]`` for an empty string input.

    In Python 2.7 form feeds are used as normal characters when using
    str.splitlines. However in Python 3 somewhere there was a decision to split
    also on form feeds.
    """
    if not keepends:
        return re.split(r'\n|\r\n|\r', string)

    lines = string.splitlines(True)
    # str.splitlines also breaks on the characters in _NON_LINE_BREAKS; glue
    # those pieces back together.  Collect the indices first, then merge
    # backwards so earlier indices stay valid while items are popped.
    merge_indices = [
        index for index, line in enumerate(lines)
        if line and line[-1] in _NON_LINE_BREAKS
    ]
    for index in reversed(merge_indices):
        if index + 1 < len(lines):
            lines[index] += lines.pop(index + 1)
    # str.splitlines drops the trailing empty line that re.split keeps; add
    # it back so both modes agree (and '' yields ['']).
    if string == '' or string.endswith(('\n', '\r')):
        lines.append('')
    return lines
def python_bytes_to_unicode(source, encoding='utf-8', errors='strict'):
    """
    Checks for unicode BOMs and PEP 263 encoding declarations. Then returns a
    unicode object like in :py:meth:`bytes.decode`.

    :param encoding: See :py:meth:`bytes.decode` documentation.
    :param errors: See :py:meth:`bytes.decode` documentation. ``errors`` can be
        ``'strict'``, ``'replace'`` or ``'ignore'``.
    """
    if isinstance(source, unicode):
        # Already text; nothing to decode.
        return source

    def detect_encoding():
        """Pick the encoding: a UTF-8 BOM wins, then a PEP 263 coding
        declaration within the first two lines, then the caller's default.

        See http://www.python.org/dev/peps/pep-0263/ and
        http://docs.python.org/2/reference/lexical_analysis.html#encoding-declarations
        """
        byte_mark = literal_eval(r"b'\xef\xbb\xbf'")
        if source.startswith(byte_mark):
            # UTF-8 byte-order mark
            return 'utf-8'

        first_two_lines = re.match(br'(?:[^\n]*\n){0,2}', source).group(0)
        declared = re.search(br"coding[=:]\s*([-\w.]+)", first_two_lines)
        if declared:
            return declared.group(1)
        # Nothing declared -> fall back to the default (PEP 263).
        return encoding

    encoding = detect_encoding()
    if not isinstance(encoding, unicode):
        encoding = unicode(encoding, 'utf-8', 'replace')
    try:
        return unicode(source, encoding, errors)
    except LookupError:
        if errors == 'replace':
            # The declared encoding name is not a real codec (e.g.
            # `# coding: foo-8`); decode as UTF-8 instead of failing.
            return unicode(source, 'utf-8', errors)
        raise
def version_info():
    """
    Returns a namedtuple of parso's version, similar to Python's
    ``sys.version_info``.
    """
    from parso import __version__
    parts = re.findall(r'[a-z]+|\d+', __version__)
    # The fourth component (index 3) is a pre-release tag and stays a string;
    # everything else is numeric.
    return Version(*[part if index == 3 else int(part)
                     for index, part in enumerate(parts)])
def _parse_version(version):
    """Parse '3', '3.8' or '3.8.1' (optionally with an a/b/rc suffix) into a
    :class:`PythonVersionInfo`; the micro part and suffix are discarded."""
    match = re.match(r'(\d+)(?:\.(\d{1,2})(?:\.\d+)?)?((a|b|rc)\d)?$', version)
    if match is None:
        raise ValueError('The given version is not in the right format. '
                         'Use something like "3.8" or "3".')

    major = int(match.group(1))
    minor = match.group(2)
    if minor is None:
        # Use the latest Python in case it's not exactly defined, because the
        # grammars are typically backwards compatible?
        default_minors = {2: "7", 3: "6"}
        if major not in default_minors:
            raise NotImplementedError("Sorry, no support yet for those fancy new/old versions.")
        minor = default_minors[major]
    return PythonVersionInfo(major, int(minor))
@total_ordering
class PythonVersionInfo(namedtuple('Version', 'major, minor')):
    """A (major, minor) version pair that compares against plain 2-tuples.

    ``total_ordering`` derives the remaining comparison operators from
    ``__eq__`` and ``__gt__``.  Comparing against a tuple of any other
    length raises :class:`ValueError`.
    """

    def __gt__(self, other):
        if isinstance(other, tuple):
            if len(other) != 2:
                raise ValueError("Can only compare to tuples of length 2.")
            return (self.major, self.minor) > other
        # Bug fix: previously this fell through, discarded the super() result
        # and returned a (truthy) tuple.  NotImplemented lets Python try the
        # reflected comparison or raise TypeError for unrelated types.
        return NotImplemented

    def __eq__(self, other):
        if isinstance(other, tuple):
            if len(other) != 2:
                raise ValueError("Can only compare to tuples of length 2.")
            return (self.major, self.minor) == other
        # Bug fix: previously returned None implicitly.
        return NotImplemented

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    # Defining __eq__ would otherwise set __hash__ to None; keep instances
    # hashable and consistent with tuple equality.
    __hash__ = tuple.__hash__
def METHOD_NAME(version=None):
    """Validate a version string like ``"3.8"``, ``"2.7.1"`` or ``"3"`` and
    return the corresponding two-component version info.

    Defaults to the running interpreter's major.minor when *version* is None.
    """
    if version is None:
        version = '%d.%d' % sys.version_info[:2]
    if not isinstance(version, (unicode, str)):
        raise TypeError('version must be a string like "3.8"')
    return _parse_version(version)
# -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher (clemens.prescher@gmail.com)
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019-2020 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from qtpy import QtWidgets
from pyqtgraph import GraphicsLayoutWidget
from .plot_widgets import MaskImgWidget
from .CustomWidgets import NumberTextField, LabelAlignRight, SpinBoxAlignRight, HorizontalSpacerItem, \
CheckableFlatButton, FlatButton, VerticalSpacerItem, HorizontalLine
class MaskWidget(QtWidgets.QWidget):
    """
    Defines the main structure of the mask widget, which is separated into two parts.
    Mask Display Widget - shows the image and pattern
    Mask Control Widget - shows all the controls on the right side of the widget
    """

    def __init__(self, *args, **kwargs):
        """Build both halves, lay them out side by side and apply default styling."""
        super(MaskWidget, self).__init__(*args, **kwargs)
        self._layout = QtWidgets.QHBoxLayout()
        self.METHOD_NAME()
        self.create_control_widget()
        self._layout.addWidget(self._display_widget)
        self._layout.addWidget(self._control_widget)
        self._layout.setContentsMargins(0, 0, 0, 0)
        self._layout.setSpacing(6)
        self.style_widgets()
        self.setLayout(self._layout)

    def METHOD_NAME(self):
        """Create the left-hand display area: the mask image view with a
        status row underneath for the cursor position label."""
        self._display_widget = QtWidgets.QFrame(self)
        self._display_layout = QtWidgets.QVBoxLayout()
        self._display_layout.setContentsMargins(0, 0, 0, 0)
        self.img_layout_widget = GraphicsLayoutWidget()
        self.img_widget = MaskImgWidget(self.img_layout_widget)
        self._display_layout.addWidget(self.img_layout_widget)
        self._status_layout = QtWidgets.QHBoxLayout()
        self._status_layout.addSpacerItem(HorizontalSpacerItem())
        # Created empty; presumably filled with position info by a controller
        # outside this class — not set anywhere in this widget.
        self.pos_lbl = LabelAlignRight('')
        self._status_layout.addWidget(self.pos_lbl)
        self._display_layout.addLayout(self._status_layout)
        self._display_widget.setLayout(self._display_layout)

    def create_control_widget(self):
        """Create the right-hand control column: mask/unmask mode, geometry
        tools, threshold masking, mask actions, cosmic removal, mask
        visibility and save/load buttons."""
        self._control_widget = QtWidgets.QWidget(self)
        self._control_layout = QtWidgets.QVBoxLayout(self._control_widget)
        self._control_layout.setSpacing(6)
        # mask / unmask radio buttons
        self._rb_layout = QtWidgets.QHBoxLayout()
        self.mask_rb = QtWidgets.QRadioButton('mask')
        self.unmask_rb = QtWidgets.QRadioButton('unmask')
        self._rb_layout.addWidget(self.mask_rb)
        self._rb_layout.addWidget(self.unmask_rb)
        self._control_layout.addLayout(self._rb_layout)
        self._control_layout.addWidget(HorizontalLine())
        # geometry selection tools (point size spinbox next to the point tool)
        self._geometry_layout = QtWidgets.QGridLayout()
        self.circle_btn = CheckableFlatButton('Circle')
        self.rectangle_btn = CheckableFlatButton('Rectangle')
        self.point_btn = CheckableFlatButton('Point')
        self.point_size_sb = SpinBoxAlignRight()
        self.polygon_btn = CheckableFlatButton('Polygon')
        self.arc_btn = CheckableFlatButton('Arc')
        self._geometry_layout.addWidget(self.circle_btn, 0, 0)
        self._geometry_layout.addWidget(self.rectangle_btn, 0, 1)
        self._geometry_layout.addWidget(self.point_btn, 1, 0)
        self._geometry_layout.addWidget(self.point_size_sb, 1, 1)
        self._geometry_layout.addWidget(self.polygon_btn, 2, 0)
        self._geometry_layout.addWidget(self.arc_btn, 2, 1)
        self._control_layout.addLayout(self._geometry_layout)
        self._control_layout.addWidget(HorizontalLine())
        # intensity-threshold masking
        self._threshold_layout = QtWidgets.QGridLayout()
        self.above_thresh_btn = FlatButton('Above Thresh')
        self.below_thresh_btn = FlatButton('Below Thresh')
        self.above_thresh_txt = NumberTextField('')
        self.below_thresh_txt = NumberTextField('')
        self._threshold_layout.addWidget(self.above_thresh_btn, 0, 0)
        self._threshold_layout.addWidget(self.above_thresh_txt, 0, 1)
        self._threshold_layout.addWidget(self.below_thresh_btn, 1, 0)
        self._threshold_layout.addWidget(self.below_thresh_txt, 1, 1)
        self._control_layout.addLayout(self._threshold_layout)
        self._control_layout.addWidget(HorizontalLine())
        # whole-mask actions
        self._action_layout = QtWidgets.QGridLayout()
        self.grow_btn = FlatButton('Grow')
        self.shrink_btn = FlatButton('Shrink')
        self.invert_mask_btn = FlatButton('Invert')
        self.clear_mask_btn = FlatButton('Clear')
        self.undo_btn = FlatButton('Undo')
        self.redo_btn = FlatButton('Redo')
        self._action_layout.addWidget(self.grow_btn, 0, 0)
        self._action_layout.addWidget(self.shrink_btn, 0, 1)
        self._action_layout.addWidget(self.invert_mask_btn, 1, 0)
        self._action_layout.addWidget(self.clear_mask_btn, 1, 1)
        self._action_layout.addWidget(self.undo_btn, 2, 0)
        self._action_layout.addWidget(self.redo_btn, 2, 1)
        self._control_layout.addLayout(self._action_layout)
        self._control_layout.addWidget(HorizontalLine())
        self.cosmic_btn = FlatButton('Cosmic Removal')
        self._control_layout.addWidget(self.cosmic_btn)
        self._control_layout.addWidget(HorizontalLine())
        # mask rendering style: filled vs. transparent overlay
        self._visibility_widget = QtWidgets.QWidget()
        self._visibility_layout = QtWidgets.QHBoxLayout()
        self.fill_rb = QtWidgets.QRadioButton('Fill')
        self.transparent_rb = QtWidgets.QRadioButton('Transparent')
        self._visibility_layout.addWidget(self.fill_rb)
        self._visibility_layout.addWidget(self.transparent_rb)
        self._visibility_widget.setLayout(self._visibility_layout)
        self._control_layout.addWidget(self._visibility_widget)
        self._control_layout.addSpacerItem(VerticalSpacerItem())
        # mask file I/O buttons pinned to the bottom
        self._file_layout = QtWidgets.QGridLayout()
        self.save_mask_btn = FlatButton('Save Mask')
        self.load_mask_btn = FlatButton('Load Mask')
        self.add_mask_btn = FlatButton('Add Mask')
        self._file_layout.addWidget(self.save_mask_btn, 0, 0, 1, 2)
        self._file_layout.addWidget(self.load_mask_btn, 1, 0)
        self._file_layout.addWidget(self.add_mask_btn, 1, 1)
        self._control_layout.addLayout(self._file_layout)
        self._control_widget.setLayout(self._control_layout)

    def style_widgets(self):
        """Apply default state: mask + fill checked, default point size and a
        fixed width for the control column."""
        self.mask_rb.setChecked(True)
        self.fill_rb.setChecked(True)
        self.point_size_sb.setValue(20)
        self._control_widget.setMinimumWidth(200)
        self._control_widget.setMaximumWidth(200)
""" Distribution specific override class for Debian family (Ubuntu/Debian) """
import logging
from certbot import errors
from certbot import util
from certbot.compat import filesystem
from certbot.compat import os
from certbot_apache._internal import apache_util
from certbot_apache._internal import configurator
from certbot_apache._internal.configurator import OsOptions
from certbot_apache._internal.obj import VirtualHost
logger = logging.getLogger(__name__)
class DebianConfigurator(configurator.ApacheConfigurator):
    """Debian specific ApacheConfigurator override class"""

    # Debian/Ubuntu manage modules and sites via the a2enmod/a2dismod helpers
    # and the mods-/sites-available vs. -enabled symlink layout.
    OS_DEFAULTS = OsOptions(
        enmod="a2enmod",
        dismod="a2dismod",
        handle_modules=True,
        handle_sites=True,
    )

    def enable_site(self, vhost: VirtualHost) -> None:
        """Enables an available site, Apache reload required.

        .. note:: Does not make sure that the site correctly works or that all
            modules are enabled appropriately.

        :param vhost: vhost to enable
        :type vhost: :class:`~certbot_apache._internal.obj.VirtualHost`

        :raises .errors.NotSupportedError: If filesystem layout is not
            supported.
        """
        if vhost.enabled:
            return None
        enabled_path = ("%s/sites-enabled/%s" %
                        (self.parser.root,
                         os.path.basename(vhost.filep)))
        if not os.path.isdir(os.path.dirname(enabled_path)):
            # For some reason, sites-enabled / sites-available do not exist
            # Call the parent method
            return super().enable_site(vhost)
        # Register the symlink so the change can be rolled back.
        self.reverter.register_file_creation(False, enabled_path)
        try:
            os.symlink(vhost.filep, enabled_path)
        except OSError as err:
            if os.path.islink(enabled_path) and filesystem.realpath(
                    enabled_path) == vhost.filep:
                # Already in shape
                vhost.enabled = True
                return None
            logger.error(
                "Could not symlink %s to %s, got error: %s", enabled_path,
                vhost.filep, err.strerror)
            errstring = ("Encountered error while trying to enable a " +
                         "newly created VirtualHost located at {0} by " +
                         "linking to it from {1}")
            raise errors.NotSupportedError(errstring.format(vhost.filep,
                                                            enabled_path))
        vhost.enabled = True
        logger.info("Enabling available site: %s", vhost.filep)
        self.save_notes += "Enabled site %s\n" % vhost.filep
        return None

    def METHOD_NAME(self, mod_name: str, temp: bool = False) -> None:
        """Enables module in Apache.

        Both enables and reloads Apache so module is active.

        :param str mod_name: Name of the module to enable. (e.g. 'ssl')
        :param bool temp: Whether or not this is a temporary action.

        :raises .errors.NotSupportedError: If the filesystem layout is not
            supported.
        :raises .errors.MisconfigurationError: If a2enmod or a2dismod cannot be
            run.
        """
        avail_path = os.path.join(self.parser.root, "mods-available")
        enabled_path = os.path.join(self.parser.root, "mods-enabled")
        if not os.path.isdir(avail_path) or not os.path.isdir(enabled_path):
            raise errors.NotSupportedError(
                "Unsupported directory layout. You may try to enable mod %s "
                "and try again." % mod_name)
        deps = apache_util.get_mod_deps(mod_name)
        # Enable all dependencies
        for dep in deps:
            if (dep + "_module") not in self.parser.modules:
                self._enable_mod_debian(dep, temp)
                self.parser.add_mod(dep)
                note = "Enabled dependency of %s module - %s" % (mod_name, dep)
                if not temp:
                    self.save_notes += note + os.linesep
                logger.debug(note)
        # Enable actual module
        self._enable_mod_debian(mod_name, temp)
        self.parser.add_mod(mod_name)
        if not temp:
            self.save_notes += "Enabled %s module in Apache\n" % mod_name
        logger.info("Enabled Apache %s module", mod_name)
        # Modules can enable additional config files. Variables may be defined
        # within these new configuration sections.
        # Reload is not necessary as DUMP_RUN_CFG uses latest config.
        self.parser.update_runtime_variables()

    def _enable_mod_debian(self, mod_name: str, temp: bool) -> None:
        """Assumes mods-available, mods-enabled layout.

        Registers an undo command (``a2dismod -f``) before running ``a2enmod``
        so the change can be reverted.
        """
        # Generate reversal command.
        # Try to be safe here... check that we can probably reverse before
        # applying enmod command
        if (self.options.dismod is None or self.options.enmod is None
                or not util.exe_exists(self.options.dismod)):
            raise errors.MisconfigurationError(
                "Unable to find a2dismod, please make sure a2enmod and "
                "a2dismod are configured correctly for certbot.")
        self.reverter.register_undo_command(temp, [self.options.dismod, "-f", mod_name])
        util.run_script([self.options.enmod, mod_name])
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to use the AWS SDK for Python (Boto3) with AWS Audit Manager to create an
assessment report that consists of only one day of evidence.
"""
# snippet-start:[python.example_code.auditmanager.Scenario_CreateAssessmentReport]
import dateutil.parser
import logging
import time
import urllib.request
import uuid
import boto3
from botocore.exceptions import ClientError
logger = logging.getLogger(__name__)
class AuditReport:
    """Wraps AWS Audit Manager calls to build an assessment report containing
    only the evidence collected on a single day."""

    def __init__(self, auditmanager_client):
        # :param auditmanager_client: a Boto3 Audit Manager client.
        self.auditmanager_client = auditmanager_client

    def get_input(self):
        """Prompt for an assessment id (UUID) and an evidence date and verify
        the assessment exists.

        :return: (assessment_uuid, evidence_date) on success, or (None, None)
            when either input is invalid or the assessment cannot be fetched.
        """
        print('-' * 40)
        try:
            assessment_id = input('Provide assessment id [uuid]: ').lower()
            try:
                assessment_uuid = uuid.UUID(assessment_id)
            except ValueError:
                logger.error("Assessment Id is not a valid UUID: %s", assessment_id)
                raise
            evidence_folder = input('Provide evidence date [yyyy-mm-dd]: ')
            try:
                evidence_date = dateutil.parser.parse(evidence_folder).date()
            except ValueError:
                logger.error("Invalid date : %s", evidence_folder)
                raise
            try:
                # Existence check only; the response body is not used.
                self.auditmanager_client.get_assessment(assessmentId=str(assessment_uuid))
            except ClientError:
                logger.exception("Couldn't get assessment %s.", assessment_uuid)
                raise
        except (ValueError, ClientError):
            return None, None
        else:
            return assessment_uuid, evidence_date

    def clear_staging(self, assessment_uuid, evidence_date):
        """
        Find all the evidence in the report and clear it.

        Pages through every evidence folder of the assessment (1000 per page),
        removing any report-selected folders/evidence, while collecting the
        ids of folders whose name matches *evidence_date*.

        :return: list of folder ids named after *evidence_date*.
        """
        next_token = None
        page = 1
        interested_folder_id_list = []
        while True:
            print(f"Page [{page}]")
            if next_token is None:
                folder_list = self.auditmanager_client.get_evidence_folders_by_assessment(
                    assessmentId=str(assessment_uuid),
                    maxResults=1000)
            else:
                folder_list = self.auditmanager_client.get_evidence_folders_by_assessment(
                    assessmentId=str(assessment_uuid),
                    nextToken=next_token,
                    maxResults=1000)
            folders = folder_list.get('evidenceFolders')
            print(f"Got {len(folders)} folders.")
            for folder in folders:
                folder_id = folder.get('id')
                # Folder names are date strings; remember the ones to re-add.
                if folder.get('name') == str(evidence_date):
                    interested_folder_id_list.append(folder_id)
                if folder.get('assessmentReportSelectionCount') == folder.get('totalEvidence'):
                    # Whole folder is selected: drop the folder in one call.
                    print(
                        f"Removing folder from report selection : {folder.get('name')} "
                        f"{folder_id} {folder.get('controlId')}")
                    self.auditmanager_client.disassociate_assessment_report_evidence_folder(
                        assessmentId=str(assessment_uuid),
                        evidenceFolderId=folder_id)
                elif folder.get('assessmentReportSelectionCount') > 0:
                    # Get all evidence in the folder and
                    # add selected evidence in the selected_evidence_list.
                    # NOTE(review): the folder id is passed as controlSetId —
                    # looks suspect; verify against the
                    # GetEvidenceByEvidenceFolder API.
                    evidence_list = self.auditmanager_client.get_evidence_by_evidence_folder(
                        assessmentId=str(assessment_uuid),
                        controlSetId=folder_id,
                        evidenceFolderId=folder_id,
                        maxResults=1000)
                    selected_evidence_list = []
                    for evidence in evidence_list.get('evidence'):
                        if evidence.get('assessmentReportSelection') == 'Yes':
                            selected_evidence_list.append(evidence.get('id'))
                    print(f"Removing evidence report selection : {folder.get('name')} "
                          f"{len(selected_evidence_list)}")
                    self.auditmanager_client.batch_disassociate_assessment_report_evidence(
                        assessmentId=str(assessment_uuid),
                        evidenceFolderId=folder_id,
                        evidenceIds=selected_evidence_list)
            next_token = folder_list.get('nextToken')
            if not next_token:
                break
            page += 1
        return interested_folder_id_list

    def add_folder_to_staging(self, assessment_uuid, folder_id_list):
        """Associate each collected evidence folder with the report staging."""
        print(f"Adding folders to report : {folder_id_list}")
        for folder in folder_id_list:
            self.auditmanager_client.associate_assessment_report_evidence_folder(
                assessmentId=str(assessment_uuid),
                evidenceFolderId=folder)

    def get_report(self, assessment_uuid):
        """Create the assessment report and download it via its presigned URL
        once generation completes (or tell the user to fetch it manually)."""
        report = self.auditmanager_client.create_assessment_report(
            name='ReportViaScript',
            description='testing',
            assessmentId=str(assessment_uuid))
        if self.METHOD_NAME(report.get('assessmentReport').get('id')):
            report_url = self.auditmanager_client.get_assessment_report_url(
                assessmentReportId=report.get('assessmentReport').get('id'),
                assessmentId=str(assessment_uuid))
            print(report_url.get('preSignedUrl'))
            urllib.request.urlretrieve(
                report_url.get('preSignedUrl').get('link'),
                report_url.get('preSignedUrl').get('hyperlinkName'))
            print(f"Report saved as {report_url.get('preSignedUrl').get('hyperlinkName')}.")
        else:
            print("Report generation did not finish in 15 minutes.")
            print("Failed to download report. Go to the console and manually download "
                  "the report.")

    def METHOD_NAME(self, assessment_report_id):
        """Poll every 5 seconds (up to 15 minutes) until the most recent
        assessment report is *assessment_report_id* with status COMPLETE.

        :return: True when complete; None (falsy) on timeout.
        """
        max_wait_time = 0
        while max_wait_time < 900:
            print(f"Checking status of the report {assessment_report_id}")
            report_list = self.auditmanager_client.list_assessment_reports(maxResults=1)
            if (report_list.get('assessmentReports')[0].get('id') == assessment_report_id
                    and report_list.get('assessmentReports')[0].get('status') == 'COMPLETE'):
                return True
            print('Sleeping for 5 seconds...')
            time.sleep(5)
            max_wait_time += 5
def run_demo():
    """Interactive entry point: ask for an assessment and an evidence date,
    then build and download a one-day assessment report."""
    print('-' * 88)
    print("Welcome to the AWS Audit Manager samples demo!")
    print('-' * 88)
    print("This script creates an assessment report for an assessment with all the "
          "evidence collected on the provided date.")
    print('-' * 88)
    report = AuditReport(boto3.client('auditmanager'))
    assessment_uuid, evidence_date = report.get_input()
    if assessment_uuid is None or evidence_date is None:
        # Invalid input; get_input already logged the reason.
        return
    folder_id_list = report.clear_staging(assessment_uuid, evidence_date)
    report.add_folder_to_staging(assessment_uuid, folder_id_list)
    report.get_report(assessment_uuid)


if __name__ == '__main__':
    run_demo()
# snippet-end:[python.example_code.auditmanager.Scenario_CreateAssessmentReport] |
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2022 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
from pymeasure.instruments import Instrument
from pymeasure.instruments.validators import strict_discrete_set
from pyvisa.constants import Parity
from enum import IntEnum
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class LakeShore211(Instrument):
""" Represents the Lake Shore 211 Temperature Monitor and provides
a high-level interface for interacting with the instrument.
Untested properties and methods will be noted in their docstrings.
.. code-block:: python
controller = LakeShore211("GPIB::1")
print(controller.temperature_celsius) # Print the sensor temperature in celsius
"""
    class AnalogMode(IntEnum):
        """Analog output mode (first field of the ANALOG command)."""
        VOLTAGE = 0
        CURRENT = 1

    class AnalogRange(IntEnum):
        """Temperature span mapped onto the analog output (second ANALOG field)."""
        RANGE_20K = 0
        RANGE_100K = 1
        RANGE_200K = 2
        RANGE_325K = 3
        RANGE_475K = 4
        RANGE_1000K = 5

    class RelayNumber(IntEnum):
        """Selector for one of the instrument's two relays."""
        RELAY_ONE = 1
        RELAY_TWO = 2

    class RelayMode(IntEnum):
        """Relay operating mode: forced off/on, or driven by the alarms."""
        OFF = 0
        ON = 1
        ALARMS = 2

    # Field names for alarm parameter tuples — presumably consumed by alarm
    # get/set methods defined further down this class (not visible here);
    # TODO confirm.
    alarm_keys = ['on', 'high_value', 'low_value', 'deadband', 'latch']
def __init__(self, adapter, name="Lake Shore 211 Temperature Monitor", **kwargs):
super().__init__(
adapter,
name,
asrl={'data_bits': 7, 'parity': Parity.odd},
**kwargs
)
analog_configuration = Instrument.control(
"ANALOG?", "ANALOG %d,%d",
"""
Control the analog mode and analog range.
Values need to be supplied as a tuple of (analog mode, analog range)
Analog mode can be 0 or 1
+--------+--------+
| setting| mode |
+--------+--------+
| 0 | voltage|
+--------+--------+
| 1 | current|
+--------+--------+
Analog range can be 0 through 5
+--------+----------+
| setting| range |
+--------+----------+
| 0 | 0 – 20 K |
+--------+----------+
| 1 | 0 – 100 K|
+--------+----------+
| 2 | 0 – 200 K|
+--------+----------+
| 3 | 0 – 325 K|
+--------+----------+
| 4 | 0 – 475 K|
+--------+----------+
| 5 |0 – 1000 K|
+--------+----------+
""",
# Validate and return tuple v
validator=lambda v, vs: (
strict_discrete_set(v[0], vs[0]), strict_discrete_set(v[1], vs[1])),
values=[list(AnalogMode), list(AnalogRange)],
# These are the vs values in the validator lambda
get_process=lambda x: (LakeShore211.AnalogMode(x[0]), LakeShore211.AnalogRange(x[1])),
cast=int
)
analog_out = Instrument.measurement(
"AOUT?",
"""Measure the percentage of output of the analog output.
"""
)
display_units = Instrument.control(
"DISPFLD?", "DISPFLD %d",
"""
Control the input data to display. Valid entries:
+-------------+--------------+
| setting | units |
+-------------+--------------+
| 'kelvin' | Kelvin |
+-------------+--------------+
| 'celsius' | Celsius |
+-------------+--------------+
| 'sensor' | Sensor Units |
+-------------+--------------+
| 'fahrenheit'| Fahrenheit |
+-------------+--------------+
""",
values={'kelvin': 0, 'celsius': 1, 'sensor': 2, 'fahrenheit': 3},
map_values=True
)
temperature_celsius = Instrument.measurement(
"CRDG?",
"""Measure the temperature of the sensor in celsius
"""
)
temperature_fahrenheit = Instrument.measurement(
"FRDG?",
"""Measure the temperature of the sensor in fahrenheit
"""
)
temperature_sensor = Instrument.measurement(
"SRDG?",
"""Measure the temperature of the sensor in sensor units
"""
)
temperature_kelvin = Instrument.measurement(
"KRDG?",
"""Measure the temperature of the sensor in kelvin
"""
)
def METHOD_NAME(self, relay):
"""
Get the status of a relay
Property is UNTESTED
:param RelayNumber relay: Specify which relay to query
:return: Current RelayMode of queried relay
"""
relay = strict_discrete_set(relay, list(self.RelayNumber))
return int(self.ask("RELAY? %d" % relay))
def configure_relay(self, relay, mode):
"""
Configure the relay mode of a relay
Property is UNTESTED
:param RelayNumber relay: Specify which relay to configure
:param RelayMode mode: Specify which mode to assign
"""
relay = strict_discrete_set(relay, list(self.RelayNumber))
mode = strict_discrete_set(mode, list(self.RelayMode))
self.write('RELAY %d %d' % (relay, mode))
def get_alarm_status(self):
"""
Query the current alarm status
:return: Dictionary of current status [on, high_value, low_value, deadband, latch]
"""
status = self.values('ALARM?')
return dict(zip(self.alarm_keys,
[int(status[0]), float(status[1]), float(status[2]), float(status[3]),
int(status[4])]))
def configure_alarm(self, on=True, high_value=270.0, low_value=0.0, deadband=0, latch=False):
"""Configures the alarm parameters for the input.
:param on: Boolean setting of alarm, default True
:param high_value: High value the temperature is checked against to activate the alarm
:param low_value: Low value the temperature is checked against to activate the alarm
:param deadband: Value that the temperature must change outside of an alarm condition
:param latch: Specifies if the alarm should latch or not
"""
command_string = "ALARM %d,%g,%g,%g,%d" % (on, high_value, low_value, deadband, latch)
self.write(command_string)
def reset_alarm(self):
"""Resets the alarm of the Lakeshore 211
"""
self.write('ALMRST') |
7,125 | test nocolor force nounderline | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright 2006 - 2021, Tomas Babej, Paul Beckingham, Federico Hernandez.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# https://www.opensource.org/licenses/mit-license.php
#
###############################################################################
import sys
import os
import unittest
# Ensure python finds the local simpletap module
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from basetest import Task, TestCase
class TestUnderline(TestCase):
    """Check rc.fontunderline against rc.color / rc._forcecolor.

    Underlines replace the dashed separators only when color output is
    forced (_forcecolor:on) AND fontunderline is enabled; every other
    combination yields dashes.  When stdout is not a tty, color is
    disabled automatically, so rc.color on its own never changes the
    outcome here.
    """

    @classmethod
    def setUpClass(cls):
        cls.t = Task()
        cls.t("add foo")

    def _info(self, color, force, underline):
        """Run '1 info' with the given rc overrides and return its stdout."""
        overrides = "rc.color:{} rc._forcecolor:{} rc.fontunderline:{}".format(
            color, force, underline)
        code, out, err = self.t("1 info " + overrides)
        return out

    def test_nocolor_noforce_nounderline(self):
        self.assertIn("--------", self._info("off", "off", "off"))

    def test_nocolor_noforce_underline(self):
        self.assertIn("--------", self._info("off", "off", "on"))

    def METHOD_NAME(self):
        self.assertIn("--------", self._info("off", "on", "off"))

    def test_nocolor_force_underline(self):
        self.assertNotIn("--------", self._info("off", "on", "on"))

    def test_color_noforce_nounderline(self):
        self.assertIn("--------", self._info("on", "off", "off"))

    def test_color_noforce_underline(self):
        self.assertIn("--------", self._info("on", "off", "on"))

    def test_color_force_nounderline(self):
        self.assertIn("--------", self._info("on", "on", "off"))

    def test_color_force_underline(self):
        self.assertNotIn("--------", self._info("on", "on", "on"))
if __name__ == "__main__":
    from simpletap import TAPTestRunner
    # Emit TAP (Test Anything Protocol) output instead of the default
    # unittest text runner so the suite integrates with the TAP harness.
    unittest.main(testRunner=TAPTestRunner())
# vim: ai sts=4 et sw=4 ft=python
7,126 | test logbook logger error level | # BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logbook
import pytest
from logbook import LogRecord
from elasticapm.conf.constants import ERROR
from elasticapm.handlers.logbook import LogbookHandler
@pytest.fixture()
def logbook_logger():
    """A logbook Logger named after this test module."""
    return logbook.Logger(__name__)
@pytest.fixture()
def logbook_handler(elasticapm_client):
    """A LogbookHandler wired to the test client."""
    # Restrict "in-app" frames to the test/agent packages so stacktrace
    # collection is deterministic in these tests.
    elasticapm_client.config.include_paths = ["tests", "elasticapm"]
    return LogbookHandler(elasticapm_client)
def METHOD_NAME(logbook_logger, logbook_handler):
    """An error() record is captured as a single event with full log metadata."""
    with logbook_handler.applicationbound():
        logbook_logger.error("This is a test error")
    assert len(logbook_handler.client.events) == 1
    event = logbook_handler.client.events[ERROR][0]
    log = event["log"]
    assert log["logger_name"] == __name__
    assert log["level"] == "error"
    assert log["message"] == "This is a test error"
    assert "stacktrace" in log
    assert "exception" not in event
    assert "param_message" in log
    assert log["param_message"] == "This is a test error"
def test_logger_warning_level(logbook_logger, logbook_handler):
    """A warning() record is captured with level metadata set to 'warning'."""
    with logbook_handler.applicationbound():
        logbook_logger.warning("This is a test warning")
    assert len(logbook_handler.client.events) == 1
    event = logbook_handler.client.events[ERROR][0]
    log = event["log"]
    assert log["logger_name"] == __name__
    assert log["level"] == "warning"
    assert log["message"] == "This is a test warning"
    assert "stacktrace" in log
    assert "exception" not in event
    assert "param_message" in log
    assert log["param_message"] == "This is a test warning"
def test_logger_without_stacktrace_config(logbook_logger, logbook_handler):
    """With auto_log_stacks disabled, no stacktrace is attached to the event."""
    logbook_handler.client.config.auto_log_stacks = False
    with logbook_handler.applicationbound():
        logbook_logger.warning("This is a test warning")
    log_data = logbook_handler.client.events[ERROR][0]["log"]
    assert "stacktrace" not in log_data
def test_logger_without_stacktrace_stack_false(logbook_logger, logbook_handler):
    """stack=False on the call suppresses the stacktrace even when enabled globally."""
    logbook_handler.client.config.auto_log_stacks = True
    with logbook_handler.applicationbound():
        logbook_logger.warning("This is a test warning", stack=False)
    log_data = logbook_handler.client.events[ERROR][0]["log"]
    assert "stacktrace" not in log_data
def test_logger_with_extra(logbook_logger, logbook_handler):
    """extra kwargs end up in the event's custom context."""
    with logbook_handler.applicationbound():
        logbook_logger.info(
            "This is a test info with a url", extra=dict(url="http://example.com")
        )
    assert len(logbook_handler.client.events) == 1
    event = logbook_handler.client.events[ERROR][0]
    assert event["context"]["custom"]["url"] == "http://example.com"
    log = event["log"]
    assert "stacktrace" in log
    assert "exception" not in event
    assert "param_message" in log
    assert log["param_message"] == "This is a test info with a url"
def test_logger_with_exc_info(logbook_logger, logbook_handler):
    """exc_info=True attaches exception details alongside the log data."""
    with logbook_handler.applicationbound():
        try:
            raise ValueError("This is a test ValueError")
        except ValueError:
            logbook_logger.info("This is a test info with an exception", exc_info=True)
    assert len(logbook_handler.client.events) == 1
    event = logbook_handler.client.events[ERROR][0]
    log = event["log"]
    assert log["message"] == "This is a test info with an exception"
    assert "stacktrace" in log
    assert "exception" in event
    exception = event["exception"]
    assert exception["type"] == "ValueError"
    assert exception["message"] == "ValueError: This is a test ValueError"
    assert "param_message" in log
    assert log["param_message"] == "This is a test info with an exception"
def test_logger_param_message(logbook_logger, logbook_handler):
    """%-style args are interpolated into message but kept raw in param_message."""
    with logbook_handler.applicationbound():
        logbook_logger.info("This is a test of %s", "args")
    assert len(logbook_handler.client.events) == 1
    event = logbook_handler.client.events[ERROR][0]
    log = event["log"]
    assert log["message"] == "This is a test of args"
    assert "stacktrace" in log
    assert "exception" not in event
    assert "param_message" in log
    assert log["param_message"] == "This is a test of %s"
def test_client_arg(elasticapm_client):
    """The client may be passed as the first positional argument."""
    assert LogbookHandler(elasticapm_client).client == elasticapm_client
def test_client_kwarg(elasticapm_client):
    """The client may be passed via the 'client' keyword."""
    assert LogbookHandler(client=elasticapm_client).client == elasticapm_client
def test_invalid_first_arg_type():
    """A first positional argument that is not a client is rejected."""
    with pytest.raises(ValueError):
        LogbookHandler(object)
def test_missing_client_arg():
    """Constructing the handler without a client must fail."""
    with pytest.raises(TypeError):
        LogbookHandler()
def test_logbook_handler_emit_error(capsys, elasticapm_client):
    """A crash inside _emit is caught and reported on stderr, not raised."""
    handler = LogbookHandler(elasticapm_client)
    # emit() calls _emit with a record argument; this zero-arg lambda
    # guarantees a failure inside the handler.
    handler._emit = lambda: 1 / 0
    handler.emit(LogRecord("x", 1, "Oops"))
    _, err = capsys.readouterr()
    assert "Top level ElasticAPM exception caught" in err
    assert "Oops" in err
def test_logbook_handler_emit_error_non_str_message(capsys, elasticapm_client):
    """Non-string record messages are still reported in the fallback path."""
    handler = LogbookHandler(elasticapm_client)
    handler._emit = lambda record: 1 / 0  # force a failure inside emit()
    handler.emit(LogRecord("x", 1, ValueError("oh no")))
    _, err = capsys.readouterr()
    assert "Top level ElasticAPM exception caught" in err
    assert "oh no" in err
def test_logbook_handler_dont_emit_elasticapm(capsys, elasticapm_client):
    """Records from the agent's own logger are printed rather than sent."""
    handler = LogbookHandler(elasticapm_client)
    handler.emit(LogRecord("elasticapm.errors", 1, "Oops"))
    _, err = capsys.readouterr()
    assert "Oops" in err
def test_arbitrary_object(elasticapm_client, logbook_logger, logbook_handler):
    """Non-string messages are stringified into param_message."""
    # Fix: the original last line carried a stray trailing " |" artifact,
    # which is a syntax error.
    with logbook_handler.applicationbound():
        logbook_logger.info(["a", "list", "of", "strings"])
    assert len(logbook_handler.client.events) == 1
    event = logbook_handler.client.events[ERROR][0]
    assert "param_message" in event["log"]
    assert event["log"]["param_message"] == "['a', 'list', 'of', 'strings']"
7,127 | get activated threads | # Copyright (C) 2007 Samuel Abels
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from ..task import TaskState
from .base import TaskSpec
from .ThreadStart import ThreadStart
from ..operators import valueof
class ThreadSplit(TaskSpec):
    """
    When executed, this task performs a split on the current my_task.
    The number of outgoing my_tasks depends on the runtime value of a
    specified data field.
    If more than one input is connected, the task performs an implicit
    multi merge.
    This task has one or more inputs and may have any number of outputs.
    """

    def __init__(self,
                 wf_spec,
                 name,
                 times=1,
                 suppress_threadstart_creation=False,
                 **kwargs):
        """
        Constructor.
        :type  wf_spec: WorkflowSpec
        :param wf_spec: A reference to the workflow specification.
        :type  name: string
        :param name: A name for the task.
        :type  times: int or :class:`SpiffWorkflow.operators.Term`
        :param times: The number of tasks to create.
        :type  suppress_threadstart_creation: bool
        :param suppress_threadstart_creation: Don't create a ThreadStart,
          because the deserializer is about to.
        :type  kwargs: dict
        :param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`.
        """
        if times is None:
            raise ValueError('times argument is required')
        TaskSpec.__init__(self, wf_spec, name, **kwargs)
        self.times = times
        if not suppress_threadstart_creation:
            # Implicitly create the ThreadStart that all outputs hang off.
            self.thread_starter = ThreadStart(wf_spec, **kwargs)
            self._outputs.append(self.thread_starter.name)
            self.thread_starter._connect_notify(self)
        else:
            # The deserializer will wire it up later; see _predict_hook.
            self.thread_starter = None

    def connect(self, task_spec):
        """
        Connect the *following* task to this one. In other words, the
        given task is added as an output task.
        task -- the task to connect to.
        """
        # Outputs are attached to the internal ThreadStart, not to self.
        self.thread_starter._outputs.append(task_spec.name)
        task_spec._connect_notify(self.thread_starter)

    def _get_activated_tasks(self, my_task, destination):
        """
        Returns the list of tasks that were activated in the previous
        call of execute(). Only returns tasks that point towards the
        destination task, i.e. those which have destination as a
        descendant.
        my_task -- the task of this TaskSpec
        destination -- the child task
        """
        task = destination._find_ancestor(self.thread_starter)
        return self.thread_starter._get_activated_tasks(task, destination)

    def METHOD_NAME(self, my_task):
        """
        Returns the list of threads that were activated in the previous
        call of execute().
        my_task -- the task of this TaskSpec
        """
        return my_task.children

    def _on_trigger(self, my_task):
        """
        May be called after execute() was already completed to create an
        additional outbound task.
        """
        for output in self.outputs:
            new_task = my_task.add_child(output, TaskState.READY)
            new_task.triggered = True

    def _get_predicted_outputs(self, my_task):
        # `times` may be a plain int or an operator Term evaluated at runtime.
        split_n = int(valueof(my_task, self.times))
        return [self.thread_starter] * split_n

    def _predict_hook(self, my_task):
        # if we were created with thread_starter suppressed, connect it now.
        if self.thread_starter is None:
            self.thread_starter = self.outputs[0]
        outputs = self._get_predicted_outputs(my_task)
        if my_task._is_definite():
            my_task._sync_children(outputs, TaskState.FUTURE)
        else:
            my_task._sync_children(outputs, TaskState.LIKELY)

    def _run_hook(self, my_task):
        outputs = self._get_predicted_outputs(my_task)
        my_task._sync_children(outputs, TaskState.FUTURE)
        return True

    def serialize(self, serializer):
        return serializer.serialize_thread_split(self)

    @classmethod
    def deserialize(cls, serializer, wf_spec, s_state):
        # Fix: first parameter of a classmethod is conventionally `cls`,
        # not `self`. Also removed a stray trailing " |" artifact.
        return serializer.deserialize_thread_split(wf_spec, s_state)
7,128 | get event name | from typing import Callable, Dict, Optional
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventTypeError
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.validator import WildValue, check_int, check_string, to_wild_value
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.lib.webhooks.git import (
TOPIC_WITH_BRANCH_TEMPLATE,
TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE,
get_pull_request_event_message,
get_push_commits_event_message,
)
from zerver.models import UserProfile
def get_code_pull_request_updated_body(payload: WildValue) -> str:
    """Build the notification body for a PR "updated" event."""
    markdown_detail = payload["detailedMessage"]["markdown"].tame(check_string)
    return get_pull_request_event_message(
        user_name=get_code_pull_request_user_name(payload),
        action="updated",
        url=get_code_pull_request_url(payload),
        number=get_code_pull_request_id(payload),
        message=markdown_detail,
        title=get_code_pull_request_title(payload),
    )
def get_code_pull_request_merged_body(payload: WildValue) -> str:
    """Build the notification body for a PR "merged" event."""
    source_branch = (
        payload["resource"]["sourceRefName"].tame(check_string).replace("refs/heads/", "")
    )
    dest_branch = (
        payload["resource"]["targetRefName"].tame(check_string).replace("refs/heads/", "")
    )
    # NOTE(review): sourceRefName is passed as target_branch (and
    # targetRefName as base_branch), mirroring the original mapping.
    return get_pull_request_event_message(
        user_name=get_code_pull_request_user_name(payload),
        action="merged",
        url=get_code_pull_request_url(payload),
        number=get_code_pull_request_id(payload),
        target_branch=source_branch,
        base_branch=dest_branch,
        title=get_code_pull_request_title(payload),
    )
def get_code_pull_request_opened_body(payload: WildValue) -> str:
    """Build the notification body for a PR "created" event."""
    resource = payload["resource"]
    # The PR description is optional in the payload.
    description = (
        resource["description"].tame(check_string) if resource.get("description") else None
    )
    source_branch = resource["sourceRefName"].tame(check_string).replace("refs/heads/", "")
    dest_branch = resource["targetRefName"].tame(check_string).replace("refs/heads/", "")
    return get_pull_request_event_message(
        user_name=get_code_pull_request_user_name(payload),
        action="created",
        url=get_code_pull_request_url(payload),
        number=get_code_pull_request_id(payload),
        target_branch=source_branch,
        base_branch=dest_branch,
        message=description,
        title=get_code_pull_request_title(payload),
    )
def get_code_push_commits_body(payload: WildValue) -> str:
    """Build the push-notification body listing the pushed commits."""
    repo_url = get_code_repository_url(payload)
    ref_update = payload["resource"]["refUpdates"][0]
    compare_url = "{}/branchCompare?baseVersion=GC{}&targetVersion=GC{}&_a=files".format(
        repo_url,
        ref_update["oldObjectId"].tame(check_string),
        ref_update["newObjectId"].tame(check_string),
    )
    commits_data = []
    for commit in payload["resource"].get("commits", []):
        sha = commit["commitId"].tame(check_string)
        commits_data.append(
            {
                "name": commit["author"]["name"].tame(check_string),
                "sha": sha,
                "url": f"{repo_url}/commit/{sha}",
                "message": commit["comment"].tame(check_string),
            }
        )
    return get_push_commits_event_message(
        get_code_push_user_name(payload),
        compare_url,
        get_code_push_branch_name(payload),
        commits_data,
    )
def get_code_push_user_name(payload: WildValue) -> str:
    """Display name of the user who performed the push."""
    pushed_by = payload["resource"]["pushedBy"]
    return pushed_by["displayName"].tame(check_string)
def get_code_push_branch_name(payload: WildValue) -> str:
    """Short name of the branch updated by the push (refs/heads/ stripped)."""
    full_ref = payload["resource"]["refUpdates"][0]["name"].tame(check_string)
    return full_ref.replace("refs/heads/", "")
def get_code_repository_name(payload: WildValue) -> str:
    """Name of the repository the event belongs to."""
    repository = payload["resource"]["repository"]
    return repository["name"].tame(check_string)
def get_code_repository_url(payload: WildValue) -> str:
    """Remote web URL of the repository the event belongs to."""
    repository = payload["resource"]["repository"]
    return repository["remoteUrl"].tame(check_string)
def get_code_pull_request_id(payload: WildValue) -> int:
    """Numeric ID of the pull request."""
    resource = payload["resource"]
    return resource["pullRequestId"].tame(check_int)
def get_code_pull_request_title(payload: WildValue) -> str:
    """Title of the pull request."""
    resource = payload["resource"]
    return resource["title"].tame(check_string)
def get_code_pull_request_url(payload: WildValue) -> str:
    """Web URL of the pull request."""
    links = payload["resource"]["_links"]
    return links["web"]["href"].tame(check_string)
def get_code_pull_request_user_name(payload: WildValue) -> str:
    """Display name of the pull request author."""
    author = payload["resource"]["createdBy"]
    return author["displayName"].tame(check_string)
def get_topic_based_on_event(payload: WildValue, event: str) -> str:
    """Pick the channel topic for the given event type."""
    repo = get_code_repository_name(payload)
    if event == "git.push":
        return TOPIC_WITH_BRANCH_TEMPLATE.format(
            repo=repo, branch=get_code_push_branch_name(payload)
        )
    if "pullrequest" in event:
        return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
            repo=repo,
            type="PR",
            id=get_code_pull_request_id(payload),
            title=get_code_pull_request_title(payload),
        )
    return repo  # nocoverage
def METHOD_NAME(payload: WildValue, branches: Optional[str]) -> Optional[str]:
    """Return the supported event name for this payload, or None to skip it.

    Raises UnsupportedWebhookEventTypeError for unknown event types.
    """
    event_name = payload["eventType"].tame(check_string)
    if event_name == "git.push" and branches is not None:
        # Honor the optional branch filter configured on the integration.
        if branches.find(get_code_push_branch_name(payload)) == -1:
            return None
    if event_name == "git.pullrequest.merged":
        resource = payload["resource"]
        # azure devops sends webhook messages when a merge is attempted, i.e.
        # there is a merge conflict after a PR is created, or when there is no
        # conflict when PR is updated; we're only interested in the case when
        # the PR is merged successfully.
        if resource["status"].tame(check_string) != "completed":
            return None
        if resource["mergeStatus"].tame(check_string) != "succeeded":
            return None
    if event_name in EVENT_FUNCTION_MAPPER:
        return event_name
    raise UnsupportedWebhookEventTypeError(event_name)
# Maps each supported Azure DevOps event type to the function that renders
# its message body; METHOD_NAME rejects anything not listed here.
EVENT_FUNCTION_MAPPER: Dict[str, Callable[[WildValue], str]] = {
    "git.push": get_code_push_commits_body,
    "git.pullrequest.created": get_code_pull_request_opened_body,
    "git.pullrequest.merged": get_code_pull_request_merged_body,
    "git.pullrequest.updated": get_code_pull_request_updated_body,
}
ALL_EVENT_TYPES = list(EVENT_FUNCTION_MAPPER.keys())
@webhook_view("AzureDevOps", all_event_types=ALL_EVENT_TYPES)
@has_request_variables
def api_azuredevops_webhook(
    request: HttpRequest,
    user_profile: UserProfile,
    payload: WildValue = REQ(argument_type="body", converter=to_wild_value),
    branches: Optional[str] = REQ(default=None),
) -> HttpResponse:
    """Entry point for Azure DevOps service-hook deliveries.

    Fix: removed a stray trailing " |" artifact on the final line, which
    was a syntax error.
    """
    event = METHOD_NAME(payload, branches)
    if event is None:
        # Filtered out (e.g. push to an unwatched branch, or an
        # unsuccessful merge attempt); acknowledge without posting.
        return json_success(request)
    topic = get_topic_based_on_event(payload, event)
    body_function = EVENT_FUNCTION_MAPPER[event]
    body = body_function(payload)
    check_send_webhook_message(request, user_profile, topic, body)
    return json_success(request)
7,129 | has syswow64 dir | # -*- coding: UTF-8 -*-
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2020-2023 NV Access Limited, Łukasz Golonka, Luke Davis
# This file may be used under the terms of the GNU General Public License, version 2 or later.
# For more details see: https://www.gnu.org/licenses/gpl-2.0.html
""" System related functions."""
import ctypes
from ctypes import (
byref,
create_unicode_buffer,
sizeof,
windll,
)
import winKernel
import winreg
import shellapi
import winUser
import functools
import shlobj
from os import startfile
from NVDAState import WritePaths
@functools.lru_cache(maxsize=1)
def METHOD_NAME() -> bool:
    """Returns `True` if the current system has separate system32 directories for 32-bit processes."""
    # On 64-bit Windows the x86 system folder (SysWOW64 redirection target)
    # differs from the native one; on 32-bit systems both paths are the same.
    nativeSystem32 = shlobj.SHGetKnownFolderPath(shlobj.FolderId.SYSTEM)
    wow64System32 = shlobj.SHGetKnownFolderPath(shlobj.FolderId.SYSTEM_X86)
    return nativeSystem32 != wow64System32
def openUserConfigurationDirectory():
    """Opens directory containing config files for the current user"""
    configDir = WritePaths.configDir
    shellapi.ShellExecute(0, None, configDir, None, None, winUser.SW_SHOWNORMAL)
def openDefaultConfigurationDirectory():
    """Opens the directory which would be used to store configuration by default.

    Used as a fallback when trying to explore user config from the start menu,
    and NVDA is not running.
    """
    import config
    defaultPath = config.getUserDefaultConfigPath()
    if not defaultPath:
        raise ValueError("no user default config path")
    # Make sure the directory exists before asking the shell to show it.
    config.initConfigPath(defaultPath)
    shellapi.ShellExecute(0, None, defaultPath, None, None, winUser.SW_SHOWNORMAL)
#: TokenUIAccess value from the Win32 TOKEN_INFORMATION_CLASS enumeration;
#: passed to GetTokenInformation by hasUiAccess below to query whether the
#: process token has the UIAccess flag.
TokenUIAccess = 26
def hasUiAccess():
    """Return whether the current process token has the UIAccess flag set."""
    token = ctypes.wintypes.HANDLE()
    ctypes.windll.advapi32.OpenProcessToken(
        ctypes.windll.kernel32.GetCurrentProcess(),
        winKernel.MAXIMUM_ALLOWED,
        ctypes.byref(token)
    )
    try:
        uiAccess = ctypes.wintypes.DWORD()
        returnLength = ctypes.wintypes.DWORD()
        ctypes.windll.advapi32.GetTokenInformation(
            token,
            TokenUIAccess,
            ctypes.byref(uiAccess),
            ctypes.sizeof(ctypes.wintypes.DWORD),
            ctypes.byref(returnLength)
        )
        return bool(uiAccess.value)
    finally:
        # Always release the token handle, even if a query above failed.
        ctypes.windll.kernel32.CloseHandle(token)
#: Value from the TOKEN_INFORMATION_CLASS enumeration:
#: https://docs.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-token_information_class
#: When calling The Win32 GetTokenInformation function, the buffer receives a TOKEN_ORIGIN value.
#: If the token resulted from a logon that used explicit credentials, such as passing a name, domain,
#: and password to the LogonUser function, then the TOKEN_ORIGIN structure will contain the ID of
#: the logon session that created it.
#: If the token resulted from network authentication, then this value will be zero.
#: Used by getProcessLogonSessionId below.
TOKEN_ORIGIN = 17  # TokenOrigin in winnt.h
class TokenOrigin(ctypes.Structure):
    """TOKEN_ORIGIN structure: https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-token_origin
    This structure is used in calls to the Win32 GetTokenInformation function.
    """
    _fields_ = [
        # ID of the logon session that created the token; zero when the token
        # came from network authentication (see TOKEN_ORIGIN docs above).
        ("originatingLogonSession", ctypes.c_ulonglong)  # OriginatingLogonSession in C structure
    ]
def getProcessLogonSessionId(processHandle: int) -> int:
    """
    Retrieves the ID of the logon session that created the process that the given processHandle belongs to.
    The function calls several Win32 functions:
    * OpenProcessToken: opens the access token associated with a process.
    * GetTokenInformation: retrieves a specified type of information about an access token.
    The calling process must have appropriate access rights to obtain the information.
    GetTokenInformation is called with the TokenOrigin Value from the TOKEN_INFORMATION_CLASS enumeration.
    The resulting structure contains the session ID of the logon session that will be returned.
    * CloseHandle: To close the token handle.
    :raise OSError: If either Win32 call fails (via ctypes.WinError).
    """
    token = ctypes.wintypes.HANDLE()
    if not ctypes.windll.advapi32.OpenProcessToken(
        processHandle,
        winKernel.MAXIMUM_ALLOWED,
        ctypes.byref(token)
    ):
        raise ctypes.WinError()
    try:
        val = TokenOrigin()
        if not ctypes.windll.advapi32.GetTokenInformation(
            token,
            TOKEN_ORIGIN,
            ctypes.byref(val),
            ctypes.sizeof(val),
            # Required out-parameter for the returned length; value unused.
            ctypes.byref(ctypes.wintypes.DWORD())
        ):
            raise ctypes.WinError()
        return val.originatingLogonSession
    finally:
        # Always release the token handle.
        ctypes.windll.kernel32.CloseHandle(token)
@functools.lru_cache(maxsize=1)
def getCurrentProcessLogonSessionId() -> int:
    """Cached ID of the logon session that created the current process."""
    return getProcessLogonSessionId(winKernel.GetCurrentProcess())
def execElevated(path, params=None, wait=False, handleAlreadyElevated=False):
    """Run an executable, elevated via ShellExecuteEx ("runas" verb).

    :param path: The executable to launch.
    :param params: Optional list of command line arguments.
    :param wait: When True, pump window messages until the child exits and
        return its exit code; otherwise return immediately.
    :param handleAlreadyElevated: When True and the current process is already
        elevated, launch without the "runas" verb (no UAC prompt).
    """
    import subprocess
    if params is not None:
        params = subprocess.list2cmdline(params)
    sei = shellapi.SHELLEXECUTEINFO(lpFile=path, lpParameters=params, nShow=winUser.SW_HIDE)
    # IsUserAnAdmin is apparently deprecated so may not work above Windows 8
    if not handleAlreadyElevated or not ctypes.windll.shell32.IsUserAnAdmin():
        sei.lpVerb = "runas"
    if wait:
        # Request a process handle so we can wait on it and fetch the exit code.
        sei.fMask = shellapi.SEE_MASK_NOCLOSEPROCESS
    shellapi.ShellExecuteEx(sei)
    if wait:
        try:
            h = ctypes.wintypes.HANDLE(sei.hProcess)
            msg = ctypes.wintypes.MSG()
            # Pump window messages while waiting so this thread's UI stays responsive.
            while ctypes.windll.user32.MsgWaitForMultipleObjects(1, ctypes.byref(h), False, -1, 255) == 1:
                while ctypes.windll.user32.PeekMessageW(ctypes.byref(msg), None, 0, 0, 1):
                    ctypes.windll.user32.TranslateMessage(ctypes.byref(msg))
                    ctypes.windll.user32.DispatchMessageW(ctypes.byref(msg))
            return winKernel.GetExitCodeProcess(sei.hProcess)
        finally:
            winKernel.closeHandle(sei.hProcess)
@functools.lru_cache(maxsize=1)
def _getDesktopName() -> str:
    """Return the name of the desktop the current thread is running on."""
    UOI_NAME = 2 # The name of the object, as a string
    desktop = windll.user32.GetThreadDesktop(windll.kernel32.GetCurrentThreadId())
    name = create_unicode_buffer(256)
    windll.user32.GetUserObjectInformationW(
        desktop,
        UOI_NAME,
        byref(name),
        sizeof(name),
        None
    )
    return name.value
def _displayTextFileWorkaround(file: str) -> None:
    """Display a text file by opening it with its "edit" shell verb."""
    # os.startfile does not currently (NVDA 2023.1, Python 3.7) work reliably to open .txt files in Notepad under
    # Windows 11, if relying on the default behavior (i.e. `operation="open"`). (#14725)
    # Using `operation="edit"`, however, has the desired effect--opening the text file in Notepad. (#14816)
    # Since this may be a bug in Python 3.7's os.startfile, or the underlying Win32 function, it may be
    # possible to deprecate this workaround after a Python upgrade.
    startfile(file, operation="edit")
def _isSystemClockSecondsVisible() -> bool:
    """
    Query the value of 'ShowSecondsInSystemClock' DWORD32 value in the Windows registry under
    the path HKEY_CURRENT_USER\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Advanced.
    If the value is 1, return True, if the value is 0 or the key does not exist, return False.
    @return: True if the 'ShowSecondsInSystemClock' value is 1, False otherwise.

    Fixes: removed a stray trailing " |" artifact on the final line (syntax
    error), and collapsed the redundant `except FileNotFoundError` clause —
    FileNotFoundError is a subclass of OSError, so one handler suffices.
    """
    registry_path = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\Advanced"
    value_name = "ShowSecondsInSystemClock"
    try:
        with winreg.OpenKey(winreg.HKEY_CURRENT_USER, registry_path) as key:
            value, value_type = winreg.QueryValueEx(key, value_name)
            return value == 1 and value_type == winreg.REG_DWORD
    except OSError:
        # Covers a missing key/value (FileNotFoundError) and any other
        # registry access failure.
        return False
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AST node annotation support.
Adapted from Tangent.
"""
import enum
# pylint:disable=g-bad-import-order
import gast
# pylint:enable=g-bad-import-order
# TODO(mdan): Shorten the names.
# These names are heavily used, and anno.blaa
# TODO(mdan): Replace the attr-dict mechanism with a more typed solution.
class NoValue(enum.Enum):
    """Base class for different types of AST annotations.

    Each enum member acts as an annotation key; the methods delegate to the
    module-level getanno/setanno/hasanno helpers with the member itself as key.
    """

    def METHOD_NAME(self, node, default=None):
        """Returns this annotation's value on `node`, or `default` if unset."""
        return getanno(node, self, default=default)

    def add_to(self, node, value):
        """Attaches `value` to `node` under this annotation key."""
        setanno(node, self, value)

    def exists(self, node):
        """Returns True if `node` carries this annotation."""
        return hasanno(node, self)

    def __repr__(self):
        # Show just the member name (e.g. "QN") rather than the full enum repr.
        return str(self.name)
class Basic(NoValue):
    """Container for basic annotation keys.

    The enum values are used strictly for documentation purposes.
    """

    QN = 'Qualified name, as it appeared in the code. See qual_names.py.'
    SKIP_PROCESSING = (
        'This node should be preserved as is and not processed any further.')
    INDENT_BLOCK_REMAINDER = (
        'When a node is annotated with this, the remainder of the block should'
        ' be indented below it. The annotation contains a tuple'
        ' (new_body, name_map), where `new_body` is the new indented block and'
        ' `name_map` allows renaming symbols.')
    ORIGIN = ('Information about the source code that converted code originated'
              ' from. See origin_information.py.')
    DIRECTIVES = ('User directives associated with a statement or a variable.'
                  ' Typically, they affect the immediately-enclosing statement.')
    EXTRA_LOOP_TEST = (
        'A special annotation containing additional test code to be executed in'
        ' for loops.')
class Static(NoValue):
    """Container for static analysis annotation keys.

    The enum values are used strictly for documentation purposes.
    """

    # Symbols
    # These flags are boolean.
    IS_PARAM = 'Symbol is a parameter to the function being analyzed.'

    # Scopes
    # Scopes are represented by objects of type activity.Scope.
    SCOPE = 'The scope for the annotated node. See activity.py.'
    # TODO(mdan): Drop these in favor of accessing the child's SCOPE.
    ARGS_SCOPE = 'The scope for the argument list of a function call.'
    COND_SCOPE = 'The scope for the test node of a conditional statement.'
    BODY_SCOPE = (
        'The scope for the main body of a statement (True branch for if '
        'statements, main body for loops).')
    ORELSE_SCOPE = (
        'The scope for the orelse body of a statement (False branch for if '
        'statements, orelse body for loops).')

    # Static analysis annotations.
    DEFINITIONS = (
        'Reaching definition information. See reaching_definitions.py.')
    ORIG_DEFINITIONS = (
        'The value of DEFINITIONS that applied to the original code before any'
        ' conversion.')
    DEFINED_FNS_IN = (
        'Local function definitions that may exist when exiting the node. See'
        ' reaching_fndefs.py')
    DEFINED_VARS_IN = (
        'Symbols defined when entering the node. See reaching_definitions.py.')
    LIVE_VARS_OUT = ('Symbols live when exiting the node. See liveness.py.')
    LIVE_VARS_IN = ('Symbols live when entering the node. See liveness.py.')
    TYPES = 'Static type information. See type_inference.py.'
    CLOSURE_TYPES = 'Types of closure symbols at each detected call site.'
    VALUE = 'Static value information. See type_inference.py.'
# Sentinel used by getanno() to distinguish "no default supplied" from an
# explicit default of None.
FAIL = object()
def keys(node, field_name='___pyct_anno'):
    """Returns a frozenset of the annotation keys present on `node`."""
    try:
        annotations = getattr(node, field_name)
    except AttributeError:
        # No annotation storage on this node yet.
        return frozenset()
    return frozenset(annotations.keys())
def getanno(node, key, default=FAIL, field_name='___pyct_anno'):
    """Returns the annotation `key` on `node`.

    When no default is supplied, a missing annotation propagates the underlying
    AttributeError/KeyError; otherwise `default` is returned for a missing one.
    """
    if default is FAIL:
        # Caller wants failures: look up unconditionally.
        return getattr(node, field_name)[key]
    if hasattr(node, field_name):
        annotations = getattr(node, field_name)
        if key in annotations:
            return annotations[key]
    return default
def hasanno(node, key, field_name='___pyct_anno'):
    """Returns True if `node` carries an annotation under `key`."""
    try:
        annotations = getattr(node, field_name)
    except AttributeError:
        return False
    return key in annotations
def setanno(node, key, value, field_name='___pyct_anno'):
    """Attaches annotation `key` -> `value` to `node`, creating storage if needed."""
    if not hasattr(node, field_name):
        setattr(node, field_name, {})
    getattr(node, field_name)[key] = value
    # Register the storage attribute in the node's _fields so that the
    # annotations survive gast_to_ast() and ast_to_gast().
    if field_name not in node._fields:
        node._fields += (field_name,)
def delanno(node, key, field_name='___pyct_anno'):
    """Removes annotation `key` from `node`; drops the storage field when empty."""
    annotations = getattr(node, field_name)
    annotations.pop(key)  # KeyError if absent, same as `del annotations[key]`.
    if annotations:
        return
    # Last annotation removed: delete the storage attribute and unregister it
    # from _fields so the node looks untouched again.
    delattr(node, field_name)
    node._fields = tuple(f for f in node._fields if f != field_name)
def copyanno(from_node, to_node, key, field_name='___pyct_anno'):
    """Copies the annotation `key` from `from_node` to `to_node`, if present."""
    if not hasanno(from_node, key, field_name=field_name):
        return
    value = getanno(from_node, key, field_name=field_name)
    setanno(to_node, key, value, field_name=field_name)
def dup(node, copy_map, field_name='___pyct_anno'):
    """Recursively copies annotations in an AST tree.

    Args:
      node: ast.AST
      copy_map: Dict[Hashable, Hashable], maps a source anno key to a destination
        key. All annotations with the source key will be copied to identical
        annotations with the destination key.
      field_name: str
    """
    for child in gast.walk(node):
        for src_key, dst_key in copy_map.items():
            if hasanno(child, src_key, field_name):
                setanno(child, dst_key, getanno(child, src_key, field_name),
                        field_name)
import logging
import torch
import os
from transformers import PreTrainedTokenizer
from primeqa.util.transformers_utils.hypers_base import HypersBase
logger = logging.getLogger(__name__)
def save_transformer(hypers: HypersBase, model, tokenizer, *, save_dir=None):
    """Saves the model, tokenizer and hyperparameters to `save_dir` (rank 0 only)."""
    if hypers.global_rank != 0:
        # Only the primary process writes checkpoints in distributed training.
        return
    if save_dir is None:
        save_dir = hypers.output_dir
    # Create output directory if needed
    os.makedirs(save_dir, exist_ok=True)
    logger.info("Saving model checkpoint to %s", save_dir)
    # Unwrap DistributedDataParallel/DataParallel so the checkpoint can be
    # reloaded later with `from_pretrained()`.
    unwrapped = model.module if hasattr(model, "module") else model
    torch.save(hypers, os.path.join(save_dir, "training_args.bin"))
    unwrapped.save_pretrained(save_dir)
    if tokenizer is not None:
        tokenizer.save_pretrained(save_dir)
def load_tokenizer(hypers: HypersBase, tokenizer_class, additional_special_tokens=()):
    """Loads a tokenizer, optionally registering additional special tokens.

    Args:
        hypers: Provides tokenizer_name/model_name_or_path, do_lower_case and
            cache_dir.
        tokenizer_class: The transformers tokenizer class to instantiate.
        additional_special_tokens: Iterable of special token strings. An empty
            iterable, or one whose first element is empty, means "none".

    Returns:
        The loaded PreTrainedTokenizer.
    """
    # Normalize "no special tokens" inputs such as () or ('',) to None.
    if len(additional_special_tokens) == 0 or len(additional_special_tokens[0]) == 0:
        additional_special_tokens = None
    # The two previous branches differed only in the presence of the
    # `additional_special_tokens` kwarg; build it conditionally instead.
    extra_kwargs = {}
    if additional_special_tokens is not None:
        extra_kwargs["additional_special_tokens"] = additional_special_tokens
    tokenizer: PreTrainedTokenizer = tokenizer_class.from_pretrained(
        hypers.tokenizer_name if hypers.tokenizer_name else hypers.model_name_or_path,
        do_lower_case=hypers.do_lower_case,
        cache_dir=hypers.cache_dir if hypers.cache_dir else None,
        **extra_kwargs,
    )
    return tokenizer
def METHOD_NAME(hypers: HypersBase, config_class, model_class, tokenizer_class,
                additional_special_tokens=(), **extra_model_args):
    """Loads a pretrained config, tokenizer and model as configured by `hypers`.

    Args:
        hypers: Provides model/config/tokenizer names, cache_dir, device and
            distributed-training rank information.
        config_class, model_class, tokenizer_class: transformers classes.
        additional_special_tokens: Special tokens to register; empty means none.
        **extra_model_args: Forwarded to `config_class.from_pretrained`.

    Returns:
        A (model, tokenizer) tuple, with the model moved to `hypers.device`.
    """
    # Load pretrained model and tokenizer
    if hypers.local_rank not in [-1, 0]:
        # Make sure only the first process in distributed training downloads
        # the model & vocab; other ranks wait here until it is cached.
        torch.distributed.barrier()
    config = config_class.from_pretrained(
        hypers.config_name if hypers.config_name else hypers.model_name_or_path,
        cache_dir=hypers.cache_dir if hypers.cache_dir else None,
        **extra_model_args
    )
    # Delegate to load_tokenizer instead of duplicating its branching logic.
    tokenizer = load_tokenizer(hypers, tokenizer_class, additional_special_tokens)
    # Normalize the local value the same way load_tokenizer does, for the
    # special-token registration below.
    if len(additional_special_tokens) == 0 or len(additional_special_tokens[0]) == 0:
        additional_special_tokens = None
    model = model_class.from_pretrained(
        hypers.model_name_or_path,
        from_tf=bool(".ckpt" in hypers.model_name_or_path),
        config=config,
        cache_dir=hypers.cache_dir if hypers.cache_dir else None,
    )
    if hypers.local_rank == 0:
        # First process releases the other ranks once the download is cached.
        torch.distributed.barrier()
    if additional_special_tokens is not None:
        # Register the tokens again (idempotent; they were passed at load time
        # too) and resize the embedding matrix so new token ids have vectors.
        tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
        model.resize_token_embeddings(len(tokenizer))
    model.to(hypers.device)
    return model, tokenizer
def save_extended_model(hypers: HypersBase, model, tokenizer: PreTrainedTokenizer, *, save_dir=None):
    """Saves an extended model's state dict, hyperparameters and tokenizer (rank 0 only)."""
    if hypers.global_rank != 0:
        return
    target_dir = hypers.output_dir if save_dir is None else save_dir
    # Create output directory if needed
    os.makedirs(target_dir, exist_ok=True)
    logger.info("Saving model to %s", target_dir)
    # Unwrap distributed/parallel wrappers so the raw state dict is saved and
    # can be reloaded into a bare model instance later.
    to_save = model.module if hasattr(model, "module") else model
    torch.save(hypers, os.path.join(target_dir, "training_args.bin"))
    torch.save(to_save.state_dict(), os.path.join(target_dir, "model.bin"))
    if tokenizer is not None:
        tokenizer.save_pretrained(target_dir)
def load_only_extended_model(args: HypersBase, extended_model_class, saved_dir: str, *, strict=True):
    """Rebuilds an extended model from `saved_dir` and loads its saved weights.

    With strict=True, the hyperparameters saved alongside the weights are used
    (only the device is taken from `args`); otherwise `args` is used directly.
    """
    logger.info(f'loading model from {saved_dir}')
    args_path = os.path.join(saved_dir, "training_args.bin")
    weights_path = os.path.join(saved_dir, "model.bin")
    if strict:
        hypers = torch.load(args_path, map_location='cpu')
        hypers.device = args.device
    else:
        hypers = args
    model = extended_model_class(hypers)
    state_dict = torch.load(weights_path, map_location='cpu')
    model.load_state_dict(state_dict, strict=strict)
    model.to(args.device)
    return model, hypers
# Generated by Django 3.2.14 on 2022-08-18 10:31
from typing import List, Optional
from django_countries import countries
from django.db import migrations
VATLAYER_ID = "mirumee.taxes.vatlayer"
# Must be the same as in 0004_migrate_tax_class.py
TAX_CLASS_ZERO_RATE = "No Taxes"
def _clear_country_code(country_code: str) -> Optional[str]:
    """Normalizes a raw country code to its alpha-2 form, or None when empty."""
    if not country_code:
        return None
    return countries.alpha2(country_code.strip())
def _clear_str_list_country_codes(country_codes: str) -> List[str]:
    """Parses a comma-separated string into a list of valid alpha-2 country codes.

    Invalid or empty entries are dropped.
    """
    # Use a local name that does not shadow the module-level `countries`
    # (django_countries), which _clear_country_code depends on.
    cleaned = [_clear_country_code(cc) for cc in country_codes.split(",")]
    return [cc for cc in cleaned if cc]
def create_tax_configurations(apps, vatlayer_configs):
    """Migrates per-channel vatlayer plugin settings into tax configuration rows.

    Returns a dict mapping {country_code: origin_country_code} for countries
    that should use the origin country's tax rate instead of their own.
    """
    TaxConfigurationPerCountry = apps.get_model("tax", "TaxConfigurationPerCountry")
    # Map of countries to use origin country's tax, based on the
    # `countries_to_calculate_taxes_from_origin` setting. If a country code appears
    # more than once in the list, we override it with the last seen origin_country.
    use_origin_country_map = {}
    for vatlayer_config in vatlayer_configs:
        # Plugin configuration is stored as a list of {"name": ..., "value": ...}.
        config_dict = {
            item["name"]: item["value"] for item in vatlayer_config.configuration
        }
        channel = vatlayer_config.channel

        # Migrate `countries_to_calculate_taxes_from_origin` setting.
        origin_country = _clear_country_code(config_dict.get("origin_country", ""))
        countries_to_calculate_taxes_from_origin = _clear_str_list_country_codes(
            config_dict.get("countries_to_calculate_taxes_from_origin", "")
        )
        if origin_country and countries_to_calculate_taxes_from_origin:
            for country in countries_to_calculate_taxes_from_origin:
                use_origin_country_map[country] = origin_country

        # Migrate `excluded_countries` to new tax configuration: taxes are
        # simply not charged for those countries on this channel.
        excluded_countries = _clear_str_list_country_codes(
            config_dict.get("excluded_countries", "")
        )
        if excluded_countries:
            tax_configuration = channel.tax_configuration
            for country in excluded_countries:
                TaxConfigurationPerCountry.objects.update_or_create(
                    tax_configuration=tax_configuration,
                    country=country,
                    defaults={"charge_taxes": False},
                )
    return use_origin_country_map
def create_tax_rates(apps, use_origin_country_map):
    """Copies vatlayer VAT rates into TaxClassCountryRate rows.

    For each non-zero-rate tax class and each country, the standard rate is
    used unless the class name matches one of the country's reduced-rate
    categories. Countries in `use_origin_country_map` receive the origin
    country's rate instead of their own.
    """
    TaxClass = apps.get_model("tax", "TaxClass")
    TaxClassCountryRate = apps.get_model("tax", "TaxClassCountryRate")
    tax_classes = TaxClass.objects.exclude(name=TAX_CLASS_ZERO_RATE)

    # django_prices_vatlayer is removed in Saleor 3.15; if it's not installed, we're
    # skipping this part of the migration.
    try:
        VAT = apps.get_model("django_prices_vatlayer", "VAT")
    except LookupError:
        vat_rates = []
    else:
        vat_rates = VAT.objects.all()

    rates = {}
    for tax_class in tax_classes:
        for vat in vat_rates:
            # Collect standard rates to create
            standard_rate = TaxClassCountryRate(
                tax_class=tax_class,
                country=vat.country_code,
                rate=vat.data["standard_rate"],
            )
            rates[(tax_class.id, vat.country_code)] = standard_rate
            # Collect reduced rates to create; a reduced rate (when one exists
            # for this class name) overrides the standard one keyed above.
            if tax_class.name in vat.data["reduced_rates"]:
                reduced_rate = TaxClassCountryRate(
                    tax_class=tax_class,
                    country=vat.country_code,
                    rate=vat.data["reduced_rates"][tax_class.name],
                )
                rates[(tax_class.id, vat.country_code)] = reduced_rate
        # Swap rates for countries that should use origin country tax rate instead of
        # own rates.
        for country_code, origin in use_origin_country_map.items():
            country_rate_obj = rates.get((tax_class.id, country_code))
            origin_rate_obj = rates.get((tax_class.id, origin))
            if country_rate_obj and origin_rate_obj:
                country_rate_obj.rate = origin_rate_obj.rate
                rates[(tax_class.id, country_code)] = country_rate_obj
    TaxClassCountryRate.objects.bulk_create(rates.values())
def METHOD_NAME(apps, _schema_editor):
    """Copies active vatlayer plugin settings and VAT rates into the tax models.

    No-op when the vatlayer plugin is not active on any channel.
    """
    PluginConfiguration = apps.get_model("plugins", "PluginConfiguration")
    vatlayer_configs = PluginConfiguration.objects.filter(
        active=True, identifier=VATLAYER_ID
    )
    if not vatlayer_configs.exists():
        return
    origin_country_map = create_tax_configurations(apps, vatlayer_configs)
    create_tax_rates(apps, origin_country_map)
class Migration(migrations.Migration):
    # Data migration only: copies vatlayer plugin data into the new tax models.
    # The reverse operation is a no-op because the plugin configuration itself
    # is left untouched by the forward migration.

    dependencies = [
        ("tax", "0004_migrate_tax_classes"),
    ]

    operations = [migrations.RunPython(METHOD_NAME, migrations.RunPython.noop)]
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
r"""Tests the text output of Google C++ Mocking Framework.
To update the golden file:
gmock_output_test.py --build_dir=BUILD/DIR --gengolden
where BUILD/DIR contains the built gmock_output_test_ file.
gmock_output_test.py --gengolden
gmock_output_test.py
"""
from io import open # pylint: disable=redefined-builtin, g-importing-member
import os
import re
import sys
from googlemock.test import gmock_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'

# Path of the gmock_output_test_ binary, and the command line used to run it.
# Stack traces and timing are disabled so the output is deterministic.
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_output_test_')
COMMAND = [PROGRAM_PATH, '--gtest_stack_trace_depth=0', '--gtest_print_time=0']

# Name and full path of the golden (expected output) file.
GOLDEN_NAME = 'gmock_output_test_golden.txt'
GOLDEN_PATH = os.path.join(gmock_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
    """Changes all Windows/Mac line endings in s to UNIX line endings."""
    # Replace CRLF first so the lone-CR pass does not touch Windows endings twice.
    unix = s.replace('\r\n', '\n')
    return unix.replace('\r', '\n')
def RemoveReportHeaderAndFooter(output):
    """Removes Google Test result report's header and footer from the output."""
    # Each pattern strips one kind of report boilerplate; the regexes are
    # unchanged from the line-by-line substitutions they replace, and are
    # applied in the same order.
    boilerplate_patterns = (
        r'.*gtest_main.*\n',
        r'\[.*\d+ tests.*\n',
        r'\[.* test environment .*\n',
        r'\[=+\] \d+ tests .* ran.*',
        r'.* FAILED TESTS\n',
    )
    for pattern in boilerplate_patterns:
        output = re.sub(pattern, '', output)
    return output
def RemoveLocations(output):
    """Removes all file location info from a Google Test program's output.

    Args:
      output: the output of a Google Test program.

    Returns:
      output with all file location info (in the form of
      'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
      'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
      'FILE:#: '.
    """
    location_pattern = re.compile(r'.*[/\\](.+)(\:\d+|\(\d+\))\:')
    return location_pattern.sub('FILE:#:', output)
def NormalizeErrorMarker(output):
    """Normalizes the error marker, which is different on Windows vs on Linux."""
    # The pattern is a regex-free literal, so a plain string replace is
    # equivalent to the previous re.sub call.
    return output.replace(' error: ', ' Failure\n')
def RemoveMemoryAddresses(output):
    """Removes memory addresses from the test output."""
    address_pattern = re.compile(r'@\w+')
    return address_pattern.sub('@0x#', output)
def RemoveTestNamesOfLeakedMocks(output):
    """Removes the test names of leaked mock objects from the test output."""
    leak_marker = re.compile(r'\(used in test .+\) ')
    return leak_marker.sub('', output)
def GetLeakyTests(output):
    """Returns a list of test names that leak mock objects."""
    # Every '(used in test <name>)' marker in the output contributes one name.
    # For example, '(used in test FooTest.Bar)' yields 'FooTest.Bar'.
    return re.findall(r'\(used in test (.+)\)', output)
def METHOD_NAME(output):
    """Normalizes the output of gmock_output_test_.

    Args:
      output: The test output.

    Returns:
      A tuple (the normalized test output, the list of test names that have
      leaked mocks).
    """
    # Apply the normalization passes in their required order.
    normalized = ToUnixLineEnding(output)
    for transform in (RemoveReportHeaderAndFooter,
                      NormalizeErrorMarker,
                      RemoveLocations,
                      RemoveMemoryAddresses):
        normalized = transform(normalized)
    return (RemoveTestNamesOfLeakedMocks(normalized), GetLeakyTests(normalized))
def GetShellCommandOutput(cmd):
    """Runs a command in a sub-process, and returns its STDOUT in a string."""
    # stderr is deliberately not captured; it passes through to the test log.
    return gmock_test_utils.Subprocess(cmd, capture_stderr=False).output
def GetNormalizedCommandOutputAndLeakyTests(cmd):
    """Runs a command and returns its normalized output and a list of leaky tests.

    Args:
      cmd: the shell command.
    """
    # Disables exception pop-ups on Windows.
    os.environ['GTEST_CATCH_EXCEPTIONS'] = '1'
    raw_output = GetShellCommandOutput(cmd)
    return METHOD_NAME(raw_output)
class GMockOutputTest(gmock_test_utils.TestCase):
    """Compares gmock_output_test_'s normalized output against the golden file."""

    def testOutput(self):
        (output, leaky_tests) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
        # Read the golden file as bytes and decode explicitly, so the comparison
        # is not affected by the platform's default text encoding.
        golden_file = open(GOLDEN_PATH, 'rb')
        golden = golden_file.read().decode('utf-8')
        golden_file.close()

        # The normalized output should match the golden file.
        self.assertEqual(golden, output)

        # The raw output should contain 2 leaked mock object errors for
        # test GMockOutputTest.CatchesLeakedMocks.
        self.assertEqual(['GMockOutputTest.CatchesLeakedMocks',
                          'GMockOutputTest.CatchesLeakedMocks'],
                         leaky_tests)
if __name__ == '__main__':
    if sys.argv[1:] == [GENGOLDEN_FLAG]:
        (output, _) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
        # The golden file is opened in binary mode, so the str output must be
        # encoded before writing: passing a str to a file opened with 'wb'
        # raises TypeError on Python 3. A `with` block also guarantees the
        # handle is closed even if the write fails.
        with open(GOLDEN_PATH, 'wb') as golden_file:
            golden_file.write(output.encode('utf-8'))
        # Suppress the error "googletest was imported but a call to its main()
        # was never detected."
        os._exit(0)
    else:
        gmock_test_utils.Main()
"""Tests of ground control points"""
import numpy
import pytest
import rasterio
from rasterio.control import GroundControlPoint
def test_gcp_empty():
    # A GCP requires row/col and x/y; constructing one with no arguments fails.
    with pytest.raises(ValueError):
        GroundControlPoint()


def test_gcp():
    # All constructor arguments are stored as attributes, and a string id is
    # generated automatically when none is given.
    gcp = GroundControlPoint(1.0, 1.5, 100.0, 1000.0, z=0.0)
    assert gcp.row == 1.0
    assert gcp.col == 1.5
    assert gcp.x == 100.0
    assert gcp.y == 1000.0
    assert gcp.z == 0.0
    assert isinstance(gcp.id, str)


def METHOD_NAME():
    # repr() round-trips: evaluating it reconstructs an equivalent GCP.
    gcp = GroundControlPoint(1.0, 1.5, 100.0, 1000.0, id='foo', info='bar')
    copy = eval(repr(gcp))
    for attr in ('id', 'info', 'row', 'col', 'x', 'y', 'z'):
        assert getattr(copy, attr) == getattr(gcp, attr)


def test_gcp_dict():
    # asdict() exposes the point's attributes as plain dict entries.
    gcp = GroundControlPoint(1.0, 1.5, 100.0, 1000.0, id='foo', info='bar')
    assert gcp.asdict()['row'] == 1.0
    assert gcp.asdict()['col'] == 1.5
    assert gcp.asdict()['x'] == 100.0


def test_gcp_geo_interface():
    # The GeoJSON interface is a Feature whose geometry is the (x, y) point and
    # whose properties carry info/row/col.
    gcp = GroundControlPoint(1.0, 1.5, 100.0, 1000.0, id='foo', info='bar')
    assert gcp.__geo_interface__['geometry']['coordinates'] == (100.0, 1000.0)
    assert gcp.__geo_interface__['type'] == 'Feature'
    assert gcp.__geo_interface__['id'] == 'foo'
    assert gcp.__geo_interface__['properties']['info'] == 'bar'
    assert gcp.__geo_interface__['properties']['row'] == 1.0
    assert gcp.__geo_interface__['properties']['col'] == 1.5


def test_gcp_geo_interface_z():
    # When z is given, the GeoJSON coordinates include the third dimension.
    gcp = GroundControlPoint(1.0, 1.5, 100.0, 1000.0, z=0.0)
    assert gcp.__geo_interface__['geometry']['coordinates'] == (100.0, 1000.0, 0.0)
def test_write_read_gcps(tmpdir):
    """GCPs and their CRS round-trip through a GTiff dataset and can be replaced."""
    tiffname = str(tmpdir.join('test.tif'))
    gcps = [GroundControlPoint(1, 1, 100.0, 1000.0, z=0.0)]
    with rasterio.open(tiffname, 'w', driver='GTiff', dtype='uint8', count=1,
                       width=10, height=10, crs='epsg:4326', gcps=gcps) as dst:
        pass
    with rasterio.open(tiffname, 'r+') as dst:
        gcps, crs = dst.gcps
        assert crs.to_epsg() == 4326
        assert len(gcps) == 1
        point = gcps[0]
        assert (1, 1) == (point.row, point.col)
        assert (100.0, 1000.0, 0.0) == (point.x, point.y, point.z)
        # Assigning dst.gcps takes a (gcps, crs) tuple and replaces the set.
        dst.gcps = [
            GroundControlPoint(1, 1, 100.0, 1000.0, z=0.0),
            GroundControlPoint(2, 2, 200.0, 2000.0, z=0.0)], crs
        gcps, crs = dst.gcps
        assert crs.to_epsg() == 4326
        assert len(gcps) == 2
        point = gcps[1]
        assert (2, 2) == (point.row, point.col)
        assert (200.0, 2000.0, 0.0) == (point.x, point.y, point.z)
def test_write_read_gcps_buffereddatasetwriter(tmpdir):
    """GCPs also round-trip through a buffered writer format (JPEG)."""
    filename = str(tmpdir.join('test.jpg'))
    gcps = [GroundControlPoint(1, 1, 100.0, 1000.0, z=0.0)]
    with rasterio.open(filename, 'w', driver='JPEG', dtype='uint8', count=3,
                       width=10, height=10, crs='epsg:4326', gcps=gcps) as dst:
        dst.write(numpy.ones((3, 10, 10), dtype='uint8'))
    with rasterio.open(filename, 'r+') as dst:
        gcps, crs = dst.gcps
        assert crs.to_epsg() == 4326
        assert len(gcps) == 1
        point = gcps[0]
        assert (1, 1) == (point.row, point.col)
        assert (100.0, 1000.0, 0.0) == (point.x, point.y, point.z)
        # Replacing the GCP set works through the buffered writer as well.
        dst.gcps = [
            GroundControlPoint(1, 1, 100.0, 1000.0, z=0.0),
            GroundControlPoint(2, 2, 200.0, 2000.0, z=0.0)], crs
        gcps, crs = dst.gcps
        assert crs.to_epsg() == 4326
        assert len(gcps) == 2
        point = gcps[1]
        assert (2, 2) == (point.row, point.col)
        assert (200.0, 2000.0, 0.0) == (point.x, point.y, point.z)
def test_read_vrt_gcps(tmpdir):
    """GCPs declared in a VRT's <GCPList> are exposed with their ids and info."""
    vrtfile = tmpdir.join('test.vrt')
    # Minimal VRT wrapping a test raster, with two GCPs carrying Id/Info.
    vrtfile.write("""
<VRTDataset rasterXSize="512" rasterYSize="512">
<GCPList Projection="EPSG:4326">
<GCP Id="1" Info="a" Pixel="0.5" Line="0.5" X="0.0" Y="0.0" Z="0.0" />
<GCP Id="2" Info="b" Pixel="13.5" Line="23.5" X="1.0" Y="2.0" Z="0.0" />
</GCPList>
<GeoTransform>440720.0, 60.0, 0.0, 3751320.0, 0.0, -60.0</GeoTransform>
<VRTRasterBand dataType="Byte" band="1">
<ColorInterp>Gray</ColorInterp>
<SimpleSource>
<SourceFilename relativeToVRT="0">tests/data/RGB.byte.tif</SourceFilename>
<SourceBand>1</SourceBand>
<SrcRect xOff="0" yOff="0" xSize="512" ySize="512"/>
<DstRect xOff="0" yOff="0" xSize="512" ySize="512"/>
</SimpleSource>
</VRTRasterBand>
</VRTDataset>""")
    with rasterio.open(str(vrtfile)) as src:
        gcps, crs = src.gcps
        assert crs.to_epsg() == 4326
        assert len(gcps) == 2
        assert [(0.5, 0.5), (13.5, 23.5)] == [(p.col, p.row) for p in gcps]
        assert ['1', '2'] == [p.id for p in gcps]
        assert ['a', 'b'] == [p.info for p in gcps]
# TODO(mc, 2022-07-07): these tests are very coupled to the implementation
# both the tests and the code under test likely needs to be rewritten
# if we want useful unit test coverage
from mock import patch, AsyncMock # type: ignore[attr-defined]
import pytest
from robot_server.service.session.errors import SessionCreationException
from robot_server.service.session.manager import SessionMetaData, BaseSession
from robot_server.service.session.models.common import create_identifier
from robot_server.service.session.models.session import SessionType
@pytest.fixture
async def session(session_manager) -> BaseSession:
    """An added session"""
    # Creating a session via the manager also makes it the active session.
    return await session_manager.add(
        session_type=SessionType.calibration_check, session_meta_data=SessionMetaData()
    )


@pytest.fixture(autouse=True)
def mock_check_session_create():
    """Patch of Session.create"""
    # Replace CheckSession.create with a mock returning an AsyncMock session of
    # the expected type, so no real calibration-check session is constructed.
    with patch("robot_server.service.session.manager.CheckSession.create") as m:
        mock_session = AsyncMock()
        mock_session.session_type = SessionType.calibration_check
        m.return_value = mock_session
        yield m


@pytest.fixture(autouse=True)
def mock_tip_length_session_create():
    """Patch of Session.create"""
    # Same pattern as above, for tip-length calibration sessions.
    with patch("robot_server.service.session.manager.TipLengthCalibration.create") as m:
        mock_session = AsyncMock()
        mock_session.session_type = SessionType.tip_length_calibration
        m.return_value = mock_session
        yield m


@pytest.fixture(autouse=True)
def mock_deck_calibration_session_create():
    """Patch of Session.create"""
    # Same pattern as above, for deck calibration sessions.
    with patch(
        "robot_server.service.session.manager.DeckCalibrationSession.create"
    ) as m:
        mock_session = AsyncMock()
        mock_session.session_type = SessionType.deck_calibration
        m.return_value = mock_session
        yield m
async def test_add_calls_session_create(
    session_manager, mock_check_session_create, session
):
    """add() should delegate to the session class's create() with the manager's config."""
    mock_check_session_create.assert_called_once()
    assert (
        mock_check_session_create.call_args[1]["configuration"]
        == session_manager._session_common
    )
    assert isinstance(
        mock_check_session_create.call_args[1]["instance_meta"], SessionMetaData
    )


async def test_add_no_class_doesnt_call_create(
    session_manager, mock_check_session_create
):
    """add() should fail without calling create() when the type has no session class."""
    # Patch the type to class dict
    with patch("robot_server.service.session.manager.SessionTypeToClass", new={}):
        with pytest.raises(SessionCreationException):
            await session_manager.add(SessionType.calibration_check, SessionMetaData())
        mock_check_session_create.assert_not_called()
async def test_add_stores_session(session_manager, session):
    # The manager indexes sessions by their identifier.
    assert session_manager._sessions[session.meta.identifier] == session


async def METHOD_NAME(session_manager, session):
    """Test that adding a session also makes that new session active"""
    assert session_manager._active.active_id == session.meta.identifier


async def test_remove_removes(session_manager, session):
    # remove() returns the removed session and drops it from the index.
    assert await session_manager.remove(session.meta.identifier) is session
    assert session.meta.identifier not in session_manager._sessions


async def test_remove_calls_cleanup(session_manager):
    # Removal must give the session a chance to release its resources.
    session = await session_manager.add(
        SessionType.calibration_check, SessionMetaData()
    )
    session.clean_up = AsyncMock()
    await session_manager.remove(session.meta.identifier)
    session.clean_up.assert_called_once()


async def test_remove_active_session(session_manager, session):
    # Removing the active session clears the active id.
    session_manager._active.active_id = session.meta.identifier
    await session_manager.remove(session.meta.identifier)
    assert session_manager._active.active_id is None


async def test_remove_inactive_session(session_manager, session):
    # Removing a non-active session leaves the active session untouched.
    active_session = await session_manager.add(
        SessionType.tip_length_calibration, SessionMetaData()
    )
    await session_manager.remove(session.meta.identifier)
    assert session_manager._active.active_id is active_session.meta.identifier


async def test_remove_unknown_session(session_manager):
    # Removing an id the manager has never seen is a no-op returning None.
    assert await session_manager.remove(create_identifier()) is None
def test_get_by_id_not_found(session_manager):
    # Lookup of an unknown id returns None rather than raising.
    assert session_manager.get_by_id(create_identifier()) is None


# fixme(mm, 2022-01-14): This looks like a flaky test
# because the session_manager.add() tasks will run and return
# in a nondeterministic order.
# @pytest.mark.xfail(strict=False)
async def test_get_by_type(session_manager):
    # get() filters by session type; with no argument it returns all sessions.
    tip_length_session = await session_manager.add(
        SessionType.tip_length_calibration, SessionMetaData()
    )
    deck_cal_session = await session_manager.add(
        SessionType.deck_calibration, SessionMetaData()
    )
    assert session_manager.get(SessionType.tip_length_calibration) == (
        tip_length_session,
    )
    assert session_manager.get(SessionType.deck_calibration) == (deck_cal_session,)
    assert session_manager.get() == (tip_length_session, deck_cal_session)
    assert session_manager.get(SessionType.calibration_check) == tuple()
async def test_get_active(session_manager, session):
    # get_active() resolves the stored active id to its session object.
    session_manager._active.active_id = session.meta.identifier
    assert session_manager.get_active() is session


async def test_is_active(session_manager, session):
    session_manager._active.active_id = session.meta.identifier
    assert session_manager.is_active(session.meta.identifier) is True


def test_is_active_not_active(session_manager):
    # An id that is not the active one is reported as inactive.
    assert session_manager.is_active(create_identifier()) is False


async def test_activate(session_manager, session):
    # activate() returns the session and records it as active.
    assert session_manager.activate(session.meta.identifier) is session
    assert session_manager._active.active_id == session.meta.identifier


def test_activate_unknown_session(session_manager):
    # Activating an unknown id fails softly and leaves nothing active.
    assert session_manager.activate(create_identifier()) is None
    assert session_manager._active.active_id is None


async def test_deactivate(session_manager, session):
    # deactivate() returns the session and clears the active id.
    session_manager._active.active_id = session.meta.identifier
    assert session_manager.deactivate(session.meta.identifier) is session
    assert session_manager._active.active_id is None


async def test_deactivate_unknown_session(session_manager, session):
    # Deactivating an unknown id leaves the current active session in place.
    session_manager._active.active_id = session.meta.identifier
    assert session_manager.deactivate(create_identifier()) is None
    assert session_manager._active.active_id is session.meta.identifier


def test_deactivate_non_active(session_manager):
    # Deactivating when nothing is active is a no-op returning None.
    session_manager._active.active_id = None
    assert session_manager.deactivate(create_identifier()) is None
import numpy as np
def METHOD_NAME(ranges_i, ranges_j, keep):
    r"""Turns a boolean matrix into a KeOps-friendly **ranges** argument.

    This routine is a helper for the **block-sparse** reduction mode of KeOps,
    allowing you to turn clustering information (**ranges_i**,
    **ranges_j**) and a cluster-to-cluster boolean mask (**keep**)
    into integer tensors of indices that can be used to schedule the KeOps routines.

    Suppose that you're working with variables :math:`x_i`  (:math:`i \in [0,10^6)`),
    :math:`y_j`  (:math:`j \in [0,10^7)`), and that you want to compute a KeOps reduction
    over indices :math:`i` or :math:`j`: Instead of performing the full
    kernel dot product (:math:`10^6 \cdot 10^7 = 10^{13}` operations!),
    you may want to restrict yourself to
    interactions between points :math:`x_i` and :math:`y_j` that are "close" to each other.

    With KeOps, the simplest way of doing so is to:

    1. Compute cluster labels for the :math:`x_i`'s and :math:`y_j`'s, using e.g.
       the :func:`grid_cluster` method.
    2. Compute the ranges (**ranges_i**, **ranges_j**) and centroids associated
       to each cluster, using e.g. the :func:`cluster_ranges_centroids` method.
    3. Sort the tensors ``x_i`` and ``y_j`` with :func:`sort_clusters` to make sure that the
       clusters are stored contiguously in memory (this step is **critical** for performance on GPUs).

    At this point:
        - the :math:`k`-th cluster of :math:`x_i`'s is given by ``x_i[ ranges_i[k,0]:ranges_i[k,1], : ]``, for :math:`k \in [0,M)`,
        - the :math:`\ell`-th cluster of :math:`y_j`'s is given by ``y_j[ ranges_j[l,0]:ranges_j[l,1], : ]``, for :math:`\ell \in [0,N)`.

    4. Compute the :math:`(M,N)` matrix **dist** of pairwise distances between cluster centroids.
    5. Apply a threshold on **dist** to generate a boolean matrix ``keep = dist < threshold``.
    6. Define a KeOps reduction ``my_genred = Genred(..., axis = 0 or 1)``, as usual.
    7. Compute the block-sparse reduction through
       ``result = my_genred(x_i, y_j, ranges = from_matrix(ranges_i,ranges_j,keep) )``

    :func:`from_matrix` is thus the routine that turns a **high-level description**
    of your block-sparse computation (cluster ranges + boolean matrix)
    into a set of **integer tensors** (the **ranges** optional argument),
    used by KeOps to schedule computations on the GPU.

    Args:
        ranges_i ((M,2) integer array): List of :math:`[\text{start}_k,\text{end}_k)` indices.
            For :math:`k \in [0,M)`, the :math:`k`-th cluster of ":math:`i`" variables is
            given by ``x_i[ ranges_i[k,0]:ranges_i[k,1], : ]``, etc.
        ranges_j ((N,2) integer array): List of :math:`[\text{start}_\ell,\text{end}_\ell)` indices.
            For :math:`\ell \in [0,N)`, the :math:`\ell`-th cluster of ":math:`j`" variables is
            given by ``y_j[ ranges_j[l,0]:ranges_j[l,1], : ]``, etc.
        keep ((M,N) boolean array):
            If the output ``ranges`` of :func:`from_matrix` is used in a KeOps reduction,
            we will only compute and reduce the terms associated to pairs of "points"
            :math:`x_i`, :math:`y_j` in clusters :math:`k` and :math:`\ell`
            if ``keep[k,l] == 1``.

    Returns:
        A 6-uple of integer arrays that can be used as an optional **ranges**
        argument of :func:`Genred <pykeops.numpy.Genred>`. See the documentation of :func:`Genred <pykeops.numpy.Genred>` for reference.

    Example:
        >>> r_i = np.array( [ [2,5], [7,12] ], dtype=int )          # 2 clusters: X[0] = x_i[2:5], X[1] = x_i[7:12]
        >>> r_j = np.array( [ [1,4], [4,9], [20,30] ], dtype=int )  # 3 clusters: Y[0] = y_j[1:4], Y[1] = y_j[4:9], Y[2] = y_j[20:30]
        >>> x,y = np.array([1., 0.]), np.array([1.5, .5, 2.5])      # dummy "centroids"
        >>> dist = (x[:,None] - y[None,:])**2
        >>> keep = (dist <= 1)                                      # (2,3) matrix
        >>> print(keep)
        [[ True  True False]
         [False  True False]]
        --> X[0] interacts with Y[0] and Y[1], X[1] interacts with Y[1]

        >>> (ranges_i,slices_i,redranges_j, ranges_j,slices_j,redranges_i) = from_matrix(r_i,r_j,keep)
        --> (ranges_i,slices_i,redranges_j) will be used for reductions with respect to "j" (axis=1)
        --> (ranges_j,slices_j,redranges_i) will be used for reductions with respect to "i" (axis=0)

        Information relevant if **axis** = 1:

        >>> print(ranges_i)  # = r_i
        [[ 2,  5],
         [ 7, 12]]
        --> Two "target" clusters in a reduction wrt. j
        >>> print(slices_i)
        [2, 3]
        --> X[0] is associated to redranges_j[0:2]
        --> X[1] is associated to redranges_j[2:3]
        >>> print(redranges_j)
        [[1, 4],
         [4, 9],
         [4, 9]]
        --> For X[0], i in [2,3,4],       we'll reduce over j in [1,2,3] and [4,5,6,7,8]
        --> For X[1], i in [7,8,9,10,11], we'll reduce over j in [4,5,6,7,8]

        Information relevant if **axis** = 0:

        >>> print(ranges_j)
        [[ 1,  4],
         [ 4,  9],
         [20, 30]]
        --> Three "target" clusters in a reduction wrt. i
        >>> print(slices_j)
        [1, 3, 3]
        --> Y[0] is associated to redranges_i[0:1]
        --> Y[1] is associated to redranges_i[1:3]
        --> Y[2] is associated to redranges_i[3:3] = no one...
        >>> print(redranges_i)
        [[ 2,  5],
         [ 2,  5],
         [ 7, 12]]
        --> For Y[0], j in [1,2,3],            we'll reduce over i in [2,3,4]
        --> For Y[1], j in [4,5,6,7,8],        we'll reduce over i in [2,3,4] and [7,8,9,10,11]
        --> For Y[2], j in [20,21,...,29],     there is no reduction to be done
    """
    # Accept 0/1 integer masks as well as booleans: integer arrays would
    # otherwise trigger *fancy* indexing below instead of boolean masking.
    keep = np.asarray(keep).astype(bool)
    # I[k, l] == k and J[k, l] == l, matching keep's (M, N) layout.
    J, I = np.meshgrid(np.arange(0, keep.shape[1]), np.arange(0, keep.shape[0]))
    redranges_i = ranges_i[
        I.T[keep.T]
    ]  # advanced NumPy indexing "stacks" copies of ranges_i[...], column-major so rows are grouped per j-cluster
    redranges_j = ranges_j[J[keep]]  # same trick, row-major: grouped per i-cluster
    slices_i = np.cumsum(
        np.sum(keep, axis=1), axis=0
    )  # slice indices in the "stacked" array redranges_j
    slices_j = np.cumsum(
        np.sum(keep, axis=0), axis=0
    )  # slice indices in the "stacked" array redranges_i
    # KeOps expects 32-bit integer tensors.
    return (
        ranges_i.astype("int32"),
        slices_i.astype("int32"),
        redranges_j.astype("int32"),
        ranges_j.astype("int32"),
        slices_j.astype("int32"),
        redranges_i.astype("int32"),
    )
# Copyright 2020-2023 Capypara and the SkyTemple Contributors
#
# This file is part of SkyTemple.
#
# SkyTemple is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SkyTemple is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SkyTemple. If not, see <https://www.gnu.org/licenses/>.
from typing import TYPE_CHECKING, Type, Union, Optional
from gi.repository import Gtk
from skytemple.core.module_controller import AbstractController
from skytemple.core.ui_utils import glib_async
from skytemple_files.common.i18n_util import f, _
from skytemple_files.data.md.protocol import PokeType
from skytemple_files.hardcoded.dungeons import TilesetMapColor, TilesetStirringEffect, TilesetBaseEnum, \
TilesetSecretPowerEffect, TilesetNaturePowerMoveEntry, TilesetWeatherEffect, TilesetProperties
if TYPE_CHECKING:
from skytemple.module.dungeon_graphics.module import DungeonGraphicsModule
DUNGEON_GRAPHICS_NAME = _('Dungeon Graphics')
class MainController(AbstractController):
    """Controller for the dungeon tileset properties list.

    Shows one row per tileset/background. Combo-box and toggle cell renderers
    edit individual ``TilesetProperties`` fields; every edit is written back
    to the module immediately.

    Tree-store layout: columns 2-7 hold enum values, column 8 the bool flag,
    columns 9-14 the matching display names (see ``_init_values``).
    """

    def __init__(self, module: 'DungeonGraphicsModule', *args):
        self.module = module
        self.builder: Gtk.Builder = None  # type: ignore
        # Working copy of the tileset properties, kept in sync with the store.
        self.lst = self.module.get_tileset_properties()

    def get_view(self) -> Gtk.Widget:
        """Build the UI from the Glade file and return its root widget."""
        self.builder = self._get_builder(__file__, 'main.glade')
        assert self.builder
        self._init_combo_stores()
        self._init_values()
        self.builder.connect_signals(self)
        return self.builder.get_object('box_list')

    def _apply_combo_change(self, widget, path, new_iter, value_col: int, name_col: int):
        """Copy the selected combo entry (value + display name) into the row and persist.

        Shared implementation for all ``on_cr_*_changed`` handlers, which
        differ only in which pair of store columns they write.
        """
        store: Gtk.Store = self.builder.get_object('list_tree_store')
        cb_store: Gtk.Store = widget.props.model
        store[path][value_col] = cb_store[new_iter][0]
        store[path][name_col] = cb_store[new_iter][1]
        self._save_list()

    @glib_async
    def on_cr_map_color_changed(self, widget, path, new_iter, *args):
        self._apply_combo_change(widget, path, new_iter, 2, 9)

    @glib_async
    def METHOD_NAME(self, widget, path, new_iter, *args):
        # Stirring-effect combo changed.
        self._apply_combo_change(widget, path, new_iter, 3, 10)

    @glib_async
    def on_cr_secret_power_effect_changed(self, widget, path, new_iter, *args):
        self._apply_combo_change(widget, path, new_iter, 4, 11)

    @glib_async
    def on_cr_camouflage_type_changed(self, widget, path, new_iter, *args):
        self._apply_combo_change(widget, path, new_iter, 5, 12)

    @glib_async
    def on_cr_nature_power_move_entry_changed(self, widget, path, new_iter, *args):
        self._apply_combo_change(widget, path, new_iter, 6, 13)

    @glib_async
    def on_cr_weather_effect_changed(self, widget, path, new_iter, *args):
        self._apply_combo_change(widget, path, new_iter, 7, 14)

    def on_cr_full_water_floor_toggled(self, widget, path):
        store: Gtk.Store = self.builder.get_object('list_tree_store')
        # The renderer still reports the pre-toggle state, hence the inversion.
        store[path][8] = not widget.get_active()
        self._save_list()

    def _init_combo_stores(self):
        """Fill each combo cell renderer with the values of its enum."""
        self._create_for_enum(self.builder.get_object('cr_map_color'), TilesetMapColor)
        self._create_for_enum(self.builder.get_object('cr_stirring_effect'), TilesetStirringEffect)
        self._create_for_enum(self.builder.get_object('cr_secret_power_effect'), TilesetSecretPowerEffect)
        self._create_for_enum(self.builder.get_object('cr_camouflage_type'), PokeType)
        self._create_for_enum(self.builder.get_object('cr_nature_power_move_entry'), TilesetNaturePowerMoveEntry)
        self._create_for_enum(self.builder.get_object('cr_weather_effect'), TilesetWeatherEffect)

    def _create_for_enum(self, cr: Gtk.CellRendererCombo, en: Union[Type[TilesetBaseEnum], Type[PokeType]]):
        """Attach a fresh (id, name) list store with all members of ``en`` to ``cr``."""
        store = Gtk.ListStore(int, str)  # id, name
        cr.props.model = store
        for e in en:
            store.append([e.value, e.print_name])  # type: ignore

    def _init_values(self):
        """Populate the tree store from the loaded tileset properties."""
        from skytemple.module.dungeon_graphics.module import NUMBER_OF_TILESETS
        store: Gtk.ListStore = self.builder.get_object('list_tree_store')
        for i, v in enumerate(self.lst):
            store.append([
                str(i), f"{_('Tileset')} {i}" if i < NUMBER_OF_TILESETS else f"{_('Background')} {i}",
                v.map_color.value, v.stirring_effect.value, v.secret_power_effect.value,
                v.camouflage_type.value, v.nature_power_move_entry.value, v.weather_effect.value,
                v.full_water_floor,
                v.map_color.print_name, v.stirring_effect.print_name, v.secret_power_effect.print_name,
                v.camouflage_type.print_name, v.nature_power_move_entry.print_name, v.weather_effect.print_name,
            ])

    def _save_list(self):
        """Rebuild ``self.lst`` from the tree store and write it back to the module."""
        self.lst = []
        for row in self.builder.get_object('list_tree_store'):
            self.lst.append(TilesetProperties(
                TilesetMapColor(row[2]),  # type: ignore
                TilesetStirringEffect(row[3]),  # type: ignore
                TilesetSecretPowerEffect(row[4]),  # type: ignore
                PokeType(row[5]),  # type: ignore
                TilesetNaturePowerMoveEntry(row[6]),  # type: ignore
                TilesetWeatherEffect(row[7]),  # type: ignore
                bool(row[8]),
            ))
        self.module.set_tileset_properties(self.lst)
#!/usr/bin/env python3
from __future__ import annotations
from typing import List, Optional, Tuple, Union
import torch
from jaxtyping import Float
from torch import Tensor
from ._linear_operator import IndexType, LinearOperator, to_dense
class DenseLinearOperator(LinearOperator):
    """A LinearOperator that simply wraps a dense :class:`torch.Tensor`.

    All operations delegate directly to the underlying tensor, so nothing
    here is lazy; this class mainly lets dense matrices participate in the
    LinearOperator interface.
    """

    def _check_args(self, tsr):
        # Returns an error-message string when invalid, None (implicitly) when OK.
        if not torch.is_tensor(tsr):
            return "DenseLinearOperator must take a torch.Tensor; got {}".format(tsr.__class__.__name__)
        if tsr.dim() < 2:
            return "DenseLinearOperator expects a matrix (or batches of matrices) - got a Tensor of size {}.".format(
                tsr.shape
            )

    def __init__(self, tsr):
        """
        Wrap a dense (non-lazy) tensor.

        Args:
            tsr (Tensor): a ``... x M x N`` matrix or batch of matrices.
        """
        super().__init__(tsr)
        self.tensor = tsr

    def _cholesky_solve(
        self: Float[LinearOperator, "*batch N N"],
        rhs: Union[Float[LinearOperator, "*batch2 N M"], Float[Tensor, "*batch2 N M"]],
        upper: Optional[bool] = False,
    ) -> Union[Float[LinearOperator, "... N M"], Float[Tensor, "... N M"]]:
        return torch.cholesky_solve(rhs, self.to_dense(), upper=upper)

    def _diagonal(self: Float[LinearOperator, "... M N"]) -> Float[torch.Tensor, "... N"]:
        return self.tensor.diagonal(dim1=-1, dim2=-2)

    def _expand_batch(
        self: Float[LinearOperator, "... M N"], batch_shape: Union[torch.Size, List[int]]
    ) -> Float[LinearOperator, "... M N"]:
        return self.__class__(self.tensor.expand(*batch_shape, *self.matrix_shape))

    def _get_indices(self, row_index: IndexType, col_index: IndexType, *batch_indices: IndexType) -> torch.Tensor:
        # Perform the __getitem__; returns a raw tensor of the selected entries.
        res = self.tensor[(*batch_indices, row_index, col_index)]
        return res

    def _getitem(self, row_index: IndexType, col_index: IndexType, *batch_indices: IndexType) -> LinearOperator:
        # Perform the __getitem__; re-wraps the result as a DenseLinearOperator.
        res = self.tensor[(*batch_indices, row_index, col_index)]
        return self.__class__(res)

    def _isclose(self, other, rtol: float = 1e-05, atol: float = 1e-08, equal_nan: bool = False) -> Tensor:
        return torch.isclose(self.tensor, to_dense(other), rtol=rtol, atol=atol, equal_nan=equal_nan)

    def _matmul(
        self: Float[LinearOperator, "*batch M N"],
        rhs: Union[Float[torch.Tensor, "*batch2 N C"], Float[torch.Tensor, "*batch2 N"]],
    ) -> Union[Float[torch.Tensor, "... M C"], Float[torch.Tensor, "... M"]]:
        return torch.matmul(self.tensor, rhs)

    def METHOD_NAME(self, dim: int) -> LinearOperator:
        # Product over one batch dimension.
        return self.__class__(self.tensor.prod(dim))

    def _bilinear_derivative(self, left_vecs: Tensor, right_vecs: Tensor) -> Tuple[Optional[Tensor], ...]:
        # d(left^T K right)/dK for a dense K is the outer product left @ right^T.
        res = left_vecs.matmul(right_vecs.mT)
        return (res,)

    def _size(self) -> torch.Size:
        return self.tensor.size()

    def _sum_batch(self, dim: int) -> LinearOperator:
        return self.__class__(self.tensor.sum(dim))

    def _transpose_nonbatch(self: Float[LinearOperator, "*batch M N"]) -> Float[LinearOperator, "*batch N M"]:
        return DenseLinearOperator(self.tensor.mT)

    def _t_matmul(
        self: Float[LinearOperator, "*batch M N"],
        rhs: Union[Float[Tensor, "*batch2 M P"], Float[LinearOperator, "*batch2 M P"]],
    ) -> Union[Float[LinearOperator, "... N P"], Float[Tensor, "... N P"]]:
        return torch.matmul(self.tensor.mT, rhs)

    def to_dense(self: Float[LinearOperator, "*batch M N"]) -> Float[Tensor, "*batch M N"]:
        return self.tensor

    def __add__(
        self: Float[LinearOperator, "... #M #N"],
        other: Union[Float[Tensor, "... #M #N"], Float[LinearOperator, "... #M #N"], float],
    ) -> Union[Float[LinearOperator, "... M N"], Float[Tensor, "... M N"]]:
        # Fast paths for dense + dense and dense + raw tensor; otherwise defer
        # to the generic LinearOperator implementation.
        if isinstance(other, DenseLinearOperator):
            return DenseLinearOperator(self.tensor + other.tensor)
        elif isinstance(other, torch.Tensor):
            return DenseLinearOperator(self.tensor + other)
        else:
            return super().__add__(other)
def to_linear_operator(obj: Union[torch.Tensor, LinearOperator]) -> LinearOperator:
    """
    Coerce `obj` into a LinearOperator.

    - A LinearOperator is returned unchanged.
    - A (plain) Tensor is wrapped in a `DenseLinearOperator`.
    - Anything else raises a TypeError.
    """
    if isinstance(obj, LinearOperator):
        return obj
    if torch.is_tensor(obj):
        return DenseLinearOperator(obj)
    raise TypeError(f"object of class {obj.__class__.__name__} cannot be made into a LinearOperator")
__all__ = ["DenseLinearOperator", "to_linear_operator"] |
"""
tests.pytests.unit.beacons.test_diskusage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Disk usage beacon test cases
"""
from collections import namedtuple
import pytest
import salt.beacons.diskusage as diskusage
from tests.support.mock import MagicMock, Mock, patch
@pytest.fixture
def configure_loader_modules():
    """No loader-module globals are needed for these beacon tests."""
    return {}
@pytest.fixture
def stub_disk_partition():
    """POSIX-style psutil.disk_partitions() stand-in: a tmpfs and a root hfs mount."""
    return [
        namedtuple("partition", "device mountpoint fstype, opts")(
            "tmpfs", "/mnt/tmp", "tmpfs", "rw,nosuid,nodev,relatime,size=10240k"
        ),
        namedtuple("partition", "device mountpoint fstype, opts")(
            "/dev/disk0s2", "/", "hfs", "rw,local,rootfs,dovolfs,journaled,multilabel"
        ),
    ]
@pytest.fixture
def METHOD_NAME():
    """Windows-style psutil.disk_partitions() stand-in: C: (NTFS) and D: (CD-ROM)."""
    return [
        namedtuple("partition", "device mountpoint fstype, opts")(
            "C:\\", "C:\\", "NTFS", "rw,fixed"
        ),
        namedtuple("partition", "device mountpoint fstype, opts")(
            "D:\\", "D:\\", "CDFS", "ro,cdrom"
        ),
    ]
@pytest.fixture
def stub_disk_usage():
    """Two psutil.disk_usage() results: 50% used and 25% used (consumed via side_effect)."""
    return [
        namedtuple("usage", "total used free percent")(1000, 500, 500, 50),
        namedtuple("usage", "total used free percent")(100, 75, 25, 25),
    ]
@pytest.fixture
def windows_stub_disk_usage():
    """Single psutil.disk_usage() result at 50% used, returned for every drive."""
    return namedtuple("usage", "total used free percent")(1000, 500, 500, 50)
def test_non_list_config():
    """A non-list beacon config is rejected by validate()."""
    config = {}
    ret = diskusage.validate(config)
    assert ret == (False, "Configuration for diskusage beacon must be a list.")
def test_empty_config():
    """A list containing an empty dict is considered valid configuration."""
    config = [{}]
    ret = diskusage.validate(config)
    assert ret == (True, "Valid beacon configuration")
def test_diskusage_match(stub_disk_usage, stub_disk_partition):
    """Beacon fires for '/' when usage (50%) meets the configured '50%' threshold."""
    disk_usage_mock = Mock(side_effect=stub_disk_usage)
    with patch("salt.utils.platform.is_windows", MagicMock(return_value=False)), patch(
        "psutil.disk_partitions", MagicMock(return_value=stub_disk_partition)
    ), patch("psutil.disk_usage", disk_usage_mock):
        config = [{"/": "50%"}]
        ret = diskusage.validate(config)
        assert ret == (True, "Valid beacon configuration")
        ret = diskusage.beacon(config)
        assert ret == [{"diskusage": 50, "mount": "/"}]
def test_diskusage_match_no_percent(stub_disk_usage, stub_disk_partition):
    """A bare integer threshold (no '%' suffix) behaves the same as '50%'."""
    disk_usage_mock = Mock(side_effect=stub_disk_usage)
    with patch("salt.utils.platform.is_windows", MagicMock(return_value=False)), patch(
        "psutil.disk_partitions", MagicMock(return_value=stub_disk_partition)
    ), patch("psutil.disk_usage", disk_usage_mock):
        # Test without the percent
        config = [{"/": 50}]
        ret = diskusage.validate(config)
        assert ret == (True, "Valid beacon configuration")
        ret = diskusage.beacon(config)
        assert ret == [{"diskusage": 50, "mount": "/"}]
def test_diskusage_nomatch(stub_disk_usage, stub_disk_partition):
    """Beacon does not report a match when usage (50%) is below the 70% threshold."""
    disk_usage_mock = Mock(side_effect=stub_disk_usage)
    with patch("salt.utils.platform.is_windows", MagicMock(return_value=False)), patch(
        "psutil.disk_partitions", MagicMock(return_value=stub_disk_partition)
    ), patch("psutil.disk_usage", disk_usage_mock):
        config = [{"/": "70%"}]
        ret = diskusage.validate(config)
        assert ret == (True, "Valid beacon configuration")
        ret = diskusage.beacon(config)
        assert ret != [{"diskusage": 50, "mount": "/"}]
def test_diskusage_match_regex(stub_disk_usage, stub_disk_partition):
    """Regex-style mount matching on POSIX.

    NOTE(review): the config here ("/") is identical to test_diskusage_match
    and contains no regex metacharacters, so this does not actually exercise
    regex matching — consider a pattern like "^/" here. TODO confirm intent.
    """
    disk_usage_mock = Mock(side_effect=stub_disk_usage)
    with patch("salt.utils.platform.is_windows", MagicMock(return_value=False)), patch(
        "psutil.disk_partitions", MagicMock(return_value=stub_disk_partition)
    ), patch("psutil.disk_usage", disk_usage_mock):
        config = [{"/": "50%"}]
        ret = diskusage.validate(config)
        assert ret == (True, "Valid beacon configuration")
        ret = diskusage.beacon(config)
        assert ret == [{"diskusage": 50, "mount": "/"}]
def test_diskusage_windows_single_slash(
    windows_stub_disk_usage, METHOD_NAME
):
    r"""
    This tests new behavior (C:\)
    """
    disk_usage_mock = Mock(return_value=windows_stub_disk_usage)
    with patch("salt.utils.platform.is_windows", MagicMock(return_value=True)):
        with patch(
            "psutil.disk_partitions",
            MagicMock(return_value=METHOD_NAME),
        ), patch("psutil.disk_usage", disk_usage_mock):
            config = [{"C:\\": "50%"}]
            ret = diskusage.validate(config)
            assert ret == (True, "Valid beacon configuration")
            ret = diskusage.beacon(config)
            assert ret == [{"diskusage": 50, "mount": "C:\\"}]
def test_diskusage_windows_double_slash(
    windows_stub_disk_usage, METHOD_NAME
):
    r"""
    This tests original behavior (C:\\) — the doubled-backslash config form
    must still resolve to the C:\ mount.
    """
    disk_usage_mock = Mock(return_value=windows_stub_disk_usage)
    with patch("salt.utils.platform.is_windows", MagicMock(return_value=True)):
        with patch(
            "psutil.disk_partitions",
            MagicMock(return_value=METHOD_NAME),
        ), patch("psutil.disk_usage", disk_usage_mock):
            config = [{"C:\\\\": "50%"}]
            ret = diskusage.validate(config)
            assert ret == (True, "Valid beacon configuration")
            ret = diskusage.beacon(config)
            assert ret == [{"diskusage": 50, "mount": "C:\\"}]
def test_diskusage_windows_lowercase(
    windows_stub_disk_usage, METHOD_NAME
):
    r"""
    This tests lowercase drive letter (c:\) — the beacon reports the
    canonical uppercase mount.
    """
    disk_usage_mock = Mock(return_value=windows_stub_disk_usage)
    with patch("salt.utils.platform.is_windows", MagicMock(return_value=True)):
        with patch(
            "psutil.disk_partitions",
            MagicMock(return_value=METHOD_NAME),
        ), patch("psutil.disk_usage", disk_usage_mock):
            config = [{"c:\\": "50%"}]
            ret = diskusage.validate(config)
            assert ret == (True, "Valid beacon configuration")
            ret = diskusage.beacon(config)
            assert ret == [{"diskusage": 50, "mount": "C:\\"}]
def test_diskusage_windows_match_regex(
    windows_stub_disk_usage, METHOD_NAME
):
    r"""A drive-letter regex (^[a-zA-Z]:\) matches every stubbed partition."""
    disk_usage_mock = Mock(return_value=windows_stub_disk_usage)
    with patch("salt.utils.platform.is_windows", MagicMock(return_value=True)):
        with patch(
            "psutil.disk_partitions",
            MagicMock(return_value=METHOD_NAME),
        ), patch("psutil.disk_usage", disk_usage_mock):
            config = [{"^[a-zA-Z]:\\": "50%"}]
            ret = diskusage.validate(config)
            assert ret == (True, "Valid beacon configuration")
            ret = diskusage.beacon(config)
            _expected = [
                {"diskusage": 50, "mount": "C:\\"},
                {"diskusage": 50, "mount": "D:\\"},
            ]
            assert ret == _expected
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetDefenderSettingResult',
'AwaitableGetDefenderSettingResult',
'get_defender_setting',
]
@pulumi.output_type
class GetDefenderSettingResult:
    """
    IoT Defender settings
    """
    # Generated output type: the constructor validates each argument's runtime
    # type and stores it via pulumi.set; the @property getters below read the
    # same keys back with pulumi.get.
    def __init__(__self__, device_quota=None, evaluation_end_time=None, METHOD_NAME=None, mde_integration=None, name=None, onboarding_kind=None, sentinel_workspace_resource_ids=None, type=None):
        if device_quota and not isinstance(device_quota, int):
            raise TypeError("Expected argument 'device_quota' to be a int")
        pulumi.set(__self__, "device_quota", device_quota)
        if evaluation_end_time and not isinstance(evaluation_end_time, str):
            raise TypeError("Expected argument 'evaluation_end_time' to be a str")
        pulumi.set(__self__, "evaluation_end_time", evaluation_end_time)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", METHOD_NAME)
        if mde_integration and not isinstance(mde_integration, dict):
            raise TypeError("Expected argument 'mde_integration' to be a dict")
        pulumi.set(__self__, "mde_integration", mde_integration)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if onboarding_kind and not isinstance(onboarding_kind, str):
            raise TypeError("Expected argument 'onboarding_kind' to be a str")
        pulumi.set(__self__, "onboarding_kind", onboarding_kind)
        if sentinel_workspace_resource_ids and not isinstance(sentinel_workspace_resource_ids, list):
            raise TypeError("Expected argument 'sentinel_workspace_resource_ids' to be a list")
        pulumi.set(__self__, "sentinel_workspace_resource_ids", sentinel_workspace_resource_ids)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="deviceQuota")
    def device_quota(self) -> int:
        """
        Size of the device quota. Value is required to be in multiples of 100.
        """
        return pulumi.get(self, "device_quota")

    @property
    @pulumi.getter(name="evaluationEndTime")
    def evaluation_end_time(self) -> str:
        """
        End time of the evaluation period, if such exist
        """
        return pulumi.get(self, "evaluation_end_time")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="mdeIntegration")
    def mde_integration(self) -> 'outputs.DefenderSettingsPropertiesResponseMdeIntegration':
        """
        MDE integration configuration
        """
        return pulumi.get(self, "mde_integration")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="onboardingKind")
    def onboarding_kind(self) -> str:
        """
        The kind of onboarding for the subscription
        """
        return pulumi.get(self, "onboarding_kind")

    @property
    @pulumi.getter(name="sentinelWorkspaceResourceIds")
    def sentinel_workspace_resource_ids(self) -> Sequence[str]:
        """
        Sentinel Workspace Resource Ids
        """
        return pulumi.get(self, "sentinel_workspace_resource_ids")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetDefenderSettingResult(GetDefenderSettingResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the already-resolved result awaitable: the unreachable yield
        # turns this into a generator, which immediately returns a plain
        # GetDefenderSettingResult copy of its fields.
        if False:
            yield self
        return GetDefenderSettingResult(
            device_quota=self.device_quota,
            evaluation_end_time=self.evaluation_end_time,
            METHOD_NAME=self.METHOD_NAME,
            mde_integration=self.mde_integration,
            name=self.name,
            onboarding_kind=self.onboarding_kind,
            sentinel_workspace_resource_ids=self.sentinel_workspace_resource_ids,
            type=self.type)
def get_defender_setting(opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDefenderSettingResult:
    """
    Get IoT Defender Settings
    Azure REST API version: 2021-02-01-preview.
    """
    # This invoke takes no arguments beyond the implicit subscription scope.
    __args__ = dict()
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:iotsecurity:getDefenderSetting', __args__, opts=opts, typ=GetDefenderSettingResult).value

    return AwaitableGetDefenderSettingResult(
        device_quota=pulumi.get(__ret__, 'device_quota'),
        evaluation_end_time=pulumi.get(__ret__, 'evaluation_end_time'),
        METHOD_NAME=pulumi.get(__ret__, 'id'),
        mde_integration=pulumi.get(__ret__, 'mde_integration'),
        name=pulumi.get(__ret__, 'name'),
        onboarding_kind=pulumi.get(__ret__, 'onboarding_kind'),
        sentinel_workspace_resource_ids=pulumi.get(__ret__, 'sentinel_workspace_resource_ids'),
        type=pulumi.get(__ret__, 'type'))
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: proto
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class SubscriberFeatures(object):
    """Generated read-accessor for the ``SubscriberFeatures`` flatbuffers table.

    Each field accessor probes the vtable via ``self._tab.Offset`` and falls
    back to the schema default ``False`` when the field is absent.
    """
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = SubscriberFeatures()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsSubscriberFeatures(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    # SubscriberFeatures
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # SubscriberFeatures
    def PublisherIdentification(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # SubscriberFeatures
    def PatternBasedSubscription(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # SubscriberFeatures
    def PublicationTrustlevels(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # SubscriberFeatures
    def SubscriptionRevocation(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # SubscriberFeatures
    def EventHistory(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # SubscriberFeatures
    def AcknowledgeSubscriberReceived(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # SubscriberFeatures
    def PayloadTransparency(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # SubscriberFeatures
    def PayloadEncryptionCryptobox(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False
# Generated module-level builder helpers. The short names (Start, Add*) are
# the newer flatbuffers API and simply delegate to the legacy prefixed
# SubscriberFeatures* helpers; slot indices match the accessor offsets above.
def SubscriberFeaturesStart(builder): builder.StartObject(8)
def Start(builder):
    return SubscriberFeaturesStart(builder)
def SubscriberFeaturesAddPublisherIdentification(builder, publisherIdentification): builder.PrependBoolSlot(0, publisherIdentification, 0)
def AddPublisherIdentification(builder, publisherIdentification):
    return SubscriberFeaturesAddPublisherIdentification(builder, publisherIdentification)
def SubscriberFeaturesAddPatternBasedSubscription(builder, patternBasedSubscription): builder.PrependBoolSlot(1, patternBasedSubscription, 0)
def AddPatternBasedSubscription(builder, patternBasedSubscription):
    return SubscriberFeaturesAddPatternBasedSubscription(builder, patternBasedSubscription)
def SubscriberFeaturesAddPublicationTrustlevels(builder, publicationTrustlevels): builder.PrependBoolSlot(2, publicationTrustlevels, 0)
def AddPublicationTrustlevels(builder, publicationTrustlevels):
    return SubscriberFeaturesAddPublicationTrustlevels(builder, publicationTrustlevels)
def SubscriberFeaturesAddSubscriptionRevocation(builder, subscriptionRevocation): builder.PrependBoolSlot(3, subscriptionRevocation, 0)
def AddSubscriptionRevocation(builder, subscriptionRevocation):
    return SubscriberFeaturesAddSubscriptionRevocation(builder, subscriptionRevocation)
def SubscriberFeaturesAddEventHistory(builder, eventHistory): builder.PrependBoolSlot(4, eventHistory, 0)
def METHOD_NAME(builder, eventHistory):
    return SubscriberFeaturesAddEventHistory(builder, eventHistory)
def SubscriberFeaturesAddAcknowledgeSubscriberReceived(builder, acknowledgeSubscriberReceived): builder.PrependBoolSlot(5, acknowledgeSubscriberReceived, 0)
def AddAcknowledgeSubscriberReceived(builder, acknowledgeSubscriberReceived):
    return SubscriberFeaturesAddAcknowledgeSubscriberReceived(builder, acknowledgeSubscriberReceived)
def SubscriberFeaturesAddPayloadTransparency(builder, payloadTransparency): builder.PrependBoolSlot(6, payloadTransparency, 0)
def AddPayloadTransparency(builder, payloadTransparency):
    return SubscriberFeaturesAddPayloadTransparency(builder, payloadTransparency)
def SubscriberFeaturesAddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox): builder.PrependBoolSlot(7, payloadEncryptionCryptobox, 0)
def AddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox):
    return SubscriberFeaturesAddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox)
def SubscriberFeaturesEnd(builder): return builder.EndObject()
def End(builder):
    # Fix: the original line was truncated ("SubscriberFeaturesEnd(builder")
    # and missing its closing parenthesis — a SyntaxError.
    return SubscriberFeaturesEnd(builder)
import logging
import os
from abc import ABC
from concurrent.futures import Future, ThreadPoolExecutor
from threading import Semaphore, Thread
from typing import Dict, Iterable, List, Optional, TypeVar
from tqdm import tqdm
from ..utils.ratelimit import RateLimiter
logger = logging.getLogger(__name__)
T = TypeVar("T")
MAX_WORKER_COUNT = 5
MAX_REQUESTS_PER_DOMAIN = 25
_resolver = Semaphore(1)
_host_semaphores: Dict[str, Semaphore] = {}
class TaskManager(ABC):
    """A helper class for task queueing and parallel task execution.

    Owns a ThreadPoolExecutor whose ``submit`` is redirected through
    :meth:`submit_task` so every scheduled Future is also recorded in
    ``self._futures``.  It is being used as a superclass of the Crawler.
    """

    def __init__(self) -> None:
        """Create the manager with the default number of workers."""
        self.init_executor(MAX_WORKER_COUNT)

    def __del__(self) -> None:
        # Also invoked explicitly by init_executor() to tear down a previous
        # executor/limiter before creating new ones.
        if hasattr(self, "_executor"):
            self._submit = None
            self._executor.shutdown(wait=False)
        if hasattr(self, "_limiter"):
            self._limiter.shutdown()

    @property
    def executor(self) -> ThreadPoolExecutor:
        """The currently active thread pool executor."""
        return self._executor

    @property
    def futures(self) -> List[Future]:
        """All futures submitted through submit_task() since the last init."""
        return self._futures

    @property
    def workers(self):
        """Maximum number of worker threads of the current executor."""
        # NOTE(review): reads a private ThreadPoolExecutor attribute; there is
        # no public accessor for this in concurrent.futures.
        return self._executor._max_workers

    def init_executor(
        self,
        workers: int = MAX_WORKER_COUNT,
        ratelimit: Optional[float] = None,
    ):
        """Initializes a new executor.

        If the number of workers are not the same as the current executor,
        it will shutdown the current executor, and cancel all pending tasks.

        Args:
        - workers (int): Number of workers to expect in the new executor.
        - ratelimit (float, optional): Number of requests per second; when
          set, a single worker is used so the limiter is effective.
        """
        self._futures: List[Future] = []
        self.__del__()  # cleanup previous initialization
        if ratelimit and ratelimit > 0:
            workers = 1  # use single worker if ratelimit is being applied
            self._limiter = RateLimiter(ratelimit)
        elif hasattr(self, "_limiter"):
            del self._limiter
        self._executor = ThreadPoolExecutor(
            max_workers=workers,
            thread_name_prefix="lncrawl_scraper",
        )
        # Route all submissions through submit_task() so futures are tracked;
        # keep the original bound submit in self._submit.
        self._submit = self._executor.submit
        self._executor.submit = self.submit_task

    def submit_task(self, fn, *args, **kwargs) -> Future:
        """Submits a callable to be executed with the given arguments.

        Schedules the callable to be executed as fn(*args, **kwargs) and returns
        a Future instance representing the execution of the callable.

        Returns:
            A Future representing the given call.
        """
        if hasattr(self, "_limiter"):
            fn = self._limiter.wrap(fn)
        future = self._submit(fn, *args, **kwargs)
        self._futures.append(future)
        return future

    def progress_bar(
        self,
        iterable=None,
        desc=None,
        total=None,
        unit=None,
        disable=False,
        timeout: float = None,
    ):
        """Create a tqdm progress bar guarded by the global ``_resolver`` lock.

        Only one visible progress bar is allowed to resolve futures at a time;
        the lock is released again when the returned bar is closed.
        """
        if os.getenv("debug_mode"):
            disable = True
        acquired = False
        if not disable:
            # Since we are showing progress bar, it is not good to
            # resolve multiple list of futures at once
            acquired = _resolver.acquire(True, timeout)
        bar = tqdm(
            iterable=iterable,
            desc=desc,
            unit=unit,
            total=total,
            disable=disable or os.getenv("debug_mode"),
        )
        original_close = bar.close

        def extended_close():
            # Bug fix: release only when the acquire above actually succeeded.
            # Previously the semaphore was released whenever the bar was
            # visible, so a timed-out acquire (or a double close) inflated the
            # semaphore count and permanently broke the mutual exclusion.
            nonlocal acquired
            if acquired:
                acquired = False
                _resolver.release()
            original_close()

        bar.close = extended_close
        return bar

    def METHOD_NAME(self, hostname: str = ""):
        """Limit number of entry per hostname.
        Args:
            url: A fully qualified url.
        Returns:
            A semaphore object to wait.
        Example:
            with self.domain_gate(url):
                self.scraper.get(url)
        """
        # NOTE(review): check-then-set on the module-level dict is not atomic;
        # two threads could race to create the semaphore — confirm callers.
        if hostname not in _host_semaphores:
            _host_semaphores[hostname] = Semaphore(MAX_REQUESTS_PER_DOMAIN)
        return _host_semaphores[hostname]

    def cancel_futures(self, futures: Iterable[Future]) -> None:
        """Cancels all the future that are not yet done.

        Args:
            futures: A iterable list of futures to cancel.
        """
        if not futures:
            return
        for future in futures:
            if not future.done():
                future.cancel()

    def resolve_futures(
        self,
        futures: Iterable[Future],
        timeout: float = None,
        disable_bar=False,
        desc=None,
        unit=None,
        fail_fast=False,
    ) -> None:
        """Wait for the futures to be done.

        Args:
            futures: A iterable list of futures to resolve.
            timeout: The number of seconds to wait for the result of a future.
                If None, then there is no limit on the wait time.
            disable_bar: Hides the progress bar if True.
            desc: The progress bar description
            unit: The progress unit name
            fail_fast: Propagate the first exception instead of logging it.
        """
        if not futures:
            return
        bar = self.progress_bar(
            desc=desc,
            unit=unit,
            total=len(futures),
            disable=disable_bar,
            timeout=timeout,
        )
        try:
            for future in futures:
                if fail_fast:
                    # Let any exception escape immediately.
                    future.result(timeout)
                    bar.update()
                    continue
                try:
                    future.result(timeout)
                except Exception as e:
                    if isinstance(e, KeyboardInterrupt):
                        break
                    if bar.disable:
                        logger.exception("Failure to resolve future")
                    else:
                        bar.clear()
                        logger.warning(f"{type(e).__name__}: {e}")
                finally:
                    bar.update()
        finally:
            # Cancel leftovers off-thread so closing the bar is not delayed.
            Thread(target=lambda: self.cancel_futures(futures)).start()
            bar.close()
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from io import BytesIO
import pytest
from mapproxy.request.wms import WMS111MapRequest
from mapproxy.compat.image import Image
from mapproxy.test.image import is_png, tmp_image
from mapproxy.test.http import mock_httpd
from mapproxy.test.system import SysTest
@pytest.fixture(scope="module")
def config_file():
    # MapProxy configuration used by every test in this module.
    return "coverage.yaml"
class TestCoverageWMS(SysTest):
    """WMS endpoint tests for source coverage handling.

    Requests entirely outside the configured coverage must be answered with
    background-only images (no source request); intersecting requests must be
    clipped to the coverage bbox before hitting the source.
    """

    # NOTE(review): nose-style `setup` hook; recent pytest versions expect
    # `setup_method` — confirm against the project's pinned pytest.
    def setup(self):
        # Baseline GetMap request; individual tests override bbox/params.
        self.common_map_req = WMS111MapRequest(
            url="/service?",
            param=dict(
                service="WMS",
                version="1.1.1",
                bbox="-180,0,0,80",
                width="200",
                height="200",
                layers="wms_cache",
                srs="EPSG:4326",
                format="image/png",
                styles="",
                request="GetMap",
            ),
        )

    def test_capababilities(self, app):
        # Coverage bboxes must be reflected in the capabilities document.
        resp = app.get("/service?request=GetCapabilities&service=WMS&version=1.1.1")
        xml = resp.lxml
        # First: combined root, second: wms_cache, third: tms_cache, last: seed_only
        assert xml.xpath("//LatLonBoundingBox/@minx") == ["10", "10", "12", "14"]
        assert xml.xpath("//LatLonBoundingBox/@miny") == ["10", "15", "10", "13"]
        assert xml.xpath("//LatLonBoundingBox/@maxx") == ["35", "30", "35", "24"]
        assert xml.xpath("//LatLonBoundingBox/@maxy") == ["31", "31", "30", "23"]

    def test_get_map_outside(self, app):
        # Fully outside the coverage: expect a solid bgcolor image.
        self.common_map_req.params.bbox = -90, 0, 0, 90
        self.common_map_req.params["bgcolor"] = "0xff0005"
        resp = app.get(self.common_map_req)
        assert resp.content_type == "image/png"
        data = BytesIO(resp.body)
        assert is_png(data)
        img = Image.open(data)
        assert img.mode == "RGB"
        # Every pixel (200x200) carries the requested background color.
        assert img.getcolors() == [(200 * 200, (255, 0, 5))]

    def test_get_map_outside_transparent(self, app):
        # Fully outside the coverage with transparency: expect an all-transparent image.
        self.common_map_req.params.bbox = -90, 0, 0, 90
        self.common_map_req.params.transparent = True
        resp = app.get(self.common_map_req)
        assert resp.content_type == "image/png"
        data = BytesIO(resp.body)
        assert is_png(data)
        img = Image.open(data)
        assert img.mode == "RGBA"
        assert img.getcolors()[0][0] == 200 * 200
        assert img.getcolors()[0][1][3] == 0  # transparent

    def test_get_map_intersection(self, app, cache_dir):
        # Intersecting request: the upstream WMS must only be asked for the
        # clipped coverage bbox (10,15,30,31 at reduced size), and the result
        # must be cached.
        with tmp_image((256, 256), format="jpeg") as img:
            expected_req = (
                {
                    "path": r"/service?LAYERs=foo,bar&SERVICE=WMS&FORMAT=image%2Fjpeg"
                    "&REQUEST=GetMap&HEIGHT=91&SRS=EPSG%3A4326&styles="
                    "&VERSION=1.1.1&BBOX=10,15,30,31"
                    "&WIDTH=114"
                },
                {"body": img.read(), "headers": {"content-type": "image/jpeg"}},
            )
            with mock_httpd(("localhost", 42423), [expected_req]):
                self.common_map_req.params.bbox = 0, 0, 40, 40
                self.common_map_req.params.transparent = True
                resp = app.get(self.common_map_req)
                assert resp.content_type == "image/png"
                data = BytesIO(resp.body)
                assert is_png(data)
                assert Image.open(data).mode == "RGBA"
        assert cache_dir.join(
            "wms_cache_EPSG4326/03/000/000/004/000/000/002.jpeg"
        ).check()
class TestCoverageTMS(SysTest):
    """TMS endpoint tests: tiles intersecting the coverage are fetched from
    the source (WMS-backed and TMS-backed caches) and written to the cache."""

    def test_get_tile_intersections(self, app, cache_dir):
        # WMS-backed cache: the source request is clipped to the coverage bbox.
        with tmp_image((256, 256), format="jpeg") as img:
            expected_req = (
                {
                    "path": r"/service?LAYERs=foo,bar&SERVICE=WMS&FORMAT=image%2Fjpeg"
                    "&REQUEST=GetMap&HEIGHT=25&SRS=EPSG%3A900913&styles="
                    "&VERSION=1.1.1&BBOX=1113194.90793,1689200.13961,3339584.7238,3632749.14338"
                    "&WIDTH=28"
                },
                {"body": img.read(), "headers": {"content-type": "image/jpeg"}},
            )
            with mock_httpd(
                ("localhost", 42423), [expected_req], bbox_aware_query_comparator=True
            ):
                resp = app.get("/tms/1.0.0/wms_cache/0/1/1.jpeg")
                assert resp.content_type == "image/jpeg"
        # Bug fix: the boolean result of check() was previously discarded, so
        # a missing cache file could never fail the test (cf. the asserted
        # check() in TestCoverageWMS.test_get_map_intersection).
        assert cache_dir.join(
            "wms_cache_EPSG900913/01/000/000/001/000/000/001.jpeg"
        ).check()

    def METHOD_NAME(self, app, cache_dir):
        # TMS-backed cache: the source tile is requested directly.
        with tmp_image((256, 256), format="jpeg") as img:
            expected_req = (
                {"path": r"/tms/1.0.0/foo/1/1/1.jpeg"},
                {"body": img.read(), "headers": {"content-type": "image/jpeg"}},
            )
            with mock_httpd(
                ("localhost", 42423), [expected_req], bbox_aware_query_comparator=True
            ):
                resp = app.get("/tms/1.0.0/tms_cache/0/1/1.jpeg")
                assert resp.content_type == "image/jpeg"
        # Bug fix: assert the cache file exists (result was discarded before).
        assert cache_dir.join(
            "tms_cache_EPSG900913/01/000/000/001/000/000/001.jpeg"
        ).check()
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for scan ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def METHOD_NAME(x, axis):
  """Return `x` with its elements reversed along `axis`.

  Args:
    x: A NumPy ndarray.
    axis: Axis to reverse; negative values count from the last dimension.

  Returns:
    A view of `x` reversed along `axis`.
  """
  length = len(x.shape)
  if axis < 0:
    axis = length + axis
  # Bug fix: the index must be a tuple of slices.  Indexing with a *list* of
  # slices was deprecated in NumPy 1.15 and is an error in modern NumPy.
  ix = tuple(
      slice(None, None, -1) if i == axis else slice(None) for i in range(length)
  )
  return x[ix]
def handle_options(func, x, axis, exclusive, reverse):
  """Adds tf options to numpy scan ops.

  Mirrors the `exclusive` and `reverse` arguments of tf.cumsum/tf.cumprod on
  top of the plain NumPy scan `func` (np.cumsum or np.cumprod).

  Args:
    func: np.cumsum or np.cumprod.
    x: Input ndarray.
    axis: Scan axis (negative values count from the last dimension).
    exclusive: If True, shift the scan so element i excludes x[i].
    reverse: If True, scan from the end of `axis` towards the start.

  Returns:
    The scanned ndarray with the requested options applied.

  Raises:
    ValueError: If `func` is not a supported scan function.
  """
  length = len(x.shape)
  if axis < 0:
    axis = length + axis

  if reverse:
    x = np.flip(x, axis)

  if exclusive:
    # Index tuples selecting the first element / all-but-last elements along
    # `axis`.  Bug fix: these must be tuples — indexing with a *list* of
    # slices was deprecated in NumPy 1.15 and is an error in modern NumPy.
    ix_head = tuple(
        slice(0, 1) if i == axis else slice(None) for i in range(length))
    ix_init = tuple(
        slice(0, -1) if i == axis else slice(None) for i in range(length))
    if func == np.cumsum:
      init = np.zeros_like(x[ix_head])  # additive identity
    elif func == np.cumprod:
      init = np.ones_like(x[ix_head])  # multiplicative identity
    else:
      raise ValueError("Unknown scan function.")
    x = np.concatenate([init, func(x[ix_init], axis)], axis=axis)
  else:
    x = func(x, axis=axis)

  if reverse:
    x = np.flip(x, axis)

  return x
class CumsumTest(xla_test.XLATestCase):
  """Checks the XLA implementation of cumsum against the NumPy reference."""

  # Dtypes exercised by every test case below.
  valid_dtypes = [np.float32, np.int32]

  def axis_dtypes(self):
    # Integer dtypes this backend supports for the `axis` argument.
    return set(self.int_types).intersection([np.int32, np.int64])

  def _compare(self, x, axis, exclusive, reverse):
    # Compare tf cumsum with the NumPy reference for one option combination.
    np_out = handle_options(np.cumsum, x, axis, exclusive, reverse)
    with self.session(), self.test_scope():
      p = array_ops.placeholder(x.dtype)
      tf_out = math_ops.cumsum(p, axis, exclusive, reverse).eval(
          feed_dict={p: x})

    self.assertAllClose(np_out, tf_out)

  def _compareAll(self, x, axis):
    # Exercise all four exclusive/reverse combinations.
    for exclusive in [True, False]:
      for reverse in [True, False]:
        self._compare(x, axis, exclusive, reverse)

  def testEmpty(self):
    # Zero-length input must not crash.
    for dtype in self.valid_dtypes:
      x = np.zeros([0]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)

  def testAxisType(self):
    # The axis may be passed as an int32 or int64 scalar tensor.
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis_dtype in self.axis_dtypes():
        with self.session(), self.test_scope():
          p = array_ops.placeholder(x.dtype)
          axis = constant_op.constant(0, axis_dtype)
          math_ops.cumsum(p, axis).eval(feed_dict={p: x})

  def test1D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)

  def test2D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(0, 10).reshape([2, 5]).astype(dtype)
      for axis in (-2, -1, 0, 1):
        self._compareAll(x, axis)

  def test3D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(0, 20).reshape([2, 2, 5]).astype(dtype)
      for axis in (-3, -2, -1, 0, 1, 2):
        self._compareAll(x, axis)

  def test6D(self):
    # High-rank input; sample every third axis to keep runtime bounded.
    for dtype in self.valid_dtypes:
      x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
      for axis in range(-6, 6, 3):
        self._compareAll(x, axis)

  def testInvalidAxis(self):
    # Out-of-range or non-scalar axes must be rejected with clear errors.
    x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
    with self.session(), self.test_scope():
      input_tensor = ops.convert_to_tensor(x)
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumsum(input_tensor, -3).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumsum(input_tensor, 2).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "axis must be a scalar" in str(e)):
        math_ops.cumsum(input_tensor, [0]).eval()
class CumprodTest(xla_test.XLATestCase):
  """Checks the XLA implementation of cumprod against the NumPy reference."""

  # Dtypes exercised by every test case below.
  valid_dtypes = [np.float32, np.int32]

  def axis_dtypes(self):
    # Integer dtypes this backend supports for the `axis` argument.
    return set(self.int_types).intersection([np.int32, np.int64])

  def _compare(self, x, axis, exclusive, reverse):
    # Compare tf cumprod with the NumPy reference for one option combination.
    np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
    with self.session(), self.test_scope():
      p = array_ops.placeholder(x.dtype)
      prod = math_ops.cumprod(p, axis, exclusive, reverse)
      tf_out = prod.eval(feed_dict={p: x})

    self.assertAllClose(np_out, tf_out)

  def _compareAll(self, x, axis):
    # Exercise all four exclusive/reverse combinations.
    for exclusive in [True, False]:
      for reverse in [True, False]:
        self._compare(x, axis, exclusive, reverse)

  def testEmpty(self):
    # Zero-length input must not crash.
    for dtype in self.valid_dtypes:
      x = np.zeros([0]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)

  def testAxisType(self):
    # The axis may be passed as an int32 or int64 scalar tensor.
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis_dtype in self.axis_dtypes():
        with self.session(), self.test_scope():
          p = array_ops.placeholder(x.dtype)
          axis = constant_op.constant(0, axis_dtype)
          # Bug fix: previously ran cumprod on the NumPy constant `x`, so the
          # fed placeholder was never exercised (CumsumTest.testAxisType
          # correctly uses `p` here).
          math_ops.cumprod(p, axis).eval(feed_dict={p: x})

  def test1D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 6).reshape([5]).astype(dtype)
      for axis in (-1, 0):
        self._compareAll(x, axis)

  def test2D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 11).reshape([2, 5]).astype(dtype)
      for axis in (-2, -1, 0, 1):
        self._compareAll(x, axis)

  def test3D(self):
    for dtype in self.valid_dtypes:
      x = np.arange(1, 21).reshape([2, 2, 5]).astype(dtype)
      for axis in (-3, -2, -1, 0, 1, 2):
        self._compareAll(x, axis)

  def test6D(self):
    # High-rank input; sample every third axis to keep runtime bounded.
    for dtype in self.valid_dtypes:
      x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
      for axis in range(-6, 6, 3):
        self._compareAll(x, axis)

  def testInvalidAxis(self):
    # Out-of-range or non-scalar axes must be rejected with clear errors.
    x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
    with self.session(), self.test_scope():
      input_tensor = ops.convert_to_tensor(x)
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumprod(input_tensor, -3).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
        math_ops.cumprod(input_tensor, 2).eval()
      with self.assertRaisesWithPredicateMatch(
          errors_impl.InvalidArgumentError,
          lambda e: "axis must be a scalar" in str(e)):
        math_ops.cumprod(input_tensor, [0]).eval()
if __name__ == "__main__":
  # Bug fix: removed a garbled trailing "|" separator artifact.
  test.main()
7,145 | has icon |
from os import walk
from os.path import splitext
from glob import glob
import importlib
from inspect import getmembers, isclass, getfile
from sverchok.utils.testing import *
from sverchok.utils import yaml_parser
sverchok_directory = dirname(getfile(sverchok))
class UiTests(SverchokTestCase):
    """UI-level sanity checks: node icons and menu-preset completeness."""

    def test_all_nodes_have_icons(self):
        """Every Sverchok node class must define a usable icon."""

        def METHOD_NAME(node_class):
            # A node "has an icon" if it defines sv_icon, or a bl_icon other
            # than the generic OUTLINER_OB_EMPTY placeholder.
            has_sv_icon = hasattr(node_class, "sv_icon")
            has_bl_icon = hasattr(node_class, "bl_icon") and node_class.bl_icon and node_class.bl_icon != 'OUTLINER_OB_EMPTY'
            # sv_logger.debug("Icon: %s: BL %s, SV %s", node_class.__name__, getattr(node_class, 'bl_icon', None), getattr(node_class, 'sv_icon', None))
            return has_sv_icon or has_bl_icon

        # Node classes knowingly shipped without an icon.
        ignore_list = [
            'SvIterationNode',
            'SvExMinimalScalarFieldNode',
            'SvExScalarFieldGraphNode',
            'SvMeshSurfaceFieldNode',
            'SvExMeshNormalFieldNode',
            'SvExMinimalVectorFieldNode',
            'SvSolidCenterOfMassNode',
            'SvIsSolidClosedNode',
            'SvRefineSolidNode',
            'SvSolidValidateNode'
        ]
        sv_init = sverchok.__file__
        nodes_dir = join(dirname(sv_init), "nodes")

        def check_category(directory):
            # Import every module in one nodes/<category> directory and verify
            # each node class defined there (not ones merely re-exported).
            category = basename(directory)
            from sverchok.node_tree import SverchCustomTreeNode
            for py_path in glob(join(directory, "*.py")):
                py_file = basename(py_path)
                py_name, ext = splitext(py_file)
                module = importlib.import_module(f"sverchok.nodes.{category}.{py_name}")
                for node_class_name, node_class in getmembers(module, isclass):
                    if node_class.__module__ != module.__name__:
                        continue
                    if node_class_name in ignore_list:
                        continue
                    sv_logger.debug("Check: %s: %s: %s", node_class, node_class.__bases__, SverchCustomTreeNode in node_class.__bases__)
                    if SverchCustomTreeNode in node_class.mro():
                        with self.subTest(node = node_class_name):
                            if not METHOD_NAME(node_class):
                                self.fail(f"Node <{node_class_name}> does not have icon!")

        for directory, subdirs, fnames in walk(nodes_dir):
            dir_name = basename(directory)
            if dir_name == "nodes":
                # Skip the top-level package directory itself.
                continue
            with self.subTest(directory=dir_name):
                check_category(directory)

    def _load_node_names_from_menu(self, menu_path):
        """Collect the set of node names referenced by a YAML menu file."""

        def search_in_list(data):
            # Plain strings are node names; '---' is a separator to skip.
            for item in data:
                if isinstance(item, str) and item != '---':
                    yield item
                elif isinstance(item, dict):
                    yield from search_in_dict(item)

        def search_in_dict(data):
            # Metadata keys do not contain node names; lists are recursed.
            for key, value in data.items():
                if key in {'icon_name', 'extra_menu', 'operator', 'custom_menu'}:
                    continue
                if isinstance(value, list):
                    yield from search_in_list(value)

        def search_node_names(menu):
            for item in menu:
                if isinstance(item, dict):
                    yield from search_in_dict(item)
                elif isinstance(item, list):
                    yield from search_in_list(item)

        menu = yaml_parser.load(menu_path)
        nodes = set()
        for name in search_node_names(menu):
            nodes.add(name)
        return nodes

    def test_full_menu_presets(self):
        # Every "full_*" menu preset must reference exactly the same node set
        # as the canonical index.yaml.
        index_path = join(sverchok_directory, 'index.yaml')
        index_nodes = self._load_node_names_from_menu(index_path)
        sv_init = dirname(sverchok.__file__)
        for path in glob(join(sv_init, 'menus', 'full_*.yaml')):
            with self.subTest(path = path):
                preset_nodes = self._load_node_names_from_menu(path)
                self.assertEqual(preset_nodes, index_nodes)

    def test_partial_menu_presets(self):
        # "partial_*" presets may omit nodes but must not invent unknown ones.
        index_path = join(sverchok_directory, 'index.yaml')
        index_nodes = self._load_node_names_from_menu(index_path)
        sv_init = dirname(sverchok.__file__)
        for path in glob(join(sv_init, 'menus', 'partial_*.yaml')):
            with self.subTest(path = path):
                preset_nodes = self._load_node_names_from_menu(path)
                self.assertTrue(preset_nodes.issubset(index_nodes))
|
7,146 | describe dataset | """
Command line utility which prints data about labels file.
"""
import os
def describe_labels(data_path, verbose=False):
    """Print summary statistics about a SLEAP labels file (.slp).

    Args:
        data_path: Path to the labels file.
        verbose: If True, also print per-frame instance bounding boxes.
    """
    from sleap.io.dataset import Labels

    # Resolve missing video paths relative to the labels file's own directory.
    video_callback = Labels.make_video_callback([os.path.dirname(data_path)])
    labels = Labels.load_file(data_path, video_search=video_callback)

    print(f"Labeled frames: {len(labels)}")
    print(f"Tracks: {len(labels.tracks)}")
    # Bug fix: was an f-string with no placeholders (lint F541).
    print("Video files:")

    total_user_frames = 0

    for vid in labels.videos:
        print(f"  {vid.filename}")
        lfs = labels.find(vid)

        print(f"    labeled frames: {len(lfs)}")
        if not lfs:
            continue

        first_idx = min((lf.frame_idx for lf in lfs))
        last_idx = max((lf.frame_idx for lf in lfs))

        tracks = {inst.track for lf in lfs for inst in lf}
        concurrent_count = max((len(lf.instances) for lf in lfs))
        user_frames = labels.get_labeled_frame_count(vid, "user")
        total_user_frames += user_frames

        print(f"    labeled frames from {first_idx} to {last_idx}")
        print(f"    user labeled frames: {user_frames}")
        print(f"    tracks: {len(tracks)}")
        print(f"    max instances in frame: {concurrent_count}")

        if verbose:
            print()
            print("    labeled frames: bounding box top left (x, y)")
            for lf in lfs:
                # '^' marks predicted instances (they carry a score).
                bb_cords = [
                    f"({inst.bounding_box[0]:.2f}, {inst.bounding_box[1]:.2f}){'^' if hasattr(inst, 'score') else ''}"
                    for inst in lf.instances
                ]
                pt_str = " ".join(bb_cords)
                print(
                    f"    frame {lf.frame_idx}: {len(lf.instances)} instances -> {pt_str}"
                )
            print()

    print(f"Total user labeled frames: {total_user_frames}")

    if labels.provenance:
        print()
        print(f"Provenance:")
        for key, value in labels.provenance.items():
            print(f"  {key}: {value}")
def describe_model(model_path, verbose=False):
    """Print the configuration, splits, and metrics of a trained SLEAP model folder.

    Args:
        model_path: Path to a model directory containing training_config.json.
        verbose: Unused at present; kept for interface parity with
            describe_labels.
    """
    import sleap
    import numpy as np

    print("=====")
    print("Model:", model_path)
    print("=====")

    # Helper to address files inside the model folder.
    rel_path = lambda x: os.path.join(model_path, x)

    # NOTE(review): initial_cfg is loaded but never used below — presumably
    # kept so a missing/corrupt initial_config.json fails loudly; confirm.
    initial_cfg = sleap.load_config(rel_path("initial_config.json"))
    cfg = sleap.load_config(rel_path("training_config.json"))

    print("=====")
    print("Heads:")
    print("=====")
    print(cfg.model.heads)
    print("=====")
    print()
    print("=====")
    print("Backbone:")
    print("=====")
    print(cfg.model.backbone)
    print("=====")
    print()
    print()

    def describe_metrics(metrics):
        # Accepts either a metrics dict or a path to a saved .npz metrics file.
        if isinstance(metrics, str):
            metrics = np.load(metrics, allow_pickle=True)["metrics"].tolist()
        print(
            f"Dist (90%/95%/99%): {metrics['dist.p90']} / {metrics['dist.p95']} / {metrics['dist.p99']}"
        )
        print(
            f"OKS VOC (mAP / mAR): {metrics['oks_voc.mAP']} / {metrics['oks_voc.mAR']}"
        )
        print(
            f"PCK (mean {metrics['pck.thresholds'][0]}-{metrics['pck.thresholds'][-1]} px): {metrics['pck.mPCK']}"
        )

    def METHOD_NAME(split_name):
        # Print ground-truth counts and (if present) metrics for one split.
        if os.path.exists(rel_path(f"labels_gt.{split_name}.slp")):
            labels = sleap.load_file(rel_path(f"labels_gt.{split_name}.slp"))
            print(
                f"Frames: {len(labels.user_labeled_frames)} / Instances: {len(labels.user_instances)}"
            )
        if os.path.exists(rel_path(f"metrics.{split_name}.npz")):
            print("Metrics:")
            describe_metrics(rel_path(f"metrics.{split_name}.npz"))

    print("=====")
    print("Training set:")
    print("=====")
    METHOD_NAME("train")
    print("=====")
    print()
    print("=====")
    print("Validation set:")
    print("=====")
    METHOD_NAME("val")
    print("=====")
    print()
    print("=====")
    print("Test set:")
    print("=====")
    METHOD_NAME("test")
    print("=====")
    print()
def main():
    """CLI entry point: describe a labels file (.slp) or a model folder."""
    import argparse

    cli = argparse.ArgumentParser()
    cli.add_argument("data_path", help="Path to labels file (.slp) or model folder")
    cli.add_argument("--verbose", default=False, action="store_true")
    opts = cli.parse_args()

    target = opts.data_path
    if target.endswith(".slp"):
        describe_labels(target, verbose=opts.verbose)
    elif os.path.isdir(target) and os.path.exists(
        os.path.join(target, "training_config.json")
    ):
        describe_model(target, verbose=opts.verbose)
if __name__ == "__main__":
    # Bug fix: removed a garbled trailing "|" separator artifact.
    main()
# This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
import bpy
from bpy.props import FloatProperty, IntProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, ensure_nesting_level, zip_long_repeat, get_data_nesting_level
from sverchok.utils.field.scalar import SvScalarField
from sverchok.utils.voronoi3d import lloyd_on_mesh, lloyd_in_mesh
class SvLloydOnMeshNode(SverchCustomTreeNode, bpy.types.Node):
    """
    Triggers: Lloyd Mesh
    Tooltip: Redistribute 3D points on the surface of a mesh uniformly by use of Lloyd's algorithm
    """
    bl_idname = 'SvLloydOnMeshNode'
    bl_label = 'Lloyd on Mesh'
    bl_icon = 'OUTLINER_OB_EMPTY'
    sv_icon = 'SV_VORONOI'
    sv_dependencies = {'scipy'}

    # Number of relaxation passes; more iterations give a more uniform layout.
    iterations : IntProperty(
        name = "Iterations",
        description = "Number of Lloyd algorithm iterations",
        min = 0,
        default = 3,
        update = updateNode)

    thickness : FloatProperty(
        name = "Thickness",
        default = 1.0,
        min = 0.0,
        description="Thickness of region where Voronoi diagram is generated",
        update=updateNode)

    # Relax sites on the mesh surface, or inside the mesh volume.
    modes = [
        ('SURFACE', "Surface", "Surface", 0),
        ('VOLUME', "Volume", "Volume", 1)
    ]

    mode : bpy.props.EnumProperty(
        name = "Mode",
        items = modes,
        default = 'SURFACE',
        update=updateNode)

    def draw_buttons(self, context, layout):
        # Only the mode is exposed in the node body; numeric values are sockets.
        layout.prop(self, "mode")

    def METHOD_NAME(self, context):
        # sv_init: declare the node's input and output sockets.
        self.inputs.new('SvVerticesSocket', "Vertices")
        self.inputs.new('SvStringsSocket', "Faces")
        self.inputs.new('SvVerticesSocket', "Sites").enable_input_link_menu = False
        self.inputs.new('SvStringsSocket', 'Iterations').prop_name = 'iterations'
        self.inputs.new('SvStringsSocket', 'Thickness').prop_name = 'thickness'
        self.inputs.new('SvScalarFieldSocket', 'Weights').enable_input_link_menu = False
        self.outputs.new('SvVerticesSocket', "Sites")

    def process(self):
        # Skip all work when no output is connected.
        if not any(socket.is_linked for socket in self.outputs):
            return

        verts_in = self.inputs['Vertices'].sv_get()
        faces_in = self.inputs['Faces'].sv_get()
        sites_in = self.inputs['Sites'].sv_get()
        thickness_in = self.inputs['Thickness'].sv_get()
        iterations_in = self.inputs['Iterations'].sv_get()
        weights_in = self.inputs['Weights'].sv_get(default=[[None]])

        # Normalize nesting so all inputs can be zipped together; remember the
        # sites' original nesting level to shape the output accordingly.
        verts_in = ensure_nesting_level(verts_in, 4)
        input_level = get_data_nesting_level(sites_in)
        sites_in = ensure_nesting_level(sites_in, 4)
        faces_in = ensure_nesting_level(faces_in, 4)
        thickness_in = ensure_nesting_level(thickness_in, 2)
        iterations_in = ensure_nesting_level(iterations_in, 2)
        if self.inputs['Weights'].is_linked:
            weights_in = ensure_nesting_level(weights_in, 2, data_types=(SvScalarField,))

        nested_output = input_level > 3

        verts_out = []
        for params in zip_long_repeat(verts_in, faces_in, sites_in, thickness_in, iterations_in, weights_in):
            new_verts = []
            for verts, faces, sites, thickness, iterations, weights in zip_long_repeat(*params):
                # Dispatch to surface or in-volume Lloyd relaxation.
                if self.mode == 'SURFACE':
                    sites = lloyd_on_mesh(verts, faces, sites, thickness, iterations, weight_field = weights)
                else:
                    sites = lloyd_in_mesh(verts, faces, sites, iterations, thickness=thickness, weight_field = weights)
                new_verts.append(sites)
            if nested_output:
                verts_out.append(new_verts)
            else:
                verts_out.extend(new_verts)

        self.outputs['Sites'].sv_set(verts_out)
def register():
    # Called by Sverchok when the add-on is enabled.
    bpy.utils.register_class(SvLloydOnMeshNode)
def unregister():
    # Called by Sverchok when the add-on is disabled.
    # Bug fix: removed a garbled trailing "|" separator artifact.
    bpy.utils.unregister_class(SvLloydOnMeshNode)
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import unittest
import torch
from fairseq.criterions.cross_entropy import CrossEntropyCriterion
from fairseq.criterions.label_smoothed_cross_entropy import LabelSmoothedCrossEntropyCriterion
import tests.utils as test_utils
class TestLabelSmoothing(unittest.TestCase):
    """Checks label-smoothed cross entropy against plain cross entropy."""

    def setUp(self):
        # build dictionary
        self.d = test_utils.dummy_dictionary(3)
        vocab = len(self.d)
        self.assertEqual(vocab, 4 + 3)  # 4 special + 3 tokens
        self.assertEqual(self.d.pad(), 1)
        self.assertEqual(self.d.eos(), 2)
        self.assertEqual(self.d.unk(), 3)
        pad, eos, unk, w1, w2, w3 = 1, 2, 3, 4, 5, 6  # noqa: F841

        # build dataset
        self.data = [
            # the first batch item has padding
            {'source': torch.LongTensor([w1, eos]), 'target': torch.LongTensor([w1, eos])},
            {'source': torch.LongTensor([w1, eos]), 'target': torch.LongTensor([w1, w1, eos])},
        ]
        self.sample = next(test_utils.dummy_dataloader(self.data))

        # build model
        self.args = argparse.Namespace()
        self.args.sentence_avg = False
        # Fixed output distribution per target position (batch of 2, 3 steps).
        # NOTE(review): the column comment lists 6 labels for 7 columns —
        # presumably the first column is the unused index-0 symbol; confirm.
        self.args.probs = torch.FloatTensor([
            #      pad   eos  unk   w1   w2   w3
            [0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05],
            [0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10],
            [0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15],
        ]).unsqueeze(0).expand(2, 3, 7)  # add batch dimension
        self.task = test_utils.TestTranslationTask.setup_task(self.args, self.d, self.d)
        self.model = self.task.build_model(self.args)

    def test_nll_loss(self):
        # The smoothed criterion must report the same nll_loss as the plain one.
        self.args.label_smoothing = 0.1
        nll_crit = CrossEntropyCriterion(self.args, self.task)
        smooth_crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
        nll_loss, nll_sample_size, nll_logging_output = nll_crit(self.model, self.sample)
        smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(self.model, self.sample)
        self.assertLess(abs(nll_loss - nll_logging_output['loss']), 1e-6)
        self.assertLess(abs(nll_loss - smooth_logging_output['nll_loss']), 1e-6)

    def test_padding(self):
        # The batched loss must equal the sum of per-item losses computed
        # without any padding.
        self.args.label_smoothing = 0.1
        crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
        loss, _, logging_output = crit(self.model, self.sample)

        def get_one_no_padding(idx):
            # create a new sample with just a single batch item so that there's
            # no padding
            sample1 = next(test_utils.dummy_dataloader([self.data[idx]]))
            args1 = copy.copy(self.args)
            args1.probs = args1.probs[idx, :, :].unsqueeze(0)
            model1 = self.task.build_model(args1)
            loss1, _, _ = crit(model1, sample1)
            return loss1

        loss1 = get_one_no_padding(0)
        loss2 = get_one_no_padding(1)
        self.METHOD_NAME(loss, loss1 + loss2)

    def test_reduction(self):
        # reduce=True must equal the sum over the unreduced losses.
        self.args.label_smoothing = 0.1
        crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
        loss, _, logging_output = crit(self.model, self.sample, reduce=True)
        unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False)
        self.METHOD_NAME(loss, unreduced_loss.sum())

    def test_zero_eps(self):
        # With eps=0 label smoothing degenerates to plain cross entropy.
        self.args.label_smoothing = 0.0
        nll_crit = CrossEntropyCriterion(self.args, self.task)
        smooth_crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
        nll_loss, nll_sample_size, nll_logging_output = nll_crit(self.model, self.sample)
        smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(self.model, self.sample)
        self.METHOD_NAME(nll_loss, smooth_loss)

    def METHOD_NAME(self, t1, t2):
        # assert_almost_equal helper: same shape, max abs difference < 1e-6.
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-6)
if __name__ == '__main__':
    # Bug fix: removed a garbled trailing "|" separator artifact.
    unittest.main()
7,149 | get npoints | # lint-amnesty, pylint: disable=missing-module-docstring
# -----------------------------------------------------------------------------
# class used to store graded responses to CAPA questions
#
# Used by responsetypes and capa_problem
class CorrectMap(object):
    """
    Stores map between answer_id and response evaluation result for each question
    in a capa problem. The response evaluation result for each answer_id includes
    (correctness, npoints, msg, hint, hintmode).

    - correctness : 'correct', 'incorrect', 'partially-correct', or 'incomplete'
    - npoints : None, or integer specifying number of points awarded for this answer_id
    - msg : string (may have HTML) giving extra message response
      (displayed below textline or textbox)
    - hint : string (may have HTML) giving optional hint
      (displayed below textline or textbox, above msg)
    - hintmode : one of (None, 'on_request', 'always') criteria for displaying hint
    - queuestate : Dict {key:'', time:''} where key is a secret string, and time is a string dump
      of a DateTime object in the format '%Y%m%d%H%M%S'. Is None when not queued.

    Behaves as a dict.
    """

    def __init__(self, *args, **kwargs):
        # start with empty dict; alias the dict's view methods so the
        # CorrectMap itself supports items()/keys() like a plain dict
        self.cmap = {}
        self.items = self.cmap.items
        self.keys = self.cmap.keys
        self.overall_message = ""
        self.set(*args, **kwargs)

    def __getitem__(self, *args, **kwargs):
        return self.cmap.__getitem__(*args, **kwargs)

    def __iter__(self):
        return self.cmap.__iter__()

    # See the documentation for 'set_dict' for the use of kwargs
    def set(
        self,
        answer_id=None,
        correctness=None,
        npoints=None,
        msg='',
        hint='',
        hintmode=None,
        queuestate=None,
        answervariable=None,
        **kwargs  # swallows unrecognized keys from older serialized maps
    ):
        """
        Set the evaluation data for a single answer_id.

        A call with answer_id=None (as made by __init__) is a no-op, which
        allows construction of an empty map.
        """
        if answer_id is not None:
            self.cmap[answer_id] = {
                'correctness': correctness,
                'npoints': npoints,
                'msg': msg,
                'hint': hint,
                'hintmode': hintmode,
                'queuestate': queuestate,
                'answervariable': answervariable,
            }

    def __repr__(self):
        return repr(self.cmap)

    def get_dict(self):
        """
        return dict version of self
        """
        return self.cmap

    def set_dict(self, correct_map):
        """
        Set internal dict of CorrectMap to provided correct_map dict

        correct_map is saved by LMS as a plaintext JSON dump of the correctmap dict. This
        means that when the definition of CorrectMap (e.g. its properties) are altered,
        an existing correct_map dict will not coincide with the newest CorrectMap format as
        defined by self.set.

        For graceful migration, feed the contents of each correct map to self.set, rather than
        making a direct copy of the given correct_map dict. This way, the common keys between
        the incoming correct_map dict and the new CorrectMap instance will be written, while
        mismatched keys will be gracefully ignored.

        Special migration case:
            If correct_map is a one-level dict, then convert it to the new dict of dicts format.
        """
        # empty current dict
        self.__init__()  # pylint: disable=unnecessary-dunder-call

        if not correct_map:
            return

        # peek at one value without materializing the whole values() view
        if not isinstance(next(iter(correct_map.values())), dict):
            # special migration: one-level dict of answer_id -> correctness
            for answer_id in correct_map:
                self.set(answer_id, correctness=correct_map[answer_id])
        else:
            for answer_id in correct_map:
                self.set(answer_id, **correct_map[answer_id])

    def is_correct(self, answer_id):
        """
        Takes an answer_id
        Returns True if the problem is correct OR partially correct,
        False if it is known and neither, or None if the answer_id is unknown.
        """
        if answer_id in self.cmap:
            return self.cmap[answer_id]['correctness'] in ('correct', 'partially-correct')
        return None

    def is_partially_correct(self, answer_id):
        """
        Takes an answer_id
        Returns True if the problem is partially correct, False if it is known
        and not partially correct, or None if the answer_id is unknown.
        """
        if answer_id in self.cmap:
            return self.cmap[answer_id]['correctness'] == 'partially-correct'
        return None

    def is_queued(self, answer_id):
        """Return True if answer_id is known and currently queued for external grading."""
        return answer_id in self.cmap and self.cmap[answer_id]['queuestate'] is not None

    def is_right_queuekey(self, answer_id, test_key):
        """Return True if answer_id is queued and its secret queue key matches test_key."""
        return self.is_queued(answer_id) and self.cmap[answer_id]['queuestate']['key'] == test_key

    def get_queuetime_str(self, answer_id):
        """
        Return the '%Y%m%d%H%M%S' queue-time string for answer_id, or None when
        the answer is unknown or not queued.
        """
        # Use get_property so an unknown answer_id returns None instead of
        # raising KeyError, consistent with the other accessors of this class.
        queuestate = self.get_property(answer_id, 'queuestate')
        if queuestate:
            return queuestate['time']
        return None

    def METHOD_NAME(self, answer_id):
        """Return the number of points for an answer, used for partial credit."""
        npoints = self.get_property(answer_id, 'npoints')
        if npoints is not None:
            return npoints
        elif self.is_correct(answer_id):
            return 1
        # if not correct and no points have been assigned, return 0
        return 0

    def set_property(self, answer_id, property, value):  # pylint: disable=redefined-builtin
        """Set a single named property for answer_id, creating the entry if needed."""
        if answer_id in self.cmap:
            self.cmap[answer_id][property] = value
        else:
            self.cmap[answer_id] = {property: value}

    def get_property(self, answer_id, property, default=None):  # pylint: disable=redefined-builtin
        """Return the named property for answer_id, or `default` when missing."""
        if answer_id in self.cmap:
            return self.cmap[answer_id].get(property, default)
        return default

    def get_correctness(self, answer_id):
        """Return the correctness string for answer_id, or None when unknown."""
        return self.get_property(answer_id, 'correctness')

    def get_msg(self, answer_id):
        """Return the extra message for answer_id ('' when missing)."""
        return self.get_property(answer_id, 'msg', '')

    def get_hint(self, answer_id):
        """Return the hint text for answer_id ('' when missing)."""
        return self.get_property(answer_id, 'hint', '')

    def get_hintmode(self, answer_id):
        """Return the hint display mode for answer_id (None when missing)."""
        return self.get_property(answer_id, 'hintmode', None)

    def set_hint_and_mode(self, answer_id, hint, hintmode):
        """
        - hint : (string) HTML text for hint
        - hintmode : (string) mode for hint display ('always' or 'on_request')
        """
        self.set_property(answer_id, 'hint', hint)
        self.set_property(answer_id, 'hintmode', hintmode)

    def update(self, other_cmap):
        """
        Update this CorrectMap with the contents of another CorrectMap
        """
        if not isinstance(other_cmap, CorrectMap):
            raise Exception('CorrectMap.update called with invalid argument %s' % other_cmap)
        self.cmap.update(other_cmap.get_dict())
        self.set_overall_message(other_cmap.get_overall_message())

    def set_overall_message(self, message_str):
        """ Set a message that applies to the question as a whole,
            rather than to individual inputs. """
        self.overall_message = str(message_str) if message_str else ""

    def get_overall_message(self):
        """ Retrieve a message that applies to the question as a whole.
            If no message is available, returns the empty string """
        return self.overall_message
7,150 | pending tasks | from .utils import NamespacedClient, query_params, _make_path
class ClusterClient(NamespacedClient):
    """Namespaced client wrapping the Elasticsearch cluster-level APIs."""

    @query_params('level', 'local', 'master_timeout', 'timeout',
        'wait_for_active_shards', 'wait_for_nodes',
        'wait_for_relocating_shards', 'wait_for_status')
    def health(self, index=None, params=None):
        """
        Get a very simple status on the health of the cluster.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html>`_
        :arg index: Limit the information returned to a specific index
        :arg level: Specify the level of detail for returned information,
            default 'cluster', valid choices are: 'cluster', 'indices', 'shards'
        :arg local: Return local information, do not retrieve the state from
            master node (default: false)
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg timeout: Explicit operation timeout
        :arg wait_for_active_shards: Wait until the specified number of shards
            is active
        :arg wait_for_nodes: Wait until the specified number of nodes is
            available
        :arg wait_for_relocating_shards: Wait until the specified number of
            relocating shards is finished
        :arg wait_for_status: Wait until cluster is in a specific state, default
            None, valid choices are: 'green', 'yellow', 'red'
        """
        endpoint = _make_path('_cluster', 'health', index)
        _, response = self.transport.perform_request('GET', endpoint, params=params)
        return response

    @query_params('local', 'master_timeout')
    def METHOD_NAME(self, params=None):
        """
        The pending cluster tasks API returns a list of any cluster-level
        changes (e.g. create index, update mapping, allocate or fail shard)
        which have not yet been executed.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-pending.html>`_
        :arg local: Return local information, do not retrieve the state from
            master node (default: false)
        :arg master_timeout: Specify timeout for connection to master
        """
        _, response = self.transport.perform_request(
            'GET', '/_cluster/pending_tasks', params=params)
        return response

    @query_params('allow_no_indices', 'expand_wildcards', 'flat_settings',
        'ignore_unavailable', 'local', 'master_timeout')
    def state(self, metric=None, index=None, params=None):
        """
        Get a comprehensive state information of the whole cluster.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html>`_
        :arg metric: Limit the information returned to the specified metrics
        :arg index: A comma-separated list of index names; use `_all` or empty
            string to perform the operation on all indices
        :arg allow_no_indices: Whether to ignore if a wildcard indices
            expression resolves into no concrete indices. (This includes `_all`
            string or when no indices have been specified)
        :arg expand_wildcards: Whether to expand wildcard expression to concrete
            indices that are open, closed or both., default 'open', valid
            choices are: 'open', 'closed', 'none', 'all'
        :arg flat_settings: Return settings in flat format (default: false)
        :arg ignore_unavailable: Whether specified concrete indices should be
            ignored when unavailable (missing or closed)
        :arg local: Return local information, do not retrieve the state from
            master node (default: false)
        :arg master_timeout: Specify timeout for connection to master
        """
        # Filtering by index requires an explicit metric in the URL path.
        if index and not metric:
            metric = '_all'
        endpoint = _make_path('_cluster', 'state', metric, index)
        _, response = self.transport.perform_request('GET', endpoint, params=params)
        return response

    @query_params('flat_settings', 'human')
    def stats(self, node_id=None, params=None):
        """
        The Cluster Stats API allows to retrieve statistics from a cluster wide
        perspective. The API returns basic index metrics and information about
        the current nodes that form the cluster.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html>`_
        :arg node_id: A comma-separated list of node IDs or names to limit the
            returned information; use `_local` to return information from the
            node you're connecting to, leave empty to get information from all
            nodes
        :arg flat_settings: Return settings in flat format (default: false)
        :arg human: Whether to return time and byte values in human-readable
            format., default False
        """
        url = _make_path('_cluster/stats/nodes', node_id) if node_id else '/_cluster/stats'
        _, response = self.transport.perform_request('GET', url, params=params)
        return response

    @query_params('dry_run', 'explain', 'master_timeout', 'metric', 'timeout')
    def reroute(self, body=None, params=None):
        """
        Explicitly execute a cluster reroute allocation command including specific commands.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-reroute.html>`_
        :arg body: The definition of `commands` to perform (`move`, `cancel`,
            `allocate`)
        :arg dry_run: Simulate the operation only and return the resulting state
        :arg explain: Return an explanation of why the commands can or cannot be
            executed
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg metric: Limit the information returned to the specified metrics.
            Defaults to all but metadata, valid choices are: '_all', 'blocks',
            'metadata', 'nodes', 'routing_table', 'master_node', 'version'
        :arg timeout: Explicit operation timeout
        """
        _, response = self.transport.perform_request(
            'POST', '/_cluster/reroute', params=params, body=body)
        return response

    @query_params('flat_settings', 'master_timeout', 'timeout')
    def get_settings(self, params=None):
        """
        Get cluster settings.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html>`_
        :arg flat_settings: Return settings in flat format (default: false)
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg timeout: Explicit operation timeout
        """
        _, response = self.transport.perform_request(
            'GET', '/_cluster/settings', params=params)
        return response

    @query_params('flat_settings', 'master_timeout', 'timeout')
    def put_settings(self, body=None, params=None):
        """
        Update cluster wide specific settings.
        `<http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html>`_
        :arg body: The settings to be updated. Can be either `transient` or
            `persistent` (survives cluster restart).
        :arg flat_settings: Return settings in flat format (default: false)
        :arg master_timeout: Explicit operation timeout for connection to master
            node
        :arg timeout: Explicit operation timeout
        """
        _, response = self.transport.perform_request(
            'PUT', '/_cluster/settings', params=params, body=body)
        return response
|
7,151 | finish | import time
import signal
import re
from lnst.Controller.RecipeResults import ResultLevel
from lnst.RecipeCommon.Perf.Results import PerfInterval
from lnst.RecipeCommon.Perf.Results import SequentialPerfResult
from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import BaseFlowMeasurement
from lnst.RecipeCommon.Perf.Measurements.BaseFlowMeasurement import NetworkFlowTest
from lnst.RecipeCommon.Perf.Measurements.Results import FlowMeasurementResults
from lnst.RecipeCommon.Perf.Measurements.MeasurementError import MeasurementError
from lnst.Tests.TRex import TRexServer, TRexClient
class TRexFlowMeasurement(BaseFlowMeasurement):
    """
    Flow measurement backed by the TRex traffic generator.

    Starts one TRex server/client job pair per generator host, waits for the
    client runs to finish, and converts the per-port packet counters reported
    by the client into LNST Perf result objects.
    """

    _MEASUREMENT_VERSION = 1

    def __init__(self, flows, trex_dir, server_cpu_cores, recipe_conf=None):
        super(TRexFlowMeasurement, self).__init__(recipe_conf)
        self._flows = flows
        self._trex_dir = trex_dir
        self._server_cpu_cores = server_cpu_cores
        self._running_measurements = []
        self._finished_measurements = []
        self._hosts_versions = {}

    @property
    def flows(self):
        return self._flows

    @property
    def version(self):
        """Measurement version plus the TRex version of each generator host (lazily probed)."""
        if not self._hosts_versions:
            for flow in self._flows:
                if flow.generator not in self._hosts_versions:
                    self._hosts_versions[flow.generator] = self._get_host_trex_version(flow.generator)
        return {"measurement_version": self._MEASUREMENT_VERSION,
                "hosts_trex_versions": self._hosts_versions}

    def _get_host_trex_version(self, host):
        """Parse the TRex version string from `t-rex-64 --help` output; None if not found."""
        version_job = host.run(f"cd {self._trex_dir} ; ./t-rex-64 --help", job_level=ResultLevel.DEBUG)
        if version_job.passed:
            match = re.match(r"Starting TRex (v.+?) please wait ...", version_job.stdout)
            if match:
                return match.group(1)
        return None

    def start(self):
        """Prepare and launch all server jobs, then all client jobs, in the background."""
        if len(self._running_measurements) > 0:
            raise MeasurementError("Measurement already running!")
        tests = self._prepare_tests(self._flows)

        for test in tests:
            test.server_job.start(bg=True)

        # wait for the TRex server to start before launching the clients
        time.sleep(15)

        for test in tests:
            test.client_job.start(bg=True)

        self._running_measurements = tests

    def METHOD_NAME(self):
        """Wait for running client jobs to finish, then shut down the server jobs."""
        tests = self._running_measurements
        try:
            for test in tests:
                client_test = test.client_job.what
                test.client_job.wait(timeout=client_test.runtime_estimate())

                # SIGINT asks the TRex server to shut down cleanly
                test.server_job.kill(signal.SIGINT)
                test.server_job.wait(5)
        finally:
            # make sure no job is left running even if a wait above failed
            for test in tests:
                test.server_job.kill()
                test.client_job.kill()

        self._running_measurements = []
        self._finished_measurements = tests

    def _prepare_tests(self, flows):
        """Create one TRex server/client job pair per generator host."""
        tests = []

        flows_by_generator = self._flows_by_generator(flows)
        for generator, gen_flows in list(flows_by_generator.items()):
            flow_tuples = [(flow.generator_bind, flow.receiver_bind)
                           for flow in gen_flows]
            server_job = generator.prepare_job(
                    TRexServer(
                        trex_dir=self._trex_dir,
                        flows=flow_tuples,
                        cores=self._server_cpu_cores))
            client_job = generator.prepare_job(
                    TRexClient(
                        trex_dir=self._trex_dir,
                        ports=list(range(len(flow_tuples))),
                        flows=flow_tuples,
                        module=gen_flows[0].type,
                        duration=gen_flows[0].duration,
                        msg_size=gen_flows[0].msg_size))

            test = NetworkFlowTest(gen_flows, server_job, client_job)
            tests.append(test)
        return tests

    def collect_results(self):
        """Return one FlowMeasurementResults per flow of every finished test."""
        tests = self._finished_measurements

        results = []
        for test in tests:
            # NOTE(review): flows were passed to the client in order, so the
            # enumeration index doubles as the TRex port number — confirm this
            # matches TRexClient's port assignment.
            for port, flow in enumerate(test.flow):
                flow_results = self._parse_results_by_port(
                        test.client_job, port, flow)
                results.append(flow_results)

        return results

    def _flows_by_generator(self, flows):
        """Group flows by generator host; flows sharing a host must share duration/msg_size."""
        result = dict()
        for flow in flows:
            if flow.generator in result:
                result[flow.generator].append(flow)
            else:
                result[flow.generator] = [flow]

        for generator, gen_flows in list(result.items()):
            for flow in gen_flows:
                if (flow.duration != gen_flows[0].duration or
                        flow.msg_size != gen_flows[0].msg_size):
                    raise MeasurementError("Flows on the same generator need to have the same duration and msg_size at the moment")
        return result

    def _parse_results_by_port(self, job, port, flow):
        """Convert the client job's cumulative per-port counters into per-interval results."""
        results = FlowMeasurementResults(measurement=self, flow=flow, warmup_duration=flow.warmup_duration)
        results.generator_results = SequentialPerfResult()
        results.generator_cpu_stats = SequentialPerfResult()

        results.receiver_results = SequentialPerfResult()
        results.receiver_cpu_stats = SequentialPerfResult()

        if not job.passed:
            # failed job: record a single zero interval so downstream code
            # still has a result object to work with
            timestamp = time.time()
            results.generator_results.append(PerfInterval(0, 0, "packets", timestamp))
            results.generator_cpu_stats.append(PerfInterval(0, 0, "cpu_percent", timestamp))
            results.receiver_results.append(PerfInterval(0, 0, "packets", timestamp))
            results.receiver_cpu_stats.append(PerfInterval(0, 0, "cpu_percent", timestamp))
        else:
            # counters in job.result are cumulative; diff successive samples
            # to obtain per-interval packet counts
            prev_time = job.result["start_time"]
            prev_tx_val = 0
            prev_rx_val = 0
            for i in job.result["data"]:
                time_delta = i["timestamp"] - prev_time
                tx_delta = i["measurement"][port]["opackets"] - prev_tx_val
                rx_delta = i["measurement"][port]["ipackets"] - prev_rx_val
                results.generator_results.append(PerfInterval(
                    tx_delta,
                    time_delta,
                    "pkts", i["timestamp"]))
                results.receiver_results.append(PerfInterval(
                    rx_delta,
                    time_delta,
                    "pkts", i["timestamp"]))

                prev_time = i["timestamp"]
                prev_tx_val = i["measurement"][port]["opackets"]
                prev_rx_val = i["measurement"][port]["ipackets"]

                # cpu_util is global to the TRex process, so the same value is
                # attributed to both generator and receiver sides
                cpu_delta = i["measurement"]["global"]["cpu_util"]
                results.generator_cpu_stats.append(PerfInterval(
                    cpu_delta,
                    time_delta,
                    "cpu_percent", i["timestamp"]))
                results.receiver_cpu_stats.append(PerfInterval(
                    cpu_delta,
                    time_delta,
                    "cpu_percent", i["timestamp"]))
        return results
7,152 | modify and assert | '''
copyright: Copyright (C) 2015-2022, Wazuh Inc.
Created by Wazuh, Inc. <info@wazuh.com>.
This program is free software; you can redistribute it and/or modify it under the terms of GPLv2
type: integration
brief: File Integrity Monitoring (FIM) system watches selected files and triggers alerts when
these files are modified. Specifically, these tests will check if FIM monitors the target
of a 'symbolic link' when it is changed and when that change is reverted.
The FIM capability is managed by the 'wazuh-syscheckd' daemon, which checks configured
files for changes to the checksums, permissions, and ownership.
components:
- fim
suite: files_follow_symbolic_link
targets:
- agent
- manager
daemons:
- wazuh-syscheckd
os_platform:
- linux
- macos
- solaris
os_version:
- Arch Linux
- Amazon Linux 2
- Amazon Linux 1
- CentOS 8
- CentOS 7
- Debian Buster
- Red Hat 8
- Solaris 10
- Solaris 11
- macOS Catalina
- macOS Server
- Ubuntu Focal
- Ubuntu Bionic
references:
- https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html
- https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/syscheck.html#directories
pytest_args:
- fim_mode:
realtime: Enable real-time monitoring on Linux (using the 'inotify' system calls) and Windows systems.
whodata: Implies real-time monitoring but adding the 'who-data' information.
- tier:
0: Only level 0 tests are performed, they check basic functionalities and are quick to perform.
1: Only level 1 tests are performed, they check functionalities of medium complexity.
2: Only level 2 tests are performed, they check advanced functionalities and are slow to perform.
tags:
- fim_follow_symbolic_link
'''
import os
import pytest
import wazuh_testing.fim as fim
from test_fim.test_files.test_follow_symbolic_link.common import configurations_path, testdir1, \
modify_symlink, testdir_link, wait_for_symlink_check
# noinspection PyUnresolvedReferences
from test_fim.test_files.test_follow_symbolic_link.common import test_directories, extra_configuration_before_yield, \
extra_configuration_after_yield
from wazuh_testing import logger
from wazuh_testing.tools.configuration import load_wazuh_configurations, check_apply_test
from wazuh_testing.tools.monitoring import FileMonitor
# Marks: platforms (Linux, Solaris, macOS) and tier this module applies to.
pytestmark = [pytest.mark.linux, pytest.mark.sunos5, pytest.mark.darwin, pytest.mark.tier(level=1)]
# Monitor over the Wazuh log, shared by all tests in this module.
wazuh_log_monitor = FileMonitor(fim.LOG_FILE_PATH)
# configurations: enable symlink following (FOLLOW_MODE=yes) in syscheck.
conf_params, conf_metadata = fim.generate_params(extra_params={'FOLLOW_MODE': 'yes'})
configurations = load_wazuh_configurations(configurations_path, __name__,
                                           params=conf_params,
                                           metadata=conf_metadata
                                           )
# fixtures
@pytest.fixture(scope='module', params=configurations)
def get_configuration(request):
    """Get configurations from the module (the suite runs once per configuration)."""
    return request.param
# tests
@pytest.mark.parametrize('tags_to_apply', [
    {'monitored_file'}
])
def test_symbolic_revert_symlink(tags_to_apply, get_configuration, configure_environment,
                                 restart_syscheckd, wait_for_fim_start):
    '''
    description: Check if the 'wazuh-syscheckd' daemon detects new targets when monitoring a directory with
                 a symlink and its target is changed. For this purpose, the test will create a 'symbolic link'
                 to a file/directory. Then, it will change the target to a directory and create some files
                 inside, expecting all the FIM events. After the events are processed, the test will change
                 the link to its previous target, and finally, it will make file operations and expect FIM events.

    wazuh_min_version: 4.2.0

    tier: 1

    parameters:
        - tags_to_apply:
            type: set
            brief: Run test if matches with a configuration identifier, skip otherwise.
        - get_configuration:
            type: fixture
            brief: Get configurations from the module.
        - configure_environment:
            type: fixture
            brief: Configure a custom environment for testing.
        - restart_syscheckd:
            type: fixture
            brief: Clear the 'ossec.log' file and start a new monitor.
        - wait_for_fim_start:
            type: fixture
            brief: Wait for realtime start, whodata start, or end of initial FIM scan.

    assertions:
        - Verify that FIM events are generated when a monitored 'symbolic link' target
          is changed to a new directory.
        - Verify that FIM events are generated when a monitored 'symbolic link' target
          is reverted to the previous directory.

    input_description: A test case (monitored_file) is contained in external YAML file (wazuh_conf.yaml) which
                       includes configuration settings for the 'wazuh-syscheckd' daemon and, these are combined
                       with the testing directories to be monitored defined in the common.py module.

    expected_output:
        - r'.*Sending FIM event: (.+)$' ('added' and 'modified' events)

    tags:
        - scheduled
        - time_travel
    '''
    def METHOD_NAME(file):
        # Modify `file` under testdir1 and assert a matching 'modified' FIM event arrives.
        fim.modify_file_content(testdir1, file, new_content='Sample modification')
        fim.check_time_travel(scheduled, monitor=wazuh_log_monitor)
        ev = wazuh_log_monitor.start(timeout=3, callback=fim.callback_detect_event).result()
        assert 'modified' in ev['data']['type'] and os.path.join(testdir1, file) in ev['data']['path'], \
            f"'modified' event not matching for {testdir1} {file}"

    check_apply_test(tags_to_apply, get_configuration['tags'])
    scheduled = get_configuration['metadata']['fim_mode'] == 'scheduled'
    whodata = get_configuration['metadata']['fim_mode'] == 'whodata'
    file1 = 'regular1'
    file2 = 'regular2'

    # Don't expect an event since it is not being monitored yet
    fim.modify_file_content(testdir1, file2, new_content='Sample modification')
    fim.check_time_travel(scheduled, monitor=wazuh_log_monitor)
    with pytest.raises(TimeoutError):
        event = wazuh_log_monitor.start(timeout=3, callback=fim.callback_detect_event)
        logger.error(f'Unexpected event {event.result()}')
        raise AttributeError(f'Unexpected event {event.result()}')

    # Change the target to the folder and now expect an event
    modify_symlink(testdir1, os.path.join(testdir_link, 'symlink'))
    wait_for_symlink_check(wazuh_log_monitor)
    fim.wait_for_audit(whodata, wazuh_log_monitor)
    METHOD_NAME(file2)

    # Modify symlink target, wait for sym_check to update it
    modify_symlink(os.path.join(testdir1, file1), os.path.join(testdir_link, 'symlink'))
    wait_for_symlink_check(wazuh_log_monitor)
    # Wait for audit to reload the rules
    fim.wait_for_audit(whodata, wazuh_log_monitor)

    # The link now points at file1 only, so changes to file2 must not raise events
    fim.modify_file_content(testdir1, file2, new_content='Sample modification2')
    fim.check_time_travel(scheduled, monitor=wazuh_log_monitor)
    with pytest.raises(TimeoutError):
        event = wazuh_log_monitor.start(timeout=3, callback=fim.callback_detect_event)
        logger.error(f'Unexpected event {event.result()}')
        raise AttributeError(f'Unexpected event {event.result()}')

    # The reverted target (file1) must be monitored again
    METHOD_NAME(file1)
7,153 | main | from great_ape_safe import GreatApeSafe
from helpers.addresses import registry
from brownie import interface
from rich.console import Console
# Shared rich console used for script output.
CONSOLE = Console()
# Multisig and contract addresses involved in this migration.
DEV_MULTISIG = registry.eth.badger_wallets.dev_multisig
OLD_OPS_MULTISIG = registry.eth.badger_wallets.ops_multisig_old
TREASURY_VAULT = registry.eth.badger_wallets.treasury_vault_multisig
BADGER_GEYSER = registry.eth.badger_geyser
# Fraction of the recorded geyser balance used when verifying balances
# (allows for the balance drifting between posting and execution).
TOLERANCE = 0.95
def METHOD_NAME(upgrade="true", simulation="false"):
    """
    Upgrade the DAO_Treasury timelock and unwind its BADGER holdings.

    With `upgrade == "true"`: only posts the proxy upgrade of DAO_Treasury to
    the new SimpleTimelockWithVoting logic from the old ops multisig.
    Otherwise: releases all BADGER from the timelock to the dev multisig, tops
    up the geyser so the bBADGER strategy can withdraw everything to the vault,
    and forwards the remainder to the treasury vault multisig.

    Both flags are strings ("true"/"false") because brownie passes script
    arguments as text.
    """
    badger_token = interface.IBadger(registry.eth.treasury_tokens.BADGER)
    old_multi = GreatApeSafe(OLD_OPS_MULTISIG)
    old_devProxyAdmin = interface.IProxyAdmin(
        registry.eth.badger_wallets.opsProxyAdmin_old, owner=old_multi.account
    )
    dao_treasury = interface.ISimpleTimelockWithVoting(
        registry.eth.badger_wallets.DAO_treasury, owner=old_multi.account
    )
    if upgrade == "true":
        ## == Upgrade DAO_Treasury to new logic from the opsMultisig_old == ##
        # Save storage variables for later comparison
        treasury_balance = badger_token.balanceOf(dao_treasury.address)
        token = dao_treasury.token()
        releaseTime = dao_treasury.releaseTime()
        beneficiary = dao_treasury.beneficiary()
        # Upgrades logic and verifies that storage was preserved
        new_logic = registry.eth.logic.SimpleTimelockWithVoting
        old_devProxyAdmin.upgrade(dao_treasury.address, new_logic)
        assert treasury_balance == badger_token.balanceOf(dao_treasury.address)
        assert token == dao_treasury.token()
        assert releaseTime == dao_treasury.releaseTime()
        assert beneficiary == dao_treasury.beneficiary()
        # Beneficiary is currently the DAO_Agent
        assert dao_treasury.beneficiary() == registry.eth.badger_wallets.fees
        # Nonces are messed up on this multi - replace 76 (on-chain rejection)
        old_multi.post_safe_tx(replace_nonce=76)
    else:
        # Simulates upgrade of contract (no-op on-chain when already upgraded)
        if simulation == "true":
            new_logic = registry.eth.logic.SimpleTimelockWithVoting
            old_devProxyAdmin.upgrade(dao_treasury.address, new_logic)
        safe = GreatApeSafe(DEV_MULTISIG)
        treasury_vault = GreatApeSafe(TREASURY_VAULT)
        # Snapshots allow printing balance diffs at the end of the script
        safe.take_snapshot(tokens=[registry.eth.treasury_tokens.BADGER])
        treasury_vault.take_snapshot(tokens=[registry.eth.treasury_tokens.BADGER])
        # Contracts needed
        controller = interface.IController(
            registry.eth.controllers.native, owner=safe.account
        )
        balance_checker = interface.IBalanceChecker(
            registry.eth.helpers.balance_checker, owner=safe.account
        )
        dao_treasury = interface.ISimpleTimelockWithVoting(
            registry.eth.badger_wallets.DAO_treasury, owner=safe.account
        )
        bBadger_vault = interface.ISettV4h(
            registry.eth.sett_vaults.bBADGER, owner=safe.account
        )
        bBadger_strat = interface.IStrategy(
            registry.eth.strategies["native.badger"], owner=safe.account
        )
        ## == Release all BADGER from DAO_Treasury to devMulti == ##
        # Sets new beneficiary so release() pays out to the dev multisig
        dao_treasury.setBeneficiary(DEV_MULTISIG)
        assert dao_treasury.beneficiary() == DEV_MULTISIG
        treasury_balance = badger_token.balanceOf(dao_treasury.address)
        gov_balance = badger_token.balanceOf(DEV_MULTISIG)
        # Release BADGER to devMultisig
        dao_treasury.release()
        balance_checker.verifyBalance(
            badger_token.address, DEV_MULTISIG, treasury_balance + gov_balance
        )
        ## == Transfer missing BADGER to Geyser and withdrawAll to vault == ##
        # The strategy accounts more BADGER than the geyser actually holds;
        # the difference must be topped up before withdrawAll can succeed.
        geyser_balance = badger_token.balanceOf(BADGER_GEYSER)
        strategy_balanceOf = bBadger_strat.balanceOf()
        geyser_deficit = strategy_balanceOf - geyser_balance
        # Ensure geyser is in deficit
        assert geyser_deficit > 0
        # Transfer missing BADGER to Geyser
        badger_token.transfer(BADGER_GEYSER, geyser_deficit, {"from": safe.address})
        CONSOLE.print(f"{geyser_deficit / 1e18} BADGERs were transfer to the Geyser!\n")
        balance_checker.verifyBalance(
            badger_token.address,
            BADGER_GEYSER,
            geyser_deficit
            + (geyser_balance * TOLERANCE),  # Geyser balance may change from post
        )
        # Once the Geyser has enough BADGER within, we can
        # withdrawAll to transfer all BADGER from strat to the vault
        controller.withdrawAll(bBadger_vault.token())
        assert bBadger_vault.available() >= strategy_balanceOf
        assert bBadger_strat.balanceOf() == 0
        assert badger_token.balanceOf(BADGER_GEYSER) == 0
        # Transfer the remainder of the amount received from the DAO_treasury
        # to the treasury vault multisig
        amount = treasury_balance - geyser_deficit
        treasury_vault_balance = badger_token.balanceOf(TREASURY_VAULT)
        badger_token.transfer(TREASURY_VAULT, amount, {"from": safe.address})
        balance_checker.verifyBalance(
            badger_token.address, TREASURY_VAULT, amount + treasury_vault_balance
        )
        safe.print_snapshot()
        treasury_vault.print_snapshot()
        safe.post_safe_tx(post=(simulation != "true"))
7,154 | test tlimit | import logging
import time
from qcodes.instrument import InstrumentBase
from qcodes.instrument_drivers.Lakeshore.Model_336 import Model_336
from .test_lakeshore import (
DictClass,
MockVisaInstrument,
command,
instrument_fixture,
query,
split_args,
)
log = logging.getLogger(__name__)
# Logger name under which qcodes emits VISA communication messages.
VISA_LOGGER = ".".join((InstrumentBase.__module__, "com", "visa"))
class Model_336_Mock(MockVisaInstrument, Model_336):
    """
    Mock Lakeshore Model 336 answering the driver's queries/commands from
    in-memory state, with an optional simulated linear cool-down ramp.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # initial values
        # Outputs 1/2 are current sources with PID; 3/4 are voltage-style
        # outputs (monitor_out / warm_up) and carry no PID parameters.
        self.heaters: dict[str, DictClass] = {}
        self.heaters["1"] = DictClass(
            P=1,
            I=2,
            D=3,
            mode=1,  # 'off'
            input_channel=1,  # 'A'
            powerup_enable=0,
            polarity=0,
            use_filter=0,
            delay=1,
            output_range=0,
            setpoint=4,
        )
        self.heaters["2"] = DictClass(
            P=1,
            I=2,
            D=3,
            mode=2,  # 'closed_loop'
            input_channel=2,  # 'B'
            powerup_enable=0,
            polarity=0,
            use_filter=0,
            delay=1,
            output_range=0,
            setpoint=4,
        )
        self.heaters["3"] = DictClass(
            mode=4,  # 'monitor_out'
            input_channel=2,  # 'B'
            powerup_enable=0,
            polarity=0,
            use_filter=0,
            delay=1,
            output_range=0,
            setpoint=4,
        )
        self.heaters["4"] = DictClass(
            mode=5,  # 'warm_up'
            input_channel=1,  # 'A'
            powerup_enable=0,
            polarity=0,
            use_filter=0,
            delay=1,
            output_range=0,
            setpoint=4,
        )

        self.channel_mock = {
            str(i): DictClass(
                t_limit=i,
                T=4,
                sensor_name=f"sensor_{i}",
                sensor_type=1,  # 'diode',
                auto_range_enabled=0,  # 'off',
                range=0,
                compensation_enabled=0,  # False,
                units=1,
            )  # 'kelvin')
            for i in self.channel_name_command.keys()
        }

        # simulate delayed heating
        self.simulate_heating = False
        self.start_heating_time = time.perf_counter()

    def start_heating(self):
        """Start the simulated ramp towards the 4 K setpoint."""
        self.start_heating_time = time.perf_counter()
        self.simulate_heating = True

    def get_t_when_heating(self):
        """
        Simply define a fixed setpoint of 4 k for now
        """
        delta = abs(time.perf_counter() - self.start_heating_time)
        # make it simple to start with: linear ramp 1K per second
        # start at 7K.
        return max(4, 7 - delta)

    @query("PID?")
    def pidq(self, arg):
        heater = self.heaters[arg]
        return f"{heater.P},{heater.I},{heater.D}"

    @command("PID")
    @split_args()
    def pid(self, output, P, I, D):  # noqa E741
        for a, v in zip(["P", "I", "D"], [P, I, D]):
            setattr(self.heaters[output], a, v)

    @query("OUTMODE?")
    def outmodeq(self, arg):
        heater = self.heaters[arg]
        return f"{heater.mode},{heater.input_channel},{heater.powerup_enable}"

    @command("OUTMODE")
    @split_args()
    def outputmode(self, output, mode, input_channel, powerup_enable):
        h = self.heaters[output]
        h.output = output
        h.mode = mode
        h.input_channel = input_channel
        h.powerup_enable = powerup_enable

    @query("INTYPE?")
    def intypeq(self, channel):
        ch = self.channel_mock[channel]
        return (
            f"{ch.sensor_type},"
            f"{ch.auto_range_enabled},{ch.range},"
            f"{ch.compensation_enabled},{ch.units}"
        )

    @command("INTYPE")
    @split_args()
    def intype(
        self,
        channel,
        sensor_type,
        auto_range_enabled,
        range_,
        compensation_enabled,
        units,
    ):
        ch = self.channel_mock[channel]
        ch.sensor_type = sensor_type
        ch.auto_range_enabled = auto_range_enabled
        ch.range = range_
        ch.compensation_enabled = compensation_enabled
        ch.units = units

    @query("RANGE?")
    def rangeq(self, heater):
        h = self.heaters[heater]
        return f"{h.output_range}"

    @command("RANGE")
    @split_args()
    def range_cmd(self, heater, output_range):
        h = self.heaters[heater]
        h.output_range = output_range

    @query("SETP?")
    def setpointq(self, heater):
        h = self.heaters[heater]
        return f"{h.setpoint}"

    @command("SETP")
    @split_args()
    def setpoint(self, heater, setpoint):
        h = self.heaters[heater]
        h.setpoint = setpoint

    @query("TLIMIT?")
    def tlimitq(self, channel):
        chan = self.channel_mock[channel]
        # Read the same attribute that __init__ seeds and TLIMIT writes.
        # Previously this read `chan.tlimit`, which does not exist until a
        # TLIMIT command was issued, so TLIMIT? failed on a fresh mock.
        return f"{chan.t_limit}"

    @command("TLIMIT")
    @split_args()
    def tlimitcmd(self, channel, tlimit):
        chan = self.channel_mock[channel]
        chan.t_limit = tlimit

    @query("KRDG?")
    def temperature(self, output):
        chan = self.channel_mock[output]
        if self.simulate_heating:
            # NOTE(review): returns a number rather than a formatted string
            # like the static branch below — presumably stringified by the
            # MockVisaInstrument transport; confirm if parsing ever differs.
            return self.get_t_when_heating()
        return f"{chan.T}"
@instrument_fixture(scope="function")
def lakeshore_336():
    # Fresh mock instrument per test, backed by the pyvisa-sim model file.
    return Model_336_Mock(
        "lakeshore_336_fixture",
        "GPIB::2::INSTR",
        pyvisa_sim_file="lakeshore_model336.yaml",
        device_clear=False,
    )
def test_pid_set(lakeshore_336) -> None:
    """PID parameters round-trip on the current-source outputs."""
    p_val, i_val, d_val = 1, 2, 3
    # Only current source outputs/heaters have PID parameters,
    # voltage source outputs/heaters do not — so only outputs 1 and 2.
    for heater in (lakeshore_336.output_1, lakeshore_336.output_2):
        heater.P(p_val)
        heater.I(i_val)
        heater.D(d_val)
        assert (heater.P(), heater.I(), heater.D()) == (p_val, i_val, d_val)
def test_output_mode(lakeshore_336) -> None:
    """Mode, input channel and powerup-enable round-trip on all four outputs."""
    expected = ("off", "A", True)
    new_mode, new_channel, new_powerup = expected
    for idx in range(1, 5):
        heater = getattr(lakeshore_336, f"output_{idx}")
        heater.mode(new_mode)
        heater.input_channel(new_channel)
        heater.powerup_enable(new_powerup)
        readback = (heater.mode(), heater.input_channel(), heater.powerup_enable())
        assert readback == expected
def test_range(lakeshore_336) -> None:
    """Output-range setting round-trips on all four outputs."""
    target_range = "medium"
    for idx in range(1, 5):
        heater = getattr(lakeshore_336, f"output_{idx}")
        heater.output_range(target_range)
        assert heater.output_range() == target_range
def METHOD_NAME(lakeshore_336) -> None:
    """Temperature-limit setting round-trips on every input channel."""
    limit = 5.1
    for channel in lakeshore_336.channels:
        channel.t_limit(limit)
        assert channel.t_limit() == limit
def test_setpoint(lakeshore_336) -> None:
    """The setpoint must round-trip on all four outputs."""
    target = 5.1
    for idx in range(1, 5):
        heater = getattr(lakeshore_336, f"output_{idx}")
        heater.setpoint(target)
        assert heater.setpoint() == target
def test_select_range_limits(lakeshore_336) -> None:
    """Range selection by temperature must honor the configured range limits."""
    heater = lakeshore_336.output_1
    limits = [1, 2, 3]
    heater.range_limits(limits)
    # A temperature just below each limit selects the range for that limit.
    for limit in limits:
        heater.set_range_from_temperature(limit - 0.5)
        assert heater.output_range() == heater.INVERSE_RANGES[limit]
    # A temperature above the last limit selects the highest range.
    heater.set_range_from_temperature(limits[-1] + 0.5)
    assert heater.output_range() == heater.INVERSE_RANGES[len(limits)]
def test_set_and_wait_unit_setpoint_reached(lakeshore_336) -> None:
    """Waiting for the setpoint must return once simulated heating reaches it."""
    lakeshore_336.output_1.setpoint(4)
    lakeshore_336.start_heating()
    lakeshore_336.output_1.wait_until_set_point_reached()
def test_blocking_t(lakeshore_336) -> None:
    """blocking_t must drive the heater and block until the target temperature is reached."""
    heater = lakeshore_336.output_1
    heater.range_limits([1.2, 2.4, 3.1])
    lakeshore_336.start_heating()
    heater.blocking_t(4)
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import numpy as np
import torch
from nvflare.apis.dxo import DXO, DataKind
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.aggregators.assembler import Assembler
from nvflare.app_common.app_constant import AppConstants
class FedCEAssembler(Assembler):
    """FedCE (federated client contribution estimation) model assembler.

    Combines client weight-diff submissions into a global update using
    per-client coefficients derived from (a) the cosine similarity between
    each client's gradient and the consensus-minus-client gradient and
    (b) the clients' reported "fedce_minus_val" validation metrics.
    """

    def __init__(self, fedce_mode, model):
        """
        Args:
            fedce_mode: coefficient combination strategy, "plus" or "times".
            model: a torch.nn.Module, or the component ID (str) of one,
                resolved lazily on START_RUN.
        """
        super().__init__(data_kind=DataKind.WEIGHT_DIFF)
        # mode, plus or times
        self.fedce_mode = fedce_mode
        self.model = model
        self.fedce_cos_param_list = []
        # Aggregator needs to keep record of historical
        # cosine similarity for FedCM coefficients
        self.fedce_cos_sim = {}
        self.fedce_coef = {}

    def METHOD_NAME(self, fl_ctx: FLContext):
        """Resolve the model (possibly from a component ID) and cache the
        names of trainable parameters used for cosine-similarity computation.

        Panics the system (and returns) if the model cannot be resolved to a
        torch.nn.Module.
        """
        # convert str model description to model
        if isinstance(self.model, str):
            # treat it as model component ID
            model_component_id = self.model
            engine = fl_ctx.get_engine()
            self.model = engine.get_component(model_component_id)
            if not self.model:
                self.system_panic(
                    reason=f"cannot find model component '{model_component_id}'",
                    fl_ctx=fl_ctx,
                )
                return
            if not isinstance(self.model, torch.nn.Module):
                # BUG FIX: the original message referenced the non-existent
                # attribute `self.model_selector`, which would raise
                # AttributeError here instead of reporting the type mismatch.
                self.system_panic(
                    reason=f"expect model component '{model_component_id}' to be torch.nn.Module but got {type(self.model)}",
                    fl_ctx=fl_ctx,
                )
                return
        elif self.model and not isinstance(self.model, torch.nn.Module):
            self.system_panic(
                reason=f"expect model to be torch.nn.Module but got {type(self.model)}",
                fl_ctx=fl_ctx,
            )
            return
        # only include params requires_grad for cosine similarity computation
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                self.fedce_cos_param_list.append(name)
        self.log_info(fl_ctx, "FedCE model assembler initialized")

    def get_model_params(self, dxo: DXO):
        """Extract the weight-diff payload and the client's minus-val metric from a DXO."""
        data = dxo.data
        meta = dxo.meta
        return {"model": data, "fedce_minus_val": meta["fedce_minus_val"]}

    def handle_event(self, event: str, fl_ctx: FLContext):
        """Initialize the assembler once when the run starts."""
        if event == EventType.START_RUN:
            self.METHOD_NAME(fl_ctx)

    def assemble(self, data: Dict[str, dict], fl_ctx: FLContext) -> DXO:
        """Aggregate per-site submissions into a global model-update DXO.

        Args:
            data: mapping of site name -> {"model": weight dict,
                "fedce_minus_val": float-like metric}.
            fl_ctx: FL context carrying the current round number.

        Returns:
            DXO whose data is the coefficient-weighted global update and whose
            meta carries the updated per-site FedCE coefficients.
        """
        current_round = fl_ctx.get_prop(AppConstants.CURRENT_ROUND)
        site_list = data.keys()

        fedce_minus_vals = []
        self.fedce_cos_sim[current_round] = {}

        for site in site_list:
            if current_round == 0:
                # round 0, initialize uniform fedce_coef
                self.fedce_coef[site] = 1 / len(site_list)
            # get minus_val from submissions
            fedce_minus_vals.append(data[site]["fedce_minus_val"])

        # generate consensus gradient with current FedCE coefficients
        consensus_grad = []
        global_weights = self.model.state_dict()
        for idx, name in enumerate(global_weights):
            if name in self.fedce_cos_param_list:
                temp = torch.zeros_like(global_weights[name])
                for site in site_list:
                    temp += self.fedce_coef[site] * torch.as_tensor(data[site]["model"][name])
                consensus_grad.append(temp.data.view(-1))

        # flatten for cosine similarity computation
        consensus_grads_vec = torch.cat(consensus_grad).to("cpu")

        # generate minus gradients and compute cosine similarity
        for site in site_list:
            site_grad = []
            for name in self.fedce_cos_param_list:
                site_grad.append(torch.as_tensor(data[site]["model"][name]).data.view(-1))
            site_grads_vec = torch.cat(site_grad).to("cpu")
            # minus gradient
            minus_grads_vec = consensus_grads_vec - self.fedce_coef[site] * site_grads_vec
            # compute cosine similarity
            fedce_cos_sim_site = (
                torch.cosine_similarity(site_grads_vec, minus_grads_vec, dim=0).detach().cpu().numpy().item()
            )
            # append to record dict
            self.fedce_cos_sim[current_round][site] = fedce_cos_sim_site

        # compute cos_weights and minus_vals based on the record for each site
        fedce_cos_weights = []
        for site in site_list:
            # cosine similarity averaged over all rounds so far
            cos_accu_avg = np.mean([self.fedce_cos_sim[i][site] for i in range(current_round + 1)])
            fedce_cos_weights.append(1.0 - cos_accu_avg)

        # normalize, then clip away from zero so no site is fully excluded
        fedce_cos_weights /= np.sum(fedce_cos_weights)
        fedce_cos_weights = np.clip(fedce_cos_weights, a_min=1e-3, a_max=None)
        fedce_minus_vals /= np.sum(fedce_minus_vals)
        fedce_minus_vals = np.clip(fedce_minus_vals, a_min=1e-3, a_max=None)

        # two aggregation strategies
        if self.fedce_mode == "times":
            new_fedce_coef = [c_w * mv_w for c_w, mv_w in zip(fedce_cos_weights, fedce_minus_vals)]
        elif self.fedce_mode == "plus":
            new_fedce_coef = [c_w + mv_w for c_w, mv_w in zip(fedce_cos_weights, fedce_minus_vals)]
        else:
            raise NotImplementedError

        # normalize again
        new_fedce_coef /= np.sum(new_fedce_coef)
        new_fedce_coef = np.clip(new_fedce_coef, a_min=1e-3, a_max=None)

        # update fedce_coef
        fedce_coef = {}
        idx = 0
        for site in site_list:
            fedce_coef[site] = new_fedce_coef[idx]
            idx += 1

        # compute global model update with the new fedce weights
        global_updates = {}
        for idx, name in enumerate(global_weights):
            temp = torch.zeros_like(global_weights[name], dtype=torch.float32)
            for site in site_list:
                weight = fedce_coef[site]
                temp += weight * data[site]["model"][name]
            global_updates[name] = temp.detach().cpu().numpy()

        meta = {"fedce_coef": fedce_coef}
        dxo = DXO(data_kind=self.expected_data_kind, data=global_updates, meta=meta)
        return dxo
import os
import pytest
import random
import re
import requests
import string
import unittest
from base import BaseTest
INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False)
@unittest.skip("flaky: https://github.com/elastic/beats/issues/16247")
class Test(BaseTest):
    """Integration tests for shipping beat monitoring data to a monitoring cluster."""

    def setUp(self):
        # NOTE(review): `super(BaseTest, self)` skips BaseTest.setUp and calls
        # the setUp of BaseTest's own parent instead — confirm this is intended.
        super(BaseTest, self).setUp()

        self.es = self.get_elasticsearch_instance()
        self.es_monitoring = self.get_elasticsearch_instance(url=self.get_elasticsearch_monitoring_url())

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    @pytest.mark.tag('integration')
    def test_direct_to_monitoring_cluster(self):
        """
        Test shipping monitoring data directly to the monitoring cluster.
        Make sure expected documents are indexed in monitoring cluster.
        """
        self.render_config_template(
            "mockbeat",
            monitoring={
                "elasticsearch": {
                    "hosts": [self.get_elasticsearch_monitoring_url()]
                }
            }
        )

        self.clean_output_cluster()
        self.clean_monitoring_cluster()

        proc = self.start_beat(config="mockbeat.yml")
        self.wait_until(lambda: self.log_contains("mockbeat start running."))
        self.wait_until(lambda: self.log_contains(re.compile(r"\[monitoring\].*Publish event")))
        self.wait_until(lambda: self.log_contains(re.compile(
            r"Connection to .*elasticsearch\({}\).* established".format(self.get_elasticsearch_monitoring_url()))))
        self.wait_until(lambda: self.monitoring_doc_exists('beats_stats'))
        self.wait_until(lambda: self.monitoring_doc_exists('beats_state'))

        proc.check_kill_and_wait()

        # Both monitoring doc types must carry the standard envelope fields.
        for monitoring_doc_type in ['beats_stats', 'beats_state']:
            field_names = ['cluster_uuid', 'timestamp', 'interval_ms', 'type', monitoring_doc_type]
            self.assert_monitoring_doc_contains_fields(monitoring_doc_type, field_names)

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    @pytest.mark.tag('integration')
    def test_cluster_uuid_setting(self):
        """
        Test that monitoring.cluster_uuid setting may be set without any other monitoring.* settings
        """
        test_cluster_uuid = self.random_string(10)
        self.render_config_template(
            "mockbeat",
            monitoring={
                "cluster_uuid": test_cluster_uuid
            },
            http_enabled="true"
        )

        proc = self.start_beat(config="mockbeat.yml")
        self.wait_until(lambda: self.log_contains("mockbeat start running."))
        state = self.get_beat_state()
        proc.check_kill_and_wait()

        self.assertEqual(test_cluster_uuid, state["monitoring"]["cluster_uuid"])

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    @pytest.mark.tag('integration')
    def test_cluster_uuid_setting_monitoring_disabled(self):
        """
        Test that monitoring.cluster_uuid setting may be set with monitoring.enabled explicitly set to false
        """
        test_cluster_uuid = self.random_string(10)
        self.render_config_template(
            "mockbeat",
            monitoring={
                "enabled": False,
                "cluster_uuid": test_cluster_uuid
            },
            http_enabled="true"
        )

        proc = self.start_beat(config="mockbeat.yml")
        self.wait_until(lambda: self.log_contains("mockbeat start running."))
        state = self.get_beat_state()
        proc.check_kill_and_wait()

        self.assertEqual(test_cluster_uuid, state["monitoring"]["cluster_uuid"])

    def search_monitoring_doc(self, monitoring_type):
        """Return up to one hit of the given monitoring doc type from the monitoring cluster."""
        results = self.es_monitoring.search(
            index='.monitoring-beats-*',
            q='type:' + monitoring_type,
            size=1
        )
        return results['hits']['hits']

    def monitoring_doc_exists(self, monitoring_type):
        """True when exactly one document of the given type has been indexed."""
        hits = self.search_monitoring_doc(monitoring_type)
        return len(hits) == 1

    def get_monitoring_doc(self, monitoring_type):
        """Return the _source of the single document of the given type, or None."""
        hits = self.search_monitoring_doc(monitoring_type)
        if len(hits) != 1:
            return None
        return hits[0]['_source']

    def assert_monitoring_doc_contains_fields(self, monitoring_type, field_names):
        """Assert the first document of the given type contains every named field."""
        results = self.es_monitoring.search(
            index='.monitoring-beats-*',
            q='type:' + monitoring_type,
            size=1
        )
        hits = results['hits']['hits']
        source = hits[0]['_source']

        for field_name in field_names:
            self.assertIn(field_name, source)

    def METHOD_NAME(self, dict1, dict2):
        """Recursively assert the two dicts have the same keys and value types.

        Ints and floats are treated as the same type; nested dicts are
        compared structurally, not by value.
        """
        dict1_keys = list(dict1.keys())
        dict2_keys = list(dict2.keys())

        self.assertEqual(len(dict1_keys), len(dict2_keys))

        for key in dict1_keys:
            dict1_val = dict1[key]
            dict2_val = dict2[key]

            # Cast ints to floats for more practical type comparison further down
            if isinstance(dict1_val, int):
                dict1_val = float(dict1_val)
            if isinstance(dict2_val, int):
                dict2_val = float(dict2_val)

            self.assertEqual(type(dict1_val), type(dict2_val))

            if isinstance(dict1_val, dict):
                self.METHOD_NAME(dict1_val, dict2_val)

    def clean_output_cluster(self):
        """Disable self-monitoring on the output cluster so it does not interfere."""
        # Remove all exporters
        self.es.cluster.put_settings(body={
            "transient": {
                "xpack.monitoring.exporters.*": None
            }
        })

        # Disable collection
        self.es.cluster.put_settings(body={
            "transient": {
                "xpack.monitoring.collection.enabled": None
            }
        })

    def clean_monitoring_cluster(self):
        """Remove previously indexed beats monitoring documents."""
        # Delete any old beats monitoring data
        self.es_monitoring.indices.delete(index=".monitoring-beats-*", ignore=[404])

    def get_elasticsearch_monitoring_url(self):
        """Build the monitoring cluster URL from ES_MONITORING_HOST/PORT env vars."""
        return "http://{host}:{port}".format(
            host=os.getenv("ES_MONITORING_HOST", "localhost"),
            port=os.getenv("ES_MONITORING_PORT", "9210")
        )

    def get_beat_state(self):
        """Fetch the beat's state JSON from its local HTTP endpoint."""
        url = "http://localhost:5066/state"
        return requests.get(url).json()

    def random_string(self, size):
        """Return a random alphanumeric string of the given length."""
        char_pool = string.ascii_letters + string.digits
        return ''.join(random.choice(char_pool) for i in range(size))
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "vmss perform-maintenance",
)
class PerformMaintenance(AAZCommand):
    """Perform maintenance on one or more virtual machines in a VM scale set. Operation on instances which are not eligible for perform maintenance will be failed. Please refer to best practices for more details: https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-maintenance-notifications
    """

    # Generated by aaz-dev-tools: maps the command to its ARM resource/api-version.
    _aaz_info = {
        "version": "2023-03-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/virtualmachinescalesets/{}/performmaintenance", "2023-03-01"],
        ]
    }

    AZ_SUPPORT_NO_WAIT = True

    def _handler(self, command_args):
        # Long-running operation: return an LRO poller with no result transform.
        super()._handler(command_args)
        return self.build_lro_poller(self._execute_operations, None)

    _args_schema = None

    @classmethod
    def METHOD_NAME(cls, *args, **kwargs):
        """Build (and cache on the class) the CLI argument schema for this command."""
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super().METHOD_NAME(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.vm_scale_set_name = AAZStrArg(
            options=["-n", "--name", "--vm-scale-set-name"],
            help="The name of the VM scale set.",
            required=True,
            id_part="name",
        )

        # define Arg Group "VmInstanceIDs"
        _args_schema = cls._args_schema
        _args_schema.instance_ids = AAZListArg(
            options=["--instance-ids"],
            arg_group="VmInstanceIDs",
            help="The virtual machine scale set instance ids. Omitting the virtual machine scale set instance ids will result in the operation being performed on all virtual machines in the virtual machine scale set.",
        )

        instance_ids = cls._args_schema.instance_ids
        instance_ids.Element = AAZStrArg()
        return cls._args_schema

    def _execute_operations(self):
        # Pre/post hooks wrap the single HTTP operation.
        self.pre_operations()
        yield self.VirtualMachineScaleSetsPerformMaintenance(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    class VirtualMachineScaleSetsPerformMaintenance(AAZHttpOperation):
        """HTTP operation for the performMaintenance POST endpoint (long-running)."""

        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # Both 202 (accepted) and 200 (completed) start LRO polling.
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [200]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/performMaintenance",
                **self.url_parameters
            )

        @property
        def method(self):
            return "POST"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "vmScaleSetName", self.ctx.args.vm_scale_set_name,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2023-03-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Content-Type", "application/json",
                ),
            }
            return parameters

        @property
        def content(self):
            # Request body: optional list of VM instance IDs.
            _content_value, _builder = self.new_content_builder(
                self.ctx.args,
                typ=AAZObjectType,
                typ_kwargs={"flags": {"client_flatten": True}}
            )
            _builder.set_prop("instanceIds", AAZListType, ".instance_ids")

            instance_ids = _builder.get(".instanceIds")
            if instance_ids is not None:
                instance_ids.set_elements(AAZStrType, ".")

            return self.serialize_content(_content_value)

        def on_200(self, session):
            # The operation returns no body on success.
            pass
class _PerformMaintenanceHelper:
    """Helper class for PerformMaintenance"""


__all__ = ["PerformMaintenance"]
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.stateful_random_ops.binomial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.kernel_tests.random import util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import stateful_random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# All supported dtypes for binomial().
_SUPPORTED_DTYPES = (dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.int32, dtypes.int64)
class RandomBinomialTest(test.TestCase):
  """This is a large test due to the moments computation taking some time."""

  def _Sampler(self, num, counts, probs, dtype, seed=None):
    """Return a zero-arg callable producing a (10, num) sample matrix from Generator.binomial."""

    def func():
      rng = stateful_random_ops.Generator.from_seed(seed).binomial(
          shape=[10 * num], counts=counts, probs=probs, dtype=dtype)
      ret = array_ops.reshape(rng, [10, num])
      ret = self.evaluate(ret)
      return ret

    return func

  @test_util.run_v2_only
  def testMoments(self):
    """Z-test of sample moments against scipy's binomial distribution."""
    try:
      from scipy import stats  # pylint: disable=g-import-not-at-top
    except ImportError as e:
      tf_logging.warn("Cannot test moments: %s", e)
      return
    # The moments test is a z-value test.  This is the largest z-value
    # we want to tolerate. Since the z-test approximates a unit normal
    # distribution, it should almost definitely never exceed 6.
    z_limit = 6.0
    for dt in _SUPPORTED_DTYPES:
      # Test when n * p > 10, and n * p < 10
      for stride in 0, 4, 10:
        for counts in (1., 10., 22., 50.):
          for prob in (0.1, 0.5, 0.8):
            sampler = self._Sampler(int(1e5), counts, prob, dt, seed=12345)
            z_scores = util.test_moment_matching(
                # Use float64 samples.
                sampler().astype(np.float64),
                number_moments=6,
                dist=stats.binom(counts, prob),
                stride=stride,
            )
            self.assertAllLess(z_scores, z_limit)

  @test_util.run_v2_only
  def testSeed(self):
    """Equal seeds must yield identical sample streams."""
    for dt in dtypes.float16, dtypes.float32, dtypes.float64:
      sx = self._Sampler(1000, counts=10., probs=0.4, dtype=dt, seed=345)
      sy = self._Sampler(1000, counts=10., probs=0.4, dtype=dt, seed=345)
      self.assertAllEqual(sx(), sy())

  def METHOD_NAME(self):
    """A zero-length shape with empty parameters yields an empty tensor."""
    rnd = stateful_random_ops.Generator.from_seed(12345).binomial([0], [], [])
    self.assertEqual([0], rnd.shape.as_list())

  def testShape(self):
    """Output shape follows the `shape` argument under scalar/vector parameter broadcasting."""
    rng = stateful_random_ops.Generator.from_seed(12345)
    # Scalar parameters.
    rnd = rng.binomial(shape=[10], counts=np.float32(2.), probs=np.float32(0.5))
    self.assertEqual([10], rnd.shape.as_list())

    # Vector parameters.
    rnd = rng.binomial(
        shape=[10],
        counts=array_ops.ones([10], dtype=np.float32),
        probs=0.3 * array_ops.ones([10], dtype=np.float32))
    self.assertEqual([10], rnd.shape.as_list())
    rnd = rng.binomial(
        shape=[2, 5],
        counts=array_ops.ones([2], dtype=np.float32),
        probs=0.4 * array_ops.ones([2], dtype=np.float32))
    self.assertEqual([2, 5], rnd.shape.as_list())

    # Scalar counts, vector probs.
    rnd = rng.binomial(
        shape=[10],
        counts=np.float32(5.),
        probs=0.8 * array_ops.ones([10], dtype=np.float32))
    self.assertEqual([10], rnd.shape.as_list())

    # Vector counts, scalar probs.
    rnd = rng.binomial(
        shape=[10],
        counts=array_ops.ones([10], dtype=np.float32),
        probs=np.float32(0.9))
    self.assertEqual([10], rnd.shape.as_list())

  @test_util.run_v2_only
  def testCornerCases(self):
    """Degenerate probabilities (0, 1, NaN, out-of-range) map to the expected outputs."""
    rng = stateful_random_ops.Generator.from_seed(12345)
    counts = np.array([5, 5, 5, 0, 0, 0], dtype=np.float32)
    probs = np.array([0, 1, float("nan"), -10, 10, float("nan")],
                     dtype=np.float32)
    expected = np.array([0, 5, float("nan"), 0, 0, 0], dtype=np.float32)
    result = rng.binomial(
        shape=[6], counts=counts, probs=probs, dtype=np.float32)
    self.assertAllEqual(expected, self.evaluate(result))
# Run the test suite when executed directly.
if __name__ == "__main__":
  test.main()
# /*##########################################################################
# Copyright (C) 2016-2023 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################*/
"""Tests for old specfile wrapper"""
__authors__ = ["P. Knobel"]
__license__ = "MIT"
__date__ = "15/05/2017"
import logging
import numpy
import os
import sys
import tempfile
import unittest
logger1 = logging.getLogger(__name__)
from ..specfilewrapper import Specfile
# Sample SPEC-format file content used as the on-disk fixture for the tests
# below: two scans in a first #F section plus a second #F section containing
# MCA data. Do not edit — the test assertions depend on these exact bytes.
sftext = """#F /tmp/sf.dat
#E 1455180875
#D Thu Feb 11 09:54:35 2016
#C imaging User = opid17
#U00 user comment first line
#U01 This is a dummy file to test SpecFile parsing
#U02
#U03 last line
#O0 Pslit HGap MRTSlit UP MRTSlit DOWN
#O1 Sslit1 VOff Sslit1 HOff Sslit1 VGap
#o0 pshg mrtu mrtd
#o2 ss1vo ss1ho ss1vg
#J0 Seconds IA ion.mono Current
#J1 xbpmc2 idgap1 Inorm
#S 1 ascan ss1vo -4.55687 -0.556875 40 0.2
#D Thu Feb 11 09:55:20 2016
#T 0.2 (Seconds)
#G0 0
#G1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
#G3 0 0 0 0 0 0 0 0 0
#G4 0
#Q
#P0 180.005 -0.66875 0.87125
#P1 14.74255 16.197579 12.238283
#UMI0 Current AutoM Shutter
#UMI1 192.51 OFF FE open
#UMI2 Refill in 39883 sec, Fill Mode: uniform multibunch / Message: Feb 11 08:00 Delivery:Next Refill at 21:00;
#N 4
#L first column second column 3rd_col
-1.23 5.89 8
8.478100E+01 5 1.56
3.14 2.73 -3.14
1.2 2.3 3.4
#S 25 ascan c3th 1.33245 1.52245 40 0.15
#D Thu Feb 11 10:00:31 2016
#P0 80.005 -1.66875 1.87125
#P1 4.74255 6.197579 2.238283
#N 5
#L column0 column1 col2 col3
0.0 0.1 0.2 0.3
1.0 1.1 1.2 1.3
2.0 2.1 2.2 2.3
3.0 3.1 3.2 3.3
#F /tmp/sf.dat
#E 1455180876
#D Thu Feb 11 09:54:36 2016
#S 1 aaaaaa
#U first duplicate line
#U second duplicate line
#@MCADEV 1
#@MCA %16C
#@CHANN 3 0 2 1
#@CALIB 1 2 3
#N 3
#L uno duo
1 2
@A 0 1 2
3 4
@A 3.1 4 5
5 6
@A 6 7.7 8
"""
class TestSpecfilewrapper(unittest.TestCase):
    """Tests for the legacy Specfile wrapper API against the sftext fixture."""

    @classmethod
    def setUpClass(cls):
        # Write the fixture to a temporary binary file once for all tests.
        fd, cls.fname1 = tempfile.mkstemp(text=False)
        os.write(fd, bytes(sftext, 'ascii'))
        os.close(fd)

    @classmethod
    def tearDownClass(cls):
        os.unlink(cls.fname1)

    def setUp(self):
        # Open the file and select the three scans used by the assertions.
        self.sf = Specfile(self.fname1)
        self.scan1 = self.sf[0]
        self.scan1_2 = self.sf.select("1.2")
        self.scan25 = self.sf.select("25.1")

    def tearDown(self):
        self.sf.close()

    def test_number_of_scans(self):
        self.assertEqual(3, len(self.sf))

    def METHOD_NAME(self):
        """Scan numbers and keys are listed in file order."""
        self.assertEqual(self.sf.list(),
                         '1,25,1')
        self.assertEqual(self.sf.keys(),
                         ["1.1", "25.1", "1.2"])

    def test_scan_headers(self):
        self.assertEqual(self.scan25.header('S'),
                         ["#S 25 ascan c3th 1.33245 1.52245 40 0.15"])
        self.assertEqual(self.scan1.header("G0"), ['#G0 0'])
        # parsing headers with long keys
        # parsing empty headers
        self.assertEqual(self.scan1.header('Q'), ['#Q '])

    def test_file_headers(self):
        self.assertEqual(self.scan1.header("E"),
                         ['#E 1455180875'])
        self.assertEqual(self.sf.title(),
                         "imaging")
        self.assertEqual(self.sf.epoch(),
                         1455180875)
        self.assertEqual(self.sf.allmotors(),
                         ["Pslit HGap", "MRTSlit UP", "MRTSlit DOWN",
                          "Sslit1 VOff", "Sslit1 HOff", "Sslit1 VGap"])

    def test_scan_labels(self):
        self.assertEqual(self.scan1.alllabels(),
                         ['first column', 'second column', '3rd_col'])

    def test_data(self):
        self.assertAlmostEqual(self.scan1.dataline(3)[2],
                               -3.14)
        self.assertAlmostEqual(self.scan1.datacol(1)[2],
                               3.14)
        # tests for data transposition between original file and .data attr
        self.assertAlmostEqual(self.scan1.data()[2, 0],
                               8)
        self.assertEqual(self.scan1.data().shape, (3, 4))
        self.assertAlmostEqual(numpy.sum(self.scan1.data()), 113.631)

    def test_date(self):
        self.assertEqual(self.scan1.date(),
                         "Thu Feb 11 09:55:20 2016")

    def test_motors(self):
        self.assertEqual(len(self.sf.allmotors()), 6)
        self.assertEqual(len(self.scan1.allmotorpos()), 6)
        self.assertAlmostEqual(sum(self.scan1.allmotorpos()),
                               223.385912)
        self.assertEqual(self.sf.allmotors()[1], 'MRTSlit UP')

    def test_mca(self):
        self.assertEqual(self.scan1_2.mca(2)[2], 5)
        self.assertEqual(sum(self.scan1_2.mca(3)), 21.7)

    def test_mca_header(self):
        self.assertEqual(self.scan1_2.header("CALIB"),
                         ["#@CALIB 1 2 3"])
#
# Copyright 2018 BhaaL
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert Gettext PO localization files to flat XML files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/flatxml2po.html
for examples and usage instructions.
"""
from translate.convert import convert
from translate.storage import flatxml, po
class po2flatxml:
    """Convert to a single PO file to a single XML file, optionally
    applying modifications to a template file instead of creating
    one from scratch based on input parameters.
    """

    TargetStoreClass = flatxml.FlatXMLFile
    TargetUnitClass = flatxml.FlatXMLUnit

    def __init__(
        self,
        inputfile,
        outputfile,
        templatefile=None,
        root="root",
        value="str",
        key="key",
        ns=None,
        indent=2,
    ):
        """Initialize the converter.

        :param inputfile: PO source file object.
        :param outputfile: destination XML file object.
        :param templatefile: optional existing XML file to update in place.
        :param root: name of the XML root element.
        :param value: name of the per-entry value element.
        :param key: name of the key attribute on each value element.
        :param ns: optional XML namespace URI.
        :param indent: indent width in spaces; 0 disables indentation.
        """
        self.inputfile = inputfile
        self.outputfile = outputfile
        self.templatefile = templatefile

        self.value_name = value
        self.key_name = key
        self.namespace = ns

        indent_chars = None
        if indent > 0:
            indent_chars = " " * indent

        self.source_store = po.pofile(inputfile)
        self.target_store = self.TargetStoreClass(
            templatefile,
            root_name=root,
            value_name=value,
            key_name=key,
            namespace=ns,
            indent_chars=indent_chars,
        )

    def convert_unit(self, unit):
        """Convert a source format unit to a target format unit."""
        target_unit = self.TargetUnitClass(
            source=None,
            namespace=self.namespace,
            element_name=self.value_name,
            attribute_name=self.key_name,
        )
        target_unit.source = unit.source
        # Untranslated entries fall back to the source text.
        if unit.istranslated() or not bool(unit.source):
            target_unit.target = unit.target
        else:
            target_unit.target = unit.source
        return target_unit

    def convert_store(self):
        """Convert a single source file to a target format file."""
        for unit in self.source_store.units:
            key = unit.source
            if not key:
                continue
            # Update an existing template entry, or add a new one.
            target_unit = self.target_store.findid(key)
            if target_unit is None:
                target_unit = self.convert_unit(unit)
                self.target_store.addunit(target_unit)
            else:
                target_unit.target = unit.target

    def METHOD_NAME(self):
        """Run the converter.

        Returns 1 when output was written, 0 when the target store is empty
        (converter success convention, not a process exit code).
        """
        self.convert_store()

        if self.target_store.isempty():
            return 0

        self.target_store.serialize(self.outputfile)
        return 1
def run_converter(
    inputfile,
    outputfile,
    templatefile=None,
    root="root",
    value="str",
    key="key",
    ns=None,
    indent=2,
):
    """Wrapper around the converter: build a po2flatxml instance and run it."""
    converter = po2flatxml(
        inputfile, outputfile, templatefile, root, value, key, ns, indent
    )
    return converter.METHOD_NAME()
# Conversion table: input format(s) -> (output extension, converter callable).
# Note: ("po") is just the string "po", so no tuple parentheses are needed.
formats = {
    "po": ("xml", run_converter),
    ("po", "xml"): ("xml", run_converter),
}
def main(argv=None):
    """Command-line entry point: build the option parser and run the conversion."""
    parser = convert.ConvertOptionParser(
        formats, usetemplates=True, description=__doc__
    )

    parser.add_option(
        "-r",
        "--root",
        action="store",
        dest="root",
        default="root",
        help='name of the XML root element (default: "root")',
    )
    parser.add_option(
        "-v",
        "--value",
        action="store",
        dest="value",
        default="str",
        help='name of the XML value element (default: "str")',
    )
    parser.add_option(
        "-k",
        "--key",
        action="store",
        dest="key",
        default="key",
        help='name of the XML key attribute (default: "key")',
    )
    parser.add_option(
        "-n",
        "--namespace",
        action="store",
        dest="ns",
        default=None,
        help="XML namespace uri (default: None)",
    )
    parser.add_option(
        "-w",
        "--indent",
        action="store",
        dest="indent",
        type="int",
        default=2,
        help="indent width in spaces, 0 for no indent (default: 2)",
    )

    # Forward the option values straight through to run_converter().
    parser.passthrough.append("root")
    parser.passthrough.append("value")
    parser.passthrough.append("key")
    parser.passthrough.append("ns")
    parser.passthrough.append("indent")
    parser.METHOD_NAME(argv)


if __name__ == "__main__":
    main()
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pytest
import tensorflow as tf
from tensorflow.keras import backend
from tensorflow.keras import layers
from tensorflow.keras import models
from nncf.common.graph import INPUT_NOOP_METATYPES
from nncf.common.graph import OUTPUT_NOOP_METATYPES
from nncf.common.graph.layer_attributes import MultipleInputLayerAttributes
from nncf.common.graph.layer_attributes import PermuteLayerAttributes
from nncf.tensorflow.graph.converter import BaseFunctionalSequentialConverter
from nncf.tensorflow.graph.converter import convert_keras_model_to_nncf_graph
from nncf.tensorflow.graph.metatypes.common import DIMENSION_PERMUTATION_METATYPES
from nncf.tensorflow.graph.metatypes.common import LAYER_METATYPES_AGNOSTIC_TO_DATA_PRECISION_WITH_MULTIPLE_INPUTS
from nncf.tensorflow.graph.metatypes.common import LAYER_METATYPES_AGNOSTIC_TO_DATA_PRECISION_WITH_MULTIPLE_OUTPUTS
from nncf.tensorflow.graph.metatypes.common import RESHAPE_METATYPES
from tests.tensorflow.helpers import create_compressed_model_and_algo_for_test
from tests.tensorflow.helpers import get_basic_conv_test_model
from tests.tensorflow.pruning.helpers import get_concat_test_model
from tests.tensorflow.pruning.helpers import get_split_test_model
from tests.tensorflow.quantization.test_algorithm_quantization import get_basic_quantization_config
def test_struct_auxiliary_nodes_nncf_graph():
    """Graph conversion must add exactly one NOOP input and one NOOP output node."""
    base_model = get_basic_conv_test_model()
    quant_config = get_basic_quantization_config()
    compressed, _ = create_compressed_model_and_algo_for_test(base_model, quant_config, force_no_init=True)
    graph = convert_keras_model_to_nncf_graph(compressed)
    inputs = graph.get_input_nodes()
    outputs = graph.get_output_nodes()
    assert len(inputs) == 1 and inputs[0].metatype in INPUT_NOOP_METATYPES
    assert len(outputs) == 1 and outputs[0].metatype in OUTPUT_NOOP_METATYPES
class CustomLayerForTest(tf.keras.layers.Layer):
    """Minimal custom Keras layer (a single trainable scalar multiplier) used to
    exercise custom-layer detection in the converter tests below."""

    # Fixed layer name so tests can look this layer up by key.
    CUSTOM_LAYER_NAME = "custom_layer_for_test"

    def __init__(self):
        super().__init__(name=self.CUSTOM_LAYER_NAME)
        # One trainable weight of shape (1,); broadcasts over any input.
        self.w = self.add_weight(shape=(1,))

    def call(self, inputs, **kwargs):
        # Elementwise multiply by the scalar weight.
        return tf.multiply(inputs, self.w)
def ModelForCustomLayerTest():
    """Build a tiny functional Keras model mixing regular layers, an
    experimental layer (Rescaling), one custom layer and a TensorFlowOpLayer,
    so that only the custom layer should be reported as "custom"."""
    input_shape = (None, None, 3)
    img_input = layers.Input(shape=input_shape)  # non-custom
    channel_axis = 1 if backend.image_data_format() == "channels_first" else -1
    x = img_input
    x = layers.Rescaling(scale=1.0 / 127.5, offset=-1.0)(x)  # non-custom, but experimental
    x = layers.Conv2D(16, kernel_size=3, strides=(2, 2), padding="same", use_bias=False, name="Conv")(x)  # non-custom
    x = CustomLayerForTest()(x)  # custom!
    x = layers.BatchNormalization(axis=channel_axis, epsilon=1e-3, momentum=0.999, name="Conv/BatchNorm")(
        x
    )  # non-custom
    x = tf.multiply(x, x)  # TensorFlowOpLayer, should be treated as non-custom
    model = models.Model(img_input, x, name="ModelForCustomLayerTest")
    return model
def test_get_custom_layers():
    """The converter must report exactly the one custom layer in the model."""
    model = ModelForCustomLayerTest()
    model.build([16, 16, 3])
    found = BaseFunctionalSequentialConverter.get_custom_layers(model)
    assert len(found) == 1
    assert CustomLayerForTest.CUSTOM_LAYER_NAME in found
    reported_layer = found[CustomLayerForTest.CUSTOM_LAYER_NAME]
    assert isinstance(reported_layer, CustomLayerForTest)
def get_model_with_reshapes_and_concats(batch_size=None):
    """Build a model whose node names and axes are referenced by the reshape
    and concat attribute tests below; changing the op order changes the
    auto-generated node names those tests key on."""
    inputs = layers.Input((64,), batch_size=batch_size)
    # Two flavours of reshape: raw tf op and keras layer.
    x = tf.reshape(inputs, (32, -1))
    x = layers.Reshape((16, -1))(x)
    ones = tf.ones_like(x)
    # Three flavours of concatenation: keras layer plus two raw tf ops.
    t1 = layers.concatenate([x, ones])
    # pylint: disable=E1120,E1123
    t2 = tf.concat([x, ones], axis=-1)
    y = tf.concat([t1, t2], axis=-1)
    y = tf.transpose(y, [2, 0, 1])
    y = tf.keras.layers.Flatten()(y)
    return models.Model(inputs, y, name="ModelWithReshape")
# Models paired element-wise with the expected concat attributes below.
CONCAT_MODELS = [partial(get_concat_test_model, input_shape=[1, 8, 8, 1]), get_model_with_reshapes_and_concats]
# Expected concat axis per node name; both the negative axis and its
# positive equivalent are accepted, hence the two-element lists.
REF_CONCAT_ATTRS = [
    {"tf.concat": {"axis": [-1, 3]}, "tf.concat_1": {"axis": [-1, 3]}},
    {"concatenate": {"axis": [-1, 2]}, "tf.concat": {"axis": [-1, 2]}, "tf.concat_1": {"axis": [-1, 2]}},
]
@pytest.mark.parametrize("model, ref_attrs", list(zip(CONCAT_MODELS, REF_CONCAT_ATTRS)))
def test_concat_attributes_saved_during_graph_building(model, ref_attrs):
    """Every multi-input (concat-like) node must carry MultipleInputLayerAttributes
    with one of the expected axis values."""
    graph = convert_keras_model_to_nncf_graph(model())
    concat_nodes = [
        n for n in graph.get_all_nodes()
        if n.metatype in LAYER_METATYPES_AGNOSTIC_TO_DATA_PRECISION_WITH_MULTIPLE_INPUTS
    ]
    for node in concat_nodes:
        assert node.node_name in ref_attrs
        attrs = node.layer_attributes
        assert attrs is not None
        assert isinstance(attrs, MultipleInputLayerAttributes)
        assert attrs.axis in ref_attrs[node.node_name]["axis"]
def test_reshape_attributes_saved_during_graph_building():
    """Reshape-like nodes must record their input/output shapes in layer attributes."""
    graph = convert_keras_model_to_nncf_graph(get_model_with_reshapes_and_concats())
    expected = {
        "tf.reshape": {"input_shape": (None, 64), "output_shape": (32, None)},
        "reshape": {"input_shape": (32, None), "output_shape": (32, 16, None)},
        "flatten": {"input_shape": (None, 32, 16), "output_shape": (None, 512)},
    }
    for node in graph.get_all_nodes():
        if node.metatype not in RESHAPE_METATYPES:
            continue
        assert node.node_name in expected
        attrs = node.layer_attributes
        assert attrs is not None
        ref = expected[node.node_name]
        assert attrs.input_shape == ref["input_shape"]
        assert attrs.output_shape == ref["output_shape"]
def METHOD_NAME():
    """Split nodes must carry the chunks/axis attributes collected while
    building the graph."""
    model = get_split_test_model([1, 8, 8, 1])
    graph = convert_keras_model_to_nncf_graph(model)
    expected = {"tf.split": {"chunks": 2, "axis": 3}}
    split_nodes = (
        n for n in graph.get_all_nodes()
        if n.metatype in LAYER_METATYPES_AGNOSTIC_TO_DATA_PRECISION_WITH_MULTIPLE_OUTPUTS
    )
    for node in split_nodes:
        assert node.node_name in expected
        assert node.layer_attributes is not None
        assert node.layer_attributes.chunks == expected[node.node_name]["chunks"]
        assert node.layer_attributes.axis == expected[node.node_name]["axis"]
def get_model_with_transpose_and_permute(batch_size=None):
    """Build a small model containing both a raw tf.transpose and a keras Permute."""
    model_input = layers.Input((10, 10, 10, 10), batch_size=batch_size)
    transposed = tf.transpose(model_input, perm=[0, 3, 2, 1, 4])
    permuted = tf.keras.layers.Permute([3, 2, 1, 4])(transposed)
    return models.Model(model_input, permuted)
def test_permute_attribute_saved_during_graph_building():
    """Transpose/Permute nodes must carry PermuteLayerAttributes with the
    permutation that was applied."""
    graph = convert_keras_model_to_nncf_graph(get_model_with_transpose_and_permute())
    expected = {
        "tf.compat.v1.transpose": PermuteLayerAttributes([0, 3, 2, 1, 4]),
        "permute": PermuteLayerAttributes([0, 3, 2, 1, 4]),
    }
    permutation_nodes = [
        n for n in graph.get_all_nodes() if n.metatype in DIMENSION_PERMUTATION_METATYPES
    ]
    for node in permutation_nodes:
        assert node.node_name in expected
        assert node.layer_attributes == expected[node.node_name]
7,162 | dumps | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Used code from commentjson library : https://github.com/vaidik/commentjson
"""Add JavaScript or Python style comments in JSON.
commentjson (Comment JSON) is a Python package that helps you create JSON files
with Python and JavaScript style inline comments. Its API is very similar to
the Python standard library’s json module.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
import codecs
import traceback
import json
import lark
from lark import Lark
from lark.lexer import Token
from lark.reconstruct import Reconstructor
from lark.tree import Tree
parser = Lark('''
?start: value
?value: object
| array
| string
| SIGNED_NUMBER -> number
| "true" -> true
| "false" -> false
| "null" -> null
array : "[" [value ("," value)*] TRAILING_COMMA? "]"
object : "{" [pair ("," pair)*] TRAILING_COMMA? "}"
pair : string ":" value
string : ESCAPED_STRING
COMMENT: "/*" /(.|\\n)+?/ "*/"
| /(#|\\/\\/)[^\\n]*/
TRAILING_COMMA: ","
%import common.ESCAPED_STRING
%import common.SIGNED_NUMBER
%import common.WS
%ignore WS
%ignore COMMENT
''', maybe_placeholders=False, parser='lalr')
serializer = Reconstructor(parser)
def detect_encoding(b):
    '''Guess the encoding of JSON byte string `b` from its BOM / zero-byte
    layout (RFC 4627 section 3 style detection).

    Taken from `json` package in CPython 3.7.
    Source can be found at https://bit.ly/2OHqCIK.
    '''
    # An explicit BOM wins outright. UTF-32 BOMs are checked before UTF-16
    # because the UTF-32-LE BOM begins with the UTF-16-LE BOM bytes.
    if b.startswith((codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE)):
        return 'utf-32'
    if b.startswith((codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE)):
        return 'utf-16'
    if b.startswith(codecs.BOM_UTF8):
        return 'utf-8-sig'

    # No BOM: JSON text starts with an ASCII character, so the position of
    # zero bytes among the first bytes identifies the wide encodings.
    if len(b) >= 4:
        if not b[0]:
            # 00 00 -- --  ->  utf-32-be;  00 XX -- --  ->  utf-16-be
            return 'utf-16-be' if b[1] else 'utf-32-be'
        if not b[1]:
            # XX 00 00 00  ->  utf-32-le; any other XX 00 pattern  ->  utf-16-le
            return 'utf-16-le' if b[2] or b[3] else 'utf-32-le'
    elif len(b) == 2:
        if not b[0]:
            # 00 XX  ->  utf-16-be
            return 'utf-16-be'
        if not b[1]:
            # XX 00  ->  utf-16-le
            return 'utf-16-le'
    # Anything else defaults to utf-8.
    return 'utf-8'
class BaseException(Exception):
    ''' Base exception to be implemented and raised while handling exceptions
    raised by libraries used in `commentjson`.

    Sets message of self in a way that it clearly calls out that the exception
    was raised by another library, along with the entire stacktrace of the
    exception raised by the other library.

    Subclasses must set :attr:`library` to the name of the wrapped library.

    .. note::
        The class keeps the historical ``commentjson`` name and therefore
        shadows the ``BaseException`` builtin inside this module.
    '''

    # Name of the third-party library whose error is being wrapped.
    # The class-level default makes the guard in __init__ reachable:
    # previously a subclass without ``library`` raised AttributeError on
    # ``self.library`` instead of the intended NotImplementedError.
    library = None

    def __init__(self, exc):
        if self.library is None:
            raise NotImplementedError(
                'Value of library must be set in the '
                'inherited exception class.')

        # Indent the wrapped library's traceback so it reads as a quoted block.
        tb = traceback.format_exc()
        tb = '\n'.join(' ' * 4 + line_ for line_ in tb.split('\n'))

        # Pull the best available description out of the wrapped exception.
        error = None
        try:
            error = exc.msg
        except AttributeError:
            try:
                error = exc.message
            except AttributeError:
                error = str(exc)

        self.message = '\n'.join([
            'JSON Library Exception\n',
            ('Exception thrown by library (%s): '
             '\033[4;37m%s\033[0m\n' % (self.library, error)),
            '%s' % tb,
        ])
        Exception.__init__(self, self.message)
class ParserException(BaseException):
    '''Exception raised when `lark` raises an exception i.e. the failure is
    not caused by `commentjson` itself but by its use of `lark` (the
    comment-aware grammar parser).
    '''
    # Library name reported in the wrapped error message.
    library = 'lark'
class JSONLibraryException(BaseException):
    '''Exception raised when the `json` raises an exception i.e.
    the exception is not caused by `commentjson` and caused by the use of
    `json` in `commentjson`.

    .. note::
        As of now, ``commentjson`` supports only standard library's ``json``
        module. It might start supporting other widely-used contributed JSON
        libraries in the future.
    '''
    # Library name reported in the wrapped error message.
    library = 'json'
def _remove_trailing_commas(tree):
    """Recursively drop TRAILING_COMMA tokens from a lark parse tree, in place,
    and return the (mutated) tree."""
    if not isinstance(tree, Tree):
        # Leaf token: nothing to strip below it.
        return tree
    kept_children = []
    for child in tree.children:
        is_trailing_comma = isinstance(child, Token) and child.type == 'TRAILING_COMMA'
        if not is_trailing_comma:
            kept_children.append(_remove_trailing_commas(child))
    tree.children = kept_children
    return tree
def loads(text, *args, **kwargs):
    ''' Deserialize `text` (a `str` or `unicode` instance containing a JSON
    document with Python or JavaScript like comments) to a Python object.

    :param text: serialized JSON string with or without comments.
    :param kwargs: all the arguments that `json.loads <http://docs.python.org/
                   2/library/json.html#json.loads>`_ accepts.
    :raises ValueError: if the text cannot be parsed by the comment grammar.
    :returns: dict or list.
    '''
    # Accept raw bytes the same way the stdlib ``json`` module does.
    if isinstance(text, (bytes, bytearray)):
        text = text.decode(detect_encoding(text), 'surrogatepass')

    try:
        # Parse with the comment-aware grammar, strip trailing commas, then
        # reconstruct plain JSON that the stdlib parser understands.
        tree = _remove_trailing_commas(parser.parse(text))
        plain_json = serializer.reconstruct(tree)
    except lark.exceptions.UnexpectedCharacters:
        raise ValueError('Unable to parse text', text)

    return json.loads(plain_json, *args, **kwargs)
def METHOD_NAME(obj, **kwargs):
    ''' Serialize `obj` to a JSON formatted `str`. Accepts the same arguments
    as `json` module in stdlib.

    :param obj: a JSON serializable Python object.
    :param kwargs: all the arguments that `json.dumps <http://docs.python.org/
                   2/library/json.html#json.dumps>`_ accepts.
    :raises: commentjson.JSONLibraryException
    :returns str: serialized string.
    '''
    # Bug fix: delegate to ``json.dumps`` -- the stdlib module has no
    # attribute named after this function, so the previous
    # ``json.METHOD_NAME(obj, **kwargs)`` call raised AttributeError on
    # every use.
    try:
        return json.dumps(obj, **kwargs)
    except Exception as e:
        # Wrap stdlib failures like ``load``/``dump`` do, matching the
        # documented :raises: contract.
        raise JSONLibraryException(e)
def load(fp, **kwargs):
    ''' Deserialize `fp` (a `.read()`-supporting file-like object containing a
    JSON document with Python or JavaScript like comments) to a Python object.

    :param fp: a `.read()`-supporting file-like object containing a JSON
               document with or without comments.
    :param kwargs: all the arguments that `json.load <http://docs.python.org/
                   2/library/json.html#json.load>`_ accepts.
    :raises: commentjson.JSONLibraryException
    :returns: dict or list.
    '''
    try:
        content = fp.read()
        return loads(content, **kwargs)
    except Exception as exc:
        # Surface every failure under the package's exception type.
        raise JSONLibraryException(exc)
def dump(obj, fp, **kwargs):
    ''' Serialize `obj` as a JSON formatted stream to `fp` (a
    `.write()`-supporting file-like object). Accepts the same arguments as
    `json` module in stdlib.

    :param obj: a JSON serializable Python object.
    :param fp: a `.write()`-supporting file-like object.
    :param kwargs: all the arguments that `json.dump <http://docs.python.org/
                   2/library/json.html#json.dump>`_ accepts.
    :raises: commentjson.JSONLibraryException
    '''
    try:
        json.dump(obj, fp, **kwargs)
    except Exception as exc:
        # Surface stdlib json failures under the package's exception type.
        raise JSONLibraryException(exc)
7,163 | test gogogo | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: William Baker
import unittest
import genpy
import numpy as np
import rospy
import rostest
import os
from moveit_msgs.msg import RobotState
from sensor_msgs.msg import JointState
from moveit_commander import (
RobotCommander,
PlanningSceneInterface,
MoveItCommanderException,
)
class PythonMoveitCommanderTest(unittest.TestCase):
    """Integration tests for the Python MoveIt commander bound to the
    'manipulator' planning group. Requires a running move_group node and a
    loaded robot_description (launched via rostest)."""

    PLANNING_GROUP = "manipulator"
    # Joint names of the test robot; ordering matches position vectors below.
    JOINT_NAMES = [
        "joint_1",
        "joint_2",
        "joint_3",
        "joint_4",
        "joint_5",
        "joint_6",
    ]

    @classmethod
    def setUpClass(self):
        # NOTE(review): conventional first parameter of a classmethod is
        # ``cls``; ``self`` here actually receives the class object.
        self.commander = RobotCommander("robot_description")
        self.group = self.commander.get_group(self.PLANNING_GROUP)

    @classmethod
    def tearDown(self):
        # NOTE(review): ``tearDown`` is normally an *instance* method; the
        # classmethod hook is ``tearDownClass``. Harmless here (no-op), but
        # confirm which was intended.
        pass

    def test_enforce_bounds_empty_state(self):
        # An empty RobotState cannot be serialized for the underlying call.
        in_msg = RobotState()
        with self.assertRaises(genpy.DeserializationError):
            self.group.enforce_bounds(in_msg)

    def test_enforce_bounds(self):
        # An out-of-bounds joint value must be clamped in the returned state
        # while the input message stays untouched.
        in_msg = RobotState()
        in_msg.joint_state.header.frame_id = "base_link"
        in_msg.joint_state.name = self.JOINT_NAMES
        in_msg.joint_state.position = [0] * 6
        in_msg.joint_state.position[0] = 1000
        out_msg = self.group.enforce_bounds(in_msg)
        self.assertEqual(in_msg.joint_state.position[0], 1000)
        self.assertLess(out_msg.joint_state.position[0], 1000)

    def test_get_current_state(self):
        # presumably the test robot starts at the all-zeros pose -- TODO confirm
        expected_state = RobotState()
        expected_state.joint_state.header.frame_id = "base_link"
        expected_state.multi_dof_joint_state.header.frame_id = "base_link"
        expected_state.joint_state.name = self.JOINT_NAMES
        expected_state.joint_state.position = [0] * 6
        self.assertEqual(self.group.get_current_state(), expected_state)

    def check_target_setting(self, expect, *args):
        """Set the joint target from ``args`` (default: ``expect`` itself) and
        assert the commander reports back exactly ``expect``."""
        if len(args) == 0:
            args = [expect]
        self.group.set_joint_value_target(*args)
        res = self.group.get_joint_value_target()
        self.assertTrue(
            np.all(np.asarray(res) == np.asarray(expect)),
            "Setting failed for %s, values: %s" % (type(args[0]), res),
        )

    def test_target_setting(self):
        # The target setter accepts lists, tuples, ndarrays, dicts,
        # (name, value) pairs, and JointState messages.
        n = self.group.get_variable_count()
        self.check_target_setting([0.1] * n)
        self.check_target_setting((0.2,) * n)
        self.check_target_setting(np.zeros(n))
        self.check_target_setting(
            [0.3] * n, {name: 0.3 for name in self.group.get_active_joints()}
        )
        self.check_target_setting([0.5] + [0.3] * (n - 1), "joint_1", 0.5)
        js_target = JointState(name=self.JOINT_NAMES, position=[0.1] * n)
        self.check_target_setting([0.1] * n, js_target)
        # name and position should have the same size, or raise exception
        with self.assertRaises(MoveItCommanderException):
            js_target.position = []
            self.check_target_setting(None, js_target)

    def plan(self, target):
        """Plan to ``target`` and return the planner's result tuple."""
        self.group.set_joint_value_target(target)
        return self.group.plan()

    def test_plan(self):
        state = JointState(name=self.JOINT_NAMES, position=[0, 0, 0, 0, 0, 0])
        self.assertTrue(self.group.plan(state.position)[0])
        self.assertTrue(self.group.plan("current")[0])
        # NOTE(review): assertTrue's second argument is only the failure
        # message, so this line merely checks that ``state`` is truthy --
        # assertEqual may have been intended.
        self.assertTrue(state, self.group.plan()[0])

    def test_validation(self):
        current = np.asarray(self.group.get_current_joint_values())
        success1, plan1, time1, err1 = self.plan(current + 0.2)
        success2, plan2, time2, err2 = self.plan(current + 0.2)
        self.assertTrue(success1)
        self.assertTrue(success2)
        # first plan should execute
        self.assertTrue(self.group.execute(plan1))
        # second plan should be invalid now (due to modified start point) and rejected
        self.assertFalse(self.group.execute(plan2))
        # newly planned trajectory should execute again
        success3, plan3, time3, err3 = self.plan(current)
        self.assertTrue(success3)
        self.assertTrue(self.group.execute(plan3))

    def METHOD_NAME(self):
        # ``go`` must accept every target representation that ``plan`` does,
        # plus remembered named states and poses.
        current_joints = np.asarray(self.group.get_current_joint_values())
        self.group.set_joint_value_target(current_joints)
        self.assertTrue(self.group.go(True))
        self.assertTrue(self.group.go(current_joints))
        self.assertTrue(self.group.go(list(current_joints)))
        self.assertTrue(self.group.go(tuple(current_joints)))
        self.assertTrue(
            self.group.go(JointState(name=self.JOINT_NAMES, position=current_joints))
        )
        self.group.remember_joint_values("current")
        self.assertTrue(self.group.go("current"))
        current_pose = self.group.get_current_pose()
        self.assertTrue(self.group.go(current_pose))

    def test_planning_scene_interface(self):
        # Smoke test: constructing the interface must not raise.
        planning_scene = PlanningSceneInterface()
if __name__ == "__main__":
    PKGNAME = "moveit_ros_planning_interface"
    NODENAME = "moveit_test_python_moveit_commander"
    # Start a ROS node and run the test case under rostest.
    rospy.init_node(NODENAME)
    rostest.rosrun(PKGNAME, NODENAME, PythonMoveitCommanderTest)
    # suppress cleanup segfault
    # (os._exit skips Python interpreter teardown entirely)
    os._exit(0)
7,164 | test get repo state | from unittest.mock import MagicMock
import pytest
from reconcile.gql_definitions.fragments.vault_secret import VaultSecret
from reconcile.gql_definitions.terraform_repo.terraform_repo import (
AWSAccountV1,
TerraformRepoV1,
)
from reconcile.terraform_repo import (
TerraformRepoIntegration,
TerraformRepoIntegrationParams,
)
from reconcile.utils.exceptions import ParameterError
from reconcile.utils.state import State
A_REPO = "https://git-example/tf-repo-example"
A_REPO_SHA = "a390f5cb20322c90861d6d80e9b70c6a579be1d0"
B_REPO = "https://git-example/tf-repo-example2"
B_REPO_SHA = "94edb90815e502b387c25358f5ec602e52d0bfbb"
AWS_UID = "000000000000"
AUTOMATION_TOKEN_PATH = "aws-secrets/terraform/foo"
@pytest.fixture
def existing_repo(aws_account) -> TerraformRepoV1:
    # Repo assumed to already exist in state for the diff tests below.
    return TerraformRepoV1(
        name="a_repo",
        repository=A_REPO,
        ref=A_REPO_SHA,
        account=aws_account,
        projectPath="tf",
        delete=False,
    )


@pytest.fixture
def new_repo(aws_account) -> TerraformRepoV1:
    # Second repo used to exercise additions to the existing state.
    return TerraformRepoV1(
        name="b_repo",
        repository=B_REPO,
        ref=B_REPO_SHA,
        account=aws_account,
        projectPath="tf",
        delete=False,
    )


@pytest.fixture()
def automation_token() -> VaultSecret:
    # Vault secret reference consumed by the aws_account fixture.
    return VaultSecret(path=AUTOMATION_TOKEN_PATH, version=1, field="all", format=None)


@pytest.fixture
def aws_account(automation_token) -> AWSAccountV1:
    # Minimal AWS account carrying the automation token.
    return AWSAccountV1(
        name="foo",
        uid="000000000000",
        automationToken=automation_token,
    )


@pytest.fixture
def int_params() -> TerraformRepoIntegrationParams:
    # Integration params with git validation disabled so no network is needed.
    return TerraformRepoIntegrationParams(print_to_file=None, validate_git=False)


@pytest.fixture()
def state_mock() -> MagicMock:
    # Spec'd mock of the State store; call expectations are asserted in tests.
    return MagicMock(spec=State)
def test_addition_to_existing_repo(existing_repo, new_repo, int_params, state_mock):
    """Adding a second repo yields a diff of just the new repo and persists its state."""
    integration = TerraformRepoIntegration(params=int_params)
    diff = integration.calculate_diff(
        existing_state=[existing_repo],
        desired_state=[existing_repo, new_repo],
        dry_run=False,
        state=state_mock,
    )
    assert diff == [new_repo]
    # A state entry must be written for the newly added repo.
    state_mock.add.assert_called_once_with(
        new_repo.name, new_repo.dict(by_alias=True), force=True
    )
def test_updating_repo_ref(existing_repo, int_params, state_mock):
    """Changing only the ref of a known repo shows up in the diff and updates state."""
    updated_repo = TerraformRepoV1.copy(existing_repo)
    updated_repo.ref = B_REPO_SHA
    integration = TerraformRepoIntegration(params=int_params)
    diff = integration.calculate_diff(
        existing_state=[existing_repo],
        desired_state=[updated_repo],
        dry_run=False,
        state=state_mock,
    )
    assert diff == [updated_repo]
    state_mock.add.assert_called_once_with(
        updated_repo.name, updated_repo.dict(by_alias=True), force=True
    )
def test_fail_on_update_invalid_repo_params(existing_repo, int_params):
    """Mutating attributes the integration treats as immutable must raise ParameterError."""
    updated_repo = TerraformRepoV1.copy(existing_repo)
    updated_repo.name = "c_repo"
    updated_repo.project_path = "c_repo"
    updated_repo.repository = B_REPO
    updated_repo.ref = B_REPO_SHA
    updated_repo.delete = True
    integration = TerraformRepoIntegration(params=int_params)
    with pytest.raises(ParameterError):
        integration.calculate_diff(
            existing_state=[existing_repo],
            desired_state=[updated_repo],
            dry_run=True,
            state=None,
        )
def test_delete_repo(existing_repo, int_params, state_mock):
    """A repo flagged delete=True appears in the diff and is removed from state."""
    flagged_repo = TerraformRepoV1.copy(existing_repo)
    flagged_repo.delete = True
    integration = TerraformRepoIntegration(params=int_params)
    diff = integration.calculate_diff(
        existing_state=[existing_repo],
        desired_state=[flagged_repo],
        dry_run=False,
        state=state_mock,
    )
    assert diff == [flagged_repo]
    state_mock.rm.assert_called_once_with(flagged_repo.name)
def test_delete_repo_without_flag(existing_repo, int_params):
    """Dropping a repo from the desired state without delete=True is rejected."""
    integration = TerraformRepoIntegration(params=int_params)
    with pytest.raises(ParameterError):
        integration.calculate_diff(
            existing_state=[existing_repo], desired_state=[], dry_run=True, state=None
        )
def METHOD_NAME(s3_state_builder, int_params, existing_repo):
    """State stored in S3 must deserialize back into the expected repo list."""
    stored_repo = {
        "name": "a_repo",
        "repository": A_REPO,
        "ref": A_REPO_SHA,
        "projectPath": "tf",
        "delete": False,
        "account": {
            "name": "foo",
            "uid": AWS_UID,
            "automationToken": {
                "path": AUTOMATION_TOKEN_PATH,
                "field": "all",
                "version": 1,
                "format": None,
            },
        },
    }
    state = s3_state_builder({"ls": ["/a_repo"], "get": {"a_repo": stored_repo}})
    integration = TerraformRepoIntegration(params=int_params)
    existing_state = integration.get_existing_state(state=state)
    assert existing_state == [existing_repo]
def test_update_repo_state(int_params, existing_repo, state_mock):
    """A repo missing from the stored state is written back even with no other change."""
    integration = TerraformRepoIntegration(params=int_params)
    integration.calculate_diff(
        existing_state=[],
        desired_state=[existing_repo],
        dry_run=False,
        state=state_mock,
    )
    state_mock.add.assert_called_once_with(
        existing_repo.name, existing_repo.dict(by_alias=True), force=True
    )
def test_fail_on_multiple_repos_dry_run(int_params, existing_repo, new_repo):
    """More than one new repo in a single dry run must raise."""
    integration = TerraformRepoIntegration(params=int_params)
    with pytest.raises(Exception):
        integration.calculate_diff(
            existing_state=[],
            desired_state=[existing_repo, new_repo],
            dry_run=True,
            state=None,
        )
def test_succeed_on_multiple_repos_non_dry_run(int_params, existing_repo, new_repo):
    """Multiple new repos are allowed outside of dry-run mode.

    The returned diff must contain exactly the desired repos.
    """
    integration = TerraformRepoIntegration(params=int_params)
    desired_state = [existing_repo, new_repo]
    diff = integration.calculate_diff(
        existing_state=[], desired_state=desired_state, dry_run=False, state=None
    )
    assert diff
    # Bug fix: ``list.sort()`` returns None, so the previous assertion
    # compared ``None == None`` and could never fail. ``sorted()`` returns
    # the sorted lists so the repo contents are actually compared.
    assert sorted(diff, key=lambda r: r.name) == sorted(
        desired_state, key=lambda r: r.name
    )
def test_no_op_succeeds(int_params, existing_repo):
    """Identical existing and desired state yields no diff."""
    integration = TerraformRepoIntegration(params=int_params)
    unchanged = [existing_repo]
    diff = integration.calculate_diff(
        existing_state=unchanged, desired_state=unchanged, dry_run=True, state=None
    )
    assert diff is None
7,165 | main | import argparse
import os
import yaml
def METHOD_NAME(args):
    """Write one YAML experiment config per trial seed under the folder
    derived from the CLI arguments."""
    folder = os.path.join(
        args.config_root,
        args.config_type,
        "zc_only" if args.zc_only else "zc_architecture",
        "use_zc_api" if args.use_zc_api else "use_predictor",
        args.optimizer,
        f'{args.search_space}-{args.start_seed}',
        args.dataset,
    )
    print(folder)
    os.makedirs(folder, exist_ok=True)

    args.start_seed = int(args.start_seed)
    args.trials = int(args.trials)

    for seed in range(args.start_seed, args.start_seed + args.trials):
        config = {
            'seed': seed,
            'search_space': args.search_space,
            'dataset': args.dataset,
            'optimizer': args.optimizer,
            'config_type': args.config_type,
            'predictor': args.predictor,
            'out_dir': args.out_dir,
            'test_size': args.test_size,
            'train_portion': args.train_portion,
            'batch_size': args.batch_size,
            'cutout': args.cutout,
            'cutout_length': args.cutout_length,
            'cutout_prob': args.cutout_prob,
        }
        # Every CLI argument not already placed at the top level becomes a
        # search option, minus the generator's own bookkeeping arguments.
        top_level_keys = set(config)
        search_config = {
            name: value
            for name, value in vars(args).items()
            if name not in top_level_keys
        }
        for bookkeeping_key in ('config_root', 'trials', 'start_seed'):
            del search_config[bookkeeping_key]
        search_config['seed'] = seed
        config['search'] = search_config

        with open(folder + f'/config_{seed}.yaml', 'w') as fh:
            yaml.dump(config, fh)
if __name__ == "__main__":
    """ This is executed when run from the command line """
    parser = argparse.ArgumentParser()
    # Experiment / pipeline options
    parser.add_argument("--search_space", type=str, default='nasbench201', help="nasbench101/201/301/tnb101")
    parser.add_argument("--dataset", type=str, default='cifar10', help="Which dataset")
    parser.add_argument("--optimizer", type=str, default='npenas', help="Blackbox optimizer to use")
    parser.add_argument("--predictor", type=str, default='zc', help="which predictor")
    parser.add_argument("--out_dir", type=str, default='run', help="Output directory")
    parser.add_argument("--start_seed", type=int, default=9000, help="starting seed")
    parser.add_argument("--trials", type=int, default=100, help="Number of trials")
    parser.add_argument("--test_size", type=int, default=1000, help="Test set size for predictor")
    parser.add_argument("--train_portion", type=float, default=0.7, help="Train portion")
    parser.add_argument("--batch_size", type=int, default=32, help="Batch size")
    parser.add_argument("--cutout", type=bool, default=False, help="Cutout")
    parser.add_argument("--cutout_length", type=int, default=16, help="Cutout length")
    parser.add_argument("--cutout_prob", type=float, default=1.0, help="Cutout probability")
    parser.add_argument("--config_root", type=str, default='configs', help="Root config directory")
    # Search options
    parser.add_argument("--epochs", type=int, default=200, help="Number of search epochs")
    parser.add_argument("--fidelity", type=int, default=-1, help="Number of epochs")
    parser.add_argument("--sample_size", type=int, default=10, help="Number of samples")
    parser.add_argument("--population_size", type=int, default=50, help="Number of individuals")
    parser.add_argument("--checkpoint_freq", type=int, default=1001, help="Checkpoint frequency")
    parser.add_argument("--zc_names", nargs='+', default=['params', 'flops', 'jacov', 'plain', 'grasp', 'snip', 'fisher', 'grad_norm', 'epe_nas', 'synflow', 'l2_norm'], help="Names of ZC predictors to use")
    parser.add_argument("--k", type=int, default=10, help="Top k candidates to choose in each batch")
    parser.add_argument("--num_init", type=int, default=10, help="Root config directory")
    parser.add_argument("--num_ensemble", type=int, default=1, help="Root config directory")
    parser.add_argument("--acq_fn_type", type=str, default='its', help="Root config directory")
    parser.add_argument("--acq_fn_optimization", type=str, default='mutation', help="Root config directory")
    parser.add_argument("--encoding_type", type=str, default='adjacency', help="Root config directory")
    parser.add_argument("--num_arches_to_mutate", type=int, default=2, help="Root config directory")
    parser.add_argument("--max_mutations", type=int, default=1, help="Root config directory")
    parser.add_argument("--num_candidates", type=int, default=100, help="Root config directory")
    parser.add_argument("--predictor_type", type=str, default='xgb', help="Root config directory")
    parser.add_argument("--config_type", type=str, default='zc_and_adjacency', help="Type of experiment")
    parser.add_argument("--zc_ensemble", type=bool, default=True, help="True to use ensemble of ZC predictors")
    # String-to-bool flags ("True"/"False"); argparse's type=bool would treat
    # any non-empty string as truthy.
    parser.add_argument("--zc", type= lambda x : (True if x == "True" else False), default=True, help="Whether zerocost is used or not")
    parser.add_argument("--zc_only", type= lambda x : (True if x == "True" else False), default=False, help="Specify how to define zerocost features into the tree")
    parser.add_argument("--use_zc_api", type= lambda x : (True if x == "True" else False), default=False, help="Whether to use zc_api (True) or zc_predictor (False)")
    args = parser.parse_args()
    METHOD_NAME(args)
7,166 | test dense | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test layout rewrite support for whole neural networks"""
import sys
import tempfile
import pytest
import numpy as np
import tvm
from tvm import relay, auto_scheduler
from tvm.contrib import graph_executor
import tvm.testing
def get_np_array(var, dtype):
return np.random.randn(*[int(x) for x in var.type_annotation.shape]).astype(dtype)
def get_relay_conv2d(
    outc=32,
    inc=32,
    height=14,
    width=14,
    kh=3,
    kw=3,
    batch=1,
    pad=0,
    stride=1,
    dilation=1,
    layout="NHWC",
):
    """Build an IRModule containing a single conv2d plus random numpy inputs.

    :param layout: "NHWC" (kernel HWIO) or "NCHW" (kernel OIHW). Any other
        value falls through both branches and raises UnboundLocalError.
    :returns: (module, data ndarray, weight ndarray)
    """
    dtype = "float32"
    if layout == "NHWC":
        kernel_layout = "HWIO"
        d = relay.var("data", shape=(batch, height, width, inc), dtype=dtype)
        w = relay.var("weight", shape=(kh, kw, inc, outc), dtype=dtype)
    elif layout == "NCHW":
        kernel_layout = "OIHW"
        d = relay.var("data", shape=(batch, inc, height, width), dtype=dtype)
        w = relay.var("weight", shape=(outc, inc, kh, kw), dtype=dtype)
    y = relay.nn.conv2d(
        d,
        w,
        padding=pad,
        kernel_size=(kh, kw),
        strides=(stride, stride),
        dilation=(dilation, dilation),
        channels=outc,
        groups=1,
        data_layout=layout,
        kernel_layout=kernel_layout,
    )
    mod = tvm.IRModule()
    mod["main"] = relay.Function([d, w], y)
    data, weight = get_np_array(d, dtype), get_np_array(w, dtype)
    return mod, data, weight
def get_relay_conv3d(
    outc=8,
    inc=8,
    depth=8,
    height=7,
    width=7,
    kd=1,
    kh=1,
    kw=1,
    batch=1,
    pad=0,
    stride=1,
    dilation=1,
    layout="NDHWC",
):
    """Build an IRModule containing a single conv3d plus random numpy inputs.

    :param layout: "NDHWC" (kernel DHWIO) or "NCDHW" (kernel OIDHW). Any other
        value falls through both branches and raises UnboundLocalError.
    :returns: (module, data ndarray, weight ndarray)
    """
    dtype = "float32"
    if layout == "NDHWC":
        kernel_layout = "DHWIO"
        d = relay.var("data", shape=(batch, depth, height, width, inc), dtype=dtype)
        w = relay.var("weight", shape=(kd, kh, kw, inc, outc), dtype=dtype)
    elif layout == "NCDHW":
        kernel_layout = "OIDHW"
        d = relay.var("data", shape=(batch, inc, depth, height, width), dtype=dtype)
        w = relay.var("weight", shape=(outc, inc, kd, kh, kw), dtype=dtype)
    y = relay.nn.conv3d(
        d,
        w,
        padding=pad,
        kernel_size=(kd, kh, kw),
        strides=(stride, stride, stride),
        dilation=(dilation, dilation, dilation),
        channels=outc,
        groups=1,
        data_layout=layout,
        kernel_layout=kernel_layout,
    )
    mod = tvm.IRModule()
    mod["main"] = relay.Function([d, w], y)
    data, weight = get_np_array(d, dtype), get_np_array(w, dtype)
    return mod, data, weight
def get_relay_dense(m=128, n=128, k=128):
    """Build an IRModule with a single nn.dense ((m, k) x (n, k)^T) plus
    random numpy inputs matching the relay vars."""
    dtype = "float32"
    data_var = relay.var("data", shape=(m, k), dtype=dtype)
    weight_var = relay.var("weight", shape=(n, k), dtype=dtype)
    dense = relay.nn.dense(data_var, weight_var)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([data_var, weight_var], dense)
    return mod, get_np_array(data_var, dtype), get_np_array(weight_var, dtype)
def get_relay_batchmm(batch=4, m=128, n=128, k=128):
    """Build an IRModule with a single nn.batch_matmul plus random numpy
    inputs matching the relay vars."""
    dtype = "float32"
    lhs = relay.var("data", shape=(batch, m, k), dtype=dtype)
    rhs = relay.var("weight", shape=(batch, n, k), dtype=dtype)
    product = relay.nn.batch_matmul(lhs, rhs)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([lhs, rhs], product)
    return mod, get_np_array(lhs, dtype), get_np_array(rhs, dtype)
def tune_and_check(mod, data, weight, target, dev):
    """Auto-schedule *mod* for one trial, build it, and check numerics.

    The module is tuned with random search (one measurement), compiled with
    the tuned schedules, then compared against an untuned opt_level=0 build
    of the same module on the same input.
    """
    # Extract tasks from a relay program
    tasks, task_weights = auto_scheduler.extract_tasks(
        mod, target=target, params={"weight": weight}
    )
    # Tuning records live only for the duration of this check.
    with tempfile.NamedTemporaryFile() as fp:
        log_file = fp.name
        # Tune tasks
        tuner = auto_scheduler.TaskScheduler(tasks, task_weights, callbacks=[])
        tune_option = auto_scheduler.TuningOptions(
            num_measure_trials=1,
            num_measures_per_round=1,
            builder=auto_scheduler.LocalBuilder(timeout=60),
            measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
        )
        # Random search keeps the test fast and deterministic enough.
        tuner.tune(tune_option, search_policy="sketch.random")
        # Compile
        with auto_scheduler.ApplyHistoryBest(log_file):
            with tvm.transform.PassContext(
                opt_level=3,
                config={"relay.backend.use_auto_scheduler": True},
            ):
                lib = relay.build(mod, target=target, params={"weight": weight})
        # Compile without auto-scheduler for correctness check
        with tvm.transform.PassContext(opt_level=0):
            lib2 = relay.build(mod, target=target, params={"weight": weight})

        def get_output(data, lib):
            # Run the compiled graph once and return output 0 as numpy.
            module = graph_executor.GraphModule(lib["default"](dev))
            module.set_input("data", data)
            module.run()
            return module.get_output(0).numpy()

        # Check correctness
        actual_output = get_output(data, lib)
        expected_output = get_output(data, lib2)
        tvm.testing.assert_allclose(actual_output, expected_output, rtol=1e-4, atol=2e-4)
# layout rewriting only works on CPU targets
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_conv2d(target, dev):
    """Tune and numerically verify a 1x1 conv2d."""
    mod, data, weight = get_relay_conv2d(kh=1, kw=1)
    tune_and_check(mod, data, weight, target, dev)
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_conv2d_winograd(target, dev):
    """Tune and verify a 3x3 conv2d (shape eligible for winograd)."""
    mod, data, weight = get_relay_conv2d(outc=128, kh=3, kw=3)
    tune_and_check(mod, data, weight, target, dev)
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_conv3d(target, dev):
    """Tune and verify a conv3d with default (NDHWC) layout."""
    mod, data, weight = get_relay_conv3d()
    tune_and_check(mod, data, weight, target, dev)
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def METHOD_NAME(target, dev):
    """Tune and verify a dense (matmul) workload."""
    mod, data, weight = get_relay_dense()
    tune_and_check(mod, data, weight, target, dev)
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_batch_matmul(target, dev):
    """Tune and verify a batched matmul workload."""
    mod, data, weight = get_relay_batchmm()
    tune_and_check(mod, data, weight, target, dev)
if __name__ == "__main__":
tvm.testing.main() |
7,167 | publication channels | import pytest
from briefcase.commands import PublishCommand
from briefcase.commands.base import full_options
from briefcase.config import AppConfig
from briefcase.console import Console, Log
from ...utils import create_file
class DummyPublishCommand(PublishCommand):
    """A dummy publish command that doesn't actually do anything.

    It only serves to track which actions would be performed: each overridden
    hook appends a tuple to ``self.actions`` so tests can assert on ordering.
    """

    # Platform and format contain upper case to test case normalization
    platform = "Tester"
    output_format = "Dummy"
    description = "Dummy publish command"

    def __init__(self, *args, **kwargs):
        kwargs.setdefault("logger", Log())
        kwargs.setdefault("console", Console())
        super().__init__(*args, apps={}, **kwargs)
        # Ordered record of (action-name, ...) tuples asserted on by tests.
        self.actions = []

    def briefcase_toml(self, app):
        # default any app to an empty `briefcase.toml`
        # NOTE(review): relies on `self._briefcase_toml` being populated by
        # the base class or by a test before use — confirm it is initialized.
        return self._briefcase_toml.get(app, {})

    def binary_path(self, app):
        return self.bundle_path(app) / f"{app.app_name}.bin"

    def verify_host(self):
        super().verify_host()
        self.actions.append(("verify-host",))

    def verify_tools(self):
        super().verify_tools()
        self.actions.append(("verify-tools",))

    def finalize_app_config(self, app):
        super().finalize_app_config(app=app)
        self.actions.append(("finalize-app-config", app.app_name))

    def verify_app_template(self, app):
        super().verify_app_template(app=app)
        self.actions.append(("verify-app-template", app.app_name))

    def verify_app_tools(self, app):
        super().verify_app_tools(app=app)
        self.actions.append(("verify-app-tools", app.app_name))

    @property
    def METHOD_NAME(self):
        # Channels offered by this dummy backend.
        return ["s3", "alternative"]

    @property
    def default_publication_channel(self):
        return "s3"

    def publish_app(self, app, channel, **kwargs):
        self.actions.append(("publish", app.app_name, channel, kwargs.copy()))
        return full_options({"publish_state": app.app_name}, kwargs)

    # These commands override the default behavior, simply tracking that
    # they were invoked, rather than instantiating a Create/Update/Build command.
    # This is for testing purposes.
    def create_command(self, app, **kwargs):
        self.actions.append(("create", app.app_name, kwargs.copy()))
        # Remove arguments consumed by the underlying call to create_app()
        kwargs.pop("test_mode", None)
        return full_options({"create_state": app.app_name}, kwargs)

    def update_command(self, app, **kwargs):
        self.actions.append(("update", app.app_name, kwargs.copy()))
        # Remove arguments consumed by the underlying call to update_app()
        kwargs.pop("test_mode", None)
        kwargs.pop("update_requirements", None)
        kwargs.pop("update_resources", None)
        kwargs.pop("update_support", None)
        return full_options({"update_state": app.app_name}, kwargs)

    def build_command(self, app, **kwargs):
        self.actions.append(("build", app.app_name, kwargs.copy()))
        # Remove arguments consumed by the underlying call to build_app()
        kwargs.pop("test_mode", None)
        kwargs.pop("update", None)
        return full_options({"build_state": app.app_name}, kwargs)
@pytest.fixture
def publish_command(tmp_path):
return DummyPublishCommand(base_path=tmp_path / "base_path")
@pytest.fixture
def first_app_config():
return AppConfig(
app_name="first",
bundle="com.example",
version="0.0.1",
description="The first simple app",
sources=["src/first"],
)
@pytest.fixture
def first_app_unbuilt(first_app_config, tmp_path):
    """The first_app_config fixture, plus an existing (unbuilt) app bundle.

    The bundle is created under build/<app>/<platform>/<format>, matching
    the layout used by the ``first_app`` and ``second_app`` fixtures
    (the previous path had a stray leading "tester" segment).
    """
    create_file(
        tmp_path
        / "base_path"
        / "build"
        / "first"
        / "tester"
        / "dummy"
        / "first.bundle",
        "first.bundle",
    )
    return first_app_config
@pytest.fixture
def first_app(first_app_unbuilt, tmp_path):
# The same fixture as first_app_config,
# but ensures that the binary for the app exists
create_file(
tmp_path / "base_path" / "build" / "first" / "tester" / "dummy" / "first.bin",
"first.bin",
)
return first_app_unbuilt
@pytest.fixture
def second_app_config():
return AppConfig(
app_name="second",
bundle="com.example",
version="0.0.2",
description="The second simple app",
sources=["src/second"],
)
@pytest.fixture
def second_app(second_app_config, tmp_path):
# The same fixture as second_app_config,
# but ensures that the binary for the app exists
create_file(
tmp_path
/ "base_path"
/ "build"
/ "second"
/ "tester"
/ "dummy"
/ "second.bundle",
"second.bundle",
)
create_file(
tmp_path / "base_path" / "build" / "second" / "tester" / "dummy" / "second.bin",
"second.bin",
)
return second_app_config |
7,168 | get graph url | #
# Copyright (C) 2012-2019 Uninett AS
# Copyright (C) 2022 Sikt
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Template tags used in info subsystem"""
from datetime import datetime, timedelta
import time
from django import template
from django.utils.timesince import timesince
# pylint: disable=C0103
register = template.Library()
@register.filter
def time_since(timestamp):
    """Convert a timestamp to human readable time since."""
    # Abbreviations applied to Django's timesince() output
    # ("day" intentionally has no short form).
    mapping = {'minute': 'min', 'hour': 'hr', 'week': 'wk', 'month': 'mo', 'year': 'yr'}
    if timestamp is None:
        return "Never"
    if _is_more_or_less_now(timestamp):
        return "Now"
    else:
        text = timesince(timestamp)
        # Replace only the unit stems; plural forms keep their trailing "s"
        # (e.g. "minutes" -> "mins").
        for key, replacement in mapping.items():
            text = text.replace(key, replacement)
        return text
@register.filter
def days_since(timestamp):
    """Convert a timestamp to human readable time using days.

    Returns "Never", "Now", "Today", "Yesterday", or "<n> days".
    """
    if timestamp is None:
        return "Never"
    if _is_more_or_less_now(timestamp):
        return "Now"
    elif timestamp.date() == datetime.now().date():
        return "Today"
    elif timestamp.date() == datetime.now().date() - timedelta(days=1):
        return "Yesterday"
    else:
        # Whole calendar days between today and the timestamp's date.
        return "%s days" % (datetime.now().date() - timestamp.date()).days
def _is_more_or_less_now(timestamp):
interval = datetime.now() - timestamp
less_than_a_minute = interval.total_seconds() < 60
return timestamp == datetime.max or less_than_a_minute
@register.filter
def is_max_timestamp(timestamp):
    """Return True when *timestamp* equals datetime.max, else False."""
    return timestamp == datetime.max
@register.filter
def run(function, arg):
    """Apply *function* to *arg* and return its result."""
    return function(arg)
@register.filter
def get_attr(value, arg):
    """Look up attribute *arg* on object *value*.

    value: an object instance - i.e. interface
    arg: i.e. id
    Supports chaining (arg = netbox.sysname).
    If nothing is found, return empty string.
    """
    if '.' in arg:
        return find_attr(value, arg.split('.'))
    return getattr(value, arg, "")
def find_attr(obj, attrlist):
    """Recursively resolve the chain of attribute names in *attrlist*.

    Returns "" as soon as any attribute in the chain is missing.
    """
    head, tail = attrlist[0], attrlist[1:]
    try:
        value = getattr(obj, head)
    except AttributeError:
        return ""
    return find_attr(value, tail) if tail else value
@register.filter
def lookup(value, key):
    """Lookup key in a dictionary"""
    # NOTE(review): on a missing key this returns the whole dictionary (the
    # default is `value` itself), not None/"" — presumably intentional so a
    # template can fall back to the raw mapping; confirm against templates.
    return value.get(key, value)
@register.filter
def interval(value):
"""Create a human readable interval
Arguments:
value -- a number of seconds
"""
return time.strftime('%H:%M:%S', time.gmtime(value))
@register.filter
def add_interval(value, seconds):
"""Create a new timestamp based on value and interval
Arguments:
value -- a datetime object
interval -- interval in seconds
"""
try:
return value + timedelta(seconds=seconds)
except TypeError:
return value
@register.filter
def METHOD_NAME(obj, time_frame):
return obj.METHOD_NAME(time_frame=time_frame)
@register.filter
def get_netbox_availability(netbox, time_frame):
"""Get availability for a given netbox and time frame
:type netbox: nav.models.manage.Netbox
:type time_frame: basestring
"""
availability = netbox.get_availability()
try:
return "%.2f%%" % availability["availability"][time_frame]
except (KeyError, TypeError):
return "N/A"
@register.filter
def get_value(something, key):
    """Return ``something.get(key)``; None when *something* has no .get()."""
    try:
        return something.get(key)
    except AttributeError:
        return None
@register.filter
def sortdict(dictionary, reverse=False):
    """Return the dictionary's (key, value) pairs sorted by key."""
    items = dictionary.items()
    return sorted(items, reverse=bool(reverse))
@register.filter
def is_list(value):
    """Return True when *value* is a plain Python list."""
    return isinstance(value, list)
@register.filter
def dunderless(mapping):
    """Return a copy of *mapping* without keys starting with "__"."""
    return {key: val for key, val in mapping.items() if not key.startswith('__')}
7,169 | list templates | # -*- coding: utf-8 -*-
"""
flask.templating
~~~~~~~~~~~~~~~~
Implements the bridge to Jinja2.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import posixpath
from jinja2 import BaseLoader, Environment as BaseEnvironment, \
TemplateNotFound
from .globals import _request_ctx_stack, _app_ctx_stack
from .signals import template_rendered
from .module import blueprint_is_module
from ._compat import itervalues, iteritems
def _default_template_ctx_processor():
    """Default template context processor. Injects `request`,
    `session` and `g`.
    """
    values = {}
    appctx = _app_ctx_stack.top
    reqctx = _request_ctx_stack.top
    if appctx is not None:
        values['g'] = appctx.g
    if reqctx is not None:
        values['request'] = reqctx.request
        values['session'] = reqctx.session
    return values
class Environment(BaseEnvironment):
"""Works like a regular Jinja2 environment but has some additional
knowledge of how Flask's blueprint works so that it can prepend the
name of the blueprint to referenced templates if necessary.
"""
def __init__(self, app, **options):
if 'loader' not in options:
options['loader'] = app.create_global_jinja_loader()
BaseEnvironment.__init__(self, **options)
self.app = app
class DispatchingJinjaLoader(BaseLoader):
    """A loader that looks for templates in the application and all
    the blueprint folders.
    """

    def __init__(self, app):
        self.app = app

    def get_source(self, environment, template):
        """Return the first matching source from the app or blueprint loaders."""
        for loader, local_name in self._iter_loaders(template):
            try:
                return loader.get_source(environment, local_name)
            except TemplateNotFound:
                pass
        raise TemplateNotFound(template)

    def _iter_loaders(self, template):
        """Yield ``(loader, local_name)`` candidates in priority order."""
        # The application's own loader always has priority.
        loader = self.app.jinja_loader
        if loader is not None:
            yield loader, template

        # old style module based loaders in case we are dealing with a
        # blueprint that is an old style module
        try:
            module, local_name = posixpath.normpath(template).split('/', 1)
            blueprint = self.app.blueprints[module]
            if blueprint_is_module(blueprint):
                loader = blueprint.jinja_loader
                if loader is not None:
                    yield loader, local_name
        except (ValueError, KeyError):
            # ValueError: no '/' in the template name;
            # KeyError: first path segment is not a registered blueprint.
            pass

        # New-style blueprints are searched with the unmodified name
        # (module blueprints were already handled above).
        for blueprint in itervalues(self.app.blueprints):
            if blueprint_is_module(blueprint):
                continue
            loader = blueprint.jinja_loader
            if loader is not None:
                yield loader, template

    def METHOD_NAME(self):
        """Return all template names visible to the app; templates from
        module-style blueprints are prefixed with the blueprint name.
        """
        result = set()
        loader = self.app.jinja_loader
        if loader is not None:
            result.update(loader.METHOD_NAME())

        for name, blueprint in iteritems(self.app.blueprints):
            loader = blueprint.jinja_loader
            if loader is not None:
                for template in loader.METHOD_NAME():
                    prefix = ''
                    if blueprint_is_module(blueprint):
                        prefix = name + '/'
                    result.add(prefix + template)

        return list(result)
def _render(template, context, app):
    """Render *template* with *context* and fire the template_rendered signal."""
    rendered = template.render(context)
    template_rendered.send(app, template=template, context=context)
    return rendered
def render_template(template_name_or_list, **context):
    """Renders a template from the template folder with the given
    context.

    :param template_name_or_list: the name of the template to be
                                  rendered, or an iterable with template names
                                  the first one existing will be rendered
    :param context: the variables that should be available in the
                    context of the template.
    """
    ctx = _app_ctx_stack.top
    # Run the app's context processors so g/request/session etc. are injected.
    ctx.app.update_template_context(context)
    return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
                   context, ctx.app)
def render_template_string(source, **context):
    """Renders a template from the given template source string
    with the given context.

    :param source: the sourcecode of the template to be rendered
    :param context: the variables that should be available in the
                    context of the template.
    """
    ctx = _app_ctx_stack.top
    ctx.app.update_template_context(context)
    template = ctx.app.jinja_env.from_string(source)
    return _render(template, context, ctx.app)
7,170 | test check results | #!/usr/bin/env python3
# Copyright (c) 2021 CINN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from cinn.common import *
from cinn.frontend import *
from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper
@OpTestTool.skip_if(not is_compiled_with_cuda(),
                    "x86 test will be skipped due to timeout.")
class TestConstantOp(OpTest):
    """Compare CINN's `constant` op against a Paddle tensor built from the
    same value, for every case injected by the TestCaseHelper subclasses.
    """

    def setUp(self):
        print(f"\nRunning {self.__class__.__name__}: {self.case}")
        self.inputs = {}
        self.prepare_inputs()

    def prepare_inputs(self):
        # A scalar "constant_value" is coerced to the Python type matching
        # `dtype`; otherwise a random tensor of the requested shape is used.
        self.name = "x"
        dtype = self.case["dtype"]
        if "constant_value" in self.case:
            # NOTE(review): if `dtype` matches none of bool/int/float,
            # self.value is left unset here — confirm all injected dtypes
            # contain one of those substrings.
            if "bool" in dtype:
                self.value = bool(self.case["constant_value"])
            elif "int" in dtype:
                self.value = int(self.case["constant_value"])
            elif "float" in dtype:
                self.value = float(self.case["constant_value"])
        else:
            self.value = self.random(self.case["shape"], dtype).tolist()
        self.dtype = dtype

    def build_paddle_program(self, target):
        x = paddle.to_tensor(self.value, dtype=self.dtype)
        self.paddle_outputs = [x]

    def build_cinn_program(self, target):
        builder = NetBuilder("constant")
        x = builder.constant(self.value, self.name, self.dtype)
        prog = builder.build()
        res = self.get_cinn_output(prog, target, [], [], [x])
        self.cinn_outputs = res

    def METHOD_NAME(self):
        # Constants must be reproduced bit-exactly, so no tolerance.
        self.check_outputs_and_grads(all_equal=True)
class TestConstantOpShape(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestConstantOpShape"
self.cls = TestConstantOp
self.inputs = [
{
"constant_value": 10,
},
{
"constant_value": -5,
},
{
"shape": [10],
},
{
"shape": [8, 5],
},
{
"shape": [10, 3, 5],
},
{
"shape": [1, 2, 4, 8],
},
# known issue: https://github.com/PaddlePaddle/CINN/pull/1453
# The compilation time is particularly long for AssignValue op.
# {
# "shape": [16, 4, 8, 32],
# },
{
"shape": [1],
},
{
"shape": [512],
},
{
"shape": [1024],
},
# very slow for the shape 2048
{
"shape": [2048],
},
{
"shape": [1, 1, 1, 1],
},
]
self.dtypes = [
{
"dtype": "float32"
},
]
self.attrs = []
class TestConstantOpDtype(TestCaseHelper):
def init_attrs(self):
self.class_name = "TestConstantOpDtype"
self.cls = TestConstantOp
self.inputs = [
{
"constant_value": 1,
},
{
"shape": [10],
},
{
"shape": [8, 5],
},
{
"shape": [10, 3, 5],
},
]
self.dtypes = [
{
"dtype": "float16"
},
{
"dtype": "float32"
},
{
"dtype": "float64"
},
{
"dtype": "bool"
},
{
"dtype": "uint8"
},
{
"dtype": "int8"
},
{
"dtype": "int32"
},
{
"dtype": "int64"
},
]
self.attrs = []
if __name__ == "__main__":
TestConstantOpShape().run()
TestConstantOpDtype().run() |
7,171 | test multi captured | from contextlib import redirect_stderr, redirect_stdout
from io import StringIO
from pybind11_tests import iostream as m
def test_captured(capsys):
msg = "I've been redirected to Python, I hope!"
m.captured_output(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
m.captured_err(msg)
stdout, stderr = capsys.readouterr()
assert stdout == ""
assert stderr == msg
def test_captured_large_string(capsys):
# Make this bigger than the buffer used on the C++ side: 1024 chars
msg = "I've been redirected to Python, I hope!"
msg = msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_2byte_offset0(capsys):
msg = "\u07FF"
msg = "" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_2byte_offset1(capsys):
msg = "\u07FF"
msg = "1" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_3byte_offset0(capsys):
msg = "\uFFFF"
msg = "" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_3byte_offset1(capsys):
msg = "\uFFFF"
msg = "1" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_3byte_offset2(capsys):
msg = "\uFFFF"
msg = "12" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_4byte_offset0(capsys):
msg = "\U0010FFFF"
msg = "" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_4byte_offset1(capsys):
msg = "\U0010FFFF"
msg = "1" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_4byte_offset2(capsys):
msg = "\U0010FFFF"
msg = "12" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_4byte_offset3(capsys):
msg = "\U0010FFFF"
msg = "123" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_guard_capture(capsys):
msg = "I've been redirected to Python, I hope!"
m.guard_output(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_series_captured(capture):
with capture:
m.captured_output("a")
m.captured_output("b")
assert capture == "ab"
def test_flush(capfd):
msg = "(not flushed)"
msg2 = "(flushed)"
with m.ostream_redirect():
m.noisy_function(msg, flush=False)
stdout, stderr = capfd.readouterr()
assert stdout == ""
m.noisy_function(msg2, flush=True)
stdout, stderr = capfd.readouterr()
assert stdout == msg + msg2
m.noisy_function(msg, flush=False)
stdout, stderr = capfd.readouterr()
assert stdout == msg
def test_not_captured(capfd):
msg = "Something that should not show up in log"
stream = StringIO()
with redirect_stdout(stream):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stderr == ""
assert stream.getvalue() == ""
stream = StringIO()
with redirect_stdout(stream):
m.captured_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ""
assert stderr == ""
assert stream.getvalue() == msg
def test_err(capfd):
msg = "Something that should not show up in log"
stream = StringIO()
with redirect_stderr(stream):
m.raw_err(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ""
assert stderr == msg
assert stream.getvalue() == ""
stream = StringIO()
with redirect_stderr(stream):
m.captured_err(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ""
assert stderr == ""
assert stream.getvalue() == msg
def METHOD_NAME(capfd):
    """Interleave captured and raw writes: redirect_stdout only sees the
    Python-level (captured) output, while raw C-level output bypasses it.
    """
    stream = StringIO()
    with redirect_stdout(stream):
        m.captured_output("a")
        m.raw_output("b")
        m.captured_output("c")
        m.raw_output("d")
    stdout, stderr = capfd.readouterr()
    assert stdout == "bd"
    assert stream.getvalue() == "ac"
def test_dual(capsys):
m.captured_dual("a", "b")
stdout, stderr = capsys.readouterr()
assert stdout == "a"
assert stderr == "b"
def test_redirect(capfd):
msg = "Should not be in log!"
stream = StringIO()
with redirect_stdout(stream):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stream.getvalue() == ""
stream = StringIO()
with redirect_stdout(stream):
with m.ostream_redirect():
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ""
assert stream.getvalue() == msg
stream = StringIO()
with redirect_stdout(stream):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stream.getvalue() == ""
def test_redirect_err(capfd):
msg = "StdOut"
msg2 = "StdErr"
stream = StringIO()
with redirect_stderr(stream):
with m.ostream_redirect(stdout=False):
m.raw_output(msg)
m.raw_err(msg2)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stderr == ""
assert stream.getvalue() == msg2
def test_redirect_both(capfd):
msg = "StdOut"
msg2 = "StdErr"
stream = StringIO()
stream2 = StringIO()
with redirect_stdout(stream):
with redirect_stderr(stream2):
with m.ostream_redirect():
m.raw_output(msg)
m.raw_err(msg2)
stdout, stderr = capfd.readouterr()
assert stdout == ""
assert stderr == ""
assert stream.getvalue() == msg
assert stream2.getvalue() == msg2
def test_threading():
with m.ostream_redirect(stdout=True, stderr=False):
# start some threads
threads = []
# start some threads
for _j in range(20):
threads.append(m.TestThread())
# give the threads some time to fail
threads[0].sleep()
# stop all the threads
for t in threads:
t.stop()
for t in threads:
t.join()
# if a thread segfaults, we don't get here
assert True |
7,172 | write choice | #
# Copyright (c) 2014-2015 Sylvain Peyrefitte
# Copyright (c) 2018, 2019, 2022 GoSecure Inc.
#
# This file is part of rdpy and PyRDP.
#
# PyRDP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
PER encoding / decoding functions
"""
from typing import BinaryIO, Tuple
from pyrdp.core.packing import Uint8, Uint16BE, Uint32BE
def readLength(s: BinaryIO) -> int:
    """
    Unpack a PER length indicator
    :param s: stream
    """
    byte = Uint8.unpack(s.read(1))
    if byte & 0x80:
        # Long form: high bit set means a 14-bit length spread over two bytes.
        byte &= ~0x80
        return (byte << 8) + Uint8.unpack(s.read(1))
    else:
        # Short form: lengths below 0x80 fit in a single byte.
        return byte
def writeLength(value: int) -> bytes:
    """
    Pack a PER length indicator: short single-byte form below 0x80,
    otherwise the two-byte form with the high bit set.
    """
    if value <= 0x7f:
        return Uint8.pack(value)
    return Uint16BE.pack(value | 0x8000)
def readChoice(s: BinaryIO) -> int:
"""
Unpack a PER choice
:param s: stream
"""
return Uint8.unpack(s.read(1))
def METHOD_NAME(choice: int) -> bytes:
"""
Pack a PER choice
:param choice: choice value
"""
return Uint8.pack(choice)
def readSelection(s: BinaryIO) -> int:
"""
Unpack a PER selection
:param s: stream
"""
return Uint8.unpack(s.read(1))
def writeSelection(selection: int) -> bytes:
"""
Pack a PER selection
:param selection: selection value
"""
return Uint8.pack(selection)
def readNumberOfSet(s: BinaryIO) -> int:
"""
Unpack a PER NumberOfSet
:param s: stream
"""
return Uint8.unpack(s.read(1))
def writeNumberOfSet(numberOfSet: int) -> bytes:
"""
Pack a PER NumberOfSet
:param numberOfSet: NumberOfSet value
"""
return Uint8.pack(numberOfSet)
def readEnumeration(s: BinaryIO) -> int:
"""
Unpack a PER enumeration format
:param s: stream
"""
return Uint8.unpack(s.read(1))
def writeEnumeration(enum: int) -> bytes:
"""
Pack a PER enumeration
:param enum: enumeration value
"""
return Uint8.pack(enum)
def readInteger(s: BinaryIO) -> int:
    """
    Unpack a PER integer
    :param s: stream
    :raise ValueError: if the size of the integer is invalid
    """
    # The integer is prefixed with a length determinant; only 1-, 2- and
    # 4-byte big-endian encodings are accepted.
    size = readLength(s)
    if size == 1:
        return Uint8.unpack(s.read(1))
    elif size == 2:
        return Uint16BE.unpack(s.read(2))
    elif size == 4:
        return Uint32BE.unpack(s.read(4))
    else:
        raise ValueError("invalid integer size %d" % size)
def writeInteger(value: int) -> bytes:
    """
    Pack a PER integer in the smallest of the 1/2/4-byte encodings
    accepted by readInteger.
    """
    if value <= 0xff:
        return writeLength(1) + Uint8.pack(value)
    elif value <= 0xffff:
        # Was `value < 0xffff`, which forced 0xffff into the 4-byte form
        # even though it fits in two bytes (inconsistent with the <= 0xff
        # branch above). readInteger decodes either form identically.
        return writeLength(2) + Uint16BE.pack(value)
    else:
        return writeLength(4) + Uint32BE.pack(value)
def readObjectIdentifier(s: BinaryIO):
    """
    Unpack a PER object identifier (tuple of 6 integers)
    :param s: stream
    :return: (int, int, int, int, int, int)
    """
    size = readLength(s)
    if size != 5:
        raise ValueError("Object identifier size must be 5 (got %d instead)" % size)

    a_oid = [0, 0, 0, 0, 0, 0]
    # The first two components are packed into one byte: high nibble is
    # component 0, low nibble is component 1.
    t12 = Uint8.unpack(s.read(1))
    a_oid[0] = t12 >> 4
    a_oid[1] = t12 & 0x0f
    # The remaining four components each take a full byte.
    a_oid[2] = Uint8.unpack(s.read(1))
    a_oid[3] = Uint8.unpack(s.read(1))
    a_oid[4] = Uint8.unpack(s.read(1))
    a_oid[5] = Uint8.unpack(s.read(1))
    return tuple(a_oid)
def writeObjectIdentifier(oid: Tuple[int, int, int, int, int, int]) -> bytes:
    """
    Pack a PER object identifier
    :param oid: object identifier (tuple of 6 integers)
    """
    # The first two components share one byte: high nibble = oid[0], low
    # nibble = oid[1], mirroring readObjectIdentifier. The previous code
    # combined the nibbles with "&", which always produced 0.
    firstByte = ((oid[0] << 4) & 0xf0) | (oid[1] & 0x0f)
    return writeLength(5) + Uint8.pack(firstByte) + b"".join(Uint8.pack(b) for b in oid[2 :])
def readNumericString(s: BinaryIO, minValue: int) -> str:
    """
    Unpack a PER numeric string
    :param s: stream
    :param minValue: minimum string length
    """
    length = readLength(s)
    # The encoded length is the character count minus minValue; two digits
    # are packed per byte, so recover the byte count (rounding up).
    length = (length + minValue + 1) // 2
    data = s.read(length)

    result = ""
    for b in data:
        # Each nibble holds one digit as an offset from '0' (0x30).
        c1 = (b >> 4) + 0x30
        c2 = (b & 0xf) + 0x30
        result += chr(c1) + chr(c2)

    return result
def writeNumericString(string: str, minValue: int) -> bytes:
"""
Pack a PER numeric string
:param string: numeric string
:param minValue: minimum string length
"""
length = len(string)
mlength = minValue
if length >= minValue:
mlength = length - minValue
result = b""
for i in range(0, length, 2):
c1 = ord(string[i : i + 1])
if i + 1 < length:
c2 = ord(string[i + 1 : i + 2])
else:
c2 = 0x30
c1 = (c1 - 0x30) % 10
c2 = (c2 - 0x30) % 10
result += Uint8.pack((c1 << 4) | c2)
return writeLength(mlength) + result
def readOctetStream(s: BinaryIO, minValue: int = 0) -> bytes:
"""
Unpack a PER octet stream
:param s: stream
:param minValue: minimum string length
"""
size = readLength(s) + minValue
return s.read(size)
def writeOctetStream(bytes: bytes, minValue: int = 0) -> bytes:
"""
Pack a PER octet stream
:param bytes: octet stream
:param minValue: minimum string length
"""
length = len(bytes)
mlength = minValue
if length >= minValue:
mlength = length - minValue
return writeLength(mlength) + bytes
def writeOctetStreamAlternate(bytes: bytes) -> bytes:
"""
Pack a PER octect stream with the alternate read length indicator
:param bytes: octet stream
Currently unused, implemented to match exactly what was sent by mstsc.exe
on the wire.
"""
length = len(bytes)
return Uint16BE.pack(length | 0x8000) + bytes |
7,173 | get fq name | #!/usr/bin/env python3
#
# Copyright (c) 2021, General Electric Company, Inc.
#
# All Rights Reserved
#
# This material is based upon work supported by the Defense Advanced Research
# Projects Agency (DARPA) under Contract No. FA8750-20-C-0203.
#
# Any opinions, findings and conclusions or recommendations expressed in this
# material are those of the author(s) and do not necessarily reflect the views
# of the Defense Advanced Research Projects Agency (DARPA).
from Evidence.Add import COMPONENT, FILE
from Logging import *
from lxml import etree
__xmlroot__ = None
__xmlpath__ = None
__sourceType__ = None
__fileIdentifier__=None
handlers = None
def getAttribute(e, attr ,ns=None):
trace()
# helper function to get the attribute, if a namespace is provided this name space will be looked up from the xml
if ns is not None:
return e.get("{<<NS>>}<<attr>>"
.replace("<<NS>>", e.nsmap[ns])
.replace("<<attr>>", attr))
else:
return e.get(attr)
def getQualifiedTag(tag ,ns=None, e=__xmlroot__):
    '''
    Helper function to get a fully qualified tag name,
    if the optional ns attribute is provided then a name space is included in the string
    the namespace is it is looked from the element "e" if provide, otherwise the xml root is used.
    '''
    # NOTE(review): the `e` parameter is never used, and its default binds
    # __xmlroot__ at import time (None), not at call time; namespaces are
    # always resolved from the module-level __xmlroot__, which must have
    # been set by initialize() before this is called.
    global __xmlroot__
    if ns in __xmlroot__.nsmap:
        return "{<<NS>>}<<tag>>".replace("<<NS>>", __xmlroot__.nsmap[ns]).replace("<<tag>>", tag)
    else:
        return tag
def getParentNamespace(e):
    """Return the nearest enclosing <namespace> element of *e*, or None.

    Walks up the parent chain until a namespace element is found or the
    document root is passed. Unlike the previous version, this never
    dereferences a None parent (getparent() returns None at the root).
    """
    if e is None:
        return None
    parent = e.getparent()
    while parent is not None and parent.tag != getQualifiedTag("namespace"):
        parent = parent.getparent()
    return parent
def METHOD_NAME(e):
    """Return the fully qualified (dot-separated) name of element *e*.

    Prefixes the element's own <name> text with the fully qualified name
    of its enclosing namespace, if any.
    """
    name = e.find(getQualifiedTag("name"))
    parent = getParentNamespace(e)
    if parent != None:
        if METHOD_NAME(parent) != None:
            # NOTE(review): the parent's name is computed twice here;
            # harmless, but could be cached in a local.
            s = METHOD_NAME(parent) + "."+str(name.text)
            return s
        else:
            return name.text
    else:
        return name.text
def getFQPackage():
global __xmlroot__
package = __xmlroot__.find(getQualifiedTag("package"))
name = package.find(getQualifiedTag("name"))
nameStr = str(etree.tostring(name).decode('utf-8'))\
.replace("<name>","")\
.replace("</name>","")\
.replace("<operator>","")\
.replace("</operator>","")\
.replace('<name xmlns="http://www.srcML.org/srcML/src">',"")\
.replace(';',"")
return nameStr
def packageJava(e):
subcomponentOf_identifierStr = None
lastDot = getFQPackage().rfind(".")
if lastDot !=-1:
subcomponentOf_identifierStr ="Source:"+getFQPackage()[0:lastDot]
COMPONENT(fileParent_identifier = __fileIdentifier__,
identifier="Source:"+getFQPackage(),
title=getFQPackage(),
subcomponentOf_identifier = subcomponentOf_identifierStr,
componentType_identifier = e.tag.split("}")[-1])
def classJava(e):
    """Handler for Java <class> elements: emit a COMPONENT for the class,
    nested under its package's component."""
    global __fileIdentifier__
    name = e.find(getQualifiedTag("name"))
    # Guard clause: anonymous or malformed class elements have no usable
    # name and produce no component (same behavior as the original's nested ifs).
    if name is None or name.text is None:
        return
    # Compute the package name once (each call re-serialises the XML).
    fqPackage = getFQPackage()
    COMPONENT(fileParent_identifier=__fileIdentifier__,
              identifier="Source:" + fqPackage + "." + name.text,
              title=fqPackage + "." + name.text,
              subcomponentOf_identifier="Source:" + fqPackage,
              componentType_identifier=e.tag.split("}")[-1])
def componentCpp(e):
    """Handler for C++ <namespace>/<class> elements: emit a COMPONENT,
    nested under the enclosing namespace's component when there is one."""
    global __fileIdentifier__
    # Each of these walks/recurses over the tree; the original computed
    # getParentNamespace(e) twice and METHOD_NAME(e) twice. Compute once.
    parentNs = getParentNamespace(e)
    fqName = METHOD_NAME(e)
    subcomponentOf_identifierStr = None
    if parentNs is not None:
        subcomponentOf_identifierStr = "Source:" + METHOD_NAME(parentNs)
    COMPONENT(fileParent_identifier=__fileIdentifier__,
              identifier="Source:" + fqName,
              title=fqName,
              subcomponentOf_identifier=subcomponentOf_identifierStr,
              componentType_identifier=e.tag.split("}")[-1])
def getroot():
    """Return the root element of the currently loaded srcML document."""
    # Reading a module global needs no `global` declaration.
    return __xmlroot__
def initialize(xmlPath):
    """Parse the srcML file at *xmlPath* and install per-language tag handlers.

    Populates the module globals (__xmlroot__, __sourceType__,
    __fileIdentifier__, handlers) and registers the FILE entity.
    """
    global __xmlroot__, handlers, __xmlpath__, __sourceType__, __fileIdentifier__
    __xmlpath__ = xmlPath
    __xmlroot__ = etree.parse(xmlPath).getroot()
    __sourceType__ = getAttribute(__xmlroot__, "language")
    __fileIdentifier__ = getAttribute(__xmlroot__, "filename")
    FILE(identifier=__fileIdentifier__)
    # Initialize the tag handlers for the detected language.
    if __sourceType__ == "C++":
        handlers = {getQualifiedTag("namespace"): componentCpp,
                    getQualifiedTag("class"): componentCpp}
    elif __sourceType__ == "Java":
        handlers = {getQualifiedTag("class"): classJava,
                    getQualifiedTag("package"): packageJava}
    else:
        # Bug fix: str() guards against __sourceType__ being None (missing
        # "language" attribute), which made "+" raise TypeError here.
        log(str_bad("Handlers for " + str(__sourceType__) + " not defined."))
        handlers = {}
|
7,174 | test resample | #!/usr/bin/env python
# Copyright (C) 2006-2021 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.streaming import EasyLoader, EqloudLoader
import sys
class TestEqloudLoader_Streaming(TestCase):
    """EqloudLoader output should match EasyLoader run on a pre-equalised file.

    EqloudLoader reads the raw file and applies the equal-loudness filter;
    EasyLoader reads the already-filtered reference, so both streams should
    agree sample-for-sample (after the filter's warm-up period).
    """

    def load(self, inputSampleRate, outputSampleRate,
             eqloudfilename, normalfilename,
             downmix, replayGain, startTime, endTime):
        """Run both loaders with identical parameters and compare their audio.

        NOTE(review): inputSampleRate is unused; kept for signature compatibility.
        """
        eqloudloader = EqloudLoader(filename=normalfilename,
                                    sampleRate=outputSampleRate,
                                    downmix=downmix,
                                    startTime=startTime,
                                    endTime=endTime,
                                    replayGain=replayGain)
        easyloader = EasyLoader(filename=eqloudfilename,
                                sampleRate=outputSampleRate,
                                downmix=downmix,
                                startTime=startTime,
                                endTime=endTime,
                                replayGain=replayGain)
        pool = Pool()
        easyloader.audio >> (pool, 'expected')
        run(easyloader)
        eqloudloader.audio >> (pool, 'eqloud')
        run(eqloudloader)
        # Skip the first second of samples so the equal-loudness filter's
        # transient does not count against the comparison.
        for val1, val2 in zip(pool['eqloud'][outputSampleRate:],
                              pool['expected'][outputSampleRate:]):
            # NOTE(review): third argument is treated as a precision/tolerance
            # by essentia_test's custom assertAlmostEqual, not unittest's
            # `places` — confirm.
            self.assertAlmostEqual(val1 - val2, 0, 5e-3)

    def testNoResample(self):
        # Same input/output rate: no resampling involved.
        eqloud = join(testdata.audio_dir, 'generated', 'doublesize', 'sin_30_seconds_eqloud.wav')
        normal = join(testdata.audio_dir, 'generated', 'doublesize', 'sin_30_seconds.wav')
        self.load(44100, 44100, eqloud, normal, "left", -6.0, 0., 30.)
        self.load(44100, 44100, eqloud, normal, "left", -6.0, 3.35, 5.68)
        self.load(44100, 44100, eqloud, normal, "left", -6.0, 0.169, 8.333)

    def METHOD_NAME(self):
        # Output rate differs from the file's rate: exercises the resampler.
        eqloud = join(testdata.audio_dir, 'generated', 'doublesize', 'sin_30_seconds_eqloud.wav')
        normal = join(testdata.audio_dir, 'generated', 'doublesize', 'sin_30_seconds.wav')
        self.load(44100, 48000, eqloud, normal, "left", -6.0, 3.35, 5.68)
        self.load(44100, 32000, eqloud, normal, "left", -6.0, 3.35, 5.68)

    def testInvalidParam(self):
        # Each invalid configuration must be rejected at configure time.
        filename = join(testdata.audio_dir, 'generated', 'synthesised', 'impulse', 'resample',
                        'impulses_1samp_44100.wav')
        self.assertConfigureFails(EqloudLoader(), {'filename': 'unknown.wav'})
        self.assertConfigureFails(EqloudLoader(), {'filename': filename, 'downmix': 'stereo'})
        self.assertConfigureFails(EqloudLoader(), {'filename': filename, 'sampleRate': 0})
        self.assertConfigureFails(EqloudLoader(), {'filename': filename, 'startTime': -1})
        self.assertConfigureFails(EqloudLoader(), {'filename': filename, 'endTime': -1})
        self.assertConfigureFails(EqloudLoader(), {'filename': filename, 'startTime': 10, 'endTime': 1})

    def testResetStandard(self):
        # reset() should make the loader reproduce its first output exactly.
        from essentia.standard import EqloudLoader as stdEqloudLoader
        audiofile = join(testdata.audio_dir, 'recorded', 'musicbox.wav')
        loader = stdEqloudLoader(filename=audiofile, endTime=31)
        audio1 = loader()
        audio2 = loader()
        loader.reset()
        audio3 = loader()
        self.assertAlmostEqualVector(audio3, audio1)
        self.assertEqualVector(audio2, audio1)

    def testLoadMultiple(self):
        # Repeated invocations must each return the full 10-second buffer.
        from essentia.standard import EqloudLoader as stdEqloudLoader
        aiffpath = join('generated', 'synthesised', 'impulse', 'aiff')
        filename = join(testdata.audio_dir, aiffpath, 'impulses_1second_44100.aiff')
        algo = stdEqloudLoader(filename=filename)
        audio1 = algo()
        audio2 = algo()
        audio3 = algo()
        # Bug fix: assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(len(audio1), 441000)
        self.assertEqual(len(audio2), 441000)
        self.assertEqual(len(audio3), 441000)
        self.assertEqualVector(audio2, audio1)
        self.assertEqualVector(audio2, audio3)
# Collect the class's tests into a suite; also runnable directly as a script.
suite = allTests(TestEqloudLoader_Streaming)
if __name__ == '__main__':
    TextTestRunner(verbosity=2).run(suite)
7,175 | test write before nested map exit | # Copyright 2019-2023 ETH Zurich and the DaCe authors. All rights reserved.
import copy
import dace
import numpy as np
from dace.transformation.dataflow import RemoveIntermediateWrite
def test_write_before_map_exit():
    """RemoveIntermediateWrite preserves results when the write sits inside one map."""
    sdfg = dace.SDFG('test_write_before_map_exit')
    sdfg.add_array('A', (10, ), dace.int32)
    sdfg.add_array('B', (10, ), dace.int32)
    state = sdfg.add_state('state')
    # Single map copying A[i] -> B[i] through a tasklet, with the B access
    # node placed inside the map (connected to the map exit by an empty memlet).
    me, mx = state.add_map('map', dict(i='0:10'))
    a_access = state.add_read('A')
    b_access = state.add_write('B')
    tasklet = state.add_tasklet('tasklet', {'__inp'}, {'__out'}, '__out = __inp')
    state.add_memlet_path(a_access, me, tasklet, memlet=dace.Memlet(data='A', subset='i'), dst_conn='__inp')
    state.add_edge(tasklet, '__out', b_access, None, dace.Memlet(data='B', subset='i'))
    state.add_edge(b_access, None, mx, None, dace.Memlet())
    A = np.arange(10, dtype=np.int32)
    ref = A  # B is a plain copy of A, so A itself serves as the reference
    before_val = np.empty((10, ), dtype=np.int32)
    after_val = np.empty((10, ), dtype=np.int32)
    # Run a deep copy before the transformation, then the transformed SDFG,
    # and check both produce the reference result.
    sdfg_before = copy.deepcopy(sdfg)
    sdfg_before(A=A, B=before_val)
    assert np.allclose(before_val, ref)
    sdfg.apply_transformations_repeated(RemoveIntermediateWrite)
    sdfg(A=A, B=after_val)
    assert np.allclose(after_val, ref)
def METHOD_NAME():
    """Same as test_write_before_map_exit, but the write sits inside two nested maps."""
    sdfg = dace.SDFG('test_write_before_nested_map_exit')
    sdfg.add_array('A', (10, 10), dace.int32)
    sdfg.add_array('B', (10, 10), dace.int32)
    state = sdfg.add_state('state')
    me0, mx0 = state.add_map('map', dict(i='0:10'))
    me1, mx1 = state.add_map('map2', dict(j='0:10'))
    a_access = state.add_read('A')
    b_access = state.add_write('B')
    tasklet = state.add_tasklet('tasklet', {'__inp'}, {'__out'}, '__out = __inp')
    state.add_memlet_path(a_access, me0, me1, tasklet, memlet=dace.Memlet(data='A', subset='i, j'), dst_conn='__inp')
    state.add_edge(tasklet, '__out', b_access, None, dace.Memlet(data='B', subset='i, j'))
    # Empty memlets connect the write node through the inner and outer map exits.
    state.add_nedge(b_access, mx1, dace.Memlet())
    state.add_nedge(mx1, mx0, dace.Memlet())
    A = np.arange(100, dtype=np.int32).reshape((10, 10)).copy()
    ref = A  # B is a plain copy of A, so A itself serves as the reference
    before_val = np.empty((10, 10), dtype=np.int32)
    after_val = np.empty((10, 10), dtype=np.int32)
    # Validate results before and after applying RemoveIntermediateWrite.
    sdfg_before = copy.deepcopy(sdfg)
    sdfg_before(A=A, B=before_val)
    assert np.allclose(before_val, ref)
    sdfg.apply_transformations_repeated(RemoveIntermediateWrite)
    sdfg(A=A, B=after_val)
    assert np.allclose(after_val, ref)
def test_write_before_nested_map_exit_2():
    """Nested maps with two writes: only the transient C should be relocated."""
    sdfg = dace.SDFG('test_write_before_nested_map_exit_2')
    sdfg.add_array('A', (10, 10), dace.int32)
    sdfg.add_array('B', (10, 10), dace.int32)
    # C is transient and written per-j; the transformation should move its
    # access node out of the inner map.
    sdfg.add_array('C', (10, ), dace.int32, transient=True)
    state = sdfg.add_state('state')
    me0, mx0 = state.add_map('map', dict(i='0:10'))
    me1, mx1 = state.add_map('map2', dict(j='0:10'))
    a_access = state.add_read('A')
    b_access = state.add_write('B')
    c_access = state.add_write('C')
    tasklet0 = state.add_tasklet('tasklet0', {'__inp'}, {'__out'}, '__out = __inp')
    tasklet1 = state.add_tasklet('tasklet1', {'__inp'}, {'__out'}, '__out = __inp')
    state.add_memlet_path(a_access, me0, me1, tasklet0, memlet=dace.Memlet(data='A', subset='i, j'), dst_conn='__inp')
    state.add_memlet_path(a_access, me0, me1, tasklet1, memlet=dace.Memlet(data='A', subset='i, j'), dst_conn='__inp')
    state.add_edge(tasklet0, '__out', b_access, None, dace.Memlet(data='B', subset='i, j'))
    state.add_edge(tasklet1, '__out', c_access, None, dace.Memlet(data='C', subset='j'))
    state.add_nedge(b_access, mx1, dace.Memlet())
    state.add_nedge(c_access, mx1, dace.Memlet())
    state.add_nedge(mx1, mx0, dace.Memlet())
    A = np.arange(100, dtype=np.int32).reshape((10, 10)).copy()
    ref = A  # B is a plain copy of A, so A itself serves as the reference
    before_val = np.empty((10, 10), dtype=np.int32)
    after_val = np.empty((10, 10), dtype=np.int32)
    sdfg_before = copy.deepcopy(sdfg)
    sdfg_before(A=A, B=before_val)
    assert np.allclose(before_val, ref)
    sdfg.apply_transformations_repeated(RemoveIntermediateWrite)
    # Structural checks: C's access node must now sit between the inner (mx1)
    # and outer (mx0) map exits, no longer fed directly by tasklet1.
    c_nodes = [n for n in state.data_nodes() if n.data == 'C']
    assert len(c_nodes) == 1
    assert len(state.edges_between(tasklet1, c_nodes[0])) == 0
    assert len(state.edges_between(c_nodes[0], mx1)) == 0
    assert len(state.edges_between(mx1, c_nodes[0])) == 1
    assert len(state.edges_between(c_nodes[0], mx0)) == 1
    assert len(state.edges_between(mx0, c_nodes[0])) == 0
    sdfg(A=A, B=after_val)
    assert np.allclose(after_val, ref)
if __name__ == '__main__':
    # Run all cases directly (the file is also collected by pytest).
    test_write_before_map_exit()
    METHOD_NAME()
    test_write_before_nested_map_exit_2()
7,176 | get geography | from core.mixins import UpdateElectionsTimestampedModel
from django.contrib.gis.db import models
from django.urls import reverse
from model_utils import Choices
from .mixins import DateConstraintMixin, DateDisplayMixin
class OrganisationManager(models.QuerySet):
    """QuerySet helpers for selecting organisations active on a given date."""

    def get_date_filter(self, date):
        # Active means: started on/before `date` and not yet ended (or open-ended).
        started = models.Q(start_date__lte=date)
        not_ended = models.Q(end_date__gte=date) | models.Q(end_date=None)
        return started & not_ended

    def filter_by_date(self, date):
        """All organisations active on `date`."""
        return self.filter(self.get_date_filter(date))

    def get_by_date(self, organisation_type, official_identifier, date):
        """The single organisation of this type/identifier active on `date`."""
        lookup = (
            models.Q(organisation_type=organisation_type)
            & models.Q(official_identifier=official_identifier)
            & self.get_date_filter(date)
        )
        return self.get(lookup)
class Organisation(UpdateElectionsTimestampedModel, DateDisplayMixin):
    """
    An organisation that can hold an election in the UK
    """

    ORGTYPES = Choices(
        ("combined-authority", "combined-authority"),
        ("sp", "sp"),
        ("gla", "gla"),
        ("local-authority", "local-authority"),
        ("naw", "naw"),
        ("senedd", "senedd"),
        ("nia", "nia"),
        ("parl", "parl"),
        ("police-area", "police-area"),
        ("europarl", "europarl"),
    )
    official_identifier = models.CharField(
        blank=False, max_length=255, db_index=True
    )
    organisation_type = models.CharField(
        blank=False, max_length=255, choices=ORGTYPES, default="local-authority"
    )
    organisation_subtype = models.CharField(blank=True, max_length=255)
    official_name = models.CharField(blank=True, max_length=255)
    common_name = models.CharField(blank=True, max_length=255)
    slug = models.CharField(blank=True, max_length=100)
    territory_code = models.CharField(blank=True, max_length=10)
    election_types = models.ManyToManyField(
        "elections.ElectionType", through="elections.ElectedRole"
    )
    election_name = models.CharField(blank=True, max_length=255)
    start_date = models.DateField(null=False)
    end_date = models.DateField(blank=True, null=True)
    legislation_url = models.CharField(blank=True, max_length=500, null=True)

    # Alias so callers can catch Organisation.ValidationError — presumably to
    # mirror Django model API shape; maps to plain ValueError.
    ValidationError = ValueError

    # as_manager is a classmethod, so call it on the class (the original
    # needlessly instantiated the QuerySet first).
    objects = OrganisationManager.as_manager()

    def __str__(self):
        return "{} ({})".format(self.name, self.active_period_text)

    @property
    def name(self):
        """Best available display name, falling back to the raw identifier."""
        return (
            self.official_name or self.common_name or self.official_identifier
        )

    class Meta:
        ordering = ("official_name", "-start_date")
        get_latest_by = "start_date"
        unique_together = (
            ("official_identifier", "organisation_type", "start_date"),
            ("official_identifier", "organisation_type", "end_date"),
        )

    """
    Note:
    This model also has an additional constraint to prevent
    overlapping start and end dates which is defined in
    organisations/migrations/0034_end_date_constraint.py
    """

    def get_url(self, view, ext=None):
        """Reverse `view` with this organisation's identifying args (plus optional ext)."""
        args = (
            self.organisation_type,
            self.official_identifier,
            self.start_date,
        )
        args = args + (ext,) if ext else args
        return reverse(view, args=args)

    def get_absolute_url(self):
        return self.get_url("organisation_view")

    def format_geography_link(self):
        """MapIt URL for the most recent geography's GSS code, or None."""
        # Perf fix: the original evaluated the whole related queryset just to
        # test emptiness and then called .latest() twice (two extra queries).
        try:
            latest = self.geographies.latest()
        except OrganisationGeography.DoesNotExist:
            return None
        if not latest.gss:
            return None
        return "https://mapit.mysociety.org/area/{}".format(latest.gss)

    def METHOD_NAME(self, date):
        """Return the OrganisationGeography covering `date`.

        Returns the sole geography unconditionally when there is exactly one;
        raises ValueError when `date` falls outside the organisation's own
        active period; returns None when no geography matches.
        """
        # Perf fix: evaluate the related queryset once instead of issuing a
        # fresh query for each of the three checks below.
        geographies = list(self.geographies.all())
        if len(geographies) == 0:
            return None
        if len(geographies) == 1:
            return geographies[0]
        if date < self.start_date:
            raise ValueError(
                "date %s is before organisation start_date (%s)"
                % (date.isoformat(), self.start_date.isoformat())
            )
        if self.end_date and date > self.end_date:
            raise ValueError(
                "date %s is after organisation end_date (%s)"
                % (date.isoformat(), self.end_date.isoformat())
            )
        try:
            return self.geographies.get(
                (models.Q(start_date__lte=date) | models.Q(start_date=None))
                & (models.Q(end_date__gte=date) | models.Q(end_date=None))
            )
        except OrganisationGeography.DoesNotExist:
            return None
class OrganisationGeography(
    DateConstraintMixin, DateDisplayMixin, models.Model
):
    """A boundary (and optional GSS code) attached to an Organisation for a date range."""
    organisation = models.ForeignKey(
        "Organisation", related_name="geographies", on_delete=models.CASCADE
    )
    start_date = models.DateField(blank=True, null=True)
    end_date = models.DateField(blank=True, null=True)
    gss = models.CharField(blank=True, max_length=20)
    legislation_url = models.CharField(blank=True, max_length=500, null=True)
    geography = models.MultiPolygonField(null=True)
    source = models.CharField(blank=True, max_length=255)
    def __str__(self):
        # Prefer the GSS code; otherwise fall back to the organisation name
        # plus the period this geography covers.
        if self.gss:
            return self.gss
        return "{name} ({dates})".format(
            name=self.organisation.name, dates=self.active_period_text
        )
    def save(self, *args, **kwargs):
        # Run the DateConstraintMixin date checks before every save.
        self.check_start_date()
        self.check_end_date()
        return super().save(*args, **kwargs)
    class Meta:
        verbose_name_plural = "Organisation Geographies"
        ordering = ("-start_date",)
        get_latest_by = "start_date"
        unique_together = (
            ("organisation", "start_date"),
            ("organisation", "end_date"),
        )
    """
    Note:
    This model also has an additional constraint to prevent
    overlapping start and end dates (but allows both to be NULL).
    This is defined in
    organisations/migrations/0040_end_date_constraint.py
    """
class OrganisationGeographySubdivided(models.Model):
    """st_subdivide()-ed pieces of OrganisationGeography polygons, spatially indexed."""
    geography = models.PolygonField(db_index=True, spatial_index=True)
    organisation_geography = models.ForeignKey(
        OrganisationGeography,
        on_delete=models.CASCADE,
        related_name="subdivided",
    )
    # Raw SQL used to (re)build this table from OrganisationGeography rows.
    POPULATE_SQL = """
    TRUNCATE organisations_organisationgeographysubdivided;
    INSERT INTO organisations_organisationgeographysubdivided (geography, organisation_geography_id)
    SELECT st_subdivide(geography) as geography, id as organisation_geography_id
    FROM organisations_organisationgeography;
    """
7,177 | test input rotation cell with qubits | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import cirq
from cirq.interop.quirk.cells.testing import assert_url_to_circuit_returns
from cirq import quirk_url_to_circuit
def test_input_rotation_cells():
    """Parsing Quirk input-rotation gates: error cases, diagrams, and unitaries."""
    # A classical constant is not a valid rotation input register.
    with pytest.raises(ValueError, match='classical constant'):
        _ = quirk_url_to_circuit(
            'https://algassert.com/quirk#circuit={"cols":[["Z^(A/2^n)",{"id":"setA","arg":3}]]}'
        )
    # The rotation gate requires its input register to be present in the column.
    with pytest.raises(ValueError, match="Missing input 'a'"):
        _ = quirk_url_to_circuit('https://algassert.com/quirk#circuit={"cols":[["X^(A/2^n)"]]}')
    assert_url_to_circuit_returns(
        '{"cols":[["Z^(A/2^n)","inputA2"]]}',
        diagram="""
0: ───Z^(A/2^2)───
│
1: ───A0──────────
│
2: ───A1──────────
""",
        unitary=np.diag([1, 1, 1, 1, 1j**0, 1j**0.5, 1j**1, 1j**1.5]),
    )
    # Negative exponent sign flips the rotation direction.
    assert_url_to_circuit_returns(
        '{"cols":[["Z^(-A/2^n)","inputA1"]]}', unitary=np.diag([1, 1, 1, -1j])
    )
    # X/Y variants are checked via basis-change conjugation (H, X^±½).
    assert_url_to_circuit_returns(
        '{"cols":[["H"],["X^(A/2^n)","inputA2"],["H"]]}',
        unitary=np.diag([1, 1, 1, 1, 1j**0, 1j**0.5, 1j**1, 1j**1.5]),
    )
    assert_url_to_circuit_returns(
        '{"cols":[["H"],["X^(-A/2^n)","inputA2"],["H"]]}',
        unitary=np.diag([1, 1, 1, 1, 1j**0, 1j**-0.5, 1j**-1, 1j**-1.5]),
    )
    assert_url_to_circuit_returns(
        '{"cols":[["X^-½"],["Y^(A/2^n)","inputA2"],["X^½"]]}',
        unitary=np.diag([1, 1, 1, 1, 1j**0, 1j**0.5, 1j**1, 1j**1.5]),
    )
    assert_url_to_circuit_returns(
        '{"cols":[["X^-½"],["Y^(-A/2^n)","inputA2"],["X^½"]]}',
        unitary=np.diag([1, 1, 1, 1, 1j**0, 1j**-0.5, 1j**-1, 1j**-1.5]),
    )
    # Controlled input rotation.
    assert_url_to_circuit_returns(
        '{"cols":[["•","Z^(A/2^n)","inputA2"]]}',
        diagram="""
0: ───@^(A/2^2)───
│
1: ───@───────────
│
2: ───A0──────────
│
3: ───A1──────────
""",
        unitary=np.diag([1 + 0j] * 13 + [1j**0.5, 1j, 1j**1.5]),
    )
    assert_url_to_circuit_returns(
        '{"cols":[["X^(-A/2^n)","inputA2"]]}',
        diagram="""
0: ───X^(-A/2^2)───
│
1: ───A0───────────
│
2: ───A1───────────
""",
    )
    assert_url_to_circuit_returns(
        '{"cols":[["•","X^(-A/2^n)","inputA2"]]}',
        diagram="""
0: ───@────────────
│
1: ───X^(-A/2^2)───
│
2: ───A0───────────
│
3: ───A1───────────
""",
    )
    # Only input 'A' is consumed by the gate; input 'B' stays free for later Z gates.
    assert_url_to_circuit_returns(
        '{"cols":[["Z^(A/2^n)","inputA1","inputB1"],[1,1,"Z"],[1,1,"Z"]]}',
        unitary=np.diag([1, 1, 1, 1, 1, 1, 1j, 1j]),
    )
def test_input_rotation_cells_repr():
    """The parsed operation's repr() must round-trip to an equal object."""
    url = 'http://algassert.com/quirk#circuit={"cols":[["•","X^(-A/2^n)","inputA2"]]}'
    circuit = quirk_url_to_circuit(url)
    first_op = circuit[0].operations[0]
    cirq.testing.assert_equivalent_repr(first_op)
def test_validation():
    """An exponent sign outside {-1, +1} must be rejected."""
    register = cirq.LineQubit.range(4)
    target = cirq.X.on(cirq.LineQubit(5))
    with pytest.raises(ValueError, match='sign'):
        _ = cirq.interop.quirk.QuirkInputRotationOperation(
            identifier='xxx',
            register=register,
            base_operation=target,
            exponent_sign=2,
        )
def test_input_rotation_with_qubits():
    """Qubit ordering and with_qubits() remapping of the Quirk rotation op."""
    a, b, c, d, e = cirq.LineQubit.range(5)
    x, y, z, t, w = cirq.LineQubit.range(10, 15)
    op = cirq.interop.quirk.QuirkInputRotationOperation(
        identifier='test',
        register=[a, b, c],
        base_operation=cirq.X(d).controlled_by(e),
        exponent_sign=-1,
    )
    # Order is: control(s), then target, then the input register.
    assert op.qubits == (e, d, a, b, c)
    remapped = op.with_qubits(x, y, z, t, w)
    expected = cirq.interop.quirk.QuirkInputRotationOperation(
        identifier='test',
        register=[z, t, w],
        base_operation=cirq.X(y).controlled_by(x),
        exponent_sign=-1,
    )
    assert remapped == expected
def METHOD_NAME():
    """Remapping line qubits on a cell with an explicit register."""
    a, b, c, d, e = cirq.LineQubit.range(5)
    x, y, z, t, w = cirq.LineQubit.range(10, 15)
    cell_cls = cirq.interop.quirk.cells.input_rotation_cells.InputRotationCell
    original = cell_cls(
        identifier='test',
        register=[a, b, c],
        base_operation=cirq.X(d).controlled_by(e),
        exponent_sign=-1,
    )
    remapped = original.with_line_qubits_mapped_to([x, y, z, t, w])
    expected = cell_cls(
        identifier='test',
        register=[x, y, z],
        base_operation=cirq.X(t).controlled_by(w),
        exponent_sign=-1,
    )
    assert remapped == expected
def test_input_rotation_cell_with_qubits_before_register_specified():
    """Remapping a cell whose register has not been specified yet keeps it None."""
    d, e = cirq.LineQubit.range(3, 5)
    x, y, z, t, w = cirq.LineQubit.range(10, 15)
    cell_cls = cirq.interop.quirk.cells.input_rotation_cells.InputRotationCell
    original = cell_cls(
        identifier='test',
        register=None,
        base_operation=cirq.X(d).controlled_by(e),
        exponent_sign=-1,
    )
    remapped = original.with_line_qubits_mapped_to([x, y, z, t, w])
    expected = cell_cls(
        identifier='test',
        register=None,
        base_operation=cirq.X(t).controlled_by(w),
        exponent_sign=-1,
    )
    assert remapped == expected
def test_repr():
    """repr() of InputRotationCell must round-trip to an equal object."""
    a, b, c, d, e = cirq.LineQubit.range(5)
    cell = cirq.interop.quirk.cells.input_rotation_cells.InputRotationCell(
        identifier='test',
        register=[a, b, c],
        base_operation=cirq.X(d).controlled_by(e),
        exponent_sign=-1,
    )
    cirq.testing.assert_equivalent_repr(cell)
7,178 | test set file type failure 2 | import os
from django.core.files.uploadedfile import UploadedFile
from rest_framework import status
from hs_core.hydroshare import resource
from hs_core.hydroshare.utils import resource_file_add_process
from hs_core.views.utils import create_folder, move_or_rename_file_or_folder
from .base import HSRESTTestCase
class TestSetFileTypeEndPoint(HSRESTTestCase):
    """Tests for the set-file-type REST endpoint on composite resources."""

    # URL pattern for the endpoint under test.
    URL_TEMPLATE = "/hsapi/resource/{res_id}/functions/set-file-type/{file_path}/{file_type}/"

    def setUp(self):
        super(TestSetFileTypeEndPoint, self).setUp()
        self.raster_file_name = 'cea.tif'
        self.raster_file_path = 'hs_core/tests/data/cea.tif'
        self.rtype = 'CompositeResource'
        self.title = 'My Test resource'
        self.resource = resource.create_resource(self.rtype,
                                                 self.user,
                                                 self.title)
        self.resources_to_delete.append(self.resource.short_id)

    def _add_raster_file(self):
        """Upload the tif file to the (empty) resource and return its ResourceFile.

        Extracted from the four tests below, which all repeated this setup
        verbatim. Also closes the file handle (the original leaked it).
        """
        # The resource must start out empty.
        self.assertEqual(self.resource.files.count(), 0)
        with open(self.raster_file_path, "rb") as tif_file_obj:
            uploaded_file = UploadedFile(file=tif_file_obj,
                                         name=os.path.basename(tif_file_obj.name))
            resource_file_add_process(resource=self.resource, files=(uploaded_file,),
                                      user=self.user, auto_aggregate=False)
        # Resource should now have exactly the uploaded file.
        self.assertEqual(self.resource.files.count(), 1)
        res_file = self.resource.files.all().first()
        self.assertEqual(res_file.file_name, self.raster_file_name)
        return res_file

    def _post_set_file_type(self, file_path, file_type):
        """POST to the set-file-type endpoint and return the response."""
        url = self.URL_TEMPLATE.format(res_id=self.resource.short_id,
                                       file_path=file_path,
                                       file_type=file_type)
        return self.client.post(url, {}, format='json')

    def test_set_file_type_success_1(self):
        # Setting the tif file (in the root dir) to GeoRaster should succeed.
        self._add_raster_file()
        response = self._post_set_file_type(self.raster_file_name, "GeoRaster")
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_set_file_type_success_2(self):
        # Setting the tif file to GeoRaster should also work when the file
        # lives in a sub folder rather than the root dir.
        self._add_raster_file()
        create_folder(self.resource.short_id, 'data/contents/sub_test_dir')
        move_or_rename_file_or_folder(self.user, self.resource.short_id,
                                      'data/contents/' + self.raster_file_name,
                                      'data/contents/sub_test_dir/' + self.raster_file_name)
        res_file = self.resource.files.all().first()
        self.assertEqual(res_file.short_path, "sub_test_dir/" + self.raster_file_name)
        response = self._post_set_file_type(res_file.short_path, "GeoRaster")
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_set_file_type_failure_1(self):
        # Setting a tif file to the NetCDF type is invalid and must return 400.
        self._add_raster_file()
        response = self._post_set_file_type(self.raster_file_name, "NetCDF")
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def METHOD_NAME(self):
        # A non-existent file path must return 400 even with a valid file type.
        self._add_raster_file()
        file_path = os.path.join("no-such-folder", self.raster_file_name)
        response = self._post_set_file_type(file_path, "GeoRaster")
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
7,179 | set status to queued | """This type stub file was generated by pyright."""
from __future__ import annotations
from concurrent import futures
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Dict,
List,
NamedTuple,
Optional,
Set,
Type,
Union,
)
from typing_extensions import Literal
if TYPE_CHECKING:
from logging import Logger
from traceback import TracebackException
from .tasks import Task
from .utils import CallArgs
logger: Logger = ...
class TaskTag(NamedTuple):
    # Symbolic tag used to route tasks through BoundedExecutor's tag_semaphores.
    name: str
class BaseTransferFuture:
    """Interface for a future representing an in-progress transfer (stub)."""
    @property
    def meta(self) -> BaseTransferMeta: ...
    def done(self) -> bool: ...
    def result(self) -> Any: ...
    def cancel(self) -> None: ...
class BaseTransferMeta:
    """Interface for transfer metadata exposed by a transfer future (stub)."""
    @property
    def call_args(self) -> CallArgs: ...
    @property
    def transfer_id(self) -> Optional[str]: ...
    @property
    def user_context(self) -> Dict[Any, Any]: ...
class TransferFuture(BaseTransferFuture):
    """Concrete transfer future backed by a TransferCoordinator (stub)."""
    def __init__(
        self,
        meta: Optional[TransferMeta] = ...,
        coordinator: Optional[TransferCoordinator] = ...,
    ) -> None: ...
    @property
    def meta(self) -> TransferMeta: ...
    def done(self) -> bool: ...
    def result(self) -> Any: ...
    def cancel(self) -> None: ...
    def set_exception(self, exception: Exception) -> None: ...
class TransferMeta(BaseTransferMeta):
    """Holds metadata about the TransferFuture"""
    def __init__(
        self, call_args: Optional[CallArgs] = ..., transfer_id: Optional[str] = ...
    ) -> None: ...
    @property
    def call_args(self) -> CallArgs: ...
    @property
    def transfer_id(self) -> Optional[str]: ...
    @property
    def size(self) -> Optional[int]: ...
    @property
    def user_context(self) -> Dict[Any, Any]: ...
    # NOTE(review): presumably sets the value returned by `size`; the stub
    # carries no body — confirm against the implementation.
    def provide_transfer_size(self, size: int) -> None: ...
class TransferCoordinator:
    """A helper class for managing TransferFuture"""
    def __init__(self, transfer_id: Optional[str] = ...) -> None: ...
    def __repr__(self) -> str: ...
    @property
    def exception(self) -> Optional[Exception]: ...
    @property
    def associated_futures(self) -> Set[futures.Future[Any]]: ...
    @property
    def failure_cleanups(self) -> List[Callable[..., Any]]: ...
    # Lifecycle states a transfer can be in, as a literal union.
    @property
    def status(
        self,
    ) -> Literal[
        "not-started", "queued", "running", "cancelled", "failed", "success"
    ]: ...
    def set_result(self, result: Any) -> None: ...
    def set_exception(self, exception: Exception, override: bool = ...) -> None: ...
    def result(self) -> Any: ...
    def cancel(self, msg: str = ..., exc_type: Exception = ...) -> None: ...
    # NOTE(review): generated placeholder name; presumably the
    # set_status_to_queued counterpart of set_status_to_running — confirm.
    def METHOD_NAME(self) -> None: ...
    def set_status_to_running(self) -> None: ...
    def submit(
        self, executor: BoundedExecutor, task: Task, tag: Optional[TaskTag] = ...
    ) -> futures.Future: ...
    def done(self) -> bool: ...
    def add_associated_future(self, future: futures.Future[Any]) -> None: ...
    def remove_associated_future(self, future: futures.Future[Any]) -> None: ...
    def add_done_callback(
        self, function: Callable[..., Any], *args: Any, **kwargs: Any
    ) -> None: ...
    def add_failure_cleanup(
        self, function: Callable[..., Any], *args: Any, **kwargs: Any
    ) -> None: ...
    def announce_done(self) -> None: ...
class BoundedExecutor:
    """Executor wrapper with a bounded queue and optional per-tag semaphores (stub)."""
    EXECUTOR_CLS: ClassVar[futures.ThreadPoolExecutor] = ...
    def __init__(
        self,
        max_size: int,
        max_num_threads: int,
        tag_semaphores: Dict[str, Any] = ...,
        executor_cls: Type[Union[BaseExecutor, futures.ThreadPoolExecutor]] = ...,
    ) -> None: ...
    def submit(
        self, task: Task, tag: Optional[TaskTag] = ..., block: bool = ...
    ) -> ExecutorFuture: ...
    def shutdown(self, wait: bool = ...) -> None: ...
class ExecutorFuture:
    """Thin wrapper around a concurrent.futures.Future (stub)."""
    def __init__(self, future: futures.Future[Any]) -> None: ...
    def result(self) -> Any: ...
    def add_done_callback(self, fn: Callable[..., Any]) -> None: ...
    def done(self) -> bool: ...
class BaseExecutor:
    """Minimal executor interface accepted by BoundedExecutor (stub)."""
    def __init__(self, max_workers: Optional[int] = ...) -> None: ...
    def submit(
        self, fn: Callable[..., Any], *args: Any, **kwargs: Any
    ) -> NonThreadedExecutorFuture: ...
    def shutdown(self, wait: bool = ...) -> None: ...
class NonThreadedExecutor(BaseExecutor):
    """Executor variant that runs submitted callables without threads (stub)."""
    def submit(
        self, fn: Callable[..., Any], *args: Any, **kwargs: Any
    ) -> NonThreadedExecutorFuture: ...
    def shutdown(self, wait: bool = ...) -> None: ...
class NonThreadedExecutorFuture:
    """Future returned by NonThreadedExecutor (stub)."""
    def __init__(self) -> None: ...
    def set_result(self, result: Any) -> None: ...
    def set_exception_info(
        self, exception: Exception, traceback: TracebackException
    ) -> None: ...
    def result(self, timeout: Optional[int] = ...) -> Any: ...
    def done(self) -> bool: ...
    def add_done_callback(self, fn: Callable[..., Any]) -> None: ...
# Predefined task tags — presumably gate in-memory upload/download concurrency
# via BoundedExecutor's tag_semaphores; values are elided in this stub.
IN_MEMORY_UPLOAD_TAG: TaskTag = ...
IN_MEMORY_DOWNLOAD_TAG: TaskTag = ...
7,180 | test retrieve pipeline | # Copyright 2018 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from absl.testing import absltest
from common import crmint_logging
from controller import models
from tests import controller_utils
class TestPipelineViews(controller_utils.ControllerAppTest):
  def test_empty_list(self):
    """Listing pipelines on a fresh database succeeds."""
    response = self.client.get('/api/pipelines')
    self.assertEqual(response.status_code, 200)
  def test_list_with_one_pipeline(self):
    """Ensures that the blueprint registration works with multiple tests."""
    models.Pipeline.create()
    response = self.client.get('/api/pipelines')
    self.assertEqual(response.status_code, 200)
  def test_put_pipeline(self):
    """Updating a stopped pipeline with a valid cron schedule succeeds."""
    pipeline = models.Pipeline.create()
    models.Job.create(pipeline_id=pipeline.id)
    models.Schedule.create(pipeline_id=pipeline.id)
    request = {
        'name': 'Scenario 1 - Training',
        'run_on_schedule': 'False',
        'schedules': [{'id': 1, 'pipeline_id': 1, 'cron': '0 0 13 6,9,12,3 *'}],
        'params': []
    }
    response = self.client.put('/api/pipelines/1', json=request)
    self.assertEqual(response.status_code, 200)
def test_put_active_pipeline(self):
pipeline = models.Pipeline.create()
pipeline.status = models.Pipeline.STATUS.RUNNING
pipeline.save()
models.Job.create(pipeline_id=pipeline.id)
response = self.client.put('/api/pipelines/1')
self.assertEqual(response.status_code, 422)
def test_put_pipeline_invalid_schedule(self):
pipeline = models.Pipeline.create()
models.Job.create(pipeline_id=pipeline.id)
models.Schedule.create(pipeline_id=pipeline.id)
request = {
'name': 'Scenario 1 - Training',
'run_on_schedule': 'False',
'schedules': [
{'id': 1, 'pipeline_id': 1, 'cron': '0 0 13 6,9,12,15 *'}],
'params': []
}
response = self.client.put('/api/pipelines/1', json=request)
self.assertEqual(response.status_code, 400)
def test_missing_pipeline(self):
response = self.client.get('/api/pipelines/1')
self.assertEqual(response.status_code, 404)
def METHOD_NAME(self):
pipeline = models.Pipeline.create()
models.Job.create(pipeline_id=pipeline.id)
response = self.client.get('/api/pipelines/1')
self.assertEqual(response.status_code, 200)
def test_start_pipeline(self):
pipeline = models.Pipeline.create()
models.Job.create(pipeline_id=pipeline.id)
response = self.client.post('/api/pipelines/1/start')
self.assertEqual(response.status_code, 200)
self.assertEqual(pipeline.status, models.Job.STATUS.RUNNING)
def test_stop_pipeline(self):
pipeline = models.Pipeline.create(status=models.Pipeline.STATUS.RUNNING)
models.Job.create(pipeline_id=pipeline.id, status=models.Job.STATUS.RUNNING)
response = self.client.post('/api/pipelines/1/stop')
self.assertEqual(response.status_code, 200)
self.assertEqual(pipeline.status, models.Job.STATUS.STOPPING)
def test_export_pipeline(self):
pipeline = models.Pipeline.create(name='My Pipeline')
models.Job.create(pipeline_id=pipeline.id)
response = self.client.get('/api/pipelines/1/export')
self.assertEqual(response.status_code, 200)
def test_enable_run_on_schedule(self):
pipeline = models.Pipeline.create()
response = self.client.patch(
'/api/pipelines/1/run_on_schedule?run_on_schedule=True')
self.assertEqual(response.status_code, 200)
self.assertTrue(pipeline.run_on_schedule)
def test_disable_run_on_schedule(self):
pipeline = models.Pipeline.create()
response = self.client.patch(
'/api/pipelines/1/run_on_schedule?run_on_schedule=False')
self.assertEqual(response.status_code, 200)
self.assertFalse(pipeline.run_on_schedule)
def test_retrieve_logs(self):
self.enter_context(
mock.patch.object(crmint_logging, 'get_logger', autospec=True))
models.Pipeline.create()
response = self.client.get('/api/pipelines/1/logs')
self.assertEqual(response.status_code, 200)
if __name__ == '__main__':
absltest.main() |
7,181 | email new user | import datetime
from newsblur_web.celeryapp import app
from apps.profile.models import Profile, RNewUserQueue
from utils import log as logging
from apps.reader.models import UserSubscription, UserSubscriptionFolders
from apps.social.models import MSocialServices, MActivity, MInteraction
@app.task(name="email-new-user")
def METHOD_NAME(user_id):
    """Celery task: send the welcome email to a freshly registered user."""
    Profile.objects.get(user__pk=user_id).send_new_user_email()
@app.task(name="email-new-premium")
def EmailNewPremium(user_id):
    """Celery task: send the premium-upgrade confirmation email to a user."""
    Profile.objects.get(user__pk=user_id).send_new_premium_email()
@app.task()
def FetchArchiveFeedsForUser(user_id):
    """Celery task: kick off archive-story fetches for all of a user's feeds."""
    # subs = UserSubscription.objects.filter(user=user_id)
    # user_profile = Profile.objects.get(user__pk=user_id)
    # logging.user(user_profile.user, f"~FCBeginning archive feed fetches for ~SB~FG{subs.count()} feeds~SN...")
    UserSubscription.fetch_archive_feeds_for_user(user_id)
@app.task()
def FetchArchiveFeedsChunk(user_id, feed_ids):
    """Celery task: fetch archive stories for one chunk of a user's feed ids."""
    # logging.debug(" ---> Fetching archive stories: %s for %s" % (feed_ids, user_id))
    UserSubscription.fetch_archive_feeds_chunk(user_id, feed_ids)
@app.task()
def FinishFetchArchiveFeeds(results, user_id, start_time, starting_story_count):
    """Celery task: finalize archive fetching and email the user story counts.

    NOTE(review): `results` is unused here — presumably the aggregated results
    from upstream chunk tasks (chord callback); confirm against the caller.
    """
    # logging.debug(" ---> Fetching archive stories finished for %s" % (user_id))
    ending_story_count, pre_archive_count = UserSubscription.finish_fetch_archive_feeds(user_id, start_time, starting_story_count)
    user_profile = Profile.objects.get(user__pk=user_id)
    user_profile.send_new_premium_archive_email(ending_story_count, pre_archive_count)
@app.task(name="email-new-premium-pro")
def EmailNewPremiumPro(user_id):
    """Celery task: send the premium-pro confirmation email to a user."""
    Profile.objects.get(user__pk=user_id).send_new_premium_pro_email()
@app.task(name="premium-expire")
def PremiumExpire(**kwargs):
    """Celery task: email users in the premium grace window, then deactivate
    premiums expired for more than thirty days.
    """
    # Get expired but grace period users
    two_days_ago = datetime.datetime.now() - datetime.timedelta(days=2)
    thirty_days_ago = datetime.datetime.now() - datetime.timedelta(days=30)
    # Window: expired between 2 and 30 days ago -> still in grace period.
    expired_profiles = Profile.objects.filter(is_premium=True,
                                              premium_expire__lte=two_days_ago,
                                              premium_expire__gt=thirty_days_ago)
    logging.debug(" ---> %s users have expired premiums, emailing grace..." % expired_profiles.count())
    for profile in expired_profiles:
        # NOTE(review): skipping before setup_premium_history means history is
        # not refreshed for users already emailed — presumably intentional.
        if profile.grace_period_email_sent():
            continue
        profile.setup_premium_history()
        if profile.premium_expire < two_days_ago:
            profile.send_premium_expire_grace_period_email()
    # Get fully expired users
    expired_profiles = Profile.objects.filter(is_premium=True,
                                              premium_expire__lte=thirty_days_ago)
    logging.debug(" ---> %s users have expired premiums, deactivating and emailing..." % expired_profiles.count())
    for profile in expired_profiles:
        profile.setup_premium_history()
        # Re-check after history refresh, which may have moved premium_expire.
        if profile.premium_expire < thirty_days_ago:
            profile.send_premium_expire_email()
            profile.deactivate_premium()
@app.task(name="activate-next-new-user")
def ActivateNextNewUser():
    """Celery task: activate the next queued new-user account."""
    RNewUserQueue.activate_next()
@app.task(name="cleanup-user")
def CleanupUser(user_id):
    """Celery task: run per-user maintenance (trim stories/activity, fix
    folders, refresh stale feeds, resync social photo)."""
    UserSubscription.trim_user_read_stories(user_id)
    UserSubscription.verify_feeds_scheduled(user_id)
    Profile.count_all_feed_subscribers_for_user(user_id)
    MInteraction.trim(user_id)
    MActivity.trim(user_id)
    UserSubscriptionFolders.add_missing_feeds_for_user(user_id)
    UserSubscriptionFolders.compact_for_user(user_id)
    UserSubscription.refresh_stale_feeds(user_id)
    # Social services may legitimately be absent; bail out quietly then.
    try:
        ss = MSocialServices.objects.get(user_id=user_id)
    except MSocialServices.DoesNotExist:
        logging.debug(" ---> ~FRCleaning up user, can't find social_services for user_id: ~SB%s" % user_id)
        return
    ss.sync_twitter_photo()
@app.task(name="clean-spam")
def CleanSpam():
    """Celery task: find and clear accounts flagged as dead spammers."""
    logging.debug(" ---> Finding spammers...")
    Profile.clear_dead_spammers(confirm=True)
@app.task(name="reimport-stripe-history")
def ReimportStripeHistory():
    """Celery task: re-import recent Stripe payment history (last day, 10 max)."""
    logging.debug(" ---> Reimporting Stripe history...")
    Profile.reimport_stripe_history(limit=10, days=1)
|
7,182 | test localization loss | import numpy as np
import pytest
from paz.optimization.losses.multi_box_loss import MultiBoxLoss
@pytest.fixture
def loss():
    """Fresh MultiBoxLoss instance for each test."""
    return MultiBoxLoss()
@pytest.fixture
def y_true():
    """Ground-truth tensor of shape (1, 3, 25): 4 box coordinates followed by
    a 21-way one-hot class vector per anchor."""
    y_true = np.array(
        [[38.38629, 48.666668, 10.362101, 11.512976, 0., 1.,
          0., 0., 0., 0., 0., 0.,
          0., 0., 0., 0., 0., 0.,
          0., 0., 0., 0., 0., 0.,
          0.],
         [27.143208, 34.41253, 8.629259, 9.7801285, 1., 0.,
          0., 0., 0., 0., 0., 0.,
          0., 0., 0., 0., 0., 0.,
          0., 0., 0., 0., 0., 0.,
          0.],
         [27.143208, 68.82506, 8.629259, 13.245829, 1., 0.,
          0., 0., 0., 0., 0., 0.,
          0., 0., 0., 0., 0., 0.,
          0., 0., 0., 0., 0., 0.,
          0.]], dtype='float32')
    return np.expand_dims(y_true, axis=0)
@pytest.fixture
def y_pred():
    """Predicted tensor matching the y_true fixture's (1, 3, 25) layout:
    4 box coordinates followed by 21 class scores per anchor."""
    y_pred = np.array(
        [[36.99653894, 46.4176432, 10.35266677, 10.1656072, 0.05621409,
          0.98060555, 0.01017545, 0.03181472, 0.02227341, 0.00503445,
          0.00746015, 0.15980312, 0.10174269, 0.01672697, 0.0111077,
          0.02144868, 0.07346129, 0.03899017, 0.01071656, 0.03946776,
          0.0830264, 0.06763985, 0.04077367, 0.07804006, 0.04347721],
         [26.614379, 32.0909085, 4.2000501, 7.0869583, 0.0423508, 0.91125538,
          0.04441671, 0.03053759, 0.07411292, 0.03454058, 0.04849431,
          0.0592223, 0.0134144, 0.09800261, 0.0433236, 0.04486571,
          0.01135817, 0.08123691, 0.02096761, 0.03070671, 0.04680151,
          0.12522466, 0.06783583, 0.05873021, 0.01260151],
         [2.16936564, 4.4787911, 6.314962, 4.42737758, 0.83406942,
          0.04166197, 0.01605819, 0.04750001, 0.01329675, 0.0126452,
          0.02085183, 0.0172693, 0.03088947, 0.02661936, 0.01231482,
          0.04099588, 0.02453831, 0.07038483, 0.06579002, 0.13424149,
          0.04614118, 0.03297557, 0.1374058, 0.15315633, 0.02119431]],
        dtype='float32')
    return np.expand_dims(y_pred, axis=0)
@pytest.fixture
def target_multibox_loss():
    """Reference total multi-box loss for the y_true/y_pred fixtures."""
    return 6.8489789962768555
@pytest.fixture
def target_smooth_l1_loss():
    """Reference per-anchor smooth-L1 losses for the fixtures."""
    return np.array([[3.5220284, 8.989227, 98.507996]], dtype='float32')
@pytest.fixture
def target_cross_entropy_loss():
    """Reference per-anchor cross-entropy losses for the fixtures."""
    return np.array([[0.019584997, 3.161768, 0.18143862]], dtype='float32')
@pytest.fixture
def target_localization_loss():
    """Reference scalar localization loss for the fixtures."""
    return np.array(3.4861877, dtype='float32')
@pytest.fixture
def target_positive_classification_loss():
    """Reference scalar positive-anchor classification loss."""
    return np.array(0.019584997, dtype='float32')
@pytest.fixture
def target_negative_classification_loss():
    """Reference scalar negative-anchor classification loss."""
    return np.array(3.3432066, dtype='float32')
def test_multiboxloss(y_true, y_pred, loss, target_multibox_loss):
    """Total multi-box loss matches the precomputed reference value.

    Uses np.isclose rather than exact `==` so the check is robust to
    backend/platform float rounding, consistent with the sibling tests
    that use np.allclose.
    """
    total_loss = loss.compute_loss(y_true, y_pred)
    assert np.isclose(float(total_loss), target_multibox_loss)
def test_smooth_l1_loss(y_true, y_pred, loss, target_smooth_l1_loss):
    """Per-anchor smooth-L1 loss matches the precomputed reference."""
    smooth_l1_loss = loss._smooth_l1(y_true, y_pred)
    smooth_l1_loss = np.asarray(smooth_l1_loss, dtype='float32')
    # Leftover debug print of the residual removed.
    assert np.allclose(smooth_l1_loss, target_smooth_l1_loss)
def test_cross_entropy_loss(y_true, y_pred, loss, target_cross_entropy_loss):
    """Per-anchor cross-entropy loss matches the precomputed reference."""
    result = np.asarray(loss._cross_entropy(y_true, y_pred), dtype='float32')
    assert np.allclose(result, target_cross_entropy_loss)
def METHOD_NAME(y_true, y_pred, loss, target_localization_loss):
    """Scalar localization loss matches the precomputed reference value."""
    result = np.asarray(loss.localization(y_true, y_pred), dtype='float32')
    assert np.allclose(result, target_localization_loss)
def test_positive_classification_loss(y_true, y_pred, loss,
                                      target_positive_classification_loss):
    """Positive-anchor classification loss matches the reference value."""
    result = np.asarray(
        loss.positive_classification(y_true, y_pred), dtype='float32')
    assert np.allclose(result, target_positive_classification_loss)
def test_negative_classification_loss(y_true, y_pred, loss,
                                      target_negative_classification_loss):
    """Negative-anchor classification loss matches the reference value."""
    result = np.asarray(
        loss.negative_classification(y_true, y_pred), dtype='float32')
    assert np.allclose(result, target_negative_classification_loss)
7,183 | compute shape from block spec | # Copyright 2023 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for pallas-core functionality."""
from __future__ import annotations
from collections.abc import Sequence
import contextlib
import dataclasses
import functools
from typing import Any, Callable, Iterator
from jax._src import core as jax_core
from jax._src import linear_util as lu
from jax._src import state
from jax._src import tree_util
from jax._src import util
from jax._src.interpreters import partial_eval as pe
from jax._src.state import discharge as state_discharge
import jax.numpy as jnp
# TODO(sharadmv): enable type checking
# mypy: ignore-errors
partial = functools.partial
Grid = tuple[int, ...]
split_list = util.split_list
map, unsafe_map = util.safe_map, map
zip, unsafe_zip = util.safe_zip, zip
@dataclasses.dataclass
class GridEnv:
  """One grid axis binding: the current program index along an axis and the
  axis's total size."""
  axis_index: Any  # current index value (may be a tracer)
  axis_size: int
# Stack of active grid environments; innermost environment is last.
_grid_env_stack: list[tuple[GridEnv, ...]] = []
@contextlib.contextmanager
def grid_env(env: tuple[tuple[Any, int], ...]) -> Iterator[None]:
  """Push (axis_index, axis_size) pairs as the current grid environment for
  the duration of the with-block, popping on exit even on error."""
  frame = tuple(GridEnv(index, size) for index, size in env)
  _grid_env_stack.append(frame)
  try:
    yield
  finally:
    _grid_env_stack.pop()
def current_grid_env() -> tuple[GridEnv, ...] | None:
  """Return the innermost active grid environment, or None outside grid_env."""
  return _grid_env_stack[-1] if _grid_env_stack else None
class Mapped:
  """Marker type for a block axis that is mapped (squeezed) away."""
  pass
# Singleton sentinel used in block shapes in place of a mapped dimension.
mapped = Mapped()
@dataclasses.dataclass(frozen=True)
class BlockSpec:
  """Pairs a user-provided index_map with the block shape it indexes.

  None entries in block_shape mark mapped (squeezed) dimensions.
  """
  index_map: Callable[..., Any]
  block_shape: tuple[int | None, ...]

  def compute_index(self, *args):
    """Invoke index_map and normalize its result to a tuple of indices."""
    indices = self.index_map(*args)
    return indices if isinstance(indices, tuple) else (indices,)
@dataclasses.dataclass(frozen=True)
class BlockMapping:
  """Traced form of a BlockSpec: the block shape (with `mapped` sentinels)
  plus the index map as a closed jaxpr."""
  block_shape: tuple[Mapped | int, ...]
  index_map_jaxpr: jax_core.ClosedJaxpr
  def compute_start_indices(self, loop_idx, *args):
    """Evaluate the index-map jaxpr at `loop_idx` and scale block indices by
    block sizes to get element-wise start indices (mapped axes pass through)."""
    discharged_jaxpr, discharged_consts = state_discharge.discharge_state(
        self.index_map_jaxpr.jaxpr, self.index_map_jaxpr.consts
    )
    jaxpr = jax_core.ClosedJaxpr(discharged_jaxpr, discharged_consts)
    block_indices_and_rest = jax_core.jaxpr_as_fun(jaxpr)(*loop_idx, *args)
    # Since we're passing in `Ref`s potentially, we need to split out their
    # updated values since we only care about the return values.
    block_indices, _ = split_list(block_indices_and_rest,
                                  [len(self.block_shape)])
    return tuple(i if b is mapped else b * i
                 for b, i in zip(self.block_shape, block_indices))
  replace = dataclasses.replace
@dataclasses.dataclass(frozen=True)
class GridMapping:
  """Full grid description for a pallas call: grid sizes, one BlockMapping per
  operand (None for unblocked operands), mapped grid dims, and the number of
  leading scalar index operands."""
  grid: tuple[int, ...]
  block_mappings: tuple[BlockMapping | None, ...]
  mapped_dims: tuple[int, ...]
  num_index_operands: int
  replace = dataclasses.replace
def _preprocess_grid(grid: Grid | int | None) -> Grid:
if grid is None:
return ()
if isinstance(grid, int):
return (grid,)
return grid
def _convert_block_spec_to_block_mapping(
    in_avals: list[jax_core.ShapedArray], block_spec: BlockSpec | None,
) -> BlockMapping | None:
  """Trace a BlockSpec's index map into a jaxpr and wrap it as a BlockMapping.

  Returns None for the `_no_block_spec` sentinel (unblocked operand). None
  entries in the block shape are replaced with the `mapped` sentinel.
  """
  if block_spec is _no_block_spec:
    return None
  block_shape = tuple(
      mapped if s is None else s for s in block_spec.block_shape)
  jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(
      lu.wrap_init(block_spec.compute_index), in_avals)
  return BlockMapping(block_shape, jax_core.ClosedJaxpr(jaxpr, consts))
def METHOD_NAME(block_spec: BlockSpec | None,
                arg_shape: tuple[int, ...]
                ) -> tuple[int, ...]:
  """Return the block-local shape for one operand.

  An operand with no BlockSpec (the `_no_block_spec` sentinel) is passed
  whole, so its full shape is used; otherwise mapped (None) axes are
  squeezed out of the spec's block shape.
  """
  if block_spec is _no_block_spec:
    return arg_shape
  return tuple(dim for dim in block_spec.block_shape if dim is not None)
def _get_ref_avals(grid, in_avals, in_specs, out_avals, out_specs):
  """Build Ref avals for kernel inputs/outputs from their block specs.

  With no grid, operands are passed whole (specs forced to None); with a
  grid, each Ref gets the block-local shape derived from its spec.
  Returns (in_specs, in_ref_avals, out_specs, out_ref_avals).
  """
  if grid is None:
    in_specs = [None] * len(in_avals)
    out_specs = [None] * len(out_avals)
    in_ref_avals = [state.shaped_array_ref(arg.shape, arg.dtype)
                    for arg in in_avals]
    out_ref_avals = [state.shaped_array_ref(arg.shape, arg.dtype)
                     for arg in out_avals]
  else:
    in_ref_avals = [
        state.shaped_array_ref(
            METHOD_NAME(
                block_spec, arg.shape), arg.dtype)
        for block_spec, arg in zip(in_specs, in_avals)]
    out_ref_avals = [
        state.shaped_array_ref(
            METHOD_NAME(
                block_spec, arg.shape), arg.dtype)
        for block_spec, arg in zip(out_specs, out_avals)]
  return in_specs, in_ref_avals, out_specs, out_ref_avals
# Sentinel meaning "no BlockSpec was provided for this operand".
_no_block_spec = object()
@dataclasses.dataclass(init=False)
class GridSpec:
  """User-facing grid specification: grid sizes plus per-operand BlockSpecs.

  `in_specs`/`out_specs` may only be given together with a grid.
  """
  grid: Grid
  in_specs: Sequence[BlockSpec | None] | None
  out_specs: tuple[BlockSpec | None, ...] | None
  def __init__(
      self,
      grid: Grid | None = None,
      in_specs: Sequence[BlockSpec | None] | None = None,
      out_specs: BlockSpec | Sequence[BlockSpec | None] | None = None,
  ):
    if grid is None:
      if in_specs is not None:
        raise ValueError("Cannot specify `in_specs` with a `None` grid.")
      if out_specs is not None:
        raise ValueError("Cannot specify `out_specs` with a `None` grid.")
    self.grid = _preprocess_grid(grid)
    self.in_specs = in_specs
    # Normalize out_specs: a lone BlockSpec becomes a 1-tuple, lists become
    # tuples. (The second isinstance check also converts list -> tuple.)
    if out_specs is not None and not isinstance(out_specs, (tuple, list)):
      out_specs = (out_specs,)
    if out_specs is not None and not isinstance(out_specs, tuple):
      out_specs = tuple(out_specs)
    self.out_specs = out_specs
  def get_grid_mapping(
      self, in_avals, in_tree, out_avals, out_tree
  ) -> tuple[tuple[jax_core.AbstractValue, ...], GridMapping]:
    """Trace all block specs and return (jaxpr ref avals, GridMapping).

    Validates that the pytree structure of in_specs/out_specs matches the
    structure of the actual arguments/outputs; missing specs default to the
    `_no_block_spec` sentinel (operand passed whole).
    """
    if self.in_specs is not None:
      in_specs = self.in_specs
      in_spec_tree = tree_util.tree_structure(tuple(in_specs))
      if in_spec_tree != in_tree:
        raise ValueError(
            "Pytree specs for arguments and `in_specs` must match: "
            f"{in_tree} vs. {in_spec_tree}")
    else:
      in_specs = [_no_block_spec] * len(in_avals)
    if self.out_specs is not None:
      out_specs = self.out_specs
      out_spec_tree = tree_util.tree_structure(out_specs)
      if out_spec_tree != out_tree:
        raise ValueError(
            "Pytree specs for `out_shape` and `out_specs` must match: "
            f"{out_tree} vs. {out_spec_tree}")
    else:
      out_specs = [_no_block_spec] * len(out_avals)
    flat_in_specs = tree_util.tree_leaves(in_specs)
    flat_out_specs = tree_util.tree_leaves(out_specs)
    in_specs, in_ref_avals, out_specs, out_ref_avals = _get_ref_avals(
        self.grid, in_avals, flat_in_specs, out_avals,
        flat_out_specs)
    # Index maps take one int32 program index per grid axis.
    grid_avals = [jax_core.ShapedArray((), jnp.dtype("int32"))] * len(self.grid)
    in_block_mappings = map(
        partial(_convert_block_spec_to_block_mapping, grid_avals), in_specs)
    out_block_mappings = map(
        partial(_convert_block_spec_to_block_mapping, grid_avals), out_specs)
    grid_mapping = GridMapping(
        self.grid, (*in_block_mappings, *out_block_mappings), (),
        num_index_operands=0)
    jaxpr_in_avals = tree_util.tree_unflatten(in_tree, in_ref_avals)
    jaxpr_out_avals = tree_util.tree_unflatten(out_tree, out_ref_avals)
    return (*jaxpr_in_avals, *jaxpr_out_avals), grid_mapping
7,184 | rgbd odometry | # ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# Copyright (c) 2018-2023 www.open3d.org
# SPDX-License-Identifier: MIT
# ----------------------------------------------------------------------------
from tqdm import tqdm
import numpy as np
import open3d as o3d
import open3d.core as o3c
from config import ConfigParser
from common import load_rgbd_file_names, load_depth_file_names, save_poses, load_intrinsic, load_extrinsics, get_default_dataset
def read_legacy_rgbd_image(color_file, depth_file, convert_rgb_to_intensity):
    """Load a color/depth pair as a legacy (non-tensor) Open3D RGBDImage."""
    return o3d.geometry.RGBDImage.create_from_color_and_depth(
        o3d.io.read_image(color_file),
        o3d.io.read_image(depth_file),
        depth_scale=1000.0,
        depth_trunc=3.0,
        convert_rgb_to_intensity=convert_rgb_to_intensity)
def rgbd_loop_closure(depth_list, color_list, intrinsic, config):
    """Find loop-closure edges between keyframes via pairwise RGBD odometry.

    Keyframes are taken every `config.odometry_loop_interval` frames; every
    keyframe pair is registered and kept when the fitness ratio exceeds 0.3.
    Returns (edges, poses, infos) for the accepted pairs.
    """
    # TODO: load it from config
    device = o3c.Device('CUDA:0')
    interval = config.odometry_loop_interval
    n_files = len(depth_list)
    key_indices = list(range(0, n_files, interval))
    n_key_indices = len(key_indices)
    edges = []
    poses = []
    infos = []
    # NOTE(review): `pairs` is never used below — dead variable?
    pairs = []
    criteria_list = [
        o3d.t.pipelines.odometry.OdometryConvergenceCriteria(20),
        o3d.t.pipelines.odometry.OdometryConvergenceCriteria(10),
        o3d.t.pipelines.odometry.OdometryConvergenceCriteria(5)
    ]
    method = o3d.t.pipelines.odometry.Method.PointToPlane
    for i in range(n_key_indices - 1):
        key_i = key_indices[i]
        depth_curr = o3d.t.io.read_image(depth_list[key_i]).to(device)
        color_curr = o3d.t.io.read_image(color_list[key_i]).to(device)
        rgbd_curr = o3d.t.geometry.RGBDImage(color_curr, depth_curr)
        for j in range(i + 1, n_key_indices):
            key_j = key_indices[j]
            depth_next = o3d.t.io.read_image(depth_list[key_j]).to(device)
            color_next = o3d.t.io.read_image(color_list[key_j]).to(device)
            rgbd_next = o3d.t.geometry.RGBDImage(color_next, depth_next)
            # TODO: add OpenCV initialization if necessary
            # TODO: better failure check
            # NOTE(review): broad best-effort except — a failed registration
            # silently skips the pair (see TODO above about better checks).
            try:
                res = o3d.t.pipelines.odometry.rgbd_odometry_multi_scale(
                    rgbd_curr, rgbd_next, intrinsic, o3c.Tensor(np.eye(4)),
                    1000.0, 3.0, criteria_list, method)
                info = o3d.t.pipelines.odometry.compute_odometry_information_matrix(
                    depth_curr, depth_next, intrinsic, res.transformation, 0.07,
                    1000.0, 3.0)
            except Exception as e:
                pass
            else:
                if info[5, 5] / (depth_curr.columns * depth_curr.rows) > 0.3:
                    edges.append((key_i, key_j))
                    poses.append(res.transformation.cpu().numpy())
                    infos.append(info.cpu().numpy())
                    # pcd_src = o3d.t.geometry.PointCloud.create_from_rgbd_image(
                    #     rgbd_curr, intrinsic)
                    # pcd_dst = o3d.t.geometry.PointCloud.create_from_rgbd_image(
                    #     rgbd_next, intrinsic)
                    # o3d.visualization.draw([pcd_src, pcd_dst])
                    # o3d.visualization.draw(
                    #     [pcd_src.transform(res.transformation), pcd_dst])
    return edges, poses, infos
def METHOD_NAME(depth_list, color_list, intrinsic, config):
    """Run sequential RGBD odometry between consecutive frames.

    Returns (edges, poses, infos): one (i, i+1) edge per frame pair with its
    relative transformation and information matrix.
    """
    # TODO: load it from config
    device = o3c.Device('CUDA:0')
    n_files = len(depth_list)
    depth_curr = o3d.t.io.read_image(depth_list[0]).to(device)
    color_curr = o3d.t.io.read_image(color_list[0]).to(device)
    rgbd_curr = o3d.t.geometry.RGBDImage(color_curr, depth_curr)
    # TODO: load all params and scale/max factors from config
    edges = []
    poses = []
    infos = []
    criteria_list = [
        o3d.t.pipelines.odometry.OdometryConvergenceCriteria(20),
        o3d.t.pipelines.odometry.OdometryConvergenceCriteria(10),
        o3d.t.pipelines.odometry.OdometryConvergenceCriteria(5)
    ]
    method = o3d.t.pipelines.odometry.Method.PointToPlane
    for i in tqdm(range(0, n_files - 1)):
        depth_next = o3d.t.io.read_image(depth_list[i + 1]).to(device)
        color_next = o3d.t.io.read_image(color_list[i + 1]).to(device)
        rgbd_next = o3d.t.geometry.RGBDImage(color_next, depth_next)
        res = o3d.t.pipelines.odometry.rgbd_odometry_multi_scale(
            rgbd_curr, rgbd_next, intrinsic, o3c.Tensor(np.eye(4)), 1000.0, 3.0,
            criteria_list, method)
        info = o3d.t.pipelines.odometry.compute_odometry_information_matrix(
            depth_curr, depth_next, intrinsic, res.transformation, 0.07, 1000.0,
            3.0)
        edges.append((i, i + 1))
        poses.append(res.transformation.cpu().numpy())
        infos.append(info.cpu().numpy())
        # Advance: the next frame becomes the current one for the next pair.
        color_curr = color_next
        depth_curr = depth_next
        rgbd_curr = rgbd_next
    return edges, poses, infos
if __name__ == '__main__':
    # Demo: register frame 0 against frame 10 with both the tensor and the
    # legacy odometry pipelines, then visualize the aligned point clouds.
    parser = ConfigParser()
    parser.add('--config',
               is_config_file=True,
               help='YAML config file path.'
               'Please refer to config.py for the options,'
               'and default_config.yml for default settings '
               'It overrides the default config file, but will be '
               'overridden by other command line inputs.')
    parser.add('--default_dataset',
               help='Default dataset is used when config file is not provided. '
               'Default dataset may be selected from the following options: '
               '[lounge, jack_jack]',
               default='lounge')
    config = parser.get_config()
    if config.path_dataset == '':
        config = get_default_dataset(config)
    depth_file_names, color_file_names = load_rgbd_file_names(config)
    intrinsic = load_intrinsic(config)
    i = 0
    j = 10
    depth_src = o3d.t.io.read_image(depth_file_names[i])
    color_src = o3d.t.io.read_image(color_file_names[i])
    depth_dst = o3d.t.io.read_image(depth_file_names[j])
    color_dst = o3d.t.io.read_image(color_file_names[j])
    rgbd_src = o3d.t.geometry.RGBDImage(color_src, depth_src)
    rgbd_dst = o3d.t.geometry.RGBDImage(color_dst, depth_dst)
    # RGBD odometry and information matrix computation
    res = o3d.t.pipelines.odometry.rgbd_odometry_multi_scale(
        rgbd_src, rgbd_dst, intrinsic)
    info = o3d.t.pipelines.odometry.compute_odometry_information_matrix(
        depth_src, depth_dst, intrinsic, res.transformation, 0.07)
    print(res.transformation, info)
    print(info[5, 5] / (depth_src.columns * depth_src.rows))
    # Legacy for reference, can be a little bit different due to minor implementation discrepancies
    rgbd_src_legacy = read_legacy_rgbd_image(color_file_names[i],
                                             depth_file_names[i], True)
    rgbd_dst_legacy = read_legacy_rgbd_image(color_file_names[j],
                                             depth_file_names[j], True)
    intrinsic_legacy = o3d.camera.PinholeCameraIntrinsic(
        o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)
    success, trans, info = o3d.pipelines.odometry.compute_rgbd_odometry(
        rgbd_src_legacy, rgbd_dst_legacy, intrinsic_legacy, np.eye(4),
        o3d.pipelines.odometry.RGBDOdometryJacobianFromHybridTerm())
    print(trans, info)
    # Visualization
    pcd_src = o3d.t.geometry.PointCloud.create_from_rgbd_image(
        rgbd_src, intrinsic)
    pcd_dst = o3d.t.geometry.PointCloud.create_from_rgbd_image(
        rgbd_dst, intrinsic)
    o3d.visualization.draw([pcd_src, pcd_dst])
    o3d.visualization.draw([pcd_src.transform(res.transformation), pcd_dst])
7,185 | clip sparse | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Delegating optimizer to clip norm for specified variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer
__all__ = ["VariableClippingOptimizer"]
class VariableClippingOptimizer(optimizer.Optimizer):
  """Wrapper optimizer that clips the norm of specified variables after update.
  This optimizer delegates all aspects of gradient calculation and application
  to an underlying optimizer. After applying gradients, this optimizer then
  clips the variable to have a maximum L2 norm along specified dimensions.
  NB: this is quite different from clipping the norm of the gradients.
  Multiple instances of `VariableClippingOptimizer` may be chained to specify
  different max norms for different subsets of variables.
  This is more efficient at serving-time than using normalization during
  embedding lookup, at the expense of more expensive training and fewer
  guarantees about the norms.
  @@__init__
  """
  def __init__(self,
               opt,
               vars_to_clip_dims,
               max_norm,
               use_locking=False,
               colocate_clip_ops_with_vars=False,
               name="VariableClipping"):
    """Construct a new clip-norm optimizer.
    Args:
      opt: The actual optimizer that will be used to compute and apply the
        gradients. Must be one of the Optimizer classes.
      vars_to_clip_dims: A dict with keys as Variables and values as lists
        of dimensions along which to compute the L2-norm. See
        `tf.clip_by_norm` for more details.
      max_norm: The L2-norm to clip to, for all variables specified.
      use_locking: If `True` use locks for clip update operations.
      colocate_clip_ops_with_vars: If `True`, try colocating the clip norm
        ops with the corresponding variable.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "VariableClipping".
    """
    super(VariableClippingOptimizer, self).__init__(use_locking, name)
    self._opt = opt
    # Defensive copy of input dict
    self._vars_to_clip_dims = {
        var: clip_dims[:] for var, clip_dims in vars_to_clip_dims.items()}
    self._max_norm = max_norm
    self._colocate_clip_ops_with_vars = colocate_clip_ops_with_vars
  def compute_gradients(self, *args, **kwargs):
    """Delegate gradient computation to the wrapped optimizer."""
    return self._opt.compute_gradients(*args, **kwargs)
  def get_slot(self, *args, **kwargs):
    """Delegate slot lookup to the wrapped optimizer."""
    return self._opt.get_slot(*args, **kwargs)
  def get_slot_names(self, *args, **kwargs):
    """Delegate slot-name lookup to the wrapped optimizer."""
    return self._opt.get_slot_names(*args, **kwargs)
  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Apply gradients via the wrapped optimizer, then clip selected vars."""
    with ops.name_scope(name, self._name) as name:
      update_op = self._opt.apply_gradients(
          grads_and_vars, global_step=global_step)
      clip_update_ops = []
      # Clip ops must run after the delegate's update has been applied.
      with ops.control_dependencies([update_op]):
        for grad, var in grads_and_vars:
          if grad is None or var not in self._vars_to_clip_dims:
            continue
          with ops.name_scope("clip_" + var.op.name):
            if isinstance(grad, ops.Tensor):
              clip_update_ops.append(self._clip_dense(var))
            else:
              clip_update_ops.append(self.METHOD_NAME(grad, var))
      # In case no var was clipped, still need to run the update_op.
      return control_flow_ops.group(*([update_op] + clip_update_ops), name=name)
  def _clip_dense(self, var):
    """Clip the whole variable to max_norm along its configured dims."""
    with self._maybe_colocate_with(var):
      updated_var_value = var.read_value()
      normalized_var = clip_ops.clip_by_norm(
          updated_var_value, self._max_norm, self._vars_to_clip_dims[var])
      delta = updated_var_value - normalized_var
    with ops.colocate_with(var):
      return var.assign_sub(delta, use_locking=self._use_locking)
  def METHOD_NAME(self, grad, var):
    """Clip only the rows of `var` touched by a sparse gradient update."""
    assert isinstance(grad, ops.IndexedSlices)
    clip_dims = self._vars_to_clip_dims[var]
    if 0 in clip_dims:
      logging.warning("Clipping norm across dims %s for %s is inefficient "
                      "when including sparse dimension 0.", clip_dims,
                      var.op.name)
      return self._clip_dense(var)
    with ops.colocate_with(var):
      var_subset = array_ops.gather(var, grad.indices)
    with self._maybe_colocate_with(var):
      normalized_var_subset = clip_ops.clip_by_norm(
          var_subset, self._max_norm, clip_dims)
      delta = ops.IndexedSlices(
          var_subset - normalized_var_subset, grad.indices, grad.dense_shape)
    with ops.colocate_with(var):
      return var.scatter_sub(delta, use_locking=self._use_locking)
  @contextlib.contextmanager
  def _maybe_colocate_with(self, var):
    """Context to colocate with `var` if `colocate_clip_ops_with_vars`."""
    if self._colocate_clip_ops_with_vars:
      with ops.colocate_with(var):
        yield
    else:
      yield
7,186 | make plot | """
* GTDynamics Copyright 2020, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* See LICENSE for the license information
*
* @file jr_visualizer.py
* @brief Visualize the jumping trajectory of the jumping robot.
* @author Yetong Zhang
"""
import inspect
import os.path as osp
import sys
currentdir = osp.dirname(osp.abspath(inspect.getfile(inspect.currentframe())))
parentdir = osp.dirname(currentdir)
sys.path.insert(0, parentdir)
sys.path.insert(0, currentdir)
import gtdynamics as gtd
import gtsam
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.animation import FuncAnimation
from jumping_robot import Actuator, JumpingRobot
def update_jr_frame(ax, values, jr, k):
    """ Update the jr animation frame. """
    # Draw each link as a line segment centered on its pose, one fixed color
    # per link, in the robot's sagittal (y-z) plane.
    link_names = ["shank_r", "thigh_r", "torso", "thigh_l", "shank_l"]
    colors = ["red", "orange", "black", "green", "blue"]
    ax.clear()
    for name, color in zip(link_names, colors):
        link = jr.robot.link(name)
        i = link.id()
        pose = gtd.Pose(values, i, k)
        y = pose.y()
        z = pose.z()
        theta = pose.rotation().roll()
        # Drawn link length — presumably meters; TODO confirm against model.
        l = 0.55
        start_y = y - l / 2 * np.cos(theta)
        start_z = z - l / 2 * np.sin(theta)
        end_y = y + l / 2 * np.cos(theta)
        end_z = z + l / 2 * np.sin(theta)
        ax.plot([start_y, end_y], [start_z, end_z], color=color)
    # for link in jr.robot.links():
    #     i = link.id()
    #     pose = link.bMcom()
    #     y = pose.y()
    #     z = pose.z()
    #     theta = pose.rotation().roll()
    #     l = 0.55
    #     start_y = y - l/2 * np.cos(theta)
    #     start_z = z - l/2 * np.sin(theta)
    #     end_y = y + l/2 * np.cos(theta)
    #     end_z = z + l/2 * np.sin(theta)
    #     ax.plot([start_y, end_y], [start_z, end_z], color='k', alpha=0.2)
    ax.set_aspect('equal', adjustable='box')
    ax.set_xlim(-1, 1)
    ax.set_ylim(-1, 2)
def visualize_jr(values: gtsam.Values, jr: JumpingRobot, k: int):
    """ Visualize the jumping robot.
    Args:
        values (gtsam.Values): all values of the time step
        jr (JumpingRobot): jumping robot
        k (int): time step to visualize
    """
    fig = plt.figure(figsize=(10, 10), dpi=80)
    ax = fig.add_subplot(1, 1, 1)
    update_jr_frame(ax, values, jr, k)
    # Blocks until the window is closed.
    plt.show()
def visualize_jr_trajectory(values, jr, num_steps, step=1):
    """ Visualize the jumping robot trajectory as animation.

    Args:
        values: gtsam.Values containing link poses for every time step
        jr: jumping robot
        num_steps (int): total number of time steps in the trajectory
        step (int): stride between animated frames
    """
    fig = plt.figure(figsize=(10, 10), dpi=80)
    ax = fig.add_subplot(1, 1, 1)

    def animate(i):
        update_jr_frame(ax, values, jr, i)

    frames = np.arange(0, num_steps, step)
    # Keep a reference to the animation: FuncAnimation holds only a weak
    # reference internally, so without this binding it is garbage-collected
    # and the window shows a single static frame instead of animating.
    anim = FuncAnimation(fig, animate, frames=frames, interval=10)
    plt.show()
def METHOD_NAME(values, jr, num_steps):
    """ Draw plots of all actuator/joint quantities against time.

    Args:
        values (gtsam.Values): all values of the trajectory
        jr (JumpingRobot): jumping robot
        num_steps (int): number of time steps to plot
    """
    joint_names = ["knee_r", "hip_r", "hip_l", "knee_l"]
    colors = {
        "knee_r": "red",
        "hip_r": "orange",
        "hip_l": "green",
        "knee_l": "blue",
        "source": "black"
    }
    # Time series keyed by joint name; "source" holds the air-tank series.
    qs_dict = {name: [] for name in joint_names}
    vs_dict = {name: [] for name in joint_names}
    torques_dict = {name: [] for name in joint_names}
    pressures_dict = {name: [] for name in joint_names}
    masses_dict = {name: [] for name in joint_names}
    mdots_dict = {name: [] for name in joint_names}
    contractions_dict = {name: [] for name in joint_names}
    forces_dict = {name: [] for name in joint_names}
    pressures_dict["source"] = []
    masses_dict["source"] = []
    time_list = []
    for k in range(num_steps):
        for name in joint_names:
            j = jr.robot.joint(name).id()
            qs_dict[name].append(gtd.JointAngle(values, j, k))
            vs_dict[name].append(gtd.JointVel(values, j, k))
            torques_dict[name].append(gtd.Torque(values, j, k))
            pressures_dict[name].append(
                values.atDouble(Actuator.PressureKey(j, k)))
            masses_dict[name].append(values.atDouble(Actuator.MassKey(j, k)))
            mdots_dict[name].append(
                values.atDouble(Actuator.MassRateActualKey(j, k)))
            contractions_dict[name].append(
                values.atDouble(Actuator.ContractionKey(j, k)))
            forces_dict[name].append(values.atDouble(Actuator.ForceKey(j, k)))
        masses_dict["source"].append(values.atDouble(
            Actuator.SourceMassKey(k)))
        pressures_dict["source"].append(
            values.atDouble(Actuator.SourcePressureKey(k)))
        time_list.append(values.atDouble(gtd.TimeKey(k)))

    fig, axs = plt.subplots(2, 3, sharex=True, figsize=(10, 6.7), dpi=80)
    # All six subplots share the same drawing logic; iterate over
    # (axes, data, title) triples instead of six copy-pasted loops.
    panels = [
        (axs[0, 0], qs_dict, "joint angle"),
        (axs[0, 1], torques_dict, "torque"),
        (axs[0, 2], forces_dict, "force"),
        (axs[1, 0], pressures_dict, "pressure"),
        (axs[1, 1], masses_dict, "mass"),
        (axs[1, 2], contractions_dict, "contraction"),
    ]
    for ax, data_dict, title in panels:
        for name, series in data_dict.items():
            ax.plot(time_list, series, label=name, color=colors[name])
        ax.set_title(title)
    plt.show()
if __name__ == "__main__":
    # Smoke test: build the robot from its YAML config, insert the initial
    # link poses at step 0, and draw that single frame.
    # (Bug fix: removed a stray trailing "|" that broke the last line.)
    yaml_file_path = osp.join(parentdir, "yaml", "robot_config.yaml")
    jr = JumpingRobot(yaml_file_path, JumpingRobot.create_init_config())
    values = gtsam.Values()
    k = 0
    for link in jr.robot.links():
        i = link.id()
        pose = link.bMcom()
        gtd.InsertPose(values, i, k, pose)
    visualize_jr(values, jr, k)
import datetime
from decimal import Decimal
from .err import DataError
import re
import time
def escape_item(val, mapping=None):
    """Escape *val* with the encoder registered for its exact type.

    Falls back to the ``str`` encoder when the type has no entry;
    raises TypeError when no default converter is available either.
    """
    table = encoders if mapping is None else mapping
    encoder = table.get(type(val))
    if not encoder:
        try:
            encoder = table[str]
        except KeyError:
            raise TypeError("no default type converter defined")
    return encoder(val, table)
def escape_dict(val, mapping=None):
    """Return a new dict with every value escaped via escape_item (keys kept)."""
    return {key: escape_item(item, mapping) for key, item in val.items()}
def escape_sequence(val, mapping=None):
    """Escape each element and join as a parenthesised SQL tuple literal."""
    quoted = [escape_item(item, mapping) for item in val]
    return "(" + ",".join(quoted) + ")"
def escape_set(val, mapping=None):
    """Escape each member and join with commas (MySQL SET literal body)."""
    return ','.join(escape_item(member, mapping) for member in val)
def escape_bool(value, mapping=None):
    """Render a boolean as the numeric text '1'/'0' (MySQL-style)."""
    return "%d" % int(value)
def escape_object(value, mapping=None):
    """Fallback encoder: rely on the object's own str() form (e.g. Decimal)."""
    text = str(value)
    return text
def escape_int(value, mapping=None):
    """Integers need no quoting; emit their decimal text."""
    return "{}".format(value)
def escape_float(value, mapping=None):
    """Format with 15 significant digits, enough to round-trip most floats."""
    return '{:.15g}'.format(value)
# str.translate table for ASCII text: identity mapping except the single
# quote, which is doubled (standard SQL escaping inside quoted literals).
_escape_table = [chr(x) for x in range(128)]
_escape_table[ord("'")] = u"''"
def _escape_unicode(value, mapping=None):
    """Escape *value* (a str) by doubling any embedded single quotes.

    The surrounding quotes are NOT added here; see escape_unicode.
    """
    return value.translate(_escape_table)
escape_string = _escape_unicode  # public alias: default escaper for decoded text
# On Python ~3.5, str.decode('ascii', 'surrogateescape') is slow.
# (fixed in Python 3.6, http://bugs.python.org/issue24870)
# Workaround is str.decode('latin1') then translate 0x80-0xff into 0udc80-0udcff.
# We can escape special chars and surrogateescape at once.
_escape_bytes_table = _escape_table + [chr(i) for i in range(0xdc80, 0xdd00)]
def escape_bytes(value, mapping=None):
    """Quote a bytes value; 0x80-0xff bytes survive via the latin1 + surrogate table."""
    decoded = value.decode('latin1')
    return "'%s'" % decoded.translate(_escape_bytes_table)
def escape_unicode(value, mapping=None):
    """Escape a str and wrap it in single quotes."""
    return "'" + _escape_unicode(value) + "'"
def escape_str(value, mapping=None):
    """Coerce to str, escape, and wrap in single quotes."""
    return "'{}'".format(escape_string(str(value), mapping))
def escape_None(value, mapping=None):
    """SQL NULL literal for Python None (value is ignored)."""
    return 'NULL'
def escape_timedelta(obj, mapping=None):
    """Render a timedelta as a quoted MySQL TIME literal, HH:MM:SS[.ffffff].

    Uses obj.days/obj.seconds directly, so negative intervals follow
    Python's normalised (negative days) representation.
    """
    total_minutes, seconds = divmod(int(obj.seconds), 60)
    day_hours, minutes = divmod(total_minutes, 60)
    hours = day_hours % 24 + int(obj.days) * 24
    if obj.microseconds:
        return "'{0:02d}:{1:02d}:{2:02d}.{3:06d}'".format(
            hours, minutes, seconds, obj.microseconds)
    return "'{0:02d}:{1:02d}:{2:02d}'".format(hours, minutes, seconds)
def escape_time(obj, mapping=None):
    """Quoted ISO time with microsecond precision, e.g. '11:30:00.000000'."""
    return "'" + obj.isoformat(timespec='microseconds') + "'"
def escape_datetime(obj, mapping=None):
    """Quoted 'YYYY-MM-DD HH:MM:SS.ffffff' literal (microseconds always emitted)."""
    # The old hand-rolled format-string implementation was left behind as
    # commented-out dead code; it has been removed.
    return "'{}'".format(obj.isoformat(sep=' ', timespec='microseconds'))
def escape_date(obj, mapping=None):
    """Quoted ISO date, e.g. '2007-02-26'."""
    return "'" + obj.isoformat() + "'"
def escape_struct_time(obj, mapping=None):
    """Convert a time.struct_time (first six fields) and escape it as a datetime."""
    as_datetime = datetime.datetime(*obj[:6])
    return escape_datetime(as_datetime)
def _convert_second_fraction(s):
if not s:
return 0
# Pad zeros to ensure the fraction length in microseconds
s = s.ljust(6, '0')
return int(s[:6])
def METHOD_NAME(obj):
    """Returns a DATETIME or TIMESTAMP column value as a datetime object:

    >>> METHOD_NAME('2007-02-25 23:06:20')
    datetime.datetime(2007, 2, 25, 23, 6, 20)
    >>> METHOD_NAME('2007-02-25 23:06:20.500000')
    datetime.datetime(2007, 2, 25, 23, 6, 20, 500000)

    Illegal values raise DataError.
    """
    if isinstance(obj, (bytes, bytearray)):
        obj = obj.decode('ascii')
    # Bug fix: DATETIME/TIMESTAMP values may carry a fractional-seconds
    # part; the old single-format strptime rejected those. Try both.
    last_err = None
    for fmt in ('%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S'):
        try:
            return datetime.datetime.strptime(obj, fmt)
        except Exception as err:
            last_err = err
    raise DataError("Not valid datetime struct: %s" % last_err)
# Matches MySQL TIME values of the form (-)HHH:MM:SS(.ffffff).
# Bug fix: the fractional-seconds separator is now an escaped literal dot;
# the previous unescaped "." accepted any character before the fraction.
TIMEDELTA_RE = re.compile(r"(-)?(\d{1,3}):(\d{1,2}):(\d{1,2})(?:\.(\d{1,6}))?")
def convert_timedelta(obj):
    """Returns a TIME column as a timedelta object:

    >>> convert_timedelta('25:06:17')
    datetime.timedelta(1, 3977)
    >>> convert_timedelta('-25:06:17')
    datetime.timedelta(-2, 83177)

    Values that do not match the (+|-)HH:MM:SS pattern are returned
    unchanged; matching values with out-of-range fields raise DataError.
    Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
    can accept values as (+|-)DD HH:MM:SS. The latter format will not
    be parsed correctly by this function.
    """
    if isinstance(obj, (bytes, bytearray)):
        obj = obj.decode('ascii')
    m = TIMEDELTA_RE.match(obj)
    if not m:
        # Not a TIME-shaped string: hand the raw value back untouched.
        return obj
    try:
        groups = list(m.groups())
        # The fraction arrives as a digit string; convert to microseconds.
        groups[-1] = _convert_second_fraction(groups[-1])
        negate = -1 if groups[0] else 1
        hours, minutes, seconds, microseconds = groups[1:]
        tdelta = datetime.timedelta(
            hours=int(hours),
            minutes=int(minutes),
            seconds=int(seconds),
            microseconds=int(microseconds)
        ) * negate
        return tdelta
    except ValueError as err:
        raise DataError("Not valid time or timedelta struct: %s" % err)
def convert_time(obj):
    """Returns a TIME column as a time object:

    >>> convert_time('15:06:17')
    datetime.time(15, 6, 17)

    Values not in plain HH:MM:SS form fall back to convert_timedelta,
    which may return the value unchanged or raise DataError.
    """
    if isinstance(obj, (bytes, bytearray)):
        obj = obj.decode('ascii')
    try:
        time_obj = datetime.datetime.strptime(obj, '%H:%M:%S')
        return time_obj.time()
    except Exception:
        # e.g. hours >= 24 or fractional seconds: treat as a timedelta.
        return convert_timedelta(obj)
def convert_date(obj):
    """Returns a DATE column as a date object:

    >>> convert_date('2007-02-26')
    datetime.date(2007, 2, 26)

    Values that cannot be parsed as YYYY-MM-DD raise DataError.
    """
    text = obj.decode('ascii') if isinstance(obj, (bytes, bytearray)) else obj
    try:
        return datetime.datetime.strptime(text, '%Y-%m-%d').date()
    except Exception as err:
        raise DataError("Not valid date struct: %s" % err)
def convert_set(s):
    """Split a SET column value on commas; bytes input yields a set of bytes."""
    separator = b"," if isinstance(s, (bytes, bytearray)) else ","
    return set(s.split(separator))
def convert_characters(connection, data):
    """Decode text bytes to str when the connection requests unicode output."""
    if connection.use_unicode:
        return data.decode("utf8")
    return data
def convert_column_data(column_type, column_data):
    """Convert raw column data by type name; unknown types pass through.

    NULL values and non-string type names are returned unchanged.
    """
    if column_data is None:
        return column_data
    if not isinstance(column_type, str):
        return column_data
    kind = column_type.lower().strip()
    if kind == 'time':
        return convert_time(column_data)
    if kind == 'date':
        return convert_date(column_data)
    if kind == 'datetime':
        return METHOD_NAME(column_data)
    return column_data
# Default type -> encoder registry used by escape_item when no explicit
# mapping is supplied.
# (Bug fix: removed a stray trailing "|" that broke the closing brace line.)
encoders = {
    bool: escape_bool,
    int: escape_int,
    float: escape_float,
    str: escape_unicode,
    tuple: escape_sequence,
    list: escape_sequence,
    set: escape_sequence,
    frozenset: escape_sequence,
    dict: escape_dict,
    type(None): escape_None,
    datetime.date: escape_date,
    datetime.datetime: escape_datetime,
    datetime.timedelta: escape_timedelta,
    datetime.time: escape_time,
    time.struct_time: escape_struct_time,
    Decimal: escape_object,
}
# -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import threading
from datetime import datetime, timedelta
import pytest
from rucio.core.heartbeat import live, die, cardiac_arrest, list_payload_counts, list_heartbeats, sanity_check
from rucio.db.sqla.models import Heartbeats
from rucio.db.sqla.session import transactional_session
@pytest.fixture
def executable_factory(function_scope_prefix, db_session):
    """Yield a factory producing unique executable names; delete the
    heartbeat rows created under those names on teardown."""
    executables = []
    def _create_executable():
        # Unique name per call: test prefix plus a running counter.
        executable = f'{function_scope_prefix}_{len(executables)}'
        executables.append(executable)
        return executable
    yield _create_executable
    # Teardown: remove every heartbeat registered by this test.
    db_session.query(Heartbeats).where(Heartbeats.executable.in_(executables)).delete()
@pytest.fixture
def thread_factory():
    """Yield a factory creating started (no-op) threads; join them all on teardown."""
    created_threads = []
    def _create_thread():
        th = threading.Thread()
        created_threads.append(th)
        th.start()
        return th
    yield _create_thread
    # Teardown: make sure no test thread outlives the test.
    for thread in created_threads:
        thread.join()
class TestHeartbeat:
    """Tests for the core heartbeat API: registration (live), removal (die),
    payload accounting and expiration-based cleanup (sanity_check).

    (Bug fix: removed a stray trailing "|" that broke the final assert line.)
    """

    def _pid(self):
        # Random fake PID, > 1 so it cannot clash with init.
        return random.randint(2, 2**16)

    def test_heartbeat_0(self, thread_factory, executable_factory):
        """ HEARTBEAT (CORE): Single instance """
        pid = self._pid()
        thread = thread_factory()
        executable = executable_factory()
        assert live(executable, 'host0', pid, thread) == {'assign_thread': 0, 'nr_threads': 1}
        assert live(executable, 'host0', pid, thread) == {'assign_thread': 0, 'nr_threads': 1}
        assert live(executable, 'host0', pid, thread) == {'assign_thread': 0, 'nr_threads': 1}

    def test_heartbeat_1(self, thread_factory, executable_factory):
        """ HEARTBEAT (CORE): Multiple instance """
        pids = [self._pid() for _ in range(4)]
        threads = [thread_factory() for _ in range(4)]
        executable = executable_factory()
        assert live(executable, 'host0', pids[0], threads[0]) == {'assign_thread': 0, 'nr_threads': 1}
        assert live(executable, 'host1', pids[1], threads[1]) == {'assign_thread': 1, 'nr_threads': 2}
        assert live(executable, 'host0', pids[0], threads[0]) == {'assign_thread': 0, 'nr_threads': 2}
        assert live(executable, 'host2', pids[2], threads[2]) == {'assign_thread': 2, 'nr_threads': 3}
        assert live(executable, 'host0', pids[0], threads[0]) == {'assign_thread': 0, 'nr_threads': 3}
        assert live(executable, 'host3', pids[3], threads[3]) == {'assign_thread': 3, 'nr_threads': 4}
        assert live(executable, 'host1', pids[1], threads[1]) == {'assign_thread': 1, 'nr_threads': 4}
        assert live(executable, 'host2', pids[2], threads[2]) == {'assign_thread': 2, 'nr_threads': 4}
        assert live(executable, 'host3', pids[3], threads[3]) == {'assign_thread': 3, 'nr_threads': 4}

    def test_heartbeat_2(self, thread_factory, executable_factory):
        """ HEARTBEAT (CORE): Multiple instance with removal"""
        pids = [self._pid() for _ in range(4)]
        threads = [thread_factory() for _ in range(4)]
        executable = executable_factory()
        assert live(executable, 'host0', pids[0], threads[0]) == {'assign_thread': 0, 'nr_threads': 1}
        assert live(executable, 'host1', pids[1], threads[1]) == {'assign_thread': 1, 'nr_threads': 2}
        assert live(executable, 'host0', pids[0], threads[0]) == {'assign_thread': 0, 'nr_threads': 2}
        assert live(executable, 'host2', pids[2], threads[2]) == {'assign_thread': 2, 'nr_threads': 3}
        assert live(executable, 'host0', pids[0], threads[0]) == {'assign_thread': 0, 'nr_threads': 3}
        die(executable, 'host0', pids[0], threads[0])
        # After a death the surviving workers are re-packed onto low indices.
        assert live(executable, 'host3', pids[3], threads[3]) == {'assign_thread': 2, 'nr_threads': 3}
        assert live(executable, 'host1', pids[1], threads[1]) == {'assign_thread': 0, 'nr_threads': 3}
        assert live(executable, 'host2', pids[2], threads[2]) == {'assign_thread': 1, 'nr_threads': 3}
        die(executable, 'host2', pids[2], threads[2])
        assert live(executable, 'host3', pids[3], threads[3]) == {'assign_thread': 1, 'nr_threads': 2}

    def test_heartbeat_3(self, executable_factory):
        """ HEARTBEAT (CORE): Single instance without thread. """
        pids = [self._pid() for _ in range(3)]
        executable = executable_factory()
        assert live(executable, 'host0', pids[0]) == {'assign_thread': 0, 'nr_threads': 1}
        assert live(executable, 'host1', pids[1]) == {'assign_thread': 1, 'nr_threads': 2}
        assert live(executable, 'host0', pids[0]) == {'assign_thread': 0, 'nr_threads': 2}

    def test_heartbeat_payload(self, thread_factory, executable_factory):
        """ HEARTBEAT (CORE): Test heartbeat with payload"""
        pids = [self._pid() for _ in range(6)]
        threads = [thread_factory() for _ in range(6)]
        executable = executable_factory()
        live(executable, 'host0', pids[0], threads[0], payload='payload1')
        live(executable, 'host0', pids[1], threads[1], payload='payload1')
        live(executable, 'host0', pids[2], threads[2], payload='payload1')
        live(executable, 'host1', pids[3], threads[3], payload='payload2')
        live(executable, 'host2', pids[4], threads[4], payload='payload3')
        live(executable, 'host3', pids[5], threads[5], payload='payload4')
        assert list_payload_counts(executable) == {'payload4': 1, 'payload2': 1, 'payload3': 1, 'payload1': 3}
        die(executable, 'host0', pids[0], threads[0])
        die(executable, 'host0', pids[1], threads[1])
        die(executable, 'host0', pids[2], threads[2])
        die(executable, 'host1', pids[3], threads[3])
        die(executable, 'host2', pids[4], threads[4])
        die(executable, 'host3', pids[5], threads[5])
        assert list_payload_counts('test5') == {}

    @pytest.mark.noparallel(reason='performs a heartbeat cardiac_arrest')
    @pytest.mark.dirty
    def METHOD_NAME(self, thread_factory, executable_factory):
        """ HEARTBEAT (CORE): expired heartbeats are removed by sanity_check. """
        cardiac_arrest()
        pids = [self._pid() for _ in range(2)]
        thread = thread_factory()
        executable1 = executable_factory()
        executable2 = executable_factory()
        live(executable1, 'host0', pids[0], thread)
        live(executable2, 'host0', pids[1], thread)
        live(executable1, 'host1', pids[0], thread)
        live(executable2, 'host1', pids[1], thread)
        live(executable1, 'host2', pids[0], thread)
        live(executable2, 'host2', pids[1], thread)
        assert len(list_heartbeats()) == 6

        @transactional_session
        def __forge_updated_at(*, session=None):
            # Backdate host1 beyond, and host2 within, the default expiry window.
            two_days_ago = datetime.utcnow() - timedelta(days=2)
            a_dozen_hours_ago = datetime.utcnow() - timedelta(hours=12)
            session.query(Heartbeats).filter_by(hostname='host1').update({'updated_at': two_days_ago})
            session.query(Heartbeats).filter_by(hostname='host2').update({'updated_at': a_dozen_hours_ago})
        __forge_updated_at()

        # Default expiration delay. Host1 health checks should get removed.
        sanity_check(executable=None, hostname=None)
        assert len(list_heartbeats()) == 4

        # Custom expiration delay. Host2 health checks should get removed too.
        sanity_check(executable2, 'host2', expiration_delay=timedelta(hours=5).total_seconds())
        assert len(list_heartbeats()) == 2
# Copyright 2009 Pablo Angulo
'''Script to export zim wiki pages to trac / mediawiki
To use it, call
python trac2zim.py notebook output_folder prefix
where prefix is a string you put before each wiki page name. It will
fill output_folder with plain text files ready to be loaded with trac-admin:
trac-admin /path/to/project wiki load output_folder
zim links like [[:Software:note taking:zim|zim]] are flattened to wiki
entries like [Software_note_taking_zim zim].
'''
import re
import sys
import os
#buscaCabeceras=re.compile('(={1:5})([^=]*)(={1:5})')
def flatten(linkName):
    '''Changes a zim link, possibly with categories, to a trac link
    it also removes accents and other spanish special characters
    '''
    # Strip a single trailing ':' (zim category links end with one).
    name = linkName[:-1] if linkName[-1] == ':' else linkName
    # Category separators and spaces both become underscores in trac names.
    flat = name.replace(':', '_').replace(' ', '_')
    return METHOD_NAME(flat)
# Translation table mapping accented Spanish characters to their plain
# ASCII equivalents (built once at import time; one C-level pass per call).
_SPECIAL_CHARS = str.maketrans('áéíóúñÁÉÍÓÚÑ', 'aeiounAEIOUN')


def METHOD_NAME(s):
    '''certain trac installation reported problems with special chars
    other trac systems loaded all files without problem
    the problem is only for file names and wiki pages names, not for content

    Bug fix: 'Ñ' is now mapped to 'N'; it was previously replaced with the
    lowercase special char 'ñ', leaving a non-ASCII character in the name.
    '''
    return s.translate(_SPECIAL_CHARS)
# Zim markup patterns, compiled once at import time. All patterns are raw
# strings; the previous plain strings contained invalid escape sequences
# (e.g. '\*') that raise SyntaxWarning/DeprecationWarning on modern Python.
cabecera = re.compile(r"(={1,6})([^=/]+?)(={1,6})")  # headings
inlineVerbatim = re.compile(r"''([^']+?)''")
#~ multilineVerbatim = re.compile(r"\n[\t](.+?)\n")
negrita = re.compile(r'\*\*([^*]+?)\*\*')  # bold
italic = re.compile(r'//([^/\n\]]+?)//')
bracketedURL = re.compile(r'\[\[(http://[^|]+)\|([^|]+?)\]\]')
# TODO: separate relative and absolute links
simpleRelLink = re.compile(r'\[\[([^:][^|]+?)\]\]')
namedRelLink = re.compile(r'\[\[([^:][^|]+?)\|([^|]+?)\]\]')
simpleAbsLink = re.compile(r'\[\[:([^|]+?)\]\]')
namedAbsLink = re.compile(r'\[\[:([^|]+?)\|([^|]+?)\]\]')
images = re.compile(r'([^{]){{/(.+?)\}\}')
def _sub_scan(nota, regexp, build):
    '''Replace every match of *regexp* in *nota* with build(match.groups()).

    Explicit scan-and-join equivalent of regexp.sub with a callable; keeps
    replacement text free of backreference escaping issues.
    '''
    lista = []
    lastIndex = 0
    mm = regexp.search(nota)
    while mm:
        lista.append(nota[lastIndex:mm.start()])
        lista.append(build(mm.groups()))
        lastIndex = mm.end()
        mm = regexp.search(nota, lastIndex)
    lista.append(nota[lastIndex:])
    return ''.join(lista)


def translate(nota, prefix1, prefix2):
    '''Takes a note in zim format and returns a note in trac format

    The five hand-rolled scan loops of the original were identical except
    for the pattern and the replacement text; they now share _sub_scan.
    '''
    # duplicate all line breaks
    nota = nota.replace('\n', '\n\n')
    # Headings: zim uses more '=' for deeper headings than trac, hence 7 - n.
    nota = _sub_scan(nota, cabecera,
                     lambda gg: "=" * (7 - len(gg[0])) + gg[1] + "=" * (7 - len(gg[0])))
    # inline verbatim
    nota = inlineVerbatim.sub("{{{\\1}}}", nota)
    # multiline verbatim
    # TODO
    # bold
    nota = negrita.sub("'''\\1'''", nota)
    # italic
    nota = italic.sub("''\\1''", nota)
    # bracketed URL
    nota = bracketedURL.sub("[\\1 \\2]", nota)
    # simple relative links
    nota = _sub_scan(nota, simpleRelLink,
                     lambda gg: "[wiki:" + prefix1 + prefix2 + flatten(gg[0]) + " " + gg[0] + "]")
    # simple absolute links
    nota = _sub_scan(nota, simpleAbsLink,
                     lambda gg: "[wiki:" + prefix1 + flatten(gg[0]) + " " + gg[0] + "]")
    # named relative links
    nota = _sub_scan(nota, namedRelLink,
                     lambda gg: "[wiki:" + prefix1 + prefix2 + flatten(gg[0]) + " " + gg[1] + "]")
    # named absolute links
    nota = _sub_scan(nota, namedAbsLink,
                     lambda gg: "[wiki:" + prefix1 + flatten(gg[0]) + " " + gg[1] + "]")
    # lists
    nota = nota.replace('\n* ', '\n * ')
    # images
    nota = images.sub("\\1[[Image(\\2)]]", nota)
    return nota
def processPath(pathin, pathout, prefix1, prefix2=''):
    '''Recursively convert every .txt zim page under *pathin* into a trac
    wiki file in the flat *pathout* directory.

    Page names are prefixed with prefix1 + prefix2; prefix2 accumulates the
    (flattened) subdirectory names while recursing.
    '''
    for archivo in os.listdir(pathin):
        fullPath = os.path.join(pathin, archivo)
        if archivo[-3:] == 'txt':
            # Context managers guarantee the handles close even on error
            # (the original left files open if translate() raised).
            with open(fullPath, mode='r') as fichero:
                nota = fichero.read()
            nota_out = translate(nota, prefix1, prefix2)
            out_name = prefix1 + prefix2 + METHOD_NAME(archivo[:-4])
            with open(os.path.join(pathout, out_name), mode='w') as fichero:
                fichero.write(nota_out)
        elif os.path.isdir(fullPath):
            print(pathin, archivo, fullPath)
            processPath(fullPath, pathout, prefix1, prefix2 + METHOD_NAME(archivo) + "_")
if __name__ == '__main__':
    # Usage: python trac2zim.py notebook output_folder prefix
    # (Bug fix: removed a stray trailing "|" that broke the last line.)
    pathin = sys.argv[1]
    pathout = sys.argv[2]
    prefix = sys.argv[3]
    processPath(pathin, pathout, prefix)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models
# Generic payload type for the deserialization callback below.
T = TypeVar('T')
# Optional callback applied to (pipeline response, deserialized model, response headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class OperationOperations(object):
    """OperationOperations operations.

    You should not instantiate directly this class, but create a Client
    instance that will create it for you and attach it as attribute.

    (Bug fix: removed a stray trailing "|" that broke the metadata line.)

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~storage_import_export.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def METHOD_NAME(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.ListOperationsResponse"
        """Returns the list of operations supported by the import/export resource provider.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ListOperationsResponse or the result of cls(response)
        :rtype: ~storage_import_export.models.ListOperationsResponse
        :raises: ~storage_import_export.models.ErrorResponseException:
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ListOperationsResponse"]
        error_map = kwargs.pop('error_map', {})
        api_version = "2016-11-01"

        def prepare_request(next_link=None):
            # First page hits the operation URL; later pages follow next_link.
            if not next_link:
                # Construct URL
                url = self.METHOD_NAME.metadata['url']
            else:
                url = next_link
            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            # Construct headers
            header_parameters = {}
            if self._config.acceptlanguage is not None:
                header_parameters['Accept-Language'] = self._serialize.header("self._config.acceptlanguage", self._config.acceptlanguage, 'str')
            header_parameters['Accept'] = 'application/json'
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize('ListOperationsResponse', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # No continuation token for this operation, hence the None link.
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise models.ErrorResponseException.from_response(response, self._deserialize)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    METHOD_NAME.metadata = {'url': '/providers/Microsoft.ImportExport/operations'}
from test import test_support
test_support.requires('audio')
from test.test_support import findfile
ossaudiodev = test_support.import_module('ossaudiodev')
import errno
import sys
import sunau
import time
import audioop
import unittest
# Arggh, AFMT_S16_NE not defined on all platforms -- seems to be a
# fairly recent addition to OSS.
try:
    from ossaudiodev import AFMT_S16_NE
except ImportError:
    # Derive the native-endian 16-bit format from the host byte order.
    if sys.byteorder == "little":
        AFMT_S16_NE = ossaudiodev.AFMT_S16_LE
    else:
        AFMT_S16_NE = ossaudiodev.AFMT_S16_BE
def read_sound_file(path):
    """Read a Sun .au file and return (data, rate, 16, nchannels) with the
    8-bit mu-law samples converted to 16-bit signed linear PCM."""
    with open(path, 'rb') as fp:
        au = sunau.open(fp)
        rate = au.getframerate()
        nchannels = au.getnchannels()
        encoding = au._encoding  # NOTE(review): private attr of the sunau reader
        fp.seek(0)
        data = fp.read()
    if encoding != sunau.AUDIO_FILE_ENCODING_MULAW_8:
        raise RuntimeError("Expect .au file with 8-bit mu-law samples")
    # Convert the data to 16-bit signed.
    data = audioop.ulaw2lin(data, 2)
    return (data, rate, 16, nchannels)
class OSSAudioDevTests(unittest.TestCase):
    """Exercise ossaudiodev against real audio hardware; tests skip (not
    fail) when no usable DSP device is present."""
    def play_sound_file(self, data, rate, ssize, nchannels):
        try:
            dsp = ossaudiodev.open('w')
        except IOError, msg:
            # No device / permission / busy: skip rather than fail.
            if msg.args[0] in (errno.EACCES, errno.ENOENT,
                               errno.ENODEV, errno.EBUSY):
                raise unittest.SkipTest(msg)
            raise
        # at least check that these methods can be invoked
        dsp.bufsize()
        dsp.obufcount()
        dsp.obuffree()
        dsp.getptr()
        dsp.fileno()
        # Make sure the read-only attributes work.
        self.assertFalse(dsp.closed)
        self.assertEqual(dsp.name, "/dev/dsp")
        self.assertEqual(dsp.mode, "w", "bad dsp.mode: %r" % dsp.mode)
        # And make sure they're really read-only.
        for attr in ('closed', 'name', 'mode'):
            try:
                setattr(dsp, attr, 42)
            except TypeError:
                pass
            else:
                self.fail("dsp.%s not read-only" % attr)
        # Compute expected running time of sound sample (in seconds).
        expected_time = float(len(data)) / (ssize//8) / nchannels / rate
        # set parameters based on .au file headers
        dsp.setparameters(AFMT_S16_NE, nchannels, rate)
        self.assertTrue(abs(expected_time - 3.51) < 1e-2, expected_time)
        t1 = time.time()
        dsp.write(data)
        dsp.close()
        t2 = time.time()
        elapsed_time = t2 - t1
        # Playback duration should roughly match the sample length.
        percent_diff = (abs(elapsed_time - expected_time) / expected_time) * 100
        self.assertTrue(percent_diff <= 10.0,
                        "elapsed time > 10% off of expected time")
    def set_parameters(self, dsp):
        # Two configurations for testing:
        #   config1 (8-bit, mono, 8 kHz) should work on even the most
        #      ancient and crufty sound card, but maybe not on special-
        #      purpose high-end hardware
        #   config2 (16-bit, stereo, 44.1kHz) should work on all but the
        #      most ancient and crufty hardware
        config1 = (ossaudiodev.AFMT_U8, 1, 8000)
        config2 = (AFMT_S16_NE, 2, 44100)
        for config in [config1, config2]:
            (fmt, channels, rate) = config
            if (dsp.setfmt(fmt) == fmt and
                dsp.channels(channels) == channels and
                dsp.speed(rate) == rate):
                break
        else:
            raise RuntimeError("unable to set audio sampling parameters: "
                               "you must have really weird audio hardware")
        # setparameters() should be able to set this configuration in
        # either strict or non-strict mode.
        result = dsp.setparameters(fmt, channels, rate, False)
        self.assertEqual(result, (fmt, channels, rate),
                         "setparameters%r: returned %r" % (config, result))
        result = dsp.setparameters(fmt, channels, rate, True)
        self.assertEqual(result, (fmt, channels, rate),
                         "setparameters%r: returned %r" % (config, result))
    def set_bad_parameters(self, dsp):
        # Now try some configurations that are presumably bogus: eg. 300
        # channels currently exceeds even Hollywood's ambitions, and
        # negative sampling rate is utter nonsense.  setparameters() should
        # accept these in non-strict mode, returning something other than
        # was requested, but should barf in strict mode.
        fmt = AFMT_S16_NE
        rate = 44100
        channels = 2
        for config in [(fmt, 300, rate),       # ridiculous nchannels
                       (fmt, -5, rate),        # impossible nchannels
                       (fmt, channels, -50),   # impossible rate
                      ]:
            (fmt, channels, rate) = config
            result = dsp.setparameters(fmt, channels, rate, False)
            self.assertNotEqual(result, config,
                                "unexpectedly got requested configuration")
            try:
                result = dsp.setparameters(fmt, channels, rate, True)
            except ossaudiodev.OSSAudioError, err:
                pass
            else:
                self.fail("expected OSSAudioError")
    def test_playback(self):
        sound_info = read_sound_file(findfile('audiotest.au'))
        self.play_sound_file(*sound_info)
    def METHOD_NAME(self):
        dsp = ossaudiodev.open("w")
        try:
            self.set_parameters(dsp)
            # Disabled because it fails under Linux 2.6 with ALSA's OSS
            # emulation layer.
            #self.set_bad_parameters(dsp)
        finally:
            dsp.close()
            self.assertTrue(dsp.closed)
def test_main():
    """Open the DSP once up front so the whole run is skipped (not failed)
    when no audio device is available, then run the test cases."""
    try:
        dsp = ossaudiodev.open('w')
    except (ossaudiodev.error, IOError), msg:
        if msg.args[0] in (errno.EACCES, errno.ENOENT,
                           errno.ENODEV, errno.EBUSY):
            raise unittest.SkipTest(msg)
        raise
    dsp.close()
    test_support.run_unittest(__name__)
if __name__ == "__main__":
    # (Bug fix: removed a stray trailing "|" that broke this call.)
    test_main()
import re
import numpy as np
import pytest
from gymnasium.spaces import Discrete, Graph, GraphInstance
def test_node_space_sample():
    """Node sampling with per-node masks; a None edge space warns on num_edges."""
    space = Graph(node_space=Discrete(3), edge_space=None)
    space.seed(0)
    # One mask per node, all allowing only category 1.
    sample = space.sample(
        mask=(tuple(np.array([0, 1, 0], dtype=np.int8) for _ in range(5)), None),
        num_nodes=5,
    )
    assert sample in space
    assert np.all(sample.nodes == 1)
    # Different mask per node: node 0 forced to 0, node 1 forced to 1.
    sample = space.sample(
        (
            (np.array([1, 0, 0], dtype=np.int8), np.array([0, 1, 0], dtype=np.int8)),
            None,
        ),
        num_nodes=2,
    )
    assert sample in space
    assert np.all(sample.nodes == np.array([0, 1]))
    # Requesting edges with a None edge space only warns; sampling still works.
    with pytest.warns(
        UserWarning,
        match=re.escape("The number of edges is set (5) but the edge space is None."),
    ):
        sample = space.sample(num_edges=5)
        assert sample in space
    # Change the node_space or edge_space to a non-Box or discrete space.
    # This should not happen, test is primarily to increase coverage.
    with pytest.raises(
        TypeError,
        match=re.escape(
            "Expects base space to be Box and Discrete, actual space: <class 'str'>"
        ),
    ):
        space.node_space = "abc"
        space.sample()
def test_edge_space_sample():
    """Edge sampling with shared and per-edge masks, plus num_edges validation."""
    space = Graph(node_space=Discrete(3), edge_space=Discrete(3))
    space.seed(0)
    # When num_nodes>1 then num_edges is set to 0
    assert space.sample(num_nodes=1).edges is None
    assert 0 <= len(space.sample(num_edges=3).edges) < 6
    # Single mask shared by all edges: only category 1 can be drawn.
    sample = space.sample(mask=(None, np.array([0, 1, 0], dtype=np.int8)))
    assert np.all(sample.edges == 1) or sample.edges is None
    # One mask per edge, forcing values 0, 1 and 2 respectively.
    sample = space.sample(
        mask=(
            None,
            (
                np.array([1, 0, 0], dtype=np.int8),
                np.array([0, 1, 0], dtype=np.int8),
                np.array([0, 0, 1], dtype=np.int8),
            ),
        ),
        num_edges=3,
    )
    assert np.all(sample.edges == np.array([0, 1, 2]))
    with pytest.raises(
        AssertionError,
        match="Expects the number of edges to be greater than 0, actual value: -1",
    ):
        space.sample(num_edges=-1)
    # With a None edge space, requesting edges warns and yields no edges.
    space = Graph(node_space=Discrete(3), edge_space=None)
    with pytest.warns(
        UserWarning,
        match=re.escape(
            "\x1b[33mWARN: The number of edges is set (5) but the edge space is None.\x1b[0m"
        ),
    ):
        sample = space.sample(num_edges=5)
    assert sample.edges is None
@pytest.mark.parametrize(
    "sample",
    [
        "abc",
        GraphInstance(
            nodes=None, edges=np.array([0, 1]), edge_links=np.array([[0, 1], [1, 0]])
        ),
        GraphInstance(
            nodes=np.array([10, 1, 0]),
            edges=np.array([0, 1]),
            edge_links=np.array([[0, 1], [1, 0]]),
        ),
        GraphInstance(
            nodes=np.array([0, 1]), edges=None, edge_links=np.array([[0, 1], [1, 0]])
        ),
        GraphInstance(nodes=np.array([0, 1]), edges=np.array([0, 1]), edge_links=None),
        GraphInstance(
            nodes=np.array([1, 2]),
            edges=np.array([10, 1]),
            edge_links=np.array([[0, 1], [1, 0]]),
        ),
        GraphInstance(
            nodes=np.array([1, 2]),
            edges=np.array([0, 1]),
            edge_links=np.array([[0.5, 1.0], [2.0, 1.0]]),
        ),
        GraphInstance(
            nodes=np.array([1, 2]), edges=np.array([10, 1]), edge_links=np.array([0, 1])
        ),
        GraphInstance(
            nodes=np.array([1, 2]),
            edges=np.array([0, 1]),
            edge_links=np.array([[[0], [1]], [[0], [0]]]),
        ),
        GraphInstance(
            nodes=np.array([1, 2]),
            edges=np.array([0, 1]),
            edge_links=np.array([[10, 1], [0, 0]]),
        ),
    ],
)
def test_sample_not_in_space(sample):
    """Each parametrized value is malformed in some way (wrong type, missing
    nodes/edges/edge_links, out-of-range values, float or mis-shaped edge_links)
    and therefore must not be contained in the Graph space.
    """
    space = Graph(node_space=Discrete(2), edge_space=Discrete(2))
    assert sample not in space
import unittest
import requests
from unittest import mock
import tap_hubspot
class TestRequestTimeoutValue(unittest.TestCase):
    """Verify that get_request_timeout() honours the `request_timeout` config value.

    NOTE: these tests mutate the module-level tap_hubspot.CONFIG dict.
    """

    def test_integer_request_timeout_in_config(self):
        """
        Verify that if request_timeout is provided in config (integer value) then it should be used
        """
        tap_hubspot.CONFIG.update({"request_timeout": 100})  # integer timeout in config
        request_timeout = tap_hubspot.get_request_timeout()
        self.assertEqual(request_timeout, 100.0)  # Verify timeout value

    def test_float_request_timeout_in_config(self):
        """
        Verify that if request_timeout is provided in config (float value) then it should be used
        """
        tap_hubspot.CONFIG.update({"request_timeout": 100.5})  # float timeout in config
        request_timeout = tap_hubspot.get_request_timeout()
        self.assertEqual(request_timeout, 100.5)  # Verify timeout value

    def test_string_request_timeout_in_config(self):
        """
        Verify that if request_timeout is provided in config (string value) then it should be used
        """
        tap_hubspot.CONFIG.update({"request_timeout": "100"})  # string format timeout in config
        request_timeout = tap_hubspot.get_request_timeout()
        self.assertEqual(request_timeout, 100.0)  # Verify timeout value

    def test_empty_string_request_timeout_in_config(self):
        """
        Verify that if request_timeout is provided in config with empty string then default value is used
        """
        tap_hubspot.CONFIG.update({"request_timeout": ""})  # empty string in config
        request_timeout = tap_hubspot.get_request_timeout()
        self.assertEqual(request_timeout, 300)  # Verify default timeout value

    def test_zero_request_timeout_in_config(self):
        """
        Verify that if request_timeout is provided in config with zero value then default value is used
        """
        tap_hubspot.CONFIG.update({"request_timeout": 0})  # zero value in config
        request_timeout = tap_hubspot.get_request_timeout()
        self.assertEqual(request_timeout, 300)  # Verify default timeout value

    def test_zero_string_request_timeout_in_config(self):
        """
        Verify that if request_timeout is provided in config with zero in string format then default value is used
        """
        tap_hubspot.CONFIG.update({"request_timeout": '0'})  # zero value in config
        request_timeout = tap_hubspot.get_request_timeout()
        self.assertEqual(request_timeout, 300)  # Verify default timeout value

    def test_no_request_timeout_in_config(self):
        """
        Verify that if request_timeout is not provided in config then default value is used
        """
        tap_hubspot.CONFIG = {}  # reset config entirely so the key is absent
        request_timeout = tap_hubspot.get_request_timeout()
        self.assertEqual(request_timeout, 300)  # Verify default timeout value
@mock.patch("time.sleep")
class TestRequestTimeoutBackoff(unittest.TestCase):
    """Verify backoff/retry counts on Timeout errors.

    The class-level patch of time.sleep keeps retries instant; every test
    therefore receives ``mocked_sleep`` as its final mock argument
    (decorators apply bottom-up, so mock arguments are in reverse order).
    """

    @mock.patch('requests.Session.send', side_effect = requests.exceptions.Timeout)
    @mock.patch("requests.Request.prepare")
    @mock.patch('tap_hubspot.get_params_and_headers', return_value = ({}, {}))
    def test_request_timeout_backoff(self, mocked_get, mocked_prepare, mocked_send, mocked_sleep):
        """
        Verify request function is backoff for only 5 times on Timeout exception.
        """
        try:
            tap_hubspot.request('dummy_url', {})
        except Exception:
            # The final Timeout propagates after retries are exhausted; ignore it.
            pass
        # Verify that Session.send is called 5 times
        self.assertEqual(mocked_send.call_count, 5)

    @mock.patch('tap_hubspot.get_params_and_headers', return_value = ({}, {}))
    @mock.patch('requests.post', side_effect = requests.exceptions.Timeout)
    def test_request_timeout_backoff_for_post_search_endpoint(self, mocked_post, mocked_get, mocked_sleep):
        """
        Verify post_search_endpoint function is backoff for only 5 times on Timeout exception.
        """
        try:
            tap_hubspot.post_search_endpoint('dummy_url', {})
        except Exception:
            pass
        # Verify that requests.post is called 5 times
        self.assertEqual(mocked_post.call_count, 5)

    @mock.patch('requests.post', side_effect = requests.exceptions.Timeout)
    def test_request_timeout_backoff_for_acquire_access_token_from_refresh_token(self, mocked_post, mocked_sleep):
        """
        Verify request function is backoff for only 5 times instead of 25 times on Timeout exception that thrown from `acquire_access_token_from_refresh_token` method.
        Here get_params_and_headers method called from request method and acquire_access_token_from_refresh_token called from get_params_and_headers method.
        """
        try:
            tap_hubspot.post_search_endpoint('dummy_url', {})
        except Exception:
            pass
        # Verify that requests.post is called 5 times
        self.assertEqual(mocked_post.call_count, 5)
# Tests for try/except/else/finally control-flow semantics.
from test.test_support import run_unittest
import unittest
class ExceptionTestCase(unittest.TestCase):
    """Exercise every combination of try/except/else/finally and verify which
    clauses run when an exception is (or is not) raised.

    Each test sets boolean flags inside the clauses and asserts on them.
    The ``raise Exception('...')`` form is used (instead of the Python-2-only
    ``raise Exception, '...'``) so the file parses under both Python 2 and 3.
    """

    def test_try_except_else_finally(self):
        hit_except = False
        hit_else = False
        hit_finally = False
        try:
            raise Exception('nyaa!')
        except:
            hit_except = True
        else:
            hit_else = True
        finally:
            hit_finally = True
        # else must be skipped when the body raises; except and finally run.
        self.assertTrue(hit_except)
        self.assertTrue(hit_finally)
        self.assertFalse(hit_else)

    def test_try_except_else_finally_no_exception(self):
        hit_except = False
        hit_else = False
        hit_finally = False
        try:
            pass
        except:
            hit_except = True
        else:
            hit_else = True
        finally:
            hit_finally = True
        self.assertFalse(hit_except)
        self.assertTrue(hit_finally)
        self.assertTrue(hit_else)

    def test_try_except_finally(self):
        hit_except = False
        hit_finally = False
        try:
            raise Exception('yarr!')
        except:
            hit_except = True
        finally:
            hit_finally = True
        self.assertTrue(hit_except)
        self.assertTrue(hit_finally)

    def test_try_except_finally_no_exception(self):
        hit_except = False
        hit_finally = False
        try:
            pass
        except:
            hit_except = True
        finally:
            hit_finally = True
        self.assertFalse(hit_except)
        self.assertTrue(hit_finally)

    def test_try_except(self):
        hit_except = False
        try:
            raise Exception('ahoy!')
        except:
            hit_except = True
        self.assertTrue(hit_except)

    def test_try_except_no_exception(self):
        hit_except = False
        try:
            pass
        except:
            hit_except = True
        self.assertFalse(hit_except)

    def test_try_except_else(self):
        hit_except = False
        hit_else = False
        try:
            raise Exception('foo!')
        except:
            hit_except = True
        else:
            hit_else = True
        self.assertFalse(hit_else)
        self.assertTrue(hit_except)

    def test_try_except_else_no_exception(self):
        hit_except = False
        hit_else = False
        try:
            pass
        except:
            hit_except = True
        else:
            hit_else = True
        self.assertFalse(hit_except)
        self.assertTrue(hit_else)

    def test_try_finally_no_exception(self):
        hit_finally = False
        try:
            pass
        finally:
            hit_finally = True
        self.assertTrue(hit_finally)

    def test_nested(self):
        hit_finally = False
        hit_inner_except = False
        hit_inner_finally = False
        try:
            try:
                raise Exception('inner exception')
            except:
                hit_inner_except = True
            finally:
                hit_inner_finally = True
        finally:
            hit_finally = True
        # The inner handler consumes the exception; both finally blocks run.
        self.assertTrue(hit_inner_except)
        self.assertTrue(hit_inner_finally)
        self.assertTrue(hit_finally)

    def test_nested_else(self):
        hit_else = False
        hit_finally = False
        hit_except = False
        hit_inner_except = False
        hit_inner_else = False
        try:
            try:
                pass
            except:
                hit_inner_except = True
            else:
                hit_inner_else = True
            # Raised from the inner else clause: caught by the OUTER except.
            raise Exception('outer exception')
        except:
            hit_except = True
        else:
            hit_else = True
        finally:
            hit_finally = True
        self.assertFalse(hit_inner_except)
        self.assertTrue(hit_inner_else)
        self.assertFalse(hit_else)
        self.assertTrue(hit_finally)
        self.assertTrue(hit_except)
def test_main():
    # Entry point for the Python 2 regrtest harness (test.test_support).
    run_unittest(ExceptionTestCase)

if __name__ == '__main__':
    test_main()
import itertools
class CombinationRow:
    """
    Row object store all combinations between two parameters into dictionary.
    Keys in dictionary are values of combinations and values in dictionary are
    information about coverage. Row object has information how many combinations
    are uncovered and how many of them are covered more than once.

    Cell states inside ``hash_table``:
      * ``0``      -- valid but not yet covered
      * ``n > 0``  -- covered ``n`` times
      * ``None``   -- disabled (violates the constraints)
    """

    def __init__(self, input_data, t_value, parameters):
        """
        :param input_data: list of domain sizes, one per parameter, from the user
        :param t_value: t number from user (order of the combinations)
        :param parameters: the tuple of parameter indices whose combinations
            this Row object represents
        """
        self.hash_table = {}
        self.covered_more_than_ones = 0
        self.uncovered = 0
        array = []
        # Create the cartesian product of the value ranges of the selected
        # parameters; every combination starts out valid and uncovered.
        for i in range(t_value):
            array.append(list(range(input_data[parameters[i]])))
        for i in itertools.product(*array):
            self.uncovered += 1
            self.hash_table[i] = 0

    def cover_cell(self, key):
        """
        Cover one combination inside Row.

        :param key: combination to be covered
        :return: tuple (delta of uncovered count, delta of covered-more-than-once
            count); e.g. ``(-1, 0)`` when a previously uncovered cell is covered
        """
        old_uncovered = self.uncovered
        old_covered_more_than_ones = self.covered_more_than_ones
        value = self.hash_table[tuple(key)]
        if value is not None:  # disabled cells are never counted
            if value == 0:
                self.uncovered -= 1
            elif value == 1:
                self.covered_more_than_ones += 1
            self.hash_table[tuple(key)] += 1
        return (
            self.uncovered - old_uncovered,
            self.covered_more_than_ones - old_covered_more_than_ones,
        )

    def uncover_cell(self, key):
        """
        Uncover one combination inside Row (inverse of :meth:`cover_cell`).

        :param key: combination to be uncovered
        :return: tuple (delta of uncovered count, delta of covered-more-than-once
            count)
        """
        old_uncovered = self.uncovered
        old_covered_more_than_ones = self.covered_more_than_ones
        value = self.hash_table[tuple(key)]
        if value is not None and value > 0:
            if value == 1:
                self.uncovered += 1
            elif value == 2:
                self.covered_more_than_ones -= 1
            self.hash_table[tuple(key)] -= 1
        return (
            self.uncovered - old_uncovered,
            self.covered_more_than_ones - old_covered_more_than_ones,
        )

    def completely_uncover(self):
        """
        Uncover all (non-disabled) combinations inside Row.
        """
        self.uncovered = 0
        self.covered_more_than_ones = 0
        for key, value in self.hash_table.items():
            if value is not None:
                self.hash_table[key] = 0
                self.uncovered += 1

    def del_cell(self, key):
        """
        Disable one combination. If combination is disabled it means that
        the combination does not match the constraints.

        :param key: combination to be disabled
        :return: ``-1`` if the cell was disabled by this call, ``0`` if it was
            already disabled
        """
        key = tuple(key)
        if self.hash_table[key] is not None:
            self.hash_table[key] = None
            self.uncovered -= 1
            return -1
        else:
            return 0

    def is_valid(self, key):
        """
        Does the combination match the constraints (i.e. is it not disabled)?

        :param key: combination to validate
        :return: ``False`` if the cell exists and is disabled, ``True`` otherwise
        """
        key = tuple(key)
        if self.hash_table.get(key, 0) is None:
            return False
        else:
            return True

    def get_all_uncovered_combinations(self):
        """
        :return: list of all uncovered (and still valid) combinations
        """
        combinations = []
        for key, value in self.hash_table.items():
            if value == 0:
                combinations.append(key)
        return combinations

    def __eq__(self, other):
        # Two rows are equal when their full coverage state matches.
        return (
            self.covered_more_than_ones == other.covered_more_than_ones
            and self.uncovered == other.uncovered
            and self.hash_table == other.hash_table
        )
"""
Copyright (c) 2018-2023 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
from utils import cut_rois, resize_input
from ie_module import Module
class FaceIdentifier(Module):
    """Face re-identification module: computes face descriptors and matches them
    against a known-faces database."""

    # Taken from the description of the model:
    # intel_models/face-reidentification-retail-0095
    REFERENCE_LANDMARKS = [
        (30.2946 / 96, 51.6963 / 112),  # left eye
        (65.5318 / 96, 51.5014 / 112),  # right eye
        (48.0252 / 96, 71.7366 / 112),  # nose tip
        (33.5493 / 96, 92.3655 / 112),  # left lip corner
        (62.7299 / 96, 92.2041 / 112)]  # right lip corner

    UNKNOWN_ID = -1
    UNKNOWN_ID_LABEL = "Unknown"

    class Result:
        """Identification result: matched id, match distance, and the descriptor."""

        def __init__(self, id, distance, desc):
            self.id = id
            self.distance = distance
            self.descriptor = desc

    def __init__(self, core, model, match_threshold=0.5, match_algo='HUNGARIAN'):
        super(FaceIdentifier, self).__init__(core, model, 'Face Reidentification')

        if len(self.model.inputs) != 1:
            raise RuntimeError("The model expects 1 input layer")
        if len(self.model.outputs) != 1:
            raise RuntimeError("The model expects 1 output layer")

        self.input_tensor_name = self.model.inputs[0].get_any_name()
        self.input_shape = self.model.inputs[0].shape
        # NCHW layout is detected by the channel count being in dimension 1.
        self.nchw_layout = self.input_shape[1] == 3
        output_shape = self.model.outputs[0].shape
        if len(output_shape) not in (2, 4):
            raise RuntimeError("The model expects output shape [1, n, 1, 1] or [1, n], got {}".format(output_shape))

        self.faces_database = None
        self.match_threshold = match_threshold
        self.match_algo = match_algo

    def set_faces_database(self, database):
        self.faces_database = database

    def get_identity_label(self, id):
        # Fall back to the "Unknown" label when no database is set or no match.
        if not self.faces_database or id == self.UNKNOWN_ID:
            return self.UNKNOWN_ID_LABEL
        return self.faces_database[id].label

    def preprocess(self, frame, rois, landmarks):
        # Crop each ROI, align it by landmarks, then resize to the model input.
        image = frame.copy()
        inputs = cut_rois(image, rois)
        self._align_rois(inputs, landmarks)
        inputs = [resize_input(input, self.input_shape, self.nchw_layout) for input in inputs]
        return inputs

    def enqueue(self, input):
        return super(FaceIdentifier, self).enqueue({self.input_tensor_name: input})

    def start_async(self, frame, rois, landmarks):
        inputs = self.preprocess(frame, rois, landmarks)
        for input in inputs:
            self.enqueue(input)

    def get_threshold(self):
        return self.match_threshold

    def postprocess(self):
        """Match computed descriptors against the database.

        Returns a tuple of (list of Result, list of indices whose distance
        exceeded the match threshold and were marked unknown).
        """
        descriptors = self.get_descriptors()

        matches = []
        if len(descriptors) != 0:
            matches = self.faces_database.match_faces(descriptors, self.match_algo)

        results = []
        unknowns_list = []
        for num, match in enumerate(matches):
            id = match[0]
            distance = match[1]
            if self.match_threshold < distance:
                id = self.UNKNOWN_ID
                unknowns_list.append(num)

            results.append(self.Result(id, distance, descriptors[num]))
        return results, unknowns_list

    def get_descriptors(self):
        return [out.flatten() for out in self.get_outputs()]

    @staticmethod
    def normalize(array, axis):
        # In-place standardization; returns the removed mean and std so callers
        # can undo or reuse the scaling.
        mean = array.mean(axis=axis)
        array -= mean
        std = array.std()
        array /= std
        return mean, std

    @staticmethod
    def get_transform(src, dst):
        """Return the 2x3 affine transform mapping point set `src` onto `dst`,
        estimated via SVD (orthogonal Procrustes with scaling)."""
        assert np.array_equal(src.shape, dst.shape) and len(src.shape) == 2, \
            '2d input arrays are expected, got {}'.format(src.shape)
        src_col_mean, src_col_std = FaceIdentifier.normalize(src, axis=0)
        dst_col_mean, dst_col_std = FaceIdentifier.normalize(dst, axis=0)

        u, _, vt = np.linalg.svd(np.matmul(src.T, dst))
        r = np.matmul(u, vt).T

        transform = np.empty((2, 3))
        transform[:, 0:2] = r * (dst_col_std / src_col_std)
        transform[:, 2] = dst_col_mean.T - np.matmul(transform[:, 0:2], src_col_mean.T)
        return transform

    def _align_rois(self, face_images, face_landmarks):
        # Warp each face crop so its landmarks line up with REFERENCE_LANDMARKS.
        assert len(face_images) == len(face_landmarks), \
            'Input lengths differ, got {} and {}'.format(len(face_images), len(face_landmarks))

        for image, image_landmarks in zip(face_images, face_landmarks):
            scale = np.array((image.shape[1], image.shape[0]))
            desired_landmarks = np.array(self.REFERENCE_LANDMARKS, dtype=float) * scale
            landmarks = image_landmarks * scale

            transform = FaceIdentifier.get_transform(desired_landmarks, landmarks)
            cv2.warpAffine(image, transform, tuple(scale), image, flags=cv2.WARP_INVERSE_MAP)
import pytest
from django.urls import reverse
from adhocracy4.test.helpers import assert_template_response
from adhocracy4.test.helpers import freeze_phase
from adhocracy4.test.helpers import freeze_post_phase
from adhocracy4.test.helpers import redirect_target
from adhocracy4.test.helpers import setup_phase
from apps.budgeting import models
from apps.budgeting import phases
@pytest.mark.django_db
def test_create_view(
    client,
    phase_factory,
    proposal_factory,
    user,
    category_factory,
    area_settings_factory,
):
    """A signed-in user sees the create form and can submit a proposal during the request phase."""
    phase, module, project, item = setup_phase(
        phase_factory, proposal_factory, phases.RequestPhase
    )
    area_settings_factory(module=module)
    category = category_factory(module=module)
    url = reverse(
        "a4_candy_budgeting:proposal-create",
        kwargs={
            "organisation_slug": project.organisation.slug,
            "module_slug": module.slug,
        },
    )
    with freeze_phase(phase):
        client.login(username=user.email, password="password")
        response = client.get(url)
        assert_template_response(
            response, "a4_candy_budgeting/proposal_create_form.html"
        )
        data = {
            "name": "Proposal",
            "description": "description",
            "category": category.pk,
            "budget": 123,
            "point": (0, 0),
            "point_label": "somewhere",
            "organisation_terms_of_use": True,
        }
        response = client.post(url, data)
        # Successful creation redirects to the new proposal's detail view.
        assert redirect_target(response) == "proposal-detail"
@pytest.mark.django_db
def test_anonymous_cannot_create_proposal(client, phase_factory):
    """An anonymous visitor is redirected to the login page instead of the create form."""
    phase = phase_factory(phase_content=phases.RequestPhase())
    module = phase.module
    url = reverse(
        "a4_candy_budgeting:proposal-create",
        kwargs={
            "organisation_slug": module.project.organisation.slug,
            "module_slug": module.slug,
        },
    )
    with freeze_phase(phase):
        count = models.Proposal.objects.all().count()
        assert count == 0
        response = client.get(url)
        # No proposal form for anonymous users: redirect (302) to account_login.
        assert response.status_code == 302
        assert redirect_target(response) == "account_login"
@pytest.mark.django_db
def test_user_can_create_proposal_during_active_phase(
    client, phase_factory, user, category_factory, area_settings_factory
):
    """A normal user can create a proposal while the request phase is active."""
    phase = phase_factory(phase_content=phases.RequestPhase())
    module = phase.module
    area_settings_factory(module=module)
    category = category_factory(module=module)
    url = reverse(
        "a4_candy_budgeting:proposal-create",
        kwargs={
            "organisation_slug": module.project.organisation.slug,
            "module_slug": module.slug,
        },
    )
    with freeze_phase(phase):
        count = models.Proposal.objects.all().count()
        assert count == 0
        client.login(username=user.email, password="password")
        response = client.get(url)
        assert_template_response(
            response, "a4_candy_budgeting/proposal_create_form.html"
        )
        assert response.status_code == 200
        proposal = {
            "name": "Proposal",
            "description": "description",
            "category": category.pk,
            "budget": 123,
            "point": (0, 0),
            "point_label": "somewhere",
            "organisation_terms_of_use": True,
        }
        response = client.post(url, proposal)
        assert response.status_code == 302
        assert redirect_target(response) == "proposal-detail"
        # Exactly one proposal was persisted.
        count = models.Proposal.objects.all().count()
        assert count == 1
@pytest.mark.django_db
def test_user_cannot_create_proposal_past_phase(client, phase_factory, user):
    """Once the request phase is over, regular users get 403 on the create view."""
    phase = phase_factory(phase_content=phases.RequestPhase())
    module = phase.module
    url = reverse(
        "a4_candy_budgeting:proposal-create",
        kwargs={
            "organisation_slug": module.project.organisation.slug,
            "module_slug": module.slug,
        },
    )
    with freeze_post_phase(phase):
        # Anonymous: redirected to login.
        response = client.get(url)
        assert response.status_code == 302
        # Authenticated but phase over: forbidden.
        client.login(username=user.email, password="password")
        response = client.get(url)
        assert response.status_code == 403
@pytest.mark.django_db
def test_user_can_create_proposal_only_with_terms_agreement(
    client,
    phase_factory,
    user,
    category_factory,
    organisation_terms_of_use_factory,
    area_settings_factory,
):
    """Posting without agreeing to the organisation terms re-renders the form;
    once the user has agreed, the same payload succeeds."""
    phase = phase_factory(phase_content=phases.RequestPhase())
    module = phase.module
    area_settings_factory(module=module)
    category = category_factory(module=module)
    url = reverse(
        "a4_candy_budgeting:proposal-create",
        kwargs={
            "organisation_slug": module.project.organisation.slug,
            "module_slug": module.slug,
        },
    )
    with freeze_phase(phase):
        count = models.Proposal.objects.all().count()
        assert count == 0
        client.login(username=user.email, password="password")
        response = client.get(url)
        assert_template_response(
            response, "a4_candy_budgeting/proposal_create_form.html"
        )
        assert response.status_code == 200
        # Note: no "organisation_terms_of_use" key in the payload.
        proposal = {
            "name": "Proposal",
            "description": "description",
            "category": category.pk,
            "budget": 123,
            "point": (0, 0),
            "point_label": "somewhere",
        }
        response = client.post(url, proposal)
        # 200 = form re-rendered with validation errors, not a redirect.
        assert response.status_code == 200
        organisation_terms_of_use_factory(
            user=user,
            organisation=module.project.organisation,
            has_agreed=True,
        )
        response = client.post(url, proposal)
        assert response.status_code == 302
        assert redirect_target(response) == "proposal-detail"
        count = models.Proposal.objects.all().count()
        assert count == 1
@pytest.mark.django_db
def test_admin_can_create_proposal_past_phase(
    client, phase_factory, admin, category_factory, area_settings_factory
):
    """Admins may still create proposals after the request phase has ended."""
    phase = phase_factory(phase_content=phases.RequestPhase())
    module = phase.module
    area_settings_factory(module=module)
    category = category_factory(module=module)
    url = reverse(
        "a4_candy_budgeting:proposal-create",
        kwargs={
            "organisation_slug": module.project.organisation.slug,
            "module_slug": module.slug,
        },
    )
    with freeze_post_phase(phase):
        client.login(username=admin.email, password="password")
        response = client.get(url)
        assert_template_response(
            response, "a4_candy_budgeting/proposal_create_form.html"
        )
        assert response.status_code == 200
        proposal = {
            "name": "Proposal",
            "description": "description",
            "category": category.pk,
            "budget": 123,
            "point": (0, 0),
            "point_label": "somewhere",
            "organisation_terms_of_use": True,
        }
        response = client.post(url, proposal)
        assert response.status_code == 302
        assert redirect_target(response) == "proposal-detail"
        count = models.Proposal.objects.all().count()
        assert count == 1
"""Base Indy Holder class."""
from abc import ABC, ABCMeta, abstractmethod
from typing import Tuple, Union
from ..core.error import BaseError
from ..ledger.base import BaseLedger
# Root of the holder exception hierarchy; callers catch this type.
class IndyHolderError(BaseError):
    """Base class for holder exceptions."""
class IndyHolder(ABC, metaclass=ABCMeta):
    """Base class for holder."""

    # Non-secret record type used to store per-attribute MIME types.
    RECORD_TYPE_MIME_TYPES = "attribute-mime-types"
    # Page size used when scanning credentials.
    CHUNK = 256

    def __repr__(self) -> str:
        """Return a human readable representation of this class.

        Returns:
            A human readable string for this class

        """
        return "<{}>".format(self.__class__.__name__)

    @abstractmethod
    async def get_credential(self, credential_id: str) -> str:
        """Get a credential stored in the wallet.

        Args:
            credential_id: Credential id to retrieve

        """

    @abstractmethod
    async def credential_revoked(
        self, ledger: BaseLedger, credential_id: str, fro: int = None, to: int = None
    ) -> bool:
        """Check ledger for revocation status of credential by cred id.

        Args:
            ledger: Ledger to check against
            credential_id: Credential id to check
            fro: Earliest epoch of interest (optional)
            to: Latest epoch of interest (optional)

        """

    @abstractmethod
    async def delete_credential(self, credential_id: str):
        """Remove a credential stored in the wallet.

        Args:
            credential_id: Credential id to remove

        """

    @abstractmethod
    async def get_mime_type(
        self, credential_id: str, attr: str = None
    ) -> Union[dict, str]:
        """Get MIME type per attribute (or for all attributes).

        Args:
            credential_id: credential id
            attr: attribute of interest or omit for all

        Returns:
            Attribute MIME type or dict mapping attribute names to MIME types

        """

    @abstractmethod
    async def create_presentation(
        self,
        presentation_request: dict,
        requested_credentials: dict,
        schemas: dict,
        credential_definitions: dict,
        rev_states: dict = None,
    ) -> str:
        """Get credentials stored in the wallet.

        Args:
            presentation_request: Valid indy format presentation request
            requested_credentials: Indy format requested credentials
            schemas: Indy formatted schemas JSON
            credential_definitions: Indy formatted credential definitions JSON
            rev_states: Indy format revocation states JSON

        """

    @abstractmethod
    async def create_credential_request(
        self, credential_offer: dict, credential_definition: dict, holder_did: str
    ) -> Tuple[str, str]:
        """Create a credential request for the given credential offer.

        Args:
            credential_offer: The credential offer to create request for
            credential_definition: The credential definition to create an offer for
            holder_did: the DID of the agent making the request

        Returns:
            A tuple of the credential request and credential request metadata

        """

    @abstractmethod
    async def store_credential(
        self,
        credential_definition: dict,
        credential_data: dict,
        credential_request_metadata: dict,
        credential_attr_mime_types=None,
        credential_id: str = None,
        rev_reg_def: dict = None,
    ):
        """Store a credential in the wallet.

        Args:
            credential_definition: Credential definition for this credential
            credential_data: Credential data generated by the issuer
            credential_request_metadata: credential request metadata generated
                by the issuer
            credential_attr_mime_types: dict mapping attribute names to (optional)
                MIME types to store as non-secret record, if specified
            credential_id: optionally override the stored credential id
            rev_reg_def: revocation registry definition in json

        Returns:
            the ID of the stored credential

        """

    @abstractmethod
    async def create_revocation_state(
        self,
        cred_rev_id: str,
        rev_reg_def: dict,
        rev_reg_delta: dict,
        timestamp: int,
        tails_file_path: str,
    ) -> str:
        """Create current revocation state for a received credential.

        Args:
            cred_rev_id: credential revocation id in revocation registry
            rev_reg_def: revocation registry definition
            rev_reg_delta: revocation delta
            timestamp: delta timestamp
            tails_file_path: path to the local tails file

        Returns:
            the revocation state

        """
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "sentinel incident comment delete",
    is_experimental=True,
    confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
    """Delete the incident comment.
    """

    _aaz_info = {
        "version": "2022-06-01-preview",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.operationalinsights/workspaces/{}/providers/microsoft.securityinsights/incidents/{}/comments/{}", "2022-06-01-preview"],
        ]
    }

    def _handler(self, command_args):
        super()._handler(command_args)
        self._execute_operations()
        return None

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Cache the schema on the class; it only needs to be built once.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.incident_comment_id = AAZStrArg(
            options=["-n", "--name", "--incident-comment-id"],
            help="Incident comment ID",
            required=True,
            id_part="child_name_2",
        )
        _args_schema.incident_id = AAZStrArg(
            options=["--incident-id"],
            help="Incident ID",
            required=True,
            id_part="child_name_1",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.workspace_name = AAZStrArg(
            options=["-w", "--workspace-name"],
            help="The name of the workspace.",
            required=True,
            is_experimental=True,
            id_part="name",
        )
        return cls._args_schema

    def _execute_operations(self):
        self.IncidentCommentsDelete(ctx=self.ctx)()

    class IncidentCommentsDelete(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # 200 = deleted, 204 = no content (already absent); both succeed.
            if session.http_response.status_code in [200]:
                return self.on_200(session)
            if session.http_response.status_code in [204]:
                return self.on_204(session)
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/incidents/{incidentId}/comments/{incidentCommentId}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "DELETE"

        @property
        def error_format(self):
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "incidentCommentId", self.ctx.args.incident_comment_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "incidentId", self.ctx.args.incident_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "workspaceName", self.ctx.args.workspace_name,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-06-01-preview",
                    required=True,
                ),
            }
            return parameters

        def on_200(self, session):
            pass

        def on_204(self, session):
            pass
__all__ = ["Delete"] |
# NOTE: trailing dataset-viewer boilerplate (non-code artifact) removed.