id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
6,900 | set up | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Thales Netherlands
# Copyright (c) 2021, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.community.general.tests.unit.compat import unittest
from ansible_collections.community.general.tests.unit.compat.mock import patch
from ansible_collections.community.general.tests.unit.mock.loader import DictDataLoader
from ansible.plugins import AnsiblePlugin
from ansible.template import Templar
from ansible.errors import AnsibleError
from ansible.utils.display import Display
from ansible_collections.community.general.plugins.lookup import merge_variables
class TestMergeVariablesLookup(unittest.TestCase):
def METHOD_NAME(self):
self.loader = DictDataLoader({})
self.templar = Templar(loader=self.loader, variables={})
self.merge_vars_lookup = merge_variables.LookupModule(loader=self.loader, templar=self.templar)
@patch.object(AnsiblePlugin, 'set_options')
@patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'ignore', 'suffix'])
@patch.object(Templar, 'template', side_effect=[['item1'], ['item3']])
def test_merge_list(self, mock_set_options, mock_get_option, mock_template):
results = self.merge_vars_lookup.run(['__merge_list'], {
'testlist1__merge_list': ['item1'],
'testlist2': ['item2'],
'testlist3__merge_list': ['item3']
})
self.assertEqual(results, [['item1', 'item3']])
@patch.object(AnsiblePlugin, 'set_options')
@patch.object(AnsiblePlugin, 'get_option', side_effect=[['initial_item'], 'ignore', 'suffix'])
@patch.object(Templar, 'template', side_effect=[['item1'], ['item3']])
def test_merge_list_with_initial_value(self, mock_set_options, mock_get_option, mock_template):
results = self.merge_vars_lookup.run(['__merge_list'], {
'testlist1__merge_list': ['item1'],
'testlist2': ['item2'],
'testlist3__merge_list': ['item3']
})
self.assertEqual(results, [['initial_item', 'item1', 'item3']])
@patch.object(AnsiblePlugin, 'set_options')
@patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'ignore', 'suffix'])
@patch.object(Templar, 'template', side_effect=[{'item1': 'test', 'list_item': ['test1']},
{'item2': 'test', 'list_item': ['test2']}])
def test_merge_dict(self, mock_set_options, mock_get_option, mock_template):
results = self.merge_vars_lookup.run(['__merge_dict'], {
'testdict1__merge_dict': {
'item1': 'test',
'list_item': ['test1']
},
'testdict2__merge_dict': {
'item2': 'test',
'list_item': ['test2']
}
})
self.assertEqual(results, [
{
'item1': 'test',
'item2': 'test',
'list_item': ['test1', 'test2']
}
])
@patch.object(AnsiblePlugin, 'set_options')
@patch.object(AnsiblePlugin, 'get_option', side_effect=[{'initial_item': 'random value', 'list_item': ['test0']},
'ignore', 'suffix'])
@patch.object(Templar, 'template', side_effect=[{'item1': 'test', 'list_item': ['test1']},
{'item2': 'test', 'list_item': ['test2']}])
def test_merge_dict_with_initial_value(self, mock_set_options, mock_get_option, mock_template):
results = self.merge_vars_lookup.run(['__merge_dict'], {
'testdict1__merge_dict': {
'item1': 'test',
'list_item': ['test1']
},
'testdict2__merge_dict': {
'item2': 'test',
'list_item': ['test2']
}
})
self.assertEqual(results, [
{
'initial_item': 'random value',
'item1': 'test',
'item2': 'test',
'list_item': ['test0', 'test1', 'test2']
}
])
@patch.object(AnsiblePlugin, 'set_options')
@patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'warn', 'suffix'])
@patch.object(Templar, 'template', side_effect=[{'item': 'value1'}, {'item': 'value2'}])
@patch.object(Display, 'warning')
def test_merge_dict_non_unique_warning(self, mock_set_options, mock_get_option, mock_template, mock_display):
results = self.merge_vars_lookup.run(['__merge_non_unique'], {
'testdict1__merge_non_unique': {'item': 'value1'},
'testdict2__merge_non_unique': {'item': 'value2'}
})
self.assertTrue(mock_display.called)
self.assertEqual(results, [{'item': 'value2'}])
@patch.object(AnsiblePlugin, 'set_options')
@patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'error', 'suffix'])
@patch.object(Templar, 'template', side_effect=[{'item': 'value1'}, {'item': 'value2'}])
def test_merge_dict_non_unique_error(self, mock_set_options, mock_get_option, mock_template):
with self.assertRaises(AnsibleError):
self.merge_vars_lookup.run(['__merge_non_unique'], {
'testdict1__merge_non_unique': {'item': 'value1'},
'testdict2__merge_non_unique': {'item': 'value2'}
})
@patch.object(AnsiblePlugin, 'set_options')
@patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'ignore', 'suffix'])
@patch.object(Templar, 'template', side_effect=[{'item1': 'test', 'list_item': ['test1']},
['item2', 'item3']])
def test_merge_list_and_dict(self, mock_set_options, mock_get_option, mock_template):
with self.assertRaises(AnsibleError):
self.merge_vars_lookup.run(['__merge_var'], {
'testlist__merge_var': {
'item1': 'test',
'list_item': ['test1']
},
'testdict__merge_var': ['item2', 'item3']
}) |
6,901 | sync db update processing job | """ The database is used to store information regarding jobs and workspaces.
Jobs: for every process-request a job is inserted into the database with an uuid, status and
information about the process like parameters and file groups. It is mainly used to track the status
(`ocrd_network.models.job.StateEnum`) of a job so that the state of a job can be queried. Finished
jobs are not deleted from the database.
Workspaces: A job or a processor always runs on a workspace. So a processor needs the information
where the workspace is available. This information can be set with providing an absolute path or a
workspace_id. With the latter, the database is used to convert the workspace_id to a path.
XXX: Currently the information is not preserved after the processing-server shuts down as the
database (runs in docker) currently has no volume set.
"""
from beanie import init_beanie
from motor.motor_asyncio import AsyncIOMotorClient
from .models import (
DBProcessorJob,
DBWorkspace
)
from .utils import call_sync
async def initiate_database(db_url: str):
client = AsyncIOMotorClient(db_url)
await init_beanie(
database=client.get_default_database(default='ocrd'),
document_models=[DBProcessorJob, DBWorkspace]
)
@call_sync
async def sync_initiate_database(db_url: str):
await initiate_database(db_url)
async def db_get_workspace(workspace_id: str = None, workspace_mets_path: str = None) -> DBWorkspace:
workspace = None
if not workspace_id and not workspace_mets_path:
raise ValueError(f'Either `workspace_id` or `workspace_mets_path` field must be used as a search key')
if workspace_id:
workspace = await DBWorkspace.find_one(
DBWorkspace.workspace_id == workspace_id
)
if not workspace:
raise ValueError(f'Workspace with id "{workspace_id}" not in the DB.')
if workspace_mets_path:
workspace = await DBWorkspace.find_one(
DBWorkspace.workspace_mets_path == workspace_mets_path
)
if not workspace:
raise ValueError(f'Workspace with path "{workspace_mets_path}" not in the DB.')
return workspace
@call_sync
async def sync_db_get_workspace(workspace_id: str = None, workspace_mets_path: str = None) -> DBWorkspace:
return await db_get_workspace(workspace_id=workspace_id, workspace_mets_path=workspace_mets_path)
async def db_update_workspace(workspace_id: str = None, workspace_mets_path: str = None, **kwargs):
workspace = None
if not workspace_id and not workspace_mets_path:
raise ValueError(f'Either `workspace_id` or `workspace_mets_path` field must be used as a search key')
if workspace_id:
workspace = await DBWorkspace.find_one(
DBWorkspace.workspace_id == workspace_id
)
if not workspace:
raise ValueError(f'Workspace with id "{workspace_id}" not in the DB.')
if workspace_mets_path:
workspace = await DBWorkspace.find_one(
DBWorkspace.workspace_mets_path == workspace_mets_path
)
if not workspace:
raise ValueError(f'Workspace with path "{workspace_mets_path}" not in the DB.')
job_keys = list(workspace.__dict__.keys())
for key, value in kwargs.items():
if key not in job_keys:
raise ValueError(f'Field "{key}" is not available.')
if key == 'workspace_id':
workspace.workspace_id = value
elif key == 'workspace_mets_path':
workspace.workspace_mets_path = value
elif key == 'ocrd_identifier':
workspace.ocrd_identifier = value
elif key == 'bagit_profile_identifier':
workspace.bagit_profile_identifier = value
elif key == 'ocrd_base_version_checksum':
workspace.ocrd_base_version_checksum = value
elif key == 'ocrd_mets':
workspace.ocrd_mets = value
elif key == 'bag_info_adds':
workspace.bag_info_adds = value
elif key == 'deleted':
workspace.deleted = value
elif key == 'pages_locked':
workspace.pages_locked = value
else:
raise ValueError(f'Field "{key}" is not updatable.')
await workspace.save()
@call_sync
async def sync_db_update_workspace(workspace_id: str = None, workspace_mets_path: str = None, **kwargs):
await db_update_workspace(workspace_id=workspace_id, workspace_mets_path=workspace_mets_path, **kwargs)
async def db_get_processing_job(job_id: str) -> DBProcessorJob:
job = await DBProcessorJob.find_one(
DBProcessorJob.job_id == job_id)
if not job:
raise ValueError(f'Processing job with id "{job_id}" not in the DB.')
return job
@call_sync
async def sync_db_get_processing_job(job_id: str) -> DBProcessorJob:
return await db_get_processing_job(job_id)
async def db_update_processing_job(job_id: str, **kwargs):
job = await DBProcessorJob.find_one(
DBProcessorJob.job_id == job_id)
if not job:
raise ValueError(f'Processing job with id "{job_id}" not in the DB.')
job_keys = list(job.__dict__.keys())
for key, value in kwargs.items():
if key not in job_keys:
raise ValueError(f'Field "{key}" is not available.')
if key == 'state':
job.state = value
elif key == 'start_time':
job.start_time = value
elif key == 'end_time':
job.end_time = value
elif key == 'path_to_mets':
job.path_to_mets = value
elif key == 'exec_time':
job.exec_time = value
else:
raise ValueError(f'Field "{key}" is not updatable.')
await job.save()
@call_sync
async def METHOD_NAME(job_id: str, **kwargs):
await db_update_processing_job(job_id=job_id, **kwargs) |
6,902 | generate input | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
import argparse
import numpy as np
from functools import partial
import copy
class TestLookupTablebleOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.X86,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 2])
self.enable_testing_on_place(
TargetType.ARM,
PrecisionType.Any,
DataLayoutType.NCHW,
thread=[1, 2, 4])
self.enable_testing_on_place(TargetType.NNAdapter, PrecisionType.FP32)
self.enable_devices_on_nnadapter(device_names=["intel_openvino"])
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
return True
def sample_program_configs(self, draw):
in_shape = draw(
st.lists(
st.integers(
min_value=2, max_value=100),
min_size=2,
max_size=2))
id_shape = draw(
st.lists(
st.integers(
min_value=2, max_value=100),
min_size=2,
max_size=3))
pidx = draw(st.sampled_from([-1, 0, 1, 2]))
op_type_str = draw(
st.sampled_from(["lookup_table", "lookup_table_v2"]))
if "intel_openvino" in self.get_nnadapter_device_name():
op_type_str = "lookup_table_v2"
def METHOD_NAME(*args, **kwargs):
return np.random.random(in_shape).astype(np.float32)
def generate_ids(*args, **kwargs):
extend_id = copy.deepcopy(id_shape)
extend_id.append(1)
return np.random.randint(
low=0, high=in_shape[0], size=extend_id).astype(np.int64)
build_ops = OpConfig(
type=op_type_str,
inputs={
"W": ["w_data"],
"Ids": ["ids_data"],
},
outputs={"Out": ["output_data"], },
attrs={"padding_idx": int(pidx), })
program_config = ProgramConfig(
ops=[build_ops],
weights={},
inputs={
"w_data": TensorConfig(data_gen=partial(METHOD_NAME)),
"ids_data": TensorConfig(data_gen=partial(generate_ids)),
},
outputs=["output_data"])
return program_config
def sample_predictor_configs(self):
return self.get_predictor_configs(), ["lookup_table_v1_and_v2"], (1e-5,
1e-5)
def add_ignore_pass_case(self):
def _teller1(program_config, predictor_config):
target_type = predictor_config.target()
pidx = program_config.ops[0].attrs["padding_idx"]
if target_type == TargetType.NNAdapter:
if pidx != -1:
return True
self.add_ignore_check_case(
_teller1, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Lite does not support this op when 'padding_idx != -1' on nnadapter."
)
def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=50)
if __name__ == "__main__":
unittest.main(argv=['']) |
6,903 | print packages | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import click
from tabulate import tabulate
from platformio import util
from platformio.account.client import AccountClient
@click.command("show", short_help="PlatformIO Account information")
@click.option("--offline", is_flag=True)
@click.option("--json-output", is_flag=True)
def account_show_cmd(offline, json_output):
client = AccountClient()
info = client.get_account_info(offline)
if json_output:
click.echo(json.dumps(info))
return
click.echo()
if info.get("profile"):
print_profile(info["profile"])
if info.get("packages"):
METHOD_NAME(info["packages"])
if info.get("subscriptions"):
print_subscriptions(info["subscriptions"])
click.echo()
def print_profile(profile):
click.secho("Profile", fg="cyan", bold=True)
click.echo("=" * len("Profile"))
data = []
if profile.get("username"):
data.append(("Username:", profile["username"]))
if profile.get("email"):
data.append(("Email:", profile["email"]))
if profile.get("firstname"):
data.append(("First name:", profile["firstname"]))
if profile.get("lastname"):
data.append(("Last name:", profile["lastname"]))
click.echo(tabulate(data, tablefmt="plain"))
def METHOD_NAME(packages):
click.echo()
click.secho("Packages", fg="cyan")
click.echo("=" * len("Packages"))
for package in packages:
click.echo()
click.secho(package.get("name"), bold=True)
click.echo("-" * len(package.get("name")))
if package.get("description"):
click.echo(package.get("description"))
data = []
expire = "-"
if "subscription" in package:
expire = util.parse_datetime(
package["subscription"].get("end_at")
or package["subscription"].get("next_bill_at")
).strftime("%Y-%m-%d")
data.append(("Expire:", expire))
services = []
for key in package:
if not key.startswith("service."):
continue
if isinstance(package[key], dict):
services.append(package[key].get("title"))
else:
services.append(package[key])
if services:
data.append(("Services:", ", ".join(services)))
click.echo(tabulate(data, tablefmt="plain"))
def print_subscriptions(subscriptions):
click.echo()
click.secho("Subscriptions", fg="cyan")
click.echo("=" * len("Subscriptions"))
for subscription in subscriptions:
click.echo()
click.secho(subscription.get("product_name"), bold=True)
click.echo("-" * len(subscription.get("product_name")))
data = [("State:", subscription.get("status"))]
begin_at = util.parse_datetime(subscription.get("begin_at")).strftime("%c")
data.append(("Start date:", begin_at or "-"))
end_at = subscription.get("end_at")
if end_at:
end_at = util.parse_datetime(subscription.get("end_at")).strftime("%c")
data.append(("End date:", end_at or "-"))
next_bill_at = subscription.get("next_bill_at")
if next_bill_at:
next_bill_at = util.parse_datetime(
subscription.get("next_bill_at")
).strftime("%c")
data.append(("Next payment:", next_bill_at or "-"))
data.append(
("Edit:", click.style(subscription.get("update_url"), fg="blue") or "-")
)
data.append(
("Cancel:", click.style(subscription.get("cancel_url"), fg="blue") or "-")
)
click.echo(tabulate(data, tablefmt="plain")) |
6,904 | recreate prow config | #!/usr/bin/env python
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# USAGE: have KUBECONFIG pointed at your prow cluster then from test-infra root:
#
# prow/recreate_prow_configmaps.py [--wet]
#
from __future__ import print_function
import os
import sys
import argparse
import subprocess
def METHOD_NAME(wet, configmap_name, path):
print('recreating prow config:')
cmd = (
'kubectl create configmap %s'
' --from-file=config.yaml=%s'
' --dry-run -o yaml | kubectl replace configmap %s -f -'
) % (configmap_name, path, configmap_name)
real_cmd = ['/bin/sh', '-c', cmd]
print(real_cmd)
if wet:
subprocess.check_call(real_cmd)
def recreate_plugins_config(wet, configmap_name, path):
print('recreating plugins config:')
cmd = (
'kubectl create configmap %s'
' --from-file=plugins.yaml=%s'
' --dry-run -o yaml | kubectl replace configmap %s -f -'
) % (configmap_name, path, configmap_name)
real_cmd = ['/bin/sh', '-c', cmd]
print(real_cmd)
if wet:
subprocess.check_call(real_cmd)
def recreate_job_config(wet, job_configmap, job_config_dir):
print('recreating jobs config:')
# regenerate
paths = []
cmd = ["kubectl", "create", "configmap", job_configmap]
for root, _, files in os.walk(job_config_dir):
for name in files:
if name.endswith(".yaml"):
path = os.path.join(root, name)
real_cmd = ['/bin/sh', '-c', 'gzip -k ' + path]
print(real_cmd)
if wet:
subprocess.check_call(real_cmd)
paths.append(path)
cmd.append('--from-file=%s=%s' % (name, path + '.gz'))
cmd.append('--dry-run -o yaml | kubectl replace configmap %s -f -' % (job_configmap))
real_cmd = ['/bin/sh', '-c', ' '.join(cmd)]
print(real_cmd)
if wet:
subprocess.check_call(real_cmd)
for path in paths:
real_cmd = ['/bin/sh', '-c', 'rm ' + path + '.gz']
print(real_cmd)
if wet:
subprocess.check_call(real_cmd)
def main():
parser = argparse.ArgumentParser()
# jobs config
parser.add_argument("--job-configmap", default="job-config", help="name of prow jobs configmap")
parser.add_argument(
"--job-config-dir", default="config/jobs",
help="root dir of prow jobs configmap")
# prow config
parser.add_argument("--prow-configmap", default="config",
help="name of prow primary configmap")
parser.add_argument(
"--prow-config-path", default="config.yaml",
help="path to the primary prow config")
# plugins config
parser.add_argument("--plugins-configmap", default="plugins",
help="name of prow plugins configmap")
parser.add_argument(
"--plugins-config-path", default="plugins.yaml",
help="path to the prow plugins config")
# wet or dry?
parser.add_argument("--wet", action="store_true")
parser.add_argument("--silent", action="store_true",
help="if confirmation is needed for the change")
args = parser.parse_args()
# debug the current context
out = subprocess.check_output(['kubectl', 'config', 'current-context'])
print('Current KUBECONFIG context: ' + out.decode("utf-8"))
# require additional confirmation in --wet mode
prompt = '!' * 65 + (
"\n!! WARNING THIS WILL RECREATE **ALL** PROW CONFIGMAPS. !!"
"\n!! ARE YOU SURE YOU WANT TO DO THIS? IF SO, ENTER 'YES'. !! "
) + '\n' + '!' * 65 + '\n\n: '
if args.wet and not args.silent:
if input(prompt) != "YES":
print("you did not enter 'YES'")
sys.exit(-1)
# first prow config
METHOD_NAME(args.wet, args.prow_configmap, args.prow_config_path)
print('')
# then plugins config
recreate_plugins_config(args.wet, args.plugins_configmap, args.plugins_config_path)
print('')
# finally jobs config
recreate_job_config(args.wet, args.job_configmap, args.job_config_dir)
if __name__ == '__main__':
main() |
6,905 | make client | # Copyright (C) 2022 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
import json
from http import HTTPStatus
import pytest
from cvat_sdk.api_client import ApiClient, Configuration, models
from shared.utils.config import BASE_URL, USER_PASS, make_api_client
@pytest.mark.usefixtures("restore_db_per_class")
class TestBasicAuth:
def test_can_do_basic_auth(self, admin_user: str):
username = admin_user
config = Configuration(host=BASE_URL, username=username, password=USER_PASS)
with ApiClient(config) as client:
(user, response) = client.users_api.retrieve_self()
assert response.status == HTTPStatus.OK
assert user.username == username
@pytest.mark.usefixtures("restore_db_per_function")
class TestTokenAuth:
@staticmethod
def login(client: ApiClient, username: str) -> models.Token:
(auth, _) = client.auth_api.create_login(
models.LoginSerializerExRequest(username=username, password=USER_PASS)
)
client.set_default_header("Authorization", "Token " + auth.key)
return auth
@classmethod
def METHOD_NAME(cls, username: str) -> ApiClient:
with ApiClient(Configuration(host=BASE_URL)) as client:
cls.login(client, username)
return client
def test_can_do_token_auth_and_manage_cookies(self, admin_user: str):
username = admin_user
with ApiClient(Configuration(host=BASE_URL)) as api_client:
auth = self.login(api_client, username=username)
assert "sessionid" in api_client.cookies
assert "csrftoken" in api_client.cookies
assert auth.key
(user, response) = api_client.users_api.retrieve_self()
assert response.status == HTTPStatus.OK
assert user.username == username
def test_can_do_token_auth_from_config(self, admin_user: str):
username = admin_user
with make_api_client(username) as api_client:
auth = self.login(api_client, username=username)
config = Configuration(
host=BASE_URL,
api_key={
"sessionAuth": api_client.cookies["sessionid"].value,
"csrfAuth": api_client.cookies["csrftoken"].value,
"tokenAuth": auth.key,
},
)
with ApiClient(config) as api_client:
auth = self.login(api_client, username=username)
assert "sessionid" in api_client.cookies
assert "csrftoken" in api_client.cookies
assert auth.key
(user, response) = api_client.users_api.retrieve_self()
assert response.status == HTTPStatus.OK
assert user.username == username
def test_can_do_logout(self, admin_user: str):
username = admin_user
with self.METHOD_NAME(username) as api_client:
(_, response) = api_client.auth_api.create_logout()
assert response.status == HTTPStatus.OK
(_, response) = api_client.users_api.retrieve_self(
_parse_response=False, _check_status=False
)
assert response.status == HTTPStatus.UNAUTHORIZED
@pytest.mark.usefixtures("restore_db_per_function")
class TestCredentialsManagement:
def test_can_register(self):
username = "newuser"
email = "123@456.com"
with ApiClient(Configuration(host=BASE_URL)) as api_client:
(user, response) = api_client.auth_api.create_register(
models.RegisterSerializerExRequest(
username=username, password1=USER_PASS, password2=USER_PASS, email=email
)
)
assert response.status == HTTPStatus.CREATED
assert user.username == username
with make_api_client(username) as api_client:
(user, response) = api_client.users_api.retrieve_self()
assert response.status == HTTPStatus.OK
assert user.username == username
assert user.email == email
def test_can_change_password(self, admin_user: str):
username = admin_user
new_pass = "5w4knrqaW#$@gewa"
with make_api_client(username) as api_client:
(info, response) = api_client.auth_api.create_password_change(
models.PasswordChangeRequest(
old_password=USER_PASS, new_password1=new_pass, new_password2=new_pass
)
)
assert response.status == HTTPStatus.OK
assert info.detail == "New password has been saved."
(_, response) = api_client.users_api.retrieve_self(
_parse_response=False, _check_status=False
)
assert response.status == HTTPStatus.UNAUTHORIZED
api_client.configuration.password = new_pass
(user, response) = api_client.users_api.retrieve_self()
assert response.status == HTTPStatus.OK
assert user.username == username
def test_can_report_weak_password(self, admin_user: str):
username = admin_user
new_pass = "pass"
with make_api_client(username) as api_client:
(_, response) = api_client.auth_api.create_password_change(
models.PasswordChangeRequest(
old_password=USER_PASS, new_password1=new_pass, new_password2=new_pass
),
_parse_response=False,
_check_status=False,
)
assert response.status == HTTPStatus.BAD_REQUEST
assert json.loads(response.data) == {
"new_password2": [
"This password is too short. It must contain at least 8 characters.",
"This password is too common.",
]
}
def test_can_report_mismatching_passwords(self, admin_user: str):
username = admin_user
with make_api_client(username) as api_client:
(_, response) = api_client.auth_api.create_password_change(
models.PasswordChangeRequest(
old_password=USER_PASS, new_password1="3j4tb13/T$#", new_password2="q#@$n34g5"
),
_parse_response=False,
_check_status=False,
)
assert response.status == HTTPStatus.BAD_REQUEST
assert json.loads(response.data) == {
"new_password2": ["The two password fields didn’t match."]
} |
6,906 | test rename stereotype attrs field | import pytest
from gaphor.storage.parser import element
from gaphor.storage.storage import load_elements
from gaphor.storage.upgrade_canvasitem import upgrade_canvasitem
from gaphor.UML import diagramitems
@pytest.fixture
def loader(element_factory, modeling_language):
def _loader(*parsed_items):
for item in parsed_items:
if item.type.endswith("Item"):
item.references["diagram"] = "1"
upgrade_canvasitem(item, "1.0.0")
parsed_data = {
"1": element(id="1", type="Diagram"),
**{p.id: p for p in parsed_items},
}
load_elements(parsed_data, element_factory, modeling_language)
return list(element_factory.lselect()[0].get_all_items())
return _loader
def test_upgrade_metaclass_item_to_class_item(loader):
item = loader(element(id="2", type="MetaclassItem"))[0]
assert type(item) == diagramitems.ClassItem
def test_upgrade_subsystem_item_to_class_item(loader):
item = loader(element(id="2", type="SubsystemItem"))[0]
assert type(item) == diagramitems.ComponentItem
def METHOD_NAME(loader):
parsed_item = element(id="2", type="ClassItem")
parsed_item.values["show_stereotypes_attrs"] = "1"
item = loader(parsed_item)[0]
assert not hasattr(item, "show_stereotypes_attrs")
assert item.show_stereotypes
def test_rename_show_attributes_and_operations_field(loader):
parsed_item = element(id="2", type="ClassItem")
parsed_item.values["show-attributes"] = "0"
parsed_item.values["show-operations"] = "0"
item = loader(parsed_item)[0]
assert not item.show_attributes
assert not item.show_operations
def test_interface_drawing_style_normal(loader):
parsed_item = element(id="2", type="InterfaceItem")
parsed_item.values["drawing-style"] = "0" # DRAW_NONE
item = loader(parsed_item)[0]
assert item.folded.name == "NONE"
def test_interface_drawing_style_folded(loader):
parsed_item = element(id="2", type="InterfaceItem")
parsed_item.values["drawing-style"] = "3" # DRAW_ICON
item = loader(parsed_item)[0]
assert item.folded.name == "PROVIDED"
def test_upgrade_generalization_arrow_direction(loader):
cls1 = element(id="2", type="ClassItem")
cls2 = element(id="3", type="ClassItem")
gen_item = element(id="4", type="GeneralizationItem")
gen_item.references["head-connection"] = ["2"]
gen_item.references["tail-connection"] = ["3"]
cls1, cls2, gen_item = loader(cls1, cls2, gen_item)
assert gen_item.diagram.connections.get_connection(gen_item.head).connected is cls2
assert gen_item.diagram.connections.get_connection(gen_item.tail).connected is cls1
def test_upgrade_flow_item_to_control_flow_item(element_factory, modeling_language):
diagram = element(id="1", type="Diagram")
objnode = element(id="2", type="ControlFlow")
item = element(id="3", type="FlowItem")
item.references["diagram"] = diagram.id
item.references["subject"] = objnode.id
load_elements(
{p.id: p for p in (diagram, objnode, item)}, element_factory, modeling_language
)
assert element_factory.lselect(diagramitems.ControlFlowItem)
def test_upgrade_flow_item_to_object_flow_item(element_factory, modeling_language):
diagram = element(id="1", type="Diagram")
objnode = element(id="2", type="ObjectFlow")
item = element(id="3", type="FlowItem")
item.references["diagram"] = diagram.id
item.references["subject"] = objnode.id
load_elements(
{p.id: p for p in (diagram, objnode, item)}, element_factory, modeling_language
)
assert element_factory.lselect(diagramitems.ObjectFlowItem)
def test_upgradedecision_node_item_show_type(loader):
parsed_item = element(id="2", type="DecisionNodeItem")
parsed_item.values["show_type"] = "1"
item = loader(parsed_item)[0]
assert item.show_underlying_type == 1
@pytest.mark.parametrize("type", ["Property", "Port", "ProxyPort"])
def test_upgrade_delete_property_information_flow(
element_factory, modeling_language, type
):
prop = element(id="1", type=type)
iflow = element(id="2", type="InformationFlow")
prop.references["informationFlow"] = ["1"]
load_elements({e.id: e for e in (prop, iflow)}, element_factory, modeling_language)
assert element_factory.lselect(modeling_language.lookup_element(type))
assert element_factory.lselect(modeling_language.lookup_element("InformationFlow"))
def test_upgrade_note_on_model_element(loader, element_factory):
cls_item = element(id="2", type="ClassItem")
cls = element(id="3", type="Class")
cls_item.values["note"] = "my note"
cls_item.references["subject"] = cls.id
loader(cls_item, cls)
_, cls_item, cls, *_ = element_factory.lselect()
assert not cls_item.note
assert cls.note == "my note"
def test_upgrade_append_notes_on_model_element(loader, element_factory):
cls_item1 = element(id="2", type="ClassItem")
cls_item2 = element(id="3", type="ClassItem")
cls = element(id="44", type="Class")
cls_item1.values["note"] = "my note"
cls_item1.references["subject"] = cls.id
cls_item2.values["note"] = "another note"
cls_item2.references["subject"] = cls.id
loader(cls_item1, cls_item2, cls)
_, cls_item1, cls_item2, cls, *_ = element_factory.lselect()
assert not cls_item1.note
assert not cls_item2.note
assert cls.note == "my note\n\nanother note" |
6,907 | get next | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_list_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2022-12-01-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.MachineLearningServices/operations")
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
# fmt: on
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.machinelearningservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AmlOperationListResult"]
"""Lists all of the available Azure Machine Learning Services REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AmlOperationListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.AmlOperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-12-01-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.AmlOperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("AmlOperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def METHOD_NAME(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
METHOD_NAME, extract_data
)
list.metadata = {'url': "/providers/Microsoft.MachineLearningServices/operations"} # type: ignore |
6,908 | model bernoulli likelihood | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
Example: Horseshoe Regression
=============================
We demonstrate how to use NUTS to do sparse regression using
the Horseshoe prior [1] for both continuous- and binary-valued
responses. For a more complex modeling and inference approach
that also supports quadratic interaction terms in a way that
is efficient in high dimensions see examples/sparse_regression.py.
References:
[1] "Handling Sparsity via the Horseshoe,"
Carlos M. Carvalho, Nicholas G. Polson, James G. Scott.
"""
import argparse
import os
import time
import numpy as np
from scipy.special import expit
import jax.numpy as jnp
import jax.random as random
import numpyro
from numpyro.diagnostics import summary
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS
# regression model with continuous-valued outputs/responses
def model_normal_likelihood(X, Y):
D_X = X.shape[1]
# sample from horseshoe prior
lambdas = numpyro.sample("lambdas", dist.HalfCauchy(jnp.ones(D_X)))
tau = numpyro.sample("tau", dist.HalfCauchy(jnp.ones(1)))
# note that in practice for a normal likelihood we would probably want to
# integrate out the coefficients (as is done for example in sparse_regression.py).
# however, this trick wouldn't be applicable to other likelihoods
# (e.g. bernoulli, see below) so we don't make use of it here.
unscaled_betas = numpyro.sample("unscaled_betas", dist.Normal(0.0, jnp.ones(D_X)))
scaled_betas = numpyro.deterministic("betas", tau * lambdas * unscaled_betas)
# compute mean function using linear coefficients
mean_function = jnp.dot(X, scaled_betas)
prec_obs = numpyro.sample("prec_obs", dist.Gamma(3.0, 1.0))
sigma_obs = 1.0 / jnp.sqrt(prec_obs)
# observe data
numpyro.sample("Y", dist.Normal(mean_function, sigma_obs), obs=Y)
# regression model with binary-valued outputs/responses
def METHOD_NAME(X, Y):
D_X = X.shape[1]
# sample from horseshoe prior
lambdas = numpyro.sample("lambdas", dist.HalfCauchy(jnp.ones(D_X)))
tau = numpyro.sample("tau", dist.HalfCauchy(jnp.ones(1)))
# note that this reparameterization (i.e. coordinate transformation) improves
# posterior geometry and makes NUTS sampling more efficient
unscaled_betas = numpyro.sample("unscaled_betas", dist.Normal(0.0, jnp.ones(D_X)))
scaled_betas = numpyro.deterministic("betas", tau * lambdas * unscaled_betas)
# compute mean function using linear coefficients
mean_function = jnp.dot(X, scaled_betas)
# observe data
numpyro.sample("Y", dist.Bernoulli(logits=mean_function), obs=Y)
# helper function for HMC inference
def run_inference(model, args, rng_key, X, Y):
start = time.time()
kernel = NUTS(model)
mcmc = MCMC(
kernel,
num_warmup=args.num_warmup,
num_samples=args.num_samples,
num_chains=args.num_chains,
progress_bar=False if "NUMPYRO_SPHINXBUILD" in os.environ else True,
)
mcmc.run(rng_key, X, Y)
mcmc.print_summary(exclude_deterministic=False)
samples = mcmc.get_samples()
summary_dict = summary(samples, group_by_chain=False)
print("\nMCMC elapsed time:", time.time() - start)
return summary_dict
# create artificial regression dataset with 3 non-zero regression coefficients
def get_data(N=50, D_X=3, sigma_obs=0.05, response="continuous"):
assert response in ["continuous", "binary"]
assert D_X >= 3
np.random.seed(0)
X = np.random.randn(N, D_X)
# the response only depends on X_0, X_1, and X_2
W = np.array([2.0, -1.0, 0.50])
Y = jnp.dot(X[:, :3], W)
Y -= jnp.mean(Y)
if response == "continuous":
Y += sigma_obs * np.random.randn(N)
elif response == "binary":
Y = np.random.binomial(1, expit(Y))
assert X.shape == (N, D_X)
assert Y.shape == (N,)
return X, Y
def main(args):
N, D_X = args.num_data, 32
print("[Experiment with continuous-valued responses]")
# first generate and analyze data with continuous-valued responses
X, Y = get_data(N=N, D_X=D_X, response="continuous")
# do inference
rng_key, rng_key_predict = random.split(random.PRNGKey(0))
summary = run_inference(model_normal_likelihood, args, rng_key, X, Y)
# lambda should only be large for the first 3 dimensions, which
# correspond to relevant covariates (see get_data)
print("Posterior median over lambdas (leading 5 dimensions):")
print(summary["lambdas"]["median"][:5])
print("Posterior mean over betas (leading 5 dimensions):")
print(summary["betas"]["mean"][:5])
print("[Experiment with binary-valued responses]")
# next generate and analyze data with binary-valued responses
# (note we use more data for the case of binary-valued responses,
# since each response carries less information than a real number)
X, Y = get_data(N=4 * N, D_X=D_X, response="binary")
# do inference
rng_key, rng_key_predict = random.split(random.PRNGKey(0))
summary = run_inference(METHOD_NAME, args, rng_key, X, Y)
# lambda should only be large for the first 3 dimensions, which
# correspond to relevant covariates (see get_data)
print("Posterior median over lambdas (leading 5 dimensions):")
print(summary["lambdas"]["median"][:5])
print("Posterior mean over betas (leading 5 dimensions):")
print(summary["betas"]["mean"][:5])
if __name__ == "__main__":
assert numpyro.__version__.startswith("0.13.0")
parser = argparse.ArgumentParser(description="Horseshoe regression example")
parser.add_argument("-n", "--num-samples", nargs="?", default=2000, type=int)
parser.add_argument("--num-warmup", nargs="?", default=1000, type=int)
parser.add_argument("--num-chains", nargs="?", default=1, type=int)
parser.add_argument("--num-data", nargs="?", default=100, type=int)
parser.add_argument("--device", default="cpu", type=str, help='use "cpu" or "gpu".')
args = parser.parse_args()
numpyro.set_platform(args.device)
numpyro.set_host_device_count(args.num_chains)
main(args) |
6,909 | test ipu to dense batch no batch | import pytest
import torch
from torch_geometric.data import Data, Batch
from graphium.ipu.to_dense_batch import to_dense_batch
from warnings import warn
# General imports
import yaml
import unittest as ut
import numpy as np
from copy import deepcopy
from warnings import warn
from lightning import Trainer, LightningModule
from lightning_graphcore import IPUStrategy
from functools import partial
import torch
from torch.utils.data.dataloader import default_collate
# Current library imports
from graphium.config._loader import load_datamodule, load_metrics, load_architecture, load_accelerator
@pytest.mark.ipu
class TestIPUBatch:
@pytest.fixture(autouse=True)
def setup_class(self):
self.in_dim = 12
self.out_dim = 12
self.in_dim_edges = 10
self.out_dim_edges = 10
self.edge_idx1 = torch.stack(
[torch.tensor([0, 1, 2, 3, 2], dtype=torch.int), torch.tensor([1, 2, 3, 0, 0], dtype=torch.int)]
)
self.edge_idx2 = torch.stack(
[torch.tensor([0, 0, 0, 1], dtype=torch.int), torch.tensor([0, 1, 2, 0], dtype=torch.int)]
)
self.x1 = torch.randn(self.edge_idx1.max().item() + 1, self.in_dim, dtype=torch.float32)
self.e1 = torch.randn(self.edge_idx1.shape[-1], self.in_dim_edges, dtype=torch.float32)
self.x2 = torch.randn(self.edge_idx2.max().item() + 1, self.in_dim, dtype=torch.float32)
self.e2 = torch.randn(self.edge_idx2.shape[-1], self.in_dim_edges, dtype=torch.float32)
self.g1 = Data(feat=self.x1, edge_index=self.edge_idx1, edge_feat=self.e1)
self.g2 = Data(feat=self.x2, edge_index=self.edge_idx2, edge_feat=self.e2)
self.bg = Batch.from_data_list([self.g1, self.g2])
self.attn_kwargs = {"embed_dim": self.in_dim, "num_heads": 2, "batch_first": True}
# @pytest.mark.skip
@pytest.mark.parametrize("max_num_nodes_per_graph, batch_size", [(10, 5), (20, 10), (30, 15)])
def test_ipu_to_dense_batch(self, max_num_nodes_per_graph, batch_size):
# Run this test only if poptorch is available
try:
import poptorch
opts = poptorch.Options()
opts.useIpuModel(True)
class MyModel(torch.nn.Module):
def __init__(self):
super(MyModel, self).__init__()
def forward(self, x, batch):
return to_dense_batch(
x,
batch=batch,
batch_size=batch_size,
max_num_nodes_per_graph=max_num_nodes_per_graph,
drop_nodes_last_graph=False,
)
model = MyModel()
model = model.eval()
poptorch_model_inf = poptorch.inferenceModel(model, options=opts)
# for data in train_dataloader:
out, mask, idx = poptorch_model_inf(self.bg.feat, self.bg.batch)
# Check the output sizes
assert out.size() == torch.Size([batch_size, max_num_nodes_per_graph, 12])
# Check the mask for true / false values
assert mask.size() == torch.Size([batch_size, max_num_nodes_per_graph])
assert torch.sum(mask) == 7
assert (mask[0][:4] == True).all()
assert (mask[0][4:] == False).all()
assert (mask[1][:3] == True).all()
assert (mask[1][3:] == False).all()
assert (mask[2:] == False).all()
# Check the idx are all the true values in the mask
assert (mask.flatten()[idx] == True).all()
poptorch_model_inf.detachFromDevice()
except ImportError:
pytest.skip("Skipping this test because poptorch is not available")
def test_ipu_to_dense_batch_no_batch_no_max_nodes(self):
h_dense, mask = to_dense_batch(
self.bg.feat,
batch=None,
batch_size=None,
max_num_nodes_per_graph=None,
drop_nodes_last_graph=False,
)
# Add assertions to check the output as needed
assert torch.allclose(h_dense, self.bg.feat.unsqueeze(0), atol=1e-5), "Tensors are not equal"
assert mask.size(1) == h_dense.size(1)
assert mask.all().item(), "Not all values in the tensor are True"
def METHOD_NAME(self):
max_nodes_per_graph = 10
h_dense, mask, id = to_dense_batch(
self.bg.feat,
batch=None,
batch_size=None,
max_num_nodes_per_graph=max_nodes_per_graph,
drop_nodes_last_graph=False,
)
assert mask.size() == (1, max_nodes_per_graph)
assert torch.sum(mask) == 7
assert torch.equal(id, torch.arange(7))
assert h_dense.size() == (1, max_nodes_per_graph, self.bg.feat.size(-1))
def test_ipu_to_dense_batch_drop_last(self):
out, mask, idx = to_dense_batch(
self.bg.feat,
batch=None,
batch_size=None,
max_num_nodes_per_graph=3,
drop_nodes_last_graph=True,
)
# Add assertions to check the output as needed
assert mask.size(1) == out.size(1)
# Check the mask and output have been clipped
assert mask.size() == torch.Size([1, 3])
assert mask.all().item(), "Not all values in the tensor are True" |
6,910 | integer range | # This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
import hashlib
import math
from random import Random
from hypothesis import Verbosity, assume, settings
from hypothesis.database import ExampleDatabase
from hypothesis.internal.compat import PYPY
from hypothesis.internal.floats import float_to_int, int_to_float, is_negative
from hypothesis.stateful import Bundle, RuleBasedStateMachine, rule
from hypothesis.strategies import (
binary,
booleans,
complex_numbers,
data,
decimals,
floats,
fractions,
integers,
just,
lists,
none,
sampled_from,
text,
tuples,
)
AVERAGE_LIST_LENGTH = 2
def clamp(lower, value, upper):
"""Given a value and optional lower/upper bounds, 'clamp' the value so that
it satisfies lower <= value <= upper."""
if (lower is not None) and (upper is not None) and (lower > upper):
raise ValueError(f"Cannot clamp with lower > upper: {lower!r} > {upper!r}")
if lower is not None:
value = max(lower, value)
if upper is not None:
value = min(value, upper)
return value
class HypothesisSpec(RuleBasedStateMachine):
def __init__(self):
super().__init__()
self.database = None
strategies = Bundle("strategy")
strategy_tuples = Bundle("tuples")
objects = Bundle("objects")
basic_data = Bundle("basic")
varied_floats = Bundle("varied_floats")
def teardown(self):
self.clear_database()
@rule()
def clear_database(self):
if self.database is not None:
self.database = None
@rule()
def set_database(self):
self.teardown()
self.database = ExampleDatabase()
@rule(
target=strategies,
spec=sampled_from(
(
integers(),
booleans(),
floats(),
complex_numbers(),
fractions(),
decimals(),
text(),
binary(),
none(),
tuples(),
)
),
)
def strategy(self, spec):
return spec
@rule(target=strategies, values=lists(integers() | text(), min_size=1))
def sampled_from_strategy(self, values):
return sampled_from(values)
@rule(target=strategies, spec=strategy_tuples)
def strategy_for_tupes(self, spec):
return tuples(*spec)
@rule(target=strategies, source=strategies, level=integers(1, 10), mixer=text())
def filtered_strategy(self, source, level, mixer):
def is_good(x):
seed = hashlib.sha384((mixer + repr(x)).encode()).digest()
return bool(Random(seed).randint(0, level))
return source.filter(is_good)
@rule(target=strategies, elements=strategies)
def list_strategy(self, elements):
return lists(elements)
@rule(target=strategies, left=strategies, right=strategies)
def or_strategy(self, left, right):
return left | right
@rule(target=varied_floats, source=floats())
def float(self, source):
return source
@rule(target=varied_floats, source=varied_floats, offset=integers(-100, 100))
def adjust_float(self, source, offset):
return int_to_float(clamp(0, float_to_int(source) + offset, 2**64 - 1))
@rule(target=strategies, left=varied_floats, right=varied_floats)
def float_range(self, left, right):
assume(math.isfinite(left) and math.isfinite(right))
left, right = sorted((left, right))
assert left <= right
# exclude deprecated case where left = 0.0 and right = -0.0
assume(left or right or not (is_negative(right) and not is_negative(left)))
return floats(left, right)
@rule(
target=strategies,
source=strategies,
result1=strategies,
result2=strategies,
mixer=text(),
p=floats(0, 1),
)
def flatmapped_strategy(self, source, result1, result2, mixer, p):
assume(result1 is not result2)
def do_map(value):
rep = repr(value)
random = Random(hashlib.sha384((mixer + rep).encode()).digest())
if random.random() <= p:
return result1
else:
return result2
return source.flatmap(do_map)
@rule(target=strategies, value=objects)
def just_strategy(self, value):
return just(value)
@rule(target=strategy_tuples, source=strategies)
def single_tuple(self, source):
return (source,)
@rule(target=strategy_tuples, left=strategy_tuples, right=strategy_tuples)
def cat_tuples(self, left, right):
return left + right
@rule(target=objects, strat=strategies, data=data())
def get_example(self, strat, data):
data.draw(strat)
@rule(target=strategies, left=integers(), right=integers())
def METHOD_NAME(self, left, right):
left, right = sorted((left, right))
return integers(left, right)
@rule(strat=strategies)
def repr_is_good(self, strat):
assert " at 0x" not in repr(strat)
MAIN = __name__ == "__main__"
TestHypothesis = HypothesisSpec.TestCase
TestHypothesis.settings = settings(
TestHypothesis.settings,
stateful_step_count=10 if PYPY else 50,
verbosity=max(TestHypothesis.settings.verbosity, Verbosity.verbose),
max_examples=10000 if MAIN else 200,
)
if MAIN:
TestHypothesis().runTest() |
6,911 | url | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"networkfabric interface wait",
)
class Wait(AAZWaitCommand):
"""Place the CLI in a waiting state until a condition is met.
"""
_aaz_info = {
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.managednetworkfabric/networkdevices/{}/networkinterfaces/{}", "2023-06-15"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.network_device_name = AAZStrArg(
options=["--device", "--network-device-name"],
help="Name of the Network Device.",
required=True,
id_part="name",
)
_args_schema.resource_name = AAZStrArg(
options=["--resource-name"],
help="Name of the Network Interface.",
required=True,
id_part="child_name_1",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
help="Name of the resource group",
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.NetworkInterfacesGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=False)
return result
class NetworkInterfacesGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def METHOD_NAME(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetworkFabric/networkDevices/{networkDeviceName}/networkInterfaces/{networkInterfaceName}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"networkDeviceName", self.ctx.args.network_device_name,
required=True,
),
**self.serialize_url_param(
"networkInterfaceName", self.ctx.args.resource_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-06-15",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType(
flags={"required": True, "client_flatten": True},
)
_schema_on_200.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.properties
properties.administrative_state = AAZStrType(
serialized_name="administrativeState",
flags={"read_only": True},
)
properties.annotation = AAZStrType()
properties.connected_to = AAZStrType(
serialized_name="connectedTo",
flags={"read_only": True},
)
properties.interface_type = AAZStrType(
serialized_name="interfaceType",
flags={"read_only": True},
)
properties.ipv4_address = AAZStrType(
serialized_name="ipv4Address",
flags={"read_only": True},
)
properties.ipv6_address = AAZStrType(
serialized_name="ipv6Address",
flags={"read_only": True},
)
properties.physical_identifier = AAZStrType(
serialized_name="physicalIdentifier",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
system_data = cls._schema_on_200.system_data
system_data.created_at = AAZStrType(
serialized_name="createdAt",
)
system_data.created_by = AAZStrType(
serialized_name="createdBy",
)
system_data.created_by_type = AAZStrType(
serialized_name="createdByType",
)
system_data.last_modified_at = AAZStrType(
serialized_name="lastModifiedAt",
)
system_data.last_modified_by = AAZStrType(
serialized_name="lastModifiedBy",
)
system_data.last_modified_by_type = AAZStrType(
serialized_name="lastModifiedByType",
)
return cls._schema_on_200
class _WaitHelper:
"""Helper class for Wait"""
__all__ = ["Wait"] |
6,912 | get coarse ds | import intake
import xarray as xr
import os
from dask.diagnostics import ProgressBar
import fsspec
from vcm.catalog import catalog as CATALOG
from vcm.fv3.metadata import standardize_fv3_diagnostics
from vcm import convert_timestamps
from vcm.cubedsphere import center_and_rotate_xy_winds
from vcm.safe import get_variables
WIND_ROTATION_MATRIX = CATALOG["wind_rotation/c48"].to_dask()
GRID = CATALOG["grid/c48"].to_dask()
COARSE_NUDGED_PATH = "gs://vcm-ml-experiments/cloud-ml/2022-08-23/cloud-ml-training-data-nudge-to-fine-v4/fv3gfs_run" # noqa: E501
FINE_RESTARTS_KEYS = [
"40day_c48_restarts_as_zarr_may2020",
"40day_c48_gfsphysics_15min_may2020",
]
FINE_TO_COARSE_RENAME = {
"T": "air_temperature",
"sphum": "specific_humidity",
"eastward_wind": "eastward_wind",
"northward_wind": "northward_wind",
"DZ": "vertical_thickness_of_atmospheric_layer",
"delp": "pressure_thickness_of_atmospheric_layer",
"liq_wat": "cloud_water_mixing_ratio",
"ice_wat": "cloud_ice_mixing_ratio",
"rainwat": "rain_mixing_ratio",
"snowwat": "snow_mixing_ratio",
"graupel": "graupel_mixing_ratio",
"cld_amt": "cloud_amount",
"DCLWRFsfc": "clear_sky_downward_longwave_flux_at_surface",
"DCSWRFsfc": "clear_sky_downward_shortwave_flux_at_surface",
"DLWRFsfc": "total_sky_downward_longwave_flux_at_surface",
"DSWRFsfc": "total_sky_downward_shortwave_flux_at_surface",
"DSWRFtoa": "total_sky_downward_shortwave_flux_at_top_of_atmosphere",
"UCLWRFsfc": "clear_sky_upward_longwave_flux_at_surface",
"UCLWRFtoa": "clear_sky_upward_longwave_flux_at_top_of_atmosphere",
"UCSWRFsfc": "clear_sky_upward_shortwave_flux_at_surface",
"UCSWRFtoa": "clear_sky_upward_shortwave_flux_at_top_of_atmosphere",
"ULWRFsfc": "total_sky_upward_longwave_flux_at_surface",
"ULWRFtoa": "total_sky_upward_longwave_flux_at_top_of_atmosphere",
"USWRFsfc": "total_sky_upward_shortwave_flux_at_surface",
"USWRFtoa": "total_sky_upward_shortwave_flux_at_top_of_atmosphere",
}
RENAME_DIMS = {"pfull": "z"}
COORD_VARS = ["x", "y", "z", "tile"]
OUTPUT_CHUNKS = {"tile": 6}
OUTPUT_PATH = "gs://vcm-ml-experiments/cloud-ml/2022-08-23/fine-coarse-3d-fields.zarr"
def rotate_winds(ds):
eastward_wind, northward_wind = center_and_rotate_xy_winds(
WIND_ROTATION_MATRIX, ds.u, ds.v
)
ds["eastward_wind"] = eastward_wind
ds["northward_wind"] = northward_wind
return ds.drop_vars(["u", "v"])
def get_fine_ds():
datasets = []
for key in FINE_RESTARTS_KEYS:
dataset = CATALOG[key].to_dask()
if isinstance(dataset.time[0].item(), str):
dataset = dataset.assign_coords({"time": convert_timestamps(dataset.time)})
dataset = standardize_fv3_diagnostics(dataset)
if "pfull" in dataset.dims:
dataset = dataset.rename(RENAME_DIMS)
datasets.append(dataset)
ds = xr.merge(datasets)
rotate_winds(ds)
ds_3d = xr.Dataset()
for restart_name, python_name in FINE_TO_COARSE_RENAME.items():
ds_3d[python_name] = ds[restart_name]
return ds_3d.drop_vars(COORD_VARS)
def METHOD_NAME():
full_path = os.path.join(COARSE_NUDGED_PATH, "state_after_timestep.zarr")
ds = intake.open_zarr(full_path, consolidated=True).to_dask()
ds_3d = xr.Dataset()
for var in FINE_TO_COARSE_RENAME.values():
ds_3d[var] = ds[var]
return ds_3d
def merge_coarse_fine(coarse, fine):
# subset times and variables to intersection
common_times = xr.DataArray(
data=sorted(list(set(coarse.time.values).intersection(set(fine.time.values)))),
dims=["time"],
)
coarse, fine = coarse.sel(time=common_times), fine.sel(time=common_times)
common_vars = set(coarse.data_vars).intersection(fine.data_vars)
coarse, fine = get_variables(coarse, common_vars), get_variables(fine, common_vars)
# add 'res' dimension and concatenate
merged = xr.concat(
[coarse.expand_dims({"res": ["coarse"]}), fine.expand_dims({"res": ["fine"]})],
dim="res",
)
# add grid variables back
return xr.merge([merged, GRID])
def main():
"""
Let's make a merged dataset of fine and coarse 3D (really, time-tile-x-y-z)
variables, including the water tracer species. Specifically:
- air temperature
- specific humidity
- northward wind
- eastward wind
- layer pressure thickness
- layer height thickness
- cloud water
- cloud ice
- graupel water
- rain water
- snow water
- cloud fraction
- clear_sky_downward_longwave_flux_at_surface
- clear_sky_downward_shortwave_flux_at_surface
- total_sky_downward_longwave_flux_at_surface
- total_sky_downward_shortwave_flux_at_surface
- total_sky_downward_shortwave_flux_at_top_of_atmosphere
- clear_sky_upward_longwave_flux_at_surface
- clear_sky_upward_longwave_flux_at_top_of_atmosphere
- clear_sky_upward_shortwave_flux_at_surface
- clear_sky_upward_shortwave_flux_at_top_of_atmosphere
- total_sky_upward_longwave_flux_at_surface
- total_sky_upward_longwave_flux_at_top_of_atmosphere
- total_sky_upward_shortwave_flux_at_surface
- total_sky_upward_shortwave_flux_at_top_of_atmosphere
"""
fine = get_fine_ds()
coarse = METHOD_NAME()
merged = merge_coarse_fine(coarse, fine)
merged = merged.unify_chunks().chunk(OUTPUT_CHUNKS)
with ProgressBar():
merged.to_zarr(fsspec.get_mapper(OUTPUT_PATH), consolidated=True)
if __name__ == "__main__":
main() |
6,913 | test unix mbox | # This set of tests exercises the backward-compatibility class
# in mailbox.py (the ones without write support).
from __future__ import with_statement
import mailbox
import os
import time
import unittest
from test import test_support
# cleanup earlier tests
try:
os.unlink(test_support.TESTFN)
except os.error:
pass
FROM_ = "From some.body@dummy.domain Sat Jul 24 13:43:35 2004\n"
DUMMY_MESSAGE = """\
From: some.body@dummy.domain
To: me@my.domain
Subject: Simple Test
This is a dummy message.
"""
class MaildirTestCase(unittest.TestCase):
def setUp(self):
# create a new maildir mailbox to work with:
self._dir = test_support.TESTFN
os.mkdir(self._dir)
os.mkdir(os.path.join(self._dir, "cur"))
os.mkdir(os.path.join(self._dir, "tmp"))
os.mkdir(os.path.join(self._dir, "new"))
self._counter = 1
self._msgfiles = []
def tearDown(self):
map(os.unlink, self._msgfiles)
os.rmdir(os.path.join(self._dir, "cur"))
os.rmdir(os.path.join(self._dir, "tmp"))
os.rmdir(os.path.join(self._dir, "new"))
os.rmdir(self._dir)
def createMessage(self, dir, mbox=False):
t = int(time.time() % 1000000)
pid = self._counter
self._counter += 1
filename = os.extsep.join((str(t), str(pid), "myhostname", "mydomain"))
tmpname = os.path.join(self._dir, "tmp", filename)
newname = os.path.join(self._dir, dir, filename)
fp = open(tmpname, "w")
self._msgfiles.append(tmpname)
if mbox:
fp.write(FROM_)
fp.write(DUMMY_MESSAGE)
fp.close()
if hasattr(os, "link"):
os.link(tmpname, newname)
else:
fp = open(newname, "w")
fp.write(DUMMY_MESSAGE)
fp.close()
self._msgfiles.append(newname)
return tmpname
def assert_and_close(self, message):
self.assert_(message is not None)
message.fp.close()
def test_empty_maildir(self):
"""Test an empty maildir mailbox"""
# Test for regression on bug #117490:
self.mbox = mailbox.Maildir(test_support.TESTFN)
self.assert_(len(self.mbox) == 0)
self.assert_(self.mbox.next() is None)
self.assert_(self.mbox.next() is None)
def test_nonempty_maildir_cur(self):
self.createMessage("cur")
self.mbox = mailbox.Maildir(test_support.TESTFN)
self.assert_(len(self.mbox) == 1)
self.assert_and_close(self.mbox.next())
self.assert_(self.mbox.next() is None)
self.assert_(self.mbox.next() is None)
def test_nonempty_maildir_new(self):
self.createMessage("new")
self.mbox = mailbox.Maildir(test_support.TESTFN)
self.assert_(len(self.mbox) == 1)
self.assert_and_close(self.mbox.next())
self.assert_(self.mbox.next() is None)
self.assert_(self.mbox.next() is None)
def test_nonempty_maildir_both(self):
self.createMessage("cur")
self.createMessage("new")
self.mbox = mailbox.Maildir(test_support.TESTFN)
self.assert_(len(self.mbox) == 2)
self.assert_and_close(self.mbox.next())
self.assert_and_close(self.mbox.next())
self.assert_(self.mbox.next() is None)
self.assert_(self.mbox.next() is None)
def METHOD_NAME(self):
### should be better!
import email.Parser
fname = self.createMessage("cur", True)
n = 0
with open(fname) as fp:
for msg in mailbox.PortableUnixMailbox(fp,
email.Parser.Parser().parse):
n += 1
self.assertEqual(msg["subject"], "Simple Test")
self.assertEqual(len(str(msg)), len(FROM_)+len(DUMMY_MESSAGE))
self.assertEqual(n, 1)
class MboxTestCase(unittest.TestCase):
def setUp(self):
# create a new maildir mailbox to work with:
self._path = test_support.TESTFN
def tearDown(self):
os.unlink(self._path)
def test_from_regex (self):
# Testing new regex from bug #1633678
f = open(self._path, 'w')
f.write("""From fred@example.com Mon May 31 13:24:50 2004 +0200
Subject: message 1
body1
From fred@example.com Mon May 31 13:24:50 2004 -0200
Subject: message 2
body2
From fred@example.com Mon May 31 13:24:50 2004
Subject: message 3
body3
From fred@example.com Mon May 31 13:24:50 2004
Subject: message 4
body4
""")
f.close()
box = mailbox.UnixMailbox(open(self._path, 'rb'))
messages = list(iter(box))
self.assert_(len(messages) == 4)
for message in messages:
message.fp.close()
box.fp.close() # Jython addition: explicit close needed
# XXX We still need more tests!
def test_main():
test_support.run_unittest(MaildirTestCase, MboxTestCase)
if __name__ == "__main__":
test_main() |
6,914 | test select from datasets | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class DirectedInterleaveDatasetTest(test_base.DatasetTestBase):
def testBasic(self):
selector_dataset = dataset_ops.Dataset.range(10).repeat(100)
input_datasets = [
dataset_ops.Dataset.from_tensors(i).repeat(100) for i in range(10)
]
dataset = interleave_ops._DirectedInterleaveDataset(selector_dataset,
input_datasets)
next_element = self.getNext(dataset)
for _ in range(100):
for i in range(10):
self.assertEqual(i, self.evaluate(next_element()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def _normalize(self, vec):
return vec / vec.sum()
def _chi2(self, expected, actual):
actual = np.asarray(actual)
expected = np.asarray(expected)
diff = actual - expected
chi2 = np.sum(diff * diff / expected, axis=0)
return chi2
def _testSampleFromDatasetsHelper(self, weights, num_datasets, num_samples):
# Create a dataset that samples each integer in `[0, num_datasets)`
# with probability given by `weights[i]`.
dataset = interleave_ops.sample_from_datasets([
dataset_ops.Dataset.from_tensors(i).repeat(None)
for i in range(num_datasets)
], weights)
dataset = dataset.take(num_samples)
next_element = self.getNext(dataset)
freqs = np.zeros([num_datasets])
for _ in range(num_samples):
freqs[self.evaluate(next_element())] += 1
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
return freqs
def testSampleFromDatasets(self):
random_seed.set_random_seed(1619)
num_samples = 5000
rand_probs = self._normalize(np.random.random_sample((15,)))
# Use chi-squared test to assert that the observed distribution matches the
# expected distribution. Based on the implementation in
# "third_party/tensorflow/python/kernel_tests/multinomial_op_test.py".
for probs in [[.85, .05, .1], rand_probs, [1.]]:
probs = np.asarray(probs)
classes = len(probs)
freqs = self._testSampleFromDatasetsHelper(probs, classes, num_samples)
self.assertLess(self._chi2(probs, freqs / num_samples), 1e-2)
# Also check that `weights` as a dataset samples correctly.
probs_ds = dataset_ops.Dataset.from_tensors(probs).repeat()
freqs = self._testSampleFromDatasetsHelper(probs_ds, classes, num_samples)
self.assertLess(self._chi2(probs, freqs / num_samples), 1e-2)
def METHOD_NAME(self):
words = [b"foo", b"bar", b"baz"]
datasets = [dataset_ops.Dataset.from_tensors(w).repeat() for w in words]
choice_array = np.random.randint(3, size=(15,), dtype=np.int64)
choice_dataset = dataset_ops.Dataset.from_tensor_slices(choice_array)
dataset = interleave_ops.choose_from_datasets(datasets, choice_dataset)
next_element = self.getNext(dataset)
for i in choice_array:
self.assertEqual(words[i], self.evaluate(next_element()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testErrors(self):
with self.assertRaisesRegexp(ValueError,
r"vector of length `len\(datasets\)`"):
interleave_ops.sample_from_datasets(
[dataset_ops.Dataset.range(10),
dataset_ops.Dataset.range(20)],
weights=[0.25, 0.25, 0.25, 0.25])
with self.assertRaisesRegexp(TypeError, "`tf.float32` or `tf.float64`"):
interleave_ops.sample_from_datasets(
[dataset_ops.Dataset.range(10),
dataset_ops.Dataset.range(20)],
weights=[1, 1])
with self.assertRaisesRegexp(TypeError, "must have the same type"):
interleave_ops.sample_from_datasets([
dataset_ops.Dataset.from_tensors(0),
dataset_ops.Dataset.from_tensors(0.0)
])
with self.assertRaisesRegexp(TypeError, "tf.int64"):
interleave_ops.choose_from_datasets([
dataset_ops.Dataset.from_tensors(0),
dataset_ops.Dataset.from_tensors(1)
], choice_dataset=dataset_ops.Dataset.from_tensors(1.0))
with self.assertRaisesRegexp(TypeError, "scalar"):
interleave_ops.choose_from_datasets([
dataset_ops.Dataset.from_tensors(0),
dataset_ops.Dataset.from_tensors(1)
], choice_dataset=dataset_ops.Dataset.from_tensors([1.0]))
with self.assertRaisesRegexp(errors.InvalidArgumentError, "out of range"):
dataset = interleave_ops.choose_from_datasets(
[dataset_ops.Dataset.from_tensors(0)],
choice_dataset=dataset_ops.Dataset.from_tensors(
constant_op.constant(1, dtype=dtypes.int64)))
next_element = self.getNext(dataset)
self.evaluate(next_element())
if __name__ == "__main__":
test.main() |
6,915 | inverse | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Two-qubit YY-rotation gate."""
import math
from typing import Optional
import numpy as np
from qiskit.circuit.gate import Gate
from qiskit.circuit.quantumregister import QuantumRegister
from qiskit.circuit.parameterexpression import ParameterValueType
class RYYGate(Gate):
r"""A parametric 2-qubit :math:`Y \otimes Y` interaction (rotation about YY).
This gate is symmetric, and is maximally entangling at :math:`\theta = \pi/2`.
Can be applied to a :class:`~qiskit.circuit.QuantumCircuit`
with the :meth:`~qiskit.circuit.QuantumCircuit.ryy` method.
**Circuit Symbol:**
.. parsed-literal::
┌─────────┐
q_0: ┤1 ├
│ Ryy(ϴ) │
q_1: ┤0 ├
└─────────┘
**Matrix Representation:**
.. math::
\newcommand{\th}{\frac{\theta}{2}}
R_{YY}(\theta) = \exp\left(-i \th Y{\otimes}Y\right) =
\begin{pmatrix}
\cos\left(\th\right) & 0 & 0 & i\sin\left(\th\right) \\
0 & \cos\left(\th\right) & -i\sin\left(\th\right) & 0 \\
0 & -i\sin\left(\th\right) & \cos\left(\th\right) & 0 \\
i\sin\left(\th\right) & 0 & 0 & \cos\left(\th\right)
\end{pmatrix}
**Examples:**
.. math::
R_{YY}(\theta = 0) = I
.. math::
R_{YY}(\theta = \pi) = i Y \otimes Y
.. math::
R_{YY}\left(\theta = \frac{\pi}{2}\right) = \frac{1}{\sqrt{2}}
\begin{pmatrix}
1 & 0 & 0 & i \\
0 & 1 & -i & 0 \\
0 & -i & 1 & 0 \\
i & 0 & 0 & 1
\end{pmatrix}
"""
def __init__(self, theta: ParameterValueType, label: Optional[str] = None):
"""Create new RYY gate."""
super().__init__("ryy", 2, [theta], label=label)
def _define(self):
"""Calculate a subcircuit that implements this unitary."""
# pylint: disable=cyclic-import
from qiskit.circuit.quantumcircuit import QuantumCircuit
from .x import CXGate
from .rx import RXGate
from .rz import RZGate
# ┌─────────┐ ┌──────────┐
# q_0: ┤ Rx(π/2) ├──■─────────────■──┤ Rx(-π/2) ├
# ├─────────┤┌─┴─┐┌───────┐┌─┴─┐├──────────┤
# q_1: ┤ Rx(π/2) ├┤ X ├┤ Rz(0) ├┤ X ├┤ Rx(-π/2) ├
# └─────────┘└───┘└───────┘└───┘└──────────┘
q = QuantumRegister(2, "q")
theta = self.params[0]
qc = QuantumCircuit(q, name=self.name)
rules = [
(RXGate(np.pi / 2), [q[0]], []),
(RXGate(np.pi / 2), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(RZGate(theta), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(RXGate(-np.pi / 2), [q[0]], []),
(RXGate(-np.pi / 2), [q[1]], []),
]
for instr, qargs, cargs in rules:
qc._append(instr, qargs, cargs)
self.definition = qc
def METHOD_NAME(self):
"""Return inverse RYY gate (i.e. with the negative rotation angle)."""
return RYYGate(-self.params[0])
def __array__(self, dtype=None):
"""Return a numpy.array for the RYY gate."""
theta = float(self.params[0])
cos = math.cos(theta / 2)
isin = 1j * math.sin(theta / 2)
return np.array(
[[cos, 0, 0, isin], [0, cos, -isin, 0], [0, -isin, cos, 0], [isin, 0, 0, cos]],
dtype=dtype,
)
def power(self, exponent: float):
"""Raise gate to a power."""
(theta,) = self.params
return RYYGate(exponent * theta) |
6,916 | test checkin overdue item | # -*- coding: utf-8 -*-
#
# RERO ILS
# Copyright (C) 2019 RERO
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Tests REST return an item API methods in the item api_views."""
from datetime import date, datetime, timedelta, timezone
from flask import url_for
from flask_babelex import gettext as _
from invenio_accounts.testutils import login_user_via_session
from utils import get_json, postdata
from rero_ils.modules.items.api import Item
from rero_ils.modules.items.models import ItemStatus
from rero_ils.modules.loans.utils import get_circ_policy, sum_for_fees
from rero_ils.modules.patron_transactions.utils import \
get_last_transaction_by_loan_pid
def test_checkin_an_item(
client, librarian_martigny, lib_martigny,
item_on_loan_martigny_patron_and_loan_on_loan, loc_public_martigny,
item2_on_loan_martigny_patron_and_loan_on_loan,
circulation_policies):
"""Test the frontend return a checked-out item action."""
# test passes when all required parameters are given
login_user_via_session(client, librarian_martigny.user)
item, patron, loan = item_on_loan_martigny_patron_and_loan_on_loan
# test fails when there is a missing required parameter
res, data = postdata(
client,
'api_item.checkin',
dict(
item_pid=item.pid
)
)
assert res.status_code == 400
# test fails when there is a missing required parameter
res, data = postdata(
client,
'api_item.checkin',
dict(
item_pid=item.pid,
transaction_location_pid=loc_public_martigny.pid
)
)
assert res.status_code == 400
# test fails when there is a missing required parameter
# when item record not found in database, api returns 404
res, data = postdata(
client,
'api_item.checkin',
dict(
transaction_location_pid=loc_public_martigny.pid,
transaction_user_pid=librarian_martigny.pid
)
)
assert res.status_code == 404
# test passes when the transaction location pid is given
res, data = postdata(
client,
'api_item.checkin',
dict(
item_pid=item.pid,
transaction_location_pid=loc_public_martigny.pid,
transaction_user_pid=librarian_martigny.pid
)
)
assert res.status_code == 200
item = Item.get_record_by_pid(item.pid)
assert item.status == ItemStatus.ON_SHELF
# test passes when the transaction library pid is given
item, patron, loan = item2_on_loan_martigny_patron_and_loan_on_loan
res, data = postdata(
client,
'api_item.checkin',
dict(
item_pid=item.pid,
transaction_library_pid=lib_martigny.pid,
transaction_user_pid=librarian_martigny.pid
)
)
assert res.status_code == 200
item = Item.get_record_by_pid(item.pid)
assert item.status == ItemStatus.ON_SHELF
def test_auto_checkin_else(client, librarian_martigny,
patron_martigny, loc_public_martigny,
item_lib_martigny, json_header, lib_martigny,
loc_public_saxon):
"""Test item checkin no action."""
login_user_via_session(client, librarian_martigny.user)
res, data = postdata(
client,
'api_item.checkin',
dict(
item_pid=item_lib_martigny.pid,
transaction_library_pid=lib_martigny.pid,
transaction_user_pid=librarian_martigny.pid
)
)
assert res.status_code == 400
assert get_json(res)['status'] == \
_('error: No circulation action performed: on shelf')
def METHOD_NAME(
client, librarian_martigny, loc_public_martigny,
item_on_loan_martigny_patron_and_loan_on_loan):
"""Test a checkin for an overdue item with incremental fees."""
login_user_via_session(client, librarian_martigny.user)
item, patron, loan = item_on_loan_martigny_patron_and_loan_on_loan
# Update the circulation policy corresponding to the loan
# Update the loan due date
cipo = get_circ_policy(loan)
cipo['overdue_fees'] = {
'intervals': [
{'from': 1, 'to': 5, 'fee_amount': 0.50},
{'from': 6, 'to': 10, 'fee_amount': 1},
{'from': 11, 'fee_amount': 2},
]
}
cipo.update(data=cipo, dbcommit=True, reindex=True)
end = date.today() - timedelta(days=30)
end = datetime(end.year, end.month, end.day, tzinfo=timezone.utc)
end = end - timedelta(microseconds=1)
loan['end_date'] = end.isoformat()
loan = loan.update(loan, dbcommit=True, reindex=True)
fees = loan.get_overdue_fees
total_fees = sum_for_fees(fees)
assert len(fees) > 0
assert total_fees > 0
# Check overdues preview API and check result
url = url_for('api_loan.preview_loan_overdue', loan_pid=loan.pid)
res = client.get(url)
data = get_json(res)
assert res.status_code == 200
assert len(data['steps']) > 0
assert data['total'] > 0
url = url_for(
'api_patrons.patron_overdue_preview_api',
patron_pid=patron.pid
)
res = client.get(url)
data = get_json(res)
assert res.status_code == 200
assert len(data) == 1
assert data[0]['loan']['pid'] == loan.pid
assert len(data[0]['fees']['steps']) > 0
assert data[0]['fees']['total'] > 0
# Do the checkin on the item
res, data = postdata(
client,
'api_item.checkin',
dict(
item_pid=item.pid,
transaction_location_pid=loc_public_martigny.pid,
transaction_user_pid=librarian_martigny.pid
)
)
assert res.status_code == 200
item = Item.get_record_by_pid(item.pid)
assert item.status == ItemStatus.ON_SHELF
# check if overdue transaction are created
trans = get_last_transaction_by_loan_pid(loan.pid)
assert trans.total_amount == total_fees
events = list(trans.events)
assert len(events) == 1
assert len(events[0].get('steps', [])) == len(fees)
# reset the cipo
del cipo['overdue_fees']
cipo.update(data=cipo, dbcommit=True, reindex=True) |
6,917 | build params | # Copyright 2016 Free Software Foundation, Inc.
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
import itertools
import re
from ..Constants import ADVANCED_PARAM_TAB
from ..utils import to_list
from ..Messages import send_warning
from .block import Block
from ._flags import Flags
from ._templates import MakoTemplates
def build(id, label='', category='', flags='', documentation='',
value=None, asserts=None,
parameters=None, inputs=None, outputs=None, templates=None, cpp_templates=None, **kwargs):
block_id = id
cls = type(str(block_id), (Block,), {})
cls.key = block_id
cls.label = label or block_id.title()
cls.category = [cat.strip() for cat in category.split('/') if cat.strip()]
cls.flags = Flags(flags)
if re.match(r'options$|variable|virtual', block_id):
cls.flags.set(Flags.NOT_DSP, Flags.DISABLE_BYPASS)
cls.documentation = {'': documentation.strip('\n\t ').replace('\\\n', '')}
cls.asserts = [_single_mako_expr(a, block_id) for a in to_list(asserts)]
cls.inputs_data = build_ports(inputs, 'sink') if inputs else []
cls.outputs_data = build_ports(outputs, 'source') if outputs else []
cls.parameters_data = METHOD_NAME(parameters or [],
bool(cls.inputs_data), bool(cls.outputs_data), cls.flags, block_id)
cls.extra_data = kwargs
templates = templates or {}
cls.templates = MakoTemplates(
imports=templates.get('imports', ''),
make=templates.get('make', ''),
callbacks=templates.get('callbacks', []),
var_make=templates.get('var_make', ''),
)
cpp_templates = cpp_templates or {}
cls.cpp_templates = MakoTemplates(
includes=cpp_templates.get('includes', []),
make=cpp_templates.get('make', ''),
callbacks=cpp_templates.get('callbacks', []),
var_make=cpp_templates.get('var_make', ''),
link=cpp_templates.get('link', []),
packages=cpp_templates.get('packages', []),
translations=cpp_templates.get('translations', []),
declarations=cpp_templates.get('declarations', ''),
)
# todo: MakoTemplates.compile() to check for errors
cls.value = _single_mako_expr(value, block_id)
return cls
def build_ports(ports_raw, direction):
ports = []
port_ids = set()
stream_port_ids = itertools.count()
for i, port_params in enumerate(ports_raw):
port = port_params.copy()
port['direction'] = direction
port_id = port.setdefault('id', str(next(stream_port_ids)))
if port_id in port_ids:
raise Exception(
'Port id "{}" already exists in {}s'.format(port_id, direction))
port_ids.add(port_id)
ports.append(port)
return ports
def METHOD_NAME(params_raw, have_inputs, have_outputs, flags, block_id):
params = []
def add_param(**data):
params.append(data)
if flags.SHOW_ID in flags:
add_param(id='id', name='ID', dtype='id', hide='none')
else:
add_param(id='id', name='ID', dtype='id', hide='all')
if not flags.not_dsp:
add_param(id='alias', name='Block Alias', dtype='string',
hide='part', category=ADVANCED_PARAM_TAB)
if have_outputs or have_inputs:
add_param(id='affinity', name='Core Affinity', dtype='int_vector',
hide='part', category=ADVANCED_PARAM_TAB)
if have_outputs:
add_param(id='minoutbuf', name='Min Output Buffer', dtype='int',
hide='part', default='0', category=ADVANCED_PARAM_TAB)
add_param(id='maxoutbuf', name='Max Output Buffer', dtype='int',
hide='part', default='0', category=ADVANCED_PARAM_TAB)
base_params_n = {}
for param_data in params_raw:
param_id = param_data['id']
if param_id in params:
raise Exception('Param id "{}" is not unique'.format(param_id))
base_key = param_data.get('base_key', None)
param_data_ext = base_params_n.get(base_key, {}).copy()
param_data_ext.update(param_data)
if 'option_attributes' in param_data:
_validate_option_attributes(param_data_ext, block_id)
add_param(**param_data_ext)
base_params_n[param_id] = param_data_ext
add_param(id='comment', name='Comment', dtype='_multiline', hide='part',
default='', category=ADVANCED_PARAM_TAB)
return params
def _single_mako_expr(value, block_id):
if not value:
return None
value = value.strip()
if not (value.startswith('${') and value.endswith('}')):
raise ValueError(
'{} is not a mako substitution in {}'.format(value, block_id))
return value[2:-1].strip()
def _validate_option_attributes(param_data, block_id):
if param_data['dtype'] != 'enum':
send_warning(
'{} - option_attributes are for enums only, ignoring'.format(block_id))
del param_data['option_attributes']
else:
for key in list(param_data['option_attributes'].keys()):
if key in dir(str):
del param_data['option_attributes'][key]
send_warning(
'{} - option_attribute "{}" overrides str, ignoring'.format(block_id, key)) |
6,918 | download file | import multiprocessing
import os
import subprocess
import tempfile
import time
from pathlib import Path
import boto3
from django.conf import settings
from django.core.management import BaseCommand
from django.db import DEFAULT_DB_ALIAS, connections
APPROX_ADDRESS_BASE_BYTES = 3_300_000_000
def import_single_file(file_name, table_name, database):
file_sql_template = f"""
BEGIN;
SET LOCAL synchronous_commit TO OFF;
COPY {table_name} FROM STDIN CSV;
COMMIT;
"""
host = settings.DATABASES[database]["HOST"]
password = settings.DATABASES[database]["PASSWORD"]
database_name = settings.DATABASES[database]["NAME"]
command = f"""cat {file_name} | psql postgresql://postgres:{password}@{host}/{database_name} -c "{file_sql_template}" """
process = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
)
process.communicate()
return True
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
"--database",
default=DEFAULT_DB_ALIAS,
help=f"Nominates a database to import in to. Defaults to the '{DEFAULT_DB_ALIAS}' database.",
)
parser.add_argument(
"--processes",
default=10,
type=int,
help="The number of jobs to run in parallel",
)
parser.add_argument(
"--import-type",
choices=["addressbase", "uprntocouncil"],
required=True,
help="The type of data to import",
)
parser.add_argument(
"--local-file-path",
action="store",
help="If provided, use a local file rather than downloading",
)
def handle(self, *args, **options):
self.database = options["database"]
self.processes = options["processes"]
self.import_type = options["import_type"]
_tmp_dir = tempfile.TemporaryDirectory()
self.tmp_dir = Path(_tmp_dir.name)
self.local_file_path = Path(options["local_file_path"])
if self.local_file_path:
self.file_path = self.local_file_path
self.s3_client = boto3.client(
"s3", region_name=os.environ.get("AWS_REGION", "eu-west-2")
)
if self.import_type == "addressbase":
self.table_name = "addressbase_address"
else:
self.table_name = "addressbase_uprntocouncil"
if not self.local_file_path:
# Download the file to the tempdir
self.METHOD_NAME()
# Split the file and save the parts in a list
self.split_files = self.split_file()
# Pass that list to the import function
with connections[self.database].cursor() as cursor:
self.cursor = cursor
self.cursor.execute(
f"ALTER TABLE {self.table_name} SET (autovacuum_enabled = false);"
)
self.stdout.write("clearing existing data..")
cursor.execute(f"TRUNCATE TABLE {self.table_name} CASCADE;")
self.run_processes()
self.cursor.execute(
f"ALTER TABLE {self.table_name} SET (autovacuum_enabled = true);"
)
_tmp_dir.cleanup()
def METHOD_NAME(self):
"""
Find the latest file of the file type and download it to the temp_dir
"""
files = self.s3_client.list_objects_v2(
Bucket=settings.PRIVATE_DATA_BUCKET_NAME, Prefix=f"{self.import_type}/"
)["Contents"]
latest_file_key = sorted(files, key=lambda f: f["LastModified"])[0]["Key"]
print(latest_file_key)
file = Path(self.tmp_dir.name) / self.import_type / "full.csv"
file.parent.mkdir(exist_ok=True, parents=True)
self.file_path = file
with file.open("wb") as f:
self.s3_client.download_fileobj(
settings.PRIVATE_DATA_BUCKET_NAME, latest_file_key, f
)
def run_processes(self):
pool = multiprocessing.Pool(self.processes)
results = []
for file_name in self.split_files:
result = pool.apply_async(
import_single_file, (file_name, self.table_name, self.database)
)
results.append(result)
pool.close()
while True:
time.sleep(1)
# catch exception if results are not ready yet
self.cursor.execute(
f"select SUM(bytes_processed) / {APPROX_ADDRESS_BASE_BYTES} from pg_stat_progress_copy;"
)
self.stdout.write(f"Rough % done: {self.cursor.fetchone()}")
ready = [result.ready() for result in results]
successful = [result.successful() for result in results if result.ready()]
self.stdout.write(f"{ready=}")
self.stdout.write(f"{successful=}")
# exit loop if all tasks returned success
if len(successful) == self.processes and all(successful):
break
# raise exception reporting exceptions received from workers
if len(successful) == self.processes:
raise Exception(
f"Workers raised following exceptions {[result._value for result in results if not result.successful()]}"
)
def split_file(self):
self.split_dir = self.tmp_dir / "split"
self.split_dir.mkdir(parents=True, exist_ok=True)
self.stdout.write(
f"Splitting {self.file_path} in to {self.processes} parts, saving to {self.split_dir}"
)
args = [
"split",
"-n",
f"l/{self.processes}",
"--additional-suffix=.csv",
f"{self.local_file_path}",
f"{self.split_dir}/{self.import_type}_split_",
]
command = subprocess.Popen(args)
command.communicate()
return list(self.split_dir.glob("*")) |
6,919 | get route filter | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetRouteFilterResult',
'AwaitableGetRouteFilterResult',
'get_route_filter',
'get_route_filter_output',
]
@pulumi.output_type
class GetRouteFilterResult:
"""
Route Filter Resource.
"""
def __init__(__self__, etag=None, id=None, ipv6_peerings=None, location=None, name=None, peerings=None, provisioning_state=None, rules=None, tags=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ipv6_peerings and not isinstance(ipv6_peerings, list):
raise TypeError("Expected argument 'ipv6_peerings' to be a list")
pulumi.set(__self__, "ipv6_peerings", ipv6_peerings)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if peerings and not isinstance(peerings, list):
raise TypeError("Expected argument 'peerings' to be a list")
pulumi.set(__self__, "peerings", peerings)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if rules and not isinstance(rules, list):
raise TypeError("Expected argument 'rules' to be a list")
pulumi.set(__self__, "rules", rules)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipv6Peerings")
def ipv6_peerings(self) -> Sequence['outputs.ExpressRouteCircuitPeeringResponse']:
"""
A collection of references to express route circuit ipv6 peerings.
"""
return pulumi.get(self, "ipv6_peerings")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def peerings(self) -> Sequence['outputs.ExpressRouteCircuitPeeringResponse']:
"""
A collection of references to express route circuit peerings.
"""
return pulumi.get(self, "peerings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the route filter resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def rules(self) -> Optional[Sequence['outputs.RouteFilterRuleResponse']]:
"""
Collection of RouteFilterRules contained within a route filter.
"""
return pulumi.get(self, "rules")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetRouteFilterResult(GetRouteFilterResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRouteFilterResult(
etag=self.etag,
id=self.id,
ipv6_peerings=self.ipv6_peerings,
location=self.location,
name=self.name,
peerings=self.peerings,
provisioning_state=self.provisioning_state,
rules=self.rules,
tags=self.tags,
type=self.type)
def METHOD_NAME(expand: Optional[str] = None,
resource_group_name: Optional[str] = None,
route_filter_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRouteFilterResult:
"""
Gets the specified route filter.
:param str expand: Expands referenced express route bgp peering resources.
:param str resource_group_name: The name of the resource group.
:param str route_filter_name: The name of the route filter.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
__args__['routeFilterName'] = route_filter_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:network/v20230401:getRouteFilter', __args__, opts=opts, typ=GetRouteFilterResult).value
return AwaitableGetRouteFilterResult(
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
ipv6_peerings=pulumi.get(__ret__, 'ipv6_peerings'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
peerings=pulumi.get(__ret__, 'peerings'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
rules=pulumi.get(__ret__, 'rules'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(METHOD_NAME)
def get_route_filter_output(expand: Optional[pulumi.Input[Optional[str]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_filter_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRouteFilterResult]:
"""
Gets the specified route filter.
:param str expand: Expands referenced express route bgp peering resources.
:param str resource_group_name: The name of the resource group.
:param str route_filter_name: The name of the route filter.
"""
... |
6,920 | doc test suite | import types
import unittest
from _typeshed import ExcInfo
from collections.abc import Callable
from typing import Any, NamedTuple
from typing_extensions import TypeAlias
__all__ = [
"register_optionflag",
"DONT_ACCEPT_TRUE_FOR_1",
"DONT_ACCEPT_BLANKLINE",
"NORMALIZE_WHITESPACE",
"ELLIPSIS",
"SKIP",
"IGNORE_EXCEPTION_DETAIL",
"COMPARISON_FLAGS",
"REPORT_UDIFF",
"REPORT_CDIFF",
"REPORT_NDIFF",
"REPORT_ONLY_FIRST_FAILURE",
"REPORTING_FLAGS",
"FAIL_FAST",
"Example",
"DocTest",
"DocTestParser",
"DocTestFinder",
"DocTestRunner",
"OutputChecker",
"DocTestFailure",
"UnexpectedException",
"DebugRunner",
"testmod",
"testfile",
"run_docstring_examples",
"DocTestSuite",
"DocFileSuite",
"set_unittest_reportflags",
"script_from_examples",
"testsource",
"debug_src",
"debug",
]
class TestResults(NamedTuple):
failed: int
attempted: int
OPTIONFLAGS_BY_NAME: dict[str, int]
def register_optionflag(name: str) -> int: ...
DONT_ACCEPT_TRUE_FOR_1: int
DONT_ACCEPT_BLANKLINE: int
NORMALIZE_WHITESPACE: int
ELLIPSIS: int
SKIP: int
IGNORE_EXCEPTION_DETAIL: int
COMPARISON_FLAGS: int
REPORT_UDIFF: int
REPORT_CDIFF: int
REPORT_NDIFF: int
REPORT_ONLY_FIRST_FAILURE: int
FAIL_FAST: int
REPORTING_FLAGS: int
BLANKLINE_MARKER: str
ELLIPSIS_MARKER: str
class Example:
source: str
want: str
exc_msg: str | None
lineno: int
indent: int
options: dict[int, bool]
def __init__(
self,
source: str,
want: str,
exc_msg: str | None = None,
lineno: int = 0,
indent: int = 0,
options: dict[int, bool] | None = None,
) -> None: ...
def __hash__(self) -> int: ...
def __eq__(self, other: object) -> bool: ...
class DocTest:
examples: list[Example]
globs: dict[str, Any]
name: str
filename: str | None
lineno: int | None
docstring: str | None
def __init__(
self,
examples: list[Example],
globs: dict[str, Any],
name: str,
filename: str | None,
lineno: int | None,
docstring: str | None,
) -> None: ...
def __hash__(self) -> int: ...
def __lt__(self, other: DocTest) -> bool: ...
def __eq__(self, other: object) -> bool: ...
class DocTestParser:
def parse(self, string: str, name: str = "<string>") -> list[str | Example]: ...
def get_doctest(self, string: str, globs: dict[str, Any], name: str, filename: str | None, lineno: int | None) -> DocTest: ...
def get_examples(self, string: str, name: str = "<string>") -> list[Example]: ...
class DocTestFinder:
def __init__(
self, verbose: bool = False, parser: DocTestParser = ..., recurse: bool = True, exclude_empty: bool = True
) -> None: ...
def find(
self,
obj: object,
name: str | None = None,
module: None | bool | types.ModuleType = None,
globs: dict[str, Any] | None = None,
extraglobs: dict[str, Any] | None = None,
) -> list[DocTest]: ...
_Out: TypeAlias = Callable[[str], object]
class DocTestRunner:
DIVIDER: str
optionflags: int
original_optionflags: int
tries: int
failures: int
test: DocTest
def __init__(self, checker: OutputChecker | None = None, verbose: bool | None = None, optionflags: int = 0) -> None: ...
def report_start(self, out: _Out, test: DocTest, example: Example) -> None: ...
def report_success(self, out: _Out, test: DocTest, example: Example, got: str) -> None: ...
def report_failure(self, out: _Out, test: DocTest, example: Example, got: str) -> None: ...
def report_unexpected_exception(self, out: _Out, test: DocTest, example: Example, exc_info: ExcInfo) -> None: ...
def run(
self, test: DocTest, compileflags: int | None = None, out: _Out | None = None, clear_globs: bool = True
) -> TestResults: ...
def summarize(self, verbose: bool | None = None) -> TestResults: ...
def merge(self, other: DocTestRunner) -> None: ...
class OutputChecker:
def check_output(self, want: str, got: str, optionflags: int) -> bool: ...
def output_difference(self, example: Example, got: str, optionflags: int) -> str: ...
class DocTestFailure(Exception):
test: DocTest
example: Example
got: str
def __init__(self, test: DocTest, example: Example, got: str) -> None: ...
class UnexpectedException(Exception):
test: DocTest
example: Example
exc_info: ExcInfo
def __init__(self, test: DocTest, example: Example, exc_info: ExcInfo) -> None: ...
class DebugRunner(DocTestRunner): ...
master: DocTestRunner | None
def testmod(
m: types.ModuleType | None = None,
name: str | None = None,
globs: dict[str, Any] | None = None,
verbose: bool | None = None,
report: bool = True,
optionflags: int = 0,
extraglobs: dict[str, Any] | None = None,
raise_on_error: bool = False,
exclude_empty: bool = False,
) -> TestResults: ...
def testfile(
filename: str,
module_relative: bool = True,
name: str | None = None,
package: None | str | types.ModuleType = None,
globs: dict[str, Any] | None = None,
verbose: bool | None = None,
report: bool = True,
optionflags: int = 0,
extraglobs: dict[str, Any] | None = None,
raise_on_error: bool = False,
parser: DocTestParser = ...,
encoding: str | None = None,
) -> TestResults: ...
def run_docstring_examples(
f: object,
globs: dict[str, Any],
verbose: bool = False,
name: str = "NoName",
compileflags: int | None = None,
optionflags: int = 0,
) -> None: ...
def set_unittest_reportflags(flags: int) -> int: ...
class DocTestCase(unittest.TestCase):
def __init__(
self,
test: DocTest,
optionflags: int = 0,
setUp: Callable[[DocTest], Any] | None = None,
tearDown: Callable[[DocTest], Any] | None = None,
checker: OutputChecker | None = None,
) -> None: ...
def runTest(self) -> None: ...
def format_failure(self, err: str) -> str: ...
def __hash__(self) -> int: ...
def __eq__(self, other: object) -> bool: ...
class SkipDocTestCase(DocTestCase):
def __init__(self, module: types.ModuleType) -> None: ...
def test_skip(self) -> None: ...
class _DocTestSuite(unittest.TestSuite): ...
def METHOD_NAME(
module: None | str | types.ModuleType = None,
globs: dict[str, Any] | None = None,
extraglobs: dict[str, Any] | None = None,
test_finder: DocTestFinder | None = None,
**options: Any,
) -> _DocTestSuite: ...
class DocFileCase(DocTestCase): ...
def DocFileTest(
path: str,
module_relative: bool = True,
package: None | str | types.ModuleType = None,
globs: dict[str, Any] | None = None,
parser: DocTestParser = ...,
encoding: str | None = None,
**options: Any,
) -> DocFileCase: ...
def DocFileSuite(*paths: str, **kw: Any) -> _DocTestSuite: ...
def script_from_examples(s: str) -> str: ...
def testsource(module: None | str | types.ModuleType, name: str) -> str: ...
def debug_src(src: str, pm: bool = False, globs: dict[str, Any] | None = None) -> None: ...
def debug_script(src: str, pm: bool = False, globs: dict[str, Any] | None = None) -> None: ...
def debug(module: None | str | types.ModuleType, name: str, pm: bool = False) -> None: ... |
6,921 | main | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import warnings
from collections import defaultdict
import paddle
import numpy as np
from paddle.optimizer.lr import StepDecay
from dataset.reader import read_trigraph
from dataset.dataset import create_dataloaders
from models.ke_model import KGEModel
from models.loss_func import LossFunction
from utils import set_seed, set_logger, print_log
from utils import evaluate
from config import prepare_config
def METHOD_NAME():
"""Main function for shallow knowledge embedding methods.
"""
args = prepare_config()
set_seed(args.seed)
set_logger(args)
trigraph = read_trigraph(args.data_path, args.data_name, args.use_dict,
args.kv_mode)
if args.valid_percent < 1:
trigraph.sampled_subgraph(args.valid_percent, dataset='valid')
use_filter_set = args.filter_sample or args.filter_eval or args.weighted_loss
if use_filter_set:
filter_dict = {
'head': trigraph.true_heads_for_tail_rel,
'tail': trigraph.true_tails_for_head_rel
}
else:
filter_dict = None
model = KGEModel(args.model_name, trigraph, args)
if args.async_update:
model.start_async_update()
if len(model.parameters()) > 0:
if args.optimizer == 'adam':
optim_func = paddle.optimizer.Adam
elif args.optimizer == 'adagrad':
optim_func = paddle.optimizer.Adagrad
else:
errors = 'Optimizer {} not supported!'.format(args.optimizer)
raise ValueError(errors)
if args.scheduler_interval > 0:
scheduler = StepDecay(
learning_rate=args.lr,
step_size=args.scheduler_interval,
gamma=0.5,
last_epoch=-1,
verbose=True)
optimizer = optim_func(
learning_rate=scheduler,
epsilon=1e-10,
parameters=model.parameters())
else:
optimizer = optim_func(
learning_rate=args.lr,
epsilon=1e-10,
parameters=model.parameters())
else:
warnings.warn('There is no model parameter on gpu, optimizer is None.',
RuntimeWarning)
optimizer = None
loss_func = LossFunction(
name=args.loss_type,
pairwise=args.pairwise,
margin=args.margin,
neg_adv_spl=args.neg_adversarial_sampling,
neg_adv_temp=args.adversarial_temperature)
train_loader, valid_loader, test_loader = create_dataloaders(
trigraph,
args,
filter_dict=filter_dict if use_filter_set else None,
shared_ent_path=model.shared_ent_path if args.mix_cpu_gpu else None)
timer = defaultdict(int)
log = defaultdict(int)
ts = t_step = time.time()
step = 1
stop = False
for epoch in range(args.num_epoch):
for indexes, prefetch_embeddings, mode in train_loader:
h, r, t, neg_ents, all_ents = indexes
all_ents_emb, rel_emb, weights = prefetch_embeddings
r = r.cuda()
if all_ents is not None:
all_ents = all_ents.cuda()
if rel_emb is not None:
rel_emb = rel_emb.cuda()
rel_emb.stop_gradient = False
if all_ents_emb is not None:
all_ents_emb = all_ents_emb.cuda()
all_ents_emb.stop_gradient = False
timer['sample'] += (time.time() - ts)
ts = time.time()
h_emb, r_emb, t_emb, neg_emb, mask = model.prepare_inputs(
h, r, t, all_ents, neg_ents, all_ents_emb, rel_emb, mode, args)
pos_score = model.forward(h_emb, r_emb, t_emb)
if mode == 'head':
neg_score = model.get_neg_score(t_emb, r_emb, neg_emb, True,
mask)
elif mode == 'tail':
neg_score = model.get_neg_score(h_emb, r_emb, neg_emb, False,
mask)
else:
raise ValueError('Unsupported negative mode {}.'.format(mode))
neg_score = neg_score.reshape([args.batch_size, -1])
loss = loss_func(pos_score, neg_score, weights)
log['loss'] += float(loss)
if args.use_embedding_regularization:
reg_loss = model.get_regularization(h_emb, r_emb, t_emb,
neg_emb)
log['reg'] += float(reg_loss)
loss = loss + reg_loss
timer['forward'] += (time.time() - ts)
ts = time.time()
loss.backward()
timer['backward'] += (time.time() - ts)
ts = time.time()
if optimizer is not None:
optimizer.step()
optimizer.clear_grad()
if args.mix_cpu_gpu:
ent_trace, rel_trace = model.create_trace(
all_ents, all_ents_emb, r, r_emb)
model.step(ent_trace, rel_trace)
else:
model.step()
timer['update'] += (time.time() - ts)
if args.log_interval > 0 and (step + 1) % args.log_interval == 0:
print_log(step, args.log_interval, log, timer,
time.time() - t_step)
timer = defaultdict(int)
log = defaultdict(int)
t_step = time.time()
if args.valid and (step + 1) % args.eval_interval == 0:
evaluate(
model,
valid_loader,
'valid',
filter_dict if args.filter_eval else None,
data_mode=args.data_name)
if args.scheduler_interval > 0 and step % args.scheduler_interval == 0:
scheduler.step()
step += 1
if args.save_interval > 0 and step % args.save_interval == 0:
model.save(args.step_path)
if step >= args.max_steps:
stop = True
break
ts = time.time()
if stop:
break
if args.async_update:
model.finish_async_update()
if args.test:
evaluate(
model,
test_loader,
'test',
filter_dict if args.filter_eval else None,
args.save_path,
data_mode=args.data_name)
paddle.save(model.state_dict(),
os.path.join(args.save_path, "params.pdparams"))
if __name__ == '__main__':
METHOD_NAME() |
6,922 | id | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetApplicationSecurityGroupResult',
'AwaitableGetApplicationSecurityGroupResult',
'get_application_security_group',
'get_application_security_group_output',
]
@pulumi.output_type
class GetApplicationSecurityGroupResult:
"""
An application security group in a resource group.
"""
def __init__(__self__, etag=None, METHOD_NAME=None, location=None, name=None, provisioning_state=None, resource_guid=None, tags=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the application security group resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
The resource GUID property of the application security group resource. It uniquely identifies a resource, even if the user changes its name or migrate the resource across subscriptions or resource groups.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetApplicationSecurityGroupResult(GetApplicationSecurityGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApplicationSecurityGroupResult(
etag=self.etag,
METHOD_NAME=self.METHOD_NAME,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
resource_guid=self.resource_guid,
tags=self.tags,
type=self.type)
def get_application_security_group(application_security_group_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApplicationSecurityGroupResult:
"""
Gets information about the specified application security group.
:param str application_security_group_name: The name of the application security group.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['applicationSecurityGroupName'] = application_security_group_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:network/v20230401:getApplicationSecurityGroup', __args__, opts=opts, typ=GetApplicationSecurityGroupResult).value
return AwaitableGetApplicationSecurityGroupResult(
etag=pulumi.get(__ret__, 'etag'),
METHOD_NAME=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
resource_guid=pulumi.get(__ret__, 'resource_guid'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_application_security_group)
def get_application_security_group_output(application_security_group_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetApplicationSecurityGroupResult]:
"""
Gets information about the specified application security group.
:param str application_security_group_name: The name of the application security group.
:param str resource_group_name: The name of the resource group.
"""
... |
6,923 | test rotate | from builtins import range
import unittest
import os
import sys
from datetime import timedelta
from time import sleep
from nose.plugins.attrib import attr
from WMCore.Database.CMSCouch import RotatingDatabase, CouchServer, CouchNotFoundError
class RotatingDatabaseTest(unittest.TestCase):
def setUp(self):
self.couchURL = os.getenv("COUCHURL")
self.server = CouchServer(self.couchURL)
testname = self.id().split('.')[-1].lower()
self.dbname = 'rotdb_unittest_%s' % testname
self.arcname = 'rotdb_unittest_%s_archive' % testname
self.seedname = 'rotdb_unittest_%s_seedcfg' % testname
# set a long value for times, tests do operations explicitly
self.timing = {'archive':timedelta(seconds=1), 'expire':timedelta(seconds=2)}
self.db = RotatingDatabase(dbname = self.dbname, url = self.couchURL,
archivename = self.arcname, timing = self.timing)
def tearDown(self):
"""Delete all the test couchdb databases"""
to_go = [db for db in self.server.listDatabases() if db.startswith('rotdb_unittest_')]
for dbname in to_go:
try:
self.server.deleteDatabase(dbname)
except CouchNotFoundError:
# db has already gone
pass
def METHOD_NAME(self):
"""
Test that rotation works
"""
start_name = self.db.name
self.db._rotate()
end_name = self.db.name
databases = [db for db in self.server.listDatabases() if db.startswith('rotdb_unittest_')]
self.assertTrue(start_name in databases)
self.assertTrue(end_name in databases)
# This test repeatably inserts either 20 or 25 documents into couch.
# Disabled until it's stable
@attr("integration")
def testArchive(self):
"""
Test that archiving views works
"""
dummy_view = {'_id':'_design/foo', 'language': 'javascript','views':{
'bar':{'map':"function(doc) {if (doc.foo) {emit(doc.int, 1);}}", 'reduce':'_sum'}
}
}
archive_view = {'_id':'_design/foo', 'language': 'javascript','views':{
'bar':{'map':"function(doc) {emit(doc.key, doc.value);}", 'reduce':'_sum'}
}
}
seed_db = self.server.connectDatabase(self.seedname)
seed_db.commit(dummy_view)
# Need to have the timing long enough so the data isn't archived by accident
self.timing = {'archive':timedelta(seconds=1000), 'expire':timedelta(seconds=2000)}
self.db = RotatingDatabase(dbname = self.dbname, url = self.couchURL, views=['foo/bar'],
archivename = self.arcname, timing = self.timing)
self.db.archive_db.commitOne(archive_view)
runs = 5
docs = 5
for run in range(runs):
for i in range(docs):
self.db.queue({'foo':'bar', 'int': i, 'run': run})
self.db.commit()
self.db._rotate()
self.db._archive()
view_result = self.db.archive_db.loadView('foo','bar')
arch_sum = view_result['rows'][0]['value']
self.assertEqual(arch_sum, runs * docs)
def testExpire(self):
"""
Test that expiring databases works
"""
# rotate out the original db
self.db._rotate()
archived = self.db.archived_dbs()
self.assertEqual(1, len(archived), 'test not starting from clean state, bail!')
# Make sure the db has expired
sleep(2)
self.db._expire()
self.assertEqual(0, len(self.db.archived_dbs()))
self.assertFalse(archived[0] in self.server.listDatabases())
@attr("integration")
def testCycle(self):
"""
Test that committing data to different databases happens
This is a bit of a dodgy test - if timings go funny it will fail
"""
self.timing = {'archive':timedelta(seconds=0.5), 'expire':timedelta(seconds=1)}
self.db = RotatingDatabase(dbname = self.dbname, url = self.couchURL,
archivename = self.arcname, timing = self.timing)
my_name = self.db.name
self.db.commit({'foo':'bar'})
sleep(5)
self.db.commit({'foo':'bar'})
# the initial db should have expired by now
self.db.commit({'foo':'bar'})
self.assertFalse(my_name in self.server.listDatabases(), "")
if __name__ == "__main__":
if len(sys.argv) >1 :
suite = unittest.TestSuite()
suite.addTest(RotatingDatabaseTest(sys.argv[1]))
unittest.TextTestRunner().run(suite)
else:
unittest.main() |
6,924 | last modified by | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'SystemDataResponse',
]
@pulumi.output_type
class SystemDataResponse(dict):
"""
Metadata pertaining to creation and last modification of the resource.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "createdAt":
suggest = "created_at"
elif key == "createdBy":
suggest = "created_by"
elif key == "createdByType":
suggest = "created_by_type"
elif key == "lastModifiedAt":
suggest = "last_modified_at"
elif key == "lastModifiedBy":
suggest = "last_modified_by"
elif key == "lastModifiedByType":
suggest = "last_modified_by_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SystemDataResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SystemDataResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SystemDataResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
created_at: Optional[str] = None,
created_by: Optional[str] = None,
created_by_type: Optional[str] = None,
last_modified_at: Optional[str] = None,
METHOD_NAME: Optional[str] = None,
last_modified_by_type: Optional[str] = None):
"""
Metadata pertaining to creation and last modification of the resource.
:param str created_at: The timestamp of resource creation (UTC).
:param str created_by: The identity that created the resource.
:param str created_by_type: The type of identity that created the resource.
:param str last_modified_at: The timestamp of resource last modification (UTC)
:param str last_modified_by: The identity that last modified the resource.
:param str last_modified_by_type: The type of identity that last modified the resource.
"""
if created_at is not None:
pulumi.set(__self__, "created_at", created_at)
if created_by is not None:
pulumi.set(__self__, "created_by", created_by)
if created_by_type is not None:
pulumi.set(__self__, "created_by_type", created_by_type)
if last_modified_at is not None:
pulumi.set(__self__, "last_modified_at", last_modified_at)
if METHOD_NAME is not None:
pulumi.set(__self__, "last_modified_by", METHOD_NAME)
if last_modified_by_type is not None:
pulumi.set(__self__, "last_modified_by_type", last_modified_by_type)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[str]:
"""
The timestamp of resource creation (UTC).
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> Optional[str]:
"""
The identity that created the resource.
"""
return pulumi.get(self, "created_by")
@property
@pulumi.getter(name="createdByType")
def created_by_type(self) -> Optional[str]:
"""
The type of identity that created the resource.
"""
return pulumi.get(self, "created_by_type")
@property
@pulumi.getter(name="lastModifiedAt")
def last_modified_at(self) -> Optional[str]:
"""
The timestamp of resource last modification (UTC)
"""
return pulumi.get(self, "last_modified_at")
@property
@pulumi.getter(name="lastModifiedBy")
def METHOD_NAME(self) -> Optional[str]:
"""
The identity that last modified the resource.
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="lastModifiedByType")
def last_modified_by_type(self) -> Optional[str]:
"""
The type of identity that last modified the resource.
"""
return pulumi.get(self, "last_modified_by_type")
|
6,925 | create tree | """distutils.dir_util
Utility functions for manipulating directories and directory trees."""
import os
import errno
from distutils.errors import DistutilsFileError, DistutilsInternalError
from distutils import log
# cache for by mkpath() -- in addition to cheapening redundant calls,
# eliminates redundant "creating /foo/bar/baz" messages in dry-run mode
_path_created = {}
# I don't use os.makedirs because a) it's new to Python 1.5.2, and
# b) it blows up if the directory already exists (I want to silently
# succeed in that case).
def mkpath(name, mode=0o777, verbose=1, dry_run=0):
"""Create a directory and any missing ancestor directories.
If the directory already exists (or if 'name' is the empty string, which
means the current directory, which of course exists), then do nothing.
Raise DistutilsFileError if unable to create some directory along the way
(eg. some sub-path exists, but is a file rather than a directory).
If 'verbose' is true, print a one-line summary of each mkdir to stdout.
Return the list of directories actually created.
"""
global _path_created
# Detect a common bug -- name is None
if not isinstance(name, str):
raise DistutilsInternalError(
"mkpath: 'name' must be a string (got %r)" % (name,))
# XXX what's the better way to handle verbosity? print as we create
# each directory in the path (the current behaviour), or only announce
# the creation of the whole path? (quite easy to do the latter since
# we're not using a recursive algorithm)
name = os.path.normpath(name)
created_dirs = []
if os.path.isdir(name) or name == '':
return created_dirs
if _path_created.get(os.path.abspath(name)):
return created_dirs
(head, tail) = os.path.split(name)
tails = [tail] # stack of lone dirs to create
while head and tail and not os.path.isdir(head):
(head, tail) = os.path.split(head)
tails.insert(0, tail) # push next higher dir onto stack
# now 'head' contains the deepest directory that already exists
# (that is, the child of 'head' in 'name' is the highest directory
# that does *not* exist)
for d in tails:
#print "head = %s, d = %s: " % (head, d),
head = os.path.join(head, d)
abs_head = os.path.abspath(head)
if _path_created.get(abs_head):
continue
if verbose >= 1:
log.info("creating %s", head)
if not dry_run:
try:
os.mkdir(head, mode)
except OSError as exc:
if not (exc.errno == errno.EEXIST and os.path.isdir(head)):
raise DistutilsFileError(
"could not create '%s': %s" % (head, exc.args[-1]))
created_dirs.append(head)
_path_created[abs_head] = 1
return created_dirs
def METHOD_NAME(base_dir, files, mode=0o777, verbose=1, dry_run=0):
"""Create all the empty directories under 'base_dir' needed to put 'files'
there.
'base_dir' is just the name of a directory which doesn't necessarily
exist yet; 'files' is a list of filenames to be interpreted relative to
'base_dir'. 'base_dir' + the directory portion of every file in 'files'
will be created if it doesn't already exist. 'mode', 'verbose' and
'dry_run' flags are as for 'mkpath()'.
"""
# First get the list of directories to create
need_dir = set()
for file in files:
need_dir.add(os.path.join(base_dir, os.path.dirname(file)))
# Now create them
for dir in sorted(need_dir):
mkpath(dir, mode, verbose=verbose, dry_run=dry_run)
def copy_tree(src, dst, preserve_mode=1, preserve_times=1,
preserve_symlinks=0, update=0, verbose=1, dry_run=0):
"""Copy an entire directory tree 'src' to a new location 'dst'.
Both 'src' and 'dst' must be directory names. If 'src' is not a
directory, raise DistutilsFileError. If 'dst' does not exist, it is
created with 'mkpath()'. The end result of the copy is that every
file in 'src' is copied to 'dst', and directories under 'src' are
recursively copied to 'dst'. Return the list of files that were
copied or might have been copied, using their output name. The
return value is unaffected by 'update' or 'dry_run': it is simply
the list of all files under 'src', with the names changed to be
under 'dst'.
'preserve_mode' and 'preserve_times' are the same as for
'copy_file'; note that they only apply to regular files, not to
directories. If 'preserve_symlinks' is true, symlinks will be
copied as symlinks (on platforms that support them!); otherwise
(the default), the destination of the symlink will be copied.
'update' and 'verbose' are the same as for 'copy_file'.
"""
from distutils.file_util import copy_file
if not dry_run and not os.path.isdir(src):
raise DistutilsFileError(
"cannot copy tree '%s': not a directory" % src)
try:
names = os.listdir(src)
except OSError as e:
if dry_run:
names = []
else:
raise DistutilsFileError(
"error listing files in '%s': %s" % (src, e.strerror))
if not dry_run:
mkpath(dst, verbose=verbose)
outputs = []
for n in names:
src_name = os.path.join(src, n)
dst_name = os.path.join(dst, n)
if n.startswith('.nfs'):
# skip NFS rename files
continue
if preserve_symlinks and os.path.islink(src_name):
link_dest = os.readlink(src_name)
if verbose >= 1:
log.info("linking %s -> %s", dst_name, link_dest)
if not dry_run:
os.symlink(link_dest, dst_name)
outputs.append(dst_name)
elif os.path.isdir(src_name):
outputs.extend(
copy_tree(src_name, dst_name, preserve_mode,
preserve_times, preserve_symlinks, update,
verbose=verbose, dry_run=dry_run))
else:
copy_file(src_name, dst_name, preserve_mode,
preserve_times, update, verbose=verbose,
dry_run=dry_run)
outputs.append(dst_name)
return outputs
def _build_cmdtuple(path, cmdtuples):
"""Helper for remove_tree()."""
for f in os.listdir(path):
real_f = os.path.join(path,f)
if os.path.isdir(real_f) and not os.path.islink(real_f):
_build_cmdtuple(real_f, cmdtuples)
else:
cmdtuples.append((os.remove, real_f))
cmdtuples.append((os.rmdir, path))
def remove_tree(directory, verbose=1, dry_run=0):
"""Recursively remove an entire directory tree.
Any errors are ignored (apart from being reported to stdout if 'verbose'
is true).
"""
global _path_created
if verbose >= 1:
log.info("removing '%s' (and everything under it)", directory)
if dry_run:
return
cmdtuples = []
_build_cmdtuple(directory, cmdtuples)
for cmd in cmdtuples:
try:
cmd[0](cmd[1])
# remove dir from cache if it's already there
abspath = os.path.abspath(cmd[1])
if abspath in _path_created:
del _path_created[abspath]
except OSError as exc:
log.warn("error removing %s: %s", directory, exc)
def ensure_relative(path):
"""Take the full path 'path', and make it a relative path.
This is useful to make 'path' the second argument to os.path.join().
"""
drive, path = os.path.splitdrive(path)
if path[0:1] == os.sep:
path = drive + path[1:]
return path |
6,926 | conditional entropy compute | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import Optional, Union
import torch
from torch import Tensor
from typing_extensions import Literal
from torchmetrics.functional.classification.confusion_matrix import _multiclass_confusion_matrix_update
from torchmetrics.functional.nominal.utils import (
_drop_empty_rows_and_cols,
_handle_nan_in_data,
_nominal_input_validation,
)
def METHOD_NAME(confmat: Tensor) -> Tensor:
r"""Compute Conditional Entropy Statistic based on a pre-computed confusion matrix.
.. math::
H(X|Y) = \sum_{x, y ~ (X, Y)} p(x, y)\frac{p(y)}{p(x, y)}
Args:
confmat: Confusion matrix for observed data
Returns:
Conditional Entropy Value
"""
confmat = _drop_empty_rows_and_cols(confmat)
total_occurrences = confmat.sum()
# iterate over all i, j combinations
p_xy_m = confmat / total_occurrences
# get p_y by summing over x dim (=1)
p_y = confmat.sum(1) / total_occurrences
# repeat over rows (shape = p_xy_m.shape[1]) for tensor multiplication
p_y_m = p_y.unsqueeze(1).repeat(1, p_xy_m.shape[1])
# entropy calculated as p_xy * log (p_xy / p_y)
return torch.nansum(p_xy_m * torch.log(p_y_m / p_xy_m))
def _theils_u_update(
preds: Tensor,
target: Tensor,
num_classes: int,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[Union[int, float]] = 0.0,
) -> Tensor:
"""Compute the bins to update the confusion matrix with for Theil's U calculation.
Args:
preds: 1D or 2D tensor of categorical (nominal) data
target: 1D or 2D tensor of categorical (nominal) data
num_classes: Integer specifing the number of classes
nan_strategy: Indication of whether to replace or drop ``NaN`` values
nan_replace_value: Value to replace ``NaN`s when ``nan_strategy = 'replace```
Returns:
Non-reduced confusion matrix
"""
preds = preds.argmax(1) if preds.ndim == 2 else preds
target = target.argmax(1) if target.ndim == 2 else target
preds, target = _handle_nan_in_data(preds, target, nan_strategy, nan_replace_value)
return _multiclass_confusion_matrix_update(preds, target, num_classes)
def _theils_u_compute(confmat: Tensor) -> Tensor:
"""Compute Theil's U statistic based on a pre-computed confusion matrix.
Args:
confmat: Confusion matrix for observed data
Returns:
Theil's U statistic
"""
confmat = _drop_empty_rows_and_cols(confmat)
# compute conditional entropy
s_xy = METHOD_NAME(confmat)
# compute H(x)
total_occurrences = confmat.sum()
p_x = confmat.sum(0) / total_occurrences
s_x = -torch.sum(p_x * torch.log(p_x))
# compute u statistic
if s_x == 0:
return torch.tensor(0, device=confmat.device)
return (s_x - s_xy) / s_x
def theils_u(
preds: Tensor,
target: Tensor,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[Union[int, float]] = 0.0,
) -> Tensor:
r"""Compute `Theils Uncertainty coefficient`_ statistic measuring the association between two nominal data series.
.. math::
U(X|Y) = \frac{H(X) - H(X|Y)}{H(X)}
where :math:`H(X)` is entropy of variable :math:`X` while :math:`H(X|Y)` is the conditional entropy of :math:`X`
given :math:`Y`.
Theils's U is an asymmetric coefficient, i.e. :math:`TheilsU(preds, target) \neq TheilsU(target, preds)`.
The output values lies in [0, 1]. 0 means y has no information about x while value 1 means y has complete
information about x.
Args:
preds: 1D or 2D tensor of categorical (nominal) data
- 1D shape: (batch_size,)
- 2D shape: (batch_size, num_classes)
target: 1D or 2D tensor of categorical (nominal) data
- 1D shape: (batch_size,)
- 2D shape: (batch_size, num_classes)
nan_strategy: Indication of whether to replace or drop ``NaN`` values
nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
Returns:
Tensor containing Theil's U statistic
Example:
>>> from torchmetrics.functional.nominal import theils_u
>>> _ = torch.manual_seed(42)
>>> preds = torch.randint(10, (10,))
>>> target = torch.randint(10, (10,))
>>> theils_u(preds, target)
tensor(0.8530)
"""
num_classes = len(torch.cat([preds, target]).unique())
confmat = _theils_u_update(preds, target, num_classes, nan_strategy, nan_replace_value)
return _theils_u_compute(confmat)
def theils_u_matrix(
matrix: Tensor,
nan_strategy: Literal["replace", "drop"] = "replace",
nan_replace_value: Optional[Union[int, float]] = 0.0,
) -> Tensor:
r"""Compute `Theil's U`_ statistic between a set of multiple variables.
This can serve as a convenient tool to compute Theil's U statistic for analyses of correlation between categorical
variables in your dataset.
Args:
matrix: A tensor of categorical (nominal) data, where:
- rows represent a number of data points
- columns represent a number of categorical (nominal) features
nan_strategy: Indication of whether to replace or drop ``NaN`` values
nan_replace_value: Value to replace ``NaN``s when ``nan_strategy = 'replace'``
Returns:
Theil's U statistic for a dataset of categorical variables
Example:
>>> from torchmetrics.functional.nominal import theils_u_matrix
>>> _ = torch.manual_seed(42)
>>> matrix = torch.randint(0, 4, (200, 5))
>>> theils_u_matrix(matrix)
tensor([[1.0000, 0.0202, 0.0142, 0.0196, 0.0353],
[0.0202, 1.0000, 0.0070, 0.0136, 0.0065],
[0.0143, 0.0070, 1.0000, 0.0125, 0.0206],
[0.0198, 0.0137, 0.0125, 1.0000, 0.0312],
[0.0352, 0.0065, 0.0204, 0.0308, 1.0000]])
"""
_nominal_input_validation(nan_strategy, nan_replace_value)
num_variables = matrix.shape[1]
theils_u_matrix_value = torch.ones(num_variables, num_variables, device=matrix.device)
for i, j in itertools.combinations(range(num_variables), 2):
x, y = matrix[:, i], matrix[:, j]
num_classes = len(torch.cat([x, y]).unique())
confmat = _theils_u_update(x, y, num_classes, nan_strategy, nan_replace_value)
theils_u_matrix_value[i, j] = _theils_u_compute(confmat)
theils_u_matrix_value[j, i] = _theils_u_compute(confmat.T)
return theils_u_matrix_value |
6,927 | last modified at | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'SystemDataResponse',
'WatchlistUserInfoResponse',
]
@pulumi.output_type
class SystemDataResponse(dict):
"""
Metadata pertaining to creation and last modification of the resource.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "createdAt":
suggest = "created_at"
elif key == "createdBy":
suggest = "created_by"
elif key == "createdByType":
suggest = "created_by_type"
elif key == "lastModifiedAt":
suggest = "last_modified_at"
elif key == "lastModifiedBy":
suggest = "last_modified_by"
elif key == "lastModifiedByType":
suggest = "last_modified_by_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SystemDataResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SystemDataResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SystemDataResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
created_at: Optional[str] = None,
created_by: Optional[str] = None,
created_by_type: Optional[str] = None,
METHOD_NAME: Optional[str] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[str] = None):
"""
Metadata pertaining to creation and last modification of the resource.
:param str created_at: The timestamp of resource creation (UTC).
:param str created_by: The identity that created the resource.
:param str created_by_type: The type of identity that created the resource.
:param str last_modified_at: The timestamp of resource last modification (UTC)
:param str last_modified_by: The identity that last modified the resource.
:param str last_modified_by_type: The type of identity that last modified the resource.
"""
if created_at is not None:
pulumi.set(__self__, "created_at", created_at)
if created_by is not None:
pulumi.set(__self__, "created_by", created_by)
if created_by_type is not None:
pulumi.set(__self__, "created_by_type", created_by_type)
if METHOD_NAME is not None:
pulumi.set(__self__, "last_modified_at", METHOD_NAME)
if last_modified_by is not None:
pulumi.set(__self__, "last_modified_by", last_modified_by)
if last_modified_by_type is not None:
pulumi.set(__self__, "last_modified_by_type", last_modified_by_type)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[str]:
"""
The timestamp of resource creation (UTC).
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> Optional[str]:
"""
The identity that created the resource.
"""
return pulumi.get(self, "created_by")
@property
@pulumi.getter(name="createdByType")
def created_by_type(self) -> Optional[str]:
"""
The type of identity that created the resource.
"""
return pulumi.get(self, "created_by_type")
@property
@pulumi.getter(name="lastModifiedAt")
def METHOD_NAME(self) -> Optional[str]:
"""
The timestamp of resource last modification (UTC)
"""
return pulumi.get(self, "last_modified_at")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> Optional[str]:
"""
The identity that last modified the resource.
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="lastModifiedByType")
def last_modified_by_type(self) -> Optional[str]:
"""
The type of identity that last modified the resource.
"""
return pulumi.get(self, "last_modified_by_type")
@pulumi.output_type
class WatchlistUserInfoResponse(dict):
"""
User information that made some action
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "objectId":
suggest = "object_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in WatchlistUserInfoResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
WatchlistUserInfoResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
WatchlistUserInfoResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
email: str,
name: str,
object_id: Optional[str] = None):
"""
User information that made some action
:param str email: The email of the user.
:param str name: The name of the user.
:param str object_id: The object id of the user.
"""
pulumi.set(__self__, "email", email)
pulumi.set(__self__, "name", name)
if object_id is not None:
pulumi.set(__self__, "object_id", object_id)
@property
@pulumi.getter
def email(self) -> str:
"""
The email of the user.
"""
return pulumi.get(self, "email")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the user.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="objectId")
def object_id(self) -> Optional[str]:
"""
The object id of the user.
"""
return pulumi.get(self, "object_id")
|
6,928 | url | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"vm redeploy",
)
class Redeploy(AAZCommand):
"""Redeploy an existing VM.
:example: Redeploy a VM.
az vm redeploy -g MyResourceGroup -n MyVm
:example: Redeploy all VMs in a resource group.
az vm redeploy --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
_aaz_info = {
"version": "2022-11-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/virtualmachines/{}/redeploy", "2022-11-01"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, None)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.vm_name = AAZStrArg(
options=["-n", "--name", "--vm-name"],
help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
required=True,
id_part="name",
configured_default="vm",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.VirtualMachinesRedeploy(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class VirtualMachinesRedeploy(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def METHOD_NAME(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy",
**self.url_parameters
)
@property
def method(self):
return "POST"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"vmName", self.ctx.args.vm_name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-11-01",
required=True,
),
}
return parameters
def on_200(self, session):
pass
class _RedeployHelper:
"""Helper class for Redeploy"""
__all__ = ["Redeploy"] |
6,929 | add two | import pytest
import random
import typing as ty
from ..functions import task, annotate
from ...engine.task import FunctionTask
def test_task_equivalence():
def METHOD_NAME(a):
return a + 2
canonical = FunctionTask(METHOD_NAME, a=3)
decorated1 = task(METHOD_NAME)(a=3)
@task
def addtwo(a):
return a + 2
decorated2 = addtwo(a=3)
assert canonical.checksum == decorated1.checksum
c_res = canonical._run()
d1_res = decorated1._run()
d2_res = decorated2._run()
assert c_res.output.hash == d1_res.output.hash
assert c_res.output.hash == d2_res.output.hash
def test_annotation_equivalence_1():
"""testing various ways of annotation: one output, only types provided"""
def direct(a: int) -> int:
return a + 2
@annotate({"return": int})
def partial(a: int):
return a + 2
@annotate({"a": int, "return": int})
def indirect(a):
return a + 2
# checking if the annotations are equivalent
assert direct.__annotations__ == partial.__annotations__
assert direct.__annotations__ == indirect.__annotations__
# Run functions to ensure behavior is unaffected
a = random.randint(0, (1 << 32) - 3)
assert direct(a) == partial(a)
assert direct(a) == indirect(a)
# checking if the annotation is properly converted to output_spec if used in task
task_direct = task(direct)()
assert task_direct.output_spec.fields[0] == ("out", int)
def test_annotation_equivalence_2():
"""testing various ways of annotation: multiple outputs, using a tuple for output annot."""
def direct(a: int) -> (int, float):
return a + 2, a + 2.0
@annotate({"return": (int, float)})
def partial(a: int):
return a + 2, a + 2.0
@annotate({"a": int, "return": (int, float)})
def indirect(a):
return a + 2, a + 2.0
# checking if the annotations are equivalent
assert direct.__annotations__ == partial.__annotations__
assert direct.__annotations__ == indirect.__annotations__
# Run functions to ensure behavior is unaffected
a = random.randint(0, (1 << 32) - 3)
assert direct(a) == partial(a)
assert direct(a) == indirect(a)
# checking if the annotation is properly converted to output_spec if used in task
task_direct = task(direct)()
assert task_direct.output_spec.fields == [("out1", int), ("out2", float)]
def test_annotation_equivalence_3():
"""testing various ways of annotation: using dictionary for output annot."""
def direct(a: int) -> {"out1": int}:
return a + 2
@annotate({"return": {"out1": int}})
def partial(a: int):
return a + 2
@annotate({"a": int, "return": {"out1": int}})
def indirect(a):
return a + 2
# checking if the annotations are equivalent
assert direct.__annotations__ == partial.__annotations__
assert direct.__annotations__ == indirect.__annotations__
# Run functions to ensure behavior is unaffected
a = random.randint(0, (1 << 32) - 3)
assert direct(a) == partial(a)
assert direct(a) == indirect(a)
# checking if the annotation is properly converted to output_spec if used in task
task_direct = task(direct)()
assert task_direct.output_spec.fields[0] == ("out1", int)
def test_annotation_equivalence_4():
"""testing various ways of annotation: using ty.NamedTuple for the output"""
def direct(a: int) -> ty.NamedTuple("Output", [("sum", int), ("sub", int)]):
return a + 2, a - 2
@annotate({"return": ty.NamedTuple("Output", [("sum", int), ("sub", int)])})
def partial(a: int):
return a + 2, a - 2
@annotate(
{"a": int, "return": ty.NamedTuple("Output", [("sum", int), ("sub", int)])}
)
def indirect(a):
return a + 2, a - 2
# checking if the annotations are equivalent
assert (
direct.__annotations__["return"].__annotations__
== partial.__annotations__["return"].__annotations__
== indirect.__annotations__["return"].__annotations__
)
assert (
direct.__annotations__["return"].__name__
== partial.__annotations__["return"].__name__
== indirect.__annotations__["return"].__name__
)
# Run functions to ensure behavior is unaffected
a = random.randint(0, (1 << 32) - 3)
assert direct(a) == partial(a)
assert direct(a) == indirect(a)
# checking if the annotation is properly converted to output_spec if used in task
task_direct = task(direct)()
assert task_direct.output_spec.fields == [("sum", int), ("sub", int)]
def test_annotation_override():
@annotate({"a": float, "return": float})
def annotated(a: int) -> int:
return a + 2
assert annotated.__annotations__ == {"a": float, "return": float}
def test_invalid_annotation():
with pytest.raises(TypeError):
@annotate({"b": int})
def addtwo(a):
return a + 2
def test_annotated_task():
@task
def square(in_val: float):
return in_val**2
res = square(in_val=2.0)()
assert res.output.out == 4.0
def test_return_annotated_task():
@task
@annotate({"in_val": float, "return": {"squared": float}})
def square(in_val):
return in_val**2
res = square(in_val=2.0)()
assert res.output.squared == 4.0
def test_return_halfannotated_annotated_task():
@task
@annotate({"in_val": float, "return": float})
def square(in_val):
return in_val**2
res = square(in_val=2.0)()
assert res.output.out == 4.0
def test_return_annotated_task_multiple_output():
@task
@annotate({"in_val": float, "return": {"squared": float, "cubed": float}})
def square(in_val):
return in_val**2, in_val**3
res = square(in_val=2.0)()
assert res.output.squared == 4.0
assert res.output.cubed == 8.0
def test_return_halfannotated_task_multiple_output():
@task
@annotate({"in_val": float, "return": (float, float)})
def square(in_val):
return in_val**2, in_val**3
res = square(in_val=2.0)()
assert res.output.out1 == 4.0
assert res.output.out2 == 8.0 |
6,930 | test image out painting | # SPDX-License-Identifier: Apache-2.0
import os
import sys
import unittest
import mock_keras2onnx
from mock_keras2onnx import set_converter
import numpy as np
from mock_keras2onnx.proto import keras
from os.path import dirname, abspath
sys.path.insert(0, os.path.join(dirname(abspath(__file__)), '../../keras2onnx_tests/'))
from test_utils import run_keras_and_ort, test_level_0, is_keras_older_than, convert_InstanceNormalizationLayer
K = keras.backend
Activation = keras.layers.Activation
AtrousConvolution2D = keras.layers.AtrousConvolution2D
AveragePooling2D = keras.layers.AveragePooling2D
Add = keras.layers.Add
BatchNormalization = keras.layers.BatchNormalization
concatenate = keras.layers.concatenate
Conv2D = keras.layers.Conv2D
Conv2DTranspose = keras.layers.Conv2DTranspose
Dense = keras.layers.Dense
Dropout = keras.layers.Dropout
Embedding = keras.layers.Embedding
Flatten = keras.layers.Flatten
GlobalAveragePooling2D = keras.layers.GlobalAveragePooling2D
Input = keras.layers.Input
Lambda = keras.layers.Lambda
LeakyReLU = keras.layers.LeakyReLU
MaxPooling2D = keras.layers.MaxPooling2D
multiply = keras.layers.multiply
Permute = keras.layers.Permute
Reshape = keras.layers.Reshape
SeparableConv2D = keras.layers.SeparableConv2D
UpSampling2D = keras.layers.UpSampling2D
ZeroPadding2D = keras.layers.ZeroPadding2D
if not is_keras_older_than("2.2.4"):
ReLU = keras.layers.ReLU
import keras_contrib
InstanceNormalization = keras_contrib.layers.InstanceNormalization
Sequential = keras.models.Sequential
Model = keras.models.Model
INPUT_SHAPE = (256, 256, 3)
EPOCHS = 500
BATCH = 1
# 25% i.e 64 width size will be mask from both side
MASK_PERCENTAGE = .25
EPSILON = 1e-9
ALPHA = 0.0004
d_input_shape = (INPUT_SHAPE[0], int(INPUT_SHAPE[1] * (MASK_PERCENTAGE *2)), INPUT_SHAPE[2])
d_dropout = 0.25
def d_build_conv(layer_input, filter_size, kernel_size=4, strides=2, activation='leakyrelu', dropout_rate=d_dropout,
norm=True):
c = Conv2D(filter_size, kernel_size=kernel_size, strides=strides, padding='same')(layer_input)
if activation == 'leakyrelu':
c = LeakyReLU(alpha=0.2)(c)
if dropout_rate:
c = Dropout(dropout_rate)(c)
if norm == 'inst':
c = InstanceNormalization()(c)
return c
def build_discriminator():
d_input = Input(shape=d_input_shape)
d = d_build_conv(d_input, 32, 5, strides=2, norm=False)
d = d_build_conv(d, 64, 5, strides=2)
d = d_build_conv(d, 64, 5, strides=2)
d = d_build_conv(d, 128, 5, strides=2)
d = d_build_conv(d, 128, 5, strides=2)
flat = Flatten()(d)
fc1 = Dense(1024, activation='relu')(flat)
d_output = Dense(1, activation='sigmoid')(fc1)
return Model(d_input, d_output)
g_input_shape = (INPUT_SHAPE[0], int(INPUT_SHAPE[1] * (MASK_PERCENTAGE *2)), INPUT_SHAPE[2])
g_dropout = 0.25
def g_build_conv(layer_input, filter_size, kernel_size=4, strides=2, activation='leakyrelu', dropout_rate=g_dropout,
norm='inst', dilation=1):
c = AtrousConvolution2D(filter_size, kernel_size=kernel_size, strides=strides, atrous_rate=(dilation, dilation),
padding='same')(layer_input)
if activation == 'leakyrelu':
c = ReLU()(c)
if dropout_rate:
c = Dropout(dropout_rate)(c)
if norm == 'inst':
c = InstanceNormalization()(c)
return c
def g_build_deconv(layer_input, filter_size, kernel_size=3, strides=2, activation='relu', dropout=0):
d = Conv2DTranspose(filter_size, kernel_size=kernel_size, strides=strides, padding='same')(layer_input)
if activation == 'relu':
d = ReLU()(d)
return d
def build_generator():
g_input = Input(shape=g_input_shape)
g1 = g_build_conv(g_input, 64, 5, strides=1)
g2 = g_build_conv(g1, 128, 4, strides=2)
g3 = g_build_conv(g2, 256, 4, strides=2)
g4 = g_build_conv(g3, 512, 4, strides=1)
g5 = g_build_conv(g4, 512, 4, strides=1)
g6 = g_build_conv(g5, 512, 4, strides=1, dilation=2)
g7 = g_build_conv(g6, 512, 4, strides=1, dilation=4)
g8 = g_build_conv(g7, 512, 4, strides=1, dilation=8)
g9 = g_build_conv(g8, 512, 4, strides=1, dilation=16)
g10 = g_build_conv(g9, 512, 4, strides=1)
g11 = g_build_conv(g10, 512, 4, strides=1)
g12 = g_build_deconv(g11, 256, 4, strides=2)
g13 = g_build_deconv(g12, 128, 4, strides=2)
g14 = g_build_conv(g13, 128, 4, strides=1)
g15 = g_build_conv(g14, 64, 4, strides=1)
g_output = AtrousConvolution2D(3, kernel_size=4, strides=(1, 1), activation='tanh', padding='same',
atrous_rate=(1, 1))(g15)
return Model(g_input, g_output)
set_converter(keras_contrib.layers.InstanceNormalization, convert_InstanceNormalizationLayer)
# Model from https://github.com/bendangnuksung/Image-OutPainting
class TestImageOutPainting(unittest.TestCase):
def setUp(self):
self.model_files = []
def tearDown(self):
for fl in self.model_files:
os.remove(fl)
@unittest.skipIf(test_level_0,
"Test level 0 only.")
def METHOD_NAME(self):
K.clear_session()
DCRM = build_discriminator()
GEN = build_generator()
IMAGE = Input(shape=g_input_shape)
GENERATED_IMAGE = GEN(IMAGE)
CONF_GENERATED_IMAGE = DCRM(GENERATED_IMAGE)
keras_model = Model(IMAGE, [CONF_GENERATED_IMAGE, GENERATED_IMAGE])
g_input_shape_batch = (2,) + g_input_shape
data = np.random.rand(*g_input_shape_batch).astype(np.float32)
expected = keras_model.predict(data)
onnx_model = mock_keras2onnx.convert_keras(keras_model, keras_model.name)
self.assertTrue(
run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected, self.model_files,
atol=1e-2, rtol=1e-2))
if __name__ == "__main__":
unittest.main() |
6,931 | compare counts | # -*- coding: utf-8 -*-
# Copyright 2021, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
import json
import os
import sys
from qiskit.result import Result
def assertDictAlmostEqual(dict1, dict2, delta=None, msg=None, places=None, default_value=0):
"""Assert two dictionaries with numeric values are almost equal.
Fail if the two dictionaries are unequal as determined by
comparing that the difference between values with the same key are
not greater than delta (default 1e-8), or that difference rounded
to the given number of decimal places is not zero. If a key in one
dictionary is not in the other the default_value keyword argument
will be used for the missing value (default 0). If the two objects
compare equal then they will automatically compare almost equal.
Args:
dict1 (dict): a dictionary.
dict2 (dict): a dictionary.
delta (number): threshold for comparison (defaults to 1e-8).
msg (str): return a custom message on failure.
places (int): number of decimal places for comparison.
default_value (number): default value for missing keys.
Raises:
TypeError: raises TestCase failureException if the test fails.
"""
if dict1 == dict2:
# Shortcut
return
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if places is not None:
success = True
standard_msg = ""
# check value for keys in target
keys1 = set(dict1.keys())
for key in keys1:
val1 = dict1.get(key, default_value)
val2 = dict2.get(key, default_value)
if round(abs(val1 - val2), places) != 0:
success = False
standard_msg += "(%s: %s != %s), " % (key, val1, val2)
# check values for keys in counts, not in target
keys2 = set(dict2.keys()) - keys1
for key in keys2:
val1 = dict1.get(key, default_value)
val2 = dict2.get(key, default_value)
if round(abs(val1 - val2), places) != 0:
success = False
standard_msg += "(%s: %s != %s), " % (key, val1, val2)
if success is True:
return
standard_msg = standard_msg[:-2] + " within %s places" % places
else:
if delta is None:
delta = 1e-8 # default delta value
success = True
standard_msg = ""
# check value for keys in target
keys1 = set(dict1.keys())
for key in keys1:
val1 = dict1.get(key, default_value)
val2 = dict2.get(key, default_value)
if abs(val1 - val2) > delta:
success = False
standard_msg += "(%s: %s != %s), " % (key, val1, val2)
# check values for keys in counts, not in target
keys2 = set(dict2.keys()) - keys1
for key in keys2:
val1 = dict1.get(key, default_value)
val2 = dict2.get(key, default_value)
if abs(val1 - val2) > delta:
success = False
standard_msg += "(%s: %s != %s), " % (key, val1, val2)
if success is True:
return
standard_msg = standard_msg[:-2] + " within %s delta" % delta
raise Exception(standard_msg)
def METHOD_NAME(result, target, delta=0):
"""Compare counts dictionary to targets."""
# Don't use get_counts method which converts hex
output = result.data(0)["counts"]
assertDictAlmostEqual(output, target, delta=delta)
if __name__ == "__main__":
if len(sys.argv) == 2:
with open(sys.argv[1], "rt") as fp:
result_dict = json.load(fp)
else:
result_dict = json.load(sys.stdin)
result = Result.from_dict(result_dict)
assert result.status == "COMPLETED"
assert result.success is True
if os.getenv("USE_MPI", False):
assert result.metadata["num_mpi_processes"] > 1
shots = result.results[0].shots
targets = {"0x0": 5 * shots / 8, "0x1": shots / 8, "0x2": shots / 8, "0x3": shots / 8}
METHOD_NAME(result, targets, delta=0.05 * shots)
print("Input result JSON is valid!") |
6,932 | test thread root id returns id if | from uuid import UUID
import pytest
from h.db.types import URLSafeUUID
from h.models.annotation import Annotation
def test_parent_id_of_direct_reply():
ann = Annotation(references=["parent_id"])
assert ann.parent_id == "parent_id"
def test_parent_id_of_reply_to_reply():
ann = Annotation(references=["reply1", "reply2", "parent_id"])
assert ann.parent_id == "parent_id"
def test_reply_is_reply():
ann = Annotation(references=["parent_id"])
assert ann.is_reply is True
def test_non_reply_is_not_reply():
ann = Annotation()
assert not ann.is_reply
def test_parent_id_of_annotation():
ann = Annotation()
assert ann.parent_id is None
def METHOD_NAME():
annotation = Annotation(id="GBhy1DoHEea6htPothzqZQ")
assert annotation.thread_root_id == "GBhy1DoHEea6htPothzqZQ"
def test_thread_root_id_returns_id_if_references_empty():
annotation = Annotation(id="jANlljoHEea6hsv8FY7ipw", references=[])
assert annotation.thread_root_id == "jANlljoHEea6hsv8FY7ipw"
def test_thread_root_id_returns_reference_if_only_one_reference():
annotation = Annotation(
id="qvJnIjoHEea6hiv0nJK7gw", references=["yiSVIDoHEea6hjcSFuROLw"]
)
assert annotation.thread_root_id == "yiSVIDoHEea6hjcSFuROLw"
def test_thread_root_id_returns_first_reference_if_many_references():
annotation = Annotation(
id="uK9yVjoHEea6hsewWuiKtQ",
references=[
"1Ife3DoHEea6hpv8vWujdQ",
"uVuItjoHEea6hiNgv1wvmg",
"Qe7fpc5ZRgWy0RSHEP9UNg",
],
)
assert annotation.thread_root_id == "1Ife3DoHEea6hpv8vWujdQ"
class TestTarget:
def test_it(self, factories):
annotation = factories.Annotation.build()
assert annotation.target == [
{"source": annotation.target_uri, "selector": annotation.target_selectors}
]
def test_it_with_no_selectors(self, factories):
annotation = factories.Annotation.build(target_selectors=[])
assert "selector" not in annotation.target
def test_text_setter_renders_markdown(markdown_render):
markdown_render.render.return_value = "<p>foobar</p>"
annotation = Annotation()
annotation.text = "foobar"
markdown_render.render.assert_called_once_with("foobar")
assert ( # pylint: disable=comparison-with-callable
annotation.text_rendered == markdown_render.render.return_value
)
@pytest.mark.parametrize(
"userid,authority",
[
("acct:bmason@hypothes.is", "hypothes.is"),
("acct:kaylawatson@elifesciences.org", "elifesciences.org"),
],
)
def test_authority(factories, userid, authority):
assert factories.Annotation(userid=userid).authority == authority
def test_authority_when_annotation_has_no_userid():
assert Annotation().authority is None
def test_setting_extras_inline_is_persisted(db_session, factories):
"""
In-place changes to Annotation.extra should be persisted.
Setting an Annotation.extra value in-place:
my_annotation.extra['foo'] = 'bar'
should be persisted to the database.
"""
annotation = factories.Annotation(userid="fred")
annotation.extra["foo"] = "bar"
# We need to commit the db session here so that the in-place change to
# annotation.extra above would be lost if annotation.extra was a normal
# dict. Without this commit() this test would never fail.
db_session.commit()
annotation = db_session.query(Annotation).get(annotation.id)
assert annotation.extra == {"foo": "bar"}
def test_deleting_extras_inline_is_persisted(db_session, factories):
"""
In-place changes to Annotation.extra should be persisted.
Deleting an Annotation.extra value in-place should be persisted to the
database.
"""
annotation = factories.Annotation(userid="fred", extra={"foo": "bar"})
del annotation.extra["foo"]
db_session.commit()
annotation = db_session.query(Annotation).get(annotation.id)
assert "foo" not in annotation.extra
def test_appending_tags_inline_is_persisted(db_session, factories):
"""
In-place changes to Annotation.tags should be persisted.
Changes made by Annotation.tags.append() should be persisted to the
database.
"""
annotation = factories.Annotation(userid="fred", tags=["foo"])
annotation.tags.append("bar")
db_session.commit()
annotation = db_session.query(Annotation).get(annotation.id)
assert "bar" in annotation.tags
def test_deleting_tags_inline_is_persisted(db_session, factories):
"""In-place deletions of annotation tags should be persisted."""
annotation = factories.Annotation(userid="fred", tags=["foo"])
del annotation.tags[0]
db_session.commit()
annotation = db_session.query(Annotation).get(annotation.id)
assert "foo" not in annotation.tags
class TestAnnotationGroup:
def test_it(self, group, factories):
annotation = factories.Annotation(group=group)
assert annotation.group == group
def test_it_works_with_pubid(self, group, factories):
annotation = factories.Annotation(groupid=group.pubid)
assert annotation.group == group
def test_it_returns_world_by_default(self, factories):
annotation = factories.Annotation()
assert annotation.group.pubid == "__world__"
@pytest.fixture
def group(self, factories):
return factories.Group(pubid="12345678")
class TestThread:
def test_empty_thread(self, root):
assert root.thread == []
def test_empty_thread_ids(self, root):
assert root.thread_ids == []
def test_thread_with_replies(self, root, reply, subreply):
assert set(root.thread) == {reply, subreply}
def test_thread_ids_with_replies(self, root, reply, subreply):
assert set(root.thread_ids) == {reply.id, subreply.id}
@pytest.mark.usefixtures("subreply")
def test_reply_has_no_thread(self, reply):
assert reply.thread == []
@pytest.mark.usefixtures("subreply")
def test_reply_has_no_thread_ids(self, reply):
assert reply.thread_ids == []
@pytest.fixture
def root(self, factories):
return factories.Annotation()
@pytest.fixture
def reply(self, factories, root):
return factories.Annotation(references=[root.id])
@pytest.fixture
def subreply(self, factories, root, reply):
return factories.Annotation(references=[root.id, reply.id])
@pytest.mark.parametrize("has_moderation", (True, False))
def test_is_hidden(factories, has_moderation):
annotation = factories.Annotation(
moderation=factories.AnnotationModeration() if has_moderation else None
)
assert annotation.is_hidden == has_moderation
def test_uuid(factories):
annotation = factories.Annotation()
assert annotation.uuid == UUID(URLSafeUUID.url_safe_to_hex(annotation.id))
@pytest.fixture
def markdown_render(patch):
return patch("h.models.annotation.markdown_render") |
6,933 | compress frames | import logging
from logging import LogRecord, Formatter, ERROR
from traceback import TracebackException
from colorlog import ColoredFormatter
from os.path import join, abspath
HISTORY_SIZE = 1000
PATH_BASE = abspath(join(__file__, "..", ".."))
logging.addLevelName(5, "TRACE")
logging.TRACE = 5
class HistoryHandler(logging.Handler):
def __init__(self):
super(HistoryHandler, self).__init__()
self.history = [None] * HISTORY_SIZE
self.history_index = 0
def reset(self):
self.history = [None] * HISTORY_SIZE
self.history_index = 0
def emit(self, record: LogRecord):
self.history[self.history_index % HISTORY_SIZE] = record
self.history_index += 1
def getHistory(self, start=0, html=False):
end = self.history_index
if end - start >= HISTORY_SIZE:
start = end - HISTORY_SIZE
for x in range(start, end):
item = self.history[x % HISTORY_SIZE]
if html:
if item.levelno == logging.WARN:
style = "console-warning"
elif item.levelno == logging.ERROR:
style = "console-error"
elif item.levelno == logging.DEBUG:
style = "console-debug"
elif item.levelno == logging.CRITICAL:
style = "console-critical"
elif item.levelno == logging.FATAL:
style = "console-fatal"
elif item.levelno == logging.WARNING:
style = "console-warning"
elif item.levelno == logging.TRACE:
style = "console-trace"
else:
style = "console-default"
line = "<span class='" + style + \
"'>" + self.format(item) + "</span>"
yield (x + 1, line)
else:
yield (x + 1, self.format(item))
def getLast(self) -> LogRecord:
return self.history[(self.history_index - 1) % HISTORY_SIZE]
CONSOLE = logging.StreamHandler()
CONSOLE.setLevel(logging.INFO)
formatter_color = ColoredFormatter(
'%(log_color)s%(asctime)s %(levelname)s %(message)s%(reset)s',
datefmt='%m-%d %H:%M:%S',
reset=True,
log_colors={
"DEBUG": "cyan",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red",
"TRACE": "white",
},
)
CONSOLE.setFormatter(formatter_color)
HISTORY = HistoryHandler()
HISTORY.setLevel(logging.DEBUG)
HISTORY.setFormatter(Formatter('%(asctime)s %(levelname)s [%(name)s] %(message)s', '%m-%d %H:%M:%S'))
class StandardLogger(logging.Logger):
def __init__(self, name):
super().__init__(name)
self.setLevel(logging.TRACE)
self.addHandler(CONSOLE)
self.addHandler(HISTORY)
def trace(self, msg, *args, **kwargs):
self.log(logging.TRACE, msg, *args, **kwargs)
def printException(self, ex: Exception, level=ERROR):
self.log(level, self.formatException(ex))
def formatException(self, e: Exception) -> str:
trace = None
if (hasattr(e, "__traceback__")):
trace = e.__traceback__
tbe = TracebackException(type(e), e, trace, limit=None)
lines = list(self._format(tbe))
return '\n%s' % ''.join(lines)
def _format(self, tbe):
if (tbe.__context__ is not None and not tbe.__suppress_context__):
yield from self._format(tbe.__context__)
yield "Whose handling caused:\n"
is_addon, stack = self._formatStack(tbe)
yield from stack
yield from tbe.format_exception_only()
def _formatStack(self, tbe):
_RECURSIVE_CUTOFF = 3
result = []
last_file = None
last_line = None
last_name = None
count = 0
is_addon = False
buffer = []
for frame in tbe.stack:
line_internal = True
if (last_file is None or last_file != frame.filename or last_line is None or last_line != frame.lineno or last_name is None or last_name != frame.name):
if count > _RECURSIVE_CUTOFF:
count -= _RECURSIVE_CUTOFF
result.append(
f' [Previous line repeated {count} more '
f'time{"s" if count > 1 else ""}]\n'
)
last_file = frame.filename
last_line = frame.lineno
last_name = frame.name
count = 0
count += 1
if count > _RECURSIVE_CUTOFF:
continue
fileName = frame.filename
pos = fileName.rfind(PATH_BASE)
if pos >= 0:
is_addon = True
line_internal = False
fileName = "addon" + \
fileName[pos + len(PATH_BASE):]
pos = fileName.rfind("site-packages")
if pos > 0:
fileName = fileName[pos - 1:]
pos = fileName.rfind("python3.7")
if pos > 0:
fileName = fileName[pos - 1:]
pass
line = ' {}:{} ({})\n'.format(fileName, frame.lineno, frame.name)
if line_internal:
buffer.append(line)
else:
result.extend(self.METHOD_NAME(buffer))
buffer = []
result.append(line)
if count > _RECURSIVE_CUTOFF:
count -= _RECURSIVE_CUTOFF
result.append(
f' [Previous line repeated {count} more '
f'time{"s" if count > 1 else ""}]\n'
)
result.extend(self.METHOD_NAME(buffer))
return is_addon, result
def overrideLevel(self, console, history):
CONSOLE.setLevel(console)
HISTORY.setLevel(history)
def METHOD_NAME(self, buffer):
if len(buffer) > 1:
yield buffer[0]
if len(buffer) == 3:
yield buffer[1]
elif len(buffer) > 2:
yield " [{} hidden frames]\n".format(len(buffer) - 2)
yield buffer[len(buffer) - 1]
elif len(buffer) > 0:
yield buffer[len(buffer) - 1]
pass
def getLogger(name):
return StandardLogger(name)
def getHistory(index, html):
return HISTORY.getHistory(index, html)
def getLast() -> LogRecord:
return HISTORY.getLast()
def reset() -> None:
return HISTORY.reset()
class TraceLogger(StandardLogger):
def __init__(self, name):
super().__init__(name)
self.setLevel(logging.TRACE)
def log(self, lvl, msg, *args, **kwargs):
super().log(logging.TRACE, msg, *args, **kwargs)
def info(self, *args, **kwargs):
super().log(logging.TRACE, *args, **kwargs)
def error(self, *args, **kwargs):
super().log(logging.TRACE, *args, **kwargs)
def warn(self, *args, **kwargs):
super().log(logging.TRACE, *args, **kwargs) |
6,934 | check checkout on base branch | # -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2023, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import annotations
# Standard library imports
import json
import os
from pathlib import Path
# External imports
from packaging.version import Version as V
# Bokeh imports
from .action import FAILED, PASSED, ActionReturn
from .config import Config
from .pipeline import StepType
from .system import System
from .util import skip_for_prerelease
__all__ = (
"check_anaconda_present",
"check_aws_present",
"check_checkout_matches_remote",
"check_checkout_on_base_branch",
"check_checkout_is_clean",
"check_docs_version_config",
"check_git_present",
"check_milestone_labels",
"check_release_tag_is_available",
"check_npm_present",
"check_repo_is_bokeh",
"check_staging_branch_is_available",
"check_twine_present",
"check_version_order",
)
def _check_app_present(app: str) -> StepType:
def func(config: Config, system: System) -> ActionReturn:
try:
system.run(f"which {app}")
return PASSED(f"Command {app!r} is available")
except RuntimeError:
return FAILED(f"Command {app!r} is missing")
func.__name__ = f"check_{app}_present"
return func
check_anaconda_present = _check_app_present("anaconda")
check_aws_present = _check_app_present("aws")
check_git_present = _check_app_present("git")
check_npm_present = _check_app_present("npm")
check_twine_present = _check_app_present("twine")
def check_repo_is_bokeh(config: Config, system: System) -> ActionReturn:
try:
system.run("git status")
except RuntimeError:
return FAILED("Executing outside of a git repository")
try:
remote = system.run("git config --get remote.origin.url")
if remote.strip() in ("git@github.com:bokeh/bokeh.git", "https://github.com/bokeh/bokeh"):
return PASSED("Executing inside the the bokeh/bokeh repository")
else:
return FAILED(f"Executing OUTSIDE the bokeh/bokeh repository (bad remote: {remote})")
except RuntimeError as e:
return FAILED("Could not determine Git config remote.origin.url", details=e.args)
@skip_for_prerelease
def check_release_notes_present(config: Config, system: System) -> ActionReturn:
try:
if os.path.exists(Path(f"docs/bokeh/source/docs/releases/{config.version}.rst")):
return PASSED(f"Release notes file '{config.version}.rst' exists")
else:
return FAILED(f"Release notes file '{config.version}.rst' does NOT exist")
except RuntimeError as e:
return FAILED("Could not check presence of release notes file", details=e.args)
def METHOD_NAME(config: Config, system: System) -> ActionReturn:
try:
branch = system.run("git rev-parse --abbrev-ref HEAD").strip()
if branch == config.base_branch:
return PASSED(f"Working on base branch {config.base_branch!r} for release {config.version!r}")
else:
return FAILED(f"NOT working on base branch {config.base_branch!r} for release {config.version!r}")
except RuntimeError as e:
return FAILED("Could not check the checkout branch", details=e.args)
def check_checkout_is_clean(config: Config, system: System) -> ActionReturn:
try:
extras = system.run("git status --porcelain").split("\n")
extras = [x for x in extras if x != ""]
if extras:
return FAILED("Local checkout is NOT clean", extras)
else:
return PASSED("Local checkout is clean")
except RuntimeError as e:
return FAILED("Could not check the checkout state", details=e.args)
def check_checkout_matches_remote(config: Config, system: System) -> ActionReturn:
try:
system.run("git remote update")
local = system.run("git rev-parse @")
remote = system.run("git rev-parse @{u}")
base = system.run("git merge-base @ @{u}")
if local == remote:
return PASSED("Checkout is up to date with GitHub")
else:
if local == base:
status = "NEED TO PULL"
elif remote == base:
status = "NEED TO PUSH"
else:
status = "DIVERGED"
return FAILED(f"Checkout is NOT up to date with GitHub ({status})")
except RuntimeError as e:
return FAILED("Could not check whether local and GitHub are up to date", details=e.args)
@skip_for_prerelease
def check_docs_version_config(config: Config, system: System) -> ActionReturn:
try:
with open(Path("docs/bokeh/switcher.json")) as fp:
switcher = json.load(fp)
all_versions = set(x["version"] for x in switcher if "version" in x)
if config.version not in all_versions:
return FAILED(f"Version {config.version!r} is missing from switcher.json")
return PASSED("Docs versions config is correct")
except RuntimeError as e:
return FAILED("Could not check docs versions config", details=e.args)
def check_release_tag_is_available(config: Config, system: System) -> ActionReturn:
try:
out = system.run("git for-each-ref --sort=-taggerdate --format '%(tag)' refs/tags")
tags = [x.strip("'\"") for x in out.split("\n")]
if config.version in tags:
return FAILED(f"There is already an existing tag for new version {config.version!r}")
else:
return PASSED(f"New version {config.version!r} does not already have a tag")
except RuntimeError as e:
return FAILED("Could not check release tag availability", details=e.args)
def check_version_order(config: Config, system: System) -> ActionReturn:
try:
out = system.run("git for-each-ref --sort=-taggerdate --format '%(tag)' refs/tags")
tags = [x.strip("'\"") for x in out.split("\n")]
if all(V(config.version) > V(tag) for tag in tags if tag.startswith(config.release_level)):
return PASSED(f"Version {config.version!r} is newer than any tag at release level {config.release_level!r}")
else:
return FAILED(f"Version {config.version!r} is older than an existing tag at release level {config.release_level!r}")
except RuntimeError as e:
return FAILED("Could compare tag version order", details=e.args)
def check_staging_branch_is_available(config: Config, system: System) -> ActionReturn:
out = system.run(f"git branch --list {config.staging_branch}")
if out:
return FAILED(f"Release branch {config.staging_branch!r} ALREADY exists")
else:
return PASSED(f"Release branch {config.staging_branch!r} does not already exist")
@skip_for_prerelease
def check_milestone_labels(config: Config, system: System) -> ActionReturn:
try:
# system.run(f"python scripts/milestone.py {config.version} --check-only")
return PASSED("Milestone labels are BEP-1 compliant")
except RuntimeError as e:
return FAILED("Milesstone labels are NOT BEP-1 compliant", e.args) |
6,935 | open | # Copyright 2021-2022 python-tuf contributors
# SPDX-License-Identifier: MIT OR Apache-2.0
"""Simple example of using the repository library to build a repository"""
import copy
import json
import logging
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Dict, List
from securesystemslib import keys
from securesystemslib.signer import Key, Signer, SSlibKey, SSlibSigner
from tuf.api.exceptions import RepositoryError
from tuf.api.metadata import (
DelegatedRole,
Delegations,
Metadata,
MetaFile,
Root,
Snapshot,
TargetFile,
Targets,
Timestamp,
)
from tuf.repository import Repository
logger = logging.getLogger(__name__)
_signed_init = {
Root.type: Root,
Snapshot.type: Snapshot,
Targets.type: Targets,
Timestamp.type: Timestamp,
}
class SimpleRepository(Repository):
"""Very simple in-memory repository implementation
This repository keeps the metadata for all versions of all roles in memory.
It also keeps all target content in memory.
Attributes:
role_cache: Every historical metadata version of every role in this
repository. Keys are role names and values are lists of Metadata
signer_cache: All signers available to the repository. Keys are role
names, values are lists of signers
target_cache: All target files served by the repository. Keys are
target paths and values are file contents as bytes.
"""
expiry_period = timedelta(days=1)
def __init__(self) -> None:
# all versions of all metadata
self.role_cache: Dict[str, List[Metadata]] = defaultdict(list)
# all current keys
self.signer_cache: Dict[str, List[Signer]] = defaultdict(list)
# all target content
self.target_cache: Dict[str, bytes] = {}
# version cache for snapshot and all targets, updated in close().
# The 'defaultdict(lambda: ...)' trick allows close() to easily modify
# the version without always creating a new MetaFile
self._snapshot_info = MetaFile(1)
self._targets_infos: Dict[str, MetaFile] = defaultdict(
lambda: MetaFile(1)
)
# setup a basic repository, generate signing key per top-level role
with self.edit_root() as root:
for role in ["root", "timestamp", "snapshot", "targets"]:
key = keys.generate_ed25519_key()
self.signer_cache[role].append(SSlibSigner(key))
root.add_key(SSlibKey.from_securesystemslib_key(key), role)
for role in ["timestamp", "snapshot", "targets"]:
with self.edit(role):
pass
@property
def targets_infos(self) -> Dict[str, MetaFile]:
return self._targets_infos
@property
def snapshot_info(self) -> MetaFile:
return self._snapshot_info
def METHOD_NAME(self, role: str) -> Metadata:
"""Return current Metadata for role from 'storage' (or create a new one)"""
if role not in self.role_cache:
signed_init = _signed_init.get(role, Targets)
md = Metadata(signed_init())
# this makes version bumping in close() simpler
md.signed.version = 0
return md
# return latest metadata from storage (but don't return a reference)
return copy.deepcopy(self.role_cache[role][-1])
def close(self, role: str, md: Metadata) -> None:
"""Store a version of metadata. Handle version bumps, expiry, signing"""
md.signed.version += 1
md.signed.expires = datetime.utcnow() + self.expiry_period
md.signatures.clear()
for signer in self.signer_cache[role]:
md.sign(signer, append=True)
# store new metadata version, update version caches
self.role_cache[role].append(md)
if role == "snapshot":
self._snapshot_info.version = md.signed.version
elif role not in ["root", "timestamp"]:
self._targets_infos[f"{role}.json"].version = md.signed.version
def add_target(self, path: str, content: str) -> None:
"""Add a target to top-level targets metadata"""
data = bytes(content, "utf-8")
# add content to cache for serving to clients
self.target_cache[path] = data
# add a target in the targets metadata
with self.edit_targets() as targets:
targets.targets[path] = TargetFile.from_data(path, data)
logger.debug("Targets v%d", targets.version)
# update snapshot, timestamp
self.do_snapshot()
self.do_timestamp()
def submit_delegation(self, rolename: str, data: bytes) -> bool:
"""Add a delegation to a (offline signed) delegated targets metadata"""
try:
logger.debug("Processing new delegation to role %s", rolename)
keyid, keydict = next(iter(json.loads(data).items()))
key = Key.from_dict(keyid, keydict)
# add delegation and key
role = DelegatedRole(rolename, [], 1, True, [f"{rolename}/*"])
with self.edit_targets() as targets:
if targets.delegations is None:
targets.delegations = Delegations({}, {})
if targets.delegations.roles is None:
targets.delegations.roles = {}
targets.delegations.roles[rolename] = role
targets.add_key(key, rolename)
except (RepositoryError, json.JSONDecodeError) as e:
logger.info("Failed to add delegation for %s: %s", rolename, e)
return False
logger.debug("Targets v%d", targets.version)
# update snapshot, timestamp
self.do_snapshot()
self.do_timestamp()
return True
def submit_role(self, role: str, data: bytes) -> bool:
"""Add a new version of a delegated roles metadata"""
try:
logger.debug("Processing new version for role %s", role)
if role in ["root", "snapshot", "timestamp", "targets"]:
raise ValueError("Only delegated targets are accepted")
md = Metadata.from_bytes(data)
for targetpath in md.signed.targets:
if not targetpath.startswith(f"{role}/"):
raise ValueError(f"targets allowed under {role}/ only")
self.targets().verify_delegate(role, md.signed_bytes, md.signatures)
if md.signed.version != self.targets(role).version + 1:
raise ValueError("Invalid version {md.signed.version}")
except (RepositoryError, ValueError) as e:
logger.info("Failed to add new version for %s: %s", role, e)
return False
# Checks passed: Add new delegated role version
self.role_cache[role].append(md)
self._targets_infos[f"{role}.json"].version = md.signed.version
logger.debug("%s v%d", role, md.signed.version)
# To keep it simple, target content is generated from targetpath
for targetpath in md.signed.targets:
self.target_cache[targetpath] = bytes(f"{targetpath}", "utf-8")
# update snapshot, timestamp
self.do_snapshot()
self.do_timestamp()
return True |
6,936 | test creation func | # Copyright 2021-2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from itertools import product
import numpy as np
import pytest
import cunumeric as num
def test_array():
x = num.array([1, 2, 3])
y = np.array([1, 2, 3])
z = num.array(y)
assert np.array_equal(x, z)
assert x.dtype == z.dtype
x = num.array([1, 2, 3])
y = num.array(x)
assert num.array_equal(x, y)
assert x.dtype == y.dtype
CREATION_FUNCTIONS = ("zeros", "ones")
FILLED_VALUES = [0, 1, 1000, 123.456]
SIZES = (0, 1, 2)
NDIMS = 5
DTYPES = (np.uint32, np.int32, np.float64, np.complex128)
def test_empty():
par = (SIZES, range(NDIMS), DTYPES)
for size, ndims, dtype in product(*par):
shape = ndims * [size]
xf = num.empty(shape, dtype=dtype)
yf = np.empty(shape, dtype=dtype)
assert xf.shape == yf.shape
assert xf.dtype == yf.dtype
@pytest.mark.parametrize("fn", CREATION_FUNCTIONS)
def METHOD_NAME(fn):
num_f = getattr(num, fn)
np_f = getattr(np, fn)
par = (SIZES, range(NDIMS), DTYPES)
for size, ndims, dtype in product(*par):
shape = ndims * [size]
xf = num_f(shape, dtype=dtype)
yf = np_f(shape, dtype=dtype)
assert np.array_equal(xf, yf)
assert xf.dtype == yf.dtype
@pytest.mark.parametrize("value", FILLED_VALUES)
def test_full(value):
par = (SIZES, range(NDIMS), DTYPES)
for size, ndims, dtype in product(*par):
shape = ndims * [size]
xf = num.full(shape, value, dtype=dtype)
yf = np.full(shape, value, dtype=dtype)
assert np.array_equal(xf, yf)
assert xf.dtype == yf.dtype
SHAPES_NEGATIVE = [
-1,
(-1, 2, 3),
np.array([2, -3, 4]),
]
class TestCreationErrors:
def setup_method(self):
self.bad_type_shape = (2, 3.0)
@pytest.mark.parametrize("shape", SHAPES_NEGATIVE, ids=str)
class TestNegativeShape:
@pytest.mark.parametrize("fn", ("empty", "zeros", "ones"))
def test_creation(self, shape, fn):
with pytest.raises(ValueError):
getattr(num, fn)(shape)
def test_full(self, shape):
with pytest.raises(ValueError):
num.full(shape, 10)
@pytest.mark.parametrize("fn", ("empty", "zeros", "ones"))
def test_creation_bad_type(self, fn):
with pytest.raises(TypeError):
getattr(num, fn)(self.bad_type_shape)
def test_full_bad_type(self):
with pytest.raises(TypeError):
num.full(self.bad_type_shape, 10)
# additional special case for full
def test_full_bad_filled_value(self):
with pytest.raises(ValueError):
num.full((2, 3), [10, 20, 30])
DATA_ARGS = [
# Array scalars
(np.array(3.0), None),
(np.array(3), "f8"),
# 1D arrays
(np.array([]), None),
(np.arange(6, dtype="f4"), None),
(np.arange(6), "c16"),
# 2D arrays
(np.array([[]]), None),
(np.arange(6).reshape(2, 3), None),
(np.arange(6).reshape(3, 2), "i1"),
# 3D arrays
(np.array([[[]]]), None),
(np.arange(24).reshape(2, 3, 4), None),
(np.arange(24).reshape(4, 3, 2), "f4"),
]
LIKE_FUNCTIONS = ("zeros_like", "ones_like")
SHAPE_ARG = (None, (-1,), (1, -1))
@pytest.mark.parametrize("x_np,dtype", DATA_ARGS)
@pytest.mark.parametrize("shape", SHAPE_ARG)
def test_empty_like(x_np, dtype, shape):
shape = shape if shape is None else x_np.reshape(shape).shape
x = num.array(x_np)
xfl = num.empty_like(x, dtype=dtype, shape=shape)
yfl = np.empty_like(x_np, dtype=dtype, shape=shape)
assert xfl.shape == yfl.shape
assert xfl.dtype == yfl.dtype
@pytest.mark.parametrize("x_np,dtype", DATA_ARGS)
@pytest.mark.parametrize("fn", LIKE_FUNCTIONS)
@pytest.mark.parametrize("shape", SHAPE_ARG)
def test_func_like(fn, x_np, dtype, shape):
shape = shape if shape is None else x_np.reshape(shape).shape
num_f = getattr(num, fn)
np_f = getattr(np, fn)
x = num.array(x_np)
xfl = num_f(x, dtype=dtype, shape=shape)
yfl = np_f(x_np, dtype=dtype, shape=shape)
assert np.array_equal(xfl, yfl)
assert xfl.dtype == yfl.dtype
@pytest.mark.parametrize("value", FILLED_VALUES)
@pytest.mark.parametrize("x_np, dtype", DATA_ARGS)
@pytest.mark.parametrize("shape", SHAPE_ARG)
def test_full_like(x_np, dtype, value, shape):
shape = shape if shape is None else x_np.reshape(shape).shape
x = num.array(x_np)
xfl = num.full_like(x, value, dtype=dtype, shape=shape)
yfl = np.full_like(x_np, value, dtype=dtype, shape=shape)
assert np.array_equal(xfl, yfl)
assert xfl.dtype == yfl.dtype
def test_full_like_bad_filled_value():
x = num.array([[1, 2, 3], [4, 5, 6]])
with pytest.raises(ValueError):
num.full_like(x, [10, 20, 30])
ARANGE_ARGS = [
(0,),
(10,),
(3.5,),
(3.0, 8, None),
(-10,),
(2, 10),
(2, -10),
(-2.5, 10.0),
(1, -10, -2.5),
(1.0, -10.0, -2.5),
(-10, 10, 10),
(-10, 10, -100),
]
@pytest.mark.parametrize("args", ARANGE_ARGS, ids=str)
def test_arange(args):
x = num.arange(*args)
y = np.arange(*args)
assert np.array_equal(x, y)
assert x.dtype == y.dtype
@pytest.mark.parametrize("dtype", [np.int32, np.float64], ids=str)
@pytest.mark.parametrize("args", ARANGE_ARGS, ids=str)
def test_arange_with_dtype(args, dtype):
x = num.arange(*args, dtype=dtype)
y = np.arange(*args, dtype=dtype)
assert np.array_equal(x, y)
assert x.dtype == y.dtype
ARANGE_ARGS_STEP_ZERO = [
(0, 0, 0),
(0, 10, 0),
(-10, 10, 0),
(1, 10, 0),
(10, -10, 0),
(0.0, 0.0, 0.0),
(0.0, 10.0, 0.0),
(-10.0, 10.0, 0.0),
(1.0, 10.0, 0.0),
(10.0, -10.0, 0.0),
]
class TestArrangeErrors:
def test_inf(self):
with pytest.raises(OverflowError):
num.arange(0, num.inf)
def test_nan(self):
with pytest.raises(ValueError):
num.arange(0, 1, num.nan)
@pytest.mark.parametrize("args", ARANGE_ARGS_STEP_ZERO, ids=str)
def test_zero_division(self, args):
with pytest.raises(ZeroDivisionError):
num.arange(*args)
def test_zero_with_nd_ndarray_shape():
shape = num.array([2, 3, 4])
x = num.zeros(shape)
y = np.zeros(shape)
assert np.array_equal(x, y)
shape = np.array([2, 3, 4])
x = num.zeros(shape)
y = np.zeros(shape)
assert np.array_equal(x, y)
def test_zero_with_0d_ndarray_shape():
shape = num.array(3)
x = num.zeros(shape)
y = np.zeros(shape)
assert np.array_equal(x, y)
shape = np.array(3)
x = num.zeros(shape)
y = np.zeros(shape)
assert np.array_equal(x, y)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(sys.argv)) |
6,937 | test get columns returns all columns | from mindsdb.integrations.handlers.binance_handler.binance_tables import BinanceAggregatedTradesTable
from mindsdb.integrations.handlers.binance_handler.binance_handler import BinanceHandler
from mindsdb_sql.parser import ast
from mindsdb_sql.parser.ast.select.star import Star
from mindsdb_sql.parser.ast.select.identifier import Identifier
from unittest.mock import Mock
import pandas as pd
import unittest
class BinanceAggregatedTradesTableTest(unittest.TestCase):
def METHOD_NAME(self):
api_handler = Mock(BinanceHandler)
trades_table = BinanceAggregatedTradesTable(api_handler)
# Order matters.
expected_columns = [
'symbol',
'open_time',
'open_price',
'high_price',
'low_price',
'close_price',
'volume',
'close_time',
'quote_asset_volume',
'number_of_trades',
'taker_buy_base_asset_volume',
'taker_buy_quote_asset_volume'
]
self.assertListEqual(trades_table.get_columns(), expected_columns)
def test_select_returns_all_columns(self):
api_handler = Mock(BinanceHandler)
api_handler.call_binance_api.return_value = pd.DataFrame([
[
'symbol', # Symbol
1499040000000, # Kline open time
'0.01634790', # Open price
'0.80000000', # High price
'0.01575800', # Low price
'0.01577100', # Close price
'148976.11427815', # Volume
1499644799999, # Kline Close time
'2434.19055334', # Quote asset volume
308, # Number of trades
'1756.87402397', # Taker buy base asset volume
'28.46694368', # Taker buy quote asset volume
]
])
trades_table = BinanceAggregatedTradesTable(api_handler)
select_all = ast.Select(
targets=[Star()],
from_table='aggregated_trade_data',
where='aggregated_trade_data.symbol = "symbol"'
)
all_trade_data = trades_table.select(select_all)
first_trade_data = all_trade_data.iloc[0]
self.assertEqual(all_trade_data.shape[1], 12)
self.assertEqual(first_trade_data['symbol'], 'symbol')
self.assertEqual(first_trade_data['open_time'], 1499040000000)
self.assertEqual(first_trade_data['open_price'], '0.01634790')
self.assertEqual(first_trade_data['high_price'], '0.80000000')
self.assertEqual(first_trade_data['low_price'], '0.01575800')
self.assertEqual(first_trade_data['close_price'], '0.01577100')
self.assertEqual(first_trade_data['volume'], '148976.11427815')
self.assertEqual(first_trade_data['close_time'], 1499644799999)
self.assertEqual(first_trade_data['quote_asset_volume'], '2434.19055334')
self.assertEqual(first_trade_data['number_of_trades'], 308)
self.assertEqual(first_trade_data['taker_buy_base_asset_volume'], '1756.87402397')
self.assertEqual(first_trade_data['taker_buy_quote_asset_volume'], '28.46694368')
def test_select_returns_only_selected_columns(self):
api_handler = Mock(BinanceHandler)
api_handler.call_binance_api.return_value = pd.DataFrame([
[
'symbol', # Symbol
1499040000000, # Kline open time
'0.01634790', # Open price
'0.80000000', # High price
'0.01575800', # Low price
'0.01577100', # Close price
'148976.11427815', # Volume
1499644799999, # Kline Close time
'2434.19055334', # Quote asset volume
308, # Number of trades
'1756.87402397', # Taker buy base asset volume
'28.46694368', # Taker buy quote asset volume
]
])
trades_table = BinanceAggregatedTradesTable(api_handler)
open_time_identifier = Identifier(path_str='open_time')
close_time_identifier = Identifier(path_str='close_time')
select_times = ast.Select(
targets=[open_time_identifier, close_time_identifier],
from_table='aggregated_trade_data',
where='aggregated_trade_data.symbol = "symbol"'
)
all_trade_data = trades_table.select(select_times)
first_trade_data = all_trade_data.iloc[0]
self.assertEqual(all_trade_data.shape[1], 2)
self.assertEqual(first_trade_data['open_time'], 1499040000000)
self.assertEqual(first_trade_data['close_time'], 1499644799999) |
6,938 | distribution | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from collections import ChainMap
from typing import List, Optional, Tuple
import numpy as np
from mxnet import gluon
from gluonts.core.component import validated
from gluonts.mx import Tensor
from . import Distribution
from .bijection import AffineTransformation
from .bijection_output import BijectionOutput
from .distribution_output import ArgProj, DistributionOutput
from .transformed_distribution import TransformedDistribution
class TransformedDistributionOutput(DistributionOutput):
r"""
Class to connect a network to a distribution that is transformed
by a sequence of learnable bijections.
"""
@validated()
def __init__(
self,
base_distr_output: DistributionOutput,
transforms_output: List[BijectionOutput],
) -> None:
super().__init__()
self.base_distr_output = base_distr_output
self.transforms_output = transforms_output
self.base_distr_args_dim = base_distr_output.args_dim
self.transforms_args_dim = [
transform.args_dim for transform in transforms_output
]
def _fuse(t1: Tuple, t2: Tuple) -> Tuple:
if len(t1) > len(t2):
t1, t2 = t2, t1
# from here on len(t2) >= len(t1)
assert t2[-len(t1) :] == t1
return t2
self._event_shape: Tuple[int, ...] = ()
for to in self.transforms_output:
self._event_shape = _fuse(self._event_shape, to.event_shape)
def get_args_proj(self, prefix: Optional[str] = None) -> ArgProj:
return ArgProj(
args_dim=dict(
self.base_distr_args_dim,
**dict(ChainMap(*self.transforms_args_dim)),
),
domain_map=gluon.nn.HybridLambda(self.domain_map),
prefix=prefix,
)
def _split_args(self, args):
# Since hybrid_forward does not support dictionary, we have to separate
# the raw outputs of the network based on the indices and map them to
# the learnable parameters
num_distr_args = len(self.base_distr_args_dim)
distr_args = args[0:num_distr_args]
num_transforms_args = [
len(transform_dim_args)
for transform_dim_args in self.transforms_args_dim
]
# starting indices of arguments for each transformation
num_args_cumsum = np.cumsum([num_distr_args] + num_transforms_args)
# get the arguments for each of the transformations
transforms_args = list(
map(
lambda ixs: args[ixs[0] : ixs[1]],
zip(num_args_cumsum, num_args_cumsum[1:]),
)
)
return distr_args, transforms_args
def domain_map(self, F, *args: Tensor):
distr_args, transforms_args = self._split_args(args)
distr_params = self.base_distr_output.domain_map(F, *distr_args)
transforms_params = [
transform_output.domain_map(F, *transform_args)
for transform_output, transform_args in zip(
self.transforms_output, transforms_args
)
]
# flatten the nested tuple
return sum(tuple([distr_params] + transforms_params), ())
def METHOD_NAME(
self,
distr_args,
loc: Optional[Tensor] = None,
scale: Optional[Tensor] = None,
) -> Distribution:
distr_args, transforms_args = self._split_args(distr_args)
distr = self.base_distr_output.distr_cls(*distr_args)
transforms = [
transform_output.bij_cls(*bij_args)
for transform_output, bij_args in zip(
self.transforms_output, transforms_args
)
]
trans_distr = TransformedDistribution(distr, transforms)
# Apply scaling as well at the end if scale is not None!
if loc is None and scale is None:
return trans_distr
else:
return TransformedDistribution(
trans_distr, [AffineTransformation(loc=loc, scale=scale)]
)
@property
def event_shape(self) -> Tuple:
return self._event_shape |
6,939 | plotting error | # -*- coding: utf-8 -*-
"""
Description:
define the object to initialize the plot option subwindow
Author: YingzhiGou
Date: 20/06/2017
"""
from qtpy import QtCore
from qtpy.QtWidgets import QWidget, QMessageBox
from mtpy.gui.SmartMT.gui.busy_indicators import BusyOverlay
from mtpy.gui.SmartMT.ui_asset.plot_options import Ui_PlotOption
from mtpy.gui.SmartMT.visualization import *
from mtpy.gui.SmartMT.visualization.visualization_base import MPLCanvasWidget
from mtpy.utils.mtpylog import MtPyLog
class PlotOption(QWidget):
def __init__(self, parent, file_handler, selected_files):
"""
:param parent:
:type parent: StartQt4
:param file_handler:
:type file_handler: FileHandler
:param selected_files:
:type selected_files: set
"""
QWidget.__init__(self, parent)
self._parent = parent
self._logger = MtPyLog.get_mtpy_logger(self.__class__.__name__)
self.file_handler = file_handler
self.selected_stations = selected_files
self._current_plot = None
self.ui = Ui_PlotOption()
self.ui.setupUi(self)
# hide cancel button
self.ui.pushButton_cancel.hide()
# populate dropdown menu
self.plotOptions = []
# print VisualizationBase.__subclasses__()
for child in VisualizationBase.__subclasses__():
name = child.plot_name()
if name not in self.plotOptions:
self.plotOptions.append(child)
self.ui.comboBoxSelect_Plot.addItem(name)
else:
raise Exception("Duplicated Plot Name: %s in class %s" % (name, child.__name__))
# busy overlay
self._busy_overlay = BusyOverlay(self)
self._busy_overlay.hide()
# connect signals
self.ui.comboBoxSelect_Plot.currentIndexChanged.connect(self._selection_changed)
self.ui.pushButton_plot.clicked.connect(self._create_plot)
self.ui.pushButton_cancel.clicked.connect(self._cancel_plot)
if VisualizationBase.__subclasses__():
self.ui.comboBoxSelect_Plot.setEnabled(True)
self.ui.comboBoxSelect_Plot.setCurrentIndex(0)
else:
self.ui.comboBoxSelect_Plot.setEnabled(False)
def resizeEvent(self, event):
size = event.size()
size.setHeight(size.height() - self.ui.pushButton_plot.height()) # give space to the buttons
self._busy_overlay.resize(size)
# self._busy_overlay.resize(event.size())
event.accept()
def _selection_changed(self, *args, **kwargs):
# print "selection changed"
index = self.ui.comboBoxSelect_Plot.currentIndex()
plot_option = self.plotOptions[index]
description = plot_option.plot_description()
# print description
self.ui.textEditPlot_Description.setText(description)
# set parameter ui
if self._current_plot is not None:
# self.ui.verticalLayout.removeWidget(self._current_plot.parameter_ui)
self._current_plot.parameter_ui.deleteLater()
self._current_plot = plot_option(self)
# connect signal
self._current_plot.started.connect(self._busy_overlay.show)
self._current_plot.started.connect(self._tuggle_plot_cancel)
self._current_plot.finished.connect(self._busy_overlay.hide)
self._current_plot.finished.connect(self._tuggle_plot_cancel)
self._current_plot.plotting_completed.connect(self._show_plot)
self._current_plot.plotting_error.connect(self.METHOD_NAME)
self.ui.verticalLayout.addWidget(self._current_plot.parameter_ui)
# self.resize(self.width(), self.sizeHint().height())
self.update_ui()
def _create_plot(self):
self._current_plot.start(QtCore.QThread.HighPriority)
def _cancel_plot(self):
# self._current_plot.terminate() # this does not work
self._current_plot.wait()
def _tuggle_plot_cancel(self):
self.ui.pushButton_cancel.setHidden(not self.ui.pushButton_cancel.isHidden())
self.ui.pushButton_plot.setHidden(not self.ui.pushButton_plot.isHidden())
def METHOD_NAME(self, msg, trace):
msg_box = QMessageBox(self)
msg_box.setIcon(QMessageBox.Critical)
msg_box.setText('Plotting Error')
msg_box.setInformativeText(msg)
msg_box.setDetailedText(trace)
msg_box.setStandardButtons(QMessageBox.Close)
msg_box.exec_()
# QtGui.QMessageBox.critical(self,
# 'Plotting Error', msg,
# QtGui.QMessageBox.Close)
def _show_plot(self):
fig = self._current_plot.get_fig()
if fig:
# self._fig.show()
widget = MPLCanvasWidget(fig)
self._parent.create_subwindow(widget, "%s" % self._current_plot.plot_name(), override=False,
tooltip=self._current_plot.get_plot_tooltip())
def update_ui(self):
if self._current_plot is not None:
self._current_plot.set_data(self.get_mt_objs())
def get_mt_objs(self):
mt_objs = []
for selected_station in self.selected_stations:
ref = self.file_handler.station2ref(selected_station)
mt_obj = self.file_handler.get_MT_obj(ref)
mt_objs.append(mt_obj)
return mt_objs
def data_changed(self):
enabled = bool(self.selected_stations)
self.ui.pushButton_plot.setEnabled(enabled)
self.ui.comboBoxSelect_Plot.setEnabled(enabled)
self.ui.comboBoxSelect_Plot.currentIndexChanged.emit(0)
self.update_ui() |
6,940 | encrypt | '''
Cramer-Shoup Public Key Encryption Scheme (Decisional Diffie-Hellman Assumption in groups of prime order)
| From: "R. Cramer, V. Shoup: A practical public key cryptosystem provably secure against adaptive chosen ciphertext attack"
| Published in: CRYPTO 1998
| Available from: http://knot.kaist.ac.kr/seminar/archive/46/46.pdf
| Notes:
* type: encryption (public key)
* setting: DDH-hard EC groups of prime order (F_p) or Integer Groups
* assumption: DDH
* Name: PKEnc_DDH_CCA_CS98
:Authors: Matthew Green
:Date: 1/2011
'''
from charm.toolbox.ecgroup import G
from charm.toolbox.PKEnc import PKEnc
# type definitions
#pk_t = { 'g1' : G, 'g2' : G, 'c' : G, 'd' : G, 'h' : G }
#sk_t = { 'x1' : ZR, 'x2' : ZR, 'y1' : ZR, 'y2' : ZR, 'z' : ZR }
#c_t = { 'u1' : G, 'u2' : G, 'e' : G, 'v' : G }
#str_t = str
debug = False
class CS98(PKEnc):
"""
>>> from charm.toolbox.eccurve import prime192v1
>>> from charm.toolbox.ecgroup import ECGroup
>>> groupObj = ECGroup(prime192v1)
>>> pkenc = CS98(groupObj)
>>> (public_key, secret_key) = pkenc.keygen()
>>> msg = b"hello world!!!123456"
>>> cipher_text = pkenc.encrypt(public_key, msg)
>>> decrypted_msg = pkenc.decrypt(public_key, secret_key, cipher_text)
>>> decrypted_msg == msg
True
>>> from charm.toolbox.integergroup import IntegerGroup, integer
>>> p = integer(156053402631691285300957066846581395905893621007563090607988086498527791650834395958624527746916581251903190331297268907675919283232442999706619659475326192111220545726433895802392432934926242553363253333261282122117343404703514696108330984423475697798156574052962658373571332699002716083130212467463571362679)
>>> q = integer(78026701315845642650478533423290697952946810503781545303994043249263895825417197979312263873458290625951595165648634453837959641616221499853309829737663096055610272863216947901196216467463121276681626666630641061058671702351757348054165492211737848899078287026481329186785666349501358041565106233731785681339)
>>> groupObj = IntegerGroup()
>>> pkenc = CS98(groupObj, p, q)
>>> (public_key, secret_key) = pkenc.keygen(1024)
>>> msg = b"hello world. test message"
>>> cipher_text = pkenc.encrypt(public_key, msg)
>>> decrypted_msg = pkenc.decrypt(public_key, secret_key, cipher_text)
>>> decrypted_msg == msg
True
"""
def __init__(self, groupObj, p=0, q=0):
PKEnc.__init__(self)
global group
group = groupObj
if group.groupSetting() == 'integer':
group.p, group.q, group.r = p, q, 2
# @output(pk_t, sk_t)
def keygen(self, secparam=0):
if group.groupSetting() == 'integer':
if group.p == 0 or group.q == 0:
group.paramgen(secparam)
p = group.p
g1, g2 = group.randomGen(), group.randomGen()
elif group.groupSetting() == 'elliptic_curve':
group.paramgen(secparam)
g1, g2 = group.random(G), group.random(G)
x1, x2, y1, y2, z = group.random(), group.random(), group.random(), group.random(), group.random()
c = ((g1 ** x1) * (g2 ** x2))
d = ((g1 ** y1) * (g2 ** y2))
h = (g1 ** z)
pk = { 'g1' : g1, 'g2' : g2, 'c' : c, 'd' : d, 'h' : h }
sk = { 'x1' : x1, 'x2' : x2, 'y1' : y1, 'y2' : y2, 'z' : z }
return (pk, sk)
# @input(pk_t, bytes)
# @output(c_t)
def METHOD_NAME(self, pk, M):
r = group.random()
u1 = (pk['g1'] ** r)
u2 = (pk['g2'] ** r)
e = group.encode(M) * (pk['h'] ** r)
alpha = group.hash((u1, u2, e))
v = (pk['c'] ** r) * (pk['d'] ** (r * alpha))
# Assemble the ciphertext
c = { 'u1' : u1, 'u2' : u2, 'e' : e, 'v' : v }
return c
# @input(pk_t, sk_t, c_t)
# @output(bytes)
def decrypt(self, pk, sk, c):
alpha = group.hash((c['u1'], c['u2'], c['e']))
v_prime = (c['u1'] ** (sk['x1'] + (sk['y1'] * alpha))) * (c['u2'] ** (sk['x2'] + (sk['y2'] * alpha)))
if (c['v'] != v_prime):
return 'ERROR'
if debug: print("c['v'] => %s" % c['v'])
if debug: print("v' => %s" % v_prime)
return group.decode(c['e'] / (c['u1'] ** sk['z']))
|
6,941 | reorder encoder out | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules.fairseq_dropout import FairseqDropout
default_conv_enc_config = """[
(400, 13, 170, 0.2),
(440, 14, 0, 0.214),
(484, 15, 0, 0.22898),
(532, 16, 0, 0.2450086),
(584, 17, 0, 0.262159202),
(642, 18, 0, 0.28051034614),
(706, 19, 0, 0.30014607037),
(776, 20, 0, 0.321156295296),
(852, 21, 0, 0.343637235966),
(936, 22, 0, 0.367691842484),
(1028, 23, 0, 0.393430271458),
(1130, 24, 0, 0.42097039046),
(1242, 25, 0, 0.450438317792),
(1366, 26, 0, 0.481969000038),
(1502, 27, 0, 0.51570683004),
(1652, 28, 0, 0.551806308143),
(1816, 29, 0, 0.590432749713),
]"""
@register_model("asr_w2l_conv_glu_encoder")
class W2lConvGluEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--conv-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one conv layer
[(out_channels, kernel_size, padding, dropout), ...]
""",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
encoder = W2lConvGluEncoder(
vocab_size=len(task.target_dictionary),
input_feat_per_channel=args.input_feat_per_channel,
in_channels=args.in_channels,
conv_enc_config=eval(conv_enc_config),
)
return cls(encoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = False
return lprobs
class W2lConvGluEncoder(FairseqEncoder):
def __init__(
self, vocab_size, input_feat_per_channel, in_channels, conv_enc_config
):
super().__init__(None)
self.input_dim = input_feat_per_channel
if in_channels != 1:
raise ValueError("only 1 input channel is currently supported")
self.conv_layers = nn.ModuleList()
self.linear_layers = nn.ModuleList()
self.dropouts = []
cur_channels = input_feat_per_channel
for out_channels, kernel_size, padding, dropout in conv_enc_config:
layer = nn.Conv1d(cur_channels, out_channels, kernel_size, padding=padding)
layer.weight.data.mul_(math.sqrt(3)) # match wav2letter init
self.conv_layers.append(nn.utils.weight_norm(layer))
self.dropouts.append(
FairseqDropout(dropout, module_name=self.__class__.__name__)
)
if out_channels % 2 != 0:
raise ValueError("odd # of out_channels is incompatible with GLU")
cur_channels = out_channels // 2 # halved by GLU
for out_channels in [2 * cur_channels, vocab_size]:
layer = nn.Linear(cur_channels, out_channels)
layer.weight.data.mul_(math.sqrt(3))
self.linear_layers.append(nn.utils.weight_norm(layer))
cur_channels = out_channels // 2
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
B, T, _ = src_tokens.size()
x = src_tokens.transpose(1, 2).contiguous() # (B, feat, T) assuming C == 1
for layer_idx in range(len(self.conv_layers)):
x = self.conv_layers[layer_idx](x)
x = F.glu(x, dim=1)
x = self.dropouts[layer_idx](x)
x = x.transpose(1, 2).contiguous() # (B, T, 908)
x = self.linear_layers[0](x)
x = F.glu(x, dim=2)
x = self.dropouts[-1](x)
x = self.linear_layers[1](x)
assert x.size(0) == B
assert x.size(1) == T
encoder_out = x.transpose(0, 1) # (T, B, vocab_size)
# need to debug this -- find a simpler/elegant way in pytorch APIs
encoder_padding_mask = (
torch.arange(T).view(1, T).expand(B, -1).to(x.device)
>= src_lengths.view(B, 1).expand(-1, T)
).t() # (B x T) -> (T x B)
return {
"encoder_out": encoder_out, # (T, B, vocab_size)
"encoder_padding_mask": encoder_padding_mask, # (T, B)
}
def METHOD_NAME(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return (1e6, 1e6) # an arbitrary large number
@register_model_architecture("asr_w2l_conv_glu_encoder", "w2l_conv_glu_enc")
def w2l_conv_glu_enc(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.in_channels = getattr(args, "in_channels", 1)
args.conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config) |
6,942 | close | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from msrest import Deserializer, Serializer
from azure.core import AsyncPipelineClient
from azure.core.rest import AsyncHttpResponse, HttpRequest
from .. import models
from ._configuration import FormRecognizerClientConfiguration
from .operations import FormRecognizerClientOperationsMixin
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class FormRecognizerClient(FormRecognizerClientOperationsMixin):
"""Extracts content, layout, and structured data from documents.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param endpoint: Supported Cognitive Services endpoints (protocol and hostname, for
example: https://westus2.api.cognitive.microsoft.com).
:type endpoint: str
:keyword api_version: Api Version. Default value is "2022-08-31". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
endpoint: str,
**kwargs: Any
) -> None:
_base_url = '{endpoint}/formrecognizer'
self._config = FormRecognizerClientConfiguration(credential=credential, endpoint=endpoint, **kwargs)
self._client = AsyncPipelineClient(base_url=_base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments)
return self._client.send_request(request_copy, **kwargs)
async def METHOD_NAME(self) -> None:
await self._client.METHOD_NAME()
async def __aenter__(self) -> "FormRecognizerClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details) |
6,943 | handle sequence | # pylint:disable=unused-argument,no-self-use
from collections import OrderedDict
import claripy
import ailment
from ailment.expression import negate
from .sequence_walker import SequenceWalker
from .structuring.structurer_nodes import (
MultiNode,
SequenceNode,
CodeNode,
ConditionNode,
SwitchCaseNode,
ConditionalBreakNode,
BreakNode,
LoopNode,
CascadingConditionNode,
ContinueNode,
)
from .condition_processor import ConditionProcessor
class EmptyNodeRemover:
"""
Rewrites a node and its children to remove empty nodes.
The following optimizations are performed at the same time:
- Convert if (A) { } else { ... } to if(!A) { ... } else { }
:ivar _claripy_ast_conditions: True if all node conditions are claripy ASTs. False if all node conditions are AIL
expressions.
"""
def __init__(self, node, claripy_ast_conditions: bool = True):
self.root = node
self._claripy_ast_conditions = claripy_ast_conditions
self.removed_sequences = []
self.replaced_sequences = {}
handlers = {
SequenceNode: self.METHOD_NAME,
CodeNode: self._handle_Code,
ConditionNode: self._handle_Condition,
CascadingConditionNode: self._handle_CascadingCondition,
SwitchCaseNode: self._handle_SwitchCase,
LoopNode: self._handle_Loop,
ContinueNode: self._handle_Continue,
MultiNode: self._handle_MultiNode,
BreakNode: self._handle_Default,
ConditionalBreakNode: self._handle_Default,
ailment.Block: self._handle_Block,
}
self._walker = SequenceWalker(handlers=handlers)
r = self._walker.walk(self.root)
if r is None:
self.result = SequenceNode(None, nodes=[])
else:
# Make sure it's still a sequence node
if not isinstance(r, SequenceNode):
r = SequenceNode(node.addr, nodes=[r])
self.result = r
#
# Handlers
#
def METHOD_NAME(self, node, **kwargs):
new_nodes = []
for node_ in node.nodes:
new_node = self._walker._handle(node_)
if new_node is not None:
if isinstance(new_node, SequenceNode):
new_nodes.extend(new_node.nodes)
else:
new_nodes.append(new_node)
if not new_nodes:
self.removed_sequences.append(node)
return None
if len(new_nodes) == 1:
# Remove the unnecessary sequence node
self.replaced_sequences[node] = new_nodes[0]
return new_nodes[0]
sn = SequenceNode(node.addr, nodes=new_nodes)
self.replaced_sequences[node] = sn
return sn
def _handle_MultiNode(self, node: MultiNode, **kwargs):
new_nodes = []
for node_ in node.nodes:
new_node = self._walker._handle(node_)
if new_node is not None:
if isinstance(new_node, MultiNode):
new_nodes.extend(new_node.nodes)
else:
new_nodes.append(new_node)
if not new_nodes:
return None
if len(new_nodes) == 1:
# Remove the unnecessary MultiNode
return new_nodes[0]
return MultiNode(new_nodes)
def _handle_Code(self, node, **kwargs):
inner_node = self._walker._handle(node.node)
if inner_node is None:
return None
if (
self._claripy_ast_conditions
and node.reaching_condition is not None
and claripy.is_true(node.reaching_condition)
):
# Remove the unnecessary CodeNode
return inner_node
if self._claripy_ast_conditions and isinstance(inner_node, CodeNode):
# unpack the codenode so we don't have directly nested CodeNodes
return CodeNode(inner_node.node, claripy.And(node.reaching_condition, inner_node.reaching_condition))
return CodeNode(inner_node, node.reaching_condition)
def _handle_Condition(self, node, **kwargs):
true_node = self._walker._handle(node.true_node)
false_node = self._walker._handle(node.false_node)
if true_node is None and false_node is None:
# empty node
return None
if true_node is None and false_node is not None:
# swap them
return ConditionNode(
node.addr,
node.reaching_condition,
ConditionProcessor.simplify_condition(claripy.Not(node.condition))
if self._claripy_ast_conditions
else negate(node.condition),
false_node,
false_node=None,
)
if (
self._claripy_ast_conditions
and claripy.is_true(node.condition)
and node.true_node is not None
and node.false_node is None
):
return node.true_node
return ConditionNode(node.addr, node.reaching_condition, node.condition, true_node, false_node=false_node)
def _handle_CascadingCondition(self, node: CascadingConditionNode, **kwargs):
new_else_node = None if node.else_node is None else self._walker._handle(node.else_node)
new_cond_and_nodes = []
for cond, child_node in node.condition_and_nodes:
new_node = self._walker._handle(child_node)
if new_node is not None:
new_cond_and_nodes.append((cond, new_node))
else:
if new_else_node is not None:
# do not allow any empty condition nodes, otherwise the condition for the else node will be wrong
new_cond_and_nodes.append((cond, child_node))
if not new_cond_and_nodes and new_else_node is None:
# empty node
return None
return CascadingConditionNode(node.addr, new_cond_and_nodes, else_node=new_else_node)
def _handle_Loop(self, node: LoopNode, **kwargs):
new_seq = self._walker._handle(node.sequence_node)
if (
new_seq is None
and node.sort == "while"
and (node.condition is None or (isinstance(node.condition, ailment.Const) and node.condition.value == 0))
):
return None
result = node.copy()
result.sequence_node = new_seq
return result
def _handle_Continue(self, node: ContinueNode, **kwargs):
return node
def _handle_SwitchCase(self, node, **kwargs):
new_cases = OrderedDict()
for idx, case in node.cases.items():
new_case = self._walker._handle(case)
if new_case is not None:
new_cases[idx] = new_case
new_default_node = self._walker._handle(node.default_node)
if not new_cases and new_default_node is None:
return None
return SwitchCaseNode(node.switch_expr, new_cases, new_default_node, addr=node.addr)
@staticmethod
def _handle_Default(node, **kwargs):
return node
@staticmethod
def _handle_Block(block, **kwargs):
if not block.statements:
return None
return block |
6,944 | assert in custom modules | # -*- coding: utf-8 -*-
import filecmp
import json
import os
import pkgutil
import zipfile
import hypothesis
import pytest
from verta.tracking.entities._deployable_entity import _DeployableEntity
from verta._internal_utils.custom_modules import CustomModules
from .. import utils
from . import contexts
class TestPipInstalledModule:
@staticmethod
def METHOD_NAME(custom_modules, module_name):
module = CustomModules.get_module_path(module_name)
with utils.tempdir() as custom_modules_dir:
with zipfile.ZipFile(custom_modules, "r") as zipf:
zipf.extractall(custom_modules_dir)
# TODO: extract sys.path from _verta_config.py instead of walking
for parent_dir, dirnames, filenames in os.walk(custom_modules_dir):
if os.path.basename(module) in dirnames + filenames:
retrieved_module = os.path.join(
parent_dir,
os.path.basename(module),
)
break
else:
raise ValueError("module not found in custom modules")
if os.path.isfile(module):
assert filecmp.cmp(module, retrieved_module)
else:
utils.assert_dirs_match(module, retrieved_module)
@pytest.mark.parametrize(
"name",
sorted(module[1] for module in pkgutil.iter_modules()),
)
def test_module(self, name):
"""pip-installed module can be collected."""
if name == "tests" or name == "conftest" or name.startswith("test_"):
pytest.skip(
"pytest modifies both import mechanisms and module objects,"
" which we can't handle right now"
)
if CustomModules.get_module_path(name) in ("built-in", "frozen"):
pytest.skip("built into Python; no module file to collect")
custom_modules = _DeployableEntity._custom_modules_as_artifact([name])
self.METHOD_NAME(custom_modules, name)
@pytest.mark.parametrize(
"names",
[
["cloudpickle", "hypothesis"],
["cloudpickle", "hypothesis", "pytest"],
],
)
def test_multiple_modules(self, names):
"""Multiple pip-installed modules can be collected at once."""
custom_modules = _DeployableEntity._custom_modules_as_artifact(names)
for name in names:
self.METHOD_NAME(custom_modules, name)
def test_module_and_local_dir_have_same_name(self, worker_id):
"""If a pip-installed module and a local directory share a name, the module is collected.
If a user can import a package "foo" in their environment, and uses
custom modules to find "foo", we will prefer that package over a
directory/file "foo" in the cwd. Otherwise, it is very difficult or
impossible to force the installed package.
"""
name = worker_id
# avoid using an existing package name
hypothesis.assume(not CustomModules.is_importable(name))
with utils.chtempdir():
# create local directory with same name as package
local_dir = os.path.abspath(name)
os.mkdir(local_dir)
with open(os.path.join(local_dir, "empty.json"), "w") as f:
json.dump({}, f)
# create package in another directory and install
with utils.tempdir() as tempd:
with contexts.installable_package(name, dir=tempd) as pkg_dir:
with contexts.installed_local_package(pkg_dir, name):
# collect and validate custom modules
custom_modules = _DeployableEntity._custom_modules_as_artifact(
[name],
)
self.METHOD_NAME(custom_modules, name)
def test_module_and_local_pkg_have_same_name(self, worker_id):
"""A specific case of :meth:`test_module_and_local_dir_have_same_name`.
The local directory *is* a Python package repository (but not directly
importable without ``cd``ing one level into it).
A user may have a monolithic project with model management scripts
alongside Python package directories (that may *also* be installed
into the environment).
"""
name = worker_id
# avoid using an existing package name
hypothesis.assume(not CustomModules.is_importable(name))
with utils.chtempdir():
# create package in *current* directory and install
with contexts.installable_package(name, dir=".") as pkg_dir:
with contexts.installed_local_package(pkg_dir, name):
# collect and validate custom modules
custom_modules = _DeployableEntity._custom_modules_as_artifact(
[name],
)
self.METHOD_NAME(custom_modules, name) |
6,945 | test latest release with environment | from datetime import datetime
from sentry.models import Release, Rule
from sentry.rules.filters.latest_release import LatestReleaseFilter, get_project_release_cache_key
from sentry.testutils.cases import RuleTestCase
from sentry.utils.cache import cache
class LatestReleaseFilterTest(RuleTestCase):
rule_cls = LatestReleaseFilter
def test_latest_release(self):
event = self.get_event()
oldRelease = Release.objects.create(
organization_id=self.organization.id,
version="1",
date_added=datetime(2020, 9, 1, 3, 8, 24, 880386),
)
oldRelease.add_project(self.project)
newRelease = Release.objects.create(
organization_id=self.organization.id,
version="2",
date_added=datetime(2020, 9, 2, 3, 8, 24, 880386),
)
newRelease.add_project(self.project)
event.data["tags"] = (("release", newRelease.version),)
rule = self.get_rule()
self.assertPasses(rule, event)
def test_latest_release_no_match(self):
event = self.get_event()
oldRelease = Release.objects.create(
organization_id=self.organization.id,
version="1",
date_added=datetime(2020, 9, 1, 3, 8, 24, 880386),
)
oldRelease.add_project(self.project)
newRelease = Release.objects.create(
organization_id=self.organization.id,
version="2",
date_added=datetime(2020, 9, 2, 3, 8, 24, 880386),
)
newRelease.add_project(self.project)
event.data["tags"] = (("release", oldRelease.version),)
rule = self.get_rule()
self.assertDoesNotPass(rule, event)
def test_caching(self):
event = self.get_event()
oldRelease = Release.objects.create(
organization_id=self.organization.id,
version="1",
date_added=datetime(2020, 9, 1, 3, 8, 24, 880386),
)
oldRelease.add_project(self.project)
event.data["tags"] = (("release", oldRelease.version),)
rule = self.get_rule()
self.assertPasses(rule, event)
newRelease = Release.objects.create(
organization_id=self.organization.id,
version="2",
date_added=datetime(2020, 9, 2, 3, 8, 24, 880386),
)
newRelease.add_project(self.project)
# ensure we clear the cache after creating a new release
cache_key = get_project_release_cache_key(event.group.project_id)
assert cache.get(cache_key) is None
self.assertDoesNotPass(rule, event)
# ensure we clear the cache when a release is deleted
newRelease.safe_delete()
cache_key = get_project_release_cache_key(event.group.project_id)
assert cache.get(cache_key) is None
# rule should pass again because the latest release is oldRelease
self.assertPasses(rule, event)
def METHOD_NAME(self):
event = self.get_event()
self.create_release(
project=event.group.project,
version="1",
date_added=datetime(2020, 9, 1, 3, 8, 24, 880386),
environments=[self.environment],
)
new_release = self.create_release(
project=event.group.project,
version="2",
date_added=datetime(2020, 9, 2, 3, 8, 24, 880386),
environments=[self.environment],
)
other_env_release = self.create_release(
project=event.group.project,
version="4",
date_added=datetime(2020, 9, 3, 3, 8, 24, 880386),
)
event.data["tags"] = (("release", new_release.version),)
environment_rule = self.get_rule(rule=Rule(environment_id=self.environment.id))
self.assertPasses(environment_rule, event)
event.data["tags"] = (("release", other_env_release.version),)
environment_rule = self.get_rule(rule=Rule(environment_id=self.environment.id))
self.assertDoesNotPass(environment_rule, event) |
6,946 | test matrix iqr | #!/usr/bin/env python
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
#remember to add yourself if you make changes
__credits__ = ["Rob Knight", "Daniel McDonald", "Greg Caporaso",
"Justin Kuczynski", "Catherine Lozupone",
"Jai Ram Rideout", "Yoshiki Vazquez Baeza"]
__license__ = "BSD"
__version__ = "1.7.0-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
__status__ = "Development"
from unittest import TestCase, main
from numpy.testing import assert_almost_equal
from numpy import array, isnan, asarray, arange
from scipy.spatial import procrustes
from emperor.qiime_backports.util import (summarize_pcoas, _flip_vectors,
_compute_jn_pcoa_avg_ranges,
matrix_IQR, idealfourths, IQR)
class TopLevelTests(TestCase):
def setup(self):
pass
def test_flip_vectors(self):
"""_flip_vectors makes a new PCA matrix with correct signs"""
m_matrix = array([[1.0, 0.0, 1.0], [2.0, 4.0, 4.0]])
jn_matrix = array([[1.2, 0.1, -1.2], [2.5, 4.0, -4.5]])
new_matrix = _flip_vectors(jn_matrix, m_matrix)
assert_almost_equal(new_matrix, array([[1.2, 0.1, 1.2], [2.5, 4.0, 4.5]]))
def test_compute_jn_pcoa_avg_ranges(self):
"""_compute_jn_pcoa_avg_ranges works
"""
jn_flipped_matrices = [array([[2.0,4.0, -4.5],[-1.2,-0.1,1.2]]),\
array([[3.0,4.0, -4.5],[-1.2,-0.1,1.2]]),\
array([[4.0,4.0, -4.5],[-1.2,-0.1,1.2]]),\
array([[5.0,4.0, -4.5],[-1.2,-0.1,1.2]]),\
array([[6.0,4.0, -4.5],[-1.2,-0.1,1.2]]),\
array([[7.0,4.0, -4.5],[-1.2,-0.1,1.2]]),\
array([[1.0,4.0, -4.5],[-1.2,-0.1,1.2]])]
avg_matrix, low_matrix, high_matrix = _compute_jn_pcoa_avg_ranges(\
jn_flipped_matrices, 'ideal_fourths')
assert_almost_equal(avg_matrix[(0,0)], 4.0)
assert_almost_equal(avg_matrix[(0,2)], -4.5)
assert_almost_equal(low_matrix[(0,0)], 2.16666667)
assert_almost_equal(high_matrix[(0,0)], 5.83333333)
avg_matrix, low_matrix, high_matrix = _compute_jn_pcoa_avg_ranges(\
jn_flipped_matrices, 'sdev')
x = array([m[0,0] for m in jn_flipped_matrices])
self.assertEqual(x.mean(),avg_matrix[0,0])
self.assertEqual(-x.std(ddof=1)/2,low_matrix[0,0])
self.assertEqual(x.std(ddof=1)/2,high_matrix[0,0])
def test_summarize_pcoas(self):
"""summarize_pcoas works
"""
master_pcoa = [['1', '2', '3'], \
array([[-1.0, 0.0, 1.0], [2.0, 4.0, -4.0]]), \
array([.76, .24])]
jn1 = [['1', '2', '3'], \
array([[1.2, 0.1, -1.2],[-2.5, -4.0, 4.5]]), \
array([0.80, .20])]
jn2 = [['1', '2', '3'], \
array([[-1.4, 0.05, 1.3],[2.6, 4.1, -4.7]]), \
array([0.76, .24])]
jn3 = [['1', '2', '3'], \
array([[-1.5, 0.05, 1.6],[2.4, 4.0, -4.8]]), \
array([0.84, .16])]
jn4 = [['1', '2', '3'], \
array([[-1.5, 0.05, 1.6],[2.4, 4.0, -4.8]]), \
array([0.84, .16])]
support_pcoas = [jn1, jn2, jn3, jn4]
#test with the ideal_fourths option
matrix_average, matrix_low, matrix_high, eigval_average, m_names = \
summarize_pcoas(master_pcoa, support_pcoas, 'ideal_fourths',
apply_procrustes=False)
self.assertEqual(m_names, ['1', '2', '3'])
assert_almost_equal(matrix_average[(0,0)], -1.4)
assert_almost_equal(matrix_average[(0,1)], 0.0125)
assert_almost_equal(matrix_low[(0,0)], -1.5)
assert_almost_equal(matrix_high[(0,0)], -1.28333333)
assert_almost_equal(matrix_low[(0,1)], -0.0375)
assert_almost_equal(matrix_high[(0,1)], 0.05)
assert_almost_equal(eigval_average[0], 0.81)
assert_almost_equal(eigval_average[1], 0.19)
#test with the IQR option
matrix_average, matrix_low, matrix_high, eigval_average, m_names = \
summarize_pcoas(master_pcoa, support_pcoas, method='IQR',
apply_procrustes=False)
assert_almost_equal(matrix_low[(0,0)], -1.5)
assert_almost_equal(matrix_high[(0,0)], -1.3)
#test with procrustes option followed by sdev
m, m1, msq = procrustes(master_pcoa[1],jn1[1])
m, m2, msq = procrustes(master_pcoa[1],jn2[1])
m, m3, msq = procrustes(master_pcoa[1],jn3[1])
m, m4, msq = procrustes(master_pcoa[1],jn4[1])
matrix_average, matrix_low, matrix_high, eigval_average, m_names = \
summarize_pcoas(master_pcoa, support_pcoas, method='sdev',
apply_procrustes=True)
x = array([m1[0,0],m2[0,0],m3[0,0],m4[0,0]])
self.assertEqual(x.mean(),matrix_average[0,0])
self.assertEqual(-x.std(ddof=1)/2,matrix_low[0,0])
self.assertEqual(x.std(ddof=1)/2,matrix_high[0,0])
def test_IQR(self):
"IQR returns the interquartile range for list x"
#works for odd with odd split
x = [2,3,4,5,6,7,1]
minv, maxv = IQR(x)
self.assertEqual(minv, 2)
self.assertEqual(maxv, 6)
#works for even with odd split
x = [1,2,3,4,5,6]
minv, maxv = IQR(x)
self.assertEqual(minv, 2)
self.assertEqual(maxv, 5)
#works for even with even split
x = [1,2,3,4,5,6,7,8]
minv, maxv = IQR(x)
self.assertEqual(minv, 2.5)
self.assertEqual(maxv, 6.5)
#works with array
#works for odd with odd split
x = array([2,3,4,5,6,7,1])
minv, maxv = IQR(x)
self.assertEqual(minv, 2)
self.assertEqual(maxv, 6)
#works for even with odd split
x = array([1,2,3,4,5,6])
minv, maxv = IQR(x)
self.assertEqual(minv, 2)
self.assertEqual(maxv, 5)
#works for even with even split
x = array([1,2,3,4,5,6,7,8])
minv, maxv = IQR(x)
self.assertEqual(minv, 2.5)
self.assertEqual(maxv, 6.5)
def METHOD_NAME(self):
"""matrix_IQR calcs the IQR for each column in an array correctly
"""
x = array([[1,2,3],[4,5,6],[7,8,9], [10,11,12]])
min_vals, max_vals = matrix_IQR(x)
assert_almost_equal(min_vals, array([2.5,3.5,4.5]))
assert_almost_equal(max_vals, array([8.5,9.5,10.5]))
def test_idealfourths(self):
"""idealfourths: tests the ideal-fourths function which was imported from scipy
at the following location (http://projects.scipy.org/scipy/browser/trunk/scipy/stats/tests/test_mmorestats.py?rev=4154)
"""
test = arange(100)
self.assertEqual(idealfourths(test),
[24.416666666666668, 74.583333333333343])
test_2D = test.repeat(3).reshape(-1,3)
# used to be assertAlmostEqualRel but assert_almost_equal from numpy
# seems to be working just fine
assert_almost_equal(asarray(idealfourths(test_2D, axis=0)),\
array([[24.41666667, 24.41666667, 24.41666667], \
[74.58333333, 74.58333333, 74.58333333]]))
assert_almost_equal(idealfourths(test_2D, axis=1),
test.repeat(2).reshape(-1,2))
test = [0,0]
_result = idealfourths(test)
self.assertEqual(isnan(_result).all(), True)
#run unit tests if run from command-line
if __name__ == '__main__':
main() |
6,947 | test failed get policies | from unittest.mock import Mock, patch
from canonicalwebteam.store_api.exceptions import StoreApiResponseErrorList
from tests.admin.tests_models import TestModelServiceEndpoints
class TestGetPolicies(TestModelServiceEndpoints):
@patch("webapp.admin.views.admin_api.get_store_model_policies")
def test_get_policies(self, mock_get_store_model_policies):
mock_get_store_model_policies.return_value = ["policy1", "policy2"]
response = self.client.get("/admin/store/1/models/Model1/policies")
data = response.json
self.assertEqual(response.status_code, 200)
self.assertTrue(data["success"])
self.assertEqual(data["data"], ["policy1", "policy2"])
@patch("webapp.admin.views.admin_api.get_store_model_policies")
def METHOD_NAME(self, mock_get_store_model_policies):
mock_get_store_model_policies.side_effect = StoreApiResponseErrorList(
"An error occurred", 500, [{"message": "An error occurred"}]
)
response = self.client.get("/admin/store/1/models/Model1/policies")
data = response.json
self.assertEqual(response.status_code, 500)
self.assertFalse(data["success"])
self.assertEqual(data["message"], "An error occurred")
class TestCreatePolicies(TestModelServiceEndpoints):
@patch("webapp.admin.views.admin_api.get_store_signing_keys")
@patch("webapp.admin.views.admin_api.create_store_model_policy")
def test_create_policy(
self, mock_create_store_model_policy, mock_get_store_signing_keys
):
mock_get_store_signing_keys.return_value = [
{"sha3-384": "valid_signing_key"}
]
mock_create_store_model_policy.return_value = None
payload = {"signing_key": "valid_signing_key"}
response = self.client.post(
"/admin/store/1/models/Model1/policies", data=payload
)
data = response.json
self.assertEqual(response.status_code, 200)
self.assertTrue(data["success"])
@patch("webapp.admin.views.admin_api.get_store_signing_keys")
def test_missing_signing_key(self, mock_get_store_signing_keys):
mock_get_store_signing_keys.return_value = [
{"sha3-384": "valid_signing_key"}
]
payload = {}
response = self.client.post(
"/admin/store/1/models/Model1/policies", data=payload
)
data = response.json
self.assertEqual(response.status_code, 500)
self.assertFalse(data["success"])
self.assertEqual(data["message"], "Signing key required")
@patch("webapp.admin.views.admin_api.get_store_signing_keys")
def test_invalid_signing_key(self, mock_get_store_signing_keys):
mock_get_store_signing_keys.return_value = [{"sha3-384": "valid_key"}]
payload = {"signing_key": "invalid_key"}
response = self.client.post(
"/admin/store/1/models/Model1/policies", data=payload
)
data = response.json
self.assertEqual(response.status_code, 500)
self.assertFalse(data["success"])
self.assertEqual(data["message"], "Invalid signing key")
@patch("webapp.admin.views.admin_api.get_store_signing_keys")
@patch("webapp.admin.views.admin_api.create_store_model_policy")
def test_exception_in_create_policy(
self,
mock_create_store_model_policy,
mock_get_store_signing_keys,
):
mock_get_store_signing_keys.return_value = [{"sha3-384": "valid_key"}]
mock_create_store_model_policy.side_effect = StoreApiResponseErrorList(
"Simulated failure", 500, [{"message": "An error occurred"}]
)
payload = {"signing_key": "valid_key"}
response = self.client.post(
"/admin/store/1/models/Model1/policies", data=payload
)
data = response.json
self.assertEqual(response.status_code, 500)
self.assertFalse(data["success"])
self.assertEqual(data["message"], "An error occurred")
class TestDeletePolicies(TestModelServiceEndpoints):
@patch("webapp.admin.views.admin_api.delete_store_model_policy")
def test_successful_delete_policy(self, mock_delete_store_model_policy):
mock_delete_store_model_policy.return_value = Mock(status_code=204)
response = self.client.delete(
"/admin/store/1/models/Model1/policies/1"
)
data = response.json
self.assertEqual(response.status_code, 200)
self.assertTrue(data["success"])
@patch("webapp.admin.views.admin_api.delete_store_model_policy")
def test_policy_not_found(self, mock_delete_store_model_policy):
mock_delete_store_model_policy.return_value = Mock(status_code=404)
response = self.client.delete(
"/admin/store/1/models/Model1/policies/1"
)
data = response.json
self.assertEqual(response.status_code, 500)
self.assertFalse(data["success"])
self.assertEqual(data["message"], "Policy not found")
@patch("webapp.admin.views.admin_api.delete_store_model_policy")
def test_exception_in_delete_policy(self, mock_delete_store_model_policy):
mock_delete_store_model_policy.side_effect = StoreApiResponseErrorList(
"An error occured", 500, [{"message": "An error occurred"}]
)
response = self.client.delete(
"/admin/store/1/models/Model1/policies/1"
)
data = response.json
self.assertEqual(response.status_code, 500)
self.assertFalse(data["success"])
self.assertEqual(data["message"], "An error occurred") |
6,948 | set crc | """NRF24L01 driver for MicroPython
"""
from micropython import const
import utime
# nRF24L01+ registers
CONFIG = const(0x00)
EN_RXADDR = const(0x02)
SETUP_AW = const(0x03)
SETUP_RETR = const(0x04)
RF_CH = const(0x05)
RF_SETUP = const(0x06)
STATUS = const(0x07)
RX_ADDR_P0 = const(0x0A)
TX_ADDR = const(0x10)
RX_PW_P0 = const(0x11)
FIFO_STATUS = const(0x17)
DYNPD = const(0x1C)
# CONFIG register
EN_CRC = const(0x08) # enable CRC
CRCO = const(0x04) # CRC encoding scheme; 0=1 byte, 1=2 bytes
PWR_UP = const(0x02) # 1=power up, 0=power down
PRIM_RX = const(0x01) # RX/TX control; 0=PTX, 1=PRX
# RF_SETUP register
POWER_0 = const(0x00) # -18 dBm
POWER_1 = const(0x02) # -12 dBm
POWER_2 = const(0x04) # -6 dBm
POWER_3 = const(0x06) # 0 dBm
SPEED_1M = const(0x00)
SPEED_2M = const(0x08)
SPEED_250K = const(0x20)
# STATUS register
RX_DR = const(0x40) # RX data ready; write 1 to clear
TX_DS = const(0x20) # TX data sent; write 1 to clear
MAX_RT = const(0x10) # max retransmits reached; write 1 to clear
# FIFO_STATUS register
RX_EMPTY = const(0x01) # 1 if RX FIFO is empty
# constants for instructions
R_RX_PL_WID = const(0x60) # read RX payload width
R_RX_PAYLOAD = const(0x61) # read RX payload
W_TX_PAYLOAD = const(0xA0) # write TX payload
FLUSH_TX = const(0xE1) # flush TX FIFO
FLUSH_RX = const(0xE2) # flush RX FIFO
NOP = const(0xFF) # use to read STATUS register
class NRF24L01:
def __init__(self, spi, cs, ce, channel=46, payload_size=16):
assert payload_size <= 32
self.buf = bytearray(1)
# store the pins
self.spi = spi
self.cs = cs
self.ce = ce
# init the SPI bus and pins
self.init_spi(4000000)
# reset everything
ce.init(ce.OUT, value=0)
cs.init(cs.OUT, value=1)
self.payload_size = payload_size
self.pipe0_read_addr = None
utime.sleep_ms(5)
# set address width to 5 bytes and check for device present
self.reg_write(SETUP_AW, 0b11)
if self.reg_read(SETUP_AW) != 0b11:
raise OSError("nRF24L01+ Hardware not responding")
# disable dynamic payloads
self.reg_write(DYNPD, 0)
# auto retransmit delay: 1750us
# auto retransmit count: 8
self.reg_write(SETUP_RETR, (6 << 4) | 8)
# set rf power and speed
self.set_power_speed(POWER_3, SPEED_250K) # Best for point to point links
# init CRC
self.METHOD_NAME(2)
# clear status flags
self.reg_write(STATUS, RX_DR | TX_DS | MAX_RT)
# set channel
self.set_channel(channel)
# flush buffers
self.flush_rx()
self.flush_tx()
def init_spi(self, baudrate):
try:
master = self.spi.MASTER
except AttributeError:
self.spi.init(baudrate=baudrate, polarity=0, phase=0)
else:
self.spi.init(master, baudrate=baudrate, polarity=0, phase=0)
def reg_read(self, reg):
self.cs(0)
self.spi.readinto(self.buf, reg)
self.spi.readinto(self.buf)
self.cs(1)
return self.buf[0]
def reg_write_bytes(self, reg, buf):
self.cs(0)
self.spi.readinto(self.buf, 0x20 | reg)
self.spi.write(buf)
self.cs(1)
return self.buf[0]
def reg_write(self, reg, value):
self.cs(0)
self.spi.readinto(self.buf, 0x20 | reg)
ret = self.buf[0]
self.spi.readinto(self.buf, value)
self.cs(1)
return ret
def flush_rx(self):
self.cs(0)
self.spi.readinto(self.buf, FLUSH_RX)
self.cs(1)
def flush_tx(self):
self.cs(0)
self.spi.readinto(self.buf, FLUSH_TX)
self.cs(1)
# power is one of POWER_x defines; speed is one of SPEED_x defines
def set_power_speed(self, power, speed):
setup = self.reg_read(RF_SETUP) & 0b11010001
self.reg_write(RF_SETUP, setup | power | speed)
# length in bytes: 0, 1 or 2
def METHOD_NAME(self, length):
config = self.reg_read(CONFIG) & ~(CRCO | EN_CRC)
if length == 0:
pass
elif length == 1:
config |= EN_CRC
else:
config |= EN_CRC | CRCO
self.reg_write(CONFIG, config)
def set_channel(self, channel):
self.reg_write(RF_CH, min(channel, 125))
# address should be a bytes object 5 bytes long
def open_tx_pipe(self, address):
assert len(address) == 5
self.reg_write_bytes(RX_ADDR_P0, address)
self.reg_write_bytes(TX_ADDR, address)
self.reg_write(RX_PW_P0, self.payload_size)
# address should be a bytes object 5 bytes long
# pipe 0 and 1 have 5 byte address
# pipes 2-5 use same 4 most-significant bytes as pipe 1, plus 1 extra byte
def open_rx_pipe(self, pipe_id, address):
assert len(address) == 5
assert 0 <= pipe_id <= 5
if pipe_id == 0:
self.pipe0_read_addr = address
if pipe_id < 2:
self.reg_write_bytes(RX_ADDR_P0 + pipe_id, address)
else:
self.reg_write(RX_ADDR_P0 + pipe_id, address[0])
self.reg_write(RX_PW_P0 + pipe_id, self.payload_size)
self.reg_write(EN_RXADDR, self.reg_read(EN_RXADDR) | (1 << pipe_id))
def start_listening(self):
self.reg_write(CONFIG, self.reg_read(CONFIG) | PWR_UP | PRIM_RX)
self.reg_write(STATUS, RX_DR | TX_DS | MAX_RT)
if self.pipe0_read_addr is not None:
self.reg_write_bytes(RX_ADDR_P0, self.pipe0_read_addr)
self.flush_rx()
self.flush_tx()
self.ce(1)
utime.sleep_us(130)
def stop_listening(self):
self.ce(0)
self.flush_tx()
self.flush_rx()
# returns True if any data available to recv
def any(self):
return not bool(self.reg_read(FIFO_STATUS) & RX_EMPTY)
def recv(self):
# get the data
self.cs(0)
self.spi.readinto(self.buf, R_RX_PAYLOAD)
buf = self.spi.read(self.payload_size)
self.cs(1)
# clear RX ready flag
self.reg_write(STATUS, RX_DR)
return buf
# blocking wait for tx complete
def send(self, buf, timeout=500):
self.send_start(buf)
start = utime.ticks_ms()
result = None
while result is None and utime.ticks_diff(utime.ticks_ms(), start) < timeout:
result = self.send_done() # 1 == success, 2 == fail
if result == 2:
raise OSError("send failed")
# non-blocking tx
def send_start(self, buf):
# power up
self.reg_write(CONFIG, (self.reg_read(CONFIG) | PWR_UP) & ~PRIM_RX)
utime.sleep_us(150)
# send the data
self.cs(0)
self.spi.readinto(self.buf, W_TX_PAYLOAD)
self.spi.write(buf)
if len(buf) < self.payload_size:
self.spi.write(b"\x00" * (self.payload_size - len(buf))) # pad out data
self.cs(1)
# enable the chip so it can send the data
self.ce(1)
utime.sleep_us(15) # needs to be >10us
self.ce(0)
# returns None if send still in progress, 1 for success, 2 for fail
def send_done(self):
if not (self.reg_read(STATUS) & (TX_DS | MAX_RT)):
return None # tx not finished
# either finished or failed: get and clear status flags, power down
status = self.reg_write(STATUS, RX_DR | TX_DS | MAX_RT)
self.reg_write(CONFIG, self.reg_read(CONFIG) & ~PWR_UP)
return 1 if status & TX_DS else 2 |
6,949 | issue command | #!/usr/bin/env python
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import http.client
import json
import ssl
import subprocess
def METHOD_NAME(cmd, force_info_log=False, suppress_warning=False,
env=None):
"""Tries running the provided command once.
Args:
cmd: A list of strings such as is given to the subprocess.Popen()
constructor.
env: A dict of key/value strings, such as is given to the subprocess.Popen()
constructor, that contains environment variables to be injected.
Returns:
A tuple of stdout, and retcode from running the provided command.
"""
print('=== Running: %s' % ' '.join(cmd))
process = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE)
stdout = ''
while True:
output = process.stdout.readline()
if not output and process.poll() is not None:
break
if output:
stdout += str(output)
print('= ' + str(output.strip()))
rc = process.poll()
print('=== Finished with code %d' % rc)
return stdout, rc
COLOR_RED = '\033[91m'
COLOR_GREEN = '\033[92m'
COLOR_END = '\033[0m'
HTTPS_PREFIX = 'https://'
HTTP_PREFIX = 'http://'
def green(text):
return COLOR_GREEN + text + COLOR_END
def red(text):
return COLOR_RED + text + COLOR_END
def http_connection(host, allow_unverified_cert):
if host.startswith(HTTPS_PREFIX):
host = host[len(HTTPS_PREFIX):]
ssl_ctx = ssl.create_default_context()
if allow_unverified_cert:
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE
else:
ssl_ctx.check_hostname = True
ssl_ctx.verify_mode = ssl.CERT_REQUIRED
return http.client.HTTPSConnection(host, timeout=10, context=ssl_ctx)
else:
if host.startswith(HTTP_PREFIX):
host = host[len(HTTP_PREFIX):]
else:
host = host
return http.client.HTTPConnection(host)
class Response(object):
"""A class to wrap around httplib.response class."""
def __init__(self, r):
self.text = r.read()
self.status_code = r.status
self.headers = r.getheaders()
self.content_type = r.getheader('content-type')
if self.content_type != None:
self.content_type = self.content_type.lower()
def json(self):
try:
return json.loads(self.text)
except ValueError as e:
print('Error: failed in JSON decode: %s' % self.text)
return {}
def is_json(self):
if self.content_type != 'application/json':
return False
try:
json.loads(self.text)
return True
except ValueError as e:
return False
def __str__(self):
return "status_code: {}, text: {}, headers: {}".format(self.status_code,
self.text,
self.headers)
class ApiProxyClientTest(object):
def __init__(self, host, host_header, allow_unverified_cert, verbose=False):
self._failed_tests = 0
self._passed_tests = 0
self.host = host
self.host_header = host_header
self.allow_unverified_cert = allow_unverified_cert
self._verbose = verbose
def fail(self, msg):
print('%s: %s' % (red('FAILED'), msg if msg else ''))
self._failed_tests += 1
def assertEqual(self, got, want):
msg = 'assertEqual(got=%s, want=%s)' % (str(got), str(want))
if got == want:
print('%s: %s' % (green('OK'), msg))
self._passed_tests += 1
else:
self.fail(msg)
def assertGE(self, a, b):
msg = 'assertGE(%s, %s)' % (str(a), str(b))
if a >= b:
print('%s: %s' % (green('OK'), msg))
self._passed_tests += 1
else:
self.fail(msg)
def assertLE(self, a, b):
msg = 'assertLE(%s, %s)' % (str(a), str(b))
if a <= b:
print('%s: %s' % (green('OK'), msg))
self._passed_tests += 1
else:
self.fail(msg)
def _call_http(self, path, api_key=None, auth=None, data=None, method=None,
userHeaders={}):
"""Makes a http call and returns its response."""
url = path
if api_key:
url += '?key=' + api_key
headers = {'Content-Type': 'application/json'}
if auth:
headers['Authorization'] = 'Bearer ' + auth
if self.host_header:
headers["Host"] = self.host_header
body = json.dumps(data) if data else None
for key, value in userHeaders.items():
headers[key] = value
if not method:
method = 'POST' if data else 'GET'
if self._verbose:
print('HTTP: %s %s' % (method, url))
print('headers: %s' % str(headers))
print('body: %s' % body)
conn = http_connection(self.host, self.allow_unverified_cert)
conn.request(method, url, body, headers)
response = Response(conn.getresponse())
print(response.status_code)
if self._verbose:
print('Status: %s, body=%s' % (response.status_code, response.text))
return response
def set_verbose(self, verbose):
self._verbose = verbose |
6,950 | calls | from multiprocessing import Queue, get_context
from multiprocessing.managers import SyncManager
from time import sleep
from typing import Sequence
import pytest
from pydantic import SecretStr
from tests.data_for_tests.propagation_credentials import (
CREDENTIALS,
PASSWORD_1,
PASSWORD_3,
USERNAME,
)
from tests.unit_tests.infection_monkey.base_island_api_client import BaseIslandAPIClient
from common.credentials import Credentials, LMHash, NTHash, Password, SSHKeypair, Username
from infection_monkey.island_api_client import IIslandAPIClient
from infection_monkey.propagation_credentials_repository import PropagationCredentialsRepository
STOLEN_USERNAME_1 = "user1"
STOLEN_USERNAME_2 = "user2"
STOLEN_USERNAME_3 = "user3"
STOLEN_PASSWORD_1 = SecretStr("abcdefg")
STOLEN_PASSWORD_2 = SecretStr("super_secret")
STOLEN_PUBLIC_KEY_1 = "some_public_key_1"
STOLEN_PUBLIC_KEY_2 = "some_public_key_2"
STOLEN_LM_HASH = SecretStr("AAD3B435B51404EEAAD3B435B51404EE")
STOLEN_NT_HASH = SecretStr("C0172DFF622FE29B5327CB79DC12D24C")
STOLEN_PRIVATE_KEY_1 = SecretStr("some_private_key_1")
STOLEN_PRIVATE_KEY_2 = SecretStr("some_private_key_2")
STOLEN_CREDENTIALS = [
Credentials(
identity=Username(username=STOLEN_USERNAME_1),
secret=Password(password=PASSWORD_1),
),
Credentials(
identity=Username(username=STOLEN_USERNAME_1), secret=Password(password=STOLEN_PASSWORD_1)
),
Credentials(
identity=Username(username=STOLEN_USERNAME_2),
secret=SSHKeypair(public_key=STOLEN_PUBLIC_KEY_1, private_key=STOLEN_PRIVATE_KEY_1),
),
Credentials(
identity=None,
secret=Password(password=STOLEN_PASSWORD_2),
),
Credentials(
identity=Username(username=STOLEN_USERNAME_2), secret=LMHash(lm_hash=STOLEN_LM_HASH)
),
Credentials(
identity=Username(username=STOLEN_USERNAME_2), secret=NTHash(nt_hash=STOLEN_NT_HASH)
),
Credentials(identity=Username(username=STOLEN_USERNAME_3), secret=None),
]
STOLEN_SSH_KEYS_CREDENTIALS = [
Credentials(
identity=Username(username=USERNAME),
secret=SSHKeypair(public_key=STOLEN_PUBLIC_KEY_2, private_key=STOLEN_PRIVATE_KEY_2),
)
]
NEW_CREDENTIALS = [
Credentials(
identity=Username(username="new"),
secret=Password(password=PASSWORD_3),
)
]
@pytest.fixture
def control_channel() -> IIslandAPIClient:
return StubIslandAPIClient(CREDENTIALS)
class StubIslandAPIClient(BaseIslandAPIClient):
def __init__(self, credentials: Sequence[Credentials]):
self._credentials = credentials
self._calls = get_context("spawn").Value("i", 0)
@property
def METHOD_NAME(self) -> int:
return self._calls.value
def get_credentials_for_propagation(self) -> Sequence[Credentials]:
with self._calls.get_lock():
self._calls.value += 1
return self._credentials
@pytest.fixture
def manager() -> SyncManager:
return get_context("spawn").Manager()
@pytest.fixture
def propagation_credentials_repository(
control_channel: StubIslandAPIClient,
manager: SyncManager,
) -> PropagationCredentialsRepository:
return PropagationCredentialsRepository(control_channel, manager)
def test_get_credentials__retrieves_from_control_channel(
propagation_credentials_repository: PropagationCredentialsRepository,
):
actual_stored_credentials = propagation_credentials_repository.get_credentials()
assert set(actual_stored_credentials) == set(CREDENTIALS)
def test_add_credentials(propagation_credentials_repository: PropagationCredentialsRepository):
propagation_credentials_repository.add_credentials(STOLEN_CREDENTIALS)
propagation_credentials_repository.add_credentials(STOLEN_SSH_KEYS_CREDENTIALS)
actual_stored_credentials = propagation_credentials_repository.get_credentials()
assert set(actual_stored_credentials) == set(
STOLEN_CREDENTIALS + STOLEN_SSH_KEYS_CREDENTIALS + CREDENTIALS
)
def test_credentials_obtained_if_propagation_credentials_fails(
propagation_credentials_repository: PropagationCredentialsRepository,
control_channel: StubIslandAPIClient,
monkeypatch,
):
def func() -> Sequence[Credentials]:
raise Exception("No credentials for you!")
monkeypatch.setattr(control_channel, "get_credentials_for_propagation", func)
credentials = propagation_credentials_repository.get_credentials()
assert credentials is not None
def test_get_credentials__uses_cached_credentials(
propagation_credentials_repository: PropagationCredentialsRepository,
control_channel: StubIslandAPIClient,
):
credentials1 = propagation_credentials_repository.get_credentials()
credentials2 = propagation_credentials_repository.get_credentials()
assert set(credentials1) == set(credentials2)
assert control_channel.METHOD_NAME == 1
def get_credentials(
propagation_credentials_repository: PropagationCredentialsRepository, queue: Queue
):
credentials = propagation_credentials_repository.get_credentials()
queue.put(credentials)
def test_get_credentials__used_cached_credentials_multiprocess(
propagation_credentials_repository: PropagationCredentialsRepository,
control_channel: StubIslandAPIClient,
):
context = get_context("spawn")
queue = context.Queue()
p1 = context.Process(target=get_credentials, args=(propagation_credentials_repository, queue))
p2 = context.Process(target=get_credentials, args=(propagation_credentials_repository, queue))
p1.start()
p2.start()
credentials1 = queue.get()
credentials2 = queue.get()
p1.join()
p2.join()
assert set(credentials1) == set(credentials2)
assert control_channel.METHOD_NAME == 1
@pytest.mark.slow
def test_get_credentials__updates_cache_after_timeout_period(
control_channel: StubIslandAPIClient,
manager: SyncManager,
):
propagation_credentials_repository = PropagationCredentialsRepository(
control_channel, manager, 0.01
)
context = get_context("spawn")
queue = context.Queue()
p1 = context.Process(target=get_credentials, args=(propagation_credentials_repository, queue))
p1.start()
p1.join()
# Sleep so that the poll period times out
sleep(0.02)
p2 = context.Process(target=get_credentials, args=(propagation_credentials_repository, queue))
p2.start()
p2.join()
assert control_channel.METHOD_NAME == 2 |
6,951 | code container properties | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetCodeContainerResult',
'AwaitableGetCodeContainerResult',
'get_code_container',
'get_code_container_output',
]
@pulumi.output_type
class GetCodeContainerResult:
"""
Azure Resource Manager resource envelope.
"""
def __init__(__self__, METHOD_NAME=None, id=None, name=None, system_data=None, type=None):
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'code_container_properties' to be a dict")
pulumi.set(__self__, "code_container_properties", METHOD_NAME)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="codeContainerProperties")
def METHOD_NAME(self) -> 'outputs.CodeContainerResponse':
"""
[Required] Additional attributes of the entity.
"""
return pulumi.get(self, "code_container_properties")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetCodeContainerResult(GetCodeContainerResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCodeContainerResult(
METHOD_NAME=self.METHOD_NAME,
id=self.id,
name=self.name,
system_data=self.system_data,
type=self.type)
def get_code_container(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCodeContainerResult:
"""
Azure Resource Manager resource envelope.
:param str name: Container name. This is case-sensitive.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: Name of Azure Machine Learning workspace.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20230401preview:getCodeContainer', __args__, opts=opts, typ=GetCodeContainerResult).value
return AwaitableGetCodeContainerResult(
METHOD_NAME=pulumi.get(__ret__, 'code_container_properties'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_code_container)
def get_code_container_output(name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetCodeContainerResult]:
"""
Azure Resource Manager resource envelope.
:param str name: Container name. This is case-sensitive.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: Name of Azure Machine Learning workspace.
"""
... |
6,952 | test authcode wellformed | """Tests for the OAuth2 API endpoints."""
from base64 import b64encode
from secrets import token_urlsafe
from typing import Dict
from urllib.parse import parse_qs, urlsplit
import pytest
from funnel import models
def test_authcode_requires_login(client) -> None:
"""The authcode endpoint requires a login."""
rv = client.get('/api/1/auth', follow_redirects=True)
assert rv.status_code == 200
assert rv.metarefresh is not None
# Auth is attempting to reload itself using meta-refresh to get a cookie
assert urlsplit(rv.metarefresh.url).path == '/api/1/auth'
rv = client.get(rv.metarefresh.url)
assert rv.status_code == 302
assert urlsplit(rv.location).path == '/login'
def METHOD_NAME(
client, login, user_rincewind, client_hex_credential
) -> None:
"""The authcode endpoint will raise 403 if not well formed."""
login.as_(user_rincewind)
# Incomplete request
query_params: Dict[str, str] = {}
rv = client.get('/api/1/auth', query_string=query_params)
assert rv.status_code == 403
assert "Missing client_id" in rv.get_data(as_text=True)
# Unknown client
query_params['client_id'] = 'unknown'
rv = client.get('/api/1/auth', query_string=query_params)
assert rv.status_code == 403
assert "Unknown client_id" in rv.get_data(as_text=True)
# Missing redirect URI (error is sent to client as a query parameter)
query_params['client_id'] = client_hex_credential.cred.name
rv = client.get('/api/1/auth', query_string=query_params)
assert rv.status_code == 303
assert parse_qs(urlsplit(rv.location).query)['error'] == ['invalid_request']
# TODO: Add redirect_uri, response_type, state, scope
@pytest.mark.dbcommit()
@pytest.mark.filterwarnings("ignore:Object of type <AuthToken> not in session")
def test_auth_untrusted_confidential(
client, login, user_rincewind, client_hex, client_hex_credential, csrf_token
) -> None:
"""Test auth on an untrusted confidential auth client."""
login.as_(user_rincewind)
# --- Create a typical auth code request -------------------------------------------
authcode_params = {
'client_id': client_hex_credential.cred.name,
'response_type': 'code',
'state': token_urlsafe(),
'scope': 'id',
'redirect_uri': client_hex.redirect_uri,
}
rv = client.get(
'/api/1/auth',
query_string=authcode_params,
)
# We got an auth page
assert rv.status_code == 200
# There is no existing AuthToken for this client and user
assert models.AuthToken.get_for(client_hex, account=user_rincewind) is None
# Submit form with `accept` and CSRF token
rv = client.post(
'/api/1/auth',
query_string=authcode_params,
data={'accept': '', 'csrf_token': csrf_token},
)
assert rv.status_code == 303
rparams = parse_qs(urlsplit(rv.location).query)
assert rparams['state'] == [authcode_params['state']]
code = rparams['code'][0]
assert code is not None
# --- Exchange code for a token ----------------------------------------------------
authtoken_params = {
'grant_type': 'authorization_code',
'code': code,
# For verification, the scope and redirect URI must be presented again
'scope': authcode_params['scope'],
'redirect_uri': authcode_params['redirect_uri'],
}
auth_header = (
'Basic '
+ b64encode(
(
client_hex_credential.cred.name + ':' + client_hex_credential.secret
).encode()
).decode()
)
rv = client.post(
'/api/1/token',
headers={'Authorization': auth_header},
data=authtoken_params,
)
assert rv.status_code == 200
data = rv.get_json()
# Confirm we have an access token
assert data['token_type'] == 'bearer'
assert data['access_token'] is not None
assert data['scope'] == authtoken_params['scope']
authtoken = models.AuthToken.get_for(client_hex, account=user_rincewind)
assert authtoken is not None
assert authtoken.token == data['access_token']
assert authtoken.token_type == data['token_type']
# --- Ask for an auth code again, with the same scope ------------------------------
authcode_params['state'] = token_urlsafe()
rv = client.get(
'/api/1/auth',
query_string=authcode_params,
)
# This time there is no authorization page asking for user permission. We got
# a redirect back, with the authcode
assert rv.status_code == 303
rparams = parse_qs(urlsplit(rv.location).query)
assert rparams['code'][0] is not None
# However, increasing the scope requires authorization once again
authcode_params['state'] = token_urlsafe()
authcode_params['scope'] = 'id email'
rv = client.get(
'/api/1/auth',
query_string=authcode_params,
)
assert rv.status_code == 200
# TODO: Test flow for trusted auth clients, and for public (non-confidential) clients |
6,953 | test from address | import unittest, sys
from ctypes import *
import _ctypes_test
ctype_types = [c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint,
c_long, c_ulong, c_longlong, c_ulonglong, c_double, c_float]
python_types = [int, int, int, int, int, int,
int, int, int, int, float, float]
class PointersTestCase(unittest.TestCase):
def test_pointer_crash(self):
class A(POINTER(c_ulong)):
pass
POINTER(c_ulong)(c_ulong(22))
# Pointer can't set contents: has no _type_
self.assertRaises(TypeError, A, c_ulong(33))
def test_pass_pointers(self):
dll = CDLL(_ctypes_test.__file__)
func = dll._testfunc_p_p
if sizeof(c_longlong) == sizeof(c_void_p):
func.restype = c_longlong
else:
func.restype = c_long
i = c_int(12345678)
## func.argtypes = (POINTER(c_int),)
address = func(byref(i))
self.assertEqual(c_int.from_address(address).value, 12345678)
func.restype = POINTER(c_int)
res = func(pointer(i))
self.assertEqual(res.contents.value, 12345678)
self.assertEqual(res[0], 12345678)
def test_change_pointers(self):
dll = CDLL(_ctypes_test.__file__)
func = dll._testfunc_p_p
i = c_int(87654)
func.restype = POINTER(c_int)
func.argtypes = (POINTER(c_int),)
res = func(pointer(i))
self.assertEqual(res[0], 87654)
self.assertEqual(res.contents.value, 87654)
# C code: *res = 54345
res[0] = 54345
self.assertEqual(i.value, 54345)
# C code:
# int x = 12321;
# res = &x
x = c_int(12321)
res.contents = x
self.assertEqual(i.value, 54345)
x.value = -99
self.assertEqual(res.contents.value, -99)
def test_callbacks_with_pointers(self):
# a function type receiving a pointer
PROTOTYPE = CFUNCTYPE(c_int, POINTER(c_int))
self.result = []
def func(arg):
for i in range(10):
## print arg[i],
self.result.append(arg[i])
## print
return 0
callback = PROTOTYPE(func)
dll = CDLL(_ctypes_test.__file__)
# This function expects a function pointer,
# and calls this with an integer pointer as parameter.
# The int pointer points to a table containing the numbers 1..10
doit = dll._testfunc_callback_with_pointer
## i = c_int(42)
## callback(byref(i))
## self.assertEqual(i.value, 84)
doit(callback)
## print self.result
doit(callback)
## print self.result
def test_basics(self):
from operator import delitem
for ct, pt in zip(ctype_types, python_types):
i = ct(42)
p = pointer(i)
## print type(p.contents), ct
self.assertIs(type(p.contents), ct)
# p.contents is the same as p[0]
## print p.contents
## self.assertEqual(p.contents, 42)
## self.assertEqual(p[0], 42)
self.assertRaises(TypeError, delitem, p, 0)
def METHOD_NAME(self):
from array import array
a = array('i', [100, 200, 300, 400, 500])
addr = a.buffer_info()[0]
p = POINTER(POINTER(c_int))
## print dir(p)
## print p.from_address
## print p.from_address(addr)[0][0]
def test_other(self):
class Table(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int)]
pt = pointer(Table(1, 2, 3))
self.assertEqual(pt.contents.a, 1)
self.assertEqual(pt.contents.b, 2)
self.assertEqual(pt.contents.c, 3)
pt.contents.c = 33
from ctypes import _pointer_type_cache
del _pointer_type_cache[Table]
def test_basic(self):
p = pointer(c_int(42))
# Although a pointer can be indexed, it has no length
self.assertRaises(TypeError, len, p)
self.assertEqual(p[0], 42)
self.assertEqual(p[0:1], [42])
self.assertEqual(p.contents.value, 42)
def test_charpp(self):
"""Test that a character pointer-to-pointer is correctly passed"""
dll = CDLL(_ctypes_test.__file__)
func = dll._testfunc_c_p_p
func.restype = c_char_p
argv = (c_char_p * 2)()
argc = c_int( 2 )
argv[0] = b'hello'
argv[1] = b'world'
result = func( byref(argc), argv )
self.assertEqual(result, b'world')
def test_bug_1467852(self):
# http://sourceforge.net/tracker/?func=detail&atid=532154&aid=1467852&group_id=71702
x = c_int(5)
dummy = []
for i in range(32000):
dummy.append(c_int(i))
y = c_int(6)
p = pointer(x)
pp = pointer(p)
q = pointer(y)
pp[0] = q # <==
self.assertEqual(p[0], 6)
def test_c_void_p(self):
# http://sourceforge.net/tracker/?func=detail&aid=1518190&group_id=5470&atid=105470
if sizeof(c_void_p) == 4:
self.assertEqual(c_void_p(0xFFFFFFFF).value,
c_void_p(-1).value)
self.assertEqual(c_void_p(0xFFFFFFFFFFFFFFFF).value,
c_void_p(-1).value)
elif sizeof(c_void_p) == 8:
self.assertEqual(c_void_p(0xFFFFFFFF).value,
0xFFFFFFFF)
self.assertEqual(c_void_p(0xFFFFFFFFFFFFFFFF).value,
c_void_p(-1).value)
self.assertEqual(c_void_p(0xFFFFFFFFFFFFFFFFFFFFFFFF).value,
c_void_p(-1).value)
self.assertRaises(TypeError, c_void_p, 3.14) # make sure floats are NOT accepted
self.assertRaises(TypeError, c_void_p, object()) # nor other objects
def test_pointers_bool(self):
# NULL pointers have a boolean False value, non-NULL pointers True.
self.assertEqual(bool(POINTER(c_int)()), False)
self.assertEqual(bool(pointer(c_int())), True)
self.assertEqual(bool(CFUNCTYPE(None)(0)), False)
self.assertEqual(bool(CFUNCTYPE(None)(42)), True)
# COM methods are boolean True:
if sys.platform == "win32":
mth = WINFUNCTYPE(None)(42, "name", (), None)
self.assertEqual(bool(mth), True)
def test_pointer_type_name(self):
LargeNamedType = type('T' * 2 ** 25, (Structure,), {})
self.assertTrue(POINTER(LargeNamedType))
# to not leak references, we must clean _pointer_type_cache
# GraalVM change
# from ctypes import _pointer_type_cache
# del _pointer_type_cache[LargeNamedType]
def test_pointer_type_str_name(self):
large_string = 'T' * 2 ** 25
P = POINTER(large_string)
self.assertTrue(P)
# to not leak references, we must clean _pointer_type_cache
# GraalVM change
# from ctypes import _pointer_type_cache
# del _pointer_type_cache[id(P)]
def test_abstract(self):
from ctypes import _Pointer
self.assertRaises(TypeError, _Pointer.set_type, 42)
if __name__ == '__main__':
unittest.main() |
6,954 | build list request | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def METHOD_NAME(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2018-08-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.ContainerService/operations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2018_08_01_preview.ContainerServiceClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.OperationValue"]:
"""Gets a list of compute operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationValue or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2018_08_01_preview.models.OperationValue]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop(
"api_version", _params.pop("api-version", self._api_version or "2018-08-01-preview")
)
cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = METHOD_NAME(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.ContainerService/operations"} |
6,955 | test delete tenant fails with invalid id | import uuid
import pytest
from prefect import api, models
def random_id() -> str:
return str(uuid.uuid4())
class TestCreateTenant:
async def test_create_tenant(self):
name = random_id()
slug = random_id()
tenant_id = await api.tenants.create_tenant(name=name, slug=slug)
assert await models.Tenant.where(id=tenant_id).first()
async def test_create_tenant_stores_attributes(self):
name = random_id()
slug = random_id()
tenant_id = await api.tenants.create_tenant(name=name, slug=slug)
tenant = await models.Tenant.where(id=tenant_id).first({"id", "name", "slug"})
assert tenant.id == tenant_id
assert tenant.name == name
assert tenant.slug == slug
async def test_create_tenant_fails_with_duplicate_slug(self):
await api.tenants.create_tenant(name="hi", slug="slug")
with pytest.raises(ValueError) as exc:
await api.tenants.create_tenant(name="hi again", slug="slug")
assert "Uniqueness violation" in str(exc.value)
async def test_create_tenant_with_default_slug(self):
name = "Hello there"
tenant_id = await api.tenants.create_tenant(name=name)
tenant = await models.Tenant.where(id=tenant_id).first({"slug"})
assert tenant.slug == "hello-there"
async def test_create_tenant_fails_with_duplicate_default_slug(self):
await api.tenants.create_tenant(name="hi")
with pytest.raises(ValueError) as exc:
await api.tenants.create_tenant(name="hi")
assert "Uniqueness violation" in str(exc.value)
async def test_create_tenant_with_complex_slug_fails(self):
name = random_id()
with pytest.raises(ValueError) as exc:
await api.tenants.create_tenant(name=name, slug="hello there")
assert 'Slug must be "slugified"' in str(exc.value)
class TestTenantSettings:
async def test_update_settings(self, tenant_id):
tenant = await models.Tenant.where(id=tenant_id).first("settings")
assert tenant.settings == {}
assert await api.tenants.update_settings(
tenant_id=tenant_id, settings={"x": "y"}
)
tenant = await models.Tenant.where(id=tenant_id).first("settings")
assert tenant.settings == {"x": "y"}
async def test_update_settings_without_overwriting(self, tenant_id):
tenant = await models.Tenant.where(id=tenant_id).first("settings")
assert tenant.settings == {}
assert await api.tenants.update_settings(
tenant_id=tenant_id, settings={"x": "y"}
)
assert await api.tenants.update_settings(
tenant_id=tenant_id, settings={"a": "b"}
)
tenant = await models.Tenant.where(id=tenant_id).first("settings")
assert tenant.settings == {"x": "y", "a": "b"}
async def test_update_settings_with_bad_user_id(self):
with pytest.raises(ValueError) as exc:
await api.tenants.update_settings(
tenant_id=str(uuid.uuid4()), settings={"a": "b"}
)
assert "Invalid tenant id" in str(exc.value)
class TestUpdateTenantName:
async def test_tenant_name(self, tenant_id):
assert await api.tenants.update_name(tenant_id=tenant_id, name="new name")
tenant = await models.Tenant.where(id=tenant_id).first({"name"})
assert tenant.name == "new name"
class TestUpdateTenantSlug:
async def test_tenant_slug(self, tenant_id):
assert await api.tenants.update_slug(tenant_id=tenant_id, slug="new-slug")
tenant = await models.Tenant.where(id=tenant_id).first({"slug"})
assert tenant.slug == "new-slug"
async def test_invalid_tenant_slug_fails(self, tenant_id):
with pytest.raises(ValueError, match=r"Slug must be \"slugified\""):
assert await api.tenants.update_slug(tenant_id=tenant_id, slug="new slug")
tenant = await models.Tenant.where(id=tenant_id).first({"slug"})
assert tenant.slug == "test-tenant"
async def test_duplicate_slug_fails(self, tenant_id):
await api.tenants.create_tenant(name="a new tenant", slug="a-new-slug")
with pytest.raises(ValueError, match="Uniqueness violation"):
assert await api.tenants.update_slug(tenant_id=tenant_id, slug="a-new-slug")
tenant = await models.Tenant.where(id=tenant_id).first({"slug"})
assert tenant.slug == "test-tenant"
class TestDeleteTenant:
async def test_delete_tenant(self, tenant_id):
result = await api.tenants.delete_tenant(tenant_id=tenant_id)
tenant = await models.Tenant.where(id=tenant_id).first()
assert result is True
assert tenant is None
async def METHOD_NAME(self):
result = await api.tenants.delete_tenant(tenant_id=str(uuid.uuid4()))
assert result is False
@pytest.mark.parametrize(
"bad_value",
[None, ""],
)
async def test_delete_tenant_fails_if_none(self, bad_value):
with pytest.raises(ValueError, match="Invalid tenant ID"):
await api.tenants.delete_tenant(tenant_id=bad_value) |
6,956 | test creator can delete in active phase | import pytest
from django.urls import reverse
from adhocracy4.test.helpers import assert_template_response
from adhocracy4.test.helpers import freeze_phase
from adhocracy4.test.helpers import redirect_target
from adhocracy4.test.helpers import setup_phase
from apps.mapideas import models
from apps.mapideas import phases
@pytest.mark.django_db
def test_anonymous_cannot_delete(client, map_idea_factory):
mapidea = map_idea_factory()
url = reverse(
"a4_candy_mapideas:mapidea-delete",
kwargs={
"organisation_slug": mapidea.project.organisation.slug,
"pk": mapidea.pk,
"year": mapidea.created.year,
},
)
response = client.get(url)
assert response.status_code == 302
assert redirect_target(response) == "account_login"
@pytest.mark.django_db
def test_user_cannot_delete(client, map_idea_factory, user):
mapidea = map_idea_factory()
client.login(username=user.email, password="password")
url = reverse(
"a4_candy_mapideas:mapidea-delete",
kwargs={
"organisation_slug": mapidea.project.organisation.slug,
"pk": mapidea.pk,
"year": mapidea.created.year,
},
)
response = client.get(url)
assert response.status_code == 403
@pytest.mark.django_db
def test_creator_cannot_delete(client, map_idea_factory):
mapidea = map_idea_factory()
client.login(username=mapidea.creator.email, password="password")
url = reverse(
"a4_candy_mapideas:mapidea-delete",
kwargs={
"organisation_slug": mapidea.project.organisation.slug,
"pk": mapidea.pk,
"year": mapidea.created.year,
},
)
response = client.get(url)
assert response.status_code == 403
@pytest.mark.django_db
def test_moderator_can_delete(client, map_idea_factory):
mapidea = map_idea_factory()
moderator = mapidea.module.project.moderators.first()
client.login(username=moderator.email, password="password")
url = reverse(
"a4_candy_mapideas:mapidea-delete",
kwargs={
"organisation_slug": mapidea.project.organisation.slug,
"pk": mapidea.pk,
"year": mapidea.created.year,
},
)
response = client.get(url)
assert response.status_code == 200
@pytest.mark.django_db
def test_initator_can_delete(client, map_idea_factory):
mapidea = map_idea_factory()
initiator = mapidea.module.project.organisation.initiators.first()
client.login(username=initiator.email, password="password")
url = reverse(
"a4_candy_mapideas:mapidea-delete",
kwargs={
"organisation_slug": mapidea.project.organisation.slug,
"pk": mapidea.pk,
"year": mapidea.created.year,
},
)
response = client.get(url)
assert response.status_code == 200
@pytest.mark.django_db
def test_admin_can_delete(client, map_idea_factory, admin):
mapidea = map_idea_factory()
client.login(username=admin.email, password="password")
url = reverse(
"a4_candy_mapideas:mapidea-delete",
kwargs={
"organisation_slug": mapidea.project.organisation.slug,
"pk": mapidea.pk,
"year": mapidea.created.year,
},
)
response = client.get(url)
assert response.status_code == 200
@pytest.mark.django_db
def METHOD_NAME(client, phase_factory, map_idea_factory):
phase, module, project, mapidea = setup_phase(
phase_factory, map_idea_factory, phases.IssuePhase
)
url = reverse(
"a4_candy_mapideas:mapidea-delete",
kwargs={
"organisation_slug": mapidea.project.organisation.slug,
"pk": mapidea.pk,
"year": mapidea.created.year,
},
)
with freeze_phase(phase):
count = models.MapIdea.objects.all().count()
assert count == 1
client.login(username=mapidea.creator.email, password="password")
response = client.get(url)
assert response.status_code == 200
assert_template_response(
response, "a4_candy_mapideas/mapidea_confirm_delete.html"
)
response = client.post(url)
assert redirect_target(response) == "project-detail"
assert response.status_code == 302
count = models.MapIdea.objects.all().count()
assert count == 0
@pytest.mark.django_db
def test_creator_cannot_delete_in_wrong_phase(client, phase_factory, map_idea_factory):
phase, module, project, mapidea = setup_phase(
phase_factory, map_idea_factory, phases.RatingPhase
)
url = reverse(
"a4_candy_mapideas:mapidea-delete",
kwargs={
"organisation_slug": mapidea.project.organisation.slug,
"pk": mapidea.pk,
"year": mapidea.created.year,
},
)
with freeze_phase(phase):
count = models.MapIdea.objects.all().count()
assert count == 1
client.login(username=mapidea.creator.email, password="password")
response = client.get(url)
assert response.status_code == 403
@pytest.mark.django_db
def test_moderator_can_delete_in_active_phase(client, phase_factory, map_idea_factory):
phase, module, project, mapidea = setup_phase(
phase_factory, map_idea_factory, phases.IssuePhase
)
url = reverse(
"a4_candy_mapideas:mapidea-delete",
kwargs={
"organisation_slug": mapidea.project.organisation.slug,
"pk": mapidea.pk,
"year": mapidea.created.year,
},
)
with freeze_phase(phase):
count = models.MapIdea.objects.all().count()
assert count == 1
moderator = mapidea.module.project.moderators.first()
client.login(username=moderator.email, password="password")
response = client.get(url)
assert response.status_code == 200
assert_template_response(
response, "a4_candy_mapideas/mapidea_confirm_delete.html"
)
response = client.post(url)
assert redirect_target(response) == "project-detail"
assert response.status_code == 302
count = models.MapIdea.objects.all().count()
assert count == 0
@pytest.mark.django_db
def test_moderator_can_delete_in_wrong_phase(client, phase_factory, map_idea_factory):
phase, module, project, mapidea = setup_phase(
phase_factory, map_idea_factory, phases.RatingPhase
)
url = reverse(
"a4_candy_mapideas:mapidea-delete",
kwargs={
"organisation_slug": mapidea.project.organisation.slug,
"pk": mapidea.pk,
"year": mapidea.created.year,
},
)
with freeze_phase(phase):
count = models.MapIdea.objects.all().count()
assert count == 1
moderator = mapidea.module.project.moderators.first()
client.login(username=moderator.email, password="password")
response = client.get(url)
assert response.status_code == 200
assert_template_response(
response, "a4_candy_mapideas/mapidea_confirm_delete.html"
)
response = client.post(url)
assert redirect_target(response) == "project-detail"
assert response.status_code == 302
count = models.MapIdea.objects.all().count()
assert count == 0 |
6,957 | set up | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import os
import sys
import unittest
# noinspection PyProtectedMember
from numpy.testing import assert_allclose
from numpy.testing import assert_array_less
from numpy.testing import assert_equal
from numpy.testing import assert_raises
from scipy.stats import rankdata
from sklearn.base import clone
from sklearn.metrics import roc_auc_score
# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pyod.models.copod import COPOD
from pyod.utils.data import generate_data
class TestCOPODParallel(unittest.TestCase):
def METHOD_NAME(self):
self.n_train = 200
self.n_test = 100
self.contamination = 0.1
self.roc_floor = 0.8
self.X_train, self.X_test, self.y_train, self.y_test = generate_data(
n_train=self.n_train, n_test=self.n_test, n_features=10,
contamination=self.contamination, random_state=42)
self.clf = COPOD(contamination=self.contamination, n_jobs=2)
self.clf.fit(self.X_train)
# get a copy from the single thread copy
self.clf_ = COPOD(contamination=self.contamination)
self.clf_.fit(self.X_train)
def test_fit(self):
clf = COPOD(contamination=self.contamination, n_jobs=3)
clf.fit(self.X_train[:, :2])
def test_parameters(self):
assert (hasattr(self.clf, 'decision_scores_') and
self.clf.decision_scores_ is not None)
assert (hasattr(self.clf, 'labels_') and
self.clf.labels_ is not None)
assert (hasattr(self.clf, 'threshold_') and
self.clf.threshold_ is not None)
def test_train_scores(self):
assert_equal(len(self.clf.decision_scores_), self.X_train.shape[0])
assert_allclose(self.clf.decision_scores_, self.clf_.decision_scores_)
def test_prediction_scores(self):
pred_scores = self.clf.decision_function(self.X_test)
# check score shapes
assert_equal(pred_scores.shape[0], self.X_test.shape[0])
# check performance
assert (roc_auc_score(self.y_test, pred_scores) >= self.roc_floor)
def test_prediction_labels(self):
pred_labels = self.clf.predict(self.X_test)
assert_equal(pred_labels.shape, self.y_test.shape)
pred_labels_ = self.clf_.predict(self.X_test)
assert_equal(pred_labels, pred_labels_)
def test_prediction_proba(self):
pred_proba = self.clf.predict_proba(self.X_test)
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)
def test_prediction_proba_linear(self):
pred_proba = self.clf.predict_proba(self.X_test, method='linear')
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)
def test_prediction_proba_unify(self):
pred_proba = self.clf.predict_proba(self.X_test, method='unify')
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)
def test_prediction_proba_parameter(self):
with assert_raises(ValueError):
self.clf.predict_proba(self.X_test, method='something')
def test_prediction_labels_confidence(self):
pred_labels, confidence = self.clf.predict(self.X_test,
return_confidence=True)
assert_equal(pred_labels.shape, self.y_test.shape)
assert_equal(confidence.shape, self.y_test.shape)
assert (confidence.min() >= 0)
assert (confidence.max() <= 1)
def test_prediction_proba_linear_confidence(self):
pred_proba, confidence = self.clf.predict_proba(self.X_test,
method='linear',
return_confidence=True)
assert (pred_proba.min() >= 0)
assert (pred_proba.max() <= 1)
assert_equal(confidence.shape, self.y_test.shape)
assert (confidence.min() >= 0)
assert (confidence.max() <= 1)
def test_fit_predict(self):
pred_labels = self.clf.fit_predict(self.X_train)
assert_equal(pred_labels.shape, self.y_train.shape)
def test_fit_predict_score(self):
self.clf.fit_predict_score(self.X_test, self.y_test)
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='roc_auc_score')
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='prc_n_score')
with assert_raises(NotImplementedError):
self.clf.fit_predict_score(self.X_test, self.y_test,
scoring='something')
def test_predict_rank(self):
pred_socres = self.clf.decision_function(self.X_test)
pred_ranks = self.clf._predict_rank(self.X_test)
# assert the order is reserved
assert_allclose(rankdata(pred_ranks), rankdata(pred_socres), atol=3)
assert_array_less(pred_ranks, self.X_train.shape[0] + 1)
assert_array_less(-0.1, pred_ranks)
def test_predict_rank_normalized(self):
pred_socres = self.clf.decision_function(self.X_test)
pred_ranks = self.clf._predict_rank(self.X_test, normalized=True)
# assert the order is reserved
assert_allclose(rankdata(pred_ranks), rankdata(pred_socres), atol=3)
assert_array_less(pred_ranks, 1.01)
assert_array_less(-0.1, pred_ranks)
def test_fit_single_feature_multiple_jobs(self):
clf = COPOD(contamination=self.contamination, n_jobs=5)
with assert_raises(ValueError):
clf.fit(self.X_train[:, 0])
# def test_plot(self):
# os, cutoff1, cutoff2 = self.clf.explain_outlier(ind=1)
# assert_array_less(0, os)
def test_model_clone(self):
clone_clf = clone(self.clf)
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main() |
6,958 | test packages to install with custom index | # Copyright 2022 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
from typing import Dict, List, NamedTuple
import unittest
from kfp.dsl import python_component
from kfp.dsl import structures
from kfp.dsl.component_decorator import component
class TestComponentDecorator(unittest.TestCase):
def test_as_decorator_syntactic_sugar_no_args(self):
@component
def hello_world(text: str) -> str:
"""Hello world component."""
return text
self.assertIsInstance(hello_world, python_component.PythonComponent)
def test_as_decorator_syntactic_sugar_some_args(self):
@component(base_image='python:3.9')
def hello_world(text: str) -> str:
"""Hello world component."""
return text
self.assertIsInstance(hello_world, python_component.PythonComponent)
def test_no_args(self):
@component
def comp(text: str) -> str:
return text
self.assertIsInstance(comp, python_component.PythonComponent)
def test_some_args(self):
@component(base_image='python:3.9')
def comp(text: str) -> str:
return text
self.assertIsInstance(comp, python_component.PythonComponent)
def test_packages_to_install(self):
@component(packages_to_install=['numpy', 'tensorflow'])
def comp(text: str) -> str:
return text
self.assertIsInstance(comp, python_component.PythonComponent)
concat_command = ' '.join(
comp.component_spec.implementation.container.command)
self.assertTrue('numpy' in concat_command and
'tensorflow' in concat_command)
def METHOD_NAME(self):
@component(
packages_to_install=['numpy', 'tensorflow'],
pip_index_urls=['https://pypi.org/simple'])
def comp(text: str) -> str:
return text
self.assertIsInstance(comp, python_component.PythonComponent)
concat_command = ' '.join(
comp.component_spec.implementation.container.command)
self.assertTrue('numpy' in concat_command and
'tensorflow' in concat_command)
self.assertTrue('https://pypi.org/simple' in concat_command)
def test_output_component_file_parameter(self):
with tempfile.TemporaryDirectory() as tmpdir:
filepath = os.path.join(tmpdir, 'my_component.yaml')
with self.assertWarnsRegex(
DeprecationWarning,
r'output_component_file parameter is deprecated and will eventually be removed\.'
):
@component(output_component_file=filepath)
def comp(text: str) -> str:
return text
self.assertIsInstance(comp, python_component.PythonComponent)
self.assertTrue(os.path.exists(filepath))
with open(filepath, 'r') as f:
yaml_text = f.read()
component_spec = structures.ComponentSpec.from_yaml_documents(yaml_text)
self.assertEqual(component_spec.name, comp.component_spec.name)
def test_output_named_tuple_with_dict(self):
@component
def comp(
text: str) -> NamedTuple('outputs', [('data', Dict[str, str])]):
outputs = NamedTuple('outputs', [('data', Dict[str, str])])
return outputs(data={text: text})
# TODO: ideally should be the canonical type string, rather than the specific annotation as string, but both work
self.assertEqual(comp.component_spec.outputs['data'].type,
'typing.Dict[str, str]')
def test_output_dict(self):
@component
def comp(text: str) -> Dict[str, str]:
return {text: text}
# TODO: ideally should be the canonical type string, rather than the specific annotation as string, but both work
self.assertEqual(comp.component_spec.outputs['Output'].type,
'typing.Dict[str, str]')
def test_output_named_tuple_with_list(self):
@component
def comp(text: str) -> NamedTuple('outputs', [('data', List[str])]):
outputs = NamedTuple('outputs', [('data', List[str])])
return outputs(data={text: text})
# TODO: ideally should be the canonical type string, rather than the specific annotation as string, but both work
self.assertEqual(comp.component_spec.outputs['data'].type,
'typing.List[str]')
def test_output_list(self):
@component
def comp(text: str) -> List[str]:
return {text: text}
# TODO: ideally should be the canonical type string, rather than the specific annotation as string, but both work
self.assertEqual(comp.component_spec.outputs['Output'].type,
'typing.List[str]') |
6,959 | extract id from inline | #-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import dotenv
from bs4 import BeautifulSoup
import json
import markdown_it
import os
import re
import sys
from typing import List, Optional, Tuple
dotenv.load_dotenv()
# Create a new MarkdownIt instance
md = markdown_it.MarkdownIt()
# API Doc Constants
MAY_PATTERN = r'{% include requirement/MAY\s*id=\\?"[a-zA-Z0-9_-]+\\?" %}'
MAY_REPLACE = 'YOU MAY'
MUST_DO_PATTERN = r'{% include requirement/MUST\s*id=\\?"[a-zA-Z0-9_-]+\\?" %}'
MUST_NO_ID_PATTERN = r'{% include requirement/MUST %}'
MUST_DO_REPLACE = 'DO'
MUST_NOT_PATTERN = r'{% include requirement/MUSTNOT\s*id=\\?"[a-zA-Z0-9_-]+\\?" %}'
MUST_NOT_REPLACE = 'DO NOT'
SHOULD_PATTERN = r'{% include requirement/SHOULD\s*id=\\?"[a-zA-Z0-9_-]+\\?" %}'
SHOULD_NO_ID_PATTERN = r'{% include requirement/SHOULD %}'
SHOULD_REPLACE = 'YOU SHOULD'
SHOULD_NOT_PATTERN = r'{% include requirement/SHOULDNOT\s*id=\\?"[a-zA-Z0-9_-]+\\?" %}'
SHOULD_NOT_REPLACE = 'YOU SHOULD NOT'
INCLUDE_PATTERN = r'{%\s*(include|include_relative)\s*([^\s%}]+)\s*%}'
INCLUDE_NOTE_PATTERN = r'{% include note.html content=\\?"([^\\]+)\\?" %}'
INCLUDE_NOTE_REPLACE = r'**NOTE:** \1'
INCLUDE_DRAFT_PATTERN = r'{% include draft.html content=\\?"([^\\]+)\\?" %}'
INCLUDE_DRAFT_REPLACE = r'**DRAFT:** \1'
INCLUDE_IMPORTANT_PATTERN = r'{% include important.html content=\\?"([^\\]+)\\?" %}'
INCLUDE_IMPORTANT_REPLACE = r'**IMPORTANT:** \1'
ICON_PATTERN = r'^:[a-z_]+: '
ICON_REPLACE = ''
# Parse the markdown file
def parse_markdown(file, root_path) -> List[dict]:
with open(file, 'r', encoding='utf-8') as f:
md_text = f.read()
entries = []
html = md.render(md_text)
soup = BeautifulSoup(html, features="html.parser")
category = None
for item in soup.find_all():
if item.name in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
category = item.text
# Skip the explanations of rule types in introduction section
if category == 'Prescriptive Guidance':
continue
if item.name == 'p':
text, id = _split_tags(item, file)
text = _add_links(text, item)
text = _expand_include_tags(text, root_path, os.path.dirname(file))
if id:
entries.append({
'id': id,
'category': category,
'text': text,
})
else:
try:
entries[-1]['text'] += '\n\n' + text
except IndexError:
continue
elif item.name in ["pre"]:
raw_html = ''.join(str(tag) for tag in item.contents)
markdown_text = _convert_code_tag_to_markdown(raw_html)
markdown_text = _expand_include_tags(markdown_text, root_path, os.path.dirname(file))
try:
entries[-1]['text'] += '\n\n' + markdown_text
except IndexError:
continue
elif item.name in ['ol', 'ul']:
items = item.find_all('li')
for item in items:
item_text, id = _split_tags(item, file)
item_text = _add_links(item_text, item)
item_text = _expand_include_tags(item_text, root_path, os.path.dirname(file))
if id:
entries.append({
'id': id,
'category': category,
'text': item_text,
})
else:
try:
entries[-1]['text'] += '\n' + item_text
except IndexError:
continue
else:
continue
return entries
def _add_links(text, item):
"""Find any links associated with the text and add them in format: text (link)
"""
links = [link for link in item.find_all("a") if link.get("href", "").startswith("http")]
if not links:
return text
for link in links:
index = text.find(link.text)
if index == -1:
continue
text = f"{text[:index]}{link.text} ({link['href']}) {text[len(link.text)+1 + index:]}"
return text
def _expand_include_tags(text, root_path, rel_path) -> str:
matches = re.findall(INCLUDE_PATTERN, text)
if not matches:
return text
for match in matches:
include_tag = match[0]
include_path = match[1]
if include_tag == 'include_relative':
include_path = os.path.join(rel_path, include_path)
with open(include_path, 'r', encoding='utf-8') as f:
text = f.read()
else:
include_path = os.path.join(root_path, "_includes", include_path)
with open(include_path, 'r', encoding='utf-8') as f:
text = f.read()
# if text looks like html, convert it to markdown
if text.startswith('<'):
return _convert_html_to_markdown(text)
else:
return text
def _convert_html_to_markdown(html) -> str:
# convert HTML text to markdown
markdown = md.render(html)
return markdown
def _convert_code_tag_to_markdown(html):
# Define the regular expression to match the code tag
code_tag_pattern = r'<code class="language-(.+)">([\s\S]*?)</code>'
match = re.search(code_tag_pattern, html)
if match:
language = match[1]
code = match[2]
markdown = f'```{language}\n{code}\n```'
return markdown
else:
return html
# Split the tag from the ID
def _split_tags(item, file) -> Tuple[str, Optional[str]]:
text = item.text
id = METHOD_NAME(item)
text = re.sub(MAY_PATTERN, MAY_REPLACE, text)
text = re.sub(MUST_DO_PATTERN, MUST_DO_REPLACE, text)
text = re.sub(MUST_NO_ID_PATTERN, MUST_DO_REPLACE, text)
text = re.sub(MUST_NOT_PATTERN, MUST_NOT_REPLACE, text)
text = re.sub(SHOULD_PATTERN, SHOULD_REPLACE, text)
text = re.sub(SHOULD_NO_ID_PATTERN, SHOULD_REPLACE, text)
text = re.sub(SHOULD_NOT_PATTERN, SHOULD_NOT_REPLACE, text)
text = re.sub(ICON_PATTERN, ICON_REPLACE, text)
text = re.sub(INCLUDE_NOTE_PATTERN, INCLUDE_NOTE_REPLACE, text)
text = re.sub(INCLUDE_IMPORTANT_PATTERN, INCLUDE_IMPORTANT_REPLACE, text)
text = re.sub(INCLUDE_DRAFT_PATTERN, INCLUDE_DRAFT_REPLACE, text)
# REST API guidelines don't actually have IDs.
if not file.endswith("Guidelines.md"):
segments = file.split(os.sep)
relevant_segments = segments[segments.index("docs") + 1:]
prefix = "_".join(relevant_segments).replace(".md", ".html")
id = f"{prefix}#{id}" if id else id
return text, id
# Extract the id from the inline text
def METHOD_NAME(item):
id = re.search(r'id="([a-zA-Z0-9_-]+)"', item.text)
if id:
return id.group(1)
try:
id = item.next_element.attrs["name"]
except:
id = None
return id |
6,960 | get lineno | from _typeshed import Incomplete, SupportsGetItem, SupportsLenAndGetItem, Unused
from abc import abstractmethod
from collections.abc import Iterable, Iterator, MutableSequence
from typing_extensions import Final, Self, TypeAlias
from .fixer_base import BaseFix
from .pgen2.grammar import Grammar
_NL: TypeAlias = Node | Leaf
_Context: TypeAlias = tuple[str, int, int]
_Results: TypeAlias = dict[str, _NL]
_RawNode: TypeAlias = tuple[int, str, _Context, list[_NL] | None]
HUGE: Final = 0x7FFFFFFF
def type_repr(type_num: int) -> str | int: ...
class Base:
type: int
parent: Node | None
prefix: str
children: list[_NL]
was_changed: bool
was_checked: bool
def __eq__(self, other: object) -> bool: ...
@abstractmethod
def _eq(self, other: Base) -> bool: ...
@abstractmethod
def clone(self) -> Self: ...
@abstractmethod
def post_order(self) -> Iterator[Self]: ...
@abstractmethod
def pre_order(self) -> Iterator[Self]: ...
def replace(self, new: _NL | list[_NL]) -> None: ...
def METHOD_NAME(self) -> int: ...
def changed(self) -> None: ...
def remove(self) -> int | None: ...
@property
def next_sibling(self) -> _NL | None: ...
@property
def prev_sibling(self) -> _NL | None: ...
def leaves(self) -> Iterator[Leaf]: ...
def depth(self) -> int: ...
def get_suffix(self) -> str: ...
class Node(Base):
fixers_applied: MutableSequence[BaseFix] | None
# Is Unbound until set in refactor.RefactoringTool
future_features: frozenset[Incomplete]
# Is Unbound until set in pgen2.parse.Parser.pop
used_names: set[str]
def __init__(
self,
type: int,
children: Iterable[_NL],
context: Unused = None,
prefix: str | None = None,
fixers_applied: MutableSequence[BaseFix] | None = None,
) -> None: ...
def _eq(self, other: Base) -> bool: ...
def clone(self) -> Node: ...
def post_order(self) -> Iterator[Self]: ...
def pre_order(self) -> Iterator[Self]: ...
def set_child(self, i: int, child: _NL) -> None: ...
def insert_child(self, i: int, child: _NL) -> None: ...
def append_child(self, child: _NL) -> None: ...
def __unicode__(self) -> str: ...
class Leaf(Base):
lineno: int
column: int
value: str
fixers_applied: MutableSequence[BaseFix]
def __init__(
self,
type: int,
value: str,
context: _Context | None = None,
prefix: str | None = None,
fixers_applied: MutableSequence[BaseFix] = [],
) -> None: ...
def _eq(self, other: Base) -> bool: ...
def clone(self) -> Leaf: ...
def post_order(self) -> Iterator[Self]: ...
def pre_order(self) -> Iterator[Self]: ...
def __unicode__(self) -> str: ...
def convert(gr: Grammar, raw_node: _RawNode) -> _NL: ...
class BasePattern:
type: int
content: str | None
name: str | None
def optimize(self) -> BasePattern: ... # sic, subclasses are free to optimize themselves into different patterns
def match(self, node: _NL, results: _Results | None = None) -> bool: ...
def match_seq(self, nodes: SupportsLenAndGetItem[_NL], results: _Results | None = None) -> bool: ...
def generate_matches(self, nodes: SupportsGetItem[int, _NL]) -> Iterator[tuple[int, _Results]]: ...
class LeafPattern(BasePattern):
def __init__(self, type: int | None = None, content: str | None = None, name: str | None = None) -> None: ...
class NodePattern(BasePattern):
wildcards: bool
def __init__(self, type: int | None = None, content: str | None = None, name: str | None = None) -> None: ...
class WildcardPattern(BasePattern):
min: int
max: int
def __init__(self, content: str | None = None, min: int = 0, max: int = 0x7FFFFFFF, name: str | None = None) -> None: ...
class NegatedPattern(BasePattern):
def __init__(self, content: str | None = None) -> None: ...
def generate_matches(
patterns: SupportsGetItem[int | slice, BasePattern] | None, nodes: SupportsGetItem[int | slice, _NL]
) -> Iterator[tuple[int, _Results]]: ... |
6,961 | fn | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import re
import warnings
from django.conf import settings
from django.http import HttpResponse
import json
from django.utils.encoding import force_str
from django.utils.functional import Promise
from dimagi.utils.parsing import json_format_datetime
from datetime import date, datetime, time
from decimal import Decimal
def get_url_base():
return '{}://{}'.format(settings.DEFAULT_PROTOCOL, get_site_domain())
def get_site_domain():
return settings.BASE_ADDRESS
def get_static_url_prefix():
return '' if settings.STATIC_CDN else 'http://' + get_site_domain()
def parse_int(arg_keys=[], kwarg_keys=[]):
"""
A decorator to translate coerce arguments to be ints
>>> @parse_int([0,1])
>>> def add(x,y):
... return x + y
...
>>> add("1", "2")
3
"""
def _parse_int(fn):
def METHOD_NAME(*args, **kwargs):
args = list(args)
kwargs = dict(kwargs)
for i in arg_keys:
args[i] = int(args[i])
for key in kwarg_keys:
kwargs[key] = int(kwargs[key])
return fn(*args, **kwargs)
return METHOD_NAME
return _parse_int
# http://stackoverflow.com/questions/455580/json-datetime-between-python-and-javascript
def json_handler(obj):
if callable(getattr(obj, 'to_complete_json', None)):
return obj.to_complete_json()
elif callable(getattr(obj, 'to_json', None)):
return obj.to_json()
elif isinstance(obj, datetime):
return json_format_datetime(obj)
elif isinstance(obj, date):
return obj.isoformat()
elif isinstance(obj, time):
return obj.strftime('%H:%M:%S')
elif isinstance(obj, Decimal):
return float(obj) # warning, potential loss of precision
elif isinstance(obj, Promise):
return force_str(obj) # to support gettext_lazy
elif isinstance(obj, bytes):
return obj.decode('utf-8')
else:
return json.JSONEncoder().default(obj)
def json_response(obj, status_code=200, **kwargs):
warnings.warn(
"json_response is deprecated. Use django.http.JsonResponse instead.",
DeprecationWarning,
)
if 'default' not in kwargs:
kwargs['default'] = json_handler
return HttpResponse(json.dumps(obj, **kwargs), status=status_code,
content_type="application/json")
def json_request(params, lenient=True, booleans_as_strings=False):
d = {}
for key, val in params.items():
try:
if booleans_as_strings and val in ('true', 'false'):
d[str(key)] = val
else:
d[str(key)] = json.loads(val)
except ValueError:
if lenient:
d[str(key)] = val
else:
raise
return d
# this is not intended to be an all-knowing IP address regex
IP_RE = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
def get_ip(request):
"""
Retrieves the remote IP address from the request data. If the user is
behind a proxy, they may have a comma-separated list of IP addresses, so
we need to account for that. In such a case, only the first IP in the
list will be retrieved. Also, some hosts that use a proxy will put the
REMOTE_ADDR into HTTP_X_FORWARDED_FOR. This will handle pulling back the
IP from the proper place.
"""
# if neither header contain a value, just use local loopback
ip_address = request.META.get('HTTP_X_FORWARDED_FOR', request.META.get('REMOTE_ADDR', '127.0.0.1'))
if ip_address:
# make sure we have one and only one IP
try:
ip_address = IP_RE.match(ip_address)
if ip_address:
ip_address = ip_address.group(0)
else:
# no IP, probably from some dirty proxy or other device
# throw in some bogus IP
ip_address = '10.0.0.1'
except IndexError:
pass
return ip_address |
6,962 | test ingredient groups | from recipe_scrapers._grouping_utils import IngredientGroup
from recipe_scrapers.rainbowplantlife import RainbowPlantLife
from tests import ScraperTest
class TestRainbowPlantLifeScraper(ScraperTest):
scraper_class = RainbowPlantLife
test_file_name = "rainbowplantlife_groups"
def test_host(self):
self.assertEqual("rainbowplantlife.com", self.harvester_class.host())
def test_title(self):
self.assertEqual("Vegan Pasta Salad", self.harvester_class.title())
def test_total_time(self):
self.assertEqual(40, self.harvester_class.total_time())
def test_yields(self):
self.assertEqual("10 servings", self.harvester_class.yields())
def test_image(self):
self.assertEqual(
"https://rainbowplantlife.com/wp-content/uploads/2022/06/vegan-pasta-salad-beauty-shot-1-of-2-scaled.jpg",
self.harvester_class.image(),
)
def test_ingredients(self):
self.assertEqual(
[
"1 pound fusilli, (rotini, or penne rigate (a ridged pasta))",
"1 cup (112g) raw walnuts",
"5 ounces (140g) sourdough loaf, (baguette, or country-style bread, sliced)",
"1 (12-ounce/340g) jar of roasted red bell peppers**, (drained from the liquid in the jar)",
"3 garlic cloves, (roughly chopped)",
"1 medium lemon, (zested and juiced (3 tablespoons juice; save the zest for the Topping))",
"½ to 1 teaspoon smoked paprika ((use 1 teaspoon for prominent smokiness))",
"½ teaspoon red pepper flakes",
"1 teaspoon kosher salt, (plus more to taste)",
"Freshly cracked black pepper to taste",
"⅓ cup (75g) extra virgin olive oil",
"Reserved bread crumbs from Sauce",
"Reserved lemon zest from Topping",
"1 cup (16g) flat-leaf parsley, (finely chopped)",
"1 ½ cups (24g) fresh basil, (finely chopped)",
"3 tablespoons capers***, (chopped)",
"¼ teaspoon red pepper flakes ((optional) )",
"½ teaspoon flaky sea salt",
"3 cups (60g) baby arugula, (chopped)",
],
self.harvester_class.ingredients(),
)
def METHOD_NAME(self):
self.assertEqual(
[
IngredientGroup(
ingredients=[
"1 pound fusilli, (rotini, or penne rigate (a ridged pasta))"
],
purpose=None,
),
IngredientGroup(
ingredients=[
"1 cup (112g) raw walnuts",
"5 ounces (140g) sourdough loaf, (baguette, or country-style bread, sliced)",
"1 (12-ounce/340g) jar of roasted red bell peppers**, (drained from the liquid in the jar)",
"3 garlic cloves, (roughly chopped)",
"1 medium lemon, (zested and juiced (3 tablespoons juice; save the zest for the Topping))",
"½ to 1 teaspoon smoked paprika ((use 1 teaspoon for prominent smokiness))",
"½ teaspoon red pepper flakes",
"1 teaspoon kosher salt, (plus more to taste)",
"Freshly cracked black pepper to taste",
"⅓ cup (75g) extra virgin olive oil",
],
purpose="Sauce",
),
IngredientGroup(
ingredients=[
"Reserved bread crumbs from Sauce",
"Reserved lemon zest from Topping",
"1 cup (16g) flat-leaf parsley, (finely chopped)",
"1 ½ cups (24g) fresh basil, (finely chopped)",
"3 tablespoons capers***, (chopped)",
"¼ teaspoon red pepper flakes ((optional) )",
"½ teaspoon flaky sea salt",
"3 cups (60g) baby arugula, (chopped)",
],
purpose="Herby Bread Crumb Topping",
),
],
self.harvester_class.ingredient_groups(),
)
def test_instructions(self):
self.assertEqual(
"Preheat the oven to 350ºF/175ºC. Place bread slices on a rimmed sheet pan. Spread out walnuts in the empty spaces on the pan. Toast both in the oven for 8 to 10 minutes, tossing the walnuts and flipping the bread slices over halfway through, until the walnuts are toasty and lightly browned and the bread is also a bit toasty. Allow to cool a bit.\nCook the pasta. Bring a large pot of water to a boil, 4 quarts/liters or 128 ounces. Once boiling, season with 2 tablespoons of kosher salt. Add the pasta and, once it comes back to a boil, set a timer for 2 minutes longer than the upper range of the cook time for al dente pasta on the box instructions.***Before draining, reserve 1 cup of pasta water, then drain pasta in a colander. Rinse the pasta with cold water to bring to room temperature and shake off the excess water.\nMake the bread crumbs: Tear the toasted bread into small pieces. Add them to a food processor and pulse repeatedly until you get bread crumbs, but don’t blend continuously—this helps retain some texture. It'll take a couple minutes. Take out half of the bread crumbs and set aside in a medium bowl for the Topping.\nMake the roasted red pepper sauce****. To the remaining bread crumbs in the food processor, add the toasted walnuts, roasted bell peppers, garlic, lemon juice (save the zest for the topping), smoked paprika, red pepper flakes, 1 teaspoon kosher salt, and several cracks of pepper. Process until a thick paste forms. With the motor running, stream in the olive oil until a sauce forms, and blend until smooth and thick. Season to taste with salt and pepper as needed, but don’t add too much salt, as there’s also salt in the Topping.\nMake the Herby Bread Crumb Topping: In a bowl, combine the reserved bread crumbs, reserved lemon zest, chopped parsley and basil, chopped capers, red pepper flakes if using, and flaky salt. Stir well to combine.\nAssemble the pasta: toss the pasta with the roasted red pepper sauce until well coated. Add about ¾ cup (180 mL) of pasta water and toss to coat. Add more pasta water until it’s sufficiently saucy. Add the arugula and any other desired mix-ins***** and toss to combine. Add the Herby Bread Crumb Topping and gently toss just to combine.",
self.harvester_class.instructions(),
)
def test_ratings(self):
self.assertEqual(4.97, self.harvester_class.ratings()) |
6,963 | test main help | import asyncio
import configparser
import sys
import tempfile
import unittest.mock as mock
from pathlib import Path
from typing import TYPE_CHECKING, Any
import pytest
from _pytest.capture import CaptureFixture
from _pytest.logging import LogCaptureFixture
import bandersnatch.mirror
import bandersnatch.storage
from bandersnatch.configuration import Singleton
from bandersnatch.main import main
from bandersnatch.simple import SimpleFormat
if TYPE_CHECKING:
from bandersnatch.mirror import BandersnatchMirror
async def empty_dict(*args: Any, **kwargs: Any) -> dict:
return {}
def setup() -> None:
"""simple setup function to clear Singleton._instances before each test"""
Singleton._instances = {}
def METHOD_NAME(capfd: CaptureFixture) -> None:
sys.argv = ["bandersnatch", "--help"]
with pytest.raises(SystemExit):
main(asyncio.new_event_loop())
out, err = capfd.readouterr()
assert out.startswith("usage: bandersnatch")
assert "" == err
def test_main_create_config(caplog: LogCaptureFixture, tmpdir: Path) -> None:
sys.argv = ["bandersnatch", "-c", str(tmpdir / "bandersnatch.conf"), "mirror"]
assert main(asyncio.new_event_loop()) == 1
assert "creating default config" in caplog.text
conf_path = Path(tmpdir) / "bandersnatch.conf"
assert conf_path.exists()
def test_main_cant_create_config(caplog: LogCaptureFixture, tmpdir: Path) -> None:
sys.argv = [
"bandersnatch",
"-c",
str(tmpdir / "foo" / "bandersnatch.conf"),
"mirror",
]
assert main(asyncio.new_event_loop()) == 1
assert "creating default config" in caplog.text
assert "Could not create config file" in caplog.text
conf_path = Path(tmpdir) / "bandersnatch.conf"
assert not conf_path.exists()
def test_main_reads_config_values(mirror_mock: mock.MagicMock, tmpdir: Path) -> None:
base_config_path = Path(bandersnatch.__file__).parent / "unittest.conf"
diff_file = Path(tempfile.gettempdir()) / "srv/pypi/mirrored-files"
config_lines = [
(
f"diff-file = {diff_file.as_posix()}\n"
if line.startswith("diff-file")
else line
)
for line in base_config_path.read_text().splitlines()
]
config_path = tmpdir / "unittest.conf"
config_path.write_text("\n".join(config_lines), encoding="utf-8")
sys.argv = ["bandersnatch", "-c", str(config_path), "mirror"]
assert config_path.exists()
main(asyncio.new_event_loop())
(homedir, master), kwargs = mirror_mock.call_args_list[0]
assert Path("/srv/pypi") == homedir
assert isinstance(master, bandersnatch.master.Master)
assert {
"stop_on_error": False,
"hash_index": False,
"workers": 3,
"root_uri": "",
"json_save": False,
"digest_name": "sha256",
"keep_index_versions": 0,
"release_files_save": True,
"storage_backend": "filesystem",
"diff_file": diff_file,
"diff_append_epoch": False,
"diff_full_path": diff_file,
"cleanup": False,
"compare_method": "hash",
"download_mirror": "",
"download_mirror_no_fallback": False,
"simple_format": SimpleFormat.ALL,
} == kwargs
def test_main_reads_custom_config_values(
mirror_mock: "BandersnatchMirror", logging_mock: mock.MagicMock, customconfig: Path
) -> None:
setup()
conffile = str(customconfig / "bandersnatch.conf")
sys.argv = ["bandersnatch", "-c", conffile, "mirror"]
main(asyncio.new_event_loop())
(log_config, _kwargs) = logging_mock.call_args_list[0]
assert log_config == (str(customconfig / "bandersnatch-log.conf"),)
def test_main_throws_exception_on_unsupported_digest_name(
customconfig: Path,
) -> None:
setup()
conffile = str(customconfig / "bandersnatch.conf")
parser = configparser.ConfigParser()
parser.read(conffile)
parser["mirror"]["digest_name"] = "foobar"
del parser["mirror"]["log-config"]
with open(conffile, "w") as fp:
parser.write(fp)
sys.argv = ["bandersnatch", "-c", conffile, "mirror"]
with pytest.raises(ValueError) as e:
main(asyncio.new_event_loop())
assert "foobar is not a valid" in str(e.value)
@pytest.fixture
def customconfig(tmpdir: Path) -> Path:
default_path = Path(bandersnatch.__file__).parent / "unittest.conf"
with default_path.open("r") as dfp:
config = dfp.read()
config = config.replace("/srv/pypi", str(tmpdir / "pypi"))
with open(str(tmpdir / "bandersnatch.conf"), "w") as f:
f.write(config)
config = config.replace("; log-config", "log-config")
config = config.replace(
"/etc/bandersnatch-log.conf", str(tmpdir / "bandersnatch-log.conf")
)
with open(str(tmpdir / "bandersnatch.conf"), "w") as f:
f.write(config)
return tmpdir |
6,964 | get hostname for platform | """
Copyright (c) 2017-2022 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from typing import Any, Dict, Optional
from atomic_reactor.plugin import Plugin
from atomic_reactor.constants import PLUGIN_GATHER_BUILDS_METADATA_KEY
from atomic_reactor.util import Output, is_scratch_build, get_platforms
from atomic_reactor.utils.koji import get_buildroot, get_output, get_output_metadata
from osbs.utils import ImageName
from atomic_reactor.utils.remote_host import RemoteHost
from atomic_reactor.utils.rpm import parse_rpm_output
class GatherBuildsMetadataPlugin(Plugin):
"""
Gather builds metadata from platform-specific builds which are done by
podman-remote on remote hosts. This metadata may also contain metadata
fetched from remote hosts by running commands via ssh tunnel.
This plugin returns a mapping from platform to the build's metadata.
For example,
{
"x86_64": {...},
"s390x": {...},
}
Each metadata mapping follows the format of Koji Content Generator Metadata.
This plugin ensures it has keys ``metadata_version``, ``buildroots`` and
``output``.
"""
key = PLUGIN_GATHER_BUILDS_METADATA_KEY
is_allowed_to_fail = False
def _determine_image_pullspec(self, platform: str) -> ImageName:
tag_conf = self.workflow.data.tag_conf
unique_images = tag_conf.get_unique_images_with_platform(platform)
if not unique_images:
raise RuntimeError('Unable to determine pullspec_image')
return unique_images[0]
def _generate_build_log_output(self, platform: str, buildroot_id: str) -> Optional[Output]:
build_log_file = self.workflow.context_dir.get_platform_build_log(platform)
if not build_log_file.exists():
self.log.info("Build log file is not found: %s", str(build_log_file))
return None
metadata = get_output_metadata(str(build_log_file), build_log_file.name)
metadata['buildroot_id'] = buildroot_id
metadata['type'] = 'log'
metadata['arch'] = platform
return Output(metadata=metadata, filename=str(build_log_file))
def METHOD_NAME(self, platform: str) -> str:
task_results = self.workflow.osbs.get_task_results(self.workflow.pipeline_run_name)
task_platform = platform.replace('_', '-')
try:
task_name = next(filter(lambda task_name: # pylint: disable=W1639
task_name.startswith('binary-container-build') and
task_platform in task_name, task_results))
if 'task_result' not in task_results[task_name]:
raise RuntimeError(f"task_results is missing from: {task_name}")
return task_results[task_name]['task_result']
except StopIteration:
# pylint: disable=W0707
raise RuntimeError(f"unable to find build host for platform: {platform}")
def _get_build_rpms(self, platform: str, build_host: str):
remote_host = None
remote_host_pools = self.workflow.conf.remote_hosts.get("pools", {})
slots_dir = self.workflow.conf.remote_hosts.get("slots_dir")
platform_config = remote_host_pools.get(platform)
if not platform_config:
raise RuntimeError(f"unable to find remote hosts for platform: {platform}")
host_config = platform_config.get(build_host)
if host_config:
remote_host = RemoteHost(
hostname=build_host, username=host_config["username"],
ssh_keyfile=host_config["auth"], slots=host_config.get("slots", 1),
socket_path=host_config["socket_path"], slots_dir=slots_dir
)
if not remote_host:
raise RuntimeError(f"unable to get remote host instance: {build_host}")
rpms = remote_host.rpms_installed
if not rpms:
raise RuntimeError(f"unable to obtain installed rpms on: {build_host}")
all_rpms = [line for line in rpms.splitlines() if line]
return parse_rpm_output(all_rpms)
def _get_build_metadata(self, platform: str):
"""
Build the metadata needed for importing the build
:return: tuple, the metadata and the list of Output instances
"""
pullspec_image = self._determine_image_pullspec(platform)
buildroot = get_buildroot(platform)
build_host = self.METHOD_NAME(platform)
output_files, _ = get_output(workflow=self.workflow, buildroot_id=build_host,
pullspec=pullspec_image, platform=platform,
source_build=False)
if build_log_output := self._generate_build_log_output(platform, build_host):
output_files.append(build_log_output)
buildroot['components'] = self._get_build_rpms(platform, build_host)
buildroot['id'] = build_host
koji_metadata = {
'metadata_version': 0,
'buildroots': [buildroot],
'output': [output.metadata for output in output_files],
}
return koji_metadata, output_files
def run(self):
"""Run the plugin."""
metadatas: Dict[str, Dict[str, Any]] = {}
wf_data = self.workflow.data
enabled_platforms = get_platforms(wf_data)
if not enabled_platforms:
raise ValueError("No enabled platforms.")
for platform in enabled_platforms:
koji_metadata, output_files = self._get_build_metadata(platform)
if not is_scratch_build(self.workflow):
for output in output_files:
wf_data.koji_upload_files.append({
"local_filename": output.filename,
"dest_filename": output.metadata["filename"],
})
metadatas[platform] = koji_metadata
return metadatas |
6,965 | add kind | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD-3-Clause
from copy import deepcopy
import numpy as np
from .constants import FIFF
from .tag import read_tag
from .tree import dir_tree_find
from .write import start_block, end_block, write_int
from .matrix import write_named_matrix, _read_named_matrix
from ..utils import logger, verbose, _pl
def METHOD_NAME(one):
"""Convert CTF kind to MNE kind."""
if one["ctfkind"] == int("47314252", 16):
one["kind"] = 1
elif one["ctfkind"] == int("47324252", 16):
one["kind"] = 2
elif one["ctfkind"] == int("47334252", 16):
one["kind"] = 3
else:
one["kind"] = int(one["ctfkind"])
def _calibrate_comp(
comp, chs, row_names, col_names, mult_keys=("range", "cal"), flip=False
):
"""Get row and column cals."""
ch_names = [c["ch_name"] for c in chs]
row_cals = np.zeros(len(row_names))
col_cals = np.zeros(len(col_names))
for names, cals, inv in zip(
(row_names, col_names), (row_cals, col_cals), (False, True)
):
for ii in range(len(cals)):
p = ch_names.count(names[ii])
if p != 1:
raise RuntimeError(
"Channel %s does not appear exactly once "
"in data, found %d instance%s" % (names[ii], p, _pl(p))
)
idx = ch_names.index(names[ii])
val = chs[idx][mult_keys[0]] * chs[idx][mult_keys[1]]
val = float(1.0 / val) if inv else float(val)
val = 1.0 / val if flip else val
cals[ii] = val
comp["rowcals"] = row_cals
comp["colcals"] = col_cals
comp["data"]["data"] = row_cals[:, None] * comp["data"]["data"] * col_cals[None, :]
@verbose
def read_ctf_comp(fid, node, chs, verbose=None):
"""Read the CTF software compensation data from the given node.
Parameters
----------
fid : file
The file descriptor.
node : dict
The node in the FIF tree.
chs : list
The list of channels from info['chs'] to match with
compensators that are read.
%(verbose)s
Returns
-------
compdata : list
The compensation data
"""
return _read_ctf_comp(fid, node, chs, None)
def _read_ctf_comp(fid, node, chs, ch_names_mapping):
"""Read the CTF software compensation data from the given node.
Parameters
----------
fid : file
The file descriptor.
node : dict
The node in the FIF tree.
chs : list
The list of channels from info['chs'] to match with
compensators that are read.
ch_names_mapping : dict | None
The channel renaming to use.
%(verbose)s
Returns
-------
compdata : list
The compensation data
"""
from .meas_info import _rename_comps
ch_names_mapping = dict() if ch_names_mapping is None else ch_names_mapping
compdata = []
comps = dir_tree_find(node, FIFF.FIFFB_MNE_CTF_COMP_DATA)
for node in comps:
# Read the data we need
mat = _read_named_matrix(fid, node, FIFF.FIFF_MNE_CTF_COMP_DATA)
for p in range(node["nent"]):
kind = node["directory"][p].kind
pos = node["directory"][p].pos
if kind == FIFF.FIFF_MNE_CTF_COMP_KIND:
tag = read_tag(fid, pos)
break
else:
raise Exception("Compensation type not found")
# Get the compensation kind and map it to a simple number
one = dict(ctfkind=tag.data.item())
del tag
METHOD_NAME(one)
for p in range(node["nent"]):
kind = node["directory"][p].kind
pos = node["directory"][p].pos
if kind == FIFF.FIFF_MNE_CTF_COMP_CALIBRATED:
tag = read_tag(fid, pos)
calibrated = tag.data
break
else:
calibrated = False
one["save_calibrated"] = bool(calibrated)
one["data"] = mat
_rename_comps([one], ch_names_mapping)
if not calibrated:
# Calibrate...
_calibrate_comp(one, chs, mat["row_names"], mat["col_names"])
else:
one["rowcals"] = np.ones(mat["data"].shape[0], dtype=np.float64)
one["colcals"] = np.ones(mat["data"].shape[1], dtype=np.float64)
compdata.append(one)
if len(compdata) > 0:
logger.info(" Read %d compensation matrices" % len(compdata))
return compdata
###############################################################################
# Writing
def write_ctf_comp(fid, comps):
"""Write the CTF compensation data into a fif file.
Parameters
----------
fid : file
The open FIF file descriptor
comps : list
The compensation data to write
"""
if len(comps) <= 0:
return
# This is very simple in fact
start_block(fid, FIFF.FIFFB_MNE_CTF_COMP)
for comp in comps:
start_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA)
# Write the compensation kind
write_int(fid, FIFF.FIFF_MNE_CTF_COMP_KIND, comp["ctfkind"])
if comp.get("save_calibrated", False):
write_int(fid, FIFF.FIFF_MNE_CTF_COMP_CALIBRATED, comp["save_calibrated"])
if not comp.get("save_calibrated", True):
# Undo calibration
comp = deepcopy(comp)
data = (
(1.0 / comp["rowcals"][:, None])
* comp["data"]["data"]
* (1.0 / comp["colcals"][None, :])
)
comp["data"]["data"] = data
write_named_matrix(fid, FIFF.FIFF_MNE_CTF_COMP_DATA, comp["data"])
end_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA)
end_block(fid, FIFF.FIFFB_MNE_CTF_COMP) |
6,966 | add | # ---------------------------------------------------------------------
#
# Copyright (C) 2015 - 2022 by the deal.II authors
#
# This file is part of the deal.II library.
#
# The deal.II library is free software; you can use it, redistribute
# it, and/or modify it under the terms of the GNU Lesser General
# Public License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# The full text of the license can be found in the file LICENSE.md at
# the top level directory of deal.II.
#
# ---------------------------------------------------------------------
#
# Instructions: Place a copy of this file, renamed as ".gdbinit", in your home
# directory to enable pretty-printing of various deal.II objects. If you already
# have a ".gdbinit" file or would like to manage multiple sets of pretty
# printers, then see the directions included in the Documentation, in the
# "Configuration for debugging via GDB" section in the "Information for users"
# category.
#
# This has only been tested with GDB 7.7.1 and newer, but it should work with
# slightly older versions of GDB (the Python interface was added in 7.0,
# released in 2009).
#
# Authors: Wolfgang Bangerth, 2015, David Wells, 2015 - 2018
#
set print pretty 1
python
import gdb
import re
def build_output_string(keys, accessor):
"""Build an output string of the form
{
a = foo,
b = bar
}
where a and b are elements of keys and foo and bar are values of
accessor (e.g., accessor['a'] = foo).
Note that accessor need not be a dictionary (i.e., gdb.Values
redefines __getitem__)."""
return ("{\n" +
",\n".join([" {} = {}".format(key, accessor[key])
for key in keys]) +
"\n}")
class AlignedVectorPrinter(object):
"""Print a deal.II AlignedVector instance."""
def __init__(self, typename, val):
self.typename = typename
self.val = val
self.end = self.val['used_elements_end']
# evaluate the get() method of the unique_ptr
eval_string = "(*("+str(self.val['elements'].type)+"*)("+str(self.val['elements'].address)+")).get()"
self.begin = gdb.parse_and_eval(eval_string);
self.length = int(self.end - self.begin )
def children(self):
# The first entry (see the "Pretty Printing API" documentation of GDB)
# in the tuple should be a name for the child, which should be nothing
# (the array elements don't have individual names) here.
return (("", (self.begin + count).dereference())
for count in range(self.length))
def to_string(self):
return "AlignedVector<{}>({})".format(self.val.type.template_argument(0),
self.length)
@staticmethod
def display_hint():
"""Provide a hint to GDB that this is an array-like container
(so print values in sequence)."""
return "array"
class PointPrinter(object):
"""Print a deal.II Point instance."""
def __init__(self, typename, val):
self.typename = typename
self.val = val
def to_string(self):
return self.val['values']
class TensorPrinter(object):
"""Print a deal.II Tensor instance."""
def __init__(self, typename, val):
self.typename = typename
self.val = val
def to_string(self):
if int(self.val.type.template_argument(0)) == 0:
return self.val['value']
else:
return self.val['values']
class TriaIteratorPrinter(object):
"""Print a deal.II TriaIterator instance."""
def __init__(self, typename, val):
self.typename = typename
self.val = val
def to_string(self):
keys = ['tria', 'present_level', 'present_index']
if 'DoFHandler' in str(self.val.type.template_argument(0)):
keys.insert(1, 'dof_handler')
return build_output_string(keys, self.val['accessor'])
class VectorPrinter(object):
"""Print a deal.II Vector instance."""
def __init__(self, typename, val):
self.typename = typename
self.val = val
a_vec = self.val['values']
self.end = a_vec['used_elements_end']
# evaluate the elements.get() method of the AlignedVector member
eval_string = "(*("+str(a_vec['elements'].type)+"*)("+str(a_vec['elements'].address)+")).get()"
self.begin = gdb.parse_and_eval(eval_string);
self.length = int(self.end - self.begin )
def to_string(self):
return ("Vector<{}>({})".format(self.val.type.template_argument(0),
self.length) +
build_output_string(['values'], self.val))
class QuadraturePrinter(object):
"""Print a deal.II Quadrature instance."""
def __init__(self, typename, val):
self.typename = typename
self.val = val
def to_string(self):
return build_output_string(['quadrature_points', 'weights'], self.val)
class RxPrinter(object):
"""A "regular expression" printer which conforms to the
"SubPrettyPrinter" protocol from gdb.printing."""
def __init__(self, name, function):
self.name = name
self.function = function
self.enabled = True
def __call__(self, value):
if self.enabled:
return self.function(self.name, value)
else:
return None
class Printer(object):
"""A pretty-printer that conforms to the "PrettyPrinter" protocol
from gdb.printing. It can also be used directly as an old-style
printer."""
def __init__(self, name):
self.name = name
self.subprinters = list()
self.lookup = dict()
self.enabled = True
self.compiled_rx = re.compile('^([a-zA-Z0-9_:]+)<.*>$')
def METHOD_NAME(self, name, function):
printer = RxPrinter(name, function)
self.subprinters.append(printer)
self.lookup[name] = printer
@staticmethod
def get_basic_type(object_type):
# If it points to a reference, then get the reference.
if object_type.code == gdb.TYPE_CODE_REF:
object_type = object_type.target()
object_type = object_type.unqualified().strip_typedefs()
return object_type.tag
def __call__(self, val):
typename = self.get_basic_type(val.type)
if typename:
# All the types we match are template types, so we can use a
# dictionary.
match = self.compiled_rx.match(typename)
if match:
basename = match.group(1)
if basename in self.lookup:
return self.lookup[basename](val)
return None
dealii_printer = Printer("deal.II")
def register_dealii_printers():
"""Register deal.II pretty-printers with gdb."""
printers = {
AlignedVectorPrinter: ['AlignedVector'],
PointPrinter: ['Point'],
TensorPrinter: ['Tensor'],
VectorPrinter: ['Vector'],
TriaIteratorPrinter:
['TriaRawIterator', 'TriaIterator', 'TriaActiveIterator'],
QuadraturePrinter:
['Quadrature', 'QGauss', 'QGaussLobatto', 'QMidpoint', 'QSimpson',
'QTrapezoid',
# The following name has been deprecated in deal.II 9.3 and can
# be removed at a later time.
'QTrapez',
'QMilne', 'QWeddle', 'QGaussLog', 'QGaussLogR',
'QGaussOneOverR', 'QSorted', 'QTelles', 'QGaussChebyshev',
'QGaussRadauChebyshev', 'QIterated', 'QAnisotropic']
}
for printer, class_names in printers.items():
for class_name in class_names:
dealii_printer.METHOD_NAME('dealii::' + class_name, printer)
try:
from gdb import printing
printing.register_pretty_printer(gdb, dealii_printer)
except ImportError:
gdb.pretty_printers.append(dealii_printer)
register_dealii_printers()
end |
6,967 | visit begin | #
# QAPI command marshaller generator
#
# Copyright IBM, Corp. 2011
# Copyright (C) 2014-2016 Red Hat, Inc.
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
# Michael Roth <mdroth@linux.vnet.ibm.com>
# Markus Armbruster <armbru@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
from qapi import *
def gen_command_decl(name, arg_type, boxed, ret_type):
return mcgen('''
%(c_type)s qmp_%(c_name)s(%(params)s);
''',
c_type=(ret_type and ret_type.c_type()) or 'void',
c_name=c_name(name),
params=gen_params(arg_type, boxed, 'Error **errp'))
def gen_call(name, arg_type, boxed, ret_type):
ret = ''
argstr = ''
if boxed:
assert arg_type and not arg_type.is_empty()
argstr = '&arg, '
elif arg_type:
assert not arg_type.variants
for memb in arg_type.members:
if memb.optional:
argstr += 'arg.has_%s, ' % c_name(memb.name)
argstr += 'arg.%s, ' % c_name(memb.name)
lhs = ''
if ret_type:
lhs = 'retval = '
ret = mcgen('''
%(lhs)sqmp_%(c_name)s(%(args)s&err);
''',
c_name=c_name(name), args=argstr, lhs=lhs)
if ret_type:
ret += mcgen('''
if (err) {
goto out;
}
qmp_marshal_output_%(c_name)s(retval, ret, &err);
''',
c_name=ret_type.c_name())
return ret
def gen_marshal_output(ret_type):
return mcgen('''
static void qmp_marshal_output_%(c_name)s(%(c_type)s ret_in, QObject **ret_out, Error **errp)
{
Error *err = NULL;
Visitor *v;
v = qobject_output_visitor_new(ret_out);
visit_type_%(c_name)s(v, "unused", &ret_in, &err);
if (!err) {
visit_complete(v, ret_out);
}
error_propagate(errp, err);
visit_free(v);
v = qapi_dealloc_visitor_new();
visit_type_%(c_name)s(v, "unused", &ret_in, NULL);
visit_free(v);
}
''',
c_type=ret_type.c_type(), c_name=ret_type.c_name())
def gen_marshal_proto(name):
return ('void qmp_marshal_%s(QDict *args, QObject **ret, Error **errp)'
% c_name(name))
def gen_marshal_decl(name):
return mcgen('''
%(proto)s;
''',
proto=gen_marshal_proto(name))
def gen_marshal(name, arg_type, boxed, ret_type):
have_args = arg_type and not arg_type.is_empty()
ret = mcgen('''
%(proto)s
{
Error *err = NULL;
''',
proto=gen_marshal_proto(name))
if ret_type:
ret += mcgen('''
%(c_type)s retval;
''',
c_type=ret_type.c_type())
if have_args:
visit_members = ('visit_type_%s_members(v, &arg, &err);'
% arg_type.c_name())
ret += mcgen('''
Visitor *v;
%(c_name)s arg = {0};
''',
c_name=arg_type.c_name())
else:
visit_members = ''
ret += mcgen('''
Visitor *v = NULL;
if (args) {
''')
push_indent()
ret += mcgen('''
v = qobject_input_visitor_new(QOBJECT(args));
visit_start_struct(v, NULL, NULL, 0, &err);
if (err) {
goto out;
}
%(visit_members)s
if (!err) {
visit_check_struct(v, &err);
}
visit_end_struct(v, NULL);
if (err) {
goto out;
}
''',
visit_members=visit_members)
if not have_args:
pop_indent()
ret += mcgen('''
}
''')
ret += gen_call(name, arg_type, boxed, ret_type)
ret += mcgen('''
out:
error_propagate(errp, err);
visit_free(v);
''')
if have_args:
visit_members = ('visit_type_%s_members(v, &arg, NULL);'
% arg_type.c_name())
else:
visit_members = ''
ret += mcgen('''
if (args) {
''')
push_indent()
ret += mcgen('''
v = qapi_dealloc_visitor_new();
visit_start_struct(v, NULL, NULL, 0, NULL);
%(visit_members)s
visit_end_struct(v, NULL);
visit_free(v);
''',
visit_members=visit_members)
if not have_args:
pop_indent()
ret += mcgen('''
}
''')
ret += mcgen('''
}
''')
return ret
def gen_register_command(name, success_response):
options = 'QCO_NO_OPTIONS'
if not success_response:
options = 'QCO_NO_SUCCESS_RESP'
ret = mcgen('''
qmp_register_command(cmds, "%(name)s",
qmp_marshal_%(c_name)s, %(opts)s);
''',
name=name, c_name=c_name(name),
opts=options)
return ret
def gen_registry(registry):
ret = mcgen('''
void %(c_prefix)sqmp_init_marshal(QmpCommandList *cmds)
{
QTAILQ_INIT(cmds);
''',
c_prefix=c_name(prefix, protect=False))
ret += registry
ret += mcgen('''
}
''')
return ret
class QAPISchemaGenCommandVisitor(QAPISchemaVisitor):
def __init__(self):
self.decl = None
self.defn = None
self._regy = None
self._visited_ret_types = None
def METHOD_NAME(self, schema):
self.decl = ''
self.defn = ''
self._regy = ''
self._visited_ret_types = set()
def visit_end(self):
self.defn += gen_registry(self._regy)
self._regy = None
self._visited_ret_types = None
def visit_command(self, name, info, arg_type, ret_type,
gen, success_response, boxed):
if not gen:
return
self.decl += gen_command_decl(name, arg_type, boxed, ret_type)
if ret_type and ret_type not in self._visited_ret_types:
self._visited_ret_types.add(ret_type)
self.defn += gen_marshal_output(ret_type)
self.decl += gen_marshal_decl(name)
self.defn += gen_marshal(name, arg_type, boxed, ret_type)
self._regy += gen_register_command(name, success_response)
(input_file, output_dir, do_c, do_h, prefix, opts) = parse_command_line()
c_comment = '''
/*
* schema-defined QMP->QAPI command dispatch
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
'''
h_comment = '''
/*
* schema-defined QAPI function prototypes
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
'''
(fdef, fdecl) = open_output(output_dir, do_c, do_h, prefix,
'qmp-marshal.c', 'qmp-commands.h',
c_comment, h_comment)
fdef.write(mcgen('''
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/module.h"
#include "qapi/qmp/types.h"
#include "qapi/visitor.h"
#include "qapi/qobject-output-visitor.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/dealloc-visitor.h"
#include "%(prefix)sqapi-types.h"
#include "%(prefix)sqapi-visit.h"
#include "%(prefix)sqmp-commands.h"
''',
prefix=prefix))
fdecl.write(mcgen('''
#include "%(prefix)sqapi-types.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/dispatch.h"
#include "qapi/error.h"
void %(c_prefix)sqmp_init_marshal(QmpCommandList *cmds);
''',
prefix=prefix, c_prefix=c_name(prefix, protect=False)))
schema = QAPISchema(input_file)
gen = QAPISchemaGenCommandVisitor()
schema.visit(gen)
fdef.write(gen.defn)
fdecl.write(gen.decl)
close_output(fdef, fdecl) |
6,968 | is parameter | """Interface to the compiler's internal symbol tables"""
import _symtable
from _symtable import (USE, DEF_GLOBAL, DEF_LOCAL, DEF_PARAM,
DEF_IMPORT, DEF_BOUND, DEF_ANNOT, SCOPE_OFF, SCOPE_MASK, FREE,
LOCAL, GLOBAL_IMPLICIT, GLOBAL_EXPLICIT, CELL)
import weakref
__all__ = ["symtable", "SymbolTable", "Class", "Function", "Symbol"]
def symtable(code, filename, compile_type):
top = _symtable.symtable(code, filename, compile_type)
return _newSymbolTable(top, filename)
class SymbolTableFactory:
def __init__(self):
self.__memo = weakref.WeakValueDictionary()
def new(self, table, filename):
if table.type == _symtable.TYPE_FUNCTION:
return Function(table, filename)
if table.type == _symtable.TYPE_CLASS:
return Class(table, filename)
return SymbolTable(table, filename)
def __call__(self, table, filename):
key = table, filename
obj = self.__memo.get(key, None)
if obj is None:
obj = self.__memo[key] = self.new(table, filename)
return obj
_newSymbolTable = SymbolTableFactory()
class SymbolTable(object):
def __init__(self, raw_table, filename):
self._table = raw_table
self._filename = filename
self._symbols = {}
def __repr__(self):
if self.__class__ == SymbolTable:
kind = ""
else:
kind = "%s " % self.__class__.__name__
if self._table.name == "global":
return "<{0}SymbolTable for module {1}>".format(kind, self._filename)
else:
return "<{0}SymbolTable for {1} in {2}>".format(kind,
self._table.name,
self._filename)
def get_type(self):
if self._table.type == _symtable.TYPE_MODULE:
return "module"
if self._table.type == _symtable.TYPE_FUNCTION:
return "function"
if self._table.type == _symtable.TYPE_CLASS:
return "class"
assert self._table.type in (1, 2, 3), \
"unexpected type: {0}".format(self._table.type)
def get_id(self):
return self._table.id
def get_name(self):
return self._table.name
def get_lineno(self):
return self._table.lineno
def is_optimized(self):
return bool(self._table.type == _symtable.TYPE_FUNCTION)
def is_nested(self):
return bool(self._table.nested)
def has_children(self):
return bool(self._table.children)
def has_exec(self):
"""Return true if the scope uses exec. Deprecated method."""
return False
def get_identifiers(self):
return self._table.symbols.keys()
def lookup(self, name):
sym = self._symbols.get(name)
if sym is None:
flags = self._table.symbols[name]
namespaces = self.__check_children(name)
sym = self._symbols[name] = Symbol(name, flags, namespaces)
return sym
def get_symbols(self):
return [self.lookup(ident) for ident in self.get_identifiers()]
def __check_children(self, name):
return [_newSymbolTable(st, self._filename)
for st in self._table.children
if st.name == name]
def get_children(self):
return [_newSymbolTable(st, self._filename)
for st in self._table.children]
class Function(SymbolTable):
# Default values for instance variables
__params = None
__locals = None
__frees = None
__globals = None
def __idents_matching(self, test_func):
return tuple([ident for ident in self.get_identifiers()
if test_func(self._table.symbols[ident])])
def get_parameters(self):
if self.__params is None:
self.__params = self.__idents_matching(lambda x:x & DEF_PARAM)
return self.__params
def get_locals(self):
if self.__locals is None:
locs = (LOCAL, CELL)
test = lambda x: ((x >> SCOPE_OFF) & SCOPE_MASK) in locs
self.__locals = self.__idents_matching(test)
return self.__locals
def get_globals(self):
if self.__globals is None:
glob = (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT)
test = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) in glob
self.__globals = self.__idents_matching(test)
return self.__globals
def get_frees(self):
if self.__frees is None:
is_free = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) == FREE
self.__frees = self.__idents_matching(is_free)
return self.__frees
class Class(SymbolTable):
__methods = None
def get_methods(self):
if self.__methods is None:
d = {}
for st in self._table.children:
d[st.name] = 1
self.__methods = tuple(d)
return self.__methods
class Symbol(object):
def __init__(self, name, flags, namespaces=None):
self.__name = name
self.__flags = flags
self.__scope = (flags >> SCOPE_OFF) & SCOPE_MASK # like PyST_GetScope()
self.__namespaces = namespaces or ()
def __repr__(self):
return "<symbol {0!r}>".format(self.__name)
def get_name(self):
return self.__name
def is_referenced(self):
return bool(self.__flags & _symtable.USE)
def METHOD_NAME(self):
return bool(self.__flags & DEF_PARAM)
def is_global(self):
return bool(self.__scope in (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT))
def is_declared_global(self):
return bool(self.__scope == GLOBAL_EXPLICIT)
def is_local(self):
return bool(self.__flags & DEF_BOUND)
def is_annotated(self):
return bool(self.__flags & DEF_ANNOT)
def is_free(self):
return bool(self.__scope == FREE)
def is_imported(self):
return bool(self.__flags & DEF_IMPORT)
def is_assigned(self):
return bool(self.__flags & DEF_LOCAL)
def is_namespace(self):
"""Returns true if name binding introduces new namespace.
If the name is used as the target of a function or class
statement, this will be true.
Note that a single name can be bound to multiple objects. If
is_namespace() is true, the name may also be bound to other
objects, like an int or list, that does not introduce a new
namespace.
"""
return bool(self.__namespaces)
def get_namespaces(self):
"""Return a list of namespaces bound to this name"""
return self.__namespaces
def get_namespace(self):
"""Returns the single namespace bound to this name.
Raises ValueError if the name is bound to multiple namespaces.
"""
if len(self.__namespaces) != 1:
raise ValueError("name is bound to multiple namespaces")
return self.__namespaces[0]
if __name__ == "__main__":
import os, sys
with open(sys.argv[0]) as f:
src = f.read()
mod = symtable(src, os.path.split(sys.argv[0])[1], "exec")
for ident in mod.get_identifiers():
info = mod.lookup(ident)
print(info, info.is_local(), info.is_namespace()) |
6,969 | margo client | import logging
import asyncio
import functools
import collections
import pymargo
import pymargo.core
from concurrent.futures import ProcessPoolExecutor
from deephyper.evaluator._evaluator import Evaluator
import mpi4py
# !To avoid initializing MPI when module is imported (MPI is optional)
mpi4py.rc.initialize = False
mpi4py.rc.finalize = True
from mpi4py import MPI # noqa: E402
logger = logging.getLogger(__name__)
def METHOD_NAME(protocol, target_address, func, *args, **kwargs):
with pymargo.core.Engine(protocol, mode=pymargo.client) as engine:
execute_function = engine.register("execute_function")
address = engine.lookup(target_address)
response = execute_function.on(address)(func, *args, **kwargs)
return response
def execute_function(handle: pymargo.core.Handle, func, *args, **kwargs):
res = func(*args, **kwargs)
handle.respond(res)
def margo_server(comm, protocol):
with pymargo.core.Engine(protocol, mode=pymargo.server) as engine:
comm.send(engine.address) # !temporary
engine.register("execute_function", execute_function)
engine.enable_remote_shutdown()
engine.wait_for_finalize()
class MochiEvaluator(Evaluator):
"""This evaluator uses the ``ProcessPoolExecutor`` as backend.
Args:
run_function (callable): functions to be executed by the ``Evaluator``.
num_workers (int, optional): Number of parallel processes used to compute the ``run_function``. Defaults to 1.
callbacks (list, optional): A list of callbacks to trigger custom actions at the creation or completion of jobs. Defaults to None.
"""
def __init__(
self,
run_function,
num_workers: int = 1,
callbacks: list = None,
run_function_kwargs: dict = None,
protocol="tcp",
):
super().__init__(run_function, num_workers, callbacks, run_function_kwargs)
self._protocol = protocol
# !use of MPI is temporary to initialise addresses
if not MPI.Is_initialized():
MPI.Init_thread()
self._comm = MPI.COMM_WORLD
self._rank = self._comm.Get_rank()
self._size = self._comm.Get_size()
self.executor = None
if self._rank == 0: # master
self.sem = asyncio.Semaphore(num_workers)
# !creating the exector once here is crutial to avoid repetitive overheads
self.executor = ProcessPoolExecutor(max_workers=num_workers)
if hasattr(run_function, "__name__") and hasattr(
run_function, "__module__"
):
logger.info(
f"Mochi Evaluator will execute {self.run_function.__name__}() from module {self.run_function.__module__}"
)
else:
logger.info(f"Mochi Evaluator will execute {self.run_function}")
# queue of worker addresses
self._worker_addresses = []
for i in range(1, self._size):
address = self._comm.recv(source=i)
self._worker_addresses.append(address)
self._qworker_addresses = collections.deque(self._worker_addresses)
else: # workers
margo_server(self._comm, self._protocol)
def __enter__(self):
if self.executor:
self.executor = self.executor.__enter__()
return self
else:
return None
def __exit__(self, type, value, traceback):
if self.executor:
self.executor.__exit__(type, value, traceback)
# shutdown pymargo servers
with pymargo.core.Engine(self._protocol, mode=pymargo.client) as engine:
for target_address in self._worker_addresses:
address = engine.lookup(target_address)
address.shutdown()
async def execute(self, job):
async with self.sem:
target_address = self._qworker_addresses.popleft()
running_job = job.create_running_job(self._storage, self._stopper)
run_function = functools.partial(
METHOD_NAME,
self._protocol,
target_address,
running_job,
**self.run_function_kwargs,
)
sol = await self.loop.run_in_executor(self.executor, run_function)
job.result = sol
self._qworker_addresses.append(target_address)
return job |
6,970 | sync signal | """
This module provides signals, which are a simple dispatching system that allows any number of interested parties
to subscribe to events ("signals").
This is similar to the Blinker library (https://pypi.org/project/blinker/), with the following changes:
- provides only a small subset of Blinker's functionality
- supports type hints
- supports async receivers.
"""
from __future__ import annotations
import asyncio
import inspect
import weakref
from collections.abc import Awaitable
from collections.abc import Callable
from typing import Any
from typing import cast
from typing import Generic
from typing import ParamSpec
from typing import TypeVar
P = ParamSpec("P")
R = TypeVar("R")
def make_weak_ref(obj: Any) -> weakref.ReferenceType:
"""
Like weakref.ref(), but using weakref.WeakMethod for bound methods.
"""
if hasattr(obj, "__self__"):
return cast(weakref.ref, weakref.WeakMethod(obj))
else:
return weakref.ref(obj)
# We're running into https://github.com/python/mypy/issues/6073 here,
# which is why the base class is a mixin and not a generic superclass.
class _SignalMixin:
def __init__(self) -> None:
self.receivers: list[weakref.ref[Callable]] = []
def connect(self, receiver: Callable) -> None:
"""
Register a signal receiver.
The signal will only hold a weak reference to the receiver function.
"""
receiver = make_weak_ref(receiver)
self.receivers.append(receiver)
def disconnect(self, receiver: Callable) -> None:
self.receivers = [r for r in self.receivers if r() != receiver]
def notify(self, *args, **kwargs):
cleanup = False
for ref in self.receivers:
r = ref()
if r is not None:
yield r(*args, **kwargs)
else:
cleanup = True
if cleanup:
self.receivers = [r for r in self.receivers if r() is not None]
class _SyncSignal(Generic[P], _SignalMixin):
def connect(self, receiver: Callable[P, None]) -> None:
assert not asyncio.iscoroutinefunction(receiver)
super().connect(receiver)
def disconnect(self, receiver: Callable[P, None]) -> None:
super().disconnect(receiver)
def send(self, *args: P.args, **kwargs: P.kwargs) -> None:
for ret in super().notify(*args, **kwargs):
assert ret is None or not inspect.isawaitable(ret)
class _AsyncSignal(Generic[P], _SignalMixin):
def connect(self, receiver: Callable[P, Awaitable[None] | None]) -> None:
super().connect(receiver)
def disconnect(self, receiver: Callable[P, Awaitable[None] | None]) -> None:
super().disconnect(receiver)
async def send(self, *args: P.args, **kwargs: P.kwargs) -> None:
await asyncio.gather(
*[
aws
for aws in super().notify(*args, **kwargs)
if aws is not None and inspect.isawaitable(aws)
]
)
# noinspection PyPep8Naming
def METHOD_NAME(receiver_spec: Callable[P, None]) -> _SyncSignal[P]:
"""
Create a synchronous signal with the given function signature for receivers.
Example:
s = SyncSignal(lambda event: None) # all receivers must accept a single "event" argument.
def receiver(event):
print(event)
s.connect(receiver)
s.send("foo") # prints foo
s.send(event="bar") # prints bar
def receiver2():
...
s.connect(receiver2) # mypy complains about receiver2 not having the right signature
s2 = SyncSignal(lambda: None) # this signal has no arguments
s2.send()
"""
return cast(_SyncSignal[P], _SyncSignal())
# noinspection PyPep8Naming
def AsyncSignal(receiver_spec: Callable[P, Awaitable[None] | None]) -> _AsyncSignal[P]:
"""
Create an signal that supports both regular and async receivers:
Example:
s = AsyncSignal(lambda event: None)
async def receiver(event):
print(event)
s.connect(receiver)
await s.send("foo") # prints foo
"""
return cast(_AsyncSignal[P], _AsyncSignal()) |
6,971 | returns 400 if start date not provided | import time
import random
from datetime import datetime, timedelta
from backend.models.postgis.task import Task, TaskStatus
from tests.backend.base import BaseTestCase
from tests.backend.helpers.test_helpers import (
create_canned_project,
generate_encoded_token,
return_canned_user,
)
from tests.backend.integration.api.users.test_resources import USER_NOT_FOUND_SUB_CODE
class TestUsersStatisticsAPI(BaseTestCase):
def setUp(self):
super().setUp()
self.test_project, self.test_user = create_canned_project()
self.user_session_token = generate_encoded_token(self.test_user.id)
self.url = f"/api/v2/users/{self.test_user.username}/statistics/"
def test_returns_401_if_no_token(self):
"""Test that the user needs to be logged in."""
# Act
response = self.client.get(self.url)
# Assert
self.assertEqual(response.status_code, 401)
def test_return_404_if_user_not_found(self):
"""Test returns 404 if user not found."""
# Act
response = self.client.get(
"/api/v2/users/doesntexist/statistics/",
headers={"Authorization": self.user_session_token},
)
# Assert
self.assertEqual(response.status_code, 404)
self.assertEqual(response.json["error"]["sub_code"], USER_NOT_FOUND_SUB_CODE)
def test_return_200_if_user_found(self):
"""Test returns 200 if user found."""
# Arrange
task = Task.get(1, self.test_project.id)
task.lock_task_for_mapping(self.test_user.id)
time.sleep(5)
task.unlock_task(self.test_user.id, TaskStatus.MAPPED)
# Act
response = self.client.get(
self.url, headers={"Authorization": self.user_session_token}
)
# Assert
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json["totalTimeSpent"], 5)
self.assertEqual(response.json["timeSpentMapping"], 5)
self.assertEqual(response.json["timeSpentValidating"], 0)
self.assertEqual(response.json["projectsMapped"], 0)
self.assertEqual(response.json["countriesContributed"]["total"], 0)
self.assertEqual(response.json["tasksMapped"], 1)
self.assertEqual(response.json["tasksValidated"], 0)
self.assertEqual(response.json["tasksInvalidated"], 0)
self.assertEqual(response.json["tasksInvalidatedByOthers"], 0)
self.assertEqual(response.json["tasksValidatedByOthers"], 0)
self.assertEqual(response.json["ContributionsByInterest"], [])
class TestUsersStatisticsAllAPI(BaseTestCase):
def setUp(self):
super().setUp()
self.test_project, self.test_user = create_canned_project()
self.user_session_token = generate_encoded_token(self.test_user.id)
self.url = "/api/v2/users/statistics/"
def generate_random_user_level(self):
return random.randint(1, 3)
def test_returns_401_if_no_token(self):
"""Test that the user needs to be logged in."""
# Act
response = self.client.get(self.url)
# Assert
self.assertEqual(response.status_code, 401)
def METHOD_NAME(self):
"""Test that the start date needs to be provided."""
# Act
response = self.client.get(
self.url,
headers={"Authorization": self.user_session_token},
)
# Assert
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json["SubCode"], "MissingDate")
def returns_400_if_invalid_date_value(self):
"""Test that the date should be in date format"""
# Act
response = self.client.get(
self.url,
headers={"Authorization": self.user_session_token},
query_string={"startDate": "invalid"},
)
# Assert
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json["SubCode"], "InvalidDateValue")
def returns_400_if_start_date_greater_than_end_date(self):
"""Test that the start date cannot be greater than the end date."""
# Act
response = self.client.get(
self.url,
headers={"Authorization": self.user_session_token},
query_string={"startDate": "2020-01-01", "endDate": "2019-01-01"},
)
# Assert
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json["SubCode"], "InvalidDateRange")
def test_returns_400_if_date_range_greater_than_3_years(self):
"""Test that the date range cannot be greater than 3 years."""
# Act
response = self.client.get(
self.url,
headers={"Authorization": self.user_session_token},
query_string={"startDate": "2017-01-01", "endDate": "2021-01-01"},
)
# Assert
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json["SubCode"], "InvalidDateRange")
def test_returns_all_users_statistics(self):
"""Test that the statistics for all users that registerd in the give priod are returned."""
# Arrange
mapping_level_dict = {1: 0, 2: 0, 3: 0}
# Create 10 users with random mapping levels
for i in range(10):
user = return_canned_user(f"user_{i}", i)
user.mapping_level = self.generate_random_user_level()
mapping_level_dict[user.mapping_level] += 1
user.date_registered = datetime.today() - timedelta(days=100)
user.create()
# Act
response = self.client.get(
self.url,
headers={"Authorization": self.user_session_token},
query_string={
"startDate": (datetime.today() - timedelta(days=100)).strftime(
"%Y-%m-%d"
),
"endDate": datetime.now().strftime("%Y-%m-%d"),
},
)
# Assert
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json["total"], 10)
self.assertEqual(response.json["beginner"], mapping_level_dict[1])
self.assertEqual(response.json["intermediate"], mapping_level_dict[2])
self.assertEqual(response.json["advanced"], mapping_level_dict[3]) |
6,972 | get easiu r2005 | """
sample Python codes for EASIUR and APSCA
"""
import csv
import deepdish
import h5py
import numpy as np
import pyproj
import os
# print(f"This module uses the following packages")
# print(f"deepdish: {deepdish.__version__}")
# print(f"h5py : {h5py.__version__}")
# print(f"numpy : {np.__version__}")
# print(f"pyproj : {pyproj.__version__}")
library_path = os.path.join('reo', 'src', 'data')
# Income Growth Adjustment factors from BenMAP
MorIncomeGrowthAdj = {
1990: 1.000000,
1991: 0.992025,
1992: 0.998182,
1993: 1.003087,
1994: 1.012843,
1995: 1.016989,
1996: 1.024362,
1997: 1.034171,
1998: 1.038842,
1999: 1.042804,
2000: 1.038542,
2001: 1.043834,
2002: 1.049992,
2003: 1.056232,
2004: 1.062572,
2005: 1.068587,
2006: 1.074681,
2007: 1.080843,
2008: 1.087068,
2009: 1.093349,
2010: 1.099688,
2011: 1.111515,
2012: 1.122895,
2013: 1.133857,
2014: 1.144425,
2015: 1.154627,
2016: 1.164482,
2017: 1.174010,
2018: 1.183233,
2019: 1.192168,
2020: 1.200834,
2021: 1.209226,
2022: 1.217341,
2023: 1.225191,
2024: 1.232790,
}
# GDP deflator from BenMAP
GDP_deflator = {
1980: 0.478513,
1981: 0.527875,
1982: 0.560395,
1983: 0.578397,
1984: 0.603368,
1985: 0.624855,
1986: 0.636469,
1987: 0.659698,
1988: 0.686992,
1989: 0.720093,
1990: 0.759001,
1991: 0.790941,
1992: 0.814750,
1993: 0.839141,
1994: 0.860627,
1995: 0.885017,
1996: 0.911150,
1997: 0.932056,
1998: 0.946574,
1999: 0.967480,
2000: 1.000000,
2001: 1.028455,
2002: 1.044715,
2003: 1.068525,
2004: 1.096980,
2005: 1.134146,
2006: 1.170732,
2007: 1.204077,
2008: 1.250308,
2009: 1.245860,
2010: 1.266295,
}
# pyproj constant
# LCP_US = pyproj.Proj("+proj=lcc +no_defs +a=6370000.0m +b=6370000.0m \
# +lon_0=97w +lat_0=40n +lat_1=33n +lat_2=45n \
# +x_0=2736000.0m +y_0=2088000.0m +towgs84=0,0,0")
# GEO_SPHEROID = pyproj.Proj("+proj=lonlat +towgs84=0,0,0 +a=6370000.0m +no_defs")
LCP_US = pyproj.Proj(
"+proj=lcc +no_defs +a=6370000.0 +b=6370000.0 "
"+lon_0=97w +lat_0=40n +lat_1=33n +lat_2=45n "
"+x_0=2736000.0 +y_0=2088000.0 +to_wgs=0,0,0 +units=m"
)
# GEO_SPHEROID = pyproj.Proj("+proj=lonlat +towgs84=0,0,0 +a=6370000.0 +no_defs")
DATUM_NAD83 = pyproj.Proj("epsg:4269 +proj=longlat +ellps=GRS80 +datum=NAD83 +no_defs +towgs84=0,0,0")
DATUM_WGS84 = pyproj.Proj("epsg:4326 +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +towgs84=0,0,0")
def METHOD_NAME(stack, pop_year=2005, income_year=2005, dollar_year=2010):
"""Returns EASIUR for a given `stack` height in a dict.
Args:
stack: area, p150, p300
pop_year: population year
income_year: income level (1990 to 2024)
dollar_year: dollar year (1980 to 2010)
"""
if stack not in ["area", "p150", "p300"]:
print("stack should be one of 'area', 'p150', 'p300'")
return False
file_2005 = "sc_8.6MVSL_" + stack + "_pop2005.hdf5"
#ret_map = deepdish.io.load("data/EASIUR_Data/" + file_2005)
ret_map = deepdish.io.load(os.path.join(library_path, 'EASIUR_Data', file_2005))
if pop_year != 2005:
filename = "sc_growth_rate_pop2005_pop2040_" + stack + ".hdf5"
map_rate = deepdish.io.load(os.path.join(library_path, 'EASIUR_Data', filename))
for k, v in map_rate.items():
ret_map[k] = ret_map[k] * (v ** (pop_year - 2005))
if income_year != 2005:
try:
adj = MorIncomeGrowthAdj[income_year] / MorIncomeGrowthAdj[2005]
for k, v in ret_map.items():
ret_map[k] = v * adj
except KeyError:
print("income year must be between 1990 to 2024")
return False
if dollar_year != 2010:
try:
adj = GDP_deflator[dollar_year] / GDP_deflator[2010]
for k, v in ret_map.items():
ret_map[k] = v * adj
except KeyError:
print("Dollar year must be between 1980 to 2010")
return False
return ret_map
def get_pop_inc(year, min_age=30):
"""Returns population and incidence (or mortality) rate of `min_age` or older for a given `year`."""
remainder = int(year) % 5
if remainder == 0:
return get_pop_inc_raw(year, min_age)
else:
# linear interpolation
p1, i1 = get_pop_inc_raw(year - remainder, min_age)
p2, i2 = get_pop_inc_raw(year - remainder + 5, min_age)
pop = (p1 * (5 - remainder) + p2 * remainder) / 5.0
inc = (i1 * (5 - remainder) + i2 * remainder) / 5.0
return pop, inc
def get_pop_inc_raw(year, min_age=30):
"""Returns population and incidence (or mortality) rate of `min_age` or older for a given `year`.
Raw data (CSV files) were derived from BenMAP
"""
# with open("data/EASIUR_Data/PopInc/popinc" + str(year) + ".CSV", newline="") as f:
with open(os.path.join(library_path, 'EASIUR_Data/PopInc/popinc{}.CSV'.format(str(year))), newline="") as f:
pop = np.zeros((148, 112))
inc = np.zeros((148, 112))
popinc_csv = csv.reader(f)
header = next(popinc_csv)
if "Col" in header[0]: # Col has a strange char
header[0] = "Col"
xys = set()
for row in popinc_csv:
row_d = dict(zip(header, row))
if int(row_d["Start Age"]) == min_age:
x = int(row_d["Col"]) - 1
y = int(row_d["Row"]) - 1
if (x, y) in xys:
# just to check
print("Duplicate?", x, y)
xys.add((x, y))
p = float(row_d["Population"])
i = float(row_d["Baseline"])
# print float(row_d['Point Estimate'])/i, row_d['Percent of Baseline']
pop[x, y] = p
inc[x, y] = i / p
return pop, inc
def get_avg_plume(x, y, spec, stack="area", season="Q0"):
"""Returns an Average Plume in a 148x112 array
Args:
x, y: source location
spec: species (PEC, SO2, NOX, NH3)
stack: stack height (area, p150, p300)
season: season (Q0 for Annual, Q1 for Jan-Mar, Q2 for Apr-Jun, Q3 for Jul-Sep, Q4 for Oct-Dec)
Returns:
an average plume in a 148x112 array
"""
h5f = "".join(
["data/EASIUR_Data/AveragePlumes_181x181/avgplumes_", season, "_", str(stack) + ".hdf5"]
)
with h5py.File(h5f) as f:
return f[spec][x, y]
def get_avg_plume_stack(x, y, stkht, spec, season="Q0"):
"""Returns an Average Plume in a 148x112 array"""
stkht = float(stkht)
# linear interpolation
if stkht < 150:
ap1 = get_avg_plume(x, y, spec, stack="area", season=season)
ap2 = get_avg_plume(x, y, spec, stack="p150", season=season)
return ap1 * (1 - stkht / 150.0) + ap2 * stkht / 150.0
elif stkht < 300:
ap2 = get_avg_plume(x, y, spec, stack="p150", season=season)
ap3 = get_avg_plume(x, y, spec, stack="p300", season=season)
return ap2 * (1 - (stkht - 150.0) / 150.0) + ap3 * (stkht - 150.0) / 150.0
else:
ap3 = get_avg_plume(x, y, spec, stack="p300", season=season)
return ap3
def l2g(x, y, inverse=False, datum="NAD83"):
"""Convert LCP (x, y) in CAMx 148x112 grid to Geodetic (lon, lat)"""
if datum == "NAD83":
datum = DATUM_NAD83
elif datum == "WGS84":
datum = DATUM_WGS84
if inverse:
return np.array(pyproj.transform(datum, LCP_US, x, y)) / 36000.0 + np.array(
[1, 1]
)
else:
return pyproj.transform(LCP_US, datum, (x - 1) * 36e3, (y - 1) * 36e3)
def g2l(lon, lat, datum="NAD83"):
"""Convert Geodetic (lon, lat) to LCP (x, y) in CAMx 148x112 grid"""
return l2g(lon, lat, True, datum) |
6,973 | with ret type | from typing import ClassVar, Dict, FrozenSet, List, final
from mypy.nodes import ARG_OPT, ARG_POS, ARG_STAR, ARG_STAR2, ArgKind
from mypy.typeops import get_type_vars
from mypy.types import AnyType, CallableType, FunctionLike, Overloaded
from mypy.types import Type as MypyType
from mypy.types import TypeOfAny, TypeVarType
from returns.contrib.mypy._structures.args import FuncArg
def proper_type(
case_functions: List[CallableType],
) -> FunctionLike:
"""Returns a ``CallableType`` or ``Overloaded`` based on case functions."""
if len(case_functions) == 1:
return case_functions[0]
return Overloaded(case_functions)
@final
class Intermediate(object):
"""
Allows to build a new callable from old one and different options.
For example, helps to tell which callee arguments
was already provided in caller.
"""
#: Positional arguments can be of this kind.
_positional_kinds: ClassVar[FrozenSet[ArgKind]] = frozenset((
ARG_POS,
ARG_OPT,
ARG_STAR,
))
def __init__(self, case_function: CallableType) -> None:
"""We only need a callable to work on."""
self._case_function = case_function
def with_applied_args(self, applied_args: List[FuncArg]) -> CallableType:
"""
By calling this method we construct a new callable from its usage.
This allows use to create an intermediate callable with just used args.
"""
new_pos_args = self._applied_positional_args(applied_args)
new_named_args = self._applied_named_args(applied_args)
return self.with_signature(new_pos_args + new_named_args)
def with_signature(self, new_args: List[FuncArg]) -> CallableType:
"""Smartly creates a new callable from a given arguments."""
return detach_callable(self._case_function.copy_modified(
arg_names=[arg.name for arg in new_args],
arg_types=[arg.type for arg in new_args],
arg_kinds=[arg.kind for arg in new_args],
))
def METHOD_NAME(self, ret_type: MypyType) -> CallableType:
"""Smartly creates a new callable from a given return type."""
return self._case_function.copy_modified(ret_type=ret_type)
def _applied_positional_args(
self,
applied_args: List[FuncArg],
) -> List[FuncArg]:
callee_args = list(filter(
lambda name: name.name is None, # TODO: maybe use `kind` instead?
applied_args,
))
new_function_args = []
for ind, arg in enumerate(FuncArg.from_callable(self._case_function)):
if arg.kind in self._positional_kinds and ind < len(callee_args):
new_function_args.append(arg)
return new_function_args
def _applied_named_args(
self,
applied_args: List[FuncArg],
) -> List[FuncArg]:
callee_args = list(filter(
lambda name: name.name is not None,
applied_args,
))
new_function_args = []
for arg in FuncArg.from_callable(self._case_function):
has_named_arg_def = any(
# Argument can either be used as a named argument
# or passed to `**kwargs` if it exists.
arg.name == rdc.name or arg.kind == ARG_STAR2
for rdc in callee_args
)
if callee_args and has_named_arg_def:
new_function_args.append(arg)
return new_function_args
@final
class Functions(object):
"""
Allows to create new callables based on two existing ones.
For example, one can need a diff of two callables.
"""
def __init__(
self,
original: CallableType,
intermediate: CallableType,
) -> None:
"""We need two callable to work with."""
self._original = original
self._intermediate = intermediate
def diff(self) -> CallableType:
"""Finds a diff between two functions' arguments."""
intermediate_names = [
arg.name
for arg in FuncArg.from_callable(self._intermediate)
]
new_function_args = []
for index, arg in enumerate(FuncArg.from_callable(self._original)):
should_be_copied = (
arg.kind in {ARG_STAR, ARG_STAR2} or
arg.name not in intermediate_names or
# We need to treat unnamed args differently, because python3.8
# has pos_only_args, all their names are `None`.
# This is also true for `lambda` functions where `.name`
# might be missing for some reason.
(
not arg.name and not (
index < len(intermediate_names) and
# If this is also unnamed arg, then ignoring it.
not intermediate_names[index]
)
)
)
if should_be_copied:
new_function_args.append(arg)
return Intermediate(self._original).with_signature(
new_function_args,
)
# TODO: Remove this function once `mypy` order the TypeVars
# by their appearance sequence
def detach_callable(typ: CallableType) -> CallableType: # noqa: C901, WPS210
"""
THIS IS A COPY OF `mypy.checker.detach_callable` FUNCTION.
THE ONLY PURPOSE WE'VE COPIED IS TO GUARANTEE A DETERMINISTIC FOR OUR
TYPE VARIABLES!
AS YOU CAN SEE, WE ORDER THE TYPE VARS BY THEIR APPEARANCE SEQUENCE.
"""
type_list = typ.arg_types + [typ.ret_type]
appear_map: Dict[str, List[int]] = {}
for idx, inner_type in enumerate(type_list):
typevars_available = get_type_vars(inner_type)
for var in typevars_available: # noqa: WPS110
if var.fullname not in appear_map:
appear_map[var.fullname] = []
appear_map[var.fullname].append(idx)
used_type_var_names = set()
for var_name, _ in appear_map.items():
used_type_var_names.add(var_name)
all_type_vars = get_type_vars(typ)
new_variables = []
for var in set(all_type_vars): # noqa: WPS110
if var.fullname not in used_type_var_names:
continue
new_variables.append(
TypeVarType(
name=var.name,
fullname=var.fullname,
id=var.id,
values=var.values,
upper_bound=var.upper_bound,
variance=var.variance,
default=AnyType(TypeOfAny.from_omitted_generics),
),
)
new_variables = sorted(
new_variables,
key=lambda item: appear_map[item.fullname][0], # noqa: WPS110
)
return typ.copy_modified(
variables=new_variables,
arg_types=type_list[:-1],
ret_type=type_list[-1],
) |
6,974 | close | """Operating system-related utility functions for Sphinx."""
from __future__ import annotations
import contextlib
import filecmp
import os
import re
import shutil
import sys
import unicodedata
from io import StringIO
from os import path
from typing import TYPE_CHECKING, Any
from sphinx.deprecation import _deprecation_warning
if TYPE_CHECKING:
from collections.abc import Iterator
# SEP separates path elements in the canonical file names
#
# Define SEP as a manifest constant, not so much because we expect it to change
# in the future as to avoid the suspicion that a stray "/" in the code is a
# hangover from more *nix-oriented origins.
SEP = "/"
def os_path(canonical_path: str, /) -> str:
return canonical_path.replace(SEP, path.sep)
def canon_path(native_path: str | os.PathLike[str], /) -> str:
"""Return path in OS-independent form"""
return os.fspath(native_path).replace(path.sep, SEP)
def path_stabilize(filepath: str | os.PathLike[str], /) -> str:
"Normalize path separator and unicode string"
new_path = canon_path(filepath)
return unicodedata.normalize('NFC', new_path)
def relative_uri(base: str, to: str) -> str:
"""Return a relative URL from ``base`` to ``to``."""
if to.startswith(SEP):
return to
b2 = base.split('#')[0].split(SEP)
t2 = to.split('#')[0].split(SEP)
# remove common segments (except the last segment)
for x, y in zip(b2[:-1], t2[:-1]):
if x != y:
break
b2.pop(0)
t2.pop(0)
if b2 == t2:
# Special case: relative_uri('f/index.html','f/index.html')
# returns '', not 'index.html'
return ''
if len(b2) == 1 and t2 == ['']:
# Special case: relative_uri('f/index.html','f/') should
# return './', not ''
return '.' + SEP
return ('..' + SEP) * (len(b2) - 1) + SEP.join(t2)
def ensuredir(file: str | os.PathLike[str]) -> None:
"""Ensure that a path exists."""
os.makedirs(file, exist_ok=True)
def mtimes_of_files(dirnames: list[str], suffix: str) -> Iterator[float]:
for dirname in dirnames:
for root, _dirs, files in os.walk(dirname):
for sfile in files:
if sfile.endswith(suffix):
with contextlib.suppress(OSError):
yield path.getmtime(path.join(root, sfile))
def copytimes(source: str | os.PathLike[str], dest: str | os.PathLike[str]) -> None:
"""Copy a file's modification times."""
st = os.stat(source)
if hasattr(os, 'utime'):
os.utime(dest, (st.st_atime, st.st_mtime))
def copyfile(source: str | os.PathLike[str], dest: str | os.PathLike[str]) -> None:
"""Copy a file and its modification times, if possible.
Note: ``copyfile`` skips copying if the file has not been changed"""
if not path.exists(dest) or not filecmp.cmp(source, dest):
shutil.copyfile(source, dest)
with contextlib.suppress(OSError):
# don't do full copystat because the source may be read-only
copytimes(source, dest)
no_fn_re = re.compile(r'[^a-zA-Z0-9_-]')
project_suffix_re = re.compile(' Documentation$')
def make_filename(string: str) -> str:
return no_fn_re.sub('', string) or 'sphinx'
def make_filename_from_project(project: str) -> str:
return make_filename(project_suffix_re.sub('', project)).lower()
def relpath(path: str | os.PathLike[str],
start: str | os.PathLike[str] | None = os.curdir) -> str:
"""Return a relative filepath to *path* either from the current directory or
from an optional *start* directory.
This is an alternative of ``os.path.relpath()``. This returns original path
if *path* and *start* are on different drives (for Windows platform).
"""
try:
return os.path.relpath(path, start)
except ValueError:
return str(path)
safe_relpath = relpath # for compatibility
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
abspath = path.abspath
class _chdir:
"""Remove this fall-back once support for Python 3.10 is removed."""
def __init__(self, target_dir: str, /):
self.path = target_dir
self._dirs: list[str] = []
def __enter__(self):
self._dirs.append(os.getcwd())
os.chdir(self.path)
def __exit__(self, _exc_type, _exc_value, _traceback, /):
os.chdir(self._dirs.pop())
@contextlib.contextmanager
def cd(target_dir: str) -> Iterator[None]:
if sys.version_info[:2] >= (3, 11):
_deprecation_warning(__name__, 'cd', 'contextlib.chdir', remove=(8, 0))
with _chdir(target_dir):
yield
class FileAvoidWrite:
"""File-like object that buffers output and only writes if content changed.
Use this class like when writing to a file to avoid touching the original
file if the content hasn't changed. This is useful in scenarios where file
mtime is used to invalidate caches or trigger new behavior.
When writing to this file handle, all writes are buffered until the object
is closed.
Objects can be used as context managers.
"""
def __init__(self, path: str) -> None:
self._path = path
self._io: StringIO | None = None
def write(self, data: str) -> None:
if not self._io:
self._io = StringIO()
self._io.write(data)
def METHOD_NAME(self) -> None:
"""Stop accepting writes and write file, if needed."""
if not self._io:
msg = 'FileAvoidWrite does not support empty files.'
raise Exception(msg)
buf = self.getvalue()
self._io.METHOD_NAME()
try:
with open(self._path, encoding='utf-8') as old_f:
old_content = old_f.read()
if old_content == buf:
return
except OSError:
pass
with open(self._path, 'w', encoding='utf-8') as f:
f.write(buf)
def __enter__(self) -> FileAvoidWrite:
return self
def __exit__(
self, exc_type: type[Exception], exc_value: Exception, traceback: Any,
) -> bool:
self.METHOD_NAME()
return True
def __getattr__(self, name: str) -> Any:
# Proxy to _io instance.
if not self._io:
msg = 'Must write to FileAvoidWrite before other methods can be used'
raise Exception(msg)
return getattr(self._io, name)
def rmtree(path: str) -> None:
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path) |
6,975 | benchmark decode csv | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for the `MapVectorization` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
def _generate_csv_test_case():
"""Generates a `decode_csv()` test case."""
def csv_factory():
return dataset_ops.Dataset.from_tensor_slices(["1.0:2:a",
"2.4:5:c"]).repeat(5)
def decode_csv_fn(x):
return parsing_ops.decode_csv(
x,
record_defaults=[
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.string)
],
field_delim=":")
return decode_csv_fn, csv_factory
def _generate_parse_single_example_test_case():
"""Generates a `parse_single_example()` test case."""
def parse_example_factory():
"""Parse example factory."""
def _int64_feature(*values):
return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values))
def _bytes_feature(*values):
return feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[v.encode("utf-8") for v in values]))
return dataset_ops.Dataset.from_tensor_slices(
constant_op.constant([
example_pb2.Example(
features=feature_pb2.Features(
feature={
"dense_int": _int64_feature(i),
"dense_str": _bytes_feature(str(i)),
"sparse_int": _int64_feature(i, i * 2, i * 4, i * 8),
"sparse_str": _bytes_feature(*["abc"] * i)
})).SerializeToString() for i in range(10)
]))
def parse_single_example_fn(x):
features = {
"dense_int": parsing_ops.FixedLenFeature((), dtypes.int64, 0),
"dense_str": parsing_ops.FixedLenFeature((), dtypes.string, ""),
"sparse_int": parsing_ops.VarLenFeature(dtypes.int64),
"sparse_str": parsing_ops.VarLenFeature(dtypes.string),
}
return parsing_ops.parse_single_example(x, features)
return parse_single_example_fn, parse_example_factory
# TODO(rachelim): Add a benchmark for more expensive transformations, such as
# vgg_preprocessing.
class MapVectorizationBenchmark(test.Benchmark):
"""Benchmarks for the `MapVectorization` optimization."""
def _run(self, x, num_iters=100, name=None):
deltas = []
with session.Session() as sess:
for _ in range(5):
# Warm up session...
sess.run(x)
for _ in range(num_iters):
start = time.time()
sess.run(x)
end = time.time()
deltas.append(end - start)
median_time = np.median(deltas)
self.report_benchmark(iters=num_iters, wall_time=median_time, name=name)
return median_time
def _compare(self, input_dataset, map_fn, batch_size, input_size, str_id):
num_elems = int(np.sum([np.prod(x) for x in input_size]))
name_template = "{}_batch_size_{}_input_element_size_{}_{}"
unoptimized_dataset = input_dataset.map(map_fn).batch(batch_size)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
unoptimized_dataset = unoptimized_dataset.with_options(options)
unoptimized_next = dataset_ops.make_one_shot_iterator(
unoptimized_dataset).get_next()
options = dataset_ops.Options()
options.experimental_optimization.map_vectorization.enabled = True
optimized_dataset = unoptimized_dataset.with_options(options)
optimized_next = dataset_ops.make_one_shot_iterator(
optimized_dataset).get_next()
unoptimized_time = self._run(
unoptimized_next,
name=name_template.format(str_id, batch_size, num_elems, "unoptimized"))
optimized_time = self._run(
optimized_next,
name=name_template.format(str_id, batch_size, num_elems, "optimized"))
print("Batch size: {}\n"
"Input element size: {}\n"
"Transformation: {}\n"
"Speedup: {}\n".format(batch_size, input_size, str_id,
(unoptimized_time / optimized_time)))
# Known cheap functions
def benchmark_identity(self):
self._benchmark_helper(lambda *args: [array_ops.identity(x) for x in args],
"identity")
def benchmark_add_const(self):
self._benchmark_helper(lambda *args: [x + 1 for x in args], "add_const")
def benchmark_return_const(self):
self._benchmark_helper(lambda *args: [constant_op.constant(2)], "ret_const")
def benchmark_select(self):
self._benchmark_helper(lambda *args: args[0], "select")
def benchmark_cast(self):
self._benchmark_helper(
lambda *args: [math_ops.cast(x, dtypes.float32) for x in args], "cast")
def benchmark_reshape(self):
self._benchmark_helper(
lambda *args: [array_ops.reshape(x, (-1, 30)) for x in args], "reshape")
def METHOD_NAME(self):
csv_fn, csv_factory = _generate_csv_test_case()
self._benchmark_helper(csv_fn, "decode_csv", lambda: [csv_factory()])
def benchmark_parse_single_example(self):
# NOTE: Since we haven't implemented a vectorizer for "SerializeSparse",
# this function is only naively vectorized.
parse_fn, parse_factory = _generate_parse_single_example_test_case()
self._benchmark_helper(parse_fn, "parse_single_example",
lambda: [parse_factory()])
def _default_dataset_factory(self):
input_sizes = [(10, 10, 3), (10, 100, 300)]
for sz in input_sizes:
yield dataset_ops.Dataset.from_tensor_slices(np.random.rand(*sz))
def _benchmark_helper(self, map_fn, str_id, base_dataset_factory=None):
if base_dataset_factory is None:
base_dataset_factory = self._default_dataset_factory
batch_size = 1000
for base_dataset in base_dataset_factory():
base_dataset = base_dataset.repeat()
input_size = [
tuple(shape.as_list())
for shape in nest.flatten(
dataset_ops.get_legacy_output_shapes(base_dataset))
]
self._compare(base_dataset, map_fn, batch_size, input_size, str_id)
if __name__ == "__main__":
test.main() |
6,976 | read lines | import _winapi
import math
import msvcrt
import os
import subprocess
import uuid
import winreg
from test import support
from test.libregrtest.utils import print_warning
# Max size of asynchronous reads
BUFSIZE = 8192
# Seconds per measurement
SAMPLING_INTERVAL = 1
# Exponential damping factor to compute exponentially weighted moving average
# on 1 minute (60 seconds)
LOAD_FACTOR_1 = 1 / math.exp(SAMPLING_INTERVAL / 60)
# Initialize the load using the arithmetic mean of the first NVALUE values
# of the Processor Queue Length
NVALUE = 5
# Windows registry subkey of HKEY_LOCAL_MACHINE where the counter names
# of typeperf are registered
COUNTER_REGISTRY_KEY = (r"SOFTWARE\Microsoft\Windows NT\CurrentVersion"
r"\Perflib\CurrentLanguage")
class WindowsLoadTracker():
"""
This class asynchronously interacts with the `typeperf` command to read
the system load on Windows. Multiprocessing and threads can't be used
here because they interfere with the test suite's cases for those
modules.
"""
def __init__(self):
self._values = []
self._load = None
self._buffer = ''
self._popen = None
self.start()
def start(self):
# Create a named pipe which allows for asynchronous IO in Windows
pipe_name = r'\\.\pipe\typeperf_output_' + str(uuid.uuid4())
open_mode = _winapi.PIPE_ACCESS_INBOUND
open_mode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
open_mode |= _winapi.FILE_FLAG_OVERLAPPED
# This is the read end of the pipe, where we will be grabbing output
self.pipe = _winapi.CreateNamedPipe(
pipe_name, open_mode, _winapi.PIPE_WAIT,
1, BUFSIZE, BUFSIZE, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL
)
# The write end of the pipe which is passed to the created process
pipe_write_end = _winapi.CreateFile(
pipe_name, _winapi.GENERIC_WRITE, 0, _winapi.NULL,
_winapi.OPEN_EXISTING, 0, _winapi.NULL
)
# Open up the handle as a python file object so we can pass it to
# subprocess
command_stdout = msvcrt.open_osfhandle(pipe_write_end, 0)
# Connect to the read end of the pipe in overlap/async mode
overlap = _winapi.ConnectNamedPipe(self.pipe, overlapped=True)
overlap.GetOverlappedResult(True)
# Spawn off the load monitor
counter_name = self._get_counter_name()
command = ['typeperf', counter_name, '-si', str(SAMPLING_INTERVAL)]
self._popen = subprocess.Popen(' '.join(command), stdout=command_stdout, cwd=support.SAVEDCWD)
# Close our copy of the write end of the pipe
os.close(command_stdout)
def _get_counter_name(self):
# accessing the registry to get the counter localization name
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, COUNTER_REGISTRY_KEY) as perfkey:
counters = winreg.QueryValueEx(perfkey, 'Counter')[0]
# Convert [key1, value1, key2, value2, ...] list
# to {key1: value1, key2: value2, ...} dict
counters = iter(counters)
counters_dict = dict(zip(counters, counters))
# System counter has key '2' and Processor Queue Length has key '44'
system = counters_dict['2']
process_queue_length = counters_dict['44']
return f'"\\{system}\\{process_queue_length}"'
def close(self, kill=True):
if self._popen is None:
return
self._load = None
if kill:
self._popen.kill()
self._popen.wait()
self._popen = None
def __del__(self):
self.close()
def _parse_line(self, line):
# typeperf outputs in a CSV format like this:
# "07/19/2018 01:32:26.605","3.000000"
# (date, process queue length)
tokens = line.split(',')
if len(tokens) != 2:
raise ValueError
value = tokens[1]
if not value.startswith('"') or not value.endswith('"'):
raise ValueError
value = value[1:-1]
return float(value)
def METHOD_NAME(self):
overlapped, _ = _winapi.ReadFile(self.pipe, BUFSIZE, True)
bytes_read, res = overlapped.GetOverlappedResult(False)
if res != 0:
return ()
output = overlapped.getbuffer()
output = output.decode('oem', 'replace')
output = self._buffer + output
lines = output.splitlines(True)
# bpo-36670: typeperf only writes a newline *before* writing a value,
# not after. Sometimes, the written line in incomplete (ex: only
# timestamp, without the process queue length). Only pass the last line
# to the parser if it's a valid value, otherwise store it in
# self._buffer.
try:
self._parse_line(lines[-1])
except ValueError:
self._buffer = lines.pop(-1)
else:
self._buffer = ''
return lines
def getloadavg(self):
if self._popen is None:
return None
returncode = self._popen.poll()
if returncode is not None:
self.close(kill=False)
return None
try:
lines = self.METHOD_NAME()
except BrokenPipeError:
self.close()
return None
for line in lines:
line = line.rstrip()
# Ignore the initial header:
# "(PDH-CSV 4.0)","\\\\WIN\\System\\Processor Queue Length"
if 'PDH-CSV' in line:
continue
# Ignore blank lines
if not line:
continue
try:
processor_queue_length = self._parse_line(line)
except ValueError:
print_warning("Failed to parse typeperf output: %a" % line)
continue
# We use an exponentially weighted moving average, imitating the
# load calculation on Unix systems.
# https://en.wikipedia.org/wiki/Load_(computing)#Unix-style_load_calculation
# https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
if self._load is not None:
self._load = (self._load * LOAD_FACTOR_1
+ processor_queue_length * (1.0 - LOAD_FACTOR_1))
elif len(self._values) < NVALUE:
self._values.append(processor_queue_length)
else:
self._load = sum(self._values) / len(self._values)
return self._load |
6,977 | generic visit | import dataclasses
import functools
from typing import Callable, Iterable
from magma.circuit import CircuitKind, CircuitType, DefineCircuitKind
from magma.clock import ClockTypes
from magma.common import is_empty
from magma.conversions import as_bits
from magma.passes.clock import get_undriven_clocks_in_value
from magma.value_utils import ValueVisitor
from magma.t import Type
@dataclasses.dataclass(frozen=True)
class _StubbifierResult:
modified: bool
descend: bool
Stubbifier = Callable[[Type], _StubbifierResult]
def zero_stubbifier(value: Type) -> _StubbifierResult:
if isinstance(value, ClockTypes):
return _StubbifierResult(modified=False, descend=False)
contains_clock = any(
not is_empty(get_undriven_clocks_in_value(value, T))
for T in ClockTypes
)
if contains_clock:
return _StubbifierResult(modified=False, descend=True)
if value.is_output():
value.unused()
return _StubbifierResult(modified=True, descend=False)
if value.is_input():
bits = as_bits(value)
bits @= 0
return _StubbifierResult(modified=True, descend=False)
return _StubbifierResult(modified=False, descend=True)
class NoOverrideDrivenStubbifierFactory:
def __init__(self, stubbifier: Stubbifier):
self._stubbifier = stubbifier
def __call__(self, value: Type) -> bool:
if value.value() is not None:
return _StubbifierResult(modified=False, descend=False)
return self._stubbifier(value)
no_override_driven_zero_stubbifier = (
NoOverrideDrivenStubbifierFactory(zero_stubbifier)
)
class _StubifyVisitor(ValueVisitor):
def __init__(self, stubbifier: Stubbifier):
self._stubbifier = stubbifier
self._modified = False
@property
def modified(self):
return self._modified
def METHOD_NAME(self, value: Type):
result = self._stubbifier(value)
if result.modified:
self._modified = True
if not result.descend:
return
super().METHOD_NAME(value)
def _stubify_impl(
ports: Iterable[Type],
stubbifier: Stubbifier = no_override_driven_zero_stubbifier
) -> bool:
modified = False
for port in ports:
visitor = _StubifyVisitor(stubbifier)
visitor.visit(port)
modified = modified or visitor.modified
return modified
def _stub_open(cls):
raise NotImplementedError("Can not call open() on a circuit stub")
@functools.singledispatch
def stubify(
interface,
stubbifier: Stubbifier = no_override_driven_zero_stubbifier
):
_ = _stubify_impl(interface.ports.values(), stubbifier)
@stubify.register
def _(
obj: CircuitKind,
stubbifier: Stubbifier = no_override_driven_zero_stubbifier
):
ckt = obj
with ckt.open():
modified = _stubify_impl(ckt.interface.ports.values(), stubbifier)
if modified:
# NOTE(rsetaluri): This is a hack because we don't have good handling of
# isdefinition when the circuit is modified. We should be doing that
# more principled-ly. See https://github.com/phanrahan/magma/issues/929.
ckt._is_definition = True
# Set ckt.open to be a function which raises a NotImplementedError. Note
# that we *can't* do this in the class itself, since we need to call open()
# to tie the outputs first (in stubify()). Afterwards, we can override the
# method.
setattr(ckt, "open", classmethod(_stub_open))
def circuit_stub(
cls=None,
*,
stubbifier: Stubbifier = no_override_driven_zero_stubbifier
):
"""
Inspired by https://pybit.es/decorator-optional-argument.html.
Stub-ifys a circuit, based on optional parameter @stubbifier. Default
behavior is driving all outputs to zero. All inputs will be marked as unused
automatically as well.
"""
if cls is None:
return functools.partial(circuit_stub, stubbifier=stubbifier)
stubify(cls, stubbifier)
return cls
class _CircuitStubMeta(DefineCircuitKind):
def __new__(metacls, name, bases, dct):
cls = super().__new__(metacls, name, bases, dct)
# Only call stubify() on user circuits (i.e. do not call on CircuitStub
# base class).
if not dct.get("_circuit_base_", False):
stubify(cls, stubbifier=no_override_driven_zero_stubbifier)
return cls
class CircuitStub(CircuitType, metaclass=_CircuitStubMeta):
_circuit_base_ = True |
6,978 | export | # coding=utf-8
# @license
# Copyright 2019-2020 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for representing a Neuroglancer coordinate space."""
import collections
import numpy as np
__all__ = []
def METHOD_NAME(obj):
__all__.append(obj.__name__)
return obj
si_prefixes = {
'Y': 24,
'Z': 21,
'E': 18,
'P': 15,
'T': 12,
'G': 9,
'M': 6,
'k': 3,
'h': 2,
'': 0,
'c': -2,
'm': -3,
'u': -6,
'µ': -6,
'n': -9,
'p': -12,
'f': -15,
'a': -18,
'z': -21,
'y': -24,
}
si_units = ['m', 's', 'rad/s', 'Hz']
si_units_with_prefixes = {
'%s%s' % (prefix, unit): (unit, exponent)
for (prefix, exponent) in si_prefixes.items() for unit in si_units
}
si_units_with_prefixes[''] = ('', 0)
def parse_unit(scale, unit):
unit, exponent = si_units_with_prefixes[unit]
if exponent >= 0:
return (scale * 10**exponent, unit)
else:
return (scale / 10**(-exponent), unit)
@METHOD_NAME
class CoordinateArray:
__slots__ = ('_data')
def __init__(self, json_data=None, labels=None, coordinates=None, mappings=None):
if mappings is None:
mappings = dict()
else:
mappings = dict(mappings)
if labels is not None:
if coordinates is None:
coordinates = range(len(labels))
for coordinate, label in zip(coordinates, labels):
mappings[coordinate] = label
if json_data is not None:
if not isinstance(json_data,
dict) or 'coordinates' not in json_data or 'labels' not in json_data:
raise ValueError('Expected object with "coordinates" and "labels" properties')
coordinates = json_data['coordinates']
labels = json_data['labels']
for coordinate, label in zip(coordinates, labels):
mappings[coordinate] = label
self._data = mappings
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __repr__(self):
return repr(self._data)
def __str__(self):
return str(self._data)
def __eq__(self, other):
if not isinstance(other, CoordinateArray): return False
return self._data == other._data
def __getitem__(self, k):
if isinstance(k, str):
for other_k, other_v in self._data.items():
if other_k == k: return other_v
raise KeyError('label not found: %r' % (k, ))
return self._data[k]
def to_json(self):
return dict(coordinates=list(self._data.keys()), labels=list(self._data.values()))
@METHOD_NAME
class DimensionScale(collections.namedtuple('DimensionScale',
['scale', 'unit', 'coordinate_array'])):
__slots__ = ()
def __new__(cls, scale=1, unit='', coordinate_array=None):
return super(DimensionScale, cls).__new__(cls, scale, unit, coordinate_array)
@staticmethod
def from_json(json):
if isinstance(json, DimensionScale):
return json
if isinstance(json, list):
if len(json) != 2:
raise ValueError('Expected [scale, unit], but received: %r' % (json, ))
scale = json[0]
unit = json[1]
coordinate_array = None
else:
scale = None
unit = None
coordinate_array = CoordinateArray(json_data=json)
return DimensionScale(scale=scale, unit=unit, coordinate_array=coordinate_array)
@METHOD_NAME
class CoordinateSpace(object):
__slots__ = ('names', 'scales', 'units', 'coordinate_arrays')
def __init__(self, json=None, names=None, scales=None, units=None, coordinate_arrays=None):
if json is None:
if names is not None:
self.names = tuple(names)
scales = np.array(scales, dtype=np.float64)
if isinstance(units, str):
units = tuple(units for _ in names)
scales_and_units = tuple(
parse_unit(scale, unit) for scale, unit in zip(scales, units))
scales = np.array([s[0] for s in scales_and_units], dtype=np.float64)
units = tuple(s[1] for s in scales_and_units)
if coordinate_arrays is None:
coordinate_arrays = tuple(None for x in units)
else:
coordinate_arrays = tuple(coordinate_arrays)
self.units = units
self.scales = scales
self.coordinate_arrays = coordinate_arrays
else:
self.names = ()
self.scales = np.zeros(0, dtype=np.float64)
self.units = ()
self.coordinate_arrays = ()
else:
if not isinstance(json, dict): raise TypeError
self.names = tuple(json.keys())
values = tuple(DimensionScale.from_json(v) for v in json.values())
self.scales = np.array([v.scale for v in values], dtype=np.float64)
self.units = tuple(v.unit for v in values)
self.coordinate_arrays = tuple(v.coordinate_array for v in values)
self.scales.setflags(write=False)
@property
def rank(self):
return len(self.names)
def __getitem__(self, i):
if isinstance(i, str):
idx = self.names.index(i)
return DimensionScale(scale=self.scales[idx],
unit=self.units[idx],
coordinate_array=self.coordinate_arrays[idx])
if isinstance(i, slice):
idxs = range(self.rank)[i]
return [
DimensionScale(scale=self.scales[j],
unit=self.units[j],
coordinate_array=self.coordinate_arrays[j]) for j in idxs
]
return DimensionScale(scale=self.scales[i],
unit=self.units[i],
coordinate_array=self.coordinate_arrays[i])
def __repr__(self):
return 'CoordinateSpace(%r)' % (self.to_json(), )
def to_json(self):
d = collections.OrderedDict()
for name, scale, unit, coordinate_array in zip(self.names, self.scales, self.units,
self.coordinate_arrays):
if coordinate_array is None:
d[name] = [scale, unit]
else:
d[name] = coordinate_array.to_json()
return d |
6,979 | test zero | """
:codeauthor: Rupesh Tare <rupesht@saltstack.com>
Test cases for salt.modules.lvs
"""
import pytest
import salt.modules.lvs as lvs
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {lvs: {}}
def test_add_service():
"""
Test for Add a virtual service.
"""
with patch.object(lvs, "__detect_os", return_value="C"):
with patch.object(lvs, "_build_cmd", return_value="B"):
with patch.dict(
lvs.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"retcode": "ret", "stderr": "stderr"}
)
},
):
assert lvs.add_service() == "stderr"
def test_edit_service():
"""
Test for Edit the virtual service.
"""
with patch.object(lvs, "__detect_os", return_value="C"):
with patch.object(lvs, "_build_cmd", return_value="B"):
with patch.dict(
lvs.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"retcode": "ret", "stderr": "stderr"}
)
},
):
assert lvs.edit_service() == "stderr"
def test_delete_service():
"""
Test for Delete the virtual service.
"""
with patch.object(lvs, "__detect_os", return_value="C"):
with patch.object(lvs, "_build_cmd", return_value="B"):
with patch.dict(
lvs.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"retcode": "ret", "stderr": "stderr"}
)
},
):
assert lvs.delete_service() == "stderr"
def test_add_server():
"""
Test for Add a real server to a virtual service.
"""
with patch.object(lvs, "__detect_os", return_value="C"):
with patch.object(lvs, "_build_cmd", return_value="B"):
with patch.dict(
lvs.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"retcode": "ret", "stderr": "stderr"}
)
},
):
assert lvs.add_server() == "stderr"
def test_edit_server():
"""
Test for Edit a real server to a virtual service.
"""
with patch.object(lvs, "__detect_os", return_value="C"):
with patch.object(lvs, "_build_cmd", return_value="B"):
with patch.dict(
lvs.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"retcode": "ret", "stderr": "stderr"}
)
},
):
assert lvs.edit_server() == "stderr"
def test_delete_server():
"""
Test for Delete the realserver from the virtual service.
"""
with patch.object(lvs, "__detect_os", return_value="C"):
with patch.object(lvs, "_build_cmd", return_value="B"):
with patch.dict(
lvs.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"retcode": "ret", "stderr": "stderr"}
)
},
):
assert lvs.delete_server() == "stderr"
def test_clear():
"""
Test for Clear the virtual server table
"""
with patch.object(lvs, "__detect_os", return_value="C"):
with patch.dict(
lvs.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"retcode": "ret", "stderr": "stderr"}
)
},
):
assert lvs.clear() == "stderr"
def test_get_rules():
"""
Test for Get the virtual server rules
"""
with patch.object(lvs, "__detect_os", return_value="C"):
with patch.dict(lvs.__salt__, {"cmd.run": MagicMock(return_value="A")}):
assert lvs.get_rules() == "A"
def test_list_():
"""
Test for List the virtual server table
"""
with patch.object(lvs, "__detect_os", return_value="C"):
with patch.object(lvs, "_build_cmd", return_value="B"):
with patch.dict(
lvs.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"retcode": "ret", "stderr": "stderr"}
)
},
):
assert lvs.list_("p", "s") == "stderr"
def METHOD_NAME():
"""
Test for Zero the packet, byte and rate counters in a
service or all services.
"""
with patch.object(lvs, "__detect_os", return_value="C"):
with patch.object(lvs, "_build_cmd", return_value="B"):
with patch.dict(
lvs.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"retcode": "ret", "stderr": "stderr"}
)
},
):
assert lvs.zero("p", "s") == "stderr"
def test_check_service():
"""
Test for Check the virtual service exists.
"""
with patch.object(lvs, "__detect_os", return_value="C"):
with patch.object(lvs, "_build_cmd", return_value="B"):
with patch.dict(
lvs.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"retcode": "ret", "stderr": "stderr"}
)
},
):
with patch.object(lvs, "get_rules", return_value="C"):
assert lvs.check_service("p", "s") == "Error: service not exists"
def test_check_server():
"""
Test for Check the real server exists in the specified service.
"""
with patch.object(lvs, "__detect_os", return_value="C"):
with patch.object(lvs, "_build_cmd", return_value="B"):
with patch.dict(
lvs.__salt__,
{
"cmd.run_all": MagicMock(
return_value={"retcode": "ret", "stderr": "stderr"}
)
},
):
with patch.object(lvs, "get_rules", return_value="C"):
assert lvs.check_server("p", "s") == "Error: server not exists" |
6,980 | generate | from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.build import check_min_cppstd
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.files import copy, get, rmdir
from conan.tools.microsoft import is_msvc
from conan.tools.scm import Version
import os
required_conan_version = ">=1.51.1"
class OatppLibresslConan(ConanFile):
name = "oatpp-libressl"
license = "Apache-2.0"
homepage = "https://github.com/oatpp/oatpp-libressl"
url = "https://github.com/conan-io/conan-center-index"
description = "oat++ libressl library"
topics = ("oat++", "oatpp", "libressl")
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
try:
del self.options.fPIC
except Exception:
pass
def layout(self):
cmake_layout(self, src_folder="src")
def requirements(self):
self.requires(f"oatpp/{self.version}")
self.requires("libressl/3.5.3")
def validate(self):
if self.info.settings.compiler.get_safe("cppstd"):
check_min_cppstd(self, 11)
if is_msvc(self) and self.info.options.shared:
raise ConanInvalidConfiguration(f"{self.ref} can not be built as shared library with msvc")
if self.info.settings.compiler == "gcc" and Version(self.info.settings.compiler.version) < "5":
raise ConanInvalidConfiguration(f"{self.ref} requires GCC >=5")
def source(self):
get(self, **self.conan_data["sources"][self.version],
destination=self.source_folder, strip_root=True)
def METHOD_NAME(self):
tc = CMakeToolchain(self)
tc.variables["OATPP_BUILD_TESTS"] = False
tc.variables["OATPP_MODULES_LOCATION"] = "INSTALLED"
# Honor BUILD_SHARED_LIBS from conan_toolchain (see https://github.com/conan-io/conan/issues/11840)
tc.cache_variables["CMAKE_POLICY_DEFAULT_CMP0077"] = "NEW"
tc.METHOD_NAME()
deps = CMakeDeps(self)
deps.METHOD_NAME()
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "oatpp-libressl")
self.cpp_info.set_property("cmake_target_name", "oatpp::oatpp-libressl")
# TODO: back to global scope in conan v2 once legacy generators removed
self.cpp_info.components["_oatpp-libressl"].includedirs = [
os.path.join("include", f"oatpp-{self.version}", "oatpp-libressl")
]
self.cpp_info.components["_oatpp-libressl"].libdirs = [os.path.join("lib", f"oatpp-{self.version}")]
if self.settings.os == "Windows" and self.options.shared:
self.cpp_info.components["_oatpp-libressl"].bindirs = [os.path.join("bin", f"oatpp-{self.version}")]
else:
self.cpp_info.components["_oatpp-libressl"].bindirs = []
self.cpp_info.components["_oatpp-libressl"].libs = ["oatpp-libressl"]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.components["_oatpp-libressl"].system_libs = ["pthread"]
# TODO: to remove in conan v2 once legacy generators removed
self.cpp_info.filenames["cmake_find_package"] = "oatpp-libressl"
self.cpp_info.filenames["cmake_find_package_multi"] = "oatpp-libressl"
self.cpp_info.names["cmake_find_package"] = "oatpp"
self.cpp_info.names["cmake_find_package_multi"] = "oatpp"
self.cpp_info.components["_oatpp-libressl"].names["cmake_find_package"] = "oatpp-libressl"
self.cpp_info.components["_oatpp-libressl"].names["cmake_find_package_multi"] = "oatpp-libressl"
self.cpp_info.components["_oatpp-libressl"].set_property("cmake_target_name", "oatpp::oatpp-libressl")
self.cpp_info.components["_oatpp-libressl"].requires = ["oatpp::oatpp", "libressl::libressl"] |
6,981 | teardown module | #!/usr/bin/env python
# SPDX-License-Identifier: ISC
#
# test_bgp_vpnv4_ebgp.py
# Part of NetDEF Topology Tests
#
# Copyright (c) 2022 by 6WIND
#
"""
test_bgp_vpnv4_ebgp.py: Test the FRR BGP daemon with EBGP direct connection
"""
import os
import sys
import json
from functools import partial
import pytest
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
# Required to instantiate the topology builder class.
pytestmark = [pytest.mark.bgpd]
def build_topo(tgen):
"Build function"
# Create 2 routers.
tgen.add_router("r1")
tgen.add_router("r2")
tgen.add_router("r3")
switch = tgen.add_switch("s1")
switch.add_link(tgen.gears["r1"])
switch.add_link(tgen.gears["r2"])
switch.add_link(tgen.gears["r3"])
switch = tgen.add_switch("s2")
switch.add_link(tgen.gears["r1"])
switch = tgen.add_switch("s3")
switch.add_link(tgen.gears["r2"])
switch = tgen.add_switch("s4")
switch.add_link(tgen.gears["r3"])
def _populate_iface():
tgen = get_topogen()
cmds_list = [
"ip link add vrf1 type vrf table 10",
"echo 100000 > /proc/sys/net/mpls/platform_labels",
"ip link set dev vrf1 up",
"ip link set dev {0}-eth1 master vrf1",
"echo 1 > /proc/sys/net/mpls/conf/{0}-eth0/input",
]
for cmd in cmds_list:
input = cmd.format("r1")
logger.info("input: " + cmd)
output = tgen.net["r1"].cmd(cmd.format("r1"))
logger.info("output: " + output)
for cmd in cmds_list:
input = cmd.format("r2")
logger.info("input: " + cmd)
output = tgen.net["r2"].cmd(cmd.format("r2"))
logger.info("output: " + output)
for cmd in cmds_list:
input = cmd.format("r3")
logger.info("input: " + cmd)
output = tgen.net["r3"].cmd(cmd.format("r3"))
logger.info("output: " + output)
def setup_module(mod):
"Sets up the pytest environment"
tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
_populate_iface()
for rname, router in router_list.items():
router.load_config(
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
# Initialize all routers.
tgen.start_router()
def METHOD_NAME(_mod):
"Teardown the pytest environment"
tgen = get_topogen()
tgen.stop_topology()
def test_protocols_convergence():
"""
Assert that all protocols have converged
statuses as they depend on it.
"""
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
router = tgen.gears["r1"]
logger.info("Dump some context for r1")
router.vtysh_cmd("show bgp ipv4 vpn")
router.vtysh_cmd("show bgp summary")
router.vtysh_cmd("show bgp vrf vrf1 ipv4")
router.vtysh_cmd("show running-config")
router = tgen.gears["r2"]
logger.info("Dump some context for r2")
router.vtysh_cmd("show bgp ipv4 vpn")
router.vtysh_cmd("show bgp summary")
router.vtysh_cmd("show bgp vrf vrf1 ipv4")
router.vtysh_cmd("show running-config")
# Check IPv4 routing tables on r1
logger.info("Checking IPv4 routes for convergence on r1")
router = tgen.gears["r1"]
json_file = "{}/{}/ipv4_routes.json".format(CWD, router.name)
if not os.path.isfile(json_file):
logger.info("skipping file {}".format(json_file))
assert 0, "ipv4_routes.json file not found"
return
expected = json.loads(open(json_file).read())
test_func = partial(
topotest.router_json_cmp,
router,
"show ip route vrf vrf1 json",
expected,
)
_, result = topotest.run_and_expect(test_func, None, count=40, wait=2)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
# Check BGP IPv4 routing tables on r1
logger.info("Checking BGP IPv4 routes for convergence on r1")
router = tgen.gears["r1"]
json_file = "{}/{}/bgp_ipv4_routes.json".format(CWD, router.name)
if not os.path.isfile(json_file):
assert 0, "bgp_ipv4_routes.json file not found"
expected = json.loads(open(json_file).read())
test_func = partial(
topotest.router_json_cmp,
router,
"show bgp vrf vrf1 ipv4 json",
expected,
)
_, result = topotest.run_and_expect(test_func, None, count=40, wait=2)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
# Check BGP IPv4 imported entry is not detected as local
# "selectionReason": "Locally configured route"
donna = tgen.gears["r1"].vtysh_cmd(
"show bgp vrf vrf1 ipv4 172.31.0.10/32 json", isjson=True
)
routes = donna["paths"]
selectionReasonFound = False
for route in routes:
if "bestpath" not in route.keys():
continue
if "selectionReason" not in route["bestpath"].keys():
continue
if "Locally configured route" == route["bestpath"]["selectionReason"]:
assert 0, "imported prefix has wrong reason detected"
selectionReasonFound = True
if not selectionReasonFound:
assertmsg = '"{}" imported prefix has wrong reason detected'.format(router.name)
assert False, assertmsg
# Check BGP IPv4 routing tables on r2 not installed
logger.info("Checking BGP IPv4 routes for convergence on r2")
router = tgen.gears["r2"]
json_file = "{}/{}/bgp_ipv4_routes.json".format(CWD, router.name)
if not os.path.isfile(json_file):
assert 0, "bgp_ipv4_routes.json file not found"
expected = json.loads(open(json_file).read())
test_func = partial(
topotest.router_json_cmp,
router,
"show bgp vrf vrf1 ipv4 json",
expected,
)
_, result = topotest.run_and_expect(test_func, None, count=40, wait=2)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args)) |
6,982 | config options | from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.build import check_min_cppstd
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, collect_libs, copy, export_conandata_patches, get, rmdir, save
from conan.tools.microsoft import is_msvc, is_msvc_static_runtime
from conan.tools.scm import Version
import os
import textwrap
required_conan_version = ">=1.53.0"
class YamlCppConan(ConanFile):
name = "yaml-cpp"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/jbeder/yaml-cpp"
topics = ("yaml", "yaml-parser", "serialization", "data-serialization")
description = "A YAML parser and emitter in C++"
license = "MIT"
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
def export_sources(self):
export_conandata_patches(self)
def METHOD_NAME(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
def layout(self):
cmake_layout(self, src_folder="src")
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
check_min_cppstd(self, "11")
if self.options.shared and is_msvc(self) and is_msvc_static_runtime(self):
raise ConanInvalidConfiguration(
f"Visual Studio build for {self.name} shared library with MT runtime is not supported"
)
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.variables["YAML_CPP_BUILD_TESTS"] = False
tc.variables["YAML_CPP_BUILD_CONTRIB"] = True
tc.variables["YAML_CPP_BUILD_TOOLS"] = False
tc.variables["YAML_CPP_INSTALL"] = True
tc.variables["YAML_BUILD_SHARED_LIBS"] = self.options.shared
if is_msvc(self):
tc.variables["YAML_MSVC_SHARED_RT"] = not is_msvc_static_runtime(self)
tc.preprocessor_definitions["_NOEXCEPT"] = "noexcept"
tc.generate()
def build(self):
apply_conandata_patches(self)
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
rmdir(self, os.path.join(self.package_folder, "CMake"))
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
rmdir(self, os.path.join(self.package_folder, "share"))
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
self._create_cmake_module_alias_targets(
os.path.join(self.package_folder, self._module_file_rel_path),
{"yaml-cpp": "yaml-cpp::yaml-cpp"}
)
def _create_cmake_module_alias_targets(self, module_file, targets):
content = ""
for alias, aliased in targets.items():
content += textwrap.dedent(f"""\
if(TARGET {aliased} AND NOT TARGET {alias})
add_library({alias} INTERFACE IMPORTED)
set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
endif()
""")
save(self, module_file, content)
@property
def _module_file_rel_path(self):
return os.path.join("lib", "cmake", f"conan-official-{self.name}-targets.cmake")
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "yaml-cpp")
self.cpp_info.set_property("cmake_target_name", "yaml-cpp")
self.cpp_info.set_property("pkg_config_name", "yaml-cpp")
self.cpp_info.libs = collect_libs(self)
if self.settings.os in ("Linux", "FreeBSD"):
self.cpp_info.system_libs.append("m")
if is_msvc(self):
self.cpp_info.defines.append("_NOEXCEPT=noexcept")
if Version(self.version) < "0.8.0" and self.options.shared:
self.cpp_info.defines.append("YAML_CPP_DLL")
if Version(self.version) >= "0.8.0" and not self.options.shared:
self.cpp_info.defines.append("YAML_CPP_STATIC_DEFINE")
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
self.cpp_info.build_modules["cmake_find_package"] = [self._module_file_rel_path]
self.cpp_info.build_modules["cmake_find_package_multi"] = [self._module_file_rel_path] |
6,983 | wires for tile | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import os.path
import csv
import pickle
import re
from collections import namedtuple
from prjxray.util import OpenSafeFile
def read_root_csv(root_dir):
""" Reads root.csv from raw db directory.
This should only be used during database generation.
"""
tiles = {}
nodes = []
with OpenSafeFile(os.path.join(root_dir, 'root.csv')) as f:
for d in csv.DictReader(f):
if d['filetype'] == 'tile':
if d['subtype'] not in tiles:
tiles[d['subtype']] = []
tiles[d['subtype']].append(
os.path.join(root_dir, d['filename']))
elif d['filetype'] == 'node':
nodes.append(os.path.join(root_dir, d['filename']))
return tiles, nodes
def verify_nodes(raw_nodes, nodes, error_nodes):
""" Compares raw_nodes with generated_nodes and adds errors to error_nodes.
Args:
raw_nodes - Iterable of (node name, iterable of wires in node).
nodes - Iterable of iterable of wires in nodes.
error_nodes - List to be appended to when an error occurs. Elements will
be 3 tuple of raw node name, raw node, and generated node
that did not match.
"""
wire_nodes = {}
for node in nodes:
node_set = set(node)
for wire in node:
wire_nodes[wire] = node_set
for node, raw_node_wires in raw_nodes:
raw_node_set = set(raw_node_wires)
for wire in sorted(raw_node_set):
if wire not in wire_nodes:
if set((wire, )) != raw_node_set:
error_nodes.append((node, tuple(raw_node_set), (wire, )))
elif wire_nodes[wire] != raw_node_set:
error_nodes.append(
(node, tuple(raw_node_set), tuple(wire_nodes[wire])))
def check_errors(flat_error_nodes, ignored_wires):
""" Check if error_nodes has errors that are not covered in ignored_wires.
Args:
flat_error_nodes - List of error_nodes generated from verify_nodes.
ignored_wires - List of wires that should be ignored if they were generated.
"""
error_nodes = {}
for node, raw_node, generated_nodes in flat_error_nodes:
if node not in error_nodes:
error_nodes[node] = {
'raw_node': set(raw_node),
'generated_nodes': set(),
}
# Make sure all raw nodes are the same.
assert error_nodes[node]['raw_node'] == set(raw_node)
error_nodes[node]['generated_nodes'].add(
tuple(sorted(generated_nodes)))
for node, error in error_nodes.items():
combined_generated_nodes = set()
for generated_node in error['generated_nodes']:
combined_generated_nodes |= set(generated_node)
# Make sure there are not extra wires in nodes.
assert error['raw_node'] == combined_generated_nodes, (node, error)
good_node = max(error['generated_nodes'], key=lambda x: len(x))
bad_nodes = error['generated_nodes'] - set((good_node, ))
# Max sure only single wires are stranded
assert max(len(generated_node) for generated_node in bad_nodes) == 1
for generate_node in bad_nodes:
for wire in generate_node:
if wire not in ignored_wires:
return False
return True
class NodeLookup(object):
def __init__(self):
self.nodes = {}
def load_from_nodes(self, nodes):
self.nodes = nodes
def load_from_root_csv(self, nodes):
import pyjson5 as json5
import progressbar
for node in progressbar.progressbar(nodes):
with OpenSafeFile(node) as f:
node_wires = json5.load(f)
assert node_wires['node'] not in self.nodes
self.nodes[node_wires['node']] = node_wires['wires']
def load_from_file(self, fname):
with OpenSafeFile(fname, 'rb') as f:
self.nodes = pickle.load(f)
def save_to_file(self, fname):
with OpenSafeFile(fname, 'wb') as f:
pickle.dump(self.nodes, f)
def site_pin_node_to_wires(self, tile, node):
if node is None:
return
node_wires = self.nodes[node]
for wire in node_wires:
if wire['wire'].startswith(tile + '/'):
yield wire['wire'][len(tile) + 1:]
def METHOD_NAME(self, tile):
for node in self.nodes.values():
for wire in node:
if wire['wire'].startswith(tile + '/'):
yield wire['wire'][len(tile) + 1:]
def compare_prototype_site(proto_a, proto_b):
""" Compare two proto site type.
Will assert if prototypes are not equivalent.
"""
assert proto_a == proto_b, repr((proto_a, proto_b))
# All site names appear to follow the pattern <type>_X<abs coord>Y<abs coord>.
# Generally speaking, only the tile relatively coordinates are required to
# assemble arch defs, so we re-origin the coordinates to be relative to the tile
# (e.g. start at X0Y0) and discard the prefix from the name.
SITE_COORDINATE_PATTERN = re.compile('^(.+)_X([0-9]+)Y([0-9]+)$')
SiteCoordinate = namedtuple('SiteCoordinate', 'prefix x_coord y_coord')
def get_site_coordinate_from_name(name):
"""
>>> get_site_coordinate_from_name('SLICE_X1Y0')
SiteCoordinate(prefix='SLICE', x_coord=1, y_coord=0)
>>> get_site_coordinate_from_name('SLICE_X0Y0')
SiteCoordinate(prefix='SLICE', x_coord=0, y_coord=0)
>>> get_site_coordinate_from_name('INT_L_X500Y999')
SiteCoordinate(prefix='INT_L', x_coord=500, y_coord=999)
"""
coordinate = SITE_COORDINATE_PATTERN.match(name)
assert coordinate is not None, name
return SiteCoordinate(
prefix=coordinate.group(1),
x_coord=int(coordinate.group(2)),
y_coord=int(coordinate.group(3)),
)
def get_site_prefix_from_name(name):
"""
Returns the prefix of a site from its name
"""
coordinate = SITE_COORDINATE_PATTERN.match(name)
assert coordinate is not None, name
return coordinate.group(1)
def find_origin_coordinate(site_name, site_names):
"""
Find the coordinates of each site within the tile, and then subtract the
smallest coordinate to re-origin them all to be relative to the tile.
"""
x_coords = []
y_coords = []
for site in site_names:
coordinate = get_site_coordinate_from_name(site)
site_name_prefix = get_site_prefix_from_name(site_name)
if coordinate.prefix == site_name_prefix:
x_coords.append(coordinate.x_coord)
y_coords.append(coordinate.y_coord)
if len(x_coords) == 0 or len(y_coords) == 0:
return 0, 0
min_x_coord = min(x_coords)
min_y_coord = min(y_coords)
return min_x_coord, min_y_coord |
6,984 | load | '''
Usage: `python3 rename_nll_facts.py src ref dest`
Renames atoms in `src/*.facts` to match the names used in `ref/*.facts`, then
writes the renamed facts to `dest/`.
'''
import ast
from collections import defaultdict
import os
import sys
src_dir, ref_dir, dest_dir = sys.argv[1:]
# Map `src` loan/origin/path names to `ref` loan/origin/path names. We don't
# break this down by type because the names for each type don't collide anyway.
name_map = {}
# Set of `ref` names that appear as values in `name_map`.
ref_names_seen = set()
def match_name(src_name, ref_name):
if src_name in name_map:
old_ref_name = name_map[src_name]
if ref_name != old_ref_name:
print('error: %r matches both %r and %r' % (
src_name, old_ref_name, ref_name))
return
else:
if ref_name in ref_names_seen:
print('error: %r matches %r, but %r is already used' % (
src_name, ref_name, ref_name))
return
name_map[src_name] = ref_name
ref_names_seen.add(ref_name)
def match_loan(src_name, ref_name):
match_name(src_name, ref_name)
def match_origin(src_name, ref_name):
match_name(src_name, ref_name)
def match_path(src_name, ref_name):
match_name(src_name, ref_name)
def METHOD_NAME(name):
with open(os.path.join(src_dir, name + '.facts')) as f:
src_rows = [[ast.literal_eval(s) for s in line.strip().split('\t')]
for line in f]
with open(os.path.join(ref_dir, name + '.facts')) as f:
ref_rows = [[ast.literal_eval(s) for s in line.strip().split('\t')]
for line in f]
return src_rows, ref_rows
# Match up paths using `path_is_var` and `path_assigned_at_base`.
def match_path_is_var():
src, ref = METHOD_NAME('path_is_var')
ref_dct = {var: path for path, var in ref}
for path, var in src:
if var not in ref_dct:
continue
match_path(path, ref_dct[var])
match_path_is_var()
def match_path_assigned_at_base():
src, ref = METHOD_NAME('path_assigned_at_base')
ref_dct = {point: path for path, point in ref}
for path, point in src:
if point not in ref_dct:
continue
match_path(path, ref_dct[point])
match_path_assigned_at_base()
# Match up origins and loans using `loan_issued_at`
def match_loan_issued_at():
src, ref = METHOD_NAME('loan_issued_at')
ref_dct = {point: (origin, loan) for origin, loan, point in ref}
for origin, loan, point in src:
if point not in ref_dct:
continue
match_origin(origin, ref_dct[point][0])
match_origin(loan, ref_dct[point][1])
match_loan_issued_at()
# Match up origins using `use_of_var_derefs_origin`
def match_use_of_var_derefs_origin():
src, ref = METHOD_NAME('use_of_var_derefs_origin')
src_dct = defaultdict(list)
for var, origin in src:
src_dct[var].append(origin)
ref_dct = defaultdict(list)
for var, origin in ref:
ref_dct[var].append(origin)
for var in set(src_dct.keys()) & set(ref_dct.keys()):
src_origins = src_dct[var]
ref_origins = ref_dct[var]
if len(src_origins) != len(ref_origins):
print('error: var %r has %d origins in src but %d in ref' % (
var, len(src_origins), len(ref_origins)))
continue
for src_origin, ref_origin in zip(src_origins, ref_origins):
match_origin(src_origin, ref_origin)
match_use_of_var_derefs_origin()
# Rewrite `src` using the collected name mappings.
os.makedirs(dest_dir, exist_ok=True)
for name in os.listdir(src_dir):
if name.startswith('.') or not name.endswith('.facts'):
continue
with open(os.path.join(src_dir, name)) as src, \
open(os.path.join(dest_dir, name), 'w') as dest:
for line in src:
src_parts = [ast.literal_eval(s) for s in line.strip().split('\t')]
dest_parts = []
for part in src_parts:
if part.startswith('_') or part.startswith('Start') or part.startswith('Mid'):
dest_parts.append(part)
continue
dest_part = name_map.get(part)
if dest_part is None:
print('error: no mapping for %r (used in %s: %r)' % (
part, name, src_parts))
dest_part = 'OLD:' + part
dest_parts.append(dest_part)
dest.write('\t'.join('"%s"' % part for part in dest_parts) + '\n')
|
6,985 | sku | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetWebPubSubReplicaResult',
'AwaitableGetWebPubSubReplicaResult',
'get_web_pub_sub_replica',
'get_web_pub_sub_replica_output',
]
@pulumi.output_type
class GetWebPubSubReplicaResult:
"""
A class represent a replica resource.
"""
def __init__(__self__, id=None, location=None, name=None, provisioning_state=None, METHOD_NAME=None, system_data=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", METHOD_NAME)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. E.g. "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}"
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional['outputs.ResourceSkuResponse']:
"""
The billing information of the resource.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetWebPubSubReplicaResult(GetWebPubSubReplicaResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebPubSubReplicaResult(
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
METHOD_NAME=self.METHOD_NAME,
system_data=self.system_data,
tags=self.tags,
type=self.type)
def get_web_pub_sub_replica(replica_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebPubSubReplicaResult:
"""
Get the replica and its properties.
:param str replica_name: The name of the replica.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str resource_name: The name of the resource.
"""
__args__ = dict()
__args__['replicaName'] = replica_name
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:webpubsub/v20230601preview:getWebPubSubReplica', __args__, opts=opts, typ=GetWebPubSubReplicaResult).value
return AwaitableGetWebPubSubReplicaResult(
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
METHOD_NAME=pulumi.get(__ret__, 'sku'),
system_data=pulumi.get(__ret__, 'system_data'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_web_pub_sub_replica)
def get_web_pub_sub_replica_output(replica_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWebPubSubReplicaResult]:
"""
Get the replica and its properties.
:param str replica_name: The name of the replica.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str resource_name: The name of the resource.
"""
... |
6,986 | tear down | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Masaki Murooka (JSK Robotics Lab, The University of Tokyo)
import unittest
import numpy as np
import rospy
import rostest
import os
from moveit_commander import RobotCommander
class PythonTimeParameterizationTest(unittest.TestCase):
PLANNING_GROUP = "manipulator"
@classmethod
def setUpClass(self):
self.commander = RobotCommander("robot_description")
self.group = self.commander.get_group(self.PLANNING_GROUP)
@classmethod
def METHOD_NAME(self):
pass
def plan(self):
start_pose = self.group.get_current_pose().pose
goal_pose = self.group.get_current_pose().pose
goal_pose.position.z -= 0.1
(plan, fraction) = self.group.compute_cartesian_path(
[start_pose, goal_pose], 0.005, 0.0
)
self.assertEqual(fraction, 1.0, "Cartesian path plan failed")
return plan
def time_parameterization(self, plan, algorithm):
ref_state = self.commander.get_current_state()
retimed_plan = self.group.retime_trajectory(
ref_state,
plan,
velocity_scaling_factor=0.1,
acceleration_scaling_factor=0.1,
algorithm=algorithm,
)
return retimed_plan
def test_plan_and_time_parameterization(self):
plan = self.plan()
retimed_plan = self.time_parameterization(
plan, "iterative_time_parameterization"
)
self.assertTrue(
len(retimed_plan.joint_trajectory.points) > 0, "Retimed plan is invalid"
)
retimed_plan = self.time_parameterization(
plan, "iterative_spline_parameterization"
)
self.assertTrue(
len(retimed_plan.joint_trajectory.points) > 0, "Retimed plan is invalid"
)
retimed_plan = self.time_parameterization(
plan, "time_optimal_trajectory_generation"
)
self.assertTrue(
len(retimed_plan.joint_trajectory.points) > 0, "Retimed plan is invalid"
)
retimed_plan = self.time_parameterization(plan, "")
self.assertTrue(
len(retimed_plan.joint_trajectory.points) == 0, "Invalid retime algorithm"
)
if __name__ == "__main__":
PKGNAME = "moveit_ros_planning_interface"
NODENAME = "moveit_test_python_time_parameterization"
rospy.init_node(NODENAME)
rostest.rosrun(PKGNAME, NODENAME, PythonTimeParameterizationTest)
# suppress cleanup segfault
os._exit(0) |
6,987 | validate and add block multi error | from __future__ import annotations
from typing import List, Optional
from chia.consensus.blockchain import AddBlockResult, Blockchain
from chia.consensus.multiprocess_validation import PreValidationResult
from chia.types.full_block import FullBlock
from chia.util.errors import Err
from chia.util.ints import uint32, uint64
async def check_block_store_invariant(bc: Blockchain):
db_wrapper = bc.block_store.db_wrapper
if db_wrapper.db_version == 1:
return
in_chain = set()
max_height = -1
async with db_wrapper.writer_maybe_transaction() as conn:
async with conn.execute("SELECT height, in_main_chain FROM full_blocks") as cursor:
rows = await cursor.fetchall()
for row in rows:
height = row[0]
# if this block is in-chain, ensure we haven't found another block
# at this height that's also in chain. That would be an invariant
# violation
if row[1]:
# make sure we don't have any duplicate heights. Each block
# height can only have a single block with in_main_chain set
assert height not in in_chain
in_chain.add(height)
if height > max_height:
max_height = height
# make sure every height is represented in the set
assert len(in_chain) == max_height + 1
async def _validate_and_add_block(
blockchain: Blockchain,
block: FullBlock,
expected_result: Optional[AddBlockResult] = None,
expected_error: Optional[Err] = None,
skip_prevalidation: bool = False,
fork_point_with_peak: Optional[uint32] = None,
) -> None:
# Tries to validate and add the block, and checks that there are no errors in the process and that the
# block is added to the peak.
# If expected_result is not None, that result will be enforced.
# If expected_error is not None, that error will be enforced. If expected_error is not None,
# add_block must return Err.INVALID_BLOCK.
# If expected_result == INVALID_BLOCK but expected_error is None, we will allow for errors to happen
await check_block_store_invariant(blockchain)
if skip_prevalidation:
results = PreValidationResult(None, uint64(1), None, False)
else:
# Do not change this, validate_signatures must be False
pre_validation_results: List[PreValidationResult] = await blockchain.pre_validate_blocks_multiprocessing(
[block], {}, validate_signatures=False
)
assert pre_validation_results is not None
results = pre_validation_results[0]
if results.error is not None:
if expected_result == AddBlockResult.INVALID_BLOCK and expected_error is None:
# We expected an error but didn't specify which one
await check_block_store_invariant(blockchain)
return None
if expected_error is None:
# We did not expect an error
raise AssertionError(Err(results.error))
elif Err(results.error) != expected_error:
# We expected an error but a different one
raise AssertionError(f"Expected {expected_error} but got {Err(results.error)}")
await check_block_store_invariant(blockchain)
return None
(
result,
err,
_,
) = await blockchain.add_block(block, results, fork_point_with_peak=fork_point_with_peak)
await check_block_store_invariant(blockchain)
if expected_error is None and expected_result != AddBlockResult.INVALID_BLOCK:
# Expecting an error here (but didn't specify which), let's check if we actually got an error
if err is not None:
# Got an error
raise AssertionError(err)
else:
# Here we will enforce checking of the exact error
if err != expected_error:
# Did not get the right error, or did not get an error
raise AssertionError(f"Expected {expected_error} but got {err}")
if expected_result is not None and expected_result != result:
raise AssertionError(f"Expected {expected_result} but got {result}")
elif expected_result is None:
# If we expected an error assume that expected_result = INVALID_BLOCK
if expected_error is not None and result != AddBlockResult.INVALID_BLOCK:
raise AssertionError(f"Block should be invalid, but received: {result}")
# Otherwise, assume that expected_result = NEW_PEAK
if expected_error is None and result != AddBlockResult.NEW_PEAK:
raise AssertionError(f"Block was not added: {result}")
async def METHOD_NAME(
blockchain: Blockchain, block: FullBlock, expected_errors: List[Err], skip_prevalidation: bool = False
) -> None:
# Checks that the blockchain returns one of the expected errors
try:
await _validate_and_add_block(blockchain, block, skip_prevalidation=skip_prevalidation)
except Exception as e:
assert isinstance(e, AssertionError)
assert e.args[0] in expected_errors
return
raise AssertionError("Did not return an error")
async def _validate_and_add_block_multi_error_or_pass(
blockchain: Blockchain, block: FullBlock, expected_errors: List[Err], skip_prevalidation: bool = False
) -> None:
# Checks that the blockchain returns one of the expected errors, also allows block to be added.
try:
await _validate_and_add_block(blockchain, block, skip_prevalidation=skip_prevalidation)
except AssertionError as e:
assert e.args[0] in expected_errors
async def _validate_and_add_block_multi_result(
blockchain: Blockchain,
block: FullBlock,
expected_result: List[AddBlockResult],
skip_prevalidation: Optional[bool] = None,
) -> None:
try:
if skip_prevalidation is not None:
await _validate_and_add_block(blockchain, block, skip_prevalidation=skip_prevalidation)
else:
await _validate_and_add_block(blockchain, block)
except Exception as e:
assert isinstance(e, AssertionError)
assert "Block was not added" in e.args[0]
expected_list: List[str] = [f"Block was not added: {res}" for res in expected_result]
if e.args[0] not in expected_list:
raise AssertionError(f"{e.args[0].split('Block was not added: ')[1]} not in {expected_result}")
async def _validate_and_add_block_no_error(
blockchain: Blockchain, block: FullBlock, skip_prevalidation: Optional[bool] = None
) -> None:
# adds a block and ensures that there is no error. However, does not ensure that block extended the peak of
# the blockchain
await _validate_and_add_block_multi_result(
blockchain,
block,
expected_result=[
AddBlockResult.ALREADY_HAVE_BLOCK,
AddBlockResult.NEW_PEAK,
AddBlockResult.ADDED_AS_ORPHAN,
],
skip_prevalidation=skip_prevalidation,
) |
6,988 | scale up obj | from collections import defaultdict
from os.path import join as pjoin
import numpy as np
import numpy.testing as npt
import pytest
from fury import actor, interactor, ui
from fury import utils as vtk_utils
from fury import window
from fury.data import DATA_DIR
from fury.decorators import skip_osx, skip_win
from fury.lib import VTK_VERSION, Actor2D, PolyDataMapper2D, RegularPolygonSource
@pytest.mark.skipif(
skip_win, reason='This test does not work on Windows.' ' Need to be introspected'
)
def test_custom_interactor_style_events(recording=False):
print('Using VTK {}'.format(VTK_VERSION))
filename = 'test_custom_interactor_style_events.log.gz'
recording_filename = pjoin(DATA_DIR, filename)
scene = window.Scene()
# the show manager allows to break the rendering process
# in steps so that the widgets can be added properly
interactor_style = interactor.CustomInteractorStyle()
show_manager = window.ShowManager(
scene, size=(800, 800), reset_camera=False, interactor_style=interactor_style
)
# Create a cursor, a circle that will follow the mouse.
polygon_source = RegularPolygonSource()
polygon_source.GeneratePolygonOff() # Only the outline of the circle.
polygon_source.SetNumberOfSides(50)
polygon_source.SetRadius(10)
# polygon_source.SetRadius
polygon_source.SetCenter(0, 0, 0)
mapper = PolyDataMapper2D()
vtk_utils.set_input(mapper, polygon_source.GetOutputPort())
cursor = Actor2D()
cursor.SetMapper(mapper)
cursor.GetProperty().SetColor(1, 0.5, 0)
scene.add(cursor)
def follow_mouse(iren, obj):
obj.SetPosition(*iren.event.position)
iren.force_render()
interactor_style.add_active_prop(cursor)
interactor_style.add_callback(cursor, 'MouseMoveEvent', follow_mouse)
# create some minimalistic streamlines
lines = [
np.array([[-1, 0, 0.0], [1, 0, 0.0]]),
np.array([[-1, 1, 0.0], [1, 1, 0.0]]),
]
colors = np.array([[1.0, 0.0, 0.0], [0.3, 0.7, 0.0]])
tube1 = actor.streamtube([lines[0]], colors[0])
tube2 = actor.streamtube([lines[1]], colors[1])
scene.add(tube1)
scene.add(tube2)
# Define some counter callback.
states = defaultdict(lambda: 0)
def counter(iren, _obj):
states[iren.event.name] += 1
# Assign the counter callback to every possible event.
for event in [
'CharEvent',
'MouseMoveEvent',
'KeyPressEvent',
'KeyReleaseEvent',
'LeftButtonPressEvent',
'LeftButtonReleaseEvent',
'RightButtonPressEvent',
'RightButtonReleaseEvent',
'MiddleButtonPressEvent',
'MiddleButtonReleaseEvent',
]:
interactor_style.add_callback(tube1, event, counter)
# Add callback to scale up/down tube1.
def METHOD_NAME(iren, obj):
counter(iren, obj)
scale = np.asarray(obj.GetScale()) + 0.1
obj.SetScale(*scale)
iren.force_render()
iren.event.abort() # Stop propagating the event.
def scale_down_obj(iren, obj):
counter(iren, obj)
scale = np.array(obj.GetScale()) - 0.1
obj.SetScale(*scale)
iren.force_render()
iren.event.abort() # Stop propagating the event.
interactor_style.add_callback(tube2, 'MouseWheelForwardEvent', METHOD_NAME)
interactor_style.add_callback(tube2, 'MouseWheelBackwardEvent', scale_down_obj)
# Add callback to hide/show tube1.
def toggle_visibility(iren, obj):
key = iren.event.key
if key.lower() == 'v':
obj.SetVisibility(not obj.GetVisibility())
iren.force_render()
interactor_style.add_active_prop(tube1)
interactor_style.add_active_prop(tube2)
interactor_style.remove_active_prop(tube2)
interactor_style.add_callback(tube1, 'CharEvent', toggle_visibility)
if recording:
show_manager.record_events_to_file(recording_filename)
print(list(states.items()))
else:
show_manager.play_events_from_file(recording_filename)
msg = "Wrong count for '{}'."
expected = [
('CharEvent', 6),
('KeyPressEvent', 6),
('KeyReleaseEvent', 6),
('MouseMoveEvent', 1652),
('LeftButtonPressEvent', 1),
('RightButtonPressEvent', 1),
('MiddleButtonPressEvent', 2),
('LeftButtonReleaseEvent', 1),
('MouseWheelForwardEvent', 3),
('MouseWheelBackwardEvent', 1),
('MiddleButtonReleaseEvent', 2),
('RightButtonReleaseEvent', 1),
]
# Useful loop for debugging.
for event, count in expected:
if states[event] != count:
print('{}: {} vs. {} (expected)'.format(event, states[event], count))
for event, count in expected:
npt.assert_equal(states[event], count, err_msg=msg.format(event))
def test_double_click_events(recording=False):
filename = 'test_double_click_events.log.gz'
recording_filename = pjoin(DATA_DIR, filename)
label = ui.TextBlock2D(
position=(400, 780),
font_size=40,
color=(1, 0.5, 0),
justification='center',
vertical_justification='top',
text='FURY rocks!!!',
)
cube = actor.cube(
np.array([(0, 0, 0)]),
np.array([(0.16526678, 0.0186237, 0.01906076)]),
(1, 1, 1),
scales=3,
)
states = defaultdict(lambda: 0)
def left_single_click(iren, obj):
states['LeftButtonPressEvent'] += 1
iren.force_render()
def left_double_click(iren, obj):
states['LeftButtonDoubleClickEvent'] += 1
label.color = (1, 0, 0)
iren.force_render()
def right_single_click(iren, obj):
states['RightButtonPressEvent'] += 1
iren.force_render()
def right_double_click(iren, obj):
states['RightButtonDoubleClickEvent'] += 1
label.color = (0, 1, 0)
iren.force_render()
def middle_single_click(iren, obj):
states['MiddleButtonPressEvent'] += 1
iren.force_render()
def middle_double_click(iren, obj):
states['MiddleButtonDoubleClickEvent'] += 1
label.color = (0, 0, 1)
iren.force_render()
test_events = {
'LeftButtonPressEvent': left_single_click,
'LeftButtonDoubleClickEvent': left_double_click,
'RightButtonPressEvent': right_single_click,
'RightButtonDoubleClickEvent': right_double_click,
'MiddleButtonPressEvent': middle_single_click,
'MiddleButtonDoubleClickEvent': middle_double_click,
}
current_size = (800, 800)
showm = window.ShowManager(size=current_size, title='Double Click Test')
showm.scene.add(cube)
showm.scene.add(label)
for test_event, callback in test_events.items():
showm.style.add_callback(cube, test_event, callback)
if recording:
showm.record_events_to_file(recording_filename)
print(list(states.items()))
else:
showm.play_events_from_file(recording_filename)
msg = "Wrong count for '{}'."
expected = [
('LeftButtonPressEvent', 3),
('LeftButtonDoubleClickEvent', 1),
('MiddleButtonPressEvent', 3),
('MiddleButtonDoubleClickEvent', 1),
('RightButtonPressEvent', 2),
('RightButtonDoubleClickEvent', 1),
]
# Useful loop for debugging.
for event, count in expected:
if states[event] != count:
print('{}: {} vs. {} (expected)'.format(event, states[event], count))
for event, count in expected:
npt.assert_equal(states[event], count, err_msg=msg.format(event))
if __name__ == '__main__':
test_custom_interactor_style_events(recording=False)
test_double_click_events(recording=False) |
6,989 | outputs format | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
import traceback
from functools import partial
from copy import deepcopy
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from pipeline.core.flow.activity import Service
from pipeline.core.flow.io import StringItemSchema, ArrayItemSchema, IntItemSchema, ObjectItemSchema
from pipeline.component_framework.component import Component
from pipeline_plugins.components.collections.sites.open.cc.base import cc_format_tree_mode_id, cc_format_prop_data
from pipeline_plugins.base.utils.inject import supplier_account_for_business
from gcloud.conf import settings
from gcloud.utils.handlers import handle_api_error
logger = logging.getLogger("celery")
get_client_by_user = settings.ESB_GET_CLIENT_BY_USER
__group_name__ = _("配置平台(CMDB)")
VERSION = "v1.0"
BREAK_LINE = "\n"
cc_handle_api_error = partial(handle_api_error, __group_name__)
def chunk_table_data(column):
"""
@summary: 表格参数值支持以换行符 `\n` 分隔的多条数据,对一行数据,当有一列有多条数据时(包含换行符),其他列要么也有相等个数的
数据(换行符个数相等),要么只有一条数据(不包含换行符,此时表示多条数据此列参数值都相同)
@param column: 表格单行数据,字典格式
@return:
"""
count = 1
chunk_data = []
multiple_keys = []
for key, value in column.items():
if not isinstance(value, str):
# 如果列类型为int,则跳过处理
if isinstance(value, int):
continue
else:
return {"result": False, "message": _("数据[%s]格式错误,请改为字符串") % value, "data": []}
value = value.strip()
if BREAK_LINE in value:
multiple_keys.append(key)
value = value.split(BREAK_LINE)
if len(value) != count and count != 1:
message = _(f"非法请求: [单行自动扩展]中, [{value}] 按分隔符分割后的行数不一致, 请修复后重试")
logger.error(message)
return {"result": False, "message": message, "data": []}
count = len(value)
column[key] = value
if count == 1:
return {"result": True, "data": [column], "message": ""}
for i in range(count):
item = deepcopy(column)
for key in multiple_keys:
item[key] = column[key][i]
chunk_data.append(item)
return {"result": True, "data": chunk_data, "message": ""}
class CCCreateSetService(Service):
def inputs_format(self):
return [
self.InputItem(
name=_("业务 ID"),
key="biz_cc_id",
type="string",
schema=StringItemSchema(description=_("当前操作所属的 CMDB 业务 ID")),
),
self.InputItem(
name=_("父实例"),
key="cc_set_parent_select",
type="array",
schema=ArrayItemSchema(description=_("父实例 ID 列表"), item_schema=IntItemSchema(description=_("实例 ID"))),
),
self.InputItem(
name=_("集群信息"),
key="cc_set_info",
type="array",
schema=ArrayItemSchema(
description=_("新集群信息对象列表"),
item_schema=ObjectItemSchema(description=_("集群信息描述对象"), property_schemas={}),
),
),
]
def METHOD_NAME(self):
return []
def execute(self, data, parent_data):
executor = parent_data.get_one_of_inputs("executor")
client = get_client_by_user(executor)
if parent_data.get_one_of_inputs("language"):
setattr(client, "language", parent_data.get_one_of_inputs("language"))
translation.activate(parent_data.get_one_of_inputs("language"))
biz_cc_id = data.get_one_of_inputs("biz_cc_id", parent_data.inputs.biz_cc_id)
supplier_account = supplier_account_for_business(biz_cc_id)
cc_set_parent_select = cc_format_tree_mode_id(data.get_one_of_inputs("cc_set_parent_select"))
cc_set_info = deepcopy(data.get_one_of_inputs("cc_set_info"))
bk_set_env = cc_format_prop_data(
executor, "set", "bk_set_env", parent_data.get_one_of_inputs("language"), supplier_account
)
if not bk_set_env["result"]:
data.set_outputs("ex_data", bk_set_env["message"])
return False
bk_service_status = cc_format_prop_data(
executor, "set", "bk_service_status", parent_data.get_one_of_inputs("language"), supplier_account
)
if not bk_service_status["result"]:
data.set_outputs("ex_data", bk_service_status["message"])
return False
set_list = []
for set_params in cc_set_info:
chunk_result = chunk_table_data(set_params)
if not chunk_result["result"]:
data.set_outputs("ex_data", chunk_result["message"])
return False
for property_data in chunk_result["data"]:
set_property = {}
for key, value in property_data.items():
if value:
if key == "bk_set_env" and value not in bk_set_env["data"].values():
data.set_outputs("ex_data", _("环境类型校验失败,请重试并修改为正确的环境类型"))
return False
elif key == "bk_service_status" and value not in bk_service_status["data"].values():
data.set_outputs("ex_data", _("服务状态校验失败,请重试并修改为正确的服务状态"))
return False
elif key == "bk_capacity":
try:
value = int(value)
except ValueError:
self.logger.error(traceback.format_exc())
data.set_outputs("ex_data", _("集群容量必须为整数"))
return False
set_property[key] = value
set_list.append(set_property)
for parent_id in cc_set_parent_select:
for set_data in set_list:
cc_kwargs = {
"bk_biz_id": biz_cc_id,
"bk_supplier_account": supplier_account,
"data": {"bk_parent_id": parent_id},
}
cc_kwargs["data"].update(set_data)
cc_result = client.cc.create_set(cc_kwargs)
if not cc_result["result"]:
message = cc_handle_api_error("cc.create_set", cc_kwargs, cc_result)
self.logger.error(message)
data.set_outputs("ex_data", message)
return False
return True
class CCCreateSetComponent(Component):
"""
@version log(v1.0): 表格参数值支持以换行符 `\n` 分隔的多条数据,对一行数据,当有一列有多条数据时(包含换行符),
其他列要么也有相等个数的数据(换行符个数相等),要么只有一条数据(不包含换行符,此时表示多条数据此列参数值都相同)
"""
name = _("创建集群")
code = "cc_create_set"
bound_service = CCCreateSetService
form = "{static_url}components/atoms/cc/create_set/{ver}.js".format(
static_url=settings.STATIC_URL, ver=VERSION.replace(".", "_")
)
version = VERSION
desc = _(
"1. 填参方式支持手动填写和结合模板生成(单行自动扩展)\n"
"2. 使用单行自动扩展模式时,每一行支持填写多个已自定义分隔符或是英文逗号分隔的数据,"
'插件后台会自动将其扩展成多行,如 "1,2,3,4" 会被扩展成四行:1 2 3 4\n'
"3. 结合模板生成(单行自动扩展)当有一列有多条数据时,其他列要么也有相等个数的数据,要么只有一条数据"
) |
6,990 | get abs pos | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = [
"window_partition",
"window_unpartition",
"add_decomposed_rel_pos",
"get_abs_pos",
"PatchEmbed",
]
def window_partition(x, window_size):
"""
Partition into non-overlapping windows with padding if needed.
Args:
x (tensor): input tokens with [B, H, W, C].
window_size (int): window size.
Returns:
windows: windows after partition with [B * num_windows, window_size, window_size, C].
(Hp, Wp): padded height and width before partition
"""
B, H, W, C = x.shape
pad_h = (window_size - H % window_size) % window_size
pad_w = (window_size - W % window_size) % window_size
if pad_h > 0 or pad_w > 0:
x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
Hp, Wp = H + pad_h, W + pad_w
x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows, (Hp, Wp)
def window_unpartition(windows, window_size, pad_hw, hw):
"""
Window unpartition into original sequences and removing padding.
Args:
x (tensor): input tokens with [B * num_windows, window_size, window_size, C].
window_size (int): window size.
pad_hw (Tuple): padded height and width (Hp, Wp).
hw (Tuple): original height and width (H, W) before padding.
Returns:
x: unpartitioned sequences with [B, H, W, C].
"""
Hp, Wp = pad_hw
H, W = hw
B = windows.shape[0] // (Hp * Wp // window_size // window_size)
x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
if Hp > H or Wp > W:
x = x[:, :H, :W, :].contiguous()
return x
def get_rel_pos(q_size, k_size, rel_pos):
"""
Get relative positional embeddings according to the relative positions of
query and key sizes.
Args:
q_size (int): size of query q.
k_size (int): size of key k.
rel_pos (Tensor): relative position embeddings (L, C).
Returns:
Extracted positional embeddings according to relative positions.
"""
max_rel_dist = int(2 * max(q_size, k_size) - 1)
# Interpolate rel pos if needed.
if rel_pos.shape[0] != max_rel_dist:
# Interpolate rel pos.
rel_pos_resized = F.interpolate(
rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
size=max_rel_dist,
mode="linear",
)
rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
else:
rel_pos_resized = rel_pos
# Scale the coords with short length if shapes for q and k are different.
q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
return rel_pos_resized[relative_coords.long()]
def add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, q_size, k_size):
"""
Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950
Args:
attn (Tensor): attention map.
q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
k_size (Tuple): spatial sequence size of key k with (k_h, k_w).
Returns:
attn (Tensor): attention map with added relative positional embeddings.
"""
q_h, q_w = q_size
k_h, k_w = k_size
Rh = get_rel_pos(q_h, k_h, rel_pos_h)
Rw = get_rel_pos(q_w, k_w, rel_pos_w)
B, _, dim = q.shape
r_q = q.reshape(B, q_h, q_w, dim)
rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
attn = (
attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
).view(B, q_h * q_w, k_h * k_w)
return attn
def METHOD_NAME(abs_pos, has_cls_token, hw):
"""
Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token
dimension for the original embeddings.
Args:
abs_pos (Tensor): absolute positional embeddings with (1, num_position, C).
has_cls_token (bool): If true, has 1 embedding in abs_pos for cls token.
hw (Tuple): size of input image tokens.
Returns:
Absolute positional embeddings after processing with shape (1, H, W, C)
"""
h, w = hw
if has_cls_token:
abs_pos = abs_pos[:, 1:]
xy_num = abs_pos.shape[1]
size = int(math.sqrt(xy_num))
assert size * size == xy_num
if size != h or size != w:
new_abs_pos = F.interpolate(
abs_pos.reshape(1, size, size, -1).permute(0, 3, 1, 2),
size=(h, w),
mode="bicubic",
align_corners=False,
)
return new_abs_pos.permute(0, 2, 3, 1)
else:
return abs_pos.reshape(1, h, w, -1)
class PatchEmbed(nn.Module):
"""
Image to Patch Embedding.
"""
def __init__(
self, kernel_size=(16, 16), stride=(16, 16), padding=(0, 0), in_chans=3, embed_dim=768
):
"""
Args:
kernel_size (Tuple): kernel size of the projection layer.
stride (Tuple): stride of the projection layer.
padding (Tuple): padding size of the projection layer.
in_chans (int): Number of input image channels.
embed_dim (int): embed_dim (int): Patch embedding dimension.
"""
super().__init__()
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
)
def forward(self, x):
x = self.proj(x)
# B C H W -> B H W C
x = x.permute(0, 2, 3, 1)
return x |
6,991 | setup | # encoding: utf-8
'''A Sphinx extension to automatically document CKAN's crazy plugins toolkit,
autodoc-style.
Sphinx's autodoc extension can document modules or classes, but although it
masquerades as a module CKAN's plugins toolkit is actually neither a module nor
a class, it's an object-instance of a class, and it's an object with weird
__getattr__ behavior too. Autodoc can't handle it, so we have this custom
Sphinx extension to automate documenting it instead.
This extension plugs into the reading phase of the Sphinx build. It intercepts
the 'toolkit' document (extensions/plugins-toolkit.rst) after Sphinx has read
the reStructuredText source from file. It modifies the source, adding in Sphinx
directives for everything in the plugins toolkit, and then the Sphinx build
continues as normal (just as if the generated reStructuredText had been entered
into plugins-toolkit.rst manually before running Sphinx).
'''
import inspect
from typing import Any, Callable, Optional
import ckan.plugins.toolkit as toolkit
def METHOD_NAME(app: Any):
'''Setup this Sphinx extension. Called once when initializing Sphinx.
'''
# Connect to Sphinx's source-read event, the callback function will be
# called after each source file is read.
app.connect('source-read', source_read)
def format_function(name: str,
function: Callable[..., Any],
docstring: Optional[str] = None) -> str:
'''Return a Sphinx .. function:: directive for the given function.
The directive includes the function's docstring if it has one.
:param name: the name to give to the function in the directive,
eg. 'get_converter'
:type name: string
:param function: the function itself
:type function: function
:param docstring: if given, use this instead of introspecting the function
to find its actual docstring
:type docstring: string
:returns: a Sphinx .. function:: directive for the function
:rtype: string
'''
# The template we'll use to render the Sphinx function directive.
template = ('.. py:function:: ckan.plugins.toolkit.{function}{args}\n'
'\n'
'{docstring}\n'
'\n')
# Get the arguments of the function, as a string like:
# "(foo, bar=None, ...)"
argstring = str(inspect.signature(function))
docstring = docstring or inspect.getdoc(function)
if docstring is None:
docstring = ''
else:
# Indent the docstring by 3 spaces, as needed for the Sphinx directive.
docstring = '\n'.join([' ' + line for line in docstring.split('\n')])
return template.format(function=name, args=argstring, docstring=docstring)
def format_class(
name: str, class_: Any,
docstring: Optional[str] = None) -> str:
'''Return a Sphinx .. class:: directive for the given class.
The directive includes the class's docstring if it has one.
:param name: the name to give to the class in the directive,
eg. 'DefaultDatasetForm'
:type name: string
:param class_: the class itself
:type class_: class
:param docstring: if given, use this instead of introspecting the class
to find its actual docstring
:type docstring: string
:returns: a Sphinx .. class:: directive for the class
:rtype: string
'''
# The template we'll use to render the Sphinx class directive.
template = ('.. py:class:: ckan.plugins.toolkit.{cls}\n'
'\n'
'{docstring}\n'
'\n')
docstring = docstring or inspect.getdoc(class_)
if docstring is None:
docstring = ''
else:
# Indent the docstring by 3 spaces, as needed for the Sphinx directive.
docstring = '\n'.join([' ' + line for line in docstring.split('\n')])
return template.format(cls=name, docstring=docstring)
def format_object(
name: str, object_: Any, docstring: Optional[str] = None) -> str:
'''Return a Sphinx .. attribute:: directive for the given object.
The directive includes the object's class's docstring if it has one.
:param name: the name to give to the object in the directive,
eg. 'request'
:type name: string
:param object_: the object itself
:type object_: object
:param docstring: if given, use this instead of introspecting the object
to find its actual docstring
:type docstring: string
:returns: a Sphinx .. attribute:: directive for the object
:rtype: string
'''
# The template we'll use to render the Sphinx attribute directive.
template = ('.. py:attribute:: ckan.plugins.toolkit.{obj}\n'
'\n'
'{docstring}\n'
'\n')
docstring = docstring or inspect.getdoc(object_)
if docstring is None:
docstring = ''
else:
# Indent the docstring by 3 spaces, as needed for the Sphinx directive.
docstring = '\n'.join([' ' + line for line in docstring.split('\n')])
return template.format(obj=name, docstring=docstring)
def source_read(app: Any, docname: str, source: Any) -> None:
'''Transform the contents of plugins-toolkit.rst to contain reference docs.
'''
# We're only interested in the 'plugins-toolkit' doc (plugins-toolkit.rst).
if docname != 'extensions/plugins-toolkit':
return
source_ = '\n'
for name, thing in inspect.getmembers(toolkit):
if name not in toolkit.__all__:
continue
# The plugins toolkit can override the docstrings of some of its
# members (e.g. things that are imported from third-party libraries)
# by putting custom docstrings in this docstring_overrides dict.
custom_docstring = toolkit.docstring_overrides.get(name)
if inspect.isfunction(thing):
source_ += format_function(name, thing, docstring=custom_docstring)
elif inspect.ismethod(thing):
# We document plugins toolkit methods as if they're functions. This
# is correct because the class ckan.plugins.toolkit._Toolkit
# actually masquerades as a module ckan.plugins.toolkit, and you
# call its methods as if they were functions.
source_ += format_function(name, thing, docstring=custom_docstring)
elif inspect.isclass(thing):
source_ += format_class(name, thing, docstring=custom_docstring)
elif isinstance(thing, object):
source_ += format_object(name, thing, docstring=custom_docstring)
else:
assert False, ("Someone added {name}:{thing} to the plugins "
"toolkit and this Sphinx extension doesn't know "
"how to document that yet. If you're that someone, "
"you need to add a new format_*() function for it "
"here or the docs won't build.".format(
name=name, thing=thing))
source[0] += source_
# This is useful for debugging the generated RST.
# open('/tmp/source', 'w').write(source[0]) |
6,992 | private link service connection state | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionsForSCCPowershellResult',
'AwaitableGetPrivateEndpointConnectionsForSCCPowershellResult',
'get_private_endpoint_connections_for_scc_powershell',
'get_private_endpoint_connections_for_scc_powershell_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionsForSCCPowershellResult:
"""
The Private Endpoint Connection resource.
"""
def __init__(__self__, id=None, name=None, private_endpoint=None, METHOD_NAME=None, provisioning_state=None, system_data=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if private_endpoint and not isinstance(private_endpoint, dict):
raise TypeError("Expected argument 'private_endpoint' to be a dict")
pulumi.set(__self__, "private_endpoint", private_endpoint)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
pulumi.set(__self__, "private_link_service_connection_state", METHOD_NAME)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
"""
The resource of private end point.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def METHOD_NAME(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the private endpoint connection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Required property for system data
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionsForSCCPowershellResult(GetPrivateEndpointConnectionsForSCCPowershellResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateEndpointConnectionsForSCCPowershellResult(
id=self.id,
name=self.name,
private_endpoint=self.private_endpoint,
METHOD_NAME=self.METHOD_NAME,
provisioning_state=self.provisioning_state,
system_data=self.system_data,
type=self.type)
def get_private_endpoint_connections_for_scc_powershell(private_endpoint_connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionsForSCCPowershellResult:
"""
Gets the specified private endpoint connection associated with the service.
Azure REST API version: 2021-03-25-preview.
:param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
:param str resource_group_name: The name of the resource group that contains the service instance.
:param str resource_name: The name of the service instance.
"""
__args__ = dict()
__args__['privateEndpointConnectionName'] = private_endpoint_connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:m365securityandcompliance:getPrivateEndpointConnectionsForSCCPowershell', __args__, opts=opts, typ=GetPrivateEndpointConnectionsForSCCPowershellResult).value
return AwaitableGetPrivateEndpointConnectionsForSCCPowershellResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
private_endpoint=pulumi.get(__ret__, 'private_endpoint'),
METHOD_NAME=pulumi.get(__ret__, 'private_link_service_connection_state'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_private_endpoint_connections_for_scc_powershell)
def get_private_endpoint_connections_for_scc_powershell_output(private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionsForSCCPowershellResult]:
"""
Gets the specified private endpoint connection associated with the service.
Azure REST API version: 2021-03-25-preview.
:param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
:param str resource_group_name: The name of the resource group that contains the service instance.
:param str resource_name: The name of the service instance.
"""
... |
6,993 | extract translation units | #
# Copyright 2023 Zuza Software Foundation & Anders Kaplan
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
"""Convert Markdown files to Gettext PO localization files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/md2po.html
for examples and usage instructions.
"""
from translate.convert import convert
from translate.storage import markdown, po
class MD2POOptionParser(convert.ConvertOptionParser):
def __init__(self):
formats = {
"md": ("po", self.METHOD_NAME),
"markdown": ("po", self.METHOD_NAME),
"txt": ("po", self.METHOD_NAME),
"text": ("po", self.METHOD_NAME),
None: ("po", self.METHOD_NAME),
}
super().__init__(formats, usetemplates=False, usepots=True, description=__doc__)
self.add_duplicates_option()
self.add_multifile_option()
def METHOD_NAME(
self,
inputfile,
outputfile,
templates,
duplicatestyle,
multifilestyle,
):
if hasattr(self, "outputstore"):
self._parse_and_extract(inputfile, self.outputstore)
else:
store = po.pofile()
self._parse_and_extract(inputfile, store)
store.removeduplicates(duplicatestyle)
store.serialize(outputfile)
return 1
@staticmethod
def _parse_and_extract(inputfile, outputstore):
"""Extract translation units from a markdown file and add them to an existing message store (pofile object) without any further processing."""
parser = markdown.MarkdownFile(inputfile=inputfile)
for tu in parser.units:
storeunit = outputstore.addsourceunit(tu.source)
storeunit.addlocations(tu.getlocations())
def recursiveprocess(self, options):
"""Recurse through directories and process files. (override)"""
if options.multifilestyle == "onefile":
self.outputstore = po.pofile()
super().recursiveprocess(options)
if not self.outputstore.isempty():
outputfile = super().openoutputfile(options, options.output)
self.outputstore.removeduplicates(options.duplicatestyle)
self.outputstore.serialize(outputfile)
if options.output:
outputfile.close()
else:
super().recursiveprocess(options)
def isrecursive(self, fileoption, filepurpose="input"):
"""Check if fileoption is a recursive file. (override)"""
if hasattr(self, "outputstore") and filepurpose == "output":
return True
return super().isrecursive(fileoption, filepurpose=filepurpose)
def checkoutputsubdir(self, options, subdir):
"""Check if subdir under options.output needs to be created, and
create if neccessary. Do nothing if in single-output-file mode. (override)
"""
if hasattr(self, "outputstore"):
return
super().checkoutputsubdir(options, subdir)
def openoutputfile(self, options, fulloutputpath):
"""Open the output file, or do nothing if in single-output-file mode. (override)"""
if hasattr(self, "outputstore"):
return None
return super().openoutputfile(options, fulloutputpath)
def main(argv=None):
parser = MD2POOptionParser()
parser.run(argv)
if __name__ == "__main__":
main() |
6,994 | pre prepare client | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import asyncio
import os
import dotenv
import time
import logging
from logging.handlers import RotatingFileHandler
from azure.eventhub.aio import EventHubProducerClient
from azure.eventhub import EventData
logger = logging.getLogger('ASYNC_SEND_PERF_TEST')
logger.setLevel(logging.INFO)
logger.addHandler(RotatingFileHandler("async_send_perf_test.log"))
dotenv.load_dotenv()
CONN_STRS = [
os.environ["EVENT_HUB_CONN_STR_BASIC_NORTHEU"],
os.environ["EVENT_HUB_CONN_STR_STANDARD_NORTHEU"],
os.environ["EVENT_HUB_CONN_STR_BASIC_WESTUS2"],
os.environ["EVENT_HUB_CONN_STR_STANDARD_WESTUS2"]
]
EVENTHUB_NAME = "pyamqp"
SINGLE_EVENT_SIZE_LIST = [512]
PARALLEL_COROUTINE_COUNT_LIST = [1]
FIXED_AMOUNT_OF_EVENTS = 100_000
RUN_DURATION = 30
async def METHOD_NAME(client, data):
await client.create_batch() # precall to retrieve sender link settings
await client.send_batch([EventData(data)]) # precall to set up the sender link
async def send_batch_message(conn_str, eventhub_name, num_of_events, single_event_size, run_times=1, description=None):
client = EventHubProducerClient.from_connection_string(
conn_str=conn_str, eventhub_name=eventhub_name
)
data = b'a' * single_event_size
perf_records = []
await METHOD_NAME(client, data)
for _ in range(run_times): # run run_times and calculate the avg performance
start_time = time.time()
batch = await client.create_batch()
for _ in range(num_of_events):
try:
batch.add(EventData(data))
except ValueError:
# Batch full
await client.send_batch(batch)
batch = await client.create_batch()
batch.add(EventData(data))
await client.send_batch(batch)
end_time = time.time()
total_time = end_time - start_time
speed = num_of_events / total_time
perf_records.append(speed)
await client.close()
avg_perf = round(sum(perf_records) / len(perf_records), 2)
logger.info(
"Method: {}, The average performance is {} events/s, throughput: {} bytes/s, run times: {}.\n"
"Configs are: Num of events: {} events, Single message size: {} bytes.".format(
description or "send_batch_message",
avg_perf,
avg_perf * single_event_size,
run_times,
num_of_events,
single_event_size
)
)
return avg_perf
async def send_batch_message_worker_coroutine(client, data, run_flag):
total_cnt = 0
while run_flag[0]:
batch = await client.create_batch()
try:
while True:
event_data = EventData(body=data)
batch.add(event_data)
except ValueError:
await client.send_batch(batch)
total_cnt += len(batch)
return total_cnt
async def send_batch_message_in_parallel(conn_str, eventhub_name, single_event_size, parallel_coroutine_count=4, run_times=1, run_duration=30, description=None):
perf_records = []
for _ in range(run_times):
futures = []
clients = [
EventHubProducerClient.from_connection_string(
conn_str=conn_str, eventhub_name=eventhub_name
) for _ in range(parallel_coroutine_count)
]
data = b'a' * single_event_size
for client in clients:
await METHOD_NAME(client, data)
run_flag = [True]
for i in range(parallel_coroutine_count):
futures.append(asyncio.create_task(
send_batch_message_worker_coroutine(
clients[i],
data,
run_flag
)
))
await asyncio.sleep(run_duration)
run_flag[0] = False
await asyncio.gather(*futures)
perf_records.append(sum([future.result() for future in futures]) / run_duration)
for client in clients:
await client.close()
avg_perf = round(sum(perf_records) / len(perf_records), 2)
logger.info(
"Method: {}, The average performance is {} events/s, throughput: {} bytes/s, run times: {}.\n"
"Configs are: Single message size: {} bytes, Parallel thread count: {} threads, Run duration: {} seconds.".format(
description or "send_batch_message_in_parallel",
avg_perf,
avg_perf * single_event_size,
run_times,
single_event_size,
parallel_coroutine_count,
run_duration
)
)
if __name__ == '__main__':
logger.info('------------------- START OF TEST -------------------')
for conn_str in CONN_STRS:
for single_event_size in SINGLE_EVENT_SIZE_LIST:
print('------------------- sending fixed amount of message -------------------')
asyncio.run(
send_batch_message(
conn_str=conn_str,
eventhub_name=EVENTHUB_NAME,
num_of_events=FIXED_AMOUNT_OF_EVENTS,
single_event_size=single_event_size,
description='sending fixed amount message'
)
)
for parallel_coroutine_count in PARALLEL_COROUTINE_COUNT_LIST:
for single_event_size in SINGLE_EVENT_SIZE_LIST:
print('------------------- multiple coroutines sending messages for a fixed period -------------------')
asyncio.run(
send_batch_message_in_parallel(
conn_str=conn_str,
eventhub_name=EVENTHUB_NAME,
single_event_size=single_event_size,
parallel_coroutine_count=parallel_coroutine_count,
run_duration=RUN_DURATION,
description='multiple coroutine sending messages'
)
)
logger.info('------------------- END OF TEST -------------------') |
6,995 | extract data | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-08-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.Storage/operations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.storage.v2021_08_01.StorageManagementClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
"""Lists all of the available Storage Rest API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Operation or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2021_08_01.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2021-08-01"))
cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def METHOD_NAME(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, METHOD_NAME)
list.metadata = {"url": "/providers/Microsoft.Storage/operations"} |
6,996 | on failed log | import logging
import operator
import socket
from contextlib import contextmanager
from typing import Any, Callable, Mapping, MutableMapping, Optional, cast
from mode import Service
from faust.types import AppT
from . import states
from .app import send_update
from .models import Status
CHECK_FREQUENCY = 5.0
class Check(Service):
description: str = ""
# This can be used to format the "current value" in error logs.
# If not set it will just use ``repr(value)``.
prev_value_repr: Optional[str] = None
current_value_repr: Optional[str] = None
state_to_severity = {
states.SLOW: logging.WARNING,
states.STALL: logging.ERROR,
states.FAIL: logging.CRITICAL,
}
faults_to_state = [
(6, states.STALL),
(3, states.SLOW),
(0, states.OK),
]
default_operator = None
def __init__(
self,
name: str,
get_value: Callable[[], Any] = None,
operator: Callable[[Any, Any], bool] = None,
**kwargs: Any,
):
self.name = name
self._get_value = cast(Callable[[], Any], get_value)
if operator is None:
operator = self.default_operator
self.operator = operator
self.faults = 0
self.prev_value = None
self.status = states.OK
self.interval_skew = 0.0
self.app = None
super().__init__(**kwargs)
def to_representation(self, app, severity) -> Status:
return Status(
app_id=app.conf.id,
hostname=socket.gethostname(),
category=self.name,
color=self.color,
count=self.faults,
state=self.status,
severity=logging.getLevelName(severity),
)
def asdict(self) -> Mapping[str, Any]:
return {
"state": self.status,
"color": self.color,
"faults": self.faults,
}
def get_value(self) -> Any:
if self._get_value is not None:
return self._get_value()
raise NotImplementedError()
async def on_rebalancing(self, app: AppT):
self.status = states.REBALANCING
await send_update(app, self.to_representation(app, logging.INFO))
async def on_unassigned(self, app: AppT):
self.status = states.UNASSIGNED
await send_update(app, self.to_representation(app, logging.INFO))
async def on_paused(self, app: AppT):
self.status = states.PAUSED
await send_update(app, self.to_representation(app, logging.INFO))
async def check(self, app: AppT) -> None:
current_value: Any = self.get_value()
prev_value = self.prev_value
severity = app.log.info
try:
if prev_value is not None:
if self.compare(prev_value, current_value):
self.faults += 1
self.status = self.get_state_for_faults(self.faults)
severity = self.state_to_severity.get(self.status, logging.INFO)
await self.METHOD_NAME(severity, app, prev_value, current_value)
else:
self.faults = 0
self.status = states.OK
await self.on_ok_log(app, prev_value, current_value)
self.store_previous_value(current_value)
except Exception as exc:
print(f"ERROR: {exc!r}")
raise
def compare(self, prev_value: Any, current_value: Any):
return self.operator(current_value, prev_value)
def store_previous_value(self, current_value):
self.prev_value = current_value
async def METHOD_NAME(
self, severity: int, app: AppT, prev_value: Any, current_value: Any
) -> None:
await send_update(app, self.to_representation(app, severity))
prev_value_repr = self.prev_value_repr
current_value_repr = self.current_value_repr
if current_value_repr is None:
current_value_repr = repr(current_value)
if prev_value_repr is None:
prev_value_repr = repr(prev_value)
app.log.log(
severity,
"%s:%s %s (x%s): was %s now %s",
app.conf.id,
self.name,
self.negate_description,
self.faults,
prev_value_repr,
current_value_repr,
extra={"no_alert": True},
)
async def on_ok_log(self, app: AppT, prev_value: Any, current_value: Any) -> None:
await send_update(app, self.to_representation(app, logging.INFO))
app.log.info(
"%s:%s %s: was %s now %s",
app.conf.id,
self.name,
self.description,
prev_value,
current_value,
extra={"no_alert": True},
)
def get_state_for_faults(self, faults: int) -> str:
for level, state in self.faults_to_state:
if faults > level:
return state
return states.OK
@property
def color(self) -> str:
if self.status in states.OK_STATES:
return "green"
elif self.status in states.MAYBE_STATES:
return "yellow"
return "red"
@Service.task
async def _run_check(self) -> None:
try:
app = self.app
while not self.should_stop:
await self.sleep(CHECK_FREQUENCY)
if app.system_checks.paused:
await self.on_paused(app)
elif app.rebalancing:
await self.on_rebalancing(app)
elif app.unassigned:
await self.on_unassigned(app)
else:
await self.check(app)
except Exception as exc:
print(f"RUN CHECK RAISED: {exc!r}")
raise
@property
def label(self) -> str:
return f"{type(self).__name__}: {self.name}"
class Increasing(Check):
default_operator = operator.le
description = "increasing"
negate_description = "not increasing"
def _transitioned_to_false(previous: bool, current: bool) -> bool:
return not current
class Condition(Check):
description = "functional"
negate_description = "nonfunctional"
default_operator = _transitioned_to_false
faults_to_state = [
(1, states.FAIL),
(0, states.OK),
]
class Stationary(Check):
"""Monitors a value that should stand still, i.e, not going up or down."""
description = "functional"
negate_description = "increasing"
default_operator = operator.ne
class SystemChecks(Service):
checks: MutableMapping[str, Check]
current_skew = 0.0
paused: bool = False
def __init__(self, app: AppT, **kwargs: Any) -> None:
self.app = app
self.checks = {}
Service.__init__(self, **kwargs)
def on_init_dependencies(self):
return self.checks.values()
@contextmanager
def pause(self):
self.paused = True
try:
yield
finally:
self.paused = False
def add(self, check: Check) -> None:
self.checks[check.name] = check
self.current_skew += 0.2
check.interval_skew = self.current_skew
check.app = self.app
def remove(self, name: str) -> None:
self.checks.pop(name, None) |
6,997 | test inverse | """
ckwg +31
Copyright 2020 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Tests for Python interface to vital::transform_2d
"""
import numpy as np
import nose.tools as nt
import unittest
from kwiver.vital.tests.py_helpers import no_call_pure_virtual_method
from kwiver.vital.tests.cpp_helpers import transform_2d_helpers as t2dh
from kwiver.vital.types import Transform2D
class SimpleTransform2D(Transform2D):
def __init__(self, arr):
Transform2D.__init__(self)
self.arr = arr
def clone(self):
return SimpleTransform2D(self.arr.copy())
def map(self, p):
return p + 5
def inverse_(self):
return SimpleTransform2D(1/self.arr)
class TestVitalTransform2D(object):
# Note that clone and inverse_ are skipped. See binding code for explanation
def test_bad_call_virtual_map(self):
t = Transform2D()
no_call_pure_virtual_method(t.map, np.array([2, 4]))
def test_pure_virt_inverse(self):
t = Transform2D()
with nt.assert_raises_regexp(
AttributeError, "'kwiver.vital.types.transform_2d.Transform2D' object has no attribute 'inverse_'",
):
t.inverse()
def test_is_instance(self):
st = SimpleTransform2D(np.array([2, 4]))
nt.ok_(isinstance(st, Transform2D))
class TestVitalTransform2DSubclass(unittest.TestCase):
def METHOD_NAME(self):
st = SimpleTransform2D(np.array([2, 4]))
st_inverse = st.inverse()
np.testing.assert_array_equal(st_inverse.arr, np.array([0.5, 0.25]))
# Make sure instance wasn't sliced
nt.ok_(isinstance(st_inverse, SimpleTransform2D))
# Now test bouncing back to the cpp side, and back with no slicing
st_inverse_2 = t2dh.call_inverse(st)
self.assertIsInstance(st_inverse_2, SimpleTransform2D)
np.testing.assert_array_equal(st_inverse_2.arr, np.array([0.5, 0.25]))
def test_clone(self):
st = SimpleTransform2D(np.array([2, 4]))
st_clone = st.clone()
self.assertIsInstance(st_clone, SimpleTransform2D)
st_clone_2 = t2dh.call_clone(st)
self.assertIsInstance(st_clone_2, SimpleTransform2D)
np.testing.assert_array_equal(st_clone_2.arr, st.arr)
def test_map(self):
st = SimpleTransform2D(np.array([2, 4]))
np.testing.assert_array_equal(st.map(np.array([-5, 5])), np.array([0, 10]))
np.testing.assert_array_equal(t2dh.call_map(st, np.array([-5, 5])), np.array([0, 10])) |
6,998 | get course in cache v2 | """
Asynchronous tasks related to the Course Blocks sub-application.
"""
import logging
from celery import shared_task
from django.conf import settings
from edx_django_utils.monitoring import set_code_owner_attribute
from edxval.api import ValInternalError
from lxml.etree import XMLSyntaxError
from opaque_keys.edx.keys import CourseKey
from xmodule.capa.responsetypes import LoncapaProblemError
from openedx.core.djangoapps.content.block_structure import api
from openedx.core.djangoapps.content.block_structure.config import enable_storage_backing_for_cache_in_request
from xmodule.modulestore.exceptions import ItemNotFoundError # lint-amnesty, pylint: disable=wrong-import-order
log = logging.getLogger('edx.celery.task')
# TODO: TNL-5799 is ongoing; narrow these lists down until the general exception is no longer needed
RETRY_TASKS = (ItemNotFoundError, TypeError, ValInternalError)
NO_RETRY_TASKS = (XMLSyntaxError, LoncapaProblemError, UnicodeEncodeError)
def block_structure_task(**kwargs):
"""
Decorator for block structure tasks.
"""
return shared_task(
default_retry_delay=settings.BLOCK_STRUCTURES_SETTINGS['TASK_DEFAULT_RETRY_DELAY'],
max_retries=settings.BLOCK_STRUCTURES_SETTINGS['TASK_MAX_RETRIES'],
bind=True,
**kwargs
)
@block_structure_task()
@set_code_owner_attribute
def update_course_in_cache_v2(self, **kwargs):
"""
Updates the course blocks (mongo -> BlockStructure) for the specified course.
Keyword Arguments:
course_id (string) - The string serialized value of the course key.
with_storage (boolean) - Whether or not storage backing should be
enabled for the generated block structure(s).
"""
_update_course_in_cache(self, **kwargs)
@block_structure_task()
@set_code_owner_attribute
def update_course_in_cache(self, course_id):
"""
Updates the course blocks (mongo -> BlockStructure) for the specified course.
"""
_update_course_in_cache(self, course_id=course_id)
def _update_course_in_cache(self, **kwargs):
"""
Updates the course blocks (mongo -> BlockStructure) for the specified course.
"""
if kwargs.get('with_storage'):
enable_storage_backing_for_cache_in_request()
_call_and_retry_if_needed(self, api.update_course_in_cache, **kwargs)
@block_structure_task()
@set_code_owner_attribute
def METHOD_NAME(self, **kwargs):
"""
Gets the course blocks for the specified course, updating the cache if needed.
Keyword Arguments:
course_id (string) - The string serialized value of the course key.
with_storage (boolean) - Whether or not storage backing should be
enabled for any generated block structure(s).
"""
_get_course_in_cache(self, **kwargs)
@block_structure_task()
@set_code_owner_attribute
def get_course_in_cache(self, course_id):
"""
Gets the course blocks for the specified course, updating the cache if needed.
"""
_get_course_in_cache(self, course_id=course_id)
def _get_course_in_cache(self, **kwargs):
"""
Gets the course blocks for the specified course, updating the cache if needed.
"""
if kwargs.get('with_storage'):
enable_storage_backing_for_cache_in_request()
_call_and_retry_if_needed(self, api.get_course_in_cache, **kwargs)
def _call_and_retry_if_needed(self, api_method, **kwargs):
"""
Calls the given api_method with the given course_id, retrying task_method upon failure.
"""
try:
course_key = CourseKey.from_string(kwargs['course_id'])
api_method(course_key)
except NO_RETRY_TASKS:
# Known unrecoverable errors
log.exception(
"BlockStructure: %s encountered unrecoverable error in course %s, task_id %s",
self.__name__,
kwargs.get('course_id'),
self.request.id,
)
raise
except RETRY_TASKS as exc:
log.exception("%s encountered expected error, retrying.", self.__name__)
raise self.retry(kwargs=kwargs, exc=exc)
except Exception as exc:
log.exception(
"BlockStructure: %s encountered unknown error in course %s, task_id %s. Retry #%d",
self.__name__,
kwargs.get('course_id'),
self.request.id,
self.request.retries,
)
raise self.retry(kwargs=kwargs, exc=exc) |
6,999 | test average dielectric constant | import os
import tempfile
import numpy as np
import pytest
from openff.units import unit
from openff.evaluator.forcefield import ParameterGradient, ParameterGradientKey
from openff.evaluator.protocols.analysis import (
AverageDielectricConstant,
AverageFreeEnergies,
AverageObservable,
ComputeDipoleMoments,
DecorrelateObservables,
DecorrelateTrajectory,
)
from openff.evaluator.protocols.forcefield import BuildSmirnoffSystem
from openff.evaluator.substances import Substance
from openff.evaluator.tests.utils import build_tip3p_smirnoff_force_field
from openff.evaluator.thermodynamics import ThermodynamicState
from openff.evaluator.utils import get_data_filename
from openff.evaluator.utils.observables import Observable, ObservableArray
from openff.evaluator.utils.timeseries import TimeSeriesStatistics
def test_average_observable():
with tempfile.TemporaryDirectory() as temporary_directory:
average_observable = AverageObservable("")
average_observable.observable = ObservableArray(1.0 * unit.kelvin)
average_observable.bootstrap_iterations = 1
average_observable.execute(temporary_directory)
assert np.isclose(average_observable.value.value, 1.0 * unit.kelvin)
def METHOD_NAME():
with tempfile.TemporaryDirectory() as temporary_directory:
average_observable = AverageDielectricConstant("")
average_observable.dipole_moments = ObservableArray(
np.zeros((1, 3)) * unit.elementary_charge * unit.nanometer
)
average_observable.volumes = ObservableArray(
np.ones((1, 1)) * unit.nanometer**3
)
average_observable.thermodynamic_state = ThermodynamicState(
298.15 * unit.kelvin, 1.0 * unit.atmosphere
)
average_observable.bootstrap_iterations = 1
average_observable.execute(temporary_directory)
assert np.isclose(average_observable.value.value, 1.0 * unit.dimensionless)
def test_average_free_energies_protocol():
"""Tests adding together two free energies."""
delta_g_one = Observable(
value=(-10.0 * unit.kilocalorie / unit.mole).plus_minus(
1.0 * unit.kilocalorie / unit.mole
),
gradients=[
ParameterGradient(
key=ParameterGradientKey("vdW", "[#6:1]", "sigma"),
value=0.1 * unit.kilocalorie / unit.mole / unit.angstrom,
)
],
)
delta_g_two = Observable(
value=(-20.0 * unit.kilocalorie / unit.mole).plus_minus(
2.0 * unit.kilocalorie / unit.mole
),
gradients=[
ParameterGradient(
key=ParameterGradientKey("vdW", "[#6:1]", "sigma"),
value=0.2 * unit.kilocalorie / unit.mole / unit.angstrom,
)
],
)
thermodynamic_state = ThermodynamicState(298 * unit.kelvin, 1 * unit.atmosphere)
sum_protocol = AverageFreeEnergies("")
sum_protocol.values = [delta_g_one, delta_g_two]
sum_protocol.thermodynamic_state = thermodynamic_state
sum_protocol.execute()
result_value = sum_protocol.result.value.to(unit.kilocalorie / unit.mole)
result_uncertainty = sum_protocol.result.error.to(unit.kilocalorie / unit.mole)
assert isinstance(sum_protocol.result, Observable)
assert result_value.magnitude == pytest.approx(-20.0, abs=0.2)
assert result_uncertainty.magnitude == pytest.approx(2.0, abs=0.2)
assert (
sum_protocol.confidence_intervals[0]
> result_value
> sum_protocol.confidence_intervals[1]
)
gradient_value = sum_protocol.result.gradients[0].value.to(
unit.kilocalorie / unit.mole / unit.angstrom
)
beta = 1.0 / (298.0 * unit.kelvin * unit.molar_gas_constant).to(
unit.kilocalorie / unit.mole
)
assert np.isclose(
gradient_value.magnitude,
(0.1 * np.exp(-beta.magnitude * -10.0) + 0.2 * np.exp(-beta.magnitude * -20.0))
/ (np.exp(-beta.magnitude * -10.0) + np.exp(-beta.magnitude * -20.0)),
)
def test_compute_dipole_moments(tmpdir):
coordinate_path = get_data_filename("test/trajectories/water.pdb")
trajectory_path = get_data_filename("test/trajectories/water.dcd")
# Build a system object for water
force_field_path = os.path.join(tmpdir, "ff.json")
with open(force_field_path, "w") as file:
file.write(build_tip3p_smirnoff_force_field().json())
assign_parameters = BuildSmirnoffSystem("")
assign_parameters.force_field_path = force_field_path
assign_parameters.coordinate_file_path = coordinate_path
assign_parameters.substance = Substance.from_components("O")
assign_parameters.execute(str(tmpdir))
# TODO - test gradients when TIP3P library charges added.
protocol = ComputeDipoleMoments("")
protocol.parameterized_system = assign_parameters.parameterized_system
protocol.trajectory_path = trajectory_path
protocol.execute(str(tmpdir))
assert len(protocol.dipole_moments) == 10
assert protocol.dipole_moments.value.shape[1] == 3
assert not np.allclose(
protocol.dipole_moments.value, 0.0 * unit.elementary_charge * unit.nanometers
)
def test_decorrelate_trajectory():
import mdtraj
coordinate_path = get_data_filename("test/trajectories/water.pdb")
trajectory_path = get_data_filename("test/trajectories/water.dcd")
with tempfile.TemporaryDirectory() as temporary_directory:
protocol = DecorrelateTrajectory("")
protocol.input_coordinate_file = coordinate_path
protocol.input_trajectory_path = trajectory_path
protocol.time_series_statistics = TimeSeriesStatistics(10, 4, 2.0, 2)
protocol.execute(temporary_directory)
final_trajectory = mdtraj.load(
protocol.output_trajectory_path, top=coordinate_path
)
assert len(final_trajectory) == 4
def test_decorrelate_observables():
with tempfile.TemporaryDirectory() as temporary_directory:
protocol = DecorrelateObservables("")
protocol.input_observables = ObservableArray(
np.ones((10, 1)) * unit.nanometer**3
)
protocol.time_series_statistics = TimeSeriesStatistics(10, 4, 2.0, 2)
protocol.execute(temporary_directory)
assert len(protocol.output_observables) == 4 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.