id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
#coding: UTF-8
import requests
import unittest
from contextlib import contextmanager
from nose.tools import assert_equal, assert_in # pylint: disable=E0611
from urllib.parse import quote
from tests.common.common import USERNAME, PASSWORD, \
ADMIN_USERNAME, ADMIN_PASSWORD
from tests.common.utils import apiurl, urljoin, randstring
from tests.api.urls import TOKEN_URL, GROUPS_URL, ACCOUNTS_URL, REPOS_URL
from seahub.base.accounts import User
class ApiTestBase(unittest.TestCase):
    """Base class for seahub API tests.

    Wraps ``requests`` with token-based authentication (as a normal user and
    as the admin), adds length-assertion helpers, and provides factories plus
    context managers for temporary repos, groups and users.
    """

    # Auth tokens are cached at class level so login happens at most once
    # per user kind across all tests.
    _token = None
    _admin_token = None

    username = USERNAME
    password = PASSWORD
    admin_username = ADMIN_USERNAME
    admin_password = ADMIN_PASSWORD

    @classmethod
    def get(cls, *args, **kwargs):
        return cls._req('GET', *args, **kwargs)

    @classmethod
    def post(cls, *args, **kwargs):
        return cls._req('POST', *args, **kwargs)

    @classmethod
    def put(cls, *args, **kwargs):
        return cls._req('PUT', *args, **kwargs)

    @classmethod
    def delete(cls, *args, **kwargs):
        return cls._req('DELETE', *args, **kwargs)

    @classmethod
    def admin_get(cls, *args, **kwargs):
        kwargs['admin'] = True
        return cls.get(*args, **kwargs)

    @classmethod
    def admin_post(cls, *args, **kwargs):
        kwargs['admin'] = True
        return cls.post(*args, **kwargs)

    @classmethod
    def admin_put(cls, *args, **kwargs):
        kwargs['admin'] = True
        return cls.put(*args, **kwargs)

    @classmethod
    def admin_delete(cls, *args, **kwargs):
        kwargs['admin'] = True
        return cls.delete(*args, **kwargs)

    @classmethod
    def _req(cls, method, *args, **kwargs):
        """Issue an HTTP request and assert on the response status code.

        Extra keyword arguments (popped before being passed to ``requests``):
            use_token: attach an ``Authorization`` header (default True).
            token: explicit token to use instead of the cached one.
            admin: authenticate as the admin user (default False).
            expected: expected status code, an iterable of acceptable codes,
                or None to skip the check (default 200).
        """
        use_token = kwargs.pop('use_token', True)
        token = kwargs.pop('token', None)
        if use_token and token is None:
            admin = kwargs.pop('admin', False)
            if admin:
                if cls._admin_token is None:
                    cls._admin_token = get_auth_token(ADMIN_USERNAME,
                                                      ADMIN_PASSWORD)
                token = cls._admin_token
            else:
                if cls._token is None:
                    cls._token = get_auth_token(USERNAME, PASSWORD)
                token = cls._token
        if use_token:
            headers = kwargs.get('headers', {})
            # setdefault keeps an explicitly supplied Authorization header.
            headers.setdefault('Authorization', 'Token ' + token)
            kwargs['headers'] = headers
        expected = kwargs.pop('expected', 200)
        resp = requests.request(method, *args, **kwargs)
        if expected is not None:
            if hasattr(expected, '__iter__'):
                assert_in(resp.status_code, expected,
                          "Expected http status in %s, received %s" % (expected,
                                                                       resp.status_code))
            else:
                assert_equal(resp.status_code, expected,
                             "Expected http status %s, received %s" % (expected,
                                                                       resp.status_code))
        return resp

    def assertHasLen(self, lst, length):
        """
        Assert a list/tuple/string has exact `length`
        """
        msg = 'Expected to have length %s, but length is %s' \
            % (length, len(lst))
        self.assertEqual(len(lst), length, msg)

    def assertNotEmpty(self, lst):
        """
        Assert a list/tuple/string is not empty
        """
        msg = 'Expected not empty, but it is'
        self.assertGreater(len(lst), 0, msg)

    @contextmanager
    def get_tmp_repo(self):
        """
        Context manager to create a tmp repo, and automatically delete it after use

        with self.get_tmp_repo() as repo:
            self.get(repo.file_url + '?p=/')
        """
        repo = self.create_repo()
        try:
            yield repo
        finally:
            self.remove_repo(repo.repo_id)

    @contextmanager
    def get_tmp_group(self):
        """
        Context manager to create a tmp group, and automatically delete it after use

        with self.get_tmp_group() as group:
            ...
        """
        group = self.create_group()
        try:
            yield group
        finally:
            self.remove_group(group.group_id)

    @contextmanager
    def get_tmp_user(self):
        """
        Context manager to create a tmp user, and automatically delete it after use

        with self.get_tmp_user() as user:
            ...
        """
        user = self.create_user()
        try:
            yield user
        finally:
            self.remove_user(user.user_name)

    def create_repo(self):
        """Create a library through the API; return a _Repo wrapper."""
        repo_name = '测试-test-repo-%s' % randstring(6)
        data = {
            'name': repo_name,
            'desc': 'just for test - 测试用资料库',
        }
        repo = self.post(REPOS_URL, data=data).json()
        repo_id = repo['repo_id']
        return _Repo(repo_id)

    def remove_repo(self, repo_id):
        repo_url = urljoin(REPOS_URL, repo_id)
        self.delete(repo_url)

    def create_group(self):
        """Create a group through the API; return a _Group wrapper."""
        group_name = '测试群组-%s' % randstring(16)
        data = {'group_name': group_name}
        group_id = self.put(GROUPS_URL, data=data).json()['group_id']
        return _Group(group_name, group_id)

    def remove_group(self, group_id):
        group_url = urljoin(GROUPS_URL, str(group_id))
        self.delete(group_url)

    def create_user(self):
        """Create a user directly through seahub's User model (not the API)."""
        username = '%s@test.com' % randstring(20)
        password = randstring(20)
        user = User(email=username)
        user.is_staff = False
        user.is_active = True
        user.set_password(password)
        user.save()
        return _User(username, password)

    def remove_user(self, username):
        # Deleting an account requires admin privileges.
        user_url = urljoin(ACCOUNTS_URL, username)
        self.admin_delete(user_url)

    def create_file(self, repo, fname=None):
        """Create a file at the repo root; return (fname, file_url)."""
        if isinstance(repo, str):
            repo = _Repo(repo)
        fname = fname or ('文件 %s.txt' % randstring())
        furl = repo.get_filepath_url('/' + fname)
        data = {'operation': 'create'}
        res = self.post(furl, data=data, expected=201)
        self.assertEqual(res.text, '"success"')
        return fname, furl

    def create_dir(self, repo):
        """Create a directory at the repo root; return (dpath, dir_url)."""
        data = {'operation': 'mkdir'}
        dpath = '/目录 %s' % randstring()
        durl = repo.get_dirpath_url(dpath)
        res = self.post(durl, data=data, expected=201)
        self.assertEqual(res.text, '"success"')
        return dpath, durl
def get_auth_token(username, password):
    """Log in via the token API and return the 40-character auth token."""
    payload = {
        'username': username,
        'password': password,
        'platform': 'linux',
        'device_id': '701143c1238e6736b61c20e73de82fc95989c413',
        'device_name': 'test',
    }
    response = requests.post(TOKEN_URL, data=payload)
    assert_equal(response.status_code, 200)
    auth_token = response.json()['token']
    assert_equal(len(auth_token), 40)
    return auth_token
class _Repo(object):
def __init__(self, repo_id):
self.repo_id = repo_id
self.repo_url = urljoin(REPOS_URL, self.repo_id)
self.file_url = urljoin(self.repo_url, 'file')
self.dir_url = urljoin(self.repo_url, 'dir')
def get_filepath_url(self, path):
query = '?p=%s' % quote(path)
return self.file_url + query
def get_dirpath_url(self, path):
query = '?p=%s' % quote(path)
return self.dir_url + query
class _Group(object):
    """Lightweight handle around a created group: name, id and API URL."""

    def __init__(self, group_name, group_id):
        self.group_id = group_id
        self.group_name = group_name
        self.group_url = urljoin(GROUPS_URL, str(group_id))
class _User(object):
    """Lightweight handle around a created user: credentials and API URL."""

    def __init__(self, username, password):
        self.user_name = username
        self.password = password
        # Fix: stray dataset-residue trailing "|" removed from this line.
        self.user_url = urljoin(ACCOUNTS_URL, username)
"""
Tests course_creators.admin.py.
"""
from unittest import mock
from django.contrib.admin.sites import AdminSite
from django.core import mail
from django.http import HttpRequest
from django.test import TestCase
from cms.djangoapps.course_creators.admin import CourseCreatorAdmin
from cms.djangoapps.course_creators.models import CourseCreator
from common.djangoapps.student import auth
from common.djangoapps.student.roles import CourseCreatorRole
from common.djangoapps.student.tests.factories import UserFactory
def mock_render_to_string(template_name, context):
    """Return a string that encodes template_name and context."""
    encoded = (template_name, context)
    return str(encoded)
class CourseCreatorAdminTest(TestCase):
    """
    Tests for course creator admin.
    """

    def setUp(self):
        """ Test case setup """
        super().setUp()
        self.user = UserFactory.create(
            username='test_user',
            email='test_user+courses@edx.org',
            password='foo',
        )
        self.table_entry = CourseCreator(user=self.user)
        self.table_entry.save()

        self.admin = UserFactory.create(
            username='Mark',
            email='admin+courses@edx.org',
            password='foo',
        )
        self.admin.is_staff = True

        self.request = HttpRequest()
        self.request.user = self.admin

        self.creator_admin = CourseCreatorAdmin(self.table_entry, AdminSite())

        self.studio_request_email = 'mark@marky.mark'
        self.enable_creator_group_patch = {
            "ENABLE_CREATOR_GROUP": True,
            "STUDIO_REQUEST_EMAIL": self.studio_request_email
        }

    @mock.patch(
        'cms.djangoapps.course_creators.admin.render_to_string',
        mock.Mock(side_effect=mock_render_to_string, autospec=True)
    )
    @mock.patch('django.contrib.auth.models.User.email_user')
    def test_change_status(self, email_user):
        """
        Tests that updates to state impact the creator group maintained in authz.py and that e-mails are sent.
        """

        def change_state_and_verify_email(state, is_creator):
            """ Changes user state, verifies creator status, and verifies e-mail is sent based on transition """
            self._change_state(state)
            self.assertEqual(is_creator, auth.user_has_role(self.user, CourseCreatorRole()))

            context = {'studio_request_email': self.studio_request_email}
            if state == CourseCreator.GRANTED:
                template = 'emails/course_creator_granted.txt'
            elif state == CourseCreator.DENIED:
                template = 'emails/course_creator_denied.txt'
            else:
                template = 'emails/course_creator_revoked.txt'
            email_user.assert_called_with(
                mock_render_to_string('emails/course_creator_subject.txt', context),
                mock_render_to_string(template, context),
                self.studio_request_email
            )

        with mock.patch.dict('django.conf.settings.FEATURES', self.enable_creator_group_patch):
            # User is initially unrequested.
            self.assertFalse(auth.user_has_role(self.user, CourseCreatorRole()))

            change_state_and_verify_email(CourseCreator.GRANTED, True)
            change_state_and_verify_email(CourseCreator.DENIED, False)
            change_state_and_verify_email(CourseCreator.GRANTED, True)
            change_state_and_verify_email(CourseCreator.PENDING, False)
            change_state_and_verify_email(CourseCreator.GRANTED, True)
            change_state_and_verify_email(CourseCreator.UNREQUESTED, False)
            change_state_and_verify_email(CourseCreator.DENIED, False)

    @mock.patch(
        'cms.djangoapps.course_creators.admin.render_to_string',
        mock.Mock(side_effect=mock_render_to_string, autospec=True)
    )
    def test_mail_admin_on_pending(self):
        """
        Tests that the admin account is notified when a user is in the 'pending' state.
        """

        def check_admin_message_state(state, expect_sent_to_admin, expect_sent_to_user):
            """ Changes user state and verifies e-mail sent to admin address only when pending. """
            mail.outbox = []
            self._change_state(state)

            # If a message is sent to the user about course creator status change, it will be the first
            # message sent. Admin message will follow.
            base_num_emails = 1 if expect_sent_to_user else 0
            if expect_sent_to_admin:
                context = {'user_name': 'test_user', 'user_email': 'test_user+courses@edx.org'}
                self.assertEqual(base_num_emails + 1, len(mail.outbox), 'Expected admin message to be sent')
                sent_mail = mail.outbox[base_num_emails]
                self.assertEqual(
                    mock_render_to_string('emails/course_creator_admin_subject.txt', context),
                    sent_mail.subject
                )
                self.assertEqual(
                    mock_render_to_string('emails/course_creator_admin_user_pending.txt', context),
                    sent_mail.body
                )
                self.assertEqual(self.studio_request_email, sent_mail.from_email)
                self.assertEqual([self.studio_request_email], sent_mail.to)
            else:
                self.assertEqual(base_num_emails, len(mail.outbox))

        with mock.patch.dict('django.conf.settings.FEATURES', self.enable_creator_group_patch):
            # E-mail message should be sent to admin only when new state is PENDING, regardless of what
            # previous state was (unless previous state was already PENDING).
            # E-mail message sent to user only on transition into and out of GRANTED state.
            check_admin_message_state(CourseCreator.UNREQUESTED, expect_sent_to_admin=False, expect_sent_to_user=False)
            check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=False)
            check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=False, expect_sent_to_user=False)
            check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True)

    def _change_state(self, state):
        """ Helper method for changing state """
        self.table_entry.state = state
        self.creator_admin.save_model(self.request, self.table_entry, None, True)

    def test_add_permission(self):
        """
        Tests that staff cannot add entries
        """
        self.assertFalse(self.creator_admin.has_add_permission(self.request))

    def test_delete_permission(self):
        """
        Tests that staff cannot delete entries
        """
        self.assertFalse(self.creator_admin.has_delete_permission(self.request))

    def test_change_permission(self):
        """
        Tests that only staff can change entries
        """
        self.assertTrue(self.creator_admin.has_change_permission(self.request))

        self.request.user = self.user
        self.assertFalse(self.creator_admin.has_change_permission(self.request))
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Utilities dealing with plugins and entry points."""
from __future__ import annotations
from importlib import import_module
from inspect import isclass, isfunction
from logging import Logger
from types import FunctionType
import typing as t
from aiida.common import AIIDA_LOGGER
from aiida.common.exceptions import EntryPointError
from .entry_point import load_entry_point_from_string
__all__ = ('PluginVersionProvider',)
KEY_VERSION_ROOT: str = 'version'
KEY_VERSION_CORE: str = 'core' # The version of `aiida-core`
KEY_VERSION_PLUGIN: str = 'plugin' # The version of the plugin top level module, e.g. `aiida-quantumespresso`
class PluginVersionProvider:
    """Utility class that determines version information about a given plugin resource."""

    def __init__(self):
        # Cache of version info, keyed by the plugin class/function itself.
        self._cache: dict[type | FunctionType, dict[t.Any, dict[t.Any, t.Any]]] = {}
        self._logger: Logger = AIIDA_LOGGER.getChild('plugin_version_provider')

    @property
    def logger(self) -> Logger:
        """Return the logger of this version provider."""
        return self._logger

    def get_version_info(self, plugin: str | type) -> dict[t.Any, dict[t.Any, t.Any]]:
        """Get the version information for a given plugin.

        .. note::

            This container will keep a cache, so if this method was already called for the given ``plugin`` before for
            this instance, the result computed at the last invocation will be returned.

        :param plugin: A class, function, or an entry point string. If the type is string, it will be assumed to be an
            entry point string and the class will attempt to load it first. It should be a full entry point string,
            including the entry point group.
        :return: Dictionary with the `version.core` and optionally `version.plugin` if it could be determined.
        :raises EntryPointError: If ``plugin`` is a string but could not be loaded as a valid entry point.
        :raises TypeError: If ``plugin`` (or the resource pointed to it in the case of an entry point) is not a class
            or a function.
        """
        from aiida import __version__ as version_core

        if isinstance(plugin, str):
            try:
                plugin = load_entry_point_from_string(plugin)
            except EntryPointError as exc:
                raise EntryPointError(f'got string `{plugin}` but could not load corresponding entry point') from exc

        if not isclass(plugin) and not isfunction(plugin):
            raise TypeError(f'`{plugin}` is not a class nor a function.')

        # If the `plugin` already exists in the cache, simply return it. On purpose we do not verify whether the version
        # information is completed. If it failed the first time, we don't retry. If the failure was temporarily, whoever
        # holds a reference to this instance can simply reconstruct it to start with a clean slate.
        if plugin in self._cache:
            return self._cache[plugin]

        self._cache[plugin] = {
            KEY_VERSION_ROOT: {
                KEY_VERSION_CORE: version_core,
            }
        }

        try:
            # The plugin version is taken from its top-level package, e.g.
            # `aiida_quantumespresso` for `aiida_quantumespresso.calculations.pw`.
            parent_module_name = plugin.__module__.split('.')[0]
            parent_module = import_module(parent_module_name)
        except (AttributeError, IndexError, ImportError):
            self.logger.debug(f'could not determine the top level module for plugin: {plugin}')
            return self._cache[plugin]

        try:
            version_plugin = parent_module.__version__
        except AttributeError:
            self.logger.debug(f'parent module does not define `__version__` attribute for plugin: {plugin}')
            return self._cache[plugin]

        self._cache[plugin][KEY_VERSION_ROOT][KEY_VERSION_PLUGIN] = version_plugin

        return self._cache[plugin]
# This file is part of the Open Data Cube, see https://opendatacube.org for more information
#
# Copyright (c) 2015-2023 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
import logging
import click
from click import echo, style
from sqlalchemy.exc import OperationalError
import datacube
from datacube import Datacube
from datacube.index import Index, index_connect
from datacube.drivers.postgres._connections import IndexSetupError
from datacube.ui import click as ui
from datacube.ui.click import cli, handle_exception
from datacube.config import LocalConfig
_LOG = logging.getLogger('datacube-system')
@cli.group(name='system', help='System commands')
def system():
    # Container group for `datacube system ...` subcommands; no body needed.
    pass
@system.command('init', help='Initialise the database')
@click.option(
    '--default-types/--no-default-types', is_flag=True, default=True,
    help="Add default types? (default: true)"
)
@click.option(
    '--init-users/--no-init-users', is_flag=True, default=True,
    help="Include user roles and grants. (default: true)"
)
@click.option(
    '--recreate-views/--no-recreate-views', is_flag=True, default=True,
    help="Recreate dynamic views"
)
@click.option(
    '--rebuild/--no-rebuild', is_flag=True, default=False,
    help="Rebuild all dynamic fields (caution: slow)"
)
@click.option(
    '--lock-table/--no-lock-table', is_flag=True, default=False,
    help="Allow table to be locked (eg. while creating missing indexes)"
)
@ui.pass_index(expect_initialised=False)
def database_init(index, default_types, init_users, recreate_views, rebuild, lock_table):
    """Create or update the index database schema, then verify its indexes/views."""
    echo('Initialising database...')

    was_created = index.init_db(with_default_types=default_types,
                                with_permissions=init_users)
    if was_created:
        echo(style('Created.', bold=True))
    else:
        echo(style('Updated.', bold=True))

    echo('Checking indexes/views.')
    index.metadata_types.check_field_indexes(
        allow_table_lock=lock_table,
        rebuild_indexes=rebuild,
        # A full rebuild implies the views must be recreated as well.
        rebuild_views=recreate_views or rebuild,
    )
    echo('Done.')
@system.command('check', help='Check and display current configuration')
@ui.pass_config
def check(local_config: LocalConfig):
    """
    Verify & view current configuration
    """
    def echo_field(name, value):
        # Left-align the field name in a 15-character column; value in bold.
        echo('{:<15}'.format(name + ':') + style(str(value), bold=True))

    echo_field('Version', datacube.__version__)
    echo_field('Config files', ','.join(local_config.files_loaded))
    echo_field('Host',
               '{}:{}'.format(local_config['db_hostname'] or 'localhost', local_config.get('db_port', None) or '5432'))
    echo_field('Database', local_config['db_database'])
    echo_field('User', local_config['db_username'])
    echo_field('Environment', local_config['env'])
    echo_field('Index Driver', local_config['index_driver'])

    echo()
    echo('Valid connection:\t', nl=False)
    try:
        index = index_connect(local_config=local_config)
        echo(style('YES', bold=True))
        # Report the database privileges held by the configured user.
        for role, user, description in index.users.list_users():
            if user == local_config['db_username']:
                echo('You have %s privileges.' % style(role.upper(), bold=True))
    except OperationalError as e:
        handle_exception('Error Connecting to Database: %s', e)
    except IndexSetupError as e:
        handle_exception('Database not initialised: %s', e)
@system.command('clone', help='Clone an existing ODC index into a new, empty index')
@click.option('--batch-size',
              help='Size of batches for bulk-adding to the new index',
              type=int,
              default=1000)
@click.argument('source-env', type=str, nargs=1)
@ui.pass_index()
def clone(index: Index, batch_size: int, source_env: str):
    """Copy all records from the index of `source_env` into the target index."""
    try:
        source_dc = Datacube(env=source_env)
    except OperationalError as e:
        handle_exception('Error Connecting to Source Database: %s', e)
    except IndexSetupError as e:
        handle_exception('Source database not initialised: %s', e)
    # NOTE(review): assumes handle_exception does not return normally;
    # otherwise `source_dc` would be unbound here — confirm.
    index.clone(source_dc.index, batch_size=batch_size)
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import datetime
import time
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from api.collections.monitor import BKMonitorClient
from gcloud.conf import settings
from pipeline.core.flow.io import StringItemSchema
from pipeline.component_framework.component import Component
from pipeline_plugins.components.collections.sites.open.monitor.base import MonitorBaseService
__group_name__ = _("监控平台(Monitor)")
class MonitorAlarmShieldStrategyService(MonitorBaseService):
    """Shield BK-Monitor alarms for a given strategy, optionally scoped by IP."""

    def get_end_time_by_duration(self, shield_start_time, shield_duration):
        """Return `shield_start_time` ("%Y-%m-%d %H:%M:%S") plus `shield_duration` minutes."""
        dt = datetime.datetime.strptime(shield_start_time, "%Y-%m-%d %H:%M:%S")
        shield_end_time = (dt + datetime.timedelta(minutes=shield_duration)).strftime("%Y-%m-%d %H:%M:%S")
        return shield_end_time

    def inputs_format(self):
        """Declare the plugin's input items (strategy ID, IP scope and time range)."""
        return [
            self.InputItem(
                name=_("策略 ID"),
                key="bk_alarm_shield_strategy",
                type="string",
                schema=StringItemSchema(description=_("需要执行屏蔽的策略 ID")),
            ),
            self.InputItem(
                name=_("IP"), key="bk_alarm_shield_IP", type="string", schema=StringItemSchema(description=_("IP"))
            ),
            self.InputItem(
                name=_("时间选择"),
                key="bk_alarm_time_type",
                type="string",
                schema=StringItemSchema(description=_("开始屏蔽的时间")),
            ),
            self.InputItem(
                name=_("屏蔽开始时间"),
                key="bk_alarm_shield_begin_time",
                type="string",
                schema=StringItemSchema(description=_("开始屏蔽的时间")),
            ),
            self.InputItem(
                name=_("屏蔽结束时间"),
                key="bk_alarm_end_time",
                type="string",
                schema=StringItemSchema(description=_("结束屏蔽的时间")),
            ),
            self.InputItem(
                name=_("屏蔽持续时间"),
                key="bk_alarm_shield_duration",
                type="string",
                schema=StringItemSchema(description=_("屏蔽持续的时间")),
            ),
        ]

    def execute(self, data, parent_data):
        """Build the shield request from the inputs and send it to BK-Monitor."""
        if parent_data.get_one_of_inputs("language"):
            translation.activate(parent_data.get_one_of_inputs("language"))

        bk_biz_id = parent_data.get_one_of_inputs("biz_cc_id")
        executor = parent_data.get_one_of_inputs("executor")
        client = BKMonitorClient(username=executor)

        strategy = data.get_one_of_inputs("bk_alarm_shield_strategy")
        begin_time = data.get_one_of_inputs("bk_alarm_shield_begin_time")
        # NOTE(review): inputs_format declares the end-time key as
        # "bk_alarm_end_time" but this reads "bk_alarm_shield_end_time" —
        # confirm which key the form actually submits.
        end_time = data.get_one_of_inputs("bk_alarm_shield_end_time")
        scope_value = data.get_one_of_inputs("bk_alarm_shield_IP")
        time_type = int(data.get_one_of_inputs("bk_alarm_time_type"))
        shield_duration = data.get_one_of_inputs("bk_alarm_shield_duration")

        # 从当前时间开始,仅输入持续时间 (start now; only a duration was given)
        if time_type == 1:
            begin_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
            end_time = self.get_end_time_by_duration(begin_time, int(shield_duration))
        # 输入开始时间和持续时间 (explicit start time plus a duration)
        elif time_type == 2:
            end_time = self.get_end_time_by_duration(begin_time, int(shield_duration))

        request_body = self.get_request_body(bk_biz_id, begin_time, end_time, "strategy", strategy, executor)
        if scope_value:
            target = self.get_ip_dimension(scope_value, bk_biz_id, executor)
            request_body["dimension_config"].update(target)

        result_flag = self.send_request(request_body, data, client)
        return result_flag

    def get_dimension_config(self, shied_type, shied_value, bk_biz_id, client):
        # For strategy-based shielding the dimension config is just the strategy id.
        return {"id": shied_value}

    def get_ip_dimension(self, scope_value, bk_biz_id, username):
        """Resolve the IP scope into a dimension config via the base service."""
        ip_dimension = super(MonitorAlarmShieldStrategyService, self).get_ip_dimension_config(
            scope_value, bk_biz_id, username
        )
        return ip_dimension

    def get_request_body(self, bk_biz_id, begin_time, end_time, shied_type, shied_value, username):
        """Assemble the alarm-shield request payload."""
        # NOTE(review): `username` is passed where get_dimension_config names the
        # parameter `client`; the value is unused there — confirm intent.
        dimension_config = self.get_dimension_config(shied_type, shied_value, bk_biz_id, username)
        request_body = self.build_request_body(
            begin_time=begin_time,
            bk_biz_id=bk_biz_id,
            shied_type=shied_type,
            dimension_config=dimension_config,
            end_time=end_time,
        )
        return request_body
class MonitorAlarmShieldStrategyComponent(Component):
    """Component wrapper exposing MonitorAlarmShieldStrategyService as a pipeline plugin."""
    name = _("蓝鲸监控告警屏蔽(按策略)")
    code = "monitor_alarm_shield_strategy"
    bound_service = MonitorAlarmShieldStrategyService
    form = "{static_url}components/atoms/monitor/alarm_shield_strategy/v1_1.js".format(static_url=settings.STATIC_URL)
    # Fix: stray dataset-residue trailing "|" removed from this line.
    version = "1.1"
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from rest_framework.exceptions import ParseError
from backend.kube_core.toolkit.injectors import InjectManager
from ..toolkit.diff.parser import split_manifest
from .bcs_info_provider import BcsInfoProvider
from .utils import yaml_dump, yaml_load
yaml_seperator = b"\n---\n"
logger = logging.getLogger(__name__)
def make_matcher_configs(matcher_cls, kinds):
    """Build one matcher config dict per kind, using matcher class `matcher_cls`."""
    return [{"type": matcher_cls, "parameters": {"kind": kind}} for kind in kinds]


def make_kind_matcher_configs(kinds):
    """Matcher configs that match resource kinds exactly."""
    return make_matcher_configs("KindMatcher", kinds)


def make_re_kind_matcher_configs(kinds):
    """Matcher configs that match resource kinds by regular expression."""
    return make_matcher_configs("ReKindMatcher", kinds)
def parse_manifest(manifest):
    """Split a multi-document YAML manifest and load each document.

    Returns a list of parsed resources; empty documents are skipped.
    Raises ParseError (carrying the offending content) when a document
    cannot be loaded.
    """
    if not isinstance(manifest, bytes):
        if isinstance(manifest, str):
            manifest = manifest.encode("utf-8")
        else:
            # NOTE(review): bytes(obj, "utf-8") only accepts str, which is
            # already handled above — confirm this branch is reachable.
            manifest = bytes(manifest, "utf-8")
    result = list()
    contents = split_manifest(manifest)
    for content in contents:
        # Strip tab artifacts that would confuse the YAML parser.
        content = content.replace(b'\t\n', b'\n')
        content = content.strip(b'\t')
        try:
            resource = yaml_load(content)
        # except yaml.composer.ComposerError as e:
        except Exception as e:
            message = "Parse manifest failed: \n{error}\n\nManifest content:\n{content}".format(
                error=e, content=content.decode("utf-8")
            )
            logger.exception(message)
            raise ParseError(message)
        if not resource:
            # Skip empty documents (e.g. a trailing "---").
            continue
        result.append(resource)
    return result
def join_manifest(resources_list):
    """Serialize each resource to YAML and join them into one manifest string."""
    separator = yaml_seperator.decode()
    return separator.join(yaml_dump(resource) for resource in resources_list)
def inject_configs(
    access_token,
    project_id,
    cluster_id,
    namespace_id,
    namespace,
    creator,
    updator,
    created_at,
    updated_at,
    version,
    ignore_empty_access_token=False,
    extra_inject_source=None,
    source_type='helm',
):
    """Build injector configs that merge BCS annotations, labels and container
    env into rendered Kubernetes resources.

    Each returned entry carries `matchers` (which resource kinds to touch),
    `paths` (where in the resource to merge) and `data` (what to merge).

    NOTE(review): `created_at`, `updated_at` and `ignore_empty_access_token`
    are accepted but unused below — the provider is always constructed with
    ignore_empty_access_token=True. Confirm whether that is intentional.
    """
    if extra_inject_source is None:
        extra_inject_source = dict()

    context = {"creator": creator, "updator": updator, "version": version}
    context.update(extra_inject_source)

    provider = BcsInfoProvider(
        access_token=access_token,
        project_id=project_id,
        cluster_id=cluster_id,
        namespace_id=namespace_id,
        namespace=namespace,
        context=context,
        ignore_empty_access_token=True,
    )
    bcs_annotations = provider.provide_annotations(source_type)
    # resouce may not have annotations field
    bcs_annotations = {"annotations": bcs_annotations}

    bcs_labels = provider.provide_labels(source_type)
    bcs_labels = {"labels": bcs_labels}
    bcs_pod_labels = {"labels": provider.provide_pod_labels(source_type)}

    # Some pod may has no env config, so we shouldn't add `env` to path,
    # Add it to be injected data, make sure it will merge to pod's env anyway.
    bcs_env = {"env": provider.provide_container_env()}
    configs = [
        {
            # annotations on every resource's metadata
            "matchers": make_re_kind_matcher_configs([".+"]),
            "paths": ["/metadata"],
            "data": bcs_annotations,
            "force_str": True,
        },
        {
            # labels on every resource's own metadata
            "matchers": make_re_kind_matcher_configs([".+"]),
            "paths": ["/metadata"],
            "data": bcs_labels,
            "force_str": True,
        },
        {
            # pod labels on workload pod templates
            "matchers": make_kind_matcher_configs(["Deployment", "StatefulSet", "DaemonSet", "ReplicaSet", "Job"]),
            "paths": ["/spec/template/metadata"],
            "data": bcs_pod_labels,
            "force_str": True,
        },
        {
            # pod env on bare Pods
            "matchers": make_kind_matcher_configs(["Pod"]),
            "paths": ["/spec/containers/*"],
            "data": bcs_env,
            "force_str": True,
        },
        {
            # pod env on workload pod templates
            "matchers": make_kind_matcher_configs(["Deployment", "StatefulSet", "DaemonSet", "ReplicaSet", "Job"]),
            "paths": ["/spec/template/spec/containers/*"],
            "data": bcs_env,
            "force_str": True,
        },
    ]
    return configs
def inject_bcs_info(
    access_token,
    project_id,
    cluster_id,
    namespace_id,
    namespace,
    creator,
    updator,
    created_at,
    updated_at,
    resources,
    version,
    ignore_empty_access_token=False,
    extra_inject_source=None,
):
    """Inject BCS annotations/labels/env into a rendered manifest.

    Parses `resources` (a multi-document YAML manifest), applies the injector
    configs built by inject_configs, and returns the re-serialized manifest
    string.
    """
    configs = inject_configs(
        access_token=access_token,
        project_id=project_id,
        cluster_id=cluster_id,
        namespace_id=namespace_id,
        namespace=namespace,
        creator=creator,
        updator=updator,
        created_at=created_at,
        updated_at=updated_at,
        version=version,
        ignore_empty_access_token=ignore_empty_access_token,
        extra_inject_source=extra_inject_source,
    )
    resources_list = parse_manifest(resources)
    context = {"creator": creator, "updator": updator, "version": version}
    manager = InjectManager(configs=configs, resources=resources_list, context=context)
    resources_list = manager.do_inject()
    # Fix: stray dataset-residue trailing "|" removed from the return line.
    content = join_manifest(resources_list)
    return content
from __future__ import annotations
import json
class BaseMission:
"""
The base for all missions used in mil_missions. Lots of this class
is just documentation for the various functions that real missions
can overload. Individual ROS robotics platforms should extend this
base class to provide interfaces to the particular systems on the robot.
.. container:: operations
.. describe:: str(x)
Prints the name of the mission.
"""
nh = None
mission_runner = None
    def __init__(self, parent=None):
        # `parent` is the mission that spawned this one (None for top-level
        # missions); used by send_feedback to route feedback up the chain.
        self.parent = parent
    @classmethod
    def name(cls) -> str:
        """
        Override for real missions to return a string for how the mission
        will be referenced in the GUI/CLI. For example, a mission implemented
        in class ``MyCoolMission`` might implement.

        .. code-block:: python

            class MyCoolMission:
                @classmethod
                def name(cls):
                    return 'My cool mission'

        Returns:
            str: The name of the mission. By default, simply returns the class'
            ``__name__`` attribute.
        """
        return cls.__name__
    @classmethod
    async def setup(cls) -> None:
        """
        Used to set up individual child missions. This is called after the base
        mission is set up using :meth:`~.setup_base`.

        This method should be overridden for all child missions who wish to have a
        resource ready for when the mission begins.

        Any resource set up in this method should be shut down using the :meth:`~.shutdown`
        method.

        .. code-block:: python

            class MovementMission(MyFancyRobotMission):
                @classmethod
                async def setup(cls):
                    self.my_sub = await self.nh.subscribe("/my_topic", MyMessage)

                @classmethod
                async def shutdown(cls):
                    await self.my_sub.shutdown()
        """
    @classmethod
    async def setup_base(cls, mission_runner) -> None:
        """
        Sets up a base mission, used to generate individual child missions that perform
        individual actions. This method should set up resources needed by all child
        missions, so that they will be available when the child mission begins.

        This method should only be used for base missions, and there should be just
        one base mission per individual robotic system.

        .. code-block:: python

            class MyFancyRobotMission:
                @classmethod
                async def setup_base(cls, mission_runner):
                    await super(cls).setup_base(mission_runner)

        Args:
            mission_runner (:class:`MissionRunner`): The mission runner that will
                run the missions. Used to allow the individual missions to send
                feedback to the mission runner.
        """
        # Share the runner and its node handle with every mission subclass.
        cls.mission_runner = mission_runner
        cls.nh = cls.mission_runner.nh
    @classmethod
    async def shutdown(cls) -> None:
        """
        Shuts down a child mission. This is called when the mission server is shutting
        down all individual child missions.

        Any resources that were set up using :meth:`~.setup` should be considered for
        shutdown using this method.
        """
        pass
    @classmethod
    async def shutdown_base(cls) -> None:
        """
        Shuts down a base mission. This is called when the mission server is shutting
        down, and can be used to ensure that resources are properly closed. This
        is called before each individual child mission is shutdown using :meth:`~.shutdown`.
        Any resources that were setup using :meth:`~.setup_base` should be considered for
        shutdown using this method.
        """
        # Intentionally a no-op; base missions override as needed.
        pass
    def send_feedback(self, message: str) -> None:
        """
        Send a string as feedback to any clients monitoring this mission. If the
        mission is a child mission, it will call the send_feedback_child of its
        parent, allowing missions to choose how to use the feedback from its children.

        Args:
            message (str): The feedback text to publish.
        """
        if self.parent:
            # Let the parent decide how to relay/annotate child feedback.
            self.parent.METHOD_NAME(message, self)
        else:
            # Top-level mission: publish directly through the runner.
            self.mission_runner.send_feedback(message)
    def METHOD_NAME(self, message: str, child: BaseMission) -> None:
        """
        Called by child missions when sending feedback. By default sends this feedback prefixed
        with the name of the child mission.

        Args:
            message (str): The child's feedback text.
            child (BaseMission): The child mission that produced the feedback.
        """
        self.send_feedback(f"{child.name()}: {message}")
@classmethod
def has_mission(cls, name: str):
"""
Returns true if the mission runner has a mission with specified name.
"""
return cls.mission_runner.has_mission(name)
@classmethod
def get_mission(cls, name: str):
"""
Returns the mission with the specified name.
"""
return cls.mission_runner.get_mission(name)
    async def run_submission(self, name: str, parameters: str = "") -> str | None:
        """
        Runs another mission available to the mission server, returning the string
        result of the missions execution.
        Args:
            name (str): The name of the submission to spawn as a string. If this
                mission is unknown, raise an exception.
            parameters (str): Parameters to pass to the run function of the submission. Note,
                this function does not call decode_parameters, so parent
                missions need to do this or otherwise ensure the parameters are in
                the format expected by the child. Defaults to an empty string.
        Raises:
            Exception: The submission name is unrecognized - therefore, no submission
                can be run.
        Returns:
            Optional[str]: The result of the mission with the given name.
        """
        # Annotation fixed from ``-> None`` to match the documented return value.
        if not self.has_mission(name):
            raise Exception(f"Cannot run_submission, '{name}' unrecognized")
        # Instantiate the child with ourselves as parent so its feedback is
        # routed through METHOD_NAME above.
        mission = self.mission_runner.missions[name](parent=self)
        return await mission.run(parameters)
@classmethod
def decode_parameters(cls, parameters: str) -> dict | str:
"""
Process parameters string from new mission goal or submission. Should return the
processes parameters which will be passed to the run function. By default
returns the json decoded object in the string or, if this fails, just
the original string.
If this function throws an exception (such as a ParametersException), the
mission will be aborted.
"""
try:
return json.loads(parameters)
except ValueError:
return parameters
    def run(self, parameters):
        """
        The actual body of the mission. Should attempt to execute whatever is expected
        of the mission, using the interfaces set up in :meth:`~.setup` or :meth:`~.setup_base` to
        command actuator movements, read perception output, etc. Should use :meth:`~.send_feedback`
        to update clients about what the mission is doing at the moment.
        If something goes wrong, raise an exception describing what went wrong
        and the mission will be aborted.
        If it executes successfully, return with a string to send a final result
        to the connected clients. Missions can also spawn other missions in the
        run function using :meth:`.run_submission`.
        Args:
            parameters: Arguments to modify the behavior of the mission. By default
                will be a json decoded object from the string passed in the goal,
                but can be changed by overriding decode_parameters.
        """
        # Base implementation does nothing; concrete missions override this.
def __str__(self) -> str:
return self.__class__.__qualname__ |
298,307 | score | import copy
from typing import Any, List, Tuple
import torch
from typeguard import check_argument_types
from espnet2.asr.decoder.abs_decoder import AbsDecoder
from espnet.nets.scorer_interface import BatchScorerInterface
class OpenAIWhisperDecoder(AbsDecoder, BatchScorerInterface):
    """Transformer-based Speech-to-Text Decoder from OpenAI's Whisper Model:

    URL: https://github.com/openai/whisper

    Wraps the decoder half of a pretrained Whisper checkpoint so it can be
    used as an ESPnet ASR decoder and beam-search scorer.
    """

    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        dropout_rate: float = 0.0,
        whisper_model: str = "small",
        download_dir: str = None,
    ):
        # whisper is an optional dependency: fail with an install hint
        # rather than an opaque ImportError.
        try:
            import whisper
        except Exception as e:
            print("Error: whisper is not properly installed.")
            print(
                "Please install whisper with: cd ${MAIN_ROOT}/tools && "
                "./installers/install_whisper.sh"
            )
            raise e

        assert check_argument_types()
        super().__init__()

        assert whisper_model in whisper.available_models()
        _model = whisper.load_model(whisper_model, download_root=download_dir)
        # Keep only a deep copy of the decoder; the full model is freed below.
        self.decoders = copy.deepcopy(_model.decoder)
        attention_dim = self.decoders.token_embedding.embedding_dim

        # note that originally Whisper doesn't use dropouts
        self.dropout = torch.nn.Dropout(dropout_rate)

        # vocab size mismatch -> reinitialize embedding
        # orig vocab size (multilingual): 51865
        # orig vocab size (english): 51864
        if vocab_size != self.decoders.token_embedding.num_embeddings:
            orig_emb_std, orig_emb_mean = torch.std_mean(
                self.decoders.token_embedding.weight
            )
            self.decoders.token_embedding = torch.nn.Embedding(
                vocab_size, attention_dim
            )
            # Initialize the new table with the pretrained table's statistics
            # so training starts in a comparable regime.
            torch.nn.init.normal_(
                self.decoders.token_embedding.weight,
                orig_emb_mean.item(),
                orig_emb_std.item(),
            )

        self.decoders.train()
        del _model

    def forward(
        self,
        hs_pad: torch.Tensor,
        hlens: torch.Tensor,
        ys_in_pad: torch.Tensor,
        ys_in_lens: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Forward decoder.

        Args:
            hs_pad: encoded memory, float32 (batch, maxlen_in, feat)
            hlens: (batch)
            ys_in_pad:
                input token ids, int64 (batch, maxlen_out)
                if input_layer == "embed"
                input tensor (batch, maxlen_out, #mels) in the other cases
            ys_in_lens: (batch)
        Returns:
            (tuple): tuple containing:
                x: decoded token score before softmax (batch, maxlen_out, token)
                    if use_output_layer is True,
                olens: (batch, )
        """
        tgt, memory = ys_in_pad, hs_pad
        # Token embedding plus Whisper's learned positional embedding,
        # truncated to the current target length.
        tgt = (
            self.decoders.token_embedding(tgt)
            + self.decoders.positional_embedding[: tgt.size(1)]
        )
        tgt = self.dropout(tgt)

        x = tgt.to(memory.dtype)

        for layer, block in enumerate(self.decoders.blocks):
            x = block(x, memory, mask=self.decoders.mask)
            # Dropout between blocks only (not after the last one).
            if layer < len(self.decoders.blocks) - 1:
                x = self.dropout(x)

        x = self.decoders.ln(x)
        # Whisper ties output projection to the token embedding (weight sharing).
        x = (
            x @ torch.transpose(self.decoders.token_embedding.weight.to(x.dtype), 0, 1)
        ).float()

        return x, ys_in_lens

    def forward_one_step(
        self,
        tgt: torch.Tensor,
        tgt_mask: torch.Tensor,
        memory: torch.Tensor,
        cache: List[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        """Forward one step.

        Args:
            tgt: input token ids, int64 (batch, maxlen_out)
            tgt_mask: input token mask, (batch, maxlen_out)
                dtype=torch.uint8 in PyTorch 1.2-
                dtype=torch.bool in PyTorch 1.2+ (include 1.2)
            memory: encoded memory, float32 (batch, maxlen_in, feat)
            cache: cached output list of (batch, max_time_out-1, size)
        Returns:
            y, cache: NN output value and cache per `self.decoders`.
                y.shape` is (batch, maxlen_out, token)
        NOTE (Shih-Lun):
            cache implementation is ignored for now
            for simplicity & correctness
        """
        x = (
            self.decoders.token_embedding(tgt)
            + self.decoders.positional_embedding[: tgt.size(1)]
        )
        x = self.dropout(x)
        x = x.to(memory.dtype)

        for layer, block in enumerate(self.decoders.blocks):
            x = block(x, memory, mask=self.decoders.mask)
            if layer < len(self.decoders.blocks) - 1:
                x = self.dropout(x)

        x = self.decoders.ln(x)
        # Only the last position is scored during incremental decoding.
        y = x[:, -1]
        y = (
            y @ torch.transpose(self.decoders.token_embedding.weight.to(x.dtype), 0, 1)
        ).float()
        y = torch.log_softmax(y, dim=-1)

        # cache is unused (see NOTE above), so None is returned in its place.
        return y, None

    def METHOD_NAME(self, ys, state, x):
        """Score."""
        logp, state = self.forward_one_step(
            ys.unsqueeze(0), torch.empty(0), x.unsqueeze(0), cache=state  # dummy mask
        )
        return logp.squeeze(0), state

    def batch_score(
        self, ys: torch.Tensor, states: List[Any], xs: torch.Tensor
    ) -> Tuple[torch.Tensor, List[Any]]:
        """Score new token batch.

        Args:
            ys (torch.Tensor): torch.int64 prefix tokens (n_batch, ylen).
            states (List[Any]): Scorer states for prefix tokens.
            xs (torch.Tensor):
                The encoder feature that generates ys (n_batch, xlen, n_feat).
        Returns:
            tuple[torch.Tensor, List[Any]]: Tuple of
                batchfied scores for next token with shape of `(n_batch, n_vocab)`
                and next state list for ys.
        """
        # batch decoding, dummy mask is passed
        logp, states = self.forward_one_step(ys, torch.empty(0), xs, cache=None)

        # States are None because the cache is unused (see forward_one_step).
        return logp, None
298,308 | look up gsutil version | # -*- coding: utf-8 -*-
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared utility methods for the update command and its tests."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import logging
import os
import re
import textwrap
import sys
import gslib
from gslib.utils.system_util import IS_OSX
from gslib.exception import CommandException
from gslib.storage_url import StorageUrlFromString
from gslib.utils.constants import GSUTIL_PUB_TARBALL
from gslib.utils.constants import GSUTIL_PUB_TARBALL_PY2
# This function used to belong inside of update.py. However, it needed to be
# moved here due to compatibility issues with Travis CI, because update.py is
# not included with PyPI installations.
def DisallowUpdateIfDataInGsutilDir(directory=gslib.GSUTIL_DIR):
  """Disallows the update command if files not in the gsutil distro are found.

  This prevents users from losing data if they are in the habit of running
  gsutil from the gsutil directory and leaving data in that directory.

  This will also detect someone attempting to run gsutil update from a git
  repo, since the top-level directory will contain git files and dirs (like
  .git) that are not distributed with gsutil.

  Args:
    directory: (str) The directory to use this functionality on.

  Raises:
    CommandException: if files other than those distributed with gsutil found.
  """
  # Manifest includes recursive-includes of gslib. Directly add
  # those to the list here so we will skip them in os.listdir() loop without
  # having to build deeper handling of the MANIFEST file here. Also include
  # 'third_party', which isn't present in manifest but gets added to the
  # gsutil distro by the gsutil submodule configuration; and the MANIFEST.in
  # and CHANGES.md files.
  manifest_lines = ['MANIFEST.in', 'third_party']

  try:
    with open(os.path.join(directory, 'MANIFEST.in'), 'r') as fp:
      for line in fp:
        if line.startswith('include '):
          manifest_lines.append(line.split()[-1])
        elif re.match(r'recursive-include \w+ \*', line):
          manifest_lines.append(line.split()[1])
  except IOError:
    # Logger.warn is a deprecated alias of Logger.warning.
    logging.getLogger().warning(
        'MANIFEST.in not found in %s.\nSkipping user data '
        'check.\n', directory)
    return

  # Look just at top-level directory. We don't try to catch data dropped into
  # subdirs (like gslib) because that would require deeper parsing of
  # MANFFEST.in, and most users who drop data into gsutil dir do so at the top
  # level directory.
  addl_excludes = (
      '.coverage',
      '.DS_Store',
      '.github',  # was listed twice; duplicate removed
      '.style.yapf',
      '.yapfignore',
      '__pycache__',
  )
  for filename in os.listdir(directory):
    if filename.endswith('.pyc') or filename in addl_excludes:
      continue
    if filename not in manifest_lines:
      raise CommandException('\n'.join(
          textwrap.wrap(
              'A file (%s) that is not distributed with gsutil was found in '
              'the gsutil directory. The update command cannot run with user '
              'data in the gsutil directory.' %
              # Report the directory that was actually scanned (the original
              # always reported gslib.GSUTIL_DIR, which is wrong when a
              # custom directory is passed).
              os.path.join(directory, filename))))
def METHOD_NAME(gsutil_api, url_str):
  """Looks up the gsutil version of the specified gsutil tarball URL.

  Version is specified in the metadata field set on that object.

  Args:
    gsutil_api: gsutil Cloud API to use when retrieving gsutil tarball.
    url_str: tarball URL to retrieve (such as 'gs://pub/gsutil.tar.gz').

  Returns:
    Version string if URL is a cloud URL containing x-goog-meta-gsutil-version
    metadata, else None.
  """
  url = StorageUrlFromString(url_str)
  if not url.IsCloudUrl():
    # Local/file URLs carry no version metadata.
    return None
  obj = gsutil_api.GetObjectMetadata(url.bucket_name,
                                     url.object_name,
                                     provider=url.scheme,
                                     fields=['metadata'])
  metadata = obj.metadata
  if not metadata or not metadata.additionalProperties:
    return None
  # Scan the custom-metadata key/value pairs for the version marker.
  for prop in metadata.additionalProperties:
    if prop.key == 'gsutil_version':
      return prop.value
  return None
def GsutilPubTarball():
  """Returns the appropriate gsutil pub tarball based on the Python version.

  Returns:
    The storage_uri of the appropriate pub tarball.
  """
  running_py2 = sys.version_info.major == 2
  return GSUTIL_PUB_TARBALL_PY2 if running_py2 else GSUTIL_PUB_TARBALL
298,309 | test ordering is off by default | from collections import OrderedDict
import datetime as dt
import pytest
from marshmallow import fields, Schema, EXCLUDE
from tests.base import User
class TestUnordered:
    """An unordered schema must dump/load into a plain dict, not OrderedDict."""

    class UnorderedSchema(Schema):
        name = fields.Str()
        email = fields.Str()

        class Meta:
            ordered = False

    def test_unordered_dump_returns_dict(self):
        user = User("steve", email="steve@steve.steve")
        dumped = self.UnorderedSchema().dump(user)
        assert not isinstance(dumped, OrderedDict)
        assert type(dumped) is dict

    def test_unordered_load_returns_dict(self):
        loaded = self.UnorderedSchema().load(
            {"name": "steve", "email": "steve@steve.steve"}
        )
        assert not isinstance(loaded, OrderedDict)
        assert type(loaded) is dict
class KeepOrder(Schema):
    """Ordered schema fixture: declaration order below defines output order."""

    class Meta:
        ordered = True

    # NOTE: do not reorder these fields — the TestFieldOrdering assertions
    # depend on exactly this declaration order.
    name = fields.String(allow_none=True)
    email = fields.Email(allow_none=True)
    age = fields.Integer()
    created = fields.DateTime()
    id = fields.Integer(allow_none=True)
    homepage = fields.Url()
    birthdate = fields.Date()
class OrderedMetaSchema(Schema):
    """Ordered schema fixture whose order comes from ``Meta.fields``."""

    id = fields.Int(allow_none=True)
    email = fields.Email(allow_none=True)

    class Meta:
        # Meta.fields order (not declaration order) defines output order here.
        fields = ("name", "email", "age", "created", "id", "homepage", "birthdate")
        ordered = True
class OrderedNestedOnly(Schema):
    """Ordered schema fixture nesting the ordered ``KeepOrder`` schema."""

    class Meta:
        ordered = True

    user = fields.Nested(KeepOrder)
class TestFieldOrdering:
    """Tests that ordered schemas preserve field declaration order."""

    @pytest.mark.parametrize("with_meta", (False, True))
    def test_ordered_option_is_inherited(self, user, with_meta):
        class ParentUnordered(Schema):
            class Meta:
                ordered = False

        # KeepOrder is before ParentUnordered in MRO,
        # so ChildOrderedSchema will be ordered
        class ChildOrderedSchema(KeepOrder, ParentUnordered):
            if with_meta:

                class Meta:
                    pass

        schema = ChildOrderedSchema()
        assert schema.opts.ordered is True
        assert schema.dict_class == OrderedDict

        data = schema.dump(user)
        keys = list(data)
        assert keys == [
            "name",
            "email",
            "age",
            "created",
            "id",
            "homepage",
            "birthdate",
        ]

        # ParentUnordered is before KeepOrder in MRO,
        # so ChildUnorderedSchema will be unordered
        class ChildUnorderedSchema(ParentUnordered, KeepOrder):
            class Meta:
                pass

        schema = ChildUnorderedSchema()
        assert schema.opts.ordered is False

    def METHOD_NAME(self):
        # Ordering must be opt-in: a bare Schema subclass is unordered.
        class DummySchema(Schema):
            pass

        schema = DummySchema()
        assert schema.ordered is False

    def test_declared_field_order_is_maintained_on_dump(self, user):
        ser = KeepOrder()
        data = ser.dump(user)
        keys = list(data)
        assert keys == [
            "name",
            "email",
            "age",
            "created",
            "id",
            "homepage",
            "birthdate",
        ]

    def test_declared_field_order_is_maintained_on_load(self, serialized_user):
        schema = KeepOrder(unknown=EXCLUDE)
        data = schema.load(serialized_user)
        keys = list(data)
        assert keys == [
            "name",
            "email",
            "age",
            "created",
            "id",
            "homepage",
            "birthdate",
        ]

    def test_nested_field_order_with_only_arg_is_maintained_on_dump(self, user):
        schema = OrderedNestedOnly()
        data = schema.dump({"user": user})
        user_data = data["user"]
        keys = list(user_data)
        assert keys == [
            "name",
            "email",
            "age",
            "created",
            "id",
            "homepage",
            "birthdate",
        ]

    def test_nested_field_order_with_only_arg_is_maintained_on_load(self):
        schema = OrderedNestedOnly()
        data = schema.load(
            {
                "user": {
                    "name": "Foo",
                    "email": "Foo@bar.com",
                    "age": 42,
                    "created": dt.datetime.now().isoformat(),
                    "id": 123,
                    "homepage": "http://foo.com",
                    "birthdate": dt.datetime.now().date().isoformat(),
                }
            }
        )
        user_data = data["user"]
        keys = list(user_data)
        assert keys == [
            "name",
            "email",
            "age",
            "created",
            "id",
            "homepage",
            "birthdate",
        ]

    def test_nested_field_order_with_exclude_arg_is_maintained(self, user):
        class HasNestedExclude(Schema):
            user = fields.Nested(KeepOrder, exclude=("birthdate",))

        ser = HasNestedExclude()
        data = ser.dump({"user": user})
        user_data = data["user"]
        keys = list(user_data)
        assert keys == ["name", "email", "age", "created", "id", "homepage"]

    def test_meta_fields_order_is_maintained_on_dump(self, user):
        ser = OrderedMetaSchema()
        data = ser.dump(user)
        keys = list(data)
        assert keys == [
            "name",
            "email",
            "age",
            "created",
            "id",
            "homepage",
            "birthdate",
        ]

    def test_meta_fields_order_is_maintained_on_load(self, serialized_user):
        schema = OrderedMetaSchema(unknown=EXCLUDE)
        data = schema.load(serialized_user)
        keys = list(data)
        assert keys == [
            "name",
            "email",
            "age",
            "created",
            "id",
            "homepage",
            "birthdate",
        ]
class TestIncludeOption:
    """Tests for ``Meta.include``, which allows declaring fields whose names
    are Python keywords (e.g. ``from``, ``in``)."""

    class AddFieldsSchema(Schema):
        name = fields.Str()

        class Meta:
            include = {"from": fields.Str()}

    def test_fields_are_added(self):
        s = self.AddFieldsSchema()
        in_data = {"name": "Steve", "from": "Oskosh"}
        # Load the same payload we compare against; the original duplicated
        # the dict literal, which could silently drift out of sync.
        result = s.load(in_data)
        assert result == in_data

    def test_included_fields_ordered_after_declared_fields(self):
        class AddFieldsOrdered(Schema):
            name = fields.Str()
            email = fields.Str()

            class Meta:
                include = {
                    "from": fields.Str(),
                    "in": fields.Str(),
                    "@at": fields.Str(),
                }

        s = AddFieldsOrdered()
        in_data = {
            "name": "Steve",
            "from": "Oskosh",
            "email": "steve@steve.steve",
            "in": "VA",
            "@at": "Charlottesville",
        }
        # declared fields, then "included" fields
        expected_fields = ["name", "email", "from", "in", "@at"]
        assert list(AddFieldsOrdered._declared_fields.keys()) == expected_fields

        result = s.load(in_data)
        assert list(result.keys()) == expected_fields

    def test_added_fields_are_inherited(self):
        class AddFieldsChild(self.AddFieldsSchema):
            email = fields.Str()

        s = AddFieldsChild()
        assert "email" in s._declared_fields.keys()
        assert "from" in s._declared_fields.keys()
        assert isinstance(s._declared_fields["from"], fields.Str)
298,310 | test response request | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# -------------------------------------------------------------------------
import sys
from rest_client import TestRestClient
import pytest
from azure.core.pipeline.transport import HttpRequest as PipelineTransportHttpRequest
from azure.core.rest import HttpRequest as RestHttpRequest
from azure.core.pipeline import Pipeline
from azure.core.pipeline.transport import RequestsTransport
@pytest.fixture
def old_request(port):
    # Legacy pipeline-transport request against the test server's stream endpoint.
    return PipelineTransportHttpRequest("GET", "http://localhost:{}/streams/basic".format(port))
@pytest.fixture
def old_response(old_request):
    # Response produced by sending the legacy request through requests transport.
    return RequestsTransport().send(old_request)
@pytest.fixture
def new_request(port):
    # azure.core.rest request for the same endpoint as ``old_request``.
    return RestHttpRequest("GET", "http://localhost:{}/streams/basic".format(port))
@pytest.fixture
def new_response(new_request):
    # Response produced by sending the rest request through requests transport.
    return RequestsTransport().send(new_request)
def test_response_attr_parity(old_response, new_response):
    """Every public attribute of the old response must exist on the new one."""
    public_attrs = [a for a in dir(old_response) if not a.startswith("_")]
    for attr in public_attrs:
        assert hasattr(new_response, attr)
def test_response_set_attrs(old_response, new_response):
    """Any attribute settable on the old response must be settable on the new,
    with the same resulting value."""
    for attr in dir(old_response):
        if attr[0] == "_":
            continue
        try:
            # if we can set it on the old request, we want to
            # be able to set it on the new
            setattr(old_response, attr, "foo")
        except Exception:
            # Was a bare ``except:``, which also swallows KeyboardInterrupt
            # and SystemExit; Exception is the widest we should catch here.
            pass
        else:
            setattr(new_response, attr, "foo")
            assert getattr(old_response, attr) == getattr(new_response, attr) == "foo"
def test_response_block_size(old_response, new_response):
    # Both implementations default to a 4096-byte download block size,
    # and both accept an override.
    assert old_response.block_size == new_response.block_size == 4096
    old_response.block_size = 500
    new_response.block_size = 500
    assert old_response.block_size == new_response.block_size == 500
def test_response_body(old_response, new_response):
    # The /streams/basic endpoint serves this fixed payload.
    assert old_response.body() == new_response.body() == b"Hello, world!"
def test_response_internal_response(old_response, new_response, port):
    # Both wrappers expose (and allow replacing) the underlying
    # requests-library response object.
    assert (
        old_response.internal_response.url
        == new_response.internal_response.url
        == "http://localhost:{}/streams/basic".format(port)
    )
    old_response.internal_response = "foo"
    new_response.internal_response = "foo"
    assert old_response.internal_response == new_response.internal_response == "foo"
def test_response_stream_download(old_request, new_request):
    # Streamed download must yield identical bytes for both response types.
    # Note: the old API takes ``pipeline`` as a keyword, the new one positionally.
    transport = RequestsTransport()
    pipeline = Pipeline(transport)
    old_response = transport.send(old_request, stream=True)
    old_string = b"".join(old_response.stream_download(pipeline=pipeline))
    new_response = transport.send(new_request, stream=True)
    new_string = b"".join(new_response.stream_download(pipeline))
    assert old_string == new_string == b"Hello, world!"
def METHOD_NAME(old_response, new_response, port):
    # The originating request is exposed and replaceable on both types.
    assert old_response.request.url == new_response.request.url == "http://localhost:{}/streams/basic".format(port)
    old_response.request = "foo"
    new_response.request = "foo"
    assert old_response.request == new_response.request == "foo"
def test_response_status_code(old_response, new_response):
    # status_code is readable and writable on both response types.
    assert old_response.status_code == new_response.status_code == 200
    old_response.status_code = 202
    new_response.status_code = 202
    assert old_response.status_code == new_response.status_code == 202
def test_response_headers(old_response, new_response):
    # Compare key sets only — header values (e.g. Date) vary per request.
    assert (
        set(old_response.headers.keys())
        == set(new_response.headers.keys())
        == set(["Content-Type", "Connection", "Server", "Date"])
    )
    old_response.headers = {"Hello": "world!"}
    new_response.headers = {"Hello": "world!"}
    assert old_response.headers == new_response.headers == {"Hello": "world!"}
def test_response_reason(old_response, new_response):
    # HTTP reason phrase is readable and writable on both response types.
    assert old_response.reason == new_response.reason == "OK"
    old_response.reason = "Not OK"
    new_response.reason = "Not OK"
    assert old_response.reason == new_response.reason == "Not OK"
def test_response_content_type(old_response, new_response):
    # content_type mirrors the Content-Type header and is writable.
    assert old_response.content_type == new_response.content_type == "text/html; charset=utf-8"
    old_response.content_type = "application/json"
    new_response.content_type = "application/json"
    assert old_response.content_type == new_response.content_type == "application/json"
def _create_multiapart_request(http_request_class):
    """Build a multipart/mixed batch request (two DELETEs) for the given
    request class, with a per-part policy that tags each part's response."""

    class ResponsePolicy(object):
        def on_request(self, *args):
            return

        def on_response(self, request, response):
            # Marker header checked later by _test_parts().
            response.http_response.headers["x-ms-fun"] = "true"

    req0 = http_request_class("DELETE", "/container0/blob0")
    req1 = http_request_class("DELETE", "/container1/blob1")
    request = http_request_class("POST", "/multipart/request")
    request.set_multipart_mixed(req0, req1, policies=[ResponsePolicy()])
    return request
def _test_parts(response):
    """Assert the two parts of a multipart response carry the expected status
    codes and the marker header added by the per-part policy."""
    parts = response.parts()
    assert len(parts) == 2
    first_part, second_part = parts[0], parts[1]
    assert first_part.status_code == 202
    assert first_part.headers["x-ms-fun"] == "true"
    assert second_part.status_code == 404
    assert second_part.headers["x-ms-fun"] == "true"
def test_response_parts(port):
    # Multipart responses must decompose identically for old and new request types.
    old_request = _create_multiapart_request(PipelineTransportHttpRequest)
    new_request = _create_multiapart_request(RestHttpRequest)
    old_response = TestRestClient(port).send_request(old_request, stream=True)
    new_response = TestRestClient(port).send_request(new_request, stream=True)
    _test_parts(old_response)
    _test_parts(new_response)
298,311 | cancel | "Dialog to specify or edit the parameters for a user configured help source."
import os
import sys
from Tkinter import *
import tkMessageBox
import tkFileDialog
class GetHelpSourceDialog(Toplevel):
    """Modal dialog that collects a menu-item name and a help-source
    location (URL or local file) for IDLE's Additional Help feature."""

    def __init__(self, parent, title, menuItem='', filePath=''):
        """Get menu entry and url/ local file location for Additional Help

        User selects a name for the Help resource and provides a web url
        or a local file as its source.  The user can enter a url or browse
        for the file.
        """
        Toplevel.__init__(self, parent)
        self.configure(borderwidth=5)
        self.resizable(height=FALSE, width=FALSE)
        self.title(title)
        self.transient(parent)
        self.grab_set()
        self.protocol("WM_DELETE_WINDOW", self.METHOD_NAME)
        self.parent = parent
        # result is None on cancel, (menu_item, path) on OK.
        self.result = None
        self.CreateWidgets()
        self.menu.set(menuItem)
        self.path.set(filePath)
        self.withdraw() #hide while setting geometry
        #needs to be done here so that the winfo_reqwidth is valid
        self.update_idletasks()
        #centre dialog over parent:
        self.geometry("+%d+%d" %
                      ((parent.winfo_rootx() + ((parent.winfo_width()/2)
                        -(self.winfo_reqwidth()/2)),
                        parent.winfo_rooty() + ((parent.winfo_height()/2)
                        -(self.winfo_reqheight()/2)))))
        self.deiconify() #geometry set, unhide
        self.bind('<Return>', self.Ok)
        self.wait_window()

    def CreateWidgets(self):
        self.menu = StringVar(self)
        self.path = StringVar(self)
        # NOTE(review): fontSize appears unused anywhere in this dialog;
        # kept for compatibility but a candidate for removal.
        self.fontSize = StringVar(self)
        self.frameMain = Frame(self, borderwidth=2, relief=GROOVE)
        self.frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
        labelMenu = Label(self.frameMain, anchor=W, justify=LEFT,
                          text='Menu Item:')
        self.entryMenu = Entry(self.frameMain, textvariable=self.menu,
                               width=30)
        self.entryMenu.focus_set()
        labelPath = Label(self.frameMain, anchor=W, justify=LEFT,
                          text='Help File Path: Enter URL or browse for file')
        self.entryPath = Entry(self.frameMain, textvariable=self.path,
                               width=40)
        self.entryMenu.focus_set()
        labelMenu.pack(anchor=W, padx=5, pady=3)
        self.entryMenu.pack(anchor=W, padx=5, pady=3)
        labelPath.pack(anchor=W, padx=5, pady=3)
        self.entryPath.pack(anchor=W, padx=5, pady=3)
        browseButton = Button(self.frameMain, text='Browse', width=8,
                              command=self.browseFile)
        browseButton.pack(pady=3)
        frameButtons = Frame(self)
        frameButtons.pack(side=BOTTOM, fill=X)
        self.buttonOk = Button(frameButtons, text='OK',
                               width=8, default=ACTIVE, command=self.Ok)
        self.buttonOk.grid(row=0, column=0, padx=5, pady=5)
        self.buttonCancel = Button(frameButtons, text='Cancel',
                                   width=8, command=self.METHOD_NAME)
        self.buttonCancel.grid(row=0, column=1, padx=5, pady=5)

    def browseFile(self):
        filetypes = [
            ("HTML Files", "*.htm *.html", "TEXT"),
            ("PDF Files", "*.pdf", "TEXT"),
            ("Windows Help Files", "*.chm"),
            ("Text Files", "*.txt", "TEXT"),
            ("All Files", "*")]
        path = self.path.get()
        if path:
            dir, base = os.path.split(path)
        else:
            base = None
            if sys.platform[:3] == 'win':
                dir = os.path.join(os.path.dirname(sys.executable), 'Doc')
                if not os.path.isdir(dir):
                    dir = os.getcwd()
            else:
                dir = os.getcwd()
        opendialog = tkFileDialog.Open(parent=self, filetypes=filetypes)
        file = opendialog.show(initialdir=dir, initialfile=base)
        if file:
            self.path.set(file)

    def MenuOk(self):
        "Simple validity check for a sensible menu item name"
        menuOk = True
        # BUGFIX: str.strip() returns a new string; the original discarded
        # it, so a whitespace-only name passed the emptiness check.
        menu = self.menu.get().strip()
        if not menu:
            tkMessageBox.showerror(title='Menu Item Error',
                                   message='No menu item specified',
                                   parent=self)
            self.entryMenu.focus_set()
            menuOk = False
        elif len(menu) > 30:
            tkMessageBox.showerror(title='Menu Item Error',
                                   message='Menu item too long:'
                                           '\nLimit 30 characters.',
                                   parent=self)
            self.entryMenu.focus_set()
            menuOk = False
        return menuOk

    def PathOk(self):
        "Simple validity check for menu file path"
        pathOk = True
        # BUGFIX: same discarded-strip problem as MenuOk().
        path = self.path.get().strip()
        if not path: #no path specified
            tkMessageBox.showerror(title='File Path Error',
                                   message='No help file path specified.',
                                   parent=self)
            self.entryPath.focus_set()
            pathOk = False
        elif path.startswith(('www.', 'http')):
            pass
        else:
            if path[:5] == 'file:':
                path = path[5:]
            if not os.path.exists(path):
                tkMessageBox.showerror(title='File Path Error',
                                       message='Help file path does not exist.',
                                       parent=self)
                self.entryPath.focus_set()
                pathOk = False
        return pathOk

    def Ok(self, event=None):
        if self.MenuOk() and self.PathOk():
            self.result = (self.menu.get().strip(),
                           self.path.get().strip())
            if sys.platform == 'darwin':
                path = self.result[1]
                if path.startswith(('www', 'file:', 'http:')):
                    pass
                else:
                    # Mac Safari insists on using the URI form for local files
                    self.result = list(self.result)
                    self.result[1] = "file://" + path
            self.destroy()

    def METHOD_NAME(self, event=None):
        # Cancel: discard any entered values and close the dialog.
        self.result = None
        self.destroy()
if __name__ == '__main__':
    #test the dialog (NOTE: this file is Python 2 — print statement below)
    root = Tk()
    def run():
        keySeq = ''
        dlg = GetHelpSourceDialog(root, 'Get Help Source')
        print dlg.result
    Button(root, text='Dialog', command=run).pack()
    root.mainloop()
298,312 | execute argmin series | """Reduces sequences.
NOTE: This file overwrite the pandas backend registered handlers for:
- execute_node_greatest_list,
- execute_node_least_list
This is so we can register our handlers that transparently handle both the'
dask specific types and pandas types. This cannot be done via the
dispatcher since the top level container is a list.
"""
from __future__ import annotations
import contextlib
import functools
from collections.abc import Sized
import dask.array as da
import dask.dataframe as dd
import dask.dataframe.groupby as ddgb
import numpy as np
import toolz
from multipledispatch.variadic import Variadic
import ibis.common.exceptions as exc
import ibis.expr.operations as ops
from ibis.backends.dask.dispatch import execute_node
from ibis.backends.dask.execution.util import make_selected_obj
from ibis.backends.pandas.execution.generic import (
execute_node_greatest_list,
execute_node_least_list,
)
@toolz.curry
def promote_to_sequence(length, obj):
    """Coerce ``obj`` to a dask array of the given ``length``.

    Series inputs are converted in place; scalars are repeated to match.
    """
    if isinstance(obj, dd.Series):
        # we must force length computation if we have mixed types
        # otherwise da.reductions can't compare arrays
        return obj.to_dask_array(lengths=True)
    else:
        return da.from_array(np.repeat(obj, length))
def pairwise_reducer(func, values):
    """Left-fold ``values`` pairwise with the binary function ``func``."""
    # functools.reduce already applies ``func`` pairwise; the original
    # wrapped it in ``lambda x, y: func(x, y)``, adding a pointless extra
    # Python call per element.
    return functools.reduce(func, values)
def compute_row_reduction(func, values):
    """Apply ``func`` element-wise across ``values`` (mix of scalars/Series).

    All-scalar inputs are reduced directly; otherwise every input is promoted
    to a dask array of the common length first.
    """
    # Lengths of the sized (Series-like) inputs; empty set means all scalars.
    final_sizes = {len(x) for x in values if isinstance(x, Sized)}
    if not final_sizes:
        return func(values)
    # All sized inputs must share one length; unpack raises otherwise.
    (final_size,) = final_sizes
    arrays = list(map(promote_to_sequence(final_size), values))
    raw = pairwise_reducer(func, arrays)
    return dd.from_array(raw).squeeze()
# XXX: there's non-determinism in the dask and pandas dispatch registration of
# Greatest/Least/Coalesce, because 1) dask and pandas share `execute_node`
# which is a design flaw and 2) greatest/least/coalesce need to handle
# mixed-type (the Series types plus any related scalar type) inputs so `object`
# is used as a possible input type.
#
# Here we remove the dispatch for pandas if it exists because the dask rule
# handles both cases.
# Best-effort removal: depending on import order, the pandas registrations
# may not exist yet, so KeyError is suppressed.
with contextlib.suppress(KeyError):
    del execute_node[ops.Greatest, Variadic[object]]
with contextlib.suppress(KeyError):
    del execute_node[ops.Least, Variadic[object]]
@execute_node.register(ops.Greatest, [(object, dd.Series)])
def dask_execute_node_greatest_list(op, *values, **kwargs):
    """Row-wise greatest over a mix of scalars and dask Series."""
    # Delegate to the pandas implementation only when no dask Series is
    # present. isinstance (rather than an exact ``type(v) != dd.Series``
    # comparison) also routes dd.Series subclasses to the dask path.
    if not any(isinstance(v, dd.Series) for v in values):
        return execute_node_greatest_list(op, *values, **kwargs)
    return compute_row_reduction(da.maximum, values)
@execute_node.register(ops.Least, [(object, dd.Series)])
def dask_execute_node_least_list(op, *values, **kwargs):
    """Row-wise least over a mix of scalars and dask Series."""
    # Mirror of dask_execute_node_greatest_list: isinstance also routes
    # dd.Series subclasses to the dask path.
    if not any(isinstance(v, dd.Series) for v in values):
        return execute_node_least_list(op, *values, **kwargs)
    return compute_row_reduction(da.minimum, values)
@execute_node.register(ops.Reduction, ddgb.SeriesGroupBy, type(None))
def execute_reduction_series_groupby(op, data, mask, aggcontext=None, **kwargs):
    # Unmasked grouped reduction: the op class name doubles as the
    # aggregation method name (e.g. Sum -> "sum").
    return aggcontext.agg(data, type(op).__name__.lower())
def _filtered_reduction(data, mask):
    # Apply the boolean mask to the grouped series, then regroup by the
    # original index so the aggregation respects the original grouping.
    return make_selected_obj(data)[mask.obj].groupby(data.index)
@execute_node.register(ops.Reduction, ddgb.SeriesGroupBy, ddgb.SeriesGroupBy)
def execute_reduction_series_gb_mask(op, data, mask, aggcontext=None, **kwargs):
    # Masked grouped reduction: filter first, then aggregate by op name.
    grouped_and_filtered_data = _filtered_reduction(data, mask)
    return aggcontext.agg(grouped_and_filtered_data, type(op).__name__.lower())
@execute_node.register(ops.Reduction, dd.Series, (dd.Series, type(None)))
def execute_reduction_series_mask(op, data, mask, aggcontext=None, **kwargs):
    """Aggregate a dask Series, optionally restricted by a boolean mask."""
    if mask is None:
        operand = data
    else:
        operand = data[mask]
    return aggcontext.agg(operand, type(op).__name__.lower())
@execute_node.register(
    (ops.First, ops.Last), ddgb.SeriesGroupBy, (ddgb.SeriesGroupBy, type(None))
)
@execute_node.register((ops.First, ops.Last), dd.Series, (dd.Series, type(None)))
def execute_first_last_dask(op, data, mask, aggcontext=None, **kwargs):
    """Always raises: first/last are unsupported on the dask backend."""
    raise exc.OperationNotDefinedError(
        "Dask does not support first or last aggregations"
    )
@execute_node.register(
    (ops.CountDistinct, ops.ApproxCountDistinct),
    ddgb.SeriesGroupBy,
    type(None),
)
def execute_count_distinct_series_groupby(op, data, _, aggcontext=None, **kwargs):
    """Count distinct values per group (no filter mask).

    ApproxCountDistinct is computed exactly here via ``nunique``."""
    return aggcontext.agg(data, "nunique")
@execute_node.register(
    (ops.CountDistinct, ops.ApproxCountDistinct),
    ddgb.SeriesGroupBy,
    ddgb.SeriesGroupBy,
)
def execute_count_distinct_series_groupby_mask(
    op, data, mask, aggcontext=None, **kwargs
):
    """Count distinct values per group, restricted to rows where ``mask`` is true."""
    grouped_and_filtered_data = _filtered_reduction(data, mask)
    return aggcontext.agg(grouped_and_filtered_data, "nunique")
@execute_node.register(
    (ops.CountDistinct, ops.ApproxCountDistinct),
    dd.Series,
    (dd.Series, type(None)),
)
def execute_count_distinct_series_mask(op, data, mask, aggcontext=None, **kwargs):
    """Count distinct values of a series, optionally restricted by a mask."""
    return aggcontext.agg(data[mask] if mask is not None else data, "nunique")
# Delta degrees of freedom per variance flavor: population (ddof=0) vs.
# sample (ddof=1); keyed by the ``how`` attribute of Variance/StandardDev ops.
variance_ddof = {"pop": 0, "sample": 1}
@execute_node.register(ops.Variance, ddgb.SeriesGroupBy, type(None))
def execute_reduction_series_groupby_var(op, data, _, aggcontext=None, **kwargs):
    """Per-group variance with ddof chosen by ``op.how`` ("pop" or "sample")."""
    return aggcontext.agg(data, "var", ddof=variance_ddof[op.how])
@execute_node.register(ops.Variance, ddgb.SeriesGroupBy, ddgb.SeriesGroupBy)
def execute_var_series_groupby_mask(op, data, mask, aggcontext=None, **kwargs):
    """Per-group variance restricted to rows where ``mask`` is true."""
    grouped_and_filtered_data = _filtered_reduction(data, mask)
    return aggcontext.agg(grouped_and_filtered_data, "var", ddof=variance_ddof[op.how])
@execute_node.register(ops.Variance, dd.Series, (dd.Series, type(None)))
def execute_variance_series(op, data, mask, aggcontext=None, **kwargs):
    """Variance of a series, optionally masked, with ddof from ``op.how``."""
    return aggcontext.agg(
        data[mask] if mask is not None else data,
        "var",
        ddof=variance_ddof[op.how],
    )
@execute_node.register(ops.StandardDev, ddgb.SeriesGroupBy, type(None))
def execute_reduction_series_groupby_std(op, data, _, aggcontext=None, **kwargs):
    """Per-group standard deviation with ddof chosen by ``op.how``."""
    return aggcontext.agg(data, "std", ddof=variance_ddof[op.how])
@execute_node.register(ops.StandardDev, ddgb.SeriesGroupBy, ddgb.SeriesGroupBy)
def execute_std_series_groupby_mask(op, data, mask, aggcontext=None, **kwargs):
    """Per-group standard deviation restricted to rows where ``mask`` is true."""
    grouped_and_filtered_data = _filtered_reduction(data, mask)
    return aggcontext.agg(grouped_and_filtered_data, "std", ddof=variance_ddof[op.how])
@execute_node.register(ops.StandardDev, dd.Series, (dd.Series, type(None)))
def execute_standard_dev_series(op, data, mask, aggcontext=None, **kwargs):
    """Standard deviation of a series, optionally masked, ddof from ``op.how``."""
    return aggcontext.agg(
        data[mask] if mask is not None else data,
        "std",
        ddof=variance_ddof[op.how],
    )
@execute_node.register(ops.ArgMax, dd.Series, dd.Series, (dd.Series, type(None)))
def execute_argmax_series(op, data, key, mask, aggcontext=None, **kwargs):
    """Return the element of ``data`` at the index where ``key`` is maximal.

    NOTE(review): ``.compute()`` materializes the index eagerly, breaking
    laziness -- presumably needed because ``.loc`` requires a concrete label.
    """
    idxmax = aggcontext.agg(key[mask] if mask is not None else key, "idxmax").compute()
    return data.loc[idxmax]
@execute_node.register(ops.ArgMin, dd.Series, dd.Series, (dd.Series, type(None)))
def METHOD_NAME(op, data, key, mask, aggcontext=None, **kwargs):
    """Return the element of ``data`` at the index where ``key`` is minimal.

    Mirror image of ``execute_argmax_series``; same eager ``.compute()`` caveat.
    """
    idxmin = aggcontext.agg(key[mask] if mask is not None else key, "idxmin").compute()
    return data.loc[idxmin]
298,313 | render | import queue
import time
import numpy as np
import voluptuous as vol
from ledfx.color import parse_color, validate_color, validate_gradient
from ledfx.effects.audio import AudioReactiveEffect
from ledfx.effects.gradient import GradientEffect
from ledfx.utils import empty_queue
class Strobe(AudioReactiveEffect, GradientEffect):
    """Audio-reactive strobe effect.

    Combines two overlays: a full-strip "bass strobe" triggered on volume
    beats that cycles through a gradient, and a narrow white-ish "percussive
    strobe" triggered on onsets at a random position.  Both overlays decay
    multiplicatively each rendered frame.
    """

    NAME = "Strobe"
    CATEGORY = "Classic"
    CONFIG_SCHEMA = vol.Schema(
        {
            vol.Optional(
                "gradient",
                description="Color scheme for bass strobe to cycle through",
                default="Dancefloor",
            ): validate_gradient,
            vol.Optional(
                "color_step",
                description="Amount of color change per bass strobe",
                default=0.0625,
            ): vol.All(vol.Coerce(float), vol.Range(min=0, max=0.25)),
            vol.Optional(
                "bass_strobe_decay_rate",
                description="Bass strobe decay rate. Higher -> decays faster.",
                default=0.5,
            ): vol.All(vol.Coerce(float), vol.Range(min=0, max=1)),
            vol.Optional(
                "strobe_color",
                description="color for percussive strobes",
                default="#FFFFFF",
            ): validate_color,
            vol.Optional(
                "strobe_width",
                description="Percussive strobe width, in pixels",
                default=10,
            ): vol.All(vol.Coerce(int), vol.Range(min=0, max=1000)),
            vol.Optional(
                "strobe_decay_rate",
                description="Percussive strobe decay rate. Higher -> decays faster.",
                default=0.5,
            ): vol.All(vol.Coerce(float), vol.Range(min=0, max=1)),
            vol.Optional(
                "color_shift_delay",
                description="color shift delay for percussive strobes. Lower -> more shifts",
                default=1,
            ): vol.All(vol.Coerce(float), vol.Range(min=0, max=1)),
        }
    )

    def on_activate(self, pixel_count):
        """Allocate per-pixel overlays and the onset event queue."""
        self.strobe_overlay = np.zeros(np.shape(self.pixels))
        self.bass_strobe_overlay = np.zeros(np.shape(self.pixels))
        self.onsets_queue = queue.Queue()

    def deactivate(self):
        """Drain and drop the onset queue, then defer to the base class."""
        empty_queue(self.onsets_queue)
        self.onsets_queue = None
        return super().deactivate()

    def config_updated(self, config):
        """Cache config-derived state used by the render/audio callbacks.

        Decay rates are stored as the per-frame multiplier (1 - configured
        rate), so a higher configured rate decays faster.
        """
        self.color_shift_step = self._config["color_step"]
        self.strobe_color = np.array(
            parse_color(self._config["strobe_color"]), dtype=float
        )
        self.last_color_shift_time = 0
        self.strobe_width = self._config["strobe_width"]
        self.color_shift_delay_in_seconds = self._config["color_shift_delay"]
        self.color_idx = 0
        self.last_strobe_time = 0
        self.strobe_wait_time = 0
        self.strobe_decay_rate = 1 - self._config["strobe_decay_rate"]
        self.last_bass_strobe_time = 0
        self.bass_strobe_wait_time = 0.2
        self.bass_strobe_decay_rate = (
            1 - self._config["bass_strobe_decay_rate"]
        )

    def METHOD_NAME(self):
        """Render one frame: bass overlay + any queued percussive strobe."""
        pixels = np.copy(self.bass_strobe_overlay)
        # Sometimes we lose the queue? No idea why. This should ensure it doesn't happen
        if self.onsets_queue is None:
            self.onsets_queue = queue.Queue()
        if not self.onsets_queue.empty():
            # Consume one onset event and paint a strobe at a random position.
            self.onsets_queue.get()
            strobe_width = min(self.strobe_width, self.pixel_count)
            length_diff = self.pixel_count - strobe_width
            position = (
                0
                if length_diff == 0
                else np.random.randint(self.pixel_count - strobe_width)
            )
            self.strobe_overlay[
                position : position + strobe_width
            ] = self.strobe_color
        pixels += self.strobe_overlay
        # Exponential decay of both overlays toward black.
        self.strobe_overlay *= self.strobe_decay_rate
        self.bass_strobe_overlay *= self.bass_strobe_decay_rate
        self.pixels = pixels

    def audio_data_updated(self, data):
        """React to audio: shift gradient color, fire bass/percussive strobes."""
        currentTime = time.time()
        if (
            currentTime - self.last_color_shift_time
            > self.color_shift_delay_in_seconds
        ):
            self.color_idx += self.color_shift_step
            self.color_idx = self.color_idx % 1
            self.bass_strobe_color = self.get_gradient_color(self.color_idx)
            self.last_color_shift_time = currentTime
        if (
            data.volume_beat_now()
            and currentTime - self.last_bass_strobe_time
            > self.bass_strobe_wait_time
            and self.bass_strobe_decay_rate
        ):
            self.bass_strobe_overlay = np.tile(
                self.bass_strobe_color, (self.pixel_count, 1)
            )
            self.last_bass_strobe_time = currentTime
        if (
            data.onset()
            and currentTime - self.last_strobe_time > self.strobe_wait_time
        ):
            # Defer the actual drawing to the render thread via the queue.
            self.onsets_queue.put(True)
            self.last_strobe_time = currentTime
298,314 | estimate resolution | #
# AUTHOR(S): Caitlin Haedrich <caitlin DOT haedrich AT gmail>
#
# PURPOSE: This module contains utility functions for InteractiveMap.
#
# COPYRIGHT: (C) 2021-2022 Caitlin Haedrich, and by the GRASS Development Team
#
# This program is free software under the GNU General Public
# License (>=v2). Read the file COPYING that comes with GRASS
# for details.
"""Utility functions warpping existing processes in a suitable way"""
import grass.script as gs
def get_region(env=None):
    """Return the current computational region as a dictionary.

    The short keys from :func:`gs.region` are duplicated under long
    aliases (``east``/``west``/``north``/``south``).
    """
    region = gs.region(env=env)
    for long_name, short_name in (
        ("east", "e"),
        ("west", "w"),
        ("north", "n"),
        ("south", "s"),
    ):
        region[long_name] = region[short_name]
    return region
def get_location_proj_string(env=None):
    """Return the projection of the given environment as a PROJ.4 string."""
    proj = gs.read_command("g.proj", flags="jf", env=env)
    return proj.strip()
def reproject_region(region, from_proj, to_proj):
    """Reproject boundary of region from one projection to another.

    :param dict region: region to reproject as a dictionary with long key names
                    output of get_region
    :param str from_proj: PROJ.4 string of region; output of get_location_proj_string
    :param str in_proj: PROJ.4 string of target location;
                    output of get_location_proj_string

    :return dict region: reprojected region as a dictionary with long key names
    """
    region = region.copy()
    # reproject all corners, otherwise reproj. region may be underestimated
    # even better solution would be reprojecting vector region like in r.import
    proj_input = (
        f"{region['east']} {region['north']}\n"
        f"{region['west']} {region['north']}\n"
        f"{region['east']} {region['south']}\n"
        f"{region['west']} {region['south']}\n"
    )
    proc = gs.start_command(
        "m.proj",
        input="-",
        separator=" , ",
        proj_in=from_proj,
        proj_out=to_proj,
        flags="d",
        stdin=gs.PIPE,
        stdout=gs.PIPE,
        stderr=gs.PIPE,
    )
    # Feed the corner coordinates on stdin, then close and detach stdin so
    # communicate() below does not try to write to it again.
    proc.stdin.write(gs.encode(proj_input))
    proc.stdin.close()
    proc.stdin = None
    proj_output, stderr = proc.communicate()
    if proc.returncode:
        raise RuntimeError(
            _("Encountered error while running m.proj: {}").format(stderr)
        )
    output = gs.decode(proj_output).splitlines()
    # get the largest bbox
    latitude_list = []
    longitude_list = []
    for row in output:
        # Each output row is "lon lat z"; the z component is ignored.
        longitude, latitude, unused = row.split(" ")
        longitude_list.append(float(longitude))
        latitude_list.append(float(latitude))
    region["east"] = max(longitude_list)
    region["north"] = max(latitude_list)
    region["west"] = min(longitude_list)
    region["south"] = min(latitude_list)
    return region
def METHOD_NAME(raster, mapset, location, dbase, env):
    """Estimates resolution of reprojected raster.

    :param str raster: name of raster
    :param str mapset: mapset of raster
    :param str location: name of source location
    :param str dbase: path to source database
    :param dict env: target environment

    :return float estimate: estimated resolution of raster in destination
                            environment
    """
    # r.proj -g reports the bounds/rows/cols of the raster as it would appear
    # in the target projection, without actually reprojecting the data.
    output = gs.read_command(
        "r.proj",
        flags="g",
        input=raster,
        mapset=mapset,
        location=location,
        dbase=dbase,
        env=env,
    ).strip()
    params = gs.parse_key_val(output, vsep=" ")
    output = gs.read_command("g.region", flags="ug", env=env, **params)
    output = gs.parse_key_val(output, val_type=float)
    # Average the north-south and east-west cell sizes into one estimate.
    cell_ns = (output["n"] - output["s"]) / output["rows"]
    cell_ew = (output["e"] - output["w"]) / output["cols"]
    estimate = (cell_ew + cell_ns) / 2.0
    return estimate
def setup_location(name, path, epsg, src_env):
    """Setup temporary location with different projection but
    same computational region as source location

    :param str name: name of new location
    :param path path: path to new location's database
    :param str epsg: EPSG code
    :param dict src_env: source environment

    :return str rcfile: name of new locations rcfile
    :return dict new_env: new environment
    """
    # Create new environment
    rcfile, new_env = gs.create_environment(path, name, "PERMANENT")
    # Location and mapset
    gs.create_location(path, name, epsg=epsg, overwrite=True)
    # Reproject region so the new location covers the same extent.
    set_target_region(src_env, new_env)
    return rcfile, new_env
def set_target_region(src_env, tgt_env):
    """Set the target environment's region from the source environment.

    The extent is reprojected; the number of rows and columns is preserved.
    """
    src_region = get_region(env=src_env)
    reprojected = reproject_region(
        src_region,
        get_location_proj_string(env=src_env),
        get_location_proj_string(env=tgt_env),
    )
    # Match the original extent while keeping the row/column counts.
    gs.run_command(
        "g.region",
        n=reprojected["north"],
        s=reprojected["south"],
        e=reprojected["east"],
        w=reprojected["west"],
        rows=reprojected["rows"],
        cols=reprojected["cols"],
        env=tgt_env,
    )
def get_map_name_from_d_command(module, **kwargs):
    """Return the map name passed to a display command.

    Assumes only positional parameters.  For commands taking several maps
    (e.g. d.rgb) only one is returned.  Returns an empty string when the
    relevant parameter is absent.
    """
    # Commands whose primary map parameter is not called "map".
    special = {"d.his": "hue", "d.legend": "raster", "d.rgb": "red", "d.shade": "shade"}
    if module in special:
        parameter = special[module]
    else:
        parameter = "map"
    return kwargs.get(parameter, "")
def get_rendering_size(region, width, height, default_width=600, default_height=400):
    """Return the rendering (width, height) honoring the region aspect ratio.

    :param dict region: region dictionary
    :param integer width: rendering width (can be None)
    :param integer height: rendering height (can be None)
    :param integer default_width: default rendering width
    :param integer default_height: default rendering height

    :return tuple (width, height): adjusted width and height

    When both dimensions are provided they are returned unchanged.  When
    only one is provided, the other is derived from the region aspect
    ratio.  When neither is provided, the default for the region's longer
    axis is used and the other dimension is derived.
    """
    if width and height:
        return (width, height)
    region_width = region["e"] - region["w"]
    region_height = region["n"] - region["s"]
    if width:
        size = (width, round(width * region_height / region_width))
    elif height:
        size = (round(height * region_width / region_height), height)
    elif region_height > region_width:
        size = (round(default_height * region_width / region_height), default_height)
    else:
        size = (default_width, round(default_width * region_height / region_width))
    return size
298,315 | contains | from typing import Any
from django.contrib.gis.gdal import CoordTransform, SpatialReference
from django.contrib.gis.gdal.geometries import OGRGeometry
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.mutable_list import ListMixin
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.prepared import PreparedGeometry
from typing_extensions import Self
class GEOSGeometryBase(GEOSBase):
    """Type stub for the base GEOS geometry wrapper (predicates, properties,
    set-theoretic and topological operations)."""

    ptr_type: Any
    destructor: Any
    has_cs: bool
    def __init__(self, ptr: Any, cls: Any) -> None: ...
    def __copy__(self) -> Self: ...
    def __deepcopy__(self, memodict: Any) -> Self: ...
    @staticmethod
    def from_ewkt(ewkt: str) -> GEOSGeometry: ...
    @classmethod
    def from_gml(cls, gml_string: str) -> GEOSGeometry: ...
    def __eq__(self, other: object) -> bool: ...
    def __hash__(self) -> int: ...
    # Set-theoretic operators: | union, & intersection, - difference, ^ sym. difference.
    def __or__(self, other: GEOSGeometry) -> GEOSGeometry: ...
    def __and__(self, other: GEOSGeometry) -> GEOSGeometry: ...
    def __sub__(self, other: GEOSGeometry) -> GEOSGeometry: ...
    def __xor__(self, other: GEOSGeometry) -> GEOSGeometry: ...
    @property
    def coord_seq(self) -> GEOSCoordSeq | None: ...
    @property
    def geom_type(self) -> str: ...
    @property
    def geom_typeid(self) -> int: ...
    @property
    def num_geom(self) -> int: ...
    @property
    def num_coords(self) -> int: ...
    @property
    def num_points(self) -> int: ...
    @property
    def dims(self) -> int: ...
    def normalize(self) -> None: ...
    @property
    def empty(self) -> bool: ...
    @property
    def hasz(self) -> bool: ...
    @property
    def ring(self) -> bool: ...
    @property
    def simple(self) -> bool: ...
    @property
    def valid(self) -> bool: ...
    @property
    def valid_reason(self) -> str: ...
    # Binary spatial predicates.
    def METHOD_NAME(self, other: GEOSGeometry) -> bool: ...
    def covers(self, other: GEOSGeometry) -> bool: ...
    def crosses(self, other: GEOSGeometry) -> bool: ...
    def disjoint(self, other: GEOSGeometry) -> bool: ...
    def equals(self, other: GEOSGeometry) -> bool: ...
    def equals_exact(self, other: GEOSGeometry, tolerance: float = ...) -> bool: ...
    def intersects(self, other: GEOSGeometry) -> bool: ...
    def overlaps(self, other: GEOSGeometry) -> bool: ...
    def relate_pattern(self, other: GEOSGeometry, pattern: str) -> bool: ...
    def touches(self, other: GEOSGeometry) -> bool: ...
    def within(self, other: GEOSGeometry) -> bool: ...
    @property
    def srid(self) -> int | None: ...
    @srid.setter
    def srid(self, srid: int | None) -> None: ...
    # Serialization formats.
    @property
    def ewkt(self) -> str: ...
    @property
    def wkt(self) -> str: ...
    @property
    def hex(self) -> bytes: ...
    @property
    def hexewkb(self) -> bytes: ...
    @property
    def json(self) -> str: ...
    geojson: str
    @property
    def wkb(self) -> memoryview: ...
    @property
    def ewkb(self) -> memoryview: ...
    @property
    def kml(self) -> str: ...
    @property
    def prepared(self) -> PreparedGeometry: ...
    @property
    def ogr(self) -> OGRGeometry: ...
    @property
    def srs(self) -> SpatialReference | None: ...
    @property
    def crs(self) -> SpatialReference | None: ...
    ptr: Any
    def transform(self, ct: CoordTransform | SpatialReference | str | int, clone: bool = ...) -> GEOSGeometry: ...
    # Topological operations producing new geometries.
    @property
    def boundary(self) -> GEOSGeometry: ...
    def buffer(self, width: float, quadsegs: int = ...) -> GEOSGeometry: ...
    def buffer_with_style(
        self,
        width: float,
        quadsegs: int = ...,
        end_cap_style: int = ...,
        join_style: int = ...,
        mitre_limit: float = ...,
    ) -> GEOSGeometry: ...
    @property
    def centroid(self) -> Point: ...
    @property
    def convex_hull(self) -> GEOSGeometry: ...
    def difference(self, other: GEOSGeometry) -> GEOSGeometry: ...
    @property
    def envelope(self) -> GEOSGeometry: ...
    def intersection(self, other: GEOSGeometry) -> GEOSGeometry: ...
    @property
    def point_on_surface(self) -> Point: ...
    def relate(self, other: GEOSGeometry) -> str: ...
    def simplify(self, tolerance: float = ..., preserve_topology: bool = ...) -> GEOSGeometry: ...
    def sym_difference(self, other: GEOSGeometry) -> GEOSGeometry: ...
    @property
    def unary_union(self) -> GEOSGeometry: ...
    def union(self, other: GEOSGeometry) -> GEOSGeometry: ...
    # Measurements.
    @property
    def area(self) -> float: ...
    def distance(self, other: GEOSGeometry) -> float: ...
    @property
    def extent(self) -> tuple[float, float, float, float]: ...
    @property
    def length(self) -> float: ...
    def clone(self) -> Self: ...
class LinearGeometryMixin:
    """Type stub for linear-geometry extras (interpolation/projection along
    a line) mixed into line-like geometry classes."""

    def interpolate(self, distance: float) -> Point: ...
    def interpolate_normalized(self, distance: float) -> Point: ...
    def project(self, point: Point) -> float: ...
    def project_normalized(self, point: Point) -> float: ...
    @property
    def merged(self) -> GEOSGeometry: ...
    @property
    def closed(self) -> bool: ...
class GEOSGeometry(GEOSGeometryBase, ListMixin):
    """Type stub for the concrete, user-facing GEOS geometry class."""

    def __init__(self, geo_input: Any, srid: int | None = ...) -> None: ...
298,316 | cross origin | # -*- coding: utf-8 -*-
"""
flask_cors
~~~~
Flask-CORS is a simple extension to Flask allowing you to support cross
origin resource sharing (CORS) using a simple decorator.
:copyright: (c) 2014 by Cory Dolphin.
:license: MIT, see LICENSE for more details.
"""
from functools import update_wrapper
from flask import make_response, request, current_app
from .core import *
def METHOD_NAME(*args, **kwargs):
    """
    This function is the decorator which is used to wrap a Flask route with.
    In the simplest case, simply use the default parameters to allow all
    origins in what is the most permissive configuration. If this method
    modifies state or performs authentication which may be brute-forced, you
    should add some degree of protection, such as Cross Site Forgery
    Request protection.

    :param origins: The origin, or list of origins to allow requests from.
        The origin(s) may be regular expressions, case-sensitive strings,
        or else an asterisk

        Default : '*'
    :type origins: list, string or regex

    :param methods: The method or list of methods which the allowed origins
        are allowed to access for non-simple requests.

        Default : [GET, HEAD, POST, OPTIONS, PUT, PATCH, DELETE]
    :type methods: list or string

    :param expose_headers: The header or list which are safe to expose to the
        API of a CORS API specification.

        Default : None
    :type expose_headers: list or string

    :param allow_headers: The header or list of header field names which can be
        used when this resource is accessed by allowed origins. The header(s)
        may be regular expressions, case-sensitive strings, or else an asterisk.

        Default : '*', allow all headers
    :type allow_headers: list, string or regex

    :param supports_credentials: Allows users to make authenticated requests.
        If true, injects the `Access-Control-Allow-Credentials` header in
        responses. This allows cookies to be submitted across domains.

        :note: This option cannot be used in conjuction with a '*' origin

        Default : False
    :type supports_credentials: bool

    :param max_age: The maximum time for which this CORS request maybe cached.
        This value is set as the `Access-Control-Max-Age` header.

        Default : None
    :type max_age: timedelta, integer, string or None

    :param send_wildcard: If True, and the origins parameter is `*`, a
        wildcard `Access-Control-Allow-Origin` header is sent, rather than
        the request's `Origin` header.

        Default : False
    :type send_wildcard: bool

    :param vary_header: If True, the header Vary: Origin will be returned
        as per the W3 implementation guidelines.

        Setting this header when the `Access-Control-Allow-Origin` is
        dynamically generated (e.g. when there is more than one allowed
        origin, and an Origin than '*' is returned) informs CDNs and other
        caches that the CORS headers are dynamic, and cannot be cached.

        If False, the Vary header will never be injected or altered.

        Default : True
    :type vary_header: bool

    :param automatic_options: Only applies to the `cross_origin` decorator.
        If True, Flask-CORS will override Flask's default OPTIONS handling to
        return CORS headers for OPTIONS requests.

        Default : True
    :type automatic_options: bool
    """
    _options = kwargs

    def decorator(f):
        # Decorate the view function so every response it produces carries
        # the configured CORS headers.
        debugLog("Enabling %s for cross_origin using options:%s", f, _options)

        # If True, intercept OPTIONS requests by modifying the view function,
        # replicating Flask's default behavior, and wrapping the response with
        # CORS headers.
        #
        # If f.provide_automatic_options is unset or True, Flask's route
        # decorator (which is actually wraps the function object we return)
        # intercepts OPTIONS handling, and requests will not have CORS headers
        if _options.get("automatic_options", True):
            f.required_methods = getattr(f, "required_methods", set())
            f.required_methods.add("OPTIONS")
            f.provide_automatic_options = False

        def wrapped_function(*args, **kwargs):
            # Handle setting of Flask-Cors parameters
            # Options are resolved per-request so app-level configuration
            # changes are picked up without re-decorating.
            options = get_cors_options(current_app, _options)
            if options.get("automatic_options") and request.method == "OPTIONS":
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))

            set_cors_headers(resp, options)
            # Mark the response so the app-wide CORS handler skips it.
            setattr(resp, FLASK_CORS_EVALUATED, True)
            return resp

        return update_wrapper(wrapped_function, f)
    return decorator
298,317 | prev character | # -*- coding: utf-8 -*-
# HeckeCharacters.py
from sage.all import gp, xmrange, Integer, pari, gcd, LCM, prod
from sage.misc.cachefunc import cached_method
from sage.groups.abelian_gps.abelian_group import AbelianGroup_class
from sage.groups.abelian_gps.abelian_group_element import AbelianGroupElement
from sage.groups.abelian_gps.dual_abelian_group import DualAbelianGroup_class, DualAbelianGroupElement
class RayClassGroup(AbelianGroup_class):
    """Ray class group of a number field for a given modulus, backed by PARI's
    ``bnrinit`` data.  Elements are exponent vectors on the PARI-chosen
    generators; ideals are mapped in via ``bnrisprincipal``."""

    def __init__(self, number_field, mod_ideal=1, mod_archimedean=None):
        if mod_archimedean is None:
            # Default: no ramification at any real place.
            mod_archimedean = [0] * len(number_field.real_places())
        mod_ideal = number_field.ideal(mod_ideal)

        bnf = gp(number_field.pari_bnf())
        # Use PARI to compute ray class group
        bnr = bnf.bnrinit([mod_ideal, mod_archimedean], 1)
        invariants = bnr[5][2]         # bnr.clgp.cyc
        invariants = tuple(Integer(x) for x in invariants)
        names = tuple("I%i" % i for i in range(len(invariants)))
        generators = bnr[5][3]         # bnr.gen = bnr.clgp[3]
        generators = [number_field.ideal(pari(x)) for x in generators]

        AbelianGroup_class.__init__(self, invariants, names)
        self.__number_field = number_field
        self.__bnr = bnr
        self.__pari_mod = bnr[2][1]
        self.__mod_ideal = mod_ideal
        self.__mod_arch = mod_archimedean
        self.__generators = generators

    #def __call__(self, *args, **kwargs):
    #    return group.Group.__call__(self, *args, **kwargs)

    def log(self, I):
        """Return the exponent vector of the ideal ``I`` on the group generators."""
        # Use PARI to compute class of given ideal
        g = self.__bnr.bnrisprincipal(I, flag=0)
        return [Integer(x) for x in g]

    def number_field(self):
        """Return the underlying number field."""
        return self.__number_field

    def bnr(self):
        """Return the PARI bnr structure backing this group."""
        return self.__bnr

    def modulus(self):
        """Return the finite part of the modulus as an ideal."""
        return self.__mod_ideal

    def _element_constructor_(self, *args, **kwargs):
        # Accept either a raw exponent list or anything coercible to an ideal.
        try:
            return AbelianGroupElement(args[0], self)
        except Exception:
            I = self.__number_field.ideal(*args, **kwargs)
            return AbelianGroupElement(self, self.log(I))

    @cached_method
    def dual_group(self, base_ring=None):
        """Return the group of Hecke characters on this ray class group."""
        return HeckeCharGroup(self, base_ring)

    def __str__(self):
        return "Ray class group of modulus %s over %s" \
            % (self.modulus(), self.__number_field)

    def __repr__(self):
        return self.__str__()

    def gen_ideals(self):
        """Return ideal representatives of the group generators."""
        return self.__generators

    def exp(self, x):
        """Return the ideal with exponent vector ``x`` on the generators."""
        gens = self.gen_ideals()
        return prod(g**e for g, e in zip(gens, x))

    def lift(self, x):
        """Return an ideal representing the group element ``x``."""
        return self.exp(x.exponents())

    def iter_exponents(self):
        """Iterate over all exponent vectors of the group."""
        for e in xmrange(self.invariants(), tuple):
            yield e

    def iter_ideals(self):
        """Iterate over ideal representatives of all group elements."""
        for e in self.iter_exponents():
            yield self.exp(e)
class HeckeCharGroup(DualAbelianGroup_class):
    """Dual group of a ray class group; its elements are Hecke characters."""

    def __init__(self, ray_class_group, base_ring):
        names = tuple("chi%i" % i for i in range(ray_class_group.ngens()))
        if base_ring is None:
            # Smallest cyclotomic field containing all character values.
            from sage.rings.number_field.number_field import CyclotomicField
            base_ring = CyclotomicField(LCM(ray_class_group.gens_orders()))
        DualAbelianGroup_class.__init__(self, ray_class_group, names, base_ring)
    """ ray_class_group accessible as self.group() """

    def __call__(self, x):
        if isinstance(x, HeckeChar) and x.parent() is self:
            return x
        return HeckeChar(self, x)

    def __repr__(self):
        return "Group of Hecke characters on %s"%self.group()

    #def list(self):
    #    return [ HeckeChar(self, c.list()) for c in DualAbelianGroup_class.list(self) ]

    def list_primitive(self):
        """Return the list of primitive characters (conductor == modulus)."""
        return [chi for chi in self.list() if chi.is_primitive() ]
class HeckeChar(DualAbelianGroupElement):
    """A Hecke character: an element of the dual of a ray class group,
    represented by its exponent vector on the dual generators."""

    def __init__(self, hecke_char_group, x):
        ray_class_group = hecke_char_group.group()
        # Accept either an exponent vector or anything the ray class group
        # can coerce (e.g. an ideal), which is then converted to exponents.
        if not isinstance(x, (list, tuple)) or len(x) != ray_class_group.ngens():
            x = ray_class_group(x).list()
        DualAbelianGroupElement.__init__(self, hecke_char_group, x)
        self.__repr = None
        self.__element_vector = x

    def number_field(self):
        """Return the number field of the underlying ray class group."""
        return self.parent().group().number_field()

    def modulus(self):
        """Return the modulus of the underlying ray class group."""
        return self.parent().group().modulus()

    @cached_method
    def conductor(self):
        """Return the finite part of the conductor, via PARI ``bnrconductorofchar``."""
        bnr = self.parent().group().bnr()
        finite, _ = pari(bnr.bnrconductorofchar(self.list()))
        return self.number_field().ideal(finite)

    def is_primitive(self):
        """Return True if the conductor equals the modulus."""
        return self.conductor() == self.modulus()

    def logvalue(self, x):
        """Return the character value at ``x`` as a rational in [0, 1)
        (the exponent of the root of unity), or None if ``x`` is not coprime
        to the modulus."""
        try:
            E = self.parent().group()(x)
        except Exception:
            return None
        E = E.exponents()
        F = self.exponents()
        D = self.parent().gens_orders()
        r = sum( e*f/d for e,f,d in zip( E, F, D) )
        if isinstance(r, (int,Integer)):
            return 0
        n,d = r.numerator(), r.denominator()
        return n%d/d

    def logvalues_on_gens(self):
        """Return the tuple of character exponents (as rationals) on the generators."""
        F = self.exponents()
        D = self.parent().gens_orders()
        return tuple( f/d for f,d in zip( F, D) )

    def __call__(self, x):
        try:
            logx = self.parent().group()(x)
        except Exception:
            # Not coprime to the modulus: the character vanishes.
            return 0
        return DualAbelianGroupElement.__call__(self,logx)

    def next_character(self, only_primitive=False):
        """Return the next character in odometer (lexicographic) order,
        or None when the enumeration is exhausted.

        :param only_primitive: if True, skip non-primitive characters.
        """
        D = self.parent().gens_orders()
        F = list(self.exponents())
        i = len(D)-1
        while True:
            F[i] += 1
            if F[i] == D[i]:
                # Carry into the next (more significant) digit.
                F[i] = 0
                i -= 1
                if i < 0:
                    return None
            else:
                c = HeckeChar(self.parent(), F)
                if not only_primitive or c.is_primitive():
                    return c
                # Bug fix: after rejecting a candidate, resume stepping at
                # the least significant digit; previously the carry position
                # was reused, silently skipping exponent vectors.
                i = len(D)-1

    def METHOD_NAME(self, only_primitive=False):
        """Return the previous character in odometer (lexicographic) order,
        or None when the enumeration is exhausted.

        :param only_primitive: if True, skip non-primitive characters.
        """
        D = self.parent().gens_orders()
        F = list(self.exponents())
        i = len(D)-1
        while True:
            F[i] -= 1
            if F[i] < 0:
                # Borrow from the next (more significant) digit.
                F[i] = D[i] - 1
                i -= 1
                if i < 0:
                    return None
            else:
                c = HeckeChar(self.parent(), F)
                if not only_primitive or c.is_primitive():
                    return c
                # Bug fix: same digit-reset as in next_character.
                i = len(D)-1

    def galois_orbit(self):
        """Return the orbit of this character under Galois (powers coprime to the order)."""
        order = self.multiplicative_order()
        return [ self.__pow__(k) for k in range(order) if gcd(k,order) == 1 ]
"""
k.<a> = NumberField(x^4+7*x^2+13)
G = RayClassGroup(k,7)
H = G.dual_group()
H(3)
H([3,1])
""" |
298,318 | get list domains | """
Base class for Task
"""
from abc import abstractmethod
import warnings
from domainlab.compos.pcr.p_chain_handler import AbstractChainNodeHandler
from domainlab.tasks.task_utils import parse_domain_id
from domainlab.utils.logger import Logger
class NodeTaskDG(AbstractChainNodeHandler):
    """
    Domain Generalization Classification Task
    """

    def __init__(self, succ=None):
        super().__init__(succ)
        self._loader_tr = None
        self._loader_te = None
        self._loader_val = None
        self._list_domains = None
        self._list_domain_tr = None  # versatile
        self._name = None
        self._args = None
        self.dict_dset_all = {}  # persist
        self.dict_dset_tr = {}  # versatile variable: which domains to use as training
        self.dict_dset_te = {}  # versatile
        self.dict_dset_val = {}  # versatile
        self.dict_domain_class_count = {}
        self.dim_d_tr = None  # public, only used for diva
        self._im_size = None
        self._dict_domains2imgroot = {}
        self._dict_domain_folder_name2class = {}  # {"domain1": {"class1":car, "class2":dog}}
        self._dict_domain_img_trans = {}
        self.dict_att = {}
        self.img_trans_te = None
        self.dict_domain2imgroot = {}
        self._dict_domain2filepath_list_im_tr = {}  # {"photo": "xxx/yyy/file_of_path2imgs"}
        self._dict_domain2filepath_list_im_val = {}
        self._dict_domain2filepath_list_im_te = {}
        self.dict_class_label_ind2name = None
        self.conf_without_args()  # configuration without init_business

    def conf_without_args(self):
        """
        configuration without init_business
        """

    @abstractmethod
    def init_business(self, args, node_algo_builder=None):
        """
        construct task
        """

    def METHOD_NAME(self):
        """
        1. get list of domain names
        2. better use method than property so new domains can be added
        """
        return self._list_domains

    def set_list_domains(self, list_domains):
        """
        setter for self._list_domains
        """
        self._list_domains = list_domains

    @property
    def isize(self):
        """
        getter for input size: isize
        """
        return self._im_size

    @isize.setter
    def isize(self, im_size):
        """
        setter for input size: isize
        """
        self._im_size = im_size

    @property
    def list_domain_tr(self):
        """
        property getter of list of domains for this task
        """
        if self._list_domain_tr is None:
            raise RuntimeError("task not intialized!")
        return self._list_domain_tr

    @property
    def loader_tr(self):
        """loader of mixed train domains"""
        return self._loader_tr

    @property
    def loader_val(self):
        """loader of validation dataset on the training domains"""
        return self._loader_val

    @property
    def loader_te(self):
        """loader of mixed test domains"""
        return self._loader_te

    @property
    def task_name(self):
        """
        The basic name of the task, without configurations
        """
        # @FIXME: hardcoded position
        # Strips the "NodeTask" prefix (8 chars) from the class name.
        return type(self).__name__[8:].lower()

    def get_na(self, na_tr, na_te):
        """
        task name appended with configurations
        :param na_tr: training domain names
        :param na_te: test domain names
        """
        _, list_te = self.get_list_domains_tr_te(na_tr, na_te)
        str_te = "_".join(list_te)
        # train domain names are too long
        return "_".join([self.task_name, "te", str_te])

    def is_myjob(self, request):
        """
        :param request: string
        """
        return request == self.task_name

    def get_list_domains_tr_te(self, tr_id, te_id):
        """
        For static DG task, get train and test domains list.

        :param tr_id: training domain ids;
            int or str, or a list of int or str, or None;
            if None, then assumed to be the complement of te_id.
        :param te_id: test domain ids;
            int or str, or a list of int or str; required.
        :return: list of training domain names, list of test domain names.
        """
        list_domains = self.METHOD_NAME()

        list_domain_te = parse_domain_id(te_id, list_domains)
        assert set(list_domain_te).issubset(set(list_domains))

        if tr_id is None:
            # Default: train on everything that is not a test domain.
            list_domain_tr = [did for did in list_domains if
                              did not in list_domain_te]
        else:
            list_domain_tr = parse_domain_id(tr_id, list_domains)
        if not set(list_domain_tr).issubset(set(list_domains)):
            raise RuntimeError(
                f"training domain {list_domain_tr} is not \
                subset of available domains {list_domains}")

        if set(list_domain_tr) & set(list_domain_te):
            # Overlap is allowed but warned about: it defeats the purpose of
            # domain generalization evaluation.
            logger = Logger.get_logger()
            logger.warn(
                "The sets of training and test domains overlap -- "
                "be aware of data leakage or training to the test!"
            )
            warnings.warn(
                "The sets of training and test domains overlap -- "
                "be aware of data leakage or training to the test!",
                RuntimeWarning
            )

        self.dim_d_tr = len(list_domain_tr)
        self._list_domain_tr = list_domain_tr
        return list_domain_tr, list_domain_te
298,319 | get model | import logging
import os.path
from django.db.models import F, Q, Value
from django.db.models.fields import TextField
from django.db.models.functions import Concat
from django.template import loader
from haystack import connections
from haystack.constants import Indexable
from haystack.fields import CharField
from haystack.indexes import SearchIndex
from haystack.utils import get_model_ct_tuple
from sapl.compilacao.models import (STATUS_TA_IMMUTABLE_PUBLIC,
STATUS_TA_PUBLIC, Dispositivo)
from sapl.materia.models import DocumentoAcessorio, MateriaLegislativa
from sapl.norma.models import NormaJuridica
from sapl.sessao.models import SessaoPlenaria
from sapl.settings import SOLR_URL
from sapl.utils import RemoveTag
class TextExtractField(CharField):
    """Search-index field whose text is extracted from several model attributes.

    ``model_attr`` is a sequence of ``(attribute_name, extractor_method_name)``
    pairs; each named extractor method turns the attribute value into plain
    text, and ``extract_data`` concatenates the results.
    """
    backend = None
    logger = logging.getLogger(__name__)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert self.model_attr
        # normalize a single (attr, extractor) pair into a 1-tuple of pairs
        if not isinstance(self.model_attr, (list, tuple)):
            self.model_attr = (self.model_attr, )

    def solr_extraction(self, arquivo):
        """Extract the text content of *arquivo* via the Solr backend.

        Returns '' when nothing could be extracted or on any error.
        """
        if not self.backend:
            self.backend = connections['default'].get_backend()
        try:
            with open(arquivo.path, 'rb') as f:
                content = self.backend.extract_file_contents(f)
            data = ''
            if content:
                # update from Solr 7.5 to 8.9
                if content['contents']:
                    data += content['contents']
                if content['file']:
                    data += content['file']
            return data
        except Exception:
            # BUG FIX: the original used '%' on a string with no conversion
            # specifier ('... arquivo: ' % path), which raised TypeError
            # inside this handler and masked the real error.
            print('erro processando arquivo: %s' % arquivo.path)
            self.logger.error(arquivo.path)
            self.logger.error('erro processando arquivo: %s' % arquivo.path)
            data = ''
        return data

    def print_error(self, arquivo, error):
        """Log and print an unexpected extraction error for *arquivo*."""
        msg = 'Erro inesperado processando arquivo %s erro: %s' % (
            arquivo.path, error)
        print(msg)
        # BUG FIX: the original passed `error` as a lazy logging argument,
        # but `msg` has no remaining placeholders, which made the logging
        # module emit a formatting error. `msg` already embeds the error.
        self.logger.error(msg)

    def file_extractor(self, arquivo):
        """Return the extracted text of a file field, or '' if unavailable."""
        # skip missing files and files without an extension
        if not os.path.exists(arquivo.path) or \
                not os.path.splitext(arquivo.path)[1][:1]:
            return ''
        # In production environments SOLR is used for extraction
        if SOLR_URL:
            try:
                return self.solr_extraction(arquivo)
            except Exception as err:
                print(str(err))
                self.print_error(arquivo, err)
        return ''

    def ta_extractor(self, value):
        """Flatten the public articulated texts (TAs) into one string."""
        r = []
        for ta in value.filter(privacidade__in=[
                STATUS_TA_PUBLIC,
                STATUS_TA_IMMUTABLE_PUBLIC]):
            dispositivos = Dispositivo.objects.filter(
                Q(ta=ta) | Q(ta_publicado=ta)
            ).order_by(
                'ordem'
            ).annotate(
                rotulo_texto=Concat(
                    F('rotulo'), Value(' '), F('texto'),
                    output_field=TextField(),
                )
            ).values_list(
                'rotulo_texto', flat=True)
            # drop whitespace-only entries
            r += list(filter(lambda x: x.strip(), dispositivos))
        return ' '.join(r)

    def string_extractor(self, value):
        """Identity extractor for plain text attributes."""
        return value

    def extract_data(self, obj):
        """Concatenate the extracted text of every configured attribute."""
        data = ''
        for attr, func in self.model_attr:
            if not hasattr(obj, attr) or not hasattr(self, func):
                # give the bare Exception a diagnostic message
                raise Exception(
                    'invalid model_attr entry: (%s, %s) for %s' % (
                        attr, func, type(obj).__name__))
            value = getattr(obj, attr)
            if not value:
                continue
            data += getattr(self, func)(value) + ' '
        data = data.replace('\n', ' ')
        return data

    def prepare_template(self, obj):
        """Render the per-model index template with the extracted text."""
        app_label, model_name = get_model_ct_tuple(obj)
        template_names = ['search/indexes/%s/%s_%s.txt' %
                          (app_label, model_name, self.instance_name)]
        t = loader.select_template(template_names)
        return t.render({'object': obj,
                         'extracted': self.extract_data(obj)})
class DocumentoAcessorioIndex(SearchIndex, Indexable):
    """Haystack index for DocumentoAcessorio; base class for the other indexes.

    ``text`` combines the attached file and the plain-text metadata fields
    through TextExtractField's extractor methods.
    """
    model = DocumentoAcessorio
    text = TextExtractField(
        document=True, use_template=True,
        model_attr=(
            ('arquivo', 'file_extractor'),
            ('ementa', 'string_extractor'),
            ('indexacao', 'string_extractor'),
        )
    )
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # give the extract field a back-reference to its owning index
        self.text.search_index = self
    def METHOD_NAME(self):
        """Return the model class this index covers."""
        return self.model
    def index_queryset(self, using=None):
        """Index every instance of the model."""
        return self.METHOD_NAME().objects.all()
    def get_updated_field(self):
        """Model field haystack uses to detect updated rows."""
        return 'data_ultima_atualizacao'
class NormaJuridicaIndex(DocumentoAcessorioIndex):
    """Index for NormaJuridica: full-text file, articulated text and metadata."""
    model = NormaJuridica
    text = TextExtractField(
        document=True, use_template=True,
        model_attr=(
            ('texto_integral', 'file_extractor'),
            ('texto_articulado', 'ta_extractor'),
            ('ementa', 'string_extractor'),
            ('indexacao', 'string_extractor'),
            ('observacao', 'string_extractor'),
        )
    )
class MateriaLegislativaIndex(DocumentoAcessorioIndex):
    """Index for MateriaLegislativa: original text file, articulated text, metadata."""
    model = MateriaLegislativa
    text = TextExtractField(
        document=True, use_template=True,
        model_attr=(
            ('texto_original', 'file_extractor'),
            ('texto_articulado', 'ta_extractor'),
            ('ementa', 'string_extractor'),
            ('indexacao', 'string_extractor'),
            ('observacao', 'string_extractor'),
        )
    )
class SessaoPlenariaIndex(DocumentoAcessorioIndex):
    """Index for SessaoPlenaria: minutes, annex and agenda file uploads."""
    model = SessaoPlenaria
    text = TextExtractField(
        document=True, use_template=True,
        model_attr=(
            ('upload_ata', 'file_extractor'),
            ('upload_anexo', 'file_extractor'),
            ('upload_pauta', 'file_extractor'),
        )
    )
298,320 | parse time notes | import re
from collections import defaultdict
from datetime import datetime, timedelta
import scrapy
from city_scrapers_core.constants import COMMISSION
from city_scrapers_core.items import Meeting
from city_scrapers_core.spiders import CityScrapersSpider
from dateutil.relativedelta import relativedelta
class CookHumanRightsSpider(CityScrapersSpider):
    """Scraper for Cook County Commission on Human Rights meetings.

    Collects minutes documents from the agency page, then walks the county
    calendar six months back and forward to find the meeting events and
    attaches the minutes links by "yy-mm" month key.
    """
    name = "cook_human_rights"
    agency = "Cook County Commission on Human Rights"
    timezone = "America/Chicago"
    start_urls = ["https://www.cookcountyil.gov/agency/commission-human-rights-0"]

    def __init__(self, *args, **kwargs):
        # maps "yy-mm" -> list of minutes link dicts for that month's meeting
        self.link_map = defaultdict(list)
        super().__init__(*args, **kwargs)

    def parse(self, response):
        """
        `parse` should always `yield` Meeting items.

        Finds "Minutes" links on the agency page, derives the month/year
        from the link text, and follows each file page.
        """
        links = response.css(
            "#block-fieldblock-node-agency-default-field-resources .content a"
        )
        # matches e.g. "January 9, 2020" -> month name and year groups
        pattern = r"( *)(?P<m>[a-zA-Z]+)( *)(\d+),( *)(?P<y>\d{4})"
        for link in links:
            link_text = " ".join(link.css("*::text").extract()).strip()
            link_relative_path = link.attrib["href"]  # href = relative file address
            file_id = re.search(r"\d{4,6}", link_relative_path)
            url = "https://www.cookcountyil.gov/file/{}/".format(file_id[0])
            if "Minutes" in link_text:
                regex = re.search(pattern, link_text)
                if regex is not None:
                    raw_monthyear = regex.group("m") + " " + regex.group("y")
                    # short month names ("Jan") use %b, full names use %B
                    if len(regex.group("m")) < 4:
                        date_obj = datetime.strptime(raw_monthyear, "%b %Y")
                    else:
                        date_obj = datetime.strptime(raw_monthyear, "%B %Y")
                    formatted_date = datetime.strftime(date_obj, "%y-%m")
                    yield response.follow(
                        url=url,
                        method="GET",
                        callback=self._parse_meetings_page,
                        meta={"formatted_date": formatted_date},
                    )

    def _parse_links(self, response):
        """Parse file page to get minutes file link."""
        formatted_date = response.meta.get("formatted_date")
        link = response.xpath("//a[contains(@href, 'default/files')]")
        link_path = link.xpath("./@href").extract_first()
        self.link_map[formatted_date].append(
            {"title": "Minutes", "href": response.urljoin(link_path)}
        )

    def _parse_meetings_page(self, response):
        """
        Trigger collecting the Minutes files, then go to the calendar
        pages to extract meetings.
        """
        self._parse_links(response)
        today = datetime.now()
        for month_delta in range(-6, 6):  # Meetings from 6 months ago to next 6 months
            mo_str = (today + relativedelta(months=month_delta)).strftime("%Y-%m")
            url = (
                "https://www.cookcountyil.gov/"
                "calendar-node-field-date/month/{}".format(mo_str)
            )
            yield scrapy.Request(
                url=url, method="GET", callback=self._parse_events_page
            )

    def _parse_events_page(self, response):
        """Parse the calendar page to find human rights committee meetings."""
        for url in self._get_event_urls(response):
            yield scrapy.Request(url, callback=self._parse_event, dont_filter=True)

    def _get_event_urls(self, response):
        """
        Get urls for all Human Rights Commission meetings on the page.
        """
        responses = []
        events = response.xpath("//a[contains(@href, 'event')]")
        for event in events:
            event_title = event.xpath("text()").extract_first().lower()
            if "human rights" in event_title:
                href = event.xpath("./@href").extract_first()
                responses.append(response.urljoin(href))
        return responses

    def _parse_event(self, response):
        """Parse the event page into a Meeting item."""
        start = self._parse_start(response)
        links_key = start.strftime("%y-%m")
        meeting = Meeting(
            title=self._parse_title(),
            description=self._parse_description(response),
            classification=self._parse_classification(),
            start=start,
            end=self._parse_end(response),
            time_notes=self.METHOD_NAME(),
            all_day=self._parse_all_day(response),
            location=self._parse_location(response),
            links=self.link_map[links_key],
            source=response.url,
        )
        meeting["id"] = self._get_id(meeting)
        meeting["status"] = self._get_status(meeting)
        return meeting

    def _parse_title(self):
        """Parse or generate meeting title."""
        return "Commission on Human Rights"

    def _parse_description(self, response):
        """Parse or generate meeting description."""
        block = response.xpath(
            "//div[contains(@class,'field-name-field-event-description')]"
        )
        field_items = block.xpath(".//div[contains(@class, 'field-items')]")
        return " ".join(
            field_items.xpath(".//p/text()").extract()
            + field_items.xpath(".//strong/text()").extract()
        ).strip()

    def _parse_classification(self):
        """Parse or generate classification from allowed options."""
        return COMMISSION

    def _parse_start(self, response):
        """Parse start date and time."""
        start = response.xpath(
            '//span[@class="date-display-single"]/descendant-or-self::*/text()'
        ).extract()
        start = "".join(start).upper()
        start = start.split(" TO ")[0].strip()
        start = start.replace("(ALL DAY)", "12:00AM")
        return datetime.strptime(start, "%B %d, %Y %I:%M%p")

    def _parse_end(self, response):
        """Parse end date and time."""
        date = response.xpath(
            '//span[@class="date-display-single"]/descendant-or-self::*/text()'
        ).extract()
        date = "".join(date).upper()
        # BUG FIX: str.replace returns a new string; the original discarded
        # the result, so all-day events fell through to strptime with
        # "(ALL DAY)" still in the text and crashed.
        date = date.replace("(ALL DAY)", " TO 11:59PM")
        start_end = date.split(" TO ")
        if len(start_end) < 2:
            start = datetime.strptime(start_end[0], "%B %d, %Y %I:%M%p")
            return start + timedelta(hours=2)  # usually this meeting takes 2 hours
        end_time = start_end[1]
        # drop the trailing time portion of "MONTH D, YYYY H:MM(AM|PM)"
        date = start_end[0][: start_end[0].rindex(" ")]
        return datetime.strptime("{} {}".format(date, end_time), "%B %d, %Y %I:%M%p")

    def METHOD_NAME(self):
        """Parse any additional notes on the timing of the meeting."""
        return "Regular meetings are held on the second Thursday of every other month"

    def _parse_all_day(self, response):
        """
        Parse or generate all-day status. Defaults to false.
        """
        date = response.xpath(
            '//span[@class="date-display-single"]/descendant-or-self::*/text()'
        ).extract()
        date = "".join(date).upper()
        return "ALL DAY" in date

    def _parse_location(self, response):
        """
        Parse or generate location.
        """
        address = response.xpath(
            '//div[@class="field event-location"]/descendant::*/text()'
        ).extract()
        address = " ".join([w for w in address if w not in ["Location:", ", ", " "]])
        if "Virtual Meeting" in address:
            return {
                "address": "",
                "name": "Virtual Meeting",
            }
        else:
            return {
                "address": address,
                "name": "",
            }

    def _parse_source(self, response):
        """Parse or generate source."""
        return response.url
298,321 | test m gt n rank deficient | """
Unit test for Linear Programming via Simplex Algorithm.
"""
# TODO: add tests for:
# https://github.com/scipy/scipy/issues/5400
# https://github.com/scipy/scipy/issues/6690
import numpy as np
from numpy.testing import (
assert_,
assert_allclose,
assert_equal)
from .test_linprog import magic_square
from scipy.optimize._remove_redundancy import _remove_redundancy_svd
from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_dense
from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_sparse
from scipy.optimize._remove_redundancy import _remove_redundancy_id
from scipy.sparse import csc_matrix
def setup_module():
    """Seed NumPy's global RNG so the module's random tests are reproducible."""
    np.random.seed(2017)
def _assert_success(
        res,
        desired_fun=None,
        desired_x=None,
        rtol=1e-7,
        atol=1e-7):
    """Assert that a linprog result object reports success.

    res: linprog result object
    desired_fun: desired objective function value or None
    desired_x: desired solution or None
    """
    assert_(res.success)
    assert_equal(res.status, 0)
    if desired_fun is not None:
        assert_allclose(res.fun, desired_fun, rtol=rtol, atol=atol,
                        err_msg="converged to an unexpected objective value")
    if desired_x is not None:
        assert_allclose(res.x, desired_x, rtol=rtol, atol=atol,
                        err_msg="converged to an unexpected solution")
def redundancy_removed(A, B):
    """Checks whether a matrix contains only independent rows of another."""
    for row_a in A:
        # `row_a in B` is not a reliable containment test for ndarray rows,
        # so compare element-wise against every row of B explicitly.
        if not any(np.all(row_a == row_b) for row_b in B):
            return False
    # every row of A appears in B; A must also be full-rank and span B
    return A.shape[0] == np.linalg.matrix_rank(A) == np.linalg.matrix_rank(B)
class RRCommonTests:
    """Shared tests for redundancy removers; subclasses provide ``self.rr``.

    ``self.rr(A, b)`` returns ``(A1, b1, status, message)``. The tests below
    expect status 0 on success and status 2 when the system A x = b is
    inconsistent (see the ``test_infeasible_*`` cases).
    """
    def test_no_redundancy(self):
        """A generic full-rank square system is returned unchanged."""
        m, n = 10, 10
        A0 = np.random.rand(m, n)
        b0 = np.random.rand(m)
        A1, b1, status, message = self.rr(A0, b0)
        assert_allclose(A0, A1)
        assert_allclose(b0, b1)
        assert_equal(status, 0)
    def test_infeasible_zero_row(self):
        """A zero row with a (generically) nonzero RHS entry is inconsistent."""
        A = np.eye(3)
        A[1, :] = 0
        b = np.random.rand(3)
        A1, b1, status, message = self.rr(A, b)
        assert_equal(status, 2)
    def test_remove_zero_row(self):
        """A zero row with zero RHS is redundant and must be dropped."""
        A = np.eye(3)
        A[1, :] = 0
        b = np.random.rand(3)
        b[1] = 0
        A1, b1, status, message = self.rr(A, b)
        assert_equal(status, 0)
        assert_allclose(A1, A[[0, 2], :])
        assert_allclose(b1, b[[0, 2]])
    def test_infeasible_m_gt_n(self):
        """Generic overdetermined system: no exact solution exists."""
        m, n = 20, 10
        A0 = np.random.rand(m, n)
        b0 = np.random.rand(m)
        A1, b1, status, message = self.rr(A0, b0)
        assert_equal(status, 2)
    def test_infeasible_m_eq_n(self):
        """Duplicated row direction with inconsistent RHS is detected."""
        m, n = 10, 10
        A0 = np.random.rand(m, n)
        b0 = np.random.rand(m)
        A0[-1, :] = 2 * A0[-2, :]
        A1, b1, status, message = self.rr(A0, b0)
        assert_equal(status, 2)
    def test_infeasible_m_lt_n(self):
        """Linearly dependent last row with inconsistent RHS is detected."""
        m, n = 9, 10
        A0 = np.random.rand(m, n)
        b0 = np.random.rand(m)
        A0[-1, :] = np.arange(m - 1).dot(A0[:-1])
        A1, b1, status, message = self.rr(A0, b0)
        assert_equal(status, 2)
    def test_m_gt_n(self):
        """Consistent overdetermined system reduces to n independent rows."""
        np.random.seed(2032)
        m, n = 20, 10
        A0 = np.random.rand(m, n)
        b0 = np.random.rand(m)
        # make the extra rows consistent with the solution of the first n
        x = np.linalg.solve(A0[:n, :], b0[:n])
        b0[n:] = A0[n:, :].dot(x)
        A1, b1, status, message = self.rr(A0, b0)
        assert_equal(status, 0)
        assert_equal(A1.shape[0], n)
        assert_equal(np.linalg.matrix_rank(A1), n)
    def METHOD_NAME(self):
        """Rank-1 tall matrix (all rows identical) reduces to a single row."""
        m, n = 20, 10
        A0 = np.zeros((m, n))
        A0[:, 0] = 1
        b0 = np.ones(m)
        A1, b1, status, message = self.rr(A0, b0)
        assert_equal(status, 0)
        assert_allclose(A1, A0[0:1, :])
        assert_allclose(b1, b0[0])
    def test_m_lt_n_rank_deficient(self):
        """Consistent dependent row in a wide matrix is removed."""
        m, n = 9, 10
        A0 = np.random.rand(m, n)
        b0 = np.random.rand(m)
        # last row (and RHS) is the same combination of the others
        A0[-1, :] = np.arange(m - 1).dot(A0[:-1])
        b0[-1] = np.arange(m - 1).dot(b0[:-1])
        A1, b1, status, message = self.rr(A0, b0)
        assert_equal(status, 0)
        assert_equal(A1.shape[0], 8)
        assert_equal(np.linalg.matrix_rank(A1), 8)
    def test_dense1(self):
        """Structured dense matrix: output rows are independent rows of input."""
        A = np.ones((6, 6))
        A[0, :3] = 0
        A[1, 3:] = 0
        A[3:, ::2] = -1
        A[3, :2] = 0
        A[4, 2:] = 0
        b = np.zeros(A.shape[0])
        A1, b1, status, message = self.rr(A, b)
        assert_(redundancy_removed(A1, A))
        assert_equal(status, 0)
    def test_dense2(self):
        """Identity plus a dependent all-ones row (homogeneous RHS)."""
        A = np.eye(6)
        A[-2, -1] = 1
        A[-1, :] = 1
        b = np.zeros(A.shape[0])
        A1, b1, status, message = self.rr(A, b)
        assert_(redundancy_removed(A1, A))
        assert_equal(status, 0)
    def test_dense3(self):
        """Same structure as test_dense2 but with a consistent nonzero RHS."""
        A = np.eye(6)
        A[-2, -1] = 1
        A[-1, :] = 1
        b = np.random.rand(A.shape[0])
        b[-1] = np.sum(b[:-1])
        A1, b1, status, message = self.rr(A, b)
        assert_(redundancy_removed(A1, A))
        assert_equal(status, 0)
    def test_m_gt_n_sparse(self):
        """Sparse tall matrix reduces to its rank with homogeneous RHS."""
        np.random.seed(2013)
        m, n = 20, 5
        p = 0.1
        A = np.random.rand(m, n)
        A[np.random.rand(m, n) > p] = 0
        rank = np.linalg.matrix_rank(A)
        b = np.zeros(A.shape[0])
        A1, b1, status, message = self.rr(A, b)
        assert_equal(status, 0)
        assert_equal(A1.shape[0], rank)
        assert_equal(np.linalg.matrix_rank(A1), rank)
    def test_m_lt_n_sparse(self):
        """Sparse wide matrix reduces to its rank with homogeneous RHS."""
        np.random.seed(2017)
        m, n = 20, 50
        p = 0.05
        A = np.random.rand(m, n)
        A[np.random.rand(m, n) > p] = 0
        rank = np.linalg.matrix_rank(A)
        b = np.zeros(A.shape[0])
        A1, b1, status, message = self.rr(A, b)
        assert_equal(status, 0)
        assert_equal(A1.shape[0], rank)
        assert_equal(np.linalg.matrix_rank(A1), rank)
    def test_m_eq_n_sparse(self):
        """Very sparse square matrix reduces to its rank."""
        np.random.seed(2017)
        m, n = 100, 100
        p = 0.01
        A = np.random.rand(m, n)
        A[np.random.rand(m, n) > p] = 0
        rank = np.linalg.matrix_rank(A)
        b = np.zeros(A.shape[0])
        A1, b1, status, message = self.rr(A, b)
        assert_equal(status, 0)
        assert_equal(A1.shape[0], rank)
        assert_equal(np.linalg.matrix_rank(A1), rank)
    def test_magic_square(self):
        """3x3 magic-square LP constraints reduce to 23 independent rows."""
        A, b, c, numbers = magic_square(3)
        A1, b1, status, message = self.rr(A, b)
        assert_equal(status, 0)
        assert_equal(A1.shape[0], 23)
        assert_equal(np.linalg.matrix_rank(A1), 23)
    def test_magic_square2(self):
        """4x4 magic-square LP constraints reduce to 39 independent rows."""
        A, b, c, numbers = magic_square(4)
        A1, b1, status, message = self.rr(A, b)
        assert_equal(status, 0)
        assert_equal(A1.shape[0], 39)
        assert_equal(np.linalg.matrix_rank(A1), 39)
class TestRRSVD(RRCommonTests):
    """Run the common tests against the SVD-based redundancy remover."""
    def rr(self, A, b):
        return _remove_redundancy_svd(A, b)
class TestRRPivotDense(RRCommonTests):
    """Run the common tests against the dense pivot-based remover."""
    def rr(self, A, b):
        return _remove_redundancy_pivot_dense(A, b)
class TestRRID(RRCommonTests):
    """Run the common tests against the interpolative-decomposition remover."""
    def rr(self, A, b):
        return _remove_redundancy_id(A, b)
class TestRRPivotSparse(RRCommonTests):
    """Run the common tests against the sparse pivot-based remover.

    Converts the dense test input to CSC and the result back to dense so the
    shared assertions apply unchanged.
    """
    def rr(self, A, b):
        rr_res = _remove_redundancy_pivot_sparse(csc_matrix(A), b)
        A1, b1, status, message = rr_res
        return A1.toarray(), b1, status, message
298,322 | test authz allow fail | import pytest
from satosa.internal import AuthenticationInformation
from satosa.internal import InternalData
from satosa.micro_services.attribute_authorization import AttributeAuthorization
from satosa.exception import SATOSAAuthenticationError
from satosa.context import Context
class TestAttributeAuthorization:
    """Tests for the AttributeAuthorization micro-service allow/deny rules.

    The rule dicts are keyed by requester (here "" = any) and provider
    (here "default"), mapping attribute names to lists of regex patterns --
    TODO confirm key semantics against the micro-service docs.
    """
    def create_authz_service(
        self,
        attribute_allow,
        attribute_deny,
        force_attributes_presence_on_allow=False,
        force_attributes_presence_on_deny=False,
    ):
        """Build an AttributeAuthorization service with the given rules.

        The downstream ``next`` callable is stubbed to return the data
        unchanged, so ``process`` only exercises the authorization logic.
        """
        authz_service = AttributeAuthorization(
            config=dict(
                force_attributes_presence_on_allow=force_attributes_presence_on_allow,
                force_attributes_presence_on_deny=force_attributes_presence_on_deny,
                attribute_allow=attribute_allow,
                attribute_deny=attribute_deny,
            ),
            name="test_authz",
            base_url="https://satosa.example.com",
        )
        authz_service.next = lambda ctx, data: data
        return authz_service
    def test_authz_allow_success(self):
        """An attribute matching the allow pattern passes through."""
        attribute_allow = {
            "": { "default": {"a0": ['.+@.+']} }
        }
        attribute_deny = {}
        authz_service = self.create_authz_service(attribute_allow, attribute_deny)
        resp = InternalData(auth_info=AuthenticationInformation())
        resp.attributes = {
            "a0": ["test@example.com"],
        }
        try:
            ctx = Context()
            ctx.state = dict()
            authz_service.process(ctx, resp)
        except SATOSAAuthenticationError:
            # process() must not raise for an allowed attribute
            assert False
    def METHOD_NAME(self):
        """An attribute matching none of the allow patterns is rejected."""
        attribute_allow = {
            "": { "default": {"a0": ['foo1','foo2']} }
        }
        attribute_deny = {}
        authz_service = self.create_authz_service(attribute_allow, attribute_deny)
        resp = InternalData(auth_info=AuthenticationInformation())
        resp.attributes = {
            "a0": ["bar"],
        }
        with pytest.raises(SATOSAAuthenticationError):
            ctx = Context()
            ctx.state = dict()
            authz_service.process(ctx, resp)
    def test_authz_allow_missing(self):
        """A missing attribute is rejected when presence is forced on allow."""
        attribute_allow = {
            "": { "default": {"a0": ['foo1','foo2']} }
        }
        attribute_deny = {}
        authz_service = self.create_authz_service(attribute_allow, attribute_deny, force_attributes_presence_on_allow=True)
        resp = InternalData(auth_info=AuthenticationInformation())
        resp.attributes = {
        }
        with pytest.raises(SATOSAAuthenticationError):
            ctx = Context()
            ctx.state = dict()
            authz_service.process(ctx, resp)
    def test_authz_allow_second(self):
        """Any single matching value among several is enough to allow."""
        attribute_allow = {
            "": { "default": {"a0": ['foo1','foo2']} }
        }
        attribute_deny = {}
        authz_service = self.create_authz_service(attribute_allow, attribute_deny)
        resp = InternalData(auth_info=AuthenticationInformation())
        resp.attributes = {
            "a0": ["foo2","kaka"],
        }
        try:
            ctx = Context()
            ctx.state = dict()
            authz_service.process(ctx, resp)
        except SATOSAAuthenticationError:
            # process() must not raise when one value matches
            assert False
    def test_authz_deny_success(self):
        """An attribute matching a deny pattern is rejected."""
        attribute_deny = {
            "": { "default": {"a0": ['foo1','foo2']} }
        }
        attribute_allow = {}
        authz_service = self.create_authz_service(attribute_allow, attribute_deny)
        resp = InternalData(auth_info=AuthenticationInformation())
        resp.attributes = {
            "a0": ["foo2"],
        }
        with pytest.raises(SATOSAAuthenticationError):
            ctx = Context()
            ctx.state = dict()
            authz_service.process(ctx, resp)
    def test_authz_deny_fail(self):
        """An attribute matching no deny pattern passes through."""
        attribute_deny = {
            "": { "default": {"a0": ['foo1','foo2']} }
        }
        attribute_allow = {}
        authz_service = self.create_authz_service(attribute_allow, attribute_deny)
        resp = InternalData(auth_info=AuthenticationInformation())
        resp.attributes = {
            "a0": ["foo3"],
        }
        try:
            ctx = Context()
            ctx.state = dict()
            authz_service.process(ctx, resp)
        except SATOSAAuthenticationError:
            # process() must not raise for a non-denied attribute
            assert False
298,323 | get tag | """Utility function for estimator testing.
copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""
__author__ = ["mloning", "fkiraly"]
from inspect import isclass, signature
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from sklearn.utils.validation import check_random_state
from sktime.alignment.base import BaseAligner
from sktime.base import BaseEstimator, BaseObject
from sktime.classification.base import BaseClassifier
from sktime.classification.early_classification import BaseEarlyClassifier
from sktime.clustering.base import BaseClusterer
from sktime.datatypes._panel._check import is_nested_dataframe
from sktime.dists_kernels import BasePairwiseTransformer, BasePairwiseTransformerPanel
from sktime.forecasting.base import BaseForecaster
from sktime.regression.base import BaseRegressor
from sktime.tests._config import VALID_ESTIMATOR_TYPES
from sktime.transformations.base import BaseTransformer
def _get_err_msg(estimator):
    """Compose the error message for an estimator of unsupported type."""
    return (
        f"Invalid estimator type: {type(estimator)}. "
        f"Valid estimator types are: {VALID_ESTIMATOR_TYPES}"
    )
def _list_required_methods(estimator):
    """Return list of required method names (beyond BaseEstimator ones)."""
    # every BaseObject child must expose these
    object_methods = ["set_params", "get_params"]
    # every BaseEstimator child must additionally expose these
    estimator_methods = [
        "fit",
        "check_is_fitted",
        "is_fitted",  # read-only property
    ]
    # prediction/forecasting base classes that must have predict
    predict_bases = (
        BaseClusterer,
        BaseRegressor,
        BaseForecaster,
    )
    # transformation base classes that must have transform
    transform_bases = (
        BaseTransformer,
        BasePairwiseTransformer,
        BasePairwiseTransformerPanel,
    )
    required = []
    if isinstance(estimator, BaseObject):
        required.extend(object_methods)
    if isinstance(estimator, BaseEstimator):
        required.extend(estimator_methods)
    if isinstance(estimator, predict_bases):
        required.append("predict")
    if isinstance(estimator, transform_bases):
        required.append("transform")
    if isinstance(estimator, BaseAligner):
        required.extend(
            [
                "get_alignment",
                "get_alignment_loc",
                "get_aligned",
                "get_distance",
                "get_distance_matrix",
            ]
        )
    return required
def _make_primitives(n_columns=1, random_state=None):
    """Generate one or more primitives, for checking inverse-transform.

    Parameters
    ----------
    n_columns : int, default=1
        Number of primitives; 1 returns a scalar, otherwise a 1D array.
    random_state : int, RandomState instance or None
        Seed or generator passed to check_random_state.

    Returns
    -------
    float or np.ndarray of shape (n_columns,)
    """
    rng = check_random_state(random_state)
    if n_columns == 1:
        return rng.rand()
    # BUG FIX: RandomState.rand takes its dimensions as positional
    # arguments and has no `size` keyword; the original call raised
    # TypeError for n_columns != 1.
    return rng.rand(n_columns)
def _make_tabular_X(n_instances=20, n_columns=1, return_numpy=True, random_state=None):
    """Generate tabular X, for checking inverse-transform.

    Returns an (n_instances, n_columns) random array, wrapped in a
    pd.DataFrame when return_numpy is False.
    """
    rng = check_random_state(random_state)
    values = rng.rand(n_instances, n_columns)
    return values if return_numpy else pd.DataFrame(values)
def _compare_nested_frame(func, x, y, **kwargs):
    """Compare two nested pd.DataFrames.

    Parameters
    ----------
    func : function
        Function from np.testing for comparing arrays.
    x : pd.DataFrame
    y : pd.DataFrame
    kwargs : dict
        Keyword argument for function

    Raises
    ------
    AssertionError
        If x and y are not equal
    """
    assert isinstance(x, pd.DataFrame)
    # In rare cases x and y may be empty (e.g. TSFreshRelevantFeatureExtractor)
    # and there are no cells to compare, so fall back to a whole-frame check.
    if x.empty:
        assert_frame_equal(x, y)
        return
    if not is_nested_dataframe(x):
        return
    # Cell-wise comparison works for unequal-length data, unlike
    # tabularizing the frames first.
    if x.shape != y.shape:
        raise ValueError("Found inputs with different shapes")
    for col in range(x.shape[1]):
        x_cells = x.iloc[:, col].tolist()
        y_cells = y.iloc[:, col].tolist()
        for x_cell, y_cell in zip(x_cells, y_cells):
            func(x_cell, y_cell, **kwargs)
def _assert_array_almost_equal(x, y, decimal=6, err_msg=""):
    """Assert near-equality, cell-wise when x is a (nested) DataFrame."""
    comparer = np.testing.assert_array_almost_equal
    if isinstance(x, pd.DataFrame):
        _compare_nested_frame(comparer, x, y, decimal=decimal, err_msg=err_msg)
        return
    comparer(x, y, decimal=decimal, err_msg=err_msg)
def _assert_array_equal(x, y, err_msg=""):
    """Assert exact equality, cell-wise when x is a (nested) DataFrame."""
    comparer = np.testing.assert_array_equal
    if isinstance(x, pd.DataFrame):
        _compare_nested_frame(comparer, x, y, err_msg=err_msg)
        return
    comparer(x, y, err_msg=err_msg)
def _get_args(function, varargs=False):
    """Get function arguments.

    Returns the list of named parameters; with ``varargs=True``, returns
    ``(args, varargs_names_or_None)`` instead.
    """
    try:
        params = signature(function).parameters
    except ValueError:
        # builtin C functions may not expose a signature
        return []
    named = []
    star_args = []
    for name, param in params.items():
        if param.kind == param.VAR_POSITIONAL:
            star_args.append(name)
        elif param.kind != param.VAR_KEYWORD:
            named.append(name)
    if varargs:
        return named, (star_args or None)
    return named
def _has_capability(est, method: str) -> bool:
    """Check whether estimator has capability of method."""
    def METHOD_NAME(est, tag_name, tag_value_default=None):
        # classes use the class-level tag lookup; instances use the
        # non-raising instance-level lookup
        if isclass(est):
            return est.get_class_tag(
                tag_name=tag_name, tag_value_default=tag_value_default
            )
        else:
            return est.METHOD_NAME(
                tag_name=tag_name,
                tag_value_default=tag_value_default,
                raise_error=False,
            )
    if not hasattr(est, method):
        return False
    # inverse_transform is gated by an explicit capability tag
    if method == "inverse_transform":
        return METHOD_NAME(est, "capability:inverse_transform", False)
    # probabilistic prediction methods share the pred_int capability tag
    if method in [
        "predict_proba",
        "predict_interval",
        "predict_quantiles",
        "predict_var",
    ]:
        ALWAYS_HAVE_PREDICT_PROBA = (BaseClassifier, BaseEarlyClassifier, BaseClusterer)
        # all classifiers and clusterers implement predict_proba
        if method == "predict_proba" and isinstance(est, ALWAYS_HAVE_PREDICT_PROBA):
            return True
        return METHOD_NAME(est, "capability:pred_int", False)
    # skip transform for forecasters that have it - pipelines
    if method == "transform" and isinstance(est, BaseForecaster):
        return False
    return True
298,324 | test react proposals vote | import html
import json
import re
import pytest
from django.contrib.contenttypes.models import ContentType
from django.contrib.sessions.middleware import SessionMiddleware
from django.urls import reverse
from adhocracy4.test.helpers import freeze_phase
from adhocracy4.test.helpers import freeze_post_phase
from adhocracy4.test.helpers import render_template
from adhocracy4.test.helpers import setup_phase
from meinberlin.apps.budgeting import phases
from meinberlin.apps.budgeting.models import Proposal
from tests.votes.test_token_vote_api import add_token_to_session
@pytest.mark.django_db
def test_react_proposals(module, rf):
    """The react_proposals tag renders a widget div with the expected props."""
    request = rf.get("/")
    template = "{% load react_proposals %}{% react_proposals module %}"
    contenttype = ContentType.objects.get_for_model(Proposal)
    # the rendered div carries its props as an HTML-escaped JSON attribute
    expected = (
        r"^<div data-mb-widget=\"proposals\" data-attributes="
        r"\"(?P<props>{.+})\"><\/div>$"
    )
    props = get_rendered_props(
        {"request": request, "module": module}, expected, template
    )
    assert props == {
        "proposals_api_url": reverse("proposals-list", kwargs={"module_pk": module.pk}),
        "tokenvote_api_url": reverse(
            "tokenvotes-list",
            kwargs={"module_pk": module.pk, "content_type": contenttype.id},
        ),
        "end_session_url": reverse("end_session"),
    }
@pytest.mark.django_db
def METHOD_NAME(
    phase_factory, proposal_factory, voting_token_factory, token_vote_factory, user, rf
):
    """The vote-button tag reflects the session's token-vote state."""
    phase, module, project, proposal = setup_phase(
        phase_factory, proposal_factory, phases.VotingPhase
    )
    request = rf.get("/")
    # NOTE(review): SessionMiddleware() with no get_response argument only
    # works on older Django versions -- confirm the project's Django target.
    middleware = SessionMiddleware()
    middleware.process_request(request)
    request.session.save()
    request.user = user
    template = (
        "{% load react_proposals_vote %}" "{% react_proposals_vote module proposal %}"
    )
    contenttype = ContentType.objects.get_for_model(Proposal)
    expected = (
        r"^<div data-mb-widget=\"vote_button\" data-attributes="
        r"\"(?P<props>{.+})\"><\/div>$"
    )
    # no token in the session yet: widget reports no vote and no token info
    with freeze_phase(phase):
        proposal = get_annotated_proposal(module)
        props = get_rendered_props(
            {"request": request, "module": module, "proposal": proposal},
            expected,
            template,
        )
        assert props == {
            "tokenvote_api_url": reverse(
                "tokenvotes-list",
                kwargs={"module_pk": module.pk, "content_type": contenttype.id},
            ),
            "objectID": proposal.pk,
            "session_token_voted": False,
            "token_info": None,
        }
    # add a token to the session and spend one of its votes on the proposal
    token = voting_token_factory(module=module)
    add_token_to_session(request.session, token)
    token_vote_factory(token=token, content_object=proposal)
    with freeze_phase(phase):
        proposal = get_annotated_proposal(module)
        props = get_rendered_props(
            {"request": request, "module": module, "proposal": proposal},
            expected,
            template,
        )
        assert props == {
            "tokenvote_api_url": reverse(
                "tokenvotes-list",
                kwargs={"module_pk": module.pk, "content_type": contenttype.id},
            ),
            "objectID": proposal.pk,
            "session_token_voted": True,
            "token_info": {
                "votes_left": True,
                "num_votes_left": token.allowed_votes - 1,
            },
        }
@pytest.mark.django_db
def test_react_support(phase_factory, proposal_factory, rating_factory, user, rf):
    """The support tag reflects rating state and phase-derived read-only flag."""
    phase, module, project, proposal = setup_phase(
        phase_factory, proposal_factory, phases.SupportPhase
    )
    request = rf.get("/")
    request.user = user
    template = "{% load react_support %}{% react_support proposal %}"
    contenttype = ContentType.objects.get_for_model(Proposal)
    expected = (
        r"^<div data-mb-widget=\"support\" data-attributes="
        r"\"(?P<props>{.+})\"><\/div>$"
    )
    # during the support phase: no rating yet, widget is writable
    with freeze_phase(phase):
        proposal = get_annotated_proposal(module)
        props = get_rendered_props(
            {"request": request, "proposal": proposal}, expected, template
        )
        assert props == {
            "contentType": contenttype.id,
            "objectId": proposal.pk,
            "authenticated": True,
            "support": 0,
            "userSupported": False,
            "userSupportId": -1,
            "isReadOnly": False,
            "isArchived": False,
        }
    # after the phase: existing rating shown, widget becomes read-only
    rating = rating_factory(content_object=proposal, creator=user)
    with freeze_post_phase(phase):
        proposal = get_annotated_proposal(module)
        props = get_rendered_props(
            {"request": request, "proposal": proposal}, expected, template
        )
        assert props == {
            "contentType": contenttype.id,
            "objectId": proposal.pk,
            "authenticated": True,
            "support": 1,
            "userSupported": True,
            "userSupportId": rating.pk,
            "isReadOnly": True,
            "isArchived": False,
        }
def get_annotated_proposal(module):
    """Return the first proposal of *module*, annotated with rating counts."""
    annotated = (
        Proposal.objects.filter(module=module)
        .annotate_positive_rating_count()
        .annotate_negative_rating_count()
    )
    return annotated.first()
def get_rendered_props(context, expected, template):
    """Render *template* with *context* and return its decoded props dict.

    *expected* is a regex with a ``props`` group holding HTML-escaped JSON.
    """
    rendered = render_template(template, context)
    props_json = re.match(expected, rendered).group("props")
    return json.loads(html.unescape(props_json))
298,325 | test list consistency | # Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for the object_storage_service benchmark worker process."""
import time
import unittest
import object_storage_api_tests # noqa: importing for flags
import object_storage_interface
import validate_service
class MockObjectStorageService(object_storage_interface.ObjectStorageServiceBase):  # noqa
    """In-memory stand-in for an object storage service.

    Stores object payloads in a dict and enforces that a single bucket name
    is used for the lifetime of the instance.
    """
    def __init__(self):
        self.bucket = None   # the single bucket name this mock accepts
        self.objects = {}    # object name -> payload

    def _CheckBucket(self, bucket):
        """Make sure that we are only passed one bucket name.

        Args:
          bucket: the name of a bucket.

        Raises: ValueError, if this object has been passed a different
        bucket name previously.
        """
        if self.bucket is None:
            self.bucket = bucket
        elif self.bucket != bucket:
            raise ValueError(
                'MockObjectStorageService passed two bucket names: %s and %s' %
                (self.bucket, bucket))

    def ListObjects(self, bucket, prefix):
        """Return stored payloads whose names start with prefix."""
        self._CheckBucket(bucket)
        # BUG FIX: dict.iteritems() exists only on Python 2; items() behaves
        # the same here and works on both Python 2 and 3.
        return [value
                for name, value in self.objects.items()
                if name.startswith(prefix)]

    def DeleteObjects(self, bucket, objects_to_delete, objects_deleted=None):
        """Delete the named objects.

        Every requested name is appended to objects_deleted (when given),
        whether or not it existed -- matching the original behavior.
        """
        self._CheckBucket(bucket)
        for name in objects_to_delete:
            if name in self.objects:
                del self.objects[name]
            if objects_deleted is not None:
                objects_deleted.append(name)

    def WriteObjectFromBuffer(self, bucket, object, stream, size):
        """Store the first *size* bytes of *stream*; returns (timestamp, latency)."""
        self._CheckBucket(bucket)
        stream.seek(0)
        self.objects[object] = stream.read(size)
        return time.time(), 0.001

    def ReadObject(self, bucket, object):
        """Simulate a read; raises KeyError if the object does not exist."""
        self._CheckBucket(bucket)
        # value intentionally discarded -- the lookup itself validates
        # existence, mimicking a real read's failure mode
        self.objects[object]
        return time.time(), 0.001
class TestScenarios(unittest.TestCase):
  """Test that the benchmark scenarios complete.
  Specifically, given a correctly operating service
  (MockObjectStorageService), verify that the benchmarking scenarios
  run to completion without raising an exception.
  """
  def setUp(self):
    # Parse an empty argv so flag defaults are populated, then redirect the
    # objects-written file to a throwaway path for the duration of the test.
    self.FLAGS = object_storage_api_tests.FLAGS
    self.FLAGS([])
    self.objects_written_file = self.FLAGS.objects_written_file
    self.FLAGS.objects_written_file = '/tmp/objects-written'
  def tearDown(self):
    # Restore the flag value saved in setUp.
    self.FLAGS.objects_written_file = self.objects_written_file
  def testOneByteRW(self):
    object_storage_api_tests.OneByteRWBenchmark(MockObjectStorageService())
  def METHOD_NAME(self):
    # List-consistency benchmark driven against the in-memory mock.
    object_storage_api_tests.ListConsistencyBenchmark(
        MockObjectStorageService())
  def testSingleStreamThroughput(self):
    object_storage_api_tests.SingleStreamThroughputBenchmark(
        MockObjectStorageService())
  def testCleanupBucket(self):
    object_storage_api_tests.CleanupBucket(MockObjectStorageService())
  def testMultiStreamWriteAndRead(self):
    service = MockObjectStorageService()
    # Have to sequence MultiStreamWrites and MultiStreamReads because
    # MultiStreamReads will read from the objects_written_file that
    # MultiStreamWrites generates.
    object_storage_api_tests.MultiStreamWrites(service)
    object_storage_api_tests.MultiStreamReads(service)
class TestValidateService(unittest.TestCase):
  """Validate the ValidateService script."""
  def setUp(self):
    # Same flag bootstrap as TestScenarios: parse empty argv, then point the
    # objects-written file at a temporary path.
    self.FLAGS = object_storage_api_tests.FLAGS
    self.FLAGS([])
    self.objects_written_file = self.FLAGS.objects_written_file
    self.FLAGS.objects_written_file = '/tmp/objects-written'
  def testValidateService(self):
    # ValidateService should run to completion against the mock service.
    validate_service.ValidateService(MockObjectStorageService())
# Allow running this module directly as a test script.
if __name__ == '__main__':
  unittest.main()
298,326 | asymmetric image | import numpy as np
import pytest
from aspire.image import Image
from aspire.operators import PolarFT
from aspire.utils import gaussian_2d, grid_2d
from aspire.volume import AsymmetricVolume, CnSymmetricVolume
# ==========
# Parameters
# ==========
IMG_SIZES = [
64,
65,
]
DTYPES = [
np.float64,
np.float32,
]
RADIAL_MODES = [
2,
3,
4,
5,
8,
9,
16,
17,
]
# ==================
# Parameter Fixtures
# ==================
@pytest.fixture(params=DTYPES, ids=lambda x: f"dtype={x}")
def dtype(request):
    # Parametrized dtype under test (float32 / float64).
    return request.param
@pytest.fixture(params=IMG_SIZES, ids=lambda x: f"img_size={x}")
def img_size(request):
    # Parametrized image size; covers one even and one odd resolution.
    return request.param
@pytest.fixture(params=RADIAL_MODES, ids=lambda x: f"radial_mode={x}")
def radial_mode(request):
    # Parametrized radial frequency used by radial_mode_image.
    return request.param
# =====================
# Image and PF Fixtures
# =====================
@pytest.fixture
def gaussian(img_size, dtype):
    """Radially symmetric image."""
    sigma = img_size // 10
    image = Image(gaussian_2d(img_size, sigma=(sigma, sigma), dtype=dtype))
    return pf_transform(image)
@pytest.fixture
def symmetric_image(img_size, dtype):
    """Cyclically (C4) symmetric image."""
    volume = CnSymmetricVolume(
        img_size, C=1, order=4, K=25, seed=10, dtype=dtype
    ).generate()
    projection = volume.project(np.eye(3, dtype=dtype))
    return pf_transform(projection)
@pytest.fixture
def METHOD_NAME(img_size, dtype):
    """Asymmetric image fixture; yields (image, polar Fourier transform)."""
    asymmetric_vol = AsymmetricVolume(img_size, C=1, dtype=dtype).generate()
    METHOD_NAME = asymmetric_vol.project(np.eye(3, dtype=dtype))
    pf = pf_transform(METHOD_NAME)
    return METHOD_NAME, pf
@pytest.fixture
def radial_mode_image(img_size, dtype, radial_mode):
    """Image whose radial profile is a sinusoid of the given mode.

    Yields (polar Fourier transform, radial_mode).
    """
    grid = grid_2d(img_size, dtype=dtype)
    img = Image(np.sin(radial_mode * np.pi * grid["r"]))
    return pf_transform(img), radial_mode
# Helper function
def pf_transform(image):
    """Take polar Fourier transform of image."""
    resolution = image.resolution
    pft = PolarFT(
        resolution, nrad=resolution // 2, ntheta=360, dtype=image.dtype
    )
    return pft.transform(image)[0]
# =============
# Testing Suite
# =============
def test_dc_component(METHOD_NAME):
    """Test that the DC component equals the mean of the signal."""
    image, pf = METHOD_NAME
    signal_mean = np.mean(image)
    # pf[:, 0] is the zero-frequency sample of every ray.
    dc_components = abs(pf[:, 0])
    assert np.allclose(dc_components, signal_mean)
def test_radially_symmetric_image(gaussian):
    """Test that all polar Fourier rays are equal for a radially symmetric image."""
    pf = gaussian
    # Every ray should match the first one.
    assert np.allclose(pf, pf[0])
def test_cyclically_symmetric_image(symmetric_image):
    """Test that a symmetric image produces repeated sets of polar Fourier rays."""
    pf = symmetric_image
    # For C4 symmetry any two sets of rays seperated by 90 degrees should be equal.
    # The transform covers 180 degrees, so compare its two halves.
    ntheta = pf.shape[0]  # ntheta is the number of rays in 180 degrees.
    assert np.allclose(abs(pf[: ntheta // 2]), abs(pf[ntheta // 2 :]), atol=1e-7)
def test_radial_modes(radial_mode_image):
    """Check that the sinusoid's radial mode dominates its polar FT."""
    pf, mode = radial_mode_image
    # Set DC component to zero.
    pf[:, 0] = 0
    # Check that all rays are close.
    # NOTE(review): `.all()` reduces to a bool *before* the `< 1e-4` comparison,
    # so this asserts `bool < 1e-4` and is vacuous. The intended check is likely
    # `(abs(abs(pf) - abs(pf[0])) < 1e-4).all()` — confirm the tolerance actually
    # holds before fixing, since the corrected assertion may be stricter.
    assert abs(abs(pf) - abs(pf[0])).all() < 1e-4
    # Check that correct mode is most prominent.
    # Mode could be off by a pixel depending on resolution and mode.
    # Since all rays are close will just test one.
    mode_window = [mode - 1, mode, mode + 1]
    ray = 3
    assert np.argmax(abs(pf[ray])) in mode_window
def test_complex_image_error():
    """Test that we raise for complex images."""
    img_size = 5
    # NOTE(review): the `+ 2j` is applied to the Image object, not the array —
    # presumably Image supports scalar addition; verify against the Image API.
    complex_image = Image(np.ones((img_size, img_size), dtype=np.complex64)) + 2j
    pft = PolarFT(size=img_size, dtype=np.complex64)
    with pytest.raises(TypeError, match=r"The Image `x` must be real valued*"):
        _ = pft.transform(complex_image)
def test_numpy_array_error():
    """Test that we raise when passed numpy array."""
    img_size = 5
    # transform() requires an Image instance, not a bare ndarray.
    image_np = np.ones((img_size, img_size), dtype=np.float32)
    pft = PolarFT(size=img_size, dtype=np.float32)
    with pytest.raises(TypeError, match=r"passed numpy array*"):
        _ = pft.transform(image_np)
def test_inconsistent_dtypes_error():
    """Test that we raise when image and transform dtypes disagree."""
    img_size = 5
    # float32 image against a float64 transform must be rejected.
    image = Image(np.ones((img_size, img_size), dtype=np.float32))
    pft = PolarFT(size=img_size, dtype=np.float64)
    with pytest.raises(TypeError, match=r"Inconsistent dtypes*"):
        _ = pft.transform(image)
def test_theta_error():
    """
    Test that `PolarFT`, when instantiated with odd value for `ntheta`,
    gives appropriate error.
    """
    # Test we raise with expected error.
    with pytest.raises(NotImplementedError, match=r"Only even values for ntheta*"):
        _ = PolarFT(size=42, ntheta=143, dtype=np.float32)
@pytest.mark.parametrize("stack_shape", [(5,), (2, 3)])
def test_half_to_full_transform(stack_shape):
    """
    Test conjugate symmetry and shape of the full polar Fourier transform.
    """
    img_size = 32
    image = Image(
        np.random.rand(*stack_shape, img_size, img_size).astype(np.float32, copy=False)
    )
    pft = PolarFT(size=img_size)
    pf = pft.transform(image)
    # Expand the half-plane transform to the full 360 degrees.
    full_pf = pft.half_to_full(pf)
    # Check shape.
    assert full_pf.shape == (*stack_shape, pft.ntheta, pft.nrad)
    # Check conjugate symmetry against pf.
    assert np.allclose(np.conj(pf), full_pf[..., pft.ntheta // 2 :, :])
    # Check conjugate symmetry against self.
    for ray in range(pft.ntheta // 2):
        np.testing.assert_allclose(
            full_pf[..., ray, :], np.conj(full_pf[..., ray + pft.ntheta // 2, :])
        )
298,327 | initial message | from __future__ import annotations
import typing as T
from string import ascii_uppercase
import discord
from core import Context
from models import Scrim, Timer
from utils import discord_timestamp as dt
from utils import regional_indicator as ri
from ._base import ScrimsView
from ._btns import *
from ._pages import *
class ScrimsEditor(ScrimsView):
    """Interactive embed-based editor for a single :class:`Scrim` record.

    Each editable setting is shown as an embed field labelled A, B, C, ...
    and backed by a button of the same letter added in ``_add_buttons``.
    """
    def __init__(self, ctx: Context, scrim: Scrim):
        super().__init__(ctx, timeout=60.0)
        self.ctx = ctx
        self.record = scrim
        # Placeholder page indicator; the real value comes from scrim_posi().
        self.page_info = ("x", "y")
    async def refresh_view(self):
        """Persist pending edits, reschedule the open timer and redraw the embed."""
        _d = dict(self.record)
        # Fields that must not be written back through make_changes().
        del _d["id"]
        del _d["autoclean"]
        del _d["available_slots"]
        del _d["open_days"]
        # Drop the stale scrim_open timer and schedule a fresh one for the
        # (possibly updated) open_time.
        await Timer.filter(extra={"args": [], "kwargs": {"scrim_id": self.record.id}}, event="scrim_open").delete()
        await self.bot.reminders.create_timer(_d["open_time"], "scrim_open", scrim_id=self.record.id)
        # open_days is updated via raw SQL as an array of day values.
        await self.bot.db.execute(
            """UPDATE public."sm.scrims" SET open_days = $1 WHERE id = $2""",
            [_.value for _ in self.record.open_days],
            self.record.id,
        )
        await self.record.make_changes(**_d)
        await self._add_buttons()
        try:
            self.message = await self.message.edit(embed=await self.METHOD_NAME, view=self)
        except discord.HTTPException:
            # The message is gone or unreachable; tear down the view.
            await self.on_timeout()
    @property
    async def METHOD_NAME(self):
        """Build the settings-overview embed for the current scrim."""
        scrim = self.record
        _e = discord.Embed(color=0x00FFB3, url=self.ctx.config.SERVER_LINK)
        _e.title = "Scrims Editor - Edit Settings"
        # One entry per editable setting; insertion order maps onto the
        # lettered buttons added in _add_buttons().
        fields = {
            "Name": "`{0}`".format(scrim.name),
            "Registration Channel": getattr(scrim.registration_channel, "mention", "`channel-deleted`"),
            "Slotlist Channel": getattr(scrim.slotlist_channel, "mention", "`deleted-channel`"),
            "Success Role": getattr(scrim.role, "mention", "`role-deleted`"),
            "Mentions": f"`{scrim.required_mentions}`",
            "Slots": f"`{scrim.total_slots}`",
            "Open Time": f"{dt(scrim.open_time,'t')} ({dt(scrim.open_time)})",
            f"Reactions {self.bot.config.PRIME_EMOJI}": f"{scrim.check_emoji},{scrim.cross_emoji}",
            "Ping Role": getattr(scrim.ping_role, "mention", "`Not-Set`"),
            "Open Role": getattr(scrim.open_role, "mention", "`role-deleted`"),
            "Multi-Register": ("`Not allowed!`", "`Allowed`")[scrim.multiregister],
            "Team Compulsion": ("`No!`", "`Yes!`")[scrim.teamname_compulsion],
            "Duplicate Team Name": ("`Allowed`", "`Not allowed!`")[scrim.no_duplicate_name],
            "Autodelete Rejected": ("`No!`", "`Yes!`")[scrim.autodelete_rejects],
            "Autodelete Late Messages": ("`No!`", "`Yes!`")[scrim.autodelete_extras],
            "Slotlist Start from": "`{}`".format(scrim.start_from),
            "Autoclean": f"{dt(scrim.autoclean_time,'t')} (`{', '.join(_.name.title() for _ in scrim.autoclean)}`)"
            if scrim.autoclean_time
            else "`Turned OFF`",
            "Scrim Days": ", ".join(map(lambda x: "`{0}`".format(x.name.title()[:2]), self.record.open_days))
            if self.record.open_days
            else "`Not set`",
            f"Required Lines {self.bot.config.PRIME_EMOJI}": ("`Not set`", "`{0}`".format(scrim.required_lines))[
                bool(scrim.required_lines)
            ],
            f"Duplicate / Fake Tags {self.bot.config.PRIME_EMOJI}": ("`Not allowed!`", "`Allowed`")[
                scrim.allow_duplicate_tags
            ],
        }
        for idx, (name, value) in enumerate(fields.items()):
            _e.add_field(
                name=f"{ri(ascii_uppercase[idx])} {name}:",
                value=value,
            )
        _e.add_field(name="\u200b", value="\u200b")  # invisible field
        _e.set_footer(text=f"Page - {' / '.join(await self.record.scrim_posi())}")
        return _e
    async def _add_buttons(self):
        """(Re)populate the view's buttons to match the embed's field letters."""
        self.clear_items()
        # Pagination controls only make sense when the guild has 2+ scrims.
        if await Scrim.filter(guild_id=self.ctx.guild.id).count() >= 2:
            self.add_item(Prev(self.ctx))
            self.add_item(SkipTo(self.ctx))
            self.add_item(Next(self.ctx))
        self.add_item(SetName(self.ctx, "a"))
        self.add_item(RegChannel(self.ctx, "b"))
        self.add_item(SlotChannel(self.ctx, "c"))
        self.add_item(SetRole(self.ctx, "d"))
        self.add_item(SetMentions(self.ctx, "e"))
        self.add_item(TotalSlots(self.ctx, "f"))
        self.add_item(OpenTime(self.ctx, "g"))
        self.add_item(SetEmojis(self.ctx, "h"))
        self.add_item(PingRole(self.ctx, "i"))
        self.add_item(OpenRole(self.ctx, "j"))
        self.add_item(MultiReg(self.ctx, "k"))
        self.add_item(TeamCompulsion(self.ctx, "l"))
        self.add_item(DuplicateTeam(self.ctx, "m"))
        self.add_item(DeleteReject(self.ctx, "n"))
        self.add_item(DeleteLate(self.ctx, "o"))
        self.add_item(SlotlistStart(self.ctx, "p"))
        self.add_item(SetAutoclean(self.ctx, "q"))
        self.add_item(OpenDays(self.ctx, "r"))
        self.add_item(MinLines(self.ctx, "s"))
        self.add_item(DuplicateTags(self.ctx, "t"))
        self.add_item(DeleteScrim(self.ctx))
        self.add_item(Discard(self.ctx, "Main Menu"))
298,328 | compiler name | # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class OpenradiossEngine(CMakePackage):
    """
    OpenRadioss is the publicly available open-source code base that a worldwide
    community of researchers, software developers, and industry leaders are
    enhancing every day. OpenRadioss is changing the game by empowering users to
    make rapid contributions that tackle the latest challenges brought on by rapidly
    evolving technologies like battery development, lightweight materials and composites,
    human body models and biomaterials, autonomous driving and flight,
    as well as the desire to give passengers the safest environment possible via virtual testing.
    OpenRadioss Engine is a component of the OpenRadioss that runs the simulation in parallel.
    """

    homepage = "https://www.openradioss.org/"
    git = "https://github.com/OpenRadioss/OpenRadioss.git"
    maintainers("kjrstory")
    version("main", branch="main")
    variant("mpi", default=False, description="Enable MPI support")
    variant("sp", default=False, description="Using single precision option")
    variant("debug", default=False, description="Debug Option")
    variant("static_link", default=True, description="Static_link Option")
    depends_on("openmpi", when="+mpi")
    depends_on("cmake@2.8:", type="build")
    depends_on("perl", type="build")
    depends_on("python", type="build")
    requires(
        "%gcc",
        "%intel",
        "%oneapi",
        "%aocc",
        "%arm",
        policy="one_of",
        msg="Openradioss-starter can be built using GNU Fortran, Intel Fortran, AOCC, \
        or Armflang compilers only.",
    )
    build_directory = "engine"
    root_cmakelists_dir = "engine"

    @property
    def METHOD_NAME(self):
        """Return the arch/compiler suffix used in build artifact names.

        The requires() directive above guarantees the compiler is one of the
        mapped families, so the lookup cannot fail.
        """
        compiler_mapping = {
            "aocc": "64_AOCC",
            "intel": "64_intel",
            "oneapi": "64_intel",
            "gcc": "64_gf",
            "arm": "a64_gf",
        }
        METHOD_NAME = compiler_mapping[self.spec.compiler.name]
        return METHOD_NAME

    def cmake_args(self):
        """Assemble the CMake arguments from the selected variants."""
        args = [
            "-Dmpi_os=0",
            "-DCMAKE_Fortran_COMPILER={0}".format(spack_fc),
            "-DCMAKE_C_COMPILER={0}".format(spack_cc),
            "-DCMAKE_CPP_COMPILER={0}".format(spack_cxx),
            "-DCMAKE_CXX_COMPILER={0}".format(spack_cxx),
            "-Dsanitize=0",
        ]
        # BUG FIX: the variable was misspelled "-Drach"; the OpenRadioss build
        # system expects "arch" (e.g. -Darch=linux64_gf).
        args.append("-Darch=linux" + self.METHOD_NAME)
        if "+sp" in self.spec:
            args.append("-Dprecision=sp")
        else:
            args.append("-Dprecision=dp")
        if "+mpi" in self.spec:
            args.append("-DMPI=ompi")
            args.append("-Dmpi_root=" + self.spec["mpi"].prefix)
            args.append("-Dmpi_incdir=" + self.spec["mpi"].prefix.include)
            args.append("-Dmpi_libdir=" + self.spec["mpi"].prefix.lib)
        else:
            args.append("-DMPI=smp")
        if "+debug" in self.spec:
            args.append("-Ddebug=1")
        else:
            args.append("-Ddebug=0")
        if "+static_link" in self.spec:
            args.append("-Dstatic_link=1")
        else:
            args.append("-Dstatic_link=0")
        return args

    def install(self, spec, prefix):
        """Copy the built engine binary and its runtime data into the prefix."""
        mkdirp(join_path(prefix, "exec"))
        # MPI builds produce a differently suffixed binary.
        if "+mpi" in spec:
            exec_file = "engine_linux" + self.METHOD_NAME + "_ompi"
        else:
            exec_file = "engine_linux" + self.METHOD_NAME
        install(
            join_path(self.stage.source_path, "engine", exec_file),
            join_path(prefix, "exec", exec_file),
        )
        # NOTE: the original had stray trailing commas after the next two
        # calls, turning them into throwaway tuples; removed (no behavior change).
        install_tree(
            join_path(self.stage.source_path, "hm_cfg_files"), join_path(prefix, "hm_cfg_files")
        )
        install_tree(
            join_path(self.stage.source_path, "extlib", "h3d"), join_path(prefix, "extlib", "h3d")
        )
        install_tree(
            join_path(self.stage.source_path, "extlib", "hm_reader"),
            join_path(prefix, "extlib", "hm_reader"),
        )

    def setup_run_environment(self, env):
        """Expose the engine binary and its libraries to the run environment."""
        env.set("OPENRADIOSS_PATH", self.prefix)
        env.set("RAD_CFG_PATH", join_path(self.prefix, "hm_cfg_files"))
        env.set("RAD_H3D_PATH", join_path(self.prefix, "extlib", "h3d", "lib", "linux64"))
        env.set("OMP_STACKSIZE", "400m")
        env.prepend_path("LD_LIBRARY_PATH", join_path(self.prefix, "extlib", "h3d", "linux64"))
        env.prepend_path(
            "LD_LIBRARY_PATH", join_path(self.prefix, "extlib", "hm_reader", "linux64")
        )
        env.prepend_path("PATH", join_path(self.prefix, "exec"))
        if "+mpi" in self.spec:
            env.prepend_path("PATH", self.spec["mpi"].prefix.bin)
            env.prepend_path("LD_LIBRARY_PATH", self.spec["mpi"].prefix.lib)
298,329 | get error | # Copyright (c) Alibaba, Inc. and its affiliates.
import torch
import torch.nn as nn
from .Embedding import Embedding
from .geometry import index, orthogonal, perspective
from .Res_backbone import Res_hournet
from .Surface_head import Surface_Head
class Pixto3DNet(nn.Module):
    """Pixel-aligned implicit network predicting surface occupancy and RGB."""
    def __init__(self,
                 backbone,
                 head,
                 rgbhead,
                 embedding,
                 projection_mode: str = 'orthogonal',
                 error_term: str = 'mse',
                 num_views: int = 1):
        """
        Parameters:
            backbone: parameter of networks to extract image features
            head: parameter of networks to predict value in surface
            rgbhead: parameter of networks to predict rgb of point
            embedding: parameter of networks to normalize depth of camera coordinate
            projection_mode: how to render your 3d model to images
            error_term: train loss ('mse', 'bce' or 'l1')
            num_view: how many images from which you want to reconstruct model
        """
        super(Pixto3DNet, self).__init__()
        self.backbone = Res_hournet(**backbone)
        self.head = Surface_Head(**head)
        self.rgbhead = Surface_Head(**rgbhead)
        self.depth = Embedding(**embedding)
        # reduction='none' keeps per-element losses so get_error can weight them.
        if error_term == 'mse':
            self.error_term = nn.MSELoss(reduction='none')
        elif error_term == 'bce':
            self.error_term = nn.BCELoss(reduction='none')
        elif error_term == 'l1':
            self.error_term = nn.L1Loss(reduction='none')
        else:
            raise NotImplementedError
        self.index = index
        self.projection = orthogonal if projection_mode == 'orthogonal' else perspective
        self.num_views = num_views
        # Populated by extract_features() / query(); read by later calls.
        self.im_feat_list = []
        self.intermediate_preds_list = []
    def extract_features(self, images: torch.Tensor):
        # Cache backbone feature maps; query()/query_rgb() index into them.
        self.im_feat_list = self.backbone(images)
    def query(self, points, calibs, transforms=None, labels=None):
        """Predict surface values at 3D points; stores results on self.

        Must be called after extract_features(). Side effects: sets
        self.labels (if given), self.intermediate_preds_list, self.phi,
        self.preds.
        """
        if labels is not None:
            self.labels = labels
        # Project world points into the image plane; keep xy for sampling.
        xyz = self.projection(points, calibs, transforms)
        xy = xyz[:, :2, :]
        xyz_feat = self.depth(xyz)
        self.intermediate_preds_list = []
        im_feat_256 = self.im_feat_list[0]
        im_feat_512 = self.im_feat_list[1]
        # Per-point features: sampled image features at two scales + depth embedding.
        point_local_feat_list = [
            self.index(im_feat_256, xy),
            self.index(im_feat_512, xy), xyz_feat
        ]
        point_local_feat = torch.cat(point_local_feat_list, 1)
        pred, phi = self.head(point_local_feat)
        self.intermediate_preds_list.append(pred)
        # phi is the head's intermediate feature, reused by the RGB head.
        self.phi = phi
        self.preds = self.intermediate_preds_list[-1]
    def get_preds(self):
        # Latest surface predictions produced by query().
        return self.preds
    def query_rgb(self, points, calibs, transforms=None):
        """Predict RGB at surface points; returns the rgb tensor.

        Rebuilds the same per-point features as query(), then feeds the
        surface head's features into the RGB head.
        """
        xyz = self.projection(points, calibs, transforms)
        xy = xyz[:, :2, :]
        xyz_feat = self.depth(xyz)
        # NOTE: clears intermediate predictions without repopulating them.
        self.intermediate_preds_list = []
        im_feat_256 = self.im_feat_list[0]
        im_feat_512 = self.im_feat_list[1]
        point_local_feat_list = [
            self.index(im_feat_256, xy),
            self.index(im_feat_512, xy), xyz_feat
        ]
        point_local_feat = torch.cat(point_local_feat_list, 1)
        pred, phi = self.head(point_local_feat)
        rgb_point_feat = torch.cat([point_local_feat, phi], 1)
        rgb, phi = self.rgbhead(rgb_point_feat)
        return rgb
    def METHOD_NAME(self):
        """Return the weighted surface loss over stored predictions.

        Per-element losses are weighted by |label - mean(label)| and scaled
        so the weights average to one — presumably a class-balance scheme
        for sparse occupancy labels (TODO confirm).
        """
        error = 0
        lc = torch.tensor(self.labels.shape[0] * self.labels.shape[1]
                          * self.labels.shape[2])
        inw = torch.sum(self.labels)
        weight_in = inw / lc
        weight = torch.abs(self.labels - weight_in)
        lamda = 1 / torch.mean(weight)
        for preds in self.intermediate_preds_list:
            error += lamda * torch.mean(
                self.error_term(preds, self.labels) * weight)
        error /= len(self.intermediate_preds_list)
        return error
    def forward(self,
                images,
                points,
                calibs,
                surpoint=None,
                transforms=None,
                labels=None):
        """Full pass: features, surface query and (optionally) RGB query."""
        self.extract_features(images)
        self.query(
            points=points, calibs=calibs, transforms=transforms, labels=labels)
        # RGB is only predicted when explicit surface points are supplied.
        if surpoint is not None:
            rgb = self.query_rgb(
                points=surpoint, calibs=calibs, transforms=transforms)
        else:
            rgb = None
        res = self.preds
        return res, rgb
298,330 | get serial | #############################################################################
# Celestica Seastone-DX010
#
# Platform and model specific eeprom subclass, inherits from the base class,
# and provides the followings:
# - the eeprom format definition
# - specific encoder/decoder if there is special need
#############################################################################
try:
import os
import sys
import re
if sys.version_info.major == 3:
from io import StringIO
else:
from cStringIO import StringIO
from sonic_platform_base.sonic_eeprom import eeprom_tlvinfo
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
CACHE_ROOT = '/var/cache/sonic/decode-syseeprom'
CACHE_FILE = 'syseeprom_cache'
NULL = 'N/A'
class Tlv(eeprom_tlvinfo.TlvInfoDecoder):
    """TLV EEPROM decoder for the Celestica Seastone-DX010 platform.

    Reads the system EEPROM once at construction and exposes the decoded
    fields as a dict keyed by TLV type code strings ('0xNN').
    """

    # Number of header lines emitted by decode_eeprom() before the TLV table.
    EEPROM_DECODE_HEADLINES = 6

    def __init__(self):
        self._eeprom_path = "/sys/class/i2c-adapter/i2c-12/12-0050/eeprom"
        super(Tlv, self).__init__(self._eeprom_path, 0, '', True)
        self._eeprom = self._load_eeprom()

    def __parse_output(self, decode_output):
        """Parse decode_eeprom() text output into {'0xNN': value}."""
        # BUG FIX: str.replace() returns a new string; the original call
        # discarded its result, leaving NUL bytes in the text.
        decode_output = decode_output.replace('\0', '')
        lines = decode_output.split('\n')
        lines = lines[self.EEPROM_DECODE_HEADLINES:]
        _eeprom_info_dict = dict()
        for line in lines:
            try:
                # Raw string avoids invalid-escape warnings for \s and \S.
                match = re.search(
                    r'(0x[0-9a-fA-F]{2})([\s]+[\S]+[\s]+)(.*$)', line)
                if match is not None:
                    idx = match.group(1)
                    value = match.group(3).rstrip('\0')
                    _eeprom_info_dict[idx] = value
            except BaseException:
                # Best effort: skip lines that do not parse.
                pass
        return _eeprom_info_dict

    def _load_eeprom(self):
        """Decode the EEPROM, capturing the decoder's stdout for parsing.

        The base class prints the decoded table to stdout, so stdout is
        temporarily redirected to a StringIO and restored afterwards.
        Returns the parsed dict, or False on failure.
        """
        original_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            self.read_eeprom_db()
        except BaseException:
            # No DB cache available: fall through to reading the device below.
            decode_output = sys.stdout.getvalue()
            sys.stdout = original_stdout
            return self.__parse_output(decode_output)
        status = self.check_status()
        if 'ok' not in status:
            return False
        if not os.path.exists(CACHE_ROOT):
            try:
                os.makedirs(CACHE_ROOT)
            except BaseException:
                pass
        #
        # only the eeprom classes that inherit from eeprom_base
        # support caching. Others will work normally
        #
        try:
            self.set_cache_name(os.path.join(CACHE_ROOT, CACHE_FILE))
        except BaseException:
            pass
        e = self.read_eeprom()
        if e is None:
            # CONSISTENCY FIX: other failure paths return False, not 0.
            return False
        try:
            self.update_cache(e)
        except BaseException:
            pass
        self.decode_eeprom(e)
        decode_output = sys.stdout.getvalue()
        sys.stdout = original_stdout
        (is_valid, valid_crc) = self.is_checksum_valid(e)
        if not is_valid:
            return False
        return self.__parse_output(decode_output)

    def _valid_tlv(self, eeprom_data):
        """Ensure every known TLV code has an entry, defaulting to 'N/A'."""
        tlvinfo_type_codes_list = [
            self._TLV_CODE_PRODUCT_NAME,
            self._TLV_CODE_PART_NUMBER,
            self._TLV_CODE_SERIAL_NUMBER,
            self._TLV_CODE_MAC_BASE,
            self._TLV_CODE_MANUF_DATE,
            self._TLV_CODE_DEVICE_VERSION,
            self._TLV_CODE_LABEL_REVISION,
            self._TLV_CODE_PLATFORM_NAME,
            self._TLV_CODE_ONIE_VERSION,
            self._TLV_CODE_MAC_SIZE,
            self._TLV_CODE_MANUF_NAME,
            self._TLV_CODE_MANUF_COUNTRY,
            self._TLV_CODE_VENDOR_NAME,
            self._TLV_CODE_DIAG_VERSION,
            self._TLV_CODE_SERVICE_TAG,
            self._TLV_CODE_VENDOR_EXT,
            self._TLV_CODE_CRC_32
        ]
        for code in tlvinfo_type_codes_list:
            code_str = "0x{:X}".format(code)
            eeprom_data[code_str] = eeprom_data.get(code_str, NULL)
        return eeprom_data

    def get_eeprom(self):
        """Return the full decoded EEPROM dict with defaults filled in."""
        return self._valid_tlv(self._eeprom)

    def get_product_name(self):
        return self._eeprom.get('0x21', NULL)

    def get_pn(self):
        return self._eeprom.get('0x22', NULL)

    def METHOD_NAME(self):
        # Serial number TLV.
        return self._eeprom.get('0x23', NULL)

    def get_mac(self):
        # Base MAC address TLV.
        return self._eeprom.get('0x24', NULL)
298,331 | test scan source and destination | ######################################################################
#
# File: test/unit/replication/test_monitoring.py
#
# Copyright 2022 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from __future__ import annotations
from apiver_deps import (
SSE_B2_AES,
EncryptionAlgorithm,
EncryptionKey,
EncryptionMode,
EncryptionSetting,
FileRetentionSetting,
ReplicationScanResult,
RetentionMode,
)
SSE_C_AES = EncryptionSetting(
mode=EncryptionMode.SSE_C,
algorithm=EncryptionAlgorithm.AES256,
key=EncryptionKey(secret=b'some_key', key_id='some-id'),
)
RETENTION_GOVERNANCE = FileRetentionSetting(RetentionMode.GOVERNANCE, retain_until=1)
DEFAULT_REPLICATION_RESULT = dict(
source_replication_status=None,
source_has_hide_marker=False,
source_encryption_mode=EncryptionMode.NONE,
source_has_large_metadata=False,
source_has_file_retention=False,
source_has_legal_hold=False,
destination_replication_status=None,
metadata_differs=None,
hash_differs=None,
)
def test_iter_pairs(source_bucket, destination_bucket, test_file, monitor):
    """iter_pairs should align source/destination files by relative path."""
    source_file = source_bucket.upload_local_file(test_file, 'folder/test.txt')
    source_subfolder_file = source_bucket.upload_local_file(test_file, 'folder/subfolder/test.txt')
    destination_subfolder_file = destination_bucket.upload_local_file(
        test_file, 'folder/subfolder/test.txt'
    )
    destination_other_file = destination_bucket.upload_local_file(
        test_file, 'folder/subfolder/test2.txt'
    )
    # Rebuild full names from the relative paths yielded by the monitor;
    # None marks a missing counterpart on that side.
    pairs = [
        (
            source_path and 'folder/' + source_path.relative_path,
            destination_path and 'folder/' + destination_path.relative_path,
        ) for source_path, destination_path in monitor.iter_pairs()
    ]
    assert set(pairs) == {
        (source_file.file_name, None),
        (source_subfolder_file.file_name, destination_subfolder_file.file_name),
        (None, destination_other_file.file_name),
    }
def test_scan_source(source_bucket, test_file, monitor):
    """Source-only scan should bucket files by encryption/metadata/retention."""
    # upload various types of files to source and get a report
    files = [
        source_bucket.upload_local_file(test_file, 'folder/test-1-1.txt'),
        source_bucket.upload_local_file(test_file, 'folder/test-1-2.txt'),
        source_bucket.upload_local_file(test_file, 'folder/test-2.txt', encryption=SSE_B2_AES),
        source_bucket.upload_local_file(test_file,
                                        'not-in-folder.txt'),  # monitor should ignore this
        source_bucket.upload_local_file(test_file, 'folder/test-3.txt', encryption=SSE_C_AES),
        source_bucket.upload_local_file(test_file, 'folder/test-4.txt', encryption=SSE_C_AES),
        source_bucket.upload_local_file(
            test_file,
            'folder/subfolder/test-5.txt',
            encryption=SSE_C_AES,
            file_retention=RETENTION_GOVERNANCE
        ),
        source_bucket.upload_local_file(
            test_file,
            'folder/test-large-meta.txt',
            file_info={
                'dummy-key': 'a' * 7000,
            },
        ),
        source_bucket.upload_local_file(
            test_file,
            'folder/test-large-meta-encrypted.txt',
            file_info={
                'dummy-key': 'a' * 2048,
            },
            encryption=SSE_C_AES,
        ),
    ]
    report = monitor.scan(scan_destination=False)
    # Two plain files (test-1-1, test-1-2) fall in the default bucket.
    assert report.counter_by_status[ReplicationScanResult(**DEFAULT_REPLICATION_RESULT)] == 2
    assert report.counter_by_status[ReplicationScanResult(
        **{
            **DEFAULT_REPLICATION_RESULT,
            'source_encryption_mode': EncryptionMode.SSE_B2,
        }
    )] == 1
    assert report.counter_by_status[ReplicationScanResult(
        **{
            **DEFAULT_REPLICATION_RESULT,
            'source_encryption_mode': EncryptionMode.SSE_C,
        }
    )] == 2
    assert report.counter_by_status[ReplicationScanResult(
        **{
            **DEFAULT_REPLICATION_RESULT,
            'source_encryption_mode': EncryptionMode.SSE_C,
            'source_has_file_retention': True,
        }
    )] == 1
    assert report.counter_by_status[ReplicationScanResult(
        **{
            **DEFAULT_REPLICATION_RESULT,
            'source_has_large_metadata': True,
        }
    )] == 1
    assert report.counter_by_status[ReplicationScanResult(
        **{
            **DEFAULT_REPLICATION_RESULT,
            'source_encryption_mode': EncryptionMode.SSE_C,
            'source_has_large_metadata': True,
        }
    )] == 1
    # ---- first and last ----
    # The first/last samples for the default status are the two plain files.
    assert report.samples_by_status_first[ReplicationScanResult(**DEFAULT_REPLICATION_RESULT,)
                                         ][0] == files[0]
    assert report.samples_by_status_last[ReplicationScanResult(**DEFAULT_REPLICATION_RESULT,)
                                        ][0] == files[1]
def METHOD_NAME(
    source_bucket, destination_bucket, test_file, test_file_reversed, monitor
):
    """Two-sided scan should classify pairs as matching, missing or differing."""
    _ = [
        # match
        source_bucket.upload_local_file(test_file, 'folder/test-1.txt'),
        destination_bucket.upload_local_file(test_file, 'folder/test-1.txt'),
        # missing on destination
        source_bucket.upload_local_file(test_file, 'folder/test-2.txt'),
        # missing on source
        destination_bucket.upload_local_file(test_file, 'folder/test-3.txt'),
        # metadata differs
        source_bucket.upload_local_file(
            test_file, 'folder/test-4.txt', file_info={
                'haha': 'hoho',
            }
        ),
        destination_bucket.upload_local_file(
            test_file, 'folder/test-4.txt', file_info={
                'hehe': 'hihi',
            }
        ),
        # hash differs
        source_bucket.upload_local_file(test_file, 'folder/test-5.txt'),
        destination_bucket.upload_local_file(test_file_reversed, 'folder/test-5.txt'),
    ]
    report = monitor.scan(scan_destination=True)
    # match
    assert report.counter_by_status[ReplicationScanResult(
        **{
            **DEFAULT_REPLICATION_RESULT,
            'metadata_differs': False,
            'hash_differs': False,
        }
    )] == 1
    # missing on destination
    assert report.counter_by_status[ReplicationScanResult(
        **{
            **DEFAULT_REPLICATION_RESULT,
            'destination_replication_status': None,
        }
    )] == 1
    # missing on source: all source-side attributes are unknown (None).
    assert report.counter_by_status[ReplicationScanResult(
        **{
            **DEFAULT_REPLICATION_RESULT,
            'source_replication_status': None,
            'source_has_hide_marker': None,
            'source_encryption_mode': None,
            'source_has_large_metadata': None,
            'source_has_file_retention': None,
            'source_has_legal_hold': None,
        }
    )] == 1
    # metadata differs
    assert report.counter_by_status[ReplicationScanResult(
        **{
            **DEFAULT_REPLICATION_RESULT,
            'metadata_differs': True,
            'hash_differs': False,
        }
    )] == 1
    # hash differs
    assert report.counter_by_status[ReplicationScanResult(
        **{
            **DEFAULT_REPLICATION_RESULT,
            'metadata_differs': False,
            'hash_differs': True,
        }
    )] == 1
298,332 | end | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides I{marshaller} core classes.
"""
from logging import getLogger
from suds.mx.appender import ContentAppender
from suds.sax.element import Element
from suds.sax.document import Document
from suds.sudsobject import Property
log = getLogger(__name__)
class Core:
"""
An I{abstract} marshaller. This class implement the core
functionality of the marshaller.
@ivar appender: A content appender.
@type appender: L{ContentAppender}
"""
def __init__(self):
"""
"""
self.appender = ContentAppender(self)
def process(self, content):
"""
Process (marshal) the tag with the specified value using the
optional type information.
@param content: The content to process.
@type content: L{Object}
"""
log.debug('processing:\n%s', content)
self.reset()
if content.tag is None:
content.tag = content.value.__class__.__name__
document = Document()
if isinstance(content.value, Property):
root = self.node(content) # root is never used?
self.append(document, content)
else:
self.append(document, content)
return document.root()
def append(self, parent, content):
"""
Append the specified L{content} to the I{parent}.
@param parent: The parent node to append to.
@type parent: L{Element}
@param content: The content to append.
@type content: L{Object}
"""
log.debug('appending parent:\n%s\ncontent:\n%s', parent, content)
if self.start(content):
self.appender.append(parent, content)
self.METHOD_NAME(parent, content)
def reset(self):
"""
Reset the marshaller.
"""
pass
def node(self, content):
"""
Create and return an XML node.
@param content: The content for which proccessing has been suspended.
@type content: L{Object}
@return: An element.
@rtype: L{Element}
"""
return Element(content.tag)
def start(self, content):
"""
Appending this content has started.
@param content: The content for which proccessing has started.
@type content: L{Content}
@return: True to continue appending
@rtype: boolean
"""
return True
def suspend(self, content):
"""
Appending this content has suspended.
@param content: The content for which proccessing has been suspended.
@type content: L{Content}
"""
pass
def resume(self, content):
"""
Appending this content has resumed.
@param content: The content for which proccessing has been resumed.
@type content: L{Content}
"""
pass
def METHOD_NAME(self, parent, content):
    """
    Notification that appending of this content has ended.
    @param parent: The parent node that was appended to.
    @type parent: L{Element}
    @param content: The content for which processing has ended.
    @type content: L{Content}
    """
    # No-op hook; subclasses may finalize the appended node here.
    return None
def setnil(self, node, content):
    """
    Set the value of the I{node} to nil.
    @param node: A I{nil} node.
    @type node: L{Element}
    @param content: The content to set nil.
    @type content: L{Content}
    """
    # No-op hook; schema-aware subclasses mark the node as nil.
    return None
def setdefault(self, node, content):
    """
    Set the value of the I{node} to a default value.
    @param node: A I{nil} node.
    @type node: L{Element}
    @param content: The content to set the default value.
    @type content: L{Content}
    @return: The default (None in the base implementation).
    """
    # No-op hook; subclasses supply a real default.
    return None
def optional(self, content):
    """
    Get whether the specified content is optional.
    @param content: The content to check.
    @type content: L{Content}
    @return: False in the base implementation.
    @rtype: boolean
    """
    return False
from __future__ import annotations
import json
import pandas as pd
import pandas.testing as tm
import pyarrow as pa
import pytest
from pytest import param
import ibis
import ibis.common.exceptions as com
from ibis.backends.snowflake.tests.conftest import _get_url
from ibis.util import gen_name
@pytest.fixture
def temp_db(con):
    """Yield a freshly created database name, dropping it on teardown."""
    db = gen_name("tmp_db")
    con.create_database(db)
    assert db in con.list_databases()
    yield db
    con.drop_database(db)
    assert db not in con.list_databases()
@pytest.fixture
def temp_schema(con, temp_db):
    """Yield a schema created inside ``temp_db``, dropping it on teardown."""
    schema = gen_name("tmp_schema")
    con.create_schema(schema, database=temp_db)
    assert schema in con.list_schemas(database=temp_db)
    yield schema
    con.drop_schema(schema, database=temp_db)
    assert schema not in con.list_schemas(database=temp_db)
def test_cross_db_access(con, temp_db, temp_schema):
    """A table in another database/schema is reachable via a dotted schema path."""
    table = gen_name("tmp_table")
    with con.begin() as c:
        c.exec_driver_sql(
            f'CREATE TABLE "{temp_db}"."{temp_schema}"."{table}" ("x" INT)'
        )
    t = con.table(table, schema=f"{temp_db}.{temp_schema}")
    assert t.schema() == ibis.schema(dict(x="int"))
    # The table was created without rows, so execution yields an empty frame.
    assert t.execute().empty
@pytest.fixture(scope="session")
def simple_con():
    """Session-scoped backend connection shared by the tests below."""
    return ibis.connect(_get_url())
@pytest.mark.parametrize(
    "data",
    [
        # raw
        {"key": list("abc"), "value": [[1], [2], [3]]},
        # dataframe
        pd.DataFrame({"key": list("abc"), "value": [[1], [2], [3]]}),
        # pyarrow table
        pa.Table.from_pydict({"key": list("abc"), "value": [[1], [2], [3]]}),
    ],
)
def test_basic_memtable_registration(simple_con, data):
    """Memtables built from dicts, DataFrames, or pyarrow tables all round-trip."""
    expected = pd.DataFrame({"key": list("abc"), "value": [[1], [2], [3]]})
    t = ibis.memtable(data)
    result = simple_con.execute(t)
    tm.assert_frame_equal(result, expected)
def test_repeated_memtable_registration(simple_con, mocker):
    """Executing the same memtable twice registers it once per execution."""
    data = {"key": list("abc"), "value": [[1], [2], [3]]}
    expected = pd.DataFrame(data)
    t = ibis.memtable(data)
    spy = mocker.spy(simple_con, "_register_in_memory_table")
    n = 2
    for _ in range(n):
        tm.assert_frame_equal(simple_con.execute(t), expected)
    # assert that we called _register_in_memory_table exactly n times
    assert spy.call_count == n
def test_timestamp_tz_column(simple_con):
    """to_timestamp on a string column produces an executable expression."""
    t = simple_con.create_table(
        ibis.util.gen_name("snowflake_timestamp_tz_column"),
        schema=ibis.schema({"ts": "string"}),
        temp=True,
    ).mutate(ts=lambda t: t.ts.to_timestamp("YYYY-MM-DD HH24-MI-SS"))
    expr = t.ts
    # The temp table is created empty, so execution returns no rows.
    assert expr.execute().empty
def test_create_schema(simple_con):
    """Schema create/drop must not alter the connection's current context."""
    new_schema = gen_name("test_create_schema")
    original_schema = simple_con.current_schema
    original_db = simple_con.current_database

    for operation in (simple_con.create_schema, simple_con.drop_schema):
        operation(new_schema)
        assert simple_con.current_schema == original_schema
        assert simple_con.current_database == original_db
def test_create_database(simple_con):
    """Database create/drop must not alter the current database."""
    new_db = gen_name("test_create_database")
    original_db = simple_con.current_database

    for operation in (simple_con.create_database, simple_con.drop_database):
        operation(new_db)
        assert simple_con.current_database == original_db
@pytest.fixture(scope="session")
def db_con():
    """Dedicated connection so database-context changes don't leak to other tests."""
    return ibis.connect(_get_url())
@pytest.fixture(scope="session")
def schema_con():
    """Dedicated connection so schema-context changes don't leak to other tests."""
    return ibis.connect(_get_url())
def test_drop_current_db_not_allowed(db_con):
    """Dropping the database currently in use must be rejected."""
    database = gen_name("test_create_database")
    cur_db = db_con.current_database
    db_con.create_database(database)
    assert db_con.current_database == cur_db
    with db_con.begin() as c:
        c.exec_driver_sql(f'USE DATABASE "{database}"')
    with pytest.raises(com.UnsupportedOperationError, match="behavior is undefined"):
        db_con.drop_database(database)
    # Switch back to the original database before cleaning up.
    with db_con.begin() as c:
        c.exec_driver_sql(f"USE DATABASE {cur_db}")
    db_con.drop_database(database)
def test_drop_current_schema_not_allowed(schema_con):
    """Dropping the schema currently in use must be rejected."""
    schema = gen_name("test_create_schema")
    cur_schema = schema_con.current_schema
    schema_con.create_schema(schema)
    assert schema_con.current_schema == cur_schema
    with schema_con.begin() as c:
        c.exec_driver_sql(f'USE SCHEMA "{schema}"')
    with pytest.raises(com.UnsupportedOperationError, match="behavior is undefined"):
        schema_con.drop_schema(schema)
    # Switch back to the original schema before cleaning up.
    with schema_con.begin() as c:
        c.exec_driver_sql(f"USE SCHEMA {cur_schema}")
    schema_con.drop_schema(schema)
def test_read_csv_options(con, tmp_path):
    """read_csv forwards reader options such as a custom field delimiter."""
    path = tmp_path / "test_pipe.csv"
    path.write_text("a|b\n1|2\n3|4\n")
    t = con.read_csv(path, field_delimiter="|")
    assert t.schema() == ibis.schema(dict(a="int64", b="int64"))
@pytest.fixture(scope="module")
def json_data():
    """Sample records exercising scalar, string, and nested-array JSON fields."""
    return [
        {"a": 1, "b": "abc", "c": [{"d": 1}]},
        {"a": 2, "b": "def", "c": [{"d": 2}]},
        {"a": 3, "b": "ghi", "c": [{"d": 3}]},
    ]
@pytest.mark.parametrize(
    "serialize",
    [
        param(lambda obj: "\n".join(map(json.dumps, obj)), id="ndjson"),
        param(json.dumps, id="json"),
    ],
)
def test_read_json(con, tmp_path, serialize, json_data):
    """read_json infers the schema for both NDJSON and plain JSON files."""
    path = tmp_path / "test.json"
    path.write_text(serialize(json_data))
    t = con.read_json(path)
    assert t.schema() == ibis.schema(dict(a="int", b="string", c="array<json>"))
    assert t.count().execute() == len(json_data)
def METHOD_NAME(con, data_dir):
    """Reading a parquet file should produce a timestamp-typed timestamp column."""
    path = data_dir / "parquet" / "functional_alltypes.parquet"
    t = con.read_parquet(path)
    assert t.timestamp_col.type().is_timestamp()
"""
Breadth-First grid planning
author: Erwin Lejeune (@spida_rwin)
See Wikipedia article (https://en.wikipedia.org/wiki/Breadth-first_search)
"""
import math
import matplotlib.pyplot as plt
show_animation = True
class BreadthFirstSearchPlanner:
def __init__(self, ox, oy, reso, rr):
"""
Initialize grid map for bfs planning
ox: x position list of Obstacles [m]
oy: y position list of Obstacles [m]
resolution: grid resolution [m]
rr: robot radius[m]
"""
self.reso = reso
self.rr = rr
self.calc_obstacle_map(ox, oy)
self.motion = self.get_motion_model()
class Node:
def __init__(self, x, y, cost, parent_index, parent):
self.x = x # index of grid
self.y = y # index of grid
self.cost = cost
self.parent_index = parent_index
self.parent = parent
def __str__(self):
return str(self.x) + "," + str(self.y) + "," + str(
self.cost) + "," + str(self.parent_index)
def planning(self, sx, sy, gx, gy):
"""
Breadth First search based planning
input:
s_x: start x position [m]
s_y: start y position [m]
gx: goal x position [m]
gy: goal y position [m]
output:
rx: x position list of the final path
ry: y position list of the final path
"""
nstart = self.Node(self.calc_xyindex(sx, self.minx),
self.calc_xyindex(sy, self.miny), 0.0, -1, None)
ngoal = self.Node(self.calc_xyindex(gx, self.minx),
self.calc_xyindex(gy, self.miny), 0.0, -1, None)
open_set, closed_set = dict(), dict()
open_set[self.METHOD_NAME(nstart)] = nstart
while True:
if len(open_set) == 0:
print("Open set is empty..")
break
current = open_set.pop(list(open_set.keys())[0])
c_id = self.METHOD_NAME(current)
closed_set[c_id] = current
# show graph
if show_animation: # pragma: no cover
plt.plot(self.calc_grid_position(current.x, self.minx),
self.calc_grid_position(current.y, self.miny), "xc")
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect('key_release_event',
lambda event:
[exit(0) if event.key == 'escape'
else None])
if len(closed_set.keys()) % 10 == 0:
plt.pause(0.001)
if current.x == ngoal.x and current.y == ngoal.y:
print("Find goal")
ngoal.parent_index = current.parent_index
ngoal.cost = current.cost
break
# expand_grid search grid based on motion model
for i, _ in enumerate(self.motion):
node = self.Node(current.x + self.motion[i][0],
current.y + self.motion[i][1],
current.cost + self.motion[i][2], c_id, None)
n_id = self.METHOD_NAME(node)
# If the node is not safe, do nothing
if not self.verify_node(node):
continue
if (n_id not in closed_set) and (n_id not in open_set):
node.parent = current
open_set[n_id] = node
rx, ry = self.calc_final_path(ngoal, closed_set)
return rx, ry
def calc_final_path(self, ngoal, closedset):
# generate final course
rx, ry = [self.calc_grid_position(ngoal.x, self.minx)], [
self.calc_grid_position(ngoal.y, self.miny)]
n = closedset[ngoal.parent_index]
while n is not None:
rx.append(self.calc_grid_position(n.x, self.minx))
ry.append(self.calc_grid_position(n.y, self.miny))
n = n.parent
return rx, ry
def calc_grid_position(self, index, minp):
"""
calc grid position
:param index:
:param minp:
:return:
"""
pos = index * self.reso + minp
return pos
def calc_xyindex(self, position, min_pos):
return round((position - min_pos) / self.reso)
def METHOD_NAME(self, node):
return (node.y - self.miny) * self.xwidth + (node.x - self.minx)
def verify_node(self, node):
px = self.calc_grid_position(node.x, self.minx)
py = self.calc_grid_position(node.y, self.miny)
if px < self.minx:
return False
elif py < self.miny:
return False
elif px >= self.maxx:
return False
elif py >= self.maxy:
return False
# collision check
if self.obmap[node.x][node.y]:
return False
return True
def calc_obstacle_map(self, ox, oy):
self.minx = round(min(ox))
self.miny = round(min(oy))
self.maxx = round(max(ox))
self.maxy = round(max(oy))
print("min_x:", self.minx)
print("min_y:", self.miny)
print("max_x:", self.maxx)
print("max_y:", self.maxy)
self.xwidth = round((self.maxx - self.minx) / self.reso)
self.ywidth = round((self.maxy - self.miny) / self.reso)
print("x_width:", self.xwidth)
print("y_width:", self.ywidth)
# obstacle map generation
self.obmap = [[False for _ in range(self.ywidth)]
for _ in range(self.xwidth)]
for ix in range(self.xwidth):
x = self.calc_grid_position(ix, self.minx)
for iy in range(self.ywidth):
y = self.calc_grid_position(iy, self.miny)
for iox, ioy in zip(ox, oy):
d = math.hypot(iox - x, ioy - y)
if d <= self.rr:
self.obmap[ix][iy] = True
break
@staticmethod
def get_motion_model():
# dx, dy, cost
motion = [[1, 0, 1],
[0, 1, 1],
[-1, 0, 1],
[0, -1, 1],
[-1, -1, math.sqrt(2)],
[-1, 1, math.sqrt(2)],
[1, -1, math.sqrt(2)],
[1, 1, math.sqrt(2)]]
return motion
def main():
    """Demo: plan through a simple walled map and (optionally) animate."""
    print(__file__ + " start!!")
    # start and goal position
    sx = 10.0  # [m]
    sy = 10.0  # [m]
    gx = 50.0  # [m]
    gy = 50.0  # [m]
    grid_size = 2.0  # [m]
    robot_radius = 1.0  # [m]
    # set obstacle positions
    ox, oy = [], []
    # bottom boundary wall
    for i in range(-10, 60):
        ox.append(i)
        oy.append(-10.0)
    # right boundary wall
    for i in range(-10, 60):
        ox.append(60.0)
        oy.append(i)
    # top boundary wall
    for i in range(-10, 61):
        ox.append(i)
        oy.append(60.0)
    # left boundary wall
    for i in range(-10, 61):
        ox.append(-10.0)
        oy.append(i)
    # interior wall rising from the bottom at x=20
    for i in range(-10, 40):
        ox.append(20.0)
        oy.append(i)
    # interior wall hanging from the top at x=40
    for i in range(0, 40):
        ox.append(40.0)
        oy.append(60.0 - i)
    if show_animation:  # pragma: no cover
        plt.plot(ox, oy, ".k")
        plt.plot(sx, sy, "og")
        plt.plot(gx, gy, "xb")
        plt.grid(True)
        plt.axis("equal")
    bfs = BreadthFirstSearchPlanner(ox, oy, grid_size, robot_radius)
    rx, ry = bfs.planning(sx, sy, gx, gy)
    if show_animation:  # pragma: no cover
        plt.plot(rx, ry, "-r")
        plt.pause(0.01)
        plt.show()
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
"""
sphinx.domains.rst
~~~~~~~~~~~~~~~~~~
The reStructuredText domain.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from six import iteritems
from sphinx import addnodes
from sphinx.domains import Domain, ObjType
from sphinx.locale import l_, _
from sphinx.directives import ObjectDescription
from sphinx.roles import XRefRole
from sphinx.util.nodes import make_refnode
dir_sig_re = re.compile(r'\.\. (.+?)::(.*)$')
class ReSTMarkup(ObjectDescription):
    """
    Description of generic reST markup.
    """

    def add_target_and_index(self, name, sig, signode):
        """Register a cross-reference target and an index entry for *name*."""
        targetname = self.objtype + '-' + name
        if targetname not in self.state.document.ids:
            signode['names'].append(targetname)
            signode['ids'].append(targetname)
            signode['first'] = (not self.names)
            self.state.document.note_explicit_target(signode)

            objects = self.env.domaindata['rst']['objects']
            key = (self.objtype, name)
            # Warn on duplicate descriptions; the newest docname wins below.
            if key in objects:
                self.state_machine.reporter.warning(
                    'duplicate description of %s %s, ' % (self.objtype, name) +
                    'other instance in ' + self.env.doc2path(objects[key]),
                    line=self.lineno)
            objects[key] = self.env.docname
        indextext = self.get_index_text(self.objtype, name)
        if indextext:
            self.indexnode['entries'].append(('single', indextext,
                                              targetname, '', None))

    def get_index_text(self, objectname, name):
        """Return the index entry text for *name*, or '' for unknown objtypes."""
        if self.objtype == 'directive':
            return _('%s (directive)') % name
        elif self.objtype == 'role':
            return _('%s (role)') % name
        return ''
def parse_directive(d):
    """Parse a directive signature.

    Returns (directive, arguments) string tuple. If no arguments are given,
    returns (directive, '').
    """
    # Renamed from ``dir`` to avoid shadowing the ``dir`` builtin.
    directive = d.strip()
    if not directive.startswith('.'):
        # Assume it is a directive without syntax
        return (directive, '')
    m = dir_sig_re.match(directive)
    if not m:
        return (directive, '')
    parsed_dir, parsed_args = m.groups()
    return (parsed_dir.strip(), ' ' + parsed_args.strip())
class ReSTDirective(ReSTMarkup):
    """
    Description of a reST directive.
    """

    def handle_signature(self, sig, signode):
        """Render ``.. name::`` plus optional arguments; return the bare name."""
        name, args = parse_directive(sig)
        desc_name = '.. %s::' % name
        signode += addnodes.desc_name(desc_name, desc_name)
        if len(args) > 0:
            signode += addnodes.desc_addname(args, args)
        return name
class ReSTRole(ReSTMarkup):
    """
    Description of a reST role.
    """

    def handle_signature(self, sig, signode):
        """Render the role as ``:name:`` and return the signature unchanged."""
        rendered = ':%s:' % sig
        signode += addnodes.desc_name(rendered, rendered)
        return sig
class ReSTDomain(Domain):
    """ReStructuredText domain."""

    name = 'rst'
    label = 'reStructuredText'

    object_types = {
        'directive': ObjType(l_('directive'), 'dir'),
        'role': ObjType(l_('role'), 'role'),
    }
    directives = {
        'directive': ReSTDirective,
        'role': ReSTRole,
    }
    roles = {
        'dir': XRefRole(),
        'role': XRefRole(),
    }
    initial_data = {
        'objects': {},  # fullname -> docname, objtype
    }

    def clear_doc(self, docname):
        """Drop all objects recorded for *docname*."""
        for (typ, name), doc in list(self.data['objects'].items()):
            if doc == docname:
                del self.data['objects'][typ, name]

    def merge_domaindata(self, docnames, otherdata):
        """Merge objects from a parallel build for the given *docnames*."""
        # XXX check duplicates
        for (typ, name), doc in otherdata['objects'].items():
            if doc in docnames:
                self.data['objects'][typ, name] = doc

    def resolve_xref(self, env, fromdocname, builder, typ, target, node,
                     contnode):
        """Resolve a typed cross reference; returns None when unknown."""
        objects = self.data['objects']
        objtypes = self.objtypes_for_role(typ)
        for objtype in objtypes:
            if (objtype, target) in objects:
                return make_refnode(builder, fromdocname,
                                    objects[objtype, target],
                                    objtype + '-' + target,
                                    contnode, target + ' ' + objtype)

    def resolve_any_xref(self, env, fromdocname, builder, target,
                         node, contnode):
        """Resolve an ``any`` cross reference against every object type."""
        objects = self.data['objects']
        results = []
        for objtype in self.object_types:
            if (objtype, target) in self.data['objects']:
                results.append(('rst:' + self.role_for_objtype(objtype),
                                make_refnode(builder, fromdocname,
                                             objects[objtype, target],
                                             objtype + '-' + target,
                                             contnode, target + ' ' + objtype)))
        return results

    def METHOD_NAME(self):
        """Yield (name, dispname, type, docname, anchor, priority) tuples."""
        for (typ, name), docname in iteritems(self.data['objects']):
            yield name, name, typ, docname, typ + '-' + name, 1
# (C) Copyright 2005-2023 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
"""
Benchmarks Agg rendering times.
"""
from time import perf_counter
from numpy import array, shape, arange, transpose, sin, cos, zeros, pi
from scipy import stats
import kiva
from kiva import agg
def benchmark_real_time(cycles=10, n_pts=1000, sz=(1000, 1000)):
    """ Render a sin wave to the screen repeatedly. Clears
    the screen between each rendering.
    """
    print("realtime:", end=" ")
    width, height = sz
    pts = zeros((n_pts, 2), float)
    # x and y are views into pts, so writing to them updates the point list.
    x = pts[:, 0]
    y = pts[:, 1]
    interval = width / float(n_pts)
    x[:] = arange(0, width, interval)
    t1 = perf_counter()
    # TODO: module 'kiva.agg' has no attribute 'GraphicsContextBitmap'
    gc = agg.GraphicsContextBitmap(sz)
    for i in range(cycles):
        # Shift the wave's phase each cycle so every frame differs.
        y[:] = height / 2.0 + height / 2.0 * sin(
            x * 2 * pi / width + i * interval
        )
        # gc.clear()
        gc.lines(pts)
        gc.stroke_path()
        # agg.write_bmp_rgb24("sin%d.bmp" % i,gc.bitmap)
    t2 = perf_counter()
    tot_time = t2 - t1
    print("tot,per cycle:", tot_time, tot_time / cycles)
def METHOD_NAME(cycles=10, n_pts=1000, sz=(1000, 1000)):
    """ Render a sin wave to a compiled_path then display it repeatedly.
    """
    width, height = sz
    pts = zeros((n_pts, 2), float)
    x = pts[:, 0]
    y = pts[:, 1]
    interval = width / float(n_pts)
    x[:] = arange(0, width, interval)
    y[:] = height / 2.0 + height / 2.0 * sin(x * 2 * pi / n_pts)
    # Build the path once up front; the timing loop below only replays it.
    path = agg.CompiledPath()
    path.lines(pts)
    # path.move_to(pts[0,0],pts[0,1])
    # for x,y in pts[1:]:
    #     path.line_to(x,y)
    t1 = perf_counter()
    # TODO: module 'kiva.agg' has no attribute 'GraphicsContextBitmap'
    gc = agg.GraphicsContextBitmap(sz)
    for _ in range(cycles):
        # gc.clear()
        gc.add_path(path)
        gc.stroke_path()
    t2 = perf_counter()
    tot_time = t2 - t1
    print("tot,per cycle:", tot_time, tot_time / cycles)
    return
def benchmark_draw_path_flags(cycles=10, n_pts=1000, sz=(1000, 1000)):
    """Time draw_path under every fill/stroke flag, saving each result to BMP."""
    print("realtime:", end=" ")
    width, height = sz
    pts = zeros((n_pts, 2), float)
    x = pts[:, 0]
    y = pts[:, 1]
    interval = width / float(n_pts)
    x[:] = arange(0, width, interval)
    flags = [
        kiva.FILL,
        kiva.EOF_FILL,
        kiva.STROKE,
        kiva.FILL_STROKE,
        kiva.EOF_FILL_STROKE,
    ]
    for flag in flags:
        t1 = perf_counter()
        for i in range(cycles):
            # TODO: module 'kiva.agg' has no attribute 'GraphicsContextBitmap'
            gc = agg.GraphicsContextBitmap(sz)
            # Shift the wave's phase each cycle so every frame differs.
            y[:] = height / 2.0 + height / 2.0 * sin(
                x * 2 * pi / width + i * interval
            )
            gc.lines(pts)
            gc.draw_path(flag)
        t2 = perf_counter()
        agg.write_bmp_rgb24("draw_path%d.bmp" % flag, gc.bitmap)
        tot_time = t2 - t1
        print("tot,per cycle:", tot_time, tot_time / cycles)
    return
def star_array(size=40):
    """Return the 6 vertices of a five-pointed star outline as a (6, 2) array.

    The first vertex is repeated at the end so the outline closes.
    """
    half = size * 0.5
    tenth = size * 0.1
    vertices = (
        (tenth, 0),
        (half, size - tenth),
        (size - tenth, 0),
        (0, half),
        (size, half),
        (tenth, 0),
    )
    return array([array(v) for v in vertices])
def circle_array(size=5):
    """Return (N, 2) points tracing a circle of diameter *size* around the origin."""
    angles = arange(0, 6.3, 0.1)
    unit_pts = array((cos(angles), sin(angles))).T.copy()
    return unit_pts * size / 2.0
def star_path_gen(size=40):
    """Build a closed CompiledPath tracing the star outline from star_array."""
    star_path = agg.CompiledPath()
    # spts = circle_array()
    spts = star_array(size)
    # star_path.lines(spts)
    star_path.move_to(spts[0][0], spts[0][1])
    for x, y in spts:
        star_path.line_to(x, y)
    star_path.close_path()
    return star_path
def benchmark_individual_symbols(n_pts=1000, sz=(1000, 1000)):
"Draws some stars"
# width, height = sz
pts = stats.norm.rvs(size=(n_pts, 2)) * array(sz) / 8.0 + array(sz) / 2.0
print(pts[5, :])
print(shape(pts))
star_path = star_path_gen()
gc = agg.GraphicsContextArray(sz)
gc.set_fill_color((1.0, 0.0, 0.0, 0.1))
gc.set_stroke_color((0.0, 1.0, 0.0, 0.6))
t1 = perf_counter()
for x, y in pts:
with gc:
gc.translate_ctm(x, y)
gc.add_path(star_path)
gc.draw_path()
t2 = perf_counter()
gc.save("benchmark_symbols1.bmp")
tot_time = t2 - t1
print("star count, tot,per shape:", n_pts, tot_time, tot_time / n_pts)
return
def benchmark_rect(n_pts=1000, sz=(1000, 1000)):
    "Draws a number of randomly-placed renctangles."
    # width, height = sz
    pts = stats.norm.rvs(size=(n_pts, 2)) * array(sz) / 8.0 + array(sz) / 2.0
    print(pts[5, :])
    print(shape(pts))
    gc = agg.GraphicsContextArray(sz)
    gc.set_fill_color((1.0, 0.0, 0.0, 0.1))
    gc.set_stroke_color((0.0, 1.0, 0.0, 0.6))
    t1 = perf_counter()
    for x, y in pts:
        with gc:
            gc.translate_ctm(x, y)
            # 5x5 rectangle centered on the translated origin.
            gc.rect(-2.5, -2.5, 5, 5)
            gc.draw_path()
    t2 = perf_counter()
    gc.save("benchmark_rect.bmp")
    tot_time = t2 - t1
    print("rect count, tot,per shape:", n_pts, tot_time, tot_time / n_pts)
    return
def benchmark_symbols_all_at_once(n_pts=1000, sz=(1000, 1000)):
    """
    Renders all the symbols.
    """
    # width, height = sz
    pts = stats.norm.rvs(size=(n_pts, 2)) * array(sz) / 8.0 + array(sz) / 2.0
    star_path = agg.CompiledPath()
    star_path.lines(circle_array())
    gc = agg.GraphicsContextArray(sz)
    gc.set_fill_color((1.0, 0.0, 0.0, 0.1))
    gc.set_stroke_color((0.0, 1.0, 0.0, 0.6))
    path = agg.CompiledPath()
    t1 = perf_counter()
    # Accumulate every translated symbol into one path, then render it once;
    # build time and render time are reported separately below.
    for x, y in pts:
        path.save_ctm()
        path.translate_ctm(x, y)
        path.add_path(star_path)
        path.restore_ctm()
    gc.add_path(path)
    t2 = perf_counter()
    gc.draw_path()
    t3 = perf_counter()
    gc.save("benchmark_symbols2.bmp")
    build_path_time = t2 - t1
    render_path_time = t3 - t2
    tot_time = t3 - t1
    print(
        "star count, tot,building path, rendering path:",
        n_pts,
        tot_time,
        build_path_time,
        render_path_time,
    )
    return
def run_all_benchmarks(n_pts=1000, sz=(500, 500)):
    """Run every working benchmark; broken ones are left commented with TODOs."""
    # TODO: does not work: Fix or remove?
    # benchmark_real_time(n_pts=n_pts, sz=sz)
    # TODO: does not work: Fix or remove?
    # benchmark_compiled_path(n_pts=n_pts, sz=sz)
    benchmark_individual_symbols(n_pts=n_pts, sz=sz)
    benchmark_rect(n_pts=n_pts, sz=sz)
    benchmark_symbols_all_at_once(n_pts=n_pts, sz=sz)
    # TODO: does not work: Fix or remove?
    # benchmark_draw_path_flags(n_pts=n_pts, sz=sz)
if __name__ == "__main__":
    run_all_benchmarks(n_pts=100, sz=(500, 500))
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Readout mitigation data handling utils
"""
import logging
from typing import Optional, List, Tuple, Dict
import numpy as np
from qiskit.exceptions import QiskitError
from ..utils import marginal_counts
from ..counts import Counts
logger = logging.getLogger(__name__)
def z_diagonal(dim, dtype=float):
    r"""Return the diagonal for the operator :math:`Z^\otimes n`.

    Entry *i* is ``+1`` when the binary representation of *i* contains an
    even number of one bits and ``-1`` when it contains an odd number.
    """
    bit_counts = np.array(
        [bin(idx).count("1") for idx in range(dim)], dtype=dtype
    )
    return (-1) ** np.mod(bit_counts, 2)
def expval_with_stddev(coeffs: np.ndarray, probs: np.ndarray, shots: int) -> Tuple[float, float]:
    """Compute expectation value and standard deviation.

    Args:
        coeffs: array of diagonal operator coefficients.
        probs: array of measurement probabilities.
        shots: total number of shots to obtain probabilities.

    Returns:
        tuple: (expval, stddev) expectation value and standard deviation.
    """
    # Compute expval
    expval = coeffs.dot(probs)

    # Compute variance
    sq_expval = (coeffs**2).dot(probs)
    variance = (sq_expval - expval**2) / shots

    # Compute standard deviation; small negative variances from floating-point
    # error are tolerated silently, larger ones are logged.
    if variance < 0 and not np.isclose(variance, 0):
        logger.warning(
            "Encountered a negative variance in expectation value calculation."
            "(%f). Setting standard deviation of result to 0.",
            variance,
        )
    calc_stddev = np.sqrt(variance) if variance > 0 else 0.0
    # Return a tuple to match the documented and annotated return type
    # (the previous implementation returned a list).
    return (expval, calc_stddev)
def stddev(probs, shots):
    """Calculate the binomial standard error for each outcome probability."""
    return {
        outcome: np.sqrt(prob * (1 - prob) / shots)
        for outcome, prob in probs.items()
    }
def str2diag(string):
"""Transform diagonal from a string to a numpy array"""
chars = {
"I": np.array([1, 1], dtype=float),
"Z": np.array([1, -1], dtype=float),
"0": np.array([1, 0], dtype=float),
"1": np.array([0, 1], dtype=float),
}
ret = np.array([1], dtype=float)
for i in reversed(string):
if i not in chars:
raise QiskitError(f"Invalid diagonal string character {i}")
ret = np.kron(chars[i], ret)
return ret
def counts_to_vector(counts: Counts, num_qubits: int) -> Tuple[np.ndarray, int]:
    """Transforms Counts to a probability vector"""
    dim = 2**num_qubits
    vec = np.zeros(dim, dtype=float)
    total_shots = 0
    for bitstring, freq in counts.items():
        total_shots += freq
        vec[int(bitstring, 2)] = freq
    return vec / total_shots, total_shots
def remap_qubits(
    vec: np.ndarray, num_qubits: int, qubits: Optional[List[int]] = None
) -> np.ndarray:
    """Reorder the probability vector according to the qubit permutation."""
    if qubits is None:
        return vec
    if len(qubits) != num_qubits:
        raise QiskitError("Num qubits does not match vector length.")
    # View the vector as an n-qubit tensor, permute the axes, and flatten.
    axes = [num_qubits - 1 - i for i in reversed(np.argsort(qubits))]
    tensor = np.reshape(vec, num_qubits * [2])
    return tensor.transpose(axes).reshape(vec.shape)
def METHOD_NAME(
    counts: Counts,
    qubit_index: Dict[int, int],
    qubits: Optional[List[int]] = None,
    clbits: Optional[List[int]] = None,
) -> np.ndarray:
    """Marginalize the counts onto the requested classical bits.

    Verify that the number of clbits equals the number of qubits.

    Args:
        counts: counts object to marginalize.
        qubit_index: For each qubit, its index in the mitigator qubits list.
        qubits: qubits the count bitstrings correspond to.
        clbits: Optional, marginalize counts to just these bits.

    Raises:
        QiskitError: if the clbits and qubits lengths disagree.

    Returns:
        The marginalized counts.
    """
    if clbits is not None:
        # Idiom fix: was ``not qubits is None``; also, clbits is known to be
        # non-None in this branch, so its length check needs no guard.
        qubits_len = len(qubits) if qubits is not None else 0
        clbits_len = len(clbits)
        if clbits_len not in (0, qubits_len):
            raise QiskitError(
                "Num qubits ({}) does not match number of clbits ({}).".format(
                    qubits_len, clbits_len
                )
            )
        counts = marginal_counts(counts, clbits)
    if clbits is None and qubits is not None:
        # Derive clbits from the mitigator's qubit indexing.
        clbits = [qubit_index[qubit] for qubit in qubits]
        counts = marginal_counts(counts, clbits)
    return counts
def counts_probability_vector(
    counts: Counts,
    qubit_index: Dict[int, int],
    qubits: Optional[List[int]] = None,
    clbits: Optional[List[int]] = None,
) -> Tuple[np.ndarray, int]:
    """Compute a probability vector for all count outcomes.

    Args:
        counts: counts object
        qubit_index: For each qubit, its index in the mitigator qubits list
        qubits: qubits the count bitstrings correspond to.
        clbits: Optional, marginalize counts to just these bits.

    Raises:
        QiskitError: if qubits and clbits kwargs are not valid.

    Returns:
        np.ndarray: a probability vector for all count outcomes.
        int: Number of shots in the counts
    """
    # Marginalize onto the requested classical bits first.
    counts = METHOD_NAME(counts, qubit_index, qubits, clbits)
    if qubits is not None:
        num_qubits = len(qubits)
    else:
        num_qubits = len(qubit_index.keys())
    vec, shots = counts_to_vector(counts, num_qubits)
    # Reorder amplitudes to the mitigator's qubit ordering.
    vec = remap_qubits(vec, num_qubits, qubits)
    return vec, shots
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling import Parameter
from ...calib import solar_fluxd
from ...data import Ephem
from ..core import *
def setup_module(module):
    """Pin the V-band solar flux so magnitudes are reproducible in this module."""
    module.solar_fluxd_default = solar_fluxd.get()
    solar_fluxd.set({'V': -26.77 * u.mag})
def METHOD_NAME(module):
    """Restore the solar flux configuration saved by setup_module."""
    solar_fluxd.set(module.solar_fluxd_default)
class TestDiskIntegratedPhaseFunc():
    """Tests for the DiskIntegratedPhaseFunc base class."""

    def test__unit(self):
        """_check_unit raises ValueError for a model that declares no unit."""
        class TestClass(DiskIntegratedPhaseFunc):
            p = Parameter(default=1.)

            @staticmethod
            def evaluate(a, p):
                return a * p

        temp = TestClass()
        with pytest.raises(ValueError):
            temp._check_unit()

    def test_ref_phasefunc(self):
        """Exponential reflectance model: evaluation, normalization, to_mag/to_ref."""
        class ExpPhase(DiskIntegratedPhaseFunc):
            _unit = 'ref'
            p = Parameter(default=0.1 / u.sr)
            nu = Parameter(default=0.1 / u.rad)

            @staticmethod
            def evaluate(a, p, nu):
                return p * np.exp(-nu * a)

        exp_phase = ExpPhase()
        pha_test = np.linspace(0, 120, 10) * u.deg
        ref_test = [0.1, 0.09769976, 0.09545244, 0.0932568, 0.09111168,
                    0.08901589, 0.08696831, 0.08496784, 0.08301337,
                    0.08110387] / u.sr
        ref = exp_phase(pha_test)
        assert np.allclose(ref.value, ref_test.value)
        assert ref.unit == ref_test.unit
        ref_n_test = [1.01760649, 0.99419913, 0.97133019, 0.94898729,
                      0.92715833, 0.90583148, 0.88499521, 0.86463822,
                      0.84474949, 0.82531824]
        ref_n = exp_phase.to_ref(pha_test, normalized=10 * u.deg)
        assert np.allclose(ref_n, ref_n_test)
        # to_mag fails until both a radius and a band (wfb) are configured.
        with pytest.raises(ValueError):
            mag = exp_phase.to_mag(1 * u.rad)
        with pytest.raises(ValueError):
            mag = exp_phase.to_mag(1 * u.rad, unit=u.mag)
        exp_phase.radius = 100 * u.km
        with pytest.raises(ValueError):
            mag = exp_phase.to_mag(1 * u.rad, unit=u.mag)
        exp_phase.wfb = 'V'
        mag = exp_phase.to_mag(pha_test, unit=u.mag)
        mag_test = [5.36175238, 5.38701861, 5.41228484, 5.43755106,
                    5.46281729, 5.48808352, 5.51334975, 5.53861598, 5.56388221,
                    5.58914844] * u.mag
        assert np.allclose(mag.value, mag_test.value)
        assert mag.unit == mag_test.unit
        # to_ref also accepts an Ephem with phase angles and distances.
        eph_dict = {'alpha': pha_test,
                    'r': np.repeat(0.8 * u.au, 10),
                    'delta': np.repeat(0.5 * u.au, 10)}
        eph_test = Ephem.from_dict(eph_dict)
        ref1 = exp_phase.to_ref(eph_test)
        assert np.allclose(ref1.value, ref_test.value)
class TestLinear():
    """Tests for the LinearPhaseFunc model."""

    def test_init(self):
        """Constructor stores slope, intercept, radius, and band."""
        linphase = LinearPhaseFunc(5 * u.mag, 0.04 * u.mag/u.deg,
                                   radius=300 * u.km, wfb='V')
        assert np.isclose(linphase.H.value, 5)
        assert linphase.H.unit == u.mag
        assert np.isclose(linphase.S.value, 0.04)
        assert linphase.S.unit == u.mag/u.deg
        assert linphase.radius == 300 * u.km
        assert linphase.wfb == 'V'

    def test_to_mag(self):
        """to_mag returns magnitudes and can append results to an Ephem."""
        linphase = LinearPhaseFunc(5 * u.mag, 0.04 * u.mag/u.deg,
                                   radius=300 * u.km)
        pha_test = np.linspace(0, np.pi, 10) * u.rad
        mag_test = [5., 5.8, 6.6, 7.4, 8.2, 9., 9.8, 10.6, 11.4, 12.2] * u.mag
        eph = linphase.to_mag(pha_test, append_results=True)
        assert np.allclose(eph['mag'].value, mag_test.value)
        assert eph['mag'].unit == mag_test.unit
        assert np.allclose(eph['alpha'].value, pha_test.value)
        assert eph['alpha'].unit == pha_test.unit
        assert set(eph.field_names) == {'alpha', 'mag'}
        # A second append must not clobber the first column.
        eph = linphase.to_mag(eph, append_results=True)
        assert set(eph.field_names) == {'alpha', 'mag', 'mag1'}

    def test_to_ref(self):
        """to_ref returns (optionally normalized) reflectances and appends them."""
        linphase = LinearPhaseFunc(5 * u.mag, 0.04 * u.mag/u.deg,
                                   radius=300 * u.km, wfb='V')
        pha_test = np.linspace(0, 180, 10) * u.deg
        eph = linphase.to_ref(pha_test, append_results=True)
        ref_test = [1.55045242e-02, 7.42093183e-03, 3.55188129e-03,
                    1.70003727e-03, 8.13688994e-04, 3.89456039e-04,
                    1.86405380e-04, 8.92192241e-05, 4.27030055e-05,
                    2.04389434e-05] / u.sr
        ref_norm_test = np.array(
            [1., 0.47863009, 0.22908677, 0.10964782, 0.05248075,
             0.02511886, 0.01202264, 0.0057544, 0.00275423,
             0.00131826]) * u.dimensionless_unscaled
        assert u.allclose(eph['ref'], ref_test)
        assert u.allclose(eph['alpha'], pha_test)
        assert set(eph.field_names) == {'alpha', 'ref'}
        eph_norm = linphase.to_ref(pha_test, normalized=0 * u.deg,
                                   append_results=True)
        assert u.allclose(eph_norm['ref'], ref_norm_test)
        assert u.allclose(eph_norm['alpha'], pha_test)
        assert set(eph_norm.field_names) == {'alpha', 'ref'}
        eph = linphase.to_ref(eph, append_results=True)
        assert set(eph.field_names) == {'alpha', 'ref', 'ref1'}
        # test exception
        # Without a band (wfb), reflectance cannot be computed.
        linphase = LinearPhaseFunc(5 * u.mag, 0.04 * u.mag/u.deg,
                                   radius=300 * u.km)
        with pytest.raises(ValueError):
            linphase.to_ref(10 * u.deg)

    def test_props(self):
        """Derived photometric properties: albedos and phase integral."""
        linphase = LinearPhaseFunc(5 * u.mag, 2.29 * u.mag/u.rad,
                                   radius=300 * u.km, wfb='V')
        assert np.isclose(linphase.geomalb, 0.0487089)
        assert np.isclose(linphase.bondalb, 0.01790315)
        assert np.isclose(linphase.phaseint, 0.36755394203990327)

    def test__distance_module(self):
        """Distance module equals (r * delta)**2 for au-valued distances."""
        r = [0.5, 1, 1.2, 2] * u.au
        delta = [0.3, 1, 1, 2] * u.au
        m = LinearPhaseFunc(5 * u.mag, 0.04 * u.mag / u.deg)
        module_test = [0.0225, 1., 1.44, 16.]
        module = m._distance_module(Ephem.from_dict({'r': r, 'delta': delta}))
        assert np.allclose(module, module_test)

    def test_fit(self):
        """Fitting noisy synthetic data recovers a LinearPhaseFunc instance."""
        pha = np.linspace(0, 60, 100) * u.deg
        mag = LinearPhaseFunc(5 * u.mag, 0.04 * u.mag/u.deg)(pha) + \
            (np.random.rand(100)*0.2-0.1) * u.mag
        from astropy.modeling.fitting import LevMarLSQFitter
        fitter = LevMarLSQFitter()
        m0 = LinearPhaseFunc(3 * u.mag, 0.02 * u.mag/u.deg)
        m = fitter(m0, pha, mag)
        assert isinstance(m, LinearPhaseFunc)

    def test_fit_deriv(self):
        """fit_deriv(alpha=1, H=1, S=2) yields [1, alpha] == [1, 1]."""
        assert np.allclose(LinearPhaseFunc.fit_deriv(1, 1, 2), [1, 1])
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import time
from collections import defaultdict
from functools import partial
from logging import Logger
from typing import List, Mapping, Optional
import pytest
from airbyte_protocol.models import ConfiguredAirbyteCatalog, Type
from connector_acceptance_test.base import BaseTest
from connector_acceptance_test.config import IgnoredFieldsConfiguration
from connector_acceptance_test.utils import ConnectorRunner, JsonSchemaHelper, SecretDict, full_refresh_only_catalog, make_hashable
from connector_acceptance_test.utils.json_schema_helper import CatalogField
# from airbyte_pr import ConfiguredAirbyteCatalog, Type
def METHOD_NAME(configured_catalog: ConfiguredAirbyteCatalog) -> Mapping[str, List[CatalogField]]:
    """Map each stream name in the catalog to its primary-key CatalogFields.

    Streams without a primary key map to an empty list.

    :param configured_catalog: catalog whose streams are inspected
    :return: mapping of stream name -> list of primary-key fields
    """
    pk_fields_by_stream = {}
    for configured_stream in configured_catalog.streams:
        schema_helper = JsonSchemaHelper(schema=configured_stream.stream.json_schema)
        primary_keys = configured_stream.primary_key or []
        pk_fields_by_stream[configured_stream.stream.name] = [schema_helper.field(pk) for pk in primary_keys]
    return pk_fields_by_stream
def primary_keys_only(record, pks):
    """Serialize *record* to 'path=value' pairs for the given primary-key fields, joined by ';'."""
    pairs = (f"{pk.path}={pk.parse(record)}" for pk in pks)
    return ";".join(pairs)
@pytest.mark.default_timeout(20 * 60)
class TestFullRefresh(BaseTest):
    """Acceptance tests checking that full-refresh syncs are stable across two sequential reads."""

    def assert_emitted_at_increase_on_subsequent_runs(self, first_read_records, second_read_records):
        """Assert every record of the second read was emitted after every record of the first read."""
        first_read_records_data = [record.data for record in first_read_records]
        assert first_read_records_data, "At least one record should be read using provided catalog"
        max_emitted_at_first_read = max(record.emitted_at for record in first_read_records)
        min_emitted_at_second_read = min(record.emitted_at for record in second_read_records)
        assert max_emitted_at_first_read < min_emitted_at_second_read, "emitted_at should increase on subsequent runs"

    def assert_two_sequential_reads_produce_same_or_subset_records(
        self, records_1, records_2, configured_catalog, ignored_fields, detailed_logger
    ):
        """Assert the first read's records are, per stream, a subset of (or equal to) the second read's.

        Records are compared by primary key when the stream declares one, otherwise by a
        hash of the full record with configured ignored fields excluded.
        """
        records_by_stream_1 = defaultdict(list)
        for record in records_1:
            records_by_stream_1[record.stream].append(record.data)
        records_by_stream_2 = defaultdict(list)
        for record in records_2:
            records_by_stream_2[record.stream].append(record.data)
        pks_by_stream = METHOD_NAME(configured_catalog)
        for stream in records_by_stream_1:
            if pks_by_stream.get(stream):
                serializer = partial(primary_keys_only, pks=pks_by_stream.get(stream))
            else:
                serializer = partial(make_hashable, exclude_fields=[field.name for field in ignored_fields.get(stream, [])])
            stream_records_1 = records_by_stream_1.get(stream)
            stream_records_2 = records_by_stream_2.get(stream)
            # Serialize each read once; the original code recomputed these sets
            # (up to three times for the first read) on the failure path.
            serialized_1 = set(map(serializer, stream_records_1))
            serialized_2 = set(map(serializer, stream_records_2))
            if not serialized_1.issubset(serialized_2):
                missing_records = serialized_1 - serialized_2
                msg = f"{stream}: the two sequential reads should produce either equal set of records or one of them is a strict subset of the other"
                detailed_logger.info(msg)
                detailed_logger.info("First read")
                detailed_logger.log_json_list(stream_records_1)
                detailed_logger.info("Second read")
                detailed_logger.log_json_list(stream_records_2)
                detailed_logger.info("Missing records")
                detailed_logger.log_json_list(missing_records)
                pytest.fail(msg)

    async def test_sequential_reads(
        self,
        connector_config: SecretDict,
        configured_catalog: ConfiguredAirbyteCatalog,
        ignored_fields: Optional[Mapping[str, List[IgnoredFieldsConfiguration]]],
        docker_runner: ConnectorRunner,
        detailed_logger: Logger,
    ):
        """Run the connector twice with a full-refresh-only catalog and compare the outputs."""
        configured_catalog = full_refresh_only_catalog(configured_catalog)
        output_1 = await docker_runner.call_read(
            connector_config,
            configured_catalog,
            enable_caching=False,
        )
        records_1 = [message.record for message in output_1 if message.type == Type.RECORD]
        # sleep for 1 second to ensure that the emitted_at timestamps of the two reads differ
        time.sleep(1)
        output_2 = await docker_runner.call_read(connector_config, configured_catalog, enable_caching=False)
        records_2 = [message.record for message in output_2 if message.type == Type.RECORD]
        self.assert_emitted_at_increase_on_subsequent_runs(records_1, records_2)
        self.assert_two_sequential_reads_produce_same_or_subset_records(
            records_1, records_2, configured_catalog, ignored_fields, detailed_logger
        )
298,340 | on point alpha change | # -*- coding: utf-8 -*-
"""
Created on Sat May 14 11:20:11 2016
@author: david
"""
import wx
#import PYME.ui.autoFoldPanel as afp
import PYME.ui.manualFoldPanel as afp
from PYME.recipes.traits import HasTraits, Float, File, BaseEnum, Enum, List, Instance, CStr, Bool, Int, on_trait_change
class PointDisplaySettings(HasTraits):
    """Traited settings controlling how localisation points are displayed in VisGUI."""
    # point size in nm (edited via the 'Size [nm]:' control in PointSettingsPanel)
    pointSize = Float(5.0)
    # name of the pipeline data column used to colour points ('t' by default)
    colourDataKey = CStr('t')
    # point opacity in [0, 1]
    alpha = Float(1.0)
def _getPossibleKeys(pipeline):
colKeys = ['<None>']
if not pipeline.colourFilter is None: #is the test needed?
colKeys += list(pipeline.keys())
colKeys += list(pipeline.GeneratedMeasures.keys())
colKeys.sort()
return colKeys
class PointSettingsPanel(wx.Panel):
    """A GUI class for determining the settings to use when displaying points
    in VisGUI.

    Constructed as follows:

    PointSettingsPanel(parent, pipeline, pointDisplaySettings)

    where:

    parent is the parent window
    pipeline is the pipeline object which provides the points,
    pointDisplaySettings is an instance of PointDisplaySettings
    """
    def __init__(self, parent, pipeline, pointDisplaySettings):
        wx.Panel.__init__(self, parent, -1)

        self.pipeline = pipeline
        self.pointDisplaySettings = pointDisplaySettings

        bsizer = wx.BoxSizer(wx.VERTICAL)

        # point size text entry
        hsizer = wx.BoxSizer(wx.HORIZONTAL)
        hsizer.Add(wx.StaticText(self, -1, 'Size [nm]:'), 0,wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
        self.tPointSize = wx.TextCtrl(self, -1, '%3.2f' % self.pointDisplaySettings.pointSize)
        hsizer.Add(self.tPointSize, 0,wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
        bsizer.Add(hsizer, 0, wx.ALL, 0)

        # point alpha (opacity) text entry
        hsizer = wx.BoxSizer(wx.HORIZONTAL)
        hsizer.Add(wx.StaticText(self, -1, 'Alpha:'), 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.tPointAlpha = wx.TextCtrl(self, -1, '%3.2f' % self.pointDisplaySettings.alpha)
        hsizer.Add(self.tPointAlpha, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        bsizer.Add(hsizer, 0, wx.ALL, 0)

        # colour-by data key selection
        colKeys = _getPossibleKeys(self.pipeline)

        hsizer = wx.BoxSizer(wx.HORIZONTAL)
        hsizer.Add(wx.StaticText(self, -1, 'Colour:'), 0,wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
        self.chPointColour = wx.Choice(self, -1, choices=colKeys)
        currentCol = self.pointDisplaySettings.colourDataKey
        if currentCol in colKeys:
            self.chPointColour.SetSelection(colKeys.index(currentCol))
        hsizer.Add(self.chPointColour, 0,wx.ALL|wx.ALIGN_CENTER_VERTICAL|wx.EXPAND, 5)
        bsizer.Add(hsizer, 0, wx.ALL|wx.EXPAND, 0)

        self.SetSizerAndFit(bsizer)

        # wire up event handlers
        self.tPointSize.Bind(wx.EVT_TEXT, self.OnPointSizeChange)
        self.tPointAlpha.Bind(wx.EVT_TEXT, self.METHOD_NAME)
        self.chPointColour.Bind(wx.EVT_CHOICE, self.OnChangePointColour)
        # refresh the choices when the mouse enters the control, so keys generated
        # since construction become selectable
        self.chPointColour.Bind(wx.EVT_ENTER_WINDOW, self.UpdatePointColourChoices)
        self.pipeline.onKeysChanged.connect(self.UpdatePointColourChoices)

    def UpdatePointColourChoices(self, event=None, **kwargs):
        """Update our choice of keys if the pipeline has changed.
        """
        colKeys = _getPossibleKeys(self.pipeline)
        self.chPointColour.Clear()
        self.chPointColour.SetItems(colKeys)
        # re-select the current key if it is still available
        currentCol = self.pointDisplaySettings.colourDataKey
        if currentCol in colKeys:
            self.chPointColour.SetSelection(colKeys.index(currentCol))

    def OnPointSizeChange(self, event):
        """Push the edited point size back into the settings object."""
        self.pointDisplaySettings.pointSize = float(self.tPointSize.GetValue())
        #self.glCanvas.Refresh()

    def METHOD_NAME(self, event):
        """Push the edited alpha value back into the settings object, ignoring
        text that does not (yet) parse as a float."""
        try:
            self.pointDisplaySettings.alpha = float(self.tPointAlpha.GetValue())
        except ValueError:
            pass # do nothing, if value couldn't be parsed
        #self.glCanvas.Refresh()

    def OnChangePointColour(self, event):
        """Record the newly selected colour data key."""
        self.pointDisplaySettings.colourDataKey = event.GetString()
def GenPointsPanel(visgui, pnl, title='Points'):
    """Generate a points pane and insert into the given panel

    :param visgui: the VisGUI frame (provides .pipeline and .pointDisplaySettings)
    :param pnl: the fold-panel container to insert the new pane into
    :param title: caption for the folding pane
    """
    item = afp.foldingPane(pnl, -1, caption=title, pinned = True)
    pan = PointSettingsPanel(item, visgui.pipeline, visgui.pointDisplaySettings)
    item.AddNewElement(pan)
    pnl.AddPane(item)
298,341 | accept | # -*- coding: utf-8 -*-
#
# ast_node.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from typing import Optional, List
from abc import ABCMeta, abstractmethod
from pynestml.symbol_table.scope import Scope
from pynestml.utils.ast_source_location import ASTSourceLocation
class ASTNode(metaclass=ABCMeta):
    """
    This class is not a part of the grammar but is used to store commonalities of all possible meta_model classes,
    e.g., the source position.

    This class is abstract, thus no instances can be created.

    Attributes:
        source_position = None
        scope = None
        comment = None
        #
        pre_comments = list()
        in_comment = None
        #
        implicit_conversion_factor = None
    """

    def __init__(self, source_position: ASTSourceLocation = None, scope: Scope = None, comment: Optional[str] = None, pre_comments: Optional[List[str]] = None,
                 in_comment: Optional[str] = None, implicit_conversion_factor: Optional[float] = None):
        """
        The standard constructor.
        :param source_position: a source position element.
        :param scope: the scope in which this element is embedded in.
        :param comment: comment for this node
        :param pre_comments: pre-comments for this node
        :param in_comment: in-comment for this node
        :param implicit_conversion_factor: see set_implicit_conversion_factor()
        """
        self.source_position = source_position
        self.scope = scope
        self.comment = comment
        if pre_comments is None:
            # avoid sharing a mutable default between instances
            pre_comments = []
        self.pre_comments = pre_comments
        self.in_comment = in_comment
        self.implicit_conversion_factor = implicit_conversion_factor

    @abstractmethod
    def clone(self):
        """
        Return a deep copy of this node.
        """
        pass

    @abstractmethod
    def equals(self, other):
        """
        The equals operation.
        :param other: a different object.
        :type other: object
        :return: True if equal, otherwise False.
        :rtype: bool
        """
        pass

    # todo: we can do this with a visitor instead of hard coding grammar traversals all over the place
    @abstractmethod
    def get_parent(self, ast):
        """
        Indicates whether this node contains the handed over node.
        :param ast: an arbitrary meta_model node.
        :type ast: AST_
        :return: AST if this or one of the child nodes contains the handed over element.
        :rtype: AST_ or None
        """
        pass

    def set_implicit_conversion_factor(self, implicit_factor: Optional[float]) -> None:
        """
        Sets a factor that, when applied to the (unit-typed) expression, converts it to the magnitude of the
        context where it is used. eg. Volt + milliVolt needs to either be
        1000*Volt + milliVolt or Volt + 0.001 * milliVolt
        :param implicit_factor: the factor to be installed
        """
        self.implicit_conversion_factor = implicit_factor

    def get_implicit_conversion_factor(self) -> Optional[float]:
        """
        Returns the factor installed as implicitConversionFactor for this expression
        :return: the conversion factor, if present, or None
        """
        return self.implicit_conversion_factor

    def get_source_position(self):
        """
        Returns the source position of the element.
        :return: a source position object; a predefined placeholder position if none was set.
        :rtype: ASTSourceLocation
        """
        if self.source_position is None:
            return ASTSourceLocation.get_predefined_source_position()
        return self.source_position

    def set_source_position(self, new_position):
        """
        Updates the source position of the element.
        :param new_position: a new source position
        :type new_position: ASTSourceLocation
        :return: a source position object.
        :rtype: ASTSourceLocation
        """
        self.source_position = new_position

    def get_scope(self):
        """
        Returns the scope of this element.
        :return: a scope object.
        :rtype: Scope
        """
        return self.scope

    def update_scope(self, _scope):
        """
        Updates the scope of this element.
        :param _scope: a scope object.
        :type _scope: Scope
        """
        self.scope = _scope

    def get_comment(self):
        """
        Returns the comment of this element.
        :return: a comment.
        :rtype: str
        """
        return self.comment

    def set_comment(self, comment):
        """
        Updates the comment of this element.
        :param comment: a comment
        :type comment: str
        """
        self.comment = comment

    def has_comment(self):
        """
        Indicates whether this element stores a comment.
        :return: True if has comment, otherwise False.
        :rtype: bool
        """
        return self.comment is not None and len(self.comment) > 0

    def print_comment(self, prefix: str = "") -> str:
        """
        Prints the comment of this meta_model element.
        :param prefix: a prefix string
        :return: a comment
        """
        ret = ''
        if not self.has_comment():
            return prefix if prefix is not None else ''
        # in the last part, delete the new line if it is the last comment, otherwise there is an ugly gap
        # between the comment and the element
        # NOTE(review): this iterates get_comment(); if `comment` is a plain str (as the
        # type hint suggests) this walks individual characters -- presumably `comment`
        # actually holds a list of comment lines here. Confirm against callers.
        for comment in self.get_comment():
            ret += (prefix + ' ' if prefix is not None else '') + comment + \
                   ('\n' if self.get_comment().index(comment) < len(self.get_comment()) - 1 else '')
        return ret

    def get_comments(self):
        """
        Returns all comments attached to this node: the pre-comments followed by
        the in-comment (if any).
        :return: list of comment strings
        :rtype: list
        """
        comments = list()
        comments.extend(self.pre_comments)
        if self.in_comment is not None:
            comments.append(self.in_comment)
        return comments

    def METHOD_NAME(self, visitor):
        """
        Double dispatch for visitor pattern.
        :param visitor: A visitor.
        :type visitor: Inherited from ASTVisitor.
        """
        visitor.handle(self)

    def __str__(self):
        # local import (presumably to avoid a circular import with the code generation package)
        from pynestml.codegeneration.printers.nestml_printer import NESTMLPrinter
        return NESTMLPrinter().print(self)
298,342 | test pickle rlock | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the pickler module."""
# pytype: skip-file
import sys
import threading
import types
import unittest
from apache_beam.internal import module_test
from apache_beam.internal.pickler import dumps
from apache_beam.internal.pickler import loads
class PicklerTest(unittest.TestCase):
    """Tests that dumps/loads round-trip a variety of Python objects."""

    # True when running on an interpreter without types.MappingProxyType
    NO_MAPPINGPROXYTYPE = not hasattr(types, "MappingProxyType")

    def test_basics(self):
        """Tests round-tripping of simple containers and a lambda."""
        self.assertEqual([1, 'a', ('z', )], loads(dumps([1, 'a', ('z', )])))
        fun = lambda x: 'xyz-%s' % x
        self.assertEqual('xyz-abc', loads(dumps(fun))('abc'))

    def test_lambda_with_globals(self):
        """Tests that the globals of a function are preserved."""

        # The point of the test is that the lambda being called after unpickling
        # relies on having the re module being loaded.
        self.assertEqual(['abc', 'def'],
                         loads(dumps(
                             module_test.get_lambda_with_globals()))('abc def'))

    def test_lambda_with_main_globals(self):
        """Tests that a lambda referencing a module from __main__ globals survives pickling."""
        self.assertEqual(unittest, loads(dumps(lambda: unittest))())

    def test_lambda_with_closure(self):
        """Tests that the closure of a function is preserved."""
        self.assertEqual(
            'closure: abc',
            loads(dumps(module_test.get_lambda_with_closure('abc')))())

    def test_class(self):
        """Tests that a class object is pickled correctly."""
        self.assertEqual(['abc', 'def'],
                         loads(dumps(module_test.Xyz))().foo('abc def'))

    def test_object(self):
        """Tests that a class instance is pickled correctly."""
        self.assertEqual(['abc', 'def'],
                         loads(dumps(module_test.XYZ_OBJECT)).foo('abc def'))

    def test_nested_class(self):
        """Tests that a nested class object is pickled correctly."""
        self.assertEqual(
            'X:abc', loads(dumps(module_test.TopClass.NestedClass('abc'))).datum)
        self.assertEqual(
            'Y:abc',
            loads(dumps(module_test.TopClass.MiddleClass.NestedClass('abc'))).datum)

    def test_dynamic_class(self):
        """Tests that a dynamically created class object is pickled correctly."""
        self.assertEqual(
            'Z:abc', loads(dumps(module_test.create_class('abc'))).get())

    def test_generators(self):
        """Generator objects cannot be pickled and should raise TypeError."""
        with self.assertRaises(TypeError):
            dumps((_ for _ in range(10)))

    def test_recursive_class(self):
        """Tests round-tripping an attribute of a self-referencing class instance."""
        self.assertEqual(
            'RecursiveClass:abc',
            loads(dumps(module_test.RecursiveClass('abc').datum)))

    def METHOD_NAME(self):
        """Tests that a threading.RLock instance survives a dumps/loads round trip."""
        rlock_instance = threading.RLock()
        rlock_type = type(rlock_instance)
        self.assertIsInstance(loads(dumps(rlock_instance)), rlock_type)

    @unittest.skipIf(NO_MAPPINGPROXYTYPE, 'test if MappingProxyType introduced')
    def test_dump_and_load_mapping_proxy(self):
        """Tests that a MappingProxyType round-trips with contents and type preserved."""
        self.assertEqual(
            'def', loads(dumps(types.MappingProxyType({'abc': 'def'})))['abc'])
        self.assertEqual(
            types.MappingProxyType, type(loads(dumps(types.MappingProxyType({})))))

    # pylint: disable=exec-used
    @unittest.skipIf(sys.version_info < (3, 7), 'Python 3.7 or above only')
    def test_dataclass(self):
        # exec is used so the dataclass syntax does not break parsing on older interpreters
        exec(
            '''
from apache_beam.internal.module_test import DataClass
self.assertEqual(DataClass(datum='abc'), loads(dumps(DataClass(datum='abc'))))
''')
# Standard unittest entry point when this module is run directly.
if __name__ == '__main__':
    unittest.main()
298,343 | group meta data | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVolumeGroupResult',
'AwaitableGetVolumeGroupResult',
'get_volume_group',
'get_volume_group_output',
]
@pulumi.output_type
class GetVolumeGroupResult:
    """
    Volume group resource for create
    """
    # NOTE: auto-generated output type (see file header); each constructor
    # argument is type-checked and then stored via pulumi.set.
    def __init__(__self__, METHOD_NAME=None, id=None, location=None, name=None, provisioning_state=None, type=None, volumes=None):
        if METHOD_NAME and not isinstance(METHOD_NAME, dict):
            raise TypeError("Expected argument 'group_meta_data' to be a dict")
        pulumi.set(__self__, "group_meta_data", METHOD_NAME)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if volumes and not isinstance(volumes, list):
            raise TypeError("Expected argument 'volumes' to be a list")
        pulumi.set(__self__, "volumes", volumes)

    @property
    @pulumi.getter(name="groupMetaData")
    def METHOD_NAME(self) -> Optional['outputs.VolumeGroupMetaDataResponse']:
        """
        Volume group details
        """
        return pulumi.get(self, "group_meta_data")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        Azure lifecycle management
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def volumes(self) -> Optional[Sequence['outputs.VolumeGroupVolumePropertiesResponse']]:
        """
        List of volumes from group
        """
        return pulumi.get(self, "volumes")
class AwaitableGetVolumeGroupResult(GetVolumeGroupResult):
    # Makes the result awaitable from async code: the unreachable `yield`
    # turns __await__ into a generator without ever yielding.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetVolumeGroupResult(
            METHOD_NAME=self.METHOD_NAME,
            id=self.id,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            type=self.type,
            volumes=self.volumes)
def get_volume_group(account_name: Optional[str] = None,
                     resource_group_name: Optional[str] = None,
                     volume_group_name: Optional[str] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVolumeGroupResult:
    """
    Get details of the specified volume group


    :param str account_name: The name of the NetApp account
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str volume_group_name: The name of the volumeGroup
    """
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['volumeGroupName'] = volume_group_name
    # merge caller-supplied invoke options with provider defaults before invoking
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:netapp/v20221101preview:getVolumeGroup', __args__, opts=opts, typ=GetVolumeGroupResult).value

    return AwaitableGetVolumeGroupResult(
        METHOD_NAME=pulumi.get(__ret__, 'group_meta_data'),
        id=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        type=pulumi.get(__ret__, 'type'),
        volumes=pulumi.get(__ret__, 'volumes'))
@_utilities.lift_output_func(get_volume_group)
def get_volume_group_output(account_name: Optional[pulumi.Input[str]] = None,
                            resource_group_name: Optional[pulumi.Input[str]] = None,
                            volume_group_name: Optional[pulumi.Input[str]] = None,
                            opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVolumeGroupResult]:
    """
    Get details of the specified volume group


    :param str account_name: The name of the NetApp account
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str volume_group_name: The name of the volumeGroup
    """
    # body intentionally empty: lift_output_func wraps get_volume_group to accept Inputs
    ...
298,344 | test selection range variables 3 | from robocorp_ls_core.lsp import SelectionRangeTypedDict, RangeTypedDict, Range
def _check_not_same_and_inside(child: RangeTypedDict, parent: RangeTypedDict):
    """Assert that *child* is strictly contained in *parent* (inside but not equal)."""
    child_range = Range.create_from_range_typed_dict(child)
    parent_range = Range.create_from_range_typed_dict(parent)
    assert child_range != parent_range, f"{child_range} is equal to {parent_range}"
    assert child_range.is_inside(parent_range), f"{child_range} should be inside {parent_range}"
def check_parent_ranges(r: SelectionRangeTypedDict):
    """Walk the parent chain of a selection range, asserting each range is strictly
    inside its parent."""
    node = r
    while (parent := node.get("parent")) is not None:
        _check_not_same_and_inside(node["range"], parent["range"])
        node = parent
def test_selection_range_basic(workspace, data_regression):
    """Selection ranges at a keyword-call position form a valid, strictly-nested parent chain."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.selection_range import selection_range

    workspace.set_root("case4")
    doc = workspace.put_doc(
        "my.robot",
        """*** Settings ***
Library Collections
*** Test Cases ***
Test case 1
Collections.Append to list foo
Append to list foo""",
    )
    # place the cursor on the last line of the document
    line, _col = doc.get_last_line_col()
    col = 5
    completion_context = CompletionContext(
        doc, workspace=workspace.ws, line=line, col=col
    )
    positions = [{"line": line, "character": col}]
    result = selection_range(completion_context, positions=positions)
    assert len(result) == 1
    # each returned range must nest strictly inside its parents
    for r in result:
        check_parent_ranges(r)
    data_regression.check(result)
def test_selection_range_no_dupes(workspace, libspec_manager, data_regression):
    """Selection ranges for a bare keyword call contain no duplicated (equal) parent ranges."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.selection_range import selection_range

    workspace.set_root("case4", libspec_manager=libspec_manager)
    doc = workspace.put_doc(
        "my.robot",
        """*** Settings ***
Library Collections
*** Test Cases ***
Test case 1
Collections.Append to list foo
Append to list""",
    )
    # place the cursor on the last line of the document
    line, _col = doc.get_last_line_col()
    col = 5
    completion_context = CompletionContext(
        doc, workspace=workspace.ws, line=line, col=col
    )
    positions = [{"line": line, "character": col}]
    result = selection_range(completion_context, positions=positions)
    assert len(result) == 1
    # check_parent_ranges asserts child != parent, i.e. no duplicated ranges
    for r in result:
        check_parent_ranges(r)
    data_regression.check(result)
def test_selection_range_variables(workspace, libspec_manager, data_regression):
    """Selection ranges when the cursor is inside an assigned variable name."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.selection_range import selection_range

    workspace.set_root("case4", libspec_manager=libspec_manager)
    doc = workspace.put_doc(
        "my.robot",
        """*** Settings ***
Library Collections
*** Test Cases ***
Test case 1
Collections.Append to list foo
${var a}= Append to list""",
    )
    # cursor inside the `${var a}` assignment on the last line
    line, _col = doc.get_last_line_col()
    col = 6
    completion_context = CompletionContext(
        doc, workspace=workspace.ws, line=line, col=col
    )
    positions = [{"line": line, "character": col}]
    result = selection_range(completion_context, positions=positions)
    assert len(result) == 1
    for r in result:
        check_parent_ranges(r)
    data_regression.check(result)
def test_selection_range_variables_2(workspace, libspec_manager, data_regression):
    """Selection ranges with the cursor inside extended variable syntax (${var a['bar']})."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.selection_range import selection_range

    workspace.set_root("case4", libspec_manager=libspec_manager)
    doc = workspace.put_doc(
        "my.robot",
        """*** Settings ***
Library Collections
*** Test Cases ***
Test case 1
${var a['bar']}= Evaluate 'bar'""",
    )
    # cursor inside the variable name part of the assignment
    line, _col = doc.get_last_line_col()
    col = 6
    completion_context = CompletionContext(
        doc, workspace=workspace.ws, line=line, col=col
    )
    positions = [{"line": line, "character": col}]
    result = selection_range(completion_context, positions=positions)
    assert len(result) == 1
    for r in result:
        check_parent_ranges(r)
    data_regression.check(result)
def METHOD_NAME(workspace, libspec_manager, data_regression):
    """Selection ranges with the cursor in the subscript part of extended variable syntax."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.selection_range import selection_range

    workspace.set_root("case4", libspec_manager=libspec_manager)
    doc = workspace.put_doc(
        "my.robot",
        """*** Settings ***
Library Collections
*** Test Cases ***
Test case 1
${var a['bar']}= Evaluate 'bar'""",
    )
    # col 14 differs from test_selection_range_variables_2 (col 6): cursor
    # sits further inside the `${var a['bar']}` expression
    line, _col = doc.get_last_line_col()
    col = 14
    completion_context = CompletionContext(
        doc, workspace=workspace.ws, line=line, col=col
    )
    positions = [{"line": line, "character": col}]
    result = selection_range(completion_context, positions=positions)
    assert len(result) == 1
    for r in result:
        check_parent_ranges(r)
    data_regression.check(result)
def test_selection_range_on_empty_space(workspace, libspec_manager, data_regression):
    """Selection ranges when the cursor is on leading whitespace of the last line."""
    from robotframework_ls.impl.completion_context import CompletionContext
    from robotframework_ls.impl.selection_range import selection_range

    workspace.set_root("case4", libspec_manager=libspec_manager)
    doc = workspace.put_doc(
        "my.robot",
        """*** Settings ***
Library Collections
*** Test Cases ***
Test case 1
${var a['bar']}= Evaluate 'bar'""",
    )
    # col 1: before any token on the last line
    line, _col = doc.get_last_line_col()
    col = 1
    completion_context = CompletionContext(
        doc, workspace=workspace.ws, line=line, col=col
    )
    positions = [{"line": line, "character": col}]
    result = selection_range(completion_context, positions=positions)
    assert len(result) == 1
    for r in result:
        check_parent_ranges(r)
    data_regression.check(result)
298,345 | location | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetAccountResult',
'AwaitableGetAccountResult',
'get_account',
'get_account_output',
]
@pulumi.output_type
class GetAccountResult:
    """
    Account resource details.
    """
    # NOTE: auto-generated output type (see file header); each constructor
    # argument is type-checked and then stored via pulumi.set.
    def __init__(__self__, id=None, METHOD_NAME=None, name=None, properties=None, system_data=None, tags=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", METHOD_NAME)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        The geo-location where the resource lives
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.AccountResourceResponseProperties':
        """
        Account resource properties.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Metadata pertaining to creation and last modification of the resource.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetAccountResult(GetAccountResult):
    # Makes the result awaitable from async code: the unreachable `yield`
    # turns __await__ into a generator without ever yielding.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetAccountResult(
            id=self.id,
            METHOD_NAME=self.METHOD_NAME,
            name=self.name,
            properties=self.properties,
            system_data=self.system_data,
            tags=self.tags,
            type=self.type)
def get_account(account_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccountResult:
    """
    Returns RecommendationsService Account resource for a given name.


    :param str account_name: The name of the RecommendationsService Account resource.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['resourceGroupName'] = resource_group_name
    # merge caller-supplied invoke options with provider defaults before invoking
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:recommendationsservice/v20220201:getAccount', __args__, opts=opts, typ=GetAccountResult).value

    return AwaitableGetAccountResult(
        id=pulumi.get(__ret__, 'id'),
        METHOD_NAME=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_account)
def get_account_output(account_name: Optional[pulumi.Input[str]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAccountResult]:
    """
    Returns RecommendationsService Account resource for a given name.


    :param str account_name: The name of the RecommendationsService Account resource.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # body intentionally empty: lift_output_func wraps get_account to accept Inputs
    ...
298,346 | rn50 pipeline | # Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import os
from itertools import product
from nvidia.dali.pipeline.experimental import pipeline_def
from time import time
from test_utils import get_dali_extra_path
@pipeline_def(device_id=0)
def METHOD_NAME(data_path):
    """ResNet-50 style training pipeline (CPU decode variant).

    Decodes JPEGs on CPU, applies fused resize+random-crop+mirror, then
    normalizes to FP16 on GPU. Random ops are declared first so the RNG
    consumption order stays stable.
    """
    # Random crop anchor (x, y), random shorter-edge target, random flip.
    uniform = fn.random.uniform(range=(0., 1.), shape=2)
    resize_uniform = fn.random.uniform(range=(256., 480.))
    mirror = fn.random.coin_flip(probability=0.5)
    jpegs, _ = fn.readers.file(file_root=data_path)
    # CPU decode (no device='mixed') — contrast with rn50_pipeline_2 below.
    images = fn.decoders.image(jpegs, output_type=types.RGB)
    resized_images = fn.fast_resize_crop_mirror(images, crop=(224, 224), crop_pos_x=uniform[0],
                                                crop_pos_y=uniform[1], mirror=mirror,
                                                resize_shorter=resize_uniform)
    # Upload to GPU and normalize to NCHW FP16 with per-channel mean 128.
    output = fn.crop_mirror_normalize(resized_images.gpu(), device='gpu', dtype=types.FLOAT16,
                                      mean=[128., 128., 128.], std=[1., 1., 1.])
    return output
@pipeline_def(device_id=0)
def rn50_pipeline_2(data_path):
    """ResNet-50 style training pipeline (mixed-decode variant).

    Decodes JPEGs with the hybrid CPU/GPU decoder, resizes on GPU, and folds
    crop + mirror into crop_mirror_normalize instead of a fused resize op.
    """
    uniform = fn.random.uniform(range=(0., 1.), shape=2)
    resize_uniform = fn.random.uniform(range=(256., 480.))
    mirror = fn.random.coin_flip(probability=0.5)
    jpegs, _ = fn.readers.file(file_root=data_path)
    # device='mixed': nvJPEG-accelerated decode that outputs directly on GPU.
    images = fn.decoders.image(jpegs, device='mixed', output_type=types.RGB)
    resized_images = fn.resize(images, device='gpu', interp_type=types.INTERP_LINEAR,
                               resize_shorter=resize_uniform)
    # Crop/flip happen here rather than in a fused resize op.
    output = fn.crop_mirror_normalize(resized_images, device='gpu', dtype=types.FLOAT16,
                                      crop=(224, 224), mean=[128., 128., 128.], std=[1., 1., 1.],
                                      mirror=mirror, crop_pos_x=uniform[0], crop_pos_y=uniform[1])
    return output
def run_benchmark(pipe_fun, batch_size, num_threads, num_samples, debug, data_path):
    """Build a pipeline and time each iteration.

    Args:
        pipe_fun: pipeline-definition function, called with ``data_path`` and
            the standard pipeline kwargs.
        batch_size: samples per iteration.
        num_threads: CPU worker threads for the pipeline.
        num_samples: total samples; iteration count is ``num_samples // batch_size``.
        debug: whether to build the pipeline in debug (eager) mode.
        data_path: image directory forwarded to ``pipe_fun``.

    Returns:
        Tuple of (total run time excluding build, build + first-iteration
        time, numpy array of per-iteration times from the second iteration on).
    """
    iterations = num_samples // batch_size
    stamps = np.empty(iterations + 1)
    stamps[0] = time()
    pipeline = pipe_fun(data_path, batch_size=batch_size, num_threads=num_threads, debug=debug)
    pipeline.build()
    built_at = time()
    for step in range(iterations):
        pipeline.run()
        stamps[step + 1] = time()
    # First delta spans construction+build+iter 1; the rest are pure iterations.
    deltas = np.diff(stamps)
    return stamps[-1] - built_at, deltas[0], deltas[1:]
def test_rn50_benchmark(pipe_fun=METHOD_NAME, batch_size=8, num_threads=2, num_samples=256,
                        data_path=None, save_df=None):
    """Benchmark ``pipe_fun`` in standard and debug mode and print a summary.

    Args:
        pipe_fun: pipeline-definition function to benchmark.
        batch_size: samples per pipeline iteration.
        num_threads: DALI CPU worker threads.
        num_samples: total samples to process per run.
        data_path: image directory; defaults to DALI_extra's single-jpeg set.
        save_df: optional pandas DataFrame to append the two result rows to.

    Returns:
        ``save_df`` with the new rows appended when it was provided, else None.
    """
    if not data_path:
        data_path = os.path.join(get_dali_extra_path(), 'db/single/jpeg')

    print(f'num_threads: {num_threads}, batch_size: {batch_size}')

    full_stand, build_stand, times_stand = run_benchmark(pipe_fun, batch_size, num_threads,
                                                         num_samples, False, data_path)
    # Skip the first timed iteration: it still carries warm-up effects.
    iter_time_stand = np.mean(times_stand[1:]) / batch_size
    avg_speed_stand = num_samples / full_stand
    print(
        f'Stand pipeline --- time: {full_stand:8.5f} [s] --- '
        f'build + 1st iter time: {build_stand:.5f} [s] --- '
        f'avg iter time per sample: {iter_time_stand:7.5f} [s] --- '
        f'avg speed: {avg_speed_stand:8.3f} [img/s]')

    full_debug, build_debug, times_debug = run_benchmark(pipe_fun, batch_size, num_threads,
                                                         num_samples, True, data_path)
    iter_time_debug = np.mean(times_debug[1:]) / batch_size
    avg_speed_debug = num_samples / full_debug
    print(
        f'Debug pipeline --- time: {full_debug:8.5f} [s] --- '
        f'build + 1st iter time: {build_debug:.5f} [s] --- '
        f'avg iter time per sample: {iter_time_debug:7.5f} [s] --- '
        f'avg speed: {avg_speed_debug:8.3f} [img/s]')

    if save_df is not None:
        # BUG FIX: `pd` was previously only bound by the conditional import in
        # the __main__ block, so calling this function with save_df from an
        # importing module raised NameError. Import lazily here so the
        # benchmark has no hard pandas dependency when save_df is unused.
        import pandas as pd
        df = pd.DataFrame({'type': ['standard_sync', 'debug_old'],
                           'batch_size': batch_size,
                           'time': [full_stand, full_debug],
                           'iter_time': [iter_time_stand, iter_time_debug],
                           'avg_speed': [avg_speed_stand, avg_speed_debug]})
        return pd.concat([save_df, df])
    return None
def parse_args():
    """Define and parse the benchmark's command-line options."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Integer-list options share the same shape; declare them from a spec.
    for flag, default, desc in (
            ('--batch-sizes', [1, 4, 8, 32, 64, 128], 'List of batch sizes to run'),
            ('--thread-counts', [1, 2, 4, 8], 'List of thread counts')):
        parser.add_argument(flag, nargs='+', type=int, default=default, help=desc)
    parser.add_argument('--num-samples', type=int, default=2048,
                        help='Number of samples')
    for flag, desc in (('--data-path', 'Directory path of training dataset'),
                       ('--save-dir', 'Directory where to save results')):
        parser.add_argument(flag, type=str, help=desc)
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    df = None
    for pipe_fun, num_threads in product([METHOD_NAME, rn50_pipeline_2], args.thread_counts):
        if args.save_dir is not None:
            import pandas as pd
            save_file = os.path.join(args.save_dir,
                                     f'bench_{pipe_fun.__name__}_threads_{num_threads}.csv')
            # Resume from partial results of an earlier run when present.
            if os.path.isfile(save_file):
                df = pd.read_csv(save_file)
            else:
                df = pd.DataFrame(columns=['type', 'batch_size', 'time', 'iter_time', 'avg_speed'])
        for batch_size in args.batch_sizes:
            # BUG FIX: the original always benchmarked rn50_pipeline_2 here,
            # ignoring the pipeline selected by the product() loop — half the
            # configurations (and their CSV filenames) reported wrong results.
            df = test_rn50_benchmark(pipe_fun, batch_size, num_threads, args.num_samples,
                                     args.data_path, df)
        # df is only non-None when --save-dir was given, so save_file is bound.
        if df is not None:
            df.to_csv(save_file, index=False)
298,347 | render | # -*- coding: utf-8 -*-
#
# This file is part of SENAITE.CORE.
#
# SENAITE.CORE is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2018-2023 by it's authors.
# Some rights reserved, see README and LICENSE.
from bika.lims import api
from bika.lims.api.security import check_permission
from plone.app.layout.viewlets.common import PersonalBarViewlet
from plone.app.viewletmanager.manager import OrderedViewletManager
from plone.formwidget.namedfile.converter import b64decode_file
from plone.memoize.instance import memoize
from plone.registry.interfaces import IRegistry
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from senaite.core.browser.viewlets.languageselector import LanguageSelector
from senaite.core.browser.viewlets.sections import GlobalSectionsViewlet
from zope.component import getMultiAdapter
from zope.component import getUtility
LOGO = "/++plone++senaite.core.static/images/senaite.svg"
class ToolbarViewletManager(OrderedViewletManager):
    """Viewlet manager for the SENAITE top toolbar.

    Renders a custom template instead of the default OrderedViewletManager
    output and exposes helpers (logo, sections, language selector, personal
    bar) the template pulls from.
    """
    custom_template = ViewPageTemplateFile("templates/toolbar.pt")
    def base_render(self):
        # Delegate to the stock OrderedViewletManager rendering.
        return super(ToolbarViewletManager, self).METHOD_NAME()
    def METHOD_NAME(self):
        # Render our own toolbar template instead of the default output.
        return self.custom_template()
    @property
    @memoize
    def context_state(self):
        # plone_context_state view, cached per request.
        return getMultiAdapter(
            (self.context, self.request),
            name='plone_context_state'
        )
    @property
    @memoize
    def portal(self):
        # Portal object, resolved through the (memoized) portal_state view.
        return self.portal_state.portal()
    @property
    @memoize
    def portal_state(self):
        # plone_portal_state view, cached per request.
        return getMultiAdapter(
            (self.context, self.request),
            name='plone_portal_state'
        )
    @memoize
    def is_manager(self):
        # True when the current user may manage the LIMS setup.
        return check_permission("senaite.core: Manage Bika", self.portal)
    def get_personal_bar(self):
        """Return an updated PersonalBarViewlet for the template."""
        viewlet = PersonalBarViewlet(
            self.context,
            self.request,
            self.__parent__, self
        )
        viewlet.update()
        return viewlet
    def get_toolbar_logo(self):
        """Return the toolbar logo
        """
        portal_url = self.portal_state.portal_url()
        # Try to (gracefully) get the logo from the SENAITE setup
        setup = api.get_senaite_setup()
        site_logo = setup.getSiteLogo() if setup else None
        if site_logo:
            filename, data = b64decode_file(site_logo)
            return '{}/@@site-logo/{}'.format(
                portal_url, filename)
        # Check if an URL is given in the registry
        registry = getUtility(IRegistry)
        try:
            logo = registry["senaite.toolbar_logo"]
        except (AttributeError, KeyError):
            logo = LOGO
        # Registry value may exist but be empty -> fall back to the default.
        if not logo:
            logo = LOGO
        return portal_url + logo
    def get_toolbar_styles(self):
        """Return the CSS for the toolbar logo
        """
        # Try to (gracefully) get the logo CSS from the SENAITE setup
        setup = api.get_senaite_setup()
        site_logo_css = setup.getSiteLogoCSS() if setup else None
        if site_logo_css:
            return site_logo_css
        # Fall back to registry
        registry = getUtility(IRegistry)
        try:
            styles = registry["senaite.toolbar_logo_styles"]
        except (AttributeError, KeyError):
            return "height:15px;"
        # Flatten the {property: value} mapping into inline CSS.
        css = map(lambda style: "{}:{};".format(*style), styles.items())
        return " ".join(css)
    def get_lims_setup_url(self):
        """Absolute URL of the LIMS setup view."""
        portal_url = self.portal_state.portal().absolute_url()
        return "/".join([portal_url, "@@lims-setup"])
    def get_global_sections(self):
        """Return an updated GlobalSectionsViewlet for the template."""
        viewlet = GlobalSectionsViewlet(
            self.context,
            self.request,
            self.__parent__, self
        )
        viewlet.update()
        return viewlet
    def get_language_selector(self):
        """Return an updated LanguageSelector viewlet for the template."""
        viewlet = LanguageSelector(
            self.context,
            self.request,
            self.__parent__, self
        )
        viewlet.update()
        return viewlet
298,348 | sdk stdlib | # Copyright 2019 The Bazel Go Rules Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"//go/private:common.bzl",
"COVERAGE_OPTIONS_DENYLIST",
)
load(
"//go/private:providers.bzl",
"GoStdLib",
)
load(
"//go/private:mode.bzl",
"LINKMODE_NORMAL",
"extldflags_from_cc_toolchain",
"link_mode_args",
)
load("//go/private:sdk.bzl", "parse_version")
load("//go/private/actions:utils.bzl", "quote_opts")
def emit_stdlib(go):
    """Returns a standard library for the target configuration.

    The precompiled standard library shipped with the SDK is reused when it
    matches the build mode; otherwise a fresh stdlib is compiled for the
    target (the choice happens lazily in the source resolver).

    Returns:
        A list of providers containing GoLibrary and GoSource. GoSource.stdlib
        will point to a new GoStdLib.
    """
    stdlib_library = go.new_library(go, resolver = _stdlib_library_to_source)
    stdlib_source = go.library_to_source(go, {}, stdlib_library, False)
    return [stdlib_source, stdlib_library]
def _stdlib_library_to_source(go, _attr, source, _merge):
    """Resolver: attaches either the SDK's prebuilt stdlib or a freshly built one."""
    use_prebuilt = _should_use_sdk_stdlib(go)
    source["stdlib"] = METHOD_NAME(go) if use_prebuilt else _build_stdlib(go)
def _should_use_sdk_stdlib(go):
    """Whether the SDK's precompiled standard library can be reused as-is."""
    version = parse_version(go.sdk.version)

    # The precompiled stdlib shipped with 1.19 or below doesn't have experiments.
    if version and version[0] <= 1 and version[1] <= 19 and go.sdk.experiments:
        return False

    # go.sdk.libs is non-empty only if the SDK ships precompiled .a files.
    if not go.sdk.libs:
        return False

    # Precompiled archives are only valid for the exact target platform.
    if go.mode.goos != go.sdk.goos or go.mode.goarch != go.sdk.goarch:
        return False

    # TODO(jayconrod): use precompiled race
    if go.mode.race or go.mode.msan or go.mode.pure:
        return False

    # Custom compiler flags invalidate the prebuilt archives.
    if go.mode.gc_goopts:
        return False

    return go.mode.link == LINKMODE_NORMAL
def _build_stdlib_list_json(go):
    """Registers an action producing a JSON index of standard-library packages.

    Runs the builder's "stdliblist" subcommand against the SDK and returns
    the declared stdlib.pkg.json output file.
    """
    out = go.declare_file(go, "stdlib.pkg.json")
    cache_dir = go.declare_directory(go, "gocache")
    args = go.builder_args(go, "stdliblist")
    args.add("-sdk", go.sdk.root_file.dirname)
    args.add("-out", out)
    args.add("-cache", cache_dir.path)
    inputs = go.sdk_files
    if not go.mode.pure:
        # cgo-enabled builds also need the C/C++ toolchain as action inputs.
        inputs += go.crosstool
    go.actions.run(
        inputs = inputs,
        outputs = [out, cache_dir],
        mnemonic = "GoStdlibList",
        executable = go.toolchain._builder,
        arguments = [args],
        env = _build_env(go),
    )
    return out
def _build_env(go):
    """Computes the environment for stdlib build actions.

    Pure mode disables cgo entirely; otherwise the C toolchain paths and
    (filtered) flags are exported for cgo.

    NOTE(review): this updates go.env in place rather than working on a
    copy — presumably intended, but confirm callers don't rely on go.env
    staying pristine.
    """
    env = go.env
    if go.mode.pure:
        env.update({"CGO_ENABLED": "0"})
        return env

    # NOTE(#2545): avoid unnecessary dynamic link
    # go std library doesn't use C++, so should not have -lstdc++
    # Also drop coverage flags as nothing in the stdlib is compiled with
    # coverage - we disable it for all CGo code anyway.
    # NOTE(#3590): avoid forcing static linking.
    ldflags = [
        option
        for option in extldflags_from_cc_toolchain(go)
        if option not in ("-lstdc++", "-lc++", "-static") and option not in COVERAGE_OPTIONS_DENYLIST
    ]
    env.update({
        "CGO_ENABLED": "1",
        "CC": go.cgo_tools.c_compiler_path,
        "CGO_CFLAGS": " ".join(go.cgo_tools.c_compile_options),
        "CGO_LDFLAGS": " ".join(ldflags),
    })
    return env
def METHOD_NAME(go):
    """Packages the SDK's precompiled standard library as a GoStdLib provider."""
    return GoStdLib(
        _list_json = _build_stdlib_list_json(go),
        libs = go.sdk.libs,
        root_file = go.sdk.root_file,
    )
def _build_stdlib(go):
    """Registers an action that compiles the standard library for the target.

    Used when the SDK's precompiled stdlib is unsuitable for the current
    build mode (race, msan, pure, custom gc flags, cross-compilation, ...).
    Returns a GoStdLib provider rooted at the freshly built pkg directory.
    """
    pkg = go.declare_directory(go, path = "pkg")
    args = go.builder_args(go, "stdlib")
    args.add("-out", pkg.dirname)
    if go.mode.race:
        args.add("-race")
    args.add("-package", "std")
    if not go.mode.pure:
        # cgo builds additionally need the runtime/cgo package.
        args.add("-package", "runtime/cgo")
    args.add_all(link_mode_args(go.mode))
    args.add("-gcflags", quote_opts(go.mode.gc_goopts))
    inputs = (go.sdk.srcs +
              go.sdk.headers +
              go.sdk.tools +
              [go.sdk.go, go.sdk.package_list, go.sdk.root_file] +
              go.crosstool)
    if go.mode.pgoprofile:
        # Profile-guided optimization: the profile is both a flag and an input.
        args.add("-pgoprofile", go.mode.pgoprofile)
        inputs.append(go.mode.pgoprofile)
    outputs = [pkg]
    go.actions.run(
        inputs = inputs,
        outputs = outputs,
        mnemonic = "GoStdlib",
        executable = go.toolchain._builder,
        arguments = [args],
        env = _build_env(go),
    )
    return GoStdLib(
        _list_json = _build_stdlib_list_json(go),
        libs = [pkg],
        root_file = pkg,
    )
298,349 | set up | #!/usr/bin/env python3
import os
import random
import unittest
import warnings
from math import exp, pi
import torch
from torch import optim
import gpytorch
from gpytorch.distributions import MultivariateNormal
from gpytorch.kernels import GridInterpolationKernel, RBFKernel, ScaleKernel
from gpytorch.likelihoods import FixedNoiseGaussianLikelihood
from gpytorch.means import ConstantMean
from gpytorch.priors import SmoothedBoxPrior
from gpytorch.test.utils import least_used_cuda_device
from gpytorch.utils.warnings import GPInputWarning
# Simple training data: let's try to learn a sine function,
# but with KISS-GP let's use 100 training examples.
def make_data(cuda=False):
    """Create a toy sine-wave regression problem.

    100 training points and 51 test points on [0, 1] with targets
    sin(2*pi*x). All four tensors are moved to the GPU when ``cuda`` is True.

    Returns:
        Tuple (train_x, train_y, test_x, test_y).
    """
    train_x = torch.linspace(0, 1, 100)
    test_x = torch.linspace(0, 1, 51)
    train_y = torch.sin(train_x * (2 * pi))
    test_y = torch.sin(test_x * (2 * pi))
    if cuda:
        return train_x.cuda(), train_y.cuda(), test_x.cuda(), test_y.cuda()
    return train_x, train_y, test_x, test_y
class GPRegressionModel(gpytorch.models.ExactGP):
    """Exact GP with a KISS-GP (grid-interpolation) covariance over 1D inputs."""
    def __init__(self, train_x, train_y, likelihood):
        super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
        # Constant mean, pinned near zero by a tight prior.
        self.mean_module = ConstantMean(constant_prior=SmoothedBoxPrior(-1e-5, 1e-5))
        self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1)))
        # KISS-GP: interpolate the scaled RBF kernel onto a 50-point grid.
        self.grid_covar_module = GridInterpolationKernel(self.base_covar_module, grid_size=50, num_dims=1)
        self.covar_module = self.grid_covar_module
    def forward(self, x):
        """Return the GP prior at ``x`` as a MultivariateNormal."""
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return MultivariateNormal(mean_x, covar_x)
class TestKISSGPWhiteNoiseRegression(unittest.TestCase):
    """KISS-GP regression tests with a fixed-noise (white noise) likelihood.

    Seeds all RNGs in setUp (unless UNLOCK_SEED is set truthy) so the
    optimization runs are reproducible, and restores RNG state in tearDown.
    """
    def METHOD_NAME(self):
        # Lock seeds unless the environment explicitly opts out.
        if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false":
            self.rng_state = torch.get_rng_state()
            torch.manual_seed(0)
            if torch.cuda.is_available():
                torch.cuda.manual_seed_all(0)
            random.seed(0)
    def tearDown(self):
        if hasattr(self, "rng_state"):
            torch.set_rng_state(self.rng_state)
    def test_kissgp_gp_mean_abs_error(self):
        # This test throws a warning because the fixed noise likelihood gets the wrong input
        warnings.simplefilter("ignore", GPInputWarning)
        train_x, train_y, test_x, test_y = make_data()
        likelihood = FixedNoiseGaussianLikelihood(torch.ones(100) * 0.001)
        gp_model = GPRegressionModel(train_x, train_y, likelihood)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
        # Optimize the model
        gp_model.train()
        likelihood.train()
        optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
        optimizer.n_iter = 0
        with gpytorch.settings.debug(False):
            for _ in range(25):
                optimizer.zero_grad()
                output = gp_model(train_x)
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.n_iter += 1
                optimizer.step()
            # Every parameter should have received a non-trivial gradient.
            for param in gp_model.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)
            # Test the model
            gp_model.eval()
            likelihood.eval()
            test_preds = likelihood(gp_model(test_x)).mean
            mean_abs_error = torch.mean(torch.abs(test_y - test_preds))
        self.assertLess(mean_abs_error.squeeze().item(), 0.05)
    def test_kissgp_gp_fast_pred_var(self):
        with gpytorch.settings.fast_pred_var(), gpytorch.settings.debug(False):
            train_x, train_y, test_x, test_y = make_data()
            likelihood = FixedNoiseGaussianLikelihood(torch.ones(100) * 0.001)
            gp_model = GPRegressionModel(train_x, train_y, likelihood)
            mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
            # Optimize the model
            gp_model.train()
            likelihood.train()
            optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
            optimizer.n_iter = 0
            for _ in range(25):
                optimizer.zero_grad()
                output = gp_model(train_x)
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.n_iter += 1
                optimizer.step()
            for param in gp_model.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)
            # Test the model
            gp_model.eval()
            likelihood.eval()
            # Set the cache
            test_function_predictions = likelihood(gp_model(train_x))
            # Now bump up the likelihood to something huge
            # This will make it easy to calculate the variance
            likelihood.noise = torch.ones(100) * 3.0
            test_function_predictions = likelihood(gp_model(train_x))
            noise = likelihood.noise
            # Predictive variance should track the (large) injected noise.
            var_diff = (test_function_predictions.variance - noise).abs()
            self.assertLess(torch.max(var_diff / noise), 0.05)
    def test_kissgp_gp_mean_abs_error_cuda(self):
        # GPU variant; silently skipped when no CUDA device is available.
        if not torch.cuda.is_available():
            return
        with least_used_cuda_device():
            train_x, train_y, test_x, test_y = make_data(cuda=True)
            likelihood = FixedNoiseGaussianLikelihood(torch.ones(100) * 0.001).cuda()
            gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda()
            mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
            # Optimize the model
            gp_model.train()
            likelihood.train()
            optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
            optimizer.n_iter = 0
            with gpytorch.settings.debug(False):
                for _ in range(25):
                    optimizer.zero_grad()
                    output = gp_model(train_x)
                    loss = -mll(output, train_y)
                    loss.backward()
                    optimizer.n_iter += 1
                    optimizer.step()
                for param in gp_model.parameters():
                    self.assertTrue(param.grad is not None)
                    self.assertGreater(param.grad.norm().item(), 0)
                # Test the model
                gp_model.eval()
                likelihood.eval()
                test_preds = likelihood(gp_model(test_x)).mean
                mean_abs_error = torch.mean(torch.abs(test_y - test_preds))
            self.assertLess(mean_abs_error.squeeze().item(), 0.02)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
298,350 | test cobbler version | import os
import re
import pytest
dummy_file_path = "/root/dummy"
@pytest.fixture(scope="function")
def get_last_line():
    """Fixture providing a helper that returns the last non-empty line."""
    def _get_last_line(lines):
        # Walk backwards past trailing blanks; fall back to lines[0]
        # (possibly empty) when every line but the first is blank.
        for idx in range(len(lines) - 1, 0, -1):
            if lines[idx] != "":
                return lines[idx]
        return lines[0]
    return _get_last_line
@pytest.fixture(scope="function")
def assert_list_section():
    """Fixture providing a checker for one section of `cobbler list` output."""
    def _assert_list_section(lines, start_line, section_name):
        # Verify the "<name>:" header, then skip entries up to the blank
        # separator; return the index of the line after that separator.
        cursor = start_line
        assert lines[cursor] == "%s:" % section_name
        cursor += 1
        while lines[cursor] != "":
            cursor += 1
        return cursor + 1
    return _assert_list_section
@pytest.fixture(scope="function")
def assert_report_section():
    """Fixture providing a checker for one section of `cobbler report` output."""
    def _assert_report_section(lines, start_line, section_name):
        # Expect "<name>:" followed by an "====" underline, then one or more
        # record paragraphs separated by blank lines; stop before the next
        # section's underline. Returns the index where the next section starts.
        i = start_line
        assert lines[i] == "%s:" % section_name
        i += 1
        match_obj = re.match(r"=+$", lines[i].strip())
        assert match_obj is not None
        i += 1
        # NOTE(review): the underline is .strip()ed above but NOT here —
        # if report lines carry trailing whitespace this lookahead never
        # matches; confirm against real `cobbler report` output.
        while i < len(lines) - 1 and re.match(r"=+$", lines[i + 1]) is None:
            while i < len(lines) and lines[i] != "":
                i += 1
            while i < len(lines) and lines[i] == "":
                i += 1
        return i
    return _assert_report_section
class TestCobblerCliTestDirect:
    """
    Tests Cobbler CLI direct commands.

    Each test shells out through the `run_cmd` fixture and checks stdout;
    long-running task commands are expected to end with the Cobbler task
    footer "*** TASK COMPLETE ***".
    """
    def METHOD_NAME(self, run_cmd):
        """Runs 'cobbler version'"""
        (outputstd, outputerr) = run_cmd(cmd=["version"])
        line = outputstd.split("\n")[0]
        match_obj = re.match(r"Cobbler \d+\.\d+\.\d+", line)
        assert match_obj is not None
    def test_cobbler_status(self, run_cmd):
        """Runs 'cobbler status'"""
        (outputstd, outputerr) = run_cmd(cmd=["status"])
        lines = outputstd.split("\n")
        # Only the table header is verified, not the rows.
        match_obj = re.match(r"ip\s+|target\s+|start\s+|state\s+", lines[0])
        assert match_obj is not None
    def test_cobbler_sync(self, run_cmd, get_last_line):
        """Runs 'cobbler sync'"""
        (outputstd, outputerr) = run_cmd(cmd=["sync"])
        lines = outputstd.split("\n")
        assert "*** TASK COMPLETE ***" == get_last_line(lines)
    def test_cobbler_sync_dns(self, run_cmd, get_last_line):
        """Runs 'cobbler sync --dns'"""
        (outputstd, outputerr) = run_cmd(cmd=["sync", "--dns"])
        lines = outputstd.split("\n")
        assert "*** TASK COMPLETE ***" == get_last_line(lines)
    def test_cobbler_sync_dhcp(self, run_cmd, get_last_line):
        """Runs 'cobbler sync --dhcp'"""
        (outputstd, outputerr) = run_cmd(cmd=["sync", "--dhcp"])
        lines = outputstd.split("\n")
        assert "*** TASK COMPLETE ***" == get_last_line(lines)
    def test_cobbler_sync_dhcp_dns(self, run_cmd, get_last_line):
        """Runs 'cobbler sync --dhcp --dns'"""
        (outputstd, outputerr) = run_cmd(cmd=["sync", "--dhcp", "--dns"])
        lines = outputstd.split("\n")
        assert "*** TASK COMPLETE ***" == get_last_line(lines)
    def test_cobbler_sync_systems(self, run_cmd, get_last_line):
        """Runs 'cobbler sync'"""
        (outputstd, outputerr) = run_cmd(cmd=["sync", "--systems=a.b.c,a.d.c"])
        lines = outputstd.split("\n")
        assert "*** TASK COMPLETE ***" == get_last_line(lines)
    def test_cobbler_signature_report(self, run_cmd, get_last_line):
        """Runs 'cobbler signature report'"""
        (outputstd, outputerr) = run_cmd(cmd=["signature", "report"])
        lines = outputstd.split("\n")
        assert "Currently loaded signatures:" == lines[0]
        expected_output = r"\d+ breeds with \d+ total signatures loaded"
        match_obj = re.match(expected_output, get_last_line(lines))
        assert match_obj is not None
    def test_cobbler_signature_update(self, run_cmd, get_last_line):
        """Runs 'cobbler signature update'"""
        (outputstd, outputerr) = run_cmd(cmd=["signature", "update"])
        lines = outputstd.split("\n")
        assert "*** TASK COMPLETE ***" == get_last_line(lines)
    def test_cobbler_acl_adduser(self, run_cmd):
        """Runs 'cobbler aclsetup --adduser'"""
        (outputstd, outputerr) = run_cmd(cmd=["aclsetup", "--adduser=cobbler"])
        # TODO: verify user acl exists on directories
    def test_cobbler_acl_addgroup(self, run_cmd):
        """Runs 'cobbler aclsetup --addgroup'"""
        (outputstd, outputerr) = run_cmd(cmd=["aclsetup", "--addgroup=cobbler"])
        # TODO: verify group acl exists on directories
    def test_cobbler_acl_removeuser(self, run_cmd):
        """Runs 'cobbler aclsetup --removeuser'"""
        (outputstd, outputerr) = run_cmd(cmd=["aclsetup", "--removeuser=cobbler"])
        # TODO: verify user acl no longer exists on directories
    def test_cobbler_acl_removegroup(self, run_cmd):
        """Runs 'cobbler aclsetup --removegroup'"""
        (outputstd, outputerr) = run_cmd(cmd=["aclsetup", "--removegroup=cobbler"])
        # TODO: verify group acl no longer exists on directories
    def test_cobbler_reposync(self, run_cmd):
        """Runs 'cobbler reposync' with its common flag variants."""
        (outputstd, outputerr) = run_cmd(cmd=["reposync"])
        (outputstd, outputerr) = run_cmd(cmd=["reposync", "--tries=3"])
        (outputstd, outputerr) = run_cmd(cmd=["reposync", "--no-fail"])
    @pytest.mark.skip("Currently the setup of this test is too complicated")
    def test_cobbler_buildiso(self, run_cmd, get_last_line):
        """Runs 'cobbler buildiso'"""
        (outputstd, outputerr) = run_cmd(cmd=["buildiso"])
        lines = outputstd.split("\n")
        assert "*** TASK COMPLETE ***" == get_last_line(lines)
        assert os.path.isfile("/root/generated.iso")
    def test_11_cobbler_list(self, run_cmd, assert_list_section):
        """Runs 'cobbler list' and checks every object section is present."""
        (outputstd, outputerr) = run_cmd(cmd=["list"])
        lines = outputstd.split("\n")
        i = 0
        i = assert_list_section(lines, i, "distros")
        i = assert_list_section(lines, i, "profiles")
        i = assert_list_section(lines, i, "systems")
        i = assert_list_section(lines, i, "repos")
        i = assert_list_section(lines, i, "images")
        i = assert_list_section(lines, i, "mgmtclasses")
        i = assert_list_section(lines, i, "packages")
        i = assert_list_section(lines, i, "files")
        i = assert_list_section(lines, i, "menus")
    def test_cobbler_report(self, run_cmd, assert_report_section):
        """Runs 'cobbler report' and checks every object section is present."""
        (outputstd, outputerr) = run_cmd(cmd=["report"])
        lines = outputstd.split("\n")
        i = 0
        i = assert_report_section(lines, i, "distros")
        i = assert_report_section(lines, i, "profiles")
        i = assert_report_section(lines, i, "systems")
        i = assert_report_section(lines, i, "repos")
        i = assert_report_section(lines, i, "images")
        i = assert_report_section(lines, i, "mgmtclasses")
        i = assert_report_section(lines, i, "packages")
        i = assert_report_section(lines, i, "files")
        i = assert_report_section(lines, i, "menus")
    def test_cobbler_hardlink(self, run_cmd, get_last_line):
        """Runs 'cobbler hardlink'"""
        (outputstd, outputerr) = run_cmd(cmd=["hardlink"])
        lines = outputstd.split("\n")
        assert "*** TASK COMPLETE ***" == get_last_line(lines)
    @pytest.mark.skip("Currently the setup of this test is too complicated")
    def test_cobbler_replicate(self, run_cmd, get_last_line):
        """Runs 'cobbler replicate'"""
        (outputstd, outputerr) = run_cmd(cmd=["replicate"])
        lines = outputstd.split("\n")
        assert "*** TASK COMPLETE ***" == get_last_line(lines)
    def test_cobbler_validate_autoinstalls(self, run_cmd, get_last_line):
        """Runs 'cobbler validate-autoinstalls'"""
        (outputstd, outputerr) = run_cmd(cmd=["validate-autoinstalls"])
        lines = outputstd.split("\n")
        assert "*** TASK COMPLETE ***" == get_last_line(lines)
298,351 | unregister | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import BoolProperty, IntProperty, StringProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import (changable_sockets, repeat_last, updateNode)
# ListItem2
# Allows a list of items, with both negative and positive index and repeated values
# Other output is not wrapped.
# Based on ListItem
# For now only accepts one list of items
# by Linus Yng
class ListItem2Node(SverchCustomTreeNode, bpy.types.Node):
    ''' List item '''
    # Selects items from a (nested) list at a given nesting level, supporting
    # negative and repeated indices. "Item" outputs the selection, "Other"
    # outputs the remainder. Deprecated in favor of SvListItemNode (see
    # replacement_nodes below).
    bl_idname = 'ListItem2Node'
    bl_label = 'List Item'
    replacement_nodes = [('SvListItemNode', {"Item":"Index"}, None)]
    # Nesting depth at which indexing is applied.
    level: IntProperty(name='level_to_count', default=2, min=0, update=updateNode)
    # Default index used when the "Item" socket is unconnected.
    item: IntProperty(name='item', default=0, update=updateNode)
    typ: StringProperty(name='typ', default='')
    newsock: BoolProperty(name='newsock', default=False)
    def draw_buttons(self, context, layout):
        # Only the nesting level is exposed in the node UI.
        layout.prop(self, "level", text="level")
    def sv_init(self, context):
        # Sockets: data in, index in (falls back to self.item), two outputs.
        self.inputs.new('SvStringsSocket', "Data")
        self.inputs.new('SvStringsSocket', "Item").prop_name = 'item'
        self.outputs.new('SvStringsSocket', "Item")
        self.outputs.new('SvStringsSocket', "Other")
    def sv_update(self):
        # Adapt output socket types to whatever is plugged into "Data".
        if 'Data' in self.inputs and self.inputs['Data'].links:
            inputsocketname = 'Data'
            outputsocketname = ['Item', 'Other']
            changable_sockets(self, inputsocketname, outputsocketname)
    def process(self):
        if self.inputs['Data'].is_linked:
            OItem, OOther = self.outputs
            data = self.inputs['Data'].sv_get()
            items = self.inputs['Item'].sv_get([[self.item]])
            # level == 1 indexes the top list directly; deeper levels recurse.
            if OItem.is_linked:
                if self.level-1:
                    out = self.get(data, self.level-1, items, self.get_items)
                else:
                    out = self.get_items(data, items[0])
                OItem.sv_set(out)
            if OOther.is_linked:
                if self.level-1:
                    out = self.get(data, self.level-1, items, self.get_other)
                else:
                    out = self.get_other(data, items[0])
                OOther.sv_set(out)
    def get_items(self, data, items):
        # Pick the requested indices, silently dropping out-of-range ones.
        if type(data) in [list, tuple]:
            return [data[item] for item in items if item < len(data) and item >= -len(data)]
        else:
            return None
    def get_other(self, data, items):
        # Return data with the indexed items removed.
        # NOTE(review): when data is a list this deletes from the object
        # obtained via sv_get() in place — presumably tolerated by the socket
        # data flow, but confirm downstream nodes don't see the mutation.
        is_tuple = False
        if type(data) == tuple:
            data = list(data)
            is_tuple = True
        if type(data) == list:
            # Normalize negative indices, then delete high-to-low so earlier
            # deletions don't shift later ones.
            m_items = items.copy()
            for idx, item in enumerate(items):
                if item < 0:
                    m_items[idx] = len(data)-abs(item)
            for i in sorted(set(m_items), reverse=True):
                if i < len(data) and i > -1:
                    del data[i]
            if is_tuple:
                return tuple(data)
            else:
                return data
        else:
            return None
    def get(self, data, level, items, f):
        # Recurse to the requested nesting level, then apply f.
        # At level 1 the index lists cycle over the sublists (repeat_last).
        if level == 1:
            item_iter = repeat_last(items)
            return [self.get(obj, level-1, next(item_iter), f) for obj in data]
        elif level:
            return [self.get(obj, level-1, items, f) for obj in data]
        else:
            return f(data, items)
def register():
    """Register the node class with Blender."""
    bpy.utils.register_class(ListItem2Node)
def METHOD_NAME():
    """Remove the node class from Blender's registry."""
    bpy.utils.unregister_class(ListItem2Node)
298,352 | set value | # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
#
# Copyright (c) 2011-2012 Oracle and/or its affiliates. All rights reserved.
#
# The contents of this file are subject to the terms of either the GNU
# General Public License Version 2 only ("GPL") or the Common Development
# and Distribution License("CDDL") (collectively, the "License"). You
# may not use this file except in compliance with the License. You can
# obtain a copy of the License at
# https://glassfish.dev.java.net/public/CDDL+GPL_1_1.html
# or packager/legal/LICENSE.txt. See the License for the specific
# language governing permissions and limitations under the License.
#
# When distributing the software, include this License Header Notice in each
# file and include the License file at packager/legal/LICENSE.txt.
#
# GPL Classpath Exception:
# Oracle designates this particular file as subject to the "Classpath"
# exception as provided by Oracle in the GPL Version 2 section of the License
# file that accompanied this code.
#
# Modifications:
# If applicable, add the following below the License Header, with the fields
# enclosed by brackets [] replaced by your own identifying information:
# "Portions Copyright [year] [name of copyright owner]"
#
# Contributor(s):
# If you wish your version of this file to be governed by only the CDDL or
# only the GPL Version 2, indicate your decision by adding "[Contributor]
# elects to include this software in this distribution under the [CDDL or GPL
# Version 2] license." If you don't indicate a single choice of license, a
# recipient has the option to distribute your version of this file under
# either the CDDL, the GPL Version 2 or to extend the choice of license to
# its licensees as provided above. However, if you add GPL Version 2 code
# and therefore, elected the GPL Version 2 license, then the option applies
# only if the new code is made subject to such option by the copyright
# holder.
import traceback
import urllib
import random
import string
import mimetypes
import sys
from restresponse import *
from connection import *
class RestClientBase:
    """Base class for GlassFish REST API resources (Python 2 code).

    On construction it immediately GETs the resource URL and caches the
    returned entity values and child links. Subclasses override
    getSegment() to define their URL path component.
    """
    def __init__(self, connection, parent, name = None):
        self.connection = connection
        self.parent = parent
        self.entityValues = { }
        self.children = {}
        if name:
            self.setName(name)
        self.name = name
        # Eagerly fetch the resource state; errors are printed, not raised.
        try:
            restResponse = self.connection.get(self.getRestUrl())
            self.status = restResponse.getStatus()
            self.entityValues = restResponse.getEntityValues()
            self.children = restResponse.getChildren()
        except Exception as e:
            print e
            traceback.print_exc(file=sys.stdout)
    def getParent(self):
        return self.parent
    def getRestUrl(self):
        # URL is built recursively from the parent chain plus this segment.
        return self.getParent().getRestUrl() + self.getSegment()
    def getSegment(self):
        # Subclasses override to contribute their URL path component.
        return ""
    def getStatus(self):
        return self.status
    def getMessage(self):
        # NOTE(review): self.message is never assigned in this class —
        # presumably set by subclasses or responses; verify before relying on it.
        return self.message
    def save(self):
        """POST the cached entity values back to the resource."""
        response = self.connection.post(self.getRestUrl(), urllib.urlencode(self.entityValues), headers={'Content-type': 'application/x-www-form-urlencoded'})
        self.status = response.getStatus()
    def delete(self):
        """DELETE this resource."""
        response = self.connection.delete(self.getRestUrl())
        self.status = response.getStatus()
    def execute(self, endPoint, method = "GET", payload = {}, needsMultiPart = False):
        """Invoke an endpoint relative to this resource's URL.

        NOTE(review): mutable default `payload={}` — harmless as long as
        callees never mutate it (they don't here), but fragile.
        """
        if method == "POST":
            if needsMultiPart:
                content_type, body = self.encode_multipart_formdata(payload)
                restResponse = self.connection.post(self.getRestUrl() + endPoint, body, headers={'Content-type': content_type})
            else:
                restResponse = self.connection.post(self.getRestUrl() + endPoint, urllib.urlencode(payload), headers={'Content-type': 'application/x-www-form-urlencoded'})
        else:
            restResponse = self.connection.request(self.getRestUrl() + endPoint, method, urllib.urlencode(payload))
        if restResponse:
            self.status = restResponse.getStatus()
            return restResponse
        else:
            self.status = -1
            # raise an exception
    def METHOD_NAME(self, key, value):
        self.entityValues[key] = value
    def getValue(self, key):
        return self.entityValues[key]
    def encode_multipart_formdata(self, args):
        """Encode args as multipart/form-data; file values are uploaded."""
        BOUNDARY = '----------' + self._random_string (30)
        EOL = '\r\n'
        body = []
        for (key, value) in args.items():
            if type(value) is file:
                # File upload part (py2 `file` type).
                filename = value.name
                body.append('--' + BOUNDARY)
                body.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
                body.append('Content-Type: %s' % self.get_content_type(filename))
                body.append('')
                body.append(value.read())
            else:
                # Plain form field.
                body.append('--' + BOUNDARY)
                body.append('Content-Disposition: form-data; name="%s"' % key)
                body.append('')
                body.append(str(value))
        body.append('--' + BOUNDARY + '--')
        body.append('')
        content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
        return content_type, EOL.join(body)
    def get_content_type(self, filename):
        # Guess from the filename; default to a generic binary type.
        return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
    def _random_string (self, length):
        # py2 string.letters; yields length+1 characters (range upper bound).
        return ''.join (random.choice (string.letters) for ii in range (length + 1))
298,353 | instance ssl dummy | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import copy
import logging
import os
import re
import pytest
from six.moves import range
from datadog_checks.dev import docker_run
from datadog_checks.dev.conditions import CheckDockerLogs, WaitFor
from datadog_checks.dev.utils import ON_WINDOWS
from . import common
log = logging.getLogger(__file__)
@pytest.fixture(scope='session')
def get_check():
    """Session-scoped factory that builds an IbmMqCheck for a given instance."""
    # Imported lazily so e2e runs work without the library installed.
    from datadog_checks.ibm_mq import IbmMqCheck

    def make_check(instance):
        return IbmMqCheck('ibm_mq', {}, [instance])

    yield make_check
@pytest.fixture
def instance():
    """A fresh copy of the default check instance, safe for tests to mutate."""
    config = copy.deepcopy(common.INSTANCE)
    return config
@pytest.fixture
def instance_ssl():
    """A fresh copy of the SSL check instance, safe for tests to mutate."""
    config = copy.deepcopy(common.INSTANCE_SSL)
    return config
@pytest.fixture
def instance_with_connection_name():
    """A fresh copy of the connection-name check instance."""
    config = copy.deepcopy(common.INSTANCE_WITH_CONNECTION_NAME)
    return config
@pytest.fixture
def instance_queue_pattern():
    """A fresh copy of the queue-pattern check instance."""
    config = copy.deepcopy(common.INSTANCE_QUEUE_PATTERN)
    return config
@pytest.fixture
def instance_queue_regex():
    """A fresh copy of the queue-regex check instance."""
    config = copy.deepcopy(common.INSTANCE_QUEUE_REGEX)
    return config
@pytest.fixture
def instance_collect_all():
    """A fresh copy of the collect-all check instance."""
    config = copy.deepcopy(common.INSTANCE_COLLECT_ALL)
    return config
@pytest.fixture
def instance_queue_regex_tag():
    """A fresh copy of the queue-regex-tag check instance."""
    config = copy.deepcopy(common.INSTANCE_QUEUE_REGEX_TAG)
    return config
@pytest.fixture
def METHOD_NAME(instance):
    """The default instance with SSL auth enabled against a dummy key repository."""
    instance.update(
        {
            'ssl_auth': 'yes',
            'ssl_cipher_spec': 'TLS_RSA_WITH_AES_256_CBC_SHA256',
            'ssl_key_repository_location': '/dummy',
        }
    )
    return instance
@pytest.fixture
def seed_data():
    """Publish a batch of messages and then consume them, so queue metrics have data."""
    publish()
    consume()
def publish():
    """Put 10 test messages on the configured queue; best-effort on errors."""
    # Late import to not require it for e2e
    import pymqi
    conn_info = "%s(%s)" % (common.HOST, common.PORT)
    qmgr = pymqi.connect(common.QUEUE_MANAGER, common.CHANNEL, conn_info, common.USERNAME, common.PASSWORD)
    queue = pymqi.Queue(qmgr, common.QUEUE)
    for i in range(10):
        try:
            message = 'Hello from Python! Message {}'.format(i)
            log.info("sending message: %s", message)
            queue.put(message.encode())
        except Exception as e:
            # Best-effort: log, clean up the connection, and bail out
            # without failing the fixture.
            log.info("exception publishing: %s", e)
            queue.close()
            qmgr.disconnect()
            return
    queue.close()
    qmgr.disconnect()
def consume():
    """Read up to 10 messages off the configured queue; tolerate an empty queue."""
    # Late import to not require it for e2e
    import pymqi
    conn_info = "%s(%s)" % (common.HOST, common.PORT)
    qmgr = pymqi.connect(common.QUEUE_MANAGER, common.CHANNEL, conn_info, common.USERNAME, common.PASSWORD)
    queue = pymqi.Queue(qmgr, common.QUEUE)
    for _ in range(10):
        try:
            message = queue.get()
            print("got a new message: {}".format(message))
        except Exception as e:
            # MQRC_NO_MSG_AVAILABLE just means the queue is drained; any
            # other error is printed and we stop consuming.
            if not re.search("MQRC_NO_MSG_AVAILABLE", e.errorAsString()):
                print(e)
                queue.close()
                qmgr.disconnect()
                return
            else:
                pass
    queue.close()
    qmgr.disconnect()
def prepare_queue_manager():
    """Configure the dockerized queue manager for TLS tests.

    Points the queue manager at its key repository and certificate, creates
    the TLS SVRCONN channel, and installs channel-auth records mapping the
    test user to 'mqm' while unblocking 'nobody'.
    """
    import pymqi

    conn_info = '{0}({1})'.format(common.HOST, common.PORT)
    qm_name = common.QUEUE_MANAGER.lower()
    qmgr = pymqi.QueueManager(None)
    qmgr.connectTCPClient(common.QUEUE_MANAGER, pymqi.CD(), common.CHANNEL, conn_info, common.USERNAME, common.PASSWORD)
    pcf = pymqi.PCFExecute(qmgr, response_wait_interval=5000)
    # Point the queue manager at its TLS key repository and certificate.
    attrs = [
        pymqi.CFST(
            Parameter=pymqi.CMQC.MQCA_SSL_KEY_REPOSITORY,
            String=pymqi.ensure_bytes('/etc/mqm/pki/keys/{}'.format(qm_name)),
        ),
        pymqi.CFST(Parameter=pymqi.CMQC.MQCA_CERT_LABEL, String=pymqi.ensure_bytes(qm_name)),
    ]
    pcf.MQCMD_CHANGE_Q_MGR(attrs)
    tls_channel_name = pymqi.ensure_bytes(common.CHANNEL_SSL)
    cypher_spec = pymqi.ensure_bytes(common.SSL_CYPHER_SPEC)
    client_dn = pymqi.ensure_bytes('CN={}'.format(common.SSL_CLIENT_LABEL))
    certificate_label_qmgr = pymqi.ensure_bytes(qm_name)
    # Create (or replace) the TLS server-connection channel.
    attrs = [
        pymqi.CFST(Parameter=pymqi.CMQCFC.MQCACH_CHANNEL_NAME, String=pymqi.ensure_bytes(tls_channel_name)),
        pymqi.CFIN(Parameter=pymqi.CMQCFC.MQIACH_CHANNEL_TYPE, Value=pymqi.CMQC.MQCHT_SVRCONN),
        pymqi.CFST(Parameter=pymqi.CMQCFC.MQCACH_SSL_CIPHER_SPEC, String=cypher_spec),
        pymqi.CFST(Parameter=pymqi.CMQCFC.MQCACH_SSL_PEER_NAME, String=client_dn),
        pymqi.CFIN(Parameter=pymqi.CMQCFC.MQIACH_SSL_CLIENT_AUTH, Value=pymqi.CMQXC.MQSCA_OPTIONAL),
        pymqi.CFST(Parameter=pymqi.CMQC.MQCA_CERT_LABEL, String=certificate_label_qmgr),
        pymqi.CFIN(Parameter=pymqi.CMQCFC.MQIACF_REPLACE, Value=pymqi.CMQCFC.MQRP_YES),
    ]
    pcf.MQCMD_CREATE_CHANNEL(attrs)
    # Map the test user onto the privileged 'mqm' MCA user id.
    attrs = [
        pymqi.CFST(Parameter=pymqi.CMQCFC.MQCACH_CHANNEL_NAME, String=pymqi.ensure_bytes(tls_channel_name)),
        pymqi.CFIN(Parameter=pymqi.CMQCFC.MQIACF_CHLAUTH_TYPE, Value=pymqi.CMQCFC.MQCAUT_USERMAP),
        pymqi.CFIN(Parameter=pymqi.CMQCFC.MQIACF_ACTION, Value=pymqi.CMQCFC.MQACT_REPLACE),
        pymqi.CFST(Parameter=pymqi.CMQCFC.MQCACH_CLIENT_USER_ID, String=pymqi.ensure_bytes(common.USERNAME)),
        pymqi.CFIN(Parameter=pymqi.CMQC.MQIA_CHECK_CLIENT_BINDING, Value=pymqi.CMQCFC.MQCHK_REQUIRED_ADMIN),
        pymqi.CFIN(Parameter=pymqi.CMQCFC.MQIACH_USER_SOURCE, Value=pymqi.CMQC.MQUSRC_MAP),
        pymqi.CFST(Parameter=pymqi.CMQCFC.MQCACH_MCA_USER_ID, String=b'mqm'),
    ]
    pcf.MQCMD_SET_CHLAUTH_REC(attrs)
    # Lift the default BLOCKUSER rule for 'nobody' on this channel.
    attrs = [
        pymqi.CFST(Parameter=pymqi.CMQCFC.MQCACH_CHANNEL_NAME, String=pymqi.ensure_bytes(tls_channel_name)),
        pymqi.CFIN(Parameter=pymqi.CMQCFC.MQIACF_CHLAUTH_TYPE, Value=pymqi.CMQCFC.MQCAUT_BLOCKUSER),
        pymqi.CFST(Parameter=pymqi.CMQCFC.MQCACH_MCA_USER_ID_LIST, String=b'nobody'),
        pymqi.CFIN(Parameter=pymqi.CMQCFC.MQIACH_WARNING, Value=pymqi.CMQC.MQWARN_NO),
        pymqi.CFIN(Parameter=pymqi.CMQCFC.MQIACF_ACTION, Value=pymqi.CMQCFC.MQACT_REPLACE),
    ]
    pcf.MQCMD_SET_CHLAUTH_REC(attrs)
    pcf.disconnect()
    qmgr.disconnect()
@pytest.fixture(scope='session')
def dd_environment():
    """Spin up the dockerized MQ environment for the whole test session.

    Waits for a version-specific readiness line in the container logs and,
    on non-Windows hosts, for the queue manager TLS preparation to succeed.
    Yields the default instance plus e2e metadata.
    """
    if common.MQ_VERSION == 9:
        log_pattern = "AMQ5026I: The listener 'DEV.LISTENER.TCP' has started. ProcessId"
    elif common.MQ_VERSION == 8:
        log_pattern = r".*QMNAME\({}\)\s*STATUS\(Running\).*".format(common.QUEUE_MANAGER)
    else:
        raise RuntimeError('Invalid version: {}'.format(common.MQ_VERSION))
    e2e_meta = copy.deepcopy(common.E2E_METADATA)
    # Mount the TLS keys into the agent container.
    e2e_meta.setdefault('docker_volumes', [])
    e2e_meta['docker_volumes'].append("{}:/opt/pki/keys".format(os.path.join(common.HERE, 'keys')))
    conditions = [CheckDockerLogs('ibm_mq1', log_pattern)]
    if not ON_WINDOWS:
        conditions.append(WaitFor(prepare_queue_manager))
    with docker_run(compose_file=common.COMPOSE_FILE_PATH, build=True, conditions=conditions, sleep=10, attempts=2):
        yield common.INSTANCE, e2e_meta
298,354 | test valid | from datetime import date
from django.test import TestCase
from organisations.management.commands.import_divisionsets_from_csv import (
Command,
)
from organisations.models import Organisation, OrganisationDivisionSet
class ImportDivisionSetsFromCsvTests(TestCase):
    """Tests for the import_divisionsets_from_csv management command.

    Fixture layout: TEST1 has no DivisionSet; TEST3 and TEST4 each have one
    closed DivisionSet, so a valid import should open a new one for each.
    The command's network access is stubbed by replacing read_from_url.
    """

    def setUp(self):
        # set up test data
        self.opts = {"url": "foo.bar/baz", "s3": None, "file": None}
        self.org1 = Organisation.objects.create(
            official_identifier="TEST1",
            organisation_type="local-authority",
            official_name="Test Council 1",
            slug="test1",
            territory_code="ENG",
            election_name="Test Council 1 Local Elections",
            start_date=date(2016, 10, 1),
        )
        # Template CSV row; individual tests fill in the fields they need.
        self.base_record = {
            "Start Date": "",
            "End Date": "",
            "Name": "",
            "official_identifier": "",
            "geography_curie": "",
            "seats_total": "",
            "Boundary Commission Consultation URL": "",
            "Legislation URL": "",
            "Short Title": "",
            "Notes": "",
            "Mapit Generation URI": "",
            "Organisation ID": "",
            "Organisation ID type": "",
        }
        self.org3 = Organisation.objects.create(
            official_identifier="TEST3",
            organisation_type="local-authority",
            official_name="Test Council 3",
            slug="test3",
            territory_code="ENG",
            election_name="Test Council 3 Local Elections",
            start_date=date(2016, 10, 1),
        )
        self.org4 = Organisation.objects.create(
            official_identifier="TEST4",
            organisation_type="local-authority",
            official_name="Test Council 4",
            slug="test4",
            territory_code="ENG",
            election_name="Test Council 4 Local Elections",
            start_date=date(2016, 10, 1),
        )
        # Closed DivisionSets: the import should create successors starting
        # the day after each end_date.
        OrganisationDivisionSet.objects.create(
            organisation=self.org3,
            start_date="2016-10-01",
            end_date="2017-05-03",
            legislation_url="",
            consultation_url="",
            short_title="",
            notes="",
        )
        OrganisationDivisionSet.objects.create(
            organisation=self.org4,
            start_date="2016-10-01",
            end_date="2018-05-02",
            legislation_url="",
            consultation_url="",
            short_title="",
            notes="",
        )
        # Two divisions per organisation for the happy-path import.
        records = [
            self.base_record.copy(),
            self.base_record.copy(),
            self.base_record.copy(),
            self.base_record.copy(),
        ]
        records[0]["Name"] = "Central"
        records[0]["seats_total"] = "1"
        records[0]["Organisation ID"] = "TEST3"
        records[1]["Name"] = "Abbey"
        records[1]["seats_total"] = "2"
        records[1]["Organisation ID"] = "TEST3"
        records[2]["Name"] = "Castle"
        records[2]["seats_total"] = "3"
        records[2]["Organisation ID"] = "TEST4"
        records[3]["Name"] = "Park"
        records[3]["seats_total"] = "1"
        records[3]["Organisation ID"] = "TEST4"
        self.valid_test_data = records

    def test_org_not_found_bad_code(self):
        """An unknown organisation identifier raises DoesNotExist."""
        # Organisation doesn't exist
        cmd = Command()
        self.base_record[
            "Organisation ID"
        ] = "XXXX"  # this Org ID doesn't exist
        cmd.read_from_url = lambda x: [self.base_record]
        with self.assertRaises(Organisation.DoesNotExist):
            cmd.handle(**self.opts)

    def test_org_not_found_bad_date(self):
        """A record dated before the organisation's start raises DoesNotExist."""
        # Organisation code exists, but not valid for this date
        cmd = Command()
        self.base_record["Organisation ID"] = "TEST1"
        self.base_record[
            "Start Date"
        ] = "2016-09-01"  # before TEST1 org start date
        cmd.read_from_url = lambda x: [self.base_record]
        with self.assertRaises(Organisation.DoesNotExist):
            cmd.handle(**self.opts)

    def test_divset_not_found(self):
        """An organisation with no DivisionSets at all is an error."""
        # Organisation does exist, but has no associated DivisionSets
        cmd = Command()
        self.base_record["Organisation ID"] = "TEST1"
        cmd.read_from_url = lambda x: [self.base_record]
        with self.assertRaises(Exception):
            cmd.handle(**self.opts)

    def test_divset_null_end_date(self):
        """An open (NULL end_date) DivisionSet blocks creating a successor."""
        # Organisation does exist and has an associated DivisionSets
        # but the DivisionSet has a NULL end date
        OrganisationDivisionSet.objects.create(
            organisation=self.org1,
            start_date="2016-10-01",
            end_date=None,
            legislation_url="",
            consultation_url="",
            short_title="",
            notes="",
        )
        cmd = Command()
        self.base_record["Organisation ID"] = "TEST1"
        cmd.read_from_url = lambda x: [self.base_record]
        with self.assertRaises(Exception):
            cmd.handle(**self.opts)

    def METHOD_NAME(self):
        """Valid rows create one new open DivisionSet per organisation."""
        # all data is valid - should import cleanly
        cmd = Command()
        cmd.read_from_url = lambda x: self.valid_test_data
        cmd.get_division_type_from_registers = lambda x: "DIW"
        cmd.handle(**self.opts)
        # check it all imported correctly
        org3divset = (
            OrganisationDivisionSet.objects.all()
            .filter(organisation=self.org3)
            .order_by("-start_date")
        )
        self.assertEqual(2, len(org3divset))
        # New set starts the day after the old one ended (2017-05-03).
        self.assertEqual(
            "2017-05-04", org3divset[0].start_date.strftime("%Y-%m-%d")
        )
        self.assertIsNone(org3divset[0].end_date)
        self.assertEqual(2, len(org3divset[0].divisions.all()))
        org4divset = (
            OrganisationDivisionSet.objects.all()
            .filter(organisation=self.org4)
            .order_by("-start_date")
        )
        self.assertEqual(2, len(org4divset))
        self.assertEqual(
            "2018-05-03", org4divset[0].start_date.strftime("%Y-%m-%d")
        )
        self.assertIsNone(org4divset[0].end_date)
        self.assertEqual(2, len(org4divset[0].divisions.all()))
298,355 | log metrics | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import Namespace
from dataclasses import dataclass
from pathlib import Path
from typing import Any, List, Literal, Mapping, Optional, Union
import pandas as pd
from lightning_utilities.core.apply_func import apply_to_collection
from omegaconf import DictConfig, ListConfig, OmegaConf
from pytorch_lightning.callbacks import Checkpoint
from pytorch_lightning.loggers import Logger
from pytorch_lightning.utilities.parsing import AttributeDict
from torch import Tensor
from nemo.utils import logging
try:
from clearml import OutputModel, Task
HAVE_CLEARML_LOGGER = True
except (ImportError, ModuleNotFoundError):
HAVE_CLEARML_LOGGER = False
@dataclass
class ClearMLParams:
    """Configuration options for ClearML experiment logging."""

    project: Optional[str] = None  # project name; CLEARML_PROJECT env wins, falls back to "NeMo"
    task: Optional[str] = None  # task name; CLEARML_TASK env wins, falls back to "Trainer {prefix}"
    connect_pytorch: Optional[bool] = False  # auto-connect ClearML's pytorch framework hooks
    model_name: Optional[str] = None  # name for the OutputModel artifact
    tags: Optional[List[str]] = None  # extra tags appended to the default ["NeMo"]
    log_model: Optional[bool] = False  # upload the .nemo checkpoint as a ClearML model
    log_cfg: Optional[bool] = False  # attach the run config (YAML) to the model design
    METHOD_NAME: Optional[bool] = False  # store the last logged metrics as model metadata
class ClearMLLogger(Logger):
    """PyTorch Lightning logger that mirrors a run into a ClearML task.

    Optionally registers the produced .nemo checkpoint as a ClearML
    OutputModel, attaching the run config and last metrics to it.
    """

    @property
    def name(self) -> str:
        """Name of the underlying ClearML task."""
        return self.clearml_task.name

    @property
    def version(self) -> str:
        """ClearML task id, used as the logger version."""
        return self.clearml_task.id

    def __init__(
        self, clearml_cfg: DictConfig, log_dir: str, prefix: str, save_best_model: bool, postfix: str = ".nemo"
    ) -> None:
        if not HAVE_CLEARML_LOGGER:
            raise ImportError(
                "Found create_clearml_logger is True."
                "But ClearML not found. Please see the README for installation instructions:"
                "https://github.com/allegroai/clearml"
            )
        self.clearml_task = None
        self.clearml_model = None
        self.clearml_cfg = clearml_cfg
        # Expected location of the exported .nemo checkpoint.
        self.path_nemo_model = os.path.abspath(
            os.path.expanduser(os.path.join(log_dir, "checkpoints", prefix + postfix))
        )
        self.save_best_model = save_best_model
        self.prefix = prefix
        # NOTE(review): attribute name is misspelled ("previos") but used
        # consistently within this class.
        self.previos_best_model_path = None
        self.last_metrics = None
        # Gate used to skip the first after_save_checkpoint call when
        # tracking the best model.
        self.save_blocked = True
        # Environment variables override the config values.
        self.project_name = os.getenv("CLEARML_PROJECT", clearml_cfg.project if clearml_cfg.project else "NeMo")
        self.task_name = os.getenv("CLEARML_TASK", clearml_cfg.task if clearml_cfg.task else f"Trainer {self.prefix}")
        tags = ["NeMo"]
        if clearml_cfg.tags:
            tags.extend(clearml_cfg.tags)
        self.clearml_task: Task = Task.init(
            project_name=self.project_name,
            task_name=self.task_name,
            auto_connect_frameworks={"pytorch": clearml_cfg.connect_pytorch},
            output_uri=True,
            tags=tags,
        )
        # Model name preference: explicit config > prefix > task name.
        if clearml_cfg.model_name:
            model_name = clearml_cfg.model_name
        elif self.prefix:
            model_name = self.prefix
        else:
            model_name = self.task_name
        if clearml_cfg.log_model:
            self.clearml_model: OutputModel = OutputModel(
                name=model_name, task=self.clearml_task, tags=tags, framework="NeMo"
            )

    def log_hyperparams(self, params, *args, **kwargs) -> None:
        """Attach the (normalized, YAML-serialized) run config to the model design."""
        if self.clearml_model and self.clearml_cfg.log_cfg:
            if isinstance(params, Namespace):
                params = vars(params)
            elif isinstance(params, AttributeDict):
                params = dict(params)
            # Normalize OmegaConf containers and Paths to plain values.
            params = apply_to_collection(params, (DictConfig, ListConfig), OmegaConf.to_container, resolve=True)
            params = apply_to_collection(params, Path, str)
            params = OmegaConf.to_yaml(params)
            self.clearml_model.update_design(config_text=params)

    def METHOD_NAME(self, metrics: Mapping[str, float], step: Optional[int] = None) -> None:
        """Cache metrics (as value/type string pairs) for model metadata.

        The cached dict is written to the OutputModel by _log_model.
        """
        if self.clearml_model and self.clearml_cfg.METHOD_NAME:
            # NOTE(review): `type(v) == Tensor` would miss Tensor
            # subclasses; isinstance would be the usual check.
            metrics = {
                k: {
                    "value": str(v.item() if type(v) == Tensor else v),
                    "type": str(type(v.item() if type(v) == Tensor else v)),
                }
                for k, v in metrics.items()
            }
            self.last_metrics = metrics

    def log_table(
        self,
        key: str,
        columns: List[str] = None,
        data: List[List[Any]] = None,
        dataframe: Any = None,
        step: Optional[int] = None,
    ) -> None:
        """Report a table (pandas DataFrame or rows+columns) to the ClearML task."""
        table: Optional[Union[pd.DataFrame, List[List[Any]]]] = None
        if dataframe is not None:
            table = dataframe
            if columns is not None:
                table.columns = columns
        if data is not None:
            # Rows were given directly; prepend the header row.
            table = data
            assert len(columns) == len(table[0]), "number of column names should match the total number of columns"
            table.insert(0, columns)
        if table is not None:
            self.clearml_task.logger.report_table(title=key, series=key, iteration=step, table_plot=table)

    def after_save_checkpoint(self, checkpoint_callback: Checkpoint) -> None:
        """Upload the .nemo model when a new best checkpoint appears."""
        if self.clearml_model:
            if self.save_best_model:
                # First call after (re)enabling is skipped; subsequent
                # calls only upload when best_model_path changed and exists.
                if self.save_blocked:
                    self.save_blocked = False
                    return None
                if not os.path.exists(checkpoint_callback.best_model_path):
                    return None
                if self.previos_best_model_path == checkpoint_callback.best_model_path:
                    return None
                self.previos_best_model_path = checkpoint_callback.best_model_path
            self._log_model(self.path_nemo_model)

    def finalize(self, status: Literal["success", "failed", "aborted"] = "success") -> None:
        """Close the ClearML task with the matching terminal state."""
        if status == "success":
            self.clearml_task.mark_completed()
        elif status == "failed":
            self.clearml_task.mark_failed()
        elif status == "aborted":
            self.clearml_task.mark_stopped()

    def _log_model(self, save_path: str) -> None:
        """Upload the weights at save_path and attach cached metrics metadata."""
        if self.clearml_model:
            if os.path.exists(save_path):
                self.clearml_model.update_weights(
                    weights_filename=save_path,
                    upload_uri=self.clearml_task.storage_uri or self.clearml_task._get_default_report_storage_uri(),
                    auto_delete_file=False,
                    is_package=True,
                )
                if self.clearml_cfg.METHOD_NAME and self.last_metrics:
                    self.clearml_model.set_all_metadata(self.last_metrics)
                # Re-arm the best-model gate after a successful upload.
                self.save_blocked = True
            else:
                # NOTE(review): "cant" typo lives in the runtime log string;
                # left untouched here (comment-only change).
                logging.warning((f"Logging model enabled, but cant find .nemo file!" f" Path: {save_path}"))
298,356 | setup image list | import wx
import sys
import time
import threading
from os import path, getenv
# if PAPARAZZI_HOME not set, then assume the tree containing this
# file is a reasonable substitute
PPRZ_HOME = getenv("PAPARAZZI_HOME", path.normpath(path.join(path.dirname(path.abspath(__file__)), '../../../../')))
sys.path.append(PPRZ_HOME + "/var/lib/python") # pprzlink
from pprzlink.ivy import IvyMessagesInterface
from pprzlink.message import PprzMessage
WIDTH = 450
LABEL_WIDTH = 166
DATA_WIDTH = 100
HEIGHT = 800
BORDER = 1
class Message(PprzMessage):
    """A PprzMessage plus GUI bookkeeping for the messages viewer."""

    def __init__(self, class_name, name):
        super(Message, self).__init__(class_name, name)
        self.field_controls = []  # wx controls displaying each field value
        self.index = None  # page index within the aircraft's message notebook
        self.last_seen = time.time()  # timestamp of the last reception
class Aircraft(object):
    """Per-aircraft state: id, received messages by name, and the notebook widget."""

    def __init__(self, ac_id):
        self.ac_id = ac_id
        # message name -> Message; filled in as messages arrive
        self.messages = {}
        # wx.Notebook for this aircraft, created lazily by the frame
        self.messages_book = None
class MessagesFrame(wx.Frame):
    """Top-level window showing live Ivy messages, one notebook tab per aircraft.

    Each aircraft tab holds a side notebook with one page per message type;
    page images act as activity LEDs (green = recently seen, gray = stale).
    """

    def message_recv(self, ac_id, msg):
        """Handle incoming messages

        Callback function for IvyMessagesInterface

        :param ac_id: aircraft id
        :type ac_id: int
        :param msg: message
        :type msg: PprzMessage
        """
        # only show messages of the requested class
        if msg.msg_class != self.msg_class:
            return
        # Rate-limit GUI refreshes per message type (at most every 0.2 s).
        if ac_id in self.aircrafts and msg.name in self.aircrafts[ac_id].messages:
            if time.time() - self.aircrafts[ac_id].messages[msg.name].last_seen < 0.2:
                return
        # Marshal the update onto the wx main thread.
        wx.CallAfter(self.gui_update, ac_id, msg)

    def find_page(self, book, name):
        """Return the index where a page named `name` should be inserted (alphabetical)."""
        if book.GetPageCount() < 1:
            return 0
        start = 0
        end = book.GetPageCount()
        while start < end:
            if book.GetPageText(start) >= name:
                return start
            start += 1
        return start

    def update_leds(self):
        # Timer callback; defer actual widget updates to the wx main thread.
        wx.CallAfter(self.update_leds_real)

    def update_leds_real(self):
        """Gray out the LED of any message not seen for more than 0.2 s, then re-arm the timer."""
        for ac_id in self.aircrafts:
            aircraft = self.aircrafts[ac_id]
            for msg_str in aircraft.messages:
                message = aircraft.messages[msg_str]
                if message.last_seen + 0.2 < time.time():
                    aircraft.messages_book.SetPageImage(message.index, 0)
        self.timer = threading.Timer(0.1, self.update_leds)
        self.timer.start()

    def METHOD_NAME(self, notebook):
        """Attach the LED image list to a notebook (index 0 = gray, 1 = green)."""
        # NOTE(review): wx.BitmapFromImage is the wx "classic" API; Phoenix
        # (wx 4+) uses wx.Bitmap(image) -- confirm the targeted wx version.
        imageList = wx.ImageList(24, 24)
        image = wx.Image(PPRZ_HOME + "/data/pictures/gray_led24.png")
        bitmap = wx.BitmapFromImage(image)
        imageList.Add(bitmap)
        image = wx.Image(PPRZ_HOME + "/data/pictures/green_led24.png")
        bitmap = wx.BitmapFromImage(image)
        imageList.Add(bitmap)
        notebook.AssignImageList(imageList)

    def add_new_aircraft(self, ac_id):
        """Create the tab and per-message notebook for a newly seen aircraft."""
        self.aircrafts[ac_id] = Aircraft(ac_id)
        ac_panel = wx.Panel(self.notebook, -1)
        self.notebook.AddPage(ac_panel, str(ac_id))
        messages_book = wx.Notebook(ac_panel, style=wx.NB_LEFT)
        self.METHOD_NAME(messages_book)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(messages_book, 1, wx.EXPAND)
        ac_panel.SetSizer(sizer)
        sizer.Layout()
        self.aircrafts[ac_id].messages_book = messages_book

    def add_new_message(self, aircraft, msg_class, name):
        """Insert a page (alphabetically) with one label/value row per message field."""
        messages_book = aircraft.messages_book
        aircraft.messages[name] = Message(msg_class, name)
        field_panel = wx.Panel(messages_book)
        grid_sizer = wx.FlexGridSizer(len(aircraft.messages[name].fieldnames), 2, 5, 0)
        index = self.find_page(messages_book, name)
        messages_book.InsertPage(index, field_panel, name, imageId=1)
        aircraft.messages[name].index = index
        # update indexes of pages which are to be moved
        for message_name in aircraft.messages:
            aircraft.messages[message_name].index = self.find_page(messages_book, message_name)
        for field_name in aircraft.messages[name].fieldnames:
            name_text = wx.StaticText(field_panel, -1, field_name)
            size = name_text.GetSize()
            size.x = LABEL_WIDTH
            name_text.SetMinSize(size)
            grid_sizer.Add(name_text, 1, wx.ALL, BORDER)
            value_control = wx.StaticText(field_panel, -1, "42", style=wx.EXPAND)
            size = value_control.GetSize()
            size.x = LABEL_WIDTH
            value_control.SetMinSize(size)
            grid_sizer.Add(value_control, 1, wx.ALL | wx.EXPAND, BORDER)
            # wx 3+ raises if the column is already growable; wx 2 has no
            # IsColGrowable, so call AddGrowableCol unconditionally there.
            if wx.MAJOR_VERSION > 2:
                if grid_sizer.IsColGrowable(1):
                    grid_sizer.AddGrowableCol(1)
            else:
                grid_sizer.AddGrowableCol(1)
            aircraft.messages[name].field_controls.append(value_control)
        field_panel.SetAutoLayout(True)
        field_panel.SetSizer(grid_sizer)
        field_panel.Layout()

    def gui_update(self, ac_id, msg):
        """Apply one received message to the GUI (runs on the wx main thread)."""
        if ac_id not in self.aircrafts:
            self.add_new_aircraft(ac_id)
        aircraft = self.aircrafts[ac_id]
        if msg.name not in aircraft.messages:
            self.add_new_message(aircraft, msg.msg_class, msg.name)
        # Light the green LED and refresh every field value.
        aircraft.messages_book.SetPageImage(aircraft.messages[msg.name].index, 1)
        self.aircrafts[ac_id].messages[msg.name].last_seen = time.time()
        for index in range(0, len(msg.fieldvalues)):
            aircraft.messages[msg.name].field_controls[index].SetLabel(str(msg.get_field(index)))

    def __init__(self, msg_class="telemetry"):
        """Build the frame, start the LED refresh timer and subscribe to Ivy."""
        wx.Frame.__init__(self, id=-1, parent=None, name=u'MessagesFrame', size=wx.Size(WIDTH, HEIGHT), style=wx.DEFAULT_FRAME_STYLE, title=u'Messages')
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self.notebook = wx.Notebook(self)
        self.aircrafts = {}
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(self.notebook, 1, wx.EXPAND)
        self.SetSizer(sizer)
        sizer.Layout()
        self.timer = threading.Timer(0.1, self.update_leds)
        self.timer.start()
        self.msg_class = msg_class
        self.interface = IvyMessagesInterface("Paparazzi Messages Viewer")
        self.interface.subscribe(self.message_recv)

    def OnClose(self, event):
        """Stop the timer, shut down the Ivy interface and destroy the frame."""
        self.timer.cancel()
        self.interface.shutdown()
        self.Destroy()
298,357 | get complex type | import clang.cindex
import csv
import enum
import os
import sys
from clang.cindex import CursorKind, LinkageKind, StorageClass, TypeKind
from collections import Counter
try:
from tqdm import tqdm
except ImportError:
def tqdm(it, *args, **kwargs):
return it
def traverse_namespaced(root, filter_files=None, skip_namespaces=1, namespace=()):
    """Yield (namespace, node) pairs for `root` and all of its descendants.

    Subtrees located in files outside `filter_files` are pruned (the
    translation-unit root has no file and is always visited).  The first
    `skip_namespaces` named enclosing scopes are not added to the
    namespace tuple.
    """
    if root.location.file is not None and root.location.file.name not in filter_files:
        return
    yield namespace, root
    if root.displayname != "":
        if skip_namespaces > 0:
            # Consume one "skipped" scope instead of recording its name.
            skip_namespaces -= 1
        else:
            namespace += (root.spelling,)
    for node in root.get_children():
        yield from traverse_namespaced(node, filter_files, skip_namespaces, namespace)
# Cursor kinds we extract identifiers from, mapped to the category string
# emitted in the CSV "kind" column.
INTERESTING_NODE_KINDS = {
    CursorKind.CLASS_DECL: "class",
    CursorKind.CLASS_TEMPLATE: "class",
    CursorKind.ENUM_DECL: "enum",
    CursorKind.ENUM_CONSTANT_DECL: "enum_constant",
    CursorKind.FIELD_DECL: "variable",
    CursorKind.PARM_DECL: "variable",
    CursorKind.STRUCT_DECL: "struct",
    CursorKind.UNION_DECL: "union",
    CursorKind.VAR_DECL: "variable",
    CursorKind.FUNCTION_DECL: "function",
}
def is_array_type(type):
    """Return True when `type` is one of clang's array type kinds."""
    array_kinds = (
        TypeKind.CONSTANTARRAY,
        TypeKind.DEPENDENTSIZEDARRAY,
        TypeKind.INCOMPLETEARRAY,
    )
    return type.kind in array_kinds
def METHOD_NAME(type):
    """Return the Hungarian-style type prefix for `type`.

    "p" for pointers (including smart pointers), "a" for arrays (including
    array/sorted_array templates), "fn" for function prototypes, and ""
    when no prefix applies.  Prefixes compose, e.g. pointer-to-array.
    """
    # Opaque handle typedefs carry no prefix.
    if type.spelling in ("IOHANDLE", "LOCK"):
        return ""
    if type.kind == TypeKind.AUTO:
        return METHOD_NAME(type.get_canonical())
    # References are treated like the referenced type itself.
    if type.kind == TypeKind.LVALUEREFERENCE:
        return METHOD_NAME(type.get_pointee())
    if type.kind == TypeKind.POINTER:
        return "p" + METHOD_NAME(type.get_pointee())
    if is_array_type(type):
        return "a" + METHOD_NAME(type.element_type)
    if type.kind == TypeKind.FUNCTIONPROTO:
        return "fn"
    # Look through typedefs and elaborated type sugar.
    if type.kind == TypeKind.TYPEDEF:
        return METHOD_NAME(type.get_declaration().underlying_typedef_type)
    if type.kind == TypeKind.ELABORATED:
        return METHOD_NAME(type.get_named_type())
    if type.kind in (TypeKind.UNEXPOSED, TypeKind.RECORD):
        # Smart pointers and array-like templates prefix their first
        # template argument.
        if type.get_declaration().spelling in "shared_ptr unique_ptr".split():
            return "p" + METHOD_NAME(type.get_template_argument_type(0))
        if type.get_declaration().spelling in "array sorted_array".split():
            return "a" + METHOD_NAME(type.get_template_argument_type(0))
    return ""
def is_static_member_definition_hack(node):
    """Token-level heuristic: is `node` an out-of-class static member definition?

    Scans the cursor's tokens for an ``ms_``-prefixed name immediately
    after ``::`` and before the first ``=``.
    NOTE(review): presumably works around such definitions not being
    reported as STATIC via node.storage_class -- confirm against libclang.
    """
    last_colons = False
    for t in node.get_tokens():
        t = t.spelling
        if t == "::":
            last_colons = True
        elif last_colons:
            if t.startswith("ms_"):
                return True
            last_colons = False
        if t == "=":
            # Anything past the initializer is not part of the name.
            return False
    return False
def is_const(type):
    """True when the type (or, for arrays, its element type) is const-qualified."""
    if type.is_const_qualified():
        return True
    # Arrays inherit constness from their element type.
    return is_array_type(type) and is_const(type.element_type)
class ParseError(RuntimeError):
    """Raised when libclang reports diagnostics while parsing a source file."""
def process_source_file(out, file, extra_args, break_on):
    """Parse one .c/.cpp file (plus its sibling header) and emit identifier rows.

    Writes one CSV row per interesting declaration via `out` (a DictWriter).
    Raises ParseError when clang reports diagnostics, ValueError for an
    unrecognized file extension.  `break_on` drops into the debugger when a
    node with that spelling is seen.
    """
    args = extra_args + ["-Isrc"]
    # Identifiers from the matching header are reported as well.
    if file.endswith(".c"):
        header = "{}.h".format(file[:-2])
    elif file.endswith(".cpp"):
        header = "{}.h".format(file[:-4])
    else:
        raise ValueError("unrecognized source file: {}".format(file))
    index = clang.cindex.Index.create()
    unit = index.parse(file, args=args)
    errors = list(unit.diagnostics)
    if errors:
        for error in errors:
            print("{}: {}".format(file, error.format()), file=sys.stderr)
        print(args, file=sys.stderr)
        raise ParseError("failed parsing {}".format(file))
    filter_files = frozenset([file, header])
    for namespace, node in traverse_namespaced(unit.cursor, filter_files=filter_files):
        cur_file = None
        if node.location.file is not None:
            cur_file = node.location.file.name
        if cur_file is None or (cur_file != file and cur_file != header):
            continue
        if node.kind in INTERESTING_NODE_KINDS and node.spelling:
            type = METHOD_NAME(node.type)
            qualifiers = ""
            # Qualifier letters only apply to variables and functions:
            # s=static, m=member, c=static const non-member, g=external.
            if INTERESTING_NODE_KINDS[node.kind] in {"variable", "function"}:
                is_member = node.semantic_parent.kind in {CursorKind.CLASS_DECL, CursorKind.CLASS_TEMPLATE, CursorKind.STRUCT_DECL, CursorKind.UNION_DECL}
                is_static = node.storage_class == StorageClass.STATIC or is_static_member_definition_hack(node)
                if is_static:
                    qualifiers = "s" + qualifiers
                if is_member:
                    qualifiers = "m" + qualifiers
                if is_static and not is_member and is_const(node.type):
                    qualifiers = "c" + qualifiers
                if node.linkage == LinkageKind.EXTERNAL and not is_member:
                    qualifiers = "g" + qualifiers
            out.writerow({
                "file": cur_file,
                "line": node.location.line,
                "column": node.location.column,
                "kind": INTERESTING_NODE_KINDS[node.kind],
                "path": "::".join(namespace),
                "qualifiers": qualifiers,
                "type": type,
                "name": node.spelling,
            })
            if node.spelling == break_on:
                breakpoint()
def main():
    """CLI entry point: extract identifier data from each file, CSV to stdout.

    Returns 1 (as process exit code) when any file failed to parse.
    """
    import argparse
    p = argparse.ArgumentParser(description="Extracts identifier data from a Teeworlds source file and its header, outputting the data as CSV to stdout")
    p.add_argument("file", metavar="FILE", nargs="+", help="Source file to analyze")
    p.add_argument("--break-on", help="Break on a specific variable name, useful to debug issues with the script")
    args = p.parse_args()
    extra_args = []
    if "CXXFLAGS" in os.environ:
        # Forward the build's compiler flags to libclang.
        extra_args = os.environ["CXXFLAGS"].split()
    out = csv.DictWriter(sys.stdout, "file line column kind path qualifiers type name".split())
    out.writeheader()
    files = args.file
    if len(files) > 1:
        # Progress bar only when processing multiple files (no-op fallback
        # when tqdm isn't installed).
        files = tqdm(files, leave=False)
    error = False
    for file in files:
        try:
            process_source_file(out, file, extra_args, args.break_on)
        except ParseError:
            # Keep going; report failure via the exit code at the end.
            error = True
    return int(error)

if __name__ == "__main__":
    sys.exit(main())
298,358 | emit | from __future__ import annotations
import logging
from logging import Logger
from typing import Any
import gymnasium.spaces
class EnvLogger:
    """Used for logging warnings and errors for environments.

    Warnings are emitted through a queue-backed logging handler so tests can
    inspect them; errors raise immediately.
    """

    mqueue: list[Any] = []
    _output: bool = True

    @staticmethod
    def get_logger() -> Logger:
        """Returns the logger object."""
        logger = logging.getLogger(__name__)
        return logger

    @staticmethod
    def _generic_warning(msg: Any) -> None:
        """Add a custom generic warning to the EnvLogger."""
        logger = EnvLogger.get_logger()
        if not logger.hasHandlers():
            handler = EnvWarningHandler(mqueue=EnvLogger.mqueue)
            logger.addHandler(handler)
        logger.warning(msg)
        # needed to get the pytest runner to work correctly, and doesn't seem to have serious issues
        EnvLogger.mqueue.append(msg)

    @staticmethod
    def flush() -> None:
        """Flushes EnvLogger output."""
        EnvLogger.mqueue.clear()

    @staticmethod
    def suppress_output() -> None:
        """Suppresses EnvLogger output."""
        EnvLogger._output = False

    @staticmethod
    def unsuppress_output() -> None:
        """Resets EnvLogger output to be printed."""
        EnvLogger._output = True

    @staticmethod
    def error_possible_agents_attribute_missing(name: str) -> None:
        """Warns: ``[ERROR]: This environment does not support {attribute}.``."""
        raise AttributeError(
            f"[ERROR]: This environment does not support {name}. This means that either the environment has procedurally generated agents such that this property cannot be well defined (which requires special learning code to handle) or the environment was improperly configured by the developer."
        )

    @staticmethod
    def warn_action_out_of_bound(
        action: Any, action_space: gymnasium.spaces.Space, backup_policy: str
    ) -> None:
        """Warns: ``[WARNING]: Received an action {action} that was outside action space {action_space}.``."""
        EnvLogger._generic_warning(
            f"[WARNING]: Received an action {action} that was outside action space {action_space}. Environment is {backup_policy}"
        )

    @staticmethod
    def warn_close_unrendered_env() -> None:
        """Warns: ``[WARNING]: Called close on an unrendered environment.``."""
        EnvLogger._generic_warning(
            "[WARNING]: Called close on an unrendered environment."
        )

    @staticmethod
    def warn_close_before_reset() -> None:
        """Warns: ``[WARNING]: reset() needs to be called before close.``."""
        EnvLogger._generic_warning(
            "[WARNING]: reset() needs to be called before close."
        )

    @staticmethod
    def warn_on_illegal_move() -> None:
        """Warns: ``[WARNING]: Illegal move made, game terminating with current player losing.``."""
        EnvLogger._generic_warning(
            "[WARNING]: Illegal move made, game terminating with current player losing. \nobs['action_mask'] contains a mask of all legal moves that can be chosen."
        )

    # The error_* methods below raise AssertionError explicitly instead of
    # using `assert False, msg`: plain asserts are stripped under python -O,
    # which would silently turn these errors into no-ops.  The exception
    # type and message are unchanged, so existing callers are unaffected.

    @staticmethod
    def error_observe_before_reset() -> None:
        """Error: ``reset() needs to be called before observe.``."""
        raise AssertionError("reset() needs to be called before observe.")

    @staticmethod
    def error_step_before_reset() -> None:
        """Error: ``reset() needs to be called before step.``."""
        raise AssertionError("reset() needs to be called before step.")

    @staticmethod
    def warn_step_after_terminated_truncated() -> None:
        """Warns: ``[WARNING]: step() called after all agents are terminated or truncated. Should reset() first.``."""
        EnvLogger._generic_warning(
            "[WARNING]: step() called after all agents are terminated or truncated. Should reset() first."
        )

    @staticmethod
    def error_render_before_reset() -> None:
        """Error: ``reset() needs to be called before render.``."""
        raise AssertionError("reset() needs to be called before render.")

    @staticmethod
    def error_agent_iter_before_reset() -> None:
        """Error: ``reset() needs to be called before agent_iter().``."""
        raise AssertionError("reset() needs to be called before agent_iter().")

    @staticmethod
    def error_nan_action() -> None:
        """Error: ``step() cannot take in a nan action.``."""
        raise AssertionError("step() cannot take in a nan action.")

    @staticmethod
    def error_state_before_reset() -> None:
        """Error: ``reset() needs to be called before state.``."""
        raise AssertionError("reset() needs to be called before state.")
class EnvWarningHandler(logging.Handler):
    """Logging handler that collects every formatted warning into a queue.

    Presumably installed on EnvLogger's logger so callers/tests can inspect
    the warnings that were emitted — TODO confirm against the installer.
    """

    def __init__(self, *args, mqueue, **kwargs):
        # mqueue: external list-like sink that accumulates formatted messages.
        logging.Handler.__init__(self, *args, **kwargs)
        self.mqueue = mqueue

    def METHOD_NAME(self, record: logging.LogRecord):
        """logging.Handler hook invoked once per routed record."""
        m = self.format(record).rstrip("\n")
        self.mqueue.append(m)
        if EnvLogger._output:
            print(m)
298,359 | get build makedepends | """Licensed under GPLv3, see https://www.gnu.org/licenses/"""
import shutil
from pathlib import Path
from .config import BuildCachePath, CacheRoot
from .core import chown_to_current, open_file, spawn
from .exceptions import SysExit
from .i18n import translate
from .makepkg_config import MakePkgCommand, MakepkgConfig
from .pprint import print_error, print_stderr
from .privilege import isolate_root_cmd, using_dynamic_users
from .version import VersionMatcher
class SrcInfo:
    """Parsed view of a PKGBUILD's generated ``.SRCINFO`` metadata file.

    Lines are split into the common (pkgbase) section and the section of the
    selected ``package_name``; other packages' sections are discarded, which
    makes split packages queryable one at a time.
    """

    _common_lines: list[str]  # lines preceding the first `pkgname =` marker
    _package_lines: list[str]  # lines of the selected package's section
    path: Path  # location of the .SRCINFO file
    repo_path: Path  # directory containing the PKGBUILD
    pkgbuild_path: Path
    package_name: str | None  # sub-package of interest (None: common only)
    pkgnames: list[str]  # every `pkgname =` value found in the file

    def load_config(self) -> None:
        """(Re)read ``.SRCINFO`` and split it into common/package sections."""
        self.pkgnames = []
        self._common_lines = []
        self._package_lines = []
        if not self.path.exists():
            return
        destination = self._common_lines
        with open_file(self.path) as srcinfo_file:
            for line in srcinfo_file.readlines():
                if line.startswith("pkgname ="):
                    pkgname = line.split("=")[1].strip()
                    self.pkgnames.append(pkgname)
                    # From here on, keep lines only if they belong to the
                    # requested package; other packages go to a throwaway list.
                    destination = self._package_lines if pkgname == self.package_name else []
                else:
                    destination.append(line)

    def __init__(
        self,
        repo_path: str | Path | None = None,
        package_name: str | None = None,
        pkgbuild_path: str | Path | None = None,
    ) -> None:
        """Locate PKGBUILD/.SRCINFO from either a repo dir or a PKGBUILD path."""
        if repo_path:
            self.repo_path = Path(repo_path)
            self.pkgbuild_path = self.repo_path / "PKGBUILD"
        elif pkgbuild_path:
            self.pkgbuild_path = Path(pkgbuild_path)
            self.repo_path = self.pkgbuild_path.parent
        else:
            missing_property_error = translate(
                "Either `{prop1}` or `{prop2}` should be set",
            ).format(
                prop1="repo_path",
                prop2="pkgbuild_path",
            )
            raise NotImplementedError(missing_property_error)
        self.path = self.repo_path / ".SRCINFO"
        self.package_name = package_name
        self.load_config()

    def get_values(self, field: str, lines: list[str] | None = None) -> list[str]:
        """Return all values of ``field`` (default: common + package lines)."""
        prefix = field + " = "
        values = []
        if lines is None:
            lines = self._common_lines + self._package_lines
        for line in lines:
            if line.strip().startswith(prefix):
                values.append(line.strip().split(prefix)[1])
        return values

    def get_value(self, field: str, fallback: str | None = None) -> str | None:
        """Return the first value of ``field``, or ``fallback`` if absent."""
        values = self.get_values(field)
        value = values[0] if values else None
        if value is None:
            return fallback
        return value

    def get_install_script(self) -> str | None:
        """Return the `install =` script name, if declared."""
        values = self.get_values("install")
        if values:
            return values[0]
        return None

    def _get_depends(self, field: str, lines: list[str] | None = None) -> dict[str, VersionMatcher]:
        """Collect ``field`` and arch-specific ``field_$CARCH`` entries."""
        if lines is None:
            lines = self._common_lines + self._package_lines
        carch = MakepkgConfig.get("CARCH")
        dependencies: dict[str, VersionMatcher] = {}
        for dep_line in (
            self.get_values(field, lines=lines) +
            self.get_values(f"{field}_{carch}", lines=lines)
        ):
            version_matcher = VersionMatcher(dep_line, is_pkg_deps=True)
            pkg_name = version_matcher.pkg_name
            if pkg_name not in dependencies:
                dependencies[pkg_name] = version_matcher
            else:
                # Same package constrained twice: merge the version constraints.
                dependencies[pkg_name].add_version_matcher(version_matcher)
        return dependencies

    def _get_build_depends(self, field: str) -> dict[str, VersionMatcher]:
        # Build-time deps come from the common (pkgbase) section only.
        return self._get_depends(field=field, lines=self._common_lines)

    def get_depends(self) -> dict[str, VersionMatcher]:
        """Runtime dependencies (common + selected package section)."""
        return self._get_depends("depends")

    def get_build_depends(self) -> dict[str, VersionMatcher]:
        """`depends` restricted to the common section (needed at build time)."""
        return self._get_build_depends("depends")

    def METHOD_NAME(self) -> dict[str, VersionMatcher]:
        """`makedepends` from the common section."""
        return self._get_build_depends("makedepends")

    def get_build_checkdepends(self) -> dict[str, VersionMatcher]:
        """`checkdepends` from the common section."""
        return self._get_build_depends("checkdepends")

    def get_version(self) -> str:
        """Full version string: ``[epoch:]pkgver-pkgrel``."""
        epoch = self.get_value("epoch")
        epoch_display = (epoch + ":") if epoch else ""
        version = self.get_value("pkgver")
        release = self.get_value("pkgrel")
        return f"{epoch_display}{version}-{release}"

    def regenerate(self) -> None:
        """Re-run ``makepkg --printsrcinfo`` and rewrite ``.SRCINFO``.

        Raises SysExit(5) when makepkg fails or produces no output.
        """
        working_directory = self.repo_path
        # With dynamic users and a repo outside the cache root, stage the
        # PKGBUILD in the build cache (presumably because the isolated user
        # cannot write to the repo — TODO confirm).
        if using_dynamic_users() and not str(self.repo_path).startswith(str(CacheRoot()())):
            working_directory = BuildCachePath()() / (
                "_info_" + (self.get_value("pkgbase") or "unknown")
            )
            if not working_directory.exists():
                working_directory.mkdir()
            shutil.copy(self.pkgbuild_path, working_directory)
        result = spawn(
            isolate_root_cmd(
                [
                    *MakePkgCommand.get(),
                    "--printsrcinfo",
                    "-p", self.pkgbuild_path.name,
                ],
                cwd=working_directory,
            ),
            cwd=working_directory,
        )
        if result.returncode != 0 or not result.stdout_text:
            print_error(
                translate("failed to generate .SRCINFO from {}:").format(self.pkgbuild_path),
            )
            print_stderr(result.stderr_text)
            raise SysExit(5)
        with open_file(self.path, "w") as srcinfo_file:
            srcinfo_file.write(result.stdout_text)
        chown_to_current(self.path)
        self.load_config()
298,360 | startup | import json
from typing import Any, Dict, Optional, Set, Type, Union, cast
import databases
import pytest
import sqlalchemy
from asgi_lifespan import LifespanManager
from fastapi import FastAPI
from httpx import AsyncClient
import ormar
from ormar.queryset.utils import translate_list_to_dict
from tests.settings import DATABASE_URL
# FastAPI app wired to a test database; force_rollback keeps test writes
# from persisting between runs.
app = FastAPI()
metadata = sqlalchemy.MetaData()
database = databases.Database(DATABASE_URL, force_rollback=True)
app.state.database = database  # endpoints reach the DB through app state
headers = {"content-type": "application/json"}
@app.on_event("startup")
async def METHOD_NAME() -> None:
    """Connect the shared database when the application starts."""
    db = app.state.database
    if not db.is_connected:
        await db.connect()
@app.on_event("shutdown")
async def shutdown() -> None:
    """Disconnect the shared database when the application stops."""
    db = app.state.database
    if db.is_connected:
        await db.disconnect()
class Department(ormar.Model):
    """Department owning many courses (see Course.department FK)."""
    class Meta:
        database = database
        metadata = metadata
    id: int = ormar.Integer(primary_key=True)
    department_name: str = ormar.String(max_length=100)
class Course(ormar.Model):
    """Course belonging to a Department; linked to Students many-to-many."""
    class Meta:
        database = database
        metadata = metadata
    id: int = ormar.Integer(primary_key=True)
    course_name: str = ormar.String(max_length=100)
    completed: bool = ormar.Boolean()
    department: Optional[Department] = ormar.ForeignKey(Department)
class Student(ormar.Model):
    """Student enrolled in many courses (through an implicit m2m table)."""
    class Meta:
        database = database
        metadata = metadata
    id: int = ormar.Integer(primary_key=True)
    name: str = ormar.String(max_length=100)
    courses = ormar.ManyToMany(Course)
# create db and tables
@pytest.fixture(autouse=True, scope="module")
def create_test_database():
    """Create the schema once per test module and drop it afterwards."""
    engine = sqlalchemy.create_engine(DATABASE_URL)
    metadata.create_all(engine)
    yield
    metadata.drop_all(engine)
# Exclusion trees for Model.dict(): hide ids and m2m through-model fields
# (pydantic style, using "__all__" for list items).
to_exclude = {
    "id": ...,
    "courses": {
        "__all__": {"id": ..., "students": {"__all__": {"id", "studentcourse"}}}
    },
}
# Hide ids and the whole courses relation.
exclude_all = {"id": ..., "courses": {"__all__"}}
# Same pruning as `to_exclude`, in ormar's nested-dict style (no "__all__").
to_exclude_ormar = {
    "id": ...,
    "courses": {"id": ..., "students": {"id", "studentcourse"}},
}
def auto_exclude_id_field(to_exclude: Any) -> Union[Dict, Set]:
    """Recursively mark the ``id`` field for exclusion at every level.

    Dicts are mutated in place (and returned); any non-dict leaf is replaced
    by the set ``{"id"}``.
    """
    if not isinstance(to_exclude, dict):
        return {"id"}
    for key in to_exclude.keys():
        to_exclude[key] = auto_exclude_id_field(to_exclude[key])
    to_exclude["id"] = Ellipsis
    return to_exclude
def generate_exclude_for_ids(model: Type[ormar.Model]) -> Dict:
    """Build an exclusion tree hiding ``id`` on *model* and all related models."""
    to_exclude_base = translate_list_to_dict(model._iterate_related_models())
    return cast(Dict, auto_exclude_id_field(to_exclude=to_exclude_base))
# Exclusion tree computed from the relation graph rooted at Department.
to_exclude_auto = generate_exclude_for_ids(model=Department)
@app.post("/departments/", response_model=Department)
async def create_department(department: Department):
    # Persist the department together with every nested related object.
    await department.save_related(follow=True, save_all=True)
    return department
@app.get("/departments/{department_name}")
async def get_department(department_name: str):
    """Load a department with all relations, serialized via `to_exclude`."""
    dept = await Department.objects.select_all(follow=True).get(
        department_name=department_name
    )
    return dept.dict(exclude=to_exclude)
@app.get("/departments/{department_name}/second")
async def get_department_exclude(department_name: str):
    """Same as get_department but using the ormar-style exclusion tree."""
    dept = await Department.objects.select_all(follow=True).get(
        department_name=department_name
    )
    return dept.dict(exclude=to_exclude_ormar)
@app.get("/departments/{department_name}/exclude")
async def get_department_exclude_all(department_name: str):
    """Serialize a department with ids and the courses relation stripped."""
    dept = await Department.objects.select_all(follow=True).get(
        department_name=department_name
    )
    return dept.dict(exclude=exclude_all)
@pytest.mark.asyncio
async def test_saving_related_in_fastapi():
    """End-to-end: POST a nested payload, then read it back through the
    endpoints that apply the different exclusion trees."""
    client = AsyncClient(app=app, base_url="http://testserver")
    async with client as client, LifespanManager(app):
        payload = {
            "department_name": "Ormar",
            "courses": [
                {
                    "course_name": "basic1",
                    "completed": True,
                    "students": [{"name": "Jack"}, {"name": "Abi"}],
                },
                {
                    "course_name": "basic2",
                    "completed": True,
                    "students": [{"name": "Kate"}, {"name": "Miranda"}],
                },
            ],
        }
        response = await client.post("/departments/", json=payload, headers=headers)
        department = Department(**response.json())
        # The whole nested graph must have been persisted.
        assert department.id is not None
        assert len(department.courses) == 2
        assert department.department_name == "Ormar"
        assert department.courses[0].course_name == "basic1"
        assert department.courses[0].completed
        assert department.courses[1].course_name == "basic2"
        assert department.courses[1].completed
        # Both exclusion styles must round-trip back to the original payload.
        response = await client.get("/departments/Ormar")
        response2 = await client.get("/departments/Ormar/second")
        assert response.json() == response2.json() == payload
        # The aggressive exclusion drops everything but the name.
        response3 = await client.get("/departments/Ormar/exclude")
        assert response3.json() == {"department_name": "Ormar"}
298,361 | do master | """ JobCommand
The JobCommand class is a command class to know about present jobs efficiency
"""
from DIRAC import S_OK, S_ERROR
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getSites
from DIRAC.ResourceStatusSystem.Command.Command import Command
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
from DIRAC.WorkloadManagementSystem.Client.WMSAdministratorClient import WMSAdministratorClient
class JobCommand(Command):
    """
    Job "master" Command: collects per-site job efficiency from the WMS and
    caches it via the ResourceManagement service.
    """

    def __init__(self, args=None, clients=None):
        super().__init__(args, clients)
        # Allow injection of pre-built clients (e.g. in tests); otherwise
        # create fresh service clients.
        if "WMSAdministrator" in self.apis:
            self.wmsAdmin = self.apis["WMSAdministrator"]
        else:
            self.wmsAdmin = WMSAdministratorClient()
        if "ResourceManagementClient" in self.apis:
            self.rmClient = self.apis["ResourceManagementClient"]
        else:
            self.rmClient = ResourceManagementClient()

    def _storeCommand(self, result):
        """
        Stores the results of doNew method on the database.
        Stops at the first failing insert and returns that error.
        """
        for jobDict in result:
            resQuery = self.rmClient.addOrModifyJobCache(
                jobDict["Site"], jobDict["MaskStatus"], jobDict["Efficiency"], jobDict["Status"]
            )
            if not resQuery["OK"]:
                return resQuery
        return S_OK()

    def _prepareCommand(self):
        """
        JobCommand requires one argument:
        - name : <str>
        """
        if "name" not in self.args:
            return S_ERROR('"name" not found in self.args')
        name = self.args["name"]
        return S_OK(name)

    def doNew(self, masterParams=None):
        """
        Gets the parameters to run, either from the master method or from its
        own arguments.
        It contacts the WMSAdministrator with a list of site names, or a single
        site.
        If there are jobs, are recorded and then returned.
        """
        if masterParams is not None:
            name = masterParams
        else:
            params = self._prepareCommand()
            if not params["OK"]:
                return params
            name = params["Value"]
        # selectDict, sortList, startItem, maxItems
        # Returns statistics of Last day !
        results = self.wmsAdmin.getSiteSummaryWeb({"Site": name}, [], 0, 0)
        if not results["OK"]:
            return results
        results = results["Value"]
        if "ParameterNames" not in results:
            return S_ERROR('Wrong result dictionary, missing "ParameterNames"')
        params = results["ParameterNames"]
        if "Records" not in results:
            return S_ERROR('Wrong formed result dictionary, missing "Records"')
        records = results["Records"]
        uniformResult = []
        for record in records:
            # This returns a dictionary with the following keys
            # 'Site', 'GridType', 'Country', 'Tier', 'MaskStatus', 'Received',
            # 'Checking', 'Staging', 'Waiting', 'Matched', 'Running', 'Stalled',
            # 'Done', 'Completed', 'Failed', 'Efficiency', 'Status'
            jobDict = dict(zip(params, record))
            # We cast efficiency to a float
            jobDict["Efficiency"] = float(jobDict["Efficiency"])
            uniformResult.append(jobDict)
        storeRes = self._storeCommand(uniformResult)
        if not storeRes["OK"]:
            return storeRes
        return S_OK(uniformResult)

    def doCache(self):
        """
        Method that reads the cache table and tries to read from it. It will
        return a list of dictionaries if there are results.
        """
        params = self._prepareCommand()
        if not params["OK"]:
            return params
        name = params["Value"]
        result = self.rmClient.selectJobCache(name)
        if result["OK"]:
            # Re-shape DB rows (Columns + Value tuples) into dictionaries.
            result = S_OK([dict(zip(result["Columns"], res)) for res in result["Value"]])
        return result

    def METHOD_NAME(self):
        """
        Master method.
        Gets all sites and calls doNew method; failures are recorded in
        self.metrics rather than aborting.
        """
        siteNames = getSites()
        if not siteNames["OK"]:
            return siteNames
        siteNames = siteNames["Value"]
        jobsResults = self.doNew(siteNames)
        if not jobsResults["OK"]:
            self.metrics["failed"].append(jobsResults["Message"])
        return S_OK(self.metrics)
class JobsWMSCommand(Command):
    """Direct (uncached) query of per-site job efficiency from the WMS."""

    def __init__(self, args=None, clients=None):
        super().__init__(args, clients)
        # Allow injection of a pre-built client; otherwise create one.
        if "WMSAdministrator" in self.apis:
            self.wmsAdmin = self.apis["WMSAdministrator"]
        else:
            self.wmsAdmin = WMSAdministratorClient()

    def doCommand(self):
        """
        Returns simple jobs efficiency
        :param args:
        - args[0]: string: should be a ValidElement
        - args[1]: string should be the name of the ValidElement
        :returns: { 'Result': 'Good'|'Fair'|'Poor'|'Idle'|'Bad' }
        """
        if "siteName" not in self.args:
            return self.returnERROR(S_ERROR("siteName is missing"))
        siteName = self.args["siteName"]
        # If siteName is None, we take all sites
        if siteName is None:
            siteName = getSites()
            if not siteName["OK"]:
                return self.returnERROR(siteName)
            siteName = siteName["Value"]
        results = self.wmsAdmin.getSiteSummaryWeb({"Site": siteName}, [], 0, 500)
        if not results["OK"]:
            return self.returnERROR(results)
        results = results["Value"]
        if "ParameterNames" not in results:
            return self.returnERROR(S_ERROR("Malformed result dictionary"))
        params = results["ParameterNames"]
        if "Records" not in results:
            return self.returnERROR(S_ERROR("Malformed result dictionary"))
        records = results["Records"]
        jobResults = []
        for record in records:
            jobDict = dict(zip(params, record))
            try:
                # Efficiency arrives as a string; normalize to float.
                jobDict["Efficiency"] = float(jobDict["Efficiency"])
            except KeyError as e:
                return self.returnERROR(S_ERROR(e))
            except ValueError as e:
                return self.returnERROR(S_ERROR(e))
            jobResults.append(jobDict)
        return S_OK(jobResults)
298,362 | get chunk layer | # pylint: disable=invalid-name, missing-docstring
from typing import List
from typing import Union
from typing import Optional
from typing import Sequence
from typing import Iterable
import numpy as np
def get_chunks_boundary(voxel_boundary, chunk_size) -> np.ndarray:
    """Number of chunks needed to cover ``voxel_boundary`` per dimension."""
    chunk_counts = np.ceil(voxel_boundary / chunk_size)
    return chunk_counts.astype(int)
def normalize_bounding_box(
    meta,
    bounding_box: Optional[Sequence[Sequence[int]]],
    bbox_is_coordinate: bool,
) -> Union[Sequence[Sequence[int]], None]:
    """Return the bounding box in chunk coordinates (or None).

    When ``bbox_is_coordinate`` is set, both corners are converted from
    volume coordinates: the lower corner is floored, the upper one ceiled.
    """
    if bounding_box is None:
        return None
    bbox = bounding_box.copy()
    if bbox_is_coordinate:
        for corner, round_up in ((0, False), (1, True)):
            bbox[corner] = _get_chunk_coordinates_from_vol_coordinates(
                meta,
                bbox[corner][0],
                bbox[corner][1],
                bbox[corner][2],
                resolution=meta.resolution,
                ceil=round_up,
            )
    return np.array(bbox, dtype=int)
def METHOD_NAME(meta, node_or_chunk_id: np.uint64) -> int:
    """Extract the layer encoded in the top bits of a node/chunk ID."""
    shift = 64 - meta.graph_config.LAYER_ID_BITS
    return int(int(node_or_chunk_id) >> shift)
def get_chunk_layers(meta, node_or_chunk_ids: Sequence[np.uint64]) -> np.ndarray:
    """Vectorized layer extraction from Node IDs or Chunk IDs.

    :param node_or_chunk_ids: np.ndarray
    :return: np.ndarray of layers, empty for empty input
    """
    if not len(node_or_chunk_ids):
        return np.array([], dtype=int)
    shift = 64 - meta.graph_config.LAYER_ID_BITS
    return np.array(node_or_chunk_ids, dtype=int) >> shift
def get_chunk_coordinates(meta, node_or_chunk_id: np.uint64) -> np.ndarray:
    """Extract the X, Y, Z chunk coordinates packed into a node/chunk ID.

    :param node_or_chunk_id: np.uint64
    :return: np.ndarray [x, y, z]
    """
    layer = METHOD_NAME(meta, node_or_chunk_id)
    bits_per_dim = meta.bitmasks[layer]
    dim_mask = 2 ** bits_per_dim - 1
    x_offset = 64 - meta.graph_config.LAYER_ID_BITS - bits_per_dim
    y_offset = x_offset - bits_per_dim
    z_offset = y_offset - bits_per_dim
    raw = int(node_or_chunk_id)
    return np.array([(raw >> offset) & dim_mask for offset in (x_offset, y_offset, z_offset)])
def get_chunk_coordinates_multiple(meta, ids: np.ndarray) -> np.ndarray:
    """
    Array version of get_chunk_coordinates.
    Assumes all given IDs are in same layer.
    """
    if not len(ids):
        return np.array([])
    bits_per_dim = meta.bitmasks[METHOD_NAME(meta, ids[0])]
    dim_mask = 2 ** bits_per_dim - 1
    x_offset = 64 - meta.graph_config.LAYER_ID_BITS - bits_per_dim
    y_offset = x_offset - bits_per_dim
    z_offset = y_offset - bits_per_dim
    as_int = np.array(ids, dtype=int)
    columns = [(as_int >> offset) & dim_mask for offset in (x_offset, y_offset, z_offset)]
    return np.column_stack(columns)
def get_chunk_id(
    meta,
    node_id: Optional[np.uint64] = None,
    layer: Optional[int] = None,
    x: Optional[int] = None,
    y: Optional[int] = None,
    z: Optional[int] = None,
) -> np.uint64:
    """(1) Extract the Chunk ID embedded in a Node ID, or
    (2) build a Chunk ID from layer, x, y and z components.
    """
    have_node = node_id is not None
    assert have_node or all(v is not None for v in [layer, x, y, z])
    if have_node:
        layer = METHOD_NAME(meta, node_id)
    bits_per_dim = meta.bitmasks[layer]
    if have_node:
        # Zero out everything below the chunk portion of the ID.
        keep = 64 - meta.graph_config.LAYER_ID_BITS - 3 * bits_per_dim
        return np.uint64((int(node_id) >> keep) << keep)
    return _compute_chunk_id(meta, layer, x, y, z)
def get_chunk_ids_from_coords(meta, layer: int, coords: np.ndarray):
    """Vectorized chunk-ID construction for many coordinates at one layer."""
    s_bits_per_dim = meta.bitmasks[layer]
    layer_offset = 64 - meta.graph_config.LAYER_ID_BITS
    x_offset = layer_offset - s_bits_per_dim
    y_offset = x_offset - s_bits_per_dim
    z_offset = y_offset - s_bits_per_dim
    coords_u64 = np.array(coords, dtype=np.uint64)
    ids = np.zeros(len(coords), dtype=np.uint64)
    ids |= layer << layer_offset
    ids |= coords_u64[:, 0] << x_offset
    ids |= coords_u64[:, 1] << y_offset
    ids |= coords_u64[:, 2] << z_offset
    return ids
def get_chunk_ids_from_node_ids(meta, ids: Iterable[np.uint64]) -> np.ndarray:
    """Vectorized extraction of Chunk IDs from Node IDs (mixed layers ok)."""
    if len(ids) == 0:
        return np.array([], dtype=np.uint64)
    bits_per_dims = np.array([meta.bitmasks[layer] for layer in get_chunk_layers(meta, ids)])
    # Per-ID offset of the sub-chunk bits to be cleared.
    offsets = 64 - meta.graph_config.LAYER_ID_BITS - 3 * bits_per_dims
    as_int = np.array(ids, dtype=int)
    return np.array((as_int >> offsets) << offsets, dtype=np.uint64)
def _compute_chunk_id(
meta,
layer: int,
x: int,
y: int,
z: int,
) -> np.uint64:
s_bits_per_dim = meta.bitmasks[layer]
if not (
x < 2 ** s_bits_per_dim and y < 2 ** s_bits_per_dim and z < 2 ** s_bits_per_dim
):
raise ValueError(
f"Coordinate is out of range \
layer: {layer} bits/dim {s_bits_per_dim}. \
[{x}, {y}, {z}]; max = {2 ** s_bits_per_dim}."
)
layer_offset = 64 - meta.graph_config.LAYER_ID_BITS
x_offset = layer_offset - s_bits_per_dim
y_offset = x_offset - s_bits_per_dim
z_offset = y_offset - s_bits_per_dim
return np.uint64(
layer << layer_offset | x << x_offset | y << y_offset | z << z_offset
)
def _get_chunk_coordinates_from_vol_coordinates(
meta,
x: int,
y: int,
z: int,
resolution: Sequence[int],
ceil: bool = False,
layer: int = 1,
) -> np.ndarray:
"""Translates volume coordinates to chunk_coordinates."""
resolution = np.array(resolution)
scaling = np.array(meta.resolution / resolution, dtype=int)
chunk_size = meta.graph_config.CHUNK_SIZE
x = (x / scaling[0] - meta.voxel_bounds[0, 0]) / chunk_size[0]
y = (y / scaling[1] - meta.voxel_bounds[1, 0]) / chunk_size[1]
z = (z / scaling[2] - meta.voxel_bounds[2, 0]) / chunk_size[2]
x /= meta.graph_config.FANOUT ** (max(layer - 2, 0))
y /= meta.graph_config.FANOUT ** (max(layer - 2, 0))
z /= meta.graph_config.FANOUT ** (max(layer - 2, 0))
coords = np.array([x, y, z])
if ceil:
coords = np.ceil(coords)
return coords.astype(int)
def get_bounding_children_chunks(
    cg_meta, layer: int, chunk_coords: Sequence[int], children_layer, return_unique=True
) -> np.ndarray:
    """Children chunk coordinates at given layer, along the boundary of a chunk"""
    chunk_coords = np.array(chunk_coords, dtype=int)
    # children chunk count along one dimension
    children_per_dim = cg_meta.graph_config.FANOUT ** (layer - children_layer)
    lower = chunk_coords * children_per_dim
    x1, y1, z1 = lower
    x2, y2, z2 = lower + children_per_dim

    def grid(r1, r2, r3):
        # https://stackoverflow.com/a/35608701/2683367
        return np.array(np.meshgrid(r1, r2, r3), dtype=int).T.reshape(-1, 3)

    # One face pair per axis: fix the axis at its two extreme values.
    faces = [
        grid((x1, x2 - 1), range(y1, y2), range(z1, z2)),
        grid(range(x1, x2), (y1, y2 - 1), range(z1, z2)),
        grid(range(x1, x2), range(y1, y2), (z1, z2 - 1)),
    ]
    stacked = np.concatenate(faces)
    in_bounds = np.all(stacked < cg_meta.layer_chunk_bounds[children_layer], axis=1)
    result = stacked[in_bounds]
    if return_unique:
        return np.unique(result, axis=0) if result.size else result
    return result
298,363 | mean ds | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.logging import metrics
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from torch import Tensor
from dataclasses import dataclass, field
@dataclass
class LabelSmoothedDualImitationCriterionConfig(FairseqDataclass):
    # Config schema for the "nat_loss" criterion (exposed via fairseq's
    # dataclass/Hydra config system).
    label_smoothing: float = field(
        default=0.0,
        metadata={"help": "epsilon for label smoothing, 0 means no label smoothing"},
    )
@register_criterion("nat_loss", dataclass=LabelSmoothedDualImitationCriterionConfig)
class LabelSmoothedDualImitationCriterion(FairseqCriterion):
    """Label-smoothed loss for non-autoregressive translation models.

    Each entry of the model's output dict either carries (out, tgt, mask, ls)
    for a label-smoothed NLL/KL loss, or a pre-computed ``loss`` tensor.
    """

    def __init__(self, task, label_smoothing):
        super().__init__(task)
        self.label_smoothing = label_smoothing

    def _compute_loss(
        self, outputs, targets, masks=None, label_smoothing=0.0, name="loss", factor=1.0
    ):
        """
        outputs: batch x len x d_model
        targets: batch x len
        masks: batch x len
        policy_logprob: if there is some policy
            depends on the likelihood score as rewards.
        """

        def METHOD_NAME(x: Tensor, dim=None) -> Tensor:
            # Mean in float32 for numerical stability, cast back to input dtype.
            return (
                x.float().mean().type_as(x)
                if dim is None
                else x.float().mean(dim).type_as(x)
            )

        if masks is not None:
            outputs, targets = outputs[masks], targets[masks]

        if masks is not None and not masks.any():
            # Nothing selected by the mask: contribute a zero loss.
            nll_loss = torch.tensor(0)
            loss = nll_loss
        else:
            logits = F.log_softmax(outputs, dim=-1)
            if targets.dim() == 1:
                losses = F.nll_loss(logits, targets.to(logits.device), reduction="none")
            else:  # soft-labels
                losses = F.kl_div(logits, targets.to(logits.device), reduction="none")
                losses = losses.sum(-1)

            nll_loss = METHOD_NAME(losses)
            if label_smoothing > 0:
                loss = (
                    nll_loss * (1 - label_smoothing) - METHOD_NAME(logits) * label_smoothing
                )
            else:
                loss = nll_loss

        loss = loss * factor
        return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor}

    def _custom_loss(self, loss, name="loss", factor=1.0):
        # Wrap a model-provided loss tensor in the common result format.
        return {"name": name, "loss": loss, "factor": factor}

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.
        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        nsentences, ntokens = sample["nsentences"], sample["ntokens"]
        # B x T
        src_tokens, src_lengths = (
            sample["net_input"]["src_tokens"],
            sample["net_input"]["src_lengths"],
        )
        tgt_tokens, prev_output_tokens = sample["target"], sample["prev_target"]

        outputs = model(src_tokens, src_lengths, prev_output_tokens, tgt_tokens)
        losses, nll_loss = [], []

        for obj in outputs:
            if outputs[obj].get("loss", None) is None:
                _losses = self._compute_loss(
                    outputs[obj].get("out"),
                    outputs[obj].get("tgt"),
                    outputs[obj].get("mask", None),
                    outputs[obj].get("ls", 0.0),
                    name=obj + "-loss",
                    factor=outputs[obj].get("factor", 1.0),
                )
            else:
                _losses = self._custom_loss(
                    outputs[obj].get("loss"),
                    name=obj + "-loss",
                    factor=outputs[obj].get("factor", 1.0),
                )

            losses += [_losses]
            if outputs[obj].get("nll_loss", False):
                nll_loss += [_losses.get("nll_loss", 0.0)]

        loss = sum(l["loss"] for l in losses)
        nll_loss = sum(l for l in nll_loss) if len(nll_loss) > 0 else loss.new_tensor(0)

        # NOTE:
        # we don't need to use sample_size as denominator for the gradient
        # here sample_size is just used for logging
        sample_size = 1
        logging_output = {
            "loss": loss.data,
            "nll_loss": nll_loss.data,
            "ntokens": ntokens,
            "nsentences": nsentences,
            "sample_size": sample_size,
        }

        for l in losses:
            # BUGFIX: the non-reduce branch previously read ``l[["loss"]]``,
            # which raises TypeError (a list is not a hashable dict key).
            logging_output[l["name"]] = (
                utils.item(l["loss"].data / l["factor"])
                if reduce
                else l["loss"].data / l["factor"]
            )

        return loss, sample_size, logging_output

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        sample_size = utils.item(
            sum(log.get("sample_size", 0) for log in logging_outputs)
        )
        loss = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
        nll_loss = utils.item(sum(log.get("nll_loss", 0) for log in logging_outputs))

        metrics.log_scalar(
            "loss", loss / sample_size / math.log(2), sample_size, round=3
        )
        metrics.log_scalar(
            "nll_loss", nll_loss / sample_size / math.log(2), sample_size, round=3
        )
        metrics.log_derived(
            "ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
        )

        # Per-head losses (keys ending in "-loss") are logged individually.
        for key in logging_outputs[0]:
            if key[-5:] == "-loss":
                val = sum(log.get(key, 0) for log in logging_outputs)
                metrics.log_scalar(
                    key[:-5],
                    val / sample_size / math.log(2) if sample_size > 0 else 0.0,
                    sample_size,
                    round=3,
                )

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improves distributed training speed.
        """
        return True
298,364 | test format | ##############################################################################
# Copyright 2009, Gerhard Weis
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the authors nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT
##############################################################################
'''
Test cases for the isotime module.
'''
import unittest
from datetime import time
from isodate import parse_time, UTC, FixedOffset, ISO8601Error, time_isoformat
from isodate import TIME_BAS_COMPLETE, TIME_BAS_MINUTE
from isodate import TIME_EXT_COMPLETE, TIME_EXT_MINUTE
from isodate import TIME_HOUR
from isodate import TZ_BAS, TZ_EXT, TZ_HOUR
# the following list contains tuples of ISO time strings and the expected
# result from the parse_time method. A result of None means an ISO8601Error
# is expected. The third element is the format handed to the reverse
# (time_isoformat) test; None skips that direction.
TEST_CASES = [('232050', time(23, 20, 50), TIME_BAS_COMPLETE + TZ_BAS),
              ('23:20:50', time(23, 20, 50), TIME_EXT_COMPLETE + TZ_EXT),
              ('2320', time(23, 20), TIME_BAS_MINUTE),
              ('23:20', time(23, 20), TIME_EXT_MINUTE),
              ('23', time(23), TIME_HOUR),
              ('232050,5', time(23, 20, 50, 500000), None),
              ('23:20:50.5', time(23, 20, 50, 500000), None),
              # test precision
              ('15:33:42.123456', time(15, 33, 42, 123456), None),
              ('15:33:42.1234564', time(15, 33, 42, 123456), None),
              ('15:33:42.1234557', time(15, 33, 42, 123456), None),
              ('2320,8', time(23, 20, 48), None),
              ('23:20,8', time(23, 20, 48), None),
              ('23,3', time(23, 18), None),
              ('232030Z', time(23, 20, 30, tzinfo=UTC),
               TIME_BAS_COMPLETE + TZ_BAS),
              ('2320Z', time(23, 20, tzinfo=UTC), TIME_BAS_MINUTE + TZ_BAS),
              ('23Z', time(23, tzinfo=UTC), TIME_HOUR + TZ_BAS),
              ('23:20:30Z', time(23, 20, 30, tzinfo=UTC),
               TIME_EXT_COMPLETE + TZ_EXT),
              ('23:20Z', time(23, 20, tzinfo=UTC), TIME_EXT_MINUTE + TZ_EXT),
              ('152746+0100', time(15, 27, 46,
               tzinfo=FixedOffset(1, 0, '+0100')), TIME_BAS_COMPLETE + TZ_BAS),
              ('152746-0500', time(15, 27, 46,
               tzinfo=FixedOffset(-5, 0, '-0500')),
               TIME_BAS_COMPLETE + TZ_BAS),
              ('152746+01', time(15, 27, 46,
               tzinfo=FixedOffset(1, 0, '+01:00')),
               TIME_BAS_COMPLETE + TZ_HOUR),
              ('152746-05', time(15, 27, 46,
               tzinfo=FixedOffset(-5, -0, '-05:00')),
               TIME_BAS_COMPLETE + TZ_HOUR),
              ('15:27:46+01:00', time(15, 27, 46,
               tzinfo=FixedOffset(1, 0, '+01:00')),
               TIME_EXT_COMPLETE + TZ_EXT),
              ('15:27:46-05:00', time(15, 27, 46,
               tzinfo=FixedOffset(-5, -0, '-05:00')),
               TIME_EXT_COMPLETE + TZ_EXT),
              ('15:27:46+01', time(15, 27, 46,
               tzinfo=FixedOffset(1, 0, '+01:00')),
               TIME_EXT_COMPLETE + TZ_HOUR),
              ('15:27:46-05', time(15, 27, 46,
               tzinfo=FixedOffset(-5, -0, '-05:00')),
               TIME_EXT_COMPLETE + TZ_HOUR),
              ('15:27:46-05:30', time(15, 27, 46,
               tzinfo=FixedOffset(-5, -30, '-05:30')),
               TIME_EXT_COMPLETE + TZ_EXT),
              ('15:27:46-0545', time(15, 27, 46,
               tzinfo=FixedOffset(-5, -45, '-0545')),
               TIME_EXT_COMPLETE + TZ_BAS),
              ('1:17:30', None, TIME_EXT_COMPLETE)]
def create_testcase(timestring, expectation, format):
    """
    Create a TestCase class for a specific test.
    This allows having a separate TestCase for each test tuple from the
    TEST_CASES list, so that a failed test won't stop other tests.
    """

    class TestTime(unittest.TestCase):
        '''
        A test case template to parse an ISO time string into a time
        object.
        '''

        def test_parse(self):
            '''
            Parse an ISO time string and compare it to the expected value.
            '''
            if expectation is None:
                self.assertRaises(ISO8601Error, parse_time, timestring)
            else:
                result = parse_time(timestring)
                self.assertEqual(result, expectation)

        def METHOD_NAME(self):
            '''
            Take time object and create ISO string from it.
            This is the reverse test to test_parse.
            '''
            if expectation is None:
                # formatting a missing value must fail loudly
                self.assertRaises(AttributeError,
                                  time_isoformat, expectation, format)
            elif format is not None:
                self.assertEqual(time_isoformat(expectation, format),
                                 timestring)

    return unittest.TestLoader().loadTestsFromTestCase(TestTime)
def test_suite():
    """Assemble one TestSuite covering every tuple in TEST_CASES."""
    all_tests = unittest.TestSuite()
    for case in TEST_CASES:
        # Each (timestring, expectation, format) tuple becomes its own case.
        all_tests.addTest(create_testcase(*case))
    return all_tests
def load_tests(loader, tests, pattern):
    """unittest ``load_tests`` protocol hook: run the TEST_CASES suite."""
    del loader, tests, pattern  # unused; required by the protocol signature
    return test_suite()
if __name__ == '__main__':
unittest.main(defaultTest='test_suite') |
298,365 | add reference to scala backend | from typing import Mapping
import abc
import json
import py4j
import py4j.java_gateway
import hail
from hail.expr import construct_expr
from hail.ir import JavaIR, finalize_randomness
from hail.ir.renderer import CSERenderer
from hail.utils.java import FatalError, Env
from hail.expr.blockmatrix_type import tblockmatrix
from hail.expr.matrix_type import tmatrix
from hail.expr.table_type import ttable
from hail.expr.types import dtype
from .backend import Backend, fatal_error_from_java_error_triplet
def handle_java_exception(f):
    """Wrap *f* so that JVM-side errors surface as Hail ``FatalError``s.

    py4j and pyspark exceptions carry the useful message on the Java side;
    this decorator extracts it and re-raises a Python-friendly error.
    """
    def deco(*args, **kwargs):
        # Imported lazily; presumably so this module can be imported without
        # pyspark installed — TODO confirm against the package's dependencies.
        import pyspark
        try:
            return f(*args, **kwargs)
        except py4j.protocol.Py4JJavaError as e:
            s = e.java_exception.toString()
            # py4j catches NoSuchElementExceptions to stop array iteration
            if s.startswith('java.util.NoSuchElementException'):
                raise
            # Ask the JVM-side helper to split the error into a triplet:
            # (deepest cause, full stack trace, error id).
            tpl = Env.jutils().handleForPython(e.java_exception)
            deepest, full, error_id = tpl._1(), tpl._2(), tpl._3()
            raise fatal_error_from_java_error_triplet(deepest, full, error_id) from None
        except pyspark.sql.utils.CapturedException as e:
            raise FatalError('%s\n\nJava stack trace:\n%s\n'
                             'Hail version: %s\n'
                             'Error summary: %s' % (e.desc, e.stackTrace, hail.__version__, e.desc)) from None
    # NOTE(review): `deco` does not apply functools.wraps, so the wrapper
    # hides f's name and docstring — consider adding it.
    return deco
class Py4JBackend(Backend):
    """Abstract backend that talks to a JVM-side Hail backend over py4j.

    Concrete subclasses are expected to supply the py4j gateway and set
    ``_jbackend`` (and, presumably, ``_jhc`` used below — TODO confirm).
    """
    # The JVM-side backend object every method delegates to.
    _jbackend: py4j.java_gateway.JavaObject

    @abc.abstractmethod
    def __init__(self):
        super(Py4JBackend, self).__init__()
        import base64

        def decode_bytearray(encoded):
            return base64.standard_b64decode(encoded)

        # By default, py4j's version of this function does extra
        # work to support python 2. This eliminates that.
        py4j.protocol.decode_bytearray = decode_bytearray

    @abc.abstractmethod
    def jvm(self):
        """Return the py4j JVM view (implemented by subclasses)."""
        pass

    @abc.abstractmethod
    def hail_package(self):
        """Return the JVM-side ``is.hail`` package object (subclass hook)."""
        pass

    @abc.abstractmethod
    def utils_package_object(self):
        """Return the JVM-side utils package object (subclass hook)."""
        pass

    def execute(self, ir, timed=False):
        """Execute a value IR on the JVM backend and decode the result.

        Returns the decoded value, or ``(value, timings)`` when *timed*.
        """
        jir = self._to_java_value_ir(ir)
        stream_codec = '{"name":"StreamBufferSpec"}'
        # print(self._hail_package.expr.ir.Pretty.apply(jir, True, -1))
        try:
            result_tuple = self._jbackend.executeEncode(jir, stream_codec, timed)
            (result, timings) = (result_tuple._1(), result_tuple._2())
            # The JVM returns an encoded payload; decode with the IR's type.
            value = ir.typ._from_encoding(result)
            return (value, timings) if timed else value
        except FatalError as e:
            # Re-raise with user-facing context attached to the failing IR.
            raise e.maybe_user_error(ir) from None

    async def _async_execute(self, ir, timed=False):
        """Async execution is unsupported over py4j."""
        raise NotImplementedError('no async available in Py4JBackend')

    def persist_expression(self, expr):
        """Evaluate *expr* on the JVM and wrap the persisted literal as an expression."""
        return construct_expr(
            JavaIR(self._jbackend.executeLiteral(self._to_java_value_ir(expr._ir))),
            expr.dtype
        )

    def set_flags(self, **flags: str):
        """Set backend feature flags; rejects names the backend does not know."""
        available = self._jbackend.availableFlags()
        invalid = []
        for flag, value in flags.items():
            if flag in available:
                self._jbackend.setFlag(flag, value)
            else:
                invalid.append(flag)
        if len(invalid) != 0:
            raise FatalError("Flags {} not valid. Valid flags: \n {}"
                             .format(', '.join(invalid), '\n '.join(available)))

    def get_flags(self, *flags) -> Mapping[str, str]:
        """Return the current value of each requested backend flag."""
        return {flag: self._jbackend.getFlag(flag) for flag in flags}

    def METHOD_NAME(self, rg):
        """Register a reference genome's config with the Scala backend."""
        self._jbackend.pyAddReference(json.dumps(rg._config))

    def _remove_reference_from_scala_backend(self, name):
        """Remove a reference genome (by name) from the Scala backend."""
        self._jbackend.pyRemoveReference(name)

    def from_fasta_file(self, name, fasta_file, index_file, x_contigs, y_contigs, mt_contigs, par):
        """Build a reference-genome config from a FASTA file; returns parsed JSON."""
        return json.loads(self._jbackend.pyFromFASTAFile(name, fasta_file, index_file, x_contigs, y_contigs, mt_contigs, par))

    def load_references_from_dataset(self, path):
        """Load reference genomes stored with a dataset; returns parsed JSON."""
        return json.loads(self._jbackend.pyLoadReferencesFromDataset(path))

    def add_sequence(self, name, fasta_file, index_file):
        """Attach a FASTA sequence to an existing reference genome."""
        self._jbackend.pyAddSequence(name, fasta_file, index_file)

    def remove_sequence(self, name):
        """Detach the FASTA sequence from a reference genome."""
        self._jbackend.pyRemoveSequence(name)

    def add_liftover(self, name, chain_file, dest_reference_genome):
        """Register a liftover chain file from genome *name* to the destination."""
        self._jbackend.pyAddLiftover(name, chain_file, dest_reference_genome)

    def remove_liftover(self, name, dest_reference_genome):
        """Remove a previously registered liftover."""
        self._jbackend.pyRemoveLiftover(name, dest_reference_genome)

    def parse_vcf_metadata(self, path):
        """Parse VCF header metadata; returns parsed JSON.

        NOTE(review): uses ``self._jhc``, which is not defined in this class —
        presumably provided by a concrete subclass; verify.
        """
        return json.loads(self._jhc.pyParseVCFMetadataJSON(self._jbackend.fs(), path))

    def index_bgen(self, files, index_file_map, referenceGenomeName, contig_recoding, skip_invalid_loci):
        """Create index files for the given BGEN files on the JVM side."""
        self._jbackend.pyIndexBgen(files, index_file_map, referenceGenomeName, contig_recoding, skip_invalid_loci)

    def import_fam(self, path: str, quant_pheno: bool, delimiter: str, missing: str):
        """Import a PLINK .fam file; returns parsed JSON."""
        return json.loads(self._jbackend.pyImportFam(path, quant_pheno, delimiter, missing))

    def _to_java_ir(self, ir, parse):
        """Render *ir* and parse it JVM-side, caching the result on ``ir._jir``."""
        if not hasattr(ir, '_jir'):
            r = CSERenderer(stop_at_jir=True)
            # FIXME parse should be static
            ir._jir = parse(r(finalize_randomness(ir)), ir_map=r.jirs)
        return ir._jir

    # NOTE(review): the mutable default arguments below are shared across
    # calls; they are only read here, so this is harmless, but prefer
    # ``=None`` defaults if these ever get mutated.
    def _parse_value_ir(self, code, ref_map={}, ir_map={}):
        """Parse rendered value-IR text on the JVM."""
        return self._jbackend.parse_value_ir(
            code,
            {k: t._parsable_string() for k, t in ref_map.items()},
            ir_map)

    def _parse_table_ir(self, code, ir_map={}):
        """Parse rendered table-IR text on the JVM."""
        return self._jbackend.parse_table_ir(code, ir_map)

    def _parse_matrix_ir(self, code, ir_map={}):
        """Parse rendered matrix-IR text on the JVM."""
        return self._jbackend.parse_matrix_ir(code, ir_map)

    def _parse_blockmatrix_ir(self, code, ir_map={}):
        """Parse rendered block-matrix-IR text on the JVM."""
        return self._jbackend.parse_blockmatrix_ir(code, ir_map)

    def _to_java_value_ir(self, ir):
        return self._to_java_ir(ir, self._parse_value_ir)

    def _to_java_table_ir(self, ir):
        return self._to_java_ir(ir, self._parse_table_ir)

    def _to_java_matrix_ir(self, ir):
        return self._to_java_ir(ir, self._parse_matrix_ir)

    def _to_java_blockmatrix_ir(self, ir):
        return self._to_java_ir(ir, self._parse_blockmatrix_ir)

    def value_type(self, ir):
        """Compute the Python dtype of a value IR via the JVM type-checker."""
        jir = self._to_java_value_ir(ir)
        return dtype(jir.typ().toString())

    def table_type(self, tir):
        """Compute the table type of a table IR via the JVM type-checker."""
        jir = self._to_java_table_ir(tir)
        return ttable._from_java(jir.typ())

    def matrix_type(self, mir):
        """Compute the matrix type of a matrix IR via the JVM type-checker."""
        jir = self._to_java_matrix_ir(mir)
        return tmatrix._from_java(jir.typ())

    def blockmatrix_type(self, bmir):
        """Compute the block-matrix type of a block-matrix IR via the JVM."""
        jir = self._to_java_blockmatrix_ir(bmir)
        return tblockmatrix._from_java(jir.typ())
@property
def requires_lowering(self):
return True |
298,366 | test ellipsis aug | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
"""
Tests for numpy advanced indexing syntax. See also:
https://numpy.org/devdocs/reference/arrays.indexing.html
"""
import dace
import numpy as np
import pytest
N = dace.symbol('N')
M = dace.symbol('M')
def test_flat():
    """``A.flat`` inside a dace program must match NumPy's flat iterator."""
    @dace.program
    def indexing_test(A: dace.float64[20, 30]):
        return A.flat
    A = np.random.rand(20, 30)
    res = indexing_test(A)
    assert np.allclose(A.flat, res)
def test_flat_noncontiguous():
    """``.flat`` on a transposed (non-contiguous) view.

    Requires the ``allow_view_arguments`` compiler option since the input is
    a view, not an owning array.
    """
    with dace.config.set_temporary('compiler', 'allow_view_arguments', value=True):
        @dace.program
        def indexing_test(A):
            return A.flat
        A = np.random.rand(20, 30).transpose()
        res = indexing_test(A)
        assert np.allclose(A.flat, res)
def test_ellipsis():
    """Ellipsis (``...``) combined with a slice and a scalar index on read."""
    @dace.program
    def indexing_test(A: dace.float64[5, 5, 5, 5, 5]):
        return A[1:5, ..., 0]
    A = np.random.rand(5, 5, 5, 5, 5)
    res = indexing_test(A)
    assert np.allclose(A[1:5, ..., 0], res)
def test_aug_implicit():
    """Augmented assignment through chained (view-of-view) slices."""
    @dace.program
    def indexing_test(A: dace.float64[5, 5, 5, 5, 5]):
        A[:, 1:5][:, 0:2] += 5
    A = np.random.rand(5, 5, 5, 5, 5)
    regression = np.copy(A)
    regression[:, 1:5][:, 0:2] += 5
    indexing_test(A)
    assert np.allclose(A, regression)
def METHOD_NAME():
    """Augmented assignment through an Ellipsis index (in-place update)."""
    @dace.program
    def indexing_test(A: dace.float64[5, 5, 5, 5, 5]):
        A[1:5, ..., 0] += 5
    A = np.random.rand(5, 5, 5, 5, 5)
    regression = np.copy(A)
    regression[1:5, ..., 0] += 5
    indexing_test(A)
    assert np.allclose(A, regression)
def test_newaxis():
    """``np.newaxis`` and ``None`` both insert singleton dimensions."""
    @dace.program
    def indexing_test(A: dace.float64[20, 30]):
        return A[:, np.newaxis, None, :]
    A = np.random.rand(20, 30)
    res = indexing_test(A)
    assert res.shape == (20, 1, 1, 30)
    assert np.allclose(A[:, np.newaxis, None, :], res)
def test_multiple_newaxis():
    """Several ``np.newaxis`` insertions interleaved with full slices."""
    @dace.program
    def indexing_test(A: dace.float64[10, 20, 30]):
        return A[np.newaxis, :, np.newaxis, np.newaxis, :, np.newaxis, :, np.newaxis]
    A = np.random.rand(10, 20, 30)
    res = indexing_test(A)
    assert res.shape == (1, 10, 1, 1, 20, 1, 30, 1)
    assert np.allclose(A[np.newaxis, :, np.newaxis, np.newaxis, :, np.newaxis, :, np.newaxis], res)
def test_index_intarr_1d():
    """Advanced indexing with a 1-D integer array passed as an argument."""
    @dace.program
    def indexing_test(A: dace.float64[N], indices: dace.int32[M]):
        return A[indices]
    A = np.random.rand(20)
    indices = [1, 10, 15]
    # M is a symbolic size and must be bound explicitly for the list input.
    res = indexing_test(A, indices, M=3)
    assert np.allclose(A[indices], res)
def test_index_intarr_1d_literal():
    """Advanced indexing with an integer-list literal inside the program."""
    @dace.program
    def indexing_test(A: dace.float64[20]):
        return A[[1, 10, 15]]
    A = np.random.rand(20)
    indices = [1, 10, 15]
    res = indexing_test(A)
    assert np.allclose(A[indices], res)
def test_index_intarr_1d_constant():
    """Advanced indexing with an index list captured from the enclosing scope."""
    indices = [1, 10, 15]
    @dace.program
    def indexing_test(A: dace.float64[20]):
        return A[indices]
    A = np.random.rand(20)
    res = indexing_test(A)
    assert np.allclose(A[indices], res)
def test_index_intarr_1d_multi():
    """Mixed advanced indexing: int array, slice with step, and list literal."""
    @dace.program
    def indexing_test(A: dace.float64[20, 10, 30], indices: dace.int32[3]):
        return A[indices, 2:7:2, [15, 10, 1]]
    A = np.random.rand(20, 10, 30)
    indices = [1, 10, 15]
    res = indexing_test(A, indices)
    # FIXME: NumPy behavior is unclear in this case
    assert np.allclose(np.diag(A[indices, 2:7:2, [15, 10, 1]]), res)
def test_index_intarr_nd():
    """Advanced indexing with two 2-D integer index arrays (rows, columns)."""
    @dace.program
    def indexing_test(A: dace.float64[4, 3], rows: dace.int64[2, 2], columns: dace.int64[2, 2]):
        return A[rows, columns]
    A = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], dtype=np.float64)
    rows = np.array([[0, 0], [3, 3]], dtype=np.intp)
    columns = np.array([[0, 2], [0, 2]], dtype=np.intp)
    expected = A[rows, columns]
    res = indexing_test(A, rows, columns)
    assert np.allclose(expected, res)
def test_index_boolarr_rhs():
    """Right-hand-side boolean-array indexing must raise IndexError."""
    @dace.program
    def indexing_test(A: dace.float64[20, 30]):
        return A[A > 15]
    A = np.ndarray((20, 30), dtype=np.float64)
    for i in range(20):
        A[i, :] = np.arange(0, 30)
    regression = A[A > 15]
    # Right-hand side boolean array indexing is unsupported
    with pytest.raises(IndexError):
        res = indexing_test(A)
        # NOTE(review): once the expected IndexError fires on the line above,
        # `res` is never bound and this assert is skipped; it only runs if
        # the feature unexpectedly starts working.
        assert np.allclose(regression, res)
def test_index_multiboolarr():
    """Using multiple boolean index arrays at once must raise IndexError."""
    @dace.program
    def indexing_test(A: dace.float64[20, 20], B: dace.bool[20]):
        A[B, B] = 2
    A = np.ndarray((20, 20), dtype=np.float64)
    for i in range(20):
        A[i, :] = np.arange(0, 20)
    B = A[:, 1] > 0
    # Advanced indexing with multiple boolean arrays should be disallowed
    with pytest.raises(IndexError):
        indexing_test(A, B)
def test_index_boolarr_fixed():
    """Augmented assignment through a boolean mask passed as an argument."""
    @dace.program
    def indexing_test(A: dace.float64[20, 30], barr: dace.bool[20, 30]):
        A[barr] += 5
    A = np.ndarray((20, 30), dtype=np.float64)
    for i in range(20):
        A[i, :] = np.arange(0, 30)
    barr = A > 15
    regression = np.copy(A)
    regression[barr] += 5
    indexing_test(A, barr)
    assert np.allclose(regression, A)
def test_index_boolarr_inline():
    """Assignment through a boolean mask computed inside the program."""
    @dace.program
    def indexing_test(A: dace.float64[20, 30]):
        A[A > 15] = 2
    A = np.ndarray((20, 30), dtype=np.float64)
    for i in range(20):
        A[i, :] = np.arange(0, 30)
    regression = np.copy(A)
    regression[A > 15] = 2
    indexing_test(A)
    assert np.allclose(regression, A)
# Allow running this test module directly, outside of pytest.
if __name__ == '__main__':
    test_flat()
    test_flat_noncontiguous()
    test_ellipsis()
    test_aug_implicit()
    METHOD_NAME()
    test_newaxis()
    test_multiple_newaxis()
    test_index_intarr_1d()
    test_index_intarr_1d_literal()
    test_index_intarr_1d_constant()
    test_index_intarr_1d_multi()
    test_index_intarr_nd()
    test_index_boolarr_rhs()
    test_index_multiboolarr()
    test_index_boolarr_fixed()
test_index_boolarr_inline() |
298,367 | save base | from collections.abc import Collection, Iterable, Sequence
from typing import Any, ClassVar, Final, TypeVar
from django.core.checks.messages import CheckMessage
from django.core.exceptions import MultipleObjectsReturned as BaseMultipleObjectsReturned
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db.models import BaseConstraint, Field
from django.db.models.manager import BaseManager, Manager
from django.db.models.options import Options
from typing_extensions import Self
_Self = TypeVar("_Self", bound=Model)
# Stub for the descriptor backing ModelState.fields_cache (no public API).
class ModelStateFieldsCacheDescriptor: ...
class ModelState:
    """Typing stub for the per-instance ``_state`` bookkeeping object."""
    # Alias of the database the instance was loaded from, or None.
    db: str | None
    # True while the instance has not yet been saved to the database.
    adding: bool
    fields_cache: ModelStateFieldsCacheDescriptor
class ModelBase(type):
    """Typing stub for Django's model metaclass (manager accessors only)."""
    @property
    def _default_manager(cls: type[_Self]) -> BaseManager[_Self]: ... # type: ignore[misc]
    @property
    def _base_manager(cls: type[_Self]) -> BaseManager[_Self]: ... # type: ignore[misc]
class Model(metaclass=ModelBase):
    """Typing stub for ``django.db.models.Model`` (no runtime behavior)."""
    # Note: these two metaclass generated attributes don't really exist on the 'Model'
    # class, runtime they are only added on concrete subclasses of 'Model'. The
    # metaclass also sets up correct inheritance from concrete parent models exceptions.
    # Our mypy plugin aligns with this behaviour and will remove the 2 attributes below
    # and re-add them to correct concrete subclasses of 'Model'
    DoesNotExist: Final[type[ObjectDoesNotExist]]
    MultipleObjectsReturned: Final[type[BaseMultipleObjectsReturned]]
    # This 'objects' attribute will be deleted, via the plugin, in favor of managing it
    # to only exist on subclasses it exists on during runtime.
    objects: ClassVar[Manager[Self]]
    class Meta: ...
    _meta: Options[Any]
    pk: Any
    _state: ModelState
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    @classmethod
    def add_to_class(cls, name: str, value: Any) -> Any: ...
    @classmethod
    def from_db(cls, db: str | None, field_names: Collection[str], values: Collection[Any]) -> Self: ...
    def delete(self, using: Any = ..., keep_parents: bool = ...) -> tuple[int, dict[str, int]]: ...
    async def adelete(self, using: Any = ..., keep_parents: bool = ...) -> tuple[int, dict[str, int]]: ...
    def full_clean(
        self, exclude: Iterable[str] | None = ..., validate_unique: bool = ..., validate_constraints: bool = ...
    ) -> None: ...
    def clean(self) -> None: ...
    def clean_fields(self, exclude: Collection[str] | None = ...) -> None: ...
    def validate_unique(self, exclude: Collection[str] | None = ...) -> None: ...
    def date_error_message(self, lookup_type: str, field_name: str, unique_for: str) -> ValidationError: ...
    def unique_error_message(self, model_class: type[Self], unique_check: Sequence[str]) -> ValidationError: ...
    def validate_constraints(self, exclude: Collection[str] | None = ...) -> None: ...
    def get_constraints(self) -> list[tuple[type[Model], Sequence[BaseConstraint]]]: ...
    def save(
        self,
        force_insert: bool = ...,
        force_update: bool = ...,
        using: str | None = ...,
        update_fields: Iterable[str] | None = ...,
    ) -> None: ...
    async def asave(
        self,
        force_insert: bool = ...,
        force_update: bool = ...,
        using: str | None = ...,
        update_fields: Iterable[str] | None = ...,
    ) -> None: ...
    # Low-level save variant mirroring save(); `raw` presumably bypasses some
    # of the normal save machinery (signals/field preprocessing) — TODO confirm
    # against Django's Model.save_base documentation.
    def METHOD_NAME(
        self,
        raw: bool = ...,
        force_insert: bool = ...,
        force_update: bool = ...,
        using: str | None = ...,
        update_fields: Iterable[str] | None = ...,
    ) -> None: ...
    def refresh_from_db(self, using: str | None = ..., fields: Sequence[str] | None = ...) -> None: ...
    async def arefresh_from_db(self, using: str | None = ..., fields: Sequence[str] | None = ...) -> None: ...
    def serializable_value(self, field_name: str) -> Any: ...
    def prepare_database_save(self, field: Field) -> Any: ...
    def get_deferred_fields(self) -> set[str]: ...
    @classmethod
    def check(cls, **kwargs: Any) -> list[CheckMessage]: ...
    def __getstate__(self) -> dict: ...
def model_unpickle(model_id: tuple[str, str] | type[Model]) -> Model: ... |
298,368 | test trace multiple coroutines ot outer | import asyncio
import time
import pytest
from ddtrace.context import Context
from ddtrace.contrib.asyncio import context_provider
from ddtrace.contrib.asyncio.helpers import set_call_context
from ddtrace.contrib.asyncio.patch import patch
from ddtrace.contrib.asyncio.patch import unpatch
from ddtrace.internal.compat import CONTEXTVARS_IS_AVAILABLE
from ddtrace.provider import DefaultContextProvider
from tests.opentracer.utils import init_tracer
_orig_create_task = asyncio.BaseEventLoop.create_task
def test_event_loop_unpatch(tracer):
    """Unpatching must restore the default provider and original create_task."""
    patch()
    # ensures that the event loop can be unpatched
    unpatch()
    assert isinstance(tracer.context_provider, DefaultContextProvider)
    assert asyncio.BaseEventLoop.create_task == _orig_create_task
@pytest.mark.asyncio
async def test_event_loop_double_patch(tracer):
    """Patching twice must not double-instrument; reuses the chaining checks."""
    # ensures that double patching will not double instrument
    # the event loop
    patch()
    patch()
    await test_tasks_chaining(tracer)
@pytest.mark.asyncio
async def test_tasks_chaining(tracer):
    """Trace context must propagate across tasks spawned with ensure_future."""
    # ensures that the context is propagated between different tasks
    @tracer.wrap("spawn_task")
    async def coro_3():
        await asyncio.sleep(0.01)

    async def coro_2():
        # This will have a new context, first run will test that the
        # new context works correctly, second run will test if when we
        # pop off the last span on the context if it is still parented
        # correctly
        await coro_3()
        await coro_3()

    @tracer.wrap("main_task")
    async def coro_1():
        await asyncio.ensure_future(coro_2())

    await coro_1()
    traces = tracer.pop_traces()
    assert len(traces) == 1
    spans = traces[0]
    assert len(spans) == 3
    main_task = spans[0]
    spawn_task1 = spans[1]
    spawn_task2 = spans[2]
    # check if the context has been correctly propagated
    assert spawn_task1.trace_id == main_task.trace_id
    assert spawn_task1.parent_id == main_task.span_id
    assert spawn_task2.trace_id == main_task.trace_id
    assert spawn_task2.parent_id == main_task.span_id
@pytest.mark.asyncio
async def test_concurrent_chaining(tracer):
    """Concurrent gather()-ed tasks must all parent to the enclosing span."""
    # ensures that the context is correctly propagated when
    # concurrent tasks are created from a common tracing block
    tracer.configure(context_provider=context_provider)

    @tracer.wrap("f1")
    async def f1():
        await asyncio.sleep(0.01)

    @tracer.wrap("f2")
    async def f2():
        await asyncio.sleep(0.01)

    with tracer.trace("main_task"):
        await asyncio.gather(f1(), f2())
        # do additional synchronous work to confirm main context is
        # correctly handled
        with tracer.trace("main_task_child"):
            time.sleep(0.01)
    traces = tracer.pop_traces()
    assert len(traces) == 1
    assert len(traces[0]) == 4
    main_task = traces[0][0]
    child_1 = traces[0][1]
    child_2 = traces[0][2]
    main_task_child = traces[0][3]
    # check if the context has been correctly propagated
    assert child_1.trace_id == main_task.trace_id
    assert child_1.parent_id == main_task.span_id
    assert child_2.trace_id == main_task.trace_id
    assert child_2.parent_id == main_task.span_id
    assert main_task_child.trace_id == main_task.trace_id
    assert main_task_child.parent_id == main_task.span_id
@pytest.mark.skipif(CONTEXTVARS_IS_AVAILABLE, reason="only applicable to legacy asyncio provider")
@pytest.mark.asyncio
async def test_propagation_with_set_call_context(tracer):
    """Attaching a Context to the running Task must resume the prior trace."""
    # ensures that if a new Context is attached to the current
    # running Task via helpers, a previous trace is resumed
    tracer.configure(context_provider=context_provider)
    # NOTE(review): asyncio.Task.current_task() was removed in Python 3.9;
    # this test is skipped whenever contextvars exist (3.7+), so it only ever
    # runs on legacy interpreters where the call is still available.
    task = asyncio.Task.current_task()
    ctx = Context(trace_id=100, span_id=101)
    set_call_context(task, ctx)
    with tracer.trace("async_task"):
        await asyncio.sleep(0.01)
    traces = tracer.pop_traces()
    assert len(traces) == 1
    assert len(traces[0]) == 1
    span = traces[0][0]
    assert span.trace_id == 100
    assert span.parent_id == 101
@pytest.mark.asyncio
async def test_propagation_with_new_context(tracer):
    """An explicitly activated Context must seed new spans' trace/parent ids."""
    # ensures that if a new Context is activated, a trace
    # with the Context arguments is created
    ctx = Context(trace_id=100, span_id=101)
    tracer.context_provider.activate(ctx)
    with tracer.trace("async_task"):
        await asyncio.sleep(0.01)
    traces = tracer.pop_traces()
    assert len(traces) == 1
    assert len(traces[0]) == 1
    span = traces[0][0]
    assert span.trace_id == 100
    assert span.parent_id == 101
@pytest.mark.asyncio
async def METHOD_NAME(tracer):
    """OpenTracing version of test_trace_multiple_coroutines.

    Here the OpenTracing span is the *outer* one and the ddtrace span is
    created inside the awaited coroutine.
    """
    # if multiple coroutines have nested tracing, they must belong
    # to the same trace
    async def coro():
        # another traced coroutine
        with tracer.trace("coroutine_2"):
            return 42

    ot_tracer = init_tracer("asyncio_svc", tracer)
    with ot_tracer.start_active_span("coroutine_1"):
        value = await coro()
    # the coroutine has been called correctly
    assert 42 == value
    # a single trace has been properly reported
    traces = tracer.pop_traces()
    assert 1 == len(traces)
    assert 2 == len(traces[0])
    assert "coroutine_1" == traces[0][0].name
    assert "coroutine_2" == traces[0][1].name
    # the parenting is correct
    assert traces[0][0] == traces[0][1]._parent
    assert traces[0][0].trace_id == traces[0][1].trace_id
@pytest.mark.asyncio
async def test_trace_multiple_coroutines_ot_inner(tracer):
    """OpenTracing version of test_trace_multiple_coroutines.

    Mirror image of the outer variant: the ddtrace span is outer and the
    OpenTracing span is created inside the awaited coroutine.
    """
    # if multiple coroutines have nested tracing, they must belong
    # to the same trace
    ot_tracer = init_tracer("asyncio_svc", tracer)

    async def coro():
        # another traced coroutine
        with ot_tracer.start_active_span("coroutine_2"):
            return 42

    with tracer.trace("coroutine_1"):
        value = await coro()
    # the coroutine has been called correctly
    assert 42 == value
    # a single trace has been properly reported
    traces = tracer.pop_traces()
    assert 1 == len(traces)
    assert 2 == len(traces[0])
    assert "coroutine_1" == traces[0][0].name
    assert "coroutine_2" == traces[0][1].name
    # the parenting is correct
    assert traces[0][0] == traces[0][1]._parent
assert traces[0][0].trace_id == traces[0][1].trace_id |
298,369 | field | """
cross_product
=============
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class cross_product(Operator):
    """Computes the cross product of two vector fields. Fields can have the
    same location or Elemental Nodal and Nodal locations.

    Parameters
    ----------
    fieldA : Field or FieldsContainer or float
        Field or fields container with only one field
        is expected
    fieldB : Field or FieldsContainer or float
        Field or fields container with only one field
        is expected

    Examples
    --------
    >>> from ansys.dpf import core as dpf

    >>> # Instantiate operator
    >>> op = dpf.operators.math.cross_product()

    >>> # Make input connections
    >>> my_fieldA = dpf.Field()
    >>> op.inputs.fieldA.connect(my_fieldA)
    >>> my_fieldB = dpf.Field()
    >>> op.inputs.fieldB.connect(my_fieldB)

    >>> # Instantiate operator and connect inputs in one line
    >>> op = dpf.operators.math.cross_product(
    ...     fieldA=my_fieldA,
    ...     fieldB=my_fieldB,
    ... )

    >>> # Get output data
    >>> result_field = op.outputs.field()
    """

    def __init__(self, fieldA=None, fieldB=None, config=None, server=None):
        # "cross_product" is the server-side DPF operator name.
        super().__init__(name="cross_product", config=config, server=server)
        self._inputs = InputsCrossProduct(self)
        self._outputs = OutputsCrossProduct(self)
        if fieldA is not None:
            self.inputs.fieldA.connect(fieldA)
        if fieldB is not None:
            self.inputs.fieldB.connect(fieldB)

    @staticmethod
    def _spec():
        """Describe the operator's input/output pins for the DPF framework."""
        description = """Computes the cross product of two vector fields. Fields can have the
            same location or Elemental Nodal and Nodal locations."""
        spec = Specification(
            description=description,
            map_input_pin_spec={
                0: PinSpecification(
                    name="fieldA",
                    type_names=[
                        "field",
                        "fields_container",
                        "double",
                        "vector<double>",
                    ],
                    optional=False,
                    document="""Field or fields container with only one field
        is expected""",
                ),
                1: PinSpecification(
                    name="fieldB",
                    type_names=[
                        "field",
                        "fields_container",
                        "double",
                        "vector<double>",
                    ],
                    optional=False,
                    document="""Field or fields container with only one field
        is expected""",
                ),
            },
            map_output_pin_spec={
                0: PinSpecification(
                    name="field",
                    type_names=["field"],
                    optional=False,
                    document="""""",
                ),
            },
        )
        return spec

    @staticmethod
    def default_config(server=None):
        """Returns the default config of the operator.

        This config can then be changed to the user needs and be used to
        instantiate the operator. The Configuration allows to customize
        how the operation will be processed by the operator.

        Parameters
        ----------
        server : server.DPFServer, optional
            Server with channel connected to the remote or local instance. When
            ``None``, attempts to use the global server.
        """
        return Operator.default_config(name="cross_product", server=server)

    @property
    def inputs(self):
        """Enables to connect inputs to the operator

        Returns
        --------
        inputs : InputsCrossProduct
        """
        return super().inputs

    @property
    def outputs(self):
        """Enables to get outputs of the operator by evaluating it

        Returns
        --------
        outputs : OutputsCrossProduct
        """
        return super().outputs
class InputsCrossProduct(_Inputs):
    """Intermediate class used to connect user inputs to
    cross_product operator.

    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.math.cross_product()
    >>> my_fieldA = dpf.Field()
    >>> op.inputs.fieldA.connect(my_fieldA)
    >>> my_fieldB = dpf.Field()
    >>> op.inputs.fieldB.connect(my_fieldB)
    """

    def __init__(self, op: Operator):
        super().__init__(cross_product._spec().inputs, op)
        # One Input per pin declared in cross_product._spec(); the integer is
        # the pin number on the server-side operator.
        self._fieldA = Input(cross_product._spec().input_pin(0), 0, op, -1)
        self._inputs.append(self._fieldA)
        self._fieldB = Input(cross_product._spec().input_pin(1), 1, op, -1)
        self._inputs.append(self._fieldB)

    @property
    def fieldA(self):
        """Allows to connect fieldA input to the operator.

        Field or fields container with only one field
        is expected

        Parameters
        ----------
        my_fieldA : Field or FieldsContainer or float

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.math.cross_product()
        >>> op.inputs.fieldA.connect(my_fieldA)
        >>> # or
        >>> op.inputs.fieldA(my_fieldA)
        """
        return self._fieldA

    @property
    def fieldB(self):
        """Allows to connect fieldB input to the operator.

        Field or fields container with only one field
        is expected

        Parameters
        ----------
        my_fieldB : Field or FieldsContainer or float

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.math.cross_product()
        >>> op.inputs.fieldB.connect(my_fieldB)
        >>> # or
        >>> op.inputs.fieldB(my_fieldB)
        """
        return self._fieldB
class OutputsCrossProduct(_Outputs):
    """Intermediate class used to get outputs from
    cross_product operator.

    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.math.cross_product()
    >>> # Connect inputs : op.inputs. ...
    >>> result_field = op.outputs.field()
    """

    def __init__(self, op: Operator):
        super().__init__(cross_product._spec().outputs, op)
        # Single output pin 0 ("field") declared in cross_product._spec().
        self._field = Output(cross_product._spec().output_pin(0), 0, op)
        self._outputs.append(self._field)

    @property
    def METHOD_NAME(self):
        """Allows to get field output of the operator

        Returns
        ----------
        my_field : Field

        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.math.cross_product()
        >>> # Connect inputs : op.inputs. ...
        >>> result_field = op.outputs.field()
        """ # noqa: E501
return self._field |
class ConfigNode:
    """An interior node of a hierarchical configuration tree.

    Children are either nested ``ConfigNode`` instances or ``ConfigLeaf``
    values, keyed by name.
    """

    def __init__(self, key, parent=None):
        self.key = key        # name of this node within its parent
        self.children = {}    # key -> ConfigNode | ConfigLeaf
        self.parent = parent  # None for the root node

    def add(self, key, child):
        """Attach *child* under *key* and reparent it to this node."""
        self.children[key] = child
        child.parent = self

    def update(self, other, extra_data=None):
        """Upsert every (possibly nested) key/value pair of dict *other*."""
        def _recursive_upsert(other_dict, keys):
            for key, val in other_dict.items():
                new_keys = keys + [key]
                if isinstance(val, dict):
                    _recursive_upsert(val, new_keys)
                else:
                    self.upsert_from_list(new_keys, val, extra_data)
        _recursive_upsert(other, keys=[])

    def get_child(self, key, constructor=None):
        """Return the child at *key*; create it with *constructor* if absent.

        Raises KeyError when the key is missing and no constructor is given.
        """
        if key in self.children:
            child = self.children[key]
        elif constructor is not None:
            child = self.children[key] = constructor()
        else:
            raise KeyError(f"Cannot get key {key}")
        return child

    def METHOD_NAME(self, key):
        """Ensure a child ConfigNode named *key* exists and return it.

        Returning the node (the original discarded it) is backward
        compatible and lets callers chain on the new child.
        """
        return self.get_child(key, lambda: ConfigNode(key, parent=self))

    def remove_child(self, key):
        """Detach and discard the child stored at *key* (KeyError if absent)."""
        self.children.pop(key)

    def upsert_from_list(self, keys, value, extra_data=None):
        """Walk/create the node path *keys* and set the final leaf to *value*."""
        key, *next_keys = keys
        if len(next_keys) == 0:  # reach the end of the upsert
            leaf = self.get_child(
                key,
                lambda: ConfigLeaf(
                    key, parent=self, value=value, extra_data=extra_data
                ),
            )
            leaf.value = value
            leaf.extra_data = extra_data
            if not isinstance(leaf, ConfigLeaf):
                raise RuntimeError(f"Expected a ConfigLeaf, got {leaf}!")
        else:
            next_node = self.get_child(key, lambda: ConfigNode(key, parent=self))
            if not isinstance(next_node, ConfigNode):
                raise RuntimeError(f"Expected a ConfigNode, got {next_node}!")
            next_node.upsert_from_list(next_keys, value, extra_data)

    def get_from_list(self, key_list):
        """Follow the sequence *key_list* down the tree; KeyError if any hop is missing."""
        next, *key_list_remainder = key_list
        child = self.get_child(next)
        if len(key_list_remainder) == 0:
            return child
        else:
            return child.get_from_list(key_list_remainder)

    def get(self, *keys):
        """Varargs convenience wrapper around :meth:`get_from_list`."""
        return self.get_from_list(keys)

    def get_leaf(self, *keys, callback=lambda leaf: leaf.value):
        """Resolve *keys* to a leaf and return ``callback(leaf)`` (value by default)."""
        leaf = self.get_from_list(keys)
        return callback(leaf)

    def pop_leaf(self, keys):
        """Remove the leaf at the end of the path *keys* from its parent node."""
        *node_keys, leaf_key = keys
        node = self.get_from_list(node_keys)
        node.children.pop(leaf_key)

    def get_deepest_leaf(self, *keys, callback=lambda leaf: leaf.value):
        """Find *leaf_key* on the deepest reachable node along *keys*.

        Walks as far down the intermediate keys as possible, then searches
        upward from the deepest node for a child leaf named by the last key.
        """
        root_key, *keys, leaf_key = keys
        root_node = self.get_child(root_key)
        node_list = [root_node]
        node = root_node
        # Traverse the tree down following the keys
        for k in keys:
            try:
                node = node.get_child(k)
                node_list.append(node)
            except KeyError:
                break
        # For each node, starting from the deepest, try to find the leaf
        for node in reversed(node_list):
            try:
                leaf = node.get_child(leaf_key)
                if not isinstance(leaf, ConfigLeaf):
                    raise RuntimeError(f"Expected a ConfigLeaf, got {leaf}!")
                return callback(leaf)
            except KeyError:
                continue
        # BUG FIX: the original message read "Cannot any node ..."
        raise KeyError(f"Cannot find any node that contains the leaf {leaf_key}.")

    def serialize(self):
        """Return a plain-dict representation of the subtree."""
        retval = {}
        for key, child in self.children.items():
            retval[key] = child.serialize()
        return retval

    @staticmethod
    def from_dict(other, parent=None, **kwa):
        """Build a ConfigNode tree from a nested dict (non-dicts become leaves)."""
        me = ConfigNode(None, parent=parent)
        for key, val in other.items():
            if isinstance(val, dict):
                me.add(key, ConfigNode.from_dict(val, parent=me, **kwa))
            else:
                me.add(key, ConfigLeaf(key, parent=me, value=val, **kwa))
        return me

    def _as_dict_with_count(self, callback):
        """Return (dict, leaf_count); prunes subtrees containing no leaves."""
        data = {}
        total_count = 0
        for key, child in self.children.items():
            if isinstance(child, ConfigLeaf):
                total_count += 1
                data[key] = callback(child)
            elif isinstance(child, ConfigNode):
                child_data, count = child._as_dict_with_count(callback)
                total_count += count
                if count > 0:
                    data[key] = child_data
        return data, total_count

    def as_dict(self, callback=lambda child: child.value):
        """Dict view of the tree with *callback* applied to every leaf."""
        data, _ = self._as_dict_with_count(callback)
        return data

    def __repr__(self):
        return f"<Node {self.key}>"

    def __contains__(self, item):
        return item in self.children

    # Add support for IPython rich display
    # see https://ipython.readthedocs.io/en/stable/config/integrating.html
    def _repr_json_(self):
        return self.as_dict()
class ConfigLeaf:
    """A terminal value in the configuration tree."""

    def __init__(self, key, parent: ConfigNode, value, extra_data=None):
        self.key = key  # the name of the config leaf
        self._value = value
        self.parent = parent
        self.extra_data = extra_data  # optional metadata, e.g. {"source": ...}

    def serialize(self):
        """Leaves serialize to their raw value."""
        return self.value

    def get_tree(self):
        """Return the chain of nodes from the root down to this leaf."""
        node = self
        parents = []
        while node is not None:
            parents.append(node)
            node = node.parent
        return reversed(parents)

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, new_value):
        # A new value must keep the type of the current one; this guards
        # against config entries silently changing type on update.
        if type(self.value) is type(new_value):
            self._value = new_value
        else:
            tree = self.get_tree()
            tree_str = ".".join(node.key for node in tree if node.key)
            msg = f"Error when setting {tree_str}.\n"
            msg += (
                "Tried to assign a value of type "
                f"{type(new_value)}, expected type {type(self.value)}."
            )
            # BUG FIX: extra_data defaults to None; the original called
            # .get() on it unconditionally, raising AttributeError here
            # instead of the intended TypeError below.
            source = (self.extra_data or {}).get("source", None)
            if source:
                msg += f"\nThis entry was last modified in {source}."
            raise TypeError(msg)

    def __repr__(self):
        return f"<Leaf {self.key}: {self.value}>"
298,371 | step end | #!/usr/bin/env python
"""
_TestMonitor_
This is the test class for monitors
"""
import os.path
import time
from WMCore.Algorithms.SubprocessAlgos import *
from WMCore.WMRuntime.Monitors.WMRuntimeMonitor import WMRuntimeMonitor
from WMCore.WMSpec.Steps.Executor import getStepSpace
from WMCore.WMSpec.WMStep import WMStepHelper
# PEP 8 (E731): use a def instead of assigning a lambda to a name; the
# callable's interface is unchanged.
def getStepName(step):
    """Return the name of *step* via its WMStepHelper wrapper."""
    return WMStepHelper(step).name()
def getStepPID(stepSpace, stepName):
    """
    _getStepPID_

    Find the PID for a step given its stepSpace from the file.

    Reads the ``process_id`` file in the step's working directory and
    returns its contents as an int, or ``None`` when the file is missing
    or does not contain a valid integer.
    """
    pidFile = os.path.join(stepSpace.location, 'process_id')
    if not os.path.isfile(pidFile):
        # Lazy %-args: formatting only happens if the record is emitted.
        logging.error("Could not find process ID file for step %s", stepName)
        return None
    with open(pidFile, 'r') as filehandle:
        output = filehandle.read()
    try:
        stepPID = int(output)
    except ValueError:
        logging.error("Couldn't find a number")
        return None
    return stepPID
def searchForEvent(file):
    """
    _searchForEvent_

    Searches for the last event output into the CMSSW output file.

    Returns a ``(run, event)`` tuple of ints, or ``(None, None)`` when no
    "Run: X Event: Y" marker is found in the file's tail.
    NOTE: the parameter shadows the builtin name; kept for caller
    compatibility.
    """
    MatchRunEvent = re.compile(r"Run: [0-9]+ Event: [0-9]+$")
    # Only the tail is inspected; markers are appended as the job
    # progresses, so the last match is the most recent event.
    lines = tailNLinesFromFile(file, 20)
    lastMatch = None
    for line in lines:
        matches = MatchRunEvent.findall(line.strip())
        if matches:
            lastMatch = matches[-1]
    if lastMatch is not None:  # was `!= None`
        # Extract and update last run/event number.
        try:
            runInfo, lastEvent = lastMatch.split("Event:", 1)
            lastRun = int(runInfo.split("Run:", 1)[1])
            return (lastRun, int(lastEvent))
        except Exception:
            # Malformed marker: report "nothing found" rather than crash.
            return (None, None)
    return (None, None)
class TestMonitor(WMRuntimeMonitor):
    """
    Test monitor: reports job/step lifecycle events and enforces the
    soft/hard timeouts on the currently running step.
    """

    def __init__(self):
        self.startTime = None          # wall-clock start of the current step
        self.currentStep = None
        self.currentStepName = None
        self.currentStepSpace = None
        self.softTimeOut = None        # seconds before a SIGUSR2 soft kill
        self.hardTimeOut = None        # seconds before SIGTERM/SIGKILL
        self.killFlag = False          # True once a soft kill was attempted
        self.cmsswFile = None          # CMSSW log file scanned for events
        WMRuntimeMonitor.__init__(self)

    def initMonitor(self, task, job, logPath, args=None):
        """
        Handles the monitor initiation

        :param args: optional dict with 'softTimeOut'/'hardTimeOut' entries.
        """
        # BUG FIX: `args` previously used a shared mutable default ({}).
        if args is None:
            args = {}
        print("In TestMonitor.initMonitor")
        self.softTimeOut = args.get('softTimeOut', None)
        self.hardTimeOut = args.get('hardTimeOut', None)

    def jobStart(self, task):
        """
        Job start notifier.
        """
        print("Yeehaw! I started a job")
        return

    def jobEnd(self, task):
        """
        Job End notification
        """
        print("Job ended")
        return

    def stepStart(self, step):
        """
        Step start notification
        """
        self.currentStep = step
        self.currentStepName = getStepName(step)
        self.currentStepSpace = getStepSpace(self.currentStepName)
        self.startTime = time.time()
        print("Step started")
        return

    def METHOD_NAME(self, step, stepReport):
        """
        Step end notification
        """
        self.currentStep = None
        self.currentStepName = None
        self.currentStepSpace = None
        print("Step ended")

    def periodicUpdate(self):
        """
        Run on the defined intervals.

        Scans the CMSSW log for progress and kills the step's process once
        the soft timeout (SIGUSR2) or hard timeout (SIGTERM, then SIGKILL)
        has expired.
        """
        if not self.currentStep or not self.currentStepSpace:
            # We're probably between steps
            return

        # Check for events
        if self.cmsswFile:
            run, event = searchForEvent(self.cmsswFile)
            if run and event:
                # Then we actually found something, otherwise do nothing
                # Right now I don't know what to do
                pass

        # Do timeout
        if not self.softTimeOut:
            return

        if time.time() - self.startTime > self.softTimeOut:
            # Then we have to kill the process
            # First, get the PID
            stepPID = getStepPID(self.currentStepSpace, self.currentStepName)
            # Now kill it!
            msg = ""
            msg += "Start Time: %s\n" % self.startTime
            msg += "Time Now: %s\n" % time.time()
            msg += "Timeout: %s\n" % self.softTimeOut
            msg += "Killing Job...\n"
            msg += "Process ID is: %s\n" % stepPID
            # BUG FIX: evaluate killFlag first and guard hardTimeOut against
            # None -- `float < None` raises TypeError on Python 3.  The
            # condition is otherwise logically the same as the original
            # `(elapsed < hardTimeOut) or (not killFlag)`.
            if not self.killFlag or (self.hardTimeOut is not None
                                     and time.time() - self.startTime < self.hardTimeOut):
                msg += "WARNING: Soft Kill Timeout has Expired:"
                logging.error(msg)
                os.kill(stepPID, signal.SIGUSR2)
                self.killFlag = True
            elif self.killFlag:
                msg += "WARNING: Hard Kill Timeout has Expired:"
                logging.error(msg)
                os.kill(stepPID, signal.SIGTERM)
                killedpid, stat = os.waitpid(stepPID, os.WNOHANG)
                if killedpid == 0:
                    # BUG FIX: signal.SIGKill does not exist; the constant
                    # is signal.SIGKILL (AttributeError at runtime before).
                    os.kill(stepPID, signal.SIGKILL)
                    killedpid, stat = os.waitpid(stepPID, os.WNOHANG)
                    if killedpid == 0:
                        logging.error("Can't kill job. Out of options. Waiting for system reboot.")
                        # Panic! It's unkillable!
                        pass
298,372 | send request | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models as _models
from .._serialization import Deserializer, Serializer
from ._configuration import AuthorizationManagementClientConfiguration
from .operations import RoleAssignmentsOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class AuthorizationManagementClient:  # pylint: disable=client-accepts-api-version-keyword
    """Role based access control provides you a way to apply granular level policy administration down
    to individual resources or resource groups. These operations enable you to manage role
    assignments. A role assignment grants access to Azure Active Directory users.

    :ivar role_assignments: RoleAssignmentsOperations operations
    :vartype role_assignments:
     azure.mgmt.authorization.v2018_09_01_preview.operations.RoleAssignmentsOperations
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The ID of the target subscription. Required.
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2018-09-01-preview". Note that overriding
     this default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(
        self,
        credential: "TokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = AuthorizationManagementClientConfiguration(
            credential=credential, subscription_id=subscription_id, **kwargs
        )
        self._client: ARMPipelineClient = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Register every model class with the (de)serialization machinery.
        client_models = {
            name: value for name, value in _models.__dict__.items() if isinstance(value, type)
        }
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self._serialize.client_side_validation = False
        self.role_assignments = RoleAssignmentsOperations(
            self._client, self._config, self._serialize, self._deserialize, "2018-09-01-preview"
        )

    def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = client._send_request(request)
        <HttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.HttpResponse
        """
        # Operate on a copy so the caller's request object stays untouched.
        prepared = deepcopy(request)
        prepared.url = self._client.format_url(prepared.url)
        return self._client.send_request(prepared, **kwargs)

    def close(self) -> None:
        self._client.close()

    def __enter__(self) -> "AuthorizationManagementClient":
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details: Any) -> None:
        self._client.__exit__(*exc_details)
298,373 | has paired | """Companion+ server authentication code."""
from abc import ABC, abstractmethod
import binascii
from collections import namedtuple
import hashlib
import logging
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey
from cryptography.hazmat.primitives.asymmetric.x25519 import (
X25519PrivateKey,
X25519PublicKey,
)
from srptools import SRPContext, SRPServerSession, constants
from pyatv.auth.hap_srp import hkdf_expand
from pyatv.auth.hap_tlv8 import ErrorCode, TlvValue, read_tlv, write_tlv
from pyatv.auth.server_auth import PIN_CODE, PRIVATE_KEY, SERVER_IDENTIFIER
from pyatv.protocols.companion.connection import FrameType
from pyatv.support import chacha20, log_binary, opack
_LOGGER = logging.getLogger(__name__)
ServerKeys = namedtuple("ServerKeys", "sign auth auth_pub verify verify_pub")
def generate_keys(seed):
    """Derive the server's signing and verification keys from *seed*.

    The same 32-byte seed feeds both the Ed25519 signing key and the
    X25519 key-exchange key; raw-encoded byte forms are precomputed for
    use in the pairing TLVs.
    """
    signing = Ed25519PrivateKey.from_private_bytes(seed)
    verifier = X25519PrivateKey.from_private_bytes(seed)
    raw = serialization.Encoding.Raw
    return ServerKeys(
        sign=signing,
        auth=signing.private_bytes(
            encoding=raw,
            format=serialization.PrivateFormat.Raw,
            encryption_algorithm=serialization.NoEncryption(),
        ),
        auth_pub=signing.public_key().public_bytes(
            encoding=raw, format=serialization.PublicFormat.Raw
        ),
        verify=verifier,
        verify_pub=verifier.public_key(),
    )
def new_server_session(keys, pin):
    """Create an SRP server session for the Pair-Setup flow.

    A throw-away user context derives the verifier/salt triplet from the
    PIN; the server context is then seeded with that verifier and the
    server's auth key.
    """
    user_context = SRPContext(
        "Pair-Setup",
        str(pin),
        prime=constants.PRIME_3072,
        generator=constants.PRIME_3072_GEN,
        hash_func=hashlib.sha512,
        bits_salt=128,
    )
    username, verifier, salt = user_context.get_user_data_triplet()

    server_context = SRPContext(
        username,
        prime=constants.PRIME_3072,
        generator=constants.PRIME_3072_GEN,
        hash_func=hashlib.sha512,
        bits_salt=128,
    )
    session = SRPServerSession(
        server_context, verifier, binascii.hexlify(keys.auth).decode()
    )
    return session, salt
class CompanionServerAuth(ABC):
    """Server-side implementation of Companion authentication.

    Implements both the pair-setup (SRP) and pair-verify (X25519 key
    exchange) state machines; frames are dispatched by sequence number.
    """

    def __init__(self, device_name, unique_id=SERVER_IDENTIFIER, pin=PIN_CODE):
        """Initialize a new instance of CompanionServerAuth."""
        self.device_name = device_name
        self.unique_id = unique_id.encode()
        self.input_key = None
        self.output_key = None
        self.keys = generate_keys(PRIVATE_KEY)
        # BUG FIX: honor the `pin` parameter -- it was previously ignored
        # in favor of the module-level PIN_CODE constant, so callers could
        # never supply a custom PIN.
        self.session, self.salt = new_server_session(self.keys, str(pin))

    def handle_auth_frame(self, frame_type, data):
        """Handle incoming auth message."""
        _LOGGER.debug("Received auth frame: type=%s, data=%s", frame_type, data)
        pairing_data = read_tlv(data["_pd"])
        seqno = int.from_bytes(pairing_data[TlvValue.SeqNo], byteorder="little")
        # Pair-verify frames dispatch to _m<N>_verify, all others to
        # _m<N>_setup.
        suffix = (
            "verify"
            if frame_type in [FrameType.PV_Start, FrameType.PV_Next]
            else "setup"
        )
        getattr(self, f"_m{seqno}_{suffix}")(pairing_data)

    def _m1_verify(self, pairing_data):
        """Pair-verify M1: X25519 exchange, sign, and return M2."""
        server_pub_key = self.keys.verify_pub.public_bytes(
            encoding=serialization.Encoding.Raw, format=serialization.PublicFormat.Raw
        )
        client_pub_key = pairing_data[TlvValue.PublicKey]

        shared_key = self.keys.verify.exchange(
            X25519PublicKey.from_public_bytes(client_pub_key)
        )

        session_key = hkdf_expand(
            "Pair-Verify-Encrypt-Salt", "Pair-Verify-Encrypt-Info", shared_key
        )

        info = server_pub_key + self.unique_id + client_pub_key
        signature = self.keys.sign.sign(info)

        tlv = write_tlv(
            {TlvValue.Identifier: self.unique_id, TlvValue.Signature: signature}
        )

        chacha = chacha20.Chacha20Cipher(session_key, session_key)
        encrypted = chacha.encrypt(tlv, nonce="PV-Msg02".encode())

        tlv = write_tlv(
            {
                TlvValue.SeqNo: b"\x02",
                TlvValue.PublicKey: server_pub_key,
                TlvValue.EncryptedData: encrypted,
            }
        )

        # Session keys for the encrypted channel (enabled after M3).
        self.output_key = hkdf_expand("", "ServerEncrypt-main", shared_key)
        self.input_key = hkdf_expand("", "ClientEncrypt-main", shared_key)
        log_binary(_LOGGER, "Keys", Output=self.output_key, Input=self.input_key)

        self.send_to_client(FrameType.PV_Next, {"_pd": tlv})

    def _m3_verify(self, pairing_data):
        """Pair-verify M3: acknowledge and switch on encryption."""
        self.send_to_client(
            FrameType.PV_Next, {"_pd": write_tlv({TlvValue.SeqNo: b"\x04"})}
        )
        self.enable_encryption(self.output_key, self.input_key)

    def _m1_setup(self, pairing_data):
        """Pair-setup M1: send SRP salt and server public key."""
        tlv = write_tlv(
            {
                TlvValue.SeqNo: b"\x02",
                TlvValue.Salt: binascii.unhexlify(self.salt),
                TlvValue.PublicKey: binascii.unhexlify(self.session.public),
                27: b"\x01",
            }
        )
        self.send_to_client(FrameType.PS_Next, {"_pd": tlv, "_pwTy": 1})

    def _m3_setup(self, pairing_data):
        """Pair-setup M3: verify the client's SRP proof."""
        pubkey = binascii.hexlify(pairing_data[TlvValue.PublicKey]).decode()
        self.session.process(pubkey, self.salt)

        if self.session.verify_proof(binascii.hexlify(pairing_data[TlvValue.Proof])):
            proof = binascii.unhexlify(self.session.key_proof_hash)
            tlv = {TlvValue.Proof: proof, TlvValue.SeqNo: b"\x04"}
        else:
            # Wrong PIN: report an authentication error instead of a proof.
            tlv = {
                TlvValue.Error: bytes([ErrorCode.Authentication]),
                TlvValue.SeqNo: b"\x04",
            }

        self.send_to_client(FrameType.PS_Next, {"_pd": write_tlv(tlv)})

    def _m5_setup(self, pairing_data):
        """Pair-setup M5: exchange signed device info and finish pairing."""
        session_key = hkdf_expand(
            "Pair-Setup-Encrypt-Salt",
            "Pair-Setup-Encrypt-Info",
            binascii.unhexlify(self.session.key),
        )

        acc_device_x = hkdf_expand(
            "Pair-Setup-Accessory-Sign-Salt",
            "Pair-Setup-Accessory-Sign-Info",
            binascii.unhexlify(self.session.key),
        )

        chacha = chacha20.Chacha20Cipher(session_key, session_key)
        decrypted_tlv_bytes = chacha.decrypt(
            pairing_data[TlvValue.EncryptedData], nonce="PS-Msg05".encode()
        )
        _LOGGER.debug("MSG5 EncryptedData=%s", read_tlv(decrypted_tlv_bytes))

        # Hard-coded fake device metadata returned to the client.
        other = {
            "altIRK": b"-\x54\xe0\x7a\x88*en\x11\xab\x82v-'%\xc5",
            "accountID": "DC6A7CB6-CA1A-4BF4-880D-A61B717814DB",
            "model": "AppleTV6,2",
            "wifiMAC": b"@\xff\xa1\x8f\xa1\xb9",
            "name": "Living Room",
            "mac": b"@\xc4\xff\x8f\xb1\x99",
        }

        device_info = acc_device_x + self.unique_id + self.keys.auth_pub
        signature = self.keys.sign.sign(device_info)

        tlv = {
            TlvValue.Identifier: self.unique_id,
            TlvValue.PublicKey: self.keys.auth_pub,
            TlvValue.Signature: signature,
            17: opack.pack(other),
        }

        tlv = write_tlv(tlv)

        chacha = chacha20.Chacha20Cipher(session_key, session_key)
        encrypted = chacha.encrypt(tlv, nonce="PS-Msg06".encode())

        tlv = write_tlv({TlvValue.SeqNo: b"\x06", TlvValue.EncryptedData: encrypted})
        self.send_to_client(FrameType.PS_Next, {"_pd": tlv})
        self.METHOD_NAME()

    @abstractmethod
    def send_to_client(self, frame_type: FrameType, data: object) -> None:
        """Send data to client device (iOS)."""

    @abstractmethod
    def enable_encryption(self, output_key: bytes, input_key: bytes) -> None:
        """Enable encryption with the specified keys."""

    @staticmethod
    def METHOD_NAME():
        """Call when a client has paired."""
298,374 | get nncf network | # Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Type
import numpy as np
import pytest
import torch
from torch import nn
from nncf import Dataset
from nncf.common.graph.transformations.commands import TargetType
from nncf.experimental.common.tensor_statistics.collectors import TensorReducerBase
from nncf.quantization.algorithms.fast_bias_correction.torch_backend import PTFastBiasCorrectionAlgoBackend
from nncf.quantization.algorithms.min_max.torch_backend import PTMinMaxAlgoBackend
from nncf.torch.graph.graph import PTTargetPoint
from nncf.torch.statistics.aggregator import PTStatisticsAggregator
from tests.common.test_statistics_aggregator import TemplateTestStatisticsAggregator
from tests.torch.ptq.helpers import METHOD_NAME
from tests.torch.ptq.test_ptq_params import ToNNCFNetworkInterface
IDENTITY_NODE_NAME = "PTIdentityConvModel/__add___0"
CONV_NODE_NAME = "PTIdentityConvModel/NNCFConv2d[conv]/conv2d_0"
INPUT_SHAPE = [1, 3, 3, 3]
class PTIdentityConvModel(nn.Module, ToNNCFNetworkInterface):
    # Tiny test model: a no-op add followed by a 3x3 conv whose kernel is
    # supplied explicitly, so collected statistics are fully predictable.
    def __init__(self, kernel):
        super().__init__()
        self.conv = nn.Conv2d(3, 3, 3)
        # Overwrite the randomly initialized kernel with the given weights.
        self.conv.weight.data = torch.tensor(kernel, dtype=torch.float32)
    def forward(self, x):
        # `x + 0.0` creates a distinct identity node in the traced NNCF
        # graph (see IDENTITY_NODE_NAME) that statistics can target.
        return self.conv(x + 0.0)
    def METHOD_NAME(self):
        # Inside the body, METHOD_NAME resolves to the module-level helper
        # imported from tests.torch.ptq.helpers (global lookup), not to this
        # method -- this is not infinite recursion.
        return METHOD_NAME(self, INPUT_SHAPE)
class TestStatisticsAggregator(TemplateTestStatisticsAggregator):
    """Torch (PT) backend specialization of the statistics-aggregator template tests."""
    def get_min_max_algo_backend_cls(self) -> Type[PTMinMaxAlgoBackend]:
        return PTMinMaxAlgoBackend
    def get_bias_correction_algo_backend_cls(self) -> None:
        # Regular bias correction has no PT backend yet; skip dependent tests.
        pytest.skip("PTBiasCorrectionAlgoBackend is not implemented")
    def get_fast_bias_correction_algo_backend_cls(self) -> Type[PTFastBiasCorrectionAlgoBackend]:
        return PTFastBiasCorrectionAlgoBackend
    def get_backend_model(self, dataset_samples):
        # Build the conv kernel from the first sample so statistics have
        # known min/max values.
        sample = dataset_samples[0].reshape(INPUT_SHAPE[1:])
        conv_w = self.dataset_samples_to_conv_w(np.array(sample))
        return PTIdentityConvModel(conv_w).METHOD_NAME()
    @pytest.fixture
    def is_backend_support_custom_estimators(self) -> bool:
        return False
    @pytest.fixture(scope="session")
    def test_params(self):
        # No backend-specific parameters are needed for PT.
        return
    def get_statistics_aggregator(self, dataset):
        return PTStatisticsAggregator(dataset)
    def get_dataset(self, samples):
        def transform_fn(data_item):
            # Samples are already tensors; pass them through unchanged.
            return data_item
        return Dataset(samples, transform_fn)
    def get_target_point(self, target_type: TargetType):
        # Default: post-op point on the identity add; weighted ops target
        # the conv node instead (port_id None = weights port).
        target_node_name = IDENTITY_NODE_NAME
        port_id = 0
        if target_type == TargetType.OPERATION_WITH_WEIGHTS:
            target_node_name = CONV_NODE_NAME
            port_id = None
        return PTMinMaxAlgoBackend.target_point(target_type, target_node_name, port_id)
    def get_target_point_cls(self):
        return PTTargetPoint
    def reducers_map(self) -> List[TensorReducerBase]:
        return None
    @pytest.fixture
    def dataset_samples(self, dataset_values):
        # Two samples: zeros plus a ones-tensor; min/max values from
        # dataset_values are planted per channel in the first sample.
        input_shape = INPUT_SHAPE
        dataset_samples = [np.zeros(input_shape), np.ones(input_shape)]
        for i, value in enumerate(dataset_values):
            dataset_samples[0][0, i, 0, 0] = value["max"]
            dataset_samples[0][0, i, 0, 1] = value["min"]
        return torch.tensor(dataset_samples, dtype=torch.float32)
    @pytest.fixture
    def is_stat_in_shape_of_scale(self) -> bool:
        return True
    # NOTE(review): the id "out_of_palce" is a typo for "out_of_place";
    # kept as-is because test ids are referenced on the command line.
    @pytest.fixture(params=[False], ids=["out_of_palce"])
    def inplace_statistics(self, request) -> bool:
        return request.param
    @pytest.mark.skip("Merging is not implemented yet")
    def test_statistics_merging_simple(self, dataset_samples, inplace_statistics, statistic_point_params):
        pass
    @pytest.mark.skip("Merging is not implemented yet")
    def test_statistic_merging(self, dataset_samples, inplace_statistics):
        pass
    @pytest.mark.skip("Merging is not implemented yet")
    def test_same_collectors_different_attrs_dont_merge(self, statistics_type, test_params, dataset_samples):
        pass
298,375 | install kernel spec | #########################################################
### Install Jupyter kernel spec and clean stale files
#########################################################
import os
import time
# Import setuptools before importing distutils, so that setuptools
# can replace distutils by its own vendored copy.
import setuptools
from distutils import log
from distutils.command.install import install
from setuptools.command.develop import develop
class install_kernel_spec_mixin:
    """Mixin adding Jupyter kernel-spec installation to distutils/setuptools commands."""
    def METHOD_NAME(self):
        """
        Install the Jupyter kernel spec.
        .. NOTE::
            The files are generated, not copied. Therefore, we cannot
            use ``data_files`` for this.
        """
        from sage.repl.ipython_kernel.install import SageKernelSpec
        # Jupyter packages typically use the data_files option to
        # setup() to install kernels and nbextensions. So we should use
        # the install_data directory for installing our Jupyter files.
        # NOTE(review): `self.install_data` is supplied by the distutils
        # command class this mixin is combined with.
        SageKernelSpec.update(prefix=self.install_data)
class sage_install(install, install_kernel_spec_mixin):
    """The distutils ``install`` command, extended to also install the Jupyter kernel spec."""
    def run(self):
        # Install the package itself first, then the kernel spec on top.
        install.run(self)
        self.METHOD_NAME()
class sage_develop(develop, install_kernel_spec_mixin):
    """The setuptools ``develop`` command, extended to also install the Jupyter kernel spec."""
    def run(self):
        develop.run(self)
        # Only install the kernel spec when installing, not when
        # ``develop --uninstall`` is being run.
        if not self.uninstall:
            self.METHOD_NAME()
class sage_clean(install):
    """An ``install``-derived command that only removes stale installed files."""

    # Optional iterable restricting cleaning to these distributions;
    # ``None`` means files of any distribution may be removed.
    all_distributions = None

    def run(self):
        t = time.time()
        self.clean_stale_files()
        # Lazy %-style arguments: formatting is deferred to the logger.
        log.info('Finished cleaning, time: %.2f seconds.', time.time() - t)

    def clean_stale_files(self):
        """
        Remove stale installed files.

        This removes files which are built/installed but which do not
        exist in the Sage sources (typically because some source file
        has been deleted). Files are removed from the build directory
        ``build/lib-*`` and from the install directory ``site-packages``.
        """
        dist = self.distribution
        cmd_build_py = self.get_finalized_command("build_py")
        cmd_build_cython = self.get_finalized_command("build_cython")

        # Determine all Python modules inside all packages.
        py_modules = []
        ordinary_packages = []
        for package in dist.packages:
            package_dir = cmd_build_py.get_package_dir(package)
            if os.path.exists(os.path.join(package_dir, '__init__.py')):
                ordinary_packages.append(package)
            py_modules.extend(cmd_build_py.find_package_modules(package, package_dir))
        # modules is a list of triples (package, module, module_file).
        # Construct the complete module name from this.
        py_modules = ["{0}.{1}".format(*m) for m in py_modules]
        if dist.py_modules:
            py_modules.extend(dist.py_modules)

        # Determine all files of package data and Cythonized package files.
        # Example entry of cmd_build_cython.get_cythonized_package_files():
        #   ('sage/media', ['./sage/media/channels.pyx'])
        data_files = cmd_build_cython.get_cythonized_package_files()
        # Example entries of build_py.data_files:
        #   ('sage.libs.gap', 'sage/libs/gap', 'build/lib.macosx-10.9-x86_64-3.7/sage/libs/gap', ['sage.gaprc'])
        #   ('sage', 'sage', 'build/lib.macosx-10.9-x86_64-3.7/sage', ['ext_data/nodoctest.py', ...])
        nobase_data_files = [(src_dir, [os.path.join(src_dir, filename) for filename in filenames])
                             for package, src_dir, build_dir, filenames in cmd_build_py.data_files]

        # Clean install directory (usually, purelib and platlib are the same)
        # and build directory.
        output_dirs = [self.install_purelib, self.install_platlib, self.build_lib]
        from sage_setup.clean import clean_install_dir
        for output_dir in set(output_dirs):
            log.info('- cleaning %s', output_dir)
            clean_install_dir(output_dir,
                              ordinary_packages,
                              py_modules,
                              dist.ext_modules,
                              data_files,
                              nobase_data_files,
                              distributions=self.all_distributions)
class sage_install_and_clean(sage_install, sage_clean):
    """Run a normal install, then remove files left over from previous installs."""
    def run(self):
        sage_install.run(self)
        sage_clean.run(self)
298,376 | ui draw filter unregister | # SPDX-FileCopyrightText: 2017-2023 Blender Authors
#
# SPDX-License-Identifier: GPL-2.0-or-later
"""
Module to manage overriding various parts of Blender.
Intended for use with 'app_templates', though it can be used from anywhere.
"""
# TODO, how to check these aren't from add-ons.
# templates might need to un-register while filtering.
def class_filter(cls_parent, **kw):
whitelist = kw.pop("whitelist", None)
blacklist = kw.pop("blacklist", None)
kw_items = tuple(kw.items())
for cls in cls_parent.__subclasses__():
# same as is_registered()
if "bl_rna" in cls.__dict__:
if blacklist is not None and cls.__name__ in blacklist:
continue
if ((whitelist is not None and cls.__name__ is whitelist) or
all((getattr(cls, attr) in expect) for attr, expect in kw_items)):
yield cls
def ui_draw_filter_register(
*,
ui_ignore_classes=None,
ui_ignore_operator=None,
ui_ignore_property=None,
ui_ignore_menu=None,
ui_ignore_label=None
):
import bpy
UILayout = bpy.types.UILayout
if ui_ignore_classes is None:
ui_ignore_classes = (
bpy.types.Panel,
bpy.types.Menu,
bpy.types.Header,
)
class OperatorProperties_Fake:
pass
class UILayout_Fake(bpy.types.UILayout):
__slots__ = ()
def __getattribute__(self, attr):
# ensure we always pass down UILayout_Fake instances
if attr in {"row", "split", "column", "box", "column_flow"}:
real_func = UILayout.__getattribute__(self, attr)
def dummy_func(*args, **kw):
# print("wrapped", attr)
ret = real_func(*args, **kw)
return UILayout_Fake(ret)
return dummy_func
elif attr in {"operator", "operator_menu_enum", "operator_enum", "operator_menu_hold"}:
if ui_ignore_operator is None:
return UILayout.__getattribute__(self, attr)
real_func = UILayout.__getattribute__(self, attr)
def dummy_func(*args, **kw):
# print("wrapped", attr)
ui_test = ui_ignore_operator(args[0])
if ui_test is False:
ret = real_func(*args, **kw)
else:
if ui_test is None:
UILayout.__getattribute__(self, "label")(text="")
else:
assert ui_test is True
# may need to be set
ret = OperatorProperties_Fake()
return ret
return dummy_func
elif attr in {"prop", "prop_enum"}:
if ui_ignore_property is None:
return UILayout.__getattribute__(self, attr)
real_func = UILayout.__getattribute__(self, attr)
def dummy_func(*args, **kw):
# print("wrapped", attr)
ui_test = ui_ignore_property(args[0].__class__.__name__, args[1])
if ui_test is False:
ret = real_func(*args, **kw)
else:
if ui_test is None:
UILayout.__getattribute__(self, "label")(text="")
else:
assert ui_test is True
ret = None
return ret
return dummy_func
elif attr == "menu":
if ui_ignore_menu is None:
return UILayout.__getattribute__(self, attr)
real_func = UILayout.__getattribute__(self, attr)
def dummy_func(*args, **kw):
# print("wrapped", attr)
ui_test = ui_ignore_menu(args[0])
if ui_test is False:
ret = real_func(*args, **kw)
else:
if ui_test is None:
UILayout.__getattribute__(self, "label")(text="")
else:
assert ui_test is True
ret = None
return ret
return dummy_func
elif attr == "label":
if ui_ignore_label is None:
return UILayout.__getattribute__(self, attr)
real_func = UILayout.__getattribute__(self, attr)
def dummy_func(*args, **kw):
# print("wrapped", attr)
ui_test = ui_ignore_label(args[0] if args else kw.get("text", ""))
if ui_test is False:
ret = real_func(*args, **kw)
else:
if ui_test is None:
real_func(text="")
else:
assert ui_test is True
ret = None
return ret
return dummy_func
else:
return UILayout.__getattribute__(self, attr)
# print(self, attr)
def operator(*args, **kw):
return super().operator(*args, **kw)
def draw_override(func_orig, self_real, context):
cls_real = self_real.__class__
if cls_real is super:
# simple, no wrapping
return func_orig(self_real, context)
class Wrapper(cls_real):
__slots__ = ()
def __getattribute__(self, attr):
if attr == "layout":
return UILayout_Fake(self_real.layout)
else:
cls = super()
try:
return cls.__getattr__(self, attr)
except AttributeError:
# class variable
try:
return getattr(cls, attr)
except AttributeError:
# for preset bl_idname access
return getattr(UILayout(self), attr)
@property
def layout(self):
# print("wrapped")
return self_real.layout
return func_orig(Wrapper(self_real), context)
ui_ignore_store = []
for cls in ui_ignore_classes:
for subcls in list(cls.__subclasses__()):
if "draw" in subcls.__dict__: # don't want to get parents draw()
def replace_draw():
# function also serves to hold draw_old in a local name-space
draw_orig = subcls.draw
def draw(self, context):
return draw_override(draw_orig, self, context)
subcls.draw = draw
ui_ignore_store.append((subcls, "draw", subcls.draw))
replace_draw()
return ui_ignore_store
def METHOD_NAME(ui_ignore_store):
for (obj, attr, value) in ui_ignore_store:
setattr(obj, attr, value) |
298,377 | test read encoding argument | # encoding: utf-8
"""
Unit tests of loader.py.
"""
import os
import sys
import unittest
from pystache.tests.common import AssertStringMixin, DATA_DIR, SetupDefaults
from pystache import defaults
from pystache.loader import Loader
# We use the same directory as the locator tests for now.
LOADER_DATA_DIR = os.path.join(DATA_DIR, 'locator')
class LoaderTests(unittest.TestCase, AssertStringMixin, SetupDefaults):
    """Tests of ``pystache.loader.Loader``.
    NOTE(review): this is Python-2-era code -- it uses the ``unicode``
    builtin and ``u''`` literals, and will not run unmodified on Python 3.
    """
    def setUp(self):
        self.setup_defaults()
    def tearDown(self):
        # Restore the pystache.defaults values mutated by the tests.
        self.teardown_defaults()
    def test_init__extension(self):
        loader = Loader(extension='foo')
        self.assertEqual(loader.extension, 'foo')
    def test_init__extension__default(self):
        # Test the default value.
        loader = Loader()
        self.assertEqual(loader.extension, 'mustache')
    def test_init__file_encoding(self):
        loader = Loader(file_encoding='bar')
        self.assertEqual(loader.file_encoding, 'bar')
    def test_init__file_encoding__default(self):
        # The default file encoding is read from defaults.FILE_ENCODING
        # at construction time; restore it afterwards.
        file_encoding = defaults.FILE_ENCODING
        try:
            defaults.FILE_ENCODING = 'foo'
            loader = Loader()
            self.assertEqual(loader.file_encoding, 'foo')
        finally:
            defaults.FILE_ENCODING = file_encoding
    def test_init__to_unicode(self):
        to_unicode = lambda x: x
        loader = Loader(to_unicode=to_unicode)
        self.assertEqual(loader.to_unicode, to_unicode)
    def test_init__to_unicode__default(self):
        loader = Loader()
        self.assertRaises(TypeError, loader.to_unicode, u"abc")
        decode_errors = defaults.DECODE_ERRORS
        string_encoding = defaults.STRING_ENCODING
        nonascii = u'abcdé'.encode('utf-8')
        loader = Loader()
        self.assertRaises(UnicodeDecodeError, loader.to_unicode, nonascii)
        # 'ignore' drops the undecodable trailing byte.
        defaults.DECODE_ERRORS = 'ignore'
        loader = Loader()
        self.assertString(loader.to_unicode(nonascii), u'abcd')
        defaults.STRING_ENCODING = 'utf-8'
        loader = Loader()
        self.assertString(loader.to_unicode(nonascii), u'abcdé')
    def _get_path(self, filename):
        # Helper: absolute path of a fixture file in the test data dir.
        return os.path.join(DATA_DIR, filename)
    def test_unicode__basic__input_str(self):
        """
        Test unicode(): default arguments with str input.
        """
        loader = Loader()
        actual = loader.unicode("foo")
        self.assertString(actual, u"foo")
    def test_unicode__basic__input_unicode(self):
        """
        Test unicode(): default arguments with unicode input.
        """
        loader = Loader()
        actual = loader.unicode(u"foo")
        self.assertString(actual, u"foo")
    def test_unicode__basic__input_unicode_subclass(self):
        """
        Test unicode(): default arguments with unicode-subclass input.
        """
        class UnicodeSubclass(unicode):
            pass
        s = UnicodeSubclass(u"foo")
        loader = Loader()
        actual = loader.unicode(s)
        self.assertString(actual, u"foo")
    def test_unicode__to_unicode__attribute(self):
        """
        Test unicode(): encoding attribute.
        """
        loader = Loader()
        non_ascii = u'abcdé'.encode('utf-8')
        self.assertRaises(UnicodeDecodeError, loader.unicode, non_ascii)
        def to_unicode(s, encoding=None):
            if encoding is None:
                encoding = 'utf-8'
            return unicode(s, encoding)
        # Replacing the attribute changes how bytes are decoded.
        loader.to_unicode = to_unicode
        self.assertString(loader.unicode(non_ascii), u"abcdé")
    def test_unicode__encoding_argument(self):
        """
        Test unicode(): encoding argument.
        """
        loader = Loader()
        non_ascii = u'abcdé'.encode('utf-8')
        self.assertRaises(UnicodeDecodeError, loader.unicode, non_ascii)
        actual = loader.unicode(non_ascii, encoding='utf-8')
        self.assertString(actual, u'abcdé')
    # TODO: check the read() unit tests.
    def test_read(self):
        """
        Test read().
        """
        loader = Loader()
        path = self._get_path('ascii.mustache')
        actual = loader.read(path)
        self.assertString(actual, u'ascii: abc')
    def test_read__file_encoding__attribute(self):
        """
        Test read(): file_encoding attribute respected.
        """
        loader = Loader()
        path = self._get_path('non_ascii.mustache')
        self.assertRaises(UnicodeDecodeError, loader.read, path)
        loader.file_encoding = 'utf-8'
        actual = loader.read(path)
        self.assertString(actual, u'non-ascii: é')
    def METHOD_NAME(self):
        """
        Test read(): encoding argument respected.
        """
        loader = Loader()
        path = self._get_path('non_ascii.mustache')
        self.assertRaises(UnicodeDecodeError, loader.read, path)
        actual = loader.read(path, encoding='utf-8')
        self.assertString(actual, u'non-ascii: é')
    def test_read__to_unicode__attribute(self):
        """
        Test read(): to_unicode attribute respected.
        """
        # NOTE(review): the positive assertions below are commented out,
        # so this test only checks the failing decode; it never verifies
        # that to_unicode is respected.
        loader = Loader()
        path = self._get_path('non_ascii.mustache')
        self.assertRaises(UnicodeDecodeError, loader.read, path)
        #loader.decode_errors = 'ignore'
        #actual = loader.read(path)
        #self.assertString(actual, u'non-ascii: ')
    def test_load_file(self):
        loader = Loader(search_dirs=[DATA_DIR, LOADER_DATA_DIR])
        template = loader.load_file('template.txt')
        self.assertEqual(template, 'Test template file\n')
    def test_load_name(self):
        loader = Loader(search_dirs=[DATA_DIR, LOADER_DATA_DIR],
                        extension='txt')
        template = loader.load_name('template')
        self.assertEqual(template, 'Test template file\n')
|
298,378 | all rl | """Generic Rules for Diofant.
This file assumes knowledge of Basic and little else.
"""
import functools
from ..utilities.iterables import sift
from .basic import Atom, Basic
__all__ = ('arguments', 'operator', 'term', 'rm_id',
'glom', 'flatten', 'unpack', 'sort')
@functools.singledispatch
def arguments(o):
    """Extract arguments from an expression.

    Generic case: return the expression's ``args`` attribute.  Atoms and
    plain ints are registered separately to yield an empty tuple.
    """
    return o.args
@arguments.register(int)
@arguments.register(Atom)
def arguments_atomic(o):
    # Atoms (and bare ints) have no arguments.
    return ()
@functools.singledispatch
def operator(o):
    """Extract the head of an expression.

    Generic case: return the expression's ``func`` attribute.
    """
    return o.func
@operator.register(int)
@operator.register(Atom)
def operator_atomic(o):
    # A leaf is its own head.
    return o
@functools.singledispatch
def term(op, args):
    """Build an expression from the head and arguments.

    Dispatches on the head ``op``: by default the head is called with the
    arguments; leaf heads (ints, Atoms) are returned unchanged.
    """
    return op(*args)
@term.register(int)
@term.register(Atom)
def term_atomic(op, args):
    # A leaf rebuilt from itself is just the leaf; args are ignored.
    return op
# Functions that create rules
def rm_id(isid):
    """Create a rule to remove identities

    isid - fn :: x -> Bool --- whether or not this element is an identity

    >>> remove_zeros = rm_id(lambda x: x == 0)
    >>> remove_zeros(Basic(1, 0, 2))
    Basic(1, 2)
    >>> remove_zeros(Basic(0, 0))  # If only identites then we keep one
    Basic(0)

    See Also
    ========

    unpack

    """
    def ident_remove(expr):
        """Strip identity arguments, keeping one if nothing else remains."""
        args = arguments(expr)
        flags = [isid(arg) for arg in args]
        n_ids = sum(flags)
        if not n_ids:                # no identities -- common fast path
            return expr
        if n_ids < len(flags):       # at least one non-identity survives
            kept = [arg for arg, is_ident in zip(args, flags) if not is_ident]
            return term(operator(expr), kept)
        # every argument was an identity -- keep a single representative
        return term(operator(expr), [args[0]])
    return ident_remove
def glom(key, count, combine):
    """Create a rule to conglomerate identical args.

    >>> def key(x):
    ...     return x.as_coeff_Mul()[1]
    >>> def count(x):
    ...     return x.as_coeff_Mul()[0]
    >>> def combine(cnt, arg):
    ...     return cnt * arg
    >>> rl = glom(key, count, combine)

    >>> rl(Add(x, -x, 3*x, 2, 3, evaluate=False))
    3*x + 5

    Wait, how are key, count and combine supposed to work?

    >>> key(2*x)
    x
    >>> count(2*x)
    2
    >>> combine(2, x)
    2*x

    """
    def conglomerate(expr):
        """Conglomerate together identical args x + x -> 2x."""
        args = arguments(expr)
        groups = sift(args, key)
        totals = {k: sum(count(member) for member in members)
                  for k, members in groups.items()}
        newargs = [combine(total, k) for k, total in totals.items()]
        # Only rebuild the expression when something actually merged.
        if set(newargs) != set(args):
            return term(operator(expr), newargs)
        return expr
    return conglomerate
def sort(key):
    """Create a rule to sort by a key function.

    >>> sort_rl = sort(str)
    >>> sort_rl(Basic(3, 1, 2))
    Basic(1, 2, 3)

    """
    def sort_rl(expr):
        ordered = sorted(arguments(expr), key=key)
        return term(operator(expr), ordered)
    return sort_rl
# Functions that are rules
def unpack(expr):
    """Rule to unpack singleton args.

    >>> unpack(Basic(2))
    2

    """
    args = arguments(expr)
    return args[0] if len(args) == 1 else expr
def flatten(expr):
    """Flatten T(a, b, T(c, d), T2(e)) to T(a, b, c, d, T2(e))."""
    head = operator(expr)
    flat = []
    for arg in arguments(expr):
        # Splice in children that share the same head; keep others intact.
        if operator(arg) == head:
            flat.extend(arguments(arg))
        else:
            flat.append(arg)
    return term(head, flat)
def identity(x):
    """Return *x* unchanged (the do-nothing rule)."""
    return x
def switch(key, ruledict):
    """Select a rule based on the result of key called on the function."""
    def switch_rl(expr):
        # Unknown keys fall back to the identity rule.
        return ruledict.get(key(expr), identity)(expr)
    return switch_rl
def typed(ruletypes):
    """Apply rules based on the expression type.

    ``ruletypes`` maps an expression type to the rule to apply; dispatch
    is by ``type(expr)`` via :func:`switch`.

    Examples
    ========

    >>> rm_zeros = rm_id(lambda x: x == 0)
    >>> rm_ones = rm_id(lambda x: x == 1)
    >>> remove_idents = typed({Add: rm_zeros, Mul: rm_ones})

    """
    return switch(type, ruletypes)
def treeapply(tree, join, leaf=identity):
    """Apply functions onto recursive containers (tree).

    join - a dictionary mapping container types to functions
      e.g. ``{list: minimize, tuple: chain}``

    Keys are containers/iterables.  Values are functions [a] -> a.

    Examples
    ========

    >>> tree = [(3, 2), (4, 1)]
    >>> treeapply(tree, {list: max, tuple: min})
    2

    >>> def mul(*args):
    ...     total = 1
    ...     for arg in args:
    ...         total *= arg
    ...     return total
    >>> treeapply(tree, {list: mul, tuple: lambda *args: sum(args)})
    25

    """
    for container_type, joiner in join.items():
        if isinstance(tree, container_type):
            # Recurse into the container and combine the results.
            recurse = functools.partial(treeapply, join=join, leaf=leaf)
            return joiner(*map(recurse, tree))
    return leaf(tree)
def minimize(*rules, objective=identity):
    """Select result of rules that minimizes objective.

    Examples
    ========

    >>> from diofant.core.strategies import minimize
    >>> rl = minimize(lambda x: x + 1, lambda x: x - 1)
    >>> rl(4)
    3

    """
    def minrule(expr):
        candidates = (rule(expr) for rule in rules)
        return min(candidates, key=objective)
    return minrule
def chain(*rules):
    """Compose a sequence of rules so that they apply to the expr sequentially."""
    def chain_rl(expr):
        result = expr
        for rule in rules:
            result = rule(result)
        return result
    return chain_rl
def greedy(tree, objective=identity, **kwargs):
    """Execute a strategic tree.  Select alternatives greedily,

    Examples
    ========

    >>> tree = [lambda x: x + 1,
    ...         (lambda x: x - 1, lambda x: 2*x)]  # either inc or dec-then-double
    >>> fn = greedy(tree)
    >>> fn(4)  # lowest value comes from the inc
    5
    >>> fn(1)  # lowest value comes from dec then double
    0

    This function selects between options in a tuple.  The result is chosen that
    minimizes the objective function.

    >>> fn = greedy(tree, objective=lambda x: -x)  # maximize
    >>> fn(4)  # highest value comes from the dec then double
    6
    >>> fn(1)  # highest value comes from the inc
    2

    """
    # Lists are alternatives (pick the objective-minimizing result);
    # tuples are sequences (apply the rules in order).
    optimize = functools.partial(minimize, objective=objective)
    return treeapply(tree, {list: optimize, tuple: chain}, **kwargs)
def do_one(rules):
    """Try each of the rules until one works. Then stop."""
    def do_one_rl(expr):
        for rl in rules:
            attempt = rl(expr)
            if attempt != expr:   # first rule that changes the expr wins
                return attempt
        return expr
    return do_one_rl
def condition(cond, rule):
    """Only apply rule if condition is true."""
    def conditioned_rl(expr):
        return rule(expr) if cond(expr) else expr
    return conditioned_rl
def exhaust(rule):
    """Apply a rule repeatedly until it has no effect."""
    def exhaustive_rl(expr):
        previous, current = expr, rule(expr)
        while current != previous:   # iterate to a fixed point
            previous, current = current, rule(current)
        return current
    return exhaustive_rl
# Default accessors used by sall/bottom_up to traverse Basic trees:
# how to read the head, rebuild a node, detect leaves, and list children.
basic_fns = {'op': type,
             'new': Basic.__new__,
             'leaf': lambda x: not isinstance(x, Basic) or x.is_Atom,
             'children': lambda x: x.args}
def sall(rule, fns=basic_fns):
    """Strategic all - apply rule to args."""
    op, new, children, leaf = (fns.get(name)
                               for name in ('op', 'new', 'children', 'leaf'))

    def METHOD_NAME(expr):
        if leaf(expr):
            return expr
        # Rebuild the node with the rule applied to each child.
        return new(op(expr), *map(rule, children(expr)))
    return METHOD_NAME
def bottom_up(rule, fns=basic_fns):
    """Apply a rule down a tree running it on the bottom nodes first."""
    def recurse_children(expr):
        # Recursively rewrite every child before the node itself is rewritten.
        apply_to_children = sall(bottom_up(rule, fns), fns)
        return apply_to_children(expr)
    return chain(recurse_children, rule)
def null_safe(rule):
    """Return original expr if rule returns None."""
    def null_safe_rl(expr):
        result = rule(expr)
        return expr if result is None else result
    return null_safe_rl
298,379 | parse | import scrapy
from scrapy.http import JsonRequest
from locations.dict_parser import DictParser
from locations.geo import point_locations
from locations.hours import OpeningHours
from locations.spiders.circle_k import CircleKSpider
from locations.spiders.cvs_us import CvsUSSpider
from locations.spiders.rite_aid_us import RiteAidUSSpider
from locations.spiders.speedway_us import SpeedwayUSSpider
from locations.spiders.target_us import TargetUSSpider
from locations.spiders.walgreens import WalgreensSpider
class BMOHarrisSpider(scrapy.Spider):
    """Locate BMO (Harris) Bank branches and ATMs via the brand's locator API.

    Sweeps the US and Canada with 100-mile-radius point searches and yields
    one item per returned location, annotating co-located host stores.
    """

    name = "bmo_harris"
    item_attributes = {"brand": "BMO Harris Bank", "brand_wikidata": "Q4835981"}
    allowed_domains = ["branches.bmoharris.com"]
    download_delay = 0.5

    def start_requests(self):
        # One JSON search per 100-mile centroid covering both countries.
        for lat, lon in point_locations(["us_centroids_100mile_radius.csv", "ca_centroids_100mile_radius.csv"]):
            yield JsonRequest(
                url="https://branchlocator.bmoharris.com/rest/locatorsearch",
                data={
                    "request": {
                        "appkey": "1C92EACC-1A19-11E7-B395-EE7D55A65BB0",
                        "formdata": {
                            "limit": "0",
                            "geolocs": {"geoloc": [{"latitude": lat, "longitude": lon}]},
                            "searchradius": "100",
                        },
                    }
                },
            )

    def METHOD_NAME(self, response, **kwargs):
        """Parse one locator-search response into location items."""
        res = response.json()
        if res["code"] != 1:  # API signals failure / no results
            return
        for shop in res["response"]["collection"]:
            item = DictParser.METHOD_NAME(shop)
            item["ref"] = shop["clientkey"]
            # BUGFIX: default the website host so it is always bound even if
            # the API returns a country other than US/CA (previously an
            # UnboundLocalError).  Canadian locations use the bmo.com domain
            # and the Canadian brand identity.
            host = "https://branches.bmoharris.com"
            if item["country"] == "CA":
                host = "https://branches.bmo.com"
                item["brand"] = "BMO"
                item["brand_wikidata"] = "Q806693"
            item["street_address"] = ", ".join(filter(None, [shop.get("address1"), shop.get("address2")]))
            item["website"] = "{}/{}/{}/{}/".format(
                host,
                item["state"].lower(),
                item["city"].lower().replace(" ", "-"),
                item["ref"],
            )
            oh = OpeningHours()
            for day in [
                "monday",
                "tuesday",
                "wednesday",
                "thursday",
                "friday",
                "saturday",
                "sunday",
            ]:
                # Hours come as e.g. mondayopen="0900"/mondayclose="1700".
                oh.add_range(
                    day.title()[:2],
                    shop[day + "open"],
                    shop[day + "close"],
                    time_format="%H%M",
                )
            item["opening_hours"] = oh.as_opening_hours()
            item["extras"] = {}
            if shop["grouptype"] in ["BMOHarrisATM", "BMOATM"]:
                item["extras"]["amenity"] = "atm"
                # ATMs are often placed inside a partner store; record it.
                if item["name"] == "Walgreens":
                    item["located_in"] = WalgreensSpider.WALGREENS["brand"]
                    item["located_in_wikidata"] = WalgreensSpider.WALGREENS["brand_wikidata"]
                elif item["name"] == "CVS":
                    item["located_in"] = CvsUSSpider.item_attributes["brand"]
                    item["located_in_wikidata"] = CvsUSSpider.item_attributes["brand_wikidata"]
                elif item["name"] == "Circle K":
                    item["located_in"] = CircleKSpider.item_attributes["brand"]
                    item["located_in_wikidata"] = CircleKSpider.item_attributes["brand_wikidata"]
                elif item["name"] == "Speedway":
                    item["located_in"] = SpeedwayUSSpider.item_attributes["brand"]
                    item["located_in_wikidata"] = SpeedwayUSSpider.item_attributes["brand_wikidata"]
                elif item["name"] == "Rite Aid":
                    item["located_in"] = RiteAidUSSpider.item_attributes["brand"]
                    item["located_in_wikidata"] = RiteAidUSSpider.item_attributes["brand_wikidata"]
                elif item["name"] == "Target":
                    item["located_in"] = TargetUSSpider.item_attributes["brand"]
                    item["located_in_wikidata"] = TargetUSSpider.item_attributes["brand_wikidata"]
            elif shop["grouptype"] in ["BMOHarrisBranches", "BMOBranches"]:
                item["extras"]["amenity"] = "bank"
            else:
                item["extras"]["type"] = shop["grouptype"]
            yield item
298,380 | list | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._access_review_instance_contacted_reviewers_operations import build_list_request
T = TypeVar("T")
# Optional per-call callback: cls(pipeline_response, deserialized, headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AccessReviewInstanceContactedReviewersOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.authorization.v2021_12_01_preview.aio.AuthorizationManagementClient`'s
        :attr:`access_review_instance_contacted_reviewers` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # Generated convention: plumbing is passed either positionally
        # (client, config, serializer, deserializer, api_version) or by keyword.
        input_args = METHOD_NAME(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
        self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")

    @distributed_trace
    def METHOD_NAME(
        self, schedule_definition_id: str, id: str, **kwargs: Any
    ) -> AsyncIterable["_models.AccessReviewContactedReviewer"]:
        """Get access review instance contacted reviewers.

        :param schedule_definition_id: The id of the access review schedule definition. Required.
        :type schedule_definition_id: str
        :param id: The id of the access review instance. Required.
        :type id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AccessReviewContactedReviewer or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.authorization.v2021_12_01_preview.models.AccessReviewContactedReviewer]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Explicit kwarg/param wins; otherwise the client's configured version.
        api_version: str = kwargs.pop(
            "api_version", _params.pop("api-version", self._api_version or "2021-12-01-preview")
        )
        cls: ClsType[_models.AccessReviewContactedReviewerListResult] = kwargs.pop("cls", None)

        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the request from the operation template.
            if not next_link:
                request = build_list_request(
                    schedule_definition_id=schedule_definition_id,
                    id=id,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.METHOD_NAME.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items).
            deserialized = self._deserialize("AccessReviewContactedReviewerListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    METHOD_NAME.metadata = {
        "url": "/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/accessReviewScheduleDefinitions/{scheduleDefinitionId}/instances/{id}/contactedReviewers"
    }
298,381 | sbc frame analysis sig | #!/usr/bin/env python3
import numpy as np
import wave
import struct
import sys
from sbc import *
# Analysis filter state: one 80-sample delay line per channel (max 2 channels).
X = np.zeros(shape=(2,80), dtype = np.int16)
implementation = "SIG"  # selected analysis implementation ("SIG" is the only one provided)
msbc_enabled = 0        # 1 when encoding mSBC instead of regular SBC
total_time_ms = 0       # accumulated analysis time for the end-of-run report
def fetch_samples_for_next_sbc_frame(fin, frame):
    """Read one SBC frame's worth of PCM samples from *fin* into *frame*.

    Reads ``nr_blocks * nr_subbands`` audio frames and de-interleaves stereo
    data into ``frame.pcm[channel][sample]``.
    """
    raw_data = fin.readframes(frame.nr_blocks * frame.nr_subbands)
    # 16-bit samples: half as many values as bytes.  Use integer division --
    # on Python 3, "/" yields a float, and range(float) raises TypeError.
    fmt = "%ih" % (len(raw_data) // 2)
    data = struct.unpack(fmt, raw_data)
    if frame.nr_channels == 2:
        # Interleaved stereo: even indices -> left, odd -> right.
        for i in range(len(data) // 2):
            frame.pcm[0][i] = data[2*i]
            frame.pcm[1][i] = data[2*i+1]
    else:
        for i in range(len(data)):
            frame.pcm[0][i] = data[i]
def METHOD_NAME(frame, ch, blk, C):
    """Reference ("SIG") analysis filterbank for one block of one channel.

    Consumes the M newest samples from frame.EX, updates the per-channel
    delay line X, applies the prototype window C and a cosine modulation,
    and writes M subband samples into frame.sb_sample[blk][ch].
    """
    global X
    M = frame.nr_subbands
    L = 10 * M        # length of the windowed history used per block
    M2 = 2*M
    L2 = 2*L

    Z = np.zeros(L)
    Y = np.zeros(M2)
    W = np.zeros(shape=(M, M2))
    S = np.zeros(M)

    # Shift the delay line by M samples (oldest values fall off the end)...
    for i in range(L-1, M-1, -1):
        X[ch][i] = X[ch][i-M]
    # ...and insert the M new input samples, newest first.
    for i in range(M-1, -1, -1):
        X[ch][i] = frame.EX[M-1-i]

    # Window the history with the prototype filter coefficients.
    for i in range(L):
        Z[i] = X[ch][i] * C[i]

    # Partial sums: fold the L windowed samples into 2*M values.
    for i in range(M2):
        for k in range(5):
            Y[i] += Z[i+k*M2]

    # Cosine-modulate the partial sums down to M subband samples.
    for i in range(M):
        for k in range(M2):
            W[i][k] = np.cos((i+0.5)*(k-M/2)*np.pi/M)
            S[i] += W[i][k] * Y[k]

    for sb in range(M):
        frame.sb_sample[blk][ch][sb] = S[sb]
def sbc_frame_analysis(frame, ch, blk, proto_table):
    """Dispatch one block/channel analysis to the selected implementation, timing it."""
    global total_time_ms, implementation

    start = time_ms()
    if implementation != "SIG":
        print ("Analysis %s not implemented" % implementation)
        exit(1)
    METHOD_NAME(frame, ch, blk, proto_table)
    # Accumulate wall-clock analysis time for the end-of-run statistics.
    total_time_ms += time_ms() - start
def sbc_analysis(frame):
    """Run the analysis filterbank over every channel and block of *frame*.

    Returns 0 on success, -1 for an unsupported subband count.
    """
    if frame.nr_subbands == 4:
        proto = Proto_4_40
    elif frame.nr_subbands == 8:
        proto = Proto_8_80
    else:
        return -1

    frame.sb_sample = np.ndarray(shape=(frame.nr_blocks, frame.nr_channels, frame.nr_subbands))
    for ch in range(frame.nr_channels):
        index = 0
        for blk in range(frame.nr_blocks):
            # Stage the next M PCM samples, then analyze the block.
            for sb in range(frame.nr_subbands):
                frame.EX[sb] = frame.pcm[ch][index]
                index += 1
            sbc_frame_analysis(frame, ch, blk, proto)
    return 0
def sbc_encode(frame, force_channel_mode):
    """Analyze then quantize *frame*; return the first error code encountered."""
    err = sbc_analysis(frame)
    if err < 0:
        return err
    return sbc_quantization(frame, force_channel_mode)
def sbc_quantization(frame, force_channel_mode):
    """Quantize the subband samples of *frame* into audio_sample values.

    Computes scale factors and the bit allocation, then maps each subband
    sample to an unsigned quantization level.
    """
    global msbc_enabled

    calculate_channel_mode_and_scale_factors(frame, force_channel_mode)
    frame.bits = sbc_bit_allocation(frame)

    # Reconstruct the Audio Samples
    # levels[ch][sb] = 2**bits - 1, the number of quantization steps.
    frame.levels = np.zeros(shape=(frame.nr_channels, frame.nr_subbands), dtype = np.int32)
    for ch in range(frame.nr_channels):
        for sb in range(frame.nr_subbands):
            frame.levels[ch][sb] = (1 << frame.bits[ch][sb]) - 1 #pow(2.0, frame.bits[ch][sb]) - 1

    # mSBC and SBC frames are distinguished by their syncword.
    if msbc_enabled:
        frame.syncword = 0xad
    else:
        frame.syncword = 0x9c

    frame.crc_check = calculate_crc(frame)

    for blk in range(frame.nr_blocks):
        for ch in range(frame.nr_channels):
            for sb in range(frame.nr_subbands):
                if frame.levels[ch][sb] > 0:
                    SB = frame.sb_sample[blk][ch][sb]
                    L = frame.levels[ch][sb]
                    SF = frame.scalefactor[ch][sb]
                    # Scale the sample by its scale factor and map it onto
                    # [0, levels] (midtread quantizer).
                    frame.audio_sample[blk][ch][sb] = np.uint16(((SB * L / SF + L) - 1.0)/2.0)
                else:
                    # No bits allocated for this subband.
                    frame.audio_sample[blk][ch][sb] = 0
    return 0
def sbc_write_frame(fout, sbc_encoder_frame):
    """Serialize *sbc_encoder_frame* to its bitstream and append it to *fout*."""
    fout.write(bytearray(frame_to_bitstream(sbc_encoder_frame)))
if __name__ == "__main__":
    usage = '''
    Usage: ./sbc_encoder.py input.wav blocks subbands bitpool allocation_method[0-LOUDNESS,1-SNR] force_channel_mode[2-STEREO,3-JOINT_STEREO] [0-sbc|1-msbc]
    Example: ./sbc_encoder.py fanfare.wav 16 4 31 0 0 0
    '''
    nr_blocks = 0
    nr_subbands = 0

    # BUGFIX: all seven positional arguments (argv[1]..argv[7]) are required
    # because sys.argv[7] is read unconditionally below; the previous check
    # (< 7) let a 6-argument invocation die with an uncaught IndexError.
    if (len(sys.argv) < 8):
        print(usage)
        sys.exit(1)
    try:
        infile = sys.argv[1]
        if not infile.endswith('.wav'):
            print(usage)
            sys.exit(1)
        msbc_enabled = int(sys.argv[7])
        print("msbc_enabled %d"%msbc_enabled)
        if msbc_enabled:
            sbcfile = infile.replace('.wav', '-encoded.msbc')
        else:
            sbcfile = infile.replace('.wav', '-encoded.sbc')

        nr_blocks = int(sys.argv[2])
        nr_subbands = int(sys.argv[3])
        bitpool = int(sys.argv[4])
        allocation_method = int(sys.argv[5])
        force_channel_mode = int(sys.argv[6])
        print("force_channel_mode %d"%force_channel_mode)

        fin = wave.open(infile, 'rb')
        nr_channels = fin.getnchannels()
        if msbc_enabled:
            # mSBC uses a fixed configuration: 16 kHz mono, 8 subbands,
            # bitpool 26, loudness allocation -- override the CLI values.
            sampling_frequency = 16000
            nr_channels = 1
            bitpool = 26
            nr_subbands = 8
            allocation_method = 0
            force_channel_mode = 0
        else:
            sampling_frequency = fin.getframerate()
            nr_channels = fin.getnchannels()
        nr_audio_frames = fin.getnframes()

        subband_frame_count = 0
        audio_frame_count = 0
        nr_samples = nr_blocks * nr_subbands
        fout = open(sbcfile, 'wb')
        while audio_frame_count < nr_audio_frames:
            if subband_frame_count % 200 == 0:
                print("== Frame %d == " % (subband_frame_count))
            sbc_encoder_frame = SBCFrame(nr_blocks, nr_subbands, nr_channels, bitpool, sampling_frequency, allocation_method, force_channel_mode)
            if subband_frame_count == 0:
                print (sbc_encoder_frame)

            fetch_samples_for_next_sbc_frame(fin, sbc_encoder_frame)
            sbc_encode(sbc_encoder_frame, force_channel_mode)
            sbc_write_frame(fout, sbc_encoder_frame)

            audio_frame_count += nr_samples
            subband_frame_count += 1
        fin.close()
        fout.close()
        print("DONE, WAV file %s encoded into SBC file %s " % (infile, sbcfile))
        if subband_frame_count > 0:
            print ("Average analysis time per frame: %d ms/frame" % (total_time_ms/subband_frame_count))
        else:
            print ("No frame found")

    except IOError as e:
        print(usage)
        sys.exit(1)
298,382 | remove extension | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2009 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Utility functions for the File Manager Connector for Python
"""
import string, re
import os
import config as Config
# Generic manipulation functions
def METHOD_NAME(fileName):
	"Strip the trailing extension (everything from the last dot) from fileName"
	return fileName[:fileName.rindex(".")]
def getExtension(fileName):
	"Return the text after the last dot of fileName"
	return fileName[fileName.rindex(".") + 1:]
def removeFromStart(string, char):
	"Strip every leading occurrence of char from string"
	return string.lstrip(char)
def removeFromEnd(string, char):
	"Strip every trailing occurrence of char from string"
	return string.rstrip(char)
# Path functions
def combinePaths( basePath, folder ):
	"Join basePath and folder with exactly one slash between them"
	return removeFromEnd( basePath, '/' ) + '/' + removeFromStart( folder, '/' )
def getFileName(filename):
	" Purpose: helper function to extrapolate the filename "
	# Keep only the part after the last slash or backslash, whichever applies.
	for separator in ("/", "\\"):
		filename = filename.rsplit(separator, 1)[-1]
	return filename
def sanitizeFolderName( newFolderName ):
	"Do a cleanup of the folder name to avoid possible problems"
	# Replace . \ / | : ? * " < > and control characters with underscores
	forbidden = r'\.|\\|\/|\||\:|\?|\*|"|<|>|[\x00-\x1f\x7f-\x9f]'
	return re.sub( forbidden, '_', newFolderName )
def sanitizeFileName( newFileName ):
	"Do a cleanup of the file name to avoid possible problems"
	# Replace dots in the name with underscores (only one dot can be there... security issue).
	if ( Config.ForceSingleExtension ): # remove dots
		newFileName = re.sub ( '\\.(?![^.]*$)', '_', newFileName ) ;
	newFileName = newFileName.replace('\\','/')	# convert windows to unix path
	newFileName = os.path.basename (newFileName)	# strip directories
	# Replace \ / | : ? * " < > and control characters with underscores.
	# BUGFIX: the pattern previously ended with a stray '/', which made the
	# control-character alternation match only when followed by a slash
	# (compare sanitizeFolderName above).
	return re.sub ( '\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[\x00-\x1f\x7f-\x9f]', '_', newFileName )
def getCurrentFolder(currentFolder):
	"Normalize a client-supplied folder path; return None if it is invalid"
	if not currentFolder:
		currentFolder = '/'

	# Check the current folder syntax (must begin and end with a slash).
	# BUGFIX: use '!=' -- the '<>' operator is deprecated in Python 2 and
	# removed in Python 3; '!=' behaves identically on both.
	if (currentFolder[-1] != "/"):
		currentFolder += "/"
	if (currentFolder[0] != "/"):
		currentFolder = "/" + currentFolder

	# Ensure the folder path has no double-slashes
	while '//' in currentFolder:
		currentFolder = currentFolder.replace('//','/')

	# Check for invalid folder paths (..)
	if '..' in currentFolder or '\\' in currentFolder:
		return None

	# Reject other invalid characters / sequences.
	if re.search( '(/\\.)|(//)|([\\\\:\\*\\?\\""\\<\\>\\|]|[\x00-\x1F]|[\x7f-\x9f])', currentFolder ):
		return None

	return currentFolder
def mapServerPath( environ, url):
	" Emulate the asp Server.mapPath function. Given an url path return the physical directory that it corresponds to "
	# This isn't correct but for the moment there's no other solution
	# If this script is under a virtual directory or symlink it will detect the problem and stop
	return combinePaths( getRootPath(environ), url )
def mapServerFolder(resourceTypePath, folderPath):
	"Resolve folderPath inside the physical path of the resource type"
	return combinePaths ( resourceTypePath , folderPath )
def getRootPath(environ):
	"Purpose: returns the root path on the server"
	# WARNING: this may not be thread safe, and doesn't work w/ VirtualServer/mod_python
	# Use Config.UserFilesAbsolutePath instead
	# ('in' replaces dict.has_key, which Python 3 removed; works on Python 2 too.)
	if 'DOCUMENT_ROOT' in environ:
		return environ['DOCUMENT_ROOT']

	# Fall back to deducing the document root from the script's own location.
	realPath = os.path.realpath( './' )
	selfPath = environ['SCRIPT_FILENAME']
	selfPath = selfPath [ : selfPath.rfind( '/' ) ]
	selfPath = selfPath.replace( '/', os.path.sep)

	position = realPath.find(selfPath)

	# This can check only that this script isn't run from a virtual dir
	# But it avoids the problems that arise if it isn't checked.
	# BUGFIX: removed a leftover debug statement 'raise realPath' that made
	# this branch always raise before the check below could run; also
	# replaced the removed '<>' operator with '!='.
	if ( position < 0 or position != len(realPath) - len(selfPath) or realPath[ : position ]==''):
		raise Exception('Sorry, can\'t map "UserFilesPath" to a physical path. You must set the "UserFilesAbsolutePath" value in "editor/filemanager/connectors/py/config.py".')

	return realPath[ : position ]
298,383 | srad | # Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
import math
def srad_1(q0sqr, cols, rows, J, dN, dS, dE, dW, c):
    """First SRAD pass: fill the directional-derivative buffers and the
    diffusion coefficient c for every pixel of the rows x cols image J."""
    for i in range(rows):
        for j in range(cols):
            k = i * cols + j  # flat index of the current pixel
            Jc = J[k]

            # Border-clamped neighbour coordinates (replicate edge pixels).
            n = max(i - 1, 0)
            s = min(i + 1, rows - 1)
            w = max(j - 1, 0)
            e = min(j + 1, cols - 1)

            # Directional derivatives toward each neighbour.
            dn = J[n * cols + j] - Jc
            ds = J[s * cols + j] - Jc
            dw = J[i * cols + w] - Jc
            de = J[i * cols + e] - Jc
            dN[k], dS[k], dE[k], dW[k] = dn, ds, de, dw

            G2 = (dn * dn + ds * ds + dw * dw + de * de) / (Jc * Jc)
            L = (dn + ds + dw + de) / Jc
            num = (0.5 * G2) - ((1.0 / 16.0) * (L * L))
            den = 1 + (.25 * L)
            qsqr = num / (den * den)

            # diffusion coefficent (equ 33), saturated to [0, 1]
            den = (qsqr - q0sqr) / (q0sqr * (1 + q0sqr))
            c[k] = min(max(1.0 / (1.0 + den), 0.), 1.)
def srad_2(cols, rows, J, dN, dS, dE, dW, c, Lambda):
    """Second SRAD pass: divergence of the diffusion field and image update."""
    for i in range(rows):
        for j in range(cols):
            k = i * cols + j  # flat index of the current pixel

            # Neighbour diffusion coefficients; S/E are border-clamped,
            # N/W reuse the centre coefficient (as in the original kernel).
            s = min(i + 1, rows - 1)
            e = min(j + 1, cols - 1)
            cN = c[k]
            cS = c[s * cols + j]
            cW = c[k]
            cE = c[i * cols + e]

            # divergence (equ 58)
            D = cN * dN[k] + cS * dS[k] + cW * dW[k] + cE * dE[k]
            # image update (equ 61)
            J[k] += 0.25 * Lambda * D
def METHOD_NAME(nIter, size_R, cols, rows, J, dN, dS, dE, dW, c, r1, r2, c1, c2, Lambda):
    """Run nIter SRAD iterations: estimate the noise statistic q0sqr over the
    ROI [r1..r2] x [c1..c2], then apply the two diffusion passes in place."""
    for _ in range(nIter):
        total = 0.
        total_sq = 0.
        for i in range(r1, r2 + 1):
            for j in range(c1, c2 + 1):
                val = J[i * cols + j]
                total += val
                total_sq += val * val
        meanROI = total / size_R
        varROI = (total_sq / size_R) - meanROI * meanROI
        q0sqr = varROI / (meanROI * meanROI)
        srad_1(q0sqr, cols, rows, J, dN, dS, dE, dW, c)
        srad_2(cols, rows, J, dN, dS, dE, dW, c, Lambda)
class Data:
    """Mutable container for the benchmark's working arrays."""
    def __init__(self):
        # Derivative buffers, image, pristine image copy, and coefficients;
        # populated by __setup__.
        self.dN = self.dS = self.dW = self.dE = None
        self.J = None
        self.JJ = None
        self.c = None
# Shared benchmark state and the default image size (pixels per side).
data = Data()
default_size = 256
def measure(nIter, cols=default_size, rows=default_size, r1=0, r2=127, c1=0, c2=127, Lambda=0.5):
    """Run the SRAD main loop over the globally initialized image arrays."""
    print("Start the SRAD main loop")
    roi_size = (r2 - r1 + 1) * (c2 - c1 + 1)
    METHOD_NAME(nIter, roi_size, cols, rows, data.J, data.dN, data.dS,
                data.dE, data.dW, data.c, r1, r2, c1, c2, Lambda)
def __benchmark__(nIter=100):
    # Entry point invoked by the benchmark harness.
    measure(nIter)
def __setup__(nIter, cols=default_size, rows=default_size):
    """Allocate and randomize the benchmark arrays (seeded for reproducibility)."""
    size_I = cols * rows
    print("Initializing...")
    data.dN = [0.] * size_I
    data.dS = [0.] * size_I
    data.dW = [0.] * size_I
    data.dE = [0.] * size_I
    print("Randomizing the input matrix")
    random.seed(7)
    data.J = [float(math.exp(random.random() / 255)) for j in range(size_I)]
    # Pristine copy used by __cleanup__ to reset the image between runs.
    data.JJ = list(data.J)
    data.c = [0.] * size_I
def __cleanup__(nIter, cols=default_size, rows=default_size):
    """Restore the image from the pristine copy so repeated runs start clean."""
    n = cols * rows
    data.J[:n] = data.JJ[:n]
298,384 | single shot | """
Basic Qt testing framework
==========================
"""
import unittest
import gc
from typing import Callable, Any
from AnyQt.QtWidgets import QApplication, QWidget
from AnyQt.QtCore import (
QCoreApplication, QTimer, QStandardPaths, QPoint, Qt, QMimeData, QPointF
)
from AnyQt.QtGui import (
QMouseEvent, QDragEnterEvent, QDropEvent, QDragMoveEvent, QDragLeaveEvent,
QContextMenuEvent
)
from AnyQt.QtTest import QTest
from AnyQt.QtCore import PYQT_VERSION
DEFAULT_TIMEOUT = 50
class QCoreAppTestCase(unittest.TestCase):
    """TestCase that manages a (QCore)Application instance for the class.

    Creates/reuses the application in setUpClass, overrides the application
    name/domain for the test run, and restores them in tearDownClass.
    """
    # Subclasses may override with a richer application class (see QAppTestCase).
    _AppClass = QCoreApplication
    app = None  # type: QCoreApplication

    # Saved application identity, restored in tearDownClass.
    __appdomain = ""
    __appname = ""

    @classmethod
    def setUpClass(cls):
        super(QCoreAppTestCase, cls).setUpClass()
        QStandardPaths.setTestModeEnabled(True)
        # Reuse an existing application instance if one is already running.
        app = cls._AppClass.instance()
        if app is None:
            app = cls._AppClass([])
        cls.app = app
        cls.__appname = cls.app.applicationName()
        cls.__appdomain = cls.app.organizationDomain()
        cls.app.setApplicationName("orangecanvas.testing")
        cls.app.setOrganizationDomain("biolab.si")

    def setUp(self):
        super(QCoreAppTestCase, self).setUp()

    def tearDown(self):
        super(QCoreAppTestCase, self).tearDown()

    @classmethod
    def tearDownClass(cls):
        gc.collect()
        cls.app.setApplicationName(cls.__appname)
        cls.app.setOrganizationDomain(cls.__appdomain)
        # Flush any events still queued for delivery before tearing down.
        cls.app.sendPostedEvents(None, 0)
        # Keep app instance alive between tests with PyQt5 5.14.0 and later
        if PYQT_VERSION <= 0x050e00:
            cls.app = None
        super(QCoreAppTestCase, cls).tearDownClass()
        QStandardPaths.setTestModeEnabled(False)

    @classmethod
    def qWait(cls, timeout=DEFAULT_TIMEOUT):
        # Spin the event loop for `timeout` milliseconds.
        QTest.qWait(timeout)

    @classmethod
    def METHOD_NAME(cls, timeout: int, slot: 'Callable[[], Any]'):
        # Schedule `slot` to run once after `timeout` milliseconds.
        QTimer.METHOD_NAME(timeout, slot)
class QAppTestCase(QCoreAppTestCase):
    """QCoreAppTestCase variant using a full QApplication (widget support)."""
    _AppClass = QApplication
    app = None  # type: QApplication
def mouseMove(widget, buttons, modifier=Qt.NoModifier, pos=QPoint(), delay=-1):
    # type: (QWidget, Qt.MouseButtons, Qt.KeyboardModifier, QPoint, int) -> None
    """
    Like QTest.mouseMove, but with `buttons` and `modifier` parameters.

    Parameters
    ----------
    widget : QWidget
    buttons: Qt.MouseButtons
    modifier : Qt.KeyboardModifiers
    pos : QPoint
    delay : int
    """
    if pos.isNull():
        pos = widget.rect().center()
    event = QMouseEvent(
        QMouseEvent.MouseMove, QPointF(pos), QPointF(widget.mapToGlobal(pos)),
        Qt.NoButton, buttons, modifier
    )
    if delay > 0:
        QTest.qWait(delay)
    QCoreApplication.sendEvent(widget, event)
def contextMenu(widget: QWidget, pos: QPoint, delay=-1) -> None:
    """
    Synthesize and deliver a mouse-triggered QContextMenuEvent at *pos*.
    """
    event = QContextMenuEvent(
        QContextMenuEvent.Mouse, pos, widget.mapToGlobal(pos)
    )
    if delay > 0:
        QTest.qWait(delay)
    QCoreApplication.sendEvent(widget, event)
def dragDrop(
        widget: QWidget, mime: QMimeData, pos: QPoint = QPoint(),
        action=Qt.CopyAction, buttons=Qt.LeftButton, modifiers=Qt.NoModifier
) -> bool:
    """
    Simulate a drag/drop interaction on the `widget`.

    A `QDragEnterEvent`, `QDragMoveEvent` and `QDropEvent` are created and
    dispatched to the `widget`. However if any of the `QDragEnterEvent` or
    `QDragMoveEvent` are not accepted, a `QDragLeaveEvent` is dispatched
    to 'reset' the widget state before this function returns `False`

    Parameters
    ----------
    widget: QWidget
        The target widget.
    mime: QMimeData
        The mime data associated with the drag/drop.
    pos: QPoint
        Position of the drop
    action: Qt.DropActions
        Type of acceptable drop actions
    buttons: Qt.MouseButtons:
        Pressed mouse buttons.
    modifiers: Qt.KeyboardModifiers
        Pressed keyboard modifiers.

    Returns
    -------
    state: bool
        Were the events accepted.

    See Also
    --------
    QDragEnterEvent, QDropEvent
    """
    if pos.isNull():
        pos = widget.rect().center()
    # Qt's drag protocol: enter -> move -> drop. Each event starts out
    # not-accepted so the widget's own handlers decide the outcome.
    ev = QDragEnterEvent(pos, action, mime, buttons, modifiers)
    ev.setAccepted(False)
    QApplication.sendEvent(widget, ev)
    ev = QDragMoveEvent(pos, action, mime, buttons, modifiers)
    ev.setAccepted(False)
    QApplication.sendEvent(widget, ev)
    if not ev.isAccepted():
        # The widget rejected the move; send a leave event so it can reset
        # any drag-highlight state before reporting failure.
        QApplication.sendEvent(widget, QDragLeaveEvent())
        return False
    ev = QDropEvent(QPointF(pos), action, mime, buttons, modifiers)
    ev.setAccepted(False)
    QApplication.sendEvent(widget, ev)
    return ev.isAccepted()
def dragEnterLeave(
        widget: QWidget, mime: QMimeData, pos=QPoint(),
        action=Qt.CopyAction, buttons=Qt.LeftButton, modifiers=Qt.NoModifier
) -> None:
    """
    Simulate a drag/move/leave interaction on the `widget`.

    A QDragEnterEvent, QDragMoveEvent and a QDragLeaveEvent are created
    and dispatched to the widget.  Unlike :func:`dragDrop`, no drop is
    performed and the events' accepted state is not inspected.
    """
    if pos.isNull():
        pos = widget.rect().center()
    ev = QDragEnterEvent(pos, action, mime, buttons, modifiers)
    ev.setAccepted(False)
    QApplication.sendEvent(widget, ev)
    ev = QDragMoveEvent(
        pos, action, mime, buttons, modifiers, QDragMoveEvent.DragMove
    )
    ev.setAccepted(False)
    QApplication.sendEvent(widget, ev)
    ev = QDragLeaveEvent()
    ev.setAccepted(False)
    QApplication.sendEvent(widget, ev)
    return
298,385 | test x inconsistent steps | # Copyright iris-grib contributors
#
# This file is part of iris-grib and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Unit tests for
:func:`iris_grib._load_convert.grid_definition_template_12`.
"""
# import iris_grib.tests first so that some things can be initialised
# before importing anything else.
import iris_grib.tests as tests
import numpy as np
import warnings
import iris.coord_systems
import iris.coords
import iris.exceptions
from iris_grib.tests.unit.load_convert import empty_metadata
from iris_grib._load_convert import _MDI as MDI
from iris_grib._load_convert import grid_definition_template_12
class Test(tests.IrisGribTest):
    """Unit tests for :func:`iris_grib._load_convert.grid_definition_template_12`
    (Transverse Mercator grid definitions)."""

    def section_3(self):
        """Return a valid GRIB2 section-3 (GDT 12) keyword dictionary
        describing a 4x3 grid on an OSGB-style Transverse Mercator CS."""
        section = {
            'shapeOfTheEarth': 7,
            'scaleFactorOfRadiusOfSphericalEarth': MDI,
            'scaledValueOfRadiusOfSphericalEarth': MDI,
            'scaleFactorOfEarthMajorAxis': 3,
            'scaledValueOfEarthMajorAxis': 6377563396,
            'scaleFactorOfEarthMinorAxis': 3,
            'scaledValueOfEarthMinorAxis': 6356256909,
            'Ni': 4,
            'Nj': 3,
            'latitudeOfReferencePoint': 49000000,
            'longitudeOfReferencePoint': -2000000,
            'resolutionAndComponentFlags': 0,
            'scaleFactorAtReferencePoint': 0.9996012717,
            'XR': 40000000,
            'YR': -10000000,
            'scanningMode': 64,
            'Di': 200000,
            'Dj': 100000,
            'X1': 29300000,
            'Y1': 9200000,
            'X2': 29900000,
            'Y2': 9400000
        }
        return section

    def expected(self, y_dim, x_dim, x_negative=False, y_negative=False):
        """Build the metadata expected for the grid of :meth:`section_3`,
        with x/y mapped to the given dims and optionally reversed."""
        # Prepare the expectation.
        expected = empty_metadata()
        ellipsoid = iris.coord_systems.GeogCS(6377563.396, 6356256.909)
        cs = iris.coord_systems.TransverseMercator(49, -2, 400000, -100000,
                                                   0.9996012717, ellipsoid)
        nx = 4
        x_origin = 293000
        dx = 2000
        x_array = np.arange(nx) * dx + x_origin
        if x_negative:
            x_array = np.flip(x_array)
        x = iris.coords.DimCoord(x_array,
                                 'projection_x_coordinate', units='m',
                                 coord_system=cs)
        ny = 3
        y_origin = 92000
        dy = 1000
        y_array = np.arange(ny) * dy + y_origin
        if y_negative:
            y_array = np.flip(y_array)
        y = iris.coords.DimCoord(y_array,
                                 'projection_y_coordinate', units='m',
                                 coord_system=cs)
        expected['dim_coords_and_dims'].append((y, y_dim))
        expected['dim_coords_and_dims'].append((x, x_dim))
        return expected

    def test(self):
        section = self.section_3()
        metadata = empty_metadata()
        grid_definition_template_12(section, metadata)
        expected = self.expected(0, 1)
        self.assertEqual(metadata, expected)

    def test_spherical(self):
        section = self.section_3()
        section['shapeOfTheEarth'] = 0
        metadata = empty_metadata()
        grid_definition_template_12(section, metadata)
        expected = self.expected(0, 1)
        cs = expected['dim_coords_and_dims'][0][0].coord_system
        cs.ellipsoid = iris.coord_systems.GeogCS(6367470)
        self.assertEqual(metadata, expected)

    def test_negative_x(self):
        section = self.section_3()
        section['scanningMode'] = 0b11000000
        section['X1'], section['X2'] = section['X2'], section['X1']
        metadata = empty_metadata()
        grid_definition_template_12(section, metadata)
        expected = self.expected(0, 1, x_negative=True)
        self.assertEqual(metadata, expected)

    def test_x_inconsistent_direction(self):
        section = self.section_3()
        section['scanningMode'] = 0b11000000
        metadata = empty_metadata()
        with warnings.catch_warnings(record=True) as warn:
            grid_definition_template_12(section, metadata)
        self.assertEqual(len(warn), 1)
        message = "X definition inconsistent: scanningMode"
        self.assertRegex(str(warn[0].message), message)
        expected = self.expected(0, 1)
        self.assertEqual(metadata, expected)

    def test_x_inconsistent_steps(self):
        # Renamed from the masked METHOD_NAME: must begin with `test_` to be
        # discovered, mirroring test_y_inconsistent_steps below.
        section = self.section_3()
        section['Ni'] += 1
        metadata = empty_metadata()
        expected_regex = (
            "X definition inconsistent: .* incompatible with step-size")
        with self.assertRaisesRegex(iris.exceptions.TranslationError,
                                    expected_regex):
            grid_definition_template_12(section, metadata)

    def test_negative_y(self):
        section = self.section_3()
        section['scanningMode'] = 0b00000000
        section['Y1'], section['Y2'] = section['Y2'], section['Y1']
        metadata = empty_metadata()
        grid_definition_template_12(section, metadata)
        expected = self.expected(0, 1, y_negative=True)
        self.assertEqual(metadata, expected)

    def test_y_inconsistent_direction(self):
        section = self.section_3()
        section['scanningMode'] = 0b00000000
        metadata = empty_metadata()
        with warnings.catch_warnings(record=True) as warn:
            grid_definition_template_12(section, metadata)
        self.assertEqual(len(warn), 1)
        message = "Y definition inconsistent: scanningMode"
        self.assertRegex(str(warn[0].message), message)
        expected = self.expected(0, 1)
        self.assertEqual(metadata, expected)

    def test_y_inconsistent_steps(self):
        section = self.section_3()
        section['Nj'] += 1
        metadata = empty_metadata()
        expected_regex = (
            "Y definition inconsistent: .* incompatible with step-size")
        with self.assertRaisesRegex(iris.exceptions.TranslationError,
                                    expected_regex):
            grid_definition_template_12(section, metadata)

    def test_transposed(self):
        section = self.section_3()
        section['scanningMode'] = 0b01100000
        metadata = empty_metadata()
        grid_definition_template_12(section, metadata)
        expected = self.expected(1, 0)
        self.assertEqual(metadata, expected)

    def test_di_tolerance(self):
        # Even though Ni * Di doesn't exactly match X1 to X2 it should
        # be close enough to allow the translation.
        section = self.section_3()
        section['X2'] += 1
        metadata = empty_metadata()
        grid_definition_template_12(section, metadata)
        expected = self.expected(0, 1)
        x = expected['dim_coords_and_dims'][1][0]
        x.points = np.linspace(293000, 299000.01, 4)
        self.assertEqual(metadata, expected)

    def test_incompatible_grid_extent(self):
        section = self.section_3()
        section['X2'] += 100
        metadata = empty_metadata()
        with self.assertRaisesRegex(iris.exceptions.TranslationError,
                                    'grid'):
            grid_definition_template_12(section, metadata)

    def test_scale_workaround(self):
        section = self.section_3()
        section['scaleFactorAtReferencePoint'] = 1065346526
        metadata = empty_metadata()
        grid_definition_template_12(section, metadata)
        expected = self.expected(0, 1)
        # A float32 can't hold exactly the same value.
        cs = expected['dim_coords_and_dims'][0][0].coord_system
        cs.scale_factor_at_central_meridian = 0.9996012449264526
        self.assertEqual(metadata, expected)
if __name__ == '__main__':
    # Allow running this test module directly.
    tests.main()
298,386 | flag handler | # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Libunwind(AutotoolsPackage):
    """A portable and efficient C programming interface (API) to determine
    the call-chain of a program."""

    homepage = "https://www.nongnu.org/libunwind/"
    url = "http://download.savannah.gnu.org/releases/libunwind/libunwind-1.1.tar.gz"
    git = "https://github.com/libunwind/libunwind"
    maintainers("mwkrentel")

    version("master", branch="master")
    version("1.6-stable", branch="v1.6-stable")
    version("1.6.2", sha256="4a6aec666991fb45d0889c44aede8ad6eb108071c3554fcdff671f9c94794976")
    version("1.5-stable", branch="v1.5-stable")
    version("1.5.0", sha256="90337653d92d4a13de590781371c604f9031cdb50520366aa1e3a91e1efb1017")
    version("1.4.0", sha256="df59c931bd4d7ebfd83ee481c943edf015138089b8e50abed8d9c57ba9338435")
    version("1.3.1", sha256="43997a3939b6ccdf2f669b50fdb8a4d3205374728c2923ddc2354c65260214f8")
    version("1.2.1", sha256="3f3ecb90e28cbe53fba7a4a27ccce7aad188d3210bb1964a923a731a27a75acb")
    version(
        "1.1",
        sha256="9dfe0fcae2a866de9d3942c66995e4b460230446887dbdab302d41a8aee8d09a",
        deprecated=True,
    )

    variant("docs", default=True, description="Build man page")
    variant(
        "libs",
        default="shared,static",
        values=("shared", "static"),
        multi=True,
        description="Build shared libs, static libs or both",
    )
    variant("pic", default=False, description="Compile with position independent code.")
    variant("tests", default=True, description="Build tests")
    variant(
        "block_signals",
        default=False,
        description="Block signals before performing mutex operations",
    )
    variant(
        "components",
        values=any_combination_of("coredump", "ptrace", "setjump"),
        description="Build specified libunwind libraries",
    )
    variant(
        "conservative_checks",
        default=False,
        description="Validate all memory addresses before use",
    )
    variant("cxx_exceptions", default=False, description="Use libunwind to handle C++ exceptions")
    variant("debug", default=False, description="Turn on debug support (slows down execution)")
    variant(
        "debug_frame", default=False, description='Load the ".debug_frame" section if available'
    )
    variant("weak_backtrace", default=True, description="Provide the weak 'backtrace' symbol")
    variant("xz", default=False, description="Support xz (lzma) compressed symbol tables.")
    variant(
        "zlib",
        default=False,
        description="Support zlib compressed symbol tables " "(1.5 and later).",
    )

    # The libunwind releases contain the autotools generated files,
    # but the git repo snapshots do not.
    reconf_versions = "@master,1.5-stable,1.6-stable"
    depends_on("autoconf", type="build", when=reconf_versions)
    depends_on("automake", type="build", when=reconf_versions)
    depends_on("libtool", type="build", when=reconf_versions)
    depends_on("m4", type="build", when=reconf_versions)
    depends_on("xz", type="link", when="+xz")
    depends_on("zlib-api", type="link", when="+zlib")

    conflicts("platform=darwin", msg="Non-GNU libunwind needs ELF libraries Darwin does not have")

    provides("unwind")

    def flag_handler(self, name, flags):
        """Spack build-flag hook: renamed from the masked METHOD_NAME to
        ``flag_handler``, the name Spack's build environment calls for each
        flag variable.

        Returns the Spack three-tuple (wrapper flags, env flags, build-system
        flags); here extra flags are injected via the compiler wrapper.
        """
        wrapper_flags = []
        if name == "cflags":
            # Old releases need -fcommon with compilers that default to
            # -fno-common: https://github.com/libunwind/libunwind/pull/166
            if (
                self.spec.satisfies("@:1.4 %gcc@10:")
                or self.spec.satisfies("@:1.4 %cce@11:")
                or self.spec.satisfies("@:1.4 %clang@11:")
            ):
                wrapper_flags.append("-fcommon")
            if "+pic" in self.spec:
                wrapper_flags.append(self.compiler.cc_pic_flag)
        return (wrapper_flags, None, flags)

    def configure_args(self):
        """Translate the package variants into ./configure arguments."""
        spec = self.spec
        args = []
        args += self.enable_or_disable("documentation", variant="docs")
        args += self.enable_or_disable("libs")
        args += self.enable_or_disable("tests")
        args += self.enable_or_disable("block-signals", variant="block_signals")
        args += self.enable_or_disable("components")
        args += self.enable_or_disable("conservative-checks", variant="conservative_checks")
        args += self.enable_or_disable("cxx-exceptions", variant="cxx_exceptions")
        args += self.enable_or_disable("debug")
        args += self.enable_or_disable("debug-frame", variant="debug_frame")
        args += self.enable_or_disable("minidebuginfo", variant="xz")
        # building without weak backtrace symbol is possible in 1.5.x and later
        if self.spec.satisfies("@1.5:"):
            args += self.enable_or_disable("weak-backtrace", variant="weak_backtrace")
        # zlib support is available in 1.5.x and later
        if spec.satisfies("@1.5:"):
            args += self.enable_or_disable("zlibdebuginfo", variant="zlib")
        return args
298,387 | offset in minutes | from typing import Any, Dict, Optional
from aws_lambda_powertools.utilities.data_classes.common import DictWrapper
class TimeZone(DictWrapper):
    """Time zone information attached to a CloudWatch custom-widget event."""

    @property
    def label(self) -> str:
        """The time range label. Either 'UTC' or 'Local'"""
        return self["label"]

    @property
    def offset_iso(self) -> str:
        """The time range offset in the format +/-00:00"""
        return self["offsetISO"]

    @property
    def offset_in_minutes(self) -> int:
        """The time range offset in minutes.

        Renamed from the masked METHOD_NAME to ``offset_in_minutes``,
        matching the ``offsetInMinutes`` payload key and the snake_case
        convention of the sibling properties.
        """
        return int(self["offsetInMinutes"])
class TimeRange(DictWrapper):
    """The time range displayed by the dashboard the widget belongs to."""

    @property
    def mode(self) -> str:
        """The time range mode, i.e. 'relative' or 'absolute'"""
        return self["mode"]

    @property
    def start(self) -> int:
        """The start time within the time range"""
        return self["start"]

    @property
    def end(self) -> int:
        """The end time within the time range"""
        return self["end"]

    @property
    def relative_start(self) -> Optional[int]:
        """The relative start time within the time range (None if absent)"""
        return self.get("relativeStart")

    @property
    def zoom_start(self) -> Optional[int]:
        """The start time within the zoomed time range (None if not zoomed)"""
        return (self.get("zoom") or {}).get("start")

    @property
    def zoom_end(self) -> Optional[int]:
        """The end time within the zoomed time range (None if not zoomed)"""
        return (self.get("zoom") or {}).get("end")
class CloudWatchWidgetContext(DictWrapper):
    """Context describing the dashboard widget that invoked the Lambda."""

    @property
    def dashboard_name(self) -> str:
        """Get dashboard name, in which the widget is used"""
        return self["dashboardName"]

    @property
    def widget_id(self) -> str:
        """Get widget ID"""
        return self["widgetId"]

    @property
    def domain(self) -> str:
        """AWS domain name"""
        return self["domain"]

    @property
    def account_id(self) -> str:
        """Get AWS Account ID"""
        return self["accountId"]

    @property
    def locale(self) -> str:
        """Get locale language"""
        return self["locale"]

    @property
    def timezone(self) -> TimeZone:
        """Timezone information of the dashboard"""
        return TimeZone(self["timezone"])

    @property
    def period(self) -> int:
        """The period shown on the dashboard"""
        return int(self["period"])

    @property
    def is_auto_period(self) -> bool:
        """Whether auto period is enabled"""
        return bool(self["isAutoPeriod"])

    @property
    def time_range(self) -> TimeRange:
        """The widget time range"""
        return TimeRange(self["timeRange"])

    @property
    def theme(self) -> str:
        """The dashboard theme, i.e. 'light' or 'dark'"""
        return self["theme"]

    @property
    def link_charts(self) -> bool:
        """The widget is linked to other charts"""
        return bool(self["linkCharts"])

    @property
    def title(self) -> str:
        """Get widget title"""
        return self["title"]

    @property
    def params(self) -> Dict[str, Any]:
        """Get widget parameters"""
        return self["params"]

    @property
    def forms(self) -> Dict[str, Any]:
        """Get widget form data (the 'all' group of the forms payload)"""
        return self["forms"]["all"]

    @property
    def height(self) -> int:
        """Get widget height"""
        return int(self["height"])

    @property
    def width(self) -> int:
        """Get widget width"""
        return int(self["width"])
class CloudWatchDashboardCustomWidgetEvent(DictWrapper):
    """CloudWatch dashboard custom widget event

    You can use a Lambda function to create a custom widget on a CloudWatch
    dashboard.

    Documentation:
    -------------
    - https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/add_custom_widget_dashboard_about.html
    """

    @property
    def describe(self) -> bool:
        """Display widget documentation (False when the key is absent)"""
        return bool(self.get("describe", False))

    @property
    def widget_context(self) -> Optional[CloudWatchWidgetContext]:
        """The widget context, or None when the event carries none"""
        if self.get("widgetContext"):
            return CloudWatchWidgetContext(self["widgetContext"])
        return None
298,388 | env get outdated | """
This file is the configuration file for the Sphinx documentation builder.
See the documentation: http://www.sphinx-doc.org/en/master/config
"""
import json
import os
import pathlib
import sys
import time
# Doc Path
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath(os.path.dirname(__file__)))

# Core project metadata used throughout the rendered documentation.
project = "Determined"
html_title = "Determined AI Documentation"
copyright = time.strftime("%Y, Determined AI")
author = "hello@determined.ai"
# Version/release are read from the VERSION file one level above this file.
version = pathlib.Path(__file__).parents[1].joinpath("VERSION").read_text().strip()
release = version
language = "en"
source_suffix = {".rst": "restructuredtext"}
templates_path = ["_templates"]
html_static_path = ["assets", "_static"]
# DocSearch styling/scripts are injected on every page.
html_css_files = [
    "https://cdn.jsdelivr.net/npm/@docsearch/css@3",
    "styles/determined.css",
]
html_js_files = [
    ("https://cdn.jsdelivr.net/npm/@docsearch/js@3", {"defer": "defer"}),
    ("scripts/docsearch.sbt.js", {"defer": "defer"}),
]
def METHOD_NAME(app, env, added, changed, removed):
return ["index"]
def setup(app):
app.connect("env-get-outdated", METHOD_NAME)
app.set_html_assets_policy("always")
# Paths/files Sphinx must not treat as documentation sources.
exclude_patterns = [
    "_build",
    "Thumbs.db",
    ".DS_Store",
    "examples",
    "requirements.txt",
    "site",
    "README.md",
    "release-notes/README.md",
]
html_baseurl = "https://docs.determined.ai"  # Base URL for sitemap.
highlight_language = "none"
todo_include_todos = True
# HTML theme settings
html_show_sourcelink = False
html_show_sphinx = False
html_theme = "sphinx_book_theme"
html_favicon = "assets/images/favicon.ico"
html_last_updated_fmt = None
# See https://pradyunsg.me/furo/
# `navbar-logo.html` and `sbt-sidebar-nav.html` come from `sphinx-book-theme`
html_sidebars = {
    "**": [
        "navbar-logo.html",
        "sidebar-version.html",
        "search-field.html",
        "sbt-sidebar-nav.html",
    ],
    # to suppress sidebar on home page uncomment this line:
    # "index": [],
}
pygments_style = "sphinx"
pygments_dark_style = "monokai"
html_theme_options = {
    "logo": {
        "image_light": "assets/images/logo-determined-ai.svg",
        "image_dark": "assets/images/logo-determined-ai-white.svg",
    },
    # Version switcher dropdown sourced from the published "latest" docs.
    "switcher": {
        "json_url": "https://docs.determined.ai/latest/_static/version-switcher/versions.json",
        "version_match": version,
    },
    "repository_url": "https://github.com/determined-ai/determined",
    "use_repository_button": True,
    "use_download_button": False,
    "use_fullscreen_button": False,
}
html_use_index = True
html_domain_indices = True
extensions = [
    "sphinx_ext_downloads",
    "sphinx.ext.autodoc",
    "sphinx.ext.extlinks",
    "sphinx.ext.intersphinx",
    "sphinx.ext.mathjax",
    "sphinx.ext.napoleon",
    "sphinx_copybutton",
    "sphinx_sitemap",
    "sphinx_reredirects",
    "sphinx_tabs.tabs",
    "myst_parser",
]
myst_extensions = [
    "colon_fence",
]
# Our custom sphinx extension uses this value to decide where to look for
# downloadable files.
dai_downloads_root = os.path.join("site", "downloads")
# sphinx.ext.autodoc configurations.
# See https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
autosummary_generate = True
autoclass_content = "class"
# Heavy/optional packages mocked out so autodoc can import our modules.
autodoc_mock_imports = [
    "mmcv",
    "mmdet",
    "transformers",
    "deepspeed",
    "datasets",
    "analytics",
]
# sphinx-sitemap configurations.
# See https://github.com/jdillard/sphinx-sitemap
# The URLs generated by sphinx-sitemap include both the version number and the
# language by default. We don't use language in the published URL, and we also
# want to encourage the latest version of the docs to be indexed, so only
# include that variant in the sitemap.
sitemap_url_scheme = "latest/{link}"
# Page-redirect map consumed by sphinx-reredirects.
with open(".redirects/redirects.json") as f:
    redirects = json.load(f)
298,389 | print body | #!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
import gl_XML, glX_XML, glX_proto_common, license
import sys, getopt
class glx_doc_item_factory(glX_proto_common.glx_proto_item_factory):
    """Factory to create GLX protocol documentation oriented objects derived from glItem."""

    def create_item(self, name, element, context):
        # Only <parameter> elements get the documentation-specific subclass;
        # all other element kinds defer to the protocol item factory.
        if name == "parameter":
            return glx_doc_parameter(element, context)
        else:
            return glX_proto_common.glx_proto_item_factory.create_item(self, name, element, context)
class glx_doc_parameter(gl_XML.gl_parameter):
    """Parameter subclass that knows how to describe itself in GLX
    protocol-documentation terms (CARD32, LISTofCARD8, etc.)."""

    def packet_type(self, type_dict):
        """Get the type string for the packet header

        GLX protocol documentation uses type names like CARD32,
        FLOAT64, LISTofCARD8, and ENUM.  This function converts the
        type of the parameter to one of these names."""
        list_of = ""
        if self.is_array():
            list_of = "LISTof"
        t_name = self.get_base_type_string()
        # Unknown GL types fall back to byte-sized CARD8 elements.
        if not type_dict.has_key( t_name ):
            type_name = "CARD8"
        else:
            type_name = type_dict[ t_name ]
        return "%s%s" % (list_of, type_name)

    def packet_size(self):
        """Return [size, pad]: size is a byte count (or a symbolic "n*k"
        product for variable-length arrays) and pad is "p" when the size
        must be padded to a 4-byte boundary, else None."""
        p = None
        s = self.size()
        if s == 0:
            # Variable-length parameter: express the size symbolically as
            # count * element-size.
            a_prod = "n"
            b_prod = self.p_type.size
            if not self.count_parameter_list and self.counter:
                a_prod = self.counter
            elif self.count_parameter_list and not self.counter or self.is_output:
                pass
            elif self.count_parameter_list and self.counter:
                b_prod = self.counter
            else:
                raise RuntimeError("Parameter '%s' to function '%s' has size 0." % (self.name, self.context.name))
            ss = "%s*%s" % (a_prod, b_prod)
            return [ss, p]
        else:
            if s % 4 != 0:
                p = "p"
            return [str(s), p]
class PrintGlxProtoText(gl_XML.gl_print_base):
def __init__(self):
gl_XML.gl_print_base.__init__(self)
self.license = ""
def printHeader(self):
return
def body_size(self, f):
# At some point, refactor this function and
# glXFunction::command_payload_length.
size = 0;
size_str = ""
pad_str = ""
plus = ""
for p in f.parameterIterateGlxSend():
[s, pad] = p.packet_size()
try:
size += int(s)
except Exception,e:
size_str += "%s%s" % (plus, s)
plus = "+"
if pad != None:
pad_str = pad
return [size, size_str, pad_str]
def print_render_header(self, f):
[size, size_str, pad_str] = self.body_size(f)
size += 4;
if size_str == "":
s = "%u" % ((size + 3) & ~3)
elif pad_str != "":
s = "%u+%s+%s" % (size, size_str, pad_str)
else:
s = "%u+%s" % (size, size_str)
print ' 2 %-15s rendering command length' % (s)
print ' 2 %-4u rendering command opcode' % (f.glx_rop)
return
def print_single_header(self, f):
[size, size_str, pad_str] = self.body_size(f)
size = ((size + 3) / 4) + 2;
if f.glx_vendorpriv != 0:
size += 1
print ' 1 CARD8 opcode (X assigned)'
print ' 1 %-4u GLX opcode (%s)' % (f.opcode_real_value(), f.opcode_real_name())
if size_str == "":
s = "%u" % (size)
elif pad_str != "":
s = "%u+((%s+%s)/4)" % (size, size_str, pad_str)
else:
s = "%u+((%s)/4)" % (size, size_str)
print ' 2 %-15s request length' % (s)
if f.glx_vendorpriv != 0:
print ' 4 %-4u vendor specific opcode' % (f.opcode_value())
print ' 4 GLX_CONTEXT_TAG context tag'
return
def print_reply(self, f):
print ' =>'
print ' 1 1 reply'
print ' 1 unused'
print ' 2 CARD16 sequence number'
if f.output == None:
print ' 4 0 reply length'
elif f.reply_always_array:
print ' 4 m reply length'
else:
print ' 4 m reply length, m = (n == 1 ? 0 : n)'
output = None
for x in f.parameterIterateOutputs():
output = x
break
unused = 24
if f.return_type != 'void':
print ' 4 %-15s return value' % (f.return_type)
unused -= 4
elif output != None:
print ' 4 unused'
unused -= 4
if output != None:
print ' 4 CARD32 n'
unused -= 4
if output != None:
if not f.reply_always_array:
print ''
print ' if (n = 1) this follows:'
print ''
print ' 4 CARD32 %s' % (output.name)
print ' %-2u unused' % (unused - 4)
print ''
print ' otherwise this follows:'
print ''
print ' %-2u unused' % (unused)
[s, pad] = output.packet_size()
print ' %-8s %-15s %s' % (s, output.packet_type( self.type_map ), output.name)
if pad != None:
try:
bytes = int(s)
bytes = 4 - (bytes & 3)
print ' %-8u %-15s unused' % (bytes, "")
except Exception,e:
print ' %-8s %-15s unused, %s=pad(%s)' % (pad, "", pad, s)
else:
print ' %-2u unused' % (unused)
def print_body(self, f):
for p in f.parameterIterateGlxSend():
[s, pad] = p.packet_size()
print ' %-8s %-15s %s' % (s, p.packet_type( self.type_map ), p.name)
if pad != None:
try:
bytes = int(s)
bytes = 4 - (bytes & 3)
print ' %-8u %-15s unused' % (bytes, "")
except Exception,e:
print ' %-8s %-15s unused, %s=pad(%s)' % (pad, "", pad, s)
def METHOD_NAME(self, api):
self.type_map = {}
for t in api.typeIterate():
self.type_map[ "GL" + t.name ] = t.glx_name
# At some point this should be expanded to support pixel
# functions, but I'm not going to lose any sleep over it now.
for f in api.functionIterateByOffset():
if f.client_handcode or f.server_handcode or f.vectorequiv or len(f.get_images()):
continue
if f.glx_rop:
print ' %s' % (f.name)
self.print_render_header(f)
elif f.glx_sop or f.glx_vendorpriv:
print ' %s' % (f.name)
self.print_single_header(f)
else:
continue
self.print_body(f)
if f.needs_reply():
self.print_reply(f)
print ''
return
if __name__ == '__main__':
    file_name = "gl_API.xml"

    try:
        (args, trail) = getopt.getopt(sys.argv[1:], "f:")
    except Exception,e:
        # NOTE(review): show_usage is not defined in this module — this will
        # raise a NameError if getopt fails; confirm the intended helper.
        show_usage()

    for (arg,val) in args:
        if arg == "-f":
            file_name = val

    api = gl_XML.parse_GL_API( file_name, glx_doc_item_factory() )

    printer = PrintGlxProtoText()
    printer.Print( api )
298,390 | tear down | from datetime import datetime, timedelta
from unittest import TestCase
import pytest
from alfalfa_client.alfalfa_client import AlfalfaClient
from tests.integration.conftest import prepare_model
##################################################################################################
# The is a test of the simple_thermostat.fmu,
# which represents a simplified air temperature controller.
#
# The inputs oveWriMeasuredTemp_u and TsetPoint_u represent the measured air temperature,
# and the desired control setpoint respectively.
#
# The output, rea, represents the control output signal.
#
# Modelica source code of the simple_thermostat.fmu is available here: <todo: add link to source>
##################################################################################################
@pytest.mark.integration
class TestSimpleThermostat(TestCase):
    """Integration test of simple_thermostat.fmu against a local Alfalfa host.

    The FMU is a simplified air-temperature controller: inputs are the
    measured temperature and setpoint overrides; output ``rea`` is the
    control signal (0.0 when measured temp >= setpoint, else 1.0).
    """

    def setUp(self):
        # Submit the FMU and start an externally-clocked simulation at 5x
        # timescale over 2019-01-01 00:00 .. 00:05.
        self.alfalfa = AlfalfaClient(host='http://localhost')
        fmu_path = prepare_model('simple_thermostat.fmu')
        self.model_id = self.alfalfa.submit(fmu_path)
        self.alfalfa.wait(self.model_id, "ready")
        self.current_datetime = datetime(2019, 1, 1)
        self.alfalfa.start(
            self.model_id,
            external_clock=True,
            start_datetime=self.current_datetime,
            end_datetime=datetime(2019, 1, 1, 0, 5),
            timescale=5
        )
        self.alfalfa.wait(self.model_id, "running")

    def test_io_with_external_clock(self):
        # Simulation is running, but time should still be at 0
        model_time = self.alfalfa.get_sim_time(self.model_id)
        assert self.current_datetime == model_time
        # If outputs are requested before the simulation is advanced,
        # there will be an error.
        # See issue https://github.com/NREL/alfalfa/issues/119
        self.current_datetime += timedelta(minutes=1)
        self.alfalfa.advance([self.model_id])
        model_time = self.alfalfa.get_sim_time(self.model_id)
        assert self.current_datetime == model_time
        # Having not set any inputs the fmu will be at the initial state.
        # The control signal output "rea" is at 0.0
        outputs = self.alfalfa.get_outputs(self.model_id)
        rea = outputs.get("rea")
        assert rea == pytest.approx(0.0)
        # Attempt to override the measured temp (ie zone temperature),
        # and the setpoint, such that zone temperature is over setpoint.
        self.alfalfa.set_inputs(self.model_id, {"oveWriMeasuredTemp_u": 303.15, "oveWriSetPoint_u": 294.15})
        # Advance time, outputs will not be updated until advance happens.
        # Should this limitation be considered a bug?
        # Note that boptest advance and set input apis are combined,
        # so that there is no method to set inputs without advancing
        self.current_datetime += timedelta(minutes=1)
        self.alfalfa.advance([self.model_id])
        model_time = self.alfalfa.get_sim_time(self.model_id)
        assert self.current_datetime == model_time
        # When temperature is over setpoint controller returns 0.0
        outputs = self.alfalfa.get_outputs(self.model_id)
        rea = outputs.get("rea")
        assert rea == pytest.approx(0.0)
        # Now override the measured (zone) temperature such that it is below setpoint
        self.alfalfa.set_inputs(self.model_id, {"oveWriMeasuredTemp_u": 283.15, "oveWriSetPoint_u": 294.15})
        self.current_datetime += timedelta(minutes=1)
        self.alfalfa.advance([self.model_id])
        model_time = self.alfalfa.get_sim_time(self.model_id)
        assert self.current_datetime == model_time
        # When temperature is below setpoint controller returns 1.0
        outputs = self.alfalfa.get_outputs(self.model_id)
        rea = outputs.get("rea")
        assert rea == pytest.approx(1.0)
        # Test the control signal override
        self.alfalfa.set_inputs(self.model_id, {"oveWriActuatorSignal_u": 0.0})
        self.current_datetime += timedelta(minutes=1)
        self.alfalfa.advance([self.model_id])
        model_time = self.alfalfa.get_sim_time(self.model_id)
        assert self.current_datetime == model_time
        outputs = self.alfalfa.get_outputs(self.model_id)
        rea = outputs.get("rea")
        assert rea == pytest.approx(0.0)

    def tearDown(self):
        # Renamed from the masked METHOD_NAME: unittest only invokes this
        # hook when it is named exactly ``tearDown``. Stop the simulation
        # and wait for it to finish so the next test starts clean.
        self.alfalfa.stop(self.model_id)
        self.alfalfa.wait(self.model_id, "complete")
298,391 | decrement energy requirement | """
Copyright 2018 Grid Singularity
This file is part of Grid Singularity Exchange.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from gsy_framework.utils import find_object_of_same_weekday_and_time
from pendulum.datetime import DateTime
from gsy_e.gsy_e_core.exceptions import GSyException
from gsy_e.models.strategy.state import SmartMeterState
from gsy_e.models.strategy import utils
from gsy_e.models.strategy.profile import EnergyProfile
class SmartMeterEnergyParameters:
"""Manage energy parameters for the Smart Meter Strategy class."""
    def __init__(self, smart_meter_profile, smart_meter_profile_uuid):
        """
        Args:
            smart_meter_profile: Raw profile data (positive values =
                consumption, negative = production; see
                set_energy_forecast_for_future_markets).
            smart_meter_profile_uuid: UUID identifying the profile.
        """
        self._energy_profile = EnergyProfile(smart_meter_profile, smart_meter_profile_uuid)
        self._state = SmartMeterState()
        # Both are populated in activate(), once the area is known.
        self._simulation_start_timestamp = None
        self._area = None
def activate(self, area):
"""Trigger by strategy activate event, configure the energy parameters for trading."""
self._area = area
self._energy_profile.read_or_rotate_profiles()
self._simulation_start_timestamp = area.now
def METHOD_NAME(self, energy_kWh: float, time_slot: DateTime, area_name: str):
"""Decrease the energy requirements of the asset."""
self._state.METHOD_NAME(
purchased_energy_Wh=energy_kWh * 1000,
time_slot=time_slot,
area_name=area_name)
def set_energy_forecast_for_future_markets(self, time_slots, reconfigure: bool = True):
"""Set the energy consumption/production expectations for the upcoming market slots.
Args:
reconfigure: if True, re-read and preprocess the raw profile data.
"""
self._energy_profile.read_or_rotate_profiles(reconfigure=reconfigure)
if not self._energy_profile.profile:
raise GSyException(
f"Smart Meter {self._area.name} tries to set its required energy forecast without "
"a profile.")
for slot_time in time_slots:
energy_kWh = find_object_of_same_weekday_and_time(
self._energy_profile.profile, slot_time)
# For the Smart Meter, the energy amount can be either positive (consumption) or
# negative (production).
consumed_energy = energy_kWh if energy_kWh > 0 else 0.0
# Turn energy into a positive number (required for set_available_energy method)
produced_energy = abs(energy_kWh) if energy_kWh < 0 else 0.0
if consumed_energy and produced_energy:
raise InconsistentEnergyException(
"The Smart Meter can't both produce and consume energy at the same time.")
# NOTE: set_desired_energy accepts energy in Wh (not kWh) so we multiply * 1000
self._state.set_desired_energy(consumed_energy * 1000, slot_time, overwrite=False)
self._state.set_available_energy(produced_energy, slot_time, reconfigure)
self._state.update_total_demanded_energy(slot_time)
def set_energy_measurement_kWh(self, time_slot: DateTime) -> None:
"""Set the (simulated) actual energy of the device in a market slot."""
energy_forecast_kWh = self._state.get_energy_at_market_slot(time_slot)
simulated_measured_energy_kWh = utils.compute_altered_energy(energy_forecast_kWh)
# This value can be either positive (consumption) or negative (production). This is
# different from the other devices (PV, Load) where the value is positive regardless of
# its direction (consumption or production)
self._state.set_energy_measurement_kWh(simulated_measured_energy_kWh, time_slot)
def reset(self, **kwargs):
"""Reconfigure energy parameters."""
if kwargs.get("smart_meter_profile") is not None:
self._energy_profile.input_profile = kwargs["smart_meter_profile"]
self.set_energy_forecast_for_future_markets(kwargs["time_slots"], reconfigure=True)
def serialize(self):
"""Create dict with smart meter energy parameters."""
return {
"smart_meter_profile": self._energy_profile.input_profile,
"smart_meter_profile_uuid": self._energy_profile.input_profile_uuid
}
# Raised by SmartMeterEnergyParameters.set_energy_forecast_for_future_markets
# when a slot would imply simultaneous production and consumption.
class InconsistentEnergyException(Exception):
    """Exception raised when the energy produced/consumed by the Smart Meter doesn't make sense."""
298,392 | test starts query job | """Tests for bq_script_executor."""
from unittest import mock
from absl.testing import absltest
from google.auth import credentials
from google.cloud import bigquery
from jobs.workers.bigquery import bq_script_executor
def _make_credentials():
    """Return a strict (spec_set) autospec mock of google.auth credentials."""
    return mock.create_autospec(
        credentials.Credentials, instance=True, spec_set=True)
class BQScriptExecutorTest(absltest.TestCase):
    """Tests BQScriptExecutor against a BigQuery client with stubbed API calls."""

    # TODO: If enough, consider making this less fragile by just testing to
    # make sure the client "query" method was called with correct params.
    @mock.patch('google.cloud.bigquery.job._AsyncJob._set_properties')
    def METHOD_NAME(self, _):
        # Worker under test: runs a standard-SQL script (not a dry run) in EU.
        worker_inst = bq_script_executor.BQScriptExecutor(
            {
                'job_id': 'JOBID',
                'script': 'CREATE OR REPLACE TABLE t AS SELECT * FROM mytable',
                'location': 'EU',
                'dry_run': False,
            },
            pipeline_id=1,
            job_id=1,
            logger_project='PROJECT',
            logger_credentials=_make_credentials())
        # Stubs the BigQuery jobs query response.
        # https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query#response-body
        api_response = {
            'kind': 'bigquery#queryResponse',
            'schema': {
                'fields': [
                    {
                        'name': 'model_name',
                        'type': 'STRING',
                        'mode': 'NULLABLE',
                    },
                    {
                        'name': 'score',
                        'type': 'FLOAT',
                        'mode': 'NULLABLE',
                    }
                ]
            },
            'jobReference': {
                'projectId': 'PROJECT',
                'jobId': 'job_U6-11mAExu4QJquW3k2c_111yPIF',
                'location': 'EU',
            },
            'totalRows': '2',
            'rows': [
                {'f': [{'v': 'LTV v1'}, {'v': '20.70'}]},
                {'f': [{'v': 'LTV v1'}, {'v': '10.25'}]},
            ],
            'totalBytesProcessed': '1285317116',
            'jobComplete': True,
            'cacheHit': False,
        }
        bq_client = bigquery.Client(
            project='PROJECT', credentials=_make_credentials())
        # Route the worker's client factory and the client's raw REST calls
        # to the stubs above so no network access happens.
        self.enter_context(
            mock.patch.object(
                worker_inst,
                '_get_client',
                autospec=True,
                return_value=bq_client))
        self.enter_context(
            mock.patch.object(
                bq_client,
                '_call_api',
                autospec=True,
                return_value=api_response))
        patched_wait = self.enter_context(
            mock.patch.object(
                worker_inst,
                '_wait',
                autospec=True))
        self.enter_context(
            mock.patch.object(worker_inst, 'log_info', autospec=True))
        worker_inst.execute()
        patched_wait.assert_called_once()
        # The job handed to _wait must be the QueryJob created for our script.
        call_job: bigquery.QueryJob = patched_wait.call_args.args[0]
        self.assertIsInstance(call_job, bigquery.QueryJob)
        with self.subTest('Ensures query is passed with standard SQL config'):
            self.assertEqual(call_job.query,
                             'CREATE OR REPLACE TABLE t AS SELECT * FROM mytable')
            self.assertFalse(call_job.use_legacy_sql,
                             msg='We only support the standard SQL for this worker')
        with self.subTest('Storing results is delegated to the SQL query'):
            self.assertIsNone(call_job.create_disposition)
            self.assertIsNone(call_job.write_disposition)
            self.assertIsNone(call_job.destination)

    def test_dry_run_query(self):
        # Same script as above but with dry_run=True: only the byte estimate
        # should be logged, nothing executed.
        worker_inst = bq_script_executor.BQScriptExecutor(
            {
                'job_id': 'JOBID',
                'script': 'CREATE OR REPLACE TABLE t AS SELECT * FROM mytable',
                'location': 'EU',
                'dry_run': True,
            },
            pipeline_id=1,
            job_id=1,
            logger_project='PROJECT',
            logger_credentials=_make_credentials())
        # Stubs the BigQuery Job object.
        # https://cloud.google.com/bigquery/docs/reference/rest/v2/Job
        api_response = {
            'kind': 'bigquery#job',
            'statistics': {
                'totalBytesProcessed': '16013044',
                'query': {
                    'totalBytesProcessed': '16013044',
                    'totalBytesBilled': '0',
                    'cacheHit': False,
                    'totalBytesProcessedAccuracy': 'PRECISE',
                    'mlStatistics': {
                        'modelType': 'LOGISTIC_REGRESSION',
                        'trainingType': 'HPARAM_TUNING'
                    }
                }
            },
            'status': {'state': 'DONE'},
            'configuration': {
                'query': {
                    'query': 'CREATE OR REPLACE TABLE t AS SELECT * FROM mytable',
                    'priority': 'INTERACTIVE',
                    'useQueryCache': False,
                    'useLegacySql': False
                },
                'dryRun': True,
                'jobType': 'QUERY'
            }
        }
        bq_client = bigquery.Client(
            project='PROJECT', credentials=_make_credentials())
        bq_query_job_config = bigquery.QueryJobConfig(
            dry_run=True, use_query_cache=False)
        self.enter_context(
            mock.patch.object(
                worker_inst,
                '_get_client',
                autospec=True,
                return_value=bq_client))
        self.enter_context(
            mock.patch.object(
                worker_inst,
                '_get_dry_run_job_config',
                autospec=True,
                return_value=bq_query_job_config))
        self.enter_context(
            mock.patch.object(
                bq_client,
                '_call_api',
                autospec=True,
                return_value=api_response))
        patched_logger = self.enter_context(
            mock.patch.object(worker_inst, 'log_info', autospec=True))
        worker_inst.execute()
        # The dry run is expected to log the byte estimate and then finish.
        self.assertSequenceEqual(
            [
                mock.call(mock.ANY),
                mock.call('This query will process 16.01 MB when run.'),
                mock.call('Finished successfully'),
            ],
            patched_logger.mock_calls
        )
# Allow running this test file directly.
if __name__ == '__main__':
    absltest.main()
298,393 | format | """DGL Runtime NDArray API.
dgl.ndarray provides a minimum runtime array structure to be
used with C++ library.
"""
# pylint: disable=invalid-name,unused-import
from __future__ import absolute_import as _abs
import ctypes
import functools
import operator
import numpy as _np
from . import backend as F
from ._ffi.function import _init_api
from ._ffi.ndarray import (
_set_class_ndarray,
context,
DGLContext,
DGLDataType,
empty,
empty_shared_mem,
from_dlpack,
NDArrayBase,
numpyasarray,
)
from ._ffi.object import ObjectBase, register_object
class NDArray(NDArrayBase):
    """Lightweight NDArray class for DGL framework."""

    def __len__(self):
        # Total number of elements: product of all dimension sizes.
        num_elements = 1
        for dim in self.shape:
            num_elements *= dim
        return num_elements

    def shared_memory(self, name):
        """Return a copy of the ndarray in shared memory

        Parameters
        ----------
        name : str
            The name of the shared memory

        Returns
        -------
        NDArray
        """
        target = empty_shared_mem(name, True, self.shape, self.dtype)
        return target.copyfrom(self)
def cpu(dev_id=0):
    """Construct a CPU device

    Parameters
    ----------
    dev_id : int, optional
        The integer device id

    Returns
    -------
    ctx : DGLContext
        The created context
    """
    # Device type 1 selects CPU in this module (cf. gpu(), which passes 2).
    cpu_device_type = 1
    return DGLContext(cpu_device_type, dev_id)
def gpu(dev_id=0):
    """Construct a GPU device

    Parameters
    ----------
    dev_id : int, optional
        The integer device id

    Returns
    -------
    ctx : DGLContext
        The created context
    """
    return DGLContext(2, dev_id)
def array(arr, ctx=cpu(0)):
    """Create an array from source arr.

    Parameters
    ----------
    arr : numpy.ndarray
        The array to be copied from
    ctx : DGLContext, optional
        The device context to create the array

    Returns
    -------
    ret : NDArray
        The created array
    """
    # Anything that is not already an ndarray is first converted via numpy.
    source = arr
    if not isinstance(source, (_np.ndarray, NDArray)):
        source = _np.array(source)
    ret = empty(source.shape, source.dtype, ctx)
    return ret.copyfrom(source)
def zerocopy_from_numpy(np_data):
    """Create an array that shares the given numpy data.

    Parameters
    ----------
    np_data : numpy.ndarray
        The numpy data

    Returns
    -------
    NDArray
        The array
    """
    arr, _ = numpyasarray(np_data)
    handle = ctypes.pointer(arr)
    # is_view=True: presumably marks numpy as the buffer owner so the returned
    # NDArray does not free it -- confirm against NDArrayBase.
    return NDArray(handle, is_view=True)
def cast_to_signed(arr):
    """Cast this NDArray from unsigned integer to signed one.

    uint64 -> int64
    uint32 -> int32

    Useful for backends with poor unsigned integer support (e.g., TensorFlow).

    Parameters
    ----------
    arr : NDArray
        Input array

    Returns
    -------
    NDArray
        Cast array
    """
    return _CAPI_DGLArrayCastToSigned(arr)
def get_shared_mem_array(name, shape, dtype):
    """Get a tensor from shared memory with specific name

    Parameters
    ----------
    name : str
        The unique name of the shared memory
    shape : tuple of int
        The shape of the returned tensor
    dtype : F.dtype
        The dtype of the returned tensor

    Returns
    -------
    F.tensor
        The tensor got from shared memory.
    """
    # Second argument False mirrors create_shared_mem_array's True -- attach
    # to an existing region instead of creating one. Hand the buffer to the
    # backend framework zero-copy via DLPack.
    backend_dtype = F.reverse_data_type_dict[dtype]
    shared = empty_shared_mem(name, False, shape, backend_dtype)
    return F.zerocopy_from_dlpack(shared.to_dlpack())
def create_shared_mem_array(name, shape, dtype):
    """Create a tensor from shared memory with the specific name

    Parameters
    ----------
    name : str
        The unique name of the shared memory
    shape : tuple of int
        The shape of the returned tensor
    dtype : F.dtype
        The dtype of the returned tensor

    Returns
    -------
    F.tensor
        The created tensor.
    """
    # Second argument True: create the named region (get_shared_mem_array
    # attaches with False). Hand the buffer to the backend zero-copy via DLPack.
    backend_dtype = F.reverse_data_type_dict[dtype]
    created = empty_shared_mem(name, True, shape, backend_dtype)
    return F.zerocopy_from_dlpack(created.to_dlpack())
def exist_shared_mem_array(name):
    """Check the existence of shared-memory array.

    Parameters
    ----------
    name : str
        The name of the shared-memory array.

    Returns
    -------
    bool
        The existence of the array
    """
    # Thin delegation to the C API; no Python-side state involved.
    return _CAPI_DGLExistSharedMemArray(name)
class SparseFormat:
    """Format code"""

    ANY = 0
    COO = 1
    CSR = 2
    CSC = 3
    # Reverse mapping from format code to a printable name; used by
    # SparseMatrix.__repr__ below.
    FORMAT2STR = {
        0: "ANY",
        1: "COO",
        2: "CSR",
        3: "CSC",
    }
@register_object("aten.SparseMatrix")
class SparseMatrix(ObjectBase):
    """Sparse matrix object class in C++ backend."""

    @property
    def METHOD_NAME(self):
        """Sparse format enum

        Returns
        -------
        int
        """
        return _CAPI_DGLSparseMatrixGetFormat(self)

    @property
    def num_rows(self):
        """Number of rows.

        Returns
        -------
        int
        """
        return _CAPI_DGLSparseMatrixGetNumRows(self)

    @property
    def num_cols(self):
        """Number of columns.

        Returns
        -------
        int
        """
        return _CAPI_DGLSparseMatrixGetNumCols(self)

    @property
    def indices(self):
        """Index arrays.

        Returns
        -------
        list of ndarrays
        """
        # The C API exposes three index arrays; their exact meaning depends on
        # the sparse format -- see the C++ backend.
        ret = [_CAPI_DGLSparseMatrixGetIndices(self, i) for i in range(3)]
        return [F.zerocopy_from_dgl_ndarray(arr) for arr in ret]

    @property
    def flags(self):
        """Flag arrays

        Returns
        -------
        list of boolean
        """
        return _CAPI_DGLSparseMatrixGetFlags(self)

    def __getstate__(self):
        # Pickle as plain (format, shape, indices, flags); the C++ handle is
        # re-created from these in __setstate__.
        return (
            self.METHOD_NAME,
            self.num_rows,
            self.num_cols,
            self.indices,
            self.flags,
        )

    def __setstate__(self, state):
        fmt, nrows, ncols, indices, flags = state
        indices = [F.zerocopy_to_dgl_ndarray(idx) for idx in indices]
        self.__init_handle_by_constructor__(
            _CAPI_DGLCreateSparseMatrix, fmt, nrows, ncols, indices, flags
        )

    def __repr__(self):
        return 'SparseMatrix(fmt="{}", shape=({},{}))'.METHOD_NAME(
            SparseFormat.FORMAT2STR[self.METHOD_NAME], self.num_rows, self.num_cols
        )
# Hook the Python NDArray class into the FFI layer and load the C API symbols
# for the dgl.ndarray namespaces.
_set_class_ndarray(NDArray)
_init_api("dgl.ndarray")
_init_api("dgl.ndarray.uvm", __name__)

# An array representing null (no value) that can be safely converted to
# other backend tensors.
NULL = {
    "int64": array(_np.array([], dtype=_np.int64)),
    "int32": array(_np.array([], dtype=_np.int32)),
}
298,394 | compute upsampling | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, invalid-name, unused-argument, too-many-arguments, consider-using-in
"""Backend compiler related feature registration for dynamic relay ops in nn namespace"""
from __future__ import absolute_import
from tvm import topi
from tvm.runtime import convert
from tvm.te.hybrid import script
from ...op import register_shape_func, register_compute
from ...op import register_injective_schedule, register_broadcast_schedule
# upsampling
@register_compute("dyn.nn.upsampling")
def METHOD_NAME(attrs, inputs, out_dtype):
    """Compute registration for dyn.nn.upsampling.

    inputs = [data, scale_h, scale_w]; the dynamic output shape is taken from
    out_dtype.shape.
    """
    data = inputs[0]
    scale_h = inputs[1]
    scale_w = inputs[2]
    layout = attrs.layout
    method = attrs.method
    align_corners = attrs.align_corners
    return [
        topi.nn.upsampling(data, scale_h, scale_w, layout, method, align_corners, out_dtype.shape)
    ]
# upsampling3d
@register_compute("dyn.nn.upsampling3d")
def compute_upsampling3d(attrs, inputs, out_dtype):
    """Compute registration for dyn.nn.upsampling3d.

    inputs = [data, scale_d, scale_h, scale_w]; the dynamic output shape is
    taken from out_dtype.shape.
    """
    data = inputs[0]
    scale_d = inputs[1]
    scale_h = inputs[2]
    scale_w = inputs[3]
    layout = attrs.layout
    method = attrs.method
    coordinate_transformation_mode = attrs.coordinate_transformation_mode
    return [
        topi.nn.upsampling3d(
            data,
            scale_d,
            scale_h,
            scale_w,
            layout,
            method,
            coordinate_transformation_mode,
            out_dtype.shape,
        )
    ]
# Schedule registrations: both dynamic upsampling ops use the injective
# schedule; dyn.nn.pad uses the broadcast schedule.
register_injective_schedule("dyn.nn.upsampling")
register_injective_schedule("dyn.nn.upsampling3d")
register_broadcast_schedule("dyn.nn.pad")
#####################
# Shape functions #
#####################
# upsampling
# TVM hybrid-script shape helper: copy the rank-4 input shape and scale the
# H/W axes by the scalar tensors scale_h/scale_w (rounded). Body is kept
# byte-identical: @script parses this function's AST with restricted syntax.
@script
def _upsampling_shape_func(dshape, scale_h, scale_w, height_axis, width_axis):
    out = output_tensor((4,), "int64")
    for i in const_range(4):
        out[i] = int64(dshape[i])
    out[height_axis] = int64(round(dshape[height_axis] * scale_h[()]))
    out[width_axis] = int64(round(dshape[width_axis] * scale_w[()]))
    return out
@register_shape_func("dyn.nn.upsampling", True)
def upsampling_shape_func(attrs, inputs, _):
    """Shape function for upsampling. Supports NCHW and NHWC layouts."""
    # Locate the H and W axes in the layout string; default to axis 1 when a
    # letter is absent, matching the original behavior.
    height_axis = 1
    width_axis = 1
    for axis, letter in enumerate(attrs.layout):
        if letter == "H":
            height_axis = axis
        elif letter == "W":
            width_axis = axis
    data_shape = inputs[0].shape
    return [
        _upsampling_shape_func(
            data_shape, inputs[1], inputs[2], convert(height_axis), convert(width_axis)
        )
    ]
# upsampling3d
# TVM hybrid-script shape helper: copy the rank-5 input shape and scale the
# D/H/W axes by the scalar scale tensors (rounded). Body kept byte-identical:
# @script parses this function's AST with restricted syntax.
@script
def _upsampling3d_shape_func(
    dshape, scale_d, scale_h, scale_w, depth_axis, height_axis, width_axis
):
    out = output_tensor((5,), "int64")
    for i in const_range(5):
        out[i] = int64(dshape[i])
    out[depth_axis] = int64(round(dshape[depth_axis] * scale_d[()]))
    out[height_axis] = int64(round(dshape[height_axis] * scale_h[()]))
    out[width_axis] = int64(round(dshape[width_axis] * scale_w[()]))
    return out
@register_shape_func("dyn.nn.upsampling3d", True)
def upsampling3d_shape_func(attrs, inputs, _):
    """Shape function for upsampling3d. Supports NCDHW and NDHWC layouts."""
    layout = attrs.layout
    # Default each axis to 1 when its letter is absent from the layout.
    depth_axis = height_axis = width_axis = 1
    for i, letter in enumerate(layout):
        if letter == "D":
            depth_axis = i
        if letter == "H":
            height_axis = i
        if letter == "W":
            width_axis = i
    return [
        _upsampling3d_shape_func(
            inputs[0].shape,
            inputs[1],
            inputs[2],
            inputs[3],
            convert(depth_axis),
            convert(height_axis),
            convert(width_axis),
        )
    ]
# pad
# TVM hybrid-script shape helper: output dim i = data.shape[i] plus the
# before/after padding taken from pad_width[i, 0] and pad_width[i, 1].
# Body kept byte-identical: @script parses the AST with restricted syntax.
@script
def _dyn_pad_shape_func(data, pad_width):
    ndim = len(data.shape)
    out = output_tensor((ndim,), "int64")
    for i in const_range(ndim):
        out[i] = int64(pad_width[i, 0] + pad_width[i, 1] + data.shape[i])
    return out
@register_shape_func("dyn.nn.pad", True)
def pad_shape_func(attrs, inputs, data):
    """
    Shape function for dynamic pad op.
    """
    # inputs = [data, pad_width]; pad_width holds before/after amounts per axis.
    return [_dyn_pad_shape_func(inputs[0], inputs[1])]
298,395 | error format | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "orbital operation-result show",
)
class Show(AAZCommand):
    """Return operation results.
    """

    # Metadata consumed by the aaz framework: API version and the ARM resource
    # path this command operates on.
    _aaz_info = {
        "version": "2022-03-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/providers/microsoft.orbital/locations/{}/operationresults/{}", "2022-03-01"],
        ]
    }

    AZ_SUPPORT_NO_WAIT = True

    def _handler(self, command_args):
        # Long-running operation: build a poller that drives
        # _execute_operations and renders the result through _output.
        super()._handler(command_args)
        return self.build_lro_poller(self._execute_operations, self._output)

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Build the argument schema once and cache it on the class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.location = AAZResourceLocationArg(
            required=True,
            id_part="name",
        )
        _args_schema.operation_id = AAZStrArg(
            options=["-n", "--name", "--operation-id"],
            help="The ID of an ongoing async operation.",
            required=True,
            id_part="child_name_1",
            fmt=AAZStrArgFormat(
                min_length=1,
            ),
        )
        return cls._args_schema

    def _execute_operations(self):
        yield self.OperationsResultsGet(ctx=self.ctx)()

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result

    class OperationsResultsGet(AAZHttpOperation):
        # GET .../operationResults/{operationId}, polled as an LRO.
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # NOTE(review): the 202 and 200 branches below are identical; this
            # file is generated by aaz-dev-tools, so keep them in sync with the
            # generator output rather than merging by hand.
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [200]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "azure-async-operation"},
                    path_format_arguments=self.url_parameters,
                )
            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/providers/Microsoft.Orbital/locations/{location}/operationResults/{operationId}",
                **self.url_parameters
            )

        @property
        def method(self):
            return "GET"

        @property
        def METHOD_NAME(self):
            # Error payloads follow the OData v4 error format.
            return "ODataV4Format"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "location", self.ctx.args.location,
                    required=True,
                ),
                **self.serialize_url_param(
                    "operationId", self.ctx.args.operation_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2022-03-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            # Deserialize the payload into ctx.vars.instance for _output.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            # Build the response schema once and cache it on the class.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.end_time = AAZStrType(
                serialized_name="endTime",
                flags={"read_only": True},
            )
            _schema_on_200.error = AAZObjectType()
            _schema_on_200.id = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.name = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.percent_complete = AAZFloatType(
                serialized_name="percentComplete",
                flags={"read_only": True},
            )
            _schema_on_200.start_time = AAZStrType(
                serialized_name="startTime",
                flags={"read_only": True},
            )
            _schema_on_200.status = AAZStrType(
                flags={"read_only": True},
            )

            error = cls._schema_on_200.error
            error.code = AAZStrType(
                flags={"read_only": True},
            )
            error.message = AAZStrType(
                flags={"read_only": True},
            )

            return cls._schema_on_200
__all__ = ["Show"] |
298,396 | test compat module is added | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for create_python_api."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
import sys
from tensorflow.python.platform import test
from tensorflow.python.tools.api.generator import create_python_api
from tensorflow.python.util.tf_export import tf_export
@tf_export('test_op', 'test_op1', 'test.test_op2')
def test_op():
    """Dummy op exported under three API names for the generator tests."""
    pass
@tf_export('test1.foo', v1=['test.foo'])
def deprecated_test_op():
    """Dummy op whose v1 API name ('test.foo') differs from its v2 name."""
    pass
@tf_export('TestClass', 'NewTestClass')
class TestClass(object):
    """Dummy class exported under two API names for the generator tests."""
    pass
# Exported via tf_export(...).export_constant in CreatePythonApiTest.setUp.
_TEST_CONSTANT = 5
# Fake module name containing 'tensorflow' so the API generator picks it up.
_MODULE_NAME = 'tensorflow.python.test_module'
class CreatePythonApiTest(test.TestCase):
    """Tests the import/constant text produced by create_python_api."""

    def setUp(self):
        # Add fake op to a module that has 'tensorflow' in the name.
        sys.modules[_MODULE_NAME] = imp.new_module(_MODULE_NAME)
        setattr(sys.modules[_MODULE_NAME], 'test_op', test_op)
        setattr(sys.modules[_MODULE_NAME], 'deprecated_test_op', deprecated_test_op)
        setattr(sys.modules[_MODULE_NAME], 'TestClass', TestClass)
        test_op.__module__ = _MODULE_NAME
        TestClass.__module__ = _MODULE_NAME
        tf_export('consts._TEST_CONSTANT').export_constant(
            _MODULE_NAME, '_TEST_CONSTANT')

    def tearDown(self):
        del sys.modules[_MODULE_NAME]

    def testFunctionImportIsAdded(self):
        imports, _ = create_python_api.get_api_init_text(
            packages=[create_python_api._DEFAULT_PACKAGE],
            output_package='tensorflow',
            api_name='tensorflow',
            api_version=1)
        # The expected text differs depending on whether the generator emits
        # lazy-loading entries or plain import statements.
        if create_python_api._LAZY_LOADING:
            expected_import = (
                '\'test_op1\': '
                '(\'tensorflow.python.test_module\','
                ' \'test_op\')')
        else:
            expected_import = (
                'from tensorflow.python.test_module '
                'import test_op as test_op1')
        self.assertTrue(
            expected_import in str(imports),
            msg='%s not in %s' % (expected_import, str(imports)))

        if create_python_api._LAZY_LOADING:
            expected_import = (
                '\'test_op\': '
                '(\'tensorflow.python.test_module\','
                ' \'test_op\')')
        else:
            expected_import = (
                'from tensorflow.python.test_module '
                'import test_op')
        self.assertTrue(
            expected_import in str(imports),
            msg='%s not in %s' % (expected_import, str(imports)))
        # Also check that compat.v1 is not added to imports.
        self.assertFalse('compat.v1' in imports,
                         msg='compat.v1 in %s' % str(imports.keys()))

    def testClassImportIsAdded(self):
        imports, _ = create_python_api.get_api_init_text(
            packages=[create_python_api._DEFAULT_PACKAGE],
            output_package='tensorflow',
            api_name='tensorflow',
            api_version=2)
        if create_python_api._LAZY_LOADING:
            expected_import = (
                '\'NewTestClass\':'
                ' (\'tensorflow.python.test_module\','
                ' \'TestClass\')')
        else:
            expected_import = (
                'from tensorflow.python.test_module '
                'import TestClass')
        self.assertTrue(
            'TestClass' in str(imports),
            msg='%s not in %s' % (expected_import, str(imports)))

    def testConstantIsAdded(self):
        imports, _ = create_python_api.get_api_init_text(
            packages=[create_python_api._DEFAULT_PACKAGE],
            output_package='tensorflow',
            api_name='tensorflow',
            api_version=1)
        if create_python_api._LAZY_LOADING:
            expected = ('\'_TEST_CONSTANT\':'
                        ' (\'tensorflow.python.test_module\','
                        ' \'_TEST_CONSTANT\')')
        else:
            expected = ('from tensorflow.python.test_module '
                        'import _TEST_CONSTANT')
        self.assertTrue(expected in str(imports),
                        msg='%s not in %s' % (expected, str(imports)))

    def METHOD_NAME(self):
        # Requesting compat_api_versions=[1] must add compat.v1 submodules.
        imports, _ = create_python_api.get_api_init_text(
            packages=[create_python_api._DEFAULT_PACKAGE],
            output_package='tensorflow',
            api_name='tensorflow',
            api_version=2,
            compat_api_versions=[1])
        self.assertTrue('compat.v1' in imports,
                        msg='compat.v1 not in %s' % str(imports.keys()))
        self.assertTrue('compat.v1.test' in imports,
                        msg='compat.v1.test not in %s' % str(imports.keys()))
# Allow running this test file directly.
if __name__ == '__main__':
    test.main()
298,397 | reject | # ***************************************************************************
# * Copyright (c) 2022 Wanderer Fan <wandererfan@gmail.com> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Provides the TechDraw MoveView Task Dialog."""
# Module metadata.
__title__ = "TechDrawTools.TaskMoveView"
__author__ = "WandererFan"
__url__ = "https://www.freecad.org"
__version__ = "00.01"
__date__ = "2022/01/11"
from PySide import QtCore
import PySide.QtGui as QtGui
import FreeCAD as App
import FreeCADGui as Gui
from TechDrawTools import TDToolsMovers
import os
translate = App.Qt.translate
class TaskMoveView:
    """Task dialog that moves a TechDraw view from one page to another.

    The user picks a view, its current page, and a destination page via three
    chooser dialogs; accept() performs the actual move.
    """

    def __init__(self):
        # 'os' is imported at module level; the previous local import was redundant.
        self._uiPath = os.path.join(
            App.getHomePath(), "Mod/TechDraw/TechDrawTools/Gui/TaskMoveView.ui")
        self.form = Gui.PySideUic.loadUi(self._uiPath)
        self.form.setWindowTitle(translate("TechDraw_MoveView", "Move View to a different Page"))
        self.form.pbView.clicked.connect(self.pickView)
        self.form.pbFromPage.clicked.connect(self.pickFromPage)
        self.form.pbToPage.clicked.connect(self.pickToPage)
        self.viewName = ""
        self.fromPageName = ""
        self.toPageName = ""
        # Guard so only one chooser dialog can be open at a time.
        self.dialogOpen = False

    def accept(self):
        """Perform the move when the task dialog is accepted."""
        view = App.ActiveDocument.getObject(self.viewName)
        fromPage = App.ActiveDocument.getObject(self.fromPageName)
        toPage = App.ActiveDocument.getObject(self.toPageName)
        TDToolsMovers.moveView(view, fromPage, toPage)
        return True

    def METHOD_NAME(self):
        """Close the task dialog without moving anything."""
        return True

    def _chooseFromDialog(self, prompt, title, candidates):
        """Show the chooser dialog listing *candidates* (document objects).

        Returns the selected object's internal Name, or None if the dialog was
        cancelled or nothing was selected. Sets self.dialogOpen while open.
        """
        _dlgPath = os.path.join(
            App.getHomePath(), "Mod/TechDraw/TechDrawTools/Gui/DlgPageChooser.ui")
        dlg = Gui.PySideUic.loadUi(_dlgPath)
        self.dialogOpen = True
        try:
            dlg.lPrompt.setText(prompt)
            dlg.setWindowTitle(title)
            for obj in candidates:
                # Display "Label / Name" so objects with identical labels can
                # be told apart; the internal Name is what gets stored.
                item = QtGui.QListWidgetItem(obj.Label + " / " + obj.Name, dlg.lwPages)
                item.setData(QtCore.Qt.UserRole, obj.Name)
            if dlg.exec() == QtGui.QDialog.Accepted and dlg.lwPages.selectedItems():
                return dlg.lwPages.selectedItems()[0].data(QtCore.Qt.UserRole)
            return None
        finally:
            # Always release the guard, even if the dialog raises.
            self.dialogOpen = False

    def pickView(self):
        """Let the user pick the DrawView to move."""
        if self.dialogOpen:
            return
        views = [x for x in App.ActiveDocument.Objects
                 if x.isDerivedFrom("TechDraw::DrawView")]
        chosen = self._chooseFromDialog(
            translate("TechDraw_MoveView", "Select View to move from list."),
            translate("TechDraw_MoveView", "Select View"),
            views)
        if chosen is not None:
            self.viewName = chosen
            self.form.leView.setText(self.viewName)

    def pickFromPage(self):
        """Let the user pick the page the view currently lives on."""
        if self.dialogOpen:
            return
        pages = [x for x in App.ActiveDocument.Objects
                 if x.isDerivedFrom("TechDraw::DrawPage")]
        # Bug fix: this chooser previously displayed "Label / Label" for each
        # page, unlike pickView/pickToPage which show "Label / Name".
        chosen = self._chooseFromDialog(
            translate("TechDraw_MoveView", "Select From Page."),
            translate("TechDraw_MoveView", "Select Page"),
            pages)
        if chosen is not None:
            self.fromPageName = chosen
            self.form.leFromPage.setText(self.fromPageName)

    def pickToPage(self):
        """Let the user pick the destination page."""
        if self.dialogOpen:
            return
        pages = [x for x in App.ActiveDocument.Objects
                 if x.isDerivedFrom("TechDraw::DrawPage")]
        chosen = self._chooseFromDialog(
            translate("TechDraw_MoveView", "Select To Page."),
            translate("TechDraw_MoveView", "Select Page"),
            pages)
        if chosen is not None:
            self.toPageName = chosen
            self.form.leToPage.setText(self.toPageName)

    def setValues(self, viewName, fromPageName, toPageName):
        """Pre-populate the view and page fields of the dialog."""
        self.viewName = viewName
        self.form.leView.setText(viewName)
        self.fromPageName = fromPageName
        self.form.leFromPage.setText(fromPageName)
        self.toPageName = toPageName
        self.form.leToPage.setText(toPageName)
|
298,398 | get nested | #!/usr/bin/env python3
#
# Helper script for committing data to git and pushing upstream
#
# Copyright (c) 2017, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
import argparse
import logging
import os
import re
import sys
# Import oe and bitbake libs: make the script's ./lib directory importable,
# then let scriptpath register the bitbake/oe library paths so the oeqa
# imports below resolve.
scripts_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(scripts_path, 'lib'))
import scriptpath
scriptpath.add_bitbake_lib_path()
scriptpath.add_oe_lib_path()
from oeqa.utils.git import GitRepo, GitError
from oeqa.utils.metadata import metadata_from_bb
import oeqa.utils.gitarchive as gitarchive
# Setup logging
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
log = logging.getLogger()  # module-wide root logger
def parse_args(argv):
    """Build the command line parser and apply it to *argv*.

    :param argv: list of argument strings (None means sys.argv[1:])
    :return: the parsed argparse.Namespace
    """
    parser = argparse.ArgumentParser(
        description="Commit data to git and push upstream",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    add = parser.add_argument

    add('--debug', '-D', action='store_true',
        help="Verbose logging")
    add('--git-dir', '-g', required=True,
        help="Local git directory to use")
    add('--no-create', action='store_true',
        help="If GIT_DIR is not a valid Git repository, do not "
             "try to create one")
    add('--bare', action='store_true',
        help="Initialize a bare repository when creating a "
             "new one")
    # nargs='?' + const=True: bare "-p" means True, "-p REMOTE" keeps the name
    add('--push', '-p', nargs='?', default=False, const=True,
        help="Push to remote")
    add('--branch-name', '-b',
        default='{hostname}/{branch}/{machine}',
        help="Git branch name (pattern) to use")
    add('--no-tag', action='store_true',
        help="Do not create Git tag")
    add('--tag-name', '-t',
        default='{hostname}/{branch}/{machine}/{commit_count}-g{commit}/{tag_number}',
        help="Tag name (pattern) to use")
    add('--commit-msg-subject',
        default='Results of {branch}:{commit} on {hostname}',
        help="Subject line (pattern) to use in the commit message")
    add('--commit-msg-body',
        default='branch: {branch}\ncommit: {commit}\nhostname: {hostname}',
        help="Commit message body (pattern)")
    add('--tag-msg-subject',
        default='Test run #{tag_number} of {branch}:{commit} on {hostname}',
        help="Subject line (pattern) of the tag message")
    add('--tag-msg-body',
        default='',
        help="Tag message body (pattern)")
    add('--exclude', action='append', default=[],
        help="Glob to exclude files from the commit. Relative "
             "to DATA_DIR. May be specified multiple times")
    add('--notes', nargs=2, action='append', default=[],
        metavar=('GIT_REF', 'FILE'),
        help="Add a file as a note under refs/notes/GIT_REF. "
             "{branch_name} in GIT_REF will be expanded to the "
             "actual target branch name (specified by "
             "--branch-name). This option may be specified "
             "multiple times.")
    add('data_dir', metavar='DATA_DIR',
        help="Data to commit")

    return parser.parse_args(argv)
def METHOD_NAME(d, list_of_keys):
    """Walk a nested mapping along *list_of_keys* and return the leaf value.

    :param d: nested dict-like structure (e.g. bitbake metadata)
    :param list_of_keys: keys to follow, outermost first
    :return: the value found, or "" when the path does not exist
    """
    try:
        for k in list_of_keys:
            d = d[k]
        return d
    except (KeyError, TypeError):
        # KeyError: a key along the path is missing.
        # TypeError: an intermediate value is not subscriptable by key
        # (e.g. None or an int); treat the path as absent instead of
        # crashing the commit helper.
        return ""
def main(argv=None):
    """Entry point: archive DATA_DIR into the Git repo and optionally push.

    :param argv: argument strings for parse_args (None -> sys.argv[1:])
    :return: 0 on success, 1 on archive failure
    """
    args = parse_args(argv)
    if args.debug:
        log.setLevel(logging.DEBUG)

    try:
        # Collect the keywords used to expand branch/tag/message patterns
        # from the current bitbake build metadata.
        metadata = metadata_from_bb()
        key_paths = {
            'hostname': ['hostname'],
            'branch': ['layers', 'meta', 'branch'],
            'commit': ['layers', 'meta', 'commit'],
            'commit_count': ['layers', 'meta', 'commit_count'],
            'machine': ['config', 'MACHINE'],
        }
        keywords = {name: METHOD_NAME(metadata, path)
                    for name, path in key_paths.items()}

        gitarchive.gitarchive(args.data_dir, args.git_dir, args.no_create,
                              args.bare, args.commit_msg_subject.strip(),
                              args.commit_msg_body, args.branch_name,
                              args.no_tag, args.tag_name, args.tag_msg_subject,
                              args.tag_msg_body, args.exclude, args.notes,
                              args.push, keywords, log)
    except gitarchive.ArchiveError as err:
        log.error(str(err))
        return 1

    return 0
if __name__ == "__main__":
sys.exit(main()) |
298,399 | get book info | import re
from typing import List, Optional
from lms.models import LTIParams
from lms.services.vitalsource._client import VitalSourceClient
from lms.services.vitalsource.exceptions import VitalSourceMalformedRegex
from lms.services.vitalsource.model import VSBookLocation
class VitalSourceService:
    """A high-level interface for dealing with VitalSource."""

    # pylint: disable=too-many-arguments
    def __init__(
        self,
        enabled: bool = False,
        global_client: Optional[VitalSourceClient] = None,
        customer_client: Optional[VitalSourceClient] = None,
        user_lti_param: Optional[str] = None,
        user_lti_pattern: Optional[str] = None,
    ):
        """
        Initialise the service.

        :param enabled: Is VitalSource enabled for the customer?
        :param global_client: Client for making generic API calls
        :param customer_client: Client for making customer specific API calls
        :param user_lti_param: Field to lookup user details for SSO
        :param user_lti_pattern: A regex to apply to the user value to get the
            id. The first capture group will be used
        """
        self._enabled = enabled

        # Book metadata can be fetched with either key, but the customer's
        # own key is preferred: it restricts picks to books actually
        # available in their institution.
        self._metadata_client = customer_client or global_client

        # SSO is only meaningful with the customer's key, since user ids are
        # scoped to the institutional relationship with VitalSource.
        self._sso_client = customer_client

        self._user_lti_param = user_lti_param
        self._user_lti_pattern = user_lti_pattern

    @property
    def enabled(self) -> bool:
        """Check if the service has the minimum it needs to work."""
        if not self._enabled:
            return False
        return self._metadata_client is not None

    @property
    def sso_enabled(self) -> bool:
        """Check if the service can use single sign on."""
        if not self.enabled:
            return False
        return bool(self._sso_client and self._user_lti_param)

    def METHOD_NAME(self, book_id: str) -> dict:
        """Get details of a book."""
        client = self._metadata_client
        return client.METHOD_NAME(book_id)

    def get_table_of_contents(self, book_id: str) -> List[dict]:
        """Get the table of contents for a book."""
        client = self._metadata_client
        return client.get_table_of_contents(book_id)

    @classmethod
    def get_book_reader_url(cls, document_url) -> str:
        """
        Get the public URL for VitalSource book viewer.

        :param document_url: `vitalsource://` type URL identifying the document
        """
        location = VSBookLocation.from_document_url(document_url)
        return f"https://hypothesis.vitalsource.com/books/{location.book_id}/cfi/{location.cfi}"

    def get_sso_redirect(self, document_url, user_reference: str) -> str:
        """
        Get the public URL for VitalSource book viewer from our internal URL.

        That URL can be used to load VitalSource content in an iframe like we
        do with other types of content.

        :param document_url: `vitalsource://` type URL identifying the document
        :param user_reference: The user reference (you can use
            `get_user_reference()` to help you with this)
        """
        viewer_url = self.get_book_reader_url(document_url)
        return self._sso_client.get_sso_redirect(user_reference, viewer_url)

    def get_user_reference(self, lti_params: LTIParams) -> Optional[str]:
        """Get the user reference from the provided LTI params."""
        value = lti_params.get(self._user_lti_param)
        if not value:
            return None

        pattern = self.compile_user_lti_pattern(self._user_lti_pattern)
        if not pattern:
            # No customer-specific parsing configured: the raw value is it.
            return value

        # Some customers embed the id inside a larger string; extract it
        # with the configured regex's single capture group.
        match = pattern.search(value)
        return match.group(1) if match else None

    @staticmethod
    def compile_user_lti_pattern(pattern: str) -> Optional[re.Pattern]:
        """
        Compile and vet a user id pattern.

        :param pattern: String format of the regex to parse
        :raise VitalSourceMalformedRegex: For any issues with the regex
        """
        if not pattern:
            return None

        try:
            compiled = re.compile(pattern)
        except re.error as err:
            raise VitalSourceMalformedRegex(str(err), pattern=pattern) from err

        # Exactly one capture group is required: it marks the user id.
        if compiled.groups != 1:
            raise VitalSourceMalformedRegex(
                "The user regex must have one capture group (brackets)", pattern=pattern
            )

        return compiled
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.