id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
5,000 | set up | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from mock import MagicMock, patch
from django.test import TestCase
from pipeline_plugins.variables.collections.sites.open.cc import VarCmdbAttributeQuery
class VarCmdbAttributeQueryTestCase(TestCase):
    """Tests for the VarCmdbAttributeQuery pipeline variable."""

    def setUp(self):
        # Fixed: this method must be named ``setUp`` so unittest runs it
        # before each test; otherwise the patchers stopped in tearDown()
        # are never started and test_get_value hits the real modules.
        self.name = "name_token"
        # IPs separated by a mix of spaces and newlines on purpose: the
        # variable is expected to tolerate both separators.
        self.value = " 1.1.1.1 \n2.2.2.2 3.3.3.3\n4.4.4.4"
        self.context = {}
        self.executer = "tester"
        self.pipeline_data = {
            "executor": self.executer,
            "project_id": 1,
        }
        self.bk_biz_id = "bk_biz_id_token"
        self.supplier_account = "supplier_account_token"
        # Hosts returned by the mocked get_business_host; the result is
        # expected to keep bk_attr but drop bk_cloud_id.
        self.get_business_host_return = [
            {"bk_host_innerip": "1.1.1.1", "bk_cloud_id": 1, "bk_attr": 1},
            {"bk_host_innerip": "1.1.1.2", "bk_cloud_id": 2, "bk_attr": 2},
            {"bk_host_innerip": "1.1.1.3", "bk_attr": 3},
        ]
        mock_project_obj = MagicMock()
        mock_project_obj.bk_biz_id = self.bk_biz_id
        mock_project = MagicMock()
        mock_project.objects.get = MagicMock(return_value=mock_project_obj)
        self.project_patcher = patch("pipeline_plugins.variables.collections.sites.open.cc.Project", mock_project)
        self.supplier_account_for_project_patcher = patch(
            "pipeline_plugins.variables.collections.sites.open.cc.supplier_account_for_project",
            MagicMock(return_value=self.supplier_account),
        )
        self.project_patcher.start()
        self.supplier_account_for_project_patcher.start()

    def tearDown(self):
        # Undo the module-level patches started in setUp().
        self.project_patcher.stop()
        self.supplier_account_for_project_patcher.stop()

    def test_get_value(self):
        """get_value() resolves every IP in self.value to its host attrs."""
        mock_get_business_host = MagicMock(return_value=self.get_business_host_return)
        host_attrs_query = VarCmdbAttributeQuery(self.name, self.value, self.context, self.pipeline_data)
        with patch("pipeline_plugins.variables.collections.sites.open.cc.get_business_host", mock_get_business_host):
            value = host_attrs_query.get_value()
        self.assertEqual(
            value,
            {
                "1.1.1.1": {"bk_host_innerip": "1.1.1.1", "bk_attr": 1},
                "1.1.1.2": {"bk_host_innerip": "1.1.1.2", "bk_attr": 2},
                "1.1.1.3": {"bk_host_innerip": "1.1.1.3", "bk_attr": 3},
            },
        )
        mock_get_business_host.assert_called_once_with(
            self.executer,
            self.bk_biz_id,
            self.supplier_account,
            [
                "bk_cpu",
                "bk_isp_name",
                "bk_os_name",
                "bk_province_name",
                "bk_host_id",
                "import_from",
                "bk_os_version",
                "bk_disk",
                "operator",
                "bk_mem",
                "bk_host_name",
                "bk_host_innerip",
                "bk_comment",
                "bk_os_bit",
                "bk_outer_mac",
                "bk_asset_id",
                "bk_service_term",
                "bk_sla",
                "bk_cpu_mhz",
                "bk_host_outerip",
                "bk_state_name",
                "bk_os_type",
                "bk_mac",
                "bk_bak_operator",
                "bk_supplier_account",
                "bk_sn",
                "bk_cpu_module",
            ],
            ["1.1.1.1", "2.2.2.2", "3.3.3.3", "4.4.4.4"],
        )
5,001 | send transaction | '''
!!!! THESE TESTS ARE LONG AND COULD EACH TAKE A FEW MINUTES TO COMPLETE !!!!
THROUGHPUT Test send all transactions AT ONCE and then wait for all nodes to process them and come to consensus
After all node are in sync then the test are run to validate state etc.
'''
from tests.integration.mock import mocks_new
from lamden.crypto.wallet import Wallet
import zmq.asyncio
import asyncio
from unittest import TestCase
class TestMultiNode(TestCase):
    """Throughput integration tests over a mocked multi-node network."""

    def setUp(self):
        self.ctx = zmq.asyncio.Context()
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        self.n = None

    def tearDown(self):
        if self.n:
            for node in self.n.nodes:
                self.await_async_process(node.stop)
        self.ctx.destroy()
        self.loop.close()

    def await_async_process(self, process):
        """Run a coroutine factory to completion on the current loop."""
        tasks = asyncio.gather(
            process()
        )
        loop = asyncio.get_event_loop()
        res = loop.run_until_complete(tasks)
        return res

    def async_sleep(self, delay):
        """Sleep while letting other tasks on the loop make progress."""
        tasks = asyncio.gather(
            asyncio.sleep(delay)
        )
        loop = asyncio.get_event_loop()
        loop.run_until_complete(tasks)

    def send_tx(self, node, tx):
        # Queue a raw transaction directly onto a node's tx queue.
        node.tx_queue.append(tx)

    def test_all_transactions_propegate_to_all_nodes(self):
        delay = {'base': 0.1, 'self': 0.2}
        self.n = mocks_new.MockNetwork(num_of_delegates=2, num_of_masternodes=2, ctx=self.ctx, metering=False, delay=delay)
        self.await_async_process(self.n.start)
        self.await_async_process(self.n.pause_all_queues)
        num_of_transactions_to_send = 10
        self.async_sleep(5)
        for i in range(num_of_transactions_to_send):
            self.n.send_random_currency_transaction(
                sender_wallet=Wallet()
            )
        self.async_sleep(5)
        for node in self.n.nodes:
            self.assertEqual(num_of_transactions_to_send, len(node.obj.main_processing_queue))

    def test_all_results_propegate_to_all_nodes(self):
        delay = {'base': 0.1, 'self': 0.2}
        self.n = mocks_new.MockNetwork(num_of_delegates=2, num_of_masternodes=2, ctx=self.ctx, metering=False, delay=delay)
        self.await_async_process(self.n.start)
        self.await_async_process(self.n.pause_all_validation_queues)
        num_of_transactions_to_send = 10
        self.async_sleep(10)
        for i in range(num_of_transactions_to_send):
            self.n.send_random_currency_transaction(
                sender_wallet=self.n.founder_wallet
            )
        self.async_sleep(15)
        for node in self.n.nodes:
            self.assertEqual(num_of_transactions_to_send, len(node.obj.validation_queue))
            # Every node should hold a solution from every peer.
            for hlc_timestamp in node.obj.validation_queue.validation_results.keys():
                results = node.obj.validation_queue.validation_results.get(hlc_timestamp)
                solutions = results.get('solutions')
                self.assertEqual(len(self.n.nodes), len(solutions))

    def test_all_nodes_create_blocks_from_results(self):
        delay = {'base': 0.1, 'self': 0.2}
        self.n = mocks_new.MockNetwork(num_of_delegates=2, num_of_masternodes=2, ctx=self.ctx, metering=False,
                                       delay=delay)
        self.await_async_process(self.n.start)
        num_of_transactions_to_send = 10
        self.async_sleep(5)
        for i in range(num_of_transactions_to_send):
            self.n.send_random_currency_transaction(
                sender_wallet=Wallet()
            )
        self.async_sleep(60)
        # test all nodes have blocks ordered by hlc and the correct number of blocks
        for node in self.n.nodes:
            last_hlc = "0"
            for i in range(num_of_transactions_to_send):
                i = i + 1
                block = node.obj.get_block_by_number(block_number=i)
                self.assertIsNotNone(block)
                block_hlc_timestamp = block.get('hlc_timestamp')
                self.assertGreater(block_hlc_timestamp, last_hlc)
                # Fixed: was ``block_hlc_timestam`` (typo), which raised
                # NameError instead of carrying the timestamp forward.
                last_hlc = block_hlc_timestamp
5,002 | test location | from os.path import dirname, join
from unittest.mock import patch
import pytest
import scrapy
from city_scrapers_core.constants import BOARD, TENTATIVE
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from city_scrapers.spiders.il_health_facilities import IlHealthFacilitiesSpider
spider = IlHealthFacilitiesSpider()
test_response = file_response(
join(dirname(__file__), "files", "il_health_facilities.html"),
url="https://www2.illinois.gov/sites/hfsrb/events/Pages/Board-Meetings.aspx",
)
# The crawler for il_health_facilities grabs information from some pages that are
# linked to from the original page.
# As such, we need to test the adjacent links as well
adjacent_links = [
"https://www2.illinois.gov/sites/hfsrb/events/Pages/March-21%202023-State-Board-Meeting.aspx", # noqa
"https://www2.illinois.gov/sites/hfsrb/events/Pages/May-9-2023-State-Board-Meeting.aspx", # noqa
"https://www2.illinois.gov/sites/hfsrb/events/Pages/June-27-2023%20State%20Board%20Meeting.aspx", # noqa
"https://www2.illinois.gov/sites/hfsrb/events/Pages/August-15-2023-State-Board-Meeting.aspx", # noqa
"https://www2.illinois.gov/sites/hfsrb/events/Pages/October-3-2023-State-Board-Meeting.aspx", # noqa
"https://www2.illinois.gov/sites/hfsrb/events/Pages/December-5-2023%20State%20Board%20Meeting.aspx", # noqa
]
def mock_scrapy_request(link, callback):
    """Stand-in for scrapy.http.Request used by these tests.

    Instead of performing a network request, feed ``callback`` a canned
    HTML fixture and return the first item it yields.

    NOTE(review): ``link`` is ignored — every adjacent page is served the
    same fixture file, so all parsed items look like the March meeting.
    """
    with open(
        join(dirname(__file__), "files", "il_health_facilities_helper.html"), "rb"
    ) as f:
        body = f.read()
    response = scrapy.http.HtmlResponse(
        url="my HTML string", body=body, encoding="utf-8"
    )
    result = next(callback(response))
    return result
@patch("scrapy.http.Request", mock_scrapy_request)
def generate_parsed_items():
    """Parse the fixture response with time frozen at 2023-02-09.

    Freezing time keeps status calculations (upcoming vs. passed)
    deterministic regardless of when the test suite runs.
    """
    freezer = freeze_time("2023-02-09")
    freezer.start()
    parsed_items = [item for item in spider.parse(test_response)]
    freezer.stop()
    return parsed_items
parsed_items = generate_parsed_items()
def test_num_meetings_found():
    # One meeting per adjacent link in the fixture page.
    expected_count = 6
    assert len(parsed_items) == expected_count
@pytest.mark.parametrize("item", parsed_items)
def test_title(item):
    # Every item comes from the same helper fixture, hence the same title.
    expected_title = "March 21, 2023 State Board Meeting"
    assert item["title"] == expected_title
@pytest.mark.parametrize("item", parsed_items)
def test_description(item):
    # Meetings carry no description text.
    assert "" == item["description"]
@pytest.mark.parametrize("item", parsed_items)
def test_start(item):
    formatted_start = item["start"].strftime("%Y_%B_%d_%I_%M_%p")
    assert formatted_start == "2023_March_21_09_00_AM"
@pytest.mark.parametrize("item", parsed_items)
def test_end(item):
    formatted_end = item["end"].strftime("%Y_%B_%d_%I_%M_%p")
    assert formatted_end == "2023_March_21_04_00_PM"
@pytest.mark.parametrize("item", parsed_items)
def test_time_notes(item):
    # No special timing notes are expected on any meeting.
    assert "" == item["time_notes"]
@pytest.mark.parametrize("item", parsed_items)
def test_id(item):
    expected_id = "il_health_facilities/202303210900/x/march_21_2023_state_board_meeting"
    assert item["id"] == expected_id
@pytest.mark.parametrize("item", parsed_items)
def test_status(item):
    assert TENTATIVE == item["status"]
@pytest.mark.parametrize("item", parsed_items)
def test_location(item):
    """Every meeting should use the board's fixed street address."""
    # Renamed from the METHOD_NAME placeholder to the conventional
    # pytest name for this assertion.
    assert item["location"] == {
        "name": "",
        # NOTE(review): "Bolingbrok" looks like a typo for "Bolingbrook",
        # but the expected value must match what the spider scrapes —
        # confirm against the fixture before "fixing" it.
        "address": "2001 Rodeo Drive, Bolingbrok, Illinois",
    }
@pytest.mark.parametrize("item", parsed_items)
def test_source(item):
    # Fixed: the original comparison lacked ``assert`` and was a no-op
    # expression statement, so this test could never fail.
    assert (
        item["source"]
        == "https://www2.illinois.gov/sites/hfsrb/events/Pages/Board-Meetings.aspx"
    )
@pytest.mark.parametrize("item", parsed_items)
def test_links(item):
    expected_links = [
        {
            "href": "https://www2.illinois.gov/sites/hfsrb/events/Pages/Board-Meetings.aspx",  # noqa
            "title": "Board and Subcommittee Meetings",
        },
        {
            "href": "https://www2.illinois.gov/sites/hfsrb/events/Pages/Previous-Meetings.aspx",  # noqa
            "title": "Previous Meeting",
        },
        {
            "href": "https://www2.illinois.gov/sites/hfsrb/events/Pages/Public-Hearing.aspx",  # noqa
            "title": "Public Hearings",
        },
    ]
    assert item["links"] == expected_links
@pytest.mark.parametrize("item", parsed_items)
def test_classification(item):
    assert BOARD == item["classification"]
@pytest.mark.parametrize("item", parsed_items)
def test_all_day(item):
    all_day_flag = item["all_day"]
    assert all_day_flag is False
5,003 | clear any data | """
FitPanel class contains fields allowing to fit models and data
"""
import sys
import datetime
from PySide6 import QtCore
from PySide6 import QtGui
from PySide6 import QtWidgets
class ResultPanel(QtWidgets.QTabWidget):
    """
    Tabbed panel that displays fitting results (convergence, uncertainty,
    correlation and parameter-trace views) for a fitted data set.

    :note: For Fit to be performed the user should check at least one parameter
        on fit Panel window.
    """
    ## Internal name for the AUI manager
    window_name = "Result panel"
    # Emitted on close so the parent can hide (not destroy) this window.
    windowClosedSignal = QtCore.Signal()

    def __init__(self, parent, manager=None, *args, **kwargs):
        """Create the panel and the bumps result views it hosts."""
        super(ResultPanel, self).__init__(parent)
        self.manager = manager
        self.communicator = self.manager.communicator()
        self.setMinimumSize(400, 400)
        # id of the data set currently shown; None when the panel is empty
        self.data_id = None
        self.updateBumps()  # patch bumps ## TEMPORARY ##
        # the following two imports will move to the top once
        # the monkeypatching is gone
        from bumps.gui.convergence_view import ConvergenceView
        from bumps.gui.uncertainty_view import UncertaintyView, CorrelationView, TraceView
        self.convergenceView = ConvergenceView()
        self.uncertaintyView = UncertaintyView()
        self.correlationView = CorrelationView()
        self.traceView = TraceView()
        self.show()

    def updateBumps(self):
        """
        Monkeypatching bumps plot viewer to allow Qt
        """
        from . import PlotView
        import bumps.gui
        sys.modules['bumps.gui.plot_view'] = PlotView

    def onPlotResults(self, results, optimizer="Unknown"):
        """Populate the tabs from a fit result; show only the relevant views."""
        # import moved here due to its cost
        from bumps.dream.stats import var_stats
        self.clearAnyData()
        result = results[0][0]
        name = result.data.sas_data.name
        current_optimizer = optimizer
        self.data_id = result.data.sas_data.id
        self.setWindowTitle(self.window_name + " - " + name + " - " + current_optimizer)
        if hasattr(result, 'convergence') and len(result.convergence) > 0:
            best, pop = result.convergence[:, 0], result.convergence[:, 1:]
            self.convergenceView.update(best, pop)
            self.addTab(self.convergenceView, "Convergence")
            self.convergenceView.show()
        else:
            self.convergenceView.close()
        if hasattr(result, 'uncertainty_state'):
            stats = var_stats(result.uncertainty_state.draw())
            self.correlationView.update(result.uncertainty_state)
            self.correlationView.show()
            self.addTab(self.correlationView, "Correlation")
            self.uncertaintyView.update((result.uncertainty_state, stats))
            self.uncertaintyView.show()
            self.addTab(self.uncertaintyView, "Uncertainty")
            self.traceView.update(result.uncertainty_state)
            self.traceView.show()
            self.addTab(self.traceView, "Parameter Trace")
        else:
            for view in (self.correlationView, self.uncertaintyView, self.traceView):
                view.close()
        # no tabs in the widget - possibly LM optimizer. Mark "closed"
        if self.count() == 0:
            self.close()

    def onDataDeleted(self, data):
        """ Check if the data set is shown in the window and close tabs as needed. """
        if not data or not self.isVisible():
            return
        if data.id == self.data_id:
            self.setWindowTitle(self.window_name)
            self.clearAnyData()
            self.close()

    def clearAnyData(self):
        """ Clear any previous results and reset window to its base state. """
        # Renamed from the METHOD_NAME placeholder, camelCase to match the
        # rest of this Qt class.
        self.data_id = None
        # Clear up previous results
        for view in (self.convergenceView, self.correlationView,
                     self.uncertaintyView, self.traceView):
            view.close()
        # close all tabs. REMEMBER TO USE REVERSED RANGE!!!
        for index in reversed(range(self.count())):
            self.removeTab(index)

    def closeEvent(self, event):
        """
        Overwrite QDialog close method to allow for custom widget close
        """
        # notify the parent so it hides this window
        self.windowClosedSignal.emit()
        event.ignore()
|
5,004 | init message logger | import os
import logging
import functools
import json
import time
from datetime import datetime
# from tensorboardX import SummaryWriter
import yaml
import cv2
import numpy as np
from .config import Configurable, State
class Logger(Configurable):
    """
    Training logger: console/file messages, TensorBoard summaries, metric
    snapshots and visualization images, all under a per-run directory.
    """

    SUMMARY_DIR_NAME = 'summaries'
    VISUALIZE_NAME = 'visualize'
    LOG_FILE_NAME = 'output.log'
    ARGS_FILE_NAME = 'args.log'
    METRICS_FILE_NAME = 'metrics.log'

    database_dir = State(default='./outputs/')
    log_dir = State(default='workspace')
    verbose = State(default=False)
    level = State(default='info')
    log_interval = State(default=100)

    def __init__(self, **kwargs):
        self.load_all(**kwargs)
        self._make_storage()
        cmd = kwargs['cmd']
        self.name = cmd['name']
        self.log_dir = os.path.join(self.log_dir, self.name)
        try:
            self.verbose = cmd['verbose']
        except KeyError:
            # Fixed: was a bare ``except``; only a missing key is expected.
            print('verbose:', self.verbose)
        if self.verbose:
            print('Initializing log dir for', self.log_dir)
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)
        self.message_logger = self.init_message_logger()
        summary_path = os.path.join(self.log_dir, self.SUMMARY_DIR_NAME)
        # NOTE(review): SummaryWriter comes from tensorboardX, whose import
        # is commented out at the top of this file — confirm it is restored
        # before Logger is instantiated.
        self.tf_board_logger = SummaryWriter(summary_path)
        self.metrics_writer = open(os.path.join(
            self.log_dir, self.METRICS_FILE_NAME), 'at')
        self.timestamp = time.time()
        self.logged = -1      # number of ETA samples folded into self.speed
        self.speed = None     # smoothed seconds per step
        self.eta_time = None  # wall-clock time of the previous ETA sample

    def _make_storage(self):
        """Create the backing storage dir and symlink log_dir onto it."""
        application = os.path.basename(os.getcwd())
        storage_dir = os.path.join(
            self.database_dir, self.log_dir, application)
        if not os.path.exists(storage_dir):
            os.makedirs(storage_dir)
        if not os.path.exists(self.log_dir):
            os.symlink(storage_dir, self.log_dir)

    def save_dir(self, dir_name):
        """Return a path for ``dir_name`` under the run's log directory."""
        return os.path.join(self.log_dir, dir_name)

    def init_message_logger(self):
        """Build the 'messages' logger with console and file handlers."""
        message_logger = logging.getLogger('messages')
        message_logger.setLevel(
            logging.DEBUG if self.verbose else logging.INFO)
        formatter = logging.Formatter(
            '[%(levelname)s] [%(asctime)s] %(message)s')
        std_handler = logging.StreamHandler()
        std_handler.setLevel(message_logger.level)
        std_handler.setFormatter(formatter)
        file_handler = logging.FileHandler(
            os.path.join(self.log_dir, self.LOG_FILE_NAME))
        file_handler.setLevel(message_logger.level)
        file_handler.setFormatter(formatter)
        message_logger.addHandler(std_handler)
        message_logger.addHandler(file_handler)
        return message_logger

    def report_time(self, name: str):
        """Log elapsed time since the previous report_time call (verbose only)."""
        if self.verbose:
            self.info(name + " time :" + str(time.time() - self.timestamp))
        self.timestamp = time.time()

    def report_eta(self, steps, total, epoch):
        """Print a carriage-return progress line with a moving-average ETA."""
        self.logged = self.logged % total + 1
        steps = steps % total
        if self.eta_time is None:
            self.eta_time = time.time()
            speed = -1
        else:
            eta_time = time.time()
            speed = eta_time - self.eta_time
            if self.speed is not None:
                # running average over the samples seen this epoch
                speed = ((self.logged - 1) * self.speed + speed) / self.logged
            self.speed = speed
            self.eta_time = eta_time
        seconds = (total - steps) * speed
        hours = seconds // 3600
        minutes = (seconds - (hours * 3600)) // 60
        seconds = seconds % 60
        print('%d/%d batches processed in epoch %d, ETA: %2d:%2d:%2d' %
              (steps, total, epoch,
               hours, minutes, seconds), end='\r')

    def args(self, parameters=None):
        """Load (parameters is None) or dump the run's arguments file."""
        if parameters is None:
            with open(os.path.join(self.log_dir, self.ARGS_FILE_NAME), 'rt') as reader:
                # safe_load: args.log only contains plain YAML written by the
                # dump branch below; bare yaml.load is deprecated and unsafe.
                return yaml.safe_load(reader.read())
        with open(os.path.join(self.log_dir, self.ARGS_FILE_NAME), 'wt') as writer:
            yaml.dump(parameters.dump(), writer)

    def metrics(self, epoch, steps, metrics_dict):
        """Record metric averages to console, metrics file and TensorBoard."""
        results = {}
        for name, a in metrics_dict.items():
            results[name] = {'count': a.count, 'value': float(a.avg)}
            self.add_scalar('metrics/' + name, a.avg, steps)
        result_dict = {
            str(datetime.now()): {
                'epoch': epoch,
                'steps': steps,
                **results
            }
        }
        string_result = yaml.dump(result_dict)
        self.info(string_result)
        self.metrics_writer.write(string_result)
        self.metrics_writer.flush()

    def named_number(self, name, num=None, default=0):
        """Read (num is None) or persist a named integer signal file."""
        if num is None:
            # NOTE(review): has_signal is not defined on this class; it is
            # resolved via __getattr__ — confirm which object provides it.
            return int(self.has_signal(name)) or default
        else:
            with open(os.path.join(self.log_dir, name), 'w') as writer:
                writer.write(str(num))
            return num

    epoch = functools.partialmethod(named_number, 'epoch')
    iter = functools.partialmethod(named_number, 'iter')

    def message(self, level, content):
        """Dispatch ``content`` to the message logger at the named level."""
        self.message_logger.__getattribute__(level)(content)

    def images(self, prefix, image_dict, step):
        """Send a dict of HWC images to TensorBoard under ``prefix/``."""
        for name, image in image_dict.items():
            self.add_image(prefix + '/' + name, image, step, dataformats='HWC')

    def merge_save_images(self, name, images):
        """Vertically concatenate ``images`` and save as a single JPEG."""
        if not images:
            return  # nothing to save; avoids UnboundLocalError below
        for i, image in enumerate(images):
            if i == 0:
                result = image
            else:
                result = np.concatenate([result, image], 0)
        cv2.imwrite(os.path.join(self.vis_dir(), name+'.jpg'), result)

    def vis_dir(self):
        """Return (creating if needed) the visualization directory."""
        vis_dir = os.path.join(self.log_dir, self.VISUALIZE_NAME)
        if not os.path.exists(vis_dir):
            os.mkdir(vis_dir)
        return vis_dir

    def save_image_dict(self, images, max_size=1024):
        """Save each image resized so its longer side is at most ``max_size``."""
        for file_name, image in images.items():
            height, width = image.shape[:2]
            if height > width:
                actual_height = min(height, max_size)
                actual_width = int(round(actual_height * width / height))
            else:
                actual_width = min(width, max_size)
                actual_height = int(round(actual_width * height / width))
            image = cv2.resize(image, (actual_width, actual_height))
            cv2.imwrite(os.path.join(self.vis_dir(), file_name+'.jpg'), image)

    def __getattr__(self, name):
        message_levels = set(['debug', 'info', 'warning', 'error', 'critical'])
        if name == '__setstate__':
            # Blocks pickle's probing so it does not recurse into this hook.
            raise AttributeError('haha')
        if name in message_levels:
            return functools.partial(self.message, name)
        elif hasattr(self.__dict__.get('tf_board_logger'), name):
            return self.tf_board_logger.__getattribute__(name)
        # Fixed: the original ended with a bare ``super()`` expression and
        # implicitly returned None for unknown names; __getattr__ must raise.
        raise AttributeError(name)
5,005 | set config | import os
import subprocess
from jadi import component
import aj
from aj.plugins.augeas.api import Augeas
from aj.plugins.network.api import NetworkManager
from .ifconfig import ifconfig_up, ifconfig_down, ifconfig_get_ip, ifconfig_get_up
@component(NetworkManager)
class CentOSNetworkManager(NetworkManager):
    """NetworkManager backend for CentOS ifcfg-* network script files."""

    path = '/etc/sysconfig/network-scripts'
    aug_path = '/files' + path

    @classmethod
    def __verify__(cls):
        """
        Verify if this manager is relevant.

        :return: bool
        :rtype: bool
        """
        return aj.platform in ['centos']

    def __init__(self, context):
        NetworkManager.__init__(self, context)

    def get_augeas(self, iface):
        """
        Read the content of interfaces config file through augeas.

        :param iface: Network interface, e.g. eth0
        :type iface: string
        :return: Augeas object
        :rtype: augeas
        """
        aug = Augeas(modules=[{
            'name': 'Shellvars',
            'lens': 'Shellvars.lns',
            'incl': [
                os.path.join(self.path, 'ifcfg-' + iface),
            ]
        }])
        aug.load()
        return aug

    def get_config(self):
        """
        Parse the content of interface config file through augeas.

        :return: List of iface informations, one iface per dict
        :rtype: list of dict
        """
        ifaces = []
        for file in os.listdir(self.path):
            if not file.startswith('ifcfg-'):
                continue
            name = file.split('-')[1]
            aug_path = os.path.join(self.aug_path, file)
            aug = self.get_augeas(name)
            is_ipv6 = bool(aug.get(aug_path + '/IPV6INIT'))
            iface = {
                'name': name,
                'family': 'inet6' if is_ipv6 else 'inet',
                'addressing': aug.get(aug_path + '/BOOTPROTO') or 'static',
                'address': aug.get(aug_path + '/IPADDR'),
                'mask': aug.get(aug_path + '/NETMASK'),
                # Fixed: the condition was inverted — it read GATEWAY for
                # IPv6 interfaces and IPV6_DEFAULTGW for IPv4, the opposite
                # of the 'family' mapping above.
                'gateway': aug.get(aug_path + '/IPV6_DEFAULTGW') if is_ipv6 else aug.get(aug_path + '/GATEWAY'),
                'hwaddress': aug.get(aug_path + '/HWADDR'),
                'dhcpClient': aug.get(aug_path + '/DHCP_HOSTNAME'),
            }
            ifaces.append(iface)
        return ifaces

    def set_config(self, config):
        """
        Set the new config in the config file through augeas.

        :param config: List of iface informations, one dict per iface
        :type config: list of dict
        """
        # Plain iteration: the enumerate() index was never used.
        for iface in config:
            aug = self.get_augeas(iface['name'])
            # Fixed: formatting the whole dict produced a bogus file name
            # such as "ifcfg-{'name': ...}"; use the interface name.
            file = f"ifcfg-{iface['name']}"
            aug_path = os.path.join(self.aug_path, file)
            if iface['family'] == 'inet':
                aug.remove(aug_path + '/IPV6INIT')
                aug.remove(aug_path + '/IPV6ADDR')
                aug.remove(aug_path + '/IPV6_DEFAULTGW')
                aug.setd(aug_path + '/IPADDR', iface['address'])
                aug.setd(aug_path + '/NETMASK', iface['mask'])
                aug.setd(aug_path + '/GATEWAY', iface['gateway'])
            else:
                aug.remove(aug_path + '/IPADDR')
                aug.remove(aug_path + '/NETMASK')
                aug.remove(aug_path + '/GATEWAY')
                aug.setd(aug_path + '/IPV6INIT', 'yes')
                aug.setd(aug_path + '/IPV6ADDR', iface['address'])
                aug.setd(aug_path + '/IPV6_DEFAULTGW', iface['gateway'])
            # NOTE(review): get_config() exposes this value as 'addressing'
            # but 'method' is read here — confirm which key callers send.
            aug.setd(aug_path + '/BOOTPROTO', iface['method'])
            aug.setd(aug_path + '/HWADDR', iface['hwaddress'])
            aug.setd(aug_path + '/DHCP_HOSTNAME', iface['dhcpClient'])
            aug.save()

    def get_state(self, iface):
        """
        Get ip and status for an iface.

        :param iface: Network interface, e.g. eth0
        :type iface: string
        :return: Ip and status
        :rtype: dict
        """
        return {
            'address': ifconfig_get_ip(iface),
            'up': ifconfig_get_up(iface),
        }

    def up(self, iface):
        """
        Bring an iface up.

        :param iface: Network interface, e.g. eth0
        :type iface: string
        """
        ifconfig_up(iface)

    def down(self, iface):
        """
        Bring an iface down.

        :param iface: Network interface, e.g. eth0
        :type iface: string
        """
        ifconfig_down(iface)

    def get_hostname(self):
        """
        Get hostname value.

        :return: Hostname
        :rtype: string
        """
        return subprocess.check_output('hostname', encoding='utf-8')

    def set_hostname(self, value):
        """
        Write new hostname in /etc/hostname.

        :param value: Hostname name
        :type value: string
        """
        with open('/etc/hostname', 'w') as f:
            f.write(value)
        subprocess.check_call(['hostname', value])
5,006 | process json | #!/usr/bin/python3
import argparse
import json
import os
import requests
from flask import Flask, request
app = Flask(__name__)
class State:
in_progress = "in_progress"
failed = "failed"
success = "success"
def submit_ci_status(key = "IROHA",
                     state = State.in_progress,
                     url = "null",
                     name = "null",
                     description = "null",
                     revision = "null"):
    """POST a build-status record for ``revision`` to Upsource."""
    upsource_url = "http://upsource.soramitsu.co.jp/~buildStatus"
    project = "iroha"
    payload = {
        "key": key,
        "state": state,
        "url": url,
        "name": name,
        "description": description,
        "project": project,
        "revision": revision
    }
    # fails if token is not present
    token = os.environ["UPSOURCE_TOKEN"]
    headers = {
        "Content-Type": "application/json; charset=UTF-8",
        "Authorization": "Basic {}".format(token)
    }
    response = requests.post(
        upsource_url,
        headers=headers,
        data=json.dumps(payload)
    )
    print("status code: {}".format(response.status_code))
def METHOD_NAME(parsed_json):
    """Extract the CircleCI webhook fields needed for an Upsource update.

    Returns a dict with committer/commit/build info, plus ``failed_step``
    when some step reports a failure; returns None for malformed payloads.
    (The METHOD_NAME identifier is kept because recv_json() calls it.)
    """
    options = {}
    try:
        pl = parsed_json["payload"]
        options["committer_login"] = pl["all_commit_details"][0]["committer_login"]
        options["commit"] = pl["all_commit_details"][0]["commit"]
        options["build_num"] = pl["build_num"]
        options["build_url"] = pl["build_url"]
        options["outcome"] = pl["outcome"]
        steps = pl["steps"]
        for step in steps:
            actions = step["actions"][0]
            if actions["failed"]:  # not None
                options["failed_step"] = step["name"]
        return options
    except (KeyError, IndexError, TypeError):
        # Fixed: was a bare ``except`` that also swallowed SystemExit and
        # KeyboardInterrupt; only lookup errors on a bad payload are expected.
        return None
def prepare_key(s):
    """Build the Upsource status key for a build number."""
    key_template = "IROHA-{}"
    return key_template.format(s)
def prepare_state(s):
    """Map a CircleCI outcome string onto an Upsource State value."""
    if s == "success":
        return State.success
    return State.failed
def prepare_name(s):
    """Coerce a value to its string form for the status payload."""
    text = str(s)
    return text
def prepare_description(s):
    """Build the 'By <committer>' description string."""
    description_template = "By {}"
    return description_template.format(s)
def in_progress_update():
    """Report the current CircleCI build to Upsource as in-progress."""
    print('in progress update')
    try:
        # try to get these environment variables
        # throw, if at least one is missing
        env = os.environ
        build_num = str(env["CIRCLE_BUILD_NUM"])
        build_url = str(env["CIRCLE_BUILD_URL"])
        commit = env["CIRCLE_SHA1"]
        username = env["CIRCLE_USERNAME"]
        submit_ci_status(
            key=prepare_key(build_num),
            state=State.in_progress,
            url=build_url,
            name=build_num,
            description=prepare_name(username),
            revision=commit
        )
    except Exception as e:
        # just print exception and quit with no errcode
        print("exception occurred: {}".format(e))
@app.route("/", methods=['POST'])
def recv_json():
    # Webhook endpoint: receives a CircleCI build-outcome payload and
    # forwards the status to Upsource.  Always answers HTTP 200 with a
    # short text body so CircleCI does not retry on handled errors.
    try:
        if len(request.data) > 10 * 1024**2: # 10 MB
            return "request is too big"
        options = METHOD_NAME(request.get_json())
        if not options:
            return "can not parse json body"
        submit_ci_status(
            key = prepare_key(options["build_num"]),
            state = prepare_state(options["outcome"]),
            url = options["build_url"],
            name = prepare_name(options["build_num"]),
            description = prepare_description(options["committer_login"]),
            revision = options["commit"]
        )
        return "ok"
    except Exception as e:
        # Broad on purpose: a webhook handler must not crash the server.
        return "error occurred: {}".format(e)
def main():
    """CLI entry: one-shot --in-progress mode or long-running webhook server."""
    parser = argparse.ArgumentParser(description='Update upsource CI status')
    parser.add_argument('--in-progress', action='store_true',
                        help='run script once in circle ci, notify upsource about "in progress" status of current commit')
    parser.add_argument('--server', dest='port',
                        help='run script as a server on specified interface and port. it processes failed/succeeded commits')
    args = parser.parse_args()
    if not args.port and not args.in_progress:
        print("use -h for help")
        exit(0)
    elif args.port:
        try:
            port = int(args.port)
        except ValueError:
            # Fixed: was a bare ``except``; only a non-numeric --server
            # argument is expected here.
            print("can not parse port")
            exit(1)
        app.run(host='0.0.0.0', port=port)
    elif args.in_progress:
        in_progress_update()


if __name__ == '__main__':
    main()
5,007 | tags | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetSpacecraftResult',
'AwaitableGetSpacecraftResult',
'get_spacecraft',
'get_spacecraft_output',
]
@pulumi.output_type
class GetSpacecraftResult:
"""
Customer creates a spacecraft resource to schedule a contact.
"""
def __init__(__self__, id=None, links=None, location=None, name=None, norad_id=None, system_data=None, METHOD_NAME=None, title_line=None, tle_line1=None, tle_line2=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if links and not isinstance(links, list):
raise TypeError("Expected argument 'links' to be a list")
pulumi.set(__self__, "links", links)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if norad_id and not isinstance(norad_id, str):
raise TypeError("Expected argument 'norad_id' to be a str")
pulumi.set(__self__, "norad_id", norad_id)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", METHOD_NAME)
if title_line and not isinstance(title_line, str):
raise TypeError("Expected argument 'title_line' to be a str")
pulumi.set(__self__, "title_line", title_line)
if tle_line1 and not isinstance(tle_line1, str):
raise TypeError("Expected argument 'tle_line1' to be a str")
pulumi.set(__self__, "tle_line1", tle_line1)
if tle_line2 and not isinstance(tle_line2, str):
raise TypeError("Expected argument 'tle_line2' to be a str")
pulumi.set(__self__, "tle_line2", tle_line2)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def links(self) -> Sequence['outputs.SpacecraftLinkResponse']:
"""
Immutable list of Spacecraft links.
"""
return pulumi.get(self, "links")
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="noradId")
def norad_id(self) -> Optional[str]:
"""
NORAD ID of the spacecraft.
"""
return pulumi.get(self, "norad_id")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="titleLine")
def title_line(self) -> str:
"""
Title line of the two-line element set (TLE).
"""
return pulumi.get(self, "title_line")
@property
@pulumi.getter(name="tleLine1")
def tle_line1(self) -> str:
"""
Line 1 of the two-line element set (TLE).
"""
return pulumi.get(self, "tle_line1")
@property
@pulumi.getter(name="tleLine2")
def tle_line2(self) -> str:
"""
Line 2 of the two-line element set (TLE).
"""
return pulumi.get(self, "tle_line2")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetSpacecraftResult(GetSpacecraftResult):
    # Awaitable wrapper: lets callers ``await`` an already-resolved invoke
    # result.  __await__ never actually yields (the ``if False`` makes this
    # a generator) and returns a plain copy of the result.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetSpacecraftResult(
            id=self.id,
            links=self.links,
            location=self.location,
            name=self.name,
            norad_id=self.norad_id,
            system_data=self.system_data,
            METHOD_NAME=self.METHOD_NAME,
            title_line=self.title_line,
            tle_line1=self.tle_line1,
            tle_line2=self.tle_line2,
            type=self.type)
def get_spacecraft(resource_group_name: Optional[str] = None,
                   spacecraft_name: Optional[str] = None,
                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSpacecraftResult:
    """
    Gets the specified spacecraft in a specified resource group.
    Azure REST API version: 2022-11-01.

    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str spacecraft_name: Spacecraft ID.
    """
    # Build the invoke arguments expected by the Azure Native provider.
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['spacecraftName'] = spacecraft_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:orbital:getSpacecraft', __args__, opts=opts, typ=GetSpacecraftResult).value
    return AwaitableGetSpacecraftResult(
        id=pulumi.get(__ret__, 'id'),
        links=pulumi.get(__ret__, 'links'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        norad_id=pulumi.get(__ret__, 'norad_id'),
        system_data=pulumi.get(__ret__, 'system_data'),
        METHOD_NAME=pulumi.get(__ret__, 'tags'),
        title_line=pulumi.get(__ret__, 'title_line'),
        tle_line1=pulumi.get(__ret__, 'tle_line1'),
        tle_line2=pulumi.get(__ret__, 'tle_line2'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_spacecraft)
def get_spacecraft_output(resource_group_name: Optional[pulumi.Input[str]] = None,
                          spacecraft_name: Optional[pulumi.Input[str]] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSpacecraftResult]:
    """
    Gets the specified spacecraft in a specified resource group.
    Azure REST API version: 2022-11-01.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str spacecraft_name: Spacecraft ID.
    """
    # Body intentionally empty: ``lift_output_func`` wraps ``get_spacecraft``
    # to accept Inputs/Outputs, so the decorator supplies the implementation.
    ...
5,008 | id | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetMonitorResult',
'AwaitableGetMonitorResult',
'get_monitor',
'get_monitor_output',
]
@pulumi.output_type
class GetMonitorResult:
    """
    Monitor resource.

    NOTE(review): ``METHOD_NAME`` is a generator placeholder standing in for
    the ``id`` field — the constructor validates it with the message
    "Expected argument 'id'" and stores it under the "id" key. Confirm
    against the code generator before renaming.
    """
    def __init__(__self__, METHOD_NAME=None, identity=None, location=None, name=None, properties=None, sku=None, system_data=None, tags=None, type=None):
        # Each argument is type-checked, then stashed with ``pulumi.set`` so
        # the ``@pulumi.getter`` properties below can fetch it by key.
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", METHOD_NAME)
        if identity and not isinstance(identity, dict):
            raise TypeError("Expected argument 'identity' to be a dict")
        pulumi.set(__self__, "identity", identity)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        ARM id of the monitor resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def identity(self) -> Optional['outputs.IdentityPropertiesResponse']:
        """
        Identity properties of the monitor resource.
        """
        return pulumi.get(self, "identity")
    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The location of the monitor resource
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the monitor resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> 'outputs.MonitorPropertiesResponse':
        """
        Properties of the monitor resource.
        """
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.ResourceSkuResponse']:
        """
        SKU of the monitor resource.
        """
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        The system metadata relating to this resource
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        The tags of the monitor resource.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the monitor resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetMonitorResult(GetMonitorResult):
    """Awaitable variant of GetMonitorResult returned by ``get_monitor``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` makes this a generator function, which
        # is what makes instances awaitable; awaiting returns a plain
        # GetMonitorResult copy of this object's fields.
        if False:
            yield self
        return GetMonitorResult(
            METHOD_NAME=self.METHOD_NAME,
            identity=self.identity,
            location=self.location,
            name=self.name,
            properties=self.properties,
            sku=self.sku,
            system_data=self.system_data,
            tags=self.tags,
            type=self.type)
def get_monitor(monitor_name: Optional[str] = None,
                resource_group_name: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMonitorResult:
    """
    Monitor resource.
    :param str monitor_name: Monitor resource name
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    __args__ = dict()
    __args__['monitorName'] = monitor_name
    __args__['resourceGroupName'] = resource_group_name
    # Layer caller-supplied options on top of the global invoke defaults.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Invoke the provider function; ``typ`` tells pulumi how to deserialize.
    __ret__ = pulumi.runtime.invoke('azure-native:elastic/v20230701preview:getMonitor', __args__, opts=opts, typ=GetMonitorResult).value
    return AwaitableGetMonitorResult(
        # METHOD_NAME placeholder carries the 'id' field (see the ctor).
        METHOD_NAME=pulumi.get(__ret__, 'id'),
        identity=pulumi.get(__ret__, 'identity'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        sku=pulumi.get(__ret__, 'sku'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_monitor)
def get_monitor_output(monitor_name: Optional[pulumi.Input[str]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetMonitorResult]:
    """
    Monitor resource.
    :param str monitor_name: Monitor resource name
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Body intentionally empty: ``lift_output_func`` wraps ``get_monitor``
    # to accept Inputs/Outputs, so the decorator supplies the implementation.
    ...
5,009 | plot violin | """Matplotlib Violinplot."""
import matplotlib.pyplot as plt
import numpy as np
from ....stats import hdi
from ....stats.density_utils import get_bins, histogram, kde
from ...plot_utils import _scale_fig_size
from . import backend_kwarg_defaults, backend_show, create_axes_grid, matplotlib_kwarg_dealiaser
def METHOD_NAME(
    ax,
    plotters,
    figsize,
    rows,
    cols,
    sharex,
    sharey,
    shade_kwargs,
    shade,
    rug,
    rug_kwargs,
    side,
    bw,
    textsize,
    labeller,
    circular,
    hdi_prob,
    quartiles,
    backend_kwargs,
    show,
):
    """Matplotlib violin plot.

    Draws one violin per entry of ``plotters`` on a rows x cols grid of
    axes and returns the array of axes.  NOTE(review): ``METHOD_NAME`` is a
    placeholder for the backend entry point (presumably ``plot_violin``).
    """
    if backend_kwargs is None:
        backend_kwargs = {}
    # User-supplied backend kwargs win over the backend defaults.
    backend_kwargs = {
        **backend_kwarg_defaults(),
        **backend_kwargs,
    }
    # Scale figure, label, tick and line sizes consistently for the grid.
    (figsize, ax_labelsize, _, xt_labelsize, linewidth, _) = _scale_fig_size(
        figsize, textsize, rows, cols
    )
    backend_kwargs.setdefault("figsize", figsize)
    backend_kwargs.setdefault("sharex", sharex)
    backend_kwargs.setdefault("sharey", sharey)
    backend_kwargs.setdefault("squeeze", True)
    # Normalize matplotlib kwarg aliases (e.g. "c" vs "color").
    shade_kwargs = matplotlib_kwarg_dealiaser(shade_kwargs, "hexbin")
    rug_kwargs = matplotlib_kwarg_dealiaser(rug_kwargs, "plot")
    rug_kwargs.setdefault("alpha", 0.1)
    rug_kwargs.setdefault("marker", ".")
    rug_kwargs.setdefault("linestyle", "")
    if ax is None:
        fig, ax = create_axes_grid(
            len(plotters),
            rows,
            cols,
            backend_kwargs=backend_kwargs,
        )
        # Butt the columns together so adjacent violins share an edge.
        fig.set_constrained_layout(False)
        fig.subplots_adjust(wspace=0)
    ax = np.atleast_1d(ax)
    current_col = 0
    for (var_name, selection, isel, x), ax_ in zip(plotters, ax.flatten()):
        val = x.flatten()
        # Integer data gets a discrete bar "violin"; floats a KDE violin.
        if val[0].dtype.kind == "i":
            dens = cat_hist(val, rug, side, shade, ax_, **shade_kwargs)
        else:
            dens = _violinplot(val, rug, side, shade, bw, circular, ax_, **shade_kwargs)
        if rug:
            # Jitter rug points into the left half-plane, scaled to the density.
            rug_x = -np.abs(np.random.normal(scale=max(dens) / 3.5, size=len(val)))
            ax_.plot(rug_x, val, **rug_kwargs)
        # Note the order: per[:2] are the quartiles, per[-1] is the median.
        per = np.nanpercentile(val, [25, 75, 50])
        hdi_probs = hdi(val, hdi_prob, multimodal=False, skipna=True)
        if quartiles:
            ax_.plot([0, 0], per[:2], lw=linewidth * 3, color="k", solid_capstyle="round")
        ax_.plot([0, 0], hdi_probs, lw=linewidth, color="k", solid_capstyle="round")
        ax_.plot(0, per[-1], "wo", ms=linewidth * 1.5)
        ax_.set_title(labeller.make_label_vert(var_name, selection, isel), fontsize=ax_labelsize)
        ax_.set_xticks([])
        ax_.tick_params(labelsize=xt_labelsize)
        ax_.grid(None, axis="x")
        # Hide the spine shared with the previous column of the grid.
        if current_col != 0:
            ax_.spines["left"].set_visible(False)
            ax_.yaxis.set_ticks_position("none")
        current_col += 1
        if current_col == cols:
            current_col = 0
    if backend_show(show):
        plt.show()
    return ax
def _violinplot(val, rug, side, shade, bw, circular, ax, **shade_kwargs):
    """Draw a single violin (full or half) for ``val`` onto ``ax``.

    The raw density estimate is returned unchanged so the caller can use
    its scale (e.g. for rug-point jitter).
    """
    # Resolve the bandwidth rule: circular data needs Taylor's rule.
    if bw == "default":
        bw = "taylor" if circular else "experimental"
    grid, density = kde(val, circular=circular, bw=bw)
    if rug and side == "both":
        # The rug occupies the left half-plane; keep the violin on the right.
        side = "right"
    if side == "both":
        ys = np.concatenate([grid, grid[::-1]])
        widths = np.concatenate([-density, density[::-1]])
    elif side == "right":
        ys, widths = grid[::-1], density[::-1]
    elif side == "left":
        ys, widths = grid, -density
    ax.fill_betweenx(ys, widths, alpha=shade, lw=0, **shade_kwargs)
    return density
def cat_hist(val, rug, side, shade, ax, **shade_kwargs):
    """Render the discrete analogue of a violin: a horizontal bar histogram."""
    bins = get_bins(val)
    _, counts, _ = histogram(val, bins=bins)
    edges = np.linspace(np.min(val), np.max(val), len(bins))
    bar_heights = np.diff(edges)
    bar_centers = edges[:-1] + bar_heights.mean() / 2
    if rug and side == "both":
        # Leave the left half-plane free for the rug points.
        side = "right"
    if side == "left":
        start = -counts
    elif side == "both":
        start = -0.5 * counts
    elif side == "right":
        # left=None lets barh fall back to its default baseline.
        start = None
    ax.barh(bar_centers, counts, height=bar_heights, left=start, alpha=shade, **shade_kwargs)
    return counts
5,010 | test serialisation | ##########################################################################
#
# Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import imath
import IECore
import Gaffer
import GafferImage
import GafferImageTest
class AtomicFormatPlugTest( GafferImageTest.ImageTestCase ) :
	"""Serialisation and value-handling tests for GafferImage.AtomicFormatPlug."""
	def METHOD_NAME( self ) :
		# NOTE(review): METHOD_NAME is a placeholder; this is the basic
		# serialise/execute round-trip test (presumably ``testSerialisation``).
		s = Gaffer.ScriptNode()
		s["n"] = Gaffer.Node()
		s["n"]["f"] = GafferImage.AtomicFormatPlug( "testPlug", defaultValue = GafferImage.Format( 10, 5, .5 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		se = s.serialise()
		s2 = Gaffer.ScriptNode()
		s2.execute( se )
		# The dynamic plug must survive the round trip with its type intact.
		self.assertTrue( s2["n"]["f"].isInstanceOf( GafferImage.AtomicFormatPlug ) )
	def testOffsetSerialize( self ) :
		# A display window with a non-zero (negative) origin must round-trip exactly.
		format = GafferImage.Format( imath.Box2i( imath.V2i( -5, -11 ), imath.V2i( 13, 19 ) ), .5 )
		s = Gaffer.ScriptNode()
		s["n"] = Gaffer.Node()
		s["n"]["f"] = GafferImage.AtomicFormatPlug( "testPlug", defaultValue = format, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		se = s.serialise()
		s2 = Gaffer.ScriptNode()
		s2.execute( se )
		self.assertEqual( s2["n"]["f"].getValue(), format )
	def testInputPlug( self ) :
		n = Gaffer.Node()
		f = GafferImage.AtomicFormatPlug("f", direction = Gaffer.Plug.Direction.In, flags = Gaffer.Plug.Flags.Default )
		n.addChild( f )
		s = Gaffer.ScriptNode()
		s.addChild( n )
		with s.context() :
			f1 = n["f"].getValue()
		# The default value of any input plug should be it's real value regardless of whether it is empty or not.
		self.assertEqual( f1, GafferImage.Format() )
	def testExpressions( self ) :
		# An expression must be able to read a Format, grow its display
		# window, change the pixel aspect, and write it to another plug.
		s = Gaffer.ScriptNode()
		s["n1"] = Gaffer.Node()
		s["n2"] = Gaffer.Node()
		s["n1"]["user"]["f"] = GafferImage.AtomicFormatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		s["n2"]["user"]["f"] = GafferImage.AtomicFormatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		s["e"] = Gaffer.Expression()
		s["e"].setExpression( 'f = parent["n1"]["user"]["f"]; b = f.getDisplayWindow(); b.setMin( b.min() - imath.V2i( 10 ) ); b.setMax( b.max() + imath.V2i( 20 ) ); f.setPixelAspect( 0.5 ); f.setDisplayWindow( b ); parent["n2"]["user"]["f"] = f')
		s["n1"]["user"]["f"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( 20, 30 ), imath.V2i( 100, 110 ) ), 1 ) )
		self.assertEqual( s["n2"]["user"]["f"].getValue(), GafferImage.Format( imath.Box2i( imath.V2i( 10, 20 ), imath.V2i( 120, 130 ) ), 0.5 ) )
	def testGetAndSetEmptyFormat( self ) :
		# An empty (default-constructed) Format is a legal value.
		p = GafferImage.AtomicFormatPlug()
		p.setValue( GafferImage.Format() )
		self.assertEqual( p.getValue(), GafferImage.Format() )
	def testHashRepeatability( self ) :
		# Repeated hashing of an unchanged value must be deterministic.
		p = GafferImage.AtomicFormatPlug()
		p.setValue( GafferImage.Format( 1920, 1080 ) )
		allHashes = set()
		for i in range( 0, 1000 ) :
			allHashes.add( p.hash().toString() )
		self.assertEqual( len( allHashes ), 1 )
# Allow running this test module directly, outside the full test suite.
if __name__ == "__main__":
    unittest.main()
5,011 | source mask | # --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import contextlib
import torch
import torch.nn as nn
import torch.nn.functional as F
from espnet.nets.pytorch_backend.tacotron2.decoder import Prenet as TacotronDecoderPrenet
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.embedding import ScaledPositionalEncoding
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
class SpeechDecoderPrenet(nn.Module):
    """Decoder pre-net that embeds target speech frames for the decoder.

    (Docstring corrected: the previous text described ``in_channels`` /
    ``mid_channels`` / ``kernel_sizes`` parameters that do not exist here.)

    Args:
        odim (int): dimension of each input speech frame (``idim`` of the
            Tacotron prenet / input size of the linear embedding).
        args: configuration namespace; fields read in this class include
            dprenet_layers, dprenet_units, dprenet_dropout_rate,
            decoder_embed_dim, transformer_dec_dropout_rate,
            transformer_dec_positional_dropout_rate, dec_use_scaled_pos_enc,
            max_speech_positions, spk_embed_integration_type, spk_embed_dim
            and freeze_decoder_updates.
    """
    def __init__(
        self,
        odim,
        args,
    ):
        super(SpeechDecoderPrenet, self).__init__()
        # define decoder prenet
        if args.dprenet_layers != 0:
            # decoder prenet: Tacotron-style bottleneck + projection.
            decoder_input_layer = torch.nn.Sequential(
                TacotronDecoderPrenet(
                    idim=odim,
                    n_layers=args.dprenet_layers,
                    n_units=args.dprenet_units,
                    dropout_rate=args.dprenet_dropout_rate,
                ),
                torch.nn.Linear(args.dprenet_units, args.decoder_embed_dim),
            )
        else:
            decoder_input_layer = "linear"
        pos_enc_class = (
            ScaledPositionalEncoding if args.dec_use_scaled_pos_enc else PositionalEncoding
        )
        if decoder_input_layer == "linear":
            # Plain linear embedding path (no Tacotron prenet requested).
            self.decoder_prenet = torch.nn.Sequential(
                torch.nn.Linear(odim, args.decoder_embed_dim),
                torch.nn.LayerNorm(args.decoder_embed_dim),
                torch.nn.Dropout(args.transformer_dec_dropout_rate),
                torch.nn.ReLU(),
                pos_enc_class(args.decoder_embed_dim, args.transformer_dec_positional_dropout_rate),
            )
        elif isinstance(decoder_input_layer, torch.nn.Module):
            self.decoder_prenet = torch.nn.Sequential(
                decoder_input_layer, pos_enc_class(args.decoder_embed_dim, args.transformer_dec_positional_dropout_rate, max_len=args.max_speech_positions)
            )
        if args.spk_embed_integration_type == 'pre':
            # Projects [frame embedding ; speaker embedding] back to model dim.
            self.spkembs_layer = torch.nn.Sequential(
                torch.nn.Linear(args.spk_embed_dim + args.decoder_embed_dim, args.decoder_embed_dim), torch.nn.ReLU()
            )
        self.num_updates = 0
        self.freeze_decoder_updates = args.freeze_decoder_updates
    def forward(self, prev_output_tokens, tgt_lengths_in=None, spkembs=None):
        # Returns (embedded frames, padding mask or None).  Gradients flow
        # through the prenet only once num_updates >= freeze_decoder_updates.
        ft = self.freeze_decoder_updates <= self.num_updates
        with torch.no_grad() if not ft else contextlib.ExitStack():
            prev_output_tokens = self.decoder_prenet(prev_output_tokens)
            if spkembs is not None:
                # Broadcast the (normalized) speaker embedding over time and fuse.
                spkembs = F.normalize(spkembs).unsqueeze(1).expand(-1, prev_output_tokens.size(1), -1)
                prev_output_tokens = self.spkembs_layer(torch.cat([prev_output_tokens, spkembs], dim=-1))
        if tgt_lengths_in is not None:
            # Invert the non-pad mask so True marks padded positions.
            tgt_frames_mask = ~(self.METHOD_NAME(tgt_lengths_in).squeeze(1))
        else:
            tgt_frames_mask = None
        return prev_output_tokens, tgt_frames_mask
    def METHOD_NAME(self, ilens):
        """Make masks for self-attention.

        NOTE(review): ``METHOD_NAME`` is a placeholder; this is the
        source-mask helper (presumably ``_source_mask``).

        Args:
            ilens (LongTensor or List): Batch of lengths (B,).
        Returns:
            Tensor: Mask tensor for self-attention.
                dtype=torch.uint8 in PyTorch 1.2-
                dtype=torch.bool in PyTorch 1.2+ (including 1.2)
        Examples:
            >>> ilens = [5, 3]
            >>> self._source_mask(ilens)
            tensor([[[1, 1, 1, 1, 1]],
                    [[1, 1, 1, 0, 0]]], dtype=torch.uint8)
        """
        x_masks = make_non_pad_mask(ilens).to(next(self.parameters()).device)
        return x_masks.unsqueeze(-2)
    def set_num_updates(self, num_updates):
        """Set the number of parameters updates."""
        self.num_updates = num_updates
5,012 | set dacs zero | from qcodes.instrument.base import Instrument
from qcodes.utils.validators import Enum, Numbers
# Hard dependency guard: this driver is unusable without the spirack
# package, so fail fast with an actionable install hint.
try:
    from spirack import D5a_module
except ImportError:
    raise ImportError(('The D5a_module class could not be found. '
                       'Try installing it using pip install spirack'))
from functools import partial
class D5a(Instrument):
    """
    Qcodes driver for the D5a DAC SPI-rack module.
    functions:
    - set_dacs_zero set all DACs to zero voltage
    parameters:
    - dacN: get and set DAC voltage
    - stepsizeN get the minimum step size corresponding to the span
    - spanN get and set the DAC span: '4v uni', '4v bi', or '2v bi'
    where N is the DAC number from 1 up to 16

    (Docstring fix: the valid span names are the ``_span_set_map`` keys
    '4v uni', '4v bi' and '2v bi'; the previously documented '2.5v bi'
    does not exist in this driver.)
    """
    def __init__(self, name, spi_rack, module, inter_delay=0.1, dac_step=10e-3,
                 reset_voltages=False, mV=False, number_dacs=16, **kwargs):
        """ Create instrument for the D5a module.
        The D5a module works with volts as units. For backward compatibility
        there is the option to allow mV for the dacX parameters.
        The output span of the DAC module can be changed with the spanX
        command. Be carefull when executing this command with a sample
        connected as voltage jumps can occur.
        Args:
            name (str): name of the instrument.
            spi_rack (SPI_rack): instance of the SPI_rack class as defined in
                the spirack package. This class manages communication with the
                individual modules.
            module (int): module number as set on the hardware.
            inter_delay (float): time in seconds, passed to dac parameters of the object
            dac_step (float): max step size (V or mV), passed to dac parameters of the object
            reset_voltages (bool): passed to D5a_module constructor
            mV (bool): if True, then use mV as units in the dac parameters
            number_dacs (int): number of DACs available. This is 8 for the D5mux
        """
        super().__init__(name, **kwargs)
        self.d5a = D5a_module(spi_rack, module, reset_voltages=reset_voltages)
        self._number_dacs = number_dacs
        # Valid span names -> spirack span codes; spanN validators use these keys.
        self._span_set_map = {
            '4v uni': 0,
            '4v bi': 2,
            '2v bi': 4,
        }
        self._span_get_map = {v: k for k, v in self._span_set_map.items()}
        self.add_function('set_dacs_zero', call_cmd=self.METHOD_NAME,
                          docstring='Reset all dacs to zero voltage. No ramping is performed.')
        # _gain converts between the instrument unit (V or mV) and volts.
        if mV:
            self._gain = 1e3
            unit = 'mV'
        else:
            self._gain = 1
            unit = 'V'
        for i in range(self._number_dacs):
            validator = self._get_validator(i)
            self.add_parameter('dac{}'.format(i + 1),
                               label='DAC {}'.format(i + 1),
                               get_cmd=partial(self._get_dac, i),
                               set_cmd=partial(self._set_dac, i),
                               unit=unit,
                               vals=validator,
                               step=dac_step,
                               inter_delay=inter_delay)
            self.add_parameter('stepsize{}'.format(i + 1),
                               get_cmd=partial(self.d5a.get_stepsize, i),
                               unit='V',
                               docstring='Returns the smallest voltage step of the DAC.')
            self.add_parameter('span{}'.format(i + 1),
                               get_cmd=partial(self._get_span, i),
                               set_cmd=partial(self._set_span, i),
                               vals=Enum(*self._span_set_map.keys()),
                               docstring='Change the output span of the DAC. This command also updates the validator.')
    def set_dac_unit(self, unit: str) -> None:
        """Set the unit of dac parameters"""
        allowed_values = Enum('mV', 'V')
        allowed_values.validate(unit)
        self._gain = {'V': 1, 'mV': 1e3}[unit]
        for i in range(1, self._number_dacs + 1):
            setattr(self.parameters[f'dac{i}'], 'unit', unit)
            # Validator bounds are in instrument units, so rebuild after the gain change.
            setattr(self.parameters[f'dac{i}'], 'vals', self._get_validator(i - 1))
    def METHOD_NAME(self):
        # NOTE(review): METHOD_NAME is a placeholder; this is the
        # set_dacs_zero implementation registered in __init__ above.
        # Sets every DAC output to 0.0 immediately (no ramping).
        for i in range(self._number_dacs):
            self._set_dac(i, 0.0)
    def _set_dac(self, dac, value):
        # Hardware always takes volts; divide out the mV gain if active.
        return self.d5a.set_voltage(dac, value / self._gain)
    def _get_dac(self, dac):
        # Cached hardware voltage (volts), scaled to the instrument unit.
        return self._gain * self.d5a.voltages[dac]
    def _get_span(self, dac):
        return self._span_get_map[self.d5a.span[dac]]
    def _set_span(self, dac, span_str):
        self.d5a.change_span_update(dac, self._span_set_map[span_str])
        # A new span changes the legal voltage range; refresh the validator.
        self.parameters['dac{}'.format(
            dac + 1)].vals = self._get_validator(dac)
    def _get_validator(self, dac):
        """Return a Numbers validator matching the DAC's current span (in instrument units)."""
        span = self.d5a.span[dac]
        if span == D5a_module.range_2V_bi:
            validator = Numbers(-2 * self._gain, 2 * self._gain)
        elif span == D5a_module.range_4V_bi:
            validator = Numbers(-4 * self._gain, 4 * self._gain)
        elif span == D5a_module.range_4V_uni:
            validator = Numbers(0, 4 * self._gain)
        else:
            msg = 'The found DAC span of {} does not correspond to a known one'
            raise Exception(msg.format(span))
        return validator
5,013 | rule runner | # Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import pytest
from pants.backend.kotlin.compile import kotlinc_plugins
from pants.backend.kotlin.compile.kotlinc import rules as kotlinc_rules
from pants.backend.kotlin.lint.ktlint import rules as ktlint_fmt_rules
from pants.backend.kotlin.lint.ktlint import skip_field
from pants.backend.kotlin.lint.ktlint.rules import KtlintFieldSet, KtlintRequest
from pants.backend.kotlin.target_types import KotlinSourcesGeneratorTarget, KotlinSourceTarget
from pants.backend.kotlin.target_types import rules as target_types_rules
from pants.build_graph.address import Address
from pants.core.goals.fmt import FmtResult
from pants.core.util_rules import config_files, source_files, system_binaries
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.rules import QueryRule
from pants.engine.target import Target
from pants.jvm import classpath, jdk_rules
from pants.jvm.resolve.coursier_fetch import rules as coursier_fetch_rules
from pants.jvm.resolve.coursier_setup import rules as coursier_setup_rules
from pants.jvm.strip_jar import strip_jar
from pants.jvm.util_rules import rules as util_rules
from pants.testutil.METHOD_NAME import PYTHON_BOOTSTRAP_ENV, RuleRunner
@pytest.fixture
def METHOD_NAME() -> RuleRunner:
    """Pytest fixture: a RuleRunner wired with every rule Ktlint fmt needs.

    NOTE(review): ``METHOD_NAME`` is a placeholder standing in for
    ``rule_runner`` (the tests below request the fixture under the same
    placeholder) — confirm upstream.
    """
    METHOD_NAME = RuleRunner(
        rules=[
            *config_files.rules(),
            *classpath.rules(),
            *coursier_fetch_rules(),
            *coursier_setup_rules(),
            *jdk_rules.rules(),
            *strip_jar.rules(),
            *kotlinc_rules(),
            *kotlinc_plugins.rules(),
            *util_rules(),
            *target_types_rules(),
            *ktlint_fmt_rules.rules(),
            *skip_field.rules(),
            *system_binaries.rules(),
            *source_files.rules(),
            QueryRule(FmtResult, (KtlintRequest.Batch,)),
            QueryRule(SourceFiles, (SourceFilesRequest,)),
        ],
        target_types=[KotlinSourceTarget, KotlinSourcesGeneratorTarget],
    )
    # The JVM subprocesses need the inherited Python bootstrap env vars.
    METHOD_NAME.set_options(
        [],
        env_inherit=PYTHON_BOOTSTRAP_ENV,
    )
    return METHOD_NAME
GOOD_FILE = """\
package org.pantsbuild.example
open class Foo {
val CONSTANT = "Constant changes"
}
"""
BAD_FILE = """\
package org.pantsbuild.example
open class Bar {
val CONSTANT = "Constant changes"
}
"""
FIXED_BAD_FILE = """\
package org.pantsbuild.example
open class Bar {
val CONSTANT = "Constant changes"
}
"""
def run_ktlint(METHOD_NAME: RuleRunner, targets: list[Target]) -> FmtResult:
    """Run the Ktlint fmt rule over ``targets`` and return the FmtResult."""
    field_sets = [KtlintFieldSet.create(tgt) for tgt in targets]
    # Resolve the targets' sources first: the fmt batch needs their snapshot.
    input_sources = METHOD_NAME.request(
        SourceFiles,
        [
            SourceFilesRequest(field_set.source for field_set in field_sets),
        ],
    )
    fmt_result = METHOD_NAME.request(
        FmtResult,
        [
            KtlintRequest.Batch(
                "",
                input_sources.snapshot.files,
                partition_metadata=None,
                snapshot=input_sources.snapshot,
            ),
        ],
    )
    return fmt_result
def test_passing(METHOD_NAME: RuleRunner) -> None:
    """An already-clean file must come back unchanged (did_change False)."""
    METHOD_NAME.write_files({"Foo.kt": GOOD_FILE, "BUILD": "kotlin_sources(name='t')"})
    tgt = METHOD_NAME.get_target(Address("", target_name="t", relative_file_path="Foo.kt"))
    fmt_result = run_ktlint(METHOD_NAME, [tgt])
    assert fmt_result.output == METHOD_NAME.make_snapshot({"Foo.kt": GOOD_FILE})
    assert fmt_result.did_change is False
def test_failing(METHOD_NAME: RuleRunner) -> None:
    """A badly formatted file must be rewritten to its fixed form."""
    METHOD_NAME.write_files({"Bar.kt": BAD_FILE, "BUILD": "kotlin_sources(name='t')"})
    tgt = METHOD_NAME.get_target(Address("", target_name="t", relative_file_path="Bar.kt"))
    fmt_result = run_ktlint(METHOD_NAME, [tgt])
    assert fmt_result.output == METHOD_NAME.make_snapshot({"Bar.kt": FIXED_BAD_FILE})
    assert fmt_result.did_change is True
def test_multiple_targets(METHOD_NAME: RuleRunner) -> None:
    """A batch mixing clean and dirty files only rewrites the dirty one."""
    METHOD_NAME.write_files(
        {"Foo.kt": GOOD_FILE, "Bar.kt": BAD_FILE, "BUILD": "kotlin_sources(name='t')"}
    )
    tgts = [
        METHOD_NAME.get_target(Address("", target_name="t", relative_file_path="Foo.kt")),
        METHOD_NAME.get_target(Address("", target_name="t", relative_file_path="Bar.kt")),
    ]
    fmt_result = run_ktlint(METHOD_NAME, tgts)
    assert fmt_result.output == METHOD_NAME.make_snapshot(
        {"Foo.kt": GOOD_FILE, "Bar.kt": FIXED_BAD_FILE}
    )
    assert fmt_result.did_change is True
5,014 | tear down class | # SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
"""
test suites
"""
from rez.tests.util import TestBase, TempdirMixin, \
per_available_shell, install_dependent
from rez.resolved_context import ResolvedContext
from rez.suite import Suite
from rez.config import config
from rez.system import system
import subprocess
import unittest
import uuid
import os.path
class TestRezSuites(TestBase, TempdirMixin):
@classmethod
def setUpClass(cls):
TempdirMixin.setUpClass()
packages_path = cls.data_path("suites", "packages")
cls.settings = dict(
packages_path=[packages_path],
package_filter=None,
implicit_packages=[],
warn_untimestamped=False,
resolve_caching=False)
@classmethod
def METHOD_NAME(cls):
TempdirMixin.METHOD_NAME()
def _test_serialization(self, suite):
name = uuid.uuid4().hex
path = os.path.join(self.root, name)
suite.save(path)
suite2 = Suite.load(path)
self.assertEqual(suite.get_tools(), suite2.get_tools())
self.assertEqual(set(suite.context_names), set(suite2.context_names))
def test_1(self):
"""Test empty suite."""
s = Suite()
tools = s.get_tools()
self.assertEqual(tools, {})
self._test_serialization(s)
def test_2(self):
"""Test basic suite."""
c_foo = ResolvedContext(["foo"])
c_bah = ResolvedContext(["bah"])
s = Suite()
s.add_context("foo", c_foo)
s.add_context("bah", c_bah)
expected_tools = set(["fooer", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.set_context_prefix("foo", "fx_")
expected_tools = set(["fx_fooer", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.set_context_suffix("foo", "_fun")
s.set_context_suffix("bah", "_anim")
expected_tools = set(["fx_fooer_fun", "bahbah_anim", "blacksheep_anim"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.remove_context("bah")
expected_tools = set(["fx_fooer_fun"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.add_context("bah", c_bah)
expected_tools = set(["fx_fooer_fun", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.alias_tool("bah", "blacksheep", "whitesheep")
expected_tools = set(["fx_fooer_fun", "bahbah", "whitesheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
# explicit alias takes precedence over prefix/suffix
s.alias_tool("foo", "fooer", "floober")
expected_tools = set(["floober", "bahbah", "whitesheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.unalias_tool("foo", "fooer")
s.unalias_tool("bah", "blacksheep")
expected_tools = set(["fx_fooer_fun", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.hide_tool("bah", "bahbah")
expected_tools = set(["fx_fooer_fun", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.unhide_tool("bah", "bahbah")
expected_tools = set(["fx_fooer_fun", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
self._test_serialization(s)
def test_3(self):
"""Test tool clashes in a suite."""
c_foo = ResolvedContext(["foo"])
c_bah = ResolvedContext(["bah"])
s = Suite()
s.add_context("foo", c_foo)
s.add_context("bah", c_bah)
s.add_context("bah2", c_bah)
expected_tools = set(["fooer", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
self.assertEqual(s.get_tool_context("bahbah"), "bah2")
self.assertEqual(s.get_tool_context("blacksheep"), "bah2")
s.bump_context("bah")
self.assertEqual(s.get_tool_context("bahbah"), "bah")
self.assertEqual(s.get_tool_context("blacksheep"), "bah")
expected_conflicts = set(["bahbah", "blacksheep"])
self.assertEqual(set(s.get_conflicting_aliases()), expected_conflicts)
s.set_context_prefix("bah", "hey_")
expected_tools = set(["fooer", "bahbah", "blacksheep",
"hey_bahbah", "hey_blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
s.remove_context_prefix("bah")
expected_tools = set(["fooer", "bahbah", "blacksheep"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
self.assertEqual(s.get_tool_context("bahbah"), "bah")
self.assertEqual(s.get_tool_context("blacksheep"), "bah")
s.hide_tool("bah", "bahbah")
self.assertEqual(s.get_tool_context("bahbah"), "bah2")
s.unhide_tool("bah", "bahbah")
self.assertEqual(s.get_tool_context("bahbah"), "bah")
self._test_serialization(s)
@per_available_shell()
@install_dependent()
def test_executable(self, shell):
"""Test suite tool can be executed
Testing suite tool can be found and executed in multiple platforms.
This test is equivalent to the following commands in shell:
```
$ rez-env pooh --output pooh.rxt
$ rez-suite --create pooh
$ rez-suite --add pooh.rxt --context pooh pooh
$ export PATH=$(pwd)/pooh/bin:$PATH
$ hunny
yum yum
```
"""
config.override("default_shell", shell)
c_pooh = ResolvedContext(["pooh"])
s = Suite()
s.add_context("pooh", c_pooh)
expected_tools = set(["hunny"])
self.assertEqual(set(s.get_tools().keys()), expected_tools)
per_shell = config.get("default_shell")
suite_path = os.path.join(self.root, "test_suites", per_shell, "pooh")
s.save(suite_path)
bin_path = os.path.join(suite_path, "bin")
env = os.environ.copy()
# activate rez, to access _rez_fwd
env["PATH"] = os.pathsep.join([system.rez_bin_path, env["PATH"]])
# activate suite
env["PATH"] = os.pathsep.join([bin_path, env["PATH"]])
output = subprocess.check_output(["hunny"], shell=True, env=env,
universal_newlines=True)
self.assertTrue("yum yum" in output)
if __name__ == '__main__':
unittest.main() |
5,015 | resolve image by client versions | # Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mlrun.api.utils.runtimes.nuclio
import mlrun.common.schemas
import mlrun.utils.singleton
from mlrun.api.runtime_handlers.mpijob import resolve_mpijob_crd_version
from mlrun.config import Config, config, default_config
class ClientSpec(
    metaclass=mlrun.utils.singleton.Singleton,
):
    """Singleton that assembles the server's client-spec payload.

    The client spec tells SDK clients which server-side configuration values
    to adopt (registry, images, URLs, defaults).  Values that have no server
    override are sent as ``None`` so the client falls back to its own default.
    """

    def get_client_spec(
        self, client_version: str = None, client_python_version: str = None
    ):
        """Build the ClientSpec schema object for a requesting client.

        :param client_version: mlrun version reported by the client; used to
            enrich image references so client-side flows get matching tags.
        :param client_python_version: python version reported by the client.
        :return: a populated ``mlrun.common.schemas.ClientSpec``.
        """
        mpijob_crd_version = resolve_mpijob_crd_version()
        return mlrun.common.schemas.ClientSpec(
            version=config.version,
            namespace=config.namespace,
            docker_registry=config.httpdb.builder.docker_registry,
            remote_host=config.remote_host,
            mpijob_crd_version=mpijob_crd_version,
            ui_url=config.resolve_ui_url(),
            artifact_path=config.artifact_path,
            spark_app_image=config.spark_app_image,
            spark_app_image_tag=config.spark_app_image_tag,
            spark_history_server_path=config.spark_history_server_path,
            # kfp/dask images are enriched with the client's versions because
            # workflow construction happens client-side (see METHOD_NAME below).
            kfp_image=self.METHOD_NAME(
                config.kfp_image, client_version, client_python_version
            ),
            kfp_url=config.resolve_kfp_url(),
            dask_kfp_image=self.METHOD_NAME(
                config.dask_kfp_image, client_version, client_python_version
            ),
            api_url=config.httpdb.api_url,
            nuclio_version=mlrun.api.utils.runtimes.nuclio.resolve_nuclio_version(),
            spark_operator_version=config.spark_operator_version,
            calculate_artifact_hash=config.artifacts.calculate_hash,
            generate_artifact_target_path_from_artifact_hash=config.artifacts.generate_target_path_from_artifact_hash,
            redis_url=config.redis.url,
            redis_type=config.redis.type,
            sql_url=config.sql.url,
            # These don't have a default value, but we don't send them if they are not set to allow the client to know
            # when to use server value and when to use client value (server only if set). Since their default value is
            # empty and not set is also empty we can use the same _get_config_value_if_not_default
            default_function_priority_class_name=self._get_config_value_if_not_default(
                "default_function_priority_class_name"
            ),
            valid_function_priority_class_names=self._get_config_value_if_not_default(
                "valid_function_priority_class_names"
            ),
            # These have a default value, therefore we want to send them only if their value is not the default one
            # (otherwise clients don't know when to use server value and when to use client value)
            ui_projects_prefix=self._get_config_value_if_not_default(
                "ui.projects_prefix"
            ),
            scrape_metrics=self._get_config_value_if_not_default("scrape_metrics"),
            default_function_node_selector=self._get_config_value_if_not_default(
                "default_function_node_selector"
            ),
            igz_version=self._get_config_value_if_not_default("igz_version"),
            auto_mount_type=self._get_config_value_if_not_default(
                "storage.auto_mount_type"
            ),
            auto_mount_params=self._get_config_value_if_not_default(
                "storage.auto_mount_params"
            ),
            default_tensorboard_logs_path=self._get_config_value_if_not_default(
                "default_tensorboard_logs_path"
            ),
            default_function_pod_resources=self._get_config_value_if_not_default(
                "default_function_pod_resources"
            ),
            preemptible_nodes_node_selector=self._get_config_value_if_not_default(
                "preemptible_nodes.node_selector"
            ),
            preemptible_nodes_tolerations=self._get_config_value_if_not_default(
                "preemptible_nodes.tolerations"
            ),
            default_preemption_mode=self._get_config_value_if_not_default(
                "function_defaults.preemption_mode"
            ),
            force_run_local=self._get_config_value_if_not_default("force_run_local"),
            function=self._get_config_value_if_not_default("function"),
            # ce_mode is deprecated, we will use the full ce config instead and ce_mode will be removed in 1.6.0
            ce_mode=config.ce.mode,
            ce=config.ce.to_dict(),
            logs=self._get_config_value_if_not_default("httpdb.logs"),
            feature_store_data_prefixes=self._get_config_value_if_not_default(
                "feature_store.data_prefixes"
            ),
            model_endpoint_monitoring_store_type=self._get_config_value_if_not_default(
                "model_endpoint_monitoring.store_type"
            ),
            model_endpoint_monitoring_endpoint_store_connection=self._get_config_value_if_not_default(
                "model_endpoint_monitoring.endpoint_store_connection"
            ),
            packagers=self._get_config_value_if_not_default("packagers"),
        )

    @staticmethod
    def METHOD_NAME(
        image: str, client_version: str = None, client_python_version=None
    ):
        """
        This method main purpose is to provide enriched images for deployment processes which are being executed on
        client side, such as building a workflow. The whole enrichment and construction of a workflow is being done on
        client side unlike submitting job where the main enrichment and construction of the resource runtime is being
        applied on the backend side. Therefore for the workflow case we need to provide it with already enriched
        images.
        :param image: image name
        :param client_version: the client mlrun version
        :param client_python_version: the client python version
        :return: enriched image url
        """
        try:
            return mlrun.utils.helpers.enrich_image_url(
                image, client_version, client_python_version
            )
        # if for some reason the user provided un-parsable versions, fall back to resolve version only by server
        except ValueError:
            return mlrun.utils.helpers.enrich_image_url(image)

    @staticmethod
    def _get_config_value_if_not_default(config_key):
        """Return the configured value for a dotted key, or None if it still
        equals the hard-coded default (meaning: let the client decide)."""
        config_key_parts = config_key.split(".")
        current_config_value = config
        current_default_config_value = default_config
        # Walk the dotted path in parallel through live config and defaults.
        for config_key_part in config_key_parts:
            current_config_value = getattr(current_config_value, config_key_part)
            current_default_config_value = current_default_config_value.get(
                config_key_part, ""
            )
        # when accessing attribute in Config, if the object is of type Mapping it returns the object in type Config
        if isinstance(current_config_value, Config):
            current_config_value = current_config_value.to_dict()
        if current_config_value == current_default_config_value:
            return None
        else:
            return current_config_value
# -*- coding: utf-8 -*-
#
from django.db.models import Q
from django.shortcuts import get_object_or_404
from rest_framework.generics import ListAPIView
from assets.models import Asset
from common.utils import get_logger
from orgs.mixins import generics
from perms.filters import AssetPermissionFilter
from perms.models import AssetPermission
from perms.serializers import AssetPermissionSerializer
from users.filters import UserFilter
from users.models import User, UserGroup
from users.serializers import UserSerializer, UserGroupSerializer
logger = get_logger(__file__)
__all__ = [
'AssetPermUserListApi', 'AssetPermUserPermissionsListApi',
'AssetPermUserGroupListApi', 'AssetPermUserGroupPermissionsListApi',
]
class BaseAssetPermUserOrUserGroupListApi(ListAPIView):
    """Base list view for principals (users / user groups) that can reach an asset."""
    rbac_perms = {
        'GET': 'perms.view_assetpermission'
    }

    def get_object(self):
        """Resolve the asset referenced by the URL ``pk``, or raise 404."""
        return get_object_or_404(Asset, pk=self.kwargs.get('pk'))

    def get_asset_related_perms(self):
        """Permissions granted on the asset directly or via any node it belongs to."""
        asset = self.get_object()
        related_nodes = asset.get_all_nodes(flat=True)
        return AssetPermission.objects.filter(
            Q(assets=asset) | Q(nodes__in=related_nodes)
        )
class AssetPermUserListApi(BaseAssetPermUserOrUserGroupListApi):
    """List every user who can reach the asset, directly or through a group."""
    filterset_class = UserFilter
    search_fields = ('username', 'email', 'name', 'id', 'source', 'role')
    serializer_class = UserSerializer
    rbac_perms = {
        'GET': 'perms.view_assetpermission'
    }

    def get_queryset(self):
        """Distinct users granted by the asset-related permissions."""
        perms = self.get_asset_related_perms()
        granted = Q(assetpermissions__in=perms) | Q(groups__assetpermissions__in=perms)
        return User.objects.filter(granted).distinct()
class AssetPermUserGroupListApi(BaseAssetPermUserOrUserGroupListApi):
    """List every user group that appears in a permission related to the asset."""
    serializer_class = UserGroupSerializer
    queryset = UserGroup.objects.none()

    def get_queryset(self):
        related_perms = self.get_asset_related_perms()
        return UserGroup.objects.filter(assetpermissions__in=related_perms).distinct()
class BaseAssetRelatedPermissionListApi(generics.ListAPIView):
    """Base list view of AssetPermission objects scoped to one asset."""
    model = AssetPermission
    serializer_class = AssetPermissionSerializer
    filterset_class = AssetPermissionFilter
    search_fields = ('name',)
    rbac_perms = {
        'list': 'perms.view_assetpermission'
    }

    def get_object(self):
        """Resolve the asset referenced by the URL ``pk``, or raise 404."""
        return get_object_or_404(Asset, pk=self.kwargs.get('pk'))

    def METHOD_NAME(self, queryset):
        """Keep only permissions touching the asset itself or one of its nodes."""
        asset = self.get_object()
        related_nodes = asset.get_all_nodes(flat=True)
        return queryset.filter(Q(assets=asset) | Q(nodes__in=related_nodes))

    def filter_queryset(self, queryset):
        return self.METHOD_NAME(super().filter_queryset(queryset))
class AssetPermUserPermissionsListApi(BaseAssetRelatedPermissionListApi):
    """Permissions on the asset that apply to one specific user."""

    def filter_queryset(self, queryset):
        qs = super().filter_queryset(queryset)
        return self.filter_user_related(qs).distinct()

    def filter_user_related(self, queryset):
        """Keep permissions granted to the user directly or via one of their groups."""
        user = self.get_perm_user()
        return queryset.filter(
            Q(users=user) | Q(user_groups__in=user.groups.all())
        )

    def get_perm_user(self):
        return get_object_or_404(User, pk=self.kwargs.get('perm_user_id'))
class AssetPermUserGroupPermissionsListApi(BaseAssetRelatedPermissionListApi):
    """Permissions on the asset that apply to one specific user group."""

    def filter_queryset(self, queryset):
        qs = super().filter_queryset(queryset)
        return self.filter_user_group_related(qs).distinct()

    def filter_user_group_related(self, queryset):
        """Keep permissions granted to the requested user group."""
        return queryset.filter(user_groups=self.get_perm_user_group())

    def get_perm_user_group(self):
        group_pk = self.kwargs.get('perm_user_group_id')
        return get_object_or_404(UserGroup, pk=group_pk)
# Copyright 2023 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Module with reusable code for other modules """
import re
from sqlparse.sql import TokenList, Token
from google.cloud import bigquery
from google.cloud.exceptions import NotFound
def formatNewLines(q):
    """Collapse a multi-line SQL string into one line, trimming each line's
    surrounding whitespace and joining lines with a single space."""
    trimmed_lines = (line.strip() for line in q.splitlines())
    return " ".join(trimmed_lines)
def remove_multiple_whitespaces(content):
    """Normalize whitespace: tabs become spaces, runs of spaces collapse to
    one, and a dot surrounded by spaces is rejoined (``a . b`` -> ``a.b``)."""
    no_tabs = re.sub('\t', ' ', content)
    single_spaced = re.sub(' +', ' ', no_tabs)
    return re.sub(' \. ', '.', single_spaced)
def is_tokenlist_or_list(token):
    """Return True if *token* is a sqlparse TokenList or a plain python list.

    Uses isinstance for the list check as well: unlike ``type(x) is list``,
    this also accepts list subclasses, which is the conventional (and more
    general) membership test.
    """
    return isinstance(token, (TokenList, list))
def is_token_CTE(token):
    # True for a Keyword token whose ttype carries the CTE marker
    # (sqlparse tags the WITH-clause keyword ttype with "CTE").
    return isinstance(token, Token) and "keyword" in str(token.ttype).lower() and "CTE" in str(token.ttype)


def is_token_keyword(token):
    # True for any sqlparse Keyword token (ttype contains "keyword").
    return isinstance(token, Token) and "keyword" in str(token.ttype).lower()


def METHOD_NAME(token):
    # True if the token's *text* contains "join" or "from" (case-insensitive).
    # NOTE(review): this matches on the token value, not its ttype, so any
    # identifier containing those substrings would also match — confirm callers
    # only pass keyword tokens.
    return isinstance(token, Token) and ('join' in str(token).lower() or 'from' in str(token).lower())


def is_token_word(token, word):
    # Case-insensitive equality between the token's text and *word*.
    return isinstance(token, Token) and str(token).lower() == word.lower()


def is_token_cross_join(token):
    # True if the token text contains "cross join" (case-insensitive).
    return isinstance(token, Token) and "cross join" in str(token).lower()


def check_from(token):
    # True for a Keyword token whose upper-cased text contains FROM.
    return isinstance(token, Token) and 'Keyword' in str(token.ttype) and 'FROM' in str(token).upper()


def check_join(token):
    # True for a Keyword token whose upper-cased text contains JOIN.
    return isinstance(token, Token) and 'JOIN' in str(token).upper() and 'Keyword' in str(token.ttype)


def is_select(token):
    # True for the SELECT DML token.
    return isinstance(token, Token) and 'SELECT' in str(token).upper() and 'DML' in str(token.ttype)
def update_count(cte_key, list_of_tokens, dict1):
    """Count references to a CTE in a flat token-string list.

    Increments ``dict1[cte_key]`` once for every occurrence of the CTE name
    (bare, or with a trailing ")") that immediately follows a FROM/JOIN token,
    and returns the updated dict.

    The original implementation scanned the list twice (once per spelling)
    and re-checked an always-true upper bound; this single pass is equivalent.

    :param cte_key: CTE name to look for.
    :param list_of_tokens: flat list of token strings from the parsed query.
    :param dict1: mapping of CTE name -> usage count, mutated in place.
    :return: ``dict1`` (same object, for chaining).
    """
    spellings = (cte_key, cte_key + ")")
    # Only exact-case FROM/JOIN/from/join count, matching the original check.
    from_or_join = ('FROM', 'JOIN', 'from', 'join')
    for idx, tok in enumerate(list_of_tokens):
        # idx > 0 guards the look-behind at idx - 1.
        if idx > 0 and tok in spellings and list_of_tokens[idx - 1] in from_or_join:
            dict1[cte_key] = dict1[cte_key] + 1
    return dict1
def get_line_numbers(cte, count, sql_statements):
    """Locate the first line where *cte* appears as a whole word and return a
    recommendation string; returns None implicitly when the CTE is not found."""
    for line_number, chunk in enumerate(sql_statements.split('\n'), start=1):
        if cte in chunk.split():
            return "CTE {0} defined at line number {1} is used {2} times, consider replacing all the instances of CTE with a temporary table.".format(cte, line_number, count)
def get_line_numbers_for_select(sql_input, res_list, fname):
    """Scan a SQL script for SELECT-list wildcards (``*``, ``*,``, ``tbl.*``)
    and return a recommendation string per finding, with its line number.

    :param sql_input: full SQL text.
    :param res_list: pre-extracted wildcard snippets, consumed positionally —
        assumes res_list[i] corresponds to the i-th wildcard found here, in
        the same order (TODO confirm against the caller that produces it).
    :param fname: unused here; presumably kept for interface symmetry.
    :return: list of human-readable recommendation strings.
    """
    line_number = 0
    description = []
    count = 0          # index into res_list, advanced once per finding
    found_select = False
    for chunks in sql_input.split('\n'):
        chunks_ = chunks.split()
        line_number = line_number + 1
        for idx, ele in enumerate(chunks_):
            # Track whether we are between SELECT and FROM; the flag carries
            # across lines for multi-line SELECT lists.
            if ele.upper() == 'SELECT':
                found_select = True
            elif ele.upper() == 'FROM':
                found_select = False
            elif found_select == True:
                if ele == '*' and idx-1 >= 0:
                    # Inspect the previous token's last character to tell
                    # "col, *" apart from "SELECT *".
                    pre_ele = chunks_[idx-1]
                    l = [e for i, e in enumerate(pre_ele) if i == len(pre_ele)-1]
                    print("l is --", l[0])
                    if l[0]==',':
                        print("entered pattern * without ,")
                        # Mark the token as handled so it is not re-matched.
                        chunks_[idx] = chunks_[idx] + 'checked'
                        description.append("Instead of \"{0}\" Consider selecting required columns at line number {1}".format(res_list[count], line_number))
                        count = count + 1
                    elif pre_ele.upper() == 'SELECT':
                        print("entered pattern * without ,")
                        chunks_[idx] = chunks_[idx] + 'checked'
                        description.append("Instead of \"{0}\" Consider selecting required columns at line number {1}".format(res_list[count], line_number))
                        count = count + 1
                elif ele == '*' or ele == '*,':
                    print("entered pattern *")
                    chunks_[idx] = chunks_[idx] + 'checked'
                    description.append("Instead of \"{0}\" Consider selecting required columns at line number {1}".format(res_list[count], line_number))
                    count = count + 1
                else:
                    # qualified wildcard such as "alias.*" or "alias.*,"
                    pattern = '^[a-zA-Z0-9_]+\.\*[,]?'
                    res = re.match(pattern, ele)
                    if res:
                        chunks_[idx] = chunks_[idx] + 'checked'
                        description.append("Instead of \"{0}\" Consider selecting required columns at line number {1}".format(res_list[count], line_number))
                        count = count + 1
    return description
def create_external_table(bq_ds_id, bq_tbl_name):
    """Create (idempotently) a BigQuery dataset and an external table backed
    by recommendation CSVs in GCS.

    :param bq_ds_id: target BigQuery dataset id.
    :param bq_tbl_name: external table name to create inside the dataset.
    """
    dataset_id = bq_ds_id
    table_id = bq_tbl_name
    client = bigquery.Client()
    # Fixed schema of the recommendation CSV rows.
    schema = [
        bigquery.SchemaField("Batch_ID", "STRING", mode="REQUIRED"),
        bigquery.SchemaField("FileName", "STRING", mode="REQUIRED"),
        bigquery.SchemaField("Best_Practice", "STRING", mode="REQUIRED"),
        bigquery.SchemaField("Recommendation", "String", mode="REQUIRED")
    ]
    dataset_ref= client.dataset(dataset_id)
    # Create the dataset only if it does not already exist.
    try:
        dataset = client.get_dataset(dataset_ref)
        print('Dataset {} already exists.'.format(dataset))
    except NotFound:
        dataset = bigquery.Dataset(dataset_ref)
        dataset.location = "US"
        dataset = client.create_dataset(dataset)
    dataset_ref = client.dataset(dataset_id)
    table_ref = dataset_ref.table(table_id)
    # Likewise, only create the external table if missing.
    try:
        table = client.get_table(table_ref)
        print('table {} already exists.'.format(table))
    except NotFound:
        schema = schema
        table = bigquery.Table(table_ref, schema=schema)
        external_config = bigquery.ExternalConfig('CSV')
        external_config.csv_options.skip_leading_rows = 1
        # NOTE(review): bucket paths are hard-coded; confirm they match the
        # pipeline's output locations before reuse elsewhere.
        uri_for_cte = f'gs://bq_long_running_optimization/output_cte/*.csv'
        uri_1 = f'gs://bq_long_running_optimization/output/*.csv'
        source_uris = [uri_for_cte, uri_1]
        external_config.source_uris = source_uris
        table.external_data_configuration = external_config
        table = client.create_table(table)  # Make an API request.
        print("Created table {}".format(table.table_id))
# -*- coding: utf-8 -*-
# vispy: gallery 30
# -----------------------------------------------------------------------------
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Picking Faces from a Mesh
=========================
Demonstrates how to identify (pick) individual faces on a mesh.
Arguments:
* --mesh - Path to a mesh file (OBJ/OBJ.GZ) [optional]
Controls:
* p - Toggle face picking view - shows the colors encoding face ID
* r - Clear painted faces
* s - Cycle shading modes (None, 'flat', 'smooth')
* w - Toggle wireframe
"""
import argparse
import itertools
import time
import numpy as np
from vispy import app, scene
from vispy.io import read_mesh, load_data_file
from vispy.scene.visuals import Mesh
from vispy.scene import transforms
from vispy.visuals.filters import ShadingFilter, WireframeFilter, FacePickingFilter
# Parse the optional --mesh argument, defaulting to the bundled triceratops.
parser = argparse.ArgumentParser()
default_mesh = load_data_file('orig/triceratops.obj.gz')
parser.add_argument('--mesh', default=default_mesh)
args, _ = parser.parse_known_args()
vertices, faces, _normals, _texcoords = read_mesh(args.mesh)

# Scene canvas with an arcball camera looking at the mesh.
canvas = scene.SceneCanvas(keys='interactive', bgcolor='white')
view = canvas.central_widget.add_view()
view.camera = 'arcball'
view.camera.depth_value = 1e3

# Create a colored `MeshVisual`.
face_colors = np.tile((0.5, 0.0, 0.5, 1.0), (len(faces), 1))
mesh = Mesh(
    vertices,
    faces,
    face_colors=face_colors.copy()
)
mesh.transform = transforms.MatrixTransform()
mesh.transform.rotate(90, (1, 0, 0))
mesh.transform.rotate(-45, (0, 0, 1))
view.add(mesh)

# Use filters to affect the rendering of the mesh.
wireframe_filter = WireframeFilter()
shading_filter = ShadingFilter()
face_picking_filter = FacePickingFilter()
mesh.attach(wireframe_filter)
mesh.attach(shading_filter)
mesh.attach(face_picking_filter)
def attach_headlight(view):
    """Keep the shading light aligned with the camera ("headlight").

    Registers a transform-change callback so the light direction is re-mapped
    through the camera transform whenever the scene transform changes.
    """
    light_dir = (0, 1, 0, 0)
    shading_filter.light_dir = light_dir[:3]
    initial_light_dir = view.camera.transform.imap(light_dir)

    @view.scene.transform.changed.connect
    def METHOD_NAME(event):
        # Re-map the initial light direction through the current camera pose.
        transform = view.camera.transform
        shading_filter.light_dir = transform.map(initial_light_dir)[:3]
attach_headlight(view)

# Shading modes cycled by the 's' key; start on "flat".
shading = itertools.cycle(("flat", "smooth", None))
shading_filter.shading = next(shading)

# Timestamp of the last handled mouse event, used to throttle picking renders.
throttle = time.monotonic()
@canvas.events.mouse_move.connect
def on_mouse_move(event):
    """Pick the face under the cursor and paint it green.

    Renders a 3x3 patch around the cursor with the face-picking filter
    enabled, then decodes the face index from the center pixel's color.
    """
    global throttle
    # throttle mouse events to 50ms
    if time.monotonic() - throttle < 0.05:
        return
    throttle = time.monotonic()

    # adjust the event position for hidpi screens
    render_size = tuple(d * canvas.pixel_scale for d in canvas.size)
    x_pos = event.pos[0] * canvas.pixel_scale
    y_pos = render_size[1] - (event.pos[1] * canvas.pixel_scale)

    # render a small patch around the mouse cursor
    restore_state = not face_picking_filter.enabled
    face_picking_filter.enabled = True
    # blending must be off so the ID colors are written exactly
    mesh.update_gl_state(blend=False)
    picking_render = canvas.render(
        region=(x_pos - 1, y_pos - 1, 3, 3),
        size=(3, 3),
        bgcolor=(0, 0, 0, 0),
        alpha=True,
    )
    if restore_state:
        face_picking_filter.enabled = False
    mesh.update_gl_state(blend=not face_picking_filter.enabled)

    # unpack the face index from the color in the center pixel
    face_idx = (picking_render.view(np.uint32) - 1)[1, 1, 0]
    if face_idx > 0 and face_idx < len(face_colors):
        # this may be less safe, but it's faster than set_data
        mesh.mesh_data._face_colors_indexed_by_faces[face_idx] = (0, 1, 0, 1)
        mesh.mesh_data_changed()
@canvas.events.key_press.connect
def on_key_press(event):
    """Keyboard controls: p = picking view, r = reset paint, s = shading, w = wireframe."""
    if event.key == 'p':
        # Toggle the face-ID color view; disable blending while picking colors show.
        face_picking_filter.enabled = not face_picking_filter.enabled
        mesh.update_gl_state(blend=not face_picking_filter.enabled)
        mesh.update()
    if event.key == 'r':
        # Restore the original face colors, clearing any painted faces.
        mesh.set_data(vertices, faces, face_colors=face_colors)
    if event.key == 's':
        shading_filter.shading = next(shading)
        mesh.update()
    if event.key == 'w':
        wireframe_filter.enabled = not wireframe_filter.enabled
        mesh.update()
if __name__ == "__main__":
print(__doc__)
app.run() |
#!/usr/bin/env python3
# This script aims to help to run locktests with several clients.
#
# Report bugs to Vincent ROQUETA : vincent.roqueta@ext.bull.net
import encodings
import shutil
import os, sys
import getopt, sys
import string
import socket
from stat import *
from sys import *
from os import *
# Deployment defaults: NFSv4 mount point on each client, server export, and
# where the test tarball lives / gets unpacked.
NFS4_PATH="/mnt/nfsv4"
NFS4_SERVER=""                      # filled in from the -s command-line option
TEST_HOME="/home/vincent/locks/"
testfile=NFS4_PATH+"/testfile"
app="locktests"
SRC="locktests.tar.gz"
SRC_PATH="deploy"
# NOTE(review): trailing backtick looks like a typo carried over from the
# shell command; this variable is not referenced in the visible code.
install="'tar xzf "+SRC+"; cd locks; make `"
user="root"
class Machine:
    """Base host abstraction: builds a shell command string and hands it to
    the subclass-provided ``do()`` for execution."""

    def METHOD_NAME(self, dir):
        """Create *dir* (and parents) on the target host."""
        self.command = "mkdir -p " + dir
        self.do()

    def rmdir(self, dir):
        """Recursively delete *dir* on the target host."""
        self.command = "rm -rf " + dir
        self.do()

    def printc(self):
        """Debug helper: echo the pending command."""
        print("->" + self.command)
        print("\n")
class Client(Machine):
    """A remote test client driven over ssh/scp."""

    def __init__(self, machine):
        self.command = ""
        self.machine = machine          # hostname reachable via ssh
        self.mountPath = NFS4_PATH

    def do(self):
        # Wrap the pending command in an ssh invocation and run it locally.
        self.command = "ssh " + user + "@" + self.machine + " " + self.command
        os.system(self.command)

    def isomount(self, dir):
        # Mount the NFSv4 export on the client.
        # NOTE(review): the `dir` parameter is ignored; the module-level
        # NFS4_PATH/NFS4_SERVER are used instead — confirm intent.
        export = NFS4_SERVER
        mntpoint = NFS4_PATH
        self.command = "'mkdir -p " + mntpoint + "; mount -t nfs4 " + export + " " + mntpoint + "'"
        self.do()

    def umount(self, dir):
        # Unmount <mountPath>/<dir> on the client.
        mntpoint = self.mountPath + "/" + dir
        self.command = "umount " + mntpoint
        self.do()

    def install(self, path):
        # Unpack the test tarball and build it on the client.
        self.command = "'cd " + path + "; tar xzf " + SRC + "; cd locks; make'"
        self.do()

    def run(self, appli):
        # Execute an arbitrary command line on the client.
        self.command = appli
        self.do()

    def cp(self, fichier, path):
        # Copy a local file to the client via scp (bypasses self.command/do).
        command = "scp " + fichier + " " + user + "@" + self.machine + ":" + path
        os.system(command)
class Serveur(Machine):
    """The NFS server host, driven over ssh."""

    def __init__(self, ip, exportPath):
        self.SERVEUR = ip               # server hostname/IP for ssh
        self.exportPath = exportPath    # base directory of NFS exports

    def do(self):
        # Wrap the pending command in an ssh invocation and run it locally.
        self.command = "ssh " + self.SERVEUR + " " + self.command
        os.system(self.command)

    def configure(self, dir):
        # Create and export a per-test directory under the export root.
        exportDir = self.exportPath + '/' + dir
        self.METHOD_NAME(exportDir)
        #self.printc()
        # NOTE(review): export() is not defined on Serveur or Machine in this
        # file — calling configure() would raise AttributeError. Confirm
        # whether export/unexport were meant to be implemented here.
        self.export(exportDir)
        #self.printc()

    def clean(self, dir):
        # Unexport and remove the per-test directory.
        unexportDir = self.exportPath + '/' + dir
        # NOTE(review): unexport() is likewise undefined — see configure().
        self.unexport(unexportDir)
        self.rmdir(unexportDir)
def usage():
    """Print the command-line help text and return 0."""
    help_lines = (
        "\n",
        "usage:",
        "locktests.py <-n process -f testfile ><--setup -s fs_server> -c host1, host2, host3 ... ",
        "--setup : setup the configuration, deploy test on other test machines; This option also requires -c and -s",
        "-c <machine> : host list to deploy/run/clean the test",
        "-s <machine> : NFS server to use to setup the test",
        "-n <num> : number of processes each test machine will lauch to perform the test",
        "-f <file> : test file. This must be the same on each machine",
        " ",
        "Example :",
        "=========",
        "*Setup machines for testing",
        "./locktests.py --setup -c testmachine1 testmachine2 testmachine3 -s my_nfs_server:/",
        "\n",
        "*Run test on testmachine1,testmachine2 with 50 process on each machine using /mnt/nfsv4/testfile",
        "./locktests.py -n 50 -f /mnt/nfsv4/testfile -c testmachine1 testmachine2",
        "\n",
        "_________________________________",
        "Vincent ROQUETA - Bull SA - 2005\n",
    )
    for line in help_lines:
        print(line)
    return 0
def setup():
    """Deploy and build the test on every client, then mount the export
    locally. Relies on module-level ``clients`` and ``NFS4_SERVER`` being
    populated by the command-line parsing below."""
    path = os.path.abspath(".")
    fichier = SRC_PATH + "/" + SRC
    commande = ""
    for i in clients:
        print("Setting up machine " + i)
        c = Client(i)
        # Copy, unpack, build the tarball remotely, then mount the NFS export.
        c.METHOD_NAME(path)
        c.cp(fichier, path)
        c.install(path)
        c.isomount(NFS4_PATH)
    #Setup localhost
    print("Setting up localhost")
    commande = "make; mkdir -p " + NFS4_PATH + " ; mount -t nfs4 " + NFS4_SERVER + " " + NFS4_PATH + " &"
    os.system(commande)
def run():
    """Start the lock-test master locally and one slave per client.

    Relies on module-level ``clients``, ``nbreProcess`` and ``filename``
    populated by the command-line parsing below."""
    path = os.path.abspath(".")
    nbreClients = len(clients)
    hostname = socket.gethostname()
    # Lancement du serveur en local
    # Launch the server locally
    commande = path + "/" + app + " -n " + nbreProcess + " -f " + filename + " -c " + str(nbreClients) + " &"
    os.system(commande)
    # Each client connects back to this host as a test slave.
    commande = path + "/locks/" + app + " --server " + hostname
    for i in clients:
        c = Client(i)
        c.run(commande)
def clean():
    """Unmount the NFS export on every client.

    Bug fix: the original body referenced an undefined name ``client``
    (NameError at runtime); a Client must be constructed for each host,
    mirroring setup() and run().
    """
    for i in clients:
        c = Client(i)
        c.umount(NFS4_PATH)
# --- Hand-rolled command-line parsing ---------------------------------------
# `a` remembers which option the next positional value belongs to; the
# boolean flags record which options were seen so the mode checks below can
# validate required combinations.
# NOTE(review): indentation below is reconstructed from a whitespace-mangled
# source; the fall-through usage() placement should be verified against the
# original script.
args = sys.argv[1:]
rge = list(range(len(args)))
a = ""
r = True           # run mode (default) unless --setup/--install given
s = False          # setup mode requested
nfsServer = False  # -s seen
c = False          # -c seen
f = False          # -f seen
n = False          # -n seen
clients = []
for i in rge:
    if args[i] in ("--install", "-i", "--setup"):
        r = False
        s = True
        continue
    if args[i] in ("-s", "--server"):
        a = "nfsServer"
        nfsServer = True
        continue
    if args[i] in ("-h", "--help"):
        usage()
        sys.exit(1)
    if args[i] in ("--clients", "-c"):
        a = "clients"
        c = True
        continue
    if args[i] == "-n":
        a = "nbre"
        n = True
        continue
    if args[i] == "-f":
        a = "file"
        f = True
        continue
    # Positional value: route it to whichever option was seen last.
    if a == "clients":
        clients.append(args[i])
        continue
    if a == "file":
        filename = args[i]
        continue
    if a == "nbre":
        nbreProcess = args[i]
        continue
    if a == "nfsServer":
        NFS4_SERVER = args[i]
        continue
    usage()
# For ...
if s:
    # Setup mode requires both a client list and an NFS server.
    if (not c) or (not nfsServer):
        usage()
        sys.exit(1)
    print("Setup")
    print(NFS4_SERVER)
    setup()
    print("Setup complete")
if r:
    # Run mode requires clients, a test file and a process count.
    if (not c) or (not f) or (not n):
        usage()
        sys.exit(1)
    print("Running test")
    run()
|
# encoding: utf-8
#
# Copyright 2017-2023 Greg Neagle.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
authrestart.client.py
Created by Greg Neagle on 2017-04-15.
Routines for communicating with authrestartd.
Socket communications code adapted from autopkg's PkgCreator by Per Olofsson
"""
from __future__ import absolute_import, print_function
import os
import select
import socket
from .. import prefs
from ..wrappers import writePlistToString
AUTHRESTARTD_SOCKET = "/var/run/authrestartd"
class AuthRestartClientError(Exception):
    """Raised for any failure while talking to the authrestartd daemon."""
class AuthRestartClient(object):
    '''Handles communication with authrestartd daemon.

    Each request is sent as a plist over the daemon's UNIX-domain socket;
    replies are short strings beginning with "OK" on success or "ERROR" on
    failure.
    '''

    def connect(self):
        '''Connect to authrestartd'''
        try:
            #pylint: disable=attribute-defined-outside-init
            self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            #pylint: enable=attribute-defined-outside-init
            self.socket.connect(AUTHRESTARTD_SOCKET)
        except socket.error as err:
            raise AuthRestartClientError(
                "Couldn't connect to authrestartd: %s" % err.strerror)

    def send_request(self, request):
        '''Send a request to authrestartd and return its textual reply.'''
        self.socket.send(writePlistToString(request))
        # use select so we don't hang indefinitely if authrestartd dies
        ready = select.select([self.socket.fileno()], [], [], 2)
        if ready[0]:
            reply = self.socket.recv(8192).decode("UTF-8")
        else:
            reply = ''
        if reply:
            return reply.rstrip()
        return "ERROR:No reply"

    def disconnect(self):
        '''Disconnect from authrestartd'''
        self.socket.close()

    def process(self, request):
        '''Send a request and return the result, always closing the socket.'''
        try:
            self.connect()
            result = self.send_request(request)
        finally:
            self.disconnect()
        return result

    def fv_is_active(self):
        '''Returns a boolean to indicate if FileVault is active'''
        result = self.process({'task': 'verify_filevault'})
        return result.startswith('OK')

    def verify_user(self, username):
        '''Returns True if username can unlock the FV volume'''
        request = {'task': 'verify_user', 'username': username}
        result = self.process(request)
        return result.startswith('OK')

    def verify_recovery_key_present(self):
        '''Returns True if plist containing a FV recovery key is present'''
        request = {'task': 'verify_recovery_key_present'}
        result = self.process(request)
        return result.startswith('OK')

    def verify_can_attempt_auth_restart(self):
        '''Returns True if we are ready to attempt an auth restart'''
        request = {'task': 'verify_can_attempt_auth_restart'}
        result = self.process(request)
        return result.startswith('OK')

    def store_password(self, password, username=None):
        '''Stores a FV password with authrestartd.
        Raises AuthRestartClientError on any non-OK reply.'''
        request = {'task': 'store_password', 'password': password}
        if username:
            request['username'] = username
        result = self.process(request)
        if not result.startswith('OK'):
            raise AuthRestartClientError(result)

    def restart(self):
        '''Triggers a restart; raises AuthRestartClientError on failure.'''
        result = self.process({'task': 'restart'})
        if not result.startswith('OK'):
            raise AuthRestartClientError(result)

    def setup_delayed_authrestart(self, delayminutes=-1):
        '''Sets up a delayed auth restart (-1 means daemon default delay;
        raises AuthRestartClientError on failure).'''
        request = {'task': 'delayed_authrestart', 'delayminutes': delayminutes}
        result = self.process(request)
        if not result.startswith('OK'):
            raise AuthRestartClientError(result)
# Higher-level wrapper functions that swallow AuthRestartClientErrors and
# return plain booleans, for callers that don't care why a step failed.


def fv_is_active():
    '''Returns True if FileVault can be verified to be active,
    False otherwise'''
    try:
        return AuthRestartClient().fv_is_active()
    except AuthRestartClientError:
        return False


def verify_user(username):
    '''Returns True if user can be verified to be able to perform an
    authrestart, False otherwise'''
    try:
        return AuthRestartClient().verify_user(username)
    except AuthRestartClientError:
        return False


def verify_recovery_key_present():
    '''Returns True if we have a plist with a FileVault recovery key,
    False otherwise'''
    try:
        return AuthRestartClient().verify_recovery_key_present()
    except AuthRestartClientError:
        return False


def verify_can_attempt_auth_restart():
    '''Returns True if we have what we need to attempt an auth restart'''
    try:
        return AuthRestartClient().verify_can_attempt_auth_restart()
    except AuthRestartClientError:
        return False


def store_password(password, username=None):
    '''Stores a password for later authrestart usage.
    Returns boolean to indicate success/failure'''
    try:
        AuthRestartClient().store_password(password, username=username)
        return True
    except AuthRestartClientError:
        return False


def restart():
    '''Performs a restart -- authenticated if possible.
    Returns boolean to indicate success/failure'''
    try:
        AuthRestartClient().restart()
        return True
    except AuthRestartClientError:
        return False


def setup_delayed_authrestart():
    '''Sets up a delayed authrestart.
    Returns boolean to indicate success/failure'''
    try:
        AuthRestartClient().setup_delayed_authrestart()
        return True
    except AuthRestartClientError:
        return False
def METHOD_NAME():
    '''A function for doing some basic testing.

    Interactive smoke test: queries FileVault state, optionally stores a
    password, and can trigger a (delayed) auth restart. Not for automated use.
    '''
    import getpass
    import pwd
    from ..wrappers import get_input

    print('PerformAuthRestarts preference is: %s'
          % prefs.pref('PerformAuthRestarts'))
    print('FileVault is active: %s' % fv_is_active())
    print('Recovery key is present: %s' % verify_recovery_key_present())
    # Running as root means we can't infer the console user; ask instead.
    username = pwd.getpwuid(os.getuid()).pw_name
    if username == 'root':
        username = get_input('Enter name of FV-enabled user: ')
    print('%s is FV user: %s' % (username, verify_user(username)))
    password = getpass.getpass('Enter password: ')
    if password:
        # A None username lets authrestartd pick the user itself.
        if username == 'root':
            username = None
        if store_password(password, username=username):
            print('store_password was successful')
        else:
            print('store_password failed')
    print('Can attempt auth restart: %s' % verify_can_attempt_auth_restart())
    answer = get_input('Test setup of delayed auth restart (y/n)? ')
    if answer.lower().startswith('y'):
        print('Successfully set up delayed authrestart: %s'
              % setup_delayed_authrestart())
    answer = get_input('Test auth restart (y/n)? ')
    if answer.lower().startswith('y'):
        print('Attempting auth restart...')
        if restart():
            print('restart was successfully triggered')
        else:
            print('restart failed')
#!/usr/bin/env python
import sys
import random
from direct.showbase.ShowBase import ShowBase
from direct.showbase.InputStateGlobal import inputState
from panda3d.core import AmbientLight
from panda3d.core import DirectionalLight
from panda3d.core import LVector3
from panda3d.core import TransformState
from panda3d.core import BitMask32
from panda3d.bullet import BulletWorld
from panda3d.bullet import BulletRigidBodyNode
from panda3d.bullet import BulletDebugNode
from panda3d.bullet import BulletPlaneShape
from panda3d.bullet import BulletConvexHullShape
from panda3d.bullet import BulletTriangleMesh
from panda3d.bullet import BulletTriangleMeshShape
from panda3d.bullet import ZUp
class Game(ShowBase):
    def __init__(self):
        """Set up the window, lights, key bindings, the per-frame update task,
        and the initial physics scene."""
        ShowBase.__init__(self)
        base.set_background_color(0.1, 0.1, 0.8, 1)
        base.set_frame_rate_meter(True)

        base.cam.set_pos(0, -10, 5)
        base.cam.look_at(0, 0, 0.2)

        # Light
        alight = AmbientLight('ambientLight')
        alight.set_color((0.5, 0.5, 0.5, 1))
        alightNP = render.attach_new_node(alight)
        dlight = DirectionalLight('directionalLight')
        dlight.set_direction((1, 1, -1))
        dlight.set_color((0.7, 0.7, 0.7, 1))
        dlightNP = render.attach_new_node(dlight)
        render.clear_light()
        render.set_light(alightNP)
        render.set_light(dlightNP)

        # Input
        self.accept('escape', self.do_exit)
        self.accept('r', self.do_reset)
        self.accept('f1', base.toggle_wireframe)
        self.accept('f2', base.toggle_texture)
        self.accept('f3', self.toggle_debug)
        self.accept('f5', self.do_screenshot)
        # WASD steering forces for the bowl.
        inputState.watchWithModifiers('up', 'w')
        inputState.watchWithModifiers('left', 'a')
        inputState.watchWithModifiers('down', 's')
        inputState.watchWithModifiers('right', 'd')

        # Task
        taskMgr.add(self.update, 'updateWorld')

        # Physics
        self.METHOD_NAME()
    def do_exit(self):
        """Tear down the scene and quit.

        NOTE(review): exits with status 1 even on a normal quit — confirm
        whether 0 was intended."""
        self.cleanup()
        sys.exit(1)

    def do_reset(self):
        """Rebuild the physics scene from scratch."""
        self.cleanup()
        self.METHOD_NAME()

    def toggle_debug(self):
        """Show/hide the Bullet debug visualization node."""
        if self.debugNP.is_hidden():
            self.debugNP.show()
        else:
            self.debugNP.hide()

    def do_screenshot(self):
        """Save a screenshot with the 'Bullet' filename prefix."""
        base.screenshot('Bullet')
def process_input(self, dt):
force = LVector3(0, 0, 0)
if inputState.isSet('up'): force.y = 1.0
if inputState.isSet('down'): force.y = -1.0
if inputState.isSet('left'): force.x = -1.0
if inputState.isSet('right'): force.x = 1.0
force *= 300.0
self.bowlNP.node().set_active(True)
self.bowlNP.node().apply_central_force(force)
def update(self, task):
dt = globalClock.get_dt()
self.process_input(dt)
self.world.do_physics(dt)
return task.cont
def cleanup(self):
self.world = None
self.worldNP.remove_node()
def METHOD_NAME(self):
self.worldNP = render.attach_new_node('World')
# World
self.debugNP = self.worldNP.attach_new_node(BulletDebugNode('Debug'))
self.debugNP.show()
self.debugNP.node().show_wireframe(True)
self.debugNP.node().show_constraints(True)
self.debugNP.node().show_bounding_boxes(False)
self.debugNP.node().show_normals(False)
self.world = BulletWorld()
self.world.set_gravity((0, 0, -9.81))
self.world.set_debug_node(self.debugNP.node())
# Ground
shape = BulletPlaneShape((0, 0, 1), 0)
body = BulletRigidBodyNode('Ground')
bodyNP = self.worldNP.attach_new_node(body)
bodyNP.node().add_shape(shape)
bodyNP.set_pos(0, 0, 0)
bodyNP.set_collide_mask(BitMask32.all_on())
self.world.attach(bodyNP.node())
# Bowl
visNP = loader.load_model('models/bowl.egg')
geom = (visNP.findAllMatches('**/+GeomNode')
.get_path(0).node().get_geom(0))
mesh = BulletTriangleMesh()
mesh.addGeom(geom)
shape = BulletTriangleMeshShape(mesh, dynamic=True)
body = BulletRigidBodyNode('Bowl')
bodyNP = self.worldNP.attach_new_node(body)
bodyNP.node().add_shape(shape)
bodyNP.node().set_mass(10.0)
bodyNP.set_pos(0, 0, 0)
bodyNP.set_collide_mask(BitMask32.all_on())
self.world.attach(bodyNP.node())
visNP.reparent_to(bodyNP)
self.bowlNP = bodyNP
self.bowlNP.set_scale(2)
# Eggs
self.eggNPs = []
for i in range(5):
x = random.gauss(0, 0.1)
y = random.gauss(0, 0.1)
z = random.gauss(0, 0.1) + 1
h = random.random() * 360
p = random.random() * 360
r = random.random() * 360
visNP = loader.load_model('models/egg.egg')
geom = (visNP.find_all_matches('**/+GeomNode')
.get_path(0).node().get_geom(0))
shape = BulletConvexHullShape()
shape.addGeom(geom)
body = BulletRigidBodyNode('Egg-%i' % i)
bodyNP = self.worldNP.attach_new_node(body)
bodyNP.node().set_mass(1.0)
bodyNP.node().add_shape(shape)
bodyNP.node().set_deactivation_enabled(False)
bodyNP.set_collide_mask(BitMask32.all_on())
bodyNP.set_pos_hpr(x, y, z, h, p, r)
#bodyNP.set_scale(1.5)
self.world.attach(bodyNP.node())
visNP.reparent_to(bodyNP)
self.eggNPs.append(bodyNP)
if __name__ == '__main__':
    # Guard the entry point so importing this module does not open a window.
    game = Game()
    game.run()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """A stable, out-of-place partition.

    Returns (matching, non_matching): the items for which *predicate* is
    true, then the rest, each list preserving the input order.
    """
    matching: List[Any] = []
    non_matching: List[Any] = []
    for item in iterator:
        (matching if predicate(item) else non_matching).append(item)
    return matching, non_matching
class appengine_loggingCallTransformer(cst.CSTTransformer):
    """libcst transformer that folds flattened API call arguments into a
    single keyword `request` dict, keeping retry/timeout/metadata separate.

    METHOD_TO_PARAMS is empty for this client, so every call currently takes
    the KeyError early-return path and is left unchanged.
    """
    # Parameters that control the RPC itself rather than the request payload.
    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
    # Maps API method name -> ordered tuple of its request field names.
    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
    }
    def METHOD_NAME(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        """Visitor hook: return `updated` with its args folded into a request dict."""
        try:
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated
        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated
        # Split control-plane kwargs (retry/timeout/metadata) from payload kwargs.
        kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
            kwargs
        )
        # Positional args beyond the request parameters are control args, in order.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )
        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )
def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=appengine_loggingCallTransformer(),
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory

    :param transformer: libcst transformer applied to every parsed module;
        the default instance is stateless, so sharing it across calls is safe.
    """
    # Recursively collect every *.py file under in_dir.
    pyfile_gen = (
        pathlib.Path(os.path.join(root, f))
        for root, _, files in os.walk(in_dir)
        for f in files if os.path.splitext(f)[1] == ".py"
    )
    for fpath in pyfile_gen:
        with open(fpath, 'r') as f:
            src = f.read()
        # Parse the code and insert method call fixes.
        tree = cst.parse_module(src)
        updated = tree.visit(transformer)
        # Create the path and directory structure for the new file.
        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
        updated_path.parent.mkdir(parents=True, exist_ok=True)
        # Generate the updated source file at the corresponding path.
        with open(updated_path, 'w') as f:
            f.write(updated.code)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the appengine_logging client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)
    # Enforce fix_files' documented preconditions before touching anything.
    if not input_dir.is_dir():
        print(
            f"input directory '{input_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if not output_dir.is_dir():
        print(
            f"output directory '{output_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if os.listdir(output_dir):
        print(
            f"output directory '{output_dir}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)
    fix_files(input_dir, output_dir)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.script
import tvm.testing
from tvm import relax, tir
from tvm.script import relax as R
import numpy as np
import pytest
# Pytest parameterizations shared by the bind_params tests below.
param_specification = tvm.testing.parameter("by_string", "by_var")
param_shape = tvm.testing.parameter("static_shape", "dynamic_shape", "ndim", "arbitrary")
tensor_param_dtype = tvm.testing.parameter("float32", None)
def test_bind_tensor_param(param_specification, param_shape, tensor_param_dtype):
    """bind_params should inline a bound tensor argument as a relax constant,
    whether the parameter is specified by name or by Var."""
    if param_shape == "static_shape":
        shape = [16]
        ndim = -1
    elif param_shape == "dynamic_shape":
        shape = [tir.Var("N", "int64")]
        ndim = -1
    elif param_shape == "ndim":
        shape = None
        ndim = 1
    elif param_shape == "arbitrary":
        shape = None
        ndim = -1
    else:
        raise ValueError(f"Unknown param_shape: {param_shape}")

    @R.function
    def before(A: R.Tensor(shape, ndim=ndim, dtype=tensor_param_dtype)):
        R.func_attr({"global_symbol": "main"})
        B: R.Tensor(shape=shape, ndim=ndim, dtype=tensor_param_dtype) = A
        out = R.add(B, B)
        return out

    np_data = np.arange(16).astype("float32")
    inlined_relax_const = relax.const(np_data)

    @R.function
    def expected() -> R.Tensor([16], "float32"):
        R.func_attr({"global_symbol": "main"})
        B = inlined_relax_const
        out = R.add(B, B)
        return out

    if param_specification == "by_string":
        var = "A"
    elif param_specification == "by_var":
        var = before.params[0]
    else:
        # Fix: was a plain string literal, so {param_specification} was never
        # interpolated into the error message.
        raise ValueError(f"Unknown param_specification: {param_specification}")

    after = before.bind_params({var: np.arange(16).astype("float32")})
    tvm.ir.assert_structural_equal(expected, after)
def test_bind_shape_param(param_shape):
    """Binding a shape parameter should inline it as a constant ShapeExpr."""
    shape_ndim_by_case = {
        "static_shape": ([16], -1),
        "dynamic_shape": ([tir.Var("N", "int64")], -1),
        "ndim": (None, 1),
        "arbitrary": (None, -1),
    }
    if param_shape not in shape_ndim_by_case:
        raise ValueError(f"Unknown param_shape: {param_shape}")
    shape, ndim = shape_ndim_by_case[param_shape]

    @R.function
    def before(A: R.Shape(shape, ndim=ndim)):
        R.func_attr({"global_symbol": "main"})
        B: R.Shape(shape, ndim=ndim) = A
        return B

    @R.function
    def expected() -> R.Shape([16]):
        R.func_attr({"global_symbol": "main"})
        B = R.ShapeExpr([16])
        return B

    after = before.bind_params({"A": relax.ShapeExpr([16])})
    tvm.ir.assert_structural_equal(expected, after)
# Dtypes exercised by the PrimValue binding test.
prim_value_dtype = tvm.testing.parameter("int64", "int32", "float32")


@pytest.mark.xfail(reason="Depends on relax.PrimValue holding a tir.PrimExpr, PR#15577")
def test_bind_prim_value(prim_value_dtype):
    """Binding an R.Prim parameter should inline it as a constant PrimValue."""

    @R.function
    def before(A: R.Prim(value="N", dtype=prim_value_dtype)):
        R.func_attr({"global_symbol": "main"})
        B: R.Prim(value="N", dtype=prim_value_dtype) = A
        return B

    @R.function
    def expected() -> R.Prim(value=16, dtype=prim_value_dtype):
        R.func_attr({"global_symbol": "main"})
        # Fix: was `dtype=dtype`, an undefined name (a NameError previously
        # masked by the xfail marker on this test).
        B = R.PrimValue(value=16, dtype=prim_value_dtype)
        return B

    after = before.bind_params({"A": relax.PrimValue(tir.const(16, prim_value_dtype))})
    tvm.ir.assert_structural_equal(expected, after)
def test_error_on_unknown_var():
    """Binding a Var that is not a parameter of the function must raise."""

    @R.function
    def before(A: R.Tensor([16], dtype="float32")):
        R.func_attr({"global_symbol": "main"})
        return A

    stray_var = relax.Var("unknown_var")
    bound_value = np.arange(16).astype("float32")
    with pytest.raises(tvm.TVMError):
        before.bind_params({stray_var: bound_value})
def METHOD_NAME():
    """Binding by a name that matches no parameter must raise."""

    @R.function
    def before(A: R.Tensor([16], dtype="float32")):
        R.func_attr({"global_symbol": "main"})
        return A

    bound_value = np.arange(16).astype("float32")
    with pytest.raises(tvm.TVMError):
        before.bind_params({"unknown_var_name": bound_value})
if __name__ == "__main__":
    # Dispatch to tvm.testing so the file runs directly or under pytest.
    tvm.testing.main()
#!/usr/bin/env python
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from io import BytesIO
from splunklib.six import StringIO
from tests import testlib
from time import sleep
import splunklib.results as results
import io
class ResultsTestCase(testlib.SDKTestCase):
    """Tests for splunklib result readers: a live empty search via
    JSONResultsReader, and canned XML payloads via ResultsReader."""
    def test_read_from_empty_result_set(self):
        # A search over a non-existent index must complete with zero results.
        job = self.service.jobs.create("search index=_internal_does_not_exist | head 2")
        while not job.is_done():
            sleep(0.5)
        self.assertEqual(0, len(list(results.JSONResultsReader(io.BufferedReader(job.results(output_mode='json'))))))
    def test_read_normal_results(self):
        # The reader must yield Message objects for <msg> elements and dicts
        # for <result> elements, in document order.
        xml_text = """
<?xml version='1.0' encoding='UTF-8'?>
<results preview='0'>
<meta>
<fieldOrder>
<field>series</field>
<field>sum(kb)</field>
</fieldOrder>
</meta>
<messages>
<msg type='DEBUG'>base lispy: [ AND ]</msg>
<msg type='DEBUG'>search context: user='admin', app='search', bs-pathname='/some/path'</msg>
</messages>
<result offset='0'>
<field k='series'>
<value><text>twitter</text></value>
</field>
<field k='sum(kb)'>
<value><text>14372242.758775</text></value>
</field>
</result>
<result offset='1'>
<field k='series'>
<value><text>splunkd</text></value>
</field>
<field k='sum(kb)'>
<value><text>267802.333926</text></value>
</field>
</result>
<result offset='2'>
<field k='series'>
<value><text>flurry</text></value>
</field>
<field k='sum(kb)'>
<value><text>12576.454102</text></value>
</field>
</result>
<result offset='3'>
<field k='series'>
<value><text>splunkd_access</text></value>
</field>
<field k='sum(kb)'>
<value><text>5979.036338</text></value>
</field>
</result>
<result offset='4'>
<field k='series'>
<value><text>splunk_web_access</text></value>
</field>
<field k='sum(kb)'>
<value><text>5838.935649</text></value>
</field>
</result>
</results>
        """.strip()
        expected_results = [
            results.Message('DEBUG', 'base lispy: [ AND ]'),
            results.Message('DEBUG', "search context: user='admin', app='search', bs-pathname='/some/path'"),
            {
                'series': 'twitter',
                'sum(kb)': '14372242.758775',
            },
            {
                'series': 'splunkd',
                'sum(kb)': '267802.333926',
            },
            {
                'series': 'flurry',
                'sum(kb)': '12576.454102',
            },
            {
                'series': 'splunkd_access',
                'sum(kb)': '5979.036338',
            },
            {
                'series': 'splunk_web_access',
                'sum(kb)': '5838.935649',
            },
        ]
        self.METHOD_NAME(xml_text, expected_results)
    def test_read_raw_field(self):
        # A _raw value held in a <v> element must come through as plain text.
        xml_text = """
<?xml version='1.0' encoding='UTF-8'?>
<results preview='0'>
<meta>
<fieldOrder>
<field>_raw</field>
</fieldOrder>
</meta>
<result offset='0'>
<field k='_raw'><v xml:space='preserve' trunc='0'>07-13-2012 09:27:27.307 -0700 INFO Metrics - group=search_concurrency, system total, active_hist_searches=0, active_realtime_searches=0</v></field>
</result>
</results>
        """.strip()
        expected_results = [
            {
                '_raw': '07-13-2012 09:27:27.307 -0700 INFO Metrics - group=search_concurrency, system total, active_hist_searches=0, active_realtime_searches=0',
            },
        ]
        self.METHOD_NAME(xml_text, expected_results)
    def test_read_raw_field_with_segmentation(self):
        # Segmentation markup (<sg> elements) must be stripped from _raw.
        xml_text = """
<?xml version='1.0' encoding='UTF-8'?>
<results preview='0'>
<meta>
<fieldOrder>
<field>_raw</field>
</fieldOrder>
</meta>
<result offset='0'>
<field k='_raw'><v xml:space='preserve' trunc='0'>07-13-2012 09:27:27.307 -0700 INFO Metrics - group=search_concurrency, <sg h="1">system total</sg>, <sg h="2">active_hist_searches=0</sg>, active_realtime_searches=0</v></field>
</result>
</results>
        """.strip()
        expected_results = [
            {
                '_raw': '07-13-2012 09:27:27.307 -0700 INFO Metrics - group=search_concurrency, system total, active_hist_searches=0, active_realtime_searches=0',
            },
        ]
        self.METHOD_NAME(xml_text, expected_results)
    def METHOD_NAME(self, xml_text, expected_results):
        """Parse xml_text with ResultsReader and compare against expected_results."""
        results_reader = results.ResultsReader(BytesIO(xml_text.encode('utf-8')))
        actual_results = [x for x in results_reader]
        self.assertEqual(expected_results, actual_results)
if __name__ == "__main__":
    try:
        # Prefer the unittest2 backport on old Pythons; fall back to stdlib.
        import unittest2 as unittest
    except ImportError:
        import unittest
    unittest.main()
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from .constants import (BASE_OPTIMIZER_STATE, GROUP_PADDINGS, OPTIMIZER_STATE_DICT, PARTITION_COUNT)
from .reshape_utils import (basic_folder_validation, get_zero_files, merge_state)
from .reshape_3d_utils import (model_3d_desc, get_model_3d_descriptor)
# Key under BASE_OPTIMIZER_STATE that holds per-parameter-group optimizer state.
GROUP_STATE_KEY = 'state'
class ZeROCheckpoint(object):
    """Reader for ZeRO partitioned optimizer checkpoints.

    Maps each (pp, tp, dp) rank of a target 3D-parallel topology onto the
    checkpoint files written by the source topology, and can load/merge the
    corresponding state dicts.
    """

    def __init__(self, dir):
        basic_folder_validation(dir)
        self.dir = dir
        self.file_list = get_zero_files(dir)
        self.num_files = len(self.file_list)
        assert self.num_files > 0, f'No ZeRO files found in {dir}'
        self.src_3d = get_model_3d_descriptor(dir)
        # Target topology starts identical to the source; call reshape() to change it.
        self.target_3d = model_3d_desc(pp_degree=self.src_3d.pp_degree,
                                       tp_degree=self.src_3d.tp_degree,
                                       dp_degree=self.src_3d.dp_degree)
        self._3d_file_map = self.src_3d.reshape(self.target_3d)

    def get_src_world_size(self):
        return self.src_3d.world_size()

    def get_src_tp_degree(self):
        return self.src_3d.tp_degree

    def get_src_pp_degree(self):
        return self.src_3d.pp_degree

    def get_src_dp_degree(self):
        return self.src_3d.dp_degree

    def METHOD_NAME(self, pp_index, tp_index, dp_index):
        """Return indices into self.file_list for the given target rank."""
        assert dp_index < len(self._3d_file_map), f'DP index {dp_index} >= DP degree {len(self._3d_file_map)}'
        dp_2d_map = self._3d_file_map[dp_index]
        return dp_2d_map.get_data(pp_index, tp_index)

    def get_files_for_rank(self, pp_index, tp_index, dp_index):
        """Return checkpoint file paths for the given target rank."""
        file_idx_list = self.METHOD_NAME(pp_index, tp_index, dp_index)
        return [self.file_list[idx] for idx in file_idx_list]

    def get_state_for_rank(self, pp_index, tp_index, dp_index, keys_to_ignore=None, strip_tensor_paddings=True):
        """Load and merge the state dicts belonging to the given target rank.

        :param keys_to_ignore: optional iterable of top-level keys to drop from
            each loaded state dict. (Fix: was a mutable default argument `[]`;
            passing an explicit list still works identically.)
        :param strip_tensor_paddings: trim alignment padding from optimizer
            state tensors and zero out the recorded group paddings.
        """
        if keys_to_ignore is None:
            keys_to_ignore = []
        state_file_list = self.get_files_for_rank(pp_index, tp_index, dp_index)
        merged_sd = None
        for state_file in state_file_list:
            sd = torch.load(state_file, map_location=torch.device('cpu'))
            for key in keys_to_ignore:
                sd.pop(key, None)
            if strip_tensor_paddings:
                self._strip_tensor_paddings(sd)
            if merged_sd is None:
                merged_sd = sd
            else:
                merged_sd = merge_state(merged_sd, sd)
        self._update_partition_count(merged_sd)
        if strip_tensor_paddings:
            self._clear_group_paddings(merged_sd)
        return merged_sd

    def print_3d_index_map(self, tag=None):
        if tag:
            print(f'3D index map: {tag}')
        for dp_index, _2d_map in enumerate(self._3d_file_map):
            _2d_map.print_data(f'dp = {dp_index}')

    def print_3d_file_map(self, tag=None):
        if tag:
            print(f'3D file map: {tag}')
        for dp_index, _2d_map in enumerate(self._3d_file_map):
            # Fix: pp_degree/tp_degree are degree counts (ints elsewhere in this
            # class), so they must be wrapped in range(); iterating the bare
            # value raised TypeError.
            for pp_index in range(_2d_map.pp_degree):
                for tp_index in range(_2d_map.tp_degree):
                    file_index_list = _2d_map.get_data(pp_index, tp_index)
                    file_list = [self.file_list[idx] for idx in file_index_list]
                    print(f'{pp_index}, {tp_index}, {dp_index} => {file_list}')

    def reshape(self, target_3d_desc: model_3d_desc):
        """Re-target the checkpoint onto a new 3D topology."""
        self.target_3d = target_3d_desc
        self._3d_file_map = self.src_3d.reshape(self.target_3d)

    def _strip_tensor_paddings(self, sd):
        # Trim alignment padding from every padded optimizer state tensor, in place.
        param_group_states = self._get_param_group_states(sd)
        if param_group_states is None:
            return
        group_paddings = self._get_optimizer_state(sd, GROUP_PADDINGS)
        if group_paddings is None:
            return
        for key, group_state in param_group_states.items():
            if group_paddings[key] == 0:
                continue
            for state_name, state_value in group_state.items():
                if torch.is_tensor(state_value):
                    raw_length = state_value.numel() - group_paddings[key]
                    group_state[state_name] = torch.narrow(state_value, 0, 0, raw_length).clone()

    def _clear_group_paddings(self, sd):
        # After stripping, record that no padding remains in any group.
        group_paddings = self._get_optimizer_state(sd, GROUP_PADDINGS)
        if group_paddings:
            num_groups = len(group_paddings)
            sd[OPTIMIZER_STATE_DICT][GROUP_PADDINGS] = [0] * num_groups

    def _get_optimizer_state(self, sd, state_key):
        optimizer_state = sd.get(OPTIMIZER_STATE_DICT, None)
        if optimizer_state is None:
            return None
        return optimizer_state.get(state_key, None)

    def _get_param_group_states(self, sd):
        optimizer_state = sd.get(OPTIMIZER_STATE_DICT, None)
        if optimizer_state is None:
            return None
        base_optimizer_state = optimizer_state.get(BASE_OPTIMIZER_STATE, None)
        if base_optimizer_state is None:
            return None
        return base_optimizer_state.get(GROUP_STATE_KEY, None)

    def _update_partition_count(self, sd):
        # Rewrite per-group partition counts to reflect the target DP degree.
        partition_counts = self._get_optimizer_state(sd, PARTITION_COUNT)
        if partition_counts:
            num_groups = len(partition_counts)
            sd[OPTIMIZER_STATE_DICT][PARTITION_COUNT] = [self.target_3d.dp_degree] * num_groups
"""Module for reading repodata directory (created with createrepo) for package
information instead of scanning individual rpms."""
import functools
import gzip
import os
from xml.etree import ElementTree as ET

from . import rpmquery
from . import packagequery
def namespace(name):
    """Return the Clark-notation XML namespace prefix for repodata tag set *name*."""
    return "{{http://linux.duke.edu/metadata/{}}}".format(name)
# Maps the `flags` attribute of an rpm dependency <entry> in primary.xml to the
# corresponding version-comparison operator string.
OPERATOR_BY_FLAGS = {
    "EQ": "=",
    "LE": "<=",
    "GE": ">=",
    "LT": "<",
    "GT": ">"
}
def primaryPath(directory):
    """Returns path to the primary repository data file.

    :param directory: repository directory that contains the repodata subdirectory
    :return: path to primary repository data file
    :rtype: str
    :raise IOError: if repomd.xml contains no primary location
    """
    metaDataPath = os.path.join(directory, "repodata", "repomd.xml")
    root = ET.parse(metaDataPath).getroot()
    for dataElement in root:
        if dataElement.get("type") != "primary":
            continue
        locationElement = dataElement.find(namespace("repo") + "location")
        # Even though repomd.xml lives under repodata, the location href is
        # relative to the parent repository directory.
        return os.path.join(directory, locationElement.get("href"))
    raise OSError("'%s' contains no primary location" % metaDataPath)
def METHOD_NAME(directory):
    """Returns a list of RepoDataQueryResult instances constructed from the repodata under
    the directory.

    :param directory: path to a repository directory (parent directory of repodata directory)
    :return: list of RepoDataQueryResult instances
    :raise IOError: if repomd.xml contains no primary location
    """
    path = primaryPath(directory)
    # primary.xml is stored gzip-compressed; parse straight from the stream.
    gunzippedPrimary = gzip.GzipFile(path)
    elementTree = ET.parse(gunzippedPrimary)
    root = elementTree.getroot()
    packageQueries = []
    # Each child of the <metadata> root is one <package> element.
    for packageElement in root:
        packageQuery = RepoDataQueryResult(directory, packageElement)
        packageQueries.append(packageQuery)
    return packageQueries
def _to_bytes_or_None(method):
    """Decorator: encode the wrapped method's str result to bytes, passing None through.

    Uses functools.wraps so the wrapped method keeps its name/docstring for
    introspection and debugging.
    """
    @functools.wraps(method)
    def _method(self, *args, **kwargs):
        res = method(self, *args, **kwargs)
        if res is None:
            return None
        return res.encode()
    return _method
def _to_bytes_list(method):
    """Decorator: encode each str in the wrapped method's list result to bytes.

    Uses functools.wraps so the wrapped method keeps its name/docstring for
    introspection and debugging.
    """
    @functools.wraps(method)
    def _method(self, *args, **kwargs):
        res = method(self, *args, **kwargs)
        return [data.encode() for data in res]
    return _method
class RepoDataQueryResult(packagequery.PackageQueryResult):
    """PackageQueryResult that reads in data from the repodata directory files."""
    def __init__(self, directory, element):
        """Creates a RepoDataQueryResult from a package Element under a metadata
        Element in a primary.xml file.
        :param directory: repository directory path. Used to convert relative paths to full paths.
        :param element: package Element
        """
        self.__directory = os.path.abspath(directory)
        self.__element = element
    def __formatElement(self):
        # <format> holds the rpm-specific dependency collections.
        return self.__element.find(namespace("common") + "format")
    def __parseEntry(self, element):
        """Render a dependency <entry> as "name [op version[-release]]"."""
        entry = element.get("name")
        flags = element.get("flags")
        if flags is not None:
            version = element.get("ver")
            operator = OPERATOR_BY_FLAGS[flags]
            entry += " %s %s" % (operator, version)
            release = element.get("rel")
            if release is not None:
                entry += "-%s" % release
        return entry
    def __parseEntryCollection(self, collection):
        """Return all entries of the named dependency collection (may be empty)."""
        formatElement = self.__formatElement()
        collectionElement = formatElement.find(namespace("rpm") + collection)
        entries = []
        if collectionElement is not None:
            for entryElement in collectionElement.findall(namespace("rpm") + "entry"):
                entry = self.__parseEntry(entryElement)
                entries.append(entry)
        return entries
    def __versionElement(self):
        return self.__element.find(namespace("common") + "version")
    @_to_bytes_or_None
    def arch(self):
        return self.__element.find(namespace("common") + "arch").text
    @_to_bytes_or_None
    def description(self):
        return self.__element.find(namespace("common") + "description").text
    def distribution(self):
        # Not recorded in repodata.
        return None
    @_to_bytes_or_None
    def epoch(self):
        return self.__versionElement().get("epoch")
    @_to_bytes_or_None
    def name(self):
        return self.__element.find(namespace("common") + "name").text
    def path(self):
        """Absolute path of the package file within the repository."""
        locationElement = self.__element.find(namespace("common") + "location")
        relativePath = locationElement.get("href")
        absolutePath = os.path.join(self.__directory, relativePath)
        return absolutePath
    @_to_bytes_list
    def provides(self):
        return self.__parseEntryCollection("provides")
    @_to_bytes_or_None
    def release(self):
        return self.__versionElement().get("rel")
    @_to_bytes_list
    def requires(self):
        return self.__parseEntryCollection("requires")
    @_to_bytes_list
    def conflicts(self):
        return self.__parseEntryCollection('conflicts')
    @_to_bytes_list
    def obsoletes(self):
        return self.__parseEntryCollection('obsoletes')
    @_to_bytes_list
    def recommends(self):
        return self.__parseEntryCollection('recommends')
    @_to_bytes_list
    def suggests(self):
        return self.__parseEntryCollection('suggests')
    @_to_bytes_list
    def supplements(self):
        return self.__parseEntryCollection('supplements')
    @_to_bytes_list
    def enhances(self):
        return self.__parseEntryCollection('enhances')
    def canonname(self):
        """Canonical rpm filename (name-version-release.arch) for this package."""
        if self.release() is None:
            release = None
        else:
            release = self.release()
        return rpmquery.RpmQuery.filename(self.name(), None, self.version(), release, self.arch())
    def gettag(self, tag):
        # implement me, if needed
        return None
    def vercmp(self, other):
        """Compare epoch:version-release against *other*; <0, 0 or >0 like rpmvercmp."""
        # if either self.epoch() or other.epoch() is None, the vercmp will do
        # the correct thing because one is transformed into b'None' and the
        # other one into b"b'<epoch>'" (and 'b' is greater than 'N')
        res = rpmquery.RpmQuery.rpmvercmp(str(self.epoch()).encode(), str(other.epoch()).encode())
        if res != 0:
            return res
        res = rpmquery.RpmQuery.rpmvercmp(self.version(), other.version())
        if res != 0:
            return res
        res = rpmquery.RpmQuery.rpmvercmp(self.release(), other.release())
        return res
    @_to_bytes_or_None
    def version(self):
        return self.__versionElement().get("ver")
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from assertpy import assert_that
from pcluster.api.models import ListImagesResponseContent
from pcluster.cli.entrypoint import run
from pcluster.cli.exceptions import APIOperationException
class TestListImagesCommand:
    """CLI tests for the `pcluster list-images` command."""
    def test_helper(self, test_datadir, run_cli, assert_out_err):
        # --help must print the expected usage text and nothing on stderr.
        command = ["pcluster", "list-images", "--help"]
        run_cli(command, expect_failure=False)
        assert_out_err(
            expected_out=(test_datadir / "pcluster-help.txt").read_text().strip(),
            expected_err="",
        )
    @pytest.mark.parametrize(
        "args, error_message",
        [
            ([""], "error: the following arguments are required: --image-status"),
            (
                ["--image-status"],
                "error: argument --image-status: expected one argument",
            ),
            (
                ["--image-status", "invalid"],
                "argument --image-status: invalid choice: 'invalid' (choose from 'AVAILABLE', 'PENDING', 'FAILED')",
            ),
            (
                ["--image-status", "AVAILABLE", "--invalid"],
                "Invalid arguments ['--invalid']",
            ),
            (
                ["--image-status", "AVAILABLE", "--region", "eu-west-"],
                "Bad Request: invalid or unsupported region 'eu-west-'",
            ),
        ],
    )
    def test_invalid_args(self, args, error_message, run_cli, capsys):
        # Each invalid invocation must fail and surface the expected message
        # on stdout or stderr.
        command = ["pcluster", "list-images"] + args
        run_cli(command, expect_failure=True)
        out, err = capsys.readouterr()
        assert_that(out + err).contains(error_message)
    def METHOD_NAME(self, mocker):
        """A successful list-images call returns the API response as a plain dict."""
        response_dict = {
            "images": [
                {
                    "imageId": "aws-parallelcluster-3-0-0-amzn2-hvm-x86-64-202107121836",
                    "imageBuildStatus": "BUILD_COMPLETE",
                    "region": "us-east-2",
                    "version": "3.0.0",
                },
                {
                    # "imageId": "dlami-aws-parallelcluster-3-0-0-amzn2-hvm-x86-64-202106181651",
                    "imageId": "dlami-aws-parallelcluster-3-0-0-truncated",
                    "imageBuildStatus": "BUILD_COMPLETE",
                    "region": "us-east-2",
                    "version": "3.0.0",
                },
            ]
        }
        response = ListImagesResponseContent().from_dict(response_dict)
        list_images_mock = mocker.patch(
            "pcluster.api.controllers.image_operations_controller.list_images",
            return_value=response,
            autospec=True,
        )
        out = run(["list-images", "--image-status", "AVAILABLE"])
        assert_that(out).is_equal_to(response_dict)
        assert_that(list_images_mock.call_args).is_length(2)  # this is due to the decorator on list_clusters
        expected_args = {"region": None, "next_token": None, "image_status": "AVAILABLE"}
        list_images_mock.assert_called_with(**expected_args)
    def test_error(self, mocker):
        """An API error response must surface as APIOperationException with the payload."""
        api_response = {"message": "error"}, 400
        mocker.patch(
            "pcluster.api.controllers.image_operations_controller.list_images",
            return_value=api_response,
            autospec=True,
        )
        with pytest.raises(APIOperationException) as exc_info:
            command = [
                "list-images",
                "--region",
                "eu-west-1",
                "--image-status",
                "AVAILABLE",
            ]
            run(command)
        assert_that(exc_info.value.data).is_equal_to(api_response[0])
from datetime import datetime, timedelta
from loguru import logger
from flexget import plugin
from flexget.event import event
from flexget.manager import Session
from flexget.utils.tools import parse_timedelta
from . import db
# DB schema version for the failed-entries table.
SCHEMA_VER = 3
# Hard cap: entries that have failed more than this are never retried again.
FAIL_LIMIT = 100
# Scope the module logger under the plugin name.
logger = logger.bind(name='failed')
class PluginFailed:
    """
    Records entry failures and stores them for trying again after a certain interval.
    Rejects them after they have failed too many times.
    """

    schema = {
        "oneOf": [
            # Allow retry_failed: no form to turn off plugin altogether
            {"type": "boolean"},
            {
                "type": "object",
                "properties": {
                    "retry_time": {"type": "string", "format": "interval", "default": "1 hour"},
                    "max_retries": {
                        "type": "integer",
                        "minimum": 0,
                        "maximum": FAIL_LIMIT,
                        "default": 3,
                    },
                    "retry_time_multiplier": {
                        # Allow turning off the retry multiplier with 'no' as well as 1
                        "oneOf": [{"type": "number", "minimum": 0}, {"type": "boolean"}],
                        "default": 1.5,
                    },
                },
                "additionalProperties": False,
            },
        ]
    }

    def METHOD_NAME(self, config):
        """Normalize `config` to a dict with all defaults filled in."""
        if not isinstance(config, dict):
            config = {}
        config.setdefault('retry_time', '1 hour')
        config.setdefault('max_retries', 3)
        if config.get('retry_time_multiplier', True) is True:
            # If multiplier is not specified, or is specified as True, use the default
            config['retry_time_multiplier'] = 1.5
        else:
            # If multiplier is False, turn it off
            config['retry_time_multiplier'] = 1
        return config

    def retry_time(self, fail_count, config):
        """Return the timedelta an entry that has failed `fail_count` times before should wait before being retried."""
        base_retry_time = parse_timedelta(config['retry_time'])
        # Timedeltas do not allow floating point multiplication. Convert to seconds and then back to avoid this.
        base_retry_secs = base_retry_time.days * 86400 + base_retry_time.seconds
        retry_secs = base_retry_secs * (config['retry_time_multiplier'] ** fail_count)
        # prevent OverflowError: date value out of range, cap to 30 days
        # (renamed from `max`, which shadowed the builtin of the same name)
        max_retry_secs = 60 * 60 * 24 * 30
        retry_secs = min(retry_secs, max_retry_secs)
        return timedelta(seconds=retry_secs)

    @plugin.priority(plugin.PRIORITY_LAST)
    def on_task_input(self, task, config):
        """Attach the failure handler to every entry produced by the task."""
        if config is False:
            return
        config = self.METHOD_NAME(config)
        for entry in task.all_entries:
            entry.on_fail(self.add_failed, config=config)

    def add_failed(self, entry, reason=None, config=None, **kwargs):
        """Adds entry to internal failed list, displayed with --failed"""
        # Make sure reason is a string, in case it is set to an exception instance
        # NOTE(review): when reason is None this yields 'None', not 'Unknown'
        # (str(None) is truthy) -- presumably tolerated upstream; confirm
        # before changing stored failure reasons.
        reason = str(reason) or 'Unknown'
        with Session() as session:
            # query item's existence
            item = (
                session.query(db.FailedEntry)
                .filter(db.FailedEntry.title == entry['title'])
                .filter(db.FailedEntry.url == entry['original_url'])
                .first()
            )
            if not item:
                item = db.FailedEntry(entry['title'], entry['original_url'], reason)
                item.count = 0
            if item.count > FAIL_LIMIT:
                logger.error(
                    "entry with title '{}' has failed over {} times", entry['title'], FAIL_LIMIT
                )
                return
            retry_time = self.retry_time(item.count, config)
            item.retry_time = datetime.now() + retry_time
            item.count += 1
            item.tof = datetime.now()
            item.reason = reason
            session.merge(item)
            logger.debug('Marking {} in failed list. Has failed {} times.', item.title, item.count)
            if item.count <= config['max_retries']:
                # Queue a backlog retry and re-run the task for this plugin.
                plugin.get('backlog', self).add_backlog(
                    entry.task, entry, amount=retry_time, session=session
                )
                entry.task.rerun(plugin='retry_failed')

    @plugin.priority(plugin.PRIORITY_FIRST)
    def on_task_filter(self, task, config):
        """Reject entries that failed too often or are still inside their back-off window."""
        if config is False:
            return
        config = self.METHOD_NAME(config)
        max_count = config['max_retries']
        for entry in task.entries:
            item = (
                task.session.query(db.FailedEntry)
                .filter(db.FailedEntry.title == entry['title'])
                .filter(db.FailedEntry.url == entry['original_url'])
                .first()
            )
            if item:
                if item.count > max_count:
                    entry.reject(
                        f'Has already failed {item.count} times in the past. (failure reason: {item.reason})'
                    )
                elif item.retry_time and item.retry_time > datetime.now():
                    # f-string for consistency with the reject message above
                    # (identical output to the previous %-formatting).
                    entry.reject(
                        f'Waiting before retrying entry which has failed in the past. (failure reason: {item.reason})'
                    )
@event('plugin.register')
def register_plugin():
    """Register PluginFailed as the builtin 'retry_failed' plugin."""
    # Fix: removed stray trailing '|' table-delimiter junk that broke the line.
    plugin.register(PluginFailed, 'retry_failed', builtin=True, api_ver=2)
# pylint: disable=C0301,W0201
from __future__ import annotations
from struct import Struct
from typing import Union, Any, TYPE_CHECKING
import numpy as np
from pyNastran.op2.op2_helper import polar_to_real_imag
from pyNastran.op2.op2_interface.op2_reader import mapfmt
if TYPE_CHECKING: # pragma: no cover
from pyNastran.op2.op2 import OP2
def METHOD_NAME(op2: OP2, obj: Any, flag: str,
                data: bytes, nnodes: int, ntotal: int) -> int:
    """Parse `nnodes` static (real) records from `data` into `obj`.

    Returns the number of bytes consumed.
    """
    struct_real = Struct(mapfmt(op2._endian + b'2i6f', op2.size))
    dt = np.nan  # static results carry no load-step value
    offset = 0
    for _ in range(nnodes):
        record = struct_real.unpack(data[offset:offset + ntotal])
        eid_device, grid_type, tx, ty, tz, rx, ry, rz = record
        eid = eid_device // 10
        if op2.is_debug_file:
            op2.binary_debug.write('  %s=%i; %s\n' % (flag, eid, str(record)))
        obj.add_sort1(dt, eid, grid_type, tx, ty, tz, rx, ry, rz)
        offset += ntotal
    return offset
def read_real_table_sort1(op2: OP2, obj: Any, dt: Union[int, float], flag: str,
                          data: bytes, nnodes: int, ntotal: int) -> int:
    """Parse `nnodes` real SORT1 records from `data` into `obj`; return bytes consumed."""
    assert nnodes > 0, nnodes
    struct_real = Struct(mapfmt(op2._endian + b'2i6f', op2.size))
    offset = 0
    for _ in range(nnodes):
        record = struct_real.unpack(data[offset:offset + ntotal])
        eid_device, grid_type, tx, ty, tz, rx, ry, rz = record
        # caused by an op2 writer bug with int64 numbers being downcast directly to float32
        assert grid_type != 1065353216, record
        eid = eid_device // 10
        if op2.is_debug_file:
            op2.binary_debug.write('  %s=%i; %s\n' % (flag, eid, str(record)))
        obj.add_sort1(dt, eid, grid_type, tx, ty, tz, rx, ry, rz)
        offset += ntotal
    return offset
def read_complex_table_sort1_mag(op2: OP2, obj: Any, dt: Union[int, float], flag: str,
                                 data: bytes, nnodes: int, ntotal: int) -> int:
    """Parse `nnodes` complex (magnitude/phase) SORT1 records into `obj`; return bytes consumed."""
    struct_complex = Struct(mapfmt(op2._endian + b'2i12f', op2.size))
    offset = 0
    for _ in range(nnodes):
        record = struct_complex.unpack(data[offset:offset + ntotal])
        (eid_device, grid_type, txr, tyr, tzr, rxr, ryr, rzr,
         txi, tyi, tzi, rxi, ryi, rzi) = record
        eid = eid_device // 10
        if op2.is_debug_file:
            op2.binary_debug.write('  %s=%i %s\n' % (flag, eid, str(record)))
        # convert each (magnitude, phase) pair to a complex value
        tx = polar_to_real_imag(txr, txi)
        ty = polar_to_real_imag(tyr, tyi)
        tz = polar_to_real_imag(tzr, tzi)
        rx = polar_to_real_imag(rxr, rxi)
        ry = polar_to_real_imag(ryr, ryi)
        rz = polar_to_real_imag(rzr, rzi)
        obj.add_sort1(dt, eid, grid_type, tx, ty, tz, rx, ry, rz)
        offset += ntotal
    return offset
def read_complex_table_sort1_imag(op2: OP2, obj: Any, dt: Union[int, float], flag: str,
                                  data: bytes, nnodes: int, ntotal: int) -> int:
    """Parse `nnodes` complex (real/imaginary) SORT1 records into `obj`; return bytes consumed."""
    struct_complex = Struct(mapfmt(op2._endian + b'2i12f', op2.size))
    assert op2.obj is not None
    assert nnodes > 0
    offset = 0
    for _ in range(nnodes):
        record = struct_complex.unpack(data[offset:offset + ntotal])
        (eid_device, grid_type, txr, tyr, tzr, rxr, ryr, rzr,
         txi, tyi, tzi, rxi, ryi, rzi) = record
        eid = eid_device // 10
        if op2.is_debug_file:
            op2.binary_debug.write('  %s=%i %s\n' % (flag, eid, str(record)))
        # pair real/imaginary components directly into complex values
        tx, ty, tz = complex(txr, txi), complex(tyr, tyi), complex(tzr, tzi)
        rx, ry, rz = complex(rxr, rxi), complex(ryr, ryi), complex(rzr, rzi)
        obj.add_sort1(dt, eid, grid_type, tx, ty, tz, rx, ry, rz)
        offset += ntotal
    return offset
def read_complex_table_sort2_imag(op2: OP2, obj: Any, node_id: int,
                                  flag: str, flag_type: str,
                                  data: bytes, nnodes: int, ntotal: int) -> int:
    """Parse ``nnodes`` complex (real/imaginary) SORT2 records for one node.

    Each record starts with the analysis-code value (unpacked as ``freq``)
    instead of an element id.  Returns the number of bytes consumed.
    """
    n = 0
    #ntotal = 56 # 14 * 4
    fmt = mapfmt(op2._endian + op2._analysis_code_fmt + b'i12f', op2.size)
    s = Struct(fmt)
    assert op2.obj is not None
    assert nnodes > 0
    #assert ndata % ntotal == 0
    # Precompute the debug line template: only the record varies per iteration.
    binary_debug_fmt = '  %s=%s %%s\n' % (flag, flag_type)
    for unused_inode in range(nnodes):
        edata = data[n:n+ntotal]
        out = s.unpack(edata)
        (freq, grid_type, txr, tyr, tzr, rxr, ryr, rzr,
         txi, tyi, tzi, rxi, ryi, rzi) = out
        if op2.is_debug_file:
            op2.binary_debug.write(binary_debug_fmt % (freq, str(out)))
        # Pair real/imaginary components into complex translations/rotations.
        tx = complex(txr, txi)
        ty = complex(tyr, tyi)
        tz = complex(tzr, tzi)
        rx = complex(rxr, rxi)
        ry = complex(ryr, ryi)
        rz = complex(rzr, rzi)
        obj.add_sort2(freq, node_id, grid_type, tx, ty, tz, rx, ry, rz)
        n += ntotal
    return n
def read_complex_table_sort2_mag(op2: OP2, obj: Any, node_id: int,
                                 flag: str, flag_type: str,
                                 data: bytes, nnodes: int, ntotal: int) -> int:
    """Parse ``nnodes`` complex (magnitude/phase) SORT2 records for one node.

    Like :func:`read_complex_table_sort2_imag`, but the 12 floats are
    (magnitude, phase) pairs converted via ``polar_to_real_imag``.
    Returns the number of bytes consumed.
    """
    n = 0
    s = Struct(mapfmt(op2._endian + op2._analysis_code_fmt + b'i12f', op2.size))
    # Precompute the debug line template: only the record varies per iteration.
    binary_debug_fmt = '  %s=%s %%s\n' % (flag, flag_type)
    for unused_inode in range(nnodes):
        edata = data[n:n+ntotal]
        out = s.unpack(edata)
        (freq, grid_type, txr, tyr, tzr, rxr, ryr, rzr,
         txi, tyi, tzi, rxi, ryi, rzi) = out
        if op2.is_debug_file:
            op2.binary_debug.write(binary_debug_fmt % (freq, str(out)))
        # Convert each (magnitude, phase) pair to a complex value.
        tx = polar_to_real_imag(txr, txi)
        ty = polar_to_real_imag(tyr, tyi)
        tz = polar_to_real_imag(tzr, tzi)
        rx = polar_to_real_imag(rxr, rxi)
        ry = polar_to_real_imag(ryr, ryi)
        rz = polar_to_real_imag(rzr, rzi)
        obj.add_sort2(freq, node_id, grid_type, tx, ty, tz, rx, ry, rz)
        n += ntotal
    return n
def read_real_table_sort2(self: OP2, obj: Any, flag: str, nid: int,
                          data: bytes, nnodes: int, ntotal: int) -> int:
    """Parse ``nnodes`` real SORT2 records for one node; return bytes consumed.

    NOTE(review): unlike the sibling readers, the OP2 parameter here is named
    ``self`` even though this is a free function; the name is kept for call
    compatibility.  Fix applied: removed stray trailing '|' junk on the final
    line and dead commented-out debug code.
    """
    n = 0
    assert nnodes > 0
    fmt = mapfmt(self._endian + self._analysis_code_fmt + b'i6f', self.size)
    structi = Struct(fmt)
    for unused_inode in range(nnodes):
        edata = data[n:n+ntotal]
        out = structi.unpack(edata)
        (dt, grid_type, tx, ty, tz, rx, ry, rz) = out
        if self.is_debug_file:
            self.binary_debug.write(
                f'  nid={nid} {flag}={dt} ({type(dt)}); {str(out)}\n')
        obj.add_sort2(dt, nid, grid_type, tx, ty, tz, rx, ry, rz)
        n += ntotal
    return n
import io
import os
import torchvision
import webdataset as wds
from PIL import Image
from aistore.sdk import Client
from torch.utils.data import IterableDataset
from torch.utils.data.dataset import T_co
# AIStore cluster endpoint, read from the environment (may be None if unset).
AIS_ENDPOINT = os.getenv("AIS_ENDPOINT")
# Source bucket holding the input tar shards.
bucket_name = "images"
# Name under which the WebDataset ETL is registered on the cluster.
etl_name = "wd-transform"
def show_image(image_data):
    """Decode raw image bytes and display them with the default image viewer."""
    buffer = io.BytesIO(image_data)
    with Image.open(buffer) as decoded:
        decoded.show()
def wd_etl(object_url):
    """ETL entry point: read a WebDataset shard from *object_url*, preprocess
    each sample's image and trimap, and return the re-packed tar as bytes.
    """
    def METHOD_NAME(img):
        # Serialize a PIL image back to JPEG bytes (samples are stored as raw bytes).
        buf = io.BytesIO()
        img = img.convert("RGB")
        img.save(buf, format="JPEG")
        return buf.getvalue()
    def process_trimap(trimap_bytes):
        # Center-crop the trimap and re-encode it as JPEG bytes.
        image = Image.open(io.BytesIO(trimap_bytes))
        preprocessing = torchvision.transforms.Compose(
            [
                torchvision.transforms.CenterCrop(350),
                torchvision.transforms.Lambda(METHOD_NAME)
            ]
        )
        return preprocessing(image)
    def process_image(image_bytes):
        # Center-crop, normalize (ImageNet stats), and re-encode as JPEG bytes.
        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
        preprocessing = torchvision.transforms.Compose(
            [
                torchvision.transforms.CenterCrop(350),
                torchvision.transforms.ToTensor(),
                # Means and stds from ImageNet
                torchvision.transforms.Normalize(
                    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                ),
                torchvision.transforms.ToPILImage(),
                torchvision.transforms.Lambda(METHOD_NAME),
            ]
        )
        return preprocessing(image)
    # Initialize a WD object from the AIS URL
    dataset = wds.WebDataset(object_url)
    # Map the files for each individual sample to the appropriate processing function
    processed_shard = dataset.map_dict(**{"image.jpg": process_image, "trimap.png": process_trimap})
    # Write the output to a memory buffer and return the value
    buffer = io.BytesIO()
    with wds.TarWriter(fileobj=buffer) as dst:
        for sample in processed_shard:
            dst.write(sample)
    return buffer.getvalue()
def create_wd_etl(client):
    """Register the WebDataset transform function as an ETL job on the cluster."""
    etl = client.etl(etl_name)
    etl.init_code(
        transform=wd_etl,
        preimported_modules=["torch"],
        dependencies=["webdataset", "pillow", "torch", "torchvision"],
        communication_type="hpull",
        transform_url=True,
    )
class LocalTarDataset(IterableDataset):
    """
    Builds a PyTorch IterableDataset from bytes in memory as if was read from a URL by WebDataset. This lets us
    initialize a WebDataset Pipeline without writing to local disk and iterate over each record from a shard.
    """

    def __getitem__(self, index) -> T_co:
        # Random access into a tar stream is unsupported; only iteration works.
        # Fix: `raise NotImplemented` raised a TypeError (NotImplemented is a
        # sentinel value, not an exception) — use NotImplementedError instead.
        raise NotImplementedError

    def __init__(self, input_bytes):
        # Mimic the {"url", "stream"} records WebDataset's tar iterators expect.
        self.data = [{"url": "input_data", "stream": io.BytesIO(input_bytes)}]

    def __iter__(self):
        files = wds.tariterators.tar_file_expander(self.data)
        samples = wds.tariterators.group_by_keys(files)
        return samples
def read_object_tar(shard_data):
    """Open an in-memory tar shard and display the image from its first sample."""
    dataset = LocalTarDataset(shard_data)
    first_sample = next(iter(dataset))
    show_image(first_sample.get('image.jpg'))
def transform_object_inline():
    """Fetch a single shard with the ETL applied on the fly and display its first image."""
    shard_object = client.bucket(bucket_name).object("samples-00.tar")
    # Get object contents with ETL applied
    transformed = shard_object.get(etl_name=etl_name).read_all()
    read_object_tar(transformed)
def transform_bucket_offline():
    """Transform the whole source bucket into a destination bucket, then inspect one shard."""
    dest_bucket = client.bucket("processed-samples").create(exist_ok=True)
    # Run the bucket-to-bucket transform job and block until it finishes.
    job_id = client.bucket(bucket_name).transform(to_bck=dest_bucket, etl_name=etl_name)
    client.job(job_id).wait(verbose=True)
    result_shard = dest_bucket.object("samples-00.tar").get().read_all()
    read_object_tar(result_shard)
if __name__ == "__main__":
    # Connect to the cluster, register the ETL, then run the inline and offline demos.
    # Fix applied: removed stray trailing '|' table-delimiter junk on the last line.
    client = Client(AIS_ENDPOINT)
    # NOTE(review): image_bucket is never used below — confirm whether it was
    # meant to be passed to one of the demo functions.
    image_bucket = client.bucket(bucket_name)
    create_wd_etl(client)
    transform_object_inline()
    transform_bucket_offline()
from addressbase.models import Address, UprnToCouncil
from councils.tests.factories import CouncilFactory
from data_importers.tests.stubs import stub_addressimport
from django.test import TestCase
# High-level functional tests for import scripts
class ImporterTest(TestCase):
    """High-level functional tests for the stub address-import command.

    Fix applied: removed stray trailing '|' table-delimiter junk on the final line.
    """

    # Options passed to the management command under test.
    opts = {"nochecks": True, "verbosity": 0, "include_past_elections": True}

    def METHOD_NAME(self, addressbase, uprns, addresses_name):
        """Seed Address/UprnToCouncil fixtures, then run the stub import command."""
        for address in addressbase:
            Address.objects.update_or_create(**address)
        for uprn in uprns:
            UprnToCouncil.objects.update_or_create(pk=uprn, lad="X01000000")
        CouncilFactory(pk="ABC", identifiers=["X01000000"])
        cmd = stub_addressimport.Command()
        cmd.addresses_name = addresses_name
        cmd.handle(**self.opts)

    def test_duplicate_uprns(self):
        """
        In the csv there are two matching uprns with different polling station ids.
        Despite one appearing in addressbase, neither should be imported.
        """
        test_params = {
            "uprns": ["1", "2", "6"],
            "addressbase": [
                {
                    "address": "Another Haringey Park, London",
                    "uprn": "1",
                    "postcode": "N8 8NM",
                },
                {"address": "Haringey Park, London", "uprn": "2", "postcode": "N8 9JG"},
                {
                    "address": "80 Pine Vale Cres, Bournemouth",
                    "uprn": "6",
                    "postcode": "BH10 6BJ",
                },
            ],
            "addresses_name": "duplicate_uprns.csv",
        }
        self.METHOD_NAME(**test_params)
        imported_uprns = (
            UprnToCouncil.objects.filter(lad="X01000000")
            .exclude(polling_station_id="")
            .order_by("uprn")
            .values_list("uprn", "polling_station_id")
        )
        self.assertEqual(1, len(imported_uprns))
        expected = {("6", "2")}
        self.assertEqual(set(imported_uprns), expected)

    def test_uprn_not_in_addressbase(self):
        """uprn does not appear in addressbase data, or in UprnToCouncil table"""
        test_params = {
            "uprns": ["6"],
            "addressbase": [
                {"address": "3 Factory Rd, Poole", "uprn": "4", "postcode": "BH16 5HT"},
                {
                    "address": "80 Pine Vale Cres, Bournemouth",
                    "uprn": "6",
                    "postcode": "BH10 6BJ",
                },
            ],
            "addresses_name": "uprn_missing.csv",
        }
        self.METHOD_NAME(**test_params)
        imported_uprns = (
            UprnToCouncil.objects.filter(lad="X01000000")
            .exclude(polling_station_id="")
            .order_by("uprn")
            .values_list("uprn", "polling_station_id")
        )
        self.assertEqual(1, len(imported_uprns))
        expected = {("6", "2")}
        self.assertEqual(set(imported_uprns), expected)

    def test_uprn_assigned_to_wrong_council(self):
        """Uprn exists but we've located it in a different council in UprnToCouncil table"""
        test_params = {
            "uprns": ["6"],
            "addressbase": [
                {"address": "3 Factory Rd, Poole", "uprn": "4", "postcode": "BH16 5HT"},
                {
                    "address": "80 Pine Vale Cres, Bournemouth",
                    "uprn": "6",
                    "postcode": "BH10 6BJ",
                },
            ],
            "addresses_name": "uprn_missing.csv",
        }
        self.METHOD_NAME(**test_params)
        # NOTE(review): this row is created *after* the import has already run,
        # so it cannot influence the result asserted below — confirm whether it
        # was meant to be part of the fixture set up before METHOD_NAME.
        UprnToCouncil.objects.update_or_create(pk=4, lad="X01000002")
        imported_uprns = (
            UprnToCouncil.objects.filter(lad="X01000000")
            .exclude(polling_station_id="")
            .order_by("uprn")
            .values_list("uprn", "polling_station_id")
        )
        self.assertEqual(1, len(imported_uprns))
        expected = {("6", "2")}
        self.assertEqual(set(imported_uprns), expected)

    def test_postcode_mismatch(self):
        """Uprn exists but postcodes don't match"""
        test_params = {
            "uprns": ["4", "7"],
            "addressbase": [
                {"address": "3 Factory Rd, Poole", "uprn": "4", "postcode": "BH16 5HT"},
                {
                    "address": "4 Factory Rd, Poole",
                    "uprn": "7",
                    "postcode": "BH16 5HT",  # postcode is 'BH17 5HT' in csv
                },
            ],
            "addresses_name": "uprn_missing.csv",
        }
        self.METHOD_NAME(**test_params)
        imported_uprns = (
            UprnToCouncil.objects.filter(lad="X01000000")
            .exclude(polling_station_id="")
            .order_by("uprn")
            .values_list("uprn", "polling_station_id")
        )
        self.assertEqual(1, len(imported_uprns))
        expected = {("4", "1")}
        self.assertEqual(set(imported_uprns), expected)

    def test_address_import(self):
        """Happy path: every valid, matching row is assigned its polling station."""
        test_params = {
            "uprns": ["1", "3", "4", "5", "6", "7"],
            "addressbase": [
                {"address": "Haringey Park, London", "uprn": "1", "postcode": "N8 9JG"},
                # uprn '2' in addresses.csv but wasn't in addressbase so not in uprntocouncil either
                {
                    "address": "36 Abbots Park, London",
                    "uprn": "3",
                    "postcode": "SW2 3QD",
                },
                {"address": "3 Factory Rd, Poole", "uprn": "4", "postcode": "BH16 5HT"},
                {
                    "address": "5-6 Mickleton Dr, Southport",
                    "uprn": "5",
                    "postcode": "PR8 2QX",
                },
                {
                    "address": "80 Pine Vale Cres, Bournemouth",
                    "uprn": "6",
                    "postcode": "BH10 6BJ",
                },
                {
                    "address": "4 Factory Rd, Poole",
                    "uprn": "7",
                    "postcode": "BH16 5HT",  # postcode is 'BH17 5HT' in csv
                },
            ],
            "addresses_name": "addresses.csv",
        }
        self.METHOD_NAME(**test_params)
        imported_uprns = (
            UprnToCouncil.objects.filter(lad="X01000000")
            .exclude(polling_station_id="")
            .order_by("uprn")
            .values_list("uprn", "polling_station_id")
        )
        self.assertEqual(3, len(imported_uprns))
        expected = {("3", "3"), ("4", "1"), ("6", "2")}
        self.assertEqual(set(imported_uprns), expected)
# ----------------------------------------------------------------------------
# Copyright (C) 2021-2023 Deepchecks (https://www.deepchecks.com)
#
# This file is part of Deepchecks.
# Deepchecks is distributed under the terms of the GNU Affero General
# Public License (version 3 or later).
# You should have received a copy of the GNU Affero General Public License
# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------------
#
"""Tests for Mixed Nulls check"""
import pandas as pd
from hamcrest import assert_that, close_to, equal_to, has_entries, has_items, has_length, instance_of
from deepchecks.tabular.checks.data_integrity import ConflictingLabels
from deepchecks.tabular.dataset import Dataset
from tests.base.utils import equal_condition_result
def METHOD_NAME():
    """ConflictingLabels flags a dataset where every feature group has multiple labels."""
    # Arrange
    feature = [1, 1, 1, 2, 2, 2] * 100
    frame = pd.DataFrame({
        'col1': feature,
        'col2': feature,
        'col3': feature,
        'label': [1, 1, 2, 2, 3, 4] * 100,
    })
    frame = frame.astype({'col1': 'category'})
    dataset = Dataset(frame, label='label')
    # Act
    check_result = ConflictingLabels().run(dataset)
    # Assert
    assert_that(check_result.value, has_entries({
        'percent_of_conflicting_samples': equal_to(1),
        'samples_indices': instance_of(list)
    }))
    assert_that(check_result.display[1], has_length(2))
def test_label_ambiguity_empty():
    """ConflictingLabels reports nothing when every feature group has a single label."""
    # Arrange
    frame = pd.DataFrame({
        'col1': [1, 1, 1, 2, 2, 2] * 100,
        'col2': [1, 1, 1, 2, 2, 2] * 100,
        'col3': [1, 2, 1, 2, 2, 2] * 100,
        'label': [1, 2, 1, 1, 1, 1] * 100,
    })
    dataset = Dataset(frame, label='label')
    # Act
    check_result = ConflictingLabels().run(dataset)
    # Assert
    assert_that(check_result.value, has_entries({
        'percent_of_conflicting_samples': equal_to(0),
        'samples_indices': has_length(0)
    }))
    assert_that(check_result.display, has_length(0))
def test_label_ambiguity_mixed():
    """One of two feature groups conflicts, giving a 50% conflicting-sample ratio."""
    # Arrange
    feature = [1, 1, 1, 2, 2, 2] * 100
    frame = pd.DataFrame({
        'col1': feature,
        'col2': feature,
        'col3': feature,
        'label': [1, 1, 1, 1, 2, 1] * 100,
    })
    dataset = Dataset(frame, label='label')
    # Act
    check_result = ConflictingLabels().run(dataset)
    # Assert
    assert_that(check_result.value, has_entries({
        'percent_of_conflicting_samples': close_to(0.5, 0.01),
        'samples_indices': has_length(1)
    }))
    assert_that(
        check_result.display[1],
        has_length(1)
    )
def test_label_ambiguity_mixed_without_display():
    """with_display=False keeps the computed value but produces no display output."""
    # Arrange
    feature = [1, 1, 1, 2, 2, 2] * 100
    frame = pd.DataFrame({
        'col1': feature,
        'col2': feature,
        'col3': feature,
        'label': [1, 1, 1, 1, 2, 1] * 100,
    })
    dataset = Dataset(frame, label='label')
    # Act
    check_result = ConflictingLabels().run(dataset, with_display=False)
    # Assert
    assert_that(check_result.value, has_entries({
        'percent_of_conflicting_samples': close_to(0.5, 0.01),
        'samples_indices': has_length(1)
    }))
    assert_that(check_result.display, has_length(0))
def test_label_ambiguity_condition():
    """The default condition (<= 0% conflicting) fails on data with 50% conflicts."""
    # Arrange
    feature = [1, 1, 1, 2, 2, 2] * 100
    frame = pd.DataFrame({
        'col1': feature,
        'col2': feature,
        'col3': feature,
        'label': [1, 1, 1, 1, 2, 1] * 100,
    })
    dataset = Dataset(frame, label='label')
    conflict_check = ConflictingLabels().add_condition_ratio_of_conflicting_labels_less_or_equal()
    # Act
    check_result = conflict_check.run(dataset)
    decisions = conflict_check.conditions_decision(check_result)
    # Assert
    assert_that(decisions, has_items(
        equal_condition_result(is_pass=False,
                               details='Ratio of samples with conflicting labels: 50%',
                               name='Ambiguous sample ratio is less or equal to 0%')
    ))
def test_label_ambiguity_condition_pass():
    """A 70% threshold passes on data with a 50% conflicting-sample ratio."""
    # Arrange
    feature = [1, 1, 1, 2, 2, 2] * 100
    frame = pd.DataFrame({
        'col1': feature,
        'col2': feature,
        'col3': feature,
        'label': [1, 1, 1, 1, 2, 1] * 100,
    })
    dataset = Dataset(frame, label='label')
    conflict_check = ConflictingLabels().add_condition_ratio_of_conflicting_labels_less_or_equal(.7)
    # Act
    check_result = conflict_check.run(dataset)
    decisions = conflict_check.conditions_decision(check_result)
    # Assert
    assert_that(decisions, has_items(
        equal_condition_result(is_pass=True,
                               details='Ratio of samples with conflicting labels: 50%',
                               name='Ambiguous sample ratio is less or equal to 70%')
    ))
def test_label_ambiguity_single_column():
    """The check works with a single feature column.

    Fix applied: removed stray trailing '|' table-delimiter junk on the last line.
    """
    # Arrange
    data = {
        'col1': [1, 1, 1, 2, 2, 2]*100,
        'label': [1, 1, 2, 2, 3, 4]*100
    }
    dataframe = pd.DataFrame(data)
    ds = Dataset(dataframe, label='label')
    check = ConflictingLabels()
    # Act
    result = check.run(ds)
    # Assert
    assert_that(result.value, has_entries({
        'percent_of_conflicting_samples': equal_to(1),
        'samples_indices': instance_of(list)
    }))
import time
import unittest
import zmq
from westpa.work_managers.zeromq import ZMQWorker
from westpa.work_managers.zeromq.core import Message, Task, Result, TIMEOUT_MASTER_BEACON
from ..tsupport import (
random_int,
will_fail,
will_busyhang,
will_busyhang_uninterruptible,
ExceptionForTest,
identity,
)
from .zmq_tsupport import SETUP_WAIT, TEARDOWN_WAIT, BEACON_PERIOD
from .zmq_tsupport import ZMQTestBase
class TestZMQWorkerBasic(ZMQTestBase, unittest.TestCase):
    # endpoint_type = 'tcp'
    '''Tests for the core task dispersal/retrieval and shutdown operations
    (the parts of the WM that do not require ZMQWorker).

    Fix applied: removed stray trailing '|' table-delimiter junk on the final
    line; logic and timing are otherwise unchanged.'''

    def setUp(self):
        """Bind the master-side sockets, then start a ZMQWorker pointed at them."""
        super().setUp()
        self.rr_endpoint = self.make_endpoint()
        self.ann_endpoint = self.make_endpoint()
        # Need to bind ann_socket here in setup, because if we bind it during
        # tests, messages get lost.
        self.ann_socket = self.test_context.socket(zmq.PUB)
        self.ann_socket.bind(self.ann_endpoint)
        # If we're binding ann_socket, we might as well bind rr_socket
        self.rr_socket = self.test_context.socket(zmq.REP)
        self.rr_socket.bind(self.rr_endpoint)
        self.test_worker = ZMQWorker(self.rr_endpoint, self.ann_endpoint)
        self.test_worker.validation_fail_action = 'raise'
        self.test_worker.shutdown_timeout = 0.5
        self.test_worker.master_beacon_period = BEACON_PERIOD
        self.test_worker.startup()
        self.test_core.master_id = self.test_core.node_id
        time.sleep(SETUP_WAIT)

    def tearDown(self):
        """Signal shutdown and join the worker's comm thread before base teardown."""
        time.sleep(TEARDOWN_WAIT)
        self.test_worker.signal_shutdown()
        self.test_worker.comm_thread.join()
        super().tearDown()

    def send_task(self, task):
        """Announce task availability and hand *task* to the worker over REQ/REP."""
        self.test_core.send_message(self.ann_socket, Message.TASKS_AVAILABLE)
        msg = self.test_core.recv_message(self.rr_socket)
        assert msg.message == Message.TASK_REQUEST
        self.test_core.send_message(self.rr_socket, Message.TASK, payload=task)

    def METHOD_NAME(self):
        """Receive and acknowledge a RESULT message; return its Result payload."""
        msg = self.test_core.recv_message(self.rr_socket)
        self.test_core.send_ack(self.rr_socket, msg)
        assert msg.message == Message.RESULT
        assert isinstance(msg.payload, Result)
        return msg.payload

    def roundtrip_task(self, task):
        """Send *task* to the worker and return the Result it produces."""
        self.send_task(task)
        return self.METHOD_NAME()

    def test_meta(self):
        # setUp/tearDown alone must succeed
        pass

    def test_executor_alive(self):
        assert self.test_worker.executor_process.is_alive()

    def test_executor_shuts_down_immediately(self):
        self.test_worker.shutdown_executor()
        assert not self.test_worker.executor_process.is_alive()

    def test_shutdown_on_announcement(self):
        self.test_core.send_message(self.ann_socket, Message.SHUTDOWN)
        self.test_worker.join()
        assert not self.test_worker.executor_process.is_alive()

    def test_responds_to_task_avail(self):
        self.test_core.send_message(self.ann_socket, Message.TASKS_AVAILABLE)
        msg = self.test_core.recv_message(self.rr_socket)
        self.test_core.send_nak(self.rr_socket, msg)
        assert msg.message == Message.TASK_REQUEST

    def test_shutdown_on_master_disappearance(self):
        # Shrink the master-beacon timeout so the worker notices the silence fast.
        self.test_core.send_message(self.ann_socket, Message.RECONFIGURE_TIMEOUT, (TIMEOUT_MASTER_BEACON, 0.01))
        time.sleep(0.02)
        self.test_worker.join()
        assert not self.test_worker.executor_process.is_alive()

    def test_worker_processes_task(self):
        r = random_int()
        task = Task(identity, (r,), {})
        rsl = self.roundtrip_task(task)
        assert rsl.result == r

    def test_worker_processes_exception(self):
        task = Task(will_fail, (), {})
        rsl = self.roundtrip_task(task)
        assert isinstance(rsl.exception, ExceptionForTest)

    def test_hung_worker_interruptible(self):
        task = Task(will_busyhang, (), {})
        self.send_task(task)
        time.sleep(1.0)
        self.test_core.send_message(self.ann_socket, Message.SHUTDOWN)
        self.test_worker.join()

    def test_hung_worker_uninterruptible(self):
        task = Task(will_busyhang_uninterruptible, (), {})
        self.send_task(task)
        time.sleep(1.0)
        self.test_core.send_message(self.ann_socket, Message.SHUTDOWN)
        self.test_worker.join()
# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_vision_available():
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    """Helper that builds LevitImageProcessor configs and synthetic inputs for the test suite."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # Fill in the default size/crop dicts when not explicitly overridden.
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }

    def expected_output_image_shape(self, images):
        """Expected (channels, height, width) of processed outputs: the center-crop size."""
        return self.num_channels, self.crop_size["height"], self.crop_size["width"]

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Delegate to the shared prepare_image_inputs helper with this tester's dimensions."""
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    """Standard image-processor test wiring for LevitImageProcessor.

    Fix applied: removed stray trailing '|' table-delimiter junk on the final line.
    """

    # Resolved to None when vision dependencies are unavailable so tests skip cleanly.
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes all expected configuration attributes."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def METHOD_NAME(self):
        """`from_dict` honors both the stored defaults and kwargs overrides."""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple, Union
from github.Commit import Commit
from github.File import File
from github.GithubObject import CompletableGithubObject, _NotSetType
from github.Issue import Issue
from github.IssueComment import IssueComment
from github.IssueEvent import IssueEvent
from github.Label import Label
from github.Milestone import Milestone
from github.NamedUser import NamedUser
from github.PaginatedList import PaginatedList
from github.PullRequestComment import PullRequestComment
from github.PullRequestMergeStatus import PullRequestMergeStatus
from github.PullRequestPart import PullRequestPart
from github.PullRequestReview import PullRequestReview
from github.Team import Team
class PullRequest(CompletableGithubObject):
    """Typing stub describing the PullRequest API surface.

    Contains signatures only (bodies are ``...``).  Fix applied: removed stray
    trailing '|' table-delimiter junk on the final line.
    """

    def _initAttributes(self) -> None: ...
    def _useAttributes(self, attributes: Dict[str, Any]) -> None: ...
    def add_to_labels(self, *labels: Union[Label, str]) -> None: ...
    def add_to_assignees(self, *assignees: Union[NamedUser, str]) -> None: ...
    @property
    def additions(self) -> int: ...
    def as_issue(self) -> Issue: ...
    @property
    def assignee(self) -> NamedUser: ...
    @property
    def assignees(self) -> List[NamedUser]: ...
    @property
    def requested_reviewers(self) -> List[NamedUser]: ...
    @property
    def requested_teams(self) -> List[Team]: ...
    @property
    def METHOD_NAME(self) -> PullRequestPart: ...
    @property
    def body(self) -> str: ...
    @property
    def changed_files(self) -> int: ...
    @property
    def closed_at(self) -> Optional[datetime]: ...
    @property
    def comments(self) -> int: ...
    @property
    def comments_url(self) -> str: ...
    @property
    def commits(self) -> int: ...
    @property
    def commits_url(self) -> str: ...
    def create_comment(self, body: str, commit_id: Commit, path: str, position: int) -> PullRequestComment: ...
    def create_issue_comment(self, body: str) -> IssueComment: ...
    def create_review(
        self,
        commit: Commit = ...,
        body: Union[_NotSetType, str] = ...,
        event: Union[_NotSetType, str] = ...,
        comments: Union[_NotSetType, str] = ...,
    ) -> PullRequestReview: ...
    def create_review_comment(self, body: str, commit_id: Commit, path: str, position: int) -> PullRequestComment: ...
    def create_review_request(
        self,
        reviewers: Union[_NotSetType, List[str]] = ...,
        team_reviewers: Union[_NotSetType, List[str]] = ...,
    ) -> None: ...
    @property
    def created_at(self) -> datetime: ...
    def delete_labels(self) -> None: ...
    def delete_review_request(
        self,
        reviewers: Union[_NotSetType, List[str]] = ...,
        team_reviewers: Union[_NotSetType, List[str]] = ...,
    ) -> None: ...
    @property
    def deletions(self) -> int: ...
    @property
    def diff_url(self) -> str: ...
    @property
    def draft(self) -> bool: ...
    def edit(
        self,
        title: Union[str, _NotSetType] = ...,
        body: Union[str, _NotSetType] = ...,
        state: Union[str, _NotSetType] = ...,
        METHOD_NAME: Union[_NotSetType, str] = ...,
    ) -> None: ...
    def get_comment(self, id: int) -> PullRequestComment: ...
    def get_comments(
        self,
        sort: Union[_NotSetType, str] = ...,
        direction: Union[_NotSetType, str] = ...,
        since: Union[_NotSetType, datetime] = ...,
    ) -> PaginatedList[PullRequestComment]: ...
    def get_commits(self) -> PaginatedList[Commit]: ...
    def get_files(self) -> PaginatedList[File]: ...
    def get_issue_comment(self, id: int) -> IssueComment: ...
    def get_issue_comments(self) -> PaginatedList[IssueComment]: ...
    def get_issue_events(self) -> PaginatedList[IssueEvent]: ...
    def get_labels(self) -> PaginatedList[Label]: ...
    def get_review(self, id: int) -> PullRequestReview: ...
    def get_review_comment(self, id: int) -> PullRequestComment: ...
    def get_review_comments(
        self,
        sort: Union[_NotSetType, str] = ...,
        direction: Union[_NotSetType, str] = ...,
        since: Union[_NotSetType, datetime] = ...,
    ) -> PaginatedList[PullRequestComment]: ...
    def get_single_review_comments(self, id: int) -> PaginatedList[PullRequestComment]: ...
    def get_review_requests(
        self,
    ) -> Tuple[PaginatedList[NamedUser], PaginatedList[Team]]: ...
    def get_reviews(self) -> PaginatedList[PullRequestReview]: ...
    @property
    def head(self) -> PullRequestPart: ...
    @property
    def html_url(self) -> str: ...
    @property
    def id(self) -> int: ...
    def is_merged(self) -> bool: ...
    @property
    def issue_url(self) -> str: ...
    @property
    def labels(self) -> List[Label]: ...
    @property
    def maintainer_can_modify(self) -> bool: ...
    def merge(
        self,
        commit_message: Union[str, _NotSetType] = ...,
        commit_title: Union[str, _NotSetType] = ...,
        merge_method: Union[str, _NotSetType] = ...,
        sha: Union[str, _NotSetType] = ...,
    ) -> PullRequestMergeStatus: ...
    @property
    def merge_commit_sha(self) -> str: ...
    @property
    def mergeable(self) -> Optional[bool]: ...
    @property
    def mergeable_state(self) -> str: ...
    @property
    def merged(self) -> bool: ...
    @property
    def merged_at(self) -> datetime: ...
    @property
    def merged_by(self) -> NamedUser: ...
    @property
    def milestone(self) -> Milestone: ...
    @property
    def number(self) -> int: ...
    @property
    def patch_url(self) -> str: ...
    @property
    def rebaseable(self) -> bool: ...
    def remove_from_labels(self, label: Union[Label, str]) -> None: ...
    @property
    def review_comment_url(self) -> str: ...
    @property
    def review_comments(self) -> int: ...
    @property
    def review_comments_url(self) -> str: ...
    def set_labels(self, *labels: Union[Label, str]) -> None: ...
    @property
    def state(self) -> str: ...
    @property
    def title(self) -> str: ...
    @property
    def updated_at(self) -> datetime: ...
    @property
    def url(self) -> str: ...
    @property
    def user(self) -> NamedUser: ...
5,036 | get name indexed property | # Copyright 2023 Avaiga Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import re
import typing as t
from .._warnings import _warn
from .boolean import _is_boolean, _is_boolean_true
from .clientvarname import _to_camel_case
def _get_column_desc(columns: t.Dict[str, t.Any], key: str) -> t.Optional[t.Dict[str, t.Any]]:
return next((x for x in columns.values() if x.get("dfid") == key), None)
def METHOD_NAME(attributes: t.Dict[str, t.Any], name: str) -> t.Dict[str, t.Any]:
    """Collect indexed properties ``name[<index>]`` from *attributes*.

    Returns a dict mapping each ``<index>`` (typically a column id) to the
    attribute's value, e.g. ``{"style[col]": v}`` -> ``{"col": v}``.

    BUG FIX: *name* is now passed through ``re.escape`` before being embedded
    in the pattern; previously a name containing regex metacharacters (e.g.
    ``.``) could match unrelated attribute keys.
    """
    ret = {}
    index_re = re.compile(re.escape(name) + r"\[(.*)\]$")
    for key in attributes.keys():
        if m := index_re.match(key):
            ret[m.group(1)] = attributes.get(key)
    return ret
def _update_col_desc_from_indexed(
    attributes: t.Dict[str, t.Any], columns: t.Dict[str, t.Any], name: str, elt_name: str
):
    """Copy indexed ``name[column]`` attribute values onto the matching column
    descriptors (as camelCase keys), warning for unknown columns."""
    camel_name = _to_camel_case(name)
    for col_id, value in METHOD_NAME(attributes, name).items():
        col_desc = next((c for c in columns.values() if c.get("dfid") == col_id), None)
        if col_desc is None:
            _warn(f"{elt_name}: {name}[{col_id}] is not in the list of displayed columns.")
        elif col_desc.get(camel_name) is None:
            # Do not overwrite a value already set on the descriptor.
            col_desc[camel_name] = str(value)
def _enhance_columns(  # noqa: C901
    attributes: t.Dict[str, t.Any], hash_names: t.Dict[str, str], columns: t.Dict[str, t.Any], elt_name: str
):
    """Merge per-column indexed attributes (filter[c], editable[c], apply[c],
    style[c], tooltip[c], ...) into the column descriptors of *columns*.

    *hash_names* maps indexed attribute keys (e.g. "apply[col]") to the hashed
    client-side variable name used when the value is a callable.  Unknown
    column ids produce a warning; *columns* is mutated and returned.
    """
    # Simple string-valued per-column settings.
    _update_col_desc_from_indexed(attributes, columns, "nan_value", elt_name)
    _update_col_desc_from_indexed(attributes, columns, "width", elt_name)
    # filter[c] -> boolean "filter" flag; only truthy booleans enable it.
    filters = METHOD_NAME(attributes, "filter")
    for k, v in filters.items():
        if _is_boolean_true(v):
            if col_desc := _get_column_desc(columns, k):
                col_desc["filter"] = True
            else:
                _warn(f"{elt_name}: filter[{k}] is not in the list of displayed columns.")
    # editable[c] -> inverted into "notEditable" (front-end convention).
    editables = METHOD_NAME(attributes, "editable")
    for k, v in editables.items():
        if _is_boolean(v):
            if col_desc := _get_column_desc(columns, k):
                col_desc["notEditable"] = not _is_boolean_true(v)
            else:
                _warn(f"{elt_name}: editable[{k}] is not in the list of displayed columns.")
    # group_by[c] -> boolean "groupBy" flag.
    group_by = METHOD_NAME(attributes, "group_by")
    for k, v in group_by.items():
        if _is_boolean_true(v):
            if col_desc := _get_column_desc(columns, k):
                col_desc["groupBy"] = True
            else:
                _warn(f"{elt_name}: group_by[{k}] is not in the list of displayed columns.")
    # apply[c]: a callable is referenced through its hashed name, a string is
    # taken as a (predefined) function name; anything else is rejected.
    apply = METHOD_NAME(attributes, "apply")
    for k, v in apply.items():  # pragma: no cover
        if col_desc := _get_column_desc(columns, k):
            if callable(v):
                value = hash_names.get(f"apply[{k}]")
            elif isinstance(v, str):
                value = v.strip()
            else:
                _warn(f"{elt_name}: apply[{k}] should be a user or predefined function.")
                value = None
            if value:
                col_desc["apply"] = value
        else:
            _warn(f"{elt_name}: apply[{k}] is not in the list of displayed columns.")
    # style[c]: same callable/string handling; additionally a style value must
    # not collide with a column key.
    styles = METHOD_NAME(attributes, "style")
    for k, v in styles.items():  # pragma: no cover
        if col_desc := _get_column_desc(columns, k):
            if callable(v):
                value = hash_names.get(f"style[{k}]")
            elif isinstance(v, str):
                value = v.strip()
            else:
                value = None
            if value in columns.keys():
                _warn(f"{elt_name}: style[{k}]={value} cannot be a column's name.")
            elif value:
                col_desc["style"] = value
        else:
            _warn(f"{elt_name}: style[{k}] is not in the list of displayed columns.")
    # tooltip[c]: mirrors the style[c] handling.
    tooltips = METHOD_NAME(attributes, "tooltip")
    for k, v in tooltips.items():  # pragma: no cover
        if col_desc := _get_column_desc(columns, k):
            if callable(v):
                value = hash_names.get(f"tooltip[{k}]")
            elif isinstance(v, str):
                value = v.strip()
            else:
                value = None
            if value in columns.keys():
                _warn(f"{elt_name}: tooltip[{k}]={value} cannot be a column's name.")
            elif value:
                col_desc["tooltip"] = value
        else:
            _warn(f"{elt_name}: tooltip[{k}] is not in the list of displayed columns.")
    return columns
5,037 | setup | from s3prl import Container
from s3prl.corpus.voxceleb1sid import voxceleb1_for_utt_classification
from s3prl.dataset.base import DataPipe, SequentialDataPipe
from s3prl.dataset.common_pipes import RandomCrop, SetOutputKeys
from s3prl.dataset.utterance_classification_pipe import UtteranceClassificationPipe
from s3prl.nn import MeanPoolingLinear
from s3prl.sampler import FixedBatchSizeBatchSampler, MaxTimestampBatchSampler
from s3prl.task.utterance_classification_task import UtteranceClassificationTask
from s3prl.util.configuration import default_cfg
class SuperbSIDTrainPipe(DataPipe):
    """Training data pipe for SUPERB speaker identification.

    Chains utterance-classification preprocessing, a random crop bounded by
    *max_secs* seconds, and an output-key remap so the task reads the cropped
    waveform as ``x`` / ``x_len``.
    """

    def __init__(
        self,
        train_category_encoder: bool = False,
        max_secs: float = None,
    ) -> None:
        # NOTE(review): max_secs defaults to None although annotated float;
        # assumes RandomCrop accepts None (no cropping) -- confirm.
        self.pipes = SequentialDataPipe(
            UtteranceClassificationPipe(
                train_category_encoder=train_category_encoder,
            ),
            RandomCrop(max_secs=max_secs),
            SetOutputKeys(
                dict(
                    x="wav_crop",
                    x_len="wav_crop_len",
                )
            ),
        )

    def forward(self, dataset):
        """Apply the composed pipes to *dataset* and return the result."""
        dataset = self.pipes(dataset)
        return dataset
from .base import SuperbProblem
class SuperbSID(SuperbProblem):
    """SUPERB speaker identification (SID) problem.

    Configuration-only subclass: each stage method just forwards **cfg to the
    SuperbProblem implementation; the @default_cfg decorators supply the
    SID-specific defaults (VoxCeleb1 corpus, mean-pooling linear head, etc.).
    """

    @default_cfg(
        **SuperbProblem.METHOD_NAME.default_except(
            corpus=dict(
                CLS=voxceleb1_for_utt_classification,
                dataset_root="???",  # "???" marks a value the user must supply
            ),
            train_datapipe=dict(
                CLS=SuperbSIDTrainPipe,
                train_category_encoder=True,
                max_secs=8.0,
            ),
            train_sampler=dict(
                CLS=FixedBatchSizeBatchSampler,
                batch_size=8,
                shuffle=True,
            ),
            valid_datapipe=dict(
                CLS=UtteranceClassificationPipe,
            ),
            valid_sampler=dict(
                CLS=FixedBatchSizeBatchSampler,
                batch_size=1,
            ),
            test_datapipe=dict(
                CLS=UtteranceClassificationPipe,
            ),
            test_sampler=dict(
                CLS=FixedBatchSizeBatchSampler,
                batch_size=1,
            ),
            downstream=dict(
                CLS=MeanPoolingLinear,
                hidden_size=256,
            ),
            task=dict(
                CLS=UtteranceClassificationTask,
            ),
        )
    )
    @classmethod
    def METHOD_NAME(cls, **cfg):
        """Set up the SID problem: train/valid/test datasets & samplers and a
        task object.  (Original docstring said "IC problem" -- copy-paste.)"""
        super().METHOD_NAME(**cfg)

    @default_cfg(
        **SuperbProblem.train.default_except(
            optimizer=dict(
                CLS="torch.optim.Adam",
                lr=1.0e-4,
            ),
            trainer=dict(
                total_steps=200000,
                log_step=500,
                eval_step=5000,
                save_step=1000,
                gradient_clipping=1.0,
                gradient_accumulate_steps=4,
                valid_metric="accuracy",
                valid_higher_better=True,
            ),
        )
    )
    @classmethod
    def train(cls, **cfg):
        """Train the set-up problem with the train/valid datasets & samplers
        and the task object."""
        super().train(**cfg)

    @default_cfg(**SuperbProblem.inference.default_cfg)
    @classmethod
    def inference(cls, **cfg):
        """Run inference with the parent's default configuration."""
        super().inference(**cfg)

    @default_cfg(
        **SuperbProblem.run.default_except(
            stages=["setup", "train", "inference"],
            start_stage="setup",
            final_stage="inference",
            # workspace/resume are provided by run itself, so each stage's
            # defaults drop them here.
            METHOD_NAME=METHOD_NAME.default_cfg.deselect("workspace", "resume"),
            train=train.default_cfg.deselect("workspace", "resume"),
            inference=inference.default_cfg.deselect("workspace", "resume"),
        )
    )
    @classmethod
    def run(cls, **cfg):
        """Run setup -> train -> inference end to end."""
        super().run(**cfg)
5,038 | test matched | # -*- coding: utf-8 -*-
# Copyright (C) 2012-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import dnf.match_counter
import tests.support
from tests.support import mock
class PackageStub(tests.support.MockPackage):
    """Package test double carrying the searchable text fields
    (summary/url/description) that MatchCounter inspects."""

    @classmethod
    def several(cls, count):
        # Yield *count* fresh stubs with default field values.
        for _ in range(count):
            yield cls()

    def __init__(self, nevra='nevra-1-1.noarch', summary='summary'):
        super(PackageStub, self).__init__(nevra)
        self.summary = summary
        self.url = ''
        self.description = ''
class MatchCounterTest(tests.support.TestCase):
    """Unit tests for dnf.match_counter.MatchCounter and its helpers."""

    def test_canonize_string_set(self):
        # A smaller canonized needle set sorts before a larger one.
        a = ['f', 'p']
        b = ['p']
        self.assertLess(dnf.match_counter._canonize_string_set(b, 2),
                        dnf.match_counter._canonize_string_set(a, 2))

    def METHOD_NAME(self):
        # matched_needles/keys/haystacks report what was added for a package,
        # de-duplicating the repeated 'humbert' needle.
        pkg = tests.support.MockPackage("humbert-1-1.noarch")
        pkg.url = url = "http://humbert.com"
        pkg.summary = summary = "Glimpses of an incomparably more poignant bliss."
        counter = dnf.match_counter.MatchCounter()
        counter.add(pkg, 'summary', 'poignant')
        counter.add(pkg, 'url', 'humbert')
        counter.add(pkg, 'summary', 'humbert')
        self.assertCountEqual(counter.matched_needles(pkg),
                              ['humbert', 'poignant'])
        self.assertCountEqual(counter.matched_keys(pkg), ['url', 'summary'])
        self.assertCountEqual(counter.matched_haystacks(pkg), [url, summary])

    def test_sorted(self):
        # Packages sort by match quality: name > summary > url.
        counter = dnf.match_counter.MatchCounter()
        self.assertEqual(counter.sorted(), [])
        counter = dnf.match_counter.MatchCounter()
        pkg1, pkg2, pkg3 = PackageStub().several(3)
        counter.add(pkg1, 'name', '')
        counter.add(pkg2, 'summary', '')
        self.assertEqual(counter.sorted(), [pkg1, pkg2])
        counter.add(pkg3, 'url', '')
        self.assertEqual(counter.sorted(), [pkg1, pkg2, pkg3])
        self.assertEqual(counter.sorted(reverse=True), [pkg1, pkg2, pkg3])

    def test_sorted_with_needles(self):
        # the same needles should be listed together:
        counter = dnf.match_counter.MatchCounter()
        pkg1, pkg2, pkg3, pkg4 = PackageStub().several(4)
        counter.add(pkg1, 'summary', 'grin')
        counter.add(pkg2, 'summary', 'foolish')
        counter.add(pkg3, 'summary', 'grin')
        counter.add(pkg4, 'summary', 'grin')
        srt = counter.sorted()
        self.assertEqual(srt.index(pkg2), 1)
        # more unique needles is more than less unique needles:
        counter = dnf.match_counter.MatchCounter()
        counter.add(pkg1, 'summary', 'a')
        counter.add(pkg1, 'summary', 'b')
        counter.add(pkg2, 'summary', 'b')
        counter.add(pkg2, 'summary', 'b')
        self.assertSequenceEqual(counter.sorted(), (pkg1, pkg2))

    def test_sorted_limit(self):
        # limit_to restricts the result to the given packages.
        counter = dnf.match_counter.MatchCounter()
        pkg1, pkg2, pkg3 = PackageStub().several(3)
        counter.add(pkg1, 'name', '')
        counter.add(pkg2, 'url', '')
        counter.add(pkg3, 'description', '')
        self.assertSequenceEqual(counter.sorted(limit_to=[pkg1, pkg2]),
                                 (pkg1, pkg2))

    def test_sorted_exact_match(self):
        """Exactly matching the name beats name and summary non-exact match."""
        counter = dnf.match_counter.MatchCounter()
        pkg1 = PackageStub('wednesday-1-1.noarch', 'morning')
        pkg2 = PackageStub('wednesdaymorning-1-1.noarch', "5 o'clock")
        counter.add(pkg1, 'name', 'wednesday')
        counter.add(pkg2, 'name', 'wednesday')
        counter.add(pkg2, 'summary', 'clock')
        self.assertSequenceEqual(counter.sorted(), (pkg1, pkg2))

    def test_total(self):
        # len() counts distinct packages, total() counts individual matches.
        counter = dnf.match_counter.MatchCounter()
        counter.add(3, 'summary', 'humbert')
        counter.add(3, 'url', 'humbert')
        counter.add(20, 'summary', 'humbert')
        self.assertEqual(len(counter), 2)
        self.assertEqual(counter.total(), 3)

    def test_distance(self):
        pkg2 = tests.support.MockPackage('rust-and-stardust-1-2.x86_64')
        pkg1 = tests.support.MockPackage('rust-1-3.x86_64')
        counter = dnf.match_counter.MatchCounter()
        counter.add(pkg1, 'name', 'rust')
        counter.add(pkg2, 'name', 'rust')
        # 'rust-and-stardust' is a worse match for 'rust' than 'rust' itself
        self.assertSequenceEqual([x.name for x in counter.sorted()],
                                 ['rust', 'rust-and-stardust'])
5,039 | mekkatorque speech npc on load | #
# ArcEmu MMORPG Server
# Copyright (C) 2008-2023 <http://www.ArcEmu.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
#
import arcemu
MEKKATORQUE_SPEECH_TEXTS = [
# Ozzie
[
"What I want out of each and every one of you is a hard-target search of every refuelling station, residence, warehouse, farmhouse, henhouse, outhouse and doghouse in his area.",
"Your fugitive's name is Mekgineer Thermaplugg",
"Go get him!"
],
# Milli
[
"They may take our lives, but they'll never take...",
"... our INNOVATION!"
],
# Tog
[
"We Will not go quietly into the night! We will not vanish without a fight!",
"We're going to live on! We're going to survive! Today we celebrate...",
"...our Autonomy Day!"
]
]
MEKKATORQUE_REACTION_TEXTS = [
# Ozzie
"Hmm. I suppose it could work. But it could really use some more umph",
# Milli
"What? I don't even know what you are talking about! That's terrible!",
# Tog
"Horrible! Well, all right, maybe it just needs a little cleaning up?"
]
MEKKATORQUE_REACTION_SPELLS = [ 74154, 74155, 74156 ]
MEKKATORQUE_SPEECH_STATE = {
}
MEKKATORQUE_SPEECH_NPC_ID = 39712
def MekkaTorque_getTargetCreature( unit ):
    """Return the creature Mekkatorque addresses (his speech listener), or None.

    The dummy spell that summons Mekkatorque stores the listener's GUID as the
    unit's target, so we resolve it through the map manager.
    """
    listener_guid = unit.getTargetGUID()
    if listener_guid == 0:
        return None
    listener = unit.getMapMgr().getUnit( listener_guid )
    if listener is None:
        return None
    return listener.toCreature()
def MekkaTorque_getCreator( unit ):
    """Return the player that summoned Mekkatorque, or None.

    The summoner's GUID is stored in the UNIT_FIELD_CREATEDBY field.
    """
    creator_guid = unit.getUInt64Value( arcemu.UNIT_FIELD_CREATEDBY )
    if creator_guid == 0:
        return None
    return unit.getMapMgr().getUnit( creator_guid )
def MekkaTorque_speechReaction( unit, idx ):
    """Make the listener NPC say reaction text *idx* and cast the matching
    quest-credit spell on the summoning player."""
    listener = MekkaTorque_getTargetCreature( unit )
    if listener is None:
        return
    listener.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, MEKKATORQUE_REACTION_TEXTS[ idx ] )
    player = MekkaTorque_getCreator( unit )
    if player is not None:
        listener.castSpell( MEKKATORQUE_REACTION_SPELLS[ idx ], False, player )
def Mekkatorque_Speech_npc_onAIUpdate( unit, event ):
    """Per-tick state machine driving Mekkatorque's speech.

    State 0 picks a speech track from the listener's NPC id; states 1-3 / 5-6 /
    8-10 emit the speech lines one per tick; states 4 / 7 / 11 trigger the
    listener's reaction; state 200 stops updates and despawns the unit.
    State is keyed per-unit GUID in the module-level MEKKATORQUE_SPEECH_STATE.
    """
    guid = unit.getGUID()
    if guid not in MEKKATORQUE_SPEECH_STATE:
        MEKKATORQUE_SPEECH_STATE[ guid ] = 0
    state = MEKKATORQUE_SPEECH_STATE[ guid ]
    if state == 0:
        # BUG FIX: the original called the undefined MekkaTorque_getCreature(
        # unit, unit.getTargetGUID() ), which raised NameError on the first
        # tick; the helper defined above is MekkaTorque_getTargetCreature(unit).
        targetCreature = MekkaTorque_getTargetCreature( unit )
        if targetCreature is not None:
            if targetCreature.getId() == 1268:
                state = 1   # speech track 0
            elif targetCreature.getId() == 6119:
                state = 8   # speech track 2
            elif targetCreature.getId() == 7955:
                state = 5   # speech track 1
    elif state == 1:
        unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, MEKKATORQUE_SPEECH_TEXTS[ 0 ][ 0 ] )
        state = 2
    elif state == 2:
        unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, MEKKATORQUE_SPEECH_TEXTS[ 0 ][ 1 ] )
        state = 3
    elif state == 3:
        unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, MEKKATORQUE_SPEECH_TEXTS[ 0 ][ 2 ] )
        state = 4
    elif state == 4:
        MekkaTorque_speechReaction( unit, 0 )
        state = 200
    elif state == 5:
        unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, MEKKATORQUE_SPEECH_TEXTS[ 1 ][ 0 ] )
        state = 6
    elif state == 6:
        unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, MEKKATORQUE_SPEECH_TEXTS[ 1 ][ 1 ] )
        state = 7
    elif state == 7:
        MekkaTorque_speechReaction( unit, 1 )
        state = 200
    elif state == 8:
        unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, MEKKATORQUE_SPEECH_TEXTS[ 2 ][ 0 ] )
        state = 9
    elif state == 9:
        unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, MEKKATORQUE_SPEECH_TEXTS[ 2 ][ 1 ] )
        state = 10
    elif state == 10:
        unit.sendChatMessage( arcemu.CHAT_MSG_MONSTER_SAY, arcemu.LANG_UNIVERSAL, MEKKATORQUE_SPEECH_TEXTS[ 2 ][ 2 ] )
        state = 11
    elif state == 11:
        MekkaTorque_speechReaction( unit, 2 )
        state = 200
    elif state == 200:
        # Finished: stop AI updates and despawn shortly after.
        unit.RemoveAIUpdateEvent()
        unit.toCreature().despawn( 1, 0 )
    MEKKATORQUE_SPEECH_STATE[ unit.getGUID() ] = state
def METHOD_NAME( unit, event ):
    """On-load handler: reset this unit's speech state and start the 2-second
    AI update tick that drives the speech state machine."""
    MEKKATORQUE_SPEECH_STATE[ unit.getGUID() ] = 0
    unit.RegisterAIUpdateEvent( 2000 )
arcemu.RegisterUnitEvent( MEKKATORQUE_SPEECH_NPC_ID, arcemu.CREATURE_EVENT_ON_LOAD, METHOD_NAME )
arcemu.RegisterUnitEvent( MEKKATORQUE_SPEECH_NPC_ID, arcemu.CREATURE_EVENT_ON_AIUPDATE, Mekkatorque_Speech_npc_onAIUpdate ) |
5,040 | get unit file | import logging
import subprocess
from os import path
from pathlib import Path
from typing import List
from multiversx_sdk_cli import dependencies, errors, myprocess, utils
from multiversx_sdk_cli.projects.project_base import Project, rename_wasm_files
logger = logging.getLogger('ProjectClang')
class ProjectClang(Project):
    """C smart-contract project built to WebAssembly via the LLVM toolchain
    (clang -> llvm-link -> llc -> wasm-ld)."""

    def __init__(self, directory: Path):
        super().__init__(directory)

    def perform_build(self):
        """Run the full build pipeline; wraps tool failures in BuildError."""
        self.config = self.load_config()
        self.ensure_source_files()
        # The "unit" (first source file) names all intermediate artifacts.
        self.unit = self.METHOD_NAME()
        self.file_ll = self.unit.with_suffix('.ll')
        self.file_o = self.unit.with_suffix('.o')
        self.file_export = self.unit.with_suffix('.export')
        self.file_output = self.unit.with_suffix('.wasm')
        try:
            self.do_clang()
            self.do_llvm_link()
            self.do_llc()
            self.do_wasm()
        except subprocess.CalledProcessError as err:
            raise errors.BuildError(err.output)

    def do_clang(self):
        """Compile each C source to LLVM IR targeting wasm32."""
        logger.info('do_clang')
        tool = path.join(self._get_llvm_path(), 'clang-9')
        args = [
            tool,
            '-cc1', '-emit-llvm',
            '-triple=wasm32-unknown-unknown-wasm',
        ]
        if self.options.get('optimized', False):
            args.append('-Ofast')
        else:
            args.append('-O0')
        args.extend(map(str, self.get_source_files()))
        myprocess.run_process(args)

    def do_llvm_link(self):
        """Link the per-source .ll files into a single IR module."""
        logger.info('do_llvm_link')
        tool = path.join(self._get_llvm_path(), 'llvm-link')
        args = [tool]
        args.extend(['-o', str(self.file_ll)])
        args.extend(map(str, self.get_ll_files()))
        myprocess.run_process(args)

    def do_llc(self):
        """Lower the linked IR to a wasm object file."""
        logger.info('do_llc')
        tool = path.join(self._get_llvm_path(), 'llc')
        args = [tool]
        if self.options.get('optimized', False):
            args.append('-O3')
        else:
            args.append('-O0')
        args.append('-filetype=obj')
        args.append(str(self.file_ll))
        args.extend(['-o', str(self.file_o)])
        myprocess.run_process(args)

    def do_wasm(self):
        """Link the object file into the final .wasm, exporting the functions
        listed in the project's .export file."""
        logger.info('do_wasm')
        tool = path.join(self._get_llvm_path(), 'wasm-ld')
        args = [
            tool,
            '--no-entry',
            str(self.file_o),
            '-o', str(self.file_output),
            '--strip-all',
            '-allow-undefined'
        ]
        if self.options.get('verbose', False):
            args.append('--verbose')
        logger.info('exported functions:')
        for export in self.get_exported_functions():
            logger.info(f'\t{export}')
            args.append(f'-export={export}')
        myprocess.run_process(args)

    def _do_after_build_custom(self) -> List[Path]:
        """Copy the wasm artifact to the output folder, clean intermediates,
        and return the (possibly renamed) output paths."""
        output_wasm_file = self._copy_to_output(self.file_output)
        self.file_output.unlink()
        self.file_ll.unlink()
        self.file_o.unlink()
        for ll_file in self.get_ll_files():
            try:
                ll_file.unlink()
            except FileNotFoundError:
                # Intermediate may not exist if the build stopped early.
                pass
        paths = rename_wasm_files([output_wasm_file], self.options.get("wasm-name"))
        return paths

    def _get_llvm_path(self):
        return dependencies.get_module_directory('llvm')

    def get_source_files(self):
        # Yield the configured sources as absolute, user-expanded paths.
        for filename in self.config['source_files']:
            yield (self.path / filename).expanduser().resolve()

    def get_ll_files(self):
        # One .ll per source, produced by do_clang.
        for source_file in self.get_source_files():
            yield source_file.with_suffix('.ll')

    def METHOD_NAME(self):
        """Return the first source file; its stem names the build artifacts."""
        first_file = next(self.get_source_files())
        return first_file

    def ensure_source_files(self):
        """Fill config['source_files'] from the folder when missing or empty."""
        try:
            source_files = self.config['source_files']
            if len(source_files) == 0:
                source_files = self.get_source_files_from_folder()
        except KeyError:
            source_files = self.get_source_files_from_folder()
        self.config['source_files'] = source_files

    def get_exported_functions(self) -> List[str]:
        # One exported symbol per line in the project's *.export file.
        file_export = self.find_file_globally('*.export')
        lines = utils.read_lines(file_export)
        return lines

    def default_config(self):
        config = super().default_config()
        config['language'] = 'clang'
        config['source_files'] = self.get_source_files_from_folder()
        return config

    def get_source_files_from_folder(self):
        # All *.c files under the project directory, recursively.
        return list(map(str, self.path.rglob('*.c')))

    def get_dependencies(self):
        return ['llvm']
5,041 | get size in bytes | """
Code imported from aleph-client to avoid a direct reference to the SDK.
"""
import hashlib
import logging
import struct
from binascii import hexlify, unhexlify
from typing import Optional
from coincurve.keys import PrivateKey, PublicKey
LOGGER = logging.getLogger(__name__)
PLACE_HOLDER = b"\xFF\xFF\xFF\xFF"
B58_DIGITS = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
MESSAGE_TEMPLATE = "\x18NULS Signed Message:\n{}"
class VarInt:
    """Bitcoin-style variable-length integer (ported from a Java original).

    Encoding: values < 253 fit in one byte; otherwise a marker byte (253/254/255)
    is followed by 2, 4, or 8 little-endian data bytes.  Negative values are
    treated as very large unsigned 64-bit values, matching the Java semantics
    noted in sizeOf().
    """

    def __init__(self, value=None):
        # value: the integer represented; originallyEncodedSize: how many
        # bytes its encoding occupies (set by parse() when decoding).
        self.value = value
        self.originallyEncodedSize = 1
        if value is not None:
            self.originallyEncodedSize = self.METHOD_NAME()

    def parse(self, buf, offset):
        """Decode the varint starting at buf[offset] into value/size."""
        first = 0xFF & buf[offset]
        if first < 253:
            self.value = first
            # 1 data byte (8 bits)
            self.originallyEncodedSize = 1
        elif first == 253:
            self.value = (0xFF & buf[offset + 1]) | ((0xFF & buf[offset + 2]) << 8)
            # 1 marker + 2 data bytes (16 bits)
            self.originallyEncodedSize = 3
        elif first == 254:
            self.value = struct.unpack("<I", buf[offset + 1 : offset + 5])[0]
            # 1 marker + 4 data bytes (32 bits)
            self.originallyEncodedSize = 5
        else:
            self.value = struct.unpack("<Q", buf[offset + 1 : offset + 9])[0]
            # 1 marker + 8 data bytes (64 bits)
            self.originallyEncodedSize = 9

    def getOriginalSizeInBytes(self):
        """Size of the encoding this instance was parsed from."""
        return self.originallyEncodedSize

    def METHOD_NAME(self):
        """Size in bytes of this instance's minimal encoding."""
        return self.sizeOf(self.value)

    @classmethod
    def sizeOf(cls, value):
        """Return the encoded size (1, 3, 5 or 9 bytes) for *value*."""
        # if negative, it's actually a very large unsigned long value
        if value < 0:
            # 1 marker + 8 data bytes
            return 9
        if value < 253:
            # 1 data byte
            return 1
        if value <= 0xFFFF:
            # 1 marker + 2 data bytes
            return 3
        if value <= 0xFFFFFFFF:
            # 1 marker + 4 data bytes
            return 5
        # 1 marker + 8 data bytes
        return 9

    def encode(self):
        """Return the minimal encoded bytes of the value.

        BUG FIX: negative values previously crashed struct.pack("<Q", ...)
        even though sizeOf() deliberately routes them to the 9-byte form as
        large unsigned longs; they are now masked to 64 bits first.
        """
        size = self.sizeOf(self.value)
        if size == 1:
            return bytes((self.value,))
        elif size == 3:
            return bytes((253, self.value & 0xFF, self.value >> 8))
        elif size == 5:
            return bytes((254,)) + struct.pack("<I", self.value)
        else:
            return bytes((255,)) + struct.pack("<Q", self.value & 0xFFFFFFFFFFFFFFFF)
def read_by_length(buffer, cursor=0, check_size=True):
    """Read a length-prefixed byte string from *buffer* at *cursor*.

    With check_size=True the prefix is a VarInt; otherwise it is a single
    byte.  Returns (bytes consumed including the prefix, payload bytes).
    """
    if check_size:
        prefix = VarInt()
        prefix.parse(buffer, cursor)
        length, prefix_size = prefix.value, prefix.originallyEncodedSize
    else:
        length, prefix_size = buffer[cursor], 1
    start = cursor + prefix_size
    return prefix_size + length, buffer[start : start + length]
def write_with_length(buffer):
    """Return *buffer* prefixed with its length encoded as a VarInt
    (a single raw byte suffices below the 253 marker threshold)."""
    if len(buffer) < 253:
        prefix = bytes([len(buffer)])
    else:
        prefix = VarInt(len(buffer)).encode()
    return prefix + buffer
def getxor(body):
    """XOR-fold all bytes of *body* (used as the NULS address checksum)."""
    checksum = 0
    for byte in body:
        checksum ^= byte
    return checksum
def b58_encode(b):
    """Encode bytes to a base58-encoded string."""
    # Interpret the bytes as one big-endian integer.
    num = int("0x0" + hexlify(b).decode("utf8"), 16)
    # Peel off base-58 digits, least significant first, then reverse.
    digits = []
    while num > 0:
        num, rem = divmod(num, 58)
        digits.append(B58_DIGITS[rem])
    encoded = "".join(reversed(digits))
    # Each leading zero byte becomes one leading base58 zero digit ('1').
    pad = 0
    for byte in b:
        if byte != 0:
            break
        pad += 1
    return B58_DIGITS[0] * pad + encoded
def b58_decode(s):
    """Decode a base58-encoding string, returning bytes."""
    if not s:
        return b""
    # Fold the digits into one integer, rejecting invalid characters.
    num = 0
    for ch in s:
        num *= 58
        if ch not in B58_DIGITS:
            raise ValueError(f"Character '{ch}' is not a valid base58 character")
        num += B58_DIGITS.index(ch)
    # Convert the integer to big-endian bytes (even-length hex).
    hex_repr = "%x" % num
    if len(hex_repr) % 2:
        hex_repr = "0" + hex_repr
    decoded = unhexlify(hex_repr.encode("utf8"))
    # Restore the zero bytes encoded as leading '1' digits.
    pad = 0
    for ch in s[:-1]:
        if ch != B58_DIGITS[0]:
            break
        pad += 1
    return b"\x00" * pad + decoded
def address_from_hash(addr):
    """Append the XOR checksum byte to *addr* and base58-encode the result."""
    checksum = bytes((getxor(addr),))
    return b58_encode(addr + checksum)
def hash_from_address(hash):
    """Base58-decode an address and strip its trailing checksum byte."""
    decoded = b58_decode(hash)
    return decoded[:-1]
def public_key_to_hash(pub_key, chain_id=8964, address_type=1):
    """Build the raw NULS address hash: little-endian chain id (2 bytes) +
    address type byte + RIPEMD160(SHA256(pub_key)).

    NOTE(review): "ripemd160" relies on OpenSSL providing that digest.
    """
    ripemd_digest = hashlib.new("ripemd160", hashlib.sha256(pub_key).digest()).digest()
    return bytes(struct.pack("h", chain_id)) + bytes([address_type]) + ripemd_digest
class BaseNulsData:
    """Common base for serializable NULS data structures."""

    def _prepare(self, item):
        # None fields serialize to the 4-byte placeholder marker.
        return PLACE_HOLDER if item is None else item.serialize()
class NulsSignature(BaseNulsData):
    """A NULS signature record: compressed public key + ECC algorithm type +
    serialized signature, with helpers to sign and verify via coincurve."""

    ALG_TYPE = 0  # only one for now...

    # All fields are None until parse() or sign_data()/sign_message() fills them.
    pub_key: Optional[bytes]
    digest_bytes: Optional[bytes]
    sig_ser: Optional[bytes]
    ecc_type: Optional[bytes]

    def __init__(self, data=None):
        self.pub_key = None
        self.digest_bytes = None
        self.sig_ser = None
        self.ecc_type = None
        if data is not None:
            self.parse(data)

    def __eq__(self, other):
        # NOTE(review): field-wise equality only; no __hash__ is defined, so
        # instances become unhashable once __eq__ is declared.
        return all(
            (
                (self.pub_key == other.pub_key),
                (self.digest_bytes == other.digest_bytes),
                (self.sig_ser == other.sig_ser),
                (self.ecc_type == other.ecc_type),
            )
        )

    def parse(self, buffer, cursor=0):
        """Read pub_key, ecc type byte, and signature from *buffer*; return
        the cursor position after the consumed bytes."""
        pos, self.pub_key = read_by_length(buffer, cursor)
        cursor += pos
        self.ecc_type = buffer[cursor]
        cursor += 1
        pos, self.sig_ser = read_by_length(buffer, cursor)
        cursor += pos
        return cursor

    @classmethod
    def sign_data(cls, pri_key: bytes, digest_bytes: bytes):
        """Sign an already-hashed digest (hasher=None disables re-hashing)."""
        privkey = PrivateKey(pri_key)
        # we expect to have a private key as bytes. unhexlify it before passing.
        item = cls()
        item.pub_key = privkey.public_key.format()
        item.digest_bytes = digest_bytes
        item.sig_ser = privkey.sign(digest_bytes, hasher=None)
        return item

    @classmethod
    async def sign_message(cls, pri_key: bytes, message):
        """Sign a message wrapped in the NULS signed-message template.

        NOTE(review): declared async but awaits nothing; also *message* is
        length-prefixed bytes formatted into a str template -- verify() uses
        the same construction, so the pair is self-consistent.
        """
        # we expect to have a private key as bytes. unhexlify it before passing
        privkey = PrivateKey(pri_key)
        item = cls()
        message = VarInt(len(message)).encode() + message
        item.pub_key = privkey.public_key.format()
        # item.digest_bytes = digest_bytes
        item.sig_ser = privkey.sign(MESSAGE_TEMPLATE.format(message).encode())
        return item

    def serialize(self, with_length=False):
        """Serialize as length-prefixed pub_key + alg byte + signature;
        optionally wrap the whole record with its own length prefix."""
        output = b""
        output += write_with_length(self.pub_key)
        output += bytes([0])  # alg ecc type
        output += write_with_length(self.sig_ser)
        if with_length:
            return write_with_length(output)
        else:
            return output

    def verify(self, message):
        """Return True if sig_ser validates *message* against pub_key;
        any verification failure is logged and reported as False."""
        pub = PublicKey(self.pub_key)
        message = VarInt(len(message)).encode() + message
        # LOGGER.debug("Comparing with %r" % (MESSAGE_TEMPLATE.format(message).encode()))
        try:
            if self.sig_ser is None:
                raise TypeError("sig_ser is None")
            good = pub.verify(self.sig_ser, MESSAGE_TEMPLATE.format(message).encode())
        except Exception:
            LOGGER.exception("Verification failed")
            good = False
        return good
5,042 | get mask | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import numpy as np
import math
import os
import cv2
import itertools
from nose2.tools import params
from test_utils import get_dali_extra_path
data_root = get_dali_extra_path()
img_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
def get_pipeline(device, batch_size, tile, ratio, angle):
    """Build a DALI pipeline that decodes JPEGs and applies grid_mask with
    fixed tile/ratio/angle; outputs (masked image, original image)."""
    pipe = Pipeline(batch_size, 4, 0)
    with pipe:
        input, _ = fn.readers.file(file_root=img_dir)
        decoded = fn.decoders.image(input, device='cpu', output_type=types.RGB)
        # Move data to GPU only for the 'gpu' operator variant.
        decoded = decoded.gpu() if device == 'gpu' else decoded
        grided = fn.grid_mask(decoded, device=device, tile=tile, ratio=ratio, angle=angle)
        pipe.set_outputs(grided, decoded)
    return pipe
def get_random_pipeline(device, batch_size):
    """Like get_pipeline, but samples tile/ratio/angle per iteration and also
    returns them so the test can recompute the reference mask."""
    pipe = Pipeline(batch_size, 4, 0)
    with pipe:
        input, _ = fn.readers.file(file_root=img_dir)
        decoded = fn.decoders.image(input, device='cpu', output_type=types.RGB)
        decoded = decoded.gpu() if device == 'gpu' else decoded
        # Per-sample random parameters within the operator's sensible ranges.
        tile = fn.cast(fn.random.uniform(range=(50, 200)), dtype=types.INT32)
        ratio = fn.random.uniform(range=(0.3, 0.7))
        angle = fn.random.uniform(range=(-math.pi, math.pi))
        grided = fn.grid_mask(decoded, device=device, tile=tile, ratio=ratio, angle=angle)
        pipe.set_outputs(grided, decoded, tile, ratio, angle)
    return pipe
def METHOD_NAME(w, h, tile, ratio, angle, d):
    """Reference boolean mask for grid_mask: True where a pixel lies outside
    the rotated grid of black squares (side = tile * ratio), with *d* as a
    tolerance margin widening (d > 0) or shrinking (d < 0) the squares."""
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    black = tile * ratio
    cols = np.tile(np.arange(w), (h, 1))
    rows = np.transpose(np.tile(np.arange(h), (w, 1)))
    # Rotate pixel coordinates into the grid's frame.
    rx = cols * cos_a - rows * sin_a
    ry = cols * sin_a + rows * cos_a
    outside_x = (rx + d) % tile > black + 2 * d
    outside_y = (ry + d) % tile > black + 2 * d
    return np.logical_or(outside_x, outside_y)
def check(result, input, tile, ratio, angle):
    """Assert that *result* equals *input* with the grid-mask squares blacked
    out, using the numpy reference mask with a small tolerance band."""
    result = np.uint8(result)
    input = np.uint8(input)
    w = result.shape[1]
    h = result.shape[0]
    # Tolerance on square boundaries (sub-pixel rotation rounding).
    eps = 0.1
    # inside of squares should be black
    mask = np.uint8(1 - METHOD_NAME(w, h, tile, ratio, angle, -eps))
    result2 = cv2.bitwise_and(result, result, mask=mask)
    assert not np.any(result2)
    # outside of squares should be same as input
    mask = np.uint8(METHOD_NAME(w, h, tile, ratio, angle, eps))
    result2 = cv2.bitwise_and(result, result, mask=mask)
    input2 = cv2.bitwise_and(input, input, mask=mask)
    assert np.all(result2 == input2)
def run_test(batch_size, device, tile, ratio, angle):
    """Run one fixed-parameter pipeline pass and validate every sample."""
    pipe = get_pipeline(device, batch_size, tile, ratio, angle)
    pipe.build()
    results, inputs = pipe.run()
    if device == 'gpu':
        # Copy GPU outputs back to host before comparing.
        results, inputs = results.as_cpu(), inputs.as_cpu()
    for i in range(batch_size):
        check(results[i], inputs[i], tile, ratio, angle)
devices = ['cpu', 'gpu']
args = [
(40, 0.5, 0),
(100, 0.1, math.pi / 2),
(200, 0.7, math.pi / 3),
(150, 1 / 3, math.pi / 4),
(50, 0.532, 1),
(51, 0.38158387, 2.6810782),
(123, 0.456, 0.789)]
@params(*itertools.product(devices, args))
def test_gridmask_vs_cv(device, args):
    """Parametrized test: grid_mask vs the OpenCV/numpy reference for every
    device x (tile, ratio, angle) combination."""
    batch_size = 4
    tile, ratio, angle = args
    run_test(batch_size, device, tile, ratio, angle)
def run_random_test(batch_size, device):
    """Run 16 iterations with per-sample random parameters, validating each
    sample against the reference mask recomputed from the sampled values."""
    pipe = get_random_pipeline(device, batch_size)
    pipe.build()
    for _ in range(16):
        results, inputs, tiles, ratios, angles = pipe.run()
        if device == 'gpu':
            results, inputs = results.as_cpu(), inputs.as_cpu()
        for i in range(batch_size):
            # Extract the scalar parameters DALI used for this sample.
            tile = np.int32(tiles[i])
            ratio = np.float32(ratios[i])
            angle = np.float32(angles[i])
            check(results[i], inputs[i], tile, ratio, angle)
@params(*devices)
def test_gridmask_vs_cv_random(device):
    """Randomized-parameter variant of the grid_mask vs reference test."""
    batch_size = 4
    run_random_test(batch_size, device)
5,043 | get mapbox state | from typing import Optional, Dict, List, NamedTuple, Tuple
import re
import urllib.parse
import pydantic
import logging
from django.conf import settings
import requests
from project.util.geojson import FeatureGeometry
logger = logging.getLogger(__name__)
MAPBOX_PLACES_URL = "https://api.mapbox.com/geocoding/v5/mapbox.places"
MAPBOX_STATE_SHORT_CODE_RE = r"^US-([A-Z][A-Z])$"
MAPBOX_CITY_ID_RE = r"^(place|locality)\..*"
class MapboxFeatureContext(pydantic.BaseModel):
    """One entry of a feature's "context": an enclosing region such as a
    postcode, place, locality, or state (short_code e.g. "US-NY")."""
    id: str
    text: str
    short_code: Optional[str]
class MapboxFeature(pydantic.BaseModel):
    """A single geocoding result from the Mapbox Places API.

    `center` is a (lng, lat) pair; `address` is the optional house number.
    """
    context: List[MapboxFeatureContext]
    text: str
    address: Optional[str]
    center: Tuple[float, float]
    place_name: str
    place_type: List[str]
    geometry: FeatureGeometry
class MapboxResults(pydantic.BaseModel):
    """Top-level response body of a Mapbox Places request."""
    features: List[MapboxFeature]
class StreetAddress(NamedTuple):
    """A normalized street-address match returned by find_address()."""
    address: str
    zip_code: str
    place_name: str
    geometry: FeatureGeometry
def _encode_query_for_places_request(query: str) -> str:
# Mapbox's API prohibits semicolons, so replace them with commas.
query = query.replace(";", ",")
# By default, urllib.parse.quote() considers '/' a "safe" character to
# not quote, but in the context of the Mapbox API it needs to be quoted
# or else the URL will 404. This is particularly important for addresses
# which contain fractional addresses, e.g. "654 1/2 Park Place".
query = urllib.parse.quote(query, safe="")
return query
def mapbox_places_request(query: str, args: Dict[str, str]) -> Optional[MapboxResults]:
    """
    Make a request for the given place to the Mapbox Places API [1], using the
    given arguments.
    Returns None if Mapbox isn't configured, or if a network error occurs.
    Note that Mapbox's Places API prohibits semicolons from
    being in the query, so this function will replace them with commas.
    [1] https://docs.mapbox.com/api/search/#forward-geocoding
    """
    if not settings.MAPBOX_ACCESS_TOKEN:
        # Mapbox integration is optional; without a token we opt out entirely.
        return None
    query = _encode_query_for_places_request(query)
    try:
        response = requests.get(
            f"{MAPBOX_PLACES_URL}/{query}.json",
            {
                "access_token": settings.MAPBOX_ACCESS_TOKEN,
                # Restrict to U.S. results and disable fuzzy autocompletion;
                # callers may override/extend via `args`.
                "country": "US",
                "autocomplete": "false",
                **args,
            },
            timeout=settings.MAPBOX_TIMEOUT,
        )
        if response.status_code == 422:
            # Unprocessable entity; our query was likely too long, so return
            # an empty result set.
            return MapboxResults(features=[])
        response.raise_for_status()
        return MapboxResults(**response.json())
    except Exception:
        # Deliberately broad: any network/parse failure degrades to "no data"
        # rather than breaking the caller.
        logger.exception(f"Error while retrieving data from {MAPBOX_PLACES_URL}")
        return None
def find_city(city: str, state: str) -> Optional[List[Tuple[str, Tuple[float, float]]]]:
    """
    Attempts to find matches for the closest city name in the given
    state using the Mapbox Places API. The return value is a list of
    (name, (lng, lat)) tuples in the given state that match the query.
    If Mapbox isn't configured or a network error occurs, returns None.
    """
    results = mapbox_places_request(
        f"{city}, {state}",
        # We want "place" because it covers all cities, but we also want
        # "locality" so folks can enter places like "Brooklyn".
        {"types": "place,locality"},
    )
    if not results:
        return None
    return [
        (feature.text, feature.center)
        for feature in results.features
        if METHOD_NAME(feature) == state
    ]
def find_address(
    address: str, city: str, state: str, zip_code: str
) -> Optional[List[StreetAddress]]:
    """
    Attempts to find matches for the closest street address in the given
    city and state using the given zip code.
    If Mapbox isn't configured or a network error occurs, returns None.
    This function prioritizes matches that Mapbox claims are definitely
    in the provided city, but it will also return other matches. This
    is because sometimes the city someone thinks they live in might
    not be the one that Mapbox thinks their address is in.
    For example, Wappingers Falls is a small town near Poughkeepsie,
    and while a person might believe they live in one of them, Mapbox
    might believe their address is in the other (while yet another
    map vendor might disagree with Mapbox!).
    """
    city = city.strip()
    results = mapbox_places_request(
        f"{address}, {city}, {state} {zip_code}",
        {
            "types": "address",
        },
    )
    if not results:
        return None
    # Partition matches by whether Mapbox places them in the requested city;
    # in-city matches are listed first (see docstring for why both are kept).
    in_city_addrs: List[StreetAddress] = []
    out_of_city_addrs: List[StreetAddress] = []
    for result in results.features:
        # Only keep results in the right state that actually carry a zip code.
        state_matches = METHOD_NAME(result) == state
        result_zip_code = get_mapbox_zip_code(result)
        if state_matches and result_zip_code:
            addr = StreetAddress(
                address=get_mapbox_street_addr(result),
                zip_code=result_zip_code,
                place_name=result.place_name,
                geometry=result.geometry,
            )
            if does_city_match(city, result):
                in_city_addrs.append(addr)
            else:
                out_of_city_addrs.append(addr)
    return in_city_addrs + out_of_city_addrs
def get_mapbox_street_addr(feature: MapboxFeature) -> str:
"""
Given a Mapbox Feature that represents an address, returns
the street address, e.g. "150 Court Street".
"""
assert "address" in feature.place_type
# Not really sure if any real-world addresses don't have the address
# property, but the Mapbox docs do say it's optional...
if feature.address:
return f"{feature.address} {feature.text}"
return feature.text
def get_state_from_short_code(short_code: Optional[str]) -> Optional[str]:
    """
    Given a Mapbox short code (e.g. "US-NY"), returns the two-letter state
    code it corresponds to, or None if it isn't a recognized state code.
    """
    # Mapbox reports Puerto Rico with a bare lowercase code rather than the
    # usual "US-XX" form.
    if short_code == "pr":
        return "PR"
    match = re.match(MAPBOX_STATE_SHORT_CODE_RE, short_code or "")
    return match[1] if match else None
def METHOD_NAME(feature: MapboxFeature) -> Optional[str]:
    """
    Returns the two-letter state code for the given Mapbox Feature, if
    one exists.
    """
    for ctx in feature.context:
        code = get_state_from_short_code(ctx.short_code)
        if code:
            return code
    return None
def get_mapbox_zip_code(feature: MapboxFeature) -> Optional[str]:
"""
Returns the U.S. Zip Code for the given Mapbox Feature, if one
exists.
"""
for context in feature.context:
if context.id.startswith("postcode."):
return context.text
return None
def does_city_match(city: str, feature: MapboxFeature) -> bool:
    """
    Returns whether the given Mapbox Feature is inside the given city
    (case-insensitive comparison against "place"/"locality" context entries).
    """
    target = city.lower()
    return any(
        re.match(MAPBOX_CITY_ID_RE, ctx.id) and ctx.text.lower() == target
        for ctx in feature.context
    )
5,044 | on 200 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "network virtual-appliance site show",
    is_preview=True,
)
class Show(AAZCommand):
    """Show the detail of an Azure network virtual appliance site.
    :example: Show the detail of an Azure network virtual appliance site.
        az network virtual-appliance site show -n MySite -g MyRG --appliance-name MyName
    """
    # Metadata consumed by the aaz runtime: API version and the ARM
    # resource-id template this command operates on.
    _aaz_info = {
        "version": "2021-08-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/networkvirtualappliances/{}/virtualappliancesites/{}", "2021-08-01"],
        ]
    }
    def _handler(self, command_args):
        # Standard aaz command entry point: parse args, run operations, emit result.
        super()._handler(command_args)
        self._execute_operations()
        return self._output()
    _args_schema = None
    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # The argument schema is built once and cached on the class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.appliance_name = AAZStrArg(
            options=["--appliance-name"],
            help="The name of Network Virtual Appliance.",
            required=True,
            id_part="name",
        )
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="The name of Network Virtual Appliance Site.",
            required=True,
            id_part="child_name_1",
        )
        return cls._args_schema
    def _execute_operations(self):
        # Single GET operation wrapped by the pre/post callback hooks.
        self.pre_operations()
        self.VirtualApplianceSitesGet(ctx=self.ctx)()
        self.post_operations()
    @register_callback
    def pre_operations(self):
        pass
    @register_callback
    def post_operations(self):
        pass
    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result
    class VirtualApplianceSitesGet(AAZHttpOperation):
        # HTTP GET of a single virtualApplianceSites child resource.
        CLIENT_TYPE = "MgmtClient"
        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # 200 is the only success status for this GET.
            if session.http_response.status_code in [200]:
                return self.METHOD_NAME(session)
            return self.on_error(session.http_response)
        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}/virtualApplianceSites/{siteName}",
                **self.url_parameters
            )
        @property
        def method(self):
            return "GET"
        @property
        def error_format(self):
            return "ODataV4Format"
        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "networkVirtualApplianceName", self.ctx.args.appliance_name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "siteName", self.ctx.args.name,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
            }
            return parameters
        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2021-08-01",
                    required=True,
                ),
            }
            return parameters
        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters
        def METHOD_NAME(self, session):
            # Success (200) handler: deserialize the body into ctx.vars.instance.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )
        _schema_on_200 = None
        @classmethod
        def _build_schema_on_200(cls):
            # The 200-response schema is built lazily and cached on the class.
            if cls._schema_on_200 is not None:
                return cls._schema_on_200
            cls._schema_on_200 = AAZObjectType()
            _schema_on_200 = cls._schema_on_200
            _schema_on_200.etag = AAZStrType(
                flags={"read_only": True},
            )
            _schema_on_200.id = AAZStrType()
            _schema_on_200.name = AAZStrType()
            _schema_on_200.properties = AAZObjectType(
                flags={"client_flatten": True},
            )
            _schema_on_200.type = AAZStrType(
                flags={"read_only": True},
            )
            properties = cls._schema_on_200.properties
            properties.address_prefix = AAZStrType(
                serialized_name="addressPrefix",
            )
            properties.o365_policy = AAZObjectType(
                serialized_name="o365Policy",
            )
            properties.provisioning_state = AAZStrType(
                serialized_name="provisioningState",
                flags={"read_only": True},
            )
            o365_policy = cls._schema_on_200.properties.o365_policy
            o365_policy.break_out_categories = AAZObjectType(
                serialized_name="breakOutCategories",
            )
            break_out_categories = cls._schema_on_200.properties.o365_policy.break_out_categories
            break_out_categories.allow = AAZBoolType()
            break_out_categories.default = AAZBoolType()
            break_out_categories.optimize = AAZBoolType()
            return cls._schema_on_200
class _ShowHelper:
    """Helper class for Show"""
# Only the command class is part of this generated module's public API.
__all__ = ["Show"]
5,045 | dao get provider versions | from datetime import datetime
from notifications_utils.timezones import convert_utc_to_local_timezone
from sqlalchemy import asc, desc, func
from app import db
from app.dao.dao_utils import transactional
from app.models import (
SMS_TYPE,
FactBilling,
ProviderDetails,
ProviderDetailsHistory,
User,
)
from app.provider_details.switch_providers import (
provider_is_inactive,
provider_is_primary,
switch_providers,
)
def get_provider_details_by_id(provider_details_id):
    """Look up a ProviderDetails record by primary key (None if missing)."""
    record = ProviderDetails.query.get(provider_details_id)
    return record
def get_provider_details_by_identifier(identifier):
    """Fetch the single ProviderDetails record with this identifier.

    `.one()` raises if the identifier matches zero or multiple rows.
    """
    query = ProviderDetails.query.filter_by(identifier=identifier)
    return query.one()
def get_alternative_sms_provider(identifier):
    """Return the provider to fail over to for `identifier`."""
    # We currently run with a single SMS provider (SNS)
    # so this method is not implemented and does not
    # switch providers.
    return get_provider_details_by_identifier(identifier)
def get_current_provider(notification_type):
    """Return the active provider with the best (lowest) priority value."""
    candidates = ProviderDetails.query.filter_by(
        notification_type=notification_type, active=True
    ).order_by(asc(ProviderDetails.priority))
    return candidates.first()
def METHOD_NAME(provider_id):
    """Return every historical version of a provider, newest version first."""
    history = ProviderDetailsHistory.query.filter_by(id=provider_id)
    return history.order_by(desc(ProviderDetailsHistory.version)).all()
@transactional
def dao_toggle_sms_provider(identifier):
    """Switch SMS traffic to the alternative of the provider `identifier`."""
    alternative = get_alternative_sms_provider(identifier)
    dao_switch_sms_provider_to_provider_with_identifier(alternative.identifier)
@transactional
def dao_switch_sms_provider_to_provider_with_identifier(identifier):
    """Make the provider with `identifier` the primary active SMS provider,
    adjusting provider priorities as needed (no-op if it is inactive)."""
    new_provider = get_provider_details_by_identifier(identifier)
    if provider_is_inactive(new_provider):
        # Never route traffic onto a disabled provider.
        return
    # Check first to see if there is another provider with the same priority
    # as this needs to be updated differently
    conflicting_provider = dao_get_sms_provider_with_equal_priority(new_provider.identifier, new_provider.priority)
    providers_to_update = []
    if conflicting_provider:
        # NOTE(review): switch_providers' return value is discarded here, so
        # providers_to_update stays empty and nothing is persisted below in
        # this branch -- confirm whether the assignment was intended.
        switch_providers(conflicting_provider, new_provider)
    else:
        current_provider = get_current_provider("sms")
        if not provider_is_primary(current_provider, new_provider, identifier):
            providers_to_update = switch_providers(current_provider, new_provider)
    for provider in providers_to_update:
        dao_update_provider_details(provider)
def get_provider_details_by_notification_type(notification_type, supports_international=False):
    """List providers for a notification type, ordered by ascending priority.

    When `supports_international` is truthy, only providers that support
    international sending are returned.
    """
    query = ProviderDetails.query.filter(ProviderDetails.notification_type == notification_type)
    if supports_international:
        query = query.filter(ProviderDetails.supports_international == supports_international)
    return query.order_by(asc(ProviderDetails.priority)).all()
@transactional
def dao_update_provider_details(provider_details):
    """Persist changes to a provider, bumping its version and recording a
    history snapshot of the new state."""
    provider_details.version += 1
    provider_details.updated_at = datetime.utcnow()
    # The snapshot must be taken after the version/timestamp bump so history
    # reflects the state being saved.
    snapshot = ProviderDetailsHistory.from_original(provider_details)
    db.session.add(provider_details)
    db.session.add(snapshot)
def dao_get_sms_provider_with_equal_priority(identifier, priority):
    """Find another active SMS provider sharing `priority`, if one exists."""
    query = (
        db.session.query(ProviderDetails)
        .filter(
            ProviderDetails.identifier != identifier,
            ProviderDetails.notification_type == "sms",
            ProviderDetails.priority == priority,
            ProviderDetails.active,
        )
        .order_by(asc(ProviderDetails.priority))
    )
    return query.first()
def dao_get_provider_stats():
    """Return per-provider details joined with this month's billable SMS units.

    Rows are ordered by notification type then priority; providers with no
    billing rows this month report 0 billable units.
    """
    # this query does not include the current day since the task to populate ft_billing runs overnight
    current_local_datetime = convert_utc_to_local_timezone(datetime.utcnow())
    first_day_of_the_month = current_local_datetime.date().replace(day=1)
    # Month-to-date billable SMS units per provider identifier.
    subquery = (
        db.session.query(
            FactBilling.provider,
            func.sum(FactBilling.billable_units * FactBilling.rate_multiplier).label("current_month_billable_sms"),
        )
        .filter(
            FactBilling.notification_type == SMS_TYPE,
            FactBilling.bst_date >= first_day_of_the_month,
        )
        .group_by(FactBilling.provider)
        .subquery()
    )
    # Outer joins keep providers with no billing data / no creator user.
    result = (
        db.session.query(
            ProviderDetails.id,
            ProviderDetails.display_name,
            ProviderDetails.identifier,
            ProviderDetails.priority,
            ProviderDetails.notification_type,
            ProviderDetails.active,
            ProviderDetails.updated_at,
            ProviderDetails.supports_international,
            User.name.label("created_by_name"),
            func.coalesce(subquery.c.current_month_billable_sms, 0).label("current_month_billable_sms"),
        )
        .outerjoin(subquery, ProviderDetails.identifier == subquery.c.provider)
        .outerjoin(User, ProviderDetails.created_by_id == User.id)
        .order_by(
            ProviderDetails.notification_type,
            ProviderDetails.priority,
        )
        .all()
    )
    return result
5,046 | run | import json
import random
import string
import time
import traceback
from ceph.ceph import CommandFailed
from ceph.ceph_admin import CephAdmin
from tests.cephfs.cephfs_utilsV1 import FsUtils
from tests.io.fs_io import fs_io
from utility.log import Log
from utility.retry import retry
log = Log(__name__)
"""
Pre-requisite:
Make sure cluster is up and configured with Single CephFS,
3 MDS (1 active and 2 standby),
required 3 clients. Configure cluster and make sure PG's in active + clean state.
Configure 2 clients with Fuse client and
another 1 client with kernel client.
Make sure MDS is having Stand-by rank set
Wait for cluster to get filled upto 20%
Steps:
Identify the faulty MDS node it can be either active
or stand-by MDS node and remove it from cluster using Ansible or manual way.
"""
@retry(CommandFailed, tries=10, delay=60)
def check_nodes(admin, target_node, check_node_cmd):
    """Poll until no daemons are reported on target_node.

    Raises CommandFailed while daemons remain; @retry re-runs the check
    every 60s, up to 10 times.
    """
    output, _ = admin.installer.exec_command(sudo=True, cmd=check_node_cmd)
    text = str(output).strip()
    log.info(text)
    if "No daemons reported" not in text:
        raise CommandFailed(f"{target_node} daemons are not removed")
def METHOD_NAME(ceph_cluster, **kw):
    """Drain, remove, and re-add an MDS-labelled host while client IO runs.

    Returns 0 on success, 1 on any failure; mounts are cleaned up in finally.
    """
    try:
        fs_util = FsUtils(ceph_cluster)
        config = kw.get("config")
        clients = ceph_cluster.get_ceph_objects("client")
        build = config.get("build", config.get("rhbuild"))
        fs_util.prepare_clients(clients, build)
        fs_util.auth_list(clients)
        log.info("checking Pre-requisites")
        client1 = clients[0]
        admin = CephAdmin(cluster=ceph_cluster, **config)
        # Fill the filesystem to ~20% so the MDS host removal happens under load.
        cephfs = {
            "fill_data": 20,
            "io_tool": "smallfile",
            "mount": "fuse",
            "filesystem": "cephfs",
            "mount_dir": "/mnt/mycephfs1",
        }
        fs_io(client=clients[0], fs_config=cephfs, fs_util=fs_util)
        mounting_dir = "".join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in list(range(10))
        )
        kernel_mounting_dir_1 = f"/mnt/cephfs_kernel{mounting_dir}/"
        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse{mounting_dir}/"
        mon_node_ips = fs_util.get_mon_node_ips()
        default_fs = "cephfs"
        # Mount the same filesystem via both the kernel and FUSE clients.
        fs_util.kernel_mount(
            [clients[0]],
            kernel_mounting_dir_1,
            ",".join(mon_node_ips),
            extra_params=f",fs={default_fs}",
        )
        fs_util.fuse_mount(
            [clients[0]],
            fuse_mounting_dir_1,
            extra_params=f" --client_fs {default_fs}",
        )
        fs_util.run_ios(client1, fuse_mounting_dir_1, ["dd"])
        fs_util.run_ios(client1, kernel_mounting_dir_1, ["smallfile"])
        out1, _ = admin.installer.exec_command(
            sudo=True, cmd="cephadm shell ceph orch host ls --format json-pretty"
        )
        output1 = json.loads(out1)
        # Candidate = a host labelled "mds" but not "osd", so draining it does
        # not disturb storage daemons.
        candidate_host = []
        for host in output1:
            if "osd" not in host["labels"] and "mds" in host["labels"]:
                candidate_host.append(host["hostname"])
        print("Candidate host for removing / Adding MDS node")
        print(candidate_host)
        target_node = candidate_host[-1]
        drain_node_cmd = f"cephadm shell ceph orch host drain {target_node}"
        remove_node_cmd = f"cephadm shell ceph orch host rm {target_node}"
        check_node_cmd = f"cephadm shell ceph orch ps {target_node}"
        # Drain, then remove, then wait (via check_nodes retries) for the
        # orchestrator to stop reporting daemons on the host.
        admin.installer.exec_command(sudo=True, cmd=drain_node_cmd)
        time.sleep(20)
        admin.installer.exec_command(sudo=True, cmd=remove_node_cmd)
        time.sleep(20)
        check_nodes(admin, target_node, check_node_cmd)
        add_node_cmd = f"cephadm shell ceph orch host add {target_node} --labels mds"
        admin.installer.exec_command(sudo=True, cmd=add_node_cmd)
        time.sleep(10)
        check_ps_cmd = f"cephadm shell ceph orch ps {target_node} --format json-pretty"
        out3, ec3 = admin.installer.exec_command(sudo=True, cmd=check_ps_cmd)
        output3 = json.loads(out3)
        # Verify an MDS daemon is scheduled back onto the re-added host.
        if output3[0]["hostname"] == target_node and output3[0]["daemon_type"] == "mds":
            log.info("The Target mds node added")
        else:
            raise CommandFailed("Added node is not added properly")
        return 0
    except Exception as e:
        log.error(e)
        log.error(traceback.format_exc())
        return 1
    finally:
        # NOTE(review): if an exception occurs before fs_util / the mount
        # directories are assigned, this cleanup itself raises NameError --
        # consider guarding with a sentinel.
        log.info("Cleaning up")
        fs_util.client_clean_up(
            "umount", fuse_clients=[clients[0]], mounting_dir=fuse_mounting_dir_1
        )
        fs_util.client_clean_up(
            "umount", fuse_clients=[clients[0]], mounting_dir=kernel_mounting_dir_1
        )
5,047 | do finalize | #
# This file is part of LiteX-Boards.
#
# Copyright (c) 2022 Andrew Elbert Wilson <Andrew.E.Wilson@ieee.org>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxUSPPlatform, VivadoProgrammer
# IOs ----------------------------------------------------------------------------------------------
_io = [
    # Clk / Rst
    # 100 MHz LVDS reference clocks: one for system logic, one for DDR4.
    ("sys_clk100", 0,
        Subsignal("p", Pins("T24"), IOStandard("LVDS")),
        Subsignal("n", Pins("U24"), IOStandard("LVDS"))
    ),
    ("ddr_clk100", 0,
        Subsignal("p", Pins("AD20"), IOStandard("LVDS")),
        Subsignal("n", Pins("AE20"), IOStandard("LVDS"))
    ),
    #NO RESET, maybe use okHOST USB later
    #("cpu_reset", 0, Pins("AN8"), IOStandard("LVCMOS18")),
    # Leds
    ("user_led", 0, Pins("G19"), IOStandard("LVCMOS18")),
    ("user_led", 1, Pins("B16"), IOStandard("LVCMOS18")),
    ("user_led", 2, Pins("F22"), IOStandard("LVCMOS18")),
    ("user_led", 3, Pins("E22"), IOStandard("LVCMOS18")),
    ("user_led", 4, Pins("M24"), IOStandard("LVCMOS18")),
    ("user_led", 5, Pins("G22"), IOStandard("LVCMOS18")),
    # Opal Kelly Host USBC interface
    ("okHost", 0, # Uses the FrontPanel API
        Subsignal("okAA", Pins("T19")),
        Subsignal("okHU", Pins("U20 U26 T22")),
        Subsignal("okUH", Pins("V23 T23 U22 U25 U21")),
        Subsignal("okUHU", Pins(
            "P26 P25 R26 R25 R23 R22 P21 P20",
            "R21 R20 P23 N23 T25 N24 N22 V26",
            "N19 V21 N21 W20 W26 W19 Y25 Y26",
            "Y22 V22 W21 AA23 Y23 AA24 W25 AA25")),
        IOStandard("LVCMOS18"),
        Misc("SLEW=FAST"),
    ),
    # TODO: Add SMA & SFP+
    # DDR4 SDRAM
    ("ddram", 0,
        Subsignal("a", Pins(
            "AD18 AE17 AB17 AE18 AD19 AF17 Y17 AE16",
            "AA17 AC17 AC19 AC16 AF20 AD16"),
            IOStandard("SSTL12_DCI")),
        Subsignal("ba", Pins("AC18 AF18"), IOStandard("SSTL12_DCI")),
        Subsignal("bg", Pins("AB19"), IOStandard("SSTL12_DCI")),
        Subsignal("ras_n", Pins("AA18"), IOStandard("SSTL12_DCI")),
        Subsignal("cas_n", Pins("AF19"), IOStandard("SSTL12_DCI")),
        Subsignal("we_n", Pins("AA19"), IOStandard("SSTL12_DCI")),
        Subsignal("cs_n", Pins("AF22"), IOStandard("SSTL12_DCI")),
        Subsignal("act_n", Pins("Y18"), IOStandard("SSTL12_DCI")),
        Subsignal("dm", Pins("AE25 AE22"),
            IOStandard("POD12_DCI")),
        Subsignal("dq", Pins(
            "AF24 AB25 AB26 AC24 AF25 AB24 AD24 AD25",
            "AB21 AE21 AE23 AD23 AC23 AD21 AC22 AC21"),
            IOStandard("POD12_DCI"),
            Misc("PRE_EMPHASIS=RDRV_240"),
            Misc("EQUALIZATION=EQ_LEVEL2")),
        Subsignal("dqs_p", Pins("AC26 AA22"),
            IOStandard("DIFF_POD12_DCI"),
            Misc("PRE_EMPHASIS=RDRV_240"),
            Misc("EQUALIZATION=EQ_LEVEL2")),
        Subsignal("dqs_n", Pins("AD26 AB22"),
            IOStandard("DIFF_POD12_DCI"),
            Misc("PRE_EMPHASIS=RDRV_240"),
            Misc("EQUALIZATION=EQ_LEVEL2")),
        Subsignal("clk_p", Pins("Y20"), IOStandard("DIFF_SSTL12_DCI")),
        Subsignal("clk_n", Pins("Y21"), IOStandard("DIFF_SSTL12_DCI")),
        Subsignal("cke", Pins("AA20"), IOStandard("SSTL12_DCI")),
        Subsignal("odt", Pins("AB20"), IOStandard("SSTL12_DCI")),
        Subsignal("reset_n", Pins("AE26"), IOStandard("LVCMOS12")),
        Misc("SLEW=FAST"),
    ),
]
# Connectors ---------------------------------------------------------------------------------------
# TODO: SYZYGY Connectors & SYZYGY to PMODS!
_connectors = [
    ("pmod1", "AC14 AC13 AF15 AF14 AF13 AE13 H13 J13"),
    ("pmod2", "AB15 AB16 W14 J14 AE15 W15 Y15 J15"),
    ("pmod3", "G14 H14 W13 W12 AA13 Y13 H12 J12"),
    ("pmod4", "AD14 AD13 W16 AD15 AB14 AA14 Y16 AA15"),
]
def dvi_pmod_io(pmoda,pmodb):
    """Return the DVI IO definition for two PMODs: *pmoda* carries the
    blue/green data nibbles, *pmodb* the clock/sync pins and red nibble."""
    return [
        ("dvi", 0,
            Subsignal("clk", Pins(f"{pmodb}:1")),
            Subsignal("de", Pins(f"{pmodb}:6")),
            Subsignal("hsync", Pins(f"{pmodb}:3")),
            Subsignal("vsync", Pins(f"{pmodb}:7")),
            Subsignal("b", Pins(f"{pmoda}:5 {pmoda}:1 {pmoda}:4 {pmoda}:0")),
            Subsignal("g", Pins(f"{pmoda}:7 {pmoda}:3 {pmoda}:6 {pmoda}:2")),
            Subsignal("r", Pins(f"{pmodb}:2 {pmodb}:5 {pmodb}:4 {pmodb}:0")),
            IOStandard("LVCMOS33"),
        )
    ]
_dvi_pmod_io = dvi_pmod_io("pmod2","pmod1") # DVI PMOD on pmod1/pmod2.
def sdcard_pmod_io(pmod):
    """Return SPI-mode and native-mode SDCard IO definitions for *pmod*."""
    return [
        # SDCard PMOD:
        # - https://store.digilentinc.com/pmod-microsd-microsd-card-slot/
        # - https://github.com/antmicro/arty-expansion-board
        ("spisdcard", 0,
            Subsignal("clk", Pins(f"{pmod}:3")),
            Subsignal("mosi", Pins(f"{pmod}:1"), Misc("PULLUP True")),
            Subsignal("cs_n", Pins(f"{pmod}:0"), Misc("PULLUP True")),
            Subsignal("miso", Pins(f"{pmod}:2"), Misc("PULLUP True")),
            Misc("SLEW=FAST"),
            IOStandard("LVCMOS33"),
        ),
        ("sdcard", 0,
            Subsignal("data", Pins(f"{pmod}:2 {pmod}:4 {pmod}:5 {pmod}:0"), Misc("PULLUP True")),
            Subsignal("cmd", Pins(f"{pmod}:1"), Misc("PULLUP True")),
            Subsignal("clk", Pins(f"{pmod}:3")),
            Subsignal("cd", Pins(f"{pmod}:6")),
            Misc("SLEW=FAST"),
            IOStandard("LVCMOS33"),
        ),
    ]
_sdcard_pmod_io = sdcard_pmod_io("pmod3") # SDCard PMOD on pmod3.
# Platform -----------------------------------------------------------------------------------------
class Platform(XilinxUSPPlatform):
    """Board platform definition (Artix UltraScale+ AU25P, Vivado toolchain)."""
    default_clk_name = "sys_clk100"
    default_clk_period = 1e9/100e6
    def __init__(self, toolchain="vivado"):
        # xcau25p in FFVB676 package, speed grade -2.
        XilinxUSPPlatform.__init__(self, "xcau25p-ffvb676-2-e", _io, _connectors, toolchain=toolchain)
    def create_programmer(self):
        return VivadoProgrammer()
    def METHOD_NAME(self, fragment):
        # Finalization hook: add clock/timing constraints once the design is known.
        XilinxUSPPlatform.METHOD_NAME(self, fragment)
        self.add_period_constraint(self.lookup_request("sys_clk100", loose=True), 1e9/100e6)
        self.add_period_constraint(self.lookup_request("ddr_clk100", loose=True), 1e9/100e6)
        # Internal VREF for the DDR4 POD12 I/O bank.
        self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 64]")
5,048 | test one accumulator while loop | """
Test suite for the AccumulationTable class on different
types of accumulator loops
"""
import pytest
from python_ta.debug import AccumulationTable
def test_one_accumulator() -> None:
    """A for loop with one accumulator: table records both loop var and sum."""
    test_list = [10, 20, 30]
    sum_so_far = 0
    with AccumulationTable(["sum_so_far"]) as table:
        for number in test_list:
            sum_so_far = sum_so_far + number
    assert table.loop_variables == {"number": ["N/A", 10, 20, 30]}
    assert table.loop_accumulators == {"sum_so_far": [0, 10, 30, 60]}
def METHOD_NAME() -> None:
    """A while loop with one accumulator; the condition variable is tracked too."""
    number = 10
    test_list = [10, 20, 30]
    sum_so_far = 0
    with AccumulationTable(["number", "sum_so_far"]) as table:
        while number in test_list:
            sum_so_far = sum_so_far + number
            number += 10
    assert table.loop_accumulators == {"number": [10, 20, 30, 40], "sum_so_far": [0, 10, 30, 60]}
def test_two_accumulator_while_loop() -> None:
    """A while loop with two accumulators plus the loop-condition variable."""
    number = 10
    test_list = [10, 20, 30]
    sum_so_far = 0
    list_so_far = []
    with AccumulationTable(["number", "sum_so_far", "list_so_far"]) as table:
        while number in test_list:
            sum_so_far = sum_so_far + number
            list_so_far = list_so_far + [number]
            number += 10
    assert table.loop_accumulators == {
        "number": [10, 20, 30, 40],
        "sum_so_far": [0, 10, 30, 60],
        "list_so_far": [[], [10], [10, 20], [10, 20, 30]],
    }
def test_two_accumulators() -> None:
    """A for loop with two accumulators tracked simultaneously."""
    test_list = [10, 20, 30]
    sum_so_far = 0
    list_so_far = []
    with AccumulationTable(["sum_so_far", "list_so_far"]) as table:
        for number in test_list:
            sum_so_far = sum_so_far + number
            list_so_far = list_so_far + [number]
    assert table.loop_variables == {"number": ["N/A", 10, 20, 30]}
    assert table.loop_accumulators == {
        "sum_so_far": [0, 10, 30, 60],
        "list_so_far": [[], [10], [10, 20], [10, 20, 30]],
    }
def test_empty_accumulators_and_variables() -> None:
    """An empty accumulator list is rejected with an AssertionError."""
    with pytest.raises(AssertionError):
        number = 10
        test_list = [10, 20, 30]
        sum_so_far = 0
        with AccumulationTable([]) as table:
            while number in test_list:
                sum_so_far = sum_so_far + number
                number += 10
def test_three_different_loop_lineno() -> None:
    """Tracking still works when a statement precedes the loop in the with body."""
    test_list = [10, 20, 30]
    list_so_far = []
    with AccumulationTable(["sum_so_far", "list_so_far"]) as table:
        sum_so_far = 0
        for number in test_list:
            sum_so_far = sum_so_far + number
            list_so_far = list_so_far + [number]
    assert table.loop_variables == {"number": ["N/A", 10, 20, 30]}
    assert table.loop_accumulators == {
        "sum_so_far": [0, 10, 30, 60],
        "list_so_far": [[], [10], [10, 20], [10, 20, 30]],
    }
def test_four_different_loop_lineno() -> None:
    """Tracking still works when a statement follows the loop in the with body."""
    test_list = [10, 20, 30]
    sum_so_far = 0
    list_so_far = []
    with AccumulationTable(["sum_so_far", "list_so_far"]) as table:
        for number in test_list:
            sum_so_far = sum_so_far + number
            list_so_far = list_so_far + [number]
        b = ""
    assert table.loop_variables == {"number": ["N/A", 10, 20, 30]}
    assert table.loop_accumulators == {
        "sum_so_far": [0, 10, 30, 60],
        "list_so_far": [[], [10], [10, 20], [10, 20, 30]],
    }
def test_five_nested_for_loop() -> None:
    """Only the tracked for loop is recorded, even when nested under an if
    and followed by an unrelated while loop."""
    test_list = [10, 20, 30]
    sum_so_far = 0
    list_so_far = []
    with AccumulationTable(["sum_so_far", "list_so_far"]) as table:
        i = 0
        if True:
            for number in test_list:
                sum_so_far = sum_so_far + number
                list_so_far = list_so_far + [number]
        while i < 5:
            i += 1
    assert table.loop_variables == {"number": ["N/A", 10, 20, 30]}
    assert table.loop_accumulators == {
        "sum_so_far": [0, 10, 30, 60],
        "list_so_far": [[], [10], [10, 20], [10, 20, 30]],
    }
def test_five_nested_while_loop() -> None:
    """Only the first (while) loop is recorded; the later for loop's extra
    accumulation is not reflected in the table."""
    number = 10
    test_list = [10, 20, 30]
    sum_so_far = 0
    list_so_far = []
    with AccumulationTable(["number", "sum_so_far", "list_so_far"]) as table:
        if True:
            while number in test_list:
                sum_so_far = sum_so_far + number
                list_so_far = list_so_far + [number]
                number += 10
        for number in test_list:
            sum_so_far = sum_so_far + number
            list_so_far = list_so_far + [number]
    assert table.loop_accumulators == {
        "number": [10, 20, 30, 40],
        "sum_so_far": [0, 10, 30, 60],
        "list_so_far": [[], [10], [10, 20], [10, 20, 30]],
    }
class MyClass:
    """Fixture class used to check AccumulationTable inside a method body."""
    items: list
    def __init__(self, items: list):
        self.items = items
    def get_total(self) -> None:
        """Sum self.items and assert the recorded table contents."""
        sum_so_far = 0
        with AccumulationTable(["sum_so_far"]) as table:
            for item in self.items:
                sum_so_far = sum_so_far + item
        assert table.loop_variables == {"item": ["N/A", 10, 20, 30]}
        assert table.loop_accumulators == {"sum_so_far": [0, 10, 30, 60]}
def test_class_var() -> None:
    """AccumulationTable works when the loop iterates an instance attribute."""
    my_class = MyClass([10, 20, 30])
    my_class.get_total()
def test_two_loop_vars_one_accumulator() -> None:
    """Tuple-unpacked loop targets (enumerate) are tracked individually."""
    test_list = [10, 20, 30]
    sum_so_far = 0
    with AccumulationTable(["sum_so_far"]) as table:
        for index, item in enumerate(test_list):
            sum_so_far = sum_so_far + item
    assert table.loop_variables == {"index": ["N/A", 0, 1, 2], "item": ["N/A", 10, 20, 30]}
    assert table.loop_accumulators == {"sum_so_far": [0, 10, 30, 60]}
def test_two_loop_vars_two_accumulators() -> None:
    """dict.items() unpacking with two accumulators tracked per iteration."""
    test_dict = {1: "I lo", 2: "ve CS", 3: "C110"}
    keys_so_far = 0
    values_so_far = ""
    with AccumulationTable(["keys_so_far", "values_so_far"]) as table:
        for key, value in test_dict.items():
            keys_so_far = keys_so_far + key
            values_so_far = values_so_far + value
    assert table.loop_variables == {
        "key": ["N/A", 1, 2, 3],
        "value": ["N/A", "I lo", "ve CS", "C110"],
    }
    assert table.loop_accumulators == {
        "keys_so_far": [0, 1, 3, 6],
        "values_so_far": ["", "I lo", "I love CS", "I love CSC110"],
    }
5,049 | fromfd | #
# Module to allow connection and socket objects to be transferred
# between processes
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = []
import os
import sys
import socket
import threading
import _multiprocessing
from multiprocessing import current_process
from multiprocessing.forking import Popen, duplicate, close, ForkingPickler
from multiprocessing.util import register_after_fork, debug, sub_debug
from multiprocessing.connection import Client, Listener
#
#
#
# Pickling connections is only possible when handles can be transferred:
# via handle duplication on Windows or fd-passing (recvfd) on Unix.
if not(sys.platform == 'win32' or hasattr(_multiprocessing, 'recvfd')):
    raise ImportError('pickling of connections not supported')
#
# Platform specific definitions
#
if sys.platform == 'win32':
    import _subprocess
    from _multiprocessing import win32
    def send_handle(conn, handle, destination_pid):
        # Duplicate the handle directly into the destination process, then
        # tell that process the value of its new handle over `conn`.
        process_handle = win32.OpenProcess(
            win32.PROCESS_ALL_ACCESS, False, destination_pid
        )
        try:
            new_handle = duplicate(handle, process_handle)
            conn.send(new_handle)
        finally:
            close(process_handle)
    def recv_handle(conn):
        # The handle was already duplicated into this process by the sender.
        return conn.recv()
else:
    def send_handle(conn, handle, destination_pid):
        # On Unix, pass the file descriptor over the connection's socket.
        _multiprocessing.sendfd(conn.fileno(), handle)
    def recv_handle(conn):
        return _multiprocessing.recvfd(conn.fileno())
#
# Support for a per-process server thread which caches pickled handles
#
# Handles that have been pickled and are waiting to be collected by the
# receiving process; the server thread closes each after duplication.
_cache = set()
def _reset(obj):
    # Runs once at import and again in each child after fork: the parent's
    # cached handles and server thread do not survive the fork, so drop
    # everything and start with a fresh lock and no listener.
    global _lock, _listener, _cache
    for h in _cache:
        close(h)
    _cache.clear()
    _lock = threading.Lock()
    _listener = None
_reset(None)
register_after_fork(_reset, _reset)
def _get_listener():
    """Return the process-wide listener, creating it lazily.

    On first use this also starts a daemon thread running ``_serve`` which
    hands cached handles to other processes.  Double-checked under ``_lock``
    so concurrent callers create at most one listener.
    """
    global _listener
    if _listener is None:
        with _lock:
            if _listener is None:
                debug('starting listener and thread for sending handles')
                _listener = Listener(authkey=current_process().authkey)
                server_thread = threading.Thread(target=_serve)
                server_thread.daemon = True
                server_thread.start()
    return _listener
def _serve():
    """Server-thread loop: hand cached handles to requesting processes.

    Each request is a ``(handle, destination_pid)`` pair; the handle is
    removed from the cache, duplicated into the destination process, then
    closed locally.
    """
    from .util import is_exiting, sub_warning
    while 1:
        try:
            conn = _listener.accept()
            handle_wanted, destination_pid = conn.recv()
            _cache.remove(handle_wanted)
            send_handle(conn, handle_wanted, destination_pid)
            close(handle_wanted)
            conn.close()
        except:
            # Deliberate bare except: a failure serving one request must not
            # kill the shared server thread; only warn if not shutting down.
            if not is_exiting():
                import traceback
                sub_warning(
                    'thread for sharing handles raised exception :\n' +
                    '-'*79 + '\n' + traceback.format_exc() + '-'*79
                    )
#
# Functions to be used for pickling/unpickling objects with handles
#
def reduce_handle(handle):
    """Pickle support: turn an OS handle into a transferable triple."""
    if Popen.thread_is_spawning():
        # A child is being spawned right now; the handle can be inherited.
        return (None, Popen.duplicate_for_child(handle), True)
    duped_handle = duplicate(handle)
    _cache.add(duped_handle)
    sub_debug('reducing handle %d', handle)
    return (_get_listener().address, duped_handle, False)
def rebuild_handle(pickled_data):
    """Unpickle support: reconstruct a handle from reduce_handle()'s triple."""
    address, handle, inherited = pickled_data
    if inherited:
        # Already duplicated into this process at spawn time.
        return handle
    sub_debug('rebuilding handle %d', handle)
    server_conn = Client(address, authkey=current_process().authkey)
    server_conn.send((handle, os.getpid()))
    fresh_handle = recv_handle(server_conn)
    server_conn.close()
    return fresh_handle
#
# Register `_multiprocessing.Connection` with `ForkingPickler`
#
def reduce_connection(conn):
    """Pickle support for _multiprocessing.Connection objects."""
    return rebuild_connection, (
        reduce_handle(conn.fileno()), conn.readable, conn.writable
    )
def rebuild_connection(reduced_handle, readable, writable):
    """Recreate a Connection from its pickled representation."""
    fd = rebuild_handle(reduced_handle)
    return _multiprocessing.Connection(fd, readable=readable, writable=writable)
ForkingPickler.register(_multiprocessing.Connection, reduce_connection)
#
# Register `socket.socket` with `ForkingPickler`
#
def METHOD_NAME(fd, family, type_, proto=0):
    """Wrap `fd` in a user-level socket.socket object."""
    sock = socket.METHOD_NAME(fd, family, type_, proto)
    if sock.__class__ is not socket.socket:
        # Promote the low-level socket object to the user-level wrapper class.
        sock = socket.socket(_sock=sock)
    return sock
def reduce_socket(s):
    """Pickle support for socket.socket objects."""
    return rebuild_socket, (
        reduce_handle(s.fileno()), s.family, s.type, s.proto
    )
def rebuild_socket(reduced_handle, family, type_, proto):
    """Recreate a socket from its pickled representation."""
    fd = rebuild_handle(reduced_handle)
    new_sock = METHOD_NAME(fd, family, type_, proto)
    close(fd)
    return new_sock
ForkingPickler.register(socket.socket, reduce_socket)
#
# Register `_multiprocessing.PipeConnection` with `ForkingPickler`
#
if sys.platform == 'win32':
    def reduce_pipe_connection(conn):
        """Pickle support for _multiprocessing.PipeConnection objects."""
        rh = reduce_handle(conn.fileno())
        return rebuild_pipe_connection, (rh, conn.readable, conn.writable)
    def rebuild_pipe_connection(reduced_handle, readable, writable):
        """Recreate a PipeConnection from its pickled representation."""
        handle = rebuild_handle(reduced_handle)
        return _multiprocessing.PipeConnection(
            handle, readable=readable, writable=writable
            )
    # Registration must stay inside the win32 guard: PipeConnection and the
    # reducer above only exist on Windows; at top level this line would raise
    # NameError on every other platform when the module is imported.
    ForkingPickler.register(_multiprocessing.PipeConnection, reduce_pipe_connection)
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import json
import socket
from abc import ABC, abstractmethod
from datetime import datetime, timedelta
from functools import partial
from typing import Any, Callable, Dict, Iterator, Mapping, Optional, Sequence
import backoff
import pendulum
import pytz
from airbyte_cdk.entrypoint import logger
from google.oauth2 import service_account
from googleapiclient.discovery import Resource, build
from googleapiclient.errors import HttpError as GoogleApiHttpError
from .utils import rate_limit_handling
SCOPES = ["https://www.googleapis.com/auth/admin.reports.audit.readonly", "https://www.googleapis.com/auth/admin.reports.usage.readonly"]
class API:
    """Thin authenticated wrapper around the Google Admin SDK Reports API."""

    def __init__(self, credentials_json: str, email: str, lookback: Optional[int] = None):
        self._creds = None
        self._credentials_json = credentials_json
        self._admin_email = email
        self._resource = None
        self.lookback = lookback

    def METHOD_NAME(self) -> Dict:
        """Parse the service-account credentials JSON string into a dict."""
        return json.loads(self._credentials_json)

    def _obtain_creds(self) -> service_account.Credentials:
        """Build service-account credentials delegated to the admin email."""
        creds = service_account.Credentials.from_service_account_info(
            self.METHOD_NAME(), scopes=SCOPES
        )
        self._creds = creds.with_subject(self._admin_email)

    def _construct_resource(self) -> Resource:
        """Build the Reports API service object, obtaining creds on demand."""
        if not self._creds:
            self._obtain_creds()
        return build("admin", "reports_v1", credentials=self._creds)

    def _get_resource(self, name: str):
        """Return the named sub-resource (e.g. ``activities``) of the service."""
        return getattr(self._construct_resource(), name)

    @backoff.on_exception(backoff.expo, (GoogleApiHttpError, socket.timeout), max_tries=7, giveup=rate_limit_handling)
    def get(self, name: str, params: Dict = None) -> Dict:
        """List records from the named resource, retrying transient failures."""
        if not self._resource:
            self._resource = self._get_resource(name)
        return self._resource().list(**params).execute()
class StreamAPI(ABC):
    """Base class for Reports API streams; handles page-token pagination."""

    results_per_page = 100

    def __init__(self, api: API, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._api = api
        self._start_time = None
        if self._api.lookback:
            # Anchor the stream `lookback` days before now, expressed in UTC.
            anchor = datetime.utcnow() - timedelta(self._api.lookback)
            self._start_time = anchor.replace(tzinfo=pytz.UTC).isoformat()

    @property
    @abstractmethod
    def name(self):
        """Name of the stream"""

    def _api_get(self, resource: str, params: Dict = None):
        return self._api.get(resource, params=params)

    @abstractmethod
    def list(self, fields: Sequence[str] = None) -> Iterator[dict]:
        """Iterate over entities"""

    @abstractmethod
    def process_response(self, response: Dict) -> Iterator[dict]:
        """Process Google Workspace Admin SDK Reports API response"""

    def read(self, getter: Callable, params: Dict = None) -> Iterator:
        """Yield records from every page, following ``nextPageToken``."""
        query = params or {}
        query["maxResults"] = self.results_per_page
        while True:
            page = getter(params={**query})
            yield from self.process_response(page)
            if "nextPageToken" not in page:
                break
            query["pageToken"] = page["nextPageToken"]
class IncrementalStreamAPI(StreamAPI, ABC):
    """Stream that supports state and incremental read"""
    # Record field used as the incremental cursor.
    state_pk = "time"
    @property
    def state(self) -> Optional[Mapping[str, Any]]:
        """Current state, if wasn't set return None"""
        if self._state:
            return {self.state_pk: self._state.isoformat()}
        return None
    @state.setter
    def state(self, value):
        # Restoring state also moves the query window start forward so the
        # next read only requests records after the saved bookmark.
        self._state = pendulum.parse(value[self.state_pk])
        self._start_time = self._state.to_iso8601_string()
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._state = None
    def read(self, getter: Callable, params: Mapping[str, Any] = None) -> Iterator:
        """Update cursor(state)"""
        params = params or {}
        cursor = None
        for record in super().read(getter, params):
            "Report API return records from newest to oldest"
            # Because of that ordering, the first record seen is the newest
            # one and becomes the candidate cursor.
            if not cursor:
                cursor = pendulum.parse(record[self.state_pk])
            record[self.state_pk] = pendulum.parse(record[self.state_pk]).isoformat()
            yield record
        if cursor:
            # Never move the bookmark backwards.
            new_state = max(cursor, self._state) if self._state else cursor
            if new_state != self._state:
                logger.info(f"Advancing bookmark for {self.name} stream from {self._state} to {new_state}")
                self._state = new_state
class ActivitiesAPI(IncrementalStreamAPI):
    """Base stream over the ``activities`` endpoint for one application."""

    application_name = None

    def get_params(self) -> Dict:
        """Build query parameters for the activities list call."""
        query = {"userKey": "all", "applicationName": self.application_name}
        if self._start_time:
            query["startTime"] = self._start_time
        return query

    def process_response(self, response: Dict) -> Iterator[dict]:
        """Yield activities, lifting the nested id.time up to the top level."""
        for activity in response.get("items", []):
            activity_id = activity.get("id", {})
            if "time" in activity_id:
                # place time property in top level
                activity["time"] = activity_id["time"]
            yield activity

    def list(self, fields: Sequence[str] = None) -> Iterator[dict]:
        yield from self.read(
            partial(self._api_get, resource="activities"), params=self.get_params()
        )
class AdminAPI(ActivitiesAPI):
    # Activity stream queried with applicationName="admin".
    name = "Admin"
    application_name = "admin"
class DriveAPI(ActivitiesAPI):
    # Activity stream queried with applicationName="drive".
    name = "Drive"
    application_name = "drive"
class LoginsAPI(ActivitiesAPI):
    # Activity stream queried with applicationName="login".
    name = "Logins"
    application_name = "login"
class MeetAPI(ActivitiesAPI):
    # Activity stream queried with applicationName="meet".
    name = "Meet"
    application_name = "meet"
class MobileAPI(ActivitiesAPI):
    # Activity stream queried with applicationName="mobile".
    name = "Mobile"
    application_name = "mobile"
class OAuthTokensAPI(ActivitiesAPI):
    # Activity stream queried with applicationName="token".
    name = "OAuth Tokens"
    application_name = "token"
__author__ = "sibirrer"
import numpy as np
import numpy.testing as npt
import pytest
from lenstronomy.Cosmo.lens_cosmo import LensCosmo
from lenstronomy.Util import util
class TestLensCosmo(object):
    """Regression tests for the LensCosmo cosmography/unit-conversion routines."""
    def METHOD_NAME(self):
        # Fixture: deflector at z=0.8, source at z=3.0, flat LCDM cosmology.
        z_L = 0.8
        z_S = 3.0
        from astropy.cosmology import FlatLambdaCDM
        cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05)
        self.lensCosmo = LensCosmo(z_L, z_S, cosmo=cosmo)
    def test_ang_dist(self):
        # Angular diameter distances to source, deflector, and between them.
        npt.assert_almost_equal(self.lensCosmo.ds, 1588.9213590743666, decimal=8)
        npt.assert_almost_equal(self.lensCosmo.dd, 1548.7055203661785, decimal=8)
        npt.assert_almost_equal(self.lensCosmo.dds, 892.0038749095863, decimal=8)
    def test_epsilon_crit(self):
        npt.assert_almost_equal(self.lensCosmo.sigma_crit / 1.9121e15, 1, decimal=3)
    def test_arcsec2phys(self):
        arcsec = np.array([1, 2])  # pixel coordinate from center
        physcoord = self.lensCosmo.arcsec2phys_lens(arcsec)
        npt.assert_almost_equal(physcoord[0], 0.0075083362428338641, decimal=8)
        npt.assert_almost_equal(physcoord[1], 0.015016672485667728, decimal=8)
        physcoord = self.lensCosmo.arcsec2phys_source(arcsec)
        npt.assert_almost_equal(physcoord[0], 0.007703308130864105, decimal=8)
        npt.assert_almost_equal(physcoord[1], 0.01540661626172821, decimal=8)
    def test_phys2arcsec_lens(self):
        # Round trip: phys -> arcsec -> phys must recover the input.
        phys = 1.0
        arc_sec = self.lensCosmo.phys2arcsec_lens(phys)
        phys_new = self.lensCosmo.arcsec2phys_lens(arc_sec)
        npt.assert_almost_equal(phys_new, phys, decimal=8)
    def test_mass_in_phi_E(self):
        phi_E = 1.5
        mass = self.lensCosmo.mass_in_theta_E(phi_E)
        npt.assert_almost_equal(mass, 761967261292.6725, decimal=2)
    def test_kappa2proj_mass(self):
        kappa = 0.5
        mass = self.lensCosmo.kappa2proj_mass(kappa)
        npt.assert_almost_equal(mass, kappa * self.lensCosmo.sigma_crit, decimal=3)
    def test_mass_in_coin(self):
        theta_E = 1.0
        m_coin = self.lensCosmo.mass_in_coin(theta_E)
        npt.assert_almost_equal(m_coin, 165279526936.52194, decimal=0)
    def test_D_dt_model(self):
        D_dt = self.lensCosmo.ddt
        npt.assert_almost_equal(D_dt, 4965.660384441859, decimal=8)
    def test_nfw_angle2physical(self):
        Rs_angle = 6.0
        alpha_Rs = 1.0
        rho0, Rs, c, r200, M200 = self.lensCosmo.nfw_angle2physical(Rs_angle, alpha_Rs)
        # NFW definition: r200 = concentration * scale radius.
        assert Rs * c == r200
    def test_nfw_physical2angle(self):
        # Round trip: (M, c) -> angles -> (M, c) must recover the input.
        M = 10.0**13.5
        c = 4
        Rs_angle, alpha_Rs = self.lensCosmo.nfw_physical2angle(M, c)
        rho0, Rs, c_out, r200, M200 = self.lensCosmo.nfw_angle2physical(
            Rs_angle, alpha_Rs
        )
        npt.assert_almost_equal(c_out, c, decimal=3)
        npt.assert_almost_equal(np.log10(M200), np.log10(M), decimal=4)
    def test_sis_theta_E2sigma_v(self):
        # Round trip through the SIS Einstein-radius/velocity-dispersion pair.
        theta_E = 2.0
        sigma_v = self.lensCosmo.sis_theta_E2sigma_v(theta_E)
        theta_E_out = self.lensCosmo.sis_sigma_v2theta_E(sigma_v)
        npt.assert_almost_equal(theta_E_out, theta_E, decimal=5)
    def test_fermat2delays(self):
        # Round trip between Fermat potential and time delay.
        fermat_pot = 0.5
        dt_days = self.lensCosmo.time_delay_units(fermat_pot)
        fermat_pot_out = self.lensCosmo.time_delay2fermat_pot(dt_days)
        npt.assert_almost_equal(fermat_pot, fermat_pot_out, decimal=10)
    def test_uldm_angular2phys(self):
        kappa_0, theta_c = 0.1, 3
        mlog10, Mlog10 = self.lensCosmo.uldm_angular2phys(kappa_0, theta_c)
        npt.assert_almost_equal(mlog10, -24.3610006, decimal=5)
        npt.assert_almost_equal(Mlog10, 11.7195843, decimal=5)
    def test_uldm_mphys2angular(self):
        # Round trip through the ULDM physical/angular conversions.
        m_log10, M_log10 = -24, 11
        kappa_0, theta_c = self.lensCosmo.uldm_mphys2angular(m_log10, M_log10)
        mcheck, Mcheck = self.lensCosmo.uldm_angular2phys(kappa_0, theta_c)
        npt.assert_almost_equal(mcheck, m_log10, decimal=4)
        npt.assert_almost_equal(Mcheck, M_log10, decimal=4)
    def test_a_z(self):
        # Scale factor a = 1 / (1 + z).
        a = self.lensCosmo.background.a_z(z=1)
        npt.assert_almost_equal(a, 0.5)
    def test_sersic_m_star2k_eff(self):
        m_star = 10**11.5
        R_sersic = 1
        n_sersic = 4
        k_eff = self.lensCosmo.sersic_m_star2k_eff(m_star, R_sersic, n_sersic)
        npt.assert_almost_equal(k_eff, 0.1294327891669961, decimal=5)
        m_star_out = self.lensCosmo.sersic_k_eff2m_star(k_eff, R_sersic, n_sersic)
        npt.assert_almost_equal(m_star_out, m_star, decimal=6)
    def test_hernquist_angular2phys(self):
        m_star = 10**10  # in M_sun
        rs = 0.01  # in Mpc
        # test bijective transformation
        sigma0, rs_angle = self.lensCosmo.hernquist_phys2angular(mass=m_star, rs=rs)
        m_star_new, rs_new = self.lensCosmo.hernquist_angular2phys(
            sigma0=sigma0, rs_angle=rs_angle
        )
        npt.assert_almost_equal(m_star_new, m_star, decimal=1)
        npt.assert_almost_equal(rs_new, rs, decimal=8)
    def test_hernquist_mass_normalization(self):
        m_star = 10**10  # in M_sun
        rs = 0.01  # in Mpc
        # test bijective transformation
        sigma0, rs_angle = self.lensCosmo.hernquist_phys2angular(mass=m_star, rs=rs)
        # test mass integrals
        # make large grid
        delta_pix = rs_angle / 30.0
        x, y = util.make_grid(numPix=501, deltapix=delta_pix)
        # compute convergence
        from lenstronomy.LensModel.lens_model import LensModel
        lens_model = LensModel(lens_model_list=["HERNQUIST"])
        kwargs = [{"sigma0": sigma0, "Rs": rs_angle, "center_x": 0, "center_y": 0}]
        kappa = lens_model.kappa(x, y, kwargs)
        # sum up convergence
        kappa_tot = np.sum(kappa) * delta_pix**2
        # transform to mass
        mass_tot = kappa_tot * self.lensCosmo.sigma_crit_angle
        # compare
        npt.assert_almost_equal(mass_tot / m_star, 1, decimal=1)
# Allow running this test module directly via pytest's own runner.
if __name__ == "__main__":
    pytest.main()
from mpfmc.tests.MpfMcTestCase import MpfMcTestCase
class TestBitmapFonts(MpfMcTestCase):
    """Verifies bitmap font assets load with the expected font metrics."""
    def get_machine_path(self):
        # Machine folder containing the bitmap font assets under test.
        return 'tests/machine_files/bitmap_fonts'
    def get_config_file(self):
        return 'test_bitmap_fonts.yaml'
    def METHOD_NAME(self):
        # test that the bitmap_fonts asset class gets built correctly
        self.assertTrue(hasattr(self.mc, 'bitmap_fonts'))
        # Monospaced font with simple descriptor list
        self.assertIn('F1fuv', self.mc.bitmap_fonts)
        f1fuv_font = self.mc.bitmap_fonts['F1fuv']
        self.assertIsNotNone(f1fuv_font)
        self.assertIsNotNone(f1fuv_font.bitmap_font)
        # Test the font descriptor list calculations
        self.assertEqual(f1fuv_font.bitmap_font.scale_w, 801)
        self.assertEqual(f1fuv_font.bitmap_font.scale_h, 300)
        self.assertEqual(f1fuv_font.bitmap_font.line_height, 50)
        self.assertEqual(f1fuv_font.bitmap_font.base, 50)
        self.assertTrue(len(f1fuv_font.bitmap_font.get_characters()), 95)
        # Characters are indexed by code point (57 = '9', 90 = 'Z', 122 = 'z').
        self.assertEqual(f1fuv_font.bitmap_font.get_characters()[57].id, 57)
        self.assertEqual(f1fuv_font.bitmap_font.get_characters()[57].rect["w"], 50)
        self.assertEqual(f1fuv_font.bitmap_font.get_characters()[57].rect["h"], 50)
        self.assertEqual(f1fuv_font.bitmap_font.get_characters()[57].xadvance, 50)
        self.assertEqual(f1fuv_font.bitmap_font.get_characters()[57].xoffset, 0)
        self.assertEqual(f1fuv_font.bitmap_font.get_characters()[57].yoffset, 0)
        self.assertEqual(f1fuv_font.bitmap_font.get_characters()[57].rect["x"], 450)
        self.assertEqual(f1fuv_font.bitmap_font.get_characters()[57].rect["y"], 50)
        self.assertEqual(f1fuv_font.bitmap_font.get_characters()[90].rect["x"], 500)
        self.assertEqual(f1fuv_font.bitmap_font.get_characters()[90].rect["y"], 150)
        self.assertEqual(f1fuv_font.bitmap_font.get_characters()[122].rect["x"], 500)
        self.assertEqual(f1fuv_font.bitmap_font.get_characters()[122].rect["y"], 250)
        # Test the extent calculations
        self.assertEqual(f1fuv_font.bitmap_font.get_extents("testing"), (350, 50))
        self.assertEqual(f1fuv_font.bitmap_font.get_extents("more testing"), (600, 50))
        self.assertEqual(f1fuv_font.bitmap_font.get_ascent(), 50)
        self.assertEqual(f1fuv_font.bitmap_font.get_descent(), 0)
        # Variable width font (with xml descriptor file)
        self.assertIn('test_font', self.mc.bitmap_fonts)
        test_font = self.mc.bitmap_fonts['test_font']
        self.assertIsNotNone(test_font)
        self.assertIsNotNone(test_font.bitmap_font)
        self.assertEqual(test_font.bitmap_font.scale_w, 361)
        self.assertEqual(test_font.bitmap_font.scale_h, 512)
        self.assertEqual(test_font.bitmap_font.line_height, 80)
        self.assertEqual(test_font.bitmap_font.base, 57)
        self.assertTrue(len(test_font.bitmap_font.get_characters()), 80)
        self.assertEqual(test_font.bitmap_font.get_characters()[122].id, 122)
        self.assertEqual(test_font.bitmap_font.get_characters()[122].rect["w"], 35)
        self.assertEqual(test_font.bitmap_font.get_characters()[122].rect["h"], 39)
        self.assertEqual(test_font.bitmap_font.get_characters()[122].xadvance, 36)
        self.assertEqual(test_font.bitmap_font.get_characters()[122].xoffset, 1)
        self.assertEqual(test_font.bitmap_font.get_characters()[122].yoffset, 19)
        self.assertEqual(test_font.bitmap_font.get_characters()[122].rect["x"], 58)
        self.assertEqual(test_font.bitmap_font.get_characters()[122].rect["y"], 273)
        # Variable width font (with text descriptor file)
        self.assertIn('test_font_2', self.mc.bitmap_fonts)
        test_font_2 = self.mc.bitmap_fonts['test_font_2']
        self.assertIsNotNone(test_font_2)
        self.assertIsNotNone(test_font_2.bitmap_font)
        self.assertEqual(test_font_2.bitmap_font.scale_w, 330)
        self.assertEqual(test_font_2.bitmap_font.scale_h, 511)
        self.assertEqual(test_font_2.bitmap_font.line_height, 67)
        self.assertEqual(test_font_2.bitmap_font.base, 47)
        self.assertTrue(len(test_font_2.bitmap_font.get_characters()), 80)
        self.assertEqual(test_font_2.bitmap_font.get_characters()[122].id, 122)
        self.assertEqual(test_font_2.bitmap_font.get_characters()[122].rect["w"], 35)
        self.assertEqual(test_font_2.bitmap_font.get_characters()[122].rect["h"], 39)
        self.assertEqual(test_font_2.bitmap_font.get_characters()[122].xadvance, 30)
        self.assertEqual(test_font_2.bitmap_font.get_characters()[122].xoffset, 1)
        self.assertEqual(test_font_2.bitmap_font.get_characters()[122].yoffset, 16)
        self.assertEqual(test_font_2.bitmap_font.get_characters()[122].rect["x"], 80)
        self.assertEqual(test_font_2.bitmap_font.get_characters()[122].rect["y"], 438)
    def test_bitmap_font_text(self):
        # Very basic test
        self.mc.events.post('static_text')
        self.advance_real_time(3)
#!/usr/bin/python3
#
# image-minimizer: removes files and packages on the filesystem
#
# Copyright 2007-2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import glob
import argparse
import os
import sys
import rpm
class ImageMinimizer:
    """Removes files and RPM packages beneath an installation root.

    A control file is parsed line by line: ``drop``/``keep`` operate on glob
    patterns of paths, ``droprpm``/``keeprpm`` on installed package name
    patterns.  ``keep*`` lines subtract from the corresponding drop set, so
    order in the control file matters.
    """
    def __init__(self, filename, root, dryrun, verbose):
        self.filename = filename
        self.prefix = root
        self.dryrun = dryrun
        self.verbose = verbose
        self.ts = None
        # Per-instance state.  These used to be mutable class attributes,
        # which would be shared (and accumulate) across instances.
        self.drops = set()
        self.visited = set()
        self.drops_rpm = set()
    # Recursively adds all files and directories.
    # This is done because globbing does not allow
    # ** for arbitrary nesting.
    def add_directory(self, files, dirname):
        """Add every file below `dirname` to `files`, tracking visited dirs."""
        self.visited.add(dirname)
        for root, dirs, items in os.walk(dirname):
            for d in dirs:
                self.visited.add(os.path.join(root, d))
            for name in items:
                files.add(os.path.join(root, name))
    def add_pattern(self, files, pattern):
        """Add all paths matching the glob `pattern` to `files`."""
        matches = glob.glob(pattern)
        if self.verbose and not matches:
            print("%s file not found" % pattern)
        for match in matches:
            if os.path.isdir(match):
                self.add_directory(files, match)
            else:
                files.add(match)
    def METHOD_NAME(self, rpms, pattern):
        """Add names of installed packages matching `pattern` to `rpms`."""
        if self.ts is None:
            if self.prefix is None:
                raise RuntimeError('Must specify installation root for droprpm/keeprpm')
            self.ts = rpm.TransactionSet(self.prefix)
        mi = self.ts.dbMatch()
        mi.pattern('name', rpm.RPMMIRE_GLOB, pattern)
        not_found = True
        for hdr in mi:
            not_found = False
            rpms.add(hdr['name'])
        if self.verbose and not_found:
            print("%s package not found" % pattern)
    # Parses each line in the file
    def parse_line(self, line):
        """Parse one control-file line and update the drop/keep sets."""
        command = ""
        pattern = ""
        tok = line.split(None, 1)
        if len(tok) > 0:
            command = tok[0].lower()
        if len(tok) > 1:
            pattern = tok[1].strip()
        # Strip out all the comments and blank lines
        if command.startswith('#') or command == '':
            return
        if command == 'keep':
            if self.prefix is not None:
                pattern = os.path.join(self.prefix, pattern.lstrip('/'))
            keeps = set()
            self.add_pattern(keeps, pattern)
            # "keep" rescues matching paths from a previous "drop".
            self.drops.difference_update(keeps)
        elif command == 'drop':
            if self.prefix is not None:
                pattern = os.path.join(self.prefix, pattern.lstrip('/'))
            self.add_pattern(self.drops, pattern)
        elif command == 'keeprpm':
            keeps_rpm = set()
            self.METHOD_NAME(keeps_rpm, pattern)
            self.drops_rpm.difference_update(keeps_rpm)
        elif command == 'droprpm':
            self.METHOD_NAME(self.drops_rpm, pattern)
        else:
            raise RuntimeError('Unknown Command: ' + command)
    def remove(self):
        """Delete dropped files, then prune any now-empty directories."""
        for tag in sorted(self.drops, reverse=True):
            self.visited.add(os.path.split(tag)[0])
            if os.path.isdir(tag):
                self.visited.add(tag)
            else:
                if self.dryrun or self.verbose:
                    print("rm %s" % tag)
                if not self.dryrun:
                    os.remove(tag)
        #remove all empty directory. Every 8k counts!
        for d in sorted(self.visited, reverse=True):
            if len(os.listdir(d)) == 0:
                if self.dryrun or self.verbose:
                    print("rm -rf %s" % d)
                if not self.dryrun:
                    os.rmdir(d)
    def remove_rpm(self):
        """Erase the dropped RPM packages (dependency checks are skipped)."""
        def run_callback(reason, amount, total, key, client_data):
            if self.verbose and reason == rpm.RPMCALLBACK_UNINST_STOP:
                print("%s erased" % key)
        if len(self.drops_rpm) == 0:
            return
        for pkg in self.drops_rpm:
            if self.verbose:
                print("erasing: %s " % pkg)
            self.ts.addErase(pkg)
        if not self.dryrun:
            # skip ts.check(), equivalent to --nodeps
            self.ts.run(run_callback, "erase")
    def filter(self):
        """Process the control file, then apply file and RPM removals."""
        if not os.path.isdir(self.prefix):
            # Fixed: the original f-string was missing the closing quote.
            raise FileNotFoundError(f"No such directory: '{self.prefix}'")
        with open(self.filename) as f:
            for line in f:
                self.parse_line(line.strip())
        self.remove()
        self.remove_rpm()
def parse_options():
    """Build the CLI parser and parse sys.argv for the image minimizer."""
    parser = argparse.ArgumentParser(description="Image Minimizer")
    # NOTE(review): 'dry_run' here is never read; the --dryrun option's dest
    # is 'dryrun', so this default looks vestigial — confirm before removing.
    parser.set_defaults(root=os.environ.get('INSTALL_ROOT', '/mnt/sysimage/'), dry_run=False)
    parser.add_argument(
        "-i", "--installroot", metavar="STRING", dest="root",
        help="Root path to prepend to all file patterns and installation root for RPM "
             "operations. Defaults to INSTALL_ROOT or /mnt/sysimage/")
    parser.add_argument(
        "--dryrun", action="store_true", dest="dryrun",
        help="If set, no filesystem changes are made.")
    parser.add_argument(
        "-v", "--verbose", action="store_true", dest="verbose",
        help="Display every action as it is performed.")
    parser.add_argument("filename", metavar="STRING", help="Filename to process")
    return parser.parse_args()
def main():
    """Entry point: parse arguments, run the minimizer, handle interrupts."""
    try:
        opts = parse_options()
        minimizer = ImageMinimizer(opts.filename, opts.root, opts.dryrun, opts.verbose)
        minimizer.filter()
    except SystemExit as e:
        # Propagate argparse's exit code unchanged.
        sys.exit(e.code)
    except KeyboardInterrupt:
        print("Aborted at user request")
if __name__ == "__main__":
    main()
# SPDX-License-Identifier: Apache-2.0
""" tf2onnx mapping functions for onnx ml domain. """
import logging
import numpy as np
from onnx import TensorProto
from onnx import numpy_helper
from tf2onnx import constants
from tf2onnx.handler import tf_op
from tf2onnx import utils
logger = logging.getLogger(__name__)
# pylint: disable=unused-argument,missing-docstring,unnecessary-pass
@tf_op("HashTableV2")
class HashTable:
@classmethod
def version_8(cls, ctx, node, **kwargs):
""" HashTable will be removed """
pass
@tf_op("LookupTableFindV2")
class LookupTableFind:
@classmethod
def version_8(cls, ctx, node, initialized_tables, **kwargs):
""" convert lookup to category mapper """
table_node = node.inputs[0]
while table_node.type == 'Identity':
table_node = table_node.inputs[0]
shared_name = table_node.get_attr_value("shared_name")
utils.make_sure(shared_name is not None, "Could not determine table shared name for node %s", node.name)
utils.make_sure(shared_name in initialized_tables, "Initialized table %s for node %s not found.",
shared_name, node.name)
default_node = node.inputs[2]
utils.make_sure(default_node.is_const(), "Default value of table lookup must be const.")
default_val_np = default_node.get_tensor_value(as_list=False)
default_val = default_node.get_tensor_value()
dtype = ctx.get_dtype(node.output[0])
in_dtype = ctx.get_dtype(node.input[1])
utils.make_sure(dtype == TensorProto.INT64 and in_dtype == TensorProto.STRING,
"Only lookup tables of type string->int64 are currently supported.")
cats_strings, cats_int64s = initialized_tables[shared_name]
shape = ctx.get_shape(node.input[1])
node_name = node.name
node_inputs = node.input
node_outputs = node.output
if node.inputs[1].is_const():
# Handle explicitly since const folding doesn't work for tables
key_np = node.inputs[1].get_tensor_value(as_list=False)
ctx.remove_node(node.name)
key_to_val = dict(zip(cats_strings, cats_int64s))
def METHOD_NAME(key):
return key_to_val.get(key.encode("UTF-8"), default_val_np)
lookup_result = np.vectorize(METHOD_NAME)(key_np)
onnx_tensor = numpy_helper.from_array(lookup_result, node_name)
ctx.make_node("Const", name=node_name, inputs=[], outputs=node_outputs,
attr={"value": onnx_tensor}, shapes=[lookup_result.shape], dtypes=[dtype])
else:
ctx.remove_node(node.name)
ctx.make_node("CategoryMapper", domain=constants.AI_ONNX_ML_DOMAIN,
name=node_name, inputs=[node_inputs[1]], outputs=node_outputs,
attr={'cats_int64s': cats_int64s, 'cats_strings': cats_strings, 'default_int64': default_val},
shapes=[shape], dtypes=[dtype])
customer_nodes = ctx.find_output_consumers(table_node.output[0])
if len(customer_nodes) == 0:
ctx.remove_node(table_node.name)
@tf_op("LookupTableSizeV2")
class LookupTableSize:
@classmethod
def version_1(cls, ctx, node, initialized_tables, **kwargs):
table_node = node.inputs[0]
while table_node.type == 'Identity':
table_node = table_node.inputs[0]
shared_name = table_node.get_attr_value("shared_name")
utils.make_sure(shared_name is not None, "Could not determine table shared name for node %s", node.name)
utils.make_sure(shared_name in initialized_tables, "Initialized table %s for node %s not found.",
shared_name, node.name)
keys, _ = initialized_tables[shared_name]
node_name = node.name
node_outputs = node.output
ctx.remove_node(node.name)
size_const = ctx.make_const(node_name, np.array(len(keys), dtype=np.int64))
ctx.replace_all_inputs(node_outputs[0], size_const.output[0])
customer_nodes = ctx.find_output_consumers(table_node.output[0])
if len(customer_nodes) == 0:
ctx.remove_node(table_node.name) |
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
class TestEncryptionConfiguration(unittest.TestCase):
    """Unit tests for EncryptionConfiguration (CMEK key name wrapper)."""
    KMS_KEY_NAME = "projects/1/locations/us/keyRings/1/cryptoKeys/1"
    @staticmethod
    def _get_target_class():
        # Imported lazily so the class under test is resolved at call time.
        from google.cloud.bigquery.encryption_configuration import (
            EncryptionConfiguration,
        )
        return EncryptionConfiguration
    def _make_one(self, *args, **kw):
        # Helper: construct an instance of the class under test.
        return self._get_target_class()(*args, **kw)
    def test_ctor_defaults(self):
        encryption_config = self._make_one()
        self.assertIsNone(encryption_config.kms_key_name)
    def test_ctor_with_key(self):
        encryption_config = self._make_one(kms_key_name=self.KMS_KEY_NAME)
        self.assertEqual(encryption_config.kms_key_name, self.KMS_KEY_NAME)
    def test_kms_key_name_setter(self):
        encryption_config = self._make_one()
        self.assertIsNone(encryption_config.kms_key_name)
        encryption_config.kms_key_name = self.KMS_KEY_NAME
        self.assertEqual(encryption_config.kms_key_name, self.KMS_KEY_NAME)
        encryption_config.kms_key_name = None
        self.assertIsNone(encryption_config.kms_key_name)
    def test_from_api_repr(self):
        RESOURCE = {"kmsKeyName": self.KMS_KEY_NAME}
        klass = self._get_target_class()
        encryption_config = klass.from_api_repr(RESOURCE)
        self.assertEqual(encryption_config.kms_key_name, self.KMS_KEY_NAME)
    def test_to_api_repr(self):
        encryption_config = self._make_one(kms_key_name=self.KMS_KEY_NAME)
        resource = encryption_config.to_api_repr()
        self.assertEqual(resource, {"kmsKeyName": self.KMS_KEY_NAME})
    def METHOD_NAME(self):
        encryption_config = self._make_one()
        other = object()
        self.assertNotEqual(encryption_config, other)
        # mock.ANY compares equal to any object, so __eq__ must still hold.
        self.assertEqual(encryption_config, mock.ANY)
    def test___eq___kms_key_name_mismatch(self):
        encryption_config = self._make_one()
        other = self._make_one(self.KMS_KEY_NAME)
        self.assertNotEqual(encryption_config, other)
    def test___eq___hit(self):
        encryption_config = self._make_one(self.KMS_KEY_NAME)
        other = self._make_one(self.KMS_KEY_NAME)
        self.assertEqual(encryption_config, other)
    def test___ne___wrong_type(self):
        encryption_config = self._make_one()
        other = object()
        self.assertNotEqual(encryption_config, other)
        self.assertEqual(encryption_config, mock.ANY)
    def test___ne___same_value(self):
        encryption_config1 = self._make_one(self.KMS_KEY_NAME)
        encryption_config2 = self._make_one(self.KMS_KEY_NAME)
        # unittest ``assertEqual`` uses ``==`` not ``!=``.
        comparison_val = encryption_config1 != encryption_config2
        self.assertFalse(comparison_val)
    def test___ne___different_values(self):
        encryption_config1 = self._make_one()
        encryption_config2 = self._make_one(self.KMS_KEY_NAME)
        self.assertNotEqual(encryption_config1, encryption_config2)
    def test___hash__set_equality(self):
        # Equal configs must hash equally so sets deduplicate them.
        encryption_config1 = self._make_one(self.KMS_KEY_NAME)
        encryption_config2 = self._make_one(self.KMS_KEY_NAME)
        set_one = {encryption_config1, encryption_config2}
        set_two = {encryption_config1, encryption_config2}
        self.assertEqual(set_one, set_two)
    def test___hash__not_equals(self):
        encryption_config1 = self._make_one()
        encryption_config2 = self._make_one(self.KMS_KEY_NAME)
        set_one = {encryption_config1}
        set_two = {encryption_config2}
        self.assertNotEqual(set_one, set_two)
    def test___repr__(self):
        encryption_config = self._make_one(self.KMS_KEY_NAME)
        expected = "EncryptionConfiguration({})".format(self.KMS_KEY_NAME)
        self.assertEqual(repr(encryption_config), expected)
# Copyright (C) 2011 by Brandon Invergo (b.invergo@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for PAML tools module."""
import unittest
import os
import sys
from Bio.Phylo.PAML import codeml, baseml, yn00
from Bio import MissingExternalDependencyError
def is_exe(filepath):
"""Test if a file is an executable."""
return os.path.exists(filepath) and os.access(filepath, os.X_OK)
def which(program):
    """Find the path to an executable.

    Searches every directory on the ``PATH`` environment variable and, on
    Windows, a number of likely PAML install locations under Program Files.
    Returns the full path of the first executable match, or None.
    """
    os_path = os.environ["PATH"].split(os.pathsep)
    if sys.platform == "win32":
        try:
            # This can vary depending on the Windows language.
            prog_files = os.environ["PROGRAMFILES"]
        except KeyError:
            prog_files = r"C:\Program Files"
        # For Windows, the user is instructed to move the programs to a folder
        # and then to add the folder to the system path. Just in case they didn't
        # do that, we can check for it in Program Files.
        likely_dirs = [
            "",  # Current dir
            prog_files,
            os.path.join(prog_files, "paml41"),
            os.path.join(prog_files, "paml43"),
            os.path.join(prog_files, "paml44"),
            os.path.join(prog_files, "paml45"),
        ] + sys.path
        os_path.extend(likely_dirs)
    # Bug fix: iterate the extended candidate list instead of re-splitting
    # os.environ["PATH"]; previously the Program Files fallbacks added above
    # were never actually searched.
    for path in os_path:
        exe_file = os.path.join(path, program)
        if is_exe(exe_file):
            return exe_file
    return None
# Find the PAML binaries on PATH; abort module import if any is missing so
# that these binary-dependent tests are skipped by the test runner.
if sys.platform == "win32":
    binaries = ["codeml.exe", "baseml.exe", "yn00.exe"]
else:
    binaries = ["codeml", "baseml", "yn00"]
for binary in binaries:
    if which(binary) is None:
        raise MissingExternalDependencyError(
            "Install PAML if you want to use the Bio.Phylo.PAML wrapper."
        )
class Common(unittest.TestCase):
    """Base class for PAML unit tests."""

    # Filenames to remove after each test; subclasses may append to this list.
    del_files = []

    def __del__(self):
        """Just in case tool creates some junk files, do a clean-up.

        NOTE(review): __del__ timing is interpreter-dependent; an
        addCleanup/tearDown hook would be more deterministic — confirm intent.
        """
        del_files = self.del_files
        for filename in del_files:
            if os.path.exists(filename):
                os.remove(filename)
class CodemlTest(Common):
    """Tests for PAML tool codeml."""

    def setUp(self):
        # Fresh wrapper instance for each test.
        self.cml = codeml.Codeml()

    def METHOD_NAME(self):
        """Check codeml runs, generates correct output, and is the correct version."""
        ctl_file = os.path.join("PAML", "Control_files", "codeml", "codeml.ctl")
        self.cml.read_ctl_file(ctl_file)
        self.cml.alignment = os.path.join("PAML", "Alignments", "alignment.phylip")
        self.cml.tree = os.path.join("PAML", "Trees", "species.tree")
        self.cml.out_file = os.path.join("PAML", "temp.out")
        self.cml.working_dir = os.path.join("PAML", "codeml_test")
        results = self.cml.run()
        # Lexicographic comparison of version strings against "4.0".
        self.assertGreater(results["version"], "4.0")
        self.assertIn("NSsites", results)
        self.assertEqual(len(results["NSsites"]), 1)
        self.assertEqual(len(results["NSsites"][0]), 5)
class BasemlTest(Common):
    """Tests for PAML tool baseml."""

    def setUp(self):
        # Fresh wrapper instance for each test.
        self.bml = baseml.Baseml()

    def testBasemlBinary(self):
        """Check baseml runs, generates correct output, and is the correct version."""
        ctl_file = os.path.join("PAML", "Control_files", "baseml", "baseml.ctl")
        self.bml.read_ctl_file(ctl_file)
        self.bml.alignment = os.path.join("PAML", "Alignments", "alignment.phylip")
        self.bml.tree = os.path.join("PAML", "Trees", "species.tree")
        self.bml.out_file = os.path.join("PAML", "temp.out")
        self.bml.working_dir = os.path.join("PAML", "baseml_test")
        results = self.bml.run()
        self.assertGreater(results["version"], "4.0")
        self.assertIn("parameters", results)
        self.assertEqual(len(results["parameters"]), 5)
class Yn00Test(Common):
    """Tests for PAML tool yn00."""

    def setUp(self):
        # Fresh wrapper instance for each test.
        self.yn = yn00.Yn00()

    def testYn00Binary(self):
        """Check yn00 binary runs and generates correct output.

        yn00 output does not specify the version number.
        """
        ctl_file = os.path.join("PAML", "Control_files", "yn00", "yn00.ctl")
        self.yn.read_ctl_file(ctl_file)
        self.yn.alignment = os.path.join("PAML", "Alignments", "alignment.phylip")
        self.yn.out_file = os.path.join("PAML", "temp.out")
        self.yn.working_dir = os.path.join("PAML", "yn00_test")
        results = self.yn.run()
        self.assertEqual(len(results), 5)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner) |
5,057 | format list | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Elastic Driver class."""
import json
from datetime import datetime
from typing import Any, Dict, Iterable, Optional, Tuple, Union
import pandas as pd
from ..._version import VERSION
from ...common.exceptions import MsticpyUserConfigError
from ...common.utility import check_kwargs, export
from ..core.query_defns import Formatters
from .driver_base import DriverBase, DriverProps, QuerySource
__version__ = VERSION
__author__ = "Neil Desai, Ian Hellen"
ELASTIC_CONNECT_ARGS: Dict[str, str] = {
# TBD - you may not need these - mainly for user
# help/error messages (see _get_connect_args)
}
_ELASTIC_REQUIRED_ARGS: Dict[str, str] = {
# TBD
}
@export
class ElasticDriver(DriverBase):
    """Driver to connect and query from Elastic Search."""

    def __init__(self, **kwargs):
        """Instantiate Elastic Driver."""
        super().__init__(**kwargs)
        self.service = None
        self._loaded = True
        self._connected = False
        self._debug = kwargs.get("debug", False)
        # Register the hooks the query layer uses to substitute datetimes,
        # lists and remaining parameters into query templates.
        self.set_driver_property(
            DriverProps.FORMATTERS,
            {
                Formatters.PARAM_HANDLER: self._custom_param_handler,
                Formatters.DATETIME: self._format_datetime,
                Formatters.LIST: self.METHOD_NAME,
            },
        )

    def connect(self, connection_str: Optional[str] = None, **kwargs):
        """
        Connect to Elastic cluster.

        Parameters
        ----------
        connection_str : Optional[str], optional
            Connection string with Elastic connection parameters

        Other Parameters
        ----------------
        kwargs :
            Connection parameters can be supplied as keyword parameters.

        Notes
        -----
        Default configuration is read from the DataProviders/Elastic
        section of msticpyconfig.yaml, if available.

        """
        # cs_dict = self._get_connect_args(connection_str, **kwargs)
        # TBD
        self._connected = True
        print("connected")

    def _get_connect_args(
        self, connection_str: Optional[str], **kwargs
    ) -> Dict[str, Any]:
        """Check and consolidate connection parameters."""
        cs_dict: Dict[str, Any] = {}
        # Fetch any config settings
        cs_dict.update(self._get_config_settings("Elastic"))
        # If a connection string - parse this and add to config
        if connection_str:
            cs_items = connection_str.split(";")
            cs_dict.update(
                {
                    cs_item.split("=")[0].strip(): cs_item.split("=")[1]
                    for cs_item in cs_items
                }
            )
        elif kwargs:
            # if connection args supplied as kwargs
            cs_dict.update(kwargs)
            check_kwargs(cs_dict, list(ELASTIC_CONNECT_ARGS.keys()))
        missing_args = set(_ELASTIC_REQUIRED_ARGS) - cs_dict.keys()
        if missing_args:
            raise MsticpyUserConfigError(
                "One or more connection parameters missing for Elastic connector",
                ", ".join(missing_args),
                f"Required parameters are {', '.join(_ELASTIC_REQUIRED_ARGS)}",
                "All parameters:",
                *[f"{arg}: {desc}" for arg, desc in ELASTIC_CONNECT_ARGS.items()],
                title="no Elastic connection parameters",
            )
        return cs_dict

    def query(
        self, query: str, query_source: QuerySource = None, **kwargs
    ) -> Union[pd.DataFrame, Any]:
        """
        Execute query and retrieve results.

        Parameters
        ----------
        query : str
            Elastic query to execute
        query_source : QuerySource
            The query definition object

        Other Parameters
        ----------------
        kwargs :
            Not used

        Returns
        -------
        Union[pd.DataFrame, Any]
            Query results in a dataframe.
            or query response if an error.

        """
        del query_source
        if not self._connected:
            raise self._create_not_connected_err("Elastic")
        # TBD
        # Run query and return results
        # Bug fix: was ``pd.DateFrame()`` (typo), which raised AttributeError.
        return pd.DataFrame()

    def query_with_results(self, query: str, **kwargs) -> Tuple[pd.DataFrame, Any]:
        """
        Execute query string and return DataFrame of results.

        Parameters
        ----------
        query : str
            Query to execute.

        Returns
        -------
        Union[pd.DataFrame,Any]
            A DataFrame (if successful) or
            the underlying provider result if an error occurs.

        """
        raise NotImplementedError(f"Not supported for {self.__class__.__name__}")

    # Parameter Formatting methods
    # If not needed, remove these and remove from self.formatters
    # dict in __init__
    @staticmethod
    def _format_datetime(date_time: datetime) -> str:
        """Return datetime-formatted string."""
        return f'"{date_time.isoformat(sep=" ")}"'

    @staticmethod
    def METHOD_NAME(param_list: Iterable[Any]) -> str:
        """Return formatted list parameter."""
        fmt_list = [f'"{item}"' for item in param_list]
        return ",".join(fmt_list)

    @staticmethod
    def _custom_param_handler(query: str, param_dict: Dict[str, Any]) -> str:
        """Replace parameters in query template for Elastic JSON queries."""
        query_dict = json.loads(query)
        # start/end become a range filter on @timestamp.
        start = param_dict.pop("start", None)
        end = param_dict.pop("end", None)
        if start or end:
            time_range = {
                "range": {"@timestamp": {"format": "strict_date_optional_time"}}
            }
            if start:
                time_range["range"]["@timestamp"]["gte"] = start
            if end:
                time_range["range"]["@timestamp"]["lte"] = end
            query_dict["query"]["bool"]["filter"].append(time_range)
        add_query_items = param_dict.pop("add_query_items", None)
        if add_query_items:
            # "add_query_items" expects additional custom filter parameters
            # as a Python dict (e.g. add_query_items={"match_phrase: {"field": "value"}})
            query_dict["query"]["bool"]["filter"].extend(add_query_items)
        # Any remaining parameters become match_phrase filters.
        if param_dict:
            filter_terms = [
                {"match_phrase": {field: value}} for field, value in param_dict.items()
            ]
            query_dict["query"]["bool"]["filter"].extend(filter_terms)
        return json.dumps(query_dict, indent=2)
5,058 | test location sync query | import param
import pytest
from panel.io.location import Location
from panel.util import edit_readonly
@pytest.fixture
def location():
    """Return a fresh Location with its readonly URL parts set to a blank base."""
    loc = Location()
    # edit_readonly temporarily allows assigning Location's readonly parameters.
    with edit_readonly(loc):
        loc.href = "http://localhost:5006"
        loc.hostname = "localhost"
        loc.pathname = ""
        loc.protocol = 'http'
        loc.search = ""
        loc.hash = ""
    return loc
class SyncParameterized(param.Parameterized):
    """Minimal Parameterized with one int and one str parameter for sync tests."""

    integer = param.Integer(default=None)
    string = param.String(default=None)
def test_location_update_query(location):
    """update_query merges new parameters into the existing query string."""
    location.update_query(a=1)
    assert location.search == "?a=1"
    location.update_query(b='c')
    assert location.search == "?a=1&b=c"
def test_location_sync_query_init(location):
    """sync pushes the object's current values into the query; unsync clears them."""
    p = SyncParameterized(integer=1, string='abc')
    location.sync(p)
    assert location.search == "?integer=1&string=abc"
    location.unsync(p)
    assert location._synced == []
    assert location.search == ""
def test_location_unsync(location):
    """After unsync, changes no longer propagate in either direction."""
    p = SyncParameterized(integer=1, string='abc')
    location.sync(p)
    assert location.search == "?integer=1&string=abc"
    location.unsync(p)
    assert location.search == ""
    # Query updates must not touch the object any more...
    location.update_query(integer=2, string='def')
    assert p.integer == 1
    assert p.string == "abc"
    # ...and object updates must not touch the query.
    p.integer = 3
    p.string = "ghi"
    assert location.search == "?integer=2&string=def"
def test_location_unsync_partial(location):
    """Unsyncing only some parameters keeps the rest synced in both directions."""
    p = SyncParameterized(integer=1, string='abc')
    location.sync(p)
    assert location.search == "?integer=1&string=abc"
    location.unsync(p, ['string'])
    assert location.search == "?integer=1"
    # 'integer' stays synced both ways; 'string' is detached.
    location.update_query(integer=2, string='def')
    assert p.integer == 2
    assert p.string == "abc"
    p.integer = 3
    p.string = "ghi"
    assert location.search == "?integer=3&string=def"
def test_location_sync_query_init_partial(location):
    """Only the listed parameters are reflected in the query string."""
    p = SyncParameterized(integer=1, string='abc')
    location.sync(p, ['integer'])
    assert location.search == "?integer=1"
    location.unsync(p)
    assert location._synced == []
def test_location_sync_query_init_rename(location):
    """A dict mapping renames parameters to different query-string keys."""
    p = SyncParameterized(integer=1, string='abc')
    location.sync(p, {'integer': 'int', 'string': 'str'})
    assert location.search == "?int=1&str=abc"
    location.unsync(p)
    assert location._synced == []
    assert location.search == ""
def METHOD_NAME(location):
    """Parameter changes made after sync are mirrored into the query string."""
    p = SyncParameterized()
    location.sync(p)
    p.integer = 2
    assert location.search == "?integer=2"
    location.unsync(p)
    assert location._synced == []
    assert location.search == ""
def test_location_sync_param_init(location):
    """An existing query string initializes the object's parameters on sync."""
    p = SyncParameterized()
    location.search = "?integer=1&string=abc"
    location.sync(p)
    assert p.integer == 1
    assert p.string == "abc"
    location.unsync(p)
    assert location._synced == []
    assert location.search == ""
def test_location_sync_on_error(location):
    """The on_error callback receives the query values that failed to apply."""
    p = SyncParameterized(string='abc')
    changes = []
    def on_error(change):
        changes.append(change)
    location.sync(p, on_error=on_error)
    # 'a' is not a valid Integer value, so it is reported via on_error.
    location.search = "?integer=a&string=abc"
    assert changes == [{'integer': 'a'}]
    location.unsync(p)
    assert location._synced == []
    assert location.search == ""
def test_location_sync_param_init_partial(location):
    """Only listed parameters are read from (and later cleared out of) the query."""
    p = SyncParameterized()
    location.search = "?integer=1&string=abc"
    location.sync(p, ['integer'])
    assert p.integer == 1
    assert p.string is None
    location.unsync(p)
    assert location._synced == []
    # The unsynced 'string' key survives in the query string.
    assert location.search == "?string=abc"
def test_location_sync_param_init_rename(location):
    """Renamed query keys are mapped back onto the object's parameters."""
    p = SyncParameterized()
    location.search = "?int=1&str=abc"
    location.sync(p, {'integer': 'int', 'string': 'str'})
    assert p.integer == 1
    assert p.string == 'abc'
    location.unsync(p)
    assert location._synced == []
    assert location.search == ""
def test_location_sync_param_update(location):
    """Query-string updates after sync are written back to the object."""
    p = SyncParameterized()
    location.sync(p)
    location.search = "?integer=1&string=abc"
    assert p.integer == 1
    assert p.string == "abc"
    location.unsync(p)
    assert location._synced == []
    assert location.search == ""
5,059 | show | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from threading import current_thread
from tkinter import (BOTH, Button, END, Entry, Frame, Label, LEFT, Listbox, Tk,
Toplevel, W)
from typing import Any, Union
class TkDialog(Toplevel):
    """Base class for the simple Tk dialogs used by the Dialogs library.

    Subclasses customize behavior by overriding ``left_button`` /
    ``right_button`` and the ``_create_widget``, ``_validate_value``,
    ``_get_value`` and ``_get_right_button_value`` hooks.
    """

    left_button = 'OK'
    right_button = 'Cancel'

    def __init__(self, message, value=None, **config):
        self._prevent_execution_with_timeouts()
        self.root = self._get_root()
        # Maps accelerator characters to button callbacks (see _create_button).
        self._button_bindings = {}
        super().__init__(self.root)
        self._initialize_dialog()
        self.widget = self._create_body(message, value, **config)
        self._create_buttons()
        self._finalize_dialog()
        self._result = None

    def _prevent_execution_with_timeouts(self):
        # Non-main-thread Tk usage is only tolerated on Linux.
        if 'linux' not in sys.platform and current_thread().name != 'MainThread':
            raise RuntimeError('Dialogs library is not supported with '
                               'timeouts on Python on this platform.')

    def _get_root(self) -> Tk:
        # Hidden root window so only the dialog Toplevel is visible.
        root = Tk()
        root.withdraw()
        return root

    def _initialize_dialog(self):
        self.withdraw()    # Remove from display until finalized.
        self.title('Robot Framework')
        self.protocol("WM_DELETE_WINDOW", self._close)
        self.bind("<Escape>", self._close)
        # Only bind <Return> to OK if the subclass kept the default button;
        # input-style subclasses need <Return> for the entry widget.
        if self.left_button == TkDialog.left_button:
            self.bind("<Return>", self._left_button_clicked)

    def _finalize_dialog(self):
        """Size the dialog, center it on screen and give the widget focus."""
        self.update()    # Needed to get accurate dialog size.
        screen_width = self.winfo_screenwidth()
        screen_height = self.winfo_screenheight()
        min_width = screen_width // 6
        min_height = screen_height // 10
        width = max(self.winfo_reqwidth(), min_width)
        height = max(self.winfo_reqheight(), min_height)
        x = (screen_width - width) // 2
        y = (screen_height - height) // 2
        self.geometry(f'{width}x{height}+{x}+{y}')
        self.lift()
        self.deiconify()
        if self.widget:
            self.widget.focus_set()

    def _create_body(self, message, value, **config) -> Union[Entry, Listbox, None]:
        """Create the message label plus the subclass-specific input widget."""
        frame = Frame(self)
        max_width = self.winfo_screenwidth() // 2
        label = Label(frame, text=message, anchor=W, justify=LEFT, wraplength=max_width)
        label.pack(fill=BOTH)
        widget = self._create_widget(frame, value, **config)
        if widget:
            widget.pack(fill=BOTH)
        frame.pack(padx=5, pady=5, expand=1, fill=BOTH)
        return widget

    def _create_widget(self, frame, value) -> Union[Entry, Listbox, None]:
        # Default: message-only dialog with no input widget.
        return None

    def _create_buttons(self):
        frame = Frame(self)
        self._create_button(frame, self.left_button, self._left_button_clicked)
        self._create_button(frame, self.right_button, self._right_button_clicked)
        frame.pack()

    def _create_button(self, parent, label, callback):
        # A None/empty label means the button is omitted (e.g. MessageDialog).
        if label:
            button = Button(parent, text=label, width=10, command=callback, underline=0)
            button.pack(side=LEFT, padx=5, pady=5)
            # Bind the button's first letter (either case) as an accelerator.
            for char in label[0].upper(), label[0].lower():
                self.bind(char, callback)
                self._button_bindings[char] = callback

    def _left_button_clicked(self, event=None):
        if self._validate_value():
            self._result = self._get_value()
            self._close()

    def _validate_value(self) -> bool:
        # Subclass hook; default accepts anything.
        return True

    def _get_value(self) -> Any:
        # Subclass hook; value returned when the left button is accepted.
        return None

    def _close(self, event=None):
        # self.destroy() is not enough on Linux
        self.root.destroy()

    def _right_button_clicked(self, event=None):
        self._result = self._get_right_button_value()
        self._close()

    def _get_right_button_value(self) -> Any:
        # Subclass hook; value returned when the right button is clicked.
        return None

    def METHOD_NAME(self) -> Any:
        """Display the dialog, block until it is closed, and return the result."""
        self.wait_window(self)
        return self._result
class MessageDialog(TkDialog):
    """Dialog showing a message with only an OK button."""

    right_button = None
class InputDialog(TkDialog):
    """Dialog asking for a free-text value, optionally masked (e.g. passwords)."""

    def __init__(self, message, default='', hidden=False):
        super().__init__(message, default, hidden=hidden)

    def _create_widget(self, parent, default, hidden=False) -> Entry:
        # Bug fix: 'show' is the Tk Entry option that masks typed characters;
        # '*' hides the input when `hidden` is requested.
        widget = Entry(parent, show='*' if hidden else '')
        widget.insert(0, default)
        widget.select_range(0, END)
        # While the entry has focus, single-letter button accelerators must be
        # suspended, otherwise typing those letters would trigger the buttons.
        widget.bind('<FocusIn>', self._unbind_buttons)
        widget.bind('<FocusOut>', self._rebind_buttons)
        return widget

    def _unbind_buttons(self, event):
        for char in self._button_bindings:
            self.unbind(char)

    def _rebind_buttons(self, event):
        for char, callback in self._button_bindings.items():
            self.bind(char, callback)

    def _get_value(self) -> str:
        return self.widget.get()
class SelectionDialog(TkDialog):
    """Dialog for picking exactly one value from a list."""

    def _create_widget(self, parent, values) -> Listbox:
        widget = Listbox(parent)
        for item in values:
            widget.insert(END, item)
        # width=0 lets the listbox size itself to its longest entry.
        widget.config(width=0)
        return widget

    def _validate_value(self) -> bool:
        # OK is ignored until something is selected.
        return bool(self.widget.curselection())

    def _get_value(self) -> str:
        return self.widget.get(self.widget.curselection())
class MultipleSelectionDialog(TkDialog):
    """Dialog allowing selection of any number of values from a list."""

    def _create_widget(self, parent, values) -> Listbox:
        widget = Listbox(parent, selectmode='multiple')
        for item in values:
            widget.insert(END, item)
        widget.config(width=0)
        return widget

    def _get_value(self) -> list:
        # Collect the displayed text of every selected row, in list order.
        return [self.widget.get(index) for index in self.widget.curselection()]
class PassFailDialog(TkDialog):
    """Dialog with PASS/FAIL buttons mapping to True/False results."""

    left_button = 'PASS'
    right_button = 'FAIL'

    def _get_value(self) -> bool:
        return True

    def _get_right_button_value(self) -> bool:
        return False
5,060 | list users | """
Support for RallyDev
.. versionadded:: 2015.8.0
Requires a ``username`` and a ``password`` in ``/etc/salt/minion``:
.. code-block:: yaml
rallydev:
username: myuser@example.com
password: 123pass
"""
import logging
import salt.utils.http
import salt.utils.json
from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only load the module if rallydev username and password are configured.
    """
    if not __opts__.get("rallydev", {}).get("username", None):
        return (
            False,
            "The rallydev execution module failed to load: rallydev:username not"
            " defined in config.",
        )
    if not __opts__.get("rallydev", {}).get("password", None):
        return (
            False,
            "The rallydev execution module failed to load: rallydev:password not"
            " defined in config.",
        )
    return True
def _get_token():
    """
    Get an auth token from the RallyDev security endpoint.

    Returns the security token string, or None if the response body
    could not be decoded.
    """
    username = __opts__.get("rallydev", {}).get("username", None)
    password = __opts__.get("rallydev", {}).get("password", None)
    path = "https://rally1.rallydev.com/slm/webservice/v2.0/security/authorize"
    result = salt.utils.http.query(
        path,
        decode=True,
        decode_type="json",
        text=True,
        status=True,
        username=username,
        password=password,
        cookies=True,
        persist_session=True,
        opts=__opts__,
    )
    if "dict" not in result:
        return None
    return result["dict"]["OperationResult"]["SecurityToken"]
def _query(
    action=None, command=None, args=None, method="GET", header_dict=None, data=None
):
    """
    Make a web call to RallyDev.

    Returns a two-item list: ``[HTTP status, decoded body]`` on success or
    ``[HTTP status, error message]`` on failure.
    """
    token = _get_token()
    username = __opts__.get("rallydev", {}).get("username", None)
    password = __opts__.get("rallydev", {}).get("password", None)
    path = "https://rally1.rallydev.com/slm/webservice/v2.0/"
    if action:
        path += action
    if command:
        path += "/{}".format(command)
    log.debug("RallyDev URL: %s", path)
    if not isinstance(args, dict):
        args = {}
    # Every request carries the auth token as a query parameter.
    args["key"] = token
    if header_dict is None:
        header_dict = {"Content-type": "application/json"}
    if method != "POST":
        header_dict["Accept"] = "application/json"
    decode = True
    if method == "DELETE":
        # DELETE responses carry no useful body to decode.
        decode = False
    # (removed dead assignment `return_content = None`, which was never used)
    result = salt.utils.http.query(
        path,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type="json",
        text=True,
        status=True,
        username=username,
        password=password,
        cookies=True,
        persist_session=True,
        opts=__opts__,
    )
    log.debug("RallyDev Response Status Code: %s", result["status"])
    if "error" in result:
        log.error(result["error"])
        return [result["status"], result["error"]]
    return [result["status"], result.get("dict", {})]
def list_items(name):
    """
    List items of a particular type.

    CLI Examples:

    .. code-block:: bash

        salt myminion rallydev.list_<item name>s
        salt myminion rallydev.list_users
        salt myminion rallydev.list_artifacts
    """
    # The HTTP status is discarded; callers receive only the decoded body.
    status, result = _query(action=name)
    return result
def query_item(name, query_string, order="Rank"):
    """
    Query a type of record for one or more items. Requires a valid query string.

    See https://rally1.rallydev.com/slm/doc/webservice/introduction.jsp for
    information on query syntax.

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.query_<item name> <query string> [<order>]
        salt myminion rallydev.query_task '(Name contains github)'
        salt myminion rallydev.query_task '(Name contains reactor)' Rank
    """
    status, result = _query(action=name, args={"query": query_string, "order": order})
    return result
def show_item(name, id_):
    """
    Show a single item of the given type by its ID.

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.show_<item name> <item id>
    """
    status, result = _query(action=name, command=id_)
    return result
def update_item(name, id_, field=None, value=None, postdata=None):
    """
    Update an item. Either a field and a value, or a chunk of POST data, may be
    used, but not both.

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.update_<item name> <item id> field=<field> value=<value>
        salt myminion rallydev.update_<item name> <item id> postdata=<post data>
    """
    # Compare against None rather than truthiness so legitimate falsy values
    # (empty string, 0, False) can be written to a field.
    if field is not None and value is not None:
        if postdata:
            raise SaltInvocationError(
                "Either a field and a value, or a chunk "
                "of POST data, may be specified, but not both."
            )
        postdata = {name.title(): {field: value}}
    if postdata is None:
        raise SaltInvocationError(
            "Either a field and a value, or a chunk of POST data must be specified."
        )
    status, result = _query(
        action=name,
        command=id_,
        method="POST",
        data=salt.utils.json.dumps(postdata),
    )
    return result
def show_artifact(id_):
    """
    Show an artifact.

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.show_artifact <artifact id>
    """
    return show_item("artifact", id_)
def METHOD_NAME():
    """
    List the users.

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.list_users
    """
    return list_items("user")
def show_user(id_):
    """
    Show a user.

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.show_user <user id>
    """
    return show_item("user", id_)
def update_user(id_, field, value):
    """
    Update a user.

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.update_user <user id> <field> <new value>
    """
    return update_item("user", id_, field, value)
def query_user(query_string, order="UserName"):
    """
    Query users. Requires a valid query string; see ``query_item`` for syntax.

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.query_user '(Name contains Jo)'
    """
    return query_item("user", query_string, order)
5,061 | mkdtemp | import shutil
import tempfile
import unittest
import queuelib
from scrapy.http import Request
from scrapy.spiders import Spider
from scrapy.squeues import (
FifoMemoryQueue,
LifoMemoryQueue,
MarshalFifoDiskQueue,
MarshalLifoDiskQueue,
PickleFifoDiskQueue,
PickleLifoDiskQueue,
)
from scrapy.utils.test import get_crawler
"""
Queues that handle requests
"""
class BaseQueueTestCase(unittest.TestCase):
    """Common temp-directory setup/teardown for the queue test cases below."""

    def setUp(self):
        self.tmpdir = tempfile.METHOD_NAME(prefix="scrapy-queue-tests-")
        self.qpath = self.tempfilename()
        self.qdir = self.METHOD_NAME()
        self.crawler = get_crawler(Spider)

    def tearDown(self):
        # Removes every queue file/dir created under the temp root.
        shutil.rmtree(self.tmpdir)

    def tempfilename(self):
        # NOTE(review): the file is deleted when the context exits and only the
        # now-unused name is returned; a rare race could reuse that name.
        with tempfile.NamedTemporaryFile(dir=self.tmpdir) as nf:
            return nf.name

    def METHOD_NAME(self):
        return tempfile.METHOD_NAME(dir=self.tmpdir)
class RequestQueueTestMixin:
    """Shared single-element push/pop/peek checks for request queues."""

    def queue(self):
        # Subclasses must return a fresh queue instance under test.
        raise NotImplementedError()

    def test_one_element_with_peek(self):
        if not hasattr(queuelib.queue.FifoMemoryQueue, "peek"):
            raise unittest.SkipTest("The queuelib queues do not define peek")
        q = self.queue()
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.peek())
        self.assertIsNone(q.pop())
        req = Request("http://www.example.com")
        q.push(req)
        self.assertEqual(len(q), 1)
        # Compare URLs, not objects: disk queues (de)serialize the request.
        self.assertEqual(q.peek().url, req.url)
        self.assertEqual(q.pop().url, req.url)
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.peek())
        self.assertIsNone(q.pop())
        q.close()

    def test_one_element_without_peek(self):
        if hasattr(queuelib.queue.FifoMemoryQueue, "peek"):
            raise unittest.SkipTest("The queuelib queues define peek")
        q = self.queue()
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.pop())
        req = Request("http://www.example.com")
        q.push(req)
        self.assertEqual(len(q), 1)
        with self.assertRaises(
            NotImplementedError,
            msg="The underlying queue class does not implement 'peek'",
        ):
            q.peek()
        self.assertEqual(q.pop().url, req.url)
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.pop())
        q.close()
class FifoQueueMixin(RequestQueueTestMixin):
    """FIFO ordering checks: three pushed requests pop back in insertion order."""

    def test_fifo_with_peek(self):
        if not hasattr(queuelib.queue.FifoMemoryQueue, "peek"):
            raise unittest.SkipTest("The queuelib queues do not define peek")
        q = self.queue()
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.peek())
        self.assertIsNone(q.pop())
        req1 = Request("http://www.example.com/1")
        req2 = Request("http://www.example.com/2")
        req3 = Request("http://www.example.com/3")
        q.push(req1)
        q.push(req2)
        q.push(req3)
        self.assertEqual(len(q), 3)
        # FIFO: peek always shows the oldest element, pop removes it.
        self.assertEqual(q.peek().url, req1.url)
        self.assertEqual(q.pop().url, req1.url)
        self.assertEqual(len(q), 2)
        self.assertEqual(q.peek().url, req2.url)
        self.assertEqual(q.pop().url, req2.url)
        self.assertEqual(len(q), 1)
        self.assertEqual(q.peek().url, req3.url)
        self.assertEqual(q.pop().url, req3.url)
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.peek())
        self.assertIsNone(q.pop())
        q.close()

    def test_fifo_without_peek(self):
        if hasattr(queuelib.queue.FifoMemoryQueue, "peek"):
            # Bug fix: this branch skips because peek IS defined; the message
            # previously said the opposite (cf. RequestQueueTestMixin).
            raise unittest.SkipTest("The queuelib queues define peek")
        q = self.queue()
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.pop())
        req1 = Request("http://www.example.com/1")
        req2 = Request("http://www.example.com/2")
        req3 = Request("http://www.example.com/3")
        q.push(req1)
        q.push(req2)
        q.push(req3)
        with self.assertRaises(
            NotImplementedError,
            msg="The underlying queue class does not implement 'peek'",
        ):
            q.peek()
        self.assertEqual(len(q), 3)
        self.assertEqual(q.pop().url, req1.url)
        self.assertEqual(len(q), 2)
        self.assertEqual(q.pop().url, req2.url)
        self.assertEqual(len(q), 1)
        self.assertEqual(q.pop().url, req3.url)
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.pop())
        q.close()
class LifoQueueMixin(RequestQueueTestMixin):
    """LIFO ordering checks: three pushed requests pop back newest-first."""

    def test_lifo_with_peek(self):
        if not hasattr(queuelib.queue.FifoMemoryQueue, "peek"):
            raise unittest.SkipTest("The queuelib queues do not define peek")
        q = self.queue()
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.peek())
        self.assertIsNone(q.pop())
        req1 = Request("http://www.example.com/1")
        req2 = Request("http://www.example.com/2")
        req3 = Request("http://www.example.com/3")
        q.push(req1)
        q.push(req2)
        q.push(req3)
        self.assertEqual(len(q), 3)
        # LIFO: peek always shows the newest element, pop removes it.
        self.assertEqual(q.peek().url, req3.url)
        self.assertEqual(q.pop().url, req3.url)
        self.assertEqual(len(q), 2)
        self.assertEqual(q.peek().url, req2.url)
        self.assertEqual(q.pop().url, req2.url)
        self.assertEqual(len(q), 1)
        self.assertEqual(q.peek().url, req1.url)
        self.assertEqual(q.pop().url, req1.url)
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.peek())
        self.assertIsNone(q.pop())
        q.close()

    def test_lifo_without_peek(self):
        if hasattr(queuelib.queue.FifoMemoryQueue, "peek"):
            # Bug fix: this branch skips because peek IS defined; the message
            # previously said the opposite (cf. RequestQueueTestMixin).
            raise unittest.SkipTest("The queuelib queues define peek")
        q = self.queue()
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.pop())
        req1 = Request("http://www.example.com/1")
        req2 = Request("http://www.example.com/2")
        req3 = Request("http://www.example.com/3")
        q.push(req1)
        q.push(req2)
        q.push(req3)
        with self.assertRaises(
            NotImplementedError,
            msg="The underlying queue class does not implement 'peek'",
        ):
            q.peek()
        self.assertEqual(len(q), 3)
        self.assertEqual(q.pop().url, req3.url)
        self.assertEqual(len(q), 2)
        self.assertEqual(q.pop().url, req2.url)
        self.assertEqual(len(q), 1)
        self.assertEqual(q.pop().url, req1.url)
        self.assertEqual(len(q), 0)
        self.assertIsNone(q.pop())
        q.close()
class PickleFifoDiskQueueRequestTest(FifoQueueMixin, BaseQueueTestCase):
    """FIFO disk queue with pickle serialization."""

    def queue(self):
        return PickleFifoDiskQueue.from_crawler(crawler=self.crawler, key="pickle/fifo")
class PickleLifoDiskQueueRequestTest(LifoQueueMixin, BaseQueueTestCase):
    """LIFO disk queue with pickle serialization."""

    def queue(self):
        return PickleLifoDiskQueue.from_crawler(crawler=self.crawler, key="pickle/lifo")
class MarshalFifoDiskQueueRequestTest(FifoQueueMixin, BaseQueueTestCase):
    """FIFO disk queue with marshal serialization."""

    def queue(self):
        return MarshalFifoDiskQueue.from_crawler(
            crawler=self.crawler, key="marshal/fifo"
        )
class MarshalLifoDiskQueueRequestTest(LifoQueueMixin, BaseQueueTestCase):
    """LIFO disk queue with marshal serialization."""

    def queue(self):
        return MarshalLifoDiskQueue.from_crawler(
            crawler=self.crawler, key="marshal/lifo"
        )
class FifoMemoryQueueRequestTest(FifoQueueMixin, BaseQueueTestCase):
    """In-memory FIFO queue."""

    def queue(self):
        return FifoMemoryQueue.from_crawler(crawler=self.crawler)
class LifoMemoryQueueRequestTest(LifoQueueMixin, BaseQueueTestCase):
    """In-memory LIFO queue."""

    def queue(self):
        return LifoMemoryQueue.from_crawler(crawler=self.crawler)
5,062 | insert into all | import inspect
from .exceptions import InvalidConfigError, RegistrationError
from .util.dict import filter_dict
class TargetFactory:
    def __init__(self):
        # Class-name -> class maps for registered resources and drivers;
        # all_classes is the combined registry used to reject duplicate names.
        self.resources = {}
        self.drivers = {}
        self.all_classes = {}
def reg_resource(self, cls):
"""Register a resource with the factory.
Returns the class to allow using it as a decorator."""
cls_name = cls.__name__
if cls_name in self.all_classes:
raise RegistrationError(f"resource with name {cls_name} was already registered")
self.resources[cls.__name__] = cls
self.METHOD_NAME(cls)
return cls
    def reg_driver(self, cls):
        """Register a driver with the factory.

        Raises RegistrationError if the name is already taken.
        Returns the class to allow using it as a decorator."""
        cls_name = cls.__name__
        if cls_name in self.all_classes:
            raise RegistrationError(f"driver with name {cls_name} was already registered")
        self.drivers[cls_name] = cls
        self.METHOD_NAME(cls)
        return cls
    @staticmethod
    def _convert_to_named_list(data):
        """Convert a tree of resources or drivers to a named list.

        When using named resources or drivers, the config file uses a list of
        dicts instead of simply nested dicts. This allows creating multiple
        instances of the same class with different names.

        resources: # or drivers
          FooPort: {}
          BarPort:
            name: "bar"

        or

        resources: # or drivers
        - FooPort: {}
        - BarPort:
            name: "bar"

        should be transformed to

        resources: # or drivers
        - cls: "FooPort"
        - cls: "BarPort"
          name: "bar"
        """
        # resolve syntactic sugar (list of dicts each containing a dict of key -> args)
        result = []
        if isinstance(data, list):
            for item in data:
                if not isinstance(item, dict):
                    raise InvalidConfigError(
                        f"invalid list item type {type(item)} (should be dict)")
                if not item:
                    raise InvalidConfigError("invalid empty dict as list item")
                if len(item) > 1:
                    # Multiple keys are only allowed in the already-explicit
                    # form, which must carry a 'cls' key.
                    if 'cls' in item:
                        item = item.copy()
                    else:
                        raise InvalidConfigError(f"missing 'cls' key in {item}")
                else:
                    # only one pair left
                    (key, value), = item.items()
                    if key == 'cls':
                        item = item.copy()
                    else:
                        # Sugar form: the single key is the class name, the
                        # value is its argument dict.
                        item = {'cls': key}
                        if value is None:
                            raise InvalidConfigError("invalid list item, add empty dict for no arguments")  # pylint: disable=line-too-long
                        item.update(value)
                result.append(item)
        elif isinstance(data, dict):
            # Plain mapping form: class name -> argument dict.
            for cls, args in data.items():
                args = args.copy()
                args.setdefault('cls', cls)
                result.append(args)
        else:
            raise InvalidConfigError(f"invalid type {type(data)} (should be dict or list)")
        for item in result:
            item.setdefault('name', None)
            assert 'cls' in item
        return result
    @staticmethod
    def normalize_config(config):
        """Split a target config into resource and driver mappings.

        Returns ``(resources, drivers)`` where resources maps
        cls -> name -> (args,) and drivers maps cls -> name -> (args, bindings).
        """
        resources = {}
        drivers = {}
        for item in TargetFactory._convert_to_named_list(config.get('resources', {})):
            resource = item.pop('cls')
            name = item.pop('name', None)
            args = item  # remaining args
            resources.setdefault(resource, {})[name] = (args, )
        for item in TargetFactory._convert_to_named_list(config.get('drivers', {})):
            driver = item.pop('cls')
            name = item.pop('name', None)
            bindings = item.pop('bindings', {})
            args = item  # remaining args
            drivers.setdefault(driver, {})[name] = (args, bindings)
        return resources, drivers
def make_resource(self, target, resource, name, args):
    """Instantiate the registered resource class *resource* for *target*.

    Args:
        target: the Target the resource is created for.
        resource: registered resource class name.
        name: instance name (may be None).
        args: dict of keyword arguments for the constructor.

    Returns the created resource instance.

    Raises:
        InvalidConfigError: for an unknown class name or a constructor
            TypeError (e.g. wrong arguments).
    """
    assert isinstance(args, dict)
    # idiomatic membership test (was: `not resource in self.resources`)
    if resource not in self.resources:
        raise InvalidConfigError(f"unknown resource class {resource}")
    try:
        cls = self.resources[resource]
        # drop (and warn about) arguments the constructor does not accept
        args = filter_dict(args, cls, warn=True)
        r = cls(target, name, **args)
    except TypeError as e:
        raise InvalidConfigError(
            f"failed to create {resource} for target '{target}' using {args} "
        ) from e
    return r
def make_driver(self, target, driver, name, args):
    """Instantiate the registered driver class *driver* for *target*.

    Args:
        target: the Target the driver is created for.
        driver: registered driver class name.
        name: instance name (may be None).
        args: dict of keyword arguments for the constructor.

    Returns the created driver instance.

    Raises:
        InvalidConfigError: for an unknown class name or a constructor
            TypeError (e.g. wrong arguments).
    """
    assert isinstance(args, dict)
    # idiomatic membership test (was: `not driver in self.drivers`)
    if driver not in self.drivers:
        raise InvalidConfigError(f"unknown driver class {driver}")
    try:
        cls = self.drivers[driver]
        # drop (and warn about) arguments the constructor does not accept
        args = filter_dict(args, cls, warn=True)
        d = cls(target, name, **args)
    except TypeError as e:
        raise InvalidConfigError(
            f"failed to create {driver} for target '{target}' using {args} ") from e
    return d
def make_target(self, name, config, *, env=None):
    """Create a Target called *name* from *config*.

    Resources are instantiated first, then drivers; each driver item may
    carry a ``bindings`` map which is installed on the target before the
    driver is created.
    """
    from .target import Target
    target = Target(name, env=env)
    # Use distinct loop variables so the target's `name` parameter is not
    # shadowed inside the loops (it previously was, which was confusing
    # even though the target had already been created).
    for item in TargetFactory._convert_to_named_list(config.get('resources', {})):
        resource = item.pop('cls')
        instance_name = item.pop('name', None)
        args = item  # remaining args
        self.make_resource(target, resource, instance_name, args)
    for item in TargetFactory._convert_to_named_list(config.get('drivers', {})):
        driver = item.pop('cls')
        instance_name = item.pop('name', None)
        bindings = item.pop('bindings', {})
        args = item  # remaining args
        # bindings are installed per driver, before the driver is created
        target.set_binding_map(bindings)
        self.make_driver(target, driver, instance_name, args)
    return target
def class_from_string(self, string: str):
    """Look up a registered driver/resource/protocol class by name.

    Raises:
        KeyError: with a helpful message when the name is unknown; the
            original lookup failure is kept as the explicit cause.
    """
    try:
        return self.all_classes[string]
    except KeyError as exc:
        # chain explicitly instead of the implicit "During handling ..." context
        raise KeyError(
            f"No driver/resource/protocol of type '{string}' in factory, perhaps not registered?"
        ) from exc
def METHOD_NAME(self, cls):
    """Make *cls* and all of its base classes resolvable by name.

    Existing entries are never overwritten, so the first registration of a
    given class name wins.
    """
    for cl in inspect.getmro(cls):
        # explicit membership instead of truthiness on `.get()`; classes are
        # always truthy, so behavior is identical but the intent is clearer
        if cl.__name__ not in self.all_classes:
            self.all_classes[cl.__name__] = cl
#: Global TargetFactory instance
#:
#: This instance is used to register Resource and Driver classes so that
#: Targets can be created automatically from YAML files.
# NOTE: created at import time; acts as the module-level singleton registry.
target_factory = TargetFactory()
5,063 | test missed framework in create gapi launcher | """
Copyright (c) 2018-2023 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
pytest.importorskip('openvino.inference_engine')
pytest.importorskip('cv2.gapi.ie.params')
pytest.importorskip('openvino.tools.accuracy_checker.launcher.gapi_launcher')
import cv2
import numpy as np
from openvino.tools.accuracy_checker.launcher.launcher import create_launcher
from openvino.tools.accuracy_checker.config import ConfigError
def get_gapi_test_model(models_dir):
    """Build a G-API classification launcher for the test model in *models_dir*."""
    launcher_config = dict(
        framework="g-api",
        model=models_dir,
        adapter="classification",
        device="cpu",
        inputs=[{"name": "data", "type": "INPUT", "shape": "(3, 32, 32)"}],
        outputs=["fc3"],
    )
    return create_launcher(launcher_config)
class TestGAPILauncher:
    """Smoke tests for the G-API launcher against the bundled test model."""

    def test_launcher_creates(self):
        """The launcher reports the configured input shape (with batch 1)."""
        assert get_gapi_test_model(models_dir).inputs['data'] == (1, 3, 32, 32)

    def test_infer_model(self, data_dir, models_dir):
        """End-to-end inference on a sample image yields the expected class."""
        test_model = get_gapi_test_model(models_dir)
        _, _, h, w = test_model.inputs['data']
        img_raw = cv2.imread(str(data_dir / '1.jpg'))
        # resize to the network input resolution before inference
        img_resized = cv2.resize(img_raw, (w, h))
        res = test_model.predict([{'data': img_resized}], [{}])
        # the sample image is expected to be classified as class 7
        assert np.argmax(res[0]['fc3']) == 7
@pytest.mark.usefixtures('mock_path_exists')
class TestOpenCVLauncherConfig:
    # NOTE(review): despite the class name, these tests validate the *g-api*
    # launcher config; consider renaming to TestGAPILauncherConfig.

    def METHOD_NAME(self):
        """Omitting 'framework' entirely fails with KeyError (no launcher type)."""
        config = {
            # 'framework': 'g-api',
            'model': 'model.xml',
            'weights': 'weights.bin',
            'device': 'CPU',
            'adapter': 'classification',
            'inputs': [{'name': 'data', 'type': 'INPUT'}],
            'outputs': ['out']
        }
        with pytest.raises(KeyError):
            create_launcher(config)

    def test_missed_model_in_create_gapi_launcher_raises_config_error_exception(self):
        """A missing 'model' entry is a ConfigError."""
        config = {
            'framework': 'g-api',
            # 'model': 'model.ocv',
            'weights': 'weights.bin',
            'device': 'CPU',
            'adapter': 'classification',
            'inputs': [{'name': 'data', 'type': 'INPUT'}],
            'outputs': ['out']
        }
        with pytest.raises(ConfigError):
            # NOTE(review): the extra positional 'model' argument looks
            # unintended here — confirm against create_launcher's signature.
            create_launcher(config, 'model')

    def test_missed_device_in_create_gapi_launcher_raises_config_error_exception(self):
        """A missing 'device' entry is a ConfigError."""
        config = {
            'framework': 'g-api',
            'model': 'model.xml',
            'weights': 'weights.bin',
            # 'device': 'not_device',
            'adapter': 'classification',
            'inputs': [{'name': 'data', 'type': 'INPUT'}],
            'outputs': ['out']
        }
        with pytest.raises(ConfigError):
            create_launcher(config)

    def test_missed_inputs_in_create_gapi_launcher_raises_config_error_exception(self):
        """A missing 'inputs' entry is a ConfigError."""
        config = {
            'framework': 'g-api',
            'model': 'model.xml',
            'weights': 'weights.bin',
            'device': 'CPU',
            'backend': 'not_backend',
            'adapter': 'classification',
            # 'inputs': [{'name': 'data', 'type': 'INPUT'}]
            'outputs': ['out']
        }
        with pytest.raises(ConfigError):
            create_launcher(config)

    def test_missed_outputs_in_create_gapi_launcher_raises_config_error_exception(self):
        """A missing 'outputs' entry is a ConfigError."""
        config = {
            'framework': 'g-api',
            'model': 'model.xml',
            'weights': 'weights.bin',
            'device': 'CPU',
            'backend': 'not_backend',
            'adapter': 'classification',
            'inputs': [{'name': 'data', 'type': 'INPUT'}]
            # 'outputs': ['out']
        }
        with pytest.raises(ConfigError):
            create_launcher(config)
5,064 | test twoglobals | # Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
NOTE: this tests are also meant to be run as PyPy "applevel" tests.
This means that global imports will NOT be visible inside the test
functions. In particular, you have to "import pytest" inside the test in order
to be able to use e.g. pytest.raises (which on PyPy will be implemented by a
"fake pytest module")
"""
from .support import HPyTest
class TestHPyGlobal(HPyTest):
    """Tests for HPyGlobal: module-level handles stored/loaded via the ctx."""

    def test_basics(self):
        """An object stored in a global must load back as the same object."""
        mod = self.make_module("""
            HPyGlobal myglobal;
            HPyDef_METH(setg, "setg", HPyFunc_O)
            static HPy setg_impl(HPyContext *ctx, HPy self, HPy arg)
            {
                HPyGlobal_Store(ctx, &myglobal, arg);
                return HPy_Dup(ctx, ctx->h_None);
            }
            HPyDef_METH(getg, "getg", HPyFunc_NOARGS)
            static HPy getg_impl(HPyContext *ctx, HPy self)
            {
                return HPyGlobal_Load(ctx, myglobal);
            }
            @EXPORT(setg)
            @EXPORT(getg)
            @EXPORT_GLOBAL(myglobal)
            @INIT
        """)
        obj = {'hello': 'world'}
        assert mod.setg(obj) is None
        # identity (not just equality) must survive the round trip
        assert mod.getg() is obj

    def METHOD_NAME(self):
        """Two globals in one module must not interfere with each other."""
        mod = self.make_module("""
            HPyGlobal myglobal1;
            HPyGlobal myglobal2;
            HPyDef_METH(setg1, "setg1", HPyFunc_O)
            static HPy setg1_impl(HPyContext *ctx, HPy self, HPy arg)
            {
                HPyGlobal_Store(ctx, &myglobal1, arg);
                return HPy_Dup(ctx, ctx->h_None);
            }
            HPyDef_METH(setg2, "setg2", HPyFunc_O)
            static HPy setg2_impl(HPyContext *ctx, HPy self, HPy arg)
            {
                HPyGlobal_Store(ctx, &myglobal2, arg);
                return HPy_Dup(ctx, ctx->h_None);
            }
            HPyDef_METH(getg1, "getg1", HPyFunc_NOARGS)
            static HPy getg1_impl(HPyContext *ctx, HPy self)
            {
                return HPyGlobal_Load(ctx, myglobal1);
            }
            HPyDef_METH(getg2, "getg2", HPyFunc_NOARGS)
            static HPy getg2_impl(HPyContext *ctx, HPy self)
            {
                return HPyGlobal_Load(ctx, myglobal2);
            }
            @EXPORT(setg1)
            @EXPORT(setg2)
            @EXPORT(getg1)
            @EXPORT(getg2)
            @EXPORT_GLOBAL(myglobal1)
            @EXPORT_GLOBAL(myglobal2)
            @INIT
        """)
        obj1 = {'hello': 'world'}
        obj2 = {'foo': 'bar'}
        assert mod.setg1(obj1) is None
        assert mod.setg2(obj2) is None
        assert mod.getg1() is obj1
        assert mod.getg2() is obj2
5,065 | test interpolator | """
Methods for reading ROMS model outputs
"""
from thetis import *
import thetis.coordsys as coordsys
from thetis.timezone import *
from thetis.forcing import *
# define model coordinate system; passed to the forcing interpolators below
COORDSYS = coordsys.UTM_ZONE10
def test_time_search():
    """
    Tests time search object.

    Time stamps are deduced from ROMS output files.

    .. note::
        The following ROMS output files must be present:
        ./forcings/liveocean/f2015.05.16/ocean_his_0009.nc
        ./forcings/liveocean/f2015.05.16/ocean_his_0010.nc
        ./forcings/liveocean/f2015.05.16/ocean_his_0011.nc
    """
    # test time parser
    tp = interpolation.NetCDFTimeParser(
        'forcings/liveocean/f2015.05.16/ocean_his_0009.nc',
        time_variable_name='ocean_time')
    nc_start = datetime.datetime(2015, 5, 16, 8, tzinfo=pytz.utc)
    # single-record file: start and end coincide
    assert tp.start_time == nc_start
    assert tp.end_time == nc_start
    assert numpy.allclose(tp.time_array, numpy.array([datetime_to_epoch(nc_start)]))

    # test time search
    sim_tz = timezone.FixedTimeZone(-8, 'PST')
    init_date = datetime.datetime(2015, 5, 16, tzinfo=sim_tz)
    ncpattern = 'forcings/liveocean/f2015.*/ocean_his_*.nc'
    timesearch_obj = interpolation.NetCDFTimeSearch(
        ncpattern, init_date, interpolation.NetCDFTimeParser,
        time_variable_name='ocean_time', verbose=True)

    # records bracketing sim_time: previous is file 0009 at t=0,
    # next is file 0010 one hour (3600 s) later
    sim_time = 100.0
    fn, itime, time = timesearch_obj.find(sim_time, previous=True)
    assert fn == 'forcings/liveocean/f2015.05.16/ocean_his_0009.nc'
    assert itime == 0
    assert time == 0.0
    fn, itime, time = timesearch_obj.find(sim_time)
    assert fn == 'forcings/liveocean/f2015.05.16/ocean_his_0010.nc'
    assert itime == 0
    assert time == 3600.0

    # walk 8 steps of 15 min and print the bracketing records (smoke test)
    dt = 900
    for i in range(8):
        d = init_date + datetime.timedelta(seconds=i*dt)
        print('Time step {:}, {:}'.format(i, d))
        fn, itime, time = timesearch_obj.find(i*dt, previous=True)
        print(' prev: {:} {:}'.format(fn, itime))
        fn, itime, time = timesearch_obj.find(i*dt, previous=False)
        print(' next: {:} {:}'.format(fn, itime))
def METHOD_NAME():
    """
    Test ROMS 3d interpolator.

    .. note::
        The following ROMS output files must be present:
        ./forcings/liveocean/f2015.05.16/ocean_his_0009.nc
        ./forcings/liveocean/f2015.05.16/ocean_his_0010.nc
        ./forcings/liveocean/f2015.05.16/ocean_his_0011.nc
    """
    # load and extrude mesh
    from bathymetry import get_bathymetry, smooth_bathymetry, smooth_bathymetry_at_bnd
    nlayers, surf_elem_height, max_z_stretch = (9, 5.0, 4.0)
    mesh2d = Mesh('mesh_cre-plume_03_normal.msh')
    # interpolate bathymetry and smooth it
    bathymetry_2d = get_bathymetry('bathymetry_utm_large.nc', mesh2d, project=False)
    bathymetry_2d = smooth_bathymetry(
        bathymetry_2d, delta_sigma=1.0, bg_diff=0,
        alpha=1e2, exponent=2.5,
        minimum_depth=3.5, niter=30)
    bathymetry_2d = smooth_bathymetry_at_bnd(bathymetry_2d, [2, 7])
    # 3d mesh vertical stretch factor
    z_stretch_fact_2d = Function(bathymetry_2d.function_space(), name='z_stretch')
    # 1.0 (sigma mesh) in shallow areas, 4.0 in deep ocean
    z_stretch_fact_2d.project(-ln(surf_elem_height/bathymetry_2d)/ln(nlayers))
    # clamp the stretch factor to [1.0, max_z_stretch]
    z_stretch_fact_2d.dat.data[z_stretch_fact_2d.dat.data < 1.0] = 1.0
    z_stretch_fact_2d.dat.data[z_stretch_fact_2d.dat.data > max_z_stretch] = max_z_stretch
    extrude_options = {
        'z_stretch_fact': z_stretch_fact_2d,
    }
    mesh = extrude_mesh_sigma(mesh2d, nlayers, bathymetry_2d,
                              **extrude_options)
    p1 = get_functionspace(mesh, 'CG', 1)

    # make functions to receive the interpolated fields
    salt = Function(p1, name='salinity')
    temp = Function(p1, name='temperature')

    sim_tz = timezone.FixedTimeZone(-8, 'PST')
    init_date = datetime.datetime(2015, 5, 16, tzinfo=sim_tz)
    interp = LiveOceanInterpolator(p1,
                                   [salt, temp],
                                   ['salt', 'temp'],
                                   'forcings/liveocean/f2015.*/ocean_his_*.nc',
                                   init_date, COORDSYS)
    interp.set_fields(0.0)
    salt_fn = 'tmp/salt_roms.pvd'
    temp_fn = 'tmp/temp_roms.pvd'
    print('Saving output to {:} {:}'.format(salt_fn, temp_fn))
    out_salt = File(salt_fn)
    out_temp = File(temp_fn)
    out_salt.write(salt)
    out_temp.write(temp)

    # advance through 8 steps of 15 min and export each interpolated state
    dt = 900.
    for i in range(8):
        print('Time step {:}'.format(i))
        interp.set_fields(i*dt)
        out_salt.write(salt)
        out_temp.write(temp)
if __name__ == '__main__':
    # manual smoke-test entry point (requires the ROMS forcing files on disk)
    test_time_search()
    METHOD_NAME()
5,066 | set child | from _typeshed import Incomplete, SupportsGetItem, SupportsLenAndGetItem, Unused
from abc import abstractmethod
from collections.abc import Iterable, Iterator, MutableSequence
from typing_extensions import Final, Self, TypeAlias
from .fixer_base import BaseFix
from .pgen2.grammar import Grammar
# Type aliases shared by the node and pattern classes below.
_NL: TypeAlias = Node | Leaf
_Context: TypeAlias = tuple[str, int, int]
_Results: TypeAlias = dict[str, _NL]
_RawNode: TypeAlias = tuple[int, str, _Context, list[_NL] | None]

# Sentinel "unbounded" repeat count (used as WildcardPattern's default max).
HUGE: Final = 0x7FFFFFFF

def type_repr(type_num: int) -> str | int: ...
class Base:
    # Abstract base shared by Node and Leaf; defines tree navigation,
    # cloning, and traversal. Subclasses must implement the abstract methods.
    type: int
    parent: Node | None
    prefix: str
    children: list[_NL]
    was_changed: bool
    was_checked: bool
    def __eq__(self, other: object) -> bool: ...
    @abstractmethod
    def _eq(self, other: Base) -> bool: ...
    @abstractmethod
    def clone(self) -> Self: ...
    @abstractmethod
    def post_order(self) -> Iterator[Self]: ...
    @abstractmethod
    def pre_order(self) -> Iterator[Self]: ...
    def replace(self, new: _NL | list[_NL]) -> None: ...
    def get_lineno(self) -> int: ...
    def changed(self) -> None: ...
    def remove(self) -> int | None: ...
    @property
    def next_sibling(self) -> _NL | None: ...
    @property
    def prev_sibling(self) -> _NL | None: ...
    def leaves(self) -> Iterator[Leaf]: ...
    def depth(self) -> int: ...
    def get_suffix(self) -> str: ...
class Node(Base):
    # Interior tree node (has children).
    fixers_applied: MutableSequence[BaseFix] | None
    # Is Unbound until set in refactor.RefactoringTool
    future_features: frozenset[Incomplete]
    # Is Unbound until set in pgen2.parse.Parser.pop
    used_names: set[str]
    def __init__(
        self,
        type: int,
        children: Iterable[_NL],
        context: Unused = None,
        prefix: str | None = None,
        fixers_applied: MutableSequence[BaseFix] | None = None,
    ) -> None: ...
    def _eq(self, other: Base) -> bool: ...
    def clone(self) -> Node: ...
    def post_order(self) -> Iterator[Self]: ...
    def pre_order(self) -> Iterator[Self]: ...
    # presumably replaces the child at index i (mirrors insert_child /
    # append_child below) — TODO confirm against the implementation
    def METHOD_NAME(self, i: int, child: _NL) -> None: ...
    def insert_child(self, i: int, child: _NL) -> None: ...
    def append_child(self, child: _NL) -> None: ...
    def __unicode__(self) -> str: ...
class Leaf(Base):
    # Terminal tree node (a single token with position info).
    lineno: int
    column: int
    value: str
    fixers_applied: MutableSequence[BaseFix]
    def __init__(
        self,
        type: int,
        value: str,
        context: _Context | None = None,
        prefix: str | None = None,
        fixers_applied: MutableSequence[BaseFix] = [],
    ) -> None: ...
    def _eq(self, other: Base) -> bool: ...
    def clone(self) -> Leaf: ...
    def post_order(self) -> Iterator[Self]: ...
    def pre_order(self) -> Iterator[Self]: ...
    def __unicode__(self) -> str: ...
# Builds a Node or Leaf from a raw parser tuple for the given grammar.
def convert(gr: Grammar, raw_node: _RawNode) -> _NL: ...
class BasePattern:
    # Base class for tree-matching patterns; `name` captures the matched
    # node into the results dict under that key.
    type: int
    content: str | None
    name: str | None
    def optimize(self) -> BasePattern: ...  # sic, subclasses are free to optimize themselves into different patterns
    def match(self, node: _NL, results: _Results | None = None) -> bool: ...
    def match_seq(self, nodes: SupportsLenAndGetItem[_NL], results: _Results | None = None) -> bool: ...
    def generate_matches(self, nodes: SupportsGetItem[int, _NL]) -> Iterator[tuple[int, _Results]]: ...
class LeafPattern(BasePattern):
    def __init__(self, type: int | None = None, content: str | None = None, name: str | None = None) -> None: ...

class NodePattern(BasePattern):
    wildcards: bool
    def __init__(self, type: int | None = None, content: str | None = None, name: str | None = None) -> None: ...

class WildcardPattern(BasePattern):
    # min/max repetition counts; max defaults to HUGE (0x7FFFFFFF).
    min: int
    max: int
    def __init__(self, content: str | None = None, min: int = 0, max: int = 0x7FFFFFFF, name: str | None = None) -> None: ...

class NegatedPattern(BasePattern):
    def __init__(self, content: str | None = None) -> None: ...

def generate_matches(
    patterns: SupportsGetItem[int | slice, BasePattern] | None, nodes: SupportsGetItem[int | slice, _NL]
) -> Iterator[tuple[int, _Results]]: ...
5,067 | test 15 minute dt index | from numpy import isnan
import pandas as pd
import pytest
from pvlib.iotools import srml
from ..conftest import (DATA_DIR, RERUNS, RERUNS_DELAY, assert_index_equal,
assert_frame_equal, fail_on_pvlib_version)
from pvlib._deprecation import pvlibDeprecationWarning
# bundled sample file: station EU, January 2018 (see remote tests below)
srml_testfile = DATA_DIR / 'SRML-day-EUPO1801.txt'

def test_read_srml():
    # parses without raising
    srml.read_srml(srml_testfile)

@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_read_srml_remote():
    # same parser against the live solardat archive
    srml.read_srml('http://solardat.uoregon.edu/download/Archive/EUPO1801.txt')

def test_read_srml_columns_exist():
    data = srml.read_srml(srml_testfile)
    # mapped variable names plus their matching flag columns
    assert 'ghi_0' in data.columns
    assert 'ghi_0_flag' in data.columns
    assert 'dni_1' in data.columns
    assert 'dni_1_flag' in data.columns
    # unmapped element numbers are kept verbatim
    assert '7008' in data.columns
    assert '7008_flag' in data.columns

def test_read_srml_map_variables_false():
    # with map_variables=False all columns keep their raw element numbers
    data = srml.read_srml(srml_testfile, map_variables=False)
    assert '1000' in data.columns
    assert '1000_flag' in data.columns
    assert '2010' in data.columns
    assert '2010_flag' in data.columns
    assert '7008' in data.columns
    assert '7008_flag' in data.columns

def test_read_srml_nans_exist():
    data = srml.read_srml(srml_testfile)
    # missing measurements are NaN and carry flag value 99
    assert isnan(data['dni_0'][1119])
    assert data['dni_0_flag'][1119] == 99
@pytest.mark.parametrize('url,year,month', [
    ('http://solardat.uoregon.edu/download/Archive/EUPO1801.txt',
     2018, 1),
    ('http://solardat.uoregon.edu/download/Archive/EUPO1612.txt',
     2016, 12),
])
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_read_srml_dt_index(url, year, month):
    """Index must be tz-aware, minute resolution, covering the full month."""
    data = srml.read_srml(url)
    start = pd.Timestamp(f'{year:04d}{month:02d}01 00:00')
    start = start.tz_localize('Etc/GMT+8')
    end = pd.Timestamp(f'{year:04d}{month:02d}31 23:59')
    end = end.tz_localize('Etc/GMT+8')
    assert data.index[0] == start
    assert data.index[-1] == end
    # every 60th entry (hour boundary) ends on minute 59
    assert (data.index[59::60].minute == 59).all()
    # the raw file's year column must not leak into the frame
    assert str(year) not in data.columns

@pytest.mark.parametrize('column,expected', [
    ('1001', 'ghi_1'),
    ('7324', '7324'),
    ('2001', '2001'),
    ('2017', 'dni_7')
])
def test__map_columns(column, expected):
    # known element numbers map to variable names; unknown ones pass through
    assert srml._map_columns(column) == expected
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_get_srml():
    # get_srml for a single month must match reading the archive file directly
    url = 'http://solardat.uoregon.edu/download/Archive/EUPO1801.txt'
    file_data = srml.read_srml(url)
    requested, _ = srml.get_srml(station='EU', start='2018-01-01',
                                 end='2018-01-31')
    assert_frame_equal(file_data, requested)

@fail_on_pvlib_version('0.11')
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_read_srml_month_from_solardat():
    # deprecated entry point: must warn but return identical data
    url = 'http://solardat.uoregon.edu/download/Archive/EUPO1801.txt'
    file_data = srml.read_srml(url)
    with pytest.warns(pvlibDeprecationWarning, match='get_srml instead'):
        requested = srml.read_srml_month_from_solardat('EU', 2018, 1)
    assert file_data.equals(requested)
@fail_on_pvlib_version('0.11')
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def METHOD_NAME():
    """15-minute (RQ) files must produce a complete quarter-hourly index."""
    with pytest.warns(pvlibDeprecationWarning, match='get_srml instead'):
        data = srml.read_srml_month_from_solardat('TW', 2019, 4, 'RQ')
    start = pd.Timestamp('20190401 00:00')
    start = start.tz_localize('Etc/GMT+8')
    end = pd.Timestamp('20190430 23:45')
    end = end.tz_localize('Etc/GMT+8')
    assert data.index[0] == start
    assert data.index[-1] == end
    # every 4th entry (hour boundary) ends on minute 45
    assert (data.index[3::4].minute == 45).all()

@fail_on_pvlib_version('0.11')
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_hourly_dt_index():
    """Hourly (PH) files must produce a complete hourly index."""
    with pytest.warns(pvlibDeprecationWarning, match='get_srml instead'):
        data = srml.read_srml_month_from_solardat('CD', 1986, 4, 'PH')
    start = pd.Timestamp('19860401 00:00')
    start = start.tz_localize('Etc/GMT+8')
    end = pd.Timestamp('19860430 23:00')
    end = end.tz_localize('Etc/GMT+8')
    assert data.index[0] == start
    assert data.index[-1] == end
    assert (data.index.minute == 0).all()
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_get_srml_hourly():
    """Hourly (PH) request spanning two months yields a complete hourly index."""
    # was `data, meta = data, meta = srml.get_srml(...)` — an accidental
    # chained assignment; a single tuple assignment is intended
    data, meta = srml.get_srml(station='CD', start='1986-04-01',
                               end='1986-05-31', filetype='PH')
    expected_index = pd.date_range(start='1986-04-01', end='1986-05-31 23:59',
                                   freq='1h', tz='Etc/GMT+8')
    assert_index_equal(data.index, expected_index)
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_get_srml_minute():
    """get_srml for PO (1-minute) data is a superset of the local sample file."""
    data_read = srml.read_srml(srml_testfile)
    data_get, meta = srml.get_srml(station='EU', start='2018-01-01',
                                   end='2018-01-31', filetype='PO')
    expected_index = pd.date_range(start='2018-01-01', end='2018-01-31 23:59',
                                   freq='1min', tz='Etc/GMT+8')
    assert_index_equal(data_get.index, expected_index)
    assert all([c in data_get.columns for c in data_read.columns])
    # Check that all indices in example file are present in remote file
    assert data_read.index.isin(data_get.index).all()
    # metadata describes the request and the resolved file names
    assert meta['station'] == 'EU'
    assert meta['filetype'] == 'PO'
    assert meta['filenames'] == ['EUPO1801.txt']
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_get_srml_nonexisting_month_warning():
    """A range with missing archive files must warn, not fail."""
    with pytest.warns(UserWarning, match='file was not found: EUPO0912.txt'):
        # Request data for a period where not all files exist
        # Eugene (EU) station started reporting 1-minute data in January 2010
        # (was `data, meta = data, meta = ...` — accidental chained assignment)
        data, meta = srml.get_srml(
            station='EU', start='2009-12-01', end='2010-01-31', filetype='PO')
5,068 | get slug | """
Serializers for the ES's search result object.
.. note::
Some fields are re-named to make their meaning more clear.
They should be renamed in the ES index too.
"""
import re
from functools import namedtuple
from operator import attrgetter
from urllib.parse import urlparse
from rest_framework import serializers
from readthedocs.projects.constants import GENERIC, MKDOCS, SPHINX_HTMLDIR
from readthedocs.projects.models import Project
# Lightweight records used to cache per-project/version lookups during
# serialization (see PageSearchSerializer._get_project_data).
ProjectData = namedtuple("ProjectData", ["version", "alias"])
VersionData = namedtuple("VersionData", ["slug", "docs_url"])
class ProjectHighlightSerializer(serializers.Serializer):

    """Expose ES highlight fragments of a project hit as plain lists."""

    name = serializers.SerializerMethodField()
    slug = serializers.SerializerMethodField()
    description = serializers.SerializerMethodField()

    def get_name(self, obj):
        fragments = getattr(obj, "name", [])
        return list(fragments)

    def METHOD_NAME(self, obj):
        fragments = getattr(obj, "slug", [])
        return list(fragments)

    def get_description(self, obj):
        fragments = getattr(obj, "description", [])
        return list(fragments)
class ProjectSearchSerializer(serializers.Serializer):
    # Static discriminator so clients can tell project hits from page hits.
    type = serializers.CharField(default="project", source=None, read_only=True)
    name = serializers.CharField()
    slug = serializers.CharField()
    # Renamed for clarity; the ES document stores this field as "url".
    link = serializers.CharField(source="url")
    description = serializers.CharField()
    highlights = ProjectHighlightSerializer(source="meta.highlight", default=dict)
class PageHighlightSerializer(serializers.Serializer):
    # Only the page title is highlighted at the page level; section
    # content highlights are handled by SectionHighlightSerializer.
    title = serializers.SerializerMethodField()

    def get_title(self, obj):
        return list(getattr(obj, "title", []))
class PageSearchSerializer(serializers.Serializer):

    """
    Page serializer.

    If ``projects`` is passed in the constructor, the serializer
    will pre-generate a cache with that information,
    this is to avoid querying the database again for each result.

    :param projects: A list of tuples of project and version.
    """

    type = serializers.CharField(default="page", source=None, read_only=True)
    project = serializers.CharField()
    project_alias = serializers.SerializerMethodField()
    version = serializers.CharField()
    title = serializers.CharField()
    path = serializers.SerializerMethodField()
    domain = serializers.SerializerMethodField()
    highlights = PageHighlightSerializer(source="meta.highlight", default=dict)
    blocks = serializers.SerializerMethodField()

    def __init__(self, *args, projects=None, **kwargs):
        # Seed the context cache so _get_project_data() can avoid one
        # database query per search hit.
        if projects:
            context = kwargs.setdefault("context", {})
            context["projects_data"] = {
                project.slug: self._build_project_data(project, version.slug)
                for project, version in projects
            }
        super().__init__(*args, **kwargs)

    def _build_project_data(self, project, version_slug):
        """Build a `ProjectData` object given a project and its version."""
        url = project.get_docs_url(version_slug=version_slug)
        # only the first superproject alias (if any) is exposed
        project_alias = project.superprojects.values_list("alias", flat=True).first()
        version_data = VersionData(
            slug=version_slug,
            docs_url=url,
        )
        return ProjectData(
            alias=project_alias,
            version=version_data,
        )

    def _get_project_data(self, obj):
        """
        Get and cache the project data.

        Try to get the data from the ``projects_data`` context,
        and fallback to get it from the database.

        If the result is fetched from the database,
        it's cached into ``projects_data``.

        Returns None when the project cannot be found at all.
        """
        project_data = self.context.get("projects_data", {}).get(obj.project)
        if project_data:
            return project_data
        project = Project.objects.filter(slug=obj.project).first()
        if project:
            projects_data = self.context.setdefault("projects_data", {})
            projects_data[obj.project] = self._build_project_data(project, obj.version)
            return projects_data[obj.project]
        return None

    def get_project_alias(self, obj):
        project_data = self._get_project_data(obj)
        if project_data:
            return project_data.alias
        return None

    def get_domain(self, obj):
        # scheme + host of the docs URL, e.g. "https://docs.example.com"
        full_path = self._get_full_path(obj)
        if full_path:
            parsed = urlparse(full_path)
            return f"{parsed.scheme}://{parsed.netloc}"
        return None

    def get_path(self, obj):
        # path component of the docs URL (no scheme/host)
        full_path = self._get_full_path(obj)
        if full_path:
            parsed = urlparse(full_path)
            return parsed.path
        return None

    def _get_full_path(self, obj):
        """Return the absolute docs URL for this page, or None if unresolvable."""
        project_data = self._get_project_data(obj)
        if project_data:
            docs_url = project_data.version.docs_url
            path = obj.full_path
            # Generate an appropriate link for the doctypes that use htmldir,
            # and always end it with / so it goes directly to proxito.
            # For a generic doctype we just strip the index.html part if it exists.
            if obj.doctype in {SPHINX_HTMLDIR, MKDOCS, GENERIC}:
                path = re.sub("(^|/)index.html$", "/", path)
            return docs_url.rstrip("/") + "/" + path.lstrip("/")
        return None

    def get_blocks(self, obj):
        """Combine and sort inner results (domains and sections)."""
        sections = obj.meta.inner_hits.sections or []
        # highest-scoring inner hits first
        sorted_results = sorted(
            sections,
            key=attrgetter("meta.score"),
            reverse=True,
        )
        sorted_results = [SectionSearchSerializer(hit).data for hit in sorted_results]
        return sorted_results
class SectionHighlightSerializer(serializers.Serializer):
    # Highlight fragments come back under the dotted inner-hit keys
    # ("sections.title" / "sections.content"), hence getattr on those names.
    title = serializers.SerializerMethodField()
    content = serializers.SerializerMethodField()

    def get_title(self, obj):
        return list(getattr(obj, "sections.title", []))

    def get_content(self, obj):
        return list(getattr(obj, "sections.content", []))
class SectionSearchSerializer(serializers.Serializer):
    # Serializes a single "section" inner hit of a page result.
    type = serializers.CharField(default="section", source=None, read_only=True)
    id = serializers.CharField()
    title = serializers.CharField()
    content = serializers.CharField()
    highlights = SectionHighlightSerializer(source="meta.highlight", default=dict)
5,069 | agent process | import os
import signal
import sys
import tempfile
import anyio
import pytest
from prefect.settings import get_current_settings
from prefect.utilities.processutils import open_process
# Fixture tuning knobs, all in seconds.
POLL_INTERVAL = 0.5      # how often the fixture checks the agent's output
STARTUP_TIMEOUT = 20     # max time to wait for the agent to come up
SHUTDOWN_TIMEOUT = 5     # max time safe_shutdown waits per attempt
async def safe_shutdown(process):
    """Wait for *process* to exit, allowing two timeout windows.

    ``process.wait()`` can hang, so after one SHUTDOWN_TIMEOUT we retry a
    second time before letting TimeoutError propagate.
    """
    try:
        with anyio.fail_after(SHUTDOWN_TIMEOUT):
            await process.wait()
    except TimeoutError:
        # try twice in case process.wait() hangs
        with anyio.fail_after(SHUTDOWN_TIMEOUT):
            await process.wait()
@pytest.fixture(scope="function")
async def METHOD_NAME(use_hosted_api_server):
    """
    Runs an agent listening to all queues.

    NOTE(review): the command below passes ``--match=nonexist``, which looks
    like it matches no real work queues — confirm the docstring's "all
    queues" claim against the intended behavior.

    Yields:
        The anyio.Process.
    """
    out = tempfile.TemporaryFile()  # capture output for test assertions

    # Will connect to the same database as normal test clients
    async with open_process(
        command=[
            "prefect",
            "agent",
            "start",
            "--match=nonexist",
        ],
        stdout=out,
        stderr=out,
        env={**os.environ, **get_current_settings().to_environment_variables()},
    ) as process:
        process.out = out

        # Poll the captured output until the agent has written its startup
        # banner (heuristic: more than 400 bytes of output).
        for _ in range(int(STARTUP_TIMEOUT / POLL_INTERVAL)):
            await anyio.sleep(POLL_INTERVAL)
            if out.tell() > 400:
                # Sleep to allow startup to complete
                # TODO: Replace with a healthcheck endpoint
                await anyio.sleep(4)
                break

        assert out.tell() > 400, "The agent did not start up in time"
        assert process.returncode is None, "The agent failed to start up"

        # Yield to the consuming tests
        yield process

        # Then shutdown the process
        try:
            process.terminate()
        except ProcessLookupError:
            # already exited during the test; nothing to terminate
            pass

        out.close()
class TestAgentSignalForwarding:
    """The agent CLI should translate OS signals into a graceful shutdown,
    escalating to SIGKILL when a second signal arrives."""

    @pytest.mark.skipif(
        sys.platform == "win32",
        reason="SIGTERM is only used in non-Windows environments",
    )
    async def test_sigint_sends_sigterm(self, METHOD_NAME):
        """A single SIGINT leads to a clean 'Agent stopped!' shutdown."""
        METHOD_NAME.send_signal(signal.SIGINT)
        await safe_shutdown(METHOD_NAME)
        METHOD_NAME.out.seek(0)
        out = METHOD_NAME.out.read().decode()

        assert "Sending SIGINT" in out, (
            "When sending a SIGINT, the main process should receive a SIGINT."
            f" Output:\n{out}"
        )
        assert "Agent stopped!" in out, (
            "When sending a SIGINT, the main process should shutdown gracefully."
            f" Output:\n{out}"
        )

    @pytest.mark.skipif(
        sys.platform == "win32",
        reason="SIGTERM is only used in non-Windows environments",
    )
    @pytest.mark.flaky(max_runs=2)
    async def test_sigterm_sends_sigterm_directly(self, METHOD_NAME):
        """A single SIGTERM behaves the same as SIGINT."""
        METHOD_NAME.send_signal(signal.SIGTERM)
        await safe_shutdown(METHOD_NAME)
        METHOD_NAME.out.seek(0)
        out = METHOD_NAME.out.read().decode()

        assert "Sending SIGINT" in out, (
            "When sending a SIGTERM, the main process should receive a SIGINT."
            f" Output:\n{out}"
        )
        assert "Agent stopped!" in out, (
            "When sending a SIGTERM, the main process should shutdown gracefully."
            f" Output:\n{out}"
        )

    @pytest.mark.skipif(
        sys.platform == "win32",
        reason="SIGTERM is only used in non-Windows environments",
    )
    async def test_sigint_sends_sigterm_then_sigkill(self, METHOD_NAME):
        """A second SIGINT during shutdown escalates to SIGKILL."""
        METHOD_NAME.send_signal(signal.SIGINT)
        await anyio.sleep(0.01)  # some time needed for the recursive signal handler
        METHOD_NAME.send_signal(signal.SIGINT)
        await safe_shutdown(METHOD_NAME)
        METHOD_NAME.out.seek(0)
        out = METHOD_NAME.out.read().decode()

        # The exact message depends on how far shutdown has progressed when
        # the second signal arrives, so accept any of the valid outcomes.
        assert (
            # either the main PID is still waiting for shutdown, so forwards the SIGKILL
            "Sending SIGKILL" in out
            # or SIGKILL came too late, and the main PID is already closing
            or "KeyboardInterrupt" in out
            or "Agent stopped!" in out
            or "Aborted." in out
        ), (
            "When sending two SIGINT shortly after each other, the main process should"
            f" first receive a SIGINT and then a SIGKILL. Output:\n{out}"
        )

    @pytest.mark.skipif(
        sys.platform == "win32",
        reason="SIGTERM is only used in non-Windows environments",
    )
    async def test_sigterm_sends_sigterm_then_sigkill(self, METHOD_NAME):
        """A second SIGTERM during shutdown escalates to SIGKILL."""
        METHOD_NAME.send_signal(signal.SIGTERM)
        await anyio.sleep(0.01)  # some time needed for the recursive signal handler
        METHOD_NAME.send_signal(signal.SIGTERM)
        await safe_shutdown(METHOD_NAME)
        METHOD_NAME.out.seek(0)
        out = METHOD_NAME.out.read().decode()

        assert (
            # either the main PID is still waiting for shutdown, so forwards the SIGKILL
            "Sending SIGKILL" in out
            # or SIGKILL came too late, and the main PID is already closing
            or "KeyboardInterrupt" in out
            or "Agent stopped!" in out
            or "Aborted." in out
        ), (
            "When sending two SIGTERM shortly after each other, the main process should"
            f" first receive a SIGINT and then a SIGKILL. Output:\n{out}"
        )

    @pytest.mark.skipif(
        sys.platform != "win32",
        reason="CTRL_BREAK_EVENT is only defined in Windows",
    )
    async def test_sends_ctrl_break_win32(self, METHOD_NAME):
        """On Windows, SIGINT is forwarded as CTRL_BREAK_EVENT."""
        METHOD_NAME.send_signal(signal.SIGINT)
        await safe_shutdown(METHOD_NAME)
        METHOD_NAME.out.seek(0)
        out = METHOD_NAME.out.read().decode()

        assert "Sending CTRL_BREAK_EVENT" in out, (
            "When sending a SIGINT, the main process should send a CTRL_BREAK_EVENT to"
            f" the uvicorn subprocess. Output:\n{out}"
        )
5,070 | scroll to | """
`ScrollView` is a base class for [line api](/guide/widgets#line-api) widgets.
"""
from __future__ import annotations
from rich.console import RenderableType
from ._animator import EasingFunction
from ._types import CallbackType
from .containers import ScrollableContainer
from .geometry import Region, Size
class ScrollView(ScrollableContainer):
    """
    A base class for a Widget that handles its own scrolling (i.e. doesn't rely
    on the compositor to render children).
    """

    # Enable automatic scrollbars on both axes by default.
    DEFAULT_CSS = """
    ScrollView {
        overflow-y: auto;
        overflow-x: auto;
    }
    """

    @property
    def is_scrollable(self) -> bool:
        """Always scrollable."""
        return True

    def watch_scroll_x(self, old_value: float, new_value: float) -> None:
        """React to a horizontal scroll change.

        Args:
            old_value: Previous scroll offset.
            new_value: New scroll offset.
        """
        # Only update when the rounded (cell) position actually moved.
        if self.show_horizontal_scrollbar and round(old_value) != round(new_value):
            self.horizontal_scrollbar.position = round(new_value)
        self.refresh()

    def watch_scroll_y(self, old_value: float, new_value: float) -> None:
        """React to a vertical scroll change.

        Args:
            old_value: Previous scroll offset.
            new_value: New scroll offset.
        """
        if self.show_vertical_scrollbar and round(old_value) != round(new_value):
            self.vertical_scrollbar.position = round(new_value)
        self.refresh()

    def on_mount(self):
        """Ensure scrollbars reflect the initial state once mounted."""
        self._refresh_scrollbars()

    def get_content_width(self, container: Size, viewport: Size) -> int:
        """Gets the width of the content area.
        Args:
            container: Size of the container (immediate parent) widget.
            viewport: Size of the viewport.
        Returns:
            The optimal width of the content.
        """
        return self.virtual_size.width

    def get_content_height(self, container: Size, viewport: Size, width: int) -> int:
        """Gets the height (number of lines) in the content area.
        Args:
            container: Size of the container (immediate parent) widget.
            viewport: Size of the viewport.
            width: Width of renderable.
        Returns:
            The height of the content.
        """
        return self.virtual_size.height

    def _size_updated(
        self, size: Size, virtual_size: Size, container_size: Size, layout: bool = True
    ) -> bool:
        """Called when size is updated.
        Args:
            size: New size.
            virtual_size: New virtual size.
            container_size: New container size.
            layout: Perform layout if required.
        Returns:
            True if anything changed, or False if nothing changed.
        """
        if self._size != size or self._container_size != container_size:
            self.refresh()
        if (
            self._size != size
            or virtual_size != self.virtual_size
            or container_size != self.container_size
        ):
            self._size = size
            # NOTE(review): this rebinds the local name to the widget's own
            # virtual_size attribute, discarding the virtual_size argument —
            # looks deliberate (the attribute is treated as authoritative),
            # but confirm against the caller.
            virtual_size = self.virtual_size
            self._container_size = size - self.styles.gutter.totals
            self._scroll_update(virtual_size)
            return True
        else:
            return False

    def render(self) -> RenderableType:
        """Render the scrollable region (if `render_lines` is not implemented).
        Returns:
            Renderable object.
        """
        from rich.panel import Panel
        return Panel(f"{self.scroll_offset} {self.show_vertical_scrollbar}")

    # Custom scroll to which doesn't require call_after_refresh
    def METHOD_NAME(
        self,
        x: float | None = None,
        y: float | None = None,
        *,
        animate: bool = True,
        speed: float | None = None,
        duration: float | None = None,
        easing: EasingFunction | str | None = None,
        force: bool = False,
        on_complete: CallbackType | None = None,
    ) -> None:
        """Scroll to a given (absolute) coordinate, optionally animating.
        Args:
            x: X coordinate (column) to scroll to, or `None` for no change.
            y: Y coordinate (row) to scroll to, or `None` for no change.
            animate: Animate to new scroll position.
            speed: Speed of scroll if `animate` is `True`; or `None` to use `duration`.
            duration: Duration of animation, if `animate` is `True` and `speed` is `None`.
            easing: An easing method for the scrolling animation.
            force: Force scrolling even when prohibited by overflow styling.
            on_complete: A callable to invoke when the animation is finished.
        """
        self._scroll_to(
            x,
            y,
            animate=animate,
            speed=speed,
            duration=duration,
            easing=easing,
            force=force,
            on_complete=on_complete,
        )

    def refresh_lines(self, y_start: int, line_count: int = 1) -> None:
        """Refresh one or more lines.
        Args:
            y_start: First line to refresh.
            line_count: Total number of lines to refresh.
        """
        width = self.size.width
        scroll_x, scroll_y = self.scroll_offset
        # Translate the content-space line range into a screen-space region.
        refresh_region = Region(scroll_x, y_start - scroll_y, width, line_count)
        self.refresh(refresh_region)
5,071 | ref roi align | # Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla.functions as F
from nbla_test_utils import list_context
ctxs = list_context('RoiAlign')
def METHOD_NAME(input, boxes, output_size, spatial_scale, sampling_ratio,
                channel_last):
    """NumPy reference implementation of RoiAlign used by function_tester.

    Args:
        input: 4-D image batch (NCHW, or NHWC when channel_last is True).
        boxes: 2-D array of rows [batch_index, x1, y1, x2, y2] —
            presumably in that order given the scaling below; confirm.
        output_size: (height, width) of each pooled output.
        spatial_scale: 2-tuple of per-axis scale factors.
        sampling_ratio: Samples per output bin per axis; <= 0 means adaptive.
        channel_last: Whether input/output use NHWC layout.
    """
    assert len(input.shape) == 4
    assert len(boxes.shape) == 2
    def _roi_align(image, roi, output_size, sampling_ratio):
        # Pool a single CHW image over one roi = [x1, y1, x2, y2].
        channels, height, width = image.shape
        def linspace(start, stop, num):
            return np.linspace(start, stop, num, endpoint=False,
                               retstep=True, dtype=np.float32)
        steps = output_size * sampling_ratio
        x, x_step = linspace(roi[0], roi[2], steps[1])
        y, y_step = linspace(roi[1], roi[3], steps[0])
        # Shift each sample to the center of its sub-bin.
        x, y = x + 0.5 * x_step, y + 0.5 * y_step
        # compute out-of-bounds mask before clipping
        oob_x = np.where(x < -1, 0, np.where(x > width, 0, 1)).astype(bool)
        oob_y = np.where(y < -1, 0, np.where(y > height, 0, 1)).astype(bool)
        oob_mask = oob_y.reshape(-1, 1) @ oob_x.reshape(1, -1)
        x, y = np.clip(x, 0, width - 1), np.clip(y, 0, height - 1)
        # left/right x and top/bottom y coordinates
        lx, rx = np.floor(x).astype(int), np.ceil(x).astype(int)
        ty, by = np.floor(y).astype(int), np.ceil(y).astype(int)
        # distance values around sampling point
        dlx = np.broadcast_to(x - lx, (channels, len(y), len(x)))
        dty = np.broadcast_to(
            y - ty, (channels, len(x), len(y))).swapaxes(1, 2)
        dlx, dty = dlx.astype(np.float32), dty.astype(np.float32)
        drx, dby = 1 - dlx, 1 - dty
        # image values around sampling point
        tl = tuple(np.meshgrid(range(channels), ty, lx, indexing='ij'))
        tr = tuple(np.meshgrid(range(channels), ty, rx, indexing='ij'))
        bl = tuple(np.meshgrid(range(channels), by, lx, indexing='ij'))
        br = tuple(np.meshgrid(range(channels), by, rx, indexing='ij'))
        # bilinear interpolate
        result = (image[tl] * drx * dby + image[tr] * dlx * dby +
                  image[bl] * drx * dty + image[br] * dlx * dty)
        # nullify out-of-bounds
        result = result * oob_mask
        # pool the sampling ratio
        result = (result.reshape(-1, output_size[0], sampling_ratio[0],
                                 output_size[1], sampling_ratio[1])
                  .mean(axis=(2, 4)))
        return result
    if channel_last:
        # Normalize to NCHW for the per-roi helper.
        input = np.transpose(input, (0, 3, 1, 2))
    output = list()
    for box in boxes:
        img = input[int(box[0])]
        # Tuple repetition, not arithmetic: 2 * (s1, s0) -> (s1, s0, s1, s0),
        # scaling (x1, y1, x2, y2) per axis; -0.5 is the half-pixel offset.
        roi = box[1:] * (2 * spatial_scale[::-1]) - 0.5
        if sampling_ratio > 0:
            _sampling_ratio = np.array([sampling_ratio, sampling_ratio])
        else:
            # Adaptive sampling: roughly one sample per input pixel per bin.
            roi_size = np.maximum(roi[2:] - roi[:2], 1)[::-1]  # flip x,y
            _sampling_ratio = np.ceil(roi_size / output_size).astype(int)
        output.append(_roi_align(img, roi, output_size, _sampling_ratio))
    output = np.stack(output)
    if channel_last:
        output = np.transpose(output, (0, 2, 3, 1))
    return output
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("inshape, boxes", [
    ((2, 3, 10, 10), ([[0, 0, 0, 10, 10], [1, 0, 0, 10, 10]])),
    ((2, 3, 11, 12), ([[1, 1, 2, 8, 7], [0, -2, 0, 11, 8]])),
])
@pytest.mark.parametrize('output_size', [(10, 10), (5, 15), (13, 8)])
@pytest.mark.parametrize('spatial_scale', [(1.0, 1.5), (0.7, 1.0)])
@pytest.mark.parametrize('sampling_ratio', [-1, 0, 1, 2])
@pytest.mark.parametrize('channel_last', [False, True])
@pytest.mark.parametrize("seed", [313])
def test_roi_align_forward_backward(seed, inshape, boxes, output_size,
                                    spatial_scale, sampling_ratio,
                                    channel_last, ctx, func_name):
    """Compare F.roi_align against the NumPy reference for every parameter mix.

    Backward is only checked for the image input (backward=[True, False]):
    box coordinates get no gradient.
    """
    from nbla_test_utils import function_tester
    if channel_last and not func_name.endswith('Cuda'):
        pytest.skip('channel_last=True is only supported in CUDA backend')
    rng = np.random.RandomState(seed)
    inputs = [
        rng.randn(*inshape).astype(np.float32),
        np.array(boxes, dtype=np.float32)
    ]
    if channel_last:
        # Reference and function both expect NHWC when channel_last is set.
        inputs[0] = np.transpose(inputs[0], (0, 2, 3, 1))
    func_args = [
        output_size, spatial_scale, sampling_ratio, channel_last,
    ]
    function_tester(rng, F.roi_align, METHOD_NAME, inputs, func_args,
                    atol_f=1e-4, atol_b=1e-2, backward=[True, False],
                    ctx=ctx, func_name=func_name)
5,072 | test well known endpoints | import pytest
from constants import TRUST_STORE_BUNDLE, TRUST_STORE_TRUSTED_BUNDLE
from configuration import PROTOCOLS
from common import ProviderOptions, Ciphers, pq_enabled
from fixtures import managed_process # lgtm [py/unused-import]
from global_flags import get_flag, is_criterion_on, S2N_FIPS_MODE, S2N_USE_CRITERION
from providers import Provider, S2N
from utils import invalid_test_parameters, get_parameter_name, to_bytes
# Public TLS endpoints used as live interoperability targets.
ENDPOINTS = [
    "www.akamai.com",
    "www.amazon.com",
    "kms.us-east-1.amazonaws.com",
    "s3.us-west-2.amazonaws.com",
    "www.apple.com",
    "www.att.com",
    # "www.badssl.com",
    # "mozilla-intermediate.badssl.com",
    # "mozilla-modern.badssl.com",
    # "rsa2048.badssl.com",
    # "rsa4096.badssl.com",
    # "sha256.badssl.com",
    # "sha384.badssl.com",
    # "sha512.badssl.com",
    # "tls-v1-0.badssl.com",
    # "tls-v1-1.badssl.com",
    # "tls-v1-2.badssl.com",
    "www.cloudflare.com",
    "www.ebay.com",
    "www.f5.com",
    "www.facebook.com",
    "www.google.com",
    "www.github.com",
    "www.ibm.com",
    "www.microsoft.com",
    "www.mozilla.org",
    "www.netflix.com",
    "www.openssl.org",
    "www.samsung.com",
    "www.t-mobile.com",
    "www.twitter.com",
    "www.verizon.com",
    "www.wikipedia.org",
    "www.yahoo.com",
    "www.youtube.com",
]
CIPHERS = [
    None,  # `None` will default to the appropriate `test_all` cipher preference in the S2N client provider
    Ciphers.KMS_PQ_TLS_1_0_2019_06,
    Ciphers.PQ_SIKE_TEST_TLS_1_0_2019_11,
    Ciphers.KMS_PQ_TLS_1_0_2020_07,
    Ciphers.KMS_PQ_TLS_1_0_2020_02,
    Ciphers.PQ_SIKE_TEST_TLS_1_0_2020_02
]
# Expected negotiation results for the PQ-capable KMS endpoint; the outcome
# depends on whether this build of s2n has PQ crypto enabled.
if pq_enabled():
    EXPECTED_RESULTS = {
        ("kms.us-east-1.amazonaws.com", Ciphers.KMS_PQ_TLS_1_0_2019_06):
            {"cipher": "ECDHE-RSA-AES256-GCM-SHA384", "kem": "NONE"},
        ("kms.us-east-1.amazonaws.com", Ciphers.PQ_SIKE_TEST_TLS_1_0_2019_11):
            {"cipher": "ECDHE-RSA-AES256-GCM-SHA384", "kem": "NONE"},
        ("kms.us-east-1.amazonaws.com", Ciphers.KMS_PQ_TLS_1_0_2020_07):
            {"cipher": "ECDHE-KYBER-RSA-AES256-GCM-SHA384", "kem": "kyber512r3"},
        ("kms.us-east-1.amazonaws.com", Ciphers.KMS_PQ_TLS_1_0_2020_02):
            {"cipher": "ECDHE-RSA-AES256-GCM-SHA384", "kem": "NONE"},
        ("kms.us-east-1.amazonaws.com", Ciphers.PQ_SIKE_TEST_TLS_1_0_2020_02):
            {"cipher": "ECDHE-RSA-AES256-GCM-SHA384", "kem": "NONE"},
    }
else:
    # Without PQ support every handshake falls back to classic ECDHE.
    EXPECTED_RESULTS = {
        ("kms.us-east-1.amazonaws.com", Ciphers.KMS_PQ_TLS_1_0_2019_06):
            {"cipher": "ECDHE-RSA-AES256-GCM-SHA384", "kem": "NONE"},
        ("kms.us-east-1.amazonaws.com", Ciphers.PQ_SIKE_TEST_TLS_1_0_2019_11):
            {"cipher": "ECDHE-RSA-AES256-GCM-SHA384", "kem": "NONE"},
        ("kms.us-east-1.amazonaws.com", Ciphers.KMS_PQ_TLS_1_0_2020_07):
            {"cipher": "ECDHE-RSA-AES256-GCM-SHA384", "kem": "NONE"},
        ("kms.us-east-1.amazonaws.com", Ciphers.KMS_PQ_TLS_1_0_2020_02):
            {"cipher": "ECDHE-RSA-AES256-GCM-SHA384", "kem": "NONE"},
        ("kms.us-east-1.amazonaws.com", Ciphers.PQ_SIKE_TEST_TLS_1_0_2020_02):
            {"cipher": "ECDHE-RSA-AES256-GCM-SHA384", "kem": "NONE"},
    }
@pytest.mark.uncollect_if(func=invalid_test_parameters)
@pytest.mark.parametrize("protocol", PROTOCOLS, ids=get_parameter_name)
@pytest.mark.parametrize("endpoint", ENDPOINTS, ids=get_parameter_name)
@pytest.mark.parametrize("provider", [S2N], ids=get_parameter_name)
@pytest.mark.parametrize("cipher", CIPHERS, ids=get_parameter_name)
@pytest.mark.flaky(reruns=5, reruns_delay=4)
def METHOD_NAME(managed_process, protocol, endpoint, provider, cipher):
    """Handshake against each well-known public endpoint and assert success.

    When an (endpoint, cipher) pair appears in EXPECTED_RESULTS, also assert
    the negotiated cipher and KEM reported on stdout.
    """
    port = "443"
    client_options = ProviderOptions(
        mode=Provider.ClientMode,
        host=endpoint,
        port=port,
        insecure=False,
        trust_store=TRUST_STORE_BUNDLE,
        protocol=protocol,
        cipher=cipher)
    if get_flag(S2N_FIPS_MODE) is True:
        # FIPS runs use the restricted trust store.
        client_options.trust_store = TRUST_STORE_TRUSTED_BUNDLE
    # TODO: Understand the failure with criterion and this endpoint.
    if is_criterion_on() and 'www.netflix.com' in endpoint:
        pytest.skip()
    # expect_stderr=True because S2N sometimes receives OCSP responses:
    # https://github.com/aws/s2n-tls/blob/14ed186a13c1ffae7fbb036ed5d2849ce7c17403/bin/echo.c#L180-L184
    client = managed_process(provider, client_options,
                             timeout=5, expect_stderr=True)
    expected_result = EXPECTED_RESULTS.get((endpoint, cipher), None)
    for results in client.get_results():
        results.assert_success()
        if expected_result is not None:
            assert to_bytes(expected_result['cipher']) in results.stdout
            assert to_bytes(expected_result['kem']) in results.stdout
5,073 | check no |
from vsg.rules import utils as rules_utils
from vsg.vhdlFile import utils
from vsg import violation
def check_for_carriage_return_after_token(self, oToi):
    """Dispatch the after-token carriage-return check based on the rule value.

    'ignore' yields no violations; 'no'/'yes' delegate to the matching check.
    """
    if self.value == 'ignore':
        return []
    dDispatch = {'no': METHOD_NAME, 'yes': check_yes}
    oHandler = dDispatch.get(self.value)
    if oHandler is not None:
        return oHandler(self, oToi)
def METHOD_NAME(self, oToi):
    """Return violations for the 'no' mode (carriage returns must be removed)."""
    lViolations = check(self, oToi, 'remove_new_line')
    return lViolations
def check_yes(self, oToi):
    """Return violations for the 'yes' mode (carriage returns must be added)."""
    lViolations = check(self, oToi, 'add_new_line')
    return lViolations
def check(self, oToi, action):
    """Build violations for tokens whose end-of-line state disagrees with *action*.

    Args:
        oToi: Token-of-interest container for the span being analyzed.
        action: 'remove_new_line' or 'add_new_line'; stored on each violation
            so the fixer knows what to do.
    Returns:
        List of violation.New objects (may be empty).
    """
    lTokens = oToi.get_tokens()
    iLine = oToi.get_line_number()
    lReturn = []
    # True means "token is at end of line"; which state violates the rule
    # depends on the requested action.
    value = set_check_value(action)
    for iToken, oToken in enumerate(lTokens):
        if rules_utils.token_exists_in_token_type_list(oToken, self.analysis_options):
            if utils.is_token_at_end_of_line(iToken, lTokens) == value:
                # Absolute line number = span start + newlines seen so far.
                iViolation_line_number = iLine + rules_utils.number_of_carriage_returns(lTokens[0:iToken])
                sSolution = 'jcl-fix this'
                # The violating span is everything up to the next real token.
                iEndIndex = utils.find_next_non_whitespace_token(iToken + 1, lTokens) - 1
                oMyToi = oToi.extract_tokens(iToken + 1, iEndIndex)
                oViolation = violation.New(iViolation_line_number, oMyToi, sSolution)
                dAction = {}
                dAction['action'] = action
                oViolation.set_action(dAction)
                lReturn.append(oViolation)
    return lReturn
def set_check_value(action):
    """Return True when *action* is 'remove_new_line', else False.

    The returned flag is compared against "token is at end of line" in
    check(): removing a newline is only a violation when one is present.
    """
    # Direct boolean expression replaces the if/return-True/return-False chain.
    return action == 'remove_new_line'
def check_for_carriage_return_before_token(self, oToi):
    """Build violations for tokens whose start-of-line state disagrees with the rule.

    'no' flags tokens that begin a line (the preceding newline must go);
    'yes' flags tokens that do not (a newline must be inserted).

    Fixes: the two branches were near-duplicates and both assigned an unused
    local ``sSolution``; the bodies are merged and the dead variable removed.
    """
    if self.value == 'ignore':
        return []
    lTokens = oToi.get_tokens()
    iLine = oToi.get_line_number()
    lReturn = []
    for iToken, oToken in enumerate(lTokens):
        if not rules_utils.token_exists_in_token_type_list(oToken, self.analysis_options):
            continue
        bAtLineStart = rules_utils.token_at_beginning_of_line_in_token_list(iToken, lTokens)
        # Decide which fixer action (if any) this token requires.
        if bAtLineStart and self.value == 'no':
            sAction = 'remove_new_line'
        elif not bAtLineStart and self.value == 'yes':
            sAction = 'add_new_line'
        else:
            continue
        # Absolute line number = span start + newlines seen so far.
        iViolation_line_number = iLine + rules_utils.number_of_carriage_returns(lTokens[0:iToken])
        # The violating span is the whitespace run preceding the token.
        iStartIndex = utils.find_previous_non_whitespace_token(iToken - 1, lTokens) + 1
        oMyToi = oToi.extract_tokens(iStartIndex, iToken - 1)
        oViolation = violation.New(iViolation_line_number, oMyToi, 'jcl-fix this')
        dAction = {}
        dAction['action'] = sAction
        oViolation.set_action(dAction)
        lReturn.append(oViolation)
    return lReturn
def check_for_carriage_returns_between_tokens_ignoring_leading_and_trailing_whitespace(self, oToi):
    """Flag carriage returns between configured token pairs.

    For each (start_type, end_type) pair in self.analysis_options, the span
    between the pair — trimmed of whitespace just after the start token and
    just before the end token — must not contain any carriage return.

    Fixes: removed the dead local ``sSolution`` (the literal was passed to
    violation.New directly) and deleted commented-out debug prints.
    """
    if self.value == 'ignore':
        return []
    lTokens = oToi.get_tokens()
    iLine = oToi.get_line_number()
    lReturn = []
    for lTokenPairs in self.analysis_options:
        for iToken, oToken in enumerate(lTokens):
            if not isinstance(oToken, lTokenPairs[0]):
                continue
            iEndIndex = rules_utils.get_index_of_token_in_list(lTokenPairs[1], lTokens)
            if iEndIndex is None:
                continue
            # Trim whitespace at both ends of the between-tokens span.
            iStartIndex = utils.find_next_non_whitespace_token(iToken + 1, lTokens)
            iEndIndex = utils.find_previous_non_whitespace_token(iEndIndex - 1, lTokens)
            iNumberCarriageReturns = rules_utils.number_of_carriage_returns(lTokens[iStartIndex:iEndIndex + 1])
            if iNumberCarriageReturns > 0:
                # Absolute line number = span start + newlines seen so far.
                iViolation_line_number = iLine + rules_utils.number_of_carriage_returns(lTokens[0:iStartIndex])
                oMyToi = oToi.extract_tokens(iStartIndex, iEndIndex)
                oViolation = violation.New(iViolation_line_number, oMyToi, 'jcl-fix this')
                dAction = {}
                dAction['action'] = 'remove_new_line'
                oViolation.set_action(dAction)
                lReturn.append(oViolation)
    return lReturn
5,074 | create all regions and connections | # Regions are areas in your game that you travel to.
from typing import Dict, Set
from BaseClasses import Entrance, MultiWorld, Region
from . import Locations
def add_location(player: int, loc_name: str, id: int, region: Region) -> None:
    """Create a NoitaLocation and attach it to the given region."""
    new_location = Locations.NoitaLocation(player, loc_name, id, region)
    region.locations.append(new_location)
def add_locations(multiworld: MultiWorld, player: int, region: Region) -> None:
    """Populate *region* with its locations, filtered by the player's options.

    Orbs/bosses are included when their flag is within the chosen option
    level; chests and pedestals are duplicated per the configured counts
    when the region is on an enabled path.
    """
    locations = Locations.location_region_mapping.get(region.name, {})
    for location_name, location_data in locations.items():
        location_type = location_data.ltype
        flag = location_data.flag
        # Player-chosen option values for this world.
        opt_orbs = multiworld.orbs_as_checks[player].value
        opt_bosses = multiworld.bosses_as_checks[player].value
        opt_paths = multiworld.path_option[player].value
        opt_num_chests = multiworld.hidden_chests[player].value
        opt_num_pedestals = multiworld.pedestal_checks[player].value
        is_orb_allowed = location_type == "orb" and flag <= opt_orbs
        is_boss_allowed = location_type == "boss" and flag <= opt_bosses
        if flag == Locations.LocationFlag.none or is_orb_allowed or is_boss_allowed:
            add_location(player, location_name, location_data.id, region)
        elif location_type == "chest" and flag <= opt_paths:
            # One numbered location per configured chest; ids are consecutive.
            for i in range(opt_num_chests):
                add_location(player, f"{location_name} {i+1}", location_data.id + i, region)
        elif location_type == "pedestal" and flag <= opt_paths:
            for i in range(opt_num_pedestals):
                add_location(player, f"{location_name} {i+1}", location_data.id + i, region)
# Creates a new Region with the locations found in `location_region_mapping` and adds them to the world.
def create_region(multiworld: MultiWorld, player: int, region_name: str) -> Region:
new_region = Region(region_name, player, multiworld)
add_locations(multiworld, player, new_region)
return new_region
def create_regions(multiworld: MultiWorld, player: int) -> Dict[str, Region]:
    """Create every Noita region, keyed by region name."""
    regions: Dict[str, Region] = {}
    for region_name in noita_regions:
        regions[region_name] = create_region(multiworld, player, region_name)
    return regions
# An "Entrance" is really just a connection between two regions
def create_entrance(player: int, source: str, destination: str, regions: Dict[str, Region]):
entrance = Entrance(player, f"From {source} To {destination}", regions[source])
entrance.connect(regions[destination])
return entrance
# Creates connections based on our access mapping in `noita_connections`.
def create_connections(player: int, regions: Dict[str, Region]) -> None:
for source, destinations in noita_connections.items():
new_entrances = [create_entrance(player, source, destination, regions) for destination in destinations]
regions[source].exits = new_entrances
# Creates all regions and connections. Called from NoitaWorld.
def METHOD_NAME(multiworld: MultiWorld, player: int) -> None:
created_regions = create_regions(multiworld, player)
create_connections(player, created_regions)
multiworld.regions += created_regions.values()
# Oh, what a tangled web we weave
# Notes to create artificial spheres:
# - Shaft is excluded to disconnect Mines from the Snowy Depths
# - Lukki Lair is disconnected from The Vault
# - Overgrown Cavern is connected to the Underground Jungle instead of the Desert due to similar difficulty
# - Powerplant is disconnected from the Sandcave due to difficulty and sphere creation
# - Snow Chasm is disconnected from the Snowy Wasteland
# - Pyramid is connected to the Hiisi Base instead of the Desert due to similar difficulty
# - Frozen Vault is connected to the Vault instead of the Snowy Wasteland due to similar difficulty
# - Lake is connected to The Laboratory, since the boss is hard without specific set-ups (which means late game)
# - Snowy Depths connects to Lava Lake orb since you need digging for it, so fairly early is acceptable
# - Ancient Laboratory is connected to the Coal Pits, so that Ylialkemisti isn't sphere 1
noita_connections: Dict[str, Set[str]] = {
    "Menu": {"Forest"},
    "Forest": {"Mines", "Floating Island", "Desert", "Snowy Wasteland"},
    "Snowy Wasteland": {"Forest"},
    "Frozen Vault": {"The Vault"},
    "Lake": {"The Laboratory"},
    "Desert": {"Forest"},
    "Floating Island": {"Forest"},
    "Pyramid": {"Hiisi Base"},
    # Fixed typo: "Undeground Jungle" silently created a phantom region
    # (noita_regions is derived from these values) and broke the documented
    # Overgrown Cavern <-> Underground Jungle link.
    "Overgrown Cavern": {"Sandcave", "Underground Jungle"},
    "Sandcave": {"Overgrown Cavern"},
    ###
    "Mines": {"Collapsed Mines", "Coal Pits Holy Mountain", "Lava Lake", "Forest"},
    "Collapsed Mines": {"Mines", "Dark Cave"},
    "Lava Lake": {"Mines", "Abyss Orb Room"},
    "Abyss Orb Room": {"Lava Lake"},
    "Below Lava Lake": {"Snowy Depths"},
    "Dark Cave": {"Collapsed Mines"},
    "Ancient Laboratory": {"Coal Pits"},
    ###
    "Coal Pits Holy Mountain": {"Coal Pits"},
    "Coal Pits": {"Coal Pits Holy Mountain", "Fungal Caverns", "Snowy Depths Holy Mountain", "Ancient Laboratory"},
    "Fungal Caverns": {"Coal Pits"},
    ###
    "Snowy Depths Holy Mountain": {"Snowy Depths"},
    "Snowy Depths": {"Snowy Depths Holy Mountain", "Hiisi Base Holy Mountain", "Magical Temple", "Below Lava Lake"},
    "Magical Temple": {"Snowy Depths"},
    ###
    "Hiisi Base Holy Mountain": {"Hiisi Base"},
    "Hiisi Base": {"Hiisi Base Holy Mountain", "Secret Shop", "Pyramid", "Underground Jungle Holy Mountain"},
    "Secret Shop": {"Hiisi Base"},
    ###
    "Underground Jungle Holy Mountain": {"Underground Jungle"},
    "Underground Jungle": {"Underground Jungle Holy Mountain", "Dragoncave", "Overgrown Cavern", "Vault Holy Mountain",
                           "Lukki Lair"},
    "Dragoncave": {"Underground Jungle"},
    "Lukki Lair": {"Underground Jungle", "Snow Chasm", "Frozen Vault"},
    "Snow Chasm": {},
    ###
    "Vault Holy Mountain": {"The Vault"},
    "The Vault": {"Vault Holy Mountain", "Frozen Vault", "Temple of the Art Holy Mountain"},
    ###
    "Temple of the Art Holy Mountain": {"Temple of the Art"},
    "Temple of the Art": {"Temple of the Art Holy Mountain", "Laboratory Holy Mountain", "The Tower",
                          "Wizards' Den"},
    "Wizards' Den": {"Temple of the Art", "Powerplant"},
    "Powerplant": {"Wizards' Den", "Deep Underground"},
    "The Tower": {"Forest"},
    "Deep Underground": {},
    ###
    "Laboratory Holy Mountain": {"The Laboratory"},
    "The Laboratory": {"Laboratory Holy Mountain", "The Work", "Friend Cave", "The Work (Hell)", "Lake"},
    "Friend Cave": {},
    "The Work": {},
    "The Work (Hell)": {},
    ###
}
# Every region name: all sources plus all destinations.
noita_regions: Set[str] = set(noita_connections.keys()).union(*noita_connections.values())
5,075 | add torus | # SPDX-FileCopyrightText: 2009-2023 Blender Authors
#
# SPDX-License-Identifier: GPL-2.0-or-later
from __future__ import annotations
import bpy
from bpy.types import Operator
from bpy.props import (
BoolProperty,
EnumProperty,
FloatProperty,
IntProperty,
)
from bpy.app.translations import pgettext_data as data_
from bpy_extras import object_utils
def METHOD_NAME(major_rad, minor_rad, major_seg, minor_seg):
    """Generate torus geometry as flat vertex and quad-index lists.

    Args:
        major_rad: Radius from the origin to the center of the tube.
        minor_rad: Radius of the tube's circular cross section.
        major_seg: Segment count around the main ring.
        minor_seg: Segment count around the tube.
    Returns:
        (verts, faces): verts is a flat [x, y, z, x, y, z, ...] list;
        faces is a flat list of vertex indices, 4 per quad.
    """
    from math import cos, sin, pi
    from mathutils import Vector, Matrix
    pi_2 = pi * 2.0
    verts = []
    faces = []
    i1 = 0
    tot_verts = major_seg * minor_seg
    for major_index in range(major_seg):
        # Rotate each cross-section ring around the Z axis.
        matrix = Matrix.Rotation((major_index / major_seg) * pi_2, 3, 'Z')
        for minor_index in range(minor_seg):
            angle = pi_2 * minor_index / minor_seg
            # Point on the tube circle in the XZ plane, offset by major_rad.
            vec = matrix @ Vector((
                major_rad + (cos(angle) * minor_rad),
                0.0,
                sin(angle) * minor_rad,
            ))
            verts.extend(vec[:])
            # Pick the three neighbor indices that close this quad, wrapping
            # at the minor-ring seam first...
            if minor_index + 1 == minor_seg:
                i2 = (major_index) * minor_seg
                i3 = i1 + minor_seg
                i4 = i2 + minor_seg
            else:
                i2 = i1 + 1
                i3 = i1 + minor_seg
                i4 = i3 + 1
            # ...then at the major-ring seam.
            if i2 >= tot_verts:
                i2 = i2 - tot_verts
            if i3 >= tot_verts:
                i3 = i3 - tot_verts
            if i4 >= tot_verts:
                i4 = i4 - tot_verts
            faces.extend([i1, i3, i4, i2])
            i1 += 1
    return verts, faces
def add_uvs(mesh, minor_seg, major_seg):
    """Create a default UV map for the torus mesh.

    U advances per major segment, V per minor segment; both wrap back below
    1.0 so the seam quads map correctly.
    """
    from math import fmod
    mesh.uv_layers.new()
    uv_data = mesh.uv_layers.active.data
    polygons = mesh.polygons
    u_step = 1.0 / major_seg
    v_step = 1.0 / minor_seg
    # Round UVs, needed when segments aren't divisible by 4.
    u_init = 0.5 + fmod(0.5, u_step)
    v_init = 0.5 + fmod(0.5, v_step)
    # Calculate wrapping value under 1.0 to prevent
    # float precision errors wrapping at the wrong step.
    u_wrap = 1.0 - (u_step / 2.0)
    v_wrap = 1.0 - (v_step / 2.0)
    vertex_index = 0
    u_prev = u_init
    u_next = u_prev + u_step
    for _major_index in range(major_seg):
        v_prev = v_init
        v_next = v_prev + v_step
        for _minor_index in range(minor_seg):
            loops = polygons[vertex_index].loop_indices
            # Loop order matches the quad winding produced by the mesh builder.
            uv_data[loops[0]].uv = u_prev, v_prev
            uv_data[loops[1]].uv = u_next, v_prev
            uv_data[loops[3]].uv = u_prev, v_next
            uv_data[loops[2]].uv = u_next, v_next
            if v_next > v_wrap:
                v_prev = v_next - 1.0
            else:
                v_prev = v_next
            v_next = v_prev + v_step
            vertex_index += 1
        if u_next > u_wrap:
            u_prev = u_next - 1.0
        else:
            u_prev = u_next
        u_next = u_prev + u_step
class AddTorus(Operator, object_utils.AddObjectHelper):
    """Construct a torus mesh"""
    bl_idname = "mesh.primitive_torus_add"
    bl_label = "Add Torus"
    bl_options = {'REGISTER', 'UNDO', 'PRESET'}

    def mode_update_callback(self, _context):
        """Keep the exterior/interior radii in sync when switching modes."""
        if self.mode == 'EXT_INT':
            self.abso_major_rad = self.major_radius + self.minor_radius
            self.abso_minor_rad = self.major_radius - self.minor_radius

    major_segments: IntProperty(
        name="Major Segments",
        description="Number of segments for the main ring of the torus",
        min=3, max=256,
        default=48,
    )
    minor_segments: IntProperty(
        name="Minor Segments",
        description="Number of segments for the minor ring of the torus",
        min=3, max=256,
        default=12,
    )
    mode: EnumProperty(
        name="Dimensions Mode",
        items=(
            ('MAJOR_MINOR', "Major/Minor",
             "Use the major/minor radii for torus dimensions"),
            ('EXT_INT', "Exterior/Interior",
             "Use the exterior/interior radii for torus dimensions"),
        ),
        # NOTE(review): referencing AddTorus inside its own body only works
        # because `from __future__ import annotations` makes these annotation
        # expressions lazy — confirm Blender resolves them after class creation.
        update=AddTorus.mode_update_callback,
    )
    major_radius: FloatProperty(
        name="Major Radius",
        description=("Radius from the origin to the "
                     "center of the cross sections"),
        soft_min=0.0, soft_max=100.0,
        min=0.0, max=10_000.0,
        default=1.0,
        subtype='DISTANCE',
        unit='LENGTH',
    )
    minor_radius: FloatProperty(
        name="Minor Radius",
        description="Radius of the torus' cross section",
        soft_min=0.0, soft_max=100.0,
        min=0.0, max=10_000.0,
        default=0.25,
        subtype='DISTANCE',
        unit='LENGTH',
    )
    abso_major_rad: FloatProperty(
        name="Exterior Radius",
        description="Total Exterior Radius of the torus",
        soft_min=0.0, soft_max=100.0,
        min=0.0, max=10_000.0,
        default=1.25,
        subtype='DISTANCE',
        unit='LENGTH',
    )
    abso_minor_rad: FloatProperty(
        name="Interior Radius",
        description="Total Interior Radius of the torus",
        soft_min=0.0, soft_max=100.0,
        min=0.0, max=10_000.0,
        default=0.75,
        subtype='DISTANCE',
        unit='LENGTH',
    )
    generate_uvs: BoolProperty(
        name="Generate UVs",
        description="Generate a default UV map",
        default=True,
    )

    def draw(self, _context):
        """Lay out the operator's redo panel."""
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        layout.separator()
        layout.prop(self, "major_segments")
        layout.prop(self, "minor_segments")
        layout.separator()
        layout.prop(self, "mode")
        # Show the radius pair matching the selected dimensions mode.
        if self.mode == 'MAJOR_MINOR':
            layout.prop(self, "major_radius")
            layout.prop(self, "minor_radius")
        else:
            layout.prop(self, "abso_major_rad")
            layout.prop(self, "abso_minor_rad")
        layout.separator()
        layout.prop(self, "generate_uvs")
        layout.prop(self, "align")
        layout.prop(self, "location")
        layout.prop(self, "rotation")

    def invoke(self, context, _event):
        """Apply the user's grid scale, then run execute."""
        object_utils.object_add_grid_scale_apply_operator(self, context)
        return self.execute(context)

    def execute(self, context):
        """Build the torus mesh datablock and add it to the scene."""
        if self.mode == 'EXT_INT':
            # Convert exterior/interior radii into major/minor form.
            extra_helper = (self.abso_major_rad - self.abso_minor_rad) * 0.5
            self.major_radius = self.abso_minor_rad + extra_helper
            self.minor_radius = extra_helper
        verts_loc, faces = METHOD_NAME(
            self.major_radius,
            self.minor_radius,
            self.major_segments,
            self.minor_segments,
        )
        mesh = bpy.data.meshes.new(data_("Torus"))
        # verts_loc is a flat [x, y, z, ...] list; faces holds 4 indices per quad.
        mesh.vertices.add(len(verts_loc) // 3)
        nbr_loops = len(faces)
        nbr_polys = nbr_loops // 4
        mesh.loops.add(nbr_loops)
        mesh.polygons.add(nbr_polys)
        mesh.vertices.foreach_set("co", verts_loc)
        mesh.polygons.foreach_set("loop_start", range(0, nbr_loops, 4))
        mesh.loops.foreach_set("vertex_index", faces)
        mesh.shade_flat()
        if self.generate_uvs:
            add_uvs(mesh, self.minor_segments, self.major_segments)
        mesh.update()
        object_utils.object_data_add(context, mesh, operator=self)
        return {'FINISHED'}
# Operator classes exported by this module — presumably consumed by a
# register()/unregister() pair outside this chunk; confirm.
classes = (
    AddTorus,
)
5,076 | test standalone asterisks with newlines | """
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2019 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
"""
from markdown.test_tools import TestCase
class TestNotEmphasis(TestCase):
    """Inputs whose ``*``/``_`` markers must NOT become emphasis, plus a few
    complex marker combinations that must render as strong/em correctly.

    Every test feeds a Markdown snippet to ``assertMarkdownRenders`` and
    checks the exact HTML output.
    """

    # --- lone markers -------------------------------------------------

    def test_standalone_asterisk(self):
        self.assertMarkdownRenders('*', '<p>*</p>')

    def test_standalone_understore(self):
        self.assertMarkdownRenders('_', '<p>_</p>')

    def test_standalone_asterisks_consecutive(self):
        self.assertMarkdownRenders('Foo * * * *', '<p>Foo * * * *</p>')

    def test_standalone_understore_consecutive(self):
        self.assertMarkdownRenders('Foo _ _ _ _', '<p>Foo _ _ _ _</p>')

    def test_standalone_asterisks_pairs(self):
        self.assertMarkdownRenders('Foo ** ** ** **', '<p>Foo ** ** ** **</p>')

    def test_standalone_understore_pairs(self):
        self.assertMarkdownRenders('Foo __ __ __ __', '<p>Foo __ __ __ __</p>')

    def test_standalone_asterisks_triples(self):
        self.assertMarkdownRenders('Foo *** *** *** ***', '<p>Foo *** *** *** ***</p>')

    def test_standalone_understore_triples(self):
        self.assertMarkdownRenders('Foo ___ ___ ___ ___', '<p>Foo ___ ___ ___ ___</p>')

    # --- markers surrounded by text ----------------------------------

    def test_standalone_asterisk_in_text(self):
        self.assertMarkdownRenders('foo * bar', '<p>foo * bar</p>')

    def test_standalone_understore_in_text(self):
        self.assertMarkdownRenders('foo _ bar', '<p>foo _ bar</p>')

    def test_standalone_asterisks_in_text(self):
        self.assertMarkdownRenders('foo * bar * baz', '<p>foo * bar * baz</p>')

    def test_standalone_understores_in_text(self):
        self.assertMarkdownRenders('foo _ bar _ baz', '<p>foo _ bar _ baz</p>')

    def METHOD_NAME(self):
        self.assertMarkdownRenders('foo\n* bar *\nbaz', '<p>foo\n* bar *\nbaz</p>')

    def test_standalone_understores_with_newlines(self):
        self.assertMarkdownRenders('foo\n_ bar _\nbaz', '<p>foo\n_ bar _\nbaz</p>')

    def test_standalone_underscore_at_begin(self):
        self.assertMarkdownRenders('_ foo_ bar', '<p>_ foo_ bar</p>')

    def test_standalone_asterisk_at_end(self):
        self.assertMarkdownRenders('foo *bar *', '<p>foo *bar *</p>')

    def test_standalone_understores_at_begin_end(self):
        self.assertMarkdownRenders('_ bar _', '<p>_ bar _</p>')

    # --- complex emphasis that MUST render ---------------------------

    def test_complex_emphasis_asterisk(self):
        self.assertMarkdownRenders(
            'This is text **bold *italic bold*** with more text',
            '<p>This is text <strong>bold <em>italic bold</em></strong> with more text</p>'
        )

    def test_complex_emphasis_asterisk_mid_word(self):
        self.assertMarkdownRenders(
            'This is text **bold*italic bold*** with more text',
            '<p>This is text <strong>bold<em>italic bold</em></strong> with more text</p>'
        )

    def test_complex_emphasis_smart_underscore(self):
        self.assertMarkdownRenders(
            'This is text __bold _italic bold___ with more text',
            '<p>This is text <strong>bold <em>italic bold</em></strong> with more text</p>'
        )

    def test_complex_emphasis_smart_underscore_mid_word(self):
        # Smart underscores: mid-word "_" must stay literal.
        self.assertMarkdownRenders(
            'This is text __bold_italic bold___ with more text',
            '<p>This is text __bold_italic bold___ with more text</p>'
        )

    def test_nested_emphasis(self):
        self.assertMarkdownRenders(
            'This text is **bold *italic* *italic* bold**',
            '<p>This text is <strong>bold <em>italic</em> <em>italic</em> bold</strong></p>'
        )

    def test_complex_multple_emphasis_type(self):
        self.assertMarkdownRenders(
            'traced ***along*** bla **blocked** if other ***or***',
            '<p>traced <strong><em>along</em></strong> bla <strong>blocked</strong> if other <strong><em>or</em></strong></p>'  # noqa: E501
        )

    def test_complex_multple_emphasis_type_variant2(self):
        self.assertMarkdownRenders(
            'on the **1-4 row** of the AP Combat Table ***and*** receive',
            '<p>on the <strong>1-4 row</strong> of the AP Combat Table <strong><em>and</em></strong> receive</p>'
        )
"""Tests the DynamicFactor model."""
import pytest
from pandas.testing import assert_frame_equal
from sktime.datasets import load_longley
from sktime.forecasting.dynamic_factor import DynamicFactor
from sktime.utils.validation._dependencies import _check_soft_dependencies
__author__ = ["yarnabrina"]
# Train on the first HISTORY_LENGTH rows, forecast the next PREDICTION_LENGTH.
HISTORY_LENGTH = 10
PREDICTION_LENGTH = 6
# DynamicFactor hyper-parameters shared by the sktime and statsmodels fits.
K_FACTORS = 1
FACTOR_ORDER = 1
# Confidence-interval coverage levels checked by the comparison helper.
COVERAGES = [0.95, 0.99]
_, MULTIVARIATE_DF = load_longley()
# Split the Longley columns into forecast targets and exogenous features.
TARGET_COLUMNS = ["GNPDEFL", "GNP"]
FEATURE_COLUMNS = ["UNEMP", "POP"]
ENDOGENOUS_DF = MULTIVARIATE_DF[TARGET_COLUMNS]
EXOGENOUS_DF = MULTIVARIATE_DF[FEATURE_COLUMNS]
# Time-axis splits: fit on the history, predict over the horizon.
TRAIN_Y = ENDOGENOUS_DF[:HISTORY_LENGTH]
TRAIN_X = EXOGENOUS_DF[:HISTORY_LENGTH]
PREDICT_X = EXOGENOUS_DF[HISTORY_LENGTH : (HISTORY_LENGTH + PREDICTION_LENGTH)]
def compare_predictions_against_statsmodels(
    sktime_point_predictions, sktime_interval_predictions, statsmodels_predictions
) -> None:
    """Compare predictions from ``sktime`` wrapper against ``statsmodels`` estimator.
    Notes
    -----
    * compare point predictions - predictive mean
    * compare confidence intervals for multiple coverage values, viz. ``COVERAGES``
    """
    # Point forecasts must match the statsmodels predictive mean exactly.
    statsmodels_point_predictions = statsmodels_predictions.predicted_mean
    assert_frame_equal(sktime_point_predictions, statsmodels_point_predictions)
    for coverage in COVERAGES:
        # statsmodels expresses intervals via significance level alpha = 1 - coverage.
        statsmodels_interval_predictions = statsmodels_predictions.conf_int(
            alpha=(1 - coverage)
        )
        for target in TARGET_COLUMNS:
            # Select this target/coverage slice from sktime's MultiIndex columns.
            sktime_results = sktime_interval_predictions.xs(
                (target, coverage), axis="columns"
            )
            # statsmodels names interval columns with the target as a suffix.
            statsmodels_results = statsmodels_interval_predictions.filter(
                regex=f"{target}$"
            )
            # Rename to sktime's lower/upper convention before comparing.
            statsmodels_results.columns = ["lower", "upper"]
            assert_frame_equal(sktime_results, statsmodels_results)
@pytest.mark.skipif(
    not _check_soft_dependencies("statsmodels", severity="none"),
    reason="skip test if required soft dependency not available",
)
def test_DynamicFactor_without_exogenous_variables():
    """Test ``DynamicFactor`` in absence of exogenous variables."""
    # Imported inside the test so collection works when statsmodels is absent.
    from statsmodels.tsa.statespace.dynamic_factor import (
        DynamicFactor as _DynamicFactor,
    )
    # Fit the sktime wrapper and produce point + interval forecasts.
    unfitted_sktime_model = DynamicFactor(
        k_factors=K_FACTORS, factor_order=FACTOR_ORDER
    )
    fitted_sktime_model = unfitted_sktime_model.fit(TRAIN_Y)
    sktime_point_predictions = fitted_sktime_model.predict(
        fh=range(1, PREDICTION_LENGTH + 1)
    )
    sktime_interval_predictions = fitted_sktime_model.predict_interval(
        fh=range(1, PREDICTION_LENGTH + 1), coverage=COVERAGES
    )
    # Fit statsmodels directly on the same data as the reference.
    unfitted_statsmodels_model = _DynamicFactor(TRAIN_Y, K_FACTORS, FACTOR_ORDER)
    fitted_statsmodels_model = unfitted_statsmodels_model.fit()
    # Predict over the same absolute index range as sktime's relative horizon.
    statsmodels_predictions = fitted_statsmodels_model.get_prediction(
        start=HISTORY_LENGTH, end=HISTORY_LENGTH + PREDICTION_LENGTH - 1
    )
    compare_predictions_against_statsmodels(
        sktime_point_predictions, sktime_interval_predictions, statsmodels_predictions
    )
@pytest.mark.skipif(
    not _check_soft_dependencies("statsmodels", severity="none"),
    reason="skip test if required soft dependency not available",
)
def METHOD_NAME():
    """Test ``DynamicFactor`` in presence of exogenous variables."""
    # Imported inside the test so collection works when statsmodels is absent.
    from statsmodels.tsa.statespace.dynamic_factor import (
        DynamicFactor as _DynamicFactor,
    )

    # Fit the sktime wrapper on endogenous + exogenous training data, then
    # forecast over the relative horizon with the held-out exogenous rows.
    sktime_model = DynamicFactor(
        k_factors=K_FACTORS, factor_order=FACTOR_ORDER
    ).fit(TRAIN_Y, X=TRAIN_X)
    point_predictions = sktime_model.predict(
        fh=range(1, PREDICTION_LENGTH + 1), X=PREDICT_X
    )
    interval_predictions = sktime_model.predict_interval(
        fh=range(1, PREDICTION_LENGTH + 1), X=PREDICT_X, coverage=COVERAGES
    )

    # Fit statsmodels directly on the same data as the reference estimator.
    reference_model = _DynamicFactor(
        TRAIN_Y, K_FACTORS, FACTOR_ORDER, exog=TRAIN_X
    ).fit()
    reference_predictions = reference_model.get_prediction(
        start=HISTORY_LENGTH, end=HISTORY_LENGTH + PREDICTION_LENGTH - 1, exog=PREDICT_X
    )

    compare_predictions_against_statsmodels(
        point_predictions, interval_predictions, reference_predictions
    )
# -*- coding: utf-8 -*-
from datetime import datetime as dt
from decimal import Decimal
import pickle
from decimal import Decimal
from django.core import exceptions
from django.db import connection
import pytest
from nav.models.fields import CIDRField
from nav.models.fields import DateTimeInfinityField
from nav.models.fields import DictAsJsonField
from nav.models.fields import LegacyGenericForeignKey
from nav.models.fields import PointField
class TestCIDRField(object):
    """Exercise ``CIDRField.to_python()`` on falsy, valid and invalid input."""

    def test_to_python_empty(self):
        """Falsy values must pass through unchanged."""
        field = CIDRField()
        for falsy_value in (None, u'', 0, False, [], {}, set(), 0.0):
            assert field.to_python(falsy_value) == falsy_value

    def test_to_python_valid_cidr(self):
        """Well-formed IPv4/IPv6 CIDR strings are returned verbatim."""
        field = CIDRField()
        for cidr in (u'192.168.0.0/23', u'1234:dead:beef::/64'):
            assert field.to_python(cidr) == cidr

    def test_to_python_valid_ip(self):
        """Plain addresses, given as text or bytes, normalize to the text form."""
        field = CIDRField()
        for text, raw in ((u'192.168.0.0', b'192.168.0.0'),
                          (u'1234:dead:beef::63', b'1234:dead:beef::63')):
            assert field.to_python(text) == text
            assert field.to_python(raw) == text

    def test_to_python_invalid(self):
        """Garbage input of any type must raise ValidationError."""
        field = CIDRField()
        for bogus in (u'333.222.999.0', u'blåbærsyltetøy', 300, 3.1415, [True]):
            with pytest.raises(exceptions.ValidationError):
                field.to_python(bogus)

    def test_to_python_seemingly_valid(self):
        # IPY works on CIDRs for networks, not hosts
        field = CIDRField()
        with pytest.raises(exceptions.ValidationError):
            field.to_python(u'1234:dead:beef::63/23')
class TestDateTimeInfinityField(object):
    """Tests for DateTimeInfinityField.get_db_prep_value()."""
    def test_get_db_prep_value_infinity(self):
        """datetime.min/max map to PostgreSQL's '-infinity'/'infinity'."""
        field = DateTimeInfinityField()
        result_min = field.get_db_prep_value(dt.min, connection)
        assert result_min == u'-infinity'
        result_max = field.get_db_prep_value(dt.max, connection)
        assert result_max == u'infinity'
    def test_get_db_prep_value_prepared_other(self):
        """Ordinary datetimes (prepared=True) defer to the parent field."""
        field = DateTimeInfinityField()
        test_val = dt(2018, 3, 5)
        result = field.get_db_prep_value(test_val, connection, prepared=True)
        # The actual result here will vary with Django versions and which
        # database engine has been selected in the Django settings!
        expected = super(DateTimeInfinityField, field).get_db_prep_value(
            test_val, connection, prepared=True
        )
        assert result == expected
    def test_get_db_prep_value_unprepared_other(self):
        """Ordinary datetimes (prepared=False) defer to the parent field."""
        field = DateTimeInfinityField()
        test_val = dt(2018, 3, 5)
        result = field.get_db_prep_value(test_val, connection, prepared=False)
        # The actual result here will vary with Django versions and which
        # database engine has been selected in the Django settings!
        expected = super(DateTimeInfinityField, field).get_db_prep_value(
            test_val, connection, prepared=False
        )
        assert result == expected
class TestDictAsJsonField(object):
    """Tests for DictAsJsonField (JSON deserialization with pickle fallback)."""
    def test_to_python_dict(self):
        """A dict passes through unchanged."""
        field = DictAsJsonField()
        value = {'a': 'b'}
        result = field.to_python(value)
        assert result == value
    def test_to_python_json(self):
        """JSON text (str or bytes) decodes to the corresponding object."""
        field = DictAsJsonField()
        value = u'{"a": "b"}'
        result = field.to_python(value)
        assert result == {"a": "b"}
        value = u'[1, 2, 3]'
        result = field.to_python(value)
        assert result == [1, 2, 3]
        value = b'[1, 2, 3]'
        result = field.to_python(value)
        assert result == [1, 2, 3]
    def test_to_python_pickle(self):
        """Non-JSON input falls back to unpickling (legacy stored values)."""
        field = DictAsJsonField()
        orig_value = 2
        # Protocol 1 matches how legacy values were stored as byte strings.
        value = pickle.dumps(orig_value, protocol=1)
        result = field.to_python(value)
        assert result == orig_value
    def test_to_python_pickle_str(self):
        # Github issue #2085
        # Not all pickles can be converted to Py3 str at all so use an actual
        # production value
        field = DictAsJsonField()
        orig_value = {'refresh_interval': 600000}
        value = "(dp0\nS'refresh_interval'\np1\nI600000\ns."
        result = field.to_python(value)
        assert result == orig_value
    def test_get_prep_value_empty(self):
        """None is stored as SQL NULL, not the string 'null'."""
        field = DictAsJsonField()
        result = field.get_prep_value(None)
        assert result is None
    def test_get_prep_value_filled(self):
        """Dicts are serialized to compact JSON text for storage."""
        field = DictAsJsonField()
        result = field.get_prep_value({'a': 'b'})
        assert result == u'{"a": "b"}'
class TestLegacyGenericForeignKey(object):
    """Tests for ``LegacyGenericForeignKey.get_model_class()`` lookups."""

    def test_get_model_class_unknown_model(self):
        """An unknown table name yields None rather than raising."""
        assert LegacyGenericForeignKey.get_model_class('doesnotexistindb') is None

    def test_get_model_class_known_model(self):
        # use existing class
        assert bool(LegacyGenericForeignKey.get_model_class('subsystem'))
class TestPointField(object):
    """Tests for PointField parsing and database serialization."""

    def METHOD_NAME(self):
        """A "(x, y)" string parses into a tuple of Decimals."""
        expected_point = (Decimal("1.2"), Decimal("3.4"))
        point_string = "(1.2, 3.4)"
        field = PointField()
        point = field.to_python(point_string)
        assert expected_point == point

    def test_get_db_prep_value(self):
        """A Decimal tuple serializes to the "(x,y)" database format.

        BUG FIX: this method was named ``get_db_prep_value`` (missing the
        ``test_`` prefix), so pytest never collected or executed it.
        """
        expected_db_string = "(7.1,5.12)"
        point = (Decimal("7.1"), Decimal("5.12"))
        field = PointField()
        db_string = field.get_db_prep_value(point)
        assert expected_db_string == db_string
from packaging.version import Version
# Fail fast with an actionable message when PyTorch is not installed.
try:
    import torch
except ImportError as error:
    message = (
        "Impossible to import PyTorch.\n"
        "To use TensorLy with the PyTorch backend, "
        "you must first install PyTorch!"
    )
    raise ImportError(message) from error
import numpy as np
from .core import (
    Backend,
    backend_types,
    backend_basic_math,
    backend_array,
)
# Minimum supported PyTorch version gate.
if Version(torch.__version__) < Version("1.9.0"):
    raise RuntimeError("TensorLy only supports pytorch v1.9.0 and above.")
class PyTorchBackend(Backend, backend_name="pytorch"):
    """TensorLy backend dispatching tensor operations to PyTorch."""

    @staticmethod
    def context(tensor):
        """Return the creation context (dtype, device, grad flag) of ``tensor``."""
        return {
            "dtype": tensor.dtype,
            "device": tensor.device,
            "requires_grad": tensor.requires_grad,
        }

    @staticmethod
    def tensor(data, dtype=torch.float32, device="cpu", requires_grad=False):
        """Create a ``torch.Tensor`` from ``data`` with the requested context."""
        if isinstance(data, torch.Tensor):
            # NOTE(review): using torch.device as a context manager requires
            # torch >= 2.0, while the module-level gate only enforces >= 1.9
            # -- confirm the supported version range.
            with torch.device(device):
                return data.clone().detach().type(dtype).requires_grad_(requires_grad)
        if isinstance(data, np.ndarray):
            # Copy so the resulting tensor never aliases the caller's array.
            data = data.copy()
        return torch.tensor(
            data, dtype=dtype, device=device, requires_grad=requires_grad
        )

    @staticmethod
    def to_numpy(tensor):
        """Convert ``tensor`` to a numpy array, detaching/moving to CPU as needed."""
        if torch.is_tensor(tensor):
            if tensor.requires_grad:
                tensor = tensor.detach()
            # BUG FIX: the original tested ``tensor.cuda``, which is a bound
            # method and therefore always truthy; ``is_cuda`` is the boolean
            # attribute that reports device placement.
            if tensor.is_cuda:
                tensor = tensor.cpu()
            return tensor.numpy()
        elif isinstance(tensor, np.ndarray):
            return tensor
        else:
            return np.asarray(tensor)

    @staticmethod
    def shape(tensor):
        """Return the tensor shape as a plain tuple (numpy convention)."""
        return tuple(tensor.shape)

    @staticmethod
    def ndim(tensor):
        """Return the number of dimensions."""
        return tensor.dim()

    @staticmethod
    def arange(start, stop=None, step=1.0, *args, **kwargs):
        """numpy-style ``arange``: a single positional argument means ``stop``."""
        if stop is None:
            return torch.arange(
                start=0.0, end=float(start), step=float(step), *args, **kwargs
            )
        else:
            return torch.arange(float(start), float(stop), float(step), *args, **kwargs)

    @staticmethod
    def clip(tensor, a_min=None, a_max=None, inplace=False):
        """Clamp values to ``[a_min, a_max]``, optionally writing in place."""
        if inplace:
            return torch.clip(tensor, a_min, a_max, out=tensor)
        else:
            return torch.clip(tensor, a_min, a_max)

    @staticmethod
    def METHOD_NAME(tensor):
        """Return the number of non-zero entries as a 0-d tensor."""
        return torch.sum(tensor != 0)

    def transpose(self, tensor, axes=None):
        """Permute dimensions; defaults to reversing them (full transpose)."""
        axes = axes or list(range(self.ndim(tensor)))[::-1]
        return tensor.permute(*axes)

    @staticmethod
    def copy(tensor):
        """Return an independent copy of ``tensor``."""
        return tensor.clone()

    @staticmethod
    def norm(tensor, order=None, axis=None):
        """Norm of ``order`` along ``axis``, with numpy-like semantics."""
        # pytorch does not accept `None` for any keyword arguments. additionally,
        # pytorch doesn't seems to support keyword arguments in the first place
        kwds = {}
        if axis is not None:
            kwds["dim"] = axis
        # NOTE(review): ``order == 0`` is falsy and silently falls through to
        # torch.norm's default -- confirm whether a 0-"norm" should be handled.
        if order and order != "inf":
            kwds["p"] = order
        if order == "inf":
            # Max-norm: reduce with abs-max.  With a ``dim``, torch.max returns
            # a (values, indices) pair, so keep only the values.
            res = torch.max(torch.abs(tensor), **kwds)
            if axis is not None:
                return res[0]  # ignore indices output
            return res
        return torch.norm(tensor, **kwds)

    @staticmethod
    def dot(a, b):
        """numpy-style ``dot``: scalar product, matmul, or batched contraction."""
        if a.ndim > 2 and b.ndim > 2:
            # Contract the last axis of ``a`` with the second-to-last of ``b``.
            return torch.tensordot(a, b, dims=([-1], [-2]))
        if not a.ndim or not b.ndim:
            # At least one operand is 0-d: plain scalar multiplication.
            return a * b
        return torch.matmul(a, b)

    @staticmethod
    def tensordot(a, b, axes=2, **kwargs):
        """Generalized tensor contraction over ``axes``."""
        return torch.tensordot(a, b, dims=axes, **kwargs)

    @staticmethod
    def mean(tensor, axis=None):
        """Mean over all entries, or along ``axis`` when given."""
        if axis is None:
            return torch.mean(tensor)
        else:
            return torch.mean(tensor, dim=axis)

    @staticmethod
    def sum(tensor, axis=None, keepdims=False):
        """Sum over all axes (default) or the given ones, honoring ``keepdims``."""
        if axis is None:
            axis = tuple(range(tensor.ndim))
        return torch.sum(tensor, dim=axis, keepdim=keepdims)

    @staticmethod
    def max(tensor, axis=None):
        """Maximum over all entries, or per-axis values (indices discarded)."""
        if axis is None:
            return torch.max(tensor)
        else:
            return torch.max(tensor, dim=axis)[0]

    @staticmethod
    def flip(tensor, axis=None):
        """Reverse entries along ``axis`` (all axes when None)."""
        if isinstance(axis, int):
            axis = [axis]
        if axis is None:
            return torch.flip(tensor, dims=[i for i in range(tensor.ndim)])
        else:
            return torch.flip(tensor, dims=axis)

    @staticmethod
    def concatenate(tensors, axis=0):
        return torch.cat(tensors, dim=axis)

    @staticmethod
    def argmin(input, axis=None):
        return torch.argmin(input, dim=axis)

    @staticmethod
    def argsort(input, axis=None):
        return torch.argsort(input, dim=axis)

    @staticmethod
    def argmax(input, axis=None):
        return torch.argmax(input, dim=axis)

    @staticmethod
    def stack(arrays, axis=0):
        return torch.stack(arrays, dim=axis)

    @staticmethod
    def diag(tensor, k=0):
        return torch.diag(tensor, diagonal=k)

    @staticmethod
    def sort(tensor, axis):
        """Sorted values along ``axis`` (flattened first when axis is None)."""
        if axis is None:
            tensor = tensor.flatten()
            axis = -1
        return torch.sort(tensor, dim=axis).values

    @staticmethod
    def update_index(tensor, index, values):
        """In-place scatter of ``values`` into ``tensor`` at ``index``."""
        tensor.index_put_(index, values)

    @staticmethod
    def lstsq(a, b, rcond=None, driver="gelsd"):
        """Least-squares solve of ``a x = b``."""
        return torch.linalg.lstsq(a, b, rcond=rcond, driver=driver)

    @staticmethod
    def eigh(tensor):
        """Legacy only, deprecated from PyTorch 1.8.0.

        Superseded by the ``torch.linalg.eigh`` registration at module level.
        """
        return torch.symeig(tensor, eigenvectors=True)

    @staticmethod
    def sign(tensor):
        """torch.sign does not support complex numbers."""
        return torch.sgn(tensor)

    @staticmethod
    def logsumexp(tensor, axis=0):
        return torch.logsumexp(tensor, dim=axis)
# Register the remaining functions straight from torch / torch.linalg so the
# backend exposes the full TensorLy API without hand-written wrappers.
_TORCH_FUNCTIONS = (
    backend_types
    + backend_basic_math
    + backend_array
    + [
        "nan",
        "is_tensor",
        "trace",
        "conj",
        "finfo",
        "log2",
        "digamma",
    ]
    + ["kron", "moveaxis"]
)
for _name in _TORCH_FUNCTIONS:
    PyTorchBackend.register_method(_name, getattr(torch, _name))
for _name in ("solve", "qr", "svd", "eigh"):
    PyTorchBackend.register_method(_name, getattr(torch.linalg, _name))
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from collections import OrderedDict
import re
from subprocess import check_output, STDOUT, CalledProcessError
import sys
TOTAL = 'ALL'  # pseudo-module key accumulating the per-run total load time
NUM_RUNS = 3  # timed runs (an extra warm-up run is executed and discarded)
DEFAULT_THRESHOLD = 10  # max acceptable mean load time per module, milliseconds
# explicit thresholds that deviate from the default
THRESHOLDS = {
    'network': 30,
    'vm': 30,
    'batch': 30,
    'storage': 50,
    TOTAL: 300
}
def init(root):
    """Register the ``module-load-perf`` command on the ``root`` subparser."""
    parser = root.add_parser('module-load-perf', help='Verify that modules load within an acceptable timeframe.')
    # Dispatch to the verification routine defined below.
    parser.set_defaults(func=METHOD_NAME)
def mean(data):
    """Return the arithmetic mean of the values in ``data``.

    Raises ValueError when ``data`` is empty.
    """
    if len(data) < 1:
        raise ValueError('len < 1')
    return sum(data) / float(len(data))
def sq_deviation(data):
    """Return the sum of squared deviations from the mean of ``data``.

    Raises ValueError when ``data`` is empty (matching ``mean``).
    """
    if len(data) < 1:
        raise ValueError('len < 1')
    center = sum(data) / float(len(data))
    return sum((value - center) ** 2 for value in data)
def pstdev(data):
    """Return the population standard deviation of ``data``.

    Requires at least two samples; raises ValueError otherwise.
    """
    count = len(data)
    if count < 2:
        raise ValueError('len < 2')
    center = sum(data) / float(count)
    sum_of_squares = sum((value - center) ** 2 for value in data)
    return (sum_of_squares / count) ** 0.5
def print_values(data):
print('{:<20} {:>12} {:>12} {:>12} {:>25}'.format('Module', 'Average', 'Threshold', 'Stdev', 'Values'))
for key, val in data.items():
print('{:<20} {:>12.0f} {:>12.0f} {:>12.0f} {:>25}'.format(
key, val['average'], val['threshold'], val['stdev'], str(val['values'])))
def METHOD_NAME(args):
    """Measure CLI module load times and fail when any module is too slow.

    Runs ``az -h --debug`` ``NUM_RUNS`` times (plus one discarded warm-up run),
    parses the per-module load times from the debug output, and compares the
    mean of each module (and the per-run total, keyed ``TOTAL``) against its
    threshold.  Exits with status 0 when everything passes, 1 otherwise.
    """
    # Matches e.g. "DEBUG: Loaded module 'network' in 0.12 seconds".
    regex = r"[^']*'([^']*)'[\D]*([\d\.]*)"
    results = {TOTAL: []}
    try:
        use_shell = sys.platform.lower() in ['windows', 'win32']
        # Time the module loading NUM_RUNS times (index 0 is a warm-up).
        for i in range(0, NUM_RUNS + 1):
            lines = check_output('az -h --debug'.split(), shell=use_shell, stderr=STDOUT)
            if i == 0:
                # Ignore the first run since it can be longer due to *.pyc file compilation
                continue
            try:
                lines = lines.decode().splitlines()
            except (AttributeError, UnicodeDecodeError):
                # BUG FIX: was a bare ``except:``, which would also swallow
                # SystemExit/KeyboardInterrupt.  Fall back to splitting the
                # raw output when it is already text or not decodable.
                lines = lines.splitlines()
            total_time = 0
            for line in lines:
                if line.startswith('DEBUG: Loaded module'):
                    matches = re.match(regex, line)
                    mod = matches.group(1)
                    val = float(matches.group(2)) * 1000  # seconds -> milliseconds
                    total_time = total_time + val
                    # Group load times per module across all timed runs.
                    results.setdefault(mod, []).append(val)
            results[TOTAL].append(total_time)
        passed_mods = {}
        failed_mods = {}
        mods = sorted(results.keys())
        bubble_found = False
        for mod in mods:
            val = results[mod]
            mean_val = mean(val)
            stdev_val = pstdev(val)
            threshold = THRESHOLDS.get(mod) or DEFAULT_THRESHOLD
            statistics = {
                'average': mean_val,
                'stdev': stdev_val,
                'threshold': threshold,
                'values': val
            }
            if mean_val > threshold:
                if not bubble_found and mean_val < 30:
                    # This temporary measure allows one floating performance
                    # failure up to 30 ms. See issue #6224 and #6218.
                    bubble_found = True
                    passed_mods[mod] = statistics
                else:
                    failed_mods[mod] = statistics
            else:
                passed_mods[mod] = statistics
        # Report results and exit: 0 when every module passed, 1 otherwise.
        if not failed_mods:
            print('== PASSED MODULES ==')
            print_values(passed_mods)
            print('\nPASSED: Average load time all modules: {} ms'.format(
                int(passed_mods[TOTAL]['average'])))
            sys.exit(0)
        else:
            print('== PASSED MODULES ==')
            print_values(passed_mods)
            print('\nFAILED MODULES')
            print_values(failed_mods)
            sys.exit(1)
    except CalledProcessError as ex:
        # NOTE(review): on CLI failure the output is printed but the function
        # returns None without sys.exit(1) -- confirm this is intended.
        print(ex.output)
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetServiceAccountResult',
'AwaitableGetServiceAccountResult',
'get_service_account',
'get_service_account_output',
]
@pulumi.output_type
class GetServiceAccountResult:
    """
    A collection of values returned by getServiceAccount.
    """
    def __init__(__self__, arn=None, id=None, region=None):
        # Each field is type-checked, then stored via pulumi.set so the
        # @pulumi.output_type machinery can resolve it through the getters.
        if arn and not isinstance(arn, str):
            raise TypeError("Expected argument 'arn' to be a str")
        pulumi.set(__self__, "arn", arn)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if region and not isinstance(region, str):
            raise TypeError("Expected argument 'region' to be a str")
        pulumi.set(__self__, "region", region)
    @property
    @pulumi.getter
    def arn(self) -> str:
        """
        ARN of the AWS CloudTrail service account in the selected region.
        """
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def region(self) -> Optional[str]:
        # Region requested in the invoke, when one was supplied.
        return pulumi.get(self, "region")
class AwaitableGetServiceAccountResult(GetServiceAccountResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the result awaitable: the unreachable ``yield`` turns this
        # into a generator function, which immediately returns a plain copy.
        if False:
            yield self
        return GetServiceAccountResult(
            arn=self.arn,
            id=self.id,
            region=self.region)
def get_service_account(region: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServiceAccountResult:
    """
    Use this data source to get the Account ID of the [AWS CloudTrail Service Account](http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-supported-regions.html)
    in a given region for the purpose of allowing CloudTrail to store trail data in S3.

    A typical use is granting ``s3:PutObject``/``s3:GetBucketAcl`` on a trail
    bucket to the principal identified by the returned ``arn``.

    :param str region: Name of the region whose AWS CloudTrail account ID is desired.
           Defaults to the region from the AWS provider configuration.
    """
    # Build the invoke arguments and merge caller options with the defaults.
    invoke_args = dict()
    invoke_args['region'] = region
    merged_opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Perform the provider invoke and unwrap the typed result.
    invoke_result = pulumi.runtime.invoke(
        'aws:cloudtrail/getServiceAccount:getServiceAccount',
        invoke_args, opts=merged_opts, typ=GetServiceAccountResult).value
    return AwaitableGetServiceAccountResult(
        arn=pulumi.get(invoke_result, 'arn'),
        id=pulumi.get(invoke_result, 'id'),
        region=pulumi.get(invoke_result, 'region'))
@_utilities.lift_output_func(get_service_account)
def METHOD_NAME(region: Optional[pulumi.Input[Optional[str]]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetServiceAccountResult]:
    """
    Output-accepting variant of ``get_service_account`` (see that function for
    a full usage example).

    Use this data source to get the Account ID of the [AWS CloudTrail Service Account](http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-supported-regions.html)
    in a given region for the purpose of allowing CloudTrail to store trail data in S3.

    :param str region: Name of the region whose AWS CloudTrail account ID is desired.
           Defaults to the region from the AWS provider configuration.
    """
    # Body intentionally empty: the lift_output_func decorator forwards
    # Input-wrapped arguments to get_service_account and lifts the result.
    ...
#!/usr/bin/env py.test
# -*- coding: utf-8 -*-
__authors__ = "Martin Sandve Alnæs"
__date__ = "2009-02-13 -- 2009-02-13"
import pytest
import math
from ufl import *
from ufl.constantvalue import as_ufl
def testScalars():
    """A ufl constant evaluates to its plain Python value."""
    s = as_ufl(123)
    e = s((5, 7))
    v = 123
    assert e == v
def testZero():
    """The zero constant evaluates to 0."""
    s = as_ufl(0)
    e = s((5, 7))
    v = 0
    assert e == v
def testIdentity():
    """Identity entries evaluate to 1 on and 0 off the diagonal."""
    cell = triangle
    I = Identity(cell.geometric_dimension())
    s = 123 * I[0, 0]
    e = s((5, 7))
    v = 123
    assert e == v
    s = 123 * I[1, 0]
    e = s((5, 7))
    v = 0
    assert e == v
def testCoords():
    """SpatialCoordinate components evaluate to the supplied point."""
    cell = triangle
    x = SpatialCoordinate(cell)
    s = x[0] + x[1]
    e = s((5, 7))
    v = 5 + 7
    assert e == v
def METHOD_NAME():
    """A Coefficient can be given a constant value via the mapping argument."""
    cell = triangle
    element = FiniteElement("CG", cell, 1)
    f = Coefficient(element)
    s = 3 * f
    e = s((5, 7), {f: 123})
    v = 3 * 123
    assert e == v
def testFunction2():
    """A Coefficient can be mapped to a callable evaluated at the point."""
    cell = triangle
    element = FiniteElement("CG", cell, 1)
    f = Coefficient(element)
    def g(x):
        return x[0]
    s = 3 * f
    e = s((5, 7), {f: g})
    v = 3 * 5
    assert e == v
def testArgument2():
    """An Argument can likewise be mapped to a callable for evaluation."""
    cell = triangle
    element = FiniteElement("CG", cell, 1)
    f = Argument(element, 2)
    def g(x):
        return x[0]
    s = 3 * f
    e = s((5, 7), {f: g})
    v = 3 * 5
    assert e == v
def testAlgebra():
    """Arithmetic expressions of coordinates evaluate numerically."""
    cell = triangle
    x = SpatialCoordinate(cell)
    s = 3 * (x[0] + x[1]) - 7 + x[0] ** (x[1] / 2)
    e = s((5, 7))
    v = 3 * (5. + 7.) - 7 + 5. ** (7. / 2)
    assert e == v
def testIndexSum():
    """Implicit summation over a repeated index: x_i x_i."""
    cell = triangle
    x = SpatialCoordinate(cell)
    i, = indices(1)
    s = x[i] * x[i]
    e = s((5, 7))
    v = 5 ** 2 + 7 ** 2
    assert e == v
def testIndexSum2():
    """Double index sum contracted with the identity tensor."""
    cell = triangle
    x = SpatialCoordinate(cell)
    I = Identity(cell.geometric_dimension())
    i, j = indices(2)
    s = (x[i] * x[j]) * I[i, j]
    e = s((5, 7))
    # v = sum_i sum_j x_i x_j delta_ij = x_0 x_0 + x_1 x_1
    v = 5 ** 2 + 7 ** 2
    assert e == v
def testMathFunctions():
    """Elementary functions evaluate consistently with the math module."""
    x = SpatialCoordinate(triangle)[0]
    s = sin(x)
    e = s((5, 7))
    v = math.sin(5)
    assert e == v
    s = cos(x)
    e = s((5, 7))
    v = math.cos(5)
    assert e == v
    s = tan(x)
    e = s((5, 7))
    v = math.tan(5)
    assert e == v
    s = ln(x)
    e = s((5, 7))
    v = math.log(5)
    assert e == v
    s = exp(x)
    e = s((5, 7))
    v = math.exp(5)
    assert e == v
    s = sqrt(x)
    e = s((5, 7))
    v = math.sqrt(5)
    assert e == v
def testListTensor():
    """Entries of an explicitly listed matrix evaluate correctly."""
    x, y = SpatialCoordinate(triangle)
    m = as_matrix([[x, y], [-y, -x]])
    s = m[0, 0] + m[1, 0] + m[0, 1] + m[1, 1]
    e = s((5, 7))
    v = 0
    assert e == v
    s = m[0, 0] * m[1, 0] * m[0, 1] * m[1, 1]
    e = s((5, 7))
    v = 5 ** 2 * 7 ** 2
    assert e == v
def testComponentTensor1():
    """A vector built with as_vector(x[i], i) evaluates componentwise."""
    x = SpatialCoordinate(triangle)
    m = as_vector(x[i], i)
    s = m[0] * m[1]
    e = s((5, 7))
    v = 5 * 7
    assert e == v
def testComponentTensor2():
    """A matrix built from outer(x, x) components: sum of entries."""
    x = SpatialCoordinate(triangle)
    xx = outer(x, x)
    m = as_matrix(xx[i, j], (i, j))
    s = m[0, 0] + m[1, 0] + m[0, 1] + m[1, 1]
    e = s((5, 7))
    v = 5 * 5 + 5 * 7 + 5 * 7 + 7 * 7
    assert e == v
def testComponentTensor3():
    """A matrix built from outer(x, x) components: product of entries."""
    x = SpatialCoordinate(triangle)
    xx = outer(x, x)
    m = as_matrix(xx[i, j], (i, j))
    s = m[0, 0] * m[1, 0] * m[0, 1] * m[1, 1]
    e = s((5, 7))
    v = 5 * 5 * 5 * 7 * 5 * 7 * 7 * 7
    assert e == v
def testCoefficient():
    """A Coefficient mapped to a position-dependent callable."""
    V = FiniteElement("CG", triangle, 1)
    f = Coefficient(V)
    e = f ** 2
    def eval_f(x):
        return x[0] * x[1] ** 2
    assert e((3, 7), {f: eval_f}) == (3 * 7 ** 2) ** 2
def testCoefficientDerivative():
    """Spatial derivatives are requested via the callable's second argument."""
    V = FiniteElement("CG", triangle, 1)
    f = Coefficient(V)
    e = f.dx(0) ** 2 + f.dx(1) ** 2
    def eval_f(x, derivatives):
        # ``derivatives`` is a tuple of derivative directions; empty means
        # the plain function value is requested.
        if not derivatives:
            return eval_f.c * x[0] * x[1] ** 2
        # assume only first order derivative
        d, = derivatives
        if d == 0:
            return eval_f.c * x[1] ** 2
        if d == 1:
            return eval_f.c * x[0] * 2 * x[1]
    # shows how to attach data to eval_f
    eval_f.c = 5
    assert e((3, 7), {f: eval_f}) == (5 * 7 ** 2) ** 2 + (5 * 3 * 2 * 7) ** 2
def test_dot():
    """dot of coordinate vectors evaluates to the scalar product."""
    x = SpatialCoordinate(triangle)
    s = dot(x, 2 * x)
    e = s((5, 7))
    v = 2 * (5 * 5 + 7 * 7)
    assert e == v
def test_inner():
    """inner of matrices evaluates to the Frobenius product."""
    x = SpatialCoordinate(triangle)
    xx = as_matrix(((2 * x[0], 3 * x[0]), (2 * x[1], 3 * x[1])))
    s = inner(xx, 2 * xx)
    e = s((5, 7))
    v = 2 * ((5 * 2) ** 2 + (5 * 3) ** 2 + (7 * 2) ** 2 + (7 * 3) ** 2)
    assert e == v
def test_outer():
    """outer products evaluate with the expected norm factorization."""
    x = SpatialCoordinate(triangle)
    xx = outer(outer(x, x), as_vector((2, 3)))
    s = inner(xx, 2 * xx)
    e = s((5, 7))
    v = 2 * (5 ** 2 + 7 ** 2) ** 2 * (2 ** 2 + 3 ** 2)
    assert e == v
def test_cross():
    """cross products of orthogonal triplets satisfy |a x b| = |a| |b|."""
    x = SpatialCoordinate(tetrahedron)
    xv = (3, 5, 7)
    # Test cross product of triplets of orthogonal
    # vectors, where |a x b| = |a| |b|
    ts = [
        [as_vector((x[0], 0, 0)),
         as_vector((0, x[1], 0)),
         as_vector((0, 0, x[2]))],
        [as_vector((x[0], x[1], 0)),
         as_vector((x[1], -x[0], 0)),
         as_vector((0, 0, x[2]))],
        [as_vector((0, x[0], x[1])),
         as_vector((0, x[1], -x[0])),
         as_vector((x[2], 0, 0))],
        [as_vector((x[0], 0, x[1])),
         as_vector((x[1], 0, -x[0])),
         as_vector((0, x[2], 0))],
    ]
    for t in ts:
        for i in range(3):
            for j in range(3):
                cij = cross(t[i], t[j])
                dij = dot(cij, cij)
                eij = dij(xv)
                tni = dot(t[i], t[i])(xv)
                tnj = dot(t[j], t[j])(xv)
                # Parallel vectors (i == j) give a zero cross product.
                vij = tni * tnj if i != j else 0
                assert eij == vij
def xtest_dev():
    """Disabled (x-prefixed): deviatoric part; expected value is wrong."""
    x = SpatialCoordinate(triangle)
    xv = (5, 7)
    xx = outer(x, x)
    s1 = dev(2 * xx)
    s2 = 2 * (xx - xx.T)  # FIXME
    e = inner(s1, s1)(xv)
    v = inner(s2, s2)(xv)
    assert e == v
def test_skew():
    """skew(A) matches (A - A^T) for a doubled outer product."""
    x = SpatialCoordinate(triangle)
    xv = (5, 7)
    xx = outer(x, x)
    s1 = skew(2 * xx)
    s2 = (xx - xx.T)
    e = inner(s1, s1)(xv)
    v = inner(s2, s2)(xv)
    assert e == v
def test_sym():
    """sym(A) matches (A + A^T) for a doubled outer product."""
    x = SpatialCoordinate(triangle)
    xv = (5, 7)
    xx = outer(x, x)
    s1 = sym(2 * xx)
    s2 = (xx + xx.T)
    e = inner(s1, s1)(xv)
    v = inner(s2, s2)(xv)
    assert e == v
def test_tr():
    """Trace of 2*outer(x, x) equals 2*sum(x_i^2)."""
    x = SpatialCoordinate(triangle)
    xv = (5, 7)
    xx = outer(x, x)
    s = tr(2 * xx)
    e = s(xv)
    v = 2 * sum(xv[i] ** 2 for i in (0, 1))
    assert e == v
def test_det2D():
    """2x2 determinant with one coordinate row and one constant row."""
    x = SpatialCoordinate(triangle)
    xv = (5, 7)
    a, b = 6.5, -4
    xx = as_matrix(((x[0], x[1]), (a, b)))
    s = det(2 * xx)
    e = s(xv)
    v = 2 ** 2 * (5 * b - 7 * a)
    assert e == v
def xtest_det3D():  # FIXME
    """Disabled (x-prefixed): 3x3 determinant check."""
    # NOTE(review): ``e`` is first unpacked as a matrix entry and then
    # rebound to the evaluated determinant, so the expression for ``v``
    # below uses the WRONG ``e`` -- fix before enabling this test.
    x = SpatialCoordinate(tetrahedron)
    xv = (5, 7, 9)
    a, b, c = 6.5, -4, 3
    d, e, f = 2, 3, 4
    xx = as_matrix(((x[0], x[1], x[2]),
                    (a, b, c),
                    (d, e, f)))
    s = det(2 * xx)
    e = s(xv)
    v = 2 ** 3 * \
        (xv[0] * (b * f - e * c) - xv[1] *
         (a * f - c * d) + xv[2] * (a * e - b * d))
    assert e == v
def test_cofac():
    """Placeholder: cofactor evaluation is not yet covered."""
    pass  # TODO


def test_inv():
    """Placeholder: inverse evaluation is not yet covered."""
    pass  # TODO
5,083 | cleanup | # Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs the in-memory analytics benchmark of Cloudsuite.
More info: http://cloudsuite.ch/inmemoryanalytics/
"""
import re
from absl import flags
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import sample
from perfkitbenchmarker.linux_packages import docker
flags.DEFINE_string('cloudsuite_in_memory_analytics_dataset',
'/data/ml-latest-small',
'Dataset to use for training.')
flags.DEFINE_string('cloudsuite_in_memory_analytics_ratings_file',
'/data/myratings.csv',
'Ratings file to give the recommendation for.')
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'cloudsuite_in_memory_analytics'
BENCHMARK_CONFIG = """
cloudsuite_in_memory_analytics:
description: >
Run Cloudsuite in-memory analytics benchmark. Specify the number of worker
VMs with --num_vms.
vm_groups:
master:
vm_spec: *default_single_core
vm_count: 1
workers:
vm_spec: *default_single_core
"""
def GetConfig(user_config):
    """Load the benchmark config, honoring a --num_vms override for workers."""
    loaded = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
    if FLAGS['num_vms'].present:
        loaded['vm_groups']['workers']['vm_count'] = FLAGS.num_vms
    return loaded
def Prepare(benchmark_spec):
  """Install docker. Pull images. Create datasets. Start master and workers.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  master = benchmark_spec.vm_groups['master'][0]
  workers = benchmark_spec.vm_groups['workers']

  def PrepareCommon(vm):
    # Every VM needs docker, the Spark image and the dataset image; the
    # dataset is exposed through a data-only container named 'data'.
    if not docker.IsInstalled(vm):
      vm.Install('docker')
    vm.Install('cloudsuite/spark')
    vm.Install('cloudsuite/movielens-dataset')
    vm.RemoteCommand('sudo docker create --name data '
                     'cloudsuite/movielens-dataset')

  def PrepareMaster(vm):
    PrepareCommon(vm)
    vm.Install('cloudsuite/in-memory-analytics')
    # Start the Spark master bound to the master VM's internal IP.
    start_master_cmd = ('sudo docker run -d --net host -e SPARK_MASTER_IP=%s '
                        '--name spark-master cloudsuite/spark master' %
                        master.internal_ip)
    vm.RemoteCommand(start_master_cmd)

  def PrepareWorker(vm):
    PrepareCommon(vm)
    # Workers mount the dataset volume and register with the master.
    start_worker_cmd = ('sudo docker run -d --net host --volumes-from data '
                        '--name spark-worker cloudsuite/spark worker '
                        'spark://%s:7077' % master.internal_ip)
    vm.RemoteCommand(start_worker_cmd)

  # Prepare all workers and the master concurrently.
  target_arg_tuples = ([(PrepareWorker, [vm], {}) for vm in workers] +
                       [(PrepareMaster, [master], {})])
  background_tasks.RunParallelThreads(target_arg_tuples, len(target_arg_tuples))
def Run(benchmark_spec):
  """Run the in-memory analytics benchmark.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample objects.

  Raises:
    errors.Benchmarks.RunError: if the execution time cannot be parsed.
  """
  master = benchmark_spec.vm_groups['master'][0]
  benchmark_cmd = ('sudo docker run --rm --net host --volumes-from data '
                   'cloudsuite/in-memory-analytics %s %s '
                   '--master spark://%s:7077' %
                   (FLAGS.cloudsuite_in_memory_analytics_dataset,
                    FLAGS.cloudsuite_in_memory_analytics_ratings_file,
                    master.internal_ip))
  stdout, _ = master.RemoteCommand(benchmark_cmd)
  matches = re.findall(r'Benchmark execution time: (\d+)ms', stdout)
  if len(matches) != 1:
    raise errors.Benchmarks.RunError('Expected to find benchmark execution '
                                     'time')
  millis = float(matches[0])
  return [sample.Sample('Benchmark execution time', millis / 1000, 'seconds')]
def METHOD_NAME(benchmark_spec):
  """Stop and remove docker containers. Remove images.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  master = benchmark_spec.vm_groups['master'][0]
  workers = benchmark_spec.vm_groups['workers']

  def CleanupCommon(vm):
    # -v also removes the anonymous volume backing the 'data' container.
    vm.RemoteCommand('sudo docker rm -v data')

  def CleanupMaster(vm):
    vm.RemoteCommand('sudo docker stop spark-master')
    vm.RemoteCommand('sudo docker rm spark-master')
    CleanupCommon(vm)

  def CleanupWorker(vm):
    vm.RemoteCommand('sudo docker stop spark-worker')
    vm.RemoteCommand('sudo docker rm spark-worker')
    CleanupCommon(vm)

  # Clean all workers and the master concurrently.
  target_arg_tuples = ([(CleanupWorker, [vm], {}) for vm in workers] +
                       [(CleanupMaster, [master], {})])
  background_tasks.RunParallelThreads(target_arg_tuples, len(target_arg_tuples))
5,084 | normalize answer | # Copyright © 2022 BAAI. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
import torch
import re
from sklearn.metrics import f1_score
from collections import defaultdict
from typing import List
import functools
import string
import math
import sacrebleu
from rouge_score import rouge_scorer
def sigmoid(x):
    """Logistic function: 1 / (1 + e^-x)."""
    return 1.0 / (1.0 + math.exp(-x))
def accuracy_metric(predictions, labels, meta=None):
    '''
    predictions: torch.size(n, class_num)
    labels: torch.size(n)

    Returns accuracy as a percentage in [0, 100].
    '''
    count = 0
    assert len(predictions) == len(labels)
    if predictions.size() != labels.size():
        # Multi-class case: reduce per-class logits to predicted class ids.
        predictions = torch.argmax(predictions, dim=-1)
        for prediction, label in zip(predictions, labels):
            count += prediction == label
    else:
        # Single-logit binary case: threshold the sigmoid at 0.5.
        # NOTE(review): only the first element is scored in this branch --
        # presumably it is reached with batch size 1; confirm against callers.
        prediction, label = predictions[0], labels[0]
        if sigmoid(prediction) >= 0.5:
            count += label == 1
        else:
            count += label == 0
    return 100.0 * count / len(labels)
def bleu_metric(predictions, labels, meta=None, tokenizer=None):
    """Corpus BLEU between decoded predictions and decoded labels.

    Args:
        predictions: iterable of token-id tensors.
        labels: iterable of token-id tensors (one reference per prediction).
        meta: unused; kept for a uniform metric signature.
        tokenizer: object exposing ``DecodeIds(list[int]) -> str``.

    Returns:
        sacrebleu corpus BLEU score (float).
    """
    # Fix: removed the unused ``buf`` local and replaced the manual
    # append loops with comprehensions.
    ref_list = [tokenizer.DecodeIds(label.tolist()) for label in labels]
    pred_list = [tokenizer.DecodeIds(pred.tolist()) for pred in predictions]
    bleu_results = sacrebleu.corpus_bleu(pred_list, [ref_list])
    return bleu_results.score
def rouge_metric(predictions, labels, meta=None, tokenizer=None, metric="rouge-1"):
    """Average ROUGE f-measure (scaled x100) between decoded predictions and labels.

    Args:
        predictions: iterable of token-id tensors.
        labels: iterable of token-id tensors (one reference per prediction).
        meta: unused; kept for a uniform metric signature.
        tokenizer: object exposing ``DecodeIds(list[int]) -> str``.
        metric: one of "rouge-1", "rouge-2", "rouge-l".

    Returns:
        Mean f-measure across examples (float).
    """
    metric_dict = {"rouge-1": "rouge1", "rouge-2": "rouge2", "rouge-l": "rougeLsum"}
    rouge_key = metric_dict[metric]
    # Fix: removed the unused ``buf`` local and replaced the manual
    # append loops with comprehensions.
    ref_list = [tokenizer.DecodeIds(label.tolist()) for label in labels]
    pred_list = [tokenizer.DecodeIds(pred.tolist()) for pred in predictions]
    scorer = rouge_scorer.RougeScorer([rouge_key], use_stemmer=True)
    scores = [
        scorer.score(pred, ref)[rouge_key].fmeasure * 100
        for pred, ref in zip(pred_list, ref_list)
    ]
    return sum(scores) / len(scores)
def f1_metric(predictions, labels, meta=None):
    # Binary F1 over argmax predictions.
    # NOTE(review): the exact-match shortcut returns 1.0 for a perfect match,
    # which also sidesteps sklearn's undefined-F1 warning when only one class
    # is present; confirm the shortcut is intentional.
    pred = torch.argmax(predictions, dim=-1).cpu()
    labels = labels.cpu()
    if torch.equal(pred, labels):
        return 1.0
    return f1_score(labels, pred)
def f1_macro_metric(predictions, labels, meta=None):
    # Macro-averaged F1 over argmax predictions; same exact-match shortcut
    # as f1_metric above (returns 1.0 on a perfect match).
    pred = torch.argmax(predictions, dim=-1).cpu()
    labels = labels.cpu()
    if torch.equal(pred, labels):
        return 1.0
    return f1_score(labels, pred, average='macro')
def multirc_em(predictions, labels, meta):
    """Compute the exact match (EM) for a sequence of predictions and actual labels"""
    question_ids = meta["question_idx"]
    actuals_per_question = defaultdict(list)
    predictions_per_question = defaultdict(list)
    for qid, actual in zip(question_ids, labels):
        actuals_per_question[qid].append(actual)
    for qid, predicted in zip(question_ids, predictions):
        predictions_per_question[qid].append(predicted)
    unique_questions = set(question_ids)
    matched = sum(
        1 for qid in unique_questions
        if actuals_per_question[qid] == predictions_per_question[qid]
    )
    return matched / len(unique_questions)
def METHOD_NAME(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    punctuation = set(string.punctuation)
    # Same pipeline order as SQuAD-style normalization:
    # lowercase -> strip punctuation -> drop articles -> collapse whitespace.
    lowered = s.lower()
    without_punct = ''.join(ch for ch in lowered if ch not in punctuation)
    without_articles = re.sub(r'\b(a|an|the)\b', ' ', without_punct)
    return ' '.join(without_articles.split())
def exact_match_score(prediction, ground_truth, meta=None):
    """True iff prediction and ground truth are identical after normalization."""
    normalized_prediction = METHOD_NAME(prediction)
    normalized_truth = METHOD_NAME(ground_truth)
    return normalized_prediction == normalized_truth
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Best score of *prediction* against any ground truth (0.0 if there are none)."""
    if not ground_truths:
        return 0.0
    return max(metric_fn(prediction, truth) for truth in ground_truths)
def qa_evaluate(predictions, labels, meta, metric):
    """Score QA predictions against reference answers.

    Args:
        predictions: per-example indices into ``meta['candidates'][i]``.
        labels: unused; kept for a uniform metric signature.
        meta: dict with 'answers' (list of ground-truth lists) and
            'candidates' (list of candidate-answer lists).
        metric: callable(prediction_text, ground_truth_text) -> score.

    Returns:
        Average best-match score scaled to [0, 100].
    """
    # Fix: guard against an empty batch, which previously raised
    # ZeroDivisionError on the final division.
    if len(predictions) == 0:
        return 0.0
    score = 0.0
    for ground_truths, candidates, prediction in zip(meta["answers"],
                                                     meta['candidates'],
                                                     predictions):
        # Map the predicted candidate index to its answer text.
        prediction_text = candidates[prediction]
        if ground_truths:
            score += metric_max_over_ground_truths(metric, prediction_text,
                                                   ground_truths)
    return 100.0 * score / len(predictions)
# Exact-match and F1 variants of the QA evaluator.
qa_exact_match = functools.partial(qa_evaluate, metric=exact_match_score)
# NOTE(review): this binds sklearn's f1_score (imported at module top), which
# expects label arrays rather than answer strings -- confirm a string-level
# token F1 was not intended here.
qa_f1 = functools.partial(qa_evaluate, metric=f1_score)
5,085 | traced yaaredis | # -*- encoding: utf-8 -*-
import os
import uuid
import pytest
import yaaredis
from ddtrace import Pin
from ddtrace.contrib.yaaredis.patch import patch
from ddtrace.contrib.yaaredis.patch import unpatch
from ddtrace.vendor.wrapt import ObjectProxy
from tests.opentracer.utils import init_tracer
from tests.utils import override_config
from ..config import REDIS_CONFIG
@pytest.fixture(autouse=True)
async def METHOD_NAME():
    """Yield a traced StrictRedis client against a flushed test database.

    The tracing patch is applied after the initial flush; teardown unpatches
    and flushes again so tests cannot leak keys or instrumentation.
    """
    r = yaaredis.StrictRedis(port=REDIS_CONFIG["port"])
    await r.flushall()

    patch()
    try:
        yield r
    finally:
        unpatch()

        # Re-create the client so the flush runs through an unpatched class.
        r = yaaredis.StrictRedis(port=REDIS_CONFIG["port"])
        await r.flushall()
def test_patching():
    """
    When patching yaaredis library
        We wrap the correct methods
    When unpatching yaaredis library
        We unwrap the correct methods
    """
    def traced_attrs():
        # Re-fetch the attributes each time: patch/unpatch swaps them in place.
        return (
            yaaredis.client.StrictRedis.execute_command,
            yaaredis.client.StrictRedis.pipeline,
            yaaredis.pipeline.StrictPipeline.execute,
            yaaredis.pipeline.StrictPipeline.immediate_execute_command,
        )

    for attr in traced_attrs():
        assert isinstance(attr, ObjectProxy)

    unpatch()

    for attr in traced_attrs():
        assert not isinstance(attr, ObjectProxy)
@pytest.mark.asyncio
async def test_long_command(snapshot_context, METHOD_NAME):
    """A command with many arguments produces the expected (snapshot) span."""
    with snapshot_context():
        await METHOD_NAME.mget(*range(1000))
@pytest.mark.asyncio
@pytest.mark.snapshot
async def test_cmd_max_length(METHOD_NAME):
    """The recorded redis command is truncated to the configured max length."""
    with override_config("yaaredis", dict(cmd_max_length=7)):
        await METHOD_NAME.get("here-is-a-long-key")
@pytest.mark.skip(reason="No traces sent to the test agent")
@pytest.mark.subprocess(env=dict(DD_YAAREDIS_CMD_MAX_LENGTH="10"), ddtrace_run=True)
@pytest.mark.snapshot
def test_cmd_max_length_env():
    """Same truncation behavior, driven by the DD_YAAREDIS_CMD_MAX_LENGTH env var."""
    import asyncio

    import yaaredis

    from tests.contrib.config import REDIS_CONFIG

    async def main():
        r = yaaredis.StrictRedis(port=REDIS_CONFIG["port"])
        await r.get("here-is-a-long-key")

    asyncio.run(main())
@pytest.mark.asyncio
async def test_basics(snapshot_context, METHOD_NAME):
    """A simple GET produces the expected (snapshot) span."""
    with snapshot_context():
        await METHOD_NAME.get("cheese")
@pytest.mark.asyncio
async def test_unicode(snapshot_context, METHOD_NAME):
    """Non-ASCII keys are traced without encoding errors."""
    with snapshot_context():
        await METHOD_NAME.get(u"😐")
@pytest.mark.asyncio
async def test_analytics_without_rate(snapshot_context, METHOD_NAME):
    """Analytics enabled with the default sample rate is reflected in the span."""
    with override_config("yaaredis", dict(analytics_enabled=True)):
        with snapshot_context():
            await METHOD_NAME.get("cheese")
@pytest.mark.asyncio
async def test_analytics_with_rate(snapshot_context, METHOD_NAME):
    """An explicit analytics sample rate is reflected in the span."""
    with override_config("yaaredis", dict(analytics_enabled=True, analytics_sample_rate=0.5)):
        with snapshot_context():
            await METHOD_NAME.get("cheese")
@pytest.mark.asyncio
async def test_pipeline_traced(snapshot_context, METHOD_NAME):
    """A batched pipeline execute() is traced as one span."""
    with snapshot_context():
        p = await METHOD_NAME.pipeline(transaction=False)
        await p.set("blah", 32)
        await p.rpush("foo", u"éé")
        await p.hgetall("xxx")
        await p.execute()
@pytest.mark.asyncio
async def test_pipeline_immediate(snapshot_context, METHOD_NAME):
    """immediate_execute_command inside a pipeline is traced as well."""
    with snapshot_context():
        p = await METHOD_NAME.pipeline()
        await p.set("a", 1)
        await p.immediate_execute_command("SET", "a", 1)
        await p.execute()
@pytest.mark.asyncio
async def test_meta_override(tracer, test_spans, METHOD_NAME):
    """Tags set on the client's Pin are propagated to emitted spans."""
    pin = Pin.get_from(METHOD_NAME)
    assert pin is not None
    pin.clone(tags={"cheese": "camembert"}, tracer=tracer).onto(METHOD_NAME)

    await METHOD_NAME.get("cheese")
    test_spans.assert_trace_count(1)
    test_spans.assert_span_count(1)
    # Standard redis span metadata plus the custom Pin tag.
    assert test_spans.spans[0].service == "redis"
    assert test_spans.spans[0].get_tag("component") == "yaaredis"
    assert test_spans.spans[0].get_tag("span.kind") == "client"
    assert test_spans.spans[0].get_tag("db.system") == "redis"
    assert "cheese" in test_spans.spans[0].get_tags() and test_spans.spans[0].get_tag("cheese") == "camembert"
@pytest.mark.asyncio
async def test_service_name(tracer, test_spans, METHOD_NAME):
    """A service name overridden via Pin is used on emitted spans."""
    service = str(uuid.uuid4())
    Pin.override(METHOD_NAME, service=service, tracer=tracer)

    await METHOD_NAME.set("cheese", "1")
    test_spans.assert_trace_count(1)
    test_spans.assert_span_count(1)
    assert test_spans.spans[0].service == service
@pytest.mark.asyncio
async def test_service_name_config(tracer, test_spans, METHOD_NAME):
    """A service name set via integration config is used on emitted spans."""
    service = str(uuid.uuid4())
    with override_config("yaaredis", dict(service=service)):
        Pin.override(METHOD_NAME, tracer=tracer)
        await METHOD_NAME.set("cheese", "1")
        test_spans.assert_trace_count(1)
        test_spans.assert_span_count(1)
        assert test_spans.spans[0].service == service
@pytest.mark.asyncio
async def test_opentracing(tracer, snapshot_context, METHOD_NAME):
    """Ensure OpenTracing works with redis."""
    with snapshot_context():
        pin = Pin.get_from(METHOD_NAME)
        ot_tracer = init_tracer("redis_svc", pin.tracer)
        # The redis span should parent under the OpenTracing span.
        with ot_tracer.start_active_span("redis_get"):
            await METHOD_NAME.get("cheese")
@pytest.mark.parametrize(
    "service_schema",
    [
        (None, None),
        (None, "v0"),
        (None, "v1"),
        ("mysvc", None),
        ("mysvc", "v0"),
        ("mysvc", "v1"),
    ],
)
@pytest.mark.snapshot()
def test_schematization(ddtrace_run_python_code_in_subprocess, service_schema):
    """Spans honor every (DD_SERVICE, span-attribute schema) combination.

    Runs a minimal traced GET in a subprocess so the env vars are picked up
    at ddtrace startup; snapshot assertions validate the resulting spans.
    """
    service, schema = service_schema
    code = """
import pytest
import sys

from tests.contrib.yaaredis.test_yaaredis import traced_yaaredis

@pytest.mark.asyncio
async def test_basics(traced_yaaredis):
    if sys.version_info < (3, 7):
        await traced_yaaredis.get("cheese")
    else:
        async for client in traced_yaaredis:
            await client.get("cheese")

if __name__ == "__main__":
    sys.exit(pytest.main(["-x", __file__]))
"""
    env = os.environ.copy()
    if service:
        env["DD_SERVICE"] = service
    if schema:
        env["DD_TRACE_SPAN_ATTRIBUTE_SCHEMA"] = schema
    out, err, status, _ = ddtrace_run_python_code_in_subprocess(code, env=env)
    assert status == 0, (err.decode(), out.decode())
    assert err == b"", err.decode()
5,086 | sigterm context | import logbook
import signal
import sys
import dessert
from contextlib import contextmanager, ExitStack
from . import hooks as trigger_hook
from . import log
from . import site
from . import plugins
from .conf import config
from .core.session import Session
from .reporting.console_reporter import ConsoleReporter
from . import exceptions
from .exceptions import TerminatedException, SlashException
from .exception_handling import handling_exceptions, inhibit_unhandled_exception_traceback, should_inhibit_unhandled_exception_traceback
from .loader import Loader
from .log import ConsoleHandler
from .utils import cli_utils
from .utils.debug import debug_if_needed
from .utils.warning_capture import warning_callback_context
from .warnings import RecordedWarning, capture_all_warnings
_logger = logbook.Logger(__name__)

# SlashException subclasses are reported as plain error messages rather
# than full unhandled-exception tracebacks.
inhibit_unhandled_exception_traceback(SlashException)
class Application(object):
    """Command-line application context for a single slash run.

    Acts as a context manager: ``__enter__`` loads the site/plugins, parses
    the command line, and opens a :class:`Session`; ``__exit__`` reports
    errors, tears everything down via an ``ExitStack`` and always swallows
    the exception (returns True) after recording an exit code.
    """

    def __init__(self):
        super(Application, self).__init__()
        self._test_loader = Loader()
        self.set_report_stream(sys.stderr)
        self._argv = None
        self._reset_parser()
        self._positional_args = None
        self._parsed_args = None
        self._reporter = None
        self.test_loader = Loader()
        self.session = None
        self._working_directory = None
        self._interrupted = False
        self._exit_code = 0
        # Warnings raised before the Session exists are buffered here and
        # replayed into the session by _emit_prelude_warnings().
        self._prelude_warning_records = []

    def set_working_directory(self, path):
        self._working_directory = path

    @property
    def exit_code(self):
        return self._exit_code

    def set_exit_code(self, exit_code):
        self._exit_code = exit_code

    @property
    def interrupted(self):
        return self._interrupted

    @property
    def positional_args(self):
        return self._positional_args

    @property
    def parsed_args(self):
        return self._parsed_args

    def enable_interactive(self):
        # Adds the -i/--interactive flag; callers opt in before __enter__.
        self.arg_parser.add_argument(
            '-i', '--interactive', help='Enter an interactive shell',
            action="store_true", default=False)

    def _reset_parser(self):
        self.arg_parser = cli_utils.SlashArgumentParser()

    def set_argv(self, argv):
        self._argv = list(argv)

    def _get_argv(self):
        # Falls back to the process argv when none was injected via set_argv().
        if self._argv is None:
            return sys.argv[1:]
        return self._argv[:]

    def set_report_stream(self, stream):
        if stream is not None:
            self._report_stream = stream
            self._default_reporter = ConsoleReporter(level=logbook.ERROR, stream=stream)
            self._console_handler = ConsoleHandler(stream=stream, level=logbook.ERROR)

    def set_reporter(self, reporter):
        self._reporter = reporter

    def get_reporter(self):
        # An explicitly set reporter wins; otherwise build a console reporter
        # at the configured console log level.
        returned = self._reporter
        if returned is None:
            returned = ConsoleReporter(
                level=config.root.log.console_level,
                stream=self._report_stream)
        return returned

    def __enter__(self):
        self._exit_stack = ExitStack()
        self._exit_stack.__enter__()
        try:
            # Buffer logs/warnings and install the SIGTERM handler before
            # anything that may emit or fail.
            self._exit_stack.enter_context(self._prelude_logging_context())
            self._exit_stack.enter_context(self._prelude_warning_context())
            self._exit_stack.enter_context(self.METHOD_NAME())
            # Load slashconf/site files with assertion rewriting active.
            with dessert.rewrite_assertions_context():
                site.load(working_directory=self._working_directory)
            cli_utils.configure_arg_parser_by_plugins(self.arg_parser)
            cli_utils.configure_arg_parser_by_config(self.arg_parser)
            argv = cli_utils.add_pending_plugins_from_commandline(self._get_argv())
            self._parsed_args, self._positional_args = self.arg_parser.parse_known_args(argv)
            self._exit_stack.enter_context(
                cli_utils.get_modified_configuration_from_args_context(self.arg_parser, self._parsed_args)
            )
            self.session = Session(reporter=self.get_reporter(), console_stream=self._report_stream)
            trigger_hook.configure()  # pylint: disable=no-member
            plugins.manager.configure_for_parallel_mode()
            plugins.manager.activate_pending_plugins()
            cli_utils.configure_plugins_from_args(self._parsed_args)
            self._exit_stack.enter_context(self.session)
            # Replay anything buffered before the session came up.
            self._emit_prelude_logs()
            self._emit_prelude_warnings()
            return self
        except:
            # Make sure buffered prelude logs surface even on startup failure.
            self._emit_prelude_logs()
            self.__exit__(*sys.exc_info())
            raise

    def __exit__(self, exc_type, exc_value, exc_tb):
        exc_info = (exc_type, exc_value, exc_tb)
        try:
            debug_if_needed(exc_info)
        except Exception as e:  # pylint: disable=broad-except
            _logger.error("Failed to debug_if_needed: {!r}", e, exc_info=True, extra={'capture': False})
        if exc_value is not None:
            # SystemExit carries its own code; anything else maps to 1.
            self._exit_code = exc_value.code if isinstance(exc_value, SystemExit) else 1
            if should_inhibit_unhandled_exception_traceback(exc_value):
                self.get_reporter().report_error_message(str(exc_value))
            elif isinstance(exc_value, Exception):
                _logger.error('Unexpected error occurred', exc_info=exc_info, extra={'capture': False})
                self.get_reporter().report_error_message('Unexpected error: {}'.format(exc_value))
            if isinstance(exc_value, exceptions.INTERRUPTION_EXCEPTIONS):
                self._interrupted = True
        if exc_type is not None:
            trigger_hook.result_summary()  # pylint: disable=no-member
        self._exit_stack.__exit__(exc_type, exc_value, exc_tb)
        self._exit_stack = None
        self._reset_parser()
        trigger_hook.app_quit()  # pylint: disable=no-member
        return True

    def _capture_native_warning(self, message, category, filename, lineno, file=None, line=None):  # pylint: disable=unused-argument
        self._prelude_warning_records.append(RecordedWarning.from_native_warning(message, category, filename, lineno))

    def _prelude_logging_context(self):
        # Retain all log records emitted before the session's handlers exist.
        self._prelude_log_handler = log.RetainedLogHandler(bubble=True, level=logbook.TRACE)
        return self._prelude_log_handler.applicationbound()

    def _prelude_warning_context(self):
        capture_all_warnings()
        return warning_callback_context(self._capture_native_warning)

    def _emit_prelude_warnings(self):
        if self.session is not None:
            for warning in self._prelude_warning_records:
                if not self.session.warnings.warning_should_be_filtered(warning):
                    self.session.warnings.add(warning)

    def _emit_prelude_logs(self):
        self._prelude_log_handler.disable()
        # Prefer the session log handler; fall back to the console handler.
        handler = None
        if self.session is not None:
            handler = self.session.logging.session_log_handler
        if handler is None:
            handler = self._console_handler
        self._prelude_log_handler.flush_to_handler(handler)

    @contextmanager
    def METHOD_NAME(self):
        # Convert SIGTERM into a TerminatedException for the duration of
        # the context; the previous handler is restored on exit.
        def handle_sigterm(*_):
            with handling_exceptions():
                raise TerminatedException('Terminated by signal')
        prev = signal.signal(signal.SIGTERM, handle_sigterm)
        try:
            yield
        finally:
            try:
                signal.signal(signal.SIGTERM, prev)
            except TypeError as e:
                # workaround for a strange issue on app cleanup. See https://bugs.python.org/issue23548
                if 'signal handler must be signal.SIG_IGN' not in str(e):
                    raise
5,087 | curate post | from datetime import datetime, timedelta
from django.shortcuts import get_object_or_404, render, redirect
from authn.decorators.auth import require_auth, require_moderator_role, require_curator_role
from club.exceptions import AccessDenied
from comments.models import Comment
from common.data.labels import LABELS
from notifications.telegram.common import render_html_message
from notifications.telegram.posts import announce_in_club_channel, notify_post_collectible_tag_owners
from posts.forms.admin import PostAdminForm, PostAnnounceForm, PostCuratorForm
from posts.helpers import extract_any_image
from posts.models.linked import LinkedPost
from posts.models.post import Post
from users.models.user import User
@require_auth
@require_curator_role
def METHOD_NAME(request, post_slug):
    """Render and process the curator action form for a post (curators only)."""
    post = get_object_or_404(Post, slug=post_slug)

    if request.method == "POST":
        form = PostCuratorForm(request.POST)
        if form.is_valid():
            return do_post_curator_actions(request, post, form.cleaned_data)
    else:
        form = PostCuratorForm()

    return render(request, "admin/simple_form.html", {
        "title": "Курирование поста",
        "post": post,
        "form": form
    })
@require_auth
@require_moderator_role
def admin_post(request, post_slug):
    """Render and process the moderator action form for a post (moderators only)."""
    post = get_object_or_404(Post, slug=post_slug)

    if request.method == "POST":
        form = PostAdminForm(request.POST)
        if form.is_valid():
            return do_post_admin_actions(request, post, form.cleaned_data)
    else:
        form = PostAdminForm()

    return render(request, "admin/simple_form.html", {
        "title": "Админить пост",
        "post": post,
        "form": form
    })
@require_auth
@require_moderator_role
def announce_post(request, post_slug):
    """Announce a post in the club Telegram channel (moderators only).

    Pre-fills the announcement text from a template and the image from the
    post; an invalid submission falls through to re-render the form.
    """
    post = get_object_or_404(Post, slug=post_slug)

    initial = {
        "text": render_html_message("channel_post_announce.html", post=post),
        "image": extract_any_image(post),
    }

    if request.method == "POST":
        form = PostAnnounceForm(request.POST, initial=initial)
        if form.is_valid():
            announce_in_club_channel(
                post=post,
                announce_text=form.cleaned_data["text"],
                image=form.cleaned_data["image"] if form.cleaned_data["with_image"] else None
            )
            return render(request, "message.html", {
                "title": "Запощено ✅"
            })
    else:
        form = PostAnnounceForm(initial=initial)

    return render(request, "admin/simple_form.html", {
        "title": "Анонсировать пост на канале",
        "post": post,
        "form": form
    })
def do_post_admin_actions(request, post, data):
    """Apply moderator-level actions from the admin form, then redirect to the post."""
    if not request.me.is_moderator:
        raise AccessDenied()

    # Actions shared with curators (type, labels, pins, feed visibility, ...).
    do_common_admin_and_curator_actions(request, post, data)

    # Close comments
    if data["toggle_is_commentable"]:
        post.is_commentable = not post.is_commentable
        post.save()

    # Transfer ownership to the given username
    if data["transfer_ownership"]:
        user = User.objects.filter(slug=data["transfer_ownership"].strip()).first()
        if user:
            post.author = user
            post.save()

    # Rebuild post-to-post links from the post body and all visible comments.
    if data["refresh_linked"]:
        LinkedPost.create_links_from_text(post, post.text)
        post_comments = Comment.visible_objects().filter(post=post, is_deleted=False)
        for comment in post_comments:
            LinkedPost.create_links_from_text(comment.post, comment.text)

    return redirect("show_post", post.type, post.slug)
def do_post_curator_actions(request, post, data):
    """Apply curator-level actions from the curation form, then redirect to the post."""
    if not request.me.is_curator:
        raise AccessDenied()

    do_common_admin_and_curator_actions(request, post, data)

    return redirect("show_post", post.type, post.slug)
def do_common_admin_and_curator_actions(request, post, data):
    """Apply the moderation actions available to both admins and curators.

    Each action is independent; the post is saved after each applied change
    so later failures cannot roll back earlier ones.
    """
    # Change type
    if data["change_type"]:
        post.type = data["change_type"]
        post.save()

    # Labels
    if data["new_label"]:
        label = LABELS.get(data["new_label"])
        if label:
            post.label_code = data["new_label"]
            post.save()

    if data["remove_label"]:
        post.label_code = None
        post.save()

    # Pins
    if data["add_pin"]:
        post.is_pinned_until = datetime.utcnow() + timedelta(days=data["pin_days"])
        post.save()

    if data["remove_pin"]:
        post.is_pinned_until = None
        post.save()

    # Moving up
    if data["move_up"]:
        post.last_activity_at = datetime.utcnow()
        post.save()

    # Moving down
    if data["move_down"]:
        post.last_activity_at -= timedelta(days=3)
        post.save()

    # Shadow banning
    if data["shadow_ban"]:
        post.is_shadow_banned = True
        post.save()

    # Hide from feeds
    if data["hide_from_feeds"]:
        post.is_visible_in_feeds = False
        post.save()

    # Show back in feeds
    if data["show_in_feeds"]:
        post.is_visible_in_feeds = True
        post.save()

    # Ping collectible tag owners again
    if data["re_ping_collectible_tag_owners"]:
        if post.collectible_tag_code:
            notify_post_collectible_tag_owners(post)
5,088 | cmp bytes | from typing import (
Callable,
Dict,
FrozenSet,
ItemsView,
Iterable,
Iterator,
KeysView,
List,
Optional,
Tuple,
)
import cxxfilt
import graphviz
import os
from .graphs import DiGraph
class FunctionInfo:
    """Per-function taint info: which input bytes reached the function at all
    (``input_bytes``) and which reached comparison operations (``cmp_bytes``),
    keyed by taint-source name (typically an input file path)."""

    def __init__(
        self,
        name: str,
        METHOD_NAME: Dict[str, List[int]],
        input_bytes: Optional[Dict[str, List[int]]] = None,
        called_from: Iterable[str] = (),
    ):
        self.name: str = name
        self.called_from: FrozenSet[str] = frozenset(called_from)
        self._cmp_bytes: Dict[str, List[int]] = METHOD_NAME
        if input_bytes is None:
            # Fall back to the cmp-byte map when no separate input map is given.
            self._input_bytes: Dict[str, List[int]] = METHOD_NAME
        else:
            self._input_bytes = input_bytes
        # Lazily computed by the demangled_name property.
        self._demangled_name: Optional[str] = None

    @property
    def demangled_name(self) -> str:
        """C++-demangled function name, with any DFSan 'dfs$' prefix stripped."""
        if self._demangled_name is None:
            self._demangled_name = self.name
            if self._demangled_name.startswith("dfs$"):
                self._demangled_name = self._demangled_name[4:]
            self._demangled_name = cxxfilt.demangle(self._demangled_name)
        return self._demangled_name  # type: ignore

    def source_size(self, source: str) -> int:
        """Size of *source*: its on-disk size if it exists, else the largest
        byte offset this trace touched.

        NOTE(review): the fallback returns max(offsets), which is the highest
        zero-based offset rather than a byte count -- confirm whether
        ``max(...) + 1`` was intended.
        """
        if source not in self.taint_sources:
            raise KeyError(source)
        elif os.path.exists(source):
            return os.stat(source).st_size
        else:
            # find the largest byte this trace touched
            return max(self.input_bytes[source])

    def taint_source_sizes(self) -> Dict[str, int]:
        return {source: self.source_size(source) for source in self.taint_sources}

    @property
    def input_bytes(self) -> Dict[str, List[int]]:
        return self._input_bytes

    @property
    def METHOD_NAME(self) -> Dict[str, List[int]]:
        return self._cmp_bytes

    @property
    def taint_sources(self) -> KeysView[str]:
        return self.input_bytes.keys()

    @staticmethod
    def tainted_chunks(byte_offsets: Iterable[int]) -> Iterator[Tuple[int, int]]:
        """Collapse sorted byte offsets into half-open [start, end) runs of
        consecutive offsets."""
        start_offset: Optional[int] = None
        last_offset: Optional[int] = None
        for offset in sorted(byte_offsets):
            if last_offset is None:
                start_offset = offset
            elif offset != last_offset and offset != last_offset + 1:
                # Gap found: emit the finished run and start a new one.
                yield start_offset, last_offset + 1  # type: ignore
                start_offset = offset
            last_offset = offset
        if last_offset is not None:
            yield start_offset, last_offset + 1  # type: ignore

    def input_chunks(self) -> Iterator[Tuple[str, Tuple[int, int]]]:
        """Yield (source, [start, end)) runs over all tainted input bytes."""
        for source, byte_offsets in self.input_bytes.items():
            for start, end in FunctionInfo.tainted_chunks(byte_offsets):
                yield source, (start, end)

    def cmp_chunks(self) -> Iterator[Tuple[str, Tuple[int, int]]]:
        """Yield (source, [start, end)) runs over bytes used in comparisons."""
        for source, byte_offsets in self.METHOD_NAME.items():
            for start, end in FunctionInfo.tainted_chunks(byte_offsets):
                yield source, (start, end)

    def __getitem__(self, input_source_name: str) -> List[int]:
        return self.input_bytes[input_source_name]

    def __iter__(self) -> Iterable[str]:
        return self.taint_sources

    def items(self) -> ItemsView[str, List[int]]:
        return self.input_bytes.items()

    def __hash__(self):
        # Identity is the (mangled) function name; matches __eq__ below.
        return hash(self.name)

    def __eq__(self, other):
        return isinstance(other, FunctionInfo) and other.name == self.name

    def __str__(self):
        return self.demangled_name

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(name={self.name!r}, cmp_bytes={self.METHOD_NAME!r}, "
            f"input_bytes={self.input_bytes!r}, called_from={self.called_from!r})"
        )
class CFG(DiGraph[FunctionInfo]):
    """Control-flow graph over FunctionInfo nodes, renderable via graphviz."""

    def __init__(self):
        super().__init__()

    def to_dot(
        self,
        comment: Optional[str] = "PolyTracker Program Trace",
        labeler: Optional[Callable[[FunctionInfo], str]] = None,
        node_filter=None,
    ) -> graphviz.Digraph:
        """Render the graph to DOT, labeling nodes by function name.

        NOTE(review): ``function_labels`` is never populated in this method,
        so the elif branch below is currently dead -- presumably a hook for
        subclasses or future use; confirm before removing.
        """
        function_labels: Dict[str, str] = {}

        def func_labeler(f):
            if labeler is not None:
                return labeler(f)
            elif f.name in function_labels:
                return f"{f.name} ({function_labels[f.name]})"
            else:
                return f.name

        return super().to_dot(comment, labeler=func_labeler, node_filter=node_filter)
5,089 | run | import logging as log
from avocado.core import exceptions
from virttest import virsh
from virttest.libvirt_xml import vm_xml
from virttest.utils_test import libvirt
# Using a lowercase module-level name here is not ideal, but it is a
# workaround that avoids changing logger references across the entire file.
logging = log.getLogger('avocado.' + __name__)
def get_xmlinfo(vm_name, options):
    """
    Get some iothreadinfo from the guests xml

    Returns:
        List of iothread ids
    """
    # --config means the persistent (inactive) definition; otherwise use the
    # live domain XML.
    if "--config" in options:
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    else:
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    logging.debug("domxml: %s", vmxml)
    return vmxml.iothreadids.iothread
def METHOD_NAME(test, params, env):
    """
    Test command: virsh iothread.

    The command can change the number of iothread.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh iothreadadd operation.
    3.Recover test environment.
    4.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("iothread_pre_vm_state")
    command = params.get("iothread_command", "iothreadadd")
    options = params.get("iothread_options")
    vm_ref = params.get("iothread_vm_ref", "name")
    iothreads = params.get("iothreads", 4)
    iothread_id = params.get("iothread_id", "6")
    status_error = "yes" == params.get("status_error")
    iothreadids = params.get("iothreadids")
    iothreadpins = params.get("iothreadpins")

    try:
        iothreads = int(iothreads)
    except ValueError:
        # 'iothreads' may be an intentionally invalid number in negative tests
        logging.debug("Can't convert %s to integer type", iothreads)

    # Save original configuration so it can be restored in the finally block.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    try:
        if vm.is_alive():
            vm.destroy()
        # Skip the test if this libvirt doesn't support any requested option.
        option_list = options.split(" ")
        for item in option_list:
            if virsh.has_command_help_match(command, item) is None:
                raise exceptions.TestSkipError("The current libvirt version"
                                               " doesn't support '%s' option"
                                               % item)
        # Set iothreads first
        if iothreadids:
            ids_xml = vm_xml.VMIothreadidsXML()
            ids_xml.iothread = [{'id': id} for id in iothreadids.split()]
            vmxml.iothreadids = ids_xml
        if iothreadpins:
            # iothreadpins format: "thread:cpuset thread:cpuset ..."
            cputune_xml = vm_xml.VMCPUTuneXML()
            io_pins = []
            for pins in iothreadpins.split():
                thread, cpuset = pins.split(':')
                io_pins.append({"iothread": thread,
                                "cpuset": cpuset})
            cputune_xml.iothreadpins = io_pins
            vmxml.cputune = cputune_xml
        vmxml.iothreads = iothreads
        logging.debug("Pre-test xml is %s", vmxml)
        vmxml.sync()

        # Restart, unless that's not our test
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()
        domid = vm.get_id()  # only valid for running
        domuuid = vm.get_uuid()

        if pre_vm_state == "shut off" and vm.is_alive():
            vm.destroy()

        # Run test: resolve how the domain is addressed on the command line.
        if vm_ref == "name":
            dom_option = vm_name
        elif vm_ref == "id":
            dom_option = domid
        elif vm_ref == "uuid":
            dom_option = domuuid
        else:
            dom_option = vm_ref

        ret = virsh.iothreadadd(dom_option, iothread_id, options,
                                ignore_status=True, debug=True)
        libvirt.check_exit_status(ret, status_error)
        if not status_error:
            # Check domainxml
            iothread_info = get_xmlinfo(vm_name, options)
            logging.debug("iothreadinfo: %s", iothread_info)
            if {'id': iothread_id} not in iothread_info:
                test.fail("Failed to add iothread '%s' in "
                          "domain xml" % iothread_id)
            # Check iothreadinfo by virsh command
            iothread_info = libvirt.get_iothreadsinfo(dom_option, options)
            logging.debug("iothreadinfo: %s", iothread_info)
            if iothread_id not in iothread_info:
                test.fail("Failed to add iothread '%s'" % iothread_id)
    finally:
        # Cleanup: restore the saved domain definition.
        if vm.is_alive():
            vm.destroy()
        orig_config_xml.sync()
5,090 | projects by orgs | from urllib import parse as urlparse
from django.conf import settings
from django.contrib.gis.geos import Point
from django.contrib.gis.measure import D
from django.core.paginator import Paginator
from .common import apply_tag_filters, sort_by_field
from civictechprojects.caching.cache import ProjectSearchTagsCache
from civictechprojects.helpers.projects.annotations import apply_project_annotations
from civictechprojects.models import (
Project,
Group,
Event,
ProjectPosition,
ProjectFavorite,
)
from common.helpers.db import unique_column_values
from common.helpers.tags import get_tags_by_category
from common.models.tags import Tag
from democracylab.models import Contributor, get_request_contributor
def projects_list(request):
    """Search endpoint: return projects matching the request's query-string filters.

    Supported filters: group_id/event_id scoping, tag categories (issues, tech,
    role, org, orgType, stage), favoritesOnly, keyword, locationRadius, and the
    legacy "location" city filter, plus sortField and page parameters.
    Returns the response dict built by projects_with_meta_data().
    """
    url_parts = request.GET.urlencode()
    query_params = urlparse.parse_qs(url_parts, keep_blank_values=0, strict_parsing=0)
    event = None
    group = None
    # Choose the base queryset: a group's approved projects, an event's linked
    # projects, or all public searchable projects.
    if "group_id" in query_params:
        group_id = query_params["group_id"][0]
        group = Group.objects.get(id=group_id)
        project_list = group.get_group_projects(approved_only=True)
    elif "event_id" in query_params:
        event_id = query_params["event_id"][0]
        event = Event.get_by_id_or_slug(event_id)
        project_list = event.get_linked_projects()
    else:
        project_list = Project.objects.filter(is_searchable=True, is_private=False)
    # Narrow by each tag category present in the query string.
    project_list = apply_tag_filters(
        project_list, query_params, "issues", projects_by_issue_areas
    )
    project_list = apply_tag_filters(
        project_list, query_params, "tech", projects_by_technologies
    )
    project_list = apply_tag_filters(
        project_list, query_params, "role", projects_by_roles
    )
    project_list = apply_tag_filters(
        project_list, query_params, "org", METHOD_NAME
    )
    project_list = apply_tag_filters(
        project_list, query_params, "orgType", projects_by_org_types
    )
    project_list = apply_tag_filters(
        project_list, query_params, "stage", projects_by_stage
    )
    # '&' intersects querysets, so each additional filter further narrows results.
    if "favoritesOnly" in query_params:
        user = get_request_contributor(request)
        project_list = project_list & ProjectFavorite.get_for_user(user)
    if "keyword" in query_params:
        project_list = project_list & projects_by_keyword(query_params["keyword"][0])
    if "locationRadius" in query_params:
        project_list = projects_by_location(
            project_list, query_params["locationRadius"][0]
        )
    if "location" in query_params:
        project_list = projects_by_legacy_city(
            project_list, query_params["location"][0]
        )
    project_list = project_list.distinct()
    if "sortField" in query_params:
        project_list = sort_by_field(project_list, query_params["sortField"][0])
    else:
        # Default ordering: most recently modified first.
        project_list = sort_by_field(project_list, "-project_date_modified")
    project_count = len(project_list)
    project_paginator = Paginator(project_list, settings.PROJECTS_PER_PAGE)
    if "page" in query_params:
        project_list_page = project_paginator.page(query_params["page"][0])
        project_pages = project_paginator.num_pages
    else:
        # No page requested: return the full, unpaginated result set.
        project_list_page = project_list
        project_pages = 1
    tag_counts = get_tag_counts(category=None, event=event, group=group)
    response = projects_with_meta_data(
        get_request_contributor(request),
        query_params,
        project_list_page,
        project_pages,
        project_count,
        tag_counts,
    )
    return response
def recent_projects_list(request):
    """Return tile JSON for the most recently modified public, searchable projects.

    The optional "count" query parameter controls how many projects are
    returned (default 3).  The DemocracyLab project itself is excluded when
    settings.DLAB_PROJECT_ID is a numeric id.
    """
    url_parts = request.GET.urlencode()
    query_params = urlparse.parse_qs(url_parts, keep_blank_values=0, strict_parsing=0)
    project_count = int(query_params["count"][0]) if "count" in query_params else 3
    project_list = Project.objects.filter(is_searchable=True, is_private=False)
    # Filter out the DemocracyLab project
    if settings.DLAB_PROJECT_ID.isdigit():
        project_list = project_list.exclude(id=int(settings.DLAB_PROJECT_ID))
    project_list = sort_by_field(project_list, "-project_date_modified")[:project_count]
    return list(project.hydrate_to_tile_json() for project in project_list)
def projects_by_keyword(keyword):
    """Return projects whose indexed full text contains *keyword* (case-insensitive)."""
    keyword_filter = {"full_text__icontains": keyword}
    return Project.objects.filter(**keyword_filter)
def projects_by_location(project_list, param):
    """Filter *project_list* to projects within a radius of a geographic point.

    *param* is a comma-separated "latitude,longitude,radius_miles" string.
    Note the GEOS Point constructor takes (x=longitude, y=latitude), hence
    the swapped indices.
    """
    parts = param.split(",")
    center = Point(float(parts[1]), float(parts[0]))
    max_distance = D(mi=float(parts[2]))
    return project_list.filter(
        project_location_coords__distance_lte=(center, max_distance)
    )
def projects_by_legacy_city(project_list, param):
    """Filter by a legacy "City, ST" location string.

    If the string does not contain a ", "-separated state part, the queryset
    is returned unchanged.
    """
    pieces = param.split(", ")
    if len(pieces) < 2:
        return project_list
    return project_list.filter(project_city=pieces[0], project_state=pieces[1])
def projects_by_issue_areas(tags):
    """Return projects tagged with any of the given issue-area tag names."""
    issue_filter = {"project_issue_area__name__in": tags}
    return Project.objects.filter(**issue_filter)
def projects_by_technologies(tags):
    """Return projects tagged with any of the given technology tag names."""
    tech_filter = {"project_technologies__name__in": tags}
    return Project.objects.filter(**tech_filter)
def METHOD_NAME(tags):
    """Return projects whose organization tag matches any of the given names."""
    org_filter = {"project_organization__name__in": tags}
    return Project.objects.filter(**org_filter)
def projects_by_org_types(tags):
    """Return projects whose organization-type tag matches any of the given names."""
    org_type_filter = {"project_organization_type__name__in": tags}
    return Project.objects.filter(**org_type_filter)
def projects_by_stage(tags):
    """Return projects whose stage tag matches any of the given names."""
    stage_filter = {"project_stage__name__in": tags}
    return Project.objects.filter(**stage_filter)
def projects_by_roles(tags):
    """Return projects with at least one visible, non-event position in any given role."""
    visible_positions = (
        ProjectPosition.objects.filter(position_role__name__in=tags)
        .exclude(is_hidden=True)
        .exclude(position_event__isnull=False)
        .select_related("position_project")
    )
    # Map the matching positions back to their projects.
    return Project.objects.filter(positions__in=visible_positions)
def project_countries():
    """Return the distinct two-letter country codes present on projects."""
    def _is_country_code(country):
        # Non-empty and exactly two characters (ISO 3166-1 alpha-2 shape).
        return country and len(country) == 2

    return unique_column_values(Project, "project_country", _is_country_code)
def projects_with_meta_data(
    user: Contributor, query_params, projects, project_pages, project_count, tag_counts
):
    """Assemble the project-search response payload with pagination metadata."""
    tiles = [project.hydrate_to_tile_json() for project in projects]
    annotated_projects = apply_project_annotations(user, query_params, tiles)
    response = {
        "projects": annotated_projects,
        "availableCountries": project_countries(),
        "tags": tag_counts,
        "numPages": project_pages,
        "numProjects": project_count,
    }
    return response
def get_tag_counts(category=None, event=None, group=None):
    """Return hydrated tag models annotated with usage counts ("num_times").

    Counts come from ProjectSearchTagsCache for the given event/group scope;
    tags with no cached count default to 0.  When *category* is None all tags
    are considered.
    """
    queryset = (
        get_tags_by_category(category) if category is not None else Tag.objects.all()
    )
    active_counts = ProjectSearchTagsCache.get(event=event, group=group)
    # Key by tag_name so duplicate names collapse to a single entry (last wins),
    # matching the previous dict-comprehension behavior.
    tags_by_slug = {tag.tag_name: tag for tag in queryset}
    results = {}
    for slug, tag in tags_by_slug.items():
        hydrated = Tag.hydrate_tag_model(tag)
        # .get avoids the double lookup of "x[k] if k in x else 0".
        hydrated["num_times"] = active_counts.get(slug, 0)
        results[slug] = hydrated
    return list(results.values())
5,091 | type | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionResult',
'AwaitableGetPrivateEndpointConnectionResult',
'get_private_endpoint_connection',
'get_private_endpoint_connection_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
    """
    A private endpoint connection class.

    Result object for the getPrivateEndpointConnection invoke.  NOTE: this
    file is generated by Pulumi ("do not edit by hand"); only documentation
    has been added here.
    """
    def __init__(__self__, id=None, name=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, system_data=None, METHOD_NAME=None):
        # Each argument is runtime type-checked, then stored with pulumi.set so
        # the @pulumi.getter properties below can retrieve it.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if private_endpoint and not isinstance(private_endpoint, dict):
            raise TypeError("Expected argument 'private_endpoint' to be a dict")
        pulumi.set(__self__, "private_endpoint", private_endpoint)
        if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
            raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
        pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", METHOD_NAME)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Gets or sets the identifier.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Gets or sets the name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
        """
        The private endpoint information.
        """
        return pulumi.get(self, "private_endpoint")
    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> Optional['outputs.PrivateLinkServiceConnectionStateResponse']:
        """
        The private link service connection state.
        """
        return pulumi.get(self, "private_link_service_connection_state")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.ProxyResourceResponseSystemData':
        """
        Metadata pertaining to creation and last modification of the resource.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        Gets or sets the type.
        """
        return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
    # Awaitable wrapper: lets callers ``await`` the result; it resolves
    # immediately with the already-computed values (no real async work).
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetPrivateEndpointConnectionResult(
            id=self.id,
            name=self.name,
            private_endpoint=self.private_endpoint,
            private_link_service_connection_state=self.private_link_service_connection_state,
            provisioning_state=self.provisioning_state,
            system_data=self.system_data,
            METHOD_NAME=self.METHOD_NAME)
def get_private_endpoint_connection(account_name: Optional[str] = None,
                                    private_endpoint_connection_name: Optional[str] = None,
                                    resource_group_name: Optional[str] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
    """
    Get a private endpoint connection
    Azure REST API version: 2021-12-01.
    :param str account_name: The name of the account.
    :param str private_endpoint_connection_name: Name of the private endpoint connection.
    :param str resource_group_name: The resource group name.
    :return: An awaitable result describing the private endpoint connection.
    """
    # Invoke the provider synchronously and wrap the raw result fields.
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['privateEndpointConnectionName'] = private_endpoint_connection_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:purview:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
    return AwaitableGetPrivateEndpointConnectionResult(
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        private_endpoint=pulumi.get(__ret__, 'private_endpoint'),
        private_link_service_connection_state=pulumi.get(__ret__, 'private_link_service_connection_state'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        system_data=pulumi.get(__ret__, 'system_data'),
        METHOD_NAME=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_private_endpoint_connection)
def get_private_endpoint_connection_output(account_name: Optional[pulumi.Input[str]] = None,
                                           private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
                                           resource_group_name: Optional[pulumi.Input[str]] = None,
                                           opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionResult]:
    """
    Get a private endpoint connection
    Azure REST API version: 2021-12-01.
    :param str account_name: The name of the account.
    :param str private_endpoint_connection_name: Name of the private endpoint connection.
    :param str resource_group_name: The resource group name.
    :return: Output-lifted variant; the body is supplied by lift_output_func.
    """
    ...
5,092 | test resource not enough | import pytest
from autogluon.common.utils.resource_utils import ResourceManager
from autogluon.core.ray.resources_calculator import (
CpuResourceCalculator,
GpuResourceCalculator,
NonParallelGpuResourceCalculator,
ResourceCalculatorFactory,
)
def test_cpu_calculator_no_bottleneck():
    """32 CPUs at 4 CPUs/job => 8 parallel jobs, so 20 jobs run in 3 batches."""
    num_cpus = 32
    num_jobs = 20
    calculator = ResourceCalculatorFactory.get_resource_calculator(calculator_type="cpu")
    assert type(calculator) == CpuResourceCalculator
    resources_info = calculator.get_resources_per_job(
        total_num_cpus=num_cpus,
        num_jobs=num_jobs,
        minimum_cpu_per_job=4,  # allows 8 jobs to run in parallel
    )
    expected_resources_per_trial = dict(
        cpu=4,
    )
    expected_num_parallel_jobs = 8
    expected_batches = 3
    assert expected_resources_per_trial == resources_info["resources_per_job"]
    assert expected_num_parallel_jobs == resources_info["num_parallel_jobs"]
    assert expected_batches == resources_info["batches"]
def test_cpu_calculator_mem_bottleneck():
    """Memory, not CPU, is the limiting factor: only 2 jobs fit in RAM at once."""
    num_cpus = 32
    num_jobs = 20
    mem_available = ResourceManager.get_available_virtual_mem()
    model_estimate_memory_usage = mem_available // 2.5  # allows 2 jobs to run in parallel
    calculator = ResourceCalculatorFactory.get_resource_calculator(calculator_type="cpu")
    assert type(calculator) == CpuResourceCalculator
    resources_info = calculator.get_resources_per_job(
        total_num_cpus=num_cpus,
        num_jobs=num_jobs,
        model_estimate_memory_usage=model_estimate_memory_usage,
        minimum_cpu_per_job=4,  # allows 8 jobs to run in parallel
    )
    # Even though 20 jobs were requested and the CPUs could run 8 in parallel,
    # memory only allows 2 concurrent jobs (each then gets 16 CPUs).
    expected_num_parallel_jobs = 2
    expected_resources_per_trial = dict(
        cpu=16,
    )
    expected_batches = 10
    assert expected_resources_per_trial == resources_info["resources_per_job"]
    assert expected_num_parallel_jobs == resources_info["num_parallel_jobs"]
    assert expected_batches == resources_info["batches"]
def test_gpu_calculator_no_bottleneck():
    """GPUs (0.5/job over 4 GPUs) cap parallelism at 8, below the 32-wide CPU limit."""
    num_cpus = 32
    num_gpus = 4
    num_jobs = 20
    calculator = ResourceCalculatorFactory.get_resource_calculator(calculator_type="gpu")
    assert type(calculator) == GpuResourceCalculator
    resources_info = calculator.get_resources_per_job(
        total_num_cpus=num_cpus,
        total_num_gpus=num_gpus,
        num_jobs=num_jobs,
        minimum_cpu_per_job=1,  # allows 32 jobs to run in parallel
        minimum_gpu_per_job=0.5,  # allows 8 jobs to run in parallel
    )
    # 8 parallel jobs => each job gets 32/8 = 4 CPUs alongside its half GPU.
    expected_num_parallel_jobs = 8
    expected_resources_per_trial = dict(
        cpu=4,
        gpu=0.5,
    )
    expected_batches = 3
    assert expected_resources_per_trial == resources_info["resources_per_job"]
    assert expected_num_parallel_jobs == resources_info["num_parallel_jobs"]
    assert expected_batches == resources_info["batches"]
def test_gpu_calculator_cpu_bottleneck():
    """CPUs cap parallelism at 4, so each job ends up with a full GPU."""
    num_cpus = 4
    num_gpus = 4
    num_jobs = 20
    calculator = ResourceCalculatorFactory.get_resource_calculator(calculator_type="gpu")
    assert type(calculator) == GpuResourceCalculator
    resources_info = calculator.get_resources_per_job(
        total_num_cpus=num_cpus,
        total_num_gpus=num_gpus,
        num_jobs=num_jobs,
        minimum_cpu_per_job=1,  # allows 4 jobs to run in parallel
        minimum_gpu_per_job=0.5,  # allows 8 jobs to run in parallel
    )
    # Only 4 jobs can run at once, so the 4 GPUs spread out to 1 GPU per job.
    expected_num_parallel_jobs = 4
    expected_resources_per_trial = dict(
        cpu=1,
        gpu=1,
    )
    expected_batches = 5
    assert expected_resources_per_trial == resources_info["resources_per_job"]
    assert expected_num_parallel_jobs == resources_info["num_parallel_jobs"]
    assert expected_batches == resources_info["batches"]
def test_non_parallel_gpu_calculator():
    """Non-parallel GPU mode: 2 jobs share 32 CPUs (16 each), 1 GPU apiece, 1 batch."""
    num_cpus = 32
    num_gpus = 4
    num_jobs = 2
    calculator = ResourceCalculatorFactory.get_resource_calculator(calculator_type="non_parallel_gpu")
    assert type(calculator) == NonParallelGpuResourceCalculator
    resources_info = calculator.get_resources_per_job(
        total_num_cpus=num_cpus,
        total_num_gpus=num_gpus,
        num_jobs=num_jobs,
        minimum_cpu_per_job=1,
        minimum_gpu_per_job=1,
    )
    expected_num_parallel_jobs = 2
    expected_resources_per_trial = dict(
        cpu=16,
        gpu=1,
    )
    expected_batches = 1
    assert expected_resources_per_trial == resources_info["resources_per_job"]
    assert expected_num_parallel_jobs == resources_info["num_parallel_jobs"]
    assert expected_batches == resources_info["batches"]
@pytest.mark.parametrize("calculator_type", ["cpu", "gpu", "non_parallel_gpu"])
def METHOD_NAME(calculator_type):
    """Every calculator type must raise when zero CPUs/GPUs are available."""
    num_cpus = 0
    num_gpus = 0
    num_jobs = 20
    calculator = ResourceCalculatorFactory.get_resource_calculator(calculator_type=calculator_type)
    # Zero resources cannot satisfy the 1-CPU/1-GPU per-job minimum.
    # (Dropped the unused `resources_info`/`e_info` bindings flagged by linters.)
    with pytest.raises(Exception, match=r"Cannot train model with provided resources! .*"):
        calculator.get_resources_per_job(
            total_num_cpus=num_cpus,
            total_num_gpus=num_gpus,
            num_jobs=num_jobs,
            minimum_cpu_per_job=1,
            minimum_gpu_per_job=1,
        )
5,093 | verify square root real | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_square_root."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class SquareRootOpTest(test.TestCase):
  """Tests matrix_square_root by checking that matmul(sqrtm(A), sqrtm(A)) == A."""

  def _verifySquareRoot(self, matrix, np_type):
    """Compute sqrtm(matrix) in the given dtype and verify its square equals matrix."""
    matrix = matrix.astype(np_type)
    # Verify that matmul(sqrtm(A), sqrtm(A)) = A
    sqrt = gen_linalg_ops.matrix_square_root(matrix)
    with ops.device("/cpu:0"):
      square = math_ops.matmul(sqrt, sqrt)
      self.assertShapeEqual(matrix, square)
      self.assertAllClose(matrix, square, rtol=1e-4, atol=1e-3)

  def METHOD_NAME(self, x):
    # Verify for both real float dtypes.
    for np_type in [np.float32, np.float64]:
      self._verifySquareRoot(x, np_type)

  def _verifySquareRootComplex(self, x):
    # Verify for both complex dtypes.
    for np_type in [np.complex64, np.complex128]:
      self._verifySquareRoot(x, np_type)

  def _makeBatch(self, matrix1, matrix2):
    # Stack both matrices and tile them into a multi-dimensional batch.
    matrix_batch = np.concatenate(
        [np.expand_dims(matrix1, 0),
         np.expand_dims(matrix2, 0)])
    matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
    return matrix_batch

  def _testMatrices(self, matrix1, matrix2):
    """Run real (and, off ROCm, complex) checks on both matrices and their batch."""
    # Real
    self.METHOD_NAME(matrix1)
    self.METHOD_NAME(matrix2)
    self.METHOD_NAME(self._makeBatch(matrix1, matrix2))
    if not test.is_built_with_rocm():
      # ROCm does not support BLAS operations for complex types
      # Complex
      matrix1 = matrix1.astype(np.complex64)
      matrix2 = matrix2.astype(np.complex64)
      matrix1 += 1j * matrix1
      matrix2 += 1j * matrix2
      self._verifySquareRootComplex(matrix1)
      self._verifySquareRootComplex(matrix2)
      self._verifySquareRootComplex(self._makeBatch(matrix1, matrix2))

  def testSymmetricPositiveDefinite(self):
    matrix1 = np.array([[2., 1.], [1., 2.]])
    matrix2 = np.array([[3., -1.], [-1., 3.]])
    self._testMatrices(matrix1, matrix2)

  def testAsymmetric(self):
    matrix1 = np.array([[0., 4.], [-1., 5.]])
    matrix2 = np.array([[33., 24.], [48., 57.]])
    self._testMatrices(matrix1, matrix2)

  def testIdentityMatrix(self):
    # The identity is its own square root.
    # 2x2
    identity = np.array([[1., 0], [0, 1.]])
    self.METHOD_NAME(identity)
    # 3x3
    identity = np.array([[1., 0, 0], [0, 1., 0], [0, 0, 1.]])
    self.METHOD_NAME(identity)

  def testEmpty(self):
    # Zero-sized batch and zero-sized matrix dimensions must not crash.
    self.METHOD_NAME(np.empty([0, 2, 2]))
    self.METHOD_NAME(np.empty([2, 0, 0]))

  @test_util.run_v1_only("b/120545219")
  def testWrongDimensions(self):
    # The input to the square root should be at least a 2-dimensional tensor.
    tensor = constant_op.constant([1., 2.])
    with self.assertRaises(ValueError):
      gen_linalg_ops.matrix_square_root(tensor)

  @test_util.run_v1_only("b/120545219")
  def testNotSquare(self):
    # Non-square matrices must be rejected.
    with self.assertRaises(ValueError):
      tensor = constant_op.constant([[1., 0., -1.], [-1., 1., 0.]])
      self.evaluate(gen_linalg_ops.matrix_square_root(tensor))

  @test_util.run_v1_only("b/120545219")
  def testConcurrentExecutesWithoutError(self):
    # Two identical sqrtm ops evaluated together must agree.
    with test_util.use_gpu():
      matrix1 = random_ops.random_normal([5, 5], seed=42)
      matrix2 = random_ops.random_normal([5, 5], seed=42)
      square1 = math_ops.matmul(matrix1, matrix1)
      square2 = math_ops.matmul(matrix2, matrix2)
      sqrt1 = gen_linalg_ops.matrix_square_root(square1)
      sqrt2 = gen_linalg_ops.matrix_square_root(square2)
      all_ops = [sqrt1, sqrt2]
      sqrt = self.evaluate(all_ops)
      self.assertAllClose(sqrt[0], sqrt[1])
# Allow running this test file directly (outside the bazel test runner).
if __name__ == "__main__":
  test.main()
5,094 | density bandwidth | # -*- coding: utf-8 -*-
import warnings
import numpy as np
import scipy.stats
def METHOD_NAME(x, method="KernSmooth", resolution=401):
    """**Bandwidth Selection for Density Estimation**

    Bandwidth selector for :func:`.density` estimation. See ``bw_method`` argument in
    :func:`.scipy.stats.gaussian_kde`.

    The ``"KernSmooth"`` method is adapted from the ``dpik()`` function from the *KernSmooth* R
    package. In this case, it estimates the optimal AMISE bandwidth using the direct plug-in method
    with 2 levels for the Parzen-Rosenblatt estimator with Gaussian kernel.

    Parameters
    -----------
    x : Union[list, np.array, pd.Series]
        A vector of values.
    method : Union[str, float]
        The bandwidth of the kernel. The larger the values, the smoother the estimation. Can be a
        number, or ``"scott"`` or ``"silverman"``
        (see ``bw_method`` argument in :func:`.scipy.stats.gaussian_kde`), or ``"KernSmooth"``.
    resolution : int
        Only when ``method="KernSmooth"``. The number of equally-spaced points over which binning
        is performed to obtain kernel functional approximation (see ``gridsize`` argument in ``KernSmooth::dpik()``).

    Returns
    -------
    float
        Bandwidth value.

    See Also
    --------
    density

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      x = np.random.normal(0, 1, size=100)
      bw = nk.density_bandwidth(x)
      bw

      nk.density_bandwidth(x, method="scott")
      nk.density_bandwidth(x, method=1)

      @savefig p_density_bandwidth.png scale=100%
      x, y = nk.density(x, bandwidth=bw, show=True)
      @suppress
      plt.close()

    References
    ----------
    * Jones, W. M. (1995). Kernel Smoothing, Chapman & Hall.

    """
    if isinstance(method, str):
        method = method.lower()
    # Numeric bandwidths and scipy's named rules are delegated to gaussian_kde.
    if isinstance(method, (float, int)) or method != "kernsmooth":
        return scipy.stats.gaussian_kde(x, bw_method=method).factor

    # --- KernSmooth::dpik() direct plug-in estimate ---
    n = len(x)
    stdev = np.nanstd(x, ddof=1)
    iqr = np.diff(np.percentile(x, [25, 75]))[0] / 1.349
    # Scale estimate: the smaller of SD and normalized IQR (robust to outliers).
    scalest = min(stdev, iqr)

    data_scaled = (x - np.nanmean(x)) / scalest
    min_scaled = np.nanmin(data_scaled)
    max_scaled = np.nanmax(data_scaled)

    # Bin the standardized data onto an equally-spaced grid.
    gcounts = _density_linearbinning(
        x=data_scaled,
        gpoints=np.linspace(min_scaled, max_scaled, resolution),
        truncate=True,
    )

    # Two-stage plug-in: estimate psi6, use it to estimate psi4, then the
    # AMISE-optimal bandwidth.
    alpha = (2 * np.sqrt(2) ** 9 / (7 * n)) ** (1 / 9)
    psi6hat = _density_bkfe(gcounts, 6, alpha, min_scaled, max_scaled)

    alpha = (-3 * np.sqrt(2 / np.pi) / (psi6hat * n)) ** (1 / 7)
    psi4hat = _density_bkfe(gcounts, 4, alpha, min_scaled, max_scaled)

    delta_0 = 1 / ((4 * np.pi) ** (1 / 10))
    # Rescale back to the original data units.
    output = scalest * delta_0 * (1 / (psi4hat * n)) ** (1 / 5)

    return output
def _density_linearbinning(x, gpoints, truncate=True):
    """Linear binning, ported from the R 'KernSmooth' package.

    Each observation's unit mass is split between its two neighboring grid
    points, proportionally to its distance from each.  When ``truncate`` is
    False, observations falling outside the grid are piled onto the nearest
    boundary point instead of being dropped.
    """
    n_grid = gpoints.shape[0]
    grid_lo = gpoints[0]
    grid_hi = gpoints[-1]
    spacing = (grid_hi - grid_lo) / (n_grid - 1)
    counts = np.zeros(n_grid)
    for value in x:
        # Fractional (1-based) grid position of this observation.
        position = (value - grid_lo) / spacing + 1
        left = int(position)
        weight = position - left
        if 1 <= left < n_grid:
            # Split the mass between the two surrounding grid points.
            counts[left - 1] += 1 - weight
            counts[left] += weight
        elif not truncate:
            if left < 1:
                counts[0] += 1
            else:
                counts[-1] += 1
    return counts
def _density_bkfe(gcounts, drv, h, a, b):
    """
    'bkfe' function adapted from KernSmooth R package.

    Binned kernel functional estimate: approximates the density-derivative
    functional of order ``drv`` from the binned counts ``gcounts`` on the grid
    [a, b], using a Gaussian kernel with bandwidth ``h``.
    """
    resol = len(gcounts)
    # Set the sample size and bin width
    n = np.nansum(gcounts)
    delta = (b - a) / (resol - 1)
    # Obtain kernel weights
    tau = drv + 4
    # Effective kernel support in bins, capped at the grid size.
    L = min(int(tau * h / delta), resol)
    if L == 0:
        warnings.warn(
            "WARNING : Binning grid too coarse for current (small) bandwidth: consider increasing 'resolution'"
        )
    lvec = np.arange(L + 1)
    arg = lvec * delta / h
    # Standard normal density evaluated at the scaled bin offsets.
    dnorm = np.exp(-np.square(arg) / 2) / np.sqrt(2 * np.pi)
    kappam = dnorm / h ** (drv + 1)
    hmold0 = 1
    hmold1 = arg
    hmnew = 1
    if drv >= 2:
        for i in np.arange(2, drv + 1):
            hmnew = arg * hmold1 - (i - 1) * hmold0
            hmold0 = hmold1  # Compute mth degree Hermite polynomial
            hmold1 = hmnew  # by recurrence.
        kappam = hmnew * kappam
    # Now combine weights and counts to obtain estimate
    # (discrete convolution done in the frequency domain; P is the next
    # power of two large enough to avoid circular-convolution wrap-around).
    P = 2 ** (int(np.log(resol + L + 1) / np.log(2)) + 1)
    kappam = np.concatenate((kappam, np.zeros(P - 2 * L - 1), kappam[1:][::-1]), axis=0)
    Gcounts = np.concatenate((gcounts, np.zeros(P - resol)), axis=0)
    kappam = np.fft.fft(kappam)
    Gcounts = np.fft.fft(Gcounts)
    gcounter = gcounts * (np.real(np.fft.ifft(kappam * Gcounts)))[0:resol]
    return np.nansum(gcounter) / n**2
5,095 | test symlinks | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tarfile
import pytest
from platformio import fs
from platformio.compat import IS_WINDOWS
from platformio.package.exception import UnknownManifestError
from platformio.package.pack import PackagePacker
def test_base(tmpdir_factory):
    """Packing fails without a manifest; a minimal package excludes VCS/hidden files."""
    pkg_dir = tmpdir_factory.mktemp("package")
    pkg_dir.join(".git").mkdir().join("file").write("")
    pkg_dir.join(".gitignore").write("")
    pkg_dir.join("._hidden_file").write("")
    pkg_dir.join("main.cpp").write("#include <stdio.h>")
    p = PackagePacker(str(pkg_dir))
    # test missed manifest
    with pytest.raises(UnknownManifestError):
        p.pack()
    # minimal package
    pkg_dir.join("library.json").write('{"name": "foo", "version": "1.0.0"}')
    pkg_dir.mkdir("include").join("main.h").write("#ifndef")
    with fs.cd(str(pkg_dir)):
        p.pack()
    with tarfile.open(os.path.join(str(pkg_dir), "foo-1.0.0.tar.gz"), "r:gz") as tar:
        # .git/ and "._"-prefixed files must not appear in the archive.
        assert set(tar.getnames()) == set(
            [".gitignore", "include/main.h", "library.json", "main.cpp"]
        )
def test_filters(tmpdir_factory):
    """Manifest "export" include/exclude filters control which files get packed."""
    pkg_dir = tmpdir_factory.mktemp("package")
    src_dir = pkg_dir.mkdir("src")
    src_dir.join("main.cpp").write("#include <stdio.h>")
    src_dir.mkdir("util").join("helpers.cpp").write("void")
    pkg_dir.mkdir("include").join("main.h").write("#ifndef")
    test_dir = pkg_dir.mkdir("tests")
    test_dir.join("test_1.h").write("")
    test_dir.join("test_2.h").write("")
    # test include with remap of root
    pkg_dir.join("library.json").write(
        json.dumps(dict(name="bar", version="1.2.3", export={"include": "src"}))
    )
    p = PackagePacker(str(pkg_dir))
    with tarfile.open(p.pack(str(pkg_dir)), "r:gz") as tar:
        assert set(tar.getnames()) == set(
            ["util/helpers.cpp", "main.cpp", "library.json"]
        )
    # NOTE(review): packing with a remapped root appears to leave a copy of
    # library.json inside src/; remove it so the next pack starts clean --
    # confirm against PackagePacker's implementation.
    os.unlink(str(src_dir.join("library.json")))
    # test include "src" and "include"
    pkg_dir.join("library.json").write(
        json.dumps(
            dict(name="bar", version="1.2.3", export={"include": ["src", "include"]})
        )
    )
    p = PackagePacker(str(pkg_dir))
    with tarfile.open(p.pack(str(pkg_dir)), "r:gz") as tar:
        assert set(tar.getnames()) == set(
            ["include/main.h", "library.json", "src/main.cpp", "src/util/helpers.cpp"]
        )
    # test include & exclude
    pkg_dir.join("library.json").write(
        json.dumps(
            dict(
                name="bar",
                version="1.2.3",
                export={"include": ["src", "include"], "exclude": ["*/*.h"]},
            )
        )
    )
    p = PackagePacker(str(pkg_dir))
    with tarfile.open(p.pack(str(pkg_dir)), "r:gz") as tar:
        assert set(tar.getnames()) == set(
            ["library.json", "src/main.cpp", "src/util/helpers.cpp"]
        )
def test_gitgnore_filters(tmpdir_factory):
    """.gitignore patterns (including negation "!" and **-globs) drive packing excludes."""
    pkg_dir = tmpdir_factory.mktemp("package")
    pkg_dir.join(".git").mkdir().join("file").write("")
    pkg_dir.join(".gitignore").write(
        """
    # comment
    gi_file
    gi_folder
    gi_folder_*
    **/main_nested.h
    gi_keep_file
    !gi_keep_file
    LICENSE
    """
    )
    pkg_dir.join("LICENSE").write("")
    pkg_dir.join("gi_keep_file").write("")
    pkg_dir.join("gi_file").write("")
    pkg_dir.mkdir("gi_folder").join("main.h").write("#ifndef")
    pkg_dir.mkdir("gi_folder_name").join("main.h").write("#ifndef")
    pkg_dir.mkdir("gi_nested_folder").mkdir("a").mkdir("b").join("main_nested.h").write(
        "#ifndef"
    )
    pkg_dir.join("library.json").write('{"name": "foo", "version": "1.0.0"}')
    p = PackagePacker(str(pkg_dir))
    with fs.cd(str(pkg_dir)):
        p.pack()
    with tarfile.open(os.path.join(str(pkg_dir), "foo-1.0.0.tar.gz"), "r:gz") as tar:
        # LICENSE is ignored by .gitignore but gi_keep_file is re-included via "!".
        assert set(tar.getnames()) == set(
            ["library.json", "LICENSE", ".gitignore", "gi_keep_file"]
        )
def METHOD_NAME(tmpdir_factory):
    """Symlinks inside a source tarball must survive repacking as symlinks."""
    # Windows does not support symbolic links
    if IS_WINDOWS:
        # Report the test as skipped instead of silently "passing" via a bare
        # return, so CI output reflects that nothing was verified here.
        pytest.skip("Symbolic links are not supported on Windows")
    pkg_dir = tmpdir_factory.mktemp("package")
    src_dir = pkg_dir.mkdir("src")
    src_dir.join("main.cpp").write("#include <stdio.h>")
    pkg_dir.mkdir("include").join("main.h").write("#ifndef")
    # src/main.h is a relative symlink pointing at include/main.h.
    src_dir.join("main.h").mksymlinkto(os.path.join("..", "include", "main.h"))
    pkg_dir.join("library.json").write('{"name": "bar", "version": "2.0.0"}')
    tarball = pkg_dir.join("bar.tar.gz")
    with tarfile.open(str(tarball), "w:gz") as tar:
        for item in pkg_dir.listdir():
            tar.add(str(item), str(item.relto(pkg_dir)))
    p = PackagePacker(str(tarball))
    assert p.pack(str(pkg_dir)).endswith("bar-2.0.0.tar.gz")
    with tarfile.open(os.path.join(str(pkg_dir), "bar-2.0.0.tar.gz"), "r:gz") as tar:
        assert set(tar.getnames()) == set(
            ["include/main.h", "library.json", "src/main.cpp", "src/main.h"]
        )
        # The repacked entry must still be a symlink, not a copied file.
        m = tar.getmember("src/main.h")
        assert m.issym()
def test_source_root(tmpdir_factory):
    """The packer locates the package root (the directory holding the manifest) itself."""
    pkg_dir = tmpdir_factory.mktemp("package")
    root_dir = pkg_dir.mkdir("root")
    src_dir = root_dir.mkdir("src")
    src_dir.join("main.cpp").write("#include <stdio.h>")
    root_dir.join("library.json").write('{"name": "bar", "version": "2.0.0"}')
    p = PackagePacker(str(pkg_dir))
    with tarfile.open(p.pack(str(pkg_dir)), "r:gz") as tar:
        # Archive paths are relative to the detected root, not the outer dir.
        assert set(tar.getnames()) == set(["library.json", "src/main.cpp"])
def test_manifest_uri(tmpdir_factory):
    """An external manifest (manifest_uri) can select a nested package to pack."""
    pkg_dir = tmpdir_factory.mktemp("package")
    root_dir = pkg_dir.mkdir("root")
    src_dir = root_dir.mkdir("src")
    src_dir.join("main.cpp").write("#include <stdio.h>")
    root_dir.join("library.json").write('{"name": "foo", "version": "1.0.0"}')
    bar_dir = root_dir.mkdir("library").mkdir("bar")
    bar_dir.join("library.json").write('{"name": "bar", "version": "2.0.0"}')
    bar_dir.mkdir("include").join("bar.h").write("")
    manifest_path = pkg_dir.join("remote_library.json")
    manifest_path.write(
        '{"name": "bar", "version": "3.0.0", "export": {"include": "root/library/bar"}}'
    )
    p = PackagePacker(str(pkg_dir), manifest_uri="file:%s" % manifest_path)
    p.pack(str(pkg_dir))
    # NOTE(review): the archive name/contents come from the nested package's own
    # library.json (version 2.0.0); the external manifest (3.0.0) apparently only
    # selects the export root -- confirm against PackagePacker behavior.
    with tarfile.open(os.path.join(str(pkg_dir), "bar-2.0.0.tar.gz"), "r:gz") as tar:
        assert set(tar.getnames()) == set(["library.json", "include/bar.h"])
5,096 | load | import os
import uvicorn, json
from asgiref.sync import sync_to_async
import os
import random
import numpy as np
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from cyg_conversation import covert_prompt_to_input_ids_with_history
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
LogitsProcessorList,
MinLengthLogitsProcessor,
TopKLogitsWarper,
TemperatureLogitsWarper,
TopPLogitsWarper,
StoppingCriteriaList,
MaxLengthCriteria,
)
model_name = "aquilachat-7b-huggingface"
server_port = 5050
device = "cuda:0"
def METHOD_NAME():
    """Load the AquilaChat-7B tokenizer and model.

    Returns:
        (model, tokenizer): the fp16 model in eval mode on the configured
        device, and its tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("BAAI/AquilaChat-7B")
    model = AutoModelForCausalLM.from_pretrained("BAAI/AquilaChat-7B")
    model.half()
    model.eval()
    # Use the module-level ``device`` setting instead of a hard-coded "cuda:0"
    # so the target device can be changed in one place.
    model.to(device)
    return model, tokenizer
# Load the model and tokenizer once at import time, and build a reverse
# vocabulary (token id -> token string) for decoding sampled ids.
model, tokenizer = METHOD_NAME()
vocab = tokenizer.get_vocab()
id2word = {v:k for k, v in vocab.items()}
def set_random_seed(seed):
    """Seed the Python, NumPy and PyTorch RNGs for reproducible sampling.

    Seeding is skipped entirely when *seed* is ``None`` or not strictly
    positive.
    """
    if seed is None or seed <= 0:
        return
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
def predict(text,
            max_gen_len=200, top_p=0.95,
            seed=1234, topk=100,
            temperature=0.9,
            sft=True):
    """Generate a completion for *text* with the module-level Aquila model.

    Parameters:
        text: the prompt (treated as a chat message when ``sft`` is True).
        max_gen_len: maximum number of new tokens to sample.
        top_p / topk / temperature: nucleus / top-k / temperature sampling.
        seed: RNG seed; seeding is skipped when None or <= 0.
        sft: when True, wrap the prompt in the conversation template;
             otherwise tokenize the raw text.

    Returns:
        tuple: ``(out, convert_tokens, probs)`` — the (truncated) generated
        text, the surface form of each kept token, and the per-token
        probabilities aligned with those tokens.
    """
    set_random_seed(seed)
    if sft:
        # Chat-style prompt: apply the conversation template with empty history.
        tokens = covert_prompt_to_input_ids_with_history(text, history=[], tokenizer=tokenizer, max_token=2048)
        tokens = torch.tensor(tokens)[None,].to(device)
    else :
        # Raw prompt: drop the final token produced by encode_plus.
        tokens = tokenizer.encode_plus(text)["input_ids"][:-1]
        tokens = torch.tensor(tokens)[None,].to(device)
    input_length = len(tokens[0])
    with torch.no_grad():
        # instantiate logits processors
        logits_processor = LogitsProcessorList(
            [
                # 100007 is used as the EOS token id for this model.
                MinLengthLogitsProcessor(1, eos_token_id=100007),
            ]
        )
        # instantiate logits warpers (top-p, then top-k, then temperature)
        logits_warper = LogitsProcessorList(
            [
                TopPLogitsWarper(top_p),
                TopKLogitsWarper(topk),
                TemperatureLogitsWarper(temperature),
            ]
        )
        stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=input_length + max_gen_len)])
        out = model.sample(
            tokens,
            logits_processor=logits_processor,
            logits_warper=logits_warper,
            stopping_criteria=stopping_criteria,
            return_dict_in_generate=True,
            output_scores=True,
        )
        # print(out)
        # Strip the prompt (plus one extra position) from the sampled sequence.
        out_ids = out["sequences"][0][input_length+1: ].cpu().numpy()
        out_scores = out["scores"]
        out_scores = torch.cat(out_scores, dim=0)[1:]
        out_scores = torch.nn.functional.softmax(out_scores, dim=-1).cpu().numpy()
        probs = []
        for i in range(len(out_ids)):
            # Probability the model assigned to the token it actually emitted.
            probs.append(float(out_scores[i][out_ids[i]]))
        print(f"probs is {probs}")
        convert_tokens = []
        for t in out_ids:
            if t == 100006:
                # NOTE(review): 100006 appears to be a special/boundary token
                # rendered as "[CLS]" — confirm against the tokenizer config.
                convert_tokens.append("[CLS]")
            else :
                convert_tokens.append(id2word.get(t, "[unkonwn_token]"))
        out_text = tokenizer.decode(out_ids.tolist())
        print(out_text)
        out = out_text
        # Truncate at each stop marker and re-align tokens/probs by
        # re-tokenizing the truncated text (the [1:-1] slice drops the
        # special tokens added by encode_plus).
        if "###" in out:
            special_index = out.index("###")
            out = out[: special_index]
            token_length = len(tokenizer.encode_plus(out)["input_ids"][1:-1])
            convert_tokens = convert_tokens[:token_length]
            probs = probs[:token_length]
        if "[UNK]" in out:
            special_index = out.index("[UNK]")
            out = out[:special_index]
            token_length = len(tokenizer.encode_plus(out)["input_ids"][1:-1])
            convert_tokens = convert_tokens[:token_length]
            probs = probs[:token_length]
        if "</s>" in out:
            special_index = out.index("</s>")
            out = out[: special_index]
            token_length = len(tokenizer.encode_plus(out)["input_ids"][1:-1])
            convert_tokens = convert_tokens[:token_length]
            probs = probs[:token_length]
        # Drop a single leading space (tokenizers commonly emit one).
        if len(out) > 0 and out[0] == " ":
            out = out[1:]
            convert_tokens = convert_tokens[1:]
            probs = probs[1:]
    return out, convert_tokens, probs
def init_flask():
    """Create the FastAPI application exposing the ``/func`` endpoint.

    (The name is historical — the app is FastAPI, not Flask.)

    Returns:
        FastAPI: the configured application instance.
    """
    from fastapi import FastAPI, Request
    app = FastAPI()
    @app.post("/func")
    async def get_generate_h(request: Request):
        json_post_raw = await request.json()
        # NOTE(review): request.json() already parses the body; json.loads on
        # the result only succeeds if clients POST a JSON-encoded *string*
        # containing the config object — confirm the expected client payload.
        config = json.loads(json_post_raw)
        text = config["prompt"]
        topp = config.get("top_p", 0.95)
        max_length = config.get("max_new_tokens", 256)
        topk = config.get("top_k_per_token", 1000)
        temperature = config.get("temperature", 0.9)
        sft = config.get("sft", False)
        seed = config.get("seed", 1234)
        print(f"sft is {sft}")
        # predict() is blocking and GPU-bound; run it off the event loop so
        # the server stays responsive.
        out, tokens, probs = await sync_to_async(predict)(text,
                                                          max_gen_len=max_length, top_p=topp,
                                                          topk=topk,
                                                          temperature=temperature, sft=sft,
                                                          seed=seed)
        # Response mimics a completions-style API: one completion with
        # per-token probabilities.
        result = {
            "completions": [{
                "text": out,
                "tokens": tokens,
                "logprobs": probs,
                "top_logprobs_dicts": [{k: v} for k, v in zip(tokens, probs)],
            }],
            "input_length": len(config["prompt"]),
            "model_info":model_name}
        return result
    return app
# Build the app and serve it; a single worker because the model lives in
# this process's GPU memory and cannot be forked.
app = init_flask()
uvicorn.run(app, host='0.0.0.0', port=server_port, workers=1)
5,097 | nozip gallery file | # Copyright 2019 - Remi Ferrand
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
""" This plugin controls the generation of a ZIP archive for a gallery
If the ``zip_gallery`` setting is set, it contains the location of a zip
archive with all original images of the corresponding directory.
To ignore a ZIP gallery generation for a particular album, put
a ``.nozip_gallery`` file next to it in its parent folder. Only the existence
of this ``.nozip_gallery`` file is tested. If no ``.nozip_gallery`` file is
present, then make a ZIP archive with all media files.
See :ref:`compatibility with the encrypt plugin <compatibility-with-encrypt>`.
"""
import logging
import os
import zipfile
from functools import cached_property
from os.path import isfile, join
from sigal import signals
from sigal.gallery import Album
logger = logging.getLogger(__name__)
def _should_generate_album_zip(album):
"""Checks whether a `.nozip_gallery` file exists in the album folder"""
nozipgallerypath = os.path.join(album.src_path, ".nozip_gallery")
return not os.path.isfile(nozipgallerypath)
def _generate_album_zip(album):
    """Make a ZIP archive with all media files and return its path.

    If the ``zip_gallery`` setting is set, it contains the location of a zip
    archive with all original images of the corresponding directory.

    Returns:
        The archive filename (relative to the album destination) on success,
        or ``False`` when no archive is generated.
    """
    zip_gallery = album.settings["zip_gallery"]
    if not zip_gallery or len(album) == 0:
        return False

    zip_gallery = zip_gallery.format(album=album)
    archive_path = join(album.dst_path, zip_gallery)
    if album.settings.get("zip_skip_if_exists", False) and isfile(archive_path):
        logger.debug("Archive %s already created, passing", archive_path)
        return zip_gallery

    # "orig" packs the source media; anything else packs the processed copies.
    attr = "src_path" if album.settings["zip_media_format"] == "orig" else "dst_path"

    # Context manager guarantees the archive is closed even if a write raises
    # (the original left the file handle open on error).
    with zipfile.ZipFile(archive_path, "w", allowZip64=True) as archive:
        for p in album:
            path = getattr(p, attr)
            try:
                archive.write(path, os.path.split(path)[1])
            except OSError as e:
                # logger.warn is deprecated since Python 3.3; use warning().
                logger.warning("Failed to add %s to the ZIP: %s", p, e)

    logger.debug("Created ZIP archive %s", archive_path)
    return zip_gallery
def generate_album_zip(album):
    """Build the album's ZIP archive unless a ``.nozip_gallery`` marker exists.

    If the marker file is present in the album folder, no archive is
    generated and ``False`` is returned; otherwise the archive is created
    (per the ``zip_gallery`` setting) and its path returned.
    """
    # check if ZIP file generation as been disabled by .nozip_gallery file
    if _should_generate_album_zip(album):
        return _generate_album_zip(album)
    logger.info(
        "Ignoring ZIP gallery generation for album '%s' because of present "
        ".nozip_gallery file",
        album.name,
    )
    return False
def METHOD_NAME(album, settings=None):
    """Filesystem based switch to disable ZIP generation for an Album.

    Signal handler for ``album_initialized``: replaces ``Album.zip`` with a
    ``cached_property`` wrapping :func:`generate_album_zip`, so the archive
    is built (or skipped) lazily, once per album instance.
    """
    Album.zip = cached_property(generate_album_zip)
    # __set_name__ must be invoked manually because the descriptor is
    # attached after class creation.
    Album.zip.__set_name__(Album, "zip")
def check_settings(gallery):
    """Validate that ``zip_gallery`` is a filename string.

    A truthy non-string value is logged as an error and reset to ``False``
    so the plugin is effectively disabled rather than failing later.
    """
    zip_setting = gallery.settings["zip_gallery"]
    if zip_setting and not isinstance(zip_setting, str):
        logger.error("'zip_gallery' should be set to a filename")
        gallery.settings["zip_gallery"] = False
def register(settings):
    """Sigal plugin entry point: connect the album-init hook (which installs
    the lazy ZIP property) and the settings validator."""
    signals.album_initialized.connect(METHOD_NAME)
    signals.gallery_initialized.connect(check_settings)
5,098 | test batch mode | import numpy as np
import scipy.sparse as sp
import tensorflow as tf
from spektral.data import Dataset, Graph, loaders
tf.keras.backend.set_floatx("float64")
MODES = {"SINGLE": 0, "BATCH": 1, "MIXED": 2, "DISJOINT": 3}
batch_size = 16
n_nodes = 11
n_node_features = 7
n_edge_features = 3
def _get_graph(n_nodes, n_features, n_edge_features=None, sparse=False):
    """Build one random Graph: uniform node features, a random 0/1 adjacency
    (float32), and optional uniform edge attributes for each nonzero entry.
    The adjacency is converted to CSR when *sparse* is True."""
    features = np.random.rand(n_nodes, n_features)
    adjacency = np.random.randint(0, 2, (n_nodes, n_nodes)).astype("f4")
    if n_edge_features is not None:
        edge_attrs = np.random.rand(np.count_nonzero(adjacency), n_edge_features)
    else:
        edge_attrs = None
    if sparse:
        adjacency = sp.csr_matrix(adjacency)
    return Graph(x=features, a=adjacency, e=edge_attrs)
class TestDataset(Dataset):
    """Minimal in-memory Dataset serving a pre-built list of graphs."""
    def __init__(self, graphs):
        # Must be set before super().__init__(), which invokes read().
        self.graphs = graphs
        super().__init__()
    def read(self):
        # Dataset hook: return the graphs this dataset contains.
        return self.graphs
def _test_single_mode(model, sparse=False, edges=False, **kwargs):
    """Instantiate *model* and call it once in single mode (one graph)."""
    graph = _get_graph(
        n_nodes=n_nodes,
        n_features=n_node_features,
        n_edge_features=n_edge_features if edges else None,
        sparse=sparse,
    )
    dataset = TestDataset([graph])
    loader = loaders.SingleLoader(dataset, epochs=1)
    inputs = list(loader)[0]
    instance = model(**kwargs)
    output = instance(inputs)
def _test_disjoint_mode(model, sparse=False, edges=False, **kwargs):
    """Instantiate *model* and call it once in disjoint mode (batched graphs)."""
    graphs = [
        _get_graph(
            n_nodes=n_nodes,
            n_features=n_node_features,
            n_edge_features=n_edge_features if edges else None,
            sparse=sparse,
        )
        for _ in range(batch_size)
    ]
    dataset = TestDataset(graphs)
    loader = loaders.DisjointLoader(dataset, epochs=1, batch_size=batch_size)
    inputs = next(loader)
    instance = model(**kwargs)
    output = instance(inputs)
def METHOD_NAME(model, edges=False, **kwargs):
    """Instantiate *model* and call it once in batch mode (dense, zero-padded)."""
    graphs = [
        _get_graph(
            n_nodes=n_nodes,
            n_features=n_node_features,
            n_edge_features=n_edge_features if edges else None,
        )
        for _ in range(batch_size)
    ]
    dataset = TestDataset(graphs)
    loader = loaders.BatchLoader(dataset, epochs=1, batch_size=batch_size)
    inputs = next(loader)
    instance = model(**kwargs)
    output = instance(inputs)
def _test_mixed_mode(model, sparse=False, edges=False, **kwargs):
    """Instantiate *model* and call it once in mixed mode (one shared
    adjacency, batched node features)."""
    graphs = []
    for i in range(batch_size):
        graph = _get_graph(
            n_nodes=n_nodes,
            n_features=n_node_features,
            n_edge_features=n_edge_features if edges else None,
            sparse=sparse,
        )
        if i == 0:
            # Mixed mode shares a single adjacency across the whole batch:
            # take it from the first graph and clear it there.
            # NOTE(review): graphs 1..N-1 keep their individual `a`;
            # presumably MixedLoader ignores per-graph adjacencies — confirm.
            a = graph.a
            graph.a = None
        graphs.append(graph)
    dataset = TestDataset(graphs)
    # The shared adjacency lives on the dataset, not the graphs.
    dataset.a = a
    loader = loaders.MixedLoader(dataset, epochs=1, batch_size=batch_size)
    inputs = loader.__next__()
    model_instance = model(**kwargs)
    output = model_instance(inputs)
def _test_get_config(layer, **kwargs):
layer_instance = layer(**kwargs)
config = layer_instance.get_config()
layer_instance_new = layer(**config)
config_new = layer_instance_new.get_config()
# Remove 'name' if we have advanced activations (needed for GeneralConv)
if (
"activation" in config
and isinstance(config["activation"], dict)
and "class_name" in config["activation"]
):
config["activation"]["config"].pop("name")
config_new["activation"]["config"].pop("name")
assert config_new == config
def run_model(config):
    """Build and run a one-layer model in every data mode listed in *config*.

    *config* is a dict with keys:
      - ``"model"``:  the model class under test;
      - ``"modes"``:  list of data-mode ids from the global ``MODES`` dict;
      - ``"kwargs"``: keyword arguments for the model constructor
        (including mandatory ones);
      - ``"dense"`` / ``"sparse"``: whether dense / sparse adjacency
        matrices are supported;
      - ``"edges"``:  whether edge attributes are supported (optional).

    For every supported mode the model is instantiated and called on loader
    output — the call itself is the check (it must not crash and must
    produce correctly shaped output) — and finally get_config()
    round-tripping is verified via :func:`_test_get_config`.
    """
    model = config["model"]
    kwargs = config["kwargs"]
    edges = config.get("edges", False)

    # These modes take a `sparse` flag; batch mode is always dense and is
    # dispatched separately below.
    sparse_capable_runners = {
        MODES["SINGLE"]: _test_single_mode,
        MODES["MIXED"]: _test_mixed_mode,
        MODES["DISJOINT"]: _test_disjoint_mode,
    }

    for mode in config["modes"]:
        if mode == MODES["BATCH"]:
            METHOD_NAME(model, edges=edges, **kwargs)
        elif mode in sparse_capable_runners:
            runner = sparse_capable_runners[mode]
            if config["dense"]:
                runner(model, sparse=False, edges=edges, **kwargs)
            if config["sparse"]:
                runner(model, sparse=True, edges=edges, **kwargs)

    _test_get_config(model, **kwargs)
5,099 | test trainer manual optimization config | # Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import Mock
import pytest
import torch
from lightning.fabric.utilities.warnings import PossibleUserWarning
from lightning.pytorch import LightningDataModule, LightningModule, Trainer
from lightning.pytorch.demos.boring_classes import BoringModel, RandomDataset
from lightning.pytorch.trainer.configuration_validator import (
__verify_eval_loop_configuration,
__verify_train_val_loop_configuration,
)
from lightning.pytorch.utilities.exceptions import MisconfigurationException
def test_wrong_train_setting(tmpdir):
    """Fitting a model without a `training_step()` must raise."""
    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
    model = BoringModel()
    model.training_step = None
    with pytest.raises(MisconfigurationException, match=r"No `training_step\(\)` method defined."):
        trainer.fit(model)
def test_wrong_configure_optimizers(tmpdir):
    """Fitting a model without `configure_optimizers()` must raise."""
    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
    model = BoringModel()
    model.configure_optimizers = None
    with pytest.raises(MisconfigurationException, match=r"No `configure_optimizers\(\)` method defined."):
        trainer.fit(model)
def test_fit_val_loop_config(tmpdir):
    """When either val loop or val data are missing raise warning."""
    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)
    # no val data has val loop
    with pytest.warns(UserWarning, match=r"You passed in a `val_dataloader` but have no `validation_step`"):
        model = BoringModel()
        model.validation_step = None
        trainer.fit(model)
    # has val loop but no val data
    # (PossibleUserWarning: missing data may be intentional, so a softer
    # warning category is expected here.)
    with pytest.warns(PossibleUserWarning, match=r"You defined a `validation_step` but have no `val_dataloader`"):
        model = BoringModel()
        model.val_dataloader = None
        trainer.fit(model)
def test_eval_loop_config(tmpdir):
    """Each eval entry point must raise when its required hook is missing."""
    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1)

    # (hook to remove, trainer entry point, expected error message)
    cases = [
        ("validation_step", trainer.validate, r"No `validation_step\(\)` method defined"),
        ("test_step", trainer.test, r"No `test_step\(\)` method defined"),
        ("predict_step", trainer.predict, r"`predict_step` cannot be None."),
        ("forward", trainer.predict, r"requires `forward` method to run."),
    ]
    for hook, entry_point, pattern in cases:
        model = BoringModel()
        setattr(model, hook, None)
        with pytest.raises(MisconfigurationException, match=pattern):
            entry_point(model)
@pytest.mark.parametrize("datamodule", [False, True])
def test_trainer_predict_verify_config(tmpdir, datamodule):
    """predict() works with dataloaders passed directly or via a DataModule."""
    class TestModel(LightningModule):
        # Minimal model: a single linear layer mapping 32 -> 2 features.
        def __init__(self):
            super().__init__()
            self.layer = torch.nn.Linear(32, 2)
        def forward(self, x):
            return self.layer(x)
    class TestLightningDataModule(LightningDataModule):
        # Wraps a list of dataloaders, served for the test and predict stages.
        def __init__(self, dataloaders):
            super().__init__()
            self._dataloaders = dataloaders
        def test_dataloader(self):
            return self._dataloaders
        def predict_dataloader(self):
            return self._dataloaders
    # Two dataloaders over 2-sample random datasets (default batch_size=1).
    data = [torch.utils.data.DataLoader(RandomDataset(32, 2)), torch.utils.data.DataLoader(RandomDataset(32, 2))]
    if datamodule:
        data = TestLightningDataModule(data)
    model = TestModel()
    trainer = Trainer(default_root_dir=tmpdir)
    results = trainer.predict(model, data)
    # One result list per dataloader; each prediction is a (1, 2) tensor.
    assert len(results) == 2
    assert results[0][0].shape == torch.Size([1, 2])
def METHOD_NAME():
    """Trainer features that require automatic optimization must raise
    a clear error when the model uses manual optimization."""
    model = BoringModel()
    model.automatic_optimization = False

    unsupported = (
        ({"gradient_clip_val": 1.0}, "Automatic gradient clipping is not supported"),
        ({"accumulate_grad_batches": 2}, "Automatic gradient accumulation is not supported"),
    )
    for trainer_kwargs, pattern in unsupported:
        trainer = Trainer(**trainer_kwargs)
        with pytest.raises(MisconfigurationException, match=pattern):
            trainer.fit(model)
def test_legacy_epoch_end_hooks():
    """Models defining the removed v1.x `*_epoch_end` hooks must raise
    NotImplementedError during loop-configuration verification."""
    class TrainingEpochEndModel(BoringModel):
        def training_epoch_end(self, outputs):
            pass
    class ValidationEpochEndModel(BoringModel):
        def validation_epoch_end(self, outputs):
            pass
    # The verification helpers only inspect hook presence, so a Mock trainer
    # is sufficient.
    trainer = Mock()
    with pytest.raises(NotImplementedError, match="training_epoch_end` has been removed in v2.0"):
        __verify_train_val_loop_configuration(trainer, TrainingEpochEndModel())
    with pytest.raises(NotImplementedError, match="validation_epoch_end` has been removed in v2.0"):
        __verify_train_val_loop_configuration(trainer, ValidationEpochEndModel())
    class TestEpochEndModel(BoringModel):
        def test_epoch_end(self, outputs):
            pass
    # The eval-loop check is exercised for both the "val" and "test" stages.
    with pytest.raises(NotImplementedError, match="validation_epoch_end` has been removed in v2.0"):
        __verify_eval_loop_configuration(ValidationEpochEndModel(), "val")
    with pytest.raises(NotImplementedError, match="test_epoch_end` has been removed in v2.0"):
        __verify_eval_loop_configuration(TestEpochEndModel(), "test")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.