id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
6,500 | get next | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRoutePortsLocationsOperations(object):
"""ExpressRoutePortsLocationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    """Store the plumbing objects this operation group delegates to.

    :param client: Client for service requests.
    :param config: Configuration of the service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    self._client = client
    self._config = config
    self._serialize = serializer
    self._deserialize = deserializer
def list(
    self,
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.ExpressRoutePortsLocationListResult"]
    """Retrieves all ExpressRoutePort peering locations. Does not return available bandwidths for each
    location. Available bandwidths can only be obtained when retrieving a specific peering
    location.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ExpressRoutePortsLocationListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_08_01.models.ExpressRoutePortsLocationListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRoutePortsLocationListResult"]
    # Map well-known HTTP status codes to the azure-core exception types
    # callers expect; a caller-supplied error_map takes precedence.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-08-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request. The first page uses the templated operation
        # URL; later pages reuse the service-provided nextLink verbatim.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # nextLink already carries its own query string, so no
            # api-version parameter is re-appended here.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and yield (continuation token, items).
        deserialized = self._deserialize('ExpressRoutePortsLocationListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    # NOTE(review): masked identifier; in the generated SDK this is the
    # per-page fetch callable (presumably `get_next`) handed to ItemPaged.
    def METHOD_NAME(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        METHOD_NAME, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations'}  # type: ignore
def get(
    self,
    location_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.ExpressRoutePortsLocation"
    """Retrieves a single ExpressRoutePort peering location, including the list of available
    bandwidths available at said peering location.

    :param location_name: Name of the requested ExpressRoutePort peering location.
    :type location_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ExpressRoutePortsLocation, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2018_08_01.models.ExpressRoutePortsLocation
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRoutePortsLocation"]
    # Map well-known HTTP status codes to azure-core exception types;
    # a caller-supplied error_map takes precedence.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-08-01"
    accept = "application/json"

    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'locationName': self._serialize.url("location_name", location_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Send the request synchronously through the client pipeline.
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ExpressRoutePortsLocation', pipeline_response)

    if cls:
        # The caller's hook receives the raw response, the model, and a
        # (here empty) response-headers dict.
        return cls(pipeline_response, deserialized, {})

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations/{locationName}'} # type: ignore |
6,501 | test get long text from | from dataclasses import asdict
from socket import gaierror
from unittest.mock import MagicMock, Mock, patch
import pytest
from tribler.core.components.reporter.exception_handler import CoreExceptionHandler
from tribler.core.sentry_reporter import sentry_reporter
from tribler.core.sentry_reporter.sentry_reporter import SentryReporter
# pylint: disable=protected-access, redefined-outer-name
# fmt: off
@pytest.fixture
def exception_handler():
    # Fresh CoreExceptionHandler per test so handler state (e.g. a stored
    # unreported_error or a replaced report_callback) never leaks between tests.
    return CoreExceptionHandler()
def raise_error(error):  # pylint: disable=inconsistent-return-statements
    """Raise *error* and immediately catch it, returning the caught instance.

    The raise/except round trip attaches a real traceback (including this
    frame) to the exception object, which the tests below rely on.
    """
    try:
        raise error
    except type(error) as caught:
        return caught
def test_is_ignored(exception_handler):
    # Exceptions matching CoreExceptionHandler's ignore lists must be suppressed.
    ignored = [
        gaierror(),                                # by exception type
        ConnectionResetError(),                    # by exception type
        OSError(113, 'Arbitrary error message'),   # by exception type and error code
        OSError(0, 'Arbitrary error message'),     # by exception type and error code
        RuntimeError('A message with the following substring: invalid info-hash'),  # by type and regex
    ]
    for exc in ignored:
        assert exception_handler._is_ignored(exc)
    # A RuntimeError without the matching substring must not be ignored.
    assert not exception_handler._is_ignored(RuntimeError('Another message without a substring'))


def test_is_not_ignored(exception_handler):
    # Exceptions outside IGNORED_ERRORS_BY_TYPE, IGNORED_ERRORS_BY_CODE and
    # IGNORED_ERRORS_BY_SUBSTRING must pass through unimpeded.
    not_ignored = [
        AttributeError(),                          # type not listed anywhere
        OSError(1, 'Arbitrary error message'),     # OSError, but code 1 is not listed
        RuntimeError('Arbitrary error message'),   # type listed, but regex does not match
    ]
    for exc in not_ignored:
        assert not exception_handler._is_ignored(exc)


def test_create_exception_from(exception_handler):
    # A plain string must be convertible into a real Exception instance.
    assert isinstance(exception_handler._create_exception_from('Any'), Exception)
def METHOD_NAME(exception_handler):
    # The generated long text must include the name of the stack frame
    # that raised the error.
    caught = raise_error(AttributeError('Any'))
    long_text = exception_handler._get_long_text_from(caught)
    assert 'raise_error' in long_text
@patch(f'{sentry_reporter.__name__}.{SentryReporter.__name__}.{SentryReporter.event_from_exception.__name__}',
       new=MagicMock(return_value={'sentry': 'event'}))
def test_unhandled_error_observer_exception(exception_handler):
    # test that unhandled exception, represented by Exception, reported to the GUI
    context = {'exception': raise_error(AttributeError('Any')), 'Any key': 'Any value'}
    exception_handler.report_callback = MagicMock()
    exception_handler.unhandled_error_observer(None, context)
    exception_handler.report_callback.assert_called()

    # get the argument that has been passed to the report_callback
    reported_error = exception_handler.report_callback.call_args_list[-1][0][0]
    # The reported error must carry the exception type/text, a stack trace
    # containing the raising frame, the (patched) sentry event, and the
    # remaining context entries stringified (the exception key removed).
    assert reported_error.type == 'AttributeError'
    assert reported_error.text == 'Any'
    assert 'raise_error' in reported_error.long_text
    assert reported_error.event == {'sentry': 'event'}
    assert reported_error.context == "{'Any key': 'Any value'}"
    assert reported_error.should_stop
def test_unhandled_error_observer_only_message(exception_handler):
    # An unhandled error delivered as a bare message (no exception object)
    # must still be wrapped into a synthetic Exception and sent to the GUI.
    exception_handler.report_callback = MagicMock()
    exception_handler.unhandled_error_observer(None, {'message': 'Any'})
    exception_handler.report_callback.assert_called()

    # Inspect the argument that was passed to report_callback.
    err = exception_handler.report_callback.call_args_list[-1][0][0]
    assert err.type == 'Exception'
    assert err.text == 'Received error without exception: Any'
    assert err.long_text == 'Exception: Received error without exception: Any\n'
    assert not err.event
    assert err.context == '{}'
    assert err.should_stop
    assert err.additional_information == {}


def test_unhandled_error_observer_store_unreported_error(exception_handler):
    # Without a report_callback the error must be kept for later reporting.
    exception_handler.unhandled_error_observer(None, {'message': 'Any'})
    assert exception_handler.unreported_error


def test_unhandled_error_observer_false_should_stop(exception_handler):
    # A falsy should_stop in the context dict must be propagated as-is.
    exception_handler.unhandled_error_observer(None, {'message': 'Any', 'should_stop': False})
    assert exception_handler.unreported_error.should_stop is False


def test_unhandled_error_observer_additional_information(exception_handler):
    # additional_information collected by the sentry reporter must reach the
    # `report_callback` unchanged.
    exception_handler.report_callback = MagicMock()
    exception_handler.sentry_reporter.additional_information['a'] = 1
    exception_handler.unhandled_error_observer(None, {})
    err = exception_handler.report_callback.call_args_list[-1][0][0]
    assert err.additional_information == {'a': 1}
    # default dict could produce TypeError: first argument must be callable or None
    assert asdict(err)
def test_unhandled_error_observer_ignored(exception_handler):
    # Errors on the IGNORED_ERRORS_BY_CODE list (OSError 113) are only logged
    # as a warning and must never be forwarded to the GUI.
    exception_handler.report_callback = MagicMock()
    with patch.object(exception_handler.logger, 'warning') as warning_mock:
        exception_handler.unhandled_error_observer(None, {'exception': OSError(113, '')})
        warning_mock.assert_called_once()
    exception_handler.report_callback.assert_not_called()
@patch.object(SentryReporter, 'ignore_logger', new=Mock(side_effect=ValueError))
@patch.object(SentryReporter, 'capture_exception')
def test_unhandled_error_observer_inner_exception(mocked_capture_exception: Mock,
                                                  exception_handler: CoreExceptionHandler):
    # If the observer itself raises (here: ignore_logger is patched to blow up
    # with ValueError), the inner exception must be captured by Sentry and
    # then re-raised to the caller rather than swallowed.
    with pytest.raises(ValueError):
        exception_handler.unhandled_error_observer({}, {})
    mocked_capture_exception.assert_called_once()
6,502 | athena getresult | from sqlalchemy import create_engine
class basefunc:
    """Helpers that run metadata/data queries against Athena via SQLAlchemy.

    NOTE(review): every statement below is assembled by string interpolation
    from caller-supplied identifiers (database / table / rows_num). SQL
    identifiers cannot be bound as parameters, so these values must be
    validated upstream before they reach this module.
    """

    # athena
    @staticmethod
    def athena_getdb(uri, schema):
        """Return the list of database names visible through *uri*."""
        engine = create_engine(uri, echo=True)
        rows = engine.execute('SHOW DATABASES').fetchall()
        return [value for row in rows for value in row]

    @staticmethod
    def athena_gettable(uri, database, schema):
        """Return [{'name': ..., 'meta': ...}] for every table in *database*."""
        engine = create_engine(uri, echo=True)
        rows = engine.execute('SHOW TABLES FROM ' + database).fetchall()
        return [
            {"name": table_name,
             "meta": basefunc.athena_getmeta(database=database, schema=schema,
                                             table=table_name, engine=engine)}
            for row in rows for table_name in row
        ]

    @staticmethod
    def athena_getmeta(database, table, schema, engine=None):
        """Describe *table*'s columns as [{'key', 'colIndex', 'dataType'}] dicts."""
        columns = engine.execute('select * from ' + database + '.' + table + ' limit 500').keys()
        return [
            {"key": name, "colIndex": index, "dataType": None}
            for index, name in enumerate(columns)
        ]

    @staticmethod
    def athena_getdata(uri, database, table, schema, rows_num):
        """Fetch up to *rows_num* rows of *table* as a list of row-lists."""
        engine = create_engine(uri, echo=True)
        fetched = engine.execute(
            'select * from ' + database + '.' + table + ' limit ' + rows_num).fetchall()
        return [list(row) for row in fetched]

    @staticmethod
    def athena_getdetail(uri, database, table, schema, rows_num):
        """Return [meta, columns, rows] for a table preview."""
        engine = create_engine(uri, echo=True)
        meta = basefunc.athena_getmeta(database=database, schema=schema, table=table, engine=engine)
        preview_sql = f'select * from {database}.{table} limit {rows_num}'
        columns, rows = basefunc.METHOD_NAME(sql=preview_sql, engine=engine)
        return [meta, columns, rows]

    @staticmethod
    def METHOD_NAME(sql, uri=None, engine=None):
        """Execute *sql* and return [column_names, rows]; builds an engine from
        *uri* only when one was not handed in."""
        if engine is None:
            engine = create_engine(uri, echo=True)
        result = engine.execute(sql)
        rows = [list(row) for row in result.fetchall()]
        columns = list(result.keys())
        return [columns, rows]
def lambda_handler(event, context):
    """AWS Lambda entry point: dispatch *event['func']* to a basefunc helper.

    Helper lookup is by naming convention: '<sourceType>_<operation>' must be
    a staticmethod on basefunc. All event keys are read up front, so a
    missing key raises KeyError before any dispatch happens.
    """
    uri = event['uri']
    source_type = event['sourceType']
    func = event['func']
    database = event['db']
    table = event['table']
    schema = event['schema']
    rows_num = event['rowsNum']
    sql = event['query']
    helpers = basefunc.__dict__

    if func == 'getDatabases':
        return helpers[f'{source_type}_getdb'].__func__(uri=uri, schema=schema)
    if func == 'getSchemas':
        return helpers[f'{source_type}_getschema'].__func__(uri=uri, db=database)
    if func == 'getTables':
        return helpers[f'{source_type}_gettable'].__func__(uri=uri, database=database, schema=schema)
    if func == 'getTableDetail':
        meta, columns, rows = helpers[f'{source_type}_getdetail'].__func__(
            uri=uri, database=database, table=table, schema=schema, rows_num=rows_num)
        return {
            "meta": meta,
            "columns": columns,
            "rows": rows
        }
    if func == 'getResult':
        columns, rows = helpers[f'{source_type}_getresult'].__func__(uri=uri, sql=sql)
        return {
            "columns": columns,
            "rows": rows
        }
    return 'The wrong func was entered'
|
6,503 | train | import math
import random
import torch
import tqdm
import editdistance
from torch import nn
from torch.optim import Adam
from torch.optim.lr_scheduler import ExponentialLR
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataset import random_split
from torch.nn.utils.rnn import pad_sequence
from torchaudio.transforms import RNNTLoss
UNK_IDX, PAD_IDX, BOS_IDX, EOS_IDX = 0, 1, 2, 3
special_symbols = ['<unk>', '<pad>', '<bos>', '<eos>']
class G2pTrainer():
def __init__(self,
             device: torch.device,
             loss_device: torch.device,
             model: nn.Module,
             dataset: Dataset,
             batch_size: int = 256,
             epochs: int = 20,
             lr: float = 0.005,
             min_lr: float = 5e-5,
             gamma: float = 0.8,
             grad_clip: float = 2,
             seed=None) -> None:
    # `device` runs the model's forward/backward passes; `loss_device` is
    # where the RNN-T loss is evaluated (the two may differ).
    self.device = device
    self.loss_device = loss_device
    self.model = model
    self.dataset = dataset
    self.epochs = epochs
    self.min_lr = min_lr
    self.grad_clip = grad_clip
    self.model.to(device)
    # BOS_IDX is used as the RNN-T "blank" label; `clamp` bounds the loss
    # gradients with the same value used for gradient clipping.
    self.loss_fn = RNNTLoss(blank=BOS_IDX, clamp=self.grad_clip)
    self.optimizer = Adam(model.parameters(), lr=lr,
                          betas=(0.9, 0.98), eps=1e-9)
    self.scheduler = ExponentialLR(
        self.optimizer, gamma=gamma, verbose=True)
    # Hold out ~10% of the dataset for validation; `seed` makes the split
    # reproducible.
    valid_set_size = len(dataset) // 10
    train_set_size = len(dataset) - valid_set_size
    g = torch.Generator()
    if seed is not None:
        g.manual_seed(seed)
    valid_set, train_set = random_split(
        dataset, [valid_set_size, train_set_size], g)

    def collate_fn(batch):
        # Pad variable-length (src, tgt) pairs into dense tensors and keep
        # the true lengths, which the RNN-T loss needs.
        src_batch, tgt_batch = [], []
        for src, tgt in batch:
            src_batch.append(src)
            tgt_batch.append(tgt)
        src_lengths = torch.tensor([len(s)
                                    for s in src_batch], dtype=torch.int32)
        src_batch = pad_sequence(src_batch, padding_value=0)
        tgt_lengths = torch.tensor([len(s)
                                    for s in tgt_batch], dtype=torch.int32)
        tgt_batch = pad_sequence(tgt_batch, padding_value=0)
        # pad_sequence yields (time, batch); transpose to batch-first.
        return src_batch.transpose(0, 1).contiguous(), tgt_batch.transpose(
            0, 1).contiguous(), src_lengths, tgt_lengths

    # Training loader shuffles (third positional arg True); validation does not.
    self.train_dl = DataLoader(
        train_set, batch_size, True, collate_fn=collate_fn)
    self.valid_dl = DataLoader(
        valid_set, batch_size, collate_fn=collate_fn)
def _accuracy(self, logits, tgt_true):
tgt_pred = torch.argmax(logits, dim=-1)
correct = torch.sum(torch.logical_and(
tgt_pred == tgt_true, torch.logical_or(
tgt_pred != 0, tgt_true != 0)).int()).item()
total = torch.sum((tgt_true != 0).int()).item()
return correct, total
def _train_epoch(self):
    # One full pass over the training loader; returns the running-mean loss.
    # Masked method name on the model — presumably `train()` (switches the
    # model into training mode); TODO confirm against the unmasked source.
    self.model.METHOD_NAME()
    losses = 0
    count = 0
    pbar = tqdm.tqdm(self.train_dl)
    for src, tgt, src_lengths, tgt_lengths in pbar:
        # Teacher forcing: prepend a BOS column to the target batch for the
        # prediction network's input.
        tgt_in = torch.concat(
            [torch.full([tgt.shape[0], 1], BOS_IDX), tgt], dim=1)
        logits = self.model(src.to(self.device), tgt_in.to(self.device))
        self.optimizer.zero_grad()
        # Loss is evaluated on loss_device, which may differ from the
        # model's device.
        loss = self.loss_fn(
            logits.to(self.loss_device),
            tgt.to(self.loss_device),
            src_lengths.to(self.loss_device),
            tgt_lengths.to(self.loss_device))
        loss.backward()
        # Clip the global gradient norm before stepping the optimizer.
        torch.nn.utils.clip_grad.clip_grad_norm_(
            self.model.parameters(), self.grad_clip)
        self.optimizer.step()
        losses += loss.item()
        count += 1
        # Progress bar shows the running mean, not the per-batch loss.
        pbar.set_description('loss: {:.4f}'.format(losses / count))
    return losses / count
def _eval(self):
    """Compute the mean RNN-T loss over the validation set.

    Runs in eval mode under ``torch.no_grad()``: the original built an
    autograd graph for every validation batch even though ``backward()`` is
    never called, wasting memory and time. Disabling gradient tracking does
    not change the computed loss values.

    :returns: average validation loss as a float.
    """
    self.model.eval()
    losses = 0
    count = 0
    with torch.no_grad():
        for src, tgt, src_lengths, tgt_lengths in self.valid_dl:
            # Teacher forcing: prepend a BOS column to the target batch.
            tgt_in = torch.concat(
                [torch.full([tgt.shape[0], 1], BOS_IDX), tgt], dim=1)
            logits = self.model(src.to(self.device), tgt_in.to(self.device))
            loss = self.loss_fn(
                logits.to(self.loss_device),
                tgt.to(self.loss_device),
                src_lengths.to(self.loss_device),
                tgt_lengths.to(self.loss_device))
            losses += loss.item()
            count += 1
    return losses / count
def _save_state_dic(self, name):
    """Write the model's state dict to ``g2p-<name>.ptsd`` in the CWD."""
    torch.save(self.model.state_dict(), f'g2p-{name}.ptsd')

def _load_state_dic(self, name):
    """Restore the model's state dict from ``g2p-<name>.ptsd``."""
    self.model.load_state_dict(torch.load(f'g2p-{name}.ptsd'))
def _preview(self, entry):
word, pron = entry
print('{}: [{}] [{}]'.format(word, '-'.join(pron),
'-'.join(self.model.predict_str(word))))
def METHOD_NAME(self):
    # Main training driver (masked name — presumably `train`): runs
    # self.epochs epochs, decays the LR until min_lr, checkpoints the best
    # model by eval loss, and prints a few fixed preview entries per epoch.
    preview_entries = []
    for i in range(5):
        idx = random.randrange(len(self.dataset.entries))
        preview_entries.append(self.dataset.entries[idx])
    best_eval_loss = 10000
    for i in range(self.epochs):
        loss = self._train_epoch()
        eval_loss = self._eval()
        lr = self.scheduler.get_last_lr()[0]
        print('epoch: {} - lr: {:.2e} - loss: {:.4f} - eval_loss: {:.4f}'
              .format(i, lr, loss, eval_loss))
        # Abort training on numerical divergence.
        if math.isnan(loss) or math.isnan(eval_loss):
            break
        # Exponential LR decay, frozen once min_lr is reached.
        if lr > self.min_lr:
            self.scheduler.step()
        if best_eval_loss > eval_loss:
            best_eval_loss = eval_loss
            print('saving new best at epoch {}'.format(i))
            self._save_state_dic('best')
        # Additional periodic checkpoint every 20 epochs.
        if (i + 1) % 20 == 0:
            self._save_state_dic('{:03d}'.format(i + 1))
        for entry in preview_entries:
            self._preview(entry)
# Calculates the WER and PER of the model on the entire dataset.
# Very very slow.
def test(self, test_log=None):
    """Evaluate word and phoneme error rates over every dataset entry.

    :param test_log: optional path; when given, each word plus its reference
        and predicted pronunciation is written to this file (overwriting it).
    """
    word_count = 0
    word_error = 0
    phoneme_count = 0
    phoneme_error = 0
    pbar = tqdm.tqdm(self.dataset.entries)
    # NOTE(review): the log file is opened without a context manager and is
    # only closed on the happy path; an exception mid-loop leaks the handle.
    if test_log is not None:
        f = open(test_log, 'w', encoding='utf8')
    for word, pron in pbar:
        predicted = self.model.predict_str(word)
        # Edit distance between reference and predicted phoneme sequences.
        dis = editdistance.distance(pron, predicted)
        word_count += 1
        phoneme_count += len(pron)
        if dis > 0:
            word_error += 1
            phoneme_error += dis
        if test_log is not None:
            f.write('{}\n\t{}\n\t{}\n'.format(
                word, ' '.join(pron), ' '.join(predicted)))
        # Progress bar shows the running WER/PER.
        pbar.set_description("wer = {:.4f} per = {:.4f}".format(
            word_error / word_count, phoneme_error / phoneme_count))
    if test_log is not None:
        f.close()
6,504 | do eval | # Copyright 2020-2023 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gym
import numpy as np
import os
import warnings
from stable_baselines3.common.callbacks import BaseCallback, EventCallback
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv, sync_envs_normalization
from typing import Union, Optional
from opendr.control.mobile_manipulation.mobileRL.evaluation import evaluation_rollout
class MobileRLEvalCallback(EventCallback):
"""
Callback for evaluating an agent.
:param eval_env: (Union[gym.Env, VecEnv]) The environment used for initialization
:param callback_on_new_best: (Optional[BaseCallback]) Callback to trigger
when there is a new best model according to the ``mean_reward``
:param n_eval_episodes: (int) The number of episodes to test the agent
:param eval_freq: (int) Evaluate the agent every eval_freq call of the callback.
:param log_path: (str) Path to a folder where the evaluations (``evaluations.npz``)
will be saved. It will be updated at each evaluation.
:param best_model_save_path: (str) Path to a folder where the best model
according to performance on the eval env will be saved.
:param deterministic: (bool) Whether the evaluation should
use a stochastic or deterministic actions.
:param render: (bool) Whether to render or not the environment during evaluation
:param verbose: (int)
"""
def __init__(self, eval_env: Union[gym.Env, VecEnv],
             callback_on_new_best: Optional[BaseCallback] = None,
             n_eval_episodes: int = 5,
             eval_freq: int = 10000,
             log_path: str = None,
             best_model_save_path: str = None,
             deterministic: bool = True,
             render: bool = False,
             verbose: int = 1,
             debug: bool = False,
             prefix: str = 'eval',
             checkpoint_after_iter: int = 0
             ):
    super(MobileRLEvalCallback, self).__init__(callback_on_new_best, verbose=verbose)
    # Plain configuration values.
    self.n_eval_episodes = n_eval_episodes
    self.eval_freq = eval_freq
    self.deterministic = deterministic
    self.render = render
    self.checkpoint_after_iter = checkpoint_after_iter
    self.debug = debug
    self.prefix = prefix
    self.log_path = log_path
    self.best_model_save_path = best_model_save_path
    self._best_mean_reward = -np.inf
    # Wrap a bare gym.Env so downstream code can rely on the VecEnv API;
    # evaluation supports exactly one environment.
    if not isinstance(eval_env, VecEnv):
        eval_env = DummyVecEnv([lambda: eval_env])
    assert eval_env.num_envs == 1, "You must pass only one environment for evaluation"
    self.eval_env = eval_env
def _init_callback(self):
    """Warn on mismatched env wrapper types and prepare output directories."""
    # Does not work in some corner cases, where the wrapper is not the same
    if not isinstance(self.training_env, type(self.eval_env)):
        message = ("Training and eval env are not of the same type"
                   f"{self.training_env} != {self.eval_env}")
        warnings.warn(message)
    # Create output folders only for paths that were actually configured.
    if self.best_model_save_path:
        os.makedirs(self.best_model_save_path, exist_ok=True)
    if self.log_path:
        os.makedirs(os.path.dirname(self.log_path), exist_ok=True)
def METHOD_NAME(self):
    """Run one evaluation rollout on the (single) eval env and return its stats."""
    # Sync training and eval env if there is VecNormalize
    sync_envs_normalization(self.training_env, self.eval_env)
    env = self.eval_env
    # Unwrap the single underlying env before handing it to the rollout helper.
    if isinstance(env, VecEnv):
        assert env.num_envs == 1, "You must pass only one environment when using this function"
        env = env.envs[0]
    return evaluation_rollout(self.model, env=env,
                              num_eval_episodes=self.n_eval_episodes,
                              global_step=self.num_timesteps,
                              name_prefix=self.prefix)
def _on_step(self) -> bool:
    """Periodic hook: checkpoint the model and run evaluations.

    :returns: True to continue training; when a callback-on-new-best is
        configured, its event result is returned after a new best evaluation.
    """
    continue_train = True
    # Periodic checkpointing, independent of evaluation.
    if self.checkpoint_after_iter and self.n_calls and (self.n_calls % self.checkpoint_after_iter == 0) and (
            self.best_model_save_path is not None):
        self.model.save(os.path.join(self.best_model_save_path, f'model_step{self.num_timesteps}'))
    # Evaluate on the very first call and then every eval_freq calls.
    if (self.n_calls == 1) or (self.eval_freq > 0 and self.n_calls % self.eval_freq == 0):
        episode_rewards, episode_lengths, metrics, name_prefix = self.METHOD_NAME()
        mean_reward = np.mean(episode_rewards)
        mean_ep_length, std_ep_length = np.mean(episode_lengths), np.std(episode_lengths)
        if self.verbose > 0:
            print(f"Episode length: {mean_ep_length:.2f} +/- {std_ep_length:.2f}")
        if mean_reward > self._best_mean_reward:
            # BUGFIX: record the new best *before* any early return. The
            # original assigned _best_mean_reward only after the
            # `return self._on_event()` branch, so whenever a callback was
            # configured the best reward was never updated and every
            # evaluation was treated as a new best.
            self._best_mean_reward = mean_reward
            if self.best_model_save_path is not None:
                print("Saving best model")
                self.model.save(os.path.join(self.best_model_save_path, 'best_model'))
            # Trigger callback if needed
            if self.callback is not None:
                return self._on_event()
    return continue_train
6,505 | get legacy ot types | """Safely unpickle objects stored in the database by older robot-server versions."""
from dataclasses import dataclass
from functools import lru_cache
from io import BytesIO
from logging import getLogger
from pickle import ( # noqa: F401
Unpickler,
# Re-export `dumps()` to allow this module to be used as a drop-in replacement
# for the `pickle` module, which is useful for `sqlalchemy.PickleType`.
#
# TODO(mm, 2022-10-13): Transition to JSON and remove this after we've stopped
# new objects. Or, wrap this with validation to stop us from accidentally pickling
# unknown types.
dumps as dumps,
)
from typing import Dict, List
_log = getLogger(__name__)
class LegacyUnpickler(Unpickler):
    """A custom unpickler to safely handle legacy Opentrons types.

    The standard library's default unpickler is sensitive to import paths:
    if you pickle an object and later move or rename its type, unpickling
    raises a "class not found" error. This class lets us unpickle objects
    stored by older robot-server versions even if their types have since
    moved or been renamed.
    """

    def find_class(self, module: str, name: str) -> object:  # noqa: D102
        # Match purely on the type name, ignoring the name of the containing
        # module, to avoid confusion and false negatives with types that can
        # be imported through multiple paths.
        legacy_types = _get_types_by_original_name()
        if name in legacy_types:
            return legacy_types[name]

        # Not an Opentrons-defined type. Decide whether it is something else
        # we expect to see inside legacy pickles.
        if module == "" and name in {"int", "str"}:
            known_type = True
        elif module == "datetime":
            # `datetime.datetime`s and their attributes, like `datetime.timezone`.
            known_type = True
        elif module == "numpy" or module.startswith("numpy."):
            # `numpy.dtype` and `numpy.core.multiarray.scalar` (and possibly others)
            # are present.
            known_type = True
        else:
            known_type = False

        if not known_type:
            _log.warning(
                f'Unpickling unknown type "{name}" from module "{module}".'
                f" This may cause problems with reading records created by"
                f" older versions of this robot software."
                f" This should be reported to Opentrons and investigated."
            )
        return super().find_class(module, name)


def loads(data: bytes) -> object:
    """Drop-in replacement for `pickle.loads` that uses our custom unpickler."""
    return LegacyUnpickler(BytesIO(data)).load()
# Internal record type mapping a historical pickled name to today's class.
@dataclass
class _LegacyTypeInfo:
    """Information about a Python type that older robot-server versions pickled."""

    original_name: str
    """The Python source name that the type had in older robot-server versions.

    Should not include the name of the containing module.
    """

    current_type: type
    """The Python type as it exists today.

    Legacy objects whose type name matches `original_name` will be unpickled
    into this type.

    The current type is allowed to have a different source name or be defined in a
    different module from the original. But it must otherwise be pickle-compatible
    with the original.
    """
def METHOD_NAME() -> List[_LegacyTypeInfo]:
    """Return all legacy Opentrons types.

    A "legacy Opentrons type," in this context, is any Opentrons-defined Python type
    that was pickled by older robot-server versions and stored in the database.

    NOTE: After adding a type to this list, its `original_name` should never change.
    Even if the `current_type` gets renamed.

    This is in a function with local imports to mitigate circular dependency problems.
    This module will be imported by low-level database setup, so it can't immediately
    import higher-level types, because they might try to import the low-level database
    setup again.
    """
    # Imports stay local (see docstring); grouped by source module instead of
    # the original one-import-one-append boilerplate per type.
    from robot_server.protocols.analysis_models import AnalysisResult, AnalysisStatus
    from opentrons.protocol_engine.commands import CommandIntent, CommandStatus
    from opentrons.types import DeckSlotName, MountType
    from opentrons_shared_data.labware.labware_definition import DisplayCategory
    from opentrons.protocol_engine import (
        DropTipWellOrigin,
        EngineStatus,
        ModuleModel,
        WellOrigin,
    )
    from opentrons.protocol_engine.types import (
        LabwareMovementStrategy,
        MotorAxis,
        MovementAxis,
    )
    from opentrons.hardware_control.modules.types import ModuleType
    from opentrons_shared_data.pipette.dev_types import PipetteNameType

    # (original_name, current_type) pairs, preserving the original list order.
    # `PipetteNameType` appears twice because it was pickled under two
    # different source names over time.
    name_type_pairs = [
        ("AnalysisResult", AnalysisResult),
        ("AnalysisStatus", AnalysisStatus),
        ("CommandIntent", CommandIntent),
        ("CommandStatus", CommandStatus),
        ("DeckSlotName", DeckSlotName),
        ("DisplayCategory", DisplayCategory),
        ("DropTipWellOrigin", DropTipWellOrigin),
        ("EngineStatus", EngineStatus),
        ("LabwareMovementStrategy", LabwareMovementStrategy),
        ("ModuleModel", ModuleModel),
        ("ModuleType", ModuleType),
        ("MotorAxis", MotorAxis),
        ("MountType", MountType),
        ("MovementAxis", MovementAxis),
        ("PipetteName", PipetteNameType),
        ("PipetteNameType", PipetteNameType),
        ("WellOrigin", WellOrigin),
    ]
    return [
        _LegacyTypeInfo(original_name=name, current_type=current)
        for name, current in name_type_pairs
    ]
@lru_cache(maxsize=1)
def _get_types_by_original_name() -> Dict[str, type]:
    """Build (once, cached) a lookup from each legacy original name to its current type."""
    legacy_infos = METHOD_NAME()
    original_names = [info.original_name for info in legacy_infos]
    assert len(original_names) == len(
        set(original_names)
    ), "LegacyUnpickler assumes the original names are unique."
    return {info.original_name: info.current_type for info in legacy_infos}
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetDatasetIamPolicyResult',
'AwaitableGetDatasetIamPolicyResult',
'get_dataset_iam_policy',
'get_dataset_iam_policy_output',
]
@pulumi.output_type
class GetDatasetIamPolicyResult:
    """
    A collection of values returned by getDatasetIamPolicy.
    """
    def __init__(__self__, dataset_id=None, METHOD_NAME=None, id=None, policy_data=None, project=None):
        # Validate each argument's type before storing it via pulumi.set so
        # the @pulumi.output_type machinery can expose it as a property.
        if dataset_id and not isinstance(dataset_id, str):
            raise TypeError("Expected argument 'dataset_id' to be a str")
        pulumi.set(__self__, "dataset_id", dataset_id)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", METHOD_NAME)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if policy_data and not isinstance(policy_data, str):
            raise TypeError("Expected argument 'policy_data' to be a str")
        pulumi.set(__self__, "policy_data", policy_data)
        if project and not isinstance(project, str):
            raise TypeError("Expected argument 'project' to be a str")
        pulumi.set(__self__, "project", project)

    @property
    @pulumi.getter(name="datasetId")
    def dataset_id(self) -> str:
        """The dataset ID the policy was retrieved for."""
        return pulumi.get(self, "dataset_id")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        (Computed) The etag of the IAM policy.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="policyData")
    def policy_data(self) -> str:
        """
        (Computed) The policy data
        """
        return pulumi.get(self, "policy_data")

    @property
    @pulumi.getter
    def project(self) -> str:
        """The project the dataset belongs to."""
        return pulumi.get(self, "project")
class AwaitableGetDatasetIamPolicyResult(GetDatasetIamPolicyResult):
    """Awaitable wrapper so the result can also be consumed with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # 'if False: yield' makes this a generator function that never
        # yields, so awaiting immediately returns the plain result object.
        if False:
            yield self
        return GetDatasetIamPolicyResult(
            dataset_id=self.dataset_id,
            METHOD_NAME=self.METHOD_NAME,
            id=self.id,
            policy_data=self.policy_data,
            project=self.project)
def get_dataset_iam_policy(dataset_id: Optional[str] = None,
                           project: Optional[str] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatasetIamPolicyResult:
    """
    Retrieves the current IAM policy data for a BigQuery dataset.
    ## example
    ```python
    import pulumi
    import pulumi_gcp as gcp
    policy = gcp.bigquery.get_dataset_iam_policy(dataset_id=google_bigquery_dataset["dataset"]["dataset_id"])
    ```
    :param str dataset_id: The dataset ID.
    :param str project: The ID of the project in which the resource belongs.
           If it is not provided, the provider project is used.
    """
    # Arguments for the provider invoke, keyed by their wire names.
    __args__ = {
        'datasetId': dataset_id,
        'project': project,
    }
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    invoke_result = pulumi.runtime.invoke('gcp:bigquery/getDatasetIamPolicy:getDatasetIamPolicy', __args__, opts=opts, typ=GetDatasetIamPolicyResult).value
    return AwaitableGetDatasetIamPolicyResult(
        dataset_id=pulumi.get(invoke_result, 'dataset_id'),
        METHOD_NAME=pulumi.get(invoke_result, 'etag'),
        id=pulumi.get(invoke_result, 'id'),
        policy_data=pulumi.get(invoke_result, 'policy_data'),
        project=pulumi.get(invoke_result, 'project'))
@_utilities.lift_output_func(get_dataset_iam_policy)
def get_dataset_iam_policy_output(dataset_id: Optional[pulumi.Input[str]] = None,
                           project: Optional[pulumi.Input[Optional[str]]] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDatasetIamPolicyResult]:
    """
    Retrieves the current IAM policy data for a BigQuery dataset.
    ## example
    ```python
    import pulumi
    import pulumi_gcp as gcp
    policy = gcp.bigquery.get_dataset_iam_policy(dataset_id=google_bigquery_dataset["dataset"]["dataset_id"])
    ```
    :param str dataset_id: The dataset ID.
    :param str project: The ID of the project in which the resource belongs.
           If it is not provided, the provider project is used.
    """
    # Body intentionally empty: lift_output_func wraps get_dataset_iam_policy
    # to provide the Output-returning implementation.
    ...
# This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
import bpy
from mathutils import Vector
from bpy.props import FloatProperty, EnumProperty, BoolProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, zip_long_repeat, ensure_nesting_level, split_by_count, transpose_list
from sverchok.utils.curve import SvCurve
from sverchok.utils.curve.nurbs import SvNurbsCurve
from sverchok.utils.curve.nurbs_algorithms import intersect_nurbs_curves
from sverchok.utils.curve.freecad import curve_to_freecad
from sverchok.dependencies import FreeCAD, scipy
if FreeCAD is not None:
from FreeCAD import Base
class SvIntersectNurbsCurvesNode(SverchCustomTreeNode, bpy.types.Node):
    """
    Triggers: Intersect Curves
    Tooltip: Find intersection points of two NURBS curves
    """
    bl_idname = 'SvIntersectNurbsCurvesNode'
    bl_label = 'Intersect NURBS Curves'
    bl_icon = 'OUTLINER_OB_EMPTY'
    sv_icon = 'SV_INTERSECT_CURVES'
    sv_dependencies = {'FreeCAD', 'scipy'}

    # Only offer the implementations whose backing library is installed.
    implementations = []
    if FreeCAD is not None:
        implementations.append(('FREECAD', "FreeCAD", "Implementation from FreeCAD library", 0))
    if scipy is not None:
        implementations.append(('SCIPY', "SciPy", "Sverchok built-in implementation", 1))
    implementation : EnumProperty(
        name = "Implementation",
        items=implementations,
        update = updateNode)

    # 'LONG' pairs the two curve lists element-wise (repeating the shorter
    # one); 'CROSS' intersects every curve of list 1 with every curve of list 2.
    match_methods = [
        ('LONG', "Longest", "", 0),
        ('CROSS', "Cross", "", 1)
    ]
    matching : EnumProperty(
        name = "Matching",
        items = match_methods,
        update = updateNode)
    single : BoolProperty(
        name = "Find single intersection",
        default = True,
        update = updateNode)
    check_intersection : BoolProperty(
        name = "Curves do intersect",
        description = "If checked, the node will fail when curves do not intersect",
        default = False,
        update = updateNode)
    # Numeric tolerance; used by the SciPy implementation only (see draw_buttons_ext).
    precision : FloatProperty(
        name = "Precision",
        default = 0.001,
        precision = 6,
        min = 0,
        update = updateNode)
    # Optimization methods accepted by the SciPy-based solver.
    methods = [
        ('Nelder-Mead', "Nelder-Mead", "", 0),
        ('L-BFGS-B', 'L-BFGS-B', "", 1),
        ('SLSQP', 'SLSQP', "", 2),
        ('Powell', 'Powell', "", 3),
        ('trust-constr', 'Trust-Constr', "", 4)
    ]
    method : EnumProperty(
        name = "Numeric method",
        items = methods,
        default = methods[0][0],
        update = updateNode)
    split : BoolProperty(
        name = "Split by row",
        default = True,
        update = updateNode)

    def draw_buttons(self, context, layout):
        """Draw the node's main UI controls in the node editor."""
        layout.prop(self, 'implementation', text='')
        layout.prop(self, 'matching')
        layout.prop(self, 'single')
        layout.prop(self, 'check_intersection')
        if self.matching == 'CROSS':
            layout.prop(self, 'split')

    def draw_buttons_ext(self, context, layout):
        """Draw the extended (N-panel) controls; SciPy exposes extra tuning."""
        self.draw_buttons(context, layout)
        if self.implementation == 'SCIPY':
            layout.prop(self, 'precision')
            layout.prop(self, 'method')

    def sv_init(self, context):
        """Create the node's input and output sockets."""
        self.inputs.new('SvCurveSocket', "Curve1")
        self.inputs.new('SvCurveSocket', "Curve2")
        self.outputs.new('SvVerticesSocket', "Intersections")
        self.outputs.new('SvStringsSocket', "T1")
        self.outputs.new('SvStringsSocket', "T2")

    def _filter(self, points):
        """Drop consecutive near-duplicate intersections.

        `points` is a list of (t1, t2, point) tuples; a point closer than
        1e-4 to the previously kept point is discarded.  Returns parallel
        lists (t1s, t2s, points).
        """
        if not points:
            return [], [], []
        t1, t2, prev = points[0]
        out_t1 = [t1]
        out_t2 = [t2]
        out_points = [prev]
        for t1, t2, p in points[1:]:
            r = (Vector(p) - Vector(prev)).length
            if r > 1e-4:
                out_t1.append(t1)
                out_t2.append(t2)
                out_points.append(p)
                prev = p
        return out_t1, out_t2, out_points

    def process_native(self, curve1, curve2):
        """Intersect two curves with the built-in (SciPy-based) solver."""
        res = intersect_nurbs_curves(curve1, curve2,
                method = self.method,
                numeric_precision = self.precision,
                logger = self.sv_logger)
        points = [(r[0], r[1], r[2].tolist()) for r in res]
        return self._filter(points)

    def process_freecad(self, sv_curve1, sv_curve2):
        """Intersect two curves using FreeCAD's intersectCC."""
        fc_curve1 = curve_to_freecad(sv_curve1)[0]
        fc_curve2 = curve_to_freecad(sv_curve2)[0]
        points = fc_curve1.curve.intersectCC(fc_curve2.curve)
        points = [(p.X, p.Y, p.Z) for p in points]
        pts = []
        for p in points:
            # Recover each curve's parameter value at the intersection point.
            t1 = fc_curve1.curve.parameter(Base.Vector(*p))
            t2 = fc_curve2.curve.parameter(Base.Vector(*p))
            pts.append((t1, t2, p))
        return self._filter(pts)

    def match(self, curves1, curves2):
        """Pair the two curve lists per `matching`; yields ((i, c1), (j, c2))."""
        if self.matching == 'LONG':
            return zip_long_repeat(list(enumerate(curves1)), list(enumerate(curves2)))
        else:
            return [(c1, c2) for c2 in enumerate(curves2) for c1 in enumerate(curves1)]

    def METHOD_NAME(self):
        """Node evaluation: read curve inputs, intersect, write outputs."""
        if not any(socket.is_linked for socket in self.outputs):
            return
        curve1_s = self.inputs['Curve1'].sv_get()
        curve2_s = self.inputs['Curve2'].sv_get()
        curve1_s = ensure_nesting_level(curve1_s, 2, data_types=(SvCurve,))
        curve2_s = ensure_nesting_level(curve2_s, 2, data_types=(SvCurve,))
        points_out = []
        t1_out = []
        t2_out = []
        object_idx = 0
        for curve1s, curve2s in zip_long_repeat(curve1_s, curve2_s):
            new_points = []
            new_t1 = []
            new_t2 = []
            for (i, curve1), (j, curve2) in self.match(curve1s, curve2s):
                # Both inputs must be convertible to NURBS form.
                curve1 = SvNurbsCurve.to_nurbs(curve1)
                if curve1 is None:
                    raise Exception("Curve1 is not a NURBS")
                curve2 = SvNurbsCurve.to_nurbs(curve2)
                if curve2 is None:
                    raise Exception("Curve2 is not a NURBS")
                if self.implementation == 'SCIPY':
                    t1s, t2s, ps = self.process_native(curve1, curve2)
                else:
                    t1s, t2s, ps = self.process_freecad(curve1, curve2)
                if self.check_intersection:
                    if not ps:
                        raise Exception(f"Object #{object_idx}: Curve #{i} does not intersect with curve #{j}!")
                # With "single", keep only the first intersection found.
                if self.single:
                    if len(ps) >= 1:
                        ps = ps[0]
                        t1s = t1s[0]
                        t2s = t2s[0]
                new_points.append(ps)
                new_t1.append(t1s)
                new_t2.append(t2s)
            if self.split:
                # Regroup the flat CROSS result into rows of len(curve1s).
                n = len(curve1s)
                new_points = split_by_count(new_points, n)
                new_t1 = split_by_count(new_t1, n)
                new_t1 = transpose_list(new_t1)
                new_t2 = split_by_count(new_t2, n)
            points_out.append(new_points)
            t1_out.append(new_t1)
            t2_out.append(new_t2)
            object_idx += 1
        self.outputs['Intersections'].sv_set(points_out)
        self.outputs['T1'].sv_set(t1_out)
        self.outputs['T2'].sv_set(t2_out)
def register():
    """Register the node class with Blender (called when the add-on loads)."""
    bpy.utils.register_class(SvIntersectNurbsCurvesNode)
def unregister():
    """Unregister the node class from Blender (called when the add-on unloads)."""
    bpy.utils.unregister_class(SvIntersectNurbsCurvesNode)
# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Federated server launching script."""
import argparse
import logging
import os
import sys
import time
from nvflare.apis.fl_constant import JobConstants, SiteType, WorkspaceConstants
from nvflare.apis.workspace import Workspace
from nvflare.fuel.common.excepts import ConfigError
from nvflare.fuel.f3.mpm import MainProcessMonitor as mpm
from nvflare.fuel.utils.argument_utils import parse_vars
from nvflare.private.defs import AppFolderConstants
from nvflare.private.fed.app.fl_conf import FLServerStarterConfiger, create_privacy_manager
from nvflare.private.fed.app.utils import create_admin_server
from nvflare.private.fed.server.server_status import ServerStatus
from nvflare.private.fed.utils.fed_utils import add_logfile_handler, fobs_initialize, security_init
from nvflare.private.privacy_manager import PrivacyService
from nvflare.security.logging import secure_format_exception
def METHOD_NAME():
    """Parse CLI args, configure the workspace, and run the FL server.

    Blocks until the server status becomes SHUTDOWN, then stops the admin
    server and closes the engine.  Exits with -1 if stale marker files
    cannot be removed; re-raises ConfigError after logging it.
    """
    # Supported interpreter range is checked up front.
    if sys.version_info >= (3, 11):
        raise RuntimeError("Python versions 3.11 and above are not yet supported. Please use Python 3.8, 3.9 or 3.10.")
    if sys.version_info < (3, 8):
        raise RuntimeError("Python versions 3.7 and below are not supported. Please use Python 3.8, 3.9 or 3.10")
    parser = argparse.ArgumentParser()
    parser.add_argument("--workspace", "-m", type=str, help="WORKSPACE folder", required=True)
    parser.add_argument(
        "--fed_server", "-s", type=str, help="an aggregation server specification json file", required=True
    )
    parser.add_argument("--set", metavar="KEY=VALUE", nargs="*")
    args = parser.parse_args()
    kv_list = parse_vars(args.set)
    config_folder = kv_list.get("config_folder", "")
    if config_folder == "":
        args.server_config = JobConstants.SERVER_JOB_CONFIG
    else:
        args.server_config = os.path.join(config_folder, JobConstants.SERVER_JOB_CONFIG)
    # TODO:: remove env and train config since they are not core
    args.env = os.path.join("config", AppFolderConstants.CONFIG_ENV)
    args.train_config = os.path.join("config", AppFolderConstants.CONFIG_TRAIN)
    args.config_folder = config_folder
    logger = logging.getLogger()
    args.log_config = None
    args.job_id = None
    workspace = Workspace(root_dir=args.workspace, site_name="server")
    # Remove stale restart/shutdown marker files left by a previous run.
    for name in [WorkspaceConstants.RESTART_FILE, WorkspaceConstants.SHUTDOWN_FILE]:
        try:
            f = workspace.get_file_path_in_root(name)
            if os.path.exists(f):
                os.remove(f)
        except Exception:
            print(f"Could not remove file '{name}'.  Please check your system before starting FL.")
            sys.exit(-1)
    try:
        os.chdir(args.workspace)
        fobs_initialize()
        conf = FLServerStarterConfiger(
            workspace=workspace,
            args=args,
            kv_list=args.set,
        )
        # Optional log-level override via the FL_LOG_LEVEL environment variable.
        log_level = os.environ.get("FL_LOG_LEVEL", "")
        numeric_level = getattr(logging, log_level.upper(), None)
        if isinstance(numeric_level, int):
            logging.getLogger().setLevel(numeric_level)
        # One message per level so the effective level is visible in the log.
        logger.debug("loglevel debug enabled")
        logger.info("loglevel info enabled")
        logger.warning("loglevel warn enabled")
        logger.error("loglevel error enabled")
        logger.critical("loglevel critical enabled")
        conf.configure()
        log_file = workspace.get_log_file_path()
        add_logfile_handler(log_file)
        deployer = conf.deployer
        secure_train = conf.cmd_vars.get("secure_train", False)
        security_init(
            secure_train=secure_train,
            site_org=conf.site_org,
            workspace=workspace,
            app_validator=conf.app_validator,
            site_type=SiteType.SERVER,
        )
        # initialize Privacy Service
        privacy_manager = create_privacy_manager(workspace, names_only=True, is_server=True)
        PrivacyService.initialize(privacy_manager)
        admin_server = None
        try:
            # Deploy the FL server
            services = deployer.deploy(args)
            first_server = sorted(conf.config_data["servers"])[0]
            # allow command to overwrite the admin_host
            if conf.cmd_vars.get("host", None):
                first_server["admin_host"] = conf.cmd_vars["host"]
            admin_server = create_admin_server(
                services,
                server_conf=first_server,
                args=args,
                secure_train=secure_train,
            )
            admin_server.start()
            services.set_admin_server(admin_server)
            # mpm.add_cleanup_cb(admin_server.stop)
        finally:
            deployer.close()
        logger.info("Server started")
        # From Python 3.9 and above, the ThreadPoolExecutor does not allow submit() to create a new thread while the
        # main thread has exited. Use the ServerStatus.SHUTDOWN to keep the main thread waiting for the gRPC
        # server to be shutdown.
        while services.status != ServerStatus.SHUTDOWN:
            time.sleep(1.0)
        if admin_server:
            admin_server.stop()
        services.engine.close()
    except ConfigError as e:
        logger.exception(f"ConfigError: {secure_format_exception(e)}")
        raise e
if __name__ == "__main__":
    """
    This is the main program when starting the NVIDIA FLARE server process.
    """
    # Run the server under the main process monitor and propagate its
    # return code as the process exit status.
    rc = mpm.run(main_func=METHOD_NAME)
    sys.exit(rc)
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Bias correction graph transform."""
from __future__ import absolute_import, division, print_function
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import dtypes, tensor_util
from .graph_transform_base import GraphTransformBase
class BiasCorrection(GraphTransformBase):
    """This class implements the bias correction graph transform.

    Will correct the weight and scale for *Conv2D* op
    weight_empirical:
    our task is to correct int8 weight distribution close to fp32 weight
    r*(W_int8 + u) -> W_fp32, r is variance ratio between fp32 and int8
    u is the difference between fp32 and int8 channel wise, it's equal to minimize:
    round(scale_c * (W_fp32 + shift))/scale - r*(round(scale * W_fp32) + scale*u)/scale
    notice we can only change the first round: round(scale_c * (W_fp32 + shift))
    an empirical solution is to make:
    scale_c = r * scale and shift = u
    with this we don't change the min/max value, and correct the weight
    """

    def __init__(self, input_graph, fp32_graph, method="weight_empirical", new_api=False):
        """Initialize the transform.

        Args:
            input_graph: quantized (int8) GraphDef that will be corrected in place.
            fp32_graph: reference fp32 GraphDef the correction is computed against.
            method: correction algorithm; only "weight_empirical" is supported.
            new_api: True if the graph uses the fused `_FusedQuantizedConv2D` ops.
        """
        # only support weight_empirical now
        self.bias_correct_map = {"weight_empirical": self._weight_empirical}
        assert method in self.bias_correct_map, "only support weight empirical correction method"
        super(BiasCorrection, self).__init__(input_graph)
        self.fp32_graph = fp32_graph
        self.input_graph = input_graph
        self.method = method
        self.fp32_node_mapping = {}
        self.parse_input_pb()
        self.new_api = new_api

    def _weight_empirical(self):
        """Apply per-output-channel empirical weight correction to quantized Conv2D nodes.

        Returns:
            The corrected input graph.
        """
        for node in self.fp32_graph.node:
            if node.name not in self.fp32_node_mapping:
                self.fp32_node_mapping[node.name] = node
        for node_name in self.node_mapping:
            node = self.node_mapping[node_name]
            node_op = node.op
            if self.new_api:
                if "_FusedQuantizedConv2D" not in node_op:
                    continue
            else:
                if "QuantizedConv2D" not in node_op:
                    continue
            # int8 filter constant feeding this conv; transpose so the output
            # channel axis comes first for the per-channel loop below.
            int8_filter = self.node_mapping[self.get_node_name_from_input(node.input[1])]
            int8_value = tensor_util.MakeNdarray(int8_filter.attr["value"].tensor)
            tr_int8_value = int8_value.transpose([3, 0, 1, 2])
            # Matching fp32 filter from the reference graph (name without the
            # "_qint8_const" suffix added during quantization).
            fp32_filter_name = self.get_node_name_from_input(node.input[1]).split("_qint8_const")[0]
            fp32_filter = self.fp32_node_mapping[fp32_filter_name]
            fp32_value = tensor_util.MakeNdarray(fp32_filter.attr["value"].tensor)
            tr_fp32_value = fp32_value.transpose([3, 0, 1, 2])
            # if bias fused, then offset to min/max filter should be 5
            if self.new_api:
                offset = 5
            else:
                offset = 5 if "Bias" in node_op else 4
            min_filter_node = self.node_mapping[node.input[offset]]
            max_filter_node = self.node_mapping[node.input[offset + 1]]
            # Per-tensor quantization has a scalar min/max (channel_size 1).
            channel_size = (
                1
                if not min_filter_node.attr["value"].tensor.tensor_shape.dim
                else min_filter_node.attr["value"].tensor.tensor_shape.dim[0].size
            )
            if channel_size == 1:
                max_filter_tensor = []
                min_filter_tensor = []
                max_filter_tensor.append((max_filter_node.attr["value"].tensor.float_val)[0])
                min_filter_tensor.append((min_filter_node.attr["value"].tensor.float_val)[0])
            else:
                max_filter_tensor = tensor_util.MakeNdarray(max_filter_node.attr["value"].tensor)
                min_filter_tensor = tensor_util.MakeNdarray(min_filter_node.attr["value"].tensor)
            tr_quantized_fp32_value = np.zeros_like(tr_fp32_value)
            tr_corrected_int8_value = np.zeros_like(tr_int8_value)
            for i in range(channel_size):
                scale = max(abs(max_filter_tensor[i]), abs(min_filter_tensor[i])) / 127
                # De-quantized view of the int8 weights for this channel.
                tr_quantized_fp32_value[i] = tr_int8_value[i].astype(np.float64) * scale
                # shift u: channel-wise mean difference between fp32 and int8.
                delta_mean = np.mean((tr_fp32_value[i] - tr_quantized_fp32_value[i]).flatten())
                # variance ratio r; guard against a zero std in the int8 view.
                var_ratio = (
                    np.std(tr_fp32_value[i].flatten()) / np.std(tr_quantized_fp32_value[i].flatten())
                    if np.std(tr_quantized_fp32_value[i].flatten()) != 0
                    else 1
                )
                tr_corrected_int8_value[i] = (var_ratio / scale) * (tr_fp32_value[i] + delta_mean)
            # BUGFIX: transpose the *corrected* weights back to the filter's
            # original [H, W, in, out] layout.  Previously tr_int8_value was
            # transposed here, which made `bias` identically zero and
            # silently skipped the correction for every node.
            correct_int8_value = tr_corrected_int8_value.transpose([1, 2, 3, 0])
            assert (
                int8_value.shape == correct_int8_value.shape
            ), "correct filter shape should equal with origin filter shape"
            bias = int8_value.astype(np.float32) - correct_int8_value.astype(np.float32)
            # Only rewrite the filter constant if the correction changed it.
            if np.sum(bias) != 0:
                int8_filter.attr["value"].CopyFrom(
                    attr_value_pb2.AttrValue(
                        tensor=tensor_util.make_tensor_proto(correct_int8_value, dtypes.qint8, int8_value.shape)
                    )
                )
        return self.input_graph

    def METHOD_NAME(self):
        """Apply bias correction graph transform."""
        return self.bias_correct_map[self.method]()
#!/usr/bin/env pmpython
#
# Copyright (C) 2016 Sitaram Shelke.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
import mock
import unittest
from pcp_pidstat import ProcessCpuUsage
class TestProcessCpuUsage(unittest.TestCase):
    """Unit tests for pcp_pidstat.ProcessCpuUsage.

    The mocked metric repository serves instance 1 with both current and
    previous samples, instance 2 with current samples only, and instance 3
    with previous samples only, so the percentage calculations can be
    exercised both with complete and with missing data.
    """

    # Samples returned for instances that have a *current* value (1 and 2).
    _CURRENT_SAMPLES = {
        'proc.psinfo.utime': 112233,
        'proc.psinfo.guest_time': 112213,
        'proc.psinfo.stime': 112243,
        'proc.psinfo.pid': 1,
        'proc.psinfo.cmd': "test",
        'proc.psinfo.processor': 0,
        'proc.id.uid': 1,
        'proc.id.uid_nm': "pcp",
    }
    # Samples returned for instances that have a *previous* value (1 and 3).
    _PREVIOUS_SAMPLES = {
        'proc.psinfo.utime': 112223,
        'proc.psinfo.guest_time': 112203,
        'proc.psinfo.stime': 112233,
        'proc.psinfo.pid': 1,
        'proc.psinfo.cmd': "test",
        'proc.psinfo.processor': 0,
        'proc.id.uid': 1,
        'proc.id.uid_nm': "pcp",
    }

    def setUp(self):
        """Build a metric repository mock backed by the sample tables above."""
        self.__metric_repository = mock.Mock()
        self.__metric_repository.current_value = mock.Mock(side_effect=self.metric_repo_current_value_side_effect)
        self.__metric_repository.previous_value = mock.Mock(side_effect=self.metric_repo_previous_value_side_effect)

    def metric_repo_current_value_side_effect(self, metric_name, instance):
        """Return the current sample for (metric_name, instance), or None."""
        if instance in (1, 2):
            return self._CURRENT_SAMPLES.get(metric_name)
        return None

    def metric_repo_previous_value_side_effect(self, metric_name, instance):
        """Return the previous sample for (metric_name, instance), or None."""
        if instance in (1, 3):
            return self._PREVIOUS_SAMPLES.get(metric_name)
        return None

    def test_user_percent(self):
        process_cpu_usage = ProcessCpuUsage(1, 1.34, self.__metric_repository)
        user_percent = process_cpu_usage.user_percent()
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12).
        self.assertEqual(user_percent, 0.75)

    def test_user_percent_if_current_value_is_None(self):
        process_cpu_usage = ProcessCpuUsage(3, 1.34, self.__metric_repository)
        user_percent = process_cpu_usage.user_percent()
        self.assertIsNone(user_percent)

    def test_user_percent_if_previous_value_is_None(self):
        process_cpu_usage = ProcessCpuUsage(2, 1.34, self.__metric_repository)
        user_percent = process_cpu_usage.user_percent()
        self.assertIsNone(user_percent)

    def test_guest_percent(self):
        process_cpu_usage = ProcessCpuUsage(1, 1.34, self.__metric_repository)
        guest_percent = process_cpu_usage.guest_percent()
        self.assertEqual(guest_percent, 0.75)

    def test_guest_percent_if_current_value_is_None(self):
        process_cpu_usage = ProcessCpuUsage(3, 1.34, self.__metric_repository)
        guest_percent = process_cpu_usage.guest_percent()
        self.assertIsNone(guest_percent)

    def test_guest_percent_if_previous_value_is_None(self):
        process_cpu_usage = ProcessCpuUsage(2, 1.34, self.__metric_repository)
        guest_percent = process_cpu_usage.guest_percent()
        self.assertIsNone(guest_percent)

    def test_system_percent(self):
        process_cpu_usage = ProcessCpuUsage(1, 1.34, self.__metric_repository)
        system_percent = process_cpu_usage.system_percent()
        self.assertEqual(system_percent, 0.75)

    def test_system_percent_if_current_value_is_None(self):
        process_cpu_usage = ProcessCpuUsage(3, 1.34, self.__metric_repository)
        system_percent = process_cpu_usage.system_percent()
        # The stray second argument (None) previously passed to assertIsNone
        # was silently consumed as the failure message; it has been dropped.
        self.assertIsNone(system_percent)

    def test_system_percent_if_previous_value_is_None(self):
        process_cpu_usage = ProcessCpuUsage(2, 1.34, self.__metric_repository)
        system_percent = process_cpu_usage.system_percent()
        self.assertIsNone(system_percent)

    def test_total_percent(self):
        process_cpu_usage = ProcessCpuUsage(1, 1.34, self.__metric_repository)
        total_percent = process_cpu_usage.total_percent()
        self.assertEqual(total_percent, 2.25)

    def test_total_percent_if_current_value_None(self):
        process_cpu_usage = ProcessCpuUsage(3, 1.34, self.__metric_repository)
        total_percent = process_cpu_usage.total_percent()
        self.assertIsNone(total_percent)

    def test_total_percent_if_previous_value_None(self):
        process_cpu_usage = ProcessCpuUsage(2, 1.34, self.__metric_repository)
        total_percent = process_cpu_usage.total_percent()
        self.assertIsNone(total_percent)

    def METHOD_NAME(self):
        process_cpu_usage = ProcessCpuUsage(1, 1.34, self.__metric_repository)
        pid = process_cpu_usage.pid()
        self.assertEqual(pid, 1)

    def test_process_name(self):
        process_cpu_usage = ProcessCpuUsage(1, 1.34, self.__metric_repository)
        name = process_cpu_usage.process_name()
        self.assertEqual(name, 'test')

    def test_cpu_number(self):
        process_cpu_usage = ProcessCpuUsage(1, 1.34, self.__metric_repository)
        number = process_cpu_usage.cpu_number()
        self.assertEqual(number, 0)

    def test_user_id(self):
        process_cpu_usage = ProcessCpuUsage(1, 1.34, self.__metric_repository)
        user_id = process_cpu_usage.user_id()
        self.assertEqual(user_id, 1)

    def test_user_name(self):
        process_cpu_usage = ProcessCpuUsage(1, 1.34, self.__metric_repository)
        user_name = process_cpu_usage.user_name()
        self.assertEqual(user_name, 'pcp')
if __name__ == '__main__':
    # Run the suite when this file is executed directly.
    unittest.main()
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'ListPolicyFragmentReferencesResult',
'AwaitableListPolicyFragmentReferencesResult',
'list_policy_fragment_references',
'list_policy_fragment_references_output',
]
@pulumi.output_type
class ListPolicyFragmentReferencesResult:
    """
    A collection of resources.
    """
    def __init__(__self__, count=None, next_link=None, value=None):
        # Validate each argument's type before storing it via pulumi.set so
        # the @pulumi.output_type machinery can expose it as a property.
        if count and not isinstance(count, float):
            raise TypeError("Expected argument 'count' to be a float")
        pulumi.set(__self__, "count", count)
        if next_link and not isinstance(next_link, str):
            raise TypeError("Expected argument 'next_link' to be a str")
        pulumi.set(__self__, "next_link", next_link)
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def count(self) -> Optional[float]:
        """
        Total record count number.
        """
        return pulumi.get(self, "count")

    @property
    @pulumi.getter(name="nextLink")
    def next_link(self) -> Optional[str]:
        """
        Next page link if any.
        """
        return pulumi.get(self, "next_link")

    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.ResourceCollectionResponseValue']]:
        """
        A collection of resources.
        """
        return pulumi.get(self, "value")
class AwaitableListPolicyFragmentReferencesResult(ListPolicyFragmentReferencesResult):
    """Awaitable wrapper so the result can also be consumed with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # 'if False: yield' makes this a generator function that never
        # yields, so awaiting immediately returns the plain result object.
        if False:
            yield self
        return ListPolicyFragmentReferencesResult(
            count=self.count,
            next_link=self.next_link,
            value=self.value)
def list_policy_fragment_references(id: Optional[str] = None,
                                    resource_group_name: Optional[str] = None,
                                    service_name: Optional[str] = None,
                                    skip: Optional[int] = None,
                                    top: Optional[int] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListPolicyFragmentReferencesResult:
    """
    Lists policy resources that reference the policy fragment.

    :param str id: A resource identifier.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str service_name: The name of the API Management service.
    :param int skip: Number of records to skip.
    :param int top: Number of records to return.
    """
    # Arguments for the provider invoke, keyed by their wire names.
    __args__ = {
        'id': id,
        'resourceGroupName': resource_group_name,
        'serviceName': service_name,
        'skip': skip,
        'top': top,
    }
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    invoke_result = pulumi.runtime.invoke('azure-native:apimanagement/v20220801:listPolicyFragmentReferences', __args__, opts=opts, typ=ListPolicyFragmentReferencesResult).value
    return AwaitableListPolicyFragmentReferencesResult(
        count=pulumi.get(invoke_result, 'count'),
        next_link=pulumi.get(invoke_result, 'next_link'),
        value=pulumi.get(invoke_result, 'value'))
@_utilities.lift_output_func(list_policy_fragment_references)
def METHOD_NAME(id: Optional[pulumi.Input[str]] = None,
                resource_group_name: Optional[pulumi.Input[str]] = None,
                service_name: Optional[pulumi.Input[str]] = None,
                skip: Optional[pulumi.Input[Optional[int]]] = None,
                top: Optional[pulumi.Input[Optional[int]]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListPolicyFragmentReferencesResult]:
    """
    Lists policy resources that reference the policy fragment.

    :param str id: A resource identifier.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str service_name: The name of the API Management service.
    :param int skip: Number of records to skip.
    :param int top: Number of records to return.
    """
    # Body intentionally empty: lift_output_func wraps the plain function
    # above to provide the Output-returning implementation.
    ...
# Author: Google
# See the LICENSE file for legal information regarding use of this file.
# GCM derived from Go's implementation in crypto/cipher.
#
# https://golang.org/src/crypto/cipher/gcm.go
# GCM works over elements of the field GF(2^128), each of which is a 128-bit
# polynomial. Throughout this implementation, polynomials are represented as
# Python integers with the low-order terms at the most significant bits. So a
# 128-bit polynomial is an integer from 0 to 2^128-1 with the most significant
# bit representing the x^0 term and the least significant bit representing the
# x^127 term. This bit reversal also applies to polynomials used as indices in a
# look-up table.
from __future__ import division
from tlslite.utils import python_aes
from .constanttime import ct_compare_digest
from .cryptomath import bytesToNumber, numberToByteArray
class AESGCM(object):
    """
    AES-GCM implementation. Note: this implementation does not attempt
    to be side-channel resistant. It's also rather slow.
    """
    def __init__(self, key, implementation, rawAesEncrypt):
        """
        :param key: 16-byte (AES-128) or 32-byte (AES-256) key.
        :param implementation: name of the backing AES implementation.
        :param rawAesEncrypt: callable that encrypts one 16-byte block.
        """
        self.isBlockCipher = False
        self.isAEAD = True
        self.nonceLength = 12
        self.tagLength = 16
        self.implementation = implementation
        if len(key) == 16:
            self.name = "aes128gcm"
        elif len(key) == 32:
            self.name = "aes256gcm"
        else:
            raise AssertionError()
        self.key = key
        self._rawAesEncrypt = rawAesEncrypt
        # Bulk cipher (mode 6 — presumably CTR, mirroring PyCrypto's MODE_CTR
        # constant; confirm against python_aes). The counter is reset per call.
        self._ctr = python_aes.new(self.key, 6, bytearray(b'\x00' * 16))
        # The GCM key is AES(0).
        h = bytesToNumber(self._rawAesEncrypt(bytearray(16)))
        # Pre-compute all 4-bit multiples of h. Note that bits are reversed
        # because our polynomial representation places low-order terms at the
        # most significant bit. Thus x^0 * h = h is at index 0b1000 = 8 and
        # x^1 * h is at index 0b0100 = 4.
        self._productTable = [0] * 16
        self._productTable[self._reverseBits(1)] = h
        for i in range(2, 16, 2):
            self._productTable[self._reverseBits(i)] = \
                self._gcmShift(self._productTable[self._reverseBits(i//2)])
            self._productTable[self._reverseBits(i+1)] = \
                self._gcmAdd(self._productTable[self._reverseBits(i)], h)
    def METHOD_NAME(self, ciphertext, ad, tagMask):
        """GHASH ad and ciphertext, XOR with tagMask; return the 16-byte tag."""
        y = 0
        y = self._update(y, ad)
        y = self._update(y, ciphertext)
        # Final GHASH block: bit length of ad (upper 64 bits) and of ciphertext.
        y ^= (len(ad) << (3 + 64)) | (len(ciphertext) << 3)
        y = self._mul(y)
        y ^= bytesToNumber(tagMask)
        return numberToByteArray(y, 16)
    def _update(self, y, data):
        """Absorb data into GHASH state y, zero-padding the final short block."""
        for i in range(0, len(data) // 16):
            y ^= bytesToNumber(data[16*i:16*i+16])
            y = self._mul(y)
        extra = len(data) % 16
        if extra != 0:
            block = bytearray(16)
            block[:extra] = data[-extra:]
            y ^= bytesToNumber(block)
            y = self._mul(y)
        return y
    def _mul(self, y):
        """ Returns y*H, where H is the GCM key. """
        ret = 0
        # Multiply H by y 4 bits at a time, starting with the highest power
        # terms.
        for i in range(0, 128, 4):
            # Multiply by x^4. The reduction for the top four terms is
            # precomputed.
            retHigh = ret & 0xf
            ret >>= 4
            ret ^= (AESGCM._gcmReductionTable[retHigh] << (128-16))
            # Add in y' * H where y' are the next four terms of y, shifted down
            # to the x^0..x^4. This is one of the pre-computed multiples of
            # H. The multiplication by x^4 shifts them back into place.
            ret ^= self._productTable[y & 0xf]
            y >>= 4
        assert y == 0
        return ret
    def seal(self, nonce, plaintext, data):
        """
        Encrypts and authenticates plaintext using nonce and data. Returns the
        ciphertext, consisting of the encrypted plaintext and tag concatenated.
        """
        if len(nonce) != 12:
            raise ValueError("Bad nonce length")
        # The initial counter value is the nonce, followed by a 32-bit counter
        # that starts at 1. It's used to compute the tag mask.
        counter = bytearray(16)
        counter[:12] = nonce
        counter[-1] = 1
        tagMask = self._rawAesEncrypt(counter)
        # The counter starts at 2 for the actual encryption.
        counter[-1] = 2
        self._ctr.counter = counter
        ciphertext = self._ctr.encrypt(plaintext)
        tag = self.METHOD_NAME(ciphertext, data, tagMask)
        return ciphertext + tag
    def open(self, nonce, ciphertext, data):
        """
        Decrypts and authenticates ciphertext using nonce and data. If the
        tag is valid, the plaintext is returned. If the tag is invalid,
        returns None.
        """
        if len(nonce) != 12:
            raise ValueError("Bad nonce length")
        if len(ciphertext) < 16:
            return None
        tag = ciphertext[-16:]
        ciphertext = ciphertext[:-16]
        # The initial counter value is the nonce, followed by a 32-bit counter
        # that starts at 1. It's used to compute the tag mask.
        counter = bytearray(16)
        counter[:12] = nonce
        counter[-1] = 1
        tagMask = self._rawAesEncrypt(counter)
        # Constant-time comparison to avoid a tag-forgery timing oracle.
        if not ct_compare_digest(tag, self.METHOD_NAME(ciphertext, data, tagMask)):
            return None
        # The counter starts at 2 for the actual decryption.
        counter[-1] = 2
        self._ctr.counter = counter
        return self._ctr.decrypt(ciphertext)
    @staticmethod
    def _reverseBits(i):
        # Reverse the four bits of i (e.g. 0b0001 -> 0b1000).
        assert i < 16
        i = ((i << 2) & 0xc) | ((i >> 2) & 0x3)
        i = ((i << 1) & 0xa) | ((i >> 1) & 0x5)
        return i
    @staticmethod
    def _gcmAdd(x, y):
        # Addition in GF(2^128) is XOR.
        return x ^ y
    @staticmethod
    def _gcmShift(x):
        # Multiplying by x is a right shift, due to bit order.
        highTermSet = x & 1
        x >>= 1
        if highTermSet:
            # The x^127 term was shifted up to x^128, so subtract a 1+x+x^2+x^7
            # term. This is 0b11100001 or 0xe1 when represented as an 8-bit
            # polynomial.
            x ^= 0xe1 << (128-8)
        return x
    @staticmethod
    def _inc32(counter):
        # Increment the low 32 bits of the counter block in place (mod 2^32).
        for i in range(len(counter)-1, len(counter)-5, -1):
            counter[i] = (counter[i] + 1) % 256
            if counter[i] != 0:
                break
        return counter
    # _gcmReductionTable[i] is i * (1+x+x^2+x^7) for all 4-bit polynomials i. The
    # result is stored as a 16-bit polynomial. This is used in the reduction step to
    # multiply elements of GF(2^128) by x^4.
    _gcmReductionTable = [
        0x0000, 0x1c20, 0x3840, 0x2460, 0x7080, 0x6ca0, 0x48c0, 0x54e0,
        0xe100, 0xfd20, 0xd940, 0xc560, 0x9180, 0x8da0, 0xa9c0, 0xb5e0,
    ]
6,513 | is valid test tmpdir | #!/usr/bin/env python3
# Copyright (c) 2017-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Combine logs from multiple bitcoin nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile.
If no argument is provided, the most recent test directory will be used."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import pathlib
import re
import sys
import tempfile
# N.B.: don't import any local modules here - this script must remain executable
# without the parent module installed.
# Should match same symbol in `test_framework.test_framework`.
TMPDIR_PREFIX = "bitcoin_func_test_"
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?Z")
# One parsed log record: ISO timestamp, source label ("test"/"nodeN"), full text.
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
    """Main function. Parses args, reads the log files and renders them as text or html."""
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        'testdir', nargs='?', default='',
        help=('temporary test directory to combine logs from. '
              'Defaults to the most recent'))
    parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
    parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
    args = parser.parse_args()
    # --color and --html are mutually exclusive output modes.
    if args.html and args.color:
        print("Only one out of --color or --html should be specified")
        sys.exit(1)
    testdir = args.testdir or find_latest_test_dir()
    if not testdir:
        print("No test directories found")
        sys.exit(1)
    if not args.testdir:
        print("Opening latest test directory: {}".format(testdir), file=sys.stderr)
    # Default color is the empty string (no escape codes) for unknown sources.
    colors = defaultdict(lambda: '')
    if args.color:
        colors["test"] = "\033[0;36m"  # CYAN
        colors["node0"] = "\033[0;34m"  # BLUE
        colors["node1"] = "\033[0;32m"  # GREEN
        colors["node2"] = "\033[0;31m"  # RED
        colors["node3"] = "\033[0;33m"  # YELLOW
        colors["reset"] = "\033[0m"  # Reset font color
    log_events = read_logs(testdir)
    if args.html:
        print_logs_html(log_events)
    else:
        print_logs_plain(log_events, colors)
        print_node_warnings(testdir, colors)
def read_logs(tmp_dir):
    """Reads log files.

    Delegates to generator function get_log_events() to provide individual log events
    for each of the input log files."""
    # Find out what the folder is called that holds the debug.log file
    glob = pathlib.Path(tmp_dir).glob('node0/**/debug.log')
    path = next(glob, None)
    if path:
        assert next(glob, None) is None  # more than one debug.log, should never happen
        chain = re.search(r'node0/(.+?)/debug\.log$', path.as_posix()).group(1)  # extract the chain name
    else:
        chain = 'regtest'  # fallback to regtest (should only happen when none exists)
    files = [("test", "%s/test_framework.log" % tmp_dir)]
    # Collect node log files until the first missing node directory.
    for i in itertools.count():
        logfile = "{}/node{}/{}/debug.log".format(tmp_dir, i, chain)
        if not os.path.isfile(logfile):
            break
        files.append(("node%d" % i, logfile))
    # Merge all sources in timestamp order: LogEvent tuples compare by their
    # first field, which is the timestamp.
    return heapq.merge(*[get_log_events(source, f) for source, f in files])
def print_node_warnings(tmp_dir, colors):
    """Print nodes' errors and warnings"""
    warnings = []
    # Nodes' captured stdout/stderr live in per-node subdirectories; any
    # non-empty file there is surfaced as a warning.
    for stream in ['stdout', 'stderr']:
        for i in itertools.count():
            folder = "{}/node{}/{}".format(tmp_dir, i, stream)
            if not os.path.isdir(folder):
                break
            for (_, _, fns) in os.walk(folder):
                for fn in fns:
                    warning = pathlib.Path('{}/{}'.format(folder, fn)).read_text().strip()
                    if warning:
                        warnings.append(("node{} {}".format(i, stream), warning))
    print()
    for w in warnings:
        # Color by node name (first token of the source label).
        print("{} {} {} {}".format(colors[w[0].split()[0]], w[0], w[1], colors["reset"]))
def find_latest_test_dir():
    """Return the most recently modified test tmpdir path, or None if none exist."""
    tmp_root = tempfile.gettempdir()

    def join_tmp(basename):
        return os.path.join(tmp_root, basename)

    def METHOD_NAME(basename):
        # A candidate must carry the framework prefix and be a readable directory.
        if not basename.startswith(TMPDIR_PREFIX):
            return False
        candidate = join_tmp(basename)
        return os.path.isdir(candidate) and os.access(candidate, os.R_OK)

    candidates = [join_tmp(entry) for entry in os.listdir(tmp_root) if METHOD_NAME(entry)]
    if not candidates:
        return None
    return max(candidates, key=os.path.getmtime)
def get_log_events(source, logfile):
    """Generator function that returns individual log events.

    Log events may be split over multiple lines. We use the timestamp
    regex match as the marker for a new log event."""
    try:
        with open(logfile, 'r', encoding='utf-8') as infile:
            event = ''
            timestamp = ''
            for line in infile:
                # skip blank lines
                if line == '\n':
                    continue
                # if this line has a timestamp, it's the start of a new log event.
                time_match = TIMESTAMP_PATTERN.match(line)
                if time_match:
                    # Emit the previously accumulated event before starting a new one.
                    if event:
                        yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
                    timestamp = time_match.group()
                    if time_match.group(1) is None:
                        # timestamp does not have microseconds. Add zeroes.
                        # (keeps timestamps comparable across sources for heapq.merge)
                        timestamp_micro = timestamp.replace("Z", ".000000Z")
                        line = line.replace(timestamp, timestamp_micro)
                        timestamp = timestamp_micro
                    event = line
                # if it doesn't have a timestamp, it's a continuation line of the previous log.
                else:
                    # Add the line. Prefix with space equivalent to the source + timestamp so log lines are aligned
                    event += " " + line
            # Flush the final event
            yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
    except FileNotFoundError:
        print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs_plain(log_events, colors):
    """Write the combined log events to stdout as (optionally colored) text."""
    for event in log_events:
        source_color = colors[event.source.rstrip()]
        reset = colors["reset"]
        lines = event.event.splitlines()
        # First line carries the padded source label; continuations are bare.
        print("{0} {1: <5} {2} {3}".format(source_color, event.source, lines[0], reset))
        for continuation in lines[1:]:
            print("{0}{1}{2}".format(source_color, continuation, reset))
def print_logs_html(log_events):
    """Renders the iterator of log events into html."""
    # jinja2 is an optional dependency used only for HTML output, so it is
    # imported lazily here rather than at module scope.
    try:
        import jinja2  # type:ignore
    except ImportError:
        print("jinja2 not found. Try `pip install jinja2`")
        sys.exit(1)
    # Template is looked up relative to the current working directory.
    print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
          .get_template('combined_log_template.html')
          .render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
# Script entry point: combine and render logs when run directly.
if __name__ == '__main__':
    main()
6,514 | get | # SPDX-License-Identifier: MIT
import os, tempfile, shutil, subprocess, re
from . import sysreg
__all__ = ["AsmException", "ARMAsm"]
# Host detection: pick a default cross-toolchain prefix and default compiler
# family. All of it is overridable via the USE_CLANG / TOOLCHAIN / ARCH
# environment variables below.
uname = os.uname()
if uname.sysname == "Darwin":
    DEFAULT_ARCH = "aarch64-linux-gnu-"
    if uname.machine == "arm64":
        TOOLCHAIN = "/opt/homebrew/opt/llvm/bin/"
    else:
        TOOLCHAIN = "/usr/local/opt/llvm/bin/"
    USE_CLANG = "1"
else:
    if uname.machine == "aarch64":
        # Native aarch64 Linux host: no cross prefix needed.
        DEFAULT_ARCH = ""
    else:
        DEFAULT_ARCH = "aarch64-linux-gnu-"
    USE_CLANG = "0"
    TOOLCHAIN = ""
use_clang = os.environ.get("USE_CLANG", USE_CLANG).strip() == "1"
toolchain = os.environ.get("TOOLCHAIN", TOOLCHAIN)
# Tool command templates; "%ARCH" is substituted with the prefix at call time.
if use_clang:
    CC = toolchain + "clang --target=%ARCH"
    LD = toolchain + "ld.lld"
    OBJCOPY = toolchain + "llvm-objcopy"
    OBJDUMP = toolchain + "llvm-objdump"
    NM = toolchain + "llvm-nm"
else:
    CC = toolchain + "%ARCHgcc"
    LD = toolchain + "%ARCHld"
    OBJCOPY = toolchain + "%ARCHobjcopy"
    OBJDUMP = toolchain + "%ARCHobjdump"
    NM = toolchain + "%ARCHnm"
class AsmException(Exception):
    """Raised when assembling or linking a snippet fails."""
    pass
class BaseAsm(object):
    """Assemble a source snippet with the host toolchain and load the result.

    Subclasses supply ARCH/CFLAGS/LDFLAGS and the HEADER/FOOTER boilerplate.
    After construction, ``self.data`` holds the raw ``.text`` bytes and every
    linker symbol is exposed as an attribute (e.g. ``self._start``).
    """
    def __init__(self, source, addr = 0):
        # addr is the link-time base address of the .text section.
        self.source = source
        self._tmp = tempfile.mkdtemp() + os.sep
        self.addr = addr
        self.compile(source)
    def _call(self, program, args):
        # Run a toolchain command, raising on non-zero exit.
        # NOTE(review): shell=True with interpolated args is fine for trusted
        # local tool invocations, unsafe if args ever become untrusted input.
        subprocess.check_call(program.replace("%ARCH", self.ARCH) + " " + args, shell=True)
    def METHOD_NAME(self, program, args):
        # Like _call, but capture and return the tool's stdout as text.
        return subprocess.check_output(program.replace("%ARCH", self.ARCH) + " " + args, shell=True).decode("ascii")
    def compile(self, source):
        """Assemble, link and objcopy the snippet; populate data and symbols."""
        # Rewrite symbolic system-register names into the generic
        # s<op0>_<op1>_c<crn>_c<crm>_<op2> form that the assembler accepts.
        for name, enc in sysreg.sysreg_fwd.items():
            source = re.sub("\\b" + name + "\\b", f"s{enc[0]}_{enc[1]}_c{enc[2]}_c{enc[3]}_{enc[4]}", source)
        self.sfile = self._tmp + "b.S"
        with open(self.sfile, "w") as fd:
            fd.write(self.HEADER + "\n")
            fd.write(source + "\n")
            fd.write(self.FOOTER + "\n")
        self.ofile = self._tmp + "b.o"
        self.elffile = self._tmp + "b.elf"
        self.bfile = self._tmp + "b.b"
        self.nfile = self._tmp + "b.n"
        # assemble -> link at self.addr -> extract raw .text -> dump symbols
        self._call(CC, f"{self.CFLAGS} -c -o {self.ofile} {self.sfile}")
        self._call(LD, f"{self.LDFLAGS} --Ttext={self.addr:#x} -o {self.elffile} {self.ofile}")
        self._call(OBJCOPY, f"-j.text -O binary {self.elffile} {self.bfile}")
        self._call(NM, f"{self.elffile} > {self.nfile}")
        with open(self.bfile, "rb") as fd:
            self.data = fd.read()
        # Expose every nm symbol as an attribute holding its address.
        with open(self.nfile) as fd:
            for line in fd:
                line = line.replace("\n", "")
                addr, type, name = line.split()
                addr = int(addr, 16)
                setattr(self, name, addr)
        self.start = self._start
        self.len = len(self.data)
        self.end = self.start + self.len
    def objdump(self):
        """Print a relocation-annotated disassembly of the linked ELF."""
        self._call(OBJDUMP, f"-rd {self.elffile}")
    def disassemble(self):
        """Yield only the instruction lines ("addr: ...") of the disassembly."""
        output = self.METHOD_NAME(OBJDUMP, f"-zd {self.elffile}")
        for line in output.split("\n"):
            if not line or line.startswith("/"):
                continue
            sl = line.split()
            if not sl or sl[0][-1] != ":":
                continue
            yield line
    def __del__(self):
        # Best-effort removal of the scratch directory.
        if self._tmp:
            shutil.rmtree(self._tmp)
            self._tmp = None
class ARMAsm(BaseAsm):
    """AArch64 flavor of BaseAsm (ARMv8.4-A, bare `_start` entry point)."""
    ARCH = os.path.join(os.environ.get("ARCH", DEFAULT_ARCH))
    CFLAGS = "-pipe -Wall -march=armv8.4-a"
    # lld and GNU ld spell the aarch64 ELF emulation differently.
    if use_clang:
        LDFLAGS = "-maarch64elf"
    else:
        LDFLAGS = "-maarch64linux"
    HEADER = """
.text
.globl _start
_start:
"""
    FOOTER = """
.pool
"""
# Ad-hoc self-test: assemble a snippet (plus any extra instructions passed on
# the command line) at a fixed base address and sanity-check symbol addresses.
if __name__ == "__main__":
    import sys
    code = """
ldr x0, =0xDEADBEEF
b test
mrs x0, spsel
svc 1
%s
test:
b test
ret
""" % (" ".join(sys.argv[1:]))
    c = ARMAsm(code, 0x1238)
    c.objdump()
    assert c.start == 0x1238
    if not sys.argv[1:]:
        # With no extra instructions, "test:" lands 4 instructions (16 bytes) in.
        assert c.test == 0x1248
6,515 | load backend | # Copyright (c) 2012-2016 Seafile Ltd.
import datetime
from importlib import import_module
from warnings import warn
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from seahub.auth.signals import user_logged_in
from constance import config
# Session key under which the logged-in user's name is stored.
SESSION_KEY = '_auth_user_name'
# Session key holding the dotted path of the backend that authenticated the user.
BACKEND_SESSION_KEY = '_auth_user_backend_2'
# Query-string parameter used for post-login redirects.
REDIRECT_FIELD_NAME = 'next'
def METHOD_NAME(path):
    """Import the backend class at dotted *path* and return a new instance.

    Backends that omit the capability flags get them patched onto the class
    (with a PendingDeprecationWarning) so callers can rely on their presence.
    """
    dot = path.rfind('.')
    module, attr = path[:dot], path[dot + 1:]
    try:
        mod = import_module(module)
    except ImportError as e:
        raise ImproperlyConfigured('Error importing authentication backend %s: "%s"' % (module, e))
    except ValueError:
        raise ImproperlyConfigured('Error importing authentication backends. Is AUTHENTICATION_BACKENDS a correctly defined list or tuple?')
    try:
        cls = getattr(mod, attr)
    except AttributeError:
        raise ImproperlyConfigured('Module "%s" does not define a "%s" authentication backend' % (module, attr))
    if not hasattr(cls, 'supports_object_permissions'):
        warn("Authentication backends without a `supports_object_permissions` attribute are deprecated. Please define it in %s." % cls,
             PendingDeprecationWarning)
        cls.supports_object_permissions = False
    if not hasattr(cls, 'supports_anonymous_user'):
        warn("Authentication backends without a `supports_anonymous_user` attribute are deprecated. Please define it in %s." % cls,
             PendingDeprecationWarning)
        cls.supports_anonymous_user = False
    return cls()
def get_backends():
    """Instantiate every backend listed in settings.AUTHENTICATION_BACKENDS."""
    return [METHOD_NAME(backend_path) for backend_path in settings.AUTHENTICATION_BACKENDS]
def authenticate(**credentials):
    """
    If the given credentials are valid, return a User object.
    """
    for backend in get_backends():
        try:
            candidate = backend.authenticate(**credentials)
        except TypeError:
            # This backend doesn't accept these credentials as arguments. Try the next one.
            continue
        if candidate is not None:
            # Annotate the user object with the path of the backend.
            candidate.backend = "%s.%s" % (backend.__module__, backend.__class__.__name__)
            return candidate
def login(request, user):
    """
    Persist a user id and a backend in the request. This way a user doesn't
    have to reauthenticate on every request.
    """
    if user is None:
        user = request.user
    # TODO: It would be nice to support different login methods, like signed cookies.
    user.last_login = datetime.datetime.now()
    # After each ADFS/SAML single sign-on is completed, `_saml2_subject_id` will be recorded in the session,
    # so that to distinguish ADFS/SAML users and local users when logging out.
    # Therefore, every time login, try to delete `_saml2_subject_id` from the session
    # to ensure that `_saml2_subject_id` is brand new and will not interfere with other users' logout.
    try:
        del request.saml_session['_saml2_subject_id']
    except:
        pass
    if SESSION_KEY in request.session:
        if request.session[SESSION_KEY] != user.username:
            # To avoid reusing another user's session, create a new, empty
            # session if the existing session corresponds to a different
            # authenticated user.
            request.session.flush()
    else:
        # Rotate the session key on login to mitigate session fixation.
        request.session.cycle_key()
    request.session[SESSION_KEY] = user.username
    request.session[BACKEND_SESSION_KEY] = user.backend
    if request.session.get('remember_me', False):
        # "Remember me": extend the session lifetime to the configured day count.
        request.session.set_expiry(config.LOGIN_REMEMBER_DAYS * 24 * 60 * 60)
    if hasattr(request, 'user'):
        request.user = user
    user_logged_in.send(sender=user.__class__, request=request, user=user)
def logout(request):
    """
    Removes the authenticated user's ID from the request and flushes their
    session data.
    Also remove all passwords used to decrypt repos.
    """
    request.session.flush()
    if hasattr(request, 'user'):
        from seahub.base.accounts import User
        if isinstance(request.user, User):
            # Do not directly/indirectly import models in package root level.
            from seahub.utils import is_org_context
            if is_org_context(request):
                # Organization accounts keep per-org repo passwords.
                org_id = request.user.org.org_id
                request.user.remove_org_repo_passwds(org_id)
            else:
                request.user.remove_repo_passwds()
        from seahub.auth.models import AnonymousUser
        request.user = AnonymousUser()
def get_user(request):
    """Rebuild the logged-in user from the session, or return AnonymousUser."""
    from seahub.auth.models import AnonymousUser
    try:
        username = request.session[SESSION_KEY]
        backend_path = request.session[BACKEND_SESSION_KEY]
        backend = METHOD_NAME(backend_path)
        # The backend may return None for stale/unknown users; treat as anonymous.
        user = backend.get_user(username) or AnonymousUser()
    except KeyError:
        # No auth keys in the session: nobody is logged in.
        user = AnonymousUser()
    return user
6,516 | test flattener new | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sympy
import cirq
from cirq.study import flatten_expressions
# None of the following tests use expressions of the form
# <constant term> - <other term> because the string of expressions containing
# exactly two terms, one constant term and one non-constant term with a negative
# factor, is not consistent between sympy versions <1.4 and >=1.4.
def test_expr_map_names():
    """Flattened names collide with a pre-seeded value and get a '_1' suffix."""
    flattener = flatten_expressions._ParamFlattener({'collision': '<x + 2>'})
    expressions = [sympy.Symbol('x') + i for i in range(3)]
    syms = flattener.flatten(expressions)
    assert syms == [sympy.Symbol(name) for name in ('x', '<x + 1>', '<x + 2>_1')]
def test_flattener_value_of():
    """value_of resolves constants, symbols and expressions, avoiding name collisions."""
    flattener = flatten_expressions._ParamFlattener({'c': 5, 'x1': 'x1'})
    assert flattener.value_of(9) == 9
    assert flattener.value_of('c') == 5
    assert flattener.value_of(sympy.Symbol('c')) == 5
    # Twice
    assert flattener.value_of(sympy.Symbol('c') / 2 + 1) == sympy.Symbol('<c/2 + 1>')
    assert flattener.value_of(sympy.Symbol('c') / 2 + 1) == sympy.Symbol('<c/2 + 1>')
    # Collisions between the string representation of different expressions
    # This tests the unusual case where str(expr1) == str(expr2) doesn't imply
    # expr1 == expr2. In this case it would be incorrect to flatten to the same
    # symbol because the two expression will evaluate to different values.
    # Also tests that '_#' is appended when avoiding collisions.
    assert flattener.value_of(sympy.Symbol('c') / sympy.Symbol('2 + 1')) == sympy.Symbol(
        '<c/2 + 1>_1'
    )
    assert flattener.value_of(sympy.Symbol('c/2') + 1) == sympy.Symbol('<c/2 + 1>_2')
    assert cirq.flatten([sympy.Symbol('c') / 2 + 1, sympy.Symbol('c/2') + 1])[0] == [
        sympy.Symbol('<c/2 + 1>'),
        sympy.Symbol('<c/2 + 1>_1'),
    ]
def test_flattener_repr():
    """repr shows the param dict, and the custom name function when given."""
    assert repr(flatten_expressions._ParamFlattener({'a': 1})) == ("_ParamFlattener({a: 1})")
    assert repr(
        flatten_expressions._ParamFlattener({'a': 1}, get_param_name=lambda expr: 'x')
    ).startswith("_ParamFlattener({a: 1}, get_param_name=<function ")
def test_expression_map_repr():
    """ExpressionMap's repr round-trips through eval."""
    cirq.testing.assert_equivalent_repr(cirq.ExpressionMap({'a': 'b'}))
def test_flatten_circuit():
    """flatten replaces compound exponents with fresh symbols and returns the map."""
    qubit = cirq.LineQubit(0)
    a = sympy.Symbol('a')
    circuit = cirq.Circuit(cirq.X(qubit) ** a, cirq.X(qubit) ** (1 + a / 2))
    c_flat, expr_map = cirq.flatten(circuit)
    c_expected = cirq.Circuit(cirq.X(qubit) ** a, cirq.X(qubit) ** sympy.Symbol('<a/2 + 1>'))
    assert c_flat == c_expected
    assert isinstance(expr_map, cirq.ExpressionMap)
    assert expr_map == {a: a, 1 + a / 2: sympy.Symbol('<a/2 + 1>')}
def test_transform_params():
    """flatten_with_params evaluates each flattened expression at the given values."""
    qubit = cirq.LineQubit(0)
    a = sympy.Symbol('a')
    circuit = cirq.Circuit(cirq.X(qubit) ** (a / 4), cirq.X(qubit) ** (1 + a / 2))
    params = {'a': 3}
    _, new_params = cirq.flatten_with_params(circuit, params)
    expected_params = {sympy.Symbol('<a/4>'): 3 / 4, sympy.Symbol('<a/2 + 1>'): 1 + 3 / 2}
    assert new_params == expected_params
def test_transform_sweep():
    """flatten_with_sweep maps each sweep point onto the flattened symbols."""
    qubit = cirq.LineQubit(0)
    a = sympy.Symbol('a')
    circuit = cirq.Circuit(cirq.X(qubit) ** (a / 4), cirq.X(qubit) ** (1 + a / 2))
    sweep = cirq.Linspace(a, start=0, stop=3, length=4)
    _, new_sweep = cirq.flatten_with_sweep(circuit, sweep)
    assert isinstance(new_sweep, cirq.Sweep)
    resolvers = list(new_sweep)
    expected_resolvers = [
        cirq.ParamResolver({'<a/4>': 0.0, '<a/2 + 1>': 1.0}),
        cirq.ParamResolver({'<a/4>': 0.25, '<a/2 + 1>': 1.5}),
        cirq.ParamResolver({'<a/4>': 0.5, '<a/2 + 1>': 2}),
        cirq.ParamResolver({'<a/4>': 0.75, '<a/2 + 1>': 2.5}),
    ]
    assert resolvers == expected_resolvers
def METHOD_NAME():
    """Constructing a _ParamFlattener from another copies its param dict."""
    flattener = flatten_expressions._ParamFlattener({'a': 'b'})
    flattener2 = flatten_expressions._ParamFlattener(flattener)
    assert isinstance(flattener2, flatten_expressions._ParamFlattener)
    assert flattener2.param_dict == flattener.param_dict
def test_resolver_new():
    """ParamResolver(flattener) returns the flattener itself, not a copy."""
    flattener = flatten_expressions._ParamFlattener({'a': 'b'})
    flattener2 = cirq.ParamResolver(flattener)
    assert flattener2 is flattener
def test_transformed_sweep():
    """An ExpressionMap-transformed sweep exposes the new keys and evaluated points."""
    a = sympy.Symbol('a')
    sweep = cirq.Linspace('a', start=0, stop=3, length=4)
    expr_map = cirq.ExpressionMap({a / 4: 'x0', 1 - a / 2: 'x1'})
    transformed = expr_map.transform_sweep(sweep)
    assert len(transformed) == 4
    assert transformed.keys == ['x0', 'x1']
    params = list(transformed.param_tuples())
    assert len(params) == 4
    assert params[1] == (('x0', 1 / 4), ('x1', 1 - 1 / 2))
def test_transformed_sweep_equality():
    """Transformed sweeps compare equal iff both the sweep and the map agree."""
    a = sympy.Symbol('a')
    sweep = cirq.Linspace('a', start=0, stop=3, length=4)
    expr_map = cirq.ExpressionMap({a / 4: 'x0', 1 - a / 4: 'x1'})
    sweep2 = cirq.Linspace(a, start=0, stop=3, length=4)
    expr_map2 = cirq.ExpressionMap({a / 4: 'x0', 1 - a / 4: 'x1'})
    sweep3 = cirq.Linspace(a, start=0, stop=3, length=20)
    expr_map3 = cirq.ExpressionMap({a / 20: 'x0', 1 - a / 20: 'x1'})
    et = cirq.testing.EqualsTester()
    et.make_equality_group(
        lambda: expr_map.transform_sweep(sweep),
        lambda: expr_map.transform_sweep(sweep2),
        lambda: expr_map2.transform_sweep(sweep2),
    )
    et.add_equality_group(expr_map.transform_sweep(sweep3))
    et.add_equality_group(expr_map3.transform_sweep(sweep))
    et.add_equality_group(expr_map3.transform_sweep(sweep3))
6,517 | test blame newest | # Copyright 2010-2023 The pygit2 contributors
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""Tests for Blame objects."""
import pytest
from pygit2 import Signature, Oid, GIT_BLAME_IGNORE_WHITESPACE
# File whose line history the fixture repository tracks.
PATH = 'hello.txt'
# Expected blame data per hunk:
# (final commit id, final start line number, final committer, boundary flag).
HUNKS = [
    (Oid(hex='acecd5ea2924a4b900e7e149496e1f4b57976e51'), 1,
     Signature('J. David Ibañez', 'jdavid@itaapy.com',
               1297179898, 60, encoding='utf-8'), True),
    (Oid(hex='6aaa262e655dd54252e5813c8e5acd7780ed097d'), 2,
     Signature('J. David Ibañez', 'jdavid@itaapy.com',
               1297696877, 60, encoding='utf-8'), False),
    (Oid(hex='4ec4389a8068641da2d6578db0419484972284c8'), 3,
     Signature('J. David Ibañez', 'jdavid@itaapy.com',
               1297696908, 60, encoding='utf-8'), False)
]
def test_blame_index(testrepo):
    """Iterating a Blame yields one hunk per commit with the expected fields."""
    blame = testrepo.blame(PATH)
    assert len(blame) == 3
    for i, hunk in enumerate(blame):
        assert hunk.lines_in_hunk == 1
        assert HUNKS[i][0] == hunk.final_commit_id
        assert HUNKS[i][1] == hunk.final_start_line_number
        assert HUNKS[i][2] == hunk.final_committer
        # With no history rewriting, orig_* mirrors final_* for each hunk.
        assert HUNKS[i][0] == hunk.orig_commit_id
        assert hunk.orig_path == PATH
        assert HUNKS[i][1] == hunk.orig_start_line_number
        assert HUNKS[i][2] == hunk.orig_committer
        assert HUNKS[i][3] == hunk.boundary
def test_blame_flags(blameflagsrepo):
    """Blame with GIT_BLAME_IGNORE_WHITESPACE still attributes all hunks correctly."""
    blame = blameflagsrepo.blame(PATH, flags=GIT_BLAME_IGNORE_WHITESPACE)
    assert len(blame) == 3
    for i, hunk in enumerate(blame):
        assert hunk.lines_in_hunk == 1
        assert HUNKS[i][0] == hunk.final_commit_id
        assert HUNKS[i][1] == hunk.final_start_line_number
        assert HUNKS[i][2] == hunk.final_committer
        assert HUNKS[i][0] == hunk.orig_commit_id
        assert hunk.orig_path == PATH
        assert HUNKS[i][1] == hunk.orig_start_line_number
        assert HUNKS[i][2] == hunk.orig_committer
        assert HUNKS[i][3] == hunk.boundary
def test_blame_with_invalid_index(testrepo):
    """Out-of-range and negative hunk indices each raise IndexError."""
    blame = testrepo.blame(PATH)
    # Each bad index gets its own raises-block: previously both lookups sat
    # inside a single helper under one pytest.raises, so the second lookup
    # (blame[-1]) was dead code — the first one always raised before it ran.
    with pytest.raises(IndexError):
        blame[100000]
    with pytest.raises(IndexError):
        blame[-1]
def test_blame_for_line(testrepo):
    """for_line maps each 1-based line number to its expected hunk."""
    blame = testrepo.blame(PATH)
    # Check every tracked line. The previous zip(range(0, 2), range(1, 3))
    # stopped one short, so the last hunk (line 3) was never verified.
    for i, line in enumerate(range(1, len(HUNKS) + 1)):
        hunk = blame.for_line(line)
        assert hunk.lines_in_hunk == 1
        assert HUNKS[i][0] == hunk.final_commit_id
        assert HUNKS[i][1] == hunk.final_start_line_number
        assert HUNKS[i][2] == hunk.final_committer
        assert HUNKS[i][0] == hunk.orig_commit_id
        assert hunk.orig_path == PATH
        assert HUNKS[i][1] == hunk.orig_start_line_number
        assert HUNKS[i][2] == hunk.orig_committer
        assert HUNKS[i][3] == hunk.boundary
def test_blame_with_invalid_line(testrepo):
    """Zero, huge and negative line numbers each raise IndexError."""
    blame = testrepo.blame(PATH)
    # One raises-block per bad line number: previously all three calls sat
    # inside a single helper under one pytest.raises, so only the first call
    # was ever exercised — the other two were dead code.
    with pytest.raises(IndexError):
        blame.for_line(0)
    with pytest.raises(IndexError):
        blame.for_line(100000)
    with pytest.raises(IndexError):
        blame.for_line(-1)
def METHOD_NAME(testrepo):
    """Blame limited by newest_commit only reports hunks up to that commit."""
    # Each rev caps history at a different ancestor and thus a different hunk count.
    revs = [
        ( 'master^2', 3 ),
        ( 'master^2^', 2 ),
        ( 'master^2^^', 1 ),
    ]
    for rev, num_commits in revs:
        commit = testrepo.revparse_single(rev)
        blame = testrepo.blame(PATH, newest_commit=commit.id)
        assert len(blame) == num_commits
        for i, hunk in enumerate(tuple(blame)[:num_commits]):
            assert hunk.lines_in_hunk == 1
            assert HUNKS[i][0] == hunk.final_commit_id
            assert HUNKS[i][1] == hunk.final_start_line_number
            assert HUNKS[i][2] == hunk.final_committer
            assert HUNKS[i][0] == hunk.orig_commit_id
            assert hunk.orig_path == PATH
            assert HUNKS[i][1] == hunk.orig_start_line_number
            assert HUNKS[i][2] == hunk.orig_committer
            assert HUNKS[i][3] == hunk.boundary
6,518 | is valid episode | from logging import debug, info, warning, error, exception
import re
from datetime import datetime, timedelta
from .. import AbstractServiceHandler
from data.models import Episode, UnprocessedStream
class ServiceHandler(AbstractServiceHandler):
_playlist_api_query = "https://www.googleapis.com/youtube/v3/playlistItems?part=contentDetails&maxResults=50&playlistId={id}&key={key}"
_videos_api_query = "https://youtube.googleapis.com/youtube/v3/videos?part=status&part=snippet&hl=en&id={id}&key={key}"
_channel_url = "https://www.youtube.com/playlist?list={id}"
_channel_re = re.compile("youtube.com/playlist\\?list=([\w-]+)", re.I)
def __init__(self):
super().__init__("youtube", "Youtube", False)
# Episode finding
def get_all_episodes(self, stream, **kwargs):
info(f"Getting live episodes for Youtube/{stream.show_key}")
episode_datas = self._get_feed_episodes(stream.show_key, **kwargs)
# Extract valid episodes from feed and digest
episodes = []
for episode_data in episode_datas:
if METHOD_NAME(episode_data, stream.show_key):
try:
episode = _digest_episode(episode_data)
if episode is not None:
episodes.append(episode)
except:
exception(f"Problem digesting episode for Youtube/{stream.show_key}")
if len(episode_datas) > 0:
debug(" {} episodes found, {} valid".format(len(episode_datas), len(episodes)))
else:
debug(" No episodes found")
return episodes
def _get_feed_episodes(self, show_key, **kwargs):
url = self._get_feed_url(show_key)
if url is None:
error(f"Cannot get feed url for {self.name}/{show_key}")
# Request channel information
response = self.request(url, json=True, **kwargs)
if response is None:
error(f"Cannot get episode feed for {self.name}/{show_key}")
return list()
# Extract videos ids and build new query for all videos
if not _verify_feed(response):
warning("Parsed feed could not be verified, may have unexpected results")
feed = response.get("items", list())
video_ids = [item["contentDetails"]["videoId"] for item in feed]
url = self._get_videos_url(video_ids)
# Request videos information
response = self.request(url, json=True, **kwargs)
if response is None:
error(f"Cannot get video information for {self.name}/{show_key}")
return list()
# Return feed
if not _verify_feed(response):
warning("Parsed feed could not be verified, may have unexpected results")
return response.get("items", list())
def _get_feed_url(self, show_key):
# Show key is the channel ID
if "api_key" not in self.config or not self.config["api_key"]:
error(" Missing API key for access to Youtube channel")
return None
api_key = self.config["api_key"]
if show_key is not None:
return self._playlist_api_query.format(id=show_key, key=api_key)
else:
return None
def _get_videos_url(self, video_ids):
# Videos ids is a list of all videos in feed
if "api_key" not in self.config or not self.config["api_key"]:
error(" Missing API key for access to Youtube channel")
return None
api_key = self.config["api_key"]
if video_ids:
return self._videos_api_query.format(id=','.join(video_ids), key=api_key)
else:
return None
def get_stream_info(self, stream, **kwargs):
    """Stream naming on Youtube is too inconsistent to use; always None."""
    return None
def get_seasonal_streams(self, **kwargs):
    """Youtube has no seasonal listing to scrape; always empty."""
    return []
def get_stream_link(self, stream):
    """Return the public channel url for the stream's show key."""
    return self._channel_url.format(id=stream.show_key)
def extract_show_key(self, url):
    """Extract the channel id from a Youtube channel url, or None."""
    match = self._channel_re.search(url)
    return match.group(1) if match else None
# Episode feeds format
def _verify_feed(feed):
debug("Verifying feed")
if not (feed["kind"] == "youtube#playlistItemListResponse" or feed["kind"] == "youtube#videoListResponse"):
debug(" Feed does not match request")
return False
if feed["pageInfo"]["totalResults"] > feed["pageInfo"]["resultsPerPage"]:
debug(f" Too many results ({feed['pageInfo']['totalResults']}), will not get all episodes")
return False
debug(" Feed verified")
return True
# Titles matching any of these are never valid episodes
# (promos/openings/endings, blu-ray announcements, previews).
_excludors = [re.compile(x, re.I) for x in [
    "(?:[^a-zA-Z]|^)(?:PV|OP|ED)(?:[^a-zA-Z]|$)",
    "blu.?ray",
    "preview",
]]
# Patterns for pulling an episode number out of a video title; the first
# capture group of the first matching pattern wins.
_num_extractors = [re.compile(x, re.I) for x in [
    r".*\D(\d{2,3})(?:\D|$)",
    r".*episode (\d+)(?:\D|$)",
    r".*S(?:\d+)E(\d+)(?:\D|$)",
]]
def METHOD_NAME(feed_episode, show_id):
    """Return True if a feed entry looks like a watchable, numbered episode.

    :param feed_episode: Raw video resource from the Youtube API
    :param show_id: Channel id (unused here; kept for interface parity)
    """
    if feed_episode["status"]["privacyStatus"] == "private":
        info(" Video was excluded (is private)")
        return False
    if feed_episode["snippet"]["liveBroadcastContent"] == "upcoming":
        info(" Video was excluded (not yet online)")
        return False
    title = feed_episode["snippet"]["localized"]["title"]
    if len(title) == 0:
        # Typo fix: message previously read "exluded"
        info(" Video was excluded (no title found)")
        return False
    if any(ex.search(title) is not None for ex in _excludors):
        # Typo fix: message previously read "exluded"
        info(" Video was excluded (excludors)")
        return False
    if all(num.match(title) is None for num in _num_extractors):
        info(" Video was excluded (no episode number found)")
        return False
    return True
def _digest_episode(feed_episode):
    """Convert a raw Youtube video resource into an Episode, or None if invalid.

    :param feed_episode: Raw video resource from the Youtube API
    :returns: Episode carrying number, link and publish date, or None
    """
    _video_url = "https://www.youtube.com/watch?v={video_id}"

    snippet = feed_episode["snippet"]
    title = snippet["localized"]["title"]
    episode_num = _extract_episode_num(title)
    # Reject missing or implausible episode numbers
    if episode_num is None or not 0 < episode_num < 720:
        return None

    date_string = snippet["publishedAt"].replace('Z', '')
    #date_string = snippet["publishedAt"].replace('Z', '+00:00') # Use this for offset-aware dates
    # Bug fix: datetime objects are always truthy, so the old
    # "fromisoformat(...) or datetime.utcnow()" fallback could never trigger;
    # a malformed timestamp raised ValueError instead. Catch it so the
    # intended utcnow() fallback actually applies.
    try:
        date = datetime.fromisoformat(date_string)
    except ValueError:
        date = datetime.utcnow()

    link = _video_url.format(video_id=feed_episode["id"])
    return Episode(episode_num, None, link, date)
def _extract_episode_num(name):
    """Extract an episode number from a video title.

    :param name: Video title
    :returns: int episode number, or None when excluded or no number found
    """
    debug(f"Extracting episode number from \"{name}\"")
    if any(ex.search(name) is not None for ex in _excludors):
        return None
    for regex in _num_extractors:
        match = regex.match(name)
        if match is not None:
            num = int(match.group(1))
            debug(f" Match found, num={num}")
            return num
    debug(" No match found")
    # Bug fix: was "return none" (lowercase), which raised NameError
    # whenever no pattern matched.
    return None
6,519 | test describe user unknown | import boto3
import pytest
from botocore.exceptions import ClientError
from moto import mock_mq
@mock_mq
def test_create_user():
    """A user added via create_user appears in the broker description."""
    mq = boto3.client("mq", region_name="us-east-1")
    broker_settings = dict(
        AutoMinorVersionUpgrade=False,
        BrokerName="testbroker",
        DeploymentMode="dm",
        EngineType="ACTIVEMQ",
        EngineVersion="version",
        HostInstanceType="hit",
        PubliclyAccessible=True,
        Users=[],
    )
    broker_id = mq.create_broker(**broker_settings)["BrokerId"]

    mq.create_user(BrokerId=broker_id, Username="admin", Password="adm1n")

    assert mq.describe_broker(BrokerId=broker_id)["Users"] == [{"Username": "admin"}]
@mock_mq
def test_describe_user():
    """describe_user echoes console access, groups and username."""
    mq = boto3.client("mq", region_name="us-east-1")
    broker_settings = dict(
        AutoMinorVersionUpgrade=False,
        BrokerName="testbroker",
        DeploymentMode="dm",
        EngineType="ACTIVEMQ",
        EngineVersion="version",
        HostInstanceType="hit",
        PubliclyAccessible=True,
        Users=[],
    )
    broker_id = mq.create_broker(**broker_settings)["BrokerId"]
    mq.create_user(
        BrokerId=broker_id,
        Username="admin",
        Password="adm1n",
        ConsoleAccess=True,
        Groups=["group1", "group2"],
    )

    user = mq.describe_user(BrokerId=broker_id, Username="admin")
    assert user["BrokerId"] == broker_id
    assert user["ConsoleAccess"] is True
    assert user["Groups"] == ["group1", "group2"]
    assert user["Username"] == "admin"
@mock_mq
def METHOD_NAME():
    """Describing a user that was never created raises NotFoundException."""
    mq = boto3.client("mq", region_name="us-east-2")
    broker_settings = dict(
        AutoMinorVersionUpgrade=False,
        BrokerName="testbroker",
        DeploymentMode="dm",
        EngineType="ACTIVEMQ",
        EngineVersion="version",
        HostInstanceType="hit",
        PubliclyAccessible=True,
        Users=[],
    )
    broker_id = mq.create_broker(**broker_settings)["BrokerId"]

    with pytest.raises(ClientError) as exc:
        mq.describe_user(BrokerId=broker_id, Username="unknown")
    err = exc.value.response["Error"]
    assert err["Code"] == "NotFoundException"
    assert (
        err["Message"]
        == "Can't find requested user [unknown]. Make sure your user exists."
    )
@mock_mq
def test_list_users_empty():
    """A broker created without users lists none."""
    mq = boto3.client("mq", region_name="us-east-1")
    broker_settings = dict(
        AutoMinorVersionUpgrade=False,
        BrokerName="testbroker",
        DeploymentMode="dm",
        EngineType="ACTIVEMQ",
        EngineVersion="version",
        HostInstanceType="hit",
        PubliclyAccessible=True,
        Users=[],
    )
    broker_id = mq.create_broker(**broker_settings)["BrokerId"]

    listing = mq.list_users(BrokerId=broker_id)
    assert listing["BrokerId"] == broker_id
    assert listing["Users"] == []
@mock_mq
def test_list_users():
    """list_users returns both the initial user and one added afterwards."""
    mq = boto3.client("mq", region_name="us-east-1")
    broker_settings = dict(
        AutoMinorVersionUpgrade=False,
        BrokerName="testbroker",
        DeploymentMode="dm",
        EngineType="ACTIVEMQ",
        EngineVersion="version",
        HostInstanceType="hit",
        PubliclyAccessible=True,
        Users=[{"Username": "admin", "Password": "adm1n"}],
    )
    broker_id = mq.create_broker(**broker_settings)["BrokerId"]
    mq.create_user(BrokerId=broker_id, Username="user1", Password="us3r1")

    listing = mq.list_users(BrokerId=broker_id)
    assert listing["BrokerId"] == broker_id
    assert len(listing["Users"]) == 2
    assert {"Username": "admin"} in listing["Users"]
    assert {"Username": "user1"} in listing["Users"]
@mock_mq
def test_update_user():
    """update_user replaces the user's group memberships."""
    mq = boto3.client("mq", region_name="us-east-2")
    broker_settings = dict(
        AutoMinorVersionUpgrade=False,
        BrokerName="testbroker",
        DeploymentMode="dm",
        EngineType="ACTIVEMQ",
        EngineVersion="version",
        HostInstanceType="hit",
        PubliclyAccessible=True,
        Users=[{"Username": "admin", "Password": "adm1n"}],
    )
    broker_id = mq.create_broker(**broker_settings)["BrokerId"]

    mq.update_user(BrokerId=broker_id, Username="admin", Groups=["administrators"])

    user = mq.describe_user(BrokerId=broker_id, Username="admin")
    assert user["BrokerId"] == broker_id
    assert user["Groups"] == ["administrators"]
    assert user["Username"] == "admin"
@mock_mq
def test_delete_user():
    """delete_user removes exactly the named user, leaving others intact."""
    mq = boto3.client("mq", region_name="us-east-1")
    broker_settings = dict(
        AutoMinorVersionUpgrade=False,
        BrokerName="testbroker",
        DeploymentMode="dm",
        EngineType="ACTIVEMQ",
        EngineVersion="version",
        HostInstanceType="hit",
        PubliclyAccessible=True,
        Users=[{"Username": "admin", "Password": "adm1n"}],
    )
    broker_id = mq.create_broker(**broker_settings)["BrokerId"]
    mq.create_user(BrokerId=broker_id, Username="user1", Password="us3r1")

    mq.delete_user(BrokerId=broker_id, Username="admin")

    listing = mq.list_users(BrokerId=broker_id)
    assert listing["BrokerId"] == broker_id
    assert len(listing["Users"]) == 1
    assert {"Username": "user1"} in listing["Users"]
6,520 | filter special tags | # IMPORTATION STANDARD
import logging
import re
# IMPORTATION THIRDPARTY
# IMPORTATION INTERNAL
from openbb_terminal.core.config.paths import HOME_DIRECTORY
from openbb_terminal.core.log.generation.settings import AppSettings
from openbb_terminal.core.log.generation.user_logger import get_user_uuid
class FormatterWithExceptions(logging.Formatter):
    """Log formatter that prefixes app metadata and scrubs PII from records."""

    # Line layout handed to logging.Formatter, plus the metadata prefix that
    # is prepended to every formatted line. "|" is the field separator.
    DATEFORMAT = "%Y-%m-%dT%H:%M:%S%z"
    LOGFORMAT = "%(asctime)s|%(name)s|%(funcName)s|%(lineno)s|%(message)s"
    LOGPREFIXFORMAT = (
        "%(levelname)s|%(appName)s|%(commitHash)s|%(appId)s|%(sessionId)s|%(userId)s|"
    )

    @staticmethod
    def calculate_level_name(record: logging.LogRecord) -> str:
        """Collapse a record's severity to one letter (X when an exception)."""
        if record.exc_text:
            return "X"
        return record.levelname[0] if record.levelname else "U"

    @staticmethod
    def extract_log_extra(record: logging.LogRecord):
        """Pull optional override fields off the record into a prefix dict."""
        extra = {}
        if hasattr(record, "func_name_override"):
            # Caller supplied the real function name; the line number of the
            # logging shim is meaningless, so zero it out.
            record.funcName = record.func_name_override  # type: ignore
            record.lineno = 0
        if hasattr(record, "session_id"):
            extra["sessionId"] = record.session_id  # type: ignore
        return extra

    @staticmethod
    def mock_ipv4(text: str) -> str:
        """Replace IPv4-looking sequences with a placeholder."""
        return re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " FILTERED_IP ", text)

    @staticmethod
    def mock_email(text: str) -> str:
        """Replace email addresses with a placeholder."""
        return re.sub(
            r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b",
            " FILTERED_EMAIL ",
            text,
        )

    @staticmethod
    def mock_password(text: str) -> str:
        """Blank out JSON "password" values."""
        return re.sub(r'("password": ")[^"]+', r"\1 FILTERED_PASSWORD ", text)

    @staticmethod
    def mock_flair(text: str) -> str:
        """Blank out the user's FLAIR setting."""
        return re.sub(r'("FLAIR": "\[)(.*?)\]', r"\1 FILTERED_FLAIR ]", text)

    @staticmethod
    def mock_home_directory(text: str) -> str:
        """Hide the user's home directory path in the message."""
        home = str(HOME_DIRECTORY.as_posix())
        return text.replace("\\\\", "/").replace(home, "MOCKING_USER_PATH")

    @classmethod
    def METHOD_NAME(cls, text: str) -> str:
        """Neutralize characters that would break the one-line log format."""
        flattened = text.replace("\n", " MOCKING_BREAKLINE ")
        return flattened.replace("'Traceback", "Traceback")

    @classmethod
    def filter_piis(cls, text: str) -> str:
        """Run every PII scrubber over the text (order is deliberate)."""
        scrubbed = cls.mock_ipv4(text=text)
        scrubbed = cls.mock_email(text=scrubbed)
        scrubbed = cls.mock_password(text=scrubbed)
        scrubbed = cls.mock_home_directory(text=scrubbed)
        return cls.mock_flair(text=scrubbed)

    @classmethod
    def filter_log_line(cls, text: str):
        """Apply special-tag filtering, then PII scrubbing, to a log line."""
        return cls.filter_piis(text=cls.METHOD_NAME(text=text))

    # OVERRIDE
    def __init__(
        self,
        app_settings: AppSettings,
        style="%",
        validate=True,
    ) -> None:
        super().__init__(
            fmt=self.LOGFORMAT,
            datefmt=self.DATEFORMAT,
            style=style,
            validate=validate,
        )
        self.__log_settings = app_settings

    # OVERRIDE
    def formatException(self, ei) -> str:
        """Format an exception and repr() it so it stays on one line.

        Parameters
        ----------
        ei : logging._SysExcInfoType
            Exception info tuple to be logged

        Returns
        ----------
        str
            repr() of the formatted traceback
        """
        return repr(super().formatException(ei))

    # OVERRIDE
    def format(self, record: logging.LogRecord) -> str:
        """Format a record as metadata-prefix + scrubbed log line.

        Parameters
        ----------
        record : logging.LogRecord
            Logging record

        Returns
        ----------
        str
            Formatted log message
        """
        settings = self.__log_settings
        prefix_fields = {
            "appName": settings.name,
            "levelname": self.calculate_level_name(record=record),
            "appId": settings.identifier,
            "sessionId": settings.session_id,
            "commitHash": settings.commit_hash,
            "userId": get_user_uuid(),
        }
        prefix_fields.update(self.extract_log_extra(record=record))
        prefix = self.LOGPREFIXFORMAT % prefix_fields

        # "|" is the field separator, so strip it from the message body
        record.msg = record.msg.replace("|", "-MOCK_PIPE-")
        line = self.filter_log_line(text=super().format(record))
        return prefix + line
6,521 | test valid daily scheduler time and day | import datetime
from unittest import mock
from testifycompat import assert_equal
from testifycompat import assert_raises
from testifycompat import run
from testifycompat import TestCase
from tron.config import config_utils
from tron.config import ConfigError
from tron.config import schedule_parse
class TestPadSequence(TestCase):
    """pad_sequence should right-pad with the filler, or truncate, to size."""

    def test_pad_sequence_short(self):
        # Shorter input is padded out with the filler (default None)
        assert_equal(schedule_parse.pad_sequence(range(4), 6), [0, 1, 2, 3, None, None])

    def test_pad_sequence_long(self):
        # Longer input is truncated to the requested size
        assert_equal(schedule_parse.pad_sequence(range(6), 4), [0, 1, 2, 3])

    def test_pad_sequence_exact(self):
        assert_equal(schedule_parse.pad_sequence(range(4), 4), [0, 1, 2, 3])

    def test_pad_sequence_empty(self):
        assert_equal(schedule_parse.pad_sequence([], 2, "a"), ["a", "a"])

    def test_pad_negative_size(self):
        # A negative size yields nothing at all
        assert_equal(schedule_parse.pad_sequence([], -2, "a"), [])
class TestScheduleConfigFromString(TestCase):
    # The groc parser is patched at the schedule_parse module level, so this
    # only verifies dispatch: the raw string is wrapped and handed over.
    @mock.patch(
        "tron.config.schedule_parse.parse_groc_expression", autospec=True,
    )
    def test_groc_config(self, mock_parse_groc):
        """A groc-style string is wrapped and passed to parse_groc_expression."""
        schedule = "every Mon,Wed at 12:00"
        context = config_utils.NullConfigContext
        config = schedule_parse.schedule_config_from_string(schedule, context)
        # The parser's return value is passed through unchanged...
        assert_equal(config, mock_parse_groc.return_value)
        # ...and it received the generic wrapper plus the config context.
        generic_config = schedule_parse.ConfigGenericSchedule("groc daily", schedule, None,)
        mock_parse_groc.assert_called_with(generic_config, context)
class TestValidScheduler(TestCase):
    # The schedulers registry is mocked out, so these tests only check that
    # valid_schedule looks up the "cron" entry and calls it correctly.
    @mock.patch("tron.config.schedule_parse.schedulers", autospec=True)
    def assert_validation(self, schedule, expected, mock_schedulers):
        """Run valid_schedule and verify registry dispatch and arguments."""
        context = config_utils.NullConfigContext
        config = schedule_parse.valid_schedule(schedule, context)
        # The "cron" scheduler factory must have been selected...
        mock_schedulers.__getitem__.assert_called_with("cron")
        func = mock_schedulers.__getitem__.return_value
        # ...its return value passed through, and it was called with the
        # expected generic config plus the context.
        assert_equal(config, func.return_value)
        func.assert_called_with(expected, context)

    def test_cron_from_dict(self):
        """A dict schedule with no jitter gets a zero timedelta."""
        schedule = {"type": "cron", "value": "* * * * *"}
        config = schedule_parse.ConfigGenericSchedule("cron", schedule["value"], datetime.timedelta(),)
        self.assert_validation(schedule, config)

    def test_cron_from_dict_with_jitter(self):
        """A "5 min" jitter string is parsed into a five-minute timedelta."""
        schedule = {"type": "cron", "value": "* * * * *", "jitter": "5 min"}
        config = schedule_parse.ConfigGenericSchedule("cron", schedule["value"], datetime.timedelta(minutes=5),)
        self.assert_validation(schedule, config)
class TestValidCronScheduler(TestCase):
    """valid_cron_scheduler should parse crontab lines and reject bad ones."""

    _suites = ["integration"]

    def validate(self, line):
        # Wrap the raw crontab line the way the config loader would
        generic = schedule_parse.ConfigGenericSchedule("cron", line, None)
        return schedule_parse.valid_cron_scheduler(generic, config_utils.NullConfigContext)

    def test_valid_config(self):
        parsed = self.validate("5 0 L * *")
        assert_equal(parsed.minutes, [5])
        assert_equal(parsed.months, None)
        # "L" in the day-of-month field means "last day of the month"
        assert_equal(parsed.monthdays, ["LAST"])

    def test_invalid_config(self):
        # Too few fields is a configuration error
        assert_raises(ConfigError, self.validate, "* * *")
class TestValidDailyScheduler(TestCase):
    """valid_daily_scheduler should parse "HH:MM[:SS] [days]" specs."""

    def validate(self, config):
        generic = schedule_parse.ConfigGenericSchedule("daily", config, None)
        return schedule_parse.valid_daily_scheduler(generic, config_utils.NullConfigContext)

    def assert_parse(self, config, expected):
        parsed = self.validate(config)
        assert_equal(parsed, schedule_parse.ConfigDailyScheduler(*expected, jitter=None))

    def test_valid_daily_scheduler_start_time(self):
        # Time only: seconds default to 0, no day restriction
        self.assert_parse("14:32", ("14:32 ", 14, 32, 0, set()))

    def test_valid_daily_scheduler_just_days(self):
        self.assert_parse("00:00:00 MWS", ("00:00:00 MWS", 0, 0, 0, {1, 3, 6}))

    def METHOD_NAME(self):
        # Full H:M:S time combined with a day set
        self.assert_parse("17:02:44 SU", ("17:02:44 SU", 17, 2, 44, {0, 6}))

    def test_valid_daily_scheduler_invalid_start_time(self):
        for bad_time in ("5 MWF", "05:30:45:45 MWF", "25:30:45 MWF"):
            assert_raises(ConfigError, self.validate, bad_time)

    def test_valid_daily_scheduler_invalid_days(self):
        assert_raises(ConfigError, self.validate, "SUG")
        assert_raises(ConfigError, self.validate, "3")
# Allow running this module's tests directly via testify's runner
if __name__ == "__main__":
    run()
6,522 | set values | """Context for datastore."""
# pylint: disable=missing-type-doc
from pymodbus.datastore.store import ModbusSequentialDataBlock
from pymodbus.exceptions import NoSuchSlaveException
from pymodbus.logging import Log
class ModbusBaseSlaveContext:  # pylint: disable=too-few-public-methods
    """Interface for a modbus slave data context.

    Derived classes must implemented the following methods:
        reset(self)
        validate(self, fx, address, count=1)
        getValues(self, fx, address, count=1)
        setValues(self, fx, address, values)
    """

    # Function code -> datastore key:
    #   d(iscrete inputs), i(nput registers), h(olding registers), c(oils)
    _fx_mapper = {
        2: "d",
        4: "i",
        3: "h",
        6: "h",
        16: "h",
        22: "h",
        23: "h",
        1: "c",
        5: "c",
        15: "c",
    }

    def decode(self, fx):  # pylint: disable=invalid-name
        """Convert the function code to the datastore to.

        :param fx: The function we are working with
        :returns: one of [d(iscretes),i(nputs),h(olding),c(oils)
        """
        return self._fx_mapper[fx]
# ---------------------------------------------------------------------------#
# Slave Contexts
# ---------------------------------------------------------------------------#
class ModbusSlaveContext(ModbusBaseSlaveContext):
    """This creates a modbus data model with each data access stored in a block.

    The four standard datastores (discretes "d", coils "c", input registers
    "i", holding registers "h") live in ``self.store`` keyed by their
    single-letter code; requests are routed there via ``decode``.
    """

    def __init__(self, *_args, **kwargs):
        """Initialize the datastores.

        :param kwargs: Each element is a ModbusDataBlock
            "di" - Discrete Inputs initializer
            "co" - Coils initializer
            "hr" - Holding Register initializer
            "ir" - Input Registers iniatializer
            "zero_mode" - if False (default) request addresses are shifted +1
        """
        self.store = {}
        self.store["d"] = kwargs.get("di", ModbusSequentialDataBlock.create())
        self.store["c"] = kwargs.get("co", ModbusSequentialDataBlock.create())
        self.store["i"] = kwargs.get("ir", ModbusSequentialDataBlock.create())
        self.store["h"] = kwargs.get("hr", ModbusSequentialDataBlock.create())
        self.zero_mode = kwargs.get("zero_mode", False)

    def __str__(self):
        """Return a string representation of the context.

        :returns: A string representation of the context
        """
        return "Modbus Slave Context"

    def reset(self):
        """Reset all the datastores to their default values."""
        for datastore in iter(self.store.values()):
            datastore.reset()

    def validate(self, fc_as_hex, address, count=1):
        """Validate the request to make sure it is in range.

        :param fc_as_hex: The function we are working with
        :param address: The starting address
        :param count: The number of values to test
        :returns: True if the request in within range, False otherwise
        """
        if not self.zero_mode:
            address += 1
        Log.debug("validate: fc-[{}] address-{}: count-{}", fc_as_hex, address, count)
        return self.store[self.decode(fc_as_hex)].validate(address, count)

    def getValues(self, fc_as_hex, address, count=1):
        """Get `count` values from datastore.

        :param fc_as_hex: The function we are working with
        :param address: The starting address
        :param count: The number of values to retrieve
        :returns: The requested values from a:a+c
        """
        if not self.zero_mode:
            address += 1
        Log.debug("getValues: fc-[{}] address-{}: count-{}", fc_as_hex, address, count)
        return self.store[self.decode(fc_as_hex)].getValues(address, count)

    def METHOD_NAME(self, fc_as_hex, address, values):
        """Set the datastore with the supplied values.

        :param fc_as_hex: The function we are working with
        :param address: The starting address
        :param values: The new values to be set
        """
        if not self.zero_mode:
            address += 1
        Log.debug("setValues[{}] address-{}: count-{}", fc_as_hex, address, len(values))
        self.store[self.decode(fc_as_hex)].METHOD_NAME(address, values)

    def register(self, function_code, fc_as_hex, datablock=None):
        """Register a datablock with the slave context.

        :param function_code: function code (int)
        :param fc_as_hex: string representation of function code (e.g "cf" )
        :param datablock: datablock to associate with this function code
        """
        self.store[fc_as_hex] = datablock or ModbusSequentialDataBlock.create()
        # Bug fix: assigning into the inherited class-level dict mutated the
        # mapping shared by the base class and every other slave context, so a
        # registration on one slave leaked into all of them. Rebind a
        # per-instance copy instead; decode() picks it up via self lookup.
        self._fx_mapper = {**self._fx_mapper, function_code: fc_as_hex}
class ModbusServerContext:
    """This represents a master collection of slave contexts.

    If single is set to true, it will be treated as a single
    context so every slave_id returns the same context. If single
    is set to false, it will be interpreted as a collection of
    slave contexts.
    """

    def __init__(self, slaves=None, single=True):
        """Initialize a new instance of a modbus server context.

        :param slaves: A dictionary of client contexts
        :param single: Set to true to treat this as a single context
        """
        self.single = single
        # In single mode the one shared context is stored under key 0
        self._slaves = {0: slaves or {}} if single else (slaves or {})

    def __iter__(self):
        """Iterate over the current collection of slave contexts.

        :returns: An iterator over (slave_id, context) pairs
        """
        return iter(self._slaves.items())

    def __contains__(self, slave):
        """Check if the given slave is in this list.

        In single mode any id matches, provided a context exists.

        :param slave: slave The slave to check for existence
        :returns: True if the slave exists, False otherwise
        """
        if self.single and self._slaves:
            return True
        return slave in self._slaves

    def __setitem__(self, slave, context):
        """Use to set a new slave context.

        :param slave: The slave context to set
        :param context: The new context to set for this slave
        :raises NoSuchSlaveException: when the id is outside 0x00..0xF7
        """
        if self.single:
            slave = 0
        if not 0x00 <= slave <= 0xF7:
            raise NoSuchSlaveException(f"slave index :{slave} out of range")
        self._slaves[slave] = context

    def __delitem__(self, slave):
        """Use to access the slave context.

        :param slave: The slave context to remove
        :raises NoSuchSlaveException: in single mode, or for an out-of-range id
        """
        if self.single or not 0x00 <= slave <= 0xF7:
            raise NoSuchSlaveException(f"slave index: {slave} out of range")
        del self._slaves[slave]

    def __getitem__(self, slave):
        """Use to get access to a slave context.

        :param slave: The slave context to get
        :returns: The requested slave context
        :raises NoSuchSlaveException: when no context exists for the id
        """
        if self.single:
            slave = 0
        if slave in self._slaves:
            return self._slaves.get(slave)
        raise NoSuchSlaveException(
            f"slave - {slave} does not exist, or is out of range"
        )

    def slaves(self):
        """Define slaves."""
        # Python3 now returns keys() as iterable
        return list(self._slaves)
6,523 | assert ispipe | import os
import stat
try:
from pathlib import Path
except ImportError:
try:
# Python 2 backport
from pathlib2 import Path
except ImportError:
class Path(object):
"""Dummy for isinstance checks"""
pass
# Public API: existence checks plus one assert/assert-not pair per file type.
__all__ = ['assert_path_exists', 'assert_not_path_exists',
           'assert_isfile', 'assert_not_isfile',
           'assert_isdir', 'assert_not_isdir',
           'assert_islink', 'assert_not_islink',
           'assert_ispipe', 'assert_not_ispipe',
           'assert_issocket', 'assert_not_issocket',
           ]

# os.fspath (Python 3.6+) converts path-like objects to str/bytes; provide a
# fallback for older interpreters that understands __fspath__ and pathlib
# Path objects, and passes anything else through unchanged.
if hasattr(os, 'fspath'):
    _strpath = os.fspath
else:
    def _strpath(p):
        if hasattr(p, '__fspath__'):
            return p.__fspath__()
        elif isinstance(p, Path):
            return str(p)
        return p
def _stat_for_assert(path, follow_symlinks=True, msg=None):
stat = os.stat if follow_symlinks else os.lstat
try:
return stat(path)
except OSError:
if msg is None:
msg = "Path does not exist, or can't be stat-ed: %r" % path
raise AssertionError(msg)
def assert_path_exists(path, msg=None):
    """Assert that something exists at the given path."""
    target = _strpath(path)
    _stat_for_assert(target, True, msg)
def assert_not_path_exists(path, msg=None):
    """Assert that nothing exists at the given path."""
    target = _strpath(path)
    if os.path.exists(target):
        raise AssertionError(msg if msg is not None else "Path exists: %r" % target)
def assert_isfile(path, follow_symlinks=True, msg=None):
    """Assert that path exists and is a regular file.

    With follow_symlinks=True, the default, this will pass if path is a symlink
    to a regular file. With follow_symlinks=False, it will fail in that case.
    """
    target = _strpath(path)
    mode = _stat_for_assert(target, follow_symlinks, msg).st_mode
    if not stat.S_ISREG(mode):
        raise AssertionError(
            msg if msg is not None
            else "Path exists, but is not a regular file: %r" % target
        )
def assert_not_isfile(path, follow_symlinks=True, msg=None):
    """Assert that path exists but is not a regular file.

    With follow_symlinks=True, the default, this will fail if path is a symlink
    to a regular file. With follow_symlinks=False, it will pass in that case.
    """
    target = _strpath(path)
    mode = _stat_for_assert(target, follow_symlinks, msg).st_mode
    if stat.S_ISREG(mode):
        raise AssertionError(
            msg if msg is not None else "Path is a regular file: %r" % target
        )
def assert_isdir(path, follow_symlinks=True, msg=None):
    """Assert that path exists and is a directory.

    With follow_symlinks=True, the default, this will pass if path is a symlink
    to a directory. With follow_symlinks=False, it will fail in that case.
    """
    target = _strpath(path)
    mode = _stat_for_assert(target, follow_symlinks, msg).st_mode
    if not stat.S_ISDIR(mode):
        raise AssertionError(
            msg if msg is not None
            else "Path exists, but is not a directory: %r" % target
        )
def assert_not_isdir(path, follow_symlinks=True, msg=None):
    """Assert that path exists but is not a directory.

    With follow_symlinks=True, the default, this will fail if path is a symlink
    to a directory. With follow_symlinks=False, it will pass in that case.
    """
    target = _strpath(path)
    mode = _stat_for_assert(target, follow_symlinks, msg).st_mode
    if stat.S_ISDIR(mode):
        raise AssertionError(
            msg if msg is not None else "Path is a directory: %r" % target
        )
_link_target_msg = """Symlink target of:
{path}
Expected:
{expected}
Actual:
{actual}
"""
def assert_islink(path, to=None, msg=None):
    """Assert that path exists and is a symlink.

    If to is specified, also check that it is the target of the symlink.
    """
    target = _strpath(path)
    # Always lstat here: we are asking about the link itself
    mode = _stat_for_assert(target, False, msg).st_mode
    if not stat.S_ISLNK(mode):
        raise AssertionError(
            msg if msg is not None
            else "Path exists, but is not a symlink: %r" % target
        )
    if to is None:
        return
    expected = _strpath(to)
    actual = os.readlink(target)
    # TODO: Normalise the target to an absolute path?
    if actual != expected:
        if msg is None:
            msg = _link_target_msg.format(path=target, expected=expected, actual=actual)
        raise AssertionError(msg)
def assert_not_islink(path, msg=None):
    """Assert that path exists but is not a symlink."""
    target = _strpath(path)
    # lstat, so the check applies to the path entry itself
    mode = _stat_for_assert(target, False, msg).st_mode
    if stat.S_ISLNK(mode):
        raise AssertionError(
            msg if msg is not None else "Path is a symlink: %r" % target
        )
def METHOD_NAME(path, follow_symlinks=True, msg=None):
    """Assert that path exists and is a named pipe (FIFO).

    With follow_symlinks=True, the default, this will pass if path is a symlink
    to a named pipe. With follow_symlinks=False, it will fail in that case.
    """
    target = _strpath(path)
    mode = _stat_for_assert(target, follow_symlinks, msg).st_mode
    if not stat.S_ISFIFO(mode):
        raise AssertionError(
            msg if msg is not None
            else "Path exists, but is not a named pipe: %r" % target
        )
def assert_not_ispipe(path, follow_symlinks=True, msg=None):
    """Assert that path exists but is not a named pipe (FIFO).

    With follow_symlinks=True, the default, this will fail if path is a symlink
    to a named pipe. With follow_symlinks=False, it will pass in that case.
    """
    target = _strpath(path)
    mode = _stat_for_assert(target, follow_symlinks, msg).st_mode
    if stat.S_ISFIFO(mode):
        raise AssertionError(
            msg if msg is not None else "Path is a named pipe: %r" % target
        )
def assert_issocket(path, follow_symlinks=True, msg=None):
    """Assert that path exists and is a Unix domain socket.

    With follow_symlinks=True, the default, this will pass if path is a symlink
    to a Unix domain socket. With follow_symlinks=False, it will fail in that case.
    """
    target = _strpath(path)
    mode = _stat_for_assert(target, follow_symlinks, msg).st_mode
    if not stat.S_ISSOCK(mode):
        raise AssertionError(
            msg if msg is not None
            else "Path exists, but is not a socket: %r" % target
        )
def assert_not_issocket(path, follow_symlinks=True, msg=None):
    """Assert that path exists but is not a Unix domain socket.

    With follow_symlinks=True, the default, this will fail if path is a symlink
    to a Unix domain socket. With follow_symlinks=False, it will pass in that case.
    """
    target = _strpath(path)
    mode = _stat_for_assert(target, follow_symlinks, msg).st_mode
    if stat.S_ISSOCK(mode):
        raise AssertionError(
            msg if msg is not None else "Path is a socket: %r" % target
        )
6,524 | dnc app id | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetControllerDetailsResult',
'AwaitableGetControllerDetailsResult',
'get_controller_details',
'get_controller_details_output',
]
@pulumi.output_type
class GetControllerDetailsResult:
    """
    Represents an instance of a DNC controller.
    """
    # NOTE: AutoRest-generated output type. Each constructor argument is
    # type-checked and registered via pulumi.set; the @pulumi.getter
    # properties below read those values back. Do not edit by hand.
    def __init__(__self__, METHOD_NAME=None, dnc_endpoint=None, dnc_tenant_id=None, id=None, location=None, name=None, provisioning_state=None, resource_guid=None, tags=None, type=None):
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'dnc_app_id' to be a str")
        pulumi.set(__self__, "dnc_app_id", METHOD_NAME)
        if dnc_endpoint and not isinstance(dnc_endpoint, str):
            raise TypeError("Expected argument 'dnc_endpoint' to be a str")
        pulumi.set(__self__, "dnc_endpoint", dnc_endpoint)
        if dnc_tenant_id and not isinstance(dnc_tenant_id, str):
            raise TypeError("Expected argument 'dnc_tenant_id' to be a str")
        pulumi.set(__self__, "dnc_tenant_id", dnc_tenant_id)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if resource_guid and not isinstance(resource_guid, str):
            raise TypeError("Expected argument 'resource_guid' to be a str")
        pulumi.set(__self__, "resource_guid", resource_guid)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    # Read-only accessors; the name= argument maps back to the wire-format
    # (camelCase) field name where it differs from the Python attribute.
    @property
    @pulumi.getter(name="dncAppId")
    def METHOD_NAME(self) -> str:
        """
        dnc application id should be used by customer to authenticate with dnc gateway.
        """
        return pulumi.get(self, "dnc_app_id")

    @property
    @pulumi.getter(name="dncEndpoint")
    def dnc_endpoint(self) -> str:
        """
        dnc endpoint url that customers can use to connect to
        """
        return pulumi.get(self, "dnc_endpoint")

    @property
    @pulumi.getter(name="dncTenantId")
    def dnc_tenant_id(self) -> str:
        """
        tenant id of dnc application id
        """
        return pulumi.get(self, "dnc_tenant_id")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        An identifier that represents the resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Location of the resource.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The current state of dnc controller resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> str:
        """
        Resource guid.
        """
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        The resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetControllerDetailsResult(GetControllerDetailsResult):
    # Awaitable wrapper so the result works with `await` in async programs:
    # __await__ is a generator that yields nothing and immediately returns a
    # plain copy of the result. The `if False: yield` makes it a generator.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetControllerDetailsResult(
            METHOD_NAME=self.METHOD_NAME,
            dnc_endpoint=self.dnc_endpoint,
            dnc_tenant_id=self.dnc_tenant_id,
            id=self.id,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            resource_guid=self.resource_guid,
            tags=self.tags,
            type=self.type)
def get_controller_details(resource_group_name: Optional[str] = None,
                           resource_name: Optional[str] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetControllerDetailsResult:
    """
    Gets details about the specified dnc controller.
    Azure REST API version: 2021-03-15.

    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str resource_name: The name of the resource. It must be a minimum of 3 characters, and a maximum of 63.
    """
    # Wire-format argument names expected by the provider.
    __args__ = {
        'resourceGroupName': resource_group_name,
        'resourceName': resource_name,
    }
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:delegatednetwork:getControllerDetails', __args__, opts=opts, typ=GetControllerDetailsResult).value
    return AwaitableGetControllerDetailsResult(
        METHOD_NAME=pulumi.get(__ret__, 'dnc_app_id'),
        dnc_endpoint=pulumi.get(__ret__, 'dnc_endpoint'),
        dnc_tenant_id=pulumi.get(__ret__, 'dnc_tenant_id'),
        id=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        resource_guid=pulumi.get(__ret__, 'resource_guid'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_controller_details)
def get_controller_details_output(resource_group_name: Optional[pulumi.Input[str]] = None,
                                  resource_name: Optional[pulumi.Input[str]] = None,
                                  opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetControllerDetailsResult]:
    """
    Gets details about the specified dnc controller.
    Azure REST API version: 2021-03-15.

    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str resource_name: The name of the resource. It must be a minimum of 3 characters, and a maximum of 63.
    """
    # The body is intentionally empty: @_utilities.lift_output_func wraps
    # get_controller_details and supplies the implementation.
    ...
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to access the information in this module.
"""
from typing import Any, Iterable, List
from tink.proto import tink_pb2
from tink_config import _key_types
_TYPE_URL_PREFIX = 'type.googleapis.com/google.crypto.tink.'
def all_key_types() -> List[str]:
    """Returns all key types which Tink currently knows in short format.

    The related TypeUrl equals the short format returned here, but prefixed with
    type.googleapis.com/google.crypto.tink.
    """
    # Flatten the per-primitive lists, preserving registry order.
    return [
        key_type
        for key_types_for_single_primitive in _key_types.KEY_TYPES.values()
        for key_type in key_types_for_single_primitive
    ]
def key_types_for_primitive(p: Any) -> List[str]:
    """Returns all key types for the given primitive which Tink currently has.

    The related TypeUrl equals the short format returned here, but prefixed with
    type.googleapis.com/google.crypto.tink.

    Args:
      p: The class of the primitive (e.g. tink.Aead)

    Returns:
      The list of key types (e.g. ['AesGcmKey', 'AesEaxKey'])

    Raises:
      KeyError: if the primitive is not present in KEY_TYPES.
    """
    # Copy so callers cannot mutate the registry's internal list.
    return list(_key_types.KEY_TYPES[p])
def key_type_from_type_url(type_url: str) -> str:
    """Returns the key type from a given TypeUrl.

    If the TypeUrl is invalid throws an exception.

    Args:
      type_url: For example 'type.googleapis.com/google.crypto.tink.AesGcmKey'

    Returns:
      The stripped version (e.g. AesGcmKey)

    Raises:
      ValueError if the type url is unknown or in a bad format.
    """
    # partition() splits around the prefix; a valid URL has nothing before
    # the prefix and a known key type after it.
    before, found_prefix, key_type = type_url.partition(_TYPE_URL_PREFIX)
    if before or not found_prefix:
        raise ValueError('Invalid type_url: ' + type_url)
    if key_type not in all_key_types():
        raise ValueError('key type unknown: ' + key_type)
    return key_type
def supported_languages_for_key_type(key_type: str) -> List[str]:
    """Returns the list of supported languages for a given KeyType.

    Args:
      key_type: The shortened type URL (e.g. 'AesGcmKey')

    Returns:
      The list of languages which this key type supports.

    Raises:
      ValueError if the key type is unknown.
    """
    if key_type not in all_key_types():
        raise ValueError('key_type unknown: ' + key_type)
    return _key_types.SUPPORTED_LANGUAGES[key_type]
def supported_languages_for_primitive(p: Any) -> List[str]:
    """Returns the list of languages which support a primitive.

    Args:
      p: The primitive (e.g. tink.Aead)

    Returns:
      The list of languages which support this primitive.

    Raises:
      KeyError: if the primitive is unknown to KEY_TYPES.
    """
    result = set()
    for key_type in key_types_for_primitive(p):
        # set.update accepts any iterable; wrapping the list in set() first
        # was redundant.
        result.update(supported_languages_for_key_type(key_type))
    return list(result)
def all_primitives() -> Iterable[Any]:
    """Returns all the primitive types (such as tink.aead.Aead)."""
    # Iterating a dict yields its keys directly; no need for .items().
    return list(_key_types.KEY_TYPES)
def primitive_for_keytype(key_type: str) -> Any:
    """Returns the primitive for the given key type."""
    # First (and only) primitive whose key-type list contains key_type.
    primitive = next(
        (p for p, key_types in _key_types.KEY_TYPES.items()
         if key_type in key_types),
        None)
    if primitive is None:
        raise ValueError('Unknown key type: ' + key_type)
    return primitive
def is_asymmetric_public_key_primitive(p: Any) -> bool:
    """Returns true iff this p is the public part of an asymmetric scheme."""
    # Public primitives are exactly the values of the private->public map.
    return p in _key_types.PRIVATE_TO_PUBLIC_PRIMITIVE.values()
def METHOD_NAME(p: Any) -> Any:
    """Returns the private primitive corresponding to this public part."""
    # Invert public->private by zipping values with keys; like a dict
    # comprehension, duplicate values resolve to the last entry.
    mapping = _key_types.PRIVATE_TO_PUBLIC_PRIMITIVE
    inverted = dict(zip(mapping.values(), mapping.keys()))
    return inverted[p]
def _key_types_in_keyset(keyset: bytes) -> List[str]:
    """Returns the short key-type names of all keys in a serialized keyset.

    Raises:
      ValueError if any key's type URL is unknown or in a bad format.
    """
    parsed_keyset = tink_pb2.Keyset.FromString(keyset)
    type_urls = [k.key_data.type_url for k in parsed_keyset.key]
    return [key_type_from_type_url(t) for t in type_urls]
def keyset_supported(keyset: bytes, p: Any, lang: str) -> bool:
    """Checks if the given keyset can be instantiated as 'p' in the 'lang'.

    Returns true if it is expected that the keyset can be instantiated in
    language 'lang', according to the current configuration stored in
    tink_config. This only looks at the key types in the keyset, and does not
    check if the keys themselves are valid. It also does not check that the
    keyset is valid.

    Args:
      keyset: The serialized keyset
      p: The primitive class, e.g. aead.Aead
      lang: The language, e.g. 'python' or 'java'.

    Returns:
      True iff all key types are for this primitive and supported in the given
      language.
    """
    # `and` short-circuits per key type, matching the original early returns.
    return all(
        primitive_for_keytype(key_type) == p
        and lang in supported_languages_for_key_type(key_type)
        for key_type in _key_types_in_keyset(keyset))
6,526 | ok | """About Dialog for IDLE
"""
import os
from sys import version
from Tkinter import *
from idlelib import textView
class AboutDialog(Toplevel):
    """Modal about dialog for idle
    """
    def __init__(self, parent, title, _htest=False):
        """
        _htest - bool, change box location when running htest
        """
        Toplevel.__init__(self, parent)
        self.configure(borderwidth=5)
        # place dialog below parent if running htest
        self.geometry("+%d+%d" % (
                parent.winfo_rootx()+30,
                parent.winfo_rooty()+(30 if not _htest else 100)))
        self.bg = "#707070"
        self.fg = "#ffffff"
        self.CreateWidgets()
        self.resizable(height=FALSE, width=FALSE)
        self.title(title)
        self.transient(parent)
        # Modal: grab all input until the dialog is dismissed.
        self.grab_set()
        # METHOD_NAME is the dismiss handler defined at the bottom of the class.
        self.protocol("WM_DELETE_WINDOW", self.METHOD_NAME)
        self.parent = parent
        self.buttonOk.focus_set()
        self.bind('<Return>', self.METHOD_NAME)  #dismiss dialog
        self.bind('<Escape>', self.METHOD_NAME)  #dismiss dialog
        self.wait_window()

    def CreateWidgets(self):
        # Build the static layout: an info area (title, byline, versions)
        # plus rows of buttons that open license/credits/readme viewers.
        release = version[:version.index(' ')]
        frameMain = Frame(self, borderwidth=2, relief=SUNKEN)
        frameButtons = Frame(self)
        frameButtons.pack(side=BOTTOM, fill=X)
        frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
        self.buttonOk = Button(frameButtons, text='Close',
                               command=self.METHOD_NAME)
        self.buttonOk.pack(padx=5, pady=5)
        #self.picture = Image('photo', data=self.pictureData)
        frameBg = Frame(frameMain, bg=self.bg)
        frameBg.pack(expand=TRUE, fill=BOTH)
        labelTitle = Label(frameBg, text='IDLE', fg=self.fg, bg=self.bg,
                           font=('courier', 24, 'bold'))
        labelTitle.grid(row=0, column=0, sticky=W, padx=10, pady=10)
        #labelPicture = Label(frameBg, text='[picture]')
        #image=self.picture, bg=self.bg)
        #labelPicture.grid(row=1, column=1, sticky=W, rowspan=2,
        #                  padx=0, pady=3)
        byline = "Python's Integrated DeveLopment Environment" + 5*'\n'
        labelDesc = Label(frameBg, text=byline, justify=LEFT,
                          fg=self.fg, bg=self.bg)
        labelDesc.grid(row=2, column=0, sticky=W, columnspan=3, padx=10, pady=5)
        labelEmail = Label(frameBg, text='email: idle-dev@python.org',
                           justify=LEFT, fg=self.fg, bg=self.bg)
        labelEmail.grid(row=6, column=0, columnspan=2,
                        sticky=W, padx=10, pady=0)
        labelWWW = Label(frameBg, text='https://docs.python.org/' +
                         version[:3] + '/library/idle.html',
                         justify=LEFT, fg=self.fg, bg=self.bg)
        labelWWW.grid(row=7, column=0, columnspan=2, sticky=W, padx=10, pady=0)
        # Horizontal separator between the general info and Python versions.
        Frame(frameBg, borderwidth=1, relief=SUNKEN,
              height=2, bg=self.bg).grid(row=8, column=0, sticky=EW,
                                         columnspan=3, padx=5, pady=5)
        labelPythonVer = Label(frameBg, text='Python version: ' +
                               release, fg=self.fg, bg=self.bg)
        labelPythonVer.grid(row=9, column=0, sticky=W, padx=10, pady=0)
        tkVer = self.tk.call('info', 'patchlevel')
        labelTkVer = Label(frameBg, text='Tk version: ' +
                           tkVer, fg=self.fg, bg=self.bg)
        labelTkVer.grid(row=9, column=1, sticky=W, padx=2, pady=0)
        py_button_f = Frame(frameBg, bg=self.bg)
        py_button_f.grid(row=10, column=0, columnspan=2, sticky=NSEW)
        buttonLicense = Button(py_button_f, text='License', width=8,
                               highlightbackground=self.bg,
                               command=self.ShowLicense)
        buttonLicense.pack(side=LEFT, padx=10, pady=10)
        buttonCopyright = Button(py_button_f, text='Copyright', width=8,
                                 highlightbackground=self.bg,
                                 command=self.ShowCopyright)
        buttonCopyright.pack(side=LEFT, padx=10, pady=10)
        buttonCredits = Button(py_button_f, text='Credits', width=8,
                               highlightbackground=self.bg,
                               command=self.ShowPythonCredits)
        buttonCredits.pack(side=LEFT, padx=10, pady=10)
        # Separator between the Python section and the IDLE section.
        Frame(frameBg, borderwidth=1, relief=SUNKEN,
              height=2, bg=self.bg).grid(row=11, column=0, sticky=EW,
                                         columnspan=3, padx=5, pady=5)
        idle_v = Label(frameBg, text='IDLE version: ' + release,
                       fg=self.fg, bg=self.bg)
        idle_v.grid(row=12, column=0, sticky=W, padx=10, pady=0)
        idle_button_f = Frame(frameBg, bg=self.bg)
        idle_button_f.grid(row=13, column=0, columnspan=3, sticky=NSEW)
        idle_about_b = Button(idle_button_f, text='README', width=8,
                              highlightbackground=self.bg,
                              command=self.ShowIDLEAbout)
        idle_about_b.pack(side=LEFT, padx=10, pady=10)
        idle_news_b = Button(idle_button_f, text='NEWS', width=8,
                             highlightbackground=self.bg,
                             command=self.ShowIDLENEWS)
        idle_news_b.pack(side=LEFT, padx=10, pady=10)
        idle_credits_b = Button(idle_button_f, text='Credits', width=8,
                                highlightbackground=self.bg,
                                command=self.ShowIDLECredits)
        idle_credits_b.pack(side=LEFT, padx=10, pady=10)

    # License, et all, are of type _sitebuiltins._Printer
    def ShowLicense(self):
        self.display_printer_text('About - License', license)

    def ShowCopyright(self):
        self.display_printer_text('About - Copyright', copyright)

    def ShowPythonCredits(self):
        self.display_printer_text('About - Python Credits', credits)

    # Encode CREDITS.txt to utf-8 for proper version of Loewis.
    # Specify others as ascii until need utf-8, so catch errors.
    def ShowIDLECredits(self):
        self.display_file_text('About - Credits', 'CREDITS.txt', 'utf-8')

    def ShowIDLEAbout(self):
        self.display_file_text('About - Readme', 'README.txt', 'ascii')

    def ShowIDLENEWS(self):
        self.display_file_text('About - NEWS', 'NEWS.txt', 'utf-8')

    def display_printer_text(self, title, printer):
        # Force the _Printer to load its text, then show it in the viewer.
        printer._Printer__setup()
        text = '\n'.join(printer._Printer__lines)
        textView.view_text(self, title, text)

    def display_file_text(self, title, filename, encoding=None):
        # Show a file that lives next to this module in a text viewer.
        fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), filename)
        textView.view_file(self, title, fn, encoding)

    def METHOD_NAME(self, event=None):
        # Dismiss the dialog: release the modal grab before destroying.
        self.grab_release()
        self.destroy()
if __name__ == '__main__':
    # Run the unit tests, then the human-driven test of the dialog.
    # (A stray '|' token at the end of this block was removed.)
    import unittest
    unittest.main('idlelib.idle_test.test_helpabout', verbosity=2, exit=False)

    from idlelib.idle_test.htest import run
    run(AboutDialog)
import numpy as np
import os
import matplotlib.pyplot as plt
from astropy.tests.helper import pytest
from astropy.utils.exceptions import AstropyUserWarning
from ..io import split_numbers
from ..io import ref_mjd
from ..io import high_precision_keyword_read
from ..io import load_events_and_gtis, read_mission_info
from ..io import read_header_key
import warnings
# Directory containing the test data files shipped with the test suite.
curdir = os.path.abspath(os.path.dirname(__file__))
datadir = os.path.join(curdir, "data")

# h5py is an optional dependency; record availability for skip markers.
_H5PY_INSTALLED = True
try:
    import h5py
except ImportError:
    _H5PY_INSTALLED = False

skip_condition = pytest.mark.skipif(not _H5PY_INSTALLED, reason="H5PY not installed.")
class TestIO(object):
    """Tests for the helper functions in the io module."""

    def test_common_name(self):
        """Test the common_name function."""
        from ..io import common_name
        a = "A_3-50_A"
        b = "B_3-50_B"
        assert common_name(a, b) == "3-50"

    def test_high_precision_keyword(self):
        """Test high precision FITS keyword read."""
        # MJDTESTI/MJDTESTF are the integer/fractional halves of MJDTEST.
        hdr = {"MJDTESTI": 100, "MJDTESTF": np.longdouble(0.5), "CIAO": np.longdouble(0.0)}
        assert high_precision_keyword_read(hdr, "MJDTEST") == np.longdouble(
            100.5
        ), "Keyword MJDTEST read incorrectly"
        assert high_precision_keyword_read(hdr, "MJDTESTA") == np.longdouble(
            100.5
        ), "Keyword MJDTESTA read incorrectly"
        assert high_precision_keyword_read(hdr, "CIAO") == np.longdouble(
            0.0
        ), "Keyword CIAO read incorrectly"
        assert high_precision_keyword_read(hdr, "BU") is None, "Inexistent key read incorrectly"

    def test_xselect_mdb_is_found_headas(self, monkeypatch, tmp_path):
        """Test event file reading."""
        # Point HEADAS at a minimal fake xselect.mdb; only MAXI is defined
        # there, so NUSTAR must be absent from the parsed info.
        path = tmp_path / "bin"
        path.mkdir()
        f = path / "xselect.mdb"
        f.write_text("MAXI:submkey NONE\nMAXI:instkey INSTRUME")
        monkeypatch.setenv("HEADAS", str(tmp_path))
        info = read_mission_info()
        assert "NUSTAR" not in info

    def test_read_whole_mission_info(self):
        """Test event file reading."""
        info = read_mission_info()
        assert "NUSTAR" in info
        assert "XMM" in info
        assert "NICER" in info

    def test_event_file_read_and_automatic_sort(self):
        """Test event file reading."""
        fname = os.path.join(datadir, "monol_testA_calib.evt")
        with pytest.warns(AstropyUserWarning, match="No extensions found with a"):
            evdata = load_events_and_gtis(fname)
        # The unsorted file must produce the same (sorted) data.
        fname_unsrt = os.path.join(datadir, "monol_testA_calib_unsrt.evt")
        with pytest.warns(UserWarning, match="not sorted. Sorting them for you"):
            evdata_unsrt = load_events_and_gtis(fname_unsrt)
        for attr in "ev_list", "energy_list", "pi_list":
            assert np.allclose(getattr(evdata, attr), getattr(evdata_unsrt, attr))

    def test_event_file_read_additional_warns_uncal(self):
        """Test event file reading."""
        fname = os.path.join(datadir, "monol_testA.evt")
        with pytest.warns(UserWarning) as record:
            vals = load_events_and_gtis(fname, additional_columns=["energy"])
        assert np.any(["Column energy not found" in r.message.args[0] for r in record])
        # This is the default calibration for nustar data, as returned
        # from rough_calibration
        assert np.allclose(vals.energy_list, vals.pi_list * 0.04 + 1.6)

    def test_event_file_read_additional_energy_cal(self):
        """Test event file reading."""
        fname = os.path.join(datadir, "monol_testA_calib.evt")
        with pytest.warns(AstropyUserWarning, match="No extensions found with a"):
            vals = load_events_and_gtis(fname, additional_columns=["energy"])
        # These energies were calibrated with a different calibration than
        # returned from rough_calibration, on purpose! (notice the +1.)
        assert np.allclose(vals.energy_list, vals.pi_list * 0.04 + 1.6 + 1.0)

    def test_event_file_read_xmm(self):
        """Test event file reading."""
        fname = os.path.join(datadir, "xmm_test.fits")
        with pytest.warns(UserWarning) as record:
            load_events_and_gtis(fname, additional_columns=["PRIOR"])
        assert np.any(["Trying first extension" in r.message.args[0] for r in record])

    def test_event_file_read_no_mission(self):
        """Test event file reading."""
        fname = os.path.join(datadir, "nomission.evt")
        with pytest.warns(UserWarning, match="Sorting them"):
            load_events_and_gtis(fname)

    def test_event_file_read_no_additional(self):
        """Test event file reading."""
        fname = os.path.join(datadir, "monol_testA.evt")
        load_events_and_gtis(fname)

    def METHOD_NAME(self):
        """Test event file reading."""
        fname = os.path.join(datadir, "monol_testA.evt")
        load_events_and_gtis(fname)

    def test_read_header_key(self):
        """Test event file reading."""
        fname = os.path.join(datadir, "monol_testA.evt")
        assert read_header_key(fname, "INSTRUME") == "FPMA"
        # Missing keywords read back as the empty string.
        assert read_header_key(fname, "BU") == ""

    def test_read_mjdref(self):
        """Test event file reading."""
        fname = os.path.join(datadir, "monol_testA.evt")
        assert ref_mjd(fname) is not None

    def test_split_number(self):
        """Test split with high precision numbers."""
        numbers = np.array(
            [57401.0000003423423400453453, 0.00000574010000003426646], dtype=np.longdouble
        )
        number_I, number_F = split_numbers(numbers)
        # Recombining the halves must reproduce the originals exactly.
        r_numbers = np.longdouble(number_I) + np.longdouble(number_F)
        assert (numbers == r_numbers).all()

        n = [1234.567, 12.345]
        shift = -2
        n_i, n_f = split_numbers(n, shift)
        assert np.allclose(n_i, [1200, 0])
        r_n = n_i + n_f
        assert (n == r_n).all()
class TmpIOReadWrite(object):
    """A temporary helper class to test all the read and write functions."""

    def __init__(self):
        # One attribute of each kind that an I/O round-trip must preserve.
        self.number = 10
        self.str = "Test"
        self.list = [1, 2, 3]
        self.array = np.array([1, 2, 3])
        self.long_number = np.longdouble(1.25)
        self.long_array = np.longdouble([1, 2, 3])

    def test_operation(self):
        # Simple derived value used to check that methods survive round-trips.
        return self.number * 10
class TestFileFormats(object):
    """Tests for the figure-saving helper in the io module."""
    # NOTE(review): a stray trailing '|' token at the end of this class
    # (extraction artifact) was removed.

    def test_savefig_without_plot(self):
        """savefig must warn when no figure has been plotted yet."""
        from ..io import savefig
        plt.close("all")
        with pytest.warns(UserWarning, match="plot the image first"):
            savefig("test.png")
        os.unlink("test.png")

    def test_savefig(self):
        """savefig writes the current figure to disk."""
        from ..io import savefig
        plt.plot([1, 2, 3])
        savefig("test.png")
        os.unlink("test.png")
from tkinter import *
from tkinter.ttk import Frame, Scrollbar
from idlelib import macosx
class ScrolledList:
    """A Listbox with a vertical scrollbar and overridable event hooks."""

    # Placeholder text shown while the list is empty.
    default = "(None)"

    def __init__(self, master, **options):
        # Create top frame, with scrollbar and listbox
        self.master = master
        self.frame = frame = Frame(master)
        self.frame.pack(fill="both", expand=1)
        self.vbar = vbar = Scrollbar(frame, name="vbar")
        self.vbar.pack(side="right", fill="y")
        self.listbox = listbox = Listbox(frame, exportselection=0,
                                         background="white")
        if options:
            listbox.configure(options)
        listbox.pack(expand=1, fill="both")
        # Tie listbox and scrollbar together
        vbar["command"] = listbox.yview
        listbox["yscrollcommand"] = vbar.set
        # Bind events to the list box
        listbox.bind("<ButtonRelease-1>", self.click_event)
        listbox.bind("<Double-ButtonRelease-1>", self.double_click_event)
        if macosx.isAquaTk():
            # Aqua uses button 2 / Control-click for context menus.
            listbox.bind("<ButtonPress-2>", self.popup_event)
            listbox.bind("<Control-Button-1>", self.popup_event)
        else:
            listbox.bind("<ButtonPress-3>", self.popup_event)
        listbox.bind("<Key-Up>", self.up_event)
        listbox.bind("<Key-Down>", self.down_event)
        # Mark as empty
        self.clear()

    def close(self):
        self.frame.destroy()

    def clear(self):
        # Empty the listbox and show the placeholder entry.
        self.listbox.delete(0, "end")
        self.empty = 1
        self.listbox.insert("end", self.default)

    def append(self, item):
        # First real item replaces the placeholder.
        if self.empty:
            self.listbox.delete(0, "end")
            self.empty = 0
        self.listbox.insert("end", str(item))

    def get(self, index):
        return self.listbox.get(index)

    def click_event(self, event):
        self.listbox.activate("@%d,%d" % (event.x, event.y))
        index = self.listbox.index("active")
        self.select(index)
        self.on_select(index)
        # "break" stops further Tk event processing.
        return "break"

    def double_click_event(self, event):
        index = self.listbox.index("active")
        self.select(index)
        self.on_double(index)
        return "break"

    # Context menu, created lazily on first popup.
    menu = None

    def popup_event(self, event):
        if not self.menu:
            self.make_menu()
        menu = self.menu
        self.listbox.activate("@%d,%d" % (event.x, event.y))
        index = self.listbox.index("active")
        self.select(index)
        menu.tk_popup(event.x_root, event.y_root)
        return "break"

    def make_menu(self):
        menu = Menu(self.listbox, tearoff=0)
        self.menu = menu
        self.fill_menu()

    def up_event(self, event):
        # Move selection up one entry, wrapping from the top to the bottom.
        index = self.listbox.index("active")
        if self.listbox.selection_includes(index):
            index = index - 1
        else:
            index = self.listbox.size() - 1
        if index < 0:
            self.listbox.bell()
        else:
            self.select(index)
            self.on_select(index)
        return "break"

    def down_event(self, event):
        # Move selection down one entry, wrapping from the bottom to the top.
        index = self.listbox.index("active")
        if self.listbox.selection_includes(index):
            index = index + 1
        else:
            index = 0
        if index >= self.listbox.size():
            self.listbox.bell()
        else:
            self.select(index)
            self.on_select(index)
        return "break"

    def select(self, index):
        # Make `index` the single selected, focused, visible entry.
        self.listbox.focus_set()
        self.listbox.activate(index)
        self.listbox.selection_clear(0, "end")
        self.listbox.selection_set(index)
        self.listbox.see(index)

    # Methods to override for specific actions
    def fill_menu(self):
        pass

    def on_select(self, index):
        pass

    def on_double(self, index):
        pass
def METHOD_NAME(parent):  # htest #
    # Human-test driver: shows a ScrolledList subclass whose hooks print
    # the selected item, so a tester can verify mouse/keyboard behavior.
    top = Toplevel(parent)
    x, y = map(int, parent.geometry().split('+')[1:])
    top.geometry("+%d+%d" % (x+200, y + 175))

    class MyScrolledList(ScrolledList):
        def fill_menu(self): self.menu.add_command(label="right click")
        def on_select(self, index): print("select", self.get(index))
        def on_double(self, index): print("double", self.get(index))

    scrolled_list = MyScrolledList(top)
    for i in range(30):
        scrolled_list.append("Item %02d" % i)
if __name__ == '__main__':
    # Run the unit tests, then the human-driven widget test.
    # (A stray '|' token at the end of this block was removed.)
    from unittest import main
    main('idlelib.idle_test.test_scrolledlist', verbosity=2,)

    from idlelib.idle_test.htest import run
    run(METHOD_NAME)
#!/usr/bin/env python
"""Tests for grr.server.grr_response_server.flows.general.collectors.
These tests cover the interaction of artifacts. They test that collection of
good artifacts can still succeed if some bad artifacts are defined, and the
various ways of loading artifacts.
"""
import os
from unittest import mock
from absl import app
from grr_response_core import config
from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import artifact_registry
from grr_response_server import data_store
from grr_response_server.flows.general import collectors
from grr_response_server.flows.general import transfer
from grr.test_lib import action_mocks
from grr.test_lib import artifact_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import parser_test_lib
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
class TestArtifactCollectorsInteractions(flow_test_lib.FlowTestsBaseclass):
    """Test the collection of artifacts.

    This class loads both real and test artifacts to test the interaction of
    badly defined artifacts with real artifacts.
    """
    # NOTE(review): the closing '"""' delimiters of the two YAML string
    # literals below were lost in a formatting mangle and have been restored;
    # the YAML nesting follows the field structure the parser expects
    # (attributes under the COMMAND source) — TODO confirm against upstream.

    def setUp(self):
        super().setUp()
        # Install a fresh default artifact registry for each test.
        patcher = artifact_test_lib.PatchDefaultArtifactRegistry()
        patcher.start()
        self.addCleanup(patcher.stop)
        test_artifacts_file = os.path.join(config.CONFIG["Test.data_dir"],
                                           "artifacts", "test_artifacts.json")
        artifact_registry.REGISTRY.AddFileSource(test_artifacts_file)

    def _GetKB(self):
        """Returns a minimal Windows knowledge base with two profiled users."""
        return rdf_client.KnowledgeBase(
            environ_systemroot="C:\\Windows",
            os="Windows",
            environ_temp="C:\\Windows\\TEMP",
            users=[
                rdf_client.User(
                    homedir="C:\\Users\\jim",
                    sid="S-1-5-21-702227068-2140022151-3110739409-1000",
                    username="jim",
                    userprofile="C:\\Users\\jim"),
                rdf_client.User(
                    homedir="C:\\Users\\kovacs",
                    sid="S-1-5-21-702227000-2140022111-3110739999-1990",
                    username="kovacs",
                    userprofile="C:\\Users\\kovacs")
            ])

    def testNewArtifactLoaded(self):
        """Simulate a new artifact being loaded into the store via the UI."""
        cmd_artifact = """name: "TestCmdArtifact"
doc: "Test command artifact for dpkg."
sources:
- type: "COMMAND"
  attributes:
    cmd: "/usr/bin/dpkg"
    args: ["--list"]
labels: [ "Software" ]
supported_os: [ "Linux" ]
"""
        no_datastore_artifact = """name: "NotInDatastore"
doc: "Test command artifact for dpkg."
sources:
- type: "COMMAND"
  attributes:
    cmd: "/usr/bin/dpkg"
    args: ["--list"]
labels: [ "Software" ]
supported_os: [ "Linux" ]
"""
        test_registry = artifact_registry.ArtifactRegistry()
        test_registry.ClearRegistry()
        test_registry._dirty = False
        with mock.patch.object(artifact_registry, "REGISTRY", test_registry):
            with self.assertRaises(rdf_artifacts.ArtifactNotRegisteredError):
                artifact_registry.REGISTRY.GetArtifact("TestCmdArtifact")
            with self.assertRaises(rdf_artifacts.ArtifactNotRegisteredError):
                artifact_registry.REGISTRY.GetArtifact("NotInDatastore")
            # Add artifact to datastore but not registry
            for artifact_val in artifact_registry.REGISTRY.ArtifactsFromYaml(
                    cmd_artifact):
                data_store.REL_DB.WriteArtifact(artifact_val)
            # Add artifact to registry but not datastore
            for artifact_val in artifact_registry.REGISTRY.ArtifactsFromYaml(
                    no_datastore_artifact):
                artifact_registry.REGISTRY.RegisterArtifact(
                    artifact_val, source="datastore", overwrite_if_exists=False)
            # We need to reload all artifacts from the data store before trying
            # to get the artifact.
            artifact_registry.REGISTRY.ReloadDatastoreArtifacts()
            self.assertTrue(
                artifact_registry.REGISTRY.GetArtifact("TestCmdArtifact"))
            # We registered this artifact with datastore source but didn't
            # write it into aff4. This simulates an artifact that was
            # uploaded in the UI then later deleted. We expect it to get
            # cleared when the artifacts are reloaded from the datastore.
            with self.assertRaises(rdf_artifacts.ArtifactNotRegisteredError):
                artifact_registry.REGISTRY.GetArtifact("NotInDatastore")

    @parser_test_lib.WithAllParsers
    def testProcessCollectedArtifacts(self):
        """Tests downloading files from artifacts."""
        self.client_id = self.SetupClient(0, system="Windows", os_version="6.2")
        client_mock = action_mocks.FileFinderClientMock()
        artifact_list = ["WindowsPersistenceMechanismFiles"]
        with vfs_test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.REGISTRY,
                                       vfs_test_lib.FakeRegistryVFSHandler):
            with test_lib.Instrument(transfer.MultiGetFile,
                                     "Start") as getfile_instrument:
                flow_test_lib.TestFlowHelper(
                    collectors.ArtifactCollectorFlow.__name__,
                    client_mock,
                    artifact_list=artifact_list,
                    knowledge_base=self._GetKB(),
                    creator=self.test_username,
                    client_id=self.client_id,
                    split_output_by_artifact=True)
                # Check MultiGetFile got called for our runkey files
                # TODO(user): RunKeys for S-1-5-20 are not found because
                # users.sid only expands to users with profiles.
                pathspecs = getfile_instrument.args[0][0].args.pathspecs
                self.assertCountEqual([x.path for x in pathspecs],
                                      [u"C:\\Windows\\TEMP\\A.exe"])

    def METHOD_NAME(self):
        """Tests a broken artifact."""
        self.client_id = self.SetupClient(0, system="Windows", os_version="6.2")
        client_mock = action_mocks.FileFinderClientMock()
        artifact_list = ["BadPathspecArtifact"]
        with vfs_test_lib.VFSOverrider(
            rdf_paths.PathSpec.PathType.OS, vfs_test_lib.ClientVFSHandlerFixture
        ):
            with test_lib.Instrument(
                transfer.MultiGetFile, "Start"
            ) as getfile_instrument:
                flow_test_lib.TestFlowHelper(
                    collectors.ArtifactCollectorFlow.__name__,
                    client_mock,
                    artifact_list=artifact_list,
                    knowledge_base=self._GetKB(),
                    creator=self.test_username,
                    client_id=self.client_id,
                    split_output_by_artifact=True,
                )
                # The broken artifact must not trigger any file transfer.
                self.assertFalse(getfile_instrument.args)
def main(argv):
    """Program entry point; delegates to the GRR test runner."""
    # Run the full test suite
    test_lib.main(argv)
if __name__ == "__main__":
app.run(main) |
"""
ckwg +31
Copyright 2020 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Tests for CameraPerspectiveMap interface
"""
import unittest
import nose.tools as nt
import numpy as np
from kwiver.vital.types import CameraMap
from kwiver.vital.types import CameraPerspectiveMap as cam
from kwiver.vital.types import SimpleCameraPerspective as scap
from kwiver.vital.tests.cpp_helpers import camera_perspective_map_helpers as cpmh
class CameraPerspectiveMapTest(unittest.TestCase):
    """Tests for the CameraPerspectiveMap vital-type bindings."""

    def setUp(self):
        # Three distinct perspective cameras keyed by frame ids 1..3.
        self.a = scap()
        self.a2 = scap()
        self.a3 = scap()
        self.b = {1: self.a, 2: self.a2, 3: self.a3}
        self.ca = cam(self.b)

    def test_constructors(self):
        # Default construction and construction from a frame-id dict.
        cam()
        a = scap()
        b = {1: a}
        cam(b)

    def test_size(self):
        # size()
        self.assertEqual(self.ca.size(), 3)

    def test_cameras(self):
        # cameras()
        ret_dict = self.ca.cameras()
        self.assertIsInstance(ret_dict, dict)
        self.assertEqual(len(ret_dict), 3)
        nt.assert_equal(ret_dict[1], self.a)

    def test_frame_ids(self):
        # get_frame_ids()
        ret_set = self.ca.get_frame_ids()
        self.assertIsInstance(ret_set, set)
        self.assertEqual(len(ret_set), 3)
        self.assertSetEqual(ret_set, {1, 2, 3})

    def test_find(self):
        # find
        ret_persp = self.ca.find(2)
        self.assertIsInstance(ret_persp, scap)
        nt.assert_equal(ret_persp, self.a2)

    def test_erase(self):
        # erase
        self.ca.erase(1)
        self.assertEqual(self.ca.size(), 2)
        self.assertEqual(len(self.ca.cameras()), 2)
        self.assertDictEqual(self.ca.cameras(), {2: self.a2, 3: self.a3})

    def test_insert(self):
        # insert
        self.ca.insert(1, self.a)
        self.assertDictEqual(self.b, self.ca.cameras())
        self.assertEqual(self.ca.size(), 3)

    def test_clone(self):
        # clone
        new_ca = self.ca.clone()
        self.assertIsInstance(new_ca, cam)
        self.assertEqual(new_ca.size(), 3)
        nt.assert_equal(new_ca.cameras().keys(), self.ca.cameras().keys())

    def test_clear(self):
        # clear
        self.ca.clear()
        self.assertEqual(self.ca.size(), 0)
        self.assertEqual(len(self.ca.cameras()), 0)

    def test_set_from_base_camera_map(self):
        # set_from_base_camera_map
        self.ca.set_from_base_camera_map(self.b)
        self.assertEqual(self.ca.size(), 3)
        self.assertDictEqual(self.ca.cameras(), self.b)
class CameraPerspectiveInheritance(cam):
    """Python subclass of CameraPerspectiveMap backed by a plain dict."""

    def __init__(self, cam_dict_):
        cam.__init__(self)
        # frame id -> camera mapping used by the overridden virtuals below.
        self.cam_dict = cam_dict_

    def size(self):
        return len(self.cam_dict)

    def cameras(self):
        return self.cam_dict

    def get_frame_ids(self):
        return set(self.cam_dict.keys())
class TestCamPerspectiveInheritance(unittest.TestCase):
    """Checks that C++ helpers dispatch to Python virtual overrides."""
    # NOTE(review): a stray trailing '|' token at the end of this class
    # (extraction artifact) and two dead duplicate constructions of
    # CameraPerspectiveInheritance were removed.

    def METHOD_NAME(self):
        """The subclass can be constructed from a frame-id -> camera dict."""
        a1 = scap()
        a2 = scap()
        cam_dict = {1: a1, 2: a2}
        CameraPerspectiveInheritance(cam_dict)

    def test_inheritance(self):
        """The helper class is a proper subclass of CameraPerspectiveMap."""
        nt.ok_(issubclass(CameraPerspectiveInheritance, cam))

    def test_methods(self):
        """The C++ call helpers see the Python overrides."""
        a1 = scap()
        a2 = scap()
        cam_dict = {1: a1, 2: a2}
        a = CameraPerspectiveInheritance(cam_dict)
        ret_size = cpmh.call_size(a)
        self.assertEqual(ret_size, 2)
        ret_cam_dict = cpmh.call_cameras(a)
        self.assertDictEqual(ret_cam_dict, cam_dict)
        ret_set = cpmh.call_get_frame_ids(a)
        self.assertSetEqual(ret_set, {1, 2})
# testing/fixtures/orm.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from __future__ import annotations
from typing import Any
import sqlalchemy as sa
from .base import TestBase
from .sql import TablesTest
from .. import assertions
from .. import config
from .. import schema
from ..entities import BasicEntity
from ..entities import ComparableEntity
from ..util import adict
from ... import orm
from ...orm import DeclarativeBase
from ...orm import events as orm_events
from ...orm import registry
class ORMTest(TestBase):
    """Base for ORM tests; exposes a per-test Session as a pytest-style fixture."""

    @config.fixture
    def fixture_session(self):
        # Delegates to the module-level fixture_session() factory below.
        return fixture_session()
class MappedTest(ORMTest, TablesTest, assertions.AssertsExecutionResults):
    """Test base that manages tables, entity classes and classical mappers.

    The ``run_setup_*`` flags control whether each setup step runs once per
    test class, once per test function, or not at all. Entity classes created
    during setup are collected into ``cls.classes`` (an attribute-access dict)
    via ``_with_register_classes``.
    """

    # 'once', 'each', None
    run_setup_classes = "once"
    # 'once', 'each', None
    run_setup_mappers = "each"
    # registry of entity classes created during setup; filled lazily
    classes: Any = None

    @config.fixture(autouse=True, scope="class")
    def _setup_tables_test_class(self):
        # Class-scoped fixture: build tables/classes/mappers/rows once,
        # tear everything down after the last test of the class.
        cls = self.__class__
        cls._init_class()
        if cls.classes is None:
            cls.classes = adict()
        cls._setup_once_tables()
        cls._setup_once_classes()
        cls._setup_once_mappers()
        cls._setup_once_inserts()
        yield
        cls._teardown_once_class()
        cls._teardown_once_metadata_bind()

    @config.fixture(autouse=True, scope="function")
    def _setup_tables_test_instance(self):
        # Function-scoped fixture: re-run the "each" setup steps per test,
        # and always close any leftover sessions before tearing down.
        self._setup_each_tables()
        self._setup_each_classes()
        self._setup_each_mappers()
        self._setup_each_inserts()
        yield
        orm.session.close_all_sessions()
        self._teardown_each_mappers()
        self._teardown_each_classes()
        self._teardown_each_tables()

    @classmethod
    def _teardown_once_class(cls):
        cls.classes.clear()

    @classmethod
    def _setup_once_classes(cls):
        if cls.run_setup_classes == "once":
            cls._with_register_classes(cls.setup_classes)

    @classmethod
    def _setup_once_mappers(cls):
        if cls.run_setup_mappers == "once":
            cls.mapper_registry, cls.mapper = cls._generate_registry()
            cls._with_register_classes(cls.setup_mappers)

    def _setup_each_mappers(self):
        # A fresh registry is needed whenever mappers are not class-persistent.
        if self.run_setup_mappers != "once":
            (
                self.__class__.mapper_registry,
                self.__class__.mapper,
            ) = self._generate_registry()
        if self.run_setup_mappers == "each":
            self._with_register_classes(self.setup_mappers)

    def _setup_each_classes(self):
        if self.run_setup_classes == "each":
            self._with_register_classes(self.setup_classes)

    @classmethod
    def _generate_registry(cls):
        # Returns (registry, imperative-mapping callable) bound to this
        # class's table metadata.
        decl = registry(metadata=cls._tables_metadata)
        return decl, decl.map_imperatively

    @classmethod
    def _with_register_classes(cls, fn):
        """Run a setup method, framing the operation with a Base class
        that will catch new subclasses to be established within
        the "classes" registry.
        """
        cls_registry = cls.classes

        class _Base:
            def __init_subclass__(cls) -> None:
                # Every subclass defined inside fn() self-registers by name.
                assert cls_registry is not None
                cls_registry[cls.__name__] = cls
                super().__init_subclass__()

        class Basic(BasicEntity, _Base):
            pass

        class Comparable(ComparableEntity, _Base):
            pass

        cls.Basic = Basic
        cls.Comparable = Comparable
        fn()

    def _teardown_each_mappers(self):
        # some tests create mappers in the test bodies
        # and will define setup_mappers as None -
        # clear mappers in any case
        if self.run_setup_mappers != "once":
            orm.clear_mappers()

    def _teardown_each_classes(self):
        if self.run_setup_classes != "once":
            self.classes.clear()

    @classmethod
    def setup_classes(cls):
        # Hook for subclasses: define entity classes here.
        pass

    @classmethod
    def setup_mappers(cls):
        # Hook for subclasses: configure mappers here.
        pass
class DeclarativeMappedTest(MappedTest):
    """MappedTest variant where entities are declaratively mapped.

    Tables come from the declarative classes themselves, so the explicit
    once-tables step is a no-op; tables are created after class setup.
    """

    run_setup_classes = "once"
    run_setup_mappers = "once"

    @classmethod
    def _setup_once_tables(cls):
        # Tables are defined by the declarative classes, not separately.
        pass

    @classmethod
    def _with_register_classes(cls, fn):
        cls_registry = cls.classes

        class _DeclBase(DeclarativeBase):
            __table_cls__ = schema.Table
            metadata = cls._tables_metadata
            type_annotation_map = {
                str: sa.String().with_variant(
                    sa.String(50), "mysql", "mariadb", "oracle"
                )
            }

            def __init_subclass__(cls, **kw) -> None:
                # Self-register every declarative entity by class name.
                assert cls_registry is not None
                cls_registry[cls.__name__] = cls
                super().__init_subclass__(**kw)

        cls.DeclarativeBasic = _DeclBase
        # sets up cls.Basic which is helpful for things like composite
        # classes
        super()._with_register_classes(fn)
        if cls._tables_metadata.tables and cls.run_create_tables:
            cls._tables_metadata.create_all(config.db)
class RemoveORMEventsGlobally:
    """Mixin that wipes globally-registered ORM event listeners after each
    test, so listeners installed by one test cannot leak into the next."""

    @config.fixture(autouse=True)
    def _remove_listeners(self):
        yield
        # Teardown: drop every listener registered during the test.
        orm_events.MapperEvents._clear()
        orm_events.InstanceEvents._clear()
        orm_events.SessionEvents._clear()
        orm_events.InstrumentationEvents._clear()
        orm_events.QueryEvents._clear()
# Sessions handed out by fixture_session(), so close_all_sessions() can
# dispose of them later.
_fixture_sessions = set()


def fixture_session(**kw):
    """Create a Session bound to the test database and track it.

    ``autoflush`` and ``expire_on_commit`` default to True; ``bind`` may be
    overridden via keyword, otherwise ``config.db`` is used. Remaining
    keywords are forwarded to ``orm.Session``.
    """
    kw.setdefault("autoflush", True)
    kw.setdefault("expire_on_commit", True)
    engine = kw.pop("bind", config.db)
    session = orm.Session(engine, **kw)
    _fixture_sessions.add(session)
    return session
def close_all_sessions():
    """Close every live Session and forget the tracked fixture sessions."""
    # will close all still-referenced sessions
    orm.close_all_sessions()
    _fixture_sessions.clear()
def METHOD_NAME(cls):
    """Class-level teardown hook: close all sessions and clear all mappers."""
    close_all_sessions()
    orm.clear_mappers()
def after_test():
    """Per-test teardown hook: close fixture sessions, if any were created."""
    if _fixture_sessions:
        close_all_sessions()
6,532 | func result | import sys
from unittest import mock
from unittest.mock import Mock
import pytest
import pytest
from PySide6.QtCore import QThreadPool
from src.Model.Worker import Worker
class FakeClass:
    """Minimal receiver whose methods serve as signal slots in the tests.

    Both slots are deliberate no-ops; the tests wrap them with mocks to
    observe the arguments delivered by the Worker's signals.
    """

    def METHOD_NAME(self, result):
        """Slot for the worker's ``result`` signal (no-op)."""

    def func_error(self, error):
        """Slot for the worker's ``error`` signal (no-op)."""
# qtbot is a pytest fixture used to test PyQt5. Part of the pytest-qt plugin.
def test_worker_progress_callback(qtbot):
    """
    Testing for the progress_callback parameter being present in the called function when progress_callback=True
    """
    func_to_test = Mock()
    w = Worker(func_to_test, "test", 3, progress_callback=True)
    # This starts the Worker in the threadpool and then blocks the test from progressing until the finished signal is
    # emitted. qtbot is a pytest fixture used to test PyQt5.
    threadpool = QThreadPool()
    with qtbot.waitSignal(w.signals.finished) as blocker:
        threadpool.start(w)
    assert w.fn == func_to_test
    # Worker is expected to have replaced progress_callback=True with an actual
    # callable of its own; the wrapped function must receive that callable.
    assert w.kwargs['progress_callback'] is not None
    func_to_test.assert_called_with("test", 3, progress_callback=w.kwargs['progress_callback'])
def test_worker_progress_callback_false(qtbot):
    """
    Testing for the progress_callback parameter not being present in the called function when progress_callback=False
    """
    func_to_test = Mock()
    w = Worker(func_to_test, "test", 3, progress_callback=False)
    threadpool = QThreadPool()
    # Block until the worker's finished signal fires.
    with qtbot.waitSignal(w.signals.finished) as blocker:
        threadpool.start(w)
    assert w.fn == func_to_test
    # progress_callback=False must be stripped entirely, not forwarded.
    assert 'progress_callback' not in w.kwargs
    func_to_test.assert_called_with("test", 3)
def test_worker_no_progress_callback(qtbot):
    """
    Testing for the progress_callback parameter not being present in the called function when no progress_callback
    """
    func_to_test = Mock()
    w = Worker(func_to_test, "test", 3)
    threadpool = QThreadPool()
    # Block until the worker's finished signal fires.
    with qtbot.waitSignal(w.signals.finished) as blocker:
        threadpool.start(w)
    assert w.fn == func_to_test
    # Omitting the kwarg means the wrapped function sees only its own args.
    assert 'progress_callback' not in w.kwargs
    func_to_test.assert_called_with("test", 3)
def test_worker_result_signal(qtbot, monkeypatch):
    """
    Testing return value of worker's called function through result signal.
    """
    thing = FakeClass()
    thing.func_to_test = Mock(return_value=5, unsafe=True)
    w = Worker(thing.func_to_test, "test", 3)
    # Wrap the slot so its invocation (and arguments) can be asserted after
    # the result signal is delivered.
    with mock.patch.object(FakeClass, 'func_result', wraps=thing.METHOD_NAME) as mock_func_result:
        w.signals.result.connect(thing.METHOD_NAME)
        threadpool = QThreadPool()
        with qtbot.waitSignal(w.signals.finished) as blocker:
            threadpool.start(w)
        thing.func_to_test.assert_called_with("test", 3)
        # The slot must receive the wrapped function's return value.
        mock_func_result.assert_called_with(5)
@pytest.mark.skip(reason="This test works perfectly in a local environment and fails every time in CI")
@pytest.mark.qt_no_exception_capture
def test_worker_error_signal(qtbot):
    """
    Testing exception value of worker's called function through error signal.
    """
    thing = FakeClass()
    thing.func_to_test = Mock(side_effect=ValueError("Some Error"))
    w = Worker(thing.func_to_test, "test", 3)
    # from https://github.com/pytest-dev/pytest-qt/blob/master/tests/test_exceptions.py
    # PyQt 5.5+ will crash if there's no custom exception handler installed
    # wrapping storage of original excepthook and putting it back in case this would linger.
    old_excepthook = sys.excepthook
    sys.excepthook = lambda *args: None
    with mock.patch.object(FakeClass, 'func_error', wraps=thing.func_error):
        w.signals.error.connect(thing.func_error)
        threadpool = QThreadPool()
        with qtbot.waitSignal(w.signals.finished) as blocker:
            threadpool.start(w)
        # Inspect the (exc_type, exc_value, traceback)-style payload delivered
        # to the error slot.
        kall = thing.func_error.call_args
        args, kwargs = kall
        thing.func_to_test.assert_called_with("test", 3)
        assert isinstance(args[0][1], ValueError)
    # Restore the original excepthook so other tests are unaffected.
    sys.excepthook = old_excepthook
6,533 | layout | import os
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.build import check_min_cppstd
from conan.tools.files import copy, get
from conan.tools.METHOD_NAME import basic_layout
from conan.tools.scm import Version
required_conan_version = ">=1.52.0"
class ContinuableConan(ConanFile):
    """Conan recipe for the header-only `continuable` C++14 futures library.

    Being header-only, the recipe clears package_id, packages only headers
    plus license, and maps each recipe option to a ``CONTINUABLE_WITH_*``
    preprocessor define consumed by downstream builds.
    """

    name = "continuable"
    description = (
        "C++14 asynchronous allocation aware futures "
        "(supporting then, exception handling, coroutines and connections)"
    )
    license = "MIT"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/Naios/continuable"
    topics = ("asynchronous", "future", "coroutines", "header-only")
    package_type = "header-library"
    settings = "os", "arch", "compiler", "build_type"
    # Each option toggles one CONTINUABLE_WITH_* define; see package_info().
    options = {
        "no_exceptions": [True, False],
        "custom_error_type": [True, False],
        "unhandled_exceptions": [True, False],
        "custom_final_callback": [True, False],
        "immediate_types": [True, False],
    }
    default_options = {
        "no_exceptions": False,
        "custom_error_type": False,
        "unhandled_exceptions": False,
        "custom_final_callback": False,
        "immediate_types": False,
    }
    options_description = {
        "no_exceptions": (
            "Exceptions are disabled and `std::error_condition` is used as error_type. "
            "See tutorial-chaining-continuables-fail for details."
        ),
        "custom_error_type": (
            "Exceptions are disabled and the type defined by `CONTINUABLE_WITH_CUSTOM_ERROR_TYPE` "
            "is used as error_type. See tutorial-chaining-continuables-fail for details."
        ),
        "unhandled_exceptions": (
            "Allows unhandled exceptions in asynchronous call hierarchies. "
            "See tutorial-chaining-continuables-fail for details."
        ),
        "custom_final_callback": (
            "Allows to customize the final callback which can be used to implement custom unhandled"
            " asynchronous exception handlers."
        ),
        "immediate_types": (
            "Don't decorate the used type erasure, "
            "which is done to keep type names minimal for better error messages in debug builds."
        ),
    }
    no_copy_source = True

    @property
    def _min_cppstd(self):
        # Library requires C++14.
        return 14

    @property
    def _compilers_minimum_version(self):
        # Minimum compiler versions known to support the required C++14 subset.
        return {
            "gcc": "5",
            "clang": "3.4",
            "apple-clang": "10",
            "Visual Studio": "14",
        }

    def METHOD_NAME(self):
        basic_layout(self, src_folder="src")

    def requirements(self):
        # function2 provides the type-erased callable used by continuable.
        self.requires("function2/4.1.0")

    def package_id(self):
        # Header-only: the binary package is settings-independent.
        self.info.clear()

    def validate(self):
        if self.settings.compiler.get_safe("cppstd"):
            check_min_cppstd(self, self._min_cppstd)
        minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
        if minimum_version and Version(self.settings.compiler.version) < minimum_version:
            raise ConanInvalidConfiguration(
                f"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support."
            )

    def source(self):
        get(self, **self.conan_data["sources"][self.version], strip_root=True)

    def package(self):
        # Ship the license and the headers only.
        copy(
            self,
            pattern="LICENSE.txt",
            dst=os.path.join(self.package_folder, "licenses"),
            src=self.source_folder,
        )
        copy(
            self,
            pattern="*",
            dst=os.path.join(self.package_folder, "include", "continuable"),
            src=os.path.join(self.source_folder, "include", "continuable"),
        )

    def package_info(self):
        self.cpp_info.bindirs = []
        self.cpp_info.libdirs = []
        if self.settings.os == "Linux":
            self.cpp_info.system_libs.append("pthread")
        # Translate recipe options into the library's feature defines.
        if self.options.no_exceptions:
            self.cpp_info.defines.append("CONTINUABLE_WITH_NO_EXCEPTIONS")
        if self.options.custom_error_type:
            self.cpp_info.defines.append("CONTINUABLE_WITH_CUSTOM_ERROR_TYPE")
        if self.options.unhandled_exceptions:
            self.cpp_info.defines.append("CONTINUABLE_WITH_UNHANDLED_EXCEPTIONS")
        if self.options.custom_final_callback:
            self.cpp_info.defines.append("CONTINUABLE_WITH_CUSTOM_FINAL_CALLBACK")
        if self.options.immediate_types:
            self.cpp_info.defines.append("CONTINUABLE_WITH_IMMEDIATE_TYPES")
6,534 | mock terminate | import random
from operator import xor
from unittest import mock
import pytest
from tests.factories.agreements_pool import BufferedAgreement, BufferedAgreementFactory
from yapapi import agreements_pool
from yapapi.events import AgreementTerminated
def mock_agreement(**properties):
    """Return an async factory producing a MagicMock agreement with *properties*.

    The returned callable mimics ``Proposal.create_agreement``: awaiting it
    yields a mock whose ``get_details`` and ``confirm`` are awaitable no-ops
    returning fresh mocks.
    """
    async def awaitable_mock():
        return mock.MagicMock()

    async def create_agreement():
        agreement = mock.MagicMock(**properties)
        agreement.get_details = awaitable_mock
        agreement.confirm = awaitable_mock
        return agreement

    return create_agreement
def get_agreements_pool() -> agreements_pool.AgreementsPool:
    """Return an AgreementsPool wired with a no-op event emitter and a no-op
    offer-recycling callback, suitable for isolated unit tests."""
    return agreements_pool.AgreementsPool(
        lambda _event, **kwargs: None, lambda _offer: None, mock.Mock()
    )
@pytest.mark.asyncio
async def test_use_agreement_chooses_max_score():
    """Test that a proposal with the largest score is chosen in AgreementsPool.use_agreement()."""
    # Prepare proposals with random scores
    proposals = {}
    for n in range(100):
        mock_proposal = mock.MagicMock(proposal_id=n)
        mock_proposal.create_agreement = mock_agreement(proposal_id=n)
        mock_score = random.random()
        proposals[n] = (mock_score, mock_proposal)
    pool = get_agreements_pool()
    for score, proposal in proposals.values():
        await pool.add_proposal(score, proposal)
    chosen_proposal_ids = []

    def use_agreement_cb(agreement):
        # Record the order in which proposals get picked.
        chosen_proposal_ids.append(agreement.proposal_id)
        return True

    # Draw every proposal once.
    for _ in proposals.items():
        await pool.use_agreement(use_agreement_cb)
    # Make sure that proposals are chosen according to the decreasing ordering of the scores
    sorted_scores = sorted((score for score, _ in proposals.values()), reverse=True)
    chosen_scores = [proposals[id][0] for id in chosen_proposal_ids]
    assert chosen_scores == sorted_scores
@pytest.mark.asyncio
async def test_use_agreement_shuffles_proposals():
    """Test that a random proposal is chosen among the ones with the largest score."""
    chosen_proposal_ids = set()
    all_proposal_ids = range(5)
    # Repeat enough times that, with random tie-breaking, every top-scored
    # proposal should be selected at least once.
    for i in range(100):
        # Prepare proposal data, all proposals have the same score except the one with id 0
        proposals = []
        for n in all_proposal_ids:
            mock_proposal = mock.MagicMock(proposal_id=n)
            mock_proposal.create_agreement = mock_agreement(proposal_id=n)
            mock_score = 42.0 if n != 0 else 41.0
            proposals.append((mock_score, mock_proposal))
        pool = get_agreements_pool()
        for score, proposal in proposals:
            await pool.add_proposal(score, proposal)

        def use_agreement_cb(agreement):
            chosen_proposal_ids.add(agreement.proposal_id)
            return True

        await pool.use_agreement(use_agreement_cb)
    # Make sure that each proposal id with the highest score has been chosen
    assert chosen_proposal_ids == {n for n in all_proposal_ids if n != 0}
@pytest.mark.asyncio
async def test_use_agreement_no_proposals():
    """Test that `AgreementPool.use_agreement()` returns `None` when there are no proposals."""
    pool = get_agreements_pool()

    def use_agreement_cb(_agreement):
        # Must never be reached with an empty pool.
        assert False, "use_agreement callback called"

    result = await pool.use_agreement(use_agreement_cb)
    assert result is None
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "multi_activity,simulate_race,event_emitted",
    [
        (True, False, True),
        (False, False, True),
        (True, True, False),
    ],
)
async def test_terminate_agreement(multi_activity, simulate_race, event_emitted):
    """Test if `_terminate_agreement` works while the agreement gets deleted from the pool."""
    events = []
    pool = agreements_pool.AgreementsPool(
        lambda event, **kwargs: events.append(event), lambda _offer: None, mock.Mock()  # noqa
    )
    agreement: BufferedAgreement = BufferedAgreementFactory(has_multi_activity=multi_activity)
    pool._agreements[agreement.agreement.id] = agreement

    async def METHOD_NAME(_, **__):
        # Optionally simulate a concurrent deletion of the agreement while
        # terminate() is in flight, to exercise the race-handling path.
        if simulate_race:
            del pool._agreements[agreement.agreement.id]

    with mock.patch(
        "yapapi.rest.market.Agreement.terminate", mock.AsyncMock(side_effect=METHOD_NAME)
    ) as terminate_mock:
        await pool._terminate_agreement(agreement.agreement.id, {})
    # terminate() is only issued for multi-activity agreements.
    assert terminate_mock.called == multi_activity
    # The AgreementTerminated event is suppressed when the race occurred.
    assert xor(AgreementTerminated in events, not event_emitted)
    assert not pool._agreements
6,535 | zero ext | """
Functions for acting on an axis of an array.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
def axis_slice(a, start=None, stop=None, step=None, axis=-1):
    """Slice `a` along a single axis.

    Builds an index that is ``slice(None)`` on every axis except `axis`,
    where ``slice(start, stop, step)`` is applied.

    Parameters
    ----------
    a : numpy.ndarray
        The array to be sliced.
    start, stop, step : int or None
        Parameters forwarded to ``slice()``.
    axis : int, optional
        The axis of `a` to be sliced. Default is -1.

    Returns
    -------
    numpy.ndarray
        The sliced view of `a`.

    Notes
    -----
    Because the arguments go straight into ``slice(start, stop, step)``,
    selecting the single index ``k`` is spelled
    ``axis_slice(a, start=k, stop=k+1)`` and keeps a length-1 dimension
    along `axis` (use ``numpy.squeeze()`` to drop it).

    Examples
    --------
    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> axis_slice(a, start=0, stop=1, axis=1)
    array([[1],
           [4],
           [7]])
    """
    indexer = [slice(None)] * a.ndim
    indexer[axis] = slice(start, stop, step)
    return a[tuple(indexer)]
def axis_reverse(a, axis=-1):
    """Reverse the 1-d slices of `a` along axis `axis`.

    Returns axis_slice(a, step=-1, axis=axis), i.e. a reversed view — no
    data is copied.
    """
    return axis_slice(a, step=-1, axis=axis)
def odd_ext(x, n, axis=-1):
    """Odd (antisymmetric) extension of `x` by `n` samples at each end.

    Each end is extended by "rotating" the signal 180 degrees about its
    endpoint: the appended values are ``2*endpoint - mirrored interior``.

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        Number of elements appended at each end of `axis`. Must satisfy
        ``n <= x.shape[axis] - 1``.
    axis : int, optional
        The axis along which to extend `x`. Default is -1.

    Returns
    -------
    ndarray
        `x` itself when ``n < 1``, otherwise the extended array.

    Raises
    ------
    ValueError
        If `n` exceeds ``x.shape[axis] - 1``.

    Examples
    --------
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
    >>> odd_ext(a, 2)
    array([[-1,  0,  1,  2,  3,  4,  5,  6,  7],
           [-4, -1,  0,  1,  4,  9, 16, 23, 28]])
    """
    if n < 1:
        return x
    limit = x.shape[axis] - 1
    if n > limit:
        raise ValueError(("The extension length n (%d) is too big. " +
                          "It must not exceed x.shape[axis]-1, which is %d.")
                         % (n, limit))
    first = axis_slice(x, start=0, stop=1, axis=axis)
    last = axis_slice(x, start=-1, axis=axis)
    head = 2 * first - axis_slice(x, start=n, stop=0, step=-1, axis=axis)
    tail = 2 * last - axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
    return np.concatenate((head, x, tail), axis=axis)
def even_ext(x, n, axis=-1):
    """Even (mirror-image) extension of `x` by `n` samples at each end.

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        Number of elements appended at each end of `axis`. Must satisfy
        ``n <= x.shape[axis] - 1``.
    axis : int, optional
        The axis along which to extend `x`. Default is -1.

    Returns
    -------
    ndarray
        `x` itself when ``n < 1``, otherwise the extended array.

    Raises
    ------
    ValueError
        If `n` exceeds ``x.shape[axis] - 1``.

    Examples
    --------
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
    >>> even_ext(a, 2)
    array([[ 3,  2,  1,  2,  3,  4,  5,  4,  3],
           [ 4,  1,  0,  1,  4,  9, 16,  9,  4]])
    """
    if n < 1:
        return x
    limit = x.shape[axis] - 1
    if n > limit:
        raise ValueError(("The extension length n (%d) is too big. " +
                          "It must not exceed x.shape[axis]-1, which is %d.")
                         % (n, limit))
    head = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
    tail = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
    return np.concatenate((head, x, tail), axis=axis)
def const_ext(x, n, axis=-1):
    """Constant (edge-replicating) extension of `x` by `n` samples per end.

    The values at the first and last position along `axis` are repeated
    `n` times on the corresponding side.

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        Number of elements appended at each end of `axis`.
    axis : int, optional
        The axis along which to extend `x`. Default is -1.

    Returns
    -------
    ndarray
        `x` itself when ``n < 1``, otherwise the extended array.

    Examples
    --------
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
    >>> const_ext(a, 2)
    array([[ 1,  1,  1,  2,  3,  4,  5,  5,  5],
           [ 0,  0,  0,  1,  4,  9, 16, 16, 16]])
    """
    if n < 1:
        return x
    # Broadcast each end slice against an all-ones block of length n.
    rep_shape = [1] * x.ndim
    rep_shape[axis] = n
    rep = np.ones(rep_shape, dtype=x.dtype)
    head = rep * axis_slice(x, start=0, stop=1, axis=axis)
    tail = rep * axis_slice(x, start=-1, axis=axis)
    return np.concatenate((head, x, tail), axis=axis)
def METHOD_NAME(x, n, axis=-1):
    """Zero-pad `x` with `n` zeros at each end of `axis`.

    Parameters
    ----------
    x : ndarray
        The array to be extended.
    n : int
        Number of zeros appended at each end of `axis`.
    axis : int, optional
        The axis along which to extend `x`. Default is -1.

    Returns
    -------
    ndarray
        `x` itself when ``n < 1``, otherwise the zero-padded array.

    Examples
    --------
    >>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
    >>> zero_ext(a, 2)
    array([[ 0,  0,  1,  2,  3,  4,  5,  0,  0],
           [ 0,  0,  0,  1,  4,  9, 16,  0,  0]])
    """
    if n < 1:
        return x
    pad_shape = list(x.shape)
    pad_shape[axis] = n
    pad = np.zeros(pad_shape, dtype=x.dtype)
    return np.concatenate((pad, x, pad), axis=axis)
6,536 | tokenize fn | import argparse
import os
import socket
from functools import partial
import pandas as pd
import ray
import torch
from coati.quant import llama_load_quant, low_resource_init
from coati.ray.detached_trainer_ppo import DetachedPPOTrainer
from coati.ray.experience_maker_holder import ExperienceMakerHolder
from coati.ray.utils import (
get_actor_from_args,
get_critic_from_args,
get_reward_model_from_args,
get_strategy_from_args,
get_tokenizer_from_args,
)
from torch.utils.data import DataLoader
from transformers import AutoConfig
from transformers.modeling_utils import no_init_weights
def get_free_port():
    """Bind a throwaway TCP socket to port 0 and return the port the OS picked.

    Note: the port is released on return, so another process could grab it
    before the caller binds it again (inherent race of this technique).
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.bind(('', 0))
        return probe.getsockname()[1]
    finally:
        probe.close()
def get_local_ip():
    """Return this host's primary outbound IPv4 address.

    "Connects" a UDP socket to a public address and reads the local endpoint;
    connect() on a UDP socket only selects a route, no packet is sent.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        probe.connect(('8.8.8.8', 80))
        return probe.getsockname()[0]
    finally:
        probe.close()
def main(args):
    """Launch a detached-PPO training topology on an existing Ray cluster:
    N trainer actors plus one experience-maker actor, wired by name.

    Args:
        args: parsed CLI namespace (see the ``__main__`` block).
    """
    master_addr = str(get_local_ip())
    # trainer_env_info
    trainer_port = str(get_free_port())
    # One torch-distributed env per trainer rank; all share the same rendezvous.
    env_info_trainers = [{
        'local_rank': '0',
        'rank': str(rank),
        'world_size': str(args.num_trainers),
        'master_port': trainer_port,
        'master_addr': master_addr
    } for rank in range(args.num_trainers)]
    # maker_env_info
    maker_port = str(get_free_port())
    env_info_maker = {
        'local_rank': '0',
        'rank': '0',
        'world_size': '1',
        'master_port': maker_port,
        'master_addr': master_addr
    }
    # configure tokenizer
    tokenizer = get_tokenizer_from_args(args.model)

    def trainer_model_fn():
        # Built lazily inside each trainer actor (needs that actor's GPU).
        # NOTE(review): uses args.model for the critic although a separate
        # --critic_model option is parsed — confirm this is intended.
        actor = get_actor_from_args(args.model, args.pretrain).half().cuda()
        critic = get_critic_from_args(args.model, args.critic_pretrain).half().cuda()
        return actor, critic

    # configure Trainer
    trainer_refs = [
        DetachedPPOTrainer.options(name=f"trainer{i}", num_gpus=1, max_concurrency=2).remote(
            experience_maker_holder_name_list=["maker1"],
            strategy_fn=partial(get_strategy_from_args, args.trainer_strategy),
            model_fn=trainer_model_fn,
            env_info=env_info_trainer,
            train_batch_size=args.train_batch_size,
            buffer_limit=16,
            eval_performance=True,
            debug=args.debug,
            update_lora_weights=not (args.lora_rank == 0),
        ) for i, env_info_trainer in enumerate(env_info_trainers)
    ]

    def model_fn():
        # Built lazily inside the experience maker; all models are frozen
        # (requires_grad_(False)) — the maker only does inference.
        actor = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda()
        critic = get_critic_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda()
        reward_model = get_reward_model_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda()
        if args.initial_model_quant_ckpt is not None and args.model == 'llama':
            # quantize initial model
            actor_cfg = AutoConfig.from_pretrained(args.pretrain)
            with low_resource_init(), no_init_weights():
                initial_model = get_actor_from_args(args.model, config=actor_cfg)
            initial_model.model = llama_load_quant(initial_model.model, args.initial_model_quant_ckpt, args.quant_bits,
                                                   args.quant_group_size).cuda().requires_grad_(False)
        else:
            initial_model = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda()
        return actor, critic, reward_model, initial_model

    # configure Experience Maker
    experience_holder_ref = ExperienceMakerHolder.options(name="maker1", num_gpus=1, max_concurrency=2).remote(
        detached_trainer_name_list=[f'trainer{i}' for i in range(args.num_trainers)],
        strategy_fn=partial(get_strategy_from_args, args.maker_strategy),
        model_fn=model_fn,
        env_info=env_info_maker,
        experience_batch_size=args.experience_batch_size,
        kl_coef=0.1,
        debug=args.debug,
        update_lora_weights=not (args.lora_rank == 0),
        # sync_models_from_trainers=True,
        # generation kwargs:
        max_length=512,
        do_sample=True,
        temperature=1.0,
        top_k=50,
        pad_token_id=tokenizer.pad_token_id,
        eos_token_id=tokenizer.eos_token_id,
        eval_performance=True,
        use_cache=True,
    )
    # uncomment this function if sync_models_from_trainers is True
    # ray.get([
    #     trainer_ref.sync_models_to_remote_makers.remote()
    #     for trainer_ref in trainer_refs
    # ])
    wait_tasks = []
    # Total optimizer steps per trainer, derived from the experience volume.
    total_steps = args.experience_batch_size * args.experience_steps // (args.num_trainers * args.train_batch_size)
    for trainer_ref in trainer_refs:
        wait_tasks.append(trainer_ref.fit.remote(total_steps, args.update_steps, args.train_epochs))
    dataset_size = args.experience_batch_size * 4

    def build_dataloader():
        # Prompt loader built inside the maker actor (tokenizer moves batches
        # to that actor's GPU).
        def METHOD_NAME(texts):
            batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True)
            return {k: v.cuda() for k, v in batch.items()}
        dataset = pd.read_csv(args.prompt_path)['prompt']
        dataloader = DataLoader(dataset=dataset, batch_size=dataset_size, shuffle=True, collate_fn=METHOD_NAME)
        return dataloader

    wait_tasks.append(experience_holder_ref.workingloop.remote(build_dataloader, num_steps=args.experience_steps))
    # Block until both the trainers and the experience maker finish.
    ray.get(wait_tasks)
if __name__ == '__main__':
    # CLI for the 1-maker / N-trainer detached PPO demo.
    parser = argparse.ArgumentParser()
    parser.add_argument('--prompt_path', type=str, default=None)
    parser.add_argument('--num_trainers', type=int, default=1)
    parser.add_argument('--trainer_strategy',
                        choices=[
                            'ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu',
                            'colossalai_zero2_cpu'
                        ],
                        default='ddp')
    parser.add_argument('--maker_strategy', choices=['naive'], default='naive')
    parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama'])
    parser.add_argument('--critic_model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama'])
    parser.add_argument('--pretrain', type=str, default=None)
    parser.add_argument('--critic_pretrain', type=str, default=None)
    parser.add_argument('--experience_steps', type=int, default=4)
    parser.add_argument('--experience_batch_size', type=int, default=8)
    parser.add_argument('--train_epochs', type=int, default=1)
    parser.add_argument('--update_steps', type=int, default=2)
    parser.add_argument('--train_batch_size', type=int, default=8)
    parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank")
    parser.add_argument('--initial_model_quant_ckpt', type=str, default=None)
    parser.add_argument('--quant_bits', type=int, default=4)
    parser.add_argument('--quant_group_size', type=int, default=128)
    parser.add_argument('--debug', action='store_true')
    args = parser.parse_args()
    # Join the Ray cluster namespace from the environment and propagate the
    # full environment to every remote worker.
    ray.init(namespace=os.environ["RAY_NAMESPACE"], runtime_env={"env_vars": dict(os.environ)})
    main(args)
6,537 | parse events into yaml | # Copyright (C) 2015-2021, Wazuh Inc.
# Created by Wazuh, Inc. <info@wazuh.com>.
# This program is free software; you can redistribute it and/or modify it under the terms of GPLv2
import argparse
import json
import logging
import os
import subprocess
import time
import yaml
from wazuh_testing import logger
from wazuh_testing.analysis import callback_analysisd_agent_id, callback_analysisd_event
from wazuh_testing.tools import WAZUH_LOGS_PATH, LOG_FILE_PATH, WAZUH_PATH
from wazuh_testing.tools.file import truncate_file
from wazuh_testing.tools.monitoring import ManInTheMiddle, QueueMonitor
from wazuh_testing.tools.services import control_service, check_daemon_status, delete_sockets
alerts_json = os.path.join(WAZUH_LOGS_PATH, 'alerts', 'alerts.json')
analysis_path = os.path.join(os.path.join(WAZUH_PATH, 'queue', 'sockets', 'queue'))
# Syscheck variables
n_directories = 0
testdir = 'testdir'
yaml_file = 'syscheck_events_win32.yaml'
expected_deleted = None
def generate_analysisd_yaml(n_events, modify_events):
    """Drive a live Wazuh deployment to produce syscheck alerts and dump the
    intercepted analysisd traffic as YAML test cases.

    Restarts the daemons, sniffs the analysisd socket with a MITM proxy,
    waits until `n_events` 'deleted' alerts appear in alerts.json, then
    collects added/modified/deleted event batches and writes them to the
    module-level ``yaml_file``.

    Args:
        n_events (int): expected count of 'added' and 'deleted' events.
        modify_events (int): expected count of 'modified' events.

    Returns:
        ManInTheMiddle: the running proxy (caller must shut it down).
    """
    def METHOD_NAME(requests, yaml_file):
        # Convert raw (request, parsed-event) pairs into YAML test cases and
        # append them to `yaml_file` (parameter shadows the module global on
        # purpose — the same path is passed in).
        yaml_result = []
        with open(yaml_file, 'a') as y_f:
            id_ev = 0
            for req, event in requests:
                type_ev = event['data']['type']
                stage_ev = type_ev.title()
                mode = None
                agent_id = callback_analysisd_agent_id(req) or '000'
                # Fields not compared by the test are stripped from the event.
                del event['data']['mode']
                del event['data']['type']
                if 'tags' in event['data']:
                    del event['data']['tags']
                if type_ev == 'added':
                    mode = 'save2'
                    output_ev = json.dumps(event['data'])
                elif type_ev == 'deleted':
                    # Deletions are keyed by bare path (quotes removed).
                    mode = 'delete'
                    output_ev = json.dumps(event['data']['path']).replace('"', '')
                elif type_ev == 'modified':
                    mode = 'save2'
                    for field in ['old_attributes', 'changed_attributes', 'content_changes']:
                        if field in event['data']:
                            del event['data'][field]
                    output_ev = json.dumps(event['data'])
                yaml_result.append({
                    'name': f"{stage_ev}{id_ev}",
                    'test_case': [
                        {
                            'input': f"{req}",
                            'output': f"agent {agent_id} syscheck {mode} {output_ev}",
                            'stage': f"{stage_ev}"
                        }
                    ]
                })
                id_ev += 1
            y_f.write(yaml.safe_dump(yaml_result))

    def remove_logs():
        # Wipe every file under the Wazuh logs tree for a clean run.
        for root, _, files in os.walk(WAZUH_LOGS_PATH):
            for file in files:
                os.remove(os.path.join(root, file))

    # Restart syscheckd with the new configuration
    truncate_file(LOG_FILE_PATH)
    control_service('stop')
    check_daemon_status(running_condition=False)
    remove_logs()
    control_service('start', daemon='wazuh-db', debug_mode=True)
    check_daemon_status(running_condition=True, target_daemon='wazuh-db')
    control_service('start', daemon='wazuh-analysisd', debug_mode=True)
    check_daemon_status(running_condition=True, target_daemon='wazuh-analysisd')
    # Interpose on the analysisd socket before remoted starts feeding it.
    mitm_analysisd = ManInTheMiddle(address=analysis_path, family='AF_UNIX', connection_protocol='UDP')
    analysis_queue = mitm_analysisd.queue
    mitm_analysisd.start()
    control_service('start', daemon='wazuh-remoted', debug_mode=True)
    check_daemon_status(running_condition=True, target_daemon='wazuh-remoted')
    analysis_monitor = QueueMonitor(analysis_queue)
    # Poll alerts.json until the expected number of 'deleted' alerts arrives.
    while True:
        try:
            grep = subprocess.Popen(['grep', 'deleted', alerts_json], stdout=subprocess.PIPE)
            wc = int(subprocess.check_output(['wc', '-l', ], stdin=grep.stdout).decode())
        except subprocess.CalledProcessError:
            wc = 0
        if wc >= n_events:
            logging.debug('All alerts received. Collecting by alert type...')
            break
        logger.debug(f'{wc} deleted events so far.')
        logger.debug('Waiting for alerts. Sleeping 5 seconds.')
        time.sleep(5)
    # Events arrive in order: added, then modified, then deleted batches.
    added = analysis_monitor.start(timeout=max(0.01 * n_events, 10), callback=callback_analysisd_event,
                                   accum_results=n_events).result()
    logger.debug('"added" alerts collected.')
    modified = analysis_monitor.start(timeout=max(0.01 * n_events, 10), callback=callback_analysisd_event,
                                      accum_results=modify_events).result()
    logger.debug('"modified" alerts collected.')
    deleted = analysis_monitor.start(timeout=max(0.01 * n_events, 10), callback=callback_analysisd_event,
                                     accum_results=n_events).result()
    logger.debug('"deleted" alerts collected.')
    # Truncate file
    with open(yaml_file, 'w')as y_f:
        y_f.write('---\n')
    for ev_list in [added, modified, deleted]:
        METHOD_NAME(ev_list, yaml_file)
    logger.debug(f'YAML done: "{yaml_file}"')
    return mitm_analysisd
def kill_daemons():
    """Stop every Wazuh daemon started by the generator and verify each is down."""
    daemons_to_stop = ('wazuh-remoted', 'wazuh-analysisd', 'wazuh-db')
    for daemon_name in daemons_to_stop:
        control_service('stop', daemon=daemon_name)
        check_daemon_status(running_condition=False, target_daemon=daemon_name)
def get_script_arguments(argv=None):
    """Parse the command line options of the YAML generator.

    Args:
        argv (list, optional): Argument list to parse instead of ``sys.argv[1:]``.
            Defaults to None, which preserves the original command-line behavior.

    Returns:
        argparse.Namespace: Parsed options (``n_events``, ``modified_events``,
            ``debug_level``).
    """
    list_of_choices = ['DEBUG', 'ERROR']
    parser = argparse.ArgumentParser(usage="python3 %(prog)s [options]",
                                     description="Analysisd YAML generator (Windows)",
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-e', '--events', dest='n_events', default=4096, type=int,
                        help='Specify how many events will be expected. Default 4096.', action='store')
    parser.add_argument('-m', '--modified', dest='modified_events', default=4080, type=int,
                        help='Specify how many modified events will be expected. Default 4080.', action='store')
    parser.add_argument('-d', '--debug', dest='debug_level', default='ERROR', choices=list_of_choices,
                        help='Specify debug level. Default "ERROR".', action='store')
    # parse_args(None) falls back to sys.argv[1:], so existing callers are unaffected.
    return parser.parse_args(argv)
if __name__ == '__main__':
    # Map the supported CLI choices onto stdlib logging levels (DEBUG=10, ERROR=40).
    log_level = {'DEBUG': 10, 'ERROR': 40}
    options = get_script_arguments()
    events = options.n_events
    modified = options.modified_events
    logger.setLevel(log_level[options.debug_level])
    try:
        mitm = generate_analysisd_yaml(n_events=events, modify_events=modified)
        mitm.shutdown()
    except (TimeoutError, FileNotFoundError) as e:
        logger.error(f'Could not generate the YAML. Please clean the environment.{e}')
        delete_sockets()
    finally:
        # Always leave the environment with the daemons stopped and the main
        # service freshly restarted, even if generation failed.
        kill_daemons()
        control_service('start')
import contextlib
import json
import ssl
import threading
from http.server import HTTPServer, SimpleHTTPRequestHandler
from pathlib import Path
from typing import Any, Callable, Dict, Iterator, Optional, Tuple
import determined
from determined.common.api import bindings
from determined.common.api.authentication import salt_and_hash
# Self-signed certificate pairs used to exercise multi-master TLS connections.
CERTS_DIR = Path(__file__).parent / "multimaster-certs"
CERTS1 = {
    "keyfile": CERTS_DIR / "key1.pem",
    "certfile": CERTS_DIR / "cert1.pem",
}
CERTS2 = {
    "keyfile": CERTS_DIR / "key2.pem",
    "certfile": CERTS_DIR / "cert2.pem",
}
# Defaults for the fake master's bind address and the credentials it accepts.
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 12345
DEFAULT_USER = "user1"
DEFAULT_PASSWORD = "password1"
DEFAULT_TOKEN = "token1"
# Directory holding canned JSON responses (e.g. experiment.json).
FIXTURES_DIR = Path(__file__).parent.parent / "fixtures"
def sample_get_experiment(**kwargs: Any) -> bindings.v1GetExperimentResponse:
    """Get an experiment from a fixture and optionally override some fields.

    Load a sample experiment from a fixture. It's assumed that generally a caller cares only that
    the response is well-formed. If instead the caller cares about any particular fields, they can
    override them by passing them as keyword arguments.

    Args:
        **kwargs: Fields to override in the experiment.

    Returns:
        A bindings.v1GetExperimentResponse object with the experiment. NOTE: The returned object
        is a bindings type, *not* a ExperimentReference.
    """
    fixture_path = FIXTURES_DIR / "experiment.json"
    with fixture_path.open() as fixture_file:
        raw = json.load(fixture_file)
    response = bindings.v1GetExperimentResponse.from_json(raw)
    for field_name, field_value in kwargs.items():
        setattr(response.experiment, field_name, field_value)
    return response
@contextlib.contextmanager
def run_api_server(
    address: Tuple[str, int] = (DEFAULT_HOST, DEFAULT_PORT),
    credentials: Tuple[str, str, str] = (DEFAULT_USER, DEFAULT_PASSWORD, DEFAULT_TOKEN),
    ssl_keys: Optional[Dict[str, Path]] = CERTS1,
) -> Iterator[str]:
    """Serve a minimal fake Determined master on a background thread.

    Args:
        address: (host, port) to bind the HTTP server to.
        credentials: (username, password, token) tuple the fake master accepts.
        ssl_keys: Mapping with "keyfile"/"certfile" paths to serve HTTPS,
            or None to serve plain HTTP.

    Yields:
        The base URL ("http(s)://host:port") of the running server. The server
        is shut down and its thread joined when the context exits.
    """
    user, password, token = credentials
    # Lock-protected mutable state shared by the stateful handlers below
    # (call counters for the flaky/long-running experiment endpoints).
    lock = threading.RLock()
    state: Dict[str, Any] = {}
    class RequestHandler(SimpleHTTPRequestHandler):
        def _info(self) -> Dict[str, Any]:
            return {"cluster_id": "fake-cluster", "version": determined.__version__}
        def _users_me(self) -> Dict[str, Any]:
            return {"username": user}
        def _login(self) -> Dict[str, Any]:
            # Clients post the salted-and-hashed password, never the plaintext.
            content_length = int(self.headers["Content-Length"])
            post_data = self.rfile.read(content_length)
            posted_credentials = json.loads(post_data)
            expected_password = salt_and_hash(password)
            assert posted_credentials.get("username") == user
            assert posted_credentials.get("password") == expected_password
            fake_user = {"username": user, "admin": True, "active": True}
            return {"token": token, "user": fake_user}
        def METHOD_NAME(self) -> Dict[str, Any]:
            # Empty paginated model list; requires the bearer token from login.
            assert self.headers["Authorization"] == f"Bearer {token}"
            return {
                "models": [],
                "pagination": {
                    "offset": 0,
                    "limit": 100,
                    "startIndex": 0,
                    "endIndex": 0,
                    "total": 0,
                },
            }
        def get_experiment_longrunning(self) -> Dict[str, Any]:
            """A master response to get_GetExperiment for a long-running experiment.

            This function models an experiment that may take a long time to complete. The first
            two times get_experiment is called, the experiment state is still in a
            bindings.experimentv1State.RUNNING. On the third call, its state is
            bindings.experimentv1State.COMPLETED.

            Returns:
                If successful, a JSON-encoded sample experiment. Else None.
            """
            key = "get_experiment_longrunning_n_calls"
            n_calls = 2
            sample_experiment = sample_get_experiment()
            with lock:
                state[key] = state.get(key, 0) + 1
                if state[key] <= n_calls:
                    sample_experiment.experiment.state = bindings.experimentv1State.RUNNING
                else:
                    sample_experiment.experiment.state = bindings.experimentv1State.COMPLETED
            return sample_experiment.to_json()
        def get_experiment_flaky(self) -> Dict[str, Any]:
            """A master response to get_GetExperiment for a flaky master.

            This function models an experiment where master sometimes cannot be reached. The first
            two times get_experiment is called, the call to the master returns a 504 HTTP code.
            The third call is successful.

            Returns:
                If successful, a JSON-encoded sample experiment. Else None.
            """
            key = "get_experiment_flaky_n_calls"
            fail_for = 2
            with lock:
                state[key] = state.get(key, 0) + 1
                if state[key] <= fail_for:
                    self.send_error(504)
                    return {}
            return sample_get_experiment().to_json()
        def do_core(self, fn: Optional[Callable[..., Dict[str, Any]]]) -> None:
            # Dispatch helper shared by do_GET/do_POST: 404 for unmapped paths.
            if fn is None:
                self.send_error(404, f"path not handled: {self.path}")
                return None
            result = fn()
            self._send_result(result)
        def do_GET(self) -> None:
            fn = {
                "/info": self._info,
                "/users/me": self._users_me,
                "/api/v1/models": self.METHOD_NAME,
                "/api/v1/experiments/1": self.get_experiment_flaky,
                "/api/v1/experiments/2": self.get_experiment_longrunning,
            }.get(self.path.split("?")[0])
            self.do_core(fn)
        def do_POST(self) -> None:
            fn = {
                "/api/v1/auth/login": self._login,
            }.get(self.path)
            self.do_core(fn)
        def _send_result(self, result: Dict[str, Any]) -> None:
            response = json.dumps(result).encode("utf8")
            self.send_response(200)
            self.send_header("Content-type", "application/json")
            self.send_header("Content-Length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)
    server = HTTPServer(address, RequestHandler)
    if ssl_keys is not None:
        # ssl.wrap_socket() was deprecated in Python 3.7 and removed in 3.12;
        # build an explicit server-side SSLContext instead.
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        context.load_cert_chain(
            certfile=str(ssl_keys["certfile"]), keyfile=str(ssl_keys["keyfile"])
        )
        server.socket = context.wrap_socket(server.socket, server_side=True)
    thread = threading.Thread(target=server.serve_forever, args=[0.1])
    thread.start()
    try:
        host = address[0]
        port = address[1]
        protocol = "https" if ssl_keys is not None else "http"
        yield f"{protocol}://{host}:{port}"
    finally:
        server.shutdown()
        thread.join()
import logging
from typing import Callable, Optional, Any, ContextManager
from types import ModuleType
import importlib
import importlib.util
import importlib.resources
import importlib.abc
import sys
from pathlib import Path
from dataclasses import dataclass
from esphome.const import SOURCE_FILE_EXTENSIONS
import esphome.core.config
from esphome.core import CORE
from esphome.types import ConfigType
_LOGGER = logging.getLogger(__name__)
@dataclass(frozen=True, order=True)
class FileResource:
    """Immutable reference to a single file resource inside an importable package."""

    # Dotted package name, e.g. "esphome.components.gpio".
    package: str
    # File name within that package, e.g. "gpio.cpp".
    resource: str
    def path(self) -> ContextManager[Path]:
        # importlib.resources.path returns a context manager yielding a real
        # filesystem path (extracting the resource if necessary).
        return importlib.resources.path(self.package, self.resource)
class ComponentManifest:
    """Wraps an imported component module and exposes its declared metadata.

    Each accessor reads a well-known module-level attribute (e.g. DEPENDENCIES,
    CONFIG_SCHEMA) and falls back to a sensible default when it is absent.
    """

    def __init__(self, module: ModuleType):
        self.module = module
    @property
    def package(self) -> str:
        """Return the package name the module is contained in.
        Examples:
        - esphome/components/gpio/__init__.py -> esphome.components.gpio
        - esphome/components/gpio/switch/__init__.py -> esphome.components.gpio.switch
        - esphome/components/a4988/stepper.py -> esphome.components.a4988
        """
        return self.module.__package__
    @property
    def is_platform(self) -> bool:
        # Platforms live one level deeper: esphome.components.<comp>.<platform>.
        return len(self.module.__name__.split(".")) == 4
    @property
    def is_platform_component(self) -> bool:
        """Whether this component declares itself as a platform host (IS_PLATFORM_COMPONENT)."""
        return getattr(self.module, "IS_PLATFORM_COMPONENT", False)
    @property
    def config_schema(self) -> Optional[Any]:
        """The component's CONFIG_SCHEMA, or None if it declares none."""
        return getattr(self.module, "CONFIG_SCHEMA", None)
    @property
    def multi_conf(self) -> bool:
        """Whether the component may appear multiple times in one config (MULTI_CONF)."""
        return getattr(self.module, "MULTI_CONF", False)
    @property
    def to_code(self) -> Optional[Callable[[Any], None]]:
        """The component's code-generation entry point, if defined."""
        return getattr(self.module, "to_code", None)
    @property
    def dependencies(self) -> list[str]:
        """Component names that must be configured for this one to work (DEPENDENCIES)."""
        return getattr(self.module, "DEPENDENCIES", [])
    @property
    def METHOD_NAME(self) -> list[str]:
        """Component names that cannot be used together with this one (CONFLICTS_WITH)."""
        return getattr(self.module, "CONFLICTS_WITH", [])
    @property
    def auto_load(self) -> list[str]:
        # AUTO_LOAD may be a static list or a callable returning one.
        al = getattr(self.module, "AUTO_LOAD", [])
        if callable(al):
            return al()
        return al
    @property
    def codeowners(self) -> list[str]:
        """GitHub handles responsible for this component (CODEOWNERS)."""
        return getattr(self.module, "CODEOWNERS", [])
    @property
    def final_validate_schema(self) -> Optional[Callable[[ConfigType], None]]:
        """Components can declare a `FINAL_VALIDATE_SCHEMA` cv.Schema that gets called
        after the main validation. In that function checks across components can be made.
        Note that the function can't mutate the configuration - no changes are saved
        """
        return getattr(self.module, "FINAL_VALIDATE_SCHEMA", None)
    @property
    def resources(self) -> list[FileResource]:
        """Return a list of all file resources defined in the package of this component.
        This will return all cpp source files that are located in the same folder as the
        loaded .py file (does not look through subdirectories)
        """
        ret = []
        for resource in importlib.resources.contents(self.package):
            if Path(resource).suffix not in SOURCE_FILE_EXTENSIONS:
                continue
            if not importlib.resources.is_resource(self.package, resource):
                # Not a resource = this is a directory (yeah this is confusing)
                continue
            ret.append(FileResource(self.package, resource))
        return ret
class ComponentMetaFinder(importlib.abc.MetaPathFinder):
    """Meta path finder that resolves ``esphome.components.<name>`` imports
    from an external components directory (e.g. a user's custom_components).
    """

    def __init__(
        self, components_path: Path, allowed_components: Optional[list[str]] = None
    ) -> None:
        # When given, only these component names may be resolved from this path.
        self._allowed_components = allowed_components
        self._finders = []
        for hook in sys.path_hooks:
            try:
                finder = hook(str(components_path))
            except ImportError:
                # This hook does not handle filesystem paths; try the next one.
                continue
            self._finders.append(finder)
    def find_spec(self, fullname: str, path: Optional[list[str]], target=None):
        # Only intercept imports under the esphome.components namespace.
        if not fullname.startswith("esphome.components."):
            return None
        parts = fullname.split(".")
        if len(parts) != 3:
            # only handle direct components, not platforms
            # platforms are handled automatically when parent is imported
            return None
        component = parts[2]
        if (
            self._allowed_components is not None
            and component not in self._allowed_components
        ):
            return None
        # Delegate to the path-hook finders built in __init__; first match wins.
        for finder in self._finders:
            spec = finder.find_spec(fullname, target=target)
            if spec is not None:
                return spec
        return None
def clear_component_meta_finders():
    """Remove every ComponentMetaFinder previously installed on sys.meta_path."""
    remaining = []
    for finder in sys.meta_path:
        if not isinstance(finder, ComponentMetaFinder):
            remaining.append(finder)
    sys.meta_path = remaining
def install_meta_finder(
    components_path: Path, allowed_components: Optional[list[str]] = None
):
    """Install a ComponentMetaFinder for ``components_path`` at the front of
    sys.meta_path so it takes precedence over the built-in component package."""
    sys.meta_path.insert(0, ComponentMetaFinder(components_path, allowed_components))
def install_custom_components_meta_finder():
    """Make components in ``<config_dir>/custom_components`` importable."""
    custom_components_dir = (Path(CORE.config_dir) / "custom_components").resolve()
    install_meta_finder(custom_components_dir)
def _lookup_module(domain):
    """Import ``esphome.components.<domain>`` and wrap it in a ComponentManifest.

    Results are memoized in _COMPONENT_CACHE. Returns None when the component
    does not exist or fails to load.
    """
    if domain in _COMPONENT_CACHE:
        return _COMPONENT_CACHE[domain]
    try:
        module = importlib.import_module(f"esphome.components.{domain}")
    except ImportError as e:
        # "No module named" just means the component doesn't exist; anything
        # else is a genuine import failure worth surfacing with a traceback.
        if "No module named" not in str(e):
            _LOGGER.error("Unable to import component %s:", domain, exc_info=True)
        return None
    except Exception:  # pylint: disable=broad-except
        _LOGGER.error("Unable to load component %s:", domain, exc_info=True)
        return None
    manif = ComponentManifest(module)
    _COMPONENT_CACHE[domain] = manif
    return manif
def get_component(domain):
    """Look up the manifest for a top-level component by its domain name."""
    assert "." not in domain
    return _lookup_module(domain)
def get_platform(domain, platform):
    """Look up the manifest for ``domain`` under the given ``platform``."""
    qualified = ".".join((platform, domain))
    return _lookup_module(qualified)
# Memoization cache for _lookup_module, keyed by (dotted) domain name.
_COMPONENT_CACHE = {}
CORE_COMPONENTS_PATH = (Path(__file__).parent / "components").resolve()
# Preload the manifest for esphome's own core config module.
_COMPONENT_CACHE["esphome"] = ComponentManifest(esphome.core.config)
# ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
# ┃ ██████ ██████ ██████ █ █ █ █ █ █▄ ▀███ █ ┃
# ┃ ▄▄▄▄▄█ █▄▄▄▄▄ ▄▄▄▄▄█ ▀▀▀▀▀█▀▀▀▀▀ █ ▀▀▀▀▀█ ████████▌▐███ ███▄ ▀█ █ ▀▀▀▀▀ ┃
# ┃ █▀▀▀▀▀ █▀▀▀▀▀ █▀██▀▀ ▄▄▄▄▄ █ ▄▄▄▄▄█ ▄▄▄▄▄█ ████████▌▐███ █████▄ █ ▄▄▄▄▄ ┃
# ┃ █ ██████ █ ▀█▄ █ ██████ █ ███▌▐███ ███████▄ █ ┃
# ┣━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┫
# ┃ Copyright (c) 2017, the Perspective Authors. ┃
# ┃ ╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌ ┃
# ┃ This file is part of the Perspective library, distributed under the terms ┃
# ┃ of the [Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0). ┃
# ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
import random
import logging
import threading
import tornado.websocket
import tornado.web
import tornado.ioloop
from datetime import date, datetime
from perspective import Table, PerspectiveManager, PerspectiveTornadoHandler
import threading
import concurrent.futures
def METHOD_NAME():
    """Generate five random security quote rows for the streaming demo.

    Returns:
        list[dict]: Rows with a security name, a client, OHLC prices and the
        current timestamp/date.
    """
    rows = []
    # One multiplier per batch so the five rows of a batch move together.
    modifier = random.random() * random.randint(1, 50)
    for _ in range(5):
        rows.append(
            {
                # random.choice is the idiomatic (and equivalent) way to pick
                # a uniform random element.
                "name": random.choice(SECURITIES),
                "client": random.choice(CLIENTS),
                "open": (random.random() * 75 + random.randint(0, 9)) * modifier,
                "high": (random.random() * 105 + random.randint(1, 3)) * modifier,
                "low": (random.random() * 85 + random.randint(1, 3)) * modifier,
                "close": (random.random() * 90 + random.randint(1, 3)) * modifier,
                "lastUpdate": datetime.now(),
                "date": date.today(),
            }
        )
    return rows
# Run the Perspective manager callbacks on a thread pool (True) or directly
# on the dedicated IOLoop (False).
IS_MULTI_THREADED = True
# Fake ticker symbols used to populate the demo table.
SECURITIES = [
    "AAPL.N",
    "AMZN.N",
    "QQQ.N",
    "NVDA.N",
    "TSLA.N",
    "FB.N",
    "MSFT.N",
    "TLT.N",
    "XIV.N",
    "YY.N",
    "CSCO.N",
    "GOOGL.N",
    "PCLN.N",
]
# Fake client names attached to each generated row.
CLIENTS = ["Homer", "Marge", "Bart", "Lisa", "Maggie", "Moe", "Lenny", "Carl", "Krusty"]
def perspective_thread(manager):
    """Perspective application thread starts its own tornado IOLoop, and
    adds the table with the name "data_source_one", which will be used
    in the front-end."""
    # limit=2500 keeps only the newest 2500 rows in the table.
    table = Table(
        {
            "name": str,
            "client": str,
            "open": float,
            "high": float,
            "low": float,
            "close": float,
            "lastUpdate": datetime,
            "date": date,
        },
        limit=2500,
    )
    # Track the table with the name "data_source_one", which will be used in
    # the front-end to access the Table.
    manager.host_table("data_source_one", table)
    # update with new data every 50ms
    def updater():
        table.update(METHOD_NAME())
    callback = tornado.ioloop.PeriodicCallback(callback=updater, callback_time=50)
    # This loop runs on the current (non-main) thread; start() blocks forever.
    psp_loop = tornado.ioloop.IOLoop()
    if IS_MULTI_THREADED:
        # Dispatch manager callbacks onto an executor so table updates don't
        # block the IOLoop; the executor lives for the lifetime of the loop.
        with concurrent.futures.ThreadPoolExecutor() as executor:
            manager.set_loop_callback(psp_loop.run_in_executor, executor)
            callback.start()
            psp_loop.start()
    else:
        manager.set_loop_callback(psp_loop.add_callback)
        callback.start()
        psp_loop.start()
def make_app():
    """Build the tornado Application and start the Perspective data thread.

    The manager runs on a daemon thread so it never blocks interpreter exit.
    """
    manager = PerspectiveManager()
    thread = threading.Thread(target=perspective_thread, args=(manager,))
    thread.daemon = True
    thread.start()
    return tornado.web.Application(
        [
            # create a websocket endpoint that the client Javascript can access
            (
                r"/websocket",
                PerspectiveTornadoHandler,
                {"manager": manager, "check_origin": True},
            ),
            # Serve front-end dependencies straight out of node_modules.
            (
                r"/node_modules/(.*)",
                tornado.web.StaticFileHandler,
                {"path": "../../node_modules/"},
            ),
            # Everything else falls through to the static demo page.
            (
                r"/(.*)",
                tornado.web.StaticFileHandler,
                {"path": "./", "default_filename": "index.html"},
            ),
        ]
    )
if __name__ == "__main__":
    app = make_app()
    app.listen(8080)
    # logging.critical is used so the startup banner is visible at any log level.
    logging.critical("Listening on http://localhost:8080")
    loop = tornado.ioloop.IOLoop.current()
    loop.start()
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2023, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from bokeh.application.handlers.handler import Handler
from bokeh.document import Document
from tests.support.util.filesystem import with_file_contents
# Module under test
import bokeh.application.handlers.server_lifecycle as bahs # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
script_adds_four_handlers = """
def on_server_loaded(server_context):
return "on_server_loaded"
def on_server_unloaded(server_context):
return "on_server_unloaded"
def on_session_created(session_context):
return "on_session_created"
def on_session_destroyed(session_context):
return "on_session_destroyed"
"""
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class Test_ServerLifecycleHandler:
    """Tests for ServerLifecycleHandler: hook loading, signature validation,
    error reporting, and URL path handling."""

    # Public methods ----------------------------------------------------------
    async def test_empty_lifecycle(self) -> None:
        # A script with no hooks must load cleanly, run all no-op hooks, and
        # leave the document untouched.
        doc = Document()
        result: dict[str, Handler] = {}
        def load(filename: str):
            handler = bahs.ServerLifecycleHandler(filename=filename)
            handler.modify_document(doc)
            result['handler'] = handler
        with_file_contents("# This script does nothing", load)
        handler = result['handler']
        handler.on_server_loaded(None)
        handler.on_server_unloaded(None)
        await handler.on_session_created(None)
        await handler.on_session_destroyed(None)
        if handler.failed:
            raise RuntimeError(handler.error)
        assert not doc.roots
    def test_lifecycle_bad_syntax(self) -> None:
        result: dict[str, Handler] = {}
        def load(filename: str):
            handler = bahs.ServerLifecycleHandler(filename=filename)
            result['handler'] = handler
        with_file_contents("This is a syntax error", load)
        handler = result['handler']
        assert handler.error is not None
        assert 'Invalid syntax' in handler.error
    def test_lifecycle_runtime_error(self) -> None:
        result: dict[str, Handler] = {}
        def load(filename: str):
            handler = bahs.ServerLifecycleHandler(filename=filename)
            result['handler'] = handler
        with_file_contents("raise RuntimeError('nope')", load)
        handler = result['handler']
        assert handler.error is not None
        assert 'nope' in handler.error
    def test_lifecycle_bad_server_loaded_signature(self) -> None:
        # Hooks must accept exactly one context argument; extra params fail.
        result: dict[str, Handler] = {}
        def load(filename: str):
            handler = bahs.ServerLifecycleHandler(filename=filename)
            result['handler'] = handler
        with_file_contents("""
def on_server_loaded(a,b):
    pass
""", load)
        handler = result['handler']
        assert handler.error is not None
        assert handler.error_detail is not None
        assert 'on_server_loaded must have signature func(server_context)' in handler.error
        assert 'func(a, b)' in handler.error
        assert "Traceback" in handler.error_detail
    def METHOD_NAME(self) -> None:
        result: dict[str, Handler] = {}
        def load(filename: str):
            handler = bahs.ServerLifecycleHandler(filename=filename)
            result['handler'] = handler
        with_file_contents("""
def on_server_unloaded(a,b):
    pass
""", load)
        handler = result['handler']
        assert handler.error is not None
        assert handler.error_detail is not None
        assert 'on_server_unloaded must have signature func(server_context)' in handler.error
        assert 'func(a, b)' in handler.error
        assert "Traceback" in handler.error_detail
    def test_lifecycle_bad_session_created_signature(self) -> None:
        result: dict[str, Handler] = {}
        def load(filename: str):
            handler = bahs.ServerLifecycleHandler(filename=filename)
            result['handler'] = handler
        with_file_contents("""
def on_session_created(a,b):
    pass
""", load)
        handler = result['handler']
        assert handler.error is not None
        assert 'on_session_created must have signature func(session_context)' in handler.error
        assert 'func(a, b)' in handler.error
    def test_lifecycle_bad_session_destroyed_signature(self) -> None:
        result: dict[str, Handler] = {}
        def load(filename: str):
            handler = bahs.ServerLifecycleHandler(filename=filename)
            result['handler'] = handler
        with_file_contents("""
def on_session_destroyed(a,b):
    pass
""", load)
        handler = result['handler']
        assert handler.error is not None
        assert 'on_session_destroyed must have signature func(session_context)' in handler.error
        assert 'func(a, b)' in handler.error
    async def test_calling_lifecycle_hooks(self) -> None:
        # Each hook defined in the script is wired through and returns its value.
        result: dict[str, Handler] = {}
        def load(filename: str):
            handler = result['handler'] = bahs.ServerLifecycleHandler(filename=filename)
            if handler.failed:
                raise RuntimeError(handler.error)
        with_file_contents(script_adds_four_handlers, load)
        handler = result['handler']
        assert "on_server_loaded" == handler.on_server_loaded(None)
        assert "on_server_unloaded" == handler.on_server_unloaded(None)
        assert "on_session_created" == await handler.on_session_created(None)
        assert "on_session_destroyed" == await handler.on_session_destroyed(None)
    def test_url_path(self) -> None:
        result: dict[str, Handler] = {}
        def load(filename: str):
            handler = bahs.ServerLifecycleHandler(filename=filename)
            result['handler'] = handler
        with_file_contents("""
def on_server_unloaded(server_context):
    pass
""", load)
        handler = result['handler']
        assert handler.error is None
        url_path = handler.url_path()
        assert url_path is not None and url_path.startswith("/")
    def test_url_path_failed(self) -> None:
        # A handler that failed to load must not advertise a URL path.
        result: dict[str, Handler] = {}
        def load(filename: str):
            handler = bahs.ServerLifecycleHandler(filename=filename)
            result['handler'] = handler
        with_file_contents("""
# bad signature
def on_server_unloaded():
    pass
""", load)
        handler = result['handler']
        assert handler.error is not None
        assert handler.url_path() is None
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
import unittest
from unittest import mock
from django.core.management import call_command
class CommandTests(unittest.TestCase):
    """Smoke tests for paying_for_college management commands: each command's
    worker function is mocked and we assert it is invoked per invocation."""

    def setUp(self):
        # Silence command output for the duration of each test.
        stdout_patch = mock.patch("sys.stdout")
        stdout_patch.start()
        self.addCleanup(stdout_patch.stop)
    @mock.patch(
        "paying_for_college.management.commands."
        "tag_schools.tag_settlement_schools.tag_schools"
    )
    def test_tag_schools(self, mock_tag):
        mock_tag.return_value = "Aye Aye"
        call_command("tag_schools", "s3URL")
        self.assertEqual(mock_tag.call_count, 1)
    @mock.patch("paying_for_college.management.commands.purge.purge")
    def test_purges(self, mock_purge):
        mock_purge.return_value = "Aye Aye"
        call_command("purge", "notifications")
        self.assertEqual(mock_purge.call_count, 1)
        call_command("purge", "programs")
        self.assertEqual(mock_purge.call_count, 2)
        call_command("purge", "")
        self.assertEqual(mock_purge.call_count, 3)
        call_command("purge", "schools")
        self.assertEqual(mock_purge.call_count, 4)
    @mock.patch(
        "paying_for_college.management.commands.update_ipeds.load_values"
    )
    def test_update_ipeds(self, mock_load):
        mock_load.return_value = "DRY RUN"
        call_command("update_ipeds")
        self.assertEqual(mock_load.call_count, 1)
        call_command("update_ipeds", "--dry-run", "false")
        self.assertEqual(mock_load.call_count, 2)
        # An unrecognized --dry-run value must not trigger a load.
        call_command("update_ipeds", "--dry-run", "jabberwocky")
        self.assertEqual(mock_load.call_count, 2)
    @mock.patch(
        "paying_for_college.management.commands."
        "update_via_api.update_colleges.update"
    )
    def test_api_command_calls_update(self, mock_update):
        # NOTE(review): Mock.called_with(...) is NOT an assertion method — it
        # auto-creates a child mock, so assertTrue(...) on it is always true.
        # The intended method is assert_called_with(...); confirm the expected
        # call arguments before converting these (applies to every
        # `called_with` use in this class).
        mock_update.return_value = ([], "OK")
        call_command("update_via_api")
        self.assertTrue(mock_update.call_count == 1)
        call_command("update_via_api", "--school_id", "999999")
        self.assertTrue(mock_update.call_count == 2)
        self.assertTrue(mock_update.called_with(single_school=999999))
        call_command(
            "update_via_api", "--school_id", "999999", "--save_programs"
        )
        self.assertTrue(mock_update.call_count == 3)
        self.assertTrue(
            mock_update.called_with(single_school=999999, store_programs=True)
        )
        call_command("update_via_api", "--save_programs")
        self.assertTrue(mock_update.call_count == 4)
        self.assertTrue(mock_update.called_with(store_programs=True))
    @mock.patch(
        "paying_for_college.management.commands."
        "load_programs.load_programs.load"
    )
    def METHOD_NAME(self, mock_load):
        mock_load.return_value = ([], "OK")
        call_command("load_programs", "filename")
        self.assertEqual(mock_load.call_count, 1)
        mock_load.assert_called_once_with("filename")
        mock_load.return_value = (["failure"], "not OK")
        call_command("load_programs", "filename")
        self.assertEqual(mock_load.call_count, 2)
        call_command("load_programs", "filename", "--s3", "true")
        self.assertEqual(mock_load.call_count, 3)
        # NOTE(review): setting return_value to a Mock whose side_effect raises
        # does nothing — side_effect fires only when the mock itself is called.
        # `mock_load.side_effect = Exception(...)` was probably intended.
        mock_error = mock.Mock()
        mock_error.side_effect = Exception("Mock Error!")
        mock_load.return_value = mock_error
        error_state = call_command("load_programs", "filename")
        self.assertTrue(error_state is None)
    @mock.patch(
        "paying_for_college.management.commands."
        "load_programs.load_programs.load"
    )
    def test_load_programs_more_than_1_files(self, mock_load):
        mock_load.return_value = ([], "OK")
        call_command("load_programs", "filename", "filename2", "filename3")
        self.assertEqual(mock_load.call_count, 3)
        mock_load.assert_has_calls(
            [
                mock.call("filename"),
                mock.call("filename2"),
                mock.call("filename3"),
            ]
        )
    @mock.patch(
        "paying_for_college.management.commands."
        "retry_notifications.retry_notifications"
    )
    def test_retry_notifications(self, mock_retry):
        mock_retry.return_value = "notified"
        call_command("retry_notifications")
        self.assertEqual(mock_retry.call_count, 1)
        call_command("retry_notifications", "--days", "2")
        self.assertEqual(mock_retry.call_count, 2)
        self.assertTrue(mock_retry.called_with(days=2))
    @mock.patch(
        "paying_for_college.management.commands."
        "send_stale_notifications.send_stale_notifications"
    )
    def test_send_stale_notifications(self, mock_send):
        mock_send.return_value = "notified"
        call_command("send_stale_notifications")
        self.assertEqual(mock_send.call_count, 1)
        call_command(
            "send_stale_notifications", "--add-email", "fake@fake.com"
        )
        self.assertEqual(mock_send.call_count, 2)
        self.assertTrue(mock_send.called_with(add_email=["fake@fake.com"]))
import copy
from django.contrib.postgres.indexes import GinIndex
from django.contrib.postgres.search import SearchVector, SearchVectorField
from django.db import models
from django.db.models import Q
from django.utils import timezone
from ...conf import settings
from ...core.utils import parse_iso8601_string
from ...markup import finalize_markup
from ..checksums import is_post_valid, update_post_checksum
from ..filtersearch import filter_search
class Post(models.Model):
category = models.ForeignKey("misago_categories.Category", on_delete=models.CASCADE)
thread = models.ForeignKey("misago_threads.Thread", on_delete=models.CASCADE)
poster = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.SET_NULL
)
poster_name = models.CharField(max_length=255)
original = models.TextField()
parsed = models.TextField()
checksum = models.CharField(max_length=64, default="-")
mentions = models.ManyToManyField(
settings.AUTH_USER_MODEL, related_name="mention_set"
)
attachments_cache = models.JSONField(null=True, blank=True)
posted_on = models.DateTimeField(db_index=True)
updated_on = models.DateTimeField()
hidden_on = models.DateTimeField(default=timezone.now)
edits = models.PositiveIntegerField(default=0)
last_editor = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name="+",
)
last_editor_name = models.CharField(max_length=255, null=True, blank=True)
last_editor_slug = models.SlugField(max_length=255, null=True, blank=True)
hidden_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name="+",
)
hidden_by_name = models.CharField(max_length=255, null=True, blank=True)
hidden_by_slug = models.SlugField(max_length=255, null=True, blank=True)
has_reports = models.BooleanField(default=False)
has_open_reports = models.BooleanField(default=False)
is_unapproved = models.BooleanField(default=False, db_index=True)
is_hidden = models.BooleanField(default=False)
is_protected = models.BooleanField(default=False)
is_event = models.BooleanField(default=False, db_index=True)
event_type = models.CharField(max_length=255, null=True, blank=True)
event_context = models.JSONField(null=True, blank=True)
likes = models.PositiveIntegerField(default=0)
last_likes = models.JSONField(null=True, blank=True)
liked_by = models.ManyToManyField(
settings.AUTH_USER_MODEL,
related_name="liked_post_set",
through="misago_threads.PostLike",
)
search_document = models.TextField(null=True, blank=True)
search_vector = SearchVectorField()
class Meta:
indexes = [
models.Index(
name="misago_post_has_open_repo_part",
fields=["has_open_reports"],
condition=Q(has_open_reports=True),
),
models.Index(
name="misago_post_is_hidden_part",
fields=["is_hidden"],
condition=Q(is_hidden=False),
),
models.Index(
name="misago_post_is_event_part",
fields=["is_event", "event_type"],
condition=Q(is_event=True),
),
GinIndex(fields=["search_vector"]),
]
index_together = [
("thread", "id"), # speed up threadview for team members
("is_event", "is_hidden"),
("poster", "posted_on"),
]
def __str__(self):
return "%s..." % self.original[10:].strip()
def delete(self, *args, **kwargs):
from ..signals import delete_post
delete_post.send(sender=self)
super().delete(*args, **kwargs)
def merge(self, other_post):
if self.poster_id != other_post.poster_id:
raise ValueError("post can't be merged with other user's post")
elif (
self.poster_id is None
and other_post.poster_id is None
and self.poster_name != other_post.poster_name
):
raise ValueError("post can't be merged with other user's post")
if self.thread_id != other_post.thread_id:
raise ValueError("only posts belonging to same thread can be merged")
if self.is_event or other_post.is_event:
raise ValueError("can't merge events")
if self.pk == other_post.pk:
raise ValueError("post can't be merged with itself")
other_post.original = str("\n\n").join((other_post.original, self.original))
other_post.parsed = str("\n").join((other_post.parsed, self.parsed))
update_post_checksum(other_post)
if self.is_protected:
other_post.is_protected = True
if self.is_best_answer:
self.thread.best_answer = other_post
if other_post.is_best_answer:
self.thread.best_answer_is_protected = other_post.is_protected
from ..signals import merge_post
merge_post.send(sender=self, other_post=other_post)
def move(self, new_thread):
from ..signals import move_post
if self.is_best_answer:
self.thread.clear_best_answer()
self.category = new_thread.category
self.thread = new_thread
move_post.send(sender=self)
@property
def attachments(self):
# pylint: disable=access-member-before-definition
if hasattr(self, "_hydrated_attachments_cache"):
return self._hydrated_attachments_cache
self._hydrated_attachments_cache = []
if self.attachments_cache:
for attachment in copy.deepcopy(self.attachments_cache):
attachment["uploaded_on"] = parse_iso8601_string(
attachment["uploaded_on"]
)
self._hydrated_attachments_cache.append(attachment)
return self._hydrated_attachments_cache
@property
def content(self):
if not hasattr(self, "_finalised_parsed"):
self._finalised_parsed = finalize_markup(self.parsed)
return self._finalised_parsed
@property
def thread_type(self):
return self.category.thread_type
def METHOD_NAME(self):
    """Return the API URL for this post."""
    return self.thread_type.get_post_api_url(self)

def get_likes_api_url(self):
    """Return the API URL for this post's likes."""
    return self.thread_type.get_post_likes_api_url(self)

def get_editor_api_url(self):
    """Return the API URL for editing this post."""
    return self.thread_type.get_post_editor_api_url(self)

def get_edits_api_url(self):
    """Return the API URL for this post's edit history."""
    return self.thread_type.get_post_edits_api_url(self)

def get_read_api_url(self):
    """Return the API URL for marking this post as read."""
    return self.thread_type.get_post_read_api_url(self)

def get_absolute_url(self):
    """Return the canonical HTML URL for this post."""
    return self.thread_type.get_post_absolute_url(self)
def set_search_document(self, thread_title=None):
    """Build searchable text for this post, optionally prefixed by its thread title."""
    if thread_title:
        document = "\n\n".join([thread_title, self.original])
    else:
        document = self.original
    self.search_document = filter_search(document)
def update_search_vector(self):
    """Refresh the postgres full-text search vector from the search document."""
    self.search_vector = SearchVector(
        "search_document", config=settings.MISAGO_SEARCH_CONFIG
    )
@property
def short(self):
    """Short text preview: first 150 chars with an ellipsis, or "" if invalid."""
    if not self.is_valid:
        # Posts failing checksum validation expose no preview text.
        return ""
    if len(self.original) > 150:
        # (was str("%s...") % ... - the str() wrapper was redundant)
        return f"{self.original[:150].strip()}..."
    return self.original
@property
def is_valid(self):
    # Checksum test guarding against post content tampering.
    return is_post_valid(self)

@property
def is_first_post(self):
    # True when this post opens its thread.
    return self.id == self.thread.first_post_id
@property
def is_best_answer(self):
    """True when this post is marked as its thread's best answer."""
    return self.thread.best_answer_id == self.id
"""Training algorithm track submission functions for LibriSpeech."""
import functools
from typing import Dict, Iterator, List, Tuple
from absl import logging
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
import optax
from algorithmic_efficiency import spec
_GRAD_CLIP_EPS = 1e-6
def get_batch_size(workload_name):
    """Return the global training batch size (fixed at 256 for all workloads)."""
    del workload_name  # one batch size regardless of workload
    return 256
def get_learning_rate(step, hyperparams):
    """Linear warmup to ``base_lr`` followed by cosine decay towards zero."""
    warmup = hyperparams.warmup_steps
    if step < warmup:
        # Linear ramp from 0 up to base_lr over the warmup period.
        return step * hyperparams.base_lr / warmup
    # Cosine schedule over the full training horizon (not offset by warmup).
    progress = step / hyperparams.training_steps
    return hyperparams.base_lr * ((1 + np.cos(progress * np.pi)) * 0.5)
def optimizer(hyperparameters: spec.Hyperparameters, num_train_examples: int):
    """Build AdamW init/update fns with runtime-injectable hyperparameters.

    The learning rate starts at 0.0 and is overwritten each step through
    ``optimizer_state.hyperparams['learning_rate']`` (see pmapped_train_step).
    """
    opt_init_fn, opt_update_fn = optax.inject_hyperparams(optax.adamw)(
        b1=hyperparameters.beta1,
        b2=hyperparameters.beta2,
        eps=hyperparameters.epsilon,
        weight_decay=hyperparameters.weight_decay,
        learning_rate=0.0)
    # num_train_examples is unused here; kept for a uniform optimizer API.
    return opt_init_fn, opt_update_fn
def METHOD_NAME(workload: spec.Workload,
                model_params: spec.ParameterContainer,
                model_state: spec.ModelAuxiliaryState,
                hyperparameters: spec.Hyperparameters,
                rng: spec.RandomState) -> spec.OptimizerState:
    """Initialize AdamW optimizer state, replicated across local devices.

    Returns a ``(replicated_optimizer_state, opt_update_fn)`` tuple; the
    update fn travels with the state so ``update_params`` can apply it.
    """
    del model_state
    del rng

    # Optax only needs parameter shapes (not values) to build its state.
    params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
                                     workload.param_shapes)
    opt_init_fn, opt_update_fn = optimizer(hyperparameters,
                                           workload.num_train_examples)
    optimizer_state = opt_init_fn(params_zeros_like)
    return jax_utils.replicate(optimizer_state), opt_update_fn
def l2_regularization(params, l2_decay_rank_threshold):
    """Compute the squared l2 norm of the given parameters.

    Only parameters with rank >= l2_decay_rank_threshold are included, so a
    threshold of 2 ignores all 1d (and lower) arrays such as biases and
    batch-norm parameters.

    Args:
      params: Pytree containing parameters.
      l2_decay_rank_threshold: minimum ``ndim`` for a leaf to be counted.

    Returns:
      The squared l2 norm of all leaves matching the threshold.
    """
    leaves = jax.tree_util.tree_leaves(params)
    total = sum(
        jnp.sum(jnp.square(leaf))
        for leaf in leaves
        if leaf.ndim >= l2_decay_rank_threshold)
    return total
@functools.partial(
    jax.pmap,
    axis_name='batch',
    in_axes=(None, None, 0, 0, 0, None, 0, 0, None),
    static_broadcasted_argnums=(0, 1))
def pmapped_train_step(workload,
                       opt_update_fn,
                       model_state,
                       optimizer_state,
                       current_param_container,
                       hyperparameters,
                       batch,
                       rng,
                       lr):
    """One pmapped training step over all local devices.

    Computes loss and gradients, sums them across devices via psum, applies
    global-norm gradient clipping, then the optimizer update. Returns
    (new_model_state, new_optimizer_state, updated_params, loss, grad_norm).
    """
    # Inject the externally computed learning rate for this step.
    optimizer_state.hyperparams['learning_rate'] = lr

    def _loss_fn(params):
        """loss function used for training."""
        (logits, logit_paddings), new_model_state = workload.model_fn(
            params,
            batch,
            model_state,
            spec.ForwardPassMode.TRAIN,
            rng,
            update_batch_norm=True)
        loss_dict = workload.loss_fn(batch['targets'], (logits, logit_paddings))
        summed_loss = loss_dict['summed']
        n_valid_examples = loss_dict['n_valid_examples']
        return summed_loss, (n_valid_examples, new_model_state)

    grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
    (summed_loss, (n_valid_examples, new_model_state)), grad = grad_fn(
        current_param_container)
    # Get correct global mean loss and grad.
    (summed_loss, n_valid_examples, grad) = lax.psum(
        (summed_loss, n_valid_examples, grad), axis_name='batch')
    loss = summed_loss / n_valid_examples
    grad = jax.tree_map(lambda x: x / n_valid_examples, grad)
    grad_norm = jnp.sqrt(l2_regularization(grad, 0))
    # Clip by global gradient norm; the epsilon guards against division by zero.
    grad_clip = hyperparameters.grad_clip
    grad_scaling_factor = grad_clip / (grad_norm + _GRAD_CLIP_EPS)
    grad_scaling_factor = jax.lax.clamp(min=0.0, x=grad_scaling_factor, max=1.0)
    grad = jax.tree_map(lambda x: x * grad_scaling_factor, grad)
    updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
                                                 current_param_container)
    updated_params = optax.apply_updates(current_param_container, updates)
    return new_model_state, new_optimizer_state, updated_params, loss, grad_norm
def update_params(workload: spec.Workload,
                  current_param_container: spec.ParameterContainer,
                  current_params_types: spec.ParameterTypeTree,
                  model_state: spec.ModelAuxiliaryState,
                  hyperparameters: spec.Hyperparameters,
                  batch: Dict[str, spec.Tensor],
                  loss_type: spec.LossType,
                  optimizer_state: spec.OptimizerState,
                  eval_results: List[Tuple[int, float]],
                  global_step: int,
                  rng: spec.RandomState) -> spec.UpdateReturn:
    """Return (updated_optimizer_state, updated_params).

    Computes this step's learning rate, runs the pmapped training step
    across local devices, and periodically logs loss/grad-norm metrics.
    """
    del current_params_types
    del eval_results
    del loss_type

    lr = get_learning_rate(global_step, hyperparameters)
    optimizer_state, opt_update_fn = optimizer_state
    # One independent RNG per local device for the pmapped step.
    per_device_rngs = jax.random.split(rng, jax.local_device_count())
    outputs = pmapped_train_step(workload,
                                 opt_update_fn,
                                 model_state,
                                 optimizer_state,
                                 current_param_container,
                                 hyperparameters,
                                 batch,
                                 per_device_rngs,
                                 lr)
    new_model_state, new_optimizer_state, new_params, loss, grad_norm = outputs

    # Log every step early in training, then every 100 steps.
    if global_step <= 1000 or global_step % 100 == 0:
        logging.info('%d) loss = %0.3f, grad_norm = %0.3f lr = %0.6f',
                     global_step,
                     loss.mean(),
                     grad_norm.mean(),
                     lr)
        if workload.summary_writer is not None:
            workload.summary_writer.scalar('train_step_ctc_loss',
                                           loss.mean(),
                                           global_step)
            workload.summary_writer.scalar('grad_norm', grad_norm.mean(), global_step)
            workload.summary_writer.scalar('learning_rate', lr, global_step)

    return (new_optimizer_state, opt_update_fn), new_params, new_model_state
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def data_selection(workload: spec.Workload,
                   input_queue: Iterator[Dict[str, spec.Tensor]],
                   optimizer_state: spec.OptimizerState,
                   current_param_container: spec.ParameterContainer,
                   model_state: spec.ModelAuxiliaryState,
                   hyperparameters: spec.Hyperparameters,
                   global_step: int,
                   rng: spec.RandomState) -> Dict[str, spec.Tensor]:
    """Select data from the infinitely repeating, pre-shuffled input queue.

    Each element of the queue is a batch of training examples and labels.
    """
    # Only the queue itself is consulted; every other argument is unused.
    del (workload, optimizer_state, current_param_container, model_state,
         hyperparameters, global_step, rng)
    return next(input_queue)
import re
from ...mistune.inline_parser import LINK_LABEL
from ...mistune.scanner import unikey
__all__ = ['plugin_footnotes']

#: inline footnote syntax looks like::
#:
#:    [^key]
INLINE_FOOTNOTE_PATTERN = r'\[\^(' + LINK_LABEL + r')\]'

#: define a footnote item like::
#:
#:    [^key]: paragraph text to describe the note
#:
#: group 1 captures the marker's leading indent (0-3 spaces), group 2 the
#: key, group 3 the body including indented continuation lines.
DEF_FOOTNOTE = re.compile(
    r'( {0,3})\[\^(' + LINK_LABEL + r')\]:[ \t]*('
    r'[^\n]*\n+'
    r'(?:\1 {1,3}(?! )[^\n]*\n+)*'
    r')'
)
def parse_inline_footnote(inline, m, state):
    """Turn a matched ``[^key]`` into a footnote_ref token, or plain text."""
    key = unikey(m.group(1))
    known = state.get('def_footnotes')
    if not known or key not in known:
        # No matching definition: emit the raw text unchanged.
        return 'text', m.group(0)

    index = state.get('footnote_index', 0) + 1
    state['footnote_index'] = index
    state['footnotes'].append(key)
    return 'footnote_ref', key, index
def parse_def_footnote(block, m, state):
    """Record a footnote definition; the first definition of a key wins."""
    state['def_footnotes'].setdefault(unikey(m.group(2)), m.group(3))
def parse_footnote_item(block, k, i, state):
    """Build a footnote_item token for key ``k`` at 1-based position ``i``.

    Single-line definitions become a plain paragraph; multi-line definitions
    are dedented by the indent of their first non-empty continuation line and
    re-parsed as block-level content.
    """
    def_footnotes = state['def_footnotes']
    text = def_footnotes[k]

    stripped_text = text.strip()
    if '\n' not in stripped_text:
        children = [{'type': 'paragraph', 'text': stripped_text}]
    else:
        lines = text.splitlines()
        # Find the first non-empty continuation line; its indent determines
        # how much leading whitespace to strip from the whole body.
        for second_line in lines[1:]:
            if second_line:
                break

        spaces = len(second_line) - len(second_line.lstrip())
        pattern = re.compile(r'^ {' + str(spaces) + r',}', flags=re.M)
        text = pattern.sub('', text)
        children = block.parse_text(text, state)
        if not isinstance(children, list):
            children = [children]

    return {
        'type': 'footnote_item',
        'children': children,
        'params': (k, i)
    }
def md_footnotes_hook(md, result, state):
    """After-render hook: append the rendered footnotes section to the output."""
    keys = state.get('footnotes')
    if not keys:
        return result

    items = []
    for position, key in enumerate(keys, start=1):
        items.append(parse_footnote_item(md.block, key, position, state))

    tokens = [{'type': 'footnotes', 'children': items}]
    return result + md.block.render(tokens, md.inline, state)
def render_ast_footnote_ref(key, index):
    """AST node for an inline footnote reference."""
    return dict(type='footnote_ref', key=key, index=index)
def render_ast_footnote_item(children, key, index):
    """AST node for a rendered footnote definition."""
    return dict(
        type='footnote_item',
        children=children,
        key=key,
        index=index,
    )
def render_html_footnote_ref(key, index):
    """Render the inline superscript link pointing at footnote ``index``."""
    i = str(index)
    return (
        '<sup class="footnote-ref" id="fnref-' + i + '">'
        '<a href="#fn-' + i + '">' + i + '</a></sup>'
    )
def render_html_footnotes(text):
    """Wrap the rendered footnote items in the footnotes section container."""
    return '<section class="footnotes">\n<ol>\n' + text + '</ol>\n</section>\n'
def METHOD_NAME(text, key, index):
    """Render one footnote ``<li>``, appending a back-reference link.

    The back-link is tucked inside a trailing paragraph when one is present
    so it sits on the same line as the footnote text. ``key`` is unused but
    kept for renderer-signature compatibility.
    """
    i = str(index)
    back = '<a href="#fnref-' + i + '" class="footnote">&#8617;</a>'

    body = text.rstrip()
    if body.endswith('</p>'):
        body = body[:-len('</p>')] + back + '</p>'
    else:
        body += back

    return '<li id="fn-' + i + '">' + body + '</li>\n'
def plugin_footnotes(md):
    """Register footnote syntax rules and renderers on a markdown instance."""
    md.inline.register_rule(
        'footnote',
        INLINE_FOOTNOTE_PATTERN,
        parse_inline_footnote
    )
    # NOTE: list.index() raises ValueError when the rule is absent - it never
    # returns -1, so the previous `if index != -1` fallback was unreachable.
    # Insert before the link rule when present, otherwise append.
    try:
        index = md.inline.rules.index('std_link')
        md.inline.rules.insert(index, 'footnote')
    except ValueError:
        md.inline.rules.append('footnote')

    md.block.register_rule('def_footnote', DEF_FOOTNOTE, parse_def_footnote)
    try:
        index = md.block.rules.index('def_link')
        md.block.rules.insert(index, 'def_footnote')
    except ValueError:
        md.block.rules.append('def_footnote')

    if md.renderer.NAME == 'html':
        md.renderer.register('footnote_ref', render_html_footnote_ref)
        md.renderer.register('footnote_item', METHOD_NAME)
        md.renderer.register('footnotes', render_html_footnotes)
    elif md.renderer.NAME == 'ast':
        md.renderer.register('footnote_ref', render_ast_footnote_ref)
        md.renderer.register('footnote_item', render_ast_footnote_item)

    md.after_render_hooks.append(md_footnotes_hook)
from __future__ import annotations
from collections.abc import Mapping, Sequence
import pytest
from ibis.common.collections import frozendict
from ibis.common.graph import (
Graph,
Node,
_flatten_collections,
_recursive_get,
bfs,
dfs,
toposort,
)
from ibis.common.grounds import Annotable, Concrete
from ibis.common.patterns import InstanceOf, TupleOf
class MyNode(Node):
    """Minimal Node implementation for graph-traversal tests.

    Identity is determined by ``name`` alone, so nodes with equal names
    compare (and hash) equal regardless of their children.
    """

    def __init__(self, name, children):
        self.name = name
        self.children = children

    @property
    def __args__(self):
        return (self.children,)

    @property
    def __argnames__(self):
        return ("children",)

    def __repr__(self):
        return f"{type(self).__name__}({self.name})"

    def __hash__(self):
        return hash((type(self), self.name))

    def __eq__(self, other):
        return other.name == self.name
# Shared fixture tree used by the traversal tests below:
#
#         A
#        / \
#       B   C
#      / \
#     D   E
C = MyNode(name="C", children=[])
D = MyNode(name="D", children=[])
E = MyNode(name="E", children=[])
B = MyNode(name="B", children=[D, E])
A = MyNode(name="A", children=[B, C])
def test_bfs():
    """bfs() visits nodes level by level and rejects non-Node input."""
    assert list(bfs(A).keys()) == [A, B, C, D, E]

    with pytest.raises(
        TypeError, match="must be an instance of ibis.common.graph.Node"
    ):
        bfs(1)


def test_construction():
    # Graph's default construction order matches breadth-first traversal.
    assert Graph(A) == bfs(A)


def test_graph_nodes():
    g = Graph(A)
    assert g.nodes() == {A, B, C, D, E}


def test_graph_repr():
    g = Graph(A)
    assert repr(g) == f"Graph({dict(g)})"


def test_dfs():
    """dfs() yields children before their parents (post-order)."""
    assert list(dfs(A).keys()) == [D, E, B, C, A]

    with pytest.raises(
        TypeError, match="must be an instance of ibis.common.graph.Node"
    ):
        dfs(1)


def test_invert():
    """invert() flips edge direction; inverting twice is the identity."""
    g = dfs(A)
    assert g == {D: (), E: (), B: (D, E), C: (), A: (B, C)}

    i = g.invert()
    assert i == {D: (B,), E: (B,), B: (A,), C: (A,), A: ()}

    j = i.invert()
    assert j == g


def test_toposort():
    # Every node appears after all of its dependencies.
    assert list(toposort(A).keys()) == [C, D, E, B, A]


def test_toposort_cycle_detection():
    C = MyNode(name="C", children=[])
    A = MyNode(name="A", children=[C])
    B = MyNode(name="B", children=[A])
    A.children.append(B)

    # A depends on B which depends on A
    with pytest.raises(ValueError, match="cycle detected in the graph"):
        toposort(A)
def METHOD_NAME():
    """Nested lists inside ``children`` are flattened by ``__children__()``."""
    leaf_a = MyNode(name="a", children=[])
    node_b = MyNode(name="b", children=[leaf_a])
    leaf_c = MyNode(name="c", children=[])
    leaf_d = MyNode(name="d", children=[])
    parent = MyNode(name="e", children=[[node_b, leaf_c], leaf_d])
    assert parent.__children__() == (node_b, leaf_c, leaf_d)
def test_example():
    """bfs flattens tuple arguments and deduplicates value-equal nodes."""
    class Example(Annotable, Node):
        def __hash__(self):
            return hash((self.__class__, self.__args__))

    class Literal(Example):
        value = InstanceOf(object)

    class BoolLiteral(Literal):
        value = InstanceOf(bool)

    class And(Example):
        operands = TupleOf(InstanceOf(BoolLiteral))

    class Or(Example):
        operands = TupleOf(InstanceOf(BoolLiteral))

    class Collect(Example):
        arguments = TupleOf(TupleOf(InstanceOf(Example)) | InstanceOf(Example))

    a = BoolLiteral(True)
    b = BoolLiteral(False)
    c = BoolLiteral(True)
    d = BoolLiteral(False)

    and_ = And((a, b, c, d))
    or_ = Or((a, c))
    collect = Collect([and_, (or_, or_)])

    graph = bfs(collect)

    expected = {
        collect: (and_, or_, or_),
        or_: (a, c),
        and_: (a, b, c, d),
        a: (),
        b: (),
        # c and d are identical with a and b
    }
    assert graph == expected
def test_concrete_with_traversable_children():
    """Concrete nodes expose their Node-typed args through __children__()."""
    class Bool(Concrete, Node):
        pass

    class Value(Bool):
        value = InstanceOf(bool)

    class Either(Bool):
        left = InstanceOf(Bool)
        right = InstanceOf(Bool)

    class All(Bool):
        arguments = TupleOf(InstanceOf(Bool))
        strict = InstanceOf(bool)

    T, F = Value(True), Value(False)

    # Non-Node args (the `strict` flag) are excluded from children.
    node = All((T, F), strict=True)
    assert node.__args__ == ((T, F), True)
    assert node.__children__() == (T, F)

    node = Either(T, F)
    assert node.__args__ == (T, F)
    assert node.__children__() == (T, F)

    node = All((T, Either(T, Either(T, F))), strict=False)
    assert node.__args__ == ((T, Either(T, Either(T, F))), False)
    assert node.__children__() == (T, Either(T, Either(T, F)))

    copied = node.copy(arguments=(T, F))
    assert copied == All((T, F), strict=False)
class MySequence(Sequence):
    """Sequence that forbids item access, proving traversal never indexes it."""

    def __init__(self, *items):
        self.items = items

    def __getitem__(self, index):
        raise AssertionError("must not be called")  # pragma: no cover

    def __len__(self):
        return len(self.items)
class MyMapping(Mapping):
    """Mapping that forbids value access, proving traversal never looks up keys."""

    def __init__(self, **items):
        self.items = items

    def __getitem__(self, key):
        raise AssertionError("must not be called")  # pragma: no cover

    def __iter__(self):
        return iter(self.items)

    def __len__(self):
        return len(self.items)
def test_flatten_collections():
    """_flatten_collections recurses only into list/tuple/dict, not arbitrary
    Sequence/Mapping subclasses, and yields items matching ``filter``."""
    # test that flatten collections doesn't recurse into arbitrary mappings
    # and sequences, just the commonly used builtin ones: list, tuple, dict
    result = _flatten_collections(
        [0.0, 1, 2, [3, 4, (5, 6)], "7", MySequence(8, 9)], filter=int
    )
    assert list(result) == [1, 2, 3, 4, 5, 6]

    result = _flatten_collections(
        {
            "a": 0.0,
            "b": 1,
            "c": (MyMapping(d=2, e=3), frozendict(f=4)),
            "d": [5, "6", {"e": (7, 8.9)}],
        },
        filter=int,
    )
    assert list(result) == [1, 4, 5, 7]
def test_recurse_get():
results = {"a": "A", "b": "B", "c": "C", "d": "D"}
assert _recursive_get((0, 1, "a", {"b": "c"}), results, filter=str) == (
0,
1,
"A",
{"b": "C"},
)
assert _recursive_get({"a": "b", "c": "d"}, results, filter=str) == {
"a": "B",
"c": "D",
}
assert _recursive_get(["a", "b", "c"], results, filter=str) == ("A", "B", "C")
assert _recursive_get("a", results, filter=str) == "A"
my_seq = MySequence("a", "b", "c")
my_map = MyMapping(a="a", b="b", c="c")
assert _recursive_get(("a", my_seq, ["b", "a"], my_map), results, filter=str) == (
"A",
my_seq,
("B", "A"),
my_map,
) |
import random
from enum import Enum
from pathlib import PurePosixPath
import pytest
from semver import VersionInfo
from common.agent_plugins import AgentPluginMetadata, AgentPluginRepositoryIndex, AgentPluginType
from common.agent_plugins.agent_plugin_repository_index import ( # type: ignore[attr-defined]
DEVELOPMENT,
)
PAYLOAD_PLUGIN_NAME = "awesome_payload"


def get_plugin_metadata_with_given_version(version: str) -> AgentPluginMetadata:
    """Build payload-plugin metadata fixtures differing only in ``version``."""
    return AgentPluginMetadata(
        name=PAYLOAD_PLUGIN_NAME,
        plugin_type=AgentPluginType.PAYLOAD,
        resource_path=PurePosixPath("/tmp"),
        sha256="7ac0f5c62a9bcb81af3e9d67a764d7bbd3cce9af7cd26c211f136400ebe703c4",
        description="an awesome payload plugin",
        version=version,
        safe=True,
    )


# Metadata fixtures spanning an ascending range of semantic versions.
PLUGIN_VERSION_1_0_0 = get_plugin_metadata_with_given_version("1.0.0")
PLUGIN_VERSION_1_0_1 = get_plugin_metadata_with_given_version("1.0.1")
PLUGIN_VERSION_1_2_0 = get_plugin_metadata_with_given_version("1.2.0")
PLUGIN_VERSION_1_2_3 = get_plugin_metadata_with_given_version("1.2.3")
PLUGIN_VERSION_2_0_0 = get_plugin_metadata_with_given_version("2.0.0")
PLUGIN_VERSION_3_0_1 = get_plugin_metadata_with_given_version("3.0.1")
# Expected simplified-dict form of PLUGIN_VERSION_3_0_1.
PLUGIN_VERSION_3_0_1_SERIALIZED = {
    "name": PAYLOAD_PLUGIN_NAME,
    "plugin_type": str(AgentPluginType.PAYLOAD),
    "resource_path": "/tmp",
    "sha256": "7ac0f5c62a9bcb81af3e9d67a764d7bbd3cce9af7cd26c211f136400ebe703c4",
    "description": "an awesome payload plugin",
    "version": "3.0.1",
    "safe": True,
}
SORTED_PLUGIN_VERSIONS = [
    PLUGIN_VERSION_1_0_0,
    PLUGIN_VERSION_1_0_1,
    PLUGIN_VERSION_1_2_0,
    PLUGIN_VERSION_1_2_3,
    PLUGIN_VERSION_2_0_0,
    PLUGIN_VERSION_3_0_1,
]

REPOSITORY_INDEX_PLUGINS = {AgentPluginType.PAYLOAD: {PAYLOAD_PLUGIN_NAME: [PLUGIN_VERSION_3_0_1]}}
REPOSITORY_INDEX_PLUGINS_SERIALIZED = {
    str(AgentPluginType.PAYLOAD): {PAYLOAD_PLUGIN_NAME: [PLUGIN_VERSION_3_0_1_SERIALIZED]}
}


def get_repository_index_with_given_version(version: str) -> AgentPluginRepositoryIndex:
    """Build a repository-index fixture with the given compatible version."""
    return AgentPluginRepositoryIndex(
        timestamp=123, compatible_infection_monkey_version=version, plugins=REPOSITORY_INDEX_PLUGINS
    )


# Index fixtures: "development" marker, VersionInfo object, and plain string.
REPOSITORY_INDEX_VERSION_DEVELOPMENT = get_repository_index_with_given_version(DEVELOPMENT)
REPOSITORY_INDEX_VERSION_DEVELOPMENT_SERIALIZED = {
    "timestamp": 123,
    "compatible_infection_monkey_version": DEVELOPMENT,
    "plugins": REPOSITORY_INDEX_PLUGINS_SERIALIZED,
}
REPOSITORY_INDEX_VERSION_OBJECT = get_repository_index_with_given_version(VersionInfo(7, 8, 9))
REPOSITORY_INDEX_VERSION_DICT = get_repository_index_with_given_version("7.8.9")
REPOSITORY_INDEX_VERSION_SERIALIZED = {
    "timestamp": 123,
    "compatible_infection_monkey_version": "7.8.9",
    "plugins": REPOSITORY_INDEX_PLUGINS_SERIALIZED,
}
@pytest.mark.parametrize(
    "object_,expected_serialization",
    [
        (REPOSITORY_INDEX_VERSION_DEVELOPMENT, REPOSITORY_INDEX_VERSION_DEVELOPMENT_SERIALIZED),
        (REPOSITORY_INDEX_VERSION_DICT, REPOSITORY_INDEX_VERSION_SERIALIZED),
        (REPOSITORY_INDEX_VERSION_OBJECT, REPOSITORY_INDEX_VERSION_SERIALIZED),
    ],
)
def test_agent_plugin_repository_index_serialization(object_, expected_serialization):
    """Indexes serialize identically whether built from str or VersionInfo."""
    assert object_.dict(simplify=True) == expected_serialization


@pytest.mark.parametrize(
    "expected_object,serialized",
    [
        (REPOSITORY_INDEX_VERSION_DEVELOPMENT, REPOSITORY_INDEX_VERSION_DEVELOPMENT_SERIALIZED),
        (REPOSITORY_INDEX_VERSION_DICT, REPOSITORY_INDEX_VERSION_SERIALIZED),
        (REPOSITORY_INDEX_VERSION_OBJECT, REPOSITORY_INDEX_VERSION_SERIALIZED),
    ],
)
def test_agent_plugin_repository_index_deserialization(expected_object, serialized):
    """Deserialization round-trips, and plugin-type keys become real Enums."""
    repository_index = AgentPluginRepositoryIndex(**serialized)
    assert repository_index == expected_object
    for agent_plugin_type in repository_index.plugins.keys():
        assert isinstance(agent_plugin_type, Enum)
def METHOD_NAME():
    """Plugin versions are sorted ascending when the index is constructed."""
    # Use a snake_case local (the previous UPPER_CASE name suggested a module
    # constant), and re-shuffle until the order actually differs so the
    # precondition below cannot fail by chance (1/720 with 6 items).
    unsorted_versions = SORTED_PLUGIN_VERSIONS.copy()
    while unsorted_versions == SORTED_PLUGIN_VERSIONS:
        random.shuffle(unsorted_versions)  # noqa: DUO102
    assert unsorted_versions != SORTED_PLUGIN_VERSIONS

    repository_index = AgentPluginRepositoryIndex(
        compatible_infection_monkey_version="development",
        plugins={
            AgentPluginType.PAYLOAD: {PAYLOAD_PLUGIN_NAME: unsorted_versions},
            AgentPluginType.EXPLOITER: {},
            AgentPluginType.CREDENTIALS_COLLECTOR: {PAYLOAD_PLUGIN_NAME: [PLUGIN_VERSION_1_0_0]},
        },
    )

    assert repository_index.plugins == {
        AgentPluginType.PAYLOAD: {PAYLOAD_PLUGIN_NAME: SORTED_PLUGIN_VERSIONS},
        AgentPluginType.EXPLOITER: {},
        AgentPluginType.CREDENTIALS_COLLECTOR: {PAYLOAD_PLUGIN_NAME: [PLUGIN_VERSION_1_0_0]},
    }
import zlib
from sentry.utils import metrics
from sentry.utils.json import prune_empty_keys
# Cache key templates: `key` identifies the event, `id` one attachment,
# `chunk_index` one chunk of a chunked attachment's data.
ATTACHMENT_META_KEY = "{key}:a"
ATTACHMENT_UNCHUNKED_DATA_KEY = "{key}:a:{id}"
ATTACHMENT_DATA_CHUNK_KEY = "{key}:a:{id}:{chunk_index}"

# Sentinel marking attachment data that has not been loaded from cache yet.
UNINITIALIZED_DATA = object()
class MissingAttachmentChunks(Exception):
    """Raised when an attachment's data is no longer present in the cache."""

    pass
class CachedAttachment:
    """In-memory handle for an event attachment stored in the attachment cache.

    Data is stored either as a single blob ("unchunked", ``chunks is None``)
    or as ``chunks`` zlib-compressed pieces. Payload bytes are loaded lazily
    from ``cache`` on first access to :attr:`data`.
    """

    def __init__(
        self,
        key=None,
        id=None,
        name=None,
        content_type=None,
        type=None,
        data=UNINITIALIZED_DATA,
        chunks=None,
        cache=None,
        rate_limited=None,
        size=None,
        **kwargs,
    ):
        self.key = key
        self.id = id
        self.name = name
        self.content_type = content_type
        self.type = type or "event.attachment"
        assert isinstance(self.type, str), self.type
        self.rate_limited = rate_limited

        # Prefer an explicit size; otherwise fall back to the loaded data's
        # length, or 0 when no data is available yet.
        if size is not None:
            self.size = size
        elif data not in (None, UNINITIALIZED_DATA):
            self.size = len(data)
        else:
            self.size = 0

        self._data = data
        self.chunks = chunks
        self._cache = cache

    @classmethod
    def from_upload(cls, file, **kwargs):
        """Build a CachedAttachment from an uploaded file object."""
        return CachedAttachment(
            name=file.name, content_type=file.content_type, data=file.read(), **kwargs
        )

    @property
    def data(self):
        """Attachment payload bytes, fetched lazily from the backing cache."""
        if self._data is UNINITIALIZED_DATA and self._cache is not None:
            self._data = self._cache.get_data(self)
        assert self._data is not UNINITIALIZED_DATA
        return self._data

    def delete(self):
        """Remove all of this attachment's data keys from the cache."""
        for key in self.METHOD_NAME:
            self._cache.inner.delete(key)

    @property
    def METHOD_NAME(self):
        """Yield every cache key holding this attachment's payload data."""
        assert self.key is not None
        assert self.id is not None

        if self.chunks is None:
            # Unchunked storage: a single data key.
            yield ATTACHMENT_UNCHUNKED_DATA_KEY.format(key=self.key, id=self.id)
            return

        for chunk_index in range(self.chunks):
            yield ATTACHMENT_DATA_CHUNK_KEY.format(
                key=self.key, id=self.id, chunk_index=chunk_index
            )

    def meta(self):
        """Return serializable metadata for this attachment, pruned of empties."""
        return prune_empty_keys(
            {
                "id": self.id,
                "name": self.name,
                "content_type": self.content_type,
                "type": self.type,
                "chunks": self.chunks,
                "size": self.size or None,  # None for backwards compatibility
                "rate_limited": self.rate_limited,
            }
        )
class BaseAttachmentCache:
    """Cache facade storing event attachments as zlib-compressed blobs.

    ``inner`` is the underlying cache client exposing get/set/delete with a
    ``raw`` flag distinguishing raw bytes from serialized values.
    """

    def __init__(self, inner):
        self.inner = inner

    def set(self, key, attachments, timeout=None):
        """Store unchunked attachment data plus the shared metadata record."""
        for id, attachment in enumerate(attachments):
            # Chunked attachments were already written via set_chunk().
            if attachment.chunks is not None:
                continue

            # TODO(markus): We need to get away from sequential IDs, they
            # are risking collision when using Relay.
            if attachment.id is None:
                attachment.id = id
            if attachment.key is None:
                attachment.key = key

            metrics_tags = {"type": attachment.type}
            self.set_unchunked_data(
                key=key,
                id=attachment.id,
                data=attachment.data,
                timeout=timeout,
                metrics_tags=metrics_tags,
            )

        meta = []

        for attachment in attachments:
            attachment._cache = self
            meta.append(attachment.meta())

        self.inner.set(ATTACHMENT_META_KEY.format(key=key), meta, timeout, raw=False)

    def set_chunk(self, key, id, chunk_index, chunk_data, timeout=None):
        """Store one compressed chunk of a chunked attachment's data."""
        key = ATTACHMENT_DATA_CHUNK_KEY.format(key=key, id=id, chunk_index=chunk_index)
        self.inner.set(key, zlib.compress(chunk_data), timeout, raw=True)

    def set_unchunked_data(self, key, id, data, timeout=None, metrics_tags=None):
        """Store an attachment's entire payload under a single key."""
        key = ATTACHMENT_UNCHUNKED_DATA_KEY.format(key=key, id=id)
        compressed = zlib.compress(data)
        metrics.timing("attachments.blob-size.raw", len(data), tags=metrics_tags)
        metrics.timing("attachments.blob-size.compressed", len(compressed), tags=metrics_tags)
        metrics.incr("attachments.received", tags=metrics_tags, skip_internal=False)
        self.inner.set(key, compressed, timeout, raw=True)

    def get_from_chunks(self, key, **attachment):
        """Rehydrate a CachedAttachment backed by this cache."""
        return CachedAttachment(key=key, cache=self, **attachment)

    def get(self, key):
        """Yield CachedAttachments described by the metadata record for ``key``."""
        result = self.inner.get(ATTACHMENT_META_KEY.format(key=key), raw=False)

        for id, attachment in enumerate(result or ()):
            attachment.setdefault("id", id)
            attachment.setdefault("key", key)
            yield CachedAttachment(cache=self, **attachment)

    def get_data(self, attachment):
        """Fetch and decompress all payload data for ``attachment``.

        Raises MissingAttachmentChunks when any key has expired from the cache.
        """
        data = []

        for key in attachment.METHOD_NAME:
            raw_data = self.inner.get(key, raw=True)
            if raw_data is None:
                raise MissingAttachmentChunks()
            data.append(zlib.decompress(raw_data))

        return b"".join(data)

    def delete(self, key):
        """Delete all attachments (payload and metadata) stored under ``key``."""
        for attachment in self.get(key):
            attachment.delete()
        self.inner.delete(ATTACHMENT_META_KEY.format(key=key))
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.build import check_min_cppstd
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.files import copy, get, rmdir
from conan.tools.microsoft import is_msvc
from conan.tools.scm import Version
import os
required_conan_version = ">=1.51.1"
class OatppOpenSSLConan(ConanFile):
    """Conan recipe for oatpp-openssl, the Oat++ OpenSSL/TLS extension module."""

    name = "oatpp-openssl"
    license = "Apache-2.0"
    homepage = "https://github.com/oatpp/oatpp-openssl"
    url = "https://github.com/conan-io/conan-center-index"
    description = "Oat++ OpenSSL library"
    topics = ("oat++", "oatpp", "openssl")
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
    }

    def config_options(self):
        # fPIC has no meaning on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        # Shared builds imply position-independent code, so drop the option.
        if self.options.shared:
            try:
                del self.options.fPIC
            except Exception:
                pass

    def layout(self):
        cmake_layout(self, src_folder="src")

    def requirements(self):
        # oatpp-openssl versions track the matching oatpp core version.
        self.requires(f"oatpp/{self.version}")
        self.requires("openssl/[>=1.1 <4]")

    def validate(self):
        if self.info.settings.compiler.get_safe("cppstd"):
            check_min_cppstd(self, 11)
        if is_msvc(self) and self.info.options.shared:
            raise ConanInvalidConfiguration(f"{self.ref} can not be built as shared library with msvc")
        if self.info.settings.compiler == "gcc" and Version(self.info.settings.compiler.version) < "5":
            raise ConanInvalidConfiguration(f"{self.ref} requires GCC >=5")

    def source(self):
        get(self, **self.conan_data["sources"][self.version],
            destination=self.source_folder, strip_root=True)

    def generate(self):
        tc = CMakeToolchain(self)
        tc.variables["OATPP_BUILD_TESTS"] = False
        tc.variables["OATPP_MODULES_LOCATION"] = "INSTALLED"
        # Honor BUILD_SHARED_LIBS from conan_toolchain (see https://github.com/conan-io/conan/issues/11840)
        tc.cache_variables["CMAKE_POLICY_DEFAULT_CMP0077"] = "NEW"
        tc.generate()
        deps = CMakeDeps(self)
        deps.generate()

    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def package(self):
        copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
        cmake = CMake(self)
        cmake.install()
        # Drop the generated CMake config; Conan generates its own.
        rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))

    def METHOD_NAME(self):
        """Declare the package's CMake targets, include/lib dirs and system libs."""
        self.cpp_info.set_property("cmake_file_name", "oatpp-openssl")
        self.cpp_info.set_property("cmake_target_name", "oatpp::oatpp-openssl")
        # TODO: back to global scope in conan v2 once legacy generators removed
        self.cpp_info.components["_oatpp-openssl"].includedirs = [
            os.path.join("include", f"oatpp-{self.version}", "oatpp-openssl")
        ]
        self.cpp_info.components["_oatpp-openssl"].libdirs = [os.path.join("lib", f"oatpp-{self.version}")]
        if self.settings.os == "Windows" and self.options.shared:
            self.cpp_info.components["_oatpp-openssl"].bindirs = [os.path.join("bin", f"oatpp-{self.version}")]
        else:
            self.cpp_info.components["_oatpp-openssl"].bindirs = []
        self.cpp_info.components["_oatpp-openssl"].libs = ["oatpp-openssl"]
        if self.settings.os in ["Linux", "FreeBSD"]:
            self.cpp_info.components["_oatpp-openssl"].system_libs = ["pthread"]

        # TODO: to remove in conan v2 once legacy generators removed
        self.cpp_info.filenames["cmake_find_package"] = "oatpp-openssl"
        self.cpp_info.filenames["cmake_find_package_multi"] = "oatpp-openssl"
        self.cpp_info.names["cmake_find_package"] = "oatpp"
        self.cpp_info.names["cmake_find_package_multi"] = "oatpp"
        self.cpp_info.components["_oatpp-openssl"].names["cmake_find_package"] = "oatpp-openssl"
        self.cpp_info.components["_oatpp-openssl"].names["cmake_find_package_multi"] = "oatpp-openssl"
        self.cpp_info.components["_oatpp-openssl"].set_property("cmake_target_name", "oatpp::oatpp-openssl")
        self.cpp_info.components["_oatpp-openssl"].requires = ["oatpp::oatpp", "openssl::openssl"]
"""Unit tests for the label printing mixin."""
import json
import os
from django.apps import apps
from django.urls import reverse
from PIL import Image
from InvenTree.unit_test import InvenTreeAPITestCase
from label.models import PartLabel, StockItemLabel, StockLocationLabel
from part.models import Part
from plugin.base.label.mixins import LabelPrintingMixin
from plugin.helpers import MixinNotImplementedError
from plugin.plugin import InvenTreePlugin
from plugin.registry import registry
from stock.models import StockItem, StockLocation
class LabelMixinTests(InvenTreeAPITestCase):
"""Test that the Label mixin operates correctly."""
fixtures = [
'category',
'part',
'location',
'stock',
]
roles = 'all'
def do_activate_plugin(self):
    """Activate the 'samplelabel' plugin."""
    # Flip the plugin's stored config to active and persist it.
    config = registry.get_plugin('samplelabel').plugin_config()
    config.active = True
    config.save()
def do_url(self, parts, plugin_ref, label, url_name: str = 'api-part-label-print', url_single: str = 'part', invalid: bool = False):
    """Generate an URL to print a label.

    Args:
        parts: Items to print labels for (may be None or empty).
        plugin_ref: Optional plugin slug appended as a `plugin` query param.
        label: Label instance whose pk goes into the route, or None.
        url_name: Name of the URL route to reverse.
        url_single: Query parameter name for a single item.
        invalid: When True, append an extra non-numeric item reference.
    """
    # Construct URL
    kwargs = {}
    if label:
        kwargs["pk"] = label.pk

    url = reverse(url_name, kwargs=kwargs)

    # Append part filters
    if not parts:
        pass
    elif len(parts) == 1:
        # Single item: singular query parameter.
        url += f'?{url_single}={parts[0].pk}'
    elif len(parts) > 1:
        # Multiple items: pluralized query parameter, repeated per item.
        url += '?' + '&'.join([f'{url_single}s={item.pk}' for item in parts])

    # Append an invalid item
    if invalid:
        url += f'&{url_single}{"s" if len(parts) > 1 else ""}=abc'

    # Append plugin reference
    if plugin_ref:
        url += f'&plugin={plugin_ref}'

    return url
def test_wrong_implementation(self):
    """Test that a wrong implementation raises an error."""
    class WrongPlugin(LabelPrintingMixin, InvenTreePlugin):
        # Deliberately omits the required print_label override.
        pass

    with self.assertRaises(MixinNotImplementedError):
        plugin = WrongPlugin()
        plugin.print_label(filename='test')
def test_installed(self):
    """Test that the sample printing plugin is installed."""
    # Get all label plugins
    plugins = registry.with_mixin('labels')
    self.assertEqual(len(plugins), 2)

    # But, it is not 'active'
    plugins = registry.with_mixin('labels', active=True)
    self.assertEqual(len(plugins), 1)
def METHOD_NAME(self):
    """Test that we can filter the API endpoint by mixin."""
    url = reverse('api-plugin-list')

    # Try POST (disallowed)
    response = self.client.post(url, {})
    self.assertEqual(response.status_code, 405)

    response = self.client.get(
        url,
        {
            'mixin': 'labels',
            'active': True,
        }
    )

    # No results matching this query!
    self.assertEqual(len(response.data), 0)

    # What about inactive?
    response = self.client.get(
        url,
        {
            'mixin': 'labels',
            'active': False,
        }
    )

    self.assertEqual(len(response.data), 0)

    self.do_activate_plugin()
    # Should be available via the API now
    response = self.client.get(
        url,
        {
            'mixin': 'labels',
            'active': True,
        }
    )

    self.assertEqual(len(response.data), 2)
    data = response.data[1]
    self.assertEqual(data['key'], 'samplelabel')
def test_printing_process(self):
    """Test that a label can be printed end-to-end via the API."""
    # Ensure the labels were created
    apps.get_app_config('label').create_labels()

    # Lookup references
    part = Part.objects.first()
    plugin_ref = 'samplelabel'
    label = PartLabel.objects.first()

    url = self.do_url([part], plugin_ref, label)

    # Non-existing plugin: appending '123' to the ref must 404
    response = self.get(f'{url}123', expected_code=404)
    self.assertIn(f'Plugin \'{plugin_ref}123\' not found', str(response.content, 'utf8'))

    # Inactive plugin: valid ref but plugin not yet enabled
    response = self.get(url, expected_code=400)
    self.assertIn(f'Plugin \'{plugin_ref}\' is not enabled', str(response.content, 'utf8'))

    # Active plugin
    self.do_activate_plugin()

    # Print one part
    self.get(url, expected_code=200)

    # Print multiple parts
    self.get(self.do_url(Part.objects.all()[:2], plugin_ref, label), expected_code=200)

    # Print multiple parts without a plugin
    self.get(self.do_url(Part.objects.all()[:2], None, label), expected_code=200)

    # Print multiple parts without a plugin in debug mode
    response = self.get(self.do_url(Part.objects.all()[:2], None, label), expected_code=200)
    data = json.loads(response.content)
    self.assertIn('file', data)

    # Print no part
    self.get(self.do_url(None, plugin_ref, label), expected_code=400)

    # Test that the labels have been printed
    # The sample labelling plugin simply prints to file
    self.assertTrue(os.path.exists('label.pdf'))

    # Read the raw .pdf data - ensure it contains some sensible information
    with open('label.pdf', 'rb') as f:
        pdf_data = str(f.read())
    self.assertIn('WeasyPrint', pdf_data)

    # Check that the .png file has already been created
    self.assertTrue(os.path.exists('label.png'))

    # And that it is a valid image file
    Image.open('label.png')
def test_printing_endpoints(self):
    """Cover the endpoints not covered by `test_printing_process`."""
    plugin_ref = 'samplelabel'

    # Activate the label components
    apps.get_app_config('label').create_labels()
    self.do_activate_plugin()

    def run_print_test(label, qs, url_name, url_single):
        """Run tests on single and multiple page printing.

        Args:
            label: class of the label
            qs: class of the base queryset
            url_name: url for endpoints
            url_single: item lookup reference
        """
        label = label.objects.first()
        qs = qs.objects.all()

        # List endpoint
        self.get(self.do_url(None, None, None, f'{url_name}-list', url_single), expected_code=200)

        # List endpoint with filter (including an intentionally invalid item)
        self.get(self.do_url(qs[:2], None, None, f'{url_name}-list', url_single, invalid=True), expected_code=200)

        # Single page printing
        self.get(self.do_url(qs[:1], plugin_ref, label, f'{url_name}-print', url_single), expected_code=200)

        # Multi page printing
        self.get(self.do_url(qs[:2], plugin_ref, label, f'{url_name}-print', url_single), expected_code=200)

    # Test StockItemLabels
    run_print_test(StockItemLabel, StockItem, 'api-stockitem-label', 'item')

    # Test StockLocationLabels
    run_print_test(StockLocationLabel, StockLocation, 'api-stocklocation-label', 'location')

    # Test PartLabels
    run_print_test(PartLabel, Part, 'api-part-label', 'part')
6,551 | set | # -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2015 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""
Classes responsible for handling user configuration
"""
import configparser
import os
import re
import logging
from GTG.core.dirs import CONFIG_DIR
log = logging.getLogger(__name__)
# Default values for every configuration section; SectionConfig falls back to
# these when an option is missing from (or invalid in) the on-disk file.
DEFAULTS = {
    'browser': {
        "bg_color_enable": True,
        "contents_preview_enable": False,
        'tag_pane': True,
        "sidebar_width": 120,
        'collapsed_tasks': [],
        'expanded_tags': [],
        'view': 'default',
        "opened_tasks": [],
        'width': 400,
        'height': 400,
        'tasklist_sort_column': 5,
        'tasklist_sort_order': 1,
        "font_name": "",
        'hour': "00",
        'min': "00",
        'autoclean': True,
        'autoclean_days': 30,
        'dark_mode': False,
        'maximized': False,
    },
    'tag_editor': {
        "custom_colors": [],
    },
    'plugins': {
        "enabled": [],
        "disabled": [],
    },
    'task': {
        'position': [],
        'size': [],
    },
    'backend': {}
}
def open_config_file(config_file):
    """Open (creating if necessary) a config file and parse it.

    Creates the config file and its parent directory if they do not exist,
    and verifies the file is both readable and writable by the user. That
    prevents a surprise when the user is not able to save configuration
    when exiting the app.

    Returns a configparser.ConfigParser (left empty if parsing failed).
    Raises Exception when the file exists but is not readable/writable.
    """
    dirname = os.path.dirname(config_file)
    # Guard against an empty dirname (bare filename) and use exist_ok to
    # avoid a race between the exists() check and directory creation.
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    if not os.path.exists(config_file):
        # Create an empty file; the context manager closes it immediately
        with open(config_file, 'w'):
            pass
    if not os.access(config_file, os.R_OK | os.W_OK):
        raise Exception("File " + config_file + " is a configuration file "
                        "for gtg, but it cannot be read or written. "
                        "Please check it")
    config = configparser.ConfigParser(interpolation=None)
    try:
        config.read(config_file)
    except configparser.Error as e:
        # Best effort: log and return the (empty) parser rather than crash
        log.warning("Problem with opening file %s: %s", config_file, e)
    return config
class SectionConfig():
    """Configuration only for a section (system or a task)."""

    def __init__(self, section_name, section, defaults, save_function):
        """Initialize a section config.

        - section_name: name used when writing error logs
        - section: section of the config handled by this object
        - defaults: dictionary of default values
        - save_function: function to be called to save changes (this function
          needs to save the whole config)
        """
        self._section_name = section_name
        self._section = section
        self._defaults = defaults
        self._save_function = save_function

    def _getlist(self, option):
        """Parse a string representation of a list from the configuration.

        A list can't contain an empty value, as those are skipped over,
        e.g. "a, ,b" is parsed as ['a', 'b'].

        Accepted formats:
         - "('a', 'b'),('c','d','e')" => ["('a', 'b')", "('c','d','e')"]
         - "a, b" => ['a', 'b']
        """
        raw = self._section.get(option)
        if not raw:
            return None
        # Match tuples in the format "('string1', 'string2', ...)"
        values = re.findall(r'\(.*?\)', raw)
        if not values:
            # Just an ordinary comma-separated list
            values = raw.split(',')
        return [item.strip() for item in values if item]

    def _type_function(self, default_value):
        """Return the getter function matching the default value's type."""
        default_type = type(default_value)
        if default_type in (list, tuple):
            return self._getlist
        elif default_type == int:
            return self._section.getint
        elif default_type == bool:
            return self._section.getboolean
        else:
            return self._section.get

    def get(self, option):
        """Get an option from the configuration.

        If the option is not specified in the configuration or is of an
        invalid type, return the default value. If there is no default
        value either, raise ValueError.
        """
        default_value = self._defaults.get(option)
        get_function = self._type_function(default_value)
        try:
            value = get_function(option)
        except ValueError as error:
            value = None
            log.warning('Invalid configuration value "%s" for %s in %s: %s',
                        self._section.get(option), option, self._section_name,
                        error)
        if value is None and default_value is None:
            # BUG FIX: the original mixed %-style placeholders with
            # str.format(), so option/section were never interpolated
            # into the error message.
            raise ValueError(
                'No valid configuration value or default value was '
                'found for {} in {}'.format(option, self._section_name))
        elif value is None:
            return default_value
        else:
            return value

    def METHOD_NAME(self, option, value):
        """Store an option (lists are comma-joined) and save immediately."""
        if type(value) in (list, tuple):
            value = ','.join(str(item) for item in value)
        else:
            value = str(value)
        self._section[option] = value
        # Immediately save the configuration
        self.save()

    def save(self):
        """Persist the whole configuration via the injected save function."""
        self._save_function()
class CoreConfig():
    """Class holding configuration to all systems and tasks."""

    def __init__(self):
        self._conf_path = os.path.join(CONFIG_DIR, 'gtg.conf')
        self._conf = open_config_file(self._conf_path)
        self._task_conf_path = os.path.join(CONFIG_DIR, 'tasks.conf')
        self._task_conf = open_config_file(self._task_conf_path)
        self._backends_conf_path = os.path.join(CONFIG_DIR, 'backends.conf')
        self._backends_conf = open_config_file(self._backends_conf_path)

    def save_gtg_config(self):
        """Write the main gtg config to disk."""
        # BUG FIX: the original passed an anonymous open() handle that was
        # never closed; a context manager flushes and releases it reliably.
        with open(self._conf_path, 'w') as conf_file:
            self._conf.write(conf_file)

    def save_task_config(self):
        """Write the per-task config to disk."""
        with open(self._task_conf_path, 'w') as task_file:
            self._task_conf.write(task_file)

    def save_backends_config(self):
        """Write the backends config to disk."""
        with open(self._backends_conf_path, 'w') as backends_file:
            self._backends_conf.write(backends_file)

    def get_subconfig(self, name):
        """Returns configuration object for special section of config"""
        if name not in self._conf:
            self._conf.add_section(name)
        defaults = DEFAULTS.get(name, dict())
        return SectionConfig(
            name, self._conf[name], defaults, self.save_gtg_config)

    def get_task_config(self, task_id):
        """Returns configuration object for a single task."""
        if task_id not in self._task_conf:
            self._task_conf.add_section(task_id)
        return SectionConfig(
            f'Task {task_id}',
            self._task_conf[task_id],
            DEFAULTS['task'],
            self.save_task_config)

    def get_all_backends(self):
        """Returns the names of every configured backend."""
        return self._backends_conf.sections()

    def get_backend_config(self, backend):
        """Returns configuration object for a single backend."""
        if backend not in self._backends_conf:
            self._backends_conf.add_section(backend)
        return SectionConfig(
            f'Backend {backend}',
            self._backends_conf[backend],
            DEFAULTS['backend'],
            self.save_backends_config)
6,552 | cpu freq path | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
#
# Copyright: 2018 IBM
# Author: Shriya Kulkarni <shriyak@linux.vnet.ibm.com>
# : Praveen K Pandey <praveen@linux.vnet.ibm.com>>
import time
import os
import random
import platform
import re
from avocado import Test
from avocado.utils import process, distro, cpu, genio
from avocado import skipIf
from avocado.utils.software_manager.manager import SoftwareManager
IS_POWER_NV = 'PowerNV' not in open('/proc/cpuinfo', 'r').read()
class freq_transitions(Test):
    """
    To validate quad level frequency transitions.

    :avocado: tags=cpu,power,privileged
    """

    @skipIf(IS_POWER_NV, "This test is not supported on PowerVM")
    def setUp(self):
        """
        Verify :
        1. It is Power system and platform is Power NV.
        2. Cpupower tool is installed.
        """
        if 'ppc' not in distro.detect().arch:
            self.cancel("Processor is not ppc64")
        if not os.path.exists('/sys/devices/system/cpu/cpu0/cpufreq'):
            self.cancel('missing sysfs entry cpufreq, CPUFREQ not supported')
        smm = SoftwareManager()
        detected_distro = distro.detect()
        # Pick the distro-specific package providing the cpupower tool
        if 'Ubuntu' in detected_distro.name:
            deps = ['linux-tools-common', 'linux-tools-%s'
                    % platform.uname()[2]]
        elif detected_distro.name == "SuSE":
            deps = ['cpupower']
        else:
            deps = ['kernel-tools']
        for package in deps:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel('%s is needed for the test to be run' % package)
        # Parse the hardware min/max frequency limits (GHz) from cpupower
        fre_min = 0
        fre_max = 0
        freq_info = process.system_output(
            "cpupower frequency-info", shell=True).decode("utf-8")
        for line in str(freq_info).splitlines():
            if re.search('hardware limits:', line, re.IGNORECASE):
                frq = line.split(":")[1]
                frq_init = frq.split('-')[0]
                frq_last = frq.split('-')[1]
                fre_min = float(frq_init.split('GHz')[0])
                fre_max = float(frq_last.split('GHz')[0])
                break
        # Allowed deviation between requested and observed frequency
        # (hardware range converted from GHz to kHz)
        threshold = (fre_max - fre_min) * (10 ** 6)
        self.threshold = int(self.params.get("threshold", default=threshold))
        self.cpus = cpu.online_cpus_count()
        self.cpu_num = 0
        self.max_freq = 0
        self.quad_dict = {}
        self.max_freq_dict = {}
        self.quad_to_cpu_mapping()

    def run_cmd(self, cmdline):
        """Run a shell command as root; fail the test on CmdError."""
        try:
            return process.run(cmdline, ignore_status=True,
                               sudo=True, shell=True)
        except process.CmdError as details:
            self.fail("test failed: %s" % details)

    def test(self):
        """
        1) Change governor to userspace on all CPUs.
        2) For each core in the quad, pick a random frequency
           and set cpu with that frequency.
        3) Validate that the cpuinfo_cur_freq on any core in
           the code is set to max(set of frequencies)
        4) Run the perf command on each cpu to validate frequencies
           independently.
        """
        output = self.run_cmd("cpupower frequency-set -g userspace")
        cur_governor = self.METHOD_NAME('scaling_governor')
        if 'userspace' == cur_governor and output.exit_status == 0:
            self.log.info("%s governor set successfully" % cur_governor)
        else:
            self.cancel("Unable to set the userspace governor")
        # Set a random frequency on every CPU, recording the per-quad maximum
        for chip in self.quad_dict:
            for quad in self.quad_dict[chip]:
                for self.cpu_num in self.quad_dict[chip][quad]:
                    self.run_cmd("cpupower -c %s frequency-set -f %s"
                                 % (self.cpu_num, self.get_random_freq()))
                    time.sleep(1)
                    freq_set = int(self.METHOD_NAME('cpuinfo_cur_freq'))
                    if self.max_freq < freq_set:
                        self.max_freq = freq_set
                if chip not in self.max_freq_dict.keys():
                    self.max_freq_dict[chip] = {}
                if quad not in self.max_freq_dict[chip].keys():
                    self.max_freq_dict[chip][quad] = self.max_freq
                self.log.info("Maximum frequency set:%s quad:"
                              "%s" % (self.max_freq, quad))
                self.max_freq = 0
        # Verify (via perf) every CPU runs at its quad's maximum frequency
        for chip in self.quad_dict:
            for quad in self.quad_dict[chip]:
                for cpu in self.quad_dict[chip][quad]:
                    freq_get = self.perf_cmd(cpu)
                    freq_max = self.max_freq_dict[chip][quad]
                    diff = float(freq_max) - float(freq_get)
                    if diff > self.threshold:
                        self.fail("Quad level max frequency %s is not set on"
                                  "this cpu %s"
                                  % (self.max_freq_dict[chip][quad], cpu))
                self.log.info("Quad level max frequency %s is set on this quad"
                              "%s" % (self.max_freq_dict[chip][quad], quad))

    def quad_to_cpu_mapping(self):
        """
        Get the total quad and cpus list belonging to each quad.
        """
        self.nums = range(0, self.cpus)
        for cpu in self.nums:
            phy_id = genio.read_file(
                '/sys/devices/system/cpu/cpu%s/physical_id' % cpu).rstrip("\n")
            # Bits 4-6 of the physical id select the quad, bits 8-14 the chip
            quad_id = int(phy_id) >> 4 & 0x7
            chip_id = int(phy_id) >> 8 & 0x7F
            if chip_id not in self.quad_dict.keys():
                self.quad_dict[chip_id] = {}
            if quad_id not in self.quad_dict[chip_id].keys():
                self.quad_dict[chip_id][quad_id] = []
            self.quad_dict[chip_id][quad_id].append(cpu)

    def METHOD_NAME(self, file):
        """
        get cpu_freq values

        :param: file: is filename for which data needs to be fetched
        """
        # Reads the named cpufreq sysfs attribute for self.cpu_num
        f_name = "/sys/devices/system/cpu/cpu%s/cpufreq/%s" % (
            self.cpu_num, file)
        return genio.read_file(f_name).rstrip('\n').strip(' ')

    def get_random_freq(self):
        """
        Get random frequency from list
        """
        cmd = "scaling_available_frequencies"
        return random.choice(self.METHOD_NAME(cmd).split(' '))

    def perf_cmd(self, cpu):
        """
        Parse the output for perf cmd

        :param: cpu: provide the output for each cpu
        """
        cmd = "perf stat -e cycles -e task-clock timeout 1 taskset -c \
            %s yes>/dev/null" % cpu
        output = self.run_cmd(cmd)
        self.log.info("output : %s" % output.stderr)
        # Extract the GHz figure from perf's cycles line and convert to kHz
        output = output.stderr.decode("utf-8").splitlines()[3].split('#')[
            1].strip().split(' ')[0]
        output = float(output) * (10 ** 6)
        return output
6,553 | get parameter projection | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Time series distributional output classes and utilities.
"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """A distribution transformed by the affine map ``x -> loc + scale * x``."""

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        # Default to the identity transform when loc/scale are omitted
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        transform = AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)
        super().__init__(base_distribution, [transform])

    @property
    def mean(self):
        """
        Returns the mean of the distribution.
        """
        return self.loc + self.base_dist.mean * self.scale

    @property
    def variance(self):
        """
        Returns the variance of the distribution.
        """
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """
        Returns the standard deviation of the distribution.
        """
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Project features onto the (domain-mapped) parameters of a distribution."""

    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # One linear head per distribution parameter
        self.proj = nn.ModuleList(nn.Linear(in_features, dim) for dim in args_dim.values())
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        # Raw (unbounded) projections, then constrain to each parameter's domain
        unbounded = [head(x) for head in self.proj]
        return self.domain_map(*unbounded)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an ``nn.Module`` so it can be composed
    inside a module hierarchy (used here to apply ``domain_map``)."""

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        # Simply delegate to the wrapped callable
        return self.function(x, *args)
class DistributionOutput:
    """Base class describing how network outputs parameterize a distribution.

    Subclasses set ``args_dim`` (parameter name -> per-dimension size),
    ``distribution_class``, and implement ``domain_map`` to constrain raw
    network outputs to each parameter's valid domain.
    """

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        # dim > 1 yields an Independent (multivariate) distribution below
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        # Wrap in Independent when modelling more than one dimension
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        # Optionally shift/rescale the base distribution with an affine map
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        r"""
        Shape of each individual event contemplated by the distributions that this object constructs.
        """
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        r"""
        Number of event dimensions, i.e., length of the `event_shape` tuple, of the distributions that this object
        constructs.
        """
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        r"""
        A float that will have a valid numeric value when computing the log-loss of the corresponding distribution. By
        default 0.0. This value will be used when padding data series.
        """
        return 0.0

    def METHOD_NAME(self, in_features: int) -> nn.Module:
        r"""
        Return the parameter projection layer that maps the input to the appropriate parameters of the distribution.
        """
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        r"""
        Converts arguments to the right shape and domain. The domain depends on the type of distribution, while the
        correct shape is obtained by reshaping the trailing axis in such a way that the returned tensors define a
        distribution of the right event_shape.
        """
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        r"""
        Helper to map inputs to the positive orthant by applying the square-plus operation. Reference:
        https://twitter.com/jon_barron/status/1387167648669048833
        """
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """
    Student-T distribution output class.
    """

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        # Scale must be strictly positive; df is shifted above 2 so that
        # the variance of the Student-T is defined.
        eps = torch.finfo(scale.dtype).eps
        positive_scale = cls.squareplus(scale).clamp_min(eps)
        shifted_df = 2.0 + cls.squareplus(df)
        return shifted_df.squeeze(-1), loc.squeeze(-1), positive_scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """
    Normal distribution output class.
    """

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        # Constrain the scale to be strictly positive
        eps = torch.finfo(scale.dtype).eps
        positive_scale = cls.squareplus(scale).clamp_min(eps)
        return loc.squeeze(-1), positive_scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """
    Negative Binomial distribution output class.
    """

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        # total_count must be positive; logits are unconstrained
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        """Build the (possibly multivariate) negative binomial."""
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Overwrites the parent class method. We cannot scale using the affine
    # transformation since negative binomial should return integers. Instead
    # we scale the parameters.
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        """Build the distribution, folding an optional scale into the logits.

        `loc` is accepted for interface compatibility but not used here.
        """
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            # BUG FIX: the original used `logits += scale.log()`, which
            # mutates the caller's tensor in place; build a new tensor.
            logits = logits + scale.log()

        return self._base_distribution((total_count, logits))
6,554 | build dirname | # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import inspect
import os
from typing import List
import llnl.util.filesystem as fs
import spack.builder
import spack.package_base
from spack.directives import build_system, depends_on, variant
from spack.multimethod import when
from ._checks import BaseBuilder, execute_build_time_tests
class MesonPackage(spack.package_base.PackageBase):
    """Specialized class for packages built using Meson. For more information
    on the Meson build system, see https://mesonbuild.com/
    """

    #: This attribute is used in UI queries that need to know the build
    #: system base class
    build_system_class = "MesonPackage"

    #: Legacy buildsystem attribute used to deserialize and install old specs
    legacy_buildsystem = "meson"

    build_system("meson")

    # Variants and build dependencies only apply when meson is the
    # selected build system for the spec
    with when("build_system=meson"):
        variant(
            "buildtype",
            default="release",
            description="Meson build type",
            values=("plain", "debug", "debugoptimized", "release", "minsize"),
        )
        variant(
            "default_library",
            default="shared",
            values=("shared", "static"),
            multi=True,
            description="Build shared libs, static libs or both",
        )
        variant("strip", default=False, description="Strip targets on install")
        depends_on("meson", type="build")
        depends_on("ninja", type="build")

    def flags_to_build_system_args(self, flags):
        """Produces a list of all command line arguments to pass the specified
        compiler flags to meson."""
        # Has to be dynamic attribute due to caching
        setattr(self, "meson_flag_args", [])
@spack.builder.builder("meson")
class MesonBuilder(BaseBuilder):
    """The Meson builder encodes the default way to build software with Meson.

    The builder has three phases that can be overridden, if need be:

        1. :py:meth:`~.MesonBuilder.meson`
        2. :py:meth:`~.MesonBuilder.build`
        3. :py:meth:`~.MesonBuilder.install`

    They all have sensible defaults and for many packages the only thing
    necessary will be to override :py:meth:`~.MesonBuilder.meson_args`.

    For a finer tuning you may also override:

        +-----------------------------------------------+--------------------+
        | **Method**                                    | **Purpose**        |
        +===============================================+====================+
        | :py:meth:`~.MesonBuilder.root_mesonlists_dir` | Location of the    |
        |                                               | root MesonLists.txt|
        +-----------------------------------------------+--------------------+
        | :py:meth:`~.MesonBuilder.build_directory`     | Directory where to |
        |                                               | build the package  |
        +-----------------------------------------------+--------------------+
    """

    phases = ("meson", "build", "install")

    #: Names associated with package methods in the old build-system format
    legacy_methods = ("meson_args", "check")

    #: Names associated with package attributes in the old build-system format
    legacy_attributes = (
        "build_targets",
        "install_targets",
        "build_time_test_callbacks",
        "root_mesonlists_dir",
        "std_meson_args",
        "build_directory",
    )

    build_targets: List[str] = []
    install_targets = ["install"]

    build_time_test_callbacks = ["check"]

    @property
    def archive_files(self):
        """Files to archive for packages based on Meson"""
        return [os.path.join(self.build_directory, "meson-logs", "meson-log.txt")]

    @property
    def root_mesonlists_dir(self):
        """Relative path to the directory containing meson.build

        This path is relative to the root of the extracted tarball,
        not to the ``build_directory``. Defaults to the current directory.
        """
        return self.pkg.stage.source_path

    @property
    def std_meson_args(self):
        """Standard meson arguments provided as a property for convenience
        of package writers.
        """
        # standard Meson arguments
        std_meson_args = MesonBuilder.std_args(self.pkg)
        # Extra args derived from compiler flags (set by the package class)
        std_meson_args += getattr(self, "meson_flag_args", [])
        return std_meson_args

    @staticmethod
    def std_args(pkg):
        """Standard meson arguments for a generic package."""
        try:
            build_type = pkg.spec.variants["buildtype"].value
        except KeyError:
            build_type = "release"

        strip = "true" if "+strip" in pkg.spec else "false"

        if "default_library=static,shared" in pkg.spec:
            default_library = "both"
        elif "default_library=static" in pkg.spec:
            default_library = "static"
        else:
            default_library = "shared"

        args = [
            "-Dprefix={0}".format(pkg.prefix),
            # If we do not specify libdir explicitly, Meson chooses something
            # like lib/x86_64-linux-gnu, which causes problems when trying to
            # find libraries and pkg-config files.
            # See https://github.com/mesonbuild/meson/issues/2197
            "-Dlibdir={0}".format(pkg.prefix.lib),
            "-Dbuildtype={0}".format(build_type),
            "-Dstrip={0}".format(strip),
            "-Ddefault_library={0}".format(default_library),
            # Do not automatically download and install dependencies
            "-Dwrap_mode=nodownload",
        ]
        return args

    @property
    def METHOD_NAME(self):
        """Returns the directory name to use when building the package."""
        return "spack-build-{}".format(self.spec.dag_hash(7))

    @property
    def build_directory(self):
        """Directory to use when building the package."""
        return os.path.join(self.pkg.stage.path, self.METHOD_NAME)

    def meson_args(self):
        """List of arguments that must be passed to meson, except:

        * ``--prefix``
        * ``--libdir``
        * ``--buildtype``
        * ``--strip``
        * ``--default_library``

        which will be set automatically.
        """
        return []

    def meson(self, pkg, spec, prefix):
        """Run ``meson`` in the build directory"""
        options = []
        # Newer meson requires the explicit 'setup' subcommand
        if self.spec["meson"].satisfies("@0.64:"):
            options.append("setup")
        options.append(os.path.abspath(self.root_mesonlists_dir))
        options += self.std_meson_args
        options += self.meson_args()
        with fs.working_dir(self.build_directory, create=True):
            inspect.getmodule(self.pkg).meson(*options)

    def build(self, pkg, spec, prefix):
        """Make the build targets"""
        options = ["-v"]
        options += self.build_targets
        with fs.working_dir(self.build_directory):
            inspect.getmodule(self.pkg).ninja(*options)

    def install(self, pkg, spec, prefix):
        """Make the install targets"""
        with fs.working_dir(self.build_directory):
            inspect.getmodule(self.pkg).ninja(*self.install_targets)

    # Run the build-time tests (the check method) after the build phase
    spack.builder.run_after("build")(execute_build_time_tests)

    def check(self):
        """Search Meson-generated files for the target ``test`` and run it if found."""
        with fs.working_dir(self.build_directory):
            self.pkg._if_ninja_target_execute("test")
            self.pkg._if_ninja_target_execute("check")
6,555 | handle obj | # Stubs for xlrd.book (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
import struct
from typing import Any, Optional
from .biffh import BaseObject
# Module-level constants and helpers of xlrd.book (auto-generated stub)
unpack = struct.unpack

empty_cell: Any
DEBUG: int
USE_FANCY_CD: int
TOGGLE_GC: int
MMAP_AVAILABLE: int
USE_MMAP = MMAP_AVAILABLE
MY_EOF: int
# SUPBOOK record type discriminators
SUPBOOK_UNK: Any
SUPBOOK_INTERNAL: Any
SUPBOOK_EXTERNAL: Any
SUPBOOK_ADDIN: Any
SUPBOOK_DDEOLE: Any
SUPPORTED_VERSIONS: Any
builtin_name_from_code: Any
code_from_builtin_name: Any

def open_workbook_xls(filename: Optional[Any] = ..., logfile: Any = ..., verbosity: int = ..., use_mmap: Any = ..., file_contents: Optional[Any] = ..., encoding_override: Optional[Any] = ..., formatting_info: bool = ..., on_demand: bool = ..., ragged_rows: bool = ...): ...
class Name(BaseObject):
    """Stub for a defined name in a workbook (see ``cell``/``area2d``)."""

    book: Any = ...
    # Option flags for the name
    hidden: int = ...
    func: int = ...
    vbasic: int = ...
    macro: int = ...
    complex: int = ...
    builtin: int = ...
    funcgroup: int = ...
    binary: int = ...
    name_index: int = ...
    name: Any = ...
    raw_formula: bytes = ...
    scope: int = ...
    result: Any = ...
    def cell(self): ...
    def area2d(self, clipped: bool = ...): ...
class Book(BaseObject):
    """Stub for the xlrd workbook object."""

    # Workbook-level metadata
    nsheets: int = ...
    datemode: int = ...
    biff_version: int = ...
    name_obj_list: Any = ...
    codepage: Any = ...
    encoding: Any = ...
    countries: Any = ...
    user_name: Any = ...
    # Formatting information
    font_list: Any = ...
    xf_list: Any = ...
    format_list: Any = ...
    format_map: Any = ...
    style_name_map: Any = ...
    colour_map: Any = ...
    palette_record: Any = ...
    load_time_stage_1: Any = ...
    load_time_stage_2: Any = ...
    # Sheet access API
    def sheets(self): ...
    def sheet_by_index(self, sheetx: Any): ...
    def sheet_by_name(self, sheet_name: Any): ...
    def sheet_names(self): ...
    def sheet_loaded(self, sheet_name_or_index: Any): ...
    def unload_sheet(self, sheet_name_or_index: Any) -> None: ...
    mem: Any = ...
    filestr: Any = ...
    def release_resources(self) -> None: ...
    def __enter__(self): ...
    def __exit__(self, exc_type: Any, exc_value: Any, exc_tb: Any) -> None: ...
    name_and_scope_map: Any = ...
    name_map: Any = ...
    raw_user_name: bool = ...
    builtinfmtcount: int = ...
    addin_func_names: Any = ...
    def __init__(self) -> None: ...
    logfile: Any = ...
    verbosity: Any = ...
    use_mmap: Any = ...
    encoding_override: Any = ...
    formatting_info: Any = ...
    on_demand: Any = ...
    ragged_rows: Any = ...
    stream_len: Any = ...
    base: int = ...
    def biff2_8_load(self, filename: Optional[Any] = ..., file_contents: Optional[Any] = ..., logfile: Any = ..., verbosity: int = ..., use_mmap: Any = ..., encoding_override: Optional[Any] = ..., formatting_info: bool = ..., on_demand: bool = ..., ragged_rows: bool = ...) -> None: ...
    xfcount: int = ...
    actualfmtcount: int = ...
    def initialise_format_info(self) -> None: ...
    def get2bytes(self): ...
    def get_record_parts(self): ...
    def get_record_parts_conditional(self, reqd_record: Any): ...
    def get_sheet(self, sh_number: Any, update_pos: bool = ...): ...
    def get_sheets(self) -> None: ...
    def fake_globals_get_sheet(self) -> None: ...
    # BIFF record handlers (one per record type)
    def handle_boundsheet(self, data: Any) -> None: ...
    def handle_builtinfmtcount(self, data: Any) -> None: ...
    def derive_encoding(self): ...
    def handle_codepage(self, data: Any) -> None: ...
    def handle_country(self, data: Any) -> None: ...
    def handle_datemode(self, data: Any) -> None: ...
    def handle_externname(self, data: Any) -> None: ...
    def handle_externsheet(self, data: Any) -> None: ...
    def handle_filepass(self, data: Any) -> None: ...
    def handle_name(self, data: Any) -> None: ...
    def names_epilogue(self) -> None: ...
    def METHOD_NAME(self, data: Any) -> None: ...
    def handle_supbook(self, data: Any) -> None: ...
    def handle_sheethdr(self, data: Any) -> None: ...
    def handle_sheetsoffset(self, data: Any) -> None: ...
    def handle_sst(self, data: Any) -> None: ...
    def handle_writeaccess(self, data: Any) -> None: ...
    def parse_globals(self) -> None: ...
    def read(self, pos: Any, length: Any): ...
    def getbof(self, rqd_stream: Any): ...
# Module-level helper function stubs
def expand_cell_address(inrow: Any, incol: Any): ...
def colname(colx: Any, _A2Z: str = ...): ...
def display_cell_address(rowx: Any, colx: Any, relrow: Any, relcol: Any): ...
def unpack_SST_table(datatab: Any, nstrings: Any): ...
6,556 | has started | # -*- python-mode -*-
## Copyright (C) 2012-2013 Daniel Pavel
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along
## with this program; if not, write to the Free Software Foundation, Inc.,
## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import threading as _threading
from logging import DEBUG as _DEBUG
from logging import INFO as _INFO
from logging import getLogger
from . import base as _base
# from time import time as _timestamp
# for both Python 2 and 3
try:
from Queue import Queue as _Queue
except ImportError:
from queue import Queue as _Queue
_log = getLogger(__name__)
del getLogger
#
#
#
class _ThreadedHandle:
"""A thread-local wrapper with different open handles for each thread.
Closing a ThreadedHandle will close all handles.
"""
__slots__ = ('path', '_local', '_handles', '_listener')
def __init__(self, listener, path, handle):
assert listener is not None
assert path is not None
assert handle is not None
assert isinstance(handle, int)
self._listener = listener
self.path = path
self._local = _threading.local()
# take over the current handle for the thread doing the replacement
self._local.handle = handle
self._handles = [handle]
def _open(self):
handle = _base.open_path(self.path)
if handle is None:
_log.error('%r failed to open new handle', self)
else:
# if _log.isEnabledFor(_DEBUG):
# _log.debug("%r opened new handle %d", self, handle)
self._local.handle = handle
self._handles.append(handle)
return handle
def close(self):
if self._local:
self._local = None
handles, self._handles = self._handles, []
if _log.isEnabledFor(_DEBUG):
_log.debug('%r closing %s', self, handles)
for h in handles:
_base.close(h)
@property
def notifications_hook(self):
if self._listener:
assert isinstance(self._listener, _threading.Thread)
if _threading.current_thread() == self._listener:
return self._listener._notifications_hook
def __del__(self):
self._listener = None
self.close()
def __index__(self):
if self._local:
try:
return self._local.handle
except Exception:
return self._open()
else:
return -1
__int__ = __index__
def __str__(self):
if self._local:
return str(int(self))
def __repr__(self):
return '<_ThreadedHandle(%s)>' % self.path
def __bool__(self):
return bool(self._local)
__nonzero__ = __bool__
#
#
#
# How long to wait during a read for the next packet, in seconds
# Ideally this should be rather long (10s ?), but the read is blocking
# and this means that when the thread is signalled to stop, it would take
# a while for it to acknowledge it.
# Forcibly closing the file handle on another thread does _not_ interrupt the
# read on Linux systems.
_EVENT_READ_TIMEOUT = 1. # in seconds
# After this many reads that did not produce a packet, call the tick() method.
# This only happens if tick_period is enabled (>0) for the Listener instance.
# _IDLE_READS = 1 + int(5 // _EVENT_READ_TIMEOUT) # wait at least 5 seconds between ticks
class EventsListener(_threading.Thread):
    """Listener thread for notifications from the Unifying Receiver.

    Incoming packets will be passed to the callback function in sequence.
    """

    def __init__(self, receiver, notifications_callback):
        # Name the thread after the device node, e.g. "EventsListener:hidraw2".
        super().__init__(name=self.__class__.__name__ + ':' + receiver.path.split('/')[2])
        self.daemon = True
        self._active = False
        self.receiver = receiver
        # Bounded buffer for notifications re-queued by the callback itself
        # (see _notifications_hook); entries are dropped silently when full.
        self._queued_notifications = _Queue(16)
        self._notifications_callback = notifications_callback

    def run(self):
        self._active = True

        # replace the handle with a threaded one
        self.receiver.handle = _ThreadedHandle(self, self.receiver.path, self.receiver.handle)

        if _log.isEnabledFor(_INFO):
            _log.info('started with %s (%d)', self.receiver, int(self.receiver.handle))

        # subclass hook, runs before the read loop begins
        self.METHOD_NAME()

        if self.receiver.isDevice:  # ping (wired or BT) devices to see if they are really online
            if self.receiver.ping():
                self.receiver.status.changed(True, reason='initialization')

        while self._active:
            if self._queued_notifications.empty():
                try:
                    # blocking read with a short timeout so stop() is honoured
                    # (see _EVENT_READ_TIMEOUT above)
                    n = _base.read(self.receiver.handle, _EVENT_READ_TIMEOUT)
                except _base.NoReceiver:
                    _log.warning('%s disconnected', self.receiver.name)
                    self.receiver.close()
                    break
                if n:
                    n = _base.make_notification(*n)
            else:
                n = self._queued_notifications.get()  # deliver any queued notifications
            if n:
                try:
                    self._notifications_callback(n)
                except Exception:
                    # keep the loop alive: a faulty callback must not kill the listener
                    _log.exception('processing %s', n)

        del self._queued_notifications
        self.has_stopped()

    def stop(self):
        """Tells the listener to stop as soon as possible."""
        self._active = False

    def METHOD_NAME(self):
        """Called right after the thread has started, and before it starts
        reading notification packets."""
        pass

    def has_stopped(self):
        """Called right before the thread stops."""
        pass

    # def tick(self, timestamp):
    #     """Called about every tick_period seconds."""
    #     pass

    def _notifications_hook(self, n):
        # Only consider unhandled notifications that were sent from this thread,
        # i.e. triggered by a callback handling a previous notification.
        assert _threading.current_thread() == self
        if self._active:  # and _threading.current_thread() == self:
            # if _log.isEnabledFor(_DEBUG):
            #     _log.debug("queueing unhandled %s", n)
            if not self._queued_notifications.full():
                self._queued_notifications.put(n)

    def __bool__(self):
        return bool(self._active and self.receiver)

    __nonzero__ = __bool__
6,557 | build list request | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def METHOD_NAME(**kwargs: Any) -> HttpRequest:
    """Build the GET request for listing the resource provider's operations."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # The api-version may arrive as a kwarg or as a pre-set query parameter;
    # the kwarg takes precedence, with a hard-coded default as fallback.
    api_version: str = kwargs.pop("api_version", params.pop("api-version", "2021-09-01"))
    accept = headers.pop("Accept", "application/json")

    # Construct URL
    url = kwargs.pop("template_url", "/providers/Microsoft.KubernetesConfiguration/operations")

    # Construct query parameters and headers via the shared serializer.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.kubernetesconfiguration.v2021_09_01.SourceControlConfigurationClient`'s
        :attr:`operations` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # Generated-SDK convention: the client passes (client, config,
        # serializer, deserializer) positionally; keywords are the fallback.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(self, **kwargs: Any) -> Iterable["_models.ResourceProviderOperation"]:
        """List all the available operations the KubernetesConfiguration resource provider supports, in
        this api-version.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ResourceProviderOperation or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.kubernetesconfiguration.v2021_09_01.models.ResourceProviderOperation]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-09-01"))
        cls: ClsType[_models.ResourceProviderOperationList] = kwargs.pop("cls", None)

        # HTTP statuses mapped to typed exceptions before generic handling.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the templated request; subsequent pages:
            # follow the server-supplied next_link, re-applying api-version.
            if not next_link:
                request = METHOD_NAME(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page; return (link to next page or None, items).
            deserialized = self._deserialize("ResourceProviderOperationList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch a single page, raising a typed error on non-200.
            request = prepare_request(next_link)

            _stream = False
            pipeline_response: PipelineResponse = self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    list.metadata = {"url": "/providers/Microsoft.KubernetesConfiguration/operations"}
6,558 | test revert commit | import base64
import os
import sys
import tarfile
import time
import zipfile
from io import BytesIO
import pytest
import gitlab
def test_repository_files(project):
    """End-to-end CRUD on repository files against a live GitLab fixture."""
    project.files.create(
        {
            "file_path": "README.md",
            "branch": "main",
            "content": "Initial content",
            "commit_message": "Initial commit",
        }
    )
    readme = project.files.get(file_path="README.md", ref="main")
    # updates must ship base64-encoded content
    readme.content = base64.b64encode(b"Improved README").decode()
    # NOTE(review): presumably paces the API so the update lands as a
    # distinct commit -- confirm whether the sleep is still needed.
    time.sleep(2)
    readme.save(branch="main", commit_message="new commit")
    readme.delete(commit_message="Removing README", branch="main")
    project.files.create(
        {
            "file_path": "README.rst",
            "branch": "main",
            "content": "Initial content",
            "commit_message": "New commit",
        }
    )
    readme = project.files.get(file_path="README.rst", ref="main")
    # The first decode() is the ProjectFile method, the second one is the bytes
    # object method
    assert readme.decode().decode() == "Initial content"

    headers = project.files.head("README.rst", ref="main")
    assert headers["X-Gitlab-File-Path"] == "README.rst"

    blame = project.files.blame(file_path="README.rst", ref="main")
    assert blame

    raw_file = project.files.raw(file_path="README.rst", ref="main")
    assert os.fsdecode(raw_file) == "Initial content"
def test_repository_tree(project):
    """Tree listing, raw blob retrieval and project snapshot round-trip."""
    tree = project.repository_tree()
    assert tree
    # README.rst was created by the preceding test in this module
    assert tree[0]["name"] == "README.rst"

    blob_id = tree[0]["id"]
    blob = project.repository_raw_blob(blob_id)
    assert blob.decode() == "Initial content"

    snapshot = project.snapshot()
    assert isinstance(snapshot, bytes)


def test_repository_archive(project):
    """Default archive must equal the archive of the default branch."""
    archive = project.repository_archive()
    assert isinstance(archive, bytes)

    archive2 = project.repository_archive("main")
    assert archive == archive2
# NOTE(jlvillal): Support for using tarfile.is_tarfile() on a file or file-like object
# was added in Python 3.9
@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
@pytest.mark.parametrize(
    "format,assertion",
    [
        ("tbz", tarfile.is_tarfile),
        ("tbz2", tarfile.is_tarfile),
        ("tb2", tarfile.is_tarfile),
        ("bz2", tarfile.is_tarfile),
        ("tar", tarfile.is_tarfile),
        ("tar.gz", tarfile.is_tarfile),
        ("tar.bz2", tarfile.is_tarfile),
        ("zip", zipfile.is_zipfile),
    ],
)
def test_repository_archive_formats(project, format, assertion):
    """Each supported archive format yields a valid container of its kind."""
    archive = project.repository_archive(format=format)
    assert assertion(BytesIO(archive))
def test_create_commit(project):
    """Create a commit and exercise diff/refs/merge_requests accessors."""
    data = {
        "branch": "main",
        "commit_message": "blah blah blah",
        "actions": [{"action": "create", "file_path": "blah", "content": "blah"}],
    }
    commit = project.commits.create(data)

    # a hunk header ("@@") proves the commit actually changed file content
    assert "@@" in project.commits.list()[0].diff()[0]["diff"]
    assert isinstance(commit.refs(), list)
    assert isinstance(commit.merge_requests(), list)


def test_list_all_commits(project):
    """Commits on non-default branches only show up with the API `all` flag."""
    data = {
        "branch": "new-branch",
        "start_branch": "main",
        "commit_message": "New commit on new branch",
        "actions": [
            {"action": "create", "file_path": "new-file", "content": "new content"}
        ],
    }
    commit = project.commits.create(data)

    # `all=True` alone lists only the default branch here
    commits = project.commits.list(all=True)
    assert commit not in commits

    # Listing commits on other branches requires `all` parameter passed to the API
    all_commits = project.commits.list(get_all=True, all=True)
    assert commit in all_commits
    assert len(all_commits) > len(commits)
def test_create_commit_status(project):
    """A created commit status appears in the commit's status list."""
    commit = project.commits.list()[0]
    status = commit.statuses.create({"state": "success", "sha": commit.id})
    assert status in commit.statuses.list()


def test_commit_signature(project):
    """An unsigned commit's signature endpoint returns 404."""
    commit = project.commits.list()[0]

    with pytest.raises(gitlab.GitlabGetError) as e:
        commit.signature()

    assert "404 Signature Not Found" in str(e.value)


def test_commit_comment(project):
    """A commit comment can be created and listed back."""
    commit = project.commits.list()[0]

    commit.comments.create({"note": "This is a commit comment"})
    assert len(commit.comments.list()) == 1


def test_commit_discussion(project):
    """Discussion and note CRUD on a commit."""
    commit = project.commits.list()[0]

    discussion = commit.discussions.create({"body": "Discussion body"})
    assert discussion in commit.discussions.list()

    note = discussion.notes.create({"body": "first note"})
    note_from_get = discussion.notes.get(note.id)
    note_from_get.body = "updated body"
    note_from_get.save()

    discussion = commit.discussions.get(discussion.id)
    # assert discussion.attributes["notes"][-1]["body"] == "updated body"

    note_from_get.delete()
    discussion = commit.discussions.get(discussion.id)
    # assert len(discussion.attributes["notes"]) == 1
def METHOD_NAME(project):
    """Reverting a commit works once; a second revert must fail."""
    commit = project.commits.list()[0]

    revert_commit = commit.revert(branch="main")
    expected_message = f'Revert "{commit.message}"\n\nThis reverts commit {commit.id}'
    assert revert_commit["message"] == expected_message

    with pytest.raises(gitlab.GitlabRevertError):
        # Two revert attempts should raise GitlabRevertError
        commit.revert(branch="main")


def test_repository_merge_base(project):
    """merge-base of all refs is one of them; fewer than 2 refs is rejected."""
    refs = [commit.id for commit in project.commits.list(all=True)]

    commit = project.repository_merge_base(refs)
    assert commit["id"] in refs

    with pytest.raises(gitlab.GitlabGetError, match="Provide at least 2 refs"):
        commit = project.repository_merge_base(refs[0])
6,559 | define tables | """test the current state of the hasparent() flag."""
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import testing
from sqlalchemy.orm import attributes
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm import relationship
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import gc_collect
class ParentRemovalTest(fixtures.MappedTest):
    """Test that the 'hasparent' flag gets flipped to False
    only if we're sure this object is the real parent.

    In ambiguous cases a stale data exception is
    raised.
    """

    run_inserts = None

    # trying to push GC to do a better job
    run_setup_classes = "each"
    run_setup_mappers = "each"

    @classmethod
    def METHOD_NAME(cls, metadata):
        # Per-backend FK options: Oracle needs deferred constraint checking,
        # MySQL takes no extras, everyone else cascades PK updates.
        if testing.against("oracle"):
            fk_args = dict(deferrable=True, initially="deferred")
        elif testing.against("mysql"):
            fk_args = {}
        else:
            fk_args = dict(onupdate="cascade")

        Table(
            "users",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
        )

        Table(
            "addresses",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("user_id", Integer, ForeignKey("users.id", **fk_args)),
        )

    @classmethod
    def setup_classes(cls):
        class User(cls.Comparable):
            pass

        class Address(cls.Comparable):
            pass

    @classmethod
    def setup_mappers(cls):
        # delete-orphan on User.addresses is what drives the 'hasparent'
        # bookkeeping under test.
        cls.mapper_registry.map_imperatively(
            cls.classes.Address, cls.tables.addresses
        )
        cls.mapper_registry.map_imperatively(
            cls.classes.User,
            cls.tables.users,
            properties={
                "addresses": relationship(
                    cls.classes.Address, cascade="all, delete-orphan"
                )
            },
        )

    def _assert_hasparent(self, a1):
        assert attributes.has_parent(self.classes.User, a1, "addresses")

    def _assert_not_hasparent(self, a1):
        assert not attributes.has_parent(self.classes.User, a1, "addresses")

    def _fixture(self):
        # One flushed User with a single attached Address.
        User, Address = self.classes.User, self.classes.Address
        s = fixture_session()

        u1 = User()
        a1 = Address()
        u1.addresses.append(a1)
        s.add(u1)
        s.flush()
        return s, u1, a1

    def test_stale_state_positive(self):
        # removal through a *re-loaded* parent still unsets hasparent
        User = self.classes.User
        s, u1, a1 = self._fixture()

        s.expunge(u1)
        u1 = s.query(User).first()
        u1.addresses.remove(a1)

        self._assert_not_hasparent(a1)

    @testing.requires.predictable_gc
    def test_stale_state_positive_gc(self):
        # same as above, but the old parent state is garbage collected first
        User = self.classes.User
        s, u1, a1 = self._fixture()

        s.expunge(u1)
        del u1
        gc_collect()

        u1 = s.query(User).first()
        u1.addresses.remove(a1)

        self._assert_not_hasparent(a1)

    @testing.requires.updateable_autoincrement_pks
    @testing.requires.predictable_gc
    def test_stale_state_positive_pk_change(self):
        """Illustrate that we can't easily link a
        stale state to a fresh one if the fresh one has
        a PK change (unless we a. tracked all the previous PKs,
        wasteful, or b. recycled states - time consuming,
        breaks lots of edge cases, destabilizes the code)
        """
        User = self.classes.User
        s, u1, a1 = self._fixture()

        s._expunge_states([attributes.instance_state(u1)])
        del u1
        gc_collect()

        u1 = s.query(User).first()

        # primary key change. now we
        # can't rely on state.key as the
        # identifier.
        new_id = u1.id + 10
        u1.id = new_id
        a1.user_id = new_id
        s.flush()

        assert_raises_message(
            orm_exc.StaleDataError,
            "can't be sure this is the most recent parent.",
            u1.addresses.remove,
            a1,
        )

        # u1.addresses wasn't actually impacted, because the event was
        # caught before collection mutation
        eq_(u1.addresses, [a1])

        # expire all and we can continue
        s.expire_all()
        u1.addresses.remove(a1)

        self._assert_not_hasparent(a1)

    def test_stale_state_negative_child_expired(self):
        """illustrate the current behavior of
        expiration on the child.

        there's some uncertainty here in how
        this use case should work.
        """
        User = self.classes.User
        s, u1, a1 = self._fixture()

        gc_collect()

        u2 = User(addresses=[a1])  # noqa

        s.expire(a1)
        u1.addresses.remove(a1)

        u2_is = u2._sa_instance_state
        del u2

        for i in range(5):
            gc_collect()

        # heisenberg the GC a little bit, since #7823 caused a lot more
        # GC when mappings are set up, larger test suite started failing
        # on this being gc'ed
        o = u2_is.obj()
        assert o is None

        # controversy here. The action is
        # to expire one object, not the other, and remove;
        # this is pretty abusive in any case. for now
        # we are expiring away the 'parents' collection
        # so the remove will unset the hasparent flag.
        # this is what has occurred historically in any case.
        self._assert_not_hasparent(a1)
        # self._assert_hasparent(a1)

    @testing.requires.predictable_gc
    def test_stale_state_negative(self):
        # ambiguous parentage (u2's state is gone) -> StaleDataError
        User = self.classes.User
        s, u1, a1 = self._fixture()

        gc_collect()

        u2 = User(addresses=[a1])
        s.add(u2)
        s.flush()
        s._expunge_states([attributes.instance_state(u2)])

        u2_is = u2._sa_instance_state
        del u2

        for i in range(5):
            gc_collect()

        # heisenberg the GC a little bit, since #7823 caused a lot more
        # GC when mappings are set up, larger test suite started failing
        # on this being gc'ed
        o = u2_is.obj()
        assert o is None

        assert_raises_message(
            orm_exc.StaleDataError,
            "can't be sure this is the most recent parent.",
            u1.addresses.remove,
            a1,
        )

        s.flush()
        self._assert_hasparent(a1)

    def test_fresh_state_positive(self):
        s, u1, a1 = self._fixture()

        self._assert_hasparent(a1)

    def test_fresh_state_negative(self):
        s, u1, a1 = self._fixture()

        u1.addresses.remove(a1)

        self._assert_not_hasparent(a1)
6,560 | softmax np | # Copyright 2019-2022 ETH Zurich and the DaCe authors. All rights reserved.
# Original application code: NPBench - https://github.com/spcl/npbench
import dace.dtypes
import numpy as np
import dace as dc
import pytest
import argparse
from dace.fpga_testing import fpga_test, xilinx_test
from dace.transformation.interstate import FPGATransformSDFG, InlineSDFG
from dace.transformation.dataflow import StreamingMemory, StreamingComposition
from dace.transformation.auto.auto_optimize import auto_optimize, fpga_auto_opt
from dace.config import set_temporary
# Symbolic sizes: channels in, batch, the three layer widths, and the two
# generic matrix dimensions used by the elementwise helpers.
C_in, N, S0, S1, S2, N1, N2 = (dc.symbol(s, dtype=dc.int64) for s in ('C_in', 'N', 'S0', 'S1', 'S2', 'N1', 'N2'))


# Element-wise ReLU as a DaCe program.
@dc.program
def relu(x: dc.float32[N1, N2]):
    return np.maximum(x, 0)


# Numerically-stable version of softmax
# (max is subtracted before exponentiation to avoid overflow).
@dc.program
def softmax(x: dc.float32[N1, N2]):
    # tmp_max = np.max(x, axis=-1, keepdims=True)
    tmp_max = np.maximum.reduce(x, axis=-1, keepdims=True)
    tmp_out = np.exp(x - tmp_max)
    # tmp_sum = np.sum(tmp_out, axis=-1, keepdims=True)
    tmp_sum = np.add.reduce(tmp_out, axis=-1, keepdims=True)
    return tmp_out / tmp_sum


# 3-layer MLP
@dc.program
def mlp_kernel(input: dc.float32[N, C_in], w1: dc.float32[C_in, S0], b1: dc.float32[S0], w2: dc.float32[S0, S1],
               b2: dc.float32[S1], w3: dc.float32[S1, S2], b3: dc.float32[S2]):
    x1 = relu(input @ w1 + b1)
    x2 = relu(x1 @ w2 + b2)
    x3 = softmax(x2 @ w3 + b3)  # Softmax call can be omitted if necessary
    return x3
def initialize(C_in, N, S0, S1, S2):
    """Create reproducible random inputs and weights for the 3-layer MLP.

    :param C_in: number of input features.
    :param N: batch size.
    :param S0, S1, S2: widths of the three layers.
    :return: tuple (input, w1, b1, w2, b2, w3, b3) of float32 arrays.
    """
    from numpy.random import default_rng
    rng = default_rng(42)

    mlp_sizes = [S0, S1, S2]  # [300, 100, 10]

    # Inputs
    # BUG FIX: the input was previously drawn from the unseeded global
    # np.random, so the fixture was not reproducible; use the seeded
    # generator for everything.
    input = rng.random((N, C_in), dtype=np.float32)

    # Weights
    w1 = rng.random((C_in, mlp_sizes[0]), dtype=np.float32)
    b1 = rng.random((mlp_sizes[0], ), dtype=np.float32)
    w2 = rng.random((mlp_sizes[0], mlp_sizes[1]), dtype=np.float32)
    b2 = rng.random((mlp_sizes[1], ), dtype=np.float32)
    w3 = rng.random((mlp_sizes[1], mlp_sizes[2]), dtype=np.float32)
    b3 = rng.random((mlp_sizes[2], ), dtype=np.float32)
    return input, w1, b1, w2, b2, w3, b3
def relu_np(x):
    """Element-wise ReLU reference implementation."""
    # clipping from below at zero is equivalent to np.maximum(x, 0)
    return np.clip(x, 0, None)
# Numerically-stable softmax reference: shift by the row maximum before
# exponentiating so large inputs cannot overflow.
def METHOD_NAME(x):
    """Softmax along the last axis, numerically stabilized."""
    shifted = x - np.max(x, axis=-1, keepdims=True)
    exp_shifted = np.exp(shifted)
    return exp_shifted / np.sum(exp_shifted, axis=-1, keepdims=True)
# NumPy reference forward pass of the 3-layer MLP.
def mlp_np(input, w1, b1, w2, b2, w3, b3):
    """Compute the ground-truth MLP output used to validate the SDFG."""
    hidden1 = relu_np(input @ w1 + b1)
    hidden2 = relu_np(hidden1 @ w2 + b2)
    # final affine layer + softmax (softmax can be omitted if only the
    # argmax is of interest)
    return METHOD_NAME(hidden2 @ w3 + b3)
def run_mlp(device_type: dace.dtypes.DeviceType):
    '''
    Runs the 3-layer MLP for the given device and validates the result
    against the NumPy reference implementation.

    :param device_type: target device (CPU, GPU or FPGA).
    :return: the SDFG
    '''

    # Initialize data (npbench small size)
    C_in, N, S0, S1, S2 = 3, 8, 30000, 2000, 2000
    input, w1, b1, w2, b2, w3, b3 = initialize(C_in, N, S0, S1, S2)

    if device_type in {dace.dtypes.DeviceType.CPU, dace.dtypes.DeviceType.GPU}:
        # Parse the SDFG and apply auto-opt
        sdfg = mlp_kernel.to_sdfg()
        sdfg = auto_optimize(sdfg, device_type)
        out = sdfg(input, w1, b1, w2, b2, w3, b3, N=N, S0=S0, S1=S1, S2=S2, C_in=C_in)
    elif device_type == dace.dtypes.DeviceType.FPGA:
        # Parse SDFG and apply FPGA friendly optimization
        sdfg = mlp_kernel.to_sdfg(simplify=True)
        applied = sdfg.apply_transformations([FPGATransformSDFG])
        assert applied == 1

        # Use FPGA Expansion for lib nodes, and expand them to enable further optimizations
        from dace.libraries.standard import Reduce
        Reduce.default_implementation = "FPGAPartialReduction"
        from dace.libraries.blas import Gemm
        Gemm.default_implementation = "FPGA1DSystolic"

        sdfg.expand_library_nodes()
        sdfg.apply_transformations_repeated([InlineSDFG], print_report=True)
        # FPGA codegen needs compile-time sizes, so bind the symbols here
        sdfg.specialize(dict(N=N, S0=S0, S1=S1, S2=S2, C_in=C_in))
        out = sdfg(input, w1, b1, w2, b2, w3, b3)

    # Compute ground truth and validate
    out_ref = mlp_np(input, w1, b1, w2, b2, w3, b3)
    assert np.allclose(out, out_ref)
    return sdfg
# pytest entry points, one per device target.
def test_cpu():
    run_mlp(dace.dtypes.DeviceType.CPU)


@pytest.mark.gpu
def test_gpu():
    run_mlp(dace.dtypes.DeviceType.GPU)


@pytest.mark.skip(reason="Intel, compilation error")
@fpga_test(assert_ii_1=False)
def test_fpga():
    return run_mlp(dace.dtypes.DeviceType.FPGA)
if __name__ == "__main__":
    # Command-line entry point: pick the target device via -t/--target.
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--target", default='cpu', choices=['cpu', 'gpu', 'fpga'], help='Target platform')

    args = vars(parser.parse_args())
    target = args["target"]

    # `choices` above guarantees exactly one branch runs.
    if target == "cpu":
        run_mlp(dace.dtypes.DeviceType.CPU)
    elif target == "gpu":
        run_mlp(dace.dtypes.DeviceType.GPU)
    elif target == "fpga":
        run_mlp(dace.dtypes.DeviceType.FPGA)
6,561 | test submit name for area without any | from candidates.models import LoggedAction
from candidates.tests.auth import TestUserMixin
from candidates.tests.factories import MembershipFactory
from candidates.tests.uk_examples import UK2015ExamplesMixin
from django_webtest import WebTest
from people.tests.factories import PersonFactory
from utils.testing_utils import FuzzyInt
class TestBulkAddingByParty(TestUserMixin, UK2015ExamplesMixin, WebTest):
    """Tests for the bulk-add-candidates-by-party workflow."""

    def test_party_select(self):
        """The party-select page renders for a privileged user."""
        response = self.app.get(
            "/bulk_adding/party/parl.2015-05-07/",
            user=self.user_who_can_upload_documents,
        )
        self.assertContains(response, "GB Parties")

    def test_party_select_invalid_party(self):
        """Submitting with no party selected shows a validation error."""
        form = self.app.get(
            "/bulk_adding/party/parl.2015-05-07/",
            user=self.user_who_can_upload_documents,
        ).forms[1]
        form["party_GB_1"] = ""
        form["party_GB_0"] = ""
        response = form.submit()
        self.assertContains(response, "Select one and only one party")

    def test_party_select_non_current_party(self):
        """A non-current party id is still accepted and redirects."""
        self.person = PersonFactory.create(id=2009, name="Tessa Jowell")
        MembershipFactory.create(
            person=self.person,
            post=self.dulwich_post,
            party=self.labour_party,
            ballot=self.dulwich_post_ballot,
        )
        form = self.app.get(
            "/bulk_adding/party/parl.2015-05-07/",
            user=self.user_who_can_upload_documents,
        ).forms[1]
        form["party_GB_1"] = "PP63"
        response = form.submit()
        self.assertEqual(response.status_code, 302)

    def test_submit_party_redirects_to_person_form(self):
        """Selecting a party redirects to the per-post person form."""
        form = self.app.get(
            "/bulk_adding/party/parl.2015-05-07/",
            user=self.user_who_can_upload_documents,
        ).forms[1]
        form["party_GB_1"] = self.conservative_party.ec_id
        response = form.submit().follow()
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Add Conservative Party candidates")
        self.assertEqual(response.context["election_obj"], self.election)
        # BUG FIX: this was `assertTrue(len(...), 2)`, where 2 was silently
        # treated as the failure *message*, making the assertion vacuous for
        # any non-empty list.  Assert non-emptiness explicitly instead.
        # NOTE(review): confirm the intended exact count (2?) and tighten
        # this to assertEqual once known.
        self.assertTrue(len(response.context["posts"]))
        self.assertContains(response, "Camberwell and Peckham")
        self.assertContains(response, "1 seat contested.")
        self.assertContains(
            response, "No Conservative Party candidates known yet."
        )

    def test_submit_name_for_area_without_source(self):
        """A candidate name without a source fails validation."""
        ballot = self.election.ballot_set.first()
        form = self.app.get(
            "/bulk_adding/party/parl.2015-05-07/PP52/",
            user=self.user_who_can_upload_documents,
        ).forms[1]
        form["{}-0-name".format(ballot.pk)] = "Pemphero Pasternak"
        response = form.submit()
        self.assertContains(response, "This field is required")
        # the entered name is preserved in the re-rendered form
        self.assertContains(response, "Pemphero Pasternak")

    def METHOD_NAME(self):
        """A source without any candidate name fails validation."""
        form = self.app.get(
            "/bulk_adding/party/parl.2015-05-07/PP52/",
            user=self.user_who_can_upload_documents,
        ).forms[1]
        form["source"] = "https://example.com/candidates/"
        response = form.submit()
        self.assertContains(response, "Please enter at least one name")

    def test_submit_name_for_area(self):
        """The happy path creates a person, a membership and logged actions."""
        ballot = self.election.ballot_set.first()
        ballot.winner_count = 3
        ballot.save()

        # Make sure we have no people or logged actions
        self.assertEqual(ballot.post.memberships.count(), 0)
        self.assertEqual(LoggedAction.objects.count(), 0)

        form = self.app.get(
            "/bulk_adding/party/parl.2015-05-07/PP52/", user=self.user
        ).forms[1]
        self.assertEqual(len(form.fields), 25)
        form["source"] = "https://example.com/candidates/"
        form["{}-0-name".format(ballot.pk)] = "Pemphero Pasternak"
        response = form.submit().follow()
        self.assertContains(
            response, '<label>Add a new profile "Pemphero Pasternak"</label>'
        )
        form = response.forms[1]

        # Now submit the valid form
        with self.assertNumQueries(FuzzyInt(49, 52)):
            form["{}-0-select_person".format(ballot.pk)] = "_new"
            response = form.submit().follow()

        # We should have a new person and membership.
        # BUG FIX: this was `assertTrue(name, "Pemphero Pasternak")`, which
        # always passes (the name was the failure message, not an expected
        # value) and was also duplicated; assert the actual name once.
        self.assertEqual(
            ballot.post.memberships.first().person.name, "Pemphero Pasternak"
        )

        # We should have created 2 logged actions, one for person-create
        # and one for person-update (adding the membership)
        self.assertEqual(LoggedAction.objects.count(), 2)
6,562 | test vuln count | '''
Faraday Penetration Test IDE
Copyright (C) 2013 Infobyte LLC (http://www.infobytesec.com/)
See the file 'doc/LICENSE' for the license information
'''
from faraday.server.models import db, Workspace
from tests.factories import (
HostFactory,
ServiceFactory,
SourceCodeFactory,
VulnerabilityFactory,
VulnerabilityCodeFactory,
VulnerabilityWebFactory,
)
C_SOURCE_CODE_VULN_COUNT = 3
C_STANDARD_VULN_COUNT = [6, 2] # With host parent and with service parent
C_WEB_VULN_COUNT = 5
NC_SOURCE_CODE_VULN_COUNT = 1
NC_STANDARD_VULN_COUNT = [1, 2] # With host parent and with service parent
NC_WEB_VULN_COUNT = 2
SOURCE_CODE_VULN_COUNT = C_SOURCE_CODE_VULN_COUNT + NC_SOURCE_CODE_VULN_COUNT
STANDARD_VULN_COUNT = C_STANDARD_VULN_COUNT + NC_STANDARD_VULN_COUNT
WEB_VULN_COUNT = C_WEB_VULN_COUNT + NC_WEB_VULN_COUNT
def populate_workspace(workspace):
    """Fill *workspace* with a fixed mix of non-confirmed and confirmed vulns.

    The per-category counts come from the module-level constants above.
    """
    host = HostFactory.create(workspace=workspace)
    service = ServiceFactory.create(workspace=workspace, host=host)
    code = SourceCodeFactory.create(workspace=workspace)

    # Non-confirmed batch first, then confirmed, each created in the same
    # order: host-parented standard, service-parented standard, web, source
    # code -- preserving the original insertion order.
    batches = (
        (False, NC_STANDARD_VULN_COUNT, NC_WEB_VULN_COUNT,
         NC_SOURCE_CODE_VULN_COUNT),
        (True, C_STANDARD_VULN_COUNT, C_WEB_VULN_COUNT,
         C_SOURCE_CODE_VULN_COUNT),
    )
    for confirmed, standard_counts, web_count, code_count in batches:
        VulnerabilityFactory.create_batch(
            standard_counts[0], workspace=workspace, host=host,
            service=None, confirmed=confirmed)
        VulnerabilityFactory.create_batch(
            standard_counts[1], workspace=workspace, service=service,
            host=None, confirmed=confirmed)
        VulnerabilityWebFactory.create_batch(
            web_count, workspace=workspace, service=service,
            confirmed=confirmed)
        VulnerabilityCodeFactory.create_batch(
            code_count, workspace=workspace, source_code=code,
            confirmed=confirmed)

    db.session.commit()
def METHOD_NAME(workspace, second_workspace, database):
    """Unfiltered count query (confirmed=None) must count every vulnerability
    of each kind for this workspace only, ignoring the second workspace.

    Skipped on SQLite: the counting query is not supported on that dialect
    (the sibling tests below skip the same way).
    """
    if database.engine.dialect.name == 'sqlite':
        return
    populate_workspace(workspace)
    # Populate a second workspace to prove counts do not leak across workspaces.
    populate_workspace(second_workspace)
    workspace = Workspace.query_with_count(None, workspace_name=workspace.name).fetchone()
    # Normalize the row to a plain dict, consistent with
    # test_vuln_count_confirmed below (row objects and dicts subscript alike,
    # so behavior is unchanged for existing callers).
    workspace = dict(workspace)
    assert workspace['vulnerability_web_count'] == WEB_VULN_COUNT
    assert workspace['vulnerability_code_count'] == SOURCE_CODE_VULN_COUNT
    assert workspace['vulnerability_standard_count'] == sum(
        STANDARD_VULN_COUNT)
    assert workspace['vulnerability_total_count'] == (
        sum(STANDARD_VULN_COUNT) + WEB_VULN_COUNT + SOURCE_CODE_VULN_COUNT
    )
def test_vuln_count_confirmed(workspace, second_workspace, database):
    """Filtering with confirmed=True must count only confirmed vulnerabilities."""
    if database.engine.dialect.name == 'sqlite':
        return
    for ws in (workspace, second_workspace):
        populate_workspace(ws)
    row = Workspace.query_with_count(True, workspace_name=workspace.name).fetchone()
    row = dict(row)
    expected_standard = sum(C_STANDARD_VULN_COUNT)
    assert row['vulnerability_web_count'] == C_WEB_VULN_COUNT
    assert row['vulnerability_code_count'] == C_SOURCE_CODE_VULN_COUNT
    assert row['vulnerability_standard_count'] == expected_standard
    assert row['vulnerability_total_count'] == (
        expected_standard + C_WEB_VULN_COUNT + C_SOURCE_CODE_VULN_COUNT
    )
def test_vuln_no_count(workspace, second_workspace, database):
    """Loading a workspace without query_with_count() leaves every
    vulnerability count attribute unset (None)."""
    if database.engine.dialect.name == 'sqlite':
        return
    for ws in (workspace, second_workspace):
        populate_workspace(ws)
    reloaded = Workspace.query.get(workspace.id)
    for attr in (
        'vulnerability_web_count',
        'vulnerability_code_count',
        'vulnerability_standard_count',
        'vulnerability_total_count',
    ):
        assert getattr(reloaded, attr) is None
6,563 | test sqlite ingestion column names mismatch | # Copyright 2021-2023 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import os
from unittest import mock
from click.testing import Result
from pytest import raises
from vdk.internal.core.errors import UserCodeError
from vdk.plugin.sqlite import sqlite_plugin
from vdk.plugin.sqlite.ingest_to_sqlite import IngestToSQLite
from vdk.plugin.sqlite.sqlite_configuration import SQLiteConfiguration
from vdk.plugin.test_utils.util_funcs import cli_assert_equal
from vdk.plugin.test_utils.util_funcs import CliEntryBasedTestRunner
from vdk.plugin.test_utils.util_funcs import jobs_path_from_caller_directory
# uses the pytest tmpdir fixture - https://docs.pytest.org/en/6.2.x/tmpdir.html#the-tmpdir-fixture
def test_sqlite_plugin(tmpdir):
    """Run the bundled 'sql-job' end to end and verify its data landed in
    the stocks table of the configured SQLite database."""
    env = {
        "VDK_DB_DEFAULT_TYPE": "SQLITE",
        "VDK_SQLITE_FILE": str(tmpdir) + "vdk-sqlite.db",
    }
    with mock.patch.dict(os.environ, env):
        runner = CliEntryBasedTestRunner(sqlite_plugin)

        run_result: Result = runner.invoke(
            ["run", jobs_path_from_caller_directory("sql-job")]
        )
        cli_assert_equal(0, run_result)

        query_result: Result = runner.invoke(
            ["sqlite-query", "--query", "SELECT * FROM stocks"]
        )
        cli_assert_equal(0, query_result)
        # The sql-job inserts a GOOG row; seeing it proves the write happened.
        assert "GOOG" in query_result.output
def test_sqlite_ingestion(tmpdir):
    """Ingest a payload whose key contains a space into a pre-created table
    and verify the stored row via the sqlite-query CLI output."""
    db_dir = str(tmpdir) + "vdk-sqlite.db"
    with mock.patch.dict(
        os.environ,
        {
            "VDK_DB_DEFAULT_TYPE": "SQLITE",
            "VDK_SQLITE_FILE": db_dir,
        },
    ):
        # create table first, as the ingestion fails otherwise
        runner = CliEntryBasedTestRunner(sqlite_plugin)
        # NOTE(review): the backslash escape in 'some\ data' is unusual SQL for
        # SQLite (identifiers are normally quoted with "..." or [...]); the
        # invoke result is not checked, so confirm this actually creates a
        # 'some data' column rather than failing silently.
        runner.invoke(
            [
                "sqlite-query",
                "--query",
                r"CREATE TABLE test_table (some\ data TEXT, more_data TEXT)",
            ]
        )
        mock_sqlite_conf = mock.MagicMock(SQLiteConfiguration)
        sqlite_ingester = IngestToSQLite(mock_sqlite_conf)
        # Payload key 'some data' (with a space) must map onto the column above.
        payload = [{"some data": "some test data", "more_data": "more_test_data"}]

        sqlite_ingester.ingest_payload(
            payload=payload,
            destination_table="test_table",
            target=db_dir,
        )

        check_result = runner.invoke(
            ["sqlite-query", "--query", "SELECT * FROM test_table"]
        )
        # The CLI renders a fixed-width table; compare the full rendering.
        assert check_result.stdout == (
            "some data more_data\n"
            "-------------- --------------\n"
            "some test data more_test_data\n"
        )
def test_sqlite_ingestion_missing_dest_table(tmpdir):
    """When the destination table does not exist, ingestion must create it
    automatically, inferring column types from the payload values."""
    db_dir = str(tmpdir) + "vdk-sqlite.db"
    with mock.patch.dict(
        os.environ,
        {
            "VDK_DB_DEFAULT_TYPE": "SQLITE",
            "VDK_SQLITE_FILE": db_dir,
        },
    ):
        # create table first, as the ingestion fails otherwise
        runner = CliEntryBasedTestRunner(sqlite_plugin)
        mock_sqlite_conf = mock.MagicMock(SQLiteConfiguration)
        sqlite_ingester = IngestToSQLite(mock_sqlite_conf)
        # Two rows exercising every supported value type; the first row's
        # None in 'extra_col' checks NULL handling during type inference.
        payload = [
            {
                "str_col": "str_data",
                "int_col": 11,
                "bool_col": True,
                "bytes_col": b"bytes",
                "float_col": 1.23,
                "extra_col": None,
            },
            {
                "str_col": "str_data",
                "int_col": 11,
                "bool_col": True,
                "bytes_col": b"bytes",
                "float_col": 1.23,
                "extra_col": 1,
            },
        ]

        sqlite_ingester.ingest_payload(
            payload=payload,
            destination_table="auto_created_table",
            target=db_dir,
        )

        check_result = runner.invoke(
            ["sqlite-query", "--query", "SELECT * FROM auto_created_table"]
        )
        # Booleans are stored as 1/0 and the NULL cell renders empty.
        assert check_result.stdout == (
            "str_col int_col bool_col bytes_col float_col extra_col\n"
            "--------- --------- ---------- ----------- ----------- -----------\n"
            "str_data 11 1 bytes 1.23\n"
            "str_data 11 1 bytes 1.23 1\n"
        )
def METHOD_NAME(tmpdir):
    """Ingesting a payload whose keys do not match the destination table's
    columns must raise UserCodeError instead of silently dropping data."""
    db_dir = str(tmpdir) + "vdk-sqlite.db"
    with mock.patch.dict(
        os.environ,
        {
            "VDK_DB_DEFAULT_TYPE": "SQLITE",
            "VDK_SQLITE_FILE": db_dir,
        },
    ):
        # create table first, as the ingestion fails otherwise
        runner = CliEntryBasedTestRunner(sqlite_plugin)
        runner.invoke(
            [
                "sqlite-query",
                "--query",
                "CREATE TABLE test_table (wrong_column_name TEXT, more_data TEXT)",
            ]
        )
        mock_sqlite_conf = mock.MagicMock(SQLiteConfiguration)
        sqlite_ingester = IngestToSQLite(mock_sqlite_conf)
        # 'some_data' has no matching column in test_table -> mismatch.
        payload = [{"some_data": "some_test_data", "more_data": "more_test_data"}]

        with raises(UserCodeError):
            sqlite_ingester.ingest_payload(
                payload=payload,
                destination_table="test_table",
                target=db_dir,
            )
6,564 | test get field value | import copy
import os.path
import collections
from tests.conftest import TESTSDIR, data_dict
from processor.helper.json.json_utils import save_json_to_file, json_from_file,\
json_from_string, valid_json, check_field_exists, get_field_value, put_value,\
parse_boolean, set_timestamp, get_container_dir, get_container_snapshot_json_files,\
get_json_files
from processor.reporting.json_output import dump_output_results
def test_save_json_to_file(create_temp_dir):
    """save_json_to_file writes valid payloads and silently skips None
    payloads and paths whose parent directory is missing."""
    newpath = create_temp_dir()
    fname = '%s/a1.json' % newpath
    assert not os.path.exists(fname)
    # An empty dict is a valid payload and must create the file.
    save_json_to_file({}, fname)
    assert os.path.exists(fname)
    os.remove(fname)
    # A None payload must not create anything.
    save_json_to_file(None, fname)
    assert not os.path.exists(fname)
    # A non-empty dict is written normally.
    save_json_to_file({'a': 'b'}, fname)
    assert os.path.exists(fname)
    os.remove(fname)
    # A path under a non-existent directory is ignored without raising.
    fname = '%s/a/a1.json' % newpath
    assert not os.path.exists(fname)
    save_json_to_file({'a': 'b'}, fname)
    assert not os.path.exists(fname)
def test_json_from_file(create_temp_dir, create_temp_json, create_temp_text):
    """json_from_file parses JSON files and returns None for everything else."""
    newpath = create_temp_dir()
    # An existing plain-text file is not valid JSON -> None.
    text_path = '%s/%s' % (newpath, create_temp_text(newpath))
    assert os.path.exists(text_path)
    assert json_from_file(text_path) is None
    # A real JSON file parses into an OrderedDict (key order preserved).
    json_path = '%s/%s' % (newpath, create_temp_json(newpath))
    assert os.path.exists(json_path)
    parsed = json_from_file(json_path)
    assert parsed is not None
    assert isinstance(parsed, collections.OrderedDict)
    # A None path and a missing file both yield None.
    assert json_from_file(None) is None
    assert json_from_file('/tmp/xyza.json') is None
def test_json_from_string(create_temp_dir, create_temp_json):
    """json_from_string parses valid JSON text and returns None otherwise."""
    newpath = create_temp_dir()
    fullpath = '%s/%s' % (newpath, create_temp_json(newpath))
    assert os.path.exists(fullpath)
    with open(fullpath) as f:
        data_str = f.read()
    assert data_str is not None
    # The fixture file's content is valid JSON.
    assert json_from_string(data_str) is not None
    # Non-JSON text and None both yield None.
    assert json_from_string('abcd') is None
    assert json_from_string(None) is None
def test_valid_json(create_temp_dir, create_temp_json):
    """valid_json is True only for parseable JSON strings."""
    newpath = create_temp_dir()
    fullpath = '%s/%s' % (newpath, create_temp_json(newpath))
    assert os.path.exists(fullpath)
    with open(fullpath) as f:
        data_str = f.read()
    assert data_str is not None
    # The fixture file's content must validate.
    assert valid_json(data_str) == True
    # Arbitrary text and None must not.
    assert valid_json('abcd') == False
    assert valid_json(None) == False
def test_check_field_exists():
    """check_field_exists handles bad arguments and dotted key paths."""
    # Missing data, missing path, or a non-existent leaf never match.
    assert check_field_exists(data_dict, None) == False
    assert check_field_exists(None, 'c.d') == False
    assert check_field_exists(data_dict, 'c.d.e') == False
    # Top-level keys and nested dotted paths resolve.
    assert check_field_exists(data_dict, 'a') == True
    assert check_field_exists(data_dict, 'c.d') == True
    assert check_field_exists(data_dict, 'f.g.h') == True
def METHOD_NAME():
    """get_field_value resolves dotted paths; failures return None."""
    # Bad arguments and non-existent paths give None.
    assert get_field_value(data_dict, None) is None
    assert get_field_value(None, 'c.d') is None
    assert get_field_value(data_dict, 'c.d.e') is None
    # Leaf lookups at increasing depth.
    assert get_field_value(data_dict, 'a') == 'b'
    assert get_field_value(data_dict, 'c.d') == 'e'
    assert get_field_value(data_dict, 'f.g.h') == 1
    # An intermediate path returns the whole sub-dict.
    assert get_field_value(data_dict, 'f.g') == {'h': 1}
def test_put_value():
    """put_value creates or overwrites nested keys addressed by dotted paths."""
    data_new = copy.deepcopy(data_dict)  # keep the shared fixture intact
    put_value(data_new, 'a.b', 1)
    assert 1 == get_field_value(data_new, 'a.b')
    # A leading dot is tolerated and addresses the same path.
    put_value(data_new, '.a.b', 2)
    assert 2 == get_field_value(data_new, 'a.b')
    # Intermediate dicts are created on demand for deep paths.
    put_value(data_new, 'm.n.o', {'a': {'b': 'c'}})
    assert {'a': {'b': 'c'}} == get_field_value(data_new, 'm.n.o')
def test_get_boolean():
    """parse_boolean is case-insensitive; None and 'False' map to False."""
    for falsy in (None, 'False'):
        assert parse_boolean(falsy) == False
    for truthy in ('true', 'TrUE'):
        assert parse_boolean(truthy) == True
def test_set_timestamp():
    """set_timestamp only stamps dict payloads, storing an integer value.

    Covers: non-dict inputs are rejected, the default field name is
    'timestamp', and a caller-supplied field name ('ts') is honored.
    """
    # Non-dict payloads are rejected; an empty dict is acceptable.
    assert False == set_timestamp(None)
    assert False == set_timestamp([1, 2, 3])
    assert True == set_timestamp({})
    # Default field name: 'timestamp', populated with a truthy int.
    time_data = {'a': 1}
    assert True == set_timestamp(time_data)
    # bool(...) replaces the redundant 'True if X else False' construct.
    ts = time_data.get('timestamp')
    assert True == bool(ts and isinstance(ts, int))
    # Custom field name: 'ts'.
    time_data = {'a': 1}
    assert True == set_timestamp(time_data, 'ts')
    ts = time_data.get('ts')
    assert True == bool(ts and isinstance(ts, int))
def skiptest_get_container_dir():
    """Disabled test (the 'skiptest_' prefix keeps pytest from collecting it):
    get_container_dir should resolve to the container's validation directory."""
    container = 'container1'
    mytest_dir = '%s/realm/validation/%s' % (TESTSDIR, container)
    os.chdir(mytest_dir)  # NOTE: changes the process CWD and never restores it
    mytest_curdir = os.getcwd()
    test_dir = get_container_dir(container)
    os.chdir(test_dir)
    test_curdir = os.getcwd()
    # Both paths must point at the same physical directory.
    assert mytest_curdir == test_curdir
def test_get_container_snapshot_json_files():
    """Snapshot JSON files can be listed for the fixture container."""
    files = get_container_snapshot_json_files('container1')
    assert files is not None
def skiptest_get_json_files():
    """Disabled test: get_json_files filters a directory by file type and
    always returns a list, even for non-existent directories."""
    container = 'container1'
    mytest_dir = '%s/realm/validation/%s' % (TESTSDIR, container)
    files = get_json_files(mytest_dir, 'snapshot')
    assert True == isinstance(files, list)
    files = get_json_files(mytest_dir, 'test')
    # 'test' files are expected to exist in the fixture container.
    assert True == isinstance(files, list) and len(files) > 0
    # A bogus directory still yields a (presumably empty) list.
    files = get_json_files('/a/b/c', 'txt')
    assert True == isinstance(files, list)
def skiptest_dump_output_results():
    """Disabled test: dump_output_results writes an output-<test>.json file
    next to the given test file."""
    container = 'container1'
    test_file = '%s/realm/validation/%s/test1.json' % (TESTSDIR, container)
    outputtest_file = '%s/realm/validation/%s/output-test1.json' % (TESTSDIR, container)
    # Start from a clean slate so the existence check below is meaningful.
    file_exists = os.path.exists(outputtest_file)
    if file_exists:
        os.remove(outputtest_file)
    dump_output_results([], container, test_file, 'snapshot')
    file_exists = os.path.exists(outputtest_file)
    assert True == file_exists
    os.remove(outputtest_file)
6,565 | virtual hub | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
# Public API of this generated module.
__all__ = [
    'GetAzureTrafficCollectorResult',
    'AwaitableGetAzureTrafficCollectorResult',
    'get_azure_traffic_collector',
    'get_azure_traffic_collector_output',
]
@pulumi.output_type
class GetAzureTrafficCollectorResult:
    """
    Azure Traffic Collector resource.

    Generated output type: values are stored via ``pulumi.set`` under their
    wire names and surfaced through the read-only properties below.
    """
    # NOTE: 'METHOD_NAME' is a placeholder identifier; it maps to the
    # 'virtual_hub' field throughout this class.
    def __init__(__self__, collector_policies=None, etag=None, id=None, location=None, name=None, provisioning_state=None, system_data=None, tags=None, type=None, METHOD_NAME=None):
        # Each argument is type-checked against its expected wire shape
        # before being stored on the output object.
        if collector_policies and not isinstance(collector_policies, list):
            raise TypeError("Expected argument 'collector_policies' to be a list")
        pulumi.set(__self__, "collector_policies", collector_policies)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if METHOD_NAME and not isinstance(METHOD_NAME, dict):
            raise TypeError("Expected argument 'virtual_hub' to be a dict")
        pulumi.set(__self__, "virtual_hub", METHOD_NAME)

    @property
    @pulumi.getter(name="collectorPolicies")
    def collector_policies(self) -> Optional[Sequence['outputs.CollectorPolicyResponse']]:
        """
        Collector Policies for Azure Traffic Collector.
        """
        return pulumi.get(self, "collector_policies")

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the application rule collection resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.TrackedResourceResponseSystemData':
        """
        Metadata pertaining to creation and last modification of the resource.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="virtualHub")
    def METHOD_NAME(self) -> Optional['outputs.ResourceReferenceResponse']:
        """
        The virtualHub to which the Azure Traffic Collector belongs.
        """
        return pulumi.get(self, "virtual_hub")
class AwaitableGetAzureTrafficCollectorResult(GetAzureTrafficCollectorResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        """Make the result awaitable; it resolves immediately since all
        values are already present (the dead 'if False: yield' only turns
        this method into a generator, as required by the await protocol)."""
        if False:
            yield self
        return GetAzureTrafficCollectorResult(
            collector_policies=self.collector_policies,
            etag=self.etag,
            id=self.id,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            system_data=self.system_data,
            tags=self.tags,
            type=self.type,
            METHOD_NAME=self.METHOD_NAME)
def get_azure_traffic_collector(azure_traffic_collector_name: Optional[str] = None,
                                resource_group_name: Optional[str] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAzureTrafficCollectorResult:
    """
    Gets the specified Azure Traffic Collector in a specified resource group


    :param str azure_traffic_collector_name: Azure Traffic Collector name
    :param str resource_group_name: The name of the resource group.
    :param pulumi.InvokeOptions opts: Options to customize the invoke call.
    """
    __args__ = dict()
    __args__['azureTrafficCollectorName'] = azure_traffic_collector_name
    __args__['resourceGroupName'] = resource_group_name
    # Merge caller-supplied invoke options over the provider defaults.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:networkfunction/v20220801:getAzureTrafficCollector', __args__, opts=opts, typ=GetAzureTrafficCollectorResult).value

    # Unpack the raw invoke result into the typed awaitable wrapper.
    return AwaitableGetAzureTrafficCollectorResult(
        collector_policies=pulumi.get(__ret__, 'collector_policies'),
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        type=pulumi.get(__ret__, 'type'),
        METHOD_NAME=pulumi.get(__ret__, 'virtual_hub'))
@_utilities.lift_output_func(get_azure_traffic_collector)
def get_azure_traffic_collector_output(azure_traffic_collector_name: Optional[pulumi.Input[str]] = None,
                                       resource_group_name: Optional[pulumi.Input[str]] = None,
                                       opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAzureTrafficCollectorResult]:
    """
    Gets the specified Azure Traffic Collector in a specified resource group


    :param str azure_traffic_collector_name: Azure Traffic Collector name
    :param str resource_group_name: The name of the resource group.
    """
    # Body intentionally empty: lift_output_func wraps the plain invoke
    # above into this Output-returning variant.
    ...
6,566 | stop | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2022 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Instrumentation to profile resource utilization."""
from time import time_ns, sleep
from datetime import datetime
from pathlib import Path
from multiprocessing import Process, Event
from contextlib import suppress
import signal
import psutil
_MB = 1024.0**2
SAMPLE_ATTRS = (
"pid",
"name",
# "cmdline",
"cpu_num",
"cpu_percent",
"memory_info",
"num_threads",
"num_fds",
)
def FindProcess(process_name):
    """
    Find a process by its name and return its PID. Child processes are excluded.

    Parameters
    ----------
    process_name : :obj:`str`
        The name of the process that must be found.

    Return
    ----------
    PID of the process if found, False if the process is not found
    """
    for proc in psutil.process_iter():
        try:
            if process_name == proc.name():
                parent = proc.parent()
                # psutil.Process.parent() returns None for processes without a
                # reachable parent; the previous code then crashed with an
                # uncaught AttributeError on parent.name(). A parentless match
                # cannot be a child of a same-named process, so it qualifies.
                if parent is None or parent.name() != process_name:
                    return proc.pid
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            pass
    print("Process ", process_name, " not found")
    return False
def sample(
    pid=None,
    recursive=True,
    attrs=SAMPLE_ATTRS,
    exclude=tuple(),
):
    """
    Probe process tree and snapshot current resource utilization.

    Parameters
    ----------
    pid : :obj:`int` or :obj:`None`
        The process ID that must be sampled. If ``None`` then it samples the
        current process from which ``sample()`` has been called.
    recursive : :obj:`bool`
        Whether the sampler should descend and explore the whole process tree.
    attrs : :obj:`iterable` of :obj:`str`
        A list of :obj:`psutil.Process` attribute names that will be retrieved when
        sampling.
    exclude : :obj:`tuple` of :obj:`int`
        PIDs that should be skipped when collecting samples.

    Returns
    -------
    :obj:`list` of :obj:`dict`
        One attribute dictionary per surviving, non-excluded process.
    """
    proc_list = [psutil.Process(pid)]
    if proc_list and recursive:
        # Children may die between discovery and inspection; ignore them.
        with suppress(psutil.NoSuchProcess):
            proc_list += proc_list[0].children(recursive=True)

    proc_info = []
    for process in proc_list:
        if process.pid in exclude:
            continue
        with suppress(psutil.NoSuchProcess):
            proc_info.append(process.as_dict(attrs=attrs))
    return proc_info
def parse_sample(datapoint, timestamp=None, attrs=SAMPLE_ATTRS):
    """Convert one sampled attribute dict into a flat list of string cells.

    The first cell is the timestamp (``time_ns()`` when not supplied);
    attributes missing from *datapoint* are skipped entirely.
    """
    row = [f"{timestamp or time_ns()}"]
    for attr in attrs:
        raw = datapoint.get(attr, None)
        if raw is None:
            continue
        if attr == "cmdline":
            # Join argv into one quoted cell, escaping embedded quotes.
            joined = " ".join(raw).replace("'", "\\'").replace('"', '\\"')
            row.append(f"'{joined}'")
        elif attr == "memory_info":
            # memory_info expands into two cells: RSS and VMS, both in MB.
            row.extend((f"{raw.rss / _MB}", f"{raw.vms / _MB}"))
        else:
            row.append(f"{raw}")
    return row
def sample2file(
    pid=None, recursive=True, timestamp=None, fd=None, flush=True, exclude=tuple()
):
    """Take one sample (see :func:`sample`) and append it to ``fd`` as
    tab-separated rows, one row per sampled process.

    No-op when ``fd`` is None; flushes after writing unless ``flush`` is False.
    """
    if fd is None:
        return

    print(
        "\n".join(
            [
                "\t".join(parse_sample(s, timestamp=timestamp))
                for s in sample(pid=pid, recursive=recursive, exclude=exclude)
            ]
        ),
        file=fd,
    )
    if flush:
        fd.flush()
class ResourceRecorder(Process):
    """Attach a ``Thread`` to sample a specific PID with a certain frequency."""

    def __init__(
        self, pid, frequency=0.2, log_file=None, exclude_probe=True, **process_kwargs
    ):
        Process.__init__(self, name="nipype_resmon", daemon=True, **process_kwargs)
        self._pid = int(pid)
        """The process to be sampled."""
        self._logfile = str(
            Path(log_file if log_file is not None else f".prof-{pid}.tsv").absolute()
        )
        """An open file descriptor where results are dumped."""
        self._exclude = exclude_probe or tuple()
        """A list/tuple containing PIDs that should not be monitored."""
        # Frequency is clamped to a 20 ms minimum and kept in nanoseconds.
        self._freq_ns = int(max(frequency, 0.02) * 1e9)
        """Sampling frequency (stored in ns)."""
        self._done = Event()
        """Flag indicating if the process is marked to finish."""
        # Stop cleanly (footer written) on interruption/termination.
        # NOTE(review): these handlers are installed in the *calling* process,
        # since __init__ runs before start() forks — confirm this is intended.
        signal.signal(signal.SIGINT, self.METHOD_NAME)
        signal.signal(signal.SIGTERM, self.METHOD_NAME)

    def run(self, *args, **kwargs):
        """Core monitoring function, called by start()"""
        # Open file now, because it cannot be pickled.
        Path(self._logfile).parent.mkdir(parents=True, exist_ok=True)
        _logfile = Path(self._logfile).open("w")

        # Write headers (comment trace + header row)
        _header = [
            f"# MRIQC Resource recorder started tracking PID {self._pid} "
            f"{datetime.now().strftime('(%Y/%m/%d; %H:%M:%S)')}",
            "\t".join(("timestamp", *SAMPLE_ATTRS)).replace(
                "memory_info", "mem_rss_mb\tmem_vsm_mb"
            ),
        ]
        print("\n".join(_header), file=_logfile)

        # Add self to exclude list if pertinent
        if self._exclude is True:
            self._exclude = (psutil.Process().pid,)

        # Ensure done is not marked set
        self._done.clear()

        # Initiate periodic sampling
        start_time = time_ns()
        wait_til = start_time
        while not self._done.is_set():
            try:
                sample2file(self._pid, fd=_logfile, timestamp=wait_til)
            except psutil.NoSuchProcess:
                # Target process vanished: leave a footer and stop sampling.
                print(
                    f"# MRIQC Resource recorder killed "
                    f"{datetime.now().strftime('(%Y/%m/%d; %H:%M:%S)')}",
                    file=_logfile,
                )
                _logfile.flush()
                _logfile.close()
                break

            # Sleep until the next slot of the fixed cadence (drift-free).
            wait_til += self._freq_ns
            sleep(max(0, (wait_til - time_ns()) / 1.0e9))

        # Closing an already-closed file is a no-op, so the break path is safe.
        _logfile.close()

    def METHOD_NAME(self, *args):
        """Stop sampling; also serves as the SIGINT/SIGTERM handler
        (handler arguments are absorbed and ignored via ``*args``)."""
        # Tear-down process
        self._done.set()
        with Path(self._logfile).open("a") as f:
            f.write(
                f"# MRIQC Resource recorder finished "
                f"{datetime.now().strftime('(%Y/%m/%d; %H:%M:%S)')}",
            )
6,567 | test l10n nl | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <dr.prodigy.github@gmail.com> (c) 2017-2023
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from holidays.countries.curacao import Curacao, CW, CUW
from tests.common import TestCase
class TestCuracao(TestCase):
    """Holiday-calendar tests for Curaçao (aliases CW / CUW)."""

    @classmethod
    def setUpClass(cls):
        # Pre-build Curacao calendars for the whole range under test.
        super().setUpClass(Curacao, years=range(1954, 2077))

    def test_country_aliases(self):
        """The CW and CUW aliases resolve to the Curacao class."""
        self.assertCountryAliases(Curacao, CW, CUW)

    def test_no_holidays(self):
        """No holidays are defined before 1954."""
        self.assertNoHolidays(Curacao(years=1953))

    def test_2016(self):
        """Spot-check the complete 2016 calendar (default Papiamentu names)."""
        self.assertHolidays(
            Curacao(years=2016),
            ("2016-01-01", "Aña Nobo"),
            ("2016-02-08", "Dialuna despues di Carnaval Grandi"),
            ("2016-03-25", "Bièrnèsantu"),
            ("2016-03-27", "Pasku di Resurekshon"),
            ("2016-03-28", "Di dos dia di Pasku di Resurekshon"),
            ("2016-04-27", "Dia di Rey"),
            ("2016-05-02", "Dia di Obrero"),
            ("2016-05-05", "Dia di Asenshon"),
            ("2016-07-02", "Dia di Himno i Bandera"),
            ("2016-10-10", "Dia di Pais Kòrsou"),
            ("2016-12-25", "Pasku di Nasementu"),
            ("2016-12-26", "Di dos dia di Pasku di Nasementu"),
        )

    def test_queens_day(self):
        """Queen's Day observances through 2013; the name disappears in 2014.

        The listed dates show the observed day shifting off April 30 in
        several years; the assertNoHoliday block pins the unshifted dates
        that must NOT be holidays in those years.
        """
        name = "Dia di la Reina"
        self.assertHolidayName(
            name,
            "1961-05-01",
            "1965-04-30",
            "1967-05-01",
            "1972-05-01",
            "1978-05-01",
            "1989-04-29",
            "1995-04-29",
            "2000-04-29",
            "2006-04-29",
            "2013-04-30",
        )
        self.assertNoHoliday(
            "1961-04-30",
            "1967-04-30",
            "1972-04-30",
            "1978-04-30",
            "1995-04-30",
            "1989-04-30",
            "2000-04-30",
            "2006-04-30",
        )
        self.assertNoHolidayName(name, 2014)

    def test_king_day(self):
        """King's Day starts in 2014 (not 2013) and shifts off April 27
        in the years asserted below."""
        name = "Dia di Rey"
        self.assertNoHolidayName(name, 2013)
        self.assertHolidayName(
            name,
            "2016-04-27",
            "2017-04-27",
            "2018-04-27",
            "2019-04-27",
            "2020-04-27",
            "2021-04-27",
            "2022-04-27",
            "2023-04-27",
            "2024-04-27",
            "2025-04-26",
            "2031-04-26",
            "2036-04-26",
        )
        self.assertNoHoliday(
            "2014-04-27",
            "2025-04-27",
            "2031-04-27",
            "2036-04-27",
        )

    def test_labor_day(self):
        """Labor Day is observed on May 1, shifted in the years listed."""
        self.assertHolidayName(
            "Dia di Obrero",
            "2016-05-02",
            "2017-05-01",
            "2018-05-01",
            "2019-05-01",
            "2020-05-01",
            "2021-05-01",
            "2022-05-02",
            "2023-05-01",
        )
        self.assertNoHoliday(
            "2011-05-01",
            "2016-05-01",
            "2022-05-01",
        )

    def test_anthem_and_flag_day(self):
        """Anthem and Flag Day: July 2, from 1984 onward."""
        name = "Dia di Himno i Bandera"
        self.assertNoHolidayName(name, 1983)
        self.assertHolidayName(name, (f"{year}-07-02" for year in range(1984, 2077)))

    def test_curacao_day(self):
        """Curaçao Day: October 10, from 2010 onward."""
        name = "Dia di Pais Kòrsou"
        self.assertNoHolidayName(name, 2009)
        self.assertHolidayName(name, (f"{year}-10-10" for year in range(2010, 2077)))

    def test_l10n_default(self):
        """Default locale (Papiamentu) names for the full 2023 calendar."""
        self.assertLocalizedHolidays(
            ("2023-01-01", "Aña Nobo"),
            ("2023-02-20", "Dialuna despues di Carnaval Grandi"),
            ("2023-04-07", "Bièrnèsantu"),
            ("2023-04-09", "Pasku di Resurekshon"),
            ("2023-04-10", "Di dos dia di Pasku di Resurekshon"),
            ("2023-04-27", "Dia di Rey"),
            ("2023-05-01", "Dia di Obrero"),
            ("2023-05-18", "Dia di Asenshon"),
            ("2023-07-02", "Dia di Himno i Bandera"),
            ("2023-10-10", "Dia di Pais Kòrsou"),
            ("2023-12-25", "Pasku di Nasementu"),
            ("2023-12-26", "Di dos dia di Pasku di Nasementu"),
        )

    def test_l10n_en_us(self):
        """US-English localization for the full 2023 calendar."""
        self.assertLocalizedHolidays(
            "en_US",
            ("2023-01-01", "New Year's Day"),
            ("2023-02-20", "Carnival Monday"),
            ("2023-04-07", "Good Friday"),
            ("2023-04-09", "Easter Sunday"),
            ("2023-04-10", "Easter Monday"),
            ("2023-04-27", "King's Day"),
            ("2023-05-01", "Labor Day"),
            ("2023-05-18", "Ascension Day"),
            ("2023-07-02", "National Anthem and Flag Day"),
            ("2023-10-10", "Curaçao Day"),
            ("2023-12-25", "Christmas Day"),
            ("2023-12-26", "Second Day of Christmas"),
        )

    def METHOD_NAME(self):
        """Dutch localization for the full 2023 calendar."""
        self.assertLocalizedHolidays(
            "nl",
            ("2023-01-01", "Nieuwjaarsdag"),
            ("2023-02-20", "De maandag na de Grote Karnaval"),
            ("2023-04-07", "Goede Vrijdag"),
            ("2023-04-09", "Paasdag"),
            ("2023-04-10", "Tweede paasdag"),
            ("2023-04-27", "Koningsdag"),
            ("2023-05-01", "Dag van de Arbeid"),
            ("2023-05-18", "Hemelvaartsdag"),
            ("2023-07-02", "Nationale vlag en volkslied"),
            ("2023-10-10", "Dag van Land Curaçao"),
            ("2023-12-25", "Kerst"),
            ("2023-12-26", "Tweede kerstdag"),
        )

    def test_l10n_uk(self):
        """Ukrainian localization for the full 2023 calendar."""
        self.assertLocalizedHolidays(
            "uk",
            ("2023-01-01", "Новий рік"),
            ("2023-02-20", "Карнавальний понеділок"),
            ("2023-04-07", "Страсна пʼятниця"),
            ("2023-04-09", "Великдень"),
            ("2023-04-10", "Великодній понеділок"),
            ("2023-04-27", "День короля"),
            ("2023-05-01", "День праці"),
            ("2023-05-18", "Вознесіння Господнє"),
            ("2023-07-02", "День державного гімну та прапора"),
            ("2023-10-10", "День Кюрасао"),
            ("2023-12-25", "Різдво Христове"),
            ("2023-12-26", "Другий день Різдва"),
        )
6,568 | test step | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test DistributionStrategy in the zero batch case."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import normalization
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class NormalizationTest(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.one_device_strategy,
],
mode=["graph"],
fused=[True, False]))
def testBNWithZeroBatchInputGraph(self, distribution, fused):
distribution.extended.experimental_enable_get_next_as_optional = True
with distribution.scope(), self.cached_session() as sess:
bn_list = []
inputs = np.random.random((0, 4, 4, 3)) + 100
targets = np.random.random((0, 4, 4, 3))
inputs_placeholder = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, 4, 4, 3])
targets_placeholder = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, 4, 4, 3])
def step_fn(is_training, inputs, targets=None):
bn = normalization.BatchNormalization(
axis=3, epsilon=1e-3, momentum=0.9, fused=fused)
bn_list.append(bn)
outputs = bn.apply(inputs, training=is_training)
if not is_training:
return outputs
loss = losses.mean_squared_error(targets, outputs)
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
train_op = optimizer.minimize(loss)
with ops.control_dependencies([train_op]):
return array_ops.identity(loss)
train_op = distribution.extended.call_for_each_replica(
step_fn, args=(True, inputs_placeholder, targets_placeholder))
predict_op = distribution.extended.call_for_each_replica(
step_fn, args=(False, inputs_placeholder))
bn = bn_list[0]
self.evaluate(variables.global_variables_initializer())
# Check for initial statistics and weights.
moving_mean, moving_var = self.evaluate(
[bn.moving_mean, bn.moving_variance])
self.assertAllEqual([0, 0, 0], moving_mean)
self.assertAllEqual([1, 1, 1], moving_var)
np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
self.assertAllEqual([1, 1, 1], np_gamma)
self.assertAllEqual([0, 0, 0], np_beta)
for _ in range(100):
np_output, _, _ = sess.run([train_op] + bn.updates, {
inputs_placeholder: inputs,
targets_placeholder: targets
})
self.assertEqual(0.0, np_output)
# Verify that the statistics and weights are not changed after training.
moving_mean, moving_var = self.evaluate(
[bn.moving_mean, bn.moving_variance])
self.assertAllEqual([0, 0, 0], moving_mean)
self.assertAllEqual([1, 1, 1], moving_var)
np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta])
self.assertAllEqual([1, 1, 1], np_gamma)
self.assertAllEqual([0, 0, 0], np_beta)
# Test inference.
np_output = sess.run(predict_op, {inputs_placeholder: inputs})
self.assertEqual([], np_output.tolist())
# Regression test: BatchNormalization must tolerate a zero-sized batch when
# running under a distribution strategy, for both fused and unfused kernels.
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.one_device_strategy,
        ],
        mode=["eager"],
        fused=[True, False]))
def testBNWithZeroBatchInput(self, distribution, fused):
    """Train and run inference on empty (batch size 0) inputs.

    With no examples, the per-step loss must be 0 and the layer's
    statistics (moving mean/variance) and weights (gamma/beta) must keep
    their initial values.
    """
    # Allow the strategy to surface empty per-replica batches instead of
    # treating them as end-of-input.
    distribution.extended.experimental_enable_get_next_as_optional = True
    with distribution.scope():
        # Batch dimension is 0 on purpose; +100 shifts inputs away from the
        # initial moving mean so any spurious statistics update would show.
        inputs = np.random.random((0, 4, 4, 3)).astype(np.float32) + 100
        targets = np.random.random((0, 4, 4, 3)).astype(np.float32)
        bn = normalization.BatchNormalization(
            axis=3, epsilon=1e-3, momentum=0.9, fused=fused)
        optimizer = gradient_descent.GradientDescentOptimizer(0.01)

        @def_function.function
        def train_step():
            def step_fn(inputs, targets):
                with backprop.GradientTape() as tape:
                    outputs = bn.apply(inputs, training=True)
                    loss = losses.mean_squared_error(targets, outputs)
                grads = tape.gradient(loss, bn.variables)
                optimizer.apply_gradients(zip(grads, bn.variables))
                return loss

            return distribution.experimental_run_v2(
                step_fn, args=(inputs, targets))

        for _ in range(100):
            np_output = train_step().numpy()
            self.assertEqual(0.0, np_output)

        # Verify that the statistics and weights are not changed after training.
        self.assertAllEqual([0, 0, 0], bn.moving_mean.numpy())
        self.assertAllEqual([1, 1, 1], bn.moving_variance.numpy())
        self.assertAllEqual([1, 1, 1], bn.gamma.numpy())
        self.assertAllEqual([0, 0, 0], bn.beta.numpy())

        @def_function.function
        def METHOD_NAME():
            def step_fn(inputs):
                outputs = bn.apply(inputs, training=False)
                return outputs

            return distribution.experimental_run_v2(
                step_fn, args=(inputs,))

        # Test inference: an empty batch in must yield an empty batch out.
        self.assertAllEqual(np.zeros(shape=(0, 4, 4, 3), dtype=np.float32),
                            METHOD_NAME().numpy())
# Allow running this test file directly.
if __name__ == "__main__":
    test.main()
6,569 | deal with discrete | # Copyright 2022 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing helper functions to translate objects that come
to/from the grpc API into the format accepted/returned by the different
suggestion generation algorithms.
"""
from collections.abc import Iterable
from pkg.apis.manager.v1beta1.python import api_pb2
import numpy as np
from pkg.suggestion.v1beta1.hyperband.parameter import ParameterConfig
def METHOD_NAME(feasible_values, current_value):
    """Snap ``current_value`` onto the closest entry of the discrete space.

    Returns the element of ``feasible_values`` with the smallest absolute
    distance to ``current_value``; ties resolve to the first such element.
    """
    distances = np.abs(np.asarray(feasible_values) - current_value)
    closest_index = int(np.argmin(distances))
    return feasible_values[closest_index]
def _deal_with_categorical(feasible_values, one_hot_values):
""" function to do the one hot encoding of the categorical values """
index = np.argmax(one_hot_values)
return feasible_values[int(index)]
def parse_parameter_configs(parameter_configs):
    """Flatten a list of experiment parameter configs into a ParameterConfig.

    Double/int parameters contribute one dimension bounded by min/max;
    discrete parameters contribute one dimension bounded by their extreme
    values; categorical parameters are one-hot encoded and contribute one
    [0, 1] dimension per feasible value.
    """
    name_ids = {}  # parameter name -> index in parameter_configs
    dim = 0  # total dimensionality of the flattened search space
    lower_bounds = []
    upper_bounds = []
    parameter_types = []
    names = []
    discrete_info = []
    categorical_info = []
    for param_idx, param in enumerate(parameter_configs):
        name_ids[param.name] = param_idx
        parameter_types.append(param.parameter_type)
        names.append(param.name)
        if param.parameter_type == api_pb2.DOUBLE:
            new_lower = float(param.feasible_space.min)
            new_upper = float(param.feasible_space.max)
        elif param.parameter_type == api_pb2.INT:
            new_lower = int(param.feasible_space.min)
            new_upper = int(param.feasible_space.max)
        elif param.parameter_type == api_pb2.DISCRETE:
            # Bound the dimension by the extreme feasible values; the exact
            # values are kept so suggestions can later be snapped back.
            discrete_values = [int(x) for x in param.feasible_space.list]
            new_lower = min(discrete_values)
            new_upper = max(discrete_values)
            discrete_info.append(
                {"name": param.name, "values": discrete_values})
        elif param.parameter_type == api_pb2.CATEGORICAL:
            # One-hot encoding: one [0, 1] dimension per feasible value.
            num_feasible = len(param.feasible_space.list)
            new_lower = [0 for _ in range(num_feasible)]
            new_upper = [1 for _ in range(num_feasible)]
            categorical_info.append({
                "name": param.name,
                "values": param.feasible_space.list,
                "number": num_feasible,
            })
        # NOTE(review): an unrecognized parameter type would leave
        # ``new_lower`` unbound and raise NameError below -- presumably
        # inputs are validated upstream; confirm.
        if isinstance(new_lower, Iterable):  # handles categorical parameters
            lower_bounds.extend(new_lower)
            upper_bounds.extend(new_upper)
            dim += len(new_lower)
        else:  # handles ints, doubles, and discrete parameters
            lower_bounds.append(new_lower)
            upper_bounds.append(new_upper)
            dim += 1
    parsed_config = ParameterConfig(name_ids,
                                    dim,
                                    lower_bounds,
                                    upper_bounds,
                                    parameter_types,
                                    names,
                                    discrete_info,
                                    categorical_info)
    return parsed_config
def parse_previous_observations(parameters_list, dim, name_id, types, categorical_info):
    """Encode previously observed trials as a (n_trials, dim) feature matrix.

    Numeric and discrete parameters are written as floats into a single
    column; categorical parameters are one-hot encoded across ``number``
    consecutive columns. Columns are laid out in the order parameters
    appear in each trial's assignment list.
    """
    parsed_X = np.zeros(shape=(len(parameters_list), dim))
    for row_idx, parameters in enumerate(parameters_list):
        offset = 0  # next free column in this trial's row
        for p in parameters:
            map_id = name_id[p.name]
            if types[map_id] in [api_pb2.DOUBLE, api_pb2.INT,
                                 api_pb2.DISCRETE]:
                parsed_X[row_idx, offset] = float(p.value)
                offset += 1
            elif types[map_id] == api_pb2.CATEGORICAL:
                for ci in categorical_info:
                    if ci["name"] == p.name:
                        # Set the one-hot slot of the observed category.
                        value_num = ci["values"].index(p.value)
                        parsed_X[row_idx, offset + value_num] = 1
                        offset += ci["number"]
    return parsed_X
def parse_metric(y_train, goal):
    """Convert observed metric values into a float64 numpy array.

    Values are negated for MINIMIZE experiments so downstream code can
    always treat the objective as a maximization.
    """
    values = np.array(y_train, dtype=np.float64)
    return -values if goal == api_pb2.MINIMIZE else values
def parse_x_next_vector(x_next, param_types, param_names, discrete_info, categorical_info):
    """Decode a flattened suggestion vector back into named assignments.

    ``x_next`` uses the layout produced by ``parse_parameter_configs``:
    one slot per numeric/discrete parameter and ``number`` one-hot slots
    per categorical parameter. Returns a list of
    ``{"name": ..., "value": ..., "type": ...}`` dicts.
    """
    counter = 0  # current read position inside the flattened vector
    result = []
    if isinstance(x_next, np.ndarray):
        # Optimizers may hand back a (1, dim) array; flatten it to (dim,).
        x_next = x_next.squeeze(axis=0)
    for par_type, par_name in zip(param_types, param_names):
        if par_type == api_pb2.INT:
            value = int(round(x_next[counter], 0))
            counter = counter + 1
        elif par_type == api_pb2.DOUBLE:
            value = float(x_next[counter])
            counter = counter + 1
        elif par_type == api_pb2.DISCRETE:
            # Snap the continuous suggestion onto the closest feasible value.
            for param in discrete_info:
                if param["name"] == par_name:
                    value = METHOD_NAME(param["values"],
                                        x_next[counter])
                    counter = counter + 1
                    break
        elif par_type == api_pb2.CATEGORICAL:
            # Decode the one-hot block back to its category label.
            for param in categorical_info:
                if param["name"] == par_name:
                    value = _deal_with_categorical(
                        feasible_values=param["values"],
                        one_hot_values=x_next[counter:counter + param["number"]],
                    )
                    counter = counter + param["number"]
                    break
        # NOTE(review): a parameter type outside the four handled kinds
        # would silently reuse the previous iteration's ``value`` --
        # presumably inputs are validated upstream; confirm.
        result.append({"name": par_name, "value": value, "type": par_type})
    return result
def parse_x_next_tuple(x_next, param_types, param_names):
    """Pair suggestion values with their parameter names and types.

    The three inputs are zipped positionally; every value is stringified.
    """
    return [
        {"name": name, "type": kind, "value": str(raw)}
        for raw, kind, name in zip(x_next, param_types, param_names)
    ]
6,570 | test enqueue in a function sets time | import datetime
import pytest
from kolibri.core.tasks.exceptions import JobRunning
from kolibri.core.tasks.job import Job
from kolibri.core.tasks.storage import Storage
from kolibri.core.tasks.test.base import connection
from kolibri.utils.time_utils import local_now
from kolibri.utils.time_utils import naive_utc_datetime
@pytest.fixture
def job_storage():
    """Yield a Storage bound to a fresh connection, cleared before and after.

    Clearing on both sides keeps tests isolated even if a previous run
    leaked jobs into the backend.
    """
    with connection() as c:
        s = Storage(connection=c)
        s.clear(force=True)
        yield s
        s.clear(force=True)
@pytest.mark.django_db
class TestScheduler(object):
    """Tests for Storage's enqueue_at / enqueue_in / schedule behaviour."""

    @pytest.fixture
    def job(self):
        # The builtin ``id`` is just a cheap importable callable to wrap.
        return Job(id)

    def test_enqueue_at_a_function(self, job_storage, job):
        job_id = job_storage.enqueue_at(local_now(), job)
        # is the job recorded in the chosen backend?
        assert job_storage.get_job(job_id).job_id == job_id

    def test_enqueue_at_a_function_sets_time(self, job_storage, job):
        now = local_now()
        job_id = job_storage.enqueue_at(now, job)
        with job_storage.session_scope() as session:
            _, scheduled_job = job_storage._get_job_and_orm_job(job_id, session)
            scheduled_time = scheduled_job.scheduled_time
        # Stored times are naive UTC.
        assert scheduled_time == naive_utc_datetime(now)

    def test_enqueue_at_preserves_extra_metadata(self, job_storage, job):
        metadata = {"saved": True}
        job.extra_metadata = metadata
        job_id = job_storage.enqueue_at(local_now(), job)
        # Do we get back the metadata we save?
        assert job_storage.get_job(job_id).extra_metadata == metadata

    def test_enqueue_in_a_function(self, job_storage, job):
        job_id = job_storage.enqueue_in(datetime.timedelta(seconds=1000), job)
        # is the job recorded in the chosen backend?
        assert job_storage.get_job(job_id).job_id == job_id

    def METHOD_NAME(self, job_storage, job):
        # Freeze the storage clock so "now + delta" is deterministic.
        diff = datetime.timedelta(seconds=1000)
        now = local_now()
        job_storage._now = lambda: now
        job_id = job_storage.enqueue_in(diff, job)
        with job_storage.session_scope() as session:
            _, scheduled_job = job_storage._get_job_and_orm_job(job_id, session)
            scheduled_time = scheduled_job.scheduled_time
        assert scheduled_time == naive_utc_datetime(now) + diff

    def test_schedule_a_function_sets_time(self, job_storage, job):
        now = local_now()
        job_id = job_storage.schedule(now, job)
        with job_storage.session_scope() as session:
            _, scheduled_job = job_storage._get_job_and_orm_job(job_id, session)
            scheduled_time = scheduled_job.scheduled_time
        assert scheduled_time == naive_utc_datetime(now)

    def test_schedule_a_function_gives_value_error_without_datetime(
        self, job_storage, job
    ):
        now = "test"
        with pytest.raises(ValueError) as error:
            job_storage.schedule(now, job)
        assert "must be a datetime object" in str(error.value)

    def test_schedule_a_function_gives_value_error_repeat_zero_interval(
        self, job_storage, job
    ):
        # Repeating forever (repeat=None) with no interval is rejected.
        now = local_now()
        with pytest.raises(ValueError) as error:
            job_storage.schedule(now, job, interval=0, repeat=None)
        assert "specify an interval" in str(error.value)

    def test_schedule_a_function_gives_value_error_not_timezone_aware_datetime(
        self, job_storage, job
    ):
        # utcnow() is naive; schedule() must reject naive datetimes.
        now = datetime.datetime.utcnow()
        with pytest.raises(ValueError) as error:
            job_storage.schedule(now, job)
        assert "timezone aware datetime object" in str(error.value)

    def test_scheduled_repeating_function_updates_old_job(self, job_storage, job):
        # Completing a repeating job reuses the same job id for the next run.
        now = local_now()
        old_id = job_storage.schedule(now, job, interval=1000, repeat=None)
        job_storage.complete_job(old_id)
        new_id = job_storage.get_all_jobs()[0].job_id
        assert old_id == new_id

    def test_scheduled_repeating_function_sets_endless_repeat_new_job(
        self, job_storage, job
    ):
        now = local_now()
        job_id = job_storage.schedule(now, job, interval=1000, repeat=None)
        job_storage.complete_job(job_id)
        with job_storage.session_scope() as session:
            _, scheduled_job = job_storage._get_job_and_orm_job(job_id, session)
            repeat = scheduled_job.repeat
        # repeat=None means "repeat forever" and must survive rescheduling.
        assert repeat is None

    def test_scheduled_repeating_function_enqueues_job(self, job_storage, job):
        now = local_now()
        job_id = job_storage.schedule(now, job, interval=1000, repeat=None)
        job_storage.complete_job(job_id)
        assert job_storage.get_job(job_id).job_id == job_id

    def test_scheduled_repeating_function_sets_new_job_with_one_fewer_repeats(
        self, job_storage, job
    ):
        now = local_now()
        job_id = job_storage.schedule(now, job, interval=1000, repeat=1)
        job_storage.complete_job(job_id)
        with job_storage.session_scope() as session:
            _, scheduled_job = job_storage._get_job_and_orm_job(job_id, session)
            repeat = scheduled_job.repeat
        assert repeat == 0

    def test_scheduled_repeating_function_sets_new_job_at_interval(
        self, job_storage, job
    ):
        now = local_now()
        job_id = job_storage.schedule(now, job, interval=1000, repeat=1)
        job_storage._now = lambda: now
        job_storage.complete_job(job_id)
        with job_storage.session_scope() as session:
            _, scheduled_job = job_storage._get_job_and_orm_job(job_id, session)
            scheduled_time = scheduled_job.scheduled_time
        assert scheduled_time == naive_utc_datetime(now) + datetime.timedelta(
            seconds=1000
        )

    def test_scheduled_repeating_function_failure_sets_new_job_at_retry_interval(
        self, job_storage, job
    ):
        # On failure the retry_interval (5s), not the interval (1000s), applies.
        now = local_now()
        job_id = job_storage.schedule(
            now, job, interval=1000, repeat=1, retry_interval=5
        )
        job_storage._now = lambda: now
        job_storage.mark_job_as_failed(job_id, "Exception", "Traceback")
        with job_storage.session_scope() as session:
            _, scheduled_job = job_storage._get_job_and_orm_job(job_id, session)
            scheduled_time = scheduled_job.scheduled_time
        assert scheduled_time == naive_utc_datetime(now) + datetime.timedelta(seconds=5)
class TestReschedule(TestScheduler):
    """Re-run the scheduler suite against an already-scheduled repeating job."""

    @pytest.fixture
    def job(self, job_storage):
        # Unlike the parent fixture, this job is already persisted as a
        # repeating scheduled job before each test runs.
        now = local_now()
        job_id = job_storage.schedule(now, Job(id), interval=1, repeat=123)
        return job_storage.get_job(job_id)

    def test_reschedule_a_function_gives_job_running_error(self, job_storage, job):
        # A job marked as running must not be reschedulable.
        now = local_now()
        job_storage.mark_job_as_running(job.job_id)
        with pytest.raises(JobRunning):
            job_storage.schedule(now, job)
6,571 | initialize boto3 credential providers | import logging
import os
import threading
import time
from typing import Any
import boto3
from botocore import session
from botocore.compat import ensure_unicode
from botocore.credentials import (
CredentialProvider,
Credentials,
ReadOnlyCredentials,
SharedCredentialProvider,
)
from botocore.session import get_session
class RefreshableCredentialProvider(CredentialProvider):  # type: ignore
    """Wrap an existing provider from the boto3 credential chain so the
    credentials it yields are transparently re-read from disk."""

    METHOD = "managed-refresh-cred"

    def __init__(self, credential_provider: SharedCredentialProvider, check_every: int = 2) -> None:
        super().__init__()
        self.credential_provider = credential_provider
        self.check_every = check_every

    def load(self) -> Credentials:
        """Return refreshable credentials, or the wrapped provider's falsy
        result when it cannot supply any."""
        loaded = self.credential_provider.load()
        if not loaded:
            # Propagate the wrapped provider's "nothing found" signal as-is.
            return loaded
        return RefreshableSharedCredentials(
            credentials_provider=self.credential_provider,
            check_every=self.check_every,
        )
class RefreshableSharedCredentials(Credentials):  # type: ignore
    """Credentials that reload themselves when the shared credentials file
    on disk changes.

    Every attribute access re-checks (at most once every ``check_every``
    seconds) whether the backing file's mtime moved, and reloads if so.
    """

    def __init__(
        self,
        check_every: int,
        credentials_provider: SharedCredentialProvider,
    ):
        # NOTE(review): Credentials.__init__ is not called; all of the
        # relevant state is set via _load_and_set_credentials below --
        # confirm the base class holds no other required state.
        self._credentials_provider = credentials_provider
        self._check_every = check_every
        self._lock = threading.Lock()
        # Earliest wall-clock time at which the next refresh check may run.
        self._check_time = time.time() + check_every
        self._load_and_set_credentials()

    def _load_and_set_credentials(self) -> None:
        """Read credentials from the wrapped provider and cache them."""
        credentials = self._credentials_provider.load()
        # Remember the file mtime these values came from so _refresh_needed
        # can detect later edits.
        self._last_loaded = self._credentials_modified_time()
        self.access_key = credentials.access_key
        self.secret_key = credentials.secret_key
        self.token = credentials.token
        # NOTE(review): _frozen_credentials is set here but never refreshed
        # afterwards; get_frozen_credentials rebuilds from the live fields
        # instead, so this cached copy can go stale -- confirm it is unused.
        self._frozen_credentials = ReadOnlyCredentials(
            credentials.access_key, credentials.secret_key, credentials.token
        )

    def _credentials_file(self) -> str:
        """Expanded path of the shared credentials file backing the provider."""
        path = self._credentials_provider._creds_filename
        path = os.path.expandvars(path)
        path = os.path.expanduser(path)
        return path

    def _credentials_modified_time(self) -> float:
        """mtime (seconds since epoch) of the credentials file."""
        credentials_file = self._credentials_file()
        return os.stat(credentials_file).st_mtime

    def _refresh_needed(self) -> bool:
        """True when the file changed since the last successful load."""
        return self._credentials_modified_time() != self._last_loaded

    def _refresh(self) -> None:
        """Reload credentials if the throttle window elapsed and the file changed."""
        now = time.time()
        # Check before acquiring lock to prevent excessive locking
        if now < self._check_time:
            return
        with self._lock:
            # Re-check after acquiring the lock: another thread may have
            # already advanced _check_time and performed the refresh.
            if now < self._check_time:
                return
            self._check_time = now + self._check_every
            if self._refresh_needed():
                logging.info("credential file changes detected, refreshing credentials")
                self._load_and_set_credentials()

    def get_frozen_credentials(self) -> ReadOnlyCredentials:
        """Return an immutable snapshot of the (possibly refreshed) credentials."""
        self._refresh()
        with self._lock:
            return ReadOnlyCredentials(self._access_key, self._secret_key, self._token)

    # The property getters below trigger a refresh check on every access.

    @property
    def access_key(self) -> Any:
        self._refresh()
        return self._access_key

    @access_key.setter
    def access_key(self, value: str) -> None:
        self._access_key = ensure_unicode(value)

    @property
    def secret_key(self) -> Any:
        self._refresh()
        return self._secret_key

    @secret_key.setter
    def secret_key(self, value: str) -> None:
        self._secret_key = ensure_unicode(value)

    @property
    def token(self) -> str:
        self._refresh()
        return self._token

    @token.setter
    def token(self, value: str) -> None:
        self._token = value
def register_credential_provider(session: session, provider_name: str) -> None:
    """Insert a refreshable wrapper ahead of ``provider_name`` in the chain.

    The wrapper is inserted before the named provider, so credential
    lookups hit the refreshable provider first while the original remains
    available as its delegate.
    """
    # NOTE(review): the ``session`` annotation refers to the imported
    # botocore *module*, not a Session class -- presumably
    # botocore.session.Session is meant; confirm.
    credential_resolver = session.get_component("credential_provider")
    credential_provider = credential_resolver.get_provider(provider_name)
    managed_credential_provider = RefreshableCredentialProvider(
        check_every=2, credential_provider=credential_provider
    )
    credential_resolver.insert_before(
        name=provider_name, credential_provider=managed_credential_provider
    )
def METHOD_NAME() -> None:
    """Install the refreshable shared-credentials provider as the boto3 default.

    Wraps the standard shared-credentials file provider in the botocore
    session and makes that session boto3's default session.
    """
    session = get_session()
    register_credential_provider(session, provider_name=SharedCredentialProvider.METHOD)
    boto3.setup_default_session(botocore_session=session)
6,572 | test dashboard | # Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test Domo Dashboard using the topology
"""
import json
from pathlib import Path
from unittest import TestCase
from unittest.mock import patch
from metadata.generated.schema.api.data.createChart import CreateChartRequest
from metadata.generated.schema.api.data.createDashboard import CreateDashboardRequest
from metadata.generated.schema.entity.services.dashboardService import (
DashboardConnection,
DashboardService,
DashboardServiceType,
)
from metadata.generated.schema.metadataIngestion.workflow import (
OpenMetadataWorkflowConfig,
)
from metadata.generated.schema.type.basic import FullyQualifiedEntityName
from metadata.ingestion.api.models import Either
from metadata.ingestion.ometa.client import REST
from metadata.ingestion.source.dashboard.domodashboard.metadata import (
DomoDashboardDetails,
DomodashboardSource,
)
# Fixture payloads recorded from the Domo API, used to stub REST responses.
mock_file_path = (
    Path(__file__).parent.parent.parent
    / "resources/datasets/domodashboard_dataset.json"
)
with open(mock_file_path, encoding="UTF-8") as file:
    mock_data: dict = json.load(file)

# Dashboard service entity the source is expected to attach entities to.
MOCK_DASHBOARD_SERVICE = DashboardService(
    id="c3eb265f-5445-4ad3-ba5e-797d3a3071bb",
    fullyQualifiedName=FullyQualifiedEntityName(__root__="domodashboard_source_test"),
    name="domodashboard_source_test",
    connection=DashboardConnection(),
    serviceType=DashboardServiceType.DomoDashboard,
)

# Minimal ingestion workflow config; credentials and tokens are dummies.
mock_domopipeline_config = {
    "source": {
        "type": "domodashboard",
        "serviceName": "test2",
        "serviceConnection": {
            "config": {
                "type": "DomoDashboard",
                "clientId": "00000",
                "secretToken": "abcdefg",
                "accessToken": "accessTpokem",
                "apiHost": "api.domo.com",
                "sandboxDomain": "https://domain.domo.com",
            }
        },
        "sourceConfig": {
            "config": {"dashboardFilterPattern": {}, "chartFilterPattern": {}}
        },
    },
    "sink": {"type": "metadata-rest", "config": {}},
    "workflowConfig": {
        "openMetadataServerConfig": {
            "hostPort": "http://localhost:8585/api",
            "authProvider": "openmetadata",
            # Hard-coded JWT fixture used only to satisfy config validation.
            "securityConfig": {
                "jwtToken": "eyJraWQiOiJHYjM4OWEtOWY3Ni1nZGpzLWE5MmotMDI0MmJrOTQzNTYiLCJ0eXAiOiJKV1QiLCJhbGc"
                "iOiJSUzI1NiJ9.eyJzdWIiOiJhZG1pbiIsImlzQm90IjpmYWxzZSwiaXNzIjoib3Blbi1tZXRhZGF0YS5vcmciLCJpYXQiOjE"
                "2NjM5Mzg0NjIsImVtYWlsIjoiYWRtaW5Ab3Blbm1ldGFkYXRhLm9yZyJ9.tS8um_5DKu7HgzGBzS1VTA5uUjKWOCU0B_j08WXB"
                "iEC0mr0zNREkqVfwFDD-d24HlNEbrqioLsBuFRiwIWKc1m_ZlVQbG7P36RUxhuv2vbSp80FKyNM-Tj93FDzq91jsyNmsQhyNv_fN"
                "r3TXfzzSPjHt8Go0FMMP66weoKMgW2PbXlhVKwEuXUHyakLLzewm9UMeQaEiRzhiTMU3UkLXcKbYEJJvfNFcLwSl9W8JCO_l0Yj3u"
                "d-qt_nQYEZwqW6u5nfdQllN133iikV4fM5QZsMCnm8Rq1mvLR0y9bmJiD7fwM1tmJ791TUWqmKaTnP49U493VanKpUAfzIiOiIbhg"
            },
        }
    },
}

# Raw Domo dashboard details fed into the source under test.
MOCK_DASHBOARD = DomoDashboardDetails(
    id=552315335,
    name="New Dashboard",
    cardIds=["1982511286", "781210736"],
    collection_ids=[],
    owners=[],
)

# Entity request the source is expected to emit for MOCK_DASHBOARD.
EXPECTED_DASHBOARD = CreateDashboardRequest(
    name="552315335",
    displayName="New Dashboard",
    description=None,
    sourceUrl="https://domain.domo.com/page/552315335",
    charts=[],
    tags=None,
    owner=None,
    service=FullyQualifiedEntityName(__root__="domodashboard_source_test"),
    extension=None,
)

# Chart requests expected for the two cards of MOCK_DASHBOARD.
EXPECTED_CHARTS = [
    CreateChartRequest(
        name="1982511286",
        displayName="New Dashboard",
        description=(
            "TOP SALESPEOPLE\nDisplays the top 10 salespeople by won revenue."
            " Identify over-performers and understand the secrets to their success."
        ),
        chartType="Other",
        sourceUrl="https://domain.domo.com/page/552315335/kpis/details/1982511286",
        tags=None,
        owner=None,
        service=FullyQualifiedEntityName(__root__="domodashboard_source_test"),
    ),
    CreateChartRequest(
        name="781210736",
        displayName="New Dashboard",
        description=(
            "TOP SALESPEOPLE\nDisplays the top 10 salespeople by won revenue."
            " Identify over-performers and understand the secrets to their success."
        ),
        chartType="Other",
        sourceUrl="https://domain.domo.com/page/552315335/kpis/details/781210736",
        tags=None,
        owner=None,
        service=FullyQualifiedEntityName(__root__="domodashboard_source_test"),
    ),
]
class DomoDashboardUnitTest(TestCase):
    """
    Unit tests for the Domo Dashboard ingestion source, with the Domo
    client and connection test patched out.
    """

    @patch(
        "metadata.ingestion.source.dashboard.dashboard_service.DashboardServiceSource.test_connection"
    )
    @patch("pydomo.Domo")
    def __init__(self, methodName, domo_client, test_connection) -> None:
        super().__init__(methodName)
        # Neutralize external calls: no connection test, no real Domo client.
        test_connection.return_value = False
        domo_client.return_value = False
        self.config = OpenMetadataWorkflowConfig.parse_obj(mock_domopipeline_config)
        self.domodashboard = DomodashboardSource.create(
            mock_domopipeline_config["source"],
            self.config.workflowConfig.openMetadataServerConfig,
        )
        # Seed the topology context with the mocked dashboard + service.
        self.domodashboard.context.__dict__["dashboard"] = MOCK_DASHBOARD
        self.domodashboard.context.__dict__[
            "dashboard_service"
        ] = MOCK_DASHBOARD_SERVICE

    def METHOD_NAME(self):
        # yield_dashboard must emit exactly the expected CreateDashboardRequest.
        dashboard_list = []
        results = self.domodashboard.yield_dashboard(MOCK_DASHBOARD)
        for result in results:
            if isinstance(result, Either) and result.right:
                dashboard_list.append(result.right)
        self.assertEqual(EXPECTED_DASHBOARD, dashboard_list[0])

    def test_dashboard_name(self):
        assert (
            self.domodashboard.get_dashboard_name(MOCK_DASHBOARD)
            == mock_data[0][0]["title"]
        )

    def test_chart(self):
        """
        Charts are emitted for each card; malformed card payloads
        (mock_data[1], mock_data[2]) must yield None instead of raising.
        """
        with patch.object(REST, "_request", return_value=mock_data[0]):
            results = self.domodashboard.yield_dashboard_chart(MOCK_DASHBOARD)
            chart_list = []
            for result in results:
                if isinstance(result, Either) and result.right:
                    chart_list.append(result.right)
            for _, (expected, original) in enumerate(zip(EXPECTED_CHARTS, chart_list)):
                self.assertEqual(expected, original)
        with patch.object(REST, "_request", return_value=mock_data[1]):
            # NOTE(review): ``result`` is assigned but never used; the call
            # is repeated inside the assert below -- confirm the duplicate
            # call is intentional.
            result = self.domodashboard.domo_client.get_chart_details(
                MOCK_DASHBOARD.cardIds[0]
            )
            assert (
                self.domodashboard.domo_client.get_chart_details(
                    MOCK_DASHBOARD.cardIds[0]
                )
                is None
            )
        with patch.object(REST, "_request", return_value=mock_data[2]):
            assert (
                self.domodashboard.domo_client.get_chart_details(
                    MOCK_DASHBOARD.cardIds[0]
                )
                is None
            )
6,573 | make im bigger if needed | import os
import shutil
import math
from zou.app.utils import fs
from PIL import Image, ImageFile
# Tolerate partially-written image files instead of raising on load.
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Raise Pillow's decompression-bomb guard to 20000 x 20000 pixels.
Image.MAX_IMAGE_PIXELS = 20000 * 20000

# Target sizes as (width, height); a height of 0 means "derive the height
# from the image's aspect ratio" (see turn_into_thumbnail).
RECTANGLE_SIZE = 150, 100
SQUARE_SIZE = 100, 100
PREVIEW_SIZE = 1200, 0
BIG_SQUARE_SIZE = 400, 400
def save_file(tmp_folder, instance_id, file_to_save):
    """
    Save file in given folder. The file must only be temporary saved via
    this function.

    Returns the path of the written file. The content is always re-encoded
    as PNG (CMYK sources are converted to RGB first).
    """
    extension = "." + file_to_save.filename.split(".")[-1].lower()
    file_name = instance_id + extension.lower()
    file_path = os.path.join(tmp_folder, file_name)
    file_to_save.save(file_path)
    im = Image.open(file_path)
    if im.mode == "CMYK":
        # PNG cannot store CMYK; normalize to RGB.
        im = im.convert("RGB")
    # NOTE(review): the file keeps its original extension (e.g. ".jpg")
    # even though PNG data is written into it -- confirm this is intended.
    im.save(file_path, "PNG")
    return file_path
def convert_jpg_to_png(file_source_path):
    """Re-encode the image at ``file_source_path`` as a sibling ``.png`` file.

    The source file is removed afterwards; the new file path is returned.
    """
    directory = os.path.dirname(file_source_path)
    source_name = os.path.basename(file_source_path)
    target_path = os.path.join(directory, "%s.png" % source_name[:-4])

    image = Image.open(file_source_path)
    if image.mode == "CMYK":
        # PNG has no CMYK mode; normalize to RGB first.
        image = image.convert("RGB")
    image.save(target_path, "PNG")

    fs.rm_file(file_source_path)
    return target_path
def get_file_name(instance_id):
    """Return the thumbnail file name (``<instance_id>.png``) for an id."""
    return "{}.png".format(instance_id)
def get_full_size_from_width(im, width):
    """Return a (width, height) pair scaled from ``im`` at the given width.

    The height preserves the image's aspect ratio, rounded up to the
    nearest integer.
    """
    original_width, original_height = im.size
    aspect = float(original_height) / float(original_width)
    return (width, int(math.ceil(width * aspect)))
def METHOD_NAME(im, size):
    """Upscale ``im`` to ``size`` when it is strictly smaller in both
    dimensions; otherwise return it untouched."""
    target_width, target_height = size
    current_width, current_height = im.size
    needs_upscale = current_width < target_width and current_height < target_height
    if not needs_upscale:
        return im
    return im.resize(size, Image.Resampling.LANCZOS)
def fit_to_target_size(im, size):
    """
    Make sure that the image is contained in the size given in parameter
    (shorten width and/or height proportionally to the expected ratio).

    Note the image is resized even when the ratios already match.
    """
    im_width, im_height = im.size
    width, height = size
    w = width
    h = height
    original_ratio = float(im_width) / float(im_height)
    target_ratio = float(width) / float(height)
    if target_ratio != original_ratio:
        # First try filling the target height at the original ratio...
        w = math.ceil(height * original_ratio)
        if w > width:
            # ...and fall back to filling the width when that overflows.
            w = width
            h = int(math.ceil(float(width) / original_ratio))
    im = im.resize((w, h), Image.Resampling.LANCZOS)
    return im
def turn_into_thumbnail(file_path, size=None):
    """
    Turn given picture into a smaller version.

    The result overwrites ``file_path`` as a PNG: the image is centered on
    a transparent canvas of exactly the target size. When ``size`` has a
    zero height, the height is derived from the image's aspect ratio; when
    ``size`` is None, the image's own size is used.
    """
    im = Image.open(file_path)
    if size is not None:
        (width, height) = size
        if height == 0:
            size = get_full_size_from_width(im, width)
    else:
        size = im.size
    # Small images are first upscaled so the thumbnail fills the canvas.
    im = METHOD_NAME(im, size)
    im = fit_to_target_size(im, size)
    im.thumbnail(size, Image.Resampling.LANCZOS)
    if im.mode == "CMYK":
        im = im.convert("RGBA")
    # Paste centered onto a fully transparent canvas of exactly ``size``.
    final = Image.new("RGBA", size, (0, 0, 0, 0))
    final.paste(
        im, (int((size[0] - im.size[0]) / 2), int((size[1] - im.size[1]) / 2))
    )
    final.save(file_path, "PNG")
    return file_path
def resize(file_path, size):
    """
    Resize given picture in place to exactly ``size`` (no aspect-ratio
    preservation) and re-save it as PNG.
    """
    im = Image.open(file_path)
    im = im.resize(size, Image.Resampling.LANCZOS)
    if im.mode == "CMYK":
        # PNG cannot store CMYK; normalize to RGB.
        im = im.convert("RGB")
    im.save(file_path, "PNG")
    return file_path
def prepare_image_for_thumbnail(im, size):
    """
    Crop image to avoid deformation while building the target thumbnail.

    The crop keeps the center of the picture and removes whichever margins
    are needed so the remaining region matches the target aspect ratio.
    """
    im_width, im_height = im.size
    width, height = size
    original_ratio = float(im_width) / float(im_height)
    target_ratio = float(width) / float(height)
    if target_ratio > original_ratio:
        # image is too tall: take some off the top and bottom
        scale_factor = float(target_ratio) / float(original_ratio)
        crop_width = im_width
        crop_height = math.floor(float(im_height) / scale_factor)
        top_cut_line = (im_height - crop_height) / 2
        im = im.crop(
            flat(0, top_cut_line, crop_width, top_cut_line + crop_height)
        )
    else:
        # image is too wide: take some off the sides
        scale_factor = float(original_ratio) / float(target_ratio)
        crop_width = math.ceil(float(im_width) / scale_factor)
        crop_height = im_height
        side_cut_line = int(float(im_width - crop_width) / 2)
        im = im.crop(
            flat(side_cut_line, 0, side_cut_line + crop_width, crop_height)
        )
    return im
def generate_preview_variants(original_path, instance_id):
    """
    Generate three resized variants of the picture at ``original_path``:

    1. Rectangle thumbnail (RECTANGLE_SIZE)
    2. Square thumbnail (SQUARE_SIZE)
    3. Preview (PREVIEW_SIZE: fixed width, height from aspect ratio)

    Returns a list of (variant_name, file_path) tuples.
    """
    file_name = get_file_name(instance_id)
    variants = [
        ("thumbnails", RECTANGLE_SIZE),
        ("thumbnails-square", SQUARE_SIZE),
        ("previews", PREVIEW_SIZE),
    ]
    result = []
    for picture_data in variants:
        (picture_type, size) = picture_data
        folder_path = os.path.dirname(original_path)
        picture_path = os.path.join(
            folder_path, "%s-%s" % (picture_type, file_name)
        )
        # Work on a copy so the original upload is left untouched.
        shutil.copyfile(original_path, picture_path)
        turn_into_thumbnail(picture_path, size)
        result.append((picture_type, picture_path))
    return result
def url_path(data_type, instance_id):
    """Build the thumbnail download path for an entity type and instance id.

    Underscores in the type are converted to dashes to match URL naming.
    """
    slug = data_type.replace("_", "-")
    return "pictures/thumbnails/{}/{}.png".format(slug, instance_id)
def flat(*nums):
    """Round every given number and return the results as a tuple of ints."""
    rounded = [int(round(value)) for value in nums]
    return tuple(rounded)
6,574 | test linkerror | import os
import unittest
import osc.core
import osc.oscerr
from .common import GET, PUT, OscTestCase
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'setlinkrev_fixtures')
def suite():
    """Return the test suite for this module (used by external runners)."""
    return unittest.defaultTestLoader.loadTestsFromTestCase(TestSetLinkRev)
class TestSetLinkRev(OscTestCase):
    """Tests for osc.core.set_link_rev against mocked OBS API responses.

    The @GET/@PUT decorators stub the HTTP calls: GETs serve fixture
    files, PUTs assert the exact _link body uploaded.
    """

    def setUp(self):
        super().setUp(copytree=False)

    def _get_fixtures_dir(self):
        # Directory holding the fixture files named in the decorators.
        return FIXTURES_DIR

    @GET('http://localhost/source/osctest/simple/_link', file='simple_link')
    @GET('http://localhost/source/srcprj/srcpkg?rev=latest', file='simple_filesremote')
    @PUT('http://localhost/source/osctest/simple/_link?comment=Set+link+revision+to+42',
         exp='<link package="srcpkg" project="srcprj" rev="42" />', text='dummytext')
    def test_simple1(self):
        """a simple set_link_rev call without revision"""
        osc.core.set_link_rev('http://localhost', 'osctest', 'simple')

    @GET('http://localhost/source/osctest/simple/_link', file='simple_link')
    @PUT('http://localhost/source/osctest/simple/_link?comment=Set+link+revision+to+42',
         exp='<link package="srcpkg" project="srcprj" rev="42" />', text='dummytext')
    def test_simple2(self):
        """a simple set_link_rev call with revision"""
        osc.core.set_link_rev('http://localhost', 'osctest', 'simple', '42')

    @GET('http://localhost/source/osctest/simple/_link', file='noproject_link')
    @GET('http://localhost/source/osctest/srcpkg?rev=latest&expand=1', file='expandedsrc_filesremote')
    @PUT('http://localhost/source/osctest/simple/_link?comment=Set+link+revision+to+eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee',
         exp='<link package="srcpkg" rev="eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" vrev="1" />', text='dummytext')
    def test_expandedsrc(self):
        """expand src package"""
        osc.core.set_link_rev('http://localhost', 'osctest', 'simple', expand=True)

    @GET('http://localhost/source/osctest/simple/_link', file='link_with_rev')
    @GET('http://localhost/source/srcprj/srcpkg?rev=latest', file='simple_filesremote')
    @PUT('http://localhost/source/osctest/simple/_link?comment=Set+link+revision+to+42',
         exp='<link package="srcpkg" project="srcprj" rev="42" />', text='dummytext')
    def test_existingrev(self):
        """link already has a rev attribute, update it to current version"""
        # we could also avoid the superfluous PUT
        osc.core.set_link_rev('http://localhost', 'osctest', 'simple')

    @GET('http://localhost/source/osctest/simple/_link', file='link_with_rev')
    @GET('http://localhost/source/srcprj/srcpkg?rev=latest&expand=1', file='expandedsrc_filesremote')
    @PUT('http://localhost/source/osctest/simple/_link?comment=Set+link+revision+to+eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee',
         exp='<link package="srcpkg" project="srcprj" rev="eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" vrev="1" />',
         text='dummytext')
    def test_expandexistingrev(self):
        """link already has a rev attribute, update it to current version"""
        osc.core.set_link_rev('http://localhost', 'osctest', 'simple', expand=True)

    @GET('http://localhost/source/osctest/simple/_link', file='simple_link')
    @GET('http://localhost/source/srcprj/srcpkg?rev=latest&expand=1', text='conflict in file merge', code=400)
    def METHOD_NAME(self):
        """link is broken"""
        from urllib.error import HTTPError
        # the backend returns status 400 if we try to expand a broken _link
        self.assertRaises(HTTPError, osc.core.set_link_rev, 'http://localhost', 'osctest', 'simple', expand=True)

    @GET('http://localhost/source/osctest/simple/_link', file='rev_link')
    @PUT('http://localhost/source/osctest/simple/_link?comment=Unset+link+revision',
         exp='<link package="srcpkg" project="srcprj" />', text='dummytext')
    def test_deleterev(self):
        """delete rev attribute from link xml"""
        osc.core.set_link_rev('http://localhost', 'osctest', 'simple', revision=None)

    @GET('http://localhost/source/osctest/simple/_link', file='md5_rev_link')
    @PUT('http://localhost/source/osctest/simple/_link?comment=Unset+link+revision',
         exp='<link package="srcpkg" project="srcprj" />', text='dummytext')
    def test_deleterev_md5(self):
        """delete rev and vrev attribute from link xml"""
        osc.core.set_link_rev('http://localhost', 'osctest', 'simple', revision=None)

    @GET('http://localhost/source/osctest/simple/_link', file='simple_link')
    @PUT('http://localhost/source/osctest/simple/_link?comment=Unset+link+revision',
         exp='<link package="srcpkg" project="srcprj" />', text='dummytext')
    def test_deleterevnonexistent(self):
        """delete non existent rev attribute from link xml"""
        osc.core.set_link_rev('http://localhost', 'osctest', 'simple', revision=None)
if __name__ == '__main__':
    unittest.main()
import asab
import datetime
import logging
import os.path
from ..abc.sink import Sink
L = logging.getLogger(__file__)
class FileTableauSink(Sink):
    """
    Sink that writes incoming events as rows of a Tableau extract file.

    The output file is periodically "rotated" (closed) on a timer so other
    processes can consume it; the next processed event re-opens or re-creates
    the extract.

    Events are expected to be dicts of the form
    ``{column_name: {'type': <tableau type name>, 'value': <value>}}``.

    NOTE(review): this class relies on the ``tableausdk`` package, but no
    import of it is visible in this file -- confirm it is imported at module
    level elsewhere / installed in the deployment.
    """

    ConfigDefaults = {
        'path': '',
        'rotate_period': 1,  # seconds between automatic rotations
        'table_name': 'Extract',
    }

    def __init__(self, app, pipeline, id=None, config=None):
        super().__init__(app, pipeline, id=id, config=config)

        # Lazily initialized on the first processed event (see METHOD_NAME).
        self.DataExtract = None
        self.DataSchema = None
        self.Table = None
        self.ColumnMapping = {}  # column name -> column index in the schema

        self.RotatePeriod = int(self.Config['rotate_period'])
        self.Timer = asab.Timer(app, self.on_clock_tick, autorestart=True)
        self.Timer.start(self.RotatePeriod)

    async def on_clock_tick(self):
        # Periodic timer callback: close the extract so it can be picked up.
        self.rotate()

    def get_file_name(self, context, event):
        '''
        Override this method to gain control over output file name.
        '''
        return self.Config['path']

    def set_data_schema(self, data_names, data_types):
        """
        Build the Tableau table definition and the column mapping from
        parallel sequences of column names and tableau type names.

        Unknown type names are skipped with a warning (no column is added).
        """
        self.ColumnMapping = {}
        for i, name in enumerate(data_names):
            self.ColumnMapping[name] = i
            field_type = data_types[i]
            if field_type == 'boolean':
                self.DataSchema.addColumn(name, tableausdk.Types.Type.BOOLEAN)
            elif field_type == 'charstring':
                self.DataSchema.addColumn(name, tableausdk.Types.Type.CHAR_STRING)
            elif field_type == 'date':
                self.DataSchema.addColumn(name, tableausdk.Types.Type.DATE)
            elif field_type == 'datetime':
                self.DataSchema.addColumn(name, tableausdk.Types.Type.DATETIME)
            elif field_type == 'double':
                self.DataSchema.addColumn(name, tableausdk.Types.Type.DOUBLE)
            elif field_type == 'duration':
                self.DataSchema.addColumn(name, tableausdk.Types.Type.DURATION)
            elif field_type == 'integer':
                self.DataSchema.addColumn(name, tableausdk.Types.Type.INTEGER)
            elif field_type == 'spatial':
                self.DataSchema.addColumn(name, tableausdk.Types.Type.SPATIAL)
            elif field_type == 'unicodestring':
                self.DataSchema.addColumn(name, tableausdk.Types.Type.UNICODE_STRING)
            else:
                L.warning("Wrong type {} detected".format(field_type))

    def set_row(self, context, event):
        """
        Convert one event into a Tableau row and insert it into the table.
        """
        row = tableausdk.Extract.Row(self.DataSchema)
        for key in event.keys():
            field_value = event[key]['value']
            field_type = event[key]['type']
            if field_value is None:
                row.setNull(self.ColumnMapping[key])
            elif field_type == 'boolean':
                row.setBoolean(self.ColumnMapping[key], field_value)
            elif field_type == 'charstring':
                row.setCharString(self.ColumnMapping[key], field_value)
            elif field_type == 'date':
                # field_value must be timestamp
                t_t = datetime.datetime.fromtimestamp(field_value).timetuple()
                row.setDate(self.ColumnMapping[key], t_t[0], t_t[1], t_t[2])
            elif field_type == 'datetime':
                # field_value must be timestamp
                t_t = datetime.datetime.fromtimestamp(field_value).timetuple()
                # The fraction of a second as one tenth of a millisecond (1/10000)
                frac = int((field_value - int(field_value)) * 10000)
                row.setDateTime(self.ColumnMapping[key], t_t[0], t_t[1], t_t[2], t_t[3], t_t[4], t_t[5], frac)
            elif field_type == 'double':
                row.setDouble(self.ColumnMapping[key], field_value)
            elif field_type == 'duration':
                # field_value must be a duration in seconds.
                # BUGFIX: the original computed hours = days * 24,
                # minutes = hours * 60 and seconds = minutes * 60 -- i.e.
                # multiples of the day count instead of the remainders --
                # producing nonsensical durations. Decompose properly.
                # frac uses the same 1/10000 s unit as the 'datetime' branch
                # (the original divided the fraction by 10, which does not
                # match that unit).
                frac = int((field_value - int(field_value)) * 10000)
                total_seconds = int(field_value)
                days, remainder = divmod(total_seconds, 24 * 60 * 60)
                hours, remainder = divmod(remainder, 60 * 60)
                minutes, seconds = divmod(remainder, 60)
                row.setDuration(self.ColumnMapping[key], days, hours, minutes, seconds, frac)
            elif field_type == 'integer':
                row.setInteger(self.ColumnMapping[key], field_value)
            elif field_type == 'spatial':
                row.setSpatial(self.ColumnMapping[key], field_value)
            elif field_type == 'unicodestring':
                row.setString(self.ColumnMapping[key], field_value)
            else:
                L.warning("Wrong type in event {} detected".format(field_type))
        self.Table.insert(row)

    def METHOD_NAME(self, context, event):
        """
        Pipeline entry point: lazily open (or create) the extract on first
        use, then append the event as a row.
        """
        if self.DataExtract is None:
            if not os.path.isfile(self.get_file_name(context, event)):
                # create table: derive the schema from the first event
                self.DataExtract = tableausdk.Extract.Extract(self.get_file_name(context, event))
                self.DataSchema = tableausdk.Extract.TableDefinition()
                data_types = [event[key]['type'] for key in event.keys()]
                self.set_data_schema(event.keys(), data_types)
                self.Table = self.DataExtract.addTable(self.Config['table_name'], self.DataSchema)
            else:
                # get data_extract from existing file and rebuild the mapping
                self.DataExtract = tableausdk.Extract.Extract(self.get_file_name(context, event))
                self.Table = self.DataExtract.openTable(self.Config['table_name'])
                self.DataSchema = self.Table.getTableDefinition()
                column_count = self.DataSchema.getColumnCount()
                self.ColumnMapping = {}
                for i in range(0, column_count):
                    self.ColumnMapping[self.DataSchema.getColumnName(i)] = i
        self.set_row(context, event)

    def rotate(self):
        '''
        Call this to close the currently open file.
        '''
        if self.DataExtract is not None:
            self.DataExtract.close()
            del self.DataExtract
            self.DataExtract = None
            self.DataSchema = None
            self.Table = None
            self.ColumnMapping = {}
# Copyright 2020-2023 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
from opendr.engine.data import Image
from opendr.engine.datasets import ExternalDataset
from opendr.perception.fall_detection import FallDetectorLearner
from opendr.perception.pose_estimation import LightweightOpenPoseLearner
device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu'
def rmfile(path):
    """Remove the file at *path*; on failure print a message instead of raising."""
    try:
        os.remove(path)
    except OSError as err:
        # Best-effort cleanup helper for tests: report and continue.
        print("Error: %s - %s." % (err.filename, err.strerror))
def rmdir(_dir):
    """Recursively remove *_dir*; on failure print a message instead of raising."""
    try:
        shutil.rmtree(_dir)
    except OSError as err:
        # Best-effort cleanup helper for tests: report and continue.
        print("Error: %s - %s." % (err.filename, err.strerror))
class TestFallDetectorLearner(unittest.TestCase):
    """End-to-end tests for FallDetectorLearner on downloaded sample images."""

    @classmethod
    def setUpClass(cls):
        # Downloads a pretrained pose estimator and the test images once for
        # the whole test class (network access required).
        print("\n\n**********************************\nTEST Fall Detector Learner\n"
              "**********************************")
        cls.temp_dir = os.path.join(".", "tests", "sources", "tools", "perception", "fall_detection",
                                    "fall_detector_temp")
        cls.pose_estimator = LightweightOpenPoseLearner(device=device, temp_path=cls.temp_dir,
                                                        mobilenet_use_stride=False)
        cls.pose_estimator.download(mode="pretrained")
        cls.pose_estimator.load(os.path.join(cls.temp_dir, "openpose_default"))
        cls.fall_detector = FallDetectorLearner(cls.pose_estimator)
        cls.fall_detector.download(path=cls.temp_dir, mode="test_data")

    @classmethod
    def tearDownClass(cls):
        # Clean up downloaded files
        rmdir(os.path.join(cls.temp_dir, "openpose_default"))
        rmdir(os.path.join(cls.temp_dir, "test_images"))
        rmdir(os.path.join(cls.temp_dir))

    def METHOD_NAME(self):
        # Evaluation on the bundled test set is expected to be perfect
        # (three curated images), hence the exact 1.0 / 0 expectations.
        eval_dataset = ExternalDataset(path=os.path.join(self.temp_dir, "test_images"), dataset_type="test")
        results_dict = self.fall_detector.eval(eval_dataset)
        self.assertEqual(results_dict['accuracy'], 1.0,
                         msg="Accuracy is not 1.0.")
        self.assertEqual(results_dict['sensitivity'], 1.0,
                         msg="Sensitivity is not 1.0.")
        self.assertEqual(results_dict['specificity'], 1.0,
                         msg="Specificity is not 1.0.")
        self.assertEqual(results_dict['detection_accuracy'], 1.0,
                         msg="Detection accuracy is not 1.0.")
        self.assertEqual(results_dict['no_detections'], 0,
                         msg="Number of no detections is not 0.")

    def test_infer(self):
        # infer() accepts either an Image (poses computed internally) or a
        # precomputed pose list; both paths must agree.
        # Detection data convention used below: 1 = fallen, -1 = standing.
        img = Image.open(os.path.join(self.temp_dir, "test_images", "fallen.png"))
        # Detector should detect fallen person on fallen.png
        self.assertTrue(self.fall_detector.infer(img)[0][0].data == 1,
                        msg="Fall detector didn't detect fallen person on provided image fallen.png")
        poses = self.pose_estimator.infer(img)
        self.assertTrue(self.fall_detector.infer(poses)[0][0].data == 1,
                        msg="Fall detector didn't detect fallen person on poses provided for fallen.png")
        img = Image.open(os.path.join(self.temp_dir, "test_images", "standing.png"))
        # Detector should detect standing person on standing.png
        self.assertTrue(self.fall_detector.infer(img)[0][0].data == -1,
                        msg="Fall detector didn't detect standing person on standing.png")
        poses = self.pose_estimator.infer(img)
        self.assertTrue(self.fall_detector.infer(poses)[0][0].data == -1,
                        msg="Fall detector didn't detect standing person on poses provided for standing.png")
        img = Image.open(os.path.join(self.temp_dir, "test_images", "no_person.png"))
        # Detector should not detect fallen nor standing person on no_person.png
        self.assertTrue(len(self.fall_detector.infer(img)) == 0,
                        msg="Fall detector detected fallen or standing person on no_person.png")
        poses = self.pose_estimator.infer(img)
        self.assertTrue(len(self.fall_detector.infer(poses)) == 0,
                        msg="Fall detector detected fallen or standing person on poses provided for no_person.png")
import copy
import unittest
from systems.tests.testdata import get_test_object_futures_with_rules
from systems.basesystem import System
from systems.forecast_scale_cap import ForecastScaleCap
class Test(unittest.TestCase):
    """Tests for ForecastScaleCap: forecast scalars, caps, scaled/capped forecasts."""

    def setUp(self):
        (rules, rawdata, data, config) = get_test_object_futures_with_rules()
        system = System([rawdata, rules, ForecastScaleCap()], data, config)
        self.system = system
        self.config = config
        self.rules = rules
        self.rawdata = rawdata
        # Stored as the class (not an instance) so tests can build fresh
        # systems with modified configs.
        self.forecast_scale_cap = ForecastScaleCap
        self.data = data

    # NOTE(review): decorating with `unittest.SkipTest` (the exception class)
    # rather than `@unittest.skip(...)` -- confirm this skips as intended.
    @unittest.SkipTest
    def test_get_raw_forecast(self):
        ans = self.system.forecastScaleCap.get_raw_forecast("EDOLLAR", "ewmac8").tail(1)
        self.assertAlmostEqual(ans.values[0], 0.164383, places=6)

    def test_get_forecast_cap(self):
        # Cap taken from config...
        ans = self.system.forecastScaleCap.get_forecast_cap()
        self.assertEqual(ans, 21.0)
        # test defaults
        config = self.config
        del config.forecast_cap
        system3 = System(
            [self.rawdata, self.rules, self.forecast_scale_cap()], self.data, config
        )
        ans = system3.forecastScaleCap.get_forecast_cap()
        self.assertEqual(ans, 20.0)

    @unittest.SkipTest
    def METHOD_NAME(self):
        # fixed
        # From config
        self.assertEqual(
            self.system.forecastScaleCap.get_forecast_scalar("EDOLLAR", "ewmac8"), 5.3
        )
        # default
        config = copy.copy(self.config)
        unused = config.trading_rules["ewmac8"].pop("forecast_scalar")
        system2 = System(
            [self.rawdata, self.rules, self.forecast_scale_cap()], self.data, config
        )
        self.assertEqual(
            system2.forecastScaleCap.get_forecast_scalar("EDOLLAR", "ewmac8"), 1.0
        )
        # other config location
        setattr(config, "forecast_scalars", dict(ewmac8=11.0))
        system3 = System(
            [self.rawdata, self.rules, self.forecast_scale_cap()], self.data, config
        )
        self.assertEqual(
            system3.forecastScaleCap.get_forecast_scalar("EDOLLAR", "ewmac8"), 11.0
        )
        # estimated
        config = copy.copy(self.config)
        config.use_forecast_scale_estimates = True
        system2 = System(
            [self.rawdata, self.rules, self.forecast_scale_cap()], self.data, config
        )
        # From default
        self.assertAlmostEqual(
            system2.forecastScaleCap.get_forecast_scalar("EDOLLAR", "ewmac8")
            .tail(1)
            .values[0],
            5.8,
            places=1,
        )
        # From config
        scale_config = dict(pool_instruments=False)
        config.forecast_scalar_estimate = scale_config
        system2 = System(
            [self.rawdata, self.rules, self.forecast_scale_cap()], self.data, config
        )
        self.assertAlmostEqual(
            system2.forecastScaleCap.get_forecast_scalar("EDOLLAR", "ewmac8")
            .tail(1)
            .values[0],
            5.653444301,
        )

    @unittest.SkipTest
    def test_get_scaled_forecast(self):
        self.assertAlmostEqual(
            self.system.forecastScaleCap.get_scaled_forecast("EDOLLAR", "ewmac8")
            .tail(1)
            .values[0],
            0.871230635,
        )

    @unittest.SkipTest
    def test_get_capped_forecast(self):
        # fixed, normal cap
        self.assertAlmostEqual(
            self.system.forecastScaleCap.get_capped_forecast("EDOLLAR", "ewmac8")
            .tail(1)
            .values[0],
            0.871230635,
        )
        # estimated, normal cap
        config = copy.copy(self.config)
        config.use_forecast_scale_estimates = True
        system2 = System(
            [self.rawdata, self.rules, self.forecast_scale_cap()], self.data, config
        )
        self.assertAlmostEqual(
            system2.forecastScaleCap.get_forecast_scalar("EDOLLAR", "ewmac8")
            .tail(1)
            .values[0],
            5.8,
            places=1,
        )
        # binding cap
        config.use_forecast_scale_estimates = False
        config.forecast_cap = 0.2
        system3 = System(
            [self.rawdata, self.rules, self.forecast_scale_cap()], self.data, config
        )
        self.assertAlmostEqual(
            system3.forecastScaleCap.get_capped_forecast("EDOLLAR", "ewmac8")
            .tail(1)
            .values[0],
            0.2,
        )
if __name__ == "__main__":
    unittest.main()
from typing import Dict, List, Tuple
from zulip_bots.bots.game_of_fifteen.game_of_fifteen import GameOfFifteenModel
from zulip_bots.game_handler import BadMoveException
from zulip_bots.test_lib import BotTestCase, DefaultTests
class TestGameOfFifteenBot(BotTestCase, DefaultTests):
    """Tests for the Game of Fifteen bot: message handling and game model logic."""

    bot_name = "game_of_fifteen"

    def METHOD_NAME(
        self, content: str, user: str = "foo@example.com", user_name: str = "foo"
    ) -> Dict[str, str]:
        # Build a minimal Zulip-style message dict for handle_message().
        message = dict(sender_email=user, content=content, sender_full_name=user_name)
        return message

    # Function that serves similar purpose to BotTestCase.verify_dialog, but allows for multiple responses to be handled
    def verify_response(
        self,
        request: str,
        expected_response: str,
        response_number: int,
        user: str = "foo@example.com",
    ) -> None:
        """
        This function serves a similar purpose
        to BotTestCase.verify_dialog, but allows
        for multiple responses to be validated,
        and for mocking of the bot's internal data
        """
        bot, bot_handler = self._get_handlers()
        message = self.METHOD_NAME(request, user)
        bot_handler.reset_transcript()
        bot.handle_message(message, bot_handler)
        responses = [message for (method, message) in bot_handler.transcript]
        first_response = responses[response_number]
        self.assertEqual(expected_response, first_response["content"])

    def help_message(self) -> str:
        # Expected help text; must match the bot's output verbatim.
        return """** Game of Fifteen Bot Help:**
*Preface all commands with @**test-bot***
* To start a game in a stream, type
`start game`
* To quit a game at any time, type
`quit`
* To see rules of this game, type
`rules`
* To make your move during a game, type
```move <tile1> <tile2> ...```"""

    def test_static_responses(self) -> None:
        self.verify_response("help", self.help_message(), 0)

    def test_game_message_handler_responses(self) -> None:
        # :grey_question: renders the empty (0) tile.
        board = "\n\n:grey_question::one::two:\n\n:three::four::five:\n\n:six::seven::eight:"
        bot, bot_handler = self._get_handlers()
        self.assertEqual(bot.gameMessageHandler.parse_board(self.winning_board), board)
        self.assertEqual(bot.gameMessageHandler.alert_move_message("foo", "move 1"), "foo moved 1")
        self.assertEqual(
            bot.gameMessageHandler.game_start_message(),
            "Welcome to Game of Fifteen!"
            "To make a move, type @-mention `move <tile1> <tile2> ...`",
        )

    # Solved position: 0 (gap) in the top-left corner, tiles in order.
    winning_board = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]

    def test_game_of_fifteen_logic(self) -> None:
        def confirmAvailableMoves(
            good_moves: List[int], bad_moves: List[int], board: List[List[int]]
        ) -> None:
            gameOfFifteenModel.update_board(board)
            for move in good_moves:
                self.assertTrue(gameOfFifteenModel.validate_move(move))
            for move in bad_moves:
                self.assertFalse(gameOfFifteenModel.validate_move(move))

        def confirmMove(
            tile: str,
            token_number: int,
            initial_board: List[List[int]],
            final_board: List[List[int]],
        ) -> None:
            # Apply "move <tile>" and compare the resulting board.
            gameOfFifteenModel.update_board(initial_board)
            test_board = gameOfFifteenModel.make_move("move " + tile, token_number)
            self.assertEqual(test_board, final_board)

        def confirmGameOver(board: List[List[int]], result: str) -> None:
            gameOfFifteenModel.update_board(board)
            game_over = gameOfFifteenModel.determine_game_over(["first_player"])
            self.assertEqual(game_over, result)

        def confirm_coordinates(board: List[List[int]], result: Dict[int, Tuple[int, int]]) -> None:
            gameOfFifteenModel.update_board(board)
            coordinates = gameOfFifteenModel.get_coordinates(board)
            self.assertEqual(coordinates, result)

        gameOfFifteenModel = GameOfFifteenModel()

        # Basic Board setups
        initial_board = [[8, 7, 6], [5, 4, 3], [2, 1, 0]]
        sample_board = [[7, 6, 8], [3, 0, 1], [2, 4, 5]]
        winning_board = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]

        # Test Move Validation Logic
        confirmAvailableMoves([1, 2, 3, 4, 5, 6, 7, 8], [0, 9, -1], initial_board)

        # Test Move Logic
        confirmMove("1", 0, initial_board, [[8, 7, 6], [5, 4, 3], [2, 0, 1]])
        confirmMove("1 2", 0, initial_board, [[8, 7, 6], [5, 4, 3], [0, 2, 1]])
        confirmMove("1 2 5", 0, initial_board, [[8, 7, 6], [0, 4, 3], [5, 2, 1]])
        confirmMove("1 2 5 4", 0, initial_board, [[8, 7, 6], [4, 0, 3], [5, 2, 1]])
        confirmMove("3", 0, sample_board, [[7, 6, 8], [0, 3, 1], [2, 4, 5]])
        confirmMove("3 7", 0, sample_board, [[0, 6, 8], [7, 3, 1], [2, 4, 5]])

        # Test coordinates logic:
        confirm_coordinates(
            initial_board,
            {
                8: (0, 0),
                7: (0, 1),
                6: (0, 2),
                5: (1, 0),
                4: (1, 1),
                3: (1, 2),
                2: (2, 0),
                1: (2, 1),
                0: (2, 2),
            },
        )

        # Test Game Over Logic:
        confirmGameOver(winning_board, "current turn")
        confirmGameOver(sample_board, "")

    def test_invalid_moves(self) -> None:
        # Each move below is illegal from initial_board (non-adjacent tile,
        # the gap itself, out-of-range tile, or an illegal multi-step chain);
        # all must raise BadMoveException.
        model = GameOfFifteenModel()
        move1 = "move 2"
        move2 = "move 5"
        move3 = "move 23"
        move4 = "move 0"
        move5 = "move 1 2"
        initial_board = [[8, 7, 6], [5, 4, 3], [2, 1, 0]]
        model.update_board(initial_board)
        with self.assertRaises(BadMoveException):
            model.make_move(move1, player_number=0)
        with self.assertRaises(BadMoveException):
            model.make_move(move2, player_number=0)
        with self.assertRaises(BadMoveException):
            model.make_move(move3, player_number=0)
        with self.assertRaises(BadMoveException):
            model.make_move(move4, player_number=0)
        with self.assertRaises(BadMoveException):
            model.make_move(move5, player_number=0)
# sage.doctest: needs sage.rings.number_field
"""
QQbar decorators
Python decorators for use with the algebraic field QQbar.
AUTHORS:
- Brent Baccala (7 Jun 2018) -- handle_AA_and_QQbar
Decorators
==========
"""
from sage.misc.decorators import decorator_keywords, sage_wraps
@decorator_keywords
def handle_AA_and_QQbar(func):
    r"""
    Decorator to call a function that only accepts arguments in number fields.

    The argument list is scanned for ideals and/or polynomials over algebraic
    fields (``QQbar`` or ``AA``). If any exist, they are converted to a common
    number field before calling the function, and the results are converted back.
    Lists, dictionaries (values only), sets, and tuples are converted recursively.

    This decorator can not used with methods that depend on factoring, since
    factorization might require larger number fields than those required to
    express the polynomials. No means is provided to check whether factoring
    is being attempted by a wrapped method, and if a method invoked a library
    or subprocess (like Singular), it's hard to imagine how such a check could
    be performed.

    See https://mathoverflow.net/questions/304525 for a discussion of why a
    simple attempt to overcome this limitation didn't work.
    """

    @sage_wraps(func)
    def wrapper(*args, **kwds):
        """
        TESTS::

            sage: from sage.rings.qqbar_decorators import handle_AA_and_QQbar
            sage: @handle_AA_and_QQbar
            ....: def return_base_ring(x):
            ....:     return x.base_ring()

            sage: P.<x> = QQbar[]
            sage: return_base_ring(x)
            Rational Field

            sage: P.<y,z> = QQbar[]
            sage: return_base_ring(y)
            Rational Field

            sage: return_base_ring(ideal(y,z))
            Rational Field

        Check that :trac:`29468` is fixed::

            sage: J = QQbar['x,y'].ideal('x^2 - y')
            sage: type(J.groebner_basis())
            <class 'sage.rings.polynomial.multi_polynomial_sequence.PolynomialSequence_generic'>
            sage: J.groebner_basis().is_immutable()
            True

        ::

            sage: @handle_AA_and_QQbar
            ....: def f(x):
            ....:     print(x.ring().base_ring())
            ....:     return x

            sage: R.<x,y> = QQbar[]
            sage: s = Sequence([x, R(sqrt(2)) * y], immutable=True)
            sage: t = f(s)
            Number Field in a with defining polynomial y^2 - 2
            sage: t.ring().base_ring()
            Algebraic Field
            sage: t.is_immutable()
            True
            sage: s == t
            True
        """
        from sage.misc.flatten import flatten
        from sage.rings.polynomial.polynomial_element import Polynomial
        from sage.rings.polynomial.multi_polynomial import MPolynomial
        from sage.rings.polynomial.multi_polynomial_sequence import PolynomialSequence, is_PolynomialSequence
        from sage.rings.ideal import Ideal, Ideal_generic
        from sage.rings.abc import AlgebraicField_common

        # Fast path: no argument involves QQbar/AA, call through unchanged.
        if not any(isinstance(a, (Polynomial, MPolynomial, Ideal_generic))
                   and isinstance(a.base_ring(), AlgebraicField_common)
                   or is_PolynomialSequence(a)
                   and isinstance(a.ring().base_ring(), AlgebraicField_common) for a in args):
            return func(*args, **kwds)

        # Gather every polynomial in the arguments (flattening ideals and
        # containers) so their coefficients can be moved to one number field.
        polynomials = []

        for a in flatten(args, ltypes=(list, tuple, set)):
            if isinstance(a, Ideal_generic):
                polynomials.extend(a.gens())
            elif isinstance(a, Polynomial):
                polynomials.append(a)
            elif isinstance(a, MPolynomial):
                polynomials.append(a)

        orig_elems = flatten([p.coefficients() for p in polynomials])

        # We need minimal=True if these elements are over AA, because
        # same_field=True might trigger an exception otherwise.
        from sage.rings.qqbar import number_field_elements_from_algebraics
        numfield, new_elems, morphism = number_field_elements_from_algebraics(orig_elems, same_field=True, minimal=True)

        # algebraic coefficient -> corresponding number field element
        elem_dict = dict(zip(orig_elems, new_elems))

        def forward_map(item):
            # Recursively convert an argument into the common number field.
            if isinstance(item, Ideal_generic):
                return Ideal([forward_map(g) for g in item.gens()])
            elif isinstance(item, Polynomial):
                return item.map_coefficients(elem_dict.__getitem__, new_base_ring=numfield)
            elif isinstance(item, MPolynomial):
                return item.map_coefficients(elem_dict.__getitem__, new_base_ring=numfield)
            elif is_PolynomialSequence(item):
                return PolynomialSequence(map(forward_map, item),
                                          immutable=item.is_immutable())
            elif isinstance(item, list):
                return list(map(forward_map, item))
            elif isinstance(item, dict):
                # only values are converted; keys pass through unchanged
                return {k: forward_map(v) for k, v in item.items()}
            elif isinstance(item, tuple):
                return tuple(map(forward_map, item))
            elif isinstance(item, set):
                return set(map(forward_map, list(item)))
            else:
                return item

        def METHOD_NAME(item):
            # Inverse of forward_map: apply `morphism` to return results to
            # the original algebraic field.
            if isinstance(item, Ideal_generic):
                return Ideal([METHOD_NAME(g) for g in item.gens()])
            elif isinstance(item, Polynomial):
                return item.map_coefficients(morphism)
            elif isinstance(item, MPolynomial):
                return item.map_coefficients(morphism)
            elif is_PolynomialSequence(item):
                return PolynomialSequence(map(METHOD_NAME, item),
                                          immutable=item.is_immutable())
            elif isinstance(item, list):
                return list(map(METHOD_NAME, item))
            elif isinstance(item, tuple):
                return tuple(map(METHOD_NAME, item))
            elif isinstance(item, set):
                return set(map(METHOD_NAME, list(item)))
            else:
                return item

        args = forward_map(args)
        kwds = forward_map(kwds)

        return METHOD_NAME(func(*args, **kwds))

    return wrapper
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Find scales for quantization on the dataset."""
from __future__ import absolute_import

import logging
import math
import multiprocessing as mp

import numpy as np

import tvm
import tvm.driver
from tvm.ir import IRModule

from . import _quantize
from . import quantize
from .. import op as _op
from .. import expr as _expr
from .. import analysis as _analysis
from .. import build_module as _build_module
from ...contrib import graph_executor
from .kl_divergence import _find_scale_by_kl
def _get_profile_runtime(mod):
    """Build a graph-executor module that collects per-layer statistics.

    The main function is wrapped with a stats collector so that every
    simulated_quantize input is exposed as a graph output for profiling.
    Uses the current target if one is set, otherwise falls back to llvm/CPU.
    """
    func = mod["main"]
    func = _quantize.CreateStatsCollector(func)

    if tvm.target.Target.current():
        target = tvm.target.Target.current()
        dev = tvm.device(target.kind.name)
    else:
        target = "llvm"
        dev = tvm.device(target)

    with tvm.transform.PassContext(opt_level=3):
        lib = _build_module.build(func, target=target)
    runtime = graph_executor.GraphModule(lib["default"](dev))

    return runtime
def collect_stats(mod, dataset, chunk_by=-1):
    """Given an annotated graph, create a profile graph to collect profile data from the
    calibration dataset. This pass collects simulated_quantize op input into a tuple.
    Simulated_quantize ops are rewritten to identity mode. The tuple is the output of the profile
    graph.

    Parameters
    ----------
    mod: Module
        The simulation graph after annotation.

    dataset: Iterable[NDArray]
        The calibration dataset.

    chunk_by: optional, int
        The size of chunk to be returned in one iteration. It is meant to be
        used for reducing memory usage. If not specified, return samples for
        all layers in one chunk.

    Returns
    -------
    ret: Iterable[list of ndarray]
        List of output data of each layer, chunked by the chunk_by parameter
    """
    logging.info("collecting statistics for calibration...")
    runtime = _get_profile_runtime(mod)
    num_outputs = runtime.get_num_outputs()
    chunk_by = num_outputs if chunk_by == -1 else chunk_by

    # Iterate the whole dataset once per chunk of outputs; this trades extra
    # inference passes for a bounded memory footprint.
    for i in range(0, num_outputs, chunk_by):
        outputs = [[] for i in range(min(chunk_by, num_outputs - i))]
        for batch in dataset:
            runtime.set_input(**batch)
            runtime.run()
            for j in range(i, min(i + chunk_by, num_outputs)):
                outputs[j - i].append(runtime.get_output(j).numpy())
        # Flatten each layer's samples into a single 1-D array.
        yield [np.concatenate(output).reshape(-1) for output in outputs]
def _kl_scale(mod, dataset):
    """Compute per-layer scales via KL-divergence over the calibration set.

    Returns a stateful callback: scales are precomputed in layer order, and
    each call to the returned function yields the next layer's scale (the
    caller visits simulated_quantize ops in the same order).
    """
    cfg = quantize.current_qconfig()
    chunk_by = cfg.calibrate_chunk_by
    scales = []
    for samples in collect_stats(mod, dataset, chunk_by):
        logging.info("finding threshold with kl for calibration...")
        with mp.Pool() as pool:
            scales += list(pool.map(_find_scale_by_kl, samples))

    def func(_):
        # Ignores the op argument; returns the next precomputed scale.
        scale = scales[func.scale_idx]
        func.scale_idx += 1
        return scale

    func.scale_idx = 0

    return func
def METHOD_NAME(arr, percentile=0.99999):
    """Return the magnitude below which *percentile* of ``arr``'s values fall.

    Uses a partial sort (``np.partition``) instead of a full sort, since only
    the single order statistic at the percentile index is needed.
    """
    assert isinstance(arr, np.ndarray)
    magnitudes = np.abs(arr)
    cutoff_index = int(magnitudes.size * percentile)
    return np.partition(magnitudes, cutoff_index)[cutoff_index]
def _percentile_scale(mod, dataset):
    """Compute per-layer scales via the percentile method.

    Mirrors ``_kl_scale``: precomputes scales in layer order and returns a
    stateful callback that yields one scale per invocation.
    """
    cfg = quantize.current_qconfig()
    chunk_by = cfg.calibrate_chunk_by
    scales = []
    for samples in collect_stats(mod, dataset, chunk_by):
        logging.info("finding threshold with percentile for calibration...")
        with mp.Pool() as pool:
            scales += list(pool.map(METHOD_NAME, samples))

    def func(_):
        # Ignores the op argument; returns the next precomputed scale.
        scale = scales[func.scale_idx]
        func.scale_idx += 1
        return scale

    func.scale_idx = 0

    return func
def _set_params(mod, input_scale_func, weight_scale_func):
    """Bind dom_scale/clip_min/clip_max constants into every simulated_quantize op.

    Traverses the main function, computes a scale for each simulated_quantize
    call (weights via ``weight_scale_func``, activations via
    ``input_scale_func``), and rebinds the scale/clip variables as constants.
    """
    quantize_op = _op.get("relay.op.annotation.simulated_quantize")
    cfg = quantize.current_qconfig()
    const_params = {}

    def visit_func(expr):
        """visitor function for traverse"""
        if isinstance(expr, _expr.Call) and expr.op == quantize_op:
            _, ndom_scale, nclip_min, nclip_max = expr.args
            attrs = expr.attrs
            kind = attrs.kind
            nbit = cfg.get_nbit_by_kind(kind)
            valid_bit = nbit - attrs.sign

            # set scale
            if kind == quantize.QAnnotateKind.WEIGHT:
                assert isinstance(expr.args[0], _expr.Constant)
                scale = weight_scale_func(expr)
            else:
                scale = input_scale_func(expr)

            def _make_const(val):
                return _expr.const(val, "float32")

            valid_range = 2**valid_bit
            const_params[ndom_scale] = _make_const(scale / valid_range)
            const_params[nclip_min] = _make_const(-(valid_range - 1))
            const_params[nclip_max] = _make_const((valid_range - 1))

    main_func = mod["main"]
    _analysis.post_order_visit(main_func, visit_func)
    main_func = _expr.bind(main_func, const_params)
    # Preserve all non-main functions of the module unchanged.
    func_dict = {}
    for global_var, func in mod.functions.items():
        if global_var.name_hint != "main":
            func_dict[global_var] = func
    return IRModule.from_expr(main_func, func_dict)
# weight scale functions
def _power2_scale(sq_call):  # pylint: disable=unused-argument
    """Calculate weight scale as the nearest power of two >= the max |weight|.

    Returns 1.0 when the weight tensor is all zeros.
    """
    var = sq_call.args[0]
    assert isinstance(var, _expr.Constant)
    val = np.amax(np.abs(var.data.numpy()))
    # BUGFIX: `np.math` was an undocumented alias of the stdlib `math` module
    # and was removed in NumPy 2.0; use `math` directly (same result).
    return 2 ** math.ceil(math.log(val, 2)) if val > 0 else 1.0
def _max_scale(sq_call):
    """calculate weight scale with maximum absolute value"""
    const_node = sq_call.args[0]
    assert isinstance(const_node, _expr.Constant)
    return np.amax(np.abs(const_node.data.numpy()))
# input scale functions
def _global_scale(sq_call):  # pylint: disable=unused-argument
    # Every activation shares the single user-configured global scale;
    # the annotated call itself is ignored.
    cfg = quantize.current_qconfig()
    return cfg.global_scale
def calibrate(dataset=None):
    """The calibrate procedure will try to calculate the content of
    dom_scale, nbit, clip_min, clip_max for every `simulated_quantize`
    operator.

    Parameters
    ---------
    dataset: Optional[Iterable[NDArray]]
        The calibration dataset.

    Returns
    -------
    ret: Function
        The module pass function.
    """

    def wrapped_func(mod, _):
        """make transform.module pass happy"""
        cfg = quantize.current_qconfig()

        # Pick the activation-scale strategy; kl_divergence and percentile
        # both require a calibration dataset, global_scale does not.
        if cfg.calibrate_mode == "kl_divergence":
            input_scale_func = _kl_scale(mod, dataset)
        elif cfg.calibrate_mode == "global_scale":
            input_scale_func = _global_scale
        elif cfg.calibrate_mode == "percentile":
            input_scale_func = _percentile_scale(mod, dataset)
        else:
            raise ValueError(f"Unknown calibrate mode {cfg.calibrate_mode}")

        # Pick the weight-scale strategy.
        if cfg.weight_scale == "max":
            weight_scale_func = _max_scale
        elif cfg.weight_scale == "power2":
            weight_scale_func = _power2_scale
        else:
            raise ValueError(f"Unknown weight scale mode {cfg.weight_scale}")

        return _set_params(mod, input_scale_func, weight_scale_func)

    return wrapped_func
import os
import pathlib
import sys
import libmambapy
from .utils import get_index, load_channels
__all__ = ["MambaSolver", "create", "install"]
class MambaSolver:
    """Wrapper around a libmambapy Pool that solves package specs against the
    given channels plus a higher-priority local channel (``output_folder``)."""

    def __init__(self, channels, platform, context, output_folder=None):
        self.channels = channels
        self.platform = platform
        self.context = context
        self.output_folder = output_folder or "local"
        self.pool = libmambapy.Pool()
        self.repos = []
        self.index = load_channels(
            self.pool, self.channels, self.repos, platform=platform
        )

        self.local_index = []
        self.local_repos = {}
        # load local repo, too
        self.replace_channels()

    def METHOD_NAME(self, prefix):
        # Register packages already installed in `prefix` (plus virtual
        # packages) so the solver can account for the existing environment.
        prefix_data = libmambapy.PrefixData(prefix)
        vp = libmambapy.get_virtual_packages()
        prefix_data.add_packages(vp)
        repo = libmambapy.Repo(self.pool, prefix_data)
        repo.set_installed()

    def replace_channels(self):
        """Re-read the local channel and rebuild its repos with top priority."""
        self.local_index = get_index(
            (self.output_folder,), platform=self.platform, prepend=False
        )

        for _, v in self.local_repos.items():
            v.clear(True)

        # Priorities above all remote channels so local packages win.
        start_prio = len(self.channels) + len(self.index)
        for subdir, channel in self.local_index:
            if not subdir.loaded():
                continue

            # support new mamba
            if isinstance(channel, dict):
                channelstr = channel["url"]
                channelurl = channel["url"]
            else:
                channelstr = str(channel)
                channelurl = channel.url(with_credentials=True)

            cp = subdir.cache_path()
            if cp.endswith(".solv"):
                # drop the stale binary cache and use the JSON one instead
                os.remove(subdir.cache_path())
                cp = cp.replace(".solv", ".json")

            self.local_repos[channelstr] = libmambapy.Repo(
                self.pool, channelstr, cp, channelurl
            )
            self.local_repos[channelstr].set_priority(start_prio, 0)
            start_prio -= 1

    def solve(self, specs, pkg_cache_path=None):
        """Solve given a set of specs.

        Parameters
        ----------
        specs : list of str
            A list of package specs. You can use `conda.models.match_spec.MatchSpec`
            to get them to the right form by calling
            `MatchSpec(mypec).conda_build_form()`

        Returns
        -------
        transaction : libmambapy.Transaction
            The mamba transaction.

        Raises
        ------
        RuntimeError :
            If the solver did not find a solution.
        """
        solver_options = [(libmambapy.SOLVER_FLAG_ALLOW_DOWNGRADE, 1)]
        api_solver = libmambapy.Solver(self.pool, solver_options)
        _specs = specs

        api_solver.add_jobs(_specs, libmambapy.SOLVER_INSTALL)
        success = api_solver.try_solve()

        if not success:
            # Build a human-readable report of the unsatisfiable specs.
            error_string = "Mamba failed to solve:\n"
            for s in _specs:
                error_string += f" - {s}\n"
            error_string += "\nwith channels:\n"
            for c in self.channels:
                error_string += f" - {c}\n"
            error_string += api_solver.explain_problems()
            print(error_string)
            raise RuntimeError("Solver could not find solution." + error_string)

        if pkg_cache_path is None:
            # use values from conda
            pkg_cache_path = self.context.pkgs_dirs

        package_cache = libmambapy.MultiPackageCache(pkg_cache_path)
        return libmambapy.Transaction(api_solver, package_cache)
def install(
    env_name: str,
    specs: tuple = (),
    channels: tuple = (),
    target_platform: str = None,
    base_prefix: str = None,
):
    """Install packages in a given environment.

    Arguments
    ---------
    env_name : str
        The name of the environment where to install the packages.
    specs : tuple of str
        The list of spec strings e.g. ['xeus-python', 'matplotlib=3'].
    channels : tuple of str
        The channels from which to pull packages e.g. ['default', 'conda-forge'].
    target_platform : str
        The target platform for the environment (e.g. 'linux-64');
        passed through to the solver.
    base_prefix : str
        The base prefix where to create the environment, defaults to the current base prefix.

    Raises
    ------
    RuntimeError :
        If the solver did not find a solution or if the installation failed.
    """
    if base_prefix is None:
        # Prefer the active mamba root; fall back to the running interpreter.
        base_prefix = os.environ.get("MAMBA_ROOT_PREFIX", sys.prefix)
    base_prefix = pathlib.Path(base_prefix)
    prefix = base_prefix / "envs" / env_name
    # Ensure the env metadata and package-cache directories exist before
    # executing the transaction.
    (prefix / "conda-meta").mkdir(parents=True, exist_ok=True)
    (base_prefix / "pkgs").mkdir(parents=True, exist_ok=True)
    context = libmambapy.Context()
    context.prefix_params.target_prefix = str(prefix)
    context.pkgs_dirs = [str(base_prefix / "pkgs")]
    solver = MambaSolver(channels, target_platform, context)
    transaction = solver.solve(specs)
    return transaction.execute(libmambapy.PrefixData(str(prefix)))
def create(
    env_name: str,
    specs: tuple = (),
    channels: tuple = (),
    target_platform: str = None,
    base_prefix: str = None,
):
    """Create a mamba environment.

    Creating an environment is identical to installing into a not-yet-existing
    one, so this delegates directly to :func:`install`.

    Arguments
    ---------
    env_name : str
        The name of the environment.
    specs : tuple of str
        The list of spec strings e.g. ['xeus-python', 'matplotlib=3'].
    channels : tuple of str
        The channels from which to pull packages e.g. ['default', 'conda-forge'].
    target_platform : str
        The target platform for the environment.
    base_prefix : str
        The base prefix where to create the environment, defaults to the current base prefix.

    Raises
    ------
    RuntimeError :
        If the solver did not find a solution or if the installation failed.
    """
    return install(
        env_name=env_name,
        specs=specs,
        channels=channels,
        target_platform=target_platform,
        base_prefix=base_prefix,
    )
import abc
import colorlog
import contextlib
import inspect
import io
import logging
import sys
import traceback
from magma.backend.util import make_relative
from magma.common import Stack
from magma.config import config, EnvConfig
# Environment-driven configuration knobs for magma logging.
config._register(
    log_stream=EnvConfig("MAGMA_LOG_STREAM", "stderr"),
    log_level=EnvConfig("MAGMA_LOG_LEVEL", "INFO"),
    include_traceback=EnvConfig("MAGMA_INCLUDE_WIRE_TRACEBACK", False, bool),
    traceback_limit=EnvConfig("MAGMA_ERROR_TRACEBACK_LIMIT", 5, int),
)
# Module-level stacks: buffers of staged (deferred) log records, and the
# currently installed log capturers (innermost capturer on top).
_staged_logs_stack = Stack()
_log_capturer_stack = Stack()
def METHOD_NAME(string):
    """Wrap *string* in ANSI escape codes so terminals render it bold."""
    bold_on = "\033[1m"
    reset = "\033[0m"
    return bold_on + string + reset
def _get_source_line(filename, lineno):
with open(filename, "r") as f:
return f.readlines()[lineno - 1]
def _attach_debug_info(msg, debug_info):
    """Prefix *msg* with bolded ``file:line`` info and echo the source line.

    *debug_info* carries ``.filename`` and ``.lineno`` attributes; the
    referenced source line is appended below the message, or a placeholder
    when the file cannot be read.
    """
    file = debug_info.filename
    line = debug_info.lineno
    line_info = METHOD_NAME(f"{make_relative(file)}:{line}")
    msg = f"{line_info}: {msg}"
    try:
        source = _get_source_line(file, line).rstrip("\n")
        source = f">> {source}"
    except FileNotFoundError:
        # BUG FIX: message previously read "Could not file file".
        source = f"(Could not find file {file})"
    msg = f"{msg}\n{source}"
    return msg
def _attach_traceback(msg, frame_selector, limit):
    """
    Attaches traceback string to @msg and returns new string.

    @frame_selector is a function which takes a list of stack frames and selects
    one. For example, it could select the frame based on an index, or based on
    the function names.

    @limit caps the number of frames printed (see traceback.print_stack).
    """
    # Select the anchor frame from the current call stack, then render the
    # stack leading to it into an in-memory buffer.
    frame = frame_selector(inspect.stack()).frame
    with io.StringIO() as io_:
        traceback.print_stack(f=frame, limit=limit, file=io_)
        tb = io_.getvalue()
    msg = f"{msg}\n{tb}"
    return msg
def _frame_selector(frames):
    # Default selector used by _raw_log: index 3 skips the logging-internal
    # frames (this selector, _attach_traceback, _raw_log) so the traceback
    # starts at the original logging call site — confirm if call depth changes.
    return frames[3]
def _get_additional_kwarg(kwargs, key):
try:
value = kwargs.pop(key)
return value
except KeyError:
return None
def get_staged_logs_stack() -> Stack:
    """Return the module-level stack of staged-log buffers."""
    # `global` is not strictly required for a read, kept for explicitness.
    global _staged_logs_stack
    return _staged_logs_stack
class _MagmaLogger(logging.Logger):
    """
    Derivative of logging.Logger class, with two additional keyword args:
    * 'debug_info': Tuple of (file_name, line_no). If 'debug_info' is included,
      this source-level information is logged along with the message.
    * 'include_traceback': If True, a traceback is printed along with the
      message.

    Records are normally *staged* (buffered on the staged-logs stack) and
    emitted later by flush(); setting ``raw`` bypasses staging.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # When True, _log emits immediately instead of staging.
        self._raw = False

    @property
    def raw(self) -> bool:
        return self._raw

    @raw.setter
    def raw(self, raw: bool):
        self._raw = raw

    @contextlib.contextmanager
    def as_raw(self):
        """Temporarily force immediate (non-staged) emission."""
        prev_raw = self.raw
        self.raw = True
        try:
            yield self
        finally:
            self.raw = prev_raw

    def _log(self, level, msg, args, **kwargs):
        # Overrides logging.Logger._log: stage the record if a staging scope
        # is active, otherwise emit it right away.
        if not self.raw and self._staged_log(level, msg, args, **kwargs):
            return
        self._raw_log(level, msg, args, **kwargs)

    def _staged_log(self, level, msg, args, **kwargs) -> bool:
        """Buffer the record on the current staging scope; False if none."""
        staged_logs_stack = get_staged_logs_stack()
        try:
            staged_logs = staged_logs_stack.peek()
        except IndexError:
            # No active staging scope.
            return False
        staged_logs.append((self, level, msg, args, kwargs))
        return True

    def _capture_log(self, level, msg, args, **kwargs):
        # Forward the record to the active log capturer, if any.
        try:
            log_capturer = get_log_capturer()
        except IndexError:
            return
        log_capturer.add_log((level, msg, args, kwargs))

    def _raw_log(self, level, msg, args, **kwargs):
        # Consume magma-specific kwargs before handing off to logging.Logger.
        debug_info = _get_additional_kwarg(kwargs, "debug_info")
        if debug_info:
            msg = _attach_debug_info(msg, debug_info)
        include_traceback = _get_additional_kwarg(kwargs, "include_traceback")
        if include_traceback or config.include_traceback:
            msg = _attach_traceback(
                msg, _frame_selector, config.traceback_limit)
        self._capture_log(level, msg, args, **kwargs)
        super()._log(level, msg, args, **kwargs)
# Set logging class to _MagmaLogger to override logging behavior. Also, setup
# root logger parameters.
logging.setLoggerClass(_MagmaLogger)
# Resolve the configured stream name ("stderr"/"stdout") to the stream object.
_log_stream = getattr(sys, config.log_stream)
_root_logger = logging.getLogger("magma")
_handler = colorlog.StreamHandler(_log_stream)
_handler.setFormatter(colorlog.ColoredFormatter(
    '%(log_color)s%(levelname)s%(reset)s:%(name)s:%(message)s'))
_root_logger.addHandler(_handler)
_root_logger.setLevel(config.log_level)
def root_logger():
    """Return the package-level ("magma") logger instance."""
    name = "magma"
    return logging.getLogger(name)
# NOTE(rsetaluri): For some reason the following code which uses
# contextlib.contextmanager results in the context manager being entered into
# twice. It may be cached somewhere in the pipeline.
#
# @contextlib.contextmanager
# def logging_level(level):
# root = root_logger()
# prev_level = root.level
# root.setLevel(level)
# try:
# yield
# finally:
# root.setLevel(prev_level)
class logging_level:
    """Context manager that temporarily sets the magma root logger's level.

    Written as a class rather than @contextlib.contextmanager deliberately —
    see the NOTE above about the decorator-based version entering twice.
    """

    def __init__(self, level):
        self.level = level
        self.root = root_logger()

    def __enter__(self):
        # Remember the previous level so it can be restored on exit.
        self.prev_level = self.root.level
        self.root.setLevel(self.level)

    def __exit__(self, *_):
        self.root.setLevel(self.prev_level)
def stage_logger():
    """Open a new staging scope: subsequent log records are buffered."""
    get_staged_logs_stack().push([])


def _flush(staged_logs):
    # Replay each deferred record through its originating logger; as_raw()
    # bypasses staging so the records are not re-buffered.
    for logger, level, obj, args, kwargs in staged_logs:
        with logger.as_raw():
            logger.log(level, obj, *args, **kwargs)


def flush():
    """Pop the innermost staging scope, emit its records, and return them."""
    staged_logs = get_staged_logs_stack().pop()
    _flush(staged_logs)
    return staged_logs


def unstage_logger():
    """Close the current staging scope (alias for flush())."""
    return flush()


def flush_all():
    """Drain every staging scope on the stack, emitting all buffered records."""
    staged_logs_stack = get_staged_logs_stack()
    while staged_logs_stack:
        staged_logs = staged_logs_stack.pop()
        _flush(staged_logs)


@contextlib.contextmanager
def staged_logs():
    """Context manager: buffer logs inside the block, flush them on exit.

    Yields the (live) list of staged records for inspection.
    """
    stage_logger()
    staged_logs = get_staged_logs_stack().peek()
    try:
        yield staged_logs
    finally:
        unstage_logger()
class StagedLogRecord(abc.ABC):
    """Log record whose message args are computed lazily at format time.

    Subclasses supply args(); the template is only formatted when the record
    is finally stringified (i.e. when a staged log is flushed).
    """

    def __init__(self, tpl: str):
        # Format-string template, filled by self.args() on demand.
        self._tpl = tpl

    @abc.abstractmethod
    def args(self):
        raise NotImplementedError()

    def __str__(self):
        return self._tpl.format(*self.args())
def _get_log_capturer_stack() -> Stack:
    """Return the module-level stack of installed log capturers."""
    global _log_capturer_stack
    return _log_capturer_stack


def push_log_capturer(log_capturer):
    """Install *log_capturer* as the active (innermost) capturer."""
    _get_log_capturer_stack().push(log_capturer)


def pop_log_capturer():
    """Remove the innermost capturer."""
    _get_log_capturer_stack().pop()


def get_log_capturer():
    """Return the active capturer; raises IndexError when none is installed."""
    return _get_log_capturer_stack().peek()


@contextlib.contextmanager
def capture_logs(log_capturer):
    """Install *log_capturer* for the duration of the with-block."""
    push_log_capturer(log_capturer)
    try:
        yield
    finally:
        pop_log_capturer()
from django.db import models
from django.utils.translation import gettext_lazy as _
class DebateAdjudicatorManager(models.Manager):
    """Manager that eagerly joins the related debate to avoid N+1 queries."""
    use_for_related_fields = True

    def get_queryset(self):
        return super().get_queryset().select_related('debate')
class DebateAdjudicator(models.Model):
    """Assignment of an adjudicator to a debate, with a panel role."""

    # Panel role codes.
    TYPE_CHAIR = 'C'
    TYPE_PANEL = 'P'
    TYPE_TRAINEE = 'T'

    TYPE_CHOICES = (
        (TYPE_CHAIR, _("chair")),
        (TYPE_PANEL, _("panellist")),
        (TYPE_TRAINEE, _("trainee")),
    )

    objects = DebateAdjudicatorManager()

    debate = models.ForeignKey('draw.Debate', models.CASCADE,
        verbose_name=_("debate"))
    adjudicator = models.ForeignKey('participants.Adjudicator', models.CASCADE,
        verbose_name=_("adjudicator"))
    type = models.CharField(max_length=2, choices=TYPE_CHOICES,
        verbose_name=_("type"))
    # Nullable: None means availability has not been recorded.
    timing_confirmed = models.BooleanField(null=True, verbose_name=_("available?"))

    class Meta:
        verbose_name = _("debate adjudicator")
        verbose_name_plural = _("debate adjudicators")
        unique_together = ('debate', 'adjudicator')

    def __str__(self):
        return '{} in {} ({})'.format(self.adjudicator, self.debate, self.get_type_display())
# ==============================================================================
# Conflicts
# ==============================================================================
class AdjudicatorTeamConflict(models.Model):
    """Records that an adjudicator must not judge a particular team."""

    adjudicator = models.ForeignKey('participants.Adjudicator', models.CASCADE,
        verbose_name=_("adjudicator"))
    team = models.ForeignKey('participants.Team', models.CASCADE,
        verbose_name=_("team"))

    class Meta:
        verbose_name = _("adjudicator-team conflict")
        verbose_name_plural = _("adjudicator-team conflicts")
        unique_together = ('adjudicator', 'team')

    def __str__(self):
        return '{} with {}'.format(self.adjudicator, self.team)


class AdjudicatorAdjudicatorConflict(models.Model):
    """Records that two adjudicators must not sit on the same panel.

    The pair is stored directionally (1 → 2); uniqueness is enforced on the
    ordered pair only.
    """

    adjudicator1 = models.ForeignKey('participants.Adjudicator', models.CASCADE,
        related_name="adjudicatoradjudicatorconflict_source_set",
        verbose_name=_("adjudicator 1"))
    adjudicator2 = models.ForeignKey('participants.Adjudicator', models.CASCADE,
        related_name="adjudicatoradjudicatorconflict_target_set",
        verbose_name=_("adjudicator 2"))

    class Meta:
        verbose_name = _("adjudicator-adjudicator conflict")
        verbose_name_plural = _("adjudicator-adjudicator conflicts")
        unique_together = ('adjudicator1', 'adjudicator2')

    def __str__(self):
        return '{} with {}'.format(self.adjudicator1, self.adjudicator2)


class AdjudicatorInstitutionConflict(models.Model):
    """Records that an adjudicator conflicts with an institution."""

    adjudicator = models.ForeignKey('participants.Adjudicator', models.CASCADE,
        verbose_name=_("adjudicator"))
    institution = models.ForeignKey('participants.Institution', models.CASCADE,
        verbose_name=_("institution"))

    class Meta:
        verbose_name = _("adjudicator-institution conflict")
        verbose_name_plural = _("adjudicator-institution conflicts")
        unique_together = ('adjudicator', 'institution')

    def __str__(self):
        return '{} with {}'.format(self.adjudicator, self.institution)


class TeamInstitutionConflict(models.Model):
    """Records that a team conflicts with an institution."""

    team = models.ForeignKey('participants.Team', models.CASCADE,
        verbose_name=_("team"))
    institution = models.ForeignKey('participants.Institution', models.CASCADE,
        verbose_name=_("institution"))

    class Meta:
        verbose_name = _("team-institution conflict")
        verbose_name_plural = _("team-institution conflicts")
        unique_together = ('team', 'institution')

    def __str__(self):
        return '{} with {}'.format(self.team, self.institution)
# ==============================================================================
# Preformed panels
# ==============================================================================
class PreformedPanel(models.Model):
    """An adjudicator panel built before the draw, to be matched to a debate.

    Exposes Debate-like properties (bracket, teams, and the related
    adjudicator set) so allocation algorithms can treat panels and debates
    interchangeably.
    """

    round = models.ForeignKey('tournaments.Round', models.CASCADE,
        verbose_name=_("round"))
    importance = models.FloatField(default=0.0, choices=[(float(i), i) for i in range(-2, 3)],
        verbose_name=_("importance"))
    bracket_min = models.FloatField(default=0,
        verbose_name=_("minimum bracket"),
        help_text=_("Estimate of the lowest bracket for which this panel might be"))
    bracket_max = models.FloatField(default=0,
        verbose_name=_("maximum bracket"),
        help_text=_("Estimate of the highest bracket for which this panel might be"))
    room_rank = models.IntegerField(default=0,
        verbose_name=_("room rank"),
        help_text=_("Sequential number of panel, not used in any algorithms"))
    liveness = models.IntegerField(default=0,
        verbose_name=_("liveness"),
        help_text=_("Number of categories this room is expected to be live for"))

    class Meta:
        verbose_name = _("preformed panel")
        verbose_name_plural = _("preformed panels")

    def __str__(self):
        return "[{x.id}] {x.round.name} impt={x.importance}".format(x=self)

    @property
    def METHOD_NAME(self):
        """Used by objects that work with both Debate and PreformedPanel."""
        return self.preformedpaneladjudicator_set

    @property
    def adjudicators(self):
        """Returns an AdjudicatorAllocation containing the adjudicators on this
        panel."""
        try:
            # Cached after first construction.
            return self._adjudicators
        except AttributeError:
            # Imported here to avoid a circular import at module load time.
            from adjallocation.allocation import AdjudicatorAllocation
            self._adjudicators = AdjudicatorAllocation(self, from_db=True)
            return self._adjudicators

    # Properties to make this look like Debate for the adjudicator allocators

    @property
    def bracket(self):
        return self.bracket_max

    @property
    def teams(self):
        # A preformed panel has no teams until matched to a debate.
        return []
class PreformedPanelAdjudicator(models.Model):
    """Membership of an adjudicator in a preformed panel, with a role.

    Role codes are shared with DebateAdjudicator.TYPE_CHOICES.
    """

    panel = models.ForeignKey(PreformedPanel, models.CASCADE,
        verbose_name=_("panel"))
    adjudicator = models.ForeignKey('participants.Adjudicator', models.CASCADE,
        verbose_name=_("adjudicator"))
    type = models.CharField(max_length=2, choices=DebateAdjudicator.TYPE_CHOICES,
        verbose_name=_("type"))

    class Meta:
        verbose_name = _("preformed panel adjudicator")
        verbose_name_plural = _("preformed panel adjudicators")

    def __str__(self):
        return "[{x.id}] {x.adjudicator.name} in panel {x.panel_id}".format(x=self)
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# this file implements the ServerWrapper class, which takes care
# of all the load and save functions for misc tables associated
# with a server (such as packages, hardware, history)
#
# the server.Server class inherits this ServerWrapper class
#
from .server_hardware import Hardware
from .server_packages import Packages
from .server_history import History
from .server_suse import SuseData
from rhn.UserDictCase import UserDictCase
from spacewalk.server import rhnSQL
class ServerWrapper(Packages, Hardware, History, SuseData):
    """ This is a middle class that ties all the subclasses together, plus it
        provides a cleaner way to keep all the wrapper functions in one place.
        The main Server class is based on this one and it looks a little bit
        cleaner that way.

        Most wrappers below re-invoke the mixin methods with the server's id
        (self.server["id"]) supplied, and commit via rhnSQL where noted.
    """

    def __init__(self):
        # Case-insensitive dict of server attributes; "id" is the sysid.
        self.server = UserDictCase()
        Packages.__init__(self)
        History.__init__(self)
        Hardware.__init__(self)
        SuseData.__init__(self)

    def __repr__(self):
        return "<%s instance>" % (self.__class__,)

    def set_value(self, name, value):
        """ update a value in self.server """
        # Returns -1 on invalid input, 0 on success (C-style status codes
        # used throughout this codebase).
        if name is None or value is None:
            return -1
        self.server[name] = value
        return 0

    ###
    # PACKAGES
    ###
    def add_package(self, entry):
        """ Wrappers for the similar functions from Packages class that supplementaly
            require a valid sysid.
        """
        return Packages.add_package(self, self.server.get("id"), entry)

    def delete_package(self, entry):
        return Packages.delete_package(self, self.server.get("id"), entry)

    def dispose_packages(self):
        return Packages.dispose_packages(self, self.server["id"])

    def METHOD_NAME(self, schedule=1):
        """ wrapper for the Packages.save_packages_byid() which requires the sysid """
        ret = self.save_packages_byid(self.server["id"], schedule=schedule)
        # this function is primarily called from outside
        # so we have to commit here
        rhnSQL.commit()
        return ret

    ###
    # HARDWARE
    ###
    def delete_hardware(self):
        """ Wrappers for the similar functions from Hardware class """
        return Hardware.delete_hardware(self, self.server.get("id"))

    def save_hardware(self):
        """ wrapper for the Hardware.save_hardware_byid() which requires the sysid """
        ret = self.save_hardware_byid(self.server["id"])
        # this function is primarily called from outside
        # so we have to commit here
        rhnSQL.commit()
        return ret

    def reload_hardware(self):
        """ wrapper for the Hardware.reload_hardware_byid() which requires the sysid """
        # No commit: read-only reload.
        ret = self.reload_hardware_byid(self.server["id"])
        return ret

    ###
    # HISTORY
    ###
    def save_history(self):
        ret = self.save_history_byid(self.server["id"])
        # this function is primarily called from outside
        # so we have to commit here
        rhnSQL.commit()
        return ret

    ###
    ### SUSE PRODUCT DATA
    ###
    def save_suse_products(self):
        ret = self.save_suse_products_byid(self.server["id"])
        rhnSQL.commit()
        return ret

    def update_suse_products(self, products):
        # Stage the product list, then persist (commits internally).
        self.add_suse_products(products)
        return self.save_suse_products()
"""Inventory creation from host profiles."""
from __future__ import annotations
import shutil
import typing as t
from .config import (
EnvironmentConfig,
)
from .util import (
sanitize_host_name,
exclude_none_values,
)
from .host_profiles import (
ControllerHostProfile,
ControllerProfile,
HostProfile,
Inventory,
NetworkInventoryProfile,
NetworkRemoteProfile,
SshTargetHostProfile,
WindowsInventoryProfile,
WindowsRemoteProfile,
)
from .ssh import (
ssh_options_to_str,
)
def METHOD_NAME(args: EnvironmentConfig, path: str, controller_host: ControllerHostProfile) -> None:
    """Create and return inventory for use in controller-only integration tests."""
    # Single local testhost using the controller's own Python interpreter.
    inventory = Inventory(
        host_groups=dict(
            testgroup=dict(
                testhost=dict(
                    ansible_connection='local',
                    ansible_pipelining='yes',
                    ansible_python_interpreter=controller_host.python.path,
                ),
            ),
        ),
    )

    inventory.write(args, path)
def create_windows_inventory(args: EnvironmentConfig, path: str, target_hosts: list[HostProfile]) -> None:
    """Create and return inventory for use in target Windows integration tests."""
    first = target_hosts[0]

    if isinstance(first, WindowsInventoryProfile):
        # A user-supplied inventory file: copy it through unchanged.
        if args.explain:
            return

        try:
            shutil.copyfile(first.config.path, path)
        except shutil.SameFileError:
            # Source and destination are already the same file.
            pass

        return

    target_hosts = t.cast(list[WindowsRemoteProfile], target_hosts)
    # Block until each remote instance is reachable before building inventory.
    hosts = [(target_host, target_host.wait_for_instance().connection) for target_host in target_hosts]
    windows_hosts = {sanitize_host_name(host.config.name): host.get_inventory_variables() for host, connection in hosts}

    inventory = Inventory(
        host_groups=dict(
            windows=windows_hosts,
        ),
        # The `testhost` group is needed to support the `binary_modules_winrm` integration test.
        # The test should be updated to remove the need for this.
        extra_groups={
            'testhost:children': [
                'windows',
            ],
        },
    )

    inventory.write(args, path)
def create_network_inventory(args: EnvironmentConfig, path: str, target_hosts: list[HostProfile]) -> None:
    """Create and return inventory for use in target network integration tests."""
    first = target_hosts[0]

    if isinstance(first, NetworkInventoryProfile):
        # A user-supplied inventory file: copy it through unchanged.
        if args.explain:
            return

        try:
            shutil.copyfile(first.config.path, path)
        except shutil.SameFileError:
            pass

        return

    target_hosts = t.cast(list[NetworkRemoteProfile], target_hosts)
    # Group hosts by network platform (e.g. ios, vyos).
    host_groups: dict[str, dict[str, dict[str, t.Union[str, int]]]] = {target_host.config.platform: {} for target_host in target_hosts}

    for target_host in target_hosts:
        host_groups[target_host.config.platform][sanitize_host_name(target_host.config.name)] = target_host.get_inventory_variables()

    inventory = Inventory(
        host_groups=host_groups,
        # The `net` group was added to support platform agnostic testing. It may not longer be needed.
        # see: https://github.com/ansible/ansible/pull/34661
        # see: https://github.com/ansible/ansible/pull/34707
        extra_groups={
            'net:children': sorted(host_groups),
        },
    )

    inventory.write(args, path)
def create_posix_inventory(args: EnvironmentConfig, path: str, target_hosts: list[HostProfile], needs_ssh: bool = False) -> None:
    """Create and return inventory for use in POSIX integration tests.

    When the single target is the controller itself and SSH is not required,
    a local connection is used; otherwise the target's SSH settings are
    rendered into the inventory.
    """
    target_hosts = t.cast(list[SshTargetHostProfile], target_hosts)

    if len(target_hosts) != 1:
        # Improvement: the original raised a bare Exception() with no message.
        raise Exception(f'Exactly one POSIX target host is required, but {len(target_hosts)} were found.')

    target_host = target_hosts[0]

    if isinstance(target_host, ControllerProfile) and not needs_ssh:
        inventory = Inventory(
            host_groups=dict(
                testgroup=dict(
                    testhost=dict(
                        ansible_connection='local',
                        ansible_pipelining='yes',
                        ansible_python_interpreter=target_host.python.path,
                    ),
                ),
            ),
        )
    else:
        connections = target_host.get_controller_target_connections()

        if len(connections) != 1:
            # Improvement: the original raised a bare Exception() with no message.
            raise Exception(f'Exactly one controller-to-target connection is required, but {len(connections)} were found.')

        ssh = connections[0]

        testhost: dict[str, t.Optional[t.Union[str, int]]] = dict(
            ansible_connection='ssh',
            ansible_pipelining='yes',
            ansible_python_interpreter=ssh.settings.python_interpreter,
            ansible_host=ssh.settings.host,
            ansible_port=ssh.settings.port,
            ansible_user=ssh.settings.user,
            ansible_ssh_private_key_file=ssh.settings.identity_file,
            ansible_ssh_extra_args=ssh_options_to_str(ssh.settings.options),
        )

        if ssh.become:
            testhost.update(
                ansible_become='yes',
                ansible_become_method=ssh.become.method,
            )

        # Drop unset (None) variables so they don't appear in the inventory.
        testhost = exclude_none_values(testhost)

        inventory = Inventory(
            host_groups=dict(
                testgroup=dict(
                    testhost=testhost,
                ),
            ),
        )

    inventory.write(args, path)
# imports
import json
import time
import pickle
import scipy.misc
import skimage.io
import caffe
import numpy as np
import os.path as osp
from xml.dom import minidom
from random import shuffle
from threading import Thread
from PIL import Image
from tools import SimpleTransformer
class PascalMultilabelDataLayerSync(caffe.Layer):

    """
    This is a simple synchronous datalayer for training a multilabel model on
    PASCAL.

    NOTE: this module targets Python 2 (print statements, xrange elsewhere).
    """

    def setup(self, bottom, top):
        # Top blob order expected by the net prototxt.
        self.top_names = ['data', 'label']

        # === Read input parameters ===

        # params is a python dictionary with layer parameters.
        params = eval(self.param_str)

        # Check the parameters for validity.
        check_params(params)

        # store input as class variables
        self.batch_size = params['batch_size']

        # Create a batch loader to load the images.
        self.batch_loader = BatchLoader(params, None)

        # === reshape tops ===
        # since we use a fixed input image size, we can shape the data layer
        # once. Else, we'd have to do it in the reshape call.
        top[0].reshape(
            self.batch_size, 3, params['im_shape'][0], params['im_shape'][1])
        # Note the 20 channels (because PASCAL has 20 classes.)
        top[1].reshape(self.batch_size, 20)

        print_info("PascalMultilabelDataLayerSync", params)

    def forward(self, bottom, top):
        """
        Load data.
        """
        for itt in range(self.batch_size):
            # Use the batch loader to load the next image.
            im, multilabel = self.batch_loader.load_next_image()

            # Add directly to the caffe data layer
            top[0].data[itt, ...] = im
            top[1].data[itt, ...] = multilabel

    def reshape(self, bottom, top):
        """
        There is no need to reshape the data, since the input is of fixed size
        (rows and columns)
        """
        pass

    def backward(self, top, propagate_down, bottom):
        """
        These layers does not back propagate
        """
        pass
class BatchLoader(object):

    """
    This class abstracts away the loading of images.
    Images can either be loaded singly, or in a batch. The latter is used for
    the asyncronous data layer to preload batches while other processing is
    performed.
    """

    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.pascal_root = params['pascal_root']
        self.im_shape = params['im_shape']
        # get list of image indexes.
        list_file = params['split'] + '.txt'
        self.indexlist = [line.rstrip('\n') for line in open(
            osp.join(self.pascal_root, 'ImageSets/Main', list_file))]
        self._cur = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()

        print "BatchLoader initialized with {} images".format(
            len(self.indexlist))

    def load_next_image(self):
        """
        Load the next image in a batch.

        Returns a (preprocessed_image, multilabel_vector) pair, where the
        20-dim multilabel vector marks class presence (1.0) per PASCAL class.
        """
        # Did we finish an epoch?
        if self._cur == len(self.indexlist):
            self._cur = 0
            shuffle(self.indexlist)

        # Load an image
        index = self.indexlist[self._cur]  # Get the image index
        image_file_name = index + '.jpg'
        im = np.asarray(Image.open(
            osp.join(self.pascal_root, 'JPEGImages', image_file_name)))
        im = scipy.misc.imresize(im, self.im_shape)  # resize

        # do a simple horizontal flip as data augmentation
        # flip is randomly -1 (flip) or 1 (no-op), used as a stride.
        flip = np.random.choice(2)*2-1
        im = im[:, ::flip, :]

        # Load and prepare ground truth
        multilabel = np.zeros(20).astype(np.float32)
        anns = METHOD_NAME(index, self.pascal_root)
        for label in anns['gt_classes']:
            # in the multilabel problem we don't care how MANY instances
            # there are of each class. Only if they are present.
            # The "-1" is b/c we are not interested in the background
            # class.
            multilabel[label - 1] = 1

        self._cur += 1
        return self.transformer.preprocess(im), multilabel
def METHOD_NAME(index, pascal_root):
    """
    This code is borrowed from Ross Girshick's FAST-RCNN code
    (https://github.com/rbgirshick/fast-rcnn).
    It parses the PASCAL .xml metadata files.
    See publication for further details: (http://arxiv.org/abs/1504.08083).

    Thanks Ross!

    Returns a dict with 'boxes', 'gt_classes', 'gt_overlaps' (sparse),
    'flipped' and 'index'.
    """
    # BUG FIX: the module only imports scipy.misc; scipy submodules are not
    # loaded implicitly, so csr_matrix below needs an explicit import.
    import scipy.sparse

    classes = ('__background__',  # always index 0
               'aeroplane', 'bicycle', 'bird', 'boat',
               'bottle', 'bus', 'car', 'cat', 'chair',
               'cow', 'diningtable', 'dog', 'horse',
               'motorbike', 'person', 'pottedplant',
               'sheep', 'sofa', 'train', 'tvmonitor')
    class_to_ind = dict(zip(classes, xrange(21)))

    filename = osp.join(pascal_root, 'Annotations', index + '.xml')
    # print 'Loading: {}'.format(filename)

    def get_data_from_tag(node, tag):
        # Text content of the first child element named *tag*.
        return node.getElementsByTagName(tag)[0].childNodes[0].data

    with open(filename) as f:
        data = minidom.parseString(f.read())

    objs = data.getElementsByTagName('object')
    num_objs = len(objs)

    boxes = np.zeros((num_objs, 4), dtype=np.uint16)
    gt_classes = np.zeros((num_objs), dtype=np.int32)
    overlaps = np.zeros((num_objs, 21), dtype=np.float32)

    # Load object bounding boxes into a data frame.
    for ix, obj in enumerate(objs):
        # Make pixel indexes 0-based
        x1 = float(get_data_from_tag(obj, 'xmin')) - 1
        y1 = float(get_data_from_tag(obj, 'ymin')) - 1
        x2 = float(get_data_from_tag(obj, 'xmax')) - 1
        y2 = float(get_data_from_tag(obj, 'ymax')) - 1
        cls = class_to_ind[
            str(get_data_from_tag(obj, "name")).lower().strip()]
        boxes[ix, :] = [x1, y1, x2, y2]
        gt_classes[ix] = cls
        overlaps[ix, cls] = 1.0

    overlaps = scipy.sparse.csr_matrix(overlaps)

    return {'boxes': boxes,
            'gt_classes': gt_classes,
            'gt_overlaps': overlaps,
            'flipped': False,
            'index': index}
def check_params(params):
"""
A utility function to check the parameters for the data layers.
"""
assert 'split' in params.keys(
), 'Params must include split (train, val, or test).'
required = ['batch_size', 'pascal_root', 'im_shape']
for r in required:
assert r in params.keys(), 'Params must include {}'.format(r)
def print_info(name, params):
    """
    Output some info regarding the class
    """
    # NOTE: Python 2 print statement; this module is Python 2 only.
    print "{} initialized for split: {}, with bs: {}, im_shape: {}.".format(
        name,
        params['split'],
        params['batch_size'],
        params['im_shape'])
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import torch
from omegaconf import OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.utilities import rank_zero_only
from nemo.core import ModelPT
from nemo.utils import logging
from nemo.utils.exp_manager import ExpManagerConfig, exp_manager
class OnesDataset(torch.utils.data.Dataset):
    """A fixed-length dataset where every item is ``torch.ones(2)``."""

    def __init__(self, dataset_len):
        super().__init__()
        # Number of (identical) items this dataset reports.
        self._length = dataset_len

    def __getitem__(self, *_ignored):
        # The index is irrelevant; every sample is the same tensor.
        return torch.ones(2)

    def __len__(self):
        return self._length
class ExampleModel(ModelPT):
    """Minimal NeMo model used only to exercise DDP rank/logging plumbing."""

    def __init__(self, *args, **kwargs):
        cfg = OmegaConf.structured({})
        super().__init__(cfg, trainer=kwargs.get('trainer', None))
        # dummy parameter in order to allow DDP to execute
        self.l1 = torch.nn.modules.Linear(in_features=2, out_features=1)

    def train_dataloader(self):
        return None

    def val_dataloader(self):
        return None

    def predict_dataloader(self):
        dataset = OnesDataset(2)
        return torch.utils.data.DataLoader(dataset, batch_size=2)

    def forward(self, batch):
        return batch.mean()

    def validation_step(self, batch, batch_idx):
        loss = self(batch)
        # NOTE(review): self.validation_step_outputs is not initialized here;
        # presumably provided by ModelPT — confirm against the NeMo base class.
        self.validation_step_outputs.append(loss)
        return loss

    def training_step(self, batch, batch_idx):
        return self(batch)

    def list_available_models(self):
        pass

    def setup_training_data(self):
        pass

    def setup_validation_data(self):
        pass

    def on_validation_epoch_end(self):
        self.log("val_loss", torch.stack(self.validation_step_outputs).mean())
        self.validation_step_outputs.clear()  # free memory
def instantiate_multinode_ddp_if_possible():
    """Build a DDP Trainer spanning all visible GPUs, with exp_manager logging."""
    num_gpus = torch.cuda.device_count()
    ## Change logger=None to logger=False to support PTL 2.0
    trainer = Trainer(devices=num_gpus, accelerator='gpu', strategy='ddp', logger=False, enable_checkpointing=False)
    # Fixed exp dir/version so check_model_ranks can find the log files.
    exp_manager_cfg = ExpManagerConfig(exp_dir='./ddp_check/', use_datetime_version=False, version="")
    exp_manager(trainer, cfg=OmegaConf.structured(exp_manager_cfg))
    return trainer
def setup_model(trainer: Trainer):
    """Create the example model, log its rank info, and run a predict pass."""
    model = ExampleModel(trainer=trainer)
    # These log lines are parsed later by get_rank_info/check_model_ranks;
    # keep the "M.<key>:<value>" format in sync with that parser.
    logging.info(f"M.Global Rank:{model.global_rank}")
    logging.info(f"M.Local Rank:{model.local_rank}")
    logging.info(f"M.World Size:{model.trainer.world_size}")

    trainer.predict(model)
    return model
def get_rank_info(texts: list, rank_key: str) -> int:
    """Return the integer following *rank_key* in the first matching log line.

    Exits the process with status 1 when no line contains *rank_key*.
    """
    for entry in texts:
        if rank_key not in entry:
            continue
        return int(entry.rsplit(":", 1)[-1])
    print("Could not find the correct rank key !")
    exit(1)
@rank_zero_only
def check_model_ranks(model: ExampleModel):
    """Verify each rank's log file recorded the expected rank and world size.

    Exits with status 1 on mismatch (script-style error handling).
    """
    basedir = os.path.join('./ddp_check/', 'default', 'version_0')
    # NOTE(review): the same rank value fills both placeholders, i.e. this
    # assumes global rank == local rank (single-node run) — confirm.
    file_template = "nemo_log_globalrank-{rank}_localrank-{rank}.txt"

    world_size = torch.cuda.device_count()
    for rank in range(world_size):
        filename = file_template.format(rank=rank)
        filepath = os.path.join(basedir, filename)
        with open(filepath, 'r', encoding='utf-8') as f:
            texts = f.readlines()
            texts = [t.replace("\n", "") for t in texts]

        log_global_rank = get_rank_info(texts, rank_key='M.Global Rank')
        log_world_size = get_rank_info(texts, rank_key='M.World Size')

        if log_global_rank != rank:
            print("Logged global rank is not equal to trainer.global_rank !")
            exit(1)

        if log_world_size != world_size:
            print("Logged world size if not equal to trainer.world_size !")
            exit(1)
@rank_zero_only
def METHOD_NAME():
    """Remove the ./ddp_check scratch directory, if present (rank 0 only)."""
    target = './ddp_check'
    if os.path.exists(target):
        shutil.rmtree(target, ignore_errors=True)
def run_checks():
    """End-to-end DDP sanity check: clean, train/predict, verify logs, clean."""
    # Clean up any leftovers from a previous run before starting.
    METHOD_NAME()

    trainer = instantiate_multinode_ddp_if_possible()
    model = setup_model(trainer)
    check_model_ranks(model)

    print("DDP checks passed !")
    METHOD_NAME()
# Script entry point: run the DDP verification when executed directly.
if __name__ == '__main__':
    run_checks()
# Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from unittest import mock
import pytest
import torch
from lightning_utilities.core import module_available
from lightning.pytorch import LightningModule, Trainer
from lightning.pytorch.demos.boring_classes import BoringModel
from lightning.pytorch.utilities.compile import from_compiled, to_uncompiled
from tests_pytorch.conftest import mock_cuda_count
from tests_pytorch.helpers.runif import RunIf
@RunIf(dynamo=True)
@pytest.mark.skipif(sys.platform == "darwin", reason="https://github.com/pytorch/pytorch/issues/95708")
@mock.patch("lightning.pytorch.trainer.call._call_and_handle_interrupt")
def test_trainer_compiled_model(_, tmp_path, monkeypatch):
    """Trainer.fit accepts torch.compile'd modules, compiling/uncompiling
    round-trips, and unsupported strategies raise."""
    trainer_kwargs = {
        "default_root_dir": tmp_path,
        "fast_dev_run": True,
        "logger": False,
        "enable_checkpointing": False,
        "enable_model_summary": False,
        "enable_progress_bar": False,
    }
    model = BoringModel()
    compiled_model = torch.compile(model)
    assert model._compiler_ctx is compiled_model._compiler_ctx  # shared reference
    # can train with compiled model
    trainer = Trainer(**trainer_kwargs)
    trainer.fit(compiled_model)
    assert trainer.model._compiler_ctx["compiler"] == "dynamo"
    # the compiled model can be uncompiled
    to_uncompiled_model = to_uncompiled(compiled_model)
    assert model._compiler_ctx is None
    assert compiled_model._compiler_ctx is None
    assert to_uncompiled_model._compiler_ctx is None
    # the compiled model needs to be passed
    with pytest.raises(ValueError, match="required to be a compiled LightningModule"):
        to_uncompiled(to_uncompiled_model)
    # the uncompiled model can be fitted
    trainer = Trainer(**trainer_kwargs)
    trainer.fit(model)
    assert trainer.model._compiler_ctx is None
    # some strategies do not support it
    if module_available("deepspeed"):
        compiled_model = torch.compile(model)
        mock_cuda_count(monkeypatch, 2)
        trainer = Trainer(strategy="deepspeed", accelerator="cuda", **trainer_kwargs)
        with pytest.raises(RuntimeError, match="Using a compiled model is incompatible with the current strategy.*"):
            trainer.fit(compiled_model)
    # ddp does
    # NOTE(review): if deepspeed is unavailable, `compiled_model` here is the
    # instance whose compiler ctx was cleared by `to_uncompiled` above —
    # confirm this is the intended object for the ddp fit.
    trainer = Trainer(strategy="ddp", **trainer_kwargs)
    trainer.fit(compiled_model)
    # an exception is raised
    trainer = Trainer(**trainer_kwargs)
    with pytest.raises(TypeError, match="must be a `Light"):
        trainer.fit(object())
@RunIf(dynamo=True)
def test_compile_uncompile():
    """from_compiled/to_uncompiled toggle dynamo wrapping on the hook methods."""
    model = BoringModel()
    compiled_model = torch.compile(model)
    def has_dynamo(fn):
        # A dynamo-wrapped callable exposes `_torchdynamo*` attributes.
        return any(el for el in dir(fn) if el.startswith("_torchdynamo"))
    from_compiled_model = from_compiled(compiled_model)
    assert isinstance(from_compiled_model, LightningModule)
    assert from_compiled_model._compiler_ctx is not None
    assert has_dynamo(from_compiled_model.forward)
    assert has_dynamo(from_compiled_model.METHOD_NAME)
    assert has_dynamo(from_compiled_model.validation_step)
    assert has_dynamo(from_compiled_model.test_step)
    assert has_dynamo(from_compiled_model.predict_step)
    to_uncompiled_model = to_uncompiled(model)
    assert to_uncompiled_model._compiler_ctx is None
    assert to_uncompiled_model.forward == model.forward
    assert to_uncompiled_model.METHOD_NAME == model.METHOD_NAME
    assert to_uncompiled_model.validation_step == model.validation_step
    assert to_uncompiled_model.test_step == model.test_step
    assert to_uncompiled_model.predict_step == model.predict_step
    assert not has_dynamo(to_uncompiled_model.forward)
    assert not has_dynamo(to_uncompiled_model.METHOD_NAME)
    assert not has_dynamo(to_uncompiled_model.validation_step)
    assert not has_dynamo(to_uncompiled_model.test_step)
    assert not has_dynamo(to_uncompiled_model.predict_step)
@pytest.mark.skipif(sys.platform == "darwin", reason="https://github.com/pytorch/pytorch/issues/95708")
@RunIf(dynamo=True)
def test_trainer_compiled_model_that_logs(tmp_path):
    """`self.log` still populates `callback_metrics` when the module is compiled."""
    class MyModel(BoringModel):
        def METHOD_NAME(self, batch, batch_idx):
            loss = self.step(batch)
            self.log("loss", loss)
            return loss
    model = MyModel()
    compiled_model = torch.compile(model)
    trainer = Trainer(
        default_root_dir=tmp_path,
        fast_dev_run=True,
        enable_checkpointing=False,
        enable_model_summary=False,
        enable_progress_bar=False,
    )
    trainer.fit(compiled_model)
    assert set(trainer.callback_metrics) == {"loss"}
@pytest.mark.skipif(sys.platform == "darwin", reason="https://github.com/pytorch/pytorch/issues/95708")
@RunIf(dynamo=True)
def test_trainer_compiled_model_test(tmp_path):
    """`Trainer.test` runs with a ``torch.compile``-wrapped module."""
    compiled_model = torch.compile(BoringModel())
    trainer = Trainer(
        default_root_dir=tmp_path,
        fast_dev_run=True,
        enable_checkpointing=False,
        enable_model_summary=False,
        enable_progress_bar=False,
    )
    trainer.test(compiled_model)
6,589 | present | """
Management of PostgreSQL databases
==================================
The postgres_database module is used to create and manage Postgres databases.
Databases can be set as either absent or present
.. code-block:: yaml
frank:
postgres_database.present
"""
def __virtual__():
    """
    Only load if the postgres module is present
    """
    if "postgres.user_exists" in __salt__:
        return True
    return (
        False,
        "Unable to load postgres module. Make sure `postgres.bins_dir` is set.",
    )
def METHOD_NAME(
    name,
    tablespace=None,
    encoding=None,
    lc_collate=None,
    lc_ctype=None,
    owner=None,
    owner_recurse=False,
    template=None,
    user=None,
    maintenance_db=None,
    db_password=None,
    db_host=None,
    db_port=None,
    db_user=None,
):
    """
    Ensure that the named database is present with the specified properties.
    For more information about all of these options see man createdb(1)

    name
        The name of the database to manage

    tablespace
        Default tablespace for the database

    encoding
        The character encoding scheme to be used in this database

    lc_collate
        The LC_COLLATE setting to be used in this database

    lc_ctype
        The LC_CTYPE setting to be used in this database

    owner
        The username of the database owner

    owner_recurse
        Recurse owner change to all relations in the database

    template
        The template database from which to build this database

    user
        System user all operations should be performed on behalf of

    db_user
        database username if different from config or default

    db_password
        user password if any password for a specified user

    db_host
        Database host if different from config or default

    db_port
        Database port if different from config or default

    .. versionadded:: 0.17.0
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Database {} is already present".format(name),
    }
    db_args = {
        "maintenance_db": maintenance_db,
        "runas": user,
        "host": db_host,
        "user": db_user,
        "port": db_port,
        "password": db_password,
    }
    dbs = __salt__["postgres.db_list"](**db_args)
    db_params = dbs.get(name, {})
    # Already present with every requested (checkable) parameter: no-op.
    # NOTE(review): db_params.get("Encoding").lower() raises AttributeError if
    # the Encoding column is missing — assumes db_list always provides it.
    if name in dbs and all(
        (
            db_params.get("Tablespace") == tablespace if tablespace else True,
            (
                db_params.get("Encoding").lower() == encoding.lower()
                if encoding
                else True
            ),
            db_params.get("Collate") == lc_collate if lc_collate else True,
            db_params.get("Ctype") == lc_ctype if lc_ctype else True,
            db_params.get("Owner") == owner if owner else True,
        )
    ):
        return ret
    # Encoding/collation/ctype are fixed at creation time and cannot be
    # altered on an existing database: fail explicitly.
    elif name in dbs and any(
        (
            db_params.get("Encoding").lower() != encoding.lower()
            if encoding
            else False,
            db_params.get("Collate") != lc_collate if lc_collate else False,
            db_params.get("Ctype") != lc_ctype if lc_ctype else False,
        )
    ):
        ret[
            "comment"
        ] = "Database {} has wrong parameters which couldn't be changed on fly.".format(
            name
        )
        ret["result"] = False
        return ret
    # The database is not present, make it!
    # Dry run: report what would happen without touching the server.
    if __opts__["test"]:
        ret["result"] = None
        if name not in dbs:
            ret["comment"] = "Database {} is set to be created".format(name)
        else:
            ret[
                "comment"
            ] = "Database {} exists, but parameters need to be changed".format(name)
        return ret
    # Create a missing database, or alter the mutable parameters
    # (tablespace/owner) of an existing one.
    if name not in dbs and __salt__["postgres.db_create"](
        name,
        tablespace=tablespace,
        encoding=encoding,
        lc_collate=lc_collate,
        lc_ctype=lc_ctype,
        owner=owner,
        template=template,
        **db_args
    ):
        ret["comment"] = "The database {} has been created".format(name)
        ret["changes"][name] = "Present"
    elif name in dbs and __salt__["postgres.db_alter"](
        name, tablespace=tablespace, owner=owner, owner_recurse=owner_recurse, **db_args
    ):
        ret["comment"] = "Parameters for database {} have been changed".format(name)
        ret["changes"][name] = "Parameters changed"
    elif name in dbs:
        ret["comment"] = "Failed to change parameters for database {}".format(name)
        ret["result"] = False
    else:
        ret["comment"] = "Failed to create database {}".format(name)
        ret["result"] = False
    return ret
def absent(
    name,
    user=None,
    maintenance_db=None,
    db_password=None,
    db_host=None,
    db_port=None,
    db_user=None,
):
    """
    Ensure that the named database is absent

    name
        The name of the database to remove

    user
        System user all operations should be performed on behalf of

    db_user
        database username if different from config or default

    db_password
        user password if any password for a specified user

    db_host
        Database host if different from config or default

    db_port
        Database port if different from config or default

    .. versionadded:: 0.17.0
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}
    db_args = {
        "maintenance_db": maintenance_db,
        "runas": user,
        "host": db_host,
        "user": db_user,
        "port": db_port,
        "password": db_password,
    }
    # Nothing to do when the database does not exist.
    if not __salt__["postgres.db_exists"](name, **db_args):
        ret["comment"] = "Database {} is not present, so it cannot be removed".format(
            name
        )
        return ret
    # Dry run: report the pending removal without touching the server.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "Database {} is set to be removed".format(name)
        return ret
    if __salt__["postgres.db_remove"](name, **db_args):
        ret["comment"] = "Database {} has been removed".format(name)
        ret["changes"][name] = "Absent"
    return ret
6,590 | on model | # Class for handling Wired Networking Patches, invocation from build.py
# Copyright (C) 2020-2023, Dhinak G, Mykola Grymalyuk
from resources import constants, device_probe
from resources.build import support
from data import smbios_data, cpu_data
class BuildWiredNetworking:
"""
Build Library for Wired Networking Support
Invoke from build.py
"""
    def __init__(self, model: str, global_constants: constants.Constants, config: dict) -> None:
        """Store the build context and immediately run the wired-networking build."""
        self.model: str = model
        self.config: dict = config
        self.constants: constants.Constants = global_constants
        self.computer: device_probe.Computer = self.constants.computer
        self._build()
def _build(self) -> None:
"""
Kick off Wired Build Process
"""
# Check if Ethernet was detected, otherwise fall back to assumptions (mainly for 2011 MacBook Airs and TB Ethernet)
if not self.constants.custom_model and self.constants.computer.ethernet:
self.METHOD_NAME()
else:
self._prebuilt_assumption()
    def METHOD_NAME(self) -> None:
        """
        On-Model Hardware Detection Handling

        Walks every probed Ethernet controller and enables the matching
        legacy kext where needed for the detected chipset.
        """
        for controller in self.constants.computer.ethernet:
            if isinstance(controller, device_probe.BroadcomEthernet) and controller.chipset == device_probe.BroadcomEthernet.Chipsets.AppleBCM5701Ethernet:
                if not self.model in smbios_data.smbios_dictionary:
                    continue
                if smbios_data.smbios_dictionary[self.model]["CPU Generation"] < cpu_data.CPUGen.ivy_bridge.value:
                    # Required due to Big Sur's BCM5701 requiring VT-D support
                    # Applicable for pre-Ivy Bridge models
                    support.BuildSupport(self.model, self.constants, self.config).enable_kext("CatalinaBCM5701Ethernet.kext", self.constants.bcm570_version, self.constants.bcm570_path)
            elif isinstance(controller, device_probe.IntelEthernet):
                if not self.model in smbios_data.smbios_dictionary:
                    continue
                if smbios_data.smbios_dictionary[self.model]["CPU Generation"] < cpu_data.CPUGen.ivy_bridge.value:
                    # Apple's IOSkywalkFamily in DriverKit requires VT-D support
                    # Applicable for pre-Ivy Bridge models
                    if controller.chipset == device_probe.IntelEthernet.Chipsets.AppleIntelI210Ethernet:
                        support.BuildSupport(self.model, self.constants, self.config).enable_kext("CatalinaIntelI210Ethernet.kext", self.constants.i210_version, self.constants.i210_path)
                    elif controller.chipset == device_probe.IntelEthernet.Chipsets.AppleIntel8254XEthernet:
                        support.BuildSupport(self.model, self.constants, self.config).enable_kext("AppleIntel8254XEthernet.kext", self.constants.intel_8254x_version, self.constants.intel_8254x_path)
                    elif controller.chipset == device_probe.IntelEthernet.Chipsets.Intel82574L:
                        support.BuildSupport(self.model, self.constants, self.config).enable_kext("Intel82574L.kext", self.constants.intel_82574l_version, self.constants.intel_82574l_path)
            elif isinstance(controller, device_probe.NVIDIAEthernet):
                support.BuildSupport(self.model, self.constants, self.config).enable_kext("nForceEthernet.kext", self.constants.nforce_version, self.constants.nforce_path)
            elif isinstance(controller, device_probe.Marvell) or isinstance(controller, device_probe.SysKonnect):
                support.BuildSupport(self.model, self.constants, self.config).enable_kext("MarvelYukonEthernet.kext", self.constants.marvel_version, self.constants.marvel_path)
            # Pre-Ivy Bridge Aquantia Ethernet Patch
            if isinstance(controller, device_probe.Aquantia) and controller.chipset == device_probe.Aquantia.Chipsets.AppleEthernetAquantiaAqtion:
                if not self.model in smbios_data.smbios_dictionary:
                    continue
                if smbios_data.smbios_dictionary[self.model]["CPU Generation"] < cpu_data.CPUGen.ivy_bridge.value:
                    # NOTE(review): "AppleEthernetAbuantiaAqtion" looks misspelled
                    # vs "Aquantia", but appears to be the bundled kext's real
                    # name — confirm before "fixing".
                    support.BuildSupport(self.model, self.constants, self.config).enable_kext("AppleEthernetAbuantiaAqtion.kext", self.constants.aquantia_version, self.constants.aquantia_path)
def _prebuilt_assumption(self) -> None:
"""
Fall back to pre-built assumptions
"""
if not self.model in smbios_data.smbios_dictionary:
return
if not "Ethernet Chipset" in smbios_data.smbios_dictionary[self.model]:
return
if smbios_data.smbios_dictionary[self.model]["Ethernet Chipset"] == "Broadcom":
if smbios_data.smbios_dictionary[self.model]["CPU Generation"] < cpu_data.CPUGen.ivy_bridge.value:
# Required due to Big Sur's BCM5701 requiring VT-D support
# Applicable for pre-Ivy Bridge models
support.BuildSupport(self.model, self.constants, self.config).enable_kext("CatalinaBCM5701Ethernet.kext", self.constants.bcm570_version, self.constants.bcm570_path)
elif smbios_data.smbios_dictionary[self.model]["Ethernet Chipset"] == "Nvidia":
support.BuildSupport(self.model, self.constants, self.config).enable_kext("nForceEthernet.kext", self.constants.nforce_version, self.constants.nforce_path)
elif smbios_data.smbios_dictionary[self.model]["Ethernet Chipset"] == "Marvell":
support.BuildSupport(self.model, self.constants, self.config).enable_kext("MarvelYukonEthernet.kext", self.constants.marvel_version, self.constants.marvel_path)
elif smbios_data.smbios_dictionary[self.model]["Ethernet Chipset"] == "Intel 80003ES2LAN":
support.BuildSupport(self.model, self.constants, self.config).enable_kext("AppleIntel8254XEthernet.kext", self.constants.intel_8254x_version, self.constants.intel_8254x_path)
elif smbios_data.smbios_dictionary[self.model]["Ethernet Chipset"] == "Intel 82574L":
support.BuildSupport(self.model, self.constants, self.config).enable_kext("Intel82574L.kext", self.constants.intel_82574l_version, self.constants.intel_82574l_path |
6,591 | test linear | # /*##########################################################################
# Copyright (C) 2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################*/
import copy
import unittest
import numpy
import random
from silx.math.fit import bgtheories
from silx.math.fit.functions import sum_gauss
class TestBgTheories(unittest.TestCase):
    """Tests for the background theories in silx.math.fit.bgtheories."""
    def setUp(self):
        # Synthetic data: constant + linear background plus a broad gaussian.
        self.x = numpy.arange(100)
        self.y = 10 + 0.05 * self.x + sum_gauss(self.x, 10., 45., 15.)
        # add a very narrow high amplitude peak to test strip and snip
        self.y += sum_gauss(self.x, 100., 75., 2.)
        self.narrow_peak_index = list(self.x).index(75)
        random.seed()
    def tearDown(self):
        pass
    def testTheoriesAttrs(self):
        # Every registered theory must expose a callable ``function``.
        for theory_name in bgtheories.THEORY:
            self.assertIsInstance(theory_name, str)
            self.assertTrue(hasattr(bgtheories.THEORY[theory_name],
                                    "function"))
            self.assertTrue(hasattr(bgtheories.THEORY[theory_name].function,
                                    "__call__"))
        # Ensure legacy functions are not renamed accidentally
        self.assertTrue(
            {"No Background", "Constant", "Linear", "Strip", "Snip"}.issubset(
                set(bgtheories.THEORY)))
    def testNoBg(self):
        nobgfun = bgtheories.THEORY["No Background"].function
        self.assertTrue(numpy.array_equal(nobgfun(self.x, self.y),
                                          numpy.zeros_like(self.x)))
        # default estimate
        self.assertEqual(bgtheories.THEORY["No Background"].estimate(self.x, self.y),
                         ([], []))
    def testConstant(self):
        consfun = bgtheories.THEORY["Constant"].function
        c = random.random() * 100
        self.assertTrue(numpy.array_equal(consfun(self.x, self.y, c),
                                          c * numpy.ones_like(self.x)))
        # default estimate
        esti_par, cons = bgtheories.THEORY["Constant"].estimate(self.x, self.y)
        self.assertEqual(cons,
                         [[0, 0, 0]])
        self.assertAlmostEqual(esti_par,
                               min(self.y))
    def METHOD_NAME(self):
        # Linear theory: function must return a + b*x; the default estimate
        # should recover the offset (10) and slope (0.05) used in setUp.
        linfun = bgtheories.THEORY["Linear"].function
        a = random.random() * 100
        b = random.random() * 100
        self.assertTrue(numpy.array_equal(linfun(self.x, self.y, a, b),
                                          a + b * self.x))
        # default estimate
        esti_par, cons = bgtheories.THEORY["Linear"].estimate(self.x, self.y)
        self.assertEqual(cons,
                         [[0, 0, 0], [0, 0, 0]])
        self.assertAlmostEqual(esti_par[0], 10, places=3)
        self.assertAlmostEqual(esti_par[1], 0.05, places=3)
    def testStrip(self):
        stripfun = bgtheories.THEORY["Strip"].function
        anchors = sorted(random.sample(list(self.x), 4))
        anchors_indices = [list(self.x).index(a) for a in anchors]
        # we really want to strip away the narrow peak
        anchors_indices_copy = copy.deepcopy(anchors_indices)
        for idx in anchors_indices_copy:
            if abs(idx - self.narrow_peak_index) < 5:
                anchors_indices.remove(idx)
                anchors.remove(self.x[idx])
        width = 2
        niter = 1000
        bgtheories.THEORY["Strip"].configure(AnchorsList=anchors, AnchorsFlag=True)
        bg = stripfun(self.x, self.y, width, niter)
        # assert peak amplitude has been decreased
        self.assertLess(bg[self.narrow_peak_index],
                        self.y[self.narrow_peak_index])
        # default estimate
        for i in anchors_indices:
            self.assertEqual(bg[i], self.y[i])
        # estimated parameters are equal to the default ones in the config dict
        bgtheories.THEORY["Strip"].configure(StripWidth=7, StripIterations=8)
        esti_par, cons = bgtheories.THEORY["Strip"].estimate(self.x, self.y)
        self.assertTrue(numpy.array_equal(cons, [[3, 0, 0], [3, 0, 0]]))
        self.assertEqual(esti_par, [7, 8])
    def testSnip(self):
        snipfun = bgtheories.THEORY["Snip"].function
        anchors = sorted(random.sample(list(self.x), 4))
        anchors_indices = [list(self.x).index(a) for a in anchors]
        # we want to strip away the narrow peak, so remove nearby anchors
        anchors_indices_copy = copy.deepcopy(anchors_indices)
        for idx in anchors_indices_copy:
            if abs(idx - self.narrow_peak_index) < 5:
                anchors_indices.remove(idx)
                anchors.remove(self.x[idx])
        width = 16
        bgtheories.THEORY["Snip"].configure(AnchorsList=anchors, AnchorsFlag=True)
        bg = snipfun(self.x, self.y, width)
        # assert peak amplitude has been decreased
        self.assertLess(bg[self.narrow_peak_index],
                        self.y[self.narrow_peak_index],
                        "Snip didn't decrease the peak amplitude.")
        # anchored data must remain fixed
        for i in anchors_indices:
            self.assertEqual(bg[i], self.y[i])
        # estimated parameters are equal to the default ones in the config dict
        bgtheories.THEORY["Snip"].configure(SnipWidth=7)
        esti_par, cons = bgtheories.THEORY["Snip"].estimate(self.x, self.y)
        self.assertTrue(numpy.array_equal(cons, [[3, 0, 0]]))
        self.assertEqual(esti_par, [7])
6,592 | print deprecation message | import logging
import sublime
from . import events, util
from .const import IS_ENABLED_SWITCH
from jsonschema import validate, FormatChecker, ValidationError
logger = logging.getLogger(__name__)
class Settings:
    """This class provides global access to and management of plugin settings."""
    def __init__(self):
        # Snapshot of the cached values before the last change event.
        self._previous_state = {}
        # Lazily-filled cache of current settings values (see `get`).
        self._current_state = {}
        # Underlying sublime.Settings object, created on first access.
        self.__settings = None
        # Monotonic counter, bumped on every settings change.
        self._change_count = 0
    def load(self):
        """Load the plugin settings."""
        self.observe()
        self.on_update()
    @property
    def settings(self):
        # Lazily load the sublime settings object on first access.
        s = self.__settings
        if not s:
            s = self.__settings = sublime.load_settings("SublimeLinter.sublime-settings")
        return s
    def has(self, name):
        """Return whether the given setting exists."""
        return self.settings.has(name)
    def get(self, name, default=None):
        """Return a plugin setting, defaulting to default if not found."""
        try:
            return self._current_state[name]
        except KeyError:
            # Cache the value so repeated reads within one settings
            # "generation" are cheap and consistent.
            self._current_state[name] = current_value = self.settings.get(name, default)
            return current_value
    def has_changed(self, name):
        """Return whether `name` changed value in the last settings update."""
        current_value = self.get(name)
        try:
            old_value = self._previous_state[name]
        except KeyError:
            # Never read before the last update: nothing to compare against.
            return False
        else:
            return (old_value != current_value)
    def change_count(self):
        """Return how many times the settings have changed since startup."""
        return self._change_count
    def observe(self):
        """Observe changes."""
        self.settings.clear_on_change('sublimelinter-persist-settings')
        self.settings.add_on_change('sublimelinter-persist-settings', self.on_update)
    def unobserve(self):
        """Stop observing settings changes."""
        self.settings.clear_on_change('sublimelinter-persist-settings')
    def on_update(self):
        """
        Update state when the user settings change.
        The settings before the change are compared with the new settings.
        Depending on what changes, views will either be redrawn or relinted.
        """
        self._previous_state = self._current_state.copy()
        self._current_state.clear()
        self._change_count += 1
        events.broadcast('settings_changed', {'settings': self})
        validate_global_settings()
def get_settings_objects():
    """Yield (resource_name, parsed_settings) for every
    SublimeLinter.sublime-settings resource, skipping unreadable or
    malformed files."""
    for resource in sublime.find_resources("SublimeLinter.sublime-settings"):
        try:
            settings = util.load_json(resource, from_sl_dir=False)
        except (IOError, ValueError):
            continue
        yield resource, settings
def validate_global_settings():
    """Validate every SublimeLinter settings resource; True iff all valid."""
    return validate_settings(get_settings_objects())
def validate_settings(filename_settings_pairs, flat=False):
    """Validate settings objects against the bundled JSON schema.

    filename_settings_pairs: iterable of (resource name, settings dict).
    flat: if True, report error paths as flat 'SublimeLinter.<path>' keys
    (project-settings style) instead of nested ' > ' chains.
    Shows a message per invalid file; returns True iff every file is valid.
    """
    status_msg = "SublimeLinter - Settings invalid!"
    schema_file = "resources/settings-schema.json"
    schema = util.load_json(schema_file, from_sl_dir=True)
    window = sublime.active_window()
    good = True
    for name, settings in filename_settings_pairs:
        if settings:
            try:
                validate(settings, schema, format_checker=FormatChecker())
            except ValidationError as error:
                good = False
                if flat:
                    # NOTE(review): '.'.join assumes all path parts are str;
                    # an int (array index) would raise — confirm flat schemas
                    # contain no arrays.
                    path_to_err = '"{}": '.format(
                        'SublimeLinter.' + '.'.join(error.path)
                    )
                else:
                    path_to_err = (' > '.join(
                        repr(part)
                        for part in error.path
                        if not isinstance(part, int)  # drop array indices
                    ) + ': ') if error.path else ''
                logger.warning("Invalid settings in '{}'".format(name))
                util.show_message(
                    "Invalid settings in '{}':\n"
                    '{}{}'.format(name, path_to_err, error.message)
                )
                window.status_message(status_msg)
    if good:
        util.close_error_panel()
    return good
def validate_project_settings(filename):
    """Validate SublimeLinter settings inside a project file.

    Returns True when the file is absent, has no SublimeLinter settings, or
    all of them validate; False otherwise (including the deprecated nested
    'SublimeLinter' format, for which a deprecation message is shown).
    """
    try:
        with open(filename, 'r') as fh:
            contents = fh.read()
    except IOError:
        return True  # Very optimistic
    try:
        obj = sublime.decode_value(contents)
    except ValueError:
        # Not valid (Sublime-flavored) JSON at all.
        return False
    if 'SublimeLinter' in obj:
        # Old nested format: explain the new flat format and reject.
        METHOD_NAME(obj.get('SublimeLinter', {}))
        return False
    settings = obj.get('settings', {})
    if not settings:
        util.close_error_panel()
        return True
    # Only flat 'SublimeLinter.*' keys (minus the enable/disable switch)
    # are subject to validation.
    sl_settings = {
        key: value
        for key, value in settings.items()
        if key.startswith('SublimeLinter.') and key != IS_ENABLED_SWITCH
    }
    if not sl_settings:
        util.close_error_panel()
        return True
    invalid_top_level_keys = [
        key
        for key in sl_settings
        if not key.startswith('SublimeLinter.linters.')
    ]
    if invalid_top_level_keys:
        logger.error(
            "Invalid settings in '{}':\n"
            "Only '{}' and 'SublimeLinter.linters.*' "
            "keys are allowed. Got {}."
            .format(
                filename,
                IS_ENABLED_SWITCH,
                ', '.join(map(repr, invalid_top_level_keys))
            )
        )
        return False
    # Keys must have at least 4 dotted parts:
    # SublimeLinter.linters.<linter>.<setting>
    invalid_deep_keys = [
        key
        for key in sl_settings
        if len(key.rstrip('.').split('.')) < 4
    ]
    if invalid_deep_keys:
        logger.error(
            "Invalid settings in '{}':\n"
            "{} {} too short.".format(
                filename,
                ', '.join(map(repr, invalid_deep_keys)),
                'are' if len(invalid_deep_keys) > 1 else 'is'
            )
        )
        return False
    # Rebuild the nested dict the schema expects from the flat keys.
    deep_settings = {}  # type: ignore[var-annotated]
    for key, value in sl_settings.items():
        _, *parts = key.split('.')
        edge = deep_settings
        for part in parts[:-1]:
            edge = edge.setdefault(part, {})
        edge[parts[-1]] = value
    return validate_settings([(filename, deep_settings)], flat=True)
def METHOD_NAME(settings):
    """Show a deprecation message for the old nested project-settings
    format, including the user's settings converted to the new flat form."""
    import json
    message = """
Project settings for SublimeLinter have a new, flat format following
Sublime Text conventions. The old format has been deprecated, use this instead:
{}
"""
    # Flatten {'linters': {name: {key: value}}} into
    # 'SublimeLinter.linters.<name>.<key>' entries.
    new_settings = {}
    for linter_name, linter_settings in settings.get('linters', {}).items():
        for key, value in linter_settings.items():
            new_settings['.'.join(('SublimeLinter', 'linters', linter_name, key))] = value
    if not new_settings:
        # User has an empty SublimeLinter obj in their project file. So we
        # make up an example
        new_settings['SublimeLinter.linters.eslint.disable'] = True
    # Strip the outermost braces so the snippet pastes into a project file.
    formatted_settings = json.dumps(
        {'settings': new_settings}, sort_keys=True, indent=4
    )[1:-1]
    util.show_message(
        message.format(formatted_settings)
    )
6,593 | test empty storage | # coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for jobs.batch_jobs.exp_search_indexing_jobs."""
from __future__ import annotations
from core.constants import constants
from core.domain import search_services
from core.jobs import job_test_utils
from core.jobs.batch_jobs import exp_search_indexing_jobs
from core.jobs.types import job_run_result
from core.platform import models
from typing import Dict, List, Tuple, Type, Union
MYPY = False
if MYPY:
from mypy_imports import exp_models
from mypy_imports import search_services as platform_search_services
(exp_models,) = models.Registry.import_models([models.Names.EXPLORATION])
platform_search_services = models.Registry.import_search_services()
StatsType = List[Tuple[str, List[Dict[str, Union[bool, int, str]]]]]
class IndexExplorationsInSearchJobTests(job_test_utils.JobTestBase):
    """Tests for the job that indexes exploration summaries in search."""
    JOB_CLASS: Type[
        exp_search_indexing_jobs.IndexExplorationsInSearchJob
    ] = exp_search_indexing_jobs.IndexExplorationsInSearchJob
    def METHOD_NAME(self) -> None:
        """The job produces no output when the datastore is empty."""
        self.assert_job_output_is_empty()
    def test_indexes_non_deleted_model(self) -> None:
        exp_summary = self.create_model(
            exp_models.ExpSummaryModel,
            id='abcd',
            deleted=False,
            title='title',
            category='category',
            objective='objective',
            language_code='lang',
            community_owned=False,
            status=constants.ACTIVITY_STATUS_PUBLIC
        )
        exp_summary.update_timestamps()
        exp_summary.put()
        # The summary must be forwarded verbatim to the search index.
        add_docs_to_index_swap = self.swap_with_checks(
            platform_search_services,
            'add_documents_to_index',
            lambda _, __: None,
            expected_args=[
                ([{
                    'id': 'abcd',
                    'language_code': 'lang',
                    'title': 'title',
                    'category': 'category',
                    'tags': [],
                    'objective': 'objective',
                    'rank': 20,
                }], search_services.SEARCH_INDEX_EXPLORATIONS)
            ]
        )
        with add_docs_to_index_swap:
            self.assert_job_output_is([
                job_run_result.JobRunResult.as_stdout('SUCCESS: 1')
            ])
    def test_indexes_non_deleted_models(self) -> None:
        for i in range(5):
            exp_summary = self.create_model(
                exp_models.ExpSummaryModel,
                id='abcd%s' % i,
                deleted=False,
                title='title',
                category='category',
                objective='objective',
                language_code='lang',
                community_owned=False,
                status=constants.ACTIVITY_STATUS_PUBLIC
            )
            exp_summary.update_timestamps()
            exp_summary.put()
        add_docs_to_index_swap = self.swap_with_checks(
            platform_search_services,
            'add_documents_to_index',
            lambda _, __: None,
            expected_args=[
                (
                    [{
                        'id': 'abcd%s' % i,
                        'language_code': 'lang',
                        'title': 'title',
                        'category': 'category',
                        'tags': [],
                        'objective': 'objective',
                        'rank': 20,
                    }],
                    search_services.SEARCH_INDEX_EXPLORATIONS
                ) for i in range(5)
            ]
        )
        # Force one document per batch so each model is indexed separately.
        max_batch_size_swap = self.swap(
            exp_search_indexing_jobs.IndexExplorationsInSearchJob,
            'MAX_BATCH_SIZE', 1)
        with add_docs_to_index_swap, max_batch_size_swap:
            self.assert_job_output_is([
                job_run_result.JobRunResult.as_stdout('SUCCESS: 5')
            ])
    def test_reports_failed_when_indexing_fails(self) -> None:
        exp_summary = self.create_model(
            exp_models.ExpSummaryModel,
            id='abcd',
            deleted=False,
            title='title',
            category='category',
            objective='objective',
            language_code='lang',
            community_owned=False,
            status=constants.ACTIVITY_STATUS_PUBLIC
        )
        exp_summary.update_timestamps()
        exp_summary.put()
        # Simulate the search backend rejecting the batch.
        def add_docs_to_index_mock(
            unused_documents: Dict[str, Union[int, str, List[str]]],
            unused_index_name: str
        ) -> None:
            raise platform_search_services.SearchException('search exception')
        add_docs_to_index_swap = self.swap_with_checks(
            platform_search_services,
            'add_documents_to_index',
            add_docs_to_index_mock,
            expected_args=[
                (
                    [{
                        'id': 'abcd',
                        'language_code': 'lang',
                        'title': 'title',
                        'category': 'category',
                        'tags': [],
                        'objective': 'objective',
                        'rank': 20,
                    }],
                    search_services.SEARCH_INDEX_EXPLORATIONS
                )
            ]
        )
        with add_docs_to_index_swap:
            self.assert_job_output_is([
                job_run_result.JobRunResult.as_stderr(
                    'ERROR: "search exception": 1'
                )
            ])
    def test_skips_deleted_model(self) -> None:
        exp_summary = self.create_model(
            exp_models.ExpSummaryModel,
            id='abcd',
            deleted=True,
            title='title',
            category='category',
            objective='objective',
            language_code='lang',
            community_owned=False,
            status=constants.ACTIVITY_STATUS_PUBLIC
        )
        exp_summary.update_timestamps()
        exp_summary.put()
        # Deleted models must never reach the indexing call.
        add_docs_to_index_swap = self.swap_with_checks(
            platform_search_services,
            'add_documents_to_index',
            lambda _, __: None,
            called=False
        )
        with add_docs_to_index_swap:
            self.assert_job_output_is_empty()
    def test_skips_private_model(self) -> None:
        exp_summary = self.create_model(
            exp_models.ExpSummaryModel,
            id='abcd',
            deleted=False,
            title='title',
            category='category',
            objective='objective',
            language_code='lang',
            community_owned=False,
            status=constants.ACTIVITY_STATUS_PRIVATE
        )
        exp_summary.update_timestamps()
        exp_summary.put()
        # Private explorations are filtered out, so an empty batch is sent.
        add_docs_to_index_swap = self.swap_with_checks(
            platform_search_services,
            'add_documents_to_index',
            lambda _, __: None,
            expected_args=[([], search_services.SEARCH_INDEX_EXPLORATIONS)]
        )
        with add_docs_to_index_swap:
            self.assert_job_output_is([
                job_run_result.JobRunResult.as_stdout('SUCCESS: 1')
            ])
6,594 | test discover master sentinel down | import socket
import pytest
import redis.sentinel
from redis import exceptions
from redis.sentinel import (
MasterNotFoundError,
Sentinel,
SentinelConnectionPool,
SlaveNotFoundError,
)
@pytest.fixture(scope="module")
def master_ip(master_host):
    # Resolve the configured master hostname to an IP once per test module.
    yield socket.gethostbyname(master_host[0])
class SentinelTestClient:
    """Fake sentinel client bound to a SentinelTestCluster.

    `id` is the (host, port) tuple of the sentinel node this client
    pretends to talk to; failures are simulated via the cluster's
    nodes_down / nodes_timeout sets.
    """
    def __init__(self, cluster, id):
        self.cluster = cluster
        self.id = id
    def sentinel_masters(self):
        # May raise ConnectionError/TimeoutError if this node is marked down.
        self.cluster.connection_error_if_down(self)
        self.cluster.timeout_if_down(self)
        return {self.cluster.service_name: self.cluster.master}
    def sentinel_slaves(self, master_name):
        self.cluster.connection_error_if_down(self)
        self.cluster.timeout_if_down(self)
        if master_name != self.cluster.service_name:
            return []
        return self.cluster.slaves
    def execute_command(self, *args, **kwargs):
        # wrapper purely to validate the calls don't explode
        # NOTE(review): returns the bool_ok parser function itself, not a
        # call result — appears intentional for call validation; confirm.
        from redis.client import bool_ok
        return bool_ok
class SentinelTestCluster:
    """In-memory stand-in for a sentinel-monitored master/slave cluster.

    Tracks one master descriptor plus slave descriptors, and the sets of
    sentinel node ids that should simulate connection errors or timeouts.
    """
    def __init__(self, servisentinel_ce_name="mymaster", ip="127.0.0.1", port=6379):
        self.clients = {}
        self.master = {
            "ip": ip,
            "port": port,
            "is_master": True,
            "is_sdown": False,
            "is_odown": False,
            "num-other-sentinels": 0,
        }
        self.service_name = servisentinel_ce_name
        self.slaves = []
        self.nodes_down = set()
        self.nodes_timeout = set()
    def connection_error_if_down(self, node):
        # Simulate a dead sentinel node.
        if node.id in self.nodes_down:
            raise exceptions.ConnectionError
    def timeout_if_down(self, node):
        # Simulate an unresponsive sentinel node.
        if node.id in self.nodes_timeout:
            raise exceptions.TimeoutError
    def client(self, host, port, **kwargs):
        # Factory used in place of redis.sentinel.Redis during tests.
        return SentinelTestClient(self, (host, port))
@pytest.fixture()
def cluster(request, master_ip):
    # Swap redis.sentinel.Redis for the fake cluster's client factory so the
    # Sentinel class under test talks to SentinelTestCluster instead of a
    # real server; the original class is restored on teardown.
    def teardown():
        redis.sentinel.Redis = saved_Redis
    cluster = SentinelTestCluster(ip=master_ip)
    saved_Redis = redis.sentinel.Redis
    redis.sentinel.Redis = cluster.client
    request.addfinalizer(teardown)
    return cluster
@pytest.fixture()
def sentinel(request, cluster):
    # Two fake sentinel endpoints; the `cluster` fixture has already patched
    # redis.sentinel.Redis, so no real network connections are made.
    return Sentinel([("foo", 26379), ("bar", 26379)])
@pytest.mark.onlynoncluster
def test_discover_master(sentinel, master_ip):
    # Happy path: the master address comes straight from the fake cluster.
    address = sentinel.discover_master("mymaster")
    assert address == (master_ip, 6379)


@pytest.mark.onlynoncluster
def test_discover_master_error(sentinel):
    # Asking for an unknown service name must raise MasterNotFoundError.
    with pytest.raises(MasterNotFoundError):
        sentinel.discover_master("xxx")


@pytest.mark.onlynoncluster
def test_dead_pool(sentinel):
    # A connection checked out of the pool must remain usable after its
    # client object has been deleted.
    master = sentinel.master_for("mymaster", db=9)
    conn = master.connection_pool.get_connection("_")
    conn.disconnect()
    del master
    conn.connect()
@pytest.mark.onlynoncluster
def METHOD_NAME(cluster, sentinel, master_ip):
    # Discovery must survive the first sentinel raising ConnectionError and
    # promote the responding sentinel to the front of the list.
    # Put first sentinel 'foo' down
    cluster.nodes_down.add(("foo", 26379))
    address = sentinel.discover_master("mymaster")
    assert address == (master_ip, 6379)
    # 'bar' is now first sentinel
    assert sentinel.sentinels[0].id == ("bar", 26379)


@pytest.mark.onlynoncluster
def test_discover_master_sentinel_timeout(cluster, sentinel, master_ip):
    # Same as above, but the failing sentinel raises TimeoutError instead.
    # Put first sentinel 'foo' down
    cluster.nodes_timeout.add(("foo", 26379))
    address = sentinel.discover_master("mymaster")
    assert address == (master_ip, 6379)
    # 'bar' is now first sentinel
    assert sentinel.sentinels[0].id == ("bar", 26379)
@pytest.mark.onlynoncluster
def test_master_min_other_sentinels(cluster, master_ip):
    # A master is only trusted when enough other sentinels monitor it.
    sentinel = Sentinel([("foo", 26379)], min_other_sentinels=1)
    # min_other_sentinels
    with pytest.raises(MasterNotFoundError):
        sentinel.discover_master("mymaster")
    cluster.master["num-other-sentinels"] = 2
    address = sentinel.discover_master("mymaster")
    assert address == (master_ip, 6379)


@pytest.mark.onlynoncluster
def test_master_odown(cluster, sentinel):
    # An objectively-down master must not be returned by discovery.
    cluster.master["is_odown"] = True
    with pytest.raises(MasterNotFoundError):
        sentinel.discover_master("mymaster")


@pytest.mark.onlynoncluster
def test_master_sdown(cluster, sentinel):
    # A subjectively-down master must not be returned by discovery.
    cluster.master["is_sdown"] = True
    with pytest.raises(MasterNotFoundError):
        sentinel.discover_master("mymaster")
@pytest.mark.onlynoncluster
def test_discover_slaves(cluster, sentinel):
    # No slaves configured yet.
    assert sentinel.discover_slaves("mymaster") == []
    cluster.slaves = [
        {"ip": "slave0", "port": 1234, "is_odown": False, "is_sdown": False},
        {"ip": "slave1", "port": 1234, "is_odown": False, "is_sdown": False},
    ]
    assert sentinel.discover_slaves("mymaster") == [("slave0", 1234), ("slave1", 1234)]
    # slave0 -> ODOWN
    cluster.slaves[0]["is_odown"] = True
    assert sentinel.discover_slaves("mymaster") == [("slave1", 1234)]
    # slave1 -> SDOWN
    cluster.slaves[1]["is_sdown"] = True
    assert sentinel.discover_slaves("mymaster") == []
    cluster.slaves[0]["is_odown"] = False
    cluster.slaves[1]["is_sdown"] = False
    # node0 -> DOWN: a dead sentinel is skipped, slaves still discovered.
    cluster.nodes_down.add(("foo", 26379))
    assert sentinel.discover_slaves("mymaster") == [("slave0", 1234), ("slave1", 1234)]
    cluster.nodes_down.clear()
    # node0 -> TIMEOUT: same for a timing-out sentinel.
    cluster.nodes_timeout.add(("foo", 26379))
    assert sentinel.discover_slaves("mymaster") == [("slave0", 1234), ("slave1", 1234)]
@pytest.mark.onlynoncluster
def test_master_for(cluster, sentinel, master_ip):
    # master_for returns a client bound to the discovered master address.
    master = sentinel.master_for("mymaster", db=9)
    assert master.ping()
    assert master.connection_pool.master_address == (master_ip, 6379)
    # Use internal connection check
    master = sentinel.master_for("mymaster", db=9, check_connection=True)
    assert master.ping()


@pytest.mark.onlynoncluster
def test_slave_for(cluster, sentinel):
    cluster.slaves = [
        {"ip": "127.0.0.1", "port": 6379, "is_odown": False, "is_sdown": False}
    ]
    slave = sentinel.slave_for("mymaster", db=9)
    assert slave.ping()


@pytest.mark.onlynoncluster
def test_slave_for_slave_not_found_error(cluster, sentinel):
    # With no slaves and the master down, slave_for only fails when the
    # connection is actually used.
    cluster.master["is_odown"] = True
    slave = sentinel.slave_for("mymaster", db=9)
    with pytest.raises(SlaveNotFoundError):
        slave.ping()
@pytest.mark.onlynoncluster
def test_slave_round_robin(cluster, sentinel, master_ip):
    cluster.slaves = [
        {"ip": "slave0", "port": 6379, "is_odown": False, "is_sdown": False},
        {"ip": "slave1", "port": 6379, "is_odown": False, "is_sdown": False},
    ]
    pool = SentinelConnectionPool("mymaster", sentinel)
    rotator = pool.rotate_slaves()
    # Both slaves are served (in some order) before falling back.
    assert next(rotator) in (("slave0", 6379), ("slave1", 6379))
    assert next(rotator) in (("slave0", 6379), ("slave1", 6379))
    # Fallback to master
    assert next(rotator) == (master_ip, 6379)
    with pytest.raises(SlaveNotFoundError):
        next(rotator)


@pytest.mark.onlynoncluster
def test_ckquorum(cluster, sentinel):
    assert sentinel.sentinel_ckquorum("mymaster")


@pytest.mark.onlynoncluster
def test_flushconfig(cluster, sentinel):
    assert sentinel.sentinel_flushconfig()


@pytest.mark.onlynoncluster
def test_reset(cluster, sentinel):
    cluster.master["is_odown"] = True
    assert sentinel.sentinel_reset("mymaster")
6,595 | table dictize | # encoding: utf-8
from __future__ import annotations
import datetime
from typing import Any, Callable, Iterable
import sqlalchemy
from sqlalchemy import Table
from sqlalchemy.engine import Row # type: ignore
from sqlalchemy.orm import class_mapper
from ckan.model.core import State
from ckan.types import Context
# NOTE The functions in this file contain very generic methods for dictizing
# objects and saving dictized objects. If a specialised use is needed please do
# NOT extend these functions. Copy code from here as needed.
def legacy_dict_sort(x: dict[str, Any]) -> Any:
    """Legacy sort key for dictized objects: order by dict length first,
    then by the items view (``dict.items`` is kept deliberately to preserve
    the historical set-like comparison semantics of equal-length dicts).
    """
    # PEP 8 (E731): a named function instead of a lambda bound to a name.
    return (len(x), dict.items(x))
def METHOD_NAME(obj: Any, context: Context, **kw: Any) -> dict[str, Any]:
    '''Get any model object and represent it as a dict.

    Row objects expose their columns directly; ORM objects are introspected
    via their mapper. Versioning bookkeeping columns are skipped, datetimes
    are converted to ISO strings, and unknown types fall back to str().
    '''
    if isinstance(obj, Row):
        fields = obj.keys()
    else:
        table = class_mapper(obj.__class__).persist_selectable
        fields = [column.name for column in table.c]
    # Internal versioning/bookkeeping columns never belong in the dict.
    skipped = {'current', 'expired_timestamp', 'expired_id',
               'continuity_id', 'revision_id'}
    result_dict: dict[str, Any] = {}
    for name in fields:
        if name in skipped:
            continue
        value = getattr(obj, name)
        if value is None or isinstance(value, (dict, int, list)):
            result_dict[name] = value
        elif isinstance(value, datetime.datetime):
            result_dict[name] = value.isoformat()
        else:
            result_dict[name] = str(value)
    result_dict.update(kw)
    return result_dict
def obj_list_dictize(
        obj_list: list[Any],
        context: Context,
        sort_key: Callable[..., Any] = legacy_dict_sort
) -> list[dict[str, Any]]:
    '''Get a list of model objects and represent it as a list of dicts.

    When context['with_capacity'] is truthy, each element of *obj_list* is an
    (object, capacity) pair and the capacity is included in the result dict.
    Unless context['active'] is falsy, objects whose state is not 'active'
    are skipped.
    '''
    result_list = []
    active = context.get('active', True)
    with_capacity = context.get('with_capacity')
    for item in obj_list:
        if with_capacity:
            obj, capacity = item
        else:
            obj = item
        # Filter inactive objects *before* dictizing: the previous version
        # dictized every object and then discarded the inactive ones.
        if active and obj.state != 'active':
            continue
        if with_capacity:
            result_list.append(METHOD_NAME(obj, context, capacity=capacity))
        else:
            result_list.append(METHOD_NAME(obj, context))
    return sorted(result_list, key=sort_key)
def obj_dict_dictize(
        obj_dict: dict[str, Any],
        context: Context,
        sort_key: Callable[..., Any] = lambda x: x) -> list[dict[str, Any]]:
    '''Get a dict whose values are model objects
    and represent it as a list of dicts'''
    dictized_values = [METHOD_NAME(value, context) for value in obj_dict.values()]
    return sorted(dictized_values, key=sort_key)
def get_unique_constraints(table: Table, context: Context) -> list[list[str]]:
    '''Get a list of unique constraints for a sqlalchemy table.

    Each constraint is returned as the list of its column names.
    '''
    return [
        [column.name for column in constraint.columns]
        for constraint in table.constraints
        if isinstance(constraint, sqlalchemy.UniqueConstraint)
    ]
def table_dict_save(table_dict: dict[str, Any],
                    ModelClass: Any,
                    context: Context,
                    extra_attrs: Iterable[str] = ()) -> Any:
    '''Given a dict and a model class, update or create a sqlalchemy object.

    This will use an existing object if "id" is supplied OR if any unique
    constraints are met. e.g supplying just a tag name will get out that tag obj.
    '''
    session = context["session"]
    table = class_mapper(ModelClass).persist_selectable
    obj = None
    # First try a direct primary-key lookup.
    id = table_dict.get("id")
    if id:
        obj = session.query(ModelClass).get(id)
    if not obj:
        # Fall back to matching any unique constraint of the table.
        unique_constraints = get_unique_constraints(table, context)
        for constraint in unique_constraints:
            params = dict((key, table_dict.get(key)) for key in constraint)
            obj = session.query(ModelClass).filter_by(**params).first()
            if obj:
                if 'name' in params and getattr(
                        obj, 'state', None) == State.DELETED:
                    # A deleted row holds the unique name: rename it to its
                    # own id to free the name, then create a fresh object.
                    obj.name = obj.id
                    obj = None
                else:
                    break
    if not obj:
        obj = ModelClass()
    obj.from_dict(table_dict)
    # Copy over any extra attributes that from_dict does not handle.
    for a in extra_attrs:
        if a in table_dict:
            setattr(obj, a, table_dict[a])
    session.add(obj)
    return obj
6,596 | is hip clang | #!/usr/bin/env python3
import argparse
import os
import sys
# Make torch/utils importable so `hipify` can be loaded when this script is
# run straight from a repository checkout.
sys.path.append(
    os.path.realpath(
        os.path.join(
            __file__, os.path.pardir, os.path.pardir, os.path.pardir, "torch", "utils"
        )
    )
)

from hipify import hipify_python  # type: ignore[import]

parser = argparse.ArgumentParser(
    description="Top-level script for HIPifying, filling in most common parameters"
)
parser.add_argument(
    "--out-of-place-only",
    action="store_true",
    help="Whether to only run hipify out-of-place on source files",
)
parser.add_argument(
    "--project-directory",
    type=str,
    default="",
    help="The root of the project.",
    required=False,
)
parser.add_argument(
    "--output-directory",
    type=str,
    default="",
    help="The directory to store the hipified project",
    required=False,
)
parser.add_argument(
    "--extra-include-dir",
    type=str,
    default=[],
    nargs="+",
    help="The list of extra directories in caffe2 to hipify",
    required=False,
)
args = parser.parse_args()

# Default both the project root and the output directory to the repository
# root (two levels above this script) unless overridden on the command line.
amd_build_dir = os.path.dirname(os.path.realpath(__file__))
proj_dir = os.path.join(os.path.dirname(os.path.dirname(amd_build_dir)))
if args.project_directory:
    proj_dir = args.project_directory
out_dir = proj_dir
if args.output_directory:
    out_dir = args.output_directory
# Glob patterns (relative to the project root) that hipify should process.
includes = [
    "caffe2/operators/*",
    "caffe2/sgd/*",
    "caffe2/image/*",
    "caffe2/transforms/*",
    "caffe2/video/*",
    "caffe2/distributed/*",
    "caffe2/queue/*",
    "caffe2/contrib/aten/*",
    "binaries/*",
    "caffe2/**/*_test*",
    "caffe2/core/*",
    "caffe2/db/*",
    "caffe2/utils/*",
    "caffe2/contrib/gloo/*",
    "caffe2/contrib/nccl/*",
    "c10/cuda/*",
    "c10/cuda/test/CMakeLists.txt",
    "modules/*",
    "third_party/nvfuser/*",
    # PyTorch paths
    # Keep this synchronized with is_pytorch_file in hipify_python.py
    "aten/src/ATen/cuda/*",
    "aten/src/ATen/native/cuda/*",
    "aten/src/ATen/native/cudnn/*",
    "aten/src/ATen/native/quantized/cudnn/*",
    "aten/src/ATen/native/nested/cuda/*",
    "aten/src/ATen/native/sparse/cuda/*",
    "aten/src/ATen/native/quantized/cuda/*",
    "aten/src/ATen/native/transformers/cuda/*",
    "aten/src/THC/*",
    "aten/src/ATen/test/*",
    # CMakeLists.txt isn't processed by default, but there are a few
    # we do want to handle, so explicitly specify them
    "aten/src/THC/CMakeLists.txt",
    "torch/*",
    "tools/autograd/templates/python_variable_methods.cpp",
]
includes = [os.path.join(proj_dir, include) for include in includes]

# User-supplied extra directories are hipified recursively when they exist.
for new_dir in args.extra_include_dir:
    abs_new_dir = os.path.join(proj_dir, new_dir)
    if os.path.exists(abs_new_dir):
        abs_new_dir = os.path.join(abs_new_dir, "**/*")
        includes.append(abs_new_dir)

# Patterns excluded from hipification: already-hipified output, sources that
# compile for both CUDA and HIP, and generated files.
ignores = [
    "caffe2/operators/depthwise_3x3_conv_op_cudnn.cu",
    "caffe2/operators/pool_op_cudnn.cu",
    "*/hip/*",
    # These files are compatible with both cuda and hip
    "aten/src/ATen/core/*",
    # Correct path to generate HIPConfig.h:
    # CUDAConfig.h.in -> (amd_build) HIPConfig.h.in -> (cmake) HIPConfig.h
    "aten/src/ATen/cuda/CUDAConfig.h",
    "third_party/nvfuser/csrc/codegen.cpp",
    "third_party/nvfuser/runtime/block_reduction.cu",
    "third_party/nvfuser/runtime/block_sync_atomic.cu",
    "third_party/nvfuser/runtime/block_sync_default_rocm.cu",
    "third_party/nvfuser/runtime/broadcast.cu",
    "third_party/nvfuser/runtime/grid_reduction.cu",
    "third_party/nvfuser/runtime/helpers.cu",
    "torch/csrc/jit/codegen/fuser/cuda/resource_strings.h",
    "torch/csrc/jit/tensorexpr/ir_printer.cpp",
    # generated files we shouldn't frob
    "torch/lib/tmp_install/*",
    "torch/include/*",
]
ignores = [os.path.join(proj_dir, ignore) for ignore in ignores]
# Check if the compiler is hip-clang.
def METHOD_NAME() -> bool:
    """Return True when the ROCm install reports hip-clang as its compiler.

    Reads $HIP_PATH/lib/.hipInfo (defaulting HIP_PATH to /opt/rocm/hip);
    any OS-level failure to read the file means "not hip-clang".
    """
    info_path = os.path.join(
        os.getenv("HIP_PATH", "/opt/rocm/hip"), "lib", ".hipInfo"
    )
    try:
        with open(info_path) as info_file:
            return "HIP_COMPILER=clang" in info_file.read()
    except OSError:
        return False
# TODO Remove once gloo submodule is recent enough to contain upstream fixes.
def _patch_gloo_cmake_file(path: str, old: str, new: str) -> None:
    """Replace *old* with *new* on every line of *path*, if the file exists.

    The file is rewritten only when the substitution actually changes it;
    a "skipped"/"updated" status line is printed either way, matching the
    original (triplicated) inline code this helper replaces.
    """
    if not os.path.exists(path):
        return
    with open(path) as sources:
        lines = sources.readlines()
    newlines = [line.replace(old, new) for line in lines]
    if lines == newlines:
        print(f"{path} skipped")
    else:
        with open(path, "w") as sources:
            for line in newlines:
                sources.write(line)
        print(f"{path} updated")


if METHOD_NAME():
    # hip-clang renamed the HIP runtime library and several CMake variables;
    # fix up gloo's CMake files until the submodule picks up the upstream fix.
    _patch_gloo_cmake_file(
        "third_party/gloo/cmake/Hip.cmake", " hip_hcc ", " amdhip64 "
    )
    _patch_gloo_cmake_file(
        "third_party/gloo/cmake/Modules/Findrccl.cmake",
        "RCCL_LIBRARY",
        "RCCL_LIB_PATH",
    )
    _patch_gloo_cmake_file(
        "third_party/gloo/cmake/Dependencies.cmake",
        "HIP_HCC_FLAGS",
        "HIP_CLANG_FLAGS",
    )
# Run the actual HIPify conversion over the computed include/ignore sets.
hipify_python.hipify(
    project_directory=proj_dir,
    output_directory=out_dir,
    includes=includes,
    ignores=ignores,
    out_of_place_only=args.out_of_place_only,
    hip_clang_launch=METHOD_NAME(),
)
6,597 | setup method | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
import numpy as np
import pytest
from nemo.collections.common.tokenizers.column_coder import ColumnCodes
from nemo.collections.common.tokenizers.tabular_tokenizer import TabularTokenizer
class TestTabularTokenizer:
    """Tests for TabularTokenizer round-tripping rows through ColumnCodes."""

    def METHOD_NAME(self, test_method):
        """Per-test setup: build ColumnCodes for two float columns, one int
        column and one categorical column from seeded random example data.
        """
        column_configs = [
            {
                "name": "col_a",
                "code_type": "float",
                "args": {"code_len": 4, "base": 16, "fillall": False, "hasnan": True, "transform": 'yeo-johnson'},
            },
            {
                "name": "col_b",
                "code_type": "float",
                "args": {"code_len": 4, "base": 177, "fillall": True, "hasnan": True, "transform": 'quantile'},
            },
            {
                "name": "col_c",
                "code_type": "int",
                "args": {"code_len": 3, "base": 12, "fillall": True, "hasnan": True},
            },
            {"name": "col_d", "code_type": "category",},
        ]
        example_arrays = {}
        # Fixed seed keeps the learned codes (and the hard-coded token ids
        # asserted below) deterministic across runs.
        np.random.seed(1234)
        array = np.random.random(100)
        example_arrays['col_a'] = array
        array = np.random.random(100)
        example_arrays['col_b'] = array
        array = np.random.randint(3, 1000, 100)
        example_arrays['col_c'] = array
        ALPHABET = np.array(list(string.ascii_lowercase + ' '))
        # Random two-character strings for the categorical column.
        array = np.char.add(np.random.choice(ALPHABET, 1000), np.random.choice(ALPHABET, 1000))
        example_arrays['col_d'] = array
        self.cc = ColumnCodes.get_column_codes(column_configs, example_arrays)

    @pytest.mark.unit
    def test_tabular_tokenizer(self):
        """Round-trip full and partial rows and verify token counts,
        special-token ids and the decoded text.
        """
        tab = TabularTokenizer(self.cc, delimiter=',')
        # Two complete rows terminated by the end-of-document marker.
        text = "0.323, 0.1, 232, xy\n0.323, 0.1, 232, xy<|endoftext|>"
        r = tab.text_to_tokens(text)
        assert len(r) == 10
        assert tab.eod == 1351
        assert tab.eor == 1352
        assert tab.num_columns == 4
        assert self.cc.vocab_size == 1351
        assert tab.vocab_size == 1353
        r = tab.text_to_ids(text)
        assert (sum(self.cc.sizes) + 1) * 2 == len(r)
        assert np.array_equal(
            np.array(r[0:13]), np.array([49, 32, 29, 15, 584, 417, 305, 76, 787, 780, 773, 1313, 1352])
        )
        assert np.array_equal(
            np.array(r[13:]), np.array([49, 32, 29, 15, 584, 417, 305, 76, 787, 780, 773, 1313, 1351])
        )
        reversed_text = tab.ids_to_text(r)
        assert reversed_text == '0.3230,0.0999998,232,xy\n0.3230,0.0999998,232,xy<|endoftext|>'
        # Partial first row: only the last (categorical) column present.
        text = "xy\n0.323, 0.1, 232, xy<|endoftext|>"
        r = tab.text_to_tokens(text)
        assert len(r) == 7
        r = tab.text_to_ids(text)
        assert sum(self.cc.sizes) + 1 + 2 == len(r)
        assert np.array_equal(np.array(r[0:2]), np.array([1313, 1352]))
        assert np.array_equal(
            np.array(r[2:15]), np.array([49, 32, 29, 15, 584, 417, 305, 76, 787, 780, 773, 1313, 1351])
        )
        reversed_text = tab.ids_to_text(r)
        assert reversed_text == 'xy\n0.3230,0.0999998,232,xy<|endoftext|>'
        # Leading newline only: a single complete row.
        text = "\n0.323, 0.1, 232, xy<|endoftext|>"
        r = tab.text_to_tokens(text)
        assert len(r) == 5
        r = tab.text_to_ids(text)
        assert sum(self.cc.sizes) + 1 == len(r)
        assert np.array_equal(
            np.array(r[0:13]), np.array([49, 32, 29, 15, 584, 417, 305, 76, 787, 780, 773, 1313, 1351])
        )
        reversed_text = tab.ids_to_text(r)
        assert reversed_text == '0.3230,0.0999998,232,xy<|endoftext|>'
        # Partial first row: last two columns present.
        text = "232, xy\n0.323, 0.1, 232, xy<|endoftext|>"
        r = tab.text_to_tokens(text)
        assert len(r) == 8
        r = tab.text_to_ids(text)
        assert sum(self.cc.sizes) + 1 + 5 == len(r)
        assert np.array_equal(np.array(r[0:5]), np.array([787, 780, 773, 1313, 1352]))
        assert np.array_equal(
            np.array(r[5:18]), np.array([49, 32, 29, 15, 584, 417, 305, 76, 787, 780, 773, 1313, 1351])
        )
        reversed_text = tab.ids_to_text(r)
        assert reversed_text == '232,xy\n0.3230,0.0999998,232,xy<|endoftext|>'
        # Partial first row: last three columns present.
        text = "0.1, 232, xy\n0.323, 0.1, 232, xy<|endoftext|>"
        r = tab.text_to_tokens(text)
        assert len(r) == 9
        r = tab.text_to_ids(text)
        assert sum(self.cc.sizes) + 1 + 9 == len(r)
        assert np.array_equal(np.array(r[0:9]), np.array([584, 417, 305, 76, 787, 780, 773, 1313, 1352]))
        assert np.array_equal(
            np.array(r[9:22]), np.array([49, 32, 29, 15, 584, 417, 305, 76, 787, 780, 773, 1313, 1351])
        )
        reversed_text = tab.ids_to_text(r)
        assert reversed_text == '0.0999998,232,xy\n0.3230,0.0999998,232,xy<|endoftext|>'
6,598 | unify configs | import json
import os
import logging
from omegaconf import OmegaConf
from buildpack import util
from buildpack.databroker.config_generator.scripts.utils import (
convert_dot_field_to_dict,
get_value_for_constant,
)
from buildpack.databroker.config_generator.scripts.constants import (
ENV_VAR_RUNTIME_PREFIX,
ENV_VAR_BROKER_PREFIX,
CONSTANTS_ENV_VAR_PREFIX,
BOOTSTRAP_SERVERS_KEY,
SUPPORTED_DBS,
POSTGRESQL_MAX_TABLE_LENGTH,
NODE_COUNT_KEY,
)
from buildpack.databroker.config_generator.scripts.config_env_whitelist import (
whitelist,
)
# variables generated with this method are going to have a single key of type "a.b.c"
# they are not nested
# this will be possible in a future version of OmegaConf
def __curate_key(key, prefix, replace_underscores=True):
    """Strip a single leading occurrence of *prefix* from *key*, optionally
    converting the remaining underscores to dots (OmegaConf path syntax).
    """
    curated = key.replace(prefix, "", 1)
    return curated.replace("_", ".") if replace_underscores else curated
def __generate_source_topic_names(config):
    """Derive the raw (source) Kafka topic name for every published entity.

    Topic format: <DatabaseName>.public.<lowercased_entity_name>.private
    """
    for service in config.DataBrokerConfiguration.publishedServices:
        for entity in service.entities:
            table_name = entity.originalEntityName.replace(".", "_").lower()
            entity.rawTopic = f"{config.DatabaseName}.public.{table_name}.private"
def validate_config(complete_conf):
    """Sanity-check a merged databroker configuration.

    Raises Exception when the database type is unsupported, a published
    service has no broker-URL constant, an entity's public name exceeds
    PostgreSQL's table-name limit, or bootstrap servers are missing.
    """
    # check supported dbs
    if complete_conf.DatabaseType.lower() not in [db.lower() for db in SUPPORTED_DBS]:
        # BUG FIX: the adjacent f-strings previously joined without a space
        # ("...supported.Supported dbs...").
        raise Exception(
            f"{complete_conf.DatabaseType} is not supported. "
            f"Supported dbs: {SUPPORTED_DBS}"
        )
    # validate objectname length & constants
    for published_service in complete_conf.DataBrokerConfiguration.publishedServices:
        if not get_value_for_constant(complete_conf, published_service.brokerUrl):
            raise Exception(f"No Constants found for {published_service.brokerUrl}")
        for entity in published_service.entities:
            if len(entity.publicEntityName) > POSTGRESQL_MAX_TABLE_LENGTH:
                raise Exception(
                    f"Entity {entity.publicEntityName}'s name is too long. "
                    f"Max length of {POSTGRESQL_MAX_TABLE_LENGTH} supported"
                )
    # check if bootstrap server is empty
    if not complete_conf.bootstrap_servers:
        raise Exception("Broker URL not specified")
def METHOD_NAME(configs, database_config, parameters_replacement=None):
    """Load and merge all configuration sources, validate the result, and
    return the validated configuration object.
    """
    replacements = {} if parameters_replacement is None else parameters_replacement
    merged_conf = load_config(configs, database_config, replacements)
    validate_config(merged_conf)
    return merged_conf
def load_config(configs, database_config, parameters_replacement):
    """Merge JSON input files, env-var overrides, constants and explicit
    parameter replacements into a single OmegaConf configuration.

    :param configs: iterable of open file-like objects containing JSON
    :param database_config: dict-like database settings merged first
    :param parameters_replacement: iterable of (key, value) pairs applied last
    :raises Exception: when an input file cannot be parsed or the merge fails
    """
    loaded_json = []
    for config in configs:
        try:
            tmp_json = json.loads(config.read())
        except Exception as exception:
            # BUG FIX: this previously raised a bare string (`raise (f"...")`),
            # which itself raises TypeError; wrap it in a real exception and
            # chain the original cause.
            raise Exception(
                f"Error loading input file called {config.name}."
                f"Reason: '{exception}'"
            ) from exception
        # Special check for metadata files, if they exist the idea is to replace the
        # non existent constants with their default values
        if (
            config.name.endswith("metadata.json")
            and tmp_json["Constants"]
            and isinstance(tmp_json["Constants"], list)
        ):
            tmp_json["Constants"] = dict(
                map(
                    lambda constant: (
                        constant["Name"],
                        constant["DefaultValue"],
                    ),
                    tmp_json["Constants"],
                )
            )
        loaded_json.append(convert_dot_field_to_dict(tmp_json))
    modified_env_vars = OmegaConf.create()
    if database_config:
        modified_env_vars.update(database_config)
    # Pick up whitelisted runtime/broker environment variables as overrides.
    for prefix in [ENV_VAR_RUNTIME_PREFIX, ENV_VAR_BROKER_PREFIX]:
        env_vars = dict(
            filter(
                lambda key: key[0].startswith(prefix) and key[0] in whitelist,
                dict(os.environ).items(),
            )
        )
        for key, value in env_vars.items():
            new_key = __curate_key(key, prefix)
            OmegaConf.update(modified_env_vars, new_key, value)
    # Fetch and update any constants passed as env var
    const_env_vars = dict(
        filter(
            lambda key: key[0].startswith(CONSTANTS_ENV_VAR_PREFIX),
            dict(os.environ).items(),
        )
    )
    modified_constants = OmegaConf.create({"Constants": {}})
    for key, value in const_env_vars.items():
        new_key = key.replace(CONSTANTS_ENV_VAR_PREFIX, "", 1)
        new_key = new_key.replace("_", ".", 1)
        OmegaConf.update(modified_constants.Constants, new_key, value)
    parameters_replacement_dict = OmegaConf.create()
    for key, value in parameters_replacement:
        OmegaConf.update(parameters_replacement_dict, key, value)
    try:
        # Later sources win: files < env vars < constants < replacements.
        complete_conf = OmegaConf.merge(
            *loaded_json,
            modified_env_vars,
            modified_constants,
            parameters_replacement_dict,
        )
        bootstrap_servers = get_value_for_constant(
            complete_conf,
            complete_conf.DataBrokerConfiguration.publishedServices[0].brokerUrl,
        )
        OmegaConf.update(complete_conf, BOOTSTRAP_SERVERS_KEY, bootstrap_servers)
        if not OmegaConf.select(complete_conf, NODE_COUNT_KEY):
            complete_conf[NODE_COUNT_KEY] = 1
        __generate_source_topic_names(complete_conf)
        OmegaConf.update(
            complete_conf,
            "log_level",
            "DEBUG" if util.get_buildpack_loglevel() == logging.DEBUG else "INFO",
        )
        return complete_conf
    except Exception as exception:
        raise Exception(
            "Error while reading input config files. " f"Reason: '{exception}'"
        ) from exception
6,599 | to device | import copy
from typing import Any, cast
import srsly
from ..compat import mxnet as mx
from ..optimizers import Optimizer
from ..types import ArgsKwargs, FloatsXd
from ..util import (
convert_recursive,
get_array_module,
make_tempfile,
mxnet2xp,
xp2mxnet,
)
from .shim import Shim
class MXNetShim(Shim):
    """Interface between a MXNet model and a Thinc Model. This container is
    *not* a Thinc Model subclass itself.
    """

    def __call__(self, inputs, is_train):
        # Training path returns a real backprop callback; inference path
        # returns a no-op callback for API symmetry.
        if is_train:
            return self.begin_update(inputs)
        else:
            return self.predict(inputs), lambda a: ...

    def predict(self, inputs: ArgsKwargs) -> Any:
        """Pass inputs through to the underlying MXNet model, and return the
        output. No conversions are performed. The MXNet model is set into
        evaluation mode.
        """
        mx.autograd.set_training(train_mode=False)
        with mx.autograd.pause():
            outputs = self._model(*inputs.args, **inputs.kwargs)
        mx.autograd.set_training(train_mode=True)
        return outputs

    def begin_update(self, inputs: ArgsKwargs):
        """Pass the inputs through to the underlying MXNet model, keeping
        track of which items in the input are tensors requiring gradients.
        If the model returns a single value, it is converted into a one-element
        tuple. Return the outputs and a callback to backpropagate.
        """
        mx.autograd.set_training(train_mode=True)
        mx.autograd.set_recording(True)
        output = self._model(*inputs.args, **inputs.kwargs)

        def backprop(grads):
            # Stop recording, run MXNet's backward pass, then harvest the
            # .grad attribute from every input item that received one.
            mx.autograd.set_recording(False)
            mx.autograd.backward(*grads.args, **grads.kwargs)
            return convert_recursive(
                lambda x: hasattr(x, "grad"), lambda x: x.grad, inputs
            )

        return output, backprop

    def finish_update(self, optimizer: Optimizer):
        """Flatten all parameters and gradients into single arrays, apply the
        Thinc optimizer in one call, then scatter the updated values back to
        the MXNet parameter store and zero the gradients.
        """
        params = []
        grads = []
        shapes = []
        ctx = mx.current_context()
        for key, value in self._model.collect_params().items():
            grad = cast(FloatsXd, mxnet2xp(value.grad(ctx)))
            param = cast(FloatsXd, mxnet2xp(value.data(ctx)))
            params.append(param.ravel())
            grads.append(grad.ravel())
            shapes.append((param.size, param.shape))
        if not params:
            return
        xp = get_array_module(params[0])
        flat_params, flat_grads = optimizer(
            (self.id, "mxnet-shim"), xp.concatenate(params), xp.concatenate(grads)
        )
        start = 0
        # Iterating collect_params() again in the same order lets the
        # flattened slices line up with the shapes recorded above.
        for key, value in self._model.collect_params().items():
            size, shape = shapes.pop(0)
            param = flat_params[start : start + size].reshape(shape)
            value.set_data(xp2mxnet(param))
            value.zero_grad()
            start += size

    def copy(self, ctx: "mx.context.Context" = None):
        """Return a deep copy of the shim with weights re-loaded on *ctx*
        (defaults to the current MXNet context).
        """
        if ctx is None:
            ctx = mx.current_context()
        model_bytes = self.to_bytes()
        copied = copy.deepcopy(self)
        copied._model.initialize(ctx=ctx)
        copied.from_bytes(model_bytes)
        return copied

    def METHOD_NAME(self, device_type: str, device_id: int):
        """Move the wrapped model to 'cpu' or 'gpu'.

        NOTE(review): device_id is not used here — mx.gpu() targets the
        default GPU; confirm whether per-device placement is required.
        """
        if device_type == "cpu":
            self._model = self.copy(mx.cpu())
        elif device_type == "gpu":
            self._model = self.copy(mx.gpu())
        else:
            msg = f"Unexpected device_type: {device_type}. Try 'cpu' or 'gpu'."
            raise ValueError(msg)

    def to_bytes(self):
        """Serialize the shim config plus model weights to msgpack bytes."""
        # MXNet doesn't implement save/load without a filename
        with make_tempfile("w+b") as temp:
            self._model.save_parameters(temp.name)
            temp.seek(0)
            weights_bytes = temp.read()
        msg = {"config": self.cfg, "state": weights_bytes}
        return srsly.msgpack_dumps(msg)

    def from_bytes(self, bytes_data):
        """Restore config and weights from bytes produced by to_bytes()."""
        msg = srsly.msgpack_loads(bytes_data)
        self.cfg = msg["config"]
        self._load_params(msg["state"])
        return self

    def _load_params(self, params):
        # MXNet doesn't implement save/load without a filename :(
        with make_tempfile("w+b") as temp:
            temp.write(params)
            self._model.load_parameters(temp.name, ctx=mx.current_context())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.