id int64 0 300k | label stringlengths 1 74 ⌀ | text stringlengths 4k 8k |
|---|---|---|
4,000 | draw buttons | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import EnumProperty, IntProperty, BoolProperty
from mathutils import noise
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode
from sverchok.utils.sv_noise_utils import noise_options, PERLIN_ORIGINAL, noise_numpy_types
import numpy as np
def numpy_noise(vecs, out, out_mode, seed, noise_function, smooth, output_numpy):
    """Evaluate numpy-based noise over *vecs* and append the result to *out*.

    In 'VECTOR' mode three noise channels are sampled per point (seeds
    ``seed``, ``seed + 1``, ``seed + 2``), stacked into per-point vectors and
    rescaled from [0, 1] to [-1, 1].  Any other mode appends a single scalar
    channel unchanged.  Results are numpy arrays when *output_numpy* is true,
    plain Python lists otherwise.
    """
    points = np.array(vecs)
    if out_mode == 'VECTOR':
        channels = [noise_function(points, seed + axis, smooth) for axis in range(3)]
        scaled = 2 * np.stack(channels).T - 1
        out.append(scaled if output_numpy else scaled.tolist())
    else:
        values = noise_function(points, seed, smooth)
        out.append(values if output_numpy else values.tolist())
def mathulis_noise(vecs, out, out_mode, noise_type, noise_function, output_numpy):
    """Evaluate mathutils-based noise over *vecs* and append the result to *out*.

    'VECTOR' mode keeps the raw per-point noise vectors.  Any other mode
    reduces each noise vector to a scalar magnitude ``||n - (0, 0, 1)|| * 0.5``.
    Results are numpy arrays when *output_numpy* is true, plain Python
    containers otherwise.
    """
    sampled = [noise_function(v, noise_basis=noise_type)[:] for v in vecs]
    if out_mode == 'VECTOR':
        out.append(np.array(sampled) if output_numpy else sampled)
    else:
        shifted = np.array(sampled) - [0, 0, 1]
        magnitudes = np.linalg.norm(shifted, axis=1) * 0.5
        out.append(magnitudes if output_numpy else magnitudes.tolist())
# Build the EnumProperty items list: the mathutils-backed noise types first
# (numeric ids taken from noise_options), then the numpy implementations with
# ids offset to 100+ so the two groups cannot collide.
avail_noise = [(t[0], t[0].title().replace('_', ' '), t[0].title(), '', t[1]) for t in noise_options]
for idx, new_type in enumerate(noise_numpy_types.keys()):
    avail_noise.append((new_type, new_type.title().replace('_', ' '), new_type.title(), '', 100 + idx))
class SvNoiseNodeMK2(SverchCustomTreeNode, bpy.types.Node):
    """
    Triggers: Vector Noise
    Tooltip: Affect input verts with a noise function.

    Applies either a mathutils-based or a numpy-based noise function to the
    incoming vertices and outputs the result as vectors or scalars.
    """
    bl_idname = 'SvNoiseNodeMK2'
    bl_label = 'Vector Noise'
    bl_icon = 'FORCE_TURBULENCE'
    sv_icon = 'SV_VECTOR_NOISE'

    # Superseded node: layouts using this class can be upgraded to MK3.
    replacement_nodes = [('SvNoiseNodeMK3', None, None)]

    def changeMode(self, context):
        # Swap the single output socket type to match the selected mode.
        # NOTE(review): when a socket is replaced we return without calling
        # updateNode -- presumably replace_socket triggers the update itself;
        # confirm before relying on this.
        outputs = self.outputs
        if self.out_mode == 'SCALAR':
            if 'Noise S' not in outputs:
                outputs[0].replace_socket('SvStringsSocket', 'Noise S')
                return
        if self.out_mode == 'VECTOR':
            if 'Noise V' not in outputs:
                outputs[0].replace_socket('SvVerticesSocket', 'Noise V')
                return
        updateNode(self, context)

    out_modes = [
        ('SCALAR', 'Scalar', 'Scalar output', '', 1),
        ('VECTOR', 'Vector', 'Vector output', '', 2)]

    out_mode: EnumProperty(
        items=out_modes,
        default='VECTOR',
        description='Output type',
        update=changeMode)

    noise_type: EnumProperty(
        items=avail_noise,
        default=PERLIN_ORIGINAL,
        description="Noise type",
        update=updateNode)

    seed: IntProperty(default=0, name='Seed', update=updateNode)

    # 'smooth' and 'interpolate' only affect the numpy noise implementations
    # (see the noise_numpy_types branch in process()).
    smooth: BoolProperty(
        name='Smooth',
        description='Smooth noise',
        default=True, update=updateNode)

    interpolate: BoolProperty(
        name='Interpolate',
        description='Interpolate gradients',
        default=True, update=updateNode)

    output_numpy: BoolProperty(
        name='Output NumPy',
        description='Output NumPy arrays',
        default=False, update=updateNode)

    def sv_init(self, context):
        # Sockets created when the node is first added to a tree.
        self.inputs.new('SvVerticesSocket', 'Vertices')
        self.inputs.new('SvStringsSocket', 'Seed').prop_name = 'seed'
        self.outputs.new('SvVerticesSocket', 'Noise V')

    def METHOD_NAME(self, context, layout):
        # Node-body UI; the smooth/interpolate toggles only apply to the
        # numpy-backed noise types.
        layout.prop(self, 'out_mode', expand=True)
        layout.prop(self, 'noise_type', text="Type")
        if self.noise_type in noise_numpy_types.keys():
            row = layout.row(align=True)
            row.prop(self, 'smooth', toggle=True)
            row.prop(self, 'interpolate', toggle=True)

    def draw_buttons_ext(self, ctx, layout):
        # Sidebar UI adds the numpy-output toggle on top of the body UI.
        self.METHOD_NAME(ctx, layout)
        layout.prop(self, "output_numpy", toggle=False)

    def rclick_menu(self, context, layout):
        # Right-click menu mirrors the panel options.
        layout.prop_menu_enum(self, "out_mode")
        layout.prop_menu_enum(self, "noise_type")
        if self.noise_type in noise_numpy_types.keys():
            layout.prop(self, 'smooth', toggle=True)
            layout.prop(self, 'interpolate', toggle=True)
        layout.prop(self, "output_numpy", toggle=True)

    def process(self):
        # Evaluate noise for every object; when seeds and verts lists differ
        # in length, the shorter one repeats its last element via the
        # min(i, len-1) indexing below.
        inputs, outputs = self.inputs, self.outputs
        if not (outputs[0].is_linked and inputs[0].is_linked):
            return
        out = []
        verts = inputs['Vertices'].sv_get(deepcopy=False)
        seeds = inputs['Seed'].sv_get()[0]
        max_len = max(map(len, (seeds, verts)))
        noise_type = self.noise_type
        out_mode = self.out_mode
        output_numpy = self.output_numpy
        if noise_type in noise_numpy_types.keys():
            # Numpy implementations are keyed by type, then by interpolation.
            noise_function = noise_numpy_types[noise_type][self.interpolate]
            smooth = self.smooth
            for i in range(max_len):
                seed = seeds[min(i, len(seeds)-1)]
                obj_id = min(i, len(verts)-1)
                numpy_noise(verts[obj_id], out, out_mode, seed, noise_function, smooth, output_numpy)
        else:
            noise_function = noise.noise_vector
            for i in range(max_len):
                seed = seeds[min(i, len(seeds)-1)]
                obj_id = min(i, len(verts)-1)
                # 0 unsets the seed and generates unreproducable output based on system time
                seed_val = int(round(seed)) or 140230
                noise.seed_set(seed_val)
                mathulis_noise(verts[obj_id], out, out_mode, noise_type, noise_function, output_numpy)
        outputs[0].sv_set(out)

    def draw_label(self):
        # Collapsed nodes show the noise type plus the effective seed.
        if self.hide:
            if not self.inputs['Seed'].is_linked:
                seed = ' + ({0})'.format(str(int(self.seed)))
            else:
                seed = ' + seed(s)'
            return self.noise_type.title() + seed
        else:
            return self.label or self.name
# Standard Blender registration hooks for this module's node class.
classes = [SvNoiseNodeMK2]
register, unregister = bpy.utils.register_classes_factory(classes)
4,001 | async swap | from typing import TYPE_CHECKING, Any, Awaitable, Callable, TypeVar
from returns.io import IO, IOResult
from returns.primitives.hkt import Kind2, dekind
from returns.result import Failure, Result, Success
if TYPE_CHECKING:
from returns.future import Future, FutureResult # noqa: F401
_ValueType = TypeVar('_ValueType', covariant=True)
_NewValueType = TypeVar('_NewValueType')
_ErrorType = TypeVar('_ErrorType', covariant=True)
_NewErrorType = TypeVar('_NewErrorType')
async def METHOD_NAME(
    inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_ErrorType, _ValueType]:
    """Await the wrapped ``Result`` and swap its value and error types."""
    result = await inner_value
    return result.swap()
async def async_map(
    function: Callable[[_ValueType], _NewValueType],
    inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
    """Await the wrapped ``Result`` and map ``function`` over its value."""
    result = await inner_value
    return result.map(function)
async def async_apply(
    container:
        'FutureResult[Callable[[_ValueType], _NewValueType], _ErrorType]',
    inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
    """Async applies a wrapped function container over a value."""
    return (await inner_value).apply((await container)._inner_value)
async def async_bind(
    function: Callable[
        [_ValueType],
        Kind2['FutureResult', _NewValueType, _ErrorType],
    ],
    inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
    """Async binds a container over a value."""
    result = await inner_value
    if not isinstance(result, Success):
        # Failures pass through untouched.
        return result  # type: ignore[return-value]
    bound = dekind(function(result.unwrap()))
    return (await bound)._inner_value
async def async_bind_awaitable(
    function: Callable[[_ValueType], Awaitable[_NewValueType]],
    inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
    """Async binds a coroutine over a value."""
    result = await inner_value
    if not isinstance(result, Success):
        # Failures pass through untouched.
        return result  # type: ignore[return-value]
    return Result.from_value(await function(result.unwrap()))
async def async_bind_async(
    function: Callable[
        [_ValueType],
        Awaitable[Kind2['FutureResult', _NewValueType, _ErrorType]],
    ],
    inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
    """Async binds a coroutine with container over a value."""
    result = await inner_value
    if not isinstance(result, Success):
        # Failures pass through untouched.
        return result  # type: ignore[return-value]
    next_container = dekind(await function(result.unwrap()))
    return await next_container._inner_value
async def async_bind_result(
    function: Callable[[_ValueType], Result[_NewValueType, _ErrorType]],
    inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
    """Async binds a container returning ``Result`` over a value."""
    result = await inner_value
    return result.bind(function)
async def async_bind_ioresult(
    function: Callable[[_ValueType], IOResult[_NewValueType, _ErrorType]],
    inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
    """Async binds a container returning ``IOResult`` over a value."""
    result = await inner_value
    if not isinstance(result, Success):
        # Failures pass through untouched.
        return result  # type: ignore[return-value]
    return function(result.unwrap())._inner_value
async def async_bind_io(
    function: Callable[[_ValueType], IO[_NewValueType]],
    inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
    """Async binds a container returning ``IO`` over a value."""
    result = await inner_value
    if not isinstance(result, Success):
        # Failures pass through untouched.
        return result  # type: ignore[return-value]
    return Success(function(result.unwrap())._inner_value)
async def async_bind_future(
    function: Callable[[_ValueType], 'Future[_NewValueType]'],
    inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
    """Async binds a container returning ``Future`` over a value."""
    container = await inner_value
    if isinstance(container, Success):
        return await async_from_success(function(container.unwrap()))
    return container  # type: ignore[return-value]
async def async_bind_async_future(
    function: Callable[[_ValueType], Awaitable['Future[_NewValueType]']],
    inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
    """Async binds a coroutine returning ``Future`` over a value."""
    container = await inner_value
    if isinstance(container, Success):
        return await async_from_success(await function(container.unwrap()))
    return container  # type: ignore[return-value]
async def async_alt(
    function: Callable[[_ErrorType], _NewErrorType],
    inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_ValueType, _NewErrorType]:
    """Async alts a function over a value."""
    result = await inner_value
    if not isinstance(result, Success):
        # Map over the failure channel only.
        return Failure(function(result.failure()))
    return result
async def async_lash(
    function: Callable[
        [_ErrorType],
        Kind2['FutureResult', _ValueType, _NewErrorType],
    ],
    inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_ValueType, _NewErrorType]:
    """Async lashes a function returning a container over a value."""
    result = await inner_value
    if not isinstance(result, Success):
        # Recover from the failure channel with a new container.
        lashed = dekind(function(result.failure()))
        return (await lashed)._inner_value
    return result
async def async_from_success(
    container: 'Future[_NewValueType]',
) -> Result[_NewValueType, Any]:
    """Async success unit factory: wraps an awaited ``Future`` in ``Success``."""
    return Success((await container)._inner_value)
async def async_from_failure(
    container: 'Future[_NewErrorType]',
) -> Result[Any, _NewErrorType]:
    """Async failure unit factory: wraps an awaited ``Future`` in ``Failure``."""
    return Failure((await container)._inner_value)
async def async_compose_result(
    function: Callable[
        [Result[_ValueType, _ErrorType]],
        Kind2['FutureResult', _NewValueType, _ErrorType],
    ],
    inner_value: Awaitable[Result[_ValueType, _ErrorType]],
) -> Result[_NewValueType, _ErrorType]:
    """Async composes a function taking the whole ``Result`` as input."""
    return (await dekind(function(await inner_value)))._inner_value
4,002 | test ungzip | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import pytest
from parlai.core import build_data
import unittest
import unittest.mock
import requests
import parlai.utils.testing as testing_utils
import multiprocessing
from parlai.utils.io import PathManager
from parlai.core.params import ParlaiParser
@pytest.mark.nofbcode
@testing_utils.skipUnlessGPU
class TestBuildData(unittest.TestCase):
    """
    Basic tests on the build_data.py download_multiprocess.
    """

    # One destination filename per URL in the download tests below.
    dest_filenames = ('mnist0.tar.gz', 'mnist1.tar.gz', 'mnist2.tar.gz')

    def setUp(self):
        # Use a dedicated subdirectory of the configured datapath so the
        # tests do not clash with real downloaded datasets.
        self.datapath = ParlaiParser().parse_args([])['datapath']
        self.datapath = os.path.join(self.datapath, 'build_data_pyt_data')
        PathManager.mkdirs(self.datapath)
        for d in self.dest_filenames:
            # Removing files if they are already there b/c otherwise it won't try to download them again
            try:
                PathManager.rm(os.path.join(self.datapath, d))
            except OSError:
                pass

    def test_download_multiprocess(self):
        # One good URL and two intentionally broken ones; statuses should
        # come back in input order (200, 403, 403).
        urls = [
            'https://parl.ai/downloads/mnist/mnist.tar.gz',
            'https://parl.ai/downloads/mnist/mnist.tar.gz.BAD',
            'https://parl.ai/downloads/mnist/mnist.tar.gz.BAD',
        ]
        download_results = build_data.download_multiprocess(
            urls, self.datapath, dest_filenames=self.dest_filenames
        )
        output_filenames, output_statuses, output_errors = zip(*download_results)
        self.assertEqual(
            output_filenames, self.dest_filenames, 'output filenames not correct'
        )
        self.assertEqual(
            output_statuses, (200, 403, 403), 'output http statuses not correct'
        )

    def test_download_multiprocess_chunks(self):
        # Tests that the three finish downloading but may finish in any order
        urls = [
            'https://parl.ai/downloads/mnist/mnist.tar.gz',
            'https://parl.ai/downloads/mnist/mnist.tar.gz.BAD',
            'https://parl.ai/downloads/mnist/mnist.tar.gz.BAD',
        ]
        download_results = build_data.download_multiprocess(
            urls, self.datapath, dest_filenames=self.dest_filenames, chunk_size=1
        )
        output_filenames, output_statuses, output_errors = zip(*download_results)
        # With chunk_size=1 completion order is nondeterministic, so only
        # membership is asserted rather than exact order.
        self.assertIn('mnist0.tar.gz', output_filenames)
        self.assertIn('mnist1.tar.gz', output_filenames)
        self.assertIn('mnist2.tar.gz', output_filenames)
        self.assertIn(200, output_statuses, 'unexpected error code')
        self.assertIn(403, output_statuses, 'unexpected error code')

    def test_connectionerror_download(self):
        # Every request attempt times out, so download() should retry
        # num_retries times and then give up with a RuntimeError.
        with unittest.mock.patch('requests.Session.get') as Session:
            Session.side_effect = requests.exceptions.ConnectTimeout
            with testing_utils.tempdir() as tmpdir:
                with self.assertRaises(RuntimeError):
                    build_data.download(
                        'http://test.com/bad', tmpdir, 'foo', num_retries=3
                    )
                assert Session.call_count == 3
class TestUnzip(unittest.TestCase):
    """Tests for the archive-extraction helpers in ``build_data``.

    Each test builds a small archive in a temp dir, extracts it with the
    helper under test, then checks both the extracted contents and that the
    source archive was removed afterwards.
    """

    def METHOD_NAME(self):
        # build_data.ungzip inflates test.txt.gz into test.txt and deletes
        # the compressed source.
        with testing_utils.tempdir() as workdir:
            import gzip

            gz_path = os.path.join(workdir, "test.txt.gz")
            with gzip.GzipFile(gz_path, mode="w") as handle:
                handle.write("This is a test\n".encode("utf-8"))
            build_data.ungzip(workdir, "test.txt.gz")
            plain_path = os.path.join(workdir, "test.txt")
            assert os.path.exists(plain_path)
            assert not os.path.exists(gz_path)
            with open(plain_path) as handle:
                assert handle.read() == "This is a test\n"

    def test_unzip(self):
        # build_data._unzip extracts every member and removes the zip.
        with testing_utils.tempdir() as workdir:
            import zipfile

            zip_path = os.path.join(workdir, "test.zip")
            contents = {"test1.txt": b"Test1\n", "test2.txt": b"Test2\n"}
            with zipfile.ZipFile(zip_path, "w") as archive:
                for member, payload in contents.items():
                    with archive.open(member, "w") as handle:
                        handle.write(payload)
            build_data._unzip(workdir, "test.zip")
            for member, payload in contents.items():
                member_path = os.path.join(workdir, member)
                assert os.path.exists(member_path)
                with open(member_path) as handle:
                    assert handle.read() == payload.decode("utf-8")
            assert not os.path.exists(zip_path)

    def test_untar(self):
        # build_data._untar extracts every member and removes the tarball.
        with testing_utils.tempdir() as workdir:
            import io
            import tarfile

            tar_path = os.path.join(workdir, "test.tar.gz")
            contents = {"test1.txt": b"Test1\n", "test2.txt": b"Test2\n"}
            with tarfile.open(tar_path, "w") as archive:
                for member, payload in contents.items():
                    info = tarfile.TarInfo(member)
                    info.size = len(payload)
                    with io.BytesIO(payload) as handle:
                        archive.addfile(info, fileobj=handle)
            build_data._untar(workdir, "test.tar.gz")
            for member, payload in contents.items():
                member_path = os.path.join(workdir, member)
                assert os.path.exists(member_path)
                with open(member_path) as handle:
                    assert handle.read() == payload.decode("utf-8")
            assert not os.path.exists(tar_path)
if __name__ == '__main__':
    # Spawn start method keeps the multiprocessing download tests portable.
    multiprocessing.set_start_method('spawn')
    unittest.main()
4,003 | test covariancenormsmetric compute metric | """Uncertainty metric tests."""
import datetime
import numpy as np
import pytest
from ..manager import MultiManager
from ..uncertaintymetric import SumofCovarianceNormsMetric, MeanofCovarianceNormsMetric
from ...types.detection import Detection
from ...types.groundtruth import GroundTruthPath, GroundTruthState
from ...types.state import State, GaussianState
from ...types.track import Track
@pytest.fixture(params=[SumofCovarianceNormsMetric, MeanofCovarianceNormsMetric])
def generator(request):
    """Instantiate each covariance-norm metric generator variant in turn."""
    return request.param()
def test_covariancenormsmetric_extractstates(generator):
    """Test Covariance Norms Metric extract states."""
    # Detections, tracks and ground truths each hold five single-value
    # states at the same timestamp.
    time_start = datetime.datetime.now()
    detections = [Detection(state_vector=np.array([[i]]), timestamp=time_start)
                  for i in range(5)]
    tracks = {Track(states=[State(state_vector=[[i]],
                                  timestamp=time_start)]) for i in range(5)}
    truths = {GroundTruthPath(states=[GroundTruthState(state_vector=[[i]],
                                                       timestamp=time_start)])
              for i in range(5)}
    det_states = generator.extract_states(detections)
    assert det_states.states == detections
    track_states = generator.extract_states(tracks)
    assert set(track_states) == set(t.states[0] for t in tracks)
    truth_states = generator.extract_states(truths)
    assert set(truth_states) == set(t.states[0] for t in truths)
    # Unsupported input types must be rejected.
    with pytest.raises(ValueError):
        generator.extract_states([1, 2, 3])
def test_covariancenormsmetric_compute_covariancenorms(generator):
    """Test Covariance Norms Metric compute uncertainty."""
    time = datetime.datetime.now()
    # Covariance norm of diag([i]*4) is 2*i, so the five states sum to 20.
    track = Track(states=[GaussianState(state_vector=[[1], [2], [1], [2]],
                                        timestamp=time,
                                        covar=np.diag([i, i, i, i]))
                          for i in range(5)])
    metric = generator.compute_covariancenorms(track.states)
    # The mean variant divides the sum by the number of states.
    divide = len(track) if isinstance(generator, MeanofCovarianceNormsMetric) else 1
    assert metric.title == f"Covariance Matrix Norm {generator._type}"
    assert metric.value == 20 / divide
    assert metric.timestamp == time
    assert metric.generator == generator
    # Mixing timestamps must raise: the metric is defined per time step.
    with pytest.raises(ValueError,
                       match="All states must be from the same time to compute total uncertainty"):
        generator.compute_covariancenorms([
            GaussianState(state_vector=[[1], [2], [1], [2]],
                          timestamp=time,
                          covar=np.diag([0, 0, 0, 0])),
            GaussianState(state_vector=[[1], [2], [1], [2]],
                          timestamp=time+datetime.timedelta(seconds=1),
                          covar=np.diag([0, 0, 0, 0]))])
def METHOD_NAME(generator):
    """Test Covariance Norms compute metric."""
    time = datetime.datetime.now()
    # Multiple tracks present at two timesteps
    tracks = {Track(states=[GaussianState(state_vector=[[1], [2], [1], [2]], timestamp=time,
                                          covar=np.diag([i, i, i, i])),
                            GaussianState(state_vector=[[1.5], [2.5], [1.5], [2.5]],
                                          timestamp=time + datetime.timedelta(seconds=1),
                                          covar=np.diag([i+0.5, i+0.5, i+0.5, i+0.5]))])
              for i in range(5)}
    manager = MultiManager([generator])
    manager.add_data({'tracks': tracks})
    main_metric = generator.compute_metric(manager)
    # One per-timestep sub-metric is produced for each of the two timestamps.
    first_association, second_association = main_metric.value
    assert main_metric.title == f"{generator._type} of Covariance Norms Metric"
    assert main_metric.time_range.start_timestamp == time
    assert main_metric.time_range.end_timestamp == time + datetime.timedelta(
        seconds=1)
    # The mean variant divides each timestep's sum by the number of tracks.
    divide = len(tracks) if isinstance(generator, MeanofCovarianceNormsMetric) else 1
    assert first_association.title == f"Covariance Matrix Norm {generator._type}"
    assert first_association.value == 20 / divide
    assert first_association.timestamp == time
    assert first_association.generator == generator
    assert second_association.title == f"Covariance Matrix Norm {generator._type}"
    assert second_association.value == 25 / divide
    assert second_association.timestamp == time + datetime.timedelta(seconds=1)
    assert second_association.generator == generator
4,004 | native query | from collections import OrderedDict
import psycopg
from psycopg.pq import ExecStatus
from pandas import DataFrame
from mindsdb_sql import parse_sql
from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
from mindsdb_sql.parser.ast.base import ASTNode
from mindsdb.integrations.libs.base import DatabaseHandler
from mindsdb.integrations.libs.const import HANDLER_CONNECTION_ARG_TYPE as ARG_TYPE
from mindsdb.utilities import log
from mindsdb.integrations.libs.response import (
HandlerStatusResponse as StatusResponse,
HandlerResponse as Response,
RESPONSE_TYPE
)
import mindsdb.utilities.profiler as profiler
class PostgresHandler(DatabaseHandler):
    """
    This handler handles connection and execution of the PostgreSQL statements.
    """
    name = 'postgres'

    @profiler.profile('init_pg_handler')
    def __init__(self, name=None, **kwargs):
        super().__init__(name)
        self.parser = parse_sql
        self.connection_args = kwargs.get('connection_data')
        self.dialect = 'postgresql'
        self.database = self.connection_args.get('database')
        self.renderer = SqlalchemyRender('postgres')
        # Lazily-created psycopg connection; see connect().
        self.connection = None
        self.is_connected = False

    def __del__(self):
        # Close any live connection when the handler is garbage-collected.
        if self.is_connected is True:
            self.disconnect()

    @profiler.profile()
    def connect(self):
        """
        Handles the connection to a PostgreSQL database instance.
        Reuses an existing connection when one is already open.
        """
        if self.is_connected is True:
            return self.connection
        config = {
            'host': self.connection_args.get('host'),
            'port': self.connection_args.get('port'),
            'user': self.connection_args.get('user'),
            'password': self.connection_args.get('password'),
            'dbname': self.connection_args.get('database')
        }
        if self.connection_args.get('sslmode'):
            config['sslmode'] = self.connection_args.get('sslmode')
        if self.connection_args.get('schema'):
            # Put the requested schema first on the search path so
            # unqualified names resolve there before 'public'.
            config['options'] = f'-c search_path={self.connection_args.get("schema")},public'
        connection = psycopg.connect(**config, connect_timeout=10)
        self.is_connected = True
        self.connection = connection
        return self.connection

    def disconnect(self):
        # No-op when there is nothing to close.
        if self.is_connected is False:
            return
        self.connection.close()
        self.is_connected = False

    def check_connection(self) -> StatusResponse:
        """
        Check the connection of the PostgreSQL database
        :return: success status and error message if error occurs
        """
        response = StatusResponse(False)
        # Remember whether we opened the connection just for this check so
        # we can restore the previous state afterwards.
        need_to_close = self.is_connected is False
        try:
            connection = self.connect()
            with connection.cursor() as cur:
                cur.execute('select 1;')
            response.success = True
        except psycopg.Error as e:
            log.logger.error(f'Error connecting to PostgreSQL {self.database}, {e}!')
            response.error_message = e
        if response.success is True and need_to_close:
            self.disconnect()
        if response.success is False and self.is_connected is True:
            self.is_connected = False
        return response

    @profiler.profile()
    def METHOD_NAME(self, query: str) -> Response:
        """
        Receive SQL query and runs it
        :param query: The SQL query to run in PostgreSQL
        :return: returns the records from the current recordset
        """
        need_to_close = self.is_connected is False
        connection = self.connect()
        with connection.cursor() as cur:
            try:
                cur.execute(query)
                if ExecStatus(cur.pgresult.status) == ExecStatus.COMMAND_OK:
                    # Statement produced no result set (e.g. DDL/INSERT).
                    response = Response(RESPONSE_TYPE.OK)
                else:
                    result = cur.fetchall()
                    response = Response(
                        RESPONSE_TYPE.TABLE,
                        DataFrame(
                            result,
                            columns=[x.name for x in cur.description]
                        )
                    )
                connection.commit()
            except Exception as e:
                log.logger.error(f'Error running query: {query} on {self.database}!')
                response = Response(
                    RESPONSE_TYPE.ERROR,
                    error_code=0,
                    error_message=str(e)
                )
                # Roll back so the connection stays usable after a failure.
                connection.rollback()
        if need_to_close is True:
            self.disconnect()
        return response

    @profiler.profile()
    def query(self, query: ASTNode) -> Response:
        """
        Retrieve the data from the SQL statement with eliminated rows that dont satisfy the WHERE condition
        """
        query_str = self.renderer.get_string(query, with_failback=True)
        return self.METHOD_NAME(query_str)

    def get_tables(self) -> Response:
        """
        List all tables in PostgreSQL without the system tables information_schema and pg_catalog
        """
        query = """
            SELECT
                table_schema,
                table_name,
                table_type
            FROM
                information_schema.tables
            WHERE
                table_schema NOT IN ('information_schema', 'pg_catalog')
                and table_type in ('BASE TABLE', 'VIEW')
        """
        return self.METHOD_NAME(query)

    def get_columns(self, table_name: str) -> Response:
        """
        Return column names and data types for *table_name*.

        SECURITY NOTE(review): table_name is interpolated directly into the
        SQL string, so a quote character in the name breaks out of the
        literal. Callers must pass trusted identifiers, or this query
        should be parameterized.
        """
        query = f"""
            SELECT
                column_name as "Field",
                data_type as "Type"
            FROM
                information_schema.columns
            WHERE
                table_name = '{table_name}'
        """
        return self.METHOD_NAME(query)
# Declarative description of this handler's connection arguments; consumed
# by the MindsDB UI/API to render and validate connection forms.
connection_args = OrderedDict(
    user={
        'type': ARG_TYPE.STR,
        'description': 'The user name used to authenticate with the PostgreSQL server.',
        'required': True,
        'label': 'User'
    },
    password={
        'type': ARG_TYPE.PWD,
        'description': 'The password to authenticate the user with the PostgreSQL server.',
        'required': True,
        'label': 'Password'
    },
    database={
        'type': ARG_TYPE.STR,
        'description': 'The database name to use when connecting with the PostgreSQL server.',
        'required': True,
        'label': 'Database'
    },
    host={
        'type': ARG_TYPE.STR,
        'description': 'The host name or IP address of the PostgreSQL server. NOTE: use \'127.0.0.1\' instead of \'localhost\' to connect to local server.',
        'required': True,
        'label': 'Host'
    },
    port={
        'type': ARG_TYPE.INT,
        'description': 'The TCP/IP port of the PostgreSQL server. Must be an integer.',
        'required': True,
        'label': 'Port'
    },
    schema={
        'type': ARG_TYPE.STR,
        'description': 'The schema in which objects are searched first.',
        'required': False,
        'label': 'Schema'
    },
    sslmode={
        'type': ARG_TYPE.STR,
        'description': 'sslmode that will be used for connection.',
        'required': False,
        'label': 'sslmode'
    }
)
# Example values for the arguments above, surfaced in documentation/UI hints.
connection_args_example = OrderedDict(
    host='127.0.0.1',
    port=5432,
    user='root',
    password='password',
    database='database'
)
4,005 | tear down | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import uuid
import numpy as np
from fate_arch.session import computing_session as session
from federatedml.feature.feature_selection.filter_factory import get_filter
from federatedml.feature.feature_selection.model_adapter.adapter_factory import adapter_factory
from federatedml.feature.feature_selection.selection_properties import SelectionProperties
from federatedml.feature.hetero_feature_selection.base_feature_selection import BaseHeteroFeatureSelection
from federatedml.feature.instance import Instance
from federatedml.param.feature_selection_param import FeatureSelectionParam
from federatedml.param.statistics_param import StatisticsParam
from federatedml.statistic.data_statistics import DataStatistics
from federatedml.util import consts
class TestVarianceCoeFilter(unittest.TestCase):
    """Tests for the outlier-columns feature-selection filter."""

    def setUp(self):
        # Each test runs in its own computing session keyed by a fresh job id.
        self.job_id = str(uuid.uuid1())
        session.init(self.job_id)

    def gen_data(self, data_num, feature_num, partition):
        """Build a distributed table of Instances.

        The last feature column has 20% of its values set to 100 so its
        90th percentile becomes an outlier; all other columns are standard
        normal.
        """
        data = []
        header = [str(i) for i in range(feature_num)]
        anonymous_header = ["guest_9999_x" + str(i) for i in range(feature_num)]
        # col_2 = np.random.rand(data_num)
        col_data = []
        for _ in range(feature_num - 1):
            col_1 = np.random.randn(data_num)
            col_data.append(col_1)
        outlier_data = list(np.random.randn(int(data_num * 0.8)))
        outlier_data.extend(100 * np.ones(data_num - int(data_num * 0.8)))
        col_data.append(outlier_data)
        for key in range(data_num):
            data.append((key, Instance(features=np.array([col[key] for col in col_data]))))
        result = session.parallelize(data, include_key=True, partition=partition)
        result.schema = {'header': header,
                         "anonymous_header": anonymous_header
                         }
        self.header = header
        return result

    def test_filter_logic(self):
        # The outlier filter should drop only the last column, whose 90th
        # percentile (100) exceeds the upper threshold of 99.
        data_table = self.gen_data(1000, 10, 4)
        select_param = FeatureSelectionParam()
        select_param.outlier_param.percentile = 0.9
        select_param.outlier_param.upper_threshold = 99
        selection_obj = self._make_selection_obj(data_table)
        filter_obj = get_filter(consts.OUTLIER_COLS, select_param, model=selection_obj)
        select_properties = SelectionProperties()
        select_properties.set_header(self.header)
        select_properties.set_last_left_col_indexes([x for x in range(len(self.header))])
        select_properties.set_select_all_cols()
        filter_obj.set_selection_properties(select_properties)
        res_select_properties = filter_obj.fit(data_table, suffix='').selection_properties
        self.assertEqual(res_select_properties.all_left_col_names, [self.header[x] for x in range(9)])
        self.assertEqual(len(res_select_properties.all_left_col_names), 9)

    def _make_selection_obj(self, data_table):
        # Fit a statistics component on the data and convert its exported
        # model into the isometric-model form the selection filter consumes.
        statistics_param = StatisticsParam(statistics="90%")
        statistics_param.check()
        print(statistics_param.statistics)
        test_obj = DataStatistics()
        test_obj.model_param = statistics_param
        test_obj._init_model(statistics_param)
        test_obj.fit(data_table)
        adapter = adapter_factory(consts.STATISTIC_MODEL)
        meta_obj = test_obj.export_model()['StatisticMeta']
        param_obj = test_obj.export_model()['StatisticParam']
        iso_model = adapter.convert(meta_obj, param_obj)
        selection_obj = BaseHeteroFeatureSelection()
        selection_obj.isometric_models = {consts.STATISTIC_MODEL: iso_model}
        return selection_obj

    def METHOD_NAME(self):
        # Tear down: stop the computing session started in setUp.
        session.stop()
if __name__ == '__main__':
    # Run the suite directly for local debugging.
    unittest.main()
4,006 | type | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetTableResult',
'AwaitableGetTableResult',
'get_table',
'get_table_output',
]
@pulumi.output_type
class GetTableResult:
    """
    Properties of the table, including Id, resource name, resource type.
    """
    # NOTE: generated Pulumi output type -- each constructor argument is
    # type-checked and stored via pulumi.set, then exposed via getters.
    def __init__(__self__, id=None, name=None, signed_identifiers=None, table_name=None, METHOD_NAME=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if signed_identifiers and not isinstance(signed_identifiers, list):
            raise TypeError("Expected argument 'signed_identifiers' to be a list")
        pulumi.set(__self__, "signed_identifiers", signed_identifiers)
        if table_name and not isinstance(table_name, str):
            raise TypeError("Expected argument 'table_name' to be a str")
        pulumi.set(__self__, "table_name", table_name)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", METHOD_NAME)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="signedIdentifiers")
    def signed_identifiers(self) -> Optional[Sequence['outputs.TableSignedIdentifierResponse']]:
        """
        List of stored access policies specified on the table.
        """
        return pulumi.get(self, "signed_identifiers")

    @property
    @pulumi.getter(name="tableName")
    def table_name(self) -> str:
        """
        Table name under the specified account
        """
        return pulumi.get(self, "table_name")

    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetTableResult(GetTableResult):
    """Awaitable wrapper so the (already resolved) result works with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` turns this method into a generator, which is
        # what the await protocol requires; the value itself is already present.
        if False:
            yield self
        return GetTableResult(
            id=self.id,
            name=self.name,
            signed_identifiers=self.signed_identifiers,
            table_name=self.table_name,
            METHOD_NAME=self.METHOD_NAME)
def get_table(account_name: Optional[str] = None,
              resource_group_name: Optional[str] = None,
              table_name: Optional[str] = None,
              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTableResult:
    """
    Gets the table with the specified table name, under the specified account if it exists.
    :param str account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
    :param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
    :param str table_name: A table name must be unique within a storage account and must be between 3 and 63 characters.The name must comprise of only alphanumeric characters and it cannot begin with a numeric character.
    """
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['tableName'] = table_name
    # Merge caller-supplied invoke options over the provider defaults, then
    # issue the synchronous invoke against the 2023-01-01 storage API version.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:storage/v20230101:getTable', __args__, opts=opts, typ=GetTableResult).value
    return AwaitableGetTableResult(
        id=pulumi.get(__ret__, 'id'),
        name=pulumi.get(__ret__, 'name'),
        signed_identifiers=pulumi.get(__ret__, 'signed_identifiers'),
        table_name=pulumi.get(__ret__, 'table_name'),
        METHOD_NAME=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_table)
def get_table_output(account_name: Optional[pulumi.Input[str]] = None,
                     resource_group_name: Optional[pulumi.Input[str]] = None,
                     table_name: Optional[pulumi.Input[str]] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTableResult]:
    """
    Gets the table with the specified table name, under the specified account if it exists.
    :param str account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
    :param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
    :param str table_name: A table name must be unique within a storage account and must be between 3 and 63 characters.The name must comprise of only alphanumeric characters and it cannot begin with a numeric character.
    """
    # Body is intentionally ``...``: lift_output_func delegates to get_table.
    # Fix: removed a stray trailing "|" after the ellipsis (syntax error).
    ...
# -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# Standard Library
import copy
import logging
# Django
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import (
HttpResponseForbidden,
HttpResponseRedirect,
)
from django.shortcuts import (
get_object_or_404,
render,
)
from django.template.context_processors import csrf
from django.urls import (
reverse,
reverse_lazy,
)
from django.utils.text import slugify
from django.utils.translation import (
gettext as _,
gettext_lazy,
)
from django.views.generic import (
DeleteView,
UpdateView,
)
# Third Party
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
# wger
from wger.manager.forms import (
WorkoutCopyForm,
WorkoutForm,
WorkoutMakeTemplateForm,
)
from wger.manager.models import (
Schedule,
Workout,
WorkoutLog,
)
from wger.utils.generic_views import (
WgerDeleteMixin,
WgerFormMixin,
)
from wger.utils.helpers import make_token
logger = logging.getLogger(__name__)
# ************************
# Workout functions
# ************************
@login_required
def template_overview(request):
    """
    Show an overview of the current user's own workout templates.

    Fix: the docstring was empty.
    """
    return render(
        request, 'workout/overview.html', {
            'workouts': Workout.templates.filter(user=request.user),
            'title': _('Your templates'),
            'template_overview': True
        }
    )
@login_required
def public_template_overview(request):
    """
    Show an overview of all publicly shared workout templates.

    Fix: the docstring was empty.
    """
    return render(
        request, 'workout/overview.html', {
            'workouts': Workout.templates.filter(is_public=True),
            'title': _('Public templates'),
            'template_overview': True
        }
    )
def view(request, pk):
    """
    Show the workout with the given ID
    """
    workout = get_object_or_404(Workout, pk=pk)
    owner = workout.user
    viewer_is_owner = request.user == owner
    # Non-owners may only look at the workout if the owner enabled
    # read-only access on their profile.
    if not viewer_is_owner and not owner.userprofile.ro_access:
        return HttpResponseForbidden()
    uid, token = make_token(owner)
    return render(
        request, 'workout/view.html', {
            'workout': workout,
            'uid': uid,
            'token': token,
            'is_owner': viewer_is_owner,
            'owner_user': owner,
            'show_shariff': viewer_is_owner,
        }
    )
@login_required()
def template_view(request, pk):
    """
    Show the template with the given ID
    """
    template = get_object_or_404(Workout.templates, pk=pk)
    is_owner = request.user == template.user
    # Private templates are visible only to their owner.
    if not template.is_public and not is_owner:
        return HttpResponseForbidden()
    return render(
        request, 'workout/template_view.html', {
            'workout': template,
            'muscles': template.canonical_representation['muscles'],
            'is_owner': is_owner,
            'owner_user': template.user,
        }
    )
@login_required
def copy_workout(request, pk):
    """
    Makes a copy of a workout

    Performs a deep copy: the workout itself, its days (including the
    weekday m2m links), each day's sets and each set's settings.  Every
    copied row gets ``pk = None`` so that ``save()`` inserts a new record.
    The copy always belongs to the requesting user and is neither a
    template nor public.
    """
    workout = get_object_or_404(Workout.both, pk=pk)
    # Only public workouts (or the owner's own) may be copied.
    if not workout.is_public and request.user != workout.user:
        return HttpResponseForbidden()
    # Process request
    if request.method == 'POST':
        workout_form = WorkoutCopyForm(request.POST)
        if workout_form.is_valid():
            # Copy workout
            workout_copy: Workout = copy.copy(workout)
            workout_copy.pk = None
            workout_copy.name = workout_form.cleaned_data['name']
            workout_copy.user = request.user
            workout_copy.is_template = False
            workout_copy.is_public = False
            workout_copy.save()
            # Copy the days
            for day in workout.day_set.all():
                day_copy = copy.copy(day)
                day_copy.pk = None
                day_copy.training = workout_copy
                day_copy.save()
                # m2m weekday links can only be added once the copy has a pk.
                for i in day.day.all():
                    day_copy.day.add(i)
                day_copy.save()
                # Copy the sets
                for current_set in day.set_set.all():
                    current_set_copy = copy.copy(current_set)
                    current_set_copy.pk = None
                    current_set_copy.exerciseday = day_copy
                    current_set_copy.save()
                    # Copy the settings
                    for current_setting in current_set.setting_set.all():
                        setting_copy = copy.copy(current_setting)
                        setting_copy.pk = None
                        setting_copy.set = current_set_copy
                        setting_copy.save()
            return HttpResponseRedirect(workout_copy.get_absolute_url())
    else:
        # NOTE(review): the crispy-forms helper is configured only on the GET
        # branch; an invalid POST re-renders the form without it -- confirm
        # whether that is intentional.
        workout_form = WorkoutCopyForm({'name': workout.name, 'description': workout.description})
        workout_form.helper = FormHelper()
        workout_form.helper.form_id = slugify(request.path)
        workout_form.helper.form_method = 'post'
        workout_form.helper.form_action = request.path
        workout_form.helper.add_input(
            Submit('submit', _('Save'), css_class='btn-success btn-block')
        )
        workout_form.helper.form_class = 'wger-form'
    template_data = {}
    template_data.update(csrf(request))
    template_data['title'] = _('Copy workout')
    template_data['form'] = workout_form
    template_data['form_fields'] = [workout_form['name']]
    template_data['submit_text'] = _('Copy')
    return render(request, 'form.html', template_data)
@login_required
def METHOD_NAME(request, pk):
    """
    Turn a workout template back into a regular, private workout.

    Only the owner may do this; the workout loses both its template and
    public flags.

    Fixes: added the @login_required decorator for consistency with every
    other mutating view in this module, and added the missing docstring.
    (METHOD_NAME is a masked identifier -- the label suggests it was
    ``make_workout``.)
    """
    workout = get_object_or_404(Workout.both, pk=pk)
    if request.user != workout.user:
        return HttpResponseForbidden()
    workout.is_template = False
    workout.is_public = False
    workout.save()
    return HttpResponseRedirect(workout.get_absolute_url())
@login_required
def add(request):
    """
    Add a new workout and redirect to its page
    """
    new_workout = Workout(user=request.user)
    new_workout.save()
    return HttpResponseRedirect(new_workout.get_absolute_url())
class WorkoutDeleteView(WgerDeleteMixin, LoginRequiredMixin, DeleteView):
    """
    Generic view to delete a workout routine
    """

    model = Workout
    success_url = reverse_lazy('manager:workout:overview')
    messages = gettext_lazy('Successfully deleted')

    def get_context_data(self, **kwargs):
        # Modernized: zero-argument super() instead of super(Class, self).
        context = super().get_context_data(**kwargs)
        context['title'] = _('Delete {0}?').format(self.object)
        return context
class WorkoutEditView(WgerFormMixin, LoginRequiredMixin, UpdateView):
    """
    Generic view to update an existing workout routine
    """

    model = Workout
    form_class = WorkoutForm

    def get_context_data(self, **kwargs):
        # Modernized: zero-argument super() instead of super(Class, self).
        context = super().get_context_data(**kwargs)
        context['title'] = _('Edit {0}').format(self.object)
        return context
class WorkoutMarkAsTemplateView(WgerFormMixin, LoginRequiredMixin, UpdateView):
    """
    Generic view to mark an existing workout routine as a template.

    Fixes: removed a stray trailing "|" after ``return context`` (syntax
    error), modernized the super() call, and corrected the copy-pasted
    docstring (it described the edit view).
    """

    model = Workout
    form_class = WorkoutMakeTemplateForm

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['title'] = _('Mark as template')
        return context
# This file is part of rinohtype, the Python document preparation system.
#
# Copyright (c) Brecht Machiels.
#
# Use of this source code is subject to the terms of the GNU Affero General
# Public License v3. See the LICENSE file or http://www.gnu.org/licenses/.
from pathlib import Path
from docutils.core import publish_doctree
from docutils.io import FileInput
from docutils.parsers.rst import Parser as ReStructuredTextParser
from ...text import MixedStyledText
from .. import (TreeNode, TreeNodeMeta, InlineNode, BodyNode, BodySubNode,
GroupingNode, DummyNode, Reader)
__all__ = ['DocutilsNode', 'DocutilsInlineNode',
'DocutilsBodyNode', 'DocutilsBodySubNode',
'DocutilsGroupingNode', 'DocutilsDummyNode',
'ReStructuredTextReader', 'from_doctree']
class DocutilsNode(TreeNode, metaclass=TreeNodeMeta):
    """Base class adapting a docutils node to rinohtype's TreeNode interface."""
    @staticmethod
    def node_tag_name(node):
        return node.tagname
    @staticmethod
    def METHOD_NAME(node):
        # NOTE(review): masked identifier; by symmetry with node_tag_name and
        # node_children this is presumably ``node_parent`` -- confirm.
        return node.parent
    @staticmethod
    def node_children(node):
        return node.children
    @staticmethod
    def node_location(node):
        return node.source, node.line, node.tagname
    @property
    def location(self):
        node_source, line, tag_name = self.node_location(self.node)
        # Fall back to the node's 'source' attribute when docutils did not
        # record a source on the node itself.
        source = node_source or self.node.get('source')
        return source, line, tag_name
    @property
    def node_document(self):
        node = self.node
        # Some nodes lack a document reference; walk up until one is found.
        while node.document is None:
            node = node.parent  # https://sourceforge.net/p/docutils/bugs/410/
        return node.document
    @property
    def root(self):
        """Return the source root directory (Sphinx srcdir or the file's parent)."""
        settings = self.node_document.settings
        try:  # Sphinx
            sphinx_env = settings.env
        except AttributeError:  # plain docutils
            source_path = settings._source
            return Path(source_path).parent if source_path else None
        return Path(sphinx_env.srcdir)
    @property
    def _ids(self):
        return self.get('ids')
    @property
    def text(self):
        return self.node.astext()
    @property
    def attributes(self):
        return self.node.attributes
    def get(self, key, default=None):
        return self.node.get(key, default)
    def __getitem__(self, name):
        return self.node[name]
    def process_content(self, style=None):
        # Drop children that render to an empty/falsy styled text.
        children_text = (child.styled_text() for child in self.getchildren())
        return MixedStyledText([text for text in children_text if text],
                               style=style)
class DocutilsInlineNode(DocutilsNode, InlineNode):
    """Docutils node rendered as inline (in-paragraph) content."""
    @property
    def text(self):
        # Newlines inside inline text are soft line breaks; render as spaces.
        return super().text.replace('\n', ' ')
    def styled_text(self):
        styled_text = super().styled_text()
        try:
            # Plain strings have no ``classes`` attribute; skip them (EAFP).
            styled_text.classes.extend(self.get('classes'))
        except AttributeError:
            pass
        return styled_text
class DocutilsBodyNode(DocutilsNode, BodyNode):
    def flowables(self):
        """Yield flowables, tagging each with this node's docutils classes."""
        node_classes = self.get('classes')
        for flowable in super().flowables():
            flowable.classes.extend(node_classes)
            yield flowable
class DocutilsBodySubNode(DocutilsNode, BodySubNode):
    """A docutils node that only appears inside another body node."""
    pass
class DocutilsGroupingNode(DocutilsBodyNode, GroupingNode):
    """A docutils body node that groups child flowables."""
    pass
class DocutilsDummyNode(DocutilsNode, DummyNode):
    """A docutils node that produces no output."""
    pass
from . import nodes
class DocutilsReader(Reader):
    """Base reader that parses its input with a docutils parser."""
    # Subclasses must set this to the docutils parser class to instantiate.
    parser_class = None
    def parse(self, filename_or_file, **context):
        try:
            # Assume a path-like argument first ...
            file, filename = None, Path(filename_or_file)
            kwargs = dict(source_path=str(filename),
                          settings_overrides=dict(input_encoding='utf-8'))
        except TypeError:
            # ... otherwise treat it as an already-open file object.
            file, kwargs = filename_or_file, {}
        doctree = publish_doctree(file, source_class=FileInput,
                                  parser=self.parser_class(), **kwargs)
        return from_doctree(doctree, **context)
class ReStructuredTextReader(DocutilsReader):
    """Reader for reStructuredText (.rst) input files."""
    extensions = ('rst', )
    parser_class = ReStructuredTextParser
def from_doctree(doctree, **context):
    """Map a docutils document tree onto rinohtype nodes and return the
    resulting flowable.

    Fix: removed a stray trailing "|" after the return statement (syntax
    error).
    """
    mapped_tree = DocutilsNode.map_node(doctree, **context)
    return mapped_tree.flowable()
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import os
from paddle.dataset.common import md5file
from paddle.utils.download import get_path_from_url
from ..utils.env import DATA_HOME
from .dataset import DatasetBuilder
class Conll2002(DatasetBuilder):
    """
    Named entities are phrases that contain the names of persons, organizations,
    locations, times and quantities. Example: [PER Wolff] , currently a journalist
    in [LOC Argentina] , played with [PER Del Bosque] in the final years of the seventies in [ORG Real Madrid] .
    The shared task of CoNLL-2002 concerns language-independent named entity recognition.
    We will concentrate on four types of named entities: persons, locations, organizations and names of
    miscellaneous entities that do not belong to the previous three groups. The participants of the
    shared task will be offered training and test data for at least two languages.
    They will use the data for developing a named-entity recognition system that includes a machine learning component.
    Information sources other than the training data may be used in this shared task. We are especially interested
    in methods that can use additional unannotated data for improving their performance (for example co-training).
    For more details see https://www.clips.uantwerpen.be/conll2002/ner/
    and https://www.aclweb.org/anthology/W02-2024/
    """

    META_INFO = collections.namedtuple("META_INFO", ("file", "url", "md5"))
    BASE_URL = "https://bj.bcebos.com/paddlenlp/datasets/conll2002/"
    # Per-language configuration: download locations/checksums per split and
    # the language's POS tag inventory.
    BUILDER_CONFIGS = {
        "es": {
            "splits": {
                "train": META_INFO("esp.train", BASE_URL + "esp.train", "c8c6b342371b9de2f83a93767d352c17"),
                "dev": META_INFO("esp.testa", BASE_URL + "esp.testa", "de0578160dde26ec68cc580595587dde"),
                "test": META_INFO("esp.testb", BASE_URL + "esp.testb", "c8d35f340685a2ce6559ee90d78f9e37"),
            },
            "pos_tags": [
                "AO", "AQ", "CC", "CS", "DA", "DE", "DD", "DI", "DN", "DP",
                "DT", "Faa", "Fat", "Fc", "Fd", "Fe", "Fg", "Fh", "Fia", "Fit",
                "Fp", "Fpa", "Fpt", "Fs", "Ft", "Fx", "Fz", "I", "NC", "NP",
                "P0", "PD", "PI", "PN", "PP", "PR", "PT", "PX", "RG", "RN",
                "SP", "VAI", "VAM", "VAN", "VAP", "VAS", "VMG", "VMI", "VMM",
                "VMN", "VMP", "VMS", "VSG", "VSI", "VSM", "VSN", "VSP", "VSS",
                "Y", "Z",
            ],
        },
        "nl": {
            "splits": {
                "train": META_INFO("ned.train", BASE_URL + "ned.train", "b6189d04eb34597d2a98ca5cec477605"),
                "dev": META_INFO("ned.testa", BASE_URL + "ned.testa", "626900497823fdbc4f84335518cb85ce"),
                "test": META_INFO("ned.testb", BASE_URL + "ned.testb", "c37de92da20c68c6418a73dd42e322dc"),
            },
            "pos_tags": ["Adj", "Adv", "Art", "Conj", "Int", "Misc", "N", "Num", "Prep", "Pron", "Punc", "V"],
        },
    }

    def METHOD_NAME(self, mode, **kwargs):
        """Return the local path of the requested split, downloading the file
        first when it is missing or fails its md5 check.

        (METHOD_NAME is a masked identifier; the label suggests ``_get_data``.)
        """
        builder_config = self.BUILDER_CONFIGS[self.name]
        default_root = os.path.join(DATA_HOME, self.__class__.__name__)
        filename, url, data_hash = builder_config["splits"][mode]
        fullname = os.path.join(default_root, filename)
        if not os.path.exists(fullname) or (data_hash and not md5file(fullname) == data_hash):
            get_path_from_url(url, default_root, data_hash)
        return fullname

    def _read(self, filename, *args):
        """Yield one example dict (tokens, ner_tags, pos_tags) per sentence."""
        with open(filename, "r", encoding="utf-8") as f:
            tokens = []
            ner_tags = []
            pos_tags = []
            for line in f.readlines():
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if tokens:
                        yield {"tokens": tokens, "ner_tags": ner_tags, "pos_tags": pos_tags}
                    tokens = []
                    ner_tags = []
                    pos_tags = []
                else:
                    # conll2002 tokens are space separated
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    pos_tags.append(splits[1])
                    ner_tags.append(splits[2].rstrip())
            # Last example.  Fix: guard so a file ending in a blank line does
            # not yield a spurious empty example.
            if tokens:
                yield {"tokens": tokens, "ner_tags": ner_tags, "pos_tags": pos_tags}

    def get_labels(self):
        """
        Returns labels of ner tags and pos tags.

        Fix: removed a stray trailing "|" (syntax error in the original).
        """
        return ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "B-MISC", "I-MISC"], self.BUILDER_CONFIGS[
            self.name
        ]["pos_tags"]
from datetime import datetime
from pathlib import Path
import socket
import pytest
from voting_node.db import EventDb
from voting_node.models import Contribution, Event, HostInfo, Objective, Proposal, ServiceSettings, Voter, VotingGroup
from voting_node.tasks import Leader0Schedule
# Test Fixtures
# Canned-data fixtures: plain model instances as they would come back from
# EventDb queries.
@pytest.fixture
def mock_event():
    # Minimal upcoming Event row.
    return Event(
        row_id=1234,
        name="Test Event",
        description="Describe it.",
        committee_size=1,
        committee_threshold=1,
        start_time=datetime.now(),
        snapshot_start=datetime.now(),
    )
@pytest.fixture
def leader0_host_info(mock_event, mock_leader0_hostname):
    # Host record tying the leader0 hostname to the mock event's keys.
    return HostInfo(
        hostname=mock_leader0_hostname, event=mock_event.row_id, seckey="secretkey", pubkey="publickey", netkey="netkey"
    )
@pytest.fixture
def voting_groups():
    return [VotingGroup(name="direct"), VotingGroup(name="rep")]
@pytest.fixture
def mock_voters():
    # Mix of "direct" and "rep" voters, all with the same voting power.
    return [
        Voter(1, "votekey", 1, "direct", 5000),
        Voter(2, "votekey", 1, "rep", 5000),
        Voter(3, "votekey", 1, "direct", 5000),
        Voter(4, "votekey", 1, "rep", 5000),
        Voter(5, "votekey", 1, "direct", 5000),
        Voter(6, "votekey", 1, "direct", 5000),
    ]
@pytest.fixture
def mock_contributions():
    # One contribution per voter, mirroring mock_voters' groups.
    return [
        Contribution(1, "stakekey", 1, "direct", 5000, "votekey", 1),
        Contribution(2, "stakekey", 1, "rep", 5000, "votekey", 1),
        Contribution(3, "stakekey", 1, "direct", 5000, "votekey", 1),
        Contribution(4, "stakekey", 1, "rep", 5000, "votekey", 1),
        Contribution(5, "stakekey", 1, "direct", 5000, "votekey", 1),
        Contribution(6, "stakekey", 1, "direct", 5000, "votekey", 1),
    ]
@pytest.fixture
def mock_objectives():
    return [
        Objective(1, 1001, 1, "Category", "Title", "Description", False, "ADA"),
        Objective(2, 1002, 1, "Category", "Title", "Description", False, "ADA"),
        Objective(3, 1003, 1, "Category", "Title", "Description", False, "ADA"),
        Objective(4, 1004, 1, "Category", "Title", "Description", False, "ADA"),
    ]
@pytest.fixture
def mock_proposals():
    # Proposals spread unevenly over the four mock objectives (3/2/1/1).
    return [
        Proposal(1, 301, 1, "Title", "Summary", "Category", "publickey", 7000000, "http://url", "http://files", 1.0, "Name", "Contact", "http://proposer", "Experience"),
        Proposal(2, 302, 1, "Title", "Summary", "Category", "publickey", 7000000, "http://url", "http://files", 1.0, "Name", "Contact", "http://proposer", "Experience"),
        Proposal(3, 303, 1, "Title", "Summary", "Category", "publickey", 7000000, "http://url", "http://files", 1.0, "Name", "Contact", "http://proposer", "Experience"),
        Proposal(4, 304, 2, "Title", "Summary", "Category", "publickey", 7000000, "http://url", "http://files", 1.0, "Name", "Contact", "http://proposer", "Experience"),
        Proposal(5, 305, 2, "Title", "Summary", "Category", "publickey", 7000000, "http://url", "http://files", 1.0, "Name", "Contact", "http://proposer", "Experience"),
        Proposal(6, 306, 3, "Title", "Summary", "Category", "publickey", 7000000, "http://url", "http://files", 1.0, "Name", "Contact", "http://proposer", "Experience"),
        Proposal(7, 307, 4, "Title", "Summary", "Category", "publickey", 7000000, "http://url", "http://files", 1.0, "Name", "Contact", "http://proposer", "Experience"),
    ]
## Monkeypatches
@pytest.fixture
def mock_leader0_hostname(monkeypatch):
    """Make socket.gethostname() report "leader0".

    Fix: monkeypatch.setattr's replacement must be a callable; the original
    passed the bare string "leader0", so any code calling
    socket.gethostname() would raise TypeError ('str' object is not
    callable).
    """
    monkeypatch.setattr(socket, "gethostname", lambda: "leader0")
# Each fixture below stubs one EventDb coroutine so schedule tasks can run
# without a database.  METHOD_NAME is a masked identifier; it patches
# "fetch_upcoming_event", so it is presumably mock_db_fetch_upcoming_event --
# confirm against the original test module.
@pytest.fixture
def METHOD_NAME(monkeypatch, mock_event):
    async def mock_db_call(*args, **kwargs):
        return mock_event
    monkeypatch.setattr(EventDb, "fetch_upcoming_event", mock_db_call)
@pytest.fixture
def mock_db_fetch_leader0_host_info(monkeypatch, leader0_host_info):
    async def mock_db_call(*args, **kwargs):
        return leader0_host_info
    monkeypatch.setattr(EventDb, "fetch_leader_host_info", mock_db_call)
@pytest.fixture
def mock_db_check_if_snapshot_is_final(monkeypatch):
    async def mock_db_call(*args, **kwargs):
        return True
    monkeypatch.setattr(EventDb, "check_if_snapshot_is_final", mock_db_call)
@pytest.fixture
def mock_db_fetch_voting_groups(monkeypatch, voting_groups):
    async def mock_db_call(*args, **kwargs):
        return voting_groups
    monkeypatch.setattr(EventDb, "fetch_voting_groups", mock_db_call)
@pytest.fixture
def mock_db_fetch_voters(monkeypatch, mock_voters):
    async def mock_db_call(*args, **kwargs):
        return mock_voters
    monkeypatch.setattr(EventDb, "fetch_voters", mock_db_call)
@pytest.fixture
def mock_db_fetch_contributions(monkeypatch, mock_contributions):
    async def mock_db_call(*args, **kwargs):
        return mock_contributions
    monkeypatch.setattr(EventDb, "fetch_contributions", mock_db_call)
@pytest.fixture
def mock_db_fetch_objectives(monkeypatch, mock_objectives):
    async def mock_db_call(*args, **kwargs):
        return mock_objectives
    monkeypatch.setattr(EventDb, "fetch_objectives", mock_db_call)
@pytest.fixture
def mock_db_fetch_proposals(monkeypatch, mock_proposals):
    async def mock_db_call(*args, **kwargs):
        return mock_proposals
    monkeypatch.setattr(EventDb, "fetch_proposals", mock_db_call)
# TESTS
@pytest.mark.asyncio
async def test_leader0_schedule_instantiates_with_defaults():
    """A bare Leader0Schedule picks up ServiceSettings defaults everywhere."""
    schedule = Leader0Schedule()
    assert schedule.settings == ServiceSettings()
    assert schedule.db.db_url == schedule.settings.db_url
    assert schedule.node.storage == Path(schedule.settings.storage)
    assert schedule.current_task is None
@pytest.mark.asyncio
async def test_task_node_fetch_event(mock_event, METHOD_NAME):
    """node_fetch_event stores the (mocked) upcoming event on the node."""
    # METHOD_NAME is the masked fixture that stubs EventDb.fetch_upcoming_event.
    schedule = Leader0Schedule()
    await schedule.node_fetch_event()
    assert schedule.node.event == mock_event
@pytest.mark.asyncio
async def test_task_node_fetch_host_keys(leader0_host_info, mock_event, mock_db_fetch_leader0_host_info):
    """node_fetch_host_keys stores the (mocked) leader host info on the node."""
    schedule = Leader0Schedule()
    schedule.node.event = mock_event
    await schedule.node_fetch_host_keys()
    assert schedule.node.host_info == leader0_host_info
### TODO: Other tasks
@pytest.mark.asyncio
async def test_task_node_snapshot_data(
    mock_event, mock_db_check_if_snapshot_is_final, mock_db_fetch_voting_groups, mock_db_fetch_voters, mock_db_fetch_contributions, mock_db_fetch_objectives, mock_db_fetch_proposals
):
    """node_snapshot_data runs cleanly with all EventDb calls mocked out.

    Fix: removed a stray trailing "|" after the final ellipsis (syntax
    error).  TODO: add assertions on the imported snapshot data.
    """
    schedule = Leader0Schedule()
    schedule.node.event = mock_event
    await schedule.node_snapshot_data()
    ...
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.build import check_min_cppstd, stdcpp_library
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get
from conan.tools.microsoft import is_msvc
from conan.tools.scm import Version
import os
required_conan_version = ">=1.54.0"
class VkBootstrapConan(ConanFile):
    """Conan recipe for vk-bootstrap, a Vulkan bootstrapping library.

    Fix: removed a stray trailing "|" on the last line of package_info
    (syntax error in the original).
    """

    name = "vk-bootstrap"
    description = "Vulkan bootstraping library."
    license = "MIT"
    topics = ("vulkan", "bootstrap", "setup")
    homepage = "https://github.com/charles-lunarg/vk-bootstrap"
    url = "https://github.com/conan-io/conan-center-index"
    package_type = "library"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
    }

    @property
    def _min_cppstd(self):
        return "14"

    @property
    def _compilers_minimum_version(self):
        # Minimum compiler versions known to support the required C++ standard.
        return {
            "gcc": "5",
            "Visual Studio": "15",
            "msvc": "191",
            "clang": "3.7" if stdcpp_library(self) == "stdc++" else "6",
            "apple-clang": "10",
        }

    def export_sources(self):
        export_conandata_patches(self)

    def config_options(self):
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        if self.options.shared:
            self.options.rm_safe("fPIC")

    def layout(self):
        cmake_layout(self, src_folder="src")

    def requirements(self):
        if Version(self.version) < "0.7":
            self.requires("vulkan-headers/1.3.236.0", transitive_headers=True)
        else:
            self.requires("vulkan-headers/1.3.239.0", transitive_headers=True)

    def validate(self):
        if self.settings.compiler.get_safe("cppstd"):
            check_min_cppstd(self, self._min_cppstd)

        def loose_lt_semver(v1, v2):
            # Compare only the leading version components both sides share.
            lv1 = [int(v) for v in v1.split(".")]
            lv2 = [int(v) for v in v2.split(".")]
            min_length = min(len(lv1), len(lv2))
            return lv1[:min_length] < lv2[:min_length]

        minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
        if minimum_version and loose_lt_semver(str(self.settings.compiler.version), minimum_version):
            raise ConanInvalidConfiguration(
                f"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support.",
            )
        if is_msvc(self) and self.options.shared:
            raise ConanInvalidConfiguration(f"{self.ref} shared not supported with Visual Studio")

    def METHOD_NAME(self):
        # Masked identifier; the label suggests the conan ``source`` method.
        get(self, **self.conan_data["sources"][self.version], strip_root=True)

    def generate(self):
        tc = CMakeToolchain(self)
        tc.variables["VK_BOOTSTRAP_TEST"] = False
        vulkan_headers = self.dependencies["vulkan-headers"]
        # CMake expects a ";"-separated list of forward-slash paths.
        includedirs = ";".join(
            [os.path.join(vulkan_headers.package_folder, includedir).replace("\\", "/")
             for includedir in vulkan_headers.cpp_info.includedirs],
        )
        if Version(self.version) < "0.3.0":
            tc.variables["Vulkan_INCLUDE_DIR"] = includedirs
        else:
            tc.variables["VK_BOOTSTRAP_VULKAN_HEADER_DIR"] = includedirs
        if Version(self.version) >= "0.4.0":
            tc.variables["VK_BOOTSTRAP_WERROR"] = False
        tc.generate()

    def build(self):
        apply_conandata_patches(self)
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def package(self):
        copy(self, "LICENSE.txt", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
        cmake = CMake(self)
        cmake.install()

    def package_info(self):
        self.cpp_info.libs = ["vk-bootstrap"]
        if self.settings.os in ["Linux", "FreeBSD"]:
            self.cpp_info.system_libs = ["dl"]
from __future__ import annotations  # superfluous in Python 3.10
import logging
from abc import ABCMeta, abstractmethod
from typing import NamedTuple
import numpy as np
from geojson import Feature
from ohsome_quality_api.definitions import get_attribution, get_metadata
from ohsome_quality_api.indicators.base import BaseIndicator
from ohsome_quality_api.reports.models import ReportMetadata, Result
class IndicatorTopic(NamedTuple):
    """One (indicator, topic) combination a report is built from."""
    indicator_name: str
    topic_key: str
class BaseReport(metaclass=ABCMeta):
    """Base class for reports that aggregate several indicator results.

    Fixes: removed a stray trailing "|" after the attribution() return
    (syntax error) and dropped an unused enumerate() index in the feature
    serialization loop.
    """

    def __init__(
        self,
        feature: Feature,
        indicator_topic: tuple[IndicatorTopic] = None,
        blocking_red: bool = False,
        blocking_undefined: bool = False,
    ):
        self.metadata: ReportMetadata = get_metadata("reports", type(self).__name__)
        self.feature = feature
        self.indicators: list[BaseIndicator] = []
        self.indicator_topic = indicator_topic  # Defines indicator+topic combinations
        self.blocking_undefined = blocking_undefined
        self.blocking_red = blocking_red
        # Results will be written during the lifecycle of the report object (combine())
        self.result = Result()

    def METHOD_NAME(self, include_data: bool = False) -> Feature:
        """Returns a GeoJSON Feature object.

        The properties of the Feature contains the attributes of all indicators.
        The geometry (and properties) of the input GeoJSON object is preserved.
        (METHOD_NAME is a masked identifier; the label suggests ``as_feature``.)
        """
        result = self.result.dict(by_alias=True)  # only attributes, no properties
        result["label"] = self.result.label  # label is a property
        properties = {
            "report": {
                "metadata": self.metadata.dict(),
                "result": result,
            },
            "indicators": [],
        }
        properties["report"]["metadata"].pop("label_description", None)
        # The loop index was unused; iterate directly over the indicators.
        for indicator in self.indicators:
            properties["indicators"].append(
                indicator.METHOD_NAME(include_data=include_data)["properties"]
            )
        if "id" in self.feature.keys():
            return Feature(
                id=self.feature.id,
                geometry=self.feature.geometry,
                properties=properties,
            )
        else:
            return Feature(geometry=self.feature.geometry, properties=properties)

    @abstractmethod
    def combine_indicators(self) -> None:
        """Combine indicators results and create the report result object."""
        logging.info(f"Combine indicators for report: {self.metadata.name}")
        # A single undefined indicator blocks the whole report, if configured.
        if self.blocking_undefined:
            if any(i.result.class_ is None for i in self.indicators):
                self.result.class_ = None
                self.result.description = self.metadata.label_description["undefined"]
                return
        # A single red (class 1) indicator blocks the whole report, if configured.
        if self.blocking_red:
            if any(i.result.class_ == 1 for i in self.indicators):
                self.result.class_ = 1
                self.result.description = self.metadata.label_description["red"]
                return
        if all(i.result.class_ is None for i in self.indicators):
            self.result.class_ = None
            self.result.description = self.metadata.label_description["undefined"]
        else:
            # Average over the defined indicator classes, rounded to a class.
            self.result.class_ = round(
                np.mean(
                    [
                        i.result.class_
                        for i in self.indicators
                        if i.result.class_ is not None
                    ]
                )
            )
        if self.result.class_ in (4, 5):
            self.result.description = self.metadata.label_description["green"]
        elif self.result.class_ in (2, 3):
            self.result.description = self.metadata.label_description["yellow"]
        elif self.result.class_ == 1:
            self.result.description = self.metadata.label_description["red"]
        else:
            self.result.description = self.metadata.label_description["undefined"]

    @classmethod
    def attribution(cls) -> str:
        """Data attribution as text.

        Defaults to OpenStreetMap attribution.
        This property should be overwritten by the Sub Class if additional data
        attribution is necessary.
        """
        return get_attribution(["OSM"])
# This file is part of the Open Data Cube, see https://opendatacube.org for more information
#
# Copyright (c) 2015-2023 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
#
# type: ignore
import sys
from deprecat import deprecat
_REMOTE_LOG_FORMAT_STRING = '%(asctime)s {} %(process)d %(name)s %(levelname)s %(message)s'
@deprecat(reason="Executors have been deprecated and will be removed in v1.9", version='1.8.14')
class SerialExecutor(object):
    """Executor that runs every submitted call lazily in the current process.

    A "future" here is just the ``(func, args, kwargs)`` triple; no work
    happens until ``result()`` is called.
    """
    def __repr__(self):
        return 'SerialExecutor'
    @staticmethod
    def submit(func, *args, **kwargs):
        # Defer execution: package the call for a later result() invocation.
        return func, args, kwargs
    @staticmethod
    def map(func, iterable):
        return [SerialExecutor.submit(func, data) for data in iterable]
    @staticmethod
    def get_ready(futures):
        def reraise(t, e, traceback):
            # t is the exception type, e the instance: the unbound
            # with_traceback call attaches the original traceback to e
            # before re-raising it.
            raise t.with_traceback(e, traceback)
        try:
            # Eagerly run only the first pending future.
            result = SerialExecutor.result(futures[0])
            return [(lambda x: x, [result], {})], [], futures[1:]
        except Exception:  # pylint: disable=broad-except
            exc_info = sys.exc_info()
            return [], [(reraise, exc_info, {})], futures[1:]
    @staticmethod
    def as_completed(futures):
        for future in futures:
            yield future
    @classmethod
    def next_completed(cls, futures, default):
        results = list(futures)
        if not results:
            return default, results
        result = next(cls.as_completed(results), default)
        results.remove(result)
        return result, results
    @staticmethod
    def results(futures):
        return [SerialExecutor.result(future) for future in futures]
    @staticmethod
    def result(future):
        # This is where the deferred call actually executes.
        func, args, kwargs = future
        return func(*args, **kwargs)
    @staticmethod
    def release(future):
        pass
def setup_logging():
    """Install a single root stream handler whose format embeds this host's name.

    Imports are local so the function can be shipped to and executed on
    remote workers (see its use via executor.run in this module).
    """
    import logging
    import socket
    fmt = _REMOTE_LOG_FORMAT_STRING.format(socket.gethostname())
    stream_handler = logging.StreamHandler()
    stream_handler.formatter = logging.Formatter(fmt)
    logging.root.handlers = [stream_handler]
@deprecat(reason="Executors have been deprecated and will be removed in v1.9", version='1.8.14')
def _get_distributed_executor(scheduler):
    """
    :param scheduler: Address of a scheduler
    """
    # Returns None when the optional `distributed` package is missing or the
    # scheduler cannot be reached, allowing callers to fall back.
    try:
        import distributed
    except ImportError:
        return None
    class DistributedExecutor(object):
        # Adapter presenting the common executor API on top of a
        # distributed.Client.
        def __init__(self, executor):
            """
            :type executor: distributed.Executor
            :return:
            """
            self._executor = executor
            self.setup_logging()
        def setup_logging(self):
            # Configure logging on every worker, not just locally.
            self._executor.run(setup_logging)
        def submit(self, func, *args, **kwargs):
            # pure=False: do not let dask deduplicate/cache task results.
            return self._executor.submit(func, *args, pure=False, **kwargs)
        def map(self, func, iterable):
            return self._executor.map(func, iterable)
        @staticmethod
        def get_ready(futures):
            # Partition futures by status into (completed, failed, pending).
            groups = {}
            for f in futures:
                groups.setdefault(f.status, []).append(f)
            return groups.get('finished', []), groups.get('error', []), groups.get('pending', [])
        @staticmethod
        def as_completed(futures):
            return distributed.as_completed(futures)
        @classmethod
        def next_completed(cls, futures, default):
            results = list(futures)
            if not results:
                return default, results
            result = next(cls.as_completed(results), default)
            results.remove(result)
            return result, results
        def results(self, futures):
            return self._executor.gather(futures)
        @staticmethod
        def result(future):
            return future.result()
        @staticmethod
        def release(future):
            future.release()
    try:
        executor = DistributedExecutor(distributed.Client(scheduler))
        return executor
    except IOError:
        # Scheduler unreachable; caller will fall back to another executor.
        return None
def _run_cloud_pickled_function(f_data, *args, **kwargs):
    """Deserialise a cloudpickle'd callable and invoke it with the given
    arguments (runs inside a worker process)."""
    from cloudpickle import loads
    return loads(f_data)(*args, **kwargs)
@deprecat(reason="Executors have been deprecated and will be removed in v1.9", version='1.8.14')
def _get_concurrent_executor(workers, use_cloud_pickle=False):
    """Build a process-pool based executor, or return ``None`` if unavailable.

    :param workers: number of worker processes; values <= 0 return None
    :param use_cloud_pickle: serialise submitted functions with cloudpickle,
        which supports closures/lambdas that plain pickle rejects
    """
    try:
        from concurrent.futures import ProcessPoolExecutor, as_completed
    except ImportError:
        return None
    # Restored name for the masked METHOD_NAME placeholder (record label:
    # "mk submitter"); both the definition and its single use are local.
    def mk_submitter(pool, use_cloud_pickle):
        # Pick the submission strategy once instead of testing the flag on
        # every submit() call.
        def submit_direct(func, *args, **kwargs):
            return pool.submit(func, *args, **kwargs)
        def submit_cloud_pickle(func, *args, **kwargs):
            from cloudpickle import dumps
            return pool.submit(_run_cloud_pickled_function, dumps(func), *args, **kwargs)
        return submit_cloud_pickle if use_cloud_pickle else submit_direct
    class MultiprocessingExecutor(object):
        def __init__(self, pool, use_cloud_pickle):
            self._pool = pool
            self._submitter = mk_submitter(pool, use_cloud_pickle)
        def __repr__(self):
            # _max_workers is an implementation detail of ProcessPoolExecutor,
            # hence the defensive dict lookup.
            max_workers = self._pool.__dict__.get('_max_workers', '??')
            return 'Multiprocessing ({})'.format(max_workers)
        def submit(self, func, *args, **kwargs):
            return self._submitter(func, *args, **kwargs)
        def map(self, func, iterable):
            return [self.submit(func, data) for data in iterable]
        @staticmethod
        def get_ready(futures):
            # Partition futures by state into (completed, failed, pending).
            completed = []
            failed = []
            pending = []
            for f in futures:
                if f.done():
                    if f.exception():
                        failed.append(f)
                    else:
                        completed.append(f)
                else:
                    pending.append(f)
            return completed, failed, pending
        @staticmethod
        def as_completed(futures):
            return as_completed(futures)
        @classmethod
        def next_completed(cls, futures, default):
            results = list(futures)
            if not results:
                return default, results
            result = next(cls.as_completed(results), default)
            results.remove(result)
            return result, results
        @staticmethod
        def results(futures):
            return [future.result() for future in futures]
        @staticmethod
        def result(future):
            return future.result()
        @staticmethod
        def release(future):
            pass
    if workers <= 0:
        return None
    return MultiprocessingExecutor(ProcessPoolExecutor(workers), use_cloud_pickle)
@deprecat(reason="Executors have been deprecated and will be removed in v1.9", version='1.8.14')
def get_executor(scheduler, workers, use_cloud_pickle=True):
    """
    Return a task executor based on input parameters. Falling back as required.
    :param scheduler: IP address and port of a distributed.Scheduler, or a Scheduler instance
    :param workers: Number of processes to start for process based parallel execution
    :param use_cloud_pickle: Only applies when scheduler is None and workers > 0, default is True
    """
    if not workers:
        return SerialExecutor()
    if scheduler:
        remote = _get_distributed_executor(scheduler)
        if remote is not None:
            return remote
    local = _get_concurrent_executor(workers, use_cloud_pickle=use_cloud_pickle)
    if local is not None:
        return local
    # Last-resort fallback: run everything in-process.
    return SerialExecutor()
4,014 | generate | from conan import ConanFile
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, replace_in_file, save
from conan.tools.scm import Version
from conan.tools.microsoft import is_msvc
import os
import textwrap
required_conan_version = ">=1.53.0"
class JsoncppConan(ConanFile):
name = "jsoncpp"
license = "MIT"
homepage = "https://github.com/open-source-parsers/jsoncpp"
url = "https://github.com/conan-io/conan-center-index"
topics = ("json", "parser", "config")
description = "A C++ library for interacting with JSON."
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
def export_sources(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
def layout(self):
cmake_layout(self, src_folder="src")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def METHOD_NAME(self):
tc = CMakeToolchain(self)
tc.variables["JSONCPP_WITH_TESTS"] = False
tc.variables["JSONCPP_WITH_WARNING_AS_ERROR"] = False
tc.variables["JSONCPP_WITH_CMAKE_PACKAGE"] = False
tc.variables["JSONCPP_WITH_STRICT_ISO"] = False
tc.variables["JSONCPP_WITH_PKGCONFIG_SUPPORT"] = False
jsoncpp_version = Version(self.version)
if jsoncpp_version < "1.9.0" or jsoncpp_version >= "1.9.4":
tc.variables["BUILD_STATIC_LIBS"] = not self.options.shared
if jsoncpp_version >= "1.9.3":
tc.variables["JSONCPP_WITH_EXAMPLE"] = False
if jsoncpp_version >= "1.9.4":
tc.variables["BUILD_OBJECT_LIBS"] = False
if jsoncpp_version < "1.9.0":
# Honor BUILD_SHARED_LIBS from conan_toolchain (see https://github.com/conan-io/conan/issues/11840)
tc.cache_variables["CMAKE_POLICY_DEFAULT_CMP0077"] = "NEW"
# No opt-out of ccache
if Version(self.version) < "1.9.3":
tc.cache_variables["CCACHE_FOUND"] = ""
else:
tc.cache_variables["CCACHE_EXECUTABLE"] = ""
tc.METHOD_NAME()
def _patch_sources(self):
apply_conandata_patches(self)
if is_msvc(self) and str(self.settings.compiler.version) in ("11", "170"):
replace_in_file(self, os.path.join(self.source_folder, "include", "json", "value.h"),
"explicit operator bool()",
"operator bool()")
def build(self):
self._patch_sources()
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.install()
# TODO: to remove in conan v2 once legacy generators removed
self._create_cmake_module_alias_targets(
os.path.join(self.package_folder, self._module_file_rel_path),
{
"JsonCpp::JsonCpp": "jsoncpp::jsoncpp", # alias target since 1.9.5
"jsoncpp_lib": "jsoncpp::jsoncpp", # imported target for shared lib, but also static between 1.9.0 & 1.9.3
"jsoncpp_static": "jsoncpp::jsoncpp", # imported target for static lib if >= 1.9.4
"jsoncpp_lib_static": "jsoncpp::jsoncpp", # imported target for static lib if < 1.9.0
}
)
def _create_cmake_module_alias_targets(self, module_file, targets):
content = ""
for alias, aliased in targets.items():
content += textwrap.dedent(f"""\
if(TARGET {aliased} AND NOT TARGET {alias})
add_library({alias} INTERFACE IMPORTED)
set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
endif()
""")
save(self, module_file, content)
@property
def _module_file_rel_path(self):
return os.path.join("lib", "cmake", f"conan-official-{self.name}-targets.cmake")
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "jsoncpp")
self.cpp_info.set_property("cmake_target_name", "JsonCpp::JsonCpp")
self.cpp_info.set_property(
"cmake_target_aliases",
["jsoncpp_lib"] if self.options.shared else ["jsoncpp_lib", "jsoncpp_static", "jsoncpp_lib_static"],
)
self.cpp_info.set_property("pkg_config_name", "jsoncpp")
self.cpp_info.libs = ["jsoncpp"]
if self.settings.os == "Windows" and self.options.shared:
self.cpp_info.defines.append("JSON_DLL")
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs.append("m")
# TODO: to remove in conan v2 once legacy generators removed
self.cpp_info.build_modules["cmake_find_package"] = [self._module_file_rel_path]
self.cpp_info.build_modules["cmake_find_package_multi"] = [self._module_file_rel_path] |
4,015 | synapse information | # Copyright (c) 2014 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from spinn_utilities.overrides import overrides
from pacman.model.graphs.application import ApplicationEdge
from spinn_front_end_common.interface.provenance import (
AbstractProvidesLocalProvenanceData)
from spynnaker.pyNN.exceptions import SynapticConfigurationException
_DynamicsStructural = None
_DynamicsSTDP = None
_DynamicsNeuromodulation = None
def are_dynamics_structural(synapse_dynamics):
# pylint: disable=global-statement
global _DynamicsStructural
if _DynamicsStructural is None:
# Avoid import loop by postponing this import
from spynnaker.pyNN.models.neuron.synapse_dynamics import (
AbstractSynapseDynamicsStructural)
_DynamicsStructural = AbstractSynapseDynamicsStructural
return isinstance(synapse_dynamics, _DynamicsStructural)
def are_dynamics_stdp(synapse_dynamics):
# pylint: disable=global-statement
global _DynamicsSTDP
if _DynamicsSTDP is None:
# Avoid import loop by postponing this import
from spynnaker.pyNN.models.neuron.synapse_dynamics import (
SynapseDynamicsSTDP)
_DynamicsSTDP = SynapseDynamicsSTDP
return isinstance(synapse_dynamics, _DynamicsSTDP)
def are_dynamics_neuromodulation(synapse_dynamics):
# pylint: disable=global-statement
global _DynamicsNeuromodulation
if _DynamicsNeuromodulation is None:
# Avoid import loop by postponing this import
from spynnaker.pyNN.models.neuron.synapse_dynamics import (
SynapseDynamicsNeuromodulation)
_DynamicsNeuromodulation = SynapseDynamicsNeuromodulation
return isinstance(synapse_dynamics, _DynamicsNeuromodulation)
class ProjectionApplicationEdge(
        ApplicationEdge, AbstractProvidesLocalProvenanceData):
    """
    An edge which terminates on an :py:class:`AbstractPopulationVertex`.
    """
    __slots__ = [
        "__delay_edge",
        "__synapse_information",
        "__is_neuromodulation"
    ]
    def __init__(
            self, pre_vertex, post_vertex, synapse_information, label=None):
        """
        :param AbstractPopulationVertex pre_vertex:
        :param AbstractPopulationVertex post_vertex:
        :param SynapseInformation synapse_information:
            The synapse information on this edge
        :param str label:
        """
        # NOTE: the masked METHOD_NAME placeholder is restored to
        # ``synapse_information`` (record label "synapse information"; the
        # docstrings above already use that name).
        super().__init__(pre_vertex, post_vertex, label=label)
        # A list of all synapse information for all the projections that are
        # represented by this edge
        self.__synapse_information = [synapse_information]
        self.__is_neuromodulation = are_dynamics_neuromodulation(
            synapse_information.synapse_dynamics)
        # The edge from the delay extension of the pre_vertex to the
        # post_vertex - this might be None if no long delays are present
        self.__delay_edge = None
    def add_synapse_information(self, synapse_information):
        """
        Add another projection's synapse information to this edge.

        :param SynapseInformation synapse_information:
        :raises SynapticConfigurationException:
            if neuromodulated and non-neuromodulated synapses are mixed
        """
        dynamics = synapse_information.synapse_dynamics
        is_neuromodulation = are_dynamics_neuromodulation(dynamics)
        if is_neuromodulation != self.__is_neuromodulation:
            raise SynapticConfigurationException(
                "Cannot mix neuromodulated and non-neuromodulated synapses"
                f" between the same source Population {self._pre_vertex} and"
                f" target Population {self._post_vertex}")
        self.__synapse_information.append(synapse_information)
    @property
    def synapse_information(self):
        """
        The synapse information of all projections on this edge.

        :rtype: list(SynapseInformation)
        """
        return self.__synapse_information
    @property
    def delay_edge(self):
        """
        Settable.
        :rtype: DelayedApplicationEdge or None
        """
        return self.__delay_edge
    @delay_edge.setter
    def delay_edge(self, delay_edge):
        self.__delay_edge = delay_edge
    @property
    def is_neuromodulation(self):
        """
        Whether this edge is providing neuromodulation.
        :rtype: bool
        """
        return self.__is_neuromodulation
    @property
    def n_delay_stages(self):
        """
        :rtype: int
        """
        if self.__delay_edge is None:
            return 0
        return self.__delay_edge.pre_vertex.n_delay_stages
    @overrides(AbstractProvidesLocalProvenanceData.get_local_provenance_data)
    def get_local_provenance_data(self):
        for synapse_info in self.synapse_information:
            synapse_info.connector.get_provenance_data(synapse_info)
4,016 | end file | #!/usr/bin/env python3
#
# Copyright (c) 2021 LunarG, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import sys
from base_generator import write
from dx12_base_generator import Dx12BaseGenerator
# Generates declarations for functions to unwrap struct members with a
# COM object type before the struct is passed as an argument to a DX12
# API call during DX12 capture.
class Dx12StructUnwrappersHeaderGenerator(Dx12BaseGenerator):
# Default C++ code indentation size.
INDENT_SIZE = 4
def __init__(
self,
source_dict,
dx12_prefix_strings,
err_file=sys.stderr,
warn_file=sys.stderr,
diag_file=sys.stdout
):
Dx12BaseGenerator.__init__(
self, source_dict, dx12_prefix_strings, err_file, warn_file,
diag_file
)
# Method override
def beginFile(self, genOpts):
Dx12BaseGenerator.beginFile(self, genOpts)
self.write_include()
write('GFXRECON_BEGIN_NAMESPACE(gfxrecon)', file=self.outFile)
write('GFXRECON_BEGIN_NAMESPACE(encode)', file=self.outFile)
# Method override
def METHOD_NAME(self):
self.newline()
# List containing names of structs with COM object members.
structs_with_objects = {
**self.CUSTOM_STRUCT_HANDLE_MAP, 'D3D12_CPU_DESCRIPTOR_HANDLE':
['ptr']
}
# Find structs with COM object members, which will need to be
# unwrapped.
struct_dict = self.source_dict['struct_dict']
for struct in struct_dict:
self.check_struct_member_handles(
struct, structs_with_objects, None, True
)
# Generate unwrap functions for any structs that were added to
# the list.
for key in structs_with_objects:
if not self.is_struct_black_listed(key):
self.write_struct_unwrap_def(key)
write('template <typename T>', file=self.outFile)
write(
'T* MakeUnwrapStructs(const T* values, size_t len, HandleUnwrapMemory* unwrap_memory)',
file=self.outFile
)
write('{', file=self.outFile)
write(
' assert((values != nullptr) && (len > 0) && (unwrap_memory != nullptr));',
file=self.outFile
)
self.newline()
write(
' const uint8_t* bytes = reinterpret_cast<const uint8_t*>(values);',
file=self.outFile
)
write(
' size_t num_bytes = len * sizeof(T);',
file=self.outFile
)
self.newline()
write(
' return reinterpret_cast<T*>(unwrap_memory->GetFilledBuffer(bytes, num_bytes));',
file=self.outFile
)
write('}', file=self.outFile)
self.newline()
write('template <typename T>', file=self.outFile)
write(
'const T* UnwrapStructPtrObjects(const T* value, HandleUnwrapMemory* unwrap_memory)',
file=self.outFile
)
write('{', file=self.outFile)
write(' T* unwrapped_struct = nullptr;', file=self.outFile)
self.newline()
write(' if (value != nullptr)', file=self.outFile)
write(' {', file=self.outFile)
write(
' unwrapped_struct = MakeUnwrapStructs(value, 1, unwrap_memory);',
file=self.outFile
)
write(
' UnwrapStructObjects(unwrapped_struct, unwrap_memory);',
file=self.outFile
)
write(' }', file=self.outFile)
self.newline()
write(' return unwrapped_struct;', file=self.outFile)
write('}', file=self.outFile)
self.newline()
write('template <typename T>', file=self.outFile)
write(
'const T* UnwrapStructArrayObjects(const T* values, size_t len, HandleUnwrapMemory* unwrap_memory)',
file=self.outFile
)
write('{', file=self.outFile)
write(' if ((values != nullptr) && (len > 0))', file=self.outFile)
write(' {', file=self.outFile)
write(
' auto unwrapped_structs = MakeUnwrapStructs(values, len, unwrap_memory);',
file=self.outFile
)
self.newline()
write(' for (size_t i = 0; i < len; ++i)', file=self.outFile)
write(' {', file=self.outFile)
write(
' UnwrapStructObjects(&unwrapped_structs[i], unwrap_memory);',
file=self.outFile
)
write(' }', file=self.outFile)
self.newline()
write(' return unwrapped_structs;', file=self.outFile)
write(' }', file=self.outFile)
self.newline()
write(
' // Leave the original memory in place when the pointer is not null, but size is zero.',
file=self.outFile
)
write(' return values;', file=self.outFile)
write('}', file=self.outFile)
self.newline()
write('GFXRECON_END_NAMESPACE(encode)', file=self.outFile)
write('GFXRECON_END_NAMESPACE(gfxrecon)', file=self.outFile)
# Finish processing in superclass
Dx12BaseGenerator.METHOD_NAME(self)
# Method override
def generate_feature(self):
Dx12BaseGenerator.generate_feature(self)
def write_struct_unwrap_def(self, struct, indent=''):
expr = indent + 'void UnwrapStructObjects({}* value, '\
'HandleUnwrapMemory* unwrap_memory);\n'.format(struct)
write(expr, file=self.outFile)
def write_include(self):
code = ''
code += '#include "encode/handle_unwrap_memory.h"\n'
code += '#include "util/defines.h"\n'
code += '\n'
header_dict = self.source_dict['header_dict']
for k, v in header_dict.items():
code += '#include <{}>\n'.format(k)
write(code, file=self.outFile)
def increment_indent(self, indent):
return indent + (' ' * self.INDENT_SIZE)
def decrement_indent(self, indent):
return indent[:-self.INDENT_SIZE] |
4,017 | test lsblk unescape | #!/usr/bin/env python3
# Copyright 2015-2020 Canonical Ltd.
# Written by:
# Shawn Wang <shawn.wang@canonical.com>
# Jonathan Cave <jonathan.cave@canonical.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
from unittest import mock
from unittest.mock import patch
import sys
import recovery_info
class FunctionTests(unittest.TestCase):
"""Tests for several functions."""
@mock.patch('subprocess.check_output')
def test_get_recovery_package(self, mock_subprocess_check_output):
"""Smoke test for get_recovery_package()."""
mock_subprocess_check_output.return_value = """\
dell-recovery:
Installed: 1.11
Candidate: 1.11
Version table:
1.11
500 https://archive/cesg-mirror/ test/public amd64 Packages
"""
self.assertEqual(recovery_info.get_recovery_package(),
"dell-recovery_1.11")
@mock.patch('subprocess.check_output')
def test_get_recovery_partition(self, mock_subprocess_check_output):
"""Smoke test for get_recovery_partition()."""
mock_subprocess_check_output.return_value = (
b'TYPE FSTYPE NAME LABEL\n'
b'disk linux_raid_member sda fx:2x250GB\n'
b'raid1 bcache md127 \n'
b'disk ext4 bcache0 Ultra\n'
b'disk linux_raid_member sdb fx:2x250GB\n'
b'raid1 bcache md127 \n'
b'disk ext4 bcache0 Ultra\n'
b'disk sdc \n'
b'part btrfs sdc1 vol1\n'
b'disk sdd \n'
b'part ntfs sdd1 Windows\x208.1\n'
b'part sdd2 \n'
b'part ext4 sdd5 Utopic\n'
b'part swap sdd6 \n'
b'disk bcache sde \n'
b'disk ext4 bcache0 Ultra\n'
b'disk sdf \n'
b'part ntfs sda3 RECOVERY\n')
self.assertEqual(recovery_info.get_recovery_partition(),
("DELL", "/dev/sda3"))
def METHOD_NAME(self):
"""Smoke tests for lsblk_unescape()."""
self.assertEqual(recovery_info.lsblk_unescape(
'Windows\\x208.1'), 'Windows 8.1')
self.assertEqual(recovery_info.lsblk_unescape(
'Windows XP'), 'Windows XP')
class MountedPartitionTests(unittest.TestCase):
"""Unittest of MountedPartition."""
@mock.patch('subprocess.check_output')
def test_with_of_MountedPartition(self, mock_subprocess_check_output):
"""Test mount point."""
test_dir = ""
with recovery_info.MountedPartition("/dev/test") as tmp:
test_dir = tmp
self.assertTrue(os.path.exists(test_dir))
mock_subprocess_check_output.assert_has_calls(
[mock.call(['mount', '/dev/test', test_dir],
universal_newlines=True)])
self.assertFalse(os.path.exists(test_dir))
mock_subprocess_check_output.assert_has_calls(
[mock.call(['umount', test_dir],
universal_newlines=True)])
class RecoveryInfoTests(unittest.TestCase):
"""Tests for RecoveryInfo."""
@mock.patch('recovery_info.get_recovery_package')
@mock.patch('recovery_info.get_recovery_partition')
def test_smoke(self, mock_get_recovery_partition,
mock_get_recovery_package):
"""Smoke tests for running recovery_info."""
mock_get_recovery_partition.return_value = ("DELL", "/dev/sda3")
mock_get_recovery_package.return_value = "dell-recovery_1.11"
testargs = ["recovery_info.py"]
with patch.object(sys, 'argv', testargs):
self.assertIsNone(recovery_info.RecoveryInfo().main())
testargs = ["recovery_info.py", "checktype", "HP"]
with patch.object(sys, 'argv', testargs):
with self.assertRaises(SystemExit):
recovery_info.RecoveryInfo().main()
testargs = ["recovery_info.py", "checktype", "DELL"]
with patch.object(sys, 'argv', testargs):
self.assertIsNone(recovery_info.RecoveryInfo().main()) |
4,018 | is mine | # Copyright (c) 2022 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Optional, TYPE_CHECKING, List
from PyQt6.QtCore import pyqtSignal, pyqtProperty, QObject, pyqtSlot, QUrl
from PyQt6.QtGui import QImage
from cura.CuraApplication import CuraApplication
if TYPE_CHECKING:
from cura.PrinterOutput.PrinterOutputController import PrinterOutputController
from cura.PrinterOutput.Models.PrinterOutputModel import PrinterOutputModel
from cura.PrinterOutput.Models.PrinterConfigurationModel import PrinterConfigurationModel
class PrintJobOutputModel(QObject):
stateChanged = pyqtSignal()
timeTotalChanged = pyqtSignal()
timeElapsedChanged = pyqtSignal()
nameChanged = pyqtSignal()
keyChanged = pyqtSignal()
assignedPrinterChanged = pyqtSignal()
ownerChanged = pyqtSignal()
configurationChanged = pyqtSignal()
previewImageChanged = pyqtSignal()
compatibleMachineFamiliesChanged = pyqtSignal()
def __init__(self, output_controller: "PrinterOutputController", key: str = "", name: str = "", parent = None) -> None:
super().__init__(parent)
self._output_controller = output_controller
self._state = ""
self._time_total = 0
self._time_elapsed = 0
self._name = name # Human readable name
self._key = key # Unique identifier
self._assigned_printer = None # type: Optional[PrinterOutputModel]
self._owner = "" # Who started/owns the print job?
self._configuration = None # type: Optional[PrinterConfigurationModel]
self._compatible_machine_families = [] # type: List[str]
self._preview_image_id = 0
self._preview_image = None # type: Optional[QImage]
@pyqtProperty("QStringList", notify=compatibleMachineFamiliesChanged)
def compatibleMachineFamilies(self) -> List[str]:
# Hack; Some versions of cluster will return a family more than once...
return list(set(self._compatible_machine_families))
def setCompatibleMachineFamilies(self, compatible_machine_families: List[str]) -> None:
if self._compatible_machine_families != compatible_machine_families:
self._compatible_machine_families = compatible_machine_families
self.compatibleMachineFamiliesChanged.emit()
@pyqtProperty(QUrl, notify=previewImageChanged)
def previewImageUrl(self):
self._preview_image_id += 1
# There is an image provider that is called "print_job_preview". In order to ensure that the image qml object, that
# requires a QUrl to function, updates correctly we add an increasing number. This causes to see the QUrl
# as new (instead of relying on cached version and thus forces an update.
temp = "image://print_job_preview/" + str(self._preview_image_id) + "/" + self._key
return QUrl(temp, QUrl.ParsingMode.TolerantMode)
def getPreviewImage(self) -> Optional[QImage]:
return self._preview_image
def updatePreviewImage(self, preview_image: Optional[QImage]) -> None:
if self._preview_image != preview_image:
self._preview_image = preview_image
self.previewImageChanged.emit()
@pyqtProperty(QObject, notify=configurationChanged)
def configuration(self) -> Optional["PrinterConfigurationModel"]:
return self._configuration
def updateConfiguration(self, configuration: Optional["PrinterConfigurationModel"]) -> None:
if self._configuration != configuration:
self._configuration = configuration
self.configurationChanged.emit()
@pyqtProperty(str, notify = ownerChanged)
def owner(self) -> str:
return self._owner
def updateOwner(self, owner: str) -> None:
if self._owner != owner:
self._owner = owner
self.ownerChanged.emit()
@pyqtProperty(bool, notify = ownerChanged)
def METHOD_NAME(self) -> bool:
"""
Returns whether this print job was sent by the currently logged in user.
This checks the owner of the print job with the owner of the currently
logged in account. Both of these are human-readable account names which
may be duplicate. In practice the harm here is limited, but it's the
best we can do with the information available to the API.
"""
return self._owner == CuraApplication.getInstance().getCuraAPI().account.userName
@pyqtProperty(QObject, notify=assignedPrinterChanged)
def assignedPrinter(self):
return self._assigned_printer
def updateAssignedPrinter(self, assigned_printer: Optional["PrinterOutputModel"]) -> None:
if self._assigned_printer != assigned_printer:
old_printer = self._assigned_printer
self._assigned_printer = assigned_printer
if old_printer is not None:
# If the previously assigned printer is set, this job is moved away from it.
old_printer.updateActivePrintJob(None)
self.assignedPrinterChanged.emit()
@pyqtProperty(str, notify=keyChanged)
def key(self):
return self._key
def updateKey(self, key: str):
if self._key != key:
self._key = key
self.keyChanged.emit()
@pyqtProperty(str, notify = nameChanged)
def name(self):
return self._name
def updateName(self, name: str):
if self._name != name:
self._name = name
self.nameChanged.emit()
@pyqtProperty(int, notify = timeTotalChanged)
def timeTotal(self) -> int:
return int(self._time_total)
@pyqtProperty(int, notify = timeElapsedChanged)
def timeElapsed(self) -> int:
return int(self._time_elapsed)
@pyqtProperty(int, notify = timeElapsedChanged)
def timeRemaining(self) -> int:
# Never get a negative time remaining
return int(max(self.timeTotal - self.timeElapsed, 0))
@pyqtProperty(float, notify = timeElapsedChanged)
def progress(self) -> float:
result = float(self.timeElapsed) / max(self.timeTotal, 1.0) # Prevent a division by zero exception.
return min(result, 1.0) # Never get a progress past 1.0
@pyqtProperty(str, notify=stateChanged)
def state(self) -> str:
return self._state
@pyqtProperty(bool, notify=stateChanged)
def isActive(self) -> bool:
inactive_states = [
"pausing",
"paused",
"resuming",
"wait_cleanup"
]
if self.state in inactive_states and self.timeRemaining > 0:
return False
return True
def updateTimeTotal(self, new_time_total: int) -> None:
if self._time_total != new_time_total:
self._time_total = new_time_total
self.timeTotalChanged.emit()
def updateTimeElapsed(self, new_time_elapsed: int) -> None:
if self._time_elapsed != new_time_elapsed:
self._time_elapsed = new_time_elapsed
self.timeElapsedChanged.emit()
def updateState(self, new_state: str) -> None:
if self._state != new_state:
self._state = new_state
self.stateChanged.emit()
@pyqtSlot(str)
def setState(self, state):
self._output_controller.setJobState(self, state) |
4,019 | cleanup | # The following comment should be removed at some point in the future.
# mypy: strict-optional=False
from __future__ import absolute_import
import contextlib
import errno
import hashlib
import logging
import os
from pip._vendor import contextlib2
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from types import TracebackType
from typing import Dict, Iterator, Optional, Set, Type, Union
from pip._internal.req.req_install import InstallRequirement
from pip._internal.models.link import Link
logger = logging.getLogger(__name__)
@contextlib.contextmanager
def update_env_context_manager(**changes):
# type: (str) -> Iterator[None]
target = os.environ
# Save values from the target and change them.
non_existent_marker = object()
saved_values = {} # type: Dict[str, Union[object, str]]
for name, new_value in changes.items():
try:
saved_values[name] = target[name]
except KeyError:
saved_values[name] = non_existent_marker
target[name] = new_value
try:
yield
finally:
# Restore original values in the target.
for name, original_value in saved_values.items():
if original_value is non_existent_marker:
del target[name]
else:
assert isinstance(original_value, str) # for mypy
target[name] = original_value
@contextlib.contextmanager
def get_requirement_tracker():
# type: () -> Iterator[RequirementTracker]
root = os.environ.get('PIP_REQ_TRACKER')
with contextlib2.ExitStack() as ctx:
if root is None:
root = ctx.enter_context(
TempDirectory(kind='req-tracker')
).path
ctx.enter_context(update_env_context_manager(PIP_REQ_TRACKER=root))
logger.debug("Initialized build tracking at %s", root)
with RequirementTracker(root) as tracker:
yield tracker
class RequirementTracker(object):
    """Track which requirements are currently being built.

    A marker file, named after the hash of the requirement's link, is kept
    under ``root`` while a build is in progress so that concurrent builds of
    the same link can be detected (see :meth:`add`).
    """
    def __init__(self, root):
        # type: (str) -> None
        self._root = root
        self._entries = set()  # type: Set[InstallRequirement]
        logger.debug("Created build tracker: %s", self._root)
    def __enter__(self):
        # type: () -> RequirementTracker
        logger.debug("Entered build tracker: %s", self._root)
        return self
    def __exit__(
        self,
        exc_type,  # type: Optional[Type[BaseException]]
        exc_val,  # type: Optional[BaseException]
        exc_tb  # type: Optional[TracebackType]
    ):
        # type: (...) -> None
        # Restored name for the masked METHOD_NAME placeholder (record
        # label "cleanup").
        self.cleanup()
    def _entry_path(self, link):
        # type: (Link) -> str
        # Hash the URL so arbitrary links map to safe, fixed-length filenames.
        hashed = hashlib.sha224(link.url_without_fragment.encode()).hexdigest()
        return os.path.join(self._root, hashed)
    def add(self, req):
        # type: (InstallRequirement) -> None
        """Add an InstallRequirement to build tracking.
        """
        # Get the file to write information about this requirement.
        entry_path = self._entry_path(req.link)
        # Try reading from the file. If it exists and can be read from, a build
        # is already in progress, so a LookupError is raised.
        try:
            with open(entry_path) as fp:
                contents = fp.read()
        except IOError as e:
            # if the error is anything other than "file does not exist", raise.
            if e.errno != errno.ENOENT:
                raise
        else:
            message = '{} is already being built: {}'.format(
                req.link, contents)
            raise LookupError(message)
        # If we're here, req should really not be building already.
        assert req not in self._entries
        # Start tracking this requirement.
        with open(entry_path, 'w') as fp:
            fp.write(str(req))
        self._entries.add(req)
        logger.debug('Added %s to build tracker %r', req, self._root)
    def remove(self, req):
        # type: (InstallRequirement) -> None
        """Remove an InstallRequirement from build tracking.
        """
        # Delete the created file and the corresponding entries.
        os.unlink(self._entry_path(req.link))
        self._entries.remove(req)
        logger.debug('Removed %s from build tracker %r', req, self._root)
    def cleanup(self):
        # type: () -> None
        """Untrack every remaining requirement (run on context-manager exit)."""
        for req in set(self._entries):
            self.remove(req)
        logger.debug("Removed build tracker: %r", self._root)
    @contextlib.contextmanager
    def track(self, req):
        # type: (InstallRequirement) -> Iterator[None]
        """Track *req* for the duration of the ``with`` block."""
        self.add(req)
        yield
        self.remove(req)
# SPDX-License-Identifier: Apache-2.0
"""symbolic executor
Computes a part of the graph symbolically using SymbolicTensorElements
"""
import numpy as np
from tf2onnx import utils
# pylint: disable=logging-not-lazy,unused-argument,missing-docstring
class SymbolicTensorElement:
    """
    Helps with symbolic execution of the graph, in particular tensors representing shapes. Supports multiplication
    and tensor ops.

    An element is `constant * product(terms)`: `terms` is a list of opaque
    variables and `constant` an ordinary number.
    """
    def __init__(self, terms, constant):
        # Terms is a list representing variables
        self.terms = terms
        self.constant = constant
        if self.constant == 0:
            # 0 * anything == 0, so the variable terms can be dropped.
            self.terms = []
    def __mul__(self, other):
        if isinstance(other, SymbolicTensorElement):
            # Concat terms, multiply constant
            return SymbolicTensorElement(self.terms + other.terms, self.constant * other.constant)
        # Other term is a constant
        return SymbolicTensorElement(self.terms, self.constant * other)
    def __rmul__(self, other):
        # Multiplication is commutative here; delegate to __mul__.
        return self.__mul__(other)
    def is_const(self):
        # True when there are no symbolic terms left.
        return len(self.terms) == 0
    def is_one(self):
        return len(self.terms) == 0 and self.constant == 1
    def is_single_var(self):
        # Exactly one variable with coefficient 1.
        return len(self.terms) == 1 and self.constant == 1
    def has_multiple_terms(self):
        return not self.is_const() and not self.is_single_var()
    def METHOD_NAME(self, i, offset):
        # Value to use for dimension i of a Reshape: the constant when fully
        # known, otherwise 0/-1 sentinels.
        # NOTE(review): `self.get_offset` is not defined in this class —
        # presumably provided elsewhere or lost in extraction; confirm
        # before relying on the non-constant branch.
        if self.is_const():
            return self.constant
        if self.get_offset(i) == offset:
            return 0
        return -1
    @staticmethod
    def from_const(constant):
        """Wrap a plain number as a constant element."""
        return SymbolicTensorElement([], constant)
    @staticmethod
    def from_variable(variable):
        """Wrap a symbolic variable with coefficient 1."""
        return SymbolicTensorElement([variable], 1)
    @staticmethod
    def from_value(value):
        # Idempotent: existing elements pass through untouched.
        if isinstance(value, SymbolicTensorElement):
            return value
        return SymbolicTensorElement.from_const(value)
    @staticmethod
    def np_array(np_array):
        """Elementwise-convert a numpy array to SymbolicTensorElements."""
        return np.vectorize(SymbolicTensorElement.from_value)(np_array)
class SymbolicExecutionException(Exception):
    """Raised when a graph (sub)section cannot be executed symbolically."""
    pass
class SymbolicExecutor:
    """Executes a restricted set of ONNX ops on numpy arrays whose elements
    may be SymbolicTensorElement instances.

    Used to evaluate shape-computation subgraphs in which some dimensions
    are unknown symbolic values.
    """
    def __init__(self, graph):
        self.graph = graph
        # Dispatch table mapping the supported op types to their handlers.
        self.op_map = {
            "Unsqueeze": self.compute_squeeze_unsqueeze,
            "Squeeze": self.compute_squeeze_unsqueeze,
            "Gather": self.compute_gather,
            "Mul": self.compute_mul,
            "ReduceProd": self.compute_reduceprod,
            "Slice": self.compute_slice,
            "Cast": self.compute_cast,
            "Concat": self.compute_concat,
            "Const": self.compute_const
        }
    def compute_outputs(self, outputs, feed_dict):
        """Given a map of inputs to np arrays, outputs a list of np arrays of SymbolicTensorElements"""
        nodes_to_compute = self.plan_computation(outputs, feed_dict)
        if nodes_to_compute is None:
            return None
        results = feed_dict.copy()
        for node in nodes_to_compute:
            try:
                results.update(self.compute_node(node, results))
            except Exception as e:
                # Chain the original exception so the root cause stays
                # visible in the traceback.
                raise SymbolicExecutionException(str(e)) from e
        # Intermediate results might be non-symbolic numpy arrays
        return [SymbolicTensorElement.np_array(results[out]) for out in outputs]
    def plan_computation(self, outputs, feed_dict):
        """Return nodes to execute, ordered so inputs come before users."""
        nodes = list(set(self.graph.get_node_by_output(out) for out in outputs))
        sorted_nodes = []
        while nodes:
            n = nodes.pop()
            if n.type not in self.op_map:
                raise SymbolicExecutionException("Unsupported op %s" % n.type)
            sorted_nodes.append(n)
            for inp, inp_name in zip(n.inputs, n.input):
                # Skip optional (empty-name) inputs and already-fed tensors.
                if inp_name != '' and inp_name not in feed_dict:
                    nodes.append(inp)
        # Reverse the discovery order so dependencies run first (a node may
        # appear more than once; recomputation is harmless).
        return sorted_nodes[::-1]
    def compute_node(self, node, feed_dict):
        results = self.op_map[node.type](node, feed_dict)
        return {out: np.array(res) for out, res in zip(node.output, results)}
    def compute_const(self, node, feed_dict):
        return [node.get_tensor_value(as_list=False)]
    def compute_squeeze_unsqueeze(self, node, feed_dict):
        inp1 = feed_dict[node.input[0]]
        # Opset 13 moved `axes` from an attribute to a second input.
        if self.graph.opset < 13:
            axes = node.get_attr_value("axes")
        else:
            axes = feed_dict[node.input[1]].tolist()
        shape = inp1.shape
        handler = self.compute_unsqueeze_shape if node.type == "Unsqueeze" else self.compute_squeeze_shape
        new_shape = handler(shape, axes)
        return [inp1.reshape(new_shape)]
    def compute_cast(self, node, feed_dict):
        inp = feed_dict[node.input[0]]
        # Symbolic elements (dtype object) cannot be cast; pass them through.
        if inp.dtype == object:
            return [inp]
        np_dtype = utils.ONNX_TO_NUMPY_DTYPE[node.get_attr("to").i]
        return [inp.astype(np_dtype)]
    def compute_mul(self, node, feed_dict):
        return [feed_dict[node.input[0]] * feed_dict[node.input[1]]]
    def compute_reduceprod(self, node, feed_dict):
        inp = feed_dict[node.input[0]]
        axes = node.get_attr_value("axes")
        keepdims = node.get_attr_value("keepdims", 1)
        return [np.prod(inp, axis=tuple(axes), keepdims=keepdims)]
    def compute_slice(self, node, feed_dict):
        inps = [feed_dict[inp] if inp != '' else None for inp in node.input]
        if self.graph.opset >= 10:
            # Opset 10+ takes starts/ends/axes/steps as (optional) inputs.
            while len(inps) < 5:
                inps.append(None)
            data, starts, ends, axes, steps = inps
        else:
            data = inps[0]
            starts = node.get_attr_value("starts")
            ends = node.get_attr_value("ends")
            axes = node.get_attr_value("axes")
            steps = None
        rank = len(data.shape)
        ndims = len(starts)
        if axes is None:
            axes = list(range(ndims))
        if steps is None:
            steps = [1] * ndims
        slices = [slice(None, None, None) for _ in range(rank)]
        for axis, start, end, step in zip(axes, starts, ends, steps):
            slices[axis] = slice(start, end, step)
        return [data[tuple(slices)]]
    def compute_concat(self, node, feed_dict):
        axis = node.get_attr_value("axis")
        inps = [feed_dict[inp] for inp in node.input]
        return [np.concatenate(inps, axis=axis)]
    def compute_gather(self, node, feed_dict):
        data = feed_dict[node.input[0]]
        indices = feed_dict[node.input[1]]
        if indices.dtype == object:
            raise SymbolicExecutionException("Gather requires non-symbolic indices")
        axis = node.get_attr_value("axis", 0)
        return [np.take(data, indices, axis=axis)]
    def compute_unsqueeze_shape(self, shape_in, axes):
        """Shape of the input with length-1 dims inserted at `axes`."""
        dims_out = len(shape_in) + len(axes)
        # Negative axes index into the *output* shape, per the ONNX spec.
        axes = [i if i >= 0 else i + dims_out for i in axes]
        shape_in = iter(shape_in)
        shape_out = [None] * dims_out
        for ind in axes:
            shape_out[ind] = 1
        for ind, val in enumerate(shape_out):
            if val is None:
                shape_out[ind] = next(shape_in)
        return shape_out
    def compute_squeeze_shape(self, shape_in, axes):
        """Shape of the input with the dims at `axes` removed."""
        # Fix: negative axes index into the *input* shape, so normalize by
        # the input rank (len(shape_in)), not by the number of axes.
        axes = [i if i >= 0 else i + len(shape_in) for i in axes]
        shape_out = []
        for ind, val in enumerate(shape_in):
            if ind not in axes:
                shape_out.append(val)
        return shape_out
# Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.
from _weakref import ref
__all__ = ['WeakSet']
class _IterationGuard(object):
    # This context manager registers itself in the current iterators of the
    # weak container, such as to delay all removals until the context manager
    # exits.
    # This technique should be relatively thread-safe (since sets are).
    def __init__(self, weakcontainer):
        # Don't create cycles
        self.weakcontainer = ref(weakcontainer)
    def __enter__(self):
        # Register this guard; the container defers removals while its
        # _iterating set is non-empty.
        w = self.weakcontainer()
        if w is not None:
            w._iterating.add(self)
        return self
    def __exit__(self, e, t, b):
        w = self.weakcontainer()
        if w is not None:
            s = w._iterating
            s.remove(self)
            if not s:
                # Last active iterator is done: flush deferred removals.
                w._commit_removals()
class WeakSet(object):
    """A set that holds weak references to its elements.

    An element is discarded automatically once no strong reference to it
    remains.  Removals triggered by garbage collection while the set is
    being iterated are deferred (via _IterationGuard) until iteration ends.
    """
    def __init__(self, data=None):
        self.data = set()
        def _remove(item, selfref=ref(self)):
            # Weakref callback invoked when an element dies.  Holds only a
            # weak reference to the set itself to avoid a reference cycle.
            self = selfref()
            if self is not None:
                if self._iterating:
                    # Mutating self.data mid-iteration is unsafe; defer.
                    self._pending_removals.append(item)
                else:
                    self.data.discard(item)
        self._remove = _remove
        # A list of keys to be removed
        self._pending_removals = []
        # Active _IterationGuard instances; non-empty while iterating.
        self._iterating = set()
        if data is not None:
            self.update(data)
    def _commit_removals(self):
        # Flush removals that were deferred while iteration was in progress.
        l = self._pending_removals
        discard = self.data.discard
        while l:
            discard(l.pop())
    def __iter__(self):
        with _IterationGuard(self):
            for itemref in self.data:
                item = itemref()
                if item is not None:
                    # Caveat: the iterator will keep a strong reference to
                    # `item` until it is resumed or closed.
                    yield item
    def __len__(self):
        # Pending (deferred) removals are logically already gone.
        return len(self.data) - len(self._pending_removals)
    def __contains__(self, item):
        try:
            wr = ref(item)
        except TypeError:
            # Non-weakrefable objects can never be members.
            return False
        return wr in self.data
    def __reduce__(self):
        # Pickle as (class, (list of live elements,), instance dict).
        return (self.__class__, (list(self),),
                getattr(self, '__dict__', None))
    # Mutable container: explicitly unhashable.
    __hash__ = None
    def add(self, item):
        if self._pending_removals:
            self._commit_removals()
        # Store a weakref whose callback removes the entry when item dies.
        self.data.add(ref(item, self._remove))
    def clear(self):
        if self._pending_removals:
            self._commit_removals()
        self.data.clear()
    def copy(self):
        return self.__class__(self)
    def pop(self):
        if self._pending_removals:
            self._commit_removals()
        # Loop: a popped weakref may already be dead; keep trying until a
        # live element is found or the set is exhausted.
        while True:
            try:
                itemref = self.data.pop()
            except KeyError:
                raise KeyError('pop from empty WeakSet')
            item = itemref()
            if item is not None:
                return item
    def remove(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.remove(ref(item))
    def discard(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.discard(ref(item))
    def update(self, other):
        if self._pending_removals:
            self._commit_removals()
        for element in other:
            self.add(element)
    def __ior__(self, other):
        self.update(other)
        return self
    def METHOD_NAME(self, other):
        """Return a new set with elements in self that are not in other."""
        newset = self.copy()
        newset.difference_update(other)
        return newset
    __sub__ = METHOD_NAME
    def difference_update(self, other):
        self.__isub__(other)
    def __isub__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            # x - x is empty; also avoids mutating while reading.
            self.data.clear()
        else:
            self.data.difference_update(ref(item) for item in other)
        return self
    def intersection(self, other):
        return self.__class__(item for item in other if item in self)
    __and__ = intersection
    def intersection_update(self, other):
        self.__iand__(other)
    def __iand__(self, other):
        if self._pending_removals:
            self._commit_removals()
        self.data.intersection_update(ref(item) for item in other)
        return self
    def issubset(self, other):
        return self.data.issubset(ref(item) for item in other)
    __le__ = issubset
    def __lt__(self, other):
        return self.data < set(ref(item) for item in other)
    def issuperset(self, other):
        return self.data.issuperset(ref(item) for item in other)
    __ge__ = issuperset
    def __gt__(self, other):
        return self.data > set(ref(item) for item in other)
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.data == set(ref(item) for item in other)
    def __ne__(self, other):
        opposite = self.__eq__(other)
        if opposite is NotImplemented:
            return NotImplemented
        return not opposite
    def symmetric_difference(self, other):
        newset = self.copy()
        newset.symmetric_difference_update(other)
        return newset
    __xor__ = symmetric_difference
    def symmetric_difference_update(self, other):
        self.__ixor__(other)
    def __ixor__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            # x ^ x is empty.
            self.data.clear()
        else:
            self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
        return self
    def union(self, other):
        return self.__class__(e for s in (self, other) for e in s)
    __or__ = union
    def isdisjoint(self, other):
        return len(self.intersection(other)) == 0
"""
Utilities to measure metrics of a model.
"""
import torch
import time
import dataclasses
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.util.experiment.instantiator import TorchBenchModelConfig
from torchbenchmark import ModelTask
from typing import List, Union, Tuple, Optional
WARMUP_ROUNDS = 10
BENCHMARK_ITERS = 15
MEMPROF_ITER = 2
NANOSECONDS_PER_MILLISECONDS = 1_000_000.0
@dataclasses.dataclass
class TorchBenchModelMetrics:
    """Container for the metrics collected from a single benchmark model."""
    # Per-iteration wall-clock latencies in milliseconds.
    latencies: List[float]
    # Derived as batch_size * 1000 / latency (samples per second).
    throughputs: List[float]
    # Peak host memory in MB, or None if not measured.
    cpu_peak_mem: Optional[float]
    # Peak device memory in MB, or None if not measured.
    gpu_peak_mem: Optional[float]
    pt2_compilation_time: Optional[float]
    pt2_graph_breaks: Optional[float]
    model_flops: Optional[float]
def METHOD_NAME(func, device: str, nwarmup=WARMUP_ROUNDS, num_iter=BENCHMARK_ITERS) -> List[float]:
    """Time `num_iter` invocations of `func` and return latencies in ms.

    `nwarmup` untimed warm-up calls are made first.  On CUDA devices the
    GPU is synchronized before and after each timed call so the measurement
    covers the queued kernels.  time_ns() is used because time() is not
    guaranteed better than 1-second precision
    (https://docs.python.org/3/library/time.html#time.time).
    """
    is_cuda = device == "cuda"
    for _ in range(nwarmup):
        func()
    latencies_ms = []
    for _ in range(num_iter):
        if is_cuda:
            torch.cuda.synchronize()
        start = time.time_ns()
        func()
        if is_cuda:
            torch.cuda.synchronize()  # wait for the timed kernels to finish
        end = time.time_ns()
        latencies_ms.append((end - start) / NANOSECONDS_PER_MILLISECONDS)
    return latencies_ms
def get_peak_memory(func, device: str, num_iter=MEMPROF_ITER, export_metrics_file='', metrics_needed=[], metrics_gpu_backend='dcgm', cpu_monitored_pid=None) -> Tuple[Optional[float], Optional[str], Optional[float]]:
    "Run one step of the model, and return the peak memory in MB."
    # NOTE(review): mutable default `metrics_needed=[]` is shared across
    # calls; it is only read here, but a None default would be safer.
    from components.model_analyzer.TorchBenchAnalyzer import ModelAnalyzer
    # Keep only the memory metrics this function knows how to produce.
    new_metrics_needed = [_ for _ in metrics_needed if _ in ['cpu_peak_mem', 'gpu_peak_mem']]
    if not new_metrics_needed:
        raise ValueError(f"Expected metrics_needed to be non-empty, get: {metrics_needed}")
    mem_model_analyzer = ModelAnalyzer(export_metrics_file, new_metrics_needed, metrics_gpu_backend, cpu_monitored_pid)
    continue_num_iter = BENCHMARK_ITERS - num_iter
    def work_func():
        # One model step, synchronized on CUDA so the step fully completes.
        if device == "cuda":
            torch.cuda.synchronize()
            func()
            torch.cuda.synchronize()
        else:
            func()
    # Time a single untimed step to decide how many monitored iterations
    # are needed for a stable peak-memory reading.
    t0 = time.time_ns()
    work_func()
    t1 = time.time_ns()
    # if total execution time is less than 15ms, we run the model for BENCHMARK_ITERS times
    # to get more accurate peak memory
    if (t1 - t0) < 15 * NANOSECONDS_PER_MILLISECONDS:
        num_iter = BENCHMARK_ITERS
    else:
        num_iter = MEMPROF_ITER
    mem_model_analyzer.start_monitor()
    for _i in range(num_iter):
        work_func()
    mem_model_analyzer.stop_monitor()
    mem_model_analyzer.aggregate()
    device_id = None
    gpu_peak_mem = None
    cpu_peak_mem = None
    if 'gpu_peak_mem' in metrics_needed:
        device_id, gpu_peak_mem = mem_model_analyzer.calculate_gpu_peak_mem()
    if 'cpu_peak_mem' in metrics_needed:
        cpu_peak_mem = mem_model_analyzer.calculate_cpu_peak_mem()
    if export_metrics_file:
        mem_model_analyzer.update_export_name("_peak_memory")
        mem_model_analyzer.export_all_records_to_csv()
    return cpu_peak_mem, device_id, gpu_peak_mem
def get_model_flops(model: Union[BenchmarkModel, ModelTask]) -> float:
    "Run one step of the model, and return the model total flops."
    from torch.utils.flop_counter import FlopCounterMode
    flop_counter = FlopCounterMode()
    def work_func():
        # One synchronized step so all kernels are accounted for.
        if model.device == "cuda":
            torch.cuda.synchronize()
            model.invoke()
            torch.cuda.synchronize()
        else:
            model.invoke()
    with flop_counter:
        work_func()
    # Sum the per-operator counts recorded under the "Global" scope.
    total_flops = sum([v for _, v in flop_counter.flop_counts["Global"].items()])
    return total_flops
def get_model_test_metrics(model: Union[BenchmarkModel, ModelTask], metrics=[], export_metrics_file=False, metrics_gpu_backend='nvml', nwarmup=WARMUP_ROUNDS, num_iter=BENCHMARK_ITERS) -> TorchBenchModelMetrics:
    """Collect the requested `metrics` for `model` and return them bundled.

    Unrequested metrics are left as None in the returned dataclass.
    """
    import os
    latencies = None
    throughputs = None
    cpu_peak_mem = None
    gpu_peak_mem = None
    pt2_compilation_time = None
    pt2_graph_breaks = None
    model_flops = None
    if not (isinstance(model, BenchmarkModel) or isinstance(model, ModelTask)):
        raise ValueError(f"Expected BenchmarkModel or ModelTask, get type: {type(model)}")
    # ModelTask runs in a worker process; monitor that pid, not our own.
    model_pid = os.getpid() if isinstance(model, BenchmarkModel) else model.worker.proc_pid()
    device = model.device if isinstance(model, BenchmarkModel) else model.get_model_attribute("device")
    # Throughputs are derived from latencies, so both require timing runs.
    if 'latencies' in metrics or 'throughputs' in metrics:
        latencies = METHOD_NAME(model.invoke, device, nwarmup=nwarmup, num_iter=num_iter)
    if 'cpu_peak_mem' in metrics or 'gpu_peak_mem' in metrics:
        cpu_peak_mem, _device_id, gpu_peak_mem = get_peak_memory(model.invoke, device, export_metrics_file=export_metrics_file, metrics_needed=metrics, metrics_gpu_backend=metrics_gpu_backend, cpu_monitored_pid=model_pid)
    if 'throughputs' in metrics:
        # samples/second: latency is in milliseconds.
        throughputs = [model.batch_size * 1000 / latency for latency in latencies]
    if 'pt2_compilation_time' in metrics:
        pt2_compilation_time = model.get_model_attribute('pt2_compilation_time') \
            if isinstance(model, ModelTask) else model.pt2_compilation_time
    if 'pt2_graph_breaks' in metrics:
        pt2_graph_breaks = model.get_model_attribute('pt2_graph_breaks') \
            if isinstance(model, ModelTask) else model.pt2_graph_breaks
    if 'model_flops' in metrics:
        model_flops = get_model_flops(model)
    return TorchBenchModelMetrics(latencies, throughputs, cpu_peak_mem, gpu_peak_mem, pt2_compilation_time, pt2_graph_breaks, model_flops)
def get_model_accuracy(model_config: TorchBenchModelConfig, isolated: bool=True) -> str:
    """Load the model in accuracy mode and return its accuracy attribute."""
    import copy
    from torchbenchmark.util.experiment.instantiator import load_model_isolated, load_model
    # Try load minimal batch size, if fail, load the default batch size
    accuracy_model_config = copy.deepcopy(model_config)
    if not "--accuracy" in accuracy_model_config.extra_args:
        accuracy_model_config.extra_args = ["--accuracy"] + accuracy_model_config.extra_args
    if isolated:
        model = load_model_isolated(accuracy_model_config)
        accuracy = model.get_model_attribute("accuracy")
        del model
        return accuracy
    else:
        # NOTE(review): this branch loads the original `model_config`,
        # not `accuracy_model_config` with "--accuracy" prepended —
        # confirm this asymmetry is intentional.
        model = load_model(model_config)
        return model.accuracy
#
# SPDX-FileCopyrightText:
# Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""This is a helper module for distributed training.
The code uses an official implementation of
distributed data parallel launcher as just a reference.
https://github.com/pytorch/pytorch/blob/v1.8.2/torch/distributed/launch.py
One main difference is this code focuses on
launching simple function with given arguments.
"""
import multiprocessing
import os
import signal
import socket
import time
# Map of signal number -> signal name, used for readable error messages.
if hasattr(signal, "valid_signals"):
    # signal.valid_signals() is available on newer Pythons: derive the
    # map from the live signal set of this platform.
    _signalno_name_map = {
        s.value: s.name for s in signal.valid_signals() if isinstance(s, signal.Signals)
    }
else:
    # TODO(lazykyama): It should be deprecated
    # once Python 3.7 is removed from supported platform.
    _signalno_name_map = dict(
        [
            (1, "SIGHUP"),
            (2, "SIGINT"),
            (3, "SIGQUIT"),
            (4, "SIGILL"),
            (5, "SIGTRAP"),
            (6, "SIGABRT"),
            (7, "SIGBUS"),
            (8, "SIGFPE"),
            (9, "SIGKILL"),
            (10, "SIGUSR1"),
            (11, "SIGSEGV"),
            (12, "SIGUSR2"),
            (13, "SIGPIPE"),
            (14, "SIGALRM"),
            (15, "SIGTERM"),
            (17, "SIGCHLD"),
            (18, "SIGCONT"),
            (19, "SIGSTOP"),
            (20, "SIGTSTP"),
            (21, "SIGTTIN"),
            (22, "SIGTTOU"),
            (23, "SIGURG"),
            (24, "SIGXCPU"),
            (25, "SIGXFSZ"),
            (26, "SIGVTALRM"),
            (27, "SIGPROF"),
            (28, "SIGWINCH"),
            (29, "SIGIO"),
            (30, "SIGPWR"),
            (31, "SIGSYS"),
            (34, "SIGRTMIN"),
            (64, "SIGRTMAX"),
        ]
    )
class WorkerError(multiprocessing.ProcessError):
    """Raised when a launched worker process exits with a non-zero code."""

    def __init__(self, *, msg, exitcode, worker_id):
        """Store the failing worker's id and exit code."""
        super().__init__(msg)
        self._exitcode = exitcode
        self._worker_id = worker_id

    def __str__(self):
        """Summarize which worker failed and how."""
        return f"worker[{self._worker_id}] failed with exitcode={self._exitcode}"

    @property
    def worker_id(self):
        """ID of the worker process that caused this error."""
        return self._worker_id

    @property
    def exitcode(self):
        """Exit code returned by the failed worker process."""
        return self._exitcode
class MainProcessError(multiprocessing.ProcessError):
    """An error happened from main process."""
    def __init__(self, *, signal_no):
        """Initialize error class with the signal that stopped the launcher."""
        # Build a human-readable message from the module-level name map.
        msg = (
            f"{_signalno_name_map[signal_no]} received, "
            f"exiting due to {signal.strsignal(signal_no)}."
        )
        super(MainProcessError, self).__init__(msg)
        self._signal_no = signal_no
        self._msg = msg
    def __str__(self):
        """Return a custom error message."""
        return self._msg
    @property
    def signal_no(self):
        """Return signal number which stops main process."""
        return self._signal_no
def METHOD_NAME(method):
    """Set multiprocess start method.

    NOTE(review): the `assert` is stripped under `python -O`; raising
    ValueError explicitly would be more robust input validation.
    """
    assert method in ("fork", "spawn", "forkserver")
    return multiprocessing.METHOD_NAME(method)
def free_port():
    """Return a TCP port number that was free at the moment of the call.

    Binding to port 0 lets the OS pick an unused port.  Note the race:
    another process may grab the port between this call and its actual
    use, so availability is best-effort, not guaranteed.
    """
    # Adapted from ESPnet v2's utility:
    # https://github.com/espnet/espnet/blob/43ce0c69fb32961235534b348700dc6c74ad5792/espnet2/train/distributed_utils.py#L187-L198
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as tmp_sock:
        tmp_sock.bind(("", 0))
        _host, port = tmp_sock.getsockname()
        return port
def _kill_processes(processes):
    """Best-effort kill of every process in `processes`."""
    # TODO(lazykyama): This implementation can't stop all processes
    # which have grandchildren processes launched
    # within each child process directly forked from this script.
    # Need improvement for more safe termination.
    for p in processes:
        try:
            # NOTE: multiprocessing.Process.kill() was introduced in 3.7.
            # https://docs.python.org/3.7/library/multiprocessing.html#multiprocessing.Process.kill
            if not hasattr(p, "kill"):
                p.terminate()
            else:
                p.kill()
        except Exception:  # noqa: E722
            # NOTE: Ignore any exception happens during killing a process
            # because this intends to send kill signal to *all* processes.
            pass
def launch(func, args, nprocs, master_addr="localhost", master_port=None):
    """Launch processes with a given function and given arguments.

    Spawns `nprocs` workers running `func(args)` with PyTorch-distributed
    environment variables set, monitors them, and raises WorkerError if any
    worker exits non-zero (killing the rest).

    .. note:: Current implementation supports only single node case.
    """
    if master_port is None:
        master_port = free_port()
    # Set PyTorch distributed related environmental variables
    # NOTE: in contrast to subprocess.Popen,
    # explicit environment variables can not be specified.
    # It's necessary to add additional variables to
    # current environment variable list.
    original_env = os.environ.copy()
    # TODO(lazykyama): multi-node support
    os.environ["WORLD_SIZE"] = str(nprocs)
    os.environ["MASTER_ADDR"] = master_addr
    os.environ["MASTER_PORT"] = str(master_port)
    processes = []
    for local_rank in range(nprocs):
        # Each process's rank
        # TODO(lazykyama): multi-node support
        os.environ["RANK"] = str(local_rank)
        os.environ["LOCAL_RANK"] = str(local_rank)
        process = multiprocessing.Process(target=func, args=(args,))
        process.start()
        processes.append(process)
    # Set signal handler to capture signals sent to main process,
    # and ensure that all children processes will be terminated.
    def _handler(signal_no, _):
        _kill_processes(processes)
        raise MainProcessError(signal_no=signal_no)
    signal.signal(signal.SIGINT, _handler)
    signal.signal(signal.SIGTERM, _handler)
    # Recovery environment variables.
    os.environ.clear()
    os.environ.update(original_env)
    # Monitor all workers.
    worker_error = None
    finished_process_ids = set()
    while len(processes) > len(finished_process_ids):
        for localrank, p in enumerate(processes):
            if p.pid in finished_process_ids:
                # Skip rest of checks becuase
                # this process has been already finished.
                continue
            if p.is_alive():
                # This process is still running.
                continue
            elif p.exitcode == 0:
                # This process properly finished.
                finished_process_ids.add(p.pid)
            else:
                # An error happens in one process.
                # Will try to terminate all other processes.
                worker_error = WorkerError(
                    msg=(f"{func.__name__} failed with error code: {p.exitcode}"),
                    exitcode=p.exitcode,
                    worker_id=localrank,
                )
                break
        if worker_error is not None:
            # Go out of this while loop to terminate all processes.
            break
        # Poll once per second to avoid busy-waiting.
        time.sleep(1.0)
    if worker_error is not None:
        # Trying to stop all workers.
        _kill_processes(processes)
        raise worker_error
import json
import os
from typing import Dict, List
from helm.common.general import ensure_directory_exists, ensure_file_downloaded
from .scenario import Scenario, Instance, ALL_SPLITS, CORRECT_TAG, Reference, PassageQuestionInput, Output
class PubMedQAScenario(Scenario):
"""
From "PubMedQA: A Dataset for Biomedical Research Question Answering" (Jin et al.),
PubMedQA is a biomedical QA dataset collected from PubMed abstracts, where the answer to the questions are
one of yes/no/maybe. We use the " PQA-L(abeled)" subset, which has 1,000 labeled question-answer pairs
annotated by human experts.
We generated the splits using the official script:
https://github.com/pubmedqa/pubmedqa/blob/master/preprocess/split_dataset.py.
The train and dev splits are from the "pqal_fold0" fold. A copy of the preprocessed dataset is stored at
https://worksheets.codalab.org/bundles/0x531c9c54d8314d289da812af608b86fb.
The following is an example from the dataset
```
"QUESTION": "Is anorectal endosonography valuable in dyschesia?",
"CONTEXTS": [
"Dyschesia can be provoked by inappropriate defecation movements. The aim of this prospective study was to
demonstrate dysfunction of the anal sphincter and/or the musculus (m.) puborectalis in patients with dyschesia
using anorectal endosonography.",
"Twenty consecutive patients with a medical history of dyschesia and a control group of 20 healthy subjects
underwent linear anorectal endosonography (Toshiba models IUV 5060 and PVL-625 RT). In both groups, the
dimensions of the anal sphincter and the m. puborectalis were measured at rest, and during voluntary squeezing
and straining. Statistical analysis was performed within and between the two groups.",
"The anal sphincter became paradoxically shorter and/or thicker during straining (versus the resting state) in
85% of patients but in only 35% of control subjects. Changes in sphincter length were statistically
significantly different (p<0.01, chi(2) test) in patients compared with control subjects. The m. puborectalis
became paradoxically shorter and/or thicker during straining in 80% of patients but in only 30% of controls.
Both the changes in length and thickness of the m. puborectalis were significantly different (p<0.01, chi(2)
test) in patients versus control subjects."
],
"LABELS": [
"AIMS",
"METHODS",
"RESULTS"
],
"MESHES": [
"Adolescent",
"Adult",
"Aged",
"Aged, 80 and over",
"Anal Canal",
"Case-Control Studies",
"Chi-Square Distribution",
"Constipation",
"Defecation",
"Endosonography",
"Female",
"Humans",
"Male",
"Middle Aged",
"Pelvic Floor",
"Rectum"
],
"YEAR": "2002",
"reasoning_required_pred": "yes",
"reasoning_free_pred": "yes",
"final_decision": "yes"
```
Citation
```
@inproceedings{jin2019pubmedqa,
title={PubMedQA: A Dataset for Biomedical Research Question Answering},
author={Jin, Qiao and Dhingra, Bhuwan and Liu, Zhengping and Cohen, William and Lu, Xinghua},
booktitle={Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the
9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)},
pages={2567--2577},
year={2019}
}
```
To reproduce the zero-shot performance of OpenAI's text-davinci-002 model on PubMedQA, we follow what was
done in "Can large language models reason about medical questions?" (Liévin et al.) when constructing
the `Instance`s.
The following is the template of how they constructed the prompts
```
Context: <Label>. <context>
<Label>. <context>
<Label>. <context>
Question: <Question>
A) yes
B) no
C) maybe
```
among A through C, the answer is
Citation
```
@misc{https://doi.org/10.48550/arxiv.2207.08143,
doi = {10.48550/ARXIV.2207.08143},
url = {https://arxiv.org/abs/2207.08143},
author = {Liévin, Valentin and Hother, Christoffer Egeberg and Winther, Ole},
keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), Machine Learning (cs.LG),
FOS: Computer and information sciences, FOS: Computer and information sciences, I.2.1; I.2.7},
title = {Can large language models reason about medical questions?},
publisher = {arXiv},
year = {2022},
copyright = {arXiv.org perpetual, non-exclusive license}
}
```
"""
name = "pubmed_qa"
description = "A biomedical question answering (QA) dataset collected from PubMed abstracts."
tags = ["question_answering", "biomedical"]
POSSIBLE_ANSWER_CHOICES: List[str] = ["yes", "no", "maybe"]
    def METHOD_NAME(self) -> List[Instance]:
        """Download each split of PQA-L and convert it into HELM Instances."""
        data_path: str = os.path.join(self.output_path, "data")
        ensure_directory_exists(data_path)
        instances: List[Instance] = []
        for split in ALL_SPLITS:
            split_file_name: str = f"{split}_set.json"
            split_path: str = os.path.join(data_path, split_file_name)
            # Download the preprocessed split from the CodaLab mirror.
            ensure_file_downloaded(
                source_url="https://worksheets.codalab.org/rest/bundles/0x531c9c54d8314d289da812af608b86fb/"
                f"contents/blob/{split_file_name}",
                target_path=split_path,
                unpack=False,
            )
            with open(split_path, "r") as f:
                split_examples: Dict = json.load(f)
                for example in split_examples.values():
                    context_labels: List[str] = example["LABELS"]
                    contexts: List[str] = example["CONTEXTS"]
                    assert len(contexts) == len(context_labels)
                    # Format: <Label>. <context>
                    #         <Label>. <context>
                    # Example: Methods. Sixteen swine were used...
                    #          Results. Application of QC led to...
                    background: str = "\n".join(
                        [f"{label.title()}. {context}" for label, context in zip(context_labels, contexts)]
                    )
                    # Build `Reference`s. The possible answer choices are one of: "yes", "no" or "maybe"
                    correct_answer: str = example["final_decision"]
                    assert correct_answer in PubMedQAScenario.POSSIBLE_ANSWER_CHOICES
                    references: List[Reference] = [
                        Reference(Output(text=answer), tags=[CORRECT_TAG] if answer == correct_answer else [])
                        for answer in PubMedQAScenario.POSSIBLE_ANSWER_CHOICES
                    ]
                    # Following Liévin et al., prepend the question with the provided context.
                    # Examples can be found here: https://vlievin.github.io/medical-reasoning/samples/pubmedqa.html.
                    question: str = example["QUESTION"]
                    prompt = PassageQuestionInput(
                        passage=background, question=question + "\n", passage_prefix="Context: ", separator="\n\n"
                    )
                    instance: Instance = Instance(input=prompt, references=references, split=split)
                    instances.append(instance)
        return instances
"""
Pipeline manager module that provides functionality to add, modify and delete pipelines.
"""
import os
import sys
from argparse import ArgumentParser
if __name__ == '__main__':
# django needs to be loaded when this script is run standalone from the command line
sys.path.append(os.path.join(os.path.dirname(__file__), '../../'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
import django
django.setup()
from django.contrib.auth.models import User
from pipelines.models import Pipeline
from pipelines.serializers import PipelineSerializer
class PipelineManager(object):
def __init__(self):
parser = ArgumentParser(description='Manage pipelines')
subparsers = parser.add_subparsers(dest='subparser_name', title='subcommands',
description='valid subcommands',
help='sub-command help')
# create the parser for the "add" command
parser_add = subparsers.add_parser('add', help='Add a new pipeline')
parser_add.add_argument('name', help="Pipeline's name")
parser_add.add_argument('owner', help="Pipeline's owner username")
parser_add.add_argument('plugintree',
help="A json string with the plugin tree for the pipeline")
parser_add.add_argument('--authors', help="Pipeline's authors string")
parser_add.add_argument('--category', help="Pipeline's category")
parser_add.add_argument('--description', help="Pipeline's description")
parser_add.add_argument('--unlock', action='store_true',
help="Unlock pipeline to make it immutable and visible "
"to other users")
# create the parser for the "modify" command
parser_modify = subparsers.add_parser('modify', help='Modify existing pipeline')
parser_modify.add_argument('id', type=int, help="Plugin's id")
parser_modify.add_argument('--name', help="Pipeline's name")
parser_modify.add_argument('--authors', help="Pipeline's authors string")
parser_modify.add_argument('--category', help="Pipeline's category")
parser_modify.add_argument('--description', help="Pipeline's description")
parser_modify.add_argument('--unlock', action='store_true',
help="Unlock pipeline to make it immutable and visible "
"to other users")
# create the parser for the "remove" command
parser_remove = subparsers.add_parser('remove', help='Remove an existing pipeline')
parser_remove.add_argument('id', type=int, help="Plugin's id")
self.parser = parser
def add_pipeline(self, args):
"""
Add a new pipeline to the system.
"""
data = {'name': args.name, 'plugin_tree': args.plugintree}
if args.authors:
data['authors'] = args.authors
if args.category:
data['category'] = args.category
if args.description:
data['description'] = args.description
if args.unlock:
data['locked'] = False
pipeline_serializer = PipelineSerializer(data=data)
pipeline_serializer.is_valid(raise_exception=True)
owner = User.objects.get(username=args.owner)
pipeline_serializer.save(owner=owner)
def modify_pipeline(self, args):
    """
    Modify an existing pipeline.

    Only the options the user supplied are sent to the serializer, so
    unspecified fields keep their current values.
    """
    pipeline = self.get_pipeline(args.id)
    # Build a partial-update payload from whichever options were provided.
    data = {field: getattr(args, field)
            for field in ('name', 'authors', 'category', 'description')
            if getattr(args, field)}
    if args.unlock:
        data['locked'] = False
    serializer = PipelineSerializer(pipeline, data=data)
    serializer.is_valid(raise_exception=True)
    serializer.save()
def METHOD_NAME(self, args):
    """
    Remove an existing pipeline from the system.

    :param args: parsed CLI namespace; only ``args.id`` is used.
    :raises NameError: if no pipeline with that id exists
        (propagated from :meth:`get_pipeline`).
    """
    pipeline = self.get_pipeline(args.id)
    pipeline.delete()
def run(self, args=None):
    """
    Parse the arguments passed to the manager and perform the appropriate action.

    :param args: argument list for argparse; None means use sys.argv.
    """
    options = self.parser.parse_args(args)
    # Dispatch on the chosen sub-command; unknown names fall through silently,
    # exactly as the original if/elif chain did.
    handlers = {
        'add': self.add_pipeline,
        'modify': self.modify_pipeline,
        'remove': self.METHOD_NAME,
    }
    handler = handlers.get(options.subparser_name)
    if handler is not None:
        handler(options)
@staticmethod
def get_pipeline(id):
    """
    Get an existing pipeline.

    :param id: primary key of the pipeline.
    :raises NameError: when no pipeline with that id exists.
    """
    try:
        return Pipeline.objects.get(pk=id)
    except Pipeline.DoesNotExist:
        raise NameError("Couldn't find pipeline with id '%s' in the system" % id)
# ENTRYPOINT
if __name__ == "__main__":
    # Build the manager and let it parse sys.argv.
    PipelineManager().run()
4,026 | enable | '''
Wifi Facade.
=============
The :class:`Wifi` is to provide access to the wifi of your mobile/ desktop
devices.
It currently supports `connecting`, `disconnecting`, `scanning`, `getting
available wifi network list` and `getting network information`.
Simple examples
---------------
To enable/ turn on wifi scanning::
>>> from plyer import wifi
>>> wifi.start_scanning()
Once the wifi is enabled/ turned on, then this command starts to scan
all the nearby available wifi networks.
To get network info::
>>> from plyer import wifi
>>> wifi.start_scanning()
>>> return wifi.get_network_info(name)
Returns network details of the network whose name/ssid is provided in the
`name` parameter.
To connect to a network::
>>> from plyer import wifi
>>> wifi.start_scanning()
>>> wifi.connect(network, parameters)
This connects to the network whose name/ssid is provided under `network`
parameter and along with other necessary methods for connection
which depends upon platform to platform.
please visit following files for more details about requirements of
`parameters` argument of the `connect` method:
plyer/platforms/win/wifi.py
plyer/platforms/macosx/wifi.py
plyer/platforms/linux/wifi.py
To disconnect from wifi::
>>> from plyer import wifi
>>> wifi.disconnect()
This disconnects your device from any wifi network.
To get available wifi networks::
>>> from plyer import wifi
>>> wifi.start_scanning()
>>> return wifi.get_available_wifi()
This returns all the available wifi networks near the device.
Supported Platforms
-------------------
Windows, OS X, Linux
Ex: 6
----------
from plyer import wifi
wifi.enable()
This enables wifi device.
Ex: 7
----------
from plyer import wifi
wifi.disable()
This disables the wifi device.
'''
class Wifi:
    '''
    Wifi Facade.

    Thin public API: every public method delegates to a ``_``-prefixed
    private hook, which platform-specific backends override. The base
    implementations of those hooks raise NotImplementedError.
    '''

    def is_enabled(self):
        '''
        Return enabled status of WiFi hardware.
        '''
        return self._is_enabled()

    def is_connected(self, interface=None):
        '''
        Return connection state of WiFi interface.

        .. versionadded:: 1.4.0
        '''
        return self._is_connected(interface=interface)

    @property
    def interfaces(self):
        '''
        List all available WiFi interfaces.

        .. versionadded:: 1.4.0
        '''
        raise NotImplementedError()

    def start_scanning(self, interface=None):
        '''
        Turn on scanning.
        '''
        return self._start_scanning(interface=interface)

    def get_network_info(self, name):
        '''
        Return a dictionary of specified network.
        '''
        return self._get_network_info(name=name)

    def get_available_wifi(self):
        '''
        Returns a list of all the available wifi.
        '''
        return self._get_available_wifi()

    def connect(self, network, parameters, interface=None):
        '''
        Method to connect to some network.

        The expected shape of *parameters* depends on the platform backend.
        '''
        self._connect(
            network=network,
            parameters=parameters,
            interface=interface
        )

    def disconnect(self, interface=None):
        '''
        To disconnect from some network.
        '''
        self._disconnect(interface=interface)

    def METHOD_NAME(self):
        '''
        Wifi interface power state is set to "ON".
        '''
        self._enable()

    def disable(self):
        '''
        Wifi interface power state is set to "OFF".
        '''
        self._disable()

    # private — implemented by the per-platform backends

    def _is_enabled(self):
        raise NotImplementedError()

    def _is_connected(self, interface=None):
        raise NotImplementedError()

    def _start_scanning(self, interface=None):
        raise NotImplementedError()

    def _get_network_info(self, **kwargs):
        raise NotImplementedError()

    def _get_available_wifi(self):
        raise NotImplementedError()

    def _connect(self, **kwargs):
        raise NotImplementedError()

    def _disconnect(self, interface=None):
        raise NotImplementedError()

    def _enable(self):
        raise NotImplementedError()

    def _disable(self):
        raise NotImplementedError()
4,027 | filter on subsidiary | # coding: utf-8
"""
Pydici billing tables
@author: Sébastien Renard (sebastien.renard@digitalfox.org)
@license: AGPL v3 or newer (http://www.gnu.org/licenses/agpl-3.0.html)
"""
from itertools import chain
from django.db.models import Q
from django.utils.safestring import mark_safe
from django.urls import reverse
from django_datatables_view.base_datatable_view import BaseDatatableView
from core.decorator import PydiciFeatureMixin, PydiciNonPublicdMixin
from core.utils import to_int_or_round
from billing.views import BillingRequestMixin
from billing.models import ClientBill, SupplierBill
from people.models import Consultant
from crm.utils import get_subsidiary_from_session
class BillTableDT(PydiciNonPublicdMixin, BillingRequestMixin, BaseDatatableView):
    """Base bill table backend for datatables.

    Subclasses define ``columns``/``order_columns`` and
    ``get_initial_queryset``; this base handles the session subsidiary
    filter, free-text search and common column rendering.
    """

    def METHOD_NAME(self, qs):
        # Restrict the queryset to the subsidiary selected in the session,
        # if any; otherwise return it unchanged.
        subsidiary = get_subsidiary_from_session(self.request)
        if subsidiary:
            qs = qs.filter(lead__subsidiary=subsidiary)
        return qs

    def get_filters(self, search):
        """Custom method to get Q filter objects that should be combined with OR keyword"""
        filters = []
        try:
            # Just try to cast to see if we have a number, but keep the str
            # for the filter to allow proper casting by Django itself.
            float(search)
            filters.extend([Q(amount=search),
                            Q(amount_with_vat=search)])
        except ValueError:
            # search term is not a number
            filters.extend([Q(bill_id__icontains=search),
                            Q(state__icontains=search),
                            Q(client_deal_id__icontains=search),
                            Q(lead__deal_id__icontains=search),
                            Q(lead__client_deal_id__icontains=search),
                            Q(lead__name__icontains=search),
                            Q(lead__subsidiary__name__icontains=search),
                            Q(lead__responsible__name__icontains=search),
                            Q(lead__client__organisation__company__name__icontains=search)])
        return filters

    def filter_queryset(self, qs):
        """ simple search on some attributes"""
        search = self.request.GET.get('search[value]', None)
        qs = self.METHOD_NAME(qs)
        if search:
            filters = self.get_filters(search)
            # OR-combine every Q object returned by get_filters.
            query = Q()
            for filter in filters:
                query |= filter
            qs = qs.filter(query).distinct()
        return qs

    def render_column(self, row, column):
        # Per-column HTML/text rendering; unknown columns fall back to the
        # datatables default renderer.
        if column in ("amount", "amount_with_vat"):
            return to_int_or_round(getattr(row, column), 2)
        elif column == "lead":
            if row.lead:
                return "<a href='{0}'>{1}</a>".format(row.lead.get_absolute_url(), row.lead)
            else:
                return "-"
        elif column in ("creation_date", "due_date", "payment_date"):
            attr = getattr(row, column)
            if attr:
                return attr.strftime("%d/%m/%y")
            else:
                return "-"
        elif column == "state":
            return row.get_state_display()
        elif column == "file":
            return mark_safe("""<a href='%s'><i class="bi bi-file-earmark-text"></i></a>""" % row.bill_file_url())
        elif column == "subsidiary":
            return str(row.lead.subsidiary)
        elif column == "comment":
            return row.comment
        else:
            return super(BillTableDT, self).render_column(row, column)
class ClientBillInCreationTableDT(BillTableDT):
    """Client Bill tables backend for datatables.

    Shows only bills still being drafted or proposed.
    """
    columns = ("bill_id", "subsidiary", "lead", "responsible", "creation_date", "state", "amount", "amount_with_vat", "comment")
    order_columns = columns

    def get_initial_queryset(self):
        qs = ClientBill.objects.filter(state__in=("0_DRAFT", "0_PROPOSED"))
        # Apply the session subsidiary filter from the base class.
        qs = self.METHOD_NAME(qs)
        return qs

    def get_filters(self, search):
        # Extend the base search with the mission responsible's name.
        filters = super(ClientBillInCreationTableDT, self).get_filters(search)
        filters.extend([
            Q(billdetail__mission__responsible__name__icontains=search)
        ])
        return filters

    def render_column(self, row, column):
        if column == "responsible":
            # Get missions and lead responsibles
            responsibles = ClientBill.objects.filter(id=row.id).values_list("billdetail__mission__responsible__id", "lead__responsible__id")
            responsibles = set(chain(*responsibles))  # flatten it
            responsibles = Consultant.objects.filter(id__in=responsibles)
            return ", ".join([str(c) for c in responsibles])
        elif column == "bill_id":  # Use edit link instead of default detail display
            return "<a href='%s'>%s</a>" % (reverse("billing:client_bill", args=[row.id]), row.bill_id)
        else:
            return super(ClientBillInCreationTableDT, self).render_column(row, column)
class ClientBillArchiveTableDT(BillTableDT):
    """Client bill archive: every bill past the creation states."""
    columns = ("bill_id", "subsidiary", "deal_id", "lead", "creation_date", "payment_date", "state", "amount", "amount_with_vat", "comment", "file")
    order_columns = columns
    max_display_length = 500

    def get_initial_queryset(self):
        qs = ClientBill.objects.exclude(state__in=("0_DRAFT", "0_PROPOSED"))
        # select_related avoids one query per row when rendering lead columns.
        qs = qs.select_related("lead")
        qs = self.METHOD_NAME(qs)
        return qs

    def render_column(self, row, column):
        if column == "deal_id":
            return row.lead.deal_id
        else:
            return super(ClientBillArchiveTableDT, self).render_column(row, column)
class SupplierBillArchiveTableDT(BillTableDT):
    """Supplier bill archive."""
    columns = ("bill_id", "supplier", "subsidiary", "lead", "creation_date", "payment_date", "state", "amount", "amount_with_vat", "comment", "file")
    order_columns = columns
    max_display_length = 20

    def get_initial_queryset(self):
        qs = SupplierBill.objects.all()
        qs = self.METHOD_NAME(qs)
        return qs

    def filter_queryset(self, qs):
        """ simple search on some attributes"""
        # NOTE(review): full override of the base filter_queryset rather than
        # a get_filters() extension, because supplier bills search supplier
        # fields instead of amounts/state.
        search = self.request.GET.get('search[value]', None)
        qs = self.METHOD_NAME(qs)
        if search:
            qs = qs.filter(Q(bill_id__icontains=search) |
                           Q(lead__deal_id__icontains=search) |
                           Q(lead__name__icontains=search) |
                           Q(lead__responsible__name__icontains=search) |
                           Q(lead__subsidiary__name__icontains=search) |
                           Q(lead__client__organisation__company__name__icontains=search) |
                           Q(supplier__company__name__icontains=search) |
                           Q(supplier__contact__name__icontains=search)
                           )
        return qs
4,028 | test process current order | import unittest
from unittest import mock
from betfairlightweight.resources.bettingresources import PriceSize
from flumine.order.order import OrderStatus, OrderTypes
from flumine import config
from flumine.markets.market import Market
from flumine.markets.markets import Markets
from flumine.order.order import (
BaseOrder,
BetfairOrder,
)
from flumine.order import process
from flumine.strategy.strategy import Strategies
from flumine.utils import create_cheap_hash
class BaseOrderTest(unittest.TestCase):
    """Tests for flumine.order.process order-processing helpers."""

    def setUp(self) -> None:
        mock_client = mock.Mock(paper_trade=False)
        self.mock_trade = mock.Mock(client=mock_client)
        self.mock_order_type = mock.Mock()
        self.order = BaseOrder(self.mock_trade, "BACK", self.mock_order_type, 1)
        # Force simulated mode for the duration of each test.
        config.simulated = True

    def tearDown(self) -> None:
        # Restore global config so other test modules are unaffected.
        config.simulated = False

    @mock.patch("flumine.order.process.process_current_order")
    def test_process_current_orders_with_default_sep(self, mock_process_current_order):
        # A completed order matched by its customer_order_ref should be
        # forwarded to process_current_order and dropped from live orders.
        mock_log_control = mock.Mock()
        mock_add_market = mock.Mock()
        market_book = mock.Mock()
        markets = Markets()
        market = Market(
            flumine=mock.Mock(), market_id="market_id", market_book=market_book
        )
        markets.add_market("market_id", market)
        strategies = Strategies()
        cheap_hash = create_cheap_hash("strategy_name", 13)
        trade = mock.Mock(market_id="market_id")
        trade.strategy.name_hash = cheap_hash
        current_order = mock.Mock(
            customer_order_ref=f"{cheap_hash}I123", market_id="market_id", bet_id=None
        )
        betfair_order = BetfairOrder(trade=trade, side="BACK", order_type=mock.Mock())
        betfair_order.id = "123"
        betfair_order.complete = True
        market.blotter["123"] = betfair_order
        event = mock.Mock(event=[mock.Mock(orders=[current_order])])
        process.process_current_orders(
            markets=markets,
            strategies=strategies,
            event=event,
            log_control=mock_log_control,
            add_market=mock_add_market,
        )
        mock_process_current_order.assert_called_with(
            betfair_order, current_order, mock_log_control
        )
        self.assertEqual(market.blotter._live_orders, [])

    def METHOD_NAME(self):
        # process_current_order should update the order and mark execution
        # complete when the exchange reports EXECUTION_COMPLETE.
        mock_order = mock.Mock(status=OrderStatus.EXECUTABLE)
        mock_order.current_order.status = "EXECUTION_COMPLETE"
        mock_current_order = mock.Mock()
        mock_log_control = mock.Mock()
        process.process_current_order(mock_order, mock_current_order, mock_log_control)
        mock_order.update_current_order.assert_called_with(mock_current_order)
        mock_order.execution_complete.assert_called()

    @mock.patch("flumine.order.process.OrderEvent")
    def test_process_current_order_async(self, mock_order_event):
        # Async orders without a bet_id pick it up from the current order and
        # emit a placed response plus an OrderEvent through log_control.
        mock_order = mock.Mock(status=OrderStatus.EXECUTABLE, async_=True, bet_id=None)
        mock_order.current_order.status = "EXECUTION_COMPLETE"
        mock_current_order = mock.Mock(bet_id=1234)
        mock_log_control = mock.Mock()
        process.process_current_order(mock_order, mock_current_order, mock_log_control)
        mock_order.update_current_order.assert_called_with(mock_current_order)
        mock_order.execution_complete.assert_called()
        self.assertEqual(mock_order.bet_id, 1234)
        mock_order.responses.placed.assert_called_with()
        mock_order_event.assert_called_with(mock_order)
        mock_log_control.assert_called_with(mock_order_event())

    def test_create_order_from_current(self):
        # An unknown current order should be reconstructed into a BetfairOrder
        # and registered in the market blotter under its local id.
        mock_add_market = mock.Mock()
        market_book = mock.Mock()
        mock_client = mock.Mock()
        markets = Markets()
        market = Market(
            flumine=mock.Mock(), market_id="market_id", market_book=market_book
        )
        markets.add_market("market_id", market)
        cheap_hash = create_cheap_hash("strategy_name", 13)
        strategy = mock.Mock(name_hash=cheap_hash)
        strategies = Strategies()
        strategies(strategy=strategy, clients=mock.Mock())
        current_order = mock.Mock(
            customer_order_ref=f"{cheap_hash}I123",
            market_id="market_id",
            bet_id=None,
            selection_id="selection_id",
            handicap="handicap",
            order_type="LIMIT",
            price_size=PriceSize(price=10.0, size=2.0),
            persistence_type="LAPSE",
        )
        new_order = process.create_order_from_current(
            markets=markets,
            strategies=strategies,
            current_order=current_order,
            add_market=mock_add_market,
            client=mock_client,
        )
        self.assertEqual(market.blotter["123"], new_order)
        self.assertEqual(new_order.market_id, "market_id")
        self.assertEqual(new_order.selection_id, "selection_id")
        self.assertEqual(new_order.handicap, "handicap")
        self.assertEqual(new_order.order_type.ORDER_TYPE, OrderTypes.LIMIT)
        self.assertEqual(new_order.order_type.size, 2.0)
        self.assertEqual(new_order.order_type.price, 10.0)
        self.assertEqual(new_order.client, mock_client)
4,029 | write jsonl | import json
from visidata import vd, date, VisiData, PyobjSheet, AttrDict, stacktrace, TypedExceptionWrapper, options, visidata, ColumnItem, wrapply, TypedWrapper, Progress, Sheet, InferColumnsSheet
# Options for the JSON loaders/savers; each can be overridden per sheet.
vd.option('json_indent', None, 'indent to use when saving json')
vd.option('json_sort_keys', False, 'sort object keys when saving to json')
vd.option('json_ensure_ascii', True, 'ensure ascii encode when saving json')
vd.option('default_colname', '', 'column name to use for non-dict rows')
@VisiData.api
def guess_json(vd, p):
    '''Sniff whether *p* holds JSON Lines or a single JSON document.

    Only the first line is inspected: a complete object on one line means
    JSON Lines; a line merely opening with ``[`` or ``{`` means a single
    JSON value.  Returns a loader-options dict, or None when undecided.
    '''
    with p.open(encoding=vd.options.encoding) as fp:
        # next(fp, '') instead of next(fp): an empty file must not raise
        # StopIteration out of the sniffer.
        line = next(fp, '')
    line = line.strip()
    if line.startswith('{') and line.endswith('}'):
        return dict(filetype='jsonl')
    if line.startswith(tuple('[{')):
        return dict(filetype='json')
@VisiData.api
def open_jsonobj(vd, p):
    # Single-JSON-object files load through the same sheet class.
    return JsonSheet(p.name, source=p)
@VisiData.api
def open_jsonl(vd, p):
    return JsonSheet(p.name, source=p)

# All newline-delimited JSON variants share the same loader entry point.
VisiData.open_ndjson = VisiData.open_ldjson = VisiData.open_json = VisiData.open_jsonl
class JsonSheet(InferColumnsSheet):
    """Sheet backed by a JSON or JSON Lines source; columns are inferred."""

    def iterload(self):
        # First pass: try to parse line-by-line (JSON Lines).  If a line
        # fails before any row loaded, assume the file is one multi-line
        # JSON document and reparse it whole.
        with self.source.open(encoding=self.options.encoding) as fp:
            for L in fp:
                try:
                    if L.startswith('#'):  # skip commented lines
                        continue
                    elif not L.strip():  # skip blank lines
                        continue
                    ret = json.loads(L, object_hook=AttrDict)
                    if isinstance(ret, list):
                        yield from ret
                    else:
                        yield ret
                except ValueError as e:
                    if self.rows:  # if any rows have been added already
                        e.stacktrace = stacktrace()
                        yield TypedExceptionWrapper(json.loads, L, exception=e)  # an error on one line
                    else:
                        # Whole-document fallback: reopen and parse everything.
                        with self.source.open(encoding=self.options.encoding) as fp:
                            ret = json.load(fp)
                            if isinstance(ret, list):
                                yield from ret
                            else:
                                yield ret
                        break

    def addRow(self, row, index=None):
        # Wrap non-dict rows in a dummy object with a predictable key name.
        # This allows for more consistent handling of rows containing scalars
        # or lists.
        if not isinstance(row, dict):
            # NOTE(review): reads the global `options`, not `self.options` —
            # presumably intentional, but verify against sheet-local overrides.
            v = {options.default_colname: row}
            row = visidata.AlwaysDict(row, **v)
        return super().addRow(row, index=index)

    def newRow(self, **fields):
        # New rows are plain dicts keyed by column name.
        return fields

    def openRow(self, row):
        # Dive into a single row as its own python-object sheet.
        return PyobjSheet("%s[%s]" % (self.name, self.keystr(row)), source=row)
## saving json and jsonl
class _vjsonEncoder(json.JSONEncoder):
def default(self, obj):
return str(obj)
def _rowdict(cols, row, keep_nulls=False):
    """Return {colname: value} for *row* over *cols*, ready for JSON output.

    Errors become the sheet's safe_error string, wrappers are unwrapped,
    and dates use their display form.  Null values are dropped unless
    *keep_nulls* is set (used for the first jsonl row to keep headers).
    """
    ret = {}
    for col in cols:
        o = wrapply(col.getTypedValue, row)
        if isinstance(o, TypedExceptionWrapper):
            o = col.sheet.options.safe_error or str(o.exception)
        elif isinstance(o, TypedWrapper):
            o = o.val
        elif isinstance(o, date):
            # Serialize dates via their formatted display value.
            o = col.getDisplayValue(row)
        if keep_nulls or o is not None:
            ret[col.name] = o
    return ret
@VisiData.api
def encode_json(vd, row, cols, enc=_vjsonEncoder(sort_keys=False)):
    'Return JSON string for given *row* and given *cols*.'
    # The default-arg encoder is deliberately created once and reused.
    return enc.encode(_rowdict(cols, row))
@VisiData.api
def save_json(vd, p, *vsheets):
    """Save one sheet as a JSON array, or several as {sheetname: rows}."""
    vs = vsheets[0]
    with p.open(mode='w', encoding=vs.options.save_encoding) as fp:
        try:
            # json_indent may be an int-as-string; otherwise pass it through
            # unchanged (None or a literal indent string).
            indent = int(vs.options.json_indent)
        except Exception:
            indent = vs.options.json_indent
        jsonenc = _vjsonEncoder(indent=indent, sort_keys=vs.options.json_sort_keys, ensure_ascii=vs.options.json_ensure_ascii)
        if len(vsheets) == 1:
            # Stream rows one by one to avoid materializing the whole list.
            fp.write('[\n')
            vs = vsheets[0]
            with Progress(gerund='saving'):
                for i, row in enumerate(vs.iterrows()):
                    if i > 0:
                        fp.write(',\n')
                    rd = _rowdict(vs.visibleCols, row)
                    fp.write(jsonenc.encode(rd))
            fp.write('\n]\n')
        else:
            # Multiple sheets: one top-level object keyed by sheet name.
            it = {vs.name: [_rowdict(vs.visibleCols, row) for row in vs.iterrows()] for vs in vsheets}
            with Progress(gerund='saving'):
                for chunk in jsonenc.iterencode(it):
                    fp.write(chunk)
@Sheet.api
def METHOD_NAME(vs, fp):
    """Write this sheet to *fp* as JSON Lines (one JSON object per row)."""
    vcols = vs.visibleCols
    jsonenc = _vjsonEncoder()
    with Progress(gerund='saving'):
        for i, row in enumerate(vs.iterrows()):
            # keep_nulls on the first row only, so every column name appears
            # at least once (acts as the "header" of the jsonl output).
            rowdict = _rowdict(vcols, row, keep_nulls=(i==0))
            fp.write(jsonenc.encode(rowdict) + '\n')
    if len(vs) == 0:
        vd.warning(
            "Output file is empty - cannot save headers without data for jsonl.\n"
            "Use `.jsonla` filetype to save as JSONL arrays format "
            "rather than JSONL dict format to preserve the headers."
        )
@VisiData.api
def save_jsonl(vd, p, *vsheets):
    # Concatenate all sheets into one JSON Lines file; the first sheet's
    # save_encoding option governs the whole output.
    with p.open(mode='w', encoding=vsheets[0].options.save_encoding) as fp:
        for vs in vsheets:
            vs.METHOD_NAME(fp)
# JSON sources default to utf-8 regardless of the global encoding option.
JsonSheet.options.encoding = 'utf-8'
# ndjson/ldjson are saved identically to jsonl.
VisiData.save_ndjson = VisiData.save_jsonl
VisiData.save_ldjson = VisiData.save_jsonl
vd.addGlobals({
    'JsonSheet': JsonSheet,
    'JsonLinesSheet': JsonSheet,
})
4,030 | test middle css changes order with first | import pytest
from django.urls import reverse
from ....cache.test import assert_invalidates_cache
from ....test import assert_has_error_message
from ... import THEME_CACHE
from ..css import get_next_css_order
# Positions of the three CSS entries created by the css_list fixture.
FIRST = 0
MIDDLE = 1
LAST = 2
@pytest.fixture
def css_list(theme):
    """Three theme CSS entries with orders FIRST, MIDDLE and LAST."""
    return [
        theme.css.create(name="CSS", url="https://test.cdn/font.css", order=FIRST),
        theme.css.create(name="CSS", url="https://test.cdn/font.css", order=MIDDLE),
        theme.css.create(name="CSS", url="https://test.cdn/font.css", order=LAST),
    ]
@pytest.fixture
def move_up(admin_client):
    """Callable that POSTs the admin "move css up" action for (theme, css)."""
    def move_up_client(theme, css):
        url = reverse(
            "misago:admin:themes:move-css-up", kwargs={"pk": theme.pk, "css_pk": css.pk}
        )
        return admin_client.post(url)
    return move_up_client
@pytest.fixture
def move_down(admin_client):
    """Callable that POSTs the admin "move css down" action for (theme, css)."""
    def move_down_client(theme, css):
        url = reverse(
            "misago:admin:themes:move-css-down",
            kwargs={"pk": theme.pk, "css_pk": css.pk},
        )
        return admin_client.post(url)
    return move_down_client
# Boundary and swap behaviour of the move-up/move-down admin actions.

def test_first_css_cant_be_moved_up(move_up, theme, css_list):
    first_css = css_list[FIRST]
    move_up(theme, first_css)
    first_css.refresh_from_db()
    assert first_css.order == FIRST

def test_last_css_cant_be_moved_down(move_down, theme, css_list):
    last_css = css_list[LAST]
    move_down(theme, last_css)
    last_css.refresh_from_db()
    assert last_css.order == LAST

def test_first_css_can_be_moved_down(move_down, theme, css_list):
    first_css = css_list[FIRST]
    move_down(theme, first_css)
    first_css.refresh_from_db()
    assert first_css.order == MIDDLE

def test_last_css_can_be_moved_up(move_up, theme, css_list):
    last_css = css_list[LAST]
    move_up(theme, last_css)
    last_css.refresh_from_db()
    assert last_css.order == MIDDLE

def test_middle_css_can_be_moved_down(move_down, theme, css_list):
    middle_css = css_list[MIDDLE]
    move_down(theme, middle_css)
    middle_css.refresh_from_db()
    assert middle_css.order == LAST

def test_middle_css_can_be_moved_up(move_up, theme, css_list):
    middle_css = css_list[MIDDLE]
    move_up(theme, middle_css)
    middle_css.refresh_from_db()
    assert middle_css.order == FIRST

def test_first_css_changes_order_with_middle_css_when_moved_down(
    move_down, theme, css_list
):
    move_down(theme, css_list[FIRST])
    middle_css = css_list[MIDDLE]
    middle_css.refresh_from_db()
    assert middle_css.order == FIRST

def test_last_css_changes_order_with_middle_css_when_moved_up(move_up, theme, css_list):
    move_up(theme, css_list[LAST])
    middle_css = css_list[MIDDLE]
    middle_css.refresh_from_db()
    assert middle_css.order == LAST

def test_middle_css_changes_order_with_last_css_when_moved_down(
    move_down, theme, css_list
):
    move_down(theme, css_list[MIDDLE])
    last_css = css_list[LAST]
    last_css.refresh_from_db()
    assert last_css.order == MIDDLE
def METHOD_NAME(
    move_up, theme, css_list
):
    # Moving the middle entry up swaps its order with the first entry.
    move_up(theme, css_list[MIDDLE])
    first_css = css_list[FIRST]
    first_css.refresh_from_db()
    assert first_css.order == MIDDLE
# Moves with gaps in the order sequence, error cases, and cache invalidation.

def test_first_css_changes_order_with_last_css_when_moved_down_after_middle_deletion(
    move_down, theme, css_list
):
    css_list[MIDDLE].delete()
    move_down(theme, css_list[FIRST])
    last_css = css_list[LAST]
    last_css.refresh_from_db()
    assert last_css.order == FIRST

def test_last_css_changes_order_with_first_css_when_moved_up_after_middle_deletion(
    move_up, theme, css_list
):
    css_list[MIDDLE].delete()
    move_up(theme, css_list[LAST])
    first_css = css_list[FIRST]
    first_css.refresh_from_db()
    assert first_css.order == LAST

def test_if_css_doesnt_belong_to_theme_move_down_action_sets_error_message(
    move_down, other_theme, css_list
):
    response = move_down(other_theme, css_list[MIDDLE])
    assert_has_error_message(response)

def test_if_css_doesnt_belong_to_theme_move_up_action_sets_error_message(
    move_up, other_theme, css_list
):
    response = move_up(other_theme, css_list[MIDDLE])
    assert_has_error_message(response)

def test_if_ran_for_default_theme_move_down_action_sets_error_message(
    move_down, default_theme, css_list
):
    response = move_down(default_theme, css_list[MIDDLE])
    assert_has_error_message(response)

def test_if_ran_for_default_theme_move_up_action_sets_error_message(
    move_up, default_theme, css_list
):
    response = move_up(default_theme, css_list[MIDDLE])
    assert_has_error_message(response)

def test_if_given_nonexisting_css_id_move_down_action_sets_error_message(
    mocker, move_down, theme, css_list
):
    response = move_down(theme, mocker.Mock(pk=css_list[LAST].pk + 1))
    assert_has_error_message(response)

def test_if_given_nonexisting_css_id_move_up_action_sets_error_message(
    mocker, move_up, theme, css_list
):
    response = move_up(theme, mocker.Mock(pk=css_list[LAST].pk + 1))
    assert_has_error_message(response)

def test_if_given_nonexisting_theme_id_move_down_action_sets_error_message(
    mocker, move_down, nonexisting_theme, css_list
):
    response = move_down(nonexisting_theme, css_list[FIRST])
    assert_has_error_message(response)

def test_if_given_nonexisting_theme_id_move_up_action_sets_error_message(
    mocker, move_up, nonexisting_theme, css_list
):
    response = move_up(nonexisting_theme, css_list[LAST])
    assert_has_error_message(response)

def test_next_new_css_order_is_larger_than_largest_existing_css_order(theme):
    theme.css.create(name="CSS", url="https://test.cdn/font.css", order=4)
    assert get_next_css_order(theme) == 5

def test_moving_css_up_invalidates_theme_cache(move_up, theme, css_list):
    with assert_invalidates_cache(THEME_CACHE):
        move_up(theme, css_list[LAST])

def test_moving_css_down_invalidates_theme_cache(move_down, theme, css_list):
    with assert_invalidates_cache(THEME_CACHE):
        move_down(theme, css_list[FIRST])
4,031 | test derivatives | import pytest
from diofant import (Derivative, Float, I, O, PoleError, Rational, Symbol,
dirichlet_eta, exp, exp_polar, expand_func, lerchphi, log,
nan, oo, pi, polar_lift, polylog, sqrt, zeta, zoo)
from diofant.abc import a, s, x, z
from diofant.core.function import ArgumentIndexError
from diofant.functions.special.zeta_functions import _zetas
from diofant.utilities.randtest import random_complex_number as randcplx
from diofant.utilities.randtest import verify_derivative_numerically as td
from diofant.utilities.randtest import verify_numerically as tn
__all__ = ()
# Negative symbol used by the Hurwitz zeta special values below.
b = Symbol('b', negative=True)

def test_zeta_eval():
    """Known special values and limits of zeta/Hurwitz zeta."""
    assert zeta(nan) == nan
    assert zeta(x, nan) == nan
    assert zeta(oo) == 1
    assert zeta(0) == Rational(-1, 2)
    assert zeta(0, x) == Rational(1, 2) - x
    assert zeta(0, b) == Rational(1, 2) - b
    assert zeta(1) == zoo
    assert zeta(1, 2) == zoo
    assert zeta(1, -7) == zoo
    assert zeta(1, x) == zoo
    assert zeta(2, 1) == pi**2/6
    assert zeta(2) == pi**2/6
    assert zeta(4) == pi**4/90
    assert zeta(6) == pi**6/945
    assert zeta(2, 2) == pi**2/6 - 1
    assert zeta(4, 3) == pi**4/90 - Rational(17, 16)
    assert zeta(6, 4) == pi**6/945 - Rational(47449, 46656)
    assert zeta(2, -2) == pi**2/6 + Rational(5, 4)
    assert zeta(4, -3) == pi**4/90 + Rational(1393, 1296)
    assert zeta(6, -4) == pi**6/945 + Rational(3037465, 2985984)
    assert zeta(-1) == -Rational(1, 12)
    assert zeta(-2) == 0
    assert zeta(-3) == Rational(1, 120)
    assert zeta(-4) == 0
    assert zeta(-5) == -Rational(1, 252)
    assert zeta(-1, 3) == -Rational(37, 12)
    assert zeta(-1, 7) == -Rational(253, 12)
    assert zeta(-1, -4) == Rational(119, 12)
    assert zeta(-1, -9) == Rational(539, 12)
    assert zeta(-4, 3) == -17
    assert zeta(-4, -8) == 8772
    assert zeta(0, 1) == -Rational(1, 2)
    assert zeta(0, -1) == Rational(3, 2)
    assert zeta(0, 2) == -Rational(3, 2)
    assert zeta(0, -2) == Rational(5, 2)
    # Numerical evaluation of Apery's constant to 20 digits.
    assert zeta(
        3).evalf(20).epsilon_eq(Float('1.2020569031595942854', 20), 1e-19)
    # Non-integer arguments stay unevaluated.
    assert zeta(Rational(1, 2)) == zeta(Rational(1, 2), evaluate=False)
def test__zetas():
    """Series expansion of the internal _zetas helper around 1/x."""
    assert _zetas(1/x).series(x, n=0) == O(1, x)
    assert _zetas(1/x).series(x, n=2) == (1 + x**log(7) + x**log(6) +
                                          x**log(5) + x**log(4) + x**log(3) +
                                          x**log(2) + O(x**2))
    pytest.raises(PoleError, lambda: _zetas(-1/x).series(x, n=2))

def test_zeta_series():
    """First-order series of Hurwitz zeta in its second argument."""
    assert zeta(x, a).series(a, 0, 2) == \
        zeta(x, 0) - x*a*zeta(x + 1, 0) + O(a**2)

def test_dirichlet_eta_eval():
    """Known special values of the Dirichlet eta function."""
    assert dirichlet_eta(0) == Rational(1, 2)
    assert dirichlet_eta(-1) == Rational(1, 4)
    assert dirichlet_eta(1) == log(2)
    assert dirichlet_eta(2) == pi**2/12
    assert dirichlet_eta(4) == pi**4*Rational(7, 720)
def test_rewriting():
    """Cross-rewrites between zeta, eta, polylog and lerchphi."""
    assert dirichlet_eta(x).rewrite(zeta) == (1 - 2**(1 - x))*zeta(x)
    assert zeta(x).rewrite(dirichlet_eta) == dirichlet_eta(x)/(1 - 2**(1 - x))
    # Hurwitz zeta has no eta/tractable rewrite; it must come back unchanged.
    assert zeta(z, 2).rewrite(dirichlet_eta) == zeta(z, 2)
    assert zeta(z, 2).rewrite('tractable') == zeta(z, 2)
    assert tn(dirichlet_eta(x), dirichlet_eta(x).rewrite(zeta), x)
    assert tn(zeta(x), zeta(x).rewrite(dirichlet_eta), x)
    assert zeta(x, a).rewrite(lerchphi) == lerchphi(1, x, a)
    assert polylog(s, z).rewrite(lerchphi) == lerchphi(z, s, 1)*z
    assert lerchphi(1, x, a).rewrite(zeta) == zeta(x, a)
    assert z*lerchphi(z, s, 1).rewrite(polylog) == polylog(s, z)
    # General lerchphi does not reduce to zeta.
    assert lerchphi(z, s, a).rewrite(zeta) == lerchphi(z, s, a)

def METHOD_NAME():
    """Symbolic and numerical derivative checks for the zeta family."""
    assert zeta(x, a).diff(x) == Derivative(zeta(x, a), x)
    assert zeta(x, a).diff(a) == -x*zeta(x + 1, a)
    assert zeta(z).diff(z) == Derivative(zeta(z), z)
    assert lerchphi(
        z, s, a).diff(z) == (lerchphi(z, s - 1, a) - a*lerchphi(z, s, a))/z
    pytest.raises(ArgumentIndexError, lambda: lerchphi(z, s, a).fdiff(4))
    assert lerchphi(z, s, a).diff(a) == -s*lerchphi(z, s + 1, a)
    assert polylog(s, z).diff(z) == polylog(s - 1, z)/z
    pytest.raises(ArgumentIndexError, lambda: polylog(s, z).fdiff(3))
    # Verify the symbolic derivatives numerically at random complex points.
    b = randcplx()
    c = randcplx()
    assert td(zeta(b, x), x)
    assert td(polylog(b, z), z)
    assert td(lerchphi(c, b, x), x)
    assert td(lerchphi(x, b, c), x)
def myexpand(func, target):
    """Check expand_func(func) against *target*.

    With target=None, instead verify numerically that the expansion agrees
    with *func* at random complex points for all free symbols.  Returns
    False when expand_func left *func* unchanged.
    """
    expanded = expand_func(func)
    if target is not None:
        return expanded == target
    if expanded == func:  # it didn't expand
        return False
    # check to see that the expanded and original evaluate to the same value
    subs = {}
    for a in func.free_symbols:
        subs[a] = randcplx()
    return abs(func.subs(subs).evalf()
               - expanded.replace(exp_polar, exp).subs(subs).evalf()) < 1e-10
def test_polylog_eval():
    """Special values and polar-argument handling of polylog."""
    assert polylog(s, 0) == 0
    assert polylog(s, 1) == zeta(s)
    assert polylog(s, -1) == -dirichlet_eta(s)
    assert polylog(s, exp_polar(I*pi)) == polylog(s, -1)
    # Non-principal polar arguments stay unevaluated.
    assert polylog(s, 2*exp_polar(2*I*pi)) == polylog(s, 2*exp_polar(2*I*pi), evaluate=False)

def test_polylog_expansion():
    """expand_func for polylog at small integer orders."""
    assert myexpand(polylog(1, z), -log(1 - z))
    assert myexpand(polylog(0, z), z/(1 - z))
    assert myexpand(polylog(-1, z), z**2/(1 - z)**2 + z/(1 - z))
    assert myexpand(polylog(-5, z), None)

def test_lerchphi_expansion():
    """expand_func reductions of lerchphi to zeta/polylog and direct sums."""
    assert myexpand(lerchphi(1, s, a), zeta(s, a))
    assert myexpand(lerchphi(z, s, 1), polylog(s, z)/z)
    # direct summation
    assert myexpand(lerchphi(z, -1, a), a/(1 - z) + z/(1 - z)**2)
    assert myexpand(lerchphi(z, -3, a), None)
    # polylog reduction
    assert myexpand(lerchphi(z, s, Rational(1, 2)),
                    2**(s - 1)*(polylog(s, sqrt(z))/sqrt(z)
                                - polylog(s, polar_lift(-1)*sqrt(z))/sqrt(z)))
    assert myexpand(lerchphi(z, s, 2), -1/z + polylog(s, z)/z**2)
    assert myexpand(lerchphi(z, s, Rational(3, 2)), None)
    assert myexpand(lerchphi(z, s, Rational(7, 3)), None)
    assert myexpand(lerchphi(z, s, -Rational(1, 3)), None)
    assert myexpand(lerchphi(z, s, -Rational(5, 2)), None)
    # hurwitz zeta reduction
    assert myexpand(lerchphi(-1, s, a),
                    2**(-s)*zeta(s, a/2) - 2**(-s)*zeta(s, (a + 1)/2))
    assert myexpand(lerchphi(I, s, a), None)
    assert myexpand(lerchphi(-I, s, a), None)
    assert myexpand(lerchphi(exp(2*I*pi/5), s, a), None)
4,032 | main | #!/usr/bin/python
# Copyright (c) 2010-2022 Belledonne Communications SARL.
#
# This file is part of Liblinphone
# (see https://gitlab.linphone.org/BC/public/liblinphone).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import shutil
import sys
from distutils.spawn import find_executable
from subprocess import Popen, PIPE
def find_xsdcxx():
    """Locate the XSD/C++ code generator on the PATH.

    The tool is packaged as ``xsdcxx`` on Debian-based systems and as
    ``xsd`` elsewhere, so both spellings are tried in that order.

    Returns the path of the executable, or None when it is not installed.
    """
    # shutil.which replaces distutils.spawn.find_executable: distutils is
    # deprecated and removed in Python 3.12 (PEP 632), and which() also
    # checks that the file is actually executable.
    for candidate in ("xsdcxx", "xsd"):
        path = shutil.which(candidate)
        if path is not None:
            return path
    return None
def generate(name):
    """Generate C++ XML binding code from ``<name>.xsd`` with xsdcxx.

    Returns 0 on success, -1 when no xsdcxx/xsd binary is found on PATH.
    After generation, the first 32 lines of the emitted .h/.cpp files
    (the xsdcxx banner) are replaced with the Linphone copyright header.
    """
    xsdcxx = find_xsdcxx()
    if xsdcxx is None:
        print("Cannot find xsdcxx (or xsd) program in the PATH")
        return -1
    print("Using " + xsdcxx)
    # NOTE(review): script_dir is computed but never used below — confirm
    # whether it can be removed.
    script_dir = os.path.dirname(os.path.realpath(__file__))
    source_file = name + ".xsd"
    print("Generating code from " + source_file)
    prologue_file = "prologue.txt"
    epilogue_file = "epilogue.txt"
    # The --type/--accessor/--modifier regex chains rewrite dashed XSD names
    # (foo-bar-baz) into Java-style CamelCase identifiers; the --namespace-map
    # entries pin each XML namespace to a C++ namespace.
    p = Popen([xsdcxx,
               "cxx-tree",
               "--generate-wildcard",
               "--generate-serialization",
               "--generate-ostream",
               "--generate-detach",
               "--generate-polymorphic",
               "--std", "c++11",
               "--type-naming", "java",
               "--function-naming", "java",
               "--hxx-suffix", ".h",
               "--ixx-suffix", ".h",
               "--cxx-suffix", ".cpp",
               "--location-regex", "%http://.+/(.+)%$1%",
               "--output-dir", ".",
               "--show-sloc",
               "--prologue-file", prologue_file,
               "--cxx-prologue-file", prologue_file,
               "--cxx-epilogue-file", epilogue_file,
               "--epilogue-file", epilogue_file,
               "--root-element-first",
               "--type-regex", "%(?:[^ ]* )?([^,-]+)-([^,-]+)-([^,-]+)-?([^,-]*)%\\u$1\\u$2\\u$3\\u$4%",
               "--type-regex", "%(?:[^ ]* )?([^,-]+)-([^,-]+)-?([^,-]*)%\\u$1\\u$2\\u$3%",
               "--type-regex", "%(?:[^ ]* )?([^,-]+)-?([^,-]*)%\\u$1\\u$2%",
               "--type-regex", "%(?:[^ ]* )?([^,-]+)-([^,-]+)-([^,-]+)-?([^,-]*),([^,]+)%\\u$1\\u$2\\u$3\\u$4\\l\\u$5%",
               "--type-regex", "%(?:[^ ]* )?([^,-]+)-([^,-]+)-?([^,-]*),([^,]+)%\\u$1\\u$2\\u$3\\l\\u$4%",
               "--type-regex", "%(?:[^ ]* )?([^,-]+)-?([^,-]*),([^,]+)%\\u$1\\u$2\\l\\u$3%",
               "--type-regex", "%(?:[^ ]* )?([^,-]+)-([^,-]+)-([^,-]+)-?([^,-]*),([^,]+),([^,]+)%\\u$1\\u$2\\u$3\\u$4\\l\\u$5\\u$6%",
               "--type-regex", "%(?:[^ ]* )?([^,-]+)-([^,-]+)-?([^,-]*),([^,]+),([^,]+)%\\u$1\\u$2\\u$3\\l\\u$4\\u$5%",
               "--type-regex", "%(?:[^ ]* )?([^,-]+)-?([^,-]*),([^,]+),([^,]+)%\\u$1\\u$2\\l\\u$3\\u$4%",
               "--type-regex", "%(?:[^ ]* )?([^,-]+)-([^,-]+)-([^,-]+)-?([^,-]*),([^,]+),([^,]+),([^,]+)%\\u$1\\u$2\\u$3\\u$4\\l\\u$5\\u$6\\u$7%",
               "--type-regex", "%(?:[^ ]* )?([^,-]+)-([^,-]+)-?([^,-]*),([^,]+),([^,]+),([^,]+)%\\u$1\\u$2\\u$3\\l\\u$4\\u$5\\u$6%",
               "--type-regex", "%(?:[^ ]* )?([^,-]+)-?([^,-]*),([^,]+),([^,]+),([^,]+)%\\u$1\\u$2\\l\\u$3\\u$4\\u$5%",
               "--accessor-regex", "%([^,-]+)-([^,-]+)-?([^,-]*)%get\\u$1\\u$2\\u$3%",
               "--accessor-regex", "%([^,-]+)-?([^,-]*)%get\\u$1\\u$2%",
               "--accessor-regex", "%([^,-]+)-([^,-]+)-?([^,-]*),([^,]+)%get\\u$1\\u$2\\u$3\\l\\u$4%",
               "--accessor-regex", "%([^,-]+)-?([^,-]*),([^,]+)%get\\u$1\\u$2\\l\\u$3%",
               "--accessor-regex", "%([^,-]+)-([^,-]+)-?([^,-]*),([^,]+),([^,]+)%get\\u$1\\u$2\\u$3\\l\\u$4\\u$5%",
               "--accessor-regex", "%([^,-]+)-?([^,-]*),([^,]+),([^,]+)%get\\u$1\\u$2\\l\\u$3\\u$4%",
               "--modifier-regex", "%([^,-]+)-([^,-]+)-?([^,-]*)%set\\u$1\\u$2\\u$3%",
               "--modifier-regex", "%([^,-]+)-?([^,-]*)%set\\u$1\\u$2%",
               "--modifier-regex", "%([^,-]+)-([^,-]+)-?([^,-]*),([^,]+)%set\\u$1\\u$2\\u$3\\l\\u$4%",
               "--modifier-regex", "%([^,-]+)-?([^,-]*),([^,]+)%set\\u$1\\u$2\\l\\u$3%",
               "--modifier-regex", "%([^,-]+)-([^,-]+)-?([^,-]*),([^,]+),([^,]+)%set\\u$1\\u$2\\u$3\\l\\u$4\\u$5%",
               "--modifier-regex", "%([^,-]+)-?([^,-]*),([^,]+),([^,]+)%set\\u$1\\u$2\\l\\u$3\\u$4%",
               "--parser-regex", "%([^-]+)-?([^-]*)%parse\\u$1\\u$2%",
               "--parser-regex", "%([^-]+)-?([^-]*)-?([^-]*)%parse\\u$1\\u$2\\u$3%",
               "--parser-regex", "%([^-]+)-?([^-]*)-?([^-]*)-?([^-]*)%parse\\u$1\\u$2\\u$3\\u$4%",
               "--serializer-regex", "%([^-]+)-?([^-]*)%serialize\\u$1\\u$2%",
               "--serializer-regex", "%([^-]+)-?([^-]*)-?([^-]*)%serialize\\u$1\\u$2\\u$3%",
               "--serializer-regex", "%([^-]+)-?([^-]*)-?([^-]*)-?([^-]*)%serialize\\u$1\\u$2\\u$3\\u$4%",
               "--namespace-map", "http://www.w3.org/2001/XMLSchema=LinphonePrivate::Xsd::XmlSchema",
               "--namespace-map", "urn:ietf:params:xml:ns:conference-info=LinphonePrivate::Xsd::ConferenceInfo",
               "--namespace-map", "linphone:xml:ns:conference-info-linphone-extension=LinphonePrivate::Xsd::ConferenceInfoLinphoneExtension",
               "--namespace-map", "urn:ietf:params:xml:ns:imdn=LinphonePrivate::Xsd::Imdn",
               "--namespace-map", "urn:ietf:params:xml:ns:im-iscomposing=LinphonePrivate::Xsd::IsComposing",
               "--namespace-map", "http://www.linphone.org/xsds/imdn.xsd=LinphonePrivate::Xsd::LinphoneImdn",
               "--namespace-map", "linphone:xml:ns:publish-linphone-extension=LinphonePrivate::Xsd::PublishLinphoneExtension",
               "--namespace-map", "urn:ietf:params:xml:ns:resource-lists=LinphonePrivate::Xsd::ResourceLists",
               "--namespace-map", "urn:ietf:params:xml:ns:rlmi=LinphonePrivate::Xsd::Rlmi",
               source_file
               ], shell=False)
    p.communicate()
    # Post-process: drop the 32-line xsdcxx banner from each generated file
    # and prepend the Linphone copyright header instead (POSIX shell tools).
    os.system("sed -e \'1,32d\' "+os.path.splitext(source_file)[0]+".cpp"+">" + os.path.splitext(source_file)[0]+".cpp.tmp")
    os.system("cat linphone-copyright.txt >"+os.path.splitext(source_file)[0]+".cpp")
    os.system("cat "+os.path.splitext(source_file)[0]+".cpp.tmp >>"+os.path.splitext(source_file)[0]+".cpp")
    os.system("rm "+os.path.splitext(source_file)[0]+".cpp.tmp ")
    os.system("sed -e \'1,32d\' "+os.path.splitext(source_file)[0]+".h"+">" + os.path.splitext(source_file)[0]+".h.tmp")
    os.system("cat linphone-copyright.txt >"+os.path.splitext(source_file)[0]+".h")
    os.system("cat "+os.path.splitext(source_file)[0]+".h.tmp >>"+os.path.splitext(source_file)[0]+".h")
    os.system("rm "+os.path.splitext(source_file)[0]+".h.tmp ")
    return 0
def METHOD_NAME(argv = None):
    """Generate bindings for every schema; return a process exit code.

    *argv* is unused and kept for the conventional main() signature.
    Previously the return codes of generate() were discarded and the
    script always exited with status 0 even when generation failed; now
    any failure yields a non-zero exit status.
    """
    schemas = [
        "xml",
        "conference-info",
        "conference-info-linphone-extension",
        "imdn",
        "is-composing",
        "linphone-imdn",
        "publish-linphone-extension",
        "resource-lists",
        "rlmi",
    ]
    status = 0
    for schema in schemas:
        if generate(schema) != 0:
            status = 1
    return status
if __name__ == "__main__":
sys.exit(METHOD_NAME())
#!/usr/bin/env python3
import argparse
from collections import defaultdict
import difflib
import pickle
from openpilot.selfdrive.car.docs import get_all_car_info
from openpilot.selfdrive.car.docs_definitions import Column
FOOTNOTE_TAG = "<sup>{}</sup>"
STAR_ICON = '<a href="##"><img valign="top" src="https://raw.githubusercontent.com/commaai/openpilot/master/docs/assets/icon-star-{}.svg" width="22" /></a>'
VIDEO_ICON = '<a href="{}" target="_blank">\
<img height="18px" src="https://raw.githubusercontent.com/commaai/openpilot/master/docs/assets/icon-youtube.svg"></img></a>'
COLUMNS = "|" + "|".join([column.value for column in Column]) + "|"
COLUMN_HEADER = "|---|---|---|{}|".format("|".join([":---:"] * (len(Column) - 3)))
ARROW_SYMBOL = "➡️"
def load_base_car_info(path):
    """Deserialize the pickled list of base car-info entries at *path*."""
    with open(path, "rb") as base_file:
        return pickle.load(base_file)
def match_cars(base_cars, new_cars):
    """Pair new cars with their closest base cars by name.

    Returns ``(changes, additions, removals)``: *changes* is a list of
    ``(new_car, base_car)`` pairs, *additions* are new cars without an
    unused close match, *removals* are base cars that were never matched.
    """
    changes = []
    additions = []
    base_names = [base.name for base in base_cars]
    for candidate in new_cars:
        # cutoff=0. means every base name is a potential match, ranked by
        # similarity; each base car may be consumed by at most one change.
        ranked = difflib.get_close_matches(candidate.name, base_names, cutoff=0.)
        used = [pair[1].name for pair in changes]
        if not ranked or ranked[0] in used:
            additions.append(candidate)
        else:
            best = ranked[0]
            changes.append((candidate, next(base for base in base_cars if base.name == best)))

    matched = [pair[1].name for pair in changes]
    removals = [base for base in base_cars if base.name not in matched]
    return changes, additions, removals
def build_column_diff(base_car, new_car):
    """Render one markdown row; changed cells show ``old ➡️ new``."""
    cells = []
    for column in Column:
        old_value = base_car.get_column(column, STAR_ICON, VIDEO_ICON, FOOTNOTE_TAG)
        new_value = new_car.get_column(column, STAR_ICON, VIDEO_ICON, FOOTNOTE_TAG)
        cells.append(new_value if old_value == new_value
                     else f"{old_value} {ARROW_SYMBOL} {new_value}")
    return METHOD_NAME(cells)
def METHOD_NAME(builder):
    """Join cell values into a single pipe-delimited markdown table row."""
    return "|{}|".format("|".join(builder))
def print_car_info_diff(path):
    """Print a markdown summary of CARS.md changes against a pickled base.

    *path* points at a pickled list of base car-info entries; the current
    entries come from get_all_car_info(). Prints nothing when there are
    no differences.
    """
    base_car_info = defaultdict(list)
    new_car_info = defaultdict(list)

    # Group both sides by platform fingerprint so matching is per-platform.
    for car in load_base_car_info(path):
        base_car_info[car.car_fingerprint].append(car)
    for car in get_all_car_info():
        new_car_info[car.car_fingerprint].append(car)

    # Add new platforms to base cars so we can detect additions and removals in one pass
    base_car_info.update({car: [] for car in new_car_info if car not in base_car_info})

    changes = defaultdict(list)
    for base_car_model, base_cars in base_car_info.items():
        # Match car info changes, and get additions and removals
        new_cars = new_car_info[base_car_model]
        car_changes, car_additions, car_removals = match_cars(base_cars, new_cars)

        # Removals
        for car_info in car_removals:
            changes["removals"].append(METHOD_NAME([car_info.get_column(column, STAR_ICON, VIDEO_ICON, FOOTNOTE_TAG) for column in Column]))

        # Additions
        for car_info in car_additions:
            changes["additions"].append(METHOD_NAME([car_info.get_column(column, STAR_ICON, VIDEO_ICON, FOOTNOTE_TAG) for column in Column]))

        for new_car, base_car in car_changes:
            # Column changes
            row_diff = build_column_diff(base_car, new_car)
            if ARROW_SYMBOL in row_diff:
                changes["column"].append(row_diff)

            # Detail sentence changes
            if base_car.detail_sentence != new_car.detail_sentence:
                changes["detail"].append(f"- Sentence for {base_car.name} changed!\n" +
                                         "  ```diff\n" +
                                         f"  - {base_car.detail_sentence}\n" +
                                         f"  + {new_car.detail_sentence}\n" +
                                         "  ```")

    # Print diff
    if any(len(c) for c in changes.values()):
        markdown_builder = ["### ⚠️ This PR makes changes to [CARS.md](../blob/master/docs/CARS.md) ⚠️"]

        for title, category in (("## 🔀 Column Changes", "column"), ("## ❌ Removed", "removals"),
                                ("## ➕ Added", "additions"), ("## 📖 Detail Sentence Changes", "detail")):
            if len(changes[category]):
                markdown_builder.append(title)
                # The detail section is free-form text, not a table.
                if category not in ("detail",):
                    markdown_builder.append(COLUMNS)
                    markdown_builder.append(COLUMN_HEADER)
                markdown_builder.extend(changes[category])

        print("\n".join(markdown_builder))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--path", required=True)
args = parser.parse_args()
print_car_info_diff(args.path)
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import base64
import functools
import json
import time
from django.conf import settings
from django.utils import timezone
from rest_framework.response import Response
from backend.components import paas_cc
from backend.templatesets.legacy_apps.configuration.constants import K8sResourceName
from backend.templatesets.legacy_apps.instance import constants as instance_constants
from backend.templatesets.legacy_apps.instance.models import InstanceConfig
from backend.utils.basic import getitems
from backend.utils.errcodes import ErrorCode
from backend.utils.error_codes import error_codes
from . import constants
STAG_ENV = 2
PROD_ENV = 1
class APIResponse(Response):
    """DRF Response that guarantees ``code``/``message`` keys in the body."""

    def __init__(self, data, *args, **kwargs):
        # Default to success code 0 and empty message so callers may omit them.
        data.setdefault('code', 0)
        data.setdefault('message', '')
        # Fix: do not ``return`` from __init__ — it only worked because
        # Response.__init__ happens to return None, and would raise
        # TypeError if that ever changed.
        super(APIResponse, self).__init__(data, *args, **kwargs)
def image_handler(image):
    """Trim a full registry image path down to the user-entered part.

    If any marker in ``constants.SPLIT_IMAGE`` appears as a path component,
    keep only the components from that marker onward, prefixed with '/'.
    """
    # Fix: the split result never changes inside the loop (image is only
    # reassigned right before ``break``), so compute it once.
    info_split = image.split("/")
    for env in constants.SPLIT_IMAGE:
        if env in info_split:
            image = "/" + "/".join(info_split[info_split.index(env) :])
            break
    return image
def get_k8s_desired_ready_instance_count(info, resource_name):
    """Return the (desired, ready) replica counts of a workload resource."""
    # Each resource kind stores its replica counts under different keys.
    keys = constants.RESOURCE_REPLICAS_KEYS[resource_name]
    desired = getitems(info, keys['desired_replicas_keys'], default=0)
    ready = getitems(info, keys['ready_replicas_keys'], default=0)
    return desired, ready
def cluster_env(env, ret_num_flag=True):
    """Map a raw cluster env to its front-end name or numeric constant.

    With ``ret_num_flag`` (the default) return STAG_ENV/PROD_ENV; otherwise
    return the front-end environment name (possibly None).
    """
    front_env = settings.CLUSTER_ENV_FOR_FRONT.get(env)
    if not ret_num_flag:
        return front_env
    # Anything that is not explicitly "stag" counts as production.
    return STAG_ENV if front_env == "stag" else PROD_ENV
def get_project_namespaces(access_token, project_id):
    """Fetch all namespaces of a project from the PaaS CC service.

    Raises ``error_codes.APIError`` when the service returns a non-zero
    code; returns a (possibly empty) list of namespace dicts.
    """
    ns_resp = paas_cc.get_namespace_list(access_token, project_id, desire_all_data=True)
    if ns_resp.get('code') != ErrorCode.NoError:
        raise error_codes.APIError(ns_resp.get('message'))
    data = ns_resp.get('data') or {}
    return data.get('results') or []
def get_namespace_name_map(access_token, project_id):
    """Index the project's namespaces by their ``name`` field."""
    namespaces = get_project_namespaces(access_token, project_id)
    return {item['name']: item for item in namespaces}
def base64_encode_params(info):
    """JSON-serialize *info* and return the base64-encoded bytes."""
    payload = json.dumps(info).encode('utf-8')
    return base64.b64encode(payload)
def get_k8s_resource_status(resource_kind, resource, replicas, available):
    """Derive a run status for a deployment/sts/job/ds style resource.

    Desired == available counts as Running (including 0 == 0); jobs are
    additionally promoted to Completed once all completions finished.
    """
    if available == replicas and (available > 0 or replicas == 0):
        status = constants.ResourceStatus.Running.value
    else:
        status = constants.ResourceStatus.Unready.value
    if resource_kind == constants.REVERSE_CATEGORY_MAP[K8sResourceName.K8sJob.value]:
        # Number of replicas that reached completion for this job.
        completed = getitems(resource, ['data', 'spec', 'completions'], default=0)
        if completed == replicas and available > 0:
            status = constants.ResourceStatus.Completed.value
    return status
def delete_instance_records(online_instances, local_instances):
    """Soft-delete local instance records that no longer exist online.

    Records currently being rebuilt are excluded so their operation
    history survives.
    """
    # Keys present locally but absent from the online (cluster) view.
    diff_insts = set(local_instances) - set(online_instances.keys())
    instance_id_list = [local_instances[key].get('id') for key in diff_insts]
    InstanceConfig.objects.filter(id__in=instance_id_list).exclude(oper_type=constants.REBUILD_INSTANCE).update(
        is_deleted=True, deleted_time=timezone.now()
    )
def METHOD_NAME(annotations, labels):
    """Read the instance version name, preferring annotations over labels."""
    key = instance_constants.ANNOTATIONS_VERSION
    return annotations.get(key) or labels.get(key)
def get_instance_version_id(annotations, labels):
    """Read the instance version id, preferring annotations over labels."""
    key = instance_constants.ANNOTATIONS_VERSION_ID
    return annotations.get(key) or labels.get(key)
def get_instance_version(annotations, labels):
    """Return ``{'version': name, 'version_id': id}`` for an instance."""
    version_name = METHOD_NAME(annotations, labels)
    # Renamed local from ``id`` to avoid shadowing the builtin.
    version_id = get_instance_version_id(annotations, labels)
    return {'version': version_name, 'version_id': version_id}
def retry_requests(func, params=None, data=None, max_retries=2):
    """Query application info with retries.

    The storage-backed API can fail or return empty data, so the call is
    retried up to ``max_retries`` times with a short pause. The last
    attempt's response is returned as-is, even when its ``data`` is empty.
    Raises ``error_codes.APIError`` when every attempt raised.
    """
    for i in range(1, max_retries + 1):
        try:
            # ``params`` is passed positionally; otherwise ``data`` is
            # expanded as keyword arguments.
            resp = func(params) if params else func(**data)
            if i == max_retries:
                return resp
            # Empty ``data`` implies a non-zero code; pause and retry.
            if not resp.get("data"):
                time.sleep(0.5)
                continue
            return resp
        except Exception:
            # Wait briefly before the next attempt.
            time.sleep(0.5)
    raise error_codes.APIError("query storage api error")
def exclude_records(
    cluster_id_from_params: str,
    cluster_id_from_instance: str,
    cluster_type_from_params: str,
    cluster_type_from_instance: str,
) -> bool:
    """Decide whether an instance record should be filtered out.

    :param cluster_id_from_params: cluster ID from the request, used to
        scope results to a single cluster (may be empty)
    :param cluster_id_from_instance: cluster ID carried by the instance
    :param cluster_type_from_params: cluster environment from the request
        (production / staging)
    :param cluster_type_from_instance: cluster environment of the instance
    :returns: True when the record should be excluded
    """
    # Records without a cluster ID are never shown.
    if not cluster_id_from_instance:
        return True
    if cluster_id_from_params:
        # Explicit cluster filter: drop anything from another cluster.
        return cluster_id_from_instance != cluster_id_from_params
    # Otherwise filter by environment type, compared as strings.
    return str(cluster_type_from_params) != str(cluster_type_from_instance)
from datetime import datetime, timedelta
from tapiriik.database import db
from tapiriik.settings import PAYMENT_AMOUNT, PAYMENT_SYNC_DAYS
from bson.objectid import ObjectId
class Payments:
    """MongoDB-backed helpers for payments, claim codes and promo codes."""

    # NOTE(review): methods are defined without ``self`` and appear to be
    # invoked directly on the class (e.g. ``Payments.LogPayment(...)``) —
    # confirm against callers.
    # NOTE(review): insert/update/remove/save look like legacy pymongo APIs
    # (removed in pymongo 4) — verify the pinned driver version.
    def LogPayment(id, amount, initialAssociatedAccount, email):
        """Record a transaction, pro-rating its expiry by the amount paid."""
        # pro-rate their expiry date
        expires_in_days = min(PAYMENT_SYNC_DAYS, float(amount) / float(PAYMENT_AMOUNT) * float(PAYMENT_SYNC_DAYS))
        # would use upsert, except that would reset the timestamp value
        existingRecord = db.payments.find_one({"Txn": id})
        if existingRecord is None:
            existingRecord = {
                "Txn": id,
                "Timestamp": datetime.utcnow(),
                "Expiry": datetime.utcnow() + timedelta(days=expires_in_days),
                "Amount": amount,
                "InitialAssociatedAccount": initialAssociatedAccount,
                "Email": email
            }
            db.payments.insert(existingRecord)
        return existingRecord

    def ReversePayment(id):
        """Flag a transaction as reversed and detach it from all users."""
        # Mark the transaction, and pull it from any users who have it.
        db.payments.update({"Txn": id}, {"$set": {"Reversed": True}})
        db.users.update({"Payments.Txn": id}, {"$pull": {"Payments": {"Txn": id}}}, multi=True)

    def METHOD_NAME(id=None, email=None):
        """Find a non-reversed payment by transaction id or by email.

        Email lookups only consider unexpired payments. Returns None when
        nothing matches.
        """
        if id:
            return db.payments.find_one({"Txn": id, "Reversed": {"$ne": True}})
        elif email:
            res = db.payments.find({"Email": email, "Expiry":{"$gt": datetime.utcnow()}, "Reversed": {"$ne": True}}, limit=1)
            for payment in res:
                return payment

    def GenerateClaimCode(user, payment):
        """Create a one-off claim code tying *payment* to *user*."""
        db.payments_claim.remove({"Txn": payment["Txn"]})  # Remove any old codes, just to reduce the number kicking around at any one time.
        return str(db.payments_claim.insert({"Txn": payment["Txn"], "User": user["_id"], "Timestamp": datetime.utcnow()}))  # Return is the new _id, aka the claim code.

    def HasOutstandingClaimCode(user):
        """True when the user already has an unredeemed claim code."""
        return db.payments_claim.find_one({"User": user["_id"]}) is not None

    def ConsumeClaimCode(code):
        """Redeem a claim code; returns (user, payment) or (None, None)."""
        claim = db.payments_claim.find_one({"_id": ObjectId(code)})
        if not claim:
            return (None, None)
        db.payments_claim.remove(claim)
        return (db.users.find_one({"_id": claim["User"]}), db.payments.find_one({"Txn": claim["Txn"]}))

    def EnsureExternalPayment(provider, externalID, duration=None):
        """Fetch or create the active external-payment record.

        A record with no Expiry (or a future one) counts as active; a new
        record expires after *duration*, or never when duration is None.
        """
        existingRecord = db.external_payments.find_one({
            "Provider": provider,
            "ExternalID": externalID,
            "$or": [
                {"Expiry": {"$exists": False}},
                {"Expiry": None},
                {"Expiry": {"$gte": datetime.utcnow()}}
            ]
        })
        if existingRecord is None:
            existingRecord = {
                "Provider": provider,
                "ExternalID": externalID,
                "Timestamp": datetime.utcnow(),
                "Expiry": datetime.utcnow() + duration if duration else None
            }
            db.external_payments.insert(existingRecord)
        return existingRecord

    def ExpireExternalPayment(provider, externalID):
        """Expire a never-expiring external payment and sync user copies."""
        now = datetime.utcnow()
        db.external_payments.update(
            {
                "Provider": provider,
                "ExternalID": externalID,
                "$or": [
                    {"Expiry": {"$exists": False}},
                    {"Expiry": None},
                ]
            }, {
                "$set": {"Expiry": now}
            })
        # Wrangle the user copies - man, should have used an RDBMS
        expired_payment = db.external_payments.find_one({"Provider": provider, "ExternalID": externalID, "Expiry": now})
        # Could be already expired, no need to rerun the update
        if expired_payment:
            affected_user_ids = [x["_id"] for x in db.users.find({"ExternalPayments._id": expired_payment["_id"]}, {"_id": True})]
            db.users.update({"_id": {"$in": affected_user_ids}}, {"$pull": {"ExternalPayments": {"_id": expired_payment["_id"]}}}, multi=True)
            db.users.update({"_id": {"$in": affected_user_ids}}, {"$addToSet": {"ExternalPayments": expired_payment}}, multi=True)

    def GetAndActivatePromo(code):
        """Look up a promo code, activating it on first claim."""
        promo = db.promo_codes.find_one({"Code": code})
        if not promo:
            return None
        if "FirstClaimedTimestamp" not in promo:
            promo["FirstClaimedTimestamp"] = datetime.utcnow()
        # In seconds!
        if "Duration" in promo:
            promo["Expiry"] = promo["FirstClaimedTimestamp"] + timedelta(seconds=promo["Duration"])
        else:
            promo["Expiry"] = None
        # Write back, as we may have just activated it
        db.promo_codes.save(promo)
        return promo
# This file is part of wger Workout Manager <https://github.com/wger-project>.
# Copyright (C) 2013 - 2021 wger Team
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Django
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.db import models
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
# wger
from wger.manager.managers import (
WorkoutAndTemplateManager,
WorkoutManager,
WorkoutTemplateManager,
)
from wger.utils.cache import (
cache_mapper,
reset_workout_canonical_form,
)
class Workout(models.Model):
    """
    Model for a training schedule
    """

    # Default manager returns regular workouts only; ``templates`` and
    # ``both`` widen the queryset to (also) include workout templates.
    objects = WorkoutManager()
    templates = WorkoutTemplateManager()
    both = WorkoutAndTemplateManager()

    class Meta:
        """
        Meta class to set some other properties
        """
        ordering = [
            "-creation_date",
        ]

    creation_date = models.DateField(_('Creation date'), auto_now_add=True)
    name = models.CharField(
        verbose_name=_('Name'),
        max_length=100,
        blank=True,
        help_text=_("The name of the workout"),
    )
    description = models.TextField(
        verbose_name=_('Description'),
        max_length=1000,
        blank=True,
        help_text=_(
            "A short description or goal of the workout. For "
            "example 'Focus on back' or 'Week 1 of program "
            "xy'."
        ),
    )
    is_template = models.BooleanField(
        verbose_name=_('Workout template'),
        help_text=_(
            'Marking a workout as a template will freeze it and allow you to '
            'make copies of it'
        ),
        default=False,
        null=False,
    )
    is_public = models.BooleanField(
        verbose_name=_('Public template'),
        help_text=_('A public template is available to other users'),
        default=False,
        null=False,
    )
    user = models.ForeignKey(
        User,
        verbose_name=_('User'),
        on_delete=models.CASCADE,
    )

    def get_absolute_url(self):
        """
        Returns the canonical URL to view a workout
        """
        return reverse(
            'manager:template:view' if self.is_template else 'manager:workout:view',
            kwargs={'pk': self.id}
        )

    def __str__(self):
        """
        Return a more human-readable representation
        """
        if self.name:
            return self.name
        else:
            return "{0} ({1})".format(_('Workout'), self.creation_date)

    def clean(self):
        # Model-level validation: public workouts must also be templates.
        if self.is_public and not self.is_template:
            raise ValidationError(
                _('You must mark this workout as a template before declaring it public')
            )

    def save(self, *args, **kwargs):
        """
        Reset all cached infos
        """
        # Invalidate the cached canonical form before persisting changes.
        reset_workout_canonical_form(self.id)
        super(Workout, self).save(*args, **kwargs)

    def METHOD_NAME(self, *args, **kwargs):
        """
        Reset all cached infos
        """
        # Invalidate the cache before the row disappears.
        reset_workout_canonical_form(self.id)
        super(Workout, self).METHOD_NAME(*args, **kwargs)

    def get_owner_object(self):
        """
        Returns the object that has owner information
        """
        return self

    @property
    def canonical_representation(self):
        """
        Returns a canonical representation of the workout

        This form makes it easier to cache and use everywhere where all or part
        of a workout structure is needed. As an additional benefit, the template
        caches are not needed anymore.
        """
        workout_canonical_form = cache.get(cache_mapper.get_workout_canonical(self.pk))
        if not workout_canonical_form:
            day_canonical_repr = []
            muscles_front = []
            muscles_back = []
            muscles_front_secondary = []
            muscles_back_secondary = []

            # Sort list by weekday
            day_list = [i for i in self.day_set.select_related()]
            day_list.sort(key=lambda day: day.get_first_day_id)

            for day in day_list:
                canonical_repr_day = day.get_canonical_representation()

                # Collect all muscles (primary and secondary, front and
                # back), de-duplicated across days.
                for i in canonical_repr_day['muscles']['front']:
                    if i not in muscles_front:
                        muscles_front.append(i)
                for i in canonical_repr_day['muscles']['back']:
                    if i not in muscles_back:
                        muscles_back.append(i)
                for i in canonical_repr_day['muscles']['frontsecondary']:
                    if i not in muscles_front_secondary:
                        muscles_front_secondary.append(i)
                for i in canonical_repr_day['muscles']['backsecondary']:
                    if i not in muscles_back_secondary:
                        muscles_back_secondary.append(i)

                day_canonical_repr.append(canonical_repr_day)

            workout_canonical_form = {
                'obj': self,
                'muscles': {
                    'front': muscles_front,
                    'back': muscles_back,
                    'frontsecondary': muscles_front_secondary,
                    'backsecondary': muscles_back_secondary
                },
                'day_list': day_canonical_repr
            }

            # Save to cache
            cache.set(cache_mapper.get_workout_canonical(self.pk), workout_canonical_form)
        return workout_canonical_form
"""Generic thread tests.
Meant to be used by dummy_thread and thread. To allow for different modules
to be used, test_main() can be called with the module to use as the thread
implementation as its sole argument.
"""
import _dummy_thread as _thread
import time
import queue
import random
import unittest
from test import support
DELAY = 0 # Set > 0 when testing a module other than _dummy_thread, such as
# the '_thread' module.
class LockTests(unittest.TestCase):
    """Test lock objects."""

    def setUp(self):
        # Create a fresh (initially unlocked) lock for every test.
        self.lock = _thread.allocate_lock()

    def test_initlock(self):
        # Make sure locks start unlocked (the original comment said
        # "locked", which contradicted the assertion below).
        self.assertTrue(not self.lock.locked(),
                        "Lock object is not initialized unlocked.")

    def test_release(self):
        # Test self.lock.release()
        self.lock.acquire()
        self.lock.release()
        self.assertTrue(not self.lock.locked(),
                        "Lock object did not release properly.")

    def test_improper_release(self):
        # Make sure release of an unlocked thread raises RuntimeError
        self.assertRaises(RuntimeError, self.lock.release)

    def test_cond_acquire_success(self):
        # Make sure the conditional acquiring of the lock works.
        self.assertTrue(self.lock.acquire(0),
                        "Conditional acquiring of the lock failed.")

    def test_cond_acquire_fail(self):
        # Test acquiring locked lock returns False
        self.lock.acquire(0)
        self.assertTrue(not self.lock.acquire(0),
                        "Conditional acquiring of a locked lock incorrectly "
                        "succeeded.")

    def test_uncond_acquire_success(self):
        # Make sure unconditional acquiring of a lock works.
        self.lock.acquire()
        self.assertTrue(self.lock.locked(),
                        "Uncondional locking failed.")

    def test_uncond_acquire_return_val(self):
        # Make sure that an unconditional locking returns True.
        self.assertTrue(self.lock.acquire(1) is True,
                        "Unconditional locking did not return True.")
        self.assertTrue(self.lock.acquire() is True)

    def test_uncond_acquire_blocking(self):
        # Make sure that unconditional acquiring of a locked lock blocks.
        def METHOD_NAME(to_unlock, delay):
            """Hold on to lock for a set amount of time before unlocking."""
            time.sleep(delay)
            to_unlock.release()

        self.lock.acquire()
        start_time = int(time.time())
        # With DELAY == 0 (the _dummy_thread default) this runs
        # synchronously and releases the lock immediately.
        _thread.start_new_thread(METHOD_NAME, (self.lock, DELAY))
        if support.verbose:
            print()
            print("*** Waiting for thread to release the lock "\
                  "(approx. %s sec.) ***" % DELAY)
        self.lock.acquire()
        end_time = int(time.time())
        if support.verbose:
            print("done")
        self.assertTrue((end_time - start_time) >= DELAY,
                        "Blocking by unconditional acquiring failed.")
class MiscTests(unittest.TestCase):
    """Miscellaneous tests."""

    def test_exit(self):
        # Make sure _thread.exit() raises SystemExit
        self.assertRaises(SystemExit, _thread.exit)

    def test_ident(self):
        # Test sanity of _thread.get_ident()
        self.assertIsInstance(_thread.get_ident(), int,
                              "_thread.get_ident() returned a non-integer")
        self.assertTrue(_thread.get_ident() != 0,
                        "_thread.get_ident() returned 0")

    def test_LockType(self):
        # Make sure _thread.LockType is the same type as _thread.allocate_lock()
        self.assertIsInstance(_thread.allocate_lock(), _thread.LockType,
                              "_thread.LockType is not an instance of what "
                              "is returned by _thread.allocate_lock()")

    def test_interrupt_main(self):
        # Calling start_new_thread with a function that executes interrupt_main
        # should raise KeyboardInterrupt upon completion.
        def call_interrupt():
            _thread.interrupt_main()

        self.assertRaises(KeyboardInterrupt, _thread.start_new_thread,
                          call_interrupt, tuple())

    def test_interrupt_in_main(self):
        # Make sure that if interrupt_main is called in the main thread,
        # KeyboardInterrupt is raised instantly.
        self.assertRaises(KeyboardInterrupt, _thread.interrupt_main)
class ThreadTests(unittest.TestCase):
    """Test thread creation."""

    def test_arg_passing(self):
        # Make sure that parameter passing works.
        def arg_tester(queue, arg1=False, arg2=False):
            """Use to test _thread.start_new_thread() passes args properly."""
            queue.put((arg1, arg2))

        # NOTE: with _dummy_thread the target runs synchronously, so the
        # result is on the queue immediately after each call.
        testing_queue = queue.Queue(1)
        _thread.start_new_thread(arg_tester, (testing_queue, True, True))
        result = testing_queue.get()
        self.assertTrue(result[0] and result[1],
                        "Argument passing for thread creation using tuple failed")
        _thread.start_new_thread(arg_tester, tuple(), {'queue':testing_queue,
                                                       'arg1':True, 'arg2':True})
        result = testing_queue.get()
        self.assertTrue(result[0] and result[1],
                        "Argument passing for thread creation using kwargs failed")
        _thread.start_new_thread(arg_tester, (testing_queue, True), {'arg2':True})
        result = testing_queue.get()
        self.assertTrue(result[0] and result[1],
                        "Argument passing for thread creation using both tuple"
                        " and kwargs failed")

    def test_multi_creation(self):
        # Make sure multiple threads can be created.
        def queue_mark(queue, delay):
            """Wait for ``delay`` seconds and then put something into ``queue``"""
            time.sleep(delay)
            queue.put(_thread.get_ident())

        thread_count = 5
        testing_queue = queue.Queue(thread_count)
        if support.verbose:
            print()
            print("*** Testing multiple thread creation "\
                  "(will take approx. %s to %s sec.) ***" % (DELAY, thread_count))
        for count in range(thread_count):
            # Random per-thread delay only when testing a real thread module.
            if DELAY:
                local_delay = round(random.random(), 1)
            else:
                local_delay = 0
            _thread.start_new_thread(queue_mark,
                                     (testing_queue, local_delay))
        time.sleep(DELAY)
        if support.verbose:
            print('done')
        self.assertTrue(testing_queue.qsize() == thread_count,
                        "Not all %s threads executed properly after %s sec." %
                        (thread_count, DELAY))
def test_main(imported_module=None):
    """Run the suite, optionally against a real thread module.

    Passing a module (e.g. ``_thread``) rebinds the global ``_thread``
    reference and enables real delays via ``DELAY``.
    """
    global _thread, DELAY
    if imported_module:
        _thread = imported_module
        DELAY = 2
    if support.verbose:
        print()
        print("*** Using %s as _thread module ***" % _thread)
    support.run_unittest(LockTests, MiscTests, ThreadTests)
if __name__ == '__main__':
test_main()
import click
from kfp import client
from kfp.cli import output
from kfp.cli.utils import parsing
@click.group()
def experiment():
    """Manage experiment resources."""
    # Click group: concrete subcommands are attached via @experiment.command().
@experiment.command()
@click.option(
    '-d',
    '--description',
    help=parsing.get_param_descr(client.Client.create_experiment,
                                 'description'))
@click.argument('name')
@click.pass_context
def create(ctx: click.Context, description: str, name: str):
    """Create an experiment."""
    # The shared API client and output format are stashed on the context
    # object by the top-level CLI entry point.
    client_obj: client.Client = ctx.obj['client']
    output_format = ctx.obj['output']
    experiment = client_obj.create_experiment(name, description=description)
    output.print_output(
        experiment,
        output.ModelType.EXPERIMENT,
        output_format,
    )
@experiment.command()
@click.option(
    '--page-token',
    default='',
    help=parsing.get_param_descr(client.Client.list_experiments, 'page_token'))
@click.option(
    '-m',
    '--max-size',
    default=100,
    help=parsing.get_param_descr(client.Client.list_experiments, 'page_size'))
@click.option(
    '--sort-by',
    default='created_at desc',
    help=parsing.get_param_descr(client.Client.list_experiments, 'sort_by'))
@click.option(
    '--filter',
    help=parsing.get_param_descr(client.Client.list_experiments, 'filter'))
@click.pass_context
def list(ctx: click.Context, page_token: str, max_size: int, sort_by: str,
         filter: str):
    """List experiments."""
    # NOTE: the function name deliberately shadows the builtin ``list``
    # (and the ``filter`` parameter the builtin ``filter``) — click derives
    # the CLI command/option names from them, so they cannot be renamed.
    client_obj: client.Client = ctx.obj['client']
    output_format = ctx.obj['output']
    response = client_obj.list_experiments(
        page_token=page_token,
        page_size=max_size,
        sort_by=sort_by,
        filter=filter)
    # ``experiments`` may be None for an empty page.
    output.print_output(
        response.experiments or [],
        output.ModelType.EXPERIMENT,
        output_format,
    )
@experiment.command()
@click.argument('experiment-id')
@click.pass_context
def get(ctx: click.Context, experiment_id: str):
    """Get information about an experiment."""
    # click maps the 'experiment-id' argument onto the experiment_id parameter.
    client_obj: client.Client = ctx.obj['client']
    output_format = ctx.obj['output']
    experiment = client_obj.get_experiment(experiment_id)
    output.print_output(
        experiment,
        output.ModelType.EXPERIMENT,
        output_format,
    )
@experiment.command()
@click.argument('experiment-id')
@click.pass_context
def delete(ctx: click.Context, experiment_id: str):
    """Delete an experiment."""
    # Require interactive confirmation: runs detached from their experiment
    # can render oddly in the RunDetails page.
    confirmation = 'Caution. The RunDetails page could have an issue' \
                   ' when it renders a run that has no experiment.' \
                   ' Do you want to continue?'
    if not click.confirm(confirmation):
        return

    client_obj: client.Client = ctx.obj['client']
    output_format = ctx.obj['output']
    client_obj.delete_experiment(experiment_id)
    output.print_deleted_text('experiment', experiment_id, output_format)
either_option_required = 'Either --experiment-id or --experiment-name is required.'
@experiment.command()
@click.option(
    '--experiment-id',
    default=None,
    help=parsing.get_param_descr(client.Client.archive_experiment,
                                 'experiment_id') + ' ' + either_option_required
)
@click.option(
    '--experiment-name',
    default=None,
    help='Name of the experiment.' + ' ' + either_option_required)
@click.pass_context
def METHOD_NAME(ctx: click.Context, experiment_id: str, experiment_name: str):
    """Archive an experiment.

    Accepts exactly one of --experiment-id / --experiment-name, archives the
    experiment, then prints its post-archive state.
    """
    client_obj: client.Client = ctx.obj['client']
    output_format = ctx.obj['output']
    # Exactly one of the two selectors must be provided.
    if (experiment_id is None) == (experiment_name is None):
        raise ValueError(either_option_required)
    # Resolve the name to an id so the archive call below is uniform.
    if not experiment_id:
        experiment = client_obj.get_experiment(experiment_name=experiment_name)
        experiment_id = experiment.experiment_id
    client_obj.archive_experiment(experiment_id=experiment_id)
    # Re-fetch by id so the printed record reflects the archived state.
    # (The original code also had a lookup-by-name branch here, but it was
    # unreachable: experiment_id is always set by this point.)
    experiment = client_obj.get_experiment(experiment_id=experiment_id)
    output.print_output(
        experiment,
        output.ModelType.EXPERIMENT,
        output_format,
    )
@experiment.command()
@click.option(
    '--experiment-id',
    default=None,
    help=parsing.get_param_descr(client.Client.unarchive_experiment,
                                 'experiment_id') + ' ' + either_option_required
)
@click.option(
    '--experiment-name',
    default=None,
    help='Name of the experiment.' + ' ' + either_option_required)
@click.pass_context
def unarchive(ctx: click.Context, experiment_id: str, experiment_name: str):
    """Unarchive an experiment.

    Accepts exactly one of --experiment-id / --experiment-name, unarchives
    the experiment, then prints its post-unarchive state.
    """
    client_obj: client.Client = ctx.obj['client']
    output_format = ctx.obj['output']
    # Exactly one of the two selectors must be provided.
    if (experiment_id is None) == (experiment_name is None):
        raise ValueError(either_option_required)
    # Resolve the name to an id so the unarchive call below is uniform.
    if not experiment_id:
        experiment = client_obj.get_experiment(experiment_name=experiment_name)
        experiment_id = experiment.experiment_id
    client_obj.unarchive_experiment(experiment_id=experiment_id)
    # Re-fetch by id so the printed record reflects the unarchived state.
    # (The original lookup-by-name fallback here was dead code: experiment_id
    # is always set by this point.)
    experiment = client_obj.get_experiment(experiment_id=experiment_id)
    output.print_output(
        experiment,
        output.ModelType.EXPERIMENT,
        output_format,
    )
4,039 | test wrapped function contextmanager | """Test the autodoc extension.
This tests mainly the Documenters; the auto directives are tested in a test
source file translated by test_build.
"""
import pytest
from .test_ext_autodoc import do_autodoc
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_classes(app):
    """Classes documented via the ``function`` directive render their
    constructor signatures."""
    actual = do_autodoc(app, 'function', 'target.classes.Foo')
    assert list(actual) == [
        '',
        '.. py:function:: Foo()',
        ' :module: target.classes',
        '',
    ]
    actual = do_autodoc(app, 'function', 'target.classes.Bar')
    assert list(actual) == [
        '',
        '.. py:function:: Bar(x, y)',
        ' :module: target.classes',
        '',
    ]
    actual = do_autodoc(app, 'function', 'target.classes.Baz')
    assert list(actual) == [
        '',
        '.. py:function:: Baz(x, y)',
        ' :module: target.classes',
        '',
    ]
    actual = do_autodoc(app, 'function', 'target.classes.Qux')
    assert list(actual) == [
        '',
        '.. py:function:: Qux(foo, bar)',
        ' :module: target.classes',
        '',
    ]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_callable(app):
    """A callable instance documented as a function keeps its ``__call__``
    signature and docstring."""
    actual = do_autodoc(app, 'function', 'target.callable.function')
    assert list(actual) == [
        '',
        '.. py:function:: function(arg1, arg2, **kwargs)',
        ' :module: target.callable',
        '',
        ' A callable object that behaves like a function.',
        '',
    ]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_method(app):
    """A bound method documented as a function drops ``self`` from the
    rendered signature."""
    actual = do_autodoc(app, 'function', 'target.callable.method')
    assert list(actual) == [
        '',
        '.. py:function:: method(arg1, arg2)',
        ' :module: target.callable',
        '',
        ' docstring of Callable.method().',
        '',
    ]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_builtin_function(app):
    """A C builtin (``os.umask``) renders its positional-only signature."""
    actual = do_autodoc(app, 'function', 'os.umask')
    assert list(actual) == [
        '',
        '.. py:function:: umask(mask, /)',
        ' :module: os',
        '',
        ' Set the current numeric umask and return the previous umask.',
        '',
    ]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_methoddescriptor(app):
    """A method descriptor (``int.__add__``) renders with ``self`` and the
    positional-only marker."""
    actual = do_autodoc(app, 'function', 'builtins.int.__add__')
    assert list(actual) == [
        '',
        '.. py:function:: __add__(self, value, /)',
        ' :module: builtins.int',
        '',
        ' Return self+value.',
        '',
    ]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_decorated(app):
    """A decorated function shows the wrapped function's signature, not the
    wrapper's."""
    actual = do_autodoc(app, 'function', 'target.decorator.foo')
    assert list(actual) == [
        '',
        '.. py:function:: foo(name=None, age=None)',
        ' :module: target.decorator',
        '',
    ]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_singledispatch(app):
    """A ``functools.singledispatch`` function lists the generic signature
    followed by one line per registered overload."""
    options = {}
    actual = do_autodoc(app, 'function', 'target.singledispatch.func', options)
    assert list(actual) == [
        '',
        '.. py:function:: func(arg, kwarg=None)',
        ' func(arg: float, kwarg=None)',
        ' func(arg: int, kwarg=None)',
        ' func(arg: str, kwarg=None)',
        ' func(arg: dict, kwarg=None)',
        ' :module: target.singledispatch',
        '',
        ' A function for general use.',
        '',
    ]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_cfunction(app):
    """A C function without introspectable signature falls back to the
    signature embedded in its docstring."""
    actual = do_autodoc(app, 'function', 'time.asctime')
    assert list(actual) == [
        '',
        '.. py:function:: asctime([tuple]) -> string',
        ' :module: time',
        '',
        " Convert a time tuple to a string, e.g. 'Sat Jun 06 16:26:11 1998'.",
        ' When the time tuple is not present, current time as returned by localtime()',
        ' is used.',
        '',
    ]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_wrapped_function(app):
    """A ``functools.wraps``-wrapped function documents the original's
    signature and docstring."""
    actual = do_autodoc(app, 'function', 'target.wrappedfunction.slow_function')
    assert list(actual) == [
        '',
        '.. py:function:: slow_function(message, timeout)',
        ' :module: target.wrappedfunction',
        '',
        ' This function is slow.',
        '',
    ]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def METHOD_NAME(app):
    """A ``contextlib.contextmanager``-decorated function keeps its own
    annotations and docstring when documented."""
    actual = do_autodoc(app, 'function', 'target.wrappedfunction.feeling_good')
    assert list(actual) == [
        '',
        '.. py:function:: feeling_good(x: int, y: int) -> ~typing.Generator',
        ' :module: target.wrappedfunction',
        '',
        " You'll feel better in this context!",
        '',
    ]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_coroutine(app):
    """An ``async def`` function gets the ``:async:`` option."""
    actual = do_autodoc(app, 'function', 'target.functions.coroutinefunc')
    assert list(actual) == [
        '',
        '.. py:function:: coroutinefunc()',
        ' :module: target.functions',
        ' :async:',
        '',
    ]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_synchronized_coroutine(app):
    """A coroutine wrapped to run synchronously is rendered as a plain
    function (no ``:async:``)."""
    actual = do_autodoc(app, 'function', 'target.coroutine.sync_func')
    assert list(actual) == [
        '',
        '.. py:function:: sync_func()',
        ' :module: target.coroutine',
        '',
    ]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_async_generator(app):
    """An async generator also gets the ``:async:`` option."""
    actual = do_autodoc(app, 'function', 'target.functions.asyncgenerator')
    assert list(actual) == [
        '',
        '.. py:function:: asyncgenerator()',
        ' :module: target.functions',
        ' :async:',
        '',
    ]
4,040 | filter | """Filters for filtering the data of the projects app endpoints."""
from datetime import date, timedelta
from django.db.models import Count, Q
from django_filters.constants import EMPTY_VALUES
from django_filters.rest_framework import BaseInFilter, Filter, FilterSet, NumberFilter
from timed.projects import models
class NumberInFilter(BaseInFilter, NumberFilter):
    """Number filter accepting a comma-separated list (e.g. ``?field=1,2,3``)."""

    pass
class CustomerFilterSet(FilterSet):
    """Filter set for the customers endpoint."""

    # Exposed as 0/1 query parameter rather than a boolean string.
    archived = NumberFilter(field_name="archived")

    class Meta:
        """Meta information for the customer filter set."""

        model = models.Customer
        fields = ["archived", "reference"]
class ProjectFilterSet(FilterSet):
    """Filter set for the projects endpoint."""

    archived = NumberFilter(field_name="archived")
    has_manager = NumberFilter(method="filter_has_manager")
    has_reviewer = NumberFilter(method="filter_has_reviewer")
    customer = NumberInFilter(field_name="customer")

    def filter_has_manager(self, queryset, name, value):
        """Keep projects managed by user ``value``.

        A user counts as manager either through a direct project assignment
        or through a manager assignment on the project's customer.
        """
        if not value:  # pragma: no cover
            return queryset
        # METHOD_NAME here is the queryset filter operation.
        return queryset.METHOD_NAME(
            Q(
                pk__in=models.ProjectAssignee.objects.METHOD_NAME(
                    is_manager=True, user_id=value
                ).values("project_id"),
            )
            | Q(
                customer_id__in=models.CustomerAssignee.objects.METHOD_NAME(
                    is_manager=True, user_id=value
                ).values("customer_id"),
            )
        )

    def filter_has_reviewer(self, queryset, name, value):
        """Keep projects reviewed by user ``value``.

        Mirrors ``filter_has_manager`` but matches reviewer assignments.
        """
        if not value:  # pragma: no cover
            return queryset
        return queryset.METHOD_NAME(
            Q(
                pk__in=models.ProjectAssignee.objects.METHOD_NAME(
                    is_reviewer=True, user_id=value
                ).values("project_id"),
            )
            | Q(
                customer_id__in=models.CustomerAssignee.objects.METHOD_NAME(
                    is_reviewer=True, user_id=value
                ).values("customer_id"),
            )
        )

    class Meta:
        """Meta information for the project filter set."""

        model = models.Project
        fields = ["archived", "customer", "billing_type", "cost_center", "reference"]
class MyMostFrequentTaskFilter(Filter):
    """Filter most frequently used tasks.

    TODO:
        From an api and framework standpoint instead of an additional filter it
        would be more desirable to assign an ordering field frecency and to
        limit by use paging. This is way harder to implement therefore on hold.
    """

    def METHOD_NAME(self, qs, value):
        """Filter for given most frequently used tasks.

        Most frequently used tasks are only counted within last
        few months as older tasks are not relevant anymore
        for today's usage.

        :param QuerySet qs: The queryset to filter
        :param int value: number of most frequent items
        :return: The filtered queryset
        :rtype: QuerySet
        """
        if value in EMPTY_VALUES:
            return qs
        # Only consider the requesting user's reports from the last 60 days.
        user = self.parent.request.user
        from_date = date.today() - timedelta(days=60)
        qs = qs.METHOD_NAME(
            reports__user=user,
            reports__date__gt=from_date,
            archived=False,
            project__archived=False,
        )
        # Rank by report count, most used first.
        qs = qs.annotate(frequency=Count("reports")).order_by("-frequency")
        # limit number of results to given value
        qs = qs[: int(value)]
        return qs
class TaskFilterSet(FilterSet):
    """Filter set for the tasks endpoint."""

    my_most_frequent = MyMostFrequentTaskFilter()
    archived = NumberFilter(field_name="archived")
    project = NumberInFilter(field_name="project")

    class Meta:
        """Meta information for the task filter set."""

        model = models.Task
        fields = ["archived", "project", "my_most_frequent", "reference", "cost_center"]
class TaskAssigneeFilterSet(FilterSet):
    """Filter set for the task assignees endpoint."""

    # ``task`` matches a single id, ``tasks`` a comma-separated list.
    task = NumberFilter(field_name="task")
    tasks = NumberInFilter(field_name="task")
    user = NumberFilter(field_name="user")

    class Meta:
        """Meta information for the task assignee filter set."""

        model = models.TaskAssignee
        fields = ["task", "user", "is_reviewer", "is_manager", "is_resource"]
class ProjectAssigneeFilterSet(FilterSet):
    """Filter set for the project assignees endpoint."""

    # ``project`` matches a single id, ``projects`` a comma-separated list.
    project = NumberFilter(field_name="project")
    projects = NumberInFilter(field_name="project")
    user = NumberFilter(field_name="user")

    class Meta:
        """Meta information for the project assignee filter set."""

        model = models.ProjectAssignee
        fields = ["project", "user", "is_reviewer", "is_manager", "is_resource"]
class CustomerAssigneeFilterSet(FilterSet):
    """Filter set for the customer assignees endpoint."""

    # ``customer`` matches a single id, ``customers`` a comma-separated list.
    customer = NumberFilter(field_name="customer")
    customers = NumberInFilter(field_name="customer")
    user = NumberFilter(field_name="user")

    class Meta:
        """Meta information for the customer assignee filter set."""

        model = models.CustomerAssignee
        fields = ["customer", "user", "is_reviewer", "is_manager", "is_resource"]
4,041 | test cross net numercial forward 1 expert | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from torch.fx import GraphModule, Tracer
from torchrec.modules.crossnet import (
CrossNet,
LowRankCrossNet,
LowRankMixtureCrossNet,
VectorCrossNet,
)
# unit test for Full Rank CrossNet: CrossNet
class TestCrossNet(unittest.TestCase):
    """Unit tests for the full-rank CrossNet module."""

    def test_cross_net_numercial_forward(self) -> None:
        """Forward pass reproduces golden values under a fixed seed.

        The expected tensor is tied to the exact order of parameter
        initialization, so the seeding and construction order must not change.
        """
        torch.manual_seed(0)
        batch_size = 3
        num_layers = 20
        in_features = 2
        input = torch.randn(batch_size, in_features)
        # test using vector for crossing
        dcn = CrossNet(in_features=in_features, num_layers=num_layers)
        output = dcn(input)
        expected_output = torch.Tensor(
            [
                [2.4481, 2.2710],
                [-63.1721, -109.2410],
                [1.4030, 1.0054],
            ]
        )
        self.assertTrue(torch.allclose(output, expected_output, rtol=1e-4, atol=1e-4))

    def test_fx_script_cross_net(self) -> None:
        """CrossNet can be FX-traced and TorchScript-compiled."""
        input = torch.randn(2, 3)
        dcn = CrossNet(in_features=3, num_layers=2)
        dcn(input)
        # dry-run to initialize lazy module
        gm = GraphModule(dcn, Tracer().trace(dcn))
        torch.jit.script(gm)
# unit test for Low Rank CrossNet: LowRankCrossNet
class TestLowRankCrossNet(unittest.TestCase):
    """Unit tests for the low-rank CrossNet module."""

    def test_cross_net_numercial_forward(self) -> None:
        """Forward pass reproduces golden values under a fixed seed."""
        torch.manual_seed(0)
        batch_size = 3
        num_layers = 20
        in_features = 2
        input = torch.randn(batch_size, in_features)
        # test using vector for crossing
        dcn = LowRankCrossNet(
            in_features=in_features, num_layers=num_layers, low_rank=10
        )
        output = dcn(input)
        expected_output = torch.Tensor(
            [
                [-11.5000, -3.4863],
                [-0.2742, -0.3330],
                [249.6694, 117.3466],
            ]
        )
        self.assertTrue(torch.allclose(output, expected_output, rtol=1e-4, atol=1e-4))

    def test_fx_script_cross_net(self) -> None:
        """LowRankCrossNet can be FX-traced and TorchScript-compiled."""
        input = torch.randn(2, 3)
        dcn = LowRankCrossNet(in_features=3, num_layers=2, low_rank=2)
        dcn(input)
        # dry-run to initialize lazy module
        gm = GraphModule(dcn, Tracer().trace(dcn))
        torch.jit.script(gm)
# unit test for Vector Version CrossNet: VectorCrossNet
class TestVectorCrossNet(unittest.TestCase):
    """Unit tests for the vector-version CrossNet module."""

    def test_cross_net_numercial_forward(self) -> None:
        """Forward pass reproduces golden values under a fixed seed."""
        torch.manual_seed(0)
        batch_size = 3
        num_layers = 20
        in_features = 2
        input = torch.randn(batch_size, in_features)
        # test using vector for crossing
        dcn = VectorCrossNet(in_features=in_features, num_layers=num_layers)
        output = dcn(input)
        expected_output = torch.Tensor(
            [
                [1.8289e-04, -3.4827e-05],
                [-2.2084e02, 5.7615e01],
                [-1.3328e02, -1.7187e02],
            ]
        )
        self.assertTrue(torch.allclose(output, expected_output, rtol=1e-4, atol=1e-4))

    def test_fx_script_cross_net(self) -> None:
        """VectorCrossNet can be FX-traced and TorchScript-compiled."""
        input = torch.randn(2, 3)
        dcn = VectorCrossNet(in_features=3, num_layers=2)
        dcn(input)
        # dry-run to initialize lazy module
        gm = GraphModule(dcn, Tracer().trace(dcn))
        torch.jit.script(gm)
# unit test for Low Rank CrossNet with Mixture of Expert: LowRankMixtureCrossNet
class TestLowRankMixtureCrossNet(unittest.TestCase):
    """Unit tests for the low-rank CrossNet with a mixture of experts."""

    def test_cross_net_numercial_forward(self) -> None:
        """Forward pass with 4 experts reproduces golden values under a
        fixed seed."""
        torch.manual_seed(0)
        batch_size = 3
        num_layers = 20
        in_features = 2
        input = torch.randn(batch_size, in_features)
        # test using vector for crossing
        dcn = LowRankMixtureCrossNet(
            in_features=in_features, num_layers=num_layers, num_experts=4, low_rank=10
        )
        output = dcn(input)
        expected_output = torch.Tensor(
            [
                [1.7045, -0.2848],
                [-2.5357, 0.5811],
                [-0.9467, -1.3091],
            ]
        )
        self.assertTrue(torch.allclose(output, expected_output, rtol=1e-4, atol=1e-4))

    def METHOD_NAME(self) -> None:
        """Forward pass with a single expert (the gating degenerates)
        reproduces golden values under a fixed seed."""
        torch.manual_seed(0)
        batch_size = 3
        num_layers = 20
        in_features = 2
        input = torch.randn(batch_size, in_features)
        # test using vector for crossing
        dcn = LowRankMixtureCrossNet(
            in_features=in_features, num_layers=num_layers, num_experts=1, low_rank=10
        )
        output = dcn(input)
        expected_output = torch.Tensor(
            [
                [3.9203, -0.2686],
                [-9.5767, 0.8621],
                [-2.5836, -1.8124],
            ]
        )
        self.assertTrue(torch.allclose(output, expected_output, rtol=1e-4, atol=1e-4))

    def test_fx_script_cross_net(self) -> None:
        """LowRankMixtureCrossNet can be FX-traced and TorchScript-compiled."""
        input = torch.randn(2, 3)
        dcn = LowRankMixtureCrossNet(in_features=3, num_layers=2)
        dcn(input)
        # dry-run to initialize lazy module
        gm = GraphModule(dcn, Tracer().trace(dcn))
        torch.jit.script(gm)
4,042 | reset world | # noqa: D212, D415
"""
# Simple Reference
```{figure} mpe_simple_reference.gif
:width: 140px
:name: simple_reference
```
This environment is part of the <a href='..'>MPE environments</a>. Please read that page first for general information.
| Import | `from pettingzoo.mpe import simple_reference_v3` |
|--------------------|--------------------------------------------------|
| Actions | Discrete/Continuous |
| Parallel API | Yes |
| Manual Control | No |
| Agents | `agents= [adversary_0, agent_0,agent_1]` |
| Agents | 3 |
| Action Shape | (5) |
| Action Values | Discrete(5)/Box(0.0, 1.0, (5)) |
| Observation Shape | (8),(10) |
| Observation Values | (-inf,inf) |
| State Shape | (28,) |
| State Values | (-inf,inf) |
This environment has 2 agents and 3 landmarks of different colors. Each agent wants to get closer to their target landmark, which is known only by the other agents. Both agents are simultaneous speakers and listeners.
Locally, the agents are rewarded by their distance to their target landmark. Globally, all agents are rewarded by the average distance of all the agents to their respective landmarks. The relative weight of these rewards is controlled by the `local_ratio` parameter.
Agent observation space: `[self_vel, all_landmark_rel_positions, landmark_ids, goal_id, communication]`
Agent discrete action space: `[say_0, say_1, say_2, say_3, say_4, say_5, say_6, say_7, say_8, say_9] X [no_action, move_left, move_right, move_down, move_up]`
Where X is the Cartesian product (giving a total action space of 50).
Agent continuous action space: `[no_action, move_left, move_right, move_down, move_up, say_0, say_1, say_2, say_3, say_4, say_5, say_6, say_7, say_8, say_9]`
### Arguments
``` python
simple_reference_v3.env(local_ratio=0.5, max_cycles=25, continuous_actions=False)
```
`local_ratio`: Weight applied to local reward and global reward. Global reward weight will always be 1 - local reward weight.
`max_cycles`: number of frames (a step for each agent) until game terminates
`continuous_actions`: Whether agent action spaces are discrete(default) or continuous
"""
import numpy as np
from gymnasium.utils import EzPickle
from pettingzoo.mpe._mpe_utils.core import Agent, Landmark, World
from pettingzoo.mpe._mpe_utils.scenario import BaseScenario
from pettingzoo.mpe._mpe_utils.simple_env import SimpleEnv, make_env
from pettingzoo.utils.conversions import parallel_wrapper_fn
class raw_env(SimpleEnv, EzPickle):
    """AEC environment for the MPE Simple Reference scenario."""

    def __init__(
        self, local_ratio=0.5, max_cycles=25, continuous_actions=False, render_mode=None
    ):
        # EzPickle records the constructor arguments so the environment can
        # be pickled and reconstructed with identical settings.
        EzPickle.__init__(
            self,
            local_ratio=local_ratio,
            max_cycles=max_cycles,
            continuous_actions=continuous_actions,
            render_mode=render_mode,
        )
        assert (
            0.0 <= local_ratio <= 1.0
        ), "local_ratio is a proportion. Must be between 0 and 1."
        scenario = Scenario()
        world = scenario.make_world()
        SimpleEnv.__init__(
            self,
            scenario=scenario,
            world=world,
            render_mode=render_mode,
            max_cycles=max_cycles,
            continuous_actions=continuous_actions,
            local_ratio=local_ratio,
        )
        self.metadata["name"] = "simple_reference_v3"
# Public entry points: wrapped AEC environment and its parallel-API variant.
env = make_env(raw_env)
parallel_env = parallel_wrapper_fn(env)
class Scenario(BaseScenario):
    """Simple Reference scenario: two speaking/listening agents, three
    landmarks; each agent's goal landmark is known only to the other agent.
    """

    def make_world(self):
        """Build the world with 2 non-colliding agents and 3 static landmarks."""
        world = World()
        # set any world properties first
        world.dim_c = 10
        world.collaborative = True  # whether agents share rewards
        # add agents
        world.agents = [Agent() for i in range(2)]
        for i, agent in enumerate(world.agents):
            agent.name = f"agent_{i}"
            agent.collide = False
        # add landmarks
        world.landmarks = [Landmark() for i in range(3)]
        for i, landmark in enumerate(world.landmarks):
            landmark.name = "landmark %d" % i
            landmark.collide = False
            landmark.movable = False
        return world

    def METHOD_NAME(self, world, np_random):
        """Reset goals, colors, and initial states.

        NOTE: the order of ``np_random`` calls below is part of the
        reproducibility contract — reordering them changes seeded rollouts.
        """
        # assign goals to agents
        for agent in world.agents:
            agent.goal_a = None
            agent.goal_b = None
        # want other agent to go to the goal landmark
        world.agents[0].goal_a = world.agents[1]
        world.agents[0].goal_b = np_random.choice(world.landmarks)
        world.agents[1].goal_a = world.agents[0]
        world.agents[1].goal_b = np_random.choice(world.landmarks)
        # random properties for agents
        for i, agent in enumerate(world.agents):
            agent.color = np.array([0.25, 0.25, 0.25])
        # random properties for landmarks
        world.landmarks[0].color = np.array([0.75, 0.25, 0.25])
        world.landmarks[1].color = np.array([0.25, 0.75, 0.25])
        world.landmarks[2].color = np.array([0.25, 0.25, 0.75])
        # special colors for goals
        world.agents[0].goal_a.color = world.agents[0].goal_b.color
        world.agents[1].goal_a.color = world.agents[1].goal_b.color
        # set random initial states
        for agent in world.agents:
            agent.state.p_pos = np_random.uniform(-1, +1, world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)
        for i, landmark in enumerate(world.landmarks):
            landmark.state.p_pos = np_random.uniform(-1, +1, world.dim_p)
            landmark.state.p_vel = np.zeros(world.dim_p)

    def reward(self, agent, world):
        """Negative Euclidean distance from the partner agent to its goal."""
        if agent.goal_a is None or agent.goal_b is None:
            agent_reward = 0.0
        else:
            agent_reward = np.sqrt(
                np.sum(np.square(agent.goal_a.state.p_pos - agent.goal_b.state.p_pos))
            )
        return -agent_reward

    def global_reward(self, world):
        """Mean of all agents' local rewards."""
        all_rewards = sum(self.reward(agent, world) for agent in world.agents)
        return all_rewards / len(world.agents)

    def observation(self, agent, world):
        """Observation: own velocity, relative landmark positions, the goal
        color, and the other agent's communication state."""
        # goal color
        goal_color = [np.zeros(world.dim_color), np.zeros(world.dim_color)]
        if agent.goal_b is not None:
            goal_color[1] = agent.goal_b.color
        # get positions of all entities in this agent's reference frame
        entity_pos = []
        for entity in world.landmarks:
            entity_pos.append(entity.state.p_pos - agent.state.p_pos)
        # entity colors (computed but not included in the returned vector)
        entity_color = []
        for entity in world.landmarks:
            entity_color.append(entity.color)
        # communication of all other agents
        comm = []
        for other in world.agents:
            if other is agent:
                continue
            comm.append(other.state.c)
        return np.concatenate([agent.state.p_vel] + entity_pos + [goal_color[1]] + comm)
4,043 | slc id dict for azimuth time test | from pathlib import Path
from typing import Callable
import pytest
test_dir = Path(__file__).parents[0]
TEST_DIR = test_dir.resolve()
def pytest_addoption(parser):
    """Register the ``--skip-isce3`` command-line flag with pytest."""
    parser.addoption(
        "--skip-isce3", action="store_true", default=False, help="skip tests which require ISCE3"
    )
def pytest_configure(config):
    """Declare the ``isce3`` marker so it does not trigger unknown-marker warnings."""
    config.addinivalue_line("markers", "isce3: mark test as requiring ISCE3 to run")
def pytest_collection_modifyitems(config, items):
    """Skip all ``isce3``-marked tests when ``--skip-isce3`` was given."""
    if config.getoption("--skip-isce3"):
        skip_isce3 = pytest.mark.skip(reason="--skip-isce3 option given")
        for item in items:
            if "isce3" in item.keywords:
                item.add_marker(skip_isce3)
@pytest.fixture(scope='session')
def test_dir_path() -> Path:
    """Absolute path of the directory containing this conftest."""
    return TEST_DIR
@pytest.fixture(scope='session')
def test_gunw_path_factory() -> Callable:
    """Session fixture returning a factory that maps a known location key to
    the path of a test GUNW product."""
    known_products = {
        'california-t71': 'S1-GUNW-D-R-071-tops-20200130_20200124-135156-34956N_32979N-PP-913f-v2_0_4.nc',
        'alaska': 'S1-GUNW-D-R-059-tops-20230320_20220418-180300-00179W_00051N-PP-c92e-v2_0_6.nc',
    }

    def factory(location: str = 'california-t71') -> Path:
        if location not in known_products:
            # Same behavior as the original if/elif chain for unknown keys.
            raise NotImplementedError
        return TEST_DIR / 'gunw_test_data' / known_products[location]

    return factory
@pytest.fixture(scope='session')
def test_gunw_json_path() -> Path:
    """Path to a sample GUNW metadata JSON used for schema validation tests."""
    p = TEST_DIR / 'gunw_test_data' / 'S1-GUNW-A-R-064-tops-20210723_20210711-015001-35393N_33512N-PP-6267-v2_0_4.json'
    return p
@pytest.fixture(scope='session')
def test_gunw_json_schema_path() -> Path:
    """Path to the GUNW JSON schema file."""
    return TEST_DIR / 'gunw_test_data' / 'gunw_schema.json'
@pytest.fixture(scope='session')
def gunw_azimuth_test():
    """Path to the GUNW product used by the azimuth-time interpolation tests."""
    test_data = TEST_DIR / 'gunw_azimuth_test_data'
    return test_data / 'S1-GUNW-A-R-064-tops-20210723_20210711-015000-00119W_00033N-PP-6267-v2_0_6.nc'
@pytest.fixture(scope='session')
def orbit_dict_for_azimuth_time_test():
    """Reference/secondary orbit EOF paths for the azimuth-time tests."""
    test_data = TEST_DIR / 'gunw_azimuth_test_data'
    return {'reference': test_data / 'S1B_OPER_AUX_POEORB_OPOD_20210812T111941_V20210722T225942_20210724T005942.EOF',
            'secondary': test_data / 'S1B_OPER_AUX_POEORB_OPOD_20210731T111940_V20210710T225942_20210712T005942.EOF'}
@pytest.fixture(scope='session')
def METHOD_NAME():
    """Reference/secondary SLC ids for the azimuth-time tests.

    The secondary acquisition spans two consecutive SLC frames.
    """
    test_data = TEST_DIR / 'gunw_azimuth_test_data'
    return {'reference': [test_data / 'S1B_IW_SLC__1SDV_20210723T014947_20210723T015014_027915_0354B4_B3A9'],
            'secondary': [test_data / 'S1B_IW_SLC__1SDV_20210711T014922_20210711T014949_027740_034F80_859D',
                          test_data / 'S1B_IW_SLC__1SDV_20210711T015011_20210711T015038_027740_034F80_376C']}
@pytest.fixture(scope='session')
def weather_model_dict_for_azimuth_time_test():
    """The order is important; will be closest to InSAR acq time so goes 2, 1, 3 AM."""
    test_data = TEST_DIR / 'gunw_azimuth_test_data' / 'weather_files'
    return {'HRRR': [test_data / 'HRRR_2021_07_23_T02_00_00_33N_36N_120W_115W.nc',
                     test_data / 'HRRR_2021_07_23_T01_00_00_33N_36N_120W_115W.nc',
                     test_data / 'HRRR_2021_07_23_T03_00_00_33N_36N_120W_115W.nc',
                     test_data / 'HRRR_2021_07_11_T02_00_00_33N_36N_120W_115W.nc',
                     test_data / 'HRRR_2021_07_11_T01_00_00_33N_36N_120W_115W.nc',
                     test_data / 'HRRR_2021_07_11_T03_00_00_33N_36N_120W_115W.nc'
                     ]}
@pytest.fixture(scope='session')
def weather_model_dict_for_center_time_test():
    """Order is important here; will be in chronological order with respect to closest date times"""
    test_data = TEST_DIR / 'gunw_azimuth_test_data' / 'weather_files'
    return {'HRRR': [test_data / 'HRRR_2021_07_23_T01_00_00_33N_36N_120W_115W.nc',
                     test_data / 'HRRR_2021_07_23_T02_00_00_33N_36N_120W_115W.nc',
                     test_data / 'HRRR_2021_07_11_T01_00_00_33N_36N_120W_115W.nc',
                     test_data / 'HRRR_2021_07_11_T02_00_00_33N_36N_120W_115W.nc',
                     ]
            }
@pytest.fixture(scope='session')
def orbit_paths_for_duplicate_orbit_xml_test():
    """Orbit EOF paths where the first three entries are deliberately the
    same file, to exercise de-duplication of overlapping orbit files."""
    test_data = TEST_DIR / 'data_for_overlapping_orbits'
    orbit_file_names = ['S1A_OPER_AUX_POEORB_OPOD_20230413T080643_V20230323T225942_20230325T005942.EOF',
                        'S1A_OPER_AUX_POEORB_OPOD_20230413T080643_V20230323T225942_20230325T005942.EOF',
                        'S1A_OPER_AUX_POEORB_OPOD_20230413T080643_V20230323T225942_20230325T005942.EOF',
                        'S1A_OPER_AUX_POEORB_OPOD_20230412T080821_V20230322T225942_20230324T005942.EOF']
    # Fix: the source was missing the closing bracket of this comprehension,
    # which is a syntax error.
    return [test_data / fn for fn in orbit_file_names]
4,044 | get registered | import logging
import os
from ddtrace.internal import agent
from ddtrace.internal import atexit
from ddtrace.internal import forksafe
from ddtrace.internal import periodic
from ddtrace.internal.logger import get_logger
from ddtrace.internal.remoteconfig._pubsub import PubSub
from ddtrace.internal.remoteconfig.client import RemoteConfigClient
from ddtrace.internal.remoteconfig.constants import REMOTE_CONFIG_AGENT_ENDPOINT
from ddtrace.internal.remoteconfig.utils import get_poll_interval_seconds
from ddtrace.internal.service import ServiceStatus
from ddtrace.internal.utils.time import StopWatch
from ddtrace.settings import _config as ddconfig
log = get_logger(__name__)
class RemoteConfigPoller(periodic.PeriodicService):
    """Remote configuration worker.

    This implements a finite-state machine that allows checking the agent for
    the expected endpoint, which could be enabled after the client is started.

    States: ``_agent_check`` (probe the agent for the RC endpoint) and
    ``_online`` (poll the endpoint); ``periodic`` dispatches to whichever
    state is current.
    """

    _worker_lock = forksafe.Lock()
    # Set to False after a fork so the child does not re-enable the poller.
    _enable = True

    def __init__(self):
        super(RemoteConfigPoller, self).__init__(interval=get_poll_interval_seconds())
        self._client = RemoteConfigClient()
        # Start in the agent-check state until the RC endpoint is discovered.
        self._state = self._agent_check
        self._parent_id = os.getpid()
        log.debug("RemoteConfigWorker created with polling interval %d", get_poll_interval_seconds())

    def _agent_check(self):
        # type: () -> None
        # Probe the agent's /info for the remote-config endpoint; transition
        # to the online state when found, otherwise log guidance.
        try:
            info = agent.info()
        except Exception:
            info = None
        if info:
            endpoints = info.get("endpoints", [])
            if endpoints and (
                REMOTE_CONFIG_AGENT_ENDPOINT in endpoints or ("/" + REMOTE_CONFIG_AGENT_ENDPOINT) in endpoints
            ):
                self._state = self._online
                return
        # Louder logging when debugging or RC is explicitly enabled.
        if ddconfig._debug_mode or ddconfig._remote_config_enabled:
            LOG_LEVEL = logging.WARNING
        else:
            LOG_LEVEL = logging.DEBUG
        log.log(
            LOG_LEVEL,
            "Agent is down or Remote Config is not enabled in the Agent\n"
            "Check your Agent version, you need an Agent running on 7.39.1 version or above.\n"
            "Check Your Remote Config environment variables on your Agent:\n"
            "DD_REMOTE_CONFIGURATION_ENABLED=true\n"
            "See: https://docs.datadoghq.com/agent/guide/how_remote_config_works/",
        )

    def _online(self):
        # type: () -> None
        # One polling cycle; falls back to the agent-check state on failure.
        with StopWatch() as sw:
            if not self._client.request():
                # An error occurred, so we transition back to the agent check
                self._state = self._agent_check
                return
        elapsed = sw.elapsed()
        # Warn when a single request took longer than the polling interval.
        if elapsed >= self.interval:
            log_level = logging.WARNING
        else:
            log_level = logging.DEBUG
        log.log(log_level, "request config in %.5fs to %s", elapsed, self._client.agent_url)

    def periodic(self):
        # type: () -> None
        # Dispatch to the current state handler.
        return self._state()

    def enable(self):
        # type: () -> bool
        # TODO: this is only temporary. DD_REMOTE_CONFIGURATION_ENABLED variable will be deprecated
        rc_env_enabled = ddconfig._remote_config_enabled
        if rc_env_enabled and self._enable:
            if self.status == ServiceStatus.RUNNING:
                return True
            self.start()
            # Re-start pubsub subscribers in forked children; stop at exit.
            forksafe.register(self.start_subscribers)
            atexit.register(self.disable)
            return True
        return False

    def start_subscribers(self):
        # type: () -> None
        """Subscribers need to be restarted when application forks"""
        # Prevent the child process from re-enabling the poller itself.
        self._enable = False
        log.debug("[%s][P: %s] Remote Config Poller fork. Starting Pubsub services", os.getpid(), os.getppid())
        self._client.renew_id()
        for pubsub in self._client.get_pubsubs():
            pubsub.restart_subscriber()

    def _poll_data(self, test_tracer=None):
        """Force subscribers to poll new data. This function is only used in tests"""
        for pubsub in self._client.get_pubsubs():
            pubsub._poll_data(test_tracer=test_tracer)

    def stop_subscribers(self, join=False):
        # type: (bool) -> None
        """
        Disable the remote config service and drop, remote config can be re-enabled
        by calling ``enable`` again.
        """
        log.debug(
            "[%s][P: %s] Remote Config Poller fork. Stopping Pubsub services",
            os.getpid(),
            self._parent_id,
        )
        for pubsub in self._client.get_pubsubs():
            pubsub.stop(join=join)

    def disable(self, join=False):
        # type: (bool) -> None
        # Stop subscribers and products first, then undo the fork/exit hooks
        # and stop the periodic service itself.
        self.stop_subscribers(join=join)
        self._client.reset_products()
        if self.status == ServiceStatus.STOPPED:
            return
        forksafe.unregister(self.start_subscribers)
        atexit.unregister(self.disable)
        self.stop()

    def _stop_service(self, *args, **kwargs):
        # type: (...) -> None
        self.stop_subscribers()
        if self.status == ServiceStatus.STOPPED or self._worker is None:
            return
        super(RemoteConfigPoller, self)._stop_service(*args, **kwargs)

    def update_product_callback(self, product, callback):
        """Some Products fork and restart their instances when application creates new process. In that case,
        we need to update the callback instance to ensure the instance of the child process receives correctly the
        Remote Configuration payloads.
        """
        return self._client.update_product_callback(product, callback)

    def register(self, product, pubsub_instance, skip_enabled=False):
        # type: (str, PubSub, bool) -> None
        """Register a product and start its pubsub subscriber if enabled."""
        try:
            # By enabling on registration we ensure we start the RCM client only
            # if there is at least one registered product.
            enabled = True
            if not skip_enabled:
                enabled = self.enable()
            if enabled:
                self._client.register_product(product, pubsub_instance)
                if not self._client.is_subscriber_running(pubsub_instance):
                    pubsub_instance.start_subscriber()
        except Exception:
            log.debug("error starting the RCM client", exc_info=True)

    def unregister(self, product):
        """Unregister a previously registered product (best-effort)."""
        try:
            self._client.unregister_product(product)
        except Exception:
            log.debug("error starting the RCM client", exc_info=True)

    def METHOD_NAME(self, product):
        """Return the pubsub instance registered for ``product``, if any."""
        return self._client._products.get(product)

    def __enter__(self):
        # type: () -> RemoteConfigPoller
        # Context-manager support: enable on entry, disable on exit.
        self.enable()
        return self

    def __exit__(self, *args):
        # type: (...) -> None
        self.disable()
# Module-level singleton used by the rest of the library.
remoteconfig_poller = RemoteConfigPoller()
4,045 | list steps | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to use the AWS SDK for Python (Boto3) with the Amazon EMR API to create
and manage clusters and job steps.
"""
import logging
from botocore.exceptions import ClientError
logger = logging.getLogger(__name__)
# snippet-start:[python.example_code.emr.RunJobFlow]
def run_job_flow(
        name, log_uri, keep_alive, applications, job_flow_role, service_role,
        security_groups, steps, emr_client):
    """Run a job flow: create a cluster of instances and queue steps on it.

    Steps added to the cluster are run as soon as the cluster is ready.
    This example uses the 'emr-5.30.1' release; recent releases are listed at
    https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-release-components.html.

    :param name: The name of the cluster.
    :param log_uri: The URI where logs are stored, e.g. 's3://my-log-bucket'.
    :param keep_alive: When True, the cluster is put into a Waiting state after
                       all steps are run. When False, the cluster terminates
                       itself when the step queue is empty.
    :param applications: The applications to install on each instance in the
                         cluster, such as Hive or Spark.
    :param job_flow_role: The IAM role assumed by the cluster.
    :param service_role: The IAM role assumed by the service.
    :param security_groups: The security groups to assign to the cluster
                            instances. Amazon EMR adds all needed rules to
                            these groups, so they can be empty if you require
                            only the default rules.
    :param steps: The job flow steps to add to the cluster, run in order.
    :param emr_client: The Boto3 EMR client object.
    :return: The ID of the newly created cluster.
    """
    instance_config = {
        'MasterInstanceType': 'm5.xlarge',
        'SlaveInstanceType': 'm5.xlarge',
        'InstanceCount': 3,
        'KeepJobFlowAliveWhenNoSteps': keep_alive,
        'EmrManagedMasterSecurityGroup': security_groups['manager'].id,
        'EmrManagedSlaveSecurityGroup': security_groups['worker'].id,
    }
    # Each step is submitted through command-runner.jar as a cluster-mode
    # spark-submit invocation.
    step_config = [{
        'Name': step['name'],
        'ActionOnFailure': 'CONTINUE',
        'HadoopJarStep': {
            'Jar': 'command-runner.jar',
            'Args': ['spark-submit', '--deploy-mode', 'cluster',
                     step['script_uri'], *step['script_args']]
        }
    } for step in steps]
    app_config = [{'Name': app} for app in applications]
    try:
        response = emr_client.run_job_flow(
            Name=name,
            LogUri=log_uri,
            ReleaseLabel='emr-5.30.1',
            Instances=instance_config,
            Steps=step_config,
            Applications=app_config,
            JobFlowRole=job_flow_role.name,
            ServiceRole=service_role.name,
            EbsRootVolumeSize=10,
            VisibleToAllUsers=True
        )
    except ClientError:
        logger.exception("Couldn't create cluster.")
        raise
    cluster_id = response['JobFlowId']
    logger.info("Created cluster %s.", cluster_id)
    return cluster_id
# snippet-end:[python.example_code.emr.RunJobFlow]
# snippet-start:[python.example_code.emr.DescribeCluster]
def describe_cluster(cluster_id, emr_client):
    """Get detailed information about a cluster.

    :param cluster_id: The ID of the cluster to describe.
    :param emr_client: The Boto3 EMR client object.
    :return: The retrieved cluster information.
    """
    try:
        cluster = emr_client.describe_cluster(ClusterId=cluster_id)['Cluster']
    except ClientError:
        logger.exception("Couldn't get data for cluster %s.", cluster_id)
        raise
    logger.info("Got data for cluster %s.", cluster['Name'])
    return cluster
# snippet-end:[python.example_code.emr.DescribeCluster]
# snippet-start:[python.example_code.emr.TerminateJobFlows]
def terminate_cluster(cluster_id, emr_client):
    """Terminate a cluster, shutting down all of its instances.

    This cannot be undone. Any data not saved elsewhere, such as in an
    Amazon S3 bucket, is lost.

    :param cluster_id: The ID of the cluster to terminate.
    :param emr_client: The Boto3 EMR client object.
    """
    try:
        emr_client.terminate_job_flows(JobFlowIds=[cluster_id])
    except ClientError:
        logger.exception("Couldn't terminate cluster %s.", cluster_id)
        raise
    logger.info("Terminated cluster %s.", cluster_id)
# snippet-end:[python.example_code.emr.TerminateJobFlows]
# snippet-start:[python.example_code.emr.AddJobFlowSteps]
def add_step(cluster_id, name, script_uri, script_args, emr_client):
    """Add a Spark job step to the cluster; it runs as soon as it is added.

    :param cluster_id: The ID of the cluster.
    :param name: The name of the step.
    :param script_uri: The URI where the Python script is stored.
    :param script_args: Arguments to pass to the Python script.
    :param emr_client: The Boto3 EMR client object.
    :return: The ID of the newly added step.
    """
    spark_step = {
        'Name': name,
        'ActionOnFailure': 'CONTINUE',
        'HadoopJarStep': {
            'Jar': 'command-runner.jar',
            'Args': ['spark-submit', '--deploy-mode', 'cluster',
                     script_uri, *script_args]
        }
    }
    try:
        response = emr_client.add_job_flow_steps(
            JobFlowId=cluster_id, Steps=[spark_step])
    except ClientError:
        logger.exception("Couldn't start step %s with URI %s.", name, script_uri)
        raise
    step_id = response['StepIds'][0]
    logger.info("Started step with ID %s", step_id)
    return step_id
# snippet-end:[python.example_code.emr.AddJobFlowSteps]
# snippet-start:[python.example_code.emr.ListSteps]
def METHOD_NAME(cluster_id, emr_client):
    """Get the list of steps for the specified cluster.

    All steps are returned, including completed and failed steps.

    :param cluster_id: The ID of the cluster.
    :param emr_client: The Boto3 EMR client object.
    :return: The list of steps for the specified cluster.
    """
    try:
        steps = emr_client.METHOD_NAME(ClusterId=cluster_id)['Steps']
    except ClientError:
        logger.exception("Couldn't get steps for cluster %s.", cluster_id)
        raise
    logger.info("Got %s steps for cluster %s.", len(steps), cluster_id)
    return steps
# snippet-end:[python.example_code.emr.ListSteps]
# snippet-start:[python.example_code.emr.DescribeStep]
def describe_step(cluster_id, step_id, emr_client):
    """Get detailed information about a step, including its current state.

    :param cluster_id: The ID of the cluster.
    :param step_id: The ID of the step.
    :param emr_client: The Boto3 EMR client object.
    :return: The retrieved information about the specified step.
    """
    try:
        step = emr_client.describe_step(ClusterId=cluster_id, StepId=step_id)['Step']
    except ClientError:
        logger.exception("Couldn't get data for step %s.", step_id)
        raise
    logger.info("Got data for step %s.", step_id)
    return step
# snippet-end:[python.example_code.emr.DescribeStep]
import io
from typing import Any, Iterable, List, Optional
from urllib.parse import urlencode
from multidict import MultiDict, MultiDictProxy
from . import hdrs, multipart, payload
from .helpers import guess_filename
from .payload import Payload
__all__ = ("FormData",)
class FormData:
    """Helper class for form body generation.

    Supports multipart/form-data and application/x-www-form-urlencoded.
    The only code change in this block is removing a stray trailing "|"
    from the final ``return`` line, which was a syntax error.
    """

    def __init__(
        self,
        fields: Iterable[Any] = (),
        quote_fields: bool = True,
        charset: Optional[str] = None,
        boundary: Optional[str] = None,
    ) -> None:
        self._boundary = boundary
        self._writer = multipart.MultipartWriter("form-data", boundary=self._boundary)
        self._fields: List[Any] = []
        # Flips to True as soon as any field requires multipart encoding.
        self._is_multipart = False
        self._is_processed = False
        self._quote_fields = quote_fields
        self._charset = charset

        if isinstance(fields, dict):
            fields = list(fields.items())
        elif not isinstance(fields, (list, tuple)):
            fields = (fields,)
        self.METHOD_NAME(*fields)

    @property
    def is_multipart(self) -> bool:
        """True when the accumulated fields require multipart/form-data."""
        return self._is_multipart

    def add_field(
        self,
        name: str,
        value: Any,
        *,
        content_type: Optional[str] = None,
        filename: Optional[str] = None,
        content_transfer_encoding: Optional[str] = None,
    ) -> None:
        """Add one form field; any per-field metadata forces multipart mode."""
        if isinstance(value, io.IOBase):
            self._is_multipart = True
        elif isinstance(value, (bytes, bytearray, memoryview)):
            # Raw byte values default their filename to the field name so the
            # part is transmitted as a file upload.
            if filename is None and content_transfer_encoding is None:
                filename = name

        type_options: MultiDict[str] = MultiDict({"name": name})
        if filename is not None and not isinstance(filename, str):
            raise TypeError(
                "filename must be an instance of str. " "Got: %s" % filename
            )
        if filename is None and isinstance(value, io.IOBase):
            filename = guess_filename(value, name)
        if filename is not None:
            type_options["filename"] = filename
            self._is_multipart = True

        headers = {}
        if content_type is not None:
            if not isinstance(content_type, str):
                raise TypeError(
                    "content_type must be an instance of str. " "Got: %s" % content_type
                )
            headers[hdrs.CONTENT_TYPE] = content_type
            self._is_multipart = True
        if content_transfer_encoding is not None:
            if not isinstance(content_transfer_encoding, str):
                raise TypeError(
                    "content_transfer_encoding must be an instance"
                    " of str. Got: %s" % content_transfer_encoding
                )
            headers[hdrs.CONTENT_TRANSFER_ENCODING] = content_transfer_encoding
            self._is_multipart = True

        self._fields.append((type_options, headers, value))

    def METHOD_NAME(self, *fields: Any) -> None:
        """Add many fields: file objects, multidicts, or (name, value) pairs."""
        to_add = list(fields)

        while to_add:
            rec = to_add.pop(0)

            if isinstance(rec, io.IOBase):
                k = guess_filename(rec, "unknown")
                self.add_field(k, rec)  # type: ignore[arg-type]

            elif isinstance(rec, (MultiDictProxy, MultiDict)):
                # Flatten multidicts into individual (name, value) pairs.
                to_add.extend(rec.items())

            elif isinstance(rec, (list, tuple)) and len(rec) == 2:
                k, fp = rec
                self.add_field(k, fp)  # type: ignore[arg-type]

            else:
                raise TypeError(
                    "Only io.IOBase, multidict and (name, file) "
                    "pairs allowed, use .add_field() for passing "
                    "more complex parameters, got {!r}".format(rec)
                )

    def _gen_form_urlencoded(self) -> payload.BytesPayload:
        # form data (x-www-form-urlencoded)
        data = []
        for type_options, _, value in self._fields:
            data.append((type_options["name"], value))

        charset = self._charset if self._charset is not None else "utf-8"

        if charset == "utf-8":
            content_type = "application/x-www-form-urlencoded"
        else:
            content_type = "application/x-www-form-urlencoded; " "charset=%s" % charset

        return payload.BytesPayload(
            urlencode(data, doseq=True, encoding=charset).encode(),
            content_type=content_type,
        )

    def _gen_form_data(self) -> multipart.MultipartWriter:
        """Encode a list of fields using the multipart/form-data MIME format"""
        if self._is_processed:
            raise RuntimeError("Form data has been processed already")
        for dispparams, headers, value in self._fields:
            try:
                if hdrs.CONTENT_TYPE in headers:
                    part = payload.get_payload(
                        value,
                        content_type=headers[hdrs.CONTENT_TYPE],
                        headers=headers,
                        encoding=self._charset,
                    )
                else:
                    part = payload.get_payload(
                        value, headers=headers, encoding=self._charset
                    )
            except Exception as exc:
                raise TypeError(
                    "Can not serialize value type: %r\n "
                    "headers: %r\n value: %r" % (type(value), headers, value)
                ) from exc

            if dispparams:
                part.set_content_disposition(
                    "form-data", quote_fields=self._quote_fields, **dispparams
                )
                # FIXME cgi.FieldStorage doesn't likes body parts with
                # Content-Length which were sent via chunked transfer encoding
                assert part.headers is not None
                part.headers.popall(hdrs.CONTENT_LENGTH, None)

            self._writer.append_payload(part)

        self._is_processed = True
        return self._writer

    def __call__(self) -> Payload:
        if self._is_multipart:
            return self._gen_form_data()
        else:
            return self._gen_form_urlencoded()
import datetime as dt
import rules
from django.utils.timezone import now
from pretalx.person.permissions import (
can_change_submissions,
is_administrator,
is_reviewer,
)
from pretalx.submission.permissions import (
can_be_reviewed,
can_view_all_reviews,
can_view_reviews,
is_review_author,
reviewer_can_change_submissions,
)
@rules.predicate
def can_change_event_settings(user, obj):
    """True when ``user`` may change the settings of ``obj``'s event."""
    event = getattr(obj, "event", None)
    if not user or user.is_anonymous or not obj or not event:
        return False
    if user.is_administrator:
        return True
    # Prefer the cached per-event permission set when it has been computed.
    cached = user.team_permissions.get(event.slug)
    if cached is not None:
        return "can_change_event_settings" in cached
    return event.teams.filter(members__in=[user], can_change_event_settings=True).exists()
@rules.predicate
def METHOD_NAME(user, obj):
    """True when ``user`` may change the organiser owning ``obj``."""
    event = getattr(obj, "event", None)
    if event:
        # Resolve event-scoped objects to their organiser.
        obj = event.organiser
    if user.is_administrator:
        return True
    return user.teams.filter(organiser=obj, METHOD_NAME=True).exists()
@rules.predicate
def can_change_any_organiser_settings(user, obj):
    """True when ``user`` may change settings for at least one organiser."""
    if user.is_anonymous:
        return False
    return user.is_administrator or user.teams.filter(METHOD_NAME=True).exists()
@rules.predicate
def can_create_events(user, obj):
    """True when ``user`` may create new events."""
    # Guard against anonymous users for consistency with the sibling
    # predicates (e.g. can_change_any_organiser_settings): an anonymous
    # user object has no ``is_administrator`` attribute.
    if not user or user.is_anonymous:
        return False
    return user.is_administrator or user.teams.filter(can_create_events=True).exists()
@rules.predicate
def can_change_teams(user, obj):
    """True when ``user`` may manage teams for the given organiser/team/event."""
    from pretalx.event.models import Organiser, Team

    # Normalise: a Team delegates to its organiser.
    if isinstance(obj, Team):
        obj = obj.organiser
    if isinstance(obj, Organiser):
        return user.teams.filter(organiser=obj, can_change_teams=True).exists()
    event = getattr(obj, "event", None)
    if not user or user.is_anonymous or not obj or not event:
        return False
    if user.is_administrator:
        return True
    cached = user.team_permissions.get(event.slug)
    if cached is not None:
        return "can_change_teams" in cached
    return event.teams.filter(members__in=[user], can_change_teams=True).exists()
@rules.predicate
def reviews_are_open(user, obj):
    """True while the event's active review phase accepts reviews."""
    phase = obj.event.active_review_phase
    return phase and phase.can_review
@rules.predicate
def can_edit_mail(user, obj):
    """Mails are editable only while unsent (``sent`` is exactly None)."""
    sent_at = getattr(obj, "sent", False)
    return sent_at is None
@rules.predicate
def can_mark_speakers_arrived(user, obj):
    """Arrival tracking opens one day before the event and closes at its end."""
    event = obj.event
    today = now().date()
    return (event.date_from - dt.timedelta(days=1)) <= today <= event.date_to
@rules.predicate
def is_event_over(user, obj):
    """True once the event's final day has passed."""
    return obj.event.date_to < now().date()
@rules.predicate
def can_view_speaker_names(user, obj):
    """ONLY in use with users who don't have change permissions."""
    event = obj.event
    # Use the local ``event`` consistently (the original mixed obj.event/event).
    reviewer_teams = event.teams.filter(members__in=[user], is_reviewer=True)
    # A reviewer is blocked only when *every* one of their reviewer teams
    # forces speaker names to be hidden. Idiom fix: generator instead of an
    # unnecessary list comprehension inside all().
    if reviewer_teams and all(
        team.force_hide_speaker_names for team in reviewer_teams
    ):
        return False
    return event.active_review_phase and event.active_review_phase.can_see_speaker_names
@rules.predicate
def can_view_reviewer_names(user, obj):
    """True when the active review phase exposes reviewer names."""
    phase = obj.event.active_review_phase
    return phase and phase.can_see_reviewer_names
@rules.predicate
def can_add_tags(user, obj):
    """True when the review phase lets reviewers create new tags."""
    phase = obj.event.active_review_phase
    return phase and phase.can_tag_submissions == "create_tags"
@rules.predicate
def can_change_tags(user, obj):
    """True when the review phase lets reviewers apply existing tags."""
    phase = obj.event.active_review_phase
    return phase and phase.can_tag_submissions == "use_tags"
# --- Permission registration. The only code change in this block is removing
# --- a stray trailing "|" from the final registration (a syntax error).
# General organiser area access.
rules.add_perm(
    "orga.view_orga_area",
    can_change_submissions | can_change_event_settings | is_reviewer,
)
rules.add_perm("orga.change_settings", can_change_event_settings)
rules.add_perm("orga.change_organiser_settings", METHOD_NAME)
rules.add_perm("orga.view_organisers", can_change_any_organiser_settings)
rules.add_perm("orga.change_teams", is_administrator | can_change_teams)
rules.add_perm("orga.view_submission_cards", can_change_submissions)
# CfP configuration: questions, submission types, tracks.
rules.add_perm("orga.edit_cfp", can_change_event_settings)
rules.add_perm("orga.view_question", can_change_submissions)
rules.add_perm("orga.edit_question", can_change_event_settings)
rules.add_perm("orga.remove_question", can_change_event_settings)
rules.add_perm("orga.view_submission_type", can_change_submissions)
rules.add_perm("orga.edit_submission_type", can_change_event_settings)
rules.add_perm("orga.remove_submission_type", can_change_event_settings)
rules.add_perm("orga.view_tracks", can_change_submissions)
rules.add_perm("orga.view_track", can_change_submissions)
rules.add_perm("orga.edit_track", can_change_event_settings)
rules.add_perm("orga.remove_track", can_change_event_settings)
# Tags: reviewers may create or use tags depending on the review phase.
rules.add_perm("orga.add_tags", can_change_submissions | (is_reviewer & can_add_tags))
rules.add_perm(
    "orga.edit_tags", can_change_submissions | (is_reviewer & can_change_tags)
)
rules.add_perm(
    "orga.remove_tags", can_change_submissions | (is_reviewer & can_change_tags)
)
rules.add_perm("orga.view_access_codes", can_change_submissions)
rules.add_perm("orga.view_access_code", can_change_submissions)
rules.add_perm("orga.edit_access_code", can_change_event_settings)
rules.add_perm("orga.remove_access_code", can_change_event_settings)
# Mail handling.
rules.add_perm("orga.view_mails", can_change_submissions)
rules.add_perm("orga.send_mails", can_change_submissions)
rules.add_perm("orga.edit_mails", can_change_submissions & can_edit_mail)
rules.add_perm("orga.purge_mails", can_change_submissions)
rules.add_perm("orga.view_mail_templates", can_change_submissions)
rules.add_perm("orga.edit_mail_templates", can_change_submissions)
# Reviews.
rules.add_perm("orga.view_review_dashboard", can_change_submissions | is_reviewer)
rules.add_perm(
    "orga.view_reviews", can_change_submissions | (is_reviewer & can_view_reviews)
)
rules.add_perm(
    "orga.view_all_reviews",
    can_change_submissions | (is_reviewer & can_view_all_reviews),
)
rules.add_perm("orga.perform_reviews", is_reviewer & reviews_are_open)
rules.add_perm(
    "orga.remove_review", is_administrator | (is_review_author & can_be_reviewed)
)
# Schedule and rooms.
rules.add_perm(
    "orga.view_schedule",
    can_change_submissions | (is_reviewer & can_view_speaker_names),
)
rules.add_perm("orga.release_schedule", can_change_submissions)
rules.add_perm("orga.edit_schedule", can_change_submissions)
rules.add_perm("orga.schedule_talk", can_change_submissions)
rules.add_perm("orga.view_room", can_change_submissions)
rules.add_perm("orga.edit_room", can_change_submissions)
# Speakers.
rules.add_perm(
    "orga.view_speakers",
    can_change_submissions | (is_reviewer & can_view_speaker_names),
)
rules.add_perm(
    "orga.view_speaker", can_change_submissions | (is_reviewer & can_view_speaker_names)
)
rules.add_perm(
    "orga.view_reviewer_names",
    can_change_submissions | (is_reviewer & can_view_reviewer_names),
)
rules.add_perm("orga.change_speaker", can_change_submissions)
# Submissions.
rules.add_perm("orga.view_submissions", can_change_submissions | is_reviewer)
rules.add_perm("orga.create_submission", can_change_submissions)
rules.add_perm("orga.change_submissions", can_change_submissions)
rules.add_perm(
    "orga.change_submission_state",
    can_change_submissions | (is_reviewer & reviewer_can_change_submissions),
)
rules.add_perm("orga.view_information", can_change_submissions)
rules.add_perm("orga.change_information", can_change_event_settings)
rules.add_perm("orga.create_events", can_create_events)
rules.add_perm("orga.change_plugins", can_change_event_settings)
rules.add_perm(
    "orga.mark_speakers_arrived", can_change_submissions & can_mark_speakers_arrived
)
rules.add_perm("orga.see_speakers_arrival", can_change_submissions & is_event_over)
import io
import os
import os.path
import attr
import pytest
import salt.config
import salt.loader
from salt.exceptions import SaltRenderError
REQUISITES = ["require", "require_in", "use", "use_in", "watch", "watch_in"]
@attr.s
class Renderer:
    """Callable that renders stateconf sls text against a throwaway minion
    config rooted under ``tmp_path``."""

    tmp_path = attr.ib()

    def __call__(
        self, content, sls="", saltenv="base", argline="-G yaml . jinja", **kws
    ):
        state_tree_dir = self.tmp_path / "state_tree"
        cache_dir = self.tmp_path / "cachedir"
        state_tree_dir.mkdir()
        cache_dir.mkdir()

        # Minimal local-file-client minion configuration for rendering.
        config = salt.config.minion_config(None)
        config.update(
            root_dir=str(self.tmp_path),
            state_events=False,
            id="match",
            file_client="local",
            file_roots=dict(base=[str(state_tree_dir)]),
            cachedir=str(cache_dir),
            test=False,
        )
        _renderers = salt.loader.render(config, {"config.get": lambda a, b: False})
        return _renderers["stateconf"](
            io.StringIO(content),
            saltenv=saltenv,
            sls=sls,
            argline=argline,
            renderers=salt.loader.render(config, {}),
            **kws
        )
@pytest.fixture
def renderer(tmp_path):
    """Provide a Renderer bound to pytest's per-test tmp_path."""
    return Renderer(tmp_path)
def test_state_config(renderer):
    # Dot-prefixed state-config ids get prefixed with the sls name and their
    # values become visible to later states as Jinja context variables.
    # NOTE(review): YAML indentation inside the literal reconstructed — confirm.
    result = renderer(
        """
.sls_params:
  stateconf.set:
    - name1: value1
    - name2: value2
.extra:
  stateconf:
    - set
    - name: value
# --- end of state config ---
test:
  cmd.run:
    - name: echo name1={{sls_params.name1}} name2={{sls_params.name2}} {{extra.name}}
    - cwd: /
""",
        sls="test",
    )
    assert len(result) == 3
    assert "test::sls_params" in result and "test" in result
    assert "test::extra" in result
    assert (
        result["test"]["cmd.run"][0]["name"] == "echo name1=value1 name2=value2 value"
    )
def test_sls_dir(renderer):
    # {{sls_dir}} expands to the directory of the sls file with OS separators.
    result = renderer(
        """
test:
  cmd.run:
    - name: echo sls_dir={{sls_dir}}
    - cwd: /
""",
        sls="path.to.sls",
    )
    assert result["test"]["cmd.run"][0]["name"] == "echo sls_dir=path{}to".format(
        os.sep
    )
def test_states_declared_with_shorthand_no_args(renderer):
    # Shorthand "id:\n  module.func" with no args must yield an empty arg list.
    result = renderer(
        """
test:
  cmd.run:
    - name: echo testing
    - cwd: /
test1:
  pkg.installed
test2:
  user.present
"""
    )
    assert len(result) == 3
    for args in (result["test1"]["pkg.installed"], result["test2"]["user.present"]):
        assert isinstance(args, list)
        assert len(args) == 0
    assert result["test"]["cmd.run"][0]["name"] == "echo testing"
def test_adding_state_name_arg_for_dot_state_id(renderer):
    # Dot-prefixed ids get a default ``name`` arg (the bare id) unless one is set.
    result = renderer(
        """
.test:
  pkg.installed:
    - cwd: /
.test2:
  pkg.installed:
    - name: vim
""",
        sls="test",
    )
    assert result["test::test"]["pkg.installed"][0]["name"] == "test"
    assert result["test::test2"]["pkg.installed"][0]["name"] == "vim"
def test_state_prefix(renderer):
    # Only dot-prefixed state ids are namespaced with the sls name.
    result = renderer(
        """
.test:
  cmd.run:
    - name: echo renamed
    - cwd: /
state_id:
  cmd:
    - run
    - name: echo not renamed
    - cwd: /
""",
        sls="test",
    )
    assert len(result) == 2
    assert "test::test" in result
    assert "state_id" in result
@pytest.mark.parametrize("req", REQUISITES)
def test_dot_state_id_in_requisites(req, renderer):
    # Dot-id references inside every requisite kind expand to the prefixed id.
    result = renderer(
        """
.test:
  cmd.run:
    - name: echo renamed
    - cwd: /
state_id:
  cmd.run:
    - name: echo not renamed
    - cwd: /
    - {}:
      - cmd: .test
""".format(
            req
        ),
        sls="test",
    )
    assert len(result) == 2
    assert "test::test" in result
    assert "state_id" in result
    assert result["state_id"]["cmd.run"][2][req][0]["cmd"] == "test::test"
@pytest.mark.parametrize("req", REQUISITES)
def test_relative_include_with_requisites(req, renderer):
    # ".utils" resolves relative to the including sls ("test.work" -> "test.utils").
    result = renderer(
        """
include:
  - some.helper
  - .utils
state_id:
  cmd.run:
    - name: echo test
    - cwd: /
    - {}:
      - cmd: .utils::some_state
""".format(
            req
        ),
        sls="test.work",
    )
    assert result["include"][1] == {"base": "test.utils"}
    assert result["state_id"]["cmd.run"][2][req][0]["cmd"] == "test.utils::some_state"
def test_relative_include_and_extend(renderer):
    # Relative sls references inside ``extend`` expand like includes do.
    result = renderer(
        """
include:
  - some.helper
  - .utils
extend:
  .utils::some_state:
    cmd.run:
      - name: echo overridden
""",
        sls="test.work",
    )
    assert "test.utils::some_state" in result["extend"]
@pytest.mark.parametrize("req", REQUISITES)
def test_multilevel_relative_include_with_requisites(req, renderer):
    # Each extra leading dot climbs one package level from "test.nested.work".
    result = renderer(
        """
include:
  - .shared
  - ..utils
  - ...helper
state_id:
  cmd.run:
    - name: echo test
    - cwd: /
    - {}:
      - cmd: ..utils::some_state
""".format(
            req
        ),
        sls="test.nested.work",
    )
    assert result["include"][0] == {"base": "test.nested.shared"}
    assert result["include"][1] == {"base": "test.utils"}
    assert result["include"][2] == {"base": "helper"}
    assert result["state_id"]["cmd.run"][2][req][0]["cmd"] == "test.utils::some_state"
def test_multilevel_relative_include_beyond_top_level(renderer):
    # Climbing above the top-level package must raise a render error.
    pytest.raises(
        SaltRenderError,
        renderer,
        """
include:
  - ...shared
""",
        sls="test.work",
    )
def test_start_state_generation(renderer):
    # "-s" generates a start state that all top-level states implicitly require.
    result = renderer(
        """
A:
  cmd.run:
    - name: echo hello
    - cwd: /
B:
  cmd.run:
    - name: echo world
    - cwd: /
""",
        sls="test",
        argline="-so yaml . jinja",
    )
    assert len(result) == 4
    assert result["test::start"]["stateconf.set"][0]["require_in"][0]["cmd"] == "A"
def METHOD_NAME(renderer):
    # A goal state is appended that requires every state in the sls file.
    result = renderer(
        """
{% for sid in "ABCDE": %}
{{sid}}:
  cmd.run:
    - name: echo this is {{sid}}
    - cwd: /
{% endfor %}
""",
        sls="test.goalstate",
        argline="yaml . jinja",
    )
    assert len(result) == len("ABCDE") + 1
    reqs = result["test.goalstate::goal"]["stateconf.set"][0]["require"]
    assert {next(iter(i.values())) for i in reqs} == set("ABCDE")
def test_implicit_require_with_goal_state(renderer):
    # "-o" chains states with implicit ordering requires and adds a goal state;
    # explicit requires are preserved alongside the injected ordering require.
    result = renderer(
        """
{% for sid in "ABCDE": %}
{{sid}}:
  cmd.run:
    - name: echo this is {{sid}}
    - cwd: /
{% endfor %}
F:
  cmd.run:
    - name: echo this is F
    - cwd: /
    - require:
      - cmd: A
      - cmd: B
G:
  cmd.run:
    - name: echo this is G
    - cwd: /
    - require:
      - cmd: D
      - cmd: F
""",
        sls="test",
        argline="-o yaml . jinja",
    )
    # Each state's first require points at its immediate predecessor.
    sids = "ABCDEFG"[::-1]
    for i, sid in enumerate(sids):
        if i < len(sids) - 1:
            assert result[sid]["cmd.run"][2]["require"][0]["cmd"] == sids[i + 1]
    F_args = result["F"]["cmd.run"]
    assert len(F_args) == 3
    F_req = F_args[2]["require"]
    assert len(F_req) == 3
    assert F_req[1]["cmd"] == "A"
    assert F_req[2]["cmd"] == "B"
    G_args = result["G"]["cmd.run"]
    assert len(G_args) == 3
    G_req = G_args[2]["require"]
    assert len(G_req) == 3
    assert G_req[1]["cmd"] == "D"
    assert G_req[2]["cmd"] == "F"
    goal_args = result["test::goal"]["stateconf.set"]
    assert len(goal_args) == 1
    assert [next(iter(i.values())) for i in goal_args[0]["require"]] == list("ABCDEFG")
def test_slsdir(renderer):
    # {{slspath}} expands to the sls file's path using "/" separators.
    result = renderer(
        """
formula/woot.sls:
  cmd.run:
    - name: echo {{ slspath }}
    - cwd: /
""",
        sls="formula.woot",
        argline="yaml . jinja",
    )
    r = result["formula/woot.sls"]["cmd.run"][0]["name"]
    # Stray trailing "|" removed from the original assertion line.
    assert r == "echo formula/woot"
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LossScaleManager classes.."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.mixed_precision.python import loss_scale_manager as lsm_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _GetExampleIter(inputs):
    """Build a one-shot dataset iterator over ``inputs`` for update steps."""
    return dataset_ops.make_one_shot_iterator(
        dataset_ops.Dataset.from_tensor_slices(inputs))
class FixedLossScaleManagerTest(test.TestCase):
    """Tests that FixedLossScaleManager keeps a constant loss scale."""

    @test_util.run_in_graph_and_eager_modes
    def test_basic(self):
        # Feed 10 "finite" then 10 "non-finite" flags; the scale must not move.
        itr = _GetExampleIter([True] * 10 + [False] * 10)
        loss_scale = 1000
        lsm = lsm_lib.FixedLossScaleManager(loss_scale)
        update_fn = lambda: lsm.update_loss_scale(itr.get_next())
        self.evaluate(variables.global_variables_initializer())
        # In graph mode the update op is built once and re-evaluated;
        # in eager mode the update function is called directly each step.
        if not context.executing_eagerly():
            update_op = update_fn()
        for _ in range(10):
            if context.executing_eagerly():
                update_fn()
            else:
                self.evaluate(update_op)
            self.assertEqual(loss_scale, self.evaluate(lsm.get_loss_scale()))
class ExponentialUpdateLossScaleManagerTest(test.TestCase):
    """Tests ExponentialUpdateLossScaleManager's doubling/halving policy."""

    def _test_helper(self,
                     inputs,
                     expected_outputs,
                     init_loss_scale=1,
                     incr_every_n_step=2,
                     decr_every_n_nan_or_inf=2):
        # Drives the manager with a sequence of finite-ness flags (``inputs``,
        # True = finite gradients) and checks the loss scale observed after
        # each update step against ``expected_outputs``.
        ratio = 2
        lsm = lsm_lib.ExponentialUpdateLossScaleManager(
            init_loss_scale=init_loss_scale,
            incr_every_n_steps=incr_every_n_step,
            decr_every_n_nan_or_inf=decr_every_n_nan_or_inf,
            incr_ratio=ratio,
            decr_ratio=1. / ratio)
        itr = _GetExampleIter(inputs)
        update_fn = lambda: lsm.update_loss_scale(itr.get_next())
        self.evaluate(variables.global_variables_initializer())
        actual_outputs = []
        # Graph mode builds the op once; eager mode calls the fn per step.
        if not context.executing_eagerly():
            update_op = update_fn()
        for _ in range(len(inputs)):
            if context.executing_eagerly():
                update_fn()
            else:
                self.evaluate(update_op)
            actual_outputs.append(self.evaluate(lsm.get_loss_scale()))
        self.assertEqual(actual_outputs, expected_outputs)

    @test_util.run_in_graph_and_eager_modes
    def test_increase_every_n_steps(self):
        inputs = [True] * 6
        expected_outputs = [1, 2, 2, 4, 4, 8]
        self._test_helper(inputs, expected_outputs)

    @test_util.run_in_graph_and_eager_modes
    def test_keep_increasing_until_capped(self):
        init_loss_scale = np.finfo(np.float32).max / 4 + 10
        max_float = np.finfo(np.float32).max
        inputs = [True] * 6
        # Output is capped the 2nd time it doubles.
        expected_outputs = [
            init_loss_scale, init_loss_scale * 2, init_loss_scale * 2, max_float,
            max_float, max_float
        ]
        self._test_helper(inputs, expected_outputs, init_loss_scale)

    @test_util.run_in_graph_and_eager_modes
    def test_decrease_every_n_steps(self):
        inputs = [False] * 6
        init_loss_scale = 1024
        expected_outputs = [1024, 512, 512, 256, 256, 128]
        self._test_helper(inputs, expected_outputs, init_loss_scale)

    @test_util.run_in_graph_and_eager_modes
    def test_keep_decreasing_until_one(self):
        inputs = [False] * 10
        init_loss_scale = 16
        # The scale floors at 1 and never drops below it.
        expected_outputs = [16, 8, 8, 4, 4, 2, 2, 1, 1, 1]
        self._test_helper(inputs, expected_outputs, init_loss_scale)

    @test_util.run_in_graph_and_eager_modes
    def test_incr_bad_step_clear_good_step(self):
        # A non-finite step resets the count of consecutive good steps.
        inputs = [True, True, True, False, True]
        expected_outputs = [1, 2, 2, 2, 2]
        self._test_helper(inputs, expected_outputs)

    @test_util.run_in_graph_and_eager_modes
    def test_incr_good_step_does_not_clear_bad_step(self):
        # Good steps do not reset the count of observed non-finite steps.
        inputs = [True, True, True, False, True, False]
        expected_outputs = [1, 2, 2, 2, 2, 1]
        self._test_helper(inputs, expected_outputs)

    @test_util.run_in_graph_and_eager_modes
    def test_trigger_loss_scale_update_each_step(self):
        """Test when incr_every_n_step and decr_every_n_nan_or_inf is 1."""
        init_loss_scale = 1
        incr_every_n_step = 1
        decr_every_n_nan_or_inf = 1
        inputs = [True] * 3 + [False, True, True]
        expected_outputs = [2, 4, 8, 4, 8, 16]
        self._test_helper(inputs, expected_outputs, init_loss_scale,
                          incr_every_n_step, decr_every_n_nan_or_inf)

    @test_util.run_in_graph_and_eager_modes
    def test_alternating_good_and_bad_gradients_trigger_each_step(self):
        init_loss_scale = 1
        incr_every_n_step = 1
        decr_every_n_nan_or_inf = 1
        inputs = [True, False] * 4 + [True]
        expected_outputs = [2, 1, 2, 1, 2, 1, 2, 1, 2]
        self._test_helper(inputs, expected_outputs, init_loss_scale,
                          incr_every_n_step, decr_every_n_nan_or_inf)

    @test_util.run_in_graph_and_eager_modes
    def test_alternating_good_and_bad_gradients_trigger_incr_every_2steps(self):
        init_loss_scale = 32
        incr_every_n_step = 2
        decr_every_n_nan_or_inf = 1
        inputs = [True, False] * 3 + [True]
        expected_outputs = [32, 16, 16, 8, 8, 4, 4]
        self._test_helper(inputs, expected_outputs, init_loss_scale,
                          incr_every_n_step, decr_every_n_nan_or_inf)

    @test_util.run_in_graph_and_eager_modes
    def METHOD_NAME(self):
        # Mixed sequence exercising both increase and decrease counters.
        init_loss_scale = 4
        inputs = [
            False, False, True, True, True, False, True, False, True, True, True,
            False
        ]
        expected_outputs = [4, 2, 2, 4, 4, 4, 4, 2, 2, 4, 4, 4]
        self._test_helper(inputs, expected_outputs, init_loss_scale)
if __name__ == "__main__":
    # Stray trailing "|" removed from the original line — it was a syntax error.
    test.main()
# Copyright 2023 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
from http import HTTPStatus
import fastapi
import pydantic
import pytest
from fastapi.exception_handlers import http_exception_handler
from fastapi.testclient import TestClient
from sqlalchemy.orm import Session
from mlrun.api.main import app
from mlrun.utils import logger
class HandledException1(Exception):
    """Raised by a test endpoint; caught by ``handler_returning_response``."""
class HandledException2(Exception):
    """Raised by a test endpoint; caught by ``handler_returning_http_exception``."""
class UnhandledException(Exception):
    """Raised by a test endpoint; deliberately has no registered handler."""
@app.exception_handler(HandledException1)
async def handler_returning_response(request: fastapi.Request, exc: HandledException1):
    """Swallow HandledException1 and answer with an empty 204 response."""
    logger.warning("Handler caught HandledException1 exception, returning 204 response")
    return fastapi.Response(status_code=HTTPStatus.NO_CONTENT.value)
@app.exception_handler(HandledException2)
async def handler_returning_http_exception(
    request: fastapi.Request, exc: HandledException2
):
    """Convert HandledException2 into FastAPI's standard 401 HTTP response."""
    logger.warning(
        "Handler caught HandledException2 exception, returning HTTPException with 401"
    )
    unauthorized = fastapi.HTTPException(status_code=HTTPStatus.UNAUTHORIZED.value)
    return await http_exception_handler(request, unauthorized)
# Router holding the endpoints that exercise each exception-handling path.
test_router = fastapi.APIRouter()
@test_router.get("/success")
def success():
    # Happy-path endpoint: returns a plain 202 with no exception involved.
    logger.info("Success endpoint received request, returning 202")
    return fastapi.Response(status_code=202)
@test_router.get("/handled_exception_1")
def handled_exception_1():
    # Raises the exception covered by handler_returning_response (-> 204).
    logger.info(
        "handled_exception_1 endpoint received request, raising handled exception 1"
    )
    raise HandledException1("handled exception 1")
@test_router.get("/handled_exception_2")
def handled_exception_2():
    # Raises the exception covered by handler_returning_http_exception (-> 401).
    logger.info(
        "handled_exception_2 endpoint received request, raising handled exception 2"
    )
    raise HandledException2("handled exception 2")
@test_router.get("/unhandled_exception")
def unhandled_exception():
    # No handler is registered for this type; the exception escapes the app.
    logger.info("unhandled endpoint received request, raising unhandled exception")
    raise UnhandledException("Unhandled exception")
class SomeScheme(pydantic.BaseModel):
    # Minimal request-body schema; a request missing "id" fails FastAPI's
    # own request validation before the endpoint body runs.
    id: str
@test_router.post("/fastapi_handled_exception")
def METHOD_NAME(model: SomeScheme):
    # Reached only with a valid body; the test posts no body, so FastAPI's
    # built-in validation handler answers with 422 instead.
    logger.info("Should not get here, will fail on body validation")
middleware_modes = [
"with_middleware",
"without_middleware",
]
# must add it here since we're adding routes
@pytest.fixture(params=middleware_modes)
def client(request) -> typing.Generator:
    """Yield a TestClient for the app, parametrized to run once with the
    app's middlewares installed and once with them all stripped."""
    # save a copy of the middlewares. we would want to restore them once we're done with the test
    user_middleware = app.user_middleware.copy()
    try:
        if request.param == "without_middleware":
            # this overrides the webapp middlewares by removing the logging middleware
            app.user_middleware = []
            # rebuilding the stack is required for the change to take effect
            app.middleware_stack = app.build_middleware_stack()
        app.include_router(test_router, prefix="/test")
        with TestClient(app) as c:
            yield c
    finally:
        # restore back the middlewares
        if request.param == "without_middleware":
            app.user_middleware = user_middleware
            app.middleware_stack = app.build_middleware_stack()
def test_logging_middleware(db: Session, client: TestClient) -> None:
    # Exercises each route defined above, in both middleware fixture modes.
    resp = client.get("/test/success")
    assert resp.status_code == HTTPStatus.ACCEPTED.value
    resp = client.get("/test/handled_exception_1")
    assert resp.status_code == HTTPStatus.NO_CONTENT.value
    resp = client.get("/test/handled_exception_2")
    assert resp.status_code == HTTPStatus.UNAUTHORIZED.value
    # POST with no body triggers FastAPI's request-validation handler (422).
    resp = client.post("/test/fastapi_handled_exception")
    assert resp.status_code == HTTPStatus.UNPROCESSABLE_ENTITY.value
    with pytest.raises(UnhandledException):
        # In a real fastapi (and not test) unhandled exception returns 500
        client.get("/test/unhandled_exception")
4,051 | filter stdlib tests | # pylint: disable=3rd-party-module-not-gated
import logging
import pathlib
import sys
from PyInstaller.utils import hooks
log = logging.getLogger(__name__)
def METHOD_NAME(name):
    """
    Filter out non useful modules from the stdlib
    """
    # Drop test packages and IDLE's test suite; everything else is kept.
    excluded_markers = (".test.", ".tests.", ".idle_test")
    return not any(marker in name for marker in excluded_markers)
def _python_stdlib_path():
    """
    Return the path to the standard library folder
    """
    base_exec_prefix = pathlib.Path(sys.base_exec_prefix)
    log.info("Grabbing 'base_exec_prefix' for platform: %s", sys.platform)
    if not sys.platform.lower().startswith("win"):
        # POSIX layout: <prefix>/lib/pythonX.Y
        return base_exec_prefix / "lib" / "python{}.{}".format(*sys.version_info)
    # Windows layout: <prefix>\Lib (no version in the directory name)
    return base_exec_prefix / "Lib"
def _collect_python_stdlib_hidden_imports():
    """
    Collect all of the standard library(most of it) as hidden imports.

    Returns a sorted list of module names: one entry per top-level .py/.pyc/.pyo
    file plus all submodules of each stdlib package (filtered by METHOD_NAME).
    """
    _hidden_imports = set()
    stdlib = _python_stdlib_path()
    if not stdlib.exists():
        log.error("The path '%s' does not exist", stdlib)
        return list(_hidden_imports)
    log.info(
        "Collecting hidden imports from the python standard library at: %s",
        stdlib,
    )
    for path in stdlib.glob("*"):
        if path.is_dir():
            # Skip caches and packages that are useless in a frozen build.
            if path.name in (
                "__pycache__",
                "site-packages",
                "test",
                "turtledemo",
                "ensurepip",
            ):
                continue
            if path.joinpath("__init__.py").is_file():
                log.info("Collecting: %s", path.name)
                try:
                    _module_hidden_imports = hooks.collect_submodules(
                        path.name, filter=METHOD_NAME
                    )
                    log.debug("Collected(%s): %s", path.name, _module_hidden_imports)
                    _hidden_imports.update(set(_module_hidden_imports))
                except Exception as exc:  # pylint: disable=broad-except
                    # Best-effort: a single broken package must not abort the build.
                    log.error("Failed to collect %r: %s", path.name, exc)
            continue
        # Plain files: only python sources/bytecode count as hidden imports.
        if path.suffix not in (".py", ".pyc", ".pyo"):
            continue
        _hidden_imports.add(path.stem)
    log.info("Collected stdlib hidden imports: %s", sorted(_hidden_imports))
    return sorted(_hidden_imports)
def _collect_python_stdlib_dynamic_libraries():
    """
    Collect all of the standard library(most of it) dynamic libraries.

    Returns a sorted list of (source, dest) tuples as produced by
    PyInstaller's collect_dynamic_libs for each stdlib package.
    """
    _dynamic_libs = set()
    stdlib = _python_stdlib_path()
    if not stdlib.exists():
        log.error("The path '%s' does not exist", stdlib)
        return list(_dynamic_libs)
    log.info(
        "Collecting dynamic libraries from the python standard library at: %s",
        stdlib,
    )
    for path in stdlib.glob("*"):
        # Dynamic libs only live inside packages; skip plain files.
        if not path.is_dir():
            continue
        # Same exclusion list as the hidden-imports collector above.
        if path.name in (
            "__pycache__",
            "site-packages",
            "test",
            "turtledemo",
            "ensurepip",
        ):
            continue
        if path.joinpath("__init__.py").is_file():
            log.info("Collecting: %s", path.name)
            try:
                _module_dynamic_libs = hooks.collect_dynamic_libs(path.name, path.name)
                log.debug("Collected(%s): %s", path.name, _module_dynamic_libs)
                _dynamic_libs.update(set(_module_dynamic_libs))
            except Exception as exc:  # pylint: disable=broad-except
                # Best-effort: a single broken package must not abort the build.
                log.error("Failed to collect %r: %s", path.name, exc)
    log.info("Collected stdlib dynamic libs: %s", sorted(_dynamic_libs))
    return sorted(_dynamic_libs)
def _filter_submodules(name):
# this should never happen, but serves as a place-holder for when/if we have to filter
if not name.startswith("salt"):
return False
return True
# Collect Salt datas, binaries(should be None) and hidden imports
SALT_DATAS, SALT_BINARIES, SALT_HIDDENIMPORTS = hooks.collect_all(
"salt",
include_py_files=True,
filter_submodules=_filter_submodules,
)
# In case there's salt-extensions installed, collect their datas and hidden imports
SALT_EXTENSIONS_DATAS, SALT_EXTENSIONS_HIDDENIMPORTS = hooks.collect_entry_point(
"salt.loader"
)
# PyInstaller attributes
datas = sorted(set(SALT_DATAS + SALT_EXTENSIONS_DATAS))
binaries = sorted(set(SALT_BINARIES))
hiddenimports = sorted(
set(
SALT_HIDDENIMPORTS
+ SALT_EXTENSIONS_HIDDENIMPORTS
+ _collect_python_stdlib_hidden_imports()
)
) |
4,052 | create run in thread decorator | """
Implements run_in_thread_with_timeout decorator for running tests that might
deadlock.
"""
from __future__ import print_function
import functools
import os
import sys
import threading
import traceback
import unittest
MODULE_PID = os.getpid()
DEFAULT_TEST_TIMEOUT = 15
def METHOD_NAME(test_timeout=None):
    """Build a decorator that executes the decorated callable on a thread via
    `_ThreadedTestWrapper` and returns the callable's return value, raising
    AssertionError instead if the callable exits with an exception or times
    out.

    :param int | float | None test_timeout: maximum number of seconds to wait
        for the test to complete. When None, `DEFAULT_TEST_TIMEOUT` applies
        (the None indirection lets our self-tests patch the default).
    :return: decorator
    """

    def deco(fun):
        """Wrap `fun` so each call runs inside `_ThreadedTestWrapper`.

        :param fun: function to run in thread
        :return: wrapper function
        """

        @functools.wraps(fun)
        def wrapper(*args, **kwargs):
            """Invoke the wrapped function on a watchdog thread.

            :param args: positional args to pass to wrapped function
            :param kwargs: keyword args to pass to wrapped function
            :return: value returned by the function
            :raises AssertionError: if wrapped function exits with exception
                or times out
            """
            bound_call = functools.partial(fun, *args, **kwargs)
            return _ThreadedTestWrapper(bound_call, test_timeout).kick_off()

        return wrapper

    return deco
run_in_thread_with_timeout = METHOD_NAME() # pylint: disable=C0103
class _ThreadedTestWrapper(object):
    """Runs user's function in a thread. Then wait on the
    thread to terminate up to the given `test_timeout` seconds, raising
    `AssertionError` if user's function exits with exception or times out.
    """
    # We use the saved member when printing to facilitate patching by our
    # self-tests
    _stderr = sys.stderr
    def __init__(self, fun, test_timeout):
        """
        :param callable fun: the function to run in thread, no args.
        :param int | float test_timeout: maximum number of seconds to wait for
            thread to exit.
        """
        self._fun = fun
        if test_timeout is None:
            # NOTE: we handle default here to facilitate patching of
            # DEFAULT_TEST_TIMEOUT in our self-tests
            self._test_timeout = DEFAULT_TEST_TIMEOUT
        else:
            self._test_timeout = test_timeout
        # Save possibly-patched class-level _stderr value in instance so in case
        # user's function times out and later exits with exception, our
        # exception handler in `_thread_entry` won't inadvertently output to the
        # wrong object.
        self._stderr = self._stderr
        self._fun_result = None # result returned by function being run
        self._exc_info = None
    def kick_off(self):
        """Run user's function in a thread. Then wait on the
        thread to terminate up to self._test_timeout seconds, raising
        `AssertionError` if user's function exits with exception or times out.
        :return: the value returned by function if function exited without
            exception and didn't time out
        :raises AssertionError: if user's function timed out or exited with
            exception.
        """
        try:
            runner = threading.Thread(target=self._thread_entry)
            # `daemon = True` so that the script won't wait for thread's exit
            runner.daemon = True
            runner.start()
            runner.join(self._test_timeout)
            # NOTE: a timed-out thread keeps running; being a daemon it won't
            # block interpreter shutdown, but it may still mutate state.
            if runner.is_alive():
                raise AssertionError('The test timed out.')
            if self._exc_info is not None:
                # SkipTest is re-raised as-is so the test framework records a
                # skip rather than a failure.
                if isinstance(self._exc_info[1], unittest.SkipTest):
                    raise self._exc_info[1]
                # Fail the test because the thread running the test's start()
                # failed
                raise AssertionError(self._exc_info_to_str(self._exc_info))
            return self._fun_result
        finally:
            # Facilitate garbage collection
            self._exc_info = None
            self._fun = None
    def _thread_entry(self):
        """Our test-execution thread entry point that calls the test's `start()`
        method.
        Here, we catch all exceptions from `start()`, save the `exc_info` for
        processing by `_kick_off()`, and print the stack trace to `sys.stderr`.
        """
        try:
            self._fun_result = self._fun()
        except: # pylint: disable=W0702
            self._exc_info = sys.exc_info()
            # Remove the attribute entirely so a buggy read of the result
            # after a failure raises AttributeError instead of yielding None.
            del self._fun_result # to force exception on inadvertent access
            if not isinstance(self._exc_info[1], unittest.SkipTest):
                print(
                    'ERROR start() of test {} failed:\n{}'.format(
                        self,
                        self._exc_info_to_str(self._exc_info)),
                    end='',
                    file=self._stderr)
    @staticmethod
    def _exc_info_to_str(exc_info):
        """Convenience method for converting the value returned by
        `sys.exc_info()` to a string.
        :param tuple exc_info: Value returned by `sys.exc_info()`.
        :return: A string representation of the given `exc_info`.
        :rtype: str
        """
        return ''.join(traceback.format_exception(*exc_info))
4,053 | test dashboard tags strategy | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
"""Unit tests for Superset cache warmup"""
from unittest.mock import MagicMock
from tests.integration_tests.fixtures.birth_names_dashboard import (
load_birth_names_dashboard_with_slices,
load_birth_names_data,
)
from sqlalchemy import String, Date, Float
import pytest
import pandas as pd
from superset.models.slice import Slice
from superset.utils.database import get_example_database
from superset import db
from superset.models.core import Log
from superset.tags.models import get_tag, ObjectTypes, TaggedObject, TagTypes
from superset.tasks.cache import (
DashboardTagsStrategy,
TopNDashboardsStrategy,
)
from superset.utils.urls import get_url_host
from .base_tests import SupersetTestCase
from .dashboard_utils import create_dashboard, create_slice, create_table_metadata
from .fixtures.unicode_dashboard import (
load_unicode_dashboard_with_slice,
load_unicode_data,
)
mock_positions = {
"DASHBOARD_VERSION_KEY": "v2",
"DASHBOARD_CHART_TYPE-1": {
"type": "CHART",
"id": "DASHBOARD_CHART_TYPE-1",
"children": [],
"meta": {"width": 4, "height": 50, "chartId": 1},
},
"DASHBOARD_CHART_TYPE-2": {
"type": "CHART",
"id": "DASHBOARD_CHART_TYPE-2",
"children": [],
"meta": {"width": 4, "height": 50, "chartId": 2},
},
}
class TestCacheWarmUp(SupersetTestCase):
    """Integration tests for the cache-warmup payload strategies."""
    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
    def test_top_n_dashboards_strategy(self):
        # create a top visited dashboard
        db.session.query(Log).delete()
        self.login(username="admin")
        dash = self.get_dash_by_slug("births")
        # Visit the dashboard repeatedly so it ranks within the top-N by hits.
        for _ in range(10):
            self.client.get(f"/superset/dashboard/{dash.id}/")
        strategy = TopNDashboardsStrategy(1)
        result = strategy.get_payloads()
        expected = [
            {"chart_id": chart.id, "dashboard_id": dash.id} for chart in dash.slices
        ]
        self.assertCountEqual(result, expected)
    def reset_tag(self, tag):
        """Remove associated object from tag, used to reset tests"""
        if tag.objects:
            for o in tag.objects:
                db.session.delete(o)
            db.session.commit()
    @pytest.mark.usefixtures(
        "load_unicode_dashboard_with_slice", "load_birth_names_dashboard_with_slices"
    )
    def METHOD_NAME(self):
        # Strategy with no tagged objects yields no payloads.
        tag1 = get_tag("tag1", db.session, TagTypes.custom)
        # delete first to make test idempotent
        self.reset_tag(tag1)
        strategy = DashboardTagsStrategy(["tag1"])
        result = strategy.get_payloads()
        expected = []
        self.assertEqual(result, expected)
        # tag dashboard 'births' with `tag1`
        tag1 = get_tag("tag1", db.session, TagTypes.custom)
        dash = self.get_dash_by_slug("births")
        tag1_urls = [{"chart_id": chart.id} for chart in dash.slices]
        tagged_object = TaggedObject(
            tag_id=tag1.id, object_id=dash.id, object_type=ObjectTypes.dashboard
        )
        db.session.add(tagged_object)
        db.session.commit()
        # Tagging a dashboard warms every chart on it.
        self.assertCountEqual(strategy.get_payloads(), tag1_urls)
        strategy = DashboardTagsStrategy(["tag2"])
        tag2 = get_tag("tag2", db.session, TagTypes.custom)
        self.reset_tag(tag2)
        result = strategy.get_payloads()
        expected = []
        self.assertEqual(result, expected)
        # tag first slice
        dash = self.get_dash_by_slug("unicode-test")
        chart = dash.slices[0]
        tag2_urls = [{"chart_id": chart.id}]
        object_id = chart.id
        tagged_object = TaggedObject(
            tag_id=tag2.id, object_id=object_id, object_type=ObjectTypes.chart
        )
        db.session.add(tagged_object)
        db.session.commit()
        # Tagging a single chart warms only that chart.
        result = strategy.get_payloads()
        self.assertCountEqual(result, tag2_urls)
        # Multiple tags combine their payloads.
        strategy = DashboardTagsStrategy(["tag1", "tag2"])
        result = strategy.get_payloads()
        expected = tag1_urls + tag2_urls
        self.assertCountEqual(result, expected)
4,054 | get index for keyword | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import re
from datadog_checks.base.utils.platform import Platform
from datadog_checks.sqlserver.const import ENGINE_EDITION_AZURE_MANAGED_INSTANCE, ENGINE_EDITION_SQL_DATABASE
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
DRIVER_CONFIG_DIR = os.path.join(CURRENT_DIR, 'data', 'driver_config')
PROC_CHAR_LIMIT = 500
# Database is used to store both the name and physical_database_name
# for a database, which is discovered via autodiscovery
class Database:
    """Value object pairing a database's name with its physical database name
    (both discovered via autodiscovery). Hashable so it can live in sets."""

    def __init__(self, name, physical_db_name=None):
        self.name = name
        self.physical_db_name = physical_db_name

    def __hash__(self):
        return hash((self.name, self.physical_db_name))

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return NotImplemented
        return (self.name, self.physical_db_name) == (other.name, other.physical_db_name)

    def __str__(self):
        return "name:{}, physical_db_name:{}".format(self.name, self.physical_db_name)
def set_default_driver_conf():
    # Point ODBC/FreeTDS at a usable configuration depending on the runtime.
    # Both settings use setdefault, so user-provided values always win.
    if Platform.is_containerized():
        # Use default `./driver_config/odbcinst.ini` when Agent is running in docker.
        # `freetds` is shipped with the Docker Agent.
        os.environ.setdefault('ODBCSYSINI', DRIVER_CONFIG_DIR)
    else:
        # required when using pyodbc with FreeTDS on Ubuntu 18.04
        # see https://stackoverflow.com/a/22988748/1258743
        os.environ.setdefault('TDSVER', '8.0')
def construct_use_statement(database):
    """Return a T-SQL USE statement targeting *database* (bracket-quoted)."""
    return f'use [{database}]'
def is_statement_proc(text):
    """Determine whether *text* defines a stored procedure.

    Only the first PROC_CHAR_LIMIT characters are inspected, upper-cased and
    whitespace-split into keywords.

    :param text: raw SQL text (may be None or empty)
    :return: tuple ``(is_proc, procedure_name)`` where the name is the
        lower-cased token following PROC/PROCEDURE, or ``(False, None)``.
    """
    if text:
        # take first 500 chars, upper case and split into string
        # to get individual keywords
        t = text[0:PROC_CHAR_LIMIT].upper().split()
        idx_create = METHOD_NAME(t, 'CREATE')
        idx_proc = METHOD_NAME(t, 'PROCEDURE')
        if idx_proc < 0:
            idx_proc = METHOD_NAME(t, 'PROC')
        # CREATE must be present and occur before PROC/PROCEDURE. The old
        # extra `idx_proc >= 0` conjunct was redundant: idx_create >= 0 and
        # idx_create < idx_proc already imply idx_proc > 0.
        if 0 <= idx_create < idx_proc:
            return True, _get_procedure_name(t, idx_proc)
    return False, None
def _get_procedure_name(t, idx):
if idx >= 0 and idx + 1 < len(t):
return t[idx + 1].lower()
return None
def METHOD_NAME(text, keyword):
    """Return the position of *keyword* within *text* (a token list), or -1
    when it is absent, instead of letting index() raise ValueError."""
    return text.index(keyword) if keyword in text else -1
def extract_sql_comments(text):
    """Return the list of SQL comments (`--` and `/* */`) found in *text*.

    Multi-line comments are collapsed into a single space-joined string;
    single-line comments keep their leading `--` but lose trailing
    whitespace. NOTE(review): comments still open at end-of-input are
    dropped (never appended) -- presumably intentional, confirm if exact
    parity with server-side parsing matters.
    """
    if not text:
        return []
    # Simple two-state scanner over the raw characters.
    in_single_line_comment = False
    in_multi_line_comment = False
    comment_start = None
    result = []
    for i in range(len(text)):
        if in_multi_line_comment:
            if i < len(text) - 1 and text[i : i + 2] == '*/':
                in_multi_line_comment = False
                # strip all non-space/newline chars from multi-line comments
                lines = [line.strip() for line in text[comment_start : i + 2].split('\n')]
                result.append(' '.join(lines))
        elif in_single_line_comment:
            if text[i] == '\n':
                in_single_line_comment = False
                # strip any extra whitespace at the end
                # of the single line comment
                result.append(text[comment_start:i].rstrip())
        else:
            # Outside any comment: look for a comment opener at this position.
            if i < len(text) - 1 and text[i : i + 2] == '--':
                in_single_line_comment = True
                comment_start = i
            elif i < len(text) - 1 and text[i : i + 2] == '/*':
                in_multi_line_comment = True
                comment_start = i
    return result
def parse_sqlserver_major_version(version):
    """
    Parses the SQL Server major version out of the full version
    :param version: String representation of full SQL Server version (from @@version)
    :return: integer representation of SQL Server major version (i.e. 2012, 2019),
        or None when the string does not look like a SQL Server version
    """
    match = re.search(r"Microsoft SQL Server (\d+)", version)
    return int(match.group(1)) if match else None
def is_azure_database(engine_edition):
    """
    Checks if engine edition matches Azure SQL MI or Azure SQL DB
    :param engine_edition: The engine version of the database host
    :return: bool
    """
    return engine_edition in (ENGINE_EDITION_AZURE_MANAGED_INSTANCE, ENGINE_EDITION_SQL_DATABASE)
4,055 | get antlir linux flavor | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This provides helpers useful for working with flavors. For more
information check out [the flavor docs](/docs/concepts/rpms/overview).
"""
load("//antlir/bzl:build_defs.bzl", "use_antlir2")
load(":check_flavor_exists.bzl", "check_flavor_exists")
load(":constants.bzl", "REPO_CFG", "new_flavor_config")
load(":flavor_alias.bzl", "alias_flavor")
load(":flavor_impl.bzl", "flavor_to_struct", "get_unaliased_flavor")
load(":shape.bzl", "shape")
load(":structs.bzl", "structs")
load(":target_helpers.bzl", "normalize_target")
def _get_flavor_config(
        flavor,
        flavor_config_override = None,
        # TODO(T139523690) when flavors are always targets, this check will be
        # completely unnecessary
        assume_flavor_exists = False):
    """
    Arguments
    - `flavor`: The name of the flavor to fetch the config.
    - `flavor_config_override`: An opts that contains any overrides for
    the default config of a flavor that will be applied.
    - `assume_flavor_exists`: Skip the existence check (see TODO above).
    Example usage:
    ```
    load("//antlir/bzl:flavor_helpers.bzl", "flavor_helpers")
    flavor_config = flavor_helpers.get_flavor_config(flavor, flavor_config_override)
    build_appliance = flavor_config["build_appliance"]
    ```
    """
    if not flavor and flavor_config_override:
        fail("Please specify the flavor when overriding the flavor config")
    flavor = flavor_to_struct(flavor)
    if not assume_flavor_exists:
        check_flavor_exists(flavor)
    # Start from the repo-wide default config for this flavor, then layer
    # the caller's overrides on top.
    flavor_config = shape.as_dict_shallow(REPO_CFG.flavor_to_config[flavor.name])
    overrides = structs.to_dict(flavor_config_override) if flavor_config_override else {}
    # This override is forbidden because vset paths are currently consumed
    # in `image/feature/new.bzl`, where per-layer overrides are NOT available.
    if "version_set_path" in overrides:
        fail("Cannot override `version_set_path`", "flavor_config_override")
    if "rpm_installer" in overrides and not "rpm_repo_snapshot" in overrides:
        fail(
            "Please override the `rpm_repo_snapshot` as well to make sure it " +
            "matches `rpm_installer`. Set it to `None` to use the default snapshot.",
        )
    flavor_config.update(overrides)
    return new_flavor_config(**flavor_config)
def _get_flavor_default():
    """Return the repo-wide default flavor (possibly de-aliased)."""
    #
    # Technically we don't need to call alias_flavor() here (since it's
    # already been invoked for this REPO_CFG variable), but we do it
    # anyway to support `fail-on-flavor-aliasing` testing. Basically,
    # alias_flavor() will fail() if flavor aliasing is disabled and we
    # try to return an aliased flavor.
    #
    return alias_flavor(REPO_CFG.flavor_default)
def METHOD_NAME():
    """Return the flavor used for Antlir Linux images."""
    if use_antlir2():
        # antlir2 pins a concrete flavor target instead of a config value.
        return "//antlir/antlir2/facebook/flavor:centos9"
    # See the comment above in _get_flavor_default().
    return alias_flavor(REPO_CFG.antlir_linux_flavor)
def _get_build_appliance(flavor = None):
    """
    Arguments
    - `flavor`: The flavor of the build appliance to return.
      Defaults to the repo-wide default flavor when omitted.
    """
    flavor = flavor_to_struct(flavor or _get_flavor_default())
    return REPO_CFG.flavor_to_config[flavor.name].build_appliance
def _get_rpm_installer(flavor = None):
    """
    Arguments
    - `flavor`: The flavor of the rpm installer to return.
      Defaults to the repo-wide default flavor when omitted.
    """
    flavor = flavor_to_struct(flavor or _get_flavor_default())
    return REPO_CFG.flavor_to_config[flavor.name].rpm_installer
def _get_rpm_installers_supported():
    """
    Returns all possible rpm installers in `REPO_CFG.flavor_to_config` deduplicated.
    """
    # Starlark has no set type; dict keys give us deduplication.
    rpm_installers = {}
    for _, config in REPO_CFG.flavor_to_config.items():
        if config.rpm_installer:
            rpm_installers[config.rpm_installer] = 1
    return rpm_installers.keys()
def _get_flavor_from_build_appliance(build_appliance):
    """Map a build-appliance target label back to its flavor."""
    build_appliance = normalize_target(build_appliance)
    return REPO_CFG.ba_to_flavor[build_appliance]
def _maybe_get_tgt_flavor(tgt):
    """Best-effort flavor lookup for a target; returns None when unknown."""
    tgt = normalize_target(tgt)
    return REPO_CFG.ba_to_flavor.get(
        tgt,
        REPO_CFG.buck1_tgts_to_flavors.get(tgt, None),
    )
def _get_shortname(flavor):
    """Return the flavor's short name (used in target/fbpkg names)."""
    # Flavor shortnames are commonly used in target and fbpkg names,
    # where we generally don't want flavor aliasing to be used.
    flavor = get_unaliased_flavor(flavor)
    return REPO_CFG.flavor_to_config[flavor.name].shortname
flavor_helpers = struct(
get_build_appliance = _get_build_appliance,
get_flavor_from_build_appliance = _get_flavor_from_build_appliance,
get_flavor_default = _get_flavor_default,
get_antlir_linux_flavor = METHOD_NAME,
get_flavor_config = _get_flavor_config,
get_shortname = _get_shortname,
get_rpm_installer = _get_rpm_installer,
get_rpm_installers_supported = _get_rpm_installers_supported,
maybe_get_tgt_flavor = _maybe_get_tgt_flavor,
) |
4,056 | prepare data disks | import logging
from avocado.core import exceptions
from avocado.utils import memory
from virttest import data_dir
from virttest import utils_misc
from virttest import env_process
from virttest import qemu_storage
from virttest import error_context
from virttest import utils_disk
from virttest.qemu_capabilities import Flags
from provider import backup_utils
from provider import job_utils
from provider.virt_storage.storage_admin import sp_admin
LOG_JOB = logging.getLogger('avocado.test')
class BlockdevBaseTest(object):
    """Base class for blockdev tests: owns the main VM, the source/target
    disk images and the temp files used for data verification; subclasses
    implement do_test()."""
    def __init__(self, test, params, env):
        self.main_vm = None
        self.params = params
        self.test = test
        self.env = env
        self.disks_info = {} # tag, [dev, mount_point]
        self.files_info = {} # tag, [file]
        self._tmp_dir = data_dir.get_tmp_dir()
        # Disks/volumes created during the test; removed in clean_images().
        self.trash = []
    def is_blockdev_mode(self):
        # True when QEMU was started with -blockdev (vs legacy -drive).
        return self.main_vm.check_capability(Flags.BLOCKDEV)
    def disk_define_by_params(self, params, image_name):
        """Build a QemuImg object for `image_name` from its params."""
        images_dir = data_dir.get_data_dir()
        image_params = params.object_params(image_name)
        img = qemu_storage.QemuImg(image_params, images_dir, image_name)
        return img
    def source_disk_define_by_params(self, params, image_name):
        # Source disks are plain QemuImg objects; kept as a hook for subclasses.
        img = self.disk_define_by_params(params, image_name)
        return img
    def target_disk_define_by_params(self, params, image_name):
        """Define a target volume, optionally with a random cluster size."""
        if params.get("random_cluster_size") == "yes":
            blacklist = list(
                map(int, params.objects("cluster_size_blacklist")))
            cluster_size = backup_utils.generate_random_cluster_size(blacklist)
            params["image_cluster_size"] = cluster_size
            LOG_JOB.info(
                "set target image cluster size to '%s'",
                cluster_size)
        params.setdefault("target_path", data_dir.get_data_dir())
        vol = sp_admin.volume_define_by_params(image_name, params)
        return vol
    def preprocess_data_disks(self):
        """Create every source image listed in `source_images`."""
        for tag in self.params.objects("source_images"):
            params = self.params.object_params(tag)
            if params.get("random_cluster_size") == "yes":
                blacklist = list(
                    map(int, params.objects("cluster_size_blacklist")))
                cluster_size = backup_utils.generate_random_cluster_size(
                    blacklist)
                params["image_cluster_size"] = cluster_size
                LOG_JOB.info(
                    "set image cluster size to '%s'",
                    cluster_size)
            disk = self.source_disk_define_by_params(params, tag)
            disk.create(params)
            self.trash.append(disk)
    def prepare_main_vm(self):
        """(Re)create the main VM, destroying any VMs left from prior tests."""
        for vm in self.env.get_all_vms():
            if vm.is_alive():
                vm.destroy()
        vm_name = self.params["main_vm"]
        vm_params = self.params.object_params(vm_name)
        env_process.preprocess_vm(self.test, vm_params, self.env, vm_name)
        main_vm = self.env.get_vm(vm_name)
        main_vm.create()
        main_vm.verify_alive()
        self.main_vm = main_vm
    def generate_data_file(self, tag, filename=None):
        """
        Generate tempfile in the image
        :param tag: image tag
        :param filename: temp filename (random 4-char name when omitted)
        """
        if not filename:
            filename = utils_misc.generate_random_string(4)
        params = self.params.object_params(tag)
        image_size = params.get("tempfile_size", "10M")
        timeout = params.get_numeric("create_tempfile_timeout", 720)
        backup_utils.generate_tempfile(
            self.main_vm, self.disks_info[tag][1], filename, image_size, timeout)
        # Remember the filename so verify_data_files() can check its md5 later.
        if tag not in self.files_info:
            self.files_info[tag] = [filename]
        else:
            self.files_info[tag].append(filename)
    def prepare_data_disk(self, tag):
        """
        Make file system on the disk, then create temp file
        and save it md5sum.
        :param tag: image tag
        """
        # image1 is the system disk and is already formatted/mounted.
        if tag != "image1":
            self.format_data_disk(tag)
        self.generate_data_file(tag)
    def METHOD_NAME(self):
        """
        prepare all data disks
        """
        for tag in self.params.objects("source_images"):
            self.prepare_data_disk(tag)
    def verify_data_files(self):
        """
        Verify temp file's md5sum in all data disks
        """
        session = self.clone_vm.wait_for_login()
        try:
            backup_utils.refresh_mounts(self.disks_info, self.params, session)
            for tag, info in self.disks_info.items():
                if tag != 'image1':
                    LOG_JOB.debug("mount target disk in VM!")
                    utils_disk.mount(info[0], info[1], session=session)
                for data_file in self.files_info[tag]:
                    backup_utils.verify_file_md5(
                        self.clone_vm, info[1], data_file)
        finally:
            session.close()
    @error_context.context_aware
    def format_data_disk(self, tag):
        """Partition+format the guest disk for `tag`, recording dev/mountpoint."""
        session = self.main_vm.wait_for_login()
        try:
            info = backup_utils.get_disk_info_by_param(tag, self.params,
                                                       session)
            if info is None:
                raise exceptions.TestFail("disk not found in guest ...")
            disk_path = "/dev/%s1" % info['kname']
            mount_point = utils_disk.configure_empty_linux_disk(
                session, info['kname'], info['size'])[0]
            self.disks_info[tag] = [disk_path, mount_point]
        finally:
            session.close()
    @error_context.context_aware
    def add_target_data_disks(self):
        """Hot add target disk to VM with qmp monitor"""
        error_context.context("Create target disk")
        for tag in self.params.objects("source_images"):
            image_params = self.params.object_params(tag)
            for img in image_params.objects("image_backup_chain"):
                disk = self.target_disk_define_by_params(self.params, img)
                disk.hotplug(self.main_vm)
                self.trash.append(disk)
    def prepare_test(self):
        self.prepare_main_vm()
        self.METHOD_NAME()
        self.add_target_data_disks()
    def post_test(self):
        try:
            self.destroy_vms()
            self.clean_images()
        finally:
            # Free host page cache used by the image files.
            memory.drop_caches()
    def destroy_vms(self):
        """
        Stop all VMs
        """
        for vm in self.env.get_all_vms():
            if vm.is_alive():
                vm.destroy()
    def run_test(self):
        # Template method: prepare, run subclass's do_test, always clean up.
        self.prepare_test()
        try:
            self.do_test()
        finally:
            self.post_test()
    def do_test(self):
        raise NotImplementedError
    def clean_images(self):
        """
        Cleanup all data images
        """
        for img in set(self.trash):
            try:
                # A QemuImg object
                img.remove()
            except AttributeError:
                # A StorageVolume object
                sp_admin.remove_volume(img)
            except Exception as e:
                # Best-effort cleanup; never fail the test here.
                LOG_JOB.warn(str(e))
    def check_block_jobs_started(self, jobid_list, tmo=10):
        """
        Test failed if any block job failed to start
        """
        job_utils.check_block_jobs_started(self.main_vm, jobid_list, tmo)
    def check_block_jobs_running(self, jobid_list, tmo=200):
        """
        Test failed if any block job's offset never increased
        """
        job_utils.check_block_jobs_running(self.main_vm, jobid_list, tmo)
    def check_block_jobs_paused(self, jobid_list, tmo=50):
        """
        Test failed if any block job's offset changed
        """
        job_utils.check_block_jobs_paused(self.main_vm, jobid_list, tmo)
4,057 | query index | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle as pkl
from argparse import ArgumentParser
from collections import OrderedDict
from typing import Dict
import numpy as np
import torch
from build_index import load_model
from omegaconf import DictConfig, OmegaConf
from nemo.utils import logging
try:
import faiss
except ModuleNotFoundError:
logging.warning("Faiss is required for building the index. Please install faiss-gpu")
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def get_query_embedding(query, model):
    """Use entity linking encoder to get embedding for index query"""
    # Tokenize the raw query; truncation keeps it within the encoder's
    # 512-token limit.
    model_input = model.tokenizer(
        query,
        add_special_tokens=True,
        padding=True,
        truncation=True,
        max_length=512,
        return_token_type_ids=True,
        return_attention_mask=True,
    )
    # Wrap each field in a batch dimension of 1; `device` is the
    # module-level cuda/cpu device.
    query_emb = model.forward(
        input_ids=torch.LongTensor([model_input["input_ids"]]).to(device),
        token_type_ids=torch.LongTensor([model_input["token_type_ids"]]).to(device),
        attention_mask=torch.LongTensor([model_input["attention_mask"]]).to(device),
    )
    return query_emb
def METHOD_NAME(
    query: str, cfg: DictConfig, model: object, index: object, pca: object, idx2id: dict, id2string: dict,
) -> Dict:
    """
    Query the nearest neighbor index of entities to find the
    concepts in the index dataset that are most similar to the
    query.
    Args:
        query (str): entity to look up in the index
        cfg (DictConfig): config object to specifiy query parameters
        model (EntityLinkingModel): entity linking encoder model
        index (object): faiss index
        pca (object): sklearn pca transformation to be applied to queries
        idx2id (dict): dictionary mapping unique concept dataset index to
                       its CUI
        id2string (dict): dictionary mapping each unqiue CUI to a
                          representative english description of
                          the concept
    Returns:
        A dictionary with the concept ids of the index's most similar
        entities as the keys and a tuple containing the string
        representation of that concept and its cosine similarity to
        the query as the values.
    """
    query_emb = get_query_embedding(query, model).detach().cpu().numpy()
    if cfg.apply_pca:
        query_emb = pca.transform(query_emb)
    # Over-fetch (query_num_factor * top_n) because several neighbors can
    # map to the same concept id and get deduplicated below.
    dist, neighbors = index.search(query_emb.astype(np.float32), cfg.query_num_factor * cfg.top_n)
    dist, neighbors = dist[0], neighbors[0]
    unique_ids = OrderedDict()
    neighbor_idx = 0
    # Many of nearest neighbors could map to the same concept id, their idx is their unique identifier
    while len(unique_ids) < cfg.top_n and neighbor_idx < len(neighbors):
        concept_id_idx = neighbors[neighbor_idx]
        concept_id = idx2id[concept_id_idx]
        # Only want one instance of each unique concept
        if concept_id not in unique_ids:
            concept = id2string[concept_id]
            # NOTE(review): `1 - dist` treats the faiss distance as
            # (1 - cosine similarity); only valid if the index was built
            # over normalized embeddings -- confirm against build_index.py.
            unique_ids[concept_id] = (concept, 1 - dist[neighbor_idx])
        neighbor_idx += 1
    unique_ids = dict(unique_ids)
    return unique_ids
def main(cfg: DictConfig, restore: bool):
    """
    Loads faiss index and allows commandline queries
    to the index. Requires an index built by `build_index.py`.

    Args:
        cfg: Config file specifying index parameters
        restore: Whether to restore model weights trained
                 by the user. Otherwise will load weights
                 used before self alignment pretraining.
    """
    if not os.path.isfile(cfg.index.index_save_name) or (
        cfg.apply_pca and not os.path.isfile(cfg.index.pca.pca_save_name) or not os.path.isfile(cfg.index.idx_to_id)
    ):
        logging.warning("Either no index and/or no mapping from entity idx to ids exists. Please run `build_index.py`")
        return

    logging.info("Loading entity linking encoder model")
    model = load_model(cfg.model, restore)

    logging.info("Loading index and associated files")
    index = faiss.read_index(cfg.index.index_save_name)
    # Close pickle files deterministically instead of relying on GC.
    with open(cfg.index.idx_to_id, "rb") as idx_file:
        idx2id = pkl.load(idx_file)
    with open(cfg.index.id_to_string, "rb") as str_file:
        id2string = pkl.load(str_file)  # Should be created during dataset prep

    # BUG FIX: pca was previously only bound inside the apply_pca branch,
    # causing a NameError below whenever PCA is disabled.
    pca = None
    if cfg.index.apply_pca:
        with open(cfg.index.pca.pca_save_name, "rb") as pca_file:
            pca = pkl.load(pca_file)

    while True:
        query = input("enter index query: ")
        # BUG FIX: check the exit sentinel BEFORE querying (it previously ran a
        # full embedding + search for the word "exit" first).
        if query == "exit":
            break
        # BUG FIX: the original call passed an extra cfg.top_n positional
        # argument (8 args for a 7-parameter function -> TypeError). The index
        # sub-config carries apply_pca/top_n/query_num_factor -- confirm against
        # the config schema.
        output = METHOD_NAME(query, cfg.index, model, index, pca, idx2id, id2string)
        for concept_id in output:
            concept_details = output[concept_id]
            # CUIs are displayed zero-padded to 7 digits with a 'C' prefix.
            concept_id = "C" + str(concept_id).zfill(7)
            print(concept_id, concept_details)
        print("----------------\n")
if __name__ == '__main__':
    # CLI entry point: parse arguments, load the OmegaConf YAML config, and
    # hand control to main() for the interactive query loop.
    parser = ArgumentParser()
    parser.add_argument(
        "--restore", action="store_true", help="Whether to restore encoder model weights from nemo path"
    )
    parser.add_argument("--project_dir", required=False, type=str, default=".")
    parser.add_argument("--cfg", required=False, type=str, default="./conf/umls_medical_entity_linking_config.yaml")
    args = parser.parse_args()
    cfg = OmegaConf.load(args.cfg)
    # Allow relative paths inside the config to resolve against project_dir.
    cfg.project_dir = args.project_dir
    main(cfg, args.restore)
"""A script wrapper for the Wing IDE."""
import os
import os.path
from os.path import exists, abspath, dirname, join
import sys
import fnmatch
import logging
from subprocess import Popen
from configparser import ConfigParser
from optparse import OptionParser
def find_up(name, path=None):
    """Find the named file or directory in a parent directory.

    Walk upward from the starting directory (defaulting to the current
    working directory) until an entry called *name* is found in one of
    the ancestors. *name* is a basename, not a path.

    Parameters
    ----------
    name : str
        Base name of the file or directory being searched for.
    path : str, optional
        Starting directory. If not supplied, current directory is used.

    Returns
    -------
    str or None
        Absolute path of the match, or None when nothing is found or the
        starting directory does not exist.
    """
    current = path or os.getcwd()
    if not exists(current):
        return None
    while current:
        candidate = join(current, name)
        if exists(candidate):
            return abspath(candidate)
        parent = dirname(current)
        if parent == current:
            # Reached the filesystem root without a match.
            return None
        current = parent
    return None
def _modify_wpr_file(template, outfile, version):
    """Write a Wing project file *outfile* derived from *template*.

    The template is read as an INI-style file; on macOS the Python
    executable and the current sys.path are pinned into the project so
    Wing uses this script's interpreter environment.
    """
    config = ConfigParser()
    config.read(template)
    if sys.platform == 'darwin':
        # NOTE(review): both overrides appear to be macOS-only here -- confirm
        # that non-darwin platforms really need no pyexec/pypath settings.
        # Wing stores these attributes as a stringified {None: ('custom', value)} dict.
        config.set('user attributes', 'proj.pyexec',
                   str(dict({None: ('custom', sys.executable)})))
        config.set('user attributes', 'proj.pypath',
                   str(dict({None: ('custom',
                                    os.pathsep.join(sys.path))})))
    with open(outfile, 'w') as fp:
        # Wing project files must start with the '#!wing' cookie and a version tag.
        fp.write('#!wing\n#!version=%s\n' % version)
        config.write(fp)
def _find_wing():
    """Locate the Wing IDE executable for the current platform.

    Directories from PATH are searched first, then a set of conventional
    install locations per platform. When several versions match, the
    lexicographically last (newest) one wins.

    Returns
    -------
    str
        Full path to the Wing executable.

    Raises
    ------
    OSError
        If no Wing executable can be found anywhere.
    """
    if sys.platform == 'win32':
        wname = 'wing.exe'
        tdir = r'C:\Program Files (x86)'
        try:
            locs = [os.path.join(tdir, p, 'bin') for p in
                    fnmatch.filter(os.listdir(tdir), r'Wing IDE ?.?')]
        except OSError:  # was a bare except: only listdir failures are expected
            locs = []
        tdir = r'C:\Program Files'
        try:
            locs.extend([os.path.join(tdir, p, 'bin') for p in
                         fnmatch.filter(os.listdir(tdir), r'Wing IDE ?.?')])
        except OSError:  # was a bare except: narrowed to filesystem errors
            pass
    elif sys.platform == 'darwin':
        wname = 'wing'
        locs = ['/Applications/WingIDE.app/Contents/MacOS',
                '/Applications/Wing/WingIDE.app/Contents/MacOS']
    else:
        wname = 'wing?.?'
        locs = ['/usr/bin', '/usr/sbin', '/usr/local/bin']

    # os.environ.get replaces the old try/except KeyError dance.
    pathvar = os.environ.get('PATH', '')

    all_locs = [p for p in pathvar.split(os.pathsep) if p.strip()] + locs
    for path in all_locs:
        try:
            matches = fnmatch.filter(os.listdir(path), wname)
        except OSError:  # missing/unreadable directory: just skip it
            continue
        if matches:
            # Sorted so the highest version string is chosen.
            return os.path.join(path, sorted(matches)[-1])

    raise OSError("%s was not found in PATH or in any of the common places." %
                  wname)
def METHOD_NAME():
    """Run the Wing IDE using our template project file."""
    parser = OptionParser()
    parser.add_option("-w", "--wingpath", action="store", type="string",
                      dest="wingpath", help="location of WingIDE executable")
    parser.add_option("-p", "--projpath", action="store", type="string",
                      dest="projpath", default='',
                      help="location of WingIDE project file")
    parser.add_option("-v", "--version", action="store", type="string",
                      dest="version", default='7.0',
                      help="version of WingIDE")

    (options, args) = parser.parse_args(sys.argv[1:])

    wingpath = options.wingpath
    projpath = options.projpath
    version = options.version

    # Normalize a bare major version like '7' to '7.0'.
    if len(version) == 1:
        version = version + '.0'

    if not os.path.isfile(projpath):
        # No usable project file supplied: generate one next to this script
        # from the bundled template, pinned to the requested Wing version.
        wingproj_file = 'wing_proj_template.wpr'
        mydir = os.path.dirname(os.path.abspath(__file__))
        proj_template = os.path.join(mydir, wingproj_file)
        projpath = os.path.join(mydir, 'wingproj.wpr')
        _modify_wpr_file(proj_template, projpath, version)

    # in order to find all of our shared libraries,
    # put their directories in LD_LIBRARY_PATH
    env = {}
    env.update(os.environ)
    if sys.platform == 'darwin':
        libpname = 'DYLD_LIBRARY_PATH'
        libext = '*.dyld'
    elif not sys.platform.startswith('win'):
        libpname = 'LD_LIBRARY_PATH'
        libext = '*.so'
    else:
        # Windows resolves DLLs via PATH; nothing extra to do here.
        libpname = None

    if libpname:
        libs = env.get(libpname, '').split(os.pathsep)
        # Find the repository root by locating the .git directory.
        rtop = find_up('.git')
        if rtop:
            rtop = os.path.dirname(rtop)
            # Collect every directory under the repo that contains a shared lib.
            sodirs = set()
            for path, dirlist, filelist in os.walk(rtop):
                for name in [f for f in filelist if fnmatch.fnmatch(f, libext)]:
                    sodirs.add(os.path.dirname(join(path, name)))
            libs.extend(sodirs)
            env[libpname] = os.pathsep.join(libs)

    if sys.platform == 'darwin':
        # 'open' launches the application registered for .wpr files.
        cmd = ['open', projpath]
    else:
        if not wingpath:
            wingpath = _find_wing()
        cmd = [wingpath, projpath]
    try:
        print("wing command: ", ' '.join(cmd))
        Popen(cmd, env=env)
    except Exception as err:
        # NOTE(review): 'err' is caught but never shown; only a generic
        # failure notice is printed.
        print("Failed to run command '%s'." % ' '.join(cmd))
if __name__ == '__main__':
    METHOD_NAME()
from typing import List, Optional, Tuple
import cv2
import PIL
import numpy as np
from super_gradients.common.abstractions.abstract_logger import get_logger
logger = get_logger(__name__)
__all__ = ["load_video", "save_video", "includes_video_extension", "show_video_from_disk", "show_video_from_frames"]
VIDEO_EXTENSIONS = (".mp4", ".avi", ".mov", ".wmv", ".flv", ".gif")
def load_video(file_path: str, max_frames: Optional[int] = None) -> Tuple[List[np.ndarray], int]:
    """Open a video file and extract each frame into numpy array.

    :param file_path: Path to the video file.
    :param max_frames: Optional, maximum number of frames to extract.
    :return:
                - Frames representing the video, each in (H, W, C), RGB.
                - Frames per Second (FPS).
    """
    cap = _open_video(file_path)
    # BUG FIX: release the capture even when frame extraction raises,
    # otherwise the underlying OS video handle leaks.
    try:
        frames = METHOD_NAME(cap, max_frames)
        fps = cap.get(cv2.CAP_PROP_FPS)
    finally:
        cap.release()
    return frames, fps
def _open_video(file_path: str) -> cv2.VideoCapture:
    """Open a video file with OpenCV.

    :param file_path: Path to the video file
    :return: Opened video capture object
    :raises ValueError: When OpenCV cannot open the file.
    """
    capture = cv2.VideoCapture(file_path)
    if capture.isOpened():
        return capture
    raise ValueError(f"Failed to open video file: {file_path}")
def METHOD_NAME(cap: cv2.VideoCapture, max_frames: Optional[int] = None) -> List[np.ndarray]:
    """Extract frames from an opened video capture object.

    :param cap: Opened video capture object.
    :param max_frames: Optional maximum number of frames to extract
                       (None reads the whole stream).
    :return: Frames representing the video, each in (H, W, C), RGB.
    """
    collected: List[np.ndarray] = []
    # When max_frames is None the comparison never succeeds, so we read
    # until the stream is exhausted.
    while len(collected) != max_frames:
        read_ok, bgr_frame = cap.read()
        if not read_ok:
            break
        collected.append(cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB))
    return collected
def save_video(output_path: str, frames: List[np.ndarray], fps: int) -> None:
    """Save a video locally, as .gif or .mp4 depending on the output extension.

    :param output_path: Where the video will be saved; '.mp4' is appended when
                        no known video extension is present.
    :param frames: Frames representing the video, each in (H, W, C), RGB. Note that all the frames are expected to have the same shape.
    :param fps: Frames per second
    """
    if not includes_video_extension(output_path):
        logger.info(f'Output path "{output_path}" does not have a video extension, and therefore will be saved as {output_path}.mp4')
        output_path += ".mp4"
    writer = save_gif if check_is_gif(output_path) else save_mp4
    writer(output_path, frames, fps)
def save_gif(output_path: str, frames: List[np.ndarray], fps: int) -> None:
    """Save a video locally in .gif format.

    :param output_path: Where the video will be saved
    :param frames: Frames representing the video, each in (H, W, C), RGB. Note that all the frames are expected to have the same shape.
    :param fps: Frames per second
    """
    images = [PIL.Image.fromarray(frame) for frame in frames]
    first_image, *remaining_images = images
    # duration is the per-frame display time in milliseconds; loop=0 repeats forever.
    first_image.save(output_path, save_all=True, append_images=remaining_images, duration=int(1000 / fps), loop=0)
def save_mp4(output_path: str, frames: List[np.ndarray], fps: int) -> None:
    """Save a video locally in .mp4 format.

    :param output_path: Where the video will be saved
    :param frames: Frames representing the video, each in (H, W, C), RGB. Note that all the frames are expected to have the same shape.
    :param fps: Frames per second
    """
    height, width = _validate_frames(frames)
    writer = cv2.VideoWriter(
        output_path,
        cv2.VideoWriter_fourcc(*"mp4v"),
        fps,
        (width, height),
    )
    # OpenCV writers expect BGR ordering, so convert back from RGB.
    for rgb_frame in frames:
        writer.write(cv2.cvtColor(rgb_frame, cv2.COLOR_RGB2BGR))
    writer.release()
def _validate_frames(frames: List[np.ndarray]) -> Tuple[float, float]:
"""Validate the frames to make sure that every frame has the same size and includes the channel dimension. (i.e. (H, W, C))
:param frames: Frames representing the video, each in (H, W, C), RGB. Note that all the frames are expected to have the same shape.
:return: (Height, Weight) of the video.
"""
min_height = min(frame.shape[0] for frame in frames)
max_height = max(frame.shape[0] for frame in frames)
min_width = min(frame.shape[1] for frame in frames)
max_width = max(frame.shape[1] for frame in frames)
if (min_height, min_width) != (max_height, max_width):
raise RuntimeError(
f"Your video is made of frames that have (height, width) going from ({min_height}, {min_width}) to ({max_height}, {max_width}).\n"
f"Please make sure that all the frames have the same shape."
)
if set(frame.ndim for frame in frames) != {3} or set(frame.shape[-1] for frame in frames) != {3}:
raise RuntimeError("Your frames must include 3 channels.")
return max_height, max_width
def show_video_from_disk(video_path: str, window_name: str = "Prediction"):
    """Display a video from disk using OpenCV.

    :param video_path: Path to the video file.
    :param window_name: Name of the window to display the video
    """
    cap = _open_video(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    while cap.isOpened():
        read_ok, frame = cap.read()
        if not read_ok:
            break
        cv2.imshow(window_name, frame)
        # Pace playback to the file's FPS; pressing 'q' stops early.
        if cv2.waitKey(int(1000 / fps)) & 0xFF == ord("q"):
            break
    # Release the VideoCapture object and destroy the window
    cap.release()
    cv2.destroyAllWindows()
    cv2.waitKey(1)
def show_video_from_frames(frames: List[np.ndarray], fps: float, window_name: str = "Prediction") -> None:
    """Display a video from a list of frames using OpenCV.

    :param frames: Frames representing the video, each in (H, W, C), RGB. Note that all the frames are expected to have the same shape.
    :param fps: Frames per second
    :param window_name: Name of the window to display the video
    """
    for rgb_frame in frames:
        # OpenCV displays BGR, so convert each RGB frame before showing it.
        cv2.imshow(window_name, cv2.cvtColor(rgb_frame, cv2.COLOR_RGB2BGR))
        cv2.waitKey(int(1000 / fps))
    cv2.destroyAllWindows()
    cv2.waitKey(1)
def includes_video_extension(file_path: str) -> bool:
    """Check if a file path ends with a known video extension.

    :param file_path: Path to the video file.
    :return: True if the path is a string ending in a video extension.
    """
    if not isinstance(file_path, str):
        return False
    return file_path.lower().endswith(VIDEO_EXTENSIONS)
def check_is_gif(file_path: str) -> bool:
    """Return True when *file_path* ends with '.gif' (case-insensitive)."""
    normalized = file_path.lower()
    return normalized.endswith(".gif")
"""Float tests
Made for Jython.
"""
import math
import sys
import unittest
from test import test_support
jython = test_support.is_jython
class FloatTestCase(unittest.TestCase):
    # NOTE(review): this suite targets Python 2 / Jython -- it relies on
    # cmp(), long(), assertRegexpMatches and the assert_/assertEquals
    # aliases, all of which are gone in Python 3.
    def test_float_repr(self):
        # repr() should emit the shortest string that round-trips the value.
        self.assertEqual(repr(12345678.000000005), '12345678.000000006')
        self.assertEqual(repr(12345678.0000000005), '12345678.0')
        self.assertRegexpMatches(repr(math.pi**-100), '1.927581416056020[0-9]e-50')
        self.assertEqual(repr(-1.0), '-1.0')
        self.assertEqual(repr(-9876.543210), '-9876.54321')
        self.assertEqual(repr(0.123456789e+35), '1.23456789e+34')
    def test_float_repr2(self):
        # Quite possibly these divergences result from JDK bug JDK-4511638:
        self.assertEqual(repr(9876.543210e+15),
                         jython and '9.876543209999999e+18' or '9.87654321e+18')
        self.assertEqual(repr(1235235235235240000.0),
                         jython and '1.2352352352352399e+18' or '1.23523523523524e+18')
    def METHOD_NAME(self):
        # str() rounds to fewer significant digits than repr(); the expected
        # strings below pin that 12-significant-digit behavior.
        self.assertEqual(str(12345678.000005), '12345678.0')
        self.assertEqual(str(12345678.00005), '12345678.0001')
        self.assertEqual(str(12345678.00005), '12345678.0001')
        self.assertEqual(str(12345678.0005), '12345678.0005')
        self.assertEqual(str(math.pi**-100), '1.92758141606e-50')
        self.assertEqual(str(0.0), '0.0')
        self.assertEqual(str(-1.0), '-1.0')
        self.assertEqual(str(-9876.543210), '-9876.54321')
        # Literals beyond float range collapse to +/-inf rather than raising.
        self.assertEqual(str(23456789012E666), 'inf')
        self.assertEqual(str(-23456789012E666), '-inf')
    def test_float_str_formatting(self):
        # %g trims trailing zeros; the '#' alternate form keeps them.
        self.assertEqual('%.13g' % 12345678.00005, '12345678.00005')
        self.assertEqual('%.12g' % 12345678.00005, '12345678.0001')
        self.assertEqual('%.11g' % 12345678.00005, '12345678')
        self.assertEqual('%.12g' % math.pi**-100, '1.92758141606e-50')
        self.assertEqual('%.5g' % 123.005, '123')
        self.assertEqual('%#.5g' % 123.005, '123.00')
        self.assertEqual('%#g' % 0.001, '0.00100000')
        self.assertEqual('%#.5g' % 0.001, '0.0010000')
        self.assertEqual('%#.1g' % 0.0001, '0.0001')
        self.assertEqual('%#.4g' % 100, '100.0')
        self.assertEqual('%#.4g' % 100.25, '100.2')
        self.assertEqual('%g' % 0.00001, '1e-05')
        self.assertEqual('%#g' % 0.00001, '1.00000e-05')
        self.assertEqual('%e' % -400.0, '-4.000000e+02')
        # %g switches to exponent notation once precision is exceeded.
        self.assertEqual('%.2g' % 99, '99')
        self.assertEqual('%.2g' % 100, '1e+02')
    def test_overflow(self):
        # A 600-digit integer fits in a Python long but not a float.
        shuge = '12345' * 120
        shuge_float = float(shuge)
        shuge_int = int(shuge)
        self.assertRaises(OverflowError, float, shuge_int)
        self.assertRaises(OverflowError, int, shuge_float)
        # and cmp should not overflow
        self.assertNotEqual(0.1, shuge_int)
    def test_nan(self):
        nan = float('nan')
        self.assert_(type(nan), float)
        if jython:
            # support Java syntax
            self.assert_(type(float('NaN')), float)
        # NaN compares unequal to everything, including itself.
        self.assertNotEqual(nan, float('nan'))
        self.assertNotEqual(nan, nan)
        self.assertEqual(cmp(nan, float('nan')), 1)
        self.assertEqual(cmp(nan, nan), 0)
        for i in (-1, 1, -1.0, 1.0):
            self.assertEqual(cmp(nan, i), -1)
            self.assertEqual(cmp(i, nan), 1)
    def test_infinity(self):
        self.assert_(type(float('Infinity')), float)
        self.assert_(type(float('inf')), float)
        # Converting infinity to a (Python 2) long must overflow.
        self.assertRaises(OverflowError, long, float('Infinity'))
    def test_minus_zero(self):
        # Some operations confused by -0.0
        mz = float('-0.0')
        # -0.0 equals 0.0 but keeps its sign in repr(); abs() strips it.
        self.assertEquals(mz, 0.)
        self.assertEquals(repr(mz)[0], '-')
        self.assertEquals(repr(abs(mz))[0], '0')
    def test_float_none(self):
        self.assertRaises(TypeError, float, None)
    def test_pow(self):
        # __rpow__ on the right operand must be honored by the ** operator.
        class Foo(object):
            def __rpow__(self, other):
                return other ** 2
        self.assertEqual(4.0 ** Foo(), 16.0)  # regression in 2.5 alphas
        # float.__pow__ accepts an explicit None modulus.
        self.assertEqual((4.0).__pow__(2, None), 16.0)
    def test_faux(self):
        # Objects exposing __float__ should be accepted wherever floats are.
        class F(object):
            def __float__(self):
                return 1.6
        self.assertEqual(math.cos(1.6), math.cos(F()))
def test_main():
    """Run the FloatTestCase suite through the test_support harness."""
    test_support.run_unittest(FloatTestCase)
if __name__ == '__main__':
    test_main()
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Contributors to the Ansible project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: elb_classic_lb_info
version_added: 1.0.0
short_description: Gather information about EC2 Elastic Load Balancers in AWS
description:
- Gather information about EC2 Elastic Load Balancers in AWS
author:
- "Michael Schultz (@mjschultz)"
- "Fernando Jose Pando (@nand0p)"
options:
names:
description:
- List of ELB names to gather information about. Pass this option to gather information about a set of ELBs, otherwise, all ELBs are returned.
type: list
elements: str
default: []
extends_documentation_fragment:
- amazon.aws.common.modules
- amazon.aws.region.modules
- amazon.aws.boto3
"""
EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Output format tries to match amazon.aws.ec2_elb_lb module input parameters
# Gather information about all ELBs
- community.aws.elb_classic_lb_info:
register: elb_info
- ansible.builtin.debug:
msg: "{{ item.dns_name }}"
loop: "{{ elb_info.elbs }}"
# Gather information about a particular ELB
- community.aws.elb_classic_lb_info:
names: frontend-prod-elb
register: elb_info
- ansible.builtin.debug:
msg: "{{ elb_info.elbs.0.dns_name }}"
# Gather information about a set of ELBs
- community.aws.elb_classic_lb_info:
names:
- frontend-prod-elb
- backend-prod-elb
register: elb_info
- ansible.builtin.debug:
msg: "{{ item.dns_name }}"
loop: "{{ elb_info.elbs }}"
"""
RETURN = r"""
elbs:
description: a list of load balancers
returned: always
type: list
sample:
elbs:
- attributes:
access_log:
enabled: false
connection_draining:
enabled: true
timeout: 300
connection_settings:
idle_timeout: 60
cross_zone_load_balancing:
enabled: true
availability_zones:
- "us-east-1a"
- "us-east-1b"
- "us-east-1c"
- "us-east-1d"
- "us-east-1e"
backend_server_description: []
canonical_hosted_zone_name: test-lb-XXXXXXXXXXXX.us-east-1.elb.amazonaws.com
canonical_hosted_zone_name_id: XXXXXXXXXXXXXX
created_time: '2017-08-23T18:25:03.280000+00:00'
dns_name: test-lb-XXXXXXXXXXXX.us-east-1.elb.amazonaws.com
health_check:
healthy_threshold: 10
interval: 30
target: HTTP:80/index.html
timeout: 5
unhealthy_threshold: 2
instances: []
instances_inservice: []
instances_inservice_count: 0
instances_outofservice: []
instances_outofservice_count: 0
instances_unknownservice: []
instances_unknownservice_count: 0
listener_descriptions:
- listener:
instance_port: 80
instance_protocol: HTTP
load_balancer_port: 80
protocol: HTTP
policy_names: []
load_balancer_name: test-lb
policies:
app_cookie_stickiness_policies: []
lb_cookie_stickiness_policies: []
other_policies: []
scheme: internet-facing
security_groups:
- sg-29d13055
source_security_group:
group_name: default
owner_alias: XXXXXXXXXXXX
subnets:
- subnet-XXXXXXXX
- subnet-XXXXXXXX
tags: {}
vpc_id: vpc-c248fda4
"""
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
MAX_AWS_RETRIES = 5
MAX_AWS_DELAY = 5
def list_elbs(connection, load_balancer_names):
    """Describe the named classic ELBs, or every ELB when no names are given."""
    descriptions = []
    if not load_balancer_names:
        # No filter: describe everything the paginator returns.
        for balancer in get_all_lb(connection):
            descriptions.append(describe_elb(connection, balancer))
    for name in load_balancer_names:
        balancer = METHOD_NAME(connection, name)
        # Missing load balancers are silently skipped.
        if balancer:
            descriptions.append(describe_elb(connection, balancer))
    return descriptions
def describe_elb(connection, lb):
    """Build a snake_cased description dict for one load balancer, enriched
    with tags, per-state instance health, and attributes."""
    name = lb["LoadBalancerName"]
    instances = lb.get("Instances", [])
    description = camel_dict_to_snake_dict(lb)
    description["tags"] = get_tags(connection, name)
    # One health query per state; each yields (member ids, member count).
    state_to_key = (
        ("InService", "inservice"),
        ("OutOfService", "outofservice"),
        ("Unknown", "unknownservice"),
    )
    for state, key in state_to_key:
        members, count = lb_instance_health(connection, name, instances, state)
        description["instances_%s" % key] = members
        description["instances_%s_count" % key] = count
    description["attributes"] = get_lb_attributes(connection, name)
    return description
@AWSRetry.jittered_backoff()
def get_all_lb(connection):
    """Return every classic ELB description, following pagination to the end."""
    pages = connection.get_paginator("describe_load_balancers").paginate()
    return pages.build_full_result()["LoadBalancerDescriptions"]
def METHOD_NAME(connection, load_balancer_name):
    """Describe one classic ELB by name; return an empty list when it does not exist."""
    try:
        response = connection.describe_load_balancers(
            aws_retry=True, LoadBalancerNames=[load_balancer_name]
        )
        return response["LoadBalancerDescriptions"][0]
    except is_boto3_error_code("LoadBalancerNotFound"):
        # Falsy sentinel so callers can simply skip missing balancers.
        return []
def get_lb_attributes(connection, load_balancer_name):
    """Return the load balancer's attributes as a snake_cased dict."""
    response = connection.describe_load_balancer_attributes(
        aws_retry=True, LoadBalancerName=load_balancer_name
    )
    return camel_dict_to_snake_dict(response.get("LoadBalancerAttributes", {}))
def get_tags(connection, load_balancer_name):
    """Return the load balancer's tags as a plain dict (empty when untagged)."""
    descriptions = connection.describe_tags(
        aws_retry=True, LoadBalancerNames=[load_balancer_name]
    )["TagDescriptions"]
    if not descriptions:
        return {}
    return boto3_tag_list_to_ansible_dict(descriptions[0]["Tags"])
def lb_instance_health(connection, load_balancer_name, instances, state):
    """List the instance IDs whose health matches *state*, plus their count.

    :param connection: boto3 ELB client (wrapped with the module's retry decorator).
    :param load_balancer_name: Name of the classic load balancer.
    :param instances: Instance dicts from the balancer description.
    :param state: Health state to match ('InService', 'OutOfService', 'Unknown').
    :return: Tuple of (matching instance ids, count).
    """
    # CONSISTENCY FIX: this was the only API call in the module made without
    # aws_retry=True; every sibling call opts into the retry wrapper.
    instance_states = connection.describe_instance_health(
        aws_retry=True, LoadBalancerName=load_balancer_name, Instances=instances
    ).get("InstanceStates", [])
    instate = [instance["InstanceId"] for instance in instance_states if instance["State"] == state]
    return instate, len(instate)
def main():
    """Ansible module entry point: gather classic ELB info and exit with results."""
    argument_spec = dict(
        names=dict(default=[], type="list", elements="str"),
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,  # read-only module, safe to run in check mode
    )
    # All API calls through this client retry with jittered backoff.
    connection = module.client(
        "elb", retry_decorator=AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES, delay=MAX_AWS_DELAY)
    )
    try:
        elbs = list_elbs(connection, module.params.get("names"))
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to get load balancer information.")
    module.exit_json(elbs=elbs)
if __name__ == "__main__":
    main()
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test library of multi-controlled multi-target circuits."""
import unittest
from ddt import ddt, data, unpack
import numpy as np
from qiskit.test.base import QiskitTestCase
from qiskit.exceptions import QiskitError
from qiskit.circuit import QuantumCircuit, QuantumRegister
from qiskit.circuit.library import MCMT, MCMTVChain, CHGate, XGate, ZGate, CXGate, CZGate
from qiskit.quantum_info import Statevector
from qiskit.quantum_info.states import state_fidelity
@ddt
class TestMCMT(QiskitTestCase):
    """Test the multi-controlled multi-target circuit."""

    @data(MCMT, MCMTVChain)
    def test_mcmt_as_normal_control(self, mcmt_class):
        """Test that the MCMT can act as normal control gate."""
        qc = QuantumCircuit(2)
        mcmt = mcmt_class(gate=CHGate(), num_ctrl_qubits=1, num_target_qubits=1)
        qc = qc.compose(mcmt, [0, 1])
        # With one control and one target, MCMT must equal a plain CH gate.
        ref = QuantumCircuit(2)
        ref.ch(0, 1)
        self.assertEqual(qc, ref)

    def test_missing_qubits(self):
        """Test that an error is raised if qubits are missing."""
        with self.subTest(msg="no control qubits"):
            with self.assertRaises(AttributeError):
                _ = MCMT(XGate(), num_ctrl_qubits=0, num_target_qubits=1)
        with self.subTest(msg="no target qubits"):
            with self.assertRaises(AttributeError):
                _ = MCMT(ZGate(), num_ctrl_qubits=4, num_target_qubits=0)

    def test_different_gate_types(self):
        """Test the different supported input types for the target gate."""
        x_circ = QuantumCircuit(1)
        x_circ.x(0)
        # Circuit, unbound circuit methods, string names, and Gate instances
        # must all be accepted and normalized to the same X gate.
        for input_gate in [x_circ, QuantumCircuit.cx, QuantumCircuit.x, "cx", "x", CXGate()]:
            with self.subTest(input_gate=input_gate):
                mcmt = MCMT(input_gate, 2, 2)
                if isinstance(input_gate, QuantumCircuit):
                    self.assertEqual(mcmt.gate.definition[0].operation, XGate())
                    self.assertEqual(len(mcmt.gate.definition), 1)
                else:
                    self.assertEqual(mcmt.gate, XGate())

    def test_mcmt_v_chain_ancilla_test(self):
        """Test too few and too many ancillas for the MCMT V-chain mode."""
        # MCMTVChain(Z, 3, 1) needs exactly 3 + 1 + 2 = 6 qubits.
        with self.subTest(msg="insufficient number of auxiliary qubits on gate"):
            qc = QuantumCircuit(5)
            mcmt = MCMTVChain(ZGate(), 3, 1)
            with self.assertRaises(QiskitError):
                qc.append(mcmt, range(5))
        with self.subTest(msg="too many auxiliary qubits on gate"):
            qc = QuantumCircuit(9)
            mcmt = MCMTVChain(ZGate(), 3, 1)
            with self.assertRaises(QiskitError):
                qc.append(mcmt, range(9))

    @data(
        [CZGate(), 1, 1],
        [CHGate(), 1, 1],
        [CZGate(), 3, 3],
        [CHGate(), 3, 3],
        [CZGate(), 1, 5],
        [CHGate(), 1, 5],
        [CZGate(), 5, 1],
        [CHGate(), 5, 1],
    )
    @unpack
    def METHOD_NAME(self, cgate, num_controls, num_targets):
        """Test the MCMT V-chain implementation test on a simulation."""
        controls = QuantumRegister(num_controls)
        targets = QuantumRegister(num_targets)

        # Every subset of activated controls is checked; only the full set
        # should trigger the target operation.
        subsets = [tuple(range(i)) for i in range(num_controls + 1)]
        for subset in subsets:
            qc = QuantumCircuit(targets, controls)
            # Initialize all targets to 1, just to be sure that
            # the generic gate has some effect (f.e. Z gate has no effect
            # on a 0 state)
            qc.x(targets)

            # V-chain mode requires num_controls - 1 ancilla qubits.
            num_ancillas = max(0, num_controls - 1)

            if num_ancillas > 0:
                ancillas = QuantumRegister(num_ancillas)
                qc.add_register(ancillas)
                qubits = controls[:] + targets[:] + ancillas[:]
            else:
                qubits = controls[:] + targets[:]

            for i in subset:
                qc.x(controls[i])
            mcmt = MCMTVChain(cgate, num_controls, num_targets)
            qc.compose(mcmt, qubits, inplace=True)
            # Uncompute the control preparation so only the MCMT effect remains.
            for i in subset:
                qc.x(controls[i])

            vec = Statevector.from_label("0" * qc.num_qubits).evolve(qc)

            # target register is initially |11...1>, with length equal to 2**(n_targets)
            vec_exp = np.array([0] * (2 ** (num_targets) - 1) + [1])

            if isinstance(cgate, CZGate):
                # Z gate flips the last qubit only if it's applied an odd number of times
                if len(subset) == num_controls and (num_controls % 2) == 1:
                    vec_exp[-1] = -1
            elif isinstance(cgate, CHGate):
                # if all the control qubits have been activated,
                # we repeatedly apply the kronecker product of the Hadamard
                # with itself and then multiply the results for the original
                # state of the target qubits
                if len(subset) == num_controls:
                    h_i = 1 / np.sqrt(2) * np.array([[1, 1], [1, -1]])
                    h_tot = np.array([1])
                    for _ in range(num_targets):
                        h_tot = np.kron(h_tot, h_i)
                    vec_exp = np.dot(h_tot, vec_exp)
            else:
                raise ValueError(f"Test not implement for gate: {cgate}")

            # append the remaining part of the state
            vec_exp = np.concatenate(
                (vec_exp, [0] * (2 ** (num_controls + num_ancillas + num_targets) - vec_exp.size))
            )
            f_i = state_fidelity(vec, vec_exp)
            self.assertAlmostEqual(f_i, 1)
if __name__ == "__main__":
    unittest.main()
#
# SPDX-License-Identifier: MIT
#
import os
import re
import oeqa.utils.ftools as ftools
from oeqa.utils.commands import runCmd, get_bb_var, get_bb_vars
from oeqa.selftest.case import OESelftestTestCase
class BitbakeLayers(OESelftestTestCase):
def test_bitbakelayers_layerindexshowdepends(self):
result = runCmd('bitbake-layers layerindex-show-depends meta-poky')
find_in_contents = re.search("openembedded-core", result.output)
self.assertTrue(find_in_contents, msg = "openembedded-core should have been listed at this step. bitbake-layers layerindex-show-depends meta-poky output: %s" % result.output)
def test_bitbakelayers_showcrossdepends(self):
result = runCmd('bitbake-layers show-cross-depends')
self.assertIn('aspell', result.output)
def test_bitbakelayers_showlayers(self):
result = runCmd('bitbake-layers show-layers')
self.assertIn('meta-selftest', result.output)
def METHOD_NAME(self):
recipe = "xcursor-transparent-theme"
bb_file = self.get_recipe_basename(recipe)
result = runCmd('bitbake-layers show-appends')
self.assertIn(bb_file, result.output)
def test_bitbakelayers_showoverlayed(self):
result = runCmd('bitbake-layers show-overlayed')
self.assertIn('aspell', result.output)
def test_bitbakelayers_flatten(self):
recipe = "xcursor-transparent-theme"
recipe_path = "recipes-graphics/xcursor-transparent-theme"
recipe_file = self.get_recipe_basename(recipe)
testoutdir = os.path.join(self.builddir, 'test_bitbakelayers_flatten')
self.assertFalse(os.path.isdir(testoutdir), msg = "test_bitbakelayers_flatten should not exist at this point in time")
self.track_for_cleanup(testoutdir)
result = runCmd('bitbake-layers flatten %s' % testoutdir)
bb_file = os.path.join(testoutdir, recipe_path, recipe_file)
self.assertTrue(os.path.isfile(bb_file), msg = "Cannot find xcursor-transparent-theme_0.1.1.bb in the test_bitbakelayers_flatten local dir.")
contents = ftools.read_file(bb_file)
find_in_contents = re.search("##### bbappended from meta-selftest #####\n(.*\n)*include test_recipe.inc", contents)
self.assertTrue(find_in_contents, msg = "Flattening layers did not work. bitbake-layers flatten output: %s" % result.output)
def test_bitbakelayers_add_remove(self):
test_layer = os.path.join(get_bb_var('COREBASE'), 'meta-skeleton')
result = runCmd('bitbake-layers show-layers')
self.assertNotIn('meta-skeleton', result.output, "This test cannot run with meta-skeleton in bblayers.conf. bitbake-layers show-layers output: %s" % result.output)
result = runCmd('bitbake-layers add-layer %s' % test_layer)
result = runCmd('bitbake-layers show-layers')
self.assertIn('meta-skeleton', result.output, msg = "Something wrong happened. meta-skeleton layer was not added to conf/bblayers.conf. bitbake-layers show-layers output: %s" % result.output)
result = runCmd('bitbake-layers remove-layer %s' % test_layer)
result = runCmd('bitbake-layers show-layers')
self.assertNotIn('meta-skeleton', result.output, msg = "meta-skeleton should have been removed at this step. bitbake-layers show-layers output: %s" % result.output)
result = runCmd('bitbake-layers add-layer %s' % test_layer)
result = runCmd('bitbake-layers show-layers')
self.assertIn('meta-skeleton', result.output, msg = "Something wrong happened. meta-skeleton layer was not added to conf/bblayers.conf. bitbake-layers show-layers output: %s" % result.output)
result = runCmd('bitbake-layers remove-layer */meta-skeleton')
result = runCmd('bitbake-layers show-layers')
self.assertNotIn('meta-skeleton', result.output, msg = "meta-skeleton should have been removed at this step. bitbake-layers show-layers output: %s" % result.output)
def test_bitbakelayers_showrecipes(self):
result = runCmd('bitbake-layers show-recipes')
self.assertIn('aspell:', result.output)
self.assertIn('mtd-utils:', result.output)
self.assertIn('core-image-minimal:', result.output)
result = runCmd('bitbake-layers show-recipes mtd-utils')
self.assertIn('mtd-utils:', result.output)
self.assertNotIn('aspell:', result.output)
result = runCmd('bitbake-layers show-recipes -i image')
self.assertIn('core-image-minimal', result.output)
self.assertNotIn('mtd-utils:', result.output)
result = runCmd('bitbake-layers show-recipes -i cmake,pkgconfig')
self.assertIn('libproxy:', result.output)
self.assertNotIn('mtd-utils:', result.output) # doesn't inherit either
self.assertNotIn('wget:', result.output) # doesn't inherit cmake
self.assertNotIn('waffle:', result.output) # doesn't inherit pkgconfig
result = runCmd('bitbake-layers show-recipes -i nonexistentclass', ignore_status=True)
self.assertNotEqual(result.status, 0, 'bitbake-layers show-recipes -i nonexistentclass should have failed')
self.assertIn('ERROR:', result.output)
    def test_bitbakelayers_createlayer(self):
        """Create a layer with `bitbake-layers create-layer`, add it, and verify its metadata variables."""
        priority = 10
        layername = 'test-bitbakelayer-layercreate'
        layerpath = os.path.join(self.builddir, layername)
        self.assertFalse(os.path.exists(layerpath), '%s should not exist at this point in time' % layerpath)
        result = runCmd('bitbake-layers create-layer --priority=%d %s' % (priority, layerpath))
        self.track_for_cleanup(layerpath)
        result = runCmd('bitbake-layers add-layer %s' % layerpath)
        self.add_command_to_tearDown('bitbake-layers remove-layer %s' % layerpath)
        result = runCmd('bitbake-layers show-layers')
        # show-layers prints "<name> <path> <priority>" — match that row.
        find_in_contents = re.search(re.escape(layername) + r'\s+' + re.escape(layerpath) + r'\s+' + re.escape(str(priority)), result.output)
        self.assertTrue(find_in_contents, "%s not found in layers\n%s" % (layername, result.output))
        # The generated layer.conf must define all the standard layer variables.
        layervars = ['BBFILE_PRIORITY', 'BBFILE_PATTERN', 'LAYERDEPENDS', 'LAYERSERIES_COMPAT']
        bb_vars = get_bb_vars(['BBFILE_COLLECTIONS'] + ['%s_%s' % (v, layername) for v in layervars])
        for v in layervars:
            varname = '%s_%s' % (v, layername)
            self.assertIsNotNone(bb_vars[varname], "%s not found" % varname)
        # The collection name must appear as a whitespace-delimited token.
        find_in_contents = re.search(r'(^|\s)' + re.escape(layername) + r'($|\s)', bb_vars['BBFILE_COLLECTIONS'])
        self.assertTrue(find_in_contents, "%s not in BBFILE_COLLECTIONS" % layername)
        self.assertEqual(bb_vars['BBFILE_PRIORITY_%s' % layername], str(priority), 'BBFILE_PRIORITY_%s != %d' % (layername, priority))
def get_recipe_basename(self, recipe):
recipe_file = ""
result = runCmd("bitbake-layers show-recipes -f %s" % recipe)
for line in result.output.splitlines():
if recipe in line:
recipe_file = line
break
self.assertTrue(os.path.isfile(recipe_file), msg = "Can't find recipe file for %s" % recipe)
return os.path.basename(recipe_file) |
"""Serializable configuration classes for specifying all training job parameters.
These configuration classes are intended to specify all the parameters required to run
a training job or perform inference from a serialized one.
They are explicitly not intended to implement any of the underlying functionality that
they parametrize. This serves two purposes:
1. Parameter specification through simple attributes. These can be read/edited by a
human, as well as easily be serialized/deserialized to/from simple dictionaries
and JSON.
2. Decoupling from the implementation. This makes it easier to design functional
modules with attributes/parameters that contain objects that may not be easily
serializable or may implement additional logic that relies on runtime
information or other parameters.
In general, classes that implement the actual functionality related to these
configuration classes should provide a classmethod for instantiation from the
configuration class instances. This makes it easier to implement other logic not related
to the high level parameters at creation time.
Conveniently, this format also provides a single location where all user-facing
parameters are aggregated and documented for end users (as opposed to developers).
"""
import os
import attr
import cattr
import sleap
from sleap.nn.config.data import DataConfig
from sleap.nn.config.model import ModelConfig
from sleap.nn.config.optimization import OptimizationConfig
from sleap.nn.config.outputs import OutputsConfig
import json
from jsmin import jsmin
from typing import Text, Dict, Any, Optional
@attr.s(auto_attribs=True)
class TrainingJobConfig:
    """Configuration of a training job.

    Attributes:
        data: Configuration options related to the training data.
        model: Configuration options related to the model architecture.
        optimization: Configuration options related to the training.
        outputs: Configuration options related to outputs during training.
        name: Optional name for this configuration profile.
        description: Optional description of the configuration.
        sleap_version: Version of SLEAP that generated this configuration.
        filename: Path to this config file if it was loaded from disk.
    """

    data: DataConfig = attr.ib(factory=DataConfig)
    model: ModelConfig = attr.ib(factory=ModelConfig)
    optimization: OptimizationConfig = attr.ib(factory=OptimizationConfig)
    outputs: OutputsConfig = attr.ib(factory=OutputsConfig)
    name: Optional[Text] = ""
    description: Optional[Text] = ""
    sleap_version: Optional[Text] = sleap.__version__
    filename: Optional[Text] = ""

    @classmethod
    def from_json_dicts(cls, json_data_dicts: Dict[Text, Any]) -> "TrainingJobConfig":
        """Create training job configuration from dictionaries decoded from JSON.

        Arguments:
            json_data_dicts: Dictionaries that specify the configurations. These are
                typically generated by structuring raw JSON formatted text.

        Returns:
            A TrainingJobConfig instance parsed from the JSON dicts.
        """
        # TODO: Detect and parse legacy training job format.
        return cattr.structure(json_data_dicts, cls)

    @classmethod
    def from_json(cls, json_data: Text) -> "TrainingJobConfig":
        """Create training job configuration from JSON text data.

        Arguments:
            json_data: JSON-formatted string that specifies the configurations.

        Returns:
            A TrainingJobConfig instance parsed from the JSON text.
        """
        # jsmin strips comments first, so commented-up profiles still parse.
        json_data_dicts = json.loads(jsmin(json_data))
        return cls.from_json_dicts(json_data_dicts)

    @classmethod
    def load_json(
        cls, filename: Text, load_training_config: bool = True
    ) -> "TrainingJobConfig":
        """Load a training job configuration from a file.

        Arguments:
            filename: Path to a training job configuration JSON file or a directory
                containing `"training_config.json"`.
            load_training_config: If `True` (the default), prefer
                `training_config.json` over `initial_config.json` if it is present
                in the same folder.

        Returns:
            A TrainingJobConfig instance parsed from the file.
        """
        # Prefer the final training config over the initial one when requested.
        if load_training_config and filename.endswith("initial_config.json"):
            training_config_path = os.path.join(
                os.path.dirname(filename), "training_config.json"
            )
            if os.path.exists(training_config_path):
                filename = training_config_path
        # Use stored configuration if a directory was provided.
        if os.path.isdir(filename):
            filename = os.path.join(filename, "training_config.json")
        # Open and read the JSON data.
        with open(filename, "r") as f:
            json_data = f.read()
        obj = cls.from_json(json_data)
        # Record the source path so the config can be traced/saved back later.
        obj.filename = filename
        return obj

    def to_json(self) -> str:
        """Serialize the configuration into JSON-encoded string format.

        Returns:
            The JSON encoded string representation of the configuration.
        """
        json_dicts = cattr.unstructure(self)
        return json.dumps(json_dicts, indent=4)

    def METHOD_NAME(self, filename: Text):
        """Save the configuration to a JSON file.

        Arguments:
            filename: Path to save the training job file to.
        """
        # Remember where this config was written before serializing it.
        self.filename = filename
        with open(filename, "w") as f:
            f.write(self.to_json())
def load_config(filename: Text, load_training_config: bool = True) -> TrainingJobConfig:
    """Load a training job configuration for a model run.

    Thin module-level convenience wrapper around
    `TrainingJobConfig.load_json`.

    Args:
        filename: Path to a JSON file or directory containing
            `training_config.json`.
        load_training_config: If `True` (the default), prefer
            `training_config.json` over `initial_config.json` if it is present
            in the same folder.

    Returns:
        The parsed `TrainingJobConfig`.
    """
    return TrainingJobConfig.load_json(
        filename, load_training_config=load_training_config
    )
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Optional, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class StorageImportExportConfiguration(Configuration):  # pylint: disable=too-many-instance-attributes
    """Configuration for StorageImportExport.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The subscription ID for the Azure user. Required.
    :type subscription_id: str
    :param accept_language: Specifies the preferred language for the response. Default value is
     None.
    :type accept_language: str
    :keyword api_version: Api Version. Default value is "2021-01-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(
        self, credential: "TokenCredential", subscription_id: str, accept_language: Optional[str] = None, **kwargs: Any
    ) -> None:
        super(StorageImportExportConfiguration, self).__init__(**kwargs)
        api_version = kwargs.pop("api_version", "2021-01-01")  # type: Literal["2021-01-01"]
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        self.credential = credential
        self.subscription_id = subscription_id
        self.accept_language = accept_language
        self.api_version = api_version
        # Default scope targets the public ARM endpoint; callers may override.
        self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
        kwargs.setdefault("sdk_moniker", "mgmt-storageimportexport/{}".format(VERSION))
        self.METHOD_NAME(**kwargs)

    def METHOD_NAME(
        self, **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Build the HTTP pipeline policies, honoring any caller-supplied overrides."""
        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get("authentication_policy")
        # Default to ARM challenge auth only when a credential exists and the
        # caller did not supply an authentication policy of their own.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = ARMChallengeAuthenticationPolicy(
                self.credential, *self.credential_scopes, **kwargs
            )
import io
from lxml import etree
import asyncio
from flask import render_template, current_app as app
from markupsafe import Markup
from pyppeteer.launcher import launch
import barcode
from barcode.writer import ImageWriter, SVGWriter
import segno
from segno import helpers
from main import external_url
from models import event_year
from models.product import Product, ProductGroup, PriceTier
from models.purchase import Purchase, PurchaseTransfer
RECEIPT_TYPES = ["admissions", "parking", "campervan", "tees", "hire"]
def render_receipt(user, png=False, pdf=False):
    """Render the receipt HTML page for *user*'s paid purchases.

    Arguments:
        user: Account whose paid purchases and outgoing transfers are listed.
        png: Passed through to the template (barcode/QR rendering mode).
        pdf: Passed through to the template (PDF-output rendering mode).
    """
    # All paid purchases, joined through the product hierarchy so they can be
    # filtered by product-group type below.
    purchases = (
        user.owned_purchases.filter_by(is_paid_for=True)
        .join(PriceTier, Product, ProductGroup)
        .with_entities(Purchase)
        .order_by(Purchase.id)
    )
    admissions = purchases.filter(ProductGroup.type == "admissions").all()
    vehicle_tickets = purchases.filter(
        ProductGroup.type.in_(["parking", "campervan"])
    ).all()
    tees = purchases.filter(ProductGroup.type == "tees").all()
    hires = purchases.filter(ProductGroup.type == "hire").all()
    # Tickets this user has transferred away; still shown on the receipt.
    transferred_tickets = (
        user.transfers_from.join(Purchase)
        .filter_by(state="paid")
        .with_entities(PurchaseTransfer)
        .order_by("timestamp")
        .all()
    )
    return render_template(
        "receipt.html",
        user=user,
        format_inline_qr=format_inline_qr,
        format_inline_barcode=format_inline_barcode,
        admissions=admissions,
        vehicle_tickets=vehicle_tickets,
        transferred_tickets=transferred_tickets,
        tees=tees,
        hires=hires,
        pdf=pdf,
        png=png,
    )
def render_pdf(url, html):
    """Render *html* to an A4 PDF via headless Chromium and return it as BytesIO.

    The page is loaded by navigating to *url* but the response for that exact
    URL is replaced with *html*; all other requests (CSS, images, ...) are
    fetched normally.
    """
    # This needs to fetch URLs found within the page, so if
    # you're running a dev server, use app.run(processes=2)
    async def to_pdf():
        browser = await launch(
            # Handlers don't work as we're not in the main thread.
            handleSIGINT=False,
            handleSIGTERM=False,
            handleSIGHUP=False,
            # --no-sandbox is necessary as we're running as root (in docker!)
            args=["--no-sandbox"],
        )
        page = await browser.newPage()

        async def request_intercepted(request):
            app.logger.debug("Intercepted URL: %s", request.url)
            if request.url == url:
                # Serve our pre-rendered HTML instead of hitting the server.
                await request.respond({"body": html})
            else:
                await request.continue_()

        page.on("request", lambda r: asyncio.ensure_future(request_intercepted(r)))
        await page.setRequestInterception(True)
        await page.goto(url)
        pdf = await page.pdf(format="A4")
        await browser.close()
        return pdf

    # Run the coroutine on a fresh event loop (we may not be on the main thread).
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    pdf = loop.run_until_complete(to_pdf())
    pdffile = io.BytesIO(pdf)
    return pdffile
def make_qrfile(data, **kwargs):
    """Encode *data* as a QR code and return it in a rewound BytesIO buffer.

    Keyword arguments are forwarded verbatim to segno's ``save``.
    """
    buffer = io.BytesIO()
    segno.make_qr(data).save(buffer, **kwargs)
    buffer.seek(0)
    return buffer
def make_epc_qrfile(payment, **kwargs):
    """Build an EPC (SEPA credit transfer) QR code for *payment* as a rewound BytesIO.

    Keyword arguments are forwarded to segno's ``save``. Note the beneficiary
    name and IBAN are placeholders — see the TODO below.
    """
    qrfile = io.BytesIO()
    # TODO: this isn't currently used. Need to fetch IBAN from payment.recommended_destination
    # and name from somewhere - maybe config rather than hard-coding.
    qr = helpers.make_epc_qr(
        name="FIXME FIXME FIXME",
        iban="FIXME FIXME FIXME",
        amount=payment.amount,
        reference=payment.bankref,
        encoding=1,
    )
    qr.save(qrfile, **kwargs)
    qrfile.seek(0)
    return qrfile
def qrfile_to_svg(qrfile):
    """Decode a UTF-8 SVG buffer into Markup safe for inline embedding."""
    svg_text = qrfile.getvalue().decode("utf-8")
    return Markup(svg_text)
def format_inline_qr(data):
    """Render *data* as an SVG QR fragment suitable for inlining into HTML."""
    # Strip the XML declaration, namespace, size and class so the fragment can
    # be dropped straight into the page and scaled with CSS.
    svg_options = dict(
        kind="svg",
        svgclass=None,
        omitsize=True,
        xmldecl=False,
        svgns=False,
        nl=False,
    )
    return qrfile_to_svg(make_qrfile(data, **svg_options))
def METHOD_NAME(payment):
    """Render an EPC (SEPA) payment QR code as an inline SVG fragment."""
    # Same bare-SVG options as format_inline_qr: no declaration, namespace,
    # size attributes or class, so the fragment inlines and scales cleanly.
    svg_options = dict(
        kind="svg",
        svgclass=None,
        omitsize=True,
        xmldecl=False,
        svgns=False,
        nl=False,
    )
    return qrfile_to_svg(make_epc_qrfile(payment, **svg_options))
def make_qr_png(url):
    """Return *url* encoded as a small PNG QR code in a rewound BytesIO buffer."""
    png_buffer = make_qrfile(url, kind="png", scale=3)
    return png_buffer
def format_inline_barcode(data):
    """Render *data* as a Code 128 barcode SVG fragment for inline embedding."""
    barcodefile = io.BytesIO()
    # data is written into the SVG without a CDATA, so base64 encode it
    code128 = barcode.get("code128", data, writer=SVGWriter())
    code128.write(barcodefile, {"write_text": False})
    barcodefile.seek(0)
    root = etree.XML(barcodefile.read())
    # Allow us to scale it with CSS
    root.attrib["viewBox"] = "0 0 %s %s" % (root.attrib["width"], root.attrib["height"])
    del root.attrib["width"]
    del root.attrib["height"]
    root.attrib["preserveAspectRatio"] = "none"
    return Markup(etree.tostring(root).decode("utf-8"))
def make_barcode_png(data, **options):
    """Return *data* as a Code 128 barcode PNG in a rewound BytesIO buffer.

    NOTE(review): **options is accepted but currently ignored — the writer
    options are hard-coded below; confirm whether callers rely on passing any.
    """
    barcodefile = io.BytesIO()
    code128 = barcode.get("code128", data, writer=ImageWriter())
    # Sizes here are the ones used in the PDF
    code128.write(barcodefile, {"write_text": False, "module_height": 8})
    barcodefile.seek(0)
    return barcodefile
def attach_tickets(msg, user):
    """Render *user*'s receipt to PDF and attach it to the mail message *msg*."""
    # Attach tickets to a mail Message
    page = render_receipt(user, pdf=True)
    url = external_url("tickets.receipt", user_id=user.id)
    pdf = render_pdf(url, page)
    msg.attach("EMF{}.pdf".format(event_year()), pdf.read(), "application/pdf")
def set_tickets_emailed(user):
    """Mark all of *user*'s paid receipt-type purchases as issued.

    Returns:
        True if any purchase had already been issued before this call
        (i.e. this would be a re-send), False otherwise.
    """
    purchases = (
        user.owned_purchases.filter_by(is_paid_for=True)
        .filter(Purchase.state.in_(["paid"]))
        .join(PriceTier, Product, ProductGroup)
        .filter(ProductGroup.type.in_(RECEIPT_TYPES))
        .with_entities(Purchase)
        .group_by(Purchase)
        .order_by(Purchase.id)
    )
    already_emailed = False
    for p in purchases:
        if p.ticket_issued:
            already_emailed = True
        p.ticket_issued = True
    return already_emailed
from typing import Any, Callable, Dict, Optional, Tuple, Type, TypeVar
import srsly
from ..compat import tensorflow as tf
from ..model import Model
from ..shims import TensorFlowShim, keras_model_fns, maybe_handshake_model
from ..types import ArgsKwargs, ArrayXd
from ..util import (
assert_tensorflow_installed,
convert_recursive,
is_tensorflow_array,
is_xp_array,
tensorflow2xp,
xp2tensorflow,
)
InT = TypeVar("InT")
OutT = TypeVar("OutT")
InFunc = TypeVar("InFunc")
XType = TypeVar("XType", bound=ArrayXd)
YType = TypeVar("YType", bound=ArrayXd)
def keras_subclass(
    name: str,
    X: XType,
    Y: YType,
    input_shape: Tuple[int, ...],
    compile_args: Optional[Dict[str, Any]] = None,
) -> Callable[[InFunc], InFunc]:
    """Decorate a custom keras subclassed model with enough information to
    serialize and deserialize it reliably in the face of the many restrictions
    on keras subclassed models.

    name (str): The unique namespace string to use to represent this model class.
    X (Any): A sample X input for performing a forward pass on the network.
    Y (Any): A sample Y input for performing a backward pass on the network.
    input_shape (Tuple[int, ...]): A set of input shapes for building the network.
    compile_args: Arguments to pass directly to the keras `model.compile` call.
    RETURNS (Callable): The decorated class.
    """
    # Merge user-supplied compile args over the defaults.
    compile_defaults = {"optimizer": "adam", "loss": "mse"}
    if compile_args is None:
        compile_args = compile_defaults
    else:
        compile_args = {**compile_defaults, **compile_args}

    def METHOD_NAME(clazz):
        # Expose the decorator arguments as read-only properties so the shim
        # can rebuild and re-compile the model during deserialization.
        clazz.catalogue_name = property(lambda inst: name)
        clazz.eg_shape = property(lambda inst: input_shape)
        clazz.eg_compile = property(lambda inst: compile_args)
        clazz.eg_x = property(lambda inst: X)
        clazz.eg_y = property(lambda inst: Y)

        # Register a factory under `name` so loading can recreate the class.
        @keras_model_fns(name)
        def create_component(*call_args, **call_kwargs):
            return clazz(*call_args, **call_kwargs)

        # Capture construction args and store them on the instance
        wrapped_init = clazz.__init__

        def __init__(self, *args, **kwargs):
            wrapped_init(self, *args, **kwargs)
            # Constructor args must round-trip through JSON, otherwise the
            # model cannot be recreated from its serialized form.
            try:
                srsly.json_dumps(args)
                srsly.json_dumps(kwargs)
            except BaseException as _err:
                raise ValueError(
                    "In order to serialize Keras Subclass models, the constructor "
                    "arguments must be serializable. This allows thinc to recreate "
                    "the code-based model with the same configuration.\n"
                    f"The encountered error is: {_err}"
                )
            self.eg_args = ArgsKwargs(args, kwargs)

        clazz.__init__ = __init__
        return clazz

    return METHOD_NAME
def TensorFlowWrapper(
    tensorflow_model: Any,
    convert_inputs: Optional[Callable] = None,
    convert_outputs: Optional[Callable] = None,
    optimizer: Optional[Any] = None,
    model_class: Type[Model] = Model,
    model_name: str = "tensorflow",
) -> Model[InT, OutT]:
    """Wrap a TensorFlow model, so that it has the same API as Thinc models.
    To optimize the model, you'll need to create a TensorFlow optimizer and call
    optimizer.apply_gradients after each batch.

    tensorflow_model: A `tf.keras.models.Model` instance to wrap.
    convert_inputs: Optional converter from Thinc arrays to TF inputs;
        defaults to `_convert_inputs`.
    convert_outputs: Optional converter from TF outputs to Thinc arrays;
        defaults to `_convert_outputs`.
    optimizer: Optional TF optimizer stored on the shim.
    model_class: Thinc Model subclass to instantiate.
    model_name: Name for the resulting Thinc model.
    """
    assert_tensorflow_installed()
    if not isinstance(tensorflow_model, tf.keras.models.Model):
        err = f"Expected tf.keras.models.Model, got: {type(tensorflow_model)}"
        raise ValueError(err)
    # Validate/adapt subclassed models so they can be (de)serialized.
    tensorflow_model = maybe_handshake_model(tensorflow_model)
    if convert_inputs is None:
        convert_inputs = _convert_inputs
    if convert_outputs is None:
        convert_outputs = _convert_outputs
    return model_class(
        model_name,
        forward,
        shims=[TensorFlowShim(tensorflow_model, optimizer=optimizer)],
        attrs={"convert_inputs": convert_inputs, "convert_outputs": convert_outputs},
    )
def forward(model: Model[InT, OutT], X: InT, is_train: bool) -> Tuple[OutT, Callable]:
    """Return the output of the wrapped TensorFlow model for the given input,
    along with a callback to handle the backward pass.
    """
    convert_inputs = model.attrs["convert_inputs"]
    convert_outputs = model.attrs["convert_outputs"]
    # shims[0] is the TensorFlowShim created by TensorFlowWrapper.
    tensorflow_model = model.shims[0]
    X_tensorflow, get_dX = convert_inputs(model, X, is_train)
    if is_train:
        # Training calls return the output plus a TF-side backprop callback.
        Y_tensorflow, tensorflow_backprop = tensorflow_model(X_tensorflow, is_train)
    else:
        Y_tensorflow = tensorflow_model(X_tensorflow, is_train)
    Y, get_dY_tensorflow = convert_outputs(model, Y_tensorflow, is_train)

    def backprop(dY: OutT) -> InT:
        # xp gradient -> TF gradient -> TF backprop -> xp input gradient.
        dY_tensorflow = get_dY_tensorflow(dY)
        dX_tensorflow = tensorflow_backprop(dY_tensorflow)
        return get_dX(dX_tensorflow)

    return Y, backprop
# Default conversion functions
# These are pretty much the same as the PyTorch one, but I think we should
# leave the duplication -- I think the abstraction could get pretty messy,
# and then may need to be undone, as there can always be different specifics.
def _convert_inputs(model, X, is_train):
    """Default converter from Thinc inputs to TensorFlow tensors.

    Returns an ArgsKwargs of TF tensors plus a reverse converter that maps the
    TF gradients back to the same container shape as the original input X
    (ArgsKwargs, dict, tuple/list, or a single array).
    """
    xp2tensorflow_ = lambda x: xp2tensorflow(x, requires_grad=is_train)
    converted = convert_recursive(is_xp_array, xp2tensorflow_, X)
    if isinstance(converted, ArgsKwargs):

        def reverse_conversion(dXtf):
            return convert_recursive(is_tensorflow_array, tensorflow2xp, dXtf)

        return converted, reverse_conversion
    elif isinstance(converted, dict):

        def reverse_conversion(dXtf):
            dX = convert_recursive(is_tensorflow_array, tensorflow2xp, dXtf)
            return dX.kwargs

        return ArgsKwargs(args=tuple(), kwargs=converted), reverse_conversion
    elif isinstance(converted, (tuple, list)):

        def reverse_conversion(dXtf):
            dX = convert_recursive(is_tensorflow_array, tensorflow2xp, dXtf)
            return dX.args

        return ArgsKwargs(args=converted, kwargs={}), reverse_conversion
    else:

        def reverse_conversion(dXtf):
            dX = convert_recursive(is_tensorflow_array, tensorflow2xp, dXtf)
            return dX.args[0]

        return ArgsKwargs(args=(converted,), kwargs={}), reverse_conversion
def _convert_outputs(model, Ytf, is_train):
    """Default converter from TensorFlow outputs to Thinc (xp) arrays.

    Returns the converted output plus a reverse converter that maps an xp
    gradient back to TensorFlow tensors for the backward pass.
    """
    Y = convert_recursive(is_tensorflow_array, tensorflow2xp, Ytf)

    def reverse_conversion(dY):
        return convert_recursive(is_xp_array, xp2tensorflow, dY)

    return Y, reverse_conversion
from math import pow
import numpy as np
def METHOD_NAME(num_hosts, num_devices_per_host, mode="new"):
    """Enumerate candidate submesh shapes as (hosts, devices_per_host) tuples.

    Arguments:
        num_hosts: Total number of hosts (N).
        num_devices_per_host: Devices per host (M); must be a power of two.
        mode: "alpa" reproduces the choices from the Alpa paper — single-host
            power-of-two slices (1, 2^i) plus full-width multi-host meshes
            (i, M); "new" enumerates power-of-two rectangles (2^i, 2^j) with
            i <= j and i + j <= log2(M).

    Returns:
        List of (hosts, devices_per_host) int tuples.

    Raises:
        ValueError: If `mode` is not "alpa" or "new" (previously an unknown
            mode silently returned an empty list).
    """
    submesh_choices = []
    # Compute p = log2(num_devices_per_host) with integer arithmetic.
    # Previously math.pow was used, which returns floats, so the submesh
    # shapes contained floats like (1, 2.0); 2 ** i keeps them ints.
    i = 1
    p = -1
    while i <= num_devices_per_host:
        i *= 2
        p += 1
    assert 2 ** p == num_devices_per_host, ("Only supports the cases where num_devices_per_host is power of two, "
                                            f"while now num_devices_per_host = {num_devices_per_host}")
    if mode == "alpa":
        for i in range(p + 1):
            submesh_choices.append((1, 2 ** i))
        for i in range(2, num_hosts + 1):
            submesh_choices.append((i, num_devices_per_host))
    elif mode == "new":
        for i in range(p // 2 + 1):
            for j in range(i, p - i + 1):
                submesh_choices.append((2 ** i, 2 ** j))
    else:
        raise ValueError(f"Unknown submesh mode: {mode!r}")
    return submesh_choices
def alpa_dp_impl(num_layers, num_devices, num_microbatches, submesh_choices, compute_cost, max_stage_cost,
                 best_configs):
    """Implementation of Alpa DP for pipeline strategy
    Paper reference: https://www.usenix.org/system/files/osdi22-zheng-lianmin.pdf

    Arguments:
        num_layers: K
        num_devices: N*M
        num_microbatches: B
        submesh_choices: List[(n_i,m_i)]
        compute_cost: t_intra
        max_stage_cost: Upper bound on any single stage's cost for this pass.
        best_configs: Per-(start, end, submesh) best autosharding config index.

    Returns:
        (total_cost, solution) where solution is a list of
        ((start_layer, end_layer), submesh_choice, autosharding_choice)
        entries, or (np.inf, None) if no assignment is feasible.
    """
    # For f, layer ID start from 0
    # f[#pipeline stages, layer id that is currently being considered, number of devices used]
    f = np.full((num_layers + 1, num_layers + 1, num_devices + 1), np.inf, dtype=np.float32)
    # f_stage_max tracks the most expensive single stage along the best split.
    f_stage_max = np.full((num_layers + 1, num_layers + 1, num_devices + 1), 0.0, dtype=np.float32)
    # f_argmin records (next_start_layer, submesh_idx, autosharding_idx) for backtracking.
    f_argmin = np.full((num_layers + 1, num_layers + 1, num_devices + 1, 3), -1, dtype=np.int32)
    # Base case: zero stages covering everything after the last layer, zero devices.
    f[0, num_layers, 0] = 0
    for s in range(1, num_layers + 1):
        for k in range(num_layers - 1, -1, -1):
            for d in range(1, num_devices + 1):
                for m, submesh in enumerate(submesh_choices):
                    n_submesh_devices = np.prod(np.array(submesh))
                    if n_submesh_devices <= d:
                        # TODO: [luzgh]: Why alpa needs max_n_succ_stages? Delete.
                        # if s - 1 <= max_n_succ_stages[i, k - 1, m, n_config]:
                        # ...
                        for i in range(num_layers, k, -1):
                            # Stage covers layers [k, i) on submesh m.
                            stage_cost = compute_cost[k, i, m]
                            new_cost = f[s - 1, k, d - n_submesh_devices] + stage_cost
                            if (stage_cost <= max_stage_cost and new_cost < f[s, k, d]):
                                f[s, k, d] = new_cost
                                f_stage_max[s, k, d] = max(stage_cost, f_stage_max[s - 1, i, d - n_submesh_devices])
                                f_argmin[s, k, d] = (i, m, best_configs[k, i, m])
    # Pick the stage count s with minimum total per-microbatch cost.
    best_s = -1
    best_total_cost = np.inf
    for s in range(1, num_layers + 1):
        if f[s, 0, num_devices] < best_total_cost:
            best_s = s
            best_total_cost = f[s, 0, num_devices]
    if np.isinf(best_total_cost):
        return np.inf, None
    # Pipeline cost: one pass through all stages + (B-1) ticks of the slowest stage.
    total_cost = f[best_s, 0, num_devices] + (num_microbatches - 1) * f_stage_max[best_s, 0, num_devices]
    # Backtrack through f_argmin to recover the stage assignment.
    current_s = best_s
    current_layer = 0
    current_devices = num_devices
    res = []
    while current_s > 0 and current_layer < num_layers and current_devices > 0:
        next_start_layer, submesh_choice, autosharding_choice = (f_argmin[current_s, current_layer, current_devices])
        assert next_start_layer != -1 and current_devices != -1
        res.append(((current_layer, next_start_layer), submesh_choice, autosharding_choice))
        current_s -= 1
        current_layer = next_start_layer
        current_devices -= np.prod(np.array(submesh_choices[submesh_choice]))
    assert (current_s == 0 and current_layer == num_layers and current_devices == 0)
    return total_cost, res
def alpa_dp(num_layers,
            num_devices,
            num_microbatches,
            submesh_choices,
            num_autosharding_configs,
            compute_cost,
            gap=1e-6):
    """Alpa auto stage dynamic programming.
    Code reference: https://github.com/alpa-projects/alpa/blob/main/alpa/pipeline_parallel/stage_construction.py

    Sweeps candidate caps on the per-stage cost (every distinct value in
    `compute_cost`) and runs the DP (`alpa_dp_impl`) under each cap, keeping
    the best overall solution.

    Arguments:
        submesh_choices: List[(int,int)]
        num_autosharding_configs: Max number of t_intra(start_layer, end_layer, LogicalMesh)
        compute_cost: np.array(num_layers,num_layers,num_submesh_choices,num_autosharding_configs)
        gap: Minimum difference between consecutive caps worth re-solving for.

    Returns:
        (best_cost, best_solution) as produced by `alpa_dp_impl`.
    """
    assert np.shape(compute_cost) == (num_layers, num_layers, len(submesh_choices),
                                      num_autosharding_configs), "Cost shape wrong."
    all_possible_stage_costs = np.sort(np.unique(compute_cost))
    best_cost = np.inf
    best_solution = None
    last_max_stage_cost = 0.0
    # TODO: [luzgh]: Why alpa needs the num_autosharding_configs dimension in compute_cost?
    # In dp_impl it seems the argmin n_config will be chosen. Just amin here.
    best_configs = np.argmin(compute_cost, axis=3)
    best_compute_cost = np.amin(compute_cost, axis=3)
    assert len(all_possible_stage_costs), "no solution in auto stage construction."
    for max_stage_cost in all_possible_stage_costs:
        # Prune: even a perfectly balanced pipeline can't beat the current best.
        if max_stage_cost * num_microbatches >= best_cost:
            break
        # Skip caps numerically indistinguishable from the previous one.
        if max_stage_cost - last_max_stage_cost < gap:
            continue
        cost, solution = alpa_dp_impl(num_layers, num_devices, num_microbatches, submesh_choices, best_compute_cost,
                                      max_stage_cost, best_configs)
        if cost < best_cost:
            best_cost = cost
            best_solution = solution
        last_max_stage_cost = max_stage_cost
    return best_cost, best_solution
# -*- coding: utf-8 -*-
import pytest
from jinja2 import DictLoader
from jinja2 import Environment
from jinja2.exceptions import TemplateNotFound
from jinja2.exceptions import TemplatesNotFound
from jinja2.exceptions import TemplateSyntaxError
@pytest.fixture
def test_env():
    """Environment with a macro module, a header template, an `o` printer,
    and `bar` set as an environment global (23)."""
    env = Environment(
        loader=DictLoader(
            dict(
                module="{% macro test() %}[{{ foo }}|{{ bar }}]{% endmacro %}",
                header="[{{ foo }}|{{ 23 }}]",
                o_printer="({{ o }})",
            )
        )
    )
    env.globals["bar"] = 23
    return env
class TestImports(object):
    """Tests for the {% import %} / {% from ... import %} tags."""

    def test_context_imports(self, test_env):
        """`with context` exposes the importer's variables to the macro;
        the default and `without context` do not."""
        t = test_env.from_string('{% import "module" as m %}{{ m.test() }}')
        assert t.render(foo=42) == "[|23]"
        t = test_env.from_string(
            '{% import "module" as m without context %}{{ m.test() }}'
        )
        assert t.render(foo=42) == "[|23]"
        t = test_env.from_string(
            '{% import "module" as m with context %}{{ m.test() }}'
        )
        assert t.render(foo=42) == "[42|23]"
        t = test_env.from_string('{% from "module" import test %}{{ test() }}')
        assert t.render(foo=42) == "[|23]"
        t = test_env.from_string(
            '{% from "module" import test without context %}{{ test() }}'
        )
        assert t.render(foo=42) == "[|23]"
        t = test_env.from_string(
            '{% from "module" import test with context %}{{ test() }}'
        )
        assert t.render(foo=42) == "[42|23]"

    def test_import_needs_name(self, test_env):
        """`from ... import` without any names is a syntax error."""
        test_env.from_string('{% from "foo" import bar %}')
        test_env.from_string('{% from "foo" import bar, baz %}')
        with pytest.raises(TemplateSyntaxError):
            test_env.from_string('{% from "foo" import %}')

    def test_no_trailing_comma(self, test_env):
        """Stray commas in the import name list are rejected."""
        with pytest.raises(TemplateSyntaxError):
            test_env.from_string('{% from "foo" import bar, %}')
        with pytest.raises(TemplateSyntaxError):
            test_env.from_string('{% from "foo" import bar,, %}')
        with pytest.raises(TemplateSyntaxError):
            test_env.from_string('{% from "foo" import, %}')

    def test_trailing_comma_with_context(self, test_env):
        """A trailing comma before with/without context is tolerated,
        but doubled commas or a comma after `with context` are not."""
        test_env.from_string('{% from "foo" import bar, baz with context %}')
        test_env.from_string('{% from "foo" import bar, baz, with context %}')
        test_env.from_string('{% from "foo" import bar, with context %}')
        test_env.from_string('{% from "foo" import bar, with, context %}')
        test_env.from_string('{% from "foo" import bar, with with context %}')
        with pytest.raises(TemplateSyntaxError):
            test_env.from_string('{% from "foo" import bar,, with context %}')
        with pytest.raises(TemplateSyntaxError):
            test_env.from_string('{% from "foo" import bar with context, %}')

    def test_exports(self, test_env):
        """Only top-level public macros and set variables are exported from a
        module; underscore-prefixed and loop-local macros are not."""
        m = test_env.from_string(
            """
            {% macro toplevel() %}...{% endmacro %}
            {% macro __private() %}...{% endmacro %}
            {% set variable = 42 %}
            {% for item in [1] %}
            {% macro notthere() %}{% endmacro %}
            {% endfor %}
            """
        ).module
        assert m.toplevel() == "..."
        assert not hasattr(m, "__missing")
        assert m.variable == 42
        assert not hasattr(m, "notthere")
class TestIncludes(object):
    """Tests for the {% include %} tag."""

    def test_context_include(self, test_env):
        """Includes see the calling context by default and with `with context`,
        but not with `without context`."""
        t = test_env.from_string('{% include "header" %}')
        assert t.render(foo=42) == "[42|23]"
        t = test_env.from_string('{% include "header" with context %}')
        assert t.render(foo=42) == "[42|23]"
        t = test_env.from_string('{% include "header" without context %}')
        assert t.render(foo=42) == "[|23]"

    def test_choice_includes(self, test_env):
        """A list of names includes the first template that exists; all-missing
        raises TemplatesNotFound unless `ignore missing` is given."""
        t = test_env.from_string('{% include ["missing", "header"] %}')
        assert t.render(foo=42) == "[42|23]"
        t = test_env.from_string('{% include ["missing", "missing2"] ignore missing %}')
        assert t.render(foo=42) == ""
        t = test_env.from_string('{% include ["missing", "missing2"] %}')
        pytest.raises(TemplateNotFound, t.render)
        with pytest.raises(TemplatesNotFound) as e:
            t.render()
        # The exception records every attempted name; `name` is the last tried.
        assert e.value.templates == ["missing", "missing2"]
        assert e.value.name == "missing2"

        # Helper: render t with foo=42 and expect the header output.
        def test_includes(t, **ctx):
            ctx["foo"] = 42
            assert t.render(ctx) == "[42|23]"

        t = test_env.from_string('{% include ["missing", "header"] %}')
        test_includes(t)
        t = test_env.from_string("{% include x %}")
        test_includes(t, x=["missing", "header"])
        t = test_env.from_string('{% include [x, "header"] %}')
        test_includes(t, x="missing")
        t = test_env.from_string("{% include x %}")
        test_includes(t, x="header")
        t = test_env.from_string("{% include [x] %}")
        test_includes(t, x="header")

    def test_include_ignoring_missing(self, test_env):
        """`ignore missing` renders nothing, regardless of context modifier."""
        t = test_env.from_string('{% include "missing" %}')
        pytest.raises(TemplateNotFound, t.render)
        for extra in "", "with context", "without context":
            t = test_env.from_string(
                '{% include "missing" ignore missing ' + extra + " %}"
            )
            assert t.render() == ""

    def test_context_include_with_overrides(self, test_env):
        """Loop variables of the including template are visible in the include."""
        env = Environment(
            loader=DictLoader(
                dict(
                    main="{% for item in [1, 2, 3] %}{% include 'item' %}{% endfor %}",
                    item="{{ item }}",
                )
            )
        )
        assert env.get_template("main").render() == "123"

    def METHOD_NAME(self, test_env):
        """An include inside a nested macro still sees the macro argument,
        even though macro scopes are normally optimized."""
        t = test_env.from_string(
            """
            {% macro outer(o) %}
            {% macro inner() %}
            {% include "o_printer" %}
            {% endmacro %}
            {{ inner() }}
            {% endmacro %}
            {{ outer("FOO") }}
        """
        )
        assert t.render().strip() == "(FOO)"
def test_import_from_with_context(self):
env = Environment(
loader=DictLoader({"a": "{% macro x() %}{{ foobar }}{% endmacro %}"})
)
t = env.from_string(
"{% set foobar = 42 %}{% from 'a' import x with context %}{{ x() }}"
)
assert t.render() == "42" |
#SPDX-License-Identifier: MIT
import pytest
import pandas as pd
from augur.api.metrics.issue import *
def test_issues_new():
    """issues_new reports non-zero counts for repo, repo-group, and date-bounded queries."""
    #repo_id
    assert issues_new(1, 1 , period='year').iloc[0]['issues'] > 0
    #repo_group_id
    assert issues_new(10, period='year').iloc[1]['issues'] > 0
    #begin_date & end_date
    assert issues_new(10, 25430, period='week', begin_date='2017',
        end_date='2017-10').iloc[1]['issues'] > 0
    assert issues_new(10, period='month', begin_date='2017-05',
        end_date='2018').iloc[2]['issues'] > 0
def test_issues_active():
    """Smoke-test issues_active() for repo, repo-group, and date-bounded queries."""
    # repo
    assert issues_active(1, 1, period='year').iloc[0]['issues'] > 0
    # repo_group
    assert issues_active(10, period='year').iloc[0]['issues'] > 0
    # begin_date & end_date
    assert issues_active(10, 25430, period='month', begin_date='2020-02',
                         end_date='2020-03').iloc[0]['issues'] > 0
    assert issues_active(10, period='week', begin_date='2020-01',
                         end_date='2020-03').iloc[0]['issues'] > 0
def test_issues_closed():
    """Smoke-test issues_closed() for repo, repo-group, and date-bounded queries."""
    # repo
    assert issues_closed(10, 25430, period='year').iloc[0]['issues'] > 0
    # repo_group
    assert issues_closed(10, period='year').iloc[0]['issues'] > 0
    # begin_date & end_date
    assert issues_closed(10, 25430, period='week', begin_date='2019',
                         end_date='2020-02').iloc[0]['issues'] > 0
    assert issues_closed(10, period='month', begin_date='2018-05',
                         end_date='2019-08-15').iloc[0]['issues'] > 0
def test_issue_duration():
    """issue_duration() returns the known open-to-close duration for the fixture repo."""
    # repo
    assert issue_duration(10, 25430).iloc[0]['duration'] == '20 days 03:08:22.000000000'
    # repo_group
    assert issue_duration(10).iloc[0]['duration'] == '20 days 03:08:22.000000000'
def test_issue_participants():
    """issue_participants() reports a positive participant count at both scopes."""
    # repo
    assert issue_participants(10, 25430).iloc[0]['participants'] > 0
    # repo_group
    assert issue_participants(10).iloc[0]['participants'] > 0
def test_issue_throughput():
    """issue_throughput() is non-negative at both repo and repo-group scope."""
    # repo
    assert issue_throughput(10, 25430).iloc[0]['throughput'] >= 0
    # repo_group
    assert issue_throughput(10).iloc[0]['throughput'] >= 0
def METHOD_NAME():
    """issue_backlog() reports a positive backlog at both repo and repo-group scope."""
    #repo_id
    assert issue_backlog(10, 25430).iloc[0]['issue_backlog'] > 0
    #repo_group_id
    assert issue_backlog(10).iloc[0]['issue_backlog'] > 0
def test_issues_first_time_closed():
    """issues_first_time_closed() buckets contain the expected UTC year timestamps."""
    # repo id
    assert issues_first_time_closed(10, repo_id=25430, period='year').isin(
        [pd.Timestamp('2019', tz='UTC')]).any().any()
    # repo_group_id
    assert issues_first_time_closed(10, period='year').isin(
        [pd.Timestamp('2020', tz='UTC')]).any().any()
    # begin_date and end_date
    assert issues_first_time_closed(10, period='year', begin_date='2019-1-1 00:00:00',
        end_date='2019-12-31 23:59:59').isin([pd.Timestamp('2019-01-01 00:00:00', tz='UTC')]).any().any()
    assert issues_first_time_closed(10, repo_id=25430, period='year', begin_date='2019-1-1 00:00:00',
        end_date='2019-12-31 23:59:59').isin([pd.Timestamp('2019-01-01 00:00:00', tz='UTC')]).any().any()
def test_open_issues_count():
    """open_issues_count() is positive at both repo and repo-group scope."""
    # repo
    assert open_issues_count(10, 25430).iloc[0]['open_count'] > 0
    # repo_group
    assert open_issues_count(10).iloc[0]['open_count'] > 0
def test_closed_issues_count():
    """closed_issues_count() is positive at both repo and repo-group scope."""
    # repo
    assert closed_issues_count(10, 25430).iloc[0]['closed_count'] > 0
    # repo_group
    assert closed_issues_count(10).iloc[0]['closed_count'] > 0
def test_issues_open_age():
    """issues_open_age() yields a positive age for open issues at both scopes."""
    # repo group
    assert issues_open_age(10).iloc[0]['open_date'] > 0
    # repo
    assert issues_open_age(10, 25430).iloc[0]['open_date'] > 0
def test_issues_closed_resolution_duration():
    """issues_closed_resolution_duration() reports a non-negative day difference."""
    # repo group
    assert issues_closed_resolution_duration(10).iloc[0]['diffdate'] >= 0
    # repo
    assert issues_closed_resolution_duration(10, 25430).iloc[0]['diffdate'] >= 0
def test_average_issue_resolution_time():
    """average_issue_resolution_time() matches known fixture values at both scopes."""
    # repo
    assert average_issue_resolution_time(10, 25430).isin(
        ['augur', '61 days 12:20:43.791667']).any().any()
    # repo_group (note: fixture value intentionally has a leading space)
    assert average_issue_resolution_time(10).isin(
        ['grimoirelab', ' 67 days 22:41:55.260417']).any().any()
def test_issues_maintainer_response_duration():
    """issues_maintainer_response_duration() reports a positive mean response time.

    The original asserted the identical repo-scoped call twice, running the same
    database query a second time for no additional coverage; each scope is now
    queried once and the result reused.
    """
    # repo scope
    repo_result = issues_maintainer_response_duration(10, 25430)
    assert repo_result.iloc[0].average_days_comment > 0
    # repo_group scope
    group_result = issues_maintainer_response_duration(10)
    assert group_result.iloc[0].average_days_comment > 0
def test_issue_comments_mean():
    """issue_comments_mean() returns non-empty results for all scope/group_by combos."""
    assert issue_comments_mean(10).any().any()
    assert issue_comments_mean(10, 25430).any().any()
    assert issue_comments_mean(10, group_by='year').any().any()
    assert issue_comments_mean(10, 25430, group_by='year').any().any()
def test_issue_comments_mean_std():
    """issue_comments_mean_std() returns non-empty results for all scope/group_by combos."""
    assert issue_comments_mean_std(10).any().any()
    assert issue_comments_mean_std(10, 25430).any().any()
    assert issue_comments_mean_std(10, group_by='year').any().any()
    assert issue_comments_mean_std(10, 25430, group_by='year').any().any()
4,071 | l1 t stage1 digis summary | from __future__ import print_function
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
import os
import sys
##############################################################################
# customisations for L1T utilities
#
# customisations which add utilities features such as debugging of L1T,
# summary module, etc.
#
##############################################################################
# Remove the Stage-2 muon and GT emulation modules from the SimL1Emulator sequence
def L1TTurnOffGtAndGmtEmulation(process):
    # Remove the trigger-primitive, track-finder (BMTF/EMTF/OMTF), GMT, and GT
    # emulation modules from the SimL1Emulator sequence.
    cutlist=['simDtTriggerPrimitiveDigis','simCscTriggerPrimitiveDigis','simTwinMuxDigis','simBmtfDigis','simEmtfDigis','simOmtfDigis','simGmtCaloSumDigis','simMuonQualityAdjusterDigis','simGmtStage2Digis','simGtStage2Digis']
    for b in cutlist:
        process.SimL1Emulator.remove(getattr(process,b))
    return process
# Unpack Stage-2 GT and GMT
def L1TTurnOffUnpackStage2GtAndGmt(process):
    # Drop the Stage-2 GT and GMT unpacker modules from the L1TRawToDigi sequence.
    cutlist=['gtStage2Digis','gmtStage2Digis']
    for b in cutlist:
        process.L1TRawToDigi.remove(getattr(process,b))
    return process
# Turn off unpacking of Stage-2 GT, GMT, and Calo
def L1TTurnOffUnpackStage2GtGmtAndCalo(process):
    # Drop the Stage-2 GT, GMT, and Calo unpacker modules from the L1TRawToDigi sequence.
    cutlist=['gtStage2Digis','gmtStage2Digis','caloStage2Digis']
    for b in cutlist:
        process.L1TRawToDigi.remove(getattr(process,b))
    return process
def METHOD_NAME(process):
    # Schedule a path that prints a summary of the unpacked Stage-1 digis.
    print("L1T INFO: will dump a summary of unpacked Stage1 content to screen.")
    process.load('L1Trigger.L1TCommon.l1tSummaryStage1Digis_cfi')
    process.l1tstage1summary = cms.Path(process.l1tSummaryStage1Digis)
    process.schedule.append(process.l1tstage1summary)
    return process
def L1TStage2DigisSummary(process):
    # Schedule a path that prints a summary of the unpacked Stage-2 digis.
    print("L1T INFO: will dump a summary of unpacked Stage2 content to screen.")
    process.load('L1Trigger.L1TCommon.l1tSummaryStage2Digis_cfi')
    process.l1tstage2summary = cms.Path(process.l1tSummaryStage2Digis)
    process.schedule.append(process.l1tstage2summary)
    return process
def L1TStage1SimDigisSummary(process):
    # Schedule a path that prints a summary of the simulated Stage-1 digis.
    print("L1T INFO: will dump a summary of simulated Stage1 content to screen.")
    process.load('L1Trigger.L1TCommon.l1tSummaryStage1SimDigis_cfi')
    process.l1tsimstage1summary = cms.Path(process.l1tSummaryStage1SimDigis)
    process.schedule.append(process.l1tsimstage1summary)
    return process
def L1TStage2SimDigisSummary(process):
    # Schedule a path that prints a summary of the simulated Stage-2 digis.
    print("L1T INFO: will dump a summary of simulated Stage2 content to screen.")
    process.load('L1Trigger.L1TCommon.l1tSummaryStage2SimDigis_cfi')
    process.l1tsimstage2summary = cms.Path(process.l1tSummaryStage2SimDigis)
    process.schedule.append(process.l1tsimstage2summary)
    return process
def L1TGlobalDigisSummary(process):
    # Schedule an L1TGlobalSummary analyzer over the unpacked GT digis.
    print("L1T INFO: will dump a summary of unpacked L1T Global output to screen.")
    process.l1tGlobalSummary = cms.EDAnalyzer(
        'L1TGlobalSummary',
        AlgInputTag = cms.InputTag("gtStage2Digis"),
        ExtInputTag = cms.InputTag("gtStage2Digis"),
        DumpTrigResults = cms.bool(False), # per event dump of trig results
        DumpTrigSummary = cms.bool(True), # per run dump of trig results
    )
    process.l1tglobalsummary = cms.Path(process.l1tGlobalSummary)
    process.schedule.append(process.l1tglobalsummary)
    return process
def L1TGlobalMenuXML(process):
    # Load the L1T Global trigger menu from the 2022 v1_2_0 XML definition.
    process.load('L1Trigger.L1TGlobal.GlobalParameters_cff')
    process.load('L1Trigger.L1TGlobal.TriggerMenu_cff')
    process.TriggerMenu.L1TriggerMenuFile = cms.string('L1Menu_Collisions2022_v1_2_0.xml')
    return process
def L1TGlobalSimDigisSummary(process):
    # Schedule an L1TGlobalSummary analyzer over the simulated GT digis.
    print("L1T INFO: will dump a summary of simulated L1T Global output to screen.")
    process.l1tSimGlobalSummary = cms.EDAnalyzer(
        'L1TGlobalSummary',
        AlgInputTag = cms.InputTag("simGtStage2Digis"),
        ExtInputTag = cms.InputTag("simGtStage2Digis"),
        DumpTrigResults = cms.bool(False), # per event dump of trig results
        DumpTrigSummary = cms.bool(True), # per run dump of trig results
    )
    process.l1tsimglobalsummary = cms.Path(process.l1tSimGlobalSummary)
    process.schedule.append(process.l1tsimglobalsummary)
    return process
def L1TAddInfoOutput(process):
    # Route INFO-level messages to stdout and WARNING-level messages to stderr.
    process.MessageLogger = cms.Service(
        "MessageLogger",
        destinations = cms.untracked.vstring('cout','cerr'),
        cout = cms.untracked.PSet(threshold = cms.untracked.string('INFO')),
        cerr = cms.untracked.PSet(threshold = cms.untracked.string('WARNING')),
    )
    return process
def L1TAddDebugOutput(process):
    # Send DEBUG-level messages from all modules to the file l1tdebug.log and
    # WARNING-level messages to stderr. Requires packages to be built with
    # EDM_ML_DEBUG for the debug statements to be compiled in.
    # (Fixed typo in the user-facing message: "ouput" -> "output".)
    print("L1T INFO: sending debugging output to file l1tdebug.log")
    print("L1T INFO: add <flags CXXFLAGS=\"-g -D=EDM_ML_DEBUG\"/> in BuildFile.xml of any package you want to debug...")
    process.MessageLogger = cms.Service(
        "MessageLogger",
        destinations = cms.untracked.vstring('l1tdebug','cerr'),
        l1tdebug = cms.untracked.PSet(threshold = cms.untracked.string('DEBUG')),
        #debugModules = cms.untracked.vstring('caloStage1Digis'))
        cerr = cms.untracked.PSet(threshold = cms.untracked.string('WARNING')),
        debugModules = cms.untracked.vstring('*'))
    return process
def L1TDumpEventData(process):
    # Schedule an EventContentAnalyzer to dump per-event data products.
    print("L1T INFO: adding EventContentAnalyzer to process schedule")
    process.dumpED = cms.EDAnalyzer("EventContentAnalyzer")
    process.l1tdumpevent = cms.Path(process.dumpED)
    process.schedule.append(process.l1tdumpevent)
    return process
def L1TDumpEventSummary(process):
    # Schedule a PrintEventSetupContent analyzer to dump the event-setup records.
    process.dumpES = cms.EDAnalyzer("PrintEventSetupContent")
    process.l1tdumpeventsetup = cms.Path(process.dumpES)
    process.schedule.append(process.l1tdumpeventsetup)
    return process
def L1TStage2ComparisonRAWvsEMU(process):
    # Schedule a comparison of unpacked vs emulated Stage-2 content.
    print("L1T INFO: will dump a comparison of unpacked vs emulated Stage2 content to screen.")
    process.load('L1Trigger.L1TCommon.l1tComparisonStage2RAWvsEMU_cfi')
    process.l1tstage2comparison = cms.Path(process.l1tComparisonStage2RAWvsEMU)
    process.schedule.append(process.l1tstage2comparison)
    return process
def L1TGtStage2ComparisonRAWvsEMU(process):
    # Schedule a comparison of unpacked vs emulated GT Stage-2 content.
    print("L1T INFO: will dump a comparison of unpacked vs emulated GT Stage2 content to screen.")
    process.load('L1Trigger.L1TCommon.l1tComparisonGtStage2RAWvsEMU_cfi')
    process.l1tgtstage2comparison = cms.Path(process.l1tComparisonGtStage2RAWvsEMU)
    process.schedule.append(process.l1tgtstage2comparison)
    return process
def L1TStage2SetPrefireVetoBit(process):
    # Insert the fake GT external-condition producer (unprefireable bit) at the
    # front of the schedule so it runs before anything that consumes it.
    process.load('L1Trigger.L1TGlobal.simGtExtFakeProd_cfi')
    process.l1tstage2gtext = cms.Path(process.simGtExtUnprefireable)
    process.schedule.insert(0,process.l1tstage2gtext)
    return process
4,072 | test names | """
Tests the pure Python functionality of `cpp_param`: (a) idempotent mapping for
unaliased types, and (b) correct mapping for aliased types (as the aliases
relate in C++).
N.B. The C++ types are not registered in this test. They are registered and
tested in `cpp_param_pybind_test`.
"""
import ctypes
import unittest
import numpy as np
from pydrake.common import _MangledName
import pydrake.common.cpp_param as mut
class CustomPyType:
    """Placeholder user-defined type used to exercise parameter canonicalization."""
    pass
class TemplateOnFloat:
    """Pretends to be a class template instantiation named Template[float]."""
# Rename the class to the mangled spelling "Template<left-bracket>float<right-bracket>"
# and register it in this module's globals under that name, mimicking what the
# pybind template bindings do for real instantiations.
setattr(TemplateOnFloat, "__name__", "Template{}float{}".format(
    _MangledName.UNICODE_LEFT_BRACKET,
    _MangledName.UNICODE_RIGHT_BRACKET))
globals()[TemplateOnFloat.__name__] = TemplateOnFloat
class TestCppParam(unittest.TestCase):
    """Tests canonicalization, display names, mangling, and generic aliases in cpp_param."""

    def _check_alias(self, canonical, alias):
        # `alias` must canonicalize to the exact `canonical` object (identity check).
        actual = mut.get_param_canonical([alias])[0]
        self.assertTrue(actual is canonical)

    def _check_idempotent(self, canonical):
        self._check_alias(canonical, canonical)

    def _check_aliases(self, canonical, aliases):
        for alias in aliases:
            self._check_alias(canonical, alias)

    def _check_names(self, name_canonical, aliases):
        # Every alias must map to the same canonical display name.
        for alias in aliases:
            actual = mut.get_param_names([alias])[0]
            self.assertEqual(actual, name_canonical)

    def test_idempotent(self):
        # Check idempotent mapping for unaliased types.
        # This follows the ordering in `cpp_param_pybind.cc`,
        # `RegisterCommon`.
        self._check_idempotent(bool)
        self._check_idempotent(str)
        self._check_idempotent(float)
        self._check_idempotent(np.float32)
        self._check_idempotent(int)
        self._check_idempotent(np.int16)
        self._check_idempotent(np.int64)
        self._check_idempotent(np.uint16)
        self._check_idempotent(np.uint32)
        self._check_idempotent(np.uint64)
        self._check_idempotent(object)
        # - Custom Types.
        self._check_idempotent(CustomPyType)
        # - Literals.
        self._check_idempotent(1)

    def test_aliases(self):
        # Aliases:
        # This follows the ordering in `cpp_param.py`,
        # `_ParamAliases._register_common`.
        self._check_aliases(float, [np.double, ctypes.c_double])
        self._check_aliases(np.float32, [ctypes.c_float])
        self._check_aliases(int, [np.int32, ctypes.c_int32])
        self._check_aliases(np.int16, [ctypes.c_int16])
        self._check_aliases(np.int64, [ctypes.c_int64])
        self._check_aliases(np.uint16, [ctypes.c_uint16])
        self._check_aliases(np.uint32, [ctypes.c_uint32])
        self._check_aliases(np.uint64, [ctypes.c_uint64])

    def METHOD_NAME(self):
        # Canonical display names for scalars, custom types, literals, generics,
        # and template instantiations.
        self._check_names("int", [int, np.int32, ctypes.c_int32])
        self._check_names("CustomPyType", [CustomPyType])
        self._check_names("1", [1])
        self._check_names(
            "dict[str,CustomPyType]", [mut.Dict[str, CustomPyType]])
        self._check_names(
            "list[CustomPyType]", [mut.List[CustomPyType]])
        self._check_names(
            "list[list[CustomPyType]]", [mut.List[mut.List[CustomPyType]]])
        self._check_names(
            "typing.Optional[CustomPyType]", [mut.Optional[CustomPyType]])
        self._check_names(
            "typing.Union[str,CustomPyType]", [mut.Union[str, CustomPyType]])
        self._check_names("Template[float]", [TemplateOnFloat])

    def test_mangled_names(self):
        # Nested generic types.
        param = [mut.Dict[str, CustomPyType]]
        self.assertEqual(
            mut.get_param_names(param=param, mangle=True)[0],
            "dict𝓣str𝓬CustomPyType𝓤")
        # Drake template types.
        param = [TemplateOnFloat]
        self.assertEqual(
            mut.get_param_names(param=param, mangle=True)[0],
            "Template𝓣float𝓤")
        # Literals.
        param = [0.0]
        self.assertEqual(
            mut.get_param_names(param=param, mangle=True)[0],
            "0𝓹0")

    def assert_equal_but_not_aliased(self, a, b):
        # Equal by value, but distinct objects (no aliasing of the input).
        self.assertEqual(a, b)
        self.assertIsNot(a, b)

    def test_generic(self):
        empty = []
        self.assert_equal_but_not_aliased(mut.List[int](), empty)
        self.assert_equal_but_not_aliased(mut.List[int](empty), empty)
        nonempty = [1]
        self.assert_equal_but_not_aliased(mut.List[int](nonempty), nonempty)
        # N.B. This does not do any type checking at construction.
        nonempty_random = ["hello"]
        self.assert_equal_but_not_aliased(
            mut.List[int](nonempty_random), nonempty_random)

    def test_generic_dims(self):
        """Ensures errors are detected and provide good error messages."""
        with self.assertRaises(RuntimeError) as cm:
            mut.Dict[int]
        self.assertEqual(
            str(cm.exception),
            "Dict[] requires exactly 2 type parameter(s)")
        with self.assertRaises(RuntimeError) as cm:
            mut.List[int, float]
        self.assertEqual(
            str(cm.exception),
            "List[] requires exactly 1 type parameter(s)")
        with self.assertRaises(RuntimeError) as cm:
            mut.Optional[()]
        self.assertEqual(
            str(cm.exception),
            "Optional[] requires exactly 1 type parameter(s)")

    def test_identifier_mangling(self):
        # Round-trip: demangle(mangle(x)) == x for representative type names.
        for pretty in ["Value[object]",
                       "LeafSystem[AutoDiff[float,7]]",
                       "SizedImage[PixelType.kConstant,640,480]",
                       "AutoDiffXd",  # (This doesn't change during mangling.)
                       ]:
            with self.subTest(pretty=pretty):
                mangled = _MangledName.mangle(pretty)
                roundtrip = _MangledName.demangle(mangled)
                self.assertEqual(roundtrip, pretty)
        # Demonstrate and sanity-check mangling the param name separately,
        # ahead of mangling the full type name.
        pretty_param = "AutoDiff[float,7]"
        mangled_param = _MangledName.mangle(pretty_param)
        self.assertNotEqual(pretty_param, mangled_param)
        heterogenous = f"LeafSystem[{mangled_param}]"
        mangled = _MangledName.mangle(heterogenous)
        pretty = _MangledName.demangle(mangled)
        self.assertNotEqual(mangled, heterogenous)
        self.assertNotEqual(pretty, mangled)
        self.assertNotEqual(pretty, heterogenous)
        self.assertEqual(_MangledName.demangle(heterogenous),
                         _MangledName.demangle(mangled))

    def test_mangling_module_lookup(self):
        # Looking up a pretty name should find the mangled class.
        self.assertIs(_MangledName.module_getattr(
            module_name=__name__,
            module_globals=globals(),
            name="Template[float]"), TemplateOnFloat)
        # Unknown names raise the conventional error.
        message = "module 'cpp_param_test' has no attribute 'NoSuchClass'"
        with self.assertRaisesRegex(AttributeError, message):
            _MangledName.module_getattr(
                module_name=__name__,
                module_globals=globals(),
                name="NoSuchClass")
4,073 | test kernel projection | from UQpy.utilities import ProjectionKernel
from UQpy.utilities.GrassmannPoint import GrassmannPoint
from UQpy.dimension_reduction.grassmann_manifold.projections.SVDProjection import SVDProjection
from UQpy.utilities.kernels.grassmannian_kernels.BinetCauchyKernel import BinetCauchyKernel
from UQpy.utilities.kernels.GaussianKernel import GaussianKernel
import numpy as np
def METHOD_NAME():
    """Projection-kernel matrix between three Grassmann points matches reference values."""
    xi = GrassmannPoint(np.array([[-np.sqrt(2) / 2, -np.sqrt(2) / 4],
                                  [np.sqrt(2) / 2, -np.sqrt(2) / 4],
                                  [0, -np.sqrt(3) / 2]]))
    xj = GrassmannPoint(np.array([[0, np.sqrt(2) / 2], [1, 0], [0, -np.sqrt(2) / 2]]))
    xk = GrassmannPoint(np.array([[-0.69535592, -0.0546034],
                                  [-0.34016974, -0.85332868],
                                  [-0.63305978, 0.51850616]]))
    points = [xi, xj, xk]
    k = ProjectionKernel()
    k.calculate_kernel_matrix(points, points)
    # Diagonal is 2 (the subspace dimension) for the projection kernel.
    kernel = np.matrix.round(k.kernel_matrix, 4)
    assert np.allclose(kernel, np.array([[2, 1.0063, 1.2345], [1.0063, 2, 1.0101], [1.2345, 1.0101, 2]]))
def test_kernel_binet_cauchy():
    """Binet-Cauchy kernel matrix between three Grassmann points matches reference values."""
    xi = GrassmannPoint(np.array([[-np.sqrt(2) / 2, -np.sqrt(2) / 4],
                                  [np.sqrt(2) / 2, -np.sqrt(2) / 4],
                                  [0, -np.sqrt(3) / 2]]))
    xj = GrassmannPoint(np.array([[0, np.sqrt(2) / 2], [1, 0], [0, -np.sqrt(2) / 2]]))
    xk = GrassmannPoint(np.array([[-0.69535592, -0.0546034],
                                  [-0.34016974, -0.85332868],
                                  [-0.63305978, 0.51850616]]))
    points = [xi, xj, xk]
    kernel = BinetCauchyKernel()
    kernel.calculate_kernel_matrix(points, points)
    # Diagonal is 1 for the Binet-Cauchy kernel.
    kernel = np.matrix.round(kernel.kernel_matrix, 4)
    assert np.allclose(kernel, np.array([[1, 0.0063, 0.2345], [0.0063, 1, 0.0101], [0.2345, 0.0101, 1]]))
def test_kernel_gaussian_1d():
    """Gaussian kernel on 1-D points with a fixed kernel parameter."""
    xi = np.array([1, 2, 3, 4])
    xj = np.array([0.2, -1, 3, 5])
    xk = np.array([1, 2, 3, 4])  # identical to xi, so k(xi, xk) == 1
    points = [xi, xj, xk]
    gaussian = GaussianKernel(kernel_parameter=2.0)
    gaussian.calculate_kernel_matrix(points, points)
    assert np.allclose(np.matrix.round(gaussian.kernel_matrix, 4),
                       np.array([[1., 0.26447726, 1.], [0.26447726, 1., 0.26447726], [1, 0.26447726, 1]]),
                       atol=1e-04)
    # The explicitly supplied parameter must be retained, not re-estimated.
    assert np.round(gaussian.kernel_parameter, 4) == 2
def test_kernel_gaussian_2d():
    """Gaussian kernel on 2-D points with the default (estimated) kernel parameter."""
    xi = np.array([[-np.sqrt(2) / 2, -np.sqrt(2) / 4], [np.sqrt(2) / 2, -np.sqrt(2) / 4], [0, -np.sqrt(3) / 2]])
    xj = np.array([[0, np.sqrt(2) / 2], [1, 0], [0, -np.sqrt(2) / 2]])
    xk = np.array([[-0.69535592, -0.0546034], [-0.34016974, -0.85332868], [-0.63305978, 0.51850616]])
    points = [xi, xj, xk]
    gaussian = GaussianKernel()
    gaussian.calculate_kernel_matrix(points, points)
    assert np.allclose(np.matrix.round(gaussian.kernel_matrix, 4), np.array([[1., 0.39434829, 0.15306655],
                                                                             [0.39434829, 1., 0.06422136],
                                                                             [0.15306655, 0.06422136, 1.]]), atol=1e-4)
    assert np.round(gaussian.kernel_parameter, 4) == 1.0
# Reference 6x6 solution snapshots; their SVD bases are the input points for
# the Grassmann projection-kernel check in test_kernel() below.
sol0 = np.array([[0.61415, 1.03029, 1.02001, 0.57327, 0.79874, 0.73274],
                 [0.56924, 0.91700, 0.88841, 0.53737, 0.68676, 0.67751],
                 [0.51514, 0.87898, 0.87779, 0.47850, 0.69085, 0.61525],
                 [0.63038, 1.10822, 1.12313, 0.58038, 0.89142, 0.75429],
                 [0.69666, 1.03114, 0.95037, 0.67211, 0.71184, 0.82522],
                 [0.66595, 1.03789, 0.98690, 0.63420, 0.75416, 0.79110]])
sol1 = np.array([[1.05134, 1.37652, 0.95634, 0.85630, 0.47570, 1.22488],
                 [0.16370, 0.63105, 0.14533, 0.81030, 0.44559, 0.43358],
                 [1.23478, 2.10342, 1.04698, 1.68755, 0.92792, 1.73277],
                 [0.90538, 1.64067, 0.62027, 1.17577, 0.63644, 1.34925],
                 [0.58210, 0.75795, 0.65519, 0.65712, 0.37251, 0.65740],
                 [0.99174, 1.59375, 0.63724, 0.89107, 0.47631, 1.36581]])
sol2 = np.array([[1.04142, 0.91670, 1.47962, 1.23350, 0.94111, 0.61858],
                 [1.00464, 0.65684, 1.35136, 1.11288, 0.96093, 0.42340],
                 [1.05567, 1.33192, 1.56286, 1.43412, 0.77044, 0.97182],
                 [0.89812, 0.86136, 1.20204, 1.17892, 0.83788, 0.61160],
                 [0.46935, 0.39371, 0.63534, 0.57856, 0.47615, 0.26407],
                 [1.14102, 0.80869, 1.39123, 1.33076, 0.47719, 0.68170]])
sol3 = np.array([[0.60547, 0.11492, 0.78956, 0.13796, 0.76685, 0.41661],
                 [0.32771, 0.11606, 0.67630, 0.15208, 0.44845, 0.34840],
                 [0.58959, 0.10156, 0.72623, 0.11859, 0.73671, 0.38714],
                 [0.36283, 0.07979, 0.52824, 0.09760, 0.46313, 0.27906],
                 [0.87487, 0.22452, 1.30208, 0.30189, 1.22015, 0.62918],
                 [0.56006, 0.16879, 1.09635, 0.20431, 0.69439, 0.60317]])
def test_kernel():
    """Projection-kernel value between SVD-projected solution bases matches reference.

    Removed dead code: the original imported `RandomState` and created an
    unused `rnd` generator that nothing read.
    """
    np.random.seed(1111)  # For reproducibility.
    # Creating a list of solutions.
    solutions = [sol0, sol1, sol2, sol3]
    manifold_projection = SVDProjection(solutions, p="max")
    kernel = ProjectionKernel()
    kernel.calculate_kernel_matrix(manifold_projection.u, manifold_projection.u)
    assert np.round(kernel.kernel_matrix[0, 1], 8) == 6.0
4,074 | get urns not in | from typing import Any, Dict, Iterable, List, Tuple, Type
import pydantic
from datahub.emitter.mce_builder import make_assertion_urn, make_container_urn
from datahub.ingestion.source.state.checkpoint import CheckpointStateBase
from datahub.utilities.checkpoint_state_util import CheckpointStateUtil
from datahub.utilities.dedup_list import deduplicate_list
from datahub.utilities.urns.urn import guess_entity_type
def pydantic_state_migrator(mapping: Dict[str, str]) -> classmethod:
    """Build a pydantic pre-root-validator that migrates legacy per-entity-type
    state fields (lists of encoded urns/guids) into the unified `urns` list.

    `mapping` maps an old field name to the entity type used to decode it.
    """
    # mapping would be something like:
    # {
    #    'encoded_view_urns': 'dataset',
    #    'encoded_container_urns': 'container',
    # }
    SUPPORTED_TYPES = [
        "dataset",
        "container",
        "assertion",
        "topic",
    ]
    assert set(mapping.values()) <= set(SUPPORTED_TYPES)

    def _validate_field_rename(cls: Type, values: dict) -> dict:
        # Ensure the target field exists even if the incoming state is empty.
        values.setdefault("urns", [])
        for old_field, mapped_type in mapping.items():
            if old_field not in values:
                continue
            # Pop the legacy field so it does not trip pydantic validation.
            value = values.pop(old_field)
            if mapped_type == "dataset":
                values["urns"] += [
                    CheckpointStateUtil.get_urn_from_encoded_dataset(encoded_urn)
                    for encoded_urn in value
                ]
            elif mapped_type == "topic":
                values["urns"] += [
                    CheckpointStateUtil.get_urn_from_encoded_topic(encoded_urn)
                    for encoded_urn in value
                ]
            elif mapped_type == "container":
                values["urns"] += [make_container_urn(guid) for guid in value]
            elif mapped_type == "assertion":
                values["urns"] += [make_assertion_urn(encoded) for encoded in value]
            else:
                raise ValueError(f"Unsupported type {mapped_type}")
        return values

    # pre=True: runs before field validation, so legacy fields can be consumed.
    return pydantic.root_validator(pre=True, allow_reuse=True)(_validate_field_rename)
class GenericCheckpointState(CheckpointStateBase):
    """Stateful-ingestion checkpoint tracking a deduplicated, order-preserving urn list."""

    urns: List[str] = pydantic.Field(default_factory=list)

    # We store a bit of extra internal-only state so that we can keep the urns list deduplicated.
    # However, we still want `urns` to be a list so that it maintains its order.
    # We can't use OrderedSet here because pydantic doesn't recognize it and
    # it isn't JSON serializable.
    _urns_set: set = pydantic.PrivateAttr(default_factory=set)

    # Upgrades legacy per-entity-type state fields into the unified `urns` list.
    _migration = pydantic_state_migrator(
        {
            # From SQL:
            "encoded_table_urns": "dataset",
            "encoded_view_urns": "dataset",
            "encoded_container_urns": "container",
            "encoded_assertion_urns": "assertion",
            # From kafka:
            "encoded_topic_urns": "topic",
            # From dbt:
            "encoded_node_urns": "dataset",
            # "encoded_assertion_urns": "assertion", # already handled from SQL
        }
    )

    def __init__(self, **data: Any):  # type: ignore
        super().__init__(**data)
        # Deduplicate while preserving order, then seed the fast-lookup set.
        self.urns = deduplicate_list(self.urns)
        self._urns_set = set(self.urns)

    def add_checkpoint_urn(self, type: str, urn: str) -> None:
        """
        Adds an urn into the list used for tracking the type.
        :param type: Deprecated parameter, has no effect.
        :param urn: The urn string
        """
        # TODO: Deprecate the `type` parameter and remove it.
        if urn not in self._urns_set:
            self.urns.append(urn)
            self._urns_set.add(urn)

    def METHOD_NAME(
        self, type: str, other_checkpoint_state: "GenericCheckpointState"
    ) -> Iterable[str]:
        """
        Gets the urns present in this checkpoint but not the other_checkpoint for the given type.
        :param type: Deprecated. Set to "*".
        :param other_checkpoint_state: the checkpoint state to compute the urn set difference against.
        :return: an iterable to the set of urns present in this checkpoint state but not in the other_checkpoint.
        """
        diff = set(self.urns) - set(other_checkpoint_state.urns)
        # To maintain backwards compatibility, we provide this filtering mechanism.
        # TODO: Deprecate the `type` parameter and remove it.
        if type == "*":
            yield from diff
        elif type == "topic":
            # NOTE(review): topics appear to be persisted as dataset urns, hence
            # the "dataset" filter here — confirm against the kafka source.
            yield from (urn for urn in diff if guess_entity_type(urn) == "dataset")
        else:
            yield from (urn for urn in diff if guess_entity_type(urn) == type)

    def get_percent_entities_changed(
        self, old_checkpoint_state: "GenericCheckpointState"
    ) -> float:
        """
        Returns the percentage of entities that have changed relative to `old_checkpoint_state`.
        :param old_checkpoint_state: the old checkpoint state to compute the relative change percent against.
        :return: (1-|intersection(self, old_checkpoint_state)| / |old_checkpoint_state|) * 100.0
        """
        return compute_percent_entities_changed(
            new_entities=self.urns, old_entities=old_checkpoint_state.urns
        )
def compute_percent_entities_changed(
    new_entities: List[str], old_entities: List[str]
) -> float:
    """Return the percentage of `old_entities` that are absent from `new_entities`.

    Computed as (1 - |new ∩ old| / |old|) * 100 over the deduplicated sets;
    returns 0.0 when `old_entities` is empty. The previous version routed
    through a helper that also computed |new|, which was discarded here —
    the computation is now done directly.
    """
    old_set = set(old_entities)
    if not old_set:
        return 0.0
    overlap_count = len(set(new_entities) & old_set)
    return (1 - overlap_count / len(old_set)) * 100.0
def _get_entity_overlap_and_cardinalities(
new_entities: List[str], old_entities: List[str]
) -> Tuple[int, int, int]:
new_set = set(new_entities)
old_set = set(old_entities)
return len(new_set.intersection(old_set)), len(old_set), len(new_set) |
4,075 | make extension | #* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import re
import pydoc
import logging
import inspect
from MooseDocs.base import components
from MooseDocs.common import exceptions
from MooseDocs.tree import tokens, html
from . import core, command
LOG = logging.getLogger(__name__)
PyClass = tokens.newToken('PyClass')
PyFunction = tokens.newToken('PyFunction')
def METHOD_NAME(**kwargs):
    """Return the pysyntax extension object.

    Keyword arguments are forwarded to the PySyntaxExtension constructor.
    """
    return PySyntaxExtension(**kwargs)
class PySyntax(object):
    """Extracts documentation metadata from a python class or function.

    Accepts either an object or a dotted-path string (resolved via pydoc).
    """

    class Info(object):
        """Classification and documentation details for a single member."""

        def __init__(self, name, member):
            self.name = name
            # Visibility flags, derived from the member name; the four flags
            # are mutually exclusive by construction.
            self.internal = name.startswith('__') and name.endswith('__')
            self.private = not self.internal and name.startswith('_') and '__' in name
            self.protected = name.startswith('_') and not (self.private or self.internal)
            self.public = not (self.internal or self.private or self.protected)
            self.function = inspect.isfunction(member)
            # Only functions carry a signature; strip any leading "self".
            if self.function:
                self.signature = re.sub(r'self[, ]*', '', str(inspect.signature(member)))
            else:
                self.signature = None
            self.documentation = inspect.getdoc(member)

        def __eq__(self, rhs):
            attrs = ('internal', 'private', 'protected', 'public',
                     'function', 'signature', 'documentation')
            return all(getattr(self, a) == getattr(rhs, a) for a in attrs)

        def __str__(self):
            out = '{}:\n'.format(self.name)
            if self.documentation:
                out += '"{}"\n'.format(self.documentation)
            for flag in ('public', 'protected', 'private', 'internal', 'function'):
                out += ' {}: {}\n'.format(flag, getattr(self, flag))
            return out

    def __init__(self, cls):
        if isinstance(cls, str):
            cls = pydoc.locate(cls)
        self.documentation = inspect.getdoc(cls)
        self.filename = inspect.getfile(cls)
        self.signature = str(inspect.signature(cls))
        self.is_class = inspect.isclass(cls)
        self.is_function = inspect.isfunction(cls)
        self._members = dict()
        if self.is_class:
            self._members = {n: PySyntax.Info(n, m) for n, m in inspect.getmembers(cls)}
        elif self.is_function:
            self._members[cls.__qualname__] = PySyntax.Info(cls.__qualname__, cls)

    def items(self, function=None, **kwargs):
        """Yield (name, Info) pairs, filtered by visibility flags and/or kind.

        With no keyword flags, every visibility category is included; passing
        any of internal/private/protected/public selects only those set True.
        """
        default = not kwargs
        selected = {k: kwargs.get(k, default)
                    for k in ('internal', 'private', 'protected', 'public')}
        for name, info in self._members.items():
            if function is not None and info.function != function:
                continue
            if ((selected['internal'] and info.internal)
                    or (selected['private'] and info.private)
                    or (selected['protected'] and info.protected)
                    or (selected['public'] and info.public)):
                yield name, info
class PySyntaxExtension(command.CommandExtension):
    """MooseDocs extension registering the !pysyntax class/function commands
    and their HTML/LaTeX render components."""
    def extend(self, reader, renderer):
        self.requires(command)
        self.addCommand(reader, PySyntaxClassCommand())
        self.addCommand(reader, PySyntaxFunctionCommand())
        renderer.add('PyClass', RenderPyClass())
        renderer.add('PyFunction', RenderPyFunction())
class PySyntaxCommandBase(command.CommandComponent):
    """Base for !pysyntax commands: renders documentation extracted via PySyntax."""
    COMMAND = 'pysyntax'

    @staticmethod
    def defaultSettings():
        settings = command.CommandComponent.defaultSettings()
        settings['name'] = (None, "The name python object/function to extract documentation.")
        settings['heading-level'] = (2, "The heading level to use for class documentation.")
        return settings

    def _addDocumentation(self, parent, page, doc, h_level, **kwargs):
        # Emit a heading (name + signature) and the tokenized docstring for each
        # member selected by `kwargs` (visibility filters forwarded to items()).
        for name, pyinfo in doc.items(**kwargs):
            h = core.Heading(parent, level=h_level, class_='moose-pysyntax-member-heading')
            fname = name + pyinfo.signature if pyinfo.signature is not None else name
            core.Monospace(core.Strong(h), string=fname)
            if pyinfo.documentation is None:
                msg = "Missing documentation for '%s'.\n%s"
                LOG.error(msg, name, doc.filename)
            else:
                self.reader.tokenize(parent, pyinfo.documentation, page)

    def _addFunctionDocumentation(self, parent, page, doc, h_level):
        # Wrap the function's documentation in a PyFunction token.
        sec = PyFunction(parent)
        self._addDocumentation(sec, page, doc, h_level)

    def _addClassDocumentation(self, parent, page, name, doc, h_level, **kwargs):
        """Helper for listing class members"""
        sec = PyClass(parent)
        h = core.Heading(sec, level=h_level, string=name, class_='moose-pysyntax-class-heading')
        core.Monospace(sec, string=name + doc.signature)
        if doc.documentation is None:
            msg = "Missing documentation for '%s'.\n%s"
            LOG.error(msg, name, doc.filename)
        else:
            self.reader.tokenize(sec, doc.documentation, page)
        # Members are listed one heading level below the class heading.
        self._addDocumentation(sec, page, doc, h_level + 1, **kwargs)
class PySyntaxClassCommand(PySyntaxCommandBase):
    """!pysyntax class: document a python class and its public/protected members."""
    SUBCOMMAND = 'class'

    @staticmethod
    def defaultSettings():
        settings = PySyntaxCommandBase.defaultSettings()
        return settings

    def createToken(self, parent, info, page, settings):
        h_level = int(settings['heading-level'])
        obj = settings.get('name', None)
        if obj is None:
            raise exceptions.MooseDocsException("The 'name' setting is required.")
        doc = PySyntax(obj)
        if not doc.is_class:
            raise exceptions.MooseDocsException("'%s' is not a python class.", obj)
        self._addClassDocumentation(parent, page, obj, doc, h_level, public=True, protected=True)
        return parent
class PySyntaxFunctionCommand(PySyntaxCommandBase):
    """!pysyntax function: document a free python function."""
    SUBCOMMAND = 'function'

    @staticmethod
    def defaultSettings():
        settings = PySyntaxCommandBase.defaultSettings()
        # Reuse the base description but keep the default level of 2.
        settings['heading-level'] = (2, settings['heading-level'][1])
        return settings

    def createToken(self, parent, info, page, settings):
        h_level = int(settings['heading-level'])
        obj = settings.get('name', None)
        if obj is None:
            raise exceptions.MooseDocsException("The 'name' setting is required.")
        doc = PySyntax(obj)
        if not doc.is_function:
            raise exceptions.MooseDocsException("'%s' is not a python function.", obj)
        self._addFunctionDocumentation(parent, page, doc, h_level)
        return parent
class RenderPyClass(components.RenderComponent):
    """Renders a PyClass token: pass-through for LaTeX, a styled div for HTML."""
    def createLatex(self, parent, token, page):
        return parent

    def createHTML(self, parent, token, page):
        return html.Tag(parent, 'div', class_='moose-pysyntax-class')
class RenderPyFunction(components.RenderComponent):
    """Renders a PyFunction token: pass-through for LaTeX, a styled div for HTML."""
    def createLatex(self, parent, token, page):
        return parent

    def createHTML(self, parent, token, page):
        return html.Tag(parent, 'div', class_='moose-pysyntax-function')
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'rare/ui/components/tabs/games/game_info/game_dlc.ui'
#
# Created by: PyQt5 UI code generator 5.15.7
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_GameDlc(object):
    """pyuic5-generated layout class for the "GameDlc" tool box.

    NOTE(review): generated code -- any manual change is lost when pyuic5 is
    re-run, so only comments/docstrings are added here.
    """
    def METHOD_NAME(self, GameDlc):
        """Build the two tool-box pages (installed / available DLCs).

        GameDlc: the widget being populated; addItem/setItemText usage implies
        a QToolBox (or compatible QFrame subclass).
        """
        GameDlc.setObjectName("GameDlc")
        GameDlc.resize(271, 139)
        GameDlc.setWindowTitle("GameDlc")
        # Borderless, sunken panel appearance.
        GameDlc.setFrameShape(QtWidgets.QFrame.StyledPanel)
        GameDlc.setFrameShadow(QtWidgets.QFrame.Sunken)
        GameDlc.setLineWidth(0)
        # --- Page 1: installed DLCs (label stacked above a container widget) ---
        self.installed_dlc_page = QtWidgets.QWidget()
        self.installed_dlc_page.setGeometry(QtCore.QRect(0, 0, 267, 79))
        self.installed_dlc_page.setObjectName("installed_dlc_page")
        self.installed_dlc_page_layout = QtWidgets.QVBoxLayout(self.installed_dlc_page)
        self.installed_dlc_page_layout.setContentsMargins(0, 0, 0, 0)
        self.installed_dlc_page_layout.setObjectName("installed_dlc_page_layout")
        self.installed_dlc_label = QtWidgets.QLabel(self.installed_dlc_page)
        self.installed_dlc_label.setObjectName("installed_dlc_label")
        self.installed_dlc_page_layout.addWidget(self.installed_dlc_label, 0, QtCore.Qt.AlignTop)
        self.installed_dlc_container = QtWidgets.QWidget(self.installed_dlc_page)
        self.installed_dlc_container.setObjectName("installed_dlc_container")
        self.installed_dlc_container_layout = QtWidgets.QVBoxLayout(self.installed_dlc_container)
        self.installed_dlc_container_layout.setContentsMargins(0, 0, 3, 0)
        self.installed_dlc_container_layout.setObjectName("installed_dlc_container_layout")
        self.installed_dlc_page_layout.addWidget(self.installed_dlc_container, 0, QtCore.Qt.AlignTop)
        self.installed_dlc_page_layout.setStretch(1, 1)
        GameDlc.addItem(self.installed_dlc_page, "")
        # --- Page 2: available DLCs (same structure as page 1) ---
        self.available_dlc_page = QtWidgets.QWidget()
        self.available_dlc_page.setGeometry(QtCore.QRect(0, 0, 267, 79))
        self.available_dlc_page.setObjectName("available_dlc_page")
        # NOTE(review): "available_dlc_page_layou" is missing a trailing "t" in
        # the .ui file; kept as-is because the attribute name is part of the
        # generated interface used by callers.
        self.available_dlc_page_layou = QtWidgets.QVBoxLayout(self.available_dlc_page)
        self.available_dlc_page_layou.setContentsMargins(0, 0, 0, 0)
        self.available_dlc_page_layou.setObjectName("available_dlc_page_layou")
        self.available_dlc_label = QtWidgets.QLabel(self.available_dlc_page)
        self.available_dlc_label.setObjectName("available_dlc_label")
        self.available_dlc_page_layou.addWidget(self.available_dlc_label, 0, QtCore.Qt.AlignTop)
        self.available_dlc_container = QtWidgets.QWidget(self.available_dlc_page)
        self.available_dlc_container.setObjectName("available_dlc_container")
        self.available_dlc_container_layout = QtWidgets.QVBoxLayout(self.available_dlc_container)
        self.available_dlc_container_layout.setContentsMargins(0, 0, 3, 0)
        self.available_dlc_container_layout.setObjectName("available_dlc_container_layout")
        self.available_dlc_page_layou.addWidget(self.available_dlc_container, 0, QtCore.Qt.AlignTop)
        self.available_dlc_page_layou.setStretch(1, 1)
        GameDlc.addItem(self.available_dlc_page, "")
        self.retranslateUi(GameDlc)
    def retranslateUi(self, GameDlc):
        """Apply translatable strings (labels and tool-box item titles)."""
        _translate = QtCore.QCoreApplication.translate
        self.installed_dlc_label.setText(_translate("GameDlc", "No Downloadable Content has been installed."))
        GameDlc.setItemText(GameDlc.indexOf(self.installed_dlc_page), _translate("GameDlc", "Installed DLCs"))
        self.available_dlc_label.setText(_translate("GameDlc", "No Downloadable Content is available"))
        GameDlc.setItemText(GameDlc.indexOf(self.available_dlc_page), _translate("GameDlc", "Available DLCs"))
if __name__ == "__main__":
    # Manual preview harness appended by pyuic5 for quick visual testing.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    GameDlc = QtWidgets.QToolBox()
    ui = Ui_GameDlc()
    ui.METHOD_NAME(GameDlc)
    GameDlc.show()
    # NOTE: a stray " |" extraction artifact was removed from this line.
    sys.exit(app.exec_())
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListPrivateCloudAdminCredentialsResult',
'AwaitableListPrivateCloudAdminCredentialsResult',
'list_private_cloud_admin_credentials',
'list_private_cloud_admin_credentials_output',
]
@pulumi.output_type
class ListPrivateCloudAdminCredentialsResult:
    """
    Administrative credentials for accessing vCenter and NSX-T
    """
    def __init__(__self__, nsxt_password=None, nsxt_username=None, METHOD_NAME=None, vcenter_username=None):
        # Generated constructor: validate each credential is a str (when set)
        # and store it via pulumi.set so the @pulumi.output_type getters below
        # can retrieve it under the same key.
        if nsxt_password and not isinstance(nsxt_password, str):
            raise TypeError("Expected argument 'nsxt_password' to be a str")
        pulumi.set(__self__, "nsxt_password", nsxt_password)
        if nsxt_username and not isinstance(nsxt_username, str):
            raise TypeError("Expected argument 'nsxt_username' to be a str")
        pulumi.set(__self__, "nsxt_username", nsxt_username)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'vcenter_password' to be a str")
        pulumi.set(__self__, "vcenter_password", METHOD_NAME)
        if vcenter_username and not isinstance(vcenter_username, str):
            raise TypeError("Expected argument 'vcenter_username' to be a str")
        pulumi.set(__self__, "vcenter_username", vcenter_username)
    @property
    @pulumi.getter(name="nsxtPassword")
    def nsxt_password(self) -> str:
        """
        NSX-T Manager password
        """
        return pulumi.get(self, "nsxt_password")
    @property
    @pulumi.getter(name="nsxtUsername")
    def nsxt_username(self) -> str:
        """
        NSX-T Manager username
        """
        return pulumi.get(self, "nsxt_username")
    @property
    @pulumi.getter(name="vcenterPassword")
    def METHOD_NAME(self) -> str:
        """
        vCenter admin password
        """
        return pulumi.get(self, "vcenter_password")
    @property
    @pulumi.getter(name="vcenterUsername")
    def vcenter_username(self) -> str:
        """
        vCenter admin username
        """
        return pulumi.get(self, "vcenter_username")
class AwaitableListPrivateCloudAdminCredentialsResult(ListPrivateCloudAdminCredentialsResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the result awaitable even though the underlying invoke is
        # synchronous: the unreachable `yield` turns this method into a
        # generator, and awaiting it immediately returns a plain copy.
        if False:
            yield self
        return ListPrivateCloudAdminCredentialsResult(
            nsxt_password=self.nsxt_password,
            nsxt_username=self.nsxt_username,
            METHOD_NAME=self.METHOD_NAME,
            vcenter_username=self.vcenter_username)
def list_private_cloud_admin_credentials(private_cloud_name: Optional[str] = None,
                                         resource_group_name: Optional[str] = None,
                                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListPrivateCloudAdminCredentialsResult:
    """
    Administrative credentials for accessing vCenter and NSX-T
    :param str private_cloud_name: Name of the private cloud
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    __args__ = dict()
    __args__['privateCloudName'] = private_cloud_name
    __args__['resourceGroupName'] = resource_group_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous ARM invoke; typ= tells pulumi how to deserialize the result.
    __ret__ = pulumi.runtime.invoke('azure-native:avs/v20230301:listPrivateCloudAdminCredentials', __args__, opts=opts, typ=ListPrivateCloudAdminCredentialsResult).value
    return AwaitableListPrivateCloudAdminCredentialsResult(
        nsxt_password=pulumi.get(__ret__, 'nsxt_password'),
        nsxt_username=pulumi.get(__ret__, 'nsxt_username'),
        METHOD_NAME=pulumi.get(__ret__, 'vcenter_password'),
        vcenter_username=pulumi.get(__ret__, 'vcenter_username'))
@_utilities.lift_output_func(list_private_cloud_admin_credentials)
def list_private_cloud_admin_credentials_output(private_cloud_name: Optional[pulumi.Input[str]] = None,
                                                resource_group_name: Optional[pulumi.Input[str]] = None,
                                                opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListPrivateCloudAdminCredentialsResult]:
    """
    Administrative credentials for accessing vCenter and NSX-T
    :param str private_cloud_name: Name of the private cloud
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Body intentionally empty: lift_output_func wraps the plain invoke above.
    # (A stray " |" extraction artifact was removed after the ellipsis.)
    ...
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._private_end_point_connections_operations import build_list_by_factory_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndPointConnectionsOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.datafactory.aio.DataFactoryManagementClient`'s
        :attr:`private_end_point_connections` attribute.
    """
    models = _models
    def __init__(self, *args, **kwargs) -> None:
        # Generated constructor: the client passes (client, config, serializer,
        # deserializer) positionally or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list_by_factory(
        self, resource_group_name: str, factory_name: str, **kwargs: Any
    ) -> AsyncIterable["_models.PrivateEndpointConnectionResource"]:
        """Lists Private endpoint connections.
        :param resource_group_name: The resource group name. Required.
        :type resource_group_name: str
        :param factory_name: The factory name. Required.
        :type factory_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PrivateEndpointConnectionResource or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.datafactory.models.PrivateEndpointConnectionResource]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.PrivateEndpointConnectionListResponse]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # First page uses the templated URL; subsequent pages follow the
            # service-provided nextLink, re-applying the client's api-version.
            if not next_link:
                request = build_list_by_factory_request(
                    resource_group_name=resource_group_name,
                    factory_name=factory_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_by_factory.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urlparse(next_link)
                _next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("PrivateEndpointConnectionListResponse", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def METHOD_NAME(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(METHOD_NAME, extract_data)
    # NOTE: a stray " |" extraction artifact was removed from the end of the
    # following line; the code is otherwise unchanged.
    list_by_factory.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/privateEndPointConnections"}  # type: ignore
#!/usr/bin/python3 -OO
# Copyright 2007-2023 The SABnzbd-Team (sabnzbd.org)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
tests.test_misc - Testing functions in misc.py
"""
import datetime
import time
import configobj
import sabnzbd.rss as rss
import sabnzbd.config
class TestRSS:
    @staticmethod
    def setup_rss(feed_name, feed_url):
        """Setup the basic settings to get things going"""
        # Setup the config settings
        sabnzbd.config.CFG_OBJ = configobj.ConfigObj()
        sabnzbd.config.ConfigRSS(feed_name, {"uri": feed_url})
        # Need to create the Default category
        # Otherwise it will try to save the config
        sabnzbd.config.ConfigCat("*", {})
        sabnzbd.config.ConfigCat("tv", {})
        sabnzbd.config.ConfigCat("movies", {})
    def test_rss_newznab_parser(self):
        """Test basic RSS-parsing of custom elements
        Harder to test in functional test
        """
        feed_name = "TestFeedNewznab"
        self.setup_rss(feed_name, "https://sabnzbd.org/tests/rss_newznab_test.xml")
        # Start the RSS reader
        rss_obj = rss.RSSReader()
        rss_obj.run_feed(feed_name)
        # Is the feed processed?
        assert feed_name in rss_obj.jobs
        assert "https://cdn.nzbgeek.info/cdn?t=get&id=FakeKey&apikey=FakeKey" in rss_obj.jobs[feed_name]
        # Check some job-data
        job_data = rss_obj.jobs[feed_name]["https://cdn.nzbgeek.info/cdn?t=get&id=FakeKey&apikey=FakeKey"]
        assert job_data["title"] == "FakeShow.S04E03.720p.WEB.H264-Obfuscated"
        assert job_data["infourl"] == "https://nzbgeek.info/geekseek.php?guid=FakeKey"
        assert job_data["orgcat"] == "TV > HD"
        assert job_data["cat"] == "tv"
        assert job_data["episode"] == "3"
        assert job_data["season"] == "4"
        assert job_data["size"] == 1209464000
        # feedparser returns UTC so SABnzbd converts to locale
        # of the system, so now we have to return to UTC
        adjusted_date = datetime.datetime(2018, 4, 13, 5, 46, 25) - datetime.timedelta(seconds=time.timezone)
        assert job_data["age"] == adjusted_date
    # Renamed from a placeholder: the method must start with "test_" or pytest
    # never collects/runs it.
    def test_rss_nzedb_parser(self):
        """Test basic RSS-parsing of nZEDb-style custom elements."""
        feed_name = "TestFeednZEDb"
        self.setup_rss(feed_name, "https://sabnzbd.org/tests/rss_nzedb_test.xml")
        # Start the RSS reader
        rss_obj = rss.RSSReader()
        rss_obj.run_feed(feed_name)
        # Is the feed processed?
        assert feed_name in rss_obj.jobs
        assert "https://nzbfinder.ws/getnzb/FakeKey.nzb&i=46181&r=FakeKey" in rss_obj.jobs[feed_name]
        # Check some job-data
        # Added fake season and episode to test file
        job_data = rss_obj.jobs[feed_name]["https://nzbfinder.ws/getnzb/FakeKey.nzb&i=46181&r=FakeKey"]
        assert job_data["title"] == "Movie.With.a.Dog.2018.720p.BluRay.x264-SPRiNTER"
        assert job_data["infourl"] == "https://nzbfinder.ws/details/FakeKey"
        assert job_data["orgcat"] == "Movies > HD"
        assert job_data["cat"] == "movies"
        assert job_data["episode"] == "720"
        assert job_data["season"] == "2018"
        assert job_data["size"] == 5164539914
        # feedparser returns UTC so SABnzbd converts to locale
        # of the system, so now we have to return to UTC
        # (stray " |" extraction artifact removed from the line below)
        adjusted_date = datetime.datetime(2019, 3, 2, 17, 18, 7) - datetime.timedelta(seconds=time.timezone)
        assert job_data["age"] == adjusted_date
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RegexReplace op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
@parameterized.parameters(
    (gen_string_ops.regex_replace),
    (gen_string_ops.static_regex_replace))
class RegexReplaceOpVariantsTest(test.TestCase, parameterized.TestCase):
  """Runs each case against both the dynamic and static RegexReplace kernels."""

  @test_util.run_deprecated_v1
  def testForwarding(self, op):
    with self.cached_session():
      # Generate an input that is uniquely consumed by the regex op.
      # This exercises code paths which are optimized for this case
      # (e.g., using forwarding).
      inp = string_ops.substr(
          constant_op.constant(["AbCdEfG",
                                "HiJkLmN"], dtypes.string),
          pos=0,
          len=5)
      stripped = op(inp, "\\p{Ll}", ".").eval()
      self.assertAllEqual([b"A.C.E", b"H.J.L"], stripped)

  # Renamed from a placeholder: the method must start with "test" or the test
  # runner never collects/runs it.
  @test_util.run_deprecated_v1
  def testRemovePrefix(self, op):
    values = ["a:foo", "a:bar", "a:foo", "b:baz", "b:qux", "ca:b"]
    with self.cached_session():
      input_vector = constant_op.constant(values, dtypes.string)
      stripped = op(input_vector, "^(a:|b:)", "", replace_global=False).eval()
      self.assertAllEqual([b"foo", b"bar", b"foo", b"baz", b"qux", b"ca:b"],
                          stripped)

  @test_util.run_deprecated_v1
  def testRegexReplace(self, op):
    values = ["aba\naba", "abcdabcde"]
    with self.cached_session():
      input_vector = constant_op.constant(values, dtypes.string)
      stripped = op(input_vector, "a.*a", "(\\0)").eval()
      self.assertAllEqual([b"(aba)\n(aba)", b"(abcda)bcde"], stripped)

  @test_util.run_deprecated_v1
  def testEmptyMatch(self, op):
    values = ["abc", "1"]
    with self.cached_session():
      input_vector = constant_op.constant(values, dtypes.string)
      stripped = op(input_vector, "", "x").eval()
      self.assertAllEqual([b"xaxbxcx", b"x1x"], stripped)

  @test_util.run_deprecated_v1
  def testInvalidPattern(self, op):
    values = ["abc", "1"]
    with self.cached_session():
      input_vector = constant_op.constant(values, dtypes.string)
      invalid_pattern = "A["
      replace = op(input_vector, invalid_pattern, "x")
      with self.assertRaisesOpError("Invalid pattern"):
        self.evaluate(replace)

  @test_util.run_deprecated_v1
  def testGlobal(self, op):
    values = ["ababababab", "abcabcabc", ""]
    with self.cached_session():
      input_vector = constant_op.constant(values, dtypes.string)
      stripped = op(input_vector, "ab", "abc", True).eval()
      self.assertAllEqual([b"abcabcabcabcabc", b"abccabccabcc", b""], stripped)
def as_string(s):
  """Identity helper: hand back *s* unchanged (a plain Python string)."""
  return s
def as_tensor(s):
  """Wrap *s* in a string Tensor (counterpart of as_string for parameterized tests)."""
  return constant_op.constant(s, dtypes.string)
class RegexReplaceTest(test.TestCase, parameterized.TestCase):
  """Checks that string_ops.regex_replace dispatches to the right kernel,
  identified via the created op's name prefix."""
  @parameterized.parameters(
      (as_string, as_tensor),
      (as_tensor, as_string),
      (as_tensor, as_tensor))
  @test_util.run_deprecated_v1
  def testRegexReplaceDelegation(self, pattern_fn, rewrite_fn):
    # If either pattern or rewrite is a Tensor (not a static Python string),
    # the dynamic RegexReplace op must be used.
    with self.cached_session():
      input_vector = constant_op.constant("foo", dtypes.string)
      pattern = pattern_fn("[a-z]")
      replace = rewrite_fn(".")
      op = string_ops.regex_replace(input_vector, pattern, replace)
      self.assertTrue(op.name.startswith("RegexReplace"))
  @test_util.run_deprecated_v1
  def testStaticRegexReplaceDelegation(self):
    # With both pattern and rewrite known statically, the StaticRegexReplace
    # kernel should be chosen instead.
    with self.cached_session():
      input_vector = constant_op.constant("foo", dtypes.string)
      pattern = "[a-z]"
      replace = "."
      op = string_ops.regex_replace(input_vector, pattern, replace)
      self.assertTrue(op.name.startswith("StaticRegexReplace"))
if __name__ == "__main__":
  # NOTE: a stray " |" extraction artifact was removed from this line.
  test.main()
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetVaultResult',
'AwaitableGetVaultResult',
'get_vault',
'get_vault_output',
]
@pulumi.output_type
class GetVaultResult:
    """
    Resource information, as returned by the resource provider.
    """
    def __init__(__self__, etag=None, id=None, identity=None, location=None, name=None, properties=None, sku=None, system_data=None, tags=None, METHOD_NAME=None):
        # Generated constructor: validate the runtime type of each field (when
        # set) and store it via pulumi.set so the @pulumi.output_type getters
        # below can retrieve it under the same key.
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if identity and not isinstance(identity, dict):
            raise TypeError("Expected argument 'identity' to be a dict")
        pulumi.set(__self__, "identity", identity)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if METHOD_NAME and not isinstance(METHOD_NAME, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", METHOD_NAME)
    @property
    @pulumi.getter
    def etag(self) -> Optional[str]:
        """
        Optional ETag.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id represents the complete path to the resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def identity(self) -> Optional['outputs.IdentityDataResponse']:
        """
        Identity for the resource.
        """
        return pulumi.get(self, "identity")
    @property
    @pulumi.getter
    def location(self) -> str:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name associated with the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> 'outputs.VaultPropertiesResponse':
        """
        Properties of the vault.
        """
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.SkuResponse']:
        """
        Identifies the unique system identifier for each Azure resource.
        """
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        Metadata pertaining to creation and last modification of the resource.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def METHOD_NAME(self) -> str:
        """
        Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/...
        """
        return pulumi.get(self, "type")
class AwaitableGetVaultResult(GetVaultResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the result awaitable even though the underlying invoke is
        # synchronous: the unreachable `yield` turns this method into a
        # generator, and awaiting it immediately returns a plain copy.
        if False:
            yield self
        return GetVaultResult(
            etag=self.etag,
            id=self.id,
            identity=self.identity,
            location=self.location,
            name=self.name,
            properties=self.properties,
            sku=self.sku,
            system_data=self.system_data,
            tags=self.tags,
            METHOD_NAME=self.METHOD_NAME)
def get_vault(resource_group_name: Optional[str] = None,
              vault_name: Optional[str] = None,
              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVaultResult:
    """
    Get the Vault details.
    Azure REST API version: 2023-04-01.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str vault_name: The name of the recovery services vault.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['vaultName'] = vault_name
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous ARM invoke; typ= tells pulumi how to deserialize the result.
    __ret__ = pulumi.runtime.invoke('azure-native:recoveryservices:getVault', __args__, opts=opts, typ=GetVaultResult).value
    return AwaitableGetVaultResult(
        etag=pulumi.get(__ret__, 'etag'),
        id=pulumi.get(__ret__, 'id'),
        identity=pulumi.get(__ret__, 'identity'),
        location=pulumi.get(__ret__, 'location'),
        name=pulumi.get(__ret__, 'name'),
        properties=pulumi.get(__ret__, 'properties'),
        sku=pulumi.get(__ret__, 'sku'),
        system_data=pulumi.get(__ret__, 'system_data'),
        tags=pulumi.get(__ret__, 'tags'),
        METHOD_NAME=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_vault)
def get_vault_output(resource_group_name: Optional[pulumi.Input[str]] = None,
                     vault_name: Optional[pulumi.Input[str]] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVaultResult]:
    """
    Get the Vault details.
    Azure REST API version: 2023-04-01.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str vault_name: The name of the recovery services vault.
    """
    # Body intentionally empty: lift_output_func wraps the plain invoke above.
    # (A stray " |" extraction artifact was removed after the ellipsis.)
    ...
import sudo
import errno
import sys
import os
import pwd
import grp
import shutil
VERSION = 1.0
class SudoPolicyPlugin(sudo.Plugin):
    """Example sudo policy plugin
    Demonstrates how to use the sudo policy plugin API. All functions are added
    as an example on their syntax, but note that most of them are optional
    (except check_policy).
    On detailed description of the functions refer to sudo_plugin manual (man
    sudo_plugin).
    Most functions can express error or reject through their "int" return value
    as documented in the manual. The sudo module also has constants for these:
        sudo.RC.ACCEPT / sudo.RC.OK  1
        sudo.RC.REJECT               0
        sudo.RC.ERROR               -1
        sudo.RC.USAGE_ERROR         -2
    If the plugin encounters an error, instead of just returning sudo.RC.ERROR
    result code it can also add a message describing the problem.
    This can be done by raising the special exception:
        raise sudo.PluginError("Message")
    This added message will be used by the audit plugins.
    If the function returns "None" (for example does not call return), it will
    be considered sudo.RC.OK. If an exception other than sudo.PluginError is
    raised, its backtrace will be shown to the user and the plugin function
    returns sudo.RC.ERROR. If that is not acceptable, catch it.
    """
    _allowed_commands = ("id", "whoami")
    _safe_password = "12345"
    # -- Plugin API functions --
    def __init__(self, user_env: tuple, settings: tuple,
                 version: str, **kwargs):
        """The constructor matches the C sudo plugin API open() call
        Other variables you can currently use as arguments are:
            user_info: tuple
            plugin_options: tuple
        For their detailed description, see the open() call of the C plugin API
        in the sudo manual ("man sudo").
        """
        if not version.startswith("1."):
            # BUGFIX: message previously read "plugin plugin" and was missing
            # the space between the two concatenated string literals.
            raise sudo.PluginError(
                "This plugin is not compatible with python plugin "
                "API version {}".format(version))
        self.user_env = sudo.options_as_dict(user_env)
        self.settings = sudo.options_as_dict(settings)
    def check_policy(self, argv: tuple, env_add: tuple):
        cmd = argv[0]
        # Example for a simple reject:
        if not self._is_command_allowed(cmd):
            sudo.log_error("You are not allowed to run this command!")
            return sudo.RC.REJECT
            # BUGFIX: this alternative rejection used to be live code placed
            # *after* the if-block, which rejected every command (the rest of
            # the function was unreachable). It is kept only as documentation:
            #     raise sudo.PluginError("You are not allowed to run this command!")
        # The environment the command will be executed with (we allow any here)
        user_env_out = sudo.options_from_dict(self.user_env) + env_add
        command_info_out = sudo.options_from_dict({
            "command": self._find_on_path(cmd),  # Absolute path of command
            "runas_uid": self._runas_uid(),  # The user id
            "runas_gid": self._runas_gid(),  # The group id
        })
        return (sudo.RC.ACCEPT, command_info_out, argv, user_env_out)
    def init_session(self, user_pwd: tuple, user_env: tuple):
        """Perform session setup
        Beware that user_pwd can be None if user is not present in the password
        database. Otherwise it is a tuple convertible to pwd.struct_passwd.
        """
        # conversion example:
        user_pwd = pwd.struct_passwd(user_pwd) if user_pwd else None
        # This is how you change the user_env:
        return (sudo.RC.OK, user_env + ("PLUGIN_EXAMPLE_ENV=1",))
        # If you do not want to change user_env, you can just return (or None):
        # return sudo.RC.OK
    def list(self, argv: tuple, is_verbose: int, user: str):
        cmd = argv[0] if argv else None
        as_user_text = "as user '{}'".format(user) if user else ""
        if cmd:
            allowed_text = "" if self._is_command_allowed(cmd) else "NOT "
            sudo.log_info("You are {}allowed to execute command '{}'{}"
                          .format(allowed_text, cmd, as_user_text))
        if not cmd or is_verbose:
            sudo.log_info("Only the following commands are allowed:",
                          ", ".join(self._allowed_commands), as_user_text)
    def validate(self):
        pass  # we have no cache
    def invalidate(self, remove: int):
        pass  # we have no cache
    def METHOD_NAME(self, is_verbose: int):
        sudo.log_info("Python Example Policy Plugin "
                      "version: {}".format(VERSION))
        if is_verbose:
            sudo.log_info("Python interpreter version:", sys.version)
    def close(self, exit_status: int, error: int) -> None:
        if error == 0:
            sudo.log_info("The command returned with exit_status {}".format(
                exit_status))
        else:
            error_name = errno.errorcode.get(error, "???")
            sudo.log_error(
                "Failed to execute command, execve syscall returned "
                "{} ({})".format(error, error_name))
    # -- Helper functions --
    def _is_command_allowed(self, cmd):
        return os.path.basename(cmd) in self._allowed_commands
    def _find_on_path(self, cmd):
        if os.path.isabs(cmd):
            return cmd
        path = self.user_env.get("PATH", "/usr/bin:/bin")
        absolute_cmd = shutil.which(cmd, path=path)
        if not absolute_cmd:
            raise sudo.PluginError("Can not find cmd '{}' on PATH".format(cmd))
        return absolute_cmd
    def _runas_pwd(self):
        runas_user = self.settings.get("runas_user") or "root"
        try:
            return pwd.getpwnam(runas_user)
        except KeyError:
            raise sudo.PluginError("Could not find user "
                                   "'{}'".format(runas_user))
    def _runas_uid(self):
        return self._runas_pwd().pw_uid
    def _runas_gid(self):
        runas_group = self.settings.get("runas_group")
        if runas_group is None:
            return self._runas_pwd().pw_gid
        try:
            return grp.getgrnam(runas_group).gr_gid
        except KeyError:
            # (stray " |" extraction artifact removed from the line below)
            raise sudo.PluginError(
                "Could not find group '{}'".format(runas_group))
#
# See top-level LICENSE.rst file for Copyright information
#
# -*- coding: utf-8 -*-
"""
desispec.pipeline.tasks.redshift
================================
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from .base import BaseTask, task_classes, task_type
from ...io import findfile
from ...util import option_list
from redrock.external.desi import rrdesi
from desiutil.log import get_logger
import os
# NOTE: only one class in this file should have a name that starts with "Task".
class TaskRedshift(BaseTask):
    """Class containing the properties of one spectra task.

    Represents one per-healpix-pixel redshift-fitting (redrock) task in the
    desispec pipeline, including its file outputs, dependencies, resource
    estimates, and the command used to run it.
    """
    def __init__(self):
        super(TaskRedshift, self).__init__()
        # then put int the specifics of this class
        # _cols must have a state
        self._type = "redshift"
        self._cols = [
            "nside",
            "pixel",
            "state"
        ]
        self._coltypes = [
            "integer",
            "integer",
            "integer"
        ]
        # _name_fields must also be in _cols
        self._name_fields = ["nside","pixel"]
        self._name_formats = ["d","d"]
    def _paths(self, name):
        """See BaseTask.paths.

        Returns the redrock and rrdetails output files for this pixel.
        """
        props = self.name_split(name)
        hpix = props["pixel"]
        nside = props["nside"]
        redrock = findfile("redrock", groupname=hpix, nside=nside)
        rrdetails = findfile("rrdetails", groupname=hpix, nside=nside)
        return [redrock, rrdetails]
    def _deps(self, name, db, inputs):
        """See BaseTask.deps.

        The only dependency is the spectra task for the same pixel.
        """
        props = self.name_split(name)
        deptasks = {
            "infile" : task_classes["spectra"].name_join(props)
        }
        return deptasks
    def METHOD_NAME(self):
        """Maximum number of processes usable by this task (0 = no limit)."""
        # Redshifts can run on any number of procs.
        return 0
    def _run_time(self, name, procs, db):
        """Estimated run time in minutes on a machine with scale factor 1.0."""
        # Run time on one task on machine with scale factor == 1.0.
        # This should depend on the total number of unique targets, which is
        # not known a priori. Instead, we compute the total targets and reduce
        # this by some factor.
        if db is not None:
            props = self.name_split(name)
            entries = db.select_healpix_frame(
                {"pixel":props["pixel"],
                 "nside":props["nside"]}
            )
            ntarget = np.sum([x["ntargets"] for x in entries])
            # 0.3 is the assumed ratio of unique targets to total DB entries
            neff = 0.3 * ntarget
            # 2.5 seconds per targets (0.0167 ~ 1/60 converts sec to minutes)
            tm = 1 + 2.5 * 0.0167 * neff
        else:
            # no DB available: fall back to a flat one-hour estimate
            tm = 60
        return tm
    def _run_max_mem_proc(self, name, db):
        """Per-process memory requirement estimate in GB."""
        # Per-process memory requirements. This is determined by the largest
        # Spectra file that must be read and broadcast. We compute that size
        # assuming no coadd and using the total number of targets falling in
        # our pixel.
        mem = 0.0
        if db is not None:
            props = self.name_split(name)
            entries = db.select_healpix_frame(
                {"pixel":props["pixel"],
                 "nside":props["nside"]}
            )
            ntarget = np.sum([x["ntargets"] for x in entries])
            # DB entry is for one exposure and spectrograph.
            mem = 0.2 + 0.0002 * 3 * ntarget
        return mem
    def _run_max_mem_task(self, name, db):
        """Aggregate task memory requirement; zero selects a single node."""
        # This returns the total aggregate memory needed for the task,
        # which should be based on the larger of:
        # 1) the total number of unique (coadded) targets.
        # 2) the largest spectra file times the number of processes
        # Since it is not easy to calculate (1), and the constraint for (2)
        # is already encapsulated in the per-process memory requirements,
        # we return zero here. This effectively selects one node.
        mem = 0.0
        return mem
    def _run_defaults(self):
        """See BaseTask.run_defaults.
        """
        return {'no-mpi-abort': True}
    def _option_list(self, name, opts):
        """Build the full list of options.
        This includes appending the filenames and incorporating runtime
        options.
        """
        redrockfile, rrdetailsfile = self.paths(name)
        outdir = os.path.dirname(redrockfile)
        options = {}
        options["details"] = rrdetailsfile
        options["outfile"] = redrockfile
        # runtime opts may override the defaults set above
        options.update(opts)
        optarray = option_list(options)
        # the input spectra file is a positional argument at the end
        deps = self.deps(name)
        specfile = task_classes["spectra"].paths(deps["infile"])[0]
        optarray.append(specfile)
        return optarray
    def _run_cli(self, name, opts, procs, db):
        """See BaseTask.run_cli.
        """
        entry = "rrdesi_mpi"
        optlist = self._option_list(name, opts)
        return "{} {}".format(entry, " ".join(optlist))
    def _run(self, name, opts, comm, db):
        """See BaseTask.run.
        """
        optlist = self._option_list(name, opts)
        rrdesi(options=optlist, comm=comm)
        return
    def run_and_update(self, db, name, opts, comm=None):
        """Run the task and update DB state.
        The state of the task is marked as "done" if the command completes
        without raising an exception and if the output files exist.
        It is specific for redshift because the healpix_frame table has to be updated
        Args:
            db (pipeline.db.DB): The database.
            name (str): the name of this task.
            opts (dict): options to use for this task.
            comm (mpi4py.MPI.Comm): optional MPI communicator.
        Returns:
            int: the number of processes that failed.
        """
        nproc = 1
        rank = 0
        if comm is not None:
            nproc = comm.size
            rank = comm.rank
        failed = self.run(name, opts, comm=comm, db=db)
        # only rank 0 touches the database
        if rank == 0:
            if failed > 0:
                self.state_set(db, name, "failed")
            else:
                # the run succeeded, but also require all outputs to exist
                outputs = self.paths(name)
                done = True
                for out in outputs:
                    if not os.path.isfile(out):
                        done = False
                        failed = nproc
                        break
                if done:
                    props=self.name_split(name)
                    props["state"]=2 # selection, only those for which we had already updated the spectra
                    with db.cursor() as cur :
                        self.state_set(db, name, "done",cur=cur)
                        db.update_healpix_frame_state(props,state=3,cur=cur) # 3=redshifts have been updated
                else:
                    self.state_set(db, name, "failed")
        return failed
4,084 | get duplicates | # add the root of the project into the path to allow cd-ing into this folder and running the script.
from os.path import abspath
from sys import path
path.insert(0, abspath(__file__).rsplit('/', 2)[0])
from typing import List
from collections import Counter
from constants.data_stream_constants import CHUNKABLE_FILES
from database.data_access_models import ChunkRegistry, FileToProcess
# Warn the operator and explain the script's behavior before doing anything.
print("""
This script can take quite a while to run, it depends on the size of the ChunkRegistry database table.
It is STRONGLY recommended that you disable data processing before executing this script.
(ssh and run processing-stop, run processing-start when the script has finished.)
Finding duplicate file chunks. This needs to be done in a memory-safe, so it could take several minutes.
When the initial database query operation finishes you will start seeing additional output.
This script is incremental. You can stop it at any time and restart it later.
DO NOT RUN MULTIPLE INSTANCES OF THIS SCRIPT AT THE SAME TIME.
""")
# Check whether the migration that enforces unique chunk paths has already
# been applied; if so, this cleanup is unnecessary and we exit immediately.
from django.db.migrations.executor import MigrationExecutor
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[DEFAULT_DB_ALIAS]
connection.prepare_database()
applied_migrations = set(migration_name for _, migration_name in MigrationExecutor(connection).loader.applied_migrations)
if "0025_auto_20200106_2153.py" in applied_migrations:
    print("\nYour chunk paths are already unique.\n")
    exit(0)
# When DEBUG is True no rows are deleted and no chunk files are reprocessed.
DEBUG = False
if DEBUG:
    print("\nRUNNING IN DEBUG MODE, NO DESTRUCTIVE ACTIONS WILL BE TAKEN.\n")
def run():
    # Driver: media (non-csv) duplicates can simply be deleted down to one
    # row, while chunkable csv duplicates also need their original uploads
    # queued for reprocessing (handled inside fix_duplicates).
    duplicate_chunks, duplicate_media = METHOD_NAME()
    for media_file in duplicate_media:
        remove_all_but_one_chunk(media_file)
    fix_duplicates(duplicate_chunks)
def METHOD_NAME() -> (List[str], List[str]):
    """Return ([duplicated .csv chunk paths], [duplicated media file paths])."""
    print("Getting duplicates...")
    duplicate_chunks, duplicate_media = [], []
    # most_common() is ordered by descending count, so the first path with a
    # count below 2 means every remaining path is unique and we can stop.
    path_counts = Counter(
        ChunkRegistry.objects.values_list("chunk_path", flat=True)).most_common()
    for chunk_path, count in path_counts:
        if count < 2:
            break
        bucket = duplicate_chunks if chunk_path.endswith(".csv") else duplicate_media
        bucket.append(chunk_path)
    print(f"Discovered {len(duplicate_media)} duplicate media file(s) and {len(duplicate_chunks)} duplicate chunked file(s).")
    return duplicate_chunks, duplicate_media
def fix_duplicates(duplicate_chunks):
    # For each duplicated chunk path: chunkable data streams get their
    # original uploads queued for reprocessing before the duplicate rows are
    # removed; non-chunkable streams just have the duplicate rows removed.
    for path in duplicate_chunks:
        # deconstruct relevant information from chunk path, clean it
        # (paths have either 4 or 5 slash-separated components depending on
        # whether a leading prefix is present)
        path_components = path.split("/")
        if len(path_components) == 5:
            _, study_obj_id, username, data_stream, timestamp = path.split("/")
        elif len(path_components) == 4:
            study_obj_id, username, data_stream, timestamp = path.split("/")
        else:
            print("You appear to have an invalid file path. Please report this error to https://github.com/onnela-lab/beiwe-backend/issues")
            raise Exception("invalid_path: %s" % path)
        # not all files are chunkable, they will require different logic.
        if data_stream not in CHUNKABLE_FILES:
            remove_all_but_one_chunk(path)
            continue
        else:
            try:
                FileToProcess.reprocess_originals_from_chunk_path(path)
            except Exception as e:
                # missing originals are tolerated: the chunk rows still get
                # deduplicated below; any other error is fatal
                if "did not find any matching files" in str(e):
                    pass
                else:
                    raise
            remove_all_but_one_chunk(path)
def remove_all_but_one_chunk(chunk_path: str):
    """Delete every ChunkRegistry row for *chunk_path* except one.

    Callers only pass paths that occurred more than once, so finding zero or
    one matching rows means another process mutated the table underneath us.
    """
    chunk_ids = list(ChunkRegistry.objects.filter(chunk_path=chunk_path).values_list("id", flat=True))
    if len(chunk_ids) in [0, 1]:
        # BUG FIX: corrected garbled error message ("This s not" -> "This is not").
        raise Exception("This is not possible, are you running multiple instances of this script?")
    # the lowest id is probably the oldest. It is ~most likely that users who have downloaded
    # data have newer chunk hash values, so we will ensure the
    # NOTE(review): sort() + pop() keeps the row with the HIGHEST id (the
    # newest) and deletes the rest — confirm that matches the intent above.
    chunk_ids.sort()
    remaining_id = chunk_ids.pop()
    print(remaining_id)
    print(f"Deleting {len(chunk_ids)} duplicate instance(s) for {chunk_path}.")
    if not DEBUG:
        # DEBUG mode (set at module level) suppresses the destructive delete
        ChunkRegistry.objects.filter(id__in=chunk_ids).delete()
# Standard script entry point; run() performs the actual cleanup.
if __name__ == "__main__":
    run()
4,085 | test file props mkv | import numpy as np
import pytest
from utils import (
DEFAULT_FOURCC,
MJPEG_FOURCC,
MJPEG_FPS,
MJPEG_LEN,
MJPEG_SIZE,
MKV_FOURCC,
MKV_FPS,
MKV_LEN,
MKV_SIZE,
MP4_FOURCC,
MP4_FPS,
MP4_LEN,
MP4_SIZE,
TEST_FPS,
TEST_HEIGHT,
TEST_NUM_COLOR_FRAMES,
TEST_WIDTH,
assert_images_equal,
)
from kedro.extras.datasets.video.video_dataset import (
FileVideo,
GeneratorVideo,
SequenceVideo,
)
class TestSequenceVideo:
    """Unit tests for SequenceVideo: indexing, iteration and metadata."""

    def test_sequence_video_indexing_first(self, color_video, red_frame):
        """The first frame of the fixture video is the red frame."""
        first = np.array(color_video[0])
        assert first.shape == (TEST_HEIGHT, TEST_WIDTH, 3)
        assert np.all(first == red_frame)

    def test_sequence_video_indexing_last(self, color_video, purple_frame):
        """Negative indexing returns the last (purple) frame."""
        last = np.array(color_video[-1])
        assert last.shape == (TEST_HEIGHT, TEST_WIDTH, 3)
        assert np.all(last == purple_frame)

    def test_sequence_video_iterable(self, color_video):
        """Iteration yields the same frames as indexing, in order."""
        for frame_no, frame in enumerate(map(np.array, color_video)):
            assert np.all(frame == np.array(color_video[frame_no]))
        assert frame_no == TEST_NUM_COLOR_FRAMES - 1

    def test_sequence_video_fps(self, color_video):
        """fps reflects whatever was passed to the constructor."""
        # the fixture's own fps
        assert color_video.fps == TEST_FPS
        # a video rebuilt with a different fps reports that fps
        other_fps = 123
        rebuilt = SequenceVideo(color_video._frames, fps=other_fps)
        assert rebuilt.fps == other_fps

    def test_sequence_video_len(self, color_video):
        """len() reports the number of frames."""
        assert len(color_video) == TEST_NUM_COLOR_FRAMES

    def test_sequence_video_size(self, color_video):
        """size is reported as (width, height)."""
        assert color_video.size == (TEST_WIDTH, TEST_HEIGHT)

    def test_sequence_video_fourcc_default_value(self, color_video):
        """A video built without an explicit fourcc uses the default codec."""
        assert color_video.fourcc == DEFAULT_FOURCC

    def test_sequence_video_fourcc(self, color_video):
        """An explicit fourcc overrides the default codec."""
        other_fourcc = "mjpg"
        assert (
            DEFAULT_FOURCC != other_fourcc
        ), "Test does not work if new test value is same as default"
        rebuilt = SequenceVideo(
            color_video._frames, fps=TEST_FPS, fourcc=other_fourcc
        )
        assert rebuilt.fourcc == other_fourcc
class TestGeneratorVideo:
    """Unit tests for GeneratorVideo: content, fps, length, size and fourcc."""

    def test_generator_video_iterable(self, color_video_generator, color_video):
        """Iterating the generator yields the same frames as the SequenceVideo.

        The SequenceVideo content is covered by its own tests and can be
        trusted as the reference here.
        """
        for frame_no, frame in enumerate(map(np.array, color_video_generator)):
            assert np.all(frame == np.array(color_video[frame_no]))
        assert frame_no == TEST_NUM_COLOR_FRAMES - 1

    def test_generator_video_fps(self, color_video_generator):
        """fps reflects whatever was passed to the constructor."""
        assert color_video_generator.fps == TEST_FPS
        other_fps = 123
        rebuilt = GeneratorVideo(
            color_video_generator._gen, length=TEST_NUM_COLOR_FRAMES, fps=other_fps
        )
        assert rebuilt.fps == other_fps

    def test_generator_video_len(self, color_video_generator):
        """len() reports the declared number of frames."""
        assert len(color_video_generator) == TEST_NUM_COLOR_FRAMES

    def test_generator_video_size(self, color_video_generator):
        """size is reported as (width, height)."""
        assert color_video_generator.size == (TEST_WIDTH, TEST_HEIGHT)

    def test_generator_video_fourcc_default_value(self, color_video_generator):
        """A video built without an explicit fourcc uses the default codec."""
        assert color_video_generator.fourcc == DEFAULT_FOURCC

    def test_generator_video_fourcc(self, color_video_generator):
        """An explicit fourcc overrides the default codec."""
        other_fourcc = "mjpg"
        assert (
            DEFAULT_FOURCC != other_fourcc
        ), "Test does not work if new test value is same as default"
        rebuilt = GeneratorVideo(
            color_video_generator._gen,
            length=TEST_NUM_COLOR_FRAMES,
            fps=TEST_FPS,
            fourcc=other_fourcc,
        )
        assert rebuilt.fourcc == other_fourcc
class TestFileVideo:
    """Tests for FileVideo: per-format metadata, frame indexing, decode failure."""
    @pytest.mark.skip(reason="Can't deal with videos with missing time info")
    def test_file_props_mjpeg(self, mjpeg_object):
        # MJPEG fixture metadata must match the known constants.
        assert mjpeg_object.fourcc == MJPEG_FOURCC
        assert mjpeg_object.fps == MJPEG_FPS
        assert mjpeg_object.size == MJPEG_SIZE
        assert len(mjpeg_object) == MJPEG_LEN
    def METHOD_NAME(self, mkv_object):
        # MKV fixture metadata must match the known constants.
        assert mkv_object.fourcc == MKV_FOURCC
        assert mkv_object.fps == MKV_FPS
        assert mkv_object.size == MKV_SIZE
        assert len(mkv_object) == MKV_LEN
    def test_file_props_mp4(self, mp4_object):
        # MP4 fixture metadata must match the known constants.
        assert mp4_object.fourcc == MP4_FOURCC
        assert mp4_object.fps == MP4_FPS
        assert mp4_object.size == MP4_SIZE
        assert len(mp4_object) == MP4_LEN
    def test_file_index_first(self, color_video_object, red_frame):
        # First frame of the color fixture is red.
        assert_images_equal(color_video_object[0], red_frame)
    def test_file_index_last_by_index(self, color_video_object, purple_frame):
        # Last frame addressed by its positive index is purple.
        assert_images_equal(color_video_object[TEST_NUM_COLOR_FRAMES - 1], purple_frame)
    def test_file_index_last(self, color_video_object, purple_frame):
        # Last frame addressed by -1 is purple.
        assert_images_equal(color_video_object[-1], purple_frame)
    def test_file_video_failed_capture(self, mocker):
        """Validate good behavior on failed decode
        The best behavior in this case is not obvious, the len property of the
        video object specifies more frames than is actually possible to decode. We
        cannot know this in advance without spending loads of time to decode all frames
        in order to count them."""
        # Patch cv2 so VideoCapture claims 2 frames but only decodes one.
        mock_cv2 = mocker.patch("kedro.extras.datasets.video.video_dataset.cv2")
        mock_cap = mock_cv2.VideoCapture.return_value = mocker.Mock()
        mock_cap.get.return_value = 2  # Set the length of the video
        ds = FileVideo("/a/b/c")
        # first read succeeds and returns a frame
        mock_cap.read.return_value = True, np.zeros((1, 1))
        assert ds[0]
        # second read fails -> indexing past decodable frames raises IndexError
        mock_cap.read.return_value = False, None
        with pytest.raises(IndexError):
            ds[1]
4,086 | import note | from django.core.management.base import BaseCommand
from django.db import transaction
from django.core.files import File
from optparse import make_option
import csv
import datetime
import os.path
import mimetypes
from advisornotes.models import AdvisorNote
from coredata.models import Person, Unit
from coredata.queries import add_person
from courselib.text import normalize_newlines
class Command(BaseCommand):
help = 'Import CSV advising data from CRIM.'
args = '<unit_slug> <advisor_userid> <csv_data> <file_base>'
option_list = BaseCommand.option_list + (
make_option('-n', '--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Don\'t actually modify anything.'),
)
def get_filepath(self, fm_filename):
if not fm_filename:
return None
filename = os.path.split(fm_filename)[1]
filepath = os.path.join(self.file_base, filename)
if os.path.isfile(filepath):
return filepath
else:
if self.verbosity > 0:
print("Missing file %s." % (filename,))
    def get_advisornote(self, key, person, advisor, created, delete_old_file=False, offset=0):
        """
        get_or_create for this usage: return (note, existed) where *existed*
        says whether a previously-imported note with the same import_key and
        timestamp was found. *offset* shifts the timestamp by minutes so that
        several notes for one source row get distinct created_at values.
        """
        created = created + datetime.timedelta(minutes=offset)
        # look for previously-imported version of this note, so we're roughly idempotent
        oldnotes = AdvisorNote.objects.filter(student=person, advisor=advisor, created_at=created, unit=self.unit)
        oldnotes = [n for n in oldnotes if 'import_key' in n.config and n.config['import_key'] == key]
        if oldnotes:
            note = oldnotes[0]
            if delete_old_file and note.file_attachment and os.path.isfile(note.file_attachment.path):
                # let file be recreated below
                os.remove(note.file_attachment.path)
                note.file_attachment = None
                note.file_mediatype = None
        else:
            note = AdvisorNote(student=person, advisor=advisor, created_at=created, unit=self.unit)
        # tag the note so future runs can recognize it as imported
        note.config['import_key'] = key
        note.config['src'] = 'crim_import'
        return note, bool(oldnotes)
def attach_file(self, note, filepath):
"""
Use this filepath as the attachment for this note.
"""
with File(open(filepath, 'rb')) as fh:
base = os.path.split(filepath)[1]
if self.commit:
note.file_attachment.save(base, fh)
mediatype = mimetypes.guess_type(filepath)[0]
note.file_mediatype = mediatype
    def METHOD_NAME(self, advisor, fn, i, row):
        """Import one CSV row as an AdvisorNote (plus aux notes for extra files).

        *fn* is the CSV filename and *i* the 0-based row index; together they
        form the idempotency key. Rows with missing/unresolvable emplids are
        skipped with a message at verbosity > 0.
        """
        emplid = row['Student ID']
        date_str = row['Date Modified']
        notes = normalize_newlines(row['Notes'])
        # up to four attachment columns may be present per row
        files = [
            row.get('Transcript', None),
            row.get('Files', None),
            row.get('Files2', None),
            row.get('Files3', None),
        ]
        files = list(map(self.get_filepath, files))
        files = list(filter(bool, files))
        # fix mis-typed emplids we found
        # Lindsay
        if emplid == '960022098':
            emplid = '963022098'
        elif emplid == '30108409':
            emplid = '301078409'
        elif emplid == '30115964':
            emplid = '301115964'
        elif emplid == '30117882':
            emplid = '301178882'
        # Michael Sean
        elif emplid == '30105659':
            emplid = '301040985' # ?
        # Dijana
        elif emplid == '30120965':
            emplid = '301202965'
        if not emplid or emplid == '0':
            if self.verbosity > 0:
                # +2: 1-based rows plus the CSV header line
                print('No emplid on row %i' % (i+2))
            return
        p = add_person(emplid, commit=self.commit)
        if not p:
            if self.verbosity > 0:
                print("Can't find person on row %i (emplid %s)" % (i+2, emplid))
            return
        if self.verbosity > 1:
            print("Importing %s with %i file(s)." % (emplid, len(files)))
        # dates appear in two formats in the source data
        try:
            date = datetime.datetime.strptime(date_str, '%m-%d-%Y').date()
        except ValueError:
            date = datetime.datetime.strptime(date_str, '%Y-%m-%d').date()
        created = datetime.datetime.combine(date, datetime.time(hour=12, minute=0))
        key = '%s-%i' % (fn, i)
        note, _ = self.get_advisornote(key, p, advisor, created, delete_old_file=self.commit)
        if files:
            path = files[0]
            self.attach_file(note, path)
        for j, path in enumerate(files[1:]):
            # these get stashed in accompanying notes
            # NOTE(review): the aux key uses str(i), not str(j), so all aux
            # files of one row share a key; the created-at offset (j+1) is
            # what keeps them distinct — confirm this is intentional.
            k = key + '-auxfile-' + str(i)
            n, _ = self.get_advisornote(k, p, advisor, created, delete_old_file=self.commit, offset=(j+1))
            n.text = '[Additional file for previous note.]'
            self.attach_file(n, path)
            if self.commit:
                n.save()
        note.text = notes
        if self.commit:
            note.save()
def import_notes(self, unit_slug, advisor_userid, inputfile, file_base):
self.unit = Unit.objects.get(slug=unit_slug)
self.file_base = file_base
advisor = Person.objects.get(userid=advisor_userid)
with open(inputfile, 'rb') as fh:
data = csv.DictReader(fh)
fn = os.path.split(inputfile)[1]
for i, row in enumerate(data):
with transaction.atomic():
self.METHOD_NAME(advisor, fn, i, row)
def handle(self, *args, **options):
self.verbosity = int(options['verbosity'])
self.commit = not options['dry_run']
self.import_notes(args[0], args[1], args[2], args[3] |
4,087 | do get | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Tests for proxy app.
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
import threading
import logging
import http.server
import sys
from nose.tools import assert_true, assert_false
from django.test.client import Client
from desktop.lib.django_test_util import make_logged_in_client
from proxy.views import _rewrite_links
import proxy.conf
if sys.version_info[0] > 2:
from io import StringIO as string_io
else:
from StringIO import StringIO as string_io
class Handler(http.server.BaseHTTPRequestHandler):
    """
    To avoid mocking out urllib, we setup a web server
    that does very little, and test proxying against it.
    """
    def METHOD_NAME(self):
        # GET: echo the request path plus a fixed image tag and link, so the
        # proxy's link rewriting can be asserted against by the tests.
        self.send_response(200)
        self.send_header("Content-type", "text/html; charset=utf8")
        self.end_headers()
        raw_path = self.path
        if not isinstance(raw_path, bytes):
            raw_path = raw_path.encode('utf-8')
        for chunk in (
            b"Hello there.",
            b"You requested: " + raw_path + b".",
            b"Image: <img src='/foo.jpg'>",
            b"Link: <a href='/baz?with=parameter'>link</a>",
        ):
            self.wfile.write(chunk)

    def do_POST(self):
        # POST: echo the path plus the first 16 bytes of the request body.
        self.send_response(200)
        self.send_header("Content-type", "text/html; charset=utf8")
        self.end_headers()
        raw_path = self.path
        if not isinstance(raw_path, bytes):
            raw_path = raw_path.encode('utf-8')
        self.wfile.write(b"Hello there.")
        self.wfile.write(b"You requested: " + raw_path + b".")
        # Somehow in this architecture read() blocks, so we read the exact
        # number of bytes the test sends.
        self.wfile.write(b"Data: " + self.rfile.read(16))

    def log_message(self, fmt, *args):
        # Route per-request logging through the logging module (debug level)
        # instead of the default stderr output.
        line = "%s - - [%s] %s" % (self.address_string(),
                                   self.log_date_time_string(),
                                   fmt % args)
        logging.debug(line)
def run_test_server():
    """
    Returns the server, and a method to close it out.
    """
    # Bind an ephemeral port on localhost; the worker thread serves exactly
    # one request and then exits.
    httpd = http.server.HTTPServer(("127.0.0.1", 0), Handler)
    server_thread = threading.Thread(target=httpd.handle_request)
    server_thread.daemon = True
    server_thread.start()

    def finish():
        # Make sure the server thread is done.
        print("Closing thread " + str(server_thread))
        server_thread.join(10.0)  # Wait at most 10 seconds
        assert_false(server_thread.is_alive())

    return httpd, finish
# Keep nose from collecting this helper as a test despite its name.
run_test_server.__test__ = False
def test_proxy_get():
    """
    Proxying test: a GET through the proxy reaches the backing server, and
    links/images in the response are rewritten to proxy URLs.
    """
    # All apps require login.
    client = make_logged_in_client(username="test", is_superuser=True)
    httpd, finish = run_test_server()
    try:
        # Test the proxying
        # (whitelist must admit the test server; restored in the finally)
        finish_conf = proxy.conf.WHITELIST.set_for_testing(r"127\.0\.0\.1:\d*")
        try:
            response_get = client.get('/proxy/127.0.0.1/%s/' % httpd.server_port, dict(foo="bar"))
        finally:
            finish_conf()
        assert_true(b"Hello there" in response_get.content)
        # query parameters must be forwarded to the backing server
        assert_true(b"You requested: /?foo=bar." in response_get.content)
        # image src and link href must be rewritten to proxy paths
        proxy_url = "/proxy/127.0.0.1/%s/foo.jpg" % httpd.server_port
        if not isinstance(proxy_url, bytes):
            proxy_url = proxy_url.encode('utf-8')
        assert_true(proxy_url in response_get.content)
        proxy_url = "/proxy/127.0.0.1/%s/baz?with=parameter" % httpd.server_port
        if not isinstance(proxy_url, bytes):
            proxy_url = proxy_url.encode('utf-8')
        assert_true(proxy_url in response_get.content)
    finally:
        finish()
def test_proxy_post():
    """
    Proxying test, using POST.
    """
    client = make_logged_in_client(username="test", is_superuser=True)
    httpd, finish = run_test_server()
    try:
        # Admit the test server into the whitelist only for the request.
        finish_conf = proxy.conf.WHITELIST.set_for_testing(r"127\.0\.0\.1:\d*")
        try:
            target = '/proxy/127.0.0.1/%s/' % httpd.server_port
            response_post = client.post(target, dict(foo="bar", foo2="bar"))
        finally:
            finish_conf()
        # The echoed response must contain the greeting, the path, and the
        # forwarded form body.
        expected_fragments = (
            b"Hello there",
            b"You requested: /.",
            b"foo=bar",
            b"foo2=bar",
        )
        for fragment in expected_fragments:
            assert_true(fragment in response_post.content)
    finally:
        finish()
def test_blacklist():
    """A blacklisted proxy path is blocked even when whitelisted overall."""
    client = make_logged_in_client('test')
    # install both a whitelist and a blacklist; both restored in the finally
    finish_confs = [
        proxy.conf.WHITELIST.set_for_testing(r"localhost:\d*"),
        proxy.conf.BLACKLIST.set_for_testing(r"localhost:\d*/(foo|bar)/fred/"),
    ]
    try:
        # Request 1: Hit the blacklist
        resp = client.get('/proxy/localhost/1234//foo//fred/')
        assert_true(b"is blocked" in resp.content)
        # Request 2: This is not a match
        httpd, finish = run_test_server()
        try:
            resp = client.get('/proxy/localhost/%s//foo//fred_ok' % (httpd.server_port,))
            assert_true(b"Hello there" in resp.content)
        finally:
            finish()
    finally:
        for fin in finish_confs:
            fin()
class UrlLibFileWrapper(string_io):
    """
    urllib2.urlopen returns a file-like object; we fake it here.
    """
    def __init__(self, buf, url):
        # Seed the underlying StringIO with the body and remember the URL
        # so geturl() can report it, like a real urlopen response object.
        super(UrlLibFileWrapper, self).__init__(buf)
        self.url = url

    def geturl(self):
        """Return the URL we were initialized with."""
        return self.url
def test_rewriting():
    """
    Tests that simple re-writing is working: relative and absolute links,
    explicit/invalid ports, and image sources.
    """
    html = "<a href='foo'>bar</a><a href='http://alpha.com'>baz</a>"
    # relative hrefs are resolved against the page URL, then proxied
    assert_true(b'<a href="/proxy/abc.com/80/sub/foo">bar</a>' in _rewrite_links(UrlLibFileWrapper(html, "http://abc.com/sub/")),
                msg="Relative links")
    # absolute hrefs are proxied with the default port 80
    assert_true(b'<a href="/proxy/alpha.com/80/">baz</a>' in _rewrite_links(UrlLibFileWrapper(html, "http://abc.com/sub/")),
                msg="Absolute links")
    # Test url with port and invalid port
    # (an invalid port drops the href entirely, leaving a bare <a> tag)
    html = "<a href='http://alpha.com:1234/bar'>bar</a><a href='http://alpha.com:-1/baz'>baz</a>"
    assert_true(b'<a href="/proxy/alpha.com/1234/bar">bar</a><a>baz</a>' in
                _rewrite_links(UrlLibFileWrapper(html, "http://abc.com/sub/")),
                msg="URL with invalid port")
    html = """
    <img src="/static/hadoop-logo.jpg"/><br>
    """
    # image src attributes are rewritten just like hrefs
    rewritten = _rewrite_links(UrlLibFileWrapper(html, "http://abc.com/sub/"))
    assert_true(b'<img src="/proxy/abc.com/80/static/hadoop-logo.jpg">' in
                rewritten,
                msg="Rewrite images")
4,088 | serialize | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides a RegionsRegistry class.
"""
from astropy.table import Table
__all__ = []
class IORegistryError(Exception):
    """Exception class for various registry errors (missing handlers,
    unidentifiable formats, duplicate registrations)."""
class RegionsRegistry:
    """
    Class to hold a registry to read, write, parse, and serialize regions
    in various formats.
    """
    # Maps (classobj, methodname, filetype) -> registered handler function.
    registry = {}
    @classmethod
    def register(cls, classobj, methodname, filetype):
        # Decorator factory: registers the wrapped function under the
        # (class, method, filetype) key, rejecting duplicate registrations.
        def inner_wrapper(wrapped_func):
            key = (classobj, methodname, filetype)
            if key in cls.registry:
                raise ValueError(f'{methodname} for {filetype} is already '
                                 f'registered for {classobj.__name__}')
            cls.registry[key] = wrapped_func
            return wrapped_func
        return inner_wrapper
    @classmethod
    def get_identifiers(cls, classobj):
        # All registered 'identify' keys for this class.
        return [key for key in cls.registry
                if key[0] == classobj and key[1] == 'identify']
    @classmethod
    def _no_format_error(cls, classobj):
        # Shared error path: format could not be determined automatically.
        msg = ('Format could not be identified based on the file name or '
               'contents, please provide a "format" argument.'
               f'\n{cls._get_format_table_str(classobj)}')
        raise IORegistryError(msg)
    @classmethod
    def identify_format(cls, filename, classobj, methodname):
        # Run each registered identifier in turn; the first match wins.
        format = None
        identifiers = cls.get_identifiers(classobj)
        if identifiers:
            for identifier in identifiers:
                if cls.registry[identifier](methodname, filename):
                    format = identifier[2]
                    break  # finds the first valid filetype
        if format is None:
            cls._no_format_error(classobj)
        return format
    @classmethod
    def read(cls, filename, classobj, format=None, **kwargs):
        """
        Read in a regions file.
        """
        if format is None:
            format = cls.identify_format(filename, classobj, 'read')
        key = (classobj, 'read', format)
        try:
            reader = cls.registry[key]
        except KeyError:
            msg = (f'No reader defined for format "{format}" and class '
                   f'"{classobj.__name__}".\n'
                   f'{cls._get_format_table_str(classobj)}')
            raise IORegistryError(msg) from None
        return reader(filename, **kwargs)
    @classmethod
    def parse(cls, data, classobj, format=None, **kwargs):
        """
        Parse a regions string or table.
        """
        # no filename to sniff, so an explicit format is mandatory
        if format is None:
            cls._no_format_error(classobj)
        key = (classobj, 'parse', format)
        try:
            parser = cls.registry[key]
        except KeyError:
            msg = (f'No parser defined for format "{format}" and class '
                   f'"{classobj.__name__}".\n'
                   f'{cls._get_format_table_str(classobj)}')
            raise IORegistryError(msg) from None
        return parser(data, **kwargs)
    @classmethod
    def write(cls, regions, filename, classobj, format=None, **kwargs):
        """
        Write to a regions file.
        """
        if format is None:
            format = cls.identify_format(filename, classobj, 'write')
        key = (classobj, 'write', format)
        try:
            writer = cls.registry[key]
        except KeyError:
            msg = (f'No writer defined for format "{format}" and class '
                   f'"{classobj.__name__}".\n'
                   f'{cls._get_format_table_str(classobj)}')
            raise IORegistryError(msg) from None
        return writer(regions, filename, **kwargs)
    @classmethod
    def METHOD_NAME(cls, regions, classobj, format=None, **kwargs):
        """
        Serialize to a regions string or table.
        """
        # no filename to sniff, so an explicit format is mandatory
        if format is None:
            cls._no_format_error(classobj)
        key = (classobj, 'serialize', format)
        try:
            serializer = cls.registry[key]
        except KeyError:
            msg = (f'No serializer defined for format "{format}" and class '
                   f'"{classobj.__name__}".\n'
                   f'{cls._get_format_table_str(classobj)}')
            raise IORegistryError(msg) from None
        return serializer(regions, **kwargs)
    @classmethod
    def get_formats(cls, classobj):
        """
        Get the registered I/O formats as a Table.

        One row per filetype; columns flag which of the five operations
        (parse, serialize, read, write, auto-identify) are registered.
        """
        filetypes = list({key[2] for key in cls.registry
                          if key[0] == classobj})
        rows = [['Format', 'Parse', 'Serialize', 'Read', 'Write',
                 'Auto-identify']]
        for filetype in sorted(filetypes):
            keys = {key[1] for key in cls.registry
                    if key[0] == classobj and key[2] == filetype}
            row = [filetype]
            for methodname in rows[0][1:]:
                name = ('identify' if 'identify' in methodname
                        else methodname.lower())
                row.append('Yes' if name in keys else 'No')
            rows.append(row)
        if len(rows) == 1:
            return Table()
        # transpose the row-oriented data into Table columns
        cols = list(zip(*rows))
        tbl = Table()
        for col in cols:
            tbl[col[0]] = col[1:]
        return tbl
    @classmethod
    def _get_format_table_str(cls, classobj):
        # Render the formats table as a string for error messages/docstrings.
        lines = ['', f'The available formats for the {classobj.__name__} '
                 'class are:', '']
        tbl = cls.get_formats(classobj)
        lines.extend(tbl.pformat(max_lines=-1, max_width=80))
        return '\n'.join(lines)
def _update_docstring(classobj, methodname):
    """
    Update the docstring to include a table of all available registered
    formats and methods.
    """
    import re
    if methodname == 'identify':
        return
    lines = getattr(classobj, methodname).__doc__.splitlines()
    # infer the docstring's indentation from its first indented non-empty
    # line (the summary line at index 0 is excluded)
    matches = [re.search(r'(\S)', line) for line in lines[1:]]
    left_indent = ' ' * min(match.start() for match in matches if match)
    new_lines = RegionsRegistry._get_format_table_str(classobj).splitlines()
    lines.extend([left_indent + line for line in new_lines])
    try:
        # classmethod
        # (a bound classmethod's __doc__ is read-only; write via __func__)
        getattr(classobj, methodname).__func__.__doc__ = '\n'.join(lines)
    except AttributeError:
        # instancemethod
        getattr(classobj, methodname).__doc__ = '\n'.join(lines)
    return
4,089 | operation | # Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from legate.core import LEGATE_MAX_DIM
from utils.contractions import (
check_default,
check_permutations,
check_shapes,
check_types,
)
import cunumeric as num
from cunumeric.utils import matmul_modes
@pytest.mark.parametrize("a_ndim", range(1, LEGATE_MAX_DIM + 1))
@pytest.mark.parametrize("b_ndim", range(1, LEGATE_MAX_DIM + 1))
def test(a_ndim, b_ndim):
    # Cross-check cunumeric.matmul against numpy for every pair of operand
    # ranks supported by Legate.
    name = f"matmul({a_ndim} x {b_ndim})"
    modes = matmul_modes(a_ndim, b_ndim)
    def METHOD_NAME(lib, *args, **kwargs):
        # Dispatcher handed to the contraction checkers: applies matmul from
        # whichever library (numpy or cunumeric) is passed in as *lib*.
        return lib.matmul(*args, **kwargs)
    check_default(name, modes, METHOD_NAME)
    check_permutations(name, modes, METHOD_NAME)
    check_shapes(name, modes, METHOD_NAME)
    # type checking is only run for the cheap low-rank cases
    if a_ndim <= 2 and b_ndim <= 2:
        check_types(name, modes, METHOD_NAME)
class TestMatmulErrors:
@pytest.mark.parametrize(
"shapesAB",
(
((2, 4), (2, 3)),
((3, 2, 4), (2, 4, 3)),
((3, 2, 4), (3, 2, 3)),
),
ids=lambda shapesAB: f"(shapesAB={shapesAB})",
)
def test_invalid_shape_dim_greater_than_one(self, shapesAB):
expected_exc = ValueError
shapeA, shapeB = shapesAB
A_np = np.ones(shapeA)
B_np = np.ones(shapeB)
A_num = num.ones(shapeA)
B_num = num.ones(shapeB)
with pytest.raises(expected_exc):
np.matmul(A_np, B_np)
with pytest.raises(expected_exc):
num.matmul(A_num, B_num)
    @pytest.mark.parametrize(
        "shapesAB",
        (
            ((3, 2), (3,)),
            pytest.param(((4, 1), (3,)), marks=pytest.mark.xfail),
            ((1, 4), (3,)),
            ((3,), (2, 3)),
            ((3,), (4, 1)),
            pytest.param(((3,), (1, 4)), marks=pytest.mark.xfail),
            ((3,), (2,)),
            pytest.param(((3,), (1,)), marks=pytest.mark.xfail),
        ),
        ids=lambda shapesAB: f"(shapesAB={shapesAB})",
    )
    def test_invalid_shape_with_vector(self, shapesAB):
        """Shape mismatches involving a 1-d operand raise ValueError.

        The xfail-marked cases document a divergence (see note below).
        """
        # For ((4, 1), (3,)), ((3,), (1, 4)), ((3,), (1,)),
        # In Numpy, raise ValueError
        # In cuNumeric, broadcast 1 to 3 and pass
        expected_exc = ValueError
        shapeA, shapeB = shapesAB
        A_np = np.ones(shapeA)
        B_np = np.ones(shapeB)
        A_num = num.ones(shapeA)
        B_num = num.ones(shapeB)
        with pytest.raises(expected_exc):
            np.matmul(A_np, B_np)
        with pytest.raises(expected_exc):
            num.matmul(A_num, B_num)
def test_invalid_shape_with_scalar(self):
expected_exc = ValueError
with pytest.raises(expected_exc):
np.matmul(3, 3)
with pytest.raises(expected_exc):
num.matmul(3, 3)
with pytest.raises(expected_exc):
np.matmul(3, np.ones((1,)))
with pytest.raises(expected_exc):
num.matmul(3, num.ones((1,)))
with pytest.raises(expected_exc):
np.matmul(np.ones((1,)), 3)
with pytest.raises(expected_exc):
num.matmul(num.ones((1,)), 3)
with pytest.raises(expected_exc):
np.matmul(3, np.ones((1, 1)))
with pytest.raises(expected_exc):
num.matmul(3, num.ones((1, 1)))
with pytest.raises(expected_exc):
np.matmul(np.ones((1, 1)), 3)
with pytest.raises(expected_exc):
num.matmul(num.ones((1, 1)), 3)
@pytest.mark.parametrize(
"shape", ((2, 3), (3, 4, 3)), ids=lambda shape: f"(shape={shape})"
)
def test_out_invalid_shape(self, shape):
expected_exc = ValueError
A_np = np.ones((3, 2, 4))
B_np = np.ones((3, 4, 3))
out_np = np.zeros(shape)
A_num = num.ones((3, 2, 4))
B_num = num.ones((3, 4, 3))
out_num = num.zeros(shape)
with pytest.raises(expected_exc):
np.matmul(A_np, B_np, out=out_np)
with pytest.raises(expected_exc):
num.matmul(A_num, B_num, out=out_num)
@pytest.mark.xfail
def test_out_invalid_shape_DIVERGENCE(self):
# In Numpy, PASS
# In cuNumeric, raise ValueError
A = num.ones((3, 2, 4))
B = num.ones((3, 4, 3))
shape = (3, 3, 2, 3)
out = num.zeros(shape)
num.matmul(A, B, out=out)
@pytest.mark.parametrize(
("dtype", "out_dtype", "casting"),
((None, np.int64, "same_kind"), (float, str, "safe")),
ids=("direct", "intermediate"),
)
def test_out_invalid_dtype(self, dtype, out_dtype, casting):
expected_exc = TypeError
A_np = np.ones((3, 2, 4))
B_np = np.ones((3, 4, 3))
A_num = num.ones((3, 2, 4))
B_num = num.ones((3, 4, 3))
out_np = np.zeros((3, 2, 3), dtype=out_dtype)
out_num = num.zeros((3, 2, 3), dtype=out_dtype)
with pytest.raises(expected_exc):
np.matmul(A_np, B_np, dtype=dtype, out=out_np, casting=casting)
with pytest.raises(expected_exc):
num.matmul(A_num, B_num, dtype=dtype, out=out_num, casting=casting)
@pytest.mark.parametrize(
"casting_dtype",
(
("no", np.float32),
("equiv", np.float32),
("safe", np.float32),
("same_kind", np.int64),
),
ids=lambda casting_dtype: f"(casting_dtype={casting_dtype})",
)
def test_invalid_casting_dtype(self, casting_dtype):
expected_exc = TypeError
casting, dtype = casting_dtype
A_np = np.ones((2, 4))
B_np = np.ones((4, 3))
A_num = num.ones((2, 4))
B_num = num.ones((4, 3))
with pytest.raises(expected_exc):
np.matmul(A_np, B_np, casting=casting, dtype=dtype)
with pytest.raises(expected_exc):
num.matmul(A_num, B_num, casting=casting, dtype=dtype)
@pytest.mark.parametrize(
"dtype", (str, pytest.param(float, marks=pytest.mark.xfail)), ids=str
)
def test_invalid_casting(self, dtype):
expected_exc = ValueError
casting = "unknown"
A_np = np.ones((2, 4))
B_np = np.ones((4, 3), dtype=dtype)
A_num = num.ones((2, 4))
B_num = num.ones((4, 3), dtype=dtype)
# In Numpy, raise ValueError
with pytest.raises(expected_exc):
np.matmul(A_np, B_np, casting=casting)
# cuNumeric does not check casting when A and B are of the same dtype
with pytest.raises(expected_exc):
num.matmul(A_num, B_num, casting=casting)
if __name__ == "__main__":
    # Allow running this test module directly: forward CLI args to pytest.
    import sys

    raise SystemExit(pytest.main(sys.argv))
"""
cos_fc
======
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.METHOD_NAME import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
# NOTE: autogenerated DPF operator class (see module docstring) — avoid manual
# edits to the generated structure; regenerate instead.
class cos_fc(Operator):
    """Computes element-wise cos(field[i]).
    Parameters
    ----------
    fields_container : FieldsContainer
        Field or fields container with only one field
        is expected
    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> # Instantiate operator
    >>> op = dpf.operators.math.cos_fc()
    >>> # Make input connections
    >>> my_fields_container = dpf.FieldsContainer()
    >>> op.inputs.fields_container.connect(my_fields_container)
    >>> # Instantiate operator and connect inputs in one line
    >>> op = dpf.operators.math.cos_fc(
    ...     fields_container=my_fields_container,
    ... )
    >>> # Get output data
    >>> result_fields_container = op.outputs.fields_container()
    """
    def __init__(self, fields_container=None, config=None, server=None):
        super().__init__(name="cos_fc", config=config, server=server)
        self._inputs = InputsCosFc(self)
        self._outputs = OutputsCosFc(self)
        # Eagerly connect the input if it was given to the constructor.
        if fields_container is not None:
            self.inputs.fields_container.connect(fields_container)
    @staticmethod
    def _spec():
        # Pin layout of the server-side operator: one input, one output.
        description = """Computes element-wise cos(field[i])."""
        spec = Specification(
            description=description,
            map_input_pin_spec={
                0: PinSpecification(
                    name="fields_container",
                    type_names=["fields_container"],
                    optional=False,
                    document="""Field or fields container with only one field
        is expected""",
                ),
            },
            map_output_pin_spec={
                0: PinSpecification(
                    name="fields_container",
                    type_names=["fields_container"],
                    optional=False,
                    document="""""",
                ),
            },
        )
        return spec
    @staticmethod
    def default_config(server=None):
        """Returns the default config of the operator.
        This config can then be changed to the user needs and be used to
        instantiate the operator. The Configuration allows to customize
        how the operation will be processed by the operator.
        Parameters
        ----------
        server : server.DPFServer, optional
            Server with channel connected to the remote or local instance. When
            ``None``, attempts to use the global server.
        """
        return Operator.default_config(name="cos_fc", server=server)
    @property
    def inputs(self):
        """Enables to connect inputs to the operator
        Returns
        --------
        inputs : InputsCosFc
        """
        return super().inputs
    @property
    def METHOD_NAME(self):
        """Enables to get outputs of the operator by evaluating it
        Returns
        --------
        outputs : OutputsCosFc
        """
        return super().METHOD_NAME
# NOTE: autogenerated input-pin wrapper for the cos_fc operator.
class InputsCosFc(_Inputs):
    """Intermediate class used to connect user inputs to
    cos_fc operator.
    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.math.cos_fc()
    >>> my_fields_container = dpf.FieldsContainer()
    >>> op.inputs.fields_container.connect(my_fields_container)
    """
    def __init__(self, op: Operator):
        super().__init__(cos_fc._spec().inputs, op)
        # Input pin 0: the fields container to take the cosine of.
        self._fields_container = Input(cos_fc._spec().input_pin(0), 0, op, -1)
        self._inputs.append(self._fields_container)
    @property
    def fields_container(self):
        """Allows to connect fields_container input to the operator.
        Field or fields container with only one field
        is expected
        Parameters
        ----------
        my_fields_container : FieldsContainer
        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.math.cos_fc()
        >>> op.inputs.fields_container.connect(my_fields_container)
        >>> # or
        >>> op.inputs.fields_container(my_fields_container)
        """
        return self._fields_container
# NOTE: autogenerated output-pin wrapper for the cos_fc operator.
class OutputsCosFc(_Outputs):
    """Intermediate class used to get outputs from
    cos_fc operator.
    Examples
    --------
    >>> from ansys.dpf import core as dpf
    >>> op = dpf.operators.math.cos_fc()
    >>> # Connect inputs : op.inputs. ...
    >>> result_fields_container = op.outputs.fields_container()
    """
    def __init__(self, op: Operator):
        super().__init__(cos_fc._spec().METHOD_NAME, op)
        # Output pin 0: the resulting fields container.
        self._fields_container = Output(cos_fc._spec().output_pin(0), 0, op)
        self._outputs.append(self._fields_container)
    @property
    def fields_container(self):
        """Allows to get fields_container output of the operator
        Returns
        ----------
        my_fields_container : FieldsContainer
        Examples
        --------
        >>> from ansys.dpf import core as dpf
        >>> op = dpf.operators.math.cos_fc()
        >>> # Connect inputs : op.inputs. ...
        >>> result_fields_container = op.outputs.fields_container()
        """  # noqa: E501
        return self._fields_container
"""Set of default text cleaners"""
# TODO: pick the cleaner for languages dynamically
import re
from anyascii import anyascii
from TTS.tts.utils.text.chinese_mandarin.numbers import replace_numbers_to_characters_in_text
from .english.abbreviations import abbreviations_en
from .english.number_norm import normalize_numbers as en_normalize_numbers
from .english.time_norm import expand_time_english
from .french.abbreviations import abbreviations_fr
# Regular expression matching whitespace:
_whitespace_re = re.compile(r"\s+")
def expand_abbreviations(text, lang="en"):
    """Expand language-specific abbreviations (e.g. "mr." -> "mister").

    Only English ("en") and French ("fr") have abbreviation tables; any other
    language tag leaves the text unchanged.  (Previously an unsupported tag
    raised ``UnboundLocalError`` because ``_abbreviations`` was only bound in
    the "en"/"fr" branches but iterated unconditionally.)
    """
    if lang == "en":
        _abbreviations = abbreviations_en
    elif lang == "fr":
        _abbreviations = abbreviations_fr
    else:
        # No abbreviation table for this language: nothing to expand.
        _abbreviations = []
    for regex, replacement in _abbreviations:
        text = re.sub(regex, replacement, text)
    return text
def lowercase(text):
    """Return *text* with all cased characters lower-cased."""
    lowered = text.lower()
    return lowered
def collapse_whitespace(text):
    """Squash runs of whitespace into single spaces and trim both ends."""
    return _whitespace_re.sub(" ", text).strip()
def convert_to_ascii(text):
    # Transliterate arbitrary Unicode text to a best-effort ASCII form.
    return anyascii(text)
def remove_aux_symbols(text):
    """Strip angle brackets, parentheses, square brackets and double quotes."""
    return re.sub(r'[<>()\[\]"]+', "", text)
def replace_symbols(text, lang="en"):
    """Normalize punctuation and spell out "&" according to *lang*.

    Semicolons and colons become commas, hyphens become spaces (they are
    removed entirely for Catalan), apostrophes are dropped, and "&" is
    replaced by the conjunction word for the languages that define one
    ("en", "fr", "pt", "ca").

    example:
        replace_symbols("si l'avi cau, diguem-ho", lang="ca")
        -> "si lavi cau, diguemho"
    """
    conjunctions = {"en": " and ", "fr": " et ", "pt": " e ", "ca": " i "}
    text = text.replace(";", ",").replace(":", ",")
    text = text.replace("-", "" if lang == "ca" else " ")
    if lang in conjunctions:
        text = text.replace("&", conjunctions[lang])
    return text.replace("'", "")
def basic_cleaners(text):
    """Minimal pipeline: lowercase and collapse whitespace, no transliteration."""
    return collapse_whitespace(lowercase(text))
def transliteration_cleaners(text):
    """Pipeline for non-English text that transliterates to ASCII."""
    # text = convert_to_ascii(text)  # transliteration step currently disabled
    return collapse_whitespace(lowercase(text))
def basic_german_cleaners(text):
    """Pipeline for German text: lowercase, then collapse whitespace."""
    return collapse_whitespace(lowercase(text))
# TODO: elaborate it
def basic_turkish_cleaners(text):
    """Pipeline for Turkish text."""
    # Map capital I to dotless lowercase ı before the generic lowering,
    # since str.lower() does not apply the Turkish casing rule.
    return collapse_whitespace(lowercase(text.replace("I", "ı")))
def english_cleaners(text):
    """Pipeline for English text, including number and abbreviation expansion."""
    # text = convert_to_ascii(text)  # transliteration step currently disabled
    steps = (
        lowercase,
        expand_time_english,
        en_normalize_numbers,
        expand_abbreviations,
        replace_symbols,
        remove_aux_symbols,
        collapse_whitespace,
    )
    for step in steps:
        text = step(text)
    return text
def phoneme_cleaners(text):
    """Pipeline for phonemes mode, including number and abbreviation expansion."""
    steps = (
        en_normalize_numbers,
        expand_abbreviations,
        replace_symbols,
        remove_aux_symbols,
        collapse_whitespace,
    )
    for step in steps:
        text = step(text)
    return text
def french_cleaners(text):
    """Pipeline for French text; number expansion is left to the phonemizer."""
    for step in (
        lambda t: expand_abbreviations(t, lang="fr"),
        lowercase,
        lambda t: replace_symbols(t, lang="fr"),
        remove_aux_symbols,
        collapse_whitespace,
    ):
        text = step(text)
    return text
def portuguese_cleaners(text):
    """Basic pipeline for Portuguese; abbreviations and numbers are left to
    the phonemizer."""
    for step in (
        lowercase,
        lambda t: replace_symbols(t, lang="pt"),
        remove_aux_symbols,
        collapse_whitespace,
    ):
        text = step(text)
    return text
def chinese_mandarin_cleaners(text: str) -> str:
    """Basic pipeline for chinese"""
    # Spell out Arabic numerals as Mandarin characters.
    return replace_numbers_to_characters_in_text(text)
def METHOD_NAME(text):
    """Language-agnostic pipeline: lowercase, symbol cleanup, whitespace collapse."""
    for step in (
        lowercase,
        lambda t: replace_symbols(t, lang=None),
        remove_aux_symbols,
        collapse_whitespace,
    ):
        text = step(text)
    return text
def no_cleaners(text):
    """Pass text through unchanged except for stripping newline characters."""
    return "".join(text.split("\n"))
import enum
import json
import typing
from starlette.requests import HTTPConnection
from starlette.types import Message, Receive, Scope, Send
class WebSocketState(enum.Enum):
    # Lifecycle of each side of the socket: handshake pending -> open -> closed.
    CONNECTING = 0
    CONNECTED = 1
    DISCONNECTED = 2
class WebSocketDisconnect(Exception):
    """Raised when the peer disconnects; carries the close code and reason."""

    def __init__(self, code: int = 1000, reason: typing.Optional[str] = None) -> None:
        self.code = code
        self.reason = "" if reason is None else reason
class WebSocket(HTTPConnection):
    """ASGI WebSocket connection wrapper.

    Tracks two independent state machines: ``client_state`` (messages received
    from the client) and ``application_state`` (messages sent by the app), and
    enforces the legal ASGI message ordering on both.
    """
    def __init__(self, scope: Scope, receive: Receive, METHOD_NAME: Send) -> None:
        super().__init__(scope)
        assert scope["type"] == "websocket"
        self._receive = receive
        self._send = METHOD_NAME
        # Both sides start in the handshake phase.
        self.client_state = WebSocketState.CONNECTING
        self.application_state = WebSocketState.CONNECTING
    async def receive(self) -> Message:
        """
        Receive ASGI websocket messages, ensuring valid state transitions.
        """
        if self.client_state == WebSocketState.CONNECTING:
            # First message from the client must be the handshake.
            message = await self._receive()
            message_type = message["type"]
            if message_type != "websocket.connect":
                raise RuntimeError(
                    'Expected ASGI message "websocket.connect", '
                    f"but got {message_type!r}"
                )
            self.client_state = WebSocketState.CONNECTED
            return message
        elif self.client_state == WebSocketState.CONNECTED:
            message = await self._receive()
            message_type = message["type"]
            if message_type not in {"websocket.receive", "websocket.disconnect"}:
                raise RuntimeError(
                    'Expected ASGI message "websocket.receive" or '
                    f'"websocket.disconnect", but got {message_type!r}'
                )
            if message_type == "websocket.disconnect":
                self.client_state = WebSocketState.DISCONNECTED
            return message
        else:
            raise RuntimeError(
                'Cannot call "receive" once a disconnect message has been received.'
            )
    async def METHOD_NAME(self, message: Message) -> None:
        """
        Send ASGI websocket messages, ensuring valid state transitions.
        """
        if self.application_state == WebSocketState.CONNECTING:
            # Before accepting, the app may only accept or reject (close).
            message_type = message["type"]
            if message_type not in {"websocket.accept", "websocket.close"}:
                raise RuntimeError(
                    'Expected ASGI message "websocket.accept" or '
                    f'"websocket.close", but got {message_type!r}'
                )
            if message_type == "websocket.close":
                self.application_state = WebSocketState.DISCONNECTED
            else:
                self.application_state = WebSocketState.CONNECTED
            await self._send(message)
        elif self.application_state == WebSocketState.CONNECTED:
            message_type = message["type"]
            if message_type not in {"websocket.send", "websocket.close"}:
                raise RuntimeError(
                    'Expected ASGI message "websocket.send" or "websocket.close", '
                    f"but got {message_type!r}"
                )
            if message_type == "websocket.close":
                self.application_state = WebSocketState.DISCONNECTED
            await self._send(message)
        else:
            raise RuntimeError('Cannot call "send" once a close message has been sent.')
    async def accept(
        self,
        subprotocol: typing.Optional[str] = None,
        headers: typing.Optional[typing.Iterable[typing.Tuple[bytes, bytes]]] = None,
    ) -> None:
        # Complete the handshake, optionally negotiating a subprotocol/headers.
        headers = headers or []
        if self.client_state == WebSocketState.CONNECTING:
            # If we haven't yet seen the 'connect' message, then wait for it first.
            await self.receive()
        await self.METHOD_NAME(
            {"type": "websocket.accept", "subprotocol": subprotocol, "headers": headers}
        )
    def _raise_on_disconnect(self, message: Message) -> None:
        # Translate a disconnect message into the WebSocketDisconnect exception.
        if message["type"] == "websocket.disconnect":
            raise WebSocketDisconnect(message["code"])
    async def receive_text(self) -> str:
        if self.application_state != WebSocketState.CONNECTED:
            raise RuntimeError(
                'WebSocket is not connected. Need to call "accept" first.'
            )
        message = await self.receive()
        self._raise_on_disconnect(message)
        return typing.cast(str, message["text"])
    async def receive_bytes(self) -> bytes:
        if self.application_state != WebSocketState.CONNECTED:
            raise RuntimeError(
                'WebSocket is not connected. Need to call "accept" first.'
            )
        message = await self.receive()
        self._raise_on_disconnect(message)
        return typing.cast(bytes, message["bytes"])
    async def receive_json(self, mode: str = "text") -> typing.Any:
        # mode selects whether the JSON arrives in a text or binary frame.
        if mode not in {"text", "binary"}:
            raise RuntimeError('The "mode" argument should be "text" or "binary".')
        if self.application_state != WebSocketState.CONNECTED:
            raise RuntimeError(
                'WebSocket is not connected. Need to call "accept" first.'
            )
        message = await self.receive()
        self._raise_on_disconnect(message)
        if mode == "text":
            text = message["text"]
        else:
            text = message["bytes"].decode("utf-8")
        return json.loads(text)
    async def iter_text(self) -> typing.AsyncIterator[str]:
        # Yield text frames until the client disconnects.
        try:
            while True:
                yield await self.receive_text()
        except WebSocketDisconnect:
            pass
    async def iter_bytes(self) -> typing.AsyncIterator[bytes]:
        try:
            while True:
                yield await self.receive_bytes()
        except WebSocketDisconnect:
            pass
    async def iter_json(self) -> typing.AsyncIterator[typing.Any]:
        try:
            while True:
                yield await self.receive_json()
        except WebSocketDisconnect:
            pass
    async def send_text(self, data: str) -> None:
        await self.METHOD_NAME({"type": "websocket.send", "text": data})
    async def send_bytes(self, data: bytes) -> None:
        await self.METHOD_NAME({"type": "websocket.send", "bytes": data})
    async def send_json(self, data: typing.Any, mode: str = "text") -> None:
        if mode not in {"text", "binary"}:
            raise RuntimeError('The "mode" argument should be "text" or "binary".')
        # Compact separators keep the wire format free of whitespace.
        text = json.dumps(data, separators=(",", ":"))
        if mode == "text":
            await self.METHOD_NAME({"type": "websocket.send", "text": text})
        else:
            await self.METHOD_NAME({"type": "websocket.send", "bytes": text.encode("utf-8")})
    async def close(
        self, code: int = 1000, reason: typing.Optional[str] = None
    ) -> None:
        await self.METHOD_NAME(
            {"type": "websocket.close", "code": code, "reason": reason or ""}
        )
class WebSocketClose:
    """Minimal ASGI app that immediately closes an incoming WebSocket."""

    def __init__(self, code: int = 1000, reason: typing.Optional[str] = None) -> None:
        self.code = code
        self.reason = "" if reason is None else reason

    async def __call__(self, scope: Scope, receive: Receive, METHOD_NAME: Send) -> None:
        message = {"type": "websocket.close", "code": self.code, "reason": self.reason}
        await METHOD_NAME(message)
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_list_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: autogenerated by AutoRest (see file header) — changes may be lost on
# regeneration.
class Operations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.containerservice.v2020_12_01.aio.ContainerServiceClient`'s
        :attr:`operations` attribute.
    """
    models = _models
    def __init__(self, *args, **kwargs) -> None:
        # Accepts the client plumbing either positionally or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
        self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
    @distributed_trace
    def list(self, **kwargs: Any) -> AsyncIterable["_models.OperationValue"]:
        """Gets a list of compute operations.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OperationValue or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2020_12_01.models.OperationValue]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2020-12-01"))
        cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # Build the initial request, or follow a server-provided next link.
            if not next_link:
                request = build_list_request(
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page into (continuation token, items).
            deserialized = self._deserialize("OperationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return None, AsyncList(list_of_elem)
        async def METHOD_NAME(next_link=None):
            # Fetch one page of results, mapping HTTP errors to exceptions.
            request = prepare_request(next_link)
            _stream = False
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=_stream, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(METHOD_NAME, extract_data)
    list.metadata = {"url": "/providers/Microsoft.ContainerService/operations"}
from __future__ import annotations
from typing import Set, Dict
from seedemu.core import Node, Emulator, Layer
from seedemu.services import DomainNameServer, DomainNameService
DnssecFileTemplates: Dict[str, str] = {}
DnssecFileTemplates['enable_dnssec_script'] = """\
#!/bin/bash
rm -fr /etc/bind/keys
mkdir /etc/bind/keys
cd /etc/bind/keys
rndc freeze
while read -r zonename; do {
[ -z "$zonename" ] && continue
zonefile="$zonename"
[ "$zonename" = "." ] && zonefile="root"
echo "setting up DNSSEC for "$zonename"..."
sed -i 's|zone "'"$zonename"'" { type master; file "/etc/bind/zones/'"$zonefile"'"; allow-update { any; }; };|zone "'"$zonename"'" { type master; file "/etc/bind/zones/'"$zonefile"'"; allow-update { any; }; key-directory "/etc/bind/keys"; auto-dnssec maintain; inline-signing yes; };|' /etc/bind/named.conf.zones
dnssec-keygen -a NSEC3RSASHA1 -b 2048 -n ZONE "$zonename"
dnssec-keygen -f KSK -a NSEC3RSASHA1 -b 4096 -n ZONE "$zonename"
}; done < /dnssec_zones.txt
chown -R bind:bind /etc/bind/keys
rndc thaw
rndc reload
while read -r zonename; do {
[ -z "$zonename" ] && continue
[ "$zonename" = "." ] && continue
pzonename="`tr '.' '\\n' <<< "$zonename" | sed '1d' | tr '\\n' '.' | sed -e 's/\\.\\.$/./'`"
while true; do {
pns="`dig +short NS "$pzonename"`" || pns=''
[ -z "$pns" ] && echo "cannot get NS for parent zone ($pzonename), retrying in 1 second..." || break
sleep 1
}; done
dig +short NS "$pzonename" | while read -r ns; do dig +short "$ns"; done | while read -r nsaddr; do {
dss="`dig @127.0.0.1 dnskey "$zonename" | dnssec-dsfromkey -f- "$zonename" | sed 's/IN/300/; s/^/update add /;'`"
echo "$dss"
echo "submitting DS record to parent zone $nsaddr..."
while true; do {
cat << UPDATE | nsupdate && echo "parent accepted the update." && break
server $nsaddr
zone $pzonename
$dss
send
UPDATE
echo "submission failed, retrying in 1 second..."
sleep 1
}; done
};done
}; done < /dnssec_zones.txt
"""
class Dnssec(Layer):
    """!
    @brief The Dnssec (DNSSEC) layer.
    This layer helps setting up DNSSEC. It works by signing the zones and send
    the DS record to parent(s) with nsupdate. Note that to build a DNSSEC
    infrastructure, you will need to sign the entire chain. You will also need
    working local DNS server configured on the node hosting the zone for it to
    find the parent name server.
    """
    # Zone names (always stored as fully-qualified, trailing-dot form).
    __zonenames: Set[str]
    def __init__(self):
        """!
        @brief Dnssec layer constructor.
        """
        super().__init__()
        self.__zonenames = set()
        self.addDependency('DomainNameService', False, False)
    def __findZoneNode(self, dns: DomainNameService, zonename: str) -> Node:
        # Scan all DNS server targets for the (first) node hosting the zone;
        # returns None when no host is found.
        targets = dns.getTargets()
        for (server, node) in targets:
            dns_s: DomainNameServer = server
            zones = dns_s.getZones()
            for zone in zones:
                # TODO: what if multiple nodes host the same zone?
                if zone == zonename: return node
        return None
    def getName(self):
        return 'Dnssec'
    def enableOn(self, zonename: str) -> Dnssec:
        """!
        @brief Enable DNSSEC on the given zone.
        @param zonename zonename.
        @returns self, for chaining API calls.
        """
        # Normalize to the fully-qualified (trailing dot) form.
        if zonename[-1] != '.': zonename += '.'
        self.__zonenames.add(zonename)
        return self
    def METHOD_NAME(self) -> Set[str]:
        """!
        @brief Get set of zonenames with DNSSEC enabled.
        @return set of zonenames.
        """
        return self.__zonenames
    def render(self, emulator: Emulator):
        # First pass: find the hosting node for each enabled zone and record
        # the zone in that node's /dnssec_zones.txt.
        reg = emulator.getRegistry()
        dns: DomainNameService = reg.get('seedemu', 'layer', 'DomainNameService')
        nodes: Set[Node] = set()
        for zonename in self.__zonenames:
            self._log('Looking for server hosting "{}"...'.format(zonename))
            node = self.__findZoneNode(dns, zonename)
            assert node != None, 'no server found for dnssec-enabled zone {}'.format(zonename)
            (scope, _, name) = node.getRegistryInfo()
            self._log('Setting up DNSSEC for "{}" on as{}/{}'.format(zonename, scope, name))
            nodes.add(node)
            node.appendFile('/dnssec_zones.txt', '{}\n'.format(zonename))
        # Second pass: install and run the signing script once per node.
        for node in nodes:
            node.appendFile('/enable_dnssec', DnssecFileTemplates['enable_dnssec_script'])
            node.appendStartCommand('chmod +x /enable_dnssec')
            node.appendStartCommand('/enable_dnssec')
    def print(self, indent: int) -> str:
        # Render a human-readable summary of the layer configuration.
        out = ' ' * indent
        out += 'DnssecLayer:\n'
        indent += 4
        out += ' ' * indent
        out += 'DNSSEC-enabled zones:\n'
        for zonename in self.__zonenames:
            out += ' ' * indent
            out += '{}\n'.format(zonename)
        return out
import numpy as np
import pytest
import unyt as u
from gmso.exceptions import EngineIncompatibilityError
from gmso.formats.mcf import write_mcf
from gmso.tests.base_test import BaseTest
class TestMCF(BaseTest):
    """Tests for writing GMSO topologies to Cassandra MCF files."""
    def test_write_lj_simple(self, n_typed_ar_system):
        top = n_typed_ar_system(n_sites=1)
        top.save("ar.mcf")
    def test_write_mie_simple(self, n_typed_xe_mie):
        top = n_typed_xe_mie()
        top.save("xe.mcf")
    def test_write_lj_full(self, n_typed_ar_system):
        top = n_typed_ar_system(n_sites=1)
        top.save("ar.mcf")
        mcf_data = []
        with open("ar.mcf") as f:
            for line in f:
                mcf_data.append(line.strip().split())
        # Locate the (last) "Atom_Info" section header in the file.
        for idx, line in enumerate(mcf_data):
            if len(line) > 1:
                if line[1] == "Atom_Info":
                    atom_section_start = idx
        assert mcf_data[atom_section_start + 1][0] == "1"
        assert mcf_data[atom_section_start + 2][1] == "Ar"
        assert mcf_data[atom_section_start + 2][2] == "Ar"
        assert mcf_data[atom_section_start + 2][5] == "LJ"
        assert np.isclose(
            float(mcf_data[atom_section_start + 2][3]),
            top.sites[0].mass.in_units(u.amu).value,
        )
        assert np.isclose(
            float(mcf_data[atom_section_start + 2][4]),
            top.sites[0].charge.in_units(u.elementary_charge).value,
        )
        # epsilon is written in Kelvin (epsilon / k_B), sigma in Angstrom.
        assert np.isclose(
            float(mcf_data[atom_section_start + 2][6]),
            (top.sites[0].atom_type.parameters["epsilon"] / u.kb)
            .in_units(u.K)
            .value,
        )
        assert np.isclose(
            float(mcf_data[atom_section_start + 2][7]),
            top.sites[0]
            .atom_type.parameters["sigma"]
            .in_units(u.Angstrom)
            .value,
        )
    def test_write_mie_full(self, n_typed_xe_mie):
        top = n_typed_xe_mie()
        top.save("xe.mcf")
        mcf_data = []
        with open("xe.mcf") as f:
            for line in f:
                mcf_data.append(line.strip().split())
        # Locate the (last) "Atom_Info" section header in the file.
        for idx, line in enumerate(mcf_data):
            if len(line) > 1:
                if line[1] == "Atom_Info":
                    atom_section_start = idx
        # Check a some atom info
        assert mcf_data[atom_section_start + 1][0] == "1"
        assert mcf_data[atom_section_start + 2][1] == "Xe"
        assert mcf_data[atom_section_start + 2][2] == "Xe"
        assert mcf_data[atom_section_start + 2][5] == "Mie"
        assert np.isclose(
            float(mcf_data[atom_section_start + 2][3]),
            top.sites[0].mass.in_units(u.amu).value,
        )
        assert np.isclose(
            float(mcf_data[atom_section_start + 2][4]),
            top.sites[0].charge.in_units(u.elementary_charge).value,
        )
        assert np.isclose(
            float(mcf_data[atom_section_start + 2][6]),
            (top.sites[0].atom_type.parameters["epsilon"] / u.kb)
            .in_units(u.K)
            .value,
        )
        assert np.isclose(
            float(mcf_data[atom_section_start + 2][7]),
            top.sites[0]
            .atom_type.parameters["sigma"]
            .in_units(u.Angstrom)
            .value,
        )
        # Mie potentials additionally carry the n and m exponents.
        assert np.isclose(
            float(mcf_data[atom_section_start + 2][8]),
            top.sites[0].atom_type.parameters["n"],
        )
        assert np.isclose(
            float(mcf_data[atom_section_start + 2][9]),
            top.sites[0].atom_type.parameters["m"],
        )
    def METHOD_NAME(self, n_typed_ar_system):
        # A non-LJ expression must be rejected ...
        top = n_typed_ar_system(n_sites=1)
        next(iter(top.atom_types)).set_expression("sigma + epsilon*r")
        with pytest.raises(EngineIncompatibilityError):
            top.save("out.mcf")
        # ... but an algebraically rearranged LJ form must still be accepted.
        alternate_lj = "4*epsilon*sigma**12/r**12 - 4*epsilon*sigma**6/r**6"
        next(iter(top.atom_types)).set_expression(alternate_lj)
        top.save("ar.mcf")
    def test_scaling_factors(self, n_typed_ar_system):
        top = n_typed_ar_system(n_sites=1)
        top.save("ar.mcf")
        mcf_data = []
        with open("ar.mcf") as f:
            for line in f:
                mcf_data.append(line.strip().split())
        # Default 1-2/1-3/1-4/1-N scaling rows (LJ then electrostatics).
        assert np.allclose(float(mcf_data[-5][0]), 0.0)
        assert np.allclose(float(mcf_data[-5][1]), 0.0)
        assert np.allclose(float(mcf_data[-5][2]), 0.5)
        assert np.allclose(float(mcf_data[-5][3]), 1.0)
        assert np.allclose(float(mcf_data[-4][0]), 0.0)
        assert np.allclose(float(mcf_data[-4][1]), 0.0)
        assert np.allclose(float(mcf_data[-4][2]), 0.5)
        assert np.allclose(float(mcf_data[-4][3]), 1.0)
        # Custom scaling factors must round-trip through the writer.
        top.set_lj_scale([0.1, 0.2, 0.5])
        top.set_electrostatics_scale([0.2, 0.4, 0.6])
        top.save("ar.mcf", overwrite=True)
        mcf_data = []
        with open("ar.mcf") as f:
            for line in f:
                mcf_data.append(line.strip().split())
        assert np.allclose(float(mcf_data[-5][0]), 0.1)
        assert np.allclose(float(mcf_data[-5][1]), 0.2)
        assert np.allclose(float(mcf_data[-5][2]), 0.5)
        assert np.allclose(float(mcf_data[-5][3]), 1.0)
        assert np.allclose(float(mcf_data[-4][0]), 0.2)
        assert np.allclose(float(mcf_data[-4][1]), 0.4)
        assert np.allclose(float(mcf_data[-4][2]), 0.6)
        assert np.allclose(float(mcf_data[-4][3]), 1.0)
from datetime import datetime, timedelta
from django.conf import settings
from kitsune.forums.models import Forum, Thread, ThreadLockedError
from kitsune.forums.tests import ForumTestCase, PostFactory, ThreadFactory
from kitsune.forums.views import sort_threads
from kitsune.sumo.tests import get
from kitsune.users.tests import UserFactory
class PostTestCase(ForumTestCase):
    """Tests for post/thread/forum denormalized fields (last_post, replies)
    and for thread/post sorting behavior."""

    def test_new_post_updates_thread(self):
        # Saving a new post in a thread should update the last_post
        # key in that thread to point to the new post.
        t = ThreadFactory()
        PostFactory(thread=t)
        p = t.new_post(author=t.creator, content="an update")
        p.save()
        t = Thread.objects.get(id=t.id)
        self.assertEqual(p.id, t.last_post_id)

    def test_new_post_updates_forum(self):
        # Saving a new post should update the last_post key in the
        # forum to point to the new post.
        t = ThreadFactory()
        PostFactory(thread=t)
        p = t.new_post(author=t.creator, content="another update")
        p.save()
        f = Forum.objects.get(id=t.forum_id)
        self.assertEqual(p.id, f.last_post_id)

    def test_update_post_does_not_update_thread(self):
        # Updating/saving an old post in a thread should _not_ update
        # the last_post key in that thread.
        t = ThreadFactory()
        old = PostFactory(thread=t)
        last = PostFactory(thread=t)
        old.content = "updated content"
        old.save()
        self.assertEqual(last.id, old.thread.last_post_id)

    def test_update_forum_does_not_update_thread(self):
        # Updating/saving an old post in a forum should _not_ update
        # the last_post key in that forum.
        t = ThreadFactory()
        old = PostFactory(thread=t)
        last = PostFactory(thread=t)
        old.content = "updated content"
        old.save()
        self.assertEqual(last.id, t.forum.last_post_id)

    def test_replies_count(self):
        # The Thread.replies value should remain one less than the
        # number of posts in the thread.
        t = ThreadFactory(posts=[{}, {}, {}])
        old = t.replies
        self.assertEqual(2, old)
        t.new_post(author=t.creator, content="test").save()
        self.assertEqual(old + 1, t.replies)

    def test_sticky_threads_first(self):
        # Sticky threads should come before non-sticky threads.
        t = ThreadFactory()
        yesterday = datetime.now() - timedelta(days=1)
        sticky = ThreadFactory(forum=t.forum, is_sticky=True, posts=[{"created": yesterday}])
        # The older sticky thread shows up first.
        self.assertEqual(sticky.id, Thread.objects.all()[0].id)

    def test_thread_sorting(self):
        # After the sticky threads, threads should be sorted by the
        # created date of the last post.
        # Make sure the datetimes are different.
        PostFactory(created=datetime.now() - timedelta(days=1))
        PostFactory()
        # NOTE(review): this Thread is never saved, so it cannot affect the
        # queryset below — confirm whether it was meant to be persisted.
        Thread(is_sticky=True)
        threads = Thread.objects.filter(is_sticky=False)
        self.assertTrue(threads[0].last_post.created > threads[1].last_post.created)

    def test_post_sorting(self):
        """Posts should be sorted chronologically."""
        now = datetime.now()
        t = ThreadFactory(posts=[{"created": now - timedelta(days=n)} for n in [0, 1, 4, 7, 11]])
        posts = t.post_set.all()
        for i in range(len(posts) - 1):
            self.assertTrue(posts[i].created <= posts[i + 1].created)

    def test_sorting_creator(self):
        """Sorting threads by creator."""
        ThreadFactory(creator__username="aaa")
        ThreadFactory(creator__username="bbb")
        threads = sort_threads(Thread.objects, 3, 1)
        self.assertTrue(threads[0].creator.username >= threads[1].creator.username)

    def test_sorting_replies(self):
        """Sorting threads by replies."""
        ThreadFactory(posts=[{}, {}, {}])
        ThreadFactory()
        threads = sort_threads(Thread.objects, 4)
        self.assertTrue(threads[0].replies <= threads[1].replies)

    def test_sorting_last_post_desc(self):
        """Sorting threads by last_post descendingly."""
        ThreadFactory(posts=[{}, {}, {}])
        PostFactory(created=datetime.now() - timedelta(days=1))
        threads = sort_threads(Thread.objects, 5, 1)
        self.assertTrue(threads[0].last_post.created >= threads[1].last_post.created)

    def test_thread_last_page(self):
        """Thread's last_page property is accurate."""
        t = ThreadFactory()
        # Format: (# replies, # of pages to expect)
        test_data = (
            (t.replies, 1),  # Test default
            (50, 3),  # Test a large number
            (19, 1),  # Test off-by-one error, low
            (20, 2),
        )  # Test off-by-one error, high
        for replies, pages in test_data:
            t.replies = replies
            self.assertEqual(t.last_page, pages)

    def test_locked_thread(self):
        """Trying to reply to a locked thread should raise an exception."""
        with self.assertRaises(ThreadLockedError):
            locked = ThreadFactory(is_locked=True)
            user = UserFactory()
            # This should raise an exception
            locked.new_post(author=user, content="empty")

    def test_unlocked_thread(self):
        unlocked = ThreadFactory()
        user = UserFactory()
        # This should not raise an exception
        unlocked.new_post(author=user, content="empty")

    def test_post_no_session(self):
        r = get(self.client, "forums.new_thread", kwargs={"forum_slug": "test-forum"})
        assert settings.LOGIN_URL in r.redirect_chain[0][0]
        self.assertEqual(302, r.redirect_chain[0][1])
class ThreadTestCase(ForumTestCase):
    # Thread-view access-control tests.

    def test_delete_no_session(self):
        """Delete a thread while logged out redirects."""
        # Anonymous users must be redirected (302) to the login page
        # rather than reaching the delete view.
        r = get(
            self.client,
            "forums.delete_thread",
            kwargs={"forum_slug": "test-forum", "thread_id": 1},
        )
        assert settings.LOGIN_URL in r.redirect_chain[0][0]
        self.assertEqual(302, r.redirect_chain[0][1])
4,097 | test call to discrete | import numpy as np
import pytest
from probnum import diffeq, randvars
import probnum.problems.zoo.diffeq as diffeq_zoo
@pytest.fixture
def rng():
    """Seeded NumPy Generator so sampling tests are reproducible."""
    generator = np.random.default_rng(seed=42)
    return generator
@pytest.fixture
def stepsize():
    """Fixed step width for the non-adaptive solver."""
    dt = 0.1
    return dt
@pytest.fixture
def timespan():
    """Integration interval (t0, tmax) shared by the tests."""
    t_start, t_end = 0.0, 0.5
    return (t_start, t_end)
@pytest.fixture
def posterior(stepsize, timespan):
    """Kalman smoothing posterior."""
    # Solve a Lotka-Volterra IVP with a fixed-step probabilistic solver.
    t0, tmax = timespan
    initial_value = 20 * np.ones(2)
    ivp = diffeq_zoo.lotkavolterra(t0=t0, tmax=tmax, y0=initial_value)
    return diffeq.probsolve_ivp(
        ivp.f, ivp.t0, ivp.tmax, initial_value, step=stepsize, adaptive=False
    )
def test_len(posterior):
    """__len__ performs as expected."""
    # len(), locations and states must all agree on the grid size.
    n = len(posterior)
    assert n > 0
    assert len(posterior.locations) == n
    assert len(posterior.states) == n
def test_locations(posterior, stepsize, timespan):
    """Locations are stored correctly."""
    locations = posterior.locations
    # The grid must be sorted and match the equidistant step layout.
    np.testing.assert_allclose(locations, np.sort(locations))
    t0, tmax = timespan
    expected_grid = np.arange(t0, tmax + stepsize, step=stepsize)
    np.testing.assert_allclose(locations, expected_grid)
def test_getitem(posterior):
    """Getitem performs as expected."""
    # Indexing the posterior must mirror indexing its states list
    # for single indices (first, last) and full slices alike.
    for index in (0, -1, slice(None)):
        np.testing.assert_allclose(posterior[index].mean, posterior.states[index].mean)
        np.testing.assert_allclose(posterior[index].cov, posterior.states[index].cov)
def test_states(posterior):
    """RVs are stored correctly."""
    # NOTE(review): relies on the private randvars._RandomVariableList;
    # confirm no public alias exists before reusing this elsewhere.
    assert isinstance(posterior.states, randvars._RandomVariableList)
    # Each state is a vector-valued random variable (rank-1 shape).
    assert len(posterior.states[0].shape) == 1
def test_call_error_if_small(posterior):
    """Evaluating in the past of the data raises an error."""
    too_early = -0.5
    assert too_early < posterior.locations[0]
    with pytest.raises(NotImplementedError):
        posterior(too_early)
def test_call_vectorisation(posterior):
    """Evaluation allows vector inputs."""
    # Bug fix: the original np.arange(0, 1, 20) treats 20 as the *step*,
    # producing a single point [0.0] — so vectorised evaluation was never
    # actually exercised. linspace yields the intended 20 points in [0, 1]
    # (extrapolation beyond tmax=0.5 is supported, see
    # test_call_extrapolation).
    locs = np.linspace(0.0, 1.0, 20)
    evals = posterior(locs)
    assert len(evals) == len(locs)
def test_call_interpolation(posterior, rng):
    """Interpolation is possible and returns a Normal RV."""
    # Use the seeded `rng` fixture instead of the global np.random state so
    # the interpolation point (and thus the test) is reproducible.
    a = 0.4 + 0.1 * rng.random()
    t0, t1 = posterior.locations[:2]
    random_location_between_t0_and_t1 = t0 + a * (t1 - t0)
    assert (
        posterior.locations[0]
        < random_location_between_t0_and_t1
        < posterior.locations[-1]
    )
    # The point must not coincide with a grid node, so a real
    # interpolation (not a discrete lookup) is performed.
    assert random_location_between_t0_and_t1 not in posterior.locations
    out_rv = posterior(random_location_between_t0_and_t1)
    assert isinstance(out_rv, randvars.Normal)
def test_call_to_discrete(posterior):
    """Called at a grid point, the respective discrete solution is returned."""
    # First, last and one interior grid location must reproduce the stored
    # discrete-time estimates exactly — no interpolation error allowed.
    first_point = posterior.locations[0]
    np.testing.assert_allclose(posterior(first_point).mean, posterior[0].mean)
    np.testing.assert_allclose(posterior(first_point).cov, posterior[0].cov)
    final_point = posterior.locations[-1]
    np.testing.assert_allclose(posterior(final_point).mean, posterior[-1].mean)
    np.testing.assert_allclose(posterior(final_point).cov, posterior[-1].cov)
    mid_point = posterior.locations[4]
    np.testing.assert_allclose(posterior(mid_point).mean, posterior[4].mean)
    np.testing.assert_allclose(posterior(mid_point).cov, posterior[4].cov)
def test_call_extrapolation(posterior):
    """Extrapolation is possible and returns a Normal RV."""
    far_future = 30.0
    assert posterior.locations[-1] < far_future
    assert isinstance(posterior(far_future), randvars.Normal)
@pytest.fixture
def seed():
    # Fixed RNG seed for reproducibility.
    # NOTE(review): appears unused in this module — confirm before removing.
    return 42
# Sampling shape checks include extrapolation phases
# Dense evaluation grids: the first stays inside the solver's time domain
# [0.0, 0.5]; the second extends far beyond it to exercise extrapolation.
IN_DOMAIN_DENSE_LOCS = np.arange(0.0, 0.5, 0.025)
OUT_OF_DOMAIN_DENSE_LOCS = np.arange(0.0, 500.0, 25.0)
@pytest.mark.parametrize("locs", [None, IN_DOMAIN_DENSE_LOCS, OUT_OF_DOMAIN_DENSE_LOCS])
@pytest.mark.parametrize("size", [(), 2, (2,), (2, 2)])
def test_sampling_shapes(posterior, locs, size, rng):
    """Shape of the returned samples matches expectation."""
    samples = posterior.sample(rng=rng, t=locs, size=size)
    # Normalize an int size to the tuple form used in shape arithmetic.
    size_tuple = (size,) if isinstance(size, int) else size
    if locs is None:
        # (*size, *posterior.states.shape)
        expected_shape = size_tuple + posterior.states.shape
    else:
        # (*size, *posterior(locs).mean.shape)
        expected_shape = size_tuple + locs.shape + posterior.states[0].shape
    assert samples.shape == expected_shape
def test_transform_base_measure_realizations_raises_error(posterior):
    """The ODEFilterSolution does not implement transformation of base measure
    realizations, but refers to KalmanPosterior instead."""
    with pytest.raises(NotImplementedError):
        posterior.transform_base_measure_realizations(None)
4,098 | on 200 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "vm list-vm-resize-options",
)
class ListVmResizeOptions(AAZCommand):
    """List available resizing options for VMs.

    :example: List all available VM sizes for resizing.
        az vm list-vm-resize-options -g MyResourceGroup -n MyVm

    :example: List available sizes for all VMs in a resource group.
        az vm list-vm-resize-options --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
    """

    _aaz_info = {
        "version": "2017-12-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/virtualmachines/{}/vmsizes", "2017-12-01"],
        ]
    }

    def _handler(self, command_args):
        super()._handler(command_args)
        self._execute_operations()
        return self._output()

    _args_schema = None

    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)

        # define Arg Group ""

        _args_schema = cls._args_schema
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.vm_name = AAZStrArg(
            options=["-n", "--name", "--vm-name"],
            help="The name of the Virtual Machine. You can configure the default using `az configure --defaults vm=<name>`",
            required=True,
            id_part="name",
            configured_default="vm",
        )
        return cls._args_schema

    def _execute_operations(self):
        self.pre_operations()
        self.VirtualMachinesListAvailableSizes(ctx=self.ctx)()
        self.post_operations()

    @register_callback
    def pre_operations(self):
        pass

    @register_callback
    def post_operations(self):
        pass

    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
        return result

    class VirtualMachinesListAvailableSizes(AAZHttpOperation):
        CLIENT_TYPE = "MgmtClient"

        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            if session.http_response.status_code in [200]:
                # METHOD_NAME placeholder restored to the conventional
                # aaz-dev-tools per-status handler name.
                return self.on_200(session)

            return self.on_error(session.http_response)

        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes",
                **self.url_parameters
            )

        @property
        def method(self):
            return "GET"

        @property
        def error_format(self):
            return "MgmtErrorFormat"

        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "vmName", self.ctx.args.vm_name,
                    required=True,
                ),
            }
            return parameters

        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2017-12-01",
                    required=True,
                ),
            }
            return parameters

        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters

        def on_200(self, session):
            # Deserialize the 200 payload into the command's output variable.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )

        _schema_on_200 = None

        @classmethod
        def _build_schema_on_200(cls):
            if cls._schema_on_200 is not None:
                return cls._schema_on_200

            cls._schema_on_200 = AAZObjectType()

            _schema_on_200 = cls._schema_on_200
            _schema_on_200.value = AAZListType()

            value = cls._schema_on_200.value
            value.Element = AAZObjectType()

            _element = cls._schema_on_200.value.Element
            _element.max_data_disk_count = AAZIntType(
                serialized_name="maxDataDiskCount",
            )
            _element.memory_in_mb = AAZIntType(
                serialized_name="memoryInMB",
            )
            _element.name = AAZStrType()
            _element.number_of_cores = AAZIntType(
                serialized_name="numberOfCores",
            )
            _element.os_disk_size_in_mb = AAZIntType(
                serialized_name="osDiskSizeInMB",
            )
            _element.resource_disk_size_in_mb = AAZIntType(
                serialized_name="resourceDiskSizeInMB",
            )

            return cls._schema_on_200
class _ListVmResizeOptionsHelper:
    """Helper class for ListVmResizeOptions"""
    # Intentionally empty: aaz-dev-tools always emits this container for
    # shared serialization helpers, even when the command needs none.
__all__ = ["ListVmResizeOptions"] |
4,099 | submit | import os
import re
import shutil
import socket
import tempfile
import six
from django.utils.translation import gettext_lazy as _
# Maps OIOIOI language display names to the identifiers MOSS expects.
OIOIOI_LANGUAGE_TO_MOSS = {
    "C++": "cc",
    "C": "c",
    "Python": "python",
    "Pascal": "pascal",
    "Java": "java",
}
# Languages that can be submitted to MOSS (the mapping's keys).
MOSS_SUPPORTED_LANGUAGES = set(OIOIOI_LANGUAGE_TO_MOSS)
class MossException(Exception):
    """Raised for any failure while preparing or sending a MOSS query."""

    def __init__(self, message):
        # Chain to Exception so args, repr() and pickling behave normally;
        # the original skipped this, leaving e.args empty.
        super().__init__(message)
        self.message = message

    def __str__(self):
        return str(self.message)
# Based on: https://github.com/soachishti/moss.py
class MossClient(object):
    """Thin socket client for the MOSS plagiarism-detection service.

    Queue files with add_file(), then call submit() to upload them and
    obtain the URL of the generated report.
    """

    HOSTNAME = 'moss.stanford.edu'
    PORT = 7690
    # Accepted report URLs, e.g. http://moss.stanford.edu/results/123/456
    RESULT_URL_REGEX = re.compile(r"^http://moss\.stanford\.edu/results/\d+/\d+$")

    def __init__(self, userid, lang):
        """Create a client for MOSS account *userid* and language *lang*.

        *lang* must be a key of OIOIOI_LANGUAGE_TO_MOSS (raises KeyError
        otherwise).
        """
        self.userid = userid
        self.lang = OIOIOI_LANGUAGE_TO_MOSS[lang]
        self.files = []

    def add_file(self, filepath, name):
        """Queue local file *filepath* to be uploaded as submission *name*."""
        self.files.append((filepath, name))

    def submit(self, query_comment=""):
        """Upload all queued files and return the MOSS report URL.

        Raises MossException on rejection, an empty queue, connection
        failure, or a malformed response URL.
        """
        sock = socket.socket()
        try:
            sock.connect((self.HOSTNAME, self.PORT))
            prelude = (
                "moss %(userid)d\n"
                "directory %(directory_mode)d\n"
                "X %(experimental)d\n"
                "maxmatches %(maxmatches)d\n"
                "show %(show)d\n"
                "language %(language)s\n"
                % {
                    # default MOSS settings taken from the official script
                    'userid': self.userid,
                    'directory_mode': 0,
                    'experimental': 0,
                    'maxmatches': 10,
                    'show': 250,
                    'language': self.lang,
                }
            )
            sock.sendall(six.ensure_binary(prelude))
            # The server answers "yes" when the user id and language are ok.
            response = sock.recv(32)
            if not response.startswith(b"yes"):
                sock.sendall(b"end\n")
                raise MossException(_("Moss rejected the query, check your user ID."))
            if not self.files:
                raise MossException(_("Can't make a query with no submissions."))
            for i, (path, name) in enumerate(self.files):
                size = os.path.getsize(path)
                message = "file %d %s %d %s\n" % (
                    i + 1,  # file id
                    self.lang,  # programming language
                    size,  # file size
                    name,  # name of the submission
                )
                sock.sendall(six.ensure_binary(message))
                with open(path, 'rb') as f:
                    if hasattr(sock, 'sendfile'):  # new in Python 3.5
                        # sendfile may send a partial file; loop until done.
                        while f.tell() != os.fstat(f.fileno()).st_size:
                            sock.sendfile(f)
                    else:
                        for chunk in iter(lambda: f.read(4096), b''):
                            sock.sendall(chunk)
            sock.sendall(six.ensure_binary("query 0 %s\n" % query_comment))
            url = sock.recv(256)
            try:
                url = six.ensure_text(url).replace('\n', '')
            except UnicodeError:
                raise MossException(_("Moss returned an invalid url."))
            if not self.RESULT_URL_REGEX.match(url):
                raise MossException(_("Moss returned an invalid url."))
            sock.sendall(b"end\n")
        except (OSError, IOError, socket.herror, socket.gaierror, socket.timeout):
            raise MossException(_("Could not connect with the MOSS."))
        finally:
            sock.close()
        return url
def submit_and_get_url(client, submission_collector):
    """Stage every collected submission, upload via *client* and return
    the URL of the resulting MOSS report.

    Raises MossException when there are no submissions to send.
    """
    submission_list = submission_collector.collect_list()
    if not submission_list:
        raise MossException(_("Can't make a query with no submissions."))
    # TemporaryDirectory guarantees cleanup of the staged sources even if
    # fetching or uploading fails (equivalent to mkdtemp + finally rmtree).
    with tempfile.TemporaryDirectory() as tmpdir:
        for s in submission_list:
            # Label: author's initials + user id + '_' + submission id,
            # e.g. "AB7_3".
            display_name = "{}{}{}_{}".format(
                s.first_name[0] if s.first_name else '',
                s.last_name[0] if s.last_name else '',
                s.user_id,
                s.submission_id,
            )
            dest = os.path.join(tmpdir, display_name)
            submission_collector.get_submission_source(dest, s.source_file)
            client.add_file(dest, display_name)
        # METHOD_NAME placeholder restored: MossClient.submit() uploads and
        # returns the report URL.
        return client.submit()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.